def clone_git_repo(self):
    # Configure the retry policy first, then wrap and invoke the command.
    tenacity.retry(
        wait=tenacity.wait_fixed(30),
        stop=tenacity.stop_after_attempt(5),
    )(lambda: subprocess.run(
        ["git", "clone", "--quiet", self.repo_url, self.git_repo_path],
        check=True,
    ))()

    try:
        tenacity.retry(
            wait=tenacity.wait_fixed(30),
            stop=tenacity.stop_after_attempt(5),
        )(lambda: subprocess.run(
            ["git", "pull", "--quiet", self.repo_url, "master"],
            cwd=self.git_repo_path,
            capture_output=True,
            check=True,
        ))()
    except subprocess.CalledProcessError as e:
        # When the repo is empty.
        if b"Couldn't find remote ref master" in e.stdout:
            pass

def _blocking_poll(self, timeout=None):
    """Poll and wait for the Future to be resolved.

    Args:
        timeout (int): How long to wait for the operation to complete.
            If None, wait indefinitely.
    """
    if self._result_set:
        return

    retry_on = tenacity.retry_if_result(
        functools.partial(operator.is_not, True))
    # Use exponential backoff with jitter.
    wait_on = (tenacity.wait_exponential(multiplier=1, max=10) +
               tenacity.wait_random(0, 1))

    if timeout is None:
        retry = tenacity.retry(retry=retry_on, wait=wait_on)
    else:
        retry = tenacity.retry(
            retry=retry_on,
            wait=wait_on,
            stop=tenacity.stop_after_delay(timeout))

    try:
        retry(self.done)()
    except tenacity.RetryError as exc:
        six.raise_from(
            concurrent.futures.TimeoutError(
                'Operation did not complete within the designated '
                'timeout.'),
            exc)

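The polling example above composes two wait strategies and a result predicate. Here is a minimal, self-contained sketch of the same idea; the `poll_done` helper and its counter are illustrative stand-ins, not part of the example above:

import tenacity

# Illustrative stand-in for Future.done(): reports completion on the third call.
_calls = {"count": 0}

def poll_done():
    _calls["count"] += 1
    return _calls["count"] >= 3

# Retry while the result is not True, waiting with exponential backoff plus jitter.
poller = tenacity.retry(
    retry=tenacity.retry_if_result(lambda result: result is not True),
    wait=tenacity.wait_exponential(multiplier=1, max=10) + tenacity.wait_random(0, 1),
    stop=tenacity.stop_after_delay(30),
)(poll_done)

print(poller())  # True once poll_done() reports completion; RetryError if the delay expires first
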
def clone_git_repo(self, repo_url, repo_dir):
    if not os.path.exists(repo_dir):
        tenacity.retry(
            wait=tenacity.wait_exponential(multiplier=1, min=16, max=64),
            stop=tenacity.stop_after_attempt(5),
        )(
            lambda: subprocess.run(
                ["git", "clone", "--quiet", repo_url, repo_dir], check=True
            )
        )()

        logger.info(f"{repo_dir} cloned")

    logger.info(f"Fetching {repo_dir}")

    tenacity.retry(
        wait=tenacity.wait_exponential(multiplier=1, min=16, max=64),
        stop=tenacity.stop_after_attempt(5),
    )(
        lambda: subprocess.run(
            ["git", "fetch", "--quiet"],
            cwd=repo_dir,
            capture_output=True,
            check=True,
        )
    )()

    logger.info(f"{repo_dir} fetched")

def test_retry_type_annotations(self):
    """The decorator should maintain types of decorated functions."""
    # Just in case this is run with unit-test, return early for py2
    if sys.version_info < (3, 0):
        return

    # Function-level import because we can't install this for python 2.
    from typeguard import check_type

    def num_to_str(number):
        # type: (int) -> str
        return str(number)

    # equivalent to a raw @retry decoration
    with_raw = retry(num_to_str)
    with_raw_result = with_raw(1)

    # equivalent to a @retry(...) decoration
    with_constructor = retry()(num_to_str)
    with_constructor_result = with_constructor(1)

    # These raise TypeError exceptions if they fail
    check_type("with_raw", with_raw, typing.Callable[[int], str])
    check_type("with_raw_result", with_raw_result, str)
    check_type("with_constructor", with_constructor,
               typing.Callable[[int], str])
    check_type("with_constructor_result", with_constructor_result, str)

def clone_git_repo(self, repo_url, repo_dir, rev="origin/branches/default/tip"):
    logger.info(f"Cloning {repo_url}...")

    if not os.path.exists(repo_dir):
        tenacity.retry(
            wait=tenacity.wait_exponential(multiplier=1, min=16, max=64),
            stop=tenacity.stop_after_attempt(5),
        )(lambda: subprocess.run(
            ["git", "clone", "--quiet", repo_url, repo_dir], check=True))()

    tenacity.retry(
        wait=tenacity.wait_exponential(multiplier=1, min=16, max=64),
        stop=tenacity.stop_after_attempt(5),
    )(lambda: subprocess.run(
        ["git", "fetch"],
        cwd=repo_dir,
        capture_output=True,
        check=True,
    ))()

    subprocess.run(
        ["git", "checkout", rev], cwd=repo_dir, capture_output=True, check=True)

def wait(self):
    """Blocks until mountebank is accessible (or waiting times out)."""
    def wait():
        requests.get(self.mountebank_url).raise_for_status()

    retry(wait=wait_fixed(1), stop=stop_after_attempt(10))(wait)()

def clone_git_repo(self, repo_url, repo_dir):
    if not os.path.exists(repo_dir):
        tenacity.retry(
            wait=tenacity.wait_fixed(30),
            stop=tenacity.stop_after_attempt(5),
        )(lambda: subprocess.run(
            ["git", "clone", "--quiet", repo_url, repo_dir], check=True
        ))()

        logger.info(f"{repo_dir} cloned")

    logger.info(f"Pulling and updating {repo_dir}")

    tenacity.retry(
        wait=tenacity.wait_fixed(30),
        stop=tenacity.stop_after_attempt(5),
    )(lambda: subprocess.run(
        ["git", "pull", "--quiet", repo_url, "master"],
        cwd=repo_dir,
        capture_output=True,
        check=True,
    ))()

    logger.info(f"{repo_dir} pulled and updated")

async def multi_tasks(cls, tasks: Union[Iterable, Iterator],
                      to_do_func: Optional[Callable] = None,
                      concur_req: int = 4, rate: float = 1.5,
                      logger: Optional[logging.Logger] = None):
    '''
    Template for multiTasking

    TODO
        1. asyncio.Semaphore
        2. unit func
    '''
    cls.init_logger('UnsyncFetch', logger)
    cls.retry_kwargs['after'] = after_log(cls.logger, logging.WARNING)
    # Pass the retry options as keyword arguments, then wrap the callables.
    cls.http_download = retry(**cls.retry_kwargs)(cls.http_download)
    cls.ftp_download = retry(**cls.retry_kwargs)(cls.ftp_download)
    semaphore = asyncio.Semaphore(concur_req)
    if to_do_func is None:
        tasks = [
            cls.fetch_file(semaphore, method, info, path, rate)
            for method, info, path in tasks
        ]
    else:
        tasks = [
            cls.fetch_file(semaphore, method, info, path, rate).then(to_do_func)
            for method, info, path in tasks
        ]
    # return await asyncio.gather(*tasks)
    return [
        await fob
        for fob in tqdm(asyncio.as_completed(tasks), total=len(tasks))
    ]

def generate(self):
    db_path = os.path.join("data", self.git_repo_path)
    db.register(
        db_path,
        "https://s3-us-west-2.amazonaws.com/communitytc-bugbug/data/",
        VERSION,
    )

    is_old_version = db.is_old_schema(db_path)

    with ThreadPoolExecutorResult(max_workers=2) as executor:
        cloner = executor.submit(repository.clone, self.repo_dir)
        cloner.add_done_callback(
            lambda future: logger.info("mozilla-central cloned")
        )

        git_user = get_secret("GIT_USER")
        git_password = get_secret("GIT_PASSWORD")

        repo_push_url = self.repo_url.replace(
            "https://", f"https://{git_user}:{git_password}@"
        )

        if not is_old_version:
            executor.submit(self.clone_git_repo)
        else:
            executor.submit(self.init_git_repo)

    tenacity.retry(
        wait=tenacity.wait_fixed(30),
        stop=tenacity.stop_after_attempt(5),
    )(lambda: subprocess.run(
        ["git", "config", "--global", "http.postBuffer", "12M"], check=True
    ))()

    push_args = ["git", "push", repo_push_url, "master"]
    if is_old_version:
        push_args.append("--force")

    done = False
    while not done:
        done = generator.generate(
            self.repo_dir,
            self.git_repo_path,
            limit=COMMITS_STEP,
            tokenize=self.tokenize,
            remove_comments=self.remove_comments,
        )

        tenacity.retry(
            wait=tenacity.wait_fixed(30),
            stop=tenacity.stop_after_attempt(5),
        )(lambda: subprocess.run(push_args, cwd=self.git_repo_path, check=True))()

    # We are not using db.upload as we don't need to upload the git repo.
    upload_s3([f"{db_path}.version"])

def generate(self):
    db_path = os.path.join("data", self.git_repo_path)
    db.register(
        db_path,
        f"https://community-tc.services.mozilla.com/api/index/v1/task/project.relman.bugbug.microannotate_{self.git_repo_path}.latest/artifacts/public/",
        VERSION,
    )

    # TODO: Check the version again once we can run tasks for longer
    # (https://bugzilla.mozilla.org/show_bug.cgi?id=1604175).
    is_old_version = False  # db.is_old_schema(db_path)

    with ThreadPoolExecutorResult(max_workers=2) as executor:
        cloner = executor.submit(repository.clone, self.repo_dir)
        cloner.add_done_callback(
            lambda future: logger.info("mozilla-central cloned"))

        git_user = get_secret("GIT_USER")
        git_password = get_secret("GIT_PASSWORD")

        repo_push_url = self.repo_url.replace(
            "https://", f"https://{git_user}:{git_password}@")

        if not is_old_version:
            executor.submit(self.clone_git_repo)
        else:
            executor.submit(self.init_git_repo)

    tenacity.retry(
        wait=tenacity.wait_fixed(30),
        stop=tenacity.stop_after_attempt(5),
    )(lambda: subprocess.run(
        ["git", "config", "--global", "http.postBuffer", "12M"], check=True))()

    push_args = ["git", "push", repo_push_url, "master"]
    if is_old_version:
        push_args.append("--force")

    done = False
    while not done:
        done = generator.generate(
            self.repo_dir,
            self.git_repo_path,
            limit=COMMITS_STEP,
            tokenize=self.tokenize,
            remove_comments=self.remove_comments,
        )

        tenacity.retry(
            wait=tenacity.wait_fixed(30),
            stop=tenacity.stop_after_attempt(5),
        )(lambda: subprocess.run(
            push_args, cwd=self.git_repo_path, check=True))()

def clone_git_repo(self, repo_url, repo_dir):
    if not os.path.exists(repo_dir):
        retry(lambda: subprocess.run(
            ["git", "clone", "--quiet", repo_url, repo_dir], check=True))

    retry(lambda: subprocess.run(
        ["git", "pull", "--quiet", repo_url, "master"],
        cwd=repo_dir,
        capture_output=True,
        check=True,
    ))

def __init__(
    self,
    base_url: str,
    default_params: Optional[dict] = None,
    default_headers: Optional[dict] = None,
    timeout: Optional[float] = None,
    ttl_dns_cache: Optional[float] = None,
    max_retries: Optional[int] = 2,
    retry_delay: Optional[float] = 0.5,
    proxy_url: Optional[str] = None,
):
    super().__init__()
    if base_url is None:
        raise RuntimeError(
            f'`base_url` must be passed for {self.__class__.__name__} constructor'
        )
    self.base_url = base_url.rstrip('/')
    self.ttl_dns_cache = ttl_dns_cache
    self.proxy_url = proxy_url
    self.session = None
    self.default_params = CIMultiDict(filter_none(default_params or {}))
    self.default_headers = default_headers or {}
    self.timeout = timeout
    self.max_retries = max_retries
    self.retry_delay = retry_delay
    self.request = retry(
        retry=retry_if_exception_type(self.temporary_errors),
        stop=stop_after_attempt(max_retries) if max_retries is not None else None,
        wait=wait_fixed(retry_delay),
        before_sleep=before_sleep_log(
            logging.getLogger('aiobaseclient'), logging.WARNING),
        reraise=True,
    )(self.request)

def retry_upon_none_result(max_attempts, delay=0.5, max_delay=2):
    return tenacity.retry(reraise=True,
                          retry=tenacity.retry_if_result(lambda x: x is None),
                          wait=tenacity.wait_exponential(
                              multiplier=delay, max=max_delay),
                          stop=tenacity.stop_after_attempt(max_attempts),
                          before=_log_before_retry, after=_log_after_retry)

def retry_upon_exception(exc, delay, max_delay, max_attempts):
    return tenacity.retry(reraise=True,
                          retry=tenacity.retry_if_exception_type(exc),
                          wait=tenacity.wait_exponential(
                              multiplier=delay, max=max_delay),
                          stop=tenacity.stop_after_attempt(max_attempts),
                          before=_log_before_retry, after=_log_after_retry)

def decorated_function(*args, **kwargs) -> _RT:
    return tenacity.retry(
        retry=tenacity.retry_if_exception(is_retriable),
        stop=tenacity.stop.stop_after_attempt(_RETRY_UTILS_MAX_RETRIES),
        wait=tenacity.wait.wait_exponential(max=_RETRY_UTILS_MAX_RETRIES),
        after=tenacity.after.after_log(_LOGGER, logging.DEBUG),
        reraise=True)(function)(*args, **kwargs)

def __init__(self, queue, destination, namegen, add_metadata=False,
             dump_json=False, dump_only=False, pbar=None, session=None):
    super(InstaDownloader, self).__init__()

    self.queue = queue
    self.destination = destination
    self.namegen = namegen
    self.session = session or requests.Session()
    self.pbar = pbar
    self.dump_only = dump_only
    self.dump_json = dump_json or dump_only
    self.add_metadata = add_metadata

    self._killed = False
    self._downloading = None

    retry = tenacity.retry(**self._tenacity_options)
    self._DOWNLOAD_METHODS = {
        "GraphImage": retry(self._download_image),
        "GraphVideo": retry(self._download_video),
        "GraphSidecar": self._download_sidecar,
    }

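The reuse shown above, in isolation: a single decorator produced by tenacity.retry(...) can wrap several callables. The download functions below are made-up placeholders:

import random
import tenacity

# One shared retry policy, configured once.
retrying = tenacity.retry(
    wait=tenacity.wait_fixed(0.1),
    stop=tenacity.stop_after_attempt(3),
    reraise=True,
)

def download_image(url):
    # Placeholder for a flaky network call; fails randomly.
    if random.random() < 0.3:
        raise OSError("transient failure")
    return f"image from {url}"

def download_video(url):
    if random.random() < 0.3:
        raise OSError("transient failure")
    return f"video from {url}"

DOWNLOAD_METHODS = {
    "GraphImage": retrying(download_image),
    "GraphVideo": retrying(download_video),
}

print(DOWNLOAD_METHODS["GraphImage"]("https://example.com/a.jpg"))
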
def retry_on_conflict(func):
    wrapper = tenacity.retry(stop=tenacity.stop_after_attempt(11),
                             wait=tenacity.wait_random(max=0.002),
                             retry=tenacity.retry_if_exception_type(
                                 exception.ConcurrentTransaction),
                             reraise=True)
    return wrapper(func)

def _retry(func, retry_if_result=is_false, **kwargs):
    kwargs.setdefault('retry', tenacity.retry_if_result(retry_if_result))
    kwargs.setdefault('wait', tenacity.wait_fixed(1))
    kwargs.setdefault('stop', tenacity.stop_after_delay(10))
    decorator = tenacity.retry(**kwargs)
    decorated_func = decorator(func)
    return decorated_func()

def decorator_f(self, *args, **kwargs):
    retry_args = getattr(self, 'retry_args', None)
    if retry_args is None:
        return fun(self, *args, **kwargs)
    multiplier = retry_args.get('multiplier', 1)
    min_limit = retry_args.get('min', 1)
    max_limit = retry_args.get('max', 1)
    stop_after_delay = retry_args.get('stop_after_delay', 10)
    tenacity_logger = tenacity.before_log(
        self.log, logging.DEBUG) if self.log else None
    default_kwargs = {
        'wait': tenacity.wait_exponential(multiplier=multiplier,
                                          max=max_limit, min=min_limit),
        'retry': tenacity.retry_if_exception(should_retry),
        'stop': tenacity.stop_after_delay(stop_after_delay),
        'before': tenacity_logger,
        'after': tenacity_logger,
    }
    return tenacity.retry(**default_kwargs)(fun)(self, *args, **kwargs)

def retry_on_conflict(func):
    wrapper = tenacity.retry(stop=tenacity.stop_after_attempt(11),
                             wait=tenacity.wait_random_exponential(
                                 multiplier=0.5, max=60),
                             retry=tenacity.retry_if_exception_type(
                                 exception.ConcurrentTransaction),
                             reraise=True)
    return wrapper(func)

def channel(self):
    if not self._connection or not self._channel.is_open:
        stop = stop_after_delay(self.config.market_expiration_time)
        wait = wait_exponential(max=5)
        retry = tenacity.retry(stop=stop, wait=wait)
        retry(self.ensure_connected)()
    return self._channel

def test_retry(stop_max_delay=None, **kwargs):
    k = {"wait": _default_wait, "retry": lambda x: False}
    if kwargs:
        for key in kwargs:
            k[key] = kwargs[key]
    if stop_max_delay not in (True, False, None):
        k['stop'] = stop.stop_after_delay(stop_max_delay)
    return tenacity.retry(**k)

def svn_retry():
    return retry(
        retry=retry_if_exception(is_retryable_svn_exception),
        wait=wait_exponential(exp_base=SVN_RETRY_WAIT_EXP_BASE),
        stop=stop_after_attempt(max_attempt_number=SVN_RETRY_MAX_ATTEMPTS),
        before_sleep=before_sleep_log(logger, logging.DEBUG),
        reraise=True,
    )

def make_retry_decorator(
        retries: int, delay: float
) -> typing.Callable[[typing.Callable], typing.Callable]:
    return retry(wait=wait_fixed(delay),
                 retry=(retry_if_result(lambda res: res.status >= 500) |
                        retry_if_exception_type(
                            exception_types=aiohttp.ClientError)),
                 stop=stop_after_attempt(retries + 1))

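Retry conditions compose with `|` (and `&`), as in the decorator above. A standalone sketch with made-up names:

import tenacity

class TransientError(Exception):
    pass

attempts = {"count": 0}

def flaky():
    attempts["count"] += 1
    if attempts["count"] == 1:
        raise TransientError("try again")  # retried via retry_if_exception_type
    if attempts["count"] == 2:
        return None                        # retried via retry_if_result
    return "ok"

result = tenacity.retry(
    retry=(tenacity.retry_if_result(lambda res: res is None)
           | tenacity.retry_if_exception_type(TransientError)),
    wait=tenacity.wait_fixed(0.1),
    stop=tenacity.stop_after_attempt(5),
)(flaky)()

print(result)  # "ok" on the third attempt
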
def wrapper(*args, **kwargs):
    retried = retry(*retry_args, **retry_kwargs)(f)
    # invoke the function using retries
    result = retried(*args, **kwargs)
    # if the result is a Deferred, wrap the inner
    # callback(s) in retries
    if isinstance(result, Deferred):
        result.callbacks[:] = [
            (
                (
                    retry(*retry_args, **retry_kwargs)(callback),
                    callbackArgs,
                    callbackKeywords,
                ),
                errback_spec,
            )
            for (callback, callbackArgs, callbackKeywords), errback_spec
            in result.callbacks
        ]
    return result

def retry_random_upon_exception(exc, delay=0.5, max_delay=5,
                                max_attempts=DEFAULT_MAX_ATTEMPTS):
    return tenacity.retry(reraise=True,
                          retry=tenacity.retry_if_exception_type(exc),
                          wait=tenacity.wait_random_exponential(
                              multiplier=delay, max=max_delay),
                          stop=tenacity.stop_after_attempt(max_attempts),
                          before=_log_before_retry, after=_log_after_retry)

async def fetch(self):
    """
    Fetch and return data.

    Calls self._fetch_ with a retry mechanism
    """
    attempter = retry(**self._retry_config)(self._fetch_)
    res = await attempter()
    return res

def retry_on_conflict(func):
    wrapper = tenacity.retry(
        stop=tenacity.stop_after_attempt(11),
        wait=tenacity.wait_random(max=0.002),
        retry=tenacity.retry_if_exception_type(exception.ConcurrentTransaction),
        reraise=True,
    )
    return wrapper(func)

def _safe_mongo_call(max_retries, retry_interval):
    return tenacity.retry(
        retry=tenacity.retry_if_exception_type(
            pymongo.errors.AutoReconnect),
        wait=tenacity.wait_fixed(retry_interval),
        stop=(tenacity.stop_after_attempt(max_retries) if max_retries >= 0
              else tenacity.stop_never)
    )

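The conditional stop above (retry forever when max_retries is negative, otherwise cap the attempts) works the same way outside MongoDB; in this sketch, the built-in ConnectionError merely stands in for pymongo.errors.AutoReconnect:

import tenacity

def safe_call(max_retries, retry_interval):
    # A negative max_retries means "retry forever"; otherwise cap the attempts.
    return tenacity.retry(
        retry=tenacity.retry_if_exception_type(ConnectionError),
        wait=tenacity.wait_fixed(retry_interval),
        stop=(tenacity.stop_after_attempt(max_retries)
              if max_retries >= 0 else tenacity.stop_never),
        reraise=True,
    )

@safe_call(max_retries=2, retry_interval=0.1)
def ping():
    raise ConnectionError("still down")

# ping()  # raises ConnectionError after the second attempt
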
def wrapped(*args, **kwargs):
    self = args[0]
    new_fn = tenacity.retry(
        reraise=True,
        retry=tenacity.retry_if_result(_ovsdb_result_pending),
        wait=tenacity.wait_exponential(multiplier=0.01, max=1),
        stop=tenacity.stop_after_delay(self.vsctl_timeout))(fn)
    return new_fn(*args, **kwargs)

def decorator(fun: T):
    default_kwargs = {
        'wait': tenacity.wait_exponential(multiplier=1, max=300),
        'retry': retry_if_operation_in_progress(),
        'before': tenacity.before_log(log, logging.DEBUG),
        'after': tenacity.after_log(log, logging.DEBUG),
    }
    default_kwargs.update(**kwargs)
    return cast(T, tenacity.retry(*args, **default_kwargs)(fun))

def decorator(fun: Callable):
    default_kwargs = {
        'wait': tenacity.wait_exponential(multiplier=1, max=100),
        'retry': retry_if_temporary_quota(),
        'before': tenacity.before_log(log, logging.DEBUG),
        'after': tenacity.after_log(log, logging.DEBUG),
    }
    default_kwargs.update(**kwargs)
    return tenacity.retry(*args, **default_kwargs)(fun)

def wrapped(*args, **kwargs):
    self = args[0]
    new_fn = tenacity.retry(
        reraise=True,
        retry=tenacity.retry_if_result(_ofport_result_pending),
        wait=tenacity.wait_exponential(multiplier=0.01, max=1),
        stop=tenacity.stop_after_delay(self.vsctl_timeout))(fn)
    return new_fn(*args, **kwargs)

def retry_upon_none_result(max_attempts, delay=0.5, max_delay=2, random=False):
    # Use the randomized (jittered) backoff only when random is requested.
    if random:
        wait_func = tenacity.wait_random_exponential(
            multiplier=delay, max=max_delay)
    else:
        wait_func = tenacity.wait_exponential(
            multiplier=delay, max=max_delay)
    return tenacity.retry(reraise=True,
                          retry=tenacity.retry_if_result(lambda x: x is None),
                          wait=wait_func,
                          stop=tenacity.stop_after_attempt(max_attempts),
                          before=_log_before_retry, after=_log_after_retry)

def send_notification(self, target, ctxt, message, version, retry=None):
    if retry is None:
        retry = self._pika_engine.default_notification_retry_attempts

    def on_exception(ex):
        if isinstance(ex, (pika_drv_exc.ExchangeNotFoundException,
                           pika_drv_exc.RoutingException)):
            LOG.warning("Problem during sending notification. %s", ex)
            try:
                self._declare_notification_queue_binding(target)
            except pika_drv_exc.ConnectionException as e:
                LOG.warning("Problem during declaring notification queue "
                            "binding. %s", e)
            return True
        elif isinstance(ex, (pika_drv_exc.ConnectionException,
                             pika_drv_exc.MessageRejectedException)):
            LOG.warning("Problem during sending notification. %s", ex)
            return True
        else:
            return False

    if retry:
        retrier = tenacity.retry(
            stop=(tenacity.stop_never if retry == -1 else
                  tenacity.stop_after_attempt(retry)),
            retry=tenacity.retry_if_exception(on_exception),
            wait=tenacity.wait_fixed(
                self._pika_engine.notification_retry_delay
            )
        )
    else:
        retrier = None

    msg = pika_drv_msg.PikaOutgoingMessage(self._pika_engine, message, ctxt)
    return msg.send(
        exchange=(
            target.exchange or
            self._pika_engine.default_notification_exchange
        ),
        routing_key=target.topic,
        confirm=True,
        mandatory=True,
        persistent=self._pika_engine.notification_persistence,
        retrier=retrier
    )

def retry_upon_exception_exclude_error_codes(
        exc, excluded_errors, delay, max_delay, max_attempts):
    """Retry with the configured exponential delay, unless the exception
    error code is in the given list
    """
    def retry_if_not_error_codes(e):
        # return True only for BadRequests without error codes or with error
        # codes not in the exclude list
        if isinstance(e, exc):
            error_code = _get_bad_request_error_code(e)
            if error_code and error_code not in excluded_errors:
                return True
        return False

    return tenacity.retry(reraise=True,
                          retry=tenacity.retry_if_exception(
                              retry_if_not_error_codes),
                          wait=tenacity.wait_exponential(
                              multiplier=delay, max=max_delay),
                          stop=tenacity.stop_after_attempt(max_attempts),
                          before=_log_before_retry, after=_log_after_retry)

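The same predicate idea in a self-contained form: retry_if_exception receives the raised exception and retries only when the callable returns True. The BadRequest class and its error_code attribute below are illustrative assumptions, not the API used above:

import tenacity

class BadRequest(Exception):
    def __init__(self, error_code=None):
        super().__init__(f"bad request (code={error_code})")
        self.error_code = error_code  # illustrative attribute

EXCLUDED_ERROR_CODES = {202, 404}

def retry_if_not_excluded(exc):
    # Retry BadRequest only when it carries a code outside the exclusion list.
    return (isinstance(exc, BadRequest)
            and exc.error_code is not None
            and exc.error_code not in EXCLUDED_ERROR_CODES)

@tenacity.retry(
    reraise=True,
    retry=tenacity.retry_if_exception(retry_if_not_excluded),
    wait=tenacity.wait_exponential(multiplier=0.5, max=2),
    stop=tenacity.stop_after_attempt(3),
)
def call_backend():
    raise BadRequest(error_code=500)

# call_backend()  # retried twice, then BadRequest is re-raised (reraise=True)
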
def retry_on_stale_data_error(func):
    wrapper = tenacity.retry(
        stop=tenacity.stop_after_attempt(3),
        retry=tenacity.retry_if_exception_type(exc.StaleDataError),
        reraise=True)
    return wrapper(func)

def retry(stop_max_delay=None, **kwargs):
    k = {"wait": _default_wait, "retry": lambda x: False}
    if stop_max_delay not in (True, False, None):
        k['stop'] = stop.stop_after_delay(stop_max_delay)
    return tenacity.retry(**k)

def request(self,
            method,
            additional_headers=None,
            retry=True,
            timeout=None,
            auth=None,
            use_gzip_encoding=None,
            params=None,
            max_attempts=None,
            **kwargs):
    """
    Make an HTTP request by calling self._request with backoff retry.

    :param method: request method
    :type method: str
    :param additional_headers: additional headers to include in the request
    :type additional_headers: dict[str, str]
    :param retry: boolean indicating whether to retry if the request fails
    :type retry: boolean
    :param timeout: timeout in seconds, overrides default_timeout_secs
    :type timeout: float
    :param auth: auth scheme for the request
    :type auth: requests.auth.AuthBase
    :param use_gzip_encoding: boolean indicating whether to pass gzip
        encoding in the request headers or not
    :type use_gzip_encoding: boolean | None
    :param params: additional params to include in the request
    :type params: str | dict[str, T] | None
    :param max_attempts: maximum number of attempts to try for any request
    :type max_attempts: int
    :param kwargs: additional arguments to pass to requests.request
    :type kwargs: dict[str, T]
    :return: HTTP response
    :rtype: requests.Response
    """
    request = self._request

    if retry:
        if max_attempts is None:
            max_attempts = self.default_max_attempts

        # We retry only when it makes sense: either due to a network
        # partition (e.g. connection errors) or if the request failed
        # due to a server error such as 500s, timeouts, and so on.
        request = tenacity.retry(
            stop=tenacity.stop_after_attempt(max_attempts),
            wait=tenacity.wait_exponential(),
            retry=tenacity.retry_if_exception_type((
                requests.exceptions.Timeout,
                requests.exceptions.ConnectionError,
                MesosServiceUnavailableException,
                MesosInternalServerErrorException,
            )),
            reraise=True,
        )(request)

    try:
        return request(
            method=method,
            additional_headers=additional_headers,
            timeout=timeout,
            auth=auth,
            use_gzip_encoding=use_gzip_encoding,
            params=params,
            **kwargs
        )
    # If the request itself failed, an exception subclassed from
    # RequestException will be raised. Catch this and reraise as
    # MesosException since we want the caller to be able to catch
    # and handle this.
    except requests.exceptions.RequestException as err:
        raise MesosException('Request failed', err)

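A small standalone illustration of the reraise=True behaviour relied on above: once the attempts are exhausted, the original exception propagates instead of tenacity.RetryError. The unreachable URL is a made-up stand-in:

import requests
import tenacity

@tenacity.retry(
    stop=tenacity.stop_after_attempt(3),
    wait=tenacity.wait_exponential(multiplier=0.1, max=1),
    retry=tenacity.retry_if_exception_type(requests.exceptions.ConnectionError),
    reraise=True,
)
def fetch_unreachable():
    # Assumes nothing listens on port 9, so each attempt raises ConnectionError.
    return requests.get("http://127.0.0.1:9", timeout=0.5)

try:
    fetch_unreachable()
except requests.exceptions.ConnectionError as err:
    print(f"gave up after 3 attempts: {err}")
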
def reply(self, reply=None, failure=None):
    """Send back reply to the RPC client

    :param reply: Dictionary, reply. In case of exception should be None
    :param failure: Tuple, should be a sys.exc_info() tuple.
        Should be None if RPC request was successfully processed.

    :return RpcReplyPikaIncomingMessage, message with reply
    """
    if self.reply_q is None:
        return

    reply_outgoing_message = RpcReplyPikaOutgoingMessage(
        self._pika_engine, self.msg_id, reply=reply, failure_info=failure,
        content_type=self._content_type,
    )

    def on_exception(ex):
        if isinstance(ex, pika_drv_exc.ConnectionException):
            LOG.warning(
                "Connectivity related problem during reply sending. %s", ex
            )
            return True
        else:
            return False

    if self._pika_engine.rpc_reply_retry_attempts:
        retrier = tenacity.retry(
            stop=(
                tenacity.stop_never
                if self._pika_engine.rpc_reply_retry_attempts == -1
                else tenacity.stop_after_attempt(
                    self._pika_engine.rpc_reply_retry_attempts
                )
            ),
            retry=tenacity.retry_if_exception(on_exception),
            wait=tenacity.wait_fixed(
                self._pika_engine.rpc_reply_retry_delay
            )
        )
    else:
        retrier = None

    try:
        timeout = (None if self.expiration_time is None else
                   max(self.expiration_time - time.time(), 0))
        with timeutils.StopWatch(duration=timeout) as stopwatch:
            reply_outgoing_message.send(
                reply_q=self.reply_q,
                stopwatch=stopwatch,
                retrier=retrier
            )
        LOG.debug(
            "Message [id:'%s'] replied to '%s'.", self.msg_id, self.reply_q
        )
    except Exception:
        LOG.exception(
            "Message [id:'%s'] wasn't replied to : %s",
            self.msg_id, self.reply_q
        )

def send(self, target, ctxt, message, wait_for_reply=None, timeout=None,
         retry=None):
    with timeutils.StopWatch(duration=timeout) as stopwatch:
        if retry is None:
            retry = self._pika_engine.default_rpc_retry_attempts

        exchange = self._pika_engine.get_rpc_exchange_name(
            target.exchange
        )

        def on_exception(ex):
            if isinstance(ex, pika_drv_exc.ExchangeNotFoundException):
                # It is desirable to create the exchange, because if we send
                # to an exchange which does not exist, we get a ChannelClosed
                # exception and need to reconnect.
                try:
                    self._declare_rpc_exchange(exchange, stopwatch)
                except pika_drv_exc.ConnectionException as e:
                    LOG.warning("Problem during declaring exchange. %s", e)
                return True
            elif isinstance(ex, (pika_drv_exc.ConnectionException,
                                 exceptions.MessageDeliveryFailure)):
                LOG.warning("Problem during message sending. %s", ex)
                return True
            else:
                return False

        if retry:
            retrier = tenacity.retry(
                stop=(tenacity.stop_never if retry == -1 else
                      tenacity.stop_after_attempt(retry)),
                retry=tenacity.retry_if_exception(on_exception),
                wait=tenacity.wait_fixed(self._pika_engine.rpc_retry_delay)
            )
        else:
            retrier = None

        if target.fanout:
            return self.cast_all_workers(
                exchange, target.topic, ctxt, message, stopwatch, retrier
            )

        routing_key = self._pika_engine.get_rpc_queue_name(
            target.topic, target.server, retrier is None
        )

        msg = pika_drv_msg.RpcPikaOutgoingMessage(self._pika_engine, message,
                                                  ctxt)
        try:
            reply = msg.send(
                exchange=exchange,
                routing_key=routing_key,
                reply_listener=(
                    self._reply_listener if wait_for_reply else None
                ),
                stopwatch=stopwatch,
                retrier=retrier
            )
        except pika_drv_exc.ExchangeNotFoundException as ex:
            try:
                self._declare_rpc_exchange(exchange, stopwatch)
            except pika_drv_exc.ConnectionException as e:
                LOG.warning("Problem during declaring exchange. %s", e)
            raise ex

        if reply is not None:
            if reply.failure is not None:
                raise reply.failure

            return reply.result

    setattr(klass, '_libvirt_connection', connection)
    return connection


def is_disconnection_exception(e):
    if not libvirt:
        return False
    return (isinstance(e, libvirt.libvirtError)
            and e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
                                       libvirt.VIR_ERR_INTERNAL_ERROR)
            and e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
                                         libvirt.VIR_FROM_RPC))


retry_on_disconnect = tenacity.retry(
    retry=tenacity.retry_if_exception(is_disconnection_exception),
    stop=tenacity.stop_after_attempt(2))


def raise_nodata_if_unsupported(method):
    def inner(in_self, instance, *args, **kwargs):
        try:
            return method(in_self, instance, *args, **kwargs)
        except libvirt.libvirtError as e:
            # NOTE(sileht): At this point libvirt connection errors
            # have been reraised as tenacity.RetryError()
            msg = _('Failed to inspect instance %(instance_uuid)s stats, '
                    'can not get info from libvirt: %(error)s') % {
                "instance_uuid": instance.id,
                "error": e}
            raise virt_inspector.NoDataException(msg)