def svn_retry():
    return retry(
        retry=retry_if_exception(is_retryable_svn_exception),
        wait=wait_exponential(exp_base=SVN_RETRY_WAIT_EXP_BASE),
        stop=stop_after_attempt(max_attempt_number=SVN_RETRY_MAX_ATTEMPTS),
        before_sleep=before_sleep_log(logger, logging.DEBUG),
        reraise=True,
    )
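Since svn_retry() simply returns a configured tenacity decorator, it can be applied directly to any callable that may raise a retryable SVN error; a minimal sketch, assuming a hypothetical export helper:

@svn_retry()
def svn_export(url: str, revision: int) -> None:
    # hypothetical helper; transient SVN failures raised here trigger the retry policy
    run_svn_export(url, revision)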
def auto_writewrite_retry(f: Callable) -> Callable:
    """Decorator to apply Arango Write-Write conflict retry preset"""
    return tenacity.retry(
        stop=tenacity.stop.stop_after_attempt(20),
        wait=tenacity.wait.wait_random_exponential(
            multiplier=0.1,
            max=6,
        ),
        retry=retry_if_exception(isArangoWriteWriteConflict),
        reraise=True,
    )(f)
def auto_batchjobdone_retry(f: Callable) -> Callable:
    return tenacity.retry(
        # a 30-minute timeout should be plenty in case the database
        # decides to do a checkpoint in the middle of a batch run
        stop=tenacity.stop.stop_after_delay(1800),
        wait=tenacity.wait.wait_random_exponential(
            multiplier=0.01,
            max=5,  # check at least every N seconds
        ),
        retry=retry_if_exception(isArangoBatchJobNotDone),
        reraise=True,
    )(f)
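Both presets are plain decorators, so applying one is a one-liner; a minimal usage sketch, assuming a hypothetical ArangoDB collection write:

@auto_writewrite_retry
def save_document(collection, doc: dict) -> None:
    # retried up to 20 times if Arango reports a write-write conflict
    collection.insert(doc, overwrite=True)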
async def GetGoogleCookie(self):
    """
    Gets google cookie (used for each and every proxy)
    Blacklist proxies on error.
    """
    def retry_if_proxies_remaining(ex):
        should_retry = True
        if isinstance(ex, ProxyError) and ex.response.status_code == 429:
            logging.info((
                f"Proxy {self.proxies[self.proxy_index]} responded with 429."
                " Will retry request with another proxy."))
            self._rate_limited_proxies.append(self.proxies[self.proxy_index])
            del self.proxies[self.proxy_index]
        elif len(self.proxies) > 0:
            logging.error((
                f"Proxy {self.proxies[self.proxy_index]} caused {str(ex)}."
                " Blacklisting proxy and will retry request with another."))
            self.blacklisted_proxies.append(self.proxies[self.proxy_index])
            del self.proxies[self.proxy_index]
        else:
            should_retry = False
        self._iterate_proxy()
        return should_retry

    cfg = self._retry_config
    if len(self.proxies) > 0:
        cfg = dict(retry=retry_if_exception(retry_if_proxies_remaining),
                   reraise=cfg.get('reraise', True))
    try:
        retryer = AsyncRetrying(**cfg)
        resp = await retryer.call(
            self._send_req,
            'https://trends.google.com/?geo={geo}'.format(geo=self.hl[-2:]),
            timeout=self.timeout)
    finally:
        self.proxies.extend(self._rate_limited_proxies)
        self._rate_limited_proxies.clear()
    cookies = resp.cookies.items()
    return dict(filter(lambda i: i[0] == 'NID', cookies))
def retry_api_call(func,
                   exceptions=('ThrottlingException', 'TooManyRequestsException'),
                   attempt=5, multiplier=1, max_delay=1800, exp_base=2,
                   logger=None, *args, **kwargs):
    retry = tenacity.Retrying(
        retry=retry_if_exception(
            lambda e: getattr(e, 'response', {}).get(
                'Error', {}).get('Code', None) in exceptions if e else False),
        stop=stop_after_attempt(attempt),
        wait=wait_exponential(multiplier=multiplier, max=max_delay,
                              exp_base=exp_base),
        after=after_log(logger, logger.level) if logger else None,
        reraise=True)
    return retry(func, *args, **kwargs)
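A hedged usage sketch, assuming a boto3-style client whose exceptions carry a response['Error']['Code'] field (the Athena call, bucket and logger below are illustrative, not part of the original example):

import logging

import boto3

logger = logging.getLogger(__name__)
athena = boto3.client("athena")

# Extra keyword arguments flow through to the wrapped call; the request is
# retried only when the error code matches one of the throttling exceptions.
result = retry_api_call(
    athena.start_query_execution,
    logger=logger,
    QueryString="SELECT 1",
    ResultConfiguration={"OutputLocation": "s3://example-bucket/results/"},
)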
class StateInAzure:
    def __init__(self, blob_client: BlobClient):
        self._blob_client = blob_client

    @tenacity.retry(
        retry=retry_if_exception(is_lease_exception),
        stop=tenacity.stop.stop_after_delay(20),
        wait=tenacity.wait.wait_fixed(1),
    )
    def _with_blob(self, op: typing.Callable[[State], T]) -> T:
        lease: BlobLeaseClient
        with blob.lease(self._blob_client,
                        lease_duration=STATE_LEASE_DURATION) as lease:
            dlc = self._blob_client.download_blob(lease=lease)
            bytes = dlc.readall()
            state = State.parse_raw(bytes.decode("utf-8"))
            output = op(state)
            self._blob_client.upload_blob(data=state.json(), lease=lease,
                                          overwrite=True)
            return output

    def upsert(self, id: config.Id, ingress_update: config.IngressUpdate,
               timestamp: datetime) -> UpdateEvent:
        return self._with_blob(
            lambda state: state.upsert(id, ingress_update, timestamp))

    def get_by_id(self, id: config.Id) -> config.Ingress:
        with blob.lease(self._blob_client,
                        lease_duration=STATE_LEASE_DURATION) as lease:
            dlc = self._blob_client.download_blob(lease=lease)
            bytes = dlc.readall()
            state = State.parse_raw(bytes.decode("utf-8"))
            return state.configs[id]

    def delete(self, id: config.Id,
               timestamp: datetime) -> typing.Optional[UpdateEvent]:
        return self._with_blob(lambda state: state.delete(id, timestamp))
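A hedged construction sketch: the connection string, container and blob names below are placeholders, and the id/update values are assumed to come from the surrounding config module; the retry on _with_blob covers the case where another writer currently holds the blob lease.

from datetime import datetime, timezone

from azure.storage.blob import BlobClient

# Illustrative wiring only; real values would come from configuration.
blob_client = BlobClient.from_connection_string(
    conn_str="<connection-string>",
    container_name="state",
    blob_name="ingress-state.json",
)
store = StateInAzure(blob_client)

# upsert() is retried (1s apart, for up to ~20s) while the lease is unavailable.
event = store.upsert(ingress_id, ingress_update, datetime.now(timezone.utc))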
class WebSocketRpcClient:
    """
    RPC-client to connect to a WebsocketRPCEndpoint
    Can call methods exposed by the server
    Exposes methods that the server can call
    """

    def logerror(retry_state: tenacity.RetryCallState):
        logger.exception(retry_state.outcome.exception())

    DEFAULT_RETRY_CONFIG = {
        'wait': wait.wait_random_exponential(min=0.1, max=120),
        'retry': retry_if_exception(isNotForbbiden),
        'reraise': True,
        "retry_error_callback": logerror,
    }

    # RPC ping check on successful Websocket connection
    # @see wait_on_rpc_ready
    # interval to try RPC-pinging the server (seconds)
    WAIT_FOR_INITIAL_CONNECTION = 1
    # How many times to try re-pinging before rejecting the entire connection
    MAX_CONNECTION_ATTEMPTS = 5

    def __init__(self, uri: str, methods: RpcMethodsBase = None,
                 retry_config=None,
                 default_response_timeout: float = None,
                 on_connect: List[OnConnectCallback] = None,
                 on_disconnect: List[OnDisconnectCallback] = None,
                 keep_alive: float = 0,
                 **kwargs):
        """
        Args:
            uri (str): server uri to connect to (e.g. 'http://localhost/ws/client1')
            methods (RpcMethodsBase): RPC methods to expose to the server
            retry_config (dict): Tenacity.retry config
                (@see https://tenacity.readthedocs.io/en/latest/api.html#retry-main-api)
            default_response_timeout (float): default time in seconds
            on_connect (List[Coroutine]): callbacks on connection being established
                (each callback is called with the channel)
                @note exceptions thrown in on_connect callbacks propagate to the client
                and will cause connection restart!
            on_disconnect (List[Coroutine]): callbacks on connection termination
                (each callback is called with the channel)
            keep_alive (float): interval in seconds to send a keep-alive ping.
                Defaults to 0, which means keep alive is disabled.
            **kwargs: Additional args passed to connect (@see class Connect at websockets/client.py)
                https://websockets.readthedocs.io/en/stable/api.html#websockets.client.connect

        usage:
            async with WebSocketRpcClient(uri, RpcUtilityMethods()) as client:
                response = await client.call("echo", {'text': "Hello World!"})
                print(response)
        """
        self.methods = methods or RpcMethodsBase()
        self.connect_kwargs = kwargs
        # Websocket connection
        self.conn = None
        # Websocket object
        self.ws = None
        # URI to connect on
        self.uri = uri
        # Pending requests - id mapped to async-event
        self.requests: Dict[str, asyncio.Event] = {}
        # Received responses
        self.responses = {}
        # Read worker
        self._read_task = None
        # Keep alive (Ping/Pong) task
        self._keep_alive_task = None
        self._keep_alive_interval = keep_alive
        # defaults
        self.default_response_timeout = default_response_timeout
        # RPC channel
        self.channel = None
        self.retry_config = retry_config if retry_config is not None else self.DEFAULT_RETRY_CONFIG
        # Event handlers
        self._on_disconnect = on_disconnect
        self._on_connect = on_connect

    async def __connect__(self):
        try:
            # Make sure we don't have any hanging tasks (from previous retry runs)
            self.cancel_tasks()
            logger.info(f"Trying server - {self.uri}")
            # Start connection
            self.conn = websockets.connect(self.uri, **self.connect_kwargs)
            # Get socket
            self.ws = await self.conn.__aenter__()
            # Init an RPC channel to work on-top of the connection
            self.channel = RpcChannel(
                self.methods, self.ws,
                default_response_timeout=self.default_response_timeout)
            # register handlers
            self.channel.register_connect_handler(self._on_connect)
            self.channel.register_disconnect_handler(self._on_disconnect)
            # Start reading incoming RPC calls
            self._read_task = asyncio.create_task(self.reader())
            # start keep alive (if enabled i.e. value isn't 0)
            self._start_keep_alive_task()
            # Wait for RPC channel on the server to be ready (ping check)
            await self.wait_on_rpc_ready()
            # trigger connect handlers
            await self.channel.on_connect()
            return self
        except ConnectionRefusedError:
            logger.info("RPC connection was refused by server")
            raise
        except ConnectionClosedError:
            logger.info("RPC connection lost")
            raise
        except ConnectionClosedOK:
            logger.info("RPC connection closed")
            raise
        except InvalidStatusCode as err:
            logger.info(
                f"RPC Websocket failed - with invalid status code {err.status_code}")
            raise
        except WebSocketException as err:
            logger.info(f"RPC Websocket failed - with {err}")
            raise
        except OSError as err:
            logger.info("RPC Connection failed - %s", err)
            raise
        except Exception as err:
            logger.exception("RPC Error")
            raise

    async def __aenter__(self):
        if self.retry_config is False:
            return await self.__connect__()
        else:
            return await retry(**self.retry_config)(self.__connect__)()

    async def __aexit__(self, *args, **kwargs):
        await self.close()
        # close context of underlying socket
        if (hasattr(self.conn, "ws_client")):
            await self.conn.__aexit__(*args, **kwargs)

    async def close(self):
        logger.info("Closing RPC client")
        # Close underlying connection
        if self.ws is not None:
            await self.ws.close()
        # Notify callbacks (but just once)
        if not self.channel.isClosed():
            # notify handlers (if any)
            await self.channel.on_disconnect()
        # Clear tasks
        self.cancel_tasks()

    def cancel_tasks(self):
        # Stop keep alive if enabled
        self._cancel_keep_alive_task()
        # Stop reader - if created
        self.cancel_reader_task()

    def cancel_reader_task(self):
        if self._read_task is not None:
            self._read_task.cancel()
            self._read_task = None

    async def reader(self):
        """
        Read responses from socket worker
        """
        try:
            while True:
                raw_message = await self.ws.recv()
                await self.channel.on_message(raw_message)
        # Graceful external termination options
        # task was canceled
        except asyncio.CancelledError:
            pass
        except websockets.exceptions.ConnectionClosed:
            logger.info("Connection was terminated.")
            await self.close()
        except:
            logger.exception("RPC Reader task failed")
            raise

    async def _keep_alive(self):
        try:
            while True:
                await asyncio.sleep(self._keep_alive_interval)
                answer = await self.ping()
                assert answer.result == PING_RESPONSE
        # Graceful external termination options
        # task was canceled
        except asyncio.CancelledError:
            pass

    async def wait_on_rpc_ready(self):
        received_response = None
        attempt_count = 0
        while received_response is None and attempt_count < self.MAX_CONNECTION_ATTEMPTS:
            try:
                received_response = await asyncio.wait_for(
                    self.ping(), self.WAIT_FOR_INITIAL_CONNECTION)
            except asyncio.exceptions.TimeoutError:
                attempt_count += 1

    async def ping(self):
        logger.debug("Pinging server")
        answer = await self.channel.other._ping_()
        return answer

    def _cancel_keep_alive_task(self):
        if self._keep_alive_task is not None:
            logger.debug("Cancelling keep alive task")
            self._keep_alive_task.cancel()
            self._keep_alive_task = None

    def _start_keep_alive_task(self):
        if self._keep_alive_interval > 0:
            logger.debug(
                f"Starting keep alive task interval='{self._keep_alive_interval}' seconds")
            self._keep_alive_task = asyncio.create_task(self._keep_alive())

    async def wait_on_reader(self):
        """
        Join on the internal reader task
        """
        try:
            await self._read_task
        except asyncio.CancelledError:
            logger.info("RPC Reader task was cancelled.")

    async def call(self, name, args={}, timeout=None):
        """
        Call a method and wait for a response to be received
        Args:
            name (str): name of the method to call on the other side
                (As defined on the otherside's RpcMethods object)
            args (dict): keyword arguments to be passed to the otherside method
        """
        return await self.channel.call(name, args, timeout=timeout)

    @property
    def other(self):
        """
        Proxy object to call methods on the other side
        """
        return self.channel.other
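The DEFAULT_RETRY_CONFIG above can be overridden per client instance; a hedged sketch (the URI and attempt cap are illustrative) that bounds reconnection attempts instead of retrying indefinitely:

import tenacity
from tenacity import retry_if_exception, wait

# Hypothetical override: stop after 10 connection attempts instead of retrying
# forever, while still skipping retries for "forbidden"-style errors.
bounded_retry_config = {
    "wait": wait.wait_random_exponential(min=0.1, max=30),
    "stop": tenacity.stop_after_attempt(10),
    "retry": retry_if_exception(isNotForbbiden),
    "reraise": True,
}

async def run_client():
    async with WebSocketRpcClient("ws://localhost:8000/ws/client1",
                                  retry_config=bounded_retry_config) as client:
        print(await client.call("echo", {"text": "Hello World!"}))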
def retry_if_throttling_error() -> retry_if_exception:
    return retry_if_exception(predicate=is_throttling_error)
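A brief sketch of wiring this helper into a tenacity decorator; the wrapped call and the is_throttling_error predicate are assumed to exist in the surrounding module, and the backoff numbers are illustrative:

import tenacity

# Hypothetical usage: retry only on throttling errors, with capped exponential backoff.
@tenacity.retry(
    retry=retry_if_throttling_error(),
    wait=tenacity.wait_exponential(multiplier=1, max=60),
    stop=tenacity.stop_after_attempt(5),
    reraise=True,
)
def list_resources(client):
    return client.list_resources()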