Example #1
def wait_for(func, *args, **kwargs):
    """Call func(*args, **kwargs), retrying on AssertionError at a fixed
    interval until it passes or max_time seconds have elapsed."""
    max_time = kwargs.pop('max_time', 200)
    interval = kwargs.pop('interval', 2)

    decorator = backoff.on_exception(
        backoff.constant,
        AssertionError,
        interval=interval,
        max_time=max_time,
    )
    decorated = decorator(func)
    return decorated(*args, **kwargs)
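
A brief usage sketch for the helper above; FakeClient and check_ready are hypothetical stand-ins for whatever condition is being polled:

import backoff  # required by wait_for above

class FakeClient:
    """Hypothetical stand-in that becomes ready after a few polls."""
    def __init__(self):
        self.calls = 0

    def status(self):
        self.calls += 1
        return "ready" if self.calls >= 3 else "starting"

def check_ready(client):
    # Raises AssertionError until ready, so wait_for keeps retrying.
    assert client.status() == "ready", "service not ready yet"
    return "ready"

# Polls every second, gives up after 30 seconds in total.
status = wait_for(check_ready, FakeClient(), interval=1, max_time=30)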
Example #2
    def __init__(self,
                 search_string='',
                 output_format='list',
                 max_number_of_requests=30,
                 rate_limit_timeout_period=60,
                 user_agent=None,
                 proxies=None):
        """
        This Python class is used to query multiple online repositories for the synonyms
        associated with a specific word.

        Usage Examples
        ----------

        >>> synonym = Synonyms('mother')
        >>> results = synonym.find_synonyms()

        >>> synonym = Synonyms(search_string='mother')
        >>> results = synonym.find_synonyms()

        Parameters
        ----------
        :param search_string: string containing the word to obtain synonyms for

        :param output_format: Format to use for returned results.
               Default value: list; Acceptable values: dictionary or list

        :param max_number_of_requests: maximum number of requests for a specific timeout_period

        :param rate_limit_timeout_period: the time period before a session is placed in a temporary hibernation mode

        :param user_agent: string containing either a global user agent type or a specific user agent

        :param proxies: dictionary of proxies to use with Python Requests
        """
        self._proxies = proxies
        self._word = search_string
        self._user_agent = user_agent
        self._output_format = output_format

        rate_limit_status = False
        self._rate_limit_status = rate_limit_status

        # Retries a rate-limited request with exponential backoff for up to 60 seconds
        handler = on_exception(expo,
                               RateLimitException,
                               max_time=60,
                               on_backoff=self._backoff_handler)
        # Establishes a rate limit for making requests to the synonyms repositories
        limiter = limits(calls=max_number_of_requests,
                         period=rate_limit_timeout_period)
        self.find_synonyms = handler(limiter(self.find_synonyms))
Example #3
File: db.py Project: jwhitlock/ichnaea
def retry_on_mysql_lock_fail(metric=None, metric_tags=None):
    """Function decorator to backoff and retry on MySQL lock failures.

    This handles these MySQL errors:
    * (1205) Lock wait timeout exceeded
    * (1213) Deadlock when trying to get lock

    In both cases, restarting the transaction may work.

    It expects the errors to be wrapped in a SQLAlchemy StatementError, and
    that SQLAlchemy issued a transaction rollback, so it is safe to retry after
    a short sleep. It uses backoff.on_exception to implement the exponential
    backoff.

    Other exceptions are raised, and if limits are met, then the final
    exception is raised as well.

    :arg str metric: An optional counter metric to track handled errors
    :arg list metric_tags: Additional tags to send with the metric
    :return: A function decorator implementing the retry logic
    """
    def is_mysql_lock_error(exception):
        """Is the exception a retryable MySQL lock error?"""
        return (isinstance(exception, StatementError)
                and isinstance(exception.orig, MySQLError) and
                exception.orig.args[0] in (LOCK_DEADLOCK, LOCK_WAIT_TIMEOUT))

    def count_exception(exception):
        """Increment the tracking metric for lock errors."""
        tags = ["errno:%s" % exception.orig.args[0]]
        if metric_tags:
            tags.extend(metric_tags)
        METRICS.incr(metric, 1, tags=tags)

    def giveup_handler(exception):
        """Based on this raised exception, should we give up or retry?

        If it is a SQLAlchemy wrapper for a retryable MySQL exception,
        then we should increment a metric and retry.

        If it isn't one of the special exceptions, then give up.
        """
        if is_mysql_lock_error(exception):
            if metric:
                count_exception(exception)
            return False  # Retry if possible
        return True  # Give up on other unknown errors.

    return backoff.on_exception(backoff.expo,
                                StatementError,
                                max_tries=3,
                                giveup=giveup_handler)
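
A usage sketch for the decorator above; the task function, SQL statement, and metric name are illustrative rather than taken from the ichnaea source:

@retry_on_mysql_lock_fail(metric="data.station_update_retry",
                          metric_tags=["task:update_station"])
def update_station(session, station_id, values):
    # Any StatementError wrapping MySQL errno 1205/1213 is retried
    # (up to 3 tries with exponential backoff); other errors raise.
    session.execute(
        "UPDATE station SET samples = :samples WHERE id = :id",
        {"samples": values["samples"], "id": station_id},
    )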
Example #4
 def add_rate_limiting(self, f: Callable):
     if self.ratelimit_params:
         g = limits(**self.ratelimit_params)(f)
     else:
         g = limits(calls=self.ratelimit_calls_per_min, period=60)(f)
     g = sleep_and_retry(g)
     g = on_exception(
         expo,
         (RateLimitException, HTTPError),
         max_time=self.backoff_timeout_seconds,
         factor=4,
     )(g)
     return g
Example #5
def backoff(exceptions, giveup):
    """Decorates a function to retry up to 5 times using an exponential backoff
    function.

    exceptions is a tuple of exception classes that are retried
    giveup is a function that accepts the exception and returns True to
    give up (stop retrying)
    """
    return backoff_module.on_exception(
        backoff_module.expo,
        exceptions,
        max_tries=5,
        giveup=giveup,
        factor=2)
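
Since the helper returns a plain decorator, it can be applied with `@`; the exception tuple and giveup predicate below are illustrative:

import requests
import backoff as backoff_module  # the module alias used above

def give_up_on_client_error(exc):
    # Stop retrying when the server rejected the request itself (4xx).
    response = getattr(exc, "response", None)
    return response is not None and 400 <= response.status_code < 500

@backoff((requests.exceptions.RequestException,), give_up_on_client_error)
def fetch(url):
    resp = requests.get(url, timeout=10)
    resp.raise_for_status()
    return resp.json()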
Example #6
 def inner(func):  # pylint: disable=missing-docstring
     func_with_decode_backoff = backoff.on_exception(
         backoff.expo,
         JSONDecodeError,
         max_tries=MAX_TRIES,
         on_backoff=_backoff_handler,
     )
     func_with_backoff = backoff.on_exception(
         backoff.expo,
         requests.exceptions.HTTPError,
         max_tries=MAX_TRIES,
         giveup=_http_status_giveup,
         on_backoff=_backoff_handler,
     )
     func_with_timeout_backoff = backoff.on_exception(
         _wait_30_seconds,
         requests.exceptions.Timeout,
         max_tries=MAX_TRIES,
         on_backoff=_backoff_handler,
     )
     return func_with_decode_backoff(
         func_with_backoff(func_with_timeout_backoff(func)))
Example #7
def retry_pattern(backoff_type, exception, **wait_gen_kwargs):
    def log_retry_attempt(details):
        _, exc, _ = sys.exc_info()
        logger.info(str(exc))
        logger.info(
            f"Caught retryable error after {details['tries']} tries. Waiting {details['wait']} more seconds then retrying..."
        )

    def should_retry_api_error(exc: FacebookRequestError):
        # Retryable OAuth error codes
        if exc.api_error_type() == "OAuthException" and exc.api_error_code() in (1, 2, 4, 17, 341, 368):
            return True

        # Rate-limiting error codes
        if exc.api_error_code() in (4, 17, 32, 613):
            return True

        if exc.http_status() == status_codes.TOO_MANY_REQUESTS:
            return True

        if (exc.api_error_type() == "OAuthException"
                and exc.api_error_code() == 10
                and exc.api_error_message() == "(#10) Not enough viewers for the media to show insights"):
            return True

        # Issue 4028: sometimes a rate-limit error is returned with a 400 HTTP code
        if (exc.http_status() == status_codes.BAD_REQUEST
                and exc.api_error_code() == 100
                and exc.api_error_subcode() == 33):
            return True

        if exc.api_transient_error():
            return True

        # The media was posted before the most recent time that the user's account
        # was converted to a business account from a personal account.
        if (exc.api_error_type() == "OAuthException"
                and exc.api_error_code() == 100
                and exc.api_error_subcode() == 2108006):
            return True

        return False

    return backoff.on_exception(
        backoff_type,
        exception,
        jitter=None,
        on_backoff=log_retry_attempt,
        giveup=lambda exc: not should_retry_api_error(exc),
        **wait_gen_kwargs,
    )
Example #8
def retry_pattern(backoff_type, **wait_gen_kwargs):
    def log_retry_attempt(details):
        LOGGER.info(details)
        LOGGER.info(
            'Caught retryable error after {0} tries. Waiting {1} more seconds then retrying...'
            .format(details["tries"], details["wait"]))

    def giveup_unless_retryable(e):
        # aiohttp errors carry no .response attribute, so guard with getattr;
        # give up on any 4xx client error, retry everything else.
        response = getattr(e, "response", None)
        return response is not None and 400 <= response.status_code < 500

    return backoff.on_exception(
        backoff_type, (requests.exceptions.RequestException,
                       aiohttp.ClientError, aiohttp.ServerTimeoutError),
        on_backoff=log_retry_attempt,
        giveup=giveup_unless_retryable,
        **wait_gen_kwargs)
Example #9
def test_add_and_delete_cluster(capsys, dispose_of):
    dispose_of(INSTANCE)

    # This won't work, because the instance isn't created yet
    instanceadmin.add_cluster(PROJECT, INSTANCE, CLUSTER2)
    out = capsys.readouterr().out
    assert f"Instance {INSTANCE} does not exist" in out

    # Get the instance created
    instanceadmin.run_instance_operations(PROJECT, INSTANCE, CLUSTER1)
    capsys.readouterr()  # throw away output

    # Add a cluster to that instance
    # Avoid failing for "instance is currently being changed" by
    # applying an exponential backoff
    w_backoff = backoff.on_exception(backoff.expo,
                                     exceptions.ServiceUnavailable)
    w_backoff(instanceadmin.add_cluster)(PROJECT, INSTANCE, CLUSTER2)
    out = capsys.readouterr().out
    assert f"Adding cluster to instance {INSTANCE}" in out
    assert "Listing clusters..." in out
    assert f"\n{CLUSTER1}\n" in out
    assert f"Cluster created: {CLUSTER2}" in out

    # Try to add the same cluster again, won't work
    instanceadmin.add_cluster(PROJECT, INSTANCE, CLUSTER2)
    out = capsys.readouterr().out
    assert "Listing clusters..." in out
    assert f"\n{CLUSTER1}\n" in out
    assert f"\n{CLUSTER2}\n" in out
    assert f"Cluster not created, as {CLUSTER2} already exists."

    # Now delete it
    instanceadmin.delete_cluster(PROJECT, INSTANCE, CLUSTER2)
    out = capsys.readouterr().out
    assert "Deleting cluster" in out
    assert f"Cluster deleted: {CLUSTER2}" in out

    # Verify deletion
    instanceadmin.run_instance_operations(PROJECT, INSTANCE, CLUSTER1)
    out = capsys.readouterr().out
    assert "Listing clusters..." in out
    assert f"\n{CLUSTER1}\n" in out
    assert f"\n{CLUSTER2}\n" not in out

    # Try deleting it again, for fun (and coverage)
    instanceadmin.delete_cluster(PROJECT, INSTANCE, CLUSTER2)
    out = capsys.readouterr().out
    assert "Deleting cluster" in out
    assert f"Cluster {CLUSTER2} does not exist" in out
Example #10
async def fetch_npm_registry_metadata(
    config: AIOHTTPClientConfig,
    package_names: Iterable[str],
    total_packages: Optional[int] = None,
) -> AsyncGenerator[Result[Dict[str, Dict]], None]:
    """Fetches npm registry metadata for one or more node package names

    config['auth_token'] is an optional npm registry access token to
    use a higher rate limit. Run 'npm token create --read-only' to
    create it.
    """
    total_groups: Optional[int] = None
    if total_packages:
        total_groups = math.ceil(total_packages / config["package_batch_size"])

    async with aiohttp_session(config) as s:
        async_query_with_backoff = backoff.on_exception(
            backoff.expo,
            (
                aiohttp.ClientError,
                aiohttp.ClientResponseError,
                aiohttp.ContentTypeError,
                asyncio.TimeoutError,
            ),
            max_tries=config["max_retries"],
            giveup=is_not_found_exception,
            logger=log,
        )(request_json)

        for i, group in enumerate(grouper(package_names,
                                          config["package_batch_size"]),
                                  start=1):
            log.info(f"fetching group {i} of {total_groups}")
            try:
                # NB: scoped packages OK e.g. https://registry.npmjs.com/@babel/core
                group_results = await asyncio.gather(*[
                    async_query_with_backoff(
                        s,
                        "GET",
                        f"{config['base_url']}{package_name}",
                    ) for package_name in group if package_name is not None
                ])
                for result in group_results:
                    if result is not None:
                        yield result
            except Exception as err:
                log.error(
                    f"error fetching group {i} for package names {group}: {err}:\n{exc_to_str()}"
                )
                yield err
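
A consumption sketch for the generator above; since failures are yielded as exception objects rather than raised, callers check each item's type (config contents assumed from the signature):

import asyncio

async def dump_metadata(config, names):
    async for result in fetch_npm_registry_metadata(config, names):
        if isinstance(result, Exception):
            print("fetch failed:", result)
        else:
            print("fetched:", result.get("name"))

# asyncio.run(dump_metadata(config, ["left-pad", "@babel/core"]))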
Example #11
    def wrapped(*args, **kwargs):
        def fatal_code(e):
            """Retry on Too Many Requests (429); give up on any other 4XX."""
            if e.response is None:
                return True
            code = e.response.status_code
            return 400 <= code < 500 and code != 429

        return backoff.on_exception(backoff.expo,
                                    requests.exceptions.RequestException,
                                    jitter=backoff.full_jitter,
                                    max_time=300,
                                    giveup=fatal_code)(function)(*args,
                                                                 **kwargs)
Example #12
 def _retry(self, target):
     def give_up_retry(e: RequestException):
         give_up = False
         # Compare to None explicitly: requests.Response is falsy for
         # 4xx/5xx status codes, which would wrongly skip this branch.
         if e.response is not None:
             code = e.response.status_code
             give_up = code not in self.retry_codes
         return give_up
     return backoff.on_exception(
         backoff.expo,
         RequestException,
         factor=self.backoff_factor,
         max_time=self.timeout,
         max_tries=self.max_retries,
         giveup=give_up_retry,
     )(target)
Example #13
async def fetch_hibp_breach_data(
    config: AIOHTTPClientConfig,
    emails: Iterable[str],
) -> AsyncGenerator[Result[Dict[str, Dict]], None]:
    """
    Fetches breach information for one or more email accounts

    Uses: https://haveibeenpwned.com/API/v3#BreachesForAccount
    """

    async_query_with_backoff = backoff.on_exception(
        backoff.constant,
        (aiohttp.ClientResponseError, aiohttp.ClientError,
         asyncio.TimeoutError),
        max_tries=config["max_retries"],
        giveup=is_not_found_exception,
        logger=log,
        interval=2,
    )(request_json)

    async with aiohttp_session(config) as s:
        results = await asyncio.gather(*[
            async_query_with_backoff(
                s,
                "GET",
                f"{config['base_url']}breachedaccount/{email}",
            ) for email in emails
        ])

        for result in results:
            if result is None:
                log.warning(f"got None HIBP results for emails {emails}")
                continue

            breach_details = await asyncio.gather(*[
                async_query_with_backoff(
                    s,
                    "GET",
                    f"{config['base_url']}breach/{breach['Name']}",
                ) for breach in result
            ])

            breach_dates = [detail["BreachDate"] for detail in breach_details]

            for breach, date in zip(result, breach_dates):
                breach["Date"] = date

            yield result
Example #14
def retry_call(func: Callable[..., Any],
               *args: Any,
               strategy: Callable[..., Generator[int, None, None]] = backoff.constant,
               exception: Type[Exception] = Exception,
               fargs: Optional[List[Any]] = None,
               fkwargs: Optional[Dict[str, Any]] = None,
               **kwargs: Any) -> Any:
    """
    Retry a given call.
    """
    decorated = backoff.on_exception(strategy, exception, *args,
                                     **kwargs)(func)
    fargs = fargs or []
    fkwargs = fkwargs or {}
    return decorated(*fargs, **fkwargs)
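
A usage sketch; decorator options flow through **kwargs into backoff.on_exception, while fargs/fkwargs reach the wrapped callable (the URL is illustrative):

import requests

# Retry requests.get up to 3 times, waiting 2 seconds between tries.
response = retry_call(
    requests.get,
    strategy=backoff.constant,
    exception=requests.exceptions.RequestException,
    interval=2,
    max_tries=3,
    fargs=["https://example.com/api/items"],
    fkwargs={"timeout": 10},
)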
Example #16
    def wrapped(*args, **kwargs):
        def fatal_code(e):
            # Retry network-level failures (timeouts, URL errors); treat
            # anything else, e.g. SlackClientError, as fatal and give up.
            if isinstance(e, socket.timeout):
                return False
            elif isinstance(e, URLError):
                return False
            else:
                return True

        return backoff.on_exception(
            backoff.expo,
            (socket.timeout, URLError, SlackClientError),
            jitter=backoff.full_jitter,
            max_time=300,
            giveup=fatal_code,
        )(function)(*args, **kwargs)
Example #17
File: api.py Project: NMWDI/airbyte
def retry_pattern(backoff_type, **wait_gen_kwargs):
    def sleep_on_ratelimit(details):
        _, exc, _ = sys.exc_info()
        logger.info(str(exc))
        logger.info(f"Caught retryable error after {details['tries']} tries. Waiting {details['wait']} more seconds then retrying...")

    def log_giveup(_details):
        logger.error("Max retry limit reached")

    return backoff.on_exception(
        backoff_type,
        (ZendeskRateLimited, ZendeskTimeout),
        jitter=None,
        on_backoff=sleep_on_ratelimit,
        on_giveup=log_giveup,
        **wait_gen_kwargs,
    )
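
Callers choose the wait generator and its parameters when building the decorator; fetch_tickets below is a hypothetical Zendesk call:

@retry_pattern(backoff.expo, max_tries=4, factor=5)
def fetch_tickets(session, page):
    # Hypothetical request that raises ZendeskRateLimited / ZendeskTimeout
    # on retryable failures; those trigger the exponential backoff.
    return session.get(f"/api/v2/tickets.json?page={page}")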
Example #18
 async def countriesChecker(self):
     send = backoff.on_exception(backoff.expo,
                                 Exception,
                                 max_time=90,
                                 max_tries=3,
                                 jitter=None)(self.send_)
     async with aiohttp.ClientSession(headers=self.headers,
                                      timeout=self.TIMEOUT) as session:
         while True:
             # Take the next proxy off the shared list; stop when it is empty.
             async with self.lock:
                 try:
                     i = self.proxies.pop()
                 except IndexError:
                     break
             try:
                 answer = await send(i, session)
             except KeyboardInterrupt:
                 break
             except Exception as e:
                 async with self.lock:
                     self.proxies.append(i)
                 raise e
             else:
                 if self.getCountriesStatus(answer):
                     async with self.lock:
                         self.green.append(i)
                     print(
                         self.NAME + colorama.Fore.GREEN +
                         f"[{str(len(self.proxies))}]Good proxy: {i.normal}"
                     )
                 else:
                     async with self.lock:
                         self.bad.append(i)
                     print(
                         self.NAME + colorama.Fore.YELLOW +
                         f"[{str(len(self.proxies))}]Proxy from blacklist country: {i.normal}"
                     )
Example #19
File: client.py Project: przlada/pyshoper
    def _list_resource_generator(self, method):
        method = backoff.on_exception(backoff.expo,
                                      exceptions.CallsLimitExceededError,
                                      max_tries=self.max_tries,
                                      max_time=self.max_time,
                                      jitter=None)(method)

        def new_method(*args, **kwargs):
            page = 1
            while True:
                kwargs['page'] = page
                result = method(*args, **kwargs)
                if not result['list']:
                    break
                for elem in result['list']:
                    yield elem
                page += 1

        return new_method
Example #20
    def __init__(self,
                 source_language='',
                 str_to_translate='',
                 api_key=''
                 ):

        self._source_language = source_language
        self._str_to_translate = str_to_translate
        self._api_key = api_key

        ratelimit_status = False
        self._rate_limit_status = ratelimit_status

        # Retries a rate-limited request with exponential backoff for up to 60 seconds
        handler = on_exception(expo, RateLimitException, max_time=60, on_backoff=self._backoff_handler)
        # Establishes a rate limit for making requests to the Deep translation service
        limiter = limits(calls=60, period=60)
        self.translate_word = handler(limiter(self.translate_word))
        self.reverse_translate = handler(limiter(self.reverse_translate))
Example #21
def retry_pattern(backoff_type, exception, **wait_gen_kwargs):
    def log_retry_attempt(details):
        _, exc, _ = sys.exc_info()
        logger.info(str(exc))
        logger.info(f"Caught retryable error after {details['tries']} tries. Waiting {details['wait']} more seconds then retrying...")

    def should_retry_api_error(exc):
        if isinstance(exc, FacebookRequestError):
            return exc.api_transient_error() or exc.api_error_subcode() == FACEBOOK_UNKNOWN_ERROR_CODE
        return False

    return backoff.on_exception(
        backoff_type,
        exception,
        jitter=None,
        on_backoff=log_retry_attempt,
        giveup=lambda exc: not should_retry_api_error(exc),
        **wait_gen_kwargs,
    )
Example #22
    async def asnChecker(self):
        '''Async task for checking ASN.'''
        send = backoff.on_exception(backoff.expo,
                                    Exception,
                                    max_time=120,
                                    max_tries=self.MAXTRIES,
                                    jitter=None)(self.sendWithProxy)
        while True:
            # Take the next proxy off the shared list; stop when it is empty.
            async with self.lock:
                try:
                    proxy = self.proxies.pop()
                except IndexError:
                    break
            try:
                answer = await send(proxy, timeout=self.TIMEOUT)
            except Exception as e:
                async with self.lock:
                    self.died.append(proxy)
                print(self.NAME +
                      f"[{str(len(self.proxies))}]Dead proxy: {proxy.normal}")
            else:
                if self.getStatus(answer):
                    async with self.lock:
                        self.green.append(proxy)
                    print(
                        self.NAME + colorama.Fore.GREEN +
                        f"[{str(len(self.proxies))}]Good proxy: {proxy.normal}"
                    )
                else:
                    async with self.lock:
                        self.bad.append(proxy)
                    print(
                        self.NAME + colorama.Fore.YELLOW +
                        f"[{str(len(self.proxies))}]Proxy with blacklist ASN: {proxy.normal}"
                    )
Example #23
def retry_connection_handler(**kwargs):
    """Retry helper, log each attempt"""

    def log_retry_attempt(details):
        _, exc, _ = sys.exc_info()
        logger.info(str(exc))
        logger.info(f"Caught retryable error after {details['tries']} tries. Waiting {details['wait']} more seconds then retrying...")

    def giveup_handler(exc):
        return exc.response is not None and 400 <= exc.response.status_code < 500

    return backoff.on_exception(
        backoff.expo,
        requests.exceptions.RequestException,
        jitter=None,
        on_backoff=log_retry_attempt,
        giveup=giveup_handler,
        **kwargs,
    )
Example #24
def main(file, view, watch, timeit, draft, tex, latexmk, verbose, strict,
         retry, pandoc_args):

    if latexmk:
        tex = True

    if verbose:
        print(f'[pandocmk] {verbose=}')

    md_fn = Path(file)
    assert md_fn.suffix == '.md'
    assert md_fn.is_file()

    # Get Pandoc options from CLI and YAML
    # This also creates a temporary {filename}.yaml file with metadata based on styles
    pandoc_options = get_pandoc_options(pandoc_args,
                                        md_fn,
                                        verbose=verbose,
                                        strict=strict)

    # Optionally add watch
    f = monitor_file if watch else build_output

    # Optionally add back-off for errors
    # https://github.com/litl/backoff/blob/master/backoff/_wait_gen.py
    if retry:
        f = backoff.on_exception(wait_gen=backoff.expo,
                                 exception=Exception,
                                 base=1,
                                 max_value=20,
                                 max_tries=1000,
                                 max_time=600,
                                 giveup=error_is_fatal,
                                 on_backoff=print_backoff)(f)

    f(md_fn,
      view=view,
      timeit=timeit,
      tex=tex,
      latexmk=latexmk,
      verbose=verbose,
      pandoc_options=pandoc_options)
Example #25
async def page_monitor(client: AsyncClient, conf: DotDict,
                       queue: asyncio.Queue, logger) -> None:
    """
    Collect webpage availability metrics.

    Metrics are placed into the queue for subsequent processing.
    In case of connectivity failures to Kafka Broker, retries till
    connection is available again.

    Network connectivity issues are mitigated by exponential backoff
    with a configurable number of retries.

    """
    # Configure exponential backoff without jitter;
    # no competing clients, as described here:
    # https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
    backoff_deco = backoff.on_exception(
        backoff.expo,
        httpx.TransportError,
        on_backoff=_backoff_handler,
        max_tries=conf.backoff_retries,
        jitter=None,
    )

    while True:
        # Ping webpage
        resp = await backoff_deco(client.get)(conf.page_url)
        http_code = resp.status_code
        resp_time = resp.elapsed

        # Compose Kafka message
        msg = {
            # Following xkcd.com/1179, sorry ISO 8601
            "ts": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
            "page_url": conf.page_url,
            "http_code": http_code,
            "response_time": resp_time.microseconds,
        }
        logger.info(source="monitor", message=msg)

        asyncio.create_task(queue.put(msg))
        await asyncio.sleep(conf.ping_interval)
Example #26
    def request(self, method, url, data=None, params=None, headers=None, retry_count=0, resource_name=None,
                retry_when_empty_result=None):

        def http_status_codes_to_retry():
            retry_codes = self.backoff_strategy.pop(RETRY_HTTP_STATUS_CODES, [])
            retry_codes = list(map(int, retry_codes)) if isinstance(retry_codes, (list, tuple)) else [int(retry_codes)]
            retry_codes.append(429)
            return retry_codes

        retry_http_status_codes = http_status_codes_to_retry()

        def fatal_code(e):
            return isinstance(e, error.AuthorisationError) or \
                   isinstance(e, error.HttpError) and \
                   400 <= e.status_code < 500 and \
                   e.status_code not in retry_http_status_codes

        def enable_backoff_logs():
            logging.getLogger('backoff').addHandler(logging.StreamHandler())

        enable_backoff_logs()
        decorated_request = backoff.on_exception(
            self.backoff_strategy.pop('wait_gen', backoff.constant),
            (error.ConnectionError, error.Timeout, error.TooManyRedirects, error.HttpError),
            max_time=self.backoff_max_time,
            giveup=fatal_code,
            **copy.deepcopy(self.backoff_strategy)
        )(self._request)

        if retry_when_empty_result:
            self.retry_when_empty_result = retry_when_empty_result
            decorated_request = backoff.on_predicate(
                wait_gen=self.backoff_strategy.pop('wait_gen', backoff.constant),
                predicate=self._response_does_not_have_data,
                max_time=self.backoff_max_time,
                **copy.deepcopy(self.backoff_strategy)
            )(decorated_request)

        return decorated_request(method, url, data=data,
                                 params=params, headers=headers,
                                 retry_count=retry_count,
                                 resource_name=resource_name)
Example #27
def retry_pattern(backoff_type, exception, **wait_gen_kwargs):
    def log_retry_attempt(details):
        _, exc, _ = sys.exc_info()
        logger.info(str(exc))
        logger.info(
            f"Caught retryable error after {details['tries']} tries. Waiting {details['wait']} more seconds then retrying..."
        )

    def should_retry_api_error(exc):
        if isinstance(exc, FacebookRequestError):
            call_rate_limit_error = exc.api_error_code() in FACEBOOK_RATE_LIMIT_ERROR_CODES
            temporary_oauth_error = exc.api_error_code() == FACEBOOK_TEMPORARY_OAUTH_ERROR_CODE
            batch_timeout_error = (exc.http_status() == http.client.BAD_REQUEST
                                   and exc.api_error_code() == FACEBOOK_BATCH_ERROR_CODE)
            unknown_error = exc.api_error_subcode() == FACEBOOK_UNKNOWN_ERROR_CODE
            connection_reset_error = exc.api_error_code() == FACEBOOK_CONNECTION_RESET_ERROR_CODE
            server_error = exc.http_status() == http.client.INTERNAL_SERVER_ERROR
            return any((
                exc.api_transient_error(),
                unknown_error,
                call_rate_limit_error,
                batch_timeout_error,
                connection_reset_error,
                temporary_oauth_error,
                server_error,
            ))
        return True

    return backoff.on_exception(
        backoff_type,
        exception,
        jitter=None,
        on_backoff=log_retry_attempt,
        giveup=lambda exc: not should_retry_api_error(exc),
        **wait_gen_kwargs,
    )
Example #28
def retry_pattern(backoff_type, exception, **wait_gen_kwargs):
    # HACK: Workaround added due to bug with Facebook prematurely deprecating 'relevance_score'
    # Issue being tracked here: https://developers.facebook.com/support/bugs/2489592517771422
    def is_relevance_score(exception):
        if getattr(exception, "body", None):
            return exception.body().get("error", {}).get(
                "message"
            ) == '(#100) relevance_score is not valid for fields param. please check https://developers.facebook.com/docs/marketing-api/reference/ads-insights/ for all valid values'
        else:
            return False

    def log_retry_attempt(details):
        _, exc, _ = sys.exc_info()
        if is_relevance_score(exc):
            raise Exception(
                "Due to a bug with Facebook prematurely deprecating 'relevance_score' that is "
                "not affecting all tap-facebook users in the same way, you need to "
                "deselect `relevance_score` from your Insights export. For further "
                "information, please see this Facebook bug report thread: "
                "https://developers.facebook.com/support/bugs/2489592517771422"
            ) from exc
        LOGGER.info(exc)
        LOGGER.info(
            'Caught retryable error after %s tries. Waiting %s more seconds then retrying...',
            details["tries"], details["wait"])

    def should_retry_api_error(exception):
        if isinstance(exception, FacebookRequestError):
            return (exception.api_transient_error()
                    or exception.api_error_subcode() == 99
                    or is_relevance_score(exception))
        elif isinstance(exception, InsightsJobTimeout):
            return True
        return False

    return backoff.on_exception(
        backoff_type,
        exception,
        jitter=None,
        on_backoff=log_retry_attempt,
        giveup=lambda exc: not should_retry_api_error(exc),
        **wait_gen_kwargs)
Example #29
	def __init__(self):
		'''init'''
		config = configparser.ConfigParser()
		config.read("settings.ini", encoding="UTF-8")
		self.TIMEOUT = config.getint("COUNTRIES_ADVANCED", "TIMEOUT")
		self.MAXTRIES = config.getint("COUNTRIES_ADVANCED", "MAXTRIES")
		self.TASKS = config.getint("COUNTRIES_ADVANCED", "TASKS")
		self.UNKNOWN = config.getboolean("COUNTRIES_ADVANCED", "UNKNOWN")
		self.NAME = "\x1b[32m" + config["main"]["NAME"] + "\x1b[0m"
		with open("texts/ASN.txt", mode="r") as file:
			# str.split() never yields empty strings, so no cleanup loop is needed
			self.ASN = file.read().split()
			print(self.NAME + "ASN numbers loaded successfully!")
		self.send = backoff.on_exception(backoff.expo, Exception, max_tries=self.MAXTRIES, jitter=None, max_time=120)(requests.get)
Example #30
File: api.py Project: tesla-avant/airbyte
def retry_connection_handler(**kwargs):
    """Retry helper, log each attempt"""
    def log_retry_attempt(details):
        _, exc, _ = sys.exc_info()
        logger.info(str(exc))
        logger.info(
            f"Caught retryable error after {details['tries']} tries. Waiting {details['wait']} more seconds then retrying..."
        )

    def giveup_handler(exc):
        if isinstance(exc, (HubspotInvalidAuth, HubspotAccessDenied)):
            return True
        return exc.response is not None and HTTPStatus.BAD_REQUEST <= exc.response.status_code < HTTPStatus.INTERNAL_SERVER_ERROR

    return backoff.on_exception(
        backoff.expo,
        requests.exceptions.RequestException,
        jitter=None,
        on_backoff=log_retry_attempt,
        giveup=giveup_handler,
        **kwargs,
    )
Example #31
def retry_pattern(backoff_type, exception, **wait_gen_kwargs):
    def log_retry_attempt(details):
        _, exc, _ = sys.exc_info()
        LOGGER.info(exc)
        LOGGER.info(
            'Caught retryable error after %s tries. Waiting %s more seconds then retrying...',
            details["tries"], details["wait"])

    def should_retry_api_error(exception):
        if isinstance(exception, FacebookRequestError):
            return exception.api_transient_error()
        elif isinstance(exception, InsightsJobTimeout):
            return True
        return False

    return backoff.on_exception(
        backoff_type,
        exception,
        jitter=None,
        on_backoff=log_retry_attempt,
        giveup=lambda exc: not should_retry_api_error(exc),
        **wait_gen_kwargs)
Example #32
def user_defined_backoff_handler(max_tries: int, **kwargs):
    def sleep_on_ratelimit(details):
        _, exc, _ = sys.exc_info()
        if isinstance(exc, UserDefinedBackoffException):
            retry_after = exc.backoff
            logger.info(f"Retrying. Sleeping for {retry_after} seconds")
            time.sleep(retry_after + 1)  # extra second to cover any fractions of second

    def log_give_up(details):
        _, exc, _ = sys.exc_info()
        logger.error(f"Max retry limit reached. Request: {exc.request}, Response: {exc.response}")

    return backoff.on_exception(
        backoff.constant,
        UserDefinedBackoffException,
        interval=0,  # skip waiting, we'll wait in on_backoff handler
        on_backoff=sleep_on_ratelimit,
        on_giveup=log_give_up,
        jitter=None,
        max_tries=max_tries,
        **kwargs,
    )
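
A usage sketch, assuming UserDefinedBackoffException exposes the server-requested delay as .backoff and accepts the request/response pair (the constructor shown here is an assumption):

@user_defined_backoff_handler(max_tries=5)
def send(session, request):
    response = session.send(request)
    if response.status_code == 429:
        # sleep_on_ratelimit will wait exactly this many seconds.
        raise UserDefinedBackoffException(
            backoff=int(response.headers.get("Retry-After", 30)),
            request=request,
            response=response,
        )
    return response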
Example #33
def make_iter_method(method_name, model_name, url_path):
    """Make a page-concatenating iterator method from a find method

    :param method_name: The name of the find method to decorate
    :param model_name: The name of the model as it appears in the JSON response
    :param url_path: The URL path for the API -- FIXME parameter ignored?
    """
    backoff_decorator = backoff.on_exception(backoff.expo, HTTPTemporaryError, max_tries=5)

    def iter_method(self, *args, **kwargs):
        method = getattr(self, method_name)
        result = backoff_decorator(method)(*args, **kwargs)
        for model in result[model_name]:
            yield model

        while True:
            if 'next' not in result['links']:
                return
            result = backoff_decorator(self._get)(result['links']['next'])
            for model in result[model_name]:
                yield model

    return iter_method
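
A typical attachment at class-definition time; the client class and JSON key names here are assumptions for illustration:

class ServicesAPIClient:
    def _get(self, url):
        ...  # issue the HTTP request, return parsed JSON

    def find_services(self, page=1):
        return self._get(f"/services?page={page}")

    # Iterates every service across all pages, retrying each page
    # fetch on HTTPTemporaryError with exponential backoff.
    find_services_iter = make_iter_method('find_services', 'services', '/services')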
Example #34
import os

import backoff
import requests
import kinto_http
from requests.adapters import TimeoutSauce


PARALLEL_REQUESTS = int(os.getenv("PARALLEL_REQUESTS", 4))
REQUESTS_TIMEOUT_SECONDS = float(os.getenv("REQUESTS_TIMEOUT_SECONDS", 2))
REQUESTS_NB_RETRIES = int(os.getenv("REQUESTS_NB_RETRIES", 4))

retry_timeout = backoff.on_exception(
    backoff.expo,
    (requests.exceptions.Timeout, requests.exceptions.ConnectionError),
    max_tries=REQUESTS_NB_RETRIES,
)


class CustomTimeout(TimeoutSauce):
    def __init__(self, *args, **kwargs):
        if kwargs["connect"] is None:
            kwargs["connect"] = REQUESTS_TIMEOUT_SECONDS
        if kwargs["read"] is None:
            kwargs["read"] = REQUESTS_TIMEOUT_SECONDS
        super().__init__(*args, **kwargs)


requests.adapters.TimeoutSauce = CustomTimeout
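
The decorator can then wrap any call whose failures surface as requests timeouts or connection errors, e.g. a kinto_http fetch (server URL and bucket/collection names are illustrative):

client = kinto_http.Client(server_url="https://demo.kinto-storage.org/v1")

@retry_timeout
def fetch_records():
    # Gets up to REQUESTS_NB_RETRIES attempts, with exponentially
    # growing waits between consecutive tries.
    return client.get_records(bucket="default", collection="tasks")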