Example #1
        def wrap(f):
            if asyncio and asyncio.iscoroutinefunction(f):
                r = AsyncRetrying(*dargs, **dkw)
            else:
                r = Retrying(*dargs, **dkw)

            return r.wraps(f)
Example #2
        def wrap(f):
            if asyncio and asyncio.iscoroutinefunction(f):
                r = AsyncRetrying(*dargs, **dkw)
            #elif tornado and tornado.gen.is_coroutine_function(f):
            #   r = TornadoRetrying(*dargs, **dkw)
            else:
                r = Retrying(*dargs, **dkw)

            return r.wraps(f)
Example #3
    async def startrace(self, **kwargs):
        """
        Create a race.

        Creates a handler for the room and returns a handler object.
        """
        if kwargs.get('goal') and kwargs.get('custom_goal'):
            # TODO use a specific error class
            raise Exception('Either a goal or custom_goal can be specified, but not both.')

        try:
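            # Retry the startrace POST up to 5 times on HTTP response errors,
            # backing off exponentially between 4 and 10 seconds.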
            async for attempt in AsyncRetrying(
                    stop=stop_after_attempt(5),
                    retry=retry_if_exception_type(aiohttp.ClientResponseError),
                    wait=wait_exponential(multiplier=1, min=4, max=10)):
                with attempt:
                    async with self.http.post(
                        url=self.http_uri(f'/o/{self.category_slug}/startrace'),
                        data=kwargs,
                        ssl=self.ssl_context,
                        headers={
                            'Authorization': 'Bearer ' + self.access_token,
                        }
                    ) as resp:
                        headers = resp.headers
        except RetryError as e:
            raise e.last_attempt._exception from e

        if 'Location' in headers:
            race_name = headers['Location'][1:]
            return await self.join_race_room(race_name)

        raise Exception('Received an unexpected response when creating a race.')
Example #4
    async def start(self):
        self.access_token, self.reauthorize_every = await self.authorize()
        self.loop.create_task(self.reauthorize())
        self.loop.create_task(self.refresh_races())

        unlisted_rooms = await models.RTGGUnlistedRooms.filter(
            category=self.category_slug)
        for unlisted_room in unlisted_rooms:
            try:
                async for attempt in AsyncRetrying(
                        stop=stop_after_attempt(5),
                        retry=retry_if_exception_type(
                            aiohttp.ClientResponseError)):
                    with attempt:
                        async with self.http.get(
                                self.http_uri(
                                    f'/{unlisted_room.room_name}/data'),
                                ssl=self.ssl_context,
                        ) as resp:
                            race_data = await resp.json()

                if race_data['status']['value'] in [
                        'finished', 'cancelled'
                ] or not race_data['unlisted']:
                    await unlisted_room.delete()
                else:
                    await self.join_race_room(unlisted_room.room_name)

            except RetryError as e:
                raise e.last_attempt._exception from e
Example #5
async def fetch_from_url(url: str, job: str, instance: str, retries: int = 5) -> Optional[str]:
    """ Fetch content from specified URL
    URL will be retried up to 'retries' times

    Args:
        url: str URL to fetch
        job: Class name or other description of download type, used when
            logging
        instance: Specific software instance being downloaded, used when
            logging
        retries: int specifying the number of times to retry URL

    Returns:
        The fetched content as a string, or None if the fetch did not succeed
    """
    count = 0
    try:
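        # Note: 'retries' is passed to stop_after_delay, so it acts as a time
        # budget in seconds rather than as an attempt count.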
        async for attempt in AsyncRetrying(stop=stop_after_delay(retries)):
            with attempt:
                count += 1
                if count > 1:
                    fastapi_logger.info('Fetching %s "%s" metadata (try %s)',
                                        job, instance, count)
                # Create HTTP session
                async with aiohttp.ClientSession() as session:
                    # Fetch latest JSON from Ansible Galaxy API
                    async with session.get(url) as response:
                        # Cache latest JSON
                        return await response.text()
    except RetryError:
        fastapi_logger.exception('Error fetching %s "%s" URL %s', job,
                                 instance, url)
    return None
Example #6
async def _connect_ssh(hostname: Hostname, *args,
                       **kwargs) -> AsyncIterator[Machine]:
    """Connect to the given host, retrying if necessary.

    A pretty simple wrapper around asyncssh.connect, with a couple of changes:

    - waits for /var/lib/cloud/instance/boot-finished to exist (AWS Ubuntu has
      this when the machine is ready)
    - yields a Machine instead of just connections: a nice wrapper of the
      connection with a hostname
    - retries until the machine is ready
    """

    reraise_err = None
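    # No stop condition: keep retrying every 2 seconds until the connection
    # and the boot-finished check both succeed.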
    async for attempt in AsyncRetrying(wait=wait_fixed(2)):
        with attempt:
            async with asyncssh.connect(hostname, *args, **kwargs) as conn:
                # SSH may be ready but really the system isn't until this file exists.
                await conn.run("test -f /var/lib/cloud/instance/boot-finished",
                               check=True)
                try:
                    yield Machine(conn, Hostname(hostname), kwargs)
                except BaseException as err:  # pylint: disable=broad-except
                    # Exceptions from "yield" have nothing to do with us.
                    # We reraise them below without retrying.
                    reraise_err = err
    if reraise_err is not None:
        raise reraise_err from None
Example #7
    async def get_current_version(self) -> version.Version:
        async with httpx.AsyncClient(timeout=self.timeout) as client:
            try:
                async for attempt in AsyncRetrying(
                        stop=stop_after_attempt(5),
                        reraise=True,
                        retry=retry_if_exception_type(httpx.ConnectTimeout),
                        wait=wait_random(min=self.timeout,
                                         max=self.timeout * 10),
                ):
                    with attempt:
                        r = await client.get(self.checkurl)
            except (httpx.HTTPStatusError, httpx.ReadTimeout):
                return None
        webpage = r.text

        # FIXME: this is a hardcode
        tarball_regex = re.compile(r"\bemacs-[0-9.]+\.tar\.[a-z]*\b")
        version_regex = re.compile(r"\b[0-9.]+\b")

        currentversion = None

        for tarball in tarball_regex.findall(webpage):
            versionstring = version_regex.findall(tarball)[0]
            if versionstring.endswith("."):
                versionstring = versionstring[:-1]
            emacsversion = version.parse(versionstring)
            if currentversion is None or currentversion < emacsversion:
                currentversion = emacsversion
        return currentversion
Example #8
    async def generate(self,
                       spoilers="off",
                       tournament=True,
                       allow_quickswap=True):
        if self.preset_data is None:
            await self.fetch()

        try:
            async for attempt in AsyncRetrying(
                    stop=stop_after_attempt(5),
                    retry=retry_if_exception_type(ClientResponseError)):
                with attempt:
                    try:
                        mystery = await mystery_generate(self.preset_data,
                                                         spoilers=spoilers)

                        if mystery.doors:
                            seed = await AlttprDoorDiscord.create(
                                settings=mystery.settings,
                                spoilers=spoilers != "mystery",
                            )
                        else:
                            if mystery.customizer:
                                endpoint = "/api/customizer"
                            else:
                                endpoint = "/api/randomizer"

                            mystery.settings['tournament'] = tournament
                            mystery.settings[
                                'allow_quickswap'] = allow_quickswap
                            seed = await ALTTPRDiscord.generate(
                                settings=mystery.settings, endpoint=endpoint)
                    except:
                        await models.AuditGeneratedGames.create(
                            randomizer='alttpr',
                            settings=mystery.settings,
                            gentype='mystery failure',
                            genoption=self.preset,
                            customizer=1 if mystery.customizer else 0,
                            doors=mystery.doors)
                        logging.exception(
                            "Failed to generate game, retrying...")
                        raise
        except RetryError as e:
            raise e.last_attempt._exception from e

        await models.AuditGeneratedGames.create(
            randomizer='alttpr',
            hash_id=seed.hash,
            permalink=seed.url,
            settings=mystery.settings,
            gentype='mystery',
            genoption=self.preset,
            customizer=1 if mystery.customizer else 0,
            doors=mystery.doors)

        mystery.seed = seed
        return mystery
Example #9
 def build(self) -> AsyncRetrying:
     return AsyncRetrying(
         wait=self.wait,
         retry=self.retry_condition,
         stop=self.stop,
         before_sleep=self.before_sleep,
         after=self.after,
         reraise=self.reraise()
     )
Example #10
    async def join_race_room(self, race_name, force=False):
        """
        Retrieves current race information for the requested race_name and
        joins that room, if the room isn't already being handled.  If it's
        already handled, no action is taken.

        This is useful if the room is unlisted, and as a result refresh_races
        would be unable to find and join the race room.

        `race_name` is in the format of categoryslug/adjective-verb-0123
        """
        def done(task_name, *args):
            del self.handlers[task_name]

        self.logger.info(f'Attempting to join {race_name}')

        if not race_name.split('/')[0] == self.category_slug:
            # TODO create a real exception class for this
            raise Exception('Race is not for the bot\'s category.')

        try:
            async for attempt in AsyncRetrying(
                    stop=stop_after_attempt(5),
                    retry=retry_if_exception_type(aiohttp.ClientResponseError),
                    wait=wait_exponential(multiplier=1, min=4, max=10)):
                with attempt:
                    async with self.http.get(
                            self.http_uri(f'/{race_name}/data'),
                            ssl=self.ssl_context,
                    ) as resp:
                        data = json.loads(await resp.read())
        except RetryError as e:
            raise e.last_attempt._exception from e

        name = data['name']

        async with self.join_lock:
            if name in self.handlers:
                self.logger.info(f'Returning existing handler for {name}')
                return self.handlers[name]

            if self.should_handle(data) or force:
                handler = await self.create_handler(data)
                self.handlers[name] = TaskHandler()
                self.handlers[name].task = self.loop.create_task(handler.handle())
                self.handlers[name].task.add_done_callback(partial(done, name))
                self.handlers[name].handler = handler

                return handler
            else:
                if name in self.state:
                    del self.state[name]
                self.logger.info(
                    'Ignoring %(race)s by configuration.'
                    % {'race': data.get('name')}
                )
Example #11
 async def create_client(url) -> aioredis.Redis:
     # create redis client
     client: Optional[aioredis.Redis] = None
     async for attempt in AsyncRetrying(**retry_upon_init_policy):
         with attempt:
             client = await aioredis.create_redis_pool(url,
                                                       encoding="utf-8")
             if not client:
                 raise ValueError(
                     f"Expected aioredis client instance, got {client}")
     return client
Example #12
        def wrap(f):
            if asyncio and asyncio.iscoroutinefunction(f):
                r = AsyncRetrying(*dargs, **dkw)
            else:
                r = Retrying(*dargs, **dkw)

            @six.wraps(f)
            def wrapped_f(*args, **kw):
                return r.call(f, *args, **kw)

            wrapped_f.retry = r
            return wrapped_f
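Examples #1, #2 and #12 show the internal `wrap` helper of tenacity's `retry` decorator: it checks `asyncio.iscoroutinefunction(f)` and dispatches to `AsyncRetrying` for coroutines and to `Retrying` otherwise. In application code the same dispatch happens automatically when `@retry` is applied to a coroutine function. A minimal sketch, assuming a recent tenacity; the function name and the failure simulated with `random` are illustrative:

import asyncio
import random

from tenacity import retry, stop_after_attempt, wait_fixed


@retry(stop=stop_after_attempt(3), wait=wait_fixed(1))
async def flaky_call() -> str:
    # Because this is a coroutine function, tenacity wraps it with
    # AsyncRetrying and awaits each attempt.
    if random.random() < 0.5:
        raise ConnectionError("transient failure")
    return "ok"


if __name__ == "__main__":
    # Raises RetryError if all three attempts fail.
    print(asyncio.run(flaky_call()))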
Example #13
async def _create_pg_engine(dsn: DataSourceName, min_size: int,
                            max_size: int) -> Engine:

    log.info("Creating pg engine for %s", dsn)

    async for attempt in AsyncRetrying(
            **PostgresRetryPolicyUponInitialization(log).kwargs):
        with attempt:
            engine = await create_pg_engine(dsn,
                                            minsize=min_size,
                                            maxsize=max_size)
            await raise_if_not_responsive(engine)

    return engine  # type: ignore # tenacity rules guarantee exit with exc
Example #14
async def fetch_url(url: str, timeout: int = 5, retry: int = 5):
    async with httpx.AsyncClient(timeout=timeout) as client:
        try:
            async for attempt in AsyncRetrying(
                    stop=stop_after_attempt(5),
                    reraise=True,
                    retry=retry_if_exception_type(httpx.ConnectTimeout),
                    wait=wait_random(min=timeout, max=timeout * 10),
            ):
                with attempt:
                    r = await client.get(url)
        except (httpx.HTTPStatusError, httpx.ReadTimeout) as e:
            return str(e)
    return r.json()
Example #15
async def make_request(method='get',
                       api_url='',
                       api_key='',
                       data={},
                       retries=0):
    """
    Make call to external urls using python request
    :param method: get|post (str)
    :param api_url:
    :param api_key:
    :param data: data to send in the request (dict)
    :param retries:
    :return:
    """
    headers = {'content-type': 'application/json'}
    if api_key and isinstance(data, dict):
        data['api_key'] = api_key
    method = method.upper()
    respx.route(method=method, path=api_url).mock(return_value=httpx.Response(
        status_code=MOCK_URLS[api_url]['status_code'],
        json=MOCK_URLS[api_url]['data']))
    event_hooks = {}
    if DEBUG_MODE:
        event_hooks = {'request': [log_request], 'response': [log_response]}
    async with httpx.AsyncClient(event_hooks=event_hooks) as client:
        try:
            async for attempt in AsyncRetrying(
                    stop=stop_after_attempt(retries), wait=wait_fixed(2)):
                with attempt:
                    if method == httpretty.GET:
                        response = await client.get(api_url,
                                                    params=data,
                                                    headers=headers)
                    elif method == httpretty.POST:
                        response = await client.post(
                            api_url,
                            data=json.dumps(data,
                                            default=date_time_json_serialize),
                            headers=headers)
                    if response.status_code in STATUS_FORCE_LIST:
                        response.raise_for_status()
                    return {
                        'status_code': response.status_code,
                        'data': response.json()
                    }
        except RetryError:
            return {
                'status_code': INTERNAL_SERVER_STATUS_CODE,
                'data': INTERNAL_SERVER_ERROR_MESSAGE
            }
Example #16
 async def wrapper(db, *args, **kwargs):
     try:
         return await function(db, *args, **kwargs)
     except (InterfaceError, OperationalError, psycopg2.InterfaceError,
             psycopg2.OperationalError):
         try:
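             # Try to re-establish the database connection with exponential
             # backoff before re-running the wrapped function once.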
             async for attempt in AsyncRetrying(
                     wait=wait_exponential(multiplier=1, min=2, max=10),
                     before_sleep=before_sleep_log(log, logging.ERROR)):
                 with attempt:
                     db._objects.database._connect()
         except RetryError:
             pass
         else:
             return await function(db, *args, **kwargs)
Example #17
async def test_retry_object(vault_prefix, mocker):
    config = Konfig(vault_backend=AsyncVaultBackend(
        vault_prefix,
        retry=AsyncRetrying(retry=retry_if_exception_type(KonfettiError),
                            reraise=True,
                            stop=stop_after_attempt(2)),
    ))
    mocker.patch("aiohttp.ClientSession._request", side_effect=KonfettiError)
    m = mocker.patch.object(config.vault_backend,
                            "_call",
                            wraps=config.vault_backend._call)
    with pytest.raises(KonfettiError):
        await config.SECRET
    assert m.called is True
    assert m.call_count == 2
Example #18
async def pg_engine(app: web.Application):
    cfg = app[APP_CONFIG_KEY][CONFIG_SECTION_NAME]
    pg_cfg = cfg["postgres"]

    app[f"{__name__}.dsn"] = dsn = DataSourceName(
        application_name=f"{__name__}_{id(app)}",
        database=pg_cfg["database"],
        user=pg_cfg["user"],
        password=pg_cfg["password"],
        host=pg_cfg["host"],
        port=pg_cfg["port"],
    )  # type: ignore

    pool_kwargs: Dict[str, Any] = {}
    if app[APP_CONFIG_KEY]["main"]["testing"]:
        pool_kwargs["echo"] = True

    log.info("Creating pg engine for %s", dsn)

    engine: Optional[Engine] = None

    async for attempt in AsyncRetrying(
            **PostgresRetryPolicyUponInitialization(log).kwargs):
        with attempt:
            engine = await create_pg_engine(dsn,
                                            minsize=pg_cfg["minsize"],
                                            maxsize=pg_cfg["maxsize"],
                                            **pool_kwargs)
            assert engine  # nosec
            await raise_if_not_responsive(engine)

    assert engine  # nosec
    app[APP_DB_ENGINE_KEY] = engine

    yield  # -------------------

    if engine is not app.get(APP_DB_ENGINE_KEY):
        log.critical(
            "app does not hold right db engine. Somebody has changed it??")

    engine.close()
    await engine.wait_closed()
    log.debug(
        "engine '%s' after shutdown: closed=%s, size=%d",
        engine.dsn,
        engine.closed,
        engine.size,
    )
Example #19
 async def generate_game(self):
     try:
         async for attempt in AsyncRetrying(
                 stop=stop_after_attempt(3),
                 retry=retry_if_exception_type((aiohttp.ClientResponseError, aiohttp.client_exceptions.ServerDisconnectedError))):
             with attempt:
                 async with aiohttp.request(
                         method='post',
                         url=f'{self.baseurl}/randomizerWebService',
                         data=self.settings,
                         auth=self.auth,
                         raise_for_status=True) as resp:
                     req = await resp.json(content_type='text/html')
                 return req
     except RetryError as e:
         raise e.last_attempt._exception from e
Example #20
    async def get_team(self, team_name):
        try:
            async for attempt in AsyncRetrying(
                    stop=stop_after_attempt(5),
                    retry=retry_if_exception_type(aiohttp.ClientResponseError),
                    wait=wait_exponential(multiplier=1, min=4, max=10)):
                with attempt:
                    async with self.http.get(
                            self.http_uri(f'/team/{team_name}/data'),
                            ssl=self.ssl_context,
                    ) as resp:
                        data = json.loads(await resp.read())
        except RetryError as e:
            raise e.last_attempt._exception from e

        return data
Example #21
async def pg_engine(app: web.Application):
    engine = None

    pg_cfg: PostgresSettings = app[APP_CONFIG_KEY].STORAGE_POSTGRES
    dsn = DataSourceName(
        application_name=f"{__name__}_{id(app)}",
        database=pg_cfg.POSTGRES_DB,
        user=pg_cfg.POSTGRES_USER,
        password=pg_cfg.POSTGRES_PASSWORD.get_secret_value(),
        host=pg_cfg.POSTGRES_HOST,
        port=pg_cfg.POSTGRES_PORT,
    )  # type: ignore

    log.info("Creating pg engine for %s", dsn)
    async for attempt in AsyncRetrying(
        **PostgresRetryPolicyUponInitialization(log).kwargs
    ):
        with attempt:
            engine = await create_pg_engine(
                dsn, minsize=pg_cfg.POSTGRES_MINSIZE, maxsize=pg_cfg.POSTGRES_MAXSIZE
            )
            await raise_if_not_responsive(engine)

    if app[APP_CONFIG_KEY].STORAGE_TESTING:
        log.info("Initializing tables for %s", dsn)
        init_pg_tables(dsn, schema=metadata)

    assert engine  # nosec
    app[APP_DB_ENGINE_KEY] = engine

    yield  # ----------

    if engine is not app.get(APP_DB_ENGINE_KEY):
        log.critical("app does not hold right db engine. Somebody has changed it??")

    if engine:
        engine.close()
        await engine.wait_closed()
        log.debug(
            "engine '%s' after shutdown: closed=%s, size=%d",
            engine.dsn,
            engine.closed,
            engine.size,
        )
Example #22
    async def GetGoogleCookie(self):
        """
        Gets google cookie (used for each and every proxy)
        Blacklist proxies on error.
        """
        def retry_if_proxies_remaining(ex):
            should_retry = True
            if isinstance(ex, ProxyError) and ex.response.status_code == 429:
                logging.info((
                    f"Proxy {self.proxies[self.proxy_index]} responded with 429."
                    " Will retry request with another proxy."))
                self._rate_limited_proxies.append(
                    self.proxies[self.proxy_index])
                del self.proxies[self.proxy_index]
            elif len(self.proxies) > 0:
                logging.error((
                    f"Proxy {self.proxies[self.proxy_index]} caused {str(ex)}."
                    " Blacklisting proxy and will retry request with another."
                ))
                self.blacklisted_proxies.append(self.proxies[self.proxy_index])
                del self.proxies[self.proxy_index]
            else:
                should_retry = False
            self._iterate_proxy()
            return should_retry

        cfg = self._retry_config
        if len(self.proxies) > 0:
            cfg = dict(retry=retry_if_exception(retry_if_proxies_remaining),
                       reraise=cfg.get('reraise', True))
        try:
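            # retryer.call(...) is the older tenacity invocation API; newer
            # releases invoke the instance directly (see Example #29).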
            retryer = AsyncRetrying(**cfg)
            resp = await retryer.call(
                self._send_req,
                'https://trends.google.com/?geo={geo}'.format(
                    geo=self.hl[-2:]),
                timeout=self.timeout)
        finally:
            self.proxies.extend(self._rate_limited_proxies)
            self._rate_limited_proxies.clear()

        cookies = resp.cookies.items()
        return dict(filter(lambda i: i[0] == 'NID', cookies))
Example #23
async def _assemble_cached_indexes(app: web.Application):
    """
    Currently the static resources contain 3 folders: osparc, s4l, tis.
    Each of them contains an index.html that is served as the root of the
    site for the corresponding frontend.

    These 3 indexes are fetched and cached here at startup.
    """
    settings: StaticWebserverModuleSettings = get_settings(app)
    cached_indexes: Dict[str, str] = {}

    session: ClientSession = get_client_session(app)

    for frontend_name in FRONTEND_APPS_AVAILABLE:
        url = URL(settings.static_web_server_url) / frontend_name
        log.info("Fetching index from %s", url)

        try:
            # web-static server might still not be up
            async for attempt in AsyncRetrying(**RETRY_ON_STARTUP_POLICY):
                with attempt:
                    response = await session.get(url, raise_for_status=True)

            body = await response.text()

        except ClientError as err:
            log.error("Could not fetch index from static server: %s", err)

            # ANE: Yes this is supposed to fail the boot process
            raise RuntimeError(
                f"Could not fetch index at {str(url)}. Stopping application boot"
            ) from err

        # fixes relative paths
        body = body.replace(f"../resource/{frontend_name}",
                            f"resource/{frontend_name}")
        body = body.replace("boot.js", f"{frontend_name}/boot.js")

        log.info("Storing index for %s", url)
        cached_indexes[frontend_name] = body

    app[APP_FRONTEND_CACHED_INDEXES_KEY] = cached_indexes
Example #24
async def generate_random_game(weightset='weighted',
                               weights=None,
                               tournament=True,
                               spoilers="off",
                               festive=False):
    if weights is None:
        weights = await get_weights(weightset)

    try:
        async for attempt in AsyncRetrying(
                stop=stop_after_attempt(5),
                retry=retry_if_exception_type(ClientResponseError)):
            with attempt:
                try:
                    settings, customizer = await generate_random_settings(
                        weights, festive=festive, spoilers=spoilers)

                    settings['tournament'] = tournament
                    seed = await alttpr(settings=settings,
                                        customizer=customizer,
                                        festive=festive)
                except ClientResponseError:
                    await audit.insert_generated_game(
                        randomizer='alttpr',
                        hash_id=None,
                        permalink=None,
                        settings=settings,
                        gentype='mystery failure',
                        genoption=weightset,
                        customizer=1 if customizer else 0)
                    raise
    except RetryError as e:
        raise e.last_attempt._exception from e

    await audit.insert_generated_game(randomizer='alttpr',
                                      hash_id=seed.hash,
                                      permalink=seed.url,
                                      settings=settings,
                                      gentype='mystery',
                                      genoption=weightset,
                                      customizer=1 if customizer else 0)
    return seed
Example #25
 async def authorize(self):
     """
     Get an OAuth2 token from the authentication server.
     """
     try:
         async for attempt in AsyncRetrying(
                 stop=stop_after_attempt(5),
                 retry=retry_if_exception_type(aiohttp.ClientResponseError)):
             with attempt:
                 async with self.http.post(self.http_uri('/o/token'), data={
                     'client_id': self.client_id,
                     'client_secret': self.client_secret,
                     'grant_type': 'client_credentials',
                 }, ssl=self.ssl_context) as resp:
                     data = await resp.json()
                     if not data.get('access_token'):
                         raise Exception('Unable to retrieve access token.')
                     return data.get('access_token'), data.get('expires_in', 36000)
     except RetryError as e:
         raise e.last_attempt._exception from e
Example #26
    async def _get_data(self, url, method=GET_METHOD, trim_chars=0, **kwargs):
        """Send a request to Google and return the JSON response as a Python object
        :param url: the url to which the request will be sent
        :param method: the HTTP method ('get' or 'post')
        :param trim_chars: how many characters should be trimmed off the beginning of the content of the response
            before this is passed to the JSON parser
        :param kwargs: any extra key arguments passed to the request builder (usually query parameters or data)
        :return:
        """
        if self.cookies is None or len(self.proxies) > 0:
            self.cookies = await self.GetGoogleCookie()

        retryer = AsyncRetrying(**self._retry_config)
        response = await retryer.call(self._send_req,
                                      url,
                                      method=method,
                                      timeout=self.timeout,
                                      cookies=self.cookies,
                                      headers={'accept-language': self.hl},
                                      **kwargs)
        # check if the response contains json and throw an exception otherwise
        # Google mostly sends 'application/json' in the Content-Type header,
        # but occasionally it sends 'application/javascript'
        # and sometimes even 'text/javascript'
        if response.status_code == 200 and (
                'application/json' in response.headers['Content-Type'] or
                'application/javascript' in response.headers['Content-Type'] or
                'text/javascript' in response.headers['Content-Type']):
            # trim initial characters
            # some responses start with garbage characters, like ")]}',"
            # these have to be cleaned before being passed to the json parser
            content = response.text[trim_chars:]
            # parse json
            self._iterate_proxy()
            return json.loads(content)
        else:
            # error
            raise exceptions.ResponseError(
                'The request failed: Google returned a '
                'response with code {0}.'.format(response.status_code),
                response=response)
Example #27
 async def additional_setup(self):
     with Halo("[infrastructure] starting etcd") as spinner:
         await self.publisher.ssh.run(
             f"HOSTNAME={self.publisher.hostname} "
             "envsubst '$HOSTNAME' "
             '    < "$HOME/config/etcd.template" '
             "    | sudo tee /etc/default/etcd "
             "    > /dev/null",
             check=True,
         )
         await self.publisher.ssh.run("sudo systemctl restart etcd",
                                      check=True)
         # Make sure etcd is healthy
         async for attempt in AsyncRetrying(wait=wait_fixed(2),
                                            stop=stop_after_attempt(20)):
             with attempt:
                 await self.publisher.ssh.run(
                     ("ETCDCTL_API=3 etcdctl "
                      f"--endpoints {self.publisher.hostname}:2379 "
                      "endpoint health"),
                     check=True,
                 )
         spinner.succeed("[infrastructure] etcd healthy")
Example #28
async def _celery_app(app: web.Application):
    comp_settings: ComputationSettings = _get_computation_settings(app)

    celery_app: Optional[Celery] = None
    async for attempt in AsyncRetrying(**retry_upon_init_policy):
        with attempt:
            celery_app = Celery(
                comp_settings.task_name,
                broker=comp_settings.broker_url,
                backend=comp_settings.result_backend,
            )
            if not celery_app:
                raise ValueError(
                    f"Expected celery client app instance, got {celery_app}")

    app[__APP_CLIENT_CELERY_CLIENT_KEY] = celery_app

    yield

    if celery_app is not app[__APP_CLIENT_CELERY_CLIENT_KEY]:
        log.critical("Invalid celery client in app")

    celery_app.close()
Example #29
 async def test_retry_using_async_retying(self):
     thing = NoIOErrorAfterCount(5)
     retrying = AsyncRetrying()
     await retrying(_async_function, thing)
     assert thing.counter == thing.count
Example #30
 def _a_get_retry_object(self) -> AsyncRetrying:
     """
     Instantiates an async retry object
     :return: instance of AsyncRetrying class
     """
     return AsyncRetrying(**self.retry_args)
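Most of the examples above follow the same core pattern: iterate with `async for attempt in AsyncRetrying(...)`, run the retried body inside `with attempt:`, and either pass `reraise=True` or catch `RetryError` afterwards. A minimal, self-contained sketch of that pattern, assuming aiohttp and a recent tenacity; the function name and URL are illustrative:

import asyncio

import aiohttp
from tenacity import (AsyncRetrying, RetryError, retry_if_exception_type,
                      stop_after_attempt, wait_exponential)


async def fetch_json(url: str) -> dict:
    try:
        # Retry transient HTTP errors up to 5 times with exponential backoff.
        async for attempt in AsyncRetrying(
                stop=stop_after_attempt(5),
                retry=retry_if_exception_type(aiohttp.ClientResponseError),
                wait=wait_exponential(multiplier=1, min=1, max=10)):
            with attempt:
                async with aiohttp.ClientSession() as session:
                    async with session.get(url, raise_for_status=True) as resp:
                        return await resp.json()
    except RetryError as e:
        # All attempts failed: surface the last exception via the public API.
        raise e.last_attempt.exception() from e


# Example usage:
# asyncio.run(fetch_json("https://example.com/api/data"))

Re-raising `last_attempt.exception()` keeps the original error type visible to callers instead of a generic RetryError.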