Example #1
class Task:
    def __init__(self, path):
        self.path = path

    def get_data_from_django(self):
        url = f"http://127.0.0.1:8000/baiapp/drf{self.path}"
        response = requests.get(url=url)
        res = response.json()
        data = None
        for i in res:
            if i['id'] == 1:
                data = i
        return data

    def get_vnum_from_django(self):
        data = self.get_data_from_django()
        vnum = data['vnum']
        return vnum

    @retry(
        stop=stop_after_attempt(100),
        wait=wait_exponential(multiplier=1, min=1, max=10),
        before=before_log(logger, logging.INFO),  # log level before each retry
        after=after_log(logger, logging.WARN),  # log level after each retry
    )
    def wait_vnum_up_django(self):
        vnum = self.get_vnum_from_django()
        # if vnum == 0:
        #     logger.error("view count is 0")
        #     raise ValueError("view count is 0")

    @retry(
        stop=stop_after_attempt(600),  # give up after 600 attempts
        wait=wait_exponential(
            multiplier=1, min=1,
            max=10),  # wait 2^x * 1 seconds: at least 1 s, at most 10 s, then 10 s from there on
        before=before_log(logger, logging.INFO),
        after=after_log(logger, logging.WARN),
    )
    def get_vnum_is_growing(self):
        vnum = self.get_vnum_from_django()
        if 0 < vnum < 10:
            logger.error("view count still growing")
            raise ValueError("view count still growing")
        elif vnum == 0:
            logger.error("view count is 0")
            raise ValueError("view count is 0")
        return vnum

    def is_finish_vnum(self):
        self.wait_vnum_up_django()
        vnum = self.get_vnum_is_growing()
        if vnum >= 10:
            logger.info(f"view count target reached: {vnum}")
        else:
            logger.error(f"view count did not reach 10, only {vnum}")
        return vnum
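
The wait_exponential policy above, per the original comments, sleeps 2^attempt * multiplier seconds, clamped between min and max. A minimal standalone sketch of the resulting schedule (derived from that description, not from tenacity internals):

multiplier, min_wait, max_wait = 1, 1, 10
for attempt in range(1, 7):
    delay = min(max_wait, max(min_wait, multiplier * 2 ** attempt))
    print(f"attempt {attempt}: wait {delay}s")
# attempt 1: 2s, attempt 2: 4s, attempt 3: 8s, capped at 10s thereafter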
Example #2
class PlatTask(HttpPlatBase):
    """description"""

    def __init__(self, task_id, path):
        self.task_id = task_id
        self.times_out = 5
        self.path = path
        super().__init__(self.path)

    # Poll the platform service for the task result

    def get_task_from_plat(self):
        res = self.get_all()
        return res

    def get_task_state(self):
        task = self.get_task_from_plat()
        return task["state"]

    @retry(
        stop=stop_after_attempt(100),
        wait=wait_exponential(multiplier=1, min=1, max=10),
        before=before_log(logger, logging.INFO),
        after=after_log(logger, logging.WARN),
    )
    def wait_for_task_start(self):
        state = self.get_task_state()
        if state == "not_exist":
            logger.error("任务不存在")
            raise ValueError("任务不存在")

    @retry(
        stop=stop_after_attempt(600),
        wait=wait_exponential(multiplier=1, min=1, max=10),
        before=before_log(logger, logging.INFO),
        after=after_log(logger, logging.WARN),
    )
    def task_state(self):
        state = self.get_task_state()
        if state == "beginning":
            logger.error("任务进行中")
            raise ValueError("任务进行中")
        return state

    def is_task_successful(self):
        self.wait_for_task_start()
        state = self.task_state()
        if state not in ("success", "failed"):
            logger.error(f"task {self.task_id} result: {state}")  # not_exist or beginning
        else:
            logger.info(f"task {self.task_id} result: {state}")
        return state  # returns the final state string, not a boolean
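
Both polling methods above convert "still pending" into a raised ValueError so that @retry fires. A minimal alternative sketch using retry_if_result, which retries on the return value instead of raising (the helper names are assumptions, not part of the example):

import logging
from tenacity import Retrying, before_log, retry_if_result, stop_after_attempt, wait_exponential

logger = logging.getLogger(__name__)

def poll_until_done(get_state):
    retryer = Retrying(
        retry=retry_if_result(lambda s: s in ("not_exist", "beginning")),  # retry while pending
        stop=stop_after_attempt(600),
        wait=wait_exponential(multiplier=1, min=1, max=10),
        before=before_log(logger, logging.INFO),
    )
    return retryer(get_state)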
Example #3
def test_connection(
    ctx,
    max_tries=1,
    wait_seconds=1,  # seconds between attempts
):
    """Test database connection"""
    @retry(
        stop=stop_after_attempt(max_tries),
        wait=wait_fixed(wait_seconds),
        before=before_log(logger, logging.INFO),
        after=after_log(logger, logging.WARN),
    )
    def init() -> None:
        try:
            db = SessionLocal()
            # Try to create session to check if DB is awake
            db.execute("SELECT 1")
            ctx.database_connection = True
            logger.info("Database connection is working")
        except Exception as e:
            ctx.database_connection = False
            logger.error(e)
            logger.error("Could not connect to database")

    init()
Example #4
    def batch_wait(self, tasks, timeout=300, wait_exp_multiplier=0.05, wait_exp_max=1.0):
        """
        Wait until a list of task are completed. Expires after 'timeout' seconds.

        Returns a tuple of list (pending_tasks, success_tasks, error_tasks).
        Each list contains a couple (original_position, task) sorted by original_position asc
        original_position gives the original index in the input tasks list parameter. This helps to keep the order.
        """
        try:
            positions = {}
            pending_tasks = []
            for pos, task in enumerate(tasks):
                positions[task.pk] = pos
                pending_tasks.append((pos, task))
            success_tasks = []
            error_tasks = []
            retryer = Retrying(wait=wait_random_exponential(multiplier=wait_exp_multiplier, max=wait_exp_max),
                               stop=stop_after_delay(timeout),
                               retry=retry_if_result(has_pending_tasks),
                               before=before_log(logger, logging.DEBUG),
                               after=after_log(logger, logging.DEBUG))
            retryer(self._refresh_tasks_status, pending_tasks, success_tasks, error_tasks, positions)
        except RetryError:
            pass

        return (sorted(pending_tasks, key=lambda v: v[0]),
                sorted(success_tasks, key=lambda v: v[0]),
                sorted(error_tasks, key=lambda v: v[0]))
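
A hypothetical sketch of the has_pending_tasks predicate referenced above (the real helper is not shown in the example): retry_if_result re-invokes _refresh_tasks_status while the predicate is true for its return value.

def has_pending_tasks(refresh_result):
    # assumption: _refresh_tasks_status returns the list of still-pending tasks
    return len(refresh_result) > 0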
Example #5
async def health_check_emulator(
    client,
    health_check_path,
    *,
    min_num_checks=2,
    start_period: int = 0,
    timeout: int = 30,
    interval: int = 30,
    retries: int = 3,
):
    # Follows docker's health check protocol
    # SEE https://docs.docker.com/engine/reference/builder/#healthcheck

    check_count = 0

    @retry(
        wait=wait_fixed(interval),
        stop=stop_after_attempt(retries),
        before=before_log(logger, logging.WARNING),
    )
    async def _check_entrypoint():
        nonlocal check_count
        check_count += 1
        # build a fresh request per attempt: a coroutine object can only be awaited once
        resp = await asyncio.wait_for(client.get(health_check_path),
                                      timeout=timeout)
        assert resp.status == web.HTTPOk.status_code

    await asyncio.sleep(start_period)

    while check_count < min_num_checks:
        await _check_entrypoint()
        await asyncio.sleep(interval)
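
Hypothetical usage inside an async test, mirroring docker's options (the client fixture and path are assumptions): wait start_period seconds, then require min_num_checks successful probes, interval seconds apart, retrying each probe up to retries times.

await health_check_emulator(client, "/v0/", min_num_checks=3,
                            start_period=1, interval=2, retries=5)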
Example #6
    def loop_client_recv(self):
        # The built-in client loop_forever seems busted (it doesn't retry
        # under all exceptions), so just do it ourselves... argh...

        def on_connect(client, userdata, flags, rc):
            if rc == mqtt.MQTT_ERR_SUCCESS:
                self.log.info("MQTT connected to %s:%s over %s",
                              self.config['firehose_host'],
                              self.config['firehose_port'],
                              self.config['firehose_transport'])
                client.subscribe('#')
            else:
                self.log.error("MQTT not connected to %s:%s over %s, rc=%s",
                               self.config['firehose_host'],
                               self.config['firehose_port'],
                               self.config['firehose_transport'], rc)

        def on_message(client, userdata, msg):
            if not msg.topic or not msg.payload:
                return
            self.log.info(("Dispatching message on topic=%s"
                           " with payload=%s"), msg.topic, msg.payload)
            try:
                payload = msg.payload
                if isinstance(payload, six.binary_type):
                    payload = payload.decode("utf8")
                details = {'event': json.loads(payload)}
            except (UnicodeError, ValueError):
                self.log.exception("Received corrupted/invalid payload: %s",
                                   msg.payload)
            else:
                self.work_queue.put(details)

        @tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, max=30),
                        before=tenacity.before_log(self.log, logging.INFO))
        def loop_forever_until_dead():
            if self.dying:
                return
            client = mqtt.Client(transport=self.config['firehose_transport'])
            client.on_connect = on_connect
            client.on_message = on_message
            try:
                client.connect(self.config['firehose_host'],
                               port=int(self.config['firehose_port']))
                max_timeout = 1
                while not self.dying:
                    rc = mqtt.MQTT_ERR_SUCCESS
                    start = time.time()
                    elapsed = 0
                    while rc == mqtt.MQTT_ERR_SUCCESS and elapsed < max_timeout:
                        rc = client.loop(timeout=max(0, max_timeout - elapsed))
                        elapsed = time.time() - start
                    if not self.dying:
                        time.sleep(0.1)
            except Exception:
                self.log.exception("Failed mqtt client usage, retrying")
                raise

        loop_forever_until_dead()
Example #7
 def decorator_f(self, *args, **kwargs):
     retry_args = getattr(self, 'retry_args', None)
     if retry_args is None:
         return fun(self, *args, **kwargs)
     multiplier = retry_args.get('multiplier', 1)
     min_limit = retry_args.get('min', 1)
     max_limit = retry_args.get('max', 1)
     stop_after_delay = retry_args.get('stop_after_delay', 10)
     tenacity_logger = (tenacity.before_log(self.log, logging.DEBUG)
                        if self.log else None)
     default_kwargs = {
         'wait': tenacity.wait_exponential(multiplier=multiplier,
                                           max=max_limit,
                                           min=min_limit),
         'retry': tenacity.retry_if_exception(should_retry),
         'stop': tenacity.stop_after_delay(stop_after_delay),
         'before': tenacity_logger,
         'after': tenacity_logger,
     }
     return tenacity.retry(**default_kwargs)(fun)(self, *args, **kwargs)
Example #8
 def wrapper(*args, **kwargs) -> Any:
     return Retrying(
         retry=(retry_if_network_error() | retry_if_throttling_error()),
         stop=stop_after_attempt(max_attempt_number=max_retries),
         wait=(wait_spotify_throttling() + wait_random(min=1, max=3)),
         before=before_log(retry_logger, logging.DEBUG),
         before_sleep=before_sleep_log(retry_logger, logging.WARNING),
     ).call(func, *args, **kwargs)
Example #9
def retry(apply_func, retry_if, wait, stop, **kwargs):
    retryer = Retrying(retry=retry_if,
                       wait=wait,
                       stop=stop,
                       before=before_log(logger, logging.DEBUG),
                       after=after_log(logger, logging.DEBUG),
                       **kwargs)
    return retryer(apply_func)
Example #10
 def __init__(self, worker_config: Watcher):
     self.config = worker_config
     self.logger = get_logger(f'{self.config.room_id}.live')
     self._retry = partial(retry,
                           wait=wait_fixed(config.retry_delay),
                           before=before_log(self.logger, logging.DEBUG),
                           before_sleep=before_sleep_log(
                               self.logger, logging.WARNING))
Example #11
def retry(fn):
    return _retry(
        wait=wait_exponential(multiplier=1, min=4, max=10),
        stop=stop_after_attempt(5),
        reraise=True,
        before=before_log(logger, logging.INFO),
        before_sleep=before_sleep_log(logger, logging.INFO),
    )(fn)
Example #12
 def decorator(fun: Callable):
     default_kwargs = {
         'wait': tenacity.wait_exponential(multiplier=1, max=100),
         'retry': retry_if_temporary_quota(),
         'before': tenacity.before_log(log, logging.DEBUG),
         'after': tenacity.after_log(log, logging.DEBUG),
     }
     default_kwargs.update(**kwargs)
     return tenacity.retry(*args, **default_kwargs)(fun)
Example #13
def _async_retry_if_fails():
    # Helper that retries to account for some uncontrolled delays
    return tenacity.AsyncRetrying(
        wait=tenacity.wait_fixed(1),
        stop=tenacity.stop_after_delay(10),
        retry=tenacity.retry_if_exception_type(AssertionError),
        before=tenacity.before_log(logger, logging.INFO),
        reraise=True,
    )
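
A hedged usage sketch of the helper above (the coroutine name is an assumption): AsyncRetrying objects are async-iterable, so the attempt block re-runs until the assertion passes or 10 seconds elapse.

async def wait_until_ready(fetch_status):
    async for attempt in _async_retry_if_fails():
        with attempt:
            assert await fetch_status() == "ready"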
Example #14
 def decorator(fun: T):
     default_kwargs = {
         'wait': tenacity.wait_exponential(multiplier=1, max=300),
         'retry': retry_if_operation_in_progress(),
         'before': tenacity.before_log(log, logging.DEBUG),
         'after': tenacity.after_log(log, logging.DEBUG),
     }
     default_kwargs.update(**kwargs)
     return cast(T, tenacity.retry(*args, **default_kwargs)(fun))
Example #15
class PostgresConnection:
    def __init__(self, name: str, dsn: str, timeout=30000, autocommit=False):
        self.name = name
        self.listening = False
        self.dsn = psycopg2.extensions.make_dsn(dsn, connect_timeout=timeout)
        self.isolation = ISOLATION_AUTOCOMMIT if autocommit else ISOLATION_DEFAULT
        self._connection = None

    @tenacity.retry(
        reraise=True,
        wait=tenacity.wait_exponential(multiplier=RETRY_CONNECT_MULTIPLIER),
        stop=tenacity.stop_after_attempt(RETRY_CONNECT_TRIES),
        before=tenacity.before_log(Context.logger, logging.DEBUG),
        after=tenacity.after_log(Context.logger, logging.DEBUG),
    )
    def _connect_db(self):
        Context.logger.info(
            f'Creating connection to PostgreSQL database "{self.name}"')
        connection = psycopg2.connect(dsn=self.dsn)
        connection.set_isolation_level(self.isolation)
        # test connection
        cursor = connection.cursor()
        cursor.execute(query='SELECT * FROM document_queue;')
        result = cursor.fetchall()
        Context.logger.debug(f'Jobs in queue: {result}')
        cursor.close()
        connection.commit()
        self._connection = connection
        self.listening = False

    def connect(self):
        if not self._connection or self._connection.closed != 0:
            self._connect_db()

    @property
    def connection(self):
        self.connect()
        return self._connection

    def new_cursor(self, use_dict: bool = False):
        return self.connection.cursor(
            cursor_factory=psycopg2.extras.DictCursor if use_dict else None, )

    def reset(self):
        self.close()
        self.connect()

    def close(self):
        if self._connection:
            Context.logger.info(
                f'Closing connection to PostgreSQL database "{self.name}"')
            self._connection.close()
        self._connection = None
Example #16
def deployed_simcore_stack(core_stack_name: str, core_stack_compose: Dict,
                           docker_client: DockerClient) -> List[Service]:

    # NOTE: the goal here is NOT to test time-to-deploy but
    # rather to guarantee that the framework is fully deployed before starting
    # the tests. Obviously, in a critical state in which the framework has a
    # problem, the fixture will fail

    try:
        for attempt in Retrying(
                wait=wait_fixed(MAX_TIME_TO_DEPLOY_SECS),
                stop=stop_after_attempt(5),
                before=before_log(log, logging.WARNING),
        ):
            with attempt:
                for service in docker_client.services.list():
                    for task in service.tasks():
                        # NOTE: Could have been restarted from latest test parameter, accept as well complete
                        assert task["Status"]["State"] in (
                            task["DesiredState"],
                            "complete",
                        ), (f"{service.name} still not ready or complete. Expected "
                            f"desired_state[{task['DesiredState']}] but got "
                            f"status_state[{task['Status']['State']}]). Details:"
                            f"\n{pformat(task)}")

    finally:
        subprocess.run(f"docker stack ps {core_stack_name}",
                       shell=True,
                       check=False)
        # logs table like
        #  ID                  NAME                  IMAGE                                      NODE                DESIRED STATE       CURRENT STATE                ERROR
        # xbrhmaygtb76        simcore_sidecar.1     itisfoundation/sidecar:latest              crespo-wkstn        Running             Running 53 seconds ago
        # zde7p8qdwk4j        simcore_rabbit.1      itisfoundation/rabbitmq:3.8.0-management   crespo-wkstn        Running             Running 59 seconds ago
        # f2gxmhwq7hhk        simcore_postgres.1    postgres:10.10                             crespo-wkstn        Running             Running about a minute ago
        # 1lh2hulxmc4q        simcore_director.1    itisfoundation/director:latest             crespo-wkstn        Running             Running 34 seconds ago
        # ...

    # TODO: find a more reliable way to list services in a stack
    core_stack_services: List[Service] = [
        service for service in docker_client.services.list()
        if service.name.startswith(f"{core_stack_name}_")
    ]  # type: ignore

    assert (core_stack_services
            ), f"Expected some services in core stack '{core_stack_name}'"

    assert len(
        core_stack_compose["services"].keys()) == len(core_stack_services)

    return core_stack_services
Example #17
def retriable(max_retries=10, retry_interval=5, upon_exception=False, code_block=False):
    retriable_kwargs = {'stop': stop_after_attempt(max_retries),
                        'wait': wait_fixed(retry_interval),
                        'before_sleep': before_sleep_log(logger, logging.DEBUG),
                        'before': before_log(logger, logging.DEBUG)
                        }

    if upon_exception:
        retriable_kwargs['retry'] = retry_if_exception_type(RetryTrigger)

    if code_block:
        # Tenacity class to retry a block of code
        return Retrying(**retriable_kwargs)
    else:
        return retry(**retriable_kwargs)
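
Hedged usage sketch of both modes (the flaky body is illustrative):

import random

@retriable(max_retries=3, retry_interval=1, upon_exception=True)
def flaky_fetch():
    if random.random() < 0.5:
        raise RetryTrigger("transient failure")  # only RetryTrigger is retried in this mode
    return "ok"

# code-block mode: iterate the returned Retrying object, one context manager per attempt
for attempt in retriable(max_retries=3, code_block=True):
    with attempt:
        flaky_fetch()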
Example #18
def test_imperative():
    """
    Example demonstrating using Retrying directly.

    :return:
    """
    max_attempts = 3
    back_off = 5
    redo = Retrying(stop=stop_after_attempt(max_attempts),
                    reraise=True, wait=wait_fixed(back_off),
                    before=before_log(logger, logging.DEBUG))

    try:
        redo(_flaky_function, 'I really do try')
    except RetryableError as e:
        logger.error(f"Retryable: {e}, {e.retry_after}")
Example #19
File: env.py Project: rbnis/Mailu
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """

    # this callback is used to prevent an auto-migration from being generated
    # when there are no changes to the schema
    # reference: http://alembic.readthedocs.org/en/latest/cookbook.html
    def process_revision_directives(context, revision, directives):
        if getattr(config.cmd_opts, 'autogenerate', False):
            script = directives[0]
            if script.upgrade_ops.is_empty():
                directives[:] = []
                logger.info('No changes in schema detected.')

    engine = engine_from_config(config.get_section(config.config_ini_section),
                                prefix='sqlalchemy.',
                                poolclass=pool.NullPool)

    @tenacity.retry(
        stop=tenacity.stop_after_attempt(100),
        wait=tenacity.wait_random(min=2, max=5),
        before=tenacity.before_log(logging.getLogger('tenacity.retry'),
                                   logging.DEBUG),
        before_sleep=tenacity.before_sleep_log(
            logging.getLogger('tenacity.retry'), logging.INFO),
        after=tenacity.after_log(logging.getLogger('tenacity.retry'),
                                 logging.DEBUG))
    def try_connect(db):
        return db.connect()

    with try_connect(engine) as connection:

        context.configure(
            connection=connection,
            target_metadata=target_metadata,
            process_revision_directives=process_revision_directives,
            **current_app.extensions['migrate'].configure_args)

        with context.begin_transaction():
            context.run_migrations()

    connection.close()
Example #20
class MarketDataClient(object):
    logger = logging.getLogger(__name__)

    base_url = 'http://market-data:8000'

    def _make_request(self, url):
        response = requests.get(f"{self.base_url}/{url}",
                                headers={'content-type': 'application/json'})
        return response.json()

    @retry(stop=stop_after_attempt(3),
           before=before_log(logger, logging.DEBUG))
    def all_prices(self):
        return self._make_request("prices")

    def price(self, code):
        return self._make_request(f"prices/{code}")
Example #21
    async def run_with_retry(func: t.Callable[..., t.Awaitable[T]],
                             tries: t.Optional[int] = 2,
                             pause: t.Optional[int] = 15,
                             retry_exc: t.Union[
                                 t.Type[Exception],
                                 t.Sequence[t.Type[Exception]]] = Exception,
                             *args: t.Any,
                             **kwargs: t.Any) -> T:

        return await tnc.AsyncRetrying(  # type: ignore
            wait=tnc.wait_fixed(pause),
            stop=tnc.stop_after_attempt(tries),
            retry=tnc.retry_if_exception_type(retry_exc),
            reraise=True,
            before=tnc.before_log(logger, logging.DEBUG),
            after=tnc.after_log(logger, logging.DEBUG),
            retry_error_cls=tnc.RetryError,
        ).call(func, *args, **kwargs)
Example #22
    def wait(self, timeout=60, wait_exp_multiplier=0.05, wait_exp_max=1.0):
        """
        Wait until task is completed. Expires after 'timeout' seconds.
        """
        try:
            retryer = Retrying(wait=wait_random_exponential(multiplier=wait_exp_multiplier, max=wait_exp_max),
                               stop=stop_after_delay(timeout),
                               retry=retry_if_result(is_pending_status),
                               before=before_log(logger, logging.DEBUG),
                               after=after_log(logger, logging.DEBUG))
            retryer(self._refresh_status)
        except RetryError:
            raise TaskTimeout(self.data())

        if is_error_status(self['status']):
            raise TaskError(self.data())

        return self
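
Hypothetical sketches of the status predicates referenced above (not shown in the example); retry_if_result keeps calling _refresh_status while is_pending_status is true for its return value.

def is_pending_status(status):
    return status in ("pending", "running")  # assumed status values

def is_error_status(status):
    return status in ("error", "failed")  # assumed status values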
Example #23
def safe_json_request(method, url, **kwargs):
    """Convenience function for calling external APIs to simplify error handling.

    :param method: HTTP method (GET, POST, PUT, etc.)
    :param url: Request URL.
    :param kwargs: Additional parameters. See requests.request for details.
    :return: tuple of status_code and json body as a python dict
    """
    from tenacity import retry, stop_after_attempt, before_log
    from requests import HTTPError, ConnectionError
    from logging import getLogger, WARNING
    import json
    logger = getLogger()
    status_code = None
    js = dict()

    @retry(stop=stop_after_attempt(3),
           reraise=True,
           before=before_log(logger=logger, log_level=WARNING))
    def make_request():
        import requests

        r = requests.request(method=method, url=url, **kwargs)
        if r.status_code >= 500:
            raise HTTPError(
                json.dumps(
                    dict(status_code=r.status_code,
                         response=format_response_body(response=r))))
        return r

    try:
        response = make_request()
    except ConnectionError:
        pass
    except HTTPError as exc:
        resp = json.loads(exc.args[0])
        status_code = resp['status_code']
        js = resp['response']
    else:
        status_code = response.status_code
        js = format_response_body(response=response)

    return status_code, js
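
Hedged usage sketch (the URL is illustrative): transport failures that persist through the retries come back as (None, {}), while HTTP 5xx responses come back as the dumped status and body.

status_code, js = safe_json_request("GET", "https://api.example.com/items")
if status_code is None:
    pass  # connection failed even after the three attempts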
Example #24
    def decorator(*args, **kwargs):
        client = args[0]
        max_retries = client._retry_count
        retry_wait = client._retry_wait
        if max_retries:

            @retry(
                stop=stop_after_attempt(max_retries),
                wait=wait_fixed(retry_wait),
                before=before_log(logging, logging.INFO),
                retry=retry_if_exception_type(EnsekError),
                reraise=True,
            )
            @wraps(func)
            def wrap():
                return func(*args, **kwargs)

            return wrap()
        else:
            return func(*args, **kwargs)
Example #25
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """

    # this callback is used to prevent an auto-migration from being generated
    # when there are no changes to the schema
    # reference: http://alembic.readthedocs.org/en/latest/cookbook.html
    def process_revision_directives(context, revision, directives):
        if getattr(config.cmd_opts, 'autogenerate', False):
            script = directives[0]
            if script.upgrade_ops.is_empty():
                directives[:] = []
                logger.info('No changes in schema detected.')

    engine = engine_from_config(config.get_section(config.config_ini_section),
                                prefix='sqlalchemy.',
                                poolclass=pool.NullPool)

    connection = tenacity.Retrying(
        stop=tenacity.stop_after_attempt(100),
        wait=tenacity.wait_random(min=2, max=5),
        before=tenacity.before_log(logging.getLogger("tenacity.retry"), logging.DEBUG),
        before_sleep=tenacity.before_sleep_log(logging.getLogger("tenacity.retry"), logging.INFO),
        after=tenacity.after_log(logging.getLogger("tenacity.retry"), logging.DEBUG)
        ).call(engine.connect)

    context.configure(connection=connection,
                      target_metadata=target_metadata,
                      process_revision_directives=process_revision_directives,
                      **current_app.extensions['migrate'].configure_args)

    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        connection.close()
Example #26
    def run_forever(self, min=1, max=300):
        """Repeatedly connect to the server and monitor for unseen mail.

        Exponentially backoff on retries unless the connection was
        aborted by the server. In that case, the backoff is reset to the
        minimum value.

        :param min: minimum backoff in seconds
        :type min: int
        :param max: maximum backoff in seconds
        :type max: int
        """

        try:
            logger = logging.getLogger()
            backoff = tenacity.Retrying(
                retry=tenacity.retry_unless_exception_type(
                    imaplib.IMAP4.abort),
                wait=tenacity.wait_exponential(min=min, max=max),
                before=tenacity.before_log(logger, logging.DEBUG))
            backoff(self.run)
        except:
            logging.exception("session aborted")
            raise
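
Worth noting about the policy above: retry_unless_exception_type treats a successful return as retryable too, which is what keeps self.run looping; only an IMAP4.abort escapes to the caller. A minimal standalone sketch of that semantic:

import tenacity

calls = 0

@tenacity.retry(retry=tenacity.retry_unless_exception_type(RuntimeError),
                wait=tenacity.wait_fixed(0),
                stop=tenacity.stop_after_attempt(3))
def returns_normally():
    global calls
    calls += 1
    return "done"  # a normal return still counts as "retry again"

try:
    returns_normally()
except tenacity.RetryError:
    print(calls)  # -> 3: retried until the stop condition despite succeeding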
Example #27
def deployed_simcore_stack(make_up_prod: Dict,
                           docker_client: DockerClient) -> List[Service]:
    # NOTE: the goal here is NOT to test time-to-deploy but
    # rather to guarantee that the framework is fully deployed before starting
    # the tests. Obviously, in a critical state in which the framework has a
    # problem, the fixture will fail
    STACK_NAME = 'simcore'
    assert STACK_NAME in make_up_prod

    @retry(wait=wait_fixed(MAX_TIME_TO_DEPLOY_SECS),
           stop=stop_after_attempt(5),
           before=before_log(logger, logging.WARNING))
    def ensure_deployed():
        for service in docker_client.services.list():
            for task in service.tasks():
                assert task['Status']['State'] == task['DesiredState'], \
                    f'{service.name} still not ready: {pformat(task)}'

    try:
        ensure_deployed()
    finally:
        # logs table like
        #  ID                  NAME                  IMAGE                                      NODE                DESIRED STATE       CURRENT STATE                ERROR
        # xbrhmaygtb76        simcore_sidecar.1     itisfoundation/sidecar:latest              crespo-wkstn        Running             Running 53 seconds ago
        # zde7p8qdwk4j        simcore_rabbit.1      itisfoundation/rabbitmq:3.8.0-management   crespo-wkstn        Running             Running 59 seconds ago
        # f2gxmhwq7hhk        simcore_postgres.1    postgres:10.10                             crespo-wkstn        Running             Running about a minute ago
        # 1lh2hulxmc4q        simcore_director.1    itisfoundation/director:latest             crespo-wkstn        Running             Running 34 seconds ago
        # ...
        subprocess.run(f"docker stack ps {STACK_NAME}",
                       shell=True,
                       check=False)

    return [
        service for service in docker_client.services.list()
        if service.name.startswith(f"{STACK_NAME}_")
    ]
Example #29
class SftpUploaderOperation(Operation):

    NAME = "SFTP Uploader"

    def __init__(self, *args, **kwargs):
        Operation.__init__(self, *args, **kwargs)
        self._grid = Gtk.Grid(
            border_width=5,
            row_spacing=5,
            column_spacing=5,
            halign=Gtk.Align.FILL,
            valign=Gtk.Align.CENTER,
            hexpand=True,
            vexpand=False,
        )
        self.add(self._grid)

        # we need boxes for at least
        # hostname
        # port
        # username
        # password
        # auto add new host keys
        # remote directory
        # create remote directory if necessary

        # Hostname
        tempgrid = Gtk.Grid(
            row_spacing=5,
            column_spacing=5,
            halign=Gtk.Align.FILL,
            valign=Gtk.Align.CENTER,
            hexpand=True,
            vexpand=False,
        )
        self._grid.attach(tempgrid, 0, 0, 1, 1)
        tempgrid.attach(
            Gtk.Label(
                label="Hostname",
                halign=Gtk.Align.START,
                valign=Gtk.Align.CENTER,
                hexpand=False,
                vexpand=False,
            ),
            0,
            0,
            1,
            1,
        )
        widget = self.register_widget(
            Gtk.Entry(
                halign=Gtk.Align.FILL,
                valign=Gtk.Align.CENTER,
                hexpand=True,
                vexpand=False,
            ),
            "hostname",
        )
        tempgrid.attach(widget, 1, 0, 1, 1)

        # Port
        tempgrid.attach(
            Gtk.Label(
                label="Port",
                halign=Gtk.Align.CENTER,
                valign=Gtk.Align.CENTER,
                hexpand=False,
                vexpand=False,
            ),
            2,
            0,
            1,
            1,
        )
        widget = self.register_widget(
            Gtk.SpinButton(
                adjustment=Gtk.Adjustment(
                    lower=1, upper=10000, value=5, page_size=0, step_increment=1
                ),
                value=22,
                update_policy=Gtk.SpinButtonUpdatePolicy.IF_VALID,
                numeric=True,
                climb_rate=5,
                halign=Gtk.Align.CENTER,
                valign=Gtk.Align.CENTER,
                hexpand=False,
                vexpand=False,
            ),
            "port",
        )
        tempgrid.attach(widget, 3, 0, 1, 1)

        # Username
        tempgrid = Gtk.Grid(
            row_spacing=5,
            column_spacing=5,
            halign=Gtk.Align.FILL,
            valign=Gtk.Align.CENTER,
            hexpand=True,
            vexpand=False,
        )
        self._grid.attach(tempgrid, 0, 1, 1, 1)
        tempgrid.attach(
            Gtk.Label(
                label="Username",
                halign=Gtk.Align.CENTER,
                valign=Gtk.Align.CENTER,
                hexpand=False,
                vexpand=False,
            ),
            0,
            0,
            1,
            1,
        )
        widget = self.register_widget(
            Gtk.Entry(
                halign=Gtk.Align.FILL,
                valign=Gtk.Align.CENTER,
                hexpand=True,
                vexpand=False,
            ),
            "username",
        )
        tempgrid.attach(widget, 1, 0, 1, 1)

        # Password
        tempgrid.attach(
            Gtk.Label(
                label="Password",
                halign=Gtk.Align.CENTER,
                valign=Gtk.Align.CENTER,
                hexpand=False,
                vexpand=False,
            ),
            2,
            0,
            1,
            1,
        )
        widget = self.register_widget(
            Gtk.Entry(
                visibility=False,
                halign=Gtk.Align.FILL,
                valign=Gtk.Align.CENTER,
                hexpand=True,
                vexpand=False,
            ),
            "password",
            exportable=False,
        )
        tempgrid.attach(widget, 3, 0, 1, 1)

        # Remote directory
        tempgrid = Gtk.Grid(
            row_spacing=5,
            column_spacing=5,
            halign=Gtk.Align.FILL,
            valign=Gtk.Align.CENTER,
            hexpand=True,
            vexpand=False,
        )
        self._grid.attach(tempgrid, 0, 2, 1, 1)
        tempgrid.attach(
            Gtk.Label(
                label="Destination folder",
                halign=Gtk.Align.CENTER,
                valign=Gtk.Align.CENTER,
                hexpand=False,
                vexpand=False,
            ),
            0,
            0,
            1,
            1,
        )
        widget = self.register_widget(
            Gtk.Entry(
                halign=Gtk.Align.FILL,
                valign=Gtk.Align.CENTER,
                hexpand=True,
                vexpand=False,
            ),
            "destination",
        )
        tempgrid.attach(widget, 1, 0, 1, 1)

        # Create directory
        widget = self.register_widget(
            Gtk.CheckButton(
                active=True,
                label="Create destination folder if necessary",
                halign=Gtk.Align.END,
                valign=Gtk.Align.CENTER,
                hexpand=False,
                vexpand=False,
            ),
            "force_folder_creation",
        )
        tempgrid.attach(widget, 2, 0, 1, 1)

        # Automatically accept new host keys
        tempgrid = Gtk.Grid(
            row_spacing=5,
            column_spacing=5,
            halign=Gtk.Align.FILL,
            valign=Gtk.Align.CENTER,
            hexpand=True,
            vexpand=False,
        )
        self._grid.attach(tempgrid, 0, 3, 1, 1)
        widget = self.register_widget(
            Gtk.CheckButton(
                active=False,
                label="Automatically accept new host keys (dangerous!!)",
                halign=Gtk.Align.END,
                valign=Gtk.Align.CENTER,
                hexpand=False,
                vexpand=False,
            ),
            "auto_add_keys",
        )
        tempgrid.attach(widget, 0, 0, 1, 1)

        # Advanced options expander
        advanced_options = Gtk.Expander(
            label="Advanced Options",
            halign=Gtk.Align.FILL,
            valign=Gtk.Align.CENTER,
            hexpand=True,
            vexpand=False,
        )
        self._grid.attach(advanced_options, 0, 4, 1, 1)

        advanced_options_grid = Gtk.Grid(
            halign=Gtk.Align.FILL,
            valign=Gtk.Align.CENTER,
            hexpand=True,
            vexpand=False,
            row_spacing=5,
            column_spacing=5,
        )
        advanced_options.add(advanced_options_grid)

        advanced_options_grid.attach(
            self._get_chmod_grid("file", "644"), 0, 0, 1, 1
        )
        advanced_options_grid.attach(
            self._get_chmod_grid("directory", "755"), 1, 0, 1, 1
        )

    def _get_chmod_grid(self, kind: str, default_octal: str):
        permissions = (
            (stat.S_IRUSR, stat.S_IWUSR, stat.S_IXUSR),
            (stat.S_IRGRP, stat.S_IWGRP, stat.S_IXGRP),
            (stat.S_IROTH, stat.S_IWOTH, stat.S_IXOTH),
        )

        default_octal_as_int = int(default_octal, base=8)

        chmod_grid = Gtk.Grid(
            halign=Gtk.Align.FILL,
            valign=Gtk.Align.CENTER,
            hexpand=True,
            vexpand=False,
            row_spacing=5,
            column_spacing=5,
        )

        title_grid = Gtk.Grid(
            halign=Gtk.Align.FILL,
            valign=Gtk.Align.CENTER,
            hexpand=True,
            vexpand=False,
            row_spacing=5,
            column_spacing=5,
        )
        chmod_grid.attach(title_grid, 0, 0, 4, 1)
        chmod_grid._checkbutton = self.register_widget(
            Gtk.CheckButton(
                label=f"<b>Override {kind} UNIX permissions</b>",
                halign=Gtk.Align.START,
                valign=Gtk.Align.CENTER,
                hexpand=False,
                vexpand=False,
            ),
            f"{kind}_chmod_enabled",
        )
        chmod_grid._checkbutton.get_child().props.use_markup = True
        title_grid.attach(chmod_grid._checkbutton, 0, 0, 1, 1)

        chmod_grid._entry = self.register_widget(
            Gtk.Entry(
                editable=False,
                text=default_octal,
                halign=Gtk.Align.START,
                valign=Gtk.Align.CENTER,
                hexpand=False,
                vexpand=False,
            ),
            f"{kind}_chmod_octal",
        )
        chmod_grid._entry_handler_id = chmod_grid._entry.connect(
            "changed", self._chmod_grid_entry_changed_cb, chmod_grid
        )
        title_grid.attach(chmod_grid._entry, 1, 0, 1, 1)

        label = Gtk.Label(
            label="<b>Read</b>",
            use_markup=True,
            halign=Gtk.Align.CENTER,
            valign=Gtk.Align.CENTER,
            hexpand=True,
            vexpand=False,
        )
        chmod_grid.attach(label, 1, 1, 1, 1)
        label = Gtk.Label(
            label="<b>Write</b>",
            use_markup=True,
            halign=Gtk.Align.CENTER,
            valign=Gtk.Align.CENTER,
            hexpand=True,
            vexpand=False,
        )
        chmod_grid.attach(label, 2, 1, 1, 1)
        label = Gtk.Label(
            label="<b>Execute</b>",
            use_markup=True,
            halign=Gtk.Align.CENTER,
            valign=Gtk.Align.CENTER,
            hexpand=True,
            vexpand=False,
        )
        chmod_grid.attach(label, 3, 1, 1, 1)

        label = Gtk.Label(
            label="<b>User</b>",
            use_markup=True,
            halign=Gtk.Align.CENTER,
            valign=Gtk.Align.CENTER,
            hexpand=True,
            vexpand=False,
        )
        chmod_grid.attach(label, 0, 2, 1, 1)
        label = Gtk.Label(
            label="<b>Group</b>",
            use_markup=True,
            halign=Gtk.Align.CENTER,
            valign=Gtk.Align.CENTER,
            hexpand=True,
            vexpand=False,
        )
        chmod_grid.attach(label, 0, 3, 1, 1)
        label = Gtk.Label(
            label="<b>Others</b>",
            use_markup=True,
            halign=Gtk.Align.CENTER,
            valign=Gtk.Align.CENTER,
            hexpand=True,
            vexpand=False,
        )
        chmod_grid.attach(label, 0, 4, 1, 1)

        for i in range(3):
            for j in range(3):
                check_button = Gtk.CheckButton(
                    halign=Gtk.Align.CENTER,
                    valign=Gtk.Align.CENTER,
                    hexpand=True,
                    vexpand=False,
                )
                chmod_grid.attach(check_button, j + 1, i + 2, 1, 1)
                check_button._permission = permissions[i][j]
                check_button._handler_id = check_button.connect(
                    "clicked",
                    self._chmod_grid_check_button_clicked_cb,
                    chmod_grid,
                )
                if check_button._permission & default_octal_as_int:
                    with check_button.handler_block(check_button._handler_id):
                        check_button.props.active = True

        return chmod_grid

    def _chmod_grid_check_button_clicked_cb(self, button, chmod_grid):
        # get entry value as int
        octal = int(chmod_grid._entry.props.text.strip(), base=8)

        if button.props.active:
            octal |= button._permission
        else:
            octal &= ~button._permission

        # update entry
        with chmod_grid._entry.handler_block(chmod_grid._entry_handler_id):
            chmod_grid._entry.props.text = f"{octal:03o}"

    def _chmod_grid_entry_changed_cb(self, entry, chmod_grid):
        # this method will only be called when loading from yaml
        # get entry value as int
        octal = int(chmod_grid._entry.props.text.strip(), base=8)

        for child in chmod_grid:
            if hasattr(child, "_permission"):
                # block the clicked handler to avoid signal loop
                with child.handler_block(child._handler_id):
                    child.props.active = octal & child._permission

    @classmethod
    def _preflight_check(cls, params: Munch):
        # try connecting to server and copy a simple file
        with paramiko.Transport(
            (params.hostname, int(params.port))
        ) as transport:

            transport.connect(
                username=params.username,
                password=params.password,
            )
            folder_created = False
            with paramiko.SFTPClient.from_transport(transport) as sftp_client:
                try:
                    sftp_client.chdir(params.destination)
                except IOError:
                    if params.force_folder_creation:
                        makedirs(sftp_client, params.destination)
                        if params.directory_chmod_enabled:
                            sftp_client.chmod(
                                params.destination,
                                int(params.directory_chmod_octal, base=8),
                            )
                        folder_created = True
                    else:
                        raise
                # cd back to home folder
                sftp_client.chdir()

                # try copying a file
                with tempfile.NamedTemporaryFile(delete=False) as f:
                    f.write(os.urandom(1024))  # 1 kB
                    tmpfile = f.name
                file_destination = (
                    params.destination + "/" + os.path.basename(tmpfile)
                )
                try:
                    sftp_client.put(tmpfile, file_destination)
                except:
                    if folder_created:
                        sftp_client.rmdir(params.destination)
                    raise
                else:
                    try:
                        if params.file_chmod_enabled:
                            sftp_client.chmod(
                                file_destination,
                                int(params.file_chmod_octal, base=8),
                            )
                    except:
                        # cleanup
                        sftp_client.remove(file_destination)
                        if folder_created:
                            sftp_client.rmdir(params.destination)
                        raise
                    else:
                        # if successful, remove it
                        sftp_client.remove(file_destination)
                finally:
                    os.unlink(tmpfile)

    def preflight_check(self):
        self._processed_dirs: List[str] = []
        self._processed_dirs_lock: RLock = RLock()
        self._preflight_check(self.params)

    @classmethod
    def _attach_metadata(
        cls,
        file: File,
        remote_filename_full: str,
        params: Munch,
        operation_index: int,
    ):
        file.operation_metadata[operation_index] = {
            "sftp url": f"sftp://{params.username}@{params.hostname}:{int(params.port)}{remote_filename_full}"
        }

    @classmethod
    @retry(
        retry=monitor_retry_condition(),
        reraise=True,
        stop=stop_after_attempt(5),
        wait=wait_exponential(),
        before=before_log(logger, logging.DEBUG),
        after=after_log(logger, logging.DEBUG),
        before_sleep=before_sleep_log(logger, logging.DEBUG),
    )
    def _run(
        cls,
        file: File,
        params: Munch,
        operation_index: int,
        processed_dirs: List[str],
        processed_dirs_lock: RLock,
    ):
        try:
            with paramiko.Transport(
                (params.hostname, int(params.port))
            ) as transport:

                transport.connect(
                    username=params.username,
                    password=params.password,
                )

                with paramiko.SFTPClient.from_transport(
                    transport
                ) as sftp_client:
                    sftp_client.chdir(params.destination)
                    rel_filename = str(
                        PurePosixPath(*file.relative_filename.parts)
                    )
                    dirname = posixpath.dirname(rel_filename)
                    with processed_dirs_lock:
                        if dirname not in processed_dirs:
                            if not isdir(sftp_client, dirname):
                                makedirs(sftp_client, dirname)

                                if params.directory_chmod_enabled:
                                    sftp_client.chmod(
                                        dirname,
                                        int(
                                            params.directory_chmod_octal, base=8
                                        ),
                                    )

                            processed_dirs.append(dirname)

                    # check if file already exists
                    # Note: ideally this would be done by calculating the checksum
                    # on the server, but very few SSH server implementations support
                    # this protocol extension, even though Paramiko does:
                    # http://docs.paramiko.org/en/stable/api/sftp.html#paramiko.sftp_file.SFTPFile.check
                    try:
                        remote_stat = sftp_client.stat(rel_filename)
                    except IOError:
                        pass
                    else:
                        # file exists -> compare with local stat
                        local_stat = Path(file.filename).stat()
                        # if local file is more recent or size differs -> upload again!
                        if (
                            local_stat.st_size == remote_stat.st_size
                            and local_stat.st_mtime <= remote_stat.st_mtime
                        ):
                            # add object URL to metadata
                            remote_filename_full = sftp_client.normalize(
                                rel_filename
                            )
                            cls._attach_metadata(
                                file,
                                remote_filename_full,
                                params,
                                operation_index,
                            )
                            raise SkippedOperation(
                                "File has been uploaded already"
                            )

                    # upload the file to the remote server
                    sftp_client.put(
                        file.filename,
                        rel_filename,
                        callback=SftpProgressPercentage(file, operation_index),
                    )
                    if params.file_chmod_enabled:
                        sftp_client.chmod(
                            rel_filename, int(params.file_chmod_octal, base=8)
                        )
                    remote_filename_full = sftp_client.normalize(rel_filename)
        except SkippedOperation:
            raise
        except Exception as e:
            logger.exception(f"SftpUploaderOperation.run exception")
            return str(e)
        else:
            # add object URL to metadata
            cls._attach_metadata(
                file, remote_filename_full, params, operation_index
            )
        return None

    @add_directory_support
    def run(self, file: File):
        return self._run(
            file,
            self.params,
            self.index,
            self._processed_dirs,
            self._processed_dirs_lock,
        )
Example #30
import logging

from legal.db.session import SessionLocal
from tenacity import after_log, before_log, retry, stop_after_attempt, wait_fixed

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

max_tries = 60 * 5  # 300 attempts, 15 s apart
wait_seconds = 15


@retry(
    stop=stop_after_attempt(max_tries),
    wait=wait_fixed(wait_seconds),
    before=before_log(logger, logging.INFO),
    after=after_log(logger, logging.WARN),
)
def init() -> None:
    try:
        db = SessionLocal()
        # Try to create session to check if DB is awake
        db.execute("SELECT 1")
    except Exception as e:
        logger.error(e)
        raise e


def main() -> None:
    logger.info("Initializing service")
    init()
Example #31

broadcast = Broadcast(os.environ['BROADCAST_URI'])
cache = CACHE()

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

max_tries = 60 * 1  # 60 attempts, 1 s apart: about 1 minute
wait_seconds = 1


@retry(
    stop=stop_after_attempt(max_tries),
    wait=wait_fixed(wait_seconds),
    before=before_log(logger, logging.DEBUG),
    after=after_log(logger, logging.WARN),
)
async def get_connection() -> None:
    try:
        return await aiomysql.connect(user=os.environ["DB_USERNAME"],
                                      db=os.environ["DB_NAME"],
                                      host=os.environ["DB_HOST"],
                                      port=int(os.environ["DB_PORT"], 0),
                                      password=os.environ["DB_PASSWORD"],
                                      loop=loop)
    except Exception as e:
        logger.error(e)
        if sentry_enabled:
            sentry_sdk.capture_exception(e)
        raise e
Example #32
class GitHubClient:
    """HTTP REST client for GitHub's API v3 see https://docs.github.com/rest for
    more information."""
    def __init__(
        self,
        api_key: Optional[str] = os.getenv("GH_TOKEN"),
        base_url: str = "https://api.github.com",
    ) -> None:
        """Constructor of GitHubClient."""

        self._base_url = base_url
        self._repos_search_url = f"{base_url}/search/repositories?q="
        self._api_key = api_key

    @property
    def _headers(self) -> dict:
        """HTTP headers used when making requests upstream."""

        headers = {
            "Content-Type": "application/json",
        }
        if self._api_key:
            headers["Authorization"] = f"token {self._api_key}"
        return headers

    @cached(cache=TTLCache(maxsize=10_000_000, ttl=3600),
            key=cachekey)  # type: ignore
    def _search_repos_cached_request(
        self,
        url: str,
        timeout=10,
    ) -> Dict[str, Any]:
        """Cached search request up to ttl secs."""
        logger.info(f"It'll search with {url}")
        r = httpx.get(
            url,
            headers=self._headers,
            timeout=timeout,
        )
        r.raise_for_status()
        response_dict = r.json()
        logger.debug(
            f"search_repos, status_code: {r.status_code}, r: {response_dict}")
        return response_dict

    @retry(
        stop=stop_after_attempt(5),
        wait=wait_random(min=0, max=1),
        before=before_log(logger, logging.INFO),
        before_sleep=my_before_search_sleep,
        after=after_log(logger, logging.WARN),
        retry=retry_if_exception_type(
            (HTTPStatusError, TimeoutException, ConnectionError)),
    )
    def search_repos(
        self,
        search_options: SearchOptions,
        page=1,
        per_page=1000,
        timeout=10,
    ) -> PaginatedRepoModel:
        """Search repos, the recursion should be done at the caller site."""

        search_model = GHReposSearchModel(search_options=search_options,
                                          page=page,
                                          per_page=per_page).to_query()
        url = f"{self._repos_search_url}{search_model}"
        response_dict = self._search_repos_cached_request(url=url,
                                                          timeout=timeout)
        gh_response = GHReposRespModel.from_github(response_dict)

        if gh_response.incomplete_results:
            exc_msg = f"incomplete_results for {search_options}"
            raise TimeoutException(exc_msg, request=None)  # type: ignore
        return PaginatedRepoModel(
            pagination=dict(total_count=gh_response.total_count,
                            page=page,
                            per_page=per_page),
            items=gh_response.items,
        )
Example #33
        """Exports the info to string"""
        console = AirflowConsole(record=True)
        with console.capture():
            self.show(output=output, console=console)
        return console.export_text()


class FileIoException(Exception):
    """Raised when an error happens in the File.io integration"""


@tenacity.retry(
    stop=tenacity.stop_after_attempt(5),
    wait=tenacity.wait_exponential(multiplier=1, max=10),
    retry=tenacity.retry_if_exception_type(FileIoException),
    before=tenacity.before_log(log, logging.DEBUG),
    after=tenacity.after_log(log, logging.DEBUG),
)
def _upload_text_to_fileio(content):
    """Upload text file to File.io service and return lnk"""
    resp = httpx.post("https://file.io", content=content)
    if resp.status_code not in [200, 201]:
        print(resp.json())
        raise FileIoException("Failed to send report to file.io service.")
    try:
        return resp.json()["link"]
    except ValueError as e:
        log.debug(e)
        raise FileIoException("Failed to send report to file.io service.")

Example #34
# NOTE: saved as a separate item to config
STATIC_WEBSERVER_SETTINGS_KEY = f"{__name__}.StaticWebserverModuleSettings"

#
# This retry policy aims to overcome the inconvenient fact that the swarm
# orchestrator does not guarantee the order in which services are started.
#
# Here the web-server needs to pull some files from the web-static service
# which might still not be ready.
#
#
RETRY_ON_STARTUP_POLICY = dict(
    stop=stop_after_attempt(5),
    wait=wait_fixed(1.5),
    before=before_log(log, logging.WARNING),
    retry=retry_if_exception_type(ClientConnectionError),
    reraise=True,
)
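
A minimal sketch of how a policy dict like this is applied (the fetch helper and URL are assumptions): unpacking it into tenacity's decorator retries the startup step while web-static is still unreachable.

from aiohttp import ClientSession
from tenacity import retry

@retry(**RETRY_ON_STARTUP_POLICY)
async def _fetch_static_index(session: ClientSession, url: str) -> str:
    # raises ClientConnectionError while web-static is down; the policy above
    # retries up to 5 times, 1.5 s apart, then re-raises
    async with session.get(url) as resp:
        return await resp.text()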


def assemble_settings(app: web.Application) -> StaticWebserverModuleSettings:
    """creates stores and returns settings for this module"""
    settings = StaticWebserverModuleSettings()
    app[STATIC_WEBSERVER_SETTINGS_KEY] = settings
    return settings


def get_settings(app: web.Application) -> StaticWebserverModuleSettings:
    return app[STATIC_WEBSERVER_SETTINGS_KEY]