Example #1
async def _part_uploader(
    self,
    upload_id: str,
    object_name: str,
    parts_queue: asyncio.Queue,
    results_queue: deque,
    part_upload_tries: int,
    **kwargs,
):
    # Retry each part upload on ClientError, up to part_upload_tries attempts.
    backoff = asyncbackoff(
        None,
        None,
        max_tries=part_upload_tries,
        exceptions=(ClientError,),
    )
    while True:
        msg = await parts_queue.get()
        # DONE is a sentinel object signalling that no more parts will arrive.
        if msg is DONE:
            break
        part_no, part_hash, part = msg
        etag = await backoff(self._put_part)(  # type: ignore
            upload_id=upload_id,
            object_name=object_name,
            part_no=part_no,
            data=part,
            content_sha256=part_hash,
            **kwargs,
        )
        log.debug(
            "Etag for part %d of %s is %s",
            part_no,
            upload_id,
            etag,
        )
        results_queue.append((part_no, etag))
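
For context, a worker like this is normally fanned out over a shared queue. The driver below is a hypothetical sketch (run_pool and the parts iterable are illustrative names, not part of the library, and DONE must be the very sentinel object the worker compares against with `is`); note that one DONE sentinel per worker is needed to shut the pool down cleanly:

import asyncio
from collections import deque

DONE = object()  # must be the same sentinel the worker checks with "is"

async def run_pool(uploader, upload_id, object_name, parts, workers=4):
    parts_queue: asyncio.Queue = asyncio.Queue()
    results: deque = deque()
    tasks = [
        asyncio.create_task(uploader._part_uploader(
            upload_id, object_name, parts_queue, results,
            part_upload_tries=3,
        ))
        for _ in range(workers)
    ]
    for part_no, (part_hash, part) in enumerate(parts, 1):
        await parts_queue.put((part_no, part_hash, part))
    for _ in range(workers):
        await parts_queue.put(DONE)  # one sentinel per worker
    await asyncio.gather(*tasks)
    return sorted(results)  # (part_no, etag) pairs in part order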
Example #2
    def __init__(self,
                 send_timeout: Optional[Union[int, float]] = 1.0,
                 retry_backoff: Union[int, float] = 1.0,
                 max_tries: int = 3,
                 all_tries_timeout: Optional[Union[int, float]] = None,
                 retry_exceptions: Tuple[Type[Exception], ...] = (Exception,)):
        """Retry configuration for the Sink connection. Configuring
        this retry makes sending data to the Sink resilient.

        :param send_timeout: Timeout for each send attempt, in seconds,
            defaults to 1.0
        :type send_timeout: Optional[Union[int, float]], optional
        :param retry_backoff: Time to wait before the next try, in seconds,
            defaults to 1.0
        :type retry_backoff: Union[int, float], optional
        :param max_tries: Maximum number of tries, defaults to 3
        :type max_tries: int, optional
        :param all_tries_timeout: Total timeout across all tries, in seconds,
            defaults to None
        :type all_tries_timeout: Optional[Union[int, float]], optional
        :param retry_exceptions: Exception types that should trigger a retry,
            defaults to (Exception,)
        :type retry_exceptions: Tuple[Type[Exception], ...], optional
        """
        self._decorator = asyncbackoff(attempt_timeout=send_timeout,
                                       deadline=all_tries_timeout,
                                       pause=retry_backoff,
                                       max_tries=max_tries,
                                       exceptions=retry_exceptions)
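
The resulting self._decorator is then applied to the coroutine that actually talks to the Sink. A minimal sketch of that wiring (the _send method and payload argument are hypothetical, not part of the original class):

    async def send(self, payload: bytes) -> None:
        # Each attempt is capped at send_timeout, the whole call at
        # all_tries_timeout, with retry_backoff seconds between tries.
        await self._decorator(self._send)(payload)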
Example #3
def test_values(loop):
    # A negative attempt_timeout is rejected.
    with pytest.raises(ValueError):
        aiomisc.asyncbackoff(-1, 1)

    # A negative deadline is rejected.
    with pytest.raises(ValueError):
        aiomisc.asyncbackoff(0, -1)

    # A negative pause is rejected.
    with pytest.raises(ValueError):
        aiomisc.asyncbackoff(0, 0, -0.1)

    # Only coroutine functions may be decorated.
    with pytest.raises(TypeError):
        aiomisc.asyncbackoff(0, 0)(lambda x: None)
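
For contrast, decorating a coroutine function with non-negative timings is accepted. A minimal sketch of valid usage (flaky_fetch is a hypothetical coroutine, not part of the test suite):

import aiomisc

@aiomisc.asyncbackoff(0.5, 5.0, 0.1, exceptions=(OSError,), max_tries=4)
async def flaky_fetch() -> None:
    ...  # each attempt capped at 0.5 s, the whole call at 5 s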
Example #4
async def _download_worker(
    self,
    object_name: str,
    writer: t.Callable[[bytes, int, int], t.Coroutine],
    *,
    etag: str,
    range_step: int,
    range_start: int,
    range_end: int,
    buffer_size: int,
    range_get_tries: int = 3,
    headers: HeadersType = None,
    **kwargs,
):
    """
    Downloads data in the half-open range `[range_start, range_end)`
    in chunks of `range_step`, passing each chunk to the `writer` callable.
    Uses `etag` to make sure the object wasn't changed in the process.
    """
    log.debug(
        "Starting download worker for range [%d:%d]",
        range_start,
        range_end,
    )
    # Retry each range request on ClientError, up to range_get_tries attempts.
    backoff = asyncbackoff(
        None,
        None,
        max_tries=range_get_tries,
        exceptions=(ClientError,),
    )
    req_range_end = range_start
    for req_range_start in range(range_start, range_end, range_step):
        # Clamp the last chunk so it does not run past range_end.
        req_range_end += range_step
        if req_range_end > range_end:
            req_range_end = range_end
        await backoff(self._download_range)(  # type: ignore
            object_name,  # type: ignore
            writer,
            etag=etag,
            pos=(req_range_start - range_start),
            range_start=range_start,
            req_range_start=req_range_start,
            req_range_end=req_range_end - 1,  # the end bound is inclusive
            buffer_size=buffer_size,
            headers=headers,
            **kwargs,
        )
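
The chunking arithmetic is worth isolating: the worker iterates over a half-open range while the per-request end bound is inclusive, hence the `req_range_end - 1`. A standalone distillation of the loop above (the helper name chunks is illustrative, not from the library):

def chunks(range_start: int, range_end: int, range_step: int):
    # Yields the inclusive (start, end) byte ranges the worker requests.
    req_range_end = range_start
    for req_range_start in range(range_start, range_end, range_step):
        req_range_end = min(req_range_end + range_step, range_end)
        yield req_range_start, req_range_end - 1

# list(chunks(0, 10, 4)) == [(0, 3), (4, 7), (8, 9)]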