Example #1
def _beat_forever_until_stopped(self):
    """Inner beating loop."""
    retry = tenacity.Retrying(
        wait=tenacity.wait_fixed(1),
        before_sleep=tenacity.before_sleep_log(LOG, logging.WARNING),
    )
    while not self._dead.is_set():
        with timeutils.StopWatch() as w:
            wait_until_next_beat = retry(self._driver.heartbeat)
        ran_for = w.elapsed()
        has_to_sleep_for = wait_until_next_beat - ran_for
        if has_to_sleep_for < 0:
            LOG.warning(
                "Heartbeating took too long to execute (it ran for"
                " %0.2f seconds which is %0.2f seconds longer than"
                " the next heartbeat idle time). This may cause"
                " timeouts (in locks, leadership, ...) to"
                " happen (which will not end well).", ran_for,
                ran_for - wait_until_next_beat)
        self._beats += 1
        # NOTE(harlowja): use the event object for waiting and
        # not a sleep function since doing that will allow this code
        # to terminate early if stopped via the stop() method vs
        # having to wait until the sleep function returns.
        # NOTE(jd): Wait for only half of the expected time.
        # This is a measure of safety, better be too soon than too late.
        self._dead.wait(has_to_sleep_for / 2.0)
Example #2
class ShoutboxClient:
    def __init__(self, settings: ShoutboxSettings):
        self._settings = settings
        # Builds an alternation like (p1)+|(p2)+|(p3)+ out of the configured
        # filter phrases, matched case-insensitively.
        self._prewritten = re.compile(
            rf"({')+|('.join(settings.filter_phrases)})+", flags=re.I)

    def _detect_prewritten(self, text: str) -> bool:
        return bool(self._prewritten.search(text))

    @cached_property
    def enabled(self) -> bool:
        return self._settings.url is not None

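    # Retries on any requests.RequestException for up to three attempts;
    # before_sleep_log records each failed attempt, and reraise=True surfaces
    # the final error to the caller.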
    @tenacity.retry(
        reraise=True,
        retry=tenacity.retry_if_exception_type(requests.RequestException),
        stop=tenacity.stop_after_attempt(3),
        before_sleep=tenacity.before_sleep_log(logger, logger.level),
        after=tenacity.after_log(logger, logger.level),
    )
    def make_request(self, data: ShoutboxRequest) -> ShoutboxResponse:
        if self._settings.url is None:
            raise RuntimeError("Shoutbox is disabled, so should not be here!")
        response = requests.post(
            self._settings.url.human_repr(),
            json=data.dict(),
            timeout=self._settings.read_timeout,
        )
        response.raise_for_status()
        model = ShoutboxResponse.parse_obj(response.json())

        if self._detect_prewritten(model.text):
            raise ShoutboxPrewrittenDetectedError(
                f"Detected prewritten shoutbox phrase: '{model.text}'")
        return model
Example #3
class Robot(models.Model):
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    label = models.CharField("Descriptive label for the robot",
                             max_length=255,
                             blank=True)
    device = models.CharField(max_length=255, unique=True)
    online = models.BooleanField(default=False)

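    # Fetches the object from the remote host and mirrors it locally inside a
    # transaction, retrying the HTTP request for up to five attempts at
    # 60-second intervals on request errors.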
    @classmethod
    @transaction.atomic
    @retry(retry=retry_if_exception_type(RequestException),
           reraise=True,
           stop=stop_after_attempt(5),
           wait=wait_fixed(60),
           before_sleep=before_sleep_log(logger, logging.DEBUG))
    def create_from_remote_copy(cls, host, session, object_id):
        remote_obj_url = urljoin(host,
                                 reverse('robot-detail', args=(object_id, )))
        r = session.get(remote_obj_url, timeout=60)
        r.raise_for_status()

        data = r.json()
        robot, _ = Robot.objects.update_or_create(
            pk=data.pop('id'),
            defaults=data,
        )
        return robot

    def __str__(self):
        return self.label
Example #4
def run_compute_singles(run, site, processed_output_path, database):
    """Compute the singles (underlying uncorrelated) rate."""
    path_prefix = os.path.join(processed_output_path, f'EH{site}')
    ads = common.dets_for(site, run)
    update_db = True
    iteration = 0
    extra_cut = '1'
    for ad in ads:
        infile = os.path.join(
            path_prefix,
            f'hadded_ad{ad}/out_ad{ad}_{run}.root',
        )
        # Ideally should check to see if rate has been computed before.
        # But, this is so fast that I will just re-compute every time.
        for attempt in tenacity.Retrying(
                reraise=True,
                wait=tenacity.wait_random_exponential(max=60),
                retry=tenacity.retry_if_exception_type(sqlite3.Error),
                # before_sleep_log expects a Logger instance rather than the
                # logging module itself.
                before_sleep=tenacity.before_sleep_log(
                    logging.getLogger(__name__), logging.DEBUG),
        ):
            with attempt:
                compute_singles.main(
                    infile,
                    database,
                    GENERAL_LABEL,
                    update_db,
                    iteration,
                    extra_cut,
                )
    return
Example #5
class HackerNewsStore:
    @tenacity.retry(stop=tenacity.stop_after_attempt(5),
                    wait=tenacity.wait_random(min=1, max=2),
                    before_sleep=tenacity.before_sleep_log(
                        logger, logging.DEBUG))  # noqa: E501
    def add_article(self, article):
        connection = DatabaseHelper.get_connection()
        with connection:
            cursor = connection.cursor()
            try:
                cursor.execute(self._get_query('add_article'),
                               (article.id, article.link, article.title))
            except psycopg2.IntegrityError as e:
                logger.info(e)

    def purge(self):
        connection = DatabaseHelper.get_connection()
        with connection:
            cursor = connection.cursor()
            lastCreateTime = datetime.date.today() - datetime.timedelta(
                days=10)  # noqa: E501
            cursor.execute(self._get_query('purge_removed_articles'),
                           (lastCreateTime, ))
            logger.info(f"deleted rows:{cursor.rowcount}")

    def _get_query(self, query_id):
        query = None
        if DATABASE_CONFIG['type'] == 'postgre':
            query = PostgreQueries[query_id]
        else:
            raise RuntimeError(
                "The specified db type:{} is not supported".format(
                    DATABASE_CONFIG['type']))  # noqa: E501
        return query
Example #6
class TapeSlot(models.Model):
    STATUS_CHOICES = (
        (0, 'Inactive'),
        (20, 'Write'),
        (100, 'FAIL'),
    )

    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    slot_id = models.IntegerField()
    medium_id = models.CharField(
        "The id for the medium, e.g. barcode",
        max_length=255,
        unique=True,
        blank=True,
        null=True,
    )
    robot = models.ForeignKey('Robot',
                              models.PROTECT,
                              related_name='tape_slots')
    status = models.IntegerField(choices=STATUS_CHOICES, default=20)

    @classmethod
    @transaction.atomic
    @retry(retry=retry_if_exception_type(RequestException),
           reraise=True,
           stop=stop_after_attempt(5),
           wait=wait_fixed(60),
           before_sleep=before_sleep_log(logger, logging.DEBUG))
    def create_from_remote_copy(cls,
                                host,
                                session,
                                object_id,
                                create_storage_medium=True):
        remote_obj_url = urljoin(
            host, reverse('tapeslot-detail', args=(object_id, )))
        r = session.get(remote_obj_url, timeout=60)
        r.raise_for_status()

        data = r.json()
        data.pop('locked', None)
        data.pop('mounted', None)
        data.pop('status_display', None)

        data['robot'] = Robot.create_from_remote_copy(host, session,
                                                      data['robot'])
        if not create_storage_medium:
            data.pop('storage_medium', None)

        tape_slot, _ = TapeSlot.objects.update_or_create(
            pk=data.pop('id'),
            defaults=data,
        )
        return tape_slot

    class Meta:
        ordering = ('slot_id', )
        unique_together = ('slot_id', 'robot')

    def __str__(self):
        return str(self.slot_id)
Example #7
    def __init__(
        self,
        base_url: str,
        default_params: Optional[dict] = None,
        default_headers: Optional[dict] = None,
        timeout: Optional[float] = None,
        ttl_dns_cache: Optional[float] = None,
        max_retries: Optional[int] = 2,
        retry_delay: Optional[float] = 0.5,
        proxy_url: Optional[str] = None,
    ):
        super().__init__()
        if base_url is None:
            raise RuntimeError(
                f'`base_url` must be passed for {self.__class__.__name__} constructor'
            )
        self.base_url = base_url.rstrip('/')
        self.ttl_dns_cache = ttl_dns_cache
        self.proxy_url = proxy_url
        self.session = None
        self.default_params = CIMultiDict(filter_none(default_params or {}))
        self.default_headers = default_headers or {}
        self.timeout = timeout
        self.max_retries = max_retries
        self.retry_delay = retry_delay

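        # Wraps the bound request() method with the retry policy at
        # construction time, so every call retries temporary errors with a
        # fixed delay between attempts.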
        self.request = retry(
            retry=retry_if_exception_type(self.temporary_errors),
            # Retry forever (tenacity's stop_never) when max_retries is unset;
            # stop=None is not a valid stop strategy.
            stop=stop_after_attempt(max_retries)
            if max_retries is not None else stop_never,
            wait=wait_fixed(retry_delay),
            before_sleep=before_sleep_log(logging.getLogger('aiobaseclient'),
                                          logging.WARNING),
            reraise=True,
        )(self.request)
Example #8
    def sync_to_db(self, session: Optional[Session] = None):
        """Save attributes about list of DAG to the DB."""
        # To avoid circular import - airflow.models.dagbag -> airflow.models.dag -> airflow.models.dagbag
        from airflow.models.dag import DAG
        from airflow.models.serialized_dag import SerializedDagModel

        # Retry 'DAG.bulk_write_to_db' & 'SerializedDagModel.bulk_sync_to_db' in case
        # of any Operational Errors
        # In case of failures, provide_session handles rollback
        for attempt in tenacity.Retrying(
                retry=tenacity.retry_if_exception_type(
                    exception_types=OperationalError),
                wait=tenacity.wait_random_exponential(multiplier=0.5, max=5),
                stop=tenacity.stop_after_attempt(settings.MAX_DB_RETRIES),
                before_sleep=tenacity.before_sleep_log(self.log,
                                                       logging.DEBUG),
                reraise=True):
            with attempt:
                self.log.debug(
                    "Running dagbag.sync_to_db with retries. Try %d of %d",
                    attempt.retry_state.attempt_number,
                    settings.MAX_DB_RETRIES)
                self.log.debug("Calling the DAG.bulk_sync_to_db method")
                try:
                    DAG.bulk_write_to_db(self.dags.values(), session=session)

                    # Write Serialized DAGs to DB
                    self.log.debug(
                        "Calling the SerializedDagModel.bulk_sync_to_db method"
                    )
                    SerializedDagModel.bulk_sync_to_db(self.dags.values(),
                                                       session=session)
                except OperationalError:
                    session.rollback()
                    raise
Example #9
def minio_service(minio_config: Dict[str, str]) -> Iterator[Minio]:

    client = Minio(**minio_config["client"])

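    # Waits for the MinIO service to come up: probes bucket operations every
    # 5 seconds for up to 60 attempts (~5 minutes), logging a warning before
    # each retry.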
    for attempt in Retrying(
        wait=tenacity.wait_fixed(5),
        stop=tenacity.stop_after_attempt(60),
        before_sleep=tenacity.before_sleep_log(log, logging.WARNING),
        reraise=True,
    ):
        with attempt:
            # TODO: improve as https://docs.min.io/docs/minio-monitoring-guide.html
            if not client.bucket_exists("pytest"):
                client.make_bucket("pytest")
            client.remove_bucket("pytest")

    bucket_name = minio_config["bucket_name"]

    # cleans up in case a failing test left this bucket
    _ensure_remove_bucket(client, bucket_name)

    client.make_bucket(bucket_name)
    assert client.bucket_exists(bucket_name)

    yield client

    # cleanup upon tear-down
    _ensure_remove_bucket(client, bucket_name)
Example #10
def retry(fn):
    return _retry(
        wait=wait_exponential(multiplier=1, min=4, max=10),
        stop=stop_after_attempt(5),
        reraise=True,
        before=before_log(logger, logging.INFO),
        before_sleep=before_sleep_log(logger, logging.INFO),
    )(fn)
Example #11
def __init__(self, worker_config: Watcher):
    self.config = worker_config
    self.logger = get_logger(f'{self.config.room_id}.live')
    self._retry = partial(retry,
                          wait=wait_fixed(config.retry_delay),
                          before=before_log(self.logger, logging.DEBUG),
                          before_sleep=before_sleep_log(
                              self.logger, logging.WARNING))
Example #12
def wrapper(*args, **kwargs) -> Any:
    return Retrying(
        retry=(retry_if_network_error() | retry_if_throttling_error()),
        stop=stop_after_attempt(max_attempt_number=max_retries),
        wait=(wait_spotify_throttling() + wait_random(min=1, max=3)),
        before=before_log(retry_logger, logging.DEBUG),
        before_sleep=before_sleep_log(retry_logger, logging.WARNING),
    ).call(func, *args, **kwargs)
Example #13
    def sync_to_db(self, session: Optional[Session] = None):
        """Save attributes about list of DAG to the DB."""
        # To avoid circular import - airflow.models.dagbag -> airflow.models.dag -> airflow.models.dagbag
        from airflow.models.dag import DAG
        from airflow.models.serialized_dag import SerializedDagModel

        def _serialize_dag_capturing_errors(dag, session):
            """
            Try to serialize the dag to the DB, but make a note of any errors.

            We can't place them directly in import_errors, as this may be retried, and succeed the next time
            """
            if dag.is_subdag:
                return []
            try:
                # We can't use bulk_write_to_db as we want to capture each error individually
                SerializedDagModel.write_dag(
                    dag,
                    min_update_interval=settings.MIN_SERIALIZED_DAG_UPDATE_INTERVAL,
                    session=session,
                )
                return []
            except OperationalError:
                raise
            except Exception:  # pylint: disable=broad-except
                return [(dag.fileloc, traceback.format_exc(limit=-self.dagbag_import_error_traceback_depth))]

        # Retry 'DAG.bulk_write_to_db' & 'SerializedDagModel.bulk_sync_to_db' in case
        # of any Operational Errors
        # In case of failures, provide_session handles rollback
        for attempt in tenacity.Retrying(
            retry=tenacity.retry_if_exception_type(exception_types=OperationalError),
            wait=tenacity.wait_random_exponential(multiplier=0.5, max=5),
            stop=tenacity.stop_after_attempt(settings.MAX_DB_RETRIES),
            before_sleep=tenacity.before_sleep_log(self.log, logging.DEBUG),
            reraise=True,
        ):
            with attempt:
                serialize_errors = []
                self.log.debug(
                    "Running dagbag.sync_to_db with retries. Try %d of %d",
                    attempt.retry_state.attempt_number,
                    settings.MAX_DB_RETRIES,
                )
                self.log.debug("Calling the DAG.bulk_sync_to_db method")
                try:
                    # Write Serialized DAGs to DB, capturing errors
                    for dag in self.dags.values():
                        serialize_errors.extend(_serialize_dag_capturing_errors(dag, session))

                    DAG.bulk_write_to_db(self.dags.values(), session=session)
                except OperationalError:
                    session.rollback()
                    raise
                # Only now we are "complete" do we update import_errors - don't want to record errors from
                # previous failed attempts
                self.import_errors.update(dict(serialize_errors))
Example #14
    def __init__(self, logger: Optional[logging.Logger] = None):
        logger = logger or log

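        # Shared tenacity kwargs: fixed wait, a bounded number of attempts, an
        # INFO log before each sleep, and reraise so callers see the original
        # error.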
        self.kwargs = dict(
            wait=wait_fixed(self.WAIT_SECS),
            stop=stop_after_attempt(self.ATTEMPTS_COUNT),
            before_sleep=before_sleep_log(logger, logging.INFO),
            reraise=True,
        )
Example #15
def _wrapper(*args, **kwargs):
    r = tenacity.Retrying(
        before_sleep=tenacity.before_sleep_log(LOG, logging.DEBUG),
        after=tenacity.after_log(LOG, logging.DEBUG),
        stop=tenacity.stop_after_attempt(retries),
        reraise=True,
        retry=tenacity.retry_if_exception_type(exceptions),
        wait=wait)
    # Retrying.call() is the legacy invocation API; recent tenacity releases
    # call the Retrying object directly instead.
    return r.call(f, *args, **kwargs)
Example #16
class TapeDrive(models.Model):
    STATUS_CHOICES = (
        (0, 'Inactive'),
        (20, 'Write'),
        (100, 'FAIL'),
    )

    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    drive_id = models.IntegerField()
    device = models.CharField(max_length=255, unique=True)
    io_queue_entry = models.OneToOneField('IOQueue',
                                          models.PROTECT,
                                          related_name='tape_drive',
                                          null=True,
                                          blank=True)
    num_of_mounts = models.IntegerField(default=0)
    idle_time = models.DurationField(default=timedelta(hours=1))
    last_change = models.DateTimeField(default=timezone.now)
    robot = models.ForeignKey('Robot',
                              models.PROTECT,
                              related_name='tape_drives')
    locked = models.BooleanField(default=False)
    status = models.IntegerField(choices=STATUS_CHOICES, default=20)

    @classmethod
    @transaction.atomic
    @retry(retry=retry_if_exception_type(RequestException),
           reraise=True,
           stop=stop_after_attempt(5),
           wait=wait_fixed(60),
           before_sleep=before_sleep_log(logger, logging.DEBUG))
    def create_from_remote_copy(cls,
                                host,
                                session,
                                object_id,
                                create_storage_medium=True):
        remote_obj_url = urljoin(
            host, reverse('tapedrive-detail', args=(object_id, )))
        r = session.get(remote_obj_url, timeout=60)
        r.raise_for_status()

        data = r.json()
        data.pop('status_display', None)

        data['robot'] = Robot.create_from_remote_copy(host, session,
                                                      data['robot'])
        if not create_storage_medium:
            data.pop('storage_medium', None)

        tape_drive, _ = TapeDrive.objects.update_or_create(
            pk=data.pop('id'),
            defaults=data,
        )
        return tape_drive

    def __str__(self):
        return self.device
Example #17
def pg_retry_policy(logger: Optional[logging.Logger] = None) -> Dict:
    """Retry policy for postgres requests upon failure."""
    logger = logger or logging.getLogger(__name__)
    return dict(
        wait=wait_fixed(5),
        stop=stop_after_attempt(20),
        before_sleep=before_sleep_log(logger, logging.WARNING),
        reraise=True,
    )
Example #18
def _wrapper(*args, **kwargs):
    r = tenacity.Retrying(sleep=tenacity.nap.sleep,
                          before_sleep=tenacity.before_sleep_log(
                              LOG, logging.DEBUG),
                          after=tenacity.after_log(LOG, logging.DEBUG),
                          stop=tenacity.stop_after_attempt(retries),
                          reraise=True,
                          retry=retry(retry_param),
                          wait=wait)
    return r.call(f, *args, **kwargs)
Example #19
def _execute(self, *args, **kwargs):  # pylint: disable=signature-differs
    # Workaround TD bug: throttled operations are reported as internal.
    # Ref b/175345578
    retryer = tenacity.Retrying(
        retry=tenacity.retry_if_exception(self._operation_internal_error),
        wait=tenacity.wait_fixed(10),
        stop=tenacity.stop_after_delay(5 * 60),
        before_sleep=tenacity.before_sleep_log(logger, logging.DEBUG),
        reraise=True)
    retryer(super()._execute, *args, **kwargs)
Example #20
def wait_for_node_status(w3, predicate, sleep_time=30.0):
    retry = tenacity.retry(
        wait=tenacity.wait_exponential(multiplier=1, min=5, max=120),
        before_sleep=tenacity.before_sleep_log(logger, logging.WARNING),
    )
    while True:
        node_status = retry(get_node_status)(w3)
        if predicate(node_status):
            return node_status
        gevent.sleep(sleep_time)
Example #21
def execute_request_with_logger(request, logger, level):
    @tenacity.retry(stop=tenacity.stop_after_attempt(MAX_ATTEMPTS),
                    wait=tenacity.wait_exponential(multiplier=EXP_MULTIPLIER,
                                                   max=EXP_MAX_WAIT),
                    retry=retry_exceptions,
                    before_sleep=tenacity.before_sleep_log(logger, level))
    def _execute():
        return request.execute()

    return _execute()
Example #22
def _wrapper(*args, **kwargs):
    r = tenacity.Retrying(
        before_sleep=tenacity.before_sleep_log(LOG, logging.DEBUG),
        after=tenacity.after_log(LOG, logging.DEBUG),
        stop=tenacity.stop_after_attempt(retries),
        reraise=True,
        retry=tenacity.retry_if_exception_type(exceptions),
        wait=tenacity.wait_exponential(multiplier=interval,
                                       min=0,
                                       exp_base=backoff_rate))
    return r.call(f, *args, **kwargs)
Example #23
def retry_exponential_if_exception_type(exc_type, logger):
    """
    Decorator function that returns the tenacity @retry decorator with our commonly-used config
    :param exc_type: Type of exception (or tuple of types) to retry if encountered
    :param logger: A logger instance to send retry logs to
    :return: Result of tenacity.retry decorator function
    """
    return retry(retry=retry_if_exception_type(exc_type),
                 wait=wait_exponential(multiplier=RetryParameters.MULTIPLIER,
                                       min=RetryParameters.MIN,
                                       max=RetryParameters.MAX),
                 stop=stop_after_attempt(RetryParameters.ATTEMPTS),
                 before_sleep=before_sleep_log(logger, logging.DEBUG),
                 reraise=True)
Example #24
def enviroplus_retry(fn: Coroutine) -> Coroutine:
    @wraps(fn)
    @ten.retry(retry=ten.retry_if_exception_type(
        (aiohttp.ClientError, aiohttp.http_exceptions.HttpProcessingError)),
               reraise=True,
               wait=ten.wait_random(1, 3),
               stop=ten.stop_after_attempt(3),
               before_sleep=ten.before_sleep_log(logger, logging.DEBUG))
    async def wrapped_fn(*args, **kwargs):
        return await fn(*args, **kwargs)

    return wrapped_fn
Example #25
def handle_retry(logger: logging.Logger):
    """
    Retry policy after connection timeout or a network error

    SEE https://www.python-httpx.org/exceptions/
    """
    return retry(
        retry=retry_if_exception_type((httpx.TimeoutException, httpx.NetworkError)),
        wait=wait_fixed(2),
        stop=stop_after_attempt(3),
        reraise=True,
        before_sleep=before_sleep_log(logger, logging.DEBUG),
    )
Example #26
    def _before_sleep_log():
        logger = logging.root
        logger_log = logger.log

        def extensive_log(level, msg, *args, **kwargs):
            logger_log(level, msg, *args, **kwargs)
            if DEBUG_ON:
                log_to_console(str(msg) % args)

        # Monkeypatch inner logging function so it produces an exhaustive log when
        # used under the before-sleep logging utility in `tenacity`.
        logger.log = extensive_log
        return before_sleep_log(logger, logging.DEBUG, exc_info=True)
Example #27
async def wait_for_stable(self, release_name: str, arch: str, release_stream: str):
    go_arch = util.go_arch_for_brew_arch(arch)
    release_controller_url = f"https://{go_arch}.ocp.releases.ci.openshift.org"
    if self.runtime.dry_run:
        actual_phase = await self.get_release_phase(release_controller_url, release_stream, release_name)
        self._logger.warning("[DRY RUN] Release %s for %s has phase %s. Assume accepted.", release_name, arch, actual_phase)
        return
    # Polls get_release_phase until it returns "Accepted", retrying on any
    # exception as well.
    return await retry(
        stop=(stop_after_attempt(36)),  # wait for 5m * 36 = 180m = 3 hours
        wait=wait_fixed(300),  # wait for 5 minutes between retries
        retry=(retry_if_result(lambda phase: phase != "Accepted") | retry_if_exception_type()),
        before_sleep=before_sleep_log(self._logger, logging.WARNING),
    )(self.get_release_phase)(release_controller_url, release_stream, release_name)
Example #28
def copy_file_remotely(src, dst, requests_session, block_size=DEFAULT_BLOCK_SIZE):
    fsize = os.stat(src).st_size
    idx = 0

    time_start = time.time()
    upload_id = copy_chunk_remotely(src, dst, idx * block_size, requests_session=requests_session,
                                    file_size=fsize, block_size=block_size)
    idx += 1

    while idx * block_size <= fsize:
        copy_chunk_remotely(src, dst, idx * block_size, requests_session=requests_session,
                            file_size=fsize, block_size=block_size, upload_id=upload_id)
        idx += 1

    md5 = calculate_checksum(src, algorithm='MD5', block_size=block_size)

    completion_url = dst.rstrip('/') + '_complete/'

    m = MultipartEncoder(
        fields={
            'path': os.path.basename(src),
            'upload_id': upload_id,
            'md5': md5,
            'dst': requests_session.params.get('dst')
        }
    )
    headers = {'Content-Type': m.content_type}

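    # Retries the completion POST on any requests exception, for up to five
    # attempts spaced 60 seconds apart, logging each retry at DEBUG level.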
    @retry(retry=retry_if_exception_type(RequestException), reraise=True, stop=stop_after_attempt(5),
           wait=wait_fixed(60), before_sleep=before_sleep_log(logger, logging.DEBUG))
    def send_completion_request():
        response = requests_session.post(completion_url, data=m, headers=headers, timeout=60)
        response.raise_for_status()

    send_completion_request()

    time_end = time.time()
    time_elapsed = time_end - time_start

    fsize_mb = fsize / MB

    try:
        mb_per_sec = fsize_mb / time_elapsed
    except ZeroDivisionError:
        mb_per_sec = fsize_mb

    logger.info(
        'Copied {} ({} MB) to {} at {} MB/Sec ({} sec)'.format(
            src, fsize_mb, dst, mb_per_sec, time_elapsed
        )
    )
Example #29
def run_with_db_retries(max_retries: int = MAX_DB_RETRIES, logger: Optional[logging.Logger] = None, **kwargs):
    """Return Tenacity Retrying object with project specific default"""
    # Default kwargs
    retry_kwargs = dict(
        retry=tenacity.retry_if_exception_type(exception_types=(OperationalError, DBAPIError)),
        wait=tenacity.wait_random_exponential(multiplier=0.5, max=5),
        stop=tenacity.stop_after_attempt(max_retries),
        reraise=True,
        **kwargs,
    )
    if logger and isinstance(logger, logging.Logger):
        retry_kwargs["before_sleep"] = tenacity.before_sleep_log(logger, logging.DEBUG, exc_info=True)

    return tenacity.Retrying(**retry_kwargs)
Example #30
def run_with_db_retries(logger: logging.Logger, **kwargs):
    """Return Tenacity Retrying object with project specific default"""
    # Default kwargs
    retry_kwargs = dict(
        retry=tenacity.retry_if_exception_type(
            exception_types=OperationalError),
        wait=tenacity.wait_random_exponential(multiplier=0.5, max=5),
        stop=tenacity.stop_after_attempt(MAX_DB_RETRIES),
        before_sleep=tenacity.before_sleep_log(logger, logging.DEBUG),
        reraise=True,
    )
    retry_kwargs.update(kwargs)

    return tenacity.Retrying(**retry_kwargs)
Example #31
File: env.py  Project: itsonlycode/Mailu
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """

    # this callback is used to prevent an auto-migration from being generated
    # when there are no changes to the schema
    # reference: http://alembic.readthedocs.org/en/latest/cookbook.html
    def process_revision_directives(context, revision, directives):
        if getattr(config.cmd_opts, 'autogenerate', False):
            script = directives[0]
            if script.upgrade_ops.is_empty():
                directives[:] = []
                logger.info('No changes in schema detected.')

    engine = engine_from_config(config.get_section(config.config_ini_section),
                                prefix='sqlalchemy.',
                                poolclass=pool.NullPool)

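    # Retries the initial connection up to 100 times with a random 2-5 second
    # wait; Retrying.call() is the legacy tenacity API (recent releases invoke
    # the Retrying object directly).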
    connection = tenacity.Retrying(
        stop=tenacity.stop_after_attempt(100),
        wait=tenacity.wait_random(min=2, max=5),
        before=tenacity.before_log(logging.getLogger("tenacity.retry"), logging.DEBUG),
        before_sleep=tenacity.before_sleep_log(logging.getLogger("tenacity.retry"), logging.INFO),
        after=tenacity.after_log(logging.getLogger("tenacity.retry"), logging.DEBUG)
        ).call(engine.connect)

    context.configure(connection=connection,
                      target_metadata=target_metadata,
                      process_revision_directives=process_revision_directives,
                      **current_app.extensions['migrate'].configure_args)

    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        connection.close()