Example #1
 def test_valid_random_range(self, rr):
     rr.return_value = 0
     maximum = 100
     get_exponential_backoff_interval(factor=40,
                                      retries=10,
                                      maximum=maximum,
                                      full_jitter=True)
     rr.assert_called_once_with(maximum + 1)
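For reference, the assertion above follows from how the interval is computed. Below is a minimal sketch of the behaviour these tests exercise (an assumption based on the test expectations, not necessarily the library's exact source): the countdown grows as factor * 2 ** retries, is capped at maximum, and with full_jitter the final value is drawn uniformly from 0..countdown, which is why randrange is expected to receive maximum + 1.

import random

def sketch_backoff_interval(factor, retries, maximum, full_jitter=False):
    # Exponential growth, capped at the configured maximum.
    countdown = min(maximum, factor * (2 ** retries))
    if full_jitter:
        # Full jitter: pick uniformly from 0..countdown inclusive.
        # With factor=40 and retries=10 the cap applies, so this is
        # randrange(maximum + 1), matching the mocked assertion above.
        countdown = random.randrange(countdown + 1)
    # Never return a negative delay (see the negative-factor tests below).
    return max(0, countdown)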
Example #2
 def test_without_jitter(self):
     assert get_exponential_backoff_interval(
         factor=4,
         retries=3,
         maximum=100,
         full_jitter=False
     ) == 4 * (2 ** 3)
Example #3
 def run(*args, **kwargs):
     try:
         return task._orig_run(*args, **kwargs)
     except Ignore:
         # If the Ignore signal occurs, the task shouldn't be retried,
         # even if it matches the autoretry_for list
         raise
     except Retry:
         raise
     except dont_autoretry_for:
         raise
     except autoretry_for as exc:
         if retry_backoff:
             retry_kwargs['countdown'] = \
                 get_exponential_backoff_interval(
                     factor=retry_backoff,
                     retries=task.request.retries,
                     maximum=retry_backoff_max,
                     full_jitter=retry_jitter)
         # Override max_retries
         if hasattr(task, 'override_max_retries'):
             retry_kwargs['max_retries'] = getattr(
                 task, 'override_max_retries', task.max_retries)
         ret = task.retry(exc=exc, **retry_kwargs)
         # Stop propagation
         if hasattr(task, 'override_max_retries'):
             delattr(task, 'override_max_retries')
         raise ret
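For context, a wrapper like this is driven entirely by options declared on the task. A hypothetical task definition that would exercise the branches above might look as follows (the app instance, exception classes, and option values are placeholders; dont_autoretry_for is only honoured by Celery versions whose autoretry wrapper checks it, as this one does):

from celery import Celery

app = Celery('example')

@app.task(bind=True,
          autoretry_for=(ConnectionError, TimeoutError),
          dont_autoretry_for=(ValueError,),
          retry_backoff=2,        # factor passed to get_exponential_backoff_interval
          retry_backoff_max=600,  # cap on the computed countdown, in seconds
          retry_jitter=True,      # enables full jitter
          retry_kwargs={'max_retries': 5})
def fetch(self, url):
    ...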
Example #4
    def store_result(self, task_id, result, state,
                     traceback=None, request=None, **kwargs):
        """Update task state and result.

        If always_retry_backend_operation is activated and a recoverable exception
        occurs, the operation is retried with exponential backoff until the retry
        limit is reached.
        """
        result = self.encode_result(result, state)

        retries = 0

        while True:
            try:
                self._store_result(task_id, result, state, traceback,
                                   request=request, **kwargs)
                return result
            except Exception as exc:
                if self.always_retry and self.exception_safe_to_retry(exc):
                    if retries < self.max_retries:
                        retries += 1

                        # get_exponential_backoff_interval returns an integer
                        # (milliseconds here); time.sleep accepts floats, so
                        # dividing by 1000 gives a sub-second sleep in seconds.
                        sleep_amount = get_exponential_backoff_interval(
                            self.base_sleep_between_retries_ms, retries,
                            self.max_sleep_between_retries_ms, True) / 1000
                        self._sleep(sleep_amount)
                    else:
                        raise_with_context(
                            BackendStoreError("failed to store result on the backend", task_id=task_id, state=state),
                        )
                else:
                    raise
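The attributes consulted here (always_retry, max_retries, base_sleep_between_retries_ms, max_sleep_between_retries_ms) are normally populated from the result-backend settings. A configuration sketch, assuming an existing Celery app instance named app and the option names used by recent Celery releases (check your version's documentation):

app.conf.update(
    result_backend_always_retry=True,                   # -> self.always_retry
    result_backend_max_retries=10,                      # -> self.max_retries
    result_backend_base_sleep_between_retries_ms=10,    # -> base factor, in ms
    result_backend_max_sleep_between_retries_ms=10000,  # -> cap, in ms
)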
Example #5
            def run(*args, **kwargs):
                try:
                    return self._orig_run(*args, **kwargs)
                except self.autoretry_for as exc:
                    if self.request_stack:
                        if 'countdown' not in self.retry_kwargs:
                            countdown = get_exponential_backoff_interval(
                                factor=self.retry_backoff,
                                retries=self.request.retries,
                                maximum=self.retry_backoff_max,
                                full_jitter=self.retry_jitter,
                            )

                            retry_kwargs = self.retry_kwargs.copy()
                            retry_kwargs.update({'countdown': countdown})
                        else:
                            retry_kwargs = self.retry_kwargs

                        retry_kwargs.update({'exc': exc})
                        raise self.retry(**retry_kwargs)
                    else:
                        logging.warning('no celery task request stack')
                        logging.warning(traceback.format_exc())
                finally:
                    self.start_time = self.now_time
Example #6
 def test_bound_by_maximum(self):
     maximum_boundary = 100
     assert get_exponential_backoff_interval(
         factor=40,
         retries=3,
         maximum=maximum_boundary
     ) == maximum_boundary
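A quick check of the arithmetic this test relies on, using the sketch from Example #1:

# The uncapped interval exceeds the boundary, so the min() clamp takes over.
assert 40 * 2 ** 3 == 320
assert min(100, 40 * 2 ** 3) == 100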
Example #7
 def run(*args, **kwargs):
     try:
         return task._orig_run(*args, **kwargs)
     except autoretry_for as exc:
         if retry_backoff:
             retry_kwargs['countdown'] = \
                 get_exponential_backoff_interval(
                     factor=retry_backoff,
                     retries=task.request.retries,
                     maximum=retry_backoff_max,
                     full_jitter=retry_jitter)
         raise task.retry(exc=exc, **retry_kwargs)
Example #8
 def run(*args, **kwargs):
     try:
         return task._orig_run(*args, **kwargs)
     except autoretry_for as exc:
         if retry_backoff:
             retry_kwargs['countdown'] = \
                 get_exponential_backoff_interval(
                     factor=retry_backoff,
                     retries=task.request.retries,
                     maximum=retry_backoff_max,
                     full_jitter=retry_jitter)
         raise task.retry(exc=exc, **retry_kwargs)
Example #9
 def run(*args, **kwargs):
     try:
         return task._orig_run(*args, **kwargs)
     except Ignore:
         # If the Ignore signal occurs, the task shouldn't be retried,
         # even if it matches the autoretry_for list
         raise
     except autoretry_for as exc:
         if retry_backoff:
             retry_kwargs['countdown'] = \
                 get_exponential_backoff_interval(
                     factor=retry_backoff,
                     retries=task.request.retries,
                     maximum=retry_backoff_max,
                     full_jitter=retry_jitter)
         raise task.retry(exc=exc, **retry_kwargs)
Example #10
def exponential_backoff(retries):
    """
    Return a number of seconds to delay the next task attempt using
    an exponential back-off algorithm with jitter.

    See https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/

    :param retries: Number of retries so far
    :return: number of seconds to delay the next try
    """
    backoff_minutes = get_exponential_backoff_interval(
        factor=2,
        retries=retries,
        maximum=settings.CELERY_MAX_RETRY_DELAY_MINUTES,
        full_jitter=True,
    )
    # wait for a minimum of 1 minute
    return max(1, backoff_minutes) * 60
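A hypothetical caller for the helper above, passing the computed delay to Celery's retry (the app instance, TransientError, and do_sync are placeholders for this sketch):

@app.task(bind=True, max_retries=5)
def sync_data(self):
    try:
        do_sync()  # placeholder for the real work
    except TransientError as exc:  # placeholder exception type
        # exponential_backoff() returns whole seconds, which is what countdown expects.
        raise self.retry(exc=exc,
                         countdown=exponential_backoff(self.request.retries))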
Example #11
    def _get_retry_countdown(self, task_func):
        retry_backoff = int(
            self.task_kwargs.get('retry_backoff', True)
        )
        retry_backoff_max = int(
            self.task_kwargs.get('retry_backoff_max', 600)
        )
        retry_jitter = self.task_kwargs.get(
            'retry_jitter', True
        )

        countdown = get_exponential_backoff_interval(
            factor=retry_backoff,
            retries=task_func.request.retries,
            maximum=retry_backoff_max,
            full_jitter=retry_jitter
        )

        return countdown
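Note that int(True) == 1, so leaving retry_backoff at its default of True gives a backoff factor of one second, while an explicit number such as retry_backoff=5 is used as the factor directly. With the defaults above and jitter disabled for illustration, the successive countdowns would be:

# retry_backoff=True becomes factor 1 via int(True).
assert int(True) == 1
# Doubling per retry, capped at retry_backoff_max=600 seconds.
assert [min(600, 2 ** r) for r in range(11)] == \
    [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 600]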
Example #12
    def get_task_meta(self, task_id, cache=True):
        """Get task meta from backend.

        If always_retry_backend_operation is activated and a recoverable exception
        occurs, the operation is retried with exponential backoff until the retry
        limit is reached.
        """
        self._ensure_not_eager()
        if cache:
            try:
                return self._cache[task_id]
            except KeyError:
                pass
        retries = 0
        while True:
            try:
                meta = self._get_task_meta_for(task_id)
                break
            except Exception as exc:
                if self.always_retry and self.exception_safe_to_retry(exc):
                    if retries < self.max_retries:
                        retries += 1

                        # get_exponential_backoff_interval returns an integer
                        # (milliseconds here); time.sleep accepts floats, so
                        # dividing by 1000 gives a sub-second sleep in seconds.
                        sleep_amount = (get_exponential_backoff_interval(
                            self.base_sleep_between_retries_ms,
                            retries,
                            self.max_sleep_between_retries_ms,
                            True,
                        ) / 1000)
                        self._sleep(sleep_amount)
                    else:
                        raise_with_context(
                            BackendGetMetaError("failed to get meta",
                                                task_id=task_id), )
                else:
                    raise

        if cache and meta.get("status") == states.SUCCESS:
            self._cache[task_id] = meta
        return meta
Example #13
 def prepare_models(self, engine):
     if not self.prepared:
         # SQLAlchemy will check if the items exist before trying to
         # create them, which is a race condition. If it raises an error
         # in one iteration, the next may pass all the existence checks
         # and the call will succeed.
         retries = 0
         while True:
             try:
                 ResultModelBase.metadata.create_all(engine)
             except DatabaseError:
                 if retries < PREPARE_MODELS_MAX_RETRIES:
                     sleep_amount_ms = get_exponential_backoff_interval(
                         10, retries, 1000, True)
                     time.sleep(sleep_amount_ms / 1000)
                     retries += 1
                 else:
                     raise
             else:
                 break
         self.prepared = True
Example #14
def calculate_max_retries(retry_max_elapsed_backoff, retry_backoff_max):
    """
    Calculate retry count to allow before reaching the max elapsed time.

    Args:
        retry_max_elapsed_backoff (int): maximum time in seconds to allow for
            waiting cumulatively across all retries
        retry_backoff_max (int): maximum time in seconds to allow for a retry

    Returns:
        int: number of allowed retries

    """
    max_retries = 0
    elapsed_time = 0
    while elapsed_time < retry_max_elapsed_backoff:
        retry_factor = get_exponential_backoff_interval(
            factor=1, retries=max_retries, maximum=retry_backoff_max)
        elapsed_time += retry_factor
        if elapsed_time < retry_max_elapsed_backoff:
            max_retries += 1
    return max_retries
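A worked example for the helper above (deterministic because full_jitter is left at its default): with retry_backoff_max=10 and retry_max_elapsed_backoff=30, the successive waits are 1, 2, 4, 8, 10, 10, ... seconds, and five retries fit under the 30-second budget before the cumulative wait would reach 35.

assert calculate_max_retries(retry_max_elapsed_backoff=30, retry_backoff_max=10) == 5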
Example #15
    def wrapper(*args, **kwargs):
        task = args[0]
        assert isinstance(task, Task), 'bind=True must be set to enable retries'
        assert task.acks_late, 'acks_late=True must be set to send rejected tasks to the dead letter queue'

        autoretry_for = tuple(
            getattr(task, 'autoretry_for', ())
        )
        retry_backoff = int(
            getattr(task, 'retry_backoff', False)
        )
        retry_backoff_max = int(
            getattr(task, 'retry_backoff_max', 600)
        )
        retry_jitter = getattr(task, 'retry_jitter', True)

        countdown = None
        if retry_backoff:
            countdown = get_exponential_backoff_interval(
                factor=retry_backoff,
                retries=task.request.retries,
                maximum=retry_backoff_max,
                full_jitter=retry_jitter)

        try:
            if not autoretry_for:
                return func(*args, **kwargs)
            else:
                try:
                    return func(*args, **kwargs)
                except autoretry_for as retry_exc:
                    raise task.retry(exc=retry_exc, countdown=countdown)
        except TaskPredicate:
            # pass through celery specific exceptions
            raise
        except Exception as exc:
            # reject if max_retries exceeded
            raise Reject(exc, requeue=False) from exc
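A hypothetical way of attaching the wrapper above to a task (app, handle, and the decorator name retry_or_reject are assumed names for this sketch; bind=True and acks_late=True are required by the asserts at the top of the wrapper):

@app.task(bind=True, acks_late=True, max_retries=3,
          autoretry_for=(TimeoutError,),
          retry_backoff=2, retry_backoff_max=600, retry_jitter=True)
@retry_or_reject          # assumed decorator that returns the wrapper shown above
def process(self, payload):
    handle(payload)       # placeholder for the real work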
Example #16
 def test_negative_values(self):
     assert get_exponential_backoff_interval(factor=-40,
                                             retries=3,
                                             maximum=100) == 0
Example #17
 def test_negative_values(self):
     assert get_exponential_backoff_interval(
         factor=-40,
         retries=3,
         maximum=100
     ) == 0