Example #1
    def _store_result(self,
                      task_id,
                      result,
                      state,
                      traceback=None,
                      request=None,
                      **kwargs):
        meta = self._get_result_meta(result=result,
                                     state=state,
                                     traceback=traceback,
                                     request=request)
        meta['task_id'] = bytes_to_str(task_id)

        # Retrieve the current metadata from the backend; if the status is
        # already SUCCESS, ignore any further updates to the state.
        # This solves a task deduplication issue caused by network
        # partitioning or lost workers: a race condition could let a lost
        # task overwrite the last successful result in the result backend.
        current_meta = self._get_task_meta_for(task_id)

        if current_meta['status'] == states.SUCCESS:
            return result

        try:
            self._set_with_state(self.get_key_for_task(task_id),
                                 self.encode(meta), state)
        except BackendStoreError as ex:
            raise BackendStoreError(str(ex), state=state,
                                    task_id=task_id) from ex

        return result
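
The early return on states.SUCCESS is the heart of this method. The following minimal sketch uses a toy in-memory backend (not Celery's real key/value API; ToyBackend and its methods are invented for illustration) to show the effect: once a successful result is stored, a late write from a lost worker no longer overwrites it.

SUCCESS = 'SUCCESS'
FAILURE = 'FAILURE'

class ToyBackend:
    def __init__(self):
        self._store = {}  # task_id -> meta dict

    def get_task_meta(self, task_id):
        # Unknown tasks report PENDING, like the lookup above.
        return self._store.get(task_id, {'status': 'PENDING', 'result': None})

    def store_result(self, task_id, result, state):
        # Mirror the short-circuit above: a SUCCESS result is never overwritten.
        if self.get_task_meta(task_id)['status'] == SUCCESS:
            return self.get_task_meta(task_id)['result']
        self._store[task_id] = {'status': state, 'result': result}
        return result

backend = ToyBackend()
backend.store_result('t1', 42, SUCCESS)     # the real worker finishes first
backend.store_result('t1', None, FAILURE)   # a lost worker reports late
assert backend.get_task_meta('t1')['result'] == 42  # success is preserved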
Example #2
    def store_result(self, task_id, result, state,
                     traceback=None, request=None, **kwargs):
        """Update task state and result.

        If ``always_retry_backend_operation`` is enabled and a recoverable
        exception occurs, the operation is retried with exponential backoff
        until the retry limit is reached.
        """
        result = self.encode_result(result, state)

        retries = 0

        while True:
            try:
                self._store_result(task_id, result, state, traceback,
                                   request=request, **kwargs)
                return result
            except Exception as exc:
                if self.always_retry and self.exception_safe_to_retry(exc):
                    if retries < self.max_retries:
                        retries += 1

                        # get_exponential_backoff_interval returns an integer
                        # number of milliseconds; time.sleep() accepts floats,
                        # so divide by 1000 for sub-second sleeps.
                        sleep_amount = get_exponential_backoff_interval(
                            self.base_sleep_between_retries_ms, retries,
                            self.max_sleep_between_retries_ms, True) / 1000
                        self._sleep(sleep_amount)
                    else:
                        raise_with_context(
                            BackendStoreError("failed to store result on the backend", task_id=task_id, state=state),
                        )
                else:
                    raise
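
For reference, here is a minimal sketch of the kind of computation the retry loop relies on: the wait time grows exponentially, is capped at a maximum, and can be randomised with full jitter. The function name, parameters, and jitter behaviour below are assumptions for illustration and are not guaranteed to match get_exponential_backoff_interval exactly.

import random

def backoff_interval_ms(base_ms, retries, max_ms, full_jitter=False):
    # Exponential growth, capped at max_ms.
    countdown = min(max_ms, base_ms * (2 ** retries))
    if full_jitter:
        # Full jitter: pick a random point in [0, countdown] so that many
        # workers retrying at once do not hammer the backend in lockstep.
        countdown = random.randrange(countdown + 1)
    return max(0, countdown)

# Roughly how the loop above would sleep with a 10 ms base and a 10 s cap
# (jitter disabled here so the progression is visible):
for attempt in range(1, 6):
    print(attempt, backoff_interval_ms(10, attempt, 10_000) / 1000)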
Example #3
    def set(self, key, value, **retry_policy):
        if isinstance(value, str) and len(value) > self._MAX_STR_VALUE_SIZE:
            raise BackendStoreError('value too large for Redis backend')

        return self.ensure(self._set, (key, value), **retry_policy)
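
The size guard exists because Redis caps a single string value at 512 MB by default, so oversized payloads are rejected client-side before the write is even attempted. The constant and helper below are assumptions used for demonstration only; the backend's actual _MAX_STR_VALUE_SIZE may be defined differently.

MAX_STR_VALUE_SIZE = 512 * 1024 * 1024  # Redis's default per-string ceiling (512 MB)

def guard_value_size(value):
    # Reject oversized payloads up front instead of letting the write fail
    # on the Redis server.
    if isinstance(value, str) and len(value) > MAX_STR_VALUE_SIZE:
        raise ValueError('value too large for Redis backend')
    return value

guard_value_size('small payload')  # passes through unchanged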