class DummyClient(object):
    """In-memory stand-in for a cache client, backed by a bounded LRU map.

    Method signatures accept (and ignore) the extra positional/keyword
    arguments that the real client APIs take.
    """

    def __init__(self, *args, **kwargs):
        self.cache = LRUCache(limit=5000)

    def get(self, key, *args, **kwargs):
        # Missing keys yield None, mirroring memcached-style clients.
        return self.cache.get(key)

    def get_multi(self, keys):
        store = self.cache
        found = {}
        for key in keys:
            if key in store:
                found[key] = store[key]
        return found

    def set(self, key, value, *args, **kwargs):
        self.cache[key] = value

    def delete(self, key, *args, **kwargs):
        # Deleting an absent key is a no-op.
        self.cache.pop(key, None)

    def incr(self, key, delta=1):
        return self.cache.incr(key, delta)
class DummyClient:
    """In-memory stand-in for a cache client, backed by a bounded LRU map.

    Extra positional/keyword arguments mirror the real client APIs and are
    ignored.
    """

    def __init__(self, *args, **kwargs):
        self.cache = LRUCache(limit=5000)

    def get(self, key, *args, **kwargs):
        # None signals a miss, as with memcached-style clients.
        return self.cache.get(key)

    def get_multi(self, keys):
        store = self.cache
        return {key: store[key] for key in keys if key in store}

    def set(self, key, value, *args, **kwargs):
        self.cache[key] = value

    def delete(self, key, *args, **kwargs):
        # Absent keys are ignored rather than raising.
        self.cache.pop(key, None)

    def incr(self, key, delta=1):
        return self.cache.incr(key, delta)

    def touch(self, key, expire):
        # Expiry is meaningless for the in-memory dummy; intentionally a no-op.
        pass
class BaseBackend(object):
    """Base (synchronous) result backend.

    Subclasses implement the actual storage via ``_store_result``,
    ``_get_task_meta_for``, ``_save_group``, ``_restore_group``,
    ``_delete_group`` and ``_forget``.
    """

    READY_STATES = states.READY_STATES
    UNREADY_STATES = states.UNREADY_STATES
    EXCEPTION_STATES = states.EXCEPTION_STATES

    TimeoutError = TimeoutError

    #: Time to sleep between polling each individual item
    #: in `ResultSet.iterate`. as opposed to the `interval`
    #: argument which is for each pass.
    subpolling_interval = None

    #: If true the backend must implement :meth:`get_many`.
    supports_native_join = False

    #: If true the backend must automatically expire results.
    #: The daily backend_cleanup periodic task will not be triggered
    #: in this case.
    supports_autoexpire = False

    #: Set to true if the backend is persistent by default.
    persistent = True

    retry_policy = {
        'max_retries': 20,
        'interval_start': 0,
        'interval_step': 1,
        'interval_max': 1,
    }

    def __init__(self, app, serializer=None,
                 max_cached_results=None, accept=None, **kwargs):
        self.app = app
        conf = self.app.conf
        self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER
        (self.content_type,
         self.content_encoding,
         self.encoder) = serializer_registry._encoders[self.serializer]
        self._cache = LRUCache(
            limit=max_cached_results or conf.CELERY_MAX_CACHED_RESULTS,
        )
        self.accept = prepare_accept_content(
            conf.CELERY_ACCEPT_CONTENT if accept is None else accept,
        )

    def mark_as_started(self, task_id, **meta):
        """Mark a task as started"""
        return self.store_result(task_id, meta, status=states.STARTED)

    def mark_as_done(self, task_id, result, request=None):
        """Mark task as successfully executed."""
        return self.store_result(task_id, result,
                                 status=states.SUCCESS, request=request)

    def mark_as_failure(self, task_id, exc, traceback=None, request=None):
        """Mark task as executed with failure. Stores the exception."""
        return self.store_result(task_id, exc, status=states.FAILURE,
                                 traceback=traceback, request=request)

    def fail_from_current_stack(self, task_id, exc=None):
        """Mark task as failed using the exception currently being handled.

        Returns the :class:`ExceptionInfo` that was stored.
        """
        type_, real_exc, tb = sys.exc_info()
        try:
            exc = real_exc if exc is None else exc
            ei = ExceptionInfo((type_, exc, tb))
            self.mark_as_failure(task_id, exc, ei.traceback)
            return ei
        finally:
            # Drop our reference to the traceback to break the
            # frame -> traceback reference cycle.
            del tb

    def mark_as_retry(self, task_id, exc, traceback=None, request=None):
        """Mark task as being retried.  Stores the current
        exception (if any)."""
        return self.store_result(task_id, exc, status=states.RETRY,
                                 traceback=traceback, request=request)

    def mark_as_revoked(self, task_id, reason='', request=None):
        """Mark task as revoked, storing a :exc:`TaskRevokedError`."""
        return self.store_result(task_id, TaskRevokedError(reason),
                                 status=states.REVOKED, traceback=None,
                                 request=request)

    def prepare_exception(self, exc):
        """Prepare exception for serialization."""
        if self.serializer in EXCEPTION_ABLE_CODECS:
            return get_pickleable_exception(exc)
        # Non-pickle serializers get a plain, serializable description.
        return {'exc_type': type(exc).__name__, 'exc_message': str(exc)}

    def exception_to_python(self, exc):
        """Convert serialized exception to Python exception."""
        if self.serializer in EXCEPTION_ABLE_CODECS:
            return get_pickled_exception(exc)
        return create_exception_cls(
            from_utf8(exc['exc_type']), __name__)(exc['exc_message'])

    def prepare_value(self, result):
        """Prepare value for storage."""
        if self.serializer != 'pickle' and isinstance(result, ResultBase):
            return result.as_tuple()
        return result

    def encode(self, data):
        """Serialize ``data`` using the configured serializer."""
        _, _, payload = dumps(data, serializer=self.serializer)
        return payload

    def decode(self, payload):
        """Deserialize a stored payload.

        Note: previously written as ``PY3 and payload or str(payload)``,
        which wrongly called ``str()`` on a falsy (e.g. empty) payload even
        on Python 3.
        """
        if not PY3:
            payload = str(payload)
        return loads(payload,
                     content_type=self.content_type,
                     content_encoding=self.content_encoding,
                     accept=self.accept)

    def wait_for(self, task_id, timeout=None, propagate=True, interval=0.5):
        """Wait for task and return its result.

        If the task raises an exception, this exception
        will be re-raised by :func:`wait_for`.

        If `timeout` is not :const:`None`, this raises the
        :class:`celery.exceptions.TimeoutError` exception if the operation
        takes longer than `timeout` seconds.

        """
        time_elapsed = 0.0

        while 1:
            status = self.get_status(task_id)
            if status == states.SUCCESS:
                return self.get_result(task_id)
            elif status in states.PROPAGATE_STATES:
                result = self.get_result(task_id)
                if propagate:
                    raise result
                return result
            # avoid hammering the CPU checking status.
            time.sleep(interval)
            time_elapsed += interval
            if timeout and time_elapsed >= timeout:
                raise TimeoutError('The operation timed out.')

    def prepare_expires(self, value, type=None):
        """Normalize an expiry value, falling back to the app default.

        ``timedelta`` values are converted to seconds; if ``type`` is given
        the (non-None) result is coerced with it.
        """
        if value is None:
            value = self.app.conf.CELERY_TASK_RESULT_EXPIRES
        if isinstance(value, timedelta):
            value = timeutils.timedelta_seconds(value)
        if value is not None and type:
            return type(value)
        return value

    def prepare_persistent(self, enabled=None):
        """Resolve the persistence flag: explicit argument wins, then the
        app configuration, then the class default."""
        if enabled is not None:
            return enabled
        p = self.app.conf.CELERY_RESULT_PERSISTENT
        return self.persistent if p is None else p

    def encode_result(self, result, status):
        if status in self.EXCEPTION_STATES and isinstance(result, Exception):
            return self.prepare_exception(result)
        else:
            return self.prepare_value(result)

    def is_cached(self, task_id):
        return task_id in self._cache

    def store_result(self, task_id, result, status,
                     traceback=None, request=None, **kwargs):
        """Update task state and result."""
        result = self.encode_result(result, status)
        self._store_result(task_id, result, status, traceback,
                           request=request, **kwargs)
        return result

    def forget(self, task_id):
        # Evict from the local cache before asking the backend to forget.
        self._cache.pop(task_id, None)
        self._forget(task_id)

    def _forget(self, task_id):
        raise NotImplementedError('backend does not implement forget.')

    def get_status(self, task_id):
        """Get the status of a task."""
        return self.get_task_meta(task_id)['status']

    def get_traceback(self, task_id):
        """Get the traceback for a failed task."""
        return self.get_task_meta(task_id).get('traceback')

    def get_result(self, task_id):
        """Get the result of a task."""
        meta = self.get_task_meta(task_id)
        if meta['status'] in self.EXCEPTION_STATES:
            return self.exception_to_python(meta['result'])
        else:
            return meta['result']

    def get_children(self, task_id):
        """Get the list of subtasks sent by a task."""
        try:
            return self.get_task_meta(task_id)['children']
        except KeyError:
            # No children recorded: implicitly return None.
            pass

    def get_task_meta(self, task_id, cache=True):
        if cache:
            try:
                return self._cache[task_id]
            except KeyError:
                pass

        meta = self._get_task_meta_for(task_id)
        # Only cache final successful results; pending states may change.
        if cache and meta.get('status') == states.SUCCESS:
            self._cache[task_id] = meta
        return meta

    def reload_task_result(self, task_id):
        """Reload task result, even if it has been previously fetched."""
        self._cache[task_id] = self.get_task_meta(task_id, cache=False)

    def reload_group_result(self, group_id):
        """Reload group result, even if it has been previously fetched."""
        self._cache[group_id] = self.get_group_meta(group_id, cache=False)

    def get_group_meta(self, group_id, cache=True):
        if cache:
            try:
                return self._cache[group_id]
            except KeyError:
                pass

        meta = self._restore_group(group_id)
        if cache and meta is not None:
            self._cache[group_id] = meta
        return meta

    def restore_group(self, group_id, cache=True):
        """Get the result for a group."""
        meta = self.get_group_meta(group_id, cache=cache)
        if meta:
            return meta['result']

    def save_group(self, group_id, result):
        """Store the result of an executed group."""
        return self._save_group(group_id, result)

    def delete_group(self, group_id):
        self._cache.pop(group_id, None)
        return self._delete_group(group_id)

    def cleanup(self):
        """Backend cleanup. Is run by
        :class:`celery.task.DeleteExpiredTaskMetaTask`."""
        pass

    def process_cleanup(self):
        """Cleanup actions to do at the end of a task worker process."""
        pass

    def on_task_call(self, producer, task_id):
        return {}

    def on_chord_part_return(self, task, propagate=False):
        pass

    def fallback_chord_unlock(self, group_id, body, result=None,
                              countdown=1, **kwargs):
        # Poll-based chord: schedule the unlock task to check readiness.
        kwargs['result'] = [r.as_tuple() for r in result]
        self.app.tasks['celery.chord_unlock'].apply_async(
            (group_id, body, ), kwargs, countdown=countdown,
        )

    def apply_chord(self, header, partial_args, group_id, body, **options):
        result = header(*partial_args, task_id=group_id)
        self.fallback_chord_unlock(group_id, body, **options)
        return result

    def current_task_children(self, request=None):
        request = request or getattr(current_task(), 'request', None)
        if request:
            return [r.as_tuple() for r in getattr(request, 'children', [])]

    def __reduce__(self, args=(), kwargs=None):
        # ``kwargs`` previously defaulted to a shared mutable ``{}``;
        # use None-sentinel instead (behaviorally equivalent for callers).
        kwargs = {} if kwargs is None else kwargs
        return (unpickle_backend, (self.__class__, args, kwargs))