def __init__(self, callback=None, workers=None, tasks=None, taskheap=None,
             max_workers_in_memory=5000, max_tasks_in_memory=10000,
             on_node_join=None, on_node_leave=None,
             tasks_by_type=None, tasks_by_worker=None):
    """Record cluster state.

    Workers and tasks are kept in bounded LRU caches so memory use stays
    constant on busy clusters; callers may inject pre-populated
    containers (e.g. when restoring pickled state).
    """
    self.event_callback = callback
    self.workers = (LRUCache(max_workers_in_memory)
                    if workers is None else workers)
    self.tasks = (LRUCache(max_tasks_in_memory)
                  if tasks is None else tasks)
    self._taskheap = [] if taskheap is None else taskheap
    self.max_workers_in_memory = max_workers_in_memory
    self.max_tasks_in_memory = max_tasks_in_memory
    self.on_node_join = on_node_join
    self.on_node_leave = on_node_leave
    self._mutex = threading.Lock()
    self.handlers = {}
    self._seen_types = set()
    # NOTE(review): presumably maps task uuids awaiting resolution of
    # out-of-order events — confirm against the handlers using it.
    self._tasks_to_resolve = {}
    # Heap must be (re)built only after ``self.tasks`` is assigned.
    self.rebuild_taskheap()

    # Callable mappings indexing tasks by type and by worker, seeded from
    # the serialized snapshots when restoring state.
    self.tasks_by_type = CallableDefaultdict(
        self._tasks_by_type, WeakSet)  # type: Mapping[str, WeakSet[Task]]
    self.tasks_by_type.update(
        _deserialize_Task_WeakSet_Mapping(tasks_by_type, self.tasks))

    self.tasks_by_worker = CallableDefaultdict(
        self._tasks_by_worker, WeakSet)  # type: Mapping[str, WeakSet[Task]]
    self.tasks_by_worker.update(
        _deserialize_Task_WeakSet_Mapping(tasks_by_worker, self.tasks))
def assertSafeIter(self, method, interval=0.01, size=10000):
    """Assert that iterating ``LRUCache.<method>()`` is safe while another
    thread concurrently pops items from the cache.

    NOTE(review): ``interval`` is accepted but never used — the loop
    sleeps a hard-coded 0.0001s instead; confirm whether that is intended.
    """
    # NOTE(review): ``>= (3, 5)`` also skips 3.6+ although the message
    # only mentions Py3.5 — confirm the intended version range.
    if sys.version_info >= (3, 5):
        raise SkipTest('Fails on Py3.5')
    from threading import Thread, Event
    from time import sleep
    x = LRUCache(size)
    x.update(zip(range(size), range(size)))

    class Burglar(Thread):
        """Thread that steals items from the cache while it is iterated."""

        def __init__(self, cache):
            self.cache = cache
            self.__is_shutdown = Event()
            self.__is_stopped = Event()
            Thread.__init__(self)

        def run(self):
            # Pop oldest entries until told to stop or the cache is empty.
            while not self.__is_shutdown.isSet():
                try:
                    self.cache.popitem(last=False)
                except KeyError:
                    break
            self.__is_stopped.set()

        def stop(self):
            # Signal shutdown, wait for run() to finish, then join.
            self.__is_shutdown.set()
            self.__is_stopped.wait()
            self.join(THREAD_TIMEOUT_MAX)

    burglar = Burglar(x)
    burglar.start()
    try:
        for _ in getattr(x, method)():
            sleep(0.0001)
    finally:
        burglar.stop()
def assertSafeIter(self, method, interval=0.01, size=10000):
    """Assert ``LRUCache.<method>()`` iteration survives concurrent deletion.

    NOTE(review): ``interval`` is accepted but never used — the loop
    sleeps a hard-coded 0.0001s instead; confirm whether that is intended.
    """
    from threading import Thread, Event
    from time import sleep
    x = LRUCache(size)
    x.update(zip(range(size), range(size)))

    class Burglar(Thread):
        """Thread that pops entries out from under the iterating test."""

        def __init__(self, cache):
            self.cache = cache
            self.__is_shutdown = Event()
            self.__is_stopped = Event()
            Thread.__init__(self)

        def run(self):
            # Pops from ``cache.data`` directly — presumably to bypass any
            # synchronization in LRUCache itself and force the race;
            # confirm against the LRUCache implementation.
            while not self.__is_shutdown.isSet():
                try:
                    self.cache.data.popitem(last=False)
                except KeyError:
                    break
            self.__is_stopped.set()

        def stop(self):
            # Signal shutdown, wait for run() to finish, then join.
            self.__is_shutdown.set()
            self.__is_stopped.wait()
            self.join(THREAD_TIMEOUT_MAX)

    burglar = Burglar(x)
    burglar.start()
    try:
        for _ in getattr(x, method)():
            sleep(0.0001)
    finally:
        burglar.stop()
def test_update_expires(self):
    """update() past the limit keeps only the newest ``limit`` keys."""
    limit = 100
    cache = LRUCache(limit=limit)
    keys = list(range(limit * 2))
    for key in keys:
        cache.update({key: key})
    self.assertListEqual(list(cache.keys()), list(keys[limit:]))
def test_expires(self):
    """Item assignment past the limit evicts the oldest keys."""
    limit = 100
    cache = LRUCache(limit=limit)
    keys = list(range(limit * 2))
    for key in keys:
        cache[key] = key
    self.assertListEqual(list(cache.keys()), list(keys[limit:]))
    # The remaining entries are still fully accessible.
    self.assertTrue(cache.items())
    self.assertTrue(cache.values())
def __init__(self, callback=None, max_workers_in_memory=5000,
             max_tasks_in_memory=10000):
    """Initialize cluster state with bounded worker/task caches."""
    self.event_callback = callback
    self.max_workers_in_memory = max_workers_in_memory
    self.max_tasks_in_memory = max_tasks_in_memory
    # Bounded LRU caches keep memory use constant on busy clusters.
    self.workers = LRUCache(limit=max_workers_in_memory)
    self.tasks = LRUCache(limit=max_tasks_in_memory)
    self._taskheap = []
    self._mutex = threading.Lock()
def __init__(self, app, serializer=None, max_cached_results=None, **kwargs):
    """Configure the result backend from the app configuration.

    ``serializer`` and ``max_cached_results`` override the corresponding
    ``CELERY_*`` settings when given.
    """
    self.app = app
    conf = self.app.conf
    self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER
    # Resolve content-type, encoding and encode function for the serializer.
    (self.content_type,
     self.content_encoding,
     self.encoder) = serialization.registry._encoders[self.serializer]
    # Bounded cache of task metadata, avoids re-fetching finished results.
    self._cache = LRUCache(
        limit=max_cached_results or conf.CELERY_MAX_CACHED_RESULTS,
    )
def __init__(self, app, serializer=None,
             max_cached_results=None, accept=None, **kwargs):
    """Configure the result backend from the app configuration.

    ``serializer``, ``max_cached_results`` and ``accept`` override the
    corresponding ``CELERY_*`` settings when given.
    """
    self.app = app
    conf = self.app.conf
    self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER
    # Resolve content-type, encoding and encode function for the serializer.
    (self.content_type,
     self.content_encoding,
     self.encoder) = serializer_registry._encoders[self.serializer]
    # Bounded cache of task metadata, avoids re-fetching finished results.
    self._cache = LRUCache(
        limit=max_cached_results or conf.CELERY_MAX_CACHED_RESULTS,
    )
    self.accept = prepare_accept_content(
        conf.CELERY_ACCEPT_CONTENT if accept is None else accept,
    )
def __init__(self, app, serializer=None, max_cached_results=None,
             accept=None, expires=None, expires_type=None, url=None,
             **kwargs):
    """Configure the result backend; arguments override app settings."""
    self.app = app
    conf = self.app.conf
    self.serializer = serializer or conf.result_serializer
    # Resolve content-type, encoding and encode function for the serializer.
    (self.content_type,
     self.content_encoding,
     self.encoder) = serializer_registry._encoders[self.serializer]
    cmax = max_cached_results or conf.result_cache_max
    # result_cache_max == -1 disables local result caching entirely.
    self._cache = _nulldict() if cmax == -1 else LRUCache(limit=cmax)

    self.expires = self.prepare_expires(expires, expires_type)

    # precedence: accept, conf.result_accept_content, conf.accept_content
    self.accept = conf.result_accept_content if accept is None else accept
    self.accept = conf.accept_content if self.accept is None else self.accept
    self.accept = prepare_accept_content(self.accept)

    # Retry policy for transient backend failures (opt-in via
    # result_backend_always_retry); sleeps are in milliseconds.
    self.always_retry = conf.get('result_backend_always_retry', False)
    self.max_sleep_between_retries_ms = conf.get(
        'result_backend_max_sleep_between_retries_ms', 10000)
    self.base_sleep_between_retries_ms = conf.get(
        'result_backend_base_sleep_between_retries_ms', 10)
    self.max_retries = conf.get('result_backend_max_retries', float("inf"))

    # Weak refs so pending result objects can be garbage collected.
    self._pending_results = pending_results_t({}, WeakValueDictionary())
    self._pending_messages = BufferMap(MESSAGE_BUFFER_MAX)
    self.url = url
def test_least_recently_used(self):
    """Eviction follows access recency, not insertion order."""
    cache = LRUCache(3)
    cache[1] = 1
    cache[2] = 2
    cache[3] = 3
    self.assertEqual(list(cache.keys()), [1, 2, 3])
    cache[4] = 4
    cache[5] = 5
    self.assertEqual(list(cache.keys()), [3, 4, 5])
    # Touch key 3 so it becomes the most recently used entry.
    cache[3]
    cache[6] = 6
    self.assertEqual(list(cache.keys()), [5, 3, 6])
    cache[7] = 7
    self.assertEqual(list(cache.keys()), [3, 6, 7])
def __init__(self, app, serializer=None, max_cached_results=None,
             accept=None, expires=None, expires_type=None, url=None,
             **kwargs):
    """Configure the result backend; arguments override app settings."""
    self.app = app
    conf = self.app.conf
    self.serializer = serializer or conf.result_serializer
    # Resolve content-type, encoding and encode function for the serializer.
    (self.content_type,
     self.content_encoding,
     self.encoder) = serializer_registry._encoders[self.serializer]
    cmax = max_cached_results or conf.result_cache_max
    # result_cache_max == -1 disables local result caching entirely.
    self._cache = _nulldict() if cmax == -1 else LRUCache(limit=cmax)

    self.expires = self.prepare_expires(expires, expires_type)

    # precedence: accept, conf.result_accept_content, conf.accept_content
    self.accept = conf.result_accept_content if accept is None else accept
    self.accept = conf.accept_content if self.accept is None else self.accept  # noqa: E501
    self.accept = prepare_accept_content(self.accept)

    # Weak refs so pending result objects can be garbage collected.
    self._pending_results = pending_results_t({}, WeakValueDictionary())
    self._pending_messages = BufferMap(MESSAGE_BUFFER_MAX)
    self.url = url
def __init__(self, app, serializer=None, max_cached_results=None,
             accept=None, **kwargs):
    """Set up serializer, bounded result cache and accepted content-types."""
    self.app = app
    conf = app.conf
    self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER
    # Look up content-type, encoding and encode function once.
    codec = serializer_registry._encoders[self.serializer]
    self.content_type, self.content_encoding, self.encoder = codec
    cache_limit = max_cached_results or conf.CELERY_MAX_CACHED_RESULTS
    self._cache = LRUCache(limit=cache_limit)
    if accept is None:
        accept = conf.CELERY_ACCEPT_CONTENT
    self.accept = prepare_accept_content(accept)
def __init__(self, callback=None, workers=None, tasks=None, taskheap=None,
             max_workers_in_memory=5000, max_tasks_in_memory=10000,
             on_node_join=None, on_node_leave=None):
    """Initialize cluster state with bounded caches and node callbacks."""
    self.event_callback = callback
    self.max_workers_in_memory = max_workers_in_memory
    self.max_tasks_in_memory = max_tasks_in_memory
    self.on_node_join = on_node_join
    self.on_node_leave = on_node_leave
    # Use caller-supplied containers when given, else bounded LRU caches.
    if workers is None:
        workers = LRUCache(max_workers_in_memory)
    if tasks is None:
        tasks = LRUCache(max_tasks_in_memory)
    self.workers = workers
    self.tasks = tasks
    self._taskheap = taskheap if taskheap is not None else []
    self._mutex = threading.Lock()
    self.handlers = {}
    self._seen_types = set()
    # Rebuild depends on ``self.tasks`` being assigned above.
    self.rebuild_taskheap()
def __init__(self, callback=None, workers=None, tasks=None, taskheap=None,
             max_workers_in_memory=5000, max_tasks_in_memory=10000):
    """Initialize cluster state and the event dispatch table."""
    self.event_callback = callback
    # Use caller-supplied containers when given, else bounded LRU caches.
    if workers is None:
        workers = LRUCache(max_workers_in_memory)
    if tasks is None:
        tasks = LRUCache(max_tasks_in_memory)
    self.workers = workers
    self.tasks = tasks
    self._taskheap = taskheap if taskheap is not None else []
    self.max_workers_in_memory = max_workers_in_memory
    self.max_tasks_in_memory = max_tasks_in_memory
    self._mutex = threading.Lock()
    # Dispatch table mapping event group -> handler method.
    self.handlers = {'task': self.task_event,
                     'worker': self.worker_event}
    self._get_handler = self.handlers.__getitem__
def __init__(self, callback=None, workers=None, tasks=None, taskheap=None,
             max_workers_in_memory=5000, max_tasks_in_memory=10000):
    """Set up bounded state containers and build the task heap."""
    self.event_callback = callback
    self.max_workers_in_memory = max_workers_in_memory
    self.max_tasks_in_memory = max_tasks_in_memory
    # Use caller-supplied containers when given, else bounded LRU caches.
    if workers is None:
        workers = LRUCache(max_workers_in_memory)
    if tasks is None:
        tasks = LRUCache(max_tasks_in_memory)
    self.workers = workers
    self.tasks = tasks
    self._taskheap = taskheap if taskheap is not None else []
    self._mutex = threading.Lock()
    self.handlers = {}
    self._seen_types = set()
    # Rebuild depends on ``self.tasks`` being assigned above.
    self.rebuild_taskheap()
class DummyClient(object):
    """In-memory stand-in for a memcached client, backed by an LRU cache."""

    def __init__(self, *args, **kwargs):
        # Client arguments are accepted for API compatibility and ignored.
        self.cache = LRUCache(limit=5000)

    def get(self, key, *args, **kwargs):
        return self.cache.get(key)

    def get_multi(self, keys):
        store = self.cache
        return {key: store[key] for key in keys if key in store}

    def set(self, key, value, *args, **kwargs):
        self.cache[key] = value

    def delete(self, key, *args, **kwargs):
        self.cache.pop(key, None)

    def incr(self, key, delta=1):
        return self.cache.incr(key, delta)
class Config:
    """Application settings: Redis broker/backend and a shared cache."""

    DEBUG = False
    SERVICE_NAME = 'celery_caching'
    REDIS_HOST = "0.0.0.0"
    REDIS_PORT = 6379
    # Redis database 0 serves as both the broker and the result backend.
    BROKER_URL = "redis://{host}:{port}/0".format(host=REDIS_HOST, port=str(REDIS_PORT))
    CELERY_RESULT_BACKEND = BROKER_URL
    # Set the cache with a key limit of 10.
    RESOURCE_CACHE = LRUCache(limit=10)
class DummyClient:
    """In-memory memcached stand-in backed by a bounded LRU cache."""

    def __init__(self, *args, **kwargs):
        # Client arguments are accepted for API compatibility and ignored.
        self.cache = LRUCache(limit=5000)

    def get(self, key, *args, **kwargs):
        return self.cache.get(key)

    def get_multi(self, keys):
        store = self.cache
        return dict((key, store[key]) for key in keys if key in store)

    def set(self, key, value, *args, **kwargs):
        self.cache[key] = value

    def delete(self, key, *args, **kwargs):
        self.cache.pop(key, None)

    def incr(self, key, delta=1):
        return self.cache.incr(key, delta)

    def touch(self, key, expire):
        # No-op: entries in the dummy client never expire.
        pass
def __init__(self, app, serializer=None, max_cached_results=None,
             accept=None, expires=None, expires_type=None, **kwargs):
    """Configure the result backend; arguments override CELERY_* settings."""
    self.app = app
    conf = self.app.conf
    self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER
    # Resolve content-type, encoding and encode function for the serializer.
    (self.content_type,
     self.content_encoding,
     self.encoder) = serializer_registry._encoders[self.serializer]
    cmax = max_cached_results or conf.CELERY_MAX_CACHED_RESULTS
    # A limit of -1 disables local result caching entirely.
    self._cache = _nulldict() if cmax == -1 else LRUCache(limit=cmax)
    self.expires = self.prepare_expires(expires, expires_type)
    self.accept = prepare_accept_content(
        conf.CELERY_ACCEPT_CONTENT if accept is None else accept,
    )
def __init__(self, app, serializer=None, max_cached_results=None,
             accept=None, expires=None, expires_type=None, **kwargs):
    """Configure the result backend; arguments override app settings."""
    self.app = app
    conf = self.app.conf
    self.serializer = serializer or conf.result_serializer
    # Resolve content-type, encoding and encode function for the serializer.
    (self.content_type,
     self.content_encoding,
     self.encoder) = serializer_registry._encoders[self.serializer]
    cmax = max_cached_results or conf.result_cache_max
    # result_cache_max == -1 disables local result caching entirely.
    self._cache = _nulldict() if cmax == -1 else LRUCache(limit=cmax)
    self.expires = self.prepare_expires(expires, expires_type)
    self.accept = prepare_accept_content(
        conf.accept_content if accept is None else accept,
    )
def test_is_pickleable(self):
    """A pickle round-trip must preserve both the limit and the contents."""
    x = LRUCache(limit=10)
    x.update(luke=1, leia=2)
    y = pickle.loads(pickle.dumps(x))
    # Compare the copy against the ORIGINAL cache: the previous
    # ``assertEqual(y.limit, y.limit)`` was a tautology that could
    # never fail regardless of what pickling did to ``limit``.
    self.assertEqual(y.limit, x.limit)
    self.assertEqual(y, x)
def test_items(self):
    """items() on a populated cache yields a non-empty sequence."""
    cache = LRUCache()
    cache.update(a=1, b=2, c=3)
    self.assertTrue(list(items(cache)))
def test_update_larger_than_cache_size(self):
    """A bulk update larger than the limit keeps only the newest keys."""
    cache = LRUCache(2)
    cache.update({n: n for n in range(100)})
    self.assertEqual(list(cache.keys()), [98, 99])
class BaseBackend(object):
    """Base class for result backends.

    Provides the shared machinery (serialization, result caching, state
    bookkeeping); concrete backends implement the ``_store_result`` /
    ``_get_task_meta_for`` / ``_forget`` family of hooks.
    """

    READY_STATES = states.READY_STATES
    UNREADY_STATES = states.UNREADY_STATES
    EXCEPTION_STATES = states.EXCEPTION_STATES

    TimeoutError = TimeoutError

    #: Time to sleep between polling each individual item
    #: in `ResultSet.iterate`, as opposed to the `interval`
    #: argument which is for each pass.
    subpolling_interval = None

    #: If true the backend must implement :meth:`get_many`.
    supports_native_join = False

    #: If true the backend must automatically expire results.
    #: The daily backend_cleanup periodic task will not be triggered
    #: in this case.
    supports_autoexpire = False

    #: Set to true if the backend is persistent by default.
    persistent = True

    retry_policy = {
        'max_retries': 20,
        'interval_start': 0,
        'interval_step': 1,
        'interval_max': 1,
    }

    def __init__(self, app, serializer=None,
                 max_cached_results=None, accept=None, **kwargs):
        """Configure the backend; arguments override CELERY_* settings."""
        self.app = app
        conf = self.app.conf
        self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER
        # Resolve content-type, encoding and encode function once.
        (self.content_type,
         self.content_encoding,
         self.encoder) = serializer_registry._encoders[self.serializer]
        # Bounded cache of task metadata, avoids re-fetching results.
        self._cache = LRUCache(
            limit=max_cached_results or conf.CELERY_MAX_CACHED_RESULTS,
        )
        self.accept = prepare_accept_content(
            conf.CELERY_ACCEPT_CONTENT if accept is None else accept,
        )

    def mark_as_started(self, task_id, **meta):
        """Mark a task as started."""
        return self.store_result(task_id, meta, status=states.STARTED)

    def mark_as_done(self, task_id, result, request=None):
        """Mark task as successfully executed."""
        return self.store_result(task_id, result,
                                 status=states.SUCCESS, request=request)

    def mark_as_failure(self, task_id, exc, traceback=None, request=None):
        """Mark task as executed with failure.

        Stores the exception.
        """
        return self.store_result(task_id, exc, status=states.FAILURE,
                                 traceback=traceback, request=request)

    def fail_from_current_stack(self, task_id, exc=None):
        """Mark the task failed using the exception currently being handled."""
        type_, real_exc, tb = sys.exc_info()
        try:
            exc = real_exc if exc is None else exc
            ei = ExceptionInfo((type_, exc, tb))
            self.mark_as_failure(task_id, exc, ei.traceback)
            return ei
        finally:
            # Break the frame/traceback reference cycle.
            del(tb)

    def mark_as_retry(self, task_id, exc, traceback=None, request=None):
        """Mark task as being retried.

        Stores the current exception (if any).
        """
        return self.store_result(task_id, exc, status=states.RETRY,
                                 traceback=traceback, request=request)

    def mark_as_revoked(self, task_id, reason='', request=None):
        """Mark the task revoked, storing ``reason`` as the result."""
        return self.store_result(task_id, TaskRevokedError(reason),
                                 status=states.REVOKED, traceback=None,
                                 request=request)

    def prepare_exception(self, exc):
        """Prepare exception for serialization."""
        if self.serializer in EXCEPTION_ABLE_CODECS:
            return get_pickleable_exception(exc)
        # Non-pickle serializers carry only the type name and message.
        return {'exc_type': type(exc).__name__, 'exc_message': str(exc)}

    def exception_to_python(self, exc):
        """Convert serialized exception to Python exception."""
        if self.serializer in EXCEPTION_ABLE_CODECS:
            return get_pickled_exception(exc)
        return create_exception_cls(
            from_utf8(exc['exc_type']), __name__)(exc['exc_message'])

    def prepare_value(self, result):
        """Prepare value for storage."""
        # Non-pickle serializers cannot carry result objects directly.
        if self.serializer != 'pickle' and isinstance(result, ResultBase):
            return result.as_tuple()
        return result

    def encode(self, data):
        """Serialize ``data`` with the configured serializer."""
        _, _, payload = dumps(data, serializer=self.serializer)
        return payload

    def decode(self, payload):
        """Deserialize a stored payload."""
        # On Py2 the payload may need coercion to str first.
        payload = PY3 and payload or str(payload)
        return loads(payload,
                     content_type=self.content_type,
                     content_encoding=self.content_encoding,
                     accept=self.accept)

    def wait_for(self, task_id, timeout=None, propagate=True, interval=0.5):
        """Wait for task and return its result.

        If the task raises an exception, this exception will be
        re-raised by :func:`wait_for`.

        If `timeout` is not :const:`None`, this raises the
        :class:`celery.exceptions.TimeoutError` exception if the operation
        takes longer than `timeout` seconds.
        """
        time_elapsed = 0.0
        while 1:
            status = self.get_status(task_id)
            if status == states.SUCCESS:
                return self.get_result(task_id)
            elif status in states.PROPAGATE_STATES:
                result = self.get_result(task_id)
                if propagate:
                    raise result
                return result
            # avoid hammering the CPU checking status.
            time.sleep(interval)
            time_elapsed += interval
            if timeout and time_elapsed >= timeout:
                raise TimeoutError('The operation timed out.')

    def prepare_expires(self, value, type=None):
        """Normalize an expiry value, defaulting from the app config.

        ``timedelta`` values are converted to seconds; ``type`` optionally
        coerces the final value (e.g. to ``int``).
        """
        if value is None:
            value = self.app.conf.CELERY_TASK_RESULT_EXPIRES
        if isinstance(value, timedelta):
            value = timeutils.timedelta_seconds(value)
        if value is not None and type:
            return type(value)
        return value

    def prepare_persistent(self, enabled=None):
        """Resolve the persistence flag from argument, config, or default."""
        if enabled is not None:
            return enabled
        p = self.app.conf.CELERY_RESULT_PERSISTENT
        return self.persistent if p is None else p

    def encode_result(self, result, status):
        """Encode ``result`` for storage, serializing exceptions if needed."""
        if status in self.EXCEPTION_STATES and isinstance(result, Exception):
            return self.prepare_exception(result)
        else:
            return self.prepare_value(result)

    def is_cached(self, task_id):
        """Return true if the task meta is in the local cache."""
        return task_id in self._cache

    def store_result(self, task_id, result, status,
                     traceback=None, request=None, **kwargs):
        """Update task state and result."""
        result = self.encode_result(result, status)
        self._store_result(task_id, result, status, traceback,
                           request=request, **kwargs)
        return result

    def forget(self, task_id):
        """Forget a task: drop it from the cache and the backend store."""
        self._cache.pop(task_id, None)
        self._forget(task_id)

    def _forget(self, task_id):
        raise NotImplementedError('backend does not implement forget.')

    def get_status(self, task_id):
        """Get the status of a task."""
        return self.get_task_meta(task_id)['status']

    def get_traceback(self, task_id):
        """Get the traceback for a failed task."""
        return self.get_task_meta(task_id).get('traceback')

    def get_result(self, task_id):
        """Get the result of a task."""
        meta = self.get_task_meta(task_id)
        if meta['status'] in self.EXCEPTION_STATES:
            return self.exception_to_python(meta['result'])
        else:
            return meta['result']

    def get_children(self, task_id):
        """Get the list of subtasks sent by a task."""
        try:
            return self.get_task_meta(task_id)['children']
        except KeyError:
            pass

    def get_task_meta(self, task_id, cache=True):
        """Fetch task metadata, consulting the local cache first."""
        if cache:
            try:
                return self._cache[task_id]
            except KeyError:
                pass
        meta = self._get_task_meta_for(task_id)
        # Only final successful results are cached; pending meta may change.
        if cache and meta.get('status') == states.SUCCESS:
            self._cache[task_id] = meta
        return meta

    def reload_task_result(self, task_id):
        """Reload task result, even if it has been previously fetched."""
        self._cache[task_id] = self.get_task_meta(task_id, cache=False)

    def reload_group_result(self, group_id):
        """Reload group result, even if it has been previously fetched."""
        self._cache[group_id] = self.get_group_meta(group_id, cache=False)

    def get_group_meta(self, group_id, cache=True):
        """Fetch group metadata, consulting the local cache first."""
        if cache:
            try:
                return self._cache[group_id]
            except KeyError:
                pass
        meta = self._restore_group(group_id)
        if cache and meta is not None:
            self._cache[group_id] = meta
        return meta

    def restore_group(self, group_id, cache=True):
        """Get the result for a group."""
        meta = self.get_group_meta(group_id, cache=cache)
        if meta:
            return meta['result']

    def save_group(self, group_id, result):
        """Store the result of an executed group."""
        return self._save_group(group_id, result)

    def delete_group(self, group_id):
        """Delete a group result from cache and backend store."""
        self._cache.pop(group_id, None)
        return self._delete_group(group_id)

    def cleanup(self):
        """Backend cleanup.

        Is run by :class:`celery.task.DeleteExpiredTaskMetaTask`.
        """
        pass

    def process_cleanup(self):
        """Cleanup actions to do at the end of a task worker process."""
        pass

    def on_task_call(self, producer, task_id):
        """Hook called before a task message is published; may add headers."""
        return {}

    def on_chord_part_return(self, task, propagate=False):
        """Hook called when a chord member returns (no-op by default)."""
        pass

    def fallback_chord_unlock(self, group_id, body, result=None,
                              countdown=1, **kwargs):
        """Schedule the chord_unlock polling task for backends without
        native chord support."""
        kwargs['result'] = [r.as_tuple() for r in result]
        self.app.tasks['celery.chord_unlock'].apply_async(
            (group_id, body, ), kwargs, countdown=countdown,
        )

    def apply_chord(self, header, partial_args, group_id, body, **options):
        """Run the chord header group and schedule the unlock task."""
        result = header(*partial_args, task_id=group_id)
        self.fallback_chord_unlock(group_id, body, **options)
        return result

    def current_task_children(self, request=None):
        """Return the current task's children as serializable tuples."""
        request = request or getattr(current_task(), 'request', None)
        if request:
            return [r.as_tuple() for r in getattr(request, 'children', [])]

    def __reduce__(self, args=(), kwargs={}):
        # NOTE(review): mutable default ``kwargs={}`` is never mutated
        # here, so it is harmless — but worth confirming in subclasses.
        return (unpickle_backend, (self.__class__, args, kwargs))
class State(object):
    """Records clusters state."""

    Worker = Worker
    Task = Task
    event_count = 0
    task_count = 0
    # Each task commonly generates ~4 events
    # (PENDING -> RECEIVED -> STARTED -> final), so the event heap is
    # allowed to grow to this multiple of the task cache size.
    heap_multiplier = 4

    def __init__(
        self,
        callback=None,
        workers=None,
        tasks=None,
        taskheap=None,
        max_workers_in_memory=5000,
        max_tasks_in_memory=10000,
        on_node_join=None,
        on_node_leave=None,
    ):
        """Record cluster state; workers/tasks live in bounded LRU caches."""
        self.event_callback = callback
        self.workers = LRUCache(max_workers_in_memory) if workers is None else workers
        self.tasks = LRUCache(max_tasks_in_memory) if tasks is None else tasks
        self._taskheap = [] if taskheap is None else taskheap
        self.max_workers_in_memory = max_workers_in_memory
        self.max_tasks_in_memory = max_tasks_in_memory
        self.on_node_join = on_node_join
        self.on_node_leave = on_node_leave
        self._mutex = threading.Lock()
        self.handlers = {}
        self._seen_types = set()
        self.rebuild_taskheap()

    @cached_property
    def _event(self):
        # Dispatcher is built lazily and cached on first event.
        return self._create_dispatcher()

    def freeze_while(self, fun, *args, **kwargs):
        """Run ``fun`` while holding the state mutex; optionally clear after."""
        clear_after = kwargs.pop("clear_after", False)
        with self._mutex:
            try:
                return fun(*args, **kwargs)
            finally:
                if clear_after:
                    self._clear()

    def clear_tasks(self, ready=True):
        with self._mutex:
            return self._clear_tasks(ready)

    def _clear_tasks(self, ready=True):
        if ready:
            # Keep tasks still in progress, drop everything finished.
            in_progress = {uuid: task for uuid, task in self.itertasks()
                           if task.state not in states.READY_STATES}
            self.tasks.clear()
            self.tasks.update(in_progress)
        else:
            self.tasks.clear()
        self._taskheap[:] = []

    def _clear(self, ready=True):
        self.workers.clear()
        self._clear_tasks(ready)
        self.event_count = 0
        self.task_count = 0

    def clear(self, ready=True):
        with self._mutex:
            return self._clear(ready)

    def get_or_create_worker(self, hostname, **kwargs):
        """Get or create worker by hostname.

        Return tuple of ``(worker, was_created)``.
        """
        try:
            worker = self.workers[hostname]
            if kwargs:
                worker.update(kwargs)
            return worker, False
        except KeyError:
            worker = self.workers[hostname] = self.Worker(hostname, **kwargs)
            return worker, True

    def get_or_create_task(self, uuid):
        """Get or create task by uuid."""
        try:
            return self.tasks[uuid], False
        except KeyError:
            task = self.tasks[uuid] = self.Task(uuid)
            return task, True

    def event(self, event):
        with self._mutex:
            return self._event(event)

    def task_event(self, type_, fields):
        """Deprecated, use :meth:`event`."""
        return self._event(dict(fields, type="-".join(["task", type_])))[0]

    def worker_event(self, type_, fields):
        """Deprecated, use :meth:`event`."""
        return self._event(dict(fields, type="-".join(["worker", type_])))[0]

    def _create_dispatcher(self):
        """Build the closure that processes a single event dict."""
        # Hoist frequently-used attributes into locals: the returned
        # closure is the hot path for every received event.
        get_handler = self.handlers.__getitem__
        event_callback = self.event_callback
        wfields = itemgetter("hostname", "timestamp", "local_received")
        tfields = itemgetter("uuid", "hostname", "timestamp",
                             "local_received", "clock")
        taskheap = self._taskheap
        th_append = taskheap.append
        th_pop = taskheap.pop
        # Removing events from task heap is an O(n) operation,
        # so easier to just account for the common number of events
        # for each task (PENDING->RECEIVED->STARTED->final)
        #: an O(n) operation
        max_events_in_heap = self.max_tasks_in_memory * self.heap_multiplier
        add_type = self._seen_types.add
        on_node_join, on_node_leave = self.on_node_join, self.on_node_leave
        tasks, Task = self.tasks, self.Task
        workers, Worker = self.workers, self.Worker
        # avoid updating LRU entry at getitem
        get_worker, get_task = workers.data.__getitem__, tasks.data.__getitem__

        def _event(event, timetuple=timetuple, KeyError=KeyError,
                   insort=bisect.insort, created=True):
            self.event_count += 1
            if event_callback:
                event_callback(self, event)
            group, _, subject = event["type"].partition("-")
            try:
                handler = get_handler(group)
            except KeyError:
                pass
            else:
                # Custom handler registered for this event group.
                return handler(subject, event), subject

            if group == "worker":
                try:
                    hostname, timestamp, local_received = wfields(event)
                except KeyError:
                    pass
                else:
                    is_offline = subject == "offline"
                    try:
                        worker, created = get_worker(hostname), False
                    except KeyError:
                        if is_offline:
                            # Don't resurrect a worker just to mark it gone.
                            worker, created = Worker(hostname), False
                        else:
                            worker = workers[hostname] = Worker(hostname)
                    worker.event(subject, timestamp, local_received, event)
                    if on_node_join and (created or subject == "online"):
                        on_node_join(worker)
                    if on_node_leave and is_offline:
                        on_node_leave(worker)
                        workers.pop(hostname, None)
                    return (worker, created), subject
            elif group == "task":
                (uuid, hostname, timestamp,
                 local_received, clock) = tfields(event)
                # task-sent event is sent by client, not worker
                is_client_event = subject == "sent"
                try:
                    task, created = get_task(uuid), False
                except KeyError:
                    task = tasks[uuid] = Task(uuid)
                if is_client_event:
                    task.client = hostname
                else:
                    try:
                        worker, created = get_worker(hostname), False
                    except KeyError:
                        worker = workers[hostname] = Worker(hostname)
                    task.worker = worker
                    if worker is not None and local_received:
                        worker.event(None, local_received, timestamp)
                origin = hostname if is_client_event else worker.id

                # remove oldest event if exceeding the limit.
                heaps = len(taskheap)
                if heaps + 1 > max_events_in_heap:
                    th_pop(0)

                # most events will be dated later than the previous.
                timetup = timetuple(clock, timestamp, origin, ref(task))
                if heaps and timetup > taskheap[-1]:
                    th_append(timetup)
                else:
                    insort(taskheap, timetup)

                if subject == "received":
                    self.task_count += 1
                task.event(subject, timestamp, local_received, event)
                task_name = task.name
                if task_name is not None:
                    add_type(task_name)
                return (task, created), subject
        return _event

    def rebuild_taskheap(self, timetuple=timetuple):
        """Rebuild the sorted event heap from the current task cache."""
        heap = self._taskheap[:] = [
            timetuple(t.clock, t.timestamp, t.origin, ref(t))
            for t in values(self.tasks)
        ]
        heap.sort()

    def itertasks(self, limit=None):
        """Yield up to ``limit`` ``(uuid, Task)`` pairs from the cache."""
        for index, row in enumerate(items(self.tasks)):
            yield row
            if limit and index + 1 >= limit:
                break

    def tasks_by_time(self, limit=None):
        """Generator giving tasks ordered by time,
        in ``(uuid, Task)`` tuples."""
        seen = set()
        for evtup in islice(reversed(self._taskheap), 0, limit):
            task = evtup[3]()
            # Weakref may be dead if the task was evicted from the cache.
            if task is not None:
                uuid = task.uuid
                if uuid not in seen:
                    yield uuid, task
                    seen.add(uuid)
    tasks_by_timestamp = tasks_by_time

    def tasks_by_type(self, name, limit=None):
        """Get all tasks by type.

        Return a list of ``(uuid, Task)`` tuples.
        """
        return islice(
            ((uuid, task) for uuid, task in self.tasks_by_time()
             if task.name == name),
            0, limit)

    def tasks_by_worker(self, hostname, limit=None):
        """Get all tasks by worker.
        """
        return islice(
            ((uuid, task) for uuid, task in self.tasks_by_time()
             if task.worker.hostname == hostname),
            0, limit
        )

    def task_types(self):
        """Return a list of all seen task types."""
        return sorted(self._seen_types)

    def alive_workers(self):
        """Return a list of (seemingly) alive workers."""
        return [w for w in values(self.workers) if w.alive]

    def __repr__(self):
        return R_STATE.format(self)

    def __reduce__(self):
        # taskheap is passed as None; it is rebuilt on unpickle.
        return (
            self.__class__,
            (
                self.event_callback, self.workers, self.tasks, None,
                self.max_workers_in_memory, self.max_tasks_in_memory,
                self.on_node_join, self.on_node_leave,
            ),
        )
def __init__(self, *args, **kwargs):
    """Create the client's bounded in-memory cache.

    Extra positional/keyword arguments are accepted and ignored —
    presumably to mirror a real cache client's signature (confirm).
    """
    # Bounded LRU cache so the client cannot grow without limit.
    self.cache = LRUCache(limit=5000)
# -*- coding: utf-8 -*- """Utility to dump events to screen. This is a simple program that dumps events to the console as they happen. Think of it like a `tcpdump` for Celery events. """ from __future__ import absolute_import, print_function, unicode_literals import sys from datetime import datetime from celery.app import app_or_default from celery.utils.functional import LRUCache from celery.utils.time import humanize_seconds __all__ = ['Dumper', 'evdump'] TASK_NAMES = LRUCache(limit=0xFFF) HUMAN_TYPES = { 'worker-offline': 'shutdown', 'worker-online': 'started', 'worker-heartbeat': 'heartbeat', } CONNECTION_ERROR = """\ -> Cannot connect to %s: %s. Trying again %s """ def humanize_type(type): try:
class State(object):
    """Records clusters state."""

    Worker = Worker
    Task = Task
    event_count = 0
    task_count = 0

    def __init__(
        self,
        callback=None,
        workers=None,
        tasks=None,
        taskheap=None,
        max_workers_in_memory=5000,
        max_tasks_in_memory=10000,
    ):
        """Record cluster state; workers/tasks live in bounded LRU caches."""
        self.event_callback = callback
        self.workers = LRUCache(max_workers_in_memory) if workers is None else workers
        self.tasks = LRUCache(max_tasks_in_memory) if tasks is None else tasks
        self._taskheap = [] if taskheap is None else taskheap
        self.max_workers_in_memory = max_workers_in_memory
        self.max_tasks_in_memory = max_tasks_in_memory
        self._mutex = threading.Lock()
        self.handlers = {}
        self._seen_types = set()
        self.rebuild_taskheap()

    @cached_property
    def _event(self):
        # Dispatcher is built lazily and cached on first event.
        return self._create_dispatcher()

    def freeze_while(self, fun, *args, **kwargs):
        """Run ``fun`` while holding the state mutex; optionally clear after."""
        clear_after = kwargs.pop("clear_after", False)
        with self._mutex:
            try:
                return fun(*args, **kwargs)
            finally:
                if clear_after:
                    self._clear()

    def clear_tasks(self, ready=True):
        with self._mutex:
            return self._clear_tasks(ready)

    def _clear_tasks(self, ready=True):
        if ready:
            # Keep tasks still in progress, drop everything finished.
            in_progress = dict((uuid, task) for uuid, task in self.itertasks()
                               if task.state not in states.READY_STATES)
            self.tasks.clear()
            self.tasks.update(in_progress)
        else:
            self.tasks.clear()
        self._taskheap[:] = []

    def _clear(self, ready=True):
        self.workers.clear()
        self._clear_tasks(ready)
        self.event_count = 0
        self.task_count = 0

    def clear(self, ready=True):
        with self._mutex:
            return self._clear(ready)

    def get_or_create_worker(self, hostname, **kwargs):
        """Get or create worker by hostname.

        Return tuple of ``(worker, was_created)``.
        """
        try:
            worker = self.workers[hostname]
            if kwargs:
                worker.update(kwargs)
            return worker, False
        except KeyError:
            worker = self.workers[hostname] = self.Worker(hostname, **kwargs)
            return worker, True

    def get_or_create_task(self, uuid):
        """Get or create task by uuid."""
        try:
            return self.tasks[uuid], False
        except KeyError:
            task = self.tasks[uuid] = self.Task(uuid)
            return task, True

    def event(self, event):
        with self._mutex:
            return self._event(event)

    def task_event(self, type_, fields):
        """Deprecated, use :meth:`event`."""
        return self._event(dict(fields, type="-".join(["task", type_])))

    def worker_event(self, type_, fields):
        """Deprecated, use :meth:`event`."""
        return self._event(dict(fields, type="-".join(["worker", type_])))

    def _create_dispatcher(self):
        """Build the closure that processes a single event dict."""
        # Hoist frequently-used attributes into locals: the returned
        # closure is the hot path for every received event.
        get_handler = self.handlers.__getitem__
        event_callback = self.event_callback
        wfields = itemgetter("hostname", "timestamp", "local_received")
        tfields = itemgetter("uuid", "hostname", "timestamp",
                             "local_received", "clock")
        taskheap = self._taskheap
        # Allow roughly two heap entries per cached task.
        maxtasks = self.max_tasks_in_memory * 2
        add_type = self._seen_types.add
        tasks, Task = self.tasks, self.Task
        workers, Worker = self.workers, self.Worker
        # avoid updating LRU entry at getitem
        get_worker, get_task = workers.data.__getitem__, tasks.data.__getitem__

        def _event(event, timetuple=timetuple, KeyError=KeyError,
                   created=True):
            self.event_count += 1
            if event_callback:
                event_callback(self, event)
            group, _, subject = event["type"].partition("-")
            try:
                handler = get_handler(group)
            except KeyError:
                pass
            else:
                # Custom handler registered for this event group.
                return handler(subject, event)
            if group == "worker":
                try:
                    hostname, timestamp, local_received = wfields(event)
                except KeyError:
                    pass
                else:
                    try:
                        worker, created = get_worker(hostname), False
                    except KeyError:
                        worker = workers[hostname] = Worker(hostname)
                    worker.event(subject, timestamp, local_received, event)
                    return created
            elif group == "task":
                (uuid, hostname, timestamp,
                 local_received, clock) = tfields(event)
                # task-sent event is sent by client, not worker
                is_client_event = subject == "sent"
                try:
                    task, created = get_task(uuid), False
                except KeyError:
                    task = tasks[uuid] = Task(uuid)
                if is_client_event:
                    task.client = hostname
                else:
                    try:
                        worker, created = get_worker(hostname), False
                    except KeyError:
                        worker = workers[hostname] = Worker(hostname)
                    task.worker = worker
                    if worker is not None and local_received:
                        worker.event(None, local_received, timestamp)
                origin = hostname if is_client_event else worker.id
                # Push the event; evict the oldest once over the limit.
                heappush(taskheap,
                         timetuple(clock, timestamp, origin, ref(task)))
                if len(taskheap) > maxtasks:
                    heappop(taskheap)
                if subject == "received":
                    self.task_count += 1
                task.event(subject, timestamp, local_received, event)
                task_name = task.name
                if task_name is not None:
                    add_type(task_name)
                return created
        return _event

    def rebuild_taskheap(self, timetuple=timetuple, heapify=heapify):
        """Rebuild the event heap from the current task cache."""
        heap = self._taskheap[:] = [
            timetuple(t.clock, t.timestamp, t.origin, ref(t))
            for t in values(self.tasks)
        ]
        heapify(heap)

    def itertasks(self, limit=None):
        """Yield up to ``limit`` ``(uuid, Task)`` pairs from the cache."""
        for index, row in enumerate(items(self.tasks)):
            yield row
            if limit and index + 1 >= limit:
                break

    def tasks_by_time(self, limit=None):
        """Generator giving tasks ordered by time,
        in ``(uuid, Task)`` tuples."""
        seen = set()
        for evtup in islice(reversed(self._taskheap), 0, limit):
            task = evtup[3]()
            # Weakref may be dead if the task was evicted from the cache.
            if task is not None:
                uuid = task.uuid
                if uuid not in seen:
                    yield uuid, task
                    seen.add(uuid)
    tasks_by_timestamp = tasks_by_time

    def tasks_by_type(self, name, limit=None):
        """Get all tasks by type.

        Return a list of ``(uuid, Task)`` tuples.
        """
        return islice(
            ((uuid, task) for uuid, task in self.tasks_by_time()
             if task.name == name),
            0, limit)

    def tasks_by_worker(self, hostname, limit=None):
        """Get all tasks by worker.
        """
        return islice(
            ((uuid, task) for uuid, task in self.tasks_by_time()
             if task.worker.hostname == hostname),
            0, limit
        )

    def task_types(self):
        """Return a list of all seen task types."""
        return sorted(self._seen_types)

    def alive_workers(self):
        """Return a list of (seemingly) alive workers."""
        return [w for w in values(self.workers) if w.alive]

    def __repr__(self):
        return R_STATE.format(self)

    def __reduce__(self):
        # taskheap is passed as None; it is rebuilt on unpickle.
        return (
            self.__class__,
            (self.event_callback, self.workers, self.tasks, None,
             self.max_workers_in_memory, self.max_tasks_in_memory),
        )
def test_update_larger_than_cache_size(self): x = LRUCache(2) x.update({x: x for x in range(100)}) self.assertEqual(list(x.keys()), [98, 99])
__all__ = ('CacheBackend', )

# Memoization cell for the selected memcache implementation; holds a
# (is_pylibmc, module, key_type_fn) triple once resolved.
_imp = [None]

REQUIRES_BACKEND = """\
The Memcached backend requires either pylibmc or python-memcached.\
"""

UNKNOWN_BACKEND = """\
The cache backend {0!r} is unknown,
Please use one of the following backends instead: {1}\
"""

# Global shared in-memory cache for in-memory cache client
# This is to share cache between threads
_DUMMY_CLIENT_CACHE = LRUCache(limit=5000)


def import_best_memcache():
    """Return the best available memcache binding.

    Prefers :pypi:`pylibmc`, falling back to python-memcached; the result
    is cached in ``_imp`` so the import dance happens only once.

    Raises ImproperlyConfigured when neither library is installed.
    """
    if _imp[0] is not None:
        return _imp[0]
    is_pylibmc = False
    key_t = bytes_to_str
    try:
        import pylibmc as memcache
        is_pylibmc = True
    except ImportError:
        try:
            import memcache
        except ImportError:
            raise ImproperlyConfigured(REQUIRES_BACKEND)
    _imp[0] = (is_pylibmc, memcache, key_t)
    return _imp[0]
class State(object):
    """Records clusters state."""
    event_count = 0
    task_count = 0

    def __init__(self, callback=None,
                 max_workers_in_memory=5000, max_tasks_in_memory=10000):
        self.max_workers_in_memory = max_workers_in_memory
        self.max_tasks_in_memory = max_tasks_in_memory
        self.workers = LRUCache(limit=self.max_workers_in_memory)
        self.tasks = LRUCache(limit=self.max_tasks_in_memory)
        self._taskheap = []
        self.event_callback = callback
        self._mutex = threading.Lock()

    def freeze_while(self, fun, *args, **kwargs):
        """Run ``fun`` while holding the state mutex, optionally clearing
        all state afterwards (``clear_after=True``)."""
        clear_after = kwargs.pop('clear_after', False)
        with self._mutex:
            try:
                return fun(*args, **kwargs)
            finally:
                if clear_after:
                    self._clear()

    def clear_tasks(self, ready=True):
        with self._mutex:
            return self._clear_tasks(ready)

    def _clear_tasks(self, ready=True):
        # When ready=True only tasks in a READY state are dropped;
        # in-progress tasks are preserved.
        if ready:
            in_progress = dict(
                (uuid, task) for uuid, task in self.itertasks()
                if task.state not in states.READY_STATES)
            self.tasks.clear()
            self.tasks.update(in_progress)
        else:
            self.tasks.clear()
        self._taskheap[:] = []

    def _clear(self, ready=True):
        self.workers.clear()
        self._clear_tasks(ready)
        self.event_count = 0
        self.task_count = 0

    def clear(self, ready=True):
        with self._mutex:
            return self._clear(ready)

    def get_or_create_worker(self, hostname, **kwargs):
        """Get or create worker by hostname.

        Returns tuple of ``(worker, was_created)``.
        """
        try:
            worker = self.workers[hostname]
            worker.update(kwargs)
            return worker, False
        except KeyError:
            worker = self.workers[hostname] = Worker(
                hostname=hostname, **kwargs)
            return worker, True

    def get_or_create_task(self, uuid):
        """Get or create task by uuid.

        Returns tuple of ``(task, was_created)``.
        """
        try:
            # BUG FIX: previously returned True for an existing task and
            # False for a newly created one — the opposite of the
            # ``(obj, was_created)`` contract used by get_or_create_worker.
            return self.tasks[uuid], False
        except KeyError:
            task = self.tasks[uuid] = Task(uuid=uuid)
            return task, True

    def worker_event(self, type, fields):
        """Process worker event.

        Returns ``(worker, was_created)``, or None when the event has no
        ``hostname`` field.
        """
        try:
            hostname = fields['hostname']
        except KeyError:
            pass
        else:
            worker, created = self.get_or_create_worker(hostname)
            handler = getattr(worker, 'on_' + type, None)
            if handler:
                handler(**fields)
            return worker, created

    def task_event(self, type, fields):
        """Process task event."""
        uuid = fields['uuid']
        hostname = fields['hostname']
        worker, _ = self.get_or_create_worker(hostname)
        task, created = self.get_or_create_task(uuid)
        task.worker = worker
        # The heap is allowed to hold up to twice the task limit before
        # the oldest entry is evicted.
        maxtasks = self.max_tasks_in_memory * 2

        taskheap = self._taskheap
        timestamp = fields.get('timestamp') or 0
        # sent events originate from clients, which do not participate in
        # the cluster clock, so they order at clock 0.
        clock = 0 if type == 'sent' else fields.get('clock')
        heappush(taskheap, _lamportinfo(clock, timestamp, worker.id, task))
        if len(taskheap) > maxtasks:
            heappop(taskheap)

        handler = getattr(task, 'on_' + type, None)
        if type == 'received':
            self.task_count += 1
        if handler:
            handler(**fields)
        else:
            task.on_unknown_event(type, **fields)
        return created

    def event(self, event):
        with self._mutex:
            return self._dispatch_event(event)

    def _dispatch_event(self, event):
        # Routes '<group>-<subject>' event types to worker_event/task_event.
        self.event_count += 1
        event = kwdict(event)
        group, _, subject = event['type'].partition('-')
        getattr(self, group + '_event')(subject, event)
        if self.event_callback:
            self.event_callback(self, event)

    def itertasks(self, limit=None):
        """Iterate ``(uuid, Task)`` pairs, stopping after ``limit`` items
        when a limit is given."""
        for index, row in enumerate(items(self.tasks)):
            yield row
            if limit and index + 1 >= limit:
                break

    def tasks_by_time(self, limit=None):
        """Generator giving tasks ordered by time,
        in ``(uuid, Task)`` tuples (newest first, duplicates skipped)."""
        seen = set()
        for evtup in islice(reversed(self._taskheap), 0, limit):
            uuid = evtup[3].uuid
            if uuid not in seen:
                yield uuid, evtup[3]
                seen.add(uuid)
    tasks_by_timestamp = tasks_by_time

    def tasks_by_type(self, name, limit=None):
        """Get all tasks by type.

        Returns a list of ``(uuid, Task)`` tuples.
        """
        return islice(
            ((uuid, task) for uuid, task in self.tasks_by_time()
             if task.name == name),
            0, limit,
        )

    def tasks_by_worker(self, hostname, limit=None):
        """Get all tasks by worker.

        Returns a list of ``(uuid, Task)`` tuples.
        """
        return islice(
            ((uuid, task) for uuid, task in self.tasks_by_time()
             if task.worker.hostname == hostname),
            0, limit,
        )

    def task_types(self):
        """Returns a list of all seen task types."""
        return list(sorted(set(task.name for task in values(self.tasks))))

    def alive_workers(self):
        """Returns a list of (seemingly) alive workers."""
        return [w for w in values(self.workers) if w.alive]

    def __repr__(self):
        return '<State: events={0.event_count} tasks={0.task_count}>' \
            .format(self)

    def __getstate__(self):
        # Locks cannot be pickled; __setstate__ recreates the mutex.
        d = dict(vars(self))
        d.pop('_mutex')
        return d

    def __setstate__(self, state):
        self.__dict__ = state
        self._mutex = threading.Lock()