Example #1
File: state.py Project: HonzaKral/celery
 def __init__(self, callback=None,
         max_workers_in_memory=5000, max_tasks_in_memory=10000):
     self.workers = LocalCache(max_workers_in_memory)
     self.tasks = LocalCache(max_tasks_in_memory)
     self.event_callback = callback
     self.group_handlers = {"worker": self.worker_event,
                            "task": self.task_event}
Example #2
 def test_expires(self):
     limit = 100
     x = LocalCache(limit=limit)
     slots = list(range(limit * 2))
     for i in slots:
         x[i] = i
     self.assertListEqual(x.keys(), slots[limit:])
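The test above relies on LocalCache discarding its oldest entries once the configured limit is exceeded. A minimal sketch of that behaviour, built on collections.OrderedDict (BoundedDict is an illustrative stand-in, not celery's actual LocalCache implementation):

from collections import OrderedDict

class BoundedDict(OrderedDict):
    """Keep at most `limit` items, discarding the oldest insertions first."""

    def __init__(self, limit=None):
        OrderedDict.__init__(self)
        self.limit = limit

    def __setitem__(self, key, value):
        OrderedDict.__setitem__(self, key, value)
        if self.limit is not None:
            while len(self) > self.limit:
                self.popitem(last=False)    # evict the oldest entry

x = BoundedDict(limit=100)
for i in range(200):
    x[i] = i
assert list(x.keys()) == list(range(100, 200))   # only the newest 100 remain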
Example #3
 def __init__(self, callback=None,
         max_workers_in_memory=5000, max_tasks_in_memory=10000):
     self.workers = LocalCache(max_workers_in_memory)
     self.tasks = LocalCache(max_tasks_in_memory)
     self.event_callback = callback
     self.group_handlers = {"worker": self.worker_event,
                            "task": self.task_event}
     self._mutex = Lock()
Example #4
File: cache.py Project: kornholi/celery
class DummyClient(object):
    def __init__(self, *args, **kwargs):
        self.cache = LocalCache(5000)

    def get(self, key, *args, **kwargs):
        return self.cache.get(key)

    def set(self, key, value, *args, **kwargs):
        self.cache[key] = value

    def delete(self, key, *args, **kwargs):
        self.cache.pop(key, None)
Example #5
File: cache.py Project: 66laps/celery
class DummyClient(object):

    def __init__(self, *args, **kwargs):
        self.cache = LocalCache(5000)

    def get(self, key, *args, **kwargs):
        return self.cache.get(key)

    def set(self, key, value, *args, **kwargs):
        self.cache[key] = value

    def delete(self, key, *args, **kwargs):
        self.cache.pop(key, None)
Example #6
class DummyClient(object):

    def __init__(self, *args, **kwargs):
        self.cache = LocalCache(5000)

    def get(self, key, *args, **kwargs):
        return self.cache.get(key)

    def get_multi(self, keys):
        cache = self.cache
        return dict((k, cache[k]) for k in keys if k in cache)

    def set(self, key, value, *args, **kwargs):
        self.cache[key] = value

    def delete(self, key, *args, **kwargs):
        self.cache.pop(key, None)
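DummyClient exposes just the slice of a memcached-style client API that the cache backend needs (get/set/delete, plus get_multi in this variant), backed by an in-memory LocalCache. A hedged usage sketch; the keys and values below are illustrative, not taken from the source:

client = DummyClient()
client.set("task-1", {"status": "SUCCESS", "result": 42})
client.set("task-2", {"status": "PENDING", "result": None})

print(client.get("task-1"))      # {'status': 'SUCCESS', 'result': 42}
print(client.get("missing"))     # None -- LocalCache is a dict subclass, so .get() falls back to None
print(client.get_multi(["task-1", "task-2", "missing"]))   # only the keys that are present
client.delete("task-1")
print(client.get("task-1"))      # None again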
Example #7
class State(object):
    """Records clusters state."""
    event_count = 0
    task_count = 0

    def __init__(self, callback=None,
            max_workers_in_memory=5000, max_tasks_in_memory=10000):
        self.workers = LocalCache(max_workers_in_memory)
        self.tasks = LocalCache(max_tasks_in_memory)
        self.event_callback = callback
        self.group_handlers = {"worker": self.worker_event,
                               "task": self.task_event}
        self._mutex = Lock()

    def freeze_while(self, fun, *args, **kwargs):
        clear_after = kwargs.pop("clear_after", False)
        self._mutex.acquire()
        try:
            return fun(*args, **kwargs)
        finally:
            if clear_after:
                self._clear()
            self._mutex.release()

    def clear_tasks(self, ready=True):
        self._mutex.acquire()
        try:
            return self._clear_tasks(ready)
        finally:
            self._mutex.release()

    def _clear_tasks(self, ready=True):
        if ready:
            self.tasks = dict((uuid, task)
                                for uuid, task in self.tasks.items()
                                    if task.state not in states.READY_STATES)
        else:
            self.tasks.clear()

    def _clear(self, ready=True):
        self.workers.clear()
        self._clear_tasks(ready)
        self.event_count = 0
        self.task_count = 0

    def clear(self, ready=True):
        self._mutex.acquire()
        try:
            return self._clear(ready)
        finally:
            self._mutex.release()

    def get_or_create_worker(self, hostname, **kwargs):
        """Get or create worker by hostname."""
        try:
            worker = self.workers[hostname]
            worker.update(kwargs)
        except KeyError:
            worker = self.workers[hostname] = Worker(
                    hostname=hostname, **kwargs)
        return worker

    def get_or_create_task(self, uuid):
        """Get or create task by uuid."""
        try:
            return self.tasks[uuid]
        except KeyError:
            task = self.tasks[uuid] = Task(uuid=uuid)
            return task

    def worker_event(self, type, fields):
        """Process worker event."""
        hostname = fields.pop("hostname", None)
        if hostname:
            worker = self.get_or_create_worker(hostname)
            handler = getattr(worker, "on_%s" % type, None)
            if handler:
                handler(**fields)

    def task_event(self, type, fields):
        """Process task event."""
        uuid = fields.pop("uuid")
        hostname = fields.pop("hostname")
        worker = self.get_or_create_worker(hostname)
        task = self.get_or_create_task(uuid)
        handler = getattr(task, "on_%s" % type, None)
        if type == "received":
            self.task_count += 1
        if handler:
            handler(**fields)
        task.worker = worker

    def event(self, event):
        self._mutex.acquire()
        try:
            return self._dispatch_event(event)
        finally:
            self._mutex.release()

    def _dispatch_event(self, event):
        self.event_count += 1
        event = kwdict(event)
        group, _, type = partition(event.pop("type"), "-")
        self.group_handlers[group](type, event)
        if self.event_callback:
            self.event_callback(self, event)

    def tasks_by_timestamp(self, limit=None):
        """Get tasks by timestamp.

        Returns a list of ``(uuid, task)`` tuples.

        """
        return self._sort_tasks_by_time(self.tasks.items()[:limit])

    def _sort_tasks_by_time(self, tasks):
        """Sort task items by time."""
        return sorted(tasks, key=lambda t: t[1].timestamp,
                      reverse=True)

    def tasks_by_type(self, name, limit=None):
        """Get all tasks by type.

        Returns a list of ``(uuid, task)`` tuples.

        """
        return self._sort_tasks_by_time([(uuid, task)
                for uuid, task in self.tasks.items()[:limit]
                    if task.name == name])

    def tasks_by_worker(self, hostname, limit=None):
        """Get all tasks by worker.

        Returns a list of ``(uuid, task)`` tuples.

        """
        return self._sort_tasks_by_time([(uuid, task)
                for uuid, task in self.tasks.items()[:limit]
                    if task.worker.hostname == hostname])

    def task_types(self):
        """Returns a list of all seen task types."""
        return list(sorted(set(task.name for task in self.tasks.values())))

    def alive_workers(self):
        """Returns a list of (seemingly) alive workers."""
        return [w for w in self.workers.values() if w.alive]

    def __repr__(self):
        return "<ClusterState: events=%s tasks=%s>" % (self.event_count,
                                                       self.task_count)
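State consumes raw event dicts: _dispatch_event splits the "type" field on "-" into a group ("worker" or "task") and a sub-type, then routes the remaining fields to worker_event or task_event, which look up an on_<sub-type> handler on the Worker or Task object. A hedged sketch of feeding events in; the field values are illustrative, and the exact fields accepted depend on the Worker and Task classes, which are not shown here:

state = State()

# group "worker", sub-type "online" -> Worker.on_online(**fields), if defined
state.event({"type": "worker-online", "hostname": "worker1.example.com"})

# group "task", sub-type "received" -> bumps task_count, then Task.on_received(**fields)
state.event({"type": "task-received", "uuid": "id-1",
             "hostname": "worker1.example.com"})

print(state.task_count)        # 1 -- incremented for every "received" task event
print(state.alive_workers())   # the workers whose .alive flag is currently set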
Example #8
class State(object):
    """Records clusters state."""
    event_count = 0
    task_count = 0
    _buffering = False
    buffer = deque()
    frozen = False

    def __init__(self, callback=None,
            max_workers_in_memory=5000, max_tasks_in_memory=10000):
        self.workers = LocalCache(max_workers_in_memory)
        self.tasks = LocalCache(max_tasks_in_memory)
        self.event_callback = callback
        self.group_handlers = {"worker": self.worker_event,
                               "task": self.task_event}
        self._resource = RLock()

    def freeze(self, buffer=True):
        """Stop recording the event stream.

        :keyword buffer: If true, any events received while frozen
           will be buffered; you can use ``thaw(replay=True)`` to apply
           this buffer. :meth:`thaw` will clear the buffer and resume
           recording the stream.

        """
        self._buffering = buffer
        self.frozen = True

    def _replay(self):
        while self.buffer:
            try:
                event = self.buffer.popleft()
            except IndexError:
                break   # buffer drained concurrently; nothing left to replay
            self._dispatch_event(event)

    def thaw(self, replay=True):
        """Resume recording of the event stream.

        :keyword replay: Will replay buffered events received while
          the stream was frozen.

        This will always clear the buffer, deleting any events collected
        while the stream was frozen.

        """
        self._buffering = False
        try:
            if replay:
                self._replay()
            else:
                self.buffer.clear()
        finally:
            self.frozen = False

    def freeze_while(self, fun, *args, **kwargs):
        self.freeze()
        try:
            return fun(*args, **kwargs)
        finally:
            self.thaw(replay=True)

    def clear_tasks(self, ready=True):
        if ready:
            self.tasks = dict((uuid, task)
                                for uuid, task in self.tasks.items()
                                    if task.state not in states.READY_STATES)
        else:
            self.tasks.clear()

    def clear(self, ready=True):
        self.workers.clear()
        self.clear_tasks(ready)
        self.event_count = 0
        self.task_count = 0

    def get_or_create_worker(self, hostname, **kwargs):
        """Get or create worker by hostname."""
        try:
            worker = self.workers[hostname]
            worker.update(kwargs)
        except KeyError:
            worker = self.workers[hostname] = Worker(
                    hostname=hostname, **kwargs)
        return worker

    def get_or_create_task(self, uuid):
        """Get or create task by uuid."""
        try:
            return self.tasks[uuid]
        except KeyError:
            task = self.tasks[uuid] = Task(uuid=uuid)
            return task

    def worker_event(self, type, fields):
        """Process worker event."""
        hostname = fields.pop("hostname", None)
        if hostname:
            worker = self.get_or_create_worker(hostname)
            handler = getattr(worker, "on_%s" % type, None)
            if handler:
                handler(**fields)

    def task_event(self, type, fields):
        """Process task event."""
        uuid = fields.pop("uuid")
        hostname = fields.pop("hostname")
        worker = self.get_or_create_worker(hostname)
        task = self.get_or_create_task(uuid)
        handler = getattr(task, "on_%s" % type, None)
        if type == "received":
            self.task_count += 1
        if handler:
            handler(**fields)
        task.worker = worker

    def _dispatch_event(self, event):
        self.event_count += 1
        event = kwdict(event)
        group, _, type = partition(event.pop("type"), "-")
        self.group_handlers[group](type, event)
        if self.event_callback:
            self.event_callback(self, event)

    def event(self, event):
        """Process event."""
        if not self.frozen:
            self._dispatch_event(event)
        elif self._buffering:
            self.buffer.append(event)

    def tasks_by_timestamp(self, limit=None):
        """Get tasks by timestamp.

        Returns a list of ``(uuid, task)`` tuples.

        """
        return self._sort_tasks_by_time(self.tasks.items()[:limit])

    def _sort_tasks_by_time(self, tasks):
        """Sort task items by time."""
        return sorted(tasks, key=lambda t: t[1].timestamp,
                      reverse=True)

    def tasks_by_type(self, name, limit=None):
        """Get all tasks by type.

        Returns a list of ``(uuid, task)`` tuples.

        """
        return self._sort_tasks_by_time([(uuid, task)
                for uuid, task in self.tasks.items()[:limit]
                    if task.name == name])

    def tasks_by_worker(self, hostname, limit=None):
        """Get all tasks by worker.

        Returns a list of ``(uuid, task)`` tuples.

        """
        return self._sort_tasks_by_time([(uuid, task)
                for uuid, task in self.tasks.items()[:limit]
                    if task.worker.hostname == hostname])

    def task_types(self):
        """Returns a list of all seen task types."""
        return list(sorted(set(task.name for task in self.tasks.values())))

    def alive_workers(self):
        """Returns a list of (seemingly) alive workers."""
        return [w for w in self.workers.values() if w.alive]

    def __repr__(self):
        return "<ClusterState: events=%s tasks=%s>" % (self.event_count,
                                                       self.task_count)
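This variant drops the mutex in favour of a freeze/thaw protocol: while frozen, incoming events are optionally buffered and can be replayed when recording resumes. A hedged usage sketch; the event contents are illustrative:

state = State()

state.freeze(buffer=True)            # stop recording, start buffering
state.event({"type": "worker-heartbeat", "hostname": "worker1.example.com"})
print(state.event_count)             # still 0 -- the event only sits in state.buffer

state.thaw(replay=True)              # replay the buffered events, then resume recording
print(state.event_count)             # now 1

# freeze_while wraps a callable in freeze() / thaw(replay=True)
names = state.freeze_while(state.task_types)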
Example #9
File: cache.py Project: 66laps/celery
 def __init__(self, *args, **kwargs):
     self.cache = LocalCache(5000)
Example #10
 def __init__(self, *args, **kwargs):
     self.cache = LocalCache(5000)
Example #11
File: base.py Project: jokar/minion
 def __init__(self, *args, **kwargs):
     super(BaseDictBackend, self).__init__(*args, **kwargs)
     self._cache = LocalCache(
         limit=kwargs.get("max_cached_results") or conf.MAX_CACHED_RESULTS)
Example #12
File: base.py Project: jokar/minion
class BaseDictBackend(BaseBackend):
    def __init__(self, *args, **kwargs):
        super(BaseDictBackend, self).__init__(*args, **kwargs)
        self._cache = LocalCache(
            limit=kwargs.get("max_cached_results") or conf.MAX_CACHED_RESULTS)

    def store_result(self, task_id, result, status, traceback=None):
        """Store task result and status."""
        result = self.encode_result(result, status)
        return self._store_result(task_id, result, status, traceback)

    def forget(self, task_id):
        self._cache.pop(task_id, None)
        self._forget(task_id)

    def get_status(self, task_id):
        """Get the status of a task."""
        return self.get_task_meta(task_id)["status"]

    def get_traceback(self, task_id):
        """Get the traceback for a failed task."""
        return self.get_task_meta(task_id).get("traceback")

    def get_result(self, task_id):
        """Get the result of a task."""
        meta = self.get_task_meta(task_id)
        if meta["status"] in self.EXCEPTION_STATES:
            return self.exception_to_python(meta["result"])
        else:
            return meta["result"]

    def get_task_meta(self, task_id, cache=True):
        if cache and task_id in self._cache:
            return self._cache[task_id]

        meta = self._get_task_meta_for(task_id)
        if cache and meta.get("status") == states.SUCCESS:
            self._cache[task_id] = meta
        return meta

    def reload_task_result(self, task_id):
        self._cache[task_id] = self.get_task_meta(task_id, cache=False)

    def reload_taskset_result(self, taskset_id):
        self._cache[taskset_id] = self.get_taskset_meta(taskset_id,
                                                        cache=False)

    def get_taskset_meta(self, taskset_id, cache=True):
        if cache and taskset_id in self._cache:
            return self._cache[taskset_id]

        meta = self._restore_taskset(taskset_id)
        if cache and meta is not None:
            self._cache[taskset_id] = meta
        return meta

    def restore_taskset(self, taskset_id, cache=True):
        """Get the result for a taskset."""
        meta = self.get_taskset_meta(taskset_id, cache=cache)
        if meta:
            return meta["result"]

    def save_taskset(self, taskset_id, result):
        """Store the result of an executed taskset."""
        return self._save_taskset(taskset_id, result)
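BaseDictBackend implements the caching and status-lookup logic once and defers actual storage to the hooks a concrete subclass provides (_store_result, _forget, _get_task_meta_for, _save_taskset, _restore_taskset). A sketch of a toy in-memory subclass; MemoryBackend and its dict storage are illustrative, not part of the project shown:

class MemoryBackend(BaseDictBackend):
    """Illustrative subclass: keeps everything in plain dicts."""

    def __init__(self, *args, **kwargs):
        super(MemoryBackend, self).__init__(*args, **kwargs)
        self._store = {}
        self._tasksets = {}

    def _store_result(self, task_id, result, status, traceback=None):
        self._store[task_id] = {"task_id": task_id, "status": status,
                                "result": result, "traceback": traceback}
        return result

    def _get_task_meta_for(self, task_id):
        return self._store.get(task_id,
                               {"status": states.PENDING, "result": None})

    def _forget(self, task_id):
        self._store.pop(task_id, None)

    def _save_taskset(self, taskset_id, result):
        self._tasksets[taskset_id] = {"result": result}
        return result

    def _restore_taskset(self, taskset_id):
        return self._tasksets.get(taskset_id)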
Example #13
File: state.py Project: jokar/minion
class State(object):
    """Records clusters state."""
    event_count = 0
    task_count = 0

    def __init__(self,
                 callback=None,
                 max_workers_in_memory=5000,
                 max_tasks_in_memory=10000):
        self.workers = LocalCache(max_workers_in_memory)
        self.tasks = LocalCache(max_tasks_in_memory)
        self.event_callback = callback
        self.group_handlers = {
            "worker": self.worker_event,
            "task": self.task_event
        }
        self._mutex = Lock()

    def freeze_while(self, fun, *args, **kwargs):
        clear_after = kwargs.pop("clear_after", False)
        self._mutex.acquire()
        try:
            return fun(*args, **kwargs)
        finally:
            if clear_after:
                self._clear()
            self._mutex.release()

    def clear_tasks(self, ready=True):
        self._mutex.acquire()
        try:
            return self._clear_tasks(ready)
        finally:
            self._mutex.release()

    def _clear_tasks(self, ready=True):
        if ready:
            self.tasks = dict((uuid, task)
                              for uuid, task in self.tasks.items()
                              if task.state not in states.READY_STATES)
        else:
            self.tasks.clear()

    def _clear(self, ready=True):
        self.workers.clear()
        self._clear_tasks(ready)
        self.event_count = 0
        self.task_count = 0

    def clear(self, ready=True):
        self._mutex.acquire()
        try:
            return self._clear(ready)
        finally:
            self._mutex.release()

    def get_or_create_worker(self, hostname, **kwargs):
        """Get or create worker by hostname."""
        try:
            worker = self.workers[hostname]
            worker.update(kwargs)
        except KeyError:
            worker = self.workers[hostname] = Worker(hostname=hostname,
                                                     **kwargs)
        return worker

    def get_or_create_task(self, uuid):
        """Get or create task by uuid."""
        try:
            return self.tasks[uuid]
        except KeyError:
            task = self.tasks[uuid] = Task(uuid=uuid)
            return task

    def worker_event(self, type, fields):
        """Process worker event."""
        hostname = fields.pop("hostname", None)
        if hostname:
            worker = self.get_or_create_worker(hostname)
            handler = getattr(worker, "on_%s" % type, None)
            if handler:
                handler(**fields)

    def task_event(self, type, fields):
        """Process task event."""
        uuid = fields.pop("uuid")
        hostname = fields.pop("hostname")
        worker = self.get_or_create_worker(hostname)
        task = self.get_or_create_task(uuid)
        handler = getattr(task, "on_%s" % type, None)
        if type == "received":
            self.task_count += 1
        if handler:
            handler(**fields)
        task.worker = worker

    def event(self, event):
        self._mutex.acquire()
        try:
            return self._dispatch_event(event)
        finally:
            self._mutex.release()

    def _dispatch_event(self, event):
        self.event_count += 1
        event = kwdict(event)
        group, _, type = partition(event.pop("type"), "-")
        self.group_handlers[group](type, event)
        if self.event_callback:
            self.event_callback(self, event)

    def tasks_by_timestamp(self, limit=None):
        """Get tasks by timestamp.

        Returns a list of ``(uuid, task)`` tuples.

        """
        return self._sort_tasks_by_time(self.tasks.items()[:limit])

    def _sort_tasks_by_time(self, tasks):
        """Sort task items by time."""
        return sorted(tasks, key=lambda t: t[1].timestamp, reverse=True)

    def tasks_by_type(self, name, limit=None):
        """Get all tasks by type.

        Returns a list of ``(uuid, task)`` tuples.

        """
        return self._sort_tasks_by_time([
            (uuid, task) for uuid, task in self.tasks.items()[:limit]
            if task.name == name
        ])

    def tasks_by_worker(self, hostname, limit=None):
        """Get all tasks by worker.

        Returns a list of ``(uuid, task)`` tuples.

        """
        return self._sort_tasks_by_time([
            (uuid, task) for uuid, task in self.tasks.items()[:limit]
            if task.worker.hostname == hostname
        ])

    def task_types(self):
        """Returns a list of all seen task types."""
        return list(sorted(set(task.name for task in self.tasks.values())))

    def alive_workers(self):
        """Returns a list of (seemingly) alive workers."""
        return [w for w in self.workers.values() if w.alive]

    def __repr__(self):
        return "<ClusterState: events=%s tasks=%s>" % (self.event_count,
                                                       self.task_count)
Example #14
import sys

from datetime import datetime

from celery.app import app_or_default
from celery.datastructures import LocalCache

TASK_NAMES = LocalCache(0xFFF)

HUMAN_TYPES = {
    "worker-offline": "shutdown",
    "worker-online": "started",
    "worker-heartbeat": "heartbeat"
}


def humanize_type(type):
    try:
        return HUMAN_TYPES[type.lower()]
    except KeyError:
        return type.lower().replace("-", " ")


class Dumper(object):
    def on_event(self, event):
        timestamp = datetime.fromtimestamp(event.pop("timestamp"))
        type = event.pop("type").lower()
        hostname = event.pop("hostname")
        if type.startswith("task-"):
            uuid = event.pop("uuid")
            if type.startswith("task-received"):
Example #15
File: base.py Project: 66laps/celery
class BaseDictBackend(BaseBackend):

    def __init__(self, *args, **kwargs):
        super(BaseDictBackend, self).__init__(*args, **kwargs)
        self._cache = LocalCache(limit=kwargs.get("max_cached_results") or
                                 self.app.conf.CELERY_MAX_CACHED_RESULTS)

    def store_result(self, task_id, result, status, traceback=None):
        """Store task result and status."""
        result = self.encode_result(result, status)
        return self._store_result(task_id, result, status, traceback)

    def forget(self, task_id):
        self._cache.pop(task_id, None)
        self._forget(task_id)

    def get_status(self, task_id):
        """Get the status of a task."""
        return self.get_task_meta(task_id)["status"]

    def get_traceback(self, task_id):
        """Get the traceback for a failed task."""
        return self.get_task_meta(task_id).get("traceback")

    def get_result(self, task_id):
        """Get the result of a task."""
        meta = self.get_task_meta(task_id)
        if meta["status"] in self.EXCEPTION_STATES:
            return self.exception_to_python(meta["result"])
        else:
            return meta["result"]

    def get_task_meta(self, task_id, cache=True):
        if cache and task_id in self._cache:
            return self._cache[task_id]

        meta = self._get_task_meta_for(task_id)
        if cache and meta.get("status") == states.SUCCESS:
            self._cache[task_id] = meta
        return meta

    def reload_task_result(self, task_id):
        self._cache[task_id] = self.get_task_meta(task_id, cache=False)

    def reload_taskset_result(self, taskset_id):
        self._cache[taskset_id] = self.get_taskset_meta(taskset_id,
                                                        cache=False)

    def get_taskset_meta(self, taskset_id, cache=True):
        if cache and taskset_id in self._cache:
            return self._cache[taskset_id]

        meta = self._restore_taskset(taskset_id)
        if cache and meta is not None:
            self._cache[taskset_id] = meta
        return meta

    def restore_taskset(self, taskset_id, cache=True):
        """Get the result for a taskset."""
        meta = self.get_taskset_meta(taskset_id, cache=cache)
        if meta:
            return meta["result"]

    def save_taskset(self, taskset_id, result):
        """Store the result of an executed taskset."""
        return self._save_taskset(taskset_id, result)
Example #16
File: state.py Project: HonzaKral/celery
class State(object):
    """Represents a snapshot of a clusters state."""
    event_count = 0
    task_count = 0

    def __init__(self, callback=None,
            max_workers_in_memory=5000, max_tasks_in_memory=10000):
        self.workers = LocalCache(max_workers_in_memory)
        self.tasks = LocalCache(max_tasks_in_memory)
        self.event_callback = callback
        self.group_handlers = {"worker": self.worker_event,
                               "task": self.task_event}

    def get_or_create_worker(self, hostname, **kwargs):
        """Get or create worker by hostname."""
        try:
            worker = self.workers[hostname]
            worker.update(kwargs)
        except KeyError:
            worker = self.workers[hostname] = Worker(
                    hostname=hostname, **kwargs)
        return worker

    def get_or_create_task(self, uuid, **kwargs):
        """Get or create task by uuid."""
        try:
            task = self.tasks[uuid]
            task.update(kwargs)
        except KeyError:
            task = self.tasks[uuid] = Task(uuid=uuid, **kwargs)
        return task

    def worker_event(self, type, fields):
        """Process worker event."""
        hostname = fields.pop("hostname")
        worker = self.get_or_create_worker(hostname)
        handler = getattr(worker, "on_%s" % type, None)
        if handler:
            handler(**fields)

    def task_event(self, type, fields):
        """Process task event."""
        uuid = fields.pop("uuid")
        hostname = fields.pop("hostname")
        worker = self.get_or_create_worker(hostname)
        task = self.get_or_create_task(uuid)
        handler = getattr(task, "on_%s" % type, None)
        if type == "received":
            self.task_count += 1
        if handler:
            handler(**fields)
        task.worker = worker

    def event(self, event):
        """Process event."""
        self.event_count += 1
        event = kwdict(event)
        group, _, type = partition(event.pop("type"), "-")
        self.group_handlers[group](type, event)
        if self.event_callback:
            self.event_callback(self, event)

    def tasks_by_timestamp(self):
        """Get tasks by timestamp.

        Returns a list of ``(uuid, task)`` tuples.

        """
        return self._sort_tasks_by_time(self.tasks.items())

    def _sort_tasks_by_time(self, tasks):
        """Sort task items by time."""
        return sorted(tasks, key=lambda t: t[1].timestamp, reverse=True)

    def tasks_by_type(self, name):
        """Get all tasks by type.

        Returns a list of ``(uuid, task)`` tuples.

        """
        return self._sort_tasks_by_time([(uuid, task)
                for uuid, task in self.tasks.items()
                    if task.name == name])

    def tasks_by_worker(self, hostname):
        """Get all tasks by worker.

        Returns a list of ``(uuid, task)`` tuples.

        """
        return self._sort_tasks_by_time([(uuid, task)
                for uuid, task in self.tasks.items()
                    if task.worker.hostname == hostname])

    def task_types(self):
        """Returns a list of all seen task types."""
        return list(set(task.name for task in self.tasks.values()))

    def alive_workers(self):
        """Returns a list of (seemingly) alive workers."""
        return [w for w in self.workers.values() if w.alive]
Example #17
File: base.py Project: 66laps/celery
 def __init__(self, *args, **kwargs):
     super(BaseDictBackend, self).__init__(*args, **kwargs)
     self._cache = LocalCache(limit=kwargs.get("max_cached_results") or
                              self.app.conf.CELERY_MAX_CACHED_RESULTS)