Example #1
 def test_expires(self):
     limit = 100
     x = LRUCache(limit=limit)
     slots = list(xrange(limit * 2))
     for i in slots:
         x[i] = i
     self.assertListEqual(x.keys(), list(slots[limit:]))
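The tests in this collection exercise celery's LRUCache, a mapping that evicts its oldest entries once `limit` is exceeded (the `x.data.popitem(last=False)` call in Example #2 shows the entries live in an OrderedDict exposed as `.data`). As orientation, here is a minimal sketch of that behaviour; it is a hypothetical stand-in written in modern Python 3, not celery's code, and it omits the locking the real class needs for the thread-safety test below:

from collections import OrderedDict

class MiniLRUCache(OrderedDict):
    """Hypothetical sketch of the LRU semantics the tests assume."""

    def __init__(self, limit=None):
        super().__init__()
        self.limit = limit

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        self.move_to_end(key)              # writes make the key most recent
        if self.limit is not None:
            while len(self) > self.limit:
                self.popitem(last=False)   # evict the least recently used key

    def __getitem__(self, key):
        value = super().__getitem__(key)
        self.move_to_end(key)              # reads refresh the key too
        return value

cache = MiniLRUCache(limit=3)
for i in range(5):
    cache[i] = i
assert list(cache) == [2, 3, 4]            # only the newest `limit` keys survive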
Example #2
    def assertSafeIter(self, method, interval=0.01, size=10000):
        from threading import Thread, Event
        from time import sleep
        x = LRUCache(size)
        x.update(zip(xrange(size), xrange(size)))

        class Burglar(Thread):

            def __init__(self, cache):
                self.cache = cache
                self._is_shutdown = Event()
                self._is_stopped = Event()
                Thread.__init__(self)

            def run(self):
                while not self._is_shutdown.isSet():
                    try:
                        self.cache.data.popitem(last=False)
                    except KeyError:
                        break
                self._is_stopped.set()

            def stop(self):
                self._is_shutdown.set()
                self._is_stopped.wait()
                self.join(THREAD_TIMEOUT_MAX)

        burglar = Burglar(x)
        burglar.start()
        try:
            for _ in getattr(x, method)():
                sleep(0.0001)
        finally:
            burglar.stop()
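assertSafeIter starts a Burglar thread that drains the cache while the main thread iterates over it, asserting that iteration survives concurrent mutation. The hazard it guards against is easy to reproduce on a plain dict in Python 3:

# Mutating a plain dict while iterating it fails fast.
d = dict.fromkeys(range(3))
try:
    for key in d:
        del d[key]
except RuntimeError as exc:
    print('unsafe iteration:', exc)   # dictionary changed size during iteration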
Example #3
    def assertSafeIter(self, method, interval=0.01, size=10000):
        from threading import Thread, Event
        from time import sleep
        x = LRUCache(size)
        x.update(zip(xrange(size), xrange(size)))

        class Burglar(Thread):
            def __init__(self, cache):
                self.cache = cache
                self._is_shutdown = Event()
                self._is_stopped = Event()
                Thread.__init__(self)

            def run(self):
                while not self._is_shutdown.isSet():
                    try:
                        self.cache.data.popitem(last=False)
                    except KeyError:
                        break
                self._is_stopped.set()

            def stop(self):
                self._is_shutdown.set()
                self._is_stopped.wait()
                self.join(1e10)

        burglar = Burglar(x)
        burglar.start()
        try:
            for _ in getattr(x, method)():
                sleep(0.0001)
        finally:
            burglar.stop()
Example #4
    def test_update_expires(self):
        limit = 100
        x = LRUCache(limit=limit)
        slots = list(xrange(limit * 2))
        for i in slots:
            x.update({i: i})

        self.assertListEqual(list(x.keys()), list(slots[limit:]))
Example #5
 def __init__(self,
              callback=None,
              max_workers_in_memory=5000,
              max_tasks_in_memory=10000):
     self.workers = LRUCache(limit=max_workers_in_memory)
     self.tasks = LRUCache(limit=max_tasks_in_memory)
     self.event_callback = callback
     self._mutex = threading.Lock()
Example #6
 def __init__(self, callback=None,
         max_workers_in_memory=5000, max_tasks_in_memory=10000):
     self.workers = LRUCache(limit=max_workers_in_memory)
     self.tasks = LRUCache(limit=max_tasks_in_memory)
     self.event_callback = callback
     self.group_handlers = {'worker': self.worker_event,
                            'task': self.task_event}
     self._mutex = threading.Lock()
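The group_handlers mapping is keyed by the prefix of the event type; the _dispatch_event method in the fuller State listings below splits a type such as 'task-succeeded' into group and subject before dispatching. In isolation:

event_type = 'task-succeeded'
group, _, subject = event_type.partition('-')
print(group, subject)   # -> task succeeded
# A State instance would then do: state.group_handlers[group](subject, event)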
Example #7
File: state.py, Project: AdrianRibao/celery
 def __init__(self, callback=None,
         max_workers_in_memory=5000, max_tasks_in_memory=10000):
     self.workers = LRUCache(limit=max_workers_in_memory)
     self.tasks = LRUCache(limit=max_tasks_in_memory)
     self.event_callback = callback
     self.group_handlers = {"worker": self.worker_event,
                            "task": self.task_event}
     self._mutex = Lock()
Example #8
File: state.py, Project: SalesSeek/celery
 def __init__(self, callback=None,
              max_workers_in_memory=5000, max_tasks_in_memory=10000):
     self.max_workers_in_memory = max_workers_in_memory
     self.max_tasks_in_memory = max_tasks_in_memory
     self.workers = LRUCache(limit=self.max_workers_in_memory)
     self.tasks = LRUCache(limit=self.max_tasks_in_memory)
     self._taskheap = []
     self.event_callback = callback
     self._mutex = threading.Lock()
Example #9
 def __init__(self, app=None, serializer=None, max_cached_results=None,
         **kwargs):
     from celery.app import app_or_default
     self.app = app_or_default(app)
     self.serializer = serializer or self.app.conf.CELERY_RESULT_SERIALIZER
     (self.content_type,
      self.content_encoding,
      self.encoder) = serialization.registry._encoders[self.serializer]
     self._cache = LRUCache(limit=max_cached_results or
                                   self.app.conf.CELERY_MAX_CACHED_RESULTS)
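The registry lookup above unpacks kombu's codec record for the configured serializer name. A small check of what that triple contains, assuming kombu's default registry and tolerating the same private _encoders access the code uses:

from kombu import serialization

content_type, content_encoding, encoder = serialization.registry._encoders['json']
print(content_type, content_encoding)   # -> application/json utf-8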
Example #10
 def __init__(self,
              callback=None,
              max_workers_in_memory=5000,
              max_tasks_in_memory=10000):
     self.workers = LRUCache(limit=max_workers_in_memory)
     self.tasks = LRUCache(limit=max_tasks_in_memory)
     self.event_callback = callback
     self.group_handlers = {
         'worker': self.worker_event,
         'task': self.task_event
     }
     self._mutex = Lock()
Example #11
 def __init__(self,
              callback=None,
              max_workers_in_memory=5000,
              max_tasks_in_memory=10000):
     self.max_workers_in_memory = max_workers_in_memory
     self.max_tasks_in_memory = max_tasks_in_memory
     self.workers = LRUCache(limit=self.max_workers_in_memory)
     self.tasks = LRUCache(limit=self.max_tasks_in_memory)
     self._taskheap = []
     self.event_callback = callback
     self.group_handlers = {
         'worker': self.worker_event,
         'task': self.task_event,
     }
     self._mutex = threading.Lock()
Example #12
File: base.py, Project: kertz/celery
    def __init__(self, app=None, serializer=None, max_cached_results=None, **kwargs):
        from celery.app import app_or_default

        self.app = app_or_default(app)
        self.serializer = serializer or self.app.conf.CELERY_RESULT_SERIALIZER
        (self.content_type, self.content_encoding, self.encoder) = serialization.registry._encoders[self.serializer]
        self._cache = LRUCache(limit=max_cached_results or self.app.conf.CELERY_MAX_CACHED_RESULTS)
Example #13
    def test_least_recently_used(self):
        x = LRUCache(3)

        x[1], x[2], x[3] = 1, 2, 3
        self.assertEqual(x.keys(), [1, 2, 3])

        x[4], x[5] = 4, 5
        self.assertEqual(x.keys(), [3, 4, 5])

        # access 3, which makes it the last used key.
        x[3]
        x[6] = 6
        self.assertEqual(x.keys(), [5, 3, 6])

        x[7] = 7
        self.assertEqual(x.keys(), [3, 6, 7])
Example #14
 def __init__(self,
              callback=None,
              workers=None,
              tasks=None,
              taskheap=None,
              max_workers_in_memory=5000,
              max_tasks_in_memory=10000):
     self.event_callback = callback
     self.workers = (LRUCache(max_workers_in_memory)
                     if workers is None else workers)
     self.tasks = (LRUCache(max_tasks_in_memory)
                   if tasks is None else tasks)
     self._taskheap = None  # reserved for __reduce__ in 3.1
     self.max_workers_in_memory = max_workers_in_memory
     self.max_tasks_in_memory = max_tasks_in_memory
     self._mutex = threading.Lock()
     self.handlers = {'task': self.task_event, 'worker': self.worker_event}
     self._get_handler = self.handlers.__getitem__
Example #15
File: cache.py, Project: wiennat/celery
class DummyClient(object):
    def __init__(self, *args, **kwargs):
        self.cache = LRUCache(limit=5000)

    def get(self, key, *args, **kwargs):
        return self.cache.get(key)

    def get_multi(self, keys):
        cache = self.cache
        return dict((k, cache[k]) for k in keys if k in cache)

    def set(self, key, value, *args, **kwargs):
        self.cache[key] = value

    def delete(self, key, *args, **kwargs):
        self.cache.pop(key, None)

    def incr(self, key, delta=1):
        return self.cache.incr(key, delta)
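A quick, hypothetical run of the client above shows the memcached-style surface it emulates on top of an in-process LRUCache:

client = DummyClient()
client.set('result:1', 'SUCCESS')
print(client.get('result:1'))                  # -> SUCCESS
print(client.get_multi(['result:1', 'nope']))  # -> {'result:1': 'SUCCESS'}
client.delete('result:1')
print(client.get('result:1'))                  # -> None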
Example #16
File: cache.py, Project: dctrwatson/celery
class DummyClient(object):

    def __init__(self, *args, **kwargs):
        self.cache = LRUCache(limit=5000)

    def get(self, key, *args, **kwargs):
        return self.cache.get(key)

    def get_multi(self, keys):
        cache = self.cache
        return dict((k, cache[k]) for k in keys if k in cache)

    def set(self, key, value, *args, **kwargs):
        self.cache[key] = value

    def delete(self, key, *args, **kwargs):
        self.cache.pop(key, None)

    def incr(self, key, delta=1):
        return self.cache.incr(key, delta)
Example #17
 def test_items(self):
     c = LRUCache()
     c.update(a=1, b=2, c=3)
     self.assertTrue(c.items())
Example #18
File: base.py, Project: fxiang21/company_bt
class BaseDictBackend(BaseBackend):
    def __init__(self, *args, **kwargs):
        super(BaseDictBackend, self).__init__(*args, **kwargs)
        self._cache = LRUCache(limit=kwargs.get('max_cached_results')
                               or self.app.conf.CELERY_MAX_CACHED_RESULTS)

    def is_cached(self, task_id):
        return task_id in self._cache

    def store_result(self, task_id, result, status, traceback=None, **kwargs):
        """Store task result and status."""
        result = self.encode_result(result, status)
        self._store_result(task_id, result, status, traceback, **kwargs)
        return result

    def forget(self, task_id):
        self._cache.pop(task_id, None)
        self._forget(task_id)

    def _forget(self, task_id):
        raise NotImplementedError('%s does not implement forget.' %
                                  (self.__class__))

    def get_status(self, task_id):
        """Get the status of a task."""
        return self.get_task_meta(task_id)['status']

    def get_traceback(self, task_id):
        """Get the traceback for a failed task."""
        return self.get_task_meta(task_id).get('traceback')

    def get_result(self, task_id):
        """Get the result of a task."""
        meta = self.get_task_meta(task_id)
        if meta['status'] in self.EXCEPTION_STATES:
            return self.exception_to_python(meta['result'])
        else:
            return meta['result']

    def get_children(self, task_id):
        """Get the list of subtasks sent by a task."""
        try:
            return self.get_task_meta(task_id)['children']
        except KeyError:
            pass

    def get_task_meta(self, task_id, cache=True):
        if cache:
            try:
                return self._cache[task_id]
            except KeyError:
                pass

        meta = self._get_task_meta_for(task_id)
        if cache and meta.get('status') == states.SUCCESS:
            self._cache[task_id] = meta
        return meta

    def reload_task_result(self, task_id):
        self._cache[task_id] = self.get_task_meta(task_id, cache=False)

    def reload_group_result(self, group_id):
        self._cache[group_id] = self.get_group_meta(group_id, cache=False)

    def get_group_meta(self, group_id, cache=True):
        if cache:
            try:
                return self._cache[group_id]
            except KeyError:
                pass

        meta = self._restore_group(group_id)
        if cache and meta is not None:
            self._cache[group_id] = meta
        return meta

    def restore_group(self, group_id, cache=True):
        """Get the result for a group."""
        meta = self.get_group_meta(group_id, cache=cache)
        if meta:
            return meta['result']

    def save_group(self, group_id, result):
        """Store the result of an executed group."""
        return self._save_group(group_id, result)

    def delete_group(self, group_id):
        self._cache.pop(group_id, None)
        return self._delete_group(group_id)
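get_task_meta and get_group_meta follow a cache-aside pattern: consult the LRU cache first, fall back to the result store, and cache only results that can no longer change (SUCCESS for tasks, non-None for groups) so in-flight states are always re-fetched. The same logic in isolation, with illustrative names:

def get_meta(cache, fetch, task_id):
    try:
        return cache[task_id]          # fast path: already cached
    except KeyError:
        pass
    meta = fetch(task_id)              # hit the result store
    if meta.get('status') == 'SUCCESS':
        cache[task_id] = meta          # only cache states that cannot change
    return meta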
Example #19
File: base.py, Project: fxiang21/company_bt
 def __init__(self, *args, **kwargs):
     super(BaseDictBackend, self).__init__(*args, **kwargs)
     self._cache = LRUCache(limit=kwargs.get('max_cached_results')
                            or self.app.conf.CELERY_MAX_CACHED_RESULTS)
Example #20
File: cache.py, Project: wiennat/celery
 def __init__(self, *args, **kwargs):
     self.cache = LRUCache(limit=5000)
Example #21
File: state.py, Project: shahjahanw/celery
class State(object):
    """Records clusters state."""
    event_count = 0
    task_count = 0

    def __init__(self,
                 callback=None,
                 max_workers_in_memory=5000,
                 max_tasks_in_memory=10000):
        self.max_workers_in_memory = max_workers_in_memory
        self.max_tasks_in_memory = max_tasks_in_memory
        self.workers = LRUCache(limit=self.max_workers_in_memory)
        self.tasks = LRUCache(limit=self.max_tasks_in_memory)
        self._taskheap = []
        self.event_callback = callback
        self._mutex = threading.Lock()

    def freeze_while(self, fun, *args, **kwargs):
        clear_after = kwargs.pop('clear_after', False)
        with self._mutex:
            try:
                return fun(*args, **kwargs)
            finally:
                if clear_after:
                    self._clear()

    def clear_tasks(self, ready=True):
        with self._mutex:
            return self._clear_tasks(ready)

    def _clear_tasks(self, ready=True):
        if ready:
            in_progress = dict((uuid, task) for uuid, task in self.itertasks()
                               if task.state not in states.READY_STATES)
            self.tasks.clear()
            self.tasks.update(in_progress)
        else:
            self.tasks.clear()
        self._taskheap[:] = []

    def _clear(self, ready=True):
        self.workers.clear()
        self._clear_tasks(ready)
        self.event_count = 0
        self.task_count = 0

    def clear(self, ready=True):
        with self._mutex:
            return self._clear(ready)

    def get_or_create_worker(self, hostname, **kwargs):
        """Get or create worker by hostname.

        Returns tuple of ``(worker, was_created)``.
        """
        try:
            worker = self.workers[hostname]
            worker.update(kwargs)
            return worker, False
        except KeyError:
            worker = self.workers[hostname] = Worker(hostname=hostname,
                                                     **kwargs)
            return worker, True

    def get_or_create_task(self, uuid):
        """Get or create task by uuid."""
        try:
            return self.tasks[uuid], True
        except KeyError:
            task = self.tasks[uuid] = Task(uuid=uuid)
            return task, False

    def worker_event(self, type, fields):
        """Process worker event."""
        try:
            hostname = fields['hostname']
        except KeyError:
            pass
        else:
            worker, created = self.get_or_create_worker(hostname)
            handler = getattr(worker, 'on_' + type, None)
            if handler:
                handler(**fields)
            return worker, created

    def task_event(self, type, fields):
        """Process task event."""
        uuid = fields['uuid']
        hostname = fields['hostname']
        worker, _ = self.get_or_create_worker(hostname)
        task, created = self.get_or_create_task(uuid)
        task.worker = worker

        taskheap = self._taskheap
        timestamp = fields.get('timestamp') or 0
        clock = 0 if type == 'sent' else fields.get('clock')
        heappush(taskheap, _lamportinfo(clock, timestamp, worker.id, task))
        curcount = len(self.tasks)
        if len(taskheap) > self.max_tasks_in_memory * 2:
            taskheap[:] = taskheap[curcount:]

        handler = getattr(task, 'on_' + type, None)
        if type == 'received':
            self.task_count += 1
        if handler:
            handler(**fields)
        else:
            task.on_unknown_event(type, **fields)
        return created

    def event(self, event):
        with self._mutex:
            return self._dispatch_event(event)

    def _dispatch_event(self, event):
        self.event_count += 1
        event = kwdict(event)
        group, _, subject = event['type'].partition('-')
        getattr(self, group + '_event')(subject, event)
        if self.event_callback:
            self.event_callback(self, event)

    def itertasks(self, limit=None):
        for index, row in enumerate(items(self.tasks)):
            yield row
            if limit and index + 1 >= limit:
                break

    def tasks_by_time(self, limit=None):
        """Generator giving tasks ordered by time,
        in ``(uuid, Task)`` tuples."""
        seen = set()
        for evtup in islice(reversed(self._taskheap), 0, limit):
            uuid = evtup[3].uuid
            if uuid not in seen:
                yield uuid, evtup[3]
                seen.add(uuid)

    tasks_by_timestamp = tasks_by_time

    def tasks_by_type(self, name, limit=None):
        """Get all tasks by type.

        Returns a list of ``(uuid, Task)`` tuples.

        """
        return islice(
            ((uuid, task)
             for uuid, task in self.tasks_by_time() if task.name == name),
            0,
            limit,
        )

    def tasks_by_worker(self, hostname, limit=None):
        """Get all tasks by worker.

        """
        return islice(
            ((uuid, task) for uuid, task in self.tasks_by_time()
             if task.worker.hostname == hostname),
            0,
            limit,
        )

    def task_types(self):
        """Returns a list of all seen task types."""
        return list(sorted(set(task.name for task in values(self.tasks))))

    def alive_workers(self):
        """Returns a list of (seemingly) alive workers."""
        return [w for w in values(self.workers) if w.alive]

    def __repr__(self):
        return '<State: events={0.event_count} tasks={0.task_count}>' \
            .format(self)

    def __getstate__(self):
        d = dict(vars(self))
        d.pop('_mutex')
        return d

    def __setstate__(self, state):
        self.__dict__ = state
        self._mutex = threading.Lock()
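task_event above orders _taskheap by pushing _lamportinfo records; assuming those records compare like tuples over their (clock, timestamp, worker.id, task) fields, heappush keeps the event with the lowest logical clock at the front, and the heap is pruned once it grows past twice max_tasks_in_memory. The ordering in isolation:

from heapq import heappush

heap = []
for clock, timestamp in [(3, 100.0), (1, 101.0), (2, 99.0)]:
    heappush(heap, (clock, timestamp))
print(heap[0])   # -> (1, 101.0): the lowest logical clock wins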
Example #22
 def reset(self):
     self.task_count = 0
     self.error_rate = 0.0
     self.TASK_HISTORY = LRUCache(limit=10)
Example #23
File: cache.py, Project: dctrwatson/celery
 def __init__(self, *args, **kwargs):
     self.cache = LRUCache(limit=5000)
Example #24
File: state.py, Project: phobologic/celery
class State(object):
    """Records clusters state."""

    event_count = 0
    task_count = 0

    def __init__(self, callback=None, max_workers_in_memory=5000, max_tasks_in_memory=10000):
        self.max_workers_in_memory = max_workers_in_memory
        self.max_tasks_in_memory = max_tasks_in_memory
        self.workers = LRUCache(limit=self.max_workers_in_memory)
        self.tasks = LRUCache(limit=self.max_tasks_in_memory)
        self._taskheap = []
        self.event_callback = callback
        self.group_handlers = {"worker": self.worker_event, "task": self.task_event}
        self._mutex = threading.Lock()

    def freeze_while(self, fun, *args, **kwargs):
        clear_after = kwargs.pop("clear_after", False)
        with self._mutex:
            try:
                return fun(*args, **kwargs)
            finally:
                if clear_after:
                    self._clear()

    def clear_tasks(self, ready=True):
        with self._mutex:
            return self._clear_tasks(ready)

    def _clear_tasks(self, ready=True):
        if ready:
            in_progress = dict((uuid, task) for uuid, task in self.itertasks() if task.state not in states.READY_STATES)
            self.tasks.clear()
            self.tasks.update(in_progress)
        else:
            self.tasks.clear()
        self._taskheap[:] = []

    def _clear(self, ready=True):
        self.workers.clear()
        self._clear_tasks(ready)
        self.event_count = 0
        self.task_count = 0

    def clear(self, ready=True):
        with self._mutex:
            return self._clear(ready)

    def get_or_create_worker(self, hostname, **kwargs):
        """Get or create worker by hostname.

        Returns tuple of ``(worker, was_created)``.
        """
        try:
            worker = self.workers[hostname]
            worker.update(kwargs)
            return worker, False
        except KeyError:
            worker = self.workers[hostname] = Worker(hostname=hostname, **kwargs)
            return worker, True

    def get_or_create_task(self, uuid):
        """Get or create task by uuid."""
        try:
            return self.tasks[uuid], True
        except KeyError:
            task = self.tasks[uuid] = Task(uuid=uuid)
            return task, False

    def worker_event(self, type, fields):
        """Process worker event."""
        try:
            hostname = fields["hostname"]
        except KeyError:
            pass
        else:
            worker, created = self.get_or_create_worker(hostname)
            handler = getattr(worker, "on_" + type, None)
            if handler:
                handler(**fields)
            return worker, created

    def task_event(self, type, fields):
        """Process task event."""
        uuid = fields.pop("uuid")
        hostname = fields.pop("hostname")
        worker, _ = self.get_or_create_worker(hostname)
        task, created = self.get_or_create_task(uuid)
        task.worker = worker

        taskheap = self._taskheap
        timestamp = fields.get("timestamp") or 0
        clock = 0 if type == "sent" else fields.get("clock")
        heappush(taskheap, _lamportinfo(clock, timestamp, worker.id, task))
        curcount = len(self.tasks)
        if len(taskheap) > self.max_tasks_in_memory * 2:
            taskheap[:] = taskheap[curcount:]

        handler = getattr(task, "on_" + type, None)
        if type == "received":
            self.task_count += 1
        if handler:
            handler(**fields)
        else:
            task.on_unknown_event(type, **fields)
        return created

    def event(self, event):
        with self._mutex:
            return self._dispatch_event(event)

    def _dispatch_event(self, event):
        self.event_count += 1
        event = kwdict(event)
        group, _, subject = event.pop("type").partition("-")
        self.group_handlers[group](subject, event)
        if self.event_callback:
            self.event_callback(self, event)

    def itertasks(self, limit=None):
        for index, row in enumerate(items(self.tasks)):
            yield row
            if limit and index + 1 >= limit:
                break

    def tasks_by_time(self, limit=None):
        """Generator giving tasks ordered by time,
        in ``(uuid, Task)`` tuples."""
        seen = set()
        for evtup in islice(reversed(self._taskheap), 0, limit):
            uuid = evtup[3].uuid
            if uuid not in seen:
                yield uuid, evtup[3]
                seen.add(uuid)

    tasks_by_timestamp = tasks_by_time

    def tasks_by_type(self, name, limit=None):
        """Get all tasks by type.

        Returns a list of ``(uuid, Task)`` tuples.

        """
        return islice(((uuid, task) for uuid, task in self.tasks_by_time() if task.name == name), 0, limit)

    def tasks_by_worker(self, hostname, limit=None):
        """Get all tasks by worker.

        """
        return islice(
            ((uuid, task) for uuid, task in self.tasks_by_time() if task.worker.hostname == hostname), 0, limit
        )

    def task_types(self):
        """Returns a list of all seen task types."""
        return list(sorted(set(task.name for task in values(self.tasks))))

    def alive_workers(self):
        """Returns a list of (seemingly) alive workers."""
        return [w for w in values(self.workers) if w.alive]

    def __repr__(self):
        return "<State: events={0.event_count} tasks={0.task_count}>".format(self)

    def __getstate__(self):
        d = dict(vars(self))
        d.pop("_mutex")
        return d

    def __setstate__(self, state):
        self.__dict__ = state
        self._mutex = threading.Lock()
Example #25
 def test_items(self):
     c = LRUCache()
     c.update(a=1, b=2, c=3)
     self.assertTrue(list(items(c)))
Example #26
File: base.py, Project: JonPeel/celery
class BaseBackend(object):
    READY_STATES = states.READY_STATES
    UNREADY_STATES = states.UNREADY_STATES
    EXCEPTION_STATES = states.EXCEPTION_STATES

    TimeoutError = TimeoutError

    #: Time to sleep between polling each individual item
    #: in `ResultSet.iterate`. as opposed to the `interval`
    #: argument which is for each pass.
    subpolling_interval = None

    #: If true the backend must implement :meth:`get_many`.
    supports_native_join = False

    def __init__(self, app=None, serializer=None,
                 max_cached_results=None, **kwargs):
        from celery.app import app_or_default
        self.app = app_or_default(app)
        conf = self.app.conf
        self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER
        (self.content_type,
         self.content_encoding,
         self.encoder) = serialization.registry._encoders[self.serializer]
        self._cache = LRUCache(
            limit=max_cached_results or conf.CELERY_MAX_CACHED_RESULTS,
        )

    def mark_as_started(self, task_id, **meta):
        """Mark a task as started"""
        return self.store_result(task_id, meta, status=states.STARTED)

    def mark_as_done(self, task_id, result):
        """Mark task as successfully executed."""
        return self.store_result(task_id, result, status=states.SUCCESS)

    def mark_as_failure(self, task_id, exc, traceback=None):
        """Mark task as executed with failure. Stores the execption."""
        return self.store_result(task_id, exc, status=states.FAILURE,
                                 traceback=traceback)

    def mark_as_retry(self, task_id, exc, traceback=None):
        """Mark task as being retries. Stores the current
        exception (if any)."""
        return self.store_result(task_id, exc, status=states.RETRY,
                                 traceback=traceback)

    def mark_as_revoked(self, task_id, reason=''):
        return self.store_result(task_id, TaskRevokedError(reason),
                                 status=states.REVOKED, traceback=None)

    def prepare_exception(self, exc):
        """Prepare exception for serialization."""
        if self.serializer in EXCEPTION_ABLE_CODECS:
            return get_pickleable_exception(exc)
        return {'exc_type': type(exc).__name__, 'exc_message': str(exc)}

    def exception_to_python(self, exc):
        """Convert serialized exception to Python exception."""
        if self.serializer in EXCEPTION_ABLE_CODECS:
            return get_pickled_exception(exc)
        return create_exception_cls(from_utf8(exc['exc_type']),
                                    sys.modules[__name__])(exc['exc_message'])

    def prepare_value(self, result):
        """Prepare value for storage."""
        if isinstance(result, GroupResult):
            return result.serializable()
        return result

    def encode(self, data):
        _, _, payload = serialization.encode(data, serializer=self.serializer)
        return payload

    def decode(self, payload):
        payload = PY3 and payload or str(payload)
        return serialization.decode(payload,
                                    content_type=self.content_type,
                                    content_encoding=self.content_encoding)

    def wait_for(self, task_id, timeout=None, propagate=True, interval=0.5):
        """Wait for task and return its result.

        If the task raises an exception, this exception
        will be re-raised by :func:`wait_for`.

        If `timeout` is not :const:`None`, this raises the
        :class:`celery.exceptions.TimeoutError` exception if the operation
        takes longer than `timeout` seconds.

        """

        time_elapsed = 0.0

        while 1:
            status = self.get_status(task_id)
            if status == states.SUCCESS:
                return self.get_result(task_id)
            elif status in states.PROPAGATE_STATES:
                result = self.get_result(task_id)
                if propagate:
                    raise result
                return result
            # avoid hammering the CPU checking status.
            time.sleep(interval)
            time_elapsed += interval
            if timeout and time_elapsed >= timeout:
                raise TimeoutError('The operation timed out.')

    def prepare_expires(self, value, type=None):
        if value is None:
            value = self.app.conf.CELERY_TASK_RESULT_EXPIRES
        if isinstance(value, timedelta):
            value = timeutils.timedelta_seconds(value)
        if value is not None and type:
            return type(value)
        return value

    def encode_result(self, result, status):
        if status in self.EXCEPTION_STATES and isinstance(result, Exception):
            return self.prepare_exception(result)
        else:
            return self.prepare_value(result)

    def store_result(self, task_id, result, status, traceback=None, **kwargs):
        """Update task state and result."""
        result = self.encode_result(result, status)
        self._store_result(task_id, result, status, traceback, **kwargs)
        return result

    def forget(self, task_id):
        self._cache.pop(task_id, None)
        self._forget(task_id)

    def _forget(self, task_id):
        raise NotImplementedError('backend does not implement forget.')

    def get_status(self, task_id):
        """Get the status of a task."""
        return self.get_task_meta(task_id)['status']

    def get_traceback(self, task_id):
        """Get the traceback for a failed task."""
        return self.get_task_meta(task_id).get('traceback')

    def get_result(self, task_id):
        """Get the result of a task."""
        meta = self.get_task_meta(task_id)
        if meta['status'] in self.EXCEPTION_STATES:
            return self.exception_to_python(meta['result'])
        else:
            return meta['result']

    def get_children(self, task_id):
        """Get the list of subtasks sent by a task."""
        try:
            return self.get_task_meta(task_id)['children']
        except KeyError:
            pass

    def get_task_meta(self, task_id, cache=True):
        if cache:
            try:
                return self._cache[task_id]
            except KeyError:
                pass

        meta = self._get_task_meta_for(task_id)
        if cache and meta.get('status') == states.SUCCESS:
            self._cache[task_id] = meta
        return meta

    def reload_task_result(self, task_id):
        """Reload task result, even if it has been previously fetched."""
        self._cache[task_id] = self.get_task_meta(task_id, cache=False)

    def reload_group_result(self, group_id):
        """Reload group result, even if it has been previously fetched."""
        self._cache[group_id] = self.get_group_meta(group_id, cache=False)

    def get_group_meta(self, group_id, cache=True):
        if cache:
            try:
                return self._cache[group_id]
            except KeyError:
                pass

        meta = self._restore_group(group_id)
        if cache and meta is not None:
            self._cache[group_id] = meta
        return meta

    def restore_group(self, group_id, cache=True):
        """Get the result for a group."""
        meta = self.get_group_meta(group_id, cache=cache)
        if meta:
            return meta['result']

    def save_group(self, group_id, result):
        """Store the result of an executed group."""
        return self._save_group(group_id, result)

    def delete_group(self, group_id):
        self._cache.pop(group_id, None)
        return self._delete_group(group_id)

    def cleanup(self):
        """Backend cleanup. Is run by
        :class:`celery.task.DeleteExpiredTaskMetaTask`."""
        pass

    def process_cleanup(self):
        """Cleanup actions to do at the end of a task worker process."""
        pass

    def on_task_call(self, producer, task_id):
        return {}

    def on_chord_part_return(self, task, propagate=False):
        pass

    def fallback_chord_unlock(self, group_id, body, result=None, **kwargs):
        kwargs['result'] = [r.id for r in result]
        self.app.tasks['celery.chord_unlock'].apply_async((group_id, body, ),
                                                          kwargs, countdown=1)
    on_chord_apply = fallback_chord_unlock

    def current_task_children(self):
        current = current_task()
        if current:
            return [r.serializable() for r in current.request.children]

    def __reduce__(self, args=(), kwargs={}):
        return (unpickle_backend, (self.__class__, args, kwargs))
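wait_for above is a plain polling loop. Stripped of the backend plumbing, it reduces to something like the following sketch (illustrative names, string statuses for brevity):

import time

def poll_until_success(get_status, timeout=None, interval=0.5):
    elapsed = 0.0
    while True:
        if get_status() == 'SUCCESS':
            return True
        time.sleep(interval)           # avoid hammering the CPU
        elapsed += interval
        if timeout and elapsed >= timeout:
            raise TimeoutError('The operation timed out.')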
Example #27
class State(object):
    """Records clusters state."""
    event_count = 0
    task_count = 0

    def __init__(self, callback=None,
            max_workers_in_memory=5000, max_tasks_in_memory=10000):
        self.workers = LRUCache(limit=max_workers_in_memory)
        self.tasks = LRUCache(limit=max_tasks_in_memory)
        self.event_callback = callback
        self.group_handlers = {'worker': self.worker_event,
                               'task': self.task_event}
        self._mutex = threading.Lock()

    def freeze_while(self, fun, *args, **kwargs):
        clear_after = kwargs.pop('clear_after', False)
        with self._mutex:
            try:
                return fun(*args, **kwargs)
            finally:
                if clear_after:
                    self._clear()

    def clear_tasks(self, ready=True):
        with self._mutex:
            return self._clear_tasks(ready)

    def _clear_tasks(self, ready=True):
        if ready:
            in_progress = dict((uuid, task) for uuid, task in self.itertasks()
                                if task.state not in states.READY_STATES)
            self.tasks.clear()
            self.tasks.update(in_progress)
        else:
            self.tasks.clear()

    def _clear(self, ready=True):
        self.workers.clear()
        self._clear_tasks(ready)
        self.event_count = 0
        self.task_count = 0

    def clear(self, ready=True):
        with self._mutex:
            return self._clear(ready)

    def get_or_create_worker(self, hostname, **kwargs):
        """Get or create worker by hostname."""
        try:
            worker = self.workers[hostname]
            worker.update(kwargs)
        except KeyError:
            worker = self.workers[hostname] = Worker(
                    hostname=hostname, **kwargs)
        return worker

    def get_or_create_task(self, uuid):
        """Get or create task by uuid."""
        try:
            return self.tasks[uuid]
        except KeyError:
            task = self.tasks[uuid] = Task(uuid=uuid)
            return task

    def worker_event(self, type, fields):
        """Process worker event."""
        hostname = fields.pop('hostname', None)
        if hostname:
            worker = self.get_or_create_worker(hostname)
            handler = getattr(worker, 'on_%s' % type, None)
            if handler:
                handler(**fields)

    def task_event(self, type, fields):
        """Process task event."""
        uuid = fields.pop('uuid')
        hostname = fields.pop('hostname')
        worker = self.get_or_create_worker(hostname)
        task = self.get_or_create_task(uuid)
        handler = getattr(task, 'on_%s' % type, None)
        if type == 'received':
            self.task_count += 1
        if handler:
            handler(**fields)
        else:
            task.on_unknown_event(type, **fields)
        task.worker = worker

    def event(self, event):
        with self._mutex:
            return self._dispatch_event(event)

    def _dispatch_event(self, event):
        self.event_count += 1
        event = kwdict(event)
        group, _, type = event.pop('type').partition('-')
        self.group_handlers[group](type, event)
        if self.event_callback:
            self.event_callback(self, event)

    def itertasks(self, limit=None):
        for index, row in enumerate(self.tasks.iteritems()):
            yield row
            if limit and index + 1 >= limit:
                break

    def tasks_by_timestamp(self, limit=None):
        """Get tasks by timestamp.

        Returns a list of `(uuid, task)` tuples.

        """
        return self._sort_tasks_by_time(self.itertasks(limit))

    def _sort_tasks_by_time(self, tasks):
        """Sort task items by time."""
        return sorted(tasks, key=lambda t: t[1].timestamp,
                      reverse=True)

    def tasks_by_type(self, name, limit=None):
        """Get all tasks by type.

        Returns a list of `(uuid, task)` tuples.

        """
        sorted_tasks = self._sort_tasks_by_time((uuid, task)
                for uuid, task in self.tasks.iteritems()
                    if task.name == name)

        return sorted_tasks[0:limit or None]

    def tasks_by_worker(self, hostname, limit=None):
        """Get all tasks by worker.

        Returns a list of `(uuid, task)` tuples.

        """
        return self._sort_tasks_by_time((uuid, task)
                for uuid, task in self.itertasks(limit)
                    if task.worker.hostname == hostname)

    def task_types(self):
        """Returns a list of all seen task types."""
        return list(sorted(set(task.name for task in self.tasks.itervalues())))

    def alive_workers(self):
        """Returns a list of (seemingly) alive workers."""
        return [w for w in self.workers.values() if w.alive]

    def __repr__(self):
        return '<ClusterState: events=%s tasks=%s>' % (self.event_count,
                                                       self.task_count)
Example #28
File: dumper.py, Project: rwillmer/celery
"""
    celery.events.dumper
    ~~~~~~~~~~~~~~~~~~~~

    This is a simple program that dumps events to the console
    as they happen.  Think of it like a `tcpdump` for Celery events.

"""
from __future__ import absolute_import

import sys

from datetime import datetime

from celery.app import app_or_default
from celery.datastructures import LRUCache

TASK_NAMES = LRUCache(limit=0xFFF)

HUMAN_TYPES = {
    'worker-offline': 'shutdown',
    'worker-online': 'started',
    'worker-heartbeat': 'heartbeat'
}


def humanize_type(type):
    try:
        return HUMAN_TYPES[type.lower()]
    except KeyError:
        return type.lower().replace('-', ' ')
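For example:

print(humanize_type('worker-online'))    # -> started
print(humanize_type('task-succeeded'))   # -> task succeeded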

Example #29
class CeleryWatcher(Daemon):

    def __init__(self, app=None):
        super(CeleryWatcher, self).__init__(None)

        self.app = app or app_or_default(None)

    def reset(self):
        self.task_count = 0
        self.error_rate = 0.0
        self.TASK_HISTORY = LRUCache(limit=10)

    def execute_with_options(self, *args, **kwargs):
        self.options = kwargs
        self.conn = self.app.broker_connection()
        self.recv = self.app.events.Receiver(self.conn, handlers={
            'task-succeeded': self.on_task_succeeded,
            'task-failed': self.on_task_failed,
        })

        if self.options['daemonize']:
            self.pidfile = self.options['pidfile']
            self.stdout = '/tmp/celerywatch.log'
            self.start()
        else:
            self.pidfile = self.options['pidfile'] #even if we don't daemonize, must set it to a non-None value
            self.run()

    def run(self):
        def handler(signum, frame):
            self.stop()
        signal.signal(signal.SIGTERM, handler)

        try:
            self.reset()
            print 'Monitoring celeryd for %.0f%% error rate...' % (self.options['stop_threshold']*100, )
            sys.stdout.flush()
            self.recv.capture()
        except (KeyboardInterrupt, SystemExit):
            self.cleanup()

    def cleanup(self):
        self.conn and self.conn.close()

    def get_options(self):
        return (
            make_option('--daemonize', action='store_true', dest='daemonize', default=False, help='TODO'),
            make_option('--pidfile', action='store', type='string', dest='pidfile', default='/tmp/celerywatch.pid', help='TODO'),
            make_option('--procgrep', action='store', type='string', dest='process_grep', default=None, help='TODO'),
            make_option('--stopthreshold', action='store', type='float', dest='stop_threshold', default=0.5, help='TODO'),
            make_option('--mintasks', action='store', type='int', dest='min_tasks', default=10, help='TODO'),
            make_option('--poststopscript', action='store', type='string', dest='post_stop_script', default=None, help='TODO'),
            make_option('--override', action='store_true', dest='override_stop_script', default=False, help='TODO'),
        )

    def kill_celery(self, non_daemon=False):
        if not self.options['override_stop_script']:
            print 'stopping celeryd...'
            sys.stdout.flush()
            stop_script = resource_filename(__name__, 'bin/stopCeleryd.sh')
            cmds = ['sh', stop_script, self.options['process_grep'] or '']
            (returncode, output, error) = run_command(cmds)
            if returncode != 0:
                print 'error stopping celeryd:'
                print output.strip(), '\n', error.strip()
                sys.stdout.flush()
                self.reset()
            else:
                print output.strip()
                print 'done'
                sys.stdout.flush()

        post_stop_script = self.options['post_stop_script']
        if post_stop_script and os.path.isfile(post_stop_script):
            print 'running post-stop script %s' % (post_stop_script,)
            sys.stdout.flush()
            cmds = [post_stop_script]
            (returncode, output, error) = run_command(cmds)
            if returncode != 0:
                print 'error running post-stop script:'
                print output.strip()
                print error.strip()
                sys.stdout.flush()
            else:
                print output.strip()
                print 'done'
                sys.stdout.flush()
        self.reset()

    #task-failed event fields (unicode): exception, traceback, uuid, clock, timestamp, hostname, type
    def on_task_failed(self, event):
        self.task_count += 1
        self.TASK_HISTORY[event['uuid']] = 'fail'
        fails = filter(lambda pair: pair[1] == 'fail', self.TASK_HISTORY.items())
        if self.task_count >= self.options['min_tasks']:
            recent_task_count = len(self.TASK_HISTORY.keys())
            self.error_rate = len(fails)/float(recent_task_count)
            if self.error_rate > self.options['stop_threshold']:
                print 'Error rate of %.0f%% over last %d tasks; after %d lifetime tasks' % (
                        self.error_rate*100, recent_task_count, self.task_count,)
                sys.stdout.flush()
                self.kill_celery()

    #task-success event fields (unicode): runtime, uuid, clock, timestamp, hostname, type, result
    def on_task_succeeded(self, event):
        self.task_count += 1
        self.TASK_HISTORY[event['uuid']] = 'success'
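on_task_failed computes its error rate over TASK_HISTORY, an LRUCache capped at 10 entries, so only the ten most recent task outcomes ever count. A worked sketch of that sliding window, reusing the hypothetical MiniLRUCache from after Example #1:

history = MiniLRUCache(limit=10)
for i in range(15):
    history[i] = 'fail' if i % 3 == 0 else 'success'
fails = [v for v in history.values() if v == 'fail']
print(len(fails) / len(history))   # -> 0.3: three failures among the last ten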
Example #30
class State(object):
    """Records clusters state."""
    event_count = 0
    task_count = 0

    def __init__(self,
                 callback=None,
                 max_workers_in_memory=5000,
                 max_tasks_in_memory=10000):
        self.workers = LRUCache(limit=max_workers_in_memory)
        self.tasks = LRUCache(limit=max_tasks_in_memory)
        self.event_callback = callback
        self.group_handlers = {
            'worker': self.worker_event,
            'task': self.task_event
        }
        self._mutex = Lock()

    def freeze_while(self, fun, *args, **kwargs):
        clear_after = kwargs.pop('clear_after', False)
        with self._mutex:
            try:
                return fun(*args, **kwargs)
            finally:
                if clear_after:
                    self._clear()

    def clear_tasks(self, ready=True):
        with self._mutex:
            return self._clear_tasks(ready)

    def _clear_tasks(self, ready=True):
        if ready:
            in_progress = dict((uuid, task) for uuid, task in self.itertasks()
                               if task.state not in states.READY_STATES)
            self.tasks.clear()
            self.tasks.update(in_progress)
        else:
            self.tasks.clear()

    def _clear(self, ready=True):
        self.workers.clear()
        self._clear_tasks(ready)
        self.event_count = 0
        self.task_count = 0

    def clear(self, ready=True):
        with self._mutex:
            return self._clear(ready)

    def get_or_create_worker(self, hostname, **kwargs):
        """Get or create worker by hostname."""
        try:
            worker = self.workers[hostname]
            worker.update(kwargs)
        except KeyError:
            worker = self.workers[hostname] = Worker(hostname=hostname,
                                                     **kwargs)
        return worker

    def get_or_create_task(self, uuid):
        """Get or create task by uuid."""
        try:
            return self.tasks[uuid]
        except KeyError:
            task = self.tasks[uuid] = Task(uuid=uuid)
            return task

    def worker_event(self, type, fields):
        """Process worker event."""
        hostname = fields.pop('hostname', None)
        if hostname:
            worker = self.get_or_create_worker(hostname)
            handler = getattr(worker, 'on_%s' % type, None)
            if handler:
                handler(**fields)

    def task_event(self, type, fields):
        """Process task event."""
        uuid = fields.pop('uuid')
        hostname = fields.pop('hostname')
        worker = self.get_or_create_worker(hostname)
        task = self.get_or_create_task(uuid)
        handler = getattr(task, 'on_%s' % type, None)
        if type == 'received':
            self.task_count += 1
        if handler:
            handler(**fields)
        else:
            task.on_unknown_event(type, **fields)
        task.worker = worker

    def event(self, event):
        with self._mutex:
            return self._dispatch_event(event)

    def _dispatch_event(self, event):
        self.event_count += 1
        event = kwdict(event)
        group, _, type = event.pop('type').partition('-')
        self.group_handlers[group](type, event)
        if self.event_callback:
            self.event_callback(self, event)

    def itertasks(self, limit=None):
        for index, row in enumerate(self.tasks.iteritems()):
            yield row
            if limit and index + 1 >= limit:
                break

    def tasks_by_timestamp(self, limit=None):
        """Get tasks by timestamp.

        Returns a list of `(uuid, task)` tuples.

        """
        return self._sort_tasks_by_time(self.itertasks(limit))

    def _sort_tasks_by_time(self, tasks):
        """Sort task items by time."""
        return sorted(tasks, key=lambda t: t[1].timestamp, reverse=True)

    def tasks_by_type(self, name, limit=None):
        """Get all tasks by type.

        Returns a list of `(uuid, task)` tuples.

        """
        return self._sort_tasks_by_time([
            (uuid, task) for uuid, task in self.itertasks(limit)
            if task.name == name
        ])

    def tasks_by_worker(self, hostname, limit=None):
        """Get all tasks by worker.

        Returns a list of `(uuid, task)` tuples.

        """
        return self._sort_tasks_by_time([
            (uuid, task) for uuid, task in self.itertasks(limit)
            if task.worker.hostname == hostname
        ])

    def task_types(self):
        """Returns a list of all seen task types."""
        return list(sorted(set(task.name for task in self.tasks.itervalues())))

    def alive_workers(self):
        """Returns a list of (seemingly) alive workers."""
        return [w for w in self.workers.values() if w.alive]

    def __repr__(self):
        return '<ClusterState: events=%s tasks=%s>' % (self.event_count,
                                                       self.task_count)
Example #31
File: base.py, Project: robftz/celery
class BaseDictBackend(BaseBackend):
    def __init__(self, *args, **kwargs):
        super(BaseDictBackend, self).__init__(*args, **kwargs)
        self._cache = LRUCache(limit=kwargs.get("max_cached_results") or self.app.conf.CELERY_MAX_CACHED_RESULTS)

    def store_result(self, task_id, result, status, traceback=None, **kwargs):
        """Store task result and status."""
        result = self.encode_result(result, status)
        return self._store_result(task_id, result, status, traceback, **kwargs)

    def forget(self, task_id):
        self._cache.pop(task_id, None)
        self._forget(task_id)

    def _forget(self, task_id):
        raise NotImplementedError("%s does not implement forget." % (self.__class__))

    def get_status(self, task_id):
        """Get the status of a task."""
        return self.get_task_meta(task_id)["status"]

    def get_traceback(self, task_id):
        """Get the traceback for a failed task."""
        return self.get_task_meta(task_id).get("traceback")

    def get_result(self, task_id):
        """Get the result of a task."""
        meta = self.get_task_meta(task_id)
        if meta["status"] in self.EXCEPTION_STATES:
            return self.exception_to_python(meta["result"])
        else:
            return meta["result"]

    def get_children(self, task_id):
        """Get the list of subtasks sent by a task."""
        try:
            return self.get_task_meta(task_id)["children"]
        except KeyError:
            pass

    def get_task_meta(self, task_id, cache=True):
        if cache:
            try:
                return self._cache[task_id]
            except KeyError:
                pass

        meta = self._get_task_meta_for(task_id)
        if cache and meta.get("status") == states.SUCCESS:
            self._cache[task_id] = meta
        return meta

    def reload_task_result(self, task_id):
        self._cache[task_id] = self.get_task_meta(task_id, cache=False)

    def reload_group_result(self, group_id):
        self._cache[group_id] = self.get_group_meta(group_id, cache=False)

    def get_group_meta(self, group_id, cache=True):
        if cache:
            try:
                return self._cache[group_id]
            except KeyError:
                pass

        meta = self._restore_group(group_id)
        if cache and meta is not None:
            self._cache[group_id] = meta
        return meta

    def restore_group(self, group_id, cache=True):
        """Get the result for a group."""
        meta = self.get_group_meta(group_id, cache=cache)
        if meta:
            return meta["result"]

    def save_group(self, group_id, result):
        """Store the result of an executed group."""
        return self._save_group(group_id, result)

    def delete_group(self, group_id):
        self._cache.pop(group_id, None)
        return self._delete_group(group_id)
Example #32
class BaseBackend(object):
    READY_STATES = states.READY_STATES
    UNREADY_STATES = states.UNREADY_STATES
    EXCEPTION_STATES = states.EXCEPTION_STATES

    TimeoutError = TimeoutError

    #: Time to sleep between polling each individual item
    #: in `ResultSet.iterate`. as opposed to the `interval`
    #: argument which is for each pass.
    subpolling_interval = None

    #: If true the backend must implement :meth:`get_many`.
    supports_native_join = False

    def __init__(self, app=None, serializer=None, max_cached_results=None,
            **kwargs):
        from celery.app import app_or_default
        self.app = app_or_default(app)
        self.serializer = serializer or self.app.conf.CELERY_RESULT_SERIALIZER
        (self.content_type,
         self.content_encoding,
         self.encoder) = serialization.registry._encoders[self.serializer]
        self._cache = LRUCache(limit=max_cached_results or
                                      self.app.conf.CELERY_MAX_CACHED_RESULTS)

    def mark_as_started(self, task_id, **meta):
        """Mark a task as started"""
        return self.store_result(task_id, meta, status=states.STARTED)

    def mark_as_done(self, task_id, result):
        """Mark task as successfully executed."""
        return self.store_result(task_id, result, status=states.SUCCESS)

    def mark_as_failure(self, task_id, exc, traceback=None):
        """Mark task as executed with failure. Stores the execption."""
        return self.store_result(task_id, exc, status=states.FAILURE,
                                 traceback=traceback)

    def mark_as_retry(self, task_id, exc, traceback=None):
        """Mark task as being retries. Stores the current
        exception (if any)."""
        return self.store_result(task_id, exc, status=states.RETRY,
                                 traceback=traceback)

    def mark_as_revoked(self, task_id, reason=''):
        return self.store_result(task_id, TaskRevokedError(reason),
                                 status=states.REVOKED, traceback=None)

    def prepare_exception(self, exc):
        """Prepare exception for serialization."""
        if self.serializer in EXCEPTION_ABLE_CODECS:
            return get_pickleable_exception(exc)
        return {'exc_type': type(exc).__name__, 'exc_message': str(exc)}

    def exception_to_python(self, exc):
        """Convert serialized exception to Python exception."""
        if self.serializer in EXCEPTION_ABLE_CODECS:
            return get_pickled_exception(exc)
        return create_exception_cls(from_utf8(exc['exc_type']),
                                    sys.modules[__name__])(exc['exc_message'])

    def prepare_value(self, result):
        """Prepare value for storage."""
        if isinstance(result, GroupResult):
            return result.serializable()
        return result

    def encode(self, data):
        _, _, payload = serialization.encode(data, serializer=self.serializer)
        return payload

    def decode(self, payload):
        payload = PY3 and payload or str(payload)
        return serialization.decode(payload,
                                    content_type=self.content_type,
                                    content_encoding=self.content_encoding)

    def wait_for(self, task_id, timeout=None, propagate=True, interval=0.5):
        """Wait for task and return its result.

        If the task raises an exception, this exception
        will be re-raised by :func:`wait_for`.

        If `timeout` is not :const:`None`, this raises the
        :class:`celery.exceptions.TimeoutError` exception if the operation
        takes longer than `timeout` seconds.

        """

        time_elapsed = 0.0

        while 1:
            status = self.get_status(task_id)
            if status == states.SUCCESS:
                return self.get_result(task_id)
            elif status in states.PROPAGATE_STATES:
                result = self.get_result(task_id)
                if propagate:
                    raise result
                return result
            # avoid hammering the CPU checking status.
            time.sleep(interval)
            time_elapsed += interval
            if timeout and time_elapsed >= timeout:
                raise TimeoutError('The operation timed out.')

    def prepare_expires(self, value, type=None):
        if value is None:
            value = self.app.conf.CELERY_TASK_RESULT_EXPIRES
        if isinstance(value, timedelta):
            value = timeutils.timedelta_seconds(value)
        if value is not None and type:
            return type(value)
        return value

    def encode_result(self, result, status):
        if status in self.EXCEPTION_STATES and isinstance(result, Exception):
            return self.prepare_exception(result)
        else:
            return self.prepare_value(result)

    def store_result(self, task_id, result, status, traceback=None, **kwargs):
        """Update task state and result."""
        result = self.encode_result(result, status)
        self._store_result(task_id, result, status, traceback, **kwargs)
        return result

    def forget(self, task_id):
        self._cache.pop(task_id, None)
        self._forget(task_id)

    def _forget(self, task_id):
        raise NotImplementedError('backend does not implement forget.')

    def get_status(self, task_id):
        """Get the status of a task."""
        return self.get_task_meta(task_id)['status']

    def get_traceback(self, task_id):
        """Get the traceback for a failed task."""
        return self.get_task_meta(task_id).get('traceback')

    def get_result(self, task_id):
        """Get the result of a task."""
        meta = self.get_task_meta(task_id)
        if meta['status'] in self.EXCEPTION_STATES:
            return self.exception_to_python(meta['result'])
        else:
            return meta['result']

    def get_children(self, task_id):
        """Get the list of subtasks sent by a task."""
        try:
            return self.get_task_meta(task_id)['children']
        except KeyError:
            pass

    def get_task_meta(self, task_id, cache=True):
        if cache:
            try:
                return self._cache[task_id]
            except KeyError:
                pass

        meta = self._get_task_meta_for(task_id)
        if cache and meta.get('status') == states.SUCCESS:
            self._cache[task_id] = meta
        return meta

    def reload_task_result(self, task_id):
        """Reload task result, even if it has been previously fetched."""
        self._cache[task_id] = self.get_task_meta(task_id, cache=False)

    def reload_group_result(self, group_id):
        """Reload group result, even if it has been previously fetched."""
        self._cache[group_id] = self.get_group_meta(group_id, cache=False)

    def get_group_meta(self, group_id, cache=True):
        if cache:
            try:
                return self._cache[group_id]
            except KeyError:
                pass

        meta = self._restore_group(group_id)
        if cache and meta is not None:
            self._cache[group_id] = meta
        return meta

    def restore_group(self, group_id, cache=True):
        """Get the result for a group."""
        meta = self.get_group_meta(group_id, cache=cache)
        if meta:
            return meta['result']

    def save_group(self, group_id, result):
        """Store the result of an executed group."""
        return self._save_group(group_id, result)

    def delete_group(self, group_id):
        self._cache.pop(group_id, None)
        return self._delete_group(group_id)

    def cleanup(self):
        """Backend cleanup. Is run by
        :class:`celery.task.DeleteExpiredTaskMetaTask`."""
        pass

    def process_cleanup(self):
        """Cleanup actions to do at the end of a task worker process."""
        pass

    def on_task_call(self, producer, task_id):
        return {}

    def on_chord_part_return(self, task, propagate=False):
        pass

    def fallback_chord_unlock(self, group_id, body, result=None, **kwargs):
        kwargs['result'] = [r.id for r in result]
        self.app.tasks['celery.chord_unlock'].apply_async((group_id, body, ),
                                                          kwargs, countdown=1)
    on_chord_apply = fallback_chord_unlock

    def current_task_children(self):
        current = current_task()
        if current:
            return [r.serializable() for r in current.request.children]

    def __reduce__(self, args=(), kwargs={}):
        return (unpickle_backend, (self.__class__, args, kwargs))
Example #33
File: base.py, Project: robftz/celery
 def __init__(self, *args, **kwargs):
     super(BaseDictBackend, self).__init__(*args, **kwargs)
     self._cache = LRUCache(limit=kwargs.get("max_cached_results") or self.app.conf.CELERY_MAX_CACHED_RESULTS)