Code example #1
File: base.py  Project: csunny/celery
    def get_many(self, task_ids, timeout=None, interval=0.5, no_ack=True,
                 on_message=None, on_interval=None, max_iterations=None,
                 READY_STATES=states.READY_STATES):
        interval = 0.5 if interval is None else interval
        ids = task_ids if isinstance(task_ids, set) else set(task_ids)
        cached_ids = set()
        cache = self._cache
        for task_id in ids:
            try:
                cached = cache[task_id]
            except KeyError:
                pass
            else:
                if cached['status'] in READY_STATES:
                    yield bytes_to_str(task_id), cached
                    cached_ids.add(task_id)

        ids.difference_update(cached_ids)
        iterations = 0
        while ids:
            keys = list(ids)
            r = self._mget_to_results(self.mget([self.get_key_for_task(k)
                                                 for k in keys]), keys)
            cache.update(r)
            ids.difference_update({bytes_to_str(v) for v in r})
            for key, value in items(r):
                if on_message is not None:
                    on_message(value)
                yield bytes_to_str(key), value
            if timeout and iterations * interval >= timeout:
                raise TimeoutError('Operation timed out ({0})'.format(timeout))
            if on_interval:
                on_interval()
            time.sleep(interval)  # don't busy loop.
            iterations += 1
            if max_iterations and iterations >= max_iterations:
                break
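
For context, get_many is a polling helper on Celery's key/value result backends. Below is a minimal usage sketch, assuming `app` is a configured Celery app whose result backend is a key/value store (so it provides get_many), and `task_ids` are ids of tasks already sent to the broker.

from celery.exceptions import TimeoutError

def collect_ready(app, task_ids):
    """Collect results for tasks that finish within 10 seconds."""
    results = {}
    try:
        # get_many yields (task_id, meta) pairs as each task reaches a ready state.
        for task_id, meta in app.backend.get_many(task_ids, timeout=10.0, interval=0.5):
            results[task_id] = meta['result']
    except TimeoutError:
        pass  # some tasks did not finish in time; return what we have
    return results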
Code example #2
File: base.py  Project: keerthi793/celery
    def wait_for(self,
                 task_id,
                 timeout=None,
                 propagate=True,
                 interval=0.5,
                 no_ack=True,
                 on_interval=None):
        """Wait for task and return its result.

        If the task raises an exception, this exception
        will be re-raised by :func:`wait_for`.

        If `timeout` is not :const:`None`, this raises the
        :class:`celery.exceptions.TimeoutError` exception if the operation
        takes longer than `timeout` seconds.

        """

        time_elapsed = 0.0

        while 1:
            status = self.get_status(task_id)
            if status == states.SUCCESS:
                return self.get_result(task_id)
            elif status in states.PROPAGATE_STATES:
                result = self.get_result(task_id)
                if propagate:
                    raise result
                return result
            if on_interval:
                on_interval()
            # avoid hammering the CPU checking status.
            time.sleep(interval)
            time_elapsed += interval
            if timeout and time_elapsed >= timeout:
                raise TimeoutError('The operation timed out.')
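
In application code this polling loop is usually reached through AsyncResult.get() rather than called directly. A minimal caller-side sketch follows; `add` is an assumed task used only for illustration.

from celery.exceptions import TimeoutError

result = add.delay(2, 2)  # returns an AsyncResult handle
try:
    # get() delegates to the backend's wait_for-style polling shown above
    value = result.get(timeout=5, interval=0.5)
except TimeoutError:
    value = None  # the task did not finish within 5 seconds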
Code example #3
    def iterate(self, timeout=None, propagate=True, interval=0.5):
        """Iterate over the return values of the tasks as they finish
        one by one.

        :raises: The exception if any of the tasks raised an exception.

        """
        elapsed = 0.0
        results = dict(
            (result.task_id, copy(result)) for result in self.results)

        while results:
            removed = set()
            for task_id, result in results.iteritems():
                yield result.get(timeout=timeout and timeout - elapsed,
                                 propagate=propagate,
                                 interval=0.0)
                removed.add(task_id)
            for task_id in removed:
                results.pop(task_id, None)
            time.sleep(interval)
            elapsed += interval
            if timeout and elapsed >= timeout:
                raise TimeoutError("The operation timed out")
Code example #4
    def test_subsequent_syncs_when_job_complete(self):
        # First sync, return a timeout. Ensure that the async_task_id gets set
        cache_id = restore_cache_key(self.domain,
                                     ASYNC_RESTORE_CACHE_KEY_PREFIX,
                                     self.user.user_id)
        with mock.patch('casexml.apps.phone.restore.get_async_restore_payload'
                        ) as task:
            delay = mock.MagicMock()
            delay.id = 'random_task_id'
            delay.get = mock.MagicMock(
                side_effect=TimeoutError())  # task not finished
            task.delay.return_value = delay

            restore_config = self._restore_config(async=True)
            initial_payload = restore_config.get_payload()
            self.assertIsNotNone(restore_config.cache.get(cache_id))
            self.assertIsInstance(initial_payload, AsyncRestoreResponse)
            # new synclog should not have been created
            self.assertIsNone(restore_config.restore_state.current_sync_log)

        # Second sync, don't timeout (can't use AsyncResult in tests, so mock
        # the return value).
        file_restore_response = mock.MagicMock(
            return_value=FileRestoreResponse())
        with mock.patch.object(AsyncResult, 'get',
                               file_restore_response) as get_result:
            with mock.patch.object(AsyncResult, 'status', ASYNC_RESTORE_SENT):
                subsequent_restore = self._restore_config(async=True)
                self.assertIsNotNone(restore_config.cache.get(cache_id))
                subsequent_restore.get_payload()

                # if the task actually ran, the cache should now not have the task id,
                # however, the task is not run in this test. See `test_completed_task_deletes_cache`
                # self.assertIsNone(restore_config.cache.get(cache_id))

                get_result.assert_called_with(timeout=1)
Code example #5
    def test_run_task(self):
        try:
            clean_media.delay().get(timeout=2)
        except TimeoutError as e:
            message = str(e) + ' Is the Celery worker running?'
            raise TimeoutError(message)
Code example #6
File: result.py  Project: joehybird/celery
    def join(self,
             timeout=None,
             propagate=True,
             interval=0.5,
             callback=None,
             no_ack=True,
             on_message=None,
             disable_sync_subtasks=True,
             on_interval=None):
        """Gather the results of all tasks as a list in order.

        Note:
            This can be an expensive operation for result store
            backends that must resort to polling (e.g., database).

            You should consider using :meth:`join_native` if your backend
            supports it.

        Warning:
            Waiting for tasks within a task may lead to deadlocks.
            Please see :ref:`task-synchronous-subtasks`.

        Arguments:
            timeout (float): The number of seconds to wait for results
                before the operation times out.
            propagate (bool): If any of the tasks raises an exception,
                the exception will be re-raised when this flag is set.
            interval (float): Time to wait (in seconds) before retrying to
                retrieve a result from the set.  Note that this does not have
                any effect when using the amqp result store backend,
                as it does not use polling.
            callback (Callable): Optional callback to be called for every
                result received.  Must have signature ``(task_id, value)``
                No results will be returned by this function if a callback
                is specified.  The order of results is also arbitrary when a
                callback is used.  To get access to the result object for
                a particular id you'll have to generate an index first:
                ``index = {r.id: r for r in gres.results.values()}``
                Or you can create new result objects on the fly:
                ``result = app.AsyncResult(task_id)`` (both will
                take advantage of the backend cache anyway).
            no_ack (bool): Automatic message acknowledgment (Note that if this
                is set to :const:`False` then the messages
                *will not be acknowledged*).
            disable_sync_subtasks (bool): Disallow waiting for subtasks from
                within another task; this is the default configuration.
                CAUTION: do not set this to :const:`False` unless you must.

        Raises:
            celery.exceptions.TimeoutError: if ``timeout`` isn't
                :const:`None` and the operation takes longer than ``timeout``
                seconds.
        """
        if disable_sync_subtasks:
            assert_will_not_block()
        time_start = monotonic()
        remaining = None

        if on_message is not None:
            raise ImproperlyConfigured(
                'Backend does not support on_message callback')

        results = []
        for result in self.results:
            remaining = None
            if timeout:
                remaining = timeout - (monotonic() - time_start)
                if remaining <= 0.0:
                    raise TimeoutError('join operation timed out')
            value = result.get(
                timeout=remaining,
                propagate=propagate,
                interval=interval,
                no_ack=no_ack,
                on_interval=on_interval,
            )
            if callback:
                callback(result.id, value)
            else:
                results.append(value)
        return results
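
A minimal usage sketch for join follows; the `add` task and the group size are assumptions used only for illustration.

from celery import group
from celery.exceptions import TimeoutError

result_set = group(add.s(i, i) for i in range(5)).apply_async()
try:
    # blocks until every result arrives, or raises TimeoutError after 10 seconds
    values = result_set.join(timeout=10, propagate=True, interval=0.5)
except TimeoutError:
    values = None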
Code example #7
def on_timeout():
    raise TimeoutError("The operation timed out.")