Example #1
    def MockMessage(self, id=None, receipt_handle=None, body=None):
        m = Mock(name='message')
        m.id = id or uuid()
        m.receipt_handle = receipt_handle or uuid()
        m._body = body

        def _get_body():
            return m._body
        m.get_body.side_effect = _get_body

        def _set_body(value):
            m._body = value
        m.set_body.side_effect = _set_body

        return m
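
A short usage sketch for the factory above; the `self.` receiver and the Mock-based message come from the surrounding test class, so treat this as illustrative only:

    msg = self.MockMessage(body='hello')
    assert msg.get_body() == 'hello'      # get_body side_effect reads m._body
    msg.set_body('world')                 # set_body side_effect writes m._body
    assert msg.get_body() == 'world'
    assert msg.id                         # id falls back to uuid() when omitted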
Example #2
    def test_can_consume(self, stdouts):
        stderr = io.StringIO()
        _restored = []

        class RestoreChannel(virtual.Channel):
            do_restore = True

            def _restore(self, message):
                _restored.append(message)

        assert self.q.can_consume()
        for i in range(self.q.prefetch_count - 1):
            self.q.append(i, uuid())
            assert self.q.can_consume()
        self.q.append(i + 1, uuid())
        assert not self.q.can_consume()

        tag1 = next(iter(self.q._delivered))
        self.q.ack(tag1)
        assert self.q.can_consume()

        tag2 = uuid()
        self.q.append(i + 2, tag2)
        assert not self.q.can_consume()
        self.q.reject(tag2)
        assert self.q.can_consume()

        self.q.channel = RestoreChannel(self.q.channel.connection)
        tag3 = uuid()
        self.q.append(i + 3, tag3)
        self.q.reject(tag3, requeue=True)
        self.q._flush()
        assert self.q._delivered
        assert not self.q._delivered.restored
        self.q.restore_unacked_once(stderr=stderr)
        assert _restored == [11, 9, 8, 7, 6, 5, 4, 3, 2, 1]
        assert self.q._delivered.restored
        assert not self.q._delivered

        self.q.restore_unacked_once(stderr=stderr)
        self.q._delivered.restored = False
        self.q.restore_unacked_once(stderr=stderr)

        assert stderr.getvalue()
        assert not stdouts.stdout.getvalue()

        self.q.restore_at_shutdown = False
        self.q.restore_unacked_once()
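
The `self.q` fixture exercised above is a kombu `virtual.QoS` instance; a minimal, hypothetical construction of such a fixture using kombu's in-memory transport might look like this (the `uuid` import path is assumed for recent kombu versions):

    from kombu import Connection
    from kombu.transport import virtual
    from kombu.utils.uuid import uuid

    channel = Connection('memory://').channel()
    q = virtual.QoS(channel, prefetch_count=10)
    q.append({'body': 'payload'}, uuid())   # track an unacked delivery by tag
    assert q.can_consume()                  # still below the prefetch limit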
Example #3
 def test_fast_trace_task(self):
     from celery.app import trace
     setup_worker_optimizations(self.app)
     assert trace.trace_task_ret is trace._fast_trace_task
     tid = uuid()
     message = self.TaskMessage(self.mytask.name, tid, args=[4])
     assert len(message.payload) == 3
     try:
         self.mytask.__trace__ = build_tracer(
             self.mytask.name, self.mytask, self.app.loader, 'test',
             app=self.app,
         )
         failed, res, runtime = trace.trace_task_ret(
             self.mytask.name, tid, message.headers, message.body,
             message.content_type, message.content_encoding)
         assert not failed
         assert res == repr(4 ** 4)
         assert runtime is not None
         assert isinstance(runtime, numbers.Real)
     finally:
         reset_worker_optimizations()
         assert trace.trace_task_ret is trace._trace_task_ret
     delattr(self.mytask, '__trace__')
     failed, res, runtime = trace.trace_task_ret(
         self.mytask.name, tid, message.headers, message.body,
         message.content_type, message.content_encoding, app=self.app,
     )
     assert not failed
     assert res == repr(4 ** 4)
     assert runtime is not None
     assert isinstance(runtime, numbers.Real)
Example #4
    def freeze(self, _id=None, group_id=None, chord=None,
               root_id=None, parent_id=None):
        """Finalize the signature by adding a concrete task id.

        The task won't be called and you shouldn't call the signature
        twice after freezing it as that'll result in two task messages
        using the same task id.

        Returns:
            ~@AsyncResult: promise of future evaluation.
        """
        # pylint: disable=redefined-outer-name
        #   XXX chord is also a class in outer scope.
        opts = self.options
        try:
            tid = opts['task_id']
        except KeyError:
            tid = opts['task_id'] = _id or uuid()
        if root_id:
            opts['root_id'] = root_id
        if parent_id:
            opts['parent_id'] = parent_id
        if 'reply_to' not in opts:
            opts['reply_to'] = self.app.oid
        if group_id:
            opts['group_id'] = group_id
        if chord:
            opts['chord'] = chord
        # pylint: disable=too-many-function-args
        #   Borks on this, as it's a property.
        return self.AsyncResult(tid)
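
A hypothetical usage sketch, assuming an `add` task registered on a Celery app; it only illustrates the contract described in the docstring above:

    sig = add.s(2, 2)
    res = sig.freeze()                        # assigns a concrete task id now
    assert res.id == sig.options['task_id']   # the promise carries that id
    # sending the frozen signature later reuses the same id
    assert sig.apply_async().id == res.id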
Example #5
    def freeze(self, _id=None, group_id=None, chord=None,
               root_id=None, parent_id=None):
        """Finalize the signature by adding a concrete task id.

        The task won't be called and you shouldn't call the signature
        twice after freezing it as that'll result in two task messages
        using the same task id.

        Returns:
            ~@AsyncResult: promise of future evaluation.
        """
        opts = self.options
        try:
            tid = opts['task_id']
        except KeyError:
            tid = opts['task_id'] = _id or uuid()
        if root_id:
            opts['root_id'] = root_id
        if parent_id:
            opts['parent_id'] = parent_id
        if 'reply_to' not in opts:
            opts['reply_to'] = self.app.oid
        if group_id:
            opts['group_id'] = group_id
        if chord:
            opts['chord'] = chord
        return self.AsyncResult(tid)
Example #6
 def test_on_success__with_events(self):
     job = self.zRequest(id=uuid())
     job.send_event = Mock(name='send_event')
     job.on_success((False, 'foo', 1.0))
     job.send_event.assert_called_with(
         'task-succeeded', result='foo', runtime=1.0,
     )
Example #7
 def _freeze_gid(self, options):
     # remove task_id and use that as the group_id,
     # if we don't remove it then every task will have the same id...
     options = dict(self.options, **options)
     options['group_id'] = group_id = (
         options.pop('task_id', uuid()))
     return options, group_id, options.get('root_id')
Example #8
 def freeze(self, _id=None, group_id=None, chord=None,
            root_id=None, parent_id=None):
     # pylint: disable=redefined-outer-name
     #   XXX chord is also a class in outer scope.
     opts = self.options
     try:
         gid = opts['task_id']
     except KeyError:
         gid = opts['task_id'] = uuid()
     if group_id:
         opts['group_id'] = group_id
     if chord:
         opts['chord'] = chord
     root_id = opts.setdefault('root_id', root_id)
     parent_id = opts.setdefault('parent_id', parent_id)
     new_tasks = []
     # Need to unroll subgroups early so that chord gets the
     # right result instance for chord_unlock etc.
     results = list(self._freeze_unroll(
         new_tasks, group_id, chord, root_id, parent_id,
     ))
     if isinstance(self.tasks, MutableSequence):
         self.tasks[:] = new_tasks
     else:
         self.tasks = new_tasks
     return self.app.GroupResult(gid, results)
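
A hypothetical sketch of calling this group `freeze()`, assuming an `add` task registered on a Celery app:

    from celery import group

    g = group(add.s(i, i) for i in range(3))
    res = g.freeze()                     # a GroupResult: one member per subtask
    assert res.id == g.options['task_id']
    assert len(res.results) == 3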
Example #9
 def test_execute_using_pool__revoked(self):
     tid = uuid()
     job = self.zRequest(id=tid, revoked_tasks={tid})
     job.revoked = Mock()
     job.revoked.return_value = True
     with pytest.raises(TaskRevokedError):
         job.execute_using_pool(self.pool)
Example #10
 def test_revoke_terminate(self):
     request = Mock()
     request.id = tid = uuid()
     state = self.create_state()
     state.consumer = Mock()
     worker_state.task_reserved(request)
     try:
         r = control.revoke(state, tid, terminate=True)
         self.assertIn(tid, revoked)
         self.assertTrue(request.terminate.call_count)
         self.assertIn('terminate:', r['ok'])
         # unknown task id only revokes
         r = control.revoke(state, uuid(), terminate=True)
         self.assertIn('tasks unknown', r['ok'])
     finally:
         worker_state.task_ready(request)
Example #11
    def run(self, header, body, partial_args, app=None, interval=None,
            countdown=1, max_retries=None, eager=False,
            task_id=None, **options):
        app = app or self._get_app(body)
        group_id = header.options.get('task_id') or uuid()
        root_id = body.options.get('root_id')
        body.chord_size = self.__length_hint__()
        options = dict(self.options, **options) if options else self.options
        if options:
            options.pop('task_id', None)
            body.options.update(options)

        results = header.freeze(
            group_id=group_id, chord=body, root_id=root_id).results
        bodyres = body.freeze(task_id, root_id=root_id)

        # Chains should not be passed to the header tasks. See #3771
        options.pop('chain', None)

        parent = app.backend.apply_chord(
            header, partial_args, group_id, body,
            interval=interval, countdown=countdown,
            options=options, max_retries=max_retries,
            result=results)
        bodyres.parent = parent
        return bodyres
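
This `run()` is what ultimately executes when a chord is applied; a hypothetical invocation, assuming `add` and `tsum` tasks and a running worker:

    from celery import chord

    res = chord(add.s(i, i) for i in range(10))(tsum.s())
    # `res` corresponds to `bodyres` above; its .parent is the header result
    print(res.get(timeout=30))   # -> 90 once every header task has finished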
Example #12
 def test_revoke_terminate(self):
     request = Mock()
     request.id = tid = uuid()
     state = self.create_state()
     state.consumer = Mock()
     worker_state.task_reserved(request)
     try:
         r = control.revoke(state, tid, terminate=True)
         assert tid in revoked
         assert request.terminate.call_count
         assert 'terminate:' in r['ok']
         # unknown task id only revokes
         r = control.revoke(state, uuid(), terminate=True)
         assert 'tasks unknown' in r['ok']
     finally:
         worker_state.task_ready(request)
Example #13
    def send_task(self, name, args=None, kwargs=None, countdown=None,
                  eta=None, task_id=None, producer=None, connection=None,
                  router=None, result_cls=None, expires=None,
                  publisher=None, link=None, link_error=None,
                  add_to_parent=True, group_id=None, retries=0, chord=None,
                  reply_to=None, time_limit=None, soft_time_limit=None,
                  root_id=None, parent_id=None, route_name=None,
                  shadow=None, chain=None, task_type=None, **options):
        """Send task by name.

        Supports the same arguments as :meth:`@-Task.apply_async`.

        Arguments:
            name (str): Name of task to call (e.g. `"tasks.add"`).
            result_cls (~@AsyncResult): Specify custom result class.
        """
        parent = have_parent = None
        amqp = self.amqp
        task_id = task_id or uuid()
        producer = producer or publisher  # XXX compat
        router = router or amqp.router
        conf = self.conf
        if conf.task_always_eager:  # pragma: no cover
            warnings.warn(AlwaysEagerIgnored(
                'task_always_eager has no effect on send_task',
            ), stacklevel=2)
        options = router.route(
            options, route_name or name, args, kwargs, task_type)

        if root_id is None:
            parent, have_parent = self.current_worker_task, True
            if parent:
                root_id = parent.request.root_id or parent.request.id
        if parent_id is None:
            if not have_parent:
                parent, have_parent = self.current_worker_task, True
            if parent:
                parent_id = parent.request.id

        message = amqp.create_task_message(
            task_id, name, args, kwargs, countdown, eta, group_id,
            expires, retries, chord,
            maybe_list(link), maybe_list(link_error),
            reply_to or self.oid, time_limit, soft_time_limit,
            self.conf.task_send_sent_event,
            root_id, parent_id, shadow, chain,
        )

        if connection:
            producer = amqp.Producer(connection)
        with self.producer_or_acquire(producer) as P:
            self.backend.on_task_call(P, task_id)
            amqp.send_task_message(P, name, message, **options)
        result = (result_cls or self.AsyncResult)(task_id)
        if add_to_parent:
            if not have_parent:
                parent, have_parent = self.current_worker_task, True
            if parent:
                parent.add_trail(result)
        return result
Example #14
 def test_execute(self):
     tid = uuid()
     job = self.xRequest(id=tid, args=[4], kwargs={})
     assert job.execute() == 256
     meta = self.mytask.backend.get_task_meta(tid)
     assert meta['status'] == states.SUCCESS
     assert meta['result'] == 256
Example #15
 def test_execute(self):
     tid = uuid()
     job = self.xRequest(id=tid, args=[4], kwargs={})
     self.assertEqual(job.execute(), 256)
     meta = self.mytask.backend.get_task_meta(tid)
     self.assertEqual(meta['status'], states.SUCCESS)
     self.assertEqual(meta['result'], 256)
Example #16
 def test_fast_trace_task(self):
     from celery.app import trace
     setup_worker_optimizations(self.app)
     self.assertIs(trace.trace_task_ret, trace._fast_trace_task)
     tid = uuid()
     message = TaskMessage(self.mytask.name, tid, args=[4])
     assert len(message.payload) == 3
     try:
         self.mytask.__trace__ = build_tracer(
             self.mytask.name, self.mytask, self.app.loader, 'test',
             app=self.app,
         )
         failed, res, runtime = trace.trace_task_ret(
             self.mytask.name, tid, message.headers, message.body,
             message.content_type, message.content_encoding)
         self.assertFalse(failed)
         self.assertEqual(res, repr(4 ** 4))
         self.assertIsNotNone(runtime)
         self.assertIsInstance(runtime, numbers.Real)
     finally:
         reset_worker_optimizations()
         self.assertIs(trace.trace_task_ret, trace._trace_task_ret)
     delattr(self.mytask, '__trace__')
     failed, res, runtime = trace.trace_task_ret(
         self.mytask.name, tid, message.headers, message.body,
         message.content_type, message.content_encoding, app=self.app,
     )
     self.assertFalse(failed)
     self.assertEqual(res, repr(4 ** 4))
     self.assertIsNotNone(runtime)
     self.assertIsInstance(runtime, numbers.Real)
Example #17
 def test_revoke_with_name_not_in_registry(self):
     tid = uuid()
     m = {'method': 'revoke',
          'destination': hostname,
          'arguments': {'task_id': tid,
                        'task_name': 'xxxxxxxxx33333333388888'}}
     self.panel.handle_message(m, None)
     self.assertIn(tid, revoked)
Example #18
 def test_revoke_with_name(self):
     tid = uuid()
     m = {'method': 'revoke',
          'destination': hostname,
          'arguments': {'task_id': tid,
                        'task_name': self.mytask.name}}
     self.panel.handle_message(m, None)
     self.assertIn(tid, revoked)
Example #19
def create_message(channel, **data):
    data.setdefault('id', uuid())
    m = Message(channel, body=pickle.dumps(dict(**data)),
                content_type='application/x-python-serialize',
                content_encoding='binary',
                delivery_info={'consumer_tag': 'mock'})
    m.accept = ['application/x-python-serialize']
    return m
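
A hypothetical way to exercise this helper with kombu's in-memory transport (the `Message`, `pickle`, and `uuid` imports are assumed from the surrounding module):

    from kombu import Connection

    channel = Connection('memory://').channel()
    m = create_message(channel, task='tasks.add', args=(2, 2), kwargs={})
    assert m.delivery_info['consumer_tag'] == 'mock'
    assert 'application/x-python-serialize' in m.accept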
Example #20
 def test_process_cleanup_fails(self, _logger):
     self.mytask.backend = Mock()
     self.mytask.backend.process_cleanup = Mock(side_effect=KeyError())
     tid = uuid()
     ret = jail(self.app, tid, self.mytask.name, [2], {})
     self.assertEqual(ret, 4)
     self.mytask.backend.mark_as_done.assert_called()
     self.assertIn('Process cleanup failed', _logger.error.call_args[0][0])
Example #21
 def test_execute_using_pool__expired(self):
     tid = uuid()
     job = self.zRequest(id=tid, revoked_tasks=set())
     job.expires = 1232133
     job.revoked = Mock()
     job.revoked.return_value = True
     with self.assertRaises(TaskRevokedError):
         job.execute_using_pool(self.pool)
Example #22
    def test_can_consume(self, stdout, stderr):
        _restored = []

        class RestoreChannel(virtual.Channel):
            do_restore = True

            def _restore(self, message):
                _restored.append(message)

        self.assertTrue(self.q.can_consume())
        for i in range(self.q.prefetch_count - 1):
            self.q.append(i, uuid())
            self.assertTrue(self.q.can_consume())
        self.q.append(i + 1, uuid())
        self.assertFalse(self.q.can_consume())

        tag1 = next(iter(self.q._delivered))
        self.q.ack(tag1)
        self.assertTrue(self.q.can_consume())

        tag2 = uuid()
        self.q.append(i + 2, tag2)
        self.assertFalse(self.q.can_consume())
        self.q.reject(tag2)
        self.assertTrue(self.q.can_consume())

        self.q.channel = RestoreChannel(self.q.channel.connection)
        tag3 = uuid()
        self.q.append(i + 3, tag3)
        self.q.reject(tag3, requeue=True)
        self.q._flush()
        self.q.restore_unacked_once()
        self.assertListEqual(_restored, [11, 9, 8, 7, 6, 5, 4, 3, 2, 1])
        self.assertTrue(self.q._delivered.restored)
        self.assertFalse(self.q._delivered)

        self.q.restore_unacked_once()
        self.q._delivered.restored = False
        self.q.restore_unacked_once()

        self.assertTrue(stderr.getvalue())
        self.assertFalse(stdout.getvalue())

        self.q.restore_at_shutdown = False
        self.q.restore_unacked_once()
Example #23
    def test_trace_catches_exception(self):

        @self.app.task(request=None, shared=False)
        def raising():
            raise KeyError('baz')

        with pytest.warns(RuntimeWarning):
            res = trace_task(raising, uuid(), [], {}, app=self.app)[0]
            assert isinstance(res, ExceptionInfo)
Example #24
    def test_revoke(self):
        tid = uuid()
        m = {"method": "revoke", "destination": hostname, "arguments": {"task_id": tid}}
        self.panel.handle_message(m, None)
        assert tid in revoked

        m = {"method": "revoke", "destination": "does.not.exist", "arguments": {"task_id": tid + "xxx"}}
        self.panel.handle_message(m, None)
        assert tid + "xxx" not in revoked
Example #25
 def test_execute_using_pool__defaults_of_hybrid_to_proto2(self):
     weakref_ref = Mock(name='weakref.ref')
     headers = strategy.hybrid_to_proto2('', {'id': uuid(),
                                              'task': self.mytask.name})[1]
     job = self.zRequest(revoked_tasks=set(), ref=weakref_ref, **headers)
     job.execute_using_pool(self.pool)
     assert job._apply_result
     weakref_ref.assert_called_with(self.pool.apply_async())
     assert job._apply_result is weakref_ref()
Example #26
 def test_from_message(self):
     us = 'æØåveéðƒeæ'
     tid = uuid()
     m = TaskMessage(self.mytask.name, tid, args=[2], kwargs={us: 'bar'})
     job = Request(m, app=self.app)
     self.assertIsInstance(job, Request)
     self.assertEqual(job.name, self.mytask.name)
     self.assertEqual(job.id, tid)
     self.assertIs(job.message, m)
Example #27
 def test_revoke_with_name_not_in_registry(self):
     tid = uuid()
     m = {
         "method": "revoke",
         "destination": hostname,
         "arguments": {"task_id": tid, "task_name": "xxxxxxxxx33333333388888"},
     }
     self.panel.handle_message(m, None)
     assert tid in revoked
Example #28
 def test_process_cleanup_fails(self, patching):
     _logger = patching('celery.app.trace.logger')
     self.mytask.backend = Mock()
     self.mytask.backend.process_cleanup = Mock(side_effect=KeyError())
     tid = uuid()
     ret = jail(self.app, tid, self.mytask.name, [2], {})
     assert ret == 4
     self.mytask.backend.mark_as_done.assert_called()
     assert 'Process cleanup failed' in _logger.error.call_args[0][0]
Example #29
    def test_marked_as_started(self):
        _started = []

        def store_result(tid, meta, state, **kwargs):
            if state == states.STARTED:
                _started.append(tid)
        self.mytask.backend.store_result = Mock(name='store_result')
        self.mytask.backend.store_result.side_effect = store_result
        self.mytask.track_started = True

        tid = uuid()
        jail(self.app, tid, self.mytask.name, [2], {})
        assert tid in _started

        self.mytask.ignore_result = True
        tid = uuid()
        jail(self.app, tid, self.mytask.name, [2], {})
        assert tid not in _started
Example #30
    def election(self):
        type = self.type
        app = type.app
        tid = self.options.get('task_id') or uuid()

        with app.producer_or_acquire(None) as P:
            props = type.backend.on_task_call(P, tid)
            app.control.election(tid, 'task', self.clone(task_id=tid, **props),
                                 connection=P.connection)
            return type.AsyncResult(tid)
Example #31
 def test_trace_task_ret(self):
     self.mytask.__trace__ = build_tracer(
         self.mytask.name,
         self.mytask,
         self.app.loader,
         'test',
         app=self.app,
     )
     tid = uuid()
     message = self.TaskMessage(self.mytask.name, tid, args=[4])
     _, R, _ = _trace_task_ret(
         self.mytask.name,
         tid,
         message.headers,
         message.body,
         message.content_type,
         message.content_encoding,
         app=self.app,
     )
     assert R == repr(4**4)
Example #32
 def test_worker_task_trace_handle_retry(self):
     tid = uuid()
     self.mytask.push_request(id=tid)
     try:
         raise ValueError('foo')
     except Exception as exc:
         try:
             raise Retry(str(exc), exc=exc)
         except Retry as exc:
             w = TraceInfo(states.RETRY, exc)
             w.handle_retry(
                 self.mytask, self.mytask.request, store_errors=False,
             )
             assert self.mytask.backend.get_status(tid) == states.PENDING
             w.handle_retry(
                 self.mytask, self.mytask.request, store_errors=True,
             )
             assert self.mytask.backend.get_status(tid) == states.RETRY
     finally:
         self.mytask.pop_request()
Example #33
 def test_execute_using_pool_with_use_fast_trace_task(self):
     self.app.use_fast_trace_task = True
     weakref_ref = Mock(name='weakref.ref')
     job = self.zRequest(id=uuid(), revoked_tasks=set(), ref=weakref_ref)
     job.execute_using_pool(self.pool)
     self.pool.apply_async.assert_called_with(
         fast_trace_task,
         args=(job.type, job.id, job.request_dict, job.body,
               job.content_type, job.content_encoding),
         accept_callback=job.on_accepted,
         timeout_callback=job.on_timeout,
         callback=job.on_success,
         error_callback=job.on_failure,
         soft_timeout=self.task.soft_time_limit,
         timeout=self.task.time_limit,
         correlation_id=job.id,
     )
     assert job._apply_result
     weakref_ref.assert_called_with(self.pool.apply_async())
     assert job._apply_result is weakref_ref()
Example #34
    def test_reply__collect_uses_default_channel(self):
        class ConsumerCalled(Exception):
            pass

        def fake_Consumer(channel, *args, **kwargs):
            raise ConsumerCalled(channel)

        ticket = uuid()
        with patch('kombu.pidbox.Consumer') as Consumer:
            mailbox = pidbox.Mailbox('test_reply__collect')(self.connection)
            assert mailbox.connection.default_channel is not None
            Consumer.side_effect = fake_Consumer
            try:
                mailbox._collect(ticket, limit=1)
            except ConsumerCalled as c:
                assert c.args[0] is not None
            except Exception:
                raise
            else:
                assert False, "Consumer not called"
Example #35
 def test_fast_trace_task(self):
     assert self.app.use_fast_trace_task is False
     setup_worker_optimizations(self.app)
     assert self.app.use_fast_trace_task is True
     tid = uuid()
     message = self.TaskMessage(self.mytask.name, tid, args=[4])
     assert len(message.payload) == 3
     try:
         self.mytask.__trace__ = build_tracer(
             self.mytask.name,
             self.mytask,
             self.app.loader,
             'test',
             app=self.app,
         )
         failed, res, runtime = fast_trace_task(self.mytask.name, tid,
                                                message.headers,
                                                message.body,
                                                message.content_type,
                                                message.content_encoding)
         assert not failed
         assert res == repr(4**4)
         assert runtime is not None
         assert isinstance(runtime, numbers.Real)
     finally:
         reset_worker_optimizations(self.app)
         assert self.app.use_fast_trace_task is False
     delattr(self.mytask, '__trace__')
     failed, res, runtime = trace_task_ret(
         self.mytask.name,
         tid,
         message.headers,
         message.body,
         message.content_type,
         message.content_encoding,
         app=self.app,
     )
     assert not failed
     assert res == repr(4**4)
     assert runtime is not None
     assert isinstance(runtime, numbers.Real)
Example #36
    def test_revoke(self):
        tid = uuid()
        m = {
            'method': 'revoke',
            'destination': hostname,
            'arguments': {
                'task_id': tid
            }
        }
        self.panel.handle_message(m, None)
        self.assertIn(tid, revoked)

        m = {
            'method': 'revoke',
            'destination': 'does.not.exist',
            'arguments': {
                'task_id': tid + 'xxx'
            }
        }
        self.panel.handle_message(m, None)
        self.assertNotIn(tid + 'xxx', revoked)
Example #37
    def test_revoke(self):
        tid = uuid()
        m = {
            'method': 'revoke',
            'destination': hostname,
            'arguments': {
                'task_id': tid,
            },
        }
        self.panel.handle_message(m, None)
        assert tid in revoked

        m = {
            'method': 'revoke',
            'destination': 'does.not.exist',
            'arguments': {
                'task_id': tid + 'xxx',
            },
        }
        self.panel.handle_message(m, None)
        assert tid + 'xxx' not in revoked
Example #38
    def apply_async(self,
                    args=None,
                    kwargs=None,
                    task_id=None,
                    producer=None,
                    link=None,
                    link_error=None,
                    shadow=None,
                    **options):
        if self.singleton_config.app.conf.task_always_eager:
            return super(Singleton,
                         self).apply_async(args, kwargs, task_id, producer,
                                           link, link_error, shadow, **options)

        args = args or []
        kwargs = kwargs or {}
        task_id = task_id or uuid()
        lock = self.generate_lock(self.name, args, kwargs)

        run_args = dict(lock=lock,
                        args=args,
                        kwargs=kwargs,
                        task_id=task_id,
                        producer=producer,
                        link=link,
                        link_error=link_error,
                        shadow=shadow,
                        **options)

        task = self.lock_and_run(**run_args)
        if task:
            return task

        existing_task_id = self.get_existing_task_id(lock)
        while not existing_task_id:
            task = self.lock_and_run(**run_args)
            if task:
                return task
            existing_task_id = self.get_existing_task_id(lock)
        return self.on_duplicate(existing_task_id)
Example #39
    def run(self, header, body, partial_args, app=None, interval=None,
            countdown=1, max_retries=None, eager=False,
            task_id=None, **options):
        app = app or self._get_app(body)
        group_id = header.options.get('task_id') or uuid()
        root_id = body.options.get('root_id')
        body.chord_size = self.__length_hint__()
        options = dict(self.options, **options) if options else self.options
        if options:
            options.pop('task_id', None)
            body.options.update(options)

        bodyres = body.freeze(task_id, root_id=root_id)

        # Chains should not be passed to the header tasks. See #3771
        options.pop('chain', None)
        # Neither should chords, for deeply nested chords to work
        options.pop('chord', None)
        options.pop('task_id', None)

        header.freeze(group_id=group_id, chord=body, root_id=root_id)
        header_result = header(*partial_args, task_id=group_id, **options)

        if len(header_result) > 0:
            app.backend.apply_chord(
                header_result,
                body,
                interval=interval,
                countdown=countdown,
                max_retries=max_retries,
            )
        # The execution of a chord body is normally triggered by its header's
        # tasks completing. If the header is empty this will never happen, so
        # we execute the body manually here.
        else:
            body.delay([])

        bodyres.parent = header_result
        return bodyres
Example #40
    def apply_async(
        self,
        args=None,
        kwargs=None,
        task_id=None,
        producer=None,
        link=None,
        link_error=None,
        shadow=None,
        **options
    ):
        args = args or []
        kwargs = kwargs or {}
        task_id = task_id or uuid()
        lock = self.generate_lock(self.name, args, kwargs)

        run_args = dict(
            lock=lock,
            args=args,
            kwargs=kwargs,
            task_id=task_id,
            producer=producer,
            link=link,
            link_error=link_error,
            shadow=shadow,
            **options
        )

        task = self.lock_and_run(**run_args)
        if task:
            return task

        existing_task_id = self.get_existing_task_id(lock)
        while not existing_task_id:
            task = self.lock_and_run(**run_args)
            if task:
                return task
            existing_task_id = self.get_existing_task_id(lock)
        return self.AsyncResult(existing_task_id)
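
A sketch of how a task built on this override is typically declared. The `Singleton` base and its deduplication semantics are assumed here from a package such as celery-singleton, and the ids-match assertion only holds while the first task still owns the lock:

    @app.task(base=Singleton)
    def do_work(x):
        return x * 2

    first = do_work.apply_async(args=(1,))
    second = do_work.apply_async(args=(1,))  # same args -> same generated lock
    assert first.id == second.id             # duplicate returns the existing id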
Example #41
 def test_fast_trace_task(self):
     from celery.app import trace
     setup_worker_optimizations(self.app)
     self.assertIs(trace.trace_task_ret, trace._fast_trace_task)
     tid = uuid()
     message = TaskMessage(self.mytask.name, tid, args=[4])
     assert len(message.payload) == 3
     try:
         self.mytask.__trace__ = build_tracer(
             self.mytask.name,
             self.mytask,
             self.app.loader,
             'test',
             app=self.app,
         )
         failed, res, runtime = trace.trace_task_ret(
             self.mytask.name, tid, message.headers, message.body,
             message.content_type, message.content_encoding)
         self.assertFalse(failed)
         self.assertEqual(res, repr(4**4))
         self.assertIsNotNone(runtime)
         self.assertIsInstance(runtime, numbers.Real)
     finally:
         reset_worker_optimizations()
         self.assertIs(trace.trace_task_ret, trace._trace_task_ret)
     delattr(self.mytask, '__trace__')
     failed, res, runtime = trace.trace_task_ret(
         self.mytask.name,
         tid,
         message.headers,
         message.body,
         message.content_type,
         message.content_encoding,
         app=self.app,
     )
     self.assertFalse(failed)
     self.assertEqual(res, repr(4**4))
     self.assertIsNotNone(runtime)
     self.assertIsInstance(runtime, numbers.Real)
Example #42
 def test_worker_task_trace_handle_failure(self):
     tid = uuid()
     self.mytask.push_request()
     try:
         self.mytask.request.id = tid
         try:
             raise ValueError('foo')
         except Exception as exc:
             w = TraceInfo(states.FAILURE, exc)
             w.handle_failure(
                 self.mytask,
                 self.mytask.request,
                 store_errors=False,
             )
             assert self.mytask.backend.get_status(tid) == states.PENDING
             w.handle_failure(
                 self.mytask,
                 self.mytask.request,
                 store_errors=True,
             )
             assert self.mytask.backend.get_status(tid) == states.FAILURE
     finally:
         self.mytask.pop_request()
Example #43
 def test_execute_using_pool_with_none_timelimit_header(self):
     from celery.app.trace import trace_task_ret as trace
     weakref_ref = Mock(name='weakref.ref')
     job = self.zRequest(id=uuid(),
                         revoked_tasks=set(),
                         ref=weakref_ref,
                         headers={'timelimit': None})
     job.execute_using_pool(self.pool)
     self.pool.apply_async.assert_called_with(
         trace,
         args=(job.type, job.id, job.request_dict, job.body,
               job.content_type, job.content_encoding),
         accept_callback=job.on_accepted,
         timeout_callback=job.on_timeout,
         callback=job.on_success,
         error_callback=job.on_failure,
         soft_timeout=self.task.soft_time_limit,
         timeout=self.task.time_limit,
         correlation_id=job.id,
     )
     assert job._apply_result
     weakref_ref.assert_called_with(self.pool.apply_async())
     assert job._apply_result is weakref_ref()
Example #44
def Mutex(client, name, expire):
    lock_id = uuid()
    i_won = client.setnx(name, lock_id)
    try:
        if i_won:
            client.expire(name, expire)
            yield
        else:
            if not client.ttl(name):
                client.expire(name, expire)
            raise MutexHeld()
    finally:
        if i_won:
            try:
                with client.pipeline(True) as pipe:
                    pipe.watch(name)
                    if pipe.get(name) == lock_id:
                        pipe.multi()
                        pipe.delete(name)
                        pipe.execute()
                    pipe.unwatch()
            except redis.WatchError:
                pass
Example #45
    def run(self, header, body, partial_args, app=None, interval=None,
            countdown=1, max_retries=None, eager=False,
            task_id=None, **options):
        app = app or self._get_app(body)
        group_id = header.options.get('task_id') or uuid()
        root_id = body.options.get('root_id')
        body.chord_size = self.__length_hint__()
        options = dict(self.options, **options) if options else self.options
        if options:
            options.pop('task_id', None)
            body.options.update(options)

        results = header.freeze(
            group_id=group_id, chord=body, root_id=root_id).results
        bodyres = body.freeze(task_id, root_id=root_id)

        parent = app.backend.apply_chord(
            header, partial_args, group_id, body,
            interval=interval, countdown=countdown,
            options=options, max_retries=max_retries,
            result=results)
        bodyres.parent = parent
        return bodyres
Example #46
    def test_basic_publish__get__consume__restore(self,
                                                  n='test_basic_publish'):
        c = memory_client().channel()

        c.exchange_declare(n)
        c.queue_declare(n)
        c.queue_bind(n, n, n)
        c.queue_declare(n + '2')
        c.queue_bind(n + '2', n, n)

        m = c.prepare_message('nthex quick brown fox...')
        c.basic_publish(m, n, n)

        r1 = c.message_to_python(c.basic_get(n))
        assert r1
        assert r1.body == 'nthex quick brown fox...'.encode('utf-8')
        assert c.basic_get(n) is None

        consumer_tag = uuid()

        c.basic_consume(n + '2', False,
                        consumer_tag=consumer_tag, callback=lambda *a: None)
        assert n + '2' in c._active_queues
        r2, _ = c.drain_events()
        r2 = c.message_to_python(r2)
        assert r2.body == 'nthex quick brown fox...'.encode('utf-8')
        assert r2.delivery_info['exchange'] == n
        assert r2.delivery_info['routing_key'] == n
        with pytest.raises(virtual.Empty):
            c.drain_events()
        c.basic_cancel(consumer_tag)

        c._restore(r2)
        r3 = c.message_to_python(c.basic_get(n))
        assert r3
        assert r3.body == 'nthex quick brown fox...'.encode('utf-8')
        assert c.basic_get(n) is None
Example #47
 def freeze(self, _id=None, group_id=None, chord=None,
            root_id=None, parent_id=None):
     opts = self.options
     try:
         gid = opts['task_id']
     except KeyError:
         gid = opts['task_id'] = uuid()
     if group_id:
         opts['group_id'] = group_id
     if chord:
         opts['chord'] = chord
     root_id = opts.setdefault('root_id', root_id)
     parent_id = opts.setdefault('parent_id', parent_id)
     new_tasks = []
     # Need to unroll subgroups early so that chord gets the
     # right result instance for chord_unlock etc.
     results = list(self._freeze_unroll(
         new_tasks, group_id, chord, root_id, parent_id,
     ))
     if isinstance(self.tasks, MutableSequence):
         self.tasks[:] = new_tasks
     else:
         self.tasks = new_tasks
     return self.app.GroupResult(gid, results)
Example #48
def Mutex(client, name, expire):
    """The Redis lock implementation (probably shaky)."""
    lock_id = uuid()
    i_won = client.setnx(name, lock_id)
    try:
        if i_won:
            client.expire(name, expire)
            yield
        else:
            if not client.ttl(name):
                client.expire(name, expire)
            raise MutexHeld()
    finally:
        if i_won:
            try:
                with client.pipeline(True) as pipe:
                    pipe.watch(name)
                    if bytes_to_str(pipe.get(name)) == lock_id:
                        pipe.multi()
                        pipe.delete(name)
                        pipe.execute()
                    pipe.unwatch()
            except redis.WatchError:
                pass
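
In the Celery source this generator is wrapped with `contextlib.contextmanager`; a hypothetical usage sketch, assuming a `redis.StrictRedis` client and the `MutexHeld` exception raised above:

    from contextlib import contextmanager
    import redis

    client = redis.StrictRedis()
    mutex = contextmanager(Mutex)   # Mutex as defined above

    try:
        with mutex(client, 'celery-beat-lock', expire=30):
            pass  # critical section: at most one holder until the key expires
    except MutexHeld:
        pass  # another process won the setnx race; back off and retry later

Note the `bytes_to_str()` call that distinguishes this version from the earlier one above: redis-py returns bytes, so under Python 3 the stored lock id must be decoded before comparing it to `lock_id`.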
Example #49
 def test_on_success__calls_failure(self):
     job = self.zRequest(id=uuid())
     einfo = Mock(name='einfo')
     job.on_failure = Mock(name='on_failure')
     job.on_success((True, einfo, 1.0))
     job.on_failure.assert_called_with(einfo, return_ok=True)
Example #50
 def test_on_success__no_events(self):
     self.eventer = None
     job = self.zRequest(id=uuid())
     job.send_event = Mock(name='send_event')
     job.on_success((False, 'foo', 1.0))
     job.send_event.assert_not_called()
Example #51
    def apply(self,
              args=None,
              kwargs=None,
              link=None,
              link_error=None,
              task_id=None,
              retries=None,
              throw=None,
              logfile=None,
              loglevel=None,
              headers=None,
              **options):
        """Execute this task locally, by blocking until the task returns.

        Arguments:
            args (Tuple): positional arguments passed on to the task.
            kwargs (Dict): keyword arguments passed on to the task.
            throw (bool): Re-raise task exceptions.
                Defaults to the :setting:`task_eager_propagates` setting.

        Returns:
            celery.result.EagerResult: pre-evaluated result.
        """
        # trace imports Task, so need to import inline.
        from celery.app.trace import build_tracer

        app = self._get_app()
        args = args or ()
        kwargs = kwargs or {}
        task_id = task_id or uuid()
        retries = retries or 0
        if throw is None:
            throw = app.conf.task_eager_propagates

        # Make sure we get the task instance, not class.
        task = app._tasks[self.name]

        request = {
            'id': task_id,
            'retries': retries,
            'is_eager': True,
            'logfile': logfile,
            'loglevel': loglevel or 0,
            'hostname': gethostname(),
            'callbacks': maybe_list(link),
            'errbacks': maybe_list(link_error),
            'headers': headers,
            'delivery_info': {
                'is_eager': True
            },
        }
        tb = None
        tracer = build_tracer(
            task.name,
            task,
            eager=True,
            propagate=throw,
            app=self._get_app(),
        )
        ret = tracer(task_id, args, kwargs, request)
        retval = ret.retval
        if isinstance(retval, ExceptionInfo):
            retval, tb = retval.exception, retval.traceback
        state = states.SUCCESS if ret.info is None else ret.info.state
        return EagerResult(task_id, retval, state, traceback=tb)
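
A hypothetical eager invocation, assuming an `add` task registered on a Celery app:

    res = add.apply(args=(2, 2))   # runs inline and returns an EagerResult
    assert res.successful()
    assert res.get() == 4
    assert res.id                  # the uuid() assigned when task_id is omitted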
Example #52
    def apply_async(self,
                    args=None,
                    kwargs=None,
                    task_id=None,
                    producer=None,
                    link=None,
                    link_error=None,
                    shadow=None,
                    **options):
        """Apply tasks asynchronously by sending a message.

        Arguments:
            args (Tuple): The positional arguments to pass on to the task.

            kwargs (Dict): The keyword arguments to pass on to the task.

            countdown (float): Number of seconds into the future that the
                task should execute.  Defaults to immediate execution.

            eta (~datetime.datetime): Absolute time and date of when the task
                should be executed.  May not be specified if `countdown`
                is also supplied.

            expires (float, ~datetime.datetime): Datetime or
                seconds in the future when the task should expire.
                The task won't be executed after the expiration time.

            shadow (str): Override task name used in logs/monitoring.
                Default is retrieved from :meth:`shadow_name`.

            connection (kombu.Connection): Re-use existing broker connection
                instead of acquiring one from the connection pool.

            retry (bool): If enabled, sending of the task message will be
                retried in the event of connection loss or failure.
                Default is taken from the :setting:`task_publish_retry`
                setting.  Note that you need to handle the
                producer/connection manually for this to work.

            retry_policy (Mapping): Override the retry policy used.
                See the :setting:`task_publish_retry_policy` setting.

            queue (str, kombu.Queue): The queue to route the task to.
                This must be a key present in :setting:`task_queues`, or
                :setting:`task_create_missing_queues` must be
                enabled.  See :ref:`guide-routing` for more
                information.

            exchange (str, kombu.Exchange): Named custom exchange to send the
                task to.  Usually not used in combination with the ``queue``
                argument.

            routing_key (str): Custom routing key used to route the task to a
                worker server.  If in combination with a ``queue`` argument
                only used to specify custom routing keys to topic exchanges.

            priority (int): The task priority, a number between 0 and 9.
                Defaults to the :attr:`priority` attribute.

            serializer (str): Serialization method to use.
                Can be `pickle`, `json`, `yaml`, `msgpack` or any custom
                serialization method that's been registered
                with :mod:`kombu.serialization.registry`.
                Defaults to the :attr:`serializer` attribute.

            compression (str): Optional compression method
                to use.  Can be one of ``zlib``, ``bzip2``,
                or any custom compression methods registered with
                :func:`kombu.compression.register`.
                Defaults to the :setting:`task_compression` setting.

            link (Signature): A single, or a list of tasks signatures
                to apply if the task returns successfully.

            link_error (Signature): A single, or a list of task signatures
                to apply if an error occurs while executing the task.

            producer (kombu.Producer): custom producer to use when publishing
                the task.

            add_to_parent (bool): If set to True (default) and the task
                is applied while executing another task, then the result
                will be appended to the parent task's ``request.children``
                attribute.  Trailing can also be disabled by default using the
                :attr:`trail` attribute.

            publisher (kombu.Producer): Deprecated alias to ``producer``.

            headers (Dict): Message headers to be included in the message.

        Returns:
            celery.result.AsyncResult: Promise of future evaluation.

        Raises:
            TypeError: If not enough arguments are passed, or too many
                arguments are passed.  Note that signature checks may
                be disabled by specifying ``@task(typing=False)``.
            kombu.exceptions.OperationalError: If a connection to the
               transport cannot be made, or if the connection is lost.

        Note:
            Also supports all keyword arguments supported by
            :meth:`kombu.Producer.publish`.
        """
        if self.typing:
            try:
                check_arguments = self.__header__
            except AttributeError:  # pragma: no cover
                pass
            else:
                check_arguments(*(args or ()), **(kwargs or {}))

        app = self._get_app()
        if app.conf.task_always_eager:
            with app.producer_or_acquire(producer) as eager_producer:
                serializer = options.get(
                    'serializer',
                    (eager_producer.serializer if eager_producer.serializer
                     else app.conf.task_serializer))
                body = args, kwargs
                content_type, content_encoding, data = serialization.dumps(
                    body,
                    serializer,
                )
                args, kwargs = serialization.loads(data,
                                                   content_type,
                                                   content_encoding,
                                                   accept=[content_type])
            with denied_join_result():
                return self.apply(args,
                                  kwargs,
                                  task_id=task_id or uuid(),
                                  link=link,
                                  link_error=link_error,
                                  **options)

        if self.__v2_compat__:
            shadow = shadow or self.shadow_name(self(), args, kwargs, options)
        else:
            shadow = shadow or self.shadow_name(args, kwargs, options)

        preopts = self._get_exec_options()
        options = dict(preopts, **options) if options else preopts

        options.setdefault('ignore_result', self.ignore_result)
        if self.priority:
            options.setdefault('priority', self.priority)

        return app.send_task(self.name,
                             args,
                             kwargs,
                             task_id=task_id,
                             producer=producer,
                             link=link,
                             link_error=link_error,
                             result_cls=self.AsyncResult,
                             shadow=shadow,
                             task_type=self,
                             **options)
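
A hypothetical asynchronous invocation, assuming an `add` task, a configured broker, and a running worker:

    res = add.apply_async(args=(2, 2), countdown=5, expires=120)
    print(res.id)                # uuid() assigned here when no task_id is passed
    print(res.get(timeout=30))   # -> 4 once a worker has picked the task up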
Example #53
    def send_task(self,
                  name,
                  args=None,
                  kwargs=None,
                  countdown=None,
                  eta=None,
                  task_id=None,
                  producer=None,
                  connection=None,
                  router=None,
                  result_cls=None,
                  expires=None,
                  publisher=None,
                  link=None,
                  link_error=None,
                  add_to_parent=True,
                  group_id=None,
                  retries=0,
                  chord=None,
                  reply_to=None,
                  time_limit=None,
                  soft_time_limit=None,
                  root_id=None,
                  parent_id=None,
                  route_name=None,
                  shadow=None,
                  chain=None,
                  task_type=None,
                  **options):
        """Send task by name.

        Supports the same arguments as :meth:`@-Task.apply_async`.

        Arguments:
            name (str): Name of task to call (e.g., `"tasks.add"`).
            result_cls (AsyncResult): Specify custom result class.
        """
        parent = have_parent = None
        amqp = self.amqp
        task_id = task_id or uuid()
        producer = producer or publisher  # XXX compat
        router = router or amqp.router
        conf = self.conf
        if conf.task_always_eager:  # pragma: no cover
            warnings.warn(AlwaysEagerIgnored(
                'task_always_eager has no effect on send_task', ),
                          stacklevel=2)

        ignored_result = options.pop('ignore_result', False)
        options = router.route(options, route_name or name, args, kwargs,
                               task_type)

        if not root_id or not parent_id:
            parent = self.current_worker_task
            if parent:
                if not root_id:
                    root_id = parent.request.root_id or parent.request.id
                if not parent_id:
                    parent_id = parent.request.id

        message = amqp.create_task_message(
            task_id,
            name,
            args,
            kwargs,
            countdown,
            eta,
            group_id,
            expires,
            retries,
            chord,
            maybe_list(link),
            maybe_list(link_error),
            reply_to or self.oid,
            time_limit,
            soft_time_limit,
            self.conf.task_send_sent_event,
            root_id,
            parent_id,
            shadow,
            chain,
            argsrepr=options.get('argsrepr'),
            kwargsrepr=options.get('kwargsrepr'),
        )

        if connection:
            producer = amqp.Producer(connection, auto_declare=False)

        with self.producer_or_acquire(producer) as P:
            with P.connection._reraise_as_library_errors():
                if not ignored_result:
                    self.backend.on_task_call(P, task_id)
                amqp.send_task_message(P, name, message, **options)
        result = (result_cls or self.AsyncResult)(task_id)
        # We avoid using the constructor since a custom result class
        # can be used, in which case the constructor may still use
        # the old signature.
        result.ignored = ignored_result

        if add_to_parent:
            if not have_parent:
                parent, have_parent = self.current_worker_task, True
            if parent:
                parent.add_trail(result)
        return result
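
A hypothetical call by name, assuming a configured `app` instance; unlike `apply_async`, this needs no import of the task itself:

    res = app.send_task('tasks.add', args=(2, 2), queue='celery')
    assert res.id                # the task_id generated by uuid() above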
Example #54
 def test_process_cleanup_BaseException(self):
     self.mytask.backend = Mock()
     self.mytask.backend.process_cleanup = Mock(side_effect=SystemExit())
     with pytest.raises(SystemExit):
         jail(self.app, uuid(), self.mytask.name, [2], {})
Example #55
 def test_execute_jail_success(self):
     ret = jail(self.app, uuid(), self.mytask.name, [2], {})
     assert ret == 4
Example #56
 def test_from_message_empty_args(self):
     tid = uuid()
     m = self.TaskMessage(self.mytask.name, tid, args=[], kwargs={})
     job = Request(m, app=self.app)
     assert isinstance(job, Request)
Example #57
 def test_uuid(self) -> None:
     i1 = uuid()
     i2 = uuid()
     assert isinstance(i1, str)
     assert i1 != i2
Example #58
 def test_on_success__acks_late_disabled(self):
     self.task.acks_late = False
     job = self.zRequest(id=uuid())
     job.acknowledge = Mock(name='ack')
     job.on_success((False, 'foo', 1.0))
     job.acknowledge.assert_not_called()
Example #59
 def _next_delivery_tag(self):
     return uuid()
Example #60
 def test_uuid4(self) -> None:
     assert uuid() != uuid()
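
For reference, the `uuid()` helper exercised throughout these examples is essentially a thin wrapper over the standard library; a minimal sketch consistent with the two tests above (in recent kombu releases the real helper lives in `kombu.utils.uuid`):

    from uuid import uuid4

    def uuid(_uuid=uuid4):
        # a fresh UUID4 as a string, so consecutive calls never compare equal
        return str(_uuid())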