Ejemplo n.º 1
0
 def test_query_task(self):
     """query_task reports 'reserved' and 'active' states for known ids."""
     consumer = Consumer(self.app)
     controller = _WC(app=self.app)
     controller.consumer = consumer
     consumer.controller = controller
     panel = self.create_panel(consumer=consumer)
     panel.app = self.app
     request = Request(
         self.TaskMessage(self.mytask.name, args=(2, 2)),
         app=self.app,
     )
     worker_state.task_reserved(request)
     try:
         # An unknown id yields an empty (falsy) reply.
         assert not panel.handle('query_task', {'ids': {'1daa'}})
         reply = panel.handle('query_task', {'ids': {request.id}})
         assert request.id in reply
         assert reply[request.id][0] == 'reserved'
         worker_state.active_requests.add(request)
         try:
             reply = panel.handle('query_task', {'ids': {request.id}})
             assert reply[request.id][0] == 'active'
         finally:
             worker_state.active_requests.clear()
         # Once no longer active, the request reads as reserved again.
         reply = panel.handle('query_task', {'ids': {request.id}})
         assert reply[request.id][0] == 'reserved'
     finally:
         worker_state.reserved_requests.clear()
Ejemplo n.º 2
0
 def test_query_task(self):
     """Reserved vs. active state is reflected in ``query_task`` replies."""
     consumer = Consumer(self.app)
     consumer.controller = _WC(app=self.app)
     consumer.controller.consumer = consumer
     panel = self.create_panel(consumer=consumer)
     panel.app = self.app
     req = Request(
         self.TaskMessage(self.mytask.name, args=(2, 2)),
         app=self.app,
     )

     def query(ids):
         # Small shortcut around the control-panel dispatch.
         return panel.handle('query_task', {'ids': ids})

     worker_state.task_reserved(req)
     try:
         assert not query({'1daa'})
         reply = query({req.id})
         assert req.id in reply
         assert reply[req.id][0] == 'reserved'
         worker_state.active_requests.add(req)
         try:
             assert query({req.id})[req.id][0] == 'active'
         finally:
             worker_state.active_requests.clear()
         assert query({req.id})[req.id][0] == 'reserved'
     finally:
         worker_state.reserved_requests.clear()
 def test_query_task(self):
     """query_task distinguishes reserved from active requests."""
     consumer = Consumer(self.app)
     controller = _WC(app=self.app)
     controller.consumer = consumer
     consumer.controller = controller
     panel = self.create_panel(consumer=consumer)
     panel.app = self.app
     request = Request(
         TaskMessage(self.mytask.name, args=(2, 2)),
         app=self.app,
     )
     worker_state.task_reserved(request)
     try:
         # Unknown ids produce an empty reply.
         self.assertFalse(panel.handle('query_task', {'ids': {'1daa'}}))
         reply = panel.handle('query_task', {'ids': {request.id}})
         self.assertIn(request.id, reply)
         self.assertEqual(reply[request.id][0], 'reserved')
         worker_state.active_requests.add(request)
         try:
             reply = panel.handle('query_task', {'ids': {request.id}})
             self.assertEqual(reply[request.id][0], 'active')
         finally:
             worker_state.active_requests.clear()
         reply = panel.handle('query_task', {'ids': {request.id}})
         self.assertEqual(reply[request.id][0], 'reserved')
     finally:
         worker_state.reserved_requests.clear()
Ejemplo n.º 4
0
 def test_query_task(self):
     """A reserved request flips to 'active' while in active_requests."""
     consumer = Consumer(self.app)
     consumer.controller = _WC(app=self.app)
     consumer.controller.consumer = consumer
     panel = self.create_panel(consumer=consumer)
     panel.app = self.app
     message = TaskMessage(self.mytask.name, args=(2, 2))
     req = Request(message, app=self.app)
     worker_state.task_reserved(req)
     try:
         # No entry at all for ids the worker has never seen.
         self.assertFalse(panel.handle('query_task', {'ids': {'1daa'}}))
         res = panel.handle('query_task', {'ids': {req.id}})
         self.assertIn(req.id, res)
         self.assertEqual(res[req.id][0], 'reserved')
         worker_state.active_requests.add(req)
         try:
             res = panel.handle('query_task', {'ids': {req.id}})
             self.assertEqual(res[req.id][0], 'active')
         finally:
             worker_state.active_requests.clear()
         res = panel.handle('query_task', {'ids': {req.id}})
         self.assertEqual(res[req.id][0], 'reserved')
     finally:
         worker_state.reserved_requests.clear()
Ejemplo n.º 5
0
 def _limit_task(self, request, bucket, tokens):
     """Dispatch *request* now, or defer it until *bucket* has enough tokens."""
     if bucket.can_consume(tokens):
         # Enough tokens: hand the request straight to the pool.
         task_reserved(request)
         self.on_task_request(request)
         return
     # Not enough tokens: retry once the bucket is expected to refill,
     # rotating the timer priority through 0-9.
     delay = bucket.expected_time(tokens)
     pri = self._limit_order = (self._limit_order + 1) % 10
     self.timer.call_after(
         delay, self._limit_move_to_pool, (request,),
         priority=pri,
     )
Ejemplo n.º 6
0
 def _limit_task(self, request, bucket, tokens):
     """Pass *request* to the pool now, or defer until the rate bucket allows."""
     if not bucket.can_consume(tokens):
         # Out of tokens: re-queue via the timer once the bucket should
         # have refilled; priority cycles 0-9 to keep rough FIFO order.
         eta = bucket.expected_time(tokens)
         pri = self._limit_order = (self._limit_order + 1) % 10
         self.timer.call_after(
             eta, self._limit_move_to_pool, (request,), priority=pri,
         )
     else:
         task_reserved(request)
         self.on_task_request(request)
Ejemplo n.º 7
0
 def test_dump_reserved(self):
     """dump_reserved lists reserved requests and is empty after clearing."""
     consumer = Consumer(self.app)
     # Keep a strong reference: reserved_requests is a WeakSet.
     request = Request(
         self.TaskMessage(self.mytask.name, args=(2, 2)), app=self.app,
     )
     worker_state.task_reserved(request)
     try:
         panel = self.create_panel(consumer=consumer)
         reply = panel.handle('dump_reserved', {'safe': True})
         first = reply[0]
         assert first['name'] == self.mytask.name
         assert first['hostname'] == socket.gethostname()
         worker_state.reserved_requests.clear()
         assert not panel.handle('dump_reserved')
     finally:
         worker_state.reserved_requests.clear()
Ejemplo n.º 8
0
 def test_dump_reserved(self):
     """The dump_reserved reply carries task name and worker hostname."""
     consumer = Consumer(self.app)
     req = Request(
         self.TaskMessage(self.mytask.name, args=(2, 2)), app=self.app,
     )
     # The local `req` reference keeps the entry alive in the
     # reserved_requests WeakSet.
     worker_state.task_reserved(req)
     try:
         panel = self.create_panel(consumer=consumer)
         dumped = panel.handle('dump_reserved', {'safe': True})
         assert dumped[0]['name'] == self.mytask.name
         assert dumped[0]['hostname'] == socket.gethostname()
         worker_state.reserved_requests.clear()
         # Nothing reserved any more -> falsy reply.
         assert not panel.handle('dump_reserved')
     finally:
         worker_state.reserved_requests.clear()
Ejemplo n.º 9
0
    def task_message_handler(message, body, ack, reject, callbacks,
                             to_timestamp=to_timestamp):
        """Handle an incoming task message: build a Request and route it.

        Revoked requests are dropped; ETA requests are scheduled on the
        timer; rate-limited ones go through the token bucket; everything
        else is reserved and handed to the pool immediately.
        """
        req = Req(body, on_ack=ack, on_reject=reject,
                  app=app, hostname=hostname,
                  eventer=eventer, task=task,
                  connection_errors=connection_errors,
                  message=message)
        # Purge expired entries from the revoked-tasks set before the
        # revoke check, so stale revocations are skipped.
        revoked_tasks.purge(limit=None, offset=REVOKES_MAX)
        # paused.purge(limit=None, offset=REVOKES_MAX)
        if req.revoked():
            return

        if _does_info:
            logger.info('hera Received task: %s', req)

        if events:
            send_event(
                'task-received',
                uuid=req.id, name=req.name,
                args=safe_repr(req.args), kwargs=safe_repr(req.kwargs),
                retries=req.request_dict.get('retries', 0),
                eta=req.eta and req.eta.isoformat(),
                expires=req.expires and req.expires.isoformat(),
            )

        if req.eta:
            # ETA task: convert the ETA to an absolute timestamp and
            # schedule it on the timer.
            try:
                if req.utc:
                    eta = to_timestamp(to_system_tz(req.eta))
                else:
                    eta = to_timestamp(req.eta, timezone.local)
            except OverflowError as exc:
                error("Couldn't convert eta %s to timestamp: %r. Task: %r",
                      req.eta, exc, req.info(safe=True), exc_info=True)
                # Unrepresentable ETA: ack and drop rather than crash.
                req.acknowledge()
            else:
                # Keep the prefetch count raised while the task waits;
                # apply_eta_task decrements it again when the timer fires.
                consumer.qos.increment_eventually()
                call_at(eta, apply_eta_task, (req, ), priority=6)
        else:
            if rate_limits_enabled:
                bucket = get_bucket(task.name)
                if bucket:
                    return limit_task(req, bucket, 1)
            task_reserved(req)
            if callbacks:
                [callback() for callback in callbacks]
            handle(req)
Ejemplo n.º 10
0
 def test_revoke_terminate(self):
     """Terminating revoke kills reserved requests; unknown ids just revoke."""
     reserved = Mock()
     reserved.id = task_id = uuid()
     state = self.create_state()
     state.consumer = Mock()
     worker_state.task_reserved(reserved)
     try:
         reply = control.revoke(state, task_id, terminate=True)
         assert task_id in revoked
         assert reserved.terminate.call_count
         assert 'terminate:' in reply['ok']
         # An id that was never reserved is only recorded as revoked.
         reply = control.revoke(state, uuid(), terminate=True)
         assert 'tasks unknown' in reply['ok']
     finally:
         worker_state.task_ready(reserved)
Ejemplo n.º 11
0
 def test_revoke_terminate(self):
     """revoke(terminate=True): terminate known tasks, only revoke unknown."""
     req = Mock()
     req.id = tid = uuid()
     state = self.create_state()
     state.consumer = Mock()
     worker_state.task_reserved(req)
     try:
         res = control.revoke(state, tid, terminate=True)
         assert tid in revoked
         assert req.terminate.call_count
         assert 'terminate:' in res['ok']
         # A fresh uuid is not reserved, so there is nothing to terminate.
         res = control.revoke(state, uuid(), terminate=True)
         assert 'tasks unknown' in res['ok']
     finally:
         worker_state.task_ready(req)
Ejemplo n.º 12
0
 def test_revoke_terminate(self):
     """Reserved tasks get terminated on revoke; unknown ids are just revoked."""
     reserved = Mock()
     reserved.id = task_id = uuid()
     state = self.create_state()
     state.consumer = Mock()
     worker_state.task_reserved(reserved)
     try:
         reply = control.revoke(state, task_id, terminate=True)
         self.assertIn(task_id, revoked)
         self.assertTrue(reserved.terminate.call_count)
         self.assertIn('terminate:', reply['ok'])
         # An id that was never reserved is only marked revoked.
         reply = control.revoke(state, uuid(), terminate=True)
         self.assertIn('tasks unknown', reply['ok'])
     finally:
         worker_state.task_ready(reserved)
Ejemplo n.º 13
0
 def test_revoke_terminate(self):
     """Revoking with terminate=True calls terminate() on reserved requests."""
     req = Mock()
     req.id = tid = uuid()
     state = self.create_state()
     state.consumer = Mock()
     worker_state.task_reserved(req)
     try:
         res = control.revoke(state, tid, terminate=True)
         self.assertIn(tid, revoked)
         self.assertTrue(req.terminate.call_count)
         self.assertIn('terminate:', res['ok'])
         # Unknown task id: nothing reserved to terminate, revoke only.
         res = control.revoke(state, uuid(), terminate=True)
         self.assertIn('tasks unknown', res['ok'])
     finally:
         worker_state.task_ready(req)
Ejemplo n.º 14
0
 def test_dump_reserved(self):
     """dump_reserved includes task name and hostname; empty once cleared."""
     consumer = Consumer(self.app)
     # The reference must stay alive: reserved_requests is a WeakSet.
     request = Request(
         TaskMessage(self.mytask.name, args=(2, 2)), app=self.app,
     )
     worker_state.task_reserved(request)
     try:
         panel = self.create_panel(consumer=consumer)
         reply = panel.handle('dump_reserved', {'safe': True})
         expected = {
             'name': self.mytask.name,
             'hostname': socket.gethostname(),
         }
         self.assertDictContainsSubset(expected, reply[0])
         worker_state.reserved_requests.clear()
         self.assertFalse(panel.handle('dump_reserved'))
     finally:
         worker_state.reserved_requests.clear()
Ejemplo n.º 15
0
 def test_dump_reserved(self):
     """Reserved requests show up in dump_reserved until cleared."""
     consumer = Consumer(self.app)
     req = Request(
         TaskMessage(self.mytask.name, args=(2, 2)), app=self.app,
     )
     # Local `req` keeps the WeakSet entry in reserved_requests alive.
     worker_state.task_reserved(req)
     try:
         panel = self.create_panel(consumer=consumer)
         dumped = panel.handle('dump_reserved', {'safe': True})
         subset = {'name': self.mytask.name,
                   'hostname': socket.gethostname()}
         self.assertDictContainsSubset(subset, dumped[0])
         worker_state.reserved_requests.clear()
         # Nothing reserved any more -> falsy reply.
         self.assertFalse(panel.handle('dump_reserved'))
     finally:
         worker_state.reserved_requests.clear()
Ejemplo n.º 16
0
    def test_no_negative_scale(self):
        """Pool size always stays within [min_concurrency, max_concurrency]."""
        observed_sizes = []
        worker = Mock(name='worker')
        scaler = autoscale.Autoscaler(self.pool, 10, 3, worker=worker)
        scaler.body()  # the body func scales up or down

        # Hold references in a list so the reserved requests stay alive.
        requests = [Mock(name=f'req{i}') for i in range(35)]
        for request in requests:
            state.task_reserved(request)
            scaler.body()
            observed_sizes.append(self.pool.num_processes)

        for request in requests:
            state.task_ready(request)
            scaler.body()
            observed_sizes.append(self.pool.num_processes)

        assert all(scaler.min_concurrency <= size <= scaler.max_concurrency
                   for size in observed_sizes)
Ejemplo n.º 17
0
    def test_no_negative_scale(self):
        """The autoscaler never drives the pool outside its min/max window."""
        sizes_seen = []
        worker = Mock(name='worker')
        scaler = autoscale.Autoscaler(self.pool, 10, 3, worker=worker)
        scaler.body()  # the body func scales up or down

        requests = [Mock(name='req{0}'.format(i)) for i in range(35)]
        for request in requests:
            # Each reservation may trigger a scale-up step.
            state.task_reserved(request)
            scaler.body()
            sizes_seen.append(self.pool.num_processes)

        for request in requests:
            # Draining the queue may trigger scale-down steps.
            state.task_ready(request)
            scaler.body()
            sizes_seen.append(self.pool.num_processes)

        in_range = [scaler.min_concurrency <= n <= scaler.max_concurrency
                    for n in sizes_seen]
        assert all(in_range)
Ejemplo n.º 18
0
 def test_query_task(self):
     """query_task answers per-id state: absent, reserved, or active."""
     consumer = Consumer(self.app)
     consumer.controller = _WC(app=self.app)
     consumer.controller.consumer = consumer
     panel = self.create_panel(consumer=consumer)
     panel.app = self.app
     message = self.TaskMessage(self.mytask.name, args=(2, 2))
     req = Request(message, app=self.app)
     worker_state.task_reserved(req)
     try:
         # Unknown ids yield an empty reply.
         assert not panel.handle("query_task", {"ids": {"1daa"}})
         reply = panel.handle("query_task", {"ids": {req.id}})
         assert req.id in reply
         assert reply[req.id][0] == "reserved"
         worker_state.active_requests.add(req)
         try:
             reply = panel.handle("query_task", {"ids": {req.id}})
             assert reply[req.id][0] == "active"
         finally:
             worker_state.active_requests.clear()
         reply = panel.handle("query_task", {"ids": {req.id}})
         assert reply[req.id][0] == "reserved"
     finally:
         worker_state.reserved_requests.clear()
Ejemplo n.º 19
0
    def test_with_autoscaler_file_descriptor_safety(self):
        """Re-registering the pool with the event loop must not raise
        EBADF after one of the pool's file descriptors has been closed.
        """
        # Given: a test celery worker instance with auto scaling
        worker = self.create_worker(
            autoscale=[10, 5],
            use_eventloop=True,
            timer_cls='celery.utils.timer2.Timer',
            threads=False,
        )
        # Given: This test requires a QoS defined on the worker consumer
        worker.consumer.qos = qos = QoS(lambda prefetch_count: prefetch_count,
                                        2)
        qos.update()

        # Given: We have started the worker pool
        worker.pool.start()

        # Then: the worker pool is the same as the autoscaler pool
        auto_scaler = worker.autoscaler
        assert worker.pool == auto_scaler.pool

        # Given: Utilize kombu to get the global hub state
        hub = get_event_loop()
        # Given: Initial call the Async Pool to register events works fine
        worker.pool.register_with_event_loop(hub)

        # Create some mock queue message and read from them
        _keep = [Mock(name=f'req{i}') for i in range(20)]
        [state.task_reserved(m) for m in _keep]
        auto_scaler.body()

        # Simulate a file descriptor from the list is closed by the OS
        # auto_scaler.force_scale_down(5)
        # This actually works -- it releases the semaphore properly
        # Same with calling .terminate() on the process directly
        # NOTE(review): this opens the fd *number* as a relative path and
        # closes it -- presumably relying on CWD contents; confirm it
        # actually closes the descriptor on all supported platforms.
        for fd, proc in worker.pool._pool._fileno_to_outq.items():
            # however opening this fd as a file and closing it will do it
            queue_worker_socket = open(str(fd), "w")
            queue_worker_socket.close()
            break  # Only need to do this once

        # When: Calling again to register with event loop ...
        worker.pool.register_with_event_loop(hub)

        # Then: test did not raise "OSError: [Errno 9] Bad file descriptor!"

        # Finally:  Clean up so the threads before/after fixture passes
        worker.terminate()
        worker.pool.terminate()
Ejemplo n.º 20
0
 def test_body(self):
     """Autoscaler body() scales up under load and back down when idle."""
     worker = Mock(name='worker')
     scaler = autoscale.Autoscaler(self.pool, 10, 3, worker=worker)
     scaler.body()
     assert scaler.pool.num_processes == 3
     # Hold references so the reserved-request WeakSet keeps them alive.
     requests = [Mock(name=f'req{i}') for i in range(20)]
     for request in requests:
         state.task_reserved(request)
     scaler.body()
     scaler.body()
     assert scaler.pool.num_processes == 10
     state.reserved_requests.clear()
     scaler.body()
     # Scale-down is rate limited, so the size is unchanged at first.
     assert scaler.pool.num_processes == 10
     scaler._last_scale_up = monotonic() - 10000
     scaler.body()
     assert scaler.pool.num_processes == 3
Ejemplo n.º 21
0
 def test_body(self):
     """body() scales with load and keeps the consumer prefetch updated."""
     worker = Mock(name='worker')
     scaler = autoscale.Autoscaler(self.pool, 10, 3, worker=worker)
     scaler.body()
     assert scaler.pool.num_processes == 3
     # Keep references alive while the requests are reserved.
     requests = [Mock(name='req{0}'.format(i)) for i in range(20)]
     for request in requests:
         state.task_reserved(request)
     scaler.body()
     scaler.body()
     assert scaler.pool.num_processes == 10
     worker.consumer._update_prefetch_count.assert_called()
     state.reserved_requests.clear()
     scaler.body()
     # Scale-down is rate limited: no immediate shrink.
     assert scaler.pool.num_processes == 10
     scaler._last_scale_up = monotonic() - 10000
     scaler.body()
     assert scaler.pool.num_processes == 3
     worker.consumer._update_prefetch_count.assert_called()
Ejemplo n.º 22
0
 def test_body(self):
     """The autoscaler grows to max under load and shrinks back to min."""
     worker = Mock(name='worker')
     scaler = autoscale.Autoscaler(self.pool, 10, 3, worker=worker)
     scaler.body()
     self.assertEqual(scaler.pool.num_processes, 3)
     requests = [Mock(name='req{0}'.format(i)) for i in range(20)]
     for request in requests:
         state.task_reserved(request)
     scaler.body()
     scaler.body()
     self.assertEqual(scaler.pool.num_processes, 10)
     worker.consumer._update_prefetch_count.assert_called()
     state.reserved_requests.clear()
     scaler.body()
     # Scale-down is rate limited, so the first idle tick keeps 10.
     self.assertEqual(scaler.pool.num_processes, 10)
     scaler._last_scale_up = monotonic() - 10000
     scaler.body()
     self.assertEqual(scaler.pool.num_processes, 3)
     worker.consumer._update_prefetch_count.assert_called()
Ejemplo n.º 23
0
    def task_message_handler(message,
                             body,
                             ack,
                             reject,
                             callbacks,
                             to_timestamp=to_timestamp):
        """Handle an incoming task message for this task type.

        Normalizes the payload to protocol 2, builds a Request, then
        routes it: revoked -> dropped, ETA -> timer, rate-limited ->
        token bucket, otherwise reserved and executed immediately.
        """
        # Normalize to protocol-2 form: (body, headers, decoded, utc).
        if body is None and 'args' not in message.payload:
            body, headers, decoded, utc = (
                message.body,
                message.headers,
                False,
                app.uses_utc_timezone(),
            )
            if not body_can_be_buffer:
                body = bytes(body) if isinstance(body, buffer_t) else body
        else:
            if 'args' in message.payload:
                # Hybrid message: headers plus an args payload.
                body, headers, decoded, utc = hybrid_to_proto2(
                    message, message.payload)
            else:
                body, headers, decoded, utc = proto1_to_proto2(message, body)

        req = Req(
            message,
            on_ack=ack,
            on_reject=reject,
            app=app,
            hostname=hostname,
            eventer=eventer,
            task=task,
            connection_errors=connection_errors,
            body=body,
            headers=headers,
            decoded=decoded,
            utc=utc,
        )
        if _does_info:
            info('Received task: %s', req)
        # Only call revoked() when the id could actually be revoked or
        # expired; drop the request silently if so.
        if (req.expires or req.id in revoked_tasks) and req.revoked():
            return

        if task_sends_events:
            send_event(
                'task-received',
                uuid=req.id,
                name=req.name,
                args=req.argsrepr,
                kwargs=req.kwargsrepr,
                root_id=req.root_id,
                parent_id=req.parent_id,
                retries=req.request_dict.get('retries', 0),
                eta=req.eta and req.eta.isoformat(),
                expires=req.expires and req.expires.isoformat(),
            )

        bucket = None
        eta = None
        if req.eta:
            try:
                if req.utc:
                    eta = to_timestamp(to_system_tz(req.eta))
                else:
                    eta = to_timestamp(req.eta, app.timezone)
            except (OverflowError, ValueError) as exc:
                error("Couldn't convert ETA %r to timestamp: %r. Task: %r",
                      req.eta,
                      exc,
                      req.info(safe=True),
                      exc_info=True)
                req.reject(requeue=False)
                # NOTE(review): there is no return after the reject, so a
                # rejected request falls through with eta=None and can
                # still be reserved/handled below -- confirm intended.
        if rate_limits_enabled:
            bucket = get_bucket(task.name)

        if eta and bucket:
            # Rate-limited ETA task: apply the bucket once the ETA fires.
            consumer.qos.increment_eventually()
            return call_at(eta, limit_post_eta, (req, bucket, 1), priority=6)
        if eta:
            # Plain ETA task: prefetch is incremented here and decremented
            # again by apply_eta_task when the timer fires.
            consumer.qos.increment_eventually()
            call_at(eta, apply_eta_task, (req, ), priority=6)
            return task_message_handler
        if bucket:
            return limit_task(req, bucket, 1)

        # Immediate execution path.
        task_reserved(req)
        if callbacks:
            [callback(req) for callback in callbacks]
        handle(req)
Ejemplo n.º 24
0
 def apply_eta_task(self, task):
     """Method called by the timer to apply a task with an ETA/countdown."""
     # Reserve first so worker state sees the task before the pool does.
     task_reserved(task)
     self.on_task_request(task)
     # The prefetch count was raised when the ETA task arrived; release it.
     self.qos.decrement_eventually()
Ejemplo n.º 25
0
 def _limit_move_to_pool(self, request):
     """Timer callback: move a previously rate-limited request to the pool."""
     task_reserved(request)
     self.on_task_request(request)
Ejemplo n.º 26
0
 def _limit_move_to_pool(self, request):
     """Reserve a deferred (rate-limited) request and hand it to the pool."""
     task_reserved(request)
     self.on_task_request(request)
Ejemplo n.º 27
0
    def task_message_handler(message,
                             body,
                             ack,
                             reject,
                             callbacks,
                             to_timestamp=to_timestamp):
        """Handle an incoming task message.

        Routing: revoked -> dropped, ETA -> scheduled via the timer,
        rate-limited -> token bucket, else reserved and handled now.
        """
        req = Req(body,
                  on_ack=ack,
                  on_reject=reject,
                  app=app,
                  hostname=hostname,
                  eventer=eventer,
                  task=task,
                  connection_errors=connection_errors,
                  message=message)
        # Purge expired entries from the revoked set before the revoke
        # check, so expired revocations are skipped.
        revoked_tasks.purge(limit=None, offset=REVOKES_MAX)
        # paused.purge(limit=None, offset=REVOKES_MAX)
        if req.revoked():
            return

        if _does_info:
            logger.info('hera Received task: %s', req)

        if events:
            send_event(
                'task-received',
                uuid=req.id,
                name=req.name,
                args=safe_repr(req.args),
                kwargs=safe_repr(req.kwargs),
                retries=req.request_dict.get('retries', 0),
                eta=req.eta and req.eta.isoformat(),
                expires=req.expires and req.expires.isoformat(),
            )

        if req.eta:
            # ETA task: convert to an absolute timestamp and schedule.
            try:
                if req.utc:
                    eta = to_timestamp(to_system_tz(req.eta))
                else:
                    eta = to_timestamp(req.eta, timezone.local)
            except OverflowError as exc:
                error("Couldn't convert eta %s to timestamp: %r. Task: %r",
                      req.eta,
                      exc,
                      req.info(safe=True),
                      exc_info=True)
                # Unrepresentable ETA: ack and drop rather than crash.
                req.acknowledge()
            else:
                # Raise prefetch while the task waits; apply_eta_task
                # decrements it again when the timer fires.
                consumer.qos.increment_eventually()
                call_at(eta, apply_eta_task, (req, ), priority=6)
        else:
            if rate_limits_enabled:
                bucket = get_bucket(task.name)
                if bucket:
                    return limit_task(req, bucket, 1)
            task_reserved(req)
            if callbacks:
                [callback() for callback in callbacks]
            handle(req)
Ejemplo n.º 28
0
 def apply_eta_task(self, task):
     """Method called by the timer to apply a task with an ETA/countdown."""
     # Reserve before dispatching so worker state is consistent.
     task_reserved(task)
     self.on_task_request(task)
     # Release the prefetch slot taken when the ETA task was received.
     self.qos.decrement_eventually()
Ejemplo n.º 29
0
    def task_message_handler(message,
                             body,
                             ack,
                             reject,
                             callbacks,
                             to_timestamp=to_timestamp):
        """Handle an incoming crawl task message.

        Builds a protocol-2 Request, records a task-received status,
        optionally pushes the ETA back for project-level rate limiting,
        then routes the request (timer, token bucket, or immediate).
        """
        # print('crawl_task_message_handler %s %s' % (task_name, repr(body)))
        # NOTE(review): utc is hard-coded to True here, unlike the stock
        # handler which asks the app -- confirm all producers send UTC.
        body, headers, decoded, utc = (
            message.body,
            message.headers,
            False,
            True,
        )
        if not body_can_be_buffer:
            body = bytes(body) if isinstance(body, buffer_t) else body

        req = BaseReq(
            message,
            on_ack=ack,
            on_reject=reject,
            app=app,
            hostname=hostname,
            eventer=eventer,
            task=task,
            connection_errors=connection_errors,
            body=body,
            headers=headers,
            decoded=decoded,
            utc=utc,
        )
        # if _does_info:
        meta = req.task_info()
        taskinfo = {'meta': meta}
        _info(u'收到任务', extra=taskinfo)

        # Drop the request if it could be revoked/expired and indeed is.
        if (req.expires
                or req.id in controller_revoked_tasks) and req.revoked():
            return

        # req_args, req_kwargs, req_embed = req._payload
        if task_sends_events:
            send_event(
                'task-received',
                uuid=req.id,
                name=req.name,
                args=req.argsrepr,
                kwargs=req.kwargsrepr,
                root_id=req.root_id,
                parent_id=req.parent_id,
                retries=req.request_dict.get('retries', 0),
                eta=req.eta and req.eta.isoformat(),
                expires=req.expires and req.expires.isoformat(),
            )

        # Persist a 'task-received' status record for this task.
        # ti = get_task_info(req._args, req._kwargs)
        fields = dict(
            name=req.name,
            # project=req._project, page=req._page, url=req._url,
            kwargs=json.dumps(req._kwargs),
            # args=req_args, kwargs=req_kwargs,
            root_id=req.root_id,
            parent_id=req.parent_id,
            retries=req.request_dict.get('retries', 0),
            eta=req.eta and req.eta.isoformat(),
            expires=req.expires and req.expires.isoformat(),
            meta=meta)
        save_task_status('task-received', req.id, fields)

        # Rate limiting: if the project is throttled, push the request's
        # ETA into the future instead of executing it immediately.
        if req._kwargs.get('__limit__'):
            try:
                key = 'rate:%s' % meta['project']
                pending = get_expected_time(key)
                # print '----Rate limit pending: %s %r' % (req.id, pending)
                if pending > 0:
                    req.eta = maybe_make_aware(datetime.utcnow() +
                                               timedelta(seconds=pending))
                    info('Rate Limit [%s.%s] %s', meta['project'],
                         meta['page'], pending)
            except Exception:
                # Best-effort: a rate-limit failure must not lose the task.
                error('Rate limit. Task: %r',
                      req.info(safe=True),
                      exc_info=True)

        if req.eta:
            # ETA (possibly just set above): schedule via the timer.
            try:
                if req.utc:
                    eta = to_timestamp(to_system_tz(req.eta))
                else:
                    eta = to_timestamp(req.eta, timezone.local)
            except (OverflowError, ValueError):
                error("Couldn't convert ETA %r to timestamp. Task: %r",
                      req.eta,
                      req.info(safe=True),
                      exc_info=True)
                req.reject(requeue=False)
            else:
                # Raise prefetch while waiting; apply_eta_task releases it.
                consumer.qos.increment_eventually()
                call_at(eta, apply_eta_task, (req, ), priority=6)
        else:
            if rate_limits_enabled:
                bucket = get_bucket(task.name)
                if bucket:
                    return limit_task(req, bucket, 1)
            task_reserved(req)
            if callbacks:
                [callback(req) for callback in callbacks]
            handle(req)