def test_poll_result(self):
    """Polling drains all pending result messages, fast-forwards to the
    newest state, and serves the cache when no new messages arrive."""
    results = Queue()

    class Message(object):
        # Minimal stand-in for an AMQP result message: only the decoded
        # payload is populated; keyword overrides are merged over defaults.
        def __init__(self, **merge):
            self.payload = dict({'status': states.STARTED,
                                 'result': None}, **merge)

    class MockBinding(object):
        # Fake kombu Queue binding that serves messages from ``results``.
        def __init__(self, *args, **kwargs):
            pass

        def __call__(self, *args, **kwargs):
            return self

        def declare(self):
            pass

        def get(self, no_ack=False):
            # Returns ``None`` (instead of raising) when drained, mirroring
            # kombu's non-blocking ``Queue.get`` contract.
            try:
                return results.get(block=False)
            except Empty:
                pass

    class MockBackend(AMQPBackend):
        # Substitute the fake binding for the real result queue.
        Queue = MockBinding

    backend = MockBackend()

    # FFWD's to the latest state.
    results.put(Message(status=states.RECEIVED, seq=1))
    results.put(Message(status=states.STARTED, seq=2))
    results.put(Message(status=states.FAILURE, seq=3))
    r1 = backend.get_task_meta(uuid())
    self.assertDictContainsSubset({'status': states.FAILURE, 'seq': 3},
                                  r1, 'FFWDs to the last state')

    # Caches last known state.
    results.put(Message())
    tid = uuid()
    backend.get_task_meta(tid)
    self.assertIn(tid, backend._cache, 'Caches last known state')

    # Returns cache if no new states.
    results.queue.clear()
    assert not results.qsize()
    backend._cache[tid] = 'hello'
    self.assertEqual(backend.get_task_meta(tid), 'hello',
                     'Returns cache if no new states')
class TokenBucketQueue(object):
    """Queue with rate limited get operations.

    This uses the token bucket algorithm to rate limit the queue on get
    operations.

    :param fill_rate: The rate in tokens/second that the bucket will be
        refilled.
    :keyword queue: Underlying queue to wrap.  A new :class:`Queue` is
        created when not provided.
    :keyword capacity: Maximum number of tokens in the bucket.
        Default is 1.

    """
    RateLimitExceeded = RateLimitExceeded

    def __init__(self, fill_rate, queue=None, capacity=1):
        self._bucket = TokenBucket(fill_rate, capacity)
        # Fix: test against ``None`` explicitly.  The previous
        # ``if not self.queue`` check would silently discard a
        # caller-supplied queue that happened to be falsy (for example an
        # empty container used as the backing queue).
        self.queue = queue if queue is not None else Queue()

    def put(self, item, block=True):
        """Put an item onto the queue."""
        self.queue.put(item, block=block)

    def put_nowait(self, item):
        """Put an item into the queue without blocking.

        :raises Queue.Full: If a free slot is not immediately available.

        """
        return self.put(item, block=False)

    def get(self, block=True):
        """Remove and return an item from the queue.

        :raises RateLimitExceeded: If a token could not be consumed from
            the token bucket (consuming from the queue too fast).
        :raises Queue.Empty: If an item is not immediately available.

        """
        # Idiomatic conditional expression instead of the fragile
        # ``cond and a or b`` form.
        get = self.queue.get if block else self.queue.get_nowait

        if not block and not self.items:
            raise Empty()
        if not self._bucket.can_consume(1):
            raise RateLimitExceeded()

        return get()

    def get_nowait(self):
        """Remove and return an item from the queue without blocking.

        :raises RateLimitExceeded: If a token could not be consumed from
            the token bucket (consuming from the queue too fast).
        :raises Queue.Empty: If an item is not immediately available.

        """
        return self.get(block=False)

    def qsize(self):
        """Returns the size of the queue."""
        return self.queue.qsize()

    def empty(self):
        """Returns :const:`True` if the queue is empty."""
        return self.queue.empty()

    def clear(self):
        """Delete all data in the queue."""
        return self.items.clear()

    def wait(self, block=False):
        """Wait until a token can be retrieved from the bucket and return
        the next item."""
        get = self.get
        expected_time = self.expected_time
        while 1:
            remaining = expected_time()
            if not remaining:
                return get(block=block)
            sleep(remaining)

    def expected_time(self, tokens=1):
        """Returns the expected time in seconds of when a new token should
        be available."""
        # An empty queue never has to wait for a token.
        if not self.items:
            return 0
        return self._bucket.expected_time(tokens)

    @property
    def items(self):
        """Underlying data. Do not modify."""
        return self.queue.queue
class Batches(Task):
    """Task base class that collects incoming requests into a buffer and
    executes them together as one batch.

    Subclasses implement :meth:`run`, which receives the list of buffered
    requests.  The buffer is flushed either when :attr:`flush_every`
    requests have accumulated or every :attr:`flush_interval` seconds,
    whichever comes first.
    """

    abstract = True

    #: Maximum number of message in buffer.
    flush_every = 10

    #: Timeout in seconds before buffer is flushed anyway.
    flush_interval = 30

    def __init__(self):
        self._buffer = Queue()   # pending requests, drained by _do_flush()
        self._count = count(1)   # message counter for size-based flushing
        self._tref = None        # repeating flush timer, created lazily
        self._pool = None        # worker pool, assigned by Strategy()

    def run(self, requests):
        """Process one batch of requests; must be overridden."""
        raise NotImplementedError('must implement run(requests)')

    def Strategy(self, task, app, consumer):
        """Return the message handler used by the worker consumer.

        Each message is converted to a request and buffered; the first
        message starts the periodic flush timer, and every
        ``flush_every``-th message forces an immediate flush.
        """
        self._pool = consumer.pool
        hostname = consumer.hostname
        eventer = consumer.event_dispatcher
        Req = Request
        connection_errors = consumer.connection_errors
        timer = consumer.timer
        put_buffer = self._buffer.put
        flush_buffer = self._do_flush
        body_can_be_buffer = consumer.pool.body_can_be_buffer

        def task_message_handler(message, body, ack, reject, callbacks,
                                 **kw):
            if body is None:
                # Protocol 2: the task payload lives on the message itself.
                body, headers, decoded, utc = (
                    message.body, message.headers, False, True,
                )
                if not body_can_be_buffer:
                    body = bytes(body) if isinstance(body, buffer_t) else body
            else:
                # Protocol 1 message: upgrade to the protocol-2 fields.
                body, headers, decoded, utc = proto1_to_proto2(message, body)

            request = Req(
                message, on_ack=ack, on_reject=reject, app=app,
                hostname=hostname, eventer=eventer, task=task,
                body=body, headers=headers, decoded=decoded, utc=utc,
                connection_errors=connection_errors,
            )
            put_buffer(request)

            if self._tref is None:     # first request starts flush timer.
                self._tref = timer.call_repeatedly(
                    self.flush_interval, flush_buffer,
                )

            if not next(self._count) % self.flush_every:
                flush_buffer()

        return task_message_handler

    def flush(self, requests):
        """Apply the buffered *requests* as a single batch on the pool."""
        return self.apply_buffer(requests,
                                 ([SimpleRequest.from_request(r)
                                   for r in requests],))

    def _do_flush(self):
        # Timer callback: dispatch buffered requests, or cancel the timer
        # when the buffer turns out to be empty.
        logger.debug('Batches: Wake-up to flush buffer...')
        requests = None
        if self._buffer.qsize():
            requests = list(consume_queue(self._buffer))
        if requests:
            logger.debug('Batches: Buffer complete: %s', len(requests))
            self.flush(requests)
        if not requests:
            logger.debug('Batches: Canceling timer: Nothing in buffer.')
            if self._tref:
                self._tref.cancel()  # cancel timer.
                self._tref = None

    def apply_buffer(self, requests, args=(), kwargs=None):
        """Schedule *requests* for execution on the pool.

        Requests whose task has ``acks_late=False`` are acknowledged as
        soon as the batch is accepted by a pool worker; ``acks_late=True``
        requests are acknowledged only after the batch returns.

        ``kwargs`` is accepted for interface compatibility but is not
        forwarded to the pool.  (Fix: it previously used a shared mutable
        ``{}`` default, a Python anti-pattern.)
        """
        kwargs = {} if kwargs is None else kwargs
        acks_late = [], []
        # Partition the requests by their task's acks_late flag.
        [acks_late[r.task.acks_late].append(r) for r in requests]
        assert requests and (acks_late[True] or acks_late[False])

        def on_accepted(pid, time_accepted):
            # Early-ack requests acknowledge once the batch is accepted.
            [req.acknowledge() for req in acks_late[False]]

        def on_return(result):
            # Late-ack requests acknowledge after the batch returns.
            [req.acknowledge() for req in acks_late[True]]

        return self._pool.apply_async(
            apply_batches_task,
            (self, args, 0, None),
            accept_callback=on_accepted,
            callback=acks_late[True] and on_return or noop,
        )
class Batches(Task):
    """Task base class that buffers incoming requests and runs them
    together as one batch (protocol-1 worker API)."""

    abstract = True

    #: Maximum number of message in buffer.
    flush_every = 10

    #: Timeout in seconds before buffer is flushed anyway.
    flush_interval = 30

    def __init__(self):
        # Pending requests; drained by _do_flush().
        self._buffer = Queue()
        # Message counter used to trigger size-based flushes.
        self._count = count(1)
        # Repeating flush timer reference (created lazily).
        self._tref = None
        # Worker pool, assigned by Strategy().
        self._pool = None

    def run(self, requests):
        """Process one batch of requests; must be overridden."""
        raise NotImplementedError('must implement run(requests)')

    def Strategy(self, task, app, consumer):
        """Return the consumer's message handler for this task.

        Each message becomes a request placed on the buffer; the first
        message starts the periodic flush timer, and every
        ``flush_every``-th message forces an immediate flush.
        """
        self._pool = consumer.pool
        hostname = consumer.hostname
        eventer = consumer.event_dispatcher
        Req = Request
        connection_errors = consumer.connection_errors
        timer = consumer.timer
        put_buffer = self._buffer.put
        flush_buffer = self._do_flush

        def task_message_handler(message, body, ack, reject, callbacks,
                                 **kw):
            request = Req(body, on_ack=ack, app=app, hostname=hostname,
                          events=eventer, task=task,
                          connection_errors=connection_errors,
                          delivery_info=message.delivery_info)
            put_buffer(request)

            if self._tref is None:     # first request starts flush timer.
                self._tref = timer.call_repeatedly(
                    self.flush_interval, flush_buffer,
                )

            if not next(self._count) % self.flush_every:
                flush_buffer()

        return task_message_handler

    def flush(self, requests):
        """Apply the buffered *requests* as a single batch."""
        return self.apply_buffer(requests,
                                 ([SimpleRequest.from_request(r)
                                   for r in requests], ))

    def _do_flush(self):
        # Timer callback: dispatch buffered requests, or cancel the timer
        # when nothing is waiting.
        logger.debug('Batches: Wake-up to flush buffer...')
        requests = None
        if self._buffer.qsize():
            requests = list(consume_queue(self._buffer))
        if requests:
            logger.debug('Batches: Buffer complete: %s', len(requests))
            self.flush(requests)
        if not requests:
            logger.debug('Batches: Canceling timer: Nothing in buffer.')
            if self._tref:
                self._tref.cancel()  # cancel timer.
                self._tref = None

    def apply_buffer(self, requests, args=(), kwargs={}):
        # NOTE(review): ``kwargs={}`` is a shared mutable default; it is
        # never mutated (or even used) here, but ``None`` would be safer.
        acks_late = [], []
        # Partition the requests by their task's acks_late flag.
        [acks_late[r.task.acks_late].append(r) for r in requests]
        assert requests and (acks_late[True] or acks_late[False])

        def on_accepted(pid, time_accepted):
            # Early-ack requests acknowledge once the batch is accepted.
            [req.acknowledge() for req in acks_late[False]]

        def on_return(result):
            # Late-ack requests acknowledge after the batch returns.
            [req.acknowledge() for req in acks_late[True]]

        return self._pool.apply_async(
            apply_batches_task,
            (self, args, 0, None),
            accept_callback=on_accepted,
            callback=acks_late[True] and on_return or noop,
        )
def test_poll_result(self):
    """Polling drains all pending result messages, caches the latest
    state, republishes it, and serves the cache when nothing new arrives."""
    results = Queue()

    class Message(object):
        # Minimal pickled result message as the AMQP backend receives it.
        def __init__(self, **merge):
            self.payload = dict({'status': states.STARTED,
                                 'result': None}, **merge)
            self.body = pickle.dumps(self.payload)
            self.content_type = 'application/x-python-serialize'
            self.content_encoding = 'binary'

    class MockBinding(object):
        # Fake kombu Queue binding serving messages from ``results``.
        def __init__(self, *args, **kwargs):
            self.channel = Mock()

        def __call__(self, *args, **kwargs):
            return self

        def declare(self):
            pass

        def get(self, no_ack=False):
            # Returns ``None`` when drained instead of raising, mirroring
            # kombu's non-blocking ``Queue.get`` contract.
            try:
                return results.get(block=False)
            except Empty:
                pass

        def is_bound(self):
            return True

    class MockBackend(AMQPBackend):
        # Substitute the fake binding for the real result queue.
        Queue = MockBinding

    backend = MockBackend()
    backend._republish = Mock()

    # FFWD's to the latest state.
    results.put(Message(status=states.RECEIVED, seq=1))
    results.put(Message(status=states.STARTED, seq=2))
    results.put(Message(status=states.FAILURE, seq=3))
    r1 = backend.get_task_meta(uuid())
    self.assertDictContainsSubset({'status': states.FAILURE, 'seq': 3},
                                  r1, 'FFWDs to the last state')

    # Caches last known state.
    results.put(Message())
    tid = uuid()
    backend.get_task_meta(tid)
    self.assertIn(tid, backend._cache, 'Caches last known state')
    self.assertTrue(backend._republish.called)

    # Returns cache if no new states.
    results.queue.clear()
    assert not results.qsize()
    backend._cache[tid] = 'hello'
    self.assertEqual(backend.get_task_meta(tid), 'hello',
                     'Returns cache if no new states')
class Batches(Task):
    """Task base class that buffers requests and executes them in batches
    (protocol-2 worker API, with local ``apply`` support)."""

    abstract = True

    #: Maximum number of message in buffer.
    flush_every = 10

    #: Timeout in seconds before buffer is flushed anyway.
    flush_interval = 30

    def __init__(self):
        # Pending requests; drained by _do_flush().
        self._buffer = Queue()
        # Message counter used for size-based flushing.
        self._count = count(1)
        # Repeating flush timer, created lazily on the first request.
        self._tref = None
        # Worker pool, assigned by Strategy().
        self._pool = None

    def run(self, requests):
        """Process one batch of requests; must be overridden."""
        raise NotImplementedError('must implement run(requests)')

    def Strategy(self, task, app, consumer):
        """Return the consumer's message handler for this task.

        Each message becomes a request placed on the buffer; the first
        message starts the periodic flush timer, and every
        ``flush_every``-th message forces an immediate flush.
        """
        self._pool = consumer.pool
        hostname = consumer.hostname
        eventer = consumer.event_dispatcher
        Req = Request
        connection_errors = consumer.connection_errors
        timer = consumer.timer
        put_buffer = self._buffer.put
        flush_buffer = self._do_flush
        body_can_be_buffer = consumer.pool.body_can_be_buffer

        def task_message_handler(message, body, ack, reject, callbacks,
                                 **kw):
            if body is None:
                # Protocol 2: the task payload lives on the message itself.
                body, headers, decoded, utc = (
                    message.body, message.headers, False, True,
                )
                if not body_can_be_buffer:
                    body = bytes(body) if isinstance(body, buffer_t) else body
            else:
                # Protocol 1 message: upgrade to the protocol-2 fields.
                body, headers, decoded, utc = proto1_to_proto2(message, body)

            request = Req(
                message, on_ack=ack, on_reject=reject, app=app,
                hostname=hostname, eventer=eventer, task=task,
                body=body, headers=headers, decoded=decoded, utc=utc,
                connection_errors=connection_errors,
            )
            put_buffer(request)

            if self._tref is None:     # first request starts flush timer.
                self._tref = timer.call_repeatedly(
                    self.flush_interval, flush_buffer,
                )

            if not next(self._count) % self.flush_every:
                flush_buffer()

        return task_message_handler

    def apply(self, args=None, kwargs=None, *_args, **_kwargs):
        """
        Execute this task locally as a batch of size 1, by blocking until
        the task returns.

        Arguments:
            args (Tuple): positional arguments passed on to the task.

        Returns:
            celery.result.EagerResult: pre-evaluated result.
        """
        request = SimpleRequest(
            id=_kwargs.get("task_id", uuid()),
            name="batch request",
            args=args or (),
            kwargs=kwargs or {},
            delivery_info=None,
            hostname="localhost",
        )
        # Wrap the single request in a one-element batch for run().
        return super(Batches, self).apply(([request], ), {},
                                          *_args, **_kwargs)

    def flush(self, requests):
        """Apply the buffered *requests* as a single batch."""
        return self.apply_buffer(
            requests,
            ([SimpleRequest.from_request(r) for r in requests], ))

    def _do_flush(self):
        # Timer callback: dispatch buffered requests, or cancel the timer
        # when nothing is waiting.
        logger.debug('Batches: Wake-up to flush buffer...')
        requests = None
        if self._buffer.qsize():
            requests = list(consume_queue(self._buffer))
        if requests:
            logger.debug('Batches: Buffer complete: %s', len(requests))
            self.flush(requests)
        if not requests:
            logger.debug('Batches: Canceling timer: Nothing in buffer.')
            if self._tref:
                self._tref.cancel()  # cancel timer.
                self._tref = None

    def apply_buffer(self, requests, args=(), kwargs={}):
        # NOTE(review): ``kwargs={}`` is a shared mutable default; it is
        # never mutated (or even used) here, but ``None`` would be safer.
        acks_late = [], []
        # Partition the requests by their task's acks_late flag.
        [acks_late[r.task.acks_late].append(r) for r in requests]
        assert requests and (acks_late[True] or acks_late[False])

        def on_accepted(pid, time_accepted):
            # Early-ack requests acknowledge once the batch is accepted.
            [req.acknowledge() for req in acks_late[False]]

        def on_return(result):
            # Late-ack requests acknowledge after the batch returns.
            [req.acknowledge() for req in acks_late[True]]

        return self._pool.apply_async(
            apply_batches_task,
            (self, args, 0, None),
            accept_callback=on_accepted,
            callback=acks_late[True] and on_return or noop,
        )
class Batches(Task):
    """Task base class that buffers incoming requests and executes them
    together as one batch (apply_interval timer variant)."""

    abstract = True

    #: Maximum number of message in buffer.
    flush_every = 10

    #: Timeout in seconds before buffer is flushed anyway.
    flush_interval = 30

    def __init__(self):
        self._buffer = Queue()   # pending requests, drained by _do_flush()
        self._count = count(1)   # message counter for size-based flushing
        self._tref = None        # repeating flush timer, created lazily
        self._pool = None        # worker pool, assigned by Strategy()

    def run(self, requests):
        """Process one batch of requests; must be overridden."""
        raise NotImplementedError("must implement run(requests)")

    def Strategy(self, task, app, consumer):
        """Return the consumer's message handler for this task.

        Each message becomes a request placed on the buffer; the first
        message starts the periodic flush timer, and every
        ``flush_every``-th message forces an immediate flush.
        """
        self._pool = consumer.pool
        hostname = consumer.hostname
        eventer = consumer.event_dispatcher
        Req = Request
        connection_errors = consumer.connection_errors
        timer = consumer.timer
        put_buffer = self._buffer.put
        flush_buffer = self._do_flush

        def task_message_handler(message, body, ack):
            request = Req(
                body,
                on_ack=ack,
                app=app,
                hostname=hostname,
                events=eventer,
                task=task,
                connection_errors=connection_errors,
                delivery_info=message.delivery_info,
            )
            put_buffer(request)

            if self._tref is None:  # first request starts flush timer.
                # apply_interval takes milliseconds.
                self._tref = timer.apply_interval(
                    self.flush_interval * 1000.0, flush_buffer)

            if not next(self._count) % self.flush_every:
                flush_buffer()

        return task_message_handler

    def flush(self, requests):
        """Apply the buffered *requests* as a single batch."""
        return self.apply_buffer(requests,
                                 ([SimpleRequest.from_request(r)
                                   for r in requests],))

    def _do_flush(self):
        # Timer callback: dispatch buffered requests, or cancel the timer
        # when nothing is waiting.
        logger.debug("Batches: Wake-up to flush buffer...")
        requests = None
        if self._buffer.qsize():
            requests = list(consume_queue(self._buffer))
        if requests:
            logger.debug("Batches: Buffer complete: %s", len(requests))
            self.flush(requests)
        if not requests:
            logger.debug("Batches: Cancelling timer: Nothing in buffer.")
            # Fix: guard against ``_tref`` being ``None`` — an empty flush
            # while no timer exists would otherwise raise AttributeError.
            # (Every sibling variant of this class carries this guard.)
            if self._tref is not None:
                self._tref.cancel()  # cancel timer.
                self._tref = None

    def apply_buffer(self, requests, args=(), kwargs=None):
        """Schedule *requests* for execution on the pool.

        Early-ack requests are acknowledged when the batch is accepted by
        a pool worker; late-ack requests only after the batch returns.

        ``kwargs`` is accepted for interface compatibility but is not
        forwarded.  (Fix: it previously used a shared mutable ``{}``
        default, a Python anti-pattern.)
        """
        kwargs = {} if kwargs is None else kwargs
        acks_late = [], []
        # Partition the requests by their task's acks_late flag.
        [acks_late[r.task.acks_late].append(r) for r in requests]
        assert requests and (acks_late[True] or acks_late[False])

        def on_accepted(pid, time_accepted):
            # Early-ack requests acknowledge once the batch is accepted.
            [req.acknowledge() for req in acks_late[False]]

        def on_return(result):
            # Late-ack requests acknowledge after the batch returns.
            [req.acknowledge() for req in acks_late[True]]

        return self._pool.apply_async(
            apply_batches_task,
            (self, args, 0, None),
            accept_callback=on_accepted,
            callback=acks_late[True] and on_return or None,
        )
def test_poll_result(self):
    """Consuming results fast-forwards to the newest state, caches and
    republishes it, and falls back to the cache when nothing new arrives."""
    pending = Queue()

    class ResultMessage(object):
        # Pickled result message as the AMQP backend would receive it.
        def __init__(self, **overrides):
            payload = {'status': states.STARTED, 'result': None}
            payload.update(overrides)
            self.payload = payload
            self.body = pickle.dumps(self.payload)
            self.content_type = 'application/x-python-serialize'
            self.content_encoding = 'binary'

    class FakeBinding(object):
        # Stands in for a bound kombu Queue serving from ``pending``.
        def __init__(self, *args, **kwargs):
            self.channel = Mock()

        def __call__(self, *args, **kwargs):
            return self

        def declare(self):
            pass

        def get(self, no_ack=False):
            # Non-blocking get: yield ``None`` once drained.
            try:
                return pending.get(block=False)
            except Empty:
                return None

        def is_bound(self):
            return True

    class FakeBackend(AMQPBackend):
        Queue = FakeBinding

    backend = FakeBackend()
    backend._republish = Mock()

    # Fast-forwards past intermediate states to the newest one.
    pending.put(ResultMessage(status=states.RECEIVED, seq=1))
    pending.put(ResultMessage(status=states.STARTED, seq=2))
    pending.put(ResultMessage(status=states.FAILURE, seq=3))
    meta = backend.get_task_meta(uuid())
    self.assertDictContainsSubset(
        {'status': states.FAILURE, 'seq': 3}, meta,
        'FFWDs to the last state')

    # The last seen state is cached and republished.
    pending.put(ResultMessage())
    task_id = uuid()
    backend.get_task_meta(task_id)
    self.assertIn(task_id, backend._cache, 'Caches last known state')
    self.assertTrue(backend._republish.called)

    # With no new messages, the cached value is returned.
    pending.queue.clear()
    assert not pending.qsize()
    backend._cache[task_id] = 'hello'
    self.assertEqual(backend.get_task_meta(task_id), 'hello',
                     'Returns cache if no new states')