def test_qsize():
    """qsize() must track (message count, total octets) through put/get."""
    queue = TopicQueue("test_qsize")
    # A fresh queue reports zero messages and zero octets
    assert queue.qsize() == (0, 0)
    expected_n = 0
    expected_size = 0
    # Fill with messages of increasing length, checking counters after each put
    for length in range(1, 11):
        payload = "x" * length
        queue.put(payload)
        expected_n += 1
        expected_size += length
        assert queue.qsize() == (expected_n, expected_size)
    # Drain one message at a time, checking counters shrink accordingly
    while expected_n > 0:
        payload = list(queue.iter_get())[0]
        expected_n -= 1
        expected_size -= len(payload)
        assert queue.qsize() == (expected_n, expected_size)
    # Fully drained queue is back to zeroes
    assert queue.qsize() == (0, 0)
def test_wait():
    """Producer/consumer round-trip through TopicQueue on an asyncio loop."""
    sleep_timeout = 0.1
    to_produce = ["%04d" % i for i in range(10)]
    consumed = {"data": []}
    queue = TopicQueue("test_wait")

    async def producer():
        # Feed messages one at a time, pausing between puts, then shut down
        for item in to_produce:
            queue.put(item)
            await asyncio.sleep(sleep_timeout)
        queue.shutdown()

    async def consumer():
        # Keep draining until shutdown is requested and the queue is empty
        while not queue.to_shutdown or queue.qsize()[0]:
            await queue.wait(1)
            consumed["data"] += list(queue.iter_get(100))

    async def run_both():
        await asyncio.gather(producer(), consumer())

    with IOLoopContext() as loop:
        loop.run_until_complete(run_both())
    # Everything produced must arrive, in order
    assert to_produce == consumed["data"]
def test_wait():
    """Tornado-coroutine variant of the producer/consumer round-trip test."""
    # NOTE(review): this function has the same name as another test_wait in
    # this module; whichever definition comes later at import time shadows the
    # earlier one, so pytest collects only one of them — consider renaming.
    @tornado.gen.coroutine
    def producer():
        # Put each message, pausing between puts, then request shutdown
        for msg in to_produce:
            queue.put(msg)
            yield tornado.gen.sleep(sleep_timeout)
        queue.shutdown()

    @tornado.gen.coroutine
    def consumer():
        # Drain until shutdown is requested and queue is empty, then stop loop
        while not queue.to_shutdown or queue.qsize()[0]:
            yield queue.wait(1)
            consumed["data"] += list(queue.iter_get(100))
        io_loop.stop()

    sleep_timeout = 0.1
    to_produce = ["%04d" % i for i in range(10)]
    consumed = {"data": []}
    queue = TopicQueue("test_wait")
    io_loop = tornado.ioloop.IOLoop()
    io_loop.add_callback(producer)
    io_loop.add_callback(consumer)
    # start() blocks until consumer() calls io_loop.stop()
    io_loop.start()
    assert to_produce == consumed["data"]
async def nsq_publisher(self, queue: TopicQueue):
    """
    Publisher for NSQ topic.

    Runs until the queue is shut down AND fully drained: throttles,
    pulls a batch of messages from the queue, publishes them with
    mpub(), and on publish failure either drops (during shutdown) or
    returns the batch to the queue. Notifies the queue when done.

    :param queue: TopicQueue feeding messages for this topic
    :return:
    """
    topic = queue.topic
    self.logger.info("[nsq|%s] Starting NSQ publisher", topic)
    # Keep publishing while not shut down, or while messages remain after shutdown
    while not queue.to_shutdown or not queue.is_empty():
        # Message throttling. Wait and allow to collect more messages
        await queue.wait(timeout=10, rate=config.nsqd.topic_mpub_rate)
        # Get next batch up to `mpub_messages` messages or up to `mpub_size` size
        # overheads account for NSQ mpub framing — TODO confirm against protocol
        messages = list(
            queue.iter_get(
                n=config.nsqd.mpub_messages,
                size=config.nsqd.mpub_size,
                total_overhead=4,
                message_overhead=4,
            )
        )
        if not messages:
            continue
        try:
            self.logger.debug("[nsq|%s] Publishing %d messages", topic, len(messages))
            await mpub(topic, messages, dcs=self.dcs)
        except NSQPubError:
            if queue.to_shutdown:
                # No point requeueing — nothing will drain the queue again
                self.logger.debug(
                    "[nsq|%s] Failed to publish during shutdown. Dropping messages", topic
                )
            else:
                # Return to queue
                self.logger.info(
                    "[nsq|%s] Failed to publish. %d messages returned to queue",
                    topic,
                    len(messages),
                )
                queue.return_messages(messages)
        del messages  # Release memory
    self.logger.info("[nsq|%s] Stopping NSQ publisher", topic)
    # Queue is shut down and empty, notify
    queue.notify_shutdown()
def test_get_limit(input, get_kwargs, expected):
    """iter_get() honors its count/size limits for the parametrized inputs."""
    queue = TopicQueue("test_get_limit")
    # Preload the queue with the given messages
    for item in input:
        queue.put(item)
    # Fetch with the requested limits and compare against the expectation
    fetched = list(queue.iter_get(**get_kwargs))
    assert fetched == expected
def test_put_order(put_kwargs, expected):
    """Messages come back in the order implied by the put() arguments."""
    queue = TopicQueue("test_put_order")
    # Enqueue each parametrized put() call
    for kwargs in put_kwargs:
        queue.put(**kwargs)
    # Drain everything at once and verify ordering
    fetched = list(queue.iter_get(len(put_kwargs)))
    assert fetched == expected
def test_metrics():
    """apply_metrics() reports put/get/requeue counters, leaving other keys alone."""
    queue = TopicQueue("test_metrics")
    k = ("topic", queue.topic)
    metrics = {"other": 1}

    def check(put_n, put_size, get_n, get_size, req_n, req_size):
        # Refresh metrics and verify every counter for this topic
        queue.apply_metrics(metrics)
        assert metrics.get("other") == 1  # Untouched
        assert metrics.get(("nsq_msg_put", k)) == put_n
        assert metrics.get(("nsq_msg_put_size", k)) == put_size
        assert metrics.get(("nsq_msg_get", k)) == get_n
        assert metrics.get(("nsq_msg_get_size", k)) == get_size
        assert metrics.get(("nsq_msg_requeued", k)) == req_n
        assert metrics.get(("nsq_msg_requeued_size", k)) == req_size

    # Initial metrics are zeroed
    check(0, 0, 0, 0, 0, 0)
    # Put 100 messages of 10 octets each
    for i in range(100):
        queue.put("%10d" % i)
    check(100, 1000, 0, 0, 0, 0)
    # Get 50 messages of 10 octets each
    msgs = list(queue.iter_get(50))
    check(100, 1000, 50, 500, 0, 0)
    # Return 10 messages back to queue
    queue.return_messages(msgs[:10])
    check(100, 1000, 50, 500, 10, 100)
    # Get 60 messages (50 left + 10 returned)
    list(queue.iter_get(60))
    check(100, 1000, 110, 1100, 10, 100)
def test_is_empty(items, expected):
    """is_empty() reflects whether any items were put on the queue."""
    queue = TopicQueue("test_is_empty")
    for entry in items:
        queue.put(entry)
    # is_empty() must return the exact boolean, hence `is`
    assert queue.is_empty() is expected
def test_shutdown():
    """After shutdown: no puts/returns/re-shutdown, but draining still works."""
    queue = TopicQueue("test_shutdown")
    # Fill queue with 10 items
    to_produce = ["%04d" % i for i in range(10)]
    for item in to_produce:
        queue.put(item)
    # Consume the first five
    consumed = list(queue.iter_get(5))
    # First shutdown succeeds
    queue.shutdown()
    # Shutting down twice raises RuntimeError
    with pytest.raises(RuntimeError):
        queue.shutdown()
    # put() after shutdown raises RuntimeError
    with pytest.raises(RuntimeError):
        queue.put("9999")
    # return_messages() after shutdown raises RuntimeError
    with pytest.raises(RuntimeError):
        queue.return_messages(["9999"])
    # Remaining items can still be drained after shutdown
    consumed += list(queue.iter_get(10))
    # Everything produced was eventually consumed, in order
    assert to_produce == consumed