def test_consumer_with_connection_issues(self, local_file, s3_mocked, broker_stage_messages, event_loop):
    """Consumer survives one failed connection attempt and one mid-operation
    disconnect, and still drains every message from the validation topic.

    The FakeMQ is configured to fail its first connect and to drop the
    connection during the second operation; ReconnectingClient is expected
    to retry transparently.
    """
    total_messages = 4
    topic = 'platform.upload.validation'
    with FakeMQ(connection_failing_attempt_countdown=1, disconnect_in_operation=2) as mq:
        client = ReconnectingClient(mq, "consumer")
        consumer = client.get_callback(app.handle_validation)
        # Seed the fake topic directly, bypassing the app's produce queue.
        for _ in range(total_messages):
            message = self._create_message_s3(local_file, broker_stage_messages, avoid_produce_queue=True, topic=topic)
            mq.send_and_wait(topic, json.dumps(message).encode('utf-8'), True)
        # Preconditions: everything is queued on the broker, nothing consumed yet.
        assert mq.produce_calls_count == total_messages
        assert mq.count_topic_messages(topic) == total_messages
        assert len(app.produce_queue) == 0
        assert mq.consume_calls_count == 0
        event_loop.run_until_complete(self.coroutine_test(consumer))
        assert mq.consume_calls_count > 0
        assert mq.consume_return_messages_count == 1
        assert mq.count_topic_messages(topic) == 0
        # Both failure modes must actually have been exercised by FakeMQ.
        assert mq.disconnect_in_operation_called is True
        assert mq.trying_to_connect_failures_calls == 1
        # Every handled message is re-queued for downstream production.
        # (was a hard-coded 4; use total_messages for consistency)
        assert len(app.produce_queue) == total_messages
async def test_callback(event_loop):
    """The coroutine returned by get_callback runs to completion and yields None."""
    fake_queue = FakeMQ()
    reconnecting = ReconnectingClient(fake_queue, "callback")
    callback = reconnecting.get_callback(wrkr, done)
    pending = event_loop.create_task(callback())
    await pending
    assert pending.result() is None
def test_consumer_with_validation_unknown(self, local_file, s3_mocked, broker_stage_messages, event_loop):
    """Messages carrying validation='unknown' are consumed from the topic
    but nothing is re-queued for production afterwards.

    Fixed: the original built a `produced_messages` list that was never
    read — removed the unused local.
    """
    total_messages = 4
    topic = 'platform.upload.validation'
    # The reject bucket must exist for the 'unknown' handling path.
    s3_storage.s3.create_bucket(Bucket=s3_storage.REJECT)
    with FakeMQ() as mq:
        client = ReconnectingClient(mq, "consumer")
        consumer = client.get_callback(app.handle_validation)
        # Seed the fake topic directly, bypassing the app's produce queue.
        for _ in range(total_messages):
            message = self._create_message_s3(
                local_file,
                broker_stage_messages,
                avoid_produce_queue=True,
                topic=topic,
                validation='unknown'
            )
            mq.send_and_wait(topic, json.dumps(message).encode('utf-8'), True)
        # Preconditions: everything queued on the broker, nothing consumed yet.
        assert mq.produce_calls_count == total_messages
        assert mq.count_topic_messages(topic) == total_messages
        assert len(app.produce_queue) == 0
        assert mq.consume_calls_count == 0
        event_loop.run_until_complete(self.coroutine_test(consumer))
        assert mq.consume_calls_count > 0
        assert mq.consume_return_messages_count == 1
        assert mq.count_topic_messages(topic) == 0
        # No connection trouble was configured, so none should be reported.
        assert mq.disconnect_in_operation_called is False
        assert mq.trying_to_connect_failures_calls == 0
        # 'unknown' validation results must not be re-queued downstream.
        assert len(app.produce_queue) == 0
async def test_callback_retries(event_loop):
    """A callback built over a failing queue still finishes (after retries) with None."""
    failing_queue = FakeMQFails()
    reconnecting = ReconnectingClient(failing_queue, "callback_retries", retry_interval=0.1)
    callback = reconnecting.get_callback(wrkr_fails, done)
    pending = event_loop.create_task(callback())
    await pending
    assert pending.result() is None
async def test_with_finalizer(event_loop):
    """run() invokes the supplied finalizer once the worker completes."""
    fake_queue = FakeMQ()
    reconnecting = ReconnectingClient(fake_queue, "with_finalizer")
    calls = []

    def finalizer():
        # record the invocation so the assertion below can see it
        calls.append(True)

    pending = event_loop.create_task(reconnecting.run(wrkr, done, finalizer))
    await pending
    assert pending.result() is None
    assert calls == [True]
def test_producer_with_s3_bucket(self, local_file, s3_mocked, broker_stage_messages, event_loop):
    """Producer drains the app's produce queue into the (fake) broker.

    Fixed: the original used a list comprehension purely for its side
    effects; replaced with a plain for loop (idiom).
    """
    total_messages = 4
    # Populate app.produce_queue via the message-creation helper.
    for _ in range(total_messages):
        self._create_message_s3(local_file, broker_stage_messages)
    with FakeMQ() as mq:
        client = ReconnectingClient(mq, "producer")
        producer = client.get_callback(app.make_preprocessor())
        # Preconditions: nothing produced yet, queue fully loaded.
        assert mq.produce_calls_count == 0
        assert len(app.produce_queue) == total_messages
        event_loop.run_until_complete(self.coroutine_test(producer))
        # Every queued message was handed to the broker and the queue emptied.
        assert mq.produce_calls_count == total_messages
        assert len(app.produce_queue) == 0
        # No connection trouble was configured, so none should be reported.
        assert mq.disconnect_in_operation_called is False
        assert mq.trying_to_connect_failures_calls == 0
def test_consumer_with_s3_bucket(self, local_file, s3_mocked, broker_stage_messages, event_loop):
    """Consuming validation messages moves each payload from the quarantine
    bucket to the permanent bucket and re-queues it for production.
    """
    total_messages = 4
    topic = 'platform.upload.validation'
    produced_messages = []
    with FakeMQ() as mq:
        client = ReconnectingClient(mq, "consumer")
        consumer = client.get_callback(app.handle_validation)
        # Seed the fake topic directly, bypassing the app's produce queue.
        for _ in range(total_messages):
            message = self._create_message_s3(local_file, broker_stage_messages, avoid_produce_queue=True, topic=topic)
            mq.send_and_wait(topic, json.dumps(message).encode('utf-8'), True)
            produced_messages.append(message)
        # Before consumption every payload sits in quarantine.
        for m in produced_messages:
            assert s3_storage.ls(
                s3_storage.QUARANTINE, m['payload_id']
            )['ResponseMetadata']['HTTPStatusCode'] == 200
        assert mq.produce_calls_count == total_messages
        assert mq.count_topic_messages(topic) == total_messages
        assert len(app.produce_queue) == 0
        assert mq.consume_calls_count == 0
        event_loop.run_until_complete(self.coroutine_test(consumer))
        # After consumption each payload has moved quarantine -> permanent.
        for m in produced_messages:
            assert s3_storage.ls(
                s3_storage.QUARANTINE, m['payload_id']
            )['ResponseMetadata']['HTTPStatusCode'] == 404
            assert s3_storage.ls(
                s3_storage.PERM, m['payload_id']
            )['ResponseMetadata']['HTTPStatusCode'] == 200
        assert mq.consume_calls_count > 0
        assert mq.consume_return_messages_count == 1
        assert mq.count_topic_messages(topic) == 0
        # No connection trouble was configured, so none should be reported.
        assert mq.disconnect_in_operation_called is False
        assert mq.trying_to_connect_failures_calls == 0
        # Every handled message is re-queued for downstream production.
        # (was a hard-coded 4; use total_messages for consistency)
        assert len(app.produce_queue) == total_messages
# Module-level wiring: logging handler, worker pool, event loop, and the
# Kafka consumer/producer pair wrapped in ReconnectingClient.  Statement
# order matters here (the loop must exist before the aiokafka clients).
logger.addHandler(cw_handler)
thread_pool_executor = ThreadPoolExecutor( max_workers=configuration.MAX_WORKERS)
loop = asyncio.get_event_loop()
kafka_consumer = AIOKafkaConsumer(configuration.PUP_QUEUE, loop=loop, bootstrap_servers=configuration.MQ, group_id=configuration.MQ_GROUP_ID)
kafka_producer = AIOKafkaProducer(loop=loop, bootstrap_servers=configuration.MQ, request_timeout_ms=10000, connections_max_idle_ms=None)
CONSUMER = ReconnectingClient(kafka_consumer, "consumer")
PRODUCER = ReconnectingClient(kafka_producer, "producer")
# local queue for pushing items into kafka, this queue fills up if kafka goes down
# (bounded at 999 entries — oldest items are dropped once full)
produce_queue = collections.deque([], 999)
async def consume(client):
    # Pull a batch of messages from Kafka; only messages from the
    # configured PUP queue topic are dispatched to handle_file.
    data = await client.getmany()
    for tp, msgs in data.items():
        if tp.topic == configuration.PUP_QUEUE:
            logger.info("received messages: %s", msgs)
            # fire-and-forget: processing happens on the shared event loop
            loop.create_task(handle_file(msgs))
    # brief pause so the poll loop doesn't spin hot when the topic is idle
    await asyncio.sleep(0.1)
# Module-level wiring: event loop, Kafka clients (a dedicated producer for
# system-profile messages alongside the main pair), their ReconnectingClient
# wrappers, and metric-instrumented local queues.
loop = asyncio.get_event_loop()
kafka_consumer = AIOKafkaConsumer(configuration.PUP_QUEUE, loop=loop, bootstrap_servers=configuration.MQ, group_id=configuration.MQ_GROUP_ID)
kafka_producer = AIOKafkaProducer(loop=loop, bootstrap_servers=configuration.MQ, request_timeout_ms=10000, connections_max_idle_ms=None)
system_profile_producer = AIOKafkaProducer(loop=loop, bootstrap_servers=configuration.MQ, request_timeout_ms=10000, connections_max_idle_ms=None)
CONSUMER = ReconnectingClient(kafka_consumer, "consumer")
PRODUCER = ReconnectingClient(kafka_producer, "producer")
SYSTEM_PROFILE_PRODUCER = ReconnectingClient(system_profile_producer, "system-profile-producer")
# local queue for pushing items into kafka, this queue fills up if kafka goes down
# (unbounded here, unlike the capped variant elsewhere — TODO confirm intent)
produce_queue = collections.deque()
mnm.produce_queue_size.set_function(lambda: len(produce_queue))
system_profile_queue = collections.deque()
mnm.system_profile_queue_size.set_function(lambda: len(system_profile_queue))
# archives currently being processed, exposed as a gauge metric
current_archives = []
mnm.current_archives_size.set_function(lambda: len(current_archives))
def get_extra(account="unknown", request_id="unknown"): """Add extra indexable fields for logging.
async def test_run(event_loop):
    """run() completes against a healthy queue and returns None."""
    fake_queue = FakeMQ()
    reconnecting = ReconnectingClient(fake_queue, "callback")
    pending = event_loop.create_task(reconnecting.run(wrkr, done))
    await pending
    assert pending.result() is None
async def test_work_fails():
    """work() on a failing queue does not raise, and start() eventually connects."""
    failing_queue = FakeMQFails()
    reconnecting = ReconnectingClient(failing_queue, "works_fails", retry_interval=0.1)
    await reconnecting.work(wrkr_fails)
    await reconnecting.start()
    assert reconnecting.connected is True
async def test_work():
    """work() runs a worker against a healthy queue without raising."""
    fake_queue = FakeMQ()
    reconnecting = ReconnectingClient(fake_queue, "works")
    await reconnecting.work(wrkr)
async def test_reconnects():
    """start() retries a failing queue until the connection succeeds."""
    failing_queue = FakeMQFails()
    reconnecting = ReconnectingClient(failing_queue, "reconnects", retry_interval=0.1)
    await reconnecting.start()
    assert reconnecting.connected is True
async def test_connects():
    """start() connects on the first attempt when the queue is healthy."""
    fake_queue = FakeMQ()
    reconnecting = ReconnectingClient(fake_queue, "connects")
    await reconnecting.start()
    assert reconnecting.connected is True