def test_async_produce_compression_large_message(self):
    """Async-produce snappy-compressed messages and verify that both the
    delivery reports and the consumed messages round-trip the payload.

    First checks a single message end-to-end, then produces a batch of
    ten and drains the topic so later tests start from a clean state.
    """
    # TODO: make payload size bigger once pypy snappy compression issue is
    # fixed
    large_payload = b''.join([uuid4().bytes for _ in range(5)])
    consumer = self._get_consumer()
    prod = self._get_producer(compression=CompressionType.SNAPPY,
                              delivery_reports=True)
    prod.produce(large_payload)

    # A report of (message, None) indicates successful delivery.
    report = prod.get_delivery_report()
    self.assertEqual(report[0].value, large_payload)
    self.assertIsNone(report[1])
    message = consumer.consume()
    assert message.value == large_payload

    for _ in range(10):
        prod.produce(large_payload)

    # use retry logic to loop over delivery reports and ensure we can
    # produce a group of large messages
    reports = []

    def ensure_all_messages_produced():
        report = prod.get_delivery_report()
        reports.append(report)
        assert len(reports) == 10
    retry(ensure_all_messages_produced, retry_time=30, wait_between_tries=0.5)

    for report in reports:
        self.assertEqual(report[0].value, large_payload)
        self.assertIsNone(report[1])

    # cleanup and consume all messages
    msgs = []

    def ensure_all_messages_consumed():
        msg = consumer.consume()
        if msg:
            msgs.append(msg)
        assert len(msgs) == 10
    retry(ensure_all_messages_consumed, retry_time=15)
def test_async_produce_large_message(self):
    """Async-produce uncompressed ~800KB messages (with a 1s linger) and
    verify delivery reports and consumption round-trip the payload.

    Checks one message end-to-end, then produces ten more and drains the
    topic so later tests start from a clean state.
    """
    consumer = self._get_consumer()
    large_payload = b''.join([uuid4().bytes for _ in range(50000)])
    # Guard: the payload must stay under the broker's 1MB default limit.
    assert len(large_payload) / 1024 / 1024 < 1.0
    prod = self._get_producer(delivery_reports=True, linger_ms=1000)
    prod.produce(large_payload)

    # A report of (message, None) indicates successful delivery.
    report = prod.get_delivery_report()
    self.assertEqual(report[0].value, large_payload)
    self.assertIsNone(report[1])
    message = consumer.consume()
    assert message.value == large_payload

    for _ in range(10):
        prod.produce(large_payload)

    # use retry logic to loop over delivery reports and ensure we can
    # produce a group of large messages
    reports = []

    def ensure_all_messages_produced():
        report = prod.get_delivery_report()
        reports.append(report)
        assert len(reports) == 10
    retry(ensure_all_messages_produced, retry_time=30, wait_between_tries=0.5)

    for report in reports:
        self.assertEqual(report[0].value, large_payload)
        self.assertIsNone(report[1])

    # cleanup and consume all messages
    msgs = []

    def ensure_all_messages_consumed():
        msg = consumer.consume()
        if msg:
            msgs.append(msg)
        assert len(msgs) == 10
    retry(ensure_all_messages_consumed, retry_time=15)
def test_async_produce_compression_large_message(self):
    """Round-trip snappy-compressed payloads through an async producer,
    checking both delivery reports and the values read back by a consumer.
    """
    # TODO: make payload size bigger once pypy snappy compression issue is
    # fixed
    consumer = self._get_consumer()
    large_payload = b''.join(uuid4().bytes for _ in range(5))
    producer = self._get_producer(
        compression=CompressionType.SNAPPY,
        delivery_reports=True
    )

    # Single message first: a (message, None) report means success.
    producer.produce(large_payload)
    first_report = producer.get_delivery_report()
    self.assertEqual(first_report[0].value, large_payload)
    self.assertIsNone(first_report[1])
    assert consumer.consume().value == large_payload

    # Now a batch of ten large messages.
    for _ in range(10):
        producer.produce(large_payload)

    # Poll delivery reports under retry until all ten have arrived.
    delivery_reports = []

    def all_reports_received():
        delivery_reports.append(producer.get_delivery_report())
        assert len(delivery_reports) == 10
    retry(all_reports_received, retry_time=30, wait_between_tries=0.5)

    for rep in delivery_reports:
        self.assertEqual(rep[0].value, large_payload)
        self.assertIsNone(rep[1])

    # Drain the topic so subsequent tests start from a clean state.
    consumed = []

    def all_messages_consumed():
        received = consumer.consume()
        if received:
            consumed.append(received)
        assert len(consumed) == 10
    retry(all_messages_consumed, retry_time=15)
def test_async_produce_large_message(self):
    """Round-trip large (~800KB) uncompressed payloads through an async
    producer, checking delivery reports and consumed values.
    """
    consumer = self._get_consumer()
    large_payload = b''.join(uuid4().bytes for _ in range(50000))
    # Sanity guard: stay below a 1MB message size.
    assert len(large_payload) / 1024 / 1024 < 1.0
    producer = self._get_producer(delivery_reports=True)

    # Single message first: a (message, None) report means success.
    producer.produce(large_payload)
    first_report = producer.get_delivery_report()
    self.assertEqual(first_report[0].value, large_payload)
    self.assertIsNone(first_report[1])
    assert consumer.consume().value == large_payload

    # Now a batch of ten large messages.
    for _ in range(10):
        producer.produce(large_payload)

    # Poll delivery reports under retry until all ten have arrived.
    delivery_reports = []

    def all_reports_received():
        delivery_reports.append(producer.get_delivery_report())
        assert len(delivery_reports) == 10
    retry(all_reports_received, retry_time=30, wait_between_tries=0.5)

    for rep in delivery_reports:
        self.assertEqual(rep[0].value, large_payload)
        self.assertIsNone(rep[1])

    # Drain the topic so subsequent tests start from a clean state.
    consumed = []

    def all_messages_consumed():
        received = consumer.consume()
        if received:
            consumed.append(received)
        assert len(consumed) == 10
    retry(all_messages_consumed, retry_time=15)