Example #1
    def test_stopped_exception(self):
        """Check Consumer_consume raises exception if handle was stopped"""
        consumer = _rd_kafka.Consumer(brokers=get_bytes(self.kafka.brokers),
                                      topic_name=self.topic_name,
                                      partition_ids=self.partition_ids,
                                      start_offsets=self.start_offsets)
        consumer.stop()
        with self.assertRaises(RdKafkaStoppedException):
            consumer.consume(1)
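
These excerpts all rely on fixture attributes (self.kafka, self.topic_name, self.partition_ids, self.start_offsets) and names (_rd_kafka, get_bytes, RdKafkaException, RdKafkaStoppedException) defined outside the snippets. The sketch below shows one way such a fixture might look; the import paths, the placeholder cluster object, and the chosen start offset are assumptions, not taken from the examples.

    import unittest

    from pykafka.rdkafka import _rd_kafka                   # assumed import path
    from pykafka.utils.compat import get_bytes              # assumed import path
    from pykafka.exceptions import (RdKafkaException,       # assumed import path
                                    RdKafkaStoppedException)


    class _FakeCluster(object):
        """Stand-in for the test harness; a real suite would start a Kafka cluster."""
        brokers = "localhost:9092"


    class RdKafkaConsumerTests(unittest.TestCase):
        def setUp(self):
            self.kafka = _FakeCluster()
            self.topic_name = b"rdkafka-test-topic"
            self.partition_ids = [0]
            # -2 is librdkafka's OFFSET_BEGINNING sentinel: start each
            # partition at the earliest available offset.
            self.start_offsets = [-2 for _ in self.partition_ids]
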
Example #2
    def test_start_fail(self):
        """See if Consumer_start cleans up upon failure"""
        with self.assert_thread_cnt_non_increasing():
            consumer = _rd_kafka.Consumer()
            with self.assertRaises(RdKafkaException):
                consumer.start(
                    brokers=b"",  # this causes the exception
                    topic_name=self.topic_name,
                    partition_ids=self.partition_ids,
                    start_offsets=self.start_offsets)
Example #3
    def test_stop(self):
        """Check Consumer_stop really shuts down the librdkafka consumer

        This is to deal with the fact that librdkafka's _destroy functions are
        all async, and therefore we don't get direct feedback if we didn't
        clean up in the correct order, yet the underlying consumer may remain
        up even if the python object is long gone.  Getting a zero thread
        count in the test gives some reassurance that we didn't leave any
        loose ends.
        """
        with self.assert_thread_cnt_non_increasing():
            consumer = _rd_kafka.Consumer()
            consumer.configure(conf=[])
            consumer.configure(topic_conf=[])
            consumer.start(brokers=get_bytes(self.kafka.brokers),
                           topic_name=self.topic_name,
                           partition_ids=self.partition_ids,
                           start_offsets=self.start_offsets)
            consumer.consume(100)  # just to reliably get some threads going
            consumer.stop()
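
The assert_thread_cnt_non_increasing context manager used in examples #2 and #3 is not part of the excerpts. Below is a hypothetical sketch of such a helper as a test-case mixin: it counts OS-level threads via /proc (Linux only), because librdkafka's worker threads are C threads invisible to Python's threading module, and it polls briefly since librdkafka's *_destroy functions tear threads down asynchronously. pykafka's actual helper may instead use librdkafka's own rd_kafka_thread_cnt(); this is only an illustration of the assertion the examples depend on.

    import os
    import time
    import unittest
    from contextlib import contextmanager


    def _os_thread_cnt():
        """Number of OS-level threads in the current process (Linux-specific)."""
        return len(os.listdir("/proc/self/task"))


    class ThreadCountAssertions(unittest.TestCase):
        @contextmanager
        def assert_thread_cnt_non_increasing(self, settle_secs=2.0):
            """Fail if more OS threads are alive after the block than before."""
            before = _os_thread_cnt()
            yield
            # Give librdkafka's asynchronous teardown a moment to finish
            # before taking the final count.
            deadline = time.monotonic() + settle_secs
            while _os_thread_cnt() > before and time.monotonic() < deadline:
                time.sleep(0.1)
            self.assertLessEqual(_os_thread_cnt(), before)

A test class mixing this in would use it exactly as in examples #2 and #3, wrapping the consumer's lifecycle in `with self.assert_thread_cnt_non_increasing():`.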