Example #1
class ConsoleConsumerTest(Test):
    """Sanity checks on console consumer service class."""
    def __init__(self, test_context):
        super(ConsoleConsumerTest, self).__init__(test_context)

        self.topic = "topic"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(
            test_context,
            num_nodes=1,
            zk=self.zk,
            topics={self.topic: {
                "partitions": 1,
                "replication-factor": 1
            }})
        self.consumer = ConsoleConsumer(test_context,
                                        num_nodes=1,
                                        kafka=self.kafka,
                                        topic=self.topic)

    def setUp(self):
        self.zk.start()
        self.kafka.start()

    def test_lifecycle(self):
        t0 = time.time()
        self.consumer.start()
        node = self.consumer.nodes[0]

        if not wait_until(lambda: self.consumer.alive(node),
                          timeout_sec=10,
                          backoff_sec=.2):
            raise Exception("Consumer was too slow to start")
        self.logger.info("consumer started in %s seconds" % (time.time() - t0))

        # Verify that log output is happening
        if not wait_until(lambda: file_exists(node, ConsoleConsumer.LOG_FILE),
                          timeout_sec=10):
            raise Exception("Timed out waiting for log file to exist")
        assert line_count(node, ConsoleConsumer.LOG_FILE) > 0

        # Verify no consumed messages
        assert line_count(node, ConsoleConsumer.STDOUT_CAPTURE) == 0

        self.consumer.stop_node(node)
        if not wait_until(lambda: not self.consumer.alive(node),
                          timeout_sec=10,
                          backoff_sec=.2):
            raise Exception("Took too long for consumer to die.")
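
Example #1 checks wait_until's boolean result directly. As a mental model, a minimal sketch of that polling loop is shown below; it is illustrative only, since the real helper lives in ducktape (ducktape.utils.util.wait_until) and newer releases raise TimeoutError with err_msg instead of returning False.

import time

def wait_until_sketch(condition, timeout_sec, backoff_sec=0.1):
    """Poll condition() until it returns truthy or timeout_sec elapses."""
    deadline = time.time() + timeout_sec
    while time.time() < deadline:
        if condition():
            return True
        time.sleep(backoff_sec)
    return False  # callers like Example #1 test this return value

# Toy usage: the condition becomes true roughly one second in.
t0 = time.time()
assert wait_until_sketch(lambda: time.time() - t0 > 1, timeout_sec=5, backoff_sec=0.2)
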
Example #2
class GetOffsetShellTest(Test):
    """
    Tests the GetOffsetShell tool
    """
    def __init__(self, test_context):
        super(GetOffsetShellTest, self).__init__(test_context)
        self.num_zk = 1
        self.num_brokers = 1
        self.messages_received_count = 0
        self.topics = {
            TOPIC: {'partitions': NUM_PARTITIONS, 'replication-factor': REPLICATION_FACTOR}
        }

        self.zk = ZookeeperService(test_context, self.num_zk)


    def setUp(self):
        self.zk.start()

    def start_kafka(self, security_protocol, interbroker_security_protocol):
        self.kafka = KafkaService(
            self.test_context, self.num_brokers,
            self.zk, security_protocol=security_protocol,
            interbroker_security_protocol=interbroker_security_protocol, topics=self.topics)
        self.kafka.start()

    def start_producer(self):
        # Produce MAX_MESSAGES messages to the Kafka cluster
        self.producer = VerifiableProducer(self.test_context, num_nodes=1, kafka=self.kafka, topic=TOPIC, throughput=1000, max_messages=MAX_MESSAGES)
        self.producer.start()
        current_acked = self.producer.num_acked
        wait_until(lambda: self.producer.num_acked >= current_acked + MAX_MESSAGES, timeout_sec=10,
                   err_msg="Timeout awaiting messages to be produced and acked")

    def start_consumer(self):
        self.consumer = ConsoleConsumer(self.test_context, num_nodes=self.num_brokers, kafka=self.kafka, topic=TOPIC,
                                        consumer_timeout_ms=1000)
        self.consumer.start()

    @cluster(num_nodes=4)
    def test_get_offset_shell(self, security_protocol='PLAINTEXT'):
        """
        Tests that GetOffsetShell reports offsets correctly
        :return: None
        """
        self.start_kafka(security_protocol, security_protocol)
        self.start_producer()

        # Sanity-check that GetOffsetShell returns offsets for the topic before any consumer starts
        assert self.kafka.get_offset_shell(TOPIC, None, 1000, 1, -1), \
            "Expected GetOffsetShell to return a non-empty offset listing for %s" % TOPIC

        self.start_consumer()

        node = self.consumer.nodes[0]

        wait_until(lambda: self.consumer.alive(node), timeout_sec=20, backoff_sec=.2, err_msg="Consumer was too slow to start")

        # Assert that offset is correctly indicated by GetOffsetShell tool
        wait_until(lambda: "%s:%s:%s" % (TOPIC, NUM_PARTITIONS - 1, MAX_MESSAGES) in self.kafka.get_offset_shell(TOPIC, None, 1000, 1, -1), timeout_sec=10,
                   err_msg="Timed out waiting to reach expected offset.")
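
The membership checks above depend on GetOffsetShell printing one "topic:partition:offset" line per partition. A sketch of parsing that output into a dict; parse_offsets is illustrative and not part of the kafkatest API.

def parse_offsets(output):
    """Map (topic, partition) -> offset from GetOffsetShell stdout."""
    offsets = {}
    for line in output.strip().split("\n"):
        if not line:
            continue
        topic, partition, offset = line.rsplit(":", 2)
        offsets[(topic, int(partition))] = int(offset)
    return offsets

assert parse_offsets("topic:0:100\ntopic:1:97\n") == {("topic", 0): 100, ("topic", 1): 97}
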
Example #3
class Log4jAppenderTest(KafkaTest):
    """
    Tests KafkaLog4jAppender using VerifiableKafkaLog4jAppender that appends increasing ints to a Kafka topic
    """
    def __init__(self, test_context):
        super(Log4jAppenderTest, self).__init__(test_context, num_zk=1, num_brokers=1, topics={
            TOPIC: {'partitions': 1, 'replication-factor': 1}
        })
        self.num_nodes = 1

        self.appender = KafkaLog4jAppender(self.test_context, self.num_nodes, self.kafka, TOPIC, MAX_MESSAGES)
        self.consumer = ConsoleConsumer(self.test_context, num_nodes=self.num_nodes, kafka=self.kafka, topic=TOPIC, consumer_timeout_ms=1000)

    def test_log4j_appender(self):
        """
        Tests that KafkaLog4jAppender produces to the Kafka topic
        :return: None
        """
        self.appender.start()
        self.appender.wait()

        t0 = time.time()
        self.consumer.start()
        node = self.consumer.nodes[0]

        wait_until(lambda: self.consumer.alive(node),
            timeout_sec=10, backoff_sec=.2, err_msg="Consumer was too slow to start")
        self.logger.info("consumer started in %s seconds" % (time.time() - t0))

        # Verify consumed messages count
        expected_lines_count = MAX_MESSAGES * 2  # doubled to account for the extra newlines introduced by log4j
        wait_until(lambda: len(self.consumer.messages_consumed[1]) == expected_lines_count, timeout_sec=10,
                   err_msg="Timed out waiting to consume expected number of messages.")

        self.consumer.stop_node(node)
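
The messages_consumed[1] lookup above is keyed by the 1-based index of the consumer node. A minimal sketch of that bookkeeping, assuming the service simply appends each consumed stdout line to a per-node list:

from collections import defaultdict

messages_consumed = defaultdict(list)  # 1-based node index -> consumed lines

def record_consumed(node_idx, line):
    messages_consumed[node_idx].append(line)

record_consumed(1, "42")
record_consumed(1, "")  # e.g. a blank line introduced by log4j
assert len(messages_consumed[1]) == 2
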
Example #4
class Log4jAppenderTest(Test):
    """
    Tests KafkaLog4jAppender using VerifiableKafkaLog4jAppender that appends increasing ints to a Kafka topic
    """
    def __init__(self, test_context):
        super(Log4jAppenderTest, self).__init__(test_context)
        self.num_zk = 1
        self.num_brokers = 1
        self.topics = {
            TOPIC: {'partitions': 1, 'replication-factor': 1}
        }

        self.zk = ZookeeperService(test_context, self.num_zk)

    def setUp(self):
        self.zk.start()

    def start_kafka(self, security_protocol, interbroker_security_protocol):
        self.kafka = KafkaService(
            self.test_context, self.num_brokers,
            self.zk, security_protocol=security_protocol,
            interbroker_security_protocol=interbroker_security_protocol, topics=self.topics)
        self.kafka.start()

    def start_appender(self, security_protocol):
        self.appender = KafkaLog4jAppender(self.test_context, self.num_brokers, self.kafka, TOPIC, MAX_MESSAGES,
                                           security_protocol=security_protocol)
        self.appender.start()

    def start_consumer(self, security_protocol):
        enable_new_consumer = security_protocol == SecurityConfig.SSL
        self.consumer = ConsoleConsumer(self.test_context, num_nodes=self.num_brokers, kafka=self.kafka, topic=TOPIC,
                                        consumer_timeout_ms=1000, new_consumer=enable_new_consumer)
        self.consumer.start()

    @matrix(security_protocol=['PLAINTEXT', 'SSL'])
    def test_log4j_appender(self, security_protocol='PLAINTEXT'):
        """
        Tests that KafkaLog4jAppender produces to the Kafka topic
        :return: None
        """
        self.start_kafka(security_protocol, security_protocol)
        self.start_appender(security_protocol)
        self.appender.wait()

        self.start_consumer(security_protocol)
        node = self.consumer.nodes[0]

        wait_until(lambda: self.consumer.alive(node),
            timeout_sec=10, backoff_sec=.2, err_msg="Consumer was too slow to start")

        # Verify consumed messages count
        expected_lines_count = MAX_MESSAGES * 2  # doubled to account for the extra newlines introduced by log4j
        wait_until(lambda: len(self.consumer.messages_consumed[1]) == expected_lines_count, timeout_sec=10,
                   err_msg="Timed out waiting to consume expected number of messages.")

        self.consumer.stop()
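
The @matrix decorator used above runs the test once per combination of its keyword lists. A sketch of that expansion; expand_matrix is illustrative, not ducktape's actual implementation.

import itertools

def expand_matrix(**param_lists):
    """Yield one kwargs dict per point in the cartesian product."""
    keys = sorted(param_lists)
    for values in itertools.product(*(param_lists[k] for k in keys)):
        yield dict(zip(keys, values))

assert list(expand_matrix(security_protocol=['PLAINTEXT', 'SSL'])) == \
    [{'security_protocol': 'PLAINTEXT'}, {'security_protocol': 'SSL'}]
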
Example #5
class ConsoleConsumerTest(Test):
    """Sanity checks on console consumer service class."""
    def __init__(self, test_context):
        super(ConsoleConsumerTest, self).__init__(test_context)

        self.topic = "topic"
        self.zk = ZookeeperService(test_context, num_nodes=1)

    def setUp(self):
        self.zk.start()

    @parametrize(security_protocol='SSL', new_consumer=True)
    @matrix(security_protocol=['PLAINTEXT'], new_consumer=[False, True])
    def test_lifecycle(self, security_protocol, new_consumer):
        self.kafka = KafkaService(
            self.test_context,
            num_nodes=1,
            zk=self.zk,
            security_protocol=security_protocol,
            topics={self.topic: {
                "partitions": 1,
                "replication-factor": 1
            }})
        self.kafka.start()

        t0 = time.time()
        self.consumer = ConsoleConsumer(self.test_context,
                                        num_nodes=1,
                                        kafka=self.kafka,
                                        topic=self.topic,
                                        security_protocol=security_protocol,
                                        new_consumer=new_consumer)
        self.consumer.start()
        node = self.consumer.nodes[0]

        wait_until(lambda: self.consumer.alive(node),
                   timeout_sec=10,
                   backoff_sec=.2,
                   err_msg="Consumer was too slow to start")
        self.logger.info("consumer started in %s seconds" % (time.time() - t0))

        # Verify that log output is happening
        wait_until(lambda: file_exists(node, ConsoleConsumer.LOG_FILE),
                   timeout_sec=10,
                   err_msg="Timed out waiting for logging to start.")
        assert line_count(node, ConsoleConsumer.LOG_FILE) > 0

        # Verify no consumed messages
        assert line_count(node, ConsoleConsumer.STDOUT_CAPTURE) == 0

        self.consumer.stop_node(node)
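
Stacking @parametrize on @matrix, as test_lifecycle does above, yields the union of the matrix combinations and the single pinned case, three runs in total here. Illustrative enumeration:

cases = [{'security_protocol': p, 'new_consumer': n}
         for p in ['PLAINTEXT'] for n in [False, True]]           # from @matrix
cases.append({'security_protocol': 'SSL', 'new_consumer': True})  # from @parametrize
assert len(cases) == 3
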
Example #6
def get_messages_from_output_topic(self):
    consumer = ConsoleConsumer(context=self.test_context,
                               num_nodes=1,
                               kafka=self.kafka,
                               topic=self.output_topic,
                               new_consumer=True,
                               message_validator=is_int,
                               from_beginning=True,
                               consumer_timeout_ms=5000,
                               isolation_level="read_committed")
    consumer.start()
    # Ensure that the consumer is up.
    wait_until(lambda: consumer.alive(consumer.nodes[0]),
               timeout_sec=60,
               err_msg="Consumer failed to start for %ds" % 60)
    # Wait until the consumer closes, which happens 5 seconds after
    # receiving the last message (consumer_timeout_ms=5000).
    wait_until(lambda: not consumer.alive(consumer.nodes[0]),
               timeout_sec=60,
               err_msg="Consumer failed to consume %d messages in %ds" %
                       (self.num_seed_messages, 60))
    return consumer.messages_consumed[1]
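
The two waits above form a run-to-completion pattern for a service that exits when idle: wait for startup, then wait for shutdown (triggered here by consumer_timeout_ms=5000 once messages stop arriving). A reusable sketch, assuming ducktape is installed; run_to_completion is a hypothetical helper, not kafkatest API.

from ducktape.utils.util import wait_until

def run_to_completion(service, node, startup_sec=60, drain_sec=60):
    service.start()
    wait_until(lambda: service.alive(node), timeout_sec=startup_sec,
               err_msg="service failed to start")
    wait_until(lambda: not service.alive(node), timeout_sec=drain_sec,
               err_msg="service failed to finish in time")
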
Example #7
class ConsumerGroupCommandTest(Test):
    """
    Tests ConsumerGroupCommand
    """
    # Root directory for persistent output
    PERSISTENT_ROOT = "/mnt/consumer_group_command"
    COMMAND_CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "command.properties")

    def __init__(self, test_context):
        super(ConsumerGroupCommandTest, self).__init__(test_context)
        self.num_zk = 1
        self.num_brokers = 1
        self.topics = {TOPIC: {'partitions': 1, 'replication-factor': 1}}
        self.zk = ZookeeperService(test_context, self.num_zk)

    def setUp(self):
        self.zk.start()

    def start_kafka(self, security_protocol, interbroker_security_protocol):
        self.kafka = KafkaService(
            self.test_context,
            self.num_brokers,
            self.zk,
            security_protocol=security_protocol,
            interbroker_security_protocol=interbroker_security_protocol,
            topics=self.topics)
        self.kafka.start()

    def start_consumer(self, security_protocol):
        enable_new_consumer = security_protocol == SecurityConfig.SSL
        self.consumer = ConsoleConsumer(self.test_context,
                                        num_nodes=self.num_brokers,
                                        kafka=self.kafka,
                                        topic=TOPIC,
                                        consumer_timeout_ms=None,
                                        new_consumer=enable_new_consumer)
        self.consumer.start()

    def setup_and_verify(self, security_protocol, group=None):
        self.start_kafka(security_protocol, security_protocol)
        self.start_consumer(security_protocol)
        consumer_node = self.consumer.nodes[0]
        wait_until(lambda: self.consumer.alive(consumer_node),
                   timeout_sec=10,
                   backoff_sec=.2,
                   err_msg="Consumer was too slow to start")
        kafka_node = self.kafka.nodes[0]
        if security_protocol != SecurityConfig.PLAINTEXT:
            prop_file = str(self.kafka.security_config.client_config())
            self.logger.debug(prop_file)
            kafka_node.account.ssh("mkdir -p %s" % self.PERSISTENT_ROOT,
                                   allow_fail=False)
            kafka_node.account.create_file(self.COMMAND_CONFIG_FILE, prop_file)

        # Verify ConsumerGroupCommand lists expected consumer groups
        enable_new_consumer = security_protocol != SecurityConfig.PLAINTEXT
        command_config_file = None
        if enable_new_consumer:
            command_config_file = self.COMMAND_CONFIG_FILE

        if group:
            wait_until(
                lambda:
                ("%s, topic-consumer-group-command, 0," % group) in self.kafka.
                describe_consumer_group(group=group,
                                        node=kafka_node,
                                        new_consumer=enable_new_consumer,
                                        command_config=command_config_file),
                timeout_sec=10,
                err_msg="Timed out waiting to list expected consumer groups.")
        else:
            wait_until(
                lambda: "test-consumer-group" in self.kafka.
                list_consumer_groups(node=kafka_node,
                                     new_consumer=enable_new_consumer,
                                     command_config=command_config_file),
                timeout_sec=10,
                err_msg="Timed out waiting to list expected consumer groups.")

        self.consumer.stop()

    @matrix(security_protocol=['PLAINTEXT', 'SSL'])
    def test_list_consumer_groups(self, security_protocol='PLAINTEXT'):
        """
        Tests that ConsumerGroupCommand lists the expected consumer groups
        :return: None
        """
        self.setup_and_verify(security_protocol)

    @matrix(security_protocol=['PLAINTEXT', 'SSL'])
    def test_describe_consumer_group(self, security_protocol='PLAINTEXT'):
        """
        Tests that ConsumerGroupCommand describes a consumer group correctly
        :return: None
        """
        self.setup_and_verify(security_protocol, group="test-consumer-group")
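
For non-PLAINTEXT runs, setup_and_verify above renders the client security settings into command.properties so the shell tool can authenticate. A hedged sketch of writing such a file; the real keys and values come from SecurityConfig.client_config() in kafkatest, so the literals below are assumptions for illustration.

def write_command_config(account, path, props):
    """Render a dict of client properties as a Java-style properties file."""
    content = "\n".join("%s=%s" % (k, v) for k, v in sorted(props.items()))
    account.create_file(path, content + "\n")

# Hypothetical SSL client settings (keys and paths illustrative only):
ssl_props = {
    "security.protocol": "SSL",
    "ssl.truststore.location": "/mnt/security/test.truststore.jks",
}
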
Example #8
class Log4jAppenderTest(Test):
    """
    Tests KafkaLog4jAppender using VerifiableKafkaLog4jAppender that appends increasing ints to a Kafka topic
    """
    def __init__(self, test_context):
        super(Log4jAppenderTest, self).__init__(test_context)
        self.num_zk = 1
        self.num_brokers = 1
        self.topics = {TOPIC: {'partitions': 1, 'replication-factor': 1}}

        self.zk = ZookeeperService(test_context, self.num_zk)

    def setUp(self):
        self.zk.start()

    def start_kafka(self, security_protocol, interbroker_security_protocol):
        self.kafka = KafkaService(
            self.test_context,
            self.num_brokers,
            self.zk,
            security_protocol=security_protocol,
            interbroker_security_protocol=interbroker_security_protocol,
            topics=self.topics)
        self.kafka.start()

    def start_appender(self, security_protocol):
        self.appender = KafkaLog4jAppender(self.test_context,
                                           self.num_brokers,
                                           self.kafka,
                                           TOPIC,
                                           MAX_MESSAGES,
                                           security_protocol=security_protocol)
        self.appender.start()

    def start_consumer(self, security_protocol):
        enable_new_consumer = security_protocol == SecurityConfig.SSL
        self.consumer = ConsoleConsumer(self.test_context,
                                        num_nodes=self.num_brokers,
                                        kafka=self.kafka,
                                        topic=TOPIC,
                                        consumer_timeout_ms=1000,
                                        new_consumer=enable_new_consumer,
                                        security_protocol=security_protocol)
        self.consumer.start()

    @matrix(security_protocol=['PLAINTEXT', 'SSL'])
    def test_log4j_appender(self, security_protocol='PLAINTEXT'):
        """
        Tests that KafkaLog4jAppender produces to the Kafka topic
        :return: None
        """
        self.start_kafka(security_protocol, security_protocol)
        self.start_appender(security_protocol)
        self.appender.wait()

        self.start_consumer(security_protocol)
        node = self.consumer.nodes[0]

        wait_until(lambda: self.consumer.alive(node),
                   timeout_sec=10,
                   backoff_sec=.2,
                   err_msg="Consumer was too slow to start")

        # Verify consumed messages count
        expected_lines_count = MAX_MESSAGES * 2  # doubled to account for the extra newlines introduced by log4j
        wait_until(
            lambda: len(self.consumer.messages_consumed[1]) == expected_lines_count,
            timeout_sec=10,
            err_msg="Timed out waiting to consume expected number of messages.")

        self.consumer.stop()
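
A quick illustration of why expected_lines_count doubles MAX_MESSAGES in these appender tests: each appended integer reaches the consumer's stdout as a message line plus a blank line from the log4j layout. The capture below is fabricated for illustration.

MAX_MESSAGES = 3
stdout_capture = "1\n\n2\n\n3\n\n"  # illustrative consumer stdout
lines = stdout_capture.split("\n")[:-1]  # drop the trailing empty split
assert len(lines) == MAX_MESSAGES * 2
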
Example #9
class Log4jAppenderTest(Test):
    """
    Tests KafkaLog4jAppender using VerifiableKafkaLog4jAppender that appends increasing ints to a Kafka topic
    """
    def __init__(self, test_context):
        super(Log4jAppenderTest, self).__init__(test_context)
        self.num_zk = 1
        self.num_brokers = 1
        self.messages_received_count = 0
        self.topics = {TOPIC: {'partitions': 1, 'replication-factor': 1}}

        self.zk = ZookeeperService(test_context, self.num_zk)

    def setUp(self):
        self.zk.start()

    def start_kafka(self, security_protocol, interbroker_security_protocol):
        self.kafka = KafkaService(
            self.test_context,
            self.num_brokers,
            self.zk,
            security_protocol=security_protocol,
            interbroker_security_protocol=interbroker_security_protocol,
            topics=self.topics)
        self.kafka.start()

    def start_appender(self, security_protocol):
        self.appender = KafkaLog4jAppender(self.test_context,
                                           self.num_brokers,
                                           self.kafka,
                                           TOPIC,
                                           MAX_MESSAGES,
                                           security_protocol=security_protocol)
        self.appender.start()

    def custom_message_validator(self, msg):
        if msg and "INFO : org.apache.kafka.tools.VerifiableLog4jAppender" in msg:
            self.logger.debug("Received message: %s" % msg)
            self.messages_received_count += 1

    def start_consumer(self):
        self.consumer = ConsoleConsumer(
            self.test_context,
            num_nodes=self.num_brokers,
            kafka=self.kafka,
            topic=TOPIC,
            consumer_timeout_ms=10000,
            message_validator=self.custom_message_validator)
        self.consumer.start()

    @cluster(num_nodes=4)
    @matrix(security_protocol=['PLAINTEXT', 'SSL'])
    @cluster(num_nodes=5)
    @matrix(security_protocol=['SASL_PLAINTEXT', 'SASL_SSL'])
    def test_log4j_appender(self, security_protocol='PLAINTEXT'):
        """
        Tests that KafkaLog4jAppender produces to the Kafka topic
        :return: None
        """
        self.start_kafka(security_protocol, security_protocol)
        self.start_appender(security_protocol)
        self.appender.wait()

        self.start_consumer()
        node = self.consumer.nodes[0]

        wait_until(lambda: self.consumer.alive(node),
                   timeout_sec=20,
                   backoff_sec=.2,
                   err_msg="Consumer was too slow to start")

        # Verify consumed messages count
        wait_until(
            lambda: self.messages_received_count == MAX_MESSAGES,
            timeout_sec=10,
            err_msg="Timed out waiting to consume expected number of messages."
        )

        self.consumer.stop()
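
custom_message_validator above shows that message_validator receives every consumed line and may keep state across calls. The same counting idea as a self-contained callable class; the pass-through return value is an assumption about how the hook is used, not a documented contract.

class CountingValidator(object):
    """Count lines containing a marker; pass every message through unchanged."""
    def __init__(self, marker):
        self.marker = marker
        self.count = 0

    def __call__(self, msg):
        if msg and self.marker in msg:
            self.count += 1
        return msg

validator = CountingValidator("VerifiableLog4jAppender")
validator("INFO : org.apache.kafka.tools.VerifiableLog4jAppender 7")
assert validator.count == 1
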
Example #10
class ConsoleConsumerTest(Test):
    """Sanity checks on console consumer service class."""
    def __init__(self, test_context):
        super(ConsoleConsumerTest, self).__init__(test_context)

        self.topic = "topic"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(self.test_context, num_nodes=1, zk=self.zk,
                                  topics={self.topic: {"partitions": 1, "replication-factor": 1}})
        self.consumer = ConsoleConsumer(self.test_context, num_nodes=1, kafka=self.kafka, topic=self.topic, new_consumer=False)

    def setUp(self):
        self.zk.start()

    @cluster(num_nodes=3)
    @parametrize(security_protocol='PLAINTEXT', new_consumer=False)
    @matrix(security_protocol=['PLAINTEXT', 'SSL'])
    @cluster(num_nodes=4)
    @matrix(security_protocol=['SASL_SSL'], sasl_mechanism=['PLAIN', 'SCRAM-SHA-256', 'SCRAM-SHA-512'])
    @matrix(security_protocol=['SASL_PLAINTEXT', 'SASL_SSL'])
    def test_lifecycle(self, security_protocol, new_consumer=True, sasl_mechanism='GSSAPI'):
        """Check that console consumer starts/stops properly, and that we are capturing log output."""

        self.kafka.security_protocol = security_protocol
        self.kafka.client_sasl_mechanism = sasl_mechanism
        self.kafka.interbroker_sasl_mechanism = sasl_mechanism
        self.kafka.start()

        self.consumer.security_protocol = security_protocol
        self.consumer.new_consumer = new_consumer

        t0 = time.time()
        self.consumer.start()
        node = self.consumer.nodes[0]

        wait_until(lambda: self.consumer.alive(node),
            timeout_sec=10, backoff_sec=.2, err_msg="Consumer was too slow to start")
        self.logger.info("consumer started in %s seconds" % (time.time() - t0))

        # Verify that log output is happening
        wait_until(lambda: file_exists(node, ConsoleConsumer.LOG_FILE), timeout_sec=10,
                   err_msg="Timed out waiting for consumer log file to exist.")
        wait_until(lambda: line_count(node, ConsoleConsumer.LOG_FILE) > 0, timeout_sec=1,
                   backoff_sec=.25, err_msg="Timed out waiting for log entries to start.")

        # Verify no consumed messages
        assert line_count(node, ConsoleConsumer.STDOUT_CAPTURE) == 0

        self.consumer.stop_node(node)

    @cluster(num_nodes=4)
    def test_version(self):
        """Check that console consumer v0.8.2.X successfully starts and consumes messages."""
        self.kafka.start()

        num_messages = 1000
        self.producer = VerifiableProducer(self.test_context, num_nodes=1, kafka=self.kafka, topic=self.topic,
                                           max_messages=num_messages, throughput=1000)
        self.producer.start()
        self.producer.wait()

        self.consumer.nodes[0].version = LATEST_0_8_2
        self.consumer.consumer_timeout_ms = 1000
        self.consumer.start()
        self.consumer.wait()

        num_consumed = len(self.consumer.messages_consumed[1])
        num_produced = self.producer.num_acked
        assert num_produced == num_consumed, "num_produced: %d, num_consumed: %d" % (num_produced, num_consumed)
Example #11
class ConsumerGroupCommandTest(Test):
    """
    Tests ConsumerGroupCommand
    """
    # Root directory for persistent output
    PERSISTENT_ROOT = "/mnt/consumer_group_command"
    COMMAND_CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "command.properties")

    def __init__(self, test_context):
        super(ConsumerGroupCommandTest, self).__init__(test_context)
        self.num_zk = 1
        self.num_brokers = 1
        self.topics = {
            TOPIC: {'partitions': 1, 'replication-factor': 1}
        }
        self.zk = ZookeeperService(test_context, self.num_zk)

    def setUp(self):
        self.zk.start()

    def start_kafka(self, security_protocol, interbroker_security_protocol):
        self.kafka = KafkaService(
            self.test_context, self.num_brokers,
            self.zk, security_protocol=security_protocol,
            interbroker_security_protocol=interbroker_security_protocol, topics=self.topics)
        self.kafka.start()

    def start_consumer(self, security_protocol):
        enable_new_consumer = security_protocol == SecurityConfig.SSL
        self.consumer = ConsoleConsumer(self.test_context, num_nodes=self.num_brokers, kafka=self.kafka, topic=TOPIC,
                                        consumer_timeout_ms=None, new_consumer=enable_new_consumer)
        self.consumer.start()

    def setup_and_verify(self, security_protocol, group=None):
        self.start_kafka(security_protocol, security_protocol)
        self.start_consumer(security_protocol)
        consumer_node = self.consumer.nodes[0]
        wait_until(lambda: self.consumer.alive(consumer_node),
                   timeout_sec=10, backoff_sec=.2, err_msg="Consumer was too slow to start")
        kafka_node = self.kafka.nodes[0]
        if security_protocol != SecurityConfig.PLAINTEXT:
            prop_file = str(self.kafka.security_config.client_config())
            self.logger.debug(prop_file)
            kafka_node.account.ssh("mkdir -p %s" % self.PERSISTENT_ROOT, allow_fail=False)
            kafka_node.account.create_file(self.COMMAND_CONFIG_FILE, prop_file)

        # Verify ConsumerGroupCommand lists expected consumer groups
        enable_new_consumer = security_protocol != SecurityConfig.PLAINTEXT
        command_config_file = None
        if enable_new_consumer:
            command_config_file = self.COMMAND_CONFIG_FILE

        if group:
            wait_until(lambda: re.search("topic-consumer-group-command",
                                         self.kafka.describe_consumer_group(
                                             group=group, node=kafka_node,
                                             new_consumer=enable_new_consumer,
                                             command_config=command_config_file)),
                       timeout_sec=10,
                       err_msg="Timed out waiting to describe expected consumer group.")
        else:
            wait_until(lambda: "test-consumer-group" in self.kafka.list_consumer_groups(
                           node=kafka_node, new_consumer=enable_new_consumer,
                           command_config=command_config_file),
                       timeout_sec=10,
                       err_msg="Timed out waiting to list expected consumer groups.")

        self.consumer.stop()

    @matrix(security_protocol=['PLAINTEXT', 'SSL'])
    def test_list_consumer_groups(self, security_protocol='PLAINTEXT'):
        """
        Tests that ConsumerGroupCommand lists the expected consumer groups
        :return: None
        """
        self.setup_and_verify(security_protocol)

    @matrix(security_protocol=['PLAINTEXT', 'SSL'])
    def test_describe_consumer_group(self, security_protocol='PLAINTEXT'):
        """
        Tests that ConsumerGroupCommand describes a consumer group correctly
        :return: None
        """
        self.setup_and_verify(security_protocol, group="test-consumer-group")
Example #12
class Log4jAppenderTest(Test):
    """
    Tests KafkaLog4jAppender using VerifiableKafkaLog4jAppender that appends increasing ints to a Kafka topic
    """
    def __init__(self, test_context):
        super(Log4jAppenderTest, self).__init__(test_context)
        self.num_zk = 1
        self.num_brokers = 1
        self.messages_received_count = 0
        self.topics = {
            TOPIC: {'partitions': 1, 'replication-factor': 1}
        }

        self.zk = ZookeeperService(test_context, self.num_zk)

    def setUp(self):
        self.zk.start()

    def start_kafka(self, security_protocol, interbroker_security_protocol):
        self.kafka = KafkaService(
            self.test_context, self.num_brokers,
            self.zk, security_protocol=security_protocol,
            interbroker_security_protocol=interbroker_security_protocol, topics=self.topics)
        self.kafka.start()

    def start_appender(self, security_protocol):
        self.appender = KafkaLog4jAppender(self.test_context, self.num_brokers, self.kafka, TOPIC, MAX_MESSAGES,
                                           security_protocol=security_protocol)
        self.appender.start()

    def custom_message_validator(self, msg):
        if msg and "INFO : org.apache.kafka.tools.VerifiableLog4jAppender" in msg:
            self.logger.debug("Received message: %s" % msg)
            self.messages_received_count += 1


    def start_consumer(self, security_protocol):
        enable_new_consumer = security_protocol != SecurityConfig.PLAINTEXT
        self.consumer = ConsoleConsumer(self.test_context, num_nodes=self.num_brokers, kafka=self.kafka, topic=TOPIC,
                                        consumer_timeout_ms=1000, new_consumer=enable_new_consumer,
                                        message_validator=self.custom_message_validator)
        self.consumer.start()

    @matrix(security_protocol=['PLAINTEXT', 'SSL', 'SASL_PLAINTEXT', 'SASL_SSL'])
    def test_log4j_appender(self, security_protocol='PLAINTEXT'):
        """
        Tests that KafkaLog4jAppender produces to the Kafka topic
        :return: None
        """
        self.start_kafka(security_protocol, security_protocol)
        self.start_appender(security_protocol)
        self.appender.wait()

        self.start_consumer(security_protocol)
        node = self.consumer.nodes[0]

        wait_until(lambda: self.consumer.alive(node),
            timeout_sec=10, backoff_sec=.2, err_msg="Consumer was too slow to start")

        # Verify consumed messages count
        wait_until(lambda: self.messages_received_count == MAX_MESSAGES, timeout_sec=10,
                   err_msg="Timed out waiting to consume expected number of messages.")

        self.consumer.stop()
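
Note how this start_consumer widens the earlier security_protocol == SecurityConfig.SSL checks: any non-PLAINTEXT protocol forces the new consumer, since the legacy ZooKeeper-based console consumer only supports PLAINTEXT. The predicate, as a one-line sketch (needs_new_consumer is illustrative):

def needs_new_consumer(security_protocol):
    return security_protocol != "PLAINTEXT"

assert not needs_new_consumer("PLAINTEXT")
assert all(needs_new_consumer(p) for p in ("SSL", "SASL_PLAINTEXT", "SASL_SSL"))
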
Example #13
class ConsoleConsumerTest(Test):
    """Sanity checks on console consumer service class."""
    def __init__(self, test_context):
        super(ConsoleConsumerTest, self).__init__(test_context)

        self.topic = "topic"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(self.test_context, num_nodes=1, zk=self.zk, zk_chroot="/kafka",
                                  topics={self.topic: {"partitions": 1, "replication-factor": 1}})
        self.consumer = ConsoleConsumer(self.test_context, num_nodes=1, kafka=self.kafka, topic=self.topic)

    def setUp(self):
        self.zk.start()

    @cluster(num_nodes=3)
    @matrix(security_protocol=['PLAINTEXT', 'SSL'])
    @cluster(num_nodes=4)
    @matrix(security_protocol=['SASL_SSL'], sasl_mechanism=['PLAIN', 'SCRAM-SHA-256', 'SCRAM-SHA-512'])
    @matrix(security_protocol=['SASL_PLAINTEXT', 'SASL_SSL'])
    def test_lifecycle(self, security_protocol, sasl_mechanism='GSSAPI'):
        """Check that console consumer starts/stops properly, and that we are capturing log output."""

        self.kafka.security_protocol = security_protocol
        self.kafka.client_sasl_mechanism = sasl_mechanism
        self.kafka.interbroker_sasl_mechanism = sasl_mechanism
        self.kafka.start()

        self.consumer.security_protocol = security_protocol

        t0 = time.time()
        self.consumer.start()
        node = self.consumer.nodes[0]

        wait_until(lambda: self.consumer.alive(node),
            timeout_sec=20, backoff_sec=.2, err_msg="Consumer was too slow to start")
        self.logger.info("consumer started in %s seconds" % (time.time() - t0))

        # Verify that log output is happening
        wait_until(lambda: file_exists(node, ConsoleConsumer.LOG_FILE), timeout_sec=10,
                   err_msg="Timed out waiting for consumer log file to exist.")
        wait_until(lambda: line_count(node, ConsoleConsumer.LOG_FILE) > 0, timeout_sec=1,
                   backoff_sec=.25, err_msg="Timed out waiting for log entries to start.")

        # Verify no consumed messages
        assert line_count(node, ConsoleConsumer.STDOUT_CAPTURE) == 0

        self.consumer.stop_node(node)

    @cluster(num_nodes=4)
    def test_version(self):
        """Check that console consumer v0.8.2.X successfully starts and consumes messages."""
        self.kafka.start()

        num_messages = 1000
        self.producer = VerifiableProducer(self.test_context, num_nodes=1, kafka=self.kafka, topic=self.topic,
                                           max_messages=num_messages, throughput=1000)
        self.producer.start()
        self.producer.wait()

        self.consumer.nodes[0].version = LATEST_0_8_2
        self.consumer.new_consumer = False
        self.consumer.consumer_timeout_ms = 1000
        self.consumer.start()
        self.consumer.wait()

        num_consumed = len(self.consumer.messages_consumed[1])
        num_produced = self.producer.num_acked
        assert num_produced == num_consumed, "num_produced: %d, num_consumed: %d" % (num_produced, num_consumed)
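
test_version above closes with a strict produced-equals-consumed count. When duplicates matter as well as losses, comparing payload multisets is a slightly stronger check; a hedged sketch assuming integer payloads as produced by VerifiableProducer (delivered_exactly is a hypothetical helper):

from collections import Counter

def delivered_exactly(acked, consumed):
    """True iff every acked payload was consumed exactly once."""
    return Counter(acked) == Counter(consumed)

assert delivered_exactly([1, 2, 3], [3, 2, 1])
assert not delivered_exactly([1, 2, 3], [1, 2, 2, 3])  # duplicate detected
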
Example #14
class GetOffsetShellTest(Test):
    """
    Tests the GetOffsetShell tool
    """
    def __init__(self, test_context):
        super(GetOffsetShellTest, self).__init__(test_context)
        self.num_zk = 1
        self.num_brokers = 1
        self.messages_received_count = 0
        self.topics = {
            TOPIC_TEST_NAME: {
                'partitions': NUM_PARTITIONS,
                'replication-factor': REPLICATION_FACTOR
            },
            TOPIC_TEST_PATTERN1: {
                'partitions': 1,
                'replication-factor': REPLICATION_FACTOR
            },
            TOPIC_TEST_PATTERN2: {
                'partitions': 1,
                'replication-factor': REPLICATION_FACTOR
            },
            TOPIC_TEST_PARTITIONS: {
                'partitions': 2,
                'replication-factor': REPLICATION_FACTOR
            },
            TOPIC_TEST_INTERNAL_FILTER: {
                'partitions': 1,
                'replication-factor': REPLICATION_FACTOR
            },
            TOPIC_TEST_TOPIC_PARTITIONS1: {
                'partitions': 2,
                'replication-factor': REPLICATION_FACTOR
            },
            TOPIC_TEST_TOPIC_PARTITIONS2: {
                'partitions': 2,
                'replication-factor': REPLICATION_FACTOR
            }
        }

        self.zk = ZookeeperService(test_context, self.num_zk) \
            if quorum.for_test(test_context) == quorum.zk else None

    def setUp(self):
        if self.zk:
            self.zk.start()

    def start_kafka(self, security_protocol, interbroker_security_protocol):
        self.kafka = KafkaService(
            self.test_context,
            self.num_brokers,
            self.zk,
            security_protocol=security_protocol,
            interbroker_security_protocol=interbroker_security_protocol,
            topics=self.topics)
        self.kafka.start()

    def start_producer(self, topic):
        # Produce MAX_MESSAGES messages to the Kafka cluster
        self.producer = VerifiableProducer(self.test_context,
                                           num_nodes=1,
                                           kafka=self.kafka,
                                           topic=topic,
                                           throughput=1000,
                                           max_messages=MAX_MESSAGES,
                                           repeating_keys=MAX_MESSAGES)
        self.producer.start()
        current_acked = self.producer.num_acked
        wait_until(
            lambda: self.producer.num_acked >= current_acked + MAX_MESSAGES,
            timeout_sec=10,
            err_msg="Timeout awaiting messages to be produced and acked")

    def start_consumer(self, topic):
        self.consumer = ConsoleConsumer(self.test_context,
                                        num_nodes=self.num_brokers,
                                        kafka=self.kafka,
                                        topic=topic,
                                        consumer_timeout_ms=1000)
        self.consumer.start()

    def check_message_count_sum_equals(self, message_count, **kwargs):
        return self.extract_message_count_sum(**kwargs) == message_count

    def extract_message_count_sum(self, **kwargs):
        # Each GetOffsetShell output line has the form "<topic>:<partition>:<offset>"
        offsets = self.kafka.get_offset_shell(**kwargs).split("\n")
        total = 0
        for offset in offsets:
            if len(offset) == 0:
                continue
            total += int(offset.split(":")[-1])
        return total

    @cluster(num_nodes=3)
    @matrix(metadata_quorum=quorum.all_non_upgrade)
    def test_get_offset_shell_topic_name(self,
                                         security_protocol='PLAINTEXT',
                                         metadata_quorum=quorum.zk):
        """
        Tests that GetOffsetShell handles the --topic argument with a simple name correctly
        :return: None
        """
        self.start_kafka(security_protocol, security_protocol)
        self.start_producer(TOPIC_TEST_NAME)

        # Assert that offset is correctly indicated by GetOffsetShell tool
        wait_until(lambda: self.check_message_count_sum_equals(
            MAX_MESSAGES, topic=TOPIC_TEST_NAME),
                   timeout_sec=10,
                   err_msg="Timed out waiting to reach expected offset.")

    @cluster(num_nodes=4)
    @matrix(metadata_quorum=quorum.all_non_upgrade)
    def test_get_offset_shell_topic_pattern(self,
                                            security_protocol='PLAINTEXT',
                                            metadata_quorum=quorum.zk):
        """
        Tests that GetOffsetShell handles the --topic argument with a pattern correctly
        :return: None
        """
        self.start_kafka(security_protocol, security_protocol)
        self.start_producer(TOPIC_TEST_PATTERN1)
        self.start_producer(TOPIC_TEST_PATTERN2)

        # Assert that offset is correctly indicated by GetOffsetShell tool
        wait_until(lambda: self.check_message_count_sum_equals(
            2 * MAX_MESSAGES, topic=TOPIC_TEST_PATTERN_PATTERN),
                   timeout_sec=10,
                   err_msg="Timed out waiting to reach expected offset.")

    @cluster(num_nodes=3)
    @matrix(metadata_quorum=quorum.all_non_upgrade)
    def test_get_offset_shell_partitions(self,
                                         security_protocol='PLAINTEXT',
                                         metadata_quorum=quorum.zk):
        """
        Tests that GetOffsetShell handles the --partitions argument correctly
        :return: None
        """
        self.start_kafka(security_protocol, security_protocol)
        self.start_producer(TOPIC_TEST_PARTITIONS)

        def fetch_and_sum_partitions_separately():
            partition_count0 = self.extract_message_count_sum(
                topic=TOPIC_TEST_PARTITIONS, partitions="0")
            partition_count1 = self.extract_message_count_sum(
                topic=TOPIC_TEST_PARTITIONS, partitions="1")
            return partition_count0 + partition_count1 == MAX_MESSAGES

        # Assert that offset is correctly indicated when fetching partitions one by one
        wait_until(fetch_and_sum_partitions_separately,
                   timeout_sec=10,
                   err_msg="Timed out waiting to reach expected offset.")

        # Assert that offset is correctly indicated when fetching partitions together
        wait_until(lambda: self.check_message_count_sum_equals(
            MAX_MESSAGES, topic=TOPIC_TEST_PARTITIONS),
                   timeout_sec=10,
                   err_msg="Timed out waiting to reach expected offset.")

    @cluster(num_nodes=4)
    @matrix(metadata_quorum=quorum.all_non_upgrade)
    def test_get_offset_shell_topic_partitions(self,
                                               security_protocol='PLAINTEXT',
                                               metadata_quorum=quorum.zk):
        """
        Tests that GetOffsetShell handles the --topic-partitions argument correctly
        :return: None
        """
        self.start_kafka(security_protocol, security_protocol)
        self.start_producer(TOPIC_TEST_TOPIC_PARTITIONS1)
        self.start_producer(TOPIC_TEST_TOPIC_PARTITIONS2)

        # Assert that a single topic pattern matches all 4 partitions
        wait_until(lambda: self.check_message_count_sum_equals(
            2 * MAX_MESSAGES,
            topic_partitions=TOPIC_TEST_TOPIC_PARTITIONS_PATTERN),
                   timeout_sec=10,
                   err_msg="Timed out waiting to reach expected offset.")

        # Assert that a topic pattern with partition range matches all 4 partitions
        wait_until(lambda: self.check_message_count_sum_equals(
            2 * MAX_MESSAGES,
            topic_partitions=TOPIC_TEST_TOPIC_PARTITIONS_PATTERN + ":0-2"),
                   timeout_sec=10,
                   err_msg="Timed out waiting to reach expected offset.")

        # Assert that 2 separate topic patterns match all 4 partitions
        wait_until(lambda: self.check_message_count_sum_equals(
            2 * MAX_MESSAGES,
            topic_partitions=TOPIC_TEST_TOPIC_PARTITIONS1 + "," +
            TOPIC_TEST_TOPIC_PARTITIONS2),
                   timeout_sec=10,
                   err_msg="Timed out waiting to reach expected offset.")

        # Assert that 4 separate topic-partition patterns match all 4 partitions
        wait_until(lambda: self.check_message_count_sum_equals(
            2 * MAX_MESSAGES,
            topic_partitions=TOPIC_TEST_TOPIC_PARTITIONS1 + ":0," +
            TOPIC_TEST_TOPIC_PARTITIONS1 + ":1," + TOPIC_TEST_TOPIC_PARTITIONS2
            + ":0," + TOPIC_TEST_TOPIC_PARTITIONS2 + ":1"),
                   timeout_sec=10,
                   err_msg="Timed out waiting to reach expected offset.")

        # Assert that only partition #0 of each topic is matched by a topic pattern with a fixed partition number
        filtered_partitions = self.kafka.get_offset_shell(
            topic_partitions=TOPIC_TEST_TOPIC_PARTITIONS_PATTERN + ":0")
        assert 1 == filtered_partitions.count(
            "%s:%s" % (TOPIC_TEST_TOPIC_PARTITIONS1, 0))
        assert 0 == filtered_partitions.count(
            "%s:%s" % (TOPIC_TEST_TOPIC_PARTITIONS1, 1))
        assert 1 == filtered_partitions.count(
            "%s:%s" % (TOPIC_TEST_TOPIC_PARTITIONS2, 0))
        assert 0 == filtered_partitions.count(
            "%s:%s" % (TOPIC_TEST_TOPIC_PARTITIONS2, 1))

        # Assert that only partition #1 of each topic is matched by a topic pattern with a partition lower bound
        filtered_partitions = self.kafka.get_offset_shell(
            topic_partitions=TOPIC_TEST_TOPIC_PARTITIONS_PATTERN + ":1-")
        assert 1 == filtered_partitions.count(
            "%s:%s" % (TOPIC_TEST_TOPIC_PARTITIONS1, 1))
        assert 0 == filtered_partitions.count(
            "%s:%s" % (TOPIC_TEST_TOPIC_PARTITIONS1, 0))
        assert 1 == filtered_partitions.count(
            "%s:%s" % (TOPIC_TEST_TOPIC_PARTITIONS2, 1))
        assert 0 == filtered_partitions.count(
            "%s:%s" % (TOPIC_TEST_TOPIC_PARTITIONS2, 0))

        # Assert that only partition #0 of each topic is matched by a topic pattern with a partition upper bound
        filtered_partitions = self.kafka.get_offset_shell(
            topic_partitions=TOPIC_TEST_TOPIC_PARTITIONS_PATTERN + ":-1")
        assert 1 == filtered_partitions.count(
            "%s:%s" % (TOPIC_TEST_TOPIC_PARTITIONS1, 0))
        assert 0 == filtered_partitions.count(
            "%s:%s" % (TOPIC_TEST_TOPIC_PARTITIONS1, 1))
        assert 1 == filtered_partitions.count(
            "%s:%s" % (TOPIC_TEST_TOPIC_PARTITIONS2, 0))
        assert 0 == filtered_partitions.count(
            "%s:%s" % (TOPIC_TEST_TOPIC_PARTITIONS2, 1))

    @cluster(num_nodes=4)
    @matrix(metadata_quorum=quorum.all_non_upgrade)
    def test_get_offset_shell_internal_filter(self,
                                              security_protocol='PLAINTEXT',
                                              metadata_quorum=quorum.zk):
        """
        Tests that GetOffsetShell handles the --exclude-internal-topics flag correctly
        :return: None
        """
        self.start_kafka(security_protocol, security_protocol)
        self.start_producer(TOPIC_TEST_INTERNAL_FILTER)

        # Create consumer and poll messages to create consumer offset record
        self.start_consumer(TOPIC_TEST_INTERNAL_FILTER)
        node = self.consumer.nodes[0]
        wait_until(lambda: self.consumer.alive(node),
                   timeout_sec=20,
                   backoff_sec=.2,
                   err_msg="Consumer was too slow to start")

        # Assert that the test topic reports all produced messages
        wait_until(lambda: self.check_message_count_sum_equals(
            MAX_MESSAGES, topic_partitions=TOPIC_TEST_INTERNAL_FILTER),
                   timeout_sec=10,
                   err_msg="Timed out waiting to reach expected offset.")

        # No filters
        # Assert that without exclusion, we can find both the test topic and the __consumer_offsets internal topic
        offset_output = self.kafka.get_offset_shell()
        assert "__consumer_offsets" in offset_output
        assert TOPIC_TEST_INTERNAL_FILTER in offset_output

        # Assert that with exclusion, we can find the test topic but not the __consumer_offsets internal topic
        offset_output = self.kafka.get_offset_shell(
            exclude_internal_topics=True)
        assert "__consumer_offsets" not in offset_output
        assert TOPIC_TEST_INTERNAL_FILTER in offset_output

        # Topic filter
        # Assert that without exclusion, we can find both the test topic and the __consumer_offsets internal topic
        offset_output = self.kafka.get_offset_shell(topic=".*consumer_offsets")
        assert "__consumer_offsets" in offset_output
        assert TOPIC_TEST_INTERNAL_FILTER in offset_output

        # Assert that with exclusion, we can find the test topic but not the __consumer_offsets internal topic
        offset_output = self.kafka.get_offset_shell(
            topic=".*consumer_offsets", exclude_internal_topics=True)
        assert "__consumer_offsets" not in offset_output
        assert TOPIC_TEST_INTERNAL_FILTER in offset_output

        # Topic-partition filter
        # Assert that without exclusion, we can find both the test topic and the __consumer_offsets internal topic
        offset_output = self.kafka.get_offset_shell(
            topic_partitions=".*consumer_offsets:0")
        assert "__consumer_offsets" in offset_output
        assert TOPIC_TEST_INTERNAL_FILTER in offset_output

        # Assert that with exclusion, we can find the test topic but not the __consumer_offsets internal topic
        offset_output = self.kafka.get_offset_shell(
            topic_partitions=".*consumer_offsets:0",
            exclude_internal_topics=True)
        assert "__consumer_offsets" not in offset_output
        assert TOPIC_TEST_INTERNAL_FILTER in offset_output
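
The filtering behavior exercised above can be summarized as: --topic and --topic-partitions take regular expressions, and --exclude-internal-topics drops Kafka's internal topics such as __consumer_offsets. A simplified re-implementation over an already-parsed topic list; the double-underscore prefix check is a simplification of how Kafka actually tracks internal topics.

import re

def filter_topics(topics, pattern=".*", exclude_internal=False):
    """Keep topics whose full name matches pattern, optionally dropping internal ones."""
    return [t for t in topics
            if re.match(r"(?:%s)\Z" % pattern, t)
            and not (exclude_internal and t.startswith("__"))]

topics = ["__consumer_offsets", "topic_test_internal_filter"]
assert filter_topics(topics, ".*consumer_offsets") == ["__consumer_offsets"]
assert filter_topics(topics, exclude_internal=True) == ["topic_test_internal_filter"]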