class ConsoleConsumerTest(Test):
    """Sanity checks on console consumer service class."""

    def __init__(self, test_context):
        super(ConsoleConsumerTest, self).__init__(test_context)

        self.topic = "topic"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(test_context, num_nodes=1, zk=self.zk,
                                  topics={self.topic: {"partitions": 1, "replication-factor": 1}})
        self.consumer = ConsoleConsumer(test_context, num_nodes=1, kafka=self.kafka, topic=self.topic)

    def setUp(self):
        self.zk.start()
        self.kafka.start()

    def test_lifecycle(self):
        t0 = time.time()
        self.consumer.start()
        node = self.consumer.nodes[0]

        wait_until(lambda: self.consumer.alive(node),
                   timeout_sec=10, backoff_sec=.2, err_msg="Consumer was too slow to start")
        self.logger.info("consumer started in %s seconds" % str(time.time() - t0))

        # Verify that log output is happening
        wait_until(lambda: file_exists(node, ConsoleConsumer.LOG_FILE), timeout_sec=10,
                   err_msg="Timed out waiting for logging to start.")
        assert line_count(node, ConsoleConsumer.LOG_FILE) > 0

        # Verify no consumed messages
        assert line_count(node, ConsoleConsumer.STDOUT_CAPTURE) == 0

        self.consumer.stop_node(node)
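# The lifecycle check above leans on two small remote-file helpers, file_exists
# and line_count. A minimal sketch of what they are assumed to do, using
# ducktape's RemoteAccount API (ssh returns the command's exit status when
# allow_fail=True; ssh_capture yields the command's output lines):
def file_exists(node, path):
    """Return True if path exists on the remote node (sketch)."""
    return node.account.ssh("test -f %s" % path, allow_fail=True) == 0


def line_count(node, path):
    """Return the number of lines in a remote file (sketch)."""
    out = list(node.account.ssh_capture("wc -l < %s" % path))
    return int(out[0].strip())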
class GetOffsetShellTest(Test):
    """
    Tests GetOffsetShell tool
    """
    def __init__(self, test_context):
        super(GetOffsetShellTest, self).__init__(test_context)
        self.num_zk = 1
        self.num_brokers = 1
        self.messages_received_count = 0
        self.topics = {
            TOPIC: {'partitions': NUM_PARTITIONS, 'replication-factor': REPLICATION_FACTOR}
        }

        self.zk = ZookeeperService(test_context, self.num_zk)

    def setUp(self):
        self.zk.start()

    def start_kafka(self, security_protocol, interbroker_security_protocol):
        self.kafka = KafkaService(
            self.test_context, self.num_brokers,
            self.zk, security_protocol=security_protocol,
            interbroker_security_protocol=interbroker_security_protocol, topics=self.topics)
        self.kafka.start()

    def start_producer(self):
        # This will produce to the kafka cluster
        self.producer = VerifiableProducer(self.test_context, num_nodes=1, kafka=self.kafka, topic=TOPIC,
                                           throughput=1000, max_messages=MAX_MESSAGES)
        self.producer.start()
        current_acked = self.producer.num_acked
        wait_until(lambda: self.producer.num_acked >= current_acked + MAX_MESSAGES, timeout_sec=10,
                   err_msg="Timeout awaiting messages to be produced and acked")

    def start_consumer(self):
        self.consumer = ConsoleConsumer(self.test_context, num_nodes=self.num_brokers, kafka=self.kafka,
                                        topic=TOPIC, consumer_timeout_ms=1000)
        self.consumer.start()

    @cluster(num_nodes=4)
    def test_get_offset_shell(self, security_protocol='PLAINTEXT'):
        """
        Tests if GetOffsetShell is getting offsets correctly
        :return: None
        """
        self.start_kafka(security_protocol, security_protocol)
        self.start_producer()

        # Sanity check: GetOffsetShell reports an offset entry for the topic's last partition
        assert "%s:%s:" % (TOPIC, NUM_PARTITIONS - 1) in self.kafka.get_offset_shell(TOPIC, None, 1000, 1, -1)

        self.start_consumer()

        node = self.consumer.nodes[0]
        wait_until(lambda: self.consumer.alive(node),
                   timeout_sec=20, backoff_sec=.2, err_msg="Consumer was too slow to start")

        # Assert that offset is correctly indicated by GetOffsetShell tool
        wait_until(lambda: "%s:%s:%s" % (TOPIC, NUM_PARTITIONS - 1, MAX_MESSAGES) in
                           self.kafka.get_offset_shell(TOPIC, None, 1000, 1, -1),
                   timeout_sec=10, err_msg="Timed out waiting to reach expected offset.")
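# GetOffsetShell prints one "topic:partition:offset" line per matched partition,
# which is the format the assertions above search for. A small illustrative
# parser for that output (hypothetical helper, not part of the service API):
def offset_for(output, topic, partition):
    """Extract the offset of one topic-partition from GetOffsetShell output."""
    for line in output.strip().split("\n"):
        t, p, off = line.rsplit(":", 2)
        if t == topic and int(p) == partition:
            return int(off)
    return None


assert offset_for("topic:0:1000", "topic", 0) == 1000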
class Log4jAppenderTest(KafkaTest):
    """
    Tests KafkaLog4jAppender using VerifiableKafkaLog4jAppender that appends increasing ints to a Kafka topic
    """
    def __init__(self, test_context):
        super(Log4jAppenderTest, self).__init__(test_context, num_zk=1, num_brokers=1, topics={
            TOPIC: {'partitions': 1, 'replication-factor': 1}
        })

        self.num_nodes = 1

        self.appender = KafkaLog4jAppender(self.test_context, self.num_nodes, self.kafka, TOPIC, MAX_MESSAGES)
        self.consumer = ConsoleConsumer(self.test_context, num_nodes=self.num_nodes, kafka=self.kafka,
                                        topic=TOPIC, consumer_timeout_ms=1000)

    def test_log4j_appender(self):
        """
        Tests if KafkaLog4jAppender is producing to Kafka topic
        :return: None
        """
        self.appender.start()
        self.appender.wait()

        t0 = time.time()
        self.consumer.start()
        node = self.consumer.nodes[0]

        wait_until(lambda: self.consumer.alive(node),
                   timeout_sec=10, backoff_sec=.2, err_msg="Consumer was too slow to start")
        self.logger.info("consumer started in %s seconds" % str(time.time() - t0))

        # Verify consumed messages count
        expected_lines_count = MAX_MESSAGES * 2  # twice MAX_MESSAGES to account for the newlines introduced by log4j
        wait_until(lambda: len(self.consumer.messages_consumed[1]) == expected_lines_count, timeout_sec=10,
                   err_msg="Timed out waiting to consume expected number of messages.")

        self.consumer.stop_node(node)
class Log4jAppenderTest(Test):
    """
    Tests KafkaLog4jAppender using VerifiableKafkaLog4jAppender that appends increasing ints to a Kafka topic
    """
    def __init__(self, test_context):
        super(Log4jAppenderTest, self).__init__(test_context)
        self.num_zk = 1
        self.num_brokers = 1

        self.topics = {
            TOPIC: {'partitions': 1, 'replication-factor': 1}
        }

        self.zk = ZookeeperService(test_context, self.num_zk)

    def setUp(self):
        self.zk.start()

    def start_kafka(self, security_protocol, interbroker_security_protocol):
        self.kafka = KafkaService(
            self.test_context, self.num_brokers,
            self.zk, security_protocol=security_protocol,
            interbroker_security_protocol=interbroker_security_protocol, topics=self.topics)
        self.kafka.start()

    def start_appender(self, security_protocol):
        self.appender = KafkaLog4jAppender(self.test_context, self.num_brokers, self.kafka, TOPIC, MAX_MESSAGES,
                                           security_protocol=security_protocol)
        self.appender.start()

    def start_consumer(self, security_protocol):
        enable_new_consumer = security_protocol == SecurityConfig.SSL
        self.consumer = ConsoleConsumer(self.test_context, num_nodes=self.num_brokers, kafka=self.kafka,
                                        topic=TOPIC, consumer_timeout_ms=1000,
                                        new_consumer=enable_new_consumer)
        self.consumer.start()

    @matrix(security_protocol=['PLAINTEXT', 'SSL'])
    def test_log4j_appender(self, security_protocol='PLAINTEXT'):
        """
        Tests if KafkaLog4jAppender is producing to Kafka topic
        :return: None
        """
        self.start_kafka(security_protocol, security_protocol)
        self.start_appender(security_protocol)
        self.appender.wait()

        self.start_consumer(security_protocol)
        node = self.consumer.nodes[0]

        wait_until(lambda: self.consumer.alive(node),
                   timeout_sec=10, backoff_sec=.2, err_msg="Consumer was too slow to start")

        # Verify consumed messages count
        expected_lines_count = MAX_MESSAGES * 2  # twice MAX_MESSAGES to account for the newlines introduced by log4j
        wait_until(lambda: len(self.consumer.messages_consumed[1]) == expected_lines_count, timeout_sec=10,
                   err_msg="Timed out waiting to consume expected number of messages.")

        self.consumer.stop()
def start_consumer(self, topic_to_read, group_id):
    consumer = ConsoleConsumer(context=self.test_context,
                               num_nodes=1,
                               kafka=self.kafka,
                               topic=topic_to_read,
                               group_id=group_id,
                               message_validator=is_int,
                               from_beginning=True,
                               isolation_level="read_committed")
    consumer.start()
    # ensure that the consumer is up.
    wait_until(lambda: len(consumer.messages_consumed[1]) > 0,
               timeout_sec=60,
               err_msg="Consumer failed to consume any messages for %ds" % 60)
    return consumer
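# Illustrative use of the helper above in a transactions test (a fragment, not
# runnable standalone: it assumes the surrounding test defines self.output_topic
# and self.num_seed_messages). Reading with isolation_level="read_committed"
# ensures records from aborted transactions are never counted.
#
#     consumer = self.start_consumer(self.output_topic, group_id="transactional-verifier")
#     wait_until(lambda: len(consumer.messages_consumed[1]) >= self.num_seed_messages,
#                timeout_sec=90,
#                err_msg="Timed out waiting to read %d committed messages" % self.num_seed_messages)
#     consumer.stop()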
def get_messages_from_output_topic(self):
    consumer = ConsoleConsumer(context=self.test_context,
                               num_nodes=1,
                               kafka=self.kafka,
                               topic=self.output_topic,
                               new_consumer=True,
                               message_validator=is_int,
                               from_beginning=True,
                               consumer_timeout_ms=5000,
                               isolation_level="read_committed")
    consumer.start()
    # ensure that the consumer is up.
    wait_until(lambda: consumer.alive(consumer.nodes[0]),
               timeout_sec=60,
               err_msg="Consumer failed to start for %ds" % 60)
    # wait until the consumer closes, which will be 5 seconds after
    # receiving the last message.
    wait_until(lambda: not consumer.alive(consumer.nodes[0]),
               timeout_sec=60,
               err_msg="Consumer failed to consume %d messages in %ds" % (self.num_seed_messages, 60))
    return consumer.messages_consumed[1]
class ConsoleConsumerTest(Test):
    """Sanity checks on console consumer service class."""

    def __init__(self, test_context):
        super(ConsoleConsumerTest, self).__init__(test_context)

        self.topic = "topic"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(self.test_context, num_nodes=1, zk=self.zk,
                                  topics={self.topic: {"partitions": 1, "replication-factor": 1}})
        self.consumer = ConsoleConsumer(self.test_context, num_nodes=1, kafka=self.kafka, topic=self.topic,
                                        new_consumer=False)

    def setUp(self):
        self.zk.start()

    @cluster(num_nodes=3)
    @parametrize(security_protocol='PLAINTEXT', new_consumer=False)
    @matrix(security_protocol=['PLAINTEXT', 'SSL'])
    @cluster(num_nodes=4)
    @matrix(security_protocol=['SASL_SSL'], sasl_mechanism=['PLAIN', 'SCRAM-SHA-256', 'SCRAM-SHA-512'])
    @matrix(security_protocol=['SASL_PLAINTEXT', 'SASL_SSL'])
    def test_lifecycle(self, security_protocol, new_consumer=True, sasl_mechanism='GSSAPI'):
        """Check that console consumer starts/stops properly, and that we are capturing log output."""

        self.kafka.security_protocol = security_protocol
        self.kafka.client_sasl_mechanism = sasl_mechanism
        self.kafka.interbroker_sasl_mechanism = sasl_mechanism
        self.kafka.start()

        self.consumer.security_protocol = security_protocol
        self.consumer.new_consumer = new_consumer

        t0 = time.time()
        self.consumer.start()
        node = self.consumer.nodes[0]

        wait_until(lambda: self.consumer.alive(node),
                   timeout_sec=10, backoff_sec=.2, err_msg="Consumer was too slow to start")
        self.logger.info("consumer started in %s seconds" % str(time.time() - t0))

        # Verify that log output is happening
        wait_until(lambda: file_exists(node, ConsoleConsumer.LOG_FILE), timeout_sec=10,
                   err_msg="Timed out waiting for consumer log file to exist.")
        wait_until(lambda: line_count(node, ConsoleConsumer.LOG_FILE) > 0, timeout_sec=1,
                   backoff_sec=.25, err_msg="Timed out waiting for log entries to start.")

        # Verify no consumed messages
        assert line_count(node, ConsoleConsumer.STDOUT_CAPTURE) == 0

        self.consumer.stop_node(node)

    @cluster(num_nodes=4)
    def test_version(self):
        """Check that console consumer v0.8.2.X successfully starts and consumes messages."""
        self.kafka.start()

        num_messages = 1000
        self.producer = VerifiableProducer(self.test_context, num_nodes=1, kafka=self.kafka, topic=self.topic,
                                           max_messages=num_messages, throughput=1000)
        self.producer.start()
        self.producer.wait()

        self.consumer.nodes[0].version = LATEST_0_8_2
        self.consumer.consumer_timeout_ms = 1000
        self.consumer.start()
        self.consumer.wait()

        num_consumed = len(self.consumer.messages_consumed[1])
        num_produced = self.producer.num_acked
        assert num_produced == num_consumed, "num_produced: %d, num_consumed: %d" % (num_produced, num_consumed)
class TestMirrorMakerService(Test):
    """Sanity checks on mirror maker service class."""

    def __init__(self, test_context):
        super(TestMirrorMakerService, self).__init__(test_context)

        self.topic = "topic"
        self.source_zk = ZookeeperService(test_context, num_nodes=1)
        self.target_zk = ZookeeperService(test_context, num_nodes=1)

        self.source_kafka = KafkaService(test_context, num_nodes=1, zk=self.source_zk,
                                         topics={self.topic: {"partitions": 1, "replication-factor": 1}})
        self.target_kafka = KafkaService(test_context, num_nodes=1, zk=self.target_zk,
                                         topics={self.topic: {"partitions": 1, "replication-factor": 1}})

        self.num_messages = 1000
        # This will produce to source kafka cluster
        self.producer = VerifiableProducer(test_context, num_nodes=1, kafka=self.source_kafka, topic=self.topic,
                                           max_messages=self.num_messages, throughput=1000)

        # Use a regex whitelist to check that the start command is well-formed in this case
        self.mirror_maker = MirrorMaker(test_context, num_nodes=1, source=self.source_kafka,
                                        target=self.target_kafka, whitelist=".*", consumer_timeout_ms=2000)

        # This will consume from target kafka cluster
        self.consumer = ConsoleConsumer(test_context, num_nodes=1, kafka=self.target_kafka, topic=self.topic,
                                        consumer_timeout_ms=1000)

    def setUp(self):
        # Source cluster
        self.source_zk.start()
        self.source_kafka.start()

        # Target cluster
        self.target_zk.start()
        self.target_kafka.start()

    def test_end_to_end(self):
        """
        Test end-to-end behavior under non-failure conditions.

        Setup: two single node Kafka clusters, each connected to its own single node zookeeper cluster.
        One is source, and the other is target. A single-node mirror maker mirrors from source to target.

        - Start mirror maker.
        - Produce a small number of messages to the source cluster.
        - Consume messages from target.
        - Verify that number of consumed messages matches the number produced.
        """
        self.mirror_maker.start()
        # Check that consumer_timeout_ms setting made it to the config file
        self.mirror_maker.nodes[0].account.ssh(
            "grep \"consumer\.timeout\.ms\" %s" % MirrorMaker.CONSUMER_CONFIG, allow_fail=False)

        self.producer.start()
        self.producer.wait(10)
        self.consumer.start()
        self.consumer.wait(10)

        num_consumed = len(self.consumer.messages_consumed[1])
        num_produced = self.producer.num_acked
        assert num_produced == self.num_messages, "num_produced: %d, num_messages: %d" % (num_produced, self.num_messages)
        assert num_produced == num_consumed, "num_produced: %d, num_consumed: %d" % (num_produced, num_consumed)

        self.mirror_maker.stop()
class DelegationTokenTest(Test):
    def __init__(self, test_context):
        super(DelegationTokenTest, self).__init__(test_context)

        self.test_context = test_context
        self.topic = "topic"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(self.test_context, num_nodes=1, zk=self.zk, zk_chroot="/kafka",
                                  topics={self.topic: {"partitions": 1, "replication-factor": 1}},
                                  server_prop_overides=[
                                      [config_property.DELEGATION_TOKEN_MAX_LIFETIME_MS, "604800000"],
                                      [config_property.DELEGATION_TOKEN_EXPIRY_TIME_MS, "86400000"],
                                      [config_property.DELEGATION_TOKEN_SECRET_KEY, "test12345"],
                                      [config_property.SASL_ENABLED_MECHANISMS, "GSSAPI,SCRAM-SHA-256"]
                                  ])
        self.jaas_deleg_conf_path = "/tmp/jaas_deleg.conf"
        self.jaas_deleg_conf = ""
        self.client_properties_content = """
security.protocol=SASL_PLAINTEXT
sasl.mechanism=SCRAM-SHA-256
sasl.kerberos.service.name=kafka
client.id=console-consumer
"""
        self.client_kafka_opts = ' -Djava.security.auth.login.config=' + self.jaas_deleg_conf_path

        self.producer = VerifiableProducer(self.test_context, num_nodes=1, kafka=self.kafka, topic=self.topic,
                                           max_messages=1, throughput=1,
                                           kafka_opts_override=self.client_kafka_opts,
                                           client_prop_file_override=self.client_properties_content)

        self.consumer = ConsoleConsumer(self.test_context, num_nodes=1, kafka=self.kafka, topic=self.topic,
                                        kafka_opts_override=self.client_kafka_opts,
                                        client_prop_file_override=self.client_properties_content)

        self.kafka.security_protocol = 'SASL_PLAINTEXT'
        self.kafka.client_sasl_mechanism = 'GSSAPI,SCRAM-SHA-256'
        self.kafka.interbroker_sasl_mechanism = 'GSSAPI'

    def setUp(self):
        self.zk.start()

    def tearDown(self):
        self.producer.nodes[0].account.remove(self.jaas_deleg_conf_path)
        self.consumer.nodes[0].account.remove(self.jaas_deleg_conf_path)

    def generate_delegation_token(self):
        self.logger.debug("Request delegation token")
        self.delegation_tokens.generate_delegation_token()
        self.jaas_deleg_conf = self.delegation_tokens.create_jaas_conf_with_delegation_token()

    def expire_delegation_token(self):
        self.kafka.client_sasl_mechanism = 'GSSAPI,SCRAM-SHA-256'
        token_hmac = self.delegation_tokens.token_hmac()
        self.delegation_tokens.expire_delegation_token(token_hmac)

    def produce_with_delegation_token(self):
        self.producer.acked_values = []
        self.producer.nodes[0].account.create_file(self.jaas_deleg_conf_path, self.jaas_deleg_conf)
        self.logger.debug(self.jaas_deleg_conf)
        self.producer.start()

    def consume_with_delegation_token(self):
        self.logger.debug("Consume messages with delegation token")
        self.consumer.nodes[0].account.create_file(self.jaas_deleg_conf_path, self.jaas_deleg_conf)
        self.logger.debug(self.jaas_deleg_conf)
        self.consumer.consumer_timeout_ms = 5000
        self.consumer.start()
        self.consumer.wait()

    def get_datetime_ms(self, input_date):
        return int(time.mktime(datetime.strptime(input_date, "%Y-%m-%dT%H:%M").timetuple()) * 1000)

    def renew_delegation_token(self):
        dt = self.delegation_tokens.parse_delegation_token_out()
        orig_expiry_date_ms = self.get_datetime_ms(dt["expirydate"])
        new_expirydate_ms = orig_expiry_date_ms + 1000

        self.delegation_tokens.renew_delegation_token(dt["hmac"], new_expirydate_ms)

    @cluster(num_nodes=5)
    def test_delegation_token_lifecycle(self):
        self.kafka.start()
        self.delegation_tokens = DelegationTokens(self.kafka, self.test_context)

        self.generate_delegation_token()
        self.renew_delegation_token()
        self.produce_with_delegation_token()
        wait_until(lambda: self.producer.num_acked > 0, timeout_sec=30,
                   err_msg="Expected producer to still be producing.")
        assert 1 == self.producer.num_acked, "number of acked messages: %d" % self.producer.num_acked

        self.consume_with_delegation_token()
        num_consumed = len(self.consumer.messages_consumed[1])
        assert 1 == num_consumed, "number of consumed messages: %d" % num_consumed

        self.expire_delegation_token()

        self.produce_with_delegation_token()
        assert 0 == self.producer.num_acked, "number of acked messages: %d" % self.producer.num_acked
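# For reference, the JAAS file written to jaas_deleg_conf_path is generated by
# the DelegationTokens helper; its content is assumed to look roughly like the
# sketch below (placeholder values; tokenauth=true tells ScramLoginModule to
# authenticate with the token ID and HMAC instead of a username and password):
EXAMPLE_JAAS_DELEG_CONF = """
KafkaClient {
    org.apache.kafka.common.security.scram.ScramLoginModule required
    username="<tokenID>"
    password="<tokenHMAC>"
    tokenauth=true;
};
"""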
class GetOffsetShellTest(Test):
    """
    Tests GetOffsetShell tool
    """
    def __init__(self, test_context):
        super(GetOffsetShellTest, self).__init__(test_context)
        self.num_zk = 1
        self.num_brokers = 1
        self.messages_received_count = 0
        self.topics = {
            TOPIC_TEST_NAME: {'partitions': NUM_PARTITIONS, 'replication-factor': REPLICATION_FACTOR},
            TOPIC_TEST_PATTERN1: {'partitions': 1, 'replication-factor': REPLICATION_FACTOR},
            TOPIC_TEST_PATTERN2: {'partitions': 1, 'replication-factor': REPLICATION_FACTOR},
            TOPIC_TEST_PARTITIONS: {'partitions': 2, 'replication-factor': REPLICATION_FACTOR},
            TOPIC_TEST_INTERNAL_FILTER: {'partitions': 1, 'replication-factor': REPLICATION_FACTOR},
            TOPIC_TEST_TOPIC_PARTITIONS1: {'partitions': 2, 'replication-factor': REPLICATION_FACTOR},
            TOPIC_TEST_TOPIC_PARTITIONS2: {'partitions': 2, 'replication-factor': REPLICATION_FACTOR}
        }

        self.zk = ZookeeperService(test_context, self.num_zk) if quorum.for_test(test_context) == quorum.zk else None

    def setUp(self):
        if self.zk:
            self.zk.start()

    def start_kafka(self, security_protocol, interbroker_security_protocol):
        self.kafka = KafkaService(
            self.test_context, self.num_brokers,
            self.zk, security_protocol=security_protocol,
            interbroker_security_protocol=interbroker_security_protocol, topics=self.topics)
        self.kafka.start()

    def start_producer(self, topic):
        # This will produce to the kafka cluster
        self.producer = VerifiableProducer(self.test_context, num_nodes=1, kafka=self.kafka, topic=topic,
                                           throughput=1000, max_messages=MAX_MESSAGES, repeating_keys=MAX_MESSAGES)
        self.producer.start()
        current_acked = self.producer.num_acked
        wait_until(lambda: self.producer.num_acked >= current_acked + MAX_MESSAGES, timeout_sec=10,
                   err_msg="Timeout awaiting messages to be produced and acked")

    def start_consumer(self, topic):
        self.consumer = ConsoleConsumer(self.test_context, num_nodes=self.num_brokers, kafka=self.kafka,
                                        topic=topic, consumer_timeout_ms=1000)
        self.consumer.start()

    def check_message_count_sum_equals(self, message_count, **kwargs):
        return self.extract_message_count_sum(**kwargs) == message_count

    def extract_message_count_sum(self, **kwargs):
        offsets = self.kafka.get_offset_shell(**kwargs).split("\n")
        total = 0
        for offset in offsets:
            if len(offset) == 0:
                continue
            total += int(offset.split(":")[-1])
        return total

    @cluster(num_nodes=3)
    @matrix(metadata_quorum=quorum.all_non_upgrade)
    def test_get_offset_shell_topic_name(self, security_protocol='PLAINTEXT', metadata_quorum=quorum.zk):
        """
        Tests if GetOffsetShell handles --topic argument with a simple name correctly
        :return: None
        """
        self.start_kafka(security_protocol, security_protocol)
        self.start_producer(TOPIC_TEST_NAME)

        # Assert that offset is correctly indicated by GetOffsetShell tool
        wait_until(lambda: self.check_message_count_sum_equals(MAX_MESSAGES, topic=TOPIC_TEST_NAME),
                   timeout_sec=10, err_msg="Timed out waiting to reach expected offset.")

    @cluster(num_nodes=4)
    @matrix(metadata_quorum=quorum.all_non_upgrade)
    def test_get_offset_shell_topic_pattern(self, security_protocol='PLAINTEXT', metadata_quorum=quorum.zk):
        """
        Tests if GetOffsetShell handles --topic argument with a pattern correctly
        :return: None
        """
        self.start_kafka(security_protocol, security_protocol)
        self.start_producer(TOPIC_TEST_PATTERN1)
        self.start_producer(TOPIC_TEST_PATTERN2)

        # Assert that offset is correctly indicated by GetOffsetShell tool
        wait_until(lambda: self.check_message_count_sum_equals(2 * MAX_MESSAGES, topic=TOPIC_TEST_PATTERN_PATTERN),
                   timeout_sec=10, err_msg="Timed out waiting to reach expected offset.")

    @cluster(num_nodes=3)
    @matrix(metadata_quorum=quorum.all_non_upgrade)
    def test_get_offset_shell_partitions(self, security_protocol='PLAINTEXT', metadata_quorum=quorum.zk):
        """
        Tests if GetOffsetShell handles --partitions argument correctly
        :return: None
        """
        self.start_kafka(security_protocol, security_protocol)
        self.start_producer(TOPIC_TEST_PARTITIONS)

        def fetch_and_sum_partitions_separately():
            partition_count0 = self.extract_message_count_sum(topic=TOPIC_TEST_PARTITIONS, partitions="0")
            partition_count1 = self.extract_message_count_sum(topic=TOPIC_TEST_PARTITIONS, partitions="1")
            return partition_count0 + partition_count1 == MAX_MESSAGES

        # Assert that offset is correctly indicated when fetching partitions one by one
        wait_until(fetch_and_sum_partitions_separately, timeout_sec=10,
                   err_msg="Timed out waiting to reach expected offset.")

        # Assert that offset is correctly indicated when fetching partitions together
        wait_until(lambda: self.check_message_count_sum_equals(MAX_MESSAGES, topic=TOPIC_TEST_PARTITIONS),
                   timeout_sec=10, err_msg="Timed out waiting to reach expected offset.")

    @cluster(num_nodes=4)
    @matrix(metadata_quorum=quorum.all_non_upgrade)
    def test_get_offset_shell_topic_partitions(self, security_protocol='PLAINTEXT', metadata_quorum=quorum.zk):
        """
        Tests if GetOffsetShell handles --topic-partitions argument correctly
        :return: None
        """
        self.start_kafka(security_protocol, security_protocol)
        self.start_producer(TOPIC_TEST_TOPIC_PARTITIONS1)
        self.start_producer(TOPIC_TEST_TOPIC_PARTITIONS2)

        # Assert that a single topic pattern matches all 4 partitions
        wait_until(lambda: self.check_message_count_sum_equals(
                       2 * MAX_MESSAGES, topic_partitions=TOPIC_TEST_TOPIC_PARTITIONS_PATTERN),
                   timeout_sec=10, err_msg="Timed out waiting to reach expected offset.")

        # Assert that a topic pattern with partition range matches all 4 partitions
        wait_until(lambda: self.check_message_count_sum_equals(
                       2 * MAX_MESSAGES, topic_partitions=TOPIC_TEST_TOPIC_PARTITIONS_PATTERN + ":0-2"),
                   timeout_sec=10, err_msg="Timed out waiting to reach expected offset.")

        # Assert that 2 separate topic patterns match all 4 partitions
        wait_until(lambda: self.check_message_count_sum_equals(
                       2 * MAX_MESSAGES,
                       topic_partitions=TOPIC_TEST_TOPIC_PARTITIONS1 + "," + TOPIC_TEST_TOPIC_PARTITIONS2),
                   timeout_sec=10, err_msg="Timed out waiting to reach expected offset.")

        # Assert that 4 separate topic-partition patterns match all 4 partitions
        wait_until(lambda: self.check_message_count_sum_equals(
                       2 * MAX_MESSAGES,
                       topic_partitions=TOPIC_TEST_TOPIC_PARTITIONS1 + ":0," +
                                        TOPIC_TEST_TOPIC_PARTITIONS1 + ":1," +
                                        TOPIC_TEST_TOPIC_PARTITIONS2 + ":0," +
                                        TOPIC_TEST_TOPIC_PARTITIONS2 + ":1"),
                   timeout_sec=10, err_msg="Timed out waiting to reach expected offset.")

        # Assert that only partitions #0 are matched with topic pattern and fixed partition number
        filtered_partitions = self.kafka.get_offset_shell(
            topic_partitions=TOPIC_TEST_TOPIC_PARTITIONS_PATTERN + ":0")
        assert 1 == filtered_partitions.count("%s:%s" % (TOPIC_TEST_TOPIC_PARTITIONS1, 0))
        assert 0 == filtered_partitions.count("%s:%s" % (TOPIC_TEST_TOPIC_PARTITIONS1, 1))
        assert 1 == filtered_partitions.count("%s:%s" % (TOPIC_TEST_TOPIC_PARTITIONS2, 0))
        assert 0 == filtered_partitions.count("%s:%s" % (TOPIC_TEST_TOPIC_PARTITIONS2, 1))

        # Assert that only partitions #1 are matched with topic pattern and partition lower bound
        filtered_partitions = self.kafka.get_offset_shell(
            topic_partitions=TOPIC_TEST_TOPIC_PARTITIONS_PATTERN + ":1-")
        assert 1 == filtered_partitions.count("%s:%s" % (TOPIC_TEST_TOPIC_PARTITIONS1, 1))
        assert 0 == filtered_partitions.count("%s:%s" % (TOPIC_TEST_TOPIC_PARTITIONS1, 0))
        assert 1 == filtered_partitions.count("%s:%s" % (TOPIC_TEST_TOPIC_PARTITIONS2, 1))
        assert 0 == filtered_partitions.count("%s:%s" % (TOPIC_TEST_TOPIC_PARTITIONS2, 0))

        # Assert that only partitions #0 are matched with topic pattern and partition upper bound
        filtered_partitions = self.kafka.get_offset_shell(
            topic_partitions=TOPIC_TEST_TOPIC_PARTITIONS_PATTERN + ":-1")
        assert 1 == filtered_partitions.count("%s:%s" % (TOPIC_TEST_TOPIC_PARTITIONS1, 0))
        assert 0 == filtered_partitions.count("%s:%s" % (TOPIC_TEST_TOPIC_PARTITIONS1, 1))
        assert 1 == filtered_partitions.count("%s:%s" % (TOPIC_TEST_TOPIC_PARTITIONS2, 0))
        assert 0 == filtered_partitions.count("%s:%s" % (TOPIC_TEST_TOPIC_PARTITIONS2, 1))

    @cluster(num_nodes=4)
    @matrix(metadata_quorum=quorum.all_non_upgrade)
    def test_get_offset_shell_internal_filter(self, security_protocol='PLAINTEXT', metadata_quorum=quorum.zk):
        """
        Tests if GetOffsetShell handles --exclude-internal-topics flag correctly
        :return: None
        """
        self.start_kafka(security_protocol, security_protocol)
        self.start_producer(TOPIC_TEST_INTERNAL_FILTER)

        # Create consumer and poll messages to create a consumer offset record
        self.start_consumer(TOPIC_TEST_INTERNAL_FILTER)
        node = self.consumer.nodes[0]
        wait_until(lambda: self.consumer.alive(node),
                   timeout_sec=20, backoff_sec=.2, err_msg="Consumer was too slow to start")

        # Assert that the test topic reaches the expected offset
        wait_until(lambda: self.check_message_count_sum_equals(
                       MAX_MESSAGES, topic_partitions=TOPIC_TEST_INTERNAL_FILTER),
                   timeout_sec=10, err_msg="Timed out waiting to reach expected offset.")

        # No filters
        # Assert that without exclusion, we can find both the test topic and the __consumer_offsets internal topic
        offset_output = self.kafka.get_offset_shell()
        assert "__consumer_offsets" in offset_output
        assert TOPIC_TEST_INTERNAL_FILTER in offset_output

        # Assert that with exclusion, we can find the test topic but not the __consumer_offsets internal topic
        offset_output = self.kafka.get_offset_shell(exclude_internal_topics=True)
        assert "__consumer_offsets" not in offset_output
        assert TOPIC_TEST_INTERNAL_FILTER in offset_output

        # Topic filter
        # Assert that without exclusion, we can find both the test topic and the __consumer_offsets internal topic
        offset_output = self.kafka.get_offset_shell(topic=".*consumer_offsets")
        assert "__consumer_offsets" in offset_output
        assert TOPIC_TEST_INTERNAL_FILTER in offset_output

        # Assert that with exclusion, we can find the test topic but not the __consumer_offsets internal topic
        offset_output = self.kafka.get_offset_shell(topic=".*consumer_offsets", exclude_internal_topics=True)
        assert "__consumer_offsets" not in offset_output
        assert TOPIC_TEST_INTERNAL_FILTER in offset_output

        # Topic-partition filter
        # Assert that without exclusion, we can find both the test topic and the __consumer_offsets internal topic
        offset_output = self.kafka.get_offset_shell(topic_partitions=".*consumer_offsets:0")
        assert "__consumer_offsets" in offset_output
        assert TOPIC_TEST_INTERNAL_FILTER in offset_output

        # Assert that with exclusion, we can find the test topic but not the __consumer_offsets internal topic
        offset_output = self.kafka.get_offset_shell(topic_partitions=".*consumer_offsets:0",
                                                    exclude_internal_topics=True)
        assert "__consumer_offsets" not in offset_output
        assert TOPIC_TEST_INTERNAL_FILTER in offset_output
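# The --topic-partitions assertions above pin down the partition-spec grammar:
# "N" is an exact match, "N-M" a half-open range [N, M), "N-" a lower bound and
# "-M" an upper bound. A self-contained sketch of those semantics as exercised
# by the assertions (illustrative only; GetOffsetShell does the real parsing):
def matches_partition(spec, partition):
    if "-" not in spec:
        return partition == int(spec)
    lower, upper = spec.split("-")
    if lower and partition < int(lower):
        return False
    if upper and partition >= int(upper):
        return False
    return True


assert matches_partition("0-2", 1) and not matches_partition("0-2", 2)
assert matches_partition("1-", 1) and not matches_partition("1-", 0)
assert matches_partition("-1", 0) and not matches_partition("-1", 1)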
class Log4jAppenderTest(Test):
    """
    Tests KafkaLog4jAppender using VerifiableKafkaLog4jAppender that appends increasing ints to a Kafka topic
    """
    def __init__(self, test_context):
        super(Log4jAppenderTest, self).__init__(test_context)
        self.num_zk = 1
        self.num_brokers = 1

        self.topics = {
            TOPIC: {'partitions': 1, 'replication-factor': 1}
        }

        self.zk = ZookeeperService(test_context, self.num_zk)

    def setUp(self):
        self.zk.start()

    def start_kafka(self, security_protocol, interbroker_security_protocol):
        self.kafka = KafkaService(
            self.test_context, self.num_brokers,
            self.zk, security_protocol=security_protocol,
            interbroker_security_protocol=interbroker_security_protocol, topics=self.topics)
        self.kafka.start()

    def start_appender(self, security_protocol):
        self.appender = KafkaLog4jAppender(self.test_context, self.num_brokers, self.kafka, TOPIC, MAX_MESSAGES,
                                           security_protocol=security_protocol)
        self.appender.start()

    def start_consumer(self, security_protocol):
        enable_new_consumer = security_protocol == SecurityConfig.SSL
        self.consumer = ConsoleConsumer(self.test_context, num_nodes=self.num_brokers, kafka=self.kafka,
                                        topic=TOPIC, consumer_timeout_ms=1000,
                                        new_consumer=enable_new_consumer,
                                        security_protocol=security_protocol)
        self.consumer.start()

    @matrix(security_protocol=['PLAINTEXT', 'SSL'])
    def test_log4j_appender(self, security_protocol='PLAINTEXT'):
        """
        Tests if KafkaLog4jAppender is producing to Kafka topic
        :return: None
        """
        self.start_kafka(security_protocol, security_protocol)
        self.start_appender(security_protocol)
        self.appender.wait()

        self.start_consumer(security_protocol)
        node = self.consumer.nodes[0]

        wait_until(lambda: self.consumer.alive(node),
                   timeout_sec=10, backoff_sec=.2, err_msg="Consumer was too slow to start")

        # Verify consumed messages count
        expected_lines_count = MAX_MESSAGES * 2  # twice MAX_MESSAGES to account for the newlines introduced by log4j
        wait_until(lambda: len(self.consumer.messages_consumed[1]) == expected_lines_count, timeout_sec=10,
                   err_msg="Timed out waiting to consume expected number of messages.")

        self.consumer.stop()
class Log4jAppenderTest(Test):
    """
    Tests KafkaLog4jAppender using VerifiableKafkaLog4jAppender that appends increasing ints to a Kafka topic
    """
    def __init__(self, test_context):
        super(Log4jAppenderTest, self).__init__(test_context)
        self.num_zk = 1
        self.num_brokers = 1
        self.messages_received_count = 0

        self.topics = {
            TOPIC: {'partitions': 1, 'replication-factor': 1}
        }

        self.zk = ZookeeperService(test_context, self.num_zk)

    def setUp(self):
        self.zk.start()

    def start_kafka(self, security_protocol, interbroker_security_protocol):
        self.kafka = KafkaService(
            self.test_context, self.num_brokers,
            self.zk, security_protocol=security_protocol,
            interbroker_security_protocol=interbroker_security_protocol, topics=self.topics)
        self.kafka.start()

    def start_appender(self, security_protocol):
        self.appender = KafkaLog4jAppender(self.test_context, self.num_brokers, self.kafka, TOPIC, MAX_MESSAGES,
                                           security_protocol=security_protocol)
        self.appender.start()

    def custom_message_validator(self, msg):
        if msg and "INFO : org.apache.kafka.tools.VerifiableLog4jAppender" in msg:
            self.logger.debug("Received message: %s" % msg)
            self.messages_received_count += 1

    def start_consumer(self, security_protocol):
        enable_new_consumer = security_protocol != SecurityConfig.PLAINTEXT
        self.consumer = ConsoleConsumer(self.test_context, num_nodes=self.num_brokers, kafka=self.kafka,
                                        topic=TOPIC, consumer_timeout_ms=1000,
                                        new_consumer=enable_new_consumer,
                                        message_validator=self.custom_message_validator)
        self.consumer.start()

    @matrix(security_protocol=['PLAINTEXT', 'SSL', 'SASL_PLAINTEXT', 'SASL_SSL'])
    def test_log4j_appender(self, security_protocol='PLAINTEXT'):
        """
        Tests if KafkaLog4jAppender is producing to Kafka topic
        :return: None
        """
        self.start_kafka(security_protocol, security_protocol)
        self.start_appender(security_protocol)
        self.appender.wait()

        self.start_consumer(security_protocol)
        node = self.consumer.nodes[0]

        wait_until(lambda: self.consumer.alive(node),
                   timeout_sec=10, backoff_sec=.2, err_msg="Consumer was too slow to start")

        # Verify consumed messages count
        wait_until(lambda: self.messages_received_count == MAX_MESSAGES, timeout_sec=10,
                   err_msg="Timed out waiting to consume expected number of messages.")

        self.consumer.stop()
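# The message_validator hook used above is called by the ConsoleConsumer service
# on every consumed line, so counting happens as lines stream in rather than by
# post-processing stdout. A self-contained sketch of the same pattern:
class CountingValidator(object):
    MARKER = "INFO : org.apache.kafka.tools.VerifiableLog4jAppender"

    def __init__(self):
        self.count = 0

    def __call__(self, msg):
        # Count only lines emitted by the appender; pass every message through.
        if msg and self.MARKER in msg:
            self.count += 1
        return msg


validator = CountingValidator()
for line in [CountingValidator.MARKER + ": 0", "unrelated log line"]:
    validator(line)
assert validator.count == 1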
class ConsoleConsumerTest(Test):
    """Sanity checks on console consumer service class."""

    def __init__(self, test_context):
        super(ConsoleConsumerTest, self).__init__(test_context)

        self.topic = "topic"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(self.test_context, num_nodes=1, zk=self.zk, zk_chroot="/kafka",
                                  topics={self.topic: {"partitions": 1, "replication-factor": 1}})
        self.consumer = ConsoleConsumer(self.test_context, num_nodes=1, kafka=self.kafka, topic=self.topic)

    def setUp(self):
        self.zk.start()

    @cluster(num_nodes=3)
    @matrix(security_protocol=['PLAINTEXT', 'SSL'])
    @cluster(num_nodes=4)
    @matrix(security_protocol=['SASL_SSL'], sasl_mechanism=['PLAIN', 'SCRAM-SHA-256', 'SCRAM-SHA-512'])
    @matrix(security_protocol=['SASL_PLAINTEXT', 'SASL_SSL'])
    def test_lifecycle(self, security_protocol, sasl_mechanism='GSSAPI'):
        """Check that console consumer starts/stops properly, and that we are capturing log output."""

        self.kafka.security_protocol = security_protocol
        self.kafka.client_sasl_mechanism = sasl_mechanism
        self.kafka.interbroker_sasl_mechanism = sasl_mechanism
        self.kafka.start()

        self.consumer.security_protocol = security_protocol

        t0 = time.time()
        self.consumer.start()
        node = self.consumer.nodes[0]

        wait_until(lambda: self.consumer.alive(node),
                   timeout_sec=20, backoff_sec=.2, err_msg="Consumer was too slow to start")
        self.logger.info("consumer started in %s seconds" % str(time.time() - t0))

        # Verify that log output is happening
        wait_until(lambda: file_exists(node, ConsoleConsumer.LOG_FILE), timeout_sec=10,
                   err_msg="Timed out waiting for consumer log file to exist.")
        wait_until(lambda: line_count(node, ConsoleConsumer.LOG_FILE) > 0, timeout_sec=1,
                   backoff_sec=.25, err_msg="Timed out waiting for log entries to start.")

        # Verify no consumed messages
        assert line_count(node, ConsoleConsumer.STDOUT_CAPTURE) == 0

        self.consumer.stop_node(node)

    @cluster(num_nodes=4)
    def test_version(self):
        """Check that console consumer v0.8.2.X successfully starts and consumes messages."""
        self.kafka.start()

        num_messages = 1000
        self.producer = VerifiableProducer(self.test_context, num_nodes=1, kafka=self.kafka, topic=self.topic,
                                           max_messages=num_messages, throughput=1000)
        self.producer.start()
        self.producer.wait()

        self.consumer.nodes[0].version = LATEST_0_8_2
        self.consumer.new_consumer = False
        self.consumer.consumer_timeout_ms = 1000
        self.consumer.start()
        self.consumer.wait()

        num_consumed = len(self.consumer.messages_consumed[1])
        num_produced = self.producer.num_acked
        assert num_produced == num_consumed, "num_produced: %d, num_consumed: %d" % (num_produced, num_consumed)
class ReplicationTest(Test):
    """Replication tests.

    These tests verify that replication provides simple durability guarantees by checking that data acked by
    brokers is still available for consumption in the face of various failure scenarios.
    """

    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(ReplicationTest, self).__init__(test_context=test_context)

        self.topic = "test_topic"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(test_context, num_nodes=3, zk=self.zk, topics={
            self.topic: {
                "partitions": 3,
                "replication-factor": 3,
                "min.insync.replicas": 2
            }
        })
        self.producer_throughput = 10000
        self.num_producers = 1
        self.num_consumers = 1

    def setUp(self):
        self.zk.start()
        self.kafka.start()

    def min_cluster_size(self):
        """Override this since we're adding services outside of the constructor"""
        return super(ReplicationTest, self).min_cluster_size() + self.num_producers + self.num_consumers

    def run_with_failure(self, failure):
        """This is the top-level test template.

        The steps are:
            Produce messages in the background while driving some failure condition
            When done driving failures, immediately stop producing
            Consume all messages
            Validate that messages acked by brokers were consumed

        Note that consuming is a bit tricky, at least with console consumer. The goal is to consume all messages
        (for each partition) in the topic. In this case, waiting for the last message may cause the consumer to
        stop too soon, since console consumer consumes multiple partitions from a single thread and we therefore
        lose ordering guarantees.

        Waiting on a count of consumed messages can be unreliable: if we stop consuming when
        num_consumed == num_acked, we might exit early if some messages are duplicated (though that is not an
        issue here since producer retries==0).

        Therefore we rely here on the consumer.timeout.ms setting, which times out on the interval between
        successively consumed messages. Since we run the producer to completion before running the consumer,
        this is a reliable indicator that nothing is left to consume.
        """
        self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka, self.topic,
                                           throughput=self.producer_throughput)
        self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka, self.topic,
                                        consumer_timeout_ms=3000)

        # Produce in a background thread while driving broker failures
        self.producer.start()
        wait_until(lambda: self.producer.num_acked > 5, timeout_sec=5,
                   err_msg="Producer failed to start in a reasonable amount of time.")
        failure()
        self.producer.stop()

        self.acked = self.producer.acked
        self.not_acked = self.producer.not_acked
        self.logger.info("num not acked: %d" % self.producer.num_not_acked)
        self.logger.info("num acked: %d" % self.producer.num_acked)

        # Consume all messages
        self.consumer.start()
        self.consumer.wait()
        self.consumed = self.consumer.messages_consumed[1]
        self.logger.info("num consumed: %d" % len(self.consumed))

        # Check produced vs consumed
        success, msg = self.validate()

        if not success:
            self.mark_for_collect(self.producer)

        assert success, msg

    def clean_shutdown(self):
        """Discover the leader node for our topic and shut it down cleanly."""
        self.kafka.signal_leader(self.topic, partition=0, sig=signal.SIGTERM)

    def hard_shutdown(self):
        """Discover the leader node for our topic and shut it down with a hard kill."""
        self.kafka.signal_leader(self.topic, partition=0, sig=signal.SIGKILL)

    def clean_bounce(self):
        """Chase the leader of one partition and restart it cleanly."""
        for i in range(5):
            prev_leader_node = self.kafka.leader(topic=self.topic, partition=0)
            self.kafka.restart_node(prev_leader_node, wait_sec=5, clean_shutdown=True)

    def hard_bounce(self):
        """Chase the leader of one partition and restart it with a hard kill."""
        for i in range(5):
            prev_leader_node = self.kafka.leader(topic=self.topic, partition=0)
            self.kafka.restart_node(prev_leader_node, wait_sec=5, clean_shutdown=False)

            # Wait long enough for the previous leader to probably be awake again
            time.sleep(6)

    def validate(self):
        """Check that produced messages were consumed."""
        success = True
        msg = ""

        if len(set(self.consumed)) != len(self.consumed):
            # There are duplicates. This is ok, so report it but don't fail the test
            msg += "There are duplicate messages in the log\n"

        if not set(self.consumed).issuperset(set(self.acked)):
            # Every acked message must appear in the logs, i.e. consumed messages must be a superset of acked messages.
            acked_minus_consumed = set(self.producer.acked) - set(self.consumed)
            success = False
            msg += "At least one acked message did not appear in the consumed messages. acked_minus_consumed: " \
                   + str(acked_minus_consumed)

        if not success:
            # Collect all the data logs if there was a failure
            self.mark_for_collect(self.kafka)

        return success, msg

    def test_clean_shutdown(self):
        self.run_with_failure(self.clean_shutdown)

    def test_hard_shutdown(self):
        self.run_with_failure(self.hard_shutdown)

    def test_clean_bounce(self):
        self.run_with_failure(self.clean_bounce)

    def test_hard_bounce(self):
        self.run_with_failure(self.hard_bounce)
class Log4jAppenderTest(Test):
    """
    Tests KafkaLog4jAppender using VerifiableKafkaLog4jAppender that appends increasing ints to a Kafka topic
    """
    def __init__(self, test_context):
        super(Log4jAppenderTest, self).__init__(test_context)
        self.num_zk = 1
        self.num_brokers = 1
        self.messages_received_count = 0

        self.topics = {
            TOPIC: {'partitions': 1, 'replication-factor': 1}
        }

        self.zk = ZookeeperService(test_context, self.num_zk)

    def setUp(self):
        self.zk.start()

    def start_kafka(self, security_protocol, interbroker_security_protocol):
        self.kafka = KafkaService(
            self.test_context, self.num_brokers,
            self.zk, security_protocol=security_protocol,
            interbroker_security_protocol=interbroker_security_protocol, topics=self.topics)
        self.kafka.start()

    def start_appender(self, security_protocol):
        self.appender = KafkaLog4jAppender(self.test_context, self.num_brokers, self.kafka, TOPIC, MAX_MESSAGES,
                                           security_protocol=security_protocol)
        self.appender.start()

    def custom_message_validator(self, msg):
        if msg and "INFO : org.apache.kafka.tools.VerifiableLog4jAppender" in msg:
            self.logger.debug("Received message: %s" % msg)
            self.messages_received_count += 1

    def start_consumer(self):
        self.consumer = ConsoleConsumer(self.test_context, num_nodes=self.num_brokers, kafka=self.kafka,
                                        topic=TOPIC, consumer_timeout_ms=10000,
                                        message_validator=self.custom_message_validator)
        self.consumer.start()

    @cluster(num_nodes=4)
    @matrix(security_protocol=['PLAINTEXT', 'SSL'])
    @cluster(num_nodes=5)
    @matrix(security_protocol=['SASL_PLAINTEXT', 'SASL_SSL'])
    def test_log4j_appender(self, security_protocol='PLAINTEXT'):
        """
        Tests if KafkaLog4jAppender is producing to Kafka topic
        :return: None
        """
        self.start_kafka(security_protocol, security_protocol)
        self.start_appender(security_protocol)
        self.appender.wait()

        self.start_consumer()
        node = self.consumer.nodes[0]

        wait_until(lambda: self.consumer.alive(node),
                   timeout_sec=20, backoff_sec=.2, err_msg="Consumer was too slow to start")

        # Verify consumed messages count
        wait_until(lambda: self.messages_received_count == MAX_MESSAGES, timeout_sec=10,
                   err_msg="Timed out waiting to consume expected number of messages.")

        self.consumer.stop()
class ConsumerGroupCommandTest(Test):
    """
    Tests ConsumerGroupCommand
    """
    # Root directory for persistent output
    PERSISTENT_ROOT = "/mnt/consumer_group_command"
    COMMAND_CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "command.properties")

    def __init__(self, test_context):
        super(ConsumerGroupCommandTest, self).__init__(test_context)
        self.num_zk = 1
        self.num_brokers = 1
        self.topics = {
            TOPIC: {'partitions': 1, 'replication-factor': 1}
        }
        self.zk = ZookeeperService(test_context, self.num_zk)

    def setUp(self):
        self.zk.start()

    def start_kafka(self, security_protocol, interbroker_security_protocol):
        self.kafka = KafkaService(
            self.test_context, self.num_brokers,
            self.zk, security_protocol=security_protocol,
            interbroker_security_protocol=interbroker_security_protocol, topics=self.topics)
        self.kafka.start()

    def start_consumer(self, security_protocol):
        enable_new_consumer = security_protocol == SecurityConfig.SSL
        self.consumer = ConsoleConsumer(self.test_context, num_nodes=self.num_brokers, kafka=self.kafka,
                                        topic=TOPIC, consumer_timeout_ms=None, new_consumer=enable_new_consumer)
        self.consumer.start()

    def setup_and_verify(self, security_protocol, group=None):
        self.start_kafka(security_protocol, security_protocol)
        self.start_consumer(security_protocol)
        consumer_node = self.consumer.nodes[0]
        wait_until(lambda: self.consumer.alive(consumer_node),
                   timeout_sec=10, backoff_sec=.2, err_msg="Consumer was too slow to start")
        kafka_node = self.kafka.nodes[0]
        if security_protocol != SecurityConfig.PLAINTEXT:
            prop_file = str(self.kafka.security_config.client_config())
            self.logger.debug(prop_file)
            kafka_node.account.ssh("mkdir -p %s" % self.PERSISTENT_ROOT, allow_fail=False)
            kafka_node.account.create_file(self.COMMAND_CONFIG_FILE, prop_file)

        # Verify ConsumerGroupCommand lists expected consumer groups
        enable_new_consumer = security_protocol != SecurityConfig.PLAINTEXT
        command_config_file = None
        if enable_new_consumer:
            command_config_file = self.COMMAND_CONFIG_FILE

        if group:
            wait_until(lambda: re.search("topic-consumer-group-command",
                                         self.kafka.describe_consumer_group(group=group, node=kafka_node,
                                                                            new_consumer=enable_new_consumer,
                                                                            command_config=command_config_file)),
                       timeout_sec=10, err_msg="Timed out waiting to list expected consumer groups.")
        else:
            wait_until(lambda: "test-consumer-group" in
                               self.kafka.list_consumer_groups(node=kafka_node,
                                                               new_consumer=enable_new_consumer,
                                                               command_config=command_config_file),
                       timeout_sec=10, err_msg="Timed out waiting to list expected consumer groups.")

        self.consumer.stop()

    @matrix(security_protocol=['PLAINTEXT', 'SSL'])
    def test_list_consumer_groups(self, security_protocol='PLAINTEXT'):
        """
        Tests if ConsumerGroupCommand is listing correct consumer groups
        :return: None
        """
        self.setup_and_verify(security_protocol)

    @matrix(security_protocol=['PLAINTEXT', 'SSL'])
    def test_describe_consumer_group(self, security_protocol='PLAINTEXT'):
        """
        Tests if ConsumerGroupCommand is describing a consumer group correctly
        :return: None
        """
        self.setup_and_verify(security_protocol, group="test-consumer-group")
class ConsumerGroupCommandTest(Test):
    """
    Tests ConsumerGroupCommand
    """
    # Root directory for persistent output
    PERSISTENT_ROOT = "/mnt/consumer_group_command"
    COMMAND_CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "command.properties")

    def __init__(self, test_context):
        super(ConsumerGroupCommandTest, self).__init__(test_context)
        self.num_zk = 1
        self.num_brokers = 1
        self.topics = {
            TOPIC: {'partitions': 1, 'replication-factor': 1}
        }
        self.zk = ZookeeperService(test_context, self.num_zk)

    def setUp(self):
        self.zk.start()

    def start_kafka(self, security_protocol, interbroker_security_protocol):
        self.kafka = KafkaService(
            self.test_context, self.num_brokers,
            self.zk, security_protocol=security_protocol,
            interbroker_security_protocol=interbroker_security_protocol, topics=self.topics)
        self.kafka.start()

    def start_consumer(self, security_protocol):
        enable_new_consumer = security_protocol == SecurityConfig.SSL
        self.consumer = ConsoleConsumer(self.test_context, num_nodes=self.num_brokers, kafka=self.kafka,
                                        topic=TOPIC, consumer_timeout_ms=None, new_consumer=enable_new_consumer)
        self.consumer.start()

    def setup_and_verify(self, security_protocol, group=None):
        self.start_kafka(security_protocol, security_protocol)
        self.start_consumer(security_protocol)
        consumer_node = self.consumer.nodes[0]
        wait_until(lambda: self.consumer.alive(consumer_node),
                   timeout_sec=10, backoff_sec=.2, err_msg="Consumer was too slow to start")
        kafka_node = self.kafka.nodes[0]
        if security_protocol != SecurityConfig.PLAINTEXT:
            prop_file = str(self.kafka.security_config.client_config())
            self.logger.debug(prop_file)
            kafka_node.account.ssh("mkdir -p %s" % self.PERSISTENT_ROOT, allow_fail=False)
            kafka_node.account.create_file(self.COMMAND_CONFIG_FILE, prop_file)

        # Verify ConsumerGroupCommand lists expected consumer groups
        enable_new_consumer = security_protocol != SecurityConfig.PLAINTEXT
        command_config_file = None
        if enable_new_consumer:
            command_config_file = self.COMMAND_CONFIG_FILE

        if group:
            wait_until(lambda: ("%s, topic-consumer-group-command, 0," % group) in
                               self.kafka.describe_consumer_group(group=group, node=kafka_node,
                                                                  new_consumer=enable_new_consumer,
                                                                  command_config=command_config_file),
                       timeout_sec=10, err_msg="Timed out waiting to list expected consumer groups.")
        else:
            wait_until(lambda: "test-consumer-group" in
                               self.kafka.list_consumer_groups(node=kafka_node,
                                                               new_consumer=enable_new_consumer,
                                                               command_config=command_config_file),
                       timeout_sec=10, err_msg="Timed out waiting to list expected consumer groups.")

        self.consumer.stop()

    @matrix(security_protocol=['PLAINTEXT', 'SSL'])
    def test_list_consumer_groups(self, security_protocol='PLAINTEXT'):
        """
        Tests if ConsumerGroupCommand is listing correct consumer groups
        :return: None
        """
        self.setup_and_verify(security_protocol)

    @matrix(security_protocol=['PLAINTEXT', 'SSL'])
    def test_describe_consumer_group(self, security_protocol='PLAINTEXT'):
        """
        Tests if ConsumerGroupCommand is describing a consumer group correctly
        :return: None
        """
        self.setup_and_verify(security_protocol, group="test-consumer-group")
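# For non-PLAINTEXT runs, COMMAND_CONFIG_FILE is filled from
# self.kafka.security_config.client_config(). Its content is assumed to be a
# standard Kafka client properties file along these lines (placeholder values):
EXAMPLE_COMMAND_CONFIG = """
security.protocol=SSL
ssl.truststore.location=<path to truststore.jks>
ssl.truststore.password=<password>
"""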
class DelegationTokenTest(Test):
    def __init__(self, test_context):
        super(DelegationTokenTest, self).__init__(test_context)

        self.test_context = test_context
        self.topic = "topic"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(self.test_context, num_nodes=1, zk=self.zk, zk_chroot="/kafka",
                                  topics={self.topic: {"partitions": 1, "replication-factor": 1}},
                                  server_prop_overides=[
                                      [config_property.DELEGATION_TOKEN_MAX_LIFETIME_MS, "604800000"],
                                      [config_property.DELEGATION_TOKEN_EXPIRY_TIME_MS, "86400000"],
                                      [config_property.DELEGATION_TOKEN_MASTER_KEY, "test12345"],
                                      [config_property.SASL_ENABLED_MECHANISMS, "GSSAPI,SCRAM-SHA-256"]
                                  ])
        self.jaas_deleg_conf_path = "/tmp/jaas_deleg.conf"
        self.jaas_deleg_conf = ""
        self.client_properties_content = """
security.protocol=SASL_PLAINTEXT
sasl.mechanism=SCRAM-SHA-256
sasl.kerberos.service.name=kafka
client.id=console-consumer
"""
        self.client_kafka_opts = ' -Djava.security.auth.login.config=' + self.jaas_deleg_conf_path

        self.producer = VerifiableProducer(self.test_context, num_nodes=1, kafka=self.kafka, topic=self.topic,
                                           max_messages=1, throughput=1,
                                           kafka_opts_override=self.client_kafka_opts,
                                           client_prop_file_override=self.client_properties_content)

        self.consumer = ConsoleConsumer(self.test_context, num_nodes=1, kafka=self.kafka, topic=self.topic,
                                        kafka_opts_override=self.client_kafka_opts,
                                        client_prop_file_override=self.client_properties_content)

        self.kafka.security_protocol = 'SASL_PLAINTEXT'
        self.kafka.client_sasl_mechanism = 'GSSAPI,SCRAM-SHA-256'
        self.kafka.interbroker_sasl_mechanism = 'GSSAPI'

    def setUp(self):
        self.zk.start()

    def tearDown(self):
        self.producer.nodes[0].account.remove(self.jaas_deleg_conf_path)
        self.consumer.nodes[0].account.remove(self.jaas_deleg_conf_path)

    def generate_delegation_token(self):
        self.logger.debug("Request delegation token")
        self.delegation_tokens.generate_delegation_token()
        self.jaas_deleg_conf = self.delegation_tokens.create_jaas_conf_with_delegation_token()

    def expire_delegation_token(self):
        self.kafka.client_sasl_mechanism = 'GSSAPI,SCRAM-SHA-256'
        token_hmac = self.delegation_tokens.token_hmac()
        self.delegation_tokens.expire_delegation_token(token_hmac)

    def produce_with_delegation_token(self):
        self.producer.acked_values = []
        self.producer.nodes[0].account.create_file(self.jaas_deleg_conf_path, self.jaas_deleg_conf)
        self.logger.debug(self.jaas_deleg_conf)
        self.producer.start()

    def consume_with_delegation_token(self):
        self.logger.debug("Consume messages with delegation token")
        self.consumer.nodes[0].account.create_file(self.jaas_deleg_conf_path, self.jaas_deleg_conf)
        self.logger.debug(self.jaas_deleg_conf)
        self.consumer.consumer_timeout_ms = 5000
        self.consumer.start()
        self.consumer.wait()

    def get_datetime_ms(self, input_date):
        return int(time.mktime(datetime.strptime(input_date, "%Y-%m-%dT%H:%M").timetuple()) * 1000)

    def renew_delegation_token(self):
        dt = self.delegation_tokens.parse_delegation_token_out()
        orig_expiry_date_ms = self.get_datetime_ms(dt["expirydate"])
        new_expirydate_ms = orig_expiry_date_ms + 1000

        self.delegation_tokens.renew_delegation_token(dt["hmac"], new_expirydate_ms)

    def test_delegation_token_lifecycle(self):
        self.kafka.start()
        self.delegation_tokens = DelegationTokens(self.kafka, self.test_context)

        self.generate_delegation_token()
        self.renew_delegation_token()
        self.produce_with_delegation_token()
        wait_until(lambda: self.producer.num_acked > 0, timeout_sec=30,
                   err_msg="Expected producer to still be producing.")
        assert 1 == self.producer.num_acked, "number of acked messages: %d" % self.producer.num_acked

        self.consume_with_delegation_token()
        num_consumed = len(self.consumer.messages_consumed[1])
        assert 1 == num_consumed, "number of consumed messages: %d" % num_consumed

        self.expire_delegation_token()

        self.produce_with_delegation_token()
        assert 0 == self.producer.num_acked, "number of acked messages: %d" % self.producer.num_acked