Example #1
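All of these snippets are Kafka ducktape system tests, so they assume the kafkatest helpers are importable. A representative preamble, reconstructed from the names used below (module paths follow the usual kafkatest layout and can shift between Kafka versions; TRUNK is the pre-0.11 name of what later branches call DEV_BRANCH, so only one of the two exists in any given checkout):

from ducktape.mark import matrix, parametrize
from ducktape.mark.resource import cluster
from ducktape.utils.util import wait_until

from kafkatest.services.console_consumer import ConsoleConsumer
from kafkatest.services.kafka import KafkaService, config_property, quorum
from kafkatest.services.verifiable_producer import VerifiableProducer
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.tests.produce_consume_validate import ProduceConsumeValidateTest
from kafkatest.utils import is_int, is_int_with_prefix
from kafkatest.version import (DEV_BRANCH, KafkaVersion, LATEST_0_8_2, LATEST_0_9,
                               LATEST_0_10, LATEST_0_10_0, LATEST_0_10_1, LATEST_0_10_2,
                               LATEST_0_11, LATEST_0_11_0, LATEST_1_0, LATEST_1_1,
                               LATEST_2_0, LATEST_2_1, LATEST_2_2, LATEST_2_3,
                               LATEST_2_4, LATEST_2_5, LATEST_2_6, LATEST_2_7,
                               LATEST_2_8)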
class ClientCompatibilityTestNewBroker(ProduceConsumeValidateTest):

    def __init__(self, test_context):
        super(ClientCompatibilityTestNewBroker, self).__init__(test_context=test_context)

    def setUp(self):
        self.topic = "test_topic"
        self.zk = ZookeeperService(self.test_context, num_nodes=1)
        self.zk.start()

        # Producer and consumer
        self.producer_throughput = 10000
        self.num_producers = 1
        self.num_consumers = 1
        self.messages_per_producer = 1000

    @cluster(num_nodes=6)
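    # num_nodes=6 covers 1 ZooKeeper node + 3 brokers + 1 producer + 1 consumer.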
    @parametrize(producer_version=str(DEV_BRANCH), consumer_version=str(DEV_BRANCH), compression_types=["snappy"], timestamp_type=str("LogAppendTime"))
    @parametrize(producer_version=str(DEV_BRANCH), consumer_version=str(DEV_BRANCH), compression_types=["none"], timestamp_type=str("LogAppendTime"))
    @parametrize(producer_version=str(DEV_BRANCH), consumer_version=str(LATEST_0_9), compression_types=["none"], new_consumer=False, timestamp_type=None)
    @parametrize(producer_version=str(DEV_BRANCH), consumer_version=str(LATEST_0_9), compression_types=["snappy"], timestamp_type=str("CreateTime"))
    @parametrize(producer_version=str(LATEST_2_2), consumer_version=str(LATEST_2_2), compression_types=["none"], timestamp_type=str("CreateTime"))
    @parametrize(producer_version=str(LATEST_2_1), consumer_version=str(LATEST_2_1), compression_types=["zstd"], timestamp_type=str("CreateTime"))
    @parametrize(producer_version=str(LATEST_2_0), consumer_version=str(LATEST_2_0), compression_types=["snappy"], timestamp_type=str("CreateTime"))
    @parametrize(producer_version=str(LATEST_1_1), consumer_version=str(LATEST_1_1), compression_types=["lz4"], timestamp_type=str("CreateTime"))
    @parametrize(producer_version=str(LATEST_1_0), consumer_version=str(LATEST_1_0), compression_types=["none"], timestamp_type=str("CreateTime"))
    @parametrize(producer_version=str(LATEST_0_11_0), consumer_version=str(LATEST_0_11_0), compression_types=["gzip"], timestamp_type=str("CreateTime"))
    @parametrize(producer_version=str(LATEST_0_10_2), consumer_version=str(LATEST_0_10_2), compression_types=["lz4"], timestamp_type=str("CreateTime"))
    @parametrize(producer_version=str(LATEST_0_10_1), consumer_version=str(LATEST_0_10_1), compression_types=["snappy"], timestamp_type=str("LogAppendTime"))
    @parametrize(producer_version=str(LATEST_0_10_0), consumer_version=str(LATEST_0_10_0), compression_types=["snappy"], timestamp_type=str("LogAppendTime"))
    @parametrize(producer_version=str(LATEST_0_9), consumer_version=str(DEV_BRANCH), compression_types=["none"], timestamp_type=None)
    @parametrize(producer_version=str(LATEST_0_9), consumer_version=str(DEV_BRANCH), compression_types=["snappy"], timestamp_type=None)
    @parametrize(producer_version=str(LATEST_0_9), consumer_version=str(LATEST_0_9), compression_types=["snappy"], timestamp_type=str("LogAppendTime"))
    @parametrize(producer_version=str(LATEST_0_8_2), consumer_version=str(LATEST_0_8_2), compression_types=["none"], new_consumer=False, timestamp_type=None)
    def test_compatibility(self, producer_version, consumer_version, compression_types, new_consumer=True, timestamp_type=None):

        self.kafka = KafkaService(self.test_context, num_nodes=3, zk=self.zk, version=DEV_BRANCH, topics={self.topic: {
                                                                    "partitions": 3,
                                                                    "replication-factor": 3,
                                                                    'configs': {"min.insync.replicas": 2}}})
        for node in self.kafka.nodes:
            if timestamp_type is not None:
                node.config[config_property.MESSAGE_TIMESTAMP_TYPE] = timestamp_type
        self.kafka.start()
         
        self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka,
                                           self.topic, throughput=self.producer_throughput,
                                           message_validator=is_int,
                                           compression_types=compression_types,
                                           version=KafkaVersion(producer_version))

        self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka,
                                        self.topic, consumer_timeout_ms=30000, new_consumer=new_consumer,
                                        message_validator=is_int, version=KafkaVersion(consumer_version))

        self.run_produce_consume_validate(lambda: wait_until(
            lambda: self.producer.each_produced_at_least(self.messages_per_producer),
            timeout_sec=120, backoff_sec=1,
            err_msg="Producer did not produce all messages in a reasonable amount of time"))
Example #2
class CompressionTest(ProduceConsumeValidateTest):
    """
    These tests validate produce / consume for compressed topics.
    """

    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(CompressionTest, self).__init__(test_context=test_context)

        self.topic = "test_topic"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(test_context, num_nodes=1, zk=self.zk, topics={self.topic: {
                                                                    "partitions": 10,
                                                                    "replication-factor": 1}})
        self.num_partitions = 10
        self.timeout_sec = 60
        self.producer_throughput = 1000
        self.num_producers = 4
        self.messages_per_producer = 1000
        self.num_consumers = 1

    def setUp(self):
        self.zk.start()

    def min_cluster_size(self):
        # Override this since we're adding services outside of the constructor
        return super(CompressionTest, self).min_cluster_size() + self.num_producers + self.num_consumers

    @parametrize(compression_types=["snappy","gzip","lz4","none"], new_consumer=True)
    @parametrize(compression_types=["snappy","gzip","lz4","none"], new_consumer=False)
    def test_compressed_topic(self, compression_types, new_consumer):
        """Test produce => consume => validate for compressed topics
        Setup: 1 zk, 1 kafka node, 1 topic with partitions=10, replication-factor=1

        The compression_types parameter gives a list of compression types (or no compression if
        "none"). Each producer in a VerifiableProducer group (num_producers = 4) will use a
        compression type from the list based on the producer's index in the group.

            - Produce messages in the background
            - Consume messages in the background
            - Stop producing, and finish consuming
            - Validate that every acked message was consumed
        """

        self.kafka.security_protocol = "PLAINTEXT"
        self.kafka.interbroker_security_protocol = self.kafka.security_protocol
        self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka,
                                           self.topic, throughput=self.producer_throughput,
                                           message_validator=is_int_with_prefix,
                                           compression_types=compression_types)
        self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka, self.topic,
                                        new_consumer=new_consumer, consumer_timeout_ms=60000,
                                        message_validator=is_int_with_prefix)
        self.kafka.start()

        self.run_produce_consume_validate(lambda: wait_until(
            lambda: self.producer.each_produced_at_least(self.messages_per_producer),
            timeout_sec=120, backoff_sec=1,
            err_msg="Producer did not produce all messages in a reasonable amount of time"))
class ClientCompatibilityProduceConsumeTest(ProduceConsumeValidateTest):
    """
    These tests validate that we can use a new client to produce and consume from older brokers.
    """

    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(ClientCompatibilityProduceConsumeTest, self).__init__(test_context=test_context)

        self.topic = "test_topic"
        self.zk = ZookeeperService(test_context, num_nodes=3)
        self.kafka = KafkaService(test_context, num_nodes=3, zk=self.zk, topics={self.topic:{
                                                                    "partitions": 10,
                                                                    "replication-factor": 2}})
        self.num_partitions = 10
        self.timeout_sec = 60
        self.producer_throughput = 1000
        self.num_producers = 2
        self.messages_per_producer = 1000
        self.num_consumers = 1

    def setUp(self):
        self.zk.start()

    def min_cluster_size(self):
        # Override this since we're adding services outside of the constructor
        return super(ClientCompatibilityProduceConsumeTest, self).min_cluster_size() + self.num_producers + self.num_consumers

    @parametrize(broker_version=str(DEV_BRANCH))
    @parametrize(broker_version=str(LATEST_0_10_0))
    @parametrize(broker_version=str(LATEST_0_10_1))
    @parametrize(broker_version=str(LATEST_0_10_2))
    @parametrize(broker_version=str(LATEST_0_11_0))
    @parametrize(broker_version=str(LATEST_1_0))
    @parametrize(broker_version=str(LATEST_1_1))
    @parametrize(broker_version=str(LATEST_2_0))
    @parametrize(broker_version=str(LATEST_2_1))
    @parametrize(broker_version=str(LATEST_2_2))
    @parametrize(broker_version=str(LATEST_2_3))
    @parametrize(broker_version=str(LATEST_2_4))
    @parametrize(broker_version=str(LATEST_2_5))
    def test_produce_consume(self, broker_version):
        print("running producer_consumer_compat with broker_version = %s" % broker_version, flush=True)
        self.kafka.set_version(KafkaVersion(broker_version))
        self.kafka.security_protocol = "PLAINTEXT"
        self.kafka.interbroker_security_protocol = self.kafka.security_protocol
        self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka,
                                           self.topic, throughput=self.producer_throughput,
                                           message_validator=is_int_with_prefix)
        self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka, self.topic,
                                        consumer_timeout_ms=60000,
                                        message_validator=is_int_with_prefix)
        self.kafka.start()

        self.run_produce_consume_validate(lambda: wait_until(
            lambda: self.producer.each_produced_at_least(self.messages_per_producer),
            timeout_sec=120, backoff_sec=1,
            err_msg="Producer did not produce all messages in a reasonable amount of time"))
class ClientCompatibilityTestNewBroker(ProduceConsumeValidateTest):

    def __init__(self, test_context):
        super(ClientCompatibilityTestNewBroker, self).__init__(test_context=test_context)

    def setUp(self):
        self.topic = "test_topic"
        self.zk = ZookeeperService(self.test_context, num_nodes=1)
        self.zk.start()

        # Producer and consumer
        self.producer_throughput = 10000
        self.num_producers = 1
        self.num_consumers = 1
        self.messages_per_producer = 1000

    @cluster(num_nodes=6)
    @parametrize(producer_version=str(DEV_BRANCH), consumer_version=str(DEV_BRANCH), compression_types=["snappy"], timestamp_type=str("LogAppendTime"))
    @parametrize(producer_version=str(DEV_BRANCH), consumer_version=str(DEV_BRANCH), compression_types=["none"], timestamp_type=str("LogAppendTime"))
    @parametrize(producer_version=str(DEV_BRANCH), consumer_version=str(LATEST_0_9), compression_types=["none"], new_consumer=False, timestamp_type=None)
    @parametrize(producer_version=str(DEV_BRANCH), consumer_version=str(LATEST_0_9), compression_types=["snappy"], timestamp_type=str("CreateTime"))
    @parametrize(producer_version=str(LATEST_1_1), consumer_version=str(LATEST_1_1), compression_types=["lz4"], timestamp_type=str("CreateTime"))
    @parametrize(producer_version=str(LATEST_1_0), consumer_version=str(LATEST_1_0), compression_types=["none"], timestamp_type=str("CreateTime"))
    @parametrize(producer_version=str(LATEST_0_11_0), consumer_version=str(LATEST_0_11_0), compression_types=["gzip"], timestamp_type=str("CreateTime"))
    @parametrize(producer_version=str(LATEST_0_10_2), consumer_version=str(LATEST_0_10_2), compression_types=["lz4"], timestamp_type=str("CreateTime"))
    @parametrize(producer_version=str(LATEST_0_10_1), consumer_version=str(LATEST_0_10_1), compression_types=["snappy"], timestamp_type=str("LogAppendTime"))
    @parametrize(producer_version=str(LATEST_0_10_0), consumer_version=str(LATEST_0_10_0), compression_types=["snappy"], timestamp_type=str("LogAppendTime"))
    @parametrize(producer_version=str(LATEST_0_9), consumer_version=str(DEV_BRANCH), compression_types=["none"], timestamp_type=None)
    @parametrize(producer_version=str(LATEST_0_9), consumer_version=str(DEV_BRANCH), compression_types=["snappy"], timestamp_type=None)
    @parametrize(producer_version=str(LATEST_0_9), consumer_version=str(LATEST_0_9), compression_types=["snappy"], timestamp_type=str("LogAppendTime"))
    @parametrize(producer_version=str(LATEST_0_8_2), consumer_version=str(LATEST_0_8_2), compression_types=["none"], new_consumer=False, timestamp_type=None)
    def test_compatibility(self, producer_version, consumer_version, compression_types, new_consumer=True, timestamp_type=None):

        self.kafka = KafkaService(self.test_context, num_nodes=3, zk=self.zk, version=DEV_BRANCH, topics={self.topic: {
                                                                    "partitions": 3,
                                                                    "replication-factor": 3,
                                                                    'configs': {"min.insync.replicas": 2}}})
        for node in self.kafka.nodes:
            if timestamp_type is not None:
                node.config[config_property.MESSAGE_TIMESTAMP_TYPE] = timestamp_type
        self.kafka.start()
         
        self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka,
                                           self.topic, throughput=self.producer_throughput,
                                           message_validator=is_int,
                                           compression_types=compression_types,
                                           version=KafkaVersion(producer_version))

        self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka,
                                        self.topic, consumer_timeout_ms=30000, new_consumer=new_consumer,
                                        message_validator=is_int, version=KafkaVersion(consumer_version))

        self.run_produce_consume_validate(lambda: wait_until(
            lambda: self.producer.each_produced_at_least(self.messages_per_producer),
            timeout_sec=120, backoff_sec=1,
            err_msg="Producer did not produce all messages in a reasonable amount of time"))
class ClientCompatibilityProduceConsumeTest(ProduceConsumeValidateTest):
    """
    These tests validate that we can use a new client to produce and consume from older brokers.
    """

    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(ClientCompatibilityProduceConsumeTest, self).__init__(test_context=test_context)

        self.topic = "test_topic"
        self.zk = ZookeeperService(test_context, num_nodes=3)
        self.kafka = KafkaService(test_context, num_nodes=3, zk=self.zk, topics={self.topic:{
                                                                    "partitions": 10,
                                                                    "replication-factor": 2}})
        self.num_partitions = 10
        self.timeout_sec = 60
        self.producer_throughput = 1000
        self.num_producers = 2
        self.messages_per_producer = 1000
        self.num_consumers = 1

    def setUp(self):
        self.zk.start()

    def min_cluster_size(self):
        # Override this since we're adding services outside of the constructor
        return super(ClientCompatibilityProduceConsumeTest, self).min_cluster_size() + self.num_producers + self.num_consumers

    @parametrize(broker_version=str(DEV_BRANCH))
    @parametrize(broker_version=str(LATEST_0_10_0))
    @parametrize(broker_version=str(LATEST_0_10_1))
    @parametrize(broker_version=str(LATEST_0_10_2))
    @parametrize(broker_version=str(LATEST_0_11_0))
    @parametrize(broker_version=str(LATEST_1_0))
    @parametrize(broker_version=str(LATEST_1_1))
    def test_produce_consume(self, broker_version):
        print("running producer_consumer_compat with broker_version = %s" % broker_version)
        self.kafka.set_version(KafkaVersion(broker_version))
        self.kafka.security_protocol = "PLAINTEXT"
        self.kafka.interbroker_security_protocol = self.kafka.security_protocol
        self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka,
                                           self.topic, throughput=self.producer_throughput,
                                           message_validator=is_int_with_prefix)
        self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka, self.topic,
                                        consumer_timeout_ms=60000,
                                        message_validator=is_int_with_prefix)
        self.kafka.start()

        self.run_produce_consume_validate(lambda: wait_until(
            lambda: self.producer.each_produced_at_least(self.messages_per_producer),
            timeout_sec=120, backoff_sec=1,
            err_msg="Producer did not produce all messages in a reasonable amount of time"))
class CompressionTest(ProduceConsumeValidateTest):
    """
    These tests validate produce / consume for compressed topics.
    """
    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(CompressionTest, self).__init__(test_context=test_context)

        self.topic = "test_topic"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(
            test_context,
            num_nodes=1,
            zk=self.zk,
            topics={self.topic: {
                "partitions": 10,
                "replication-factor": 1
            }})
        self.num_partitions = 10
        self.timeout_sec = 60
        self.producer_throughput = 1000
        self.num_producers = 4
        self.messages_per_producer = 1000
        self.num_consumers = 1

    def setUp(self):
        self.zk.start()

    def min_cluster_size(self):
        # Override this since we're adding services outside of the constructor
        return super(
            CompressionTest,
            self).min_cluster_size() + self.num_producers + self.num_consumers

    @cluster(num_nodes=7)
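    # num_nodes=7 covers 1 ZooKeeper node + 1 broker + 4 producers + 1 consumer,
    # matching the min_cluster_size() override above.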
    @parametrize(compression_types=["snappy", "gzip", "lz4", "none"],
                 new_consumer=True)
    @parametrize(compression_types=["snappy", "gzip", "lz4", "none"],
                 new_consumer=False)
    def test_compressed_topic(self, compression_types, new_consumer):
        """Test produce => consume => validate for compressed topics
        Setup: 1 zk, 1 kafka node, 1 topic with partitions=10, replication-factor=1

        The compression_types parameter gives a list of compression types (or no compression if
        "none"). Each producer in a VerifiableProducer group (num_producers = 4) will use a
        compression type from the list based on the producer's index in the group.

            - Produce messages in the background
            - Consume messages in the background
            - Stop producing, and finish consuming
            - Validate that every acked message was consumed
        """

        self.kafka.security_protocol = "PLAINTEXT"
        self.kafka.interbroker_security_protocol = self.kafka.security_protocol
        self.producer = VerifiableProducer(
            self.test_context,
            self.num_producers,
            self.kafka,
            self.topic,
            throughput=self.producer_throughput,
            message_validator=is_int_with_prefix,
            compression_types=compression_types)
        self.consumer = ConsoleConsumer(self.test_context,
                                        self.num_consumers,
                                        self.kafka,
                                        self.topic,
                                        new_consumer=new_consumer,
                                        consumer_timeout_ms=60000,
                                        message_validator=is_int_with_prefix)
        self.kafka.start()

        self.run_produce_consume_validate(lambda: wait_until(
            lambda: self.producer.each_produced_at_least(self.messages_per_producer),
            timeout_sec=120,
            backoff_sec=1,
            err_msg="Producer did not produce all messages in a reasonable amount of time"))
class MessageFormatChangeTest(ProduceConsumeValidateTest):
    def __init__(self, test_context):
        super(MessageFormatChangeTest,
              self).__init__(test_context=test_context)

    def setUp(self):
        self.topic = "test_topic"
        self.zk = ZookeeperService(self.test_context, num_nodes=1)

        self.zk.start()

        # Producer and consumer
        self.producer_throughput = 10000
        self.num_producers = 1
        self.num_consumers = 1
        self.messages_per_producer = 100

    def produce_and_consume(self, producer_version, consumer_version, group):
        self.producer = VerifiableProducer(
            self.test_context,
            self.num_producers,
            self.kafka,
            self.topic,
            throughput=self.producer_throughput,
            message_validator=is_int,
            version=KafkaVersion(producer_version))
        self.consumer = ConsoleConsumer(self.test_context,
                                        self.num_consumers,
                                        self.kafka,
                                        self.topic,
                                        consumer_timeout_ms=30000,
                                        message_validator=is_int,
                                        version=KafkaVersion(consumer_version))
        self.consumer.group_id = group
        self.run_produce_consume_validate(lambda: wait_until(
            lambda: self.producer.each_produced_at_least(self.messages_per_producer),
            timeout_sec=120,
            backoff_sec=1,
            err_msg="Producer did not produce all messages in a reasonable amount of time"))

    @parametrize(producer_version=str(TRUNK), consumer_version=str(TRUNK))
    @parametrize(producer_version=str(LATEST_0_9),
                 consumer_version=str(LATEST_0_9))
    def test_compatibility(self, producer_version, consumer_version):
        """ This tests performs the following checks:
        The workload is a mix of 0.9.x and 0.10.x producers and consumers 
        that produce to and consume from a 0.10.x cluster
        1. initially the topic is using message format 0.9.0
        2. change the message format version for topic to 0.10.0 on the fly.
        3. change the message format version for topic back to 0.9.0 on the fly.
        - The producers and consumers should not have any issue.
        - Note that for 0.9.x consumers/producers we only do steps 1 and 2
        """
        self.kafka = KafkaService(self.test_context,
                                  num_nodes=3,
                                  zk=self.zk,
                                  version=TRUNK,
                                  topics={
                                      self.topic: {
                                          "partitions": 3,
                                          "replication-factor": 3,
                                          'configs': {
                                              "min.insync.replicas": 2
                                          }
                                      }
                                  })

        self.kafka.start()
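        # alter_message_format changes the topic-level message.format.version
        # config on the live cluster, conceptually what
        #   kafka-configs.sh --alter --entity-type topics --entity-name test_topic --add-config message.format.version=<v>
        # would do; the exact mechanism is internal to KafkaService.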
        self.logger.info("First format change to 0.9.0")
        self.kafka.alter_message_format(self.topic, str(LATEST_0_9))
        self.produce_and_consume(producer_version, consumer_version, "group1")

        self.logger.info("Second format change to 0.10.0")
        self.kafka.alter_message_format(self.topic, str(LATEST_0_10))
        self.produce_and_consume(producer_version, consumer_version, "group2")

        if producer_version == str(TRUNK) and consumer_version == str(TRUNK):
            self.logger.info("Third format change back to 0.9.0")
            self.kafka.alter_message_format(self.topic, str(LATEST_0_9))
            self.produce_and_consume(producer_version, consumer_version,
                                     "group3")
Example #8
class MessageFormatChangeTest(ProduceConsumeValidateTest):
    def __init__(self, test_context):
        super(MessageFormatChangeTest,
              self).__init__(test_context=test_context)

    def setUp(self):
        self.topic = "test_topic"
        self.zk = ZookeeperService(self.test_context, num_nodes=1)

        self.zk.start()

        # Producer and consumer
        self.producer_throughput = 10000
        self.num_producers = 1
        self.num_consumers = 1
        self.messages_per_producer = 100

    def produce_and_consume(self, producer_version, consumer_version, group):
        self.producer = VerifiableProducer(
            self.test_context,
            self.num_producers,
            self.kafka,
            self.topic,
            throughput=self.producer_throughput,
            message_validator=is_int,
            version=KafkaVersion(producer_version))
        self.consumer = ConsoleConsumer(self.test_context,
                                        self.num_consumers,
                                        self.kafka,
                                        self.topic,
                                        consumer_timeout_ms=30000,
                                        message_validator=is_int,
                                        version=KafkaVersion(consumer_version))
        self.consumer.group_id = group
        self.run_produce_consume_validate(lambda: wait_until(
            lambda: self.producer.each_produced_at_least(self.messages_per_producer),
            timeout_sec=120,
            backoff_sec=1,
            err_msg="Producer did not produce all messages in a reasonable amount of time"))

    @cluster(num_nodes=12)
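    # num_nodes=12: 1 ZooKeeper node + 3 brokers, plus enough client nodes for the
    # four produce_and_consume rounds below (each round creates fresh producer and
    # consumer services); the exact accounting depends on ducktape's node allocation.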
    @parametrize(producer_version=str(DEV_BRANCH),
                 consumer_version=str(DEV_BRANCH))
    @parametrize(producer_version=str(LATEST_0_10),
                 consumer_version=str(LATEST_0_10))
    @parametrize(producer_version=str(LATEST_0_9),
                 consumer_version=str(LATEST_0_9))
    def test_compatibility(self, producer_version, consumer_version):
        """ This tests performs the following checks:
        The workload is a mix of 0.9.x, 0.10.x and 0.11.x producers and consumers
        that produce to and consume from a DEV_BRANCH cluster
        1. initially the topic is using message format 0.9.0
        2. change the message format version for topic to 0.10.0 on the fly.
        3. change the message format version for topic to 0.11.0 on the fly.
        4. change the message format version for topic back to 0.10.0 on the fly (only if the client version is 0.11.0 or newer)
        - The producers and consumers should not have any issue.

        Note regarding step number 4. Downgrading the message format version is generally unsupported as it breaks
        older clients. More concretely, if we downgrade a topic from 0.11.0 to 0.10.0 after it contains messages with
        version 0.11.0, we will return the 0.11.0 messages without down conversion due to an optimisation in the
        handling of fetch requests. This will break any consumer that doesn't support 0.11.0. So, in practice, step 4
        is similar to step 2 and it didn't seem worth it to increase the cluster size in order to add a step 5 that
        would change the message format version for the topic back to 0.9.0.
        """
        self.kafka = KafkaService(self.test_context,
                                  num_nodes=3,
                                  zk=self.zk,
                                  version=DEV_BRANCH,
                                  topics={
                                      self.topic: {
                                          "partitions": 3,
                                          "replication-factor": 3,
                                          'configs': {
                                              "min.insync.replicas": 2
                                          }
                                      }
                                  })

        self.kafka.start()
        self.logger.info("First format change to 0.9.0")
        self.kafka.alter_message_format(self.topic, str(LATEST_0_9))
        self.produce_and_consume(producer_version, consumer_version, "group1")

        self.logger.info("Second format change to 0.10.0")
        self.kafka.alter_message_format(self.topic, str(LATEST_0_10))
        self.produce_and_consume(producer_version, consumer_version, "group2")

        self.logger.info("Third format change to 0.11.0")
        self.kafka.alter_message_format(self.topic, str(LATEST_0_11))
        self.produce_and_consume(producer_version, consumer_version, "group3")

        if producer_version == str(DEV_BRANCH) and consumer_version == str(
                DEV_BRANCH):
            self.logger.info("Fourth format change back to 0.10.0")
            self.kafka.alter_message_format(self.topic, str(LATEST_0_10))
            self.produce_and_consume(producer_version, consumer_version,
                                     "group4")
class ClientCompatibilityTestNewBroker(ProduceConsumeValidateTest):
    def __init__(self, test_context):
        super(ClientCompatibilityTestNewBroker,
              self).__init__(test_context=test_context)

    def setUp(self):
        self.topic = "test_topic"
        self.zk = ZookeeperService(
            self.test_context, num_nodes=1) if quorum.for_test(
                self.test_context) == quorum.zk else None

        if self.zk:
            self.zk.start()

        # Producer and consumer
        self.producer_throughput = 10000
        self.num_producers = 1
        self.num_consumers = 1
        self.messages_per_producer = 1000

    @cluster(num_nodes=6)
    @matrix(producer_version=[str(DEV_BRANCH)],
            consumer_version=[str(DEV_BRANCH)],
            compression_types=[["snappy"]],
            timestamp_type=[str("LogAppendTime")],
            metadata_quorum=quorum.all_non_upgrade)
    @matrix(producer_version=[str(DEV_BRANCH)],
            consumer_version=[str(DEV_BRANCH)],
            compression_types=[["none"]],
            timestamp_type=[str("LogAppendTime")],
            metadata_quorum=quorum.all_non_upgrade)
    @parametrize(producer_version=str(DEV_BRANCH),
                 consumer_version=str(LATEST_0_9),
                 compression_types=["none"],
                 new_consumer=False,
                 timestamp_type=None)
    @matrix(producer_version=[str(DEV_BRANCH)],
            consumer_version=[str(LATEST_0_9)],
            compression_types=[["snappy"]],
            timestamp_type=[str("CreateTime")],
            metadata_quorum=quorum.all_non_upgrade)
    @matrix(producer_version=[str(LATEST_2_2)],
            consumer_version=[str(LATEST_2_2)],
            compression_types=[["none"]],
            timestamp_type=[str("CreateTime")],
            metadata_quorum=quorum.all_non_upgrade)
    @matrix(producer_version=[str(LATEST_2_3)],
            consumer_version=[str(LATEST_2_3)],
            compression_types=[["none"]],
            timestamp_type=[str("CreateTime")],
            metadata_quorum=quorum.all_non_upgrade)
    @matrix(producer_version=[str(LATEST_2_4)],
            consumer_version=[str(LATEST_2_4)],
            compression_types=[["none"]],
            timestamp_type=[str("CreateTime")],
            metadata_quorum=quorum.all_non_upgrade)
    @matrix(producer_version=[str(LATEST_2_5)],
            consumer_version=[str(LATEST_2_5)],
            compression_types=[["none"]],
            timestamp_type=[str("CreateTime")],
            metadata_quorum=quorum.all_non_upgrade)
    @matrix(producer_version=[str(LATEST_2_6)],
            consumer_version=[str(LATEST_2_6)],
            compression_types=[["none"]],
            timestamp_type=[str("CreateTime")],
            metadata_quorum=quorum.all_non_upgrade)
    @matrix(producer_version=[str(LATEST_2_7)],
            consumer_version=[str(LATEST_2_7)],
            compression_types=[["none"]],
            timestamp_type=[str("CreateTime")],
            metadata_quorum=quorum.all_non_upgrade)
    @matrix(producer_version=[str(LATEST_2_8)],
            consumer_version=[str(LATEST_2_8)],
            compression_types=[["none"]],
            timestamp_type=[str("CreateTime")],
            metadata_quorum=quorum.all_non_upgrade)
    @matrix(producer_version=[str(LATEST_2_1)],
            consumer_version=[str(LATEST_2_1)],
            compression_types=[["zstd"]],
            timestamp_type=[str("CreateTime")],
            metadata_quorum=quorum.all_non_upgrade)
    @matrix(producer_version=[str(LATEST_2_0)],
            consumer_version=[str(LATEST_2_0)],
            compression_types=[["snappy"]],
            timestamp_type=[str("CreateTime")],
            metadata_quorum=quorum.all_non_upgrade)
    @matrix(producer_version=[str(LATEST_1_1)],
            consumer_version=[str(LATEST_1_1)],
            compression_types=[["lz4"]],
            timestamp_type=[str("CreateTime")],
            metadata_quorum=quorum.all_non_upgrade)
    @matrix(producer_version=[str(LATEST_1_0)],
            consumer_version=[str(LATEST_1_0)],
            compression_types=[["none"]],
            timestamp_type=[str("CreateTime")],
            metadata_quorum=quorum.all_non_upgrade)
    @matrix(producer_version=[str(LATEST_0_11_0)],
            consumer_version=[str(LATEST_0_11_0)],
            compression_types=[["gzip"]],
            timestamp_type=[str("CreateTime")],
            metadata_quorum=quorum.all_non_upgrade)
    @matrix(producer_version=[str(LATEST_0_10_2)],
            consumer_version=[str(LATEST_0_10_2)],
            compression_types=[["lz4"]],
            timestamp_type=[str("CreateTime")],
            metadata_quorum=quorum.all_non_upgrade)
    @matrix(producer_version=[str(LATEST_0_10_1)],
            consumer_version=[str(LATEST_0_10_1)],
            compression_types=[["snappy"]],
            timestamp_type=[str("LogAppendTime")],
            metadata_quorum=quorum.all_non_upgrade)
    @matrix(producer_version=[str(LATEST_0_10_0)],
            consumer_version=[str(LATEST_0_10_0)],
            compression_types=[["snappy"]],
            timestamp_type=[str("LogAppendTime")],
            metadata_quorum=quorum.all_non_upgrade)
    @matrix(producer_version=[str(LATEST_0_9)],
            consumer_version=[str(DEV_BRANCH)],
            compression_types=[["none"]],
            timestamp_type=[None],
            metadata_quorum=quorum.all_non_upgrade)
    @matrix(producer_version=[str(LATEST_0_9)],
            consumer_version=[str(DEV_BRANCH)],
            compression_types=[["snappy"]],
            timestamp_type=[None],
            metadata_quorum=quorum.all_non_upgrade)
    @matrix(producer_version=[str(LATEST_0_9)],
            consumer_version=[str(LATEST_0_9)],
            compression_types=[["snappy"]],
            timestamp_type=[str("LogAppendTime")],
            metadata_quorum=quorum.all_non_upgrade)
    @parametrize(producer_version=str(LATEST_0_8_2),
                 consumer_version=str(LATEST_0_8_2),
                 compression_types=["none"],
                 new_consumer=False,
                 timestamp_type=None)
    def test_compatibility(self,
                           producer_version,
                           consumer_version,
                           compression_types,
                           new_consumer=True,
                           timestamp_type=None,
                           metadata_quorum=quorum.zk):
        if not new_consumer and metadata_quorum != quorum.zk:
            raise Exception(
                "ZooKeeper-based consumers are not supported when using a KRaft metadata quorum"
            )
        self.kafka = KafkaService(self.test_context,
                                  num_nodes=3,
                                  zk=self.zk,
                                  version=DEV_BRANCH,
                                  topics={
                                      self.topic: {
                                          "partitions": 3,
                                          "replication-factor": 3,
                                          'configs': {
                                              "min.insync.replicas": 2
                                          }
                                      }
                                  },
                                  controller_num_nodes_override=1)
        for node in self.kafka.nodes:
            if timestamp_type is not None:
                node.config[
                    config_property.MESSAGE_TIMESTAMP_TYPE] = timestamp_type
        self.kafka.start()

        self.producer = VerifiableProducer(
            self.test_context,
            self.num_producers,
            self.kafka,
            self.topic,
            throughput=self.producer_throughput,
            message_validator=is_int,
            compression_types=compression_types,
            version=KafkaVersion(producer_version))

        self.consumer = ConsoleConsumer(self.test_context,
                                        self.num_consumers,
                                        self.kafka,
                                        self.topic,
                                        consumer_timeout_ms=30000,
                                        new_consumer=new_consumer,
                                        message_validator=is_int,
                                        version=KafkaVersion(consumer_version))

        self.run_produce_consume_validate(lambda: wait_until(
            lambda: self.producer.each_produced_at_least(self.messages_per_producer),
            timeout_sec=120,
            backoff_sec=1,
            err_msg="Producer did not produce all messages in a reasonable amount of time"))
class MessageFormatChangeTest(ProduceConsumeValidateTest):

    def __init__(self, test_context):
        super(MessageFormatChangeTest, self).__init__(test_context=test_context)

    def setUp(self):
        self.topic = "test_topic"
        self.zk = ZookeeperService(self.test_context, num_nodes=1)
        self.zk.start()

        # Producer and consumer
        self.producer_throughput = 10000
        self.num_producers = 1
        self.num_consumers = 1
        self.messages_per_producer = 100

    def produce_and_consume(self, producer_version, consumer_version, group):
        self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka,
                                           self.topic,
                                           throughput=self.producer_throughput,
                                           message_validator=is_int,
                                           version=KafkaVersion(producer_version))
        self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka,
                                        self.topic, consumer_timeout_ms=30000,
                                        message_validator=is_int, version=KafkaVersion(consumer_version))
        self.consumer.group_id = group
        self.run_produce_consume_validate(lambda: wait_until(
            lambda: self.producer.each_produced_at_least(self.messages_per_producer),
            timeout_sec=120, backoff_sec=1,
            err_msg="Producer did not produce all messages in a reasonable amount of time"))
        
    @parametrize(producer_version=str(TRUNK), consumer_version=str(TRUNK))
    @parametrize(producer_version=str(LATEST_0_9), consumer_version=str(LATEST_0_9))
    def test_compatibility(self, producer_version, consumer_version):
        """ This tests performs the following checks:
        The workload is a mix of 0.9.x and 0.10.x producers and consumers 
        that produce to and consume from a 0.10.x cluster
        1. initially the topic is using message format 0.9.0
        2. change the message format version for topic to 0.10.0 on the fly.
        3. change the message format version for topic back to 0.9.0 on the fly.
        - The producers and consumers should not have any issue.
        - Note that for 0.9.x consumers/producers we only do steps 1 and 2
        """
        self.kafka = KafkaService(self.test_context, num_nodes=3, zk=self.zk, version=TRUNK, topics={self.topic: {
                                                                    "partitions": 3,
                                                                    "replication-factor": 3,
                                                                    'configs': {"min.insync.replicas": 2}}})
       
        self.kafka.start()
        self.logger.info("First format change to 0.9.0")
        self.kafka.alter_message_format(self.topic, str(LATEST_0_9))
        self.produce_and_consume(producer_version, consumer_version, "group1")

        self.logger.info("Second format change to 0.10.0")
        self.kafka.alter_message_format(self.topic, str(LATEST_0_10))
        self.produce_and_consume(producer_version, consumer_version, "group2")

        if producer_version == str(TRUNK) and consumer_version == str(TRUNK):
            self.logger.info("Third format change back to 0.9.0")
            self.kafka.alter_message_format(self.topic, str(LATEST_0_9))
            self.produce_and_consume(producer_version, consumer_version, "group3")
class MessageFormatChangeTest(ProduceConsumeValidateTest):

    def __init__(self, test_context):
        super(MessageFormatChangeTest, self).__init__(test_context=test_context)

    def setUp(self):
        self.topic = "test_topic"
        self.zk = ZookeeperService(self.test_context, num_nodes=1)
        self.zk.start()

        # Producer and consumer
        self.producer_throughput = 10000
        self.num_producers = 1
        self.num_consumers = 1
        self.messages_per_producer = 100

    def produce_and_consume(self, producer_version, consumer_version, group):
        self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka,
                                           self.topic,
                                           throughput=self.producer_throughput,
                                           message_validator=is_int,
                                           version=KafkaVersion(producer_version))
        self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka,
                                        self.topic, consumer_timeout_ms=30000,
                                        message_validator=is_int, version=KafkaVersion(consumer_version))
        self.consumer.group_id = group
        self.run_produce_consume_validate(lambda: wait_until(
            lambda: self.producer.each_produced_at_least(self.messages_per_producer),
            timeout_sec=120, backoff_sec=1,
            err_msg="Producer did not produce all messages in a reasonable amount of time"))

    @cluster(num_nodes=12)
    @parametrize(producer_version=str(DEV_BRANCH), consumer_version=str(DEV_BRANCH))
    @parametrize(producer_version=str(LATEST_0_10), consumer_version=str(LATEST_0_10))
    @parametrize(producer_version=str(LATEST_0_9), consumer_version=str(LATEST_0_9))
    def test_compatibility(self, producer_version, consumer_version):
        """ This tests performs the following checks:
        The workload is a mix of 0.9.x, 0.10.x and 0.11.x producers and consumers
        that produce to and consume from a DEV_BRANCH cluster
        1. initially the topic is using message format 0.9.0
        2. change the message format version for topic to 0.10.0 on the fly.
        3. change the message format version for topic to 0.11.0 on the fly.
        4. change the message format version for topic back to 0.10.0 on the fly (only if the client version is 0.11.0 or newer)
        - The producers and consumers should not have any issue.

        Note regarding step number 4. Downgrading the message format version is generally unsupported as it breaks
        older clients. More concretely, if we downgrade a topic from 0.11.0 to 0.10.0 after it contains messages with
        version 0.11.0, we will return the 0.11.0 messages without down conversion due to an optimisation in the
        handling of fetch requests. This will break any consumer that doesn't support 0.11.0. So, in practice, step 4
        is similar to step 2 and it didn't seem worth it to increase the cluster size in order to add a step 5 that
        would change the message format version for the topic back to 0.9.0.
        """
        self.kafka = KafkaService(self.test_context, num_nodes=3, zk=self.zk, version=DEV_BRANCH, topics={self.topic: {
                                                                    "partitions": 3,
                                                                    "replication-factor": 3,
                                                                    'configs': {"min.insync.replicas": 2}}})
       
        self.kafka.start()
        self.logger.info("First format change to 0.9.0")
        self.kafka.alter_message_format(self.topic, str(LATEST_0_9))
        self.produce_and_consume(producer_version, consumer_version, "group1")

        self.logger.info("Second format change to 0.10.0")
        self.kafka.alter_message_format(self.topic, str(LATEST_0_10))
        self.produce_and_consume(producer_version, consumer_version, "group2")

        self.logger.info("Third format change to 0.11.0")
        self.kafka.alter_message_format(self.topic, str(LATEST_0_11))
        self.produce_and_consume(producer_version, consumer_version, "group3")

        if producer_version == str(DEV_BRANCH) and consumer_version == str(DEV_BRANCH):
            self.logger.info("Fourth format change back to 0.10.0")
            self.kafka.alter_message_format(self.topic, str(LATEST_0_10))
            self.produce_and_consume(producer_version, consumer_version, "group4")