Example #1
    def setUp(self):
        self.topic = "test_topic"
        self.group = "group"
        self.producer_throughput = 100
        self.num_producers = 1
        self.num_consumers = 1

        self.zk = ZookeeperService(self.test_context,
                                   num_nodes=3,
                                   zk_client_port=False,
                                   zk_client_secure_port=True,
                                   zk_tls_encrypt_only=True)

        self.kafka = KafkaService(self.test_context,
                                  num_nodes=3,
                                  zk=self.zk,
                                  zk_client_secure=True,
                                  topics={
                                      self.topic: {
                                          "partitions": 3,
                                          "replication-factor": 3,
                                          'configs': {
                                              "min.insync.replicas": 2
                                          }
                                      }
                                  })
Example #2
    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(QuotaTest, self).__init__(test_context=test_context)

        self.topic = 'test_topic'
        self.logger.info('use topic ' + self.topic)

        # quota related parameters
        self.quota_config = {'quota_producer_default': 2500000,
                             'quota_consumer_default': 2000000,
                             'quota_producer_bytes_per_second_overrides': 'overridden_id=3750000',
                             'quota_consumer_bytes_per_second_overrides': 'overridden_id=3000000'}
        self.maximum_client_deviation_percentage = 100.0
        self.maximum_broker_deviation_percentage = 5.0
        self.num_records = 100000
        self.record_size = 3000
        self.security_protocol = 'PLAINTEXT'
        self.interbroker_security_protocol = 'PLAINTEXT'

        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(test_context, num_nodes=1, zk=self.zk,
                                  security_protocol=self.security_protocol,
                                  interbroker_security_protocol=self.interbroker_security_protocol,
                                  topics={self.topic: {'partitions': 6, 'replication-factor': 1, 'min.insync.replicas': 1}},
                                  quota_config=self.quota_config,
                                  jmx_object_names=['kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec',
                                                    'kafka.server:type=BrokerTopicMetrics,name=BytesOutPerSec'],
                                  jmx_attributes=['OneMinuteRate'])
        self.num_producers = 1
        self.num_consumers = 2
Example #3
 def __init__(self, test_context):
     """:type test_context: ducktape.tests.test.TestContext"""
     super(ConsumeBenchTest, self).__init__(test_context)
     self.zk = ZookeeperService(test_context,
                                num_nodes=3) if quorum.for_test(
                                    test_context) == quorum.zk else None
     self.kafka = KafkaService(test_context, num_nodes=3, zk=self.zk)
     self.producer_workload_service = ProduceBenchWorkloadService(
         test_context, self.kafka)
     self.consumer_workload_service = ConsumeBenchWorkloadService(
         test_context, self.kafka)
     self.consumer_workload_service_2 = ConsumeBenchWorkloadService(
         test_context, self.kafka)
     self.active_topics = {
         "consume_bench_topic[0-5]": {
             "numPartitions": 5,
             "replicationFactor": 3
         }
     }
     self.trogdor = TrogdorService(context=self.test_context,
                                   client_services=[
                                       self.kafka,
                                       self.producer_workload_service,
                                       self.consumer_workload_service,
                                       self.consumer_workload_service_2
                                   ])
Example #4
    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(ReassignPartitionsTest, self).__init__(test_context=test_context)

        self.topic = "test_topic"
        self.num_partitions = 20
        self.zk = ZookeeperService(test_context, num_nodes=1)
        # We set the min.insync.replicas to match the replication factor because
        # it makes the test more stringent. If min.isr = 2 and
        # replication.factor=3, then the test would tolerate the failure of
        # reassignment for up to one replica per partition, which is not
        # desirable for this test in particular.
        self.kafka = KafkaService(
            test_context,
            num_nodes=4,
            zk=self.zk,
            server_prop_overides=[[
                config_property.LOG_ROLL_TIME_MS, "5000"
            ], [config_property.LOG_RETENTION_CHECK_INTERVAL_MS, "5000"]],
            topics={
                self.topic: {
                    "partitions": self.num_partitions,
                    "replication-factor": 3,
                    'configs': {
                        "min.insync.replicas": 3,
                    }
                }
            })
        self.timeout_sec = 60
        self.producer_throughput = 1000
        self.num_producers = 1
        self.num_consumers = 1
Example #5
    def setup_services(self,
                       security_protocol=SecurityConfig.PLAINTEXT,
                       timestamp_type=None,
                       broker_version=DEV_BRANCH,
                       auto_create_topics=False):
        self.kafka = KafkaService(
            self.test_context,
            self.num_brokers,
            self.zk,
            security_protocol=security_protocol,
            interbroker_security_protocol=security_protocol,
            topics=self.topics,
            version=broker_version,
            server_prop_overides=[[
                "auto.create.topics.enable",
                str(auto_create_topics)
            ]])
        if timestamp_type is not None:
            for node in self.kafka.nodes:
                node.config[
                    config_property.MESSAGE_TIMESTAMP_TYPE] = timestamp_type

        self.cc = ConnectDistributedService(
            self.test_context, 3, self.kafka,
            [self.INPUT_FILE, self.OUTPUT_FILE])
        self.cc.log_level = "DEBUG"

        self.zk.start()
        self.kafka.start()
Example #6
    def test_version_probing_upgrade(self):
        """
        Starts 3 KafkaStreams instances, and upgrades one-by-one to "future version"
        """

        self.zk = ZookeeperService(self.test_context, num_nodes=1)
        self.zk.start()

        self.kafka = KafkaService(self.test_context,
                                  num_nodes=1,
                                  zk=self.zk,
                                  topics=self.topics)
        self.kafka.start()

        self.driver = StreamsSmokeTestDriverService(self.test_context,
                                                    self.kafka)
        self.driver.disable_auto_terminate()
        self.processor1 = StreamsUpgradeTestJobRunnerService(
            self.test_context, self.kafka)
        self.processor2 = StreamsUpgradeTestJobRunnerService(
            self.test_context, self.kafka)
        self.processor3 = StreamsUpgradeTestJobRunnerService(
            self.test_context, self.kafka)

        self.driver.start()
        self.start_all_nodes_with("")  # run with TRUNK

        self.processors = [self.processor1, self.processor2, self.processor3]
        self.old_processors = [
            self.processor1, self.processor2, self.processor3
        ]
        self.upgraded_processors = []

        counter = 1
        current_generation = 3

        random.seed()
        random.shuffle(self.processors)

        for p in self.processors:
            p.CLEAN_NODE_ENABLED = False
            current_generation = self.do_rolling_bounce(
                p, counter, current_generation)
            counter = counter + 1

        # shutdown
        self.driver.stop()
        self.driver.wait()

        random.shuffle(self.processors)
        for p in self.processors:
            node = p.node
            with node.account.monitor_log(p.STDOUT_FILE) as monitor:
                p.stop()
                monitor.wait_until(
                    "UPGRADE-TEST-CLIENT-CLOSED",
                    timeout_sec=60,
                    err_msg="Never saw output 'UPGRADE-TEST-CLIENT-CLOSED' on"
                    + str(node.account))
        self.driver.stop()
Example #7
    def setup_system(self, start_processor=True):
        # Setup phase
        self.zk = ZookeeperService(self.test_context, num_nodes=1)
        self.zk.start()

        self.kafka = KafkaService(self.test_context,
                                  num_nodes=self.replication,
                                  zk=self.zk,
                                  topics=self.topics)
        self.kafka.start()

        # allow some time for topics to be created
        wait_until(lambda: self.confirm_topics_on_all_brokers(
            set(self.topics.keys())),
                   timeout_sec=60,
                   err_msg="Broker did not create all topics in 60 seconds ")

        # Start test harness
        self.driver = StreamsSmokeTestDriverService(self.test_context,
                                                    self.kafka)
        self.processor1 = StreamsSmokeTestJobRunnerService(
            self.test_context, self.kafka)

        self.driver.start()

        if start_processor:
            self.processor1.start()
Example #8
    def test_compatibility(self, producer_version, consumer_version, compression_types, new_consumer=True, timestamp_type=None):

        self.kafka = KafkaService(self.test_context, num_nodes=3, zk=self.zk, version=DEV_BRANCH, topics={self.topic: {
                                                                    "partitions": 3,
                                                                    "replication-factor": 3,
                                                                    'configs': {"min.insync.replicas": 2}}})
        for node in self.kafka.nodes:
            if timestamp_type is not None:
                node.config[config_property.MESSAGE_TIMESTAMP_TYPE] = timestamp_type
        self.kafka.start()
         
        self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka,
                                           self.topic, throughput=self.producer_throughput,
                                           message_validator=is_int,
                                           compression_types=compression_types,
                                           version=KafkaVersion(producer_version))

        self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka,
                                        self.topic, consumer_timeout_ms=30000, new_consumer=new_consumer,
                                        message_validator=is_int, version=KafkaVersion(consumer_version))

        self.run_produce_consume_validate(lambda: wait_until(
            lambda: self.producer.each_produced_at_least(self.messages_per_producer) == True,
            timeout_sec=120, backoff_sec=1,
            err_msg="Producer did not produce all messages in reasonable amount of time"))
Example #9
    def __init__(self, test_context):
        super(TestBounce, self).__init__(test_context)

        quorum_size_arg_name = 'quorum_size'
        default_quorum_size = 1
        quorum_size = default_quorum_size if not test_context.injected_args else test_context.injected_args.get(
            quorum_size_arg_name, default_quorum_size)
        if quorum_size < 1:
            raise Exception("Illegal %s value provided for the test: %s" %
                            (quorum_size_arg_name, quorum_size))
        self.topic = "topic"
        self.zk = ZookeeperService(test_context,
                                   num_nodes=quorum_size) if quorum.for_test(
                                       test_context) == quorum.zk else None
        num_kafka_nodes = quorum_size if quorum.for_test(
            test_context) == quorum.colocated_kraft else 1
        self.kafka = KafkaService(
            test_context,
            num_nodes=num_kafka_nodes,
            zk=self.zk,
            topics={self.topic: {
                "partitions": 1,
                "replication-factor": 1
            }},
            controller_num_nodes_override=quorum_size)
        self.num_messages = 1000
Example #10
    def __init__(self, test_context):
        super(StreamsBrokerCompatibility,
              self).__init__(test_context=test_context)

        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(test_context,
                                  num_nodes=1,
                                  zk=self.zk,
                                  topics={
                                      self.input: {
                                          'partitions': 1,
                                          'replication-factor': 1
                                      },
                                      self.output: {
                                          'partitions': 1,
                                          'replication-factor': 1
                                      }
                                  })

        self.processor = StreamsBrokerCompatibilityService(
            self.test_context, self.kafka)

        self.consumer = VerifiableConsumer(
            test_context, 1, self.kafka, self.output,
            "stream-broker-compatibility-verify-consumer")
Example #11
 def __init__(self, test_context):
     """:type test_context: ducktape.tests.test.TestContext"""
     super(RoundTripFaultTest, self).__init__(test_context)
     self.zk = ZookeeperService(test_context, num_nodes=3)
     self.kafka = KafkaService(test_context, num_nodes=4, zk=self.zk)
     self.workload_service = RoundTripWorkloadService(
         test_context, self.kafka)
     self.trogdor = TrogdorService(
         context=self.test_context,
         client_services=[self.zk, self.kafka, self.workload_service])
     topic_name = "round_trip_topic%d" % RoundTripFaultTest.topic_name_index
     RoundTripFaultTest.topic_name_index = RoundTripFaultTest.topic_name_index + 1
     active_topics = {
         topic_name: {
             "partitionAssignments": {
                 "0": [0, 1, 2]
             }
         }
     }
     self.round_trip_spec = RoundTripWorkloadSpec(
         0,
         TaskSpec.MAX_DURATION_MS,
         self.workload_service.client_node,
         self.workload_service.bootstrap_servers,
         target_messages_per_sec=10000,
         max_messages=100000,
         active_topics=active_topics)
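The spec above is only constructed in the constructor; a test method would normally hand it to Trogdor as a task and wait for completion. A minimal sketch, assuming TrogdorService exposes create_task()/wait_for_done() as in the Kafka system tests (the task name and timeout below are illustrative):

    def test_round_trip_workload(self):
        # submit the round-trip spec to Trogdor and block until the workload finishes
        workload = self.trogdor.create_task("round_trip_workload", self.round_trip_spec)
        workload.wait_for_done(timeout_sec=600)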
Example #12
    def __init__(self, test_context):
        super(FetchFromFollowerTest, self).__init__(test_context=test_context)
        self.jmx_tool = JmxTool(test_context, jmx_poll_ms=100)
        self.topic = "test_topic"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(test_context,
                                  num_nodes=3,
                                  zk=self.zk,
                                  topics={
                                      self.topic: {
                                          "partitions": 1,
                                          "replication-factor": 3,
                                          "configs": {
                                              "min.insync.replicas": 1
                                          }
                                      },
                                  },
                                  server_prop_overides=[[
                                      "replica.selector.class",
                                      self.RACK_AWARE_REPLICA_SELECTOR
                                  ]],
                                  per_node_server_prop_overrides={
                                      1: [("broker.rack", "rack-a")],
                                      2: [("broker.rack", "rack-b")],
                                      3: [("broker.rack", "rack-c")]
                                  })

        self.producer_throughput = 1000
        self.num_producers = 1
        self.num_consumers = 1
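The replica.selector.class and broker.rack overrides above enable rack-aware fetching; on the client side a consumer opts in by setting the client.rack property to one of the broker racks. An illustrative consumer configuration (assumed for this explanation, not part of the original test):

    # consumer settings for fetch-from-follower; values are examples only
    consumer_config = {
        "client.rack": "rack-a",              # prefer the replica located in rack-a
        "bootstrap.servers": "localhost:9092"
    }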
Example #13
    def test_compatibility(self, producer_version, consumer_version):
        """ This tests performs the following checks:
        The workload is a mix of 0.9.x and 0.10.x producers and consumers 
        that produce to and consume from a 0.10.x cluster
        1. initially the topic is using message format 0.9.0
        2. change the message format version for topic to 0.10.0 on the fly.
        3. change the message format version for topic back to 0.9.0 on the fly.
        - The producers and consumers should not have any issue.
        - Note that for 0.9.x consumers/producers we only do steps 1 and 2
        """
        self.kafka = KafkaService(self.test_context, num_nodes=3, zk=self.zk, version=TRUNK, topics={self.topic: {
                                                                    "partitions": 3,
                                                                    "replication-factor": 3,
                                                                    'configs': {"min.insync.replicas": 2}}})
       
        self.kafka.start()
        self.logger.info("First format change to 0.9.0")
        self.kafka.alter_message_format(self.topic, str(LATEST_0_9))
        self.produce_and_consume(producer_version, consumer_version, "group1")

        self.logger.info("Second format change to 0.10.0")
        self.kafka.alter_message_format(self.topic, str(LATEST_0_10))
        self.produce_and_consume(producer_version, consumer_version, "group2")

        if producer_version == str(TRUNK) and consumer_version == str(TRUNK):
            self.logger.info("Third format change back to 0.9.0")
            self.kafka.alter_message_format(self.topic, str(LATEST_0_9))
            self.produce_and_consume(producer_version, consumer_version, "group3")
Example #14
    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(LogDirFailureTest, self).__init__(test_context=test_context)

        self.topic1 = "test_topic_1"
        self.topic2 = "test_topic_2"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(test_context,
                                  num_nodes=3,
                                  zk=self.zk,
                                  topics={
                                      self.topic1: {"partitions": 1, "replication-factor": 3, "configs": {"min.insync.replicas": 2}},
                                      self.topic2: {"partitions": 1, "replication-factor": 3, "configs": {"min.insync.replicas": 1}}
                                  },
                                  # Set log.roll.ms to 3 seconds so that the broker detects a disk error sooner when it creates a log segment.
                                  # Otherwise the broker would still be able to read/write the log file even if the log directory is inaccessible.
                                  server_prop_overides=[
                                      [config_property.LOG_FLUSH_INTERVAL_MESSAGE, "5"],
                                      [config_property.REPLICA_HIGHWATERMARK_CHECKPOINT_INTERVAL_MS, "60000"],
                                      [config_property.LOG_ROLL_TIME_MS, "3000"]
                                  ])

        self.producer_throughput = 1000
        self.num_producers = 1
        self.num_consumers = 1
Example #15
    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(GroupModeTransactionsTest,
              self).__init__(test_context=test_context)

        self.input_topic = "input-topic"
        self.output_topic = "output-topic"

        self.num_brokers = 3

        # Test parameters
        self.num_input_partitions = 9
        self.num_output_partitions = 9
        self.num_copiers = 3
        self.num_seed_messages = 100000
        self.transaction_size = 750
        # The transaction timeout should be lower than the progress timeout, but at
        # least as high as the request timeout (which is 30s by default). When the
        # client is hard-bounced, progress may depend on the previous transaction
        # being aborted. When the broker is hard-bounced, we may have to wait as
        # long as the request timeout to get a `Produce` response and we do not
        # want the coordinator timing out the transaction.
        self.transaction_timeout = 40000
        self.progress_timeout_sec = 60
        self.consumer_group = "grouped-transactions-test-consumer-group"

        self.zk = ZookeeperService(test_context,
                                   num_nodes=1) if quorum.for_test(
                                       test_context) == quorum.zk else None
        self.kafka = KafkaService(test_context,
                                  num_nodes=self.num_brokers,
                                  zk=self.zk,
                                  controller_num_nodes_override=1)
Example #16
    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(ClientCompatibilityFeaturesTest,
              self).__init__(test_context=test_context)

        self.zk = ZookeeperService(test_context,
                                   num_nodes=3) if quorum.for_test(
                                       test_context) == quorum.zk else None

        # Generate a unique topic name
        topic_name = "client_compat_features_topic_%d%d" % (int(
            time.time()), randint(0, 2147483647))
        self.topics = {
            topic_name: {
                "partitions":
                1,  # Use only one partition to avoid worrying about ordering
                "replication-factor": 3
            }
        }
        self.kafka = KafkaService(test_context,
                                  num_nodes=3,
                                  zk=self.zk,
                                  topics=self.topics)
        # Always use the latest version of org.apache.kafka.tools.ClientCompatibilityTest
        # so store away the path to the DEV version before we set the Kafka version
        self.dev_script_path = self.kafka.path.script("kafka-run-class.sh",
                                                      self.kafka.nodes[0])
Example #17
    def __init__(self, test_context):
        super(StreamsOptimizedTest, self).__init__(test_context)
        self.topics = {
            self.input_topic: {
                'partitions': 6
            },
            self.aggregation_topic: {
                'partitions': 6
            },
            self.reduce_topic: {
                'partitions': 6
            },
            self.join_topic: {
                'partitions': 6
            }
        }

        self.zookeeper = ZookeeperService(self.test_context, num_nodes=1)
        self.kafka = KafkaService(self.test_context,
                                  num_nodes=3,
                                  zk=self.zookeeper,
                                  topics=self.topics)

        self.producer = VerifiableProducer(self.test_context,
                                           1,
                                           self.kafka,
                                           self.input_topic,
                                           throughput=1000,
                                           acks=1)
Example #18
    def test_simple_benchmark(self, test, scale):
        """
        Run simple Kafka Streams benchmark
        """
        self.driver = [None] * (scale + 1)
        node = [None] * scale
        data = [None] * scale

        #############
        # SETUP PHASE
        #############
        self.zk = ZookeeperService(self.test_context, num_nodes=1)
        self.zk.start()
        self.kafka = KafkaService(self.test_context, num_nodes=scale, zk=self.zk, version=DEV_BRANCH, topics={
            'simpleBenchmarkSourceTopic' : { 'partitions': scale, 'replication-factor': self.replication },
            'countTopic' : { 'partitions': scale, 'replication-factor': self.replication },
            'simpleBenchmarkSinkTopic' : { 'partitions': scale, 'replication-factor': self.replication },
            'joinSourceTopic1KStreamKStream' : { 'partitions': scale, 'replication-factor': self.replication },
            'joinSourceTopic2KStreamKStream' : { 'partitions': scale, 'replication-factor': self.replication },
            'joinSourceTopic1KStreamKTable' : { 'partitions': scale, 'replication-factor': self.replication },
            'joinSourceTopic2KStreamKTable' : { 'partitions': scale, 'replication-factor': self.replication },
            'joinSourceTopic1KTableKTable' : { 'partitions': scale, 'replication-factor': self.replication },
            'joinSourceTopic2KTableKTable' : { 'partitions': scale, 'replication-factor': self.replication }
        })
        self.kafka.start()
 
        ################
        # LOAD PHASE
        ################
        self.load_driver = StreamsSimpleBenchmarkService(self.test_context, self.kafka,
                                                         self.num_records * scale, "true", test)
        self.load_driver.start()
        self.load_driver.wait()
        self.load_driver.stop()

        ################
        # RUN PHASE
        ################
        for num in range(0, scale):
            self.driver[num] = StreamsSimpleBenchmarkService(self.test_context, self.kafka,
                                                             self.num_records/(scale), "false", test)
            self.driver[num].start()

        #######################
        # STOP + COLLECT PHASE
        #######################
        for num in range(0, scale):    
            self.driver[num].wait()    
            self.driver[num].stop()
            node[num] = self.driver[num].node
            node[num].account.ssh("grep Performance %s" % self.driver[num].STDOUT_FILE, allow_fail=False)
            data[num] = self.driver[num].collect_data(node[num], "" )
                

        final = {}
        for num in range(0, scale):
            for key in data[num]:
                final[key + str(num)] = data[num][key]
        
        return final
Example #19
    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(TransactionsTest, self).__init__(test_context=test_context)

        self.input_topic = "input-topic"
        self.output_topic = "output-topic"

        self.num_brokers = 3

        # Test parameters
        self.num_input_partitions = 2
        self.num_output_partitions = 3
        self.num_seed_messages = 100000
        self.transaction_size = 750
        # The transaction timeout should be lower than the verification timeout. In hard_bounce mode the
        # transactional messages sent by the client may not complete cleanly, and a pending transaction
        # (unstable offset) held by the broker prevents TransactionMessageCopier from reading the partition
        # offset it needs to calculate the remaining messages after restarting.
        self.transaction_timeout = 5000
        self.consumer_group = "transactions-test-consumer-group"

        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(test_context,
                                  num_nodes=self.num_brokers,
                                  zk=self.zk)
Example #20
 def start_kafka(self, security_protocol, interbroker_security_protocol):
     self.kafka = KafkaService(
         self.test_context, self.num_brokers,
         self.zk, security_protocol=security_protocol,
         interbroker_security_protocol=interbroker_security_protocol, topics=self.topics,
         controller_num_nodes_override=self.num_zk)
     self.kafka.start()
Example #21
    def test_upgrade_brokers(self, from_version, to_version):
        """
        Start a smoke test client, then perform rolling upgrades on the broker.
        """
        # Setup phase
        self.zk = ZookeeperService(self.test_context, num_nodes=1)
        self.zk.start()

        # number of nodes needs to be >= 3 for the smoke test
        self.kafka = KafkaService(self.test_context, num_nodes=3,
                                  zk=self.zk, version=KafkaVersion(from_version), topics=self.topics)
        self.kafka.start()
        
        # allow some time for topics to be created
        time.sleep(10)
        
        self.driver = StreamsSmokeTestDriverService(self.test_context, self.kafka)
        self.processor1 = StreamsSmokeTestJobRunnerService(self.test_context, self.kafka)

        
        self.driver.start()
        self.processor1.start()
        time.sleep(15)

        self.perform_broker_upgrade(to_version)

        time.sleep(15)
        self.driver.wait()
        self.driver.stop()

        self.processor1.stop()

        node = self.driver.node
        node.account.ssh("grep ALL-RECORDS-DELIVERED %s" % self.driver.STDOUT_FILE, allow_fail=False)
        self.processor1.node.account.ssh_capture("grep SMOKE-TEST-CLIENT-CLOSED %s" % self.processor1.STDOUT_FILE, allow_fail=False)
Example #22
    def __init__(self, test_context):
        super(TestMirrorMakerService, self).__init__(test_context)

        self.topic = "topic"
        self.source_zk = ZookeeperService(test_context, num_nodes=1)
        self.target_zk = ZookeeperService(test_context, num_nodes=1)

        self.source_kafka = KafkaService(
            test_context,
            num_nodes=1,
            zk=self.source_zk,
            topics={self.topic: {
                "partitions": 1,
                "replication-factor": 1
            }})
        self.target_kafka = KafkaService(
            test_context,
            num_nodes=1,
            zk=self.target_zk,
            topics={self.topic: {
                "partitions": 1,
                "replication-factor": 1
            }})

        self.num_messages = 1000
        # This will produce to source kafka cluster
        self.producer = VerifiableProducer(test_context,
                                           num_nodes=1,
                                           kafka=self.source_kafka,
                                           topic=self.topic,
                                           max_messages=self.num_messages,
                                           throughput=1000)

        # Use a regex whitelist to check that the start command is well-formed in this case
        self.mirror_maker = MirrorMaker(test_context,
                                        num_nodes=1,
                                        source=self.source_kafka,
                                        target=self.target_kafka,
                                        whitelist=".*",
                                        consumer_timeout_ms=2000)

        # This will consume from target kafka cluster
        self.consumer = ConsoleConsumer(test_context,
                                        num_nodes=1,
                                        kafka=self.target_kafka,
                                        topic=self.topic,
                                        consumer_timeout_ms=1000)
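A hedged sketch of how these services are usually exercised end to end (the test body below is an assumption, not part of the original snippet): start both clusters and MirrorMaker, produce to the source cluster, then consume from the target cluster and verify the message count.

    def test_end_to_end(self):
        self.source_zk.start()
        self.target_zk.start()
        self.source_kafka.start()
        self.target_kafka.start()
        self.mirror_maker.start()

        self.producer.start()        # writes num_messages to the source cluster
        self.producer.wait()
        self.consumer.start()        # reads the mirrored topic from the target cluster
        self.consumer.wait()

        # ConsoleConsumer keeps consumed messages keyed by node index (assumption)
        assert len(self.consumer.messages_consumed[1]) >= self.num_messages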
Example #23
    def test_metadata_upgrade(self, from_version, to_version):
        """
        Starts 3 KafkaStreams instances with version <from_version> and upgrades one-by-one to <to_version>
        """

        self.zk = ZookeeperService(self.test_context, num_nodes=1)
        self.zk.start()

        self.kafka = KafkaService(self.test_context,
                                  num_nodes=1,
                                  zk=self.zk,
                                  topics=self.topics)
        self.kafka.start()

        self.driver = StreamsSmokeTestDriverService(self.test_context,
                                                    self.kafka)
        self.driver.disable_auto_terminate()
        self.processor1 = StreamsUpgradeTestJobRunnerService(
            self.test_context, self.kafka)
        self.processor2 = StreamsUpgradeTestJobRunnerService(
            self.test_context, self.kafka)
        self.processor3 = StreamsUpgradeTestJobRunnerService(
            self.test_context, self.kafka)

        self.driver.start()
        self.start_all_nodes_with(from_version)

        self.processors = [self.processor1, self.processor2, self.processor3]

        counter = 1
        random.seed()

        # first rolling bounce
        random.shuffle(self.processors)
        for p in self.processors:
            p.CLEAN_NODE_ENABLED = False
            self.do_stop_start_bounce(p, from_version[:-2], to_version,
                                      counter)
            counter = counter + 1

        # second rolling bounce
        random.shuffle(self.processors)
        for p in self.processors:
            self.do_stop_start_bounce(p, None, to_version, counter)
            counter = counter + 1

        # shutdown
        self.driver.stop()

        random.shuffle(self.processors)
        for p in self.processors:
            node = p.node
            with node.account.monitor_log(p.STDOUT_FILE) as monitor:
                p.stop()
                monitor.wait_until(
                    "UPGRADE-TEST-CLIENT-CLOSED",
                    timeout_sec=60,
                    err_msg="Never saw output 'UPGRADE-TEST-CLIENT-CLOSED' on"
                    + str(node.account))
Example #24
    def __init__(self, test_context):
        super(ConsoleConsumerTest, self).__init__(test_context)

        self.topic = "topic"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(self.test_context, num_nodes=1, zk=self.zk,
                                  topics={self.topic: {"partitions": 1, "replication-factor": 1}})
        self.consumer = ConsoleConsumer(self.test_context, num_nodes=1, kafka=self.kafka, topic=self.topic, new_consumer=False)
Example #25
 def start_kafka(self, security_protocol, interbroker_security_protocol, version, tls_version=None):
     self.kafka = KafkaService(
         self.test_context, self.num_brokers,
         self.zk, security_protocol=security_protocol,
         interbroker_security_protocol=interbroker_security_protocol, topics=self.topics,
         version=version, tls_version=tls_version)
     self.kafka.log_level = "INFO"  # We don't DEBUG logging here
     self.kafka.start()
Example #26
    def test_compatibility(self,
                           producer_version,
                           consumer_version,
                           metadata_quorum=quorum.zk):
        """ This tests performs the following checks:
        The workload is a mix of 0.9.x, 0.10.x and 0.11.x producers and consumers
        that produce to and consume from a DEV_BRANCH cluster
        1. initially the topic is using message format 0.9.0
        2. change the message format version for topic to 0.10.0 on the fly.
        3. change the message format version for topic to 0.11.0 on the fly.
        4. change the message format version for topic back to 0.10.0 on the fly (only if the client version is 0.11.0 or newer)
        - The producers and consumers should not have any issue.

        Note regarding step number 4. Downgrading the message format version is generally unsupported as it breaks
        older clients. More concretely, if we downgrade a topic from 0.11.0 to 0.10.0 after it contains messages with
        version 0.11.0, we will return the 0.11.0 messages without down conversion due to an optimisation in the
        handling of fetch requests. This will break any consumer that doesn't support 0.11.0. So, in practice, step 4
        is similar to step 2 and it didn't seem worth it to increase the cluster size to in order to add a step 5 that
        would change the message format version for the topic back to 0.9.0.0.
        """
        self.kafka = KafkaService(self.test_context,
                                  num_nodes=3,
                                  zk=self.zk,
                                  version=DEV_BRANCH,
                                  topics={
                                      self.topic: {
                                          "partitions": 3,
                                          "replication-factor": 3,
                                          'configs': {
                                              "min.insync.replicas": 2
                                          }
                                      }
                                  },
                                  controller_num_nodes_override=1)
        for node in self.kafka.nodes:
            node.config[config_property.INTER_BROKER_PROTOCOL_VERSION] = str(
                V_2_8_0)  # required for writing old message formats

        self.kafka.start()
        self.logger.info("First format change to 0.9.0")
        self.kafka.alter_message_format(self.topic, str(LATEST_0_9))
        self.produce_and_consume(producer_version, consumer_version, "group1")

        self.logger.info("Second format change to 0.10.0")
        self.kafka.alter_message_format(self.topic, str(LATEST_0_10))
        self.produce_and_consume(producer_version, consumer_version, "group2")

        self.logger.info("Third format change to 0.11.0")
        self.kafka.alter_message_format(self.topic, str(LATEST_0_11))
        self.produce_and_consume(producer_version, consumer_version, "group3")

        if producer_version == str(DEV_BRANCH) and consumer_version == str(
                DEV_BRANCH):
            self.logger.info("Fourth format change back to 0.10.0")
            self.kafka.alter_message_format(self.topic, str(LATEST_0_10))
            self.produce_and_consume(producer_version, consumer_version,
                                     "group4")
Example #27
 def start_kafka(self, security_protocol, interbroker_security_protocol):
     self.kafka = KafkaService(
         self.test_context,
         self.num_brokers,
         self.zk,
         security_protocol=security_protocol,
         interbroker_security_protocol=interbroker_security_protocol,
         topics=self.topics)
     self.kafka.start()
Example #28
    def test_0_8_2(self):
        """Test kafka service node-versioning api - verify that we can bring up a single-node 0.8.2.X cluster."""
        self.kafka = KafkaService(self.test_context, num_nodes=1, zk=self.zk,
                                  topics={self.topic: {"partitions": 1, "replication-factor": 1}})
        node = self.kafka.nodes[0]
        node.version = LATEST_0_8_2
        self.kafka.start()

        assert is_version(node, [LATEST_0_8_2])
Example #29
    def __init__(self, test_context):
        super(ZooKeeperAuthorizerTest, self).__init__(test_context=test_context)

        self.topic = "test_topic"
        # setup ZooKeeper even with KRaft
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(test_context, num_nodes=1, zk=self.zk,
                                  topics={self.topic: {"partitions": 1, "replication-factor": 1}},
                                  controller_num_nodes_override=1, allow_zk_with_kraft=True)
Example #30
 def __init__(self, test_context):
     """:type test_context: ducktape.tests.test.TestContext"""
     super(ProduceBenchTest, self).__init__(test_context)
     self.zk = ZookeeperService(test_context, num_nodes=3)
     self.kafka = KafkaService(test_context, num_nodes=3, zk=self.zk)
     self.workload_service = ProduceBenchWorkloadService(
         test_context, self.kafka)
     self.trogdor = TrogdorService(
         context=self.test_context,
         client_services=[self.kafka, self.workload_service])
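As with the consume-bench example, only the services are built here; a produce workload would then be described with a ProduceBenchWorkloadSpec and submitted through Trogdor. A rough sketch, where the spec class, its exact signature, and all parameter values are assumptions for illustration:

    def test_produce_bench(self):
        # describe the produce workload and run it as a Trogdor task
        spec = ProduceBenchWorkloadSpec(0, TaskSpec.MAX_DURATION_MS,
                                        self.workload_service.producer_node,
                                        self.workload_service.bootstrap_servers,
                                        target_messages_per_sec=1000,
                                        max_messages=100000,
                                        producer_conf={},
                                        admin_client_conf={},
                                        common_client_conf={},
                                        inactive_topics={},
                                        active_topics={"produce_bench_topic[0-1]": {"numPartitions": 1, "replicationFactor": 3}})
        workload = self.trogdor.create_task("produce_workload", spec)
        workload.wait_for_done(timeout_sec=360)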