Example #1
0
    def setup_node(self, node):
        """Apply this service's security configuration to *node*.

        Runs SSL and/or SASL setup according to the service flags, then
        forces ``tls.version`` down to TLSv1.2 whenever TLSv1.3 was
        requested but the node's JDK is 11 or older.
        """
        if self.has_ssl:
            self.setup_ssl(node)
        if self.has_sasl:
            self.setup_sasl(node)

        # java_version() is probed unconditionally (same as the boolean
        # short-circuit order in the original condition).
        jdk = java_version(node)
        if jdk <= 11 and self.properties.get('tls.version') == 'TLSv1.3':
            # NOTE(review): presumably TLSv1.3 is considered unreliable on
            # these JDKs — threshold kept as-is, confirm against upstream.
            self.properties.update({'tls.version': 'TLSv1.2'})
Example #2
0
def fix_opts_for_new_jvm(node):
    """Return shell exports overriding GC options for old Kafka on new JVMs.

    Startup scripts shipped with early Kafka releases pass JVM flags that
    newer JVMs reject (e.g. -XX:+PrintGCDateStamps, -XX:+UseParNewGC).
    When the test runs such a release on a newer JVM, return a command
    prefix that exports replacement GC/performance options; otherwise
    return an empty string.

    :param node: cluster node whose JVM and Kafka version are inspected
    :return: shell snippet (possibly empty) to prepend to the start command
    """
    # Old JDKs run the shipped flags as-is — nothing to override.
    if java_version(node) <= 9:
        return ""

    # Only releases up to 1.0 ship the problematic startup flags; a
    # membership test replaces the original chain of `==` comparisons.
    legacy_versions = (LATEST_0_8_2, LATEST_0_9, LATEST_0_10_0, LATEST_0_10_1,
                       LATEST_0_10_2, LATEST_0_11_0, LATEST_1_0)
    if node.version not in legacy_versions:
        return ""

    return ("export KAFKA_GC_LOG_OPTS=\"-Xlog:gc*:file=kafka-gc.log:time,tags:filecount=10,filesize=102400\"; "
            "export KAFKA_JVM_PERFORMANCE_OPTS=\"-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 -Djava.awt.headless=true\"; ")
Example #3
0
    def test_upgrade(self,
                     from_kafka_version,
                     to_message_format_version,
                     compression_types,
                     security_protocol="PLAINTEXT"):
        """Test upgrade of Kafka broker cluster from various versions to the current version

        from_kafka_version is a Kafka version to upgrade from

        If to_message_format_version is None, it means that we will upgrade to default (latest)
        message format version. It is possible to upgrade to 0.10 brokers but still use message
        format version 0.9

        - Start 3 node broker cluster on version 'from_kafka_version'
        - Start producer and consumer in the background
        - Perform two-phase rolling upgrade
            - First phase: upgrade brokers to 0.10 with inter.broker.protocol.version set to
            from_kafka_version and log.message.format.version set to from_kafka_version
            - Second phase: remove inter.broker.protocol.version config with rolling bounce; if
            to_message_format_version is set to 0.9, set log.message.format.version to
            to_message_format_version, otherwise remove log.message.format.version config
        - Finally, validate that every message acked by the producer was consumed by the consumer
        """
        self.zk = ZookeeperService(self.test_context,
                                   num_nodes=1,
                                   version=KafkaVersion(from_kafka_version))
        self.kafka = KafkaService(self.test_context,
                                  num_nodes=3,
                                  zk=self.zk,
                                  version=KafkaVersion(from_kafka_version),
                                  topics={
                                      self.topic: {
                                          "partitions": self.partitions,
                                          "replication-factor":
                                          self.replication_factor,
                                          'configs': {
                                              "min.insync.replicas": 2
                                          }
                                      }
                                  })
        self.kafka.security_protocol = security_protocol
        self.kafka.interbroker_security_protocol = security_protocol

        jdk_version = java_version(self.kafka.nodes[0])

        # Skip (not fail) combinations where the old broker cannot run on
        # the installed JDK.
        if jdk_version > 9 and from_kafka_version in new_jdk_not_supported:
            self.logger.info("Test ignored! Kafka " + from_kafka_version +
                             " not support jdk " + str(jdk_version))
            return

        self.zk.start()
        self.kafka.start()

        # Captured before the upgrade so topic-id stability can be checked after.
        old_id = self.kafka.topic_id(self.topic)

        self.producer = VerifiableProducer(
            self.test_context,
            self.num_producers,
            self.kafka,
            self.topic,
            throughput=self.producer_throughput,
            message_validator=is_int,
            compression_types=compression_types,
            version=KafkaVersion(from_kafka_version))

        if from_kafka_version <= LATEST_0_10_0:
            assert self.kafka.cluster_id() is None

        # With older message formats before KIP-101, message loss may occur due to truncation
        # after leader change. Tolerate limited data loss for this case to avoid transient test failures.
        # (Direct comparison replaces the original `False if ... else True`.)
        self.may_truncate_acked_records = from_kafka_version < V_0_11_0_0

        new_consumer = from_kafka_version.consumer_supports_bootstrap_server()
        # TODO - reduce the timeout
        self.consumer = ConsoleConsumer(
            self.test_context,
            self.num_consumers,
            self.kafka,
            self.topic,
            new_consumer=new_consumer,
            consumer_timeout_ms=30000,
            message_validator=is_int,
            version=KafkaVersion(from_kafka_version))

        self.run_produce_consume_validate(
            core_test_action=lambda: self.perform_upgrade(
                from_kafka_version, to_message_format_version))

        # After the upgrade every broker must report a well-formed cluster id.
        cluster_id = self.kafka.cluster_id()
        assert cluster_id is not None
        assert len(cluster_id) == 22

        # Topic ids exist from 2.8 onward: an id created before the upgrade
        # must survive it; older starting versions gain one during upgrade.
        assert self.kafka.all_nodes_support_topic_ids()
        new_id = self.kafka.topic_id(self.topic)
        if from_kafka_version >= V_2_8_0:
            assert old_id is not None
            assert new_id is not None
            assert old_id == new_id
        else:
            assert old_id is None
            assert new_id is not None

        assert self.kafka.check_protocol_errors(self)