Example No. 1
    def __init__(self, test_context):
        super(StreamsBrokerCompatibility,
              self).__init__(test_context=test_context)

        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(test_context,
                                  num_nodes=1,
                                  zk=self.zk,
                                  topics={
                                      self.input: {
                                          'partitions': 1,
                                          'replication-factor': 1
                                      },
                                      self.output: {
                                          'partitions': 1,
                                          'replication-factor': 1
                                      }
                                  })

        self.processor = StreamsBrokerCompatibilityService(
            self.test_context, self.kafka)

        self.consumer = VerifiableConsumer(
            test_context, 1, self.kafka, self.output,
            "stream-broker-compatibility-verify-consumer")
Example No. 2
 def create_consumer(self, num_nodes=1, group_id="test_group", **kwargs):
     self.consumer = VerifiableConsumer(self.test_context,
                                        num_nodes=num_nodes,
                                        kafka=self.kafka,
                                        topic=self.topic,
                                        group_id=group_id,
                                        on_record_consumed=self.on_record_consumed,
                                        **kwargs)
Example No. 3
 def get_consumer(self, num_messages):
     return VerifiableConsumer(self.test_context,
                               1,
                               self.kafka,
                               self.outputTopic,
                               "stream-broker-resilience-verify-consumer",
                               max_messages=num_messages)
Example No. 4
 def get_consumer(self, client_id, topic, num_messages):
     return VerifiableConsumer(self.test_context,
                               1,
                               self.kafka,
                               topic,
                               client_id,
                               max_messages=num_messages)
Example No. 5
 def create_consumer(self, num_nodes=1, group_id="test_group", **kwargs):
     self.consumer = VerifiableConsumer(self.test_context,
                                        num_nodes=num_nodes,
                                        kafka=self.kafka,
                                        topic=self.topic,
                                        group_id=group_id,
                                        on_record_consumed=self.on_record_consumed,
                                        **kwargs)
Example No. 6
 def _setup_consumer(self, topic, enable_autocommit=False):
     return VerifiableConsumer(self.test_context,
                               self.num_consumers,
                               self.kafka,
                               topic,
                               self.GROUP_ID,
                               session_timeout=self.session_timeout,
                               enable_autocommit=enable_autocommit)
Example No. 7
 def __init__(self, test_context):
     super(StreamsBrokerCompatibility, self).__init__(test_context=test_context)
     self.zk = ZookeeperService(test_context, num_nodes=1)
     self.kafka = KafkaService(test_context,
                               num_nodes=1,
                               zk=self.zk,
                               topics={
                                   self.input: {'partitions': 1, 'replication-factor': 1},
                                   self.output: {'partitions': 1, 'replication-factor': 1}
                               },
                               server_prop_overrides=[
                                   ["transaction.state.log.replication.factor", "1"],
                                   ["transaction.state.log.min.isr", "1"]
                               ])
     self.consumer = VerifiableConsumer(test_context,
                                        1,
                                        self.kafka,
                                        self.output,
                                        "stream-broker-compatibility-verify-consumer")
Example No. 8
 def setup_consumer(
         self,
         topic,
         enable_autocommit=False,
         assignment_strategy="org.apache.kafka.clients.consumer.RangeAssignor"
 ):
     return VerifiableConsumer(self.test_context,
                               self.num_consumers,
                               self.kafka,
                               topic,
                               self.group_id,
                               session_timeout_sec=self.session_timeout_sec,
                               assignment_strategy=assignment_strategy,
                               enable_autocommit=enable_autocommit)
Example No. 9
 def __init__(self, test_context):
     super(StreamsBrokerCompatibility, self).__init__(test_context=test_context)
     self.zk = ZookeeperService(test_context, num_nodes=1)
     self.kafka = KafkaService(test_context,
                               num_nodes=1,
                               zk=self.zk,
                               topics={
                                   self.input: {'partitions': 1, 'replication-factor': 1},
                                   self.output: {'partitions': 1, 'replication-factor': 1}
                               })
     self.consumer = VerifiableConsumer(test_context,
                                        1,
                                        self.kafka,
                                        self.output,
                                        "stream-broker-compatibility-verify-consumer")
Example No. 10
 def setup_consumer(
         self,
         topic,
         static_membership=False,
         enable_autocommit=False,
         assignment_strategy="org.apache.kafka.clients.consumer.RangeAssignor",
         **kwargs):
     return VerifiableConsumer(self.test_context,
                               self.num_consumers,
                               self.kafka,
                               topic,
                               self.group_id,
                               static_membership=static_membership,
                               session_timeout_sec=self.session_timeout_sec,
                               assignment_strategy=assignment_strategy,
                               enable_autocommit=enable_autocommit,
                               log_level="TRACE",
                               **kwargs)
Example No. 11
class StreamsBrokerCompatibility(Test):
    """
    These tests validate that
    - Streams 0.11+ w/ EOS fails fast for older brokers 0.10.2 and 0.10.1
    - Streams 0.11+ w/o EOS works for older brokers 0.10.2 and 0.10.1
    - Streams fails fast for 0.10.0 brokers
    - Streams times out for pre-0.10.0 brokers
    """

    input = "brokerCompatibilitySourceTopic"
    output = "brokerCompatibilitySinkTopic"

    def __init__(self, test_context):
        super(StreamsBrokerCompatibility,
              self).__init__(test_context=test_context)
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(test_context,
                                  num_nodes=1,
                                  zk=self.zk,
                                  topics={
                                      self.input: {
                                          'partitions': 1,
                                          'replication-factor': 1
                                      },
                                      self.output: {
                                          'partitions': 1,
                                          'replication-factor': 1
                                      }
                                  })
        self.consumer = VerifiableConsumer(
            test_context, 1, self.kafka, self.output,
            "stream-broker-compatibility-verify-consumer")

    def setUp(self):
        self.zk.start()

    @parametrize(broker_version=str(LATEST_0_10_2))
    @parametrize(broker_version=str(LATEST_0_10_1))
    def test_fail_fast_on_incompatible_brokers_if_eos_enabled(
            self, broker_version):
        self.kafka.set_version(KafkaVersion(broker_version))
        self.kafka.start()

        processor = StreamsBrokerCompatibilityService(self.test_context,
                                                      self.kafka, True)
        processor.start()

        processor.node.account.ssh(processor.start_cmd(processor.node))
        with processor.node.account.monitor_log(
                processor.STDERR_FILE) as monitor:
            monitor.wait_until(
                'FATAL: An unexpected exception org.apache.kafka.common.errors.UnsupportedVersionException: The broker does not support LIST_OFFSETS ',
                timeout_sec=60,
                err_msg=
                "Never saw 'FATAL: An unexpected exception org.apache.kafka.common.errors.UnsupportedVersionException: The broker does not support LIST_OFFSETS ' error message "
                + str(processor.node.account))

        self.kafka.stop()

    @parametrize(broker_version=str(LATEST_0_11_0))
    @parametrize(broker_version=str(LATEST_0_10_2))
    @parametrize(broker_version=str(LATEST_0_10_1))
    def test_compatible_brokers_eos_disabled(self, broker_version):
        self.kafka.set_version(KafkaVersion(broker_version))
        self.kafka.start()

        processor = StreamsBrokerCompatibilityService(self.test_context,
                                                      self.kafka, False)
        processor.start()

        self.consumer.start()

        processor.wait()

        wait_until(
            lambda: self.consumer.total_consumed() > 0,
            timeout_sec=30,
            err_msg=
            "Did expect to read a message but got none within 30 seconds.")

        self.consumer.stop()
        self.kafka.stop()

    @parametrize(broker_version=str(LATEST_0_10_0))
    def test_fail_fast_on_incompatible_brokers(self, broker_version):
        self.kafka.set_version(KafkaVersion(broker_version))
        self.kafka.start()

        processor = StreamsBrokerCompatibilityService(self.test_context,
                                                      self.kafka, False)
        processor.start()

        processor.node.account.ssh(processor.start_cmd(processor.node))
        with processor.node.account.monitor_log(
                processor.STDERR_FILE) as monitor:
            monitor.wait_until(
                'FATAL: An unexpected exception org.apache.kafka.streams.errors.StreamsException: Could not create internal topics.',
                timeout_sec=60,
                err_msg=
                "Never saw 'FATAL: An unexpected exception org.apache.kafka.streams.errors.StreamsException: Could not create internal topics.' error message "
                + str(processor.node.account))

        self.kafka.stop()

    @ignore
    @parametrize(broker_version=str(LATEST_0_9))
    @parametrize(broker_version=str(LATEST_0_8_2))
    def test_timeout_on_pre_010_brokers(self, broker_version):
        self.kafka.set_version(KafkaVersion(broker_version))
        self.kafka.start()

        processor = StreamsBrokerCompatibilityService(self.test_context,
                                                      self.kafka, False)
        processor.start()

        processor.node.account.ssh(processor.start_cmd(processor.node))
        with processor.node.account.monitor_log(
                processor.STDERR_FILE) as monitor:
            monitor.wait_until(
                'Exception in thread "main" org.apache.kafka.streams.errors.BrokerNotFoundException: Could not find any available broker.',
                timeout_sec=60,
                err_msg="Never saw 'no available brokers' error message " +
                str(processor.node.account))

        self.kafka.stop()
Example No. 12
 def _setup_consumer(self, topic):
     return VerifiableConsumer(self.test_context, self.num_consumers, self.kafka,
                               topic, self.GROUP_ID, session_timeout=self.session_timeout)
Example No. 13
class StreamsBrokerCompatibility(Test):
    """
    These tests validate that Streams v0.10.2+ can connect to older brokers v0.10.1+
    and that Streams fails fast for pre-0.10.0 brokers
    """

    input = "brokerCompatibilitySourceTopic"
    output = "brokerCompatibilitySinkTopic"

    def __init__(self, test_context):
        super(StreamsBrokerCompatibility,
              self).__init__(test_context=test_context)

        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(test_context,
                                  num_nodes=1,
                                  zk=self.zk,
                                  topics={
                                      self.input: {
                                          'partitions': 1,
                                          'replication-factor': 1
                                      },
                                      self.output: {
                                          'partitions': 1,
                                          'replication-factor': 1
                                      }
                                  })

        self.processor = StreamsBrokerCompatibilityService(
            self.test_context, self.kafka)

        self.consumer = VerifiableConsumer(
            test_context, 1, self.kafka, self.output,
            "stream-broker-compatibility-verify-consumer")

    def setUp(self):
        self.zk.start()

    @parametrize(broker_version=str(DEV_BRANCH))
    @parametrize(broker_version=str(LATEST_0_10_1))
    def test_compatible_brokers(self, broker_version):
        self.kafka.set_version(KafkaVersion(broker_version))
        self.kafka.start()

        self.processor.start()
        self.consumer.start()

        self.processor.wait()

        wait_until(
            lambda: self.consumer.total_consumed() > 0,
            timeout_sec=30,
            err_msg=
            "Did expect to read a message but got none within 30 seconds.")

        self.consumer.stop()
        self.kafka.stop()

    @parametrize(broker_version=str(LATEST_0_10_0))
    def test_fail_fast_on_incompatible_brokers(self, broker_version):
        self.kafka.set_version(KafkaVersion(broker_version))
        self.kafka.start()

        self.processor.start()

        self.processor.node.account.ssh(
            self.processor.start_cmd(self.processor.node))
        with self.processor.node.account.monitor_log(
                self.processor.STDERR_FILE) as monitor:
            monitor.wait_until(
                'Exception in thread "main" org.apache.kafka.streams.errors.StreamsException: Kafka Streams requires broker version 0.10.1.x or higher.',
                timeout_sec=60,
                err_msg="Never saw 'incompatible broker' error message " +
                str(self.processor.node.account))

        self.kafka.stop()
Example No. 14
    def rolling_update_test(self):
        """
        Verify that rolling updates of partition assignment strategies work correctly. In this
        test, we use a rolling restart to change the group's assignment strategy from "range" 
        to "roundrobin." We verify after every restart that all members are still in the group
        and that the correct assignment strategy was used.
        """

        # initialize the consumer using range assignment
        consumer = VerifiableConsumer(self.test_context, self.num_consumers, self.kafka,
                                      self.TOPIC, self.GROUP_ID, session_timeout=self.session_timeout,
                                      assignment_strategy=self.RANGE)
        consumer.start()
        self._await_all_members(consumer)
        self._verify_range_assignment(consumer)

        # change consumer configuration to prefer round-robin assignment, but still support range assignment
        consumer.assignment_strategy = self.ROUND_ROBIN + "," + self.RANGE

        # restart one of the nodes and verify that we are still using range assignment
        consumer.stop_node(consumer.nodes[0])
        consumer.start_node(consumer.nodes[0])
        self._await_all_members(consumer)
        self._verify_range_assignment(consumer)
        
        # now restart the other node and verify that we have switched to round-robin
        consumer.stop_node(consumer.nodes[1])
        consumer.start_node(consumer.nodes[1])
        self._await_all_members(consumer)
        self._verify_roundrobin_assignment(consumer)

        # if we want, we can now drop support for range assignment
        consumer.assignment_strategy = self.ROUND_ROBIN
        for node in consumer.nodes:
            consumer.stop_node(node)
            consumer.start_node(node)
            self._await_all_members(consumer)
            self._verify_roundrobin_assignment(consumer)
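
The helpers _verify_range_assignment and _verify_roundrobin_assignment are referenced above but not included in this example. Purely as a hypothetical sketch (it assumes VerifiableConsumer exposes a current_assignment() mapping from node to the TopicPartitions that node owns, which this snippet does not show), a range-assignment check could look like:

    # Hypothetical sketch -- not part of the example above.
    def _verify_range_assignment(self, consumer):
        # Assumed accessor: {node: [TopicPartition, ...]} for the group's current assignment.
        assignment = consumer.current_assignment()
        for node, partitions in assignment.items():
            owned = sorted(tp.partition for tp in partitions if tp.topic == self.TOPIC)
            if not owned:
                continue  # a member may own nothing if there are more members than partitions
            # With range assignment, each member owns one contiguous block of partitions.
            assert owned == list(range(owned[0], owned[0] + len(owned))), \
                "Expected a contiguous partition range on %s, got %s" % (node.account, owned)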
Example No. 15
class StreamsBrokerCompatibility(Test):
    """
    These tests validate that
    - Streams works with brokers 0.11 (or newer)
    - Streams w/ EOS-alpha works with brokers 0.11 (or newer)
    - Streams w/ EOS-beta works with brokers 2.5 (or newer)
    - Streams fails fast with older brokers 0.10.0, 0.10.2, and 0.10.1
    - Streams w/ EOS-beta fails fast with brokers 2.4 or older
    """

    input = "brokerCompatibilitySourceTopic"
    output = "brokerCompatibilitySinkTopic"

    def __init__(self, test_context):
        super(StreamsBrokerCompatibility,
              self).__init__(test_context=test_context)
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(
            test_context,
            num_nodes=1,
            zk=self.zk,
            topics={
                self.input: {
                    'partitions': 1,
                    'replication-factor': 1
                },
                self.output: {
                    'partitions': 1,
                    'replication-factor': 1
                }
            },
            server_prop_overides=[[
                "transaction.state.log.replication.factor", "1"
            ], ["transaction.state.log.min.isr", "1"]])
        self.consumer = VerifiableConsumer(
            test_context, 1, self.kafka, self.output,
            "stream-broker-compatibility-verify-consumer")

    def setUp(self):
        self.zk.start()

    @parametrize(broker_version=str(LATEST_2_4))
    @parametrize(broker_version=str(LATEST_2_3))
    @parametrize(broker_version=str(LATEST_2_2))
    @parametrize(broker_version=str(LATEST_2_1))
    @parametrize(broker_version=str(LATEST_2_0))
    @parametrize(broker_version=str(LATEST_1_1))
    @parametrize(broker_version=str(LATEST_1_0))
    @parametrize(broker_version=str(LATEST_0_11_0))
    def test_compatible_brokers_eos_disabled(self, broker_version):
        self.kafka.set_version(KafkaVersion(broker_version))
        self.kafka.start()

        processor = StreamsBrokerCompatibilityService(self.test_context,
                                                      self.kafka,
                                                      "at_least_once")
        processor.start()

        self.consumer.start()

        processor.wait()

        wait_until(
            lambda: self.consumer.total_consumed() > 0,
            timeout_sec=30,
            err_msg=
            "Did expect to read a message but got none within 30 seconds.")

        self.consumer.stop()
        self.kafka.stop()

    @parametrize(broker_version=str(LATEST_2_5))
    @parametrize(broker_version=str(LATEST_2_4))
    @parametrize(broker_version=str(LATEST_2_3))
    @parametrize(broker_version=str(LATEST_2_2))
    @parametrize(broker_version=str(LATEST_2_1))
    @parametrize(broker_version=str(LATEST_2_0))
    @parametrize(broker_version=str(LATEST_1_1))
    @parametrize(broker_version=str(LATEST_1_0))
    @parametrize(broker_version=str(LATEST_0_11_0))
    def test_compatible_brokers_eos_alpha_enabled(self, broker_version):
        self.kafka.set_version(KafkaVersion(broker_version))
        self.kafka.start()

        processor = StreamsBrokerCompatibilityService(self.test_context,
                                                      self.kafka,
                                                      "exactly_once")
        processor.start()

        self.consumer.start()

        processor.wait()

        wait_until(
            lambda: self.consumer.total_consumed() > 0,
            timeout_sec=30,
            err_msg=
            "Did expect to read a message but got none within 30 seconds.")

        self.consumer.stop()
        self.kafka.stop()

    # TODO enable after 2.5 is released
    # @parametrize(broker_version=str(LATEST_2_5))
    # def test_compatible_brokers_eos_beta_enabled(self, broker_version):
    #     self.kafka.set_version(KafkaVersion(broker_version))
    #     self.kafka.start()
    #
    #     processor = StreamsBrokerCompatibilityService(self.test_context, self.kafka, "exactly_once_beta")
    #     processor.start()
    #
    #     self.consumer.start()
    #
    #     processor.wait()
    #
    #     wait_until(lambda: self.consumer.total_consumed() > 0, timeout_sec=30, err_msg="Did expect to read a message but got none within 30 seconds.")
    #
    #     self.consumer.stop()
    #     self.kafka.stop()

    @parametrize(broker_version=str(LATEST_0_10_2))
    @parametrize(broker_version=str(LATEST_0_10_1))
    @parametrize(broker_version=str(LATEST_0_10_0))
    def test_fail_fast_on_incompatible_brokers(self, broker_version):
        self.kafka.set_version(KafkaVersion(broker_version))
        self.kafka.start()

        processor = StreamsBrokerCompatibilityService(self.test_context,
                                                      self.kafka,
                                                      "at_least_once")

        with processor.node.account.monitor_log(
                processor.STDERR_FILE) as monitor:
            processor.start()
            monitor.wait_until(
                'FATAL: An unexpected exception org.apache.kafka.common.errors.UnsupportedVersionException',
                timeout_sec=60,
                err_msg=
                "Never saw 'FATAL: An unexpected exception org.apache.kafka.common.errors.UnsupportedVersionException "
                + str(processor.node.account))

        self.kafka.stop()

    @parametrize(broker_version=str(LATEST_2_4))
    @parametrize(broker_version=str(LATEST_2_3))
    @parametrize(broker_version=str(LATEST_2_2))
    @parametrize(broker_version=str(LATEST_2_1))
    @parametrize(broker_version=str(LATEST_2_0))
    @parametrize(broker_version=str(LATEST_1_1))
    @parametrize(broker_version=str(LATEST_1_0))
    @parametrize(broker_version=str(LATEST_0_11_0))
    def test_fail_fast_on_incompatible_brokers_if_eos_beta_enabled(
            self, broker_version):
        self.kafka.set_version(KafkaVersion(broker_version))
        self.kafka.start()

        processor = StreamsBrokerCompatibilityService(self.test_context,
                                                      self.kafka,
                                                      "exactly_once_beta")

        with processor.node.account.monitor_log(
                processor.STDERR_FILE) as monitor:
            with processor.node.account.monitor_log(processor.LOG_FILE) as log:
                processor.start()
                log.wait_until(
                    'Shutting down because the Kafka cluster seems to be on a too old version. Setting processing\.guarantee="exactly_once_beta" requires broker version 2\.5 or higher\.',
                    timeout_sec=60,
                    err_msg=
                    "Never saw 'Shutting down, because the Kafka cluster seems to be on a too old version. Setting `processing.guarantee=\"exaclty_once_beta\"` requires broker version 2.5 or higher.' log message "
                    + str(processor.node.account))
                monitor.wait_until(
                    'FATAL: An unexpected exception org.apache.kafka.common.errors.UnsupportedVersionException',
                    timeout_sec=60,
                    err_msg=
                    "Never saw 'FATAL: An unexpected exception org.apache.kafka.common.errors.UnsupportedVersionException' error message "
                    + str(processor.node.account))

        self.kafka.stop()
Example No. 16
class StreamsBrokerCompatibility(Test):
    """
    These tests validate that Streams v0.10.2+ can connect to older brokers v0.10+
    and that Streams fails fast for pre-0.10 brokers
    """

    input = "brokerCompatibilitySourceTopic"
    output = "brokerCompatibilitySinkTopic"

    def __init__(self, test_context):
        super(StreamsBrokerCompatibility, self).__init__(test_context=test_context)

        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(test_context,
                                  num_nodes=1,
                                  zk=self.zk,
                                  topics={
                                      self.input: {'partitions': 1, 'replication-factor': 1},
                                      self.output: {'partitions': 1, 'replication-factor': 1}
                                  })

        self.processor = StreamsBrokerCompatibilityService(self.test_context, self.kafka)

        self.consumer = VerifiableConsumer(test_context,
                                           1,
                                           self.kafka,
                                           self.output,
                                           "stream-broker-compatibility-verify-consumer")

    def setUp(self):
        self.zk.start()

    @parametrize(broker_version=str(DEV_BRANCH))
    @parametrize(broker_version=str(LATEST_0_10_1))
    def test_compatible_brokers(self, broker_version):
        self.kafka.set_version(KafkaVersion(broker_version))
        self.kafka.start()

        self.processor.start()
        self.consumer.start()

        self.processor.wait()

        num_consumed_msgs = self.consumer.total_consumed()

        self.consumer.stop()
        self.kafka.stop()

        assert num_consumed_msgs == 1, \
            "Did expect to read exactly one message but got %d" % num_consumed_msgs

    @parametrize(broker_version=str(LATEST_0_10_0))
    def test_fail_fast_on_incompatible_brokers(self, broker_version):
        self.kafka.set_version(KafkaVersion(broker_version))
        self.kafka.start()

        self.processor.start()

        self.processor.node.account.ssh(self.processor.start_cmd(self.processor.node))
        with self.processor.node.account.monitor_log(self.processor.STDERR_FILE) as monitor:
            monitor.wait_until('Exception in thread "main" org.apache.kafka.streams.errors.StreamsException: Kafka Streams requires broker version 0.10.1.x or higher.',
                        timeout_sec=60,
                        err_msg="Never saw 'incompatible broker' error message " + str(self.processor.node.account))

        self.kafka.stop()
Example No. 17
class StreamsBrokerCompatibility(Test):
    """
    These tests validate that
    - Streams 0.11+ w/ EOS fails fast for older brokers 0.10.2 and 0.10.1
    - Streams 0.11+ w/o EOS works for older brokers 0.10.2 and 0.10.1
    - Streams fails fast for 0.10.0 brokers
    - Streams times out for pre-0.10.0 brokers
    """

    input = "brokerCompatibilitySourceTopic"
    output = "brokerCompatibilitySinkTopic"

    def __init__(self, test_context):
        super(StreamsBrokerCompatibility, self).__init__(test_context=test_context)
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(test_context,
                                  num_nodes=1,
                                  zk=self.zk,
                                  topics={
                                      self.input: {'partitions': 1, 'replication-factor': 1},
                                      self.output: {'partitions': 1, 'replication-factor': 1}
                                  })
        self.consumer = VerifiableConsumer(test_context,
                                           1,
                                           self.kafka,
                                           self.output,
                                           "stream-broker-compatibility-verify-consumer")

    def setUp(self):
        self.zk.start()

   
    @parametrize(broker_version=str(LATEST_0_10_2))
    @parametrize(broker_version=str(LATEST_0_10_1))
    def test_fail_fast_on_incompatible_brokers_if_eos_enabled(self, broker_version):
        self.kafka.set_version(KafkaVersion(broker_version))
        self.kafka.start()

        processor = StreamsBrokerCompatibilityService(self.test_context, self.kafka, True)

        with processor.node.account.monitor_log(processor.STDERR_FILE) as monitor:
            processor.start()
            monitor.wait_until('FATAL: An unexpected exception org.apache.kafka.common.errors.UnsupportedVersionException: Cannot create a v0 FindCoordinator request because we require features supported only in 1 or later.',
                               timeout_sec=60,
                               err_msg="Never saw 'FATAL: An unexpected exception org.apache.kafka.common.errors.UnsupportedVersionException: Cannot create a v0 FindCoordinator request because we require features supported only in 1 or later.' error message " + str(processor.node.account))

        self.kafka.stop()

    @parametrize(broker_version=str(LATEST_0_11_0))
    @parametrize(broker_version=str(LATEST_0_10_2))
    @parametrize(broker_version=str(LATEST_0_10_1))
    def test_compatible_brokers_eos_disabled(self, broker_version):
        self.kafka.set_version(KafkaVersion(broker_version))
        self.kafka.start()

        processor = StreamsBrokerCompatibilityService(self.test_context, self.kafka, False)
        processor.start()

        self.consumer.start()

        processor.wait()

        wait_until(lambda: self.consumer.total_consumed() > 0, timeout_sec=30, err_msg="Did expect to read a message but got none within 30 seconds.")

        self.consumer.stop()
        self.kafka.stop()

    @parametrize(broker_version=str(LATEST_0_10_0))
    def test_fail_fast_on_incompatible_brokers(self, broker_version):
        self.kafka.set_version(KafkaVersion(broker_version))
        self.kafka.start()

        processor = StreamsBrokerCompatibilityService(self.test_context, self.kafka, False)

        with processor.node.account.monitor_log(processor.STDERR_FILE) as monitor:
            processor.start()
            monitor.wait_until('FATAL: An unexpected exception org.apache.kafka.common.errors.UnsupportedVersionException: The broker does not support CREATE_TOPICS',
                        timeout_sec=60,
                        err_msg="Never saw 'FATAL: An unexpected exception org.apache.kafka.common.errors.UnsupportedVersionException: The broker does not support CREATE_TOPICS' error message " + str(processor.node.account))

        self.kafka.stop()

    @ignore
    @parametrize(broker_version=str(LATEST_0_9))
    @parametrize(broker_version=str(LATEST_0_8_2))
    def test_timeout_on_pre_010_brokers(self, broker_version):
        self.kafka.set_version(KafkaVersion(broker_version))
        self.kafka.start()

        processor = StreamsBrokerCompatibilityService(self.test_context, self.kafka, False)

        with processor.node.account.monitor_log(processor.STDERR_FILE) as monitor:
            processor.start()
            monitor.wait_until('Exception in thread "main" org.apache.kafka.streams.errors.BrokerNotFoundException: Could not find any available broker.',
                               timeout_sec=60,
                               err_msg="Never saw 'no available brokers' error message " + str(processor.node.account))

        self.kafka.stop()
Example No. 18
    def test_offset_truncate(self):
        """
        Verify correct consumer behavior when the brokers are consecutively restarted.

        Setup: single Kafka cluster with one producer writing messages to a single topic with one
        partition, and a set of consumers in the same group reading from the same topic.

        - Start a producer which continues producing new messages throughout the test.
        - Start up the consumers and wait until they've joined the group.
        - In a loop, restart each broker consecutively, waiting for the group to stabilize between
          each broker restart.
        - Verify delivery semantics according to the failure type and that the broker bounces
          did not cause unexpected group rebalances.
        """
        tp = TopicPartition(self.TOPIC, 0)

        producer = self.setup_producer(self.TOPIC, throughput=10)
        producer.start()
        self.await_produced_messages(producer, min_messages=10)

        consumer = self.setup_consumer(self.TOPIC, reset_policy="earliest", verify_offsets=False)
        consumer.start()
        self.await_all_members(consumer)

        # Reduce ISR to one node
        isr = self.kafka.isr_idx_list(self.TOPIC, 0)
        node1 = self.kafka.get_node(isr[0])
        self.kafka.stop_node(node1)
        self.logger.info("Reduced ISR to one node, consumer is at %s", consumer.current_position(tp))

        # Ensure remaining ISR member has a little bit of data
        current_total = consumer.total_consumed()
        wait_until(lambda: consumer.total_consumed() > current_total + 10,
                   timeout_sec=30,
                   err_msg="Timed out waiting for consumer to move ahead by 10 messages")

        # Kill last ISR member
        node2 = self.kafka.get_node(isr[1])
        self.kafka.stop_node(node2)
        self.logger.info("No members in ISR, consumer is at %s", consumer.current_position(tp))

        # Keep consuming until we've caught up to HW
        def none_consumed(this, consumer):
            new_total = consumer.total_consumed()
            if new_total == this.last_total:
                return True
            else:
                this.last_total = new_total
                return False

        self.last_total = consumer.total_consumed()
        wait_until(lambda: none_consumed(self, consumer),
                   timeout_sec=30,
                   err_msg="Timed out waiting for the consumer to catch up")

        self.kafka.start_node(node1)
        self.logger.info("Out of sync replica is online, but not electable. Consumer is at  %s", consumer.current_position(tp))

        pre_truncation_pos = consumer.current_position(tp)

        self.kafka.set_unclean_leader_election(self.TOPIC)
        self.logger.info("New unclean leader, consumer is at %s", consumer.current_position(tp))

        # Wait for truncation to be detected
        self.kafka.start_node(node2)
        wait_until(lambda: consumer.current_position(tp) >= pre_truncation_pos,
                   timeout_sec=30,
                   err_msg="Timed out waiting for truncation")

        # Make sure we didn't reset to beginning of log
        total_records_consumed = len(self.all_values_consumed)
        assert total_records_consumed == len(set(self.all_values_consumed)), "Received duplicate records"

        consumer.stop()
        producer.stop()

        # Re-consume all the records
        consumer2 = VerifiableConsumer(self.test_context, 1, self.kafka, self.TOPIC, group_id="group2",
                                       reset_policy="earliest", verify_offsets=True)

        consumer2.start()
        self.await_all_members(consumer2)

        wait_until(lambda: consumer2.total_consumed() > 0,
           timeout_sec=30,
           err_msg="Timed out waiting for consumer to consume at least 10 messages")

        self.last_total = consumer2.total_consumed()
        wait_until(lambda: none_consumed(self, consumer2),
               timeout_sec=30,
               err_msg="Timed out waiting for the consumer to fully consume data")

        second_total_consumed = consumer2.total_consumed()
        assert second_total_consumed < total_records_consumed, "Expected fewer records with new consumer since we truncated"
        self.logger.info("Second consumer saw only %s, meaning %s were truncated",
                         second_total_consumed, total_records_consumed - second_total_consumed)
Example No. 19
class EndToEndTest(Test):
    """This class provides a shared template for tests which follow the common pattern of:

        - produce to a topic in the background
        - consume from that topic in the background
        - run some logic, e.g. fail topic leader etc.
        - perform validation
    """

    DEFAULT_TOPIC_CONFIG = {"partitions": 2, "replication-factor": 1}

    def __init__(self,
                 test_context,
                 topic="test_topic",
                 topic_config=DEFAULT_TOPIC_CONFIG):
        super(EndToEndTest, self).__init__(test_context=test_context)
        self.topic = topic
        self.topic_config = topic_config
        self.records_consumed = []
        self.last_consumed_offsets = {}

    def create_zookeeper(self, num_nodes=1, **kwargs):
        self.zk = ZookeeperService(self.test_context,
                                   num_nodes=num_nodes,
                                   **kwargs)

    def create_kafka(self, num_nodes=1, **kwargs):
        group_metadata_config = {
            "partitions": num_nodes,
            "replication-factor": min(num_nodes, 3),
            "configs": {
                "cleanup.policy": "compact"
            }
        }

        topics = {
            self.topic: self.topic_config,
            "__consumer_offsets": group_metadata_config
        }
        self.kafka = KafkaService(self.test_context,
                                  num_nodes=num_nodes,
                                  zk=self.zk,
                                  topics=topics,
                                  **kwargs)

    def create_consumer(self, num_nodes=1, group_id="test_group", **kwargs):
        self.consumer = VerifiableConsumer(
            self.test_context,
            num_nodes=num_nodes,
            kafka=self.kafka,
            topic=self.topic,
            group_id=group_id,
            on_record_consumed=self.on_record_consumed,
            **kwargs)

    def create_producer(self, num_nodes=1, throughput=1000, **kwargs):
        self.producer = VerifiableProducer(self.test_context,
                                           num_nodes=num_nodes,
                                           kafka=self.kafka,
                                           topic=self.topic,
                                           throughput=throughput,
                                           **kwargs)

    def on_record_consumed(self, record, node):
        partition = TopicPartition(record["topic"], record["partition"])
        record_id = int(record["value"])
        offset = record["offset"]
        self.last_consumed_offsets[partition] = offset
        self.records_consumed.append(record_id)

    def await_consumed_offsets(self, last_acked_offsets, timeout_sec):
        def has_finished_consuming():
            for partition, offset in last_acked_offsets.items():
                if partition not in self.last_consumed_offsets:
                    return False
                if self.last_consumed_offsets[partition] < offset:
                    return False
            return True

        wait_until(has_finished_consuming,
                   timeout_sec=timeout_sec,
                   err_msg="Consumer failed to consume up to offsets %s after waiting %ds." %\
                   (str(last_acked_offsets), timeout_sec))

    def _collect_all_logs(self):
        for s in self.test_context.services:
            self.mark_for_collect(s)

    def await_startup(self, min_records=5, timeout_sec=30):
        try:
            wait_until(lambda: self.consumer.total_consumed() >= min_records,
                       timeout_sec=timeout_sec,
                       err_msg="Timed out after %ds while awaiting initial record delivery of %d records" %\
                       (timeout_sec, min_records))
        except BaseException:
            self._collect_all_logs()
            raise

    def run_validation(self,
                       min_records=5000,
                       producer_timeout_sec=30,
                       consumer_timeout_sec=30,
                       enable_idempotence=False):
        try:
            wait_until(lambda: self.producer.num_acked > min_records,
                       timeout_sec=producer_timeout_sec,
                       err_msg="Producer failed to produce messages for %ds." %\
                       producer_timeout_sec)

            self.logger.info("Stopping producer after writing up to offsets %s" %\
                         str(self.producer.last_acked_offsets))
            self.producer.stop()

            self.await_consumed_offsets(self.producer.last_acked_offsets,
                                        consumer_timeout_sec)
            self.consumer.stop()

            self.validate(enable_idempotence)
        except BaseException:
            self._collect_all_logs()
            raise

    def validate(self, enable_idempotence):
        self.logger.info("Number of acked records: %d" %
                         len(self.producer.acked))
        self.logger.info("Number of consumed records: %d" %
                         len(self.records_consumed))

        def check_lost_data(missing_records):
            return self.kafka.search_data_files(self.topic, missing_records)

        succeeded, error_msg = validate_delivery(self.producer.acked,
                                                 self.records_consumed,
                                                 enable_idempotence,
                                                 check_lost_data)

        # Collect all logs if validation fails
        if not succeeded:
            self._collect_all_logs()

        assert succeeded, error_msg
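
As context for the EndToEndTest template above: a concrete ducktape test is expected to wire these helpers together and then run the validation. The class below is only a usage sketch; its name, node counts, and record counts are illustrative assumptions, not taken from the example.

class ReplicationEndToEndTest(EndToEndTest):
    """Hypothetical subclass showing how the template above is meant to be driven."""

    def test_produce_consume(self):
        # Bring up the shared services provided by the template.
        self.create_zookeeper()
        self.zk.start()

        self.create_kafka(num_nodes=3)
        self.kafka.start()

        self.create_producer(throughput=1000)
        self.producer.start()

        self.create_consumer(group_id="end_to_end_group")
        self.consumer.start()

        # Wait for records to start flowing, run any failure logic here,
        # then verify that everything the producer acked was consumed.
        self.await_startup(min_records=5)
        self.run_validation(min_records=1000)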
Example No. 20
class EndToEndTest(Test):
    """This class provides a shared template for tests which follow the common pattern of:

        - produce to a topic in the background
        - consume from that topic in the background
        - run some logic, e.g. fail topic leader etc.
        - perform validation
    """

    DEFAULT_TOPIC_CONFIG = {"partitions": 2, "replication-factor": 1}

    def __init__(self, test_context, topic="test_topic", topic_config=DEFAULT_TOPIC_CONFIG):
        super(EndToEndTest, self).__init__(test_context=test_context)
        self.topic = topic
        self.topic_config = topic_config
        self.records_consumed = []
        self.last_consumed_offsets = {}
        
    def create_zookeeper(self, num_nodes=1, **kwargs):
        self.zk = ZookeeperService(self.test_context, num_nodes=num_nodes, **kwargs)

    def create_kafka(self, num_nodes=1, **kwargs):
        group_metadata_config = {
            "partitions": num_nodes,
            "replication-factor": min(num_nodes, 3),
            "configs": {"cleanup.policy": "compact"}
        }

        topics = {
            self.topic: self.topic_config,
            "__consumer_offsets": group_metadata_config
        }
        self.kafka = KafkaService(self.test_context, num_nodes=num_nodes,
                                  zk=self.zk, topics=topics, **kwargs)

    def create_consumer(self, num_nodes=1, group_id="test_group", **kwargs):
        self.consumer = VerifiableConsumer(self.test_context,
                                           num_nodes=num_nodes,
                                           kafka=self.kafka,
                                           topic=self.topic,
                                           group_id=group_id,
                                           on_record_consumed=self.on_record_consumed,
                                           **kwargs)
                                    

    def create_producer(self, num_nodes=1, throughput=1000, **kwargs):
        self.producer = VerifiableProducer(self.test_context,
                                           num_nodes=num_nodes,
                                           kafka=self.kafka,
                                           topic=self.topic,
                                           throughput=throughput,
                                           **kwargs)

    def on_record_consumed(self, record, node):
        partition = TopicPartition(record["topic"], record["partition"])
        record_id = int(record["value"])
        offset = record["offset"]
        self.last_consumed_offsets[partition] = offset
        self.records_consumed.append(record_id)

    def await_consumed_offsets(self, last_acked_offsets, timeout_sec):
        def has_finished_consuming():
            for partition, offset in last_acked_offsets.items():
                if partition not in self.last_consumed_offsets:
                    return False
                if self.last_consumed_offsets[partition] < offset:
                    return False
            return True

        wait_until(has_finished_consuming,
                   timeout_sec=timeout_sec,
                   err_msg="Consumer failed to consume up to offsets %s after waiting %ds." %\
                   (str(last_acked_offsets), timeout_sec))


    def _collect_all_logs(self):
        for s in self.test_context.services:
            self.mark_for_collect(s)

    def await_startup(self, min_records=5, timeout_sec=30):
        try:
            wait_until(lambda: self.consumer.total_consumed() >= min_records,
                       timeout_sec=timeout_sec,
                       err_msg="Timed out after %ds while awaiting initial record delivery of %d records" %\
                       (timeout_sec, min_records))
        except BaseException:
            self._collect_all_logs()
            raise

    def run_validation(self, min_records=5000, producer_timeout_sec=30,
                       consumer_timeout_sec=30, enable_idempotence=False):
        try:
            wait_until(lambda: self.producer.num_acked > min_records,
                       timeout_sec=producer_timeout_sec,
                       err_msg="Producer failed to produce messages for %ds." %\
                       producer_timeout_sec)

            self.logger.info("Stopping producer after writing up to offsets %s" %\
                         str(self.producer.last_acked_offsets))
            self.producer.stop()

            self.await_consumed_offsets(self.producer.last_acked_offsets, consumer_timeout_sec)
            self.consumer.stop()
            
            self.validate(enable_idempotence)
        except BaseException:
            self._collect_all_logs()
            raise

    def validate(self, enable_idempotence):
        self.logger.info("Number of acked records: %d" % len(self.producer.acked))
        self.logger.info("Number of consumed records: %d" % len(self.records_consumed))

        def check_lost_data(missing_records):
            return self.kafka.search_data_files(self.topic, missing_records)

        succeeded, error_msg = validate_delivery(self.producer.acked, self.records_consumed,
                                                 enable_idempotence, check_lost_data)

        # Collect all logs if validation fails
        if not succeeded:
            self._collect_all_logs()

        assert succeeded, error_msg