Example #1
class CamusTest(Test):
    def __init__(self,
                 test_context,
                 num_zk,
                 num_brokers,
                 num_hadoop,
                 num_schema_registry,
                 num_rest,
                 hadoop_distro='cdh',
                 hadoop_version=2,
                 topics=None):

        super(CamusTest, self).__init__(test_context)
        self.num_zk = num_zk
        self.num_brokers = num_brokers
        self.num_hadoop = num_hadoop
        self.num_schema_registry = num_schema_registry
        self.num_rest = num_rest
        self.topics = topics
        self.hadoop_distro = hadoop_distro
        self.hadoop_version = hadoop_version

        self.zk = ZookeeperService(test_context, self.num_zk)
        self.kafka = KafkaService(test_context,
                                  self.num_brokers,
                                  self.zk,
                                  topics=self.topics)
        self.hadoop = create_hadoop_service(test_context, self.num_hadoop,
                                            self.hadoop_distro,
                                            self.hadoop_version)
        self.schema_registry = SchemaRegistryService(test_context,
                                                     self.num_schema_registry,
                                                     self.zk, self.kafka)
        self.rest = KafkaRestService(test_context, self.num_rest, self.zk,
                                     self.kafka, self.schema_registry)
class EverythingRunsTest(Test):
    """ Sanity check to ensure that various core services all run.
    """
    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(EverythingRunsTest, self).__init__(test_context=test_context)

        self.zk = ZookeeperService(test_context, num_nodes=2)
        self.kafka = KafkaService(test_context, 1, self.zk)
        self.schema_registry = SchemaRegistryService(test_context, 1, self.zk,
                                                     self.kafka)
        self.rest_proxy = KafkaRestService(test_context, 1, self.zk,
                                           self.kafka, self.schema_registry)
        self.register_driver = RegisterSchemasService(test_context,
                                                      1,
                                                      self.schema_registry,
                                                      retry_wait_sec=.02,
                                                      num_tries=5,
                                                      max_time_seconds=10,
                                                      max_schemas=50)

    def test(self):
        self.zk.start()
        self.kafka.start()
        self.schema_registry.start()
        self.rest_proxy.start()

        self.register_driver.start()
        self.register_driver.wait()  # block until register_driver finishes
Example #3
class CamusTest(Test):
    def __init__(self, test_context, num_zk, num_brokers, num_hadoop, num_schema_registry, num_rest,
                 hadoop_distro='cdh', hadoop_version=2, topics=None):

        super(CamusTest, self).__init__(test_context)
        self.num_zk = num_zk
        self.num_brokers = num_brokers
        self.num_hadoop = num_hadoop
        self.num_schema_registry = num_schema_registry
        self.num_rest = num_rest
        self.topics = topics
        self.hadoop_distro = hadoop_distro
        self.hadoop_version = hadoop_version

        self.zk = ZookeeperService(test_context, self.num_zk)
        self.kafka = KafkaService(test_context, self.num_brokers, self.zk, topics=self.topics)
        self.hadoop = create_hadoop_service(test_context, self.num_hadoop, self.hadoop_distro, self.hadoop_version)
        self.schema_registry = SchemaRegistryService(test_context, self.num_schema_registry, self.zk, self.kafka)
        self.rest = KafkaRestService(test_context, self.num_rest, self.zk, self.kafka, self.schema_registry)

    def setUp(self):
        self.zk.start()
        self.kafka.start()
        self.hadoop.start()
        self.schema_registry.start()
        self.rest.start()
Example #4
class MiniTest(Test):
    def __init__(self, test_context):
        super(MiniTest, self).__init__(test_context=test_context)

        self.zk = ZookeeperService(test_context, 1)
        self.kafka = KafkaService(test_context, 1, self.zk)

    def test(self):
        self.zk.start()
        self.kafka.start()
Example #5
class KafkaTest(Test):
    def __init__(self, test_context, num_zk, num_brokers, topics=None):
        super(KafkaTest, self).__init__(test_context)
        self.num_zk = num_zk
        self.num_brokers = num_brokers
        self.topics = topics

        self.zk = ZookeeperService(test_context, self.num_zk)

        self.kafka = KafkaService(test_context,
                                  self.num_brokers,
                                  self.zk,
                                  topics=self.topics)

class EverythingRunsTest(Test):
    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(EverythingRunsTest, self).__init__(test_context=test_context)

        self.zk = ZookeeperService(test_context, num_nodes=2)
        self.kafka = KafkaService(test_context, 1, self.zk)
        self.schema_registry = SchemaRegistryService(test_context, 1, self.zk,
                                                     self.kafka)
        self.rest_proxy = KafkaRestService(test_context, 1, self.zk,
                                           self.kafka, self.schema_registry)
        self.register_driver = RegisterSchemasService(test_context,
                                                      1,
                                                      self.schema_registry,
                                                      retry_wait_sec=.02,
                                                      num_tries=5,
                                                      max_time_seconds=10,
                                                      max_schemas=50)
Example #7
class ReplicationTest(Test):
    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(ReplicationTest, self).__init__(test_context=test_context)

        self.topic = "test_topic"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(test_context,
                                  num_nodes=3,
                                  zk=self.zk,
                                  topics={
                                      self.topic: {
                                          "partitions": 3,
                                          "replication-factor": 3,
                                          "min.insync.replicas": 2
                                      }
                                  })
        self.producer_throughput = 10000
        self.num_producers = 1
        self.num_consumers = 1
Example #8
class KafkaTest(Test):
    def __init__(self, test_context, num_zk, num_brokers, topics=None):
        super(KafkaTest, self).__init__(test_context)
        self.num_zk = num_zk
        self.num_brokers = num_brokers
        self.topics = topics

        self.zk = ZookeeperService(test_context, self.num_zk)

        self.kafka = KafkaService(
            test_context, self.num_brokers,
            self.zk, topics=self.topics)

class EverythingRunsTest(Test):
    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(EverythingRunsTest, self).__init__(test_context=test_context)

        self.zk = ZookeeperService(test_context, num_nodes=2)
        self.kafka = KafkaService(test_context, 1, self.zk)
        self.schema_registry = SchemaRegistryService(test_context, 1, self.zk, self.kafka)
        self.rest_proxy = KafkaRestService(test_context, 1, self.zk, self.kafka, self.schema_registry)
        self.register_driver = RegisterSchemasService(
            test_context, 1, self.schema_registry,
            retry_wait_sec=.02, num_tries=5, max_time_seconds=10, max_schemas=50)
Example #10
class KafkaTest(Test):
    """
    Helper class that manages setting up a Kafka cluster. Use this if the
    default settings for Kafka are sufficient for your test; any customization
    needs to be done manually. Your run() method should call tearDown and
    setUp. The Zookeeper and Kafka services are available as the fields
    KafkaTest.zk and KafkaTest.kafka.
    """
    def __init__(self, test_context, num_zk, num_brokers, topics=None):
        super(KafkaTest, self).__init__(test_context)
        self.num_zk = num_zk
        self.num_brokers = num_brokers
        self.topics = topics

        self.zk = ZookeeperService(test_context, self.num_zk)

        self.kafka = KafkaService(
            test_context, self.num_brokers,
            self.zk, topics=self.topics)

    def setUp(self):
        self.zk.start()
        self.kafka.start()

class ReplicationTest(Test):
    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(ReplicationTest, self).__init__(test_context=test_context)

        self.topic = "test_topic"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(test_context, num_nodes=3, zk=self.zk,
                                  topics={self.topic: {"partitions": 3,
                                                       "replication-factor": 3,
                                                       "min.insync.replicas": 2}})
        self.producer_throughput = 10000
        self.num_producers = 1
        self.num_consumers = 1
class EverythingRunsTest(Test):
    """ Sanity check to ensure that various core services all run.
    """
    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(EverythingRunsTest, self).__init__(test_context=test_context)

        self.zk = ZookeeperService(test_context, num_nodes=2)
        self.kafka = KafkaService(test_context, 1, self.zk)
        self.schema_registry = SchemaRegistryService(test_context, 1, self.zk, self.kafka)
        self.rest_proxy = KafkaRestService(test_context, 1, self.zk, self.kafka, self.schema_registry)
        self.register_driver = RegisterSchemasService(
            test_context, 1, self.schema_registry,
            retry_wait_sec=.02, num_tries=5, max_time_seconds=10, max_schemas=50)

    def test(self):
        self.zk.start()
        self.kafka.start()
        self.schema_registry.start()
        self.rest_proxy.start()

        self.register_driver.start()
        self.register_driver.wait()  # block until register_driver finishes
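
The KafkaTest helper above is meant to be subclassed: as its docstring notes, setUp() brings up ZooKeeper and Kafka before each test method, so a subclass only supplies node counts and topics. A minimal sketch of that pattern (the subclass name, topic, and assertion are hypothetical, not part of the scraped examples):

class KafkaSanityTest(KafkaTest):
    def __init__(self, test_context):
        super(KafkaSanityTest, self).__init__(
            test_context, num_zk=1, num_brokers=1,
            topics={"sanity": {"partitions": 1, "replication-factor": 1}})

    def test_cluster_came_up(self):
        # KafkaTest.setUp() has already started self.zk and self.kafka
        assert len(self.kafka.nodes) == self.num_brokers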
Example #13
# Imports assumed by this example (module paths as in the Apache Kafka
# system tests; adjust if your tree differs):
import signal
import time

from ducktape.tests.test import Test
from ducktape.utils.util import wait_until

from kafkatest.services.console_consumer import ConsoleConsumer
from kafkatest.services.kafka import KafkaService
from kafkatest.services.verifiable_producer import VerifiableProducer
from kafkatest.services.zookeeper import ZookeeperService


class KafkaTest(Test):
    """
    Helper class that manages setting up a Kafka cluster. Use this if the
    default settings for Kafka are sufficient for your test; any customization
    needs to be done manually. Your run() method should call tearDown and
    setUp. The Zookeeper and Kafka services are available as the fields
    KafkaTest.zk and KafkaTest.kafka.
    """
    def __init__(self, test_context, num_zk, num_brokers, topics=None):
        super(KafkaTest, self).__init__(test_context)
        self.num_zk = num_zk
        self.num_brokers = num_brokers
        self.topics = topics

        self.zk = ZookeeperService(test_context, self.num_zk)

        self.kafka = KafkaService(test_context,
                                  self.num_brokers,
                                  self.zk,
                                  topics=self.topics)

    def setUp(self):
        self.zk.start()
        self.kafka.start()
class ReplicationTest(Test):
    """Replication tests.
    These tests verify that replication provides simple durability guarantees by checking that data acked by
    brokers is still available for consumption in the face of various failure scenarios."""

    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(ReplicationTest, self).__init__(test_context=test_context)

        self.topic = "test_topic"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(test_context, num_nodes=3, zk=self.zk,
                                  topics={self.topic: {"partitions": 3,
                                                       "replication-factor": 3,
                                                       "min.insync.replicas": 2}})
        self.producer_throughput = 10000
        self.num_producers = 1
        self.num_consumers = 1

    def setUp(self):
        self.zk.start()
        self.kafka.start()

    def min_cluster_size(self):
        """Override this since we're adding services outside of the constructor"""
        return super(ReplicationTest, self).min_cluster_size() + self.num_producers + self.num_consumers

    def run_with_failure(self, failure):
        """This is the top-level test template.

        The steps are:
            Produce messages in the background while driving some failure condition
            When done driving failures, immediately stop producing
            Consume all messages
            Validate that messages acked by brokers were consumed

        Note that consuming is a bit tricky, at least with console consumer. The goal is to consume all messages
        (foreach partition) in the topic. In this case, waiting for the last message may cause the consumer to stop
        too soon since console consumer is consuming multiple partitions from a single thread and therefore we lose
        ordering guarantees.

        Waiting on a count of consumed messages can be unreliable: if we stop consuming when num_consumed == num_acked,
        we might exit early if some messages are duplicated (though not an issue here since producer retries==0)

        Therefore rely here on the consumer.timeout.ms setting which times out on the interval between successively
        consumed messages. Since we run the producer to completion before running the consumer, this is a reliable
        indicator that nothing is left to consume.

        """
        self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka, self.topic, throughput=self.producer_throughput)
        self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka, self.topic, consumer_timeout_ms=3000)

        # Produce in a background thread while driving broker failures
        self.producer.start()
        if not wait_until(lambda: self.producer.num_acked > 5, timeout_sec=5):
            raise RuntimeError("Producer failed to start in a reasonable amount of time.")
        failure()
        self.producer.stop()

        self.acked = self.producer.acked
        self.not_acked = self.producer.not_acked
        self.logger.info("num not acked: %d" % self.producer.num_not_acked)
        self.logger.info("num acked:     %d" % self.producer.num_acked)

        # Consume all messages
        self.consumer.start()
        self.consumer.wait()
        self.consumed = self.consumer.messages_consumed[1]
        self.logger.info("num consumed:  %d" % len(self.consumed))

        # Check produced vs consumed
        self.validate()

    def clean_shutdown(self):
        """Discover leader node for our topic and shut it down cleanly."""
        self.kafka.signal_leader(self.topic, partition=0, sig=signal.SIGTERM)

    def hard_shutdown(self):
        """Discover leader node for our topic and shut it down with a hard kill."""
        self.kafka.signal_leader(self.topic, partition=0, sig=signal.SIGKILL)

    def clean_bounce(self):
        """Chase the leader of one partition and restart it cleanly."""
        for i in range(5):
            prev_leader_node = self.kafka.leader(topic=self.topic, partition=0)
            self.kafka.restart_node(prev_leader_node, wait_sec=5, clean_shutdown=True)

    def hard_bounce(self):
        """Chase the leader and restart it cleanly."""
        for i in range(5):
            prev_leader_node = self.kafka.leader(topic=self.topic, partition=0)
            self.kafka.restart_node(prev_leader_node, wait_sec=5, clean_shutdown=False)

            # Wait long enough for previous leader to probably be awake again
            time.sleep(6)

    def validate(self):
        """Check that produced messages were consumed."""

        success = True
        msg = ""

        if len(set(self.consumed)) != len(self.consumed):
            # There are duplicates. This is ok, so report it but don't fail the test
            msg += "There are duplicate messages in the log\n"

        if not set(self.consumed).issuperset(set(self.acked)):
            # Every acked message must appear in the logs. I.e. consumed messages must be superset of acked messages.
            acked_minus_consumed = set(self.producer.acked) - set(self.consumed)
            success = False
            msg += "At least one acked message did not appear in the consumed messages. acked_minus_consumed: " + str(acked_minus_consumed)

        if not success:
            # Collect all the data logs if there was a failure
            self.mark_for_collect(self.kafka)

        assert success, msg

    def test_clean_shutdown(self):
        self.run_with_failure(self.clean_shutdown)

    def test_hard_shutdown(self):
        self.run_with_failure(self.hard_shutdown)

    def test_clean_bounce(self):
        self.run_with_failure(self.clean_bounce)

    def test_hard_bounce(self):
        self.run_with_failure(self.hard_bounce)
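
Because run_with_failure() accepts any zero-argument callable, extending this suite means adding one fault-injection method plus a one-line test wrapper. A hypothetical sketch that reuses only calls already shown above, assuming signal_leader() forwards arbitrary signals (SIGSTOP/SIGCONT freeze and resume the leader instead of killing it):

    def pause_and_resume_leader(self):
        """Freeze the leader of partition 0 briefly, then let it resume."""
        self.kafka.signal_leader(self.topic, partition=0, sig=signal.SIGSTOP)
        time.sleep(5)
        self.kafka.signal_leader(self.topic, partition=0, sig=signal.SIGCONT)

    def test_pause_and_resume_leader(self):
        self.run_with_failure(self.pause_and_resume_leader)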
Example #15
class ReplicationTest(Test):
    """Replication tests.
    These tests verify that replication provides simple durability guarantees by checking that data acked by
    brokers is still available for consumption in the face of various failure scenarios."""
    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(ReplicationTest, self).__init__(test_context=test_context)

        self.topic = "test_topic"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(test_context,
                                  num_nodes=3,
                                  zk=self.zk,
                                  topics={
                                      self.topic: {
                                          "partitions": 3,
                                          "replication-factor": 3,
                                          "min.insync.replicas": 2
                                      }
                                  })
        self.producer_throughput = 10000
        self.num_producers = 1
        self.num_consumers = 1

    def setUp(self):
        self.zk.start()
        self.kafka.start()

    def min_cluster_size(self):
        """Override this since we're adding services outside of the constructor"""
        return super(
            ReplicationTest,
            self).min_cluster_size() + self.num_producers + self.num_consumers

    def run_with_failure(self, failure):
        """This is the top-level test template.

        The steps are:
            Produce messages in the background while driving some failure condition
            When done driving failures, immediately stop producing
            Consume all messages
            Validate that messages acked by brokers were consumed

        Note that consuming is a bit tricky, at least with console consumer. The goal is to consume all messages
        (foreach partition) in the topic. In this case, waiting for the last message may cause the consumer to stop
        too soon since console consumer is consuming multiple partitions from a single thread and therefore we lose
        ordering guarantees.

        Waiting on a count of consumed messages can be unreliable: if we stop consuming when num_consumed == num_acked,
        we might exit early if some messages are duplicated (though not an issue here since producer retries==0)

        Therefore rely here on the consumer.timeout.ms setting which times out on the interval between successively
        consumed messages. Since we run the producer to completion before running the consumer, this is a reliable
        indicator that nothing is left to consume.

        """
        self.producer = VerifiableProducer(self.test_context,
                                           self.num_producers,
                                           self.kafka,
                                           self.topic,
                                           throughput=self.producer_throughput)
        self.consumer = ConsoleConsumer(self.test_context,
                                        self.num_consumers,
                                        self.kafka,
                                        self.topic,
                                        consumer_timeout_ms=3000)

        # Produce in a background thread while driving broker failures
        self.producer.start()
        if not wait_until(lambda: self.producer.num_acked > 5, timeout_sec=5):
            raise RuntimeError(
                "Producer failed to start in a reasonable amount of time.")
        failure()
        self.producer.stop()

        self.acked = self.producer.acked
        self.not_acked = self.producer.not_acked
        self.logger.info("num not acked: %d" % self.producer.num_not_acked)
        self.logger.info("num acked:     %d" % self.producer.num_acked)

        # Consume all messages
        self.consumer.start()
        self.consumer.wait()
        self.consumed = self.consumer.messages_consumed[1]
        self.logger.info("num consumed:  %d" % len(self.consumed))

        # Check produced vs consumed
        self.validate()

    def clean_shutdown(self):
        """Discover leader node for our topic and shut it down cleanly."""
        self.kafka.signal_leader(self.topic, partition=0, sig=signal.SIGTERM)

    def hard_shutdown(self):
        """Discover leader node for our topic and shut it down with a hard kill."""
        self.kafka.signal_leader(self.topic, partition=0, sig=signal.SIGKILL)

    def clean_bounce(self):
        """Chase the leader of one partition and restart it cleanly."""
        for i in range(5):
            prev_leader_node = self.kafka.leader(topic=self.topic, partition=0)
            self.kafka.restart_node(prev_leader_node,
                                    wait_sec=5,
                                    clean_shutdown=True)

    def hard_bounce(self):
        """Chase the leader and restart it cleanly."""
        for i in range(5):
            prev_leader_node = self.kafka.leader(topic=self.topic, partition=0)
            self.kafka.restart_node(prev_leader_node,
                                    wait_sec=5,
                                    clean_shutdown=False)

            # Wait long enough for previous leader to probably be awake again
            time.sleep(6)

    def validate(self):
        """Check that produced messages were consumed."""

        success = True
        msg = ""

        if len(set(self.consumed)) != len(self.consumed):
            # There are duplicates. This is ok, so report it but don't fail the test
            msg += "There are duplicate messages in the log\n"

        if not set(self.consumed).issuperset(set(self.acked)):
            # Every acked message must appear in the logs. I.e. consumed messages must be superset of acked messages.
            acked_minus_consumed = set(self.producer.acked) - set(
                self.consumed)
            success = False
            msg += "At least one acked message did not appear in the consumed messages. acked_minus_consumed: " + str(
                acked_minus_consumed)

        if not success:
            # Collect all the data logs if there was a failure
            self.mark_for_collect(self.kafka)

        assert success, msg

    def test_clean_shutdown(self):
        self.run_with_failure(self.clean_shutdown)

    def test_hard_shutdown(self):
        self.run_with_failure(self.hard_shutdown)

    def test_clean_bounce(self):
        self.run_with_failure(self.clean_bounce)

    def test_hard_bounce(self):
        self.run_with_failure(self.hard_bounce)
Example #16
class MiniTest(Test):
    def __init__(self, test_context):
        super(MiniTest, self).__init__(test_context=test_context)

        self.zk = ZookeeperService(test_context, 1)
        self.kafka = KafkaService(test_context, 1, self.zk)