예제 #1
0
 def _create_initial_topics(self):
     """Create every topic listed in ``self.topics``, authenticating with
     SASL credentials taken from the cluster's security configuration."""
     security = self.redpanda.security_config()
     cli = KafkaCliTools(self.redpanda,
                         user=security.get("sasl_plain_username"),
                         passwd=security.get("sasl_plain_password"))
     for spec in self.topics:
         self.logger.debug(f"Creating initial topic {spec}")
         cli.create_topic(spec)
예제 #2
0
 def setUp(self):
     """Bring up the mu-service stack (mount -> redpanda -> kafkakv) and
     create the KV store's backing topic."""
     self.mount_mu = MountMuService()
     self.redpanda_mu = RedpandaMuService()
     self.kafkakv_mu = KafkaKVMuService(self.redpanda_mu)
     pipeline = SeqMuService(
         [self.mount_mu, self.redpanda_mu, self.kafkakv_mu])
     self.service = MuServiceRunner(pipeline, self.test_context,
                                    self.num_nodes)
     self.service.start()
     proxy = RedpandaMuServiceServiceProxy(self.service, self.redpanda_mu)
     cli = KafkaCliTools(proxy, KafkaCliTools.VERSIONS[0])
     cli.create_topic(KafkaKVMuService.TOPIC)
예제 #3
0
    def test_produce_topic(self):
        """
        Create a topic and verify that pandaproxy can produce to it.

        Flow: produce to a not-yet-existing topic (expect per-partition
        errors), create the topic, wait for partition leadership, produce
        again (expect success), then consume each record back and check
        the payloads.
        """
        name = "pandaproxy-topic-{}".format(uuid.uuid4())
        self.logger.debug("Topic name %s", name)

        prev = set(self._get_topics())
        self.logger.debug("Existing topics %s", prev)
        # Check the topic name itself is absent. The previous
        # `prev.isdisjoint(name)` iterated the *characters* of `name`,
        # asserting that no single character was an existing topic name,
        # which is not the intended check.
        assert name not in prev

        data = '{"records": [{"value": "dmVjdG9yaXplZA==", "partition": 0},{"value": "cGFuZGFwcm94eQ==", "partition": 1},{"value": "bXVsdGlicm9rZXI=", "partition": 2}]}'

        self.logger.debug("Producing to non-existant topic")
        produce_result = self._produce_topic(name, data)
        for o in produce_result["offsets"]:
            # error_code 3 == UNKNOWN_TOPIC_OR_PARTITION; offset -1 marks a
            # failed append.
            assert o["error_code"] == 3
            assert o["offset"] == -1

        kc = KafkaCat(self.redpanda)

        self.logger.debug("Creating test topic")
        kafka_tools = KafkaCliTools(self.redpanda)
        kafka_tools.create_topic(
            TopicSpec(name=name, replication_factor=1, partition_count=3))

        self.logger.debug("Waiting for leaders to settle")
        # Poll metadata until every partition of our topic reports a leader
        # (-1 means no leader elected yet).
        has_leaders = False
        while not has_leaders:
            topics = kc.metadata()["topics"]
            maybe_leaders = True
            for t in topics:
                if t["topic"] == name:
                    for p in t["partitions"]:
                        if p["leader"] == -1:
                            maybe_leaders = False
            has_leaders = maybe_leaders
        # TODO:
        #  Despite the above test, Pandaproxy can still get back no leaders
        #  Query Pandaproxy metadata to see when leaders have settled
        #  The retry logic for produce should have sufficient time for this
        #  additional settle time.

        self.logger.debug("Producing to topic")
        produce_result = self._produce_topic(name, data)
        self.logger.debug("Producing to topic: %s", produce_result)
        for o in produce_result["offsets"]:
            assert o["offset"] == 1, f'error_code {o["error_code"]}'

        self.logger.debug(f"Consuming topic: {name}")
        assert kc.consume_one(name, 0, 1)["payload"] == "vectorized"
        assert kc.consume_one(name, 1, 1)["payload"] == "pandaproxy"
        assert kc.consume_one(name, 2, 1)["payload"] == "multibroker"
예제 #4
0
 def _create_topics(self,
                    names=None,
                    partitions=1,
                    replicas=1):
     """Create the given topics and return their names.

     :param names: iterable of topic names; defaults to one freshly
         generated name per call.
     :param partitions: partition count for each topic.
     :param replicas: replication factor for each topic.
     :return: the names that were created.
     """
     # NOTE: the previous default `names=create_topic_names(1)` was
     # evaluated once at definition time, so every defaulted call reused
     # the very same name(s). Generate a fresh default per call instead.
     if names is None:
         names = create_topic_names(1)
     self.logger.debug(f"Creating topics: {names}")
     kafka_tools = KafkaCliTools(self.redpanda)
     for name in names:
         kafka_tools.create_topic(
             TopicSpec(name=name,
                       partition_count=partitions,
                       replication_factor=replicas))
     return names
 def _create_topics(self,
                    names=None,
                    partitions=1,
                    replicas=1,
                    cleanup_policy=TopicSpec.CLEANUP_DELETE):
     """Create the given topics, verify they are listed, and return names.

     :param names: iterable of topic names; defaults to one freshly
         generated name per call.
     :param partitions: partition count for each topic.
     :param replicas: replication factor for each topic.
     :param cleanup_policy: cleanup policy applied to each topic.
     :return: the names that were created.
     """
     # NOTE: the previous default `names=create_topic_names(1)` was
     # evaluated once at definition time, so every defaulted call reused
     # the very same name(s). Generate a fresh default per call instead.
     if names is None:
         names = create_topic_names(1)
     self.logger.debug(f"Creating topics: {names}")
     kafka_tools = KafkaCliTools(self.redpanda)
     for name in names:
         kafka_tools.create_topic(
             TopicSpec(name=name,
                       partition_count=partitions,
                       replication_factor=replicas,
                       # Previously accepted but silently ignored; forward
                       # it so callers actually get the policy they asked
                       # for.
                       cleanup_policy=cleanup_policy))
     assert set(names).issubset(self._get_topics().json())
     return names
예제 #6
0
 def _create_topics(self,
                    names=None,
                    partitions=1,
                    replicas=1):
     """Create the given topics, wait until they are visible, and return
     their names.

     :param names: iterable of topic names; defaults to one freshly
         generated name per call.
     :param partitions: partition count for each topic.
     :param replicas: replication factor for each topic.
     :return: the names that were created.
     """
     # NOTE: the previous default `names=create_topic_names(1)` was
     # evaluated once at definition time, so every defaulted call reused
     # the very same name(s). Generate a fresh default per call instead.
     if names is None:
         names = create_topic_names(1)
     self.logger.debug(f"Creating topics: {names}")
     kafka_tools = KafkaCliTools(self.redpanda)
     for name in names:
         kafka_tools.create_topic(
             TopicSpec(name=name,
                       partition_count=partitions,
                       replication_factor=replicas))
     wait_until(lambda: set(names).issubset(self._get_topics().json()),
                timeout_sec=30,
                backoff_sec=1,
                err_msg="Topics failed to settle")
     return names
예제 #7
0
    def test_list_topics(self):
        """
        Create some topics and verify that pandaproxy lists them.
        """
        names = {
            "pandaproxy-topic-{}".format(uuid.uuid4())
            for _ in range(3)
        }
        self.logger.debug("Topic names %s", names)

        existing = set(self._get_topics())
        self.logger.debug("Existing topics %s", existing)
        assert existing.isdisjoint(names)

        self.logger.debug("Creating test topics")
        cli = KafkaCliTools(self.redpanda)
        for topic_name in names:
            cli.create_topic(topic_name, replication_factor=1)

        listed = set(self._get_topics())
        self.logger.debug("Current topics %s", listed)
        assert names.issubset(listed)
예제 #8
0
class EndToEndShadowIndexingBase(EndToEndTest):
    """Shared fixture for shadow-indexing end-to-end tests: a 3-broker
    RedpandaService with SI (tiered storage) settings and one test topic."""

    segment_size = 1048576  # 1 Mb
    s3_topic_name = "panda-topic"

    num_brokers = 3

    topics = (TopicSpec(
        name=s3_topic_name,
        partition_count=1,
        replication_factor=3,
    ), )

    def __init__(self, test_context, extra_rp_conf=None):
        super(EndToEndShadowIndexingBase,
              self).__init__(test_context=test_context)

        self.test_context = test_context
        # Use this class's own attributes instead of reaching into the
        # EndToEndShadowIndexingTest subclass: a base class must not depend
        # on a particular subclass existing.
        self.topic = self.s3_topic_name

        self.si_settings = SISettings(
            cloud_storage_reconciliation_interval_ms=500,
            cloud_storage_max_connections=5,
            log_segment_size=self.segment_size,  # 1MB
        )
        self.s3_bucket_name = self.si_settings.cloud_storage_bucket
        self.si_settings.load_context(self.logger, test_context)
        self.scale = Scale(test_context)

        self.redpanda = RedpandaService(context=self.test_context,
                                        num_brokers=self.num_brokers,
                                        si_settings=self.si_settings,
                                        extra_rp_conf=extra_rp_conf)
        self.kafka_tools = KafkaCliTools(self.redpanda)

    def setUp(self):
        self.redpanda.start()
        # `self.topics` so subclasses may override the topic set.
        for topic in self.topics:
            self.kafka_tools.create_topic(topic)

    def tearDown(self):
        # NOTE(review): `self.s3_client` is not assigned in this class —
        # presumably provided by EndToEndTest/SISettings; confirm.
        self.s3_client.empty_bucket(self.s3_bucket_name)
예제 #9
0
    def start(self):
        """Start the service, wait for every broker to join the cluster,
        sanity-check the initial on-disk state, then create the topics
        registered in ``self._topics``."""
        super(RedpandaService, self).start()
        self.logger.info("Waiting for all brokers to join cluster")

        def _membership_stable():
            joined = {node for node in self.nodes if self.registered(node)}
            return joined == set(self.nodes)

        wait_until(_membership_stable,
                   timeout_sec=30,
                   backoff_sec=1,
                   err_msg="Cluster membership did not stabilize")

        # A freshly started node should contain exactly the "redpanda"
        # namespace holding only the internal controller/kvstore topics.
        for node in self.storage().nodes:
            assert set(node.ns) == {"redpanda"}
            assert set(node.ns["redpanda"].topics) == {"controller", "kvstore"}

        cli = KafkaCliTools(self)
        for topic, cfg in self._topics.items():
            self.logger.debug("Creating initial topic %s / %s", topic, cfg)
            cli.create_topic(topic, **cfg)
예제 #10
0
    def test_scram(self):
        """Verify SASL/SCRAM authentication: a correctly authenticated
        client can create and list topics, while a client presenting a bad
        password is rejected."""
        topic = TopicSpec()
        client = KafkaCliTools(self.redpanda,
                               user="******",
                               passwd="redpanda_pass")
        client.create_topic(topic)

        client = KafkaCliTools(self.redpanda,
                               user="******",
                               passwd="bad_password")
        try:
            client.list_topics()
        except Exception:
            # Expected: the broker must reject the bad password.
            pass
        else:
            # The failing assert must live outside the try block: in the
            # original form `assert False` raised AssertionError inside the
            # try and was swallowed by `except Exception`, so this negative
            # test could never actually fail.
            assert False, "Listing topics should fail"

        client = KafkaCliTools(self.redpanda,
                               user="******",
                               passwd="redpanda_pass")
        topics = client.list_topics()
        print(topics)
        assert topic.name in topics
예제 #11
0
 def test_describe_topics(self):
     """Create a topic and check that describe output reflects its
     partition count and replication factor."""
     cli = KafkaCliTools(self.redpanda)
     cli.create_topic("topic", partitions=2, replication_factor=3)
     described = cli.describe_topics()
     for expected in ("partition_count=2", "replication_factor=3"):
         assert expected in described
예제 #12
0
class EndToEndShadowIndexingTest(EndToEndTest):
    """End-to-end shadow-indexing (tiered storage) test: a 3-broker
    RedpandaService with cloud storage enabled against a minio S3 endpoint,
    producing to one topic and validating reads after local segment
    removal."""

    # Local log segment size; small so a modest amount of produced data
    # rolls many segments.
    segment_size = 1048576  # 1 Mb
    # Fixed minio endpoint/credentials used by both the brokers and the
    # test's own S3 client.
    s3_host_name = "minio-s3"
    s3_access_key = "panda-user"
    s3_secret_key = "panda-secret"
    s3_region = "panda-region"
    s3_topic_name = "panda-topic"
    # One topic, one partition, replicated across all three brokers.
    topics = (TopicSpec(
        name=s3_topic_name,
        partition_count=1,
        replication_factor=3,
    ), )

    def __init__(self, test_context):
        """Configure the cluster's cloud-storage settings and an S3 client
        pointed at the same minio endpoint."""
        super(EndToEndShadowIndexingTest,
              self).__init__(test_context=test_context)

        # Unique bucket per run so repeated runs don't collide.
        self.s3_bucket_name = f"panda-bucket-{uuid.uuid1()}"
        self.topic = EndToEndShadowIndexingTest.s3_topic_name
        # Extra redpanda config: enable tiered storage for both reads and
        # writes against the minio endpoint defined above.
        self._extra_rp_conf = dict(
            cloud_storage_enabled=True,
            cloud_storage_enable_remote_read=True,
            cloud_storage_enable_remote_write=True,
            cloud_storage_access_key=EndToEndShadowIndexingTest.s3_access_key,
            cloud_storage_secret_key=EndToEndShadowIndexingTest.s3_secret_key,
            cloud_storage_region=EndToEndShadowIndexingTest.s3_region,
            cloud_storage_bucket=self.s3_bucket_name,
            cloud_storage_disable_tls=True,
            cloud_storage_api_endpoint=EndToEndShadowIndexingTest.s3_host_name,
            cloud_storage_api_endpoint_port=9000,
            cloud_storage_reconciliation_interval_ms=500,
            cloud_storage_max_connections=5,
            log_segment_size=EndToEndShadowIndexingTest.segment_size,  # 1MB
        )

        self.scale = Scale(test_context)
        self.redpanda = RedpandaService(
            context=test_context,
            num_brokers=3,
            extra_rp_conf=self._extra_rp_conf,
        )

        self.kafka_tools = KafkaCliTools(self.redpanda)
        # Test-side S3 client for bucket setup/teardown; talks to the same
        # minio endpoint as the brokers.
        self.s3_client = S3Client(
            region=EndToEndShadowIndexingTest.s3_region,
            access_key=EndToEndShadowIndexingTest.s3_access_key,
            secret_key=EndToEndShadowIndexingTest.s3_secret_key,
            endpoint=f"http://{EndToEndShadowIndexingTest.s3_host_name}:9000",
            logger=self.logger,
        )

    def setUp(self):
        """Ensure an empty bucket exists, then start the cluster and create
        the test topic(s)."""
        self.s3_client.empty_bucket(self.s3_bucket_name)
        self.s3_client.create_bucket(self.s3_bucket_name)
        self.redpanda.start()
        for topic in EndToEndShadowIndexingTest.topics:
            self.kafka_tools.create_topic(topic)

    def tearDown(self):
        """Drain the bucket so the next run starts clean."""
        self.s3_client.empty_bucket(self.s3_bucket_name)

    @cluster(num_nodes=5)
    def test_write(self):
        """Write at least 10 segments, set retention policy to leave only 5
        segments, wait for segments removal, consume data and run validation,
        that everything that is acked is consumed."""
        self.start_producer()
        produce_until_segments(
            redpanda=self.redpanda,
            topic=self.topic,
            partition_idx=0,
            count=10,
        )

        # Shrink retention to 5 segments' worth of bytes so older segments
        # become eligible for local deletion (they remain in cloud storage).
        self.kafka_tools.alter_topic_config(
            self.topic,
            {
                TopicSpec.PROPERTY_RETENTION_BYTES:
                5 * EndToEndShadowIndexingTest.segment_size,
            },
        )
        # NOTE(review): docstring says "leave only 5 segments" but waits for
        # count=6 — presumably 5 closed segments plus the active one;
        # confirm against wait_for_segments_removal semantics.
        wait_for_segments_removal(redpanda=self.redpanda,
                                  topic=self.topic,
                                  partition_idx=0,
                                  count=6)

        self.start_consumer()
        self.run_validation()
예제 #13
0
 def create_topic(self, specs):
     """Create one or more topics; accepts a single TopicSpec or an
     iterable of TopicSpec objects."""
     topic_specs = [specs] if isinstance(specs, TopicSpec) else specs
     cli = KafkaCliTools(self._redpanda)
     for topic_spec in topic_specs:
         cli.create_topic(topic_spec)
예제 #14
0
 def test_create_topic(self, version):
     """Create topics via a specific CLI version and verify they all show
     up in the topic listing."""
     cli = KafkaCliTools(self.redpanda, version)
     names = ["v{}.{}".format(version, idx) for idx in range(3)]
     for name in names:
         cli.create_topic(name)
     assert set(names).issubset(set(cli.list_topics()))