Example 1
class WasmMultiInputTopicIdentityTest(WasmIdentityTest):
    """
    In this test spec there are three input topics and three coprocessors.
    Each coprocessor consumes from all three input topics and produces
    to one output topic, making three materialized topics per script.
    """
    topics = (
        TopicSpec(partition_count=3,
                  replication_factor=3,
                  cleanup_policy=TopicSpec.CLEANUP_DELETE),
        TopicSpec(partition_count=3,
                  replication_factor=3,
                  cleanup_policy=TopicSpec.CLEANUP_DELETE),
        TopicSpec(partition_count=3,
                  replication_factor=3,
                  cleanup_policy=TopicSpec.CLEANUP_DELETE),
    )

    def __init__(self, test_context):
        super(WasmMultiInputTopicIdentityTest,
              self).__init__(test_context, num_records=10240, record_size=1024)

    def wasm_test_plan(self):
        """
        The materialized logs:
        [
          itopic[0]._script_a_output_,
          itopic[1]._script_a_output_,
          itopic[2]._script_a_output_,
          itopic[0]._script_b_output_,
          itopic[1]._script_b_output_,
          itopic[2]._script_b_output_,
          itopic[0]._script_c_output_,
          itopic[1]._script_c_output_,
          itopic[2]._script_c_output_,
        ]
        These should exist by the test's end and be identical to their respective input logs.
        """
        return [
            WasmScript(inputs=self.wasm_test_input(),
                       outputs=['script_a_output'],
                       script=WasmTemplateRepository.IDENTITY_TRANSFORM),
            WasmScript(inputs=self.wasm_test_input(),
                       outputs=['script_b_output'],
                       script=WasmTemplateRepository.IDENTITY_TRANSFORM),
            WasmScript(inputs=self.wasm_test_input(),
                       outputs=['script_c_output'],
                       script=WasmTemplateRepository.IDENTITY_TRANSFORM)
        ]

    @cluster(num_nodes=6, log_allow_list=WASM_LOG_ALLOW_LIST)
    def verify_materialized_topics_test(self):
        self.verify_results(materialized_result_set_compare)
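The docstring above enumerates nine materialized logs, one per (input topic, script output) pair. As a rough illustration of that naming, here is a minimal sketch that builds the same list, assuming the `<input>._<output>_` convention shown in the docstring; the helper name is hypothetical and not part of the test harness.

def expected_materialized_topics(input_topics, script_outputs):
    # Cross every input topic with every script output, using the
    # "<input>._<output>_" naming shown in the docstring above.
    return [
        f"{itopic}._{output}_" for output in script_outputs
        for itopic in input_topics
    ]

# expected_materialized_topics(["itopic0", "itopic1", "itopic2"],
#                              ["script_a_output", "script_b_output",
#                               "script_c_output"])
# -> nine names, e.g. "itopic0._script_a_output_"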
Example 2
class WasmIdentityTest(WasmTest):
    topics = (TopicSpec(partition_count=3,
                        replication_factor=3,
                        cleanup_policy=TopicSpec.CLEANUP_DELETE), )

    def __init__(self, test_context, num_records=1024, record_size=1024):
        super(WasmIdentityTest, self).__init__(test_context, extra_rp_conf={})
        self._num_records = num_records
        self._record_size = record_size
        assert len(self.topics) >= 1

    def wasm_test_plan(self):
        input_topic = self.topics[0].name
        mapped_topic = "myoutputtopic"
        output_topic = construct_materialized_topic(input_topic, mapped_topic)

        # The identity transform produces one identical record on the output
        # topic for each input record. The result should be a 1:1 mapping
        # between the source and destination topics, which should be identical
        # when compared.
        basic_script = WasmScript(
            inputs=[(input_topic, (self._num_records, self._record_size))],
            outputs=[(output_topic, self._num_records)],
            script=WasmTemplateRepository.IDENTITY_TRANSFORM)

        return [basic_script]

    @cluster(num_nodes=3)
    def ensure_identical_output_test(self):
        input_results, output_results = self._start(self.topics,
                                                    self.wasm_test_plan())
        assert input_results.num_records() == self._num_records
        if not materialized_result_set_compare(input_results, output_results):
            raise Exception(
                "Expected all records across topics to be equivalent")
Example 3
    def test_bootstrapping_after_move(self):
        """
        Move partitions with active consumer / producer
        """
        self.start_redpanda(num_nodes=3)
        spec = TopicSpec(name="topic", partition_count=3, replication_factor=3)
        self.client().create_topic(spec)
        self.topic = spec.name
        self.start_producer(1)
        self.start_consumer(1)
        self.await_startup()
        # execute single move
        self._move_and_verify()
        self.run_validation(enable_idempotence=False, consumer_timeout_sec=45)

        # snapshot offsets
        rpk = RpkTool(self.redpanda)
        partitions = rpk.describe_topic(spec.name)
        offset_map = {}
        for p in partitions:
            offset_map[p.id] = p.high_watermark

        # restart all the nodes
        self.redpanda.restart_nodes(self.redpanda.nodes)

        def offsets_are_recovered():
            return all([
                offset_map[p.id] == p.high_watermark
                for p in rpk.describe_topic(spec.name)
            ])

        wait_until(offsets_are_recovered, 30, 2)
Example 4
    def test_decommissioning_working_node(self):
        self.start_redpanda(num_nodes=4)
        topics = []
        for partition_count in range(1, 5):
            for replication_factor in (3, 3):
                name = f"topic{len(topics)}"
                spec = TopicSpec(name=name,
                                 partition_count=partition_count,
                                 replication_factor=replication_factor)
                topics.append(spec)

        for spec in topics:
            self.client().create_topic(spec)
            self.topic = spec.name

        self.start_producer(1)
        self.start_consumer(1)
        self.await_startup()
        admin = Admin(self.redpanda)

        brokers = admin.get_brokers()
        to_decommission = random.choice(brokers)
        self.logger.info(f"decommissioning node: {to_decommission}", )
        admin.decommission_broker(to_decommission['node_id'])

        def node_removed():
            brokers = admin.get_brokers()
            for b in brokers:
                if b['node_id'] == to_decommission['node_id']:
                    return False
            return True

        wait_until(node_removed, timeout_sec=120, backoff_sec=2)

        self.run_validation(enable_idempotence=False, consumer_timeout_sec=45)
Example 5
    def test_recovery_after_multiple_restarts(self):
        self.start_redpanda(3, extra_rp_conf=self._extra_rp_conf)
        spec = TopicSpec(partition_count=60, replication_factor=3)

        DefaultClient(self.redpanda).create_topic(spec)
        self.topic = spec.name

        rpk = RpkTool(self.redpanda)
        rpk.alter_topic_config(spec.name, 'redpanda.remote.write', 'true')
        rpk.alter_topic_config(spec.name, 'redpanda.remote.read', 'true')

        self.start_producer(1, throughput=100)
        self.start_consumer(1)
        self.await_startup()

        def no_under_replicated_partitions():
            metric_sample = self.redpanda.metrics_sample("under_replicated")
            for s in metric_sample.samples:
                if s.value > 0:
                    return False
            return True

        # restart all the nodes and wait for recovery
        for i in range(0, 10):
            for n in self.redpanda.nodes:
                self.redpanda.signal_redpanda(n)
                self.redpanda.start_node(n)
            wait_until(no_under_replicated_partitions, 30, 2)

        self.run_validation(enable_idempotence=False,
                            producer_timeout_sec=60,
                            consumer_timeout_sec=180)
Example 6
    def test_cross_shard(self):
        """
        Test the interaction between shadow indexing and partition movement.
        Move partitions with SI enabled between shards.
        """
        throughput, records, moves, partitions = self._get_scale_params()

        self.start_redpanda(num_nodes=3)
        spec = TopicSpec(name="topic",
                         partition_count=partitions,
                         replication_factor=3)
        self.client().create_topic(spec)
        self.topic = spec.name
        self.start_producer(1, throughput=throughput)
        self.start_consumer(1)
        self.await_startup()

        admin = Admin(self.redpanda)
        topic = self.topic
        partition = 0

        for _ in range(moves):
            assignments = self._get_assignments(admin, topic, partition)
            for a in assignments:
                # Bounce between core 0 and 1
                a['core'] = (a['core'] + 1) % 2
            admin.set_partition_replicas(topic, partition, assignments)
            self._wait_post_move(topic, partition, assignments, 360)

        self.run_validation(enable_idempotence=False,
                            consumer_timeout_sec=45,
                            min_records=records)
Example 7
    def test_move_consumer_offsets_intranode(self):
        """
        Exercise moving the consumer_offsets/0 partition between shards
        within the same nodes.  This reproduces certain bugs in the special
        handling of this topic.
        """
        throughput, records, moves = self._get_scale_params()

        self.start_redpanda(num_nodes=3,
                            extra_rp_conf={"default_topic_replications": 3})
        spec = TopicSpec(name="topic", partition_count=3, replication_factor=3)
        self.client().create_topic(spec)
        self.topic = spec.name
        self.start_producer(1, throughput=throughput)
        self.start_consumer(1)
        self.await_startup()

        admin = Admin(self.redpanda)
        topic = "__consumer_offsets"
        partition = 0

        for _ in range(moves):
            assignments = self._get_assignments(admin, topic, partition)
            for a in assignments:
                # Bounce between core 0 and 1
                a['core'] = (a['core'] + 1) % 2
            admin.set_partition_replicas(topic, partition, assignments)
            self._wait_post_move(topic, partition, assignments, 360)

        self.run_validation(enable_idempotence=False,
                            consumer_timeout_sec=45,
                            min_records=records)
Example 8
    def test_node_resize(self):
        # Create a topic and write some data to make sure the cluster
        # is all up & initialized, and that subsequent checks are happening
        # with some partitions actually assigned to shards.
        self._client.create_topic(
            TopicSpec(name="test", partition_count=10, replication_factor=3))
        producer = RpkProducer(context=self.test_context,
                               redpanda=self.redpanda,
                               topic="test",
                               msg_size=4096,
                               msg_count=1000,
                               acks=-1)
        producer.start()
        producer.wait()

        # Choose one node from the cluster to exercise checks on.
        target_node = self.redpanda.nodes[0]

        # Attempt to decrease CPU count relative to initial: redpanda should fail to start
        self._restart_with_num_cpus(node=target_node,
                                    num_cpus=self.INITIAL_NUM_CPUS - 1,
                                    expect_fail=True)

        # Increase CPU count: redpanda should accept this
        self._restart_with_num_cpus(node=target_node,
                                    num_cpus=self.INITIAL_NUM_CPUS + 1,
                                    expect_fail=False)

        # Now decrease back to the original core count: this should fail, because
        # the earlier increase raised the high-water mark above the original count
        self._restart_with_num_cpus(node=target_node,
                                    num_cpus=self.INITIAL_NUM_CPUS,
                                    expect_fail=True)
Example 9
    def test_simple_end_to_end(self, source_type):
        # start brokers
        self.start_brokers(source_type=source_type)
        # start mirror maker
        self.mirror_maker = MirrorMaker2(self.test_context,
                                         num_nodes=1,
                                         source_cluster=self.source_broker,
                                         target_cluster=self.redpanda)
        topics = []
        for i in range(0, 10):
            topics.append(
                TopicSpec(partition_count=random.randint(1, 10),
                          retention_bytes=random.randint(100000000, 300000000),
                          retention_ms=random.randint(1 * 3600000,
                                                      10 * 3600000)))
        self.source_client.create_topic(topics)
        self.mirror_maker.start()
        # start source producer & target consumer
        self.start_workload()

        self.run_validation(consumer_timeout_sec=120)
        self.mirror_maker.stop()
        target_client = DefaultClient(self.redpanda)
        for t in topics:
            desc = target_client.describe_topic(t.name)
            self.logger.debug(f'source topic: {t}, target topic: {desc}')
            assert len(desc.partitions) == t.partition_count
Example 10
class WasmIdentityTest(WasmTest):
    topics = (TopicSpec(partition_count=3,
                        replication_factor=3,
                        cleanup_policy=TopicSpec.CLEANUP_DELETE), )

    def __init__(self,
                 test_context,
                 extra_rp_conf=None,
                 num_records=1024,
                 record_size=1024):
        super(WasmIdentityTest, self).__init__(test_context,
                                               extra_rp_conf=extra_rp_conf
                                               or {},
                                               record_size=record_size)
        self._num_records = num_records

    def wasm_inputs_throughput(self):
        """
        Producer parameters across all input topics
        """
        return {topic: self._num_records for topic in self.wasm_test_input()}

    def wasm_outputs_throughput(self):
        """
        Consumer parameters across all output topics
        """
        return {topic: self._num_records for topic in self.wasm_test_output()}

    @cluster(num_nodes=4, log_allow_list=WASM_LOG_ALLOW_LIST)
    def verify_materialized_topics_test(self):
        self.verify_results(materialized_result_set_compare)
Example 11
    def describe_topic(self, topic):
        self._redpanda.logger.debug("Describing topics")
        args = ["--describe", "--topic", topic]
        res = self._run("kafka-topics.sh", args)
        self._redpanda.logger.debug("Describe topics result: %s", res)
        if res is None:
            raise RuntimeError(f"Error describing topic {topic}")

        # parse/extract the topic configuration
        configs = None
        for part in [part.strip() for part in res.split("\t")]:
            if part.startswith("Configs:"):
                configs = part[8:]

        def maybe_int(key, value):
            if key in [
                    "partition_count", "replication_factor", "retention_ms",
                    "retention_bytes", 'segment_bytes'
            ]:
                value = int(value)
            return value

        def fix_key(key):
            return key.replace(".", "_")

        self._redpanda.logger.debug(f"Describe topics configs: {configs}")
        configs = [config.split("=") for config in configs.split(",")]
        configs = {fix_key(kv[0].strip()): kv[1].strip() for kv in configs}
        configs = {kv[0]: maybe_int(kv[0], kv[1]) for kv in configs.items()}
        return TopicSpec(name=topic, **configs)
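The parsing above splits the kafka-topics.sh describe output on tabs, keeps the part that starts with "Configs:", and converts the key=value pairs into TopicSpec keyword arguments. Here is a standalone sketch of the same transformation on an illustrative Configs fragment (the sample string is made up, not captured output):

sample = "Configs:cleanup.policy=delete,retention.bytes=1024,segment.bytes=2048"
configs = sample[len("Configs:"):]
pairs = [config.split("=") for config in configs.split(",")]
parsed = {k.strip().replace(".", "_"): v.strip() for k, v in pairs}
# parsed == {'cleanup_policy': 'delete', 'retention_bytes': '1024',
#            'segment_bytes': '2048'}; maybe_int() then turns the numeric
# values into ints before they are passed to TopicSpec(name=topic, **parsed).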
Example 12
    def _prime_env(self):
        output_topic = "identity_output2"
        self.start_redpanda_nodes(3)
        spec = TopicSpec(name="topic2",
                         partition_count=3,
                         replication_factor=3)
        self.client().create_topic(spec)
        self._deploy_identity_copro([spec.name], [output_topic])
        self.topic = spec.name
        self.start_producer(num_nodes=1, throughput=10000)
        self.start_consumer(1)
        self.await_startup(min_records=500)
        materialized_topic = construct_materialized_topic(
            spec.name, output_topic)

        def topic_created():
            metadata = self.client().describe_topics()
            self.logger.info(f"metadata: {metadata}")
            return any([x['topic'] == materialized_topic for x in metadata])

        wait_until(topic_created, timeout_sec=30, backoff_sec=2)

        self._start_mconsumer(materialized_topic)
        t, p = self._grab_input(spec.name)
        return {
            'topic': t,
            'partition': p,
            'materialized_topic': materialized_topic
        }
Example 13
    def test_adding_nodes_to_cluster(self):
        self.redpanda = RedpandaService(
            self.test_context, 3, extra_rp_conf={"group_topic_partitions": 1})
        # start single node cluster
        self.redpanda.start(nodes=[self.redpanda.nodes[0]])
        # create some topics
        topics = []
        # include __consumer_offsets topic replica
        total_replicas = 1
        for partition_count in range(1, 5):
            name = f"topic{len(topics)}"
            spec = TopicSpec(name=name,
                             partition_count=partition_count,
                             replication_factor=1)
            total_replicas += partition_count
            topics.append(spec)

        for spec in topics:
            DefaultClient(self.redpanda).create_topic(spec)
            self.topic = spec.name

        self.start_producer(1)
        self.start_consumer(1)
        self.await_startup()
        # add second node
        self.redpanda.start_node(self.redpanda.nodes[1])
        kafkacat = KafkaCat(self.redpanda)

        def _replicas_per_node():
            node_replicas = {}
            md = kafkacat.metadata()
            self.redpanda.logger.info(f"metadata: {md}")
            for topic in md['topics']:
                for p in topic['partitions']:
                    for r in p['replicas']:
                        id = r['id']
                        if id not in node_replicas:
                            node_replicas[id] = 0
                        node_replicas[id] += 1

            return node_replicas

        def partitions_rebalanced():
            per_node = _replicas_per_node()
            self.redpanda.logger.info(f"replicas per node: {per_node}")
            if len(per_node) < len(self.redpanda.started_nodes()):
                return False

            replicas = sum(per_node.values())
            if replicas != total_replicas:
                return False

            return all(p[1] > 1 for p in per_node.items())

        wait_until(partitions_rebalanced, timeout_sec=30, backoff_sec=1)
        # add third node
        self.redpanda.start_node(self.redpanda.nodes[2])
        wait_until(partitions_rebalanced, timeout_sec=30, backoff_sec=1)

        self.run_validation(enable_idempotence=False, consumer_timeout_sec=45)
Example 14
    def stress_test(self):
        for i in range(10):
            spec = TopicSpec(partition_count=2,
                             cleanup_policy=TopicSpec.CLEANUP_COMPACT)
            topic_name = spec.name
            self.client().create_topic(spec)

            producer = RpkProducer(self.test_context, self.redpanda,
                                   topic_name, 1024, 100000)
            producer.start()

            metrics = [
                MetricCheck(self.logger, self.redpanda, n,
                            'vectorized_storage_log_compacted_segment_total',
                            {}, sum) for n in self.redpanda.nodes
            ]

            def check_compaction():
                return all([
                    m.evaluate([
                        ('vectorized_storage_log_compacted_segment_total',
                         lambda a, b: b > 3)
                    ]) for m in metrics
                ])

            wait_until(check_compaction,
                       timeout_sec=120,
                       backoff_sec=5,
                       err_msg="Segments were not compacted")

            self.client().delete_topic(topic_name)

            try:
                producer.stop()
            except:
                # Should ignore exceptions from rpk
                pass
            producer.free()

            def topic_storage_purged():
                storage = self.redpanda.storage()
                return all(
                    map(lambda n: topic_name not in n.ns["kafka"].topics,
                        storage.nodes))

            try:
                wait_until(topic_storage_purged,
                           timeout_sec=60,
                           backoff_sec=2,
                           err_msg="Topic storage was not removed")

            except:
                # On errors, dump listing of the storage location
                for node in self.redpanda.nodes:
                    self.logger.error(f"Storage listing on {node.name}:")
                    for line in node.account.ssh_capture(
                            f"find {self.redpanda.DATA_DIR}"):
                        self.logger.error(line.strip())

                raise
Example 15
    def test_availability_when_one_node_failed(self):
        self.redpanda = RedpandaService(
            self.test_context,
            3,
            KafkaCliTools,
            extra_rp_conf={
                "enable_auto_rebalance_on_node_add": True,
                "group_topic_partitions": 1,
                "default_topic_replications": 3,
            })

        self.redpanda.start()
        spec = TopicSpec(name="test-topic",
                         partition_count=6,
                         replication_factor=3)

        self.redpanda.create_topic(spec)
        self.topic = spec.name

        self.start_producer(1, throughput=10000)
        self.start_consumer(1)
        self.await_startup()
        # start failure injector with default parameters
        self.start_finjector()

        self.validate_records()
Example 16
    def test_enable_sasl_live(self):
        """
        Verify that when enable_sasl is set to true at runtime, subsequent
        unauthenticated kafka clients are rejected.
        """

        unauthenticated_client = PythonLibrdkafka(self.redpanda)
        topic = TopicSpec(replication_factor=1)
        unauthenticated_client.create_topic(topic)
        assert len(unauthenticated_client.topics()) == 1

        # Switch on authentication
        admin = Admin(self.redpanda)
        admin.patch_cluster_config(upsert={'enable_sasl': True})

        # An unauthenticated client should be rejected
        try:
            unauthenticated_client.topics()
        except Exception as e:
            self.logger.exception(f"Unauthenticated: {e}")
        else:
            self.logger.error(
                "Unauthenticated client should have been rejected")
            assert False

        # Switch off authentication
        admin.patch_cluster_config(upsert={'enable_sasl': False})

        # An unauthenticated client should be accepted again
        assert len(unauthenticated_client.topics()) == 1
Example 17
    def describe_topic(self, topic):
        self._redpanda.logger.debug("Describing topics")
        args = ["--describe", "--topic", topic]
        res = self._run("kafka-topics.sh", args)
        self._redpanda.logger.debug("Describe topics result: %s", res)

        # parse/extract the topic configuration
        configs = None
        for part in [part.strip() for part in res.split("\t")]:
            if part.startswith("Configs:"):
                configs = part[8:]

        def maybe_int(key, value):
            if key in ["partition_count", "replication_factor"]:
                value = int(value)
            return value

        def fix_key(key):
            if key == "cleanup.policy":
                return "cleanup_policy"
            return key

        self._redpanda.logger.debug(f"Describe topics configs: {configs}")
        configs = [config.split("=") for config in configs.split(",")]
        configs = {fix_key(kv[0].strip()): kv[1].strip() for kv in configs}
        configs = {kv[0]: maybe_int(kv[0], kv[1]) for kv in configs.items()}
        return TopicSpec(name=topic, **configs)
Example 18
class WasmMultiScriptIdentityTest(WasmIdentityTest):
    """
    In this test spec there is one input topic and three coprocessors.
    Each coprocessor consumes from the same sole input topic and produces
    to one output topic.
    """
    topics = (TopicSpec(partition_count=3,
                        replication_factor=3,
                        cleanup_policy=TopicSpec.CLEANUP_DELETE), )

    def __init__(self, test_context, num_records=1024, record_size=1024):
        super(WasmMultiScriptIdentityTest,
              self).__init__(test_context,
                             num_records=num_records,
                             record_size=record_size)

    def wasm_test_outputs(self):
        """
        The materialized logs:
        [
          itopic.$script_a_output$,
          itopic.$script_b_output$,
          itopic.$script_c_output$,
        ]
        These should exist by the test's end and be identical to their respective input logs.
        """
        return [["sou_a"], ["sou_b"], ["sou_c"]]
Example 19
    def test_fetch_after_committed_offset_was_removed(self,
                                                      transactions_enabled):
        """
        Test fetching when consumer offset was deleted by retention
        """

        self.redpanda._extra_rp_conf[
            "enable_transactions"] = transactions_enabled
        self.redpanda._extra_rp_conf[
            "enable_idempotence"] = transactions_enabled
        self.redpanda.start()

        topic = TopicSpec(partition_count=1,
                          replication_factor=3,
                          cleanup_policy=TopicSpec.CLEANUP_DELETE)
        self.client().create_topic(topic)

        kafka_tools = KafkaCliTools(self.redpanda)

        # produce until the topic has accumulated 10 segments
        produce_until_segments(
            self.redpanda,
            topic=topic.name,
            partition_idx=0,
            count=10,
        )
        consumer_group = 'test'
        rpk = RpkTool(self.redpanda)

        def consume(n=1):
            out = rpk.consume(topic.name, group=consumer_group, n=n)
            split = out.split('}')
            split = filter(lambda s: "{" in s, split)
            return map(lambda s: json.loads(s + "}"), split)

        # consume from the beginning
        msgs = consume(10)
        last = list(msgs).pop()
        offset = last['offset']

        # change retention bytes so that older segments are removed
        kafka_tools.alter_topic_config(
            topic.name, {
                TopicSpec.PROPERTY_RETENTION_BYTES: 2 * self.segment_size,
            })

        wait_for_segments_removal(self.redpanda,
                                  topic.name,
                                  partition_idx=0,
                                  count=5)

        partitions = list(rpk.describe_topic(topic.name))
        p = partitions[0]
        assert p.start_offset > offset
        # consume from an offset that no longer exists;
        # the previously committed offset has already been removed
        out = list(consume(1))
        assert out[0]['offset'] == p.start_offset
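The consume helper above rebuilds individual JSON records from rpk's concatenated output by splitting on '}' and re-appending the brace. The same reconstruction on a made-up two-record string (the real rpk output format is not reproduced here):

import json

out = '{"topic": "t", "offset": 0}\n{"topic": "t", "offset": 1}\n'
split = out.split('}')
split = filter(lambda s: "{" in s, split)
records = [json.loads(s + "}") for s in split]
assert [r["offset"] for r in records] == [0, 1]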
Example 20
class WasmMultiScriptIdentityTest(WasmIdentityTest):
    """
    In this test spec there is one input topic and three coprocessors.
    Each coprocessor consumes from the same sole input topic and produces
    to one output topic.
    """
    topics = (TopicSpec(partition_count=3,
                        replication_factor=3,
                        cleanup_policy=TopicSpec.CLEANUP_DELETE), )

    def __init__(self, test_context):
        super(WasmMultiScriptIdentityTest, self).__init__(test_context,
                                                          num_records=10240,
                                                          record_size=1024)

    def wasm_test_plan(self):
        """
        The materialized logs:
        [
          itopic._script_a_output_,
          itopic._script_b_output_,
          itopic._script_c_output_,
        ]
        """
        return [
            WasmScript(inputs=self.wasm_test_input(),
                       outputs=["sou_a", "sou_b", "sou_c"],
                       script=WasmTemplateRepository.IDENTITY_TRANSFORM)
        ]
Example 21
class ListGroupsReplicationFactorTest(RedpandaTest):
    """
    We encountered an issue where listing groups would return a
    coordinator-loading error when the underlying group membership topic had a
    replication factor of 3 (we had not noticed this earlier because the
    replication factor defaulted to 1). It isn't clear whether this is specific
    to `kcl`, but that is the client we encountered the issue with.
    """
    topics = (TopicSpec(), )

    def __init__(self, test_context):
        extra_rp_conf = dict(default_topic_replications=3, )

        super(ListGroupsReplicationFactorTest,
              self).__init__(test_context=test_context,
                             num_brokers=3,
                             extra_rp_conf=extra_rp_conf)

    @cluster(num_nodes=3)
    def test_list_groups(self):
        kcl = KCL(self.redpanda)
        kcl.produce(self.topic, "msg\n")
        kcl.consume(self.topic, n=1, group="g0")
        kcl.list_groups()
        out = kcl.list_groups()
        assert "COORDINATOR_LOAD_IN_PROGRESS" not in out
Example 22
    def test_moving_not_fully_initialized_partition(self):
        """
        Move partition before first leader is elected
        """
        self.start_redpanda(num_nodes=3)

        hb = HoneyBadger()
        # if failure injector is not enabled simply skip this test
        if not hb.is_enabled(self.redpanda.nodes[0]):
            return

        for n in self.redpanda.nodes:
            hb.set_exception(n, 'raftgen_service::failure_probes', 'vote')
        topic = "topic-1"
        partition = 0
        spec = TopicSpec(name=topic, partition_count=1, replication_factor=3)
        self.redpanda.create_topic(spec)
        admin = Admin(self.redpanda)

        # choose a random topic-partition
        self.logger.info(f"selected topic-partition: {topic}-{partition}")

        # get the partition's replica set, including core assignments. the kafka
        # api doesn't expose core information, so we use the redpanda admin api.
        assignments = self._get_assignments(admin, topic, partition)
        self.logger.info(f"assignments for {topic}-{partition}: {assignments}")

        brokers = admin.get_brokers()
        # replace all node cores in assignment
        for assignment in assignments:
            for broker in brokers:
                if broker['node_id'] == assignment['node_id']:
                    assignment['core'] = random.randint(
                        0, broker["num_cores"] - 1)
        self.logger.info(
            f"new assignments for {topic}-{partition}: {assignments}")

        admin.set_partition_replicas(topic, partition, assignments)

        def status_done():
            info = admin.get_partitions(topic, partition)
            self.logger.info(
                f"current assignments for {topic}-{partition}: {info}")
            converged = self._equal_assignments(info["replicas"], assignments)
            return converged and info["status"] == "done"

        # unset failures
        for n in self.redpanda.nodes:
            hb.unset_failures(n, 'raftgen_service::failure_probes', 'vote')
        # wait until redpanda reports complete
        wait_until(status_done, timeout_sec=30, backoff_sec=1)

        def derived_done():
            info = self._get_current_partitions(admin, topic, partition)
            self.logger.info(
                f"derived assignments for {topic}-{partition}: {info}")
            return self._equal_assignments(info, assignments)

        wait_until(derived_done, timeout_sec=30, backoff_sec=1)
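The move tests rely on helpers such as _equal_assignments to decide when the reported replicas match the requested ones. A hypothetical sketch of such a comparison, based only on the node_id/core fields used above (the real helper's implementation is not shown in these examples):

def assignments_equal(current, requested):
    # Compare replica placements as unordered sets of (node_id, core) pairs.
    as_pairs = lambda replicas: {(r['node_id'], r['core']) for r in replicas}
    return as_pairs(current) == as_pairs(requested)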
Example 23
class KafkaStreamsJsonToAvro(KafkaStreamsDriverBase):
    """
    Test the Kafka Streams JsonToAvro example, which converts records from JSON
    to Avro using the schema registry.
    """
    topics = (
        TopicSpec(name="json-source"),
        TopicSpec(name="avro-sink"),
    )

    Example = KafkaStreamExamples.KafkaStreamsJsonToAvro
    Driver = KafkaStreamExamples.KafkaStreamsJsonToAvro

    def __init__(self, test_context):
        super(KafkaStreamsJsonToAvro, self).__init__(test_context=test_context,
                                                     enable_pp=True,
                                                     enable_sr=True)
Example 24
    def create_topics(self):
        topics = []
        for i in range(0, 8):
            topics.append(
                TopicSpec(partition_count=random.randint(1, 6),
                          replication_factor=3))

        self.client().create_topic(topics)
Example 25
class KafkaStreamsSumLambda(KafkaStreamsDriverBase):
    """
    Test the Kafka Streams SumLambda example, which sums odd numbers using
    reduce.
    """
    topics = (
        TopicSpec(name="numbers-topic"),
        TopicSpec(name="sum-of-odd-numbers-topic"),
    )

    Example = KafkaStreamExamples.KafkaStreamsSumLambda
    Driver = KafkaStreamExamples.KafkaStreamsSumLambda

    def __init__(self, test_context):
        super(KafkaStreamsSumLambda, self).__init__(test_context=test_context,
                                                    enable_pp=True,
                                                    enable_sr=True)
Example 26
class DescribeTopicsTest(RedpandaTest):
    topics = (TopicSpec(partition_count=2, replication_factor=3), )

    def test_describe_topics(self):
        tools = KafkaCliTools(self.redpanda)
        output = tools.describe_topics()
        assert "partition_count=2" in output
        assert "replication_factor=3" in output
Example 27
class DataPolicyTest(RedpandaTest):
    topics = (TopicSpec(partition_count=1, replication_factor=3), )

    def __init__(self, test_context):
        super(DataPolicyTest, self).__init__(test_context=test_context,
                                             num_brokers=3)

        self.kafka_tools = KafkaCliTools(self.redpanda)

    def _get_data_policy(self, function_name, script_name):
        return "function_name: {} script_name: {}".format(
            function_name, script_name)

    @cluster(num_nodes=3)
    def test_default_data_policy(self):
        topic = self.topics[0].name
        kafka_tools = KafkaCliTools(self.redpanda)
        spec = kafka_tools.describe_topic(topic)
        assert spec.redpanda_datapolicy is None

    @cluster(num_nodes=3)
    def test_set_data_policy(self):
        topic = self.topics[0].name
        kafka_tools = KafkaCliTools(self.redpanda)
        res = kafka_tools.alter_topic_config(
            topic, {
                TopicSpec.PROPERTY_DATA_POLICY_FUNCTION_NAME: "1",
                TopicSpec.PROPERTY_DATA_POLICY_SCRIPT_NAME: "2"
            })
        spec = kafka_tools.describe_topic(topic)
        assert spec.redpanda_datapolicy == self._get_data_policy(1, 2)

    @cluster(num_nodes=3)
    def test_incremental_config(self):
        topic = self.topics[0].name
        kafka_tools = KafkaCliTools(self.redpanda)
        kafka_tools.alter_topic_config(
            topic, {
                TopicSpec.PROPERTY_DATA_POLICY_FUNCTION_NAME: "1",
                TopicSpec.PROPERTY_DATA_POLICY_SCRIPT_NAME: "2"
            })
        spec = kafka_tools.describe_topic(topic)
        assert spec.redpanda_datapolicy == self._get_data_policy(1, 2)

        # Expect that trying to set with a function name but no script fails.
        try:
            r = kafka_tools.alter_topic_config(
                topic, {TopicSpec.PROPERTY_DATA_POLICY_FUNCTION_NAME: "3"})
        except subprocess.CalledProcessError as e:
            # Expected: request fails to update topic
            self.logger.info(f"Kafka CLI alter failed as expected: {e.stdout}")
            assert "unable to parse property" in e.stdout
        else:
            raise RuntimeError(f"Expected API error, got {r}")

        # Expect that the failed alter operation has not modified the topic
        spec = kafka_tools.describe_topic(topic)
        assert spec.redpanda_datapolicy == self._get_data_policy(1, 2)
Example 28
    def test_scram_pythonlib(self):
        topic = TopicSpec()
        client = self.make_superuser_client()
        client.create_topics([
            NewTopic(name=topic.name, num_partitions=1, replication_factor=1)
        ])
        topics = set(client.list_topics())
        self.redpanda.logger.info(f"Topics {topics}")
        assert set([topic.name]) == topics
Example 29
    def setup_producer(self, p_cnt):
        # create topic
        self.topic_spec = TopicSpec(partition_count=p_cnt,
                                    replication_factor=3)
        self.client().create_topic(specs=self.topic_spec)
        # produce some messages to the topic
        self.producer = RpkProducer(self._ctx, self.redpanda,
                                    self.topic_spec.name, 128, 5000, -1)
        self.producer.start()
Example 30
    def test_create_topic(self):
        for client_factory in KafkaCliTools.instances():
            client = client_factory(self.redpanda)
            topics = [TopicSpec() for _ in range(3)]
            for topic in topics:
                client.create_topic(topic)
            for topic in topics:
                spec = client.describe_topic(topic.name)
                assert spec == topic