示例#1
0
    def verify(self, tests):
        """Run each tx-verifier scenario and raise if any of them failed.

        :param tests: iterable of tx-verifier test names to execute.
        :raises DucktapeError: with the accumulated output of every failed
            test (all tests run; failures are collected, not fail-fast).
        """
        verifier_jar = "/opt/tx-verifier/tx-verifier.jar"

        self.redpanda.logger.info("creating topics")

        rpk = RpkTool(self.redpanda)
        rpk.create_topic("topic1")
        rpk.create_topic("topic2")

        # collect failure reports in a list and join once at the end
        # (avoids quadratic string concatenation)
        errors = []

        for test in tests:
            self.redpanda.logger.info(
                "testing txn test \"{test}\"".format(test=test))
            try:
                # Invoke the verifier with an argument list instead of a
                # "/bin/sh -c" string: no word splitting, no shell injection.
                subprocess.check_output([
                    "java", "-jar", verifier_jar, test,
                    self.redpanda.brokers()
                ],
                                        stderr=subprocess.STDOUT)
                self.redpanda.logger.info(
                    "txn test \"{test}\" passed".format(test=test))
            except subprocess.CalledProcessError as e:
                self.redpanda.logger.info(
                    "txn test \"{test}\" failed".format(test=test))
                # decode bytes so the report is readable text, not "b'...'"
                output = e.output.decode(errors="replace") if isinstance(
                    e.output, bytes) else str(e.output)
                errors.append(test + "\n" + output + "\n" +
                              "---------------------------\n")

        if errors:
            raise DucktapeError("".join(errors))
    def test_fetch_after_committed_offset_was_removed(self,
                                                      transactions_enabled):
        """
        Test fetching when consumer offset was deleted by retention

        After retention removes the segments containing the committed
        offset, a consumer in the same group should resume from the
        partition's new start offset rather than fail.

        :param transactions_enabled: toggles both enable_transactions and
            enable_idempotence in the cluster config before startup.
        """

        # config must be set before start(); these flags are read at boot
        self.redpanda._extra_rp_conf[
            "enable_transactions"] = transactions_enabled
        self.redpanda._extra_rp_conf[
            "enable_idempotence"] = transactions_enabled
        self.redpanda.start()

        # single partition so offsets/watermarks are easy to reason about;
        # delete cleanup policy so retention removes whole segments
        topic = TopicSpec(partition_count=1,
                          replication_factor=3,
                          cleanup_policy=TopicSpec.CLEANUP_DELETE)
        self.client().create_topic(topic)

        kafka_tools = KafkaCliTools(self.redpanda)

        # produce until enough segments exist for retention to remove later
        # (note: this topic uses the delete policy, not compaction)
        produce_until_segments(
            self.redpanda,
            topic=topic.name,
            partition_idx=0,
            count=10,
        )
        consumer_group = 'test'
        rpk = RpkTool(self.redpanda)

        def consume(n=1):
            # rpk prints one JSON object per record; split on '}' and
            # re-append it so each fragment parses as standalone JSON.
            # NOTE(review): assumes payloads contain no '}' — holds for the
            # records produced in this test, verify before reusing.
            out = rpk.consume(topic.name, group=consumer_group, n=n)
            split = out.split('}')
            split = filter(lambda s: "{" in s, split)

            return map(lambda s: json.loads(s + "}"), split)

        # consume from the beginning, committing offsets as the group
        msgs = consume(10)
        last = list(msgs).pop()
        offset = last['offset']

        # shrink retention (bytes) so older segments become deletable
        kafka_tools.alter_topic_config(
            topic.name, {
                TopicSpec.PROPERTY_RETENTION_BYTES: 2 * self.segment_size,
            })

        wait_for_segments_removal(self.redpanda,
                                  topic.name,
                                  partition_idx=0,
                                  count=5)

        partitions = list(rpk.describe_topic(topic.name))
        p = partitions[0]
        # the previously committed offset must now be below log start
        assert p.start_offset > offset
        # consume from the offset that doesn't exist,
        # the one that was committed previously was already removed
        out = list(consume(1))
        assert out[0]['offset'] == p.start_offset
    def test_bootstrapping_after_move(self):
        """
        Move partitions with active consumer / producer

        After a partition move and a full-cluster restart, every partition's
        high watermark must match the value snapshotted before the restart.
        """
        self.start_redpanda(num_nodes=3)
        spec = TopicSpec(name="topic", partition_count=3, replication_factor=3)
        self.client().create_topic(spec)
        self.topic = spec.name
        self.start_producer(1)
        self.start_consumer(1)
        self.await_startup()
        # execute single move
        self._move_and_verify()
        self.run_validation(enable_idempotence=False, consumer_timeout_sec=45)

        # snapshot the high watermark of every partition
        rpk = RpkTool(self.redpanda)
        high_watermarks = {
            p.id: p.high_watermark
            for p in rpk.describe_topic(spec.name)
        }

        # restart all the nodes
        self.redpanda.restart_nodes(self.redpanda.nodes)

        def offsets_are_recovered():
            current = rpk.describe_topic(spec.name)
            return all(high_watermarks[p.id] == p.high_watermark
                       for p in current)

        wait_until(offsets_are_recovered, 30, 2)
示例#4
0
    def test_reads_writes(self):
        """Run the tx-verifier "concurrent-reads-writes" scenario.

        :raises DucktapeError: with the verifier output if the scenario
            fails.
        """
        verifier_jar = "/opt/tx-verifier/tx-verifier.jar"

        self.redpanda.logger.info("creating topics")

        rpk = RpkTool(self.redpanda)
        rpk.create_topic("topic1", partitions=1, replicas=1)

        test = "concurrent-reads-writes"

        try:
            # argument list instead of a "/bin/sh -c" string: no word
            # splitting, no shell injection surface
            subprocess.check_output(
                ["java", "-jar", verifier_jar, test,
                 self.redpanda.brokers()],
                stderr=subprocess.STDOUT)
            self.redpanda.logger.info(
                "txn test \"{test}\" passed".format(test=test))
        except subprocess.CalledProcessError as e:
            self.redpanda.logger.info(
                "txn test \"{test}\" failed".format(test=test))
            # decode bytes so the raised message is readable text, not b'...'
            output = e.output.decode(errors="replace") if isinstance(
                e.output, bytes) else str(e.output)
            raise DucktapeError(test + "\n" + output + "\n" +
                                "---------------------------\n")
示例#5
0
    def __init__(self, test_context):
        """Point cloud storage at the local S3-compatible endpoint and set
        up the helper clients used by the archival tests."""
        self.s3_bucket_name = f"panda-bucket-{uuid.uuid1()}"
        self._extra_rp_conf = {
            "cloud_storage_enabled": True,
            "cloud_storage_access_key": ArchivalTest.s3_access_key,
            "cloud_storage_secret_key": ArchivalTest.s3_secret_key,
            "cloud_storage_region": ArchivalTest.s3_region,
            "cloud_storage_bucket": self.s3_bucket_name,
            "cloud_storage_disable_tls": True,
            "cloud_storage_api_endpoint": ArchivalTest.s3_host_name,
            "cloud_storage_api_endpoint_port": 9000,
            "cloud_storage_reconciliation_interval_ms": 500,
            "cloud_storage_max_connections": 5,
            "log_compaction_interval_ms": self.log_compaction_interval_ms,
            "log_segment_size": self.log_segment_size,
        }
        if test_context.function_name == "test_timeboxed_uploads":
            # huge segments plus a 1s max upload interval force uploads to
            # be triggered by time rather than by segment size
            self._extra_rp_conf.update(
                log_segment_size=1024 * 1024 * 1024,
                cloud_storage_segment_max_upload_interval_sec=1)

        super(ArchivalTest, self).__init__(test_context=test_context,
                                           extra_rp_conf=self._extra_rp_conf)

        self.kafka_tools = KafkaCliTools(self.redpanda)
        self.rpk = RpkTool(self.redpanda)
        self.s3_client = S3Client(
            region='panda-region',
            access_key=u"panda-user",
            secret_key=u"panda-secret",
            endpoint=f'http://{ArchivalTest.s3_host_name}:9000',
            logger=self.logger)
示例#6
0
 def describe_topic_configs(self, topic: str):
     """Return *topic*'s configs as {key: TopicConfigValue(value, source)}."""
     configs = RpkTool(self._redpanda).describe_topic_configs(topic)
     result = {}
     for key, pair in configs.items():
         # rpk yields (value, source) tuples per config key
         result[key] = TopicConfigValue(value=pair[0], source=pair[1])
     return result
示例#7
0
 def alter_topic_config(self, topic: str, key: str,
                        value: typing.Union[str, int]):
     """
     Alter a topic configuration property.

     Sets *key* to *value* on *topic* via rpk.
     """
     tool = RpkTool(self._redpanda)
     tool.alter_topic_config(topic, key, value)
示例#8
0
    def test_produce(self):
        """Run the sarama produce verifier, retrying on leadership churn.

        :raises DucktapeError: if the verifier still fails after all retries,
            or fails with an error other than NOT_LEADER_FOR_PARTITION.
        """
        verifier_bin = "/opt/redpanda-tests/go/sarama/produce_test/produce_test"

        self.redpanda.logger.info("creating topics")

        rpk = RpkTool(self.redpanda)
        rpk.create_topic("topic1")

        self.redpanda.logger.info("testing sarama produce")
        retries = 5
        for attempt in range(retries):
            try:
                # argument list instead of a shell string: no word
                # splitting, no shell injection surface
                subprocess.check_output(
                    [verifier_bin, "--brokers",
                     self.redpanda.brokers()],
                    stderr=subprocess.STDOUT)
                self.redpanda.logger.info("sarama produce test passed")
                break
            except subprocess.CalledProcessError as e:
                # decode so logs and errors contain text rather than b'...'
                error = e.output.decode(errors="replace") if isinstance(
                    e.output, bytes) else str(e.output)
                self.redpanda.logger.info("sarama produce failed with " +
                                          error)
                # leadership may still be settling right after topic
                # creation; retry only on NOT_LEADER_FOR_PARTITION
                if attempt + 1 != retries and NOT_LEADER_FOR_PARTITION in error:
                    sleep(5)
                    continue
                raise DucktapeError("sarama produce failed with " + error)
示例#9
0
    def test_cluster_id(self):
        """
        That the cluster_id exposed in Kafka metadata is automatically
        populated with a uuid, that it starts with redpanda. and that
        it can be overridden by setting the property to something else.
        """

        rpk = RpkTool(self.redpanda)

        # An example, we will compare lengths with this
        uuid_example = "redpanda.87e8c0c3-7c2a-4f7b-987f-11fc1d2443a4"

        def has_uuid_cluster_id():
            current = rpk.cluster_metadata_id()
            self.logger.info(f"cluster_id={current}")
            if current is None:
                return False
            return len(current) == len(uuid_example)

        # This is a wait_until because the initialization of cluster_id
        # is async and can happen after the cluster starts answering Kafka requests.
        wait_until(has_uuid_cluster_id, timeout_sec=20, backoff_sec=1)

        # Verify that the cluster_id does not change on a restart
        before_restart = rpk.cluster_metadata_id()
        self.redpanda.restart_nodes(self.redpanda.nodes)
        assert rpk.cluster_metadata_id() == before_restart

        # Verify that a manually set cluster_id is respected
        manual_id = "rhubarb"
        self.redpanda.set_cluster_config(values={"cluster_id": manual_id},
                                         expect_restart=False)

        assert rpk.cluster_metadata_id() == f"redpanda.{manual_id}"
示例#10
0
    def __init__(self, test_context):
        """Single-broker cluster with automatic topic creation disabled."""
        conf = {'auto_create_topics_enabled': False}
        super(TopicAutocreateTest, self).__init__(test_context=test_context,
                                                  num_brokers=1,
                                                  extra_rp_conf=conf)

        self.kafka_tools = KafkaCliTools(self.redpanda)
        self.rpk = RpkTool(self.redpanda)
    def _all_have_leaders(self, topic):
        """Return True iff every partition of *topic* reports a valid leader."""
        rpk = RpkTool(self.redpanda)
        for partition in rpk.describe_topic(topic):
            self.logger.debug(f"rpk partition: {partition}")
            # a leader of None or -1 means the partition is leaderless
            if partition.leader is None or partition.leader == -1:
                return False
        return True
示例#12
0
    def test_consumer_rejoin(self, static_members):
        """
        Test validating that re-joining static member will not cause rebalance

        :param static_members: when True, consumers join with a
            group.instance.id (static membership); when False they are
            dynamic members that leave the group on graceful shutdown.
        """
        self.setup_producer(20)
        group = 'test-gr-1'

        # long session timeout so a stopped static member is not evicted
        # from the group before it has a chance to rejoin
        consumers = self.create_consumers(
            2,
            self.topic_spec.name,
            group,
            static_members=static_members,
            consumer_properties={"session.timeout.ms": 40000})

        # wait for some messages
        wait_until(lambda: ConsumerGroupTest.consumed_at_least(consumers, 50),
                   30, 2)
        rpk = RpkTool(self.redpanda)
        # at this point we have 2 consumers in stable group
        self.validate_group_state(group,
                                  expected_state="Stable",
                                  static_members=static_members)

        # stop one of the consumers
        consumers[0].stop()
        consumers[0].wait()

        rpk_group = rpk.group_describe(group)
        if static_members:
            # with static members group should still be in stable state
            assert rpk_group.state == "Stable"
            assert rpk_group.members == 2
        else:
            # consumer will request group leave when shutdown gracefully and it is dynamic
            self.wait_for_members(group, 1)

        # start the consumer again
        consumers[0].start()
        consumers[0].wait_for_started()
        # wait for consumer to start
        if static_members:
            # with static members group should be stable immediately as the
            # consumer is rejoining with the same instance id
            self.validate_group_state(group,
                                      expected_state="Stable",
                                      static_members=static_members)
        else:
            # group should get back to its original 2 members state
            self.wait_for_members(group, 2)

        self.producer.wait()
        self.producer.free()

        # graceful shutdown and cleanup of all consumers
        for c in consumers:
            c.stop()
            c.wait()
            c.free()
示例#13
0
    def test_tx_init_passes(self):
        """A transactional producer can complete init_transactions."""
        rpk = RpkTool(self.redpanda)
        rpk.create_topic("topic1")

        config = {
            "bootstrap.servers": self.redpanda.brokers(),
            "enable.idempotence": True,
            "transactional.id": "tx-id-1",
            "retries": 5
        }
        producer = Producer(config)
        producer.init_transactions()
    def __init__(self, test_context):
        """Configure shadow indexing with a small connection budget."""
        settings = SISettings(cloud_storage_reconciliation_interval_ms=500,
                              cloud_storage_max_connections=5,
                              log_segment_size=self.log_segment_size)

        super(ShadowIndexingFirewallTest,
              self).__init__(test_context=test_context, si_settings=settings)

        self._s3_port = settings.cloud_storage_api_endpoint_port
        self.rpk = RpkTool(self.redpanda)
示例#15
0
 def test_metadata_request_contains_all_brokers(self):
     """
     Check if broker list returned from metadata request is complete
     """
     wait_until(lambda: self.controller_present, 10, 1)
     brokers = RpkTool(self.redpanda).cluster_info()
     assert len(brokers) == 3
     expected_ids = sorted(self.redpanda.idx(n) for n in self.redpanda.nodes)
     reported_ids = sorted(b.id for b in brokers)
     assert expected_ids == reported_ids
示例#16
0
    def __init__(self, *args, **kwargs):
        """Boot the cluster at trace log level so redaction can be checked."""
        conf = BOOTSTRAP_CONFIG.copy()

        # Force verbose logging for the secret redaction test
        kwargs['log_level'] = 'trace'

        super(ClusterConfigTest, self).__init__(*args,
                                                extra_rp_conf=conf,
                                                **kwargs)

        self.admin = Admin(self.redpanda)
        self.rpk = RpkTool(self.redpanda)
示例#17
0
    def __init__(self, *args, **kwargs):
        """Boot the cluster with the central config feature flag turned on."""
        conf = BOOTSTRAP_CONFIG.copy()

        # Enable our feature flag
        conf['enable_central_config'] = True

        super(ClusterConfigTest, self).__init__(*args,
                                                extra_rp_conf=conf,
                                                **kwargs)

        self.admin = Admin(self.redpanda)
        self.rpk = RpkTool(self.redpanda)
示例#18
0
    def validate_group_state(self, group, expected_state, static_members):
        """Assert *group* has 2 members, the expected state, and instance
        ids consistent with static or dynamic membership."""
        described = RpkTool(self.redpanda).group_describe(group)

        assert described.members == 2
        assert described.state == expected_state

        for partition in described.partitions:
            if static_members:
                # static members carry the configured group.instance.id
                assert 'panda-consumer' in partition.instance_id
            else:
                # dynamic members have no instance id at all
                assert partition.instance_id is None
示例#19
0
    def test_idempotent_write_passes(self):
        """An idempotent producer can write a record successfully."""
        rpk = RpkTool(self.redpanda)
        rpk.create_topic("topic1")

        config = {
            "bootstrap.servers": self.redpanda.brokers(),
            "enable.idempotence": True,
            "retries": 5
        }
        producer = Producer(config)
        producer.produce("topic1",
                         key="key1".encode('utf-8'),
                         value="value1".encode('utf-8'),
                         callback=on_delivery)
        producer.flush()
示例#20
0
    def get_super_client(self):
        """Return an RpkTool client authenticated as the superuser."""
        if self.security.enable_mtls_identity:
            return RpkTool(self.redpanda, tls_cert=self.admin_user_cert)

        username, password, _ = self.redpanda.SUPERUSER_CREDENTIALS

        # uses base user cert with no explicit permissions. the cert should only
        # participate in tls handshake and not principal extraction.
        cert = self.base_user_cert if self.security.tls_provider else None
        return RpkTool(self.redpanda,
                       username=username,
                       password=password,
                       sasl_mechanism=self.algorithm,
                       tls_cert=cert)
示例#21
0
    def test_idempotency_compacted_topic(self):
        """An idempotent producer can write to a compacted topic."""
        rpk = RpkTool(self.redpanda)
        rpk.create_topic("topic1", config={"cleanup.policy": "compact"})

        config = {
            "bootstrap.servers": self.redpanda.brokers(),
            "enable.idempotence": True,
            "retries": 5
        }
        producer = Producer(config)
        producer.produce("topic1",
                         key="key1".encode('utf-8'),
                         value="value1".encode('utf-8'),
                         callback=on_delivery)
        producer.flush()
示例#22
0
    def _ping_pong(self):
        """Produce one record and read it back, logging the round-trip time.

        :raises RuntimeError: if the consumed payload differs from what was
            produced.
        """
        kafka_cat = KafkaCat(self.redpanda)
        rpk = RpkTool(self.redpanda)

        payload = str(random.randint(0, 1000))
        start = time.time()
        offset = rpk.produce(self.topic, "tkey", payload, timeout=5)
        consumed = kafka_cat.consume_one(self.topic, 0, offset)
        elapsed_ms = (time.time() - start) * 1000.0
        self.logger.info(
            f"_ping_pong produced '{payload}' consumed '{consumed}' in {elapsed_ms:.2f} ms"
        )
        if consumed['payload'] != payload:
            raise RuntimeError(f"expected '{payload}' got '{consumed}'")
示例#23
0
    def test_delete_topic_from_ongoin_tx(self):
        """Delete a topic that participates in an ongoing transaction, evict
        its partitions from the tx via the admin API, and verify that the
        transaction commits and a fresh one succeeds on remaining topics.
        """
        tx_id = "0"
        producer = ck.Producer({
            'bootstrap.servers': self.redpanda.brokers(),
            'transactional.id': tx_id,
        })
        producer.init_transactions()
        producer.begin_transaction()

        # enlist every partition of every topic in the transaction
        for topic in self.topics:
            for partition in range(topic.partition_count):
                producer.produce(topic.name, '0', '0', partition)

        producer.flush()

        txs_info = self.admin.get_all_transactions()
        assert len(
            txs_info) == 1, "Should be only one transaction in current time"

        rpk = RpkTool(self.redpanda)
        topic_name = self.topics[0].name
        rpk.delete_topic(topic_name)

        tx = txs_info[0]
        assert tx[
            "transactional_id"] == tx_id, f"Expected transactional_id: {tx_id}, but got {tx['transactional_id']}"

        # evict the deleted topic's partitions from the transaction so the
        # commit does not hang on partitions that no longer exist
        for partition in tx["partitions"]:
            assert (partition["ns"] == "kafka")
            if partition["topic"] == topic_name:
                self.admin.delete_partition_from_transaction(
                    tx["transactional_id"], partition["ns"],
                    partition["topic"], partition["partition_id"],
                    partition["etag"])

        producer.commit_transaction()

        producer = ck.Producer({
            'bootstrap.servers': self.redpanda.brokers(),
            'transactional.id': tx_id,
        })
        producer.init_transactions()
        producer.begin_transaction()

        for topic in self.topics:
            # BUG FIX: compare strings with != rather than `is not`;
            # identity comparison of equal strings is unreliable and could
            # let the deleted topic slip back into the produce loop.
            if topic.name != topic_name:
                for partition in range(topic.partition_count):
                    producer.produce(topic.name, '0', '0', partition)

        producer.commit_transaction()
示例#24
0
    def __init__(self, test_context, extra_rp_conf=None, num_brokers=3):
        """Start redpanda with wasm/coproc support enabled.

        :param extra_rp_conf: optional extra cluster config merged over the
            wasm defaults. Defaults to None (treated as empty) — avoids the
            shared mutable-default-argument pitfall of `extra_rp_conf=dict()`.
        :param num_brokers: number of brokers to start.
        """
        # defaults required for coprocessor support
        wasm_opts = dict(
            developer_mode=True,
            enable_coproc=True,
        )
        wasm_opts.update(extra_rp_conf or {})
        super(WasmTest, self).__init__(test_context,
                                       extra_rp_conf=wasm_opts,
                                       num_brokers=num_brokers)
        self._rpk_tool = RpkTool(self.redpanda)
        self._build_tool = WasmBuildTool(self._rpk_tool)
示例#25
0
    def test_idempotent_write_fails(self):
        """A user without topic ACLs gets TOPIC_AUTHORIZATION_FAILED."""
        username, password, algorithm = self.redpanda.SUPERUSER_CREDENTIALS

        rpk = RpkTool(self.redpanda)
        rpk.sasl_create_user("bob", "bob", algorithm)

        try:
            self.write_by_bob(algorithm)
        except AssertionError:
            raise
        except KafkaException as e:
            code = e.args[0].code()
            assert code == TOPIC_AUTHORIZATION_FAILED, "TOPIC_AUTHORIZATION_FAILED error is expected"
        else:
            # the write succeeding means authorization is broken
            assert False, "bob should not have access to topic1"
示例#26
0
class TopicAutocreateTest(RedpandaTest):
    """
    Verify that autocreation works, and that the settings of an autocreated
    topic match those for a topic created by hand with rpk.
    """
    def __init__(self, test_context):
        super(TopicAutocreateTest, self).__init__(
            test_context=test_context,
            num_brokers=1,
            extra_rp_conf={'auto_create_topics_enabled': False})

        self.kafka_tools = KafkaCliTools(self.redpanda)
        self.rpk = RpkTool(self.redpanda)

    @cluster(num_nodes=1)
    def topic_autocreate_test(self):
        auto_topic = 'autocreated'
        manual_topic = "manuallycreated"

        # With autocreation disabled, producing to a nonexistent topic should not work.
        try:
            # Use rpk rather than kafka CLI because rpk errors out promptly
            self.rpk.produce(auto_topic, "foo", "bar")
        except Exception:
            # The write failed, and shouldn't have created a topic
            assert auto_topic not in self.kafka_tools.list_topics()
        else:
            assert False, "Producing to a nonexistent topic should fail"

        # Enable autocreation
        self.redpanda.restart_nodes(self.redpanda.nodes,
                                    {'auto_create_topics_enabled': True})

        # Auto create topic
        assert auto_topic not in self.kafka_tools.list_topics()
        self.kafka_tools.produce(auto_topic, 1, 4096)
        assert auto_topic in self.kafka_tools.list_topics()
        auto_topic_spec = self.kafka_tools.describe_topic(auto_topic)
        assert auto_topic_spec.retention_ms is None
        assert auto_topic_spec.retention_bytes is None

        # Create topic by hand, compare its properties to the autocreated one
        self.rpk.create_topic(manual_topic)
        # BUG FIX: describe the manually created topic. The original
        # described auto_topic a second time, which made every comparison
        # below compare the auto-created spec with itself (vacuously true).
        manual_topic_spec = self.kafka_tools.describe_topic(manual_topic)
        assert manual_topic_spec.retention_ms == auto_topic_spec.retention_ms
        assert manual_topic_spec.retention_bytes == auto_topic_spec.retention_bytes

        # Clear name and compare the rest of the attributes
        manual_topic_spec.name = auto_topic_spec.name = None
        assert manual_topic_spec == auto_topic_spec
示例#27
0
    def test_verify_compacted_topics(self):
        """Produce keyed records to a compacted topic and run the verifier.

        :raises DucktapeError: if the verifier reports failure.
        """
        partition_count = 1
        record_count = 10000
        key_cardinality = 200
        v = CompactedTopicVerifier(self.redpanda)
        rpk = RpkTool(self.redpanda)
        # return value of produce() is not needed; we only wait for the
        # records to become readable below
        v.produce(record_count=record_count,
                  p=partition_count,
                  key_cardinality=key_cardinality)

        def records_readable():
            # all partitions must have caught up to the produced count
            partitions = rpk.describe_topic(v.topic)
            for p in partitions:
                if p.high_watermark < record_count:
                    return False
            return True

        # pass the predicate directly; wrapping it in a lambda was redundant
        wait_until(records_readable,
                   timeout_sec=10,
                   backoff_sec=2,
                   err_msg="Records are not readable")
        # TODO: add verification that segments were actually compacted
        res = v.verify()
        # identity comparison (`is None`) instead of `== None`
        if res is None:
            raise DucktapeError("Compacted topic verification failed")
示例#28
0
class RpkClusterTest(RedpandaTest):
    def __init__(self, ctx):
        super(RpkClusterTest, self).__init__(test_context=ctx)
        self._ctx = ctx
        self._rpk = RpkTool(self.redpanda)

    @cluster(num_nodes=3)
    def test_cluster_info(self):
        """`rpk cluster info` eventually lists every node with an
        advertised address."""
        def condition():
            brokers = self._rpk.cluster_info()

            if len(brokers) != len(self.redpanda.nodes):
                return False

            advertised_addrs = self.redpanda.brokers()
            return all(b.address in advertised_addrs for b in brokers)

        wait_until(condition,
                   timeout_sec=10,
                   backoff_sec=1,
                   err_msg="No brokers found or output doesn't match")
示例#29
0
    def test_topic_recreation(self):
        """Group offset metrics vanish when the topic is deleted and come
        back once the topic is recreated and consumed again."""
        rpk = RpkTool(self.redpanda)
        topic_spec = next(t for t in self.topics if t.partition_count == 1)
        topic = topic_spec.name
        group = "g0"

        def check_metric():
            # produce enough records to consume 50 of them
            for _ in range(100):
                rpk.produce(topic, "", str(random.randint(0, 1000)),
                            timeout=5)

            metric_key = (topic, "0")
            rpk.consume(topic, group=group, n=50)
            metrics_offsets = self._get_offset_from_metrics(group)
            assert metric_key in metrics_offsets
            assert metrics_offsets[metric_key] == 50

            self.redpanda.delete_topic(topic)

        check_metric()
        assert self._get_offset_from_metrics(group) is None
        self.redpanda.create_topic(topic_spec)
        check_metric()
    def create_and_validate(self, name, custom_assignment):
        """Create topic *name* with an explicit replica assignment and wait
        until rpk reports matching replicas for every partition."""
        self.redpanda.logger.info(
            f"creating topic {name} with {custom_assignment}")
        cli = KafkaCliTools(self.redpanda)
        rpk = RpkTool(self.redpanda)

        cli.create_topic_with_assignment(name, custom_assignment)

        def replica_matches():
            current = {
                p.id: list(p.replicas)
                for p in rpk.describe_topic(name)
            }
            self.redpanda.logger.debug(
                f"requested replicas: {custom_assignment}, current replicas: {current}"
            )

            for p_id, expected in enumerate(custom_assignment):
                actual = current.get(p_id)
                # replica order is irrelevant, compare as sets
                if actual is None or set(expected) != set(actual):
                    return False

            return True

        # each assignment defines a partition
        wait_until(replica_matches, 10, backoff_sec=1)