Example #1
    def test_multiple_topics_and_partitions(self):
        rpk = RpkTool(self.redpanda)
        topics = self.topics
        group = "g0"

        # produce 100 records to every partition of every topic
        for i in range(100):
            payload = str(random.randint(0, 1000))
            for topic_spec in topics:
                for p in range(topic_spec.partition_count):
                    rpk.produce(topic_spec.name,
                                "",
                                payload,
                                partition=p,
                                timeout=5)

        # drain each topic with a single consumer group
        for topic_spec in topics:
            rpk.consume(topic_spec.name,
                        group=group,
                        n=100 * topic_spec.partition_count)

        metrics_offsets = self._get_offset_from_metrics(group)

        # one committed offset should be reported per partition
        partitions_amount = sum(map(lambda x: x.partition_count, topics))
        assert len(metrics_offsets) == partitions_amount
        for topic_spec in topics:
            for i in range(topic_spec.partition_count):
                assert (topic_spec.name, str(i)) in metrics_offsets
                assert metrics_offsets[(topic_spec.name, str(i))] == 100
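The helper _get_offset_from_metrics is not shown in these examples. A minimal sketch of what such a helper could do, assuming the brokers expose a per-group committed-offset gauge on a Prometheus-style /metrics endpoint; the metric name, label names and endpoint URL below are assumptions, not the test framework's actual API:

import requests
from prometheus_client.parser import text_string_to_metric_families


def get_offset_from_metrics(metrics_url, group):
    # Return {(topic, partition): committed offset} for the given group,
    # or None when the group has no offset samples at all.
    text = requests.get(metrics_url, timeout=10).text
    offsets = {}
    for family in text_string_to_metric_families(text):
        if family.name != "kafka_group_offset":  # assumed gauge name
            continue
        for sample in family.samples:
            if sample.labels.get("group") != group:
                continue
            key = (sample.labels["topic"], sample.labels["partition"])
            offsets[key] = int(sample.value)
    return offsets or None

# hypothetical usage against one broker's metrics endpoint:
# get_offset_from_metrics("http://localhost:9644/metrics", "g0")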
Example #2
    def test_check_value(self):
        rpk = RpkTool(self.redpanda)
        topic = next(filter(lambda x: x.partition_count == 1,
                            self.topics)).name

        # produce 100 records to the single-partition topic
        for i in range(100):
            payload = str(random.randint(0, 1000))
            offset = rpk.produce(topic, "", payload, timeout=5)

        group_1 = "g1"
        metric_key = (topic, "0")
        # consume in ten batches of ten and check the committed offset after each batch
        for i in range(10):
            rpk.consume(topic, group=group_1, n=10)
            metrics_offsets = self._get_offset_from_metrics(group_1)
            assert metric_key in metrics_offsets
            assert metrics_offsets[metric_key] == (i + 1) * 10

        group_2 = "g2"
        rpk.consume(topic, group=group_2, n=50)
        gr_2_metrics_offsets = self._get_offset_from_metrics(group_2)
        assert metric_key in gr_2_metrics_offsets
        assert gr_2_metrics_offsets[metric_key] == 50

        # seek g1 to g2's committed offsets
        rpk.group_seek_to_group(group_1, group_2)
        gr_1_metrics_offsets = self._get_offset_from_metrics(group_1)
        assert metric_key in gr_1_metrics_offsets
        assert gr_1_metrics_offsets[metric_key] == 50

        rpk.group_seek_to(group_2, "start")
        gr_2_metrics_offsets = self._get_offset_from_metrics(group_2)
        assert metric_key in gr_2_metrics_offsets
        assert gr_2_metrics_offsets[metric_key] == 0

        # after the topic is deleted, the group's offset metrics should disappear
        self.client().delete_topic(topic)

        def metrics_gone():
            metrics_offsets = self._get_offset_from_metrics(group_1)
            return metrics_offsets is None

        wait_until(metrics_gone, timeout_sec=30, backoff_sec=5)
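The tail of the example already wraps a metrics check in ducktape's wait_until; the same pattern can be applied to the offset assertions if the gauge is updated asynchronously. A sketch of such a hypothetical helper method (its name and use are assumptions), reusing the _get_offset_from_metrics helper the test class already has:

from ducktape.utils.util import wait_until


def _wait_for_group_offset(self, group, topic, partition, expected,
                           timeout_sec=30, backoff_sec=2):
    # hypothetical helper: poll the group metrics until the committed
    # offset for (topic, partition) reaches the expected value
    def reached():
        offsets = self._get_offset_from_metrics(group)
        return (offsets is not None
                and offsets.get((topic, str(partition))) == expected)

    wait_until(reached,
               timeout_sec=timeout_sec,
               backoff_sec=backoff_sec,
               err_msg=f"group {group} never reached offset {expected}")

# hypothetical usage inside the test, instead of a bare assert:
# self._wait_for_group_offset(group_2, topic, 0, 50)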
Example #3
    def test_partition_metrics(self):
        num_records = 10240
        records_size = 512

        # initially all metrics have to be equal to 0
        assert self._bytes_produced() == 0
        assert self._records_produced() == 0

        assert self._bytes_fetched() == 0
        assert self._records_fetched() == 0

        # Produce some data (10240 records * 512 bytes = 5MB of data)
        kafka_tools = KafkaCliTools(self.redpanda)
        kafka_tools.produce(self.topic, num_records, records_size, acks=-1)

        rec_produced = self._records_produced()
        self.redpanda.logger.info(f"records produced: {rec_produced}")
        assert rec_produced == num_records
        bytes_produced = self._bytes_produced()
        self.redpanda.logger.info(f"bytes produced: {bytes_produced}")
        # bytes produced should be bigger than sent records size because of
        # batch headers overhead
        assert bytes_produced >= num_records * records_size

        # fetch metrics shouldn't change
        assert self._bytes_fetched() == 0
        assert self._records_fetched() == 0

        # read all messages
        rpk = RpkTool(self.redpanda)
        rpk.consume(self.topic, n=num_records)

        rec_fetched = self._records_fetched()
        self.redpanda.logger.info(f"records fetched: {rec_fetched}")

        bytes_fetched = self._bytes_fetched()
        self.redpanda.logger.info(f"bytes fetched: {bytes_fetched}")

        assert bytes_fetched == bytes_produced
        assert rec_fetched == rec_produced
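The _records_produced, _bytes_produced, _records_fetched and _bytes_fetched helpers are likewise not shown. One way such values could be aggregated, assuming per-partition Prometheus counters summed across every broker; the metric names and URLs here are placeholders, not the actual exporter names:

import requests
from prometheus_client.parser import text_string_to_metric_families


def sum_counter(metrics_urls, metric_name):
    # Sum every sample of the named metric across all broker endpoints.
    total = 0
    for url in metrics_urls:
        text = requests.get(url, timeout=10).text
        for family in text_string_to_metric_families(text):
            if family.name == metric_name:
                total += sum(int(s.value) for s in family.samples)
    return total

# hypothetical usage mirroring the assertions above:
# urls = ["http://%s:9644/metrics" % h for h in broker_hosts]
# assert sum_counter(urls, "partition_records_produced") == 10240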
Example #4
class ShadowIndexingFirewallTest(RedpandaTest):
    log_segment_size = 1048576  # 1MB
    retention_bytes = 1024  # 1 KB

    s3_topic_name = "panda-topic"
    topics = (TopicSpec(name=s3_topic_name,
                        partition_count=1,
                        replication_factor=3), )

    def __init__(self, test_context):
        si_settings = SISettings(cloud_storage_reconciliation_interval_ms=500,
                                 cloud_storage_max_connections=5,
                                 log_segment_size=self.log_segment_size)

        super(ShadowIndexingFirewallTest,
              self).__init__(test_context=test_context,
                             si_settings=si_settings)

        self._s3_port = si_settings.cloud_storage_api_endpoint_port
        self.rpk = RpkTool(self.redpanda)

    @cluster(num_nodes=3, log_allow_list=CONNECTION_ERROR_LOGS)
    def test_consume_from_blocked_s3(self):
        # produce until the partition has at least five log segments
        produce_until_segments(redpanda=self.redpanda,
                               topic=self.s3_topic_name,
                               partition_idx=0,
                               count=5,
                               acks=-1)

        # tighten retention so local segments are removed (the data stays in cloud storage)
        self.rpk.alter_topic_config(self.s3_topic_name,
                                    TopicSpec.PROPERTY_RETENTION_BYTES,
                                    self.retention_bytes)

        wait_for_segments_removal(redpanda=self.redpanda,
                                  topic=self.s3_topic_name,
                                  partition_idx=0,
                                  count=4)
        """Disconnect redpanda from S3 and try to read starting with offset 0"""
        with firewall_blocked(self.redpanda.nodes, self._s3_port):
            try:
                out = self.rpk.consume(topic=self.s3_topic_name)
            except RpkException as e:
                assert 'timed out' in e.msg
            else:
                raise RuntimeError(
                    f"RPK consume should have timed out, but ran with output: {out}"
                )
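firewall_blocked is used here as a context manager that cuts the brokers off from the S3 endpoint for the duration of the with block. A rough single-host approximation of that idea using iptables (the real helper applies equivalent rules on every cluster node; the chain and rule below are assumptions):

import subprocess
from contextlib import contextmanager


@contextmanager
def block_port(port):
    # drop all outbound TCP traffic to the given port (requires root)
    rule = ["OUTPUT", "-p", "tcp", "--dport", str(port), "-j", "DROP"]
    subprocess.run(["iptables", "-A"] + rule, check=True)
    try:
        yield
    finally:
        # remove the rule so connectivity is restored afterwards
        subprocess.run(["iptables", "-D"] + rule, check=True)

# hypothetical usage:
# with block_port(9000):
#     ...  # requests to the S3 endpoint on port 9000 now time out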