Example #1
    def test_fetch_after_committed_offset_was_removed(self,
                                                      transactions_enabled):
        """
        Test fetching when the consumer offset was deleted by retention
        """

        self.redpanda._extra_rp_conf[
            "enable_transactions"] = transactions_enabled
        self.redpanda._extra_rp_conf[
            "enable_idempotence"] = transactions_enabled
        self.redpanda.start()

        topic = TopicSpec(partition_count=1,
                          replication_factor=3,
                          cleanup_policy=TopicSpec.CLEANUP_DELETE)
        self.client().create_topic(topic)

        kafka_tools = KafkaCliTools(self.redpanda)

        # produce until 10 segments have accumulated
        produce_until_segments(
            self.redpanda,
            topic=topic.name,
            partition_idx=0,
            count=10,
        )
        consumer_group = 'test'
        rpk = RpkTool(self.redpanda)

        def consume(n=1):
            out = rpk.consume(topic.name, group=consumer_group, n=n)
            split = out.split('}')
            split = filter(lambda s: "{" in s, split)
            return map(lambda s: json.loads(s + "}"), split)

        # consume from the beginning
        msgs = consume(10)
        last = list(msgs).pop()
        offset = last['offset']

        # change retention bytes to force removal of older segments
        kafka_tools.alter_topic_config(
            topic.name, {
                TopicSpec.PROPERTY_RETENTION_BYTES: 2 * self.segment_size,
            })

        wait_for_segments_removal(self.redpanda,
                                  topic.name,
                                  partition_idx=0,
                                  count=5)

        partitions = list(rpk.describe_topic(topic.name))
        p = partitions[0]
        assert p.start_offset > offset
        # consume from an offset that no longer exists:
        # the previously committed offset was already removed by retention
        out = list(consume(1))
        assert out[0]['offset'] == p.start_offset
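Note that consume() above splits rpk's output on '}', which breaks if a record value itself contains a brace. A more robust sketch using json.JSONDecoder.raw_decode, assuming rpk prints one JSON object per consumed record (the helper name is hypothetical):

import json

def parse_json_records(out):
    """Parse back-to-back JSON objects from rpk's output
    (hypothetical helper; assumes one JSON object per record)."""
    decoder = json.JSONDecoder()
    records, idx = [], 0
    while idx < len(out):
        if out[idx].isspace():
            # skip whitespace/newlines between objects
            idx += 1
            continue
        obj, idx = decoder.raw_decode(out, idx)
        records.append(obj)
    return records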
Example #2
    def __init__(self, test_context):
        self.s3_bucket_name = f"panda-bucket-{uuid.uuid1()}"
        extra_rp_conf = dict(
            developer_mode=True,
            cloud_storage_enabled=True,
            cloud_storage_access_key=ArchivalTest.s3_access_key,
            cloud_storage_secret_key=ArchivalTest.s3_secret_key,
            cloud_storage_region=ArchivalTest.s3_region,
            cloud_storage_bucket=self.s3_bucket_name,
            cloud_storage_disable_tls=True,
            cloud_storage_api_endpoint=ArchivalTest.s3_host_name,
            cloud_storage_api_endpoint_port=9000,
            cloud_storage_reconciliation_interval_ms=500,
            cloud_storage_max_connections=5,
            log_segment_size=1048576  # 1MB
        )
        super(ArchivalTest, self).__init__(test_context=test_context,
                                           extra_rp_conf=extra_rp_conf)

        self.kafka_tools = KafkaCliTools(self.redpanda)
        self.s3_client = S3Client(
            region='panda-region',
            access_key=u"panda-user",
            secret_key=u"panda-secret",
            endpoint=f'http://{ArchivalTest.s3_host_name}:9000',
            logger=self.logger)
Example #3
    def __init__(self, test_context):
        super(AlterTopicConfiguration,
              self).__init__(test_context=test_context,
                             num_brokers=3,
                             topics=self.topics)

        self.kafka_tools = KafkaCliTools(self.redpanda)
Example #4
    def test_incremental_config(self):
        topic = self.topics[0].name
        kafka_tools = KafkaCliTools(self.redpanda)
        kafka_tools.alter_topic_config(
            topic, {
                TopicSpec.PROPERTY_DATA_POLICY_FUNCTION_NAME: "1",
                TopicSpec.PROPERTY_DATA_POLICY_SCRIPT_NAME: "2"
            })
        spec = kafka_tools.describe_topic(topic)
        assert spec.redpanda_datapolicy == self._get_data_policy(1, 2)

        # Expect that trying to set with a function name but no script fails.
        try:
            r = kafka_tools.alter_topic_config(
                topic, {TopicSpec.PROPERTY_DATA_POLICY_FUNCTION_NAME: "3"})
        except subprocess.CalledProcessError as e:
            # Expected: request fails to update topic
            self.logger.info(f"Kafka CLI alter failed as expected: {e.stdout}")
            assert "unable to parse property" in e.stdout
        else:
            raise RuntimeError(f"Expected API error, got {r}")

        # Expect that the failed alter operation has not modified the topic
        spec = kafka_tools.describe_topic(topic)
        assert spec.redpanda_datapolicy == self._get_data_policy(1, 2)
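The try/except/else pattern above recurs whenever a test asserts that an alter must fail. A minimal sketch of a reusable helper, assuming e.stdout carries the CLI error text as in the test (the helper name is hypothetical):

import subprocess

def expect_alter_failure(kafka_tools, topic, props, expected_msg):
    """Assert that alter_topic_config fails and its output mentions
    expected_msg (hypothetical helper mirroring the pattern above)."""
    try:
        kafka_tools.alter_topic_config(topic, props)
    except subprocess.CalledProcessError as e:
        assert expected_msg in e.stdout
    else:
        raise RuntimeError("expected alter_topic_config to fail")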
Example #5
    def __init__(self, test_context):
        self.s3_bucket_name = f"panda-bucket-{uuid.uuid1()}"
        self._extra_rp_conf = dict(
            cloud_storage_enabled=True,
            cloud_storage_access_key=ArchivalTest.s3_access_key,
            cloud_storage_secret_key=ArchivalTest.s3_secret_key,
            cloud_storage_region=ArchivalTest.s3_region,
            cloud_storage_bucket=self.s3_bucket_name,
            cloud_storage_disable_tls=True,
            cloud_storage_api_endpoint=ArchivalTest.s3_host_name,
            cloud_storage_api_endpoint_port=9000,
            cloud_storage_reconciliation_interval_ms=500,
            cloud_storage_max_connections=5,
            log_compaction_interval_ms=self.log_compaction_interval_ms,
            log_segment_size=self.log_segment_size,
        )
        if test_context.function_name == "test_timeboxed_uploads":
            self._extra_rp_conf.update(
                log_segment_size=1024 * 1024 * 1024,
                cloud_storage_segment_max_upload_interval_sec=1)

        super(ArchivalTest, self).__init__(test_context=test_context,
                                           extra_rp_conf=self._extra_rp_conf)

        self.kafka_tools = KafkaCliTools(self.redpanda)
        self.rpk = RpkTool(self.redpanda)
        self.s3_client = S3Client(
            region='panda-region',
            access_key=u"panda-user",
            secret_key=u"panda-secret",
            endpoint=f'http://{ArchivalTest.s3_host_name}:9000',
            logger=self.logger)
Example #6
    def __init__(self,
                 context,
                 redpanda,
                 topic,
                 group=None,
                 offset=None,
                 partitions=None,
                 isolation_level=None,
                 from_beginning=False,
                 consumer_properties=None,
                 formatter_properties=None):
        super(KafkaCliConsumer, self).__init__(context, num_nodes=1)
        self._redpanda = redpanda
        self._topic = topic
        self._group = group
        self._offset = offset
        self._partitions = partitions
        self._isolation_level = isolation_level
        self._from_beginning = from_beginning
        # avoid shared mutable default arguments
        self._consumer_properties = consumer_properties or {}
        self._formatter_properties = formatter_properties or {}
        self._stopping = threading.Event()
        assert self._partitions is not None or self._group is not None, "either partitions or group have to be set"

        self._cli = KafkaCliTools(self._redpanda)
        self._messages = []
Example #7
    def test_changing_topic_retention(self, property, acks):
        """
        Test changing topic retention duration for topics with data produced
        with ACKS=1 and ACKS=-1. This test produces data until 10 segments
        appear, then it changes the topic's retention property and waits for
        segments to be removed
        """
        kafka_tools = KafkaCliTools(self.redpanda)

        # produce until 10 segments have accumulated
        produce_until_segments(
            self.redpanda,
            topic=self.topic,
            partition_idx=0,
            count=10,
            acks=acks,
        )
        # change the retention property under test
        kafka_tools.alter_topic_config(self.topic, {
            property: 10000,
        })
        wait_for_segments_removal(self.redpanda,
                                  self.topic,
                                  partition_idx=0,
                                  count=5)
Example #8
    def create_and_validate(self, name, custom_assignment):
        self.redpanda.logger.info(
            f"creating topic {name} with {custom_assignment}")
        cli = KafkaCliTools(self.redpanda)
        rpk = RpkTool(self.redpanda)

        cli.create_topic_with_assignment(name, custom_assignment)

        def replica_matches():
            replicas_per_partition = {}
            for p in rpk.describe_topic(name):
                replicas_per_partition[p.id] = list(p.replicas)
            self.redpanda.logger.debug(
                f"requested replicas: {custom_assignment}, current replicas: {replicas_per_partition}"
            )

            for p_id, replicas in enumerate(custom_assignment):
                if p_id not in replicas_per_partition:
                    return False

                if set(replicas) != set(replicas_per_partition[p_id]):
                    return False

            return True

        # each assignment defines a partition
        wait_until(replica_matches, 10, backoff_sec=1)
Example #9
 def alter_topic_configs(self, topic: str,
                         props: dict[str, typing.Union[str, int]]):
     """
     Alter multiple topic configuration properties.
     """
     kafka_tools = KafkaCliTools(self._redpanda)
     kafka_tools.alter_topic_config(topic, props)
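A usage sketch for this wrapper, following the self.client().alter_topic_configs(...) call sites elsewhere on this page (topic name and values are hypothetical):

self.client().alter_topic_configs("my-topic", {
    "retention.ms": 86400000,       # 1 day
    "retention.bytes": 1073741824,  # 1 GiB
})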
Example #10
 def _create_initial_topics(self):
     config = self.redpanda.security_config()
     user = config.get("sasl_plain_username")
     passwd = config.get("sasl_plain_password")
     client = KafkaCliTools(self.redpanda, user=user, passwd=passwd)
     for spec in self.topics:
         self.logger.debug(f"Creating initial topic {spec}")
         client.create_topic(spec)
Example #11
    def __init__(self, test_context):
        super(TopicAutocreateTest, self).__init__(
            test_context=test_context,
            num_brokers=1,
            extra_rp_conf={'auto_create_topics_enabled': False})

        self.kafka_tools = KafkaCliTools(self.redpanda)
        self.rpk = RpkTool(self.redpanda)
Example #12
    def __init__(self, test_context):
        extra_rp_conf = dict(log_segment_size=262144)

        super(TopicDeleteTest, self).__init__(test_context=test_context,
                                              num_brokers=3,
                                              extra_rp_conf=extra_rp_conf)

        self.kafka_tools = KafkaCliTools(self.redpanda)
Example #13
    def test_altering_topic_configuration(self, property, value):
        topic = self.topics[0].name
        kafka_tools = KafkaCliTools(self.redpanda)
        kafka_tools.alter_topic_config(topic, {property: value})
        spec = kafka_tools.describe_topic(topic)

        # e.g. retention.ms is TopicSpec.retention_ms
        attr_name = property.replace(".", "_")
        assert getattr(spec, attr_name, None) == value
Example #14
 def test_create_topic_with_custom_partition_assignment(self):
     # 3 partitions with a single replica
     self.create_and_validate("topic-1", [[1], [3], [5]])
     # 3 partitions with replication factor of 2
     self.create_and_validate("topic-2", [[1, 2], [3, 4], [5, 1]])
     # 1 partition with replication factor of 3
     self.create_and_validate("topic-3", [[2, 4, 1]])
Example #15
    def test_create_partitions(self):
        topic = self.topics[0].name
        cli = KafkaCliTools(self.redpanda)
        # add 5 partitions to the topic
        cli.add_topic_partitions(topic, 5)

        res = cli.describe_topic(topic)
        # initially topic had 2 partitions, we added 5
        assert res.partition_count == 7
Example #16
 def test_set_data_policy(self):
     topic = self.topics[0].name
     kafka_tools = KafkaCliTools(self.redpanda)
      kafka_tools.alter_topic_config(
         topic, {
             TopicSpec.PROPERTY_DATA_POLICY_FUNCTION_NAME: "1",
             TopicSpec.PROPERTY_DATA_POLICY_SCRIPT_NAME: "2"
         })
     spec = kafka_tools.describe_topic(topic)
     assert spec.redpanda_datapolicy == self._get_data_policy(1, 2)
Example #17
    def __init__(self, test_context):
        self.s3_bucket = test_context.globals.get(self.GLOBAL_S3_BUCKET, None)
        self.s3_region = test_context.globals.get(self.GLOBAL_S3_REGION, None)
        self.s3_access_key = test_context.globals.get(
            self.GLOBAL_S3_ACCESS_KEY, None)
        self.s3_secret_key = test_context.globals.get(
            self.GLOBAL_S3_SECRET_KEY, None)
        self.s3_endpoint = None
        self.real_thing = self.s3_bucket and self.s3_region and self.s3_access_key and self.s3_secret_key
        if self.real_thing:
            extra_rp_conf = dict(
                developer_mode=True,
                disable_metrics=False,
                cloud_storage_enabled=True,
                cloud_storage_access_key=self.s3_access_key,
                cloud_storage_secret_key=self.s3_secret_key,
                cloud_storage_region=self.s3_region,
                cloud_storage_bucket=self.s3_bucket,
                cloud_storage_reconciliation_interval_ms=10000,
                cloud_storage_max_connections=10,
                cloud_storage_trust_file="/etc/ssl/certs/ca-certificates.crt",
                log_segment_size=32 * 1048576  # 32MB
            )
        else:
            bucket_name = f"{ArchivalTest.MINIO_BUCKET_NAME}-{uuid.uuid1()}"
            self.s3_bucket = bucket_name
            self.s3_region = ArchivalTest.MINIO_REGION
            self.s3_access_key = ArchivalTest.MINIO_ACCESS_KEY
            self.s3_secret_key = ArchivalTest.MINIO_SECRET_KEY
            extra_rp_conf = dict(
                developer_mode=True,
                disable_metrics=False,
                cloud_storage_enabled=True,
                cloud_storage_access_key=ArchivalTest.MINIO_ACCESS_KEY,
                cloud_storage_secret_key=ArchivalTest.MINIO_SECRET_KEY,
                cloud_storage_region=ArchivalTest.MINIO_REGION,
                cloud_storage_bucket=bucket_name,
                cloud_storage_disable_tls=True,
                cloud_storage_api_endpoint=ArchivalTest.MINIO_HOST_NAME,
                cloud_storage_api_endpoint_port=9000,
                cloud_storage_reconciliation_interval_ms=10000,
                cloud_storage_max_connections=5,
                log_segment_size=32 * 1048576  # 32MB
            )
            self.s3_endpoint = f'http://{ArchivalTest.MINIO_HOST_NAME}:9000'

        super(ArchivalTest, self).__init__(test_context=test_context,
                                           extra_rp_conf=extra_rp_conf)

        self.kafka_tools = KafkaCliTools(self.redpanda)
        self.s3_client = S3Client(region=self.s3_region,
                                  access_key=self.s3_access_key,
                                  secret_key=self.s3_secret_key,
                                  endpoint=self.s3_endpoint,
                                  logger=self.logger)
Example #18
 def setUp(self):
     self.mount_mu = MountMuService()
     self.redpanda_mu = RedpandaMuService()
     self.kafkakv_mu = KafkaKVMuService(self.redpanda_mu)
     seq = SeqMuService([self.mount_mu, self.redpanda_mu, self.kafkakv_mu])
     self.service = MuServiceRunner(seq, self.test_context, self.num_nodes)
     self.service.start()
     redpanda = RedpandaMuServiceServiceProxy(self.service,
                                              self.redpanda_mu)
     tools = KafkaCliTools(redpanda, KafkaCliTools.VERSIONS[0])
     tools.create_topic(KafkaKVMuService.TOPIC)
Example #19
    def test_changing_topic_retention_with_restart(self):
        """
        Test changing topic retention duration for topics with data produced
        with ACKS=1 and ACKS=-1. This test produces data until 10 segments
        appear, then it changes retention topic property and waits for some
        segmetnts to be removed
        """
        segment_size = 1048576

        # produce until 20 segments have accumulated
        produce_until_segments(
            self.redpanda,
            topic=self.topic,
            partition_idx=0,
            count=20,
            acks=-1,
        )

        # restart all nodes to force replicating raft configuration
        self.redpanda.restart_nodes(self.redpanda.nodes)

        kafka_tools = KafkaCliTools(self.redpanda)
        # Wait for controller, alter configs doesn't have a retry loop
        kafka_tools.describe_topic(self.topic)

        # change retention bytes to preserve 15 segments
        self.client().alter_topic_configs(
            self.topic, {
                TopicSpec.PROPERTY_RETENTION_BYTES: 15 * segment_size,
            })
        wait_for_segments_removal(redpanda=self.redpanda,
                                  topic=self.topic,
                                  partition_idx=0,
                                  count=16)

        # change retention bytes again to preserve 10 segments
        self.client().alter_topic_configs(
            self.topic, {
                TopicSpec.PROPERTY_RETENTION_BYTES: 10 * segment_size,
            })
        wait_for_segments_removal(redpanda=self.redpanda,
                                  topic=self.topic,
                                  partition_idx=0,
                                  count=11)

        # change retention bytes again to preserve 4 segments
        self.client().alter_topic_configs(
            self.topic, {
                TopicSpec.PROPERTY_RETENTION_BYTES: 4 * segment_size,
            })
        wait_for_segments_removal(redpanda=self.redpanda,
                                  topic=self.topic,
                                  partition_idx=0,
                                  count=5)
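The three alter-and-wait steps above repeat one pattern: to retain N full segments, set retention.bytes to N * segment_size and wait until N + 1 segments remain (the extra one appears to be the active segment, which is never reclaimed). A sketch that factors this out, assuming the same imports as the test (the helper name is hypothetical):

    def shrink_retention(self, keep_segments, segment_size=1048576):
        """Hypothetical helper: shrink retention.bytes to roughly
        keep_segments full segments, then wait until keep_segments + 1
        segments remain (the +1 accounts for the active segment)."""
        self.client().alter_topic_configs(
            self.topic, {
                TopicSpec.PROPERTY_RETENTION_BYTES:
                keep_segments * segment_size,
            })
        wait_for_segments_removal(redpanda=self.redpanda,
                                  topic=self.topic,
                                  partition_idx=0,
                                  count=keep_segments + 1)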
Example #20
    def __init__(self, test_context):
        extra_rp_conf = dict(log_segment_size=262144)

        topics = dict(topic=dict(partitions=3, cleanup_policy="compact"))

        super(TopicDeleteTest, self).__init__(test_context=test_context,
                                              num_brokers=3,
                                              extra_rp_conf=extra_rp_conf,
                                              topics=topics)

        self.kafka_tools = KafkaCliTools(self.redpanda)
Example #21
    def test_produce_topic(self):
        """
        Create a topic and verify that pandaproxy can produce to it.
        """
        name = "pandaproxy-topic-{}".format(uuid.uuid4())
        self.logger.debug("Topic name %s", name)

        prev = set(self._get_topics())
        self.logger.debug("Existing topics %s", prev)
        assert name not in prev

        data = '{"records": [{"value": "dmVjdG9yaXplZA==", "partition": 0},{"value": "cGFuZGFwcm94eQ==", "partition": 1},{"value": "bXVsdGlicm9rZXI=", "partition": 2}]}'

        self.logger.debug("Producing to non-existent topic")
        produce_result = self._produce_topic(name, data)
        for o in produce_result["offsets"]:
            assert o["error_code"] == 3
            assert o["offset"] == -1

        kc = KafkaCat(self.redpanda)

        self.logger.debug("Creating test topic")
        kafka_tools = KafkaCliTools(self.redpanda)
        kafka_tools.create_topic(
            TopicSpec(name=name, replication_factor=1, partition_count=3))

        self.logger.debug("Waiting for leaders to settle")
        has_leaders = False
        while not has_leaders:
            topics = kc.metadata()["topics"]
            maybe_leaders = True
            for t in topics:
                if t["topic"] == name:
                    for p in t["partitions"]:
                        if p["leader"] == -1:
                            maybe_leaders = False
            has_leaders = maybe_leaders
        # TODO:
        #  Despite the above test, Pandaproxy can still get back no leaders
        #  Query Pandaproxy metadata to see when leaders have settled
        #  The retry logic for produce should have sufficient time for this
        #  additional settle time.

        self.logger.debug("Producing to topic")
        produce_result = self._produce_topic(name, data)
        self.logger.debug("Producing to topic: %s", produce_result)
        for o in produce_result["offsets"]:
            assert o["offset"] == 1, f'error_code {o["error_code"]}'

        self.logger.debug(f"Consuming topic: {name}")
        assert kc.consume_one(name, 0, 1)["payload"] == "vectorized"
        assert kc.consume_one(name, 1, 1)["payload"] == "pandaproxy"
        assert kc.consume_one(name, 2, 1)["payload"] == "multibroker"
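The settle loop above busy-polls without a timeout, and the TODO notes it can still race. A bounded variant with backoff, using KafkaCat.metadata() as in the test (the helper itself is hypothetical):

import time

def wait_for_leaders(kc, name, timeout_sec=30, backoff_sec=1):
    """Poll metadata until every partition of `name` has a leader,
    or raise after timeout_sec (hypothetical helper)."""
    deadline = time.time() + timeout_sec
    while time.time() < deadline:
        for t in kc.metadata()["topics"]:
            if t["topic"] == name:
                if all(p["leader"] != -1 for p in t["partitions"]):
                    return
        time.sleep(backoff_sec)
    raise TimeoutError(f"leaders for {name} did not settle")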
Example #22
 def _create_topics(self,
                    names=create_topic_names(1),
                    partitions=1,
                    replicas=1):
     self.logger.debug(f"Creating topics: {names}")
     kafka_tools = KafkaCliTools(self.redpanda)
     for name in names:
         kafka_tools.create_topic(
             TopicSpec(name=name,
                       partition_count=partitions,
                       replication_factor=replicas))
     return names
Example #23
    def __init__(self, test_context):
        extra_rp_conf = dict(
            log_segment_size=1048576,
            retention_bytes=5242880,
            log_compaction_interval_ms=2000,
        )

        super(PrefixTruncateRecoveryTest,
              self).__init__(test_context=test_context,
                             num_brokers=3,
                             extra_rp_conf=extra_rp_conf)

        self.kafka_tools = KafkaCliTools(self.redpanda)
Example #24
    def _produce_consumed_space(self, orig_free: list[float]) -> bool:
        """ Test helper: produce about 10MiB of data and return true if any of
        the nodes saw a reduction in free space.
        """
        num_records = 10240
        record_size = 1024

        # Produce data and check whether free space dropped on any node
        ktools = KafkaCliTools(self.redpanda)
        ktools.produce(self.topic, num_records, record_size, acks=-1)

        new_free = self._node_disk_free_bytes()
        return self._count_greater(orig_free, new_free) > 0
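A call-site sketch for the helper above, pairing it with _node_disk_free_bytes() as the helper itself does (the pairing is an assumption):

orig_free = self._node_disk_free_bytes()
assert self._produce_consumed_space(orig_free), \
    "expected at least one node to report less free space"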
Example #25
    def test_configuration_properties_kafka_config_allowlist(self):
        topic = self.topics[0].name
        kafka_tools = KafkaCliTools(self.redpanda)
        spec = kafka_tools.describe_topic(topic)
        self.client().alter_topic_configs(
            topic, {
                "unclean.leader.election.enable": True,
                TopicSpec.PROPERTY_SEGMENT_SIZE: spec.segment_bytes + 1,
            })

        spec.segment_bytes += 1
        new_spec = kafka_tools.describe_topic(topic)
        assert new_spec == spec
Example #26
    def test_scram(self):
        topic = TopicSpec()
        client = KafkaCliTools(self.redpanda,
                               user="******",
                               passwd="redpanda_pass")
        client.create_topic(topic)

        client = KafkaCliTools(self.redpanda,
                               user="******",
                               passwd="bad_password")
        try:
            client.list_topics()
            assert False, "Listing topics should fail"
        except Exception:
            pass

        client = KafkaCliTools(self.redpanda,
                               user="******",
                               passwd="redpanda_pass")
        topics = client.list_topics()
        print(topics)
        assert topic.name in topics
Example #27
    def test_configuration_properties_name_validation(self):
        topic = self.topics[0].name
        kafka_tools = KafkaCliTools(self.redpanda)
        spec = kafka_tools.describe_topic(topic)
        for _ in range(5):
            key = self.random_string(5)
            try:
                kafka_tools.alter_topic_config(topic, {key: "123"})
            except Exception as inst:
                test_logger.info("exception %s", inst)

        new_spec = kafka_tools.describe_topic(topic)
        # topic spec shouldn't change
        assert new_spec == spec
Example #28
    def test_altering_multiple_topic_configurations(self):
        topic = self.topics[0].name
        kafka_tools = KafkaCliTools(self.redpanda)
        res = kafka_tools.alter_topic_config(
            topic, {
                TopicSpec.PROPERTY_SEGMENT_SIZE: 1024,
                TopicSpec.PROPERTY_RETENTION_TIME: 360000,
                TopicSpec.PROPERTY_TIMESTAMP_TYPE: "LogAppendTime"
            })
        spec = kafka_tools.describe_topic(topic)

        assert spec.segment_bytes == 1024
        assert spec.retention_ms == 360000
        assert spec.message_timestamp_type == "LogAppendTime"
Example #29
 def _create_topics(self,
                    names=create_topic_names(1),
                    partitions=1,
                    replicas=1,
                    cleanup_policy=TopicSpec.CLEANUP_DELETE):
     self.logger.debug(f"Creating topics: {names}")
     kafka_tools = KafkaCliTools(self.redpanda)
      for name in names:
          kafka_tools.create_topic(
              TopicSpec(name=name,
                        partition_count=partitions,
                        replication_factor=replicas,
                        cleanup_policy=cleanup_policy))
     assert set(names).issubset(self._get_topics().json())
     return names
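A usage sketch, assuming TopicSpec.CLEANUP_COMPACT exists alongside the CLEANUP_DELETE constant used in Example #1 (topic name and values are hypothetical):

self._create_topics(["compacted-topic"],
                    partitions=3,
                    replicas=3,
                    cleanup_policy=TopicSpec.CLEANUP_COMPACT)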
Example #30
    def test_timequery_after_segments_eviction(self):
        """
        Test checking if the offset returned by time based index is
        valid during applying log cleanup policy
        """
        segment_size = 1048576

        # produce until 10 segments have accumulated
        produce_until_segments(
            self.redpanda,
            topic=self.topic,
            partition_idx=0,
            count=10,
            acks=-1,
        )

        # restart all nodes to force replicating raft configuration
        self.redpanda.restart_nodes(self.redpanda.nodes)

        kafka_tools = KafkaCliTools(self.redpanda)
        # Wait for controller, alter configs doesn't have a retry loop
        kafka_tools.describe_topic(self.topic)

        # change retention bytes to preserve 2 segments
        self.client().alter_topic_configs(
            self.topic, {
                TopicSpec.PROPERTY_RETENTION_BYTES: 2 * segment_size,
            })

        def validate_time_query_until_deleted():
            def done():
                kcat = KafkaCat(self.redpanda)
                ts = 1638748800  # Dec 6, 2021 - old timestamp, query first offset
                offset = kcat.query_offset(self.topic, 0, ts)
                # assert that offset is valid
                assert offset >= 0

                topic_partitions = segments_count(self.redpanda, self.topic, 0)
                return all(p <= 5 for p in topic_partitions)

            wait_until(done,
                       timeout_sec=30,
                       backoff_sec=5,
                       err_msg="Segments were not removed")

        validate_time_query_until_deleted()