def test_incremental_config(self):
    """Data-policy properties can be set together, and a partial update
    (function name without script name) is rejected without modifying
    the topic."""
    name = self.topics[0].name
    cli = KafkaCliTools(self.redpanda)

    # Setting both halves of the data policy at once succeeds.
    cli.alter_topic_config(
        name, {
            TopicSpec.PROPERTY_DATA_POLICY_FUNCTION_NAME: "1",
            TopicSpec.PROPERTY_DATA_POLICY_SCRIPT_NAME: "2"
        })
    assert cli.describe_topic(name).redpanda_datapolicy == \
        self._get_data_policy(1, 2)

    # Expect that trying to set with a function name but no script fails.
    try:
        r = cli.alter_topic_config(
            name, {TopicSpec.PROPERTY_DATA_POLICY_FUNCTION_NAME: "3"})
    except subprocess.CalledProcessError as e:
        # Expected: request fails to update topic
        self.logger.info(f"Kafka CLI alter failed as expected: {e.stdout}")
        assert "unable to parse property" in e.stdout
    else:
        raise RuntimeError(f"Expected API error, got {r}")

    # The failed alter must not have modified the topic.
    assert cli.describe_topic(name).redpanda_datapolicy == \
        self._get_data_policy(1, 2)
def test_changing_topic_retention_with_restart(self):
    """
    Test changing topic retention duration for topics with data produced
    with ACKS=1 and ACKS=-1. This test produces data until 20 segments
    appear, then it repeatedly lowers the retention.bytes topic property
    and waits for segments to be removed.
    """
    segment_size = 1048576

    # Produce until the partition has accumulated 20 segments.
    produce_until_segments(
        self.redpanda,
        topic=self.topic,
        partition_idx=0,
        count=20,
        acks=-1,
    )

    # restart all nodes to force replicating raft configuration
    self.redpanda.restart_nodes(self.redpanda.nodes)

    kafka_tools = KafkaCliTools(self.redpanda)
    # Wait for controller, alter configs doesn't have a retry loop
    kafka_tools.describe_topic(self.topic)

    # Progressively shrink retention.bytes and verify segments get
    # removed each step. Each entry is (retention expressed in whole
    # segments, max segment count expected to remain afterwards).
    for retention_segments, max_remaining in ((15, 16), (10, 11), (4, 5)):
        self.client().alter_topic_configs(
            self.topic, {
                TopicSpec.PROPERTY_RETENTION_BYTES:
                retention_segments * segment_size,
            })
        wait_for_segments_removal(redpanda=self.redpanda,
                                  topic=self.topic,
                                  partition_idx=0,
                                  count=max_remaining)
def test_configuration_properties_kafka_config_allowlist(self):
    """Altering an allow-listed Kafka property alongside a Redpanda one
    applies only the Redpanda-visible change to the described spec."""
    name = self.topics[0].name
    cli = KafkaCliTools(self.redpanda)
    expected = cli.describe_topic(name)

    grown_segment = expected.segment_bytes + 1
    self.client().alter_topic_configs(
        name, {
            "unclean.leader.election.enable": True,
            TopicSpec.PROPERTY_SEGMENT_SIZE: grown_segment,
        })

    # Only the segment size change should show up in the new spec.
    expected.segment_bytes = grown_segment
    assert cli.describe_topic(name) == expected
class TopicAutocreateTest(RedpandaTest):
    """
    Verify that autocreation works, and that the settings of an autocreated
    topic match those for a topic created by hand with rpk.
    """
    def __init__(self, test_context):
        super(TopicAutocreateTest, self).__init__(
            test_context=test_context,
            num_brokers=1,
            extra_rp_conf={'auto_create_topics_enabled': False})

        self.kafka_tools = KafkaCliTools(self.redpanda)
        self.rpk = RpkTool(self.redpanda)

    @cluster(num_nodes=1)
    def topic_autocreate_test(self):
        auto_topic = 'autocreated'
        manual_topic = "manuallycreated"

        # With autocreation disabled, producing to a nonexistent topic should not work.
        try:
            # Use rpk rather than kafka CLI because rpk errors out promptly
            self.rpk.produce(auto_topic, "foo", "bar")
        except Exception:
            # The write failed, and shouldn't have created a topic
            assert auto_topic not in self.kafka_tools.list_topics()
        else:
            assert False, "Producing to a nonexistent topic should fail"

        # Enable autocreation
        self.redpanda.restart_nodes(self.redpanda.nodes,
                                    {'auto_create_topics_enabled': True})

        # Auto create topic
        assert auto_topic not in self.kafka_tools.list_topics()
        self.kafka_tools.produce(auto_topic, 1, 4096)
        assert auto_topic in self.kafka_tools.list_topics()
        auto_topic_spec = self.kafka_tools.describe_topic(auto_topic)

        assert auto_topic_spec.retention_ms is None
        assert auto_topic_spec.retention_bytes is None

        # Create topic by hand, compare its properties to the autocreated one.
        # FIX: describe the manually created topic, not the auto-created one
        # again — the old code compared auto_topic_spec against itself, which
        # made the remaining assertions vacuous.
        self.rpk.create_topic(manual_topic)
        manual_topic_spec = self.kafka_tools.describe_topic(manual_topic)
        assert manual_topic_spec.retention_ms == auto_topic_spec.retention_ms
        assert manual_topic_spec.retention_bytes == auto_topic_spec.retention_bytes

        # Clear name and compare the rest of the attributes
        manual_topic_spec.name = auto_topic_spec.name = None
        assert manual_topic_spec == auto_topic_spec
def test_configuration_properties_name_validation(self):
    """Altering a topic with random (unknown) property names must fail,
    and the failed attempts must leave the topic spec unchanged.

    FIX: the original version only logged the exception; if the alter
    unexpectedly SUCCEEDED the test silently passed. Now an unexpected
    success raises. (Also drops the unused `res` binding.)
    """
    topic = self.topics[0].name
    kafka_tools = KafkaCliTools(self.redpanda)
    spec = kafka_tools.describe_topic(topic)
    for i in range(0, 5):
        key = self.random_string(5)
        try:
            kafka_tools.alter_topic_config(topic, {key: "123"})
        except Exception as inst:
            # Expected: unknown property names are rejected.
            test_logger.info("exception %s", inst)
        else:
            raise RuntimeError("Alter should have failed but succeeded!")

    new_spec = kafka_tools.describe_topic(topic)
    # topic spec shouldn't change
    assert new_spec == spec
def test_timequery_after_segments_eviction(self):
    """
    Test checking if the offset returned by time based index is valid
    during applying log cleanup policy
    """
    segment_size = 1048576

    # Produce until the partition has accumulated 10 segments.
    produce_until_segments(
        self.redpanda,
        topic=self.topic,
        partition_idx=0,
        count=10,
        acks=-1,
    )

    # restart all nodes to force replicating raft configuration
    self.redpanda.restart_nodes(self.redpanda.nodes)

    kafka_tools = KafkaCliTools(self.redpanda)
    # Wait for controller, alter configs doesn't have a retry loop
    kafka_tools.describe_topic(self.topic)

    # Shrink retention.bytes so that only ~2 segments are preserved,
    # forcing eviction of the older ones.
    self.client().alter_topic_configs(
        self.topic, {
            TopicSpec.PROPERTY_RETENTION_BYTES: 2 * segment_size,
        })

    def validate_time_query_until_deleted():
        def done():
            # Time-query with an old timestamp must keep resolving to a
            # valid offset even while segments are being evicted.
            kcat = KafkaCat(self.redpanda)
            ts = 1638748800  # old timestamp (Dec 2021), query first offset
            offset = kcat.query_offset(self.topic, 0, ts)
            # assert that offset is valid
            assert offset >= 0

            # Finished once every replica reports at most 5 segments.
            # (The original built an unused intermediate list here.)
            topic_partitions = segments_count(self.redpanda, self.topic, 0)
            return all(p <= 5 for p in topic_partitions)

        wait_until(done,
                   timeout_sec=30,
                   backoff_sec=5,
                   err_msg="Segments were not removed")

    validate_time_query_until_deleted()
def test_configuration_properties_name_validation(self):
    """Unknown property names must be rejected, and the rejected alters
    must not mutate the topic configuration."""
    name = self.topics[0].name
    cli = KafkaCliTools(self.redpanda)
    before = cli.describe_topic(name)

    for _ in range(5):
        bogus_key = self.random_string(5)
        try:
            cli.alter_topic_config(name, {bogus_key: "123"})
        except Exception as err:
            self.logger.info(
                "alter failed as expected: expected exception %s", err)
        else:
            raise RuntimeError("Alter should have failed but succeeded!")

    # topic spec shouldn't change
    assert cli.describe_topic(name) == before
def test_altering_topic_configuration(self, property, value):
    """Alter a single topic property and verify the described spec
    reflects the new value."""
    name = self.topics[0].name
    cli = KafkaCliTools(self.redpanda)
    cli.alter_topic_config(name, {property: value})

    # e.g. retention.ms is TopicSpec.retention_ms
    spec_attr = property.replace(".", "_")
    described = cli.describe_topic(name)
    assert getattr(described, spec_attr, None) == value
def test_create_partitions(self):
    """Adding partitions through the CLI grows the topic's partition
    count accordingly."""
    name = self.topics[0].name
    cli = KafkaCliTools(self.redpanda)
    # initially topic had 2 partitions; grow it by 5 more
    cli.add_topic_partitions(name, 5)
    assert cli.describe_topic(name).partition_count == 7
def test_set_data_policy(self):
    """Setting both data-policy properties together installs the policy
    on the topic. (Drops the unused `res` binding from the original.)"""
    topic = self.topics[0].name
    kafka_tools = KafkaCliTools(self.redpanda)
    # Function name and script name must be supplied together.
    kafka_tools.alter_topic_config(
        topic, {
            TopicSpec.PROPERTY_DATA_POLICY_FUNCTION_NAME: "1",
            TopicSpec.PROPERTY_DATA_POLICY_SCRIPT_NAME: "2"
        })
    spec = kafka_tools.describe_topic(topic)
    assert spec.redpanda_datapolicy == self._get_data_policy(1, 2)
def test_altering_multiple_topic_configurations(self):
    """Altering several properties in one request applies all of them.
    (Drops the unused `res` binding from the original.)"""
    topic = self.topics[0].name
    kafka_tools = KafkaCliTools(self.redpanda)
    kafka_tools.alter_topic_config(
        topic, {
            TopicSpec.PROPERTY_SEGMENT_SIZE: 1024,
            TopicSpec.PROPERTY_RETENTION_TIME: 360000,
            TopicSpec.PROPERTY_TIMESTAMP_TYPE: "LogAppendTime"
        })
    spec = kafka_tools.describe_topic(topic)

    # Every property set above must be visible in the described spec.
    assert spec.segment_bytes == 1024
    assert spec.retention_ms == 360000
    assert spec.message_timestamp_type == "LogAppendTime"
def check():
    """Return True once both bulk describe and per-topic describe report
    the expected partition count and replication factor for all topics."""
    client = KafkaCliTools(self.redpanda)

    # bulk describe
    output = client.describe_topics()
    for topic in topics:
        if f"partition_count={topic.partition_count}" not in output:
            return False
        if f"replication_factor={topic.replication_factor}" not in output:
            return False

    # and targetted topic describe
    # (tuple unpacking instead of the original's meta[0]/meta[1] indexing)
    described = [client.describe_topic(topic.name) for topic in topics]
    for expected, actual in zip(topics, described):
        if expected.partition_count != actual.partition_count:
            return False
        if expected.replication_factor != actual.replication_factor:
            return False

    return True
def test_altering_topic_configuration(self, property, value):
    """Alter a single topic property and verify the described spec
    reflects the new value.

    FIX: the original altered the config and described the topic but
    asserted nothing, so it could never fail. Verify the property landed,
    matching the sibling parameterized test of the same name.
    """
    topic = self.topics[0].name
    kafka_tools = KafkaCliTools(self.redpanda)
    kafka_tools.alter_topic_config(topic, {property: value})
    spec = kafka_tools.describe_topic(topic)

    # e.g. retention.ms is stored as TopicSpec.retention_ms
    attr_name = property.replace(".", "_")
    assert getattr(spec, attr_name, None) == value
def _partition_count(self, topic):
    """Return the partition count reported by describe for *topic*."""
    cli = KafkaCliTools(self.redpanda)
    return cli.describe_topic(topic).partition_count
def test_default_data_policy(self):
    """A freshly created topic carries no data policy."""
    topic = self.topics[0].name
    kafka_tools = KafkaCliTools(self.redpanda)
    spec = kafka_tools.describe_topic(topic)
    # `is None` identity check instead of the original `== None`
    assert spec.redpanda_datapolicy is None