def init_partitions(settings: Settings):
    client = KafkaAdminClient(bootstrap_servers=settings.kafka_server)
    try:
        client.create_partitions(
            topic_partitions={
                settings.kafka_topic: NewPartitions(total_count=len(settings.schema_table.keys()))
            })
    except Exception as e:
        logger.warning(f"init_partitions error:{e}")
def init_partitions():
    client = KafkaAdminClient(bootstrap_servers=settings.KAFKA_SERVER)
    try:
        client.create_partitions(
            topic_partitions={
                settings.KAFKA_TOPIC: NewPartitions(total_count=len(settings.PARTITIONS.keys()))
            })
    except Exception as e:
        logger.warning(f'init_partitions error:{e}')
def _init_partitions(cls, settings: Settings):
    client = KafkaAdminClient(bootstrap_servers=settings.kafka_servers)
    try:
        client.create_partitions(
            topic_partitions={
                settings.kafka_topic: NewPartitions(
                    total_count=len(settings.kafka_partitions.keys()))
            })
    except Exception as e:
        logger.debug(f"init_partitions error:{e}")
def create_partition(self):
    from kafka.admin import KafkaAdminClient, NewPartitions
    try:
        admin_client = KafkaAdminClient(bootstrap_servers=self.cur_broker)
        partitions = dict()
        pn = self.newpn_input.get()
        if pn and pn != "":
            pn = int(pn)
            newp = NewPartitions(total_count=pn)
            print(self.cur_topic)
            partitions[self.cur_topic] = newp
            admin_client.create_partitions(partitions)
    except Exception as e:
        logger.error("{}".format(e))
def extend_partitions(topic_name, topic_num_partitions):
    """Increase the number of partitions of the given topic."""
    admin_client = init_client("KafkaAdminClient")
    new_partitions = NewPartitions(total_count=topic_num_partitions, new_assignments=None)
    topic_partitions = {topic_name: new_partitions}
    try:
        admin_client.create_partitions(topic_partitions)
        return True
    except Exception:
        log.error(traceback.format_exc())
        return False
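# A minimal usage sketch for extend_partitions above; init_client and log are assumed
# to come from the snippet's surrounding module, and the topic name is a placeholder.
if extend_partitions("orders", 8):
    print("partition count for 'orders' raised to 8")
else:
    print("failed to extend partitions, see error log")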
def test_admin_client(self):
    """
    This test verifies that Kafka Admin Client can still be used to manage Kafka.
    """
    admin_client = KafkaAdminClient(
        bootstrap_servers=KafkaBrokerIntegrationTest.kafka_address())

    # Create a topic with 3 partitions.
    new_topic_spec = NewTopic(name='test_admin_client', num_partitions=3, replication_factor=1)
    create_response = admin_client.create_topics([new_topic_spec])
    error_data = create_response.topic_errors
    self.assertEqual(len(error_data), 1)
    self.assertEqual(error_data[0], (new_topic_spec.name, 0, None))

    # Alter topic (change some Kafka-level property).
    config_resource = ConfigResource(ConfigResourceType.TOPIC, new_topic_spec.name,
                                     {'flush.messages': 42})
    alter_response = admin_client.alter_configs([config_resource])
    error_data = alter_response.resources
    self.assertEqual(len(error_data), 1)
    self.assertEqual(error_data[0][0], 0)

    # Add 2 more partitions to topic.
    new_partitions_spec = {new_topic_spec.name: NewPartitions(5)}
    new_partitions_response = admin_client.create_partitions(new_partitions_spec)
    error_data = new_partitions_response.topic_errors
    self.assertEqual(len(error_data), 1)
    self.assertEqual(error_data[0], (new_topic_spec.name, 0, None))

    # Delete a topic.
    delete_response = admin_client.delete_topics([new_topic_spec.name])
    error_data = delete_response.topic_error_codes
    self.assertEqual(len(error_data), 1)
    self.assertEqual(error_data[0], (new_topic_spec.name, 0))

    self.metrics.collect_final_metrics()
    self.metrics.assert_metric_increase('create_topics', 1)
    self.metrics.assert_metric_increase('alter_configs', 1)
    self.metrics.assert_metric_increase('create_partitions', 1)
    self.metrics.assert_metric_increase('delete_topics', 1)
def create_partitions_in_topic(self, partitions, **kwargs):
    """
    create partitions in topic

    Arguments:
      partitions(list): list of ['topic_name', 'num_partitions'] lists
                        example: [['topic1', 4], ['topic2', 5]]
      timeout(int): timeout in milliseconds

    Returns:
      result(bool): False if an exception occurs, True otherwise
    """
    timeout = kwargs.get("timeout", None)
    validate = kwargs.get("validate", False)
    topic_partitions = {tup[0]: NewPartitions(total_count=tup[1]) for tup in partitions}
    print_info("creating partitions in topic")
    try:
        self.kafka_client.create_partitions(topic_partitions=topic_partitions,
                                            timeout_ms=timeout,
                                            validate_only=validate)
        result = True
    except KafkaError as exc:
        print_error("Exception during creating partitions - {}".format(exc))
        result = False
    return result
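# A hedged usage sketch for create_partitions_in_topic above. `kafka_wrapper` is a
# hypothetical object that exposes a kafka-python KafkaAdminClient as `kafka_client`
# together with the print_info/print_error helpers the method relies on.
ok = kafka_wrapper.create_partitions_in_topic([['topic1', 4], ['topic2', 5]],
                                              timeout=10000,
                                              validate=True)  # validate_only: broker checks, nothing changes
print("partition request accepted" if ok else "partition request failed")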
import time

from kafka import KafkaAdminClient, KafkaProducer
from kafka.admin import NewTopic, NewPartitions

kafka_broker = "localhost:9092"
topic = "test"
num_partitions = 4

admin_client = KafkaAdminClient(bootstrap_servers=kafka_broker, client_id='admin-client')
try:
    topic_list = []
    topic_list.append(
        NewTopic(name=topic, num_partitions=num_partitions, replication_factor=1))
    admin_client.create_topics(new_topics=topic_list, validate_only=False)
except Exception:
    # if the topic already exists, make sure it has the desired partition count instead
    try:
        admin_client.create_partitions({topic: NewPartitions(num_partitions)})
    except Exception:
        pass

producer = KafkaProducer(bootstrap_servers=[kafka_broker])
i = 0
while True:
    i += 1
    partition = i % num_partitions
    producer.send(topic, partition=partition, value=str(i).encode('utf-8'))
    print(f"Sent {i} on partition {partition}")
    time.sleep(1)
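# A hedged counterpart to the producer loop above: a plain kafka-python consumer that
# prints the partition each record arrived on, which should cycle 1, 2, 3, 0, ... given
# the i % num_partitions routing. Broker and topic names mirror the snippet's values.
from kafka import KafkaConsumer

consumer = KafkaConsumer('test',
                         bootstrap_servers=['localhost:9092'],
                         auto_offset_reset='earliest')
for message in consumer:
    print(f"Received {message.value.decode('utf-8')} from partition {message.partition}")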
from kafka import KafkaAdminClient
from kafka.admin import NewPartitions

admin_client = KafkaAdminClient(bootstrap_servers=['localhost:9092'])

topic_partitions = {'userInfo': NewPartitions(total_count=4)}
admin_client.create_partitions(topic_partitions)
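# Optional follow-up sketch (not part of the snippet above): confirm the new partition
# count with a plain consumer; partitions_for_topic returns the set of partition ids,
# so its length should now be 4 for 'userInfo'.
from kafka import KafkaConsumer

consumer = KafkaConsumer(bootstrap_servers=['localhost:9092'])
print(len(consumer.partitions_for_topic('userInfo')))  # expected: 4
consumer.close()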
# Fragment of a maintenance script; bootstrap_servers, topic_pattern, need_partitions
# and sleep_after are expected to be defined earlier in the original script.
import re
from time import sleep

from kafka import KafkaAdminClient, KafkaConsumer
from kafka.admin import NewPartitions

sleeping_time = 20
admin_client = KafkaAdminClient(bootstrap_servers=bootstrap_servers)
consumer = KafkaConsumer(group_id='test', bootstrap_servers=bootstrap_servers)
topics = consumer.topics()
cdp_topics = [topic for topic in topics if re.match(topic_pattern, topic)]
topics_amount = len(cdp_topics)
counter = 0
for topic in cdp_topics:
    partitions = len(consumer.partitions_for_topic(topic))
    if partitions < need_partitions:
        print(f'Increase number of partitions in topic {topic} to {need_partitions}')
        counter += 1
        topic_partitions = {}
        topic_partitions[topic] = NewPartitions(total_count=need_partitions)
        admin_client.create_partitions(topic_partitions)
        if counter % sleep_after == 0 and counter != 0 and counter <= topics_amount:
            print(f"Sleeping {sleeping_time} seconds")
            sleep(sleeping_time)
print(f"Upgraded {counter} of {topics_amount} topics!")
consumer.close()
admin_client.close()
# Fragment of a topic bootstrap script; client, update, admin_client, topic_list and
# the yaml module are set up earlier in the original script.
client.poll(future=update)
metadata = client.cluster
with open('configuration/kafka_topics.yaml', 'r') as topicsdata:
    topics_params = yaml.safe_load(topicsdata)
for topic in topics_params['topics']:
    if topic['name'] not in metadata.topics():
        print('Topic ' + topic['name'] + ' will be created')
        topic_list.append(
            NewTopic(name=topic['name'],
                     num_partitions=topic['partitions'],
                     replication_factor=topic['replication-factor']))
    if topic['name'] in metadata.topics():
        if len(metadata.partitions_for_topic(topic['name'])) < topic['partitions']:
            print('New partitions for topic: ' + topic['name'] + ' will be created')
            num_partitions = NewPartitions(topic['partitions'])
            new_partitions = {topic['name']: num_partitions}
            admin_client.create_partitions(new_partitions)
            print('Additional partitions created for topic: ' + topic['name'])
if topic_list:
    admin_client.create_topics(new_topics=topic_list, validate_only=False)
    print('Topics successfully created')
else:
    print('No topics to create')
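# For reference, a kafka_topics.yaml layout that the loop above can read; the keys
# (name, partitions, replication-factor) are inferred from the code, and the topic
# names and values below are placeholders.
#
#   topics:
#     - name: userInfo
#       partitions: 4
#       replication-factor: 1
#     - name: events
#       partitions: 8
#       replication-factor: 1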