def test_set_topic_config_kafka_9(self, mock_client):
    """On a Kafka 0.9 cluster, set_topic_config writes the topic config
    node and publishes a version-1 (entity_type/entity_name) change node.
    """
    with mock.patch.object(ZK, 'set', autospec=True) as mock_set:
        with ZK(self.cluster_config) as zk:
            topic_config = {
                "version": 1,
                "config": {"cleanup.policy": "compact"},
            }
            change_payload = {
                "version": 1,
                "entity_type": "topics",
                "entity_name": "some_topic",
            }
            zk.set_topic_config("some_topic", topic_config, (0, 9, 2))

            serialized_config = dump_json(topic_config)
            serialized_change = dump_json(change_payload)

            # The topic's config znode must be written exactly once.
            mock_set.assert_called_once_with(
                zk,
                '/config/topics/some_topic',
                serialized_config,
            )
            # The change node is created as a sequential znode
            # (positional args: path, value, acl, ephemeral, sequence, makepath).
            expected_call = mock.call(
                '/config/changes/config_change_',
                serialized_change,
                None,
                False,
                True,
                False,
            )
            assert mock_client.return_value.create.call_args_list == [expected_call]
def test_set_topic_config_kafka_9(self, mock_client):
    """Verify set_topic_config against a 0.9 broker: the config node is set
    and a v1 change-notification node is created sequentially.
    """
    with mock.patch.object(ZK, 'set', autospec=True) as patched_set, \
            ZK(self.cluster_config) as zk:
        cfg = {"version": 1, "config": {"cleanup.policy": "compact"}}
        notification = {
            "version": 1,
            "entity_type": "topics",
            "entity_name": "some_topic",
        }

        zk.set_topic_config("some_topic", cfg, (0, 9, 2))

        # Config write goes to the topic's config path.
        patched_set.assert_called_once_with(
            zk,
            '/config/topics/some_topic',
            dump_json(cfg),
        )

        # Change node created once with the kafka-9 payload
        # (path, value, acl, ephemeral, sequence, makepath).
        assert mock_client.return_value.create.call_args_list == [
            mock.call(
                '/config/changes/config_change_',
                dump_json(notification),
                None,
                False,
                True,
                False,
            ),
        ]
def test_set_broker_config_kafka_10(self, mock_client):
    """On a Kafka 0.10 cluster, set_broker_config writes the broker config
    node and publishes a version-2 (entity_path) change node.
    """
    with mock.patch.object(ZK, 'set', autospec=True) as patched_set, \
            ZK(self.cluster_config) as zk:
        broker_config = {
            "version": 1,
            "config": {"leader.replication.throttled.rate": "42"},
        }
        change_payload = {"entity_path": "brokers/0", "version": 2}

        zk.set_broker_config(0, broker_config)

        # Broker 0's config znode is written exactly once.
        patched_set.assert_called_once_with(
            zk,
            '/config/brokers/0',
            dump_json(broker_config),
        )

        # A single sequential change node carries the v2 payload
        # (path, value, acl, ephemeral, sequence, makepath).
        assert mock_client.return_value.create.call_args_list == [
            mock.call(
                '/config/changes/config_change_',
                dump_json(change_payload),
                None,
                False,
                True,
                False,
            ),
        ]
def set_topic_config(self, topic, value, kafka_version=(0, 10, )):
    """Set configuration information for specified topic.

    :topic : topic whose configuration needs to be changed
    :value : config value with which the topic needs to be
        updated with. This would be of the form key=value.
        Example 'cleanup.policy=compact'
    :kafka_version :tuple kafka version the brokers are running on.
        Defaults to (0, 10, x). Kafka version 9 and kafka 10
        support this feature.
    :raises NoNodeError: when the topic's config node is missing.
    """
    serialized_value = dump_json(value)
    try:
        # Write the new config to the topic's config znode.
        result = self.set(
            "/config/topics/{topic}".format(topic=topic),
            serialized_value,
        )

        # Publish a change-notification node so brokers pick up the update.
        minor = kafka_version[1]
        # this feature is supported in kafka 9 and kafka 10
        assert minor in (9, 10), "Feature supported with kafka 9 and kafka 10"
        if minor == 9:
            # https://github.com/apache/kafka/blob/0.9.0.1/
            # core/src/main/scala/kafka/admin/AdminUtils.scala#L334
            notification = {
                "version": 1,
                "entity_type": "topics",
                "entity_name": topic,
            }
        else:
            # kafka 10
            # https://github.com/apache/kafka/blob/0.10.2.1/
            # core/src/main/scala/kafka/admin/AdminUtils.scala#L574
            notification = {
                "version": 2,
                "entity_path": "topics/" + topic,
            }
        # Sequential node: each change gets a unique, ordered suffix.
        self.create(
            '/config/changes/config_change_',
            dump_json(notification),
            sequence=True,
        )
    except NoNodeError as e:
        _log.error("topic {topic} not found.".format(topic=topic))
        raise e
    return result
def set_topic_config(self, topic, value, kafka_version=(0, 10, )):
    """Set configuration information for specified topic.

    :topic : topic whose configuration needs to be changed
    :value : config value with which the topic needs to be
        updated with. This would be of the form key=value.
        Example 'cleanup.policy=compact'
    :kafka_version :tuple kafka version the brokers are running on.
        Defaults to (0, 10, x). Kafka version 9 and kafka 10
        support this feature.
    :raises NoNodeError: if the topic's config node does not exist.
    """
    config_data = dump_json(value)
    try:
        # Update the topic config node first; a missing node aborts here.
        return_value = self.set(
            "/config/topics/{topic}".format(topic=topic), config_data
        )

        version = kafka_version[1]
        # this feature is supported in kafka 9 and kafka 10
        assert version in (9, 10), "Feature supported with kafka 9 and kafka 10"

        if version != 9:
            # kafka 10
            # https://github.com/apache/kafka/blob/0.10.2.1/
            # core/src/main/scala/kafka/admin/AdminUtils.scala#L574
            change_node = dump_json(
                {"version": 2, "entity_path": "topics/" + topic}
            )
        else:
            # https://github.com/apache/kafka/blob/0.9.0.1/
            # core/src/main/scala/kafka/admin/AdminUtils.scala#L334
            change_node = dump_json(
                {"version": 1, "entity_type": "topics", "entity_name": topic}
            )

        # Brokers watch /config/changes; a sequential child signals the update.
        self.create('/config/changes/config_change_', change_node, sequence=True)
    except NoNodeError as e:
        _log.error("topic {topic} not found.".format(topic=topic))
        raise e
    return return_value
def execute_plan(self, plan, allow_rf_change=False, allow_rf_mismatch=False):
    """Submit reassignment plan for execution.

    Validates *plan* against the current cluster assignment (restricted to
    the topics named in the plan) and, if valid, writes it to the admin
    reassignment node in Zookeeper.

    Returns True when the plan was submitted, False when validation fails,
    a previous reassignment is still in progress, or the write errors out.
    """
    reassignment_path = '{admin}/{reassignment_node}'\
        .format(admin=ADMIN_PATH, reassignment_node=REASSIGNMENT_NODE)
    plan_json = dump_json(plan)

    # Fetch only the topics the proposed plan touches to validate against.
    proposed_topics = {p['topic'] for p in plan['partitions']}
    base_plan = self.get_cluster_plan(topic_names=list(proposed_topics))

    valid = validate_plan(
        plan,
        base_plan,
        allow_rf_change=allow_rf_change,
        allow_rf_mismatch=allow_rf_mismatch,
    )
    if not valid:
        _log.error(
            'Given plan is invalid. Aborting new reassignment plan ... {plan}'
            .format(plan=plan))
        return False

    # Send proposed-plan to zookeeper
    try:
        _log.info('Sending plan to Zookeeper...')
        self.create(reassignment_path, plan_json, makepath=True)
        _log.info(
            'Re-assign partitions node in Zookeeper updated successfully '
            'with {plan}'.format(plan=plan),
        )
        return True
    except NodeExistsError:
        # Another reassignment is already pending; report it and bail out.
        _log.warning('Previous plan in progress. Exiting..')
        _log.warning(
            'Aborting new reassignment plan... {plan}'.format(plan=plan))
        in_progress_plan = load_json(self.get(reassignment_path)[0])
        in_progress_partitions = [
            '{topic}-{p_id}'.format(
                topic=p_data['topic'],
                p_id=str(p_data['partition']),
            )
            for p_data in in_progress_plan['partitions']
        ]
        _log.warning(
            '{count} partition(s) reassignment currently in progress:-'.
            format(count=len(in_progress_partitions)),
        )
        _log.warning(
            '{partitions}. In Progress reassignment plan...'.format(
                partitions=', '.join(in_progress_partitions),
            ),
        )
        return False
    except Exception as e:
        _log.error(
            'Could not re-assign partitions {plan}. Error: {e}'.format(
                plan=plan, e=e),
        )
        return False
def execute_plan(self, plan, allow_rf_change=False):
    """Submit reassignment plan for execution.

    Validates *plan* against the full current cluster assignment and, if
    valid, writes it to the admin reassignment node in Zookeeper.

    Returns True when the plan was submitted, False when validation fails,
    a previous reassignment is still in progress, or the write errors out.
    """
    reassignment_path = '{admin}/{reassignment_node}'\
        .format(admin=ADMIN_PATH, reassignment_node=REASSIGNMENT_NODE)
    plan_json = dump_json(plan)
    base_plan = self.get_cluster_plan()

    if not validate_plan(plan, base_plan, allow_rf_change=allow_rf_change):
        _log.error(
            'Given plan is invalid. Aborting new reassignment plan ... {plan}'
            .format(plan=plan)
        )
        return False

    # Send proposed-plan to zookeeper
    try:
        _log.info('Sending plan to Zookeeper...')
        self.create(reassignment_path, plan_json, makepath=True)
    except NodeExistsError:
        # A reassignment node already exists: a previous plan is pending.
        _log.warning('Previous plan in progress. Exiting..')
        _log.warning(
            'Aborting new reassignment plan... {plan}'.format(plan=plan)
        )
        pending = load_json(self.get(reassignment_path)[0])
        pending_partitions = [
            '{topic}-{p_id}'.format(
                topic=entry['topic'],
                p_id=str(entry['partition']),
            )
            for entry in pending['partitions']
        ]
        _log.warning(
            '{count} partition(s) reassignment currently in progress:-'
            .format(count=len(pending_partitions)),
        )
        _log.warning(
            '{partitions}. In Progress reassignment plan...'.format(
                partitions=', '.join(pending_partitions),
            ),
        )
        return False
    except Exception as e:
        _log.error(
            'Could not re-assign partitions {plan}. Error: {e}'
            .format(plan=plan, e=e),
        )
        return False

    _log.info(
        'Re-assign partitions node in Zookeeper updated successfully '
        'with {plan}'.format(plan=plan),
    )
    return True
def _set_entity_config(self, entity_type, entity_name, value, kafka_version=(0, 10, )):
    """Set configuration information for specified entity.

    :entity_type : "brokers" or "topics"
    :entity_name : broker id or topic name
    :value : config value with which the entity needs to be
        updated with. This would be of the form key=value.
        Example 'cleanup.policy=compact'
    :kafka_version :tuple kafka version the brokers are running on.
        Defaults to (0, 10, x). Versions above Kafka 0.9 support this
        feature.
    :raises NoNodeError: when the entity's config node is missing.
    """
    assert entity_type in ("brokers", "topics"), \
        "Supported entities are brokers and topics"

    serialized_value = dump_json(value)
    entity_path = "{entity_type}/{entity_name}".format(
        entity_type=entity_type, entity_name=entity_name)
    try:
        # Write the new config to the entity's config znode.
        result = self.set(
            "/config/{entity_type}/{entity_name}".format(
                entity_type=entity_type, entity_name=entity_name),
            serialized_value,
        )

        # Publish a change-notification node so brokers reload the config.
        assert kafka_version >= (0, 9, ), \
            "Feature supported with kafka 0.9 and above"
        if kafka_version < (0, 10, ):
            # https://github.com/apache/kafka/blob/0.9.0.1/
            # core/src/main/scala/kafka/admin/AdminUtils.scala#L334
            notification = {
                "version": 1,
                "entity_type": entity_type,
                "entity_name": entity_name,
            }
        else:
            # kafka 0.10+
            # https://github.com/apache/kafka/blob/0.10.2.1/
            # core/src/main/scala/kafka/admin/AdminUtils.scala#L574
            notification = {
                "version": 2,
                "entity_path": entity_path,
            }
        # Sequential node: each change gets a unique, ordered suffix.
        self.create(
            '/config/changes/config_change_',
            dump_json(notification),
            sequence=True,
        )
    except NoNodeError as e:
        _log.error("{entity_type}: {entity_name} not found.".format(
            entity_type=entity_type, entity_name=entity_name))
        raise e
    return result