def test_set_consumer_offsets_kafka(self, topics, kafka_client_mock):
    """Committing with kafka storage must use only the kafka-specific request."""
    spy_client = mock.Mock(wraps=kafka_client_mock)
    offsets_to_commit = {
        'topic1': {0: 100, 1: 200},
        'topic2': {0: 150, 1: 300},
    }
    set_consumer_offsets(
        spy_client,
        "group",
        offsets_to_commit,
        raise_on_error=True,
        offset_storage='kafka',
    )
    # The zookeeper-storage path must not be hit; the kafka path exactly once.
    assert spy_client.send_offset_commit_request.call_count == 0
    assert spy_client.send_offset_commit_request_kafka.call_count == 1
def test_set_consumer_offsets_invalid_storage(self, topics, kafka_client_mock):
    """An unrecognized storage name must raise without committing anything."""
    spy_client = mock.Mock(wraps=kafka_client_mock)
    offsets_to_commit = {
        'topic1': {0: 100, 1: 200},
        'topic2': {0: 150, 1: 300},
    }
    with pytest.raises(InvalidOffsetStorageError):
        set_consumer_offsets(
            spy_client,
            "group",
            offsets_to_commit,
            raise_on_error=True,
            offset_storage='randon_string',
        )
    # Neither commit path may have been attempted.
    assert spy_client.send_offset_commit_request.call_count == 0
    assert spy_client.send_offset_commit_request_kafka.call_count == 0
def unsubscribe_partitions(self, group, topic, partitions):
    """Remove *group*'s subscription to the given partitions of *topic*
    by committing nullified offsets for them.
    """
    zeroed = {topic: dict.fromkeys(partitions, 0)}
    set_consumer_offsets(
        self.client,
        group,
        nullify_offsets(zeroed),
    )
def rename_group_with_storage_kafka(
    cls,
    client,
    old_groupid,
    new_groupid,
    topics,
):
    """Rename a kafka-storage consumer group.

    Copies the old group's current offsets under the new group id, then
    nullifies the old group's offsets.
    """
    current = get_current_consumer_offsets(
        client,
        old_groupid,
        topics,
        offset_storage='kafka',
    )
    # Write the copied offsets under the new id first...
    set_consumer_offsets(
        client,
        new_groupid,
        current,
        offset_storage='kafka',
    )
    # ...then clear the old group's offsets.
    set_consumer_offsets(
        client,
        old_groupid,
        nullify_offsets(topics),
        offset_storage='kafka',
    )
def test_set_consumer_offsets_invalid_storage(self, topics, kafka_client_mock):
    """An unrecognized storage name must raise without committing anything."""
    wrapped = mock.Mock(wraps=kafka_client_mock)
    offsets = {
        'topic1': {0: 100, 1: 200},
        'topic2': {0: 150, 1: 300},
    }
    with pytest.raises(InvalidOffsetStorageError):
        set_consumer_offsets(
            wrapped,
            "group",
            offsets,
            raise_on_error=True,
            offset_storage='randon_string',
        )
    # No commit request of either flavor may have gone out.
    assert wrapped.send_offset_commit_request.call_count == 0
    assert wrapped.send_offset_commit_request_kafka.call_count == 0
def test_set_consumer_offsets_kafka(self, topics, kafka_client_mock):
    """Committing with kafka storage must use only the kafka-specific request."""
    wrapped = mock.Mock(wraps=kafka_client_mock)
    offsets = {
        'topic1': {0: 100, 1: 200},
        'topic2': {0: 150, 1: 300},
    }
    set_consumer_offsets(
        wrapped,
        "group",
        offsets,
        raise_on_error=True,
        offset_storage='kafka',
    )
    # Only the kafka commit path should have been used, exactly once.
    assert wrapped.send_offset_commit_request.call_count == 0
    assert wrapped.send_offset_commit_request_kafka.call_count == 1
def delete_group_kafka(cls, client, group, topics):
    """Delete a kafka-storage consumer group by committing nullified
    offsets for all of its topics.
    """
    set_consumer_offsets(
        client,
        group,
        nullify_offsets(topics),
        offset_storage='kafka',
    )
def set_consumer_group_offset(topic, group, offset):
    """Commit *offset* for partition 0 of *topic* under consumer *group*."""
    offsets = {topic: {0: offset}}
    set_consumer_offsets(
        KafkaToolClient(KAFKA_URL),
        group,
        offsets,
        raise_on_error=True,
    )
def delete_group_kafka(cls, client, group, topics):
    """Delete a kafka-storage consumer group by nulling its offsets."""
    nulled = nullify_offsets(topics)
    set_consumer_offsets(
        client,
        group,
        nulled,
        offset_storage='kafka',
    )
def create_consumer_group(topic, group_name, num_messages=1):
    """Create a consumer group by committing an initial offset for
    partition 0 of *topic*, and return the client used.
    """
    client = KafkaToolClient(KAFKA_URL)
    initial_offsets = {topic: {0: num_messages}}
    set_consumer_offsets(
        client,
        group_name,
        initial_offsets,
        raise_on_error=True,
    )
    return client
def commit_offsets(offsets, group):
    """Commit *offsets* for consumer *group* against the configured cluster.

    :param offsets: offsets to commit, keyed by topic then partition
    :param group: consumer group id
    """
    # Setup the Kafka client
    config = get_cluster_config()
    client = KafkaToolClient(config.broker_list)
    try:
        set_consumer_offsets(
            client,
            group,
            offsets,
        )
    finally:
        # Always release the connection, even if the commit raises;
        # the original leaked the client on error.
        client.close()
def set_consumer_group_offset(topic, group, offset):
    """Commit *offset* for partition 0 of *topic* under consumer *group*."""
    client = KafkaToolClient(KAFKA_URL)
    set_consumer_offsets(
        client,
        group,
        {topic: {0: offset}},
        raise_on_error=True,
    )
def commit_offsets(offsets, group):
    """Commit *offsets* for consumer *group* against the configured cluster.

    :param offsets: offsets to commit, keyed by topic then partition
    :param group: consumer group id
    """
    # Setup the Kafka client
    config = get_cluster_config()
    client = KafkaToolClient(config.broker_list)
    try:
        set_consumer_offsets(
            client,
            group,
            offsets,
        )
    finally:
        # Always release the connection, even if the commit raises;
        # the original leaked the client on error.
        client.close()
def delete_topic(self, group, topic):
    """Unsubscribe *group* from *topic* by nullifying its current offsets."""
    current = get_current_consumer_offsets(
        self.client,
        group,
        [topic],
    )
    set_consumer_offsets(
        self.client,
        group,
        nullify_offsets(current),
    )
def delete_topic(self, group, topic):
    """Unsubscribe *group* from *topic* by nullifying its current offsets."""
    existing = get_current_consumer_offsets(self.client, group, [topic])
    nulled = nullify_offsets(existing)
    set_consumer_offsets(self.client, group, nulled)
def create_consumer_group(topic, group_name, num_messages=1):
    """Create a consumer group by committing an initial offset for
    partition 0 of *topic*, and return the client used.
    """
    client = KafkaToolClient(KAFKA_URL)
    set_consumer_offsets(
        client,
        group_name,
        {topic: {0: num_messages}},
        raise_on_error=True,
    )
    return client
def create_consumer_group_with_kafka_storage(topic, group_name):
    """Create a kafka-storage consumer group with an initial offset of 1
    on partition 0 of *topic*, and return the client used.
    """
    initial_offsets = {topic: {0: 1}}
    client = KafkaToolClient(KAFKA_URL)
    set_consumer_offsets(
        client,
        group_name,
        initial_offsets,
        offset_storage='kafka',
        raise_on_error=True,
    )
    return client
def unsubscribe_partitions(self, group, topic, partitions):
    """Remove *group*'s subscription to the given partitions of *topic*
    by committing nullified offsets for them.
    """
    per_partition = {p: 0 for p in partitions}
    set_consumer_offsets(
        self.client,
        group,
        nullify_offsets({topic: per_partition}),
    )
def copy_group_kafka(cls, client, topics, source_group, destination_group):
    """Copy the source group's current kafka-storage offsets onto the
    destination group.
    """
    source_offsets = get_current_consumer_offsets(
        client,
        source_group,
        topics,
        offset_storage='kafka',
    )
    set_consumer_offsets(
        client,
        destination_group,
        source_offsets,
        offset_storage='kafka',
    )
def initialize_kafka_offsets_topic():
    """Force creation of the __consumer_offsets topic by committing an
    offset, then wait for the broker to settle. No-op if it already exists.
    """
    if '__consumer_offsets' in list_topics():
        return  # already initialized
    topic = create_random_topic(1, 1)
    produce_example_msg(topic, num_messages=1)
    set_consumer_offsets(
        KafkaToolClient(KAFKA_URL),
        create_random_group_id(),
        {topic: {0: 1}},
        raise_on_error=True,
    )
    # Give the broker time to create the offsets topic.
    time.sleep(20)
def initialize_kafka_offsets_topic():
    """Force creation of the __consumer_offsets topic by committing an
    offset, then wait for the broker to settle. No-op if it already exists.
    """
    if '__consumer_offsets' not in list_topics():
        topic = create_random_topic(1, 1)
        produce_example_msg(topic, num_messages=1)
        kafka = KafkaToolClient(KAFKA_URL)
        set_consumer_offsets(
            kafka,
            create_random_group_id(),
            {topic: {0: 1}},
            raise_on_error=True,
        )
        # Give the broker time to create the offsets topic.
        time.sleep(20)
def restore_offsets(cls, client, parsed_consumer_offsets, storage):
    """Fetch current offsets from kafka, validate them against given
    consumer-offsets data and commit the new offsets.

    :param client: Kafka-client
    :param parsed_consumer_offsets: Parsed consumer offset data from json file
    :type parsed_consumer_offsets: dict(group: dict(topic: partition-offsets))
    :param storage: String describing where to store the committed offsets.
    """
    # Fetch current offsets
    try:
        consumer_group = parsed_consumer_offsets['groupid']
        topics_offset_data = parsed_consumer_offsets['offsets']
        topic_partitions = dict(
            (topic, [partition for partition in offset_data.keys()])
            for topic, offset_data in topics_offset_data.items())
    except KeyError:
        # BUG FIX: dict subscripting raises KeyError, not IndexError, so the
        # original handler could never catch malformed input. Also use
        # .items() instead of py2-only .iteritems().
        print(
            "Error: Given parsed consumer-offset data {consumer_offsets} "
            "could not be parsed".format(
                consumer_offsets=parsed_consumer_offsets),
            file=sys.stderr,
        )
        raise
    current_offsets = get_consumer_offsets_metadata(
        client,
        consumer_group,
        topic_partitions,
        offset_storage=storage,
    )
    # Build new offsets
    new_offsets = cls.build_new_offsets(
        client,
        topics_offset_data,
        topic_partitions,
        current_offsets,
    )
    # Commit offsets (consumer_group was already extracted above)
    set_consumer_offsets(
        client,
        consumer_group,
        new_offsets,
        offset_storage=storage,
    )
    print("Restored to new offsets {offsets}".format(
        offsets=dict(new_offsets)))
def test_set_consumer_offsets_fail(self, kafka_client_mock):
    """Commit errors surface as OffsetCommitError entries in the status."""
    kafka_client_mock.set_commit_error()
    offsets_to_commit = {
        'topic1': {0: 100, 1: 200},
        'topic2': {0: 150, 1: 300},
    }
    status = set_consumer_offsets(
        kafka_client_mock,
        "group",
        offsets_to_commit,
        raise_on_error=True,
    )
    expected_status = [
        OffsetCommitError(topic, partition, RequestTimedOutError.message)
        for topic in ('topic1', 'topic2')
        for partition in (0, 1)
    ]
    assert len(status) == len(expected_status)
    # Order of errors is not guaranteed, so test membership.
    for expected in expected_status:
        assert expected in status
    # A failed commit must leave the stored offsets untouched.
    assert kafka_client_mock.group_offsets == self.group_offsets
def test_set_consumer_offsets(self, kafka_client_mock):
    """A successful commit returns no errors and updates the mock's state."""
    offsets_to_commit = {
        'topic1': {0: 100, 1: 200},
        'topic2': {0: 150, 1: 300},
    }
    status = set_consumer_offsets(
        kafka_client_mock,
        "group",
        offsets_to_commit,
    )
    assert status == []
    # topic1 partition 2 was not committed but remains in the final state.
    assert kafka_client_mock.group_offsets == {
        'topic1': {0: 100, 1: 200, 2: 10},
        'topic2': {0: 150, 1: 300},
    }
def test_set_consumer_offsets_fail(self, kafka_client_mock):
    """Commit errors surface as OffsetCommitError entries in the status."""
    kafka_client_mock.set_commit_error()
    new_offsets = {
        'topic1': {0: 100, 1: 200},
        'topic2': {0: 150, 1: 300},
    }
    status = set_consumer_offsets(
        kafka_client_mock,
        "group",
        new_offsets,
        raise_on_error=True,
    )
    expected_status = [
        OffsetCommitError("topic1", 0, RequestTimedOutError.message),
        OffsetCommitError("topic1", 1, RequestTimedOutError.message),
        OffsetCommitError("topic2", 0, RequestTimedOutError.message),
        OffsetCommitError("topic2", 1, RequestTimedOutError.message),
    ]
    assert len(status) == len(expected_status)
    # Every expected error must appear somewhere in the returned status.
    assert all(expected in status for expected in expected_status)
    # A failed commit must leave the stored offsets untouched.
    assert kafka_client_mock.group_offsets == self.group_offsets
def test_set_consumer_offsets(self, kafka_client_mock):
    """A successful commit returns no errors and updates the mock's state."""
    new_offsets = {
        'topic1': {0: 100, 1: 200},
        'topic2': {0: 150, 1: 300},
    }
    status = set_consumer_offsets(
        kafka_client_mock,
        "group",
        new_offsets,
    )
    # No errors expected on a clean commit.
    assert set(status) == set()
    # topic1 partition 2 was not committed but remains in the final state.
    expected_offsets = {
        'topic1': {0: 100, 1: 200, 2: 10},
        'topic2': {0: 150, 1: 300},
    }
    assert kafka_client_mock.group_offsets == expected_offsets
def restore_offsets(cls, client, parsed_consumer_offsets, storage):
    """Fetch current offsets from kafka, validate them against given
    consumer-offsets data and commit the new offsets.

    :param client: Kafka-client
    :param parsed_consumer_offsets: Parsed consumer offset data from json file
    :type parsed_consumer_offsets: dict(group: dict(topic: partition-offsets))
    :param storage: String describing where to store the committed offsets.
    """
    # Fetch current offsets
    try:
        consumer_group = parsed_consumer_offsets['groupid']
        topics_offset_data = parsed_consumer_offsets['offsets']
        topic_partitions = dict(
            (topic, [partition for partition in offset_data.keys()])
            for topic, offset_data in topics_offset_data.items()
        )
    except KeyError:
        # BUG FIX: dict subscripting raises KeyError, not IndexError, so the
        # original handler could never catch malformed input. Also use
        # .items() instead of py2-only .iteritems().
        print(
            "Error: Given parsed consumer-offset data {consumer_offsets} "
            "could not be parsed".format(consumer_offsets=parsed_consumer_offsets),
            file=sys.stderr,
        )
        raise
    current_offsets = get_consumer_offsets_metadata(
        client,
        consumer_group,
        topic_partitions,
        offset_storage=storage,
    )
    # Build new offsets
    new_offsets = cls.build_new_offsets(
        client,
        topics_offset_data,
        topic_partitions,
        current_offsets,
    )
    # Commit offsets (consumer_group was already extracted above)
    set_consumer_offsets(
        client,
        consumer_group,
        new_offsets,
        offset_storage=storage,
    )
    print("Restored to new offsets {offsets}".format(offsets=dict(new_offsets)))
def rename_group(
    cls,
    client,
    old_groupid,
    new_groupid,
    topics,
):
    """Rename a consumer group: copy its offsets to the new group id,
    then nullify the old group's offsets.
    """
    current = get_current_consumer_offsets(
        client,
        old_groupid,
        topics,
    )
    set_consumer_offsets(client, new_groupid, current)
    set_consumer_offsets(client, old_groupid, nullify_offsets(topics))
def rename_group(
    cls,
    client,
    old_groupid,
    new_groupid,
    topics,
):
    """Rename a consumer group: copy its offsets to the new group id,
    then nullify the old group's offsets.
    """
    copied = get_current_consumer_offsets(client, old_groupid, topics)
    # Write under the new id first, then clear the old group.
    set_consumer_offsets(client, new_groupid, copied)
    set_consumer_offsets(
        client,
        old_groupid,
        nullify_offsets(topics),
    )
def run(cls, args, cluster_config):
    """Commit cls.new_offsets_dict for args.groupid against the cluster,
    printing any per-partition failures and exiting non-zero on error.
    """
    # Setup the Kafka client
    client = KafkaToolClient(cluster_config.broker_list)
    client.load_metadata_for_topics()
    # Unless forced, verify that the consumer group exists.
    if not args.force:
        cls.get_topics_from_consumer_group_id(
            cluster_config,
            args.groupid,
            storage=args.storage,
        )
    try:
        results = set_consumer_offsets(
            client,
            args.groupid,
            cls.new_offsets_dict,
            offset_storage=args.storage,
        )
    except TypeError:
        print(
            "Error: Badly formatted input, please re-run command "
            "with --help option.",
            file=sys.stderr)
        raise
    client.close()
    if results:
        # Accumulate one line per failed topic/partition.
        error_lines = [
            " Topic: {topic} Partition: {partition} Error: {error}\n".format(
                topic=result.topic,
                partition=result.partition,
                error=result.error,
            )
            for result in results
        ]
        print(
            "Error: Unable to commit consumer offsets for:\n" +
            "".join(error_lines),
            file=sys.stderr,
        )
        sys.exit(1)
def run(cls, args, cluster_config):
    """Commit cls.new_offsets_dict for args.groupid against the cluster,
    printing any per-partition failures and exiting non-zero on error.
    """
    # Setup the Kafka client
    client = KafkaToolClient(cluster_config.broker_list)
    client.load_metadata_for_topics()
    # Unless forced, verify that the consumer group exists.
    if not args.force:
        cls.get_topics_from_consumer_group_id(
            cluster_config,
            args.groupid,
        )
    try:
        results = set_consumer_offsets(
            client,
            args.groupid,
            cls.new_offsets_dict,
            offset_storage=args.storage,
        )
    except TypeError:
        print(
            "Error: Badly formatted input, please re-run command "
            "with --help option.",
            file=sys.stderr
        )
        raise
    client.close()
    if results:
        # Accumulate one line per failed topic/partition.
        error_lines = [
            " Topic: {topic} Partition: {partition} Error: {error}\n".format(
                topic=result.topic,
                partition=result.partition,
                error=result.error
            )
            for result in results
        ]
        print(
            "Error: Unable to commit consumer offsets for:\n" +
            "".join(error_lines),
            file=sys.stderr,
        )
        sys.exit(1)
def test_set_consumer_offsets(self, kafka_client_mock):
    """Successful commits return one OffsetCommitResponsePayload per
    committed topic/partition and update the mock's stored offsets.
    """
    offsets_to_commit = {
        'topic1': {0: 100, 1: 200},
        'topic2': {0: 150, 1: 300},
    }
    status = set_consumer_offsets(
        kafka_client_mock,
        "group",
        offsets_to_commit,
    )
    expected_responses = {
        OffsetCommitResponsePayload(topic, partition, 0)
        for topic in ('topic1', 'topic2')
        for partition in (0, 1)
    }
    # Order is not guaranteed; compare as sets.
    assert set(status) == expected_responses
    # topic1 partition 2 was not committed but remains in the final state.
    assert kafka_client_mock.group_offsets == {
        'topic1': {0: 100, 1: 200, 2: 10},
        'topic2': {0: 150, 1: 300},
    }
def copy_group_kafka(cls, client, topics, source_group, destination_group):
    """Copy the source group's current offsets onto the destination group."""
    offsets = get_current_consumer_offsets(
        client,
        source_group,
        topics,
    )
    set_consumer_offsets(
        client,
        destination_group,
        offsets,
    )