def commit_offsets(self, metadata=None):
    """
    Notifies Kafka that the consumer's messages have been processed.

    Uses the "v0" version of the offset commit request to maintain
    compatibility with clusters running 0.8.1.

    :param metadata: optional string stored by the broker alongside each
        committed offset; defaults to ``"committed by <consumer name>"``.

    Builds one ``PartitionRequest`` per allocated partition of each topic
    in ``self.topics_to_commit``, using the current ``self.offsets``
    values.  If the coordinator reports the metadata was too long, the
    metadata is blanked; if the coordinator asks for a retry, the commit
    is re-attempted (with the possibly-blanked metadata).
    """
    if metadata is None:
        metadata = "committed by %s" % self.name

    log.debug("Committing offsets for consumer group %s", self.group_name)

    request = offset_commit.OffsetCommitV0Request(
        group=self.group_name,
        topics=[
            offset_commit.TopicRequest(
                name=topic,
                partitions=[
                    offset_commit.PartitionRequest(
                        partition_id=partition_id,
                        offset=self.offsets[topic][partition_id],
                        metadata=metadata
                    )
                    for partition_id in partition_ids
                ]
            )
            for topic, partition_ids in six.iteritems(self.allocation)
            if topic in self.topics_to_commit
        ]
    )

    results = yield self.send({self.coordinator_id: request})

    retry, adjust_metadata = results[self.coordinator_id]

    if adjust_metadata:
        # ``Logger.warn`` is a deprecated alias; ``warning`` is canonical.
        log.warning("Offset commit metadata '%s' was too long.", metadata)
        metadata = ""

    if retry:
        yield self.commit_offsets(metadata=metadata)
def test_consume_with_offset_fetch_and_commit_errors(self):
    """
    Runs a consume cycle in which the coordinator answers the
    offset_fetch and offset_commit requests with transient errors, and
    verifies the requests that end up being sent.
    """
    self.add_topic("test.topic", leaders=(1, 8))
    self.allocator.allocation = {"test.topic": [1]}

    # offset_fetch fails twice (load in progress, then a timeout) and
    # succeeds on the third attempt with a committed offset of 80.
    offset_fetch_responses = [
        offset_fetch.OffsetFetchResponse(topics=[
            offset_fetch.TopicResponse(
                name="test.topic",
                partitions=[
                    offset_fetch.PartitionResponse(
                        error_code=errors.offsets_load_in_progress,
                        partition_id=1,
                    )
                ])
        ]),
        offset_fetch.OffsetFetchResponse(topics=[
            offset_fetch.TopicResponse(
                name="test.topic",
                partitions=[
                    offset_fetch.PartitionResponse(
                        error_code=errors.request_timed_out,
                        partition_id=1,
                    )
                ])
        ]),
        offset_fetch.OffsetFetchResponse(topics=[
            offset_fetch.TopicResponse(
                name="test.topic",
                partitions=[
                    offset_fetch.PartitionResponse(
                        error_code=errors.no_error,
                        partition_id=1,
                        offset=80,
                        metadata="committed, ok!"),
                ])
        ]),
    ]
    self.set_responses(
        broker_id=3, api="offset_fetch", responses=offset_fetch_responses)

    # The commit gets two error responses (timeout, then unknown).
    offset_commit_responses = [
        offset_commit.OffsetCommitResponse(topics=[
            offset_commit.TopicResponse(
                name="test.topic",
                partitions=[
                    offset_commit.PartitionResponse(
                        error_code=errors.request_timed_out,
                        partition_id=1,
                    )
                ]),
        ]),
        offset_commit.OffsetCommitResponse(topics=[
            offset_commit.TopicResponse(
                name="test.topic",
                partitions=[
                    offset_commit.PartitionResponse(
                        error_code=errors.unknown,
                        partition_id=1,
                    )
                ]),
        ]),
    ]
    self.set_responses(
        broker_id=3, api="offset_commit", responses=offset_commit_responses)

    # Partition 1's leader (broker 8) has no messages to hand out.
    self.set_responses(
        broker_id=8,
        api="fetch",
        responses=[
            fetch.FetchResponse(topics=[
                fetch.TopicResponse(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionResponse(
                            partition_id=1,
                            error_code=errors.no_error,
                            highwater_mark_offset=2,
                            message_set=messages.MessageSet([])),
                    ]),
            ])
        ])

    consumer = grouped.GroupedConsumer(
        ["kafka01", "kafka02"], "work-group",
        zk_hosts=["zk01", "zk02", "zk03"])

    yield consumer.connect()
    yield consumer.consume("test.topic")

    self.assert_sent(
        broker_id=3,
        request=offset_fetch.OffsetFetchRequest(
            group_name="work-group",
            topics=[
                offset_fetch.TopicRequest(
                    name="test.topic",
                    partitions=[1],
                )
            ]))
    self.assert_sent(
        broker_id=3,
        request=offset_commit.OffsetCommitV0Request(
            group="work-group",
            topics=[
                offset_commit.TopicRequest(
                    name="test.topic",
                    partitions=[
                        offset_commit.PartitionRequest(
                            partition_id=1,
                            offset=80,
                            metadata="committed by %s" % consumer.name)
                    ])
            ]))
def test_commit_offset_with_large_metadata(self):
    """
    When the coordinator rejects a commit with
    ``offset_metadata_too_large``, the consumer retries the commit with
    empty metadata.
    """
    self.add_topic("test.topic", leaders=(1, 8))
    self.allocator.allocation = {"test.topic": [1]}

    # Fix: the fetched offset belongs to partition 1 -- the only
    # partition in this test's allocation.  It previously said
    # partition_id=0, which nothing else in this test references
    # (compare the sibling tests, which fetch offsets for the
    # allocated partitions).
    self.set_responses(broker_id=3, api="offset_fetch", responses=[
        offset_fetch.OffsetFetchResponse(topics=[
            offset_fetch.TopicResponse(
                name="test.topic",
                partitions=[
                    offset_fetch.PartitionResponse(
                        error_code=errors.no_error,
                        partition_id=1,
                        offset=80,
                        metadata="committed, ok!")
                ])
        ]),
    ])

    # First commit is rejected for oversized metadata; the retry is OK.
    self.set_responses(
        broker_id=3,
        api="offset_commit",
        responses=[
            offset_commit.OffsetCommitResponse(topics=[
                offset_commit.TopicResponse(
                    name="test.topic",
                    partitions=[
                        offset_commit.PartitionResponse(
                            error_code=(errors.offset_metadata_too_large),
                            partition_id=1,
                        )
                    ]),
            ]),
            offset_commit.OffsetCommitResponse(topics=[
                offset_commit.TopicResponse(
                    name="test.topic",
                    partitions=[
                        offset_commit.PartitionResponse(
                            error_code=errors.no_error,
                            partition_id=1,
                        )
                    ]),
            ]),
        ])

    # One JSON message at offset 80 on partition 1's leader (broker 8).
    self.set_responses(
        broker_id=8,
        api="fetch",
        responses=[
            fetch.FetchResponse(topics=[
                fetch.TopicResponse(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionResponse(
                            partition_id=1,
                            error_code=errors.no_error,
                            highwater_mark_offset=2,
                            message_set=messages.MessageSet([
                                (80, messages.Message(
                                    magic=0,
                                    attributes=0,
                                    key=None,
                                    value='{"cat": "meow"}',
                                )),
                            ])),
                    ]),
            ])
        ])

    c = grouped.GroupedConsumer(["kafka01", "kafka02"], "work-group",
                                zk_hosts=["zk01", "zk02", "zk03"])

    yield c.connect()

    msgs = yield c.consume("test.topic")

    self.assertEqual(msgs, [{"cat": "meow"}])

    # First attempt: offset 81 (one past the consumed message) with the
    # default "committed by <name>" metadata.
    self.assert_sent(
        broker_id=3,
        request=offset_commit.OffsetCommitV0Request(
            group="work-group",
            topics=[
                offset_commit.TopicRequest(
                    name="test.topic",
                    partitions=[
                        offset_commit.PartitionRequest(
                            partition_id=1,
                            offset=81,
                            metadata="committed by %s" % c.name)
                    ])
            ]))
    # Retry after offset_metadata_too_large: same offset, empty metadata.
    self.assert_sent(broker_id=3, request=offset_commit.OffsetCommitV0Request(
        group="work-group",
        topics=[
            offset_commit.TopicRequest(
                name="test.topic",
                partitions=[
                    offset_commit.PartitionRequest(
                        partition_id=1,
                        offset=81,
                        metadata="")
                ])
        ]))
def test_consume_without_autocommit(self):
    """
    With ``autocommit=False``, a consume cycle sends only fetch
    requests; the offset commit happens when ``commit_offsets()`` is
    called explicitly.
    """
    self.add_topic("test.topic", leaders=(1, 8))
    self.allocator.allocation = {"test.topic": [0, 1]}

    # Previously-committed offsets: 80 on partition 0, 110 on partition 1.
    self.set_responses(broker_id=3, api="offset_fetch", responses=[
        offset_fetch.OffsetFetchResponse(topics=[
            offset_fetch.TopicResponse(
                name="test.topic",
                partitions=[
                    offset_fetch.PartitionResponse(
                        error_code=errors.no_error,
                        partition_id=0,
                        offset=80,
                        metadata="committed, ok!"),
                    offset_fetch.PartitionResponse(
                        error_code=errors.no_error,
                        partition_id=1,
                        offset=110,
                        metadata="committed, ok!"),
                ])
        ]),
    ])

    # NOTE(review): this canned response only covers partition 1 even
    # though the commit below includes partitions 0 and 1 -- presumably
    # the fake broker tolerates the missing partition; confirm.
    self.set_responses(broker_id=3, api="offset_commit", responses=[
        offset_commit.OffsetCommitResponse(topics=[
            offset_commit.TopicResponse(
                name="test.topic",
                partitions=[
                    offset_commit.PartitionResponse(
                        error_code=errors.no_error,
                        partition_id=1,
                    )
                ]),
        ]),
    ])

    def single_message_response(partition_id, offset):
        # One JSON message at `offset` on the given partition.
        return fetch.FetchResponse(topics=[
            fetch.TopicResponse(
                name="test.topic",
                partitions=[
                    fetch.PartitionResponse(
                        partition_id=partition_id,
                        error_code=errors.no_error,
                        highwater_mark_offset=2,
                        message_set=messages.MessageSet([
                            (offset, messages.Message(
                                magic=0,
                                attributes=0,
                                key=None,
                                value='{"cat": "meow"}',
                            )),
                        ])),
                ]),
        ])

    self.set_responses(
        broker_id=1, api="fetch",
        responses=[single_message_response(0, 80)])
    self.set_responses(
        broker_id=8, api="fetch",
        responses=[single_message_response(1, 110)])

    consumer = grouped.GroupedConsumer(
        ["kafka01", "kafka02"], "work-group",
        zk_hosts=["zk01", "zk02", "zk03"],
        autocommit=False)

    yield consumer.connect()
    yield consumer.consume("test.topic")

    # Each partition leader should be asked for data from the fetched
    # offset: broker 1 / partition 0 @ 80, broker 8 / partition 1 @ 110.
    for broker_id, partition_id, offset in [(1, 0, 80), (8, 1, 110)]:
        self.assert_sent(
            broker_id=broker_id,
            request=fetch.FetchRequest(
                replica_id=-1,
                max_wait_time=1000,
                min_bytes=1,
                topics=[
                    fetch.TopicRequest(name="test.topic", partitions=[
                        fetch.PartitionRequest(
                            partition_id=partition_id,
                            offset=offset,
                            max_bytes=(1024 * 1024),
                        ),
                    ])
                ]))

    # Offsets are committed only on this explicit call.
    yield consumer.commit_offsets()

    self.assert_sent(
        broker_id=3,
        request=offset_commit.OffsetCommitV0Request(
            group="work-group",
            topics=[
                offset_commit.TopicRequest(
                    name="test.topic",
                    partitions=[
                        offset_commit.PartitionRequest(
                            partition_id=0,
                            offset=81,
                            metadata="committed by %s" % consumer.name),
                        offset_commit.PartitionRequest(
                            partition_id=1,
                            offset=111,
                            metadata="committed by %s" % consumer.name),
                    ])
            ]))