def test_fatal_code_when_consuming(self):
    """A fatal partition error (message_size_too_large) produces no
    messages and is not retried — exactly one fetch goes out."""
    self.add_topic("test.topic", leaders=(3,))
    self.set_responses(
        broker_id=3,
        api="fetch",
        responses=[
            fetch.FetchResponse(topics=[
                fetch.TopicResponse(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionResponse(
                            partition_id=0,
                            error_code=errors.message_size_too_large,
                            highwater_mark_offset=2,
                            message_set=messages.MessageSet([]),
                        ),
                    ],
                ),
            ]),
        ],
    )

    consumer = FakeConsumer(["kafka01"])
    yield consumer.connect()

    received = yield consumer.consume("test.topic")

    self.assertEqual(received, [])
    # a fatal error code must not trigger a second fetch attempt
    self.assertEqual(len(self.requests_by_broker[3]), 1)
def test_consuming_unknown_topic_reloads_metadata(self):
    """Consuming a topic before the cluster has synced still works:
    the consumer refreshes metadata and fetches the message."""
    self.add_topic("test.topic", leaders=(3,))
    self.set_responses(
        broker_id=3,
        api="fetch",
        responses=[
            fetch.FetchResponse(topics=[
                fetch.TopicResponse(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionResponse(
                            partition_id=0,
                            error_code=errors.no_error,
                            highwater_mark_offset=2,
                            message_set=messages.MessageSet(messages=[
                                (
                                    0,
                                    messages.Message(
                                        magic=0,
                                        attributes=0,
                                        key=None,
                                        value='{"cat": "dog"}',
                                    ),
                                ),
                            ]),
                        ),
                    ],
                ),
            ]),
        ],
    )

    consumer = FakeConsumer(["kafka01"])

    # deliberately no connect(): the cluster hasn't synced yet, so
    # "test.topic" is unknown and metadata must be reloaded on demand
    received = yield consumer.consume("test.topic")

    self.assertEqual(received, [{"cat": "dog"}])
    self.assertEqual(len(self.requests_by_broker[3]), 1)
def test_retriable_error_for_offset(self):
    """A retriable error on the offset request (request_timed_out) is
    retried; the second offset response succeeds and consuming proceeds."""
    self.add_topic("test.topic", leaders=(1,))
    self.set_responses(
        broker_id=1,
        api="offset",
        responses=[
            # first attempt times out…
            offset.OffsetResponse(topics=[
                offset.TopicResponse(
                    name="test.topic",
                    partitions=[
                        offset.PartitionResponse(
                            partition_id=0,
                            error_code=errors.request_timed_out,
                            offsets=[],
                        )
                    ],
                )
            ]),
            # …the retry succeeds
            offset.OffsetResponse(topics=[
                offset.TopicResponse(
                    name="test.topic",
                    partitions=[
                        offset.PartitionResponse(
                            partition_id=0,
                            error_code=errors.no_error,
                            offsets=[99],
                        )
                    ],
                )
            ]),
        ],
    )
    self.set_responses(
        broker_id=1,
        api="fetch",
        responses=[
            fetch.FetchResponse(topics=[
                fetch.TopicResponse(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionResponse(
                            partition_id=0,
                            error_code=errors.no_error,
                            highwater_mark_offset=2,
                            message_set=messages.MessageSet([
                                (
                                    0,
                                    messages.Message(
                                        magic=0,
                                        attributes=0,
                                        key=None,
                                        value='{"cat": "meow"}',
                                    ),
                                ),
                            ]),
                        ),
                    ],
                ),
            ]),
        ],
    )

    consumer = single.SingleConsumer(["kafka01"])
    yield consumer.connect()

    received = yield consumer.consume("test.topic")

    self.assertEqual(received, [{"cat": "meow"}])
def test_default_consumes_from_end_offset(self):
    """With no explicit start, the consumer asks for the *end* offset
    (time=-1) and fetches from the offset the broker reports back."""
    self.add_topic("test.topic", leaders=(1,))
    self.set_responses(
        broker_id=1,
        api="offset",
        responses=[
            offset.OffsetResponse(topics=[
                offset.TopicResponse(
                    name="test.topic",
                    partitions=[
                        offset.PartitionResponse(
                            partition_id=0,
                            error_code=errors.no_error,
                            offsets=[99],
                        )
                    ],
                )
            ])
        ],
    )
    self.set_responses(
        broker_id=1,
        api="fetch",
        responses=[
            fetch.FetchResponse(topics=[
                fetch.TopicResponse(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionResponse(
                            partition_id=0,
                            error_code=errors.no_error,
                            highwater_mark_offset=2,
                            message_set=messages.MessageSet(messages=[
                                (
                                    0,
                                    messages.Message(
                                        magic=0,
                                        attributes=0,
                                        key=None,
                                        value='{"cat": "meow"}',
                                    ),
                                ),
                                (
                                    1,
                                    messages.Message(
                                        magic=0,
                                        attributes=0,
                                        key=None,
                                        value='{"dog": "bark"}',
                                    ),
                                ),
                            ]),
                        ),
                    ],
                ),
            ]),
        ],
    )

    consumer = single.SingleConsumer(["kafka01"])
    yield consumer.connect()

    received = yield consumer.consume("test.topic")
    yield consumer.close()

    self.assertEqual(received, [{"cat": "meow"}, {"dog": "bark"}])

    self.assert_sent(
        broker_id=1,
        request=offset.OffsetRequest(
            replica_id=-1,
            topics=[
                offset.TopicRequest(
                    name="test.topic",
                    partitions=[
                        offset.PartitionRequest(
                            partition_id=0,
                            time=-1,  # alias for 'end of topic'
                            max_offsets=1,
                        )
                    ],
                )
            ],
        ),
    )
    self.assert_sent(
        broker_id=1,
        request=fetch.FetchRequest(
            replica_id=-1,
            max_wait_time=1000,
            min_bytes=1,
            topics=[
                fetch.TopicRequest(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionRequest(
                            partition_id=0,
                            offset=99,
                            max_bytes=(1024 * 1024),
                        ),
                    ],
                )
            ],
        ),
    )
def test_timedelta_offset(self, mock_calendar):
    """A timedelta start is interpreted as 'that long *in the past*':
    the offset request carries now() minus the delta, in epoch seconds."""
    # captured the epoch seconds when first writing this
    written_epoch = 1455849320
    mock_calendar.timegm.return_value = written_epoch

    self.add_topic("test.topic", leaders=(1,))
    self.set_responses(
        broker_id=1,
        api="offset",
        responses=[
            offset.OffsetResponse(topics=[
                offset.TopicResponse(
                    name="test.topic",
                    partitions=[
                        offset.PartitionResponse(
                            partition_id=0,
                            error_code=errors.no_error,
                            offsets=[99],
                        )
                    ],
                )
            ])
        ],
    )
    self.set_responses(
        broker_id=1,
        api="fetch",
        responses=[
            fetch.FetchResponse(topics=[
                fetch.TopicResponse(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionResponse(
                            partition_id=0,
                            error_code=errors.no_error,
                            highwater_mark_offset=2,
                            message_set=messages.MessageSet([]),
                        ),
                    ],
                ),
            ]),
        ],
    )

    consumer = single.SingleConsumer(["kafka01"])
    yield consumer.connect()

    two_days = datetime.timedelta(days=2)
    yield consumer.consume("test.topic", start=two_days)

    self.assert_sent(
        broker_id=1,
        request=offset.OffsetRequest(
            replica_id=-1,
            topics=[
                offset.TopicRequest(
                    name="test.topic",
                    partitions=[
                        offset.PartitionRequest(
                            partition_id=0,
                            # two days *in the past*
                            time=written_epoch - two_days.total_seconds(),
                            max_offsets=1,
                        )
                    ],
                )
            ],
        ),
    )
def test_datetime_offset(self):
    """A datetime start is converted to seconds-since-epoch and sent
    as the `time` field of the offset request."""
    self.add_topic("test.topic", leaders=(1,))
    self.set_responses(
        broker_id=1,
        api="offset",
        responses=[
            offset.OffsetResponse(topics=[
                offset.TopicResponse(
                    name="test.topic",
                    partitions=[
                        offset.PartitionResponse(
                            partition_id=0,
                            error_code=errors.no_error,
                            offsets=[99],
                        )
                    ],
                )
            ])
        ],
    )
    self.set_responses(
        broker_id=1,
        api="fetch",
        responses=[
            fetch.FetchResponse(topics=[
                fetch.TopicResponse(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionResponse(
                            partition_id=0,
                            error_code=errors.no_error,
                            highwater_mark_offset=2,
                            message_set=messages.MessageSet([]),
                        ),
                    ],
                ),
            ]),
        ],
    )

    consumer = single.SingleConsumer(["kafka01"])
    yield consumer.connect()

    start = datetime.datetime(2016, 2, 18, 0, 0)
    # expected wire value: seconds between the epoch and `start`
    secs_since_epoch = (
        start - datetime.datetime(1970, 1, 1, 0, 0)
    ).total_seconds()

    yield consumer.consume("test.topic", start=start)

    self.assert_sent(
        broker_id=1,
        request=offset.OffsetRequest(
            replica_id=-1,
            topics=[
                offset.TopicRequest(
                    name="test.topic",
                    partitions=[
                        offset.PartitionRequest(
                            partition_id=0,
                            time=secs_since_epoch,
                            max_offsets=1,
                        )
                    ],
                )
            ],
        ),
    )
def test_beginning_offset(self):
    """start=BEGINNING maps to the Kafka sentinel time=-2
    ('beginning of topic') in the offset request."""
    self.add_topic("test.topic", leaders=(1,))
    self.set_responses(
        broker_id=1,
        api="offset",
        responses=[
            offset.OffsetResponse(topics=[
                offset.TopicResponse(
                    name="test.topic",
                    partitions=[
                        offset.PartitionResponse(
                            partition_id=0,
                            error_code=errors.no_error,
                            offsets=[99],
                        )
                    ],
                )
            ])
        ],
    )
    self.set_responses(
        broker_id=1,
        api="fetch",
        responses=[
            fetch.FetchResponse(topics=[
                fetch.TopicResponse(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionResponse(
                            partition_id=0,
                            error_code=errors.no_error,
                            highwater_mark_offset=2,
                            message_set=messages.MessageSet([]),
                        ),
                    ],
                ),
            ]),
        ],
    )

    consumer = single.SingleConsumer(["kafka01"])
    yield consumer.connect()

    yield consumer.consume("test.topic", start=single.SingleConsumer.BEGINNING)

    self.assert_sent(
        broker_id=1,
        request=offset.OffsetRequest(
            replica_id=-1,
            topics=[
                offset.TopicRequest(
                    name="test.topic",
                    partitions=[
                        offset.PartitionRequest(
                            partition_id=0,
                            time=-2,  # alias for 'beginning of topic'
                            max_offsets=1,
                        )
                    ],
                )
            ],
        ),
    )
def test_consume_with_offset_fetch_and_commit_errors(self):
    """Retriable errors on offset_fetch (load-in-progress, timeout) are
    retried until success; offset_commit errors are tolerated while the
    consume itself still goes through."""
    self.add_topic("test.topic", leaders=(1, 8))
    self.allocator.allocation = {"test.topic": [1]}

    # offset_fetch: two retriable failures, then a committed offset of 80
    self.set_responses(
        broker_id=3,
        api="offset_fetch",
        responses=[
            offset_fetch.OffsetFetchResponse(topics=[
                offset_fetch.TopicResponse(
                    name="test.topic",
                    partitions=[
                        offset_fetch.PartitionResponse(
                            error_code=errors.offsets_load_in_progress,
                            partition_id=1,
                        )
                    ]),
            ]),
            offset_fetch.OffsetFetchResponse(topics=[
                offset_fetch.TopicResponse(
                    name="test.topic",
                    partitions=[
                        offset_fetch.PartitionResponse(
                            error_code=errors.request_timed_out,
                            partition_id=1,
                        )
                    ]),
            ]),
            offset_fetch.OffsetFetchResponse(topics=[
                offset_fetch.TopicResponse(
                    name="test.topic",
                    partitions=[
                        offset_fetch.PartitionResponse(
                            error_code=errors.no_error,
                            partition_id=1,
                            offset=80,
                            metadata="committed, ok!"),
                    ]),
            ]),
        ])

    # offset_commit: a retriable timeout followed by an unknown error
    self.set_responses(
        broker_id=3,
        api="offset_commit",
        responses=[
            offset_commit.OffsetCommitResponse(topics=[
                offset_commit.TopicResponse(
                    name="test.topic",
                    partitions=[
                        offset_commit.PartitionResponse(
                            error_code=errors.request_timed_out,
                            partition_id=1,
                        )
                    ]),
            ]),
            offset_commit.OffsetCommitResponse(topics=[
                offset_commit.TopicResponse(
                    name="test.topic",
                    partitions=[
                        offset_commit.PartitionResponse(
                            error_code=errors.unknown,
                            partition_id=1,
                        )
                    ]),
            ]),
        ])

    self.set_responses(
        broker_id=8,
        api="fetch",
        responses=[
            fetch.FetchResponse(topics=[
                fetch.TopicResponse(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionResponse(
                            partition_id=1,
                            error_code=errors.no_error,
                            highwater_mark_offset=2,
                            message_set=messages.MessageSet([])),
                    ]),
            ])
        ])

    consumer = grouped.GroupedConsumer(
        ["kafka01", "kafka02"], "work-group",
        zk_hosts=["zk01", "zk02", "zk03"])
    yield consumer.connect()

    yield consumer.consume("test.topic")

    self.assert_sent(
        broker_id=3,
        request=offset_fetch.OffsetFetchRequest(
            group_name="work-group",
            topics=[
                offset_fetch.TopicRequest(
                    name="test.topic",
                    partitions=[1],
                )
            ]))
    self.assert_sent(
        broker_id=3,
        request=offset_commit.OffsetCommitV0Request(
            group="work-group",
            topics=[
                offset_commit.TopicRequest(
                    name="test.topic",
                    partitions=[
                        offset_commit.PartitionRequest(
                            partition_id=1,
                            offset=80,
                            metadata="committed by %s" % consumer.name)
                    ])
            ]))
def test_commit_offset_with_large_metadata(self):
    """When the broker rejects a commit with offset_metadata_too_large,
    the consumer retries the commit with empty metadata."""
    self.add_topic("test.topic", leaders=(1, 8))
    self.allocator.allocation = {"test.topic": [1]}

    # NOTE(review): this response uses partition_id=0 while the
    # allocation and fetch use partition 1 — looks like a typo, but the
    # assertions below pass either way; confirm intent before changing.
    self.set_responses(
        broker_id=3,
        api="offset_fetch",
        responses=[
            offset_fetch.OffsetFetchResponse(topics=[
                offset_fetch.TopicResponse(
                    name="test.topic",
                    partitions=[
                        offset_fetch.PartitionResponse(
                            error_code=errors.no_error,
                            partition_id=0,
                            offset=80,
                            metadata="committed, ok!")
                    ]),
            ]),
        ])

    # first commit is rejected for oversized metadata, retry succeeds
    self.set_responses(
        broker_id=3,
        api="offset_commit",
        responses=[
            offset_commit.OffsetCommitResponse(topics=[
                offset_commit.TopicResponse(
                    name="test.topic",
                    partitions=[
                        offset_commit.PartitionResponse(
                            error_code=(errors.offset_metadata_too_large),
                            partition_id=1,
                        )
                    ]),
            ]),
            offset_commit.OffsetCommitResponse(topics=[
                offset_commit.TopicResponse(
                    name="test.topic",
                    partitions=[
                        offset_commit.PartitionResponse(
                            error_code=errors.no_error,
                            partition_id=1,
                        )
                    ]),
            ]),
        ])

    self.set_responses(
        broker_id=8,
        api="fetch",
        responses=[
            fetch.FetchResponse(topics=[
                fetch.TopicResponse(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionResponse(
                            partition_id=1,
                            error_code=errors.no_error,
                            highwater_mark_offset=2,
                            message_set=messages.MessageSet([
                                (80, messages.Message(
                                    magic=0,
                                    attributes=0,
                                    key=None,
                                    value='{"cat": "meow"}',
                                )),
                            ])),
                    ]),
            ])
        ])

    consumer = grouped.GroupedConsumer(
        ["kafka01", "kafka02"], "work-group",
        zk_hosts=["zk01", "zk02", "zk03"])
    yield consumer.connect()

    received = yield consumer.consume("test.topic")

    self.assertEqual(received, [{"cat": "meow"}])

    # first attempt carries the consumer-name metadata…
    self.assert_sent(
        broker_id=3,
        request=offset_commit.OffsetCommitV0Request(
            group="work-group",
            topics=[
                offset_commit.TopicRequest(
                    name="test.topic",
                    partitions=[
                        offset_commit.PartitionRequest(
                            partition_id=1,
                            offset=81,
                            metadata="committed by %s" % consumer.name)
                    ])
            ]))
    # …the retry drops the metadata entirely
    self.assert_sent(
        broker_id=3,
        request=offset_commit.OffsetCommitV0Request(
            group="work-group",
            topics=[
                offset_commit.TopicRequest(
                    name="test.topic",
                    partitions=[
                        offset_commit.PartitionRequest(
                            partition_id=1,
                            offset=81,
                            metadata="")
                    ])
            ]))
def test_consume_without_autocommit(self):
    """With autocommit off, consuming fetches from the committed offsets
    but commits nothing until commit_offsets() is called explicitly."""
    self.add_topic("test.topic", leaders=(1, 8))
    self.allocator.allocation = {"test.topic": [0, 1]}

    self.set_responses(
        broker_id=3,
        api="offset_fetch",
        responses=[
            offset_fetch.OffsetFetchResponse(topics=[
                offset_fetch.TopicResponse(
                    name="test.topic",
                    partitions=[
                        offset_fetch.PartitionResponse(
                            error_code=errors.no_error,
                            partition_id=0,
                            offset=80,
                            metadata="committed, ok!"),
                        offset_fetch.PartitionResponse(
                            error_code=errors.no_error,
                            partition_id=1,
                            offset=110,
                            metadata="committed, ok!"),
                    ]),
            ]),
        ])
    self.set_responses(
        broker_id=3,
        api="offset_commit",
        responses=[
            offset_commit.OffsetCommitResponse(topics=[
                offset_commit.TopicResponse(
                    name="test.topic",
                    partitions=[
                        offset_commit.PartitionResponse(
                            error_code=errors.no_error,
                            partition_id=1,
                        )
                    ]),
            ]),
        ])
    # partition 0 lives on broker 1, partition 1 on broker 8
    self.set_responses(
        broker_id=1,
        api="fetch",
        responses=[
            fetch.FetchResponse(topics=[
                fetch.TopicResponse(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionResponse(
                            partition_id=0,
                            error_code=errors.no_error,
                            highwater_mark_offset=2,
                            message_set=messages.MessageSet([
                                (80, messages.Message(
                                    magic=0,
                                    attributes=0,
                                    key=None,
                                    value='{"cat": "meow"}',
                                )),
                            ])),
                    ]),
            ])
        ])
    self.set_responses(
        broker_id=8,
        api="fetch",
        responses=[
            fetch.FetchResponse(topics=[
                fetch.TopicResponse(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionResponse(
                            partition_id=1,
                            error_code=errors.no_error,
                            highwater_mark_offset=2,
                            message_set=messages.MessageSet([
                                (110, messages.Message(
                                    magic=0,
                                    attributes=0,
                                    key=None,
                                    value='{"cat": "meow"}',
                                )),
                            ])),
                    ]),
            ])
        ])

    consumer = grouped.GroupedConsumer(
        ["kafka01", "kafka02"], "work-group",
        zk_hosts=["zk01", "zk02", "zk03"],
        autocommit=False)
    yield consumer.connect()

    yield consumer.consume("test.topic")

    self.assert_sent(
        broker_id=1,
        request=fetch.FetchRequest(
            replica_id=-1,
            max_wait_time=1000,
            min_bytes=1,
            topics=[
                fetch.TopicRequest(name="test.topic", partitions=[
                    fetch.PartitionRequest(
                        partition_id=0,
                        offset=80,
                        max_bytes=(1024 * 1024),
                    ),
                ])
            ]))
    self.assert_sent(
        broker_id=8,
        request=fetch.FetchRequest(
            replica_id=-1,
            max_wait_time=1000,
            min_bytes=1,
            topics=[
                fetch.TopicRequest(name="test.topic", partitions=[
                    fetch.PartitionRequest(
                        partition_id=1,
                        offset=110,
                        max_bytes=(1024 * 1024),
                    ),
                ])
            ]))

    # nothing was committed during consume; do it explicitly now
    yield consumer.commit_offsets()

    self.assert_sent(
        broker_id=3,
        request=offset_commit.OffsetCommitV0Request(
            group="work-group",
            topics=[
                offset_commit.TopicRequest(
                    name="test.topic",
                    partitions=[
                        offset_commit.PartitionRequest(
                            partition_id=0,
                            offset=81,
                            metadata="committed by %s" % consumer.name),
                        offset_commit.PartitionRequest(
                            partition_id=1,
                            offset=111,
                            metadata="committed by %s" % consumer.name),
                    ])
            ]))
def test_max_bytes_at_partition_level(self):
    """The consumer-level max_bytes budget is split evenly across the
    partitions on a broker (two partitions -> half each)."""
    self.add_topic("test.topic", leaders=(3, 3))
    self.set_responses(
        broker_id=3,
        api="fetch",
        responses=[
            fetch.FetchResponse(topics=[
                fetch.TopicResponse(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionResponse(
                            partition_id=0,
                            error_code=errors.no_error,
                            highwater_mark_offset=2,
                            message_set=messages.MessageSet(messages=[
                                (
                                    0,
                                    messages.Message(
                                        magic=0,
                                        attributes=0,
                                        key=None,
                                        value='{"foo": "bar"}',
                                    ),
                                ),
                            ]),
                        ),
                        fetch.PartitionResponse(
                            partition_id=1,
                            error_code=errors.no_error,
                            highwater_mark_offset=2,
                            message_set=messages.MessageSet(messages=[
                                (
                                    0,
                                    messages.Message(
                                        magic=0,
                                        attributes=0,
                                        key=None,
                                        value='{"bwee": "bwoo"}',
                                    ),
                                ),
                            ]),
                        ),
                    ],
                ),
            ]),
        ],
    )

    consumer = FakeConsumer(["kafka01", "kafka02"], max_bytes=(1024 * 1024))
    yield consumer.connect()

    received = yield consumer.consume("test.topic")

    self.assertEqual(received, [{"foo": "bar"}, {"bwee": "bwoo"}])

    self.assert_sent(
        broker_id=3,
        request=fetch.FetchRequest(
            replica_id=-1,
            max_wait_time=1000,
            min_bytes=1,
            topics=[
                fetch.TopicRequest(
                    name="test.topic",
                    partitions=[
                        # 1MB budget halved across the two partitions
                        fetch.PartitionRequest(
                            partition_id=0,
                            offset=0,
                            max_bytes=(512 * 1024),
                        ),
                        fetch.PartitionRequest(
                            partition_id=1,
                            offset=0,
                            max_bytes=(512 * 1024),
                        ),
                    ],
                )
            ],
        ),
    )
def test_consumer_tracks_offsets(self):
    """After a consume, the next fetch for each partition starts at the
    offset just past the last message seen on that partition."""
    self.add_topic("test.topic", leaders=(3, 8))
    # broker 3 (partition 0): two messages, then an empty response
    self.set_responses(
        broker_id=3,
        api="fetch",
        responses=[
            fetch.FetchResponse(topics=[
                fetch.TopicResponse(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionResponse(
                            partition_id=0,
                            error_code=errors.no_error,
                            highwater_mark_offset=2,
                            message_set=messages.MessageSet(messages=[
                                (
                                    0,
                                    messages.Message(
                                        magic=0,
                                        attributes=0,
                                        key=None,
                                        value='{"foo": "bar"}',
                                    ),
                                ),
                                (
                                    1,
                                    messages.Message(
                                        magic=0,
                                        attributes=0,
                                        key=None,
                                        value='{"bwee": "bwoo"}',
                                    ),
                                ),
                            ]),
                        ),
                    ],
                ),
            ]),
            fetch.FetchResponse(topics=[
                fetch.TopicResponse(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionResponse(
                            partition_id=0,
                            error_code=errors.no_error,
                            highwater_mark_offset=2,
                            message_set=messages.MessageSet([]),
                        ),
                    ],
                )
            ]),
        ],
    )
    # broker 8 (partition 1): one message, then an empty response
    self.set_responses(
        broker_id=8,
        api="fetch",
        responses=[
            fetch.FetchResponse(topics=[
                fetch.TopicResponse(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionResponse(
                            partition_id=1,
                            error_code=errors.no_error,
                            highwater_mark_offset=2,
                            message_set=messages.MessageSet(messages=[
                                (
                                    0,
                                    messages.Message(
                                        magic=0,
                                        attributes=0,
                                        key=None,
                                        value='{"meow": "bark"}',
                                    ),
                                ),
                            ]),
                        ),
                    ],
                ),
            ]),
            fetch.FetchResponse(topics=[
                fetch.TopicResponse(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionResponse(
                            partition_id=1,
                            error_code=errors.no_error,
                            highwater_mark_offset=2,
                            message_set=messages.MessageSet([]),
                        ),
                    ],
                ),
            ]),
        ],
    )

    consumer = FakeConsumer(["kafka01", "kafka02"])
    yield consumer.connect()

    received = yield consumer.consume("test.topic")

    # cross-broker ordering is nondeterministic; accept either interleave
    acceptable_orderings = [
        [{"meow": "bark"}, {"foo": "bar"}, {"bwee": "bwoo"}],
        [{"foo": "bar"}, {"bwee": "bwoo"}, {"meow": "bark"}],
    ]
    self.assertTrue(
        any([received == possibility for possibility in acceptable_orderings])
    )

    # first round fetched from offset 0 on both partitions
    self.assert_sent(
        broker_id=3,
        request=fetch.FetchRequest(
            replica_id=-1,
            max_wait_time=1000,
            min_bytes=1,
            topics=[
                fetch.TopicRequest(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionRequest(
                            partition_id=0,
                            offset=0,
                            max_bytes=(1024 * 1024),
                        ),
                    ],
                )
            ],
        ),
    )
    self.assert_sent(
        broker_id=8,
        request=fetch.FetchRequest(
            replica_id=-1,
            max_wait_time=1000,
            min_bytes=1,
            topics=[
                fetch.TopicRequest(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionRequest(
                            partition_id=1,
                            offset=0,
                            max_bytes=(1024 * 1024),
                        ),
                    ],
                )
            ],
        ),
    )

    received = yield consumer.consume("test.topic")

    self.assertEqual(received, [])

    # second round resumes past what was consumed: 2 on p0, 1 on p1
    self.assert_sent(
        broker_id=3,
        request=fetch.FetchRequest(
            replica_id=-1,
            max_wait_time=1000,
            min_bytes=1,
            topics=[
                fetch.TopicRequest(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionRequest(
                            partition_id=0,
                            offset=2,
                            max_bytes=(1024 * 1024),
                        ),
                    ],
                )
            ],
        ),
    )
    self.assert_sent(
        broker_id=8,
        request=fetch.FetchRequest(
            replica_id=-1,
            max_wait_time=1000,
            min_bytes=1,
            topics=[
                fetch.TopicRequest(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionRequest(
                            partition_id=1,
                            offset=1,
                            max_bytes=(1024 * 1024),
                        ),
                    ],
                )
            ],
        ),
    )
def test_offset_out_of_range_error(self):
    """An offset_out_of_range fetch error resets the tracked offset so
    the next consume starts over from offset 0."""
    self.add_topic("test.topic", leaders=(3,))
    self.set_responses(
        broker_id=3,
        api="fetch",
        responses=[
            # first fetch (at the stale offset) is rejected…
            fetch.FetchResponse(topics=[
                fetch.TopicResponse(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionResponse(
                            partition_id=0,
                            error_code=errors.offset_out_of_range,
                            highwater_mark_offset=2,
                            message_set=messages.MessageSet([]),
                        ),
                    ],
                ),
            ]),
            # …the follow-up from offset 0 succeeds
            fetch.FetchResponse(topics=[
                fetch.TopicResponse(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionResponse(
                            partition_id=0,
                            error_code=errors.no_error,
                            highwater_mark_offset=2,
                            message_set=messages.MessageSet(messages=[
                                (
                                    0,
                                    messages.Message(
                                        magic=0,
                                        attributes=0,
                                        key=None,
                                        value='{"cat": "dog"}',
                                    ),
                                ),
                            ]),
                        ),
                    ],
                ),
            ]),
        ],
    )

    consumer = FakeConsumer(["kafka01"])
    yield consumer.connect()

    # plant a stale offset so the broker rejects the first fetch
    consumer.offsets["test.topic"][0] = 80
    consumer.synced_offsets.add("test.topic")

    received = yield consumer.consume("test.topic")

    self.assertEqual(received, [])
    self.assert_sent(
        broker_id=3,
        request=fetch.FetchRequest(
            replica_id=-1,
            max_wait_time=1000,
            min_bytes=1,
            topics=[
                fetch.TopicRequest(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionRequest(
                            partition_id=0,
                            offset=80,
                            max_bytes=(1024 * 1024),
                        ),
                    ],
                )
            ],
        ),
    )

    received = yield consumer.consume("test.topic")

    self.assertEqual(received, [{"cat": "dog"}])
    self.assert_sent(
        broker_id=3,
        request=fetch.FetchRequest(
            replica_id=-1,
            max_wait_time=1000,
            min_bytes=1,
            topics=[
                fetch.TopicRequest(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionRequest(
                            partition_id=0,
                            offset=0,
                            max_bytes=(1024 * 1024),
                        ),
                    ],
                )
            ],
        ),
    )
def test_retriable_code_when_consuming(self):
    """A retriable fetch error (leader_not_available) triggers a cluster
    heal; the next consume succeeds against the new leader."""
    self.add_topic("test.topic", leaders=(3,))
    self.set_responses(
        broker_id=3,
        api="fetch",
        responses=[
            fetch.FetchResponse(topics=[
                fetch.TopicResponse(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionResponse(
                            partition_id=0,
                            error_code=errors.leader_not_available,
                            highwater_mark_offset=2,
                            message_set=messages.MessageSet([]),
                        ),
                    ],
                ),
            ]),
            fetch.FetchResponse(topics=[
                fetch.TopicResponse(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionResponse(
                            partition_id=0,
                            error_code=errors.no_error,
                            highwater_mark_offset=2,
                            message_set=messages.MessageSet(messages=[
                                (
                                    0,
                                    messages.Message(
                                        magic=0,
                                        attributes=0,
                                        key=None,
                                        value='{"cat": "dog"}',
                                    ),
                                ),
                            ]),
                        ),
                    ],
                ),
            ]),
        ],
    )

    consumer = FakeConsumer(["kafka01"])
    yield consumer.connect()

    received = yield consumer.consume("test.topic")

    # the retriable error should have prompted a metadata heal
    consumer.cluster.heal.assert_called_once_with()
    self.assertEqual(received, [])
    self.assertEqual(len(self.requests_by_broker[3]), 1)

    received = yield consumer.consume("test.topic")

    self.assertEqual(received, [{"cat": "dog"}])
    self.assertEqual(len(self.requests_by_broker[3]), 2)
def test_custom_deserializer_and_options(self):
    """A custom deserializer is applied per message — a message whose
    deserialization raises is dropped — and the max_wait_time/min_bytes/
    max_bytes options flow through to the fetch request."""
    self.add_topic("test.topic", leaders=(3,))
    self.set_responses(
        broker_id=3,
        api="fetch",
        responses=[
            fetch.FetchResponse(topics=[
                fetch.TopicResponse(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionResponse(
                            partition_id=0,
                            error_code=errors.no_error,
                            highwater_mark_offset=2,
                            message_set=messages.MessageSet(messages=[
                                (
                                    0,
                                    messages.Message(
                                        magic=0,
                                        attributes=0,
                                        key=None,
                                        value='cat',
                                    ),
                                ),
                                (
                                    1,
                                    messages.Message(
                                        magic=0,
                                        attributes=0,
                                        key=None,
                                        value='dog',
                                    ),
                                ),
                            ]),
                        ),
                    ],
                ),
            ]),
        ],
    )

    # first call blows up (message dropped), second returns a value
    outcomes = [Exception(), "bark"]

    def deserializer(val):
        outcome = outcomes.pop(0)
        if isinstance(outcome, Exception):
            raise outcome
        return "%s: %s" % (val, outcome)

    consumer = FakeConsumer(
        ["kafka01", "kafka02"],
        deserializer=deserializer,
        max_wait_time=500,
        min_bytes=1024,
        max_bytes=1024,
    )
    yield consumer.connect()

    received = yield consumer.consume("test.topic")

    # 'cat' was dropped by the failing deserialization; only 'dog' survives
    self.assertEqual(received, ["dog: bark"])

    self.assert_sent(
        broker_id=3,
        request=fetch.FetchRequest(
            replica_id=-1,
            max_wait_time=500,
            min_bytes=1024,
            topics=[
                fetch.TopicRequest(
                    name="test.topic",
                    partitions=[
                        fetch.PartitionRequest(
                            partition_id=0,
                            offset=0,
                            max_bytes=1024,
                        ),
                    ],
                )
            ],
        ),
    )