def test(self):
    """
    Execute a subscription's query twice via ``SubscriptionExecutor``:
    once inside the time window (expects 10 matching events) and once
    past the window (expects 0).

    NOTE(review): assumes the enclosing test class seeded 10 events with
    platform "a" within ``self.minutes`` of now — confirm in setUp.
    """
    executor = SubscriptionExecutor(self.dataset, ThreadPoolExecutor(), DummyMetricsBackend(strict=True))
    subscription = Subscription(
        SubscriptionIdentifier(PartitionId(0), uuid1()),
        SubscriptionData(
            project_id=self.project_id,
            conditions=[["platform", "IN", ["a"]]],
            aggregations=[["count()", "", "count"]],
            time_window=timedelta(minutes=500),
            resolution=timedelta(minutes=1),
        ),
    )
    now = datetime.utcnow()
    tick = Tick(
        offsets=Interval(1, 2),
        timestamps=Interval(now - timedelta(minutes=1), now),
    )
    # Query anchored at "now": the seeded events fall inside the window.
    result = executor.execute(ScheduledTask(now, subscription), tick).result()
    assert result["data"][0]["count"] == 10
    # Query anchored past the window: no events should match any more.
    result = executor.execute(
        ScheduledTask(
            now + timedelta(minutes=self.minutes) + subscription.data.time_window,
            subscription,
        ),
        tick,
    ).result()
    assert result["data"][0]["count"] == 0
def poll(self, timeout: Optional[float] = None) -> Optional[Message[Tick]]:
    """
    Poll the wrapped consumer and derive a ``Tick`` spanning the gap
    between the previously observed message and the current one on the
    same partition.

    Returns ``None`` when the inner consumer yields no message, or when
    this is the first message seen for the partition (no lower bound to
    build an interval from yet).
    """
    message = self.__consumer.poll(timeout)
    if message is None:
        return None
    previous_message = self.__previous_messages.get(message.partition)
    result: Optional[Message[Tick]]
    if previous_message is not None:
        # The tick covers the span between the previous message and this
        # one, in both offset space and timestamp space.
        # NOTE(review): the emitted message carries the *previous* offset —
        # presumably so that committing it replays the interval after a
        # restart; confirm against the committer's semantics.
        result = Message(
            message.partition,
            previous_message.offset,
            Tick(
                Interval(previous_message.offset, message.offset),
                Interval(previous_message.timestamp, message.timestamp),
            ),
            message.timestamp,
        )
    else:
        result = None
    # Unconditionally remember this message as the lower bound for the
    # next tick on this partition.
    self.__previous_messages[message.partition] = MessageDetails(
        message.offset, message.timestamp
    )
    return result
def test_tick_time_shift() -> None:
    """Shifting a tick forward one day moves both timestamp bounds by a day."""
    one_day = timedelta(hours=24)
    lower, upper = datetime(1970, 1, 1), datetime(1970, 1, 2)
    original = Tick(0, Interval(0, 1), Interval(lower, upper))
    expected = Tick(0, Interval(0, 1), Interval(lower + one_day, upper + one_day))
    assert original.time_shift(one_day) == expected
def test_interval_validation() -> None:
    """Ordered (and degenerate) bounds are accepted; inverted bounds raise."""
    # lower == upper and lower < upper must both construct successfully.
    for lo, hi in ((1, 1), (1, 10)):
        Interval(lo, hi)
    # An inverted range raises, and the error carries both offending bounds.
    with pytest.raises(InvalidRangeError) as e:
        Interval(10, 1)
    assert (e.value.lower, e.value.upper) == (10, 1)
def poll(self, timeout: Optional[float] = None) -> Optional[Message[Tick]]:
    """
    Poll the commit log and turn consecutive commits from the followed
    consumer group into ``Tick``s.

    Returns ``None`` when: no message is available, the payload cannot be
    decoded (or lacks an original-message timestamp), the commit belongs
    to a different consumer group, no prior commit exists for the
    partition, or the timestamps are non-monotonic.
    """
    message = self.__consumer.poll(timeout)
    if message is None:
        return None
    try:
        commit = commit_codec.decode(message.payload)
        # A tick needs the original message's timestamp; treat a missing
        # one the same as a decode failure.
        assert commit.orig_message_ts is not None
    except Exception:
        logger.error(
            f"Error decoding commit log message for followed group: {self.__followed_consumer_group}.",
            extra={"payload": str(message.payload), "offset": message.offset},
            exc_info=True,
        )
        return None
    # Commits from other consumer groups on the shared commit log are
    # silently skipped.
    if commit.group != self.__followed_consumer_group:
        return None
    previous_message = self.__previous_messages.get(commit.partition)
    result: Optional[Message[Tick]]
    if previous_message is not None:
        try:
            time_interval = Interval(previous_message.orig_message_ts, commit.orig_message_ts)
        except InvalidRangeError:
            # Timestamps went backwards; drop the message.
            # NOTE(review): this early return also skips the
            # __previous_messages update below, so the stale lower bound
            # is retained — confirm this is intended.
            logger.warning(
                "Could not construct valid time interval between %r and %r!",
                previous_message,
                MessageDetails(commit.offset, commit.orig_message_ts),
                exc_info=True,
            )
            return None
        else:
            result = Message(
                message.partition,
                message.offset,
                Tick(
                    commit.partition.index,
                    Interval(previous_message.offset, commit.offset),
                    time_interval,
                ).time_shift(self.__time_shift),
                message.timestamp,
            )
    else:
        result = None
    # Record this commit as the lower bound for the next tick.
    self.__previous_messages[commit.partition] = MessageDetails(
        commit.offset, commit.orig_message_ts)
    return result
def test_subscription_worker_consistent(
        subscription_data: SubscriptionData) -> None:
    """
    With the non-consistent sample rate forced to 1, processing a single
    tick must record exactly one ``consistent`` metric increment.
    """
    state.set_config("event_subscription_non_consistent_sample_rate", 1)
    broker: Broker[SubscriptionTaskResult] = Broker(MemoryMessageStorage(), TestingClock())
    result_topic = Topic("subscription-results")
    broker.create_topic(result_topic, partitions=1)
    frequency = timedelta(minutes=1)
    evaluations = 1
    subscription = Subscription(
        SubscriptionIdentifier(PartitionId(0), uuid1()),
        subscription_data,
    )
    store = DummySubscriptionDataStore()
    store.create(subscription.identifier.uuid, subscription.data)
    metrics = TestingMetricsBackend()
    dataset = get_dataset("events")
    worker = SubscriptionWorker(
        dataset,
        ThreadPoolExecutor(),
        {0: SubscriptionScheduler(store, PartitionId(0), timedelta(), DummyMetricsBackend(strict=True))},
        broker.get_producer(),
        result_topic,
        metrics,
    )
    now = datetime(2000, 1, 1)
    # A tick whose time interval covers exactly one scheduling frequency,
    # so a single evaluation is scheduled.
    tick = Tick(
        offsets=Interval(0, 1),
        timestamps=Interval(now - (frequency * evaluations), now),
    )
    worker.process_message(Message(Partition(Topic("events"), 0), 0, tick, now))
    # NOTE(review): fixed sleep gives the executor thread time to run the
    # query — a potential source of flakiness on slow machines.
    time.sleep(0.1)
    assert (len([m for m in metrics.calls
                 if isinstance(m, Increment) and m.name == "consistent"]) == 1)
def poll(self, timeout: Optional[float] = None) -> Optional[Message[Tick]]:
    """
    Poll the wrapped consumer and derive a ``Tick`` between the previous
    and current message on the same partition, optionally suppressing
    ticks narrower than ``self.__min_interval``.
    """
    message = self.__consumer.poll(timeout)
    if message is None:
        return None
    previous_message = self.__previous_messages.get(message.partition)
    result: Optional[Message[Tick]]
    if previous_message is not None:
        try:
            time_interval = Interval(previous_message.timestamp, message.timestamp)
            # Too-narrow interval: return early WITHOUT updating
            # __previous_messages, so the interval keeps growing until it
            # reaches the minimum width.
            if (self.__min_interval is not None
                    and time_interval.upper - time_interval.lower < self.__min_interval):
                return None
        except InvalidRangeError:
            # Non-monotonic timestamps; drop the message.
            # NOTE(review): like the early return above, this skips the
            # state update below — the stale lower bound is kept.
            logger.warning(
                "Could not construct valid time interval between %r and %r!",
                previous_message,
                message,
                exc_info=True,
            )
            return None
        else:
            result = Message(
                message.partition,
                previous_message.offset,
                Tick(
                    None,
                    Interval(previous_message.offset, message.offset),
                    time_interval,
                ).time_shift(self.__time_shift),
                message.timestamp,
            )
    else:
        result = None
    self.__previous_messages[message.partition] = MessageDetails(
        message.offset, message.timestamp)
    return result
def time_shift(self, delta: timedelta) -> Tick:
    """
    Return a new ``Tick`` whose time interval has both bounds moved by
    ``delta``; the offset interval is carried over unchanged.
    """
    shifted = Interval(self.timestamps.lower + delta, self.timestamps.upper + delta)
    return Tick(self.offsets, shifted)
def build_interval(self, lower: timedelta, upper: timedelta) -> Interval[datetime]:
    """Build a datetime interval whose bounds are offsets from ``self.now``."""
    start = self.now + lower
    end = self.now + upper
    return Interval(start, end)
def build_tick(self, lower: timedelta, upper: timedelta) -> Tick:
    """
    Build a ``Tick`` with no partition, fixed offsets (1, 5), and a time
    interval positioned relative to ``self.now``.
    """
    timestamps = Interval(self.now + lower, self.now + upper)
    return Tick(None, Interval(1, 5), timestamps)
def test_combined_scheduler_and_executor() -> None:
    """
    End-to-end check of the combined scheduler/executor strategy: submit
    one tick and wait for a single commit, proving the scheduled query ran
    and its result was produced.

    NOTE(review): uses a real ``KafkaProducer`` — this is an integration
    test that requires a reachable Kafka broker.
    """
    state.set_config("subscription_mode_events", "new")
    create_subscription()
    epoch = datetime(1970, 1, 1)
    dataset = get_dataset("events")
    entity_names = ["events"]
    num_partitions = 2
    max_concurrent_queries = 2
    total_concurrent_queries = 2
    metrics = TestingMetricsBackend()
    commit = mock.Mock()
    partitions = mock.Mock()
    topic = Topic("snuba-commit-log")
    partition = Partition(topic, 0)
    stale_threshold_seconds = None
    result_topic = "events-subscription-results"
    schedule_ttl = 60
    producer = KafkaProducer(
        build_kafka_producer_configuration(SnubaTopic.SUBSCRIPTION_RESULTS_EVENTS))
    # ``closing`` guarantees the producer is closed even on failure.
    with closing(producer):
        factory = CombinedSchedulerExecutorFactory(
            dataset,
            entity_names,
            num_partitions,
            max_concurrent_queries,
            total_concurrent_queries,
            producer,
            metrics,
            stale_threshold_seconds,
            result_topic,
            schedule_ttl,
        )
        strategy = factory.create_with_partitions(commit, partitions)
        # A one-minute tick over offsets (1, 3) on the commit-log partition.
        message = Message(
            partition,
            4,
            Tick(
                0,
                offsets=Interval(1, 3),
                timestamps=Interval(epoch, epoch + timedelta(seconds=60)),
            ),
            epoch,
        )
        strategy.submit(message)
        # Wait for the query to be executed and the result message produced
        # (poll up to ~5 seconds before giving up).
        for i in range(10):
            time.sleep(0.5)
            strategy.poll()
            if commit.call_count == 1:
                break
        assert commit.call_count == 1
        strategy.close()
        strategy.join()
def test_tick_consumer_non_monotonic() -> None:
    """
    A ``TickConsumer`` must not emit a tick whose time interval would be
    inverted: after the clock moves backwards, no tick is produced until a
    later message restores monotonic timestamps, at which point the tick
    spans the skipped offsets.
    """
    topic = Topic("messages")
    partition = Partition(topic, 0)
    clock = TestingClock(epoch.timestamp())
    broker: DummyBroker[int] = DummyBroker(clock)
    broker.create_topic(topic, partitions=1)
    producer: DummyProducer[int] = DummyProducer(broker)
    inner_consumer: Consumer[int] = DummyConsumer(broker, "group")
    consumer = TickConsumer(inner_consumer)
    consumer.subscribe([topic])
    producer.produce(partition, 0)
    clock.sleep(1)
    producer.produce(partition, 1)
    # First message: no previous message yet, so no tick is emitted.
    with assert_changes(inner_consumer.tell, {partition: 0}, {partition: 1}), assert_does_not_change(
            consumer.tell, {partition: 0}):
        assert consumer.poll() is None
    # Second message: a tick spanning offsets (0, 1) and one second.
    with assert_changes(inner_consumer.tell, {partition: 1}, {partition: 2}), assert_changes(
            consumer.tell, {partition: 0}, {partition: 1}):
        assert consumer.poll() == Message(
            partition,
            0,
            Tick(
                offsets=Interval(0, 1),
                timestamps=Interval(epoch, epoch + timedelta(seconds=1)),
            ),
            epoch + timedelta(seconds=1),
        )
    # Clock goes backwards: the next message would form an inverted
    # interval, so the consumer swallows it.
    clock.sleep(-1)
    producer.produce(partition, 2)
    with assert_changes(inner_consumer.tell, {partition: 2}, {partition: 3}), assert_does_not_change(
            consumer.tell, {partition: 1}):
        assert consumer.poll() is None
    # Time recovers: the tick spans offsets (1, 3), covering the message
    # that was dropped above.
    clock.sleep(2)
    producer.produce(partition, 3)
    with assert_changes(inner_consumer.tell, {partition: 3}, {partition: 4}), assert_changes(
            consumer.tell, {partition: 1}, {partition: 3}):
        assert consumer.poll() == Message(
            partition,
            1,
            Tick(
                offsets=Interval(1, 3),
                timestamps=Interval(epoch + timedelta(seconds=1), epoch + timedelta(seconds=2)),
            ),
            epoch + timedelta(seconds=2),
        )
def test_tick_consumer_non_monotonic() -> None:
    """
    ``CommitLogTickConsumer`` equivalent of the non-monotonic clock test:
    a commit with a backwards timestamp yields no tick, and the next
    in-order commit produces a tick spanning the skipped offsets.
    """
    clock = TestingClock()
    broker: Broker[KafkaPayload] = Broker(MemoryMessageStorage(), clock)
    epoch = datetime.fromtimestamp(clock.time())
    topic = Topic("messages")
    followed_consumer_group = "events"
    partition = Partition(topic, 0)
    broker.create_topic(topic, partitions=1)
    producer = broker.get_producer()
    inner_consumer = broker.get_consumer("group")
    consumer = CommitLogTickConsumer(inner_consumer, followed_consumer_group)

    def _assignment_callback(offsets: Mapping[Partition, int]) -> None:
        # Both consumers start at the beginning of the partition.
        assert inner_consumer.tell() == {partition: 0}
        assert consumer.tell() == {partition: 0}

    assignment_callback = mock.Mock(side_effect=_assignment_callback)
    consumer.subscribe([topic], on_assign=assignment_callback)
    producer.produce(
        partition,
        commit_codec.encode(
            Commit(followed_consumer_group, partition, 0, epoch)),
    ).result()
    clock.sleep(1)
    producer.produce(
        partition,
        commit_codec.encode(
            Commit(followed_consumer_group, partition, 1, epoch + timedelta(seconds=1))),
    ).result()
    # First commit: no previous commit, so no tick (assignment fires here).
    with assert_changes(lambda: assignment_callback.called, False, True):
        assert consumer.poll() is None
    assert consumer.tell() == {partition: 1}
    # Second commit: tick over offsets (0, 1) spanning one second.
    with assert_changes(consumer.tell, {partition: 1}, {partition: 2}):
        assert consumer.poll() == Message(
            partition,
            1,
            Tick(
                0,
                offsets=Interval(0, 1),
                timestamps=Interval(epoch, epoch + timedelta(seconds=1)),
            ),
            epoch + timedelta(seconds=1),
        )
    # Commit with a backwards timestamp: consumed but no tick emitted.
    clock.sleep(-1)
    producer.produce(
        partition,
        commit_codec.encode(
            Commit(followed_consumer_group, partition, 2, epoch)),
    ).result()
    with assert_changes(consumer.tell, {partition: 2}, {partition: 3}):
        assert consumer.poll() is None
    # Monotonicity restored: the tick covers offsets (1, 3), including the
    # commit dropped above.
    clock.sleep(2)
    producer.produce(
        partition,
        commit_codec.encode(
            Commit(followed_consumer_group, partition, 3, epoch + timedelta(seconds=2))),
    ).result()
    with assert_changes(consumer.tell, {partition: 3}, {partition: 4}):
        assert consumer.poll() == Message(
            partition,
            3,
            Tick(
                0,
                offsets=Interval(1, 3),
                timestamps=Interval(epoch + timedelta(seconds=1), epoch + timedelta(seconds=2)),
            ),
            epoch + timedelta(seconds=2),
        )
def test_tick_consumer() -> None:
    """
    Step through a two-partition topic with a ``TickConsumer``: the first
    message on each partition yields no tick, subsequent messages yield
    ticks over adjacent offset pairs, seeking rewinds tick production, and
    seeking an invalid partition raises ``ConsumerError``.
    """
    topic = Topic("messages")
    broker: DummyBroker[int] = DummyBroker()
    broker.create_topic(topic, partitions=2)
    producer: DummyProducer[int] = DummyProducer(broker)
    # Partition 0 gets three messages, partition 1 gets one.
    for partition, payloads in enumerate([[0, 1, 2], [0]]):
        for payload in payloads:
            producer.produce(Partition(topic, partition), payload).result()
    inner_consumer: Consumer[int] = DummyConsumer(broker, "group")
    consumer = TickConsumer(inner_consumer)
    consumer.subscribe([topic])
    assert consumer.tell() == {
        Partition(topic, 0): 0,
        Partition(topic, 1): 0,
    }
    assert inner_consumer.tell() == {
        Partition(topic, 0): 0,
        Partition(topic, 1): 0,
    }
    # consume 0, 0 — first message on partition 0: no tick yet; the inner
    # consumer advances but the tick consumer's position does not.
    assert consumer.poll() is None
    assert consumer.tell() == {
        Partition(topic, 0): 0,
        Partition(topic, 1): 0,
    }
    assert inner_consumer.tell() == {
        Partition(topic, 0): 1,
        Partition(topic, 1): 0,
    }
    # consume 0, 1 — tick over offsets (0, 1).
    assert consumer.poll() == Message(
        Partition(topic, 0),
        0,
        Tick(offsets=Interval(0, 1), timestamps=Interval(epoch, epoch)),
        epoch,
    )
    assert consumer.tell() == {
        Partition(topic, 0): 1,
        Partition(topic, 1): 0,
    }
    assert inner_consumer.tell() == {
        Partition(topic, 0): 2,
        Partition(topic, 1): 0,
    }
    # consume 0, 2 — tick over offsets (1, 2).
    assert consumer.poll() == Message(
        Partition(topic, 0),
        1,
        Tick(offsets=Interval(1, 2), timestamps=Interval(epoch, epoch)),
        epoch,
    )
    assert consumer.tell() == {
        Partition(topic, 0): 2,
        Partition(topic, 1): 0,
    }
    assert inner_consumer.tell() == {
        Partition(topic, 0): 3,
        Partition(topic, 1): 0,
    }
    # consume 1, 0 — first message on partition 1: no tick.
    assert consumer.poll() is None
    assert consumer.tell() == {
        Partition(topic, 0): 2,
        Partition(topic, 1): 0,
    }
    assert inner_consumer.tell() == {
        Partition(topic, 0): 3,
        Partition(topic, 1): 1,
    }
    # consume no message — topic exhausted; positions unchanged.
    assert consumer.poll() is None
    assert consumer.tell() == {
        Partition(topic, 0): 2,
        Partition(topic, 1): 0,
    }
    assert inner_consumer.tell() == {
        Partition(topic, 0): 3,
        Partition(topic, 1): 1,
    }
    # Seek back to offset 1 on partition 0; ticks should replay from there.
    consumer.seek({Partition(topic, 0): 1})
    assert consumer.tell() == {
        Partition(topic, 0): 1,
        Partition(topic, 1): 0,
    }
    assert inner_consumer.tell() == {
        Partition(topic, 0): 1,
        Partition(topic, 1): 1,
    }
    # consume 0, 1 — seek cleared the previous-message state: no tick.
    assert consumer.poll() is None
    assert consumer.tell() == {
        Partition(topic, 0): 1,
        Partition(topic, 1): 0,
    }
    assert inner_consumer.tell() == {
        Partition(topic, 0): 2,
        Partition(topic, 1): 1,
    }
    # consume 0, 2 — tick over offsets (1, 2) is replayed.
    assert consumer.poll() == Message(
        Partition(topic, 0),
        1,
        Tick(offsets=Interval(1, 2), timestamps=Interval(epoch, epoch)),
        epoch,
    )
    assert consumer.tell() == {
        Partition(topic, 0): 2,
        Partition(topic, 1): 0,
    }
    assert inner_consumer.tell() == {
        Partition(topic, 0): 3,
        Partition(topic, 1): 1,
    }
    # Seeking a partition that does not exist must raise.
    with pytest.raises(ConsumerError):
        consumer.seek({Partition(topic, -1): 0})
def test_subscription_worker(broker: Broker[SubscriptionTaskResult], ) -> None:
    """
    Processing a tick spanning three scheduling frequencies must evaluate
    the subscription three times and publish one result message per
    evaluation, each scoped to the right time window.
    """
    result_topic = Topic("subscription-results")
    broker.create_topic(result_topic, partitions=1)
    frequency = timedelta(minutes=1)
    evaluations = 3
    subscription = Subscription(
        SubscriptionIdentifier(PartitionId(0), uuid1()),
        SubscriptionData(
            project_id=1,
            conditions=[],
            aggregations=[["count()", "", "count"]],
            time_window=timedelta(minutes=60),
            resolution=frequency,
        ),
    )
    store = DummySubscriptionDataStore()
    store.create(subscription.identifier.uuid, subscription.data)
    metrics = DummyMetricsBackend(strict=True)
    dataset = get_dataset("events")
    worker = SubscriptionWorker(
        dataset,
        ThreadPoolExecutor(),
        {0: SubscriptionScheduler(store, PartitionId(0), timedelta(), metrics)},
        broker.get_producer(),
        result_topic,
        metrics,
    )
    now = datetime(2000, 1, 1)
    # Tick covering ``evaluations`` frequencies, so the scheduler fires
    # that many times.
    tick = Tick(
        offsets=Interval(0, 1),
        timestamps=Interval(now - (frequency * evaluations), now),
    )
    result_futures = worker.process_message(
        Message(Partition(Topic("events"), 0), 0, tick, now))
    assert result_futures is not None and len(result_futures) == evaluations
    # Publish the results.
    worker.flush_batch([result_futures])
    # Check to make sure the results were published.
    # NOTE: This does not cover the ``SubscriptionTaskResultCodec``!
    consumer = broker.get_consumer("group")
    consumer.subscribe([result_topic])
    for i in range(evaluations):
        # Each evaluation is anchored one frequency apart, oldest first.
        timestamp = now - frequency * (evaluations - i)
        message = consumer.poll()
        assert message is not None
        assert message.partition.topic == result_topic
        task, future = result_futures[i]
        # Unpack the (request, result) pair while keeping the tuple itself.
        future_result = request, result = future.result()
        assert message.payload.task.timestamp == timestamp
        assert message.payload == SubscriptionTaskResult(task, future_result)
        # NOTE: The time series extension is folded back into the request
        # body, ideally this would reference the timeseries options in
        # isolation.
        # dict_items ``>`` is a proper-superset check: the request body
        # must contain at least these date bounds.
        assert (request.body.items() > {
            "from_date": (timestamp - subscription.data.time_window).isoformat(),
            "to_date": timestamp.isoformat(),
        }.items())
        assert result == {
            "meta": [{"name": "count", "type": "UInt64"}],
            "data": [{"count": 0}],
        }
def test_subscription_worker(subscription_data: SubscriptionData) -> None:
    """
    Processing a tick spanning three scheduling frequencies must evaluate
    the subscription three times and publish one result per evaluation;
    the generated query must carry timestamp bounds matching each
    evaluation's window (verified via AST pattern matching).
    """
    broker: Broker[SubscriptionTaskResult] = Broker(MemoryMessageStorage(), TestingClock())
    result_topic = Topic("subscription-results")
    broker.create_topic(result_topic, partitions=1)
    frequency = timedelta(minutes=1)
    evaluations = 3
    subscription = Subscription(
        SubscriptionIdentifier(PartitionId(0), uuid1()),
        subscription_data,
    )
    store = DummySubscriptionDataStore()
    store.create(subscription.identifier.uuid, subscription.data)
    metrics = DummyMetricsBackend(strict=True)
    dataset = get_dataset("events")
    worker = SubscriptionWorker(
        dataset,
        ThreadPoolExecutor(),
        {0: SubscriptionScheduler(store, PartitionId(0), timedelta(), metrics)},
        broker.get_producer(),
        result_topic,
        metrics,
    )
    now = datetime(2000, 1, 1)
    # Tick covering ``evaluations`` frequencies, so the scheduler fires
    # that many times.
    tick = Tick(
        offsets=Interval(0, 1),
        timestamps=Interval(now - (frequency * evaluations), now),
    )
    result_futures = worker.process_message(
        Message(Partition(Topic("events"), 0), 0, tick, now))
    assert result_futures is not None and len(result_futures) == evaluations
    # Publish the results.
    worker.flush_batch([result_futures])
    # Check to make sure the results were published.
    # NOTE: This does not cover the ``SubscriptionTaskResultCodec``!
    consumer = broker.get_consumer("group")
    consumer.subscribe([result_topic])
    for i in range(evaluations):
        # Each evaluation is anchored one frequency apart, oldest first.
        timestamp = now - frequency * (evaluations - i)
        message = consumer.poll()
        assert message is not None
        assert message.partition.topic == result_topic
        task, future = result_futures[i]
        # Unpack the (request, result) pair while keeping the tuple itself.
        future_result = request, result = future.result()
        assert message.payload.task.timestamp == timestamp
        assert message.payload == SubscriptionTaskResult(task, future_result)
        # NOTE: The time series extension is folded back into the request
        # body, ideally this would reference the timeseries options in
        # isolation.
        # The query AST must contain timestamp >= window start and
        # timestamp < window end among its top-level AND conditions.
        from_pattern = FunctionCall(
            String(ConditionFunctions.GTE),
            (
                Column(None, String("timestamp")),
                Literal(Datetime(timestamp - subscription.data.time_window)),
            ),
        )
        to_pattern = FunctionCall(
            String(ConditionFunctions.LT),
            (Column(None, String("timestamp")), Literal(Datetime(timestamp))),
        )
        condition = request.query.get_condition()
        assert condition is not None
        conditions = get_first_level_and_conditions(condition)
        assert any([from_pattern.match(e) for e in conditions])
        assert any([to_pattern.match(e) for e in conditions])
        assert result == {
            "meta": [{"name": "count", "type": "UInt64"}],
            "data": [{"count": 0}],
        }
def test_tick_consumer(time_shift: Optional[timedelta]) -> None:
    """
    Walk a ``CommitLogTickConsumer`` through a commit log carrying commits
    for two partitions of the followed group: adjacent commits for the
    same partition produce (optionally time-shifted) ticks, seeking
    replays them, and seeking an invalid partition raises
    ``ConsumerError``.
    """
    clock = TestingClock()
    broker: Broker[KafkaPayload] = Broker(MemoryMessageStorage(), clock)
    epoch = datetime.fromtimestamp(clock.time())
    topic = Topic("messages")
    followed_consumer_group = "events"
    broker.create_topic(topic, partitions=1)
    producer = broker.get_producer()
    # Commits for partition 0 (offsets 0..2) and partition 1 (offset 0),
    # all written to the single commit-log partition.
    for partition, offsets in enumerate([[0, 1, 2], [0]]):
        for offset in offsets:
            payload = commit_codec.encode(
                Commit(followed_consumer_group, Partition(topic, partition), offset, epoch))
            producer.produce(Partition(topic, 0), payload).result()
    inner_consumer = broker.get_consumer("group")
    consumer = CommitLogTickConsumer(inner_consumer, followed_consumer_group, time_shift=time_shift)
    # ``None`` means no shift; normalize so the expectations below work.
    if time_shift is None:
        time_shift = timedelta()

    def _assignment_callback(offsets: Mapping[Partition, int]) -> None:
        assert consumer.tell() == {
            Partition(topic, 0): 0,
        }

    assignment_callback = mock.Mock(side_effect=_assignment_callback)
    consumer.subscribe([topic], on_assign=assignment_callback)
    with assert_changes(lambda: assignment_callback.called, False, True):
        # consume 0, 0 — first commit for partition 0: no tick yet.
        assert consumer.poll() is None
    assert consumer.tell() == {
        Partition(topic, 0): 1,
    }
    # consume 0, 1 — tick over offsets (0, 1), shifted as configured.
    assert consumer.poll() == Message(
        Partition(topic, 0),
        1,
        Tick(0, offsets=Interval(0, 1), timestamps=Interval(epoch, epoch)).time_shift(time_shift),
        epoch,
    )
    assert consumer.tell() == {
        Partition(topic, 0): 2,
    }
    # consume 0, 2 — tick over offsets (1, 2).
    assert consumer.poll() == Message(
        Partition(topic, 0),
        2,
        Tick(0, offsets=Interval(1, 2), timestamps=Interval(epoch, epoch)).time_shift(time_shift),
        epoch,
    )
    assert consumer.tell() == {
        Partition(topic, 0): 3,
    }
    # consume 1, 0 — first commit for partition 1: no tick.
    assert consumer.poll() is None
    assert consumer.tell() == {
        Partition(topic, 0): 4,
    }
    # consume no message — commit log exhausted.
    assert consumer.poll() is None
    assert consumer.tell() == {
        Partition(topic, 0): 4,
    }
    # Rewind and replay from offset 1.
    consumer.seek({Partition(topic, 0): 1})
    assert consumer.tell() == {
        Partition(topic, 0): 1,
    }
    # consume 0, 1 — seek cleared the previous-commit state: no tick.
    assert consumer.poll() is None
    assert consumer.tell() == {
        Partition(topic, 0): 2,
    }
    # consume 0, 2 — the (1, 2) tick is replayed.
    assert consumer.poll() == Message(
        Partition(topic, 0),
        2,
        Tick(0, offsets=Interval(1, 2), timestamps=Interval(epoch, epoch)).time_shift(time_shift),
        epoch,
    )
    assert consumer.tell() == {
        Partition(topic, 0): 3,
    }
    # Seeking a partition that does not exist must raise.
    with pytest.raises(ConsumerError):
        consumer.seek({Partition(topic, -1): 0})
def test_tick_consumer_non_monotonic(clock: Clock, broker: Broker[int]) -> None:
    """
    Fixture-driven variant of the non-monotonic clock test: a message
    produced after the clock moves backwards yields no tick, and the next
    in-order message yields a tick spanning the skipped offsets.
    """
    epoch = datetime.fromtimestamp(clock.time())
    topic = Topic("messages")
    partition = Partition(topic, 0)
    broker.create_topic(topic, partitions=1)
    producer = broker.get_producer()
    inner_consumer = broker.get_consumer("group")
    consumer = TickConsumer(inner_consumer)

    def assignment_callback(offsets: Mapping[Partition, int]) -> None:
        # Flag on the function object records that assignment happened.
        assignment_callback.called = True
        assert inner_consumer.tell() == {partition: 0}
        assert consumer.tell() == {partition: 0}

    assignment_callback.called = False
    consumer.subscribe([topic], on_assign=assignment_callback)
    producer.produce(partition, 0)
    clock.sleep(1)
    producer.produce(partition, 1)
    # First message: no previous message, so no tick; assignment fires.
    with assert_changes(lambda: assignment_callback.called, False, True):
        assert consumer.poll() is None
    assert inner_consumer.tell() == {partition: 1}
    assert consumer.tell() == {partition: 0}
    # Second message: tick over offsets (0, 1) spanning one second.
    with assert_changes(inner_consumer.tell, {partition: 1}, {partition: 2}), assert_changes(
            consumer.tell, {partition: 0}, {partition: 1}):
        assert consumer.poll() == Message(
            partition,
            0,
            Tick(
                offsets=Interval(0, 1),
                timestamps=Interval(epoch, epoch + timedelta(seconds=1)),
            ),
            epoch + timedelta(seconds=1),
        )
    # Clock goes backwards: the resulting message is swallowed.
    clock.sleep(-1)
    producer.produce(partition, 2)
    with assert_changes(inner_consumer.tell, {partition: 2}, {partition: 3}), assert_does_not_change(
            consumer.tell, {partition: 1}):
        assert consumer.poll() is None
    # Time recovers: tick spans offsets (1, 3), covering the dropped message.
    clock.sleep(2)
    producer.produce(partition, 3)
    with assert_changes(inner_consumer.tell, {partition: 3}, {partition: 4}), assert_changes(
            consumer.tell, {partition: 1}, {partition: 3}):
        assert consumer.poll() == Message(
            partition,
            1,
            Tick(
                offsets=Interval(1, 3),
                timestamps=Interval(epoch + timedelta(seconds=1), epoch + timedelta(seconds=2)),
            ),
            epoch + timedelta(seconds=2),
        )
def test_tick_consumer(clock: Clock, broker: Broker[int], time_shift: Optional[timedelta]) -> None:
    """
    Fixture-driven walk of a two-partition topic with a ``TickConsumer``
    configured with an optional time shift: first messages yield no tick,
    adjacent messages yield (shifted) ticks, seeking replays them, and
    seeking an invalid partition raises ``ConsumerError``.
    """
    epoch = datetime.fromtimestamp(clock.time())
    topic = Topic("messages")
    broker.create_topic(topic, partitions=2)
    producer = broker.get_producer()
    # Partition 0 gets three messages, partition 1 gets one.
    for partition, payloads in enumerate([[0, 1, 2], [0]]):
        for payload in payloads:
            producer.produce(Partition(topic, partition), payload).result()
    inner_consumer = broker.get_consumer("group")
    consumer = TickConsumer(inner_consumer, time_shift=time_shift)
    # ``None`` means no shift; normalize so the expectations below work.
    if time_shift is None:
        time_shift = timedelta()

    def assignment_callback(offsets: Mapping[Partition, int]) -> None:
        assignment_callback.called = True
        assert consumer.tell() == {
            Partition(topic, 0): 0,
            Partition(topic, 1): 0,
        }
        assert inner_consumer.tell() == {
            Partition(topic, 0): 0,
            Partition(topic, 1): 0,
        }

    assignment_callback.called = False
    consumer.subscribe([topic], on_assign=assignment_callback)
    with assert_changes(lambda: assignment_callback.called, False, True):
        # consume 0, 0 — first message on partition 0: no tick yet.
        assert consumer.poll() is None
    assert consumer.tell() == {
        Partition(topic, 0): 0,
        Partition(topic, 1): 0,
    }
    assert inner_consumer.tell() == {
        Partition(topic, 0): 1,
        Partition(topic, 1): 0,
    }
    # consume 0, 1 — tick over offsets (0, 1), shifted as configured.
    assert consumer.poll() == Message(
        Partition(topic, 0),
        0,
        Tick(offsets=Interval(0, 1), timestamps=Interval(epoch, epoch)).time_shift(time_shift),
        epoch,
    )
    assert consumer.tell() == {
        Partition(topic, 0): 1,
        Partition(topic, 1): 0,
    }
    assert inner_consumer.tell() == {
        Partition(topic, 0): 2,
        Partition(topic, 1): 0,
    }
    # consume 0, 2 — tick over offsets (1, 2).
    assert consumer.poll() == Message(
        Partition(topic, 0),
        1,
        Tick(offsets=Interval(1, 2), timestamps=Interval(epoch, epoch)).time_shift(time_shift),
        epoch,
    )
    assert consumer.tell() == {
        Partition(topic, 0): 2,
        Partition(topic, 1): 0,
    }
    assert inner_consumer.tell() == {
        Partition(topic, 0): 3,
        Partition(topic, 1): 0,
    }
    # consume 1, 0 — first message on partition 1: no tick.
    assert consumer.poll() is None
    assert consumer.tell() == {
        Partition(topic, 0): 2,
        Partition(topic, 1): 0,
    }
    assert inner_consumer.tell() == {
        Partition(topic, 0): 3,
        Partition(topic, 1): 1,
    }
    # consume no message — topic exhausted; positions unchanged.
    assert consumer.poll() is None
    assert consumer.tell() == {
        Partition(topic, 0): 2,
        Partition(topic, 1): 0,
    }
    assert inner_consumer.tell() == {
        Partition(topic, 0): 3,
        Partition(topic, 1): 1,
    }
    # Rewind partition 0 and replay from offset 1.
    consumer.seek({Partition(topic, 0): 1})
    assert consumer.tell() == {
        Partition(topic, 0): 1,
        Partition(topic, 1): 0,
    }
    assert inner_consumer.tell() == {
        Partition(topic, 0): 1,
        Partition(topic, 1): 1,
    }
    # consume 0, 1 — seek cleared the previous-message state: no tick.
    assert consumer.poll() is None
    assert consumer.tell() == {
        Partition(topic, 0): 1,
        Partition(topic, 1): 0,
    }
    assert inner_consumer.tell() == {
        Partition(topic, 0): 2,
        Partition(topic, 1): 1,
    }
    # consume 0, 2 — the (1, 2) tick is replayed.
    assert consumer.poll() == Message(
        Partition(topic, 0),
        1,
        Tick(offsets=Interval(1, 2), timestamps=Interval(epoch, epoch)).time_shift(time_shift),
        epoch,
    )
    assert consumer.tell() == {
        Partition(topic, 0): 2,
        Partition(topic, 1): 0,
    }
    assert inner_consumer.tell() == {
        Partition(topic, 0): 3,
        Partition(topic, 1): 1,
    }
    # Seeking a partition that does not exist must raise.
    with pytest.raises(ConsumerError):
        consumer.seek({Partition(topic, -1): 0})
def test_interval_validation() -> None:
    """Intervals accept lower <= upper; inverted bounds trip the assertion."""
    for bounds in ((1, 1), (1, 10)):
        Interval(*bounds)
    with pytest.raises(AssertionError):
        Interval(10, 1)