def test_drop_and_resume():
    """Dropping enough bytes to fall under the load threshold resumes the consumer."""
    manager = make_manager(
        flow_control=types.FlowControl(max_messages=10, max_bytes=1000))
    manager._leaser = leaser.Leaser(manager)
    manager._consumer = mock.create_autospec(
        bidi.BackgroundConsumer, instance=True)
    manager._consumer.is_paused = True

    # Add several messages until we're over the load threshold.
    manager.leaser.add([
        requests.LeaseRequest(ack_id="one", byte_size=750),
        requests.LeaseRequest(ack_id="two", byte_size=250),
    ])
    assert manager.load == 1.0

    # Trying to resume now should have no effect as we're over the threshold.
    manager.maybe_resume_consumer()
    manager._consumer.resume.assert_not_called()

    # Drop the 250 byte message, which should put us under the resume
    # threshold.  (The original comment said "200 byte", but the dropped
    # request is 250 bytes.)
    manager.leaser.remove([requests.DropRequest(ack_id="two", byte_size=250)])
    manager.maybe_resume_consumer()
    manager._consumer.resume.assert_called_once()
def test_start_lease_expiry_timer_unknown_ack_id():
    """Starting the expiry timer for a never-added ack ID must be a no-op."""
    lease_mgr = leaser.Leaser(create_manager())

    # "ack1" was never added, so this call should simply do nothing.
    lease_mgr.start_lease_expiry_timer(["ack1"])
def open(self, callback):
    """Begin consuming messages.

    Starts the request dispatcher thread, opens the streaming-pull bidi
    RPC with its background consumer, and finally starts the lease
    maintainer thread.  The order matters: the dispatcher must be running
    before messages arrive, and leases are only maintained once messages
    can be received.

    Args:
        callback (Callable[None, google.cloud.pubsub_v1.message.Messages]):
            A callback that will be called for each message received on the
            stream.

    Raises:
        ValueError: If the manager is already open, or if it was
            previously closed (managers are single-use).
    """
    if self.is_active:
        raise ValueError('This manager is already open.')

    if self._closed:
        raise ValueError(
            'This manager has been closed and can not be re-used.')

    # Wrap the user callback so exceptions it raises are routed through
    # the error handler instead of killing the scheduler thread.
    self._callback = functools.partial(_wrap_callback_errors, callback)

    # Start the thread to pass the requests.
    self._dispatcher = dispatcher.Dispatcher(self, self._scheduler.queue)
    self._dispatcher.start()

    # Start consuming messages.
    self._rpc = bidi.ResumableBidiRpc(
        start_rpc=self._client.api.streaming_pull,
        initial_request=self._get_initial_request,
        should_recover=self._should_recover)
    self._rpc.add_done_callback(self._on_rpc_done)
    self._consumer = bidi.BackgroundConsumer(self._rpc, self._on_response)
    self._consumer.start()

    # Start the lease maintainer thread.
    self._leaser = leaser.Leaser(self)
    self._leaser.start()
def test_maintain_leases_outdated_items(sleep, time):
    """Expired leases are dropped while fresh ones are renewed."""
    subscriber_ = create_subscriber()
    make_sleep_mark_subscriber_as_inactive(sleep, subscriber_)
    lease_mgr = leaser.Leaser(subscriber_)

    max_duration = subscriber_.flow_control.max_lease_duration

    # 'ack1' is leased at the very start of the timeline.
    time.return_value = 0
    lease_mgr.add([requests.LeaseRequest(ack_id='ack1', byte_size=50)])

    # 'ack2' is leased just before the timeline's end.
    time.return_value = max_duration - 1
    lease_mgr.add([requests.LeaseRequest(ack_id='ack2', byte_size=50)])

    # Advance the clock past the end of the timeline and run maintenance.
    time.return_value = max_duration + 1
    lease_mgr.maintain_leases()

    # 'ack2' alone gets a deadline extension; 'ack1' has expired.
    subscriber_.modify_ack_deadline.assert_called_once_with(
        [requests.ModAckRequest(ack_id='ack2', seconds=10)])
    subscriber_.drop.assert_called_once_with(
        [requests.DropRequest(ack_id='ack1', byte_size=50)])
    sleep.assert_called()
def test_lease_load_and_pause():
    """Load is the max of message/byte ratios and pauses the consumer past 100%."""
    manager = make_manager(
        flow_control=types.FlowControl(max_messages=10, max_bytes=1000))
    manager._leaser = leaser.Leaser(manager)
    manager._consumer = mock.create_autospec(
        bidi.BackgroundConsumer, instance=True)
    manager._consumer.is_paused = False

    # One message: 10% of the message limit, 15% of the byte limit.  Load
    # reports the higher ratio (0.15), which is under the pause threshold.
    manager.leaser.add([requests.LeaseRequest(ack_id="one", byte_size=150)])
    assert manager.load == 0.15
    manager.maybe_pause_consumer()
    manager._consumer.pause.assert_not_called()

    # A second message pushes messages to 20% (bytes are only at 16%).
    manager.leaser.add([requests.LeaseRequest(ack_id="two", byte_size=10)])
    assert manager.load == 0.2

    # Load is allowed to exceed 100%; when it does, a pause must follow.
    manager.leaser.add([requests.LeaseRequest(ack_id="three", byte_size=1000)])
    assert manager.load == 1.16
    manager.maybe_pause_consumer()
    manager._consumer.pause.assert_called_once()
def open(self, callback, on_callback_error):
    """Begin consuming messages.

    Computes the stream's default ACK deadline from the 99th percentile of
    observed ack latencies, opens the resumable streaming-pull bidi RPC,
    then creates all four worker threads (dispatcher, consumer, leaser,
    heartbeater) BEFORE starting any of them, so that no thread observes a
    partially-constructed manager.

    Args:
        callback (Callable[None, google.cloud.pubsub_v1.message.Message]):
            A callback that will be called for each message received on the
            stream.
        on_callback_error (Callable[Exception]):
            A callable that will be called if an exception is raised in
            the provided `callback`.

    Raises:
        ValueError: If the manager is already open, or if it was
            previously closed (managers are single-use).
    """
    if self.is_active:
        raise ValueError("This manager is already open.")

    if self._closed:
        raise ValueError("This manager has been closed and can not be re-used.")

    # Route exceptions from the user callback through the error handler
    # rather than letting them kill the scheduler thread.
    self._callback = functools.partial(
        _wrap_callback_errors, callback, on_callback_error
    )

    # Create the RPC
    stream_ack_deadline_seconds = self.ack_histogram.percentile(99)
    get_initial_request = functools.partial(
        self._get_initial_request, stream_ack_deadline_seconds
    )
    self._rpc = bidi.ResumableBidiRpc(
        start_rpc=self._client.api.streaming_pull,
        initial_request=get_initial_request,
        should_recover=self._should_recover,
        should_terminate=self._should_terminate,
        throttle_reopen=True,
    )
    self._rpc.add_done_callback(self._on_rpc_done)

    _LOGGER.debug(
        "Creating a stream, default ACK deadline set to {} seconds.".format(
            stream_ack_deadline_seconds
        )
    )

    # Create references to threads
    self._dispatcher = dispatcher.Dispatcher(self, self._scheduler.queue)
    self._consumer = bidi.BackgroundConsumer(self._rpc, self._on_response)
    self._leaser = leaser.Leaser(self)
    self._heartbeater = heartbeater.Heartbeater(self)

    # Start the thread to pass the requests.
    self._dispatcher.start()

    # Start consuming messages.
    self._consumer.start()

    # Start the lease maintainer thread.
    self._leaser.start()

    # Start the stream heartbeater thread.
    self._heartbeater.start()
def test_remove_not_managed(caplog):
    """Removing a lease that was never added only logs a debug message."""
    caplog.set_level(logging.DEBUG)
    lease_mgr = leaser.Leaser(mock.sentinel.subscriber)

    lease_mgr.remove([requests.DropRequest(ack_id='ack1', byte_size=50)])

    assert 'not managed' in caplog.text
def test_maintain_leases_no_ack_ids():
    """A maintenance pass with nothing leased must not send any modacks."""
    manager = create_manager()
    lease_mgr = leaser.Leaser(manager)
    make_sleep_mark_manager_as_inactive(lease_mgr)

    lease_mgr.maintain_leases()

    manager.dispatcher.modify_ack_deadline.assert_not_called()
def test_remove_not_managed(caplog):
    """Removing a lease that was never added only logs a debug message."""
    caplog.set_level(logging.DEBUG)
    lease_mgr = leaser.Leaser(mock.sentinel.manager)

    lease_mgr.remove([requests.DropRequest(ack_id="ack1", byte_size=50)])

    assert "not managed" in caplog.text
def test_maintain_leases_no_ack_ids(sleep):
    """An empty leaser completes a maintenance pass without any modacks."""
    subscriber_ = create_subscriber()
    make_sleep_mark_subscriber_as_inactive(sleep, subscriber_)

    leaser.Leaser(subscriber_).maintain_leases()

    subscriber_.modify_ack_deadline.assert_not_called()
    sleep.assert_called()
def test_add_already_managed(caplog):
    """Adding the same ack ID twice logs a debug message on the second add."""
    caplog.set_level(logging.DEBUG)
    lease_mgr = leaser.Leaser(mock.sentinel.manager)

    duplicate = requests.LeaseRequest(ack_id="ack1", byte_size=50)
    lease_mgr.add([duplicate])
    lease_mgr.add([duplicate])

    assert "already lease managed" in caplog.text
def test_add_already_managed(caplog):
    """Adding the same ack ID twice logs a debug message on the second add."""
    caplog.set_level(logging.DEBUG)
    lease_mgr = leaser.Leaser(mock.sentinel.subscriber)

    duplicate = requests.LeaseRequest(ack_id='ack1', byte_size=50)
    lease_mgr.add([duplicate])
    lease_mgr.add([duplicate])

    assert 'already lease managed' in caplog.text
def test_start_already_started(thread):
    """Calling start() twice raises and creates no second worker thread."""
    subscriber_ = mock.create_autospec(subscriber.Subscriber, instance=True)
    lease_mgr = leaser.Leaser(subscriber_)
    # Simulate a worker thread that is already running.
    lease_mgr._thread = mock.sentinel.thread

    with pytest.raises(ValueError):
        lease_mgr.start()

    thread.assert_not_called()
def test_maintain_leases_stopped(caplog):
    """A stopped leaser exits its maintenance loop immediately."""
    caplog.set_level(logging.INFO)
    lease_mgr = leaser.Leaser(create_manager())

    lease_mgr.stop()
    lease_mgr.maintain_leases()

    assert "exiting" in caplog.text
def test_remove_negative_bytes(caplog):
    """Removing more bytes than were leased clamps the total at zero and logs."""
    caplog.set_level(logging.DEBUG)
    lease_mgr = leaser.Leaser(mock.sentinel.manager)

    lease_mgr.add([requests.LeaseRequest(ack_id="ack1", byte_size=50)])
    lease_mgr.remove([requests.DropRequest(ack_id="ack1", byte_size=75)])

    assert lease_mgr.bytes == 0
    assert "unexpectedly negative" in caplog.text
def test_maintain_leases_inactive(caplog):
    """An inactive subscriber makes the maintenance loop exit right away."""
    caplog.set_level(logging.INFO)
    subscriber_ = create_subscriber()
    subscriber_.is_active = False

    leaser.Leaser(subscriber_).maintain_leases()

    assert 'exiting' in caplog.text
def test_maintain_leases_inactive(caplog):
    """An inactive manager makes the maintenance loop exit right away."""
    caplog.set_level(logging.INFO)
    manager = create_manager()
    manager.is_active = False

    leaser.Leaser(manager).maintain_leases()

    assert "exiting" in caplog.text
def test_maintain_leases_ack_ids():
    """A single leased message gets exactly one deadline extension."""
    manager = create_manager()
    lease_mgr = leaser.Leaser(manager)
    make_sleep_mark_manager_as_inactive(lease_mgr)

    lease_mgr.add([requests.LeaseRequest(ack_id="my ack id", byte_size=50)])
    lease_mgr.maintain_leases()

    manager.dispatcher.modify_ack_deadline.assert_called_once_with(
        [requests.ModAckRequest(ack_id="my ack id", seconds=10)])
def test_maintain_leases_stopped(caplog):
    """A stopped leaser exits its maintenance loop immediately."""
    caplog.set_level(logging.INFO)
    lease_mgr = leaser.Leaser(create_subscriber())

    lease_mgr.stop()
    lease_mgr.maintain_leases()

    assert 'exiting' in caplog.text
def test_start_already_started(thread):
    """Calling start() twice raises and creates no second worker thread."""
    manager = mock.create_autospec(
        streaming_pull_manager.StreamingPullManager, instance=True)
    lease_mgr = leaser.Leaser(manager)
    # Simulate a worker thread that is already running.
    lease_mgr._thread = mock.sentinel.thread

    with pytest.raises(ValueError):
        lease_mgr.start()

    thread.assert_not_called()
def test_stop():
    """stop() sets the stop event, joins the worker, and clears the reference."""
    subscriber_ = mock.create_autospec(subscriber.Subscriber, instance=True)
    lease_mgr = leaser.Leaser(subscriber_)
    worker = mock.create_autospec(threading.Thread, instance=True)
    lease_mgr._thread = worker

    lease_mgr.stop()

    assert lease_mgr._stop_event.is_set()
    worker.join.assert_called_once()
    assert lease_mgr._thread is None
def test_stop():
    """stop() sets the stop event, joins the worker, and clears the reference."""
    manager = mock.create_autospec(
        streaming_pull_manager.StreamingPullManager, instance=True)
    lease_mgr = leaser.Leaser(manager)
    worker = mock.create_autospec(threading.Thread, instance=True)
    lease_mgr._thread = worker

    lease_mgr.stop()

    assert lease_mgr._stop_event.is_set()
    worker.join.assert_called_once()
    assert lease_mgr._thread is None
def test_start(thread):
    """start() creates a named worker thread targeting maintain_leases."""
    subscriber_ = mock.create_autospec(subscriber.Subscriber, instance=True)
    lease_mgr = leaser.Leaser(subscriber_)

    lease_mgr.start()

    thread.assert_called_once_with(
        name=leaser._LEASE_WORKER_NAME, target=lease_mgr.maintain_leases)
    thread.return_value.start.assert_called_once()
    assert lease_mgr._thread is not None
def test_start(thread):
    """start() creates a named worker thread targeting maintain_leases."""
    manager = mock.create_autospec(
        streaming_pull_manager.StreamingPullManager, instance=True)
    lease_mgr = leaser.Leaser(manager)

    lease_mgr.start()

    thread.assert_called_once_with(
        name=leaser._LEASE_WORKER_NAME, target=lease_mgr.maintain_leases)
    thread.return_value.start.assert_called_once()
    assert lease_mgr._thread is not None
def test_maintain_leases_ack_ids(sleep):
    """A single leased message gets exactly one deadline extension."""
    subscriber_ = create_subscriber()
    make_sleep_mark_subscriber_as_inactive(sleep, subscriber_)
    lease_mgr = leaser.Leaser(subscriber_)

    lease_mgr.add([requests.LeaseRequest(ack_id='my ack id', byte_size=50)])
    lease_mgr.maintain_leases()

    subscriber_.modify_ack_deadline.assert_called_once_with(
        [requests.ModAckRequest(ack_id='my ack id', seconds=10)])
    sleep.assert_called()
def test_add_and_remove():
    """Leases accumulate counts/bytes on add and release them on remove."""
    lease_mgr = leaser.Leaser(mock.sentinel.subscriber)

    lease_mgr.add([requests.LeaseRequest(ack_id='ack1', byte_size=50)])
    lease_mgr.add([requests.LeaseRequest(ack_id='ack2', byte_size=25)])

    # Both leases are tracked and the byte totals accumulate.
    assert lease_mgr.message_count == 2
    assert set(lease_mgr.ack_ids) == set(['ack1', 'ack2'])
    assert lease_mgr.bytes == 75

    lease_mgr.remove([requests.DropRequest(ack_id='ack1', byte_size=50)])

    # Removal releases both the message slot and its bytes.
    assert lease_mgr.message_count == 1
    assert set(lease_mgr.ack_ids) == set(['ack2'])
    assert lease_mgr.bytes == 25
def test_add_and_remove():
    """Leases accumulate counts/bytes on add and release them on remove."""
    lease_mgr = leaser.Leaser(mock.sentinel.manager)

    lease_mgr.add([requests.LeaseRequest(ack_id="ack1", byte_size=50)])
    lease_mgr.add([requests.LeaseRequest(ack_id="ack2", byte_size=25)])

    # Both leases are tracked and the byte totals accumulate.
    assert lease_mgr.message_count == 2
    assert set(lease_mgr.ack_ids) == set(["ack1", "ack2"])
    assert lease_mgr.bytes == 75

    lease_mgr.remove([requests.DropRequest(ack_id="ack1", byte_size=50)])

    # Removal releases both the message slot and its bytes.
    assert lease_mgr.message_count == 1
    assert set(lease_mgr.ack_ids) == set(["ack2"])
    assert lease_mgr.bytes == 25
def test_maintain_leases_inactive_manager(caplog):
    """Leases keep being maintained even when the manager is inactive."""
    caplog.set_level(logging.INFO)
    manager = create_manager()
    manager.is_active = False
    lease_mgr = leaser.Leaser(manager)
    make_sleep_mark_event_as_done(lease_mgr)

    lease_mgr.add(
        [requests.LeaseRequest(ack_id="my_ack_ID", byte_size=42, ordering_key="")])
    lease_mgr.maintain_leases()

    # Maintenance still ran despite the inactive manager, then exited.
    manager.dispatcher.modify_ack_deadline.assert_called()
    assert "exiting" in caplog.text
def test_maintain_leases_outdated_items(time):
    """Only leases with an expired expiry timer are dropped; all others renew."""
    manager = create_manager()
    lease_mgr = leaser.Leaser(manager)
    make_sleep_mark_event_as_done(lease_mgr)

    max_duration = manager.flow_control.max_lease_duration

    # "ack1": added at time zero WITH its expiry timer started.
    time.return_value = 0
    lease_mgr.add(
        [requests.LeaseRequest(ack_id="ack1", byte_size=50, ordering_key="")])
    lease_mgr.start_lease_expiry_timer(["ack1"])

    # "ack2": added at time zero, expiry timer NOT started.
    lease_mgr.add(
        [requests.LeaseRequest(ack_id="ack2", byte_size=50, ordering_key="")])

    # "ack3": added near the timeline's end WITH its expiry timer started.
    time.return_value = max_duration - 1
    lease_mgr.add(
        [requests.LeaseRequest(ack_id="ack3", byte_size=50, ordering_key="")])
    lease_mgr.start_lease_expiry_timer(["ack3"])

    # "ack4": added near the timeline's end, expiry timer NOT started.
    lease_mgr.add(
        [requests.LeaseRequest(ack_id="ack4", byte_size=50, ordering_key="")])

    # Jump the clock past the end of the timeline and run maintenance.
    time.return_value = max_duration + 1
    lease_mgr.maintain_leases()

    # Everything except the expired "ack1" must be renewed.
    modacks = manager.dispatcher.modify_ack_deadline.call_args.args[0]
    expected = [
        requests.ModAckRequest(ack_id="ack2", seconds=10),
        requests.ModAckRequest(ack_id="ack3", seconds=10),
        requests.ModAckRequest(ack_id="ack4", seconds=10),
    ]
    # Sort both sides so the comparison tolerates ordering variance.
    assert sorted(modacks) == sorted(expected)

    manager.dispatcher.drop.assert_called_once_with(
        [requests.DropRequest(ack_id="ack1", byte_size=50, ordering_key="")])
def test_maintain_leases_outdated_items(time):
    """Expired leases are dropped while fresh ones are renewed."""
    manager = create_manager()
    lease_mgr = leaser.Leaser(manager)
    make_sleep_mark_manager_as_inactive(lease_mgr)

    max_duration = manager.flow_control.max_lease_duration

    # "ack1" enters at the very start of the timeline.
    time.return_value = 0
    lease_mgr.add([requests.LeaseRequest(ack_id="ack1", byte_size=50)])

    # "ack2" enters just before the timeline's end.
    time.return_value = max_duration - 1
    lease_mgr.add([requests.LeaseRequest(ack_id="ack2", byte_size=50)])

    # Advance the clock past the end of the timeline and run maintenance.
    time.return_value = max_duration + 1
    lease_mgr.maintain_leases()

    # Only "ack2" is renewed; "ack1" has outlived its lease and is dropped.
    manager.dispatcher.modify_ack_deadline.assert_called_once_with(
        [requests.ModAckRequest(ack_id="ack2", seconds=10)])
    manager.dispatcher.drop.assert_called_once_with(
        [requests.DropRequest(ack_id="ack1", byte_size=50)])