def test_on_response():
    manager, _, dispatcher, _, scheduler = make_running_manager()
    manager._callback = mock.sentinel.callback

    # Set up the messages.
    response = types.StreamingPullResponse(
        received_messages=[
            types.ReceivedMessage(
                ack_id='fack',
                message=types.PubsubMessage(data=b'foo', message_id='1')
            ),
            types.ReceivedMessage(
                ack_id='back',
                message=types.PubsubMessage(data=b'bar', message_id='2')
            ),
        ],
    )

    # Actually run the method and prove that modack and schedule
    # are called in the expected way.
    manager._on_response(response)

    dispatcher.modify_ack_deadline.assert_called_once_with(
        [requests.ModAckRequest('fack', 10),
         requests.ModAckRequest('back', 10)]
    )

    schedule_calls = scheduler.schedule.mock_calls
    assert len(schedule_calls) == 2
    for call in schedule_calls:
        assert call[1][0] == mock.sentinel.callback
        assert isinstance(call[1][1], message.Message)
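
These excerpts lean on shared test helpers that are not shown, most notably make_running_manager(). Below is a hedged sketch of what such a helper could look like, assuming it builds a real StreamingPullManager and swaps its collaborators for autospec mocks; the private attribute names and the six-value return order are assumptions inferred from how most of the tests below unpack it (the test above unpacks only five values, so its version of the helper differs).

# Hypothetical sketch of a make_running_manager() test helper; not the actual
# helper from google-cloud-pubsub, and attribute names are assumptions.
import mock

from google.api_core import bidi
from google.cloud.pubsub_v1.subscriber import scheduler
from google.cloud.pubsub_v1.subscriber._protocol import dispatcher
from google.cloud.pubsub_v1.subscriber._protocol import heartbeater
from google.cloud.pubsub_v1.subscriber._protocol import leaser
from google.cloud.pubsub_v1.subscriber._protocol import streaming_pull_manager


def make_running_manager():
    manager = streaming_pull_manager.StreamingPullManager(
        mock.sentinel.client, mock.sentinel.subscription
    )
    # Replace the background collaborators with mocks so that _on_response()
    # can be exercised without real gRPC streams or worker threads.
    manager._consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)
    manager._consumer.is_active = True
    manager._dispatcher = mock.create_autospec(dispatcher.Dispatcher, instance=True)
    manager._leaser = mock.create_autospec(leaser.Leaser, instance=True)
    manager._heartbeater = mock.create_autospec(heartbeater.Heartbeater, instance=True)
    manager._scheduler = mock.create_autospec(scheduler.ThreadScheduler, instance=True)
    return (
        manager,
        manager._consumer,
        manager._dispatcher,
        manager._leaser,
        manager._heartbeater,
        manager._scheduler,
    )
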

Example 2

def test__on_response_no_leaser_overload():
    manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
    manager._callback = mock.sentinel.callback

    # Set up the messages.
    response = types.StreamingPullResponse(received_messages=[
        types.ReceivedMessage(ack_id="fack",
                              message=types.PubsubMessage(data=b"foo",
                                                          message_id="1")),
        types.ReceivedMessage(ack_id="back",
                              message=types.PubsubMessage(data=b"bar",
                                                          message_id="2")),
    ])

    # adjust message bookkeeping in leaser
    fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=42)

    # Actually run the method and prove that modack and schedule
    # are called in the expected way.
    manager._on_response(response)

    dispatcher.modify_ack_deadline.assert_called_once_with([
        requests.ModAckRequest("fack", 10),
        requests.ModAckRequest("back", 10)
    ])

    schedule_calls = scheduler.schedule.mock_calls
    assert len(schedule_calls) == 2
    for call in schedule_calls:
        assert call[1][0] == mock.sentinel.callback
        assert isinstance(call[1][1], message.Message)

    # the leaser load limit not hit, no messages had to be put on hold
    assert manager._messages_on_hold.size == 0
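
fake_leaser_add() is another helper the excerpts assume. A minimal hedged sketch of the idea: seed the mocked leaser with an initial message count and byte total and let add() grow them, so load-related checks in _on_response() behave as if real leases were being tracked. The attribute names and mechanics are assumptions, not the actual helper, and one excerpt below passes init_bytes instead of assumed_msg_size, so the real signature also varies between versions.

# Hypothetical sketch of fake_leaser_add(); not the actual test helper.
def fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=10):
    # Pretend the leaser already tracks `init_msg_count` messages.
    leaser.message_count = init_msg_count
    leaser.bytes = init_msg_count * assumed_msg_size

    def fake_add(items):
        # Mirror what a real add() would do to the bookkeeping totals.
        leaser.message_count += len(items)
        leaser.bytes += len(items) * assumed_msg_size

    leaser.add.side_effect = fake_add
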

Example 3

def test__on_response_modifies_ack_deadline():
    manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
    manager._callback = mock.sentinel.callback

    # Set up the messages.
    response = gapic_types.StreamingPullResponse(received_messages=[
        gapic_types.ReceivedMessage(
            ack_id="ack_1",
            message=gapic_types.PubsubMessage(data=b"foo", message_id="1"),
        ),
        gapic_types.ReceivedMessage(
            ack_id="ack_2",
            message=gapic_types.PubsubMessage(data=b"bar", message_id="2"),
        ),
    ])

    # adjust message bookkeeping in leaser
    fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=80)

    # Actually run the method and check that the correct MODACK value is used.
    with mock.patch.object(type(manager),
                           "ack_deadline",
                           new=mock.PropertyMock(return_value=18)):
        manager._on_response(response)

    dispatcher.modify_ack_deadline.assert_called_once_with([
        requests.ModAckRequest("ack_1", 18),
        requests.ModAckRequest("ack_2", 18)
    ])

Example 4

def test__on_response_with_leaser_overload():
    manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
    manager._callback = mock.sentinel.callback

    # Set up the messages.
    response = gapic_types.StreamingPullResponse(
        received_messages=[
            gapic_types.ReceivedMessage(
                ack_id="fack",
                message=gapic_types.PubsubMessage(data=b"foo", message_id="1"),
            ),
            gapic_types.ReceivedMessage(
                ack_id="back",
                message=gapic_types.PubsubMessage(data=b"bar", message_id="2"),
            ),
            gapic_types.ReceivedMessage(
                ack_id="zack",
                message=gapic_types.PubsubMessage(data=b"baz", message_id="3"),
            ),
        ]
    )

    # Adjust message bookkeeping in leaser. Pick 999 messages, which is just below
    # the default FlowControl.max_messages limit.
    fake_leaser_add(leaser, init_msg_count=999, assumed_msg_size=10)

    # Actually run the method and prove that modack and schedule
    # are called in the expected way.
    manager._on_response(response)

    # all messages should be added to the lease management and have their ACK
    # deadline extended, even those not dispatched to callbacks
    dispatcher.modify_ack_deadline.assert_called_once_with(
        [
            requests.ModAckRequest("fack", 10),
            requests.ModAckRequest("back", 10),
            requests.ModAckRequest("zack", 10),
        ]
    )

    # one message should be scheduled, the flow control limits allow for it
    schedule_calls = scheduler.schedule.mock_calls
    assert len(schedule_calls) == 1
    call_args = schedule_calls[0][1]
    assert call_args[0] == mock.sentinel.callback
    assert isinstance(call_args[1], message.Message)
    assert call_args[1].message_id == "1"

    # the rest of the messages should have been put on hold
    assert manager._messages_on_hold.size == 2
    while True:
        msg = manager._messages_on_hold.get()
        if msg is None:
            break
        else:
            assert isinstance(msg, message.Message)
            assert msg.message_id in ("2", "3")

Example 5

def test__on_response_with_leaser_overload():
    manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
    manager._callback = mock.sentinel.callback

    # Set up the messages.
    response = types.StreamingPullResponse(
        received_messages=[
            types.ReceivedMessage(
                ack_id="fack", message=types.PubsubMessage(data=b"foo", message_id="1")
            ),
            types.ReceivedMessage(
                ack_id="back", message=types.PubsubMessage(data=b"bar", message_id="2")
            ),
            types.ReceivedMessage(
                ack_id="zack", message=types.PubsubMessage(data=b"baz", message_id="3")
            ),
        ]
    )

    # Adjust message bookkeeping in leaser. Pick 99 messages, which is just below
    # the default FlowControl.max_messages limit.
    fake_leaser_add(leaser, init_msg_count=99, init_bytes=990)

    # Actually run the method and prove that modack and schedule
    # are called in the expected way.
    manager._on_response(response)

    dispatcher.modify_ack_deadline.assert_called_once_with(
        [
            requests.ModAckRequest("fack", 10),
            requests.ModAckRequest("back", 10),
            requests.ModAckRequest("zack", 10),
        ]
    )

    # one message should be scheduled, the leaser capacity allows for it
    schedule_calls = scheduler.schedule.mock_calls
    assert len(schedule_calls) == 1
    call_args = schedule_calls[0][1]
    assert call_args[0] == mock.sentinel.callback
    assert isinstance(call_args[1], message.Message)
    assert call_args[1].message_id == "1"

    # the rest of the messages should have been put on hold
    assert manager._messages_on_hold.qsize() == 2
    while True:
        try:
            msg = manager._messages_on_hold.get_nowait()
        except queue.Empty:
            break
        else:
            assert isinstance(msg, message.Message)
            assert msg.message_id in ("2", "3")

Example 6

def test_maintain_leases_outdated_items(sleep, time):
    manager = create_manager()
    make_sleep_mark_manager_as_inactive(sleep, manager)
    leaser_ = leaser.Leaser(manager)

    # Add these items at the beginning of the timeline
    time.return_value = 0
    leaser_.add([
        requests.LeaseRequest(ack_id='ack1', byte_size=50)])

    # Add another item towards the end of the timeline.
    time.return_value = manager.flow_control.max_lease_duration - 1
    leaser_.add([
        requests.LeaseRequest(ack_id='ack2', byte_size=50)])

    # Now make sure time reports that we are at the end of our timeline.
    time.return_value = manager.flow_control.max_lease_duration + 1

    leaser_.maintain_leases()

    # Only ack2 should be renewed. ack1 should've been dropped
    manager.dispatcher.modify_ack_deadline.assert_called_once_with([
        requests.ModAckRequest(
            ack_id='ack2',
            seconds=10,
        )
    ])
    manager.dispatcher.drop.assert_called_once_with([
        requests.DropRequest(ack_id='ack1', byte_size=50)
    ])
    sleep.assert_called()

Example 7

def test_modify_ack_deadline():
    msg = create_message(b"foo", ack_id="bogus_ack_id")
    with mock.patch.object(msg._request_queue, "put") as put:
        msg.modify_ack_deadline(60)
        put.assert_called_once_with(
            requests.ModAckRequest(ack_id="bogus_ack_id", seconds=60))
        check_call_types(put, requests.ModAckRequest)

Example 8

    def _on_response(self, response):
        """Process all received Pub/Sub messages.

        For each message, send a modified acknowledgment request to the
        server. This prevents expiration of the message due to buffering by
        gRPC or proxy/firewall. This makes the server and client expiration
        timer closer to each other thus preventing the message being
        redelivered multiple times.

        After the messages have all had their ack deadline updated, execute
        the callback for each message using the executor.
        """
        _LOGGER.debug('Scheduling callbacks for %s messages.',
                      len(response.received_messages))

        # Immediately modack the messages we received, as this tells the server
        # that we've received them.
        items = [
            requests.ModAckRequest(message.ack_id,
                                   self._ack_histogram.percentile(99))
            for message in response.received_messages
        ]
        self._dispatcher.modify_ack_deadline(items)
        for received_message in response.received_messages:
            message = google.cloud.pubsub_v1.subscriber.message.Message(
                received_message.message, received_message.ack_id,
                self._scheduler.queue)
            # TODO: Immediately lease instead of using the callback queue.
            self._scheduler.schedule(self._callback, message)
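
The self._ack_histogram.percentile(99) call above is where observed ack latencies become the modack deadline. A toy, self-contained illustration of that idea follows; it is an assumed stand-in, not the library's histogram implementation.

# Toy percentile illustration (assumed stand-in, not the library's Histogram).
def percentile(samples, percent):
    if not samples:
        return 10  # a sensible default deadline, in seconds
    ordered = sorted(samples)
    index = min(int(len(ordered) * percent / 100), len(ordered) - 1)
    return ordered[index]


ack_latencies = [3, 4, 4, 5, 7, 12, 15]   # seconds past messages took to ack
deadline = percentile(ack_latencies, 99)  # -> 15, used as ModAckRequest seconds
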

Example 9

def test_modify_ack_deadline_splitting_large_payload():
    manager = mock.create_autospec(streaming_pull_manager.StreamingPullManager,
                                   instance=True)
    dispatcher_ = dispatcher.Dispatcher(manager, mock.sentinel.queue)

    items = [
        # use realistic lengths for ACK IDs (max 176 bytes)
        requests.ModAckRequest(ack_id=str(i).zfill(176), seconds=60)
        for i in range(5001)
    ]
    dispatcher_.modify_ack_deadline(items)

    calls = manager.send.call_args_list
    assert len(calls) == 3

    all_ack_ids = {item.ack_id for item in items}
    sent_ack_ids = collections.Counter()

    for call in calls:
        message = call.args[0]
        assert message._pb.ByteSize() <= 524288  # server-side limit (2**19)
        sent_ack_ids.update(message.modify_deadline_ack_ids)

    # all messages should have been MODACK-ed
    assert set(sent_ack_ids) == all_ack_ids
    # each message MODACK-ed exactly once
    assert sent_ack_ids.most_common(1)[0][1] == 1
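
The test above depends on the dispatcher chunking one oversized batch of modacks into several StreamingPullRequests, each under the server-side byte cap. A hedged sketch of that chunking idea follows; the per-item overhead and helper name are assumptions, and the real dispatcher's arithmetic (and therefore the exact number of resulting requests) may differ.

# Hypothetical chunking sketch; not the dispatcher's actual implementation.
_STREAMING_PULL_REQUEST_BYTES_LIMIT = 524288  # 2 ** 19, the server-side cap


def split_modacks(items, overhead_per_item=8):
    """Yield batches of ModAckRequests whose ack IDs fit under the byte cap."""
    batch, batch_bytes = [], 0
    for item in items:
        item_bytes = len(item.ack_id) + overhead_per_item
        if batch and batch_bytes + item_bytes > _STREAMING_PULL_REQUEST_BYTES_LIMIT:
            yield batch
            batch, batch_bytes = [], 0
        batch.append(item)
        batch_bytes += item_bytes
    if batch:
        yield batch
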

Example 10

    def _on_response(self, response):
        """Process all received Pub/Sub messages.

        For each message, send a modified acknowledgment request to the
        server. This prevents expiration of the message due to buffering by
        gRPC or proxy/firewall. This makes the server and client expiration
        timer closer to each other thus preventing the message being
        redelivered multiple times.

        After the messages have all had their ack deadline updated, execute
        the callback for each message using the executor.
        """
        if response is None:
            _LOGGER.debug(
                "Response callback invoked with None, likely due to a "
                "transport shutdown."
            )
            return

        # IMPORTANT: Circumvent the wrapper class and operate on the raw underlying
        # protobuf message to significantly gain on attribute access performance.
        received_messages = response._pb.received_messages

        _LOGGER.debug(
            "Processing %s received message(s), currently on hold %s (bytes %s).",
            len(received_messages),
            self._messages_on_hold.size,
            self._on_hold_bytes,
        )

        # Immediately (i.e. without waiting for the auto lease management)
        # modack the messages we received, as this tells the server that we've
        # received them.
        items = [
            requests.ModAckRequest(message.ack_id, self._ack_histogram.percentile(99))
            for message in received_messages
        ]
        self._dispatcher.modify_ack_deadline(items)

        with self._pause_resume_lock:
            for received_message in received_messages:
                message = google.cloud.pubsub_v1.subscriber.message.Message(
                    received_message.message,
                    received_message.ack_id,
                    received_message.delivery_attempt,
                    self._scheduler.queue,
                )
                self._messages_on_hold.put(message)
                self._on_hold_bytes += message.size
                req = requests.LeaseRequest(
                    ack_id=message.ack_id,
                    byte_size=message.size,
                    ordering_key=message.ordering_key,
                )
                self.leaser.add([req])

            self._maybe_release_messages()

        self.maybe_pause_consumer()

Example 11

    def _on_response(self, response):
        """Process all received Pub/Sub messages.

        For each message, send a modified acknowledgment request to the
        server. This prevents expiration of the message due to buffering by
        gRPC or proxy/firewall. This makes the server and client expiration
        timer closer to each other thus preventing the message being
        redelivered multiple times.

        After the messages have all had their ack deadline updated, execute
        the callback for each message using the executor.
        """
        _LOGGER.debug(
            "Processing %s received message(s), currently on hold %s (bytes %s).",
            len(response.received_messages),
            self._messages_on_hold.qsize(),
            self._on_hold_bytes,
        )

        # Immediately (i.e. without waiting for the auto lease management)
        # modack the messages we received, as this tells the server that we've
        # received them.
        items = [
            requests.ModAckRequest(message.ack_id, self._ack_histogram.percentile(99))
            for message in response.received_messages
        ]
        self._dispatcher.modify_ack_deadline(items)

        invoke_callbacks_for = []

        for received_message in response.received_messages:
            message = google.cloud.pubsub_v1.subscriber.message.Message(
                received_message.message, received_message.ack_id, self._scheduler.queue
            )
            # Making a decision based on the load, and modifying the data that
            # affects the load -> needs a lock, as that state can be modified
            # by different threads.
            with self._pause_resume_lock:
                if self.load < _MAX_LOAD:
                    invoke_callbacks_for.append(message)
                else:
                    self._messages_on_hold.put(message)
                    self._on_hold_bytes += message.size

            req = requests.LeaseRequest(ack_id=message.ack_id, byte_size=message.size)
            self.leaser.add([req])
            self.maybe_pause_consumer()

        _LOGGER.debug(
            "Scheduling callbacks for %s new messages, new total on hold %s (bytes %s).",
            len(invoke_callbacks_for),
            self._messages_on_hold.qsize(),
            self._on_hold_bytes,
        )
        for msg in invoke_callbacks_for:
            self._scheduler.schedule(self._callback, msg)
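
The self.load < _MAX_LOAD comparison above is a flow-control check. A hedged sketch of such a load metric, assuming it is the worst-case fraction of capacity across the message-count and byte dimensions; the real property differs between library versions.

# Hypothetical load-metric sketch; parameter names are assumptions.
def load(leased_message_count, leased_bytes, on_hold_bytes, flow_control):
    """Fraction of flow-control capacity currently in use (1.0 == full)."""
    return max(
        leased_message_count / flow_control.max_messages,
        (leased_bytes + on_hold_bytes) / flow_control.max_bytes,
    )
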

Example 12

def test_maintain_leases_ack_ids():
    manager = create_manager()
    leaser_ = leaser.Leaser(manager)
    make_sleep_mark_manager_as_inactive(leaser_)
    leaser_.add([requests.LeaseRequest(ack_id="my ack id", byte_size=50)])

    leaser_.maintain_leases()

    manager.dispatcher.modify_ack_deadline.assert_called_once_with(
        [requests.ModAckRequest(ack_id="my ack id", seconds=10)])

Example 13

def test_maintain_leases_outdated_items(time):
    manager = create_manager()
    leaser_ = leaser.Leaser(manager)
    make_sleep_mark_event_as_done(leaser_)

    # Add and start expiry timer at the beginning of the timeline.
    time.return_value = 0
    leaser_.add(
        [requests.LeaseRequest(ack_id="ack1", byte_size=50, ordering_key="")])
    leaser_.start_lease_expiry_timer(["ack1"])

    # Add a message but don't start the lease expiry timer.
    leaser_.add(
        [requests.LeaseRequest(ack_id="ack2", byte_size=50, ordering_key="")])

    # Add a message and start expiry timer towards the end of the timeline.
    time.return_value = manager.flow_control.max_lease_duration - 1
    leaser_.add(
        [requests.LeaseRequest(ack_id="ack3", byte_size=50, ordering_key="")])
    leaser_.start_lease_expiry_timer(["ack3"])

    # Add a message towards the end of the timeline, but DO NOT start expiry
    # timer.
    leaser_.add(
        [requests.LeaseRequest(ack_id="ack4", byte_size=50, ordering_key="")])

    # Now make sure time reports that we are past the end of our timeline.
    time.return_value = manager.flow_control.max_lease_duration + 1

    leaser_.maintain_leases()

    # ack2, ack3, and ack4 should be renewed. ack1 should've been dropped
    modacks = manager.dispatcher.modify_ack_deadline.call_args.args[0]
    expected = [
        requests.ModAckRequest(ack_id="ack2", seconds=10),
        requests.ModAckRequest(ack_id="ack3", seconds=10),
        requests.ModAckRequest(ack_id="ack4", seconds=10),
    ]
    # Use sorting to allow for ordering variance.
    assert sorted(modacks) == sorted(expected)

    manager.dispatcher.drop.assert_called_once_with(
        [requests.DropRequest(ack_id="ack1", byte_size=50, ordering_key="")])

Example 14

def test_modify_ack_deadline():
    msg = create_message(b'foo', ack_id='bogus_ack_id')
    with mock.patch.object(msg._request_queue, 'put') as put:
        msg.modify_ack_deadline(60)
        put.assert_called_once_with(
            requests.ModAckRequest(
                ack_id='bogus_ack_id',
                seconds=60,
            ))
        check_call_types(put, requests.ModAckRequest)

Example 15

def test_modify_ack_deadline():
    manager = mock.create_autospec(streaming_pull_manager.StreamingPullManager,
                                   instance=True)
    dispatcher_ = dispatcher.Dispatcher(manager, mock.sentinel.queue)

    items = [requests.ModAckRequest(ack_id="ack_id_string", seconds=60)]
    dispatcher_.modify_ack_deadline(items)

    manager.send.assert_called_once_with(
        types.StreamingPullRequest(modify_deadline_ack_ids=["ack_id_string"],
                                   modify_deadline_seconds=[60]))

Example 16

    def nack(self, items):
        """Explicitly deny receipt of messages.

        Args:
            items(Sequence[NackRequest]): The items to deny.
        """
        self.modify_ack_deadline([
            requests.ModAckRequest(ack_id=item.ack_id, seconds=0)
            for item in items
        ])
        self.drop([requests.DropRequest(*item) for item in items])
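
As the method above shows, a nack is simply a modack with a zero-second deadline followed by dropping the lease, which prompts the server to redeliver. A hedged sketch of how a subscriber callback typically triggers it; handle() is a hypothetical application function.

# Hypothetical usage sketch: nacking from a subscriber callback.
def callback(message):
    try:
        handle(message)  # hypothetical application logic
        message.ack()
    except Exception:
        # Enqueues a NackRequest; the dispatcher turns it into a
        # ModAckRequest(ack_id, seconds=0) followed by a DropRequest.
        message.nack()
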

Example 17

    def _on_response(self, response):
        """Process all received Pub/Sub messages.

        For each message, send a modified acknowledgment request to the
        server. This prevents expiration of the message due to buffering by
        gRPC or proxy/firewall. This makes the server and client expiration
        timer closer to each other thus preventing the message being
        redelivered multiple times.

        After the messages have all had their ack deadline updated, execute
        the callback for each message using the executor.
        """
        _LOGGER.debug(
            "Processing %s received message(s), currently on hold %s.",
            len(response.received_messages),
            self._messages_on_hold.qsize(),
        )

        # Immediately modack the messages we received, as this tells the server
        # that we've received them.
        items = [
            requests.ModAckRequest(message.ack_id, self._ack_histogram.percentile(99))
            for message in response.received_messages
        ]
        self._dispatcher.modify_ack_deadline(items)

        invoke_callbacks_for = []

        for received_message in response.received_messages:
            message = google.cloud.pubsub_v1.subscriber.message.Message(
                received_message.message,
                received_message.ack_id,
                self._scheduler.queue,
                autolease=False,
            )
            if self.load < 1.0:
                req = requests.LeaseRequest(
                    ack_id=message.ack_id, byte_size=message.size
                )
                self.leaser.add([req])
                invoke_callbacks_for.append(message)
                self.maybe_pause_consumer()
            else:
                self._messages_on_hold.put(message)

        _LOGGER.debug(
            "Scheduling callbacks for %s new messages, new total on hold %s.",
            len(invoke_callbacks_for),
            self._messages_on_hold.qsize(),
        )
        for msg in invoke_callbacks_for:
            self._scheduler.schedule(self._callback, msg)

Example 18

def test_maintain_leases_ack_ids(sleep):
    subscriber_ = create_subscriber()
    make_sleep_mark_subscriber_as_inactive(sleep, subscriber_)
    leaser_ = leaser.Leaser(subscriber_)
    leaser_.add([requests.LeaseRequest(ack_id='my ack id', byte_size=50)])

    leaser_.maintain_leases()

    subscriber_.modify_ack_deadline.assert_called_once_with(
        [requests.ModAckRequest(
            ack_id='my ack id',
            seconds=10,
        )])
    sleep.assert_called()

Example 19

    def modify_ack_deadline(self, seconds):
        """Resets the deadline for acknowledgement.

        New deadline will be the given value of seconds from now.

        The default implementation handles this for you; you should not need
        to manually deal with setting ack deadlines. The exception case is
        if you are implementing your own custom subclass of
        :class:`~.pubsub_v1.subscriber._consumer.Consumer`.

        Args:
            seconds (int): The number of seconds to set the lease deadline
                to. This should be between 0 and 600. Due to network latency,
                values below 10 are advised against.
        """
        self._request_queue.put(
            requests.ModAckRequest(ack_id=self._ack_id, seconds=seconds))
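
A short hedged usage sketch of the method documented above, for a callback that needs more time than the current deadline allows; slow_processing() is a hypothetical placeholder.

# Hypothetical usage sketch of Message.modify_ack_deadline().
def callback(message):
    # Ask for 60 more seconds before Pub/Sub may redeliver this message.
    message.modify_ack_deadline(60)
    slow_processing(message.data)  # hypothetical long-running work
    message.ack()
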

Example 20

from google.cloud.pubsub_v1 import types
from google.cloud.pubsub_v1.subscriber._protocol import dispatcher
from google.cloud.pubsub_v1.subscriber._protocol import helper_threads
from google.cloud.pubsub_v1.subscriber._protocol import requests
from google.cloud.pubsub_v1.subscriber._protocol import streaming_pull_manager

import mock
from six.moves import queue
import pytest


@pytest.mark.parametrize('item,method_name', [
    (requests.AckRequest(0, 0, 0), 'ack'),
    (requests.DropRequest(0, 0), 'drop'),
    (requests.LeaseRequest(0, 0), 'lease'),
    (requests.ModAckRequest(0, 0), 'modify_ack_deadline'),
    (requests.NackRequest(0, 0), 'nack')
])
def test_dispatch_callback(item, method_name):
    manager = mock.create_autospec(
        streaming_pull_manager.StreamingPullManager, instance=True)
    dispatcher_ = dispatcher.Dispatcher(manager, mock.sentinel.queue)

    items = [item]

    with mock.patch.object(dispatcher_, method_name) as method:
        dispatcher_.dispatch_callback(items)

    method.assert_called_once_with([item])


Example 21

from google.cloud.pubsub_v1.subscriber._protocol import dispatcher
from google.cloud.pubsub_v1.subscriber._protocol import helper_threads
from google.cloud.pubsub_v1.subscriber._protocol import requests
from google.cloud.pubsub_v1.subscriber._protocol import streaming_pull_manager
from google.pubsub_v1 import types as gapic_types

import mock
import pytest


@pytest.mark.parametrize(
    "item,method_name",
    [
        (requests.AckRequest(0, 0, 0, ""), "ack"),
        (requests.DropRequest(0, 0, ""), "drop"),
        (requests.LeaseRequest(0, 0, ""), "lease"),
        (requests.ModAckRequest(0, 0), "modify_ack_deadline"),
        (requests.NackRequest(0, 0, ""), "nack"),
    ],
)
def test_dispatch_callback(item, method_name):
    manager = mock.create_autospec(streaming_pull_manager.StreamingPullManager,
                                   instance=True)
    dispatcher_ = dispatcher.Dispatcher(manager, mock.sentinel.queue)

    items = [item]

    with mock.patch.object(dispatcher_, method_name) as method:
        dispatcher_.dispatch_callback(items)

    method.assert_called_once_with([item])
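
Both dispatch tests assume that dispatch_callback() groups queued request objects by type and forwards each group to the matching dispatcher method. A hedged sketch of that routing idea; the mapping constant and function shape are assumptions, not the library's code.

# Hypothetical routing sketch for dispatch_callback(); illustration only.
import collections

from google.cloud.pubsub_v1.subscriber._protocol import requests

_ROUTES = {
    requests.AckRequest: "ack",
    requests.DropRequest: "drop",
    requests.LeaseRequest: "lease",
    requests.ModAckRequest: "modify_ack_deadline",
    requests.NackRequest: "nack",
}


def dispatch_callback(dispatcher_, items):
    by_type = collections.defaultdict(list)
    for item in items:
        by_type[type(item)].append(item)
    for request_type, batch in by_type.items():
        getattr(dispatcher_, _ROUTES[request_type])(batch)
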

Example 22

    def maintain_leases(self):
        """Maintain all of the leases being managed.

        This method modifies the ack deadline for all of the managed
        ack IDs, then waits for most of that time (but with jitter), and
        repeats.
        """
        while self._manager.is_active and not self._stop_event.is_set():
            # Determine the appropriate duration for the lease. This is
            # based off of how long previous messages have taken to ack, with
            # a sensible default and within the ranges allowed by Pub/Sub.
            deadline = self._manager.ack_deadline
            _LOGGER.debug("The current deadline value is %d seconds.", deadline)

            # Make a copy of the leased messages. This is needed because it's
            # possible for another thread to modify the dictionary while
            # we're iterating over it.
            leased_messages = copy.copy(self._leased_messages)

            # Drop any leases that are beyond the max lease time. This ensures
            # that in the event of a badly behaving actor, we can drop messages
            # and allow the Pub/Sub server to resend them.
            cutoff = time.time() - self._manager.flow_control.max_lease_duration
            to_drop = [
                requests.DropRequest(ack_id, item.size, item.ordering_key)
                for ack_id, item in leased_messages.items()
                if item.sent_time < cutoff
            ]

            if to_drop:
                _LOGGER.warning(
                    "Dropping %s items because they were leased too long.", len(to_drop)
                )
                self._manager.dispatcher.drop(to_drop)

            # Remove dropped items from our copy of the leased messages (they
            # have already been removed from the real one by
            # self._manager.drop(), which calls self.remove()).
            for item in to_drop:
                leased_messages.pop(item.ack_id)

            # Create a streaming pull request.
            # We do not actually call `modify_ack_deadline` over and over
            # because it is more efficient to make a single request.
            ack_ids = leased_messages.keys()
            if ack_ids:
                _LOGGER.debug("Renewing lease for %d ack IDs.", len(ack_ids))

                # NOTE: This may not work as expected if ``consumer.active``
                #       has changed since we checked it. An implementation
                #       without any sort of race condition would require a
                #       way for ``send_request`` to fail when the consumer
                #       is inactive.
                self._manager.dispatcher.modify_ack_deadline(
                    [requests.ModAckRequest(ack_id, deadline) for ack_id in ack_ids]
                )

            # Now wait an appropriate period of time and do this again.
            #
            # We determine the appropriate period of time based on a random
            # period between 0 seconds and 90% of the lease. This use of
            # jitter (http://bit.ly/2s2ekL7) helps decrease contention in cases
            # where there are many clients.
            snooze = random.uniform(0.0, deadline * 0.9)
            _LOGGER.debug("Snoozing lease management for %f seconds.", snooze)
            self._stop_event.wait(timeout=snooze)

        _LOGGER.info("%s exiting.", _LEASE_WORKER_NAME)
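
The snooze at the end of the loop above is plain jitter. A tiny worked example of the numbers, assuming the default 10-second deadline: each renewal round is followed by a uniformly random sleep of up to 90% of the deadline, so many clients do not renew in lockstep.

# Worked jitter example (illustration only).
import random

deadline = 10
snooze = random.uniform(0.0, deadline * 0.9)  # somewhere in [0.0, 9.0)
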

Example 23

def test__on_response_with_ordering_keys():
    manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
    manager._callback = mock.sentinel.callback

    # Set up the messages.
    response = types.StreamingPullResponse(received_messages=[
        types.ReceivedMessage(
            ack_id="fack",
            message=types.PubsubMessage(
                data=b"foo", message_id="1", ordering_key=""),
        ),
        types.ReceivedMessage(
            ack_id="back",
            message=types.PubsubMessage(
                data=b"bar", message_id="2", ordering_key="key1"),
        ),
        types.ReceivedMessage(
            ack_id="zack",
            message=types.PubsubMessage(
                data=b"baz", message_id="3", ordering_key="key1"),
        ),
    ])

    # Make leaser with zero initial messages, so we don't test lease management
    # behavior.
    fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=10)

    # Actually run the method and prove that modack and schedule are called in
    # the expected way.
    manager._on_response(response)

    # All messages should be added to the lease management and have their ACK
    # deadline extended, even those not dispatched to callbacks.
    dispatcher.modify_ack_deadline.assert_called_once_with([
        requests.ModAckRequest("fack", 10),
        requests.ModAckRequest("back", 10),
        requests.ModAckRequest("zack", 10),
    ])

    # The first two messages should be scheduled. The third should be put on
    # hold because it is blocked until the second, which has the same ordering
    # key, completes.
    schedule_calls = scheduler.schedule.mock_calls
    assert len(schedule_calls) == 2
    call_args = schedule_calls[0][1]
    assert call_args[0] == mock.sentinel.callback
    assert isinstance(call_args[1], message.Message)
    assert call_args[1].message_id == "1"

    call_args = schedule_calls[1][1]
    assert call_args[0] == mock.sentinel.callback
    assert isinstance(call_args[1], message.Message)
    assert call_args[1].message_id == "2"

    # Message 3 should have been put on hold.
    assert manager._messages_on_hold.size == 1
    # No messages available because message 2 (with "key1") has not completed yet.
    assert manager._messages_on_hold.get() is None

    # Complete message 2 (with "key1").
    manager.activate_ordering_keys(["key1"])

    # Completing message 2 should release message 3.
    schedule_calls = scheduler.schedule.mock_calls
    assert len(schedule_calls) == 3
    call_args = schedule_calls[2][1]
    assert call_args[0] == mock.sentinel.callback
    assert isinstance(call_args[1], message.Message)
    assert call_args[1].message_id == "3"

    # No messages available in the queue.
    assert manager._messages_on_hold.get() is None
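
The ordering-key behaviour this last test exercises can be pictured with a much simplified hold structure: a message whose ordering key already has an in-flight message is parked per key, and activating the key releases the next one. The sketch below is a hedged, heavily simplified stand-in, not the library's MessagesOnHold class.

# Hypothetical simplified sketch of per-ordering-key holds; illustration only.
import collections


class SimpleOnHold:
    def __init__(self):
        self._ready = collections.deque()  # messages deliverable right now
        self._pending = collections.defaultdict(collections.deque)  # parked per key
        self._active_keys = set()
        self.size = 0

    def put(self, message):
        key = message.ordering_key
        if key and key in self._active_keys:
            self._pending[key].append(message)  # blocked behind same key
        else:
            if key:
                self._active_keys.add(key)
            self._ready.append(message)
        self.size += 1

    def get(self):
        if not self._ready:
            return None
        self.size -= 1
        return self._ready.popleft()

    def activate(self, key):
        """Called when the in-flight message for `key` completes."""
        if self._pending[key]:
            self._ready.append(self._pending[key].popleft())
        else:
            self._active_keys.discard(key)
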