def test_drop():
    msg = create_message(b"foo", ack_id="bogus_ack_id")
    with mock.patch.object(msg._request_queue, "put") as put:
        msg.drop()
        put.assert_called_once_with(
            requests.DropRequest(ack_id="bogus_ack_id", byte_size=30))
        check_call_types(put, requests.DropRequest)
Example 2
def test_drop_ordered_messages():
    manager = mock.create_autospec(
        streaming_pull_manager.StreamingPullManager, instance=True
    )
    dispatcher_ = dispatcher.Dispatcher(manager, mock.sentinel.queue)

    items = [
        requests.DropRequest(ack_id="ack_id_string", byte_size=10, ordering_key=""),
        requests.DropRequest(ack_id="ack_id_string", byte_size=10, ordering_key="key1"),
        requests.DropRequest(ack_id="ack_id_string", byte_size=10, ordering_key="key2"),
    ]
    dispatcher_.drop(items)

    manager.leaser.remove.assert_called_once_with(items)
    assert list(manager.activate_ordering_keys.call_args.args[0]) == ["key1", "key2"]
    manager.maybe_resume_consumer.assert_called_once()
Example 3
def test_maintain_leases_outdated_items(sleep, time):
    manager = create_manager()
    make_sleep_mark_manager_as_inactive(sleep, manager)
    leaser_ = leaser.Leaser(manager)

    # Add these items at the beginning of the timeline
    time.return_value = 0
    leaser_.add([
        requests.LeaseRequest(ack_id='ack1', byte_size=50)])

    # Add another item towards the end of the timeline
    time.return_value = manager.flow_control.max_lease_duration - 1
    leaser_.add([
        requests.LeaseRequest(ack_id='ack2', byte_size=50)])

    # Now make sure time reports that we are at the end of our timeline.
    time.return_value = manager.flow_control.max_lease_duration + 1

    leaser_.maintain_leases()

    # Only ack2 should be renewed. ack1 should've been dropped
    manager.dispatcher.modify_ack_deadline.assert_called_once_with([
        requests.ModAckRequest(
            ack_id='ack2',
            seconds=10,
        )
    ])
    manager.dispatcher.drop.assert_called_once_with([
        requests.DropRequest(ack_id='ack1', byte_size=50)
    ])
    sleep.assert_called()
Example 4
def test_drop_and_resume():
    manager = make_manager(
        flow_control=types.FlowControl(max_messages=10, max_bytes=1000))
    manager._leaser = leaser.Leaser(manager)
    manager._consumer = mock.create_autospec(
        bidi.BackgroundConsumer, instance=True)
    manager._consumer.is_paused = True

    # Add several messages until we're over the load threshold.
    manager.leaser.add([
        requests.LeaseRequest(ack_id='one', byte_size=750),
        requests.LeaseRequest(ack_id='two', byte_size=250)])

    assert manager.load == 1.0

    # Trying to resume now should have no effect as we're over the threshold.
    manager.maybe_resume_consumer()
    manager._consumer.resume.assert_not_called()

    # Drop the 250 byte message, which should put us under the resume
    # threshold.
    manager.leaser.remove([
        requests.DropRequest(ack_id='two', byte_size=250)])
    manager.maybe_resume_consumer()
    manager._consumer.resume.assert_called_once()
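
For orientation, the figures this test relies on work out as follows. This is a hedged back-of-the-envelope sketch: it assumes load is the larger of the message-count ratio and the byte ratio, and that the consumer is resumed once load falls below roughly 0.8, both inferred from the assertions above rather than quoted from the library.
max_messages, max_bytes = 10, 1000

# Both messages leased: 2 messages, 750 + 250 = 1000 bytes.
load_before = max(2 / max_messages, (750 + 250) / max_bytes)
assert load_before == 1.0   # at full load, resume() is refused

# After dropping the 250-byte message: 1 message, 750 bytes.
load_after = max(1 / max_messages, 750 / max_bytes)
assert load_after == 0.75   # below the assumed resume threshold of about 0.8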
Example 5
def test_remove_not_managed(caplog):
    caplog.set_level(logging.DEBUG)

    leaser_ = leaser.Leaser(mock.sentinel.subscriber)

    leaser_.remove([requests.DropRequest(ack_id='ack1', byte_size=50)])

    assert 'not managed' in caplog.text
Example 6
def test_remove_not_managed(caplog):
    caplog.set_level(logging.DEBUG)

    leaser_ = leaser.Leaser(mock.sentinel.manager)

    leaser_.remove([requests.DropRequest(ack_id="ack1", byte_size=50)])

    assert "not managed" in caplog.text
Example 7
def test_drop():
    manager = mock.create_autospec(streaming_pull_manager.StreamingPullManager,
                                   instance=True)
    dispatcher_ = dispatcher.Dispatcher(manager, mock.sentinel.queue)

    items = [requests.DropRequest(ack_id="ack_id_string", byte_size=10)]
    dispatcher_.drop(items)

    manager.leaser.remove.assert_called_once_with(items)
    manager.maybe_resume_consumer.assert_called_once()
Example 8
def test_remove_negative_bytes(caplog):
    caplog.set_level(logging.DEBUG)

    leaser_ = leaser.Leaser(mock.sentinel.manager)

    leaser_.add([requests.LeaseRequest(ack_id="ack1", byte_size=50)])
    leaser_.remove([requests.DropRequest(ack_id="ack1", byte_size=75)])

    assert leaser_.bytes == 0
    assert "unexpectedly negative" in caplog.text
Example 9
def test_drop():
    msg = create_message(b'foo', ack_id='bogus_ack_id')
    with mock.patch.object(msg._request_queue, 'put') as put:
        msg.drop()
        put.assert_called_once_with(
            requests.DropRequest(
                ack_id='bogus_ack_id',
                byte_size=30,
            ))
        check_call_types(put, requests.DropRequest)
Example 10
    def nack(self, items):
        """Explicitly deny receipt of messages.

        Args:
            items(Sequence[NackRequest]): The items to deny.
        """
        self.modify_ack_deadline([
            requests.ModAckRequest(ack_id=item.ack_id, seconds=0)
            for item in items
        ])
        self.drop([requests.DropRequest(*item) for item in items])
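
The nack() helper above converts each NackRequest into a zero-second ModAckRequest plus a DropRequest. Below is a minimal test-style sketch of that contract; it is not taken from the examples on this page, and it assumes nack() lives on dispatcher.Dispatcher and uses the older two-field request tuples seen in Example 15:
def test_nack_converts_to_modack_and_drop():
    manager = mock.create_autospec(
        streaming_pull_manager.StreamingPullManager, instance=True)
    dispatcher_ = dispatcher.Dispatcher(manager, mock.sentinel.queue)

    items = [requests.NackRequest(ack_id='nacked', byte_size=10)]

    # Patch the two collaborators so we only observe what nack() hands them.
    with mock.patch.object(dispatcher_, 'modify_ack_deadline') as modack, \
            mock.patch.object(dispatcher_, 'drop') as drop:
        dispatcher_.nack(items)

    modack.assert_called_once_with(
        [requests.ModAckRequest(ack_id='nacked', seconds=0)])
    drop.assert_called_once_with(
        [requests.DropRequest(ack_id='nacked', byte_size=10)])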
Example 11
def test_add_and_remove():
    leaser_ = leaser.Leaser(mock.sentinel.subscriber)

    leaser_.add([requests.LeaseRequest(ack_id='ack1', byte_size=50)])
    leaser_.add([requests.LeaseRequest(ack_id='ack2', byte_size=25)])

    assert leaser_.message_count == 2
    assert set(leaser_.ack_ids) == set(['ack1', 'ack2'])
    assert leaser_.bytes == 75

    leaser_.remove([requests.DropRequest(ack_id='ack1', byte_size=50)])

    assert leaser_.message_count == 1
    assert set(leaser_.ack_ids) == set(['ack2'])
    assert leaser_.bytes == 25
Example 12
def test_add_and_remove():
    leaser_ = leaser.Leaser(mock.sentinel.manager)

    leaser_.add([requests.LeaseRequest(ack_id="ack1", byte_size=50)])
    leaser_.add([requests.LeaseRequest(ack_id="ack2", byte_size=25)])

    assert leaser_.message_count == 2
    assert set(leaser_.ack_ids) == set(["ack1", "ack2"])
    assert leaser_.bytes == 75

    leaser_.remove([requests.DropRequest(ack_id="ack1", byte_size=50)])

    assert leaser_.message_count == 1
    assert set(leaser_.ack_ids) == set(["ack2"])
    assert leaser_.bytes == 25
Example 13
    def drop(self):
        """Release the message from lease management.

        This informs the policy to no longer hold on to the lease for this
        message. Pub/Sub will re-deliver the message if it is not acknowledged
        before the existing lease expires.

        .. warning::
            For most use cases, the only reason to drop a message from
            lease management is on :meth:`ack` or :meth:`nack`; these methods
            both call this one. You probably do not want to call this method
            directly.
        """
        self._request_queue.put(
            requests.DropRequest(ack_id=self._ack_id, byte_size=self.size))
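
As the warning in the docstring says, application code normally reaches drop() only through ack() or nack(). The following is a hedged usage sketch of a streaming-pull callback that relies on that contract; handle() is a hypothetical placeholder and the subscription path is purely illustrative:
from google.cloud import pubsub_v1


def handle(data):
    # Hypothetical application logic; replace with real processing.
    print(data)


def callback(message):
    try:
        handle(message.data)
    except Exception:
        # nack() drops the lease so Pub/Sub will redeliver the message.
        message.nack()
    else:
        # ack() acknowledges the message, which also drops its lease.
        message.ack()


subscriber = pubsub_v1.SubscriberClient()
future = subscriber.subscribe(
    "projects/my-project/subscriptions/my-subscription", callback=callback)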
Example 14
def test_maintain_leases_outdated_items(time):
    manager = create_manager()
    leaser_ = leaser.Leaser(manager)
    make_sleep_mark_event_as_done(leaser_)

    # Add and start expiry timer at the beginning of the timeline.
    time.return_value = 0
    leaser_.add(
        [requests.LeaseRequest(ack_id="ack1", byte_size=50, ordering_key="")])
    leaser_.start_lease_expiry_timer(["ack1"])

    # Add a message but don't start the lease expiry timer.
    leaser_.add(
        [requests.LeaseRequest(ack_id="ack2", byte_size=50, ordering_key="")])

    # Add a message and start expiry timer towards the end of the timeline.
    time.return_value = manager.flow_control.max_lease_duration - 1
    leaser_.add(
        [requests.LeaseRequest(ack_id="ack3", byte_size=50, ordering_key="")])
    leaser_.start_lease_expiry_timer(["ack3"])

    # Add a message towards the end of the timeline, but DO NOT start expiry
    # timer.
    leaser_.add(
        [requests.LeaseRequest(ack_id="ack4", byte_size=50, ordering_key="")])

    # Now make sure time reports that we are past the end of our timeline.
    time.return_value = manager.flow_control.max_lease_duration + 1

    leaser_.maintain_leases()

    # ack2, ack3, and ack4 should be renewed. ack1 should've been dropped
    modacks = manager.dispatcher.modify_ack_deadline.call_args.args[0]
    expected = [
        requests.ModAckRequest(ack_id="ack2", seconds=10),
        requests.ModAckRequest(ack_id="ack3", seconds=10),
        requests.ModAckRequest(ack_id="ack4", seconds=10),
    ]
    # Use sorting to allow for ordering variance.
    assert sorted(modacks) == sorted(expected)

    manager.dispatcher.drop.assert_called_once_with(
        [requests.DropRequest(ack_id="ack1", byte_size=50, ordering_key="")])
Example 15
import threading

from google.cloud.pubsub_v1 import types
from google.cloud.pubsub_v1.subscriber._protocol import dispatcher
from google.cloud.pubsub_v1.subscriber._protocol import helper_threads
from google.cloud.pubsub_v1.subscriber._protocol import requests
from google.cloud.pubsub_v1.subscriber._protocol import streaming_pull_manager

import mock
from six.moves import queue
import pytest


@pytest.mark.parametrize('item,method_name', [
    (requests.AckRequest(0, 0, 0), 'ack'),
    (requests.DropRequest(0, 0), 'drop'),
    (requests.LeaseRequest(0, 0), 'lease'),
    (requests.ModAckRequest(0, 0), 'modify_ack_deadline'),
    (requests.NackRequest(0, 0), 'nack')
])
def test_dispatch_callback(item, method_name):
    manager = mock.create_autospec(
        streaming_pull_manager.StreamingPullManager, instance=True)
    dispatcher_ = dispatcher.Dispatcher(manager, mock.sentinel.queue)

    items = [item]

    with mock.patch.object(dispatcher_, method_name) as method:
        dispatcher_.dispatch_callback(items)

    method.assert_called_once_with([item])
Example 16
from google.cloud.pubsub_v1.subscriber._protocol import dispatcher
from google.cloud.pubsub_v1.subscriber._protocol import helper_threads
from google.cloud.pubsub_v1.subscriber._protocol import requests
from google.cloud.pubsub_v1.subscriber._protocol import streaming_pull_manager
from google.pubsub_v1 import types as gapic_types

import mock
import pytest


@pytest.mark.parametrize(
    "item,method_name",
    [
        (requests.AckRequest(0, 0, 0, ""), "ack"),
        (requests.DropRequest(0, 0, ""), "drop"),
        (requests.LeaseRequest(0, 0, ""), "lease"),
        (requests.ModAckRequest(0, 0), "modify_ack_deadline"),
        (requests.NackRequest(0, 0, ""), "nack"),
    ],
)
def test_dispatch_callback(item, method_name):
    manager = mock.create_autospec(streaming_pull_manager.StreamingPullManager,
                                   instance=True)
    dispatcher_ = dispatcher.Dispatcher(manager, mock.sentinel.queue)

    items = [item]

    with mock.patch.object(dispatcher_, method_name) as method:
        dispatcher_.dispatch_callback(items)

    method.assert_called_once_with([item])
Example 17
from google.cloud.pubsub_v1.subscriber._protocol import dispatcher
from google.cloud.pubsub_v1.subscriber._protocol import helper_threads
from google.cloud.pubsub_v1.subscriber._protocol import requests
from google.cloud.pubsub_v1.subscriber._protocol import streaming_pull_manager
from google.pubsub_v1 import types as gapic_types

import mock
import pytest


@pytest.mark.parametrize(
    "item,method_name",
    [
        (requests.AckRequest("0", 0, 0, ""), "ack"),
        (requests.DropRequest("0", 0, ""), "drop"),
        (requests.LeaseRequest("0", 0, ""), "lease"),
        (requests.ModAckRequest("0", 0), "modify_ack_deadline"),
        (requests.NackRequest("0", 0, ""), "nack"),
    ],
)
def test_dispatch_callback_active_manager(item, method_name):
    manager = mock.create_autospec(
        streaming_pull_manager.StreamingPullManager, instance=True
    )
    dispatcher_ = dispatcher.Dispatcher(manager, mock.sentinel.queue)

    items = [item]

    with mock.patch.object(dispatcher_, method_name) as method:
        dispatcher_.dispatch_callback(items)

    method.assert_called_once_with([item])
Example 18
    def maintain_leases(self):
        """Maintain all of the leases being managed.

        This method modifies the ack deadline for all of the managed
        ack IDs, then waits for most of that time (but with jitter), and
        repeats.
        """
        while self._manager.is_active and not self._stop_event.is_set():
            # Determine the appropriate duration for the lease. This is
            # based off of how long previous messages have taken to ack, with
            # a sensible default and within the ranges allowed by Pub/Sub.
            deadline = self._manager.ack_deadline
            _LOGGER.debug("The current deadline value is %d seconds.", deadline)

            # Make a copy of the leased messages. This is needed because it's
            # possible for another thread to modify the dictionary while
            # we're iterating over it.
            leased_messages = copy.copy(self._leased_messages)

            # Drop any leases that are beyond the max lease time. This ensures
            # that in the event of a badly behaving actor, we can drop messages
            # and allow the Pub/Sub server to resend them.
            cutoff = time.time() - self._manager.flow_control.max_lease_duration
            to_drop = [
                requests.DropRequest(ack_id, item.size, item.ordering_key)
                for ack_id, item in leased_messages.items()
                if item.sent_time < cutoff
            ]

            if to_drop:
                _LOGGER.warning(
                    "Dropping %s items because they were leased too long.", len(to_drop)
                )
                self._manager.dispatcher.drop(to_drop)

            # Remove dropped items from our copy of the leased messages (they
            # have already been removed from the real one by
            # self._manager.dispatcher.drop(), which calls self.remove()).
            for item in to_drop:
                leased_messages.pop(item.ack_id)

            # Create a streaming pull request.
            # We do not actually call `modify_ack_deadline` over and over
            # because it is more efficient to make a single request.
            ack_ids = leased_messages.keys()
            if ack_ids:
                _LOGGER.debug("Renewing lease for %d ack IDs.", len(ack_ids))

                # NOTE: This may not work as expected if ``consumer.active``
                #       has changed since we checked it. An implementation
                #       without any sort of race condition would require a
                #       way for ``send_request`` to fail when the consumer
                #       is inactive.
                self._manager.dispatcher.modify_ack_deadline(
                    [requests.ModAckRequest(ack_id, deadline) for ack_id in ack_ids]
                )

            # Now wait an appropriate period of time and do this again.
            #
            # We determine the appropriate period of time based on a random
            # period between 0 seconds and 90% of the lease. This use of
            # jitter (http://bit.ly/2s2ekL7) helps decrease contention in cases
            # where there are many clients.
            snooze = random.uniform(0.0, deadline * 0.9)
            _LOGGER.debug("Snoozing lease management for %f seconds.", snooze)
            self._stop_event.wait(timeout=snooze)

        _LOGGER.info("%s exiting.", _LEASE_WORKER_NAME)
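
The cutoff arithmetic in maintain_leases() is the same comparison exercised by test_maintain_leases_outdated_items in Example 3: any lease whose sent_time is older than max_lease_duration seconds gets a DropRequest, and the remaining ack IDs get a ModAckRequest. A standalone numeric sketch of just that comparison follows; the 3600-second duration is an illustrative value, not necessarily the library default:
import time

max_lease_duration = 3600  # illustrative value, in seconds
now = time.time()
cutoff = now - max_lease_duration

# sent_time for two fictitious leases: one overdue, one fresh.
sent_times = {'ack_old': now - 4000, 'ack_new': now - 60}

to_drop = [ack_id for ack_id, sent in sent_times.items() if sent < cutoff]
to_renew = [ack_id for ack_id in sent_times if ack_id not in to_drop]

assert to_drop == ['ack_old']   # leased longer than max_lease_duration
assert to_renew == ['ack_new']  # still within the window, gets a ModAck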