# Imports assumed by the excerpts below; the snippets are drawn from the
# google-cloud-pubsub publisher (thread.Batch) and its unit tests. Note that
# _SERVER_PUBLISH_MAX_BYTES and _raw_proto_pubbsub_message, used further down,
# are module-level names inside google.cloud.pubsub_v1.publisher._batch.thread.
import threading

from unittest import mock

import pytest

from google.api_core import gapic_v1
from google.cloud.pubsub_v1.publisher import exceptions, futures
from google.cloud.pubsub_v1.publisher._batch import base, thread
from google.pubsub_v1 import types as gapic_types


def test_publish_updating_batch_size():
    batch = create_batch(topic="topic_foo")
    messages = (
        gapic_types.PubsubMessage(data=b"foobarbaz"),
        gapic_types.PubsubMessage(data=b"spameggs"),
        gapic_types.PubsubMessage(data=b"1335020400"),
    )

    # Publish each of the messages, which should save them to the batch.
    futures = [batch.publish(message) for message in messages]

    # There should be three messages on the batch, and three futures.
    assert len(batch.messages) == 3
    assert batch._futures == futures

    # The size should have been incremented by the sum of the size
    # contributions of each message to the PublishRequest.
    base_request_size = gapic_types.PublishRequest(
        topic="topic_foo")._pb.ByteSize()
    expected_request_size = base_request_size + sum(
        gapic_types.PublishRequest(messages=[msg])._pb.ByteSize()
        for msg in messages)

    assert batch.size == expected_request_size
    assert batch.size > 0  # I do not always trust protobuf.
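
These tests rely on a create_batch helper that the excerpt does not include. A minimal sketch of such a fixture, assuming the class under test is google.cloud.pubsub_v1.publisher._batch.thread.Batch (whose __init__ and publish methods are excerpted below) and that a mocked client suffices:

from google.cloud.pubsub_v1 import types
from google.cloud.pubsub_v1.publisher import Client
from google.cloud.pubsub_v1.publisher._batch.thread import Batch


def create_batch(topic="topic_name", commit_when_full=True, **batch_settings):
    """Return a Batch wired to a mocked client so that no RPCs are issued."""
    client = mock.create_autospec(Client, instance=True, spec_set=True)
    settings = types.BatchSettings(**batch_settings)
    return Batch(client, topic, settings, commit_when_full=commit_when_full)
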
Example #2
    def __init__(
        self,
        client,
        topic,
        settings,
        batch_done_callback=None,
        commit_when_full=True,
        commit_retry=gapic_v1.method.DEFAULT,
        commit_timeout: gapic_types.TimeoutType = gapic_v1.method.DEFAULT,
    ):
        self._client = client
        self._topic = topic
        self._settings = settings
        self._batch_done_callback = batch_done_callback
        self._commit_when_full = commit_when_full

        self._state_lock = threading.Lock()
        # These members are all communicated between threads; ensure that
        # any writes to them use the "state lock" to remain atomic.
        # The _futures list must remain unchanged once the batch status
        # leaves ACCEPTING_MESSAGES, in order to avoid race conditions.
        self._futures = []
        self._messages = []
        self._status = base.BatchStatus.ACCEPTING_MESSAGES

        # The initial size is not zero; we need to account for the size
        # overhead of the PublishRequest message itself.
        self._base_request_size = gapic_types.PublishRequest(
            topic=topic)._pb.ByteSize()
        self._size = self._base_request_size

        self._commit_retry = commit_retry
        self._commit_timeout = commit_timeout
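
The constructor seeds the running size with the serialized overhead of an otherwise-empty PublishRequest, so that later size checks reflect the full request that goes over the wire. A quick illustration of the bookkeeping (the topic name here is arbitrary):

base = gapic_types.PublishRequest(topic="projects/p/topics/t")._pb.ByteSize()
msg = gapic_types.PubsubMessage(data=b"hello")
delta = gapic_types.PublishRequest(messages=[msg])._pb.ByteSize()

# A batch holding just this message reports size == base + delta, mirroring
# self._size = self._base_request_size + size_increase in publish() below.
assert base > 0 and delta > len(b"hello")
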
@mock.patch.object(thread, "_SERVER_PUBLISH_MAX_BYTES", 1000)
def test_publish_single_message_size_exceeds_server_size_limit():
    batch = create_batch(
        topic="topic_foo",
        max_messages=1000,
        max_bytes=1000 * 1000,  # way larger than (mocked) server side limit
    )

    big_message = gapic_types.PubsubMessage(data=b"x" * 984)

    request_size = gapic_types.PublishRequest(topic="topic_foo",
                                              messages=[big_message
                                                        ])._pb.ByteSize()
    assert request_size == 1001  # sanity check, just above the (mocked) server limit

    with pytest.raises(exceptions.MessageTooLargeError):
        batch.publish(big_message)
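
For reference, the 1001 figure is plain protobuf framing arithmetic: the topic "topic_foo" serializes to 1 (tag) + 1 (length) + 9 = 11 bytes; the 984-byte payload costs 1 + 2 + 984 = 987 bytes inside the PubsubMessage, which in turn costs 1 + 2 + 987 = 990 bytes as a messages entry of the PublishRequest; 11 + 990 = 1001.
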
@mock.patch.object(thread, "_SERVER_PUBLISH_MAX_BYTES", 1000)
def test_publish_total_messages_size_exceeds_server_size_limit():
    batch = create_batch(topic="topic_foo", max_messages=10, max_bytes=1500)

    messages = (
        gapic_types.PubsubMessage(data=b"x" * 500),
        gapic_types.PubsubMessage(data=b"x" * 600),
    )

    # Sanity check - request size is still below BatchSettings.max_bytes,
    # but it exceeds the server-side size limit.
    request_size = gapic_types.PublishRequest(
        topic="topic_foo", messages=messages)._pb.ByteSize()
    assert 1000 < request_size < 1500

    with mock.patch.object(batch, "commit") as fake_commit:
        batch.publish(messages[0])
        batch.publish(messages[1])

    # The server side limit should kick in and cause a commit.
    fake_commit.assert_called_once()
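
To see why the second publish() call trips the overflow, redo its arithmetic by hand (assuming, as these tests do, that the server-side limit is patched down to 1000 bytes):

m1 = gapic_types.PubsubMessage(data=b"x" * 500)
m2 = gapic_types.PubsubMessage(data=b"x" * 600)

size_limit = min(1500, 1000)  # min(BatchSettings.max_bytes, patched server limit)
base = gapic_types.PublishRequest(topic="topic_foo")._pb.ByteSize()
d1 = gapic_types.PublishRequest(messages=[m1])._pb.ByteSize()
d2 = gapic_types.PublishRequest(messages=[m2])._pb.ByteSize()

assert base + d1 <= size_limit      # the first message is accepted
assert base + d1 + d2 > size_limit  # the second overflows, so publish() commits
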
Example #5
    def publish(self, message):
        """Publish a single message.

        Add the given message to this object; this will cause it to be
        published once the batch either has enough messages or a sufficient
        period of time has elapsed. If the batch is full or the commit is
        already in progress, the message is not accepted and :data:`None` is
        returned.

        This method is called by :meth:`~.PublisherClient.publish`.

        Args:
            message (~.pubsub_v1.types.PubsubMessage): The Pub/Sub message.

        Returns:
            Optional[~google.api_core.future.Future]: An object conforming to
            the :class:`~concurrent.futures.Future` interface or :data:`None`.
            If :data:`None` is returned, that signals that the batch cannot
            accept a message.

        Raises:
            pubsub_v1.publisher.exceptions.MessageTooLargeError: If publishing
                the ``message`` would exceed the max size limit on the backend.
        """

        # Coerce the type, just in case.
        if not isinstance(message, gapic_types.PubsubMessage):
            # For performance reasons, the message should be constructed by directly
            # using the raw protobuf class, and only then wrapping it into the
            # higher-level PubsubMessage class.
            vanilla_pb = _raw_proto_pubbsub_message(**message)
            message = gapic_types.PubsubMessage.wrap(vanilla_pb)

        future = None

        with self._state_lock:
            assert (
                self._status != base.BatchStatus.ERROR
            ), "Publish after stop() or publish error."

            if self.status != base.BatchStatus.ACCEPTING_MESSAGES:
                return

            size_increase = gapic_types.PublishRequest(
                messages=[message])._pb.ByteSize()

            if self._base_request_size + size_increase > _SERVER_PUBLISH_MAX_BYTES:
                err_msg = (
                    "The message being published would produce too large a publish "
                    "request that would exceed the maximum allowed size on the "
                    "backend ({} bytes).".format(_SERVER_PUBLISH_MAX_BYTES))
                raise exceptions.MessageTooLargeError(err_msg)

            new_size = self._size + size_increase
            new_count = len(self._messages) + 1

            size_limit = min(self.settings.max_bytes,
                             _SERVER_PUBLISH_MAX_BYTES)
            overflow = new_size > size_limit or new_count >= self.settings.max_messages

            if not self._messages or not overflow:
                # Store the actual message in the batch's message queue.
                self._messages.append(message)
                self._size = new_size

                # Track the future on this batch (so that the result of the
                # future can be set).
                future = futures.Future()
                self._futures.append(future)

        # Try to commit, but it must be **without** the lock held, since
        # ``commit()`` will try to obtain the lock.
        if self._commit_when_full and overflow:
            self.commit()

        return future
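
A minimal end-to-end sketch of this flow, reusing the create_batch fixture sketched earlier (the topic name and limits are arbitrary; in the real library, PublisherClient.publish drives this loop and reacts to a None return by rolling the message over to a fresh batch):

batch = create_batch(topic="projects/p/topics/t", max_messages=2)

with mock.patch.object(batch, "commit") as fake_commit:
    f1 = batch.publish(gapic_types.PubsubMessage(data=b"first"))
    f2 = batch.publish(gapic_types.PubsubMessage(data=b"second"))  # hits max_messages

# The second call takes the overflow branch: the message is not stored, no
# future is returned, and commit() fires because commit_when_full is True.
assert f1 is not None and f2 is None
fake_commit.assert_called_once()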