def publish(self, message):
    """Publish a single message.

    Queue the given message on this batch; the batch will be transmitted
    once it holds enough messages or once a sufficient period of time has
    elapsed. If the batch is full, or a commit is already in progress,
    nothing is queued.

    This method is called by :meth:`~.PublisherClient.publish`.

    Args:
        message (~.pubsub_v1.types.PubsubMessage): The Pub/Sub message.

    Returns:
        Optional[~google.api_core.future.Future]: An object conforming to
        the :class:`~concurrent.futures.Future` interface or :data:`None`.
        If :data:`None` is returned, that signals that the batch cannot
        accept a message.

    Raises:
        pubsub_v1.publisher.exceptions.MessageTooLargeError: If publishing
            the ``message`` would exceed the max size limit on the backend.
    """
    # Coerce the type, just in case.
    if not isinstance(message, gapic_types.PubsubMessage):
        # For performance reasons, build the raw protobuf message first and
        # only then wrap it in the higher-level PubsubMessage class.
        raw_pb = _raw_proto_pubbsub_message(**message)
        message = gapic_types.PubsubMessage.wrap(raw_pb)

    future = None

    with self._state_lock:
        assert (
            self._status != base.BatchStatus.ERROR
        ), "Publish after stop() or publish error."

        if self.status != base.BatchStatus.ACCEPTING_MESSAGES:
            return

        # How many bytes this message adds to the outgoing publish request.
        added_bytes = gapic_types.PublishRequest(messages=[message])._pb.ByteSize()

        # Reject outright anything that could never fit in a single request,
        # regardless of batching.
        if (self._base_request_size + added_bytes) > _SERVER_PUBLISH_MAX_BYTES:
            err_msg = (
                "The message being published would produce too large a publish "
                "request that would exceed the maximum allowed size on the "
                "backend ({} bytes).".format(_SERVER_PUBLISH_MAX_BYTES)
            )
            raise exceptions.MessageTooLargeError(err_msg)

        prospective_size = self._size + added_bytes
        prospective_count = len(self._messages) + 1

        byte_limit = min(self.settings.max_bytes, _SERVER_PUBLISH_MAX_BYTES)
        would_overflow = (
            prospective_size > byte_limit
            or prospective_count >= self.settings.max_messages
        )

        # Accept the message if it fits, or unconditionally if the batch is
        # still empty (its size against the server cap was checked above).
        accepted = not would_overflow or not self._messages
        if accepted:
            # Store the actual message in the batch's message queue.
            self._messages.append(message)
            self._size = prospective_size

            # Track the future on this batch (so that the result of the
            # future can be set).
            future = futures.Future()
            self._futures.append(future)

    # Try to commit, but it must be **without** the lock held, since
    # ``commit()`` will try to obtain the lock.
    if self._commit_when_full and would_overflow:
        self.commit()

    return future
def publish(self, message):
    """Publish a single message.

    Queue the given message on this batch; the batch will be transmitted
    once it holds enough messages or once a sufficient period of time has
    elapsed. If the batch is full, or a commit is already in progress,
    nothing is queued.

    This method is called by :meth:`~.PublisherClient.publish`.

    Args:
        message (~.pubsub_v1.types.PubsubMessage): The Pub/Sub message.

    Returns:
        Optional[~google.api_core.future.Future]: An object conforming to
        the :class:`~concurrent.futures.Future` interface or :data:`None`.
        If :data:`None` is returned, that signals that the batch cannot
        accept a message.

    Raises:
        pubsub_v1.publisher.exceptions.MessageTooLargeError: If publishing
            the ``message`` would exceed the max size limit on the backend.
    """
    # Coerce the type, just in case.
    if not isinstance(message, types.PubsubMessage):
        message = types.PubsubMessage(**message)

    future = None

    with self._state_lock:
        if not self.will_accept(message):
            return future

        # How many bytes this message adds to the outgoing publish request.
        added_bytes = types.PublishRequest(messages=[message]).ByteSize()

        # Reject outright anything that could never fit in a single request,
        # regardless of batching.
        if (self._base_request_size + added_bytes) > _SERVER_PUBLISH_MAX_BYTES:
            err_msg = (
                "The message being published would produce too large a publish "
                "request that would exceed the maximum allowed size on the "
                "backend ({} bytes).".format(_SERVER_PUBLISH_MAX_BYTES)
            )
            raise exceptions.MessageTooLargeError(err_msg)

        prospective_size = self._size + added_bytes
        prospective_count = len(self._messages) + 1

        byte_limit = min(self.settings.max_bytes, _SERVER_PUBLISH_MAX_BYTES)
        would_overflow = (
            prospective_size > byte_limit
            or prospective_count >= self.settings.max_messages
        )

        # Accept the message if it fits, or unconditionally if the batch is
        # still empty (its size against the server cap was checked above).
        accepted = not would_overflow or not self._messages
        if accepted:
            # Store the actual message in the batch's message queue.
            self._messages.append(message)
            self._size = prospective_size

            # Track the future on this batch (so that the result of the
            # future can be set).
            future = futures.Future(completed=threading.Event())
            self._futures.append(future)

    # Try to commit, but it must be **without** the lock held, since
    # ``commit()`` will try to obtain the lock.
    if would_overflow:
        self.commit()

    return future