Example #1
    def _commit(self):
        """Actually publish all of the messages on the active batch.

        This moves the batch out from being the active batch to an in progress
        batch on the publisher, and then the batch is discarded upon
        completion.

        .. note::

            This method blocks. The :meth:`commit` method is the non-blocking
            version, which calls this one.
        """
        with self._state_lock:
            if self._status in _CAN_COMMIT:
                self._status = base.BatchStatus.IN_PROGRESS
            else:
                # If, in the intervening period between when this method was
                # called and now, the batch started to be committed, or
                # completed a commit, then no-op at this point.
                _LOGGER.debug(
                    "Batch is already in progress or has been cancelled, "
                    "exiting commit")
                return

        # Once in the IN_PROGRESS state, no other thread can publish additional
        # messages or initiate a commit (those operations become a no-op), thus
        # it is safe to release the state lock here. Releasing the lock avoids
        # blocking other threads in case api.publish() below takes a long time
        # to complete.
        # https://github.com/googleapis/google-cloud-python/issues/8036

        # Sanity check: If there are no messages, no-op.
        if not self._messages:
            _LOGGER.debug("No messages to publish, exiting commit")
            self._status = base.BatchStatus.SUCCESS
            return

        # Begin the request to publish these messages.
        # Log how long the underlying request takes.
        start = time.time()

        batch_transport_succeeded = True
        try:
            # Performs retries for errors defined by the retry configuration.
            response = self._client.api.publish(
                topic=self._topic,
                messages=self._messages,
                retry=self._commit_retry,
                timeout=self._commit_timeout,
            )
        except google.api_core.exceptions.GoogleAPIError as exc:
            # We failed to publish, even after retries, so set the exception on
            # all futures and exit.
            self._status = base.BatchStatus.ERROR

            for future in self._futures:
                future.set_exception(exc)

            batch_transport_succeeded = False
            if self._batch_done_callback is not None:
                # Failed to publish batch.
                self._batch_done_callback(batch_transport_succeeded)

            _LOGGER.exception("Failed to publish %s messages.",
                              len(self._futures))
            return

        end = time.time()
        _LOGGER.debug("gRPC Publish took %s seconds.", end - start)

        if len(response.message_ids) == len(self._futures):
            # Iterate over the futures on the queue and return the response
            # IDs. We are trusting that there is a 1:1 mapping, and raise
            # an exception if not.
            self._status = base.BatchStatus.SUCCESS
            for message_id, future in zip(response.message_ids, self._futures):
                future.set_result(message_id)
        else:
            # Sanity check: If the number of message IDs is not equal to
            # the number of futures I have, then something went wrong.
            self._status = base.BatchStatus.ERROR
            exception = exceptions.PublishError(
                "Some messages were not successfully published.")

            for future in self._futures:
                future.set_exception(exception)

            # Unknown error -> batch failed to be correctly transported.
            batch_transport_succeeded = False

            _LOGGER.error(
                "Only %s of %s messages were published.",
                len(response.message_ids),
                len(self._futures),
            )

        if self._batch_done_callback is not None:
            self._batch_done_callback(batch_transport_succeeded)
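
The commit in Example #1 hinges on a lock-guarded status transition: whichever thread flips the batch into IN_PROGRESS proceeds, every later caller no-ops, and the lock can therefore be released before the slow publish RPC (see the issue link in the comments above). The sketch below is only a minimal, self-contained illustration of that pattern; MiniBatch, _Status, and _publish are made-up names, not part of the google-cloud-pubsub API.

    import enum
    import threading


    class _Status(enum.Enum):
        # Illustrative states mirroring the ACCEPTING -> IN_PROGRESS -> SUCCESS flow above.
        ACCEPTING = "accepting"
        IN_PROGRESS = "in progress"
        SUCCESS = "success"


    class MiniBatch:
        """Illustrative batch: only one thread may move it into IN_PROGRESS."""

        def __init__(self):
            self._state_lock = threading.Lock()
            self._status = _Status.ACCEPTING
            self._messages = []

        def commit(self):
            with self._state_lock:
                if self._status is not _Status.ACCEPTING:
                    # Another thread already started (or finished) the commit.
                    return
                self._status = _Status.IN_PROGRESS
            # The lock is released here, so other callers are never blocked while
            # the slow transport call runs; they simply no-op on the status check.
            self._publish(self._messages)
            self._status = _Status.SUCCESS

        def _publish(self, messages):
            pass  # stand-in for the real api.publish() call

Holding the lock only for the status flip is what the comment in Example #1 is about: other threads are not blocked while the transport call is in flight.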
Example #2

    def _commit(self):
        """Actually publish all of the messages on the active batch.

        This moves the batch out from being the active batch to an in progress
        batch on the publisher, and then the batch is discarded upon
        completion.

        .. note::

            This method blocks. The :meth:`commit` method is the non-blocking
            version, which calls this one.
        """
        with self._state_lock:
            if self._status in _CAN_COMMIT:
                self._status = base.BatchStatus.IN_PROGRESS
            else:
                # If, in the intervening period between when this method was
                # called and now, the batch started to be committed, or
                # completed a commit, then no-op at this point.
                _LOGGER.debug("Batch is already in progress, exiting commit")
                return

            # Sanity check: If there are no messages, no-op.
            if not self._messages:
                _LOGGER.debug("No messages to publish, exiting commit")
                self._status = base.BatchStatus.SUCCESS
                return

            # Begin the request to publish these messages.
            # Log how long the underlying request takes.
            start = time.time()

            try:
                response = self._client.api.publish(self._topic, self._messages)
            except google.api_core.exceptions.GoogleAPIError as exc:
                # We failed to publish, set the exception on all futures and
                # exit.
                self._status = base.BatchStatus.ERROR

                for future in self._futures:
                    future.set_exception(exc)

                _LOGGER.exception("Failed to publish %s messages.", len(self._futures))
                return

            end = time.time()
            _LOGGER.debug("gRPC Publish took %s seconds.", end - start)

            if len(response.message_ids) == len(self._futures):
                # Iterate over the futures on the queue and return the response
                # IDs. We are trusting that there is a 1:1 mapping, and raise
                # an exception if not.
                self._status = base.BatchStatus.SUCCESS
                zip_iter = six.moves.zip(response.message_ids, self._futures)
                for message_id, future in zip_iter:
                    future.set_result(message_id)
            else:
                # Sanity check: If the number of message IDs is not equal to
                # the number of futures I have, then something went wrong.
                self._status = base.BatchStatus.ERROR
                exception = exceptions.PublishError(
                    "Some messages were not successfully published."
                )

                for future in self._futures:
                    future.set_exception(exception)

                _LOGGER.error(
                    "Only %s of %s messages were published.",
                    len(response.message_ids),
                    len(self._futures),
                )