Example No. 1
def connstr_receivers(live_eventhub):
    connection_str = live_eventhub["connection_str"]
    partitions = [str(i) for i in range(PARTITION_COUNT)]
    receivers = []
    for p in partitions:
        uri = "sb://{}/{}".format(live_eventhub['hostname'], live_eventhub['event_hub'])
        sas_auth = SASTokenAuth.from_shared_access_key(
            uri, live_eventhub['key_name'], live_eventhub['access_key'])

        source = "amqps://{}/{}/ConsumerGroups/{}/Partitions/{}".format(
            live_eventhub['hostname'],
            live_eventhub['event_hub'],
            live_eventhub['consumer_group'],
            p)
        receiver = ReceiveClient(source, auth=sas_auth, debug=False, timeout=0, prefetch=500)
        receiver.open()
        receivers.append(receiver)
    yield connection_str, receivers
    for r in receivers:
        r.close()
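
# A minimal usage sketch for the fixture above, assuming it is registered as a
# pytest fixture (e.g. decorated with @pytest.fixture) and that a `live_eventhub`
# fixture supplies hostname, event hub name, consumer group and SAS credentials.
# The test name, batch size and timeout values are illustrative assumptions.
def test_receive_from_all_partitions(connstr_receivers):
    connection_str, receivers = connstr_receivers
    # Each receiver is an already-opened uamqp ReceiveClient bound to one partition.
    for receiver in receivers:
        # Pull whatever is currently buffered, waiting up to 5 seconds (timeout is in ms).
        batch = receiver.receive_message_batch(max_batch_size=10, timeout=5000)
        for message in batch:
            print(message)
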
class Receiver(object):
    """
    Implements a Receiver.
    """
    timeout = 0
    _epoch = b'com.microsoft:epoch'

    def __init__(self, client, source, offset=None, prefetch=300, epoch=None, keep_alive=None, auto_reconnect=True):
        """
        Instantiate a receiver.

        :param client: The parent EventHubClient.
        :type client: ~azure.eventhub.client.EventHubClient
        :param source: The source EventHub from which to receive events.
        :type source: str
        :param prefetch: The number of events to prefetch from the service
         for processing. Default is 300.
        :type prefetch: int
        :param epoch: An optional epoch value.
        :type epoch: int
        """
        self.running = False
        self.client = client
        self.source = source
        self.offset = offset
        self.prefetch = prefetch
        self.epoch = epoch
        self.keep_alive = keep_alive
        self.auto_reconnect = auto_reconnect
        self.retry_policy = errors.ErrorPolicy(max_retries=3, on_error=_error_handler)
        self.reconnect_backoff = 1
        self.properties = None
        self.redirected = None
        self.error = None
        partition = self.source.split('/')[-1]
        self.name = "EHReceiver-{}-partition{}".format(uuid.uuid4(), partition)
        source = Source(self.source)
        if self.offset is not None:
            source.set_filter(self.offset.selector())
        if epoch:
            self.properties = {types.AMQPSymbol(self._epoch): types.AMQPLong(int(epoch))}
        self._handler = ReceiveClient(
            source,
            auth=self.client.get_auth(),
            debug=self.client.debug,
            prefetch=self.prefetch,
            link_properties=self.properties,
            timeout=self.timeout,
            error_policy=self.retry_policy,
            keep_alive_interval=self.keep_alive,
            client_name=self.name,
            properties=self.client.create_properties())

    def open(self):
        """
        Open the Receiver using the supplied connection.
        If the handler has previously been redirected, the redirect
        context will be used to create a new handler before opening it.

        :param connection: The underlying client shared connection.
        :type connection: ~uamqp.connection.Connection
        """
        # pylint: disable=protected-access
        self.running = True
        if self.redirected:
            self.source = self.redirected.address
            source = Source(self.source)
            if self.offset is not None:
                source.set_filter(self.offset.selector())
            alt_creds = {
                "username": self.client._auth_config.get("iot_username"),
                "password":self.client._auth_config.get("iot_password")}
            self._handler = ReceiveClient(
                source,
                auth=self.client.get_auth(**alt_creds),
                debug=self.client.debug,
                prefetch=self.prefetch,
                link_properties=self.properties,
                timeout=self.timeout,
                error_policy=self.retry_policy,
                keep_alive_interval=self.keep_alive,
                client_name=self.name,
                properties=self.client.create_properties())
        self._handler.open()
        while not self._handler.client_ready():
            time.sleep(0.05)

    def _reconnect(self):  # pylint: disable=too-many-statements
        # pylint: disable=protected-access
        alt_creds = {
            "username": self.client._auth_config.get("iot_username"),
            "password": self.client._auth_config.get("iot_password")}
        self._handler.close()
        source = Source(self.source)
        if self.offset is not None:
            source.set_filter(self.offset.selector())
        self._handler = ReceiveClient(
            source,
            auth=self.client.get_auth(**alt_creds),
            debug=self.client.debug,
            prefetch=self.prefetch,
            link_properties=self.properties,
            timeout=self.timeout,
            error_policy=self.retry_policy,
            keep_alive_interval=self.keep_alive,
            client_name=self.name,
            properties=self.client.create_properties())
        try:
            self._handler.open()
            while not self._handler.client_ready():
                time.sleep(0.05)
            return True
        except errors.TokenExpired as shutdown:
            log.info("Receiver disconnected due to token expiry. Shutting down.")
            error = EventHubError(str(shutdown), shutdown)
            self.close(exception=error)
            raise error
        except (errors.LinkDetach, errors.ConnectionClose) as shutdown:
            if shutdown.action.retry and self.auto_reconnect:
                log.info("Receiver detached. Attempting reconnect.")
                return False
            log.info("Receiver detached. Shutting down.")
            error = EventHubError(str(shutdown), shutdown)
            self.close(exception=error)
            raise error
        except errors.MessageHandlerError as shutdown:
            if self.auto_reconnect:
                log.info("Receiver detached. Attempting reconnect.")
                return False
            log.info("Receiver detached. Shutting down.")
            error = EventHubError(str(shutdown), shutdown)
            self.close(exception=error)
            raise error
        except errors.AMQPConnectionError as shutdown:
            if str(shutdown).startswith("Unable to open authentication session") and self.auto_reconnect:
                log.info("Receiver couldn't authenticate. Attempting reconnect.")
                return False
            log.info("Receiver connection error (%r). Shutting down.", shutdown)
            error = EventHubError(str(shutdown))
            self.close(exception=error)
            raise error
        except Exception as e:
            log.info("Unexpected error occurred (%r). Shutting down.", e)
            error = EventHubError("Receiver reconnect failed: {}".format(e))
            self.close(exception=error)
            raise error

    def reconnect(self):
        """If the Receiver was disconnected from the service with
        a retryable error - attempt to reconnect."""
        while not self._reconnect():
            time.sleep(self.reconnect_backoff)

    def get_handler_state(self):
        """
        Get the state of the underlying handler with regard to its
        start-up process.

        :rtype: ~uamqp.constants.MessageReceiverState
        """
        # pylint: disable=protected-access
        return self._handler._message_receiver.get_state()

    def has_started(self):
        """
        Whether the handler has completed all start-up processes, such as
        establishing the connection, session, link and authentication, and
        is now ready to process messages.
        **This function is now deprecated and will be removed in v2.0+.**

        :rtype: bool
        """
        # pylint: disable=protected-access
        timeout = False
        auth_in_progress = False
        if self._handler._connection.cbs:
            timeout, auth_in_progress = self._handler._auth.handle_token()
        if timeout:
            raise EventHubError("Authorization timeout.")
        if auth_in_progress:
            return False
        if not self._handler._client_ready():
            return False
        return True

    def close(self, exception=None):
        """
        Close down the handler. If the handler has already closed,
        this will be a no op. An optional exception can be passed in to
        indicate that the handler was shutdown due to error.

        :param exception: An optional exception if the handler is closing
         due to an error.
        :type exception: Exception
        """
        self.running = False
        if self.error:
            return
        if isinstance(exception, errors.LinkRedirect):
            self.redirected = exception
        elif isinstance(exception, EventHubError):
            self.error = exception
        elif exception:
            self.error = EventHubError(str(exception))
        else:
            self.error = EventHubError("This receive handler is now closed.")
        self._handler.close()

    @property
    def queue_size(self):
        """
        The current size of the unprocessed Event queue.

        :rtype: int
        """
        # pylint: disable=protected-access
        if self._handler._received_messages:
            return self._handler._received_messages.qsize()
        return 0

    def receive(self, max_batch_size=None, timeout=None):
        """
        Receive events from the EventHub.

        :param max_batch_size: Receive a batch of events. Batch size will
         be up to the maximum specified, but will return as soon as the service
         returns no new events. If combined with a timeout and no events are
         retrieved before the timeout, the result will be empty. If no batch
         size is supplied, the prefetch size will be the maximum.
        :type max_batch_size: int
        :rtype: list[~azure.eventhub.common.EventData]
        """
        if self.error:
            raise self.error
        if not self.running:
            raise ValueError("Unable to receive until client has been started.")
        data_batch = []
        try:
            timeout_ms = 1000 * timeout if timeout else 0
            message_batch = self._handler.receive_message_batch(
                max_batch_size=max_batch_size,
                timeout=timeout_ms)
            for message in message_batch:
                event_data = EventData(message=message)
                self.offset = event_data.offset
                data_batch.append(event_data)
            return data_batch
        except (errors.TokenExpired, errors.AuthenticationException):
            log.info("Receiver disconnected due to token error. Attempting reconnect.")
            self.reconnect()
            return data_batch
        except (errors.LinkDetach, errors.ConnectionClose) as shutdown:
            if shutdown.action.retry and self.auto_reconnect:
                log.info("Receiver detached. Attempting reconnect.")
                self.reconnect()
                return data_batch
            log.info("Receiver detached. Shutting down.")
            error = EventHubError(str(shutdown), shutdown)
            self.close(exception=error)
            raise error
        except errors.MessageHandlerError as shutdown:
            if self.auto_reconnect:
                log.info("Receiver detached. Attempting reconnect.")
                self.reconnect()
                return data_batch
            log.info("Receiver detached. Shutting down.")
            error = EventHubError(str(shutdown), shutdown)
            self.close(exception=error)
            raise error
        except Exception as e:
            log.info("Unexpected error occurred (%r). Shutting down.", e)
            error = EventHubError("Receive failed: {}".format(e))
            self.close(exception=error)
            raise error
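
# A minimal sketch of how a Receiver like the one above is typically driven,
# assuming the azure-eventhub v1.x EventHubClient, which exposes
# add_receiver()/run()/stop(). The connection string, event hub name,
# consumer group and partition values are placeholders.
from azure.eventhub import EventHubClient

client = EventHubClient.from_connection_string(
    "<eventhub connection string>", eventhub="<eventhub name>")
receiver = client.add_receiver("$Default", partition="0", prefetch=300)
client.run()  # opens the shared connection and starts all added receivers
try:
    while True:
        events = receiver.receive(max_batch_size=100, timeout=5)
        if not events:
            break
        for event in events:
            print(event.sequence_number, event.body_as_str())
finally:
    client.stop()
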
Example No. 3
class EventHubConsumer(object):
    """
    A consumer responsible for reading EventData from a specific Event Hub
     partition and as a member of a specific consumer group.

    A consumer may be exclusive, which asserts ownership over the partition for the consumer
     group to ensure that only one consumer from that group is reading from the partition.
     These exclusive consumers are sometimes referred to as "Epoch Consumers."

    A consumer may also be non-exclusive, allowing multiple consumers from the same consumer
     group to be actively reading events from the partition.  These non-exclusive consumers are
     sometimes referred to as "Non-Epoch Consumers."

    """
    timeout = 0
    _epoch = b'com.microsoft:epoch'

    def __init__(self,
                 client,
                 source,
                 event_position=None,
                 prefetch=300,
                 owner_level=None,
                 keep_alive=None,
                 auto_reconnect=True):
        """
        Instantiate a consumer. EventHubConsumer should be instantiated by calling the `create_consumer` method
         in EventHubClient.

        :param client: The parent EventHubClient.
        :type client: ~azure.eventhub.client.EventHubClient
        :param source: The source EventHub from which to receive events.
        :type source: str
        :param prefetch: The number of events to prefetch from the service
         for processing. Default is 300.
        :type prefetch: int
        :param owner_level: The priority of the exclusive consumer. The consumer will be
         an exclusive consumer if owner_level is set.
        :type owner_level: int
        """
        self.running = False
        self.client = client
        self.source = source
        self.offset = event_position
        self.messages_iter = None
        self.prefetch = prefetch
        self.owner_level = owner_level
        self.keep_alive = keep_alive
        self.auto_reconnect = auto_reconnect
        self.retry_policy = errors.ErrorPolicy(
            max_retries=self.client.config.max_retries,
            on_error=_error_handler)
        self.reconnect_backoff = 1
        self.properties = None
        self.redirected = None
        self.error = None
        partition = self.source.split('/')[-1]
        self.name = "EHReceiver-{}-partition{}".format(uuid.uuid4(), partition)
        source = Source(self.source)
        if self.offset is not None:
            source.set_filter(self.offset._selector())  # pylint: disable=protected-access
        if owner_level:
            self.properties = {
                types.AMQPSymbol(self._epoch): types.AMQPLong(int(owner_level))
            }
        self._handler = ReceiveClient(
            source,
            auth=self.client.get_auth(),
            debug=self.client.config.network_tracing,
            prefetch=self.prefetch,
            link_properties=self.properties,
            timeout=self.timeout,
            error_policy=self.retry_policy,
            keep_alive_interval=self.keep_alive,
            client_name=self.name,
            properties=self.client._create_properties(
                self.client.config.user_agent))  # pylint: disable=protected-access

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close(exc_val)

    def __iter__(self):
        return self

    def __next__(self):
        self._open()
        max_retries = self.client.config.max_retries
        connecting_count = 0
        while True:
            connecting_count += 1
            try:
                if not self.messages_iter:
                    self.messages_iter = self._handler.receive_messages_iter()
                message = next(self.messages_iter)
                event_data = EventData(message=message)
                self.offset = EventPosition(event_data.offset, inclusive=False)
                return event_data
            except errors.AuthenticationException as auth_error:
                if connecting_count < max_retries:
                    log.info(
                        "EventHubConsumer disconnected due to token error. Attempting reconnect."
                    )
                    self._reconnect()
                else:
                    log.info(
                        "EventHubConsumer authentication failed. Shutting down."
                    )
                    error = AuthenticationError(str(auth_error), auth_error)
                    self.close(auth_error)
                    raise error
            except (errors.LinkDetach, errors.ConnectionClose) as shutdown:
                if shutdown.action.retry and self.auto_reconnect:
                    log.info(
                        "EventHubConsumer detached. Attempting reconnect.")
                    self._reconnect()
                else:
                    log.info("EventHubConsumer detached. Shutting down.")
                    error = ConnectionLostError(str(shutdown), shutdown)
                    self.close(exception=error)
                    raise error
            except errors.MessageHandlerError as shutdown:
                if connecting_count < max_retries:
                    log.info(
                        "EventHubConsumer detached. Attempting reconnect.")
                    self._reconnect()
                else:
                    log.info("EventHubConsumer detached. Shutting down.")
                    error = ConnectionLostError(str(shutdown), shutdown)
                    self.close(error)
                    raise error
            except errors.AMQPConnectionError as shutdown:
                if connecting_count < max_retries:
                    log.info(
                        "EventHubConsumer connection lost. Attempting reconnect."
                    )
                    self._reconnect()
                else:
                    log.info(
                        "EventHubConsumer connection lost. Shutting down.")
                    error = ConnectionLostError(str(shutdown), shutdown)
                    self.close(error)
                    raise error
            except compat.TimeoutException as shutdown:
                if connecting_count < max_retries:
                    log.info(
                        "EventHubConsumer timed out receiving event data. Attempting reconnect."
                    )
                    self._reconnect()
                else:
                    log.info("EventHubConsumer timed out. Shutting down.")
                    self.close(shutdown)
                    raise ConnectionLostError(str(shutdown), shutdown)
            except StopIteration:
                raise
            except KeyboardInterrupt:
                log.info("EventHubConsumer stops due to keyboard interrupt")
                self.close()
                raise
            except Exception as e:
                log.error("Unexpected error occurred (%r). Shutting down.", e)
                error = EventHubError("Receive failed: {}".format(e), e)
                self.close(exception=error)
                raise error

    def _check_closed(self):
        if self.error:
            raise EventHubError(
                "This consumer has been closed. Please create a new consumer to receive event data.",
                self.error)

    def _redirect(self, redirect):
        self.redirected = redirect
        self.running = False
        self.messages_iter = None
        self._open()

    def _open(self):
        """
        Open the EventHubConsumer using the supplied connection.
        If the handler has previously been redirected, the redirect
        context will be used to create a new handler before opening it.

        """
        # pylint: disable=protected-access
        self._check_closed()
        if self.redirected:
            self.client._process_redirect_uri(self.redirected)
            self.source = self.redirected.address
            source = Source(self.source)
            if self.offset is not None:
                source.set_filter(self.offset._selector())

            alt_creds = {
                "username": self.client._auth_config.get("iot_username"),
                "password": self.client._auth_config.get("iot_password")
            }
            self._handler = ReceiveClient(
                source,
                auth=self.client.get_auth(**alt_creds),
                debug=self.client.config.network_tracing,
                prefetch=self.prefetch,
                link_properties=self.properties,
                timeout=self.timeout,
                error_policy=self.retry_policy,
                keep_alive_interval=self.keep_alive,
                client_name=self.name,
                properties=self.client._create_properties(
                    self.client.config.user_agent))  # pylint: disable=protected-access
        if not self.running:
            self._connect()
            self.running = True

    def _connect(self):
        connected = self._build_connection()
        if not connected:
            time.sleep(self.reconnect_backoff)
            while not self._build_connection(is_reconnect=True):
                time.sleep(self.reconnect_backoff)

    def _build_connection(self, is_reconnect=False):
        """

        :param is_reconnect: True if attempting to reconnect after a failed connection attempt
                             or a lost connection; False on the first connection attempt.
        :return: True if connected, False otherwise.
        """
        # pylint: disable=protected-access
        if is_reconnect:
            alt_creds = {
                "username": self.client._auth_config.get("iot_username"),
                "password": self.client._auth_config.get("iot_password")
            }
            self._handler.close()
            source = Source(self.source)
            if self.offset is not None:
                source.set_filter(self.offset._selector())
            self._handler = ReceiveClient(
                source,
                auth=self.client.get_auth(**alt_creds),
                debug=self.client.config.network_tracing,
                prefetch=self.prefetch,
                link_properties=self.properties,
                timeout=self.timeout,
                error_policy=self.retry_policy,
                keep_alive_interval=self.keep_alive,
                client_name=self.name,
                properties=self.client._create_properties(
                    self.client.config.user_agent))  # pylint: disable=protected-access
            self.messages_iter = None
        try:
            self._handler.open()
            while not self._handler.client_ready():
                time.sleep(0.05)
            return True
        except errors.AuthenticationException as shutdown:
            if is_reconnect:
                log.info(
                    "EventHubConsumer couldn't authenticate. Shutting down. (%r)",
                    shutdown)
                error = AuthenticationError(str(shutdown), shutdown)
                self.close(exception=error)
                raise error
            else:
                log.info(
                    "EventHubConsumer couldn't authenticate. Attempting reconnect."
                )
                return False
        except errors.LinkRedirect as redirect:
            self._redirect(redirect)
            return True
        except (errors.LinkDetach, errors.ConnectionClose) as shutdown:
            if shutdown.action.retry:
                log.info("EventHubConsumer detached. Attempting reconnect.")
                return False
            else:
                log.info("EventHubConsumer detached. Shutting down.")
                error = ConnectError(str(shutdown), shutdown)
                self.close(exception=error)
                raise error
        except errors.MessageHandlerError as shutdown:
            if is_reconnect:
                log.info("EventHubConsumer detached. Shutting down.")
                error = ConnectError(str(shutdown), shutdown)
                self.close(exception=error)
                raise error
            else:
                log.info("EventHubConsumer detached. Attempting reconnect.")
                return False
        except errors.AMQPConnectionError as shutdown:
            if is_reconnect:
                log.info(
                    "EventHubConsumer connection error (%r). Shutting down.",
                    shutdown)
                error = AuthenticationError(str(shutdown), shutdown)
                self.close(exception=error)
                raise error
            else:
                log.info(
                    "EventHubConsumer couldn't authenticate. Attempting reconnect."
                )
                return False
        except compat.TimeoutException as shutdown:
            if is_reconnect:
                log.info(
                    "EventHubConsumer authentication timed out. Shutting down."
                )
                error = AuthenticationError(str(shutdown), shutdown)
                self.close(exception=error)
                raise error
            else:
                log.info(
                    "EventHubConsumer authentication timed out. Attempting reconnect."
                )
                return False
        except Exception as e:
            log.error(
                "Unexpected error occurred when building connection (%r). Shutting down.",
                e)
            error = EventHubError(
                "Unexpected error occurred when building connection", e)
            self.close(exception=error)
            raise error

    def _reconnect(self):
        return self._build_connection(is_reconnect=True)

    @property
    def queue_size(self):
        # type:() -> int
        """
        The current size of the unprocessed Event queue.

        :rtype: int
        """
        # pylint: disable=protected-access
        if self._handler._received_messages:
            return self._handler._received_messages.qsize()
        return 0

    def receive(self, max_batch_size=None, timeout=None):
        # type:(int, float) -> List[EventData]
        """
        Receive events from the EventHub.

        :param max_batch_size: Receive a batch of events. Batch size will
         be up to the maximum specified, but will return as soon as the service
         returns no new events. If combined with a timeout and no events are
         retrieved before the timeout, the result will be empty. If no batch
         size is supplied, the prefetch size will be the maximum.
        :type max_batch_size: int
        :param timeout: The maximum wait time to build up the requested message count for the batch.
         If not specified, the default wait time specified when the consumer was created will be used.
        :type timeout: float
        :rtype: list[~azure.eventhub.common.EventData]
        :raises: ~azure.eventhub.AuthenticationError, ~azure.eventhub.ConnectError, ~azure.eventhub.ConnectionLostError,
                ~azure.eventhub.EventHubError
        Example:
            .. literalinclude:: ../examples/test_examples_eventhub.py
                :start-after: [START eventhub_client_sync_receive]
                :end-before: [END eventhub_client_sync_receive]
                :language: python
                :dedent: 4
                :caption: Receive events from the EventHub.

        """
        self._check_closed()
        self._open()

        max_batch_size = min(
            self.client.config.max_batch_size,
            self.prefetch) if max_batch_size is None else max_batch_size
        timeout = self.client.config.receive_timeout if timeout is None else timeout

        data_batch = []  # type: List[EventData]
        max_retries = self.client.config.max_retries
        connecting_count = 0
        while True:
            connecting_count += 1
            try:
                timeout_ms = 1000 * timeout if timeout else 0
                message_batch = self._handler.receive_message_batch(
                    max_batch_size=max_batch_size -
                    (len(data_batch) if data_batch else 0),
                    timeout=timeout_ms)
                for message in message_batch:
                    event_data = EventData(message=message)
                    self.offset = EventPosition(event_data.offset)
                    data_batch.append(event_data)
                return data_batch
            except errors.AuthenticationException as auth_error:
                if connecting_count < max_retries:
                    log.info(
                        "EventHubConsumer disconnected due to token error. Attempting reconnect."
                    )
                    self._reconnect()
                else:
                    log.info(
                        "EventHubConsumer authentication failed. Shutting down."
                    )
                    error = AuthenticationError(str(auth_error), auth_error)
                    self.close(auth_error)
                    raise error
            except (errors.LinkDetach, errors.ConnectionClose) as shutdown:
                if shutdown.action.retry and self.auto_reconnect:
                    log.info(
                        "EventHubConsumer detached. Attempting reconnect.")
                    self._reconnect()
                else:
                    log.info("EventHubConsumer detached. Shutting down.")
                    error = ConnectionLostError(str(shutdown), shutdown)
                    self.close(exception=error)
                    raise error
            except errors.MessageHandlerError as shutdown:
                if connecting_count < max_retries:
                    log.info(
                        "EventHubConsumer detached. Attempting reconnect.")
                    self._reconnect()
                else:
                    log.info("EventHubConsumer detached. Shutting down.")
                    error = ConnectionLostError(str(shutdown), shutdown)
                    self.close(error)
                    raise error
            except errors.AMQPConnectionError as shutdown:
                if connecting_count < max_retries:
                    log.info(
                        "EventHubConsumer connection lost. Attempting reconnect."
                    )
                    self._reconnect()
                else:
                    log.info(
                        "EventHubConsumer connection lost. Shutting down.")
                    error = ConnectionLostError(str(shutdown), shutdown)
                    self.close(error)
                    raise error
            except compat.TimeoutException as shutdown:
                if connecting_count < max_retries:
                    log.info(
                        "EventHubConsumer timed out receiving event data. Attempting reconnect."
                    )
                    self._reconnect()
                else:
                    log.info("EventHubConsumer timed out. Shutting down.")
                    self.close(shutdown)
                    raise ConnectionLostError(str(shutdown), shutdown)
            except KeyboardInterrupt:
                log.info("EventHubConsumer stops due to keyboard interrupt")
                self.close()
                raise
            except Exception as e:
                log.error("Unexpected error occurred (%r). Shutting down.", e)
                error = EventHubError("Receive failed: {}".format(e), e)
                self.close(exception=error)
                raise error

    def close(self, exception=None):
        # type:(Exception) -> None
        """
        Close down the handler. If the handler has already closed,
        this will be a no op. An optional exception can be passed in to
        indicate that the handler was shutdown due to error.

        :param exception: An optional exception if the handler is closing
         due to an error.
        :type exception: Exception

        Example:
            .. literalinclude:: ../examples/test_examples_eventhub.py
                :start-after: [START eventhub_client_receiver_close]
                :end-before: [END eventhub_client_receiver_close]
                :language: python
                :dedent: 4
                :caption: Close down the handler.

        """
        if self.messages_iter:
            self.messages_iter.close()
            self.messages_iter = None
        self.running = False
        if self.error:
            return
        if isinstance(exception, errors.LinkRedirect):
            self.redirected = exception
        elif isinstance(exception, EventHubError):
            self.error = exception
        elif exception:
            self.error = EventHubError(str(exception))
        else:
            self.error = EventHubError("This receive handler is now closed.")
        self._handler.close()

    next = __next__  # for python2.7
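
# A minimal sketch of consuming events with the EventHubConsumer above, assuming
# the preview-era azure-eventhub EventHubClient whose create_consumer() factory is
# referenced in the __init__ docstring. Connection string, consumer group,
# partition id and starting position are placeholder values.
from azure.eventhub import EventHubClient, EventPosition

client = EventHubClient.from_connection_string("<eventhub connection string>")
consumer = client.create_consumer(
    consumer_group="$Default",
    partition_id="0",
    event_position=EventPosition("-1"))  # start from the beginning of the stream
with consumer:  # __enter__/__exit__ close the consumer on exit or error
    # Batch mode: pull up to 100 events, waiting at most 5 seconds.
    events = consumer.receive(max_batch_size=100, timeout=5)
    # Iterator mode: the consumer itself is iterable (see __iter__/__next__ above).
    for event in consumer:
        print(event.offset, event.body_as_str())
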
class ServiceBusReceiver(BaseHandler, ReceiverMixin):  # pylint: disable=too-many-instance-attributes
    """The ServiceBusReceiver class defines a high level interface for
    receiving messages from the Azure Service Bus Queue or Topic Subscription.

    The two primary channels for message receipt are `receive()` to make a single request for messages,
    and `for message in receiver:` to continuously receive incoming messages in an ongoing fashion.

    :ivar fully_qualified_namespace: The fully qualified host name for the Service Bus namespace.
     The namespace format is: `<yournamespace>.servicebus.windows.net`.
    :vartype fully_qualified_namespace: str
    :ivar entity_path: The path of the entity that the client connects to.
    :vartype entity_path: str

    :param str fully_qualified_namespace: The fully qualified host name for the Service Bus namespace.
     The namespace format is: `<yournamespace>.servicebus.windows.net`.
    :param ~azure.core.credentials.TokenCredential credential: The credential object used for authentication which
     implements a particular interface for getting tokens. It accepts
     :class: credential objects generated by the azure-identity library and objects that implement the
     `get_token(self, *scopes)` method.
    :keyword str queue_name: The path of specific Service Bus Queue the client connects to.
    :keyword str topic_name: The path of specific Service Bus Topic which contains the Subscription
     the client connects to.
    :keyword str subscription_name: The path of specific Service Bus Subscription under the
     specified Topic the client connects to.
    :keyword float max_wait_time: The timeout in seconds between received messages after which the receiver will
     automatically shut down. The default value is 0, meaning no timeout.
    :keyword receive_mode: The mode with which messages will be retrieved from the entity. The two options
     are PeekLock and ReceiveAndDelete. Messages received with PeekLock must be settled within a given
     lock period before they will be removed from the queue. Messages received with ReceiveAndDelete
     will be immediately removed from the queue, and cannot be subsequently abandoned or re-received
     if the client fails to process the message.
     The default mode is PeekLock.
    :paramtype receive_mode: ~azure.servicebus.ReceiveMode
    :keyword bool logging_enable: Whether to output network trace logs to the logger. Default is `False`.
    :keyword transport_type: The type of transport protocol that will be used for communicating with
     the Service Bus service. Default is `TransportType.Amqp`.
    :paramtype transport_type: ~azure.servicebus.TransportType
    :keyword dict http_proxy: HTTP proxy settings. This must be a dictionary with the following
     keys: `'proxy_hostname'` (str value) and `'proxy_port'` (int value).
     Additionally the following keys may also be present: `'username', 'password'`.
    :keyword str user_agent: If specified, this will be added in front of the built-in user agent string.
    :keyword int prefetch_count: The maximum number of messages to cache with each request to the service.
     This setting is only for advanced performance tuning. Increasing this value will improve message throughput
     performance but increase the chance that messages will expire while they are cached if they're not
     processed fast enough.
     The default value is 0, meaning messages will be received from the service and processed one at a time.
     In the case of prefetch_count being 0, `ServiceBusReceiver.receive` would try to cache `max_message_count`
     (if provided) within its request to the service.

    .. admonition:: Example:

        .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
            :start-after: [START create_servicebus_receiver_sync]
            :end-before: [END create_servicebus_receiver_sync]
            :language: python
            :dedent: 4
            :caption: Create a new instance of the ServiceBusReceiver.

    """
    def __init__(self, fully_qualified_namespace, credential, **kwargs):
        # type: (str, TokenCredential, Any) -> None
        self._message_iter = None  # type: Optional[Iterator[ReceivedMessage]]
        if kwargs.get("entity_name"):
            super(ServiceBusReceiver, self).__init__(
                fully_qualified_namespace=fully_qualified_namespace,
                credential=credential,
                **kwargs)
        else:
            queue_name = kwargs.get("queue_name")  # type: Optional[str]
            topic_name = kwargs.get("topic_name")  # type: Optional[str]
            subscription_name = kwargs.get("subscription_name")
            if queue_name and topic_name:
                raise ValueError(
                    "Queue/Topic name can not be specified simultaneously.")
            if topic_name and not subscription_name:
                raise ValueError(
                    "Subscription name is missing for the topic. Please specify subscription_name."
                )
            entity_name = queue_name or topic_name
            if not entity_name:
                raise ValueError(
                    "Queue/Topic name is missing. Please specify queue_name/topic_name."
                )

            super(ServiceBusReceiver, self).__init__(
                fully_qualified_namespace=fully_qualified_namespace,
                credential=credential,
                entity_name=entity_name,
                **kwargs)

        self._populate_attributes(**kwargs)

    def __iter__(self):
        return self._iter_contextual_wrapper()

    def _iter_contextual_wrapper(self, max_wait_time=None):
        # pylint: disable=protected-access
        original_timeout = None
        while True:
            # This is not threadsafe, but gives us a way to handle if someone passes
            # different max_wait_times to different iterators and uses them in concert.
            if max_wait_time:
                original_timeout = self._handler._timeout
                self._handler._timeout = max_wait_time * 1000
            try:
                yield next(self)
            except StopIteration:
                break
            finally:
                if original_timeout:
                    self._handler._timeout = original_timeout

    def __next__(self):
        self._check_live()
        while True:
            try:
                return self._do_retryable_operation(self._iter_next)
            except StopIteration:
                self._message_iter = None
                raise

    next = __next__  # for python2.7

    def _iter_next(self):
        self._open()
        if not self._message_iter:
            self._message_iter = self._handler.receive_messages_iter()
        uamqp_message = next(self._message_iter)
        message = self._build_message(uamqp_message)
        return message

    def _create_handler(self, auth):
        # type: (AMQPAuth) -> None
        self._handler = ReceiveClient(
            self._get_source(),
            auth=auth,
            debug=self._config.logging_enable,
            properties=self._properties,
            error_policy=self._error_policy,
            client_name=self._name,
            on_attach=self._on_attach,
            auto_complete=False,
            encoding=self._config.encoding,
            receive_settle_mode=self._receive_mode.value,
            send_settle_mode=SenderSettleMode.Settled
            if self._receive_mode == ReceiveMode.ReceiveAndDelete else None,
            timeout=self._max_wait_time * 1000 if self._max_wait_time else 0,
            prefetch=self._prefetch_count,
            keep_alive_interval=self._config.keep_alive,
            shutdown_after_timeout=False)

    def _open(self):
        # pylint: disable=protected-access
        if self._running:
            return
        if self._handler and not self._handler._shutdown:
            self._handler.close()

        auth = None if self._connection else create_authentication(self)
        self._create_handler(auth)
        try:
            self._handler.open(connection=self._connection)
            while not self._handler.client_ready():
                time.sleep(0.05)
            self._running = True
        except:
            self.close()
            raise

    def _receive(self, max_message_count=None, timeout=None):
        # type: (Optional[int], Optional[float]) -> List[ReceivedMessage]
        # pylint: disable=protected-access
        self._open()

        amqp_receive_client = self._handler
        received_messages_queue = amqp_receive_client._received_messages
        max_message_count = max_message_count or self._prefetch_count
        timeout_ms = 1000 * (timeout or self._max_wait_time) if (timeout or self._max_wait_time) else 0
        abs_timeout_ms = amqp_receive_client._counter.get_current_ms() + timeout_ms if timeout_ms else 0

        batch = []  # type: List[Message]
        while not received_messages_queue.empty() and len(batch) < max_message_count:
            batch.append(received_messages_queue.get())
            received_messages_queue.task_done()
        if len(batch) >= max_message_count:
            return [self._build_message(message) for message in batch]

        # Dynamically issue link credit if max_message_count > 1 when the prefetch_count is the default value 1
        if max_message_count and self._prefetch_count == 1 and max_message_count > 1:
            link_credit_needed = max_message_count - len(batch)
            amqp_receive_client.message_handler.reset_link_credit(
                link_credit_needed)

        first_message_received = expired = False
        receiving = True
        while receiving and not expired and len(batch) < max_message_count:
            while receiving and received_messages_queue.qsize() < max_message_count:
                if abs_timeout_ms and amqp_receive_client._counter.get_current_ms() > abs_timeout_ms:
                    expired = True
                    break
                before = received_messages_queue.qsize()
                receiving = amqp_receive_client.do_work()
                received = received_messages_queue.qsize() - before
                if not first_message_received and received_messages_queue.qsize() > 0 and received > 0:
                    # first message(s) received, continue receiving for some time
                    first_message_received = True
                    abs_timeout_ms = amqp_receive_client._counter.get_current_ms() + \
                                     self._further_pull_receive_timeout_ms
            while not received_messages_queue.empty() and len(batch) < max_message_count:
                batch.append(received_messages_queue.get())
                received_messages_queue.task_done()

        return [self._build_message(message) for message in batch]

    def _settle_message(self,
                        settlement,
                        lock_tokens,
                        dead_letter_details=None):
        # type: (bytes, List[str], Optional[Dict[str, Any]]) -> Any
        message = {
            MGMT_REQUEST_DISPOSITION_STATUS: settlement,
            MGMT_REQUEST_LOCK_TOKENS: types.AMQPArray(lock_tokens)
        }

        self._populate_message_properties(message)
        if dead_letter_details:
            message.update(dead_letter_details)

        return self._mgmt_request_response_with_retry(
            REQUEST_RESPONSE_UPDATE_DISPOSTION_OPERATION, message,
            mgmt_handlers.default)

    def _renew_locks(self, *lock_tokens, **kwargs):
        # type: (str, Any) -> Any
        timeout = kwargs.pop("timeout", None)
        message = {MGMT_REQUEST_LOCK_TOKENS: types.AMQPArray(lock_tokens)}
        return self._mgmt_request_response_with_retry(
            REQUEST_RESPONSE_RENEWLOCK_OPERATION,
            message,
            mgmt_handlers.lock_renew_op,
            timeout=timeout)

    def close(self):
        # type: () -> None
        super(ServiceBusReceiver, self).close()
        self._message_iter = None  # pylint: disable=attribute-defined-outside-init

    def get_streaming_message_iter(self, max_wait_time=None):
        # type: (float) -> Iterator[ReceivedMessage]
        """Receive messages from an iterator indefinitely, or if a max_wait_time is specified, until
        such a timeout occurs.

        :param max_wait_time: Maximum time to wait in seconds for the next message to arrive.
         If no messages arrive, and no timeout is specified, this call will not return
         until the connection is closed. If specified, and no messages arrive for the
         timeout period, the iterator will stop.
        :type max_wait_time: float
        :rtype: Iterator[ReceivedMessage]

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
                :start-after: [START receive_forever]
                :end-before: [END receive_forever]
                :language: python
                :dedent: 4
                :caption: Receive indefinitely from an iterator in streaming fashion.
        """
        return self._iter_contextual_wrapper(max_wait_time)

    @classmethod
    def from_connection_string(cls, conn_str, **kwargs):
        # type: (str, Any) -> ServiceBusReceiver
        """Create a ServiceBusReceiver from a connection string.

        :param conn_str: The connection string of a Service Bus.
        :type conn_str: str
        :keyword str queue_name: The path of specific Service Bus Queue the client connects to.
        :keyword str topic_name: The path of specific Service Bus Topic which contains the Subscription
         the client connects to.
        :keyword str subscription_name: The path of specific Service Bus Subscription under the
         specified Topic the client connects to.
        :keyword receive_mode: The mode with which messages will be retrieved from the entity. The two options
         are PeekLock and ReceiveAndDelete. Messages received with PeekLock must be settled within a given
         lock period before they will be removed from the queue. Messages received with ReceiveAndDelete
         will be immediately removed from the queue, and cannot be subsequently abandoned or re-received
         if the client fails to process the message.
         The default mode is PeekLock.
        :paramtype receive_mode: ~azure.servicebus.ReceiveMode
        :keyword float max_wait_time: The timeout in seconds between received messages after which the receiver will
         automatically shut down. The default value is 0, meaning no timeout.
        :keyword bool logging_enable: Whether to output network trace logs to the logger. Default is `False`.
        :keyword transport_type: The type of transport protocol that will be used for communicating with
         the Service Bus service. Default is `TransportType.Amqp`.
        :paramtype transport_type: ~azure.servicebus.TransportType
        :keyword dict http_proxy: HTTP proxy settings. This must be a dictionary with the following
         keys: `'proxy_hostname'` (str value) and `'proxy_port'` (int value).
         Additionally the following keys may also be present: `'username', 'password'`.
        :keyword str user_agent: If specified, this will be added in front of the built-in user agent string.
        :keyword int prefetch_count: The maximum number of messages to cache with each request to the service.
         This setting is only for advanced performance tuning. Increasing this value will improve message throughput
         performance but increase the chance that messages will expire while they are cached if they're not
         processed fast enough.
         The default value is 0, meaning messages will be received from the service and processed one at a time.
         In the case of prefetch_count being 0, `ServiceBusReceiver.receive` would try to cache `max_message_count`
         (if provided) within its request to the service.
        :rtype: ~azure.servicebus.ServiceBusReceiver

        :raises ~azure.servicebus.ServiceBusAuthenticationError: Indicates an issue in token/identity validity.
        :raises ~azure.servicebus.ServiceBusAuthorizationError: Indicates an access/rights related failure.

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
                :start-after: [START create_servicebus_receiver_from_conn_str_sync]
                :end-before: [END create_servicebus_receiver_from_conn_str_sync]
                :language: python
                :dedent: 4
                :caption: Create a new instance of the ServiceBusReceiver from connection string.

        """
        constructor_args = cls._convert_connection_string_to_kwargs(
            conn_str, **kwargs)
        if kwargs.get("queue_name") and kwargs.get("subscription_name"):
            raise ValueError("Queue entity does not have subscription.")

        if kwargs.get("topic_name") and not kwargs.get("subscription_name"):
            raise ValueError(
                "Subscription name is missing for the topic. Please specify subscription_name."
            )
        return cls(**constructor_args)

    def receive_messages(self, max_message_count=None, max_wait_time=None):
        # type: (int, float) -> List[ReceivedMessage]
        """Receive a batch of messages at once.

        This approach is optimal if you wish to process multiple messages simultaneously, or
        perform an ad-hoc receive as a single call.

        Note that the number of messages retrieved in a single batch will be dependent on
        whether `prefetch_count` was set for the receiver. If `prefetch_count` is not set for the receiver,
        the receiver would try to cache max_message_count (if provided) messages within the request to the service.

        This call will prioritize returning quickly over meeting a specified batch size, and so will
        return as soon as at least one message is received and there is a gap in incoming messages regardless
        of the specified batch size.

        :param int max_message_count: Maximum number of messages in the batch. Actual number
         returned will depend on prefetch_count and incoming stream rate.
        :param float max_wait_time: Maximum time to wait in seconds for the first message to arrive.
         If no messages arrive, and no timeout is specified, this call will not return
         until the connection is closed. If specified, and no messages arrive within the
         timeout period, an empty list will be returned.

        :rtype: List[~azure.servicebus.ReceivedMessage]

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
                :start-after: [START receive_sync]
                :end-before: [END receive_sync]
                :language: python
                :dedent: 4
                :caption: Receive messages from ServiceBus.

        """
        self._check_live()
        return self._do_retryable_operation(
            self._receive,
            max_message_count=max_message_count,
            timeout=max_wait_time,
            operation_requires_timeout=True)

    def receive_deferred_messages(self, sequence_numbers, **kwargs):
        # type: (Union[int,List[int]], Any) -> List[ReceivedMessage]
        """Receive messages that have previously been deferred.

        When receiving deferred messages from a partitioned entity, all of the supplied
        sequence numbers must be messages from the same partition.

        :param Union[int,List[int]] sequence_numbers: A list of the sequence numbers of messages that have been
         deferred.
        :keyword float timeout: The total operation timeout in seconds including all the retries. The value must be
         greater than 0 if specified. The default value is None, meaning no timeout.
        :rtype: List[~azure.servicebus.ReceivedMessage]

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
                :start-after: [START receive_defer_sync]
                :end-before: [END receive_defer_sync]
                :language: python
                :dedent: 4
                :caption: Receive deferred messages from ServiceBus.

        """
        self._check_live()
        timeout = kwargs.pop("timeout", None)
        if timeout is not None and timeout <= 0:
            raise ValueError("The timeout must be greater than 0.")
        if isinstance(sequence_numbers, six.integer_types):
            sequence_numbers = [sequence_numbers]
        if not sequence_numbers:
            raise ValueError("At least one sequence number must be specified.")
        self._open()
        try:
            receive_mode = self._receive_mode.value.value
        except AttributeError:
            receive_mode = int(self._receive_mode)
        message = {
            MGMT_REQUEST_SEQUENCE_NUMBERS:
            types.AMQPArray([types.AMQPLong(s) for s in sequence_numbers]),
            MGMT_REQUEST_RECEIVER_SETTLE_MODE:
            types.AMQPuInt(receive_mode)
        }

        self._populate_message_properties(message)

        handler = functools.partial(mgmt_handlers.deferred_message_op,
                                    receive_mode=self._receive_mode,
                                    receiver=self)
        messages = self._mgmt_request_response_with_retry(
            REQUEST_RESPONSE_RECEIVE_BY_SEQUENCE_NUMBER,
            message,
            handler,
            timeout=timeout)
        return messages

    def peek_messages(self, max_message_count=1, **kwargs):
        # type: (int, Any) -> List[PeekedMessage]
        """Browse messages currently pending in the queue.

        Peeked messages are not removed from queue, nor are they locked. They cannot be completed,
        deferred or dead-lettered.

        :param int max_message_count: The maximum number of messages to try and peek. The default
         value is 1.
        :keyword int sequence_number: A message sequence number from which to start browsing messages.
        :keyword float timeout: The total operation timeout in seconds including all the retries. The value must be
         greater than 0 if specified. The default value is None, meaning no timeout.

        :rtype: List[~azure.servicebus.PeekedMessage]

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
                :start-after: [START peek_messages_sync]
                :end-before: [END peek_messages_sync]
                :language: python
                :dedent: 4
                :caption: Look at pending messages in the queue.

        """
        self._check_live()
        sequence_number = kwargs.pop("sequence_number", 0)
        timeout = kwargs.pop("timeout", None)
        if timeout is not None and timeout <= 0:
            raise ValueError("The timeout must be greater than 0.")
        if not sequence_number:
            sequence_number = self._last_received_sequenced_number or 1
        if int(max_message_count) < 1:
            raise ValueError("count must be 1 or greater.")
        if int(sequence_number) < 1:
            raise ValueError("start_from must be 1 or greater.")

        self._open()
        message = {
            MGMT_REQUEST_FROM_SEQUENCE_NUMBER: types.AMQPLong(sequence_number),
            MGMT_REQUEST_MAX_MESSAGE_COUNT: max_message_count
        }

        self._populate_message_properties(message)

        return self._mgmt_request_response_with_retry(
            REQUEST_RESPONSE_PEEK_OPERATION,
            message,
            mgmt_handlers.peek_op,
            timeout=timeout)
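

# --- Usage sketch (not part of the SDK source) --------------------------------
# A minimal, hypothetical example of the management operations defined above:
# peek_messages() browses pending messages without locking or removing them, and
# receive_deferred_messages() fetches previously deferred messages by sequence
# number. The connection string variable, queue name and sequence numbers are
# placeholders, and the receiver is assumed to be created through
# ServiceBusClient.get_queue_receiver.
def _example_peek_and_receive_deferred_sketch():
    import os
    from azure.servicebus import ServiceBusClient

    client = ServiceBusClient.from_connection_string(os.environ["SERVICE_BUS_CONN_STR"])
    with client.get_queue_receiver(queue_name="myqueue") as receiver:
        # Browse up to 5 pending messages; peeking does not lock or settle them.
        for peeked in receiver.peek_messages(max_message_count=5):
            print(str(peeked))
        # Retrieve messages that were previously deferred, by sequence number.
        deferred = receiver.receive_deferred_messages([1234, 1235], timeout=30)
        return deferred

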
class ServiceBusReceiver(BaseHandler, ReceiverMixin):  # pylint: disable=too-many-instance-attributes
    """The ServiceBusReceiver class defines a high level interface for
    receiving messages from the Azure Service Bus Queue or Topic Subscription.

    The two primary channels for message receipt are `receive_messages()` to make a single request for messages,
    and `for message in receiver:` to continuously receive incoming messages in an ongoing fashion.

    :ivar fully_qualified_namespace: The fully qualified host name for the Service Bus namespace.
     The namespace format is: `<yournamespace>.servicebus.windows.net`.
    :vartype fully_qualified_namespace: str
    :ivar entity_path: The path of the entity that the client connects to.
    :vartype entity_path: str

    :param str fully_qualified_namespace: The fully qualified host name for the Service Bus namespace.
     The namespace format is: `<yournamespace>.servicebus.windows.net`.
    :param ~azure.core.credentials.TokenCredential credential: The credential object used for authentication which
     implements a particular interface for getting tokens. It accepts
     :class:`ServiceBusSharedKeyCredential<azure.servicebus.ServiceBusSharedKeyCredential>`, or credential objects
     generated by the azure-identity library and objects that implement the `get_token(self, *scopes)` method.
    :keyword str queue_name: The path of specific Service Bus Queue the client connects to.
    :keyword str topic_name: The path of specific Service Bus Topic which contains the Subscription
     the client connects to.
    :keyword str subscription_name: The path of specific Service Bus Subscription under the
     specified Topic the client connects to.
    :keyword int prefetch: The maximum number of messages to cache with each request to the service.
     The default value is 0, meaning messages will be received from the service and processed
     one at a time. Increasing this value will improve message throughput performance but increase
     the chance that messages will expire while they are cached if they're not processed fast enough.
    :keyword float idle_timeout: The timeout in seconds between received messages after which the receiver will
     automatically shut down. The default value is 0, meaning no timeout.
    :keyword mode: The mode with which messages will be retrieved from the entity. The two options
     are PeekLock and ReceiveAndDelete. Messages received with PeekLock must be settled within a given
     lock period before they will be removed from the queue. Messages received with ReceiveAndDelete
     will be immediately removed from the queue, and cannot be subsequently abandoned or re-received
     if the client fails to process the message.
     The default mode is PeekLock.
    :paramtype mode: ~azure.servicebus.ReceiveSettleMode
    :keyword bool logging_enable: Whether to output network trace logs to the logger. Default is `False`.
    :keyword int retry_total: The total number of attempts to redo a failed operation when an error occurs.
     Default value is 3.
    :keyword transport_type: The type of transport protocol that will be used for communicating with
     the Service Bus service. Default is `TransportType.Amqp`.
    :paramtype transport_type: ~azure.servicebus.TransportType
    :keyword dict http_proxy: HTTP proxy settings. This must be a dictionary with the following
     keys: `'proxy_hostname'` (str value) and `'proxy_port'` (int value).
     Additionally the following keys may also be present: `'username', 'password'`.

    .. admonition:: Example:

        .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
            :start-after: [START create_servicebus_receiver_sync]
            :end-before: [END create_servicebus_receiver_sync]
            :language: python
            :dedent: 4
            :caption: Create a new instance of the ServiceBusReceiver.

    """
    def __init__(self, fully_qualified_namespace, credential, **kwargs):
        # type: (str, TokenCredential, Any) -> None
        if kwargs.get("entity_name"):
            super(ServiceBusReceiver, self).__init__(
                fully_qualified_namespace=fully_qualified_namespace,
                credential=credential,
                **kwargs)
        else:
            queue_name = kwargs.get("queue_name")  # type: Optional[str]
            topic_name = kwargs.get("topic_name")  # type: Optional[str]
            subscription_name = kwargs.get("subscription_name")
            if queue_name and topic_name:
                raise ValueError(
                    "Queue/Topic name can not be specified simultaneously.")
            if topic_name and not subscription_name:
                raise ValueError(
                    "Subscription name is missing for the topic. Please specify subscription_name."
                )
            entity_name = queue_name or topic_name
            if not entity_name:
                raise ValueError(
                    "Queue/Topic name is missing. Please specify queue_name/topic_name."
                )

            super(ServiceBusReceiver, self).__init__(
                fully_qualified_namespace=fully_qualified_namespace,
                credential=credential,
                entity_name=entity_name,
                **kwargs)

        self._populate_attributes(**kwargs)

    def __iter__(self):
        return self

    def __next__(self):
        self._check_live()
        while True:
            try:
                return self._do_retryable_operation(self._iter_next)
            except StopIteration:
                self.close()
                raise

    next = __next__  # for python2.7

    def _iter_next(self):
        self._open()
        uamqp_message = next(self._message_iter)
        message = self._build_message(uamqp_message)
        return message

    def _create_handler(self, auth):
        # type: (AMQPAuth) -> None
        self._handler = ReceiveClient(
            self._get_source(),
            auth=auth,
            debug=self._config.logging_enable,
            properties=self._properties,
            error_policy=self._error_policy,
            client_name=self._name,
            on_attach=self._on_attach,
            auto_complete=False,
            encoding=self._config.encoding,
            receive_settle_mode=self._mode.value,
            send_settle_mode=SenderSettleMode.Settled
            if self._mode == ReceiveSettleMode.ReceiveAndDelete else None,
            timeout=self._idle_timeout * 1000 if self._idle_timeout else 0,
            prefetch=self._prefetch)

    def _open(self):
        if self._running:
            return
        if self._handler:
            self._handler.close()

        auth = None if self._connection else create_authentication(self)
        self._create_handler(auth)
        try:
            self._handler.open(connection=self._connection)
            self._message_iter = self._handler.receive_messages_iter()  # pylint: disable=attribute-defined-outside-init
            while not self._handler.client_ready():
                time.sleep(0.05)
            self._running = True
        except:
            self.close()
            raise

    def _receive(self, max_batch_size=None, timeout=None):
        # type: (Optional[int], Optional[float]) -> List[ReceivedMessage]
        self._open()
        max_batch_size = max_batch_size or self._handler._prefetch  # pylint: disable=protected-access

        timeout_ms = 1000 * (timeout or self._idle_timeout) if (
            timeout or self._idle_timeout) else 0
        batch = self._handler.receive_message_batch(
            max_batch_size=max_batch_size, timeout=timeout_ms)

        return [self._build_message(message) for message in batch]

    def _settle_message(self,
                        settlement,
                        lock_tokens,
                        dead_letter_details=None):
        # type: (bytes, List[str], Optional[Dict[str, Any]]) -> Any
        message = {
            MGMT_REQUEST_DISPOSITION_STATUS: settlement,
            MGMT_REQUEST_LOCK_TOKENS: types.AMQPArray(lock_tokens)
        }

        self._populate_message_properties(message)
        if dead_letter_details:
            message.update(dead_letter_details)

        return self._mgmt_request_response_with_retry(
            REQUEST_RESPONSE_UPDATE_DISPOSTION_OPERATION, message,
            mgmt_handlers.default)

    def _renew_locks(self, *lock_tokens):
        # type: (*str) -> Any
        message = {MGMT_REQUEST_LOCK_TOKENS: types.AMQPArray(lock_tokens)}
        return self._mgmt_request_response_with_retry(
            REQUEST_RESPONSE_RENEWLOCK_OPERATION, message,
            mgmt_handlers.lock_renew_op)

    @classmethod
    def from_connection_string(cls, conn_str, **kwargs):
        # type: (str, Any) -> ServiceBusReceiver
        """Create a ServiceBusReceiver from a connection string.

        :param conn_str: The connection string of a Service Bus.
        :keyword str queue_name: The path of specific Service Bus Queue the client connects to.
        :keyword str topic_name: The path of specific Service Bus Topic which contains the Subscription
         the client connects to.
        :keyword str subscription_name: The path of specific Service Bus Subscription under the
         specified Topic the client connects to.
        :keyword mode: The mode with which messages will be retrieved from the entity. The two options
         are PeekLock and ReceiveAndDelete. Messages received with PeekLock must be settled within a given
         lock period before they will be removed from the queue. Messages received with ReceiveAndDelete
         will be immediately removed from the queue, and cannot be subsequently abandoned or re-received
         if the client fails to process the message.
         The default mode is PeekLock.
        :paramtype mode: ~azure.servicebus.ReceiveSettleMode
        :keyword int prefetch: The maximum number of messages to cache with each request to the service.
         The default value is 0, meaning messages will be received from the service and processed
         one at a time. Increasing this value will improve message throughput performance but increase
         the chance that messages will expire while they are cached if they're not processed fast enough.
        :keyword float idle_timeout: The timeout in seconds between received messages after which the receiver will
         automatically shut down. The default value is 0, meaning no timeout.
        :keyword bool logging_enable: Whether to output network trace logs to the logger. Default is `False`.
        :keyword int retry_total: The total number of attempts to redo a failed operation when an error occurs.
         Default value is 3.
        :keyword transport_type: The type of transport protocol that will be used for communicating with
         the Service Bus service. Default is `TransportType.Amqp`.
        :paramtype transport_type: ~azure.servicebus.TransportType
        :keyword dict http_proxy: HTTP proxy settings. This must be a dictionary with the following
         keys: `'proxy_hostname'` (str value) and `'proxy_port'` (int value).
         Additionally the following keys may also be present: `'username', 'password'`.
        :rtype: ~azure.servicebus.ServiceBusReceiver

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
                :start-after: [START create_servicebus_receiver_from_conn_str_sync]
                :end-before: [END create_servicebus_receiver_from_conn_str_sync]
                :language: python
                :dedent: 4
                :caption: Create a new instance of the ServiceBusReceiver from connection string.

        """
        constructor_args = _convert_connection_string_to_kwargs(
            conn_str, ServiceBusSharedKeyCredential, **kwargs)
        if kwargs.get("queue_name") and kwargs.get("subscription_name"):
            raise ValueError("Queue entity does not have subscription.")

        if kwargs.get("topic_name") and not kwargs.get("subscription_name"):
            raise ValueError(
                "Subscription name is missing for the topic. Please specify subscription_name."
            )
        return cls(**constructor_args)

    def receive_messages(self, max_batch_size=None, max_wait_time=None):
        # type: (int, float) -> List[ReceivedMessage]
        """Receive a batch of messages at once.

        This approach is optimal if you wish to process multiple messages simultaneously, or
        perform an ad-hoc receive as a single call.

        Note that the number of messages retrieved in a single batch will be dependent on
        whether `prefetch` was set for the receiver. This call will prioritize returning
        quickly over meeting a specified batch size, and so will return as soon as at least
        one message is received and there is a gap in incoming messages regardless
        of the specified batch size.

        :param int max_batch_size: Maximum number of messages in the batch. Actual number
         returned will depend on prefetch size and incoming stream rate.
        :param float max_wait_time: Maximum time to wait in seconds for the first message to arrive.
         If no messages arrive, and no timeout is specified, this call will not return
         until the connection is closed. If specified, and no messages arrive within the
         timeout period, an empty list will be returned.
        :rtype: list[~azure.servicebus.ReceivedMessage]

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
                :start-after: [START receive_sync]
                :end-before: [END receive_sync]
                :language: python
                :dedent: 4
                :caption: Receive messages from ServiceBus.

        """
        self._check_live()
        if max_batch_size and self._prefetch < max_batch_size:
            raise ValueError(
                "max_batch_size should be less than or equal to prefetch of ServiceBusReceiver, or you "
                "could set a larger prefetch value when you're constructing the ServiceBusReceiver."
            )
        return self._do_retryable_operation(self._receive,
                                            max_batch_size=max_batch_size,
                                            timeout=max_wait_time,
                                            require_timeout=True)

    def receive_deferred_messages(self, sequence_numbers):
        # type: (List[int]) -> List[ReceivedMessage]
        """Receive messages that have previously been deferred.

        When receiving deferred messages from a partitioned entity, all of the supplied
        sequence numbers must be messages from the same partition.

        :param list[int] sequence_numbers: A list of the sequence numbers of messages that have been
         deferred.
        :rtype: list[~azure.servicebus.ReceivedMessage]

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
                :start-after: [START receive_defer_sync]
                :end-before: [END receive_defer_sync]
                :language: python
                :dedent: 4
                :caption: Receive deferred messages from ServiceBus.

        """
        self._check_live()
        if not sequence_numbers:
            raise ValueError("At least one sequence number must be specified.")
        self._open()
        try:
            receive_mode = self._mode.value.value
        except AttributeError:
            receive_mode = int(self._mode)
        message = {
            MGMT_REQUEST_SEQUENCE_NUMBERS:
            types.AMQPArray([types.AMQPLong(s) for s in sequence_numbers]),
            MGMT_REQUEST_RECEIVER_SETTLE_MODE:
            types.AMQPuInt(receive_mode)
        }

        self._populate_message_properties(message)

        handler = functools.partial(mgmt_handlers.deferred_message_op,
                                    mode=self._mode)
        messages = self._mgmt_request_response_with_retry(
            REQUEST_RESPONSE_RECEIVE_BY_SEQUENCE_NUMBER, message, handler)
        for m in messages:
            m._receiver = self  # pylint: disable=protected-access
        return messages

    def peek_messages(self, message_count=1, sequence_number=None):
        # type: (int, Optional[int]) -> List[PeekMessage]
        """Browse messages currently pending in the queue.

        Peeked messages are not removed from queue, nor are they locked. They cannot be completed,
        deferred or dead-lettered.

        :param int message_count: The maximum number of messages to try and peek. The default
         value is 1.
        :param int sequence_number: A message sequence number from which to start browsing messages.
        :rtype: list[~azure.servicebus.PeekMessage]

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
                :start-after: [START peek_messages_sync]
                :end-before: [END peek_messages_sync]
                :language: python
                :dedent: 4
                :caption: Look at pending messages in the queue.

        """
        self._check_live()
        if not sequence_number:
            sequence_number = self._last_received_sequenced_number or 1
        if int(message_count) < 1:
            raise ValueError("count must be 1 or greater.")
        if int(sequence_number) < 1:
            raise ValueError("start_from must be 1 or greater.")

        self._open()
        message = {
            MGMT_REQUEST_FROM_SEQUENCE_NUMBER: types.AMQPLong(sequence_number),
            MGMT_REQUEST_MESSAGE_COUNT: message_count
        }

        self._populate_message_properties(message)

        return self._mgmt_request_response_with_retry(
            REQUEST_RESPONSE_PEEK_OPERATION, message, mgmt_handlers.peek_op)
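

# --- Usage sketch (not part of the SDK source) --------------------------------
# A hypothetical example against the receiver variant defined above, which exposes
# from_connection_string() and batch receiving via max_batch_size/max_wait_time.
# The environment variable, queue name and numeric values are placeholders; note
# that receive_messages() above rejects a max_batch_size larger than the
# configured prefetch.
def _example_batch_receive_sketch():
    import os

    receiver = ServiceBusReceiver.from_connection_string(
        os.environ["SERVICE_BUS_CONN_STR"],
        queue_name="myqueue",
        prefetch=20,
    )
    with receiver:  # context-manager support is assumed from the shared BaseHandler
        # Ask for up to 10 messages, waiting at most 5 seconds for the first one.
        messages = receiver.receive_messages(max_batch_size=10, max_wait_time=5)
        for message in messages:
            print(str(message))
        # Alternatively, `for message in receiver:` streams messages continuously.

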
class ServiceBusReceiver(
    BaseHandler, ReceiverMixin
):  # pylint: disable=too-many-instance-attributes
    """The ServiceBusReceiver class defines a high level interface for
    receiving messages from the Azure Service Bus Queue or Topic Subscription.

    The two primary channels for message receipt are `receive_messages()` to make a single request for messages,
    and `for message in receiver:` to continuously receive incoming messages in an ongoing fashion.

    **Please use the `get_<queue/subscription>_receiver` method of ~azure.servicebus.ServiceBusClient to create a
    ServiceBusReceiver instance.**

    :ivar fully_qualified_namespace: The fully qualified host name for the Service Bus namespace.
     The namespace format is: `<yournamespace>.servicebus.windows.net`.
    :vartype fully_qualified_namespace: str
    :ivar entity_path: The path of the entity that the client connects to.
    :vartype entity_path: str

    :param str fully_qualified_namespace: The fully qualified host name for the Service Bus namespace.
     The namespace format is: `<yournamespace>.servicebus.windows.net`.
    :param credential: The credential object used for authentication which
     implements a particular interface for getting tokens. It accepts
     credential objects generated by the azure-identity library and objects that implement the
     `get_token(self, *scopes)` method; alternatively, an AzureSasCredential can also be provided.
    :type credential: ~azure.core.credentials.TokenCredential or ~azure.core.credentials.AzureSasCredential
     or ~azure.core.credentials.AzureNamedKeyCredential
    :keyword str queue_name: The path of specific Service Bus Queue the client connects to.
    :keyword str topic_name: The path of specific Service Bus Topic which contains the Subscription
     the client connects to.
    :keyword str subscription_name: The path of specific Service Bus Subscription under the
     specified Topic the client connects to.
    :keyword Optional[float] max_wait_time: The timeout in seconds between received messages after which the
     receiver will automatically stop receiving. The default value is None, meaning no timeout.
    :keyword receive_mode: The mode with which messages will be retrieved from the entity. The two options
     are PEEK_LOCK and RECEIVE_AND_DELETE. Messages received with PEEK_LOCK must be settled within a given
     lock period before they will be removed from the queue. Messages received with RECEIVE_AND_DELETE
     will be immediately removed from the queue, and cannot be subsequently abandoned or re-received
     if the client fails to process the message.
     The default mode is PEEK_LOCK.
    :paramtype receive_mode: Union[~azure.servicebus.ServiceBusReceiveMode, str]
    :keyword bool logging_enable: Whether to output network trace logs to the logger. Default is `False`.
    :keyword transport_type: The type of transport protocol that will be used for communicating with
     the Service Bus service. Default is `TransportType.Amqp`.
    :paramtype transport_type: ~azure.servicebus.TransportType
    :keyword dict http_proxy: HTTP proxy settings. This must be a dictionary with the following
     keys: `'proxy_hostname'` (str value) and `'proxy_port'` (int value).
     Additionally the following keys may also be present: `'username', 'password'`.
    :keyword str user_agent: If specified, this will be added in front of the built-in user agent string.
    :keyword Optional[~azure.servicebus.AutoLockRenewer] auto_lock_renewer: An ~azure.servicebus.AutoLockRenewer
     can be provided such that messages are automatically registered on receipt. If the receiver is a session
     receiver, it will apply to the session instead.
    :keyword int prefetch_count: The maximum number of messages to cache with each request to the service.
     This setting is only for advanced performance tuning. Increasing this value will improve message throughput
     performance but increase the chance that messages will expire while they are cached if they're not
     processed fast enough.
     The default value is 0, meaning messages will be received from the service and processed one at a time.
     In the case of prefetch_count being 0, `ServiceBusReceiver.receive_messages` would try to cache `max_message_count`
     (if provided) within its request to the service.
    """

    def __init__(self, fully_qualified_namespace, credential, **kwargs):
        # type: (str, Union[TokenCredential, AzureSasCredential, AzureNamedKeyCredential], Any) -> None
        self._message_iter = None  # type: Optional[Iterator[ServiceBusReceivedMessage]]
        if kwargs.get("entity_name"):
            super(ServiceBusReceiver, self).__init__(
                fully_qualified_namespace=fully_qualified_namespace,
                credential=credential,
                **kwargs
            )
        else:
            queue_name = kwargs.get("queue_name")  # type: Optional[str]
            topic_name = kwargs.get("topic_name")  # type: Optional[str]
            subscription_name = kwargs.get("subscription_name")
            if queue_name and topic_name:
                raise ValueError(
                    "Queue/Topic name can not be specified simultaneously."
                )
            if topic_name and not subscription_name:
                raise ValueError(
                    "Subscription name is missing for the topic. Please specify subscription_name."
                )
            entity_name = queue_name or topic_name
            if not entity_name:
                raise ValueError(
                    "Queue/Topic name is missing. Please specify queue_name/topic_name."
                )

            super(ServiceBusReceiver, self).__init__(
                fully_qualified_namespace=fully_qualified_namespace,
                credential=credential,
                entity_name=entity_name,
                **kwargs
            )

        self._populate_attributes(**kwargs)
        self._session = (
            None if self._session_id is None else ServiceBusSession(self._session_id, self)
        )

    def __iter__(self):
        return self._iter_contextual_wrapper()

    def _iter_contextual_wrapper(self, max_wait_time=None):
        """The purpose of this wrapper is to allow both state restoration (for multiple concurrent iteration)
        and per-iter argument passing that requires the former."""
        # pylint: disable=protected-access
        original_timeout = None
        while True:
            # This is not threadsafe, but gives us a way to handle if someone passes
            # different max_wait_times to different iterators and uses them in concert.
            if max_wait_time:
                original_timeout = self._handler._timeout
                self._handler._timeout = max_wait_time * 1000
            try:
                message = self._inner_next()
                links = get_receive_links(message)
                with receive_trace_context_manager(self, links=links):
                    yield message
            except StopIteration:
                break
            finally:
                if original_timeout:
                    try:
                        self._handler._timeout = original_timeout
                    except AttributeError:  # Handler may be disposed already.
                        pass

    def _inner_next(self):
        # We do this wrapping so that an imperative next() call and a generator-based iter both trace sanely.
        self._check_live()
        while True:
            try:
                return self._do_retryable_operation(self._iter_next)
            except StopIteration:
                self._message_iter = None
                raise

    def __next__(self):
        # Normally this would wrap the yield of the iter, but for a direct next call we just trace imperatively.
        message = self._inner_next()
        links = get_receive_links(message)
        with receive_trace_context_manager(self, links=links):
            return message

    next = __next__  # for python2.7

    def _iter_next(self):
        self._open()
        if not self._message_iter:
            self._message_iter = self._handler.receive_messages_iter()
        uamqp_message = next(self._message_iter)
        message = self._build_message(uamqp_message)
        if (
            self._auto_lock_renewer
            and not self._session
            and self._receive_mode != ServiceBusReceiveMode.RECEIVE_AND_DELETE
        ):
            self._auto_lock_renewer.register(self, message)
        return message

    @classmethod
    def _from_connection_string(cls, conn_str, **kwargs):
        # type: (str, Any) -> ServiceBusReceiver
        """Create a ServiceBusReceiver from a connection string.

        :param conn_str: The connection string of a Service Bus.
        :type conn_str: str
        :keyword str queue_name: The path of specific Service Bus Queue the client connects to.
        :keyword str topic_name: The path of specific Service Bus Topic which contains the Subscription
         the client connects to.
        :keyword str subscription_name: The path of specific Service Bus Subscription under the
         specified Topic the client connects to.
        :keyword receive_mode: The mode with which messages will be retrieved from the entity. The two options
         are PEEK_LOCK and RECEIVE_AND_DELETE. Messages received with PEEK_LOCK must be settled within a given
         lock period before they will be removed from the queue. Messages received with RECEIVE_AND_DELETE
         will be immediately removed from the queue, and cannot be subsequently abandoned or re-received
         if the client fails to process the message.
         The default mode is PEEK_LOCK.
        :paramtype receive_mode: Union[~azure.servicebus.ServiceBusReceiveMode, str]
        :keyword Optional[float] max_wait_time: The timeout in seconds between received messages after which the
         receiver will automatically stop receiving. The default value is None, meaning no timeout.
        :keyword bool logging_enable: Whether to output network trace logs to the logger. Default is `False`.
        :keyword transport_type: The type of transport protocol that will be used for communicating with
         the Service Bus service. Default is `TransportType.Amqp`.
        :paramtype transport_type: ~azure.servicebus.TransportType
        :keyword dict http_proxy: HTTP proxy settings. This must be a dictionary with the following
         keys: `'proxy_hostname'` (str value) and `'proxy_port'` (int value).
         Additionally the following keys may also be present: `'username', 'password'`.
        :keyword str user_agent: If specified, this will be added in front of the built-in user agent string.
        :keyword int prefetch_count: The maximum number of messages to cache with each request to the service.
         This setting is only for advanced performance tuning. Increasing this value will improve message throughput
         performance but increase the chance that messages will expire while they are cached if they're not
         processed fast enough.
         The default value is 0, meaning messages will be received from the service and processed one at a time.
         In the case of prefetch_count being 0, `ServiceBusReceiver.receive_messages` would try to cache `max_message_count`
         (if provided) within its request to the service.
        :rtype: ~azure.servicebus.ServiceBusReceiver

        :raises ~azure.servicebus.ServiceBusAuthenticationError: Indicates an issue in token/identity validity.
        :raises ~azure.servicebus.ServiceBusAuthorizationError: Indicates an access/rights related failure.

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
                :start-after: [START create_servicebus_receiver_from_conn_str_sync]
                :end-before: [END create_servicebus_receiver_from_conn_str_sync]
                :language: python
                :dedent: 4
                :caption: Create a new instance of the ServiceBusReceiver from connection string.

        """
        constructor_args = cls._convert_connection_string_to_kwargs(conn_str, **kwargs)
        if kwargs.get("queue_name") and kwargs.get("subscription_name"):
            raise ValueError("Queue entity does not have subscription.")

        if kwargs.get("topic_name") and not kwargs.get("subscription_name"):
            raise ValueError(
                "Subscription name is missing for the topic. Please specify subscription_name."
            )
        return cls(**constructor_args)

    def _create_handler(self, auth):
        # type: (AMQPAuth) -> None
        self._handler = ReceiveClient(
            self._get_source(),
            auth=auth,
            debug=self._config.logging_enable,
            properties=self._properties,
            error_policy=self._error_policy,
            client_name=self._name,
            on_attach=self._on_attach,
            auto_complete=False,
            encoding=self._config.encoding,
            receive_settle_mode=ServiceBusToAMQPReceiveModeMap[self._receive_mode],
            send_settle_mode=SenderSettleMode.Settled
            if self._receive_mode == ServiceBusReceiveMode.RECEIVE_AND_DELETE
            else None,
            timeout=self._max_wait_time * 1000 if self._max_wait_time else 0,
            prefetch=self._prefetch_count,
            keep_alive_interval=self._config.keep_alive,
            shutdown_after_timeout=False,
        )

    def _open(self):
        # pylint: disable=protected-access
        if self._running:
            return
        if self._handler and not self._handler._shutdown:
            self._handler.close()

        auth = None if self._connection else create_authentication(self)
        self._create_handler(auth)
        try:
            self._handler.open(connection=self._connection)
            while not self._handler.client_ready():
                time.sleep(0.05)
            self._running = True
        except:
            self._close_handler()
            raise

        if self._auto_lock_renewer and self._session:
            self._auto_lock_renewer.register(self, self.session)

    def _receive(self, max_message_count=None, timeout=None):
        # type: (Optional[int], Optional[float]) -> List[ServiceBusReceivedMessage]
        # pylint: disable=protected-access
        self._open()

        amqp_receive_client = self._handler
        received_messages_queue = amqp_receive_client._received_messages
        max_message_count = max_message_count or self._prefetch_count
        timeout_ms = (
            1000 * (timeout or self._max_wait_time)
            if (timeout or self._max_wait_time)
            else 0
        )
        abs_timeout_ms = (
            amqp_receive_client._counter.get_current_ms() + timeout_ms
            if timeout_ms
            else 0
        )

        batch = []  # type: List[Message]
        while not received_messages_queue.empty() and len(batch) < max_message_count:
            batch.append(received_messages_queue.get())
            received_messages_queue.task_done()
        if len(batch) >= max_message_count:
            return [self._build_message(message) for message in batch]

        # Dynamically issue link credit if max_message_count > 1 when the prefetch_count is the default value 1
        if max_message_count and self._prefetch_count == 1 and max_message_count > 1:
            link_credit_needed = max_message_count - len(batch)
            amqp_receive_client.message_handler.reset_link_credit(link_credit_needed)

        first_message_received = expired = False
        receiving = True
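        # Pump the AMQP connection (do_work) until either max_message_count
        # messages have arrived, the absolute timeout expires, or the link stops
        # producing. As soon as the first message(s) arrive, the deadline is
        # shortened to _further_pull_receive_timeout_ms so the call returns
        # promptly instead of waiting out the full timeout on a quiet entity.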
        while receiving and not expired and len(batch) < max_message_count:
            while receiving and received_messages_queue.qsize() < max_message_count:
                if (
                    abs_timeout_ms
                    and amqp_receive_client._counter.get_current_ms() > abs_timeout_ms
                ):
                    expired = True
                    break
                before = received_messages_queue.qsize()
                receiving = amqp_receive_client.do_work()
                received = received_messages_queue.qsize() - before
                if (
                    not first_message_received
                    and received_messages_queue.qsize() > 0
                    and received > 0
                ):
                    # first message(s) received, continue receiving for some time
                    first_message_received = True
                    abs_timeout_ms = (
                        amqp_receive_client._counter.get_current_ms()
                        + self._further_pull_receive_timeout_ms
                    )
            while (
                not received_messages_queue.empty() and len(batch) < max_message_count
            ):
                batch.append(received_messages_queue.get())
                received_messages_queue.task_done()

        return [self._build_message(message) for message in batch]

    def _settle_message_with_retry(
        self,
        message,
        settle_operation,
        dead_letter_reason=None,
        dead_letter_error_description=None,
    ):
        # pylint: disable=protected-access
        self._check_live()
        if not isinstance(message, ServiceBusReceivedMessage):
            raise TypeError(
                "Parameter 'message' must be of type ServiceBusReceivedMessage"
            )
        self._check_message_alive(message, settle_operation)

        # The following condition check is a hot fix for settling a message received for non-session queue after
        # lock expiration.
        # uamqp doesn't have the ability to receive disposition result returned from the service after settlement,
        # so there's no way we could tell whether a disposition succeeds or not and there's no error condition info.
        # Throwing a general message error type here gives us the evolvability to have more fine-grained exception
        # subclasses in the future after we add the missing feature support in uamqp.
        # see issue: https://github.com/Azure/azure-uamqp-c/issues/274
        if not self._session and message._lock_expired:
            raise ServiceBusError(
                message="The lock on the message lock has expired.",
                error=message.auto_renew_error,
            )

        self._do_retryable_operation(
            self._settle_message,
            timeout=None,
            message=message,
            settle_operation=settle_operation,
            dead_letter_reason=dead_letter_reason,
            dead_letter_error_description=dead_letter_error_description,
        )

        message._settled = True

    def _settle_message(
        self,
        message,
        settle_operation,
        dead_letter_reason=None,
        dead_letter_error_description=None,
    ):
        # type: (ServiceBusReceivedMessage, str, Optional[str], Optional[str]) -> None
        # pylint: disable=protected-access
        try:
            if not message._is_deferred_message:
                try:
                    self._settle_message_via_receiver_link(
                        message,
                        settle_operation,
                        dead_letter_reason=dead_letter_reason,
                        dead_letter_error_description=dead_letter_error_description,
                    )()
                    return
                except RuntimeError as exception:
                    _LOGGER.info(
                        "Message settling: %r has encountered an exception (%r)."
                        "Trying to settle through management link",
                        settle_operation,
                        exception,
                    )
            dead_letter_details = (
                {
                    MGMT_REQUEST_DEAD_LETTER_REASON: dead_letter_reason or "",
                    MGMT_REQUEST_DEAD_LETTER_ERROR_DESCRIPTION: dead_letter_error_description
                    or "",
                }
                if settle_operation == MESSAGE_DEAD_LETTER
                else None
            )
            self._settle_message_via_mgmt_link(
                MESSAGE_MGMT_SETTLEMENT_TERM_MAP[settle_operation],
                [message.lock_token],  # type: ignore
                dead_letter_details=dead_letter_details,
            )
        except Exception as exception:
            _LOGGER.info(
                "Message settling: %r has encountered an exception (%r) through management link",
                settle_operation,
                exception,
            )
            raise

    def _settle_message_via_mgmt_link(
        self, settlement, lock_tokens, dead_letter_details=None
    ):
        # type: (str, List[Union[uuid.UUID, str]], Optional[Dict[str, Any]]) -> Any
        message = {
            MGMT_REQUEST_DISPOSITION_STATUS: settlement,
            MGMT_REQUEST_LOCK_TOKENS: types.AMQPArray(lock_tokens),
        }

        self._populate_message_properties(message)
        if dead_letter_details:
            message.update(dead_letter_details)

        # We don't do retry here, retry is done in the ServiceBusReceivedMessage._settle_message
        return self._mgmt_request_response(
            REQUEST_RESPONSE_UPDATE_DISPOSTION_OPERATION, message, mgmt_handlers.default
        )

    def _renew_locks(self, *lock_tokens, **kwargs):
        # type: (str, Any) -> Any
        timeout = kwargs.pop("timeout", None)
        message = {MGMT_REQUEST_LOCK_TOKENS: types.AMQPArray(lock_tokens)}
        return self._mgmt_request_response_with_retry(
            REQUEST_RESPONSE_RENEWLOCK_OPERATION,
            message,
            mgmt_handlers.message_lock_renew_op,
            timeout=timeout,
        )

    def _close_handler(self):
        self._message_iter = None
        super(ServiceBusReceiver, self)._close_handler()

    @property
    def session(self):
        # type: () -> ServiceBusSession
        """
        Get the ServiceBusSession object linked with the receiver. Session is only available to session-enabled
        entities; it returns None if called on a non-sessionful receiver.

        :rtype: ~azure.servicebus.ServiceBusSession

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
                :start-after: [START get_session_sync]
                :end-before: [END get_session_sync]
                :language: python
                :dedent: 4
                :caption: Get session from a receiver
        """
        return self._session  # type: ignore

    def close(self):
        # type: () -> None
        super(ServiceBusReceiver, self).close()
        self._message_iter = None  # pylint: disable=attribute-defined-outside-init

    def _get_streaming_message_iter(self, max_wait_time=None):
        # type: (Optional[float]) -> Iterator[ServiceBusReceivedMessage]
        """Receive messages from an iterator indefinitely, or if a max_wait_time is specified, until
        such a timeout occurs.

        :param max_wait_time: Maximum time to wait in seconds for the next message to arrive.
         If no messages arrive, and no timeout is specified, this call will not return
         until the connection is closed. If specified, and no messages arrive for the
         timeout period, the iterator will stop.
        :type max_wait_time: Optional[float]
        :rtype: Iterator[ServiceBusReceivedMessage]

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
                :start-after: [START receive_forever]
                :end-before: [END receive_forever]
                :language: python
                :dedent: 4
                :caption: Receive indefinitely from an iterator in streaming fashion.
        """
        self._check_live()
        if max_wait_time is not None and max_wait_time <= 0:
            raise ValueError("The max_wait_time must be greater than 0.")
        return self._iter_contextual_wrapper(max_wait_time)

    def receive_messages(self, max_message_count=1, max_wait_time=None):
        # type: (Optional[int], Optional[float]) -> List[ServiceBusReceivedMessage]
        """Receive a batch of messages at once.

        This approach is optimal if you wish to process multiple messages simultaneously, or
        perform an ad-hoc receive as a single call.

        Note that the number of messages retrieved in a single batch will be dependent on
        whether `prefetch_count` was set for the receiver. If `prefetch_count` is not set for the receiver,
        the receiver would try to cache max_message_count (if provided) messages within the request to the service.

        This call will prioritize returning quickly over meeting a specified batch size, and so will
        return as soon as at least one message is received and there is a gap in incoming messages regardless
        of the specified batch size.

        :param Optional[int] max_message_count: Maximum number of messages in the batch. Actual number
         returned will depend on prefetch_count and incoming stream rate.
         If set to None, the number returned depends entirely on the prefetch configuration. The default value is 1.
        :param Optional[float] max_wait_time: Maximum time to wait in seconds for the first message to arrive.
         If no messages arrive, and no timeout is specified, this call will not return
         until the connection is closed. If specified, and no messages arrive within the
         timeout period, an empty list will be returned.

        :rtype: List[~azure.servicebus.ServiceBusReceivedMessage]

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
                :start-after: [START receive_sync]
                :end-before: [END receive_sync]
                :language: python
                :dedent: 4
                :caption: Receive messages from ServiceBus.

        """
        self._check_live()
        if max_wait_time is not None and max_wait_time <= 0:
            raise ValueError("The max_wait_time must be greater than 0.")
        if max_message_count is not None and max_message_count <= 0:
            raise ValueError("The max_message_count must be greater than 0")
        messages = self._do_retryable_operation(
            self._receive,
            max_message_count=max_message_count,
            timeout=max_wait_time,
            operation_requires_timeout=True,
        )
        links = get_receive_links(messages)
        with receive_trace_context_manager(self, links=links):
            if (
                self._auto_lock_renewer
                and not self._session
                and self._receive_mode != ServiceBusReceiveMode.RECEIVE_AND_DELETE
            ):
                for message in messages:
                    self._auto_lock_renewer.register(self, message)
            return messages

    def receive_deferred_messages(self, sequence_numbers, **kwargs):
        # type: (Union[int,List[int]], Any) -> List[ServiceBusReceivedMessage]
        """Receive messages that have previously been deferred.

        When receiving deferred messages from a partitioned entity, all of the supplied
        sequence numbers must be messages from the same partition.

        :param Union[int,List[int]] sequence_numbers: A list of the sequence numbers of messages that have been
         deferred.
        :keyword Optional[float] timeout: The total operation timeout in seconds including all the retries.
         The value must be greater than 0 if specified. The default value is None, meaning no timeout.
        :rtype: List[~azure.servicebus.ServiceBusReceivedMessage]

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
                :start-after: [START receive_defer_sync]
                :end-before: [END receive_defer_sync]
                :language: python
                :dedent: 4
                :caption: Receive deferred messages from ServiceBus.

        """
        self._check_live()
        timeout = kwargs.pop("timeout", None)
        if timeout is not None and timeout <= 0:
            raise ValueError("The timeout must be greater than 0.")
        if isinstance(sequence_numbers, six.integer_types):
            sequence_numbers = [sequence_numbers]
        if len(sequence_numbers) == 0:
            return []  # no-op on empty list.
        self._open()
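        # Map the ServiceBus receive mode to the numeric uamqp settle mode expected by
        # the management operation; the fallback covers the case where the mapped value
        # is already a plain integer rather than a nested enum.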
        uamqp_receive_mode = ServiceBusToAMQPReceiveModeMap[self._receive_mode]
        try:
            receive_mode = uamqp_receive_mode.value.value
        except AttributeError:
            receive_mode = int(uamqp_receive_mode.value)
        message = {
            MGMT_REQUEST_SEQUENCE_NUMBERS: types.AMQPArray(
                [types.AMQPLong(s) for s in sequence_numbers]
            ),
            MGMT_REQUEST_RECEIVER_SETTLE_MODE: types.AMQPuInt(receive_mode),
        }

        self._populate_message_properties(message)

        handler = functools.partial(
            mgmt_handlers.deferred_message_op,
            receive_mode=self._receive_mode,
            receiver=self,
        )
        messages = self._mgmt_request_response_with_retry(
            REQUEST_RESPONSE_RECEIVE_BY_SEQUENCE_NUMBER,
            message,
            handler,
            timeout=timeout,
        )
        links = get_receive_links(messages)
        with receive_trace_context_manager(
            self, span_name=SPAN_NAME_RECEIVE_DEFERRED, links=links
        ):
            if (
                self._auto_lock_renewer
                and not self._session
                and self._receive_mode != ServiceBusReceiveMode.RECEIVE_AND_DELETE
            ):
                for message in messages:
                    self._auto_lock_renewer.register(self, message)
            return messages

    def peek_messages(self, max_message_count=1, **kwargs):
        # type: (int, Any) -> List[ServiceBusReceivedMessage]
        """Browse messages currently pending in the queue.

        Peeked messages are not removed from queue, nor are they locked. They cannot be completed,
        deferred or dead-lettered.

        :param int max_message_count: The maximum number of messages to try and peek. The default
         value is 1.
        :keyword int sequence_number: A message sequence number from which to start browsing messages.
        :keyword Optional[float] timeout: The total operation timeout in seconds including all the retries.
         The value must be greater than 0 if specified. The default value is None, meaning no timeout.

        :rtype: List[~azure.servicebus.ServiceBusReceivedMessage]

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
                :start-after: [START peek_messages_sync]
                :end-before: [END peek_messages_sync]
                :language: python
                :dedent: 4
                :caption: Look at pending messages in the queue.

        """
        self._check_live()
        sequence_number = kwargs.pop("sequence_number", 0)
        timeout = kwargs.pop("timeout", None)
        if timeout is not None and timeout <= 0:
            raise ValueError("The timeout must be greater than 0.")
        if not sequence_number:
            sequence_number = self._last_received_sequenced_number or 1
        if int(max_message_count) < 1:
            raise ValueError("max_message_count must be 1 or greater.")

        self._open()
        message = {
            MGMT_REQUEST_FROM_SEQUENCE_NUMBER: types.AMQPLong(sequence_number),
            MGMT_REQUEST_MAX_MESSAGE_COUNT: max_message_count,
        }

        self._populate_message_properties(message)
        handler = functools.partial(mgmt_handlers.peek_op, receiver=self)
        messages = self._mgmt_request_response_with_retry(
            REQUEST_RESPONSE_PEEK_OPERATION, message, handler, timeout=timeout
        )
        links = get_receive_links(messages)
        with receive_trace_context_manager(
            self, span_name=SPAN_NAME_PEEK, links=links
        ):
            return messages

    def complete_message(self, message):
        """Complete the message.

        This removes the message from the queue.

        :param message: The received message to be completed.
        :type message: ~azure.servicebus.ServiceBusReceivedMessage
        :rtype: None
        :raises: ~azure.servicebus.exceptions.MessageAlreadySettled if the message has been settled.
        :raises: ~azure.servicebus.exceptions.SessionLockLostError if session lock has already expired.
        :raises: ~azure.servicebus.exceptions.ServiceBusError when errors happen.

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
                :start-after: [START complete_message_sync]
                :end-before: [END complete_message_sync]
                :language: python
                :dedent: 4
                :caption: Complete a received message.

        """
        self._settle_message_with_retry(message, MESSAGE_COMPLETE)

    def abandon_message(self, message):
        """Abandon the message.

        This message will be returned to the queue and made available to be received again.

        :param message: The received message to be abandoned.
        :type message: ~azure.servicebus.ServiceBusReceivedMessage
        :rtype: None
        :raises: ~azure.servicebus.exceptions.MessageAlreadySettled if the message has been settled.
        :raises: ~azure.servicebus.exceptions.SessionLockLostError if session lock has already expired.
        :raises: ~azure.servicebus.exceptions.ServiceBusError when errors happen.

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
                :start-after: [START abandon_message_sync]
                :end-before: [END abandon_message_sync]
                :language: python
                :dedent: 4
                :caption: Abandon a received message.

        """
        self._settle_message_with_retry(message, MESSAGE_ABANDON)

    def defer_message(self, message):
        """Defers the message.

        This message will remain in the queue but must be requested
        specifically by its sequence number in order to be received.

        :param message: The received message to be deferred.
        :type message: ~azure.servicebus.ServiceBusReceivedMessage
        :rtype: None
        :raises: ~azure.servicebus.exceptions.MessageAlreadySettled if the message has been settled.
        :raises: ~azure.servicebus.exceptions.SessionLockLostError if session lock has already expired.
        :raises: ~azure.servicebus.exceptions.ServiceBusError when errors happen.

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
                :start-after: [START defer_message_sync]
                :end-before: [END defer_message_sync]
                :language: python
                :dedent: 4
                :caption: Defer a received message.

        """
        self._settle_message_with_retry(message, MESSAGE_DEFER)

    def dead_letter_message(self, message, reason=None, error_description=None):
        """Move the message to the Dead Letter queue.

        The Dead Letter queue is a sub-queue that can be
        used to store messages that failed to process correctly, or otherwise require further inspection
        or processing. The queue can also be configured to send expired messages to the Dead Letter queue.

        :param message: The received message to be dead-lettered.
        :type message: ~azure.servicebus.ServiceBusReceivedMessage
        :param Optional[str] reason: The reason for dead-lettering the message.
        :param Optional[str] error_description: The detailed error description for dead-lettering the message.
        :rtype: None
        :raises: ~azure.servicebus.exceptions.MessageAlreadySettled if the message has been settled.
        :raises: ~azure.servicebus.exceptions.SessionLockLostError if session lock has already expired.
        :raises: ~azure.servicebus.exceptions.ServiceBusError when errors happen.

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
                :start-after: [START dead_letter_message_sync]
                :end-before: [END dead_letter_message_sync]
                :language: python
                :dedent: 4
                :caption: Dead letter a received message.

        """
        self._settle_message_with_retry(
            message,
            MESSAGE_DEAD_LETTER,
            dead_letter_reason=reason,
            dead_letter_error_description=error_description,
        )

    def renew_message_lock(self, message, **kwargs):
        # type: (ServiceBusReceivedMessage, Any) -> datetime.datetime
        # pylint: disable=protected-access,no-member
        """Renew the message lock.

        This will maintain the lock on the message to ensure it is not returned to the queue
        to be reprocessed.

        In order to complete (or otherwise settle) the message, the lock must be maintained,
        and cannot already have expired; an expired lock cannot be renewed.

        Messages received via RECEIVE_AND_DELETE mode are not locked, and therefore cannot be renewed.
        This operation is also only available for non-sessionful messages.

        :param message: The message to renew the lock for.
        :type message: ~azure.servicebus.ServiceBusReceivedMessage
        :keyword Optional[float] timeout: The total operation timeout in seconds including all the retries.
         The value must be greater than 0 if specified. The default value is None, meaning no timeout.
        :returns: The utc datetime the lock is set to expire at.
        :rtype: datetime.datetime
        :raises: TypeError if the message is sessionful.
        :raises: ~azure.servicebus.exceptions.MessageAlreadySettled if the message has been settled.
        :raises: ~azure.servicebus.exceptions.MessageLockLostError if message lock has already expired.

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
                :start-after: [START renew_message_lock_sync]
                :end-before: [END renew_message_lock_sync]
                :language: python
                :dedent: 4
                :caption: Renew the lock on a received message.

        """
        # type: ignore
        try:
            if self.session:
                raise TypeError(
                    "Renewing message lock is an invalid operation when working with sessions."
                    "Please renew the session lock instead."
                )
        except AttributeError:
            pass
        self._check_live()
        self._check_message_alive(message, MESSAGE_RENEW_LOCK)
        token = message.lock_token
        if not token:
            raise ValueError("Unable to renew lock - no lock token found.")

        timeout = kwargs.pop("timeout", None)
        if timeout is not None and timeout <= 0:
            raise ValueError("The timeout must be greater than 0.")

        expiry = self._renew_locks(token, timeout=timeout)  # type: ignore
        message._expiry = utc_from_timestamp(
            expiry[MGMT_RESPONSE_MESSAGE_EXPIRATION][0] / 1000.0
        )

        return message._expiry  # type: ignore
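A minimal usage sketch for the settlement methods above, assuming the public `ServiceBusClient` entry point of the same library; the connection string, queue name and `process` callable are placeholders:

from azure.servicebus import ServiceBusClient

# Placeholders: swap in a real connection string, queue name and handler.
with ServiceBusClient.from_connection_string("<CONNECTION_STR>") as sb_client:
    with sb_client.get_queue_receiver(queue_name="<QUEUE_NAME>") as receiver:
        for message in receiver:
            # Keep the lock alive in case processing runs long.
            receiver.renew_message_lock(message)
            try:
                process(message)  # hypothetical application logic
                receiver.complete_message(message)
            except ValueError:
                receiver.dead_letter_message(
                    message,
                    reason="processing-error",
                    error_description="Payload failed validation.")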
Example No. 7
class EventHubConsumer(ConsumerProducerMixin):  # pylint:disable=too-many-instance-attributes
    """
    A consumer responsible for reading EventData from a specific Event Hub
    partition and as a member of a specific consumer group.

    A consumer may be exclusive, which asserts ownership over the partition for the consumer
    group to ensure that only one consumer from that group is reading from the partition.
    These exclusive consumers are sometimes referred to as "Epoch Consumers."

    A consumer may also be non-exclusive, allowing multiple consumers from the same consumer
    group to be actively reading events from the partition.  These non-exclusive consumers are
    sometimes referred to as "Non-Epoch Consumers."

    Please use the `create_consumer` method on `EventHubClient` to create an `EventHubConsumer`.
    """
    def __init__(self, client, source, **kwargs):
        """
        Instantiate a consumer. EventHubConsumer should be instantiated by calling the `create_consumer` method
        in EventHubClient.

        :param client: The parent EventHubClient.
        :type client: ~azure.eventhub.client.EventHubClient
        :param source: The source EventHub from which to receive events.
        :type source: str
        :param prefetch: The number of events to prefetch from the service
         for processing. Default is 300.
        :type prefetch: int
        :param owner_level: The priority of the exclusive consumer. An exclusive
         consumer will be created if owner_level is set.
        :type owner_level: int
        :param track_last_enqueued_event_properties: Indicates whether or not the consumer should request information
         on the last enqueued event on its associated partition, and track that information as events are received.
         When information about the partition's last enqueued event is being tracked, each event received from the
         Event Hubs service will carry metadata about the partition. This results in a small amount of additional
         network bandwidth consumption that is generally a favorable trade-off when considered against periodically
         making requests for partition properties using the Event Hub client.
         It is set to `False` by default.
        :type track_last_enqueued_event_properties: bool
        """
        event_position = kwargs.get("event_position", None)
        prefetch = kwargs.get("prefetch", 300)
        owner_level = kwargs.get("owner_level", None)
        keep_alive = kwargs.get("keep_alive", None)
        auto_reconnect = kwargs.get("auto_reconnect", True)
        track_last_enqueued_event_properties = kwargs.get(
            "track_last_enqueued_event_properties", False)
        idle_timeout = kwargs.get("idle_timeout", None)

        self.running = False
        self.closed = False
        self.stop = False  # used by event processor
        self.handler_ready = False

        self._on_event_received = kwargs.get("on_event_received")
        self._client = client
        self._source = source
        self._offset = event_position
        self._offset_inclusive = kwargs.get("event_position_inclusive", False)
        self._prefetch = prefetch
        self._owner_level = owner_level
        self._keep_alive = keep_alive
        self._auto_reconnect = auto_reconnect
        self._retry_policy = errors.ErrorPolicy(
            max_retries=self._client._config.max_retries,
            on_error=_error_handler)  # pylint:disable=protected-access
        self._reconnect_backoff = 1
        self._link_properties = {}
        self._error = None
        self._timeout = 0
        self._idle_timeout = (idle_timeout * 1000) if idle_timeout else None
        partition = self._source.split('/')[-1]
        self._partition = partition
        self._name = "EHConsumer-{}-partition{}".format(
            uuid.uuid4(), partition)
        if owner_level:
            self._link_properties[types.AMQPSymbol(
                EPOCH_SYMBOL)] = types.AMQPLong(int(owner_level))
        link_property_timeout_ms = (self._client._config.receive_timeout
                                    or self._timeout) * 1000  # pylint:disable=protected-access
        self._link_properties[types.AMQPSymbol(
            TIMEOUT_SYMBOL)] = types.AMQPLong(int(link_property_timeout_ms))
        self._handler = None
        self._track_last_enqueued_event_properties = track_last_enqueued_event_properties
        self._last_received_event = None

    def _create_handler(self, auth):
        source = Source(self._source)
        if self._offset is not None:
            source.set_filter(
                event_position_selector(self._offset, self._offset_inclusive))
        desired_capabilities = None
        if self._track_last_enqueued_event_properties:
            symbol_array = [types.AMQPSymbol(RECEIVER_RUNTIME_METRIC_SYMBOL)]
            desired_capabilities = utils.data_factory(
                types.AMQPArray(symbol_array))

        properties = create_properties(self._client._config.user_agent)  # pylint:disable=protected-access
        self._handler = ReceiveClient(
            source,
            auth=auth,
            debug=self._client._config.network_tracing,  # pylint:disable=protected-access
            prefetch=self._prefetch,
            link_properties=self._link_properties,
            timeout=self._timeout,
            idle_timeout=self._idle_timeout,
            error_policy=self._retry_policy,
            keep_alive_interval=self._keep_alive,
            client_name=self._name,
            receive_settle_mode=uamqp.constants.ReceiverSettleMode.ReceiveAndDelete,
            auto_complete=False,
            properties=properties,
            desired_capabilities=desired_capabilities)

        self._handler._streaming_receive = True  # pylint:disable=protected-access
        self._handler._message_received_callback = self._message_received  # pylint:disable=protected-access

    def _open_with_retry(self):
        return self._do_retryable_operation(self._open,
                                            operation_need_param=False)

    def _message_received(self, message):
        # pylint:disable=protected-access
        event_data = EventData._from_message(message)
        trace_link_message(event_data)
        self._last_received_event = event_data
        self._on_event_received(event_data)

    def _open(self):
        """Open the EventHubConsumer/EventHubProducer using the supplied connection.

        """
        # pylint: disable=protected-access
        if not self.running:
            if self._handler:
                self._handler.close()
            auth = self._client._create_auth()
            self._create_handler(auth)
            self._handler.open(
                connection=self._client._conn_manager.get_connection(
                    self._client._address.hostname, auth)  # pylint: disable=protected-access
            )
            self.handler_ready = False
            self.running = True

        if not self.handler_ready:
            if self._handler.client_ready():
                self.handler_ready = True
        return self.handler_ready

    def receive(self):
        retried_times = 0
        last_exception = None
        max_retries = self._client._config.max_retries  # pylint:disable=protected-access

        while retried_times <= max_retries:
            try:
                if self._open():
                    self._handler.do_work()
                return
            except Exception as exception:  # pylint: disable=broad-except
                if isinstance(exception, uamqp.errors.LinkDetach) and \
                        exception.condition == uamqp.constants.ErrorCodes.LinkStolen:  # pylint: disable=no-member
                    raise self._handle_exception(exception)
                if not self.running:  # exit by close
                    return
                if self._last_received_event:
                    self._offset = self._last_received_event.offset
                last_exception = self._handle_exception(exception)
                retried_times += 1
                if retried_times > max_retries:
                    _LOGGER.info(
                        "%r operation has exhausted retry. Last exception: %r.",
                        self._name, last_exception)
                    raise last_exception
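The consumer above is an internal component; below is a sketch of how events typically reach application code through the public `EventHubConsumerClient` (assuming that API; the connection string, event hub name and consumer group are placeholders):

from azure.eventhub import EventHubConsumerClient

def on_event(partition_context, event):
    # Print the partition and body, then checkpoint progress.
    print(partition_context.partition_id, event.body_as_str())
    partition_context.update_checkpoint(event)

consumer_client = EventHubConsumerClient.from_connection_string(
    "<CONNECTION_STR>",
    consumer_group="$Default",
    eventhub_name="<EVENT_HUB_NAME>")
with consumer_client:
    # Blocks and dispatches events from all partitions to on_event.
    consumer_client.receive(on_event=on_event, starting_position="-1")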
Example No. 8
class Receiver:
    """
    Implements a Receiver.
    """
    timeout = 0
    _epoch = b'com.microsoft:epoch'

    def __init__(self, client, source, prefetch=300, epoch=None):
        """
        Instantiate a receiver.

        :param client: The parent EventHubClient.
        :type client: ~azure.eventhub.client.EventHubClient
        :param source: The source EventHub from which to receive events.
        :type source: ~uamqp.address.Source
        :param prefetch: The number of events to prefetch from the service
         for processing. Default is 300.
        :type prefetch: int
        :param epoch: An optional epoch value.
        :type epoch: int
        """
        self.offset = None
        self.prefetch = prefetch
        self.epoch = epoch
        self.properties = None
        self.redirected = None
        self.debug = client.debug
        self.error = None
        if epoch:
            self.properties = {
                types.AMQPSymbol(self._epoch): types.AMQPLong(int(epoch))
            }
        self._handler = ReceiveClient(source,
                                      auth=client.auth,
                                      debug=self.debug,
                                      prefetch=self.prefetch,
                                      link_properties=self.properties,
                                      timeout=self.timeout)

    def open(self, connection):
        """
        Open the Receiver using the supplied connection.
        If the handler has previously been redirected, the redirect
        context will be used to create a new handler before opening it.

        :param connection: The underlying client shared connection.
        :type: connection: ~uamqp.connection.Connection
        """
        if self.redirected:
            self._handler = ReceiveClient(self.redirected.address,
                                          auth=None,
                                          debug=self.debug,
                                          prefetch=self.prefetch,
                                          link_properties=self.properties,
                                          timeout=self.timeout)
        self._handler.open(connection)

    def get_handler_state(self):
        """
        Get the state of the underlying handler with regards to start
        up processes.

        :rtype: ~uamqp.constants.MessageReceiverState
        """
        # pylint: disable=protected-access
        return self._handler._message_receiver.get_state()

    def has_started(self):
        """
        Whether the handler has completed all start up processes such as
        establishing the connection, session, link and authentication, and
        is now ready to process messages.

        :rtype: bool
        """
        # pylint: disable=protected-access
        timeout = False
        auth_in_progress = False
        if self._handler._connection.cbs:
            timeout, auth_in_progress = self._handler._auth.handle_token()
        if timeout:
            raise EventHubError("Authorization timeout.")
        elif auth_in_progress:
            return False
        elif not self._handler._client_ready():
            return False
        else:
            return True

    def close(self, exception=None):
        """
        Close down the handler. If the handler has already closed,
        this will be a no op. An optional exception can be passed in to
        indicate that the handler was shutdown due to error.

        :param exception: An optional exception if the handler is closing
         due to an error.
        :type exception: Exception
        """
        if self.error:
            return
        elif isinstance(exception, errors.LinkRedirect):
            self.redirected = exception
        elif isinstance(exception, EventHubError):
            self.error = exception
        elif exception:
            self.error = EventHubError(str(exception))
        else:
            self.error = EventHubError("This receive handler is now closed.")
        self._handler.close()

    @property
    def queue_size(self):
        """
        The current size of the unprocessed Event queue.

        :rtype: int
        """
        # pylint: disable=protected-access
        if self._handler._received_messages:
            return self._handler._received_messages.qsize()
        return 0

    def receive(self, max_batch_size=None, timeout=None):
        """
        Receive events from the EventHub.

        :param max_batch_size: Receive a batch of events. Batch size will
         be up to the maximum specified, but will return as soon as service
         returns no new events. If combined with a timeout and no events are
         retrieved within that time, the result will be empty. If no batch
         size is supplied, the prefetch size will be the maximum.
        :type max_batch_size: int
        :rtype: list[~azure.eventhub.common.EventData]
        """
        if self.error:
            raise self.error
        try:
            timeout_ms = 1000 * timeout if timeout else 0
            message_batch = self._handler.receive_message_batch(
                max_batch_size=max_batch_size, timeout=timeout_ms)
            data_batch = []
            for message in message_batch:
                event_data = EventData(message=message)
                self.offset = event_data.offset
                data_batch.append(event_data)
            return data_batch
        except errors.LinkDetach as detach:
            error = EventHubError(str(detach))
            self.close(exception=error)
            raise error
        except Exception as e:
            error = EventHubError("Receive failed: {}".format(e))
            self.close(exception=error)
            raise error

    def selector(self, default):
        """
        Create a selector for the current offset if it is set.

        :param default: The fallback receive offset.
        :type default: ~azure.eventhub.common.Offset
        :rtype: ~azure.eventhub.common.Offset
        """
        if self.offset is not None:
            return Offset(self.offset).selector()
        return default
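A sketch of a polling loop against a receiver of this shape; `receiver` is assumed to be an already-opened instance and `process_event` a placeholder for application logic:

while True:
    batch = receiver.receive(max_batch_size=100, timeout=5)
    if not batch:
        break
    for event_data in batch:
        process_event(event_data)  # hypothetical handler
# receiver.offset now tracks the last event received, so a restart can
# resume from receiver.selector(default=Offset("-1")).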
Example No. 9
class Receiver(BaseHandler):  # pylint: disable=too-many-instance-attributes
    """A message receiver.

    This receive handler acts as an iterable message stream for retrieving
    messages for a Service Bus entity. It operates a single connection that must be opened and
    closed on completion. The service connection will remain open for the entirety of the iterator.
    If you find yourself only partially iterating the message stream, you should run the receiver
    in a `with` statement to ensure the connection is closed.
    The Receiver should not be instantiated directly, and should be accessed from a `QueueClient` or
    `SubscriptionClient` using the `get_receiver()` method.

    .. note:: This object is not thread-safe.

    :param handler_id: The ID used as the connection name for the Receiver.
    :type handler_id: str
    :param source: The endpoint from which to receive messages.
    :type source: ~uamqp.Source
    :param auth_config: The SASL auth credentials.
    :type auth_config: dict[str, str]
    :param connection: A shared connection [not yet supported].
    :type connection: ~uamqp.Connection
    :param mode: The receive connection mode. Value must be either PeekLock or ReceiveAndDelete.
    :type mode: ~azure.servicebus.common.constants.ReceiveSettleMode
    :param encoding: The encoding used for string properties. Default is 'UTF-8'.
    :type encoding: str
    :param debug: Whether to enable network trace debug logs.
    :type debug: bool

    Example:
        .. literalinclude:: ../examples/test_examples.py
            :start-after: [START get_receiver]
            :end-before: [END get_receiver]
            :language: python
            :dedent: 4
            :caption: Get the receiver client from Service Bus client

    """

    def __init__(
            self, handler_id, source, auth_config, connection=None,
            mode=ReceiveSettleMode.PeekLock, encoding='UTF-8', debug=False, **kwargs):
        self._used = threading.Event()
        self.name = "SBReceiver-{}".format(handler_id)
        self.last_received = None
        self.mode = mode
        self.message_iter = None
        super(Receiver, self).__init__(
            source, auth_config, connection=connection, encoding=encoding, debug=debug, **kwargs)

    def __iter__(self):
        return self

    def __next__(self):
        self._can_run()
        while True:
            if self.receiver_shutdown:
                self.close()
                raise StopIteration
            try:
                received = next(self.message_iter)
                wrapped = self._build_message(received)
                return wrapped
            except StopIteration:
                self.close()
                raise
            except Exception as e:  # pylint: disable=broad-except
                self._handle_exception(e)

    def _build_handler(self):
        auth = None if self.connection else authentication.SASTokenAuth.from_shared_access_key(**self.auth_config)
        self._handler = ReceiveClient(
            self.endpoint,
            auth=auth,
            debug=self.debug,
            properties=self.properties,
            error_policy=self.error_policy,
            client_name=self.name,
            auto_complete=False,
            encoding=self.encoding,
            **self.handler_kwargs)

    def _build_message(self, received):
        message = Message(None, message=received)
        message._receiver = self  # pylint: disable=protected-access
        self.last_received = message.sequence_number
        return message

    def _can_run(self):
        if self._used.is_set():
            raise InvalidHandlerState("Receiver has already closed.")
        if self.receiver_shutdown:
            self.close()
            raise InvalidHandlerState("Receiver has already closed.")
        if not self.running:
            self.open()

    def _renew_locks(self, *lock_tokens):
        message = {'lock-tokens': types.AMQPArray(lock_tokens)}
        return self._mgmt_request_response(
            REQUEST_RESPONSE_RENEWLOCK_OPERATION,
            message,
            mgmt_handlers.lock_renew_op)

    def _settle_deferred(self, settlement, lock_tokens, dead_letter_details=None):
        message = {
            'disposition-status': settlement,
            'lock-tokens': types.AMQPArray(lock_tokens)}
        if dead_letter_details:
            message.update(dead_letter_details)
        return self._mgmt_request_response(
            REQUEST_RESPONSE_UPDATE_DISPOSTION_OPERATION,
            message,
            mgmt_handlers.default)

    def _build_receiver(self):
        """This is a temporary patch pending a fix in uAMQP."""
        # pylint: disable=protected-access
        self._handler.message_handler = self._handler.receiver_type(
            self._handler._session,
            self._handler._remote_address,
            self._handler._name,
            on_message_received=self._handler._message_received,
            name='receiver-link-{}'.format(uuid.uuid4()),
            debug=self._handler._debug_trace,
            prefetch=self._handler._prefetch,
            max_message_size=self._handler._max_message_size,
            properties=self._handler._link_properties,
            error_policy=self._handler._error_policy,
            encoding=self._handler._encoding)
        if self.mode != ReceiveSettleMode.PeekLock:
            self._handler.message_handler.send_settle_mode = constants.SenderSettleMode.Settled
            self._handler.message_handler.receive_settle_mode = constants.ReceiverSettleMode.ReceiveAndDelete
            self._handler.message_handler._settle_mode = constants.ReceiverSettleMode.ReceiveAndDelete
        self._handler.message_handler.open()

    def next(self):
        return self.__next__()

    @property
    def receiver_shutdown(self):
        if self._handler:
            return self._handler._shutdown  # pylint: disable=protected-access
        return True

    @receiver_shutdown.setter
    def receiver_shutdown(self, value):
        if self._handler:
            self._handler._shutdown = value  # pylint: disable=protected-access
        else:
            raise ValueError("Receiver has no AMQP handler")

    @property
    def queue_size(self):
        """The current size of the unprocessed message queue.

        :rtype: int

        Example:
            .. literalinclude:: ../examples/test_examples.py
                :start-after: [START queue_size]
                :end-before: [END queue_size]
                :language: python
                :dedent: 4
                :caption: Get the number of unprocessed messages in the queue

        """
        # pylint: disable=protected-access
        if self._handler._received_messages:
            return self._handler._received_messages.qsize()
        return 0

    def peek(self, count=1, start_from=None):
        """Browse messages currently pending in the queue.

        Peeked messages are not removed from the queue, nor are they locked. They cannot be completed,
        deferred or dead-lettered.

        :param count: The maximum number of messages to try and peek. The default
         value is 1.
        :type count: int
        :param start_from: A message sequence number from which to start browsing messages.
        :type start_from: int
        :rtype: list[~azure.servicebus.common.message.PeekMessage]

        Example:
            .. literalinclude:: ../examples/test_examples.py
                :start-after: [START peek_messages]
                :end-before: [END peek_messages]
                :language: python
                :dedent: 4
                :caption: Look at pending messages in the queue

        """
        if not start_from:
            start_from = self.last_received or 1
        if int(count) < 1:
            raise ValueError("count must be 1 or greater.")
        if int(start_from) < 1:
            raise ValueError("start_from must be 1 or greater.")

        self._can_run()
        message = {
            'from-sequence-number': types.AMQPLong(start_from),
            'message-count': count
        }
        return self._mgmt_request_response(
            REQUEST_RESPONSE_PEEK_OPERATION,
            message,
            mgmt_handlers.peek_op)

    def receive_deferred_messages(self, sequence_numbers, mode=ReceiveSettleMode.PeekLock):
        """Receive messages that have previously been deferred.

        When receiving deferred messages from a partitioned entity, all of the supplied
        sequence numbers must be messages from the same partition.

        :param sequence_numbers: A list of the sequence numbers of messages that have been
         deferred.
        :type sequence_numbers: list[int]
        :param mode: The receive mode, default value is PeekLock.
        :type mode: ~azure.servicebus.common.constants.ReceiveSettleMode
        :rtype: list[~azure.servicebus.common.message.DeferredMessage]

        Example:
            .. literalinclude:: ../examples/test_examples.py
                :start-after: [START receive_deferred_messages]
                :end-before: [END receive_deferred_messages]
                :language: python
                :dedent: 8
                :caption: Get the messages which were previously deferred

        """
        if not sequence_numbers:
            raise ValueError("At least one sequence number must be specified.")
        self._can_run()
        try:
            receive_mode = mode.value.value
        except AttributeError:
            receive_mode = int(mode)
        message = {
            'sequence-numbers': types.AMQPArray([types.AMQPLong(s) for s in sequence_numbers]),
            'receiver-settle-mode': types.AMQPuInt(receive_mode)
        }
        handler = functools.partial(mgmt_handlers.deferred_message_op, mode=receive_mode)
        messages = self._mgmt_request_response(
            REQUEST_RESPONSE_RECEIVE_BY_SEQUENCE_NUMBER,
            message,
            handler)
        for m in messages:
            m._receiver = self  # pylint: disable=protected-access
        return messages

    def open(self):
        """Open receiver connection and authenticate session.

        If the receiver is already open, this operation will do nothing.
        This method will be called automatically when one starts to iterate
        messages in the receiver, so there should be no need to call it directly.
        A receiver opened with this method must be explicitly closed.
        It is recommended to open a handler within a context manager as
        opposed to calling the method directly.

        .. note:: This operation is not thread-safe.

        """
        if self.running:
            return
        self.running = True
        try:
            self._handler.open(connection=self.connection)
            self.message_iter = self._handler.receive_messages_iter()
            while not self._handler.auth_complete():
                time.sleep(0.05)
            self._build_receiver()
            while not self._handler.client_ready():
                time.sleep(0.05)
        except Exception as e:  # pylint: disable=broad-except
            try:
                self._handle_exception(e)
            except:
                self.running = False
                raise

    def close(self, exception=None):
        """Close down the receiver connection.

        If the receiver has already closed, this operation will do nothing. An optional exception can be passed in to
        indicate that the handler was shutdown due to error.
        It is recommended to open a handler within a context manager as
        opposed to calling the method directly.
        The receiver will be implicitly closed on completion of the message iterator,
        however this method will need to be called explicitly if the message iterator is not run
        to completion.

        .. note:: This operation is not thread-safe.

        :param exception: An optional exception if the handler is closing
         due to an error.
        :type exception: Exception

        Example:
            .. literalinclude:: ../examples/test_examples.py
                :start-after: [START open_close_receiver_connection]
                :end-before: [END open_close_receiver_connection]
                :language: python
                :dedent: 4
                :caption: Close the connection and shutdown the receiver

        """
        if not self.running:
            return
        self.running = False
        self.receiver_shutdown = True
        self._used.set()
        super(Receiver, self).close(exception=exception)

    def fetch_next(self, max_batch_size=None, timeout=None):
        """Receive a batch of messages at once.

        This approach is optimal if you wish to process multiple messages simultaneously. Note that the
        number of messages retrieved in a single batch will be dependent on
        whether `prefetch` was set for the receiver. This call will prioritize returning
        quickly over meeting a specified batch size, and so will return as soon as at least
        one message is received and there is a gap in incoming messages regardless
        of the specified batch size.

        :param max_batch_size: Maximum number of messages in the batch. Actual number
         returned will depend on prefetch size and incoming stream rate.
        :type max_batch_size: int
        :param timeout: The time to wait in seconds for the first message to arrive.
         If no messages arrive, and no timeout is specified, this call will not return
         until the connection is closed. If specified, and no messages arrive within the
         timeout period, an empty list will be returned.
        :rtype: list[~azure.servicebus.common.message.Message]

        Example:
            .. literalinclude:: ../examples/test_examples.py
                :start-after: [START fetch_next_messages]
                :end-before: [END fetch_next_messages]
                :language: python
                :dedent: 4
                :caption: Get the messages in batch from the receiver

        """
        self._can_run()
        wrapped_batch = []
        max_batch_size = max_batch_size or self._handler._prefetch  # pylint: disable=protected-access
        try:
            timeout_ms = 1000 * timeout if timeout else 0
            batch = self._handler.receive_message_batch(
                max_batch_size=max_batch_size,
                timeout=timeout_ms)
            for received in batch:
                message = self._build_message(received)
                wrapped_batch.append(message)
        except Exception as e:  # pylint: disable=broad-except
            self._handle_exception(e)
        return wrapped_batch
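A sketch of the usage pattern the class docstring describes, assuming the older QueueClient entry point of the same library and its `Message.complete()` settlement method; the connection string and queue name are placeholders:

from azure.servicebus import ServiceBusClient

sb_client = ServiceBusClient.from_connection_string("<CONNECTION_STR>")
queue_client = sb_client.get_queue("<QUEUE_NAME>")
with queue_client.get_receiver() as receiver:
    # Pull an explicit batch, then settle each message.
    for message in receiver.fetch_next(max_batch_size=10, timeout=5):
        message.complete()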
Example No. 10
class Receiver(BaseHandler):  # pylint: disable=too-many-instance-attributes
    """A message receiver.

    This receive handler acts as an iterable message stream for retrieving
    messages for a Service Bus entity. It operates a single connection that must be opened and
    closed on completion. The service connection will remain open for the entirety of the iterator.
    If you find yourself only partially iterating the message stream, you should run the receiver
    in a `with` statement to ensure the connection is closed.
    The Receiver should not be instantiated directly, and should be accessed from a `QueueClient` or
    `SubscriptionClient` using the `get_receiver()` method.

    .. note:: This object is not thread-safe.

    :param handler_id: The ID used as the connection name for the Receiver.
    :type handler_id: str
    :param source: The endpoint from which to receive messages.
    :type source: ~uamqp.Source
    :param auth_config: The SASL auth credentials.
    :type auth_config: dict[str, str]
    :param connection: A shared connection [not yet supported].
    :type connection: ~uamqp.Connection
    :param mode: The receive connection mode. Value must be either PeekLock or ReceiveAndDelete.
    :type mode: ~azure.servicebus.common.constants.ReceiveSettleMode
    :param encoding: The encoding used for string properties. Default is 'UTF-8'.
    :type encoding: str
    :param debug: Whether to enable network trace debug logs.
    :type debug: bool

    .. admonition:: Example:

        .. literalinclude:: ../samples/sync_samples/test_examples.py
            :start-after: [START get_receiver]
            :end-before: [END get_receiver]
            :language: python
            :dedent: 4
            :caption: Get the receiver client from Service Bus client

    """
    def __init__(self,
                 handler_id,
                 source,
                 auth_config,
                 connection=None,
                 mode=ReceiveSettleMode.PeekLock,
                 encoding='UTF-8',
                 debug=False,
                 **kwargs):
        self._used = threading.Event()
        self.name = "SBReceiver-{}".format(handler_id)
        self.last_received = None
        self.mode = mode
        self.message_iter = None
        super(Receiver, self).__init__(source,
                                       auth_config,
                                       connection=connection,
                                       encoding=encoding,
                                       debug=debug,
                                       **kwargs)

    def __iter__(self):
        return self

    def __next__(self):
        self._can_run()
        while True:
            if self.receiver_shutdown:
                self.close()
                raise StopIteration
            try:
                received = next(self.message_iter)
                wrapped = self._build_message(received)
                return wrapped
            except StopIteration:
                self.close()
                raise
            except Exception as e:  # pylint: disable=broad-except
                self._handle_exception(e)

    def _build_handler(self):
        auth = None if self.connection else authentication.SASTokenAuth.from_shared_access_key(
            **self.auth_config)
        self._handler = ReceiveClient(self.endpoint,
                                      auth=auth,
                                      debug=self.debug,
                                      properties=self.properties,
                                      error_policy=self.error_policy,
                                      client_name=self.name,
                                      auto_complete=False,
                                      encoding=self.encoding,
                                      **self.handler_kwargs)

    def _build_message(self, received):
        message = Message(None, message=received)
        message._receiver = self  # pylint: disable=protected-access
        self.last_received = message.sequence_number
        return message

    def _can_run(self):
        if self._used.is_set():
            raise InvalidHandlerState("Receiver has already closed.")
        if self.receiver_shutdown:
            self.close()
            raise InvalidHandlerState("Receiver has already closed.")
        if not self.running:
            self.open()

    def _renew_locks(self, *lock_tokens):
        message = {'lock-tokens': types.AMQPArray(lock_tokens)}
        return self._mgmt_request_response(
            REQUEST_RESPONSE_RENEWLOCK_OPERATION, message,
            mgmt_handlers.lock_renew_op)

    def _settle_deferred(self,
                         settlement,
                         lock_tokens,
                         dead_letter_details=None):
        message = {
            'disposition-status': settlement,
            'lock-tokens': types.AMQPArray(lock_tokens)
        }
        if dead_letter_details:
            message.update(dead_letter_details)
        return self._mgmt_request_response(
            REQUEST_RESPONSE_UPDATE_DISPOSTION_OPERATION, message,
            mgmt_handlers.default)

    def _build_receiver(self):
        """This is a temporary patch pending a fix in uAMQP."""
        # pylint: disable=protected-access
        self._handler.message_handler = self._handler.receiver_type(
            self._handler._session,
            self._handler._remote_address,
            self._handler._name,
            on_message_received=self._handler._message_received,
            name='receiver-link-{}'.format(uuid.uuid4()),
            debug=self._handler._debug_trace,
            prefetch=self._handler._prefetch,
            max_message_size=self._handler._max_message_size,
            properties=self._handler._link_properties,
            error_policy=self._handler._error_policy,
            encoding=self._handler._encoding)
        if self.mode != ReceiveSettleMode.PeekLock:
            self._handler.message_handler.send_settle_mode = constants.SenderSettleMode.Settled
            self._handler.message_handler.receive_settle_mode = constants.ReceiverSettleMode.ReceiveAndDelete
            self._handler.message_handler._settle_mode = constants.ReceiverSettleMode.ReceiveAndDelete
        self._handler.message_handler.open()

    def next(self):
        return self.__next__()

    @property
    def receiver_shutdown(self):
        if self._handler:
            return self._handler._shutdown  # pylint: disable=protected-access
        return True

    @receiver_shutdown.setter
    def receiver_shutdown(self, value):
        if self._handler:
            self._handler._shutdown = value  # pylint: disable=protected-access
        else:
            raise ValueError("Receiver has no AMQP handler")

    @property
    def queue_size(self):
        """The current size of the unprocessed message queue.

        :rtype: int

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/test_examples.py
                :start-after: [START queue_size]
                :end-before: [END queue_size]
                :language: python
                :dedent: 4
                :caption: Get the number of unprocessed messages in the queue

        """
        # pylint: disable=protected-access
        if self._handler._received_messages:
            return self._handler._received_messages.qsize()
        return 0

    def peek(self, count=1, start_from=None):
        """Browse messages currently pending in the queue.

        Peeked messages are not removed from the queue, nor are they locked. They cannot be completed,
        deferred or dead-lettered.

        :param count: The maximum number of messages to try and peek. The default
         value is 1.
        :type count: int
        :param start_from: A message sequence number from which to start browsing messages.
        :type start_from: int
        :rtype: list[~azure.servicebus.common.message.PeekMessage]

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/test_examples.py
                :start-after: [START peek_messages]
                :end-before: [END peek_messages]
                :language: python
                :dedent: 4
                :caption: Look at pending messages in the queue

        """
        if not start_from:
            start_from = self.last_received or 1
        if int(count) < 1:
            raise ValueError("count must be 1 or greater.")
        if int(start_from) < 1:
            raise ValueError("start_from must be 1 or greater.")

        self._can_run()
        message = {
            'from-sequence-number': types.AMQPLong(start_from),
            'message-count': count
        }
        return self._mgmt_request_response(REQUEST_RESPONSE_PEEK_OPERATION,
                                           message, mgmt_handlers.peek_op)

    def receive_deferred_messages(self,
                                  sequence_numbers,
                                  mode=ReceiveSettleMode.PeekLock):
        """Receive messages that have previously been deferred.

        When receiving deferred messages from a partitioned entity, all of the supplied
        sequence numbers must be messages from the same partition.

        :param sequence_numbers: A list of the sequence numbers of messages that have been
         deferred.
        :type sequence_numbers: list[int]
        :param mode: The receive mode, default value is PeekLock.
        :type mode: ~azure.servicebus.common.constants.ReceiveSettleMode
        :rtype: list[~azure.servicebus.common.message.DeferredMessage]

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/test_examples.py
                :start-after: [START receive_deferred_messages]
                :end-before: [END receive_deferred_messages]
                :language: python
                :dedent: 8
                :caption: Get the messages which were previously deferred

        """
        if not sequence_numbers:
            raise ValueError("At least one sequence number must be specified.")
        self._can_run()
        try:
            receive_mode = mode.value.value
        except AttributeError:
            receive_mode = int(mode)
        message = {
            'sequence-numbers':
            types.AMQPArray([types.AMQPLong(s) for s in sequence_numbers]),
            'receiver-settle-mode':
            types.AMQPuInt(receive_mode)
        }
        handler = functools.partial(mgmt_handlers.deferred_message_op,
                                    mode=receive_mode)
        messages = self._mgmt_request_response(
            REQUEST_RESPONSE_RECEIVE_BY_SEQUENCE_NUMBER, message, handler)
        for m in messages:
            m._receiver = self  # pylint: disable=protected-access
        return messages

    def open(self):
        """Open receiver connection and authenticate session.

        If the receiver is already open, this operation will do nothing.
        This method will be called automatically when one starts to iterate
        messages in the receiver, so there should be no need to call it directly.
        A receiver opened with this method must be explicitly closed.
        It is recommended to open a handler within a context manager as
        opposed to calling the method directly.

        .. note:: This operation is not thread-safe.

        """
        if self.running:
            return
        self.running = True
        try:
            self._handler.open(connection=self.connection)
            self.message_iter = self._handler.receive_messages_iter()
            while not self._handler.auth_complete():
                time.sleep(0.05)
            self._build_receiver()
            while not self._handler.client_ready():
                time.sleep(0.05)
        except Exception as e:  # pylint: disable=broad-except
            try:
                self._handle_exception(e)
            except:
                self.running = False
                raise

    def close(self, exception=None):
        """Close down the receiver connection.

        If the receiver has already closed, this operation will do nothing. An optional exception can be passed in to
        indicate that the handler was shutdown due to error.
        It is recommended to open a handler within a context manager as
        opposed to calling the method directly.
        The receiver will be implicitly closed on completion of the message iterator,
        however this method will need to be called explicitly if the message iterator is not run
        to completion.

        .. note:: This operation is not thread-safe.

        :param exception: An optional exception if the handler is closing
         due to an error.
        :type exception: Exception

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/test_examples.py
                :start-after: [START open_close_receiver_connection]
                :end-before: [END open_close_receiver_connection]
                :language: python
                :dedent: 4
                :caption: Close the connection and shutdown the receiver

        """
        if not self.running:
            return
        self.running = False
        self.receiver_shutdown = True
        self._used.set()
        super(Receiver, self).close(exception=exception)

    def fetch_next(self, max_batch_size=None, timeout=None):
        """Receive a batch of messages at once.

        This approach is optimal if you wish to process multiple messages simultaneously. Note that the
        number of messages retrieved in a single batch will be dependent on
        whether `prefetch` was set for the receiver. This call will prioritize returning
        quickly over meeting a specified batch size, and so will return as soon as at least
        one message is received and there is a gap in incoming messages regardless
        of the specified batch size.

        :param max_batch_size: Maximum number of messages in the batch. Actual number
         returned will depend on prefetch size and incoming stream rate.
        :type max_batch_size: int
        :param timeout: The time to wait in seconds for the first message to arrive.
         If no messages arrive, and no timeout is specified, this call will not return
         until the connection is closed. If specified, and no messages arrive within the
         timeout period, an empty list will be returned.
        :rtype: list[~azure.servicebus.common.message.Message]

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/test_examples.py
                :start-after: [START fetch_next_messages]
                :end-before: [END fetch_next_messages]
                :language: python
                :dedent: 4
                :caption: Get the messages in batch from the receiver

        """
        self._can_run()
        wrapped_batch = []
        max_batch_size = max_batch_size or self._handler._prefetch  # pylint: disable=protected-access
        try:
            timeout_ms = 1000 * timeout if timeout else 0
            batch = self._handler.receive_message_batch(
                max_batch_size=max_batch_size, timeout=timeout_ms)
            for received in batch:
                message = self._build_message(received)
                wrapped_batch.append(message)
        except Exception as e:  # pylint: disable=broad-except
            self._handle_exception(e)
        return wrapped_batch
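A sketch of browsing, deferring and later retrieving messages with a receiver of this shape; `receiver` is assumed to be an open instance from `get_receiver()`, and the `Message.defer()` / `Message.complete()` settlement methods are assumed from the same library version:

# Browse without locking or removing anything.
for peeked in receiver.peek(count=5):
    print(peeked.sequence_number)

# Defer a small batch and remember the sequence numbers.
deferred = []
for message in receiver.fetch_next(max_batch_size=5, timeout=5):
    deferred.append(message.sequence_number)
    message.defer()

# Later, pull the deferred messages back by sequence number and settle them.
for message in receiver.receive_deferred_messages(deferred):
    message.complete()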
Example No. 11
class Receiver(object):
    """
    Implements a Receiver.

    Example:
        .. literalinclude:: ../examples/test_examples_eventhub.py
            :start-after: [START create_eventhub_client_receiver_instance]
            :end-before: [END create_eventhub_client_receiver_instance]
            :language: python
            :dedent: 4
            :caption: Create a new instance of the Receiver.

    """
    timeout = 0
    _epoch = b'com.microsoft:epoch'

    def __init__(self, client, source, offset=None, prefetch=300, epoch=None, keep_alive=None, auto_reconnect=True):
        """
        Instantiate a receiver.

        :param client: The parent EventHubClient.
        :type client: ~azure.eventhub.client.EventHubClient
        :param source: The source EventHub from which to receive events.
        :type source: str
        :param prefetch: The number of events to prefetch from the service
         for processing. Default is 300.
        :type prefetch: int
        :param epoch: An optional epoch value.
        :type epoch: int
        """
        self.running = False
        self.client = client
        self.source = source
        self.offset = offset
        self.prefetch = prefetch
        self.epoch = epoch
        self.keep_alive = keep_alive
        self.auto_reconnect = auto_reconnect
        self.retry_policy = errors.ErrorPolicy(max_retries=3, on_error=_error_handler)
        self.reconnect_backoff = 1
        self.properties = None
        self.redirected = None
        self.error = None
        partition = self.source.split('/')[-1]
        self.name = "EHReceiver-{}-partition{}".format(uuid.uuid4(), partition)
        source = Source(self.source)
        if self.offset is not None:
            source.set_filter(self.offset.selector())
        if epoch:
            self.properties = {types.AMQPSymbol(self._epoch): types.AMQPLong(int(epoch))}
        self._handler = ReceiveClient(
            source,
            auth=self.client.get_auth(),
            debug=self.client.debug,
            prefetch=self.prefetch,
            link_properties=self.properties,
            timeout=self.timeout,
            error_policy=self.retry_policy,
            keep_alive_interval=self.keep_alive,
            client_name=self.name,
            properties=self.client.create_properties())

    def open(self):
        """
        Open the Receiver using the supplied connection.
        If the handler has previously been redirected, the redirect
        context will be used to create a new handler before opening it.

        :param connection: The underlying client shared connection.
        :type: connection: ~uamqp.connection.Connection

        Example:
            .. literalinclude:: ../examples/test_examples_eventhub.py
                :start-after: [START eventhub_client_receiver_open]
                :end-before: [END eventhub_client_receiver_open]
                :language: python
                :dedent: 4
                :caption: Open the Receiver using the supplied connection.

        """
        # pylint: disable=protected-access
        self.running = True
        if self.redirected:
            self.source = self.redirected.address
            source = Source(self.source)
            if self.offset is not None:
                source.set_filter(self.offset.selector())
            alt_creds = {
                "username": self.client._auth_config.get("iot_username"),
                "password":self.client._auth_config.get("iot_password")}
            self._handler = ReceiveClient(
                source,
                auth=self.client.get_auth(**alt_creds),
                debug=self.client.debug,
                prefetch=self.prefetch,
                link_properties=self.properties,
                timeout=self.timeout,
                error_policy=self.retry_policy,
                keep_alive_interval=self.keep_alive,
                client_name=self.name,
                properties=self.client.create_properties())
        self._handler.open()
        while not self._handler.client_ready():
            time.sleep(0.05)

    def _reconnect(self):  # pylint: disable=too-many-statements
        # pylint: disable=protected-access
        alt_creds = {
            "username": self.client._auth_config.get("iot_username"),
            "password": self.client._auth_config.get("iot_password")}
        self._handler.close()
        source = Source(self.source)
        if self.offset is not None:
            source.set_filter(self.offset.selector())
        self._handler = ReceiveClient(
            source,
            auth=self.client.get_auth(**alt_creds),
            debug=self.client.debug,
            prefetch=self.prefetch,
            link_properties=self.properties,
            timeout=self.timeout,
            error_policy=self.retry_policy,
            keep_alive_interval=self.keep_alive,
            client_name=self.name,
            properties=self.client.create_properties())
        try:
            self._handler.open()
            while not self._handler.client_ready():
                time.sleep(0.05)
            return True
        except errors.TokenExpired as shutdown:
            log.info("Receiver disconnected due to token expiry. Shutting down.")
            error = EventHubError(str(shutdown), shutdown)
            self.close(exception=error)
            raise error
        except (errors.LinkDetach, errors.ConnectionClose) as shutdown:
            if shutdown.action.retry and self.auto_reconnect:
                log.info("Receiver detached. Attempting reconnect.")
                return False
            log.info("Receiver detached. Shutting down.")
            error = EventHubError(str(shutdown), shutdown)
            self.close(exception=error)
            raise error
        except errors.MessageHandlerError as shutdown:
            if self.auto_reconnect:
                log.info("Receiver detached. Attempting reconnect.")
                return False
            log.info("Receiver detached. Shutting down.")
            error = EventHubError(str(shutdown), shutdown)
            self.close(exception=error)
            raise error
        except errors.AMQPConnectionError as shutdown:
            if str(shutdown).startswith("Unable to open authentication session") and self.auto_reconnect:
                log.info("Receiver couldn't authenticate. Attempting reconnect.")
                return False
            log.info("Receiver connection error (%r). Shutting down.", shutdown)
            error = EventHubError(str(shutdown))
            self.close(exception=error)
            raise error
        except Exception as e:
            log.info("Unexpected error occurred (%r). Shutting down.", e)
            error = EventHubError("Receiver reconnect failed: {}".format(e))
            self.close(exception=error)
            raise error

    def reconnect(self):
        """If the Receiver was disconnected from the service with
        a retryable error - attempt to reconnect."""
        while not self._reconnect():
            time.sleep(self.reconnect_backoff)

    def get_handler_state(self):
        """
        Get the state of the underlying handler with regards to start
        up processes.

        :rtype: ~uamqp.constants.MessageReceiverState
        """
        # pylint: disable=protected-access
        return self._handler._message_receiver.get_state()

    def has_started(self):
        """
        Whether the handler has completed all start up processes such as
        establishing the connection, session, link and authentication, and
        is now ready to process messages.
        **This function is now deprecated and will be removed in v2.0+.**

        :rtype: bool
        """
        # pylint: disable=protected-access
        timeout = False
        auth_in_progress = False
        if self._handler._connection.cbs:
            timeout, auth_in_progress = self._handler._auth.handle_token()
        if timeout:
            raise EventHubError("Authorization timeout.")
        if auth_in_progress:
            return False
        if not self._handler._client_ready():
            return False
        return True

    def close(self, exception=None):
        """
        Close down the handler. If the handler has already closed,
        this will be a no op. An optional exception can be passed in to
        indicate that the handler was shutdown due to error.

        :param exception: An optional exception if the handler is closing
         due to an error.
        :type exception: Exception

        Example:
            .. literalinclude:: ../examples/test_examples_eventhub.py
                :start-after: [START eventhub_client_receiver_close]
                :end-before: [END eventhub_client_receiver_close]
                :language: python
                :dedent: 4
                :caption: Close down the handler.

        """
        self.running = False
        if self.error:
            return
        if isinstance(exception, errors.LinkRedirect):
            self.redirected = exception
        elif isinstance(exception, EventHubError):
            self.error = exception
        elif exception:
            self.error = EventHubError(str(exception))
        else:
            self.error = EventHubError("This receive handler is now closed.")
        self._handler.close()

    @property
    def queue_size(self):
        """
        The current size of the unprocessed Event queue.

        :rtype: int
        """
        # pylint: disable=protected-access
        if self._handler._received_messages:
            return self._handler._received_messages.qsize()
        return 0

    def receive(self, max_batch_size=None, timeout=None):
        """
        Receive events from the EventHub.

        :param max_batch_size: Receive a batch of events. Batch size will
         be up to the maximum specified, but will return as soon as service
         returns no new events. If combined with a timeout and no events are
         retrieved within that time, the result will be empty. If no batch
         size is supplied, the prefetch size will be the maximum.
        :type max_batch_size: int
        :rtype: list[~azure.eventhub.common.EventData]

        Example:
            .. literalinclude:: ../examples/test_examples_eventhub.py
                :start-after: [START eventhub_client_sync_receive]
                :end-before: [END eventhub_client_sync_receive]
                :language: python
                :dedent: 4
                :caption: Receive events from the EventHub.

        """
        if self.error:
            raise self.error
        if not self.running:
            raise ValueError("Unable to receive until client has been started.")
        data_batch = []
        try:
            timeout_ms = 1000 * timeout if timeout else 0
            message_batch = self._handler.receive_message_batch(
                max_batch_size=max_batch_size,
                timeout=timeout_ms)
            for message in message_batch:
                event_data = EventData(message=message)
                self.offset = event_data.offset
                data_batch.append(event_data)
            return data_batch
        except (errors.TokenExpired, errors.AuthenticationException):
            log.info("Receiver disconnected due to token error. Attempting reconnect.")
            self.reconnect()
            return data_batch
        except (errors.LinkDetach, errors.ConnectionClose) as shutdown:
            if shutdown.action.retry and self.auto_reconnect:
                log.info("Receiver detached. Attempting reconnect.")
                self.reconnect()
                return data_batch
            log.info("Receiver detached. Shutting down.")
            error = EventHubError(str(shutdown), shutdown)
            self.close(exception=error)
            raise error
        except errors.MessageHandlerError as shutdown:
            if self.auto_reconnect:
                log.info("Receiver detached. Attempting reconnect.")
                self.reconnect()
                return data_batch
            log.info("Receiver detached. Shutting down.")
            error = EventHubError(str(shutdown), shutdown)
            self.close(exception=error)
            raise error
        except Exception as e:
            log.info("Unexpected error occurred (%r). Shutting down.", e)
            error = EventHubError("Receive failed: {}".format(e))
            self.close(exception=error)
            raise error
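A sketch of driving a receiver like this through the 1.x EventHubClient, assuming that API; the connection string, consumer group and partition ID are placeholders:

from azure.eventhub import EventHubClient, Offset

client = EventHubClient.from_connection_string("<CONNECTION_STR>")
receiver = client.add_receiver("$Default", "0", offset=Offset("-1"))
client.run()  # opens the connection and starts all registered handlers
try:
    for event_data in receiver.receive(max_batch_size=100, timeout=5):
        print(event_data.sequence_number, event_data.body_as_str())
finally:
    client.stop()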