async def cancel_scheduled_messages(self, sequence_numbers):
        # type: (Union[int, List[int]]) -> None
        """
        Cancel one or more messages that have previously been scheduled and are still pending.

        :param sequence_numbers: The sequence numbers of the scheduled messages.
        :type sequence_numbers: int or list[int]
        :rtype: None
        :raises: ~azure.servicebus.exceptions.ServiceBusError if message cancellation fails because a message
         has already been cancelled or enqueued.

        .. admonition:: Example:

            .. literalinclude:: ../samples/async_samples/sample_code_servicebus_async.py
                :start-after: [START cancel_scheduled_messages_async]
                :end-before: [END cancel_scheduled_messages_async]
                :language: python
                :dedent: 4
                :caption: Cancelling messages scheduled to be sent in the future
        """
        await self._open()
        if isinstance(sequence_numbers, int):
            numbers = [types.AMQPLong(sequence_numbers)]
        else:
            numbers = [types.AMQPLong(s) for s in sequence_numbers]
        request_body = {
            MGMT_REQUEST_SEQUENCE_NUMBERS: types.AMQPArray(numbers)
        }
        return await self._mgmt_request_response_with_retry(
            REQUEST_RESPONSE_CANCEL_SCHEDULED_MESSAGE_OPERATION, request_body,
            mgmt_handlers.default)
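# A hedged usage sketch, not part of the snippet above: scheduling a message and then
# cancelling it through the public azure-servicebus async API. The connection string
# and queue name are placeholders, and the flow assumes the v7 ServiceBusClient surface.
import datetime

from azure.servicebus import ServiceBusMessage
from azure.servicebus.aio import ServiceBusClient


async def schedule_then_cancel(conn_str, queue_name):
    async with ServiceBusClient.from_connection_string(conn_str) as client:
        async with client.get_queue_sender(queue_name) as sender:
            enqueue_time = datetime.datetime.utcnow() + datetime.timedelta(minutes=10)
            # schedule_messages returns the sequence numbers of the scheduled messages.
            sequence_numbers = await sender.schedule_messages(
                ServiceBusMessage("scheduled payload"), enqueue_time)
            # Cancel while the messages are still pending delivery.
            await sender.cancel_scheduled_messages(sequence_numbers)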
    def __init__(  # pylint: disable=super-init-not-called
            self, client, source, **kwargs):
        """
        Instantiate an async consumer. EventHubConsumer should be instantiated by calling the `create_consumer` method
        in EventHubClient.

        :param client: The parent EventHubClientAsync.
        :type client: ~azure.eventhub.aio.EventHubClientAsync
        :param source: The source EventHub from which to receive events.
        :type source: ~uamqp.address.Source
        :param event_position: The position from which to start receiving.
        :type event_position: ~azure.eventhub.common.EventPosition
        :param prefetch: The number of events to prefetch from the service
         for processing. Default is 300.
        :type prefetch: int
        :param owner_level: The priority of the exclusive consumer. An exclusive
         consumer will be created if owner_level is set.
        :type owner_level: int
        :param track_last_enqueued_event_properties: Indicates whether or not the consumer should request information
         on the last enqueued event on its associated partition, and track that information as events are received.
         When information about the partition's last enqueued event is being tracked, each event received from the
         Event Hubs service will carry metadata about the partition. This results in a small amount of additional
         network bandwidth consumption that is generally a favorable trade-off when considered against periodically
         making requests for partition properties using the Event Hub client.
         It is set to `False` by default.
        :type track_last_enqueued_event_properties: bool
        :param loop: An event loop.
        """
        event_position = kwargs.get("event_position", None)
        prefetch = kwargs.get("prefetch", 300)
        owner_level = kwargs.get("owner_level", None)
        keep_alive = kwargs.get("keep_alive", None)
        auto_reconnect = kwargs.get("auto_reconnect", True)
        track_last_enqueued_event_properties = kwargs.get("track_last_enqueued_event_properties", False)
        loop = kwargs.get("loop", None)

        super(EventHubConsumer, self).__init__()
        self._loop = loop or asyncio.get_event_loop()
        self._client = client
        self._source = source
        self._offset = event_position
        self._messages_iter = None
        self._prefetch = prefetch
        self._owner_level = owner_level
        self._keep_alive = keep_alive
        self._auto_reconnect = auto_reconnect
        self._retry_policy = errors.ErrorPolicy(max_retries=self._client._config.max_retries, on_error=_error_handler)  # pylint:disable=protected-access
        self._reconnect_backoff = 1
        self._link_properties = {}
        partition = self._source.split('/')[-1]
        self._partition = partition
        self._name = "EHReceiver-{}-partition{}".format(uuid.uuid4(), partition)
        if owner_level:
            self._link_properties[types.AMQPSymbol(self._epoch_symbol)] = types.AMQPLong(int(owner_level))
        link_property_timeout_ms = (self._client._config.receive_timeout or self._timeout) * 1000  # pylint:disable=protected-access
        self._link_properties[types.AMQPSymbol(self._timeout_symbol)] = types.AMQPLong(int(link_property_timeout_ms))
        self._handler = None
        self._track_last_enqueued_event_properties = track_last_enqueued_event_properties
        self._last_enqueued_event_properties = {}
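# A minimal sketch of the link-property construction performed above, assuming uamqp is
# available. The "com.microsoft:epoch" and "com.microsoft:timeout" symbol names mirror the
# constants used by the Event Hubs clients for exclusive (epoch) consumers and link
# timeouts; treat the exact byte values as assumptions.
from uamqp import types

EPOCH_SYMBOL = b"com.microsoft:epoch"
TIMEOUT_SYMBOL = b"com.microsoft:timeout"


def build_consumer_link_properties(owner_level, receive_timeout_seconds):
    link_properties = {}
    if owner_level is not None:
        # An exclusive ("epoch") consumer advertises its priority on the link.
        link_properties[types.AMQPSymbol(EPOCH_SYMBOL)] = types.AMQPLong(int(owner_level))
    # The link timeout is expressed in milliseconds.
    link_properties[types.AMQPSymbol(TIMEOUT_SYMBOL)] = types.AMQPLong(
        int(receive_timeout_seconds * 1000))
    return link_properties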
    def __init__(  # pylint: disable=super-init-not-called
            self, client, source, **kwargs):
        """
        Instantiate an async consumer. EventHubConsumer should be instantiated by calling the `create_consumer` method
        in EventHubClient.

        :param client: The parent EventHubClientAsync.
        :type client: ~azure.eventhub.aio.EventHubClientAsync
        :param source: The source EventHub from which to receive events.
        :type source: ~uamqp.address.Source
        :param event_position: The position from which to start receiving.
        :type event_position: ~azure.eventhub.common.EventPosition
        :param prefetch: The number of events to prefetch from the service
         for processing. Default is 300.
        :type prefetch: int
        :param owner_level: The priority of the exclusive consumer. An exclusive
         consumer will be created if owner_level is set.
        :type owner_level: int
        :param loop: An event loop.
        """
        event_position = kwargs.get("event_position", None)
        prefetch = kwargs.get("prefetch", 300)
        owner_level = kwargs.get("owner_level", None)
        keep_alive = kwargs.get("keep_alive", None)
        auto_reconnect = kwargs.get("auto_reconnect", True)
        loop = kwargs.get("loop", None)

        super(EventHubConsumer, self).__init__()
        self._loop = loop or asyncio.get_event_loop()
        self._running = False
        self._client = client
        self._source = source
        self._offset = event_position
        self._messages_iter = None
        self._prefetch = prefetch
        self._owner_level = owner_level
        self._keep_alive = keep_alive
        self._auto_reconnect = auto_reconnect
        self._retry_policy = errors.ErrorPolicy(
            max_retries=self._client._config.max_retries,
            on_error=_error_handler)  # pylint:disable=protected-access
        self._reconnect_backoff = 1
        self._redirected = None
        self._error = None
        self._link_properties = {}
        partition = self._source.split('/')[-1]
        self._partition = partition
        self._name = "EHReceiver-{}-partition{}".format(
            uuid.uuid4(), partition)
        if owner_level:
            self._link_properties[types.AMQPSymbol(
                self._epoch_symbol)] = types.AMQPLong(int(owner_level))
        link_property_timeout_ms = (self._client._config.receive_timeout
                                    or self._timeout) * 1000  # pylint:disable=protected-access
        self._link_properties[types.AMQPSymbol(
            self._timeout_symbol)] = types.AMQPLong(
                int(link_property_timeout_ms))
        self._handler = None
    def __init__(self, client, source, **kwargs):
        # type: (EventHubConsumerClient, str, Any) -> None
        event_position = kwargs.get("event_position", None)
        prefetch = kwargs.get("prefetch", 300)
        owner_level = kwargs.get("owner_level", None)
        keep_alive = kwargs.get("keep_alive", None)
        auto_reconnect = kwargs.get("auto_reconnect", True)
        track_last_enqueued_event_properties = kwargs.get(
            "track_last_enqueued_event_properties", False
        )
        idle_timeout = kwargs.get("idle_timeout", None)

        self.running = False
        self.closed = False
        self.stop = False  # used by event processor
        self.handler_ready = False

        self._on_event_received = kwargs[
            "on_event_received"
        ]  # type: Callable[[EventData], None]
        self._client = client
        self._source = source
        self._offset = event_position
        self._offset_inclusive = kwargs.get("event_position_inclusive", False)
        self._prefetch = prefetch
        self._owner_level = owner_level
        self._keep_alive = keep_alive
        self._auto_reconnect = auto_reconnect
        self._retry_policy = errors.ErrorPolicy(
            max_retries=self._client._config.max_retries, on_error=_error_handler  # pylint:disable=protected-access
        )
        self._reconnect_backoff = 1
        self._link_properties = {}  # type: Dict[types.AMQPType, types.AMQPType]
        self._error = None
        self._timeout = 0
        self._idle_timeout = (idle_timeout * 1000) if idle_timeout else None
        partition = self._source.split("/")[-1]
        self._partition = partition
        self._name = "EHConsumer-{}-partition{}".format(uuid.uuid4(), partition)
        if owner_level is not None:
            self._link_properties[types.AMQPSymbol(EPOCH_SYMBOL)] = types.AMQPLong(
                int(owner_level)
            )
        link_property_timeout_ms = (
            self._client._config.receive_timeout or self._timeout  # pylint:disable=protected-access
        ) * 1000
        self._link_properties[types.AMQPSymbol(TIMEOUT_SYMBOL)] = types.AMQPLong(
            int(link_property_timeout_ms)
        )
        self._handler = None  # type: Optional[ReceiveClient]
        self._track_last_enqueued_event_properties = (
            track_last_enqueued_event_properties
        )
        self._message_buffer = deque()  # type: ignore
        self._last_received_event = None  # type: Optional[EventData]
    def __init__(self, client: "EventHubConsumerClient", source: str,
                 **kwargs) -> None:
        super().__init__()
        event_position = kwargs.get("event_position", None)
        prefetch = kwargs.get("prefetch", 300)
        owner_level = kwargs.get("owner_level", None)
        keep_alive = kwargs.get("keep_alive", None)
        auto_reconnect = kwargs.get("auto_reconnect", True)
        track_last_enqueued_event_properties = kwargs.get(
            "track_last_enqueued_event_properties", False)
        idle_timeout = kwargs.get("idle_timeout", None)
        loop = kwargs.get("loop", None)

        self.running = False
        self.closed = False

        self._on_event_received = kwargs[
            "on_event_received"]  # type: Callable[[EventData], Awaitable[None]]
        self._loop = loop or get_running_loop()
        self._client = client
        self._source = source
        self._offset = event_position
        self._offset_inclusive = kwargs.get("event_position_inclusive", False)
        self._prefetch = prefetch
        self._owner_level = owner_level
        self._keep_alive = keep_alive
        self._auto_reconnect = auto_reconnect
        self._retry_policy = errors.ErrorPolicy(
            max_retries=self._client._config.max_retries,
            on_error=_error_handler  # pylint:disable=protected-access
        )
        self._reconnect_backoff = 1
        self._timeout = 0
        self._idle_timeout = (idle_timeout * 1000) if idle_timeout else None
        self._link_properties = {
        }  # type: Dict[types.AMQPType, types.AMQPType]
        partition = self._source.split("/")[-1]
        self._partition = partition
        self._name = "EHReceiver-{}-partition{}".format(
            uuid.uuid4(), partition)
        if owner_level:
            self._link_properties[types.AMQPSymbol(
                EPOCH_SYMBOL)] = types.AMQPLong(int(owner_level))
        link_property_timeout_ms = (
            self._client._config.receive_timeout or self._timeout  # pylint:disable=protected-access
        ) * 1000
        self._link_properties[types.AMQPSymbol(
            TIMEOUT_SYMBOL)] = types.AMQPLong(int(link_property_timeout_ms))
        self._handler = None  # type: Optional[ReceiveClientAsync]
        self._track_last_enqueued_event_properties = (
            track_last_enqueued_event_properties)
        self._event_queue = queue.Queue()
        self._last_received_event = None  # type: Optional[EventData]
    def __init__(self, client, source, **kwargs):
        """
        Instantiate a consumer. EventHubConsumer should be instantiated by calling the `create_consumer` method
        in EventHubClient.

        :param client: The parent EventHubClient.
        :type client: ~azure.eventhub.client.EventHubClient
        :param source: The source EventHub from which to receive events.
        :type source: str
        :param prefetch: The number of events to prefetch from the service
         for processing. Default is 300.
        :type prefetch: int
        :param owner_level: The priority of the exclusive consumer. An exclusive
         consumer will be created if owner_level is set.
        :type owner_level: int
        """
        event_position = kwargs.get("event_position", None)
        prefetch = kwargs.get("prefetch", 300)
        owner_level = kwargs.get("owner_level", None)
        keep_alive = kwargs.get("keep_alive", None)
        auto_reconnect = kwargs.get("auto_reconnect", True)

        super(EventHubConsumer, self).__init__()
        self.running = False
        self.client = client
        self.source = source
        self.offset = event_position
        self.messages_iter = None
        self.prefetch = prefetch
        self.owner_level = owner_level
        self.keep_alive = keep_alive
        self.auto_reconnect = auto_reconnect
        self.retry_policy = errors.ErrorPolicy(
            max_retries=self.client.config.max_retries,
            on_error=_error_handler)
        self.reconnect_backoff = 1
        self._link_properties = {}
        self.redirected = None
        self.error = None
        partition = self.source.split('/')[-1]
        self.partition = partition
        self.name = "EHConsumer-{}-partition{}".format(uuid.uuid4(), partition)
        if owner_level:
            self._link_properties[types.AMQPSymbol(
                self._epoch)] = types.AMQPLong(int(owner_level))
        link_property_timeout_ms = (self.client.config.receive_timeout
                                    or self.timeout) * 1000
        self._link_properties[types.AMQPSymbol(
            self._timeout)] = types.AMQPLong(int(link_property_timeout_ms))
        self._handler = None
    def __init__(  # pylint: disable=super-init-not-called
            self,
            client,
            source,
            offset=None,
            prefetch=300,
            epoch=None,
            keep_alive=None,
            auto_reconnect=True,
            loop=None):
        """
        Instantiate an async receiver.

        :param client: The parent EventHubClientAsync.
        :type client: ~azure.eventhub.async_ops.EventHubClientAsync
        :param source: The source EventHub from which to receive events.
        :type source: ~uamqp.address.Source
        :param prefetch: The number of events to prefetch from the service
         for processing. Default is 300.
        :type prefetch: int
        :param epoch: An optional epoch value.
        :type epoch: int
        :param loop: An event loop.
        """
        self.loop = loop or asyncio.get_event_loop()
        self.running = False
        self.client = client
        self.source = source
        self.offset = offset
        self.prefetch = prefetch
        self.epoch = epoch
        self.keep_alive = keep_alive
        self.auto_reconnect = auto_reconnect
        self.retry_policy = errors.ErrorPolicy(max_retries=3,
                                               on_error=_error_handler)
        self.reconnect_backoff = 1
        self.redirected = None
        self.error = None
        self.properties = None
        partition = self.source.split('/')[-1]
        self.name = "EHReceiver-{}-partition{}".format(uuid.uuid4(), partition)
        source = Source(self.source)
        if self.offset is not None:
            source.set_filter(self.offset.selector())
        if epoch:
            self.properties = {
                types.AMQPSymbol(self._epoch): types.AMQPLong(int(epoch))
            }
        self._handler = ReceiveClientAsync(
            source,
            auth=self.client.get_auth(),
            debug=self.client.debug,
            prefetch=self.prefetch,
            link_properties=self.properties,
            timeout=self.timeout,
            error_policy=self.retry_policy,
            keep_alive_interval=self.keep_alive,
            client_name=self.name,
            properties=self.client.create_properties(),
            loop=self.loop)
    def __init__(self, client, source, prefetch=300, epoch=None, loop=None):  # pylint: disable=super-init-not-called
        """
        Instantiate an async receiver.

        :param client: The parent EventHubClient.
        :type client: ~azure.eventhub.EventHubClient
        :param source: The source EventHub from which to receive events.
        :type source: ~uamqp.Source
        :param prefetch: The number of events to prefetch from the service
         for processing. Default is 300.
        :type prefetch: int
        :param epoch: An optional epoch value.
        :type epoch: int
        :param loop: An event loop.
        """
        self.loop = loop or asyncio.get_event_loop()
        self.offset = None
        self._callback = None
        self.prefetch = prefetch
        self.epoch = epoch
        properties = None
        if epoch:
            properties = {
                types.AMQPSymbol(self._epoch): types.AMQPLong(int(epoch))
            }
        self._handler = ReceiveClientAsync(source,
                                           auth=client.auth,
                                           debug=client.debug,
                                           prefetch=self.prefetch,
                                           link_properties=properties,
                                           timeout=self.timeout,
                                           loop=self.loop)
    def __init__(self, client, source, prefetch=300, epoch=None):
        """
        Instantiate a receiver.

        :param client: The parent EventHubClient.
        :type client: ~azure.eventhub.client.EventHubClient
        :param source: The source EventHub from which to receive events.
        :type source: ~uamqp.address.Source
        :param prefetch: The number of events to prefetch from the service
         for processing. Default is 300.
        :type prefetch: int
        :param epoch: An optional epoch value.
        :type epoch: int
        """
        self.offset = None
        self.prefetch = prefetch
        self.epoch = epoch
        self.properties = None
        self.redirected = None
        self.debug = client.debug
        self.error = None
        if epoch:
            self.properties = {
                types.AMQPSymbol(self._epoch): types.AMQPLong(int(epoch))
            }
        self._handler = ReceiveClient(source,
                                      auth=client.auth,
                                      debug=self.debug,
                                      prefetch=self.prefetch,
                                      link_properties=self.properties,
                                      timeout=self.timeout)
    def __init__(self,
                 client,
                 source,
                 event_position=None,
                 prefetch=300,
                 owner_level=None,
                 keep_alive=None,
                 auto_reconnect=True):
        """
        Instantiate a consumer. EventHubConsumer should be instantiated by calling the `create_consumer` method
         in EventHubClient.

        :param client: The parent EventHubClient.
        :type client: ~azure.eventhub.client.EventHubClient
        :param source: The source EventHub from which to receive events.
        :type source: str
        :param prefetch: The number of events to prefetch from the service
         for processing. Default is 300.
        :type prefetch: int
        :param owner_level: The priority of the exclusive consumer. An exclusive
         consumer will be created if owner_level is set.
        :type owner_level: int
        """
        self.running = False
        self.client = client
        self.source = source
        self.offset = event_position
        self.messages_iter = None
        self.prefetch = prefetch
        self.owner_level = owner_level
        self.keep_alive = keep_alive
        self.auto_reconnect = auto_reconnect
        self.retry_policy = errors.ErrorPolicy(
            max_retries=self.client.config.max_retries,
            on_error=_error_handler)
        self.reconnect_backoff = 1
        self.properties = None
        self.redirected = None
        self.error = None
        partition = self.source.split('/')[-1]
        self.name = "EHReceiver-{}-partition{}".format(uuid.uuid4(), partition)
        source = Source(self.source)
        if self.offset is not None:
            source.set_filter(self.offset._selector())  # pylint: disable=protected-access
        if owner_level:
            self.properties = {
                types.AMQPSymbol(self._epoch): types.AMQPLong(int(owner_level))
            }
        self._handler = ReceiveClient(
            source,
            auth=self.client.get_auth(),
            debug=self.client.config.network_tracing,
            prefetch=self.prefetch,
            link_properties=self.properties,
            timeout=self.timeout,
            error_policy=self.retry_policy,
            keep_alive_interval=self.keep_alive,
            client_name=self.name,
            properties=self.client._create_properties(
                self.client.config.user_agent))  # pylint: disable=protected-access
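# A hedged sketch of the offset filtering done above: the receive position is applied as an
# AMQP selector filter on the link source. The "amqp.annotation.x-opt-offset" selector string
# format is an assumption based on the snippet; uamqp's Source.set_filter defaults to the
# apache.org:selector-filter:string filter.
from uamqp.address import Source


def build_filtered_source(source_address, offset="@latest", inclusive=False):
    source = Source(source_address)
    operator = ">=" if inclusive else ">"
    selector = "amqp.annotation.x-opt-offset {} '{}'".format(operator, offset)
    source.set_filter(selector.encode("utf-8"))
    return source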
    async def receive_deferred_messages(
            self, sequence_numbers: Union[int, List[int]],
            **kwargs: Any) -> List[ServiceBusReceivedMessage]:
        """Receive messages that have previously been deferred.

        When receiving deferred messages from a partitioned entity, all of the supplied
        sequence numbers must be messages from the same partition.

        :param Union[int, list[int]] sequence_numbers: A list of the sequence numbers of messages that have been
         deferred.
        :keyword float timeout: The total operation timeout in seconds including all the retries. The value must be
         greater than 0 if specified. The default value is None, meaning no timeout.
        :rtype: list[~azure.servicebus.aio.ServiceBusReceivedMessage]

        .. admonition:: Example:

            .. literalinclude:: ../samples/async_samples/sample_code_servicebus_async.py
                :start-after: [START receive_defer_async]
                :end-before: [END receive_defer_async]
                :language: python
                :dedent: 4
                :caption: Receive deferred messages from ServiceBus.

        """
        self._check_live()
        timeout = kwargs.pop("timeout", None)
        if timeout is not None and timeout <= 0:
            raise ValueError("The timeout must be greater than 0.")
        if isinstance(sequence_numbers, six.integer_types):
            sequence_numbers = [sequence_numbers]
        if not sequence_numbers:
            raise ValueError("At least one sequence number must be specified.")
        await self._open()
        try:
            receive_mode = self._receive_mode.value.value
        except AttributeError:
            receive_mode = int(self._receive_mode)
        message = {
            MGMT_REQUEST_SEQUENCE_NUMBERS:
            types.AMQPArray([types.AMQPLong(s) for s in sequence_numbers]),
            MGMT_REQUEST_RECEIVER_SETTLE_MODE:
            types.AMQPuInt(receive_mode)
        }

        self._populate_message_properties(message)

        handler = functools.partial(mgmt_handlers.deferred_message_op,
                                    receive_mode=self._receive_mode,
                                    message_type=ServiceBusReceivedMessage,
                                    receiver=self)
        messages = await self._mgmt_request_response_with_retry(
            REQUEST_RESPONSE_RECEIVE_BY_SEQUENCE_NUMBER,
            message,
            handler,
            timeout=timeout)
        if self._auto_lock_renewer and not self._session:
            for message in messages:
                self._auto_lock_renewer.register(self, message)
        return messages
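# A hedged usage sketch (public azure-servicebus async API): defer received messages, then
# fetch them back by sequence number and settle them. The connection string and queue name
# are placeholders.
from azure.servicebus.aio import ServiceBusClient


async def defer_then_fetch(conn_str, queue_name):
    async with ServiceBusClient.from_connection_string(conn_str) as client:
        async with client.get_queue_receiver(queue_name) as receiver:
            deferred_sequence_numbers = []
            for msg in await receiver.receive_messages(max_message_count=5, max_wait_time=5):
                deferred_sequence_numbers.append(msg.sequence_number)
                await receiver.defer_message(msg)
            # Later: retrieve the deferred messages by sequence number and complete them.
            deferred = await receiver.receive_deferred_messages(deferred_sequence_numbers)
            for msg in deferred:
                await receiver.complete_message(msg)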
    async def cancel_scheduled_messages(self,
                                        sequence_numbers: Union[int,
                                                                List[int]],
                                        *,
                                        timeout: Optional[float] = None,
                                        **kwargs: Any) -> None:
        """
        Cancel one or more messages that have previously been scheduled and are still pending.

        :param sequence_numbers: The sequence numbers of the scheduled messages.
        :type sequence_numbers: int or list[int]
        :keyword float timeout: The total operation timeout in seconds including all the retries. The value must be
         greater than 0 if specified. The default value is None, meaning no timeout.
        :rtype: None
        :raises: ~azure.servicebus.exceptions.ServiceBusError if message cancellation fails because a message
         has already been cancelled or enqueued.

        .. admonition:: Example:

            .. literalinclude:: ../samples/async_samples/sample_code_servicebus_async.py
                :start-after: [START cancel_scheduled_messages_async]
                :end-before: [END cancel_scheduled_messages_async]
                :language: python
                :dedent: 4
                :caption: Cancelling messages scheduled to be sent in the future
        """
        if kwargs:
            warnings.warn(f"Unsupported keyword args: {kwargs}")
        self._check_live()
        if timeout is not None and timeout <= 0:
            raise ValueError("The timeout must be greater than 0.")
        if isinstance(sequence_numbers, int):
            numbers = [types.AMQPLong(sequence_numbers)]
        else:
            numbers = [types.AMQPLong(s) for s in sequence_numbers]
        if len(numbers) == 0:
            return None  # no-op on empty list.
        request_body = {
            MGMT_REQUEST_SEQUENCE_NUMBERS: types.AMQPArray(numbers)
        }
        return await self._mgmt_request_response_with_retry(
            REQUEST_RESPONSE_CANCEL_SCHEDULED_MESSAGE_OPERATION,
            request_body,
            mgmt_handlers.default,
            timeout=timeout,
        )
    def __init__(  # pylint: disable=super-init-not-called
            self, client, target, **kwargs):
        """
        Instantiate an async EventHubProducer. EventHubProducer should be instantiated by calling the `create_producer`
        method in EventHubClient.

        :param client: The parent EventHubClientAsync.
        :type client: ~azure.eventhub.aio.EventHubClientAsync
        :param target: The URI of the EventHub to send to.
        :type target: str
        :param partition: The specific partition ID to send to. Default is `None`, in which case the service
         will assign to all partitions using round-robin.
        :type partition: str
        :param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is
         queued. Default value is 60 seconds. If set to 0, there will be no timeout.
        :type send_timeout: float
        :param keep_alive: The time interval in seconds between pinging the connection to keep it alive during
         periods of inactivity. The default value is `None`, i.e. no keep alive pings.
        :type keep_alive: float
        :param auto_reconnect: Whether to automatically reconnect the producer if a retryable error occurs.
         Default value is `True`.
        :type auto_reconnect: bool
        :param loop: An event loop. If not specified the default event loop will be used.
        """
        partition = kwargs.get("partition", None)
        send_timeout = kwargs.get("send_timeout", 60)
        keep_alive = kwargs.get("keep_alive", None)
        auto_reconnect = kwargs.get("auto_reconnect", True)
        loop = kwargs.get("loop", None)

        super(EventHubProducer, self).__init__()
        self._loop = loop or asyncio.get_event_loop()
        self._max_message_size_on_link = None
        self._running = False
        self._client = client
        self._target = target
        self._partition = partition
        self._keep_alive = keep_alive
        self._auto_reconnect = auto_reconnect
        self._timeout = send_timeout
        self._retry_policy = errors.ErrorPolicy(
            max_retries=self._client._config.max_retries,
            on_error=_error_handler)  # pylint:disable=protected-access
        self._reconnect_backoff = 1
        self._name = "EHProducer-{}".format(uuid.uuid4())
        self._unsent_events = None
        self._redirected = None
        self._error = None
        if partition:
            self._target += "/Partitions/" + partition
            self._name += "-partition{}".format(partition)
        self._handler = None
        self._outcome = None
        self._condition = None
        self._link_properties = {
            types.AMQPSymbol(self._timeout_symbol):
            types.AMQPLong(int(self._timeout * 1000))
        }
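# A hedged usage sketch (public azure-eventhub async API) showing how a producer like the one
# above is typically driven: build a batch, add events, send it. The connection string,
# Event Hub name, and partition id are placeholders.
from azure.eventhub import EventData
from azure.eventhub.aio import EventHubProducerClient


async def send_events(conn_str, eventhub_name):
    producer = EventHubProducerClient.from_connection_string(
        conn_str, eventhub_name=eventhub_name)
    async with producer:
        batch = await producer.create_batch(partition_id="0")  # partition_id is optional
        batch.add(EventData("first event"))
        batch.add(EventData("second event"))
        await producer.send_batch(batch)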
    async def receive_deferred_messages(self,
                                        sequence_numbers,
                                        mode=ReceiveSettleMode.PeekLock,
                                        **kwargs):
        """Receive messages that have previously been deferred.

        When receiving deferred messages from a partitioned entity, all of the supplied
        sequence numbers must be messages from the same partition.

        :param sequence_numbers: A list of the sequence numbers of messages that have been
         deferred.
        :type sequence_numbers: list[int]
        :param mode: The mode with which messages will be retrieved from the entity. The two options
         are PeekLock and ReceiveAndDelete. Messages received with PeekLock must be settled within a given
         lock period before they will be removed from the queue. Messages received with ReceiveAndDelete
         will be immediately removed from the queue, and cannot be subsequently rejected or re-received if
         the client fails to process the message. The default mode is PeekLock.
        :type mode: ~azure.servicebus.common.constants.ReceiveSettleMode
        :rtype: list[~azure.servicebus.aio.async_message.DeferredMessage]

        Example:
            .. literalinclude:: ../examples/async_examples/test_examples_async.py
                :start-after: [START client_defer_messages]
                :end-before: [END client_defer_messages]
                :language: python
                :dedent: 8
                :caption: Defer messages, then retrieve them by sequence number.

        """
        if (self.entity and self.requires_session) or kwargs.get('session'):
            raise ValueError(
                "Sessionful deferred messages can only be received within a locked receive session."
            )
        if not sequence_numbers:
            raise ValueError("At least one sequence number must be specified.")
        try:
            receive_mode = mode.value.value
        except AttributeError:
            receive_mode = int(mode)
        message = {
            'sequence-numbers':
            types.AMQPArray([types.AMQPLong(s) for s in sequence_numbers]),
            'receiver-settle-mode':
            types.AMQPuInt(receive_mode)
        }

        mgmt_handler = functools.partial(mgmt_handlers.deferred_message_op,
                                         mode=receive_mode,
                                         message_type=DeferredMessage)
        async with BaseHandler(self.entity_uri,
                               self.auth_config,
                               loop=self.loop,
                               debug=self.debug,
                               **kwargs) as handler:
            return await handler._mgmt_request_response(  # pylint: disable=protected-access
                REQUEST_RESPONSE_RECEIVE_BY_SEQUENCE_NUMBER, message,
                mgmt_handler)
    def peek_messages(self,
                      max_message_count: int = 1,
                      *,
                      sequence_number: int = 0,
                      timeout: Optional[float] = None,
                      **kwargs: Any) -> List[ServiceBusReceivedMessage]:
        """Browse messages currently pending in the queue.

        Peeked messages are not removed from queue, nor are they locked. They cannot be completed,
        deferred or dead-lettered.

        :param int max_message_count: The maximum number of messages to try and peek. The default
         value is 1.
        :keyword int sequence_number: A message sequence number from which to start browsing messages.
        :keyword Optional[float] timeout: The total operation timeout in seconds including all the retries.
         The value must be greater than 0 if specified. The default value is None, meaning no timeout.

        :rtype: List[~azure.servicebus.ServiceBusReceivedMessage]

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
                :start-after: [START peek_messages_sync]
                :end-before: [END peek_messages_sync]
                :language: python
                :dedent: 4
                :caption: Look at pending messages in the queue.

        """
        if kwargs:
            warnings.warn(f"Unsupported keyword args: {kwargs}")
        self._check_live()
        if timeout is not None and timeout <= 0:
            raise ValueError("The timeout must be greater than 0.")
        if not sequence_number:
            sequence_number = self._last_received_sequenced_number or 1
        if int(max_message_count) < 1:
            raise ValueError("max_message_count must be 1 or greater.")

        self._open()
        message = {
            MGMT_REQUEST_FROM_SEQUENCE_NUMBER: types.AMQPLong(sequence_number),
            MGMT_REQUEST_MAX_MESSAGE_COUNT: max_message_count,
        }

        self._populate_message_properties(message)
        handler = functools.partial(mgmt_handlers.peek_op, receiver=self)
        messages = self._mgmt_request_response_with_retry(
            REQUEST_RESPONSE_PEEK_OPERATION, message, handler, timeout=timeout)
        links = get_receive_links(messages)
        with receive_trace_context_manager(self,
                                           span_name=SPAN_NAME_PEEK,
                                           links=links):
            return messages
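# A hedged usage sketch (public azure-servicebus sync API): browse pending messages without
# locking or removing them. The connection string and queue name are placeholders.
from azure.servicebus import ServiceBusClient


def peek_pending(conn_str, queue_name):
    with ServiceBusClient.from_connection_string(conn_str) as client:
        with client.get_queue_receiver(queue_name) as receiver:
            for msg in receiver.peek_messages(max_message_count=10):
                print(msg.sequence_number, str(msg))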
    def peek_messages(self, max_message_count=1, **kwargs):
        # type: (int, Any) -> List[PeekedMessage]
        """Browse messages currently pending in the queue.

        Peeked messages are not removed from queue, nor are they locked. They cannot be completed,
        deferred or dead-lettered.

        :param int max_message_count: The maximum number of messages to try and peek. The default
         value is 1.
        :keyword int sequence_number: A message sequence number from which to start browsing messages.
        :keyword float timeout: The total operation timeout in seconds including all the retries. The value must be
         greater than 0 if specified. The default value is None, meaning no timeout.

        :rtype: List[~azure.servicebus.PeekedMessage]

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
                :start-after: [START peek_messages_sync]
                :end-before: [END peek_messages_sync]
                :language: python
                :dedent: 4
                :caption: Look at pending messages in the queue.

        """
        self._check_live()
        sequence_number = kwargs.pop("sequence_number", 0)
        timeout = kwargs.pop("timeout", None)
        if timeout is not None and timeout <= 0:
            raise ValueError("The timeout must be greater than 0.")
        if not sequence_number:
            sequence_number = self._last_received_sequenced_number or 1
        if int(max_message_count) < 1:
            raise ValueError("max_message_count must be 1 or greater.")
        if int(sequence_number) < 1:
            raise ValueError("sequence_number must be 1 or greater.")

        self._open()
        message = {
            MGMT_REQUEST_FROM_SEQUENCE_NUMBER: types.AMQPLong(sequence_number),
            MGMT_REQUEST_MAX_MESSAGE_COUNT: max_message_count
        }

        self._populate_message_properties(message)

        return self._mgmt_request_response_with_retry(
            REQUEST_RESPONSE_PEEK_OPERATION,
            message,
            mgmt_handlers.peek_op,
            timeout=timeout)
    async def receive_deferred_messages(self,
                                        sequence_numbers,
                                        mode=ReceiveSettleMode.PeekLock):
        """Receive messages that have previously been deferred.

        This operation can only receive deferred messages from the current session.
        When receiving deferred messages from a partitioned entity, all of the supplied
        sequence numbers must be messages from the same partition.

        :param sequence_numbers: A list of the sequence numbers of messages that have been
         deferred.
        :type sequence_numbers: list[int]
        :param mode: The receive mode, default value is PeekLock.
        :type mode: ~azure.servicebus.common.constants.ReceiveSettleMode
        :rtype: list[~azure.servicebus.aio.async_message.DeferredMessage]

        Example:
            .. literalinclude:: ../examples/async_examples/test_examples_async.py
                :start-after: [START receiver_defer_session_messages]
                :end-before: [END receiver_defer_session_messages]
                :language: python
                :dedent: 8
                :caption: Defer messages, then retrieve them by sequence number.

        """
        if not sequence_numbers:
            raise ValueError("At least one sequence number must be specified.")
        await self._can_run()
        try:
            receive_mode = mode.value.value
        except AttributeError:
            receive_mode = int(mode)
        message = {
            'sequence-numbers':
            types.AMQPArray([types.AMQPLong(s) for s in sequence_numbers]),
            'receiver-settle-mode':
            types.AMQPuInt(receive_mode),
            'session-id':
            self.session_id
        }
        handler = functools.partial(mgmt_handlers.deferred_message_op,
                                    mode=receive_mode,
                                    message_type=DeferredMessage)
        messages = await self._mgmt_request_response(
            REQUEST_RESPONSE_RECEIVE_BY_SEQUENCE_NUMBER, message, handler)
        for m in messages:
            m._receiver = self  # pylint: disable=protected-access
        return messages
    async def receive_deferred_messages(self, sequence_numbers):
        # type: (Union[int, List[int]]) -> List[ReceivedMessage]
        """Receive messages that have previously been deferred.

        When receiving deferred messages from a partitioned entity, all of the supplied
        sequence numbers must be messages from the same partition.

        :param Union[int, list[int]] sequence_numbers: A list of the sequence numbers of messages that have been
         deferred.
        :rtype: list[~azure.servicebus.aio.ReceivedMessage]

        .. admonition:: Example:

            .. literalinclude:: ../samples/async_samples/sample_code_servicebus_async.py
                :start-after: [START receive_defer_async]
                :end-before: [END receive_defer_async]
                :language: python
                :dedent: 4
                :caption: Receive deferred messages from ServiceBus.

        """
        self._check_live()
        if isinstance(sequence_numbers, six.integer_types):
            sequence_numbers = [sequence_numbers]
        if not sequence_numbers:
            raise ValueError("At least one sequence number must be specified.")
        await self._open()
        try:
            receive_mode = self._receive_mode.value.value
        except AttributeError:
            receive_mode = int(self._receive_mode)
        message = {
            MGMT_REQUEST_SEQUENCE_NUMBERS: types.AMQPArray([types.AMQPLong(s) for s in sequence_numbers]),
            MGMT_REQUEST_RECEIVER_SETTLE_MODE: types.AMQPuInt(receive_mode)
        }

        self._populate_message_properties(message)

        handler = functools.partial(mgmt_handlers.deferred_message_op,
                                    receive_mode=self._receive_mode,
                                    message_type=ReceivedMessage,
                                    receiver=self)
        messages = await self._mgmt_request_response_with_retry(
            REQUEST_RESPONSE_RECEIVE_BY_SEQUENCE_NUMBER,
            message,
            handler
        )
        return messages
    async def receive_deferred_messages(self, sequence_numbers):
        # type: (List[int]) -> List[ReceivedMessage]
        """Receive messages that have previously been deferred.

        When receiving deferred messages from a partitioned entity, all of the supplied
        sequence numbers must be messages from the same partition.

        :param list[int] sequence_numbers: A list of the sequence numbers of messages that have been
         deferred.
        :rtype: list[~azure.servicebus.aio.ReceivedMessage]

        .. admonition:: Example:

            .. literalinclude:: ../samples/async_samples/sample_code_servicebus_async.py
                :start-after: [START receive_defer_async]
                :end-before: [END receive_defer_async]
                :language: python
                :dedent: 4
                :caption: Receive deferred messages from ServiceBus.

        """
        self._can_run()
        if not sequence_numbers:
            raise ValueError("At least one sequence number must be specified.")
        await self._open()
        try:
            receive_mode = self._mode.value.value
        except AttributeError:
            receive_mode = int(self._mode)
        message = {
            MGMT_REQUEST_SEQUENCE_NUMBERS:
            types.AMQPArray([types.AMQPLong(s) for s in sequence_numbers]),
            MGMT_REQUEST_RECEIVER_SETTLE_MODE:
            types.AMQPuInt(receive_mode)
        }

        if self._session_id:
            message[MGMT_REQUEST_SESSION_ID] = self._session_id

        handler = functools.partial(mgmt_handlers.deferred_message_op,
                                    mode=self._mode,
                                    message_type=ReceivedMessage)
        messages = await self._mgmt_request_response_with_retry(
            REQUEST_RESPONSE_RECEIVE_BY_SEQUENCE_NUMBER, message, handler)
        for m in messages:
            m._receiver = self  # pylint: disable=protected-access
        return messages
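# A hedged usage sketch for the sessionful case above (public azure-servicebus async API):
# deferred messages that belong to a session are retrieved through a receiver opened on that
# session. The connection string, queue name, and session id are placeholders.
from azure.servicebus.aio import ServiceBusClient


async def fetch_deferred_from_session(conn_str, queue_name, session_id, sequence_numbers):
    async with ServiceBusClient.from_connection_string(conn_str) as client:
        async with client.get_queue_receiver(queue_name, session_id=session_id) as receiver:
            deferred = await receiver.receive_deferred_messages(sequence_numbers)
            for msg in deferred:
                await receiver.complete_message(msg)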
async def test_event_hubs_idempotent_producer(live_eventhub_config):

    uri = "sb://{}/{}".format(live_eventhub_config['hostname'],
                              live_eventhub_config['event_hub'])
    sas_auth = authentication.SASTokenAsync.from_shared_access_key(
        uri, live_eventhub_config['key_name'],
        live_eventhub_config['access_key'])

    target = "amqps://{}/{}/Partitions/0".format(
        live_eventhub_config['hostname'], live_eventhub_config['event_hub'])

    symbol_array = [
        uamqp_types.AMQPSymbol(b"com.microsoft:idempotent-producer")
    ]
    desired_capabilities = utils.data_factory(
        uamqp_types.AMQPArray(symbol_array))

    link_properties = {
        uamqp_types.AMQPSymbol(b"com.microsoft:timeout"):
        uamqp_types.AMQPLong(int(60 * 1000))
    }

    def on_attach(attach_source, attach_target, properties, error):
        if str(attach_target) == target:
            on_attach.owner_level = properties.get(
                b"com.microsoft:producer-epoch")
            on_attach.producer_group_id = properties.get(
                b"com.microsoft:producer-id")
            on_attach.starting_sequence_number = properties.get(
                b"com.microsoft:producer-sequence-number")

    send_client = uamqp.SendClientAsync(
        target,
        auth=sas_auth,
        desired_capabilities=desired_capabilities,
        link_properties=link_properties,
        on_attach=on_attach,
        debug=True)
    await send_client.open_async()
    while not await send_client.client_ready_async():
        await asyncio.sleep(0.05)

    assert on_attach.owner_level is not None
    assert on_attach.producer_group_id is not None
    assert on_attach.starting_sequence_number is not None
    await send_client.close_async()
    def peek_messages(self, message_count=1, sequence_number=None):
        # type: (int, Optional[int]) -> List[PeekMessage]
        """Browse messages currently pending in the queue.

        Peeked messages are not removed from queue, nor are they locked. They cannot be completed,
        deferred or dead-lettered.

        :param int message_count: The maximum number of messages to try and peek. The default
         value is 1.
        :param int sequence_number: A message sequence number from which to start browsing messages.

        :rtype: List[~azure.servicebus.PeekMessage]

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
                :start-after: [START peek_messages_sync]
                :end-before: [END peek_messages_sync]
                :language: python
                :dedent: 4
                :caption: Look at pending messages in the queue.

        """
        self._check_live()
        if not sequence_number:
            sequence_number = self._last_received_sequenced_number or 1
        if int(message_count) < 1:
            raise ValueError("message_count must be 1 or greater.")
        if int(sequence_number) < 1:
            raise ValueError("sequence_number must be 1 or greater.")

        self._open()
        message = {
            MGMT_REQUEST_FROM_SEQUENCE_NUMBER: types.AMQPLong(sequence_number),
            MGMT_REQUEST_MESSAGE_COUNT: message_count
        }

        self._populate_message_properties(message)

        return self._mgmt_request_response_with_retry(
            REQUEST_RESPONSE_PEEK_OPERATION,
            message,
            mgmt_handlers.peek_op
        )
    async def peek(self, count=1, start_from=0, session=None, **kwargs):
        """Browse messages currently pending in the queue.

        Peeked messages are not removed from queue, nor are they locked. They cannot be completed,
        deferred or dead-lettered.

        :param count: The maximum number of messages to try and peek. The default
         value is 1.
        :type count: int
        :param start_from: A message sequence number from which to start browsing messages.
        :type start_from: int
        :param session: If the entity requires sessions, a session ID must be supplied
         so that only messages from that session will be browsed. If the entity
         does not require sessions this value will be ignored.
        :type session: str
        :rtype: list[~azure.servicebus.common.message.PeekMessage]

        Example:
            .. literalinclude:: ../examples/async_examples/test_examples_async.py
                :start-after: [START client_peek_messages]
                :end-before: [END client_peek_messages]
                :language: python
                :dedent: 4
                :caption: Peek messages in the queue.

        """
        message = {
            'from-sequence-number': types.AMQPLong(start_from),
            'message-count': int(count)
        }
        if self.entity and self.requires_session:
            if not session:
                raise ValueError("Sessions are required, please set session.")
            message['session-id'] = session

        async with BaseHandler(self.entity_uri,
                               self.auth_config,
                               loop=self.loop,
                               debug=self.debug,
                               **kwargs) as handler:
            return await handler._mgmt_request_response(  # pylint: disable=protected-access
                REQUEST_RESPONSE_PEEK_OPERATION, message,
                mgmt_handlers.peek_op)
    def __init__(self, client: "EventHubProducerClient", target: str,
                 **kwargs) -> None:
        super().__init__()
        partition = kwargs.get("partition", None)
        send_timeout = kwargs.get("send_timeout", 60)
        keep_alive = kwargs.get("keep_alive", None)
        auto_reconnect = kwargs.get("auto_reconnect", True)
        loop = kwargs.get("loop", None)
        idle_timeout = kwargs.get("idle_timeout", None)

        self.running = False
        self.closed = False

        self._loop = loop or get_running_loop()
        self._max_message_size_on_link = None
        self._client = client
        self._target = target
        self._partition = partition
        self._keep_alive = keep_alive
        self._auto_reconnect = auto_reconnect
        self._timeout = send_timeout
        self._idle_timeout = (idle_timeout * 1000) if idle_timeout else None
        self._retry_policy = errors.ErrorPolicy(
            max_retries=self._client._config.max_retries,
            on_error=_error_handler  # pylint:disable=protected-access
        )
        self._reconnect_backoff = 1
        self._name = "EHProducer-{}".format(uuid.uuid4())
        self._unsent_events = []  # type: List[Any]
        self._error = None
        if partition:
            self._target += "/Partitions/" + partition
            self._name += "-partition{}".format(partition)
        self._handler = None  # type: Optional[SendClientAsync]
        self._outcome = None  # type: Optional[constants.MessageSendResult]
        self._condition = None  # type: Optional[Exception]
        self._lock = asyncio.Lock(loop=self._loop)
        self._link_properties = {
            types.AMQPSymbol(TIMEOUT_SYMBOL):
            types.AMQPLong(int(self._timeout * 1000))
        }
    async def cancel_scheduled_messages(self, *sequence_numbers):
        """Cancel one or more messages that have previsouly been scheduled and are still pending.

        :param sequence_numbers: The seqeuence numbers of the scheduled messages.
        :type sequence_numbers: int

        Example:
            .. literalinclude:: ../examples/async_examples/test_examples_async.py
                :start-after: [START cancel_schedule_messages]
                :end-before: [END cancel_schedule_messages]
                :language: python
                :dedent: 4
                :caption: Cancel scheduled messages.

        """
        if not self.running:
            await self.open()
        numbers = [types.AMQPLong(s) for s in sequence_numbers]
        request_body = {'sequence-numbers': types.AMQPArray(numbers)}
        return await self._mgmt_request_response(
            REQUEST_RESPONSE_CANCEL_SCHEDULED_MESSAGE_OPERATION, request_body,
            mgmt_handlers.default)
    def peek(self, count=1, start_from=None):
        """Browse messages currently pending in the queue.

        Peeked messages are not removed from queue, nor are they locked. They cannot be completed,
        deferred or dead-lettered.
        This operation will only peek pending messages in the current session.

        :param count: The maximum number of messages to try and peek. The default
         value is 1.
        :type count: int
        :param start_from: A message sequence number from which to start browsing messages.
        :type start_from: int
        :rtype: list[~azure.servicebus.common.message.PeekMessage]

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/test_examples.py
                :start-after: [START peek_messages]
                :end-before: [END peek_messages]
                :language: python
                :dedent: 4
                :caption: Look at pending messages in the queue

        """
        if not start_from:
            start_from = self.last_received or 1
        if int(count) < 1:
            raise ValueError("count must be 1 or greater.")
        if int(start_from) < 1:
            raise ValueError("start_from must be 1 or greater.")

        self._can_run()
        message = {
            'from-sequence-number': types.AMQPLong(start_from),
            'message-count': count,
            'session-id': self.session_id
        }
        return self._mgmt_request_response(REQUEST_RESPONSE_PEEK_OPERATION,
                                           message, mgmt_handlers.peek_op)
    def receive_deferred_messages(self, sequence_numbers, **kwargs):
        # type: (Union[int,List[int]], Any) -> List[ServiceBusReceivedMessage]
        """Receive messages that have previously been deferred.

        When receiving deferred messages from a partitioned entity, all of the supplied
        sequence numbers must be messages from the same partition.

        :param Union[int,List[int]] sequence_numbers: A list of the sequence numbers of messages that have been
         deferred.
        :keyword Optional[float] timeout: The total operation timeout in seconds including all the retries.
         The value must be greater than 0 if specified. The default value is None, meaning no timeout.
        :rtype: List[~azure.servicebus.ServiceBusReceivedMessage]

        .. admonition:: Example:

            .. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
                :start-after: [START receive_defer_sync]
                :end-before: [END receive_defer_sync]
                :language: python
                :dedent: 4
                :caption: Receive deferred messages from ServiceBus.

        """
        self._check_live()
        timeout = kwargs.pop("timeout", None)
        if timeout is not None and timeout <= 0:
            raise ValueError("The timeout must be greater than 0.")
        if isinstance(sequence_numbers, six.integer_types):
            sequence_numbers = [sequence_numbers]
        if len(sequence_numbers) == 0:
            return []  # no-op on empty list.
        self._open()
        uamqp_receive_mode = ServiceBusToAMQPReceiveModeMap[self._receive_mode]
        try:
            receive_mode = uamqp_receive_mode.value.value
        except AttributeError:
            receive_mode = int(uamqp_receive_mode.value)
        message = {
            MGMT_REQUEST_SEQUENCE_NUMBERS: types.AMQPArray(
                [types.AMQPLong(s) for s in sequence_numbers]
            ),
            MGMT_REQUEST_RECEIVER_SETTLE_MODE: types.AMQPuInt(receive_mode),
        }

        self._populate_message_properties(message)

        handler = functools.partial(
            mgmt_handlers.deferred_message_op,
            receive_mode=self._receive_mode,
            receiver=self,
        )
        messages = self._mgmt_request_response_with_retry(
            REQUEST_RESPONSE_RECEIVE_BY_SEQUENCE_NUMBER,
            message,
            handler,
            timeout=timeout,
        )
        links = get_receive_links(messages)
        with receive_trace_context_manager(
            self, span_name=SPAN_NAME_RECEIVE_DEFERRED, links=links
        ):
            if (
                self._auto_lock_renewer
                and not self._session
                and self._receive_mode != ServiceBusReceiveMode.RECEIVE_AND_DELETE
            ):
                for message in messages:
                    self._auto_lock_renewer.register(self, message)
            return messages
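# A hedged sketch of the auto-lock-renewal registration done above, using the public
# azure-servicebus sync API: AutoLockRenewer keeps extending a message lock while the message
# is being processed. Names and durations are placeholders.
from azure.servicebus import AutoLockRenewer, ServiceBusClient


def receive_with_lock_renewal(conn_str, queue_name):
    renewer = AutoLockRenewer()
    with ServiceBusClient.from_connection_string(conn_str) as client:
        with client.get_queue_receiver(queue_name) as receiver:
            for msg in receiver.receive_messages(max_message_count=1, max_wait_time=5):
                renewer.register(receiver, msg, max_lock_renewal_duration=300)
                # ... long-running processing happens here ...
                receiver.complete_message(msg)
    renewer.close()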
    def __init__(self, client, source, **kwargs):
        """
        Instantiate a consumer. EventHubConsumer should be instantiated by calling the `create_consumer` method
        in EventHubClient.

        :param client: The parent EventHubClient.
        :type client: ~azure.eventhub.client.EventHubClient
        :param source: The source EventHub from which to receive events.
        :type source: str
        :param prefetch: The number of events to prefetch from the service
         for processing. Default is 300.
        :type prefetch: int
        :param owner_level: The priority of the exclusive consumer. An exclusive
         consumer will be created if owner_level is set.
        :type owner_level: int
        :param track_last_enqueued_event_properties: Indicates whether or not the consumer should request information
         on the last enqueued event on its associated partition, and track that information as events are received.
         When information about the partition's last enqueued event is being tracked, each event received from the
         Event Hubs service will carry metadata about the partition. This results in a small amount of additional
         network bandwidth consumption that is generally a favorable trade-off when considered against periodically
         making requests for partition properties using the Event Hub client.
         It is set to `False` by default.
        :type track_last_enqueued_event_properties: bool
        """
        event_position = kwargs.get("event_position", None)
        prefetch = kwargs.get("prefetch", 300)
        owner_level = kwargs.get("owner_level", None)
        keep_alive = kwargs.get("keep_alive", None)
        auto_reconnect = kwargs.get("auto_reconnect", True)
        track_last_enqueued_event_properties = kwargs.get(
            "track_last_enqueued_event_properties", False)
        idle_timeout = kwargs.get("idle_timeout", None)

        self.running = False
        self.closed = False
        self.stop = False  # used by event processor
        self.handler_ready = False

        self._on_event_received = kwargs.get("on_event_received")
        self._client = client
        self._source = source
        self._offset = event_position
        self._offset_inclusive = kwargs.get("event_position_inclusive", False)
        self._prefetch = prefetch
        self._owner_level = owner_level
        self._keep_alive = keep_alive
        self._auto_reconnect = auto_reconnect
        self._retry_policy = errors.ErrorPolicy(
            max_retries=self._client._config.max_retries,
            on_error=_error_handler)  # pylint:disable=protected-access
        self._reconnect_backoff = 1
        self._link_properties = {}
        self._error = None
        self._timeout = 0
        self._idle_timeout = (idle_timeout * 1000) if idle_timeout else None
        partition = self._source.split('/')[-1]
        self._partition = partition
        self._name = "EHConsumer-{}-partition{}".format(
            uuid.uuid4(), partition)
        if owner_level:
            self._link_properties[types.AMQPSymbol(
                EPOCH_SYMBOL)] = types.AMQPLong(int(owner_level))
        link_property_timeout_ms = (self._client._config.receive_timeout
                                    or self._timeout) * 1000  # pylint:disable=protected-access
        self._link_properties[types.AMQPSymbol(
            TIMEOUT_SYMBOL)] = types.AMQPLong(int(link_property_timeout_ms))
        self._handler = None
        self._track_last_enqueued_event_properties = track_last_enqueued_event_properties
        self._last_received_event = None