Example #1
def should_retry_up_to_max_retries_with_proper_headers_with_dlk_retry_enabled(
    publisher_session: Publisher,
):
    body = {"test": "test"}

    publisher_session.publish(
        body, message_properties={"headers": {
            "x-origin": "sample"
        }})

    new_response = {"count": 0}

    def callback(data: dict, **kwargs):
        new_response["count"] = new_response["count"] + 1
        raise Exception

    consumer = Consumer(
        exchange_name=publisher_session.exchange_name,
        queue_name=publisher_session.queue_name,
        routing_key=publisher_session.routing_key,
        callback=callback,
        is_dlk_retry_enabled=True,
        retry_delay=1,
        max_retries=1,
    )
    consumer.start()
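    # With max_retries=1 the callback runs at most twice (the original delivery plus one
    # DLK retry), so a consumed count of 3 is never reached and the assertion below fails.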

    with pytest.raises(AssertionError):
        assert_consumed_message(new_response, {"count": 3})

    consumer.close()
Example #2
def should_republish_message_to_original_queue_with_dlk_retry_enabled(
    publisher_session: Publisher,
):
    body = {"test": "test"}

    publisher_session.publish(
        body, message_properties={"headers": {
            "x-origin": "sample"
        }})

    response = {"count": 0}
    error_response = {"count": 0}

    def callback(data: dict, **kwargs):
        response["count"] = response["count"] + 1
        raise Exception

    def error_callback(*args, **kwargs):
        error_response["count"] = error_response["count"] + 1

    consumer = Consumer(
        exchange_name=publisher_session.exchange_name,
        queue_name=publisher_session.queue_name,
        routing_key=publisher_session.routing_key,
        callback=callback,
        is_dlk_retry_enabled=True,
        retry_delay=1,
        error_callback=error_callback,
    )
    consumer.start()
    assert_consumed_message(response, {"count": 3})
    assert_consumed_message(error_response, {"count": 3})
    consumer.close()
Example #3
def should_handle_different_ident():
    with patch("threading.Thread.ident", new_callable=PropertyMock) as mock_ident:
        mock_ident.side_effect = [11111, 22222]
        from pyrmq import Publisher

        with patch("pika.adapters.blocking_connection.BlockingChannel.basic_publish"):
            publisher = Publisher(
                exchange_name=TEST_EXCHANGE_NAME,
                queue_name=TEST_QUEUE_NAME,
                routing_key=TEST_ROUTING_KEY,
            )
            publisher.publish({})
            publisher.publish({})
Example #4
def should_handle_exception_from_callback(publisher_session: Publisher):
    body = {"test": "test"}
    publisher_session.publish(body)

    response = {}

    def callback(data):
        response.update(data)
        raise Exception

    consumer = Consumer(
        exchange_name=publisher_session.exchange_name,
        queue_name=publisher_session.queue_name,
        routing_key=publisher_session.routing_key,
        callback=callback,
    )
    consumer.start()
    wait_for_result(response, body)
    consumer.close()
Example #5
def should_nack_message_when_callback_method_returns_false(
    publisher_session: Publisher,
) -> None:
    body = {"test": "test"}

    publisher_session.publish(
        body, message_properties={"headers": {
            "x-origin": "sample"
        }})

    response = {"count": 0}

    def callback_that_should_nack(data: dict, **kwargs):
        response["count"] = response["count"] + 1
        return False

    consumer = Consumer(
        exchange_name=publisher_session.exchange_name,
        queue_name=publisher_session.queue_name,
        routing_key=publisher_session.routing_key,
        callback=callback_that_should_nack,
    )
    consumer.start()
    assert_consumed_infinite_loop(response, {"count": 1})
    consumer.close()

    # Consumer should still be able to consume the same message
    # without publishing again if nack is successful.

    new_response = {"count": 0}

    def callback_that_should_ack(data: dict, **kwargs):
        new_response["count"] = new_response["count"] + 1

    consumer = Consumer(
        exchange_name=publisher_session.exchange_name,
        queue_name=publisher_session.queue_name,
        routing_key=publisher_session.routing_key,
        callback=callback_that_should_ack,
    )

    consumer.start()
    assert_consumed_infinite_loop(new_response, {"count": 1})
    consumer.close()
Example #6
def should_get_message_with_higher_priority(priority_session: Publisher):
    test_data = []

    for i in range(0, 10):
        rand_int = randint(0, 255)
        body = {"test": f"test{rand_int}", "priority": rand_int}
        priority_session.publish(body, priority=rand_int)
        test_data.append(body)
    response = {}

    priority_data = [
        pri_data for pri_data in test_data
        if pri_data["priority"] >= TEST_PRIORITY_ARGUMENTS["x-max-priority"]
    ]
    priority_data.reverse()
    less_priority_data = sorted(
        [
            pri_data for pri_data in test_data
            if pri_data["priority"] < TEST_PRIORITY_ARGUMENTS["x-max-priority"]
        ],
        key=lambda x: x["priority"],
    )
    priority_sorted_data = [*less_priority_data, *priority_data]
    last_expected = priority_sorted_data[0]
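    # RabbitMQ caps priorities at ``x-max-priority``, so messages published at or above the
    # cap share the highest effective priority and are delivered first in publish order,
    # followed by the remaining messages in descending priority. ``pop()`` in the callback
    # walks that expected order from the end of the list.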

    def callback(data, **kwargs):
        expected = priority_sorted_data.pop()
        assert expected == data
        response.update(data)

    consumer = Consumer(
        exchange_name=priority_session.exchange_name,
        queue_name=priority_session.queue_name,
        routing_key=priority_session.routing_key,
        callback=callback,
        queue_args=TEST_PRIORITY_ARGUMENTS,
    )
    consumer.start()
    # Last message received with lowest priority
    assert_consumed_message(response, last_expected)
    consumer.close()
Example #7
def should_handle_connection_error_when_connecting():
    from pyrmq import Publisher

    publisher = Publisher(
        exchange_name="incorrect_exchange_name",
        queue_name="incorrect_queue_name",
        routing_key="incorrect_routing_key",
        username="******",  # BlockingConnection class from pika goes on an infinite loop if credentials are wrong.
    )

    with patch(
        "pika.adapters.blocking_connection.BlockingConnection.__init__",
        side_effect=AMQPConnectionError,
    ):
        with patch("time.sleep") as sleep:
            # noinspection PyTypeChecker
            with pytest.raises((TypeError, AMQPConnectionError,)):
                publisher.publish({})

    # Should not sleep since infinite_retry is set to False
    assert sleep.call_count == 0
Example #8
def should_handle_connection_error_when_publishing():
    from pyrmq import Publisher

    def error_callback(error):
        print("error", error)

    publisher = Publisher(
        exchange_name="incorrect_exchange_name",
        queue_name="incorrect_queue_name",
        routing_key="incorrect_routing_key",
        error_callback=error_callback,
    )
    body = {"sample_body": "value"}
    with patch(
        "pika.adapters.blocking_connection.BlockingChannel.basic_publish",
        side_effect=AMQPConnectionError,
    ):
        with patch("time.sleep") as sleep:
            with pytest.raises(AMQPConnectionError):
                publisher.publish(body)

    assert sleep.call_count == publisher.connection_attempts - 1
Example #9
def should_handle_exception_from_callback(publisher_session: Publisher):
    body = {"test": "test"}
    publisher_session.publish(body)

    response = {}

    def error_callback(*args, **kwargs):
        assert kwargs["error_type"] == CONSUME_ERROR

    def callback(data, **kwargs):
        response.update(data)
        raise Exception

    consumer = Consumer(
        exchange_name=publisher_session.exchange_name,
        queue_name=publisher_session.queue_name,
        routing_key=publisher_session.routing_key,
        callback=callback,
        error_callback=error_callback,
    )
    consumer.start()
    assert_consumed_message(response, body)
    consumer.close()
Example #10
def should_handle_infinite_retry():
    from pyrmq import Publisher

    def error_callback(error):
        print("error", error)

    publisher = Publisher(
        exchange_name="incorrect_exchange_name",
        queue_name="incorrect_queue_name",
        routing_key="incorrect_routing_key",
        infinite_retry=True,
        error_callback=error_callback,
    )

    with patch(
        "pika.adapters.blocking_connection.BlockingConnection.__init__",
        side_effect=AMQPConnectionError,
    ):
        with patch("time.sleep", side_effect=[None, None, Exception]) as sleep_call:
            # noinspection PyTypeChecker
            with pytest.raises(Exception):
                publisher.publish({})

            assert sleep_call.call_count == 3
Example #11
class Consumer(object):
    """
    This class uses a ``BlockingConnection`` from pika that automatically handles
    queue declarations and bindings, plus retry logic for both its connection and its consumption.
    It spawns its own thread when ``start()`` is called and runs pika's ``start_consuming()``.
    """
    def __init__(
        self,
        exchange_name: str,
        queue_name: str,
        routing_key: str,
        callback: Callable,
        exchange_type: Optional[str] = "direct",
        **kwargs,
    ):
        """
        :param exchange_name: Your exchange name.
        :param queue_name: Your queue name.
        :param routing_key: Your routing key.
        :param callback: Your callback that should handle a consumed message.
        :keyword host: Your RabbitMQ host. Default: ``"localhost"``
        :keyword port: Your RabbitMQ port. Default: ``5672``
        :keyword username: Your RabbitMQ username. Default: ``"guest"``
        :keyword password: Your RabbitMQ password. Default: ``"guest"``
        :keyword connection_attempts: How many times should PyRMQ try? Default: ``3``
        :keyword is_dlk_retry_enabled: Flag to enable DLK-based retry logic of consumed messages. Default: ``False``
        :keyword retry_delay: Seconds between retries. Default: ``5``
        :keyword retry_backoff_base: Exponential backoff base in seconds. Default: ``2``
        :keyword retry_queue_suffix: The suffix that will be appended to the ``queue_name`` to act as the name of the retry_queue. Default: ``retry``
        :keyword max_retries: Number of maximum retries for DLK retry logic. Default: ``20``
        :keyword exchange_args: Your exchange arguments. Default: ``None``
        :keyword queue_args: Your queue arguments. Default: ``None``
        :keyword bound_exchange: The exchange this consumer needs to bind to. This is an object that has two keys, ``name`` and ``type``. Default: ``None``
        :keyword auto_ack: Whether to ack a consumed message whose callback returned ``None`` or raised an exception; a callback can always override this by returning ``True`` or ``False``. Default: ``True``
        :keyword prefetch_count: How many messages should the consumer retrieve at a time for consumption. Default: ``1``
        """

        from pyrmq import Publisher

        self.connection = None
        self.exchange_name = exchange_name
        self.queue_name = queue_name
        self.routing_key = routing_key
        self.exchange_type = exchange_type
        self.message_received_callback = callback
        self.host = kwargs.get("host") or os.getenv(
            "RABBITMQ_HOST") or "localhost"
        self.port = kwargs.get("port") or os.getenv("RABBITMQ_PORT") or 5672
        self.username = kwargs.get("username", "guest")
        self.password = kwargs.get("password", "guest")
        self.connection_attempts = kwargs.get("connection_attempts", 3)
        self.retry_delay = kwargs.get("retry_delay", 5)
        self.is_dlk_retry_enabled = kwargs.get("is_dlk_retry_enabled", False)
        self.retry_backoff_base = kwargs.get("retry_backoff_base", 2)
        self.retry_queue_suffix = kwargs.get("retry_queue_suffix", "retry")
        self.max_retries = kwargs.get("max_retries", 20)
        self.error_callback = kwargs.get("error_callback")
        self.infinite_retry = kwargs.get("infinite_retry", False)
        self.exchange_args = kwargs.get("exchange_args")
        self.queue_args = kwargs.get("queue_args")
        self.bound_exchange = kwargs.get("bound_exchange")
        self.auto_ack = kwargs.get("auto_ack", True)
        self.prefetch_count = kwargs.get("prefetch_count", 1)
        self.channel = None
        self.thread = None

        self.connection_parameters = ConnectionParameters(
            host=self.host,
            port=self.port,
            credentials=PlainCredentials(self.username, self.password),
            connection_attempts=self.connection_attempts,
            retry_delay=self.retry_delay,
        )

        self.retry_queue_name = f"{self.queue_name}.{self.retry_queue_suffix}"

        if self.is_dlk_retry_enabled:
            self.retry_publisher = Publisher(
                exchange_name=self.retry_queue_name,
                queue_name=self.retry_queue_name,
                routing_key=self.retry_queue_name,
                username=self.username,
                password=self.password,
                port=self.port,
                host=self.host,
                queue_args={
                    "x-dead-letter-exchange": self.exchange_name,
                    "x-dead-letter-routing-key": self.routing_key,
                },
            )

    def declare_queue(self) -> None:
        """
        Declare and bind a channel to a queue.
        """
        self.channel.exchange_declare(
            exchange=self.exchange_name,
            durable=True,
            exchange_type=self.exchange_type,
            arguments=self.exchange_args,
        )

        self.channel.queue_declare(queue=self.queue_name,
                                   arguments=self.queue_args,
                                   durable=True)
        self.channel.queue_bind(
            queue=self.queue_name,
            exchange=self.exchange_name,
            routing_key=self.routing_key,
            arguments=self.queue_args,
        )

        if self.bound_exchange:
            bound_exchange_name = self.bound_exchange["name"]
            bound_exchange_type = self.bound_exchange["type"]
            self.channel.exchange_declare(
                exchange=bound_exchange_name,
                durable=True,
                exchange_type=bound_exchange_type,
            )
            self.channel.exchange_bind(
                destination=self.exchange_name,
                source=bound_exchange_name,
                routing_key=self.routing_key,
                arguments=self.exchange_args,
            )

    def start(self):
        self.connect()
        self.declare_queue()

        self.thread = Thread(target=self.consume)
        self.thread.daemon = True
        self.thread.start()

    def __run_error_callback(self, message: str, error: Exception,
                             error_type: str) -> None:
        """
        Run the configured ``error_callback`` if one was provided; otherwise log the error.
        :param message: Message passed to the error_callback
        :param error: Error that was encountered
        :param error_type: Type of error (CONNECT_ERROR or CONSUME_ERROR)
        """
        if self.error_callback:
            try:
                self.error_callback(message,
                                    error=error,
                                    error_type=error_type)

            except Exception as exception:
                logger.exception(exception)

        else:
            logger.exception(error)

    def __send_reconnection_error_message(
        self,
        error: Union[AMQPConnectionError, ConnectionResetError,
                     ChannelClosedByBroker],
        retry_count: int,
    ) -> None:
        """
        Send error message to your preferred location.
        :param error: Error that prevented the Consumer from processing the message.
        :param retry_count: Number of retries the Consumer attempted before sending an error message.
        """
        message = (
            f"Service tried to reconnect to queue **{retry_count}** times "
            f"but still failed."
            f"\n{repr(error)}")
        self.__run_error_callback(message, error, CONNECT_ERROR)

    def __send_consume_error_message(self,
                                     error: Exception,
                                     retry_count: int = 1) -> None:
        """
        Send error message to your preferred location.
        :param error: Error that prevented the Consumer from processing the message.
        :param retry_count: Number of retries the Consumer attempted before sending an error message.
        """
        message = (f"Service tried to consume message **{retry_count}** times "
                   f"but still failed."
                   f"\n{repr(error)}")
        self.__run_error_callback(message, error, CONSUME_ERROR)

    def __create_connection(self) -> BlockingConnection:
        """
        Create pika's ``BlockingConnection`` from the given connection parameters.
        """
        return BlockingConnection(self.connection_parameters)

    def _compute_expiration(self, retry_count: int) -> int:
        """
        Compute the message expiration (per-message TTL) for the retry queue, in milliseconds.
        """
        b = self.retry_backoff_base
        n = self.retry_delay * 1000

        return b**(retry_count - 1) * n  # e.g. 5 s, 10 s, 20 s, 40 s, 80 s with the defaults, expressed in ms

    def _publish_to_retry_queue(self, data: dict, properties,
                                retry_reason: Exception) -> None:
        """
        Publish message to retry queue with the appropriate metadata in the headers.
        """
        headers = properties.headers or {}
        attempt = headers.get("x-attempt", 0) + 1
        self.__send_consume_error_message(retry_reason, attempt)

        if attempt > self.max_retries:
            return

        expiration = self._compute_expiration(attempt)
        now = datetime.now()
        next_attempt = now + timedelta(seconds=(expiration // 1000))
        message_properties = {
            **properties.__dict__,
            "expiration": str(expiration),
            "headers": {
                **headers,
                "x-attempt": attempt,
                "x-max-attempts": self.max_retries,
                "x-created-at": headers.get("x-created-at", now.isoformat()),
                "x-retry-reason": repr(retry_reason),
                "x-next-attempt": next_attempt.isoformat(),
            },
        }

        for i in range(1, attempt + 1):
            attempt_no = f"x-attempt-{i}"
            previous_attempts = message_properties["headers"]
            previous_attempts[attempt_no] = previous_attempts.get(
                attempt_no, now.isoformat())

        self.retry_publisher.publish(data,
                                     message_properties=message_properties)
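        # Illustrative shape of the retry message assembled above, assuming the message was
        # originally published with ``{"x-origin": "sample"}``, ``retry_delay=1``, and this
        # is its first failed attempt (timestamps are placeholder values):
        #
        #   expiration: "1000"
        #   headers: {
        #       "x-origin": "sample",
        #       "x-attempt": 1,
        #       "x-max-attempts": 20,
        #       "x-created-at": "<now>",
        #       "x-retry-reason": "Exception()",
        #       "x-next-attempt": "<now + 1s>",
        #       "x-attempt-1": "<now>",
        #   }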

    def _consume_message(self, channel, method, properties,
                         data: dict) -> None:
        """
        Wrap the user-provided callback, gracefully handle its errors, and
        call pika's ``basic_ack`` once successful.
        :param channel: pika's Channel on which this message was received.
        :param method: pika's ``Basic.Deliver`` method frame.
        :param properties: pika's BasicProperties
        :param data: Message body received, as bytes.
        """

        if isinstance(data, bytes):
            data = data.decode("ascii")

        data = json.loads(data)

        auto_ack = None
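        # The callback's return value decides acknowledgement below: a truthy value forces
        # an ack, an explicit falsy value (e.g. ``False``) forces a nack, and ``None``
        # (including when the callback raises) falls back to ``self.auto_ack``.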

        try:
            logger.debug("Received message from queue")

            auto_ack = self.message_received_callback(data,
                                                      channel=channel,
                                                      method=method,
                                                      properties=properties)

        except Exception as error:
            if self.is_dlk_retry_enabled:
                self._publish_to_retry_queue(data, properties, error)

            else:
                self.__send_consume_error_message(error)

        if auto_ack or (auto_ack is None and self.auto_ack):
            channel.basic_ack(delivery_tag=method.delivery_tag)

        else:
            channel.basic_nack(delivery_tag=method.delivery_tag)

    def connect(self, retry_count=1) -> None:
        """
        Create pika's ``BlockingConnection`` and initialize queue bindings.
        :param retry_count: Number of retries the Consumer attempted before sending an error message.
        """
        try:
            self.connection = self.__create_connection()
            self.channel = self.connection.channel()
            self.channel.basic_qos(prefetch_count=self.prefetch_count)

        except CONNECTION_ERRORS as error:
            if not (retry_count % self.connection_attempts):
                self.__send_reconnection_error_message(
                    error, self.connection_attempts * retry_count)

                if not self.infinite_retry:
                    raise error

            time.sleep(self.retry_delay)

            self.connect(retry_count=(retry_count + 1))

    def close(self) -> None:
        """
        Manually close a connection to RabbitMQ. This is useful for debugging and tests.
        """
        self.thread.join(0.1)

    def consume(self, retry_count=1) -> None:
        """
        Wrap pika's ``basic_consume()`` and ``start_consuming()`` with retry logic.
        """
        try:
            self.channel.basic_consume(self.queue_name, self._consume_message)

            self.channel.start_consuming()

        except CONNECTION_ERRORS as error:
            if not (retry_count % self.connection_attempts):
                self.__send_reconnection_error_message(error, retry_count)

                if not self.infinite_retry:
                    raise error

            time.sleep(self.retry_delay)

            self.connect()
            self.consume(retry_count=(retry_count + 1))
Example #12
def should_consume_from_the_routed_queue_as_specified_in_headers() -> None:
    bound_exchange_name = "headers_exchange_name"
    routing_key = "headers_routing_key"
    first_response = {"count": 0}
    second_response = {"count": 0}

    def first_callback(data: dict, **kwargs):
        first_response["count"] = first_response["count"] + 1

    def second_callback(data: dict, **kwargs):
        second_response["count"] = second_response["count"] + 1

    # Connect and declare the first exchange/queue pair that subscribes to the bound exchange of type headers
    first_consumer = Consumer(
        exchange_name="first_exchange",
        queue_name="first_queue",
        routing_key=routing_key,
        bound_exchange={
            "name": bound_exchange_name,
            "type": "headers"
        },
        exchange_args={
            "routing.first": "first",
            "x-match": "all",
        },
        callback=first_callback,
    )
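    # For a headers exchange, the ``exchange_args`` above serve as the binding's match
    # criteria: with "x-match": "all", a message is routed to first_exchange only when it
    # carries every listed header (here, "routing.first": "first").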
    first_consumer.connect()
    first_consumer.declare_queue()

    # Connect and declare the second exchange/queue pair that subscribes to the bound exchange of type headers
    second_consumer = Consumer(
        exchange_name="second_exchange",
        queue_name="second_queue",
        routing_key=routing_key,
        bound_exchange={
            "name": bound_exchange_name,
            "type": "headers"
        },
        exchange_args={
            "routing.second": "second",
            "x-match": "all",
        },
        callback=second_callback,
    )
    second_consumer.connect()
    second_consumer.declare_queue()

    publisher = Publisher(
        exchange_name=bound_exchange_name,
        exchange_type="headers",
        routing_key=routing_key,
    )
    publisher.publish(
        {}, message_properties={"headers": {
            "routing.first": "first"
        }})
    publisher.publish(
        {}, message_properties={"headers": {
            "routing.second": "second"
        }})

    first_consumer.start()
    assert_consumed_message(first_response, {"count": 1})
    first_consumer.close()

    second_consumer.start()
    assert_consumed_message(second_response, {"count": 1})
    second_consumer.close()