Example #1
    def _publish_and_retry_on_missing_exchange(self,
                                               exchange,
                                               msg,
                                               routing_key=None,
                                               timeout=None):
        """Publisher that retry if the exchange is missing.
        """

        if not exchange.passive:
            raise RuntimeError("_publish_and_retry_on_missing_exchange() must "
                               "be called with an passive exchange.")

        # TODO(sileht): use @retrying
        # NOTE(sileht): no need to wait longer; the application expects a
        # response before the timeout is exhausted
        duration = (timeout
                    if timeout is not None else self.kombu_reconnect_timeout)

        timer = rpc_common.DecayingTimer(duration=duration)
        timer.start()

        while True:
            try:
                self._publish(exchange,
                              msg,
                              routing_key=routing_key,
                              timeout=timeout)
                return
            except self.connection.channel_errors as exc:
                # NOTE(noelbk/sileht):
                # If rabbit dies, the consumer can be disconnected before the
                # publisher sends, and if the consumer hasn't declared the
                # queue, the publisher will send a message to an exchange
                # that's not bound to a queue, and the message will be lost.
                # So we set passive=True on the publisher exchange, catch
                # the 404 kombu ChannelError, and retry until the exchange
                # appears.
                if exc.code == 404 and timer.check_return() > 0:
                    LOG.info(
                        _LI("The exchange %(exchange)s to send to "
                            "%(routing_key)s doesn't exist yet, "
                            "retrying...") % {
                                'exchange': exchange.name,
                                'routing_key': routing_key
                            })
                    time.sleep(0.25)
                    continue
                elif exc.code == 404:
                    msg = _("The exchange %(exchange)s to send to "
                            "%(routing_key)s still doesn't exist after "
                            "%(duration)s sec abandoning...") % {
                                'duration': duration,
                                'exchange': exchange.name,
                                'routing_key': routing_key
                            }
                    LOG.info(msg)
                    raise rpc_amqp.AMQPDestinationNotFound(msg)
                raise
Example #2
    def test_duration_callback_with_args(self, now):
        now.return_value = 0
        t = common.DecayingTimer(2)
        t.start()

        now.return_value = 3
        callback = mock.Mock()
        remaining = t.check_return(callback, 1, a='b')
        self.assertEqual(0, remaining)
        callback.assert_called_once_with(1, a='b')
Example #3
    def iterconsume(self, limit=None, timeout=None):
        """Return an iterator that will consume from all queues/consumers.

        NOTE(sileht): Must be called within the connection lock
        """

        timer = rpc_common.DecayingTimer(duration=timeout)
        timer.start()

        def _raise_timeout(exc):
            LOG.debug('Timed out waiting for RPC response: %s', exc)
            raise rpc_common.Timeout()

        def _recoverable_error_callback(exc):
            self.do_consume = True
            timer.check_return(_raise_timeout, exc)

        def _error_callback(exc):
            _recoverable_error_callback(exc)
            LOG.error('Failed to consume message from queue: %s',
                      exc)

        def _consume():
            if self.do_consume:
                queues_head = self.consumers[:-1]  # not fanout.
                queues_tail = self.consumers[-1]  # fanout
                for queue in queues_head:
                    queue.consume(nowait=True)
                queues_tail.consume(nowait=False)
                self.do_consume = False

            poll_timeout = (self._poll_timeout if timeout is None
                            else min(timeout, self._poll_timeout))
            while True:
                if self._consume_loop_stopped:
                    self._consume_loop_stopped = False
                    raise StopIteration

                if self._heartbeat_supported_and_enabled():
                    self._heartbeat_check()

                try:
                    return self.connection.drain_events(timeout=poll_timeout)
                except socket.timeout as exc:
                    poll_timeout = timer.check_return(
                        _raise_timeout, exc, maximum=self._poll_timeout)

        for iteration in itertools.count(0):
            if limit and iteration >= limit:
                raise StopIteration
            yield self.ensure(
                _consume,
                recoverable_error_callback=_recoverable_error_callback,
                error_callback=_error_callback)
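
The consumer examples in this listing (Examples #3, #4, #7, #9, #11 and #12) all drive the same idiom: start a DecayingTimer with the overall timeout, poll the transport in short slices, and on every poll timeout call check_return() so that it either fires the timeout callback (deadline reached) or returns the remaining time, capped by ``maximum``, to use as the next poll timeout. The sketch below condenses that idiom; it is illustrative only, not oslo.messaging code. ``drain_once`` is a hypothetical stand-in for the driver's blocking poll call (e.g. connection.drain_events), and the import path assumes the examples' ``rpc_common`` refers to ``oslo_messaging._drivers.common``.

import socket

from oslo_messaging._drivers import common as rpc_common


def drain_with_deadline(drain_once, timeout=None, poll_interval=1.0):
    """Poll repeatedly, shrinking the per-poll timeout as the deadline nears."""
    timer = rpc_common.DecayingTimer(duration=timeout)
    timer.start()

    def _raise_timeout(exc):
        raise rpc_common.Timeout()

    poll_timeout = (poll_interval if timeout is None
                    else min(timeout, poll_interval))
    while True:
        try:
            # drain_once must block for at most poll_timeout seconds and
            # raise socket.timeout when nothing arrives in that window.
            return drain_once(timeout=poll_timeout)
        except socket.timeout as exc:
            # Either raises rpc_common.Timeout via the callback once the
            # deadline has passed, or returns the time left, capped at
            # poll_interval, to use as the next poll timeout.
            poll_timeout = timer.check_return(_raise_timeout, exc,
                                              maximum=poll_interval)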
Example #4
    def consume(self, timeout=None):
        """Consume from all queues/consumers."""

        timer = rpc_common.DecayingTimer(duration=timeout)
        timer.start()

        def _raise_timeout(exc):
            LOG.debug('Timed out waiting for RPC response: %s', exc)
            raise rpc_common.Timeout()

        def _recoverable_error_callback(exc):
            if not isinstance(exc, rpc_common.Timeout):
                self._new_consumers = self._consumers
            timer.check_return(_raise_timeout, exc)

        def _error_callback(exc):
            _recoverable_error_callback(exc)
            LOG.error(_LE('Failed to consume message from queue: %s'),
                      exc)

        def _consume():
            # NOTE(sileht): in case the acknowledgment or requeue of a
            # message fails, the kombu transport can be disconnected.
            # In this case, we must redeclare our consumers, so raise
            # a recoverable error to trigger the reconnection code.
            if not self.connection.connected:
                raise self.connection.recoverable_connection_errors[0]

            if self._new_consumers:
                for tag, consumer in enumerate(self._consumers):
                    if consumer in self._new_consumers:
                        consumer.consume(tag=tag)
                self._new_consumers = []

            poll_timeout = (self._poll_timeout if timeout is None
                            else min(timeout, self._poll_timeout))
            while True:
                if self._consume_loop_stopped:
                    return

                if self._heartbeat_supported_and_enabled():
                    self._heartbeat_check()

                try:
                    self.connection.drain_events(timeout=poll_timeout)
                    return
                except socket.timeout as exc:
                    poll_timeout = timer.check_return(
                        _raise_timeout, exc, maximum=self._poll_timeout)

        with self._connection_lock:
            self.ensure(_consume,
                        recoverable_error_callback=_recoverable_error_callback,
                        error_callback=_error_callback)
Example #5
    def test_reset(self, now):
        now.return_value = 0
        t = common.DecayingTimer(3)
        t.start()

        now.return_value = 1
        remaining = t.check_return()
        self.assertEqual(2, remaining)

        t.restart()
        remaining = t.check_return()
        self.assertEqual(3, remaining)
Example #6
    def wait(self, msg_id, timeout, call_monitor_timeout):
        # NOTE(sileht): for each msg_id we receive two AMQP messages:
        # the first one carries the payload, and the second one ensures
        # the other side has finished sending the payload
        # NOTE(viktors): We are going to remove this behavior in the N
        # release, but we need to keep backward compatibility, so we should
        # support both cases for now.
        timer = rpc_common.DecayingTimer(duration=timeout)
        timer.start()
        if call_monitor_timeout:
            call_monitor_timer = rpc_common.DecayingTimer(
                duration=call_monitor_timeout)
            call_monitor_timer.start()
        else:
            call_monitor_timer = None
        final_reply = None
        ending = False
        while not ending:
            timeout = timer.check_return(self._raise_timeout_exception, msg_id)
            if call_monitor_timer and timeout > 0:
                cm_timeout = call_monitor_timer.check_return(
                    self._raise_timeout_exception, msg_id)
                if cm_timeout < timeout:
                    timeout = cm_timeout
            try:
                message = self.waiters.get(msg_id, timeout=timeout)
            except moves.queue.Empty:
                self._raise_timeout_exception(msg_id)

            reply, ending = self._process_reply(message)
            if reply is not None:
                # NOTE(viktors): This can be either the first _send_reply()
                # with an empty `result` field or a second _send_reply() with
                # ending=True and no `result` field.
                final_reply = reply
            elif ending is False:
                LOG.debug('Call monitor heartbeat received; '
                          'renewing timeout timer')
                call_monitor_timer.restart()
        return final_reply
Example #7
    def consume(self, timeout=None):
        """Consume from all queues/consumers."""

        timer = rpc_common.DecayingTimer(duration=timeout)
        timer.start()

        def _raise_timeout(exc):
            LOG.debug('Timed out waiting for RPC response: %s', exc)
            raise rpc_common.Timeout()

        def _recoverable_error_callback(exc):
            self.do_consume = True
            timer.check_return(_raise_timeout, exc)

        def _error_callback(exc):
            _recoverable_error_callback(exc)
            LOG.error(_('Failed to consume message from queue: %s'), exc)

        def _consume():
            if self.do_consume:
                queues_head = self.consumers[:-1]  # not fanout.
                queues_tail = self.consumers[-1]  # fanout
                for queue in queues_head:
                    queue.consume(nowait=True)
                queues_tail.consume(nowait=False)
                self.do_consume = False

            poll_timeout = (self._poll_timeout if timeout is None else min(
                timeout, self._poll_timeout))
            while True:
                if self._consume_loop_stopped:
                    return

                if self._heartbeat_supported_and_enabled():
                    self.connection.heartbeat_check(
                        rate=self.driver_conf.heartbeat_rate)
                try:
                    self.connection.drain_events(timeout=poll_timeout)
                    return
                except socket.timeout as exc:
                    poll_timeout = timer.check_return(
                        _raise_timeout, exc, maximum=self._poll_timeout)

        with self._connection_lock:
            self.ensure(_consume,
                        recoverable_error_callback=_recoverable_error_callback,
                        error_callback=_error_callback)
Example #8
    def reply(self, reply=None, failure=None, log_failure=True):
        if not self.msg_id:
            # NOTE(Alexei_987) not sending a reply if msg_id is empty,
            #    because no reply is expected by the caller side
            return

        # NOTE(sileht): return without holding a connection if possible
        if not self._obsolete_reply_queues.reply_q_valid(
                self.reply_q, self.msg_id):
            return

        # NOTE(sileht): we read the configuration value from the driver
        # so this change can be backported to previous versions that
        # still have the qpid driver
        duration = self.listener.driver.missing_destination_retry_timeout
        timer = rpc_common.DecayingTimer(duration=duration)
        timer.start()

        while True:
            try:
                with self.listener.driver._get_connection(
                        rpc_common.PURPOSE_SEND) as conn:
                    self._send_reply(conn,
                                     reply,
                                     failure,
                                     log_failure=log_failure)
                return
            except rpc_amqp.AMQPDestinationNotFound:
                if timer.check_return() > 0:
                    LOG.debug(("The reply %(msg_id)s cannot be sent  "
                               "%(reply_q)s reply queue don't exist, "
                               "retrying..."), {
                                   'msg_id': self.msg_id,
                                   'reply_q': self.reply_q
                               })
                    time.sleep(0.25)
                else:
                    self._obsolete_reply_queues.add(self.reply_q, self.msg_id)
                    LOG.info(
                        _LI("The reply %(msg_id)s cannot be sent; the "
                            "%(reply_q)s reply queue doesn't exist after "
                            "%(duration)s sec, abandoning..."), {
                                'msg_id': self.msg_id,
                                'reply_q': self.reply_q,
                                'duration': duration
                            })
                    return
Example #9
    def iterconsume(self, limit=None, timeout=None):
        """Return an iterator that will consume from all queues/consumers."""

        timer = rpc_common.DecayingTimer(duration=timeout)
        timer.start()

        def _raise_timeout(exc):
            LOG.debug('Timed out waiting for RPC response: %s', exc)
            raise rpc_common.Timeout()

        def _error_callback(exc):
            timer.check_return(_raise_timeout, exc)
            LOG.exception(_('Failed to consume message from queue: %s'), exc)

        def _consume():
            # NOTE(sileht):
            # maximum value chosen according to the best practice from kombu:
            # http://kombu.readthedocs.org/en/latest/reference/kombu.common.html#kombu.common.eventloop
            poll_timeout = 1 if timeout is None else min(timeout, 1)

            while True:
                if self._consume_loop_stopped:
                    self._consume_loop_stopped = False
                    raise StopIteration

                try:
                    nxt_receiver = self.session.next_receiver(
                        timeout=poll_timeout)
                except qpid_exceptions.Empty as exc:
                    poll_timeout = timer.check_return(_raise_timeout, exc,
                                                      maximum=1)
                else:
                    break

            try:
                self._lookup_consumer(nxt_receiver).consume()
            except Exception:
                LOG.exception(_("Error processing message. "
                                "Skipping it."))

        for iteration in itertools.count(0):
            if limit and iteration >= limit:
                raise StopIteration
            yield self.ensure(_error_callback, _consume)
Example #10
    def wait(self, msg_id, timeout):
        # NOTE(sileht): for each msg_id we receive two AMQP messages:
        # the first one carries the payload, and the second one ensures
        # the other side has finished sending the payload
        timer = rpc_common.DecayingTimer(duration=timeout)
        timer.start()
        final_reply = None
        ending = False
        while not ending:
            timeout = timer.check_return(self._raise_timeout_exception, msg_id)
            try:
                message = self.waiters.get(msg_id, timeout=timeout)
            except moves.queue.Empty:
                self._raise_timeout_exception(msg_id)

            reply, ending = self._process_reply(message)
            if not ending:
                final_reply = reply
        return final_reply
Example #11
    def consume(self, timeout=None):
        """recieve messages as many as max_fetch_messages.

        In this functions, there are no while loop to subscribe.
        This would be helpful when we wants to control the velocity of
        subscription.
        """
        duration = (self.consumer_timeout if timeout is None else timeout)
        timer = driver_common.DecayingTimer(duration=duration)
        timer.start()

        def _raise_timeout():
            LOG.debug('Timed out waiting for Kafka response')
            raise driver_common.Timeout()

        poll_timeout = (self.consumer_timeout if timeout is None else min(
            timeout, self.consumer_timeout))

        while True:
            if self._consume_loop_stopped:
                return
            try:
                next_timeout = poll_timeout * 1000.0
                # TODO(use configure() method instead)
                # Currently KafkaConsumer does not support updating only
                # the fetch_max_wait_ms parameter
                self.consumer._config['fetch_max_wait_ms'] = next_timeout
                messages = list(self.consumer.fetch_messages())
            except Exception as e:
                LOG.exception(_LE("Failed to consume messages: %s"), e)
                messages = None

            if not messages:
                poll_timeout = timer.check_return(
                    _raise_timeout, maximum=self.consumer_timeout)
                continue

            return messages
Example #12
    def consume(self, timeout=None):
        """Receive up to 'max_fetch_messages' messages.

        :param timeout: poll timeout in seconds
        """
        duration = (self.consumer_timeout if timeout is None else timeout)
        timer = driver_common.DecayingTimer(duration=duration)
        timer.start()

        def _raise_timeout():
            LOG.debug('Timed out waiting for Kafka response')
            raise driver_common.Timeout()

        poll_timeout = (self.consumer_timeout if timeout is None
                        else min(timeout, self.consumer_timeout))

        while True:
            if self._consume_loop_stopped:
                return
            try:
                next_timeout = poll_timeout * 1000.0
                # TODO(use configure() method instead)
                # Currently KafkaConsumer does not support updating only
                # the fetch_max_wait_ms parameter
                self.consumer._config['fetch_max_wait_ms'] = next_timeout
                messages = list(self.consumer.fetch_messages())
            except Exception as e:
                LOG.exception(_LE("Failed to consume messages: %s"), e)
                messages = None

            if not messages:
                poll_timeout = timer.check_return(
                    _raise_timeout, maximum=self.consumer_timeout)
                continue

            return messages
Example #13
    def test_duration_is_none_and_maximum_set(self):
        t = common.DecayingTimer()
        t.start()
        remaining = t.check_return(None, maximum=2)
        self.assertEqual(2, remaining)
Example #14
    def test_duration_is_none(self):
        t = common.DecayingTimer()
        t.start()
        remaining = t.check_return(None)
        self.assertEqual(None, remaining)
Example #15
    def test_no_duration_but_maximum(self):
        t = common.DecayingTimer()
        t.start()
        remaining = t.check_return(maximum=2)
        self.assertEqual(2, remaining)
Example #16
    def test_no_duration_no_callback(self):
        t = common.DecayingTimer()
        t.start()
        remaining = t.check_return()
        self.assertEqual(None, remaining)
Example #17
    def reply(self, reply=None, failure=None):
        if not self.msg_id:
            # NOTE(Alexei_987) not sending a reply if msg_id is empty,
            #    because no reply is expected by the caller side
            return

        # NOTE(sileht): return without holding a connection if possible
        if not self._obsolete_reply_queues.reply_q_valid(
                self.reply_q, self.msg_id):
            return

        # NOTE(sileht): we read the configuration value from the driver
        # so this change can be backported to previous versions that
        # still have the qpid driver
        duration = self.listener.driver.missing_destination_retry_timeout
        timer = rpc_common.DecayingTimer(duration=duration)
        timer.start()

        while True:
            try:
                with self.listener.driver._get_connection(
                        rpc_common.PURPOSE_SEND) as conn:
                    self._send_reply(conn, reply, failure)

                return
            except oslo_messaging.MessageUndeliverable:
                # queue not found
                if timer.check_return() <= 0:
                    self._obsolete_reply_queues.add(self.reply_q, self.msg_id)
                    LOG.error(
                        'The reply %(msg_id)s failed to send after '
                        '%(duration)d seconds due to a missing queue '
                        '(%(reply_q)s). Abandoning...', {
                            'msg_id': self.msg_id,
                            'duration': duration,
                            'reply_q': self.reply_q
                        })
                    return

                LOG.debug(
                    'The reply %(msg_id)s could not be sent due to a missing '
                    'queue (%(reply_q)s). Retrying...', {
                        'msg_id': self.msg_id,
                        'reply_q': self.reply_q
                    })
                time.sleep(0.25)
            except rpc_amqp.AMQPDestinationNotFound as exc:
                # exchange not found/down
                if timer.check_return() <= 0:
                    self._obsolete_reply_queues.add(self.reply_q, self.msg_id)
                    LOG.error(
                        'The reply %(msg_id)s failed to send after '
                        '%(duration)d seconds due to a broker issue '
                        '(%(exc)s). Abandoning...', {
                            'msg_id': self.msg_id,
                            'duration': duration,
                            'exc': exc
                        })
                    return

                LOG.debug(
                    'The reply %(msg_id)s could not be sent due to a broker '
                    'issue (%(exc)s). Retrying...', {
                        'msg_id': self.msg_id,
                        'exc': exc
                    })
                time.sleep(0.25)
Example #18
    def test_duration_expired_no_callback(self):
        t = common.DecayingTimer(2)
        t._ends_at = time.time() - 10
        remaining = t.check_return()
        self.assertAlmostEqual(-10, remaining, 0)
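
Taken together, the unit tests above (Examples #2, #5, #13-#16 and #18) pin down the DecayingTimer contract these drivers rely on: the duration is optional, start() and restart() (re)arm the deadline, and check_return(callback, *args, maximum=..., **kwargs) reports the time remaining (None when there is neither a duration nor a maximum, the maximum when there is no duration), capping the result at ``maximum`` and invoking the callback once the deadline has passed. The snippet below is a minimal sketch consistent with that observable behaviour, not the oslo.messaging implementation. Note that the tests come from more than one release and disagree on what an expired timer returns (0 in Example #2, a negative remainder in Example #18); this sketch simply returns the raw remainder.

import time


class DecayingTimerSketch(object):
    """Illustrative re-implementation of the behaviour the tests exercise."""

    def __init__(self, duration=None):
        self._duration = duration
        self._started_at = None

    def start(self):
        self._started_at = time.time()
        return self

    def restart(self):
        # Re-arm the deadline from "now", as test_reset expects.
        self._started_at = time.time()

    def check_return(self, timeout_callback=None, *args, **kwargs):
        maximum = kwargs.pop('maximum', None)
        if self._duration is None:
            # No deadline: only the caller-supplied cap applies.
            return maximum
        left = self._duration - (time.time() - self._started_at)
        if left <= 0 and timeout_callback is not None:
            # Deadline passed: notify the caller with the forwarded arguments.
            timeout_callback(*args, **kwargs)
        return left if maximum is None else min(left, maximum)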