def _wait(self, condition, msg, log_after, timeout_timer):
        """Wait while condition() is true. Write a log message if condition()
        has not become false within `log_after` seconds. Raise TaskTimeout if
        timeout_timer expires while waiting.
        """

        log_timer = None
        if log_after != 0:
            log_timer = timeutils.StopWatch(duration=log_after)
            log_timer.start()

        while condition():
            if log_timer is not None and log_timer.expired():
                LOG.warning(_LW('Possible hang: %s'), msg)
                LOG.debug(''.join(traceback.format_stack()))
                # Only log once. After that we wait indefinitely without
                # logging.
                log_timer = None

            if timeout_timer is not None and timeout_timer.expired():
                raise TaskTimeout(msg)

            timeouts = []
            if log_timer is not None:
                timeouts.append(log_timer.leftover())
            if timeout_timer is not None:
                timeouts.append(timeout_timer.leftover())

            wait = None
            if timeouts:
                wait = min(timeouts)
            self._cond.wait(wait)
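The example above pairs oslo_utils StopWatch timers with a condition variable so the wait sleeps exactly until the next timer is due. Below is a minimal, self-contained sketch of the same pattern, assuming only oslo_utils and the standard library (the names done and wait_until_done are illustrative, not part of oslo.messaging):

import threading

from oslo_utils import timeutils

cond = threading.Condition()
done = False

def wait_until_done(log_after=5.0):
    """Block until `done` is set, warning once if it takes too long."""
    log_timer = timeutils.StopWatch(duration=log_after)
    log_timer.start()
    with cond:
        while not done:
            if log_timer is not None and log_timer.expired():
                print("possible hang")  # stands in for LOG.warning
                log_timer = None        # warn only once
            # sleep no longer than the time left on the log timer
            cond.wait(log_timer.leftover() if log_timer else None)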
Example 2
        def _get_single_host():
            hosts = self.get_hosts(target)
            try:
                if not hosts:
                    err_msg = "No hosts were found for target %s." % target
                    LOG.error(err_msg)
                    raise oslo_messaging.InvalidTarget(err_msg, target)

                if len(hosts) == 1:
                    host = hosts[0]
                    LOG.info(_LI(
                        "A single host %(host)s found for target %(target)s.")
                        % {"host": host, "target": target})
                else:
                    host = random.choice(hosts)
                    LOG.warning(_LW(
                        "Multiple hosts %(hosts)s were found for target "
                        "%(target)s. Using a random one - %(host)s.")
                        % {"hosts": hosts, "target": target, "host": host})
                return host
            except oslo_messaging.InvalidTarget as ex:
                if timeout:
                    raise oslo_messaging.MessagingTimeout()
                else:
                    raise ex
Example 3
    def send_request(self, multipart_message):

        envelope = multipart_message[zmq_names.MULTIPART_IDX_ENVELOPE]

        LOG.debug("Envelope: %s" % envelope)

        target = envelope[zmq_names.FIELD_TARGET]
        dealer_socket = self._check_hosts_connections(
            target, zmq_names.socket_type_str(zmq.ROUTER))

        if not dealer_socket.connections:
            # NOTE(ozamiatin): Here we can provide
            # a queue for keeping messages to send them later
            # when some listener appears. However, such an approach,
            # while more reliable, would consume additional memory.
            LOG.warning(
                _LW("Request %s was dropped because there is no "
                    "connection") %
                envelope[zmq_names.FIELD_MSG_TYPE])
            return

        self.reply_receiver.track_socket(dealer_socket.handle)

        LOG.debug(
            "Sending message %(message)s to a target %(target)s" % {
                "message": envelope[zmq_names.FIELD_MSG_ID],
                "target": envelope[zmq_names.FIELD_TARGET]
            })

        if envelope[zmq_names.FIELD_MSG_TYPE] in zmq_names.MULTISEND_TYPES:
            for _ in range(dealer_socket.connections_count()):
                self._send_request(dealer_socket, multipart_message)
        else:
            self._send_request(dealer_socket, multipart_message)
Example 4
    def wait(self):
        """Wait for message processing to complete.

        After calling stop(), there may still be some existing messages
        which have not been completely processed. The wait() method blocks
        until all message processing has completed.

        Once it's finished, the underlying driver resources associated to this
        server are released (like closing useless network connections).
        """
        self._check_same_thread_id()

        if self._running:
            # NOTE(dims): Need to change this to raise RuntimeError after
            # verifying/fixing other openstack projects (like Neutron)
            # work ok with this change
            LOG.warn(_LW("wait() should be called after stop() as it "
                         "waits for existing messages to finish "
                         "processing"))

        if self._executor is not None:
            self._executor.wait()
            # Close listener connection after processing all messages
            self._executor.listener.cleanup()

        self._executor = None
        # NOTE(sileht): executor/listener have been properly stopped
        # allow to restart it into another thread
        self._thread_id = None
Example 5
    def _load_notifiers(self):
        """One-time load of notifier config file."""
        self.routing_groups = {}
        self.used_drivers = set()
        filename = CONF.routing_notifier_config
        if not filename:
            return

        # Infer which drivers are used from the config file.
        self.routing_groups = yaml.safe_load(
            self._get_notifier_config_file(filename))
        if not self.routing_groups:
            self.routing_groups = {}  # In case we got None from load()
            return

        for group in self.routing_groups.values():
            self.used_drivers.update(group.keys())

        LOG.debug('loading notifiers from %s', self.NOTIFIER_PLUGIN_NAMESPACE)
        self.plugin_manager = dispatch.DispatchExtensionManager(
            namespace=self.NOTIFIER_PLUGIN_NAMESPACE,
            check_func=self._should_load_plugin,
            invoke_on_load=True,
            invoke_args=None)
        if not list(self.plugin_manager):
            LOG.warning(_LW("Failed to load any notifiers for %s"),
                        self.NOTIFIER_PLUGIN_NAMESPACE)
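The routing configuration loaded above is plain YAML mapping group names to per-driver settings; used_drivers ends up as the union of every group's keys. A hypothetical example of such a file and the loading logic (the group and driver names here are made up):

import yaml

config = """
group_1:
    rpc:
        accepted_priorities: ['info']
group_2:
    log:
        accepted_events: ['compute.instance.*']
"""

routing_groups = yaml.safe_load(config)
used_drivers = set()
for group in routing_groups.values():
    used_drivers.update(group.keys())
print(used_drivers)  # {'rpc', 'log'}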
Example 6
 def _no_reply_log(self, reply_q, msg_id):
     LOG.warning(
         _LW("%(reply_queue)s doesn't exists, drop reply to "
             "%(msg_id)s"), {
                 'reply_queue': reply_q,
                 'msg_id': msg_id
             })
Example 7
    def dispatch(self, incoming):
        """Dispatch notification messages to the appropriate endpoint method.
        """

        messages_grouped = itertools.groupby(sorted(
            (self._extract_user_message(m) for m in incoming),
            key=operator.itemgetter(0)), operator.itemgetter(0))

        requeues = set()
        for priority, messages in messages_grouped:
            __, raw_messages, messages = six.moves.zip(*messages)
            if priority not in PRIORITIES:
                LOG.warning(_LW('Unknown priority "%s"'), priority)
                continue
            for screen, callback in self._callbacks_by_priority.get(priority,
                                                                    []):
                if screen:
                    filtered_messages = [message for message in messages
                                         if screen.match(
                                             message["ctxt"],
                                             message["publisher_id"],
                                             message["event_type"],
                                             message["metadata"],
                                             message["payload"])]
                else:
                    filtered_messages = list(messages)

                if not filtered_messages:
                    continue

                ret = self._exec_callback(callback, filtered_messages)
                if ret == NotificationResult.REQUEUE:
                    requeues.update(raw_messages)
                    break
        return requeues
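Note that itertools.groupby only merges consecutive items, which is why the messages are sorted by priority before grouping. A quick illustration of the difference on plain data:

import itertools
import operator

msgs = [('info', 'a'), ('error', 'b'), ('info', 'c')]

# Unsorted input: 'info' shows up as two separate groups.
print([(k, [v for _, v in g])
       for k, g in itertools.groupby(msgs, operator.itemgetter(0))])
# [('info', ['a']), ('error', ['b']), ('info', ['c'])]

# Sorted input: one group per priority, as dispatch() expects.
print([(k, [v for _, v in g])
       for k, g in itertools.groupby(sorted(msgs), operator.itemgetter(0))])
# [('error', ['b']), ('info', ['a', 'c'])]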
Example 10
    def __init__(self, conf, *args, **kwargs):
        super(MatchmakerRedis, self).__init__(conf, *args, **kwargs)

        self._redis_hosts = self._extract_redis_hosts()

        self._redis_instances = [
            redis.StrictRedis(host=redis_host["host"],
                              port=redis_host["port"],
                              password=redis_host["password"])
            for redis_host in self._redis_hosts
        ]

        for redis_host, redis_instance \
                in six.moves.zip(self._redis_hosts, self._redis_instances):
            address = "{host}:{port}".format(host=redis_host["host"],
                                             port=redis_host["port"])
            redis_instance._address = address
            is_available = self._check_availability(redis_instance)
            if is_available:
                redis_instance._is_available = True
                redis_instance._ready_from = time.time()
            else:
                LOG.warning(_LW("Redis host %s is not available now."),
                            address)
                redis_instance._is_available = False
                redis_instance._ready_from = float("inf")

        # NOTE(gdavoian): store instances in a random order
        # (for the sake of load balancing)
        random.shuffle(self._redis_instances)

        self._availability_updater = \
            MatchmakerRedisAvailabilityUpdater(self.conf, self)
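The availability probe used above presumably reduces to a ping against each instance. A minimal sketch of such a check with redis-py (the host and port are placeholders):

import redis

def check_availability(instance):
    # PING raises ConnectionError when the server is unreachable.
    try:
        instance.ping()
        return True
    except redis.ConnectionError:
        return False

instance = redis.StrictRedis(host="127.0.0.1", port=6379)
print(check_availability(instance))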
Example 11
 def sender_failed(self, sender_link, error):
     """Protocol error occurred."""
     LOG.warning(_LW("sender %(addr)s failed error=%(error)s"), {
         'addr': self._address,
         'error': error
     })
     self.sender_closed(sender_link)
Example 12
    def dispatch(self, incoming):
        """Dispatch notification messages to the appropriate endpoint method.
        """

        messages_grouped = itertools.groupby(
            (self._extract_user_message(m) for m in incoming), lambda x: x[0])

        requeues = set()
        for priority, messages in messages_grouped:
            __, raw_messages, messages = six.moves.zip(*messages)
            raw_messages = list(raw_messages)
            messages = list(messages)
            if priority not in PRIORITIES:
                LOG.warning(_LW('Unknown priority "%s"'), priority)
                continue
            for screen, callback in self._callbacks_by_priority.get(
                    priority, []):
                if screen:
                    filtered_messages = [
                        message for message in messages if screen.match(
                            message["ctxt"], message["publisher_id"],
                            message["event_type"], message["metadata"],
                            message["payload"])
                    ]
                else:
                    filtered_messages = messages

                if not filtered_messages:
                    continue

                ret = self._exec_callback(callback, filtered_messages)
                if ret == NotificationResult.REQUEUE:
                    requeues.update(raw_messages)
                    break
        return requeues
Example 13
def _drop_message_warn(request):
    LOG.warning(
        _LW("Matchmaker contains no records for specified "
            "target %(target)s. Dropping message %(msg_id)s.") % {
                "target": request.target,
                "msg_id": request.message_id
            })
Example 14
        def wrap(self, *args, **kws):
            with self._lock:
                # check to see if a fork was done after the Controller and its
                # I/O thread was spawned.  old_pid will be None the first time
                # this is called which will cause the Controller to be created.
                old_pid = self._pid
                self._pid = os.getpid()

                if old_pid != self._pid:
                    if self._ctrl is not None:
                        # fork was called after the Controller was created, and
                        # we are now executing as the child process.  Do not
                        # touch the existing Controller - it is owned by the
                        # parent.  Best we can do here is simply drop it and
                        # hope we get lucky.
                        LOG.warning(_LW("Process forked after connection "
                                        "established!"))
                        self._ctrl = None
                    # Create a Controller that connects to the messaging
                    # service:
                    self._ctrl = controller.Controller(self._hosts,
                                                       self._default_exchange,
                                                       self._conf)
                    self._ctrl.connect()
            return func(self, *args, **kws)
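The decorator above guards a fork-sensitive resource by re-checking os.getpid() on every call. The essential mechanism, reduced to a standalone sketch (LazyConnection and its dummy connection object are illustrative stand-ins, not oslo.messaging code):

import os
import threading

class LazyConnection(object):
    def __init__(self):
        self._lock = threading.Lock()
        self._pid = None
        self._conn = None

    def get(self):
        with self._lock:
            old_pid, self._pid = self._pid, os.getpid()
            if old_pid != self._pid:
                # First call, or we are running in a forked child: the
                # parent's connection must not be reused, so drop it and
                # build a fresh one.
                self._conn = object()  # stands in for a real connection
            return self._conn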
Example 15
    def _create_message(self, context, message, reply_id, message_id, socket,
                        message_type):
        # drop a duplicate message
        if message_id in self.messages_cache:
            LOG.warning(
                _LW("[%(host)s] Dropping duplicate %(msg_type)s "
                    "message %(msg_id)s"), {
                        "host": self.host,
                        "msg_type": zmq_names.message_type_str(message_type),
                        "msg_id": message_id
                    })
            # NOTE(gdavoian): send yet another ack for the non-CALL
            # message, since the old one might be lost;
            # for the CALL message also try to resend its reply
            # (of course, if it was already obtained and cached).
            self._acknowledge(reply_id, message_id, socket)
            if message_type == zmq_names.CALL_TYPE:
                self._reply_from_cache(message_id, socket)
            return None

        self.messages_cache.add(message_id)

        # NOTE(gdavoian): send an immediate ack, since it may
        # be too late to wait until the message is dispatched
        # and processed by an RPC server
        self._acknowledge(reply_id, message_id, socket)

        return super(DealerConsumerWithAcks,
                     self)._create_message(context, message, reply_id,
                                           message_id, socket, message_type)
Example 16
    def _heartbeat_thread_job(self):
        """Thread that maintains inactive connections
        """
        while not self._heartbeat_exit_event.is_set():
            with self._connection_lock.for_heartbeat():

                recoverable_errors = (
                    self.connection.recoverable_channel_errors +
                    self.connection.recoverable_connection_errors)

                try:
                    try:
                        self._heartbeat_check()
                        # NOTE(sileht): We need to drain events to receive
                        # heartbeats from the broker, but we must not hold
                        # the connection for too long. In amqpdriver a
                        # connection is used exclusively either for reading
                        # or for writing, so we have to do this for
                        # connections used for writing; drain_events already
                        # does that for the other connections.
                        try:
                            self.connection.drain_events(timeout=0.001)
                        except socket.timeout:
                            pass
                    except recoverable_errors as exc:
                        LOG.info(_LI("A recoverable connection/channel error "
                                     "occurred, trying to reconnect: %s"), exc)
                        self.ensure_connection()
                except Exception:
                    LOG.warning(_LW("Unexpected error during heartbeart "
                                    "thread processing, retrying..."))
                    LOG.debug('Exception', exc_info=True)

            self._heartbeat_exit_event.wait(
                timeout=self._heartbeat_wait_timeout)
        self._heartbeat_exit_event.clear()
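Stripped of the AMQP details, the loop above is a classic wait-on-event heartbeat: run one beat, then sleep on the exit event so shutdown wakes it immediately. A bare sketch of that skeleton (the beat callable is hypothetical):

import threading

exit_event = threading.Event()

def heartbeat_loop(beat, interval=15.0):
    # Event.wait() returns early as soon as the event is set, so the
    # thread reacts to shutdown without waiting out the full interval.
    while not exit_event.is_set():
        beat()
        exit_event.wait(timeout=interval)
    exit_event.clear()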
Example 17
    def _parse_url(self):
        self.hostaddrs = []
        self.username = None
        self.password = None

        for host in self.url.hosts:
            # NOTE(ansmith): connections and failover are transparently
            # managed by the client library. Credentials will be
            # selected from the first host encountered in transport_url
            if self.username is None:
                self.username = host.username
                self.password = host.password
            else:
                if self.username != host.username:
                    LOG.warning(_LW("Different transport usernames detected"))

            if host.hostname:
                self.hostaddrs.append("%s:%s" % (
                    host.hostname,
                    host.port or self.driver_conf.kafka_default_port))

        if not self.hostaddrs:
            self.hostaddrs.append("%s:%s" %
                                  (self.driver_conf.kafka_default_host,
                                   self.driver_conf.kafka_default_port))
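The fallback logic amounts to: collect "host:port" pairs from the URL, defaulting the port, and fall back to a default host when none are given. A rough illustration with plain dicts (the default values here are assumptions, not the driver's actual configuration):

DEFAULT_HOST, DEFAULT_PORT = "localhost", 9092  # assumed defaults

def parse_hosts(hosts):
    hostaddrs = ["%s:%s" % (h["hostname"], h.get("port") or DEFAULT_PORT)
                 for h in hosts if h.get("hostname")]
    return hostaddrs or ["%s:%s" % (DEFAULT_HOST, DEFAULT_PORT)]

print(parse_hosts([{"hostname": "broker1"},
                   {"hostname": "broker2", "port": 9093}]))
# ['broker1:9092', 'broker2:9093']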
Example 18
    def get_single_host(self, target):
        """Get a single host by target.

        :param target: the target for messages
        :type target: Target
        :returns: a "hostname:port" host
        """

        hosts = self.get_hosts(target)
        if not hosts:
            err_msg = "No hosts were found for target %s." % target
            LOG.error(err_msg)
            raise oslo_messaging.InvalidTarget(err_msg, target)

        if len(hosts) == 1:
            host = hosts[0]
            LOG.info(
                _LI("A single host %(host)s found for target %(target)s.") % {
                    "host": host,
                    "target": target
                })
        else:
            host = random.choice(hosts)
            LOG.warning(
                _LW("Multiple hosts %(hosts)s were found for target "
                    " %(target)s. Using the random one - %(host)s.") % {
                        "hosts": hosts,
                        "target": target,
                        "host": host
                    })
        return host
Example 19
    def __call__(self, remote_properties):
        # for backwards compatibility use legacy if dynamic and we're connected
        # to qpidd or we cannot identify the message bus.  This can be
        # overridden via the configuration.
        product = remote_properties.get('product', 'qpid-cpp')

        # TODO(kgiusti): Router support was added in Newton.  Remove this
        # warning post Newton, once the driver has stabilized.
        if product == "qpid-dispatch-router":
            w = _LW("This is the initial release of support for message"
                    " routing technology. Be aware that messages are not"
                    " queued and may be discarded if there are no consumers"
                    " present.")
            LOG.warning(w)

        if self._mode == 'legacy' or (self._mode == 'dynamic'
                                      and product == 'qpid-cpp'):
            return LegacyAddresser(self._default_exchange,
                                   self._kwargs['legacy_server_prefix'],
                                   self._kwargs['legacy_broadcast_prefix'],
                                   self._kwargs['legacy_group_prefix'])
        else:
            return RoutableAddresser(self._default_exchange,
                                     self._kwargs.get("rpc_exchange"),
                                     self._kwargs["rpc_prefix"],
                                     self._kwargs.get("notify_exchange"),
                                     self._kwargs["notify_prefix"],
                                     self._kwargs["unicast"],
                                     self._kwargs["multicast"],
                                     self._kwargs["anycast"])
Example 21
    def _create_message(self, context, message, message_version, reply_id,
                        message_id, socket, message_type):
        # drop a duplicate message
        if message_id in self.messages_cache:
            LOG.warning(
                _LW("[%(host)s] Dropping duplicate %(msg_type)s "
                    "message %(msg_id)s"),
                {"host": self.host,
                 "msg_type": zmq_names.message_type_str(message_type),
                 "msg_id": message_id}
            )
            # NOTE(gdavoian): send yet another ack for the direct
            # message, since the old one might be lost;
            # for the CALL message also try to resend its reply
            # (of course, if it was already obtained and cached).
            if message_type in zmq_names.DIRECT_TYPES:
                self._acknowledge(message_version, reply_id, message_id,
                                  socket)
            if message_type == zmq_names.CALL_TYPE:
                self._reply_from_cache(message_id, socket)
            return None

        self.messages_cache.add(message_id)

        # NOTE(gdavoian): send an immediate ack, since it may
        # be too late to wait until the message is dispatched
        # and processed by an RPC server
        if message_type in zmq_names.DIRECT_TYPES:
            self._acknowledge(message_version, reply_id, message_id, socket)

        return super(DealerConsumerWithAcks, self)._create_message(
            context, message, message_version, reply_id,
            message_id, socket, message_type
        )
Example 23
    def send_request(self, multipart_message):

        envelope = multipart_message[zmq_names.MULTIPART_IDX_ENVELOPE]

        LOG.info(_LI("Envelope: %s") % envelope)

        target = envelope[zmq_names.FIELD_TARGET]
        dealer_socket, hosts = self._check_hosts_connections(target)

        if not dealer_socket.connections:
            # NOTE(ozamiatin): Here we can provide
            # a queue for keeping messages to send them later
            # when some listener appears. However such approach
            # being more reliable will consume additional memory.
            LOG.warning(_LW("Request %s was dropped because no connection")
                        % envelope[zmq_names.FIELD_MSG_TYPE])
            return

        self.reply_receiver.track_socket(dealer_socket.handle)

        LOG.info(_LI("Sending message %(message)s to a target %(target)s")
                 % {"message": envelope[zmq_names.FIELD_MSG_ID],
                    "target": envelope[zmq_names.FIELD_TARGET]})

        if envelope[zmq_names.FIELD_MSG_TYPE] in zmq_names.MULTISEND_TYPES:
            for _ in range(dealer_socket.connections_count()):
                self._send_request(dealer_socket, multipart_message)
        else:
            self._send_request(dealer_socket, multipart_message)
Example 25
def _get_matchmaker(*args, **kwargs):
    global matchmaker
    mm_name = CONF.rpc_zmq_matchmaker

    # Backwards compatibility for old class names
    mm_mapping = {
        'oslo_messaging._drivers.matchmaker_redis.MatchMakerRedis': 'redis',
        'oslo_messaging._drivers.matchmaker_ring.MatchMakerRing': 'ring',
        'oslo_messaging._drivers.matchmaker.MatchMakerLocalhost': 'local',
        'oslo.messaging._drivers.matchmaker_redis.MatchMakerRedis': 'redis',
        'oslo.messaging._drivers.matchmaker_ring.MatchMakerRing': 'ring',
        'oslo.messaging._drivers.matchmaker.MatchMakerLocalhost': 'local'
    }
    if mm_name in mm_mapping:
        LOG.warning(
            _LW('rpc_zmq_matchmaker = %(old_val)s is deprecated. '
                'It is suggested to change the value to %(new_val)s.'), {
                    'old_val': mm_name,
                    'new_val': mm_mapping[mm_name]
                })
        mm_name = mm_mapping[mm_name]

    if not matchmaker:
        mgr = driver.DriverManager('oslo.messaging.zmq.matchmaker', mm_name)
        matchmaker = mgr.driver(*args, **kwargs)
    return matchmaker
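The name resolved above is handed to stevedore, which maps it to a class registered under the entry-point namespace. A rough sketch of that lookup (the plugin name 'redis' is an assumption; only the namespace string comes from the snippet):

from stevedore import driver

# DriverManager loads exactly one named plugin from the namespace;
# with invoke_on_load=False, mgr.driver is the plugin class itself,
# ready to be instantiated with whatever arguments the caller has.
mgr = driver.DriverManager(namespace='oslo.messaging.zmq.matchmaker',
                           name='redis',
                           invoke_on_load=False)
matchmaker_cls = mgr.driver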
Example 26
    def start(self, override_pool_size=None):
        """Start handling incoming messages.

        This method causes the server to begin polling the transport for
        incoming messages and passing them to the dispatcher. Message
        processing will continue until the stop() method is called.

        The executor controls how the server integrates with the application's
        I/O handling strategy - it may choose to poll for messages in a new
        process, thread or co-operatively scheduled coroutine or simply by
        registering a callback with an event loop. Similarly, the executor may
        choose to dispatch messages in a new thread, coroutine or simply the
        current thread.
        """
        if self._started:
            LOG.warning(
                _LW('The server has already been started. Ignoring'
                    ' the redundant call to start().'))
            return

        self._started = True

        executor_opts = {}

        if self.executor_type in ("threading", "eventlet"):
            executor_opts["max_workers"] = (
                override_pool_size or self.conf.executor_thread_pool_size)
        self._work_executor = self._executor_cls(**executor_opts)

        try:
            self.listener = self._create_listener()
        except driver_base.TransportDriverError as ex:
            raise ServerListenError(self.target, ex)

        self.listener.start(self._on_incoming)
Example 27
 def execute(self, controller):
     """Runs on eventloop thread - sends request."""
     if not self._deadline or self._deadline > time.time():
         controller.request(self._target, self._request,
                            self._results_queue, self._wait_for_reply)
     else:
         LOG.warning(_LW("Send request to %s aborted: TTL expired."),
                     self._target)
Example 29
 def run(self, key):
     if not self._ring_has(key):
         LOG.warning(
             _LW("No key defining hosts for topic '%s', "
                 "see ringfile"), key)
         return []
     host = next(self.ring0[key])
     return [(key + '.' + host, host)]
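next(self.ring0[key]) suggests each ring entry is an endless round-robin iterator over the hosts for a topic. A minimal sketch of such a structure, assuming itertools.cycle (the topic and host names are made up):

import itertools

ring = {'scheduler': ['host1', 'host2']}
# Wrap each host list in a cycle so next() rotates through it forever.
ring0 = {key: itertools.cycle(hosts) for key, hosts in ring.items()}

print(next(ring0['scheduler']))  # host1
print(next(ring0['scheduler']))  # host2
print(next(ring0['scheduler']))  # host1 again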
Example 30
 def _fetch_hosts(self, target, get_hosts, get_hosts_retry):
     key = str(target)
     if key not in self.routing_table:
         try:
             hosts = get_hosts_retry(
                 target, zmq_names.socket_type_str(zmq.DEALER))
             self.routing_table[key] = (hosts, time.time())
         except retrying.RetryError:
             LOG.warning(_LW("Matchmaker contains no hosts for target %s")
                         % key)
     else:
         try:
             hosts = get_hosts(
                 target, zmq_names.socket_type_str(zmq.DEALER))
             self.routing_table[key] = (hosts, time.time())
         except zmq_matchmaker_base.MatchmakerUnavailable:
             LOG.warning(_LW("Matchmaker contains no hosts for target %s")
                         % key)
Example 31
 def run(self, key):
     # Assume starts with "fanout~", strip it for lookup.
     nkey = key.split('fanout~')[1:][0]
     if not self._ring_has(nkey):
         LOG.warning(
             _LW("No key defining hosts for topic '%s', "
                 "see ringfile"), nkey)
         return []
     return map(lambda x: (key + '.' + x, x), self.ring[nkey])
Example 32
 def run_loop(self):
     reply, socket = self.poller.poll(timeout=self.conf.rpc_poll_timeout)
     if reply is not None:
         reply_id = reply[zmq_names.FIELD_MSG_ID]
         call_future = self.replies.get(reply_id)
         if call_future:
             call_future.set_result(reply)
         else:
             LOG.warning(_LW("Received timed out reply: %s"), reply_id)
Example 33
 def add(self, msg_id):
     self._queues[msg_id] = moves.queue.Queue()
     if len(self._queues) > self._wrn_threshold:
         LOG.warning(_LW('Number of call queues is greater than warning '
                         'threshold: %(old_threshold)s. There could be a '
                         'leak. Increasing threshold to: %(threshold)s'),
                     {'old_threshold': self._wrn_threshold,
                      'threshold': self._wrn_threshold * 2})
         self._wrn_threshold *= 2
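Doubling the threshold each time it is crossed means the warning fires only O(log n) times as the map grows. The same idea in isolation (names are illustrative):

queues = {}
wrn_threshold = 10

def add(msg_id):
    global wrn_threshold
    queues[msg_id] = object()  # stands in for a real Queue
    if len(queues) > wrn_threshold:
        print("possible leak: %d call queues" % len(queues))
        wrn_threshold *= 2  # stay quiet until the count doubles again

for i in range(50):
    add(i)  # warns at 11, 21 and 41 queues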
Example 34
 def _check_same_thread_id(self):
     if self._thread_id is None:
         self._thread_id = self._get_thread_id()
     elif self._thread_id != self._get_thread_id():
         # NOTE(dims): Need to change this to raise RuntimeError after
         # verifying/fixing other openstack projects (like Neutron)
         # work ok with this change
         LOG.warn(_LW("start/stop/wait must be called in the "
                      "same thread"))
Example 36
    def run(self):
        """Run the proton event/timer loop."""
        LOG.debug("Starting Proton thread, container=%s",
                  self._container.name)

        while not self._shutdown:
            readers, writers, timers = self._container.need_processing()

            readfds = [c.user_context for c in readers]
            # additionally, always check for readability of the pipe that
            # other threads use to wake up this processing thread
            readfds.append(self._requests)
            writefds = [c.user_context for c in writers]

            timeout = None
            if timers:
                deadline = timers[0].deadline  # 0 == next expiring timer
                now = time.time()
                timeout = 0 if deadline <= now else deadline - now

            # adjust timeout for any deferred requests
            timeout = self._schedule.get_delay(timeout)

            try:
                results = select.select(readfds, writefds, [], timeout)
            except select.error as serror:
                if serror[0] == errno.EINTR:
                    LOG.warning(_LW("ignoring interrupt from select(): %s"),
                                str(serror))
                    continue
                raise  # assuming fatal...

            # don't process any I/O or timers if woken up by a shutdown:
            # if we've been forked we don't want to do I/O on the parent's
            # sockets
            if self._shutdown:
                break

            readable, writable, ignore = results

            for r in readable:
                r.read()

            for t in timers:
                if t.deadline > time.time():
                    break
                t.process(time.time())

            for w in writable:
                w.write()

            self._schedule.process()  # run any deferred requests

        LOG.info(_LI("eventloop thread exiting, container=%s"),
                 self._container.name)
        self._container.destroy()
Example 37
 def func_wrapper(*args, **kwargs):
     try:
         return func(*args, **kwargs)
     except redis.ConnectionError:
         LOG.warning(_LW("Redis is currently not available. "
                         "Messages are being sent to known targets using "
                         "existing connections. But new nodes "
                         "can not be discovered until Redis is up "
                         "and running."))
         raise zmq_matchmaker_base.MatchmakerUnavailable()
Example 39
    def _heartbeat_supported_and_enabled(self):
        if self.heartbeat_timeout_threshold <= 0:
            return False

        if self.connection.supports_heartbeats:
            return True
        elif not self._heartbeat_support_log_emitted:
            LOG.warn(_LW("Heartbeat support requested but it is not supported " "by the kombu driver or the broker"))
            self._heartbeat_support_log_emitted = True
        return False
Example 40
 def sender_remote_closed(self, sender_link, pn_condition):
     # The remote has initiated a close.  This could happen when the message
     # bus is shutting down, or it detected an error
     LOG.warning(
         _LW("sender %(addr)s failed due to remote initiated close:"
             " condition=%(cond)s"), {
                 'addr': self._address,
                 'cond': pn_condition
             })
     self._link.close()
Example 42
    def start(self, override_pool_size=None):
        """Start handling incoming messages.

        This method causes the server to begin polling the transport for
        incoming messages and passing them to the dispatcher. Message
        processing will continue until the stop() method is called.

        The executor controls how the server integrates with the application's
        I/O handling strategy - it may choose to poll for messages in a new
        process, thread or co-operatively scheduled coroutine or simply by
        registering a callback with an event loop. Similarly, the executor may
        choose to dispatch messages in a new thread, coroutine or simply the
        current thread.
        """
        # Warn that restarting will be deprecated
        if self._started:
            LOG.warning(
                _LW('Restarting a MessageHandlingServer is inherently '
                    'racy. It is deprecated, and will become a noop '
                    'in a future release of oslo.messaging. If you '
                    'need to restart MessageHandlingServer you should '
                    'instantiate a new object.'))
        self._started = True

        executor_opts = {}

        if self.executor_type == "threading":
            executor_opts["max_workers"] = (
                override_pool_size or self.conf.executor_thread_pool_size)
        elif self.executor_type == "eventlet":
            eventletutils.warn_eventlet_not_patched(
                expected_patched_modules=['thread'],
                what="the 'oslo.messaging eventlet executor'")
            executor_opts["max_workers"] = (
                override_pool_size or self.conf.executor_thread_pool_size)

        self._work_executor = self._executor_cls(**executor_opts)

        try:
            self.listener = self._create_listener()
        except driver_base.TransportDriverError as ex:
            raise ServerListenError(self.target, ex)

        # HACK(sileht): We temporarily pass the executor to the rabbit
        # listener to fix a race with the deprecated blocking executor.
        # We do this hack because it is needed only for 'synchronous'
        # executors like blocking, and that one is deprecated. Making the
        # driver work in both a sync and an async way is complicated, and
        # blocking has 0% test coverage.
        if hasattr(self.listener, '_poll_style_listener'):
            l = self.listener._poll_style_listener
            if hasattr(l, "_message_operations_handler"):
                l._message_operations_handler._executor = (self.executor_type)

        self.listener.start(self._on_incoming)
Example 43
    def _wait_for_ack(self, request, ack_future=None):
        if ack_future is None:
            ack_future = self._schedule_request_for_ack(request)

        retries = \
            request.retry or self.conf.oslo_messaging_zmq.rpc_retry_attempts
        if retries is None:
            retries = -1
        timeout = self.conf.oslo_messaging_zmq.rpc_ack_timeout_base

        done = ack_future is None
        while not done:
            try:
                reply_id, response = ack_future.result(timeout=timeout)
                done = True
                assert response is None, "Ack expected!"
                if reply_id is not None:
                    assert reply_id == request.routing_key, \
                        "Ack from recipient expected!"
            except AssertionError:
                LOG.error(_LE("Message format error in ack for %s"),
                          request.message_id)
            except futures.TimeoutError:
                LOG.warning(
                    _LW("No ack received within %(tout)s seconds "
                        "for %(msg_id)s"), {
                            "tout": timeout,
                            "msg_id": request.message_id
                        })
                if retries != 0:
                    if retries > 0:
                        retries -= 1
                    self.sender.send(ack_future.socket, request)
                    timeout *= \
                        self.conf.oslo_messaging_zmq.rpc_ack_timeout_multiplier
                else:
                    LOG.warning(_LW("Exhausted number of retries for %s"),
                                request.message_id)
                    done = True

        if request.msg_type != zmq_names.CALL_TYPE:
            self.receiver.untrack_request(request)
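The ack wait retries with an exponentially growing timeout until the retry budget runs out (a retries value of -1 meaning unlimited). A stripped-down sketch of that control flow (try_once is a hypothetical callable that raises TimeoutError on timeout):

def wait_with_backoff(try_once, retries=3, timeout=1.0, multiplier=2.0):
    """Call try_once(timeout) until it succeeds or retries run out."""
    while True:
        try:
            return try_once(timeout)
        except TimeoutError:
            if retries == 0:
                raise  # budget exhausted
            if retries > 0:
                retries -= 1  # negative budgets retry forever
            timeout *= multiplier  # exponential backoff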
Example 44
    def wait(self):
        """Wait for message processing to complete.

        After calling stop(), there may still be some existing messages
        which have not been completely processed. The wait() method blocks
        until all message processing has completed.

        Once it's finished, the underlying driver resources associated to this
        server are released (like closing useless network connections).
        """
        with self._state_cond:
            if self._running:
                LOG.warning(
                    _LW("wait() should be called after stop() as it "
                        "waits for existing messages to finish "
                        "processing"))
                w = timeutils.StopWatch()
                w.start()
                while self._running:
                    # NOTE(harlowja): 1.0 seconds was mostly chosen at
                    # random, but it seems like a reasonable value to
                    # use to avoid spamming the logs with too much
                    # information.
                    self._state_cond.wait(1.0)
                    if self._running and not self._dummy_cond:
                        LOG.warning(
                            _LW("wait() should have been called"
                                " after stop() as wait() waits for existing"
                                " messages to finish processing, it has"
                                " been %0.2f seconds and stop() still has"
                                " not been called"), w.elapsed())
            executor = self._executor_obj
            self._executor_obj = None
        if executor is not None:
            # We are the lucky calling thread to wait on the executor to
            # actually finish.
            try:
                executor.wait()
            finally:
                # Close listener connection after processing all messages
                executor.listener.cleanup()
                executor = None
Example 47
 def add(self, msg_id):
     self._queues[msg_id] = moves.queue.Queue()
     queues_length = len(self._queues)
     if queues_length > self._wrn_threshold:
         LOG.warning(_LW('Number of call queues is %(queues_length)s, '
                         'greater than warning threshold: %(old_threshold)s'
                         '. There could be a leak. Increasing threshold to:'
                         ' %(threshold)s'),
                     {'queues_length': queues_length,
                      'old_threshold': self._wrn_threshold,
                      'threshold': self._wrn_threshold * 2})
         self._wrn_threshold *= 2
Example 48
    def _update_routing_table(self):
        target_keys = self.routing_table.get_targets()

        try:
            for target_key in target_keys:
                hosts = self.matchmaker.get_hosts_by_key(target_key)
                self.routing_table.update_hosts(target_key, hosts)
            LOG.debug("Updating routing table from the matchmaker. "
                      "%d target(s) updated %s." % (len(target_keys),
                                                    target_keys))
        except zmq_matchmaker_base.MatchmakerUnavailable:
            LOG.warning(_LW("Not updated. Matchmaker was not available."))
Example 49
    def send_message(self, socket, multipart_message):
        message_version = multipart_message[zmq_names.MESSAGE_VERSION_IDX]
        if six.PY3:
            message_version = message_version.decode('utf-8')

        send_message_version = self._send_message_versions.get(message_version)
        if send_message_version is None:
            LOG.warning(_LW("Dropping message with unsupported version %s"),
                        message_version)
            return

        send_message_version(socket, multipart_message)
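Dispatching on a version string through a dict of handlers keeps unsupported versions off the send path entirely. A compact sketch of the pattern (the handlers and version numbers are hypothetical):

def send_v1(socket, message):
    print("v1 send", message)

def send_v2(socket, message):
    print("v2 send", message)

send_message_versions = {"1.0": send_v1, "2.0": send_v2}

def send_message(socket, message, version):
    handler = send_message_versions.get(version)
    if handler is None:
        print("dropping message with unsupported version", version)
        return
    handler(socket, message)

send_message(None, {"id": 42}, "2.0")  # v2 send {'id': 42}
send_message(None, {"id": 43}, "9.9")  # dropped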
Example 50
    def run(self):
        """Run the proton event/timer loop."""
        LOG.debug("Starting Proton thread, container=%s",
                  self._container.name)

        while not self._shutdown:

            readfds = [self._requests]
            writefds = []
            deadline = self._scheduler._next_deadline

            pyngus_conn = self._connection and self._connection.pyngus_conn
            if pyngus_conn and self._connection.socket:
                if pyngus_conn.needs_input:
                    readfds.append(self._connection)
                if pyngus_conn.has_output:
                    writefds.append(self._connection)
                if pyngus_conn.deadline:
                    deadline = (pyngus_conn.deadline if not deadline else
                                min(deadline, pyngus_conn.deadline))

            # force select to return in time to service the next expiring timer
            if deadline:
                _now = now()
                timeout = 0 if deadline <= _now else (deadline - _now)
            else:
                timeout = None

            # and now we wait...
            try:
                select.select(readfds, writefds, [], timeout)
            except select.error as serror:
                if serror[0] == errno.EINTR:
                    LOG.warning(_LW("ignoring interrupt from select(): %s"),
                                str(serror))
                    continue
                raise  # assuming fatal...

            # Ignore the select return value - simply poll the socket for I/O.
            # Testing shows that polling improves latency over checking the
            # lists returned by select()
            self._requests.process_requests()
            self._connection.read_socket()
            if pyngus_conn and pyngus_conn.deadline:
                _now = now()
                if pyngus_conn.deadline <= _now:
                    pyngus_conn.process(_now)
            self._connection.write_socket()

            self._scheduler._process()  # run any deferred requests

        LOG.info(_LI("eventloop thread exiting, container=%s"),
                 self._container.name)
Example 51
def deserialize_remote_exception(data, allowed_remote_exmods):
    failure = jsonutils.loads(six.text_type(data))

    trace = failure.get('tb', [])
    message = failure.get('message', "") + "\n" + "\n".join(trace)
    name = failure.get('class')
    module = failure.get('module')

    # the remote service which raised the given exception might have a
    # different python version than the caller. For example, the caller might
    # run python 2.7, while the remote service might run python 3.4. Thus,
    # the exception module will be "builtins" instead of "exceptions".
    if module in _EXCEPTIONS_MODULES:
        module = _EXCEPTIONS_MODULE

    # NOTE(ameade): We DO NOT want to allow just any module to be imported, in
    # order to prevent arbitrary code execution.
    if module != _EXCEPTIONS_MODULE and module not in allowed_remote_exmods:
        return oslo_messaging.RemoteError(name, failure.get('message'), trace)

    try:
        __import__(module)
        mod = sys.modules[module]
        klass = getattr(mod, name)
        if not issubclass(klass, Exception):
            raise TypeError("Can only deserialize Exceptions")

        failure = klass(*failure.get('args', []), **failure.get('kwargs', {}))
    except (AttributeError, TypeError, ImportError) as error:
        LOG.warning(_LW("Failed to rebuild remote exception due to error: %s"),
                    six.text_type(error))
        return oslo_messaging.RemoteError(name, failure.get('message'), trace)

    ex_type = type(failure)
    str_override = lambda self: message
    new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,),
                       {'__str__': str_override, '__unicode__': str_override})
    new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX)
    try:
        # NOTE(ameade): Dynamically create a new exception type and swap it in
        # as the new type for the exception. This only works on user defined
        # Exceptions and not core Python exceptions. This is important because
        # we cannot necessarily change an exception message so we must override
        # the __str__ method.
        failure.__class__ = new_ex_type
    except TypeError:
        # NOTE(ameade): If a core exception then just add the traceback to the
        # first exception argument.
        failure.args = (message,) + failure.args[1:]
    return failure
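The __class__ swap at the end works because instances of user-defined exceptions allow their type to be reassigned to a compatible subclass, while core exceptions do not (hence the TypeError fallback). A small standalone demonstration of the trick (names are illustrative):

class MyError(Exception):
    pass

failure = MyError("boom")
message = "boom\nTraceback (most recent call last): ..."

# Build a subclass whose __str__ includes the remote traceback, then
# swap it in as the instance's type.
new_type = type("MyError_Remote", (MyError,),
                {"__str__": lambda self: message})
failure.__class__ = new_type

print(str(failure))                  # includes the remote traceback
print(isinstance(failure, MyError))  # True: except MyError still works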
Example 52
    def _wait_for_ack(self, request, ack_future=None):
        if ack_future is None:
            ack_future = self._schedule_request_for_ack(request)

        retries = \
            request.retry or self.conf.oslo_messaging_zmq.rpc_retry_attempts
        if retries is None:
            retries = -1
        timeout = self.conf.oslo_messaging_zmq.rpc_ack_timeout_base

        done = ack_future is None
        while not done:
            try:
                ack = ack_future.result(timeout=timeout)
                done = True
                self._check_ack(ack, request)
            except AssertionError:
                LOG.error(_LE("Message format error in ack for %s"),
                          request.message_id)
            except futures.TimeoutError:
                LOG.warning(_LW("No ack received within %(tout)s seconds "
                                "for %(msg_id)s"),
                            {"tout": timeout,
                             "msg_id": request.message_id})
                if retries != 0:
                    if retries > 0:
                        retries -= 1
                    self.sender.send(ack_future.socket, request)
                    timeout *= \
                        self.conf.oslo_messaging_zmq.rpc_ack_timeout_multiplier
                else:
                    LOG.warning(_LW("Exhausted number of retries for %s"),
                                request.message_id)
                    done = True

        if request.msg_type != zmq_names.CALL_TYPE:
            self.receiver.untrack_request(request)
Example 53
 def _send_and_retry(self, message, topic, retry):
     current_retry = 0
     if not isinstance(message, str):
         message = jsonutils.dumps(message)
     while message is not None:
         try:
             self._send(message, topic)
             message = None
         except Exception:
             LOG.warn(_LW("Failed to publish a message of topic %s"), topic)
             current_retry += 1
             if retry is not None and current_retry >= retry:
                 LOG.exception(_LE("Failed to retry to send data "
                                   "with max retry times"))
                 message = None
Example 54
        def wrap(self, *args, **kws):
            with self._lock:
                old_pid = self._pid
                self._pid = os.getpid()

            if old_pid != self._pid:
                if self._ctrl is not None:
                    LOG.warning(_LW("Process forked after connection "
                                    "established!"))
                    self._ctrl.shutdown(wait=False)
                # Create a Controller that connects to the messaging service:
                self._ctrl = controller.Controller(self._hosts,
                                                   self._default_exchange,
                                                   self._conf)
                self._ctrl.connect()
            return func(self, *args, **kws)
Example 55
    def start(self, override_pool_size=None):
        """Start handling incoming messages.

        This method causes the server to begin polling the transport for
        incoming messages and passing them to the dispatcher. Message
        processing will continue until the stop() method is called.

        The executor controls how the server integrates with the application's
        I/O handling strategy - it may choose to poll for messages in a new
        process, thread or co-operatively scheduled coroutine or simply by
        registering a callback with an event loop. Similarly, the executor may
        choose to dispatch messages in a new thread, coroutine or simply the
        current thread.
        """
        # Warn that restarting will be deprecated
        if self._started:
            LOG.warning(_LW('Restarting a MessageHandlingServer is inherently '
                            'racy. It is deprecated, and will become a noop '
                            'in a future release of oslo.messaging. If you '
                            'need to restart MessageHandlingServer you should '
                            'instantiate a new object.'))
        self._started = True

        try:
            self.listener = self.dispatcher._listen(self.transport)
        except driver_base.TransportDriverError as ex:
            raise ServerListenError(self.target, ex)

        executor_opts = {}

        if self.executor_type == "threading":
            executor_opts["max_workers"] = (
                override_pool_size or self.conf.executor_thread_pool_size
            )
        elif self.executor_type == "eventlet":
            eventletutils.warn_eventlet_not_patched(
                expected_patched_modules=['thread'],
                what="the 'oslo.messaging eventlet executor'")
            executor_opts["max_workers"] = (
                override_pool_size or self.conf.executor_thread_pool_size
            )

        self._work_executor = self._executor_cls(**executor_opts)
        self._poll_executor = self._executor_cls(**executor_opts)

        return lambda: self._poll_executor.submit(self._runner)
Example 56
    def send_request(self, request):

        if request.msg_type == zmq_names.CALL_TYPE:
            raise zmq_publisher_base.UnsupportedSendPattern(request.msg_type)

        push_socket, hosts = self._check_hosts_connections(request.target)

        if not push_socket.connections:
            LOG.warning(_LW("Request %s was dropped because no connection")
                        % request.msg_type)
            return

        if request.msg_type in zmq_names.MULTISEND_TYPES:
            for _ in range(push_socket.connections_count()):
                self._send_request(push_socket, request)
        else:
            self._send_request(push_socket, request)
Example 57
    def message_received(self, receiver, message, handle):
        """This is a Pyngus callback, invoked by Pyngus when a new message
        arrives on this receiver link from the peer.
        """
        self._credit = self._credit - 1
        self._update_credit()

        key = message.correlation_id
        if key in self._correlation:
            LOG.debug("Received response for msg id=%s", key)
            result = {"status": "OK", "response": message}
            self._correlation[key].put(result)
            # cleanup (only need one response per request)
            del self._correlation[key]
            receiver.message_accepted(handle)
        else:
            LOG.warning(_LW("Can't find receiver for response msg id=%s, " "dropping!"), key)
            receiver.message_modified(handle, True, True, None)
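Matching a response to its pending request by correlation id is, at bottom, a dict of queues keyed by the id and popped on first use. A bare-bones sketch outside any Pyngus machinery (all names are hypothetical):

import queue

correlation = {}

def send_request(msg_id):
    correlation[msg_id] = queue.Queue()  # one response expected per request
    return correlation[msg_id]

def on_message_received(msg_id, message):
    waiter = correlation.pop(msg_id, None)  # cleanup on first response
    if waiter is None:
        print("can't find receiver for response", msg_id)
        return
    waiter.put({"status": "OK", "response": message})

q = send_request("abc")
on_message_received("abc", "pong")
print(q.get())  # {'status': 'OK', 'response': 'pong'}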