Example #1
    def consume_in_thread(self):
        """Runs the ZmqProxy service."""
        ipc_dir = CONF.rpc_zmq_ipc_dir
        consume_in = "tcp://%s:%s" % \
            (CONF.rpc_zmq_bind_address,
             CONF.rpc_zmq_port)
        consumption_proxy = InternalContext(None)

        try:
            os.makedirs(ipc_dir)
        except os.error:
            if not os.path.isdir(ipc_dir):
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Required IPC directory does not exist at"
                                " %s"), ipc_dir)
        try:
            self.register(consumption_proxy,
                          consume_in,
                          zmq.PULL)
        except zmq.ZMQError:
            if os.access(ipc_dir, os.X_OK):
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Permission denied to IPC directory at"
                                " %s"), ipc_dir)
            with excutils.save_and_reraise_exception():
                LOG.error(_("Could not create ZeroMQ receiver daemon. "
                            "Socket may already be in use."))

        super(ZmqProxy, self).consume_in_thread()
Example #2
def _multi_send(method, context, topic, msg, timeout=None,
                envelope=False, _msg_id=None, allowed_remote_exmods=None):
    """Wraps the sending of messages.

    Dispatches to the matchmaker and sends message to all relevant hosts.
    """
    allowed_remote_exmods = allowed_remote_exmods or []
    conf = CONF
    LOG.debug(' '.join(map(pformat, (topic, msg))))

    queues = _get_matchmaker().queues(topic)
    LOG.debug("Sending message(s) to: %s", queues)

    # Don't stack if we have no matchmaker results
    if not queues:
        LOG.warn(_("No matchmaker results. Not casting."))
        # While not strictly a timeout, callers know how to handle
        # this exception and a timeout isn't too big a lie.
        raise rpc_common.Timeout(_("No match from matchmaker."))

    # This supports brokerless fanout (addresses > 1)
    return_val = None
    for queue in queues:
        _topic, ip_addr = queue
        _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)

        if method.__name__ == '_cast':
            eventlet.spawn_n(method, _addr, context,
                             _topic, msg, timeout, envelope, _msg_id)
        else:
            return_val = method(_addr, context, _topic, msg, timeout,
                                envelope, allowed_remote_exmods)

    return return_val
Example #3
        def on_error(exc, interval):
            error_callback and error_callback(exc)

            info = {'hostname': self.connection.hostname,
                    'port': self.connection.port,
                    'err_str': exc, 'sleep_time': interval}

            if 'Socket closed' in six.text_type(exc):
                LOG.error(_('AMQP server %(hostname)s:%(port)d closed'
                            ' the connection. Check login credentials:'
                            ' %(err_str)s'), info)
            else:
                LOG.error(_('AMQP server on %(hostname)s:%(port)d is '
                            'unreachable: %(err_str)s. Trying again in '
                            '%(sleep_time)d seconds.'), info)

            # XXX(nic): when reconnecting to a RabbitMQ cluster
            # with mirrored queues in use, the attempt to release the
            # connection can hang "indefinitely" somewhere deep down
            # in Kombu.  Blocking the thread for a bit prior to
            # release seems to kludge around the problem where it is
            # otherwise reproducible.
            # TODO(sileht): Check if this is still useful: since we
            # use kombu for HA connections, the interval_step
            # should be sufficient, because the underlying kombu
            # transport connection object is freed.
            if self.conf.kombu_reconnect_delay > 0:
                LOG.info(_("Delaying reconnect for %1.1f seconds...") %
                         self.conf.kombu_reconnect_delay)
                time.sleep(self.conf.kombu_reconnect_delay)
Example #4
            def publisher(waiter):
                LOG.info(_("Creating proxy for topic: %s"), topic)

                try:
                    # The topic is received over the network,
                    # don't trust this input.
                    if self.badchars.search(topic) is not None:
                        emsg = _("Topic contained dangerous characters.")
                        LOG.warn(emsg)
                        raise RPCException(emsg)

                    out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
                                         (ipc_dir, topic),
                                         sock_type, bind=True)
                except RPCException:
                    waiter.send_exception(*sys.exc_info())
                    return

                self.topic_proxy[topic] = eventlet.queue.LightQueue(
                    CONF.rpc_zmq_topic_backlog)
                self.sockets.append(out_sock)

                # It takes some time for a pub socket to open
                # before we can have any faith in doing a send() to it.
                if sock_type == zmq.PUB:
                    eventlet.sleep(.5)

                waiter.send(True)

                while True:
                    data = self.topic_proxy[topic].get()
                    out_sock.send(data, copy=False)
Example #5
    def reconnect(self, retry=None):
        """Handles reconnecting and re-establishing sessions and queues.
        Will retry up to retry number of times.
        retry = None or -1 means to retry forever
        retry = 0 means no retry
        retry = N means N retries
        """
        delay = 1
        attempt = 0
        loop_forever = False
        if retry is None or retry < 0:
            loop_forever = True

        while True:
            self._disconnect()

            attempt += 1
            broker = six.next(self.brokers)
            try:
                self._connect(broker)
            except qpid_exceptions.MessagingError as e:
                msg_dict = dict(e=e,
                                delay=delay,
                                retry=retry,
                                broker=broker)
                if not loop_forever and attempt > retry:
                    msg = _('Unable to connect to AMQP server on '
                            '%(broker)s after %(retry)d '
                            'tries: %(e)s') % msg_dict
                    LOG.error(msg)
                    raise exceptions.MessageDeliveryFailure(msg)
                else:
                    msg = _("Unable to connect to AMQP server on %(broker)s: "
                            "%(e)s. Sleeping %(delay)s seconds") % msg_dict
                    LOG.error(msg)
                    time.sleep(delay)
                    delay = min(delay + 1, 5)
            else:
                LOG.info(_('Connected to AMQP server on %s'), broker['host'])
                break

        self.session = self.connection.session()

        if self.consumers:
            consumers = self.consumers
            self.consumers = {}

            for consumer in six.itervalues(consumers):
                consumer.reconnect(self.session)
                self._register_consumer(consumer)

            LOG.debug("Re-established AMQP queues")
Example #6
    def register(self, proxy, in_addr, zmq_type_in,
                 in_bind=True, subscribe=None):

        LOG.info(_("Registering reactor"))

        if zmq_type_in not in (zmq.PULL, zmq.SUB):
            raise RPCException("Bad input socktype")

        # Items push in.
        inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
                        subscribe=subscribe)

        self.proxies[inq] = proxy
        self.sockets.append(inq)

        LOG.info(_("In reactor registered"))
Example #7
 def _error_callback(exc):
     if isinstance(exc, qpid_exceptions.Empty):
         LOG.debug('Timed out waiting for RPC response: %s', exc)
         raise rpc_common.Timeout()
     else:
         LOG.exception(_('Failed to consume message from queue: %s'),
                       exc)
    def __init__(self, info=None, topic=None, method=None):
        """Initiates Timeout object.

        :param info: Extra info to convey to the user
        :param topic: The topic that the rpc call was sent to
        :param method: The name of the rpc method being
                       called
        """
        self.info = info
        self.topic = topic
        self.method = method
        super(Timeout, self).__init__(
            None,
            info=info or _('<unknown>'),
            topic=topic or _('<unknown>'),
            method=method or _('<unknown>'))
Example #9
        def _consume():
            # NOTE(sileht):
            # maximum value chosen according to the best practice from kombu:
            # http://kombu.readthedocs.org/en/latest/reference/kombu.common.html#kombu.common.eventloop
            poll_timeout = 1 if timeout is None else min(timeout, 1)

            while True:
                if self._consume_loop_stopped:
                    self._consume_loop_stopped = False
                    raise StopIteration

                try:
                    nxt_receiver = self.session.next_receiver(
                        timeout=poll_timeout)
                except qpid_exceptions.Empty as exc:
                    poll_timeout = timer.check_return(_raise_timeout, exc,
                                                      maximum=1)
                else:
                    break

            try:
                self._lookup_consumer(nxt_receiver).consume()
            except Exception:
                LOG.exception(_("Error processing message. "
                                "Skipping it."))
Example #10
    def _load_notifiers(self):
        """One-time load of notifier config file."""
        self.routing_groups = {}
        self.used_drivers = set()
        filename = CONF.routing_notifier_config
        if not filename:
            return

        # Infer which drivers are used from the config file.
        self.routing_groups = yaml.load(
            self._get_notifier_config_file(filename))
        if not self.routing_groups:
            self.routing_groups = {}  # In case we got None from load()
            return

        for group in self.routing_groups.values():
            self.used_drivers.update(group.keys())

        LOG.debug('loading notifiers from %s', self.NOTIFIER_PLUGIN_NAMESPACE)
        self.plugin_manager = dispatch.DispatchExtensionManager(
            namespace=self.NOTIFIER_PLUGIN_NAMESPACE,
            check_func=self._should_load_plugin,
            invoke_on_load=True,
            invoke_args=None)
        if not list(self.plugin_manager):
            LOG.warning(_("Failed to load any notifiers for %s"),
                        self.NOTIFIER_PLUGIN_NAMESPACE)
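
For illustration, here is a hypothetical routing config file of the shape _load_notifiers expects: the top-level keys are group names and each group's keys are driver names, which is where used_drivers comes from. This is an assumed example, not a config shipped with the library.

import yaml

sample = """
group_1:
    rpc: null
    log: null
group_2:
    messaging: null
"""

routing_groups = yaml.safe_load(sample)
used_drivers = set()
for group in routing_groups.values():
    used_drivers.update(group.keys())
assert used_drivers == {'rpc', 'log', 'messaging'}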
Example #11
    def __init__(self, addr, zmq_type, bind=True, subscribe=None):
        self.sock = _get_ctxt().socket(zmq_type)
        self.addr = addr
        self.type = zmq_type
        self.subscriptions = []

        # Support failures on sending/receiving on wrong socket type.
        self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
        self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
        self.can_sub = zmq_type in (zmq.SUB, )

        # Support list, str, & None for subscribe arg (cast to list)
        do_sub = {
            list: subscribe,
            str: [subscribe],
            type(None): []
        }[type(subscribe)]

        for f in do_sub:
            self.subscribe(f)

        str_data = {'addr': addr, 'type': self.socket_s(),
                    'subscribe': subscribe, 'bind': bind}

        LOG.debug("Connecting to %(addr)s with %(type)s", str_data)
        LOG.debug("-> Subscribed to %(subscribe)s", str_data)
        LOG.debug("-> bind: %(bind)s", str_data)

        try:
            if bind:
                self.sock.bind(addr)
            else:
                self.sock.connect(addr)
        except Exception:
            raise RPCException(_("Could not open socket."))
Example #12
    def consume(self, sock):
        # TODO(ewindisch): use zero-copy (i.e. references, not copying)
        data = sock.recv()
        LOG.debug("CONSUMER RECEIVED DATA: %s", data)

        proxy = self.proxies[sock]

        if data[2] == 'cast':  # Legacy protocol
            packenv = data[3]

            ctx, msg = _deserialize(packenv)
            request = rpc_common.deserialize_msg(msg)
            ctx = RpcContext.unmarshal(ctx)
        elif data[2] == 'impl_zmq_v2':
            packenv = data[4:]

            msg = unflatten_envelope(packenv)
            request = rpc_common.deserialize_msg(msg)

            # Unmarshal only after verifying the message.
            ctx = RpcContext.unmarshal(data[3])
        else:
            LOG.error(_("ZMQ Envelope version unsupported or unknown."))
            return

        self.pool.spawn_n(self.process, proxy, ctx, request)
Example #13
    def create_consumer(self, topic, proxy, fanout=False):
        # Register with matchmaker.
        _get_matchmaker().register(topic, CONF.rpc_zmq_host)

        # Subscription scenarios
        if fanout:
            sock_type = zmq.SUB
            subscribe = ('', fanout)[type(fanout) == str]
            topic = 'fanout~' + topic.split('.', 1)[0]
        else:
            sock_type = zmq.PULL
            subscribe = None
            topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))

        if topic in self.topics:
            LOG.info(_("Skipping topic registration. Already registered."))
            return

        # Receive messages from (local) proxy
        inaddr = "ipc://%s/zmq_topic_%s" % \
            (CONF.rpc_zmq_ipc_dir, topic)

        LOG.debug("Consumer is a zmq.%s",
                  ['PULL', 'SUB'][sock_type == zmq.SUB])

        self.reactor.register(proxy, inaddr, sock_type,
                              subscribe=subscribe, in_bind=False)
        self.topics.append(topic)
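
A worked example of the topic rewriting above, using a hypothetical host name: fanout consumers listen on a 'fanout~'-prefixed topic over SUB, point-to-point consumers on '<topic>.<host>' over PULL. The helper below just mirrors the two branches for illustration.

def _rewrite_topic(topic, fanout, host='node-1'):
    # Mirrors the fanout / non-fanout branches of create_consumer().
    if fanout:
        return 'fanout~' + topic.split('.', 1)[0]
    return '.'.join((topic.split('.', 1)[0], host))


assert _rewrite_topic('compute', fanout=True) == 'fanout~compute'
assert _rewrite_topic('compute.node-1', fanout=False) == 'compute.node-1'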
Example #14
 def _error_callback(exc):
     if isinstance(exc, socket.timeout):
         LOG.debug('Timed out waiting for RPC response: %s', exc)
         raise rpc_common.Timeout()
     else:
         LOG.exception(_('Failed to consume message from queue: %s'),
                       exc)
         self.do_consume = True
Example #15
 def _call_notify(self, ext, context, message, priority, retry,
                  accepted_drivers):
     """Emit the notification.
     """
     # accepted_drivers is passed in as a result of the map() function
     LOG.info(_("Routing '%(event)s' notification to '%(driver)s' driver"),
              {'event': message.get('event_type'), 'driver': ext.name})
     ext.obj.notify(context, message, priority, retry)
 def run(self, key):
     if not self._ring_has(key):
         LOG.warn(
             _("No key defining hosts for topic '%s', "
               "see ringfile"), key
         )
         return []
     host = next(self.ring0[key])
     return [(key + '.' + host, host)]
Example #17
 def consume(self):
     """Fetch the message and pass it to the callback object."""
     message = self.receiver.fetch()
     try:
         self._unpack_json_msg(message)
         self.callback(QpidMessage(self.session, message))
     except Exception:
         LOG.exception(_("Failed to process message... skipping it."))
         self.session.acknowledge(message)
 def start(self):
     self._running = True
     while self._running:
         try:
             incoming = self.listener.poll()
             if incoming is not None:
                 with self.dispatcher(incoming) as callback:
                     callback()
         except Exception:
             LOG.exception(_("Unexpected exception occurred."))
 def run(self, key):
     # Assume starts with "fanout~", strip it for lookup.
     nkey = key.split('fanout~')[1:][0]
     if not self._ring_has(nkey):
         LOG.warn(
             _("No key defining hosts for topic '%s', "
               "see ringfile"), nkey
         )
         return []
     return map(lambda x: (key + '.' + x, x), self.ring[nkey])
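
A small sketch with hypothetical ring data showing what the two run() variants return: the bare-topic lookup in Example #15 round-robins a single host, while the fanout lookup above addresses every host registered for the topic.

import itertools

ring = {'compute': ['host-a', 'host-b']}
ring0 = {k: itertools.cycle(v) for k, v in ring.items()}

# Bare-topic lookup: one host, chosen round-robin.
host = next(ring0['compute'])
assert ('compute.' + host, host) == ('compute.host-a', 'host-a')

# Fanout lookup: every host for the stripped key.
key = 'fanout~compute'
nkey = key.split('fanout~')[1:][0]
assert [(key + '.' + x, x) for x in ring[nkey]] == [
    ('fanout~compute.host-a', 'host-a'),
    ('fanout~compute.host-b', 'host-b'),
]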
Example #20
    def unregister(self, key, host):
        """Unregister a topic."""
        if (key, host) in self.host_topic:
            del self.host_topic[(key, host)]

        self.hosts.discard(host)
        self.backend_unregister(key, '.'.join((key, host)))

        LOG.info(_("Matchmaker unregistered: %(key)s, %(host)s"),
                 {'key': key, 'host': host})
Example #21
def _serialize(data):
    """Serialization wrapper.

    We prefer using JSON, but it cannot encode all types.
    Error if a developer passes us bad data.
    """
    try:
        return jsonutils.dumps(data, ensure_ascii=True)
    except TypeError:
        with excutils.save_and_reraise_exception():
            LOG.error(_("JSON serialization failed."))
Example #22
def _get_matchmaker(*args, **kwargs):
    global matchmaker
    if not matchmaker:
        mm = CONF.rpc_zmq_matchmaker
        if mm.endswith('matchmaker.MatchMakerRing'):
            mm = mm.replace('matchmaker', 'matchmaker_ring')
            LOG.warn(_('rpc_zmq_matchmaker = %(orig)s is deprecated; use'
                       ' %(new)s instead') % dict(
                     orig=CONF.rpc_zmq_matchmaker, new=mm))
        matchmaker = importutils.import_object(mm, *args, **kwargs)
    return matchmaker
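
A worked illustration of the deprecation rewrite above, using a hypothetical class path (the actual configured value depends on the deployment):

mm = 'some.package.matchmaker.MatchMakerRing'  # hypothetical value
if mm.endswith('matchmaker.MatchMakerRing'):
    mm = mm.replace('matchmaker', 'matchmaker_ring')
assert mm == 'some.package.matchmaker_ring.MatchMakerRing'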
Example #23
    def _callback_handler(self, message, callback):
        """Call callback with deserialized message.

        Messages are processed and ack'ed.
        """

        try:
            callback(RabbitMessage(message))
        except Exception:
            LOG.exception(_("Failed to process message"
                            " ... skipping it."))
            message.ack()
Example #24
 def _connect(self, broker):
     """Connect to rabbit.  Re-establish any queues that may have
     been declared before if we are reconnecting.  Exceptions should
     be handled by the caller.
     """
     LOG.info(_("Connecting to AMQP server on "
                "%(hostname)s:%(port)d"), broker)
     self.connection = kombu.connection.BrokerConnection(**broker)
     self.connection_errors = self.connection.connection_errors
     self.channel_errors = self.connection.channel_errors
     if self.memory_transport:
         # Kludge to speed up tests.
         self.connection.transport.polling_interval = 0.0
     self.do_consume = True
     self.consumer_num = itertools.count(1)
     self.connection.connect()
     self.channel = self.connection.channel()
     # work around 'memory' transport bug in 1.1.3
     if self.memory_transport:
         self.channel._new_queue('ae.undeliver')
     for consumer in self.consumers:
         consumer.reconnect(self.channel)
     LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d'),
              broker)
    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs

        if not message:
            try:
                message = self.msg_fmt % kwargs

            except Exception:
                # kwargs doesn't match a variable in the message
                # log the issue and the kwargs
                LOG.exception(_('Exception in string format operation'))
                for name, value in six.iteritems(kwargs):
                    LOG.error("%s: %s", name, value)
                # at least get the core message out if something happened
                message = self.msg_fmt

        super(RPCException, self).__init__(message)
Example #26
    def start_heartbeat(self):
        """Implementation of MatchMakerBase.start_heartbeat.

        Launches a greenthread that loops send_heartbeats(),
        yielding for CONF.matchmaker_heartbeat_freq seconds
        between iterations.
        """
        if not self.hosts:
            raise MatchMakerException(
                _("Register before starting heartbeat."))

        def do_heartbeat():
            while True:
                self.send_heartbeats()
                eventlet.sleep(CONF.matchmaker_heartbeat_freq)

        self._heart = eventlet.spawn(do_heartbeat)
Example #27
    def _disconnect(self):
        if self.connection:
            # XXX(nic): when reconnecting to a RabbitMQ cluster
            # with mirrored queues in use, the attempt to release the
            # connection can hang "indefinitely" somewhere deep down
            # in Kombu.  Blocking the thread for a bit prior to
            # release seems to kludge around the problem where it is
            # otherwise reproducible.
            if self.conf.kombu_reconnect_delay > 0:
                LOG.info(_("Delaying reconnect for %1.1f seconds...") %
                         self.conf.kombu_reconnect_delay)
                time.sleep(self.conf.kombu_reconnect_delay)

            try:
                self.connection.release()
            except self.connection_errors:
                pass
            self.connection = None
Example #28
 def _dispatch_and_reply(self, incoming):
     try:
         incoming.reply(self._dispatch(incoming.ctxt,
                                       incoming.message))
     except ExpectedException as e:
         LOG.debug(u'Expected exception during message handling (%s)',
                   e.exc_info[1])
         incoming.reply(failure=e.exc_info, log_failure=False)
     except Exception as e:
         # sys.exc_info() is deleted by LOG.exception().
         exc_info = sys.exc_info()
         LOG.error(_('Exception during message handling: %s'), e,
                   exc_info=exc_info)
         incoming.reply(failure=exc_info)
         # NOTE(dhellmann): Remove circular object reference
         # between the current stack frame and the traceback in
         # exc_info.
         del exc_info
Example #29
    def process(self, proxy, ctx, data):
        data.setdefault('version', None)
        data.setdefault('args', {})

        # Methods starting with '-' are
        # processed internally (not a valid method name).
        method = data.get('method')
        if not method:
            LOG.error(_("RPC message did not include method."))
            return

        # Internal method
        # uses internal context for safety.
        if method == '-reply':
            self.private_ctx.reply(ctx, proxy, **data['args'])
            return

        proxy.dispatch(ctx, data['version'],
                       data['method'], data.get('namespace'), **data['args'])
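
A hypothetical message dict as handled by process() above; any method that is not an internal one (like '-reply') ends up on proxy.dispatch():

data = {
    'method': 'ping',
    'args': {'payload': 'hello'},
    'version': '1.0',
    'namespace': None,
}
# process(proxy, ctx, data) then calls:
#   proxy.dispatch(ctx, '1.0', 'ping', None, payload='hello')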
Example #30
 def reconnect(self, channel):
     """Re-declare the queue after a rabbit reconnect."""
     self.channel = channel
     self.kwargs['channel'] = channel
     self.queue = kombu.entity.Queue(**self.kwargs)
     try:
         self.queue.declare()
     except Exception as e:
         # NOTE: This exception may be triggered by a race condition.
         # Simply retrying will solve the error most of the time and
         # should work well enough as a workaround until the race condition
         # itself can be fixed.
         # TODO(jrosenboom): In order to be able to match the Exception
         # more specifically, we have to refactor ConsumerBase to use
         # 'channel_errors' of the kombu connection object that
         # has created the channel.
         # See https://bugs.launchpad.net/neutron/+bug/1318721 for details.
         LOG.exception(_("Declaring queue failed with (%s), retrying"), e)
         self.queue.declare()
Example #31
    def ensure(self, error_callback, method, retry=None,
               timeout_is_error=True):
        """Will retry up to retry number of times.
        retry = None means use the value of rabbit_max_retries
        retry = -1 means to retry forever
        retry = 0 means no retry
        retry = N means N retries
        """

        current_pid = os.getpid()
        if self._initial_pid != current_pid:
            LOG.warn("Process forked after connection established! "
                     "This can result in unpredictable behavior. "
                     "See: http://docs.openstack.org/developer/"
                     "oslo.messaging/transport.html")
            self._initial_pid = current_pid

        if retry is None:
            retry = self.max_retries
        if retry is None or retry < 0:
            retry = None

        def on_error(exc, interval):
            self.channel = None

            error_callback and error_callback(exc)

            interval = (self.conf.kombu_reconnect_delay + interval
                        if self.conf.kombu_reconnect_delay > 0 else interval)

            info = {'hostname': self.connection.hostname,
                    'port': self.connection.port,
                    'err_str': exc, 'sleep_time': interval}

            if 'Socket closed' in six.text_type(exc):
                LOG.error(_('AMQP server %(hostname)s:%(port)s closed'
                            ' the connection. Check login credentials:'
                            ' %(err_str)s'), info)
            else:
                LOG.error(_('AMQP server on %(hostname)s:%(port)s is '
                            'unreachable: %(err_str)s. Trying again in '
                            '%(sleep_time)d seconds.'), info)

            # XXX(nic): when reconnecting to a RabbitMQ cluster
            # with mirrored queues in use, the attempt to release the
            # connection can hang "indefinitely" somewhere deep down
            # in Kombu.  Blocking the thread for a bit prior to
            # release seems to kludge around the problem where it is
            # otherwise reproducible.
            # TODO(sileht): Check if this is still useful: since we
            # use kombu for HA connections, the interval_step
            # should be sufficient, because the underlying kombu
            # transport connection object is freed.
            if self.conf.kombu_reconnect_delay > 0:
                time.sleep(self.conf.kombu_reconnect_delay)

        def on_reconnection(new_channel):
            """Callback invoked when the kombu reconnects and creates
            a new channel, we use it the reconfigure our consumers.
            """
            self.consumer_num = itertools.count(1)
            for consumer in self.consumers:
                consumer.reconnect(new_channel)

        recoverable_errors = (self.connection.recoverable_channel_errors +
                              self.connection.recoverable_connection_errors)
        try:
            autoretry_method = self.connection.autoretry(
                method, channel=self.channel,
                max_retries=retry,
                errback=on_error,
                interval_start=self.interval_start or 1,
                interval_step=self.interval_stepping,
                on_revive=on_reconnection,
            )
            ret, channel = autoretry_method()
            self.channel = channel
            return ret
        except recoverable_errors as exc:
            self.channel = None
            # NOTE(sileht): number of retry exceeded and the connection
            # is still broken
            msg = _('Unable to connect to AMQP server on '
                    '%(hostname)s:%(port)d after %(retry)d '
                    'tries: %(err_str)s') % {
                        'hostname': self.connection.hostname,
                        'port': self.connection.port,
                        'err_str': exc,
                        'retry': retry}
            LOG.error(msg)
            raise exceptions.MessageDeliveryFailure(msg)
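
The simplest real call into ensure() appears in Example #39 below: the supplied method just has to accept the (possibly re-created) channel, and everything else is handled by kombu's autoretry.

# e.g. at connection startup (see Example #39):
self.ensure(error_callback=None,
            method=lambda channel: True)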
Example #32
 def validate_ssl_version(cls, version):
     key = version.lower()
     try:
         return cls._SSL_PROTOCOLS[key]
     except KeyError:
         raise RuntimeError(_("Invalid SSL version : %s") % version)
Example #33
 def _error_callback(exc):
     log_info = {'topic': topic, 'err_str': exc}
     LOG.exception(
         _("Failed to publish message to topic "
           "'%(topic)s': %(err_str)s"), log_info)
Example #34
 def _connect_error(exc):
     log_info = {'topic': topic, 'err_str': exc}
     LOG.error(
         _("Failed to declare consumer for topic '%(topic)s': "
           "%(err_str)s"), log_info)
Example #35
class InvalidRPCConnectionReuse(RPCException):
    msg_fmt = _("Invalid reuse of an RPC connection.")
Example #36
class DuplicateMessageError(RPCException):
    msg_fmt = _("Found duplicate message(%(msg_id)s). Skipping it.")
Example #37
class UnsupportedRpcEnvelopeVersion(RPCException):
    msg_fmt = _("Specified RPC envelope version, %(version)s, "
                "not supported by this endpoint.")
Example #38
class UnsupportedRpcVersion(RPCException):
    msg_fmt = _("Specified RPC version, %(version)s, not supported by "
                "this endpoint.")
Example #39
    def __init__(self, conf, url):
        self.consumers = []
        self.consumer_num = itertools.count(1)
        self.conf = conf
        self.max_retries = self.conf.rabbit_max_retries
        # Try forever?
        if self.max_retries <= 0:
            self.max_retries = None
        self.interval_start = self.conf.rabbit_retry_interval
        self.interval_stepping = self.conf.rabbit_retry_backoff
        # max retry-interval = 30 seconds
        self.interval_max = 30

        self._ssl_params = self._fetch_ssl_params()
        self._login_method = self.conf.rabbit_login_method

        if url.virtual_host is not None:
            virtual_host = url.virtual_host
        else:
            virtual_host = self.conf.rabbit_virtual_host

        self._url = ''
        if self.conf.fake_rabbit:
            LOG.warn("Deprecated: fake_rabbit option is deprecated, set "
                     "rpc_backend to kombu+memory or use the fake "
                     "driver instead.")
            self._url = 'memory://%s/' % virtual_host
        elif url.hosts:
            for host in url.hosts:
                transport = url.transport.replace('kombu+', '')
                transport = transport.replace('rabbit', 'amqp')
                self._url += '%s%s://%s:%s@%s:%s/%s' % (
                    ";" if self._url else '',
                    transport,
                    parse.quote(host.username or ''),
                    parse.quote(host.password or ''),
                    host.hostname or '', str(host.port or 5672),
                    virtual_host)
        elif url.transport.startswith('kombu+'):
            # NOTE(sileht): url has a + but no hosts
            # (like kombu+memory:///), pass it to kombu as-is
            transport = url.transport.replace('kombu+', '')
            self._url = "%s://%s" % (transport, virtual_host)
        else:
            for adr in self.conf.rabbit_hosts:
                hostname, port = netutils.parse_host_port(
                    adr, default_port=self.conf.rabbit_port)
                self._url += '%samqp://%s:%s@%s:%s/%s' % (
                    ";" if self._url else '',
                    parse.quote(self.conf.rabbit_userid),
                    parse.quote(self.conf.rabbit_password),
                    hostname, port,
                    virtual_host)

        self._initial_pid = os.getpid()

        self.do_consume = True
        self._consume_loop_stopped = False

        self.channel = None
        self.connection = kombu.connection.Connection(
            self._url, ssl=self._ssl_params, login_method=self._login_method,
            failover_strategy="shuffle")

        LOG.info(_('Connecting to AMQP server on %(hostname)s:%(port)d'),
                 {'hostname': self.connection.hostname,
                  'port': self.connection.port})
        # NOTE(sileht): just ensure the connection is set up at startup
        self.ensure(error_callback=None,
                    method=lambda channel: True)
        LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d'),
                 {'hostname': self.connection.hostname,
                  'port': self.connection.port})

        if self._url.startswith('memory://'):
            # Kludge to speed up tests.
            self.connection.transport.polling_interval = 0.0
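
A worked example, with hypothetical config values, of the failover URL string the rabbit_hosts branch above builds and hands to kombu (semicolon-separated alternates, one per broker):

from six.moves.urllib import parse

hosts = [('rabbit-1', 5672), ('rabbit-2', 5672)]
userid, password, virtual_host = 'guest', 'guest', '/'

url = ''
for hostname, port in hosts:
    url += '%samqp://%s:%s@%s:%s/%s' % (
        ';' if url else '',
        parse.quote(userid), parse.quote(password),
        hostname, port, virtual_host)

assert url == ('amqp://guest:guest@rabbit-1:5672//'
               ';amqp://guest:guest@rabbit-2:5672//')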
Example #40
class RpcVersionCapError(RPCException):
    msg_fmt = _("Specified RPC version cap, %(version_cap)s, is too low")
Example #41
 def _error_callback(exc):
     self.do_consume = True
     timer.check_return(_raise_timeout, exc)
     LOG.exception(_('Failed to consume message from queue: %s'),
                   exc)
Example #42
    def reconnect(self, retry=None):
        """Handles reconnecting and re-establishing queues.
        Will retry up to retry number of times.
        retry = None means use the value of rabbit_max_retries
        retry = -1 means to retry forever
        retry = 0 means no retry
        retry = N means N retries
        Sleep between tries, starting at self.interval_start
        seconds, increasing by self.interval_stepping seconds on
        each attempt.
        """

        attempt = 0
        loop_forever = False
        if retry is None:
            retry = self.max_retries
        if retry is None or retry < 0:
            loop_forever = True

        while True:
            self._disconnect()

            broker = six.next(self.brokers)
            attempt += 1
            try:
                self._connect(broker)
                return
            except IOError as ex:
                e = ex
            except self.connection_errors as ex:
                e = ex
            except Exception as ex:
                # NOTE(comstud): Unfortunately it's possible for amqplib
                # to return an error not covered by its transport
                # connection_errors in the case of a timeout waiting for
                # a protocol response.  (See paste link in LP888621)
                # So, we check all exceptions for 'timeout' in them
                # and try to reconnect in this case.
                if 'timeout' not in six.text_type(ex):
                    raise
                e = ex

            log_info = {}
            log_info['err_str'] = e
            log_info['retry'] = retry or 0
            log_info.update(broker)

            if not loop_forever and attempt > retry:
                msg = _('Unable to connect to AMQP server on '
                        '%(hostname)s:%(port)d after %(retry)d '
                        'tries: %(err_str)s') % log_info
                LOG.error(msg)
                raise exceptions.MessageDeliveryFailure(msg)
            else:
                if attempt == 1:
                    sleep_time = self.interval_start or 1
                elif attempt > 1:
                    sleep_time += self.interval_stepping

                sleep_time = min(sleep_time, self.interval_max)

                log_info['sleep_time'] = sleep_time
                if 'Socket closed' in six.text_type(e):
                    LOG.error(
                        _('AMQP server %(hostname)s:%(port)d closed'
                          ' the connection. Check login credentials:'
                          ' %(err_str)s'), log_info)
                else:
                    LOG.error(
                        _('AMQP server on %(hostname)s:%(port)d is '
                          'unreachable: %(err_str)s. Trying again in '
                          '%(sleep_time)d seconds.'), log_info)
                time.sleep(sleep_time)
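
A worked example, with hypothetical settings, of the back-off schedule this produces: the sleep starts at interval_start, grows by interval_stepping per failed attempt, and is capped at interval_max.

interval_start, interval_stepping, interval_max = 1, 2, 30

sleeps = []
sleep_time = 0
for attempt in range(1, 6):
    if attempt == 1:
        sleep_time = interval_start or 1
    else:
        sleep_time += interval_stepping
    sleeps.append(min(sleep_time, interval_max))

assert sleeps == [1, 3, 5, 7, 9]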
Example #43
 def _consume(sock):
     LOG.info(_("Consuming socket"))
     while True:
         self.consume(sock)
Example #44
 def send(self, data, **kwargs):
     if not self.can_send:
         raise RPCException(_("You cannot send on this socket."))
     self.sock.send_multipart(data, **kwargs)
Example #45
 def recv(self, **kwargs):
     if not self.can_recv:
         raise RPCException(_("You cannot recv on this socket."))
     return self.sock.recv_multipart(**kwargs)