def consume_in_thread(self): """Runs the ZmqProxy service.""" ipc_dir = CONF.rpc_zmq_ipc_dir consume_in = "tcp://%s:%s" % \ (CONF.rpc_zmq_bind_address, CONF.rpc_zmq_port) consumption_proxy = InternalContext(None) try: os.makedirs(ipc_dir) except os.error: if not os.path.isdir(ipc_dir): with excutils.save_and_reraise_exception(): LOG.error( _("Required IPC directory does not exist at" " %s"), ipc_dir) try: self.register(consumption_proxy, consume_in, zmq.PULL) except zmq.ZMQError: if os.access(ipc_dir, os.X_OK): with excutils.save_and_reraise_exception(): LOG.error(_("Permission denied to IPC directory at" " %s"), ipc_dir) with excutils.save_and_reraise_exception(): LOG.error( _("Could not create ZeroMQ receiver daemon. " "Socket may already be in use.")) super(ZmqProxy, self).consume_in_thread()
def publisher(waiter): LOG.info(_("Creating proxy for topic: %s"), topic) try: # The topic is received over the network, # don't trust this input. if self.badchars.search(topic) is not None: emsg = _("Topic contained dangerous characters.") LOG.warn(emsg) raise RPCException(emsg) out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" % (ipc_dir, topic), sock_type, bind=True) except RPCException: waiter.send_exception(*sys.exc_info()) return self.topic_proxy[topic] = eventlet.queue.LightQueue( CONF.rpc_zmq_topic_backlog) self.sockets.append(out_sock) # It takes some time for a pub socket to open, # before we can have any faith in doing a send() to it. if sock_type == zmq.PUB: eventlet.sleep(.5) waiter.send(True) while (True): data = self.topic_proxy[topic].get() out_sock.send(data, copy=False)
def publisher(waiter): LOG.info(_("Creating proxy for topic: %s"), topic) try: # The topic is received over the network, # don't trust this input. if self.badchars.search(topic) is not None: emsg = _("Topic contained dangerous characters.") LOG.warn(emsg) raise RPCException(emsg) out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" % (ipc_dir, topic), sock_type, bind=True) except RPCException: waiter.send_exception(*sys.exc_info()) return self.topic_proxy[topic] = eventlet.queue.LightQueue( CONF.rpc_zmq_topic_backlog) self.sockets.append(out_sock) # It takes some time for a pub socket to open, # before we can have any faith in doing a send() to it. if sock_type == zmq.PUB: eventlet.sleep(.5) waiter.send(True) while(True): data = self.topic_proxy[topic].get() out_sock.send(data, copy=False)
def consume_in_thread(self): """Runs the ZmqProxy service.""" ipc_dir = CONF.rpc_zmq_ipc_dir consume_in = "tcp://%s:%s" % \ (CONF.rpc_zmq_bind_address, CONF.rpc_zmq_port) consumption_proxy = InternalContext(None) try: os.makedirs(ipc_dir) except os.error: if not os.path.isdir(ipc_dir): with excutils.save_and_reraise_exception(): LOG.error(_("Required IPC directory does not exist at" " %s"), ipc_dir) try: self.register(consumption_proxy, consume_in, zmq.PULL) except zmq.ZMQError: if os.access(ipc_dir, os.X_OK): with excutils.save_and_reraise_exception(): LOG.error(_("Permission denied to IPC directory at" " %s"), ipc_dir) with excutils.save_and_reraise_exception(): LOG.error(_("Could not create ZeroMQ receiver daemon. " "Socket may already be in use.")) super(ZmqProxy, self).consume_in_thread()
def reconnect(self, retry=None):
    """Handles reconnecting and re-establishing sessions and queues.

    Will retry up to retry number of times.
    retry = None or -1 means to retry forever
    retry = 0 means no retry
    retry = N means N retries
    """
    delay = 1
    attempt = 0
    loop_forever = False
    if retry is None or retry < 0:
        loop_forever = True

    while True:
        self._disconnect()

        attempt += 1
        broker = six.next(self.brokers)
        try:
            self._connect(broker)
        except qpid_exceptions.MessagingError as e:
            msg_dict = dict(e=e,
                            delay=delay,
                            retry=retry,
                            broker=broker)
            if not loop_forever and attempt > retry:
                msg = _('Unable to connect to AMQP server on '
                        '%(broker)s after %(retry)d '
                        'tries: %(e)s') % msg_dict
                LOG.error(msg)
                raise exceptions.MessageDeliveryFailure(msg)
            else:
                msg = _("Unable to connect to AMQP server on %(broker)s: "
                        "%(e)s. Sleeping %(delay)s seconds") % msg_dict
                LOG.error(msg)
                time.sleep(delay)
                delay = min(delay + 1, 5)
        else:
            LOG.info(_('Connected to AMQP server on %s'), broker['host'])
            break

    self.session = self.connection.session()

    if self.consumers:
        consumers = self.consumers
        self.consumers = {}

        for consumer in six.itervalues(consumers):
            consumer.reconnect(self.session)
            self._register_consumer(consumer)

        LOG.debug("Re-established AMQP queues")
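# The following is an illustrative, standalone sketch (not part of the
# driver above) of the retry-count semantics described in reconnect()'s
# docstring: retry=None or -1 retries forever, retry=0 makes a single
# attempt, and retry=N allows N retries after the first attempt. All names
# here (connect_with_retry, connect, brokers) are hypothetical.
import itertools
import time


def connect_with_retry(connect, brokers, retry=None, max_delay=5):
    loop_forever = retry is None or retry < 0
    delay = 1
    attempt = 0
    for broker in itertools.cycle(brokers):
        attempt += 1
        try:
            return connect(broker)
        except IOError:
            # Give up once the attempt count exceeds the allowed retries.
            if not loop_forever and attempt > retry:
                raise
            time.sleep(delay)
            delay = min(delay + 1, max_delay)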
def __init__(self, info=None, topic=None, method=None):
    """Initializes a Timeout exception.

    :param info: Extra info to convey to the user
    :param topic: The topic that the rpc call was sent to
    :param method: The name of the rpc method being called
    """
    self.info = info
    self.topic = topic
    self.method = method
    super(Timeout, self).__init__(None,
                                  info=info or _('<unknown>'),
                                  topic=topic or _('<unknown>'),
                                  method=method or _('<unknown>'))
def register(self, proxy, in_addr, zmq_type_in,
             in_bind=True, subscribe=None):
    LOG.info(_("Registering reactor"))

    if zmq_type_in not in (zmq.PULL, zmq.SUB):
        raise RPCException("Bad input socktype")

    # Items push in.
    inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
                    subscribe=subscribe)

    self.proxies[inq] = proxy
    self.sockets.append(inq)

    LOG.info(_("In reactor registered"))
def _consume():
    # NOTE(sileht):
    # maximum value chosen according to the best practice from kombu:
    # http://kombu.readthedocs.org/en/latest/reference/kombu.common.html#kombu.common.eventloop
    poll_timeout = 1 if timeout is None else min(timeout, 1)

    while True:
        if self._consume_loop_stopped:
            self._consume_loop_stopped = False
            return

        try:
            nxt_receiver = self.session.next_receiver(
                timeout=poll_timeout)
        except qpid_exceptions.Empty as exc:
            poll_timeout = timer.check_return(_raise_timeout, exc,
                                              maximum=1)
        else:
            break

    try:
        self._lookup_consumer(nxt_receiver).consume()
    except Exception:
        LOG.exception(_("Error processing message. "
                        "Skipping it."))
def __init__(self, conf, url,
             default_exchange=None,
             allowed_remote_exmods=None):
    warnings.warn(
        _(
            "The Qpid driver has been deprecated. "
            "The driver is planned to be removed during the "
            "`Mitaka` development cycle."
        ),
        DeprecationWarning,
        stacklevel=2,
    )

    opt_group = cfg.OptGroup(name="oslo_messaging_qpid",
                             title="QPID driver options")
    conf.register_group(opt_group)
    conf.register_opts(qpid_opts, group=opt_group)
    conf.register_opts(rpc_amqp.amqp_opts, group=opt_group)
    conf.register_opts(base.base_opts, group=opt_group)

    connection_pool = rpc_amqp.ConnectionPool(
        conf,
        conf.oslo_messaging_qpid.rpc_conn_pool_size,
        url,
        Connection)

    super(QpidDriver, self).__init__(
        conf, url,
        connection_pool,
        default_exchange,
        allowed_remote_exmods,
        conf.oslo_messaging_qpid.send_single_reply,
    )
def __init__(self, conf, url,
             default_exchange=None,
             allowed_remote_exmods=None):
    warnings.warn(_('The Qpid driver has been deprecated. '
                    'The driver is planned to be removed during the `M` '
                    'development cycle.'),
                  DeprecationWarning, stacklevel=2)

    opt_group = cfg.OptGroup(name='oslo_messaging_qpid',
                             title='QPID driver options')
    conf.register_group(opt_group)
    conf.register_opts(qpid_opts, group=opt_group)
    conf.register_opts(rpc_amqp.amqp_opts, group=opt_group)
    conf.register_opts(base.base_opts, group=opt_group)

    connection_pool = rpc_amqp.ConnectionPool(
        conf, conf.oslo_messaging_qpid.rpc_conn_pool_size,
        url, Connection)

    super(QpidDriver, self).__init__(
        conf, url,
        connection_pool,
        default_exchange,
        allowed_remote_exmods,
        conf.oslo_messaging_qpid.send_single_reply,
    )
def __init__(self, info=None, topic=None, method=None):
    """Initializes a Timeout exception.

    :param info: Extra info to convey to the user
    :param topic: The topic that the rpc call was sent to
    :param method: The name of the rpc method being called
    """
    self.info = info
    self.topic = topic
    self.method = method
    super(Timeout, self).__init__(
        None,
        info=info or _('<unknown>'),
        topic=topic or _('<unknown>'),
        method=method or _('<unknown>'))
def consume(self, sock):
    # TODO(ewindisch): use zero-copy (i.e. references, not copying)
    data = sock.recv()
    LOG.debug("CONSUMER RECEIVED DATA: %s", data)

    proxy = self.proxies[sock]

    if data[2] == 'cast':  # Legacy protocol
        packenv = data[3]

        ctx, msg = _deserialize(packenv)
        request = rpc_common.deserialize_msg(msg)
        ctx = RpcContext.unmarshal(ctx)
    elif data[2] == 'impl_zmq_v2':
        packenv = data[4:]

        msg = unflatten_envelope(packenv)
        request = rpc_common.deserialize_msg(msg)

        # Unmarshal only after verifying the message.
        ctx = RpcContext.unmarshal(data[3])
    else:
        LOG.error(_("ZMQ Envelope version unsupported or unknown."))
        return

    self.pool.spawn_n(self.process, proxy, ctx, request)
def on_error(exc, interval): LOG.debug(_("Received recoverable error from kombu:"), exc_info=True) recoverable_error_callback and recoverable_error_callback(exc) interval = (self.kombu_reconnect_delay + interval if self.kombu_reconnect_delay > 0 else interval) info = {'err_str': exc, 'sleep_time': interval} info.update(self.connection.info()) if 'Socket closed' in six.text_type(exc): LOG.error(_LE('AMQP server %(hostname)s:%(port)s closed' ' the connection. Check login credentials:' ' %(err_str)s'), info) else: LOG.error(_LE('AMQP server on %(hostname)s:%(port)s is ' 'unreachable: %(err_str)s. Trying again in ' '%(sleep_time)d seconds.'), info) # XXX(nic): when reconnecting to a RabbitMQ cluster # with mirrored queues in use, the attempt to release the # connection can hang "indefinitely" somewhere deep down # in Kombu. Blocking the thread for a bit prior to # release seems to kludge around the problem where it is # otherwise reproduceable. # TODO(sileht): Check if this is useful since we # use kombu for HA connection, the interval_step # should sufficient, because the underlying kombu transport # connection object freed. if self.kombu_reconnect_delay > 0: time.sleep(self.kombu_reconnect_delay)
def _get_response(self, ctx, proxy, topic, data):
    """Process a curried message and cast the result to topic."""
    LOG.debug("Running func with context: %s", ctx.to_dict())
    data.setdefault('version', None)
    data.setdefault('args', {})

    try:
        result = proxy.dispatch(
            ctx, data['version'], data['method'],
            data.get('namespace'), **data['args'])
        return ConsumerBase.normalize_reply(result, ctx.replies)
    except greenlet.GreenletExit:
        # ignore these since they are just from shutdowns
        pass
    except rpc_common.ClientException as e:
        LOG.debug("Expected exception during message handling (%s)",
                  e._exc_info[1])
        return {'exc':
                rpc_common.serialize_remote_exception(e._exc_info,
                                                      log_failure=False)}
    except Exception:
        LOG.error(_("Exception during message handling"))
        return {'exc':
                rpc_common.serialize_remote_exception(sys.exc_info())}
def create_consumer(self, topic, proxy, fanout=False):
    # Register with matchmaker.
    _get_matchmaker().register(topic, CONF.rpc_zmq_host)

    # Subscription scenarios
    if fanout:
        sock_type = zmq.SUB
        subscribe = ('', fanout)[type(fanout) == str]
        topic = 'fanout~' + topic.split('.', 1)[0]
    else:
        sock_type = zmq.PULL
        subscribe = None
        topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))

    if topic in self.topics:
        LOG.info(_("Skipping topic registration. Already registered."))
        return

    # Receive messages from (local) proxy
    inaddr = "ipc://%s/zmq_topic_%s" % \
        (CONF.rpc_zmq_ipc_dir, topic)

    LOG.debug("Consumer is a zmq.%s",
              ['PULL', 'SUB'][sock_type == zmq.SUB])

    self.reactor.register(proxy, inaddr, sock_type,
                          subscribe=subscribe, in_bind=False)
    self.topics.append(topic)
class Timeout(RPCException):
    """Signifies that a timeout has occurred.

    This exception is raised if the rpc_response_timeout is reached while
    waiting for a response from the remote side.
    """
    msg_fmt = _('Timeout while waiting on RPC response - '
                'topic: "%(topic)s", RPC method: "%(method)s" '
                'info: "%(info)s"')

    def __init__(self, info=None, topic=None, method=None):
        """Initializes a Timeout exception.

        :param info: Extra info to convey to the user
        :param topic: The topic that the rpc call was sent to
        :param method: The name of the rpc method being called
        """
        self.info = info
        self.topic = topic
        self.method = method
        super(Timeout, self).__init__(None,
                                      info=info or _('<unknown>'),
                                      topic=topic or _('<unknown>'),
                                      method=method or _('<unknown>'))
def on_error(exc, interval): LOG.debug(_("Received recoverable error from kombu:"), exc_info=True) recoverable_error_callback and recoverable_error_callback(exc) interval = (self.driver_conf.kombu_reconnect_delay + interval if self.driver_conf.kombu_reconnect_delay > 0 else interval) info = {'err_str': exc, 'sleep_time': interval} info.update(self.connection.info()) if 'Socket closed' in six.text_type(exc): LOG.error(_LE('AMQP server %(hostname)s:%(port)d closed' ' the connection. Check login credentials:' ' %(err_str)s'), info) else: LOG.error(_LE('AMQP server on %(hostname)s:%(port)d is ' 'unreachable: %(err_str)s. Trying again in ' '%(sleep_time)d seconds.'), info) # XXX(nic): when reconnecting to a RabbitMQ cluster # with mirrored queues in use, the attempt to release the # connection can hang "indefinitely" somewhere deep down # in Kombu. Blocking the thread for a bit prior to # release seems to kludge around the problem where it is # otherwise reproduceable. # TODO(sileht): Check if this is useful since we # use kombu for HA connection, the interval_step # should sufficient, because the underlying kombu transport # connection object freed. if self.driver_conf.kombu_reconnect_delay > 0: time.sleep(self.driver_conf.kombu_reconnect_delay)
def _load_notifiers(self):
    """One-time load of notifier config file."""
    self.routing_groups = {}
    self.used_drivers = set()
    filename = CONF.routing_notifier_config
    if not filename:
        return

    # Infer which drivers are used from the config file.
    self.routing_groups = yaml.load(
        self._get_notifier_config_file(filename))
    if not self.routing_groups:
        self.routing_groups = {}  # In case we got None from load()
        return

    for group in self.routing_groups.values():
        self.used_drivers.update(group.keys())

    LOG.debug('loading notifiers from %s', self.NOTIFIER_PLUGIN_NAMESPACE)
    self.plugin_manager = dispatch.DispatchExtensionManager(
        namespace=self.NOTIFIER_PLUGIN_NAMESPACE,
        check_func=self._should_load_plugin,
        invoke_on_load=True,
        invoke_args=None)
    if not list(self.plugin_manager):
        LOG.warning(_("Failed to load any notifiers for %s"),
                    self.NOTIFIER_PLUGIN_NAMESPACE)
def run(self, key):
    if not self._ring_has(key):
        LOG.warn(
            _("No key defining hosts for topic '%s', "
              "see ringfile"), key)
        return []
    host = next(self.ring0[key])
    return [(key + '.' + host, host)]
def _call_notify(self, ext, context, message, priority, retry,
                 accepted_drivers):
    """Emit the notification."""
    # accepted_drivers is passed in as a result of the map() function
    LOG.info(_("Routing '%(event)s' notification to '%(driver)s' driver"),
             {'event': message.get('event_type'), 'driver': ext.name})
    ext.obj.notify(context, message, priority, retry)
def _publish_and_retry_on_missing_exchange(self, exchange, msg,
                                           routing_key=None, timeout=None):
    """Publisher that retries if the exchange is missing."""
    if not exchange.passive:
        raise RuntimeError("_publish_and_retry_on_missing_exchange() must "
                           "be called with a passive exchange.")

    # TODO(sileht): use @retrying
    # NOTE(sileht): no need to wait longer than the timeout; the
    # application expects a response before the timeout is exhausted.
    duration = (timeout if timeout is not None
                else self.kombu_reconnect_timeout)

    timer = rpc_common.DecayingTimer(duration=duration)
    timer.start()

    while True:
        try:
            self._publish(exchange, msg, routing_key=routing_key,
                          timeout=timeout)
            return
        except self.connection.channel_errors as exc:
            # NOTE(noelbk/sileht):
            # If rabbit dies, the consumer can be disconnected before the
            # publisher sends, and if the consumer hasn't declared the
            # queue, the publisher will send a message to an exchange
            # that's not bound to a queue, and the message will be lost.
            # So we set passive=True on the publisher exchange and catch
            # the 404 kombu ChannelError and retry until the exchange
            # appears.
            if exc.code == 404 and timer.check_return() > 0:
                LOG.info(_LI("The exchange %(exchange)s to send to "
                             "%(routing_key)s doesn't exist yet, "
                             "retrying...") % {
                                 'exchange': exchange.name,
                                 'routing_key': routing_key})
                time.sleep(0.25)
                continue
            elif exc.code == 404:
                msg = _("The exchange %(exchange)s to send to "
                        "%(routing_key)s still doesn't exist after "
                        "%(duration)s sec, abandoning...") % {
                            'duration': duration,
                            'exchange': exchange.name,
                            'routing_key': routing_key}
                LOG.info(msg)
                raise rpc_amqp.AMQPDestinationNotFound(msg)
            raise
def run(self, key):
    # Assume starts with "fanout~", strip it for lookup.
    nkey = key.split('fanout~')[1:][0]
    if not self._ring_has(nkey):
        LOG.warn(
            _("No key defining hosts for topic '%s', "
              "see ringfile"), nkey)
        return []
    return map(lambda x: (key + '.' + x, x), self.ring[nkey])
def consume(self): """Fetch the message and pass it to the callback object.""" message = self.receiver.fetch() try: self._unpack_json_msg(message) self.callback(QpidMessage(self.session, message)) except Exception: LOG.exception(_("Failed to process message... skipping it.")) self.session.acknowledge(message)
def run(self, key):
    if not self._ring_has(key):
        LOG.warn(
            _("No key defining hosts for topic '%s', "
              "see ringfile"), key)
        return []
    host = next(self.ring0[key])
    return [(key + '.' + host, host)]
def _run(self):
    try:
        message = self.listener.poll()
        if message is not None:
            message.acknowledge()
            self._received.set()
            self.message = message
            message.reply(reply=True)
    except Exception:
        LOG.exception(_("Unexpected exception occurred."))
def unregister(self, key, host):
    """Unregister a topic."""
    if (key, host) in self.host_topic:
        del self.host_topic[(key, host)]
    self.hosts.discard(host)
    self.backend_unregister(key, '.'.join((key, host)))

    LOG.info(_("Matchmaker unregistered: %(key)s, %(host)s"),
             {'key': key, 'host': host})
def start(self):
    self._running = True
    while self._running:
        try:
            incoming = self.listener.poll()
            if incoming is not None:
                with self.dispatcher(incoming) as callback:
                    callback()
        except Exception:
            LOG.exception(_("Unexpected exception occurred."))
def run(self, key):
    # Assume starts with "fanout~", strip it for lookup.
    nkey = key.split('fanout~')[1:][0]
    if not self._ring_has(nkey):
        LOG.warn(
            _("No key defining hosts for topic '%s', "
              "see ringfile"), nkey)
        return []
    return map(lambda x: (key + '.' + x, x), self.ring[nkey])
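# A self-contained illustration of the fanout key handling in run() above:
# strip the "fanout~" prefix for the ring lookup, then fan the message out
# to every host registered for the bare topic. The ring contents below are
# made up for the example.
ring = {'compute': ['host-a', 'host-b']}
key = 'fanout~compute'
nkey = key.split('fanout~')[1:][0]          # -> 'compute'
print([(key + '.' + host, host) for host in ring.get(nkey, [])])
# [('fanout~compute.host-a', 'host-a'), ('fanout~compute.host-b', 'host-b')]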
def _serialize(data): """Serialization wrapper. We prefer using JSON, but it cannot encode all types. Error if a developer passes us bad data. """ try: return jsonutils.dumps(data, ensure_ascii=True) except TypeError: with excutils.save_and_reraise_exception(): LOG.error(_("JSON serialization failed."))
def _callback_handler(self, message, callback):
    """Call callback with deserialized message.

    Messages that are processed and ack'ed.
    """
    try:
        callback(RabbitMessage(message))
    except Exception:
        LOG.exception(_("Failed to process message"
                        " ... skipping it."))
        message.ack()
def __init__(self, addr, zmq_type, bind=True, subscribe=None):
    self.ctxt = zmq.Context(CONF.rpc_zmq_contexts)
    self.sock = self.ctxt.socket(zmq_type)

    # Enable IPv6-support in libzmq.
    # When IPv6 is enabled, a socket will connect to, or accept
    # connections from, both IPv4 and IPv6 hosts.
    try:
        self.sock.ipv6 = True
    except AttributeError:
        # NOTE(dhellmann): Sometimes the underlying library does
        # not recognize the IPV6 option. There's nothing we can
        # really do in that case, so ignore the error and keep
        # trying to work.
        pass

    self.addr = addr
    self.type = zmq_type
    self.subscriptions = []

    # Support failures on sending/receiving on wrong socket type.
    self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
    self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
    self.can_sub = zmq_type in (zmq.SUB, )

    # Support list, str, & None for subscribe arg (cast to list)
    do_sub = {
        list: subscribe,
        str: [subscribe],
        type(None): []
    }[type(subscribe)]

    for f in do_sub:
        self.subscribe(f)

    str_data = {
        'addr': addr,
        'type': self.socket_s(),
        'subscribe': subscribe,
        'bind': bind
    }

    LOG.debug("Connecting to %(addr)s with %(type)s", str_data)
    LOG.debug("-> Subscribed to %(subscribe)s", str_data)
    LOG.debug("-> bind: %(bind)s", str_data)

    try:
        if bind:
            self.sock.bind(addr)
        else:
            self.sock.connect(addr)
    except Exception:
        raise RPCException(_("Could not open socket."))
def run(self): """The incoming message dispath itself Can be run in an other thread/greenlet/corotine if the executor is able to do it. """ try: self._result = self._dispatch(self._incoming) except Exception: msg = _('The dispatcher method must catches all exceptions') LOG.exception(msg) raise RuntimeError(msg)
def _get_matchmaker(*args, **kwargs):
    global matchmaker
    if not matchmaker:
        mm = CONF.rpc_zmq_matchmaker
        if mm.endswith('matchmaker.MatchMakerRing'):
            # str.replace() returns a new string, so the result must be
            # reassigned for the deprecated name to actually be remapped.
            mm = mm.replace('matchmaker', 'matchmaker_ring')
            LOG.warn(_('rpc_zmq_matchmaker = %(orig)s is deprecated; use'
                       ' %(new)s instead') % dict(
                           orig=CONF.rpc_zmq_matchmaker, new=mm))
        matchmaker = importutils.import_object(mm, *args, **kwargs)
    return matchmaker
def _multi_send(method, context, topic, msg, timeout=None,
                envelope=False, _msg_id=None,
                allowed_remote_exmods=None):
    """Wraps the sending of messages.

    Dispatches to the matchmaker and sends message to all relevant hosts.
    """
    allowed_remote_exmods = allowed_remote_exmods or []
    conf = CONF
    LOG.debug(' '.join(map(pformat, (topic, msg))))

    queues = _get_matchmaker().queues(topic)
    LOG.debug("Sending message(s) to: %s", queues)

    # Don't stack if we have no matchmaker results
    if not queues:
        LOG.warn(_("No matchmaker results. Not casting."))
        # While not strictly a timeout, callers know how to handle
        # this exception and a timeout isn't too big a lie.
        raise rpc_common.Timeout(_("No match from matchmaker."))

    # This supports brokerless fanout (addresses > 1)
    return_val = None
    for queue in queues:
        _topic, ip_addr = queue
        _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)

        if method.__name__ == '_cast':
            eventlet.spawn_n(method, _addr, context,
                             _topic, msg, timeout, envelope, _msg_id)
        else:
            return_val = method(_addr, context, _topic, msg, timeout,
                                envelope, allowed_remote_exmods)

    return return_val
def _publish_and_retry_on_missing_exchange(self, exchange, msg,
                                           routing_key=None, timeout=None):
    """Publisher that retries if the exchange is missing."""
    if not exchange.passive:
        raise RuntimeError("_publish_and_retry_on_missing_exchange() must "
                           "be called with a passive exchange.")

    # TODO(sileht): use @retrying
    # NOTE(sileht): no need to wait longer than the timeout; the
    # application expects a response before the timeout is exhausted.
    duration = (
        timeout if timeout is not None
        else self.kombu_reconnect_timeout
    )

    timer = rpc_common.DecayingTimer(duration=duration)
    timer.start()

    while True:
        try:
            self._publish(exchange, msg, routing_key=routing_key,
                          timeout=timeout)
            return
        except self.connection.channel_errors as exc:
            # NOTE(noelbk/sileht):
            # If rabbit dies, the consumer can be disconnected before the
            # publisher sends, and if the consumer hasn't declared the
            # queue, the publisher will send a message to an exchange
            # that's not bound to a queue, and the message will be lost.
            # So we set passive=True on the publisher exchange and catch
            # the 404 kombu ChannelError and retry until the exchange
            # appears.
            if exc.code == 404 and timer.check_return() > 0:
                LOG.info(_LI("The exchange %(exchange)s to send to "
                             "%(routing_key)s doesn't exist yet, "
                             "retrying...") % {
                                 'exchange': exchange.name,
                                 'routing_key': routing_key})
                time.sleep(0.25)
                continue
            elif exc.code == 404:
                msg = _("The exchange %(exchange)s to send to "
                        "%(routing_key)s still doesn't exist after "
                        "%(duration)s sec, abandoning...") % {
                            'duration': duration,
                            'exchange': exchange.name,
                            'routing_key': routing_key}
                LOG.info(msg)
                raise rpc_amqp.AMQPDestinationNotFound(msg)
            raise
def check_return(self, timeout_callback=None, *args, **kwargs):
    maximum = kwargs.pop('maximum', None)
    if self._duration is None:
        return None if maximum is None else maximum
    if self._ends_at is None:
        raise RuntimeError(_("Can not check/return a timeout from a timer"
                             " that has not been started."))

    left = self._ends_at - time.time()
    if left <= 0 and timeout_callback is not None:
        timeout_callback(*args, **kwargs)

    return left if maximum is None else min(left, maximum)
def __init__(self, addr, zmq_type, bind=True, subscribe=None):
    self.ctxt = zmq.Context(CONF.rpc_zmq_contexts)
    self.sock = self.ctxt.socket(zmq_type)

    # Enable IPv6-support in libzmq.
    # When IPv6 is enabled, a socket will connect to, or accept
    # connections from, both IPv4 and IPv6 hosts.
    try:
        self.sock.ipv6 = True
    except AttributeError:
        # NOTE(dhellmann): Sometimes the underlying library does
        # not recognize the IPV6 option. There's nothing we can
        # really do in that case, so ignore the error and keep
        # trying to work.
        pass

    self.addr = addr
    self.type = zmq_type
    self.subscriptions = []

    # Support failures on sending/receiving on wrong socket type.
    self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
    self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
    self.can_sub = zmq_type in (zmq.SUB, )

    # Support list, str, & None for subscribe arg (cast to list)
    do_sub = {
        list: subscribe,
        str: [subscribe],
        type(None): []
    }[type(subscribe)]

    for f in do_sub:
        self.subscribe(f)

    str_data = {'addr': addr, 'type': self.socket_s(),
                'subscribe': subscribe, 'bind': bind}

    LOG.debug("Connecting to %(addr)s with %(type)s", str_data)
    LOG.debug("-> Subscribed to %(subscribe)s", str_data)
    LOG.debug("-> bind: %(bind)s", str_data)

    try:
        if bind:
            self.sock.bind(addr)
        else:
            self.sock.connect(addr)
    except Exception:
        raise RPCException(_("Could not open socket."))
def check_return(self, timeout_callback=None, *args, **kwargs):
    maximum = kwargs.pop('maximum', None)
    if self._duration is None:
        return None if maximum is None else maximum
    if self._ends_at is None:
        raise RuntimeError(
            _("Can not check/return a timeout from a timer"
              " that has not been started."))

    left = self._ends_at - time.time()
    if left <= 0 and timeout_callback is not None:
        timeout_callback(*args, **kwargs)

    return left if maximum is None else min(left, maximum)
def _callback(self, message):
    """Call callback with deserialized message.

    Messages that are processed and ack'ed.
    """
    m2p = getattr(self.queue.channel, 'message_to_python', None)
    if m2p:
        message = m2p(message)

    try:
        self.callback(RabbitMessage(message))
    except Exception:
        LOG.exception(_("Failed to process message"
                        " ... skipping it."))
        message.ack()
def start_heartbeat(self):
    """Implementation of MatchMakerBase.start_heartbeat.

    Launches greenthread looping send_heartbeats(),
    yielding for CONF.matchmaker_heartbeat_freq seconds
    between iterations.
    """
    if not self.hosts:
        raise MatchMakerException(_("Register before starting heartbeat."))

    def do_heartbeat():
        while True:
            self.send_heartbeats()
            eventlet.sleep(CONF.matchmaker_heartbeat_freq)

    self._heart = eventlet.spawn(do_heartbeat)
def start_heartbeat(self):
    """Implementation of MatchMakerBase.start_heartbeat.

    Launches greenthread looping send_heartbeats(),
    yielding for CONF.matchmaker_heartbeat_freq seconds
    between iterations.
    """
    if not self.hosts:
        raise MatchMakerException(
            _("Register before starting heartbeat."))

    def do_heartbeat():
        while True:
            self.send_heartbeats()
            eventlet.sleep(CONF.matchmaker_heartbeat_freq)

    self._heart = eventlet.spawn(do_heartbeat)
def __init__(self, message=None, **kwargs):
    self.kwargs = kwargs

    if not message:
        try:
            message = self.msg_fmt % kwargs
        except Exception:
            # kwargs doesn't match a variable in the message
            # log the issue and the kwargs
            LOG.exception(_('Exception in string format operation'))
            for name, value in six.iteritems(kwargs):
                LOG.error("%s: %s", name, value)
            # at least get the core message out if something happened
            message = self.msg_fmt

    super(RPCException, self).__init__(message)
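# Self-contained sketch of the msg_fmt fallback pattern used above: if the
# keyword arguments don't match the format string, keep the raw template
# instead of raising from inside the exception constructor. The class name
# and format string here are illustrative, not part of the library.
class _SketchError(Exception):
    msg_fmt = "operation failed on %(resource)s: %(reason)s"

    def __init__(self, message=None, **kwargs):
        if not message:
            try:
                message = self.msg_fmt % kwargs
            except KeyError:
                # Mismatched kwargs: fall back to the template so the
                # error still carries some information.
                message = self.msg_fmt
        super(_SketchError, self).__init__(message)


print(_SketchError(resource='volume-1', reason='timed out'))
print(_SketchError(unexpected='kwarg'))   # falls back to the raw template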
def reconnect(self, channel):
    """Re-declare the queue after a rabbit reconnect."""
    self.channel = channel
    self.kwargs['channel'] = channel
    self.queue = kombu.entity.Queue(**self.kwargs)
    try:
        self.queue.declare()
    except Exception as e:
        # NOTE: This exception may be triggered by a race condition.
        # Simply retrying will solve the error most of the time and
        # should work well enough as a workaround until the race condition
        # itself can be fixed.
        # TODO(jrosenboom): In order to be able to match the Exception
        # more specifically, we have to refactor ConsumerBase to use
        # 'channel_errors' of the kombu connection object that
        # has created the channel.
        # See https://bugs.launchpad.net/neutron/+bug/1318721 for details.
        LOG.error(_("Declaring queue failed with (%s), retrying"), e)
        self.queue.declare()
def _dispatch_and_reply(self, incoming, executor_callback):
    try:
        incoming.reply(self._dispatch(incoming.ctxt,
                                      incoming.message,
                                      executor_callback))
    except ExpectedException as e:
        LOG.debug(u'Expected exception during message handling (%s)',
                  e.exc_info[1])
        incoming.reply(failure=e.exc_info, log_failure=False)
    except Exception as e:
        # sys.exc_info() is deleted by LOG.exception().
        exc_info = sys.exc_info()
        LOG.error(_('Exception during message handling: %s'), e,
                  exc_info=exc_info)
        incoming.reply(failure=exc_info)
        # NOTE(dhellmann): Remove circular object reference
        # between the current stack frame and the traceback in
        # exc_info.
        del exc_info
def serialize_remote_exception(failure_info, log_failure=True):
    """Prepares exception data to be sent over rpc.

    Failure_info should be a sys.exc_info() tuple.
    """
    tb = traceback.format_exception(*failure_info)
    failure = failure_info[1]
    if log_failure:
        LOG.error(_("Returning exception %s to caller"),
                  six.text_type(failure))
        LOG.error(tb)

    kwargs = {}
    if hasattr(failure, 'kwargs'):
        kwargs = failure.kwargs

    # NOTE(matiu): With cells, it's possible to re-raise remote, remote
    # exceptions. Let's turn it back into the original exception type.
    cls_name = six.text_type(failure.__class__.__name__)
    mod_name = six.text_type(failure.__class__.__module__)
    if (cls_name.endswith(_REMOTE_POSTFIX) and
            mod_name.endswith(_REMOTE_POSTFIX)):
        cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
        mod_name = mod_name[:-len(_REMOTE_POSTFIX)]

    data = {
        'class': cls_name,
        'module': mod_name,
        'message': six.text_type(failure),
        'tb': tb,
        'args': failure.args,
        'kwargs': kwargs
    }

    json_data = jsonutils.dumps(data)

    return json_data
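# Self-contained sketch of the _REMOTE_POSTFIX handling above: when an
# already-remote exception is serialized again (e.g. re-raised across
# cells), strip the suffix so the receiving side reconstructs the original
# type. The class/module names below are made up for the example.
_REMOTE_POSTFIX = '_Remote'

cls_name = 'InstanceNotFound' + _REMOTE_POSTFIX
mod_name = 'nova.exception' + _REMOTE_POSTFIX
if cls_name.endswith(_REMOTE_POSTFIX) and mod_name.endswith(_REMOTE_POSTFIX):
    cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
    mod_name = mod_name[:-len(_REMOTE_POSTFIX)]
print(cls_name, mod_name)   # InstanceNotFound nova.exception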