def _connect(self):
    """Connect to rabbit.  Re-establish any queues that may have
    been declared before if we are reconnecting.  Exceptions should
    be handled by the caller.
    """
    if self.connection:
        LOG.info(_("Reconnecting to AMQP server on "
                   "%(hostname)s:%(port)d") % self.params)
        try:
            self.connection.close()
        except self.connection_errors:
            pass
        # Setting this in case the next statement fails, though
        # it shouldn't be doing any network operations, yet.
        self.connection = None
    self.connection = kombu.connection.BrokerConnection(**self.params)
    self.connection_errors = self.connection.connection_errors
    if self.memory_transport:
        # Kludge to speed up tests.
        self.connection.transport.polling_interval = 0.0
    self.consumer_num = itertools.count(1)
    self.connection.connect()
    self.channel = self.connection.channel()
    # work around 'memory' transport bug in 1.1.3
    if self.memory_transport:
        self.channel._new_queue('ae.undeliver')
    for consumer in self.consumers:
        consumer.reconnect(self.channel)
    LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d'),
             self.params)
def create_consumer(self, topic, proxy, fanout=False):
    # Only consume on the base topic name.
    topic = topic.split('.', 1)[0]

    LOG.info(_("Create Consumer for topic (%(topic)s)") %
             {'topic': topic})

    # Subscription scenarios
    if fanout:
        subscribe = ('', fanout)[type(fanout) == str]
        sock_type = zmq.SUB
        topic = 'fanout~' + topic
    else:
        sock_type = zmq.PULL
        subscribe = None

    # Receive messages from (local) proxy
    inaddr = "ipc://%s/zmq_topic_%s" % \
        (self.conf.rpc_zmq_ipc_dir, topic)

    LOG.debug(_("Consumer is a zmq.%s"),
              ['PULL', 'SUB'][sock_type == zmq.SUB])

    self.reactor.register(proxy, inaddr, sock_type,
                          subscribe=subscribe, in_bind=False)
def _multi_send(method, context, topic, msg, timeout=None):
    """
    Wraps the sending of messages,
    dispatches to the matchmaker and sends
    message to all relevant hosts.
    """
    conf = FLAGS
    LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})

    queues = matchmaker.queues(topic)
    LOG.debug(_("Sending message(s) to: %s"), queues)

    # Don't stack if we have no matchmaker results
    if len(queues) == 0:
        LOG.warn(_("No matchmaker results. Not casting."))
        # While not strictly a timeout, callers know how to handle
        # this exception and a timeout isn't too big a lie.
        raise rpc_common.Timeout("No match from matchmaker.")

    # This supports brokerless fanout (addresses > 1)
    for queue in queues:
        (_topic, ip_addr) = queue
        _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)

        if method.__name__ == '_cast':
            eventlet.spawn_n(method, _addr, context,
                             _topic, _topic, msg, timeout)
            return
        return method(_addr, context, _topic, _topic, msg, timeout)
def register_opts(conf):
    """Registration of options for this driver."""
    #NOTE(ewindisch): ZMQ_CTX and matchmaker
    # are initialized here as this is as good
    # an initialization method as any.

    # We memoize through these globals
    global ZMQ_CTX
    global matchmaker
    global FLAGS

    if not FLAGS:
        conf.register_opts(zmq_opts)
        FLAGS = conf

    # Don't re-set, if this method is called twice.
    if not ZMQ_CTX:
        ZMQ_CTX = zmq.Context(conf.rpc_zmq_contexts)

    if not matchmaker:
        # rpc_zmq_matchmaker should be set to a 'module.Class'
        mm_path = conf.rpc_zmq_matchmaker.split('.')
        mm_module = '.'.join(mm_path[:-1])
        mm_class = mm_path[-1]

        # Only initialize a class.
        if mm_path[-1][0] not in string.ascii_uppercase:
            LOG.error(_("Matchmaker could not be loaded.\n"
                        "rpc_zmq_matchmaker is not a class."))
            raise RPCException(_("Error loading Matchmaker."))

        mm_impl = importutils.import_module(mm_module)
        mm_constructor = getattr(mm_impl, mm_class)
        matchmaker = mm_constructor()
def register(self, proxy, in_addr, zmq_type_in, out_addr=None,
             zmq_type_out=None, in_bind=True, out_bind=True,
             subscribe=None):

    LOG.info(_("Registering reactor"))

    if zmq_type_in not in (zmq.PULL, zmq.SUB):
        raise RPCException("Bad input socktype")

    # Items push in.
    inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
                    subscribe=subscribe)

    self.proxies[inq] = proxy
    self.sockets.append(inq)

    LOG.info(_("In reactor registered"))

    if not out_addr:
        return

    if zmq_type_out not in (zmq.PUSH, zmq.PUB):
        raise RPCException("Bad output socktype")

    # Items push out.
    outq = ZmqSocket(out_addr, zmq_type_out, bind=out_bind)

    self.mapping[inq] = outq
    self.mapping[outq] = inq
    self.sockets.append(outq)

    LOG.info(_("Out reactor registered"))
def __init__(self, addr, zmq_type, bind=True, subscribe=None):
    self.sock = ZMQ_CTX.socket(zmq_type)
    self.addr = addr
    self.type = zmq_type
    self.subscriptions = []

    # Support failures on sending/receiving on wrong socket type.
    self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
    self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
    self.can_sub = zmq_type in (zmq.SUB, )

    # Support list, str, & None for subscribe arg (cast to list)
    do_sub = {
        list: subscribe,
        str: [subscribe],
        type(None): []
    }[type(subscribe)]

    for f in do_sub:
        self.subscribe(f)

    str_data = {'addr': addr, 'type': self.socket_s(),
                'subscribe': subscribe, 'bind': bind}

    LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
    LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
    LOG.debug(_("-> bind: %(bind)s"), str_data)

    try:
        if bind:
            self.sock.bind(addr)
        else:
            self.sock.connect(addr)
    except Exception:
        raise RPCException(_("Could not open socket."))
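# Hedged usage sketch (illustrative, not part of the original driver): how
# ZmqSocket tends to be constructed for the two consuming cases seen elsewhere
# in this section -- a bound PULL consumer and a connected SUB reply waiter.
# The helper name, ipc_dir value, and msg_id value are hypothetical.
def _example_make_sockets(ipc_dir, msg_id):
    # Topic consumer: bind and pull everything sent to the topic socket.
    consumer = ZmqSocket("ipc://%s/zmq_topic_compute" % ipc_dir,
                         zmq.PULL, bind=True)
    # Reply waiter: connect a SUB socket filtered on the outstanding msg_id.
    reply_waiter = ZmqSocket("ipc://%s/zmq_topic_zmq_replies" % ipc_dir,
                             zmq.SUB, subscribe=msg_id, bind=False)
    return consumer, reply_waiter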
def _error_callback(exc):
    if isinstance(exc, qpid.messaging.exceptions.Empty):
        LOG.exception(_('Timed out waiting for RPC response: %s') %
                      str(exc))
        raise rpc_common.Timeout()
    else:
        LOG.exception(_('Failed to consume message from queue: %s') %
                      str(exc))
def _error_callback(exc):
    if isinstance(exc, socket.timeout):
        LOG.exception(_('Timed out waiting for RPC response: %s') %
                      str(exc))
        raise rpc_common.Timeout()
    else:
        LOG.exception(_('Failed to consume message from queue: %s') %
                      str(exc))
        info['do_consume'] = True
def serialize_remote_exception(failure_info):
    """Prepares exception data to be sent over rpc.

    Failure_info should be a sys.exc_info() tuple.
    """
    tb = traceback.format_exception(*failure_info)
    failure = failure_info[1]
    LOG.error(_("Returning exception %s to caller"), unicode(failure))
    LOG.error(tb)

    kwargs = {}
    if hasattr(failure, 'kwargs'):
        kwargs = failure.kwargs

    data = {
        'class': str(failure.__class__.__name__),
        'module': str(failure.__class__.__module__),
        'message': unicode(failure),
        'tb': tb,
        'args': failure.args,
        'kwargs': kwargs
    }

    json_data = jsonutils.dumps(data)

    return json_data
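# Hedged usage sketch (illustrative, not from the original module): a server-
# side handler would typically call serialize_remote_exception() with
# sys.exc_info() from inside an except block and ship the JSON blob back to
# the caller under an 'exc' key, as _get_response() does elsewhere in this
# section.  The helper name _example_handle_call is hypothetical; it assumes
# sys is already imported in the module.
def _example_handle_call(proxy, ctxt, version, method, args):
    try:
        return proxy.dispatch(ctxt, version, method, **args)
    except Exception:
        # The caller-side code later deserializes this and re-raises it
        # as a RemoteError.
        return {'exc': serialize_remote_exception(sys.exc_info())}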
class Timeout(RPCException):
    """Signifies that a timeout has occurred.

    This exception is raised if the rpc_response_timeout is reached while
    waiting for a response from the remote side.
    """
    message = _("Timeout while waiting on RPC response.")
def _callback(raw_message):
    message = self.channel.message_to_python(raw_message)
    try:
        callback(message.payload)
        message.ack()
    except Exception:
        LOG.exception(_("Failed to process message... skipping it."))
def consume(self, sock):
    #TODO(ewindisch): use zero-copy (i.e. references, not copying)
    data = sock.recv()
    LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
    if sock in self.mapping:
        LOG.debug(_("ROUTER RELAY-OUT %(data)s") % {'data': data})
        self.mapping[sock].send(data)
        return

    msg_id, topic, style, in_msg = data

    ctx, request = _deserialize(in_msg)
    ctx = RpcContext.unmarshal(ctx)

    proxy = self.proxies[sock]

    self.pool.spawn_n(self.process, style, topic,
                      proxy, ctx, request)
def consume(self):
    """Fetch the message and pass it to the callback object"""
    message = self.receiver.fetch()
    try:
        self.callback(message.content)
    except Exception:
        logging.exception(_("Failed to process message... skipping it."))
    finally:
        self.session.acknowledge(message)
def _serialize(data):
    """
    Serialization wrapper
    We prefer using JSON, but it cannot encode all types.
    Error if a developer passes us bad data.
    """
    try:
        return str(jsonutils.dumps(data, ensure_ascii=True))
    except TypeError:
        LOG.error(_("JSON serialization failed."))
        raise
def subscribe(self, msg_filter):
    """Subscribe."""
    if not self.can_sub:
        raise RPCException("Cannot subscribe on this socket.")
    LOG.debug(_("Subscribing to %s"), msg_filter)

    try:
        self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
    except Exception:
        return

    self.subscriptions.append(msg_filter)
def _call(addr, context, msg_id, topic, msg, timeout=None):
    # timeout_response is how long we wait for a response
    timeout = timeout or FLAGS.rpc_response_timeout

    # The msg_id is used to track replies.
    msg_id = str(uuid.uuid4().hex)

    # Replies always come into the reply service.
    reply_topic = "zmq_replies.%s" % FLAGS.rpc_zmq_host

    LOG.debug(_("Creating payload"))
    # Curry the original request into a reply method.
    mcontext = RpcContext.marshal(context)
    payload = {
        'method': '-reply',
        'args': {
            'msg_id': msg_id,
            'context': mcontext,
            'topic': reply_topic,
            'msg': [mcontext, msg]
        }
    }

    LOG.debug(_("Creating queue socket for reply waiter"))

    # Messages arriving async.
    # TODO(ewindisch): have reply consumer with dynamic subscription mgmt
    with Timeout(timeout, exception=rpc_common.Timeout):
        try:
            msg_waiter = ZmqSocket(
                "ipc://%s/zmq_topic_zmq_replies" % FLAGS.rpc_zmq_ipc_dir,
                zmq.SUB, subscribe=msg_id, bind=False)

            LOG.debug(_("Sending cast"))
            _cast(addr, context, msg_id, topic, payload)

            LOG.debug(_("Cast sent; Waiting reply"))
            # Blocks until receives reply
            msg = msg_waiter.recv()
            LOG.debug(_("Received message: %s"), msg)
            LOG.debug(_("Unpacking response"))
            responses = _deserialize(msg[-1])
        # ZMQError trumps the Timeout error.
        except zmq.ZMQError:
            raise RPCException("ZMQ Socket Error")
        finally:
            if 'msg_waiter' in vars():
                msg_waiter.close()

    # It seems we don't need to do all of the following,
    # but perhaps it would be useful for multicall?
    # One effect of this is that we're checking all
    # responses for Exceptions.
    for resp in responses:
        if isinstance(resp, types.DictType) and 'exc' in resp:
            raise rpc_common.deserialize_remote_exception(FLAGS, resp['exc'])

    return responses[-1]
def consume(self, sock):
    ipc_dir = self.conf.rpc_zmq_ipc_dir

    #TODO(ewindisch): use zero-copy (i.e. references, not copying)
    data = sock.recv()
    msg_id, topic, style, in_msg = data
    topic = topic.split('.', 1)[0]

    LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data)))

    # Handle zmq_replies magic
    if topic.startswith('fanout~'):
        sock_type = zmq.PUB
    elif topic.startswith('zmq_replies'):
        sock_type = zmq.PUB
        inside = _deserialize(in_msg)
        msg_id = inside[-1]['args']['msg_id']
        response = inside[-1]['args']['response']
        LOG.debug(_("->response->%s"), response)
        data = [str(msg_id), _serialize(response)]
    else:
        sock_type = zmq.PUSH

    if topic not in self.topic_proxy:
        outq = ZmqSocket("ipc://%s/zmq_topic_%s" % (ipc_dir, topic),
                         sock_type, bind=True)
        self.topic_proxy[topic] = outq
        self.sockets.append(outq)
        LOG.info(_("Created topic proxy: %s"), topic)

        # It takes some time for a pub socket to open,
        # before we can have any faith in doing a send() to it.
        if sock_type == zmq.PUB:
            eventlet.sleep(.5)

    LOG.debug(_("ROUTER RELAY-OUT START %(data)s") % {'data': data})
    self.topic_proxy[topic].send(data)
    LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") % {'data': data})
def reconnect(self):
    """Handles reconnecting and re-establishing sessions and queues"""
    if self.connection.opened():
        try:
            self.connection.close()
        except qpid.messaging.exceptions.ConnectionError:
            pass

    while True:
        try:
            self.connection.open()
        except qpid.messaging.exceptions.ConnectionError, e:
            LOG.error(_('Unable to connect to AMQP server: %s'), e)
            time.sleep(self.conf.qpid_reconnect_interval or 1)
        else:
            break
def _get_response(self, ctx, proxy, topic, data):
    """Process a curried message and cast the result to topic."""
    LOG.debug(_("Running func with context: %s"), ctx.to_dict())
    data.setdefault('version', None)
    # Default to an empty mapping so **data['args'] below stays valid.
    data.setdefault('args', {})

    try:
        result = proxy.dispatch(
            ctx, data['version'], data['method'], **data['args'])
        return ConsumerBase.normalize_reply(result, ctx.replies)
    except greenlet.GreenletExit:
        # ignore these since they are just from shutdowns
        pass
    except Exception:
        return {'exc':
                rpc_common.serialize_remote_exception(sys.exc_info())}
def __init__(self, message=None, **kwargs):
    self.kwargs = kwargs

    if not message:
        try:
            message = self.message % kwargs
        except Exception:
            # kwargs doesn't match a variable in the message
            # log the issue and the kwargs
            LOG.exception(_('Exception in string format operation'))
            for name, value in kwargs.iteritems():
                LOG.error("%s: %s" % (name, value))
            # at least get the core message out if something happened
            message = self.message

    super(RPCException, self).__init__(message)
class RemoteError(RPCException):
    """Signifies that a remote class has raised an exception.

    Contains a string representation of the type of the original exception,
    the value of the original exception, and the traceback.  These are
    sent to the parent as a joined string so printing the exception
    contains all of the relevant info.
    """
    message = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")

    def __init__(self, exc_type=None, value=None, traceback=None):
        self.exc_type = exc_type
        self.value = value
        self.traceback = traceback
        super(RemoteError, self).__init__(exc_type=exc_type,
                                          value=value,
                                          traceback=traceback)
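# Hedged usage sketch (illustrative, not from the original module): RemoteError
# is what the consuming side would raise once the 'exc' payload produced by
# serialize_remote_exception() has been decoded.  The exception type, value,
# and traceback strings below are hypothetical.
def _example_raise_remote_error():
    raise RemoteError(exc_type='InstanceNotFound',
                      value='Instance 42 could not be found.',
                      traceback='Traceback (most recent call last): ...')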
def reply(self, ctx, proxy,
          msg_id=None, context=None, topic=None, msg=None):
    """Reply to a casted call."""
    # Our real method is curried into msg['args']
    child_ctx = RpcContext.unmarshal(msg[0])
    response = ConsumerBase.normalize_reply(
        self._get_response(child_ctx, proxy, topic, msg[1]),
        ctx.replies)

    LOG.debug(_("Sending reply"))
    cast(FLAGS, ctx, topic, {
        'method': '-process_reply',
        'args': {
            'msg_id': msg_id,
            'response': response
        }
    })
def _deserialize(data):
    """
    Deserialization wrapper
    """
    LOG.debug(_("Deserializing: %s"), data)
    return jsonutils.loads(data)
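# Hedged round-trip sketch for the two wrappers above: _serialize() refuses
# anything JSON cannot encode, so payloads are limited to plain dicts, lists,
# strings, and numbers.  Illustrative helper only; the name and payload are
# hypothetical.
def _example_roundtrip():
    payload = {'method': 'echo', 'args': {'value': 1}}
    wire_data = _serialize(payload)    # str containing JSON
    return _deserialize(wire_data)     # back to a dict equal to payload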
def send(self, data):
    if not self.can_send:
        raise RPCException(_("You cannot send on this socket."))
    self.sock.send_multipart(data)
def recv(self):
    if not self.can_recv:
        raise RPCException(_("You cannot recv on this socket."))
    return self.sock.recv_multipart()
def _connect_error(exc):
    log_info = {'topic': topic, 'err_str': str(exc)}
    LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
                "%(err_str)s") % log_info)
class InvalidRPCConnectionReuse(RPCException):
    message = _("Invalid reuse of an RPC connection.")
def _connect_error(exc):
    log_info = {'topic': topic, 'err_str': str(exc)}
    LOG.exception(_("Failed to publish message to topic "
                    "'%(topic)s': %(err_str)s") % log_info)
class UnsupportedRpcVersion(RPCException):
    message = _("Specified RPC version, %(version)s, not supported by "
                "this endpoint.")
def _consume(sock):
    LOG.info(_("Consuming socket"))
    while True:
        self.consume(sock)
class Connection(object):
    """Connection object."""

    pool = None

    def __init__(self, conf, server_params=None):
        self.consumers = []
        self.consumer_thread = None
        self.conf = conf
        self.max_retries = self.conf.rabbit_max_retries
        # Try forever?
        if self.max_retries <= 0:
            self.max_retries = None
        self.interval_start = self.conf.rabbit_retry_interval
        self.interval_stepping = self.conf.rabbit_retry_backoff
        # max retry-interval = 30 seconds
        self.interval_max = 30
        self.memory_transport = False

        if server_params is None:
            server_params = {}

        # Keys to translate from server_params to kombu params
        server_params_to_kombu_params = {'username': 'userid'}

        params = {}
        for sp_key, value in server_params.iteritems():
            p_key = server_params_to_kombu_params.get(sp_key, sp_key)
            params[p_key] = value

        params.setdefault('hostname', self.conf.rabbit_host)
        params.setdefault('port', self.conf.rabbit_port)
        params.setdefault('userid', self.conf.rabbit_userid)
        params.setdefault('password', self.conf.rabbit_password)
        params.setdefault('virtual_host', self.conf.rabbit_virtual_host)

        self.params = params

        if self.conf.fake_rabbit:
            self.params['transport'] = 'memory'
            self.memory_transport = True
        else:
            self.memory_transport = False

        if self.conf.rabbit_use_ssl:
            self.params['ssl'] = self._fetch_ssl_params()

        self.connection = None
        self.reconnect()

    def _fetch_ssl_params(self):
        """Handles fetching what ssl params
        should be used for the connection (if any)"""
        ssl_params = dict()

        # http://docs.python.org/library/ssl.html - ssl.wrap_socket
        if self.conf.kombu_ssl_version:
            ssl_params['ssl_version'] = self.conf.kombu_ssl_version
        if self.conf.kombu_ssl_keyfile:
            ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile
        if self.conf.kombu_ssl_certfile:
            ssl_params['certfile'] = self.conf.kombu_ssl_certfile
        if self.conf.kombu_ssl_ca_certs:
            ssl_params['ca_certs'] = self.conf.kombu_ssl_ca_certs
            # We might want to allow variations in the
            # future with this?
            ssl_params['cert_reqs'] = ssl.CERT_REQUIRED

        if not ssl_params:
            # Just have the default behavior
            return True
        else:
            # Return the extended behavior
            return ssl_params

    def _connect(self):
        """Connect to rabbit.  Re-establish any queues that may have
        been declared before if we are reconnecting.  Exceptions should
        be handled by the caller.
        """
        if self.connection:
            LOG.info(_("Reconnecting to AMQP server on "
                       "%(hostname)s:%(port)d") % self.params)
            try:
                self.connection.close()
            except self.connection_errors:
                pass
            # Setting this in case the next statement fails, though
            # it shouldn't be doing any network operations, yet.
            self.connection = None
        self.connection = kombu.connection.BrokerConnection(**self.params)
        self.connection_errors = self.connection.connection_errors
        if self.memory_transport:
            # Kludge to speed up tests.
            self.connection.transport.polling_interval = 0.0
        self.consumer_num = itertools.count(1)
        self.connection.connect()
        self.channel = self.connection.channel()
        # work around 'memory' transport bug in 1.1.3
        if self.memory_transport:
            self.channel._new_queue('ae.undeliver')
        for consumer in self.consumers:
            consumer.reconnect(self.channel)
        LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d'),
                 self.params)

    def reconnect(self):
        """Handles reconnecting and re-establishing queues.
        Will retry up to self.max_retries number of times.
        self.max_retries = 0 means to retry forever.
        Sleep between tries, starting at self.interval_start
        seconds, backing off self.interval_stepping number of seconds
        each attempt.
        """

        attempt = 0
        while True:
            attempt += 1
            try:
                self._connect()
                return
            except (self.connection_errors, IOError), e:
                pass
            except Exception, e:
                # NOTE(comstud): Unfortunately it's possible for amqplib
                # to return an error not covered by its transport
                # connection_errors in the case of a timeout waiting for
                # a protocol response.  (See paste link in LP888621)
                # So, we check all exceptions for 'timeout' in them
                # and try to reconnect in this case.
                if 'timeout' not in str(e):
                    raise

            log_info = {}
            log_info['err_str'] = str(e)
            log_info['max_retries'] = self.max_retries
            log_info.update(self.params)

            if self.max_retries and attempt == self.max_retries:
                LOG.exception(_('Unable to connect to AMQP server on '
                                '%(hostname)s:%(port)d after %(max_retries)d '
                                'tries: %(err_str)s') % log_info)
                # NOTE(comstud): Copied from original code.  There's
                # really no better recourse because if this was a queue we
                # need to consume on, we have no way to consume anymore.
                sys.exit(1)

            if attempt == 1:
                sleep_time = self.interval_start or 1
            elif attempt > 1:
                sleep_time += self.interval_stepping
            if self.interval_max:
                sleep_time = min(sleep_time, self.interval_max)

            log_info['sleep_time'] = sleep_time
            LOG.exception(_('AMQP server on %(hostname)s:%(port)d is'
                            ' unreachable: %(err_str)s. Trying again in '
                            '%(sleep_time)d seconds.') % log_info)
            time.sleep(sleep_time)
def _consume():
    nxt_receiver = self.session.next_receiver(timeout=timeout)
    try:
        self._lookup_consumer(nxt_receiver).consume()
    except Exception:
        LOG.exception(_("Error processing message.  Skipping it."))
def _error_callback(exc):
    log_info = {'topic': topic, 'err_str': str(exc)}
    LOG.exception(_("Failed to publish message to topic "
                    "'%(topic)s': %(err_str)s") % log_info)