def __init__(self, *args, **kwargs):
    max_retries = FLAGS.rabbit_max_retries
    sleep_time = FLAGS.rabbit_retry_interval
    tries = 0
    while True:
        tries += 1
        if tries > 1:
            time.sleep(sleep_time)
            # backoff for next retry attempt.. if there is one
            sleep_time += FLAGS.rabbit_retry_backoff
            if sleep_time > 30:
                sleep_time = 30
        try:
            super(Consumer, self).__init__(*args, **kwargs)
            self.failed_connection = False
            break
        except Exception as e:  # Catching all because carrot sucks
            self.failed_connection = True
            if max_retries > 0 and tries == max_retries:
                break
            fl_host = FLAGS.rabbit_host
            fl_port = FLAGS.rabbit_port
            fl_intv = sleep_time
            LOG.error(_('AMQP server on %(fl_host)s:%(fl_port)d is'
                        ' unreachable: %(e)s. Trying again in %(fl_intv)d'
                        ' seconds.') % locals())
    if self.failed_connection:
        LOG.error(_('Unable to connect to AMQP server '
                    'after %(tries)d tries. Shutting down.') % locals())
        sys.exit(1)
def process_data(self, message_data, message):
    """Consumer callback to call a method on a proxy object.

    Parses the message for validity and fires off a thread to call the
    proxy object method.

    Message data should be a dictionary with two keys:
        method: string representing the method to call
        args: dictionary of arg: value

    Example: {'method': 'echo', 'args': {'value': 42}}

    """
    LOG.debug(_('received %s') % message_data)
    # This will be popped off in _unpack_context
    msg_id = message_data.get('_msg_id', None)
    ctxt = _unpack_context(message_data)

    method = message_data.get('method')
    args = message_data.get('args', {})
    message.ack()
    if not method:
        # NOTE(vish): we may not want to ack here, but that means that bad
        #             messages stay in the queue indefinitely, so for now
        #             we just log the message and send an error string
        #             back to the caller
        LOG.warn(_('no method for message: %s') % message_data)
        ctxt.reply(msg_id, _('No method for message: %s') % message_data)
        return
    self.pool.spawn_n(self._process_data, ctxt, method, args)
def msg_reply(msg_id, reply=None, failure=None, ending=False):
    """Sends a reply or an error on the channel signified by msg_id.

    Failure should be a sys.exc_info() tuple.

    """
    if failure:
        message = str(failure[1])
        tb = traceback.format_exception(*failure)
        LOG.error(_("Returning exception %s to caller"), message)
        LOG.error(tb)
        failure = (failure[0].__name__, str(failure[1]), tb)

    with ConnectionPool.item() as conn:
        publisher = DirectPublisher(connection=conn, msg_id=msg_id)
        try:
            msg = {'result': reply, 'failure': failure}
            if ending:
                msg['ending'] = True
            publisher.send(msg)
        except TypeError:
            # The reply wasn't serializable; fall back to a dict of the
            # repr()'d attribute values.
            msg = {'result': dict((k, repr(v))
                                  for k, v in reply.__dict__.iteritems()),
                   'failure': failure}
            if ending:
                msg['ending'] = True
            publisher.send(msg)
        publisher.close()
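# Usage sketch (illustrative, not part of the module): reporting success
# or failure back to a caller with msg_reply(). The 'proxy.do_work'
# helper and 'msg_id' argument are assumed names for this example; the
# failure path mirrors _process_data below, which passes a raw
# sys.exc_info() tuple.
def _example_reply(msg_id, proxy):
    try:
        result = proxy.do_work()
    except Exception:
        # Pass the full exc_info tuple; msg_reply formats the traceback.
        msg_reply(msg_id, failure=sys.exc_info())
    else:
        msg_reply(msg_id, reply=result, ending=True)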
def __init__(self, connection=None, topic='broadcast', proxy=None):
    LOG.debug(_('Initing the Adapter Consumer for %s') % topic)
    self.proxy = proxy
    self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size)
    super(AdapterConsumer, self).__init__(connection=connection,
                                          topic=topic)
    self.register_callback(self.process_data)
def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
    """Wraps the parent fetch with some logic for failed connection."""
    # TODO(vish): the logic for failed connections and logging should be
    #             refactored into some sort of connection manager object
    try:
        if self.failed_connection:
            # NOTE(vish): connection is defined in the parent class, we
            #             can recreate it as long as we create the
            #             backend too
            # pylint: disable=W0201
            self.connection = Connection.recreate()
            self.backend = self.connection.create_backend()
            self.declare()
        result = super(Consumer, self).fetch(no_ack, auto_ack,
                                             enable_callbacks)
        if self.failed_connection:
            LOG.error(_('Reconnected to queue'))
            self.failed_connection = False
        return result
    # NOTE(vish): This is catching all errors because we really don't
    #             want exceptions to be logged 10 times a second if some
    #             persistent failure occurs.
    except Exception as e:  # pylint: disable=W0703
        if not self.failed_connection:
            LOG.exception(_('Failed to fetch message from queue: %s') % e)
            self.failed_connection = True
def send_message(topic, message, wait=True):
    """Sends a message for testing."""
    msg_id = uuid.uuid4().hex
    message.update({'_msg_id': msg_id})
    LOG.debug(_('topic is %s'), topic)
    LOG.debug(_('message %s'), message)

    if wait:
        consumer = messaging.Consumer(connection=Connection.instance(),
                                      queue=msg_id,
                                      exchange=msg_id,
                                      auto_delete=True,
                                      exchange_type='direct',
                                      routing_key=msg_id)
        consumer.register_callback(generic_response)

    publisher = messaging.Publisher(connection=Connection.instance(),
                                    exchange=FLAGS.control_exchange,
                                    durable=FLAGS.rabbit_durable_queues,
                                    exchange_type='topic',
                                    routing_key=topic)
    publisher.send(message)
    publisher.close()

    if wait:
        consumer.wait()
        consumer.close()
def cast(context, topic, msg):
    """Sends a message on a topic without waiting for a response."""
    LOG.debug(_('Making asynchronous cast on %s...'), topic)
    _pack_context(msg, context)
    with ConnectionPool.item() as conn:
        publisher = TopicPublisher(connection=conn, topic=topic)
        publisher.send(msg)
        publisher.close()
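# Usage sketch: a fire-and-forget invocation via cast(). The topic,
# method, and argument names here are hypothetical.
def _example_cast(context):
    cast(context, 'scheduler',
         {'method': 'run_instance', 'args': {'instance_id': 'i-1'}})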
def __init__(self, topic, connection=None):
    self.exchange = '%s_fanout' % topic
    self.queue = '%s_fanout' % topic
    self.durable = False
    self.auto_delete = True
    LOG.info(_('Creating "%(exchange)s" fanout exchange'),
             dict(exchange=self.exchange))
    super(FanoutPublisher, self).__init__(connection=connection)
def connect_error(self, exc, interval):
    """Callback when there are connection re-tries by kombu."""
    info = self.params.copy()
    info['intv'] = interval
    info['e'] = exc
    LOG.error(_('AMQP server on %(hostname)s:%(port)d is'
                ' unreachable: %(e)s. Trying again in %(intv)d'
                ' seconds.') % info)
def fanout_cast(context, topic, msg):
    """Sends a message on a fanout exchange without waiting for a response."""
    LOG.debug(_('Making asynchronous fanout cast...'))
    _pack_context(msg, context)
    with ConnectionPool.item() as conn:
        publisher = FanoutPublisher(topic, connection=conn)
        publisher.send(msg)
        publisher.close()
def notify(context, topic, msg):
    """Sends a notification event on a topic."""
    LOG.debug(_('Sending notification on %s...'), topic)
    _pack_context(msg, context)
    with ConnectionPool.item() as conn:
        publisher = TopicPublisher(connection=conn, topic=topic,
                                   durable=True)
        publisher.send(msg)
        publisher.close()
def _unpack_context(msg):
    """Unpack context from msg."""
    context_dict = {}
    for key in list(msg.keys()):
        # NOTE(vish): Some versions of python don't like unicode keys
        #             in kwargs.
        key = str(key)
        if key.startswith('_context_'):
            value = msg.pop(key)
            context_dict[key[9:]] = value
    context_dict['msg_id'] = msg.pop('_msg_id', None)
    LOG.debug(_('unpacked context: %s'), context_dict)
    return RpcContext.from_dict(context_dict)
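# For reference, a minimal sketch of the _pack_context counterpart that
# writes the '_context_'-prefixed keys unpacked above. This is inferred
# from the key format and the RpcContext.from_dict call; the module's
# real implementation (and the context.to_dict() method assumed here)
# may differ.
def _pack_context_sketch(msg, context):
    context_d = dict([('_context_%s' % key, value)
                      for (key, value) in context.to_dict().iteritems()])
    msg.update(context_d)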
def publisher_send(self, cls, topic, msg, **kwargs):
    """Send to a publisher based on the publisher class."""
    while True:
        try:
            publisher = cls(self.channel, topic, **kwargs)
            publisher.send(msg)
            return
        except self.connection.connection_errors as e:
            LOG.exception(_('Failed to publish message %s') % str(e))
            try:
                self.reconnect()
            except self.connection.connection_errors:
                pass
def __init__(self, connection=None, topic='broadcast', proxy=None):
    self.exchange = '%s_fanout' % topic
    self.routing_key = topic
    unique = uuid.uuid4().hex
    self.queue = '%s_fanout_%s' % (topic, unique)
    self.durable = False
    # Fanout creates unique queue names, so we should auto-remove
    # them when done, so they're not left around on restart.
    # Also, we're the only one that should be consuming.  exclusive
    # implies auto_delete, so we'll just set that..
    self.exclusive = True
    LOG.info(_('Created "%(exchange)s" fanout exchange '
               'with "%(key)s" routing key'),
             dict(exchange=self.exchange, key=self.routing_key))
    super(FanoutAdapterConsumer, self).__init__(connection=connection,
                                                topic=topic, proxy=proxy)
def multicall(context, topic, msg):
    """Make a call that returns multiple times."""
    # Can't use 'with' for multicall, as it returns an iterator
    # that will continue to use the connection.  When it's done,
    # connection.close() will get called which will put it back into
    # the pool
    LOG.debug(_('Making asynchronous call on %s ...'), topic)
    msg_id = uuid.uuid4().hex
    msg.update({'_msg_id': msg_id})
    LOG.debug(_('MSG_ID is %s') % (msg_id))
    _pack_context(msg, context)

    conn = ConnectionContext()
    wait_msg = MulticallWaiter(conn)
    conn.declare_direct_consumer(msg_id, wait_msg)
    conn.topic_send(topic, msg)
    return wait_msg
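# Usage sketch: consuming a multicall. Each value the remote method
# yields arrives as one element of the returned waiter (assumed here to
# be iterable); iteration stops when the 'ending' reply is seen. Topic
# and method names are illustrative only.
def _example_multicall(context):
    for reply in multicall(context, 'compute',
                           {'method': 'list_instances', 'args': {}}):
        LOG.debug(_('got partial result: %s'), reply)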
def iterconsume(self, limit=None):
    """Return an iterator that will consume from all queues/consumers."""
    while True:
        try:
            queues_head = self.consumers[:-1]
            queues_tail = self.consumers[-1]
            for queue in queues_head:
                queue.consume(nowait=True)
            queues_tail.consume(nowait=False)

            for iteration in itertools.count(0):
                if limit and iteration >= limit:
                    raise StopIteration
                yield self.connection.drain_events()
        except self.connection.connection_errors as e:
            LOG.exception(_('Failed to consume message from queue: '
                            '%s') % str(e))
            self.reconnect()
def multicall(context, topic, msg):
    """Make a call that returns multiple times."""
    LOG.debug(_('Making asynchronous call on %s ...'), topic)
    msg_id = uuid.uuid4().hex
    msg.update({'_msg_id': msg_id})
    LOG.debug(_('MSG_ID is %s') % (msg_id))
    _pack_context(msg, context)

    con_conn = ConnectionPool.get()
    consumer = DirectConsumer(connection=con_conn, msg_id=msg_id)
    wait_msg = MulticallWaiter(consumer)
    consumer.register_callback(wait_msg)

    publisher = TopicPublisher(connection=con_conn, topic=topic)
    publisher.send(msg)
    publisher.close()

    return wait_msg
def wait(self, limit=None):
    running = True
    while running:
        it = self.consumer_set.iterconsume(limit=limit)
        if not it:
            break
        while True:
            try:
                it.next()
            except StopIteration:
                return
            except greenlet.GreenletExit:
                running = False
                break
            except Exception:
                LOG.exception(_('Exception while processing consumer'))
                self.reconnect()
                # Break to outer loop
                break
def _process_data(self, ctxt, method, args):
    """Thread that magically looks for a method on the proxy
    object and calls it.
    """
    try:
        node_func = getattr(self.proxy, str(method))
        node_args = dict((str(k), v) for k, v in args.iteritems())
        # NOTE(vish): magic is fun!
        rval = node_func(context=ctxt, **node_args)
        # Check if the result was a generator
        if inspect.isgenerator(rval):
            for x in rval:
                ctxt.reply(x, None)
        else:
            ctxt.reply(rval, None)
        # This final reply with ending=True tells multicall that it
        # is done.
        ctxt.reply(ending=True)
    except Exception:
        LOG.exception(_('Exception during message handling'))
        ctxt.reply(None, sys.exc_info())
    return
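# Sketch of a proxy method whose generator return value is streamed back
# one reply at a time by _process_data above. The class and method names
# are illustrative; note the method must accept the 'context' kwarg that
# _process_data always passes. A caller would send, e.g.,
# {'method': 'count_to', 'args': {'limit': 5}}.
class _ExampleProxy(object):
    def count_to(self, context, limit):
        for i in xrange(limit):
            yield i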
def __call__(self, message_data):
    """Consumer callback to call a method on a proxy object.

    Parses the message for validity and fires off a thread to call the
    proxy object method.

    Message data should be a dictionary with two keys:
        method: string representing the method to call
        args: dictionary of arg: value

    Example: {'method': 'echo', 'args': {'value': 42}}

    """
    LOG.debug(_('received %s') % message_data)
    ctxt = _unpack_context(message_data)

    method = message_data.get('method')
    args = message_data.get('args', {})

    if not method:
        LOG.warn(_('no method for message: %s') % message_data)
        ctxt.reply(_('No method for message: %s') % message_data)
        return
    self.pool.spawn_n(self._process_data, ctxt, method, args)
def create(self):
    LOG.debug('Pool creating new connection')
    return Connection()
class Connection(object):
    """Connection object."""

    def __init__(self):
        self.consumers = []
        self.consumer_thread = None
        self.max_retries = FLAGS.rabbit_max_retries
        # Try forever?
        if self.max_retries <= 0:
            self.max_retries = None
        self.interval_start = FLAGS.rabbit_retry_interval
        self.interval_stepping = FLAGS.rabbit_retry_backoff
        # max retry-interval = 30 seconds
        self.interval_max = 30
        self.memory_transport = False

        self.params = dict(hostname=FLAGS.rabbit_host,
                           port=FLAGS.rabbit_port,
                           userid=FLAGS.rabbit_userid,
                           password=FLAGS.rabbit_password,
                           virtual_host=FLAGS.rabbit_virtual_host)
        if FLAGS.fake_rabbit:
            self.params['transport'] = 'memory'
            self.memory_transport = True
        else:
            self.memory_transport = False
        self.connection = None
        self.reconnect()

    def reconnect(self):
        """Handles reconnecting and re-establishing queues."""
        if self.connection:
            try:
                self.connection.close()
            except self.connection.connection_errors:
                pass
            time.sleep(1)

        self.connection = kombu.connection.BrokerConnection(**self.params)
        if self.memory_transport:
            # Kludge to speed up tests.
            self.connection.transport.polling_interval = 0.0
        self.consumer_num = itertools.count(1)

        try:
            self.connection.ensure_connection(
                    errback=self.connect_error,
                    max_retries=self.max_retries,
                    interval_start=self.interval_start,
                    interval_step=self.interval_stepping,
                    interval_max=self.interval_max)
        except self.connection.connection_errors as e:
            # We should only get here if max_retries is set.  We'll go
            # ahead and exit in this case.
            err_str = str(e)
            max_retries = self.max_retries
            LOG.error(_('Unable to connect to AMQP server '
                        'after %(max_retries)d tries: %(err_str)s') %
                      locals())
            sys.exit(1)
        LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') %
                 self.params)

        self.channel = self.connection.channel()
        # work around 'memory' transport bug in 1.1.3
        if self.memory_transport:
            self.channel._new_queue('ae.undeliver')
        for consumer in self.consumers:
            consumer.reconnect(self.channel)
        if self.consumers:
            LOG.debug(_('Re-established AMQP queues'))
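# Worked example of the retry schedule configured above (flag values
# assumed for illustration): with rabbit_retry_interval=1 and
# rabbit_retry_backoff=2, ensure_connection() waits 1s, 3s, 5s, 7s, ...
# between attempts, capped at interval_max=30s, and retries forever
# when rabbit_max_retries <= 0 (i.e. max_retries=None).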
def fanout_cast(context, topic, msg):
    """Sends a message on a fanout exchange without waiting for a response."""
    LOG.debug(_('Making asynchronous fanout cast...'))
    _pack_context(msg, context)
    with ConnectionContext() as conn:
        conn.fanout_send(topic, msg)
def cast(context, topic, msg):
    """Sends a message on a topic without waiting for a response."""
    LOG.debug(_('Making asynchronous cast on %s...'), topic)
    _pack_context(msg, context)
    with ConnectionContext() as conn:
        conn.topic_send(topic, msg)
def generic_response(message_data, message):
    """Logs a result and exits."""
    LOG.debug(_('response %s'), message_data)
    message.ack()
    sys.exit(0)
def notify(context, topic, msg):
    """Sends a notification event on a topic."""
    LOG.debug(_('Sending notification on %s...'), topic)
    _pack_context(msg, context)
    with ConnectionContext() as conn:
        conn.notify_send(topic, msg, durable=True)
def create(self):
    LOG.debug('Pool creating new connection')
    return Connection.instance(new=True)