def _unpack_context(msg):
    """Extract the RPC context keys from *msg* and build an RpcContext.

    Every '_context_*' key is popped out of *msg* (so the remaining dict
    is the bare method call) and collected, minus the prefix, into the
    context dict.  '_msg_id' is popped as well and carried along so the
    callee can reply.
    """
    prefix = '_context_'
    context_dict = {}
    for key in list(msg.keys()):
        # NOTE(vish): Some versions of python don't like unicode keys
        # in kwargs.
        key = str(key)
        if key.startswith(prefix):
            context_dict[key[len(prefix):]] = msg.pop(key)
    context_dict['msg_id'] = msg.pop('_msg_id', None)
    LOG.debug(_('unpacked context: %s'), context_dict)
    return RpcContext.from_dict(context_dict)
def multicall(context, topic, msg):
    """Make a call that returns multiple times."""
    # NOTE: a 'with' block is deliberately avoided here.  The returned
    # MulticallWaiter is an iterator that keeps using the connection
    # after this function returns; only when the waiter finishes does
    # connection.close() fire and put the connection back in the pool.
    LOG.debug(_('Making asynchronous call on %s ...'), topic)
    msg_id = uuid.uuid4().hex
    LOG.debug(_('MSG_ID is %s') % (msg_id))
    msg['_msg_id'] = msg_id
    _pack_context(msg, context)
    conn = ConnectionContext()
    wait_msg = MulticallWaiter(conn)
    # Declare the reply consumer before publishing so the response
    # cannot be dropped.
    conn.declare_direct_consumer(msg_id, wait_msg)
    conn.topic_send(topic, msg)
    return wait_msg
def multicall(context, topic, msg):
    """Make a call that returns multiple times."""
    LOG.debug(_('Making asynchronous call on %s ...'), topic)
    msg_id = uuid.uuid4().hex
    LOG.debug(_('MSG_ID is %s') % (msg_id))
    msg['_msg_id'] = msg_id
    _pack_context(msg, context)
    # Grab a pooled connection; the waiter keeps using it, so it is not
    # returned here.
    pooled_conn = ConnectionPool.get()
    # Set up the direct reply consumer before publishing so the
    # response cannot be lost.
    direct_consumer = DirectConsumer(connection=pooled_conn, msg_id=msg_id)
    waiter = MulticallWaiter(direct_consumer)
    direct_consumer.register_callback(waiter)
    topic_pub = TopicPublisher(connection=pooled_conn, topic=topic)
    topic_pub.send(msg)
    topic_pub.close()
    return waiter
def __call__(self, message_data):
    """Consumer callback to call a method on a proxy object.

    Parses the message for validity and fires off a thread to call the
    proxy object method.

    Message data should be a dictionary with two keys:
        method: string representing the method to call
        args: dictionary of arg: value

    Example: {'method': 'echo', 'args': {'value': 42}}
    """
    LOG.debug(_('received %s') % message_data)
    ctxt = _unpack_context(message_data)
    method_name = message_data.get('method')
    method_args = message_data.get('args', {})
    if method_name:
        # Hand the actual dispatch off to the green thread pool.
        self.pool.spawn_n(self._process_data, ctxt, method_name, method_args)
        return
    # Malformed message: log it and tell the caller there was nothing
    # to invoke.
    LOG.warn(_('no method for message: %s') % message_data)
    ctxt.reply(_('No method for message: %s') % message_data)
def generic_response(message_data, message):
    """Logs a result and exits.

    Used as a one-shot consumer callback: acknowledge the message so the
    broker will not redeliver it, then terminate the process.
    """
    LOG.debug(_('response %s'), message_data)
    # Ack before exiting so the broker does not requeue the response.
    message.ack()
    sys.exit(0)
def create(self):
    # Pool factory hook: each slot is backed by a brand-new broker
    # connection (new=True forces a fresh instance instead of a shared
    # singleton).
    LOG.debug('Pool creating new connection')
    new_connection = Connection.instance(new=True)
    return new_connection
def fanout_cast(context, topic, msg):
    """Broadcast a message on a fanout exchange; do not wait for a reply."""
    LOG.debug(_('Making asynchronous fanout cast...'))
    _pack_context(msg, context)
    # Borrow a pooled connection just long enough to publish.
    with ConnectionContext() as pooled:
        pooled.fanout_send(topic, msg)
def cast(context, topic, msg):
    """Publish a message on a topic; do not wait for a reply."""
    LOG.debug(_('Making asynchronous cast on %s...'), topic)
    _pack_context(msg, context)
    # Borrow a pooled connection just long enough to publish.
    with ConnectionContext() as pooled:
        pooled.topic_send(topic, msg)
def create(self):
    # Pool factory hook: every slot gets its own Connection object.
    LOG.debug('Pool creating new connection')
    fresh = Connection()
    return fresh
class Connection(object):
    """Connection object.

    Wraps a kombu BrokerConnection, tracks the consumers declared on it,
    and knows how to tear down and rebuild the connection (and re-attach
    the consumers) after a failure.
    """

    def __init__(self):
        # Consumers registered on this connection; re-attached on reconnect.
        self.consumers = []
        self.consumer_thread = None
        self.max_retries = FLAGS.rabbit_max_retries
        # Try forever?
        if self.max_retries <= 0:
            # kombu treats max_retries=None as "retry without limit".
            self.max_retries = None
        self.interval_start = FLAGS.rabbit_retry_interval
        self.interval_stepping = FLAGS.rabbit_retry_backoff
        # max retry-interval = 30 seconds
        self.interval_max = 30
        self.memory_transport = False
        self.params = dict(hostname=FLAGS.rabbit_host,
                           port=FLAGS.rabbit_port,
                           userid=FLAGS.rabbit_userid,
                           password=FLAGS.rabbit_password,
                           virtual_host=FLAGS.rabbit_virtual_host)
        if FLAGS.fake_rabbit:
            # In-memory transport lets tests run without a real broker.
            self.params['transport'] = 'memory'
            self.memory_transport = True
        else:
            self.memory_transport = False
        self.connection = None
        self.reconnect()

    def reconnect(self):
        """Handles reconnecting and re-establishing queues"""
        if self.connection:
            # Best-effort close of the old connection before rebuilding.
            try:
                self.connection.close()
            except self.connection.connection_errors:
                pass
            # Brief pause so a flapping broker is not hammered.
            time.sleep(1)
        self.connection = kombu.connection.BrokerConnection(**self.params)
        if self.memory_transport:
            # Kludge to speed up tests.
            self.connection.transport.polling_interval = 0.0
        # Fresh counter so re-declared consumers get fresh tags.
        self.consumer_num = itertools.count(1)
        try:
            # connect_error is expected to be defined elsewhere on this
            # class; ensure_connection retries with backoff per the
            # interval_* settings above.
            self.connection.ensure_connection(
                errback=self.connect_error,
                max_retries=self.max_retries,
                interval_start=self.interval_start,
                interval_step=self.interval_stepping,
                interval_max=self.interval_max)
        except self.connection.connection_errors, e:
            # We should only get here if max_retries is set.  We'll go
            # ahead and exit in this case.
            err_str = str(e)
            max_retries = self.max_retries
            LOG.error(
                _('Unable to connect to AMQP server '
                  'after %(max_retries)d tries: %(err_str)s') % locals())
            sys.exit(1)
        # NOTE(review): here the %-interpolation happens inside _() —
        # works, but bypasses translation of the template, unlike the
        # LOG.error above; confirm whether this is intentional.
        LOG.info(
            _('Connected to AMQP server on %(hostname)s:%(port)d' %
              self.params))
        self.channel = self.connection.channel()
        # work around 'memory' transport bug in 1.1.3
        if self.memory_transport:
            self.channel._new_queue('ae.undeliver')
        # Re-attach any consumers that existed before the reconnect.
        for consumer in self.consumers:
            consumer.reconnect(self.channel)
        if self.consumers:
            LOG.debug(_("Re-established AMQP queues"))
def notify(context, topic, msg):
    """Publish a notification event on a topic (durable delivery)."""
    LOG.debug(_('Sending notification on %s...'), topic)
    _pack_context(msg, context)
    # Borrow a pooled connection just long enough to publish.
    with ConnectionContext() as pooled:
        pooled.notify_send(topic, msg, durable=True)
class Connection(object):
    """Connection object.

    Wraps a qpid.messaging.Connection, tracks the consumers registered
    on it by receiver, and knows how to (re)open the connection and
    re-establish the session and consumers after a failure.
    """

    def __init__(self, server_params=None):
        self.session = None
        # Map of str(receiver) -> consumer, so incoming messages can be
        # routed back to the consumer that owns the receiver.
        self.consumers = {}
        self.consumer_thread = None

        if server_params is None:
            server_params = {}

        default_params = dict(hostname=FLAGS.qpid_hostname,
                              port=FLAGS.qpid_port,
                              username=FLAGS.qpid_username,
                              password=FLAGS.qpid_password)

        # NOTE: copy the caller's dict before filling in defaults so
        # setdefault() does not mutate the argument the caller passed in.
        params = dict(server_params)
        for key in default_params.keys():
            params.setdefault(key, default_params[key])

        self.broker = params['hostname'] + ":" + str(params['port'])
        # Create the connection - this does not open the connection
        self.connection = qpid.messaging.Connection(self.broker)

        # Check if flags are set and if so set them for the connection
        # before we call open
        self.connection.username = params['username']
        self.connection.password = params['password']
        self.connection.sasl_mechanisms = FLAGS.qpid_sasl_mechanisms
        self.connection.reconnect = FLAGS.qpid_reconnect
        if FLAGS.qpid_reconnect_timeout:
            self.connection.reconnect_timeout = FLAGS.qpid_reconnect_timeout
        if FLAGS.qpid_reconnect_limit:
            self.connection.reconnect_limit = FLAGS.qpid_reconnect_limit
        if FLAGS.qpid_reconnect_interval_max:
            self.connection.reconnect_interval_max = (
                FLAGS.qpid_reconnect_interval_max)
        if FLAGS.qpid_reconnect_interval_min:
            self.connection.reconnect_interval_min = (
                FLAGS.qpid_reconnect_interval_min)
        if FLAGS.qpid_reconnect_interval:
            self.connection.reconnect_interval = FLAGS.qpid_reconnect_interval
        # BUG FIX: this previously assigned 'self.connection.hearbeat'
        # (typo), which qpid silently accepted as a new attribute, so
        # the configured heartbeat never took effect.
        self.connection.heartbeat = FLAGS.qpid_heartbeat
        self.connection.protocol = FLAGS.qpid_protocol
        self.connection.tcp_nodelay = FLAGS.qpid_tcp_nodelay

        # Open is part of reconnect -
        # NOTE(WGH) not sure we need this with the reconnect flags
        self.reconnect()

    def _register_consumer(self, consumer):
        # Key by the receiver's string form so it can be looked up when
        # a message arrives on that receiver.
        self.consumers[str(consumer.get_receiver())] = consumer

    def _lookup_consumer(self, receiver):
        return self.consumers[str(receiver)]

    def reconnect(self):
        """Handles reconnecting and re-establishing sessions and queues"""
        if self.connection.opened():
            # Best-effort close of the old connection before reopening.
            try:
                self.connection.close()
            except qpid.messaging.exceptions.ConnectionError:
                pass

        # Keep retrying until the broker accepts the connection.
        while True:
            try:
                self.connection.open()
            except qpid.messaging.exceptions.ConnectionError as e:
                LOG.error(_('Unable to connect to AMQP server: %s ' % str(e)))
                time.sleep(FLAGS.qpid_reconnect_interval or 1)
            else:
                break

        LOG.info(_('Connected to AMQP server on %s' % self.broker))

        self.session = self.connection.session()

        # Re-attach any consumers that existed before the reconnect.
        for consumer in self.consumers.itervalues():
            consumer.reconnect(self.session)

        if self.consumers:
            LOG.debug(_("Re-established AMQP queues"))
def create(self):
    # Pool factory hook: instantiate whatever connection class this
    # pool was configured with.
    LOG.debug('Pool creating new connection')
    fresh = self.connection_cls()
    return fresh