def _process_data(self, msg_id, ctxt, method, args):
    """Thread that magically looks for a method on the proxy
    object and calls it.

    When msg_id is set, each result value is sent back with
    msg_reply(), followed by a final None that tells multicall the
    stream is done; on failure the exc_info triple is replied
    instead.  Without a msg_id, generator results are still drained
    so their side effects run.
    """
    try:
        # NOTE(review): method lookup and argument coercion happen
        # inside the try block so that an unknown method or bad args
        # are logged and replied to the caller instead of escaping
        # this thread unreported.
        node_func = getattr(self.proxy, str(method))
        # Keyword names must be native strings before **-expansion.
        node_args = dict((str(k), v) for k, v in args.iteritems())
        # NOTE(vish): magic is fun!
        rval = node_func(context=ctxt, **node_args)
        if msg_id:
            # Check if the result was a generator
            if isinstance(rval, types.GeneratorType):
                for x in rval:
                    msg_reply(msg_id, x, None)
            else:
                msg_reply(msg_id, rval, None)
            # This final None tells multicall that it is done.
            msg_reply(msg_id, None, None)
        elif isinstance(rval, types.GeneratorType):
            # NOTE(vish): this iterates through the generator
            list(rval)
    except Exception:
        LOG.exception('Exception during message handling')
        if msg_id:
            msg_reply(msg_id, None, sys.exc_info())
    return
def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
    """Wraps the parent fetch with some logic for failed connection."""
    # TODO(vish): the logic for failed connections and logging should be
    #             refactored into some sort of connection manager object
    try:
        if self.failed_connection:
            # NOTE(vish): connection is defined in the parent class, we can
            #             recreate it as long as we create the backend too
            # pylint: disable=W0201
            self.connection = Connection.recreate()
            self.backend = self.connection.create_backend()
            self.declare()
            # NOTE(review): log the recovery before delegating to the
            # parent; previously this sat after the return statement and
            # was unreachable dead code, so reconnects were never logged
            # and failed_connection was never cleared.
            LOG.error(_('Reconnected to queue'))
            self.failed_connection = False
        return super(Consumer, self).fetch(no_ack, auto_ack,
                                           enable_callbacks)
    # NOTE(vish): This is catching all errors because we really don't
    #             want exceptions to be logged 10 times a second if some
    #             persistent failure occurs.
    except Exception as e:  # pylint: disable=W0703
        if not self.failed_connection:
            # Interpolate outside _() so the untranslated template can
            # be found in the message catalog.
            LOG.exception(_('Failed to fetch message from queue: %s') % e)
            self.failed_connection = True
def _error_callback(exc):
    """Log a consume failure; escalate a qpid Empty to an RPC Timeout."""
    if not isinstance(exc, qpid.messaging.exceptions.Empty):
        # Any other error is logged and swallowed so consuming can go on.
        LOG.exception(_('Failed to consume message from queue: %s') %
                      str(exc))
        return
    # An Empty exception means the reply never arrived in time.
    LOG.exception(_('Timed out waiting for RPC response: %s') % str(exc))
    raise rpc_common.Timeout()
def _error_callback(exc):
    """Log consume errors; convert a qpid Empty into rpc_common.Timeout."""
    timed_out = isinstance(exc, qpid.messaging.exceptions.Empty)
    if timed_out:
        LOG.exception(
            _('Timed out waiting for RPC response: %s') % str(exc))
        raise rpc_common.Timeout()
    LOG.exception(
        _('Failed to consume message from queue: %s') % str(exc))
def publisher_send(self, cls, topic, msg, **kwargs):
    """Send to a publisher based on the publisher class.

    Retries forever: on a connection error the connection is rebuilt
    and the publish is attempted again until it succeeds.
    """
    while True:
        try:
            publisher = cls(self.channel, topic, **kwargs)
            publisher.send(msg)
            return
        except self.connection.connection_errors as e:
            # Interpolate outside _() so the untranslated template can
            # be found in the message catalog.
            LOG.exception(_('Failed to publish message %s') % str(e))
            try:
                self.reconnect()
            except self.connection.connection_errors:
                # Best effort: if reconnecting fails too, the outer
                # loop will retry both the reconnect and the publish.
                pass
def iterconsume(self, limit=None):
    """Return an iterator that will consume from all queues/consumers.

    :param limit: optional maximum number of events to yield; the
                  generator ends once the limit is reached.
    """
    while True:
        try:
            # All but the last consumer register without waiting; the
            # final consume() blocks so drain_events has work to do.
            queues_head = self.consumers[:-1]
            queues_tail = self.consumers[-1]
            for queue in queues_head:
                queue.consume(nowait=True)
            queues_tail.consume(nowait=False)
            for iteration in itertools.count(0):
                if limit and iteration >= limit:
                    # NOTE(review): a bare return ends the generator;
                    # equivalent to the old `raise StopIteration` but
                    # also valid under PEP 479.
                    return
                yield self.connection.drain_events()
        except self.connection.connection_errors as e:
            # Interpolate outside _() so the untranslated template can
            # be found in the message catalog.
            LOG.exception(_("Failed to consume message from queue: "
                            "%s") % str(e))
            self.reconnect()
def iterconsume(self, limit=None):
    """Return an iterator that will consume from all queues/consumers.

    :param limit: optional maximum number of events to yield; the
                  generator ends once the limit is reached.
    """
    while True:
        try:
            # All but the last consumer register without waiting; the
            # final consume() blocks so drain_events has work to do.
            queues_head = self.consumers[:-1]
            queues_tail = self.consumers[-1]
            for queue in queues_head:
                queue.consume(nowait=True)
            queues_tail.consume(nowait=False)
            for iteration in itertools.count(0):
                if limit and iteration >= limit:
                    # NOTE(review): a bare return ends the generator;
                    # equivalent to the old `raise StopIteration` but
                    # also valid under PEP 479.
                    return
                yield self.connection.drain_events()
        except self.connection.connection_errors as e:
            # Interpolate outside _() so the untranslated template can
            # be found in the message catalog.
            LOG.exception(_('Failed to consume message from queue: '
                            '%s') % str(e))
            self.reconnect()
def wait(self, limit=None):
    """Consume messages until asked to stop.

    Drains the consumer set's iterator; returns when it is exhausted,
    stops cleanly on GreenletExit, and on any other error logs,
    reconnects, and starts a fresh iterator.

    :param limit: passed through to iterconsume as the maximum number
                  of events per iterator.
    """
    running = True
    while running:
        it = self.consumer_set.iterconsume(limit=limit)
        if not it:
            break
        while True:
            try:
                it.next()
            except StopIteration:
                return
            except greenlet.GreenletExit:
                running = False
                break
            # NOTE(review): unused `as e` bindings removed; the full
            # traceback is already captured by LOG.exception.
            except Exception:
                LOG.exception(_("Exception while processing consumer"))
                self.reconnect()
                # Break to outer loop
                break
def _process_data(self, ctxt, method, args):
    """Thread that magically looks for a method on the proxy
    object and calls it.

    Streams each result value back through ctxt.reply(), then sends
    the ending marker so multicall knows the stream is complete; on
    failure the exc_info triple is replied instead.
    """
    try:
        node_func = getattr(self.proxy, str(method))
        # Keyword names must be native strings before **-expansion.
        node_args = dict((str(k), v) for k, v in args.iteritems())
        # NOTE(vish): magic is fun!
        rval = node_func(context=ctxt, **node_args)
        # Check if the result was a generator
        if inspect.isgenerator(rval):
            for x in rval:
                ctxt.reply(x, None)
        else:
            ctxt.reply(rval, None)
        # This final None tells multicall that it is done.
        ctxt.reply(ending=True)
    # NOTE(review): unused `as e` binding removed; LOG.exception and
    # sys.exc_info() already capture the active exception.
    except Exception:
        LOG.exception("Exception during message handling")
        ctxt.reply(None, sys.exc_info())
    return
def _process_data(self, ctxt, method, args):
    """Thread that magically looks for a method on the proxy
    object and calls it.

    Streams each result value back through ctxt.reply(), then sends
    the ending marker so multicall knows the stream is complete; on
    failure the exc_info triple is replied instead.
    """
    try:
        # NOTE(review): method lookup and argument coercion happen
        # inside the try block (matching the sibling implementation)
        # so an unknown method or bad args are replied to the caller
        # instead of escaping this thread unreported.
        node_func = getattr(self.proxy, str(method))
        # Keyword names must be native strings before **-expansion.
        node_args = dict((str(k), v) for k, v in args.iteritems())
        # NOTE(vish): magic is fun!
        rval = node_func(context=ctxt, **node_args)
        # Check if the result was a generator
        if inspect.isgenerator(rval):
            for x in rval:
                ctxt.reply(x, None)
        else:
            ctxt.reply(rval, None)
        # This final None tells multicall that it is done.
        ctxt.reply(ending=True)
    except Exception:
        LOG.exception('Exception during message handling')
        ctxt.reply(None, sys.exc_info())
    return
def _connect_error(exc):
    """Log a failed publish attempt for the enclosing topic."""
    LOG.exception(_("Failed to publish message to topic "
                    "'%(topic)s': %(err_str)s") %
                  {'topic': topic, 'err_str': str(exc)})
def _error_callback(exc):
    """Log any error raised while consuming from the queue."""
    message = _('Failed to consume message from queue: %s') % str(exc)
    LOG.exception(message)
def _connect_error(exc):
    """Log a failed publish attempt for the enclosing topic."""
    details = {'topic': topic, 'err_str': str(exc)}
    template = _("Failed to publish message to topic "
                 "'%(topic)s': %(err_str)s")
    LOG.exception(template % details)
def _error_callback(exc):
    """Log any failure seen while consuming from the queue."""
    err = str(exc)
    LOG.exception(_('Failed to consume message from queue: %s') % err)