def _error_callback(exc):
    if isinstance(exc, socket.timeout):
        LOG.debug(_("Timed out waiting for RPC response: %s") %
                  str(exc))
        raise rpc_common.Timeout()
    else:
        LOG.exception(_("Failed to consume message from queue: %s") %
                      str(exc))
        info["do_consume"] = True
def _connect(self, params):
    """Connect to rabbit.  Re-establish any queues that may have
    been declared before if we are reconnecting.  Exceptions should
    be handled by the caller.
    """
    if self.connection:
        LOG.info(_("Reconnecting to AMQP server on "
                   "%(hostname)s:%(port)d") % params)
        try:
            self.connection.release()
        except self.connection_errors:
            pass
        # Setting this in case the next statement fails, though
        # it shouldn't be doing any network operations, yet.
        self.connection = None
    self.connection = kombu.connection.BrokerConnection(**params)
    self.connection_errors = self.connection.connection_errors
    if self.memory_transport:
        # Kludge to speed up tests.
        self.connection.transport.polling_interval = 0.0
    self.consumer_num = itertools.count(1)
    self.connection.connect()
    self.channel = self.connection.channel()
    # work around 'memory' transport bug in 1.1.3
    if self.memory_transport:
        self.channel._new_queue("ae.undeliver")
    for consumer in self.consumers:
        consumer.reconnect(self.channel)
    LOG.info(_("Connected to AMQP server on %(hostname)s:%(port)d") %
             params)
def publisher(waiter):
    LOG.info(_("Creating proxy for topic: %s"), topic)

    try:
        # The topic is received over the network,
        # don't trust this input.
        if self.badchars.search(topic) is not None:
            emsg = _("Topic contained dangerous characters.")
            LOG.warn(emsg)
            raise RPCException(emsg)

        out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" % (ipc_dir, topic),
                             sock_type, bind=True)
    except RPCException:
        waiter.send_exception(*sys.exc_info())
        return

    self.topic_proxy[topic] = eventlet.queue.LightQueue(
        CONF.rpc_zmq_topic_backlog)
    self.sockets.append(out_sock)

    # It takes some time for a pub socket to open,
    # before we can have any faith in doing a send() to it.
    if sock_type == zmq.PUB:
        eventlet.sleep(0.5)

    waiter.send(True)

    while True:
        data = self.topic_proxy[topic].get()
        out_sock.send(data)
        LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") %
                  {"data": data})
def _inner():
    if initial_delay:
        greenthread.sleep(initial_delay)

    try:
        while self._running:
            start = timeutils.utcnow()
            self.f(*self.args, **self.kw)
            end = timeutils.utcnow()
            if not self._running:
                break
            delay = interval - timeutils.delta_seconds(start, end)
            if delay <= 0:
                LOG.warn(_('task run outlasted interval by %s sec') %
                         -delay)
            greenthread.sleep(delay if delay > 0 else 0)
    except LoopingCallDone as e:
        self.stop()
        done.send(e.retvalue)
    except Exception:
        LOG.exception(_('in fixed duration looping call'))
        done.send_exception(*sys.exc_info())
        return
    else:
        done.send(True)
def create_consumer(self, topic, proxy, fanout=False):
    # Register with matchmaker.
    _get_matchmaker().register(topic, CONF.rpc_zmq_host)

    # Subscription scenarios
    if fanout:
        sock_type = zmq.SUB
        subscribe = ("", fanout)[type(fanout) == str]
        topic = "fanout~" + topic.split(".", 1)[0]
    else:
        sock_type = zmq.PULL
        subscribe = None
        topic = ".".join((topic.split(".", 1)[0], CONF.rpc_zmq_host))

    if topic in self.topics:
        LOG.info(_("Skipping topic registration. Already registered."))
        return

    # Receive messages from (local) proxy
    inaddr = "ipc://%s/zmq_topic_%s" % (CONF.rpc_zmq_ipc_dir, topic)

    LOG.debug(_("Consumer is a zmq.%s"),
              ["PULL", "SUB"][sock_type == zmq.SUB])

    self.reactor.register(proxy, inaddr, sock_type,
                          subscribe=subscribe, in_bind=False)
    self.topics.append(topic)
def inner(*args, **kwargs):
    # NOTE(soren): If we ever go natively threaded, this will be racy.
    #              See http://stackoverflow.com/questions/5390569/dyn
    #              amically-allocating-and-destroying-mutexes
    sem = _semaphores.get(name, semaphore.Semaphore())
    if name not in _semaphores:
        # this check is not racy - we're already holding ref locally
        # so GC won't remove the item and there was no IO switch
        # (only valid in greenthreads)
        _semaphores[name] = sem

    with sem:
        LOG.debug(_('Got semaphore "%(lock)s" for method '
                    '"%(method)s"...'), {'lock': name,
                                         'method': f.__name__})
        if external and not CONF.disable_process_locking:
            LOG.debug(_('Attempting to grab file lock "%(lock)s" for '
                        'method "%(method)s"...'),
                      {'lock': name, 'method': f.__name__})
            cleanup_dir = False

            # We need a copy of lock_path because it is non-local
            local_lock_path = lock_path
            if not local_lock_path:
                local_lock_path = CONF.lock_path

            if not local_lock_path:
                cleanup_dir = True
                local_lock_path = tempfile.mkdtemp()

            if not os.path.exists(local_lock_path):
                cleanup_dir = True
                fileutils.ensure_tree(local_lock_path)

            # NOTE(mikal): the lock name cannot contain directory
            # separators
            safe_name = name.replace(os.sep, '_')
            lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
            lock_file_path = os.path.join(local_lock_path,
                                          lock_file_name)

            try:
                lock = InterProcessLock(lock_file_path)
                with lock:
                    LOG.debug(_('Got file lock "%(lock)s" at %(path)s '
                                'for method "%(method)s"...'),
                              {'lock': name,
                               'path': lock_file_path,
                               'method': f.__name__})
                    retval = f(*args, **kwargs)
            finally:
                # NOTE(vish): This removes the tempdir if we needed
                #             to create one. This is used to cleanup
                #             the locks left behind by unit tests.
                if cleanup_dir:
                    shutil.rmtree(local_lock_path)
        else:
            retval = f(*args, **kwargs)

    return retval
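# Example (illustrative, not from the original source). A minimal usage
# sketch, assuming the enclosing decorator factory is the usual
# synchronized(name, lock_file_prefix, external, lock_path) from this
# module; the names below are hypothetical:
#
#     @synchronized('volumes', 'nova-', external=True)
#     def update_volume():
#         # Only one thread (and, with external=True, one process)
#         # may execute this body at a time.
#         ...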
def run_periodic_tasks(self, context, raise_on_error=False):
    """Tasks to be run at a periodic interval."""
    for task_name, task in self._periodic_tasks:
        full_task_name = '.'.join([self.__class__.__name__, task_name])

        ticks_to_skip = self._ticks_to_skip[task_name]
        if ticks_to_skip > 0:
            LOG.debug(_("Skipping %(full_task_name)s, %(ticks_to_skip)s"
                        " ticks left until next run"),
                      dict(full_task_name=full_task_name,
                           ticks_to_skip=ticks_to_skip))
            self._ticks_to_skip[task_name] -= 1
            continue

        self._ticks_to_skip[task_name] = task._ticks_between_runs
        LOG.debug(_("Running periodic task %(full_task_name)s"),
                  dict(full_task_name=full_task_name))

        try:
            task(self, context)
        except Exception as e:
            if raise_on_error:
                raise
            LOG.exception(_("Error during %(full_task_name)s: %(e)s"),
                          dict(e=e, full_task_name=full_task_name))
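# Example (illustrative, not from the original source). A hypothetical
# manager sketch, assuming a companion periodic_task decorator from this
# module sets _ticks_between_runs on the method:
#
#     class ComputeManager(periodic_task.PeriodicTasks):
#         @periodic_task.periodic_task(ticks_between_runs=10)
#         def _report_state(self, context):
#             pass  # skipped for 10 ticks between consecutive runs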
def _wait_child(self):
    try:
        # Don't block if no child processes have exited
        pid, status = os.waitpid(0, os.WNOHANG)
        if not pid:
            return None
    except OSError as exc:
        if exc.errno not in (errno.EINTR, errno.ECHILD):
            raise
        return None

    if os.WIFSIGNALED(status):
        sig = os.WTERMSIG(status)
        LOG.info(_('Child %(pid)d killed by signal %(sig)d'),
                 dict(pid=pid, sig=sig))
    else:
        code = os.WEXITSTATUS(status)
        LOG.info(_('Child %(pid)s exited with status %(code)d'),
                 dict(pid=pid, code=code))

    if pid not in self.children:
        LOG.warning(_('pid %d not in child list'), pid)
        return None

    wrap = self.children.pop(pid)
    wrap.children.remove(pid)
    return wrap
def consume(self, sock):
    # TODO(ewindisch): use zero-copy (i.e. references, not copying)
    data = sock.recv()
    LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
    if sock in self.mapping:
        LOG.debug(_("ROUTER RELAY-OUT %(data)s") % {"data": data})
        self.mapping[sock].send(data)
        return

    proxy = self.proxies[sock]

    if data[2] == "cast":  # Legacy protocol
        packenv = data[3]

        ctx, msg = _deserialize(packenv)
        request = rpc_common.deserialize_msg(msg)
        ctx = RpcContext.unmarshal(ctx)
    elif data[2] == "impl_zmq_v2":
        packenv = data[4:]

        msg = unflatten_envelope(packenv)
        request = rpc_common.deserialize_msg(msg)

        # Unmarshal only after verifying the message.
        ctx = RpcContext.unmarshal(data[3])
    else:
        LOG.error(_("ZMQ Envelope version unsupported or unknown."))
        return

    self.pool.spawn_n(self.process, proxy, ctx, request)
def __init__(self, addr, zmq_type, bind=True, subscribe=None):
    self.sock = _get_ctxt().socket(zmq_type)
    self.addr = addr
    self.type = zmq_type
    self.subscriptions = []

    # Support failures on sending/receiving on wrong socket type.
    self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
    self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
    self.can_sub = zmq_type in (zmq.SUB,)

    # Support list, str, & None for subscribe arg (cast to list)
    do_sub = {
        list: subscribe,
        str: [subscribe],
        type(None): []
    }[type(subscribe)]

    for f in do_sub:
        self.subscribe(f)

    str_data = {"addr": addr, "type": self.socket_s(),
                "subscribe": subscribe, "bind": bind}

    LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
    LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
    LOG.debug(_("-> bind: %(bind)s"), str_data)

    try:
        if bind:
            self.sock.bind(addr)
        else:
            self.sock.connect(addr)
    except Exception:
        raise RPCException(_("Could not open socket."))
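# Example (illustrative, not from the original source). A sketch of the
# two common configurations; the addresses are hypothetical:
#
#     # A PULL consumer bound to a local IPC endpoint:
#     consumer = ZmqSocket("ipc:///var/run/openstack/zmq_topic_foo",
#                          zmq.PULL, bind=True)
#     # A SUB socket connecting out, with two subscriptions:
#     sub = ZmqSocket("tcp://127.0.0.1:9501", zmq.SUB, bind=False,
#                     subscribe=["topic.a", "topic.b"])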
def register(self, proxy, in_addr, zmq_type_in, out_addr=None,
             zmq_type_out=None, in_bind=True, out_bind=True,
             subscribe=None):

    LOG.info(_("Registering reactor"))

    if zmq_type_in not in (zmq.PULL, zmq.SUB):
        raise RPCException("Bad input socktype")

    # Items push in.
    inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
                    subscribe=subscribe)

    self.proxies[inq] = proxy
    self.sockets.append(inq)

    LOG.info(_("In reactor registered"))

    if not out_addr:
        return

    if zmq_type_out not in (zmq.PUSH, zmq.PUB):
        raise RPCException("Bad output socktype")

    # Items push out.
    outq = ZmqSocket(out_addr, zmq_type_out, bind=out_bind)

    self.mapping[inq] = outq
    self.mapping[outq] = inq
    self.sockets.append(outq)

    LOG.info(_("Out reactor registered"))
def _parse_check(rule):
    """Parse a single base check rule into an appropriate Check object."""

    # Handle the special checks
    if rule == '!':
        return FalseCheck()
    elif rule == '@':
        return TrueCheck()

    try:
        kind, match = rule.split(':', 1)
    except Exception:
        LOG.exception(_("Failed to understand rule %(rule)s") % locals())
        # If the rule is invalid, we'll fail closed
        return FalseCheck()

    # Find what implements the check
    if kind in _checks:
        return _checks[kind](kind, match)
    elif None in _checks:
        return _checks[None](kind, match)
    else:
        LOG.error(_("No handler for matches of kind %s") % kind)
        return FalseCheck()
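# Example (illustrative, not from the original source). How rule strings
# map to Check objects, per the logic above:
#
#     _parse_check('!')           # -> FalseCheck(): always denies
#     _parse_check('@')           # -> TrueCheck(): always allows
#     _parse_check('role:admin')  # -> _checks['role']('role', 'admin')
#     _parse_check('bogus')       # no ':', fails closed -> FalseCheck()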
def wait(self):
    """Loop waiting on children to die and respawning as necessary."""

    LOG.debug(_('Full set of CONF:'))
    CONF.log_opt_values(LOG, std_logging.DEBUG)

    while self.running:
        wrap = self._wait_child()
        if not wrap:
            # Yield to other threads if no children have exited
            # Sleep for a short time to avoid excessive CPU usage
            # (see bug #1095346)
            eventlet.greenthread.sleep(.01)
            continue

        while self.running and len(wrap.children) < wrap.workers:
            self._start_child(wrap)

    if self.sigcaught:
        signame = {signal.SIGTERM: 'SIGTERM',
                   signal.SIGINT: 'SIGINT'}[self.sigcaught]
        LOG.info(_('Caught %s, stopping children'), signame)

    for pid in self.children:
        try:
            os.kill(pid, signal.SIGTERM)
        except OSError as exc:
            if exc.errno != errno.ESRCH:
                raise

    # Wait for children to die
    if self.children:
        LOG.info(_('Waiting on %d children to exit'), len(self.children))
        while self.children:
            self._wait_child()
def multicall(conf, context, topic, msg, timeout, connection_pool):
    """Make a call that returns multiple times."""
    # TODO(pekowski): Remove all these comments in Havana.
    # For amqp_rpc_single_reply_queue = False,
    # Can't use 'with' for multicall, as it returns an iterator
    # that will continue to use the connection.  When it's done,
    # connection.close() will get called which will put it back into
    # the pool
    # For amqp_rpc_single_reply_queue = True,
    # The 'with' statement is mandatory for closing the connection
    LOG.debug(_('Making synchronous call on %s ...'), topic)
    msg_id = uuid.uuid4().hex
    msg.update({'_msg_id': msg_id})
    LOG.debug(_('MSG_ID is %s') % (msg_id))
    _add_unique_id(msg)
    pack_context(msg, context)

    # TODO(pekowski): Remove this flag and the code under the if clause
    #                 in Havana.
    if not conf.amqp_rpc_single_reply_queue:
        conn = ConnectionContext(conf, connection_pool)
        wait_msg = MulticallWaiter(conf, conn, timeout)
        conn.declare_direct_consumer(msg_id, wait_msg)
        conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
    else:
        with _reply_proxy_create_sem:
            if not connection_pool.reply_proxy:
                connection_pool.reply_proxy = ReplyProxy(conf,
                                                         connection_pool)
        msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
        wait_msg = MulticallProxyWaiter(conf, msg_id, timeout,
                                        connection_pool)
        with ConnectionContext(conf, connection_pool) as conn:
            conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
    return wait_msg
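# Example (illustrative, not from the original source). A hedged usage
# sketch: the returned waiter is iterated to receive each reply until the
# ending marker arrives; conf, context and pool are assumed to exist in
# the caller.
#
#     msg = {'method': 'echo', 'args': {'value': 42}}
#     for reply in multicall(conf, context, 'compute', msg,
#                            timeout=30, connection_pool=pool):
#         handle(reply)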
def _process_data(self, ctxt, version, method, namespace, args):
    """Process a message in a new thread.

    If the proxy object we have has a dispatch method
    (see rpc.dispatcher.RpcDispatcher), pass it the version,
    method, and args and let it dispatch as appropriate.  If not, use
    the old behavior of magically calling the specified method on the
    proxy we have here.
    """
    ctxt.update_store()
    try:
        rval = self.proxy.dispatch(ctxt, version, method, namespace,
                                   **args)
        # Check if the result was a generator
        if inspect.isgenerator(rval):
            for x in rval:
                ctxt.reply(x, None, connection_pool=self.connection_pool)
        else:
            ctxt.reply(rval, None, connection_pool=self.connection_pool)
        # This final None tells multicall that it is done.
        ctxt.reply(ending=True, connection_pool=self.connection_pool)
    except rpc_common.ClientException as e:
        LOG.debug(_('Expected exception during message handling (%s)') %
                  e._exc_info[1])
        ctxt.reply(None, e._exc_info,
                   connection_pool=self.connection_pool,
                   log_failure=False)
    except Exception:
        # sys.exc_info() is deleted by LOG.exception().
        exc_info = sys.exc_info()
        LOG.error(_('Exception during message handling'),
                  exc_info=exc_info)
        ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
def __call__(self, message_data):
    """Consumer callback to call a method on a proxy object.

    Parses the message for validity and fires off a thread to call the
    proxy object method.

    Message data should be a dictionary with two keys:
        method: string representing the method to call
        args: dictionary of arg: value

    Example: {'method': 'echo', 'args': {'value': 42}}
    """
    # It is important to clear the context here, because at this point
    # the previous context is stored in local.store.context
    if hasattr(local.store, 'context'):
        del local.store.context
    rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
    self.msg_id_cache.check_duplicate_message(message_data)
    ctxt = unpack_context(self.conf, message_data)
    method = message_data.get('method')
    args = message_data.get('args', {})
    version = message_data.get('version')
    namespace = message_data.get('namespace')
    if not method:
        LOG.warn(_('no method for message: %s') % message_data)
        ctxt.reply(_('No method for message: %s') % message_data,
                   connection_pool=self.connection_pool)
        return
    self.pool.spawn_n(self._process_data, ctxt, version, method,
                      namespace, args)
def _multi_send(method, context, topic, msg, timeout=None,
                envelope=False, _msg_id=None):
    """Wrap the sending of messages.

    Dispatches to the matchmaker and sends the message to all
    relevant hosts.
    """
    conf = CONF
    LOG.debug(_("%(msg)s") % {"msg": " ".join(map(pformat, (topic, msg)))})

    queues = _get_matchmaker().queues(topic)
    LOG.debug(_("Sending message(s) to: %s"), queues)

    # Don't stack if we have no matchmaker results
    if len(queues) == 0:
        LOG.warn(_("No matchmaker results. Not casting."))
        # While not strictly a timeout, callers know how to handle
        # this exception and a timeout isn't too big a lie.
        raise rpc_common.Timeout(_("No match from matchmaker."))

    # This supports brokerless fanout (addresses > 1)
    for queue in queues:
        (_topic, ip_addr) = queue
        _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)

        if method.__name__ == "_cast":
            eventlet.spawn_n(method, _addr, context,
                             _topic, msg, timeout, envelope, _msg_id)
            return
        return method(_addr, context, _topic, msg, timeout, envelope)
def _error_callback(exc):
    if isinstance(exc, qpid_exceptions.Empty):
        LOG.debug(_('Timed out waiting for RPC response: %s') %
                  str(exc))
        raise rpc_common.Timeout()
    else:
        LOG.exception(_('Failed to consume message from queue: %s') %
                      str(exc))
def notify(context, publisher_id, event_type, priority, payload):
    """Sends a notification using the specified driver

    :param publisher_id: the source worker_type.host of the message
    :param event_type: the literal type of event (ex. Instance Creation)
    :param priority: patterned after the enumeration of Python logging
                     levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
    :param payload: A python dictionary of attributes

    Outgoing message format includes the above parameters, and appends the
    following:

    message_id
        a UUID representing the id for this notification
    timestamp
        the GMT timestamp the notification was sent at

    The composite message will be constructed as a dictionary of the above
    attributes, which will then be sent via the transport mechanism defined
    by the driver.

    Message example::

        {'message_id': str(uuid.uuid4()),
         'publisher_id': 'compute.host1',
         'timestamp': timeutils.utcnow(),
         'priority': 'WARN',
         'event_type': 'compute.create_instance',
         'payload': {'instance_id': 12, ... }}

    """
    if priority not in log_levels:
        raise BadPriorityException(
            _('%s not in valid priorities') % priority)

    # Ensure everything is JSON serializable.
    payload = jsonutils.to_primitive(payload, convert_instances=True)

    msg = dict(message_id=str(uuid.uuid4()),
               publisher_id=publisher_id,
               event_type=event_type,
               priority=priority,
               payload=payload,
               timestamp=str(timeutils.utcnow()))

    for driver in _get_drivers():
        try:
            driver.notify(context, msg)
        except Exception as e:
            LOG.exception(_("Problem '%(e)s' attempting to "
                            "send to notification system. "
                            "Payload=%(payload)s")
                          % dict(e=e, payload=payload))
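# Example (illustrative, not from the original source). A minimal sketch
# of emitting a WARN-level notification with this function's signature;
# the publisher id and payload values are hypothetical:
#
#     notify(context, 'compute.host1', 'compute.create_instance',
#            'WARN', {'instance_id': 12})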
def __init__(self, info=None, topic=None, method=None):
    """
    :param info: Extra info to convey to the user
    :param topic: The topic that the rpc call was sent to
    :param method: The name of the rpc method being called
    """
    self.info = info
    self.topic = topic
    self.method = method
    super(Timeout, self).__init__(
        None,
        info=info or _('<unknown>'),
        topic=topic or _('<unknown>'),
        method=method or _('<unknown>'))
def cast(conf, context, topic, msg, connection_pool):
    """Sends a message on a topic without waiting for a response."""
    LOG.debug(_('Making asynchronous cast on %s...'), topic)
    _add_unique_id(msg)
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool) as conn:
        conn.topic_send(topic, rpc_common.serialize_msg(msg))
def serialize_remote_exception(failure_info, log_failure=True):
    """Prepares exception data to be sent over rpc.

    Failure_info should be a sys.exc_info() tuple.

    """
    tb = traceback.format_exception(*failure_info)
    failure = failure_info[1]
    if log_failure:
        LOG.error(_("Returning exception %s to caller"),
                  unicode(failure))
        # LOG.error(_("Returning exception %s to caller"),
        #           six.text_type(failure))
        LOG.error(tb)

    kwargs = {}
    if hasattr(failure, 'kwargs'):
        kwargs = failure.kwargs

    data = {
        'class': str(failure.__class__.__name__),
        'module': str(failure.__class__.__module__),
        'message': unicode(failure),
        # 'message': six.text_type(failure),
        'tb': tb,
        'args': failure.args,
        'kwargs': kwargs
    }

    json_data = jsonutils.dumps(data)

    return json_data
def deprecated(self, msg, *args, **kwargs):
    stdmsg = _("Deprecated: %s") % msg
    if CONF.fatal_deprecations:
        self.critical(stdmsg, *args, **kwargs)
        raise DeprecatedConfig(msg=stdmsg)
    else:
        self.warn(stdmsg, *args, **kwargs)
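# Example (illustrative, not from the original source). The intended call
# pattern, assuming LOG is an adapter exposing this method; the message
# text is hypothetical:
#
#     LOG.deprecated(_('The sql_connection option is deprecated'))
#     # warns by default; logs critical and raises DeprecatedConfig
#     # when CONF.fatal_deprecations is True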
def fanout_cast(conf, context, topic, msg, connection_pool):
    """Sends a message on a fanout exchange without waiting for a response."""
    LOG.debug(_('Making asynchronous fanout cast...'))
    _add_unique_id(msg)
    pack_context(msg, context)
    with ConnectionContext(conf, connection_pool) as conn:
        conn.fanout_send(topic, rpc_common.serialize_msg(msg))
def run(self, key):
    # Assume starts with "fanout~", strip it for lookup.
    nkey = key.split("fanout~")[1:][0]
    if not self._ring_has(nkey):
        LOG.warn(_("No key defining hosts for topic '%s', "
                   "see ringfile") % (nkey,))
        return []
    return map(lambda x: (key + "." + x, x), self.ring[nkey])
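# Example (illustrative, not from the original source). With a ring of
# {"scheduler": ["host1", "host2"]}, a fanout key expands to one
# (key.host, host) pair per host:
#
#     run("fanout~scheduler")
#     # -> [("fanout~scheduler.host1", "host1"),
#     #     ("fanout~scheduler.host2", "host2")]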
def __exit__(self, exc_type, exc_val, exc_tb):
    try:
        self.unlock()
        self.lockfile.close()
    except IOError:
        LOG.exception(_("Could not release the acquired lock `%s`"),
                      self.fname)
def add_call_waiter(self, waiter, msg_id):
    self._num_call_waiters += 1
    if self._num_call_waiters > self._num_call_waiters_wrn_threshhold:
        LOG.warn(_('Number of call waiters is greater than warning '
                   'threshold: %d. There could be a MulticallProxyWaiter '
                   'leak.') % self._num_call_waiters_wrn_threshhold)
        self._num_call_waiters_wrn_threshhold *= 2
    self._call_waiters[msg_id] = waiter
def _pipe_watcher(self):
    # This will block until the write end is closed when the parent
    # dies unexpectedly
    self.readpipe.read()

    LOG.info(_('Parent process has died unexpectedly, exiting'))

    sys.exit(1)
def _process_data(self, message_data):
    msg_id = message_data.pop('_msg_id', None)
    waiter = self._call_waiters.get(msg_id)
    if not waiter:
        LOG.warn(_('No calling threads waiting for msg_id: %(msg_id)s'
                   ', message: %(data)s'),
                 {'msg_id': msg_id, 'data': message_data})
    else:
        waiter.put(message_data)
def _from_xml(self, datastring):
    plurals = set(self.metadata.get('plurals', {}))

    try:
        node = xmlutils.safe_minidom_parse_string(datastring).childNodes[0]
        return {node.nodeName: self._from_xml_node(node, plurals)}
    except expat.ExpatError:
        msg = _("cannot understand XML")
        raise exception.MalformedRequestBody(reason=msg)
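# Example (illustrative, not from the original source). A minimal sketch
# of the translation, assuming empty 'plurals' metadata on the
# deserializer instance:
#
#     self._from_xml('<server><name>demo</name></server>')
#     # -> {'server': {'name': 'demo'}}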