def consume(self, sock):
    # TODO(ewindisch): use zero-copy (i.e. references, not copying)
    data = sock.recv()
    LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)

    if sock in self.mapping:
        LOG.debug(_("ROUTER RELAY-OUT %(data)s") % {'data': data})
        self.mapping[sock].send(data)
        return

    proxy = self.proxies[sock]

    if data[2] == 'cast':  # Legacy protocol
        packenv = data[3]

        ctx, msg = _deserialize(packenv)
        request = rpc_common.deserialize_msg(msg)
        ctx = RpcContext.unmarshal(ctx)
    elif data[2] == 'impl_zmq_v2':
        packenv = data[4:]

        msg = unflatten_envelope(packenv)
        request = rpc_common.deserialize_msg(msg)

        # Unmarshal only after verifying the message.
        ctx = RpcContext.unmarshal(data[3])
    else:
        LOG.error(_("ZMQ Envelope version unsupported or unknown."))
        return

    self.pool.spawn_n(self.process, proxy, ctx, request)
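# For orientation only: a hypothetical sketch of the multipart frame layout
# the indices above assume (data[2] is the envelope style, data[3] the packed
# legacy envelope or marshalled context, data[4:] the flattened key/value
# pairs of a v2 envelope). The frame contents are invented for illustration;
# only the positions mirror the consume() logic.
def sketch_unflatten_envelope(packenv):
    # Pair up a flat [k1, v1, k2, v2, ...] list into a dict, the way the
    # v2 branch above does via unflatten_envelope().
    it = iter(packenv)
    return dict(zip(it, it))

sketch_v2_frames = ['msg_id', 'topic', 'impl_zmq_v2', '<marshalled-context>',
                    'oslo.version', '2.0', 'oslo.message', '{"method": "ping"}']
assert sketch_unflatten_envelope(sketch_v2_frames[4:]) == {
    'oslo.version': '2.0', 'oslo.message': '{"method": "ping"}'}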
def _callback(raw_message):
    message = self.channel.message_to_python(raw_message)
    try:
        msg = rpc_common.deserialize_msg(message.payload)
        callback(msg)
    except Exception:
        LOG.exception(_("Failed to process message... skipping it."))
    finally:
        message.ack()
def consume(self):
    """Fetch the message and pass it to the callback object"""
    message = self.receiver.fetch()
    try:
        msg = rpc_common.deserialize_msg(message.content)
        self.callback(msg)
    except Exception:
        LOG.exception(_("Failed to process message... skipping it."))
    finally:
        self.session.acknowledge(message)
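# A minimal sketch of the envelope these consumer callbacks unwrap before
# dispatching. The key names ('oslo.version', 'oslo.message') follow the
# oslo-incubator rpc.common convention but should be read as assumptions
# here; this is illustrative, not the library implementation.
import json

def sketch_serialize_msg(raw_msg):
    # Wrap the payload in a versioned envelope.
    return {'oslo.version': '2.0', 'oslo.message': json.dumps(raw_msg)}

def sketch_deserialize_msg(msg):
    # Unwrap an enveloped message; pass bare (legacy) messages through.
    if isinstance(msg, dict) and 'oslo.message' in msg:
        return json.loads(msg['oslo.message'])
    return msg

# Either shape reaches the callback as a plain dict.
wrapped = sketch_serialize_msg({'method': 'ping', 'args': {}})
assert sketch_deserialize_msg(wrapped) == {'method': 'ping', 'args': {}}
assert sketch_deserialize_msg({'method': 'ping', 'args': {}}) == {'method': 'ping', 'args': {}}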
def _make_event_from_message(message):
    """Turn a raw message from the wire into an event.Event object
    """
    if 'oslo.message' in message:
        # Unpack the RPC call body and discard the envelope
        message = rpc_common.deserialize_msg(message)
    tenant_id = _get_tenant_id_for_message(message)
    crud = event.UPDATE
    router_id = None
    if message.get('method') == 'router_deleted':
        crud = event.DELETE
        router_id = message.get('args', {}).get('router_id')
    else:
        event_type = message.get('event_type', '')
        # Router id is not always present, but look for it as though
        # it is to avoid duplicating this line a few times.
        router_id = message.get('payload', {}).get('router', {}).get('id')
        if event_type.startswith('routerstatus.update'):
            # We generate these events ourself, so ignore them.
            return None
        if event_type == 'router.create.end':
            crud = event.CREATE
        elif event_type == 'router.delete.end':
            crud = event.DELETE
            router_id = message.get('payload', {}).get('router_id')
        elif event_type in _INTERFACE_NOTIFICATIONS:
            crud = event.UPDATE
            router_id = message.get(
                'payload', {}
            ).get('router.interface', {}).get('id')
        elif event_type in _INTERESTING_NOTIFICATIONS:
            crud = event.UPDATE
        elif event_type.endswith('.end'):
            crud = event.UPDATE
        elif event_type.startswith('akanda.rug.command'):
            LOG.debug('received a command: %r', message.get('payload'))
            # If the message does not specify a tenant, send it to everyone
            pl = message.get('payload', {})
            tenant_id = pl.get('tenant_id', '*')
            router_id = pl.get('router_id')
            crud = event.COMMAND
            if pl.get('command') == commands.POLL:
                return event.Event(
                    tenant_id='*',
                    router_id='*',
                    crud=event.POLL,
                    body={},
                )
        else:
            # LOG.debug('ignoring message %r', message)
            return None
    return event.Event(tenant_id, router_id, crud, message)
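# Hypothetical inputs, for illustration only: the two message shapes the
# handler above distinguishes. Field values are invented; only the keys
# ('method'/'args' vs. 'event_type'/'payload') mirror the dispatch logic.
rpc_style_message = {
    'method': 'router_deleted',
    'args': {'router_id': 'example-router-id'},  # invented id
}
notification_style_message = {
    'event_type': 'router.create.end',
    'payload': {'router': {'id': 'example-router-id'}},  # invented id
}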
def _call(addr, context, topic, msg, timeout=None,
          envelope=False):
    # timeout_response is how long we wait for a response
    timeout = timeout or CONF.rpc_response_timeout

    # The msg_id is used to track replies.
    msg_id = uuid.uuid4().hex

    # Replies always come into the reply service.
    reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host

    LOG.debug(_("Creating payload"))

    # Curry the original request into a reply method.
    mcontext = RpcContext.marshal(context)
    payload = {
        'method': '-reply',
        'args': {
            'msg_id': msg_id,
            'context': mcontext,
            'topic': reply_topic,
            'msg': [mcontext, msg]
        }
    }

    LOG.debug(_("Creating queue socket for reply waiter"))

    # Messages arriving async.
    # TODO(ewindisch): have reply consumer with dynamic subscription mgmt
    with Timeout(timeout, exception=rpc_common.Timeout):
        try:
            msg_waiter = ZmqSocket(
                "ipc://%s/zmq_topic_zmq_replies.%s" %
                (CONF.rpc_zmq_ipc_dir,
                 CONF.rpc_zmq_host),
                zmq.SUB, subscribe=msg_id, bind=False
            )

            LOG.debug(_("Sending cast"))
            _cast(addr, context, topic, payload, envelope)

            LOG.debug(_("Cast sent; Waiting reply"))
            # Blocks until receives reply
            msg = msg_waiter.recv()
            LOG.debug(_("Received message: %s"), msg)
            LOG.debug(_("Unpacking response"))

            if msg[2] == 'cast':  # Legacy version
                raw_msg = _deserialize(msg[-1])[-1]
            elif msg[2] == 'impl_zmq_v2':
                rpc_envelope = unflatten_envelope(msg[4:])
                raw_msg = rpc_common.deserialize_msg(rpc_envelope)
            else:
                raise rpc_common.UnsupportedRpcEnvelopeVersion(
                    _("Unsupported or unknown ZMQ envelope returned."))

            responses = raw_msg['args']['response']
        # ZMQError trumps the Timeout error.
        except zmq.ZMQError:
            raise RPCException("ZMQ Socket Error")
        except (IndexError, KeyError):
            raise RPCException(_("RPC Message Invalid."))
        finally:
            if 'msg_waiter' in vars():
                msg_waiter.close()

    # It seems we don't need to do all of the following,
    # but perhaps it would be useful for multicall?
    # One effect of this is that we're checking all
    # responses for Exceptions.
    for resp in responses:
        if isinstance(resp, types.DictType) and 'exc' in resp:
            raise rpc_common.deserialize_remote_exception(CONF, resp['exc'])

    return responses[-1]