Example #1
    def consume(self, sock):
        # TODO(ewindisch): use zero-copy (i.e. references, not copying)
        data = sock.recv()
        LOG.debug("CONSUMER RECEIVED DATA: %s", data)

        proxy = self.proxies[sock]

        if data[2] == 'cast':  # Legacy protocol
            packenv = data[3]

            ctx, msg = _deserialize(packenv)
            request = rpc_common.deserialize_msg(msg)
            ctx = RpcContext.unmarshal(ctx)
        elif data[2] == 'impl_zmq_v2':
            packenv = data[4:]

            msg = unflatten_envelope(packenv)
            request = rpc_common.deserialize_msg(msg)

            # Unmarshal only after verifying the message.
            ctx = RpcContext.unmarshal(data[3])
        else:
            LOG.error(_LE("ZMQ Envelope version unsupported or unknown."))
            return

        self.pool.spawn_n(self.process, proxy, ctx, request)
Example #2
    def consume(self, sock):
        #TODO(ewindisch): use zero-copy (i.e. references, not copying)
        data = sock.recv()
        LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
        if sock in self.mapping:
            LOG.debug(_("ROUTER RELAY-OUT %(data)s") % {
                'data': data})
            self.mapping[sock].send(data)
            return

        proxy = self.proxies[sock]

        if data[2] == 'cast':  # Legacy protocol
            packenv = data[3]

            ctx, msg = _deserialize(packenv)
            request = rpc_common.deserialize_msg(msg)
            ctx = RpcContext.unmarshal(ctx)
        elif data[2] == 'impl_zmq_v2':
            packenv = data[4:]

            msg = unflatten_envelope(packenv)
            request = rpc_common.deserialize_msg(msg)

            # Unmarshal only after verifying the message.
            ctx = RpcContext.unmarshal(data[3])
        else:
            LOG.error(_("ZMQ Envelope version unsupported or unknown."))
            return

        self.pool.spawn_n(self.process, proxy, ctx, request)
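Both consume() variants above dispatch on the third message frame: 'cast' marks the legacy format, where data[3] holds a single packed envelope, while 'impl_zmq_v2' carries the marshaled context in data[3] and the RPC envelope spread across data[4:]. The helper below is only an illustration of that frame layout; its name and the assumption that the envelope is flattened into alternating key/value frames are not part of the module.

    # Hypothetical illustration of the frame layout the v2 branch expects.
    def build_v2_frames(msg_id, topic, marshaled_ctx, envelope):
        frames = [msg_id, topic, 'impl_zmq_v2', marshaled_ctx]
        for key, value in envelope.items():
            # assumed flattening: alternating key/value frames in data[4:]
            frames.extend([key, value])
        return frames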
Example #3
    def _callback_handler(self, message, callback):
        """Call callback with deserialized message.

        Messages that are processed without exception are ack'ed.

        If the message processing generates an exception, it will be
        ack'ed if ack_on_error=True. Otherwise it will be .requeue()'ed.
        """

        try:
            msg = rpc_common.deserialize_msg(message.payload)
            callback(msg)
        except Exception:
            if self.ack_on_error:
                LOG.exception(
                    _("Failed to process message"
                      " ... skipping it."))
                message.ack()
            else:
                LOG.exception(
                    _("Failed to process message"
                      " ... will requeue."))
                message.requeue()
        else:
            message.ack()
Example #4
    def _callback_handler(self, message, callback):
        """Call callback with deserialized message.

        Messages that are processed without exception are ack'ed.

        If the message processing generates an exception, it will be
        ack'ed if ack_on_error=True. Otherwise it will be .reject()'ed.
        Rejection is better than waiting for the message to timeout.
        Rejected messages are immediately requeued.
        """

        ack_msg = False
        try:
            msg = rpc_common.deserialize_msg(message.payload)
            callback(msg)
            ack_msg = True
        except Exception:
            if self.ack_on_error:
                ack_msg = True
                LOG.exception(_("Failed to process message"
                                " ... skipping it."))
            else:
                LOG.exception(_("Failed to process message"
                                " ... will requeue."))
        finally:
            if ack_msg:
                message.ack()
            else:
                message.reject()
Example #5
 def _callback(raw_message):
     message = self.channel.message_to_python(raw_message)
     try:
         msg = rpc_common.deserialize_msg(message.payload)
         callback(msg)
         message.ack()
     except Exception:
         LOG.exception(_("Failed to process message... skipping it."))
Example #6
 def _callback(raw_message):
     message = self.channel.message_to_python(raw_message)
     try:
         msg = rpc_common.deserialize_msg(message.payload)
         callback(msg)
         message.ack()
     except Exception:
         LOG.exception(_("Failed to process message... skipping it."))
Example #7
    def test_serialize_msg_v2(self):
        msg = {'foo': 'bar'}
        s_msg = {'oslo.version': rpc_common._RPC_ENVELOPE_VERSION,
                 'oslo.message': jsonutils.dumps(msg)}
        serialized = rpc_common.serialize_msg(msg)

        self.assertEqual(s_msg, rpc_common.serialize_msg(msg))

        self.assertEqual(msg, rpc_common.deserialize_msg(serialized))
Example #8
    def test_serialize_msg_v2(self):
        msg = {'foo': 'bar'}
        s_msg = {'oslo.version': rpc_common._RPC_ENVELOPE_VERSION,
                 'oslo.message': jsonutils.dumps(msg)}
        serialized = rpc_common.serialize_msg(msg)

        self.assertEqual(s_msg, rpc_common.serialize_msg(msg))

        self.assertEqual(msg, rpc_common.deserialize_msg(serialized))
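The round-trip test above pins down the v2 envelope shape: a dict carrying 'oslo.version' and a JSON-encoded 'oslo.message'. Combined with the pass-through behaviour exercised in Examples #18 and #21 below, it suggests deserialize_msg behaves roughly as in this sketch (not the actual oslo implementation: the version string is an assumed placeholder and the stdlib json module stands in for jsonutils):

    import json

    _RPC_ENVELOPE_VERSION = '2.0'  # assumed placeholder value

    def deserialize_msg_sketch(msg):
        # Anything that is not an envelope dict passes through unchanged,
        # matching test_deserialize_msg_no_envelope.
        if not isinstance(msg, dict):
            return msg
        if 'oslo.version' not in msg or 'oslo.message' not in msg:
            return msg
        # Envelope: unwrap the JSON-encoded payload.
        return json.loads(msg['oslo.message'])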
Example #9
 def consume(self):
     """Fetch the message and pass it to the callback object"""
     message = self.receiver.fetch()
     try:
         msg = rpc_common.deserialize_msg(message.content)
         self.callback(msg)
     except Exception:
         LOG.exception(_("Failed to process message... skipping it."))
     finally:
         self.session.acknowledge(message)
Example #10
 def consume(self):
     """Fetch the message and pass it to the callback object"""
     message = self.receiver.fetch()
     try:
         msg = rpc_common.deserialize_msg(message.content)
         self.callback(msg)
     except Exception:
         LOG.exception(_("Failed to process message... skipping it."))
     finally:
         self.session.acknowledge(message)
Example #11
 def consume(self):
     """Fetch the message and pass it to the callback object."""
     message = self.receiver.fetch()
     try:
         self._unpack_json_msg(message)
         msg = rpc_common.deserialize_msg(message.content)
         self.callback(msg)
     except Exception:
         LOG.exception(_LE("Failed to process message... skipping it."))
     finally:
         # TODO(sandy): Need support for optional ack_on_error.
         self.session.acknowledge(message)
Example #12
 def consume(self):
     """Fetch the message and pass it to the callback object."""
     message = self.receiver.fetch()
     try:
         self._unpack_json_msg(message)
         msg = rpc_common.deserialize_msg(message.content)
         self.callback(msg)
     except Exception:
         LOG.exception(_("Failed to process message... skipping it."))
     finally:
         # TODO(sandy): Need support for optional ack_on_error.
         self.session.acknowledge(message)
Example #13
    def consume(self, sock):
        #TODO(ewindisch): use zero-copy (i.e. references, not copying)
        data = sock.recv()
        LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
        if sock in self.mapping:
            LOG.debug(_("ROUTER RELAY-OUT %(data)s") % {'data': data})
            self.mapping[sock].send(data)
            return

        msg_id, topic, style, in_msg = data

        ctx, request = rpc_common.deserialize_msg(_deserialize(in_msg))
        ctx = RpcContext.unmarshal(ctx)

        proxy = self.proxies[sock]

        self.pool.spawn_n(self.process, style, topic, proxy, ctx, request)
Example #14
    def consume(self, sock):
        #TODO(ewindisch): use zero-copy (i.e. references, not copying)
        data = sock.recv()
        LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
        if sock in self.mapping:
            LOG.debug(_("ROUTER RELAY-OUT %(data)s") % {
                'data': data})
            self.mapping[sock].send(data)
            return

        msg_id, topic, style, in_msg = data

        ctx, request = rpc_common.deserialize_msg(_deserialize(in_msg))
        ctx = RpcContext.unmarshal(ctx)

        proxy = self.proxies[sock]

        self.pool.spawn_n(self.process, style, topic,
                          proxy, ctx, request)
Example #15
    def _callback_handler(self, message, callback):
        """Call callback with deserialized message.

        Messages that are processed without exception are ack'ed.

        If the message processing generates an exception, it will be
        ack'ed if ack_on_error=True. Otherwise it will be .requeue()'ed.
        """

        try:
            msg = rpc_common.deserialize_msg(message.payload)
            callback(msg)
        except Exception:
            if self.ack_on_error:
                LOG.exception(_LE("Failed to process message" " ... skipping it."))
                message.ack()
            else:
                LOG.exception(_LE("Failed to process message" " ... will requeue."))
                message.requeue()
        else:
            message.ack()
Example #16
def _call(addr, context, topic, msg, timeout=None,
          envelope=False):
    # timeout_response is how long we wait for a response
    timeout = timeout or CONF.rpc_response_timeout

    # The msg_id is used to track replies.
    msg_id = uuid.uuid4().hex

    # Replies always come into the reply service.
    reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host

    LOG.debug(_("Creating payload"))
    # Curry the original request into a reply method.
    mcontext = RpcContext.marshal(context)
    payload = {
        'method': '-reply',
        'args': {
            'msg_id': msg_id,
            'topic': reply_topic,
            # TODO(ewindisch): safe to remove mcontext in I.
            'msg': [mcontext, msg]
        }
    }

    LOG.debug(_("Creating queue socket for reply waiter"))

    # Messages arriving async.
    # TODO(ewindisch): have reply consumer with dynamic subscription mgmt
    with Timeout(timeout, exception=rpc_common.Timeout):
        try:
            msg_waiter = ZmqSocket(
                "ipc://%s/zmq_topic_zmq_replies.%s" %
                (CONF.rpc_zmq_ipc_dir,
                 CONF.rpc_zmq_host),
                zmq.SUB, subscribe=msg_id, bind=False
            )

            LOG.debug(_("Sending cast"))
            _cast(addr, context, topic, payload, envelope)

            LOG.debug(_("Cast sent; Waiting reply"))
            # Blocks until receives reply
            msg = msg_waiter.recv()
            LOG.debug(_("Received message: %s"), msg)
            LOG.debug(_("Unpacking response"))

            if msg[2] == 'cast':  # Legacy version
                raw_msg = _deserialize(msg[-1])[-1]
            elif msg[2] == 'impl_zmq_v2':
                rpc_envelope = unflatten_envelope(msg[4:])
                raw_msg = rpc_common.deserialize_msg(rpc_envelope)
            else:
                raise rpc_common.UnsupportedRpcEnvelopeVersion(
                    _("Unsupported or unknown ZMQ envelope returned."))

            responses = raw_msg['args']['response']
        # ZMQError trumps the Timeout error.
        except zmq.ZMQError:
            raise RPCException("ZMQ Socket Error")
        except (IndexError, KeyError):
            raise RPCException(_("RPC Message Invalid."))
        finally:
            if 'msg_waiter' in vars():
                msg_waiter.close()

    # It seems we don't need to do all of the following,
    # but perhaps it would be useful for multicall?
    # One effect of this is that we're checking all
    # responses for Exceptions.
    for resp in responses:
        if isinstance(resp, types.DictType) and 'exc' in resp:
            raise rpc_common.deserialize_remote_exception(CONF, resp['exc'])

    return responses[-1]
Example #17
def _call(addr, context, topic, msg, timeout=None,
          envelope=False):
    # timeout_response is how long we wait for a response
    timeout = timeout or CONF.rpc_response_timeout

    # The msg_id is used to track replies.
    msg_id = uuid.uuid4().hex

    # Replies always come into the reply service.
    reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host

    LOG.debug("Creating payload")
    # Curry the original request into a reply method.
    mcontext = RpcContext.marshal(context)
    payload = {
        'method': '-reply',
        'args': {
            'msg_id': msg_id,
            'topic': reply_topic,
            # TODO(ewindisch): safe to remove mcontext in I.
            'msg': [mcontext, msg]
        }
    }

    LOG.debug("Creating queue socket for reply waiter")

    # Messages arriving async.
    # TODO(ewindisch): have reply consumer with dynamic subscription mgmt
    with Timeout(timeout, exception=rpc_common.Timeout):
        try:
            msg_waiter = ZmqSocket(
                "ipc://%s/zmq_topic_zmq_replies.%s" %
                (CONF.rpc_zmq_ipc_dir,
                 CONF.rpc_zmq_host),
                zmq.SUB, subscribe=msg_id, bind=False
            )

            LOG.debug("Sending cast")
            _cast(addr, context, topic, payload, envelope)

            LOG.debug("Cast sent; Waiting reply")
            # Blocks until receives reply
            msg = msg_waiter.recv()
            LOG.debug("Received message: %s", msg)
            LOG.debug("Unpacking response")

            if msg[2] == 'cast':  # Legacy version
                raw_msg = _deserialize(msg[-1])[-1]
            elif msg[2] == 'impl_zmq_v2':
                rpc_envelope = unflatten_envelope(msg[4:])
                raw_msg = rpc_common.deserialize_msg(rpc_envelope)
            else:
                raise rpc_common.UnsupportedRpcEnvelopeVersion(
                    _("Unsupported or unknown ZMQ envelope returned."))

            responses = raw_msg['args']['response']
        # ZMQError trumps the Timeout error.
        except zmq.ZMQError:
            raise RPCException("ZMQ Socket Error")
        except (IndexError, KeyError):
            raise RPCException(_("RPC Message Invalid."))
        finally:
            if 'msg_waiter' in vars():
                msg_waiter.close()

    # It seems we don't need to do all of the following,
    # but perhaps it would be useful for multicall?
    # One effect of this is that we're checking all
    # responses for Exceptions.
    for resp in responses:
        if isinstance(resp, types.DictType) and 'exc' in resp:
            raise rpc_common.deserialize_remote_exception(CONF, resp['exc'])

    return responses[-1]
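Both _call variants rebuild the reply envelope with unflatten_envelope(msg[4:]) before passing it to rpc_common.deserialize_msg. That helper is not shown in these examples; a plausible sketch, assuming the envelope travels as alternating key/value frames as illustrated after Example #2:

    def unflatten_envelope_sketch(packenv):
        # Pair consecutive frames back into a dict, e.g.
        # ['oslo.version', '2.0', 'oslo.message', '{...}'] ->
        # {'oslo.version': '2.0', 'oslo.message': '{...}'}
        frames = iter(packenv)
        return dict(zip(frames, frames))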
Example #18
 def test_deserialize_msg_no_envelope(self):
     self.assertEqual(1, rpc_common.deserialize_msg(1))
     self.assertEqual([], rpc_common.deserialize_msg([]))
     self.assertEqual({}, rpc_common.deserialize_msg({}))
     self.assertEqual('foo', rpc_common.deserialize_msg('foo'))
Example #19
    def consume(self, sock):
        ipc_dir = CONF.rpc_zmq_ipc_dir

        #TODO(ewindisch): use zero-copy (i.e. references, not copying)
        data = sock.recv()
        msg_id, topic, style, in_msg = data
        topic = topic.split('.', 1)[0]

        LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data)))

        # Handle zmq_replies magic
        if topic.startswith('fanout~'):
            sock_type = zmq.PUB
        elif topic.startswith('zmq_replies'):
            sock_type = zmq.PUB
            inside = rpc_common.deserialize_msg(_deserialize(in_msg))
            msg_id = inside[-1]['args']['msg_id']
            response = inside[-1]['args']['response']
            LOG.debug(_("->response->%s"), response)
            data = [str(msg_id), _serialize(response)]
        else:
            sock_type = zmq.PUSH

        if topic not in self.topic_proxy:

            def publisher(waiter):
                LOG.info(_("Creating proxy for topic: %s"), topic)

                try:
                    out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
                                         (ipc_dir, topic),
                                         sock_type,
                                         bind=True)
                except RPCException:
                    waiter.send_exception(*sys.exc_info())
                    return

                self.topic_proxy[topic] = eventlet.queue.LightQueue(
                    CONF.rpc_zmq_topic_backlog)
                self.sockets.append(out_sock)

                # It takes some time for a pub socket to open,
                # before we can have any faith in doing a send() to it.
                if sock_type == zmq.PUB:
                    eventlet.sleep(.5)

                waiter.send(True)

                while (True):
                    data = self.topic_proxy[topic].get()
                    out_sock.send(data)
                    LOG.debug(
                        _("ROUTER RELAY-OUT SUCCEEDED %(data)s") %
                        {'data': data})

            wait_sock_creation = eventlet.event.Event()
            eventlet.spawn(publisher, wait_sock_creation)

            try:
                wait_sock_creation.wait()
            except RPCException:
                LOG.error(_("Topic socket file creation failed."))
                return

        try:
            self.topic_proxy[topic].put_nowait(data)
            LOG.debug(_("ROUTER RELAY-OUT QUEUED %(data)s") % {'data': data})
        except eventlet.queue.Full:
            LOG.error(
                _("Local per-topic backlog buffer full for topic "
                  "%(topic)s. Dropping message.") % {'topic': topic})
Example #20
    def consume(self, sock):
        ipc_dir = CONF.rpc_zmq_ipc_dir

        #TODO(ewindisch): use zero-copy (i.e. references, not copying)
        data = sock.recv()
        msg_id, topic, style, in_msg = data
        topic = topic.split('.', 1)[0]

        LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data)))

        # Handle zmq_replies magic
        if topic.startswith('fanout~'):
            sock_type = zmq.PUB
        elif topic.startswith('zmq_replies'):
            sock_type = zmq.PUB
            inside = rpc_common.deserialize_msg(_deserialize(in_msg))
            msg_id = inside[-1]['args']['msg_id']
            response = inside[-1]['args']['response']
            LOG.debug(_("->response->%s"), response)
            data = [str(msg_id), _serialize(response)]
        else:
            sock_type = zmq.PUSH

        if topic not in self.topic_proxy:
            def publisher(waiter):
                LOG.info(_("Creating proxy for topic: %s"), topic)

                try:
                    out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
                                         (ipc_dir, topic),
                                         sock_type, bind=True)
                except RPCException:
                    waiter.send_exception(*sys.exc_info())
                    return

                self.topic_proxy[topic] = eventlet.queue.LightQueue(
                    CONF.rpc_zmq_topic_backlog)
                self.sockets.append(out_sock)

                # It takes some time for a pub socket to open,
                # before we can have any faith in doing a send() to it.
                if sock_type == zmq.PUB:
                    eventlet.sleep(.5)

                waiter.send(True)

                while(True):
                    data = self.topic_proxy[topic].get()
                    out_sock.send(data)
                    LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") %
                              {'data': data})

            wait_sock_creation = eventlet.event.Event()
            eventlet.spawn(publisher, wait_sock_creation)

            try:
                wait_sock_creation.wait()
            except RPCException:
                LOG.error(_("Topic socket file creation failed."))
                return

        try:
            self.topic_proxy[topic].put_nowait(data)
            LOG.debug(_("ROUTER RELAY-OUT QUEUED %(data)s") %
                      {'data': data})
        except eventlet.queue.Full:
            LOG.error(_("Local per-topic backlog buffer full for topic "
                        "%(topic)s. Dropping message.") % {'topic': topic})
Example #21
 def test_deserialize_msg_no_envelope(self):
     self.assertEqual(1, rpc_common.deserialize_msg(1))
     self.assertEqual([], rpc_common.deserialize_msg([]))
     self.assertEqual({}, rpc_common.deserialize_msg({}))
     self.assertEqual('foo', rpc_common.deserialize_msg('foo'))