Example #1
 def test_delete(self):
     b = Queue("foo", self.exchange, "foo", channel=Channel())
     b.delete()
     self.assertIn("queue_delete", b.channel)
Example #2
 def test_delete(self):
     b = Queue("foo", self.exchange, "foo", channel=get_conn().channel())
     b.delete()
     self.assertIn("queue_delete", b.channel)
Example #3
 def test_delete(self):
     b = Queue('foo', self.exchange, 'foo', channel=get_conn().channel())
     b.delete()
     self.assertIn('queue_delete', b.channel)
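Examples #2 and #3 rely on a get_conn() helper from the surrounding test suite, which presumably returns a kombu Connection. Outside the tests, the same declare-and-delete flow can be sketched against kombu's in-memory transport; the exchange and queue names below are placeholders:

    from kombu import Connection, Exchange, Queue

    # Uses kombu's in-memory transport, so no broker is required.
    with Connection('memory://') as conn:
        channel = conn.channel()
        exchange = Exchange('test_exchange', type='direct')
        queue = Queue('foo', exchange, 'foo', channel=channel)
        queue.declare()   # declares the exchange and queue, and binds them
        queue.delete()    # issues queue_delete on the bound channel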
Example #4
    def call(self, name, operation, timeout=10, args=None, **kwargs):
        """Send a message and wait for reply

        @param name: name of destination service queue
        @param operation: name of service operation to invoke
        @param timeout: RPC timeout to await a reply
        @param args: dictionary of keyword args to pass to operation.
                     Use this OR kwargs.
        @param kwargs: additional args to pass to operation
        """

        if args:
            if kwargs:
                raise TypeError("specify args dict or keyword arguments, not both")
        else:
            args = kwargs

        # create a direct queue for the reply. This may end up being a
        # bottleneck for performance: each rpc call gets a brand new
        # exclusive queue. However, this approach is used in nova.rpc and
        # seems to have carried them pretty far. If/when this
        # becomes a bottleneck we can set up a long-lived backend queue and
        # use correlation_id to deal with concurrent RPC calls. See:
        #   http://www.rabbitmq.com/tutorials/tutorial-six-python.html
        msg_id = uuid.uuid4().hex

        # expire the reply queue shortly after the timeout (x-expires is in
        # milliseconds). It will be (lazily) deleted by the broker if we
        # don't clean it up first.
        queue_arguments = {'x-expires': int((timeout + 1) * 1000)}
        queue = Queue(name=msg_id, exchange=self._exchange, routing_key=msg_id,
                      durable=False, queue_arguments=queue_arguments)

        messages = []
        event = threading.Event()

        def _callback(body, message):
            messages.append(body)
            message.ack()
            event.set()

        d = dict(op=operation, args=args)
        headers = {'reply-to': msg_id, 'sender': self.add_sysname(self.name)}
        dest = self.add_sysname(name)

        def _declare_and_send(channel):
            consumer = Consumer(channel, (queue,), callbacks=(_callback,))
            with Producer(channel) as producer:
                producer.publish(d, routing_key=dest, headers=headers,
                    exchange=self._exchange, serializer=self._serializer)
            return consumer

        log.debug("sending call to %s:%s", dest, operation)
        with connections[self._pool_conn].acquire(block=True) as conn:
            consumer, channel = self.ensure(conn, _declare_and_send)
            try:
                self._consume(conn, consumer, timeout=timeout, until_event=event)

                # try to delete queue, but don't worry if it fails (will expire)
                try:
                    queue = queue.bind(channel)
                    queue.delete(nowait=True)
                except Exception:
                    log.exception("error deleting queue")

            finally:
                conn.maybe_close_channel(channel)

        msg_body = messages[0]
        if msg_body.get('error'):
            raise_error(msg_body['error'])
        else:
            return msg_body.get('result')
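Assuming this call method lives on an RPC client object (referred to as client below) whose exchange, serializer, and connection pool are already configured, an invocation might look like the following; the service and operation names are placeholders:

    # Hypothetical usage; 'billing' and 'get_invoice' are placeholder names.
    # If the reply carries an 'error' field, call() re-raises it via raise_error.
    result = client.call('billing', 'get_invoice', timeout=5,
                         args={'invoice_id': 42})
    print(result)

Each call creates its own exclusive reply queue and deletes it afterwards; the x-expires argument only acts as a safety net in case that explicit delete fails.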