Example 1
    def __init__(self, sysname):
        self._sysname = sysname
        self.ready = Event()

        # exchange/queues/bindings
        self._exchanges = {}                            # names -> { subscriber, topictrie(queue name) }
        self._queues = {}                               # names -> gevent queue
        self._bindings_by_queue = defaultdict(list)     # queue name -> [(ex, binding)]
        self._lock_declarables = coros.RLock()          # exchanges, queues, bindings, routing method

        # consumers
        self._consumers = defaultdict(list)             # queue name -> [ctag, channel._on_deliver]
        self._consumers_by_ctag = {}                    # ctag -> queue_name ??
        self._ctag_pool = IDPool()                      # pool of consumer tags
        self._lock_consumers = coros.RLock()            # lock for interacting with any consumer related attrs

        # deliveries
        self._unacked = {}                              # dtag -> (ctag, msg)
        self._lock_unacked = coros.RLock()              # lock for interacting with unacked field

        self._gl_msgs = None
        self._gl_pool = Pool()
        self.gl_ioloop = None

        self.errors = []
Example 2
    def __init__(self):
        log.debug("In NodeB.__init__")
        BaseNode.__init__(self)

        self._pool = IDPool()
        self._bidir_pool = {}   # maps inactive/active our numbers (from self._pool) to channels
        self._pool_map = {}     # maps active pika channel numbers to our numbers (from self._pool)
Example 3
    def __init__(self, router=None):
        BaseNode.__init__(self)

        self._own_router = True
        if router is not None:
            self._local_router = router
            self._own_router = False
        else:
            self._local_router = LocalRouter(get_sys_name())
        self._channel_id_pool = IDPool()
Example 4
    def __init__(self):
        log.debug("In NodeB.__init__")
        self.ready = event.Event()
        self._lock = coros.RLock()
        self._pool = IDPool()
        self._bidir_pool = {}   # maps inactive/active our numbers (from self._pool) to channels
        self._pool_map = {}     # maps active pika channel numbers to our numbers (from self._pool)

        amqp.Node.__init__(self)
Example 5
    def __init__(self):
        log.debug("In NodeB.__init__")
        BaseNode.__init__(self)

        self._pool = IDPool()
        self._bidir_pool = {}   # maps inactive/active our numbers (from self._pool) to channels
        self._pool_map = {}     # maps active pika channel numbers to our numbers (from self._pool)
        self._dead_pool = []    # channels removed from pool for failing health test, for later forensics
Example 6
    def test_different_new_id_method(self):
        new_id = lambda x: x + 2

        self._idpool = IDPool(new_id=new_id)

        self.assertEquals(self._idpool.get_id(), 2)
        self.assertEquals(self._idpool.get_id(), 4)
        self.assertEquals(self._idpool.get_id(), 6)

        self._idpool.release_id(4)

        self.assertEquals(self._idpool.get_id(), 4)
        self.assertEquals(self._idpool._last_id, 6)
Example 7
    def __init__(self, sysname):
        self._sysname = sysname
        self.ready = Event()

        # exchange/queues/bindings
        self._exchanges = {}                            # names -> { subscriber, topictrie(queue name) }
        self._queues = {}                               # names -> gevent queue
        self._bindings_by_queue = defaultdict(list)     # queue name -> [(ex, binding)]
        self._lock_declarables = RLock()                # exchanges, queues, bindings, routing method

        # consumers
        self._consumers = defaultdict(list)             # queue name -> [ctag, channel._on_deliver]
        self._consumers_by_ctag = {}                    # ctag -> queue_name ??
        self._ctag_pool = IDPool()                      # pool of consumer tags
        self._lock_consumers = RLock()                  # lock for interacting with any consumer related attrs

        # deliveries
        self._unacked = {}                              # dtag -> (ctag, msg)
        self._lock_unacked = RLock()                    # lock for interacting with unacked field

        self._gl_msgs = None
        self._gl_pool = Pool()
        self.gl_ioloop = None

        self.errors = []
Example 8
    def __init__(self):
        log.debug("In NodeB.__init__")
        self.ready = event.Event()
        self._lock = coros.RLock()
        self._pool = IDPool()
        self._bidir_pool = {}   # maps inactive/active our numbers (from self._pool) to channels
        self._pool_map = {}     # maps active pika channel numbers to our numbers (from self._pool)

        amqp.Node.__init__(self)
Example 9
    def __init__(self):
        log.debug("In NodeB.__init__")
        BaseNode.__init__(self)

        self._pool = IDPool()
        self._bidir_pool = {}   # maps inactive/active our numbers (from self._pool) to channels
        self._pool_map = {}     # maps active pika channel numbers to our numbers (from self._pool)
        self._dead_pool = []    # channels removed from pool for failing health test, for later forensics

        BaseNode.__init__(self)
Example 10
    def __init__(self, router=None):
        BaseNode.__init__(self)

        self._own_router = True
        if router is not None:
            self._local_router = router
            self._own_router = False
        else:
            self._local_router = LocalRouter(get_sys_name())
        self._channel_id_pool = IDPool()
Example 11
class LocalNode(BaseNode):
    def __init__(self, router=None):
        BaseNode.__init__(self)

        self._own_router = True
        if router is not None:
            self._local_router = router
            self._own_router = False
        else:
            self._local_router = LocalRouter(get_sys_name())
        self._channel_id_pool = IDPool()

    def start_node(self):
        BaseNode.start_node(self)
        if self._own_router:
            self._local_router.start()

    def stop_node(self):
        if self.running:
            if self._own_router:
                self._local_router.stop()
        self.running = False

    def _new_transport(self, ch_number=None):
        trans = LocalTransport(self._local_router, ch_number)
        return trans

    def channel(self, ch_type, transport=None):
        ch = self._new_channel(ch_type, ch_number=self._channel_id_pool.get_id(), transport=transport)
        # @TODO keep track of all channels to close them later from the top

        ch._transport.add_on_close_callback(self._on_channel_close)
        return ch

    def _on_channel_close(self, ch, code, text):
        log.debug("LocalNode._on_channel_close (%s)", ch.channel_number)
        self._channel_id_pool.release_id(ch.channel_number)
Example 12
    def test_different_new_id_method(self):
        new_id = lambda x: x + 2

        self._idpool = IDPool(new_id=new_id)

        self.assertEquals(self._idpool.get_id(), 2)
        self.assertEquals(self._idpool.get_id(), 4)
        self.assertEquals(self._idpool.get_id(), 6)

        self._idpool.release_id(4)

        self.assertEquals(self._idpool.get_id(), 4)
        self.assertEquals(self._idpool._last_id, 6)
Example 13
class LocalNode(BaseNode):
    def __init__(self, router=None):
        BaseNode.__init__(self)

        self._own_router = True
        if router is not None:
            self._local_router = router
            self._own_router = False
        else:
            self._local_router = LocalRouter(get_sys_name())
        self._channel_id_pool = IDPool()

    def start_node(self):
        BaseNode.start_node(self)
        if self._own_router:
            self._local_router.start()

    def stop_node(self):
        if self.running:
            if self._own_router:
                self._local_router.stop()
        self.running = False

    def _new_transport(self, ch_number=None):
        trans = LocalTransport(self._local_router, ch_number)
        return trans

    def channel(self, ch_type, transport=None):
        ch = self._new_channel(ch_type, ch_number=self._channel_id_pool.get_id(), transport=transport)
        # @TODO keep track of all channels to close them later from the top

        ch._transport.add_on_close_callback(self._on_channel_close)
        return ch

    def _on_channel_close(self, ch, code, text):
        log.debug("ZeroMQNode._on_channel_close (%s)", ch.channel_number)
        self._channel_id_pool.release_id(ch.channel_number)
Example 14
class NodeB(amqp.Node):
    """
    Blocking interface to AMQP messaging primitives.

    Wrap around Node and create blocking interface for getting channel
    objects.
    """
    def __init__(self):
        log.debug("In NodeB.__init__")
        self.ready = event.Event()
        self._lock = coros.RLock()
        self._pool = IDPool()
        self._bidir_pool = {}   # maps inactive/active our numbers (from self._pool) to channels
        self._pool_map = {}     # maps active pika channel numbers to our numbers (from self._pool)

        amqp.Node.__init__(self)

    def start_node(self):
        """
        This should only be called by on_connection_opened.
        so, maybe we don't need a start_node/stop_node interface
        """
        log.debug("In start_node")
        amqp.Node.start_node(self)
        self.running = 1
        self.ready.set()

    def _new_channel(self, ch_type, ch_number=None, **kwargs):
        """
        Creates a pyon Channel based on the passed in type, and activates it for use.
        """
        chan = ch_type(**kwargs)
        amq_chan = blocking_cb(self.client.channel, 'on_open_callback', channel_number=ch_number)
        chan.on_channel_open(amq_chan)
        return chan

    def channel(self, ch_type, **kwargs):
        """
        Creates a Channel object with an underlying transport callback and returns it.

        @type ch_type   BaseChannel
        """
        log.debug("NodeB.channel")
        with self._lock:
            # having _queue_auto_delete on is a pre-req to being able to pool.
            if ch_type == channel.BidirClientChannel and not ch_type._queue_auto_delete:
                chid = self._pool.get_id()
                if chid in self._bidir_pool:
                    log.debug("BidirClientChannel requested, pulling from pool (%d)", chid)
                    assert not chid in self._pool_map.values()
                    ch = self._bidir_pool[chid]
                    self._pool_map[ch.get_channel_id()] = chid
                else:
                    log.debug("BidirClientChannel requested, no pool items available, creating new (%d)", chid)
                    ch = self._new_channel(ch_type, **kwargs)
                    ch.set_close_callback(self.on_channel_request_close)
                    self._bidir_pool[chid] = ch
                    self._pool_map[ch.get_channel_id()] = chid
            else:
                ch = self._new_channel(ch_type, **kwargs)
            assert ch

        return ch

    def on_channel_request_close(self, ch):
        """
        Close callback for pooled Channels.

        When a new, pooled Channel is created that this Node manages, it will specify this as the
        close callback in order to prevent that Channel from actually closing.
        """
        log.debug("NodeB: on_channel_request_close\n\tChType %s, Ch#: %d",
                  ch.__class__, ch.get_channel_id())

        assert ch.get_channel_id() in self._pool_map
        with self._lock:
            chid = self._pool_map.pop(ch.get_channel_id())
            log.debug("Releasing BiDir pool Pika #%d, our id #%d",
                      ch.get_channel_id(), chid)
            self._pool.release_id(chid)

            # reset channel
            ch.reset()

            # sanity check: if auto delete got turned on, we must remove this channel from the pool
            if ch._queue_auto_delete:
                log.warn("A pooled channel now has _queue_auto_delete set true, we must remove it: check what caused this as it's likely a timing error")

                self._bidir_pool.pop(chid)
                self._pool._ids_free.remove(chid)
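
A note on blocking_cb, which the NodeB examples call but never define here: its apparent contract is to invoke a function with a freshly-made callback bound to the named keyword argument, block the calling greenlet until that callback fires, and return whatever the callback received. A minimal sketch under that assumption (an inferred helper, not pyon's actual implementation):

from gevent.event import AsyncResult

def blocking_cb(func, cb_arg, *args, **kwargs):
    """
    Call func(*args, **kwargs) with a callback bound to the keyword
    argument named by cb_arg; block until the callback fires and return
    what it was called with.
    """
    result = AsyncResult()

    def _cb(*cb_args):
        # hand whatever the callback received back to the waiting greenlet
        result.set(cb_args[0] if len(cb_args) == 1 else cb_args)

    kwargs[cb_arg] = _cb
    func(*args, **kwargs)
    return result.get()

Under this reading, blocking_cb(self.client.channel, 'on_open_callback', channel_number=ch_number) blocks until pika opens the channel and evaluates to the opened channel object.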
Example 15
class NodeB(BaseNode):
    """
    Blocking interface to AMQP messaging primitives.

    Wrap around Node and create blocking interface for getting channel
    objects.
    """

    def __init__(self):
        log.debug("In NodeB.__init__")
        BaseNode.__init__(self)

        self._pool = IDPool()
        self._bidir_pool = {}   # maps inactive/active our numbers (from self._pool) to channels
        self._pool_map = {}     # maps active pika channel numbers to our numbers (from self._pool)
        self._dead_pool = []    # channels removed from pool for failing health test, for later forensics

    def stop_node(self):
        """
        Closes the connection to the broker, cleans up resources held by this node.
        """
        log.debug("NodeB.stop_node (running: %s)", self.running)

        if self.running:
            # clean up pooling before we shut connection
            self._destroy_pool()
            self.client.close()

        BaseNode.stop_node(self)

    def _destroy_pool(self):
        """
        Explicitly deletes pooled queues in this Node.
        """
        for chan in self._bidir_pool.itervalues():
            if chan._recv_name:
                chan._destroy_queue()

    def _new_transport(self, ch_number=None):
        """
        Creates a new AMQPTransport with an underlying Pika channel.
        """
        amq_chan = blocking_cb(self.client.channel, 'on_open_callback', channel_number=ch_number)
        if amq_chan is None:
            log.error("AMQCHAN IS NONE THIS SHOULD NEVER HAPPEN, chan number requested: %s", ch_number)
            from pyon.container.cc import Container
            if Container.instance is not None:
                Container.instance.fail_fast("AMQCHAN IS NONE, messaging has failed", True)
            raise StandardError("AMQCHAN IS NONE THIS SHOULD NEVER HAPPEN, chan number requested: %s" % ch_number)

        transport = AMQPTransport(amq_chan)

        # return the pending in collection (lets this number be assigned again later)
        self.client._pending.remove(transport.channel_number)

        # by default, everything should have a prefetch count of 1 (configurable)
        # this can be overridden by the channel get_n related methods
        transport.qos_impl(prefetch_count=CFG.get_safe('container.messaging.endpoint.prefetch_count', 1))

        return transport

    def _check_pooled_channel_health(self, ch):
        """
        Returns true if the channel has the proper callbacks in pika for delivery.

        We're seeing an issue where channels are considered open and consuming by pika, RabbitMQ,
        and our layer, but the "callbacks" mechanism in pika does not have any entries for
        delivering messages to our layer, therefore messages are being dropped. Rabbit is happily
        sending messages along, resulting in large numbers of UNACKED messages.

        If this method returns false, the channel should be discarded and a new one created.
        """
        cbs = self.client.callbacks._callbacks
        if "_on_basic_deliver" not in cbs[ch.get_channel_id()].iterkeys():
            return False

        return True

    def channel(self, ch_type, transport=None):
        """
        Creates a Channel object with an underlying transport callback and returns it.

        @type ch_type   BaseChannel
        """
        #log.debug("NodeB.channel")
        with self._lock:
            # having _queue_auto_delete on is a pre-req to being able to pool.
            if ch_type == channel.BidirClientChannel and not ch_type._queue_auto_delete:

                # only attempt this 5 times - somewhat arbitrary but we can't have an infinite loop here
                attempts = 5
                while attempts > 0:
                    attempts -= 1

                    chid = self._pool.get_id()
                    if chid in self._bidir_pool:
                        log.debug("BidirClientChannel requested, pulling from pool (%d)", chid)
                        assert not chid in self._pool_map.values()

                        # we need to check the health of this bidir channel
                        ch = self._bidir_pool[chid]
                        if not self._check_pooled_channel_health(ch):
                            log.warning("Channel (%d) failed health check, removing from pool", ch.get_channel_id())

                            # return chid to the id pool
                            self._pool.release_id(chid)

                            # remove this channel from the pool, put into dead pool
                            self._dead_pool.append(ch)
                            del self._bidir_pool[chid]

                            # now close the channel (must remove our close callback which returns it to the pool)
                            assert ch._close_callback == self.on_channel_request_close
                            ch._close_callback = None
                            ch.close()

                            # resume the loop to attempt to get one again
                            continue

                        self._pool_map[ch.get_channel_id()] = chid
                    else:
                        log.debug("BidirClientChannel requested, no pool items available, creating new (%d)", chid)
                        ch = self._new_channel(ch_type, transport=transport)
                        ch.set_close_callback(self.on_channel_request_close)
                        self._bidir_pool[chid] = ch
                        self._pool_map[ch.get_channel_id()] = chid

                    # channel here is valid, exit out of attempts loop
                    break
                else:    # while loop didn't get a valid channel in X attempts
                    raise StandardError("Could not get a valid channel")

            else:
                ch = self._new_channel(ch_type, transport=transport)
            assert ch

        return ch

    def on_channel_request_close(self, ch):
        """
        Close callback for pooled Channels.

        When a new, pooled Channel is created that this Node manages, it will specify this as the
        close callback in order to prevent that Channel from actually closing.
        """
        log.debug("NodeB: on_channel_request_close\n\tChType %s, Ch#: %d", ch.__class__, ch.get_channel_id())

        assert ch.get_channel_id() in self._pool_map
        with self._lock:
            chid = self._pool_map.pop(ch.get_channel_id())
            log.debug("Releasing BiDir pool Pika #%d, our id #%d", ch.get_channel_id(), chid)
            self._pool.release_id(chid)

            # reset channel
            ch.reset()

            # sanity check: if auto delete got turned on, we must remove this channel from the pool
            if ch._queue_auto_delete:
                log.warn("A pooled channel now has _queue_auto_delete set true, we must remove it: check what caused this as it's likely a timing error")

                self._bidir_pool.pop(chid)
                self._pool._ids_free.remove(chid)
Example 16
class PoolTest(PyonTestCase):
    def setUp(self):
        self._idpool = IDPool()

    def test_get_id(self):
        self.assertEquals(self._idpool.get_id(), 1)
        self.assertEquals(self._idpool.get_id(), 2)
        self.assertEquals(self._idpool.get_id(), 3)
        self.assertEquals(self._idpool.get_id(), 4)

        self.assertEquals(self._idpool._ids_in_use, {1, 2, 3, 4})

    def test_release_id(self):
        self._idpool.get_id()
        self._idpool.release_id(1)

        self.assertEquals(self._idpool._ids_in_use, set())
        self.assertEquals(self._idpool._ids_free, {1})

    def test_get_and_release_id(self):
        self._idpool.get_id()
        self._idpool.get_id()
        self._idpool.get_id()
        self._idpool.get_id()

        self._idpool.release_id(3)
        self.assertEquals(self._idpool._ids_in_use, {1, 2, 4})
        self.assertEquals(self._idpool._ids_free, {3})
        self.assertEquals(self._idpool.get_id(), 3)

        self._idpool.release_id(2)
        self._idpool.release_id(1)

        self.assertIn(self._idpool.get_id(), {1, 2})
        self.assertIn(self._idpool.get_id(), {1, 2})
        self.assertNotIn(self._idpool.get_id(), {1, 2})  # is 5 now

    def test_release_unknown_id(self):
        self.assertEquals(self._idpool._ids_free, set())
        self.assertEquals(self._idpool._ids_in_use, set())

        self._idpool.release_id(1)

        self.assertEquals(self._idpool._ids_free, set())
        self.assertEquals(self._idpool._ids_in_use, set())

        self._idpool.get_id()
        self._idpool.get_id()

        self.assertEquals(self._idpool._ids_free, set())
        self.assertEquals(self._idpool._ids_in_use, {1, 2})

        self._idpool.release_id(3)  # still doesn't exist

        self.assertEquals(self._idpool._ids_free, set())
        self.assertEquals(self._idpool._ids_in_use, {1, 2})

    def test_different_new_id_method(self):
        new_id = lambda x: x + 2

        self._idpool = IDPool(new_id=new_id)

        self.assertEquals(self._idpool.get_id(), 2)
        self.assertEquals(self._idpool.get_id(), 4)
        self.assertEquals(self._idpool.get_id(), 6)

        self._idpool.release_id(4)

        self.assertEquals(self._idpool.get_id(), 4)
        self.assertEquals(self._idpool._last_id, 6)
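
The PoolTest cases above pin down IDPool's observable contract: fresh IDs come from applying new_id to the last ID issued (default: increment by one, starting from 0), released IDs are reissued before any fresh ID is generated, and releasing an unknown ID is a silent no-op. A minimal sketch consistent with those tests (the real pyon implementation may differ in detail):

class IDPool(object):
    """
    Pool of reusable integer IDs, reconstructed from the PoolTest
    behavior above.
    """
    def __init__(self, new_id=None):
        self._ids_in_use = set()
        self._ids_free = set()
        self._last_id = 0
        # callable deriving the next fresh id from the last one issued
        self._new_id = new_id if new_id is not None else (lambda x: x + 1)

    def get_id(self):
        if self._ids_free:
            # reuse a released id; the tests only assert set membership,
            # so any member of the free set is acceptable
            id_ = self._ids_free.pop()
        else:
            self._last_id = self._new_id(self._last_id)
            id_ = self._last_id
        self._ids_in_use.add(id_)
        return id_

    def release_id(self, the_id):
        # releasing an id that was never issued is a no-op
        # (see test_release_unknown_id)
        if the_id in self._ids_in_use:
            self._ids_in_use.remove(the_id)
            self._ids_free.add(the_id)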
Example 17
class NodeB(amqp.Node):
    """
    Blocking interface to AMQP messaging primitives.

    Wrap around Node and create blocking interface for getting channel
    objects.
    """

    def __init__(self):
        log.debug("In NodeB.__init__")
        self.ready = event.Event()
        self._lock = coros.RLock()
        self._pool = IDPool()
        self._bidir_pool = {}   # maps inactive/active our numbers (from self._pool) to channels
        self._pool_map = {}     # maps active pika channel numbers to our numbers (from self._pool)

        amqp.Node.__init__(self)

    def start_node(self):
        """
        This should only be called by on_connection_opened.
        so, maybe we don't need a start_node/stop_node interface
        """
        log.debug("In start_node")
        amqp.Node.start_node(self)
        self.running = 1
        self.ready.set()

    def _new_channel(self, ch_type, ch_number=None, **kwargs):
        """
        Creates a pyon Channel based on the passed in type, and activates it for use.
        """
        chan = ch_type(**kwargs)
        amq_chan = blocking_cb(self.client.channel, 'on_open_callback', channel_number=ch_number)
        chan.on_channel_open(amq_chan)
        return chan

    def channel(self, ch_type, **kwargs):
        """
        Creates a Channel object with an underlying transport callback and returns it.

        @type ch_type   BaseChannel
        """
        log.debug("NodeB.channel")
        with self._lock:
            # having _queue_auto_delete on is a pre-req to being able to pool.
            if ch_type == channel.BidirClientChannel and not ch_type._queue_auto_delete:
                chid = self._pool.get_id()
                if chid in self._bidir_pool:
                    log.debug("BidirClientChannel requested, pulling from pool (%d)", chid)
                    assert not chid in self._pool_map.values()
                    ch = self._bidir_pool[chid]
                    self._pool_map[ch.get_channel_id()] = chid
                else:
                    log.debug("BidirClientChannel requested, no pool items available, creating new (%d)", chid)
                    ch = self._new_channel(ch_type, **kwargs)
                    ch.set_close_callback(self.on_channel_request_close)
                    self._bidir_pool[chid] = ch
                    self._pool_map[ch.get_channel_id()] = chid
            else:
                ch = self._new_channel(ch_type, **kwargs)
            assert ch

        return ch

    def on_channel_request_close(self, ch):
        """
        Close callback for pooled Channels.

        When a new, pooled Channel is created that this Node manages, it will specify this as the
        close callback in order to prevent that Channel from actually closing.
        """
        log.debug("NodeB: on_channel_request_close\n\tChType %s, Ch#: %d", ch.__class__, ch.get_channel_id())

        assert ch.get_channel_id() in self._pool_map
        with self._lock:
            ch.stop_consume()
            chid = self._pool_map.pop(ch.get_channel_id())
            log.debug("Releasing BiDir pool Pika #%d, our id #%d", ch.get_channel_id(), chid)
            self._pool.release_id(chid)

            # sanity check: if auto delete got turned on, we must remove this channel from the pool
            if ch._queue_auto_delete:
                log.warn("A pooled channel now has _queue_auto_delete set true, we must remove it: check what caused this as it's likely a timing error")

                self._bidir_pool.pop(chid)
                self._pool._ids_free.remove(chid)
Example 18
class NodeB(BaseNode):
    """
    Blocking interface to AMQP messaging primitives.

    Wrap around Node and create blocking interface for getting channel
    objects.
    """

    def __init__(self):
        log.debug("In NodeB.__init__")
        BaseNode.__init__(self)

        self._pool = IDPool()
        self._bidir_pool = {}   # maps inactive/active our numbers (from self._pool) to channels
        self._pool_map = {}     # maps active pika channel numbers to our numbers (from self._pool)

    def stop_node(self):
        """
        Closes the connection to the broker, cleans up resources held by this node.
        """
        log.debug("NodeB.stop_node (running: %s)", self.running)

        if self.running:
            # clean up pooling before we shut connection
            self._destroy_pool()
            self.client.close()

        BaseNode.stop_node(self)

    def _destroy_pool(self):
        """
        Explicitly deletes pooled queues in this Node.
        """
        for chan in self._bidir_pool.itervalues():
            if chan._recv_name:
                chan._destroy_queue()

    def _new_transport(self, ch_number=None):
        """
        Creates a new AMQPTransport with an underlying Pika channel.
        """
        amq_chan = blocking_cb(self.client.channel, 'on_open_callback', channel_number=ch_number)
        if amq_chan is None:
            log.error("AMQCHAN IS NONE THIS SHOULD NEVER HAPPEN, chan number requested: %s", ch_number)
            traceback.print_stack()
            from pyon.container.cc import Container
            if Container.instance is not None:
                Container.instance.fail_fast("AMQCHAN IS NONE, messaging has failed", True)
            raise StandardError("AMQCHAN IS NONE THIS SHOULD NEVER HAPPEN, chan number requested: %s" % ch_number)

        transport = AMQPTransport(amq_chan)
        return transport

    def channel(self, ch_type, transport=None):
        """
        Creates a Channel object with an underlying transport callback and returns it.

        @type ch_type   BaseChannel
        """
        log.debug("NodeB.channel")
        with self._lock:
            # having _queue_auto_delete on is a pre-req to being able to pool.
            if ch_type == channel.BidirClientChannel and not ch_type._queue_auto_delete:
                chid = self._pool.get_id()
                if chid in self._bidir_pool:
                    log.debug("BidirClientChannel requested, pulling from pool (%d)", chid)
                    assert not chid in self._pool_map.values()
                    ch = self._bidir_pool[chid]
                    self._pool_map[ch.get_channel_id()] = chid
                else:
                    log.debug("BidirClientChannel requested, no pool items available, creating new (%d)", chid)
                    ch = self._new_channel(ch_type, transport=transport)
                    ch.set_close_callback(self.on_channel_request_close)
                    self._bidir_pool[chid] = ch
                    self._pool_map[ch.get_channel_id()] = chid
            else:
                ch = self._new_channel(ch_type, transport=transport)
            assert ch

        return ch

    def on_channel_request_close(self, ch):
        """
        Close callback for pooled Channels.

        When a new, pooled Channel is created that this Node manages, it will specify this as the
        close callback in order to prevent that Channel from actually closing.
        """
        log.debug("NodeB: on_channel_request_close\n\tChType %s, Ch#: %d", ch.__class__, ch.get_channel_id())

        assert ch.get_channel_id() in self._pool_map
        with self._lock:
            chid = self._pool_map.pop(ch.get_channel_id())
            log.debug("Releasing BiDir pool Pika #%d, our id #%d", ch.get_channel_id(), chid)
            self._pool.release_id(chid)

            # reset channel
            ch.reset()

            # sanity check: if auto delete got turned on, we must remove this channel from the pool
            if ch._queue_auto_delete:
                log.warn("A pooled channel now has _queue_auto_delete set true, we must remove it: check what caused this as it's likely a timing error")

                self._bidir_pool.pop(chid)
                self._pool._ids_free.remove(chid)
Example 19
    def setUp(self):
        self._idpool = IDPool()
Example 20
class NodeB(BaseNode):
    """
    Blocking interface to AMQP messaging primitives.

    Wrap around Node and create blocking interface for getting channel
    objects.
    """
    def __init__(self):
        log.debug("In NodeB.__init__")
        BaseNode.__init__(self)

        self._pool = IDPool()
        self._bidir_pool = {}   # maps inactive/active our numbers (from self._pool) to channels
        self._pool_map = {}     # maps active pika channel numbers to our numbers (from self._pool)
        self._dead_pool = []    # channels removed from pool for failing health test, for later forensics

    def stop_node(self):
        """
        Closes the connection to the broker, cleans up resources held by this node.
        """
        log.info("Closing broker connection with %s pooled channels",
                 len(self._bidir_pool))

        if self.running:
            # clean up pooling before we shut connection
            self._destroy_pool()
            self.client.close()

        BaseNode.stop_node(self)

    def _destroy_pool(self):
        """
        Explicitly deletes pooled queues in this Node.
        """
        for chan in self._bidir_pool.itervalues():
            if chan._recv_name:
                chan._destroy_queue()

    def _new_transport(self, ch_number=None):
        """
        Creates a new AMQPTransport with an underlying Pika channel.
        """
        amq_chan = blocking_cb(self.client.channel, 'on_open_callback', channel_number=ch_number)
        if amq_chan is None:
            log.error("AMQCHAN IS NONE THIS SHOULD NEVER HAPPEN, chan number requested: %s", ch_number)
            from pyon.container.cc import Container
            if Container.instance is not None:
                Container.instance.fail_fast("AMQCHAN IS NONE, messaging has failed", True)
            raise StandardError("AMQCHAN IS NONE THIS SHOULD NEVER HAPPEN, chan number requested: %s" % ch_number)

        transport = AMQPTransport(amq_chan)

        # return the pending in collection (lets this number be assigned again later)
        self.client._pending.remove(transport.channel_number)

        # by default, everything should have a prefetch count of 1 (configurable)
        # this can be overridden by the channel get_n related methods
        transport.qos_impl(prefetch_count=CFG.get_safe('container.messaging.endpoint.prefetch_count', 1))

        return transport

    def _check_pooled_channel_health(self, ch):
        """
        Returns true if the channel has the proper callbacks in pika for delivery.

        We're seeing an issue where channels are considered open and consuming by pika, RabbitMQ,
        and our layer, but the "callbacks" mechanism in pika does not have any entries for
        delivering messages to our layer, therefore messages are being dropped. Rabbit is happily
        sending messages along, resulting in large numbers of UNACKED messages.

        If this method returns false, the channel should be discarded and a new one created.
        """
        cbs = self.client.callbacks._callbacks
        if "_on_basic_deliver" not in cbs[ch.get_channel_id()].iterkeys():
            return False

        return True

    def channel(self, ch_type, transport=None):
        """
        Creates a Channel object with an underlying transport callback and returns it.
        This function uses a channel pool, such that in certain conditions, channels can be
        reused. Currently BidirClientChannel that is not auto-delete.

        @type ch_type   BaseChannel
        """
        #log.debug("NodeB.channel")
        with self._lock:
            # having _queue_auto_delete on is a pre-req to being able to pool.
            if ch_type == channel.BidirClientChannel and not ch_type._queue_auto_delete:

                # only attempt this 5 times - somewhat arbitrary but we can't have an infinite loop here
                attempts = 5
                while attempts > 0:
                    attempts -= 1

                    chid = self._pool.get_id()
                    if chid in self._bidir_pool:
                        log.debug("BidirClientChannel requested, pulling from pool (%d)", chid)
                        assert not chid in self._pool_map.values()

                        # we need to check the health of this bidir channel
                        ch = self._bidir_pool[chid]
                        if not self._check_pooled_channel_health(ch):
                            log.warning("Channel (%d) failed health check, removing from pool", ch.get_channel_id())

                            # return chid to the id pool
                            self._pool.release_id(chid)

                            # remove this channel from the pool, put into dead pool
                            self._dead_pool.append(ch)
                            del self._bidir_pool[chid]

                            # now close the channel (must remove our close callback which returns it to the pool)
                            assert ch._close_callback == self.on_channel_request_close
                            ch._close_callback = None
                            ch.close()

                            # resume the loop to attempt to get one again
                            continue

                        self._pool_map[ch.get_channel_id()] = chid
                    else:
                        log.debug("BidirClientChannel requested, no pool items available, creating new (%d)", chid)
                        ch = self._new_channel(ch_type, transport=transport)
                        ch.set_close_callback(self.on_channel_request_close)
                        self._bidir_pool[chid] = ch
                        self._pool_map[ch.get_channel_id()] = chid

                    # channel here is valid, exit out of attempts loop
                    break
                else:  # while loop didn't get a valid channel in X attempts
                    raise StandardError("Could not get a valid channel")

            else:
                # Create a different type channel
                ch = self._new_channel(ch_type, transport=transport)
            assert ch

        return ch

    def on_channel_request_close(self, ch):
        """
        Close callback for pooled Channels.

        When a new, pooled Channel is created that this Node manages, it will specify this as the
        close callback in order to prevent that Channel from actually closing.
        """
        log.debug("NodeB: on_channel_request_close\n\tChType %s, Ch#: %d",
                  ch.__class__, ch.get_channel_id())

        assert ch.get_channel_id() in self._pool_map
        with self._lock:
            chid = self._pool_map.pop(ch.get_channel_id())
            log.debug("Releasing BiDir pool Pika #%d, our id #%d",
                      ch.get_channel_id(), chid)
            self._pool.release_id(chid)

            # reset channel
            ch.reset()

            # sanity check: if auto delete got turned on, we must remove this channel from the pool
            if ch._queue_auto_delete:
                log.warn("A pooled channel now has _queue_auto_delete set true, we must remove it: check what caused this as it's likely a timing error")

                self._bidir_pool.pop(chid)
                self._pool._ids_free.remove(chid)
Example 21
class LocalRouter(object):
    """
    A RabbitMQ-like routing device implemented with gevent mechanisms for an in-memory broker.
    Using LocalTransport, can handle topic-exchange-like communication in ION within the context
    of a single container.
    """

    class ConsumerClosedMessage(object):
        """
        Dummy object used to exit queue get looping greenlets.
        """
        pass

    def __init__(self, sysname):
        self._sysname = sysname
        self.ready = Event()

        # exchange/queues/bindings
        self._exchanges = {}                            # names -> { subscriber, topictrie(queue name) }
        self._queues = {}                               # names -> gevent queue
        self._bindings_by_queue = defaultdict(list)     # queue name -> [(ex, binding)]
        self._lock_declarables = RLock()                # exchanges, queues, bindings, routing method

        # consumers
        self._consumers = defaultdict(list)             # queue name -> [ctag, channel._on_deliver]
        self._consumers_by_ctag = {}                    # ctag -> queue_name ??
        self._ctag_pool = IDPool()                      # pool of consumer tags
        self._lock_consumers = RLock()                  # lock for interacting with any consumer related attrs

        # deliveries
        self._unacked = {}                              # dtag -> (ctag, msg)
        self._lock_unacked = RLock()                    # lock for interacting with unacked field

        self._gl_msgs = None
        self._gl_pool = Pool()
        self.gl_ioloop = None

        self.errors = []

    @property
    def _connect_addr(self):
        return "inproc://%s" % self._sysname

    def start(self):
        """
        Starts all internal greenlets of this router device.
        """
        self._queue_incoming = Queue()
        self._gl_msgs = self._gl_pool.spawn(self._run_gl_msgs)
        self._gl_msgs._glname = "pyon.net AMQP msgs"
        self._gl_msgs.link_exception(self._child_failed)

        self.gl_ioloop = spawn(self._run_ioloop)
        self.gl_ioloop._glname = "pyon.net AMQP ioloop"

    def stop(self):
        self._gl_msgs.kill()    # @TODO: better
        self._gl_pool.join(timeout=5, raise_error=True)

    def _run_gl_msgs(self):
        self.ready.set()
        while True:
            ex, rkey, body, props = self._queue_incoming.get()
            try:
                with self._lock_declarables:
                    self._route(ex, rkey, body, props)
            except Exception as e:
                self.errors.append(e)
                log.exception("Routing message")

    def _route(self, exchange, routing_key, body, props):
        """
        Delivers incoming messages into queues based on known routes.
        This entire method runs in a lock (likely pretty slow).
        """
        assert exchange in self._exchanges, "Unknown exchange %s" % exchange

        queues = self._exchanges[exchange].get_all_matches(routing_key)
        log.debug("route: ex %s, rkey %s,  matched %s routes", exchange, routing_key, len(queues))

        # deliver to each queue
        for q in queues:
            assert q in self._queues
            log.debug("deliver -> %s", q)
            self._queues[q].put((exchange, routing_key, body, props))

    def _child_failed(self, gproc):
        """
        Handler method for when any child worker thread dies with error.
        Aborts the "ioloop" greenlet.
        """
        log.error("Child (%s) failed with an exception: %s", gproc, gproc.exception)

        if self.gl_ioloop:
            self.gl_ioloop.kill(exception=gproc.exception, block=False)

    def _run_ioloop(self):
        """
        An "IOLoop"-like greenlet - sits and waits until the pool is finished.
        Fits with the AMQP node.
        """
        self._gl_pool.join()

    def publish(self, exchange, routing_key, body, properties, immediate=False, mandatory=False):
        self._queue_incoming.put((exchange, routing_key, body, properties))
        sleep(0.0001)      # really wish switch would work instead of a sleep, seems wrong

    def declare_exchange(self, exchange, **kwargs):
        with self._lock_declarables:
            if not exchange in self._exchanges:
                self._exchanges[exchange] = TopicTrie()

    def delete_exchange(self, exchange, **kwargs):
        with self._lock_declarables:
            if exchange in self._exchanges:
                del self._exchanges[exchange]

    def declare_queue(self, queue, **kwargs):
        with self._lock_declarables:
            # come up with new queue name if none specified
            if queue is None or queue == '':
                while True:
                    proposed = "q-%s" % str(uuid4())[0:10]
                    if proposed not in self._queues:
                        queue = proposed
                        break

            if not queue in self._queues:
                self._queues[queue] = Queue()

            return queue

    def delete_queue(self, queue, **kwargs):
        with self._lock_declarables:
            if queue in self._queues:
                del self._queues[queue]

                # kill bindings
                for ex, binding in self._bindings_by_queue[queue]:
                    if ex in self._exchanges:
                        self._exchanges[ex].remove_topic_tree(binding, queue)

                self._bindings_by_queue.pop(queue)

    def bind(self, exchange, queue, binding):
        log.info("Bind: ex %s, q %s, b %s", exchange, queue, binding)
        with self._lock_declarables:
            assert exchange in self._exchanges, "Missing exchange %s in list of exchanges" % str(exchange)
            assert queue in self._queues

            tt = self._exchanges[exchange]

            tt.add_topic_tree(binding, queue)
            self._bindings_by_queue[queue].append((exchange, binding))

    def unbind(self, exchange, queue, binding):
        with self._lock_declarables:
            assert exchange in self._exchanges
            assert queue in self._queues

            self._exchanges[exchange].remove_topic_tree(binding, queue)
            for i, val in enumerate(self._bindings_by_queue[queue]):
                ex, b = val
                if ex == exchange and b == binding:
                    self._bindings_by_queue[queue].pop(i)
                    break

    def start_consume(self, callback, queue, no_ack=False, exclusive=False):
        assert queue in self._queues

        with self._lock_consumers:
            new_ctag = self._generate_ctag()
            assert new_ctag not in self._consumers_by_ctag

            with self._lock_declarables:
                gl = self._gl_pool.spawn(self._run_consumer, new_ctag, queue, self._queues[queue], callback)
                gl.link_exception(self._child_failed)
            self._consumers[queue].append((new_ctag, callback, no_ack, exclusive, gl))
            self._consumers_by_ctag[new_ctag] = queue

            return new_ctag

    def stop_consume(self, consumer_tag):
        assert consumer_tag in self._consumers_by_ctag

        with self._lock_consumers:
            queue = self._consumers_by_ctag[consumer_tag]
            self._consumers_by_ctag.pop(consumer_tag)

            for i, consumer in enumerate(self._consumers[queue]):
                if consumer[0] == consumer_tag:

                    # notify consumer greenlet that we want to stop
                    if queue in self._queues:
                        self._queues[queue].put(self.ConsumerClosedMessage())
                    consumer[4].join(timeout=5)
                    consumer[4].kill()

                    # @TODO reject any unacked messages

                    self._consumers[queue].pop(i)
                    break

            self._return_ctag(consumer_tag)

    def _run_consumer(self, ctag, queue_name, gqueue, callback):
        cnt = 0
        while True:
            m = gqueue.get()
            if isinstance(m, self.ConsumerClosedMessage):
                break
            exchange, routing_key, body, props = m

            # create method frame
            method_frame = DotDict()
            method_frame['consumer_tag']    = ctag
            method_frame['redelivered']     = False     # @TODO
            method_frame['exchange']        = exchange
            method_frame['routing_key']     = routing_key

            # create header frame
            header_frame = DotDict()
            header_frame['headers'] = props.copy()

            # make delivery tag for ack/reject later
            dtag = self._generate_dtag(ctag, cnt)
            cnt += 1

            with self._lock_unacked:
                self._unacked[dtag] = (ctag, queue_name, m)

            method_frame['delivery_tag'] = dtag

            # deliver to callback
            try:
                callback(self, method_frame, header_frame, body)
            except Exception:
                log.exception("delivering to consumer, ignore!")

    def _generate_ctag(self):
        return "zctag-%s" % self._ctag_pool.get_id()

    def _return_ctag(self, ctag):
        self._ctag_pool.release_id(int(ctag.split("-")[-1]))

    def _generate_dtag(self, ctag, cnt):
        """
        Generates a unique delivery tag for each consumer.

        Greenlet-safe, no need to lock.
        """
        return "%s-%s" % (ctag, cnt)

    def ack(self, delivery_tag):
        assert delivery_tag in self._unacked

        with self._lock_unacked:
            del self._unacked[delivery_tag]

    def reject(self, delivery_tag, requeue=False):
        assert delivery_tag in self._unacked

        with self._lock_unacked:
            _, queue, m = self._unacked.pop(delivery_tag)
            if requeue:
                log.warn("REQUEUE: EXPERIMENTAL %s", delivery_tag)
                self._queues[queue].put(m)

    def transport_close(self, transport):
        log.warn("LocalRouter.transport_close: %s TODO", transport)
        # @TODO reject all messages in unacked spot

        # turn off any consumers from this transport

    def get_stats(self, queue):
        """
        Returns a 2-tuple of (# msgs, # consumers) on a given queue.
        """
        assert queue in self._queues

        consumers = 0
        if queue in self._consumers:
            consumers = len(self._consumers[queue])

        # the queue qsize gives you number of undelivered messages, which i think is what AMQP does too
        return (self._queues[queue].qsize(), consumers)

    def purge(self, queue):
        """
        Deletes all contents of a queue.

        @TODO could end up in a race with an infinite producer
        """
        assert queue in self._queues

        with Timeout(5):
            while not self._queues[queue].empty():
                self._queues[queue].get_nowait()
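
As a usage illustration (hypothetical code, not from the original project), the following walk-through exercises the LocalRouter API above end to end. It assumes the module's own imports (gevent's sleep, get_sys_name) and that TopicTrie implements AMQP-style wildcard matching; the names "ex1" and "events.#" are made up.

router = LocalRouter(get_sys_name())
router.start()
router.ready.wait()                       # set once _run_gl_msgs is running

router.declare_exchange("ex1")
queue = router.declare_queue(None)        # no name given -> generated "q-..." name
router.bind("ex1", queue, "events.#")

received = []

def on_deliver(router_, method_frame, header_frame, body):
    # signature matches how _run_consumer invokes callbacks above
    received.append(body)
    router_.ack(method_frame['delivery_tag'])

ctag = router.start_consume(on_deliver, queue)
router.publish("ex1", "events.test", "hello", {})

sleep(0.01)                               # let the router/consumer greenlets run
assert received == ["hello"]

router.stop_consume(ctag)
router.stop()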
Example 22
class PoolTest(PyonTestCase):

    def setUp(self):
        self._idpool = IDPool()

    def test_get_id(self):
        self.assertEquals(self._idpool.get_id(), 1)
        self.assertEquals(self._idpool.get_id(), 2)
        self.assertEquals(self._idpool.get_id(), 3)
        self.assertEquals(self._idpool.get_id(), 4)

        self.assertEquals(self._idpool._ids_in_use, { 1, 2, 3, 4 } )

    def test_release_id(self):
        self._idpool.get_id()
        self._idpool.release_id(1)

        self.assertEquals(self._idpool._ids_in_use, set())
        self.assertEquals(self._idpool._ids_free, { 1 })

    def test_get_and_release_id(self):
        self._idpool.get_id()
        self._idpool.get_id()
        self._idpool.get_id()
        self._idpool.get_id()

        self._idpool.release_id(3)
        self.assertEquals(self._idpool._ids_in_use, { 1, 2, 4 })
        self.assertEquals(self._idpool._ids_free, { 3 } )
        self.assertEquals(self._idpool.get_id(), 3)

        self._idpool.release_id(2)
        self._idpool.release_id(1)

        self.assertIn(self._idpool.get_id(), { 1, 2 })
        self.assertIn(self._idpool.get_id(), { 1, 2 })
        self.assertNotIn(self._idpool.get_id(), { 1, 2 })       # is 5 now

    def test_release_unknown_id(self):
        self.assertEquals(self._idpool._ids_free, set())
        self.assertEquals(self._idpool._ids_in_use, set())

        self._idpool.release_id(1)

        self.assertEquals(self._idpool._ids_free, set())
        self.assertEquals(self._idpool._ids_in_use, set())

        self._idpool.get_id()
        self._idpool.get_id()

        self.assertEquals(self._idpool._ids_free, set())
        self.assertEquals(self._idpool._ids_in_use, { 1, 2 })

        self._idpool.release_id(3)      # still doesn't exist

        self.assertEquals(self._idpool._ids_free, set())
        self.assertEquals(self._idpool._ids_in_use, {1, 2} )

    def test_different_new_id_method(self):
        new_id = lambda x: x + 2

        self._idpool = IDPool(new_id=new_id)

        self.assertEquals(self._idpool.get_id(), 2)
        self.assertEquals(self._idpool.get_id(), 4)
        self.assertEquals(self._idpool.get_id(), 6)

        self._idpool.release_id(4)

        self.assertEquals(self._idpool.get_id(), 4)
        self.assertEquals(self._idpool._last_id, 6)
Example 23
class LocalRouter(object):
    """
    A RabbitMQ-like routing device implemented with gevent mechanisms for an in-memory broker.

    Using LocalTransport, can handle topic-exchange-like communication in ION within the context
    of a single container.
    """
    class ConsumerClosedMessage(object):
        """
        Dummy object used to exit queue get looping greenlets.
        """
        pass

    def __init__(self, sysname):
        self._sysname = sysname
        self.ready = Event()

        # exchange/queues/bindings
        self._exchanges = {}                            # names -> { subscriber, topictrie(queue name) }
        self._queues = {}                               # names -> gevent queue
        self._bindings_by_queue = defaultdict(list)     # queue name -> [(ex, binding)]
        self._lock_declarables = coros.RLock()          # exchanges, queues, bindings, routing method

        # consumers
        self._consumers = defaultdict(list)             # queue name -> [ctag, channel._on_deliver]
        self._consumers_by_ctag = {}                    # ctag -> queue_name ??
        self._ctag_pool = IDPool()                      # pool of consumer tags
        self._lock_consumers = coros.RLock()            # lock for interacting with any consumer related attrs

        # deliveries
        self._unacked = {}                              # dtag -> (ctag, msg)
        self._lock_unacked = coros.RLock()              # lock for interacting with unacked field

        self._gl_msgs = None
        self._gl_pool = Pool()
        self.gl_ioloop = None

        self.errors = []

    @property
    def _connect_addr(self):
        return "inproc://%s" % self._sysname

    def start(self):
        """
        Starts all internal greenlets of this router device.
        """
        self._queue_incoming = Queue()
        self._gl_msgs = self._gl_pool.spawn(self._run_gl_msgs)
        self._gl_msgs.link_exception(self._child_failed)

        self.gl_ioloop = spawn(self._run_ioloop)

    def stop(self):
        self._gl_msgs.kill()  # @TODO: better
        self._gl_pool.join(timeout=5, raise_error=True)

    def _run_gl_msgs(self):
        self.ready.set()
        while True:
            ex, rkey, body, props = self._queue_incoming.get()
            try:
                with self._lock_declarables:
                    self._route(ex, rkey, body, props)
            except Exception as e:
                self.errors.append(e)
                log.exception("Routing message")

    def _route(self, exchange, routing_key, body, props):
        """
        Delivers incoming messages into queues based on known routes.

        This entire method runs in a lock (likely pretty slow).
        """
        assert exchange in self._exchanges, "Unknown exchange %s" % exchange

        queues = self._exchanges[exchange].get_all_matches(routing_key)
        log.debug("route: ex %s, rkey %s,  matched %s routes", exchange,
                  routing_key, len(queues))

        # deliver to each queue
        for q in queues:
            assert q in self._queues
            log.debug("deliver -> %s", q)
            self._queues[q].put((exchange, routing_key, body, props))

    def _child_failed(self, gproc):
        """
        Handler method for when any child worker thread dies with error.

        Aborts the "ioloop" greenlet.
        """
        log.error("Child (%s) failed with an exception: %s", gproc,
                  gproc.exception)

        if self.gl_ioloop:
            self.gl_ioloop.kill(exception=gproc.exception, block=False)

    def _run_ioloop(self):
        """
        An "IOLoop"-like greenlet - sits and waits until the pool is finished.

        Fits with the AMQP node.
        """
        self._gl_pool.join()

    def publish(self, exchange, routing_key, body, properties, immediate=False, mandatory=False):
        self._queue_incoming.put((exchange, routing_key, body, properties))
        sleep(0.0001)  # really wish switch would work instead of a sleep, seems wrong

    def declare_exchange(self, exchange, **kwargs):
        with self._lock_declarables:
            if not exchange in self._exchanges:
                self._exchanges[exchange] = TopicTrie()

    def delete_exchange(self, exchange, **kwargs):
        with self._lock_declarables:
            if exchange in self._exchanges:
                del self._exchanges[exchange]

    def declare_queue(self, queue, **kwargs):
        with self._lock_declarables:
            # come up with new queue name if none specified
            if queue is None or queue == '':
                while True:
                    proposed = "q-%s" % str(uuid4())[0:10]
                    if proposed not in self._queues:
                        queue = proposed
                        break

            if not queue in self._queues:
                self._queues[queue] = Queue()

            return queue

    def delete_queue(self, queue, **kwargs):
        with self._lock_declarables:
            if queue in self._queues:
                del self._queues[queue]

                # kill bindings
                for ex, binding in self._bindings_by_queue[queue]:
                    if ex in self._exchanges:
                        self._exchanges[ex].remove_topic_tree(binding, queue)

                self._bindings_by_queue.pop(queue)

    def bind(self, exchange, queue, binding):
        log.info("Bind: ex %s, q %s, b %s", exchange, queue, binding)
        with self._lock_declarables:
            assert exchange in self._exchanges, "Missing exchange %s in list of exchanges" % str(exchange)
            assert queue in self._queues

            tt = self._exchanges[exchange]

            tt.add_topic_tree(binding, queue)
            self._bindings_by_queue[queue].append((exchange, binding))

    def unbind(self, exchange, queue, binding):
        with self._lock_declarables:
            assert exchange in self._exchanges
            assert queue in self._queues

            self._exchanges[exchange].remove_topic_tree(binding, queue)
            for i, val in enumerate(self._bindings_by_queue[queue]):
                ex, b = val
                if ex == exchange and b == binding:
                    self._bindings_by_queue[queue].pop(i)
                    break

    def start_consume(self, callback, queue, no_ack=False, exclusive=False):
        assert queue in self._queues

        with self._lock_consumers:
            new_ctag = self._generate_ctag()
            assert new_ctag not in self._consumers_by_ctag

            with self._lock_declarables:
                gl = self._gl_pool.spawn(self._run_consumer, new_ctag, queue,
                                         self._queues[queue], callback)
                gl.link_exception(self._child_failed)
            self._consumers[queue].append((new_ctag, callback, no_ack, exclusive, gl))
            self._consumers_by_ctag[new_ctag] = queue

            return new_ctag

    def stop_consume(self, consumer_tag):
        assert consumer_tag in self._consumers_by_ctag

        with self._lock_consumers:
            queue = self._consumers_by_ctag[consumer_tag]
            self._consumers_by_ctag.pop(consumer_tag)

            for i, consumer in enumerate(self._consumers[queue]):
                if consumer[0] == consumer_tag:

                    # notify consumer greenlet that we want to stop
                    if queue in self._queues:
                        self._queues[queue].put(self.ConsumerClosedMessage())
                    consumer[4].join(timeout=5)
                    consumer[4].kill()

                    # @TODO reject any unacked messages

                    self._consumers[queue].pop(i)
                    break

            self._return_ctag(consumer_tag)

    def _run_consumer(self, ctag, queue_name, gqueue, callback):
        cnt = 0
        while True:
            m = gqueue.get()
            if isinstance(m, self.ConsumerClosedMessage):
                break
            exchange, routing_key, body, props = m

            # create method frame
            method_frame = DotDict()
            method_frame['consumer_tag'] = ctag
            method_frame['redelivered'] = False  # @TODO
            method_frame['exchange'] = exchange
            method_frame['routing_key'] = routing_key

            # create header frame
            header_frame = DotDict()
            header_frame['headers'] = props.copy()

            # make delivery tag for ack/reject later
            dtag = self._generate_dtag(ctag, cnt)
            cnt += 1

            with self._lock_unacked:
                self._unacked[dtag] = (ctag, queue_name, m)

            method_frame['delivery_tag'] = dtag

            # deliver to callback
            try:
                callback(self, method_frame, header_frame, body)
            except Exception:
                log.exception("delivering to consumer, ignore!")

    def _generate_ctag(self):
        return "zctag-%s" % self._ctag_pool.get_id()

    def _return_ctag(self, ctag):
        self._ctag_pool.release_id(int(ctag.split("-")[-1]))

    def _generate_dtag(self, ctag, cnt):
        """
        Generates a unique delivery tag for each consumer.

        Greenlet-safe, no need to lock.
        """
        return "%s-%s" % (ctag, cnt)

    def ack(self, delivery_tag):
        assert delivery_tag in self._unacked

        with self._lock_unacked:
            del self._unacked[delivery_tag]

    def reject(self, delivery_tag, requeue=False):
        assert delivery_tag in self._unacked

        with self._lock_unacked:
            _, queue, m = self._unacked.pop(delivery_tag)
            if requeue:
                log.warn("REQUEUE: EXPERIMENTAL %s", delivery_tag)
                self._queues[queue].put(m)

    def transport_close(self, transport):
        log.warn("LocalRouter.transport_close: %s TODO", transport)
        # @TODO reject all messages in unacked spot

        # turn off any consumers from this transport

    def get_stats(self, queue):
        """
        Returns a 2-tuple of (# msgs, # consumers) on a given queue.
        """
        assert queue in self._queues

        consumers = 0
        if queue in self._consumers:
            consumers = len(self._consumers[queue])

        # the queue qsize gives you the number of undelivered messages, which I think is what AMQP does too
        return (self._queues[queue].qsize(), consumers)

    def purge(self, queue):
        """
        Deletes all contents of a queue.

        @TODO could end up in a race with an infinite producer
        """
        assert queue in self._queues

        with Timeout(5):
            while not self._queues[queue].empty():
                self._queues[queue].get_nowait()
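
Taken together, the methods above act as an in-process stand-in for a broker: declare, bind, consume, publish, ack. The following is a minimal usage sketch, not taken from the source: it assumes a LocalRouter whose message greenlet is already running (so publishes get drained into _route), assumes AMQP-style topic wildcards in TopicTrie, and the exchange, binding, and callback names are hypothetical.

router = LocalRouter("testsys")        # assumes the router has been started
router.declare_exchange("ex")
qname = router.declare_queue(None)     # auto-generates a "q-..." name
router.bind("ex", qname, "orders.*")   # topic binding, wildcard assumed

def on_msg(node, method_frame, header_frame, body):
    # node is the LocalRouter itself; frames are DotDicts built in _run_consumer
    print "got %r via %s" % (body, method_frame['routing_key'])
    node.ack(method_frame['delivery_tag'])

ctag = router.start_consume(on_msg, qname)
router.publish("ex", "orders.new", "hello", {})   # properties must be a dict
sleep(0.1)                             # let the router greenlet deliver first
router.stop_consume(ctag)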
Example no. 24
    def setUp(self):
        self._idpool = IDPool()
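
The fixture above only builds the pool; a test body would exercise the get/release cycle that NodeB's channel pooling below depends on. A sketch under one assumption: that a released id is handed out again before a new one is minted, which is what the pool's free list (see _ids_free below) implies.

    def test_get_and_release(self):
        first = self._idpool.get_id()
        second = self._idpool.get_id()
        self.assertNotEquals(first, second)

        # a released id should be re-used on the next request
        self._idpool.release_id(first)
        self.assertEquals(self._idpool.get_id(), first)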
Example no. 25
class NodeB(amqp.Node):
    """
    Blocking interface to AMQP messaging primitives.

    Wrap around Node and create blocking interface for getting channel
    objects.
    """
    def __init__(self):
        log.debug("In NodeB.__init__")
        self.running = False
        self.ready = event.Event()
        self._lock = coros.RLock()
        self._pool = IDPool()
        self._bidir_pool = {}   # maps inactive/active our numbers (from self._pool) to channels
        self._pool_map = {}     # maps active pika channel numbers to our numbers (from self._pool)

        self.interceptors = {}  # endpoint interceptors

        amqp.Node.__init__(self)

    def start_node(self):
        """
        This should only be called by on_connection_opened,
        so we may not need a start_node/stop_node interface.
        """
        log.debug("In start_node")
        amqp.Node.start_node(self)
        self.running = True
        self.ready.set()

    def stop_node(self):
        """
        Closes the connection to the broker, cleans up resources held by this node.
        """
        log.debug("NodeB.stop_node (running: %s)", self.running)

        if self.running:
            # clean up pooling before we shut connection
            self._destroy_pool()
            self.client.close()
        self.running = False

    def _destroy_pool(self):
        """
        Explicitly deletes pooled queues in this Node.
        """
        for chan in self._bidir_pool.itervalues():
            chan._destroy_queue()

    def _new_channel(self, ch_type, ch_number=None, **kwargs):
        """
        Creates a pyon Channel based on the passed in type, and activates it for use.
        """
        chan = ch_type(**kwargs)
        amq_chan = blocking_cb(self.client.channel, 'on_open_callback', channel_number=ch_number)
        if amq_chan is None:
            log.error("AMQCHAN IS NONE THIS SHOULD NEVER HAPPEN, chan number requested: %s", ch_number)
            import traceback
            traceback.print_stack()
            raise StandardError("AMQCHAN IS NONE THIS SHOULD NEVER HAPPEN, chan number requested: %s" % ch_number)

        chan.on_channel_open(amq_chan)
        return chan

    def channel(self, ch_type, **kwargs):
        """
        Creates a Channel object with an underlying transport callback and returns it.

        @type ch_type   BaseChannel
        """
        log.debug("NodeB.channel")
        with self._lock:
            # pooling requires the channel type's _queue_auto_delete to be off
            if ch_type == channel.BidirClientChannel and not ch_type._queue_auto_delete:
                chid = self._pool.get_id()
                if chid in self._bidir_pool:
                    log.debug("BidirClientChannel requested, pulling from pool (%d)", chid)
                    assert chid not in self._pool_map.values()
                    ch = self._bidir_pool[chid]
                    self._pool_map[ch.get_channel_id()] = chid
                else:
                    log.debug("BidirClientChannel requested, no pool items available, creating new (%d)", chid)
                    ch = self._new_channel(ch_type, **kwargs)
                    ch.set_close_callback(self.on_channel_request_close)
                    self._bidir_pool[chid] = ch
                    self._pool_map[ch.get_channel_id()] = chid
            else:
                ch = self._new_channel(ch_type, **kwargs)
            assert ch

        return ch

    def on_channel_request_close(self, ch):
        """
        Close callback for pooled Channels.

        When a new, pooled Channel is created that this Node manages, it will specify this as the
        close callback in order to prevent that Channel from actually closing.
        """
        log.debug("NodeB: on_channel_request_close\n\tChType %s, Ch#: %d",
                  ch.__class__, ch.get_channel_id())

        assert ch.get_channel_id() in self._pool_map
        with self._lock:
            chid = self._pool_map.pop(ch.get_channel_id())
            log.debug("Releasing BiDir pool Pika #%d, our id #%d",
                      ch.get_channel_id(), chid)
            self._pool.release_id(chid)

            # reset channel
            ch.reset()

            # sanity check: if auto delete got turned on, we must remove this channel from the pool
            if ch._queue_auto_delete:
                log.warn("A pooled channel now has _queue_auto_delete set true, we must remove it: check what caused this as it's likely a timing error")

                self._bidir_pool.pop(chid)
                # also drop the id from IDPool's free list so it is never handed out again
                self._pool._ids_free.remove(chid)

    def setup_interceptors(self, interceptor_cfg):
        stack = interceptor_cfg["stack"]
        defs = interceptor_cfg["interceptors"]

        interceptors = defaultdict(list)

        by_name_dict = {}
        for type_and_direction in stack:
            interceptor_names = stack[type_and_direction]
            for name in interceptor_names:
                if name in by_name_dict:
                    classinst = by_name_dict[name]
                else:
                    interceptor_def = defs[name]

                    # Instantiate and put in by_name array
                    modpath, classname = interceptor_def['class'].rsplit('.', 1)
                    classinst = for_name(modpath, classname)

                    # Call configure
                    classinst.configure(config=interceptor_def["config"] if "config" in interceptor_def else None)

                    # Put in by_name_dict for possible re-use
                    by_name_dict[name] = classinst

                interceptors[type_and_direction].append(classinst)

        self.interceptors = dict(interceptors)
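
For reference, setup_interceptors walks a two-part config: "stack" maps each type-and-direction key to an ordered list of interceptor names, and "interceptors" maps each name to a class path plus an optional per-interceptor config. A hypothetical config showing that shape (the class paths and stack keys here are illustrative, not taken from a real deployment):

interceptor_cfg = {
    "interceptors": {
        "codec":    {"class": "myproj.interceptors.CodecInterceptor"},
        "validate": {"class": "myproj.interceptors.ValidateInterceptor",
                     "config": {"enabled": True}},
    },
    "stack": {
        "message_outgoing": ["codec"],
        "message_incoming": ["codec", "validate"],
    },
}

node = NodeB()
node.setup_interceptors(interceptor_cfg)
# node.interceptors now maps each stack key to its ordered list of instances

Note that each named interceptor is instantiated and configured once (via by_name_dict), so the same instance is shared by every stack entry that references it.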