Example No. 1
    def get_realtime_visualization_data(self, query_token=''):
        """This operation returns a block of visualization data for displaying data product in real time. This operation requires a
        user specific token which was provided from a previous request to the init_realtime_visualization operation.

        @param query_token    str
        @retval datatable    str
        @throws NotFound    Throws if specified query_token or its visualization product does not exist
        """
        log.debug( "get_realtime_visualization_data Vis worker: %s", self.id)

        ret_val = []
        if not query_token:
            raise BadRequest("The query_token parameter is missing")


        #Taking advantage of idempotency
        xq = self.container.ex_manager.create_xn_queue(query_token)


        subscriber = Subscriber(from_name=xq)
        subscriber.initialize()

        msgs = subscriber.get_all_msgs(timeout=2)
        for msg in msgs:
            msg.ack()

        # Different messages should get processed differently. Ret val will be decided by the viz product type
        ret_val = self._process_visualization_message(msgs)


        #TODO - replace as need be to return valid GDT data
        #return {'viz_data': ret_val}

        return ret_val
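A minimal sketch of the drain pattern used above, assuming a started pyon container is in scope as `container` and using an illustrative token name:

# Sketch: drain all pending messages from a named visualization queue.
# `container` and the queue name are illustrative assumptions.
xq = container.ex_manager.create_xn_queue("some_query_token")  # idempotent create/lookup
subscriber = Subscriber(from_name=xq)
subscriber.initialize()
try:
    msgs = subscriber.get_all_msgs(timeout=2)  # whatever arrived within the window
    for msg in msgs:
        msg.ack()  # acknowledge so the broker can discard the message
finally:
    subscriber.close()  # always free the broker resources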
Example No. 2
    def __init__(self, xp_name=None, event_type=None, origin=None, queue_name=None, callback=None,
                 sub_type=None, origin_type=None, pattern=None, auto_delete=None, *args, **kwargs):
        """
        Initializer.

        If the queue_name is specified here, the sysname is prefixed automatically to it. This is because
        named queues are not namespaced to their exchanges, so two different systems on the same broker
        can cross-pollute messages if a named queue is used.

        Note: an EventSubscriber needs to be closed to free broker resources
        """
        self._cbthread = None

        # sets self._ev_recv_name, self.binding
        BaseEventSubscriberMixin.__init__(self, xp_name=xp_name, event_type=event_type, origin=origin,
                                          queue_name=queue_name, sub_type=sub_type, origin_type=origin_type,
                                          pattern=pattern, auto_delete=auto_delete)

        log.debug("EventPublisher events pattern %s", self.binding)

        from_name = self._get_from_name()
        binding   = self._get_binding()

        Subscriber.__init__(self, from_name=from_name, binding=binding, callback=callback,
                            auto_delete=self._auto_delete, **kwargs)
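A usage sketch for the subscriber above; `start()` is an assumption inferred from the `_cbthread` bookkeeping, and the event type and origin are illustrative:

# Sketch: subscribe to events, then release broker resources when done.
def on_event(event, headers):
    log.debug("received event: %s", event)

ev_sub = EventSubscriber(event_type="ResourceEvent", origin="resource_1", callback=on_event)
ev_sub.start()   # assumed activation call that spawns the listener greenlet
# ... events are delivered to on_event ...
ev_sub.close()   # required, per the docstring, to free broker resources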
Example No. 3
    def test_subscribe(self):
        #Test Subscriber.
        #The goal of this test is to get messages routed to the callback mock.
        cbmock = Mock()
        sub = Subscriber(node=self._node, from_name="testsub", callback=cbmock)

        # tell the subscriber to create this as the main listening channel
        listen_channel_mock = self._setup_mock_channel(
            ch_type=SubscriberChannel, value="subbed", error_message="")
        sub.node.channel.return_value = listen_channel_mock

        # tell our channel to return itself when accepted
        listen_channel_mock.accept.return_value = listen_channel_mock

        # we're ready! call listen
        sub.listen()

        # make sure we got our message
        cbmock.assert_called_once_with(
            'subbed', {
                'conv-id': sentinel.conv_id,
                'status_code': 200,
                'error_message': '',
                'op': None
            })
Example No. 4
    def test_create_endpoint(self):
        def mycb(msg, headers):
            return "test"

        sub = Subscriber(node=self._node, from_name="testsub", callback=mycb)
        e = sub.create_endpoint()

        self.assertEquals(e._callback, mycb)
Example No. 5
    def test_create_endpoint(self):
        def mycb(msg, headers):
            return "test"

        sub = Subscriber(node=self._node, from_name="testsub", callback=mycb)
        e = sub.create_endpoint()

        self.assertEquals(e._callback, mycb)
Example No. 6
    def __init__(self,  callback=None, pattern='#', *args, **kwargs):
        """
        Note: a ConversationSubscriber needs to be closed to free broker resources
        """
        self._cbthread = None
        self.binding = pattern

        log.debug("ConversationSubscriber pattern %s", self.binding)

        Subscriber.__init__(self, binding=self.binding, callback=callback, **kwargs)
Example No. 7
    def __init__(self, process=None):
        self.process = process

        self.async_res = AsyncResult()
        self.wait_name = "asyncresult_" + create_simple_unique_id()
        if self.process:
            self.wait_name = self.wait_name + "_" + self.process.id
        # TODO: Use same mechanism as pooled RPC response endpoint (without the request)
        self.wait_sub = Subscriber(from_name=self.wait_name,
                                   callback=self._result_callback,
                                   auto_delete=True)
        self.activated = False
Example No. 8
    def get_realtime_visualization_data(self, query_token='', callback='', tqx=""):
        """This operation returns a block of visualization data for displaying data product in real time. This operation requires a
        user specific token which was provided from a previous request to the init_realtime_visualization operation.

        @param query_token    str
        @retval datatable    str
        @throws NotFound    Throws if specified query_token or its visualization product does not exist
        """

        print " >>>>>>>>>>>>>>> QUERY TOKEN : ", query_token
        print " >>>>>>>>>>>>>>> callback : ", callback
        print ">>>>>>>>>>>>>>>  TQX : ", tqx

        reqId = 0
        # If a reqId was passed in tqx, extract it
        if tqx:
            tqx_param_list = tqx.split(";")
            for param in tqx_param_list:
                key, value = param.split(":")
                if key == 'reqId':
                    reqId = value

        ret_val = []
        if not query_token:
            raise BadRequest("The query_token parameter is missing")

        #try:

        #Taking advantage of idempotency
        xq = self.container.ex_manager.create_xn_queue(query_token)


        subscriber = Subscriber(from_name=xq)
        subscriber.initialize()

        msgs = subscriber.get_all_msgs(timeout=2)
        for msg in msgs:
            msg.ack()

        # Different messages should get processed differently. Ret val will be decided by the viz product type
        ret_val = self._process_visualization_message(msgs, callback, reqId)

        #except Exception, e:
        #    raise e

        #finally:
        #    subscriber.close()

        #TODO - replace as need be to return valid GDT data
        #return {'viz_data': ret_val}

        return ret_val
Example No. 9
    def __init__(self,
                 xp_name=None,
                 event_type=None,
                 origin=None,
                 queue_name=None,
                 callback=None,
                 sub_type=None,
                 origin_type=None,
                 *args,
                 **kwargs):
        """
        Initializer.

        If the queue_name is specified here, the sysname is prefixed automatically to it. This is because
        named queues are not namespaced to their exchanges, so two different systems on the same broker
        can cross-pollute messages if a named queue is used.

        Note: an EventSubscriber needs to be closed to free broker resources
        """
        self.callback = callback
        self._cbthread = None

        self.event_type = event_type
        self.sub_type = sub_type
        self.origin_type = origin_type
        self.origin = origin

        xp_name = xp_name or get_events_exchange_point()
        binding = self._topic(event_type, origin, sub_type, origin_type)
        self.binding = binding

        # TODO: Provide a case where we can have multiple bindings (e.g. different event_types)

        # prefix the queue_name, if specified, with the sysname
        # this is because queue names transcend xp boundaries (see R1 OOIION-477)
        if queue_name is not None:
            if not queue_name.startswith(bootstrap.get_sys_name()):
                queue_name = "%s.%s" % (bootstrap.get_sys_name(), queue_name)
                log.warn(
                    "queue_name specified, prepending sys_name to it: %s" %
                    queue_name)

        name = (xp_name, queue_name)

        log.debug("EventPublisher events pattern %s", binding)

        Subscriber.__init__(self,
                            from_name=name,
                            binding=binding,
                            callback=self._callback,
                            **kwargs)
Example No. 10
    def get_realtime_visualization_data(self, query_token='', callback='', tqx=""):
        """This operation returns a block of visualization data for displaying data product in real time. This operation requires a
        user specific token which was provided from a previous request to the init_realtime_visualization operation.

        @param query_token    str
        @retval datatable    str
        @throws NotFound    Throws if specified query_token or its visualization product does not exist
        """

        log.debug("Query token : " + query_token + " CB : " + callback + "TQX : " + tqx)

        reqId = 0
        # If a reqId was passed in tqx, extract it
        if tqx:
            tqx_param_list = tqx.split(";")
            for param in tqx_param_list:
                key, value = param.split(":")
                if key == 'reqId':
                    reqId = value

        ret_val = []
        if not query_token:
            raise BadRequest("The query_token parameter is missing")

        #try:

        #Taking advantage of idempotency
        xq = self.container.ex_manager.create_xn_queue(query_token)


        subscriber = Subscriber(from_name=xq)
        subscriber.initialize()

        msgs = subscriber.get_all_msgs(timeout=2)
        for msg in msgs:
            msg.ack()

        # Different messages should get processed differently. Ret val will be decided by the viz product type
        ret_val = self._process_visualization_message(msgs, callback, reqId)

        #except Exception, e:
        #    raise e

        #finally:
        #    subscriber.close()

        #TODO - replace as need be to return valid GDT data
        #return {'viz_data': ret_val}

        return ret_val
Example No. 11
    def on_start(self):
        TransformDataProcess.on_start(self)

        # set up subscriber to *
        self._bt_sub = Subscriber(callback=lambda m, h: self.call_process(m),
                                  from_name=NameTrio(get_sys_name(),
                                                     'bench_queue', '*'))

        # spawn listener
        self._sub_gl = spawn(self._bt_sub.listen)

        # set up publisher to anything!
        self._bt_pub = Publisher(to_name=NameTrio(get_sys_name(),
                                                  str(uuid.uuid4())[0:6]))
Example No. 12
    def __init__(self,
                 xp_name=None,
                 event_type=None,
                 origin=None,
                 queue_name=None,
                 callback=None,
                 sub_type=None,
                 origin_type=None,
                 pattern=None,
                 auto_delete=None,
                 *args,
                 **kwargs):
        """
        Initializer.

        If the queue_name is specified here, the sysname is prefixed automatically to it. This is because
        named queues are not namespaced to their exchanges, so two different systems on the same broker
        can cross-pollute messages if a named queue is used.

        Note: an EventSubscriber needs to be closed to free broker resources
        """
        self._cbthread = None

        # sets self._ev_recv_name, self.binding
        BaseEventSubscriberMixin.__init__(self,
                                          xp_name=xp_name,
                                          event_type=event_type,
                                          origin=origin,
                                          queue_name=queue_name,
                                          sub_type=sub_type,
                                          origin_type=origin_type,
                                          pattern=pattern,
                                          auto_delete=auto_delete)

        log.debug("EventPublisher events pattern %s", self.binding)

        from_name = self._get_from_name()
        binding = self._get_binding()

        Subscriber.__init__(self,
                            from_name=from_name,
                            binding=binding,
                            callback=callback,
                            auto_delete=self._auto_delete,
                            **kwargs)
Example No. 13
    def test_subscribe(self):
        #Test Subscriber.
        #The goal of this test is to get messages routed to the callback mock.
        cbmock = Mock()
        sub = Subscriber(node=self._node, from_name="testsub", callback=cbmock)

        # tell the subscriber to create this as the main listening channel
        listen_channel_mock = self._setup_mock_channel(ch_type=SubscriberChannel, value="subbed", error_message="")
        sub.node.channel.return_value = listen_channel_mock

        # tell our channel to return itself when accepted
        listen_channel_mock.accept.return_value = listen_channel_mock

        # we're ready! call listen
        sub.listen()

        # make sure we got our message
        cbmock.assert_called_once_with('subbed', {'conv-id': sentinel.conv_id, 'status_code':200, 'error_message':'', 'op': None})
Example No. 14
    def _create_channel(self, **kwargs):
        """
        Override to set the channel's queue_auto_delete property.
        """
        ch = Subscriber._create_channel(self, **kwargs)
        if self._auto_delete is not None:
            ch.queue_auto_delete = self._auto_delete

        return ch
Example No. 15
    def _create_channel(self, **kwargs):
        """
        Override to set the channel's queue_auto_delete property.
        """
        ch = Subscriber._create_channel(self, **kwargs)
        if self._auto_delete is not None:
            ch.queue_auto_delete = self._auto_delete

        return ch
Example No. 16
    def _build_header(self, raw_msg):
        """
        Override to direct the calls in _build_header - first the Subscriber, then the Process mixin.
        """

        header1 = Subscriber._build_header(self, raw_msg)
        header2 = ProcessEndpointUnitMixin._build_header(self, raw_msg)

        header1.update(header2)

        return header1
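The merge order matters here: `header1.update(header2)` lets the Process mixin's headers overwrite any keys the plain Subscriber produced. A toy illustration with invented keys:

h1 = {'op': None, 'sender': 'subscriber-default'}        # Subscriber headers (illustrative)
h2 = {'sender': 'process-42', 'sender-type': 'service'}  # Process mixin headers (illustrative)
h1.update(h2)
# h1 == {'op': None, 'sender': 'process-42', 'sender-type': 'service'}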
Example No. 17
    def _build_header(self, raw_msg):
        """
        Override to direct the calls in _build_header - first the Subscriber, then the Process mixin.
        """

        header1 = Subscriber._build_header(self, raw_msg)
        header2 = ProcessEndpointUnitMixin._build_header(self, raw_msg)

        header1.update(header2)

        return header1
Example No. 18
    def test_subscribe(self):
        """
        Test Subscriber.
        The goal of this test is to get messages routed to the callback mock.
        """
        cbmock = Mock()
        sub = Subscriber(node=self._node, from_name="testsub", callback=cbmock)

        # tell the subscriber to create this as the main listening channel
        listen_channel_mock = self._setup_mock_channel(ch_type=SubscriberChannel, value="subbed", error_message="")
        sub.node.channel.return_value = listen_channel_mock

        # tell our channel to return itself when accepted
        listen_channel_mock.accept.return_value.__enter__.return_value = listen_channel_mock

        # we're ready! call listen
        sub.listen()

        # make sure we got our message
        cbmock.assert_called_once_with("subbed", {"status_code": 200, "error_message": "", "op": None})
Example No. 19
    def on_start(self):
        TransformDataProcess.on_start(self)

        # set up subscriber to *
        self._bt_sub = Subscriber(callback=lambda m, h: self.call_process(m),
                                  from_name=NameTrio(get_sys_name(), 'bench_queue', '*'))

        # spawn listener
        self._sub_gl = spawn(self._bt_sub.listen)

        # set up publisher to anything!
        self._bt_pub = Publisher(to_name=NameTrio(get_sys_name(), str(uuid.uuid4())[0:6]))
Example No. 20
    def __init__(self, xp_name=None, event_name=None, origin=None, queue_name=None, callback=None, *args, **kwargs):
        """
        Initializer.

        If the queue_name is specified here, the sysname is prefixed automatically to it. This is because
        named queues are not namespaced to their exchanges, so two different systems on the same broker
        can cross-pollute messages if a named queue is used.
        """
        self._event_name = event_name or self.event_name

        xp_name = xp_name or get_events_exchange_point()
        binding = self._topic(origin)

        # prefix the queue_name, if specified, with the sysname
        # this is because queue names transcend xp boundaries (see R1 OOIION-477)
        if queue_name is not None:
            if not queue_name.startswith(bootstrap.sys_name):
                queue_name = "%s.%s" % (bootstrap.sys_name, queue_name)
                log.warn("queue_name specified, prepending sys_name to it: %s" % queue_name)

        name = (xp_name, queue_name)

        Subscriber.__init__(self, name=name, binding=binding, callback=callback, **kwargs)
Example No. 21
    def __init__(self, xp_name=None, event_type=None, origin=None, queue_name=None, callback=None,
                 sub_type=None, origin_type=None, *args, **kwargs):
        """
        Initializer.

        If the queue_name is specified here, the sysname is prefixed automatically to it. This is because
        named queues are not namespaced to their exchanges, so two different systems on the same broker
        can cross-pollute messages if a named queue is used.

        Note: an EventSubscriber needs to be closed to free broker resources
        """
        self.callback = callback
        self._cbthread = None

        self.event_type = event_type
        self.sub_type = sub_type
        self.origin_type = origin_type
        self.origin = origin

        xp_name = xp_name or get_events_exchange_point()
        binding = self._topic(event_type, origin, sub_type, origin_type)
        self.binding = binding

        # TODO: Provide a case where we can have multiple bindings (e.g. different event_types)

        # prefix the queue_name, if specified, with the sysname
        # this is because queue names transcend xp boundaries (see R1 OOIION-477)
        if queue_name is not None:
            if not queue_name.startswith(bootstrap.get_sys_name()):
                queue_name = "%s.%s" % (bootstrap.get_sys_name(), queue_name)
                log.warn("queue_name specified, prepending sys_name to it: %s" % queue_name)

        name = (xp_name, queue_name)

        log.debug("EventPublisher events pattern %s", binding)

        Subscriber.__init__(self, from_name=name, binding=binding, callback=self._callback, **kwargs)
Example No. 22
    def get_realtime_visualization_data(self, query_token=''):
        """This operation returns a block of visualization data for displaying data product in real time. This operation requires a
        user specific token which was provided from a previous request to the init_realtime_visualization operation.

        @param query_token    str
        @retval datatable    str
        @throws NotFound    Throws if specified query_token or its visualization product does not exist
        """
        log.debug("get_realtime_visualization_data Vis worker: %s", self.id)

        ret_val = []
        if not query_token:
            raise BadRequest("The query_token parameter is missing")

        subscriber = None
        try:
            #Taking advantage of idempotency
            queue_name = '-'.join([USER_VISUALIZATION_QUEUE, query_token])
            xq = self.container.ex_manager.create_xn_queue(queue_name)

            subscriber = Subscriber(from_name=xq)
            subscriber.initialize()

        except Exception:
            # Close the subscriber if it was created before the failure
            if subscriber:
                subscriber.close()

            raise BadRequest("Could not subscribe to the real-time queue")

        msgs = subscriber.get_all_msgs(timeout=2)
        for msg in msgs:
            msg.ack()

        subscriber.close()

        # Different messages should get processed differently. Ret val will be decided by the viz product type
        ret_val = self._process_visualization_message(msgs)

        return ret_val
Example No. 23
class AsyncResultWaiter(object):
    """
    Class that makes waiting for an async result notification easy.
    Creates a subscriber for a generated token name, which can be handed to the async provider.
    The provider then publishes the result to the token name when ready.
    The caller can wait for the result or timeout.
    """
    def __init__(self, process=None):
        self.process = process

        self.async_res = AsyncResult()
        self.wait_name = "asyncresult_" + create_simple_unique_id()
        if self.process:
            self.wait_name = self.wait_name + "_" + self.process.id
        # TODO: Use same mechanism as pooled RPC response endpoint (without the request)
        self.wait_sub = Subscriber(from_name=self.wait_name,
                                   callback=self._result_callback,
                                   auto_delete=True)
        self.activated = False

    def activate(self):
        if self.activated:
            raise BadRequest("Already active")
        self.listen_gl = spawn(self.wait_sub.listen)  # This initializes and activates the listener
        self.wait_sub.get_ready_event().wait(timeout=1)
        self.activated = True

        return self.wait_name

    def _result_callback(self, msg, headers):
        log.debug("AsyncResultWaiter: received message")
        self.async_res.set(msg)

    def await(self, timeout=None, request_id=None):
        try:
            result = self.async_res.get(timeout=timeout)
            if request_id and isinstance(result, AsyncResultMsg) and result.request_id != request_id:
                log.warn("Received result for different request: %s", result)
                result = None

        except gevent.Timeout:
            raise Timeout("Timeout in AsyncResultWaiter name={}".format(self.wait_name))
        finally:
            self.wait_sub.deactivate()
            self.wait_sub.close()
            self.listen_gl.join(timeout=1)
            self.activated = False

        return result
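A usage sketch following the class docstring; the provider-side Publisher call in the comment is an assumption:

waiter = AsyncResultWaiter()
token = waiter.activate()          # returns the generated wait_name
# hand `token` to the async provider; when ready it publishes to that name, e.g.:
#   Publisher(to_name=token).publish(result_msg)   # assumed provider side
result = waiter.await(timeout=10)  # blocks, then tears down the subscriber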
Example No. 24
    def get_realtime_visualization_data(self, query_token=''):
        """This operation returns a block of visualization data for displaying data product in real time. This operation requires a
        user specific token which was provided from a previous request to the init_realtime_visualization operation.

        @param query_token    str
        @retval datatable    str
        @throws NotFound    Throws if specified query_token or its visualization product does not exist
        """
        log.debug( "get_realtime_visualization_data Vis worker: %s", self.id)

        ret_val = []
        if not query_token:
            raise BadRequest("The query_token parameter is missing")

        subscriber = None
        try:
            #Taking advantage of idempotency
            queue_name = '-'.join([USER_VISUALIZATION_QUEUE, query_token])
            xq = self.container.ex_manager.create_xn_queue(queue_name)

            subscriber = Subscriber(from_name=xq)
            subscriber.initialize()

        except Exception:
            # Close the subscriber if it was created before the failure
            if subscriber:
                subscriber.close()

            raise BadRequest("Could not subscribe to the real-time queue")

        msgs = subscriber.get_all_msgs(timeout=2)
        for msg in msgs:
            msg.ack()

        subscriber.close()

        # Different messages should get processed differently. Ret val will be decided by the viz product type
        ret_val = self._process_visualization_message(msgs)

        return ret_val
Example No. 25
    def get_realtime_visualization_data(self, query_token=''):
        """This operation returns a block of visualization data for displaying data product in real time. This operation requires a
        user specific token which was provided from a previous request to the init_realtime_visualization operation.

        @param query_token    str
        @retval datatable    str
        @throws NotFound    Throws if specified query_token or its visualization product does not exist
        """

        if not query_token:
            raise BadRequest("The query_token parameter is missing")

        try:

            #Taking advantage of idempotency
            xq = self.container.ex_manager.create_xn_queue(query_token)

            subscriber = Subscriber(from_name=xq)
            subscriber.initialize()

            msg_count, _ = subscriber.get_stats()
            log.info('Messages in user queue 1: %s', msg_count)

            ret_val = []
            msgs = subscriber.get_all_msgs(timeout=2)
            for msg in msgs:
                msg.ack()

            # Different messages should get processed differently. Ret val will be decided by the viz product type
            ret_val = self._process_visualization_message(msgs)

            msg_count, _ = subscriber.get_stats()
            log.info('Messages in user queue 2: %s', msg_count)

        except Exception, e:
            raise e

        return ret_val
Example No. 26
#!/usr/bin/env python

from pyon.net.endpoint import Subscriber
from pyon.net.messaging import make_node
import gevent
import time

node, iowat = make_node()


def msg_recv(msg, h):
    global counter
    counter += 1


sub = Subscriber(node=node, name="hassan", callback=msg_recv)

counter = 0
st = time.time()


def tick():
    global counter, st
    while True:
        time.sleep(2)
        ct = time.time()
        elapsed_s = ct - st

        mps = counter / elapsed_s

        print counter, "messages, per sec:", mps


# Spawn the listener and the stats ticker, then block; without these greenlets nothing runs.
listen_gl = gevent.spawn(sub.listen)
tick_gl = gevent.spawn(tick)
gevent.joinall([listen_gl, tick_gl])
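A companion driver for this throughput script might look like the sketch below; it assumes the same broker and mirrors the `name=` keyword style used by the Subscriber above:

# Sketch: flood the "hassan" queue so the counter above has something to count.
from pyon.net.endpoint import Publisher
from pyon.net.messaging import make_node

pub_node, _ = make_node()
pub = Publisher(node=pub_node, name="hassan")
while True:
    pub.publish("ping")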
Example No. 27
    def create_endpoint(self, **kwargs):
        newkwargs = kwargs.copy()
        newkwargs['process'] = self._process
        newkwargs['routing_call'] = self._routing_call
        return Subscriber.create_endpoint(self, **newkwargs)
Example No. 28
    def __init__(self, process=None, routing_call=None, **kwargs):
        assert process
        self._process = process
        self._routing_call = routing_call
        Subscriber.__init__(self, **kwargs)
Example No. 29
    def test_consume_one_message_at_a_time(self):
        # see also pyon.net.test.test_channel:TestChannelInt.test_consume_one_message_at_a_time

        pub3 = Publisher(to_name=(self.container.ex_manager.default_xs.exchange, 'routed.3'))
        pub5 = Publisher(to_name=(self.container.ex_manager.default_xs.exchange, 'routed.5'))

        #
        # SETUP COMPLETE, BEGIN TESTING OF EXCHANGE OBJECTS
        #

        xq = self.container.ex_manager.create_xn_queue('random_queue')
        self.addCleanup(xq.delete)

        # recv'd messages from the subscriber
        self.recv_queue = Queue()

        def cb(m, h):
            raise StandardError("Subscriber callback never gets called back!")

        sub = Subscriber(from_name=xq, callback=cb)
        sub.initialize()

        # publish 10 messages - we're not bound yet, so they'll just disappear
        for x in xrange(10):
            pub3.publish("3,%s" % str(x))

        # allow time for routing
        time.sleep(2)

        # no messages yet
        self.assertRaises(Timeout, sub.get_one_msg, timeout=0)

        # now, we'll bind the xq
        xq.bind('routed.3')

        # even though we are consuming, there are no messages - the previously published ones all disappeared
        self.assertRaises(Timeout, sub.get_one_msg, timeout=0)

        # publish those messages again
        for x in xrange(10):
            pub3.publish("3,%s" % str(x))

        # allow time for routing
        time.sleep(2)

        # NOW we have messages!
        for x in xrange(10):
            mo = sub.get_one_msg(timeout=10)
            self.assertEquals(mo.body, "3,%s" % str(x))
            mo.ack()

        # we've cleared it all
        self.assertRaises(Timeout, sub.get_one_msg, timeout=0)

        # bind a wildcard and publish on both
        xq.bind('routed.*')

        for x in xrange(10):
            time.sleep(0.3)
            pub3.publish("3,%s" % str(x))
            time.sleep(0.3)
            pub5.publish("5,%s" % str(x))

        # allow time for routing
        time.sleep(2)

        # should get all 20, interleaved
        for x in xrange(10):
            mo = sub.get_one_msg(timeout=1)
            self.assertEquals(mo.body, "3,%s" % str(x))
            mo.ack()

            mo = sub.get_one_msg(timeout=1)
            self.assertEquals(mo.body, "5,%s" % str(x))
            mo.ack()

        # add 5 binding, remove all other bindings
        xq.bind('routed.5')
        xq.unbind('routed.3')
        xq.unbind('routed.*')

        # try publishing to 3, shouldn't arrive anymore
        pub3.publish("3")

        self.assertRaises(Timeout, sub.get_one_msg, timeout=0)

        # let's turn off the consumer and let things build up a bit
        sub._chan.stop_consume()

        for x in xrange(10):
            pub5.publish("5,%s" % str(x))

        # allow time for routing
        time.sleep(2)

        # 10 messages in the queue, no consumers
        self.assertTupleEqual((10, 0), sub._chan.get_stats())

        # drain queue
        sub._chan.start_consume()

        for x in xrange(10):
            mo = sub.get_one_msg(timeout=1)
            mo.ack()

        sub.close()
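Condensed, the consume-control pattern this test exercises looks like the following sketch, assuming `sub` is initialized and bound as above:

sub._chan.stop_consume()                   # pause delivery; messages pile up on the broker
# ... publish while paused ...
queued, consumers = sub._chan.get_stats()  # e.g. (10, 0): ten queued, zero consumers
sub._chan.start_consume()                  # resume delivery
mo = sub.get_one_msg(timeout=1)            # take one message at a time
mo.ack()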
Example No. 30
    def test_replay_integration(self):
        '''
        test_replay_integration
        '''
        import numpy as np
        # Keep this import; it's used in the vector comparison below even though PyCharm says it's unused.

        cc = self.container
        XP = self.XP
        assertions = self.assertTrue

        ### Every thing below here can be run as a script:
        log.debug('Got it')

        pubsub_management_service = PubsubManagementServiceClient(node=cc.node)
        ingestion_management_service = IngestionManagementServiceClient(
            node=cc.node)
        dataset_management_service = DatasetManagementServiceClient(
            node=cc.node)
        data_retriever_service = DataRetrieverServiceClient(node=cc.node)

        datastore_name = 'dm_test_replay_integration'

        producer = Publisher(name=(XP, 'stream producer'))

        ingestion_configuration_id = ingestion_management_service.create_ingestion_configuration(
            exchange_point_id=XP,
            couch_storage=CouchStorage(datastore_name=datastore_name,
                                       datastore_profile='SCIDATA'),
            hdf_storage=HdfStorage(),
            number_of_workers=1)

        ingestion_management_service.activate_ingestion_configuration(
            ingestion_configuration_id=ingestion_configuration_id)

        definition = SBE37_CDM_stream_definition()
        data_stream_id = definition.data_stream_id
        encoding_id = definition.identifiables[data_stream_id].encoding_id
        element_count_id = definition.identifiables[
            data_stream_id].element_count_id

        stream_def_id = pubsub_management_service.create_stream_definition(
            container=definition)
        stream_id = pubsub_management_service.create_stream(
            stream_definition_id=stream_def_id)

        dataset_id = dataset_management_service.create_dataset(
            stream_id=stream_id,
            datastore_name=datastore_name,
            view_name='datasets/dataset_by_id')
        ingestion_management_service.create_dataset_configuration(
            dataset_id=dataset_id,
            archive_data=True,
            archive_metadata=True,
            ingestion_configuration_id=ingestion_configuration_id)
        definition.stream_resource_id = stream_id

        packet = _create_packet(definition)
        input_file = FileSystem.mktemp()
        input_file.write(packet.identifiables[data_stream_id].values)
        input_file_path = input_file.name
        input_file.close()

        fields = [
            'conductivity', 'height', 'latitude', 'longitude', 'pressure',
            'temperature', 'time'
        ]

        input_vectors = acquire_data([input_file_path], fields, 2).next()

        producer.publish(msg=packet, to_name=(XP, '%s.data' % stream_id))

        replay_id, replay_stream_id = data_retriever_service.define_replay(
            dataset_id)
        ar = gevent.event.AsyncResult()

        def sub_listen(msg, headers):

            assertions(isinstance(msg, StreamGranuleContainer),
                       'replayed message is not a granule.')
            hdf_string = msg.identifiables[data_stream_id].values
            sha1 = hashlib.sha1(hdf_string).hexdigest().upper()
            assertions(sha1 == msg.identifiables[encoding_id].sha1,
                       'Checksum failed.')
            assertions(
                msg.identifiables[element_count_id].value == 1,
                'record replay count is incorrect %d.' %
                msg.identifiables[element_count_id].value)
            output_file = FileSystem.mktemp()
            output_file.write(msg.identifiables[data_stream_id].values)
            output_file_path = output_file.name
            output_file.close()
            output_vectors = acquire_data([output_file_path], fields, 2).next()
            for field in fields:
                comparison = (input_vectors[field]['values'] ==
                              output_vectors[field]['values'])
                assertions(
                    comparison.all(), 'vector mismatch: %s vs %s' %
                    (input_vectors[field]['values'],
                     output_vectors[field]['values']))
            FileSystem.unlink(output_file_path)
            ar.set(True)

        subscriber = Subscriber(name=(XP, 'replay listener'),
                                callback=sub_listen)

        g = gevent.Greenlet(subscriber.listen,
                            binding='%s.data' % replay_stream_id)
        g.start()

        data_retriever_service.start_replay(replay_id)

        ar.get(timeout=10)

        FileSystem.unlink(input_file_path)
Example No. 31
    def __init__(self, process=None, **kwargs):
        self._process = process
        Subscriber.__init__(self, **kwargs)
Example No. 32
    def create_endpoint(self, **kwargs):
        newkwargs = kwargs.copy()
        newkwargs['process'] = self._process
        newkwargs['routing_call'] = self._routing_call
        return Subscriber.create_endpoint(self, **newkwargs)
Example No. 33
    def start_listener(self, stream_id, callback):

        sub = Subscriber(name=(self.XP, 'replay_listener'), callback=callback)
        g = gevent.Greenlet(sub.listen, binding='%s.data' % stream_id)
        g.start()
        self.thread_pool.append(g)
Example No. 34
    def test_multiple_visualization_queue(self):

        # set up a workflow with the salinity transform and the doubler. We will direct the original stream and the doubled stream to queues
        # and test to make sure the subscription to the queues is working correctly
        assertions = self.assertTrue

        # Build the workflow definition
        workflow_def_obj = IonObject(RT.WorkflowDefinition, name='Viz_Test_Workflow',description='A workflow to test collection of multiple data products in queues')

        workflow_data_product_name = 'TEST-Workflow_Output_Product' #Set a specific output product name
        #-------------------------------------------------------------------------------------------------------------------------
        #Add a transformation process definition for salinity
        #-------------------------------------------------------------------------------------------------------------------------

        ctd_L2_salinity_dprocdef_id = self.create_salinity_data_process_definition()
        workflow_step_obj = IonObject('DataProcessWorkflowStep', data_process_definition_id=ctd_L2_salinity_dprocdef_id, persist_process_output_data=False)  #Don't persist the intermediate data product
        configuration = {'stream_name' : 'salinity'}
        workflow_step_obj.configuration = configuration
        workflow_def_obj.workflow_steps.append(workflow_step_obj)

        #Create it in the resource registry
        workflow_def_id = self.workflowclient.create_workflow_definition(workflow_def_obj)

        aids = self.rrclient.find_associations(workflow_def_id, PRED.hasDataProcessDefinition)
        assertions(len(aids) == 1)

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        #Create and start the workflow
        workflow_id, workflow_product_id = self.workflowclient.create_data_process_workflow(workflow_def_id, ctd_parsed_data_product_id, timeout=30)

        workflow_output_ids,_ = self.rrclient.find_subjects(RT.Workflow, PRED.hasOutputProduct, workflow_product_id, True)
        assertions(len(workflow_output_ids) == 1)

        #Walk the associations to find the appropriate output data streams to validate the messages
        workflow_dp_ids,_ = self.rrclient.find_objects(workflow_id, PRED.hasDataProduct, RT.DataProduct, True)
        assertions(len(workflow_dp_ids) == 1)

        for dp_id in workflow_dp_ids:
            stream_ids, _ = self.rrclient.find_objects(dp_id, PRED.hasStream, None, True)
            assertions(len(stream_ids) == 1)
            data_product_stream_ids.append(stream_ids[0])

        # Now for each of the data_product_stream_ids create a queue and pipe their data to the queue


        user_queue_name1 = USER_VISUALIZATION_QUEUE + '1'
        user_queue_name2 = USER_VISUALIZATION_QUEUE + '2'

        # use idempotency to create queues
        xq1 = self.container.ex_manager.create_xn_queue(user_queue_name1)
        self.addCleanup(xq1.delete)
        xq2 = self.container.ex_manager.create_xn_queue(user_queue_name2)
        self.addCleanup(xq2.delete)
        xq1.purge()
        xq2.purge()

        # the create_subscription call takes a list of stream_ids so create temp ones

        dp_stream_id1 = [data_product_stream_ids[0]]
        dp_stream_id2 = [data_product_stream_ids[1]]

        salinity_subscription_id1 = self.pubsubclient.create_subscription(stream_ids=dp_stream_id1,
            exchange_name=user_queue_name1, name="user visualization queue1")

        salinity_subscription_id2 = self.pubsubclient.create_subscription(stream_ids=dp_stream_id2,
            exchange_name=user_queue_name2, name="user visualization queue2")

        # Create subscribers for the output of the queue
        subscriber1 = Subscriber(from_name=xq1)
        subscriber1.initialize()
        subscriber2 = Subscriber(from_name=xq2)
        subscriber2.initialize()

        # after the queue has been created it is safe to activate the subscription
        self.pubsubclient.activate_subscription(subscription_id=salinity_subscription_id1)
        self.pubsubclient.activate_subscription(subscription_id=salinity_subscription_id2)

        # Start input stream and wait for some time
        ctd_sim_pid = self.start_simple_input_stream_process(ctd_stream_id)
        gevent.sleep(5.0)  # Send some messages - don't care how many

        msg_count, _ = xq1.get_stats()
        log.info('Messages in user queue 1: %s', msg_count)
        msg_count, _ = xq2.get_stats()
        log.info('Messages in user queue 2: %s', msg_count)

        msgs1 = subscriber1.get_all_msgs(timeout=2)
        msgs2 = subscriber2.get_all_msgs(timeout=2)

        for msg1, msg2 in zip(msgs1, msgs2):
            msg1.ack()
            msg2.ack()
            self.validate_multiple_vis_queue_messages(msg1.body, msg2.body)

        # kill the ctd simulator process - that is enough data
        self.process_dispatcher.cancel_process(ctd_sim_pid)

        # close the subscription and queues
        subscriber1.close()
        subscriber2.close()

        return
Example No. 35
class TransformBenchTesting(TransformDataProcess):
    """
    Easiest way to run:
    from pyon.util.containers import DotDict
    tbt=cc.proc_manager._create_service_instance('55', 'tbt', 'pyon.ion.transform', 'TransformBenchTesting', DotDict({'process':{'name':'tbt', 'transform_id':'55'}}))
    tbt.init()
    tbt.start()
    """
    transform_number = 0
    message_length = 0

    def __init__(self):
        super(TransformBenchTesting, self).__init__()
        self.count = 0
        TransformBenchTesting.transform_number += 1

    def perf(self):

        with open('/tmp/pyon_performance.dat', 'a') as f:
            then = time.time()
            ocount = self.count
            while True:
                gevent.sleep(2.)
                now = time.time()
                count = self.count
                delta_t = now - then
                delta_c = count - ocount

                f.write(
                    '%s|%s\t%s\t%s\t%3.3f\n' %
                    (get_sys_name(), time.strftime("%H:%M:%S", time.gmtime()),
                     TransformBenchTesting.message_length,
                     TransformBenchTesting.transform_number,
                     float(delta_c) / delta_t))
                then = now
                ocount = count
                f.flush()

    @staticmethod
    def launch_benchmark(transform_number=1, primer=1, message_length=4):
        import gevent
        from gevent.greenlet import Greenlet
        from pyon.util.containers import DotDict
        from pyon.net.transport import NameTrio
        from pyon.net.endpoint import Publisher
        import uuid
        num = transform_number
        msg_len = message_length
        transforms = list()
        pids = 1
        TransformBenchTesting.message_length = message_length
        cc = Container.instance
        pub = Publisher(to_name=NameTrio(get_sys_name(),
                                         str(uuid.uuid4())[0:6]))
        for i in xrange(num):
            tbt = cc.proc_manager._create_service_instance(
                str(pids), 'tbt', 'prototype.transforms.linear',
                'TransformInPlace',
                DotDict({
                    'process': {
                        'name': 'tbt%d' % pids,
                        'transform_id': pids
                    }
                }))
            tbt.init()
            tbt.start()
            gevent.sleep(0.2)
            for i in xrange(primer):
                pub.publish(list(xrange(msg_len)))
            g = Greenlet(tbt.perf)
            g.start()
            transforms.append(tbt)
            pids += 1

    def on_start(self):
        TransformDataProcess.on_start(self)

        # set up subscriber to *
        self._bt_sub = Subscriber(callback=lambda m, h: self.call_process(m),
                                  from_name=NameTrio(get_sys_name(),
                                                     'bench_queue', '*'))

        # spawn listener
        self._sub_gl = spawn(self._bt_sub.listen)

        # set up publisher to anything!
        self._bt_pub = Publisher(to_name=NameTrio(get_sys_name(),
                                                  str(uuid.uuid4())[0:6]))

    def publish(self, msg):
        self._bt_pub.publish(msg)
        self.count += 1

    def _stop_listener(self):
        self._bt_sub.close()
        self._sub_gl.join(timeout=2)
        self._sub_gl.kill()

    def on_stop(self):
        TransformDataProcess.on_stop(self)
        self._stop_listener()

    def on_quit(self):
        TransformDataProcess.on_quit(self)
        self._stop_listener()
Example No. 36
    def test_consume_one_message_at_a_time(self):
        # see also pyon.net.test.test_channel:TestChannelInt.test_consume_one_message_at_a_time

        pub3 = Publisher(to_name=(self.container.ex_manager.default_xs.exchange, 'routed.3'))
        pub5 = Publisher(to_name=(self.container.ex_manager.default_xs.exchange, 'routed.5'))

        #
        # SETUP COMPLETE, BEGIN TESTING OF EXCHANGE OBJECTS
        #

        xq = self.container.ex_manager.create_xn_queue('random_queue')
        self.addCleanup(xq.delete)

        # recv'd messages from the subscriber
        self.recv_queue = Queue()

        sub = Subscriber(from_name=xq, callback=lambda m, h: self.recv_queue.put((m, h)))
        sub.prepare_listener()

        # publish 10 messages - we're not bound yet, so they'll just disappear
        for x in xrange(10):
            pub3.publish("3,%s" % str(x))

        # no messages yet
        self.assertFalse(sub.get_one_msg(timeout=0))

        # now, we'll bind the xq
        xq.bind('routed.3')

        # even though we are consuming, there are no messages - the previously published ones all disappeared
        self.assertFalse(sub.get_one_msg(timeout=0))

        # publish those messages again
        for x in xrange(10):
            pub3.publish("3,%s" % str(x))

        # NOW we have messages!
        for x in xrange(10):
            self.assertTrue(sub.get_one_msg(timeout=0))
            m, h = self.recv_queue.get(timeout=0)
            self.assertEquals(m, "3,%s" % str(x))

        # we've cleared it all
        self.assertFalse(sub.get_one_msg(timeout=0))

        # bind a wildcard and publish on both
        xq.bind('routed.*')

        for x in xrange(10):
            time.sleep(0.3)
            pub3.publish("3,%s" % str(x))
            time.sleep(0.3)
            pub5.publish("5,%s" % str(x))

        # should get all 20, interleaved
        for x in xrange(10):
            self.assertTrue(sub.get_one_msg(timeout=0))
            m, h = self.recv_queue.get(timeout=0)
            self.assertEquals(m, "3,%s" % str(x))

            self.assertTrue(sub.get_one_msg(timeout=0))
            m, h = self.recv_queue.get(timeout=0)
            self.assertEquals(m, "5,%s" % str(x))

        # add 5 binding, remove all other bindings
        xq.bind('routed.5')
        xq.unbind('routed.3')
        xq.unbind('routed.*')

        # try publishing to 3, shouldn't arrive anymore
        pub3.publish("3")

        self.assertFalse(sub.get_one_msg(timeout=0))

        # let's turn off the consumer and let things build up a bit
        sub._chan.stop_consume()

        for x in xrange(10):
            pub5.publish("5,%s" % str(x))

        # 10 messages in the queue, no consumers
        self.assertTupleEqual((10, 0), sub._chan.get_stats())

        # drain queue
        sub._chan.start_consume()
        time.sleep(1)       # yield to allow delivery

        for x in xrange(10):
            self.assertTrue(sub.get_one_msg(timeout=0))
            self.recv_queue.get(timeout=0)

        sub.close()
Example No. 37
class TransformBenchTesting(TransformDataProcess):
    """
    Easiest way to run:
    from pyon.util.containers import DotDict
    tbt=cc.proc_manager._create_service_instance('55', 'tbt', 'pyon.ion.transform', 'TransformBenchTesting', DotDict({'process':{'name':'tbt', 'transform_id':'55'}}))
    tbt.init()
    tbt.start()
    """
    transform_number = 0
    message_length = 0
    def __init__(self):
        super(TransformBenchTesting, self).__init__()
        self.count = 0
        TransformBenchTesting.transform_number += 1

    def perf(self):

        with open('/tmp/pyon_performance.dat','a') as f:
            then = time.time()
            ocount = self.count
            while True:
                gevent.sleep(2.)
                now = time.time()
                count = self.count
                delta_t = now - then
                delta_c = count - ocount

                f.write('%s|%s\t%s\t%s\t%3.3f\n' % (get_sys_name(),time.strftime("%H:%M:%S", time.gmtime()),TransformBenchTesting.message_length,TransformBenchTesting.transform_number, float(delta_c) / delta_t))
                then = now
                ocount = count
                f.flush()

    @staticmethod
    def launch_benchmark(transform_number=1, primer=1, message_length=4):
        import gevent
        from gevent.greenlet import Greenlet
        from pyon.util.containers import DotDict
        from pyon.net.transport import NameTrio
        from pyon.net.endpoint import Publisher
        import uuid
        num = transform_number
        msg_len = message_length
        transforms = list()
        pids = 1
        TransformBenchTesting.message_length = message_length
        cc = Container.instance
        pub = Publisher(to_name=NameTrio(get_sys_name(), str(uuid.uuid4())[0:6]))
        for i in xrange(num):
            tbt=cc.proc_manager._create_service_instance(str(pids), 'tbt', 'prototype.transforms.linear', 'TransformInPlace', DotDict({'process':{'name':'tbt%d' % pids, 'transform_id':pids}}))
            tbt.init()
            tbt.start()
            gevent.sleep(0.2)
            for i in xrange(primer):
                pub.publish(list(xrange(msg_len)))
            g = Greenlet(tbt.perf)
            g.start()
            transforms.append(tbt)
            pids += 1

    def on_start(self):
        TransformDataProcess.on_start(self)

        # set up subscriber to *
        self._bt_sub = Subscriber(callback=lambda m, h: self.call_process(m),
                                  from_name=NameTrio(get_sys_name(), 'bench_queue', '*'))

        # spawn listener
        self._sub_gl = spawn(self._bt_sub.listen)

        # set up publisher to anything!
        self._bt_pub = Publisher(to_name=NameTrio(get_sys_name(), str(uuid.uuid4())[0:6]))

    def publish(self, msg):
        self._bt_pub.publish(msg)
        self.count += 1

    def _stop_listener(self):
        self._bt_sub.close()
        self._sub_gl.join(timeout=2)
        self._sub_gl.kill()

    def on_stop(self):
        TransformDataProcess.on_stop(self)
        self._stop_listener()

    def on_quit(self):
        TransformDataProcess.on_quit(self)
        self._stop_listener()
Example No. 38
    def test_visualization_queue(self):

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        user_queue_name = USER_VISUALIZATION_QUEUE

        xq = self.container.ex_manager.create_xn_queue(user_queue_name)

        salinity_subscription_id = self.pubsubclient.create_subscription(
            stream_ids=data_product_stream_ids,
            exchange_name=user_queue_name,
            name="user visualization queue")

        subscriber = Subscriber(from_name=xq)
        subscriber.initialize()

        # after the queue has been created it is safe to activate the subscription
        self.pubsubclient.activate_subscription(subscription_id=salinity_subscription_id)

        #Start the output stream listener to monitor and collect messages
        #results = self.start_output_stream_and_listen(None, data_product_stream_ids)

        #Not sure why this is needed - but it is
        #subscriber._chan.stop_consume()

        ctd_sim_pid = self.start_simple_input_stream_process(ctd_stream_id)
        gevent.sleep(10.0)  # Send some messages - don't care how many

        msg_count, _ = xq.get_stats()
        log.info('Messages in user queue 1: %s', msg_count)

        #Validate the data from each of the messages along the way
        #self.validate_messages(results)

#        for x in range(msg_count):
#            mo = subscriber.get_one_msg(timeout=1)
#            print mo.body
#            mo.ack()

        msgs = subscriber.get_all_msgs(timeout=2)
        for msg in msgs:
            msg.ack()
            self.validate_messages(msg)
            # print msg.body

        #Should be zero after pulling all of the messages.
        msg_count, _ = xq.get_stats()
        log.info('Messages in user queue 2: %s', msg_count)

        #Trying to continue to receive messages in the queue
        gevent.sleep(5.0)  # Send some messages - don't care how many

        #Turning off after everything - since it is more representative of an always on stream of data!
        self.process_dispatcher.cancel_process(ctd_sim_pid)  # kill the ctd simulator process - that is enough data

        #Should see more messages in the queue
        msg_count, _ = xq.get_stats()
        log.info('Messages in user queue 3: %s', msg_count)

        msgs = subscriber.get_all_msgs(timeout=2)
        for msg in msgs:
            msg.ack()
            self.validate_messages(msg)

        #Should be zero after pulling all of the messages.
        msg_count, _ = xq.get_stats()
        log.info('Messages in user queue 4: %s', msg_count)

        subscriber.close()
        self.container.ex_manager.delete_xn(xq)
Example No. 39
 def __init__(self, process=None, routing_call=None, **kwargs):
     assert process
     self._process = process
     self._routing_call = routing_call
     Subscriber.__init__(self, **kwargs)
Example No. 40
    def test_visualization_queue(self):

        #The list of data product streams to monitor
        data_product_stream_ids = list()

        #Create the input data product
        ctd_stream_id, ctd_parsed_data_product_id = self.create_ctd_input_stream_and_data_product()
        data_product_stream_ids.append(ctd_stream_id)

        user_queue_name = USER_VISUALIZATION_QUEUE

        xq = self.container.ex_manager.create_xn_queue(user_queue_name)

        salinity_subscription_id = self.pubsubclient.create_subscription(
            stream_ids=data_product_stream_ids,
            exchange_name=user_queue_name,
            name="user visualization queue")

        subscriber = Subscriber(from_name=xq)
        subscriber.initialize()

        # after the queue has been created it is safe to activate the subscription
        self.pubsubclient.activate_subscription(subscription_id=salinity_subscription_id)

        #Start the output stream listener to monitor and collect messages
        #results = self.start_output_stream_and_listen(None, data_product_stream_ids)

        #Not sure why this is needed - but it is
        #subscriber._chan.stop_consume()

        ctd_sim_pid = self.start_simple_input_stream_process(ctd_stream_id)
        gevent.sleep(10.0)  # Send some messages - don't care how many

        msg_count, _ = xq.get_stats()
        log.info('Messages in user queue 1: %s', msg_count)

        #Validate the data from each of the messages along the way
        #self.validate_messages(results)

#        for x in range(msg_count):
#            mo = subscriber.get_one_msg(timeout=1)
#            print mo.body
#            mo.ack()

        msgs = subscriber.get_all_msgs(timeout=2)
        for msg in msgs:
            msg.ack()
            self.validate_messages(msg)
            # print msg.body

        #Should be zero after pulling all of the messages.
        msg_count, _ = xq.get_stats()
        log.info('Messages in user queue 2: %s', msg_count)

        #Trying to continue to receive messages in the queue
        gevent.sleep(5.0)  # Send some messages - don't care how many

        #Turning off after everything - since it is more representative of an always on stream of data!
        self.process_dispatcher.cancel_process(ctd_sim_pid)  # kill the ctd simulator process - that is enough data

        #Should see more messages in the queue
        msg_count, _ = xq.get_stats()
        log.info('Messages in user queue 3: %s', msg_count)

        msgs = subscriber.get_all_msgs(timeout=2)
        for msg in msgs:
            msg.ack()
            self.validate_messages(msg)

        #Should be zero after pulling all of the messages.
        msg_count, _ = xq.get_stats()
        log.info('Messages in user queue 4: %s', msg_count)

        subscriber.close()
        self.container.ex_manager.delete_xn(xq)
Example No. 41
 def __init__(self, process=None, **kwargs):
     self._process = process
     Subscriber.__init__(self, **kwargs)
Example No. 42
    def __init__(self, queue_name, callback, **kwargs):
        self.callback = callback

        Subscriber.__init__(self, from_name=queue_name, callback=callback,
            **kwargs)