Example #1
    def launch_benchmark(transform_number=1, primer=1, message_length=4):
        import gevent
        from gevent.greenlet import Greenlet
        from pyon.util.containers import DotDict
        from pyon.net.transport import NameTrio
        from pyon.net.endpoint import Publisher
        import uuid
        num = transform_number
        msg_len = message_length
        transforms = list()
        pids = 1
        TransformBenchTesting.message_length = message_length
        cc = Container.instance
        pub = Publisher(to_name=NameTrio(get_sys_name(),
                                         str(uuid.uuid4())[0:6]))
        for i in xrange(num):
            tbt = cc.proc_manager._create_service_instance(
                str(pids), 'tbt', 'prototype.transforms.linear',
                'TransformInPlace',
                DotDict({
                    'process': {
                        'name': 'tbt%d' % pids,
                        'transform_id': pids
                    }
                }))
            tbt.init()
            tbt.start()
            gevent.sleep(0.2)
            for i in xrange(primer):
                pub.publish(list(xrange(msg_len)))
            g = Greenlet(tbt.perf)
            g.start()
            transforms.append(tbt)
            pids += 1
Example #2
    def test_sub(self):

        #start interaction observer
        io = InteractionObserver()
        io.start()

        #publish an event
        ev_pub = EventPublisher(event_type="ResourceEvent")
        ev_pub.publish_event(origin="specific", description="event")


        # publish a message
        msg_pub = Publisher()
        msg_pub.publish(to_name='anyone', msg="msg")

        # give 2 seconds for the messages to arrive
        time.sleep(2)

        #verify that two messages (an event and a message) are seen
        self.assertEquals(len(io.msg_log), 2)

        #iterate through the messages observed
        for item in io.msg_log:
            # if event
            if item[2]:
                #verify that the origin is what we sent
                self.assertEquals(item[1]['origin'], 'specific')
        dump = io._get_data(io.msg_log, {})
        sump = dump
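
Judging from the indexing above, each io.msg_log entry looks like a (message, headers, is_event) tuple. A small sketch of filtering the log under that assumption:

    # Assumption: msg_log entries are (body, headers, is_event) tuples,
    # as the item[1]/item[2] indexing in the test suggests.
    events = [entry for entry in io.msg_log if entry[2]]
    origins = [entry[1]['origin'] for entry in events]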
Example #3
    def test_xp_durable_send(self):
        xp = self.container.ex_manager.create_xp('an_xp')
        #self.addCleanup(xp.delete)

        xq = self.container.ex_manager.create_xn_queue('no_matter', xp)
        self.addCleanup(xq.delete)
        xq.bind('one')

        pub = Publisher(to_name=xp.create_route('one'))
        pub.publish('test')
        pub.close()

        url = self.container.ex_manager._get_management_url(
            "queues", "%2f", xq.queue, "get")
        res = self.container.ex_manager._make_management_call(
            url,
            use_ems=False,
            method='post',
            data=json.dumps({'count': 1, 'requeue': True, 'encoding': 'auto'}))

        self.assertEquals(len(res), 1)
        self.assertIn('properties', res[0])
        self.assertIn('delivery_mode', res[0]['properties'])
        self.assertEquals(2, res[0]['properties']['delivery_mode'])
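
The final assertion checks AMQP delivery_mode 2, which marks the message as persistent. The management call above maps onto RabbitMQ's HTTP API; a rough standalone equivalent with requests, assuming a local broker whose management API still accepts the requeue flag (newer RabbitMQ versions use ackmode instead), might look like:

    import requests  # hypothetical standalone equivalent, not pyon API

    url = 'http://localhost:15672/api/queues/%2f/no_matter/get'
    body = {'count': 1, 'requeue': True, 'encoding': 'auto'}
    res = requests.post(url, auth=('guest', 'guest'), json=body).json()
    assert res[0]['properties']['delivery_mode'] == 2  # 2 == persistent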
Example #4
    def test_last_update_cache(self):
        handle = self.start_worker()
        queue = Queue()
        o_process = handle.process
        def new_process(msg):
            o_process(msg)
            queue.put(True)
        handle.process = new_process

        definition = SBE37_CDM_stream_definition()
        publisher = Publisher()

        stream_def_id = self.pubsub_cli.create_stream_definition(container=definition)
        stream_id = self.pubsub_cli.create_stream(stream_definition_id=stream_def_id)

        time = float(0.0)

        for granule in self.make_points(definition=definition, stream_id=stream_id, N=10):

            publisher.publish(granule, to_name=(self.XP, stream_id+'.data'))
            # Determinism sucks
            try:
                queue.get(timeout=5)
            except Empty:
                self.fail('Process never received the message.')

            doc = self.db.read(stream_id)
            ntime = doc.variables['time'].value
            self.assertTrue(ntime >= time, 'The documents did not sequentially get updated correctly.')
            time = ntime
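
The handle.process wrapper above is a general trick for making asynchronous delivery deterministic: decorate the real callback so it signals a queue, then block on the queue with a timeout instead of sleeping. The same pattern, self-contained and independent of pyon:

    from Queue import Queue, Empty  # Python 2 stdlib; 'queue' on Python 3

    class Handler(object):              # stand-in for the worker handle
        def process(self, msg):
            pass                        # the real work would happen here

    handler = Handler()
    received = Queue()
    original = handler.process

    def signalling_process(msg):
        original(msg)                   # run the real callback first
        received.put(True)              # then unblock the waiting test

    handler.process = signalling_process

    handler.process('some message')     # simulate a delivery
    try:
        received.get(timeout=5)         # deterministic wait, no fixed sleep
    except Empty:
        raise AssertionError('Process never received the message.')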
Example #5
    def test_pub_speed(self):
        pub = Publisher(node=self.container.node, name="i_no_exist")

        print >> sys.stderr, ""

        self.counter = 0
        self.alive = True

        def sendem():
            while self.alive:
                self.counter += 1
                pub.publish('meh')

        start_time = time.time()

        sendgl = spawn(sendem)
        time.sleep(5)
        end_time = time.time()

        self.alive = False
        sendgl.join(timeout=2)
        sendgl.kill()

        diff = end_time - start_time
        mps = float(self.counter) / diff

        print >> sys.stderr, "Published messages per second:", mps, "(", self.counter, "messages in", diff, "seconds)"
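
The shape of this benchmark (a flag-controlled send loop in a background greenlet, sampled over a fixed window) works outside pyon too. A runnable sketch with plain threads, where the counter increment stands in for pub.publish:

    import threading
    import time

    state = {'alive': True, 'count': 0}

    def sendem():
        while state['alive']:
            state['count'] += 1        # stand-in for pub.publish('meh')

    t = threading.Thread(target=sendem)
    start_time = time.time()
    t.start()
    time.sleep(5)
    end_time = time.time()

    state['alive'] = False
    t.join(timeout=2)

    print('%.1f ops/sec' % (state['count'] / (end_time - start_time)))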
Example #6
    def __init__(self, process=None, wait_name=None):
        self.process = process
        if not wait_name.startswith("asyncresult_"):
            raise BadRequest("Not a valid wait_name")
        self.wait_name = wait_name
        if self.process:
            self.pub = ProcessPublisher(process=self.process,
                                        to_name=wait_name)
        else:
            self.pub = Publisher(to_name=wait_name)
Example #7
    def start(self):
        from pyon.net.endpoint import Publisher
        from pyon.util.async import spawn
        self.heartbeat_quit = Event()
        self.heartbeat_interval = float(
            self.heartbeat_cfg.get("publish_interval", 60))
        self.heartbeat_topic = self.heartbeat_cfg.get("topic", "heartbeat")
        self.heartbeat_pub = Publisher(to_name=self.heartbeat_topic)

        # Directly spawn a greenlet - we don't want this to be a supervised IonProcessThread
        self.heartbeat_gl = spawn(self.heartbeat_loop)
        self.started = True
        log.info("Started container heartbeat (interval=%s, topic=%s)",
                 self.heartbeat_interval, self.heartbeat_topic)
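
heartbeat_loop itself is not shown on this page. A plausible shape for it, given the heartbeat_quit Event and interval set up in start(), is a publish-wait cycle. A sketch only, with an assumed payload:

    def heartbeat_loop(self):
        # Event.wait(timeout) doubles as the inter-beat sleep and returns
        # truthy once heartbeat_quit is set, ending the loop promptly.
        while not self.heartbeat_quit.wait(timeout=self.heartbeat_interval):
            self.heartbeat_pub.publish({'type': 'heartbeat'})  # payload is a guess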
Example #8
    def on_start(self):
        TransformDataProcess.on_start(self)

        # set up subscriber to *
        self._bt_sub = Subscriber(callback=lambda m, h: self.call_process(m),
                                  from_name=NameTrio(get_sys_name(),
                                                     'bench_queue', '*'))

        # spawn listener
        self._sub_gl = spawn(self._bt_sub.listen)

        # set up publisher to anything!
        self._bt_pub = Publisher(to_name=NameTrio(get_sys_name(),
                                                  str(uuid.uuid4())[0:6]))
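
Throughout these examples NameTrio reads as (exchange, queue, binding), with trailing parts optional: the subscriber above listens on bench_queue under a '*' wildcard binding, while publishers often pass just an exchange and a short random queue name. Under that reading, the two ends pair up like:

    # Sketch: both ends of a NameTrio-addressed channel, assuming the
    # (exchange, queue, binding) argument order used on this page.
    sub_name = NameTrio(get_sys_name(), 'bench_queue', '*')  # all bindings
    pub_name = NameTrio(get_sys_name(), 'bench_queue')       # direct to the queue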
Example #9
    def launch_benchmark(transform_number=1, primer=1, message_length=4):
        import gevent
        from gevent.greenlet import Greenlet
        from pyon.util.containers import DotDict
        from pyon.net.transport import NameTrio
        from pyon.net.endpoint import Publisher
        import numpy
        from pyon.ion.granule.record_dictionary import RecordDictionaryTool
        from pyon.ion.granule.taxonomy import TaxyTool
        from pyon.ion.granule.granule import build_granule

        tt = TaxyTool()
        tt.add_taxonomy_set('a')

        import uuid
        num = transform_number
        msg_len = message_length
        transforms = list()
        pids = 1
        TransformBenchTesting.message_length = message_length
        cc = Container.instance
        pub = Publisher(to_name=NameTrio(get_sys_name(),
                                         str(uuid.uuid4())[0:6]))
        for i in xrange(num):
            tbt = cc.proc_manager._create_service_instance(
                str(pids), 'tbt', 'prototype.transforms.linear',
                'TransformInPlaceNewGranule',
                DotDict({
                    'process': {
                        'name': 'tbt%d' % pids,
                        'transform_id': pids
                    }
                }))
            tbt.init()
            tbt.start()
            gevent.sleep(0.2)
            for i in xrange(primer):
                rd = RecordDictionaryTool(tt, message_length)
                rd['a'] = numpy.arange(message_length)
                gran = build_granule(data_producer_id='dp_id',
                                     taxonomy=tt,
                                     record_dictionary=rd)
                pub.publish(gran)

            g = Greenlet(tbt.perf)
            g.start()
            transforms.append(tbt)
            pids += 1
Example #10
    def __init__(self, CFG, factory, log=logging):

        self._log = log
        self._log.log(logging.DEBUG, "Starting the heartbeat thread")
        self._CFG = CFG
        self._res = None
        self._interval = int(CFG.eeagent.heartbeat)
        self._done = False
        self._factory = factory
        self._publisher = Publisher()
        self._pd_name = CFG.eeagent.get('heartbeat_queue', 'heartbeat_queue')

        self._factory.set_state_change_callback(self._state_change_callback,
                                                None)
        self._first_beat()
Example #11
    def test_async_result(self):
        request_id = "request_foo"
        waiter = AsyncResultWaiter()
        self.assertFalse(waiter.async_res.ready())
        token = waiter.activate()
        self.assertFalse(waiter.async_res.ready())
        log.info("Wait token: %s", token)

        pub = Publisher(to_name=token)
        async_msg = AsyncResultMsg(request_id=request_id)
        pub.publish(async_msg)

        res = waiter.await(timeout=1, request_id=request_id)
        self.assertTrue(waiter.async_res.ready())
        self.assertIsInstance(res, AsyncResultMsg)
        self.assertEqual(res.__dict__, async_msg.__dict__)
Example #12
    def __init__(self, CFG, factory, log=logging):

        self._log = log
        self._log.log(logging.DEBUG, "Starting the heartbeat thread")
        self._CFG = CFG
        self._res = None
        self._interval = int(CFG.eeagent.heartbeat)
        self._done = False
        self._factory = factory
        self._next_beat(datetime.datetime.now())
        self._publisher = Publisher()
        self._pd_name = CFG.eeagent.get('process_dispatcher',
                                        'processdispatcher')

        self._factory.set_state_change_callback(self._state_change_callback,
                                                None)
Example #13
    def test_sub(self):

        # publish 2 messages
        pub = Publisher()
        pub.publish(to_name='anyone', msg="hello1")
        pub.publish(to_name='anyone', msg="hello2")

        dsm = self.container.datastore_manager
        ds = dsm.get_datastore("conversations")

        # give at least 2 seconds for the persister to save in the repository
        # test may fail if it does not wait long enough for the persister
        no_of_conv = 0
        retried = 0

        while (no_of_conv != 2 and retried < 5):
            time.sleep(2)
            # assert that the 2 messages have been persisted
            no_of_conv = len(ds.list_objects())
            retried = retried + 1

        self.assertEquals(no_of_conv, 2)
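
The retry loop is the usual remedy when a background persister is involved: poll with a bounded number of sleeps rather than one fixed delay. The same idea as a small reusable helper (a sketch, not pyon API):

    import time

    def wait_until(predicate, attempts=5, delay=2):
        # Poll predicate() up to 'attempts' times, sleeping 'delay'
        # seconds between tries; return True as soon as it holds.
        for _ in range(attempts):
            if predicate():
                return True
            time.sleep(delay)
        return False

    # usage against the datastore from the test above:
    # self.assertTrue(wait_until(lambda: len(ds.list_objects()) == 2))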
Example #14
    def setUp(self):
        self._node = Mock(spec=NodeB)
        self._pub = Publisher(node=self._node, to_name="testpub")
        self._ch = Mock(spec=SendChannel)
        self._node.channel.return_value = self._ch
        self._node.interceptors = {}
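
Passing spec=NodeB and spec=SendChannel makes each mock reject attributes the real class does not have, so typos fail fast instead of silently recording calls. A self-contained illustration with the mock library:

    from mock import Mock  # bundled as unittest.mock on Python 3

    class Channel(object):       # hypothetical stand-in for SendChannel
        def send(self, msg):
            pass

    ch = Mock(spec=Channel)
    ch.send('hi')                # fine: send exists on the spec
    try:
        ch.snd('hi')             # typo: AttributeError under spec
    except AttributeError:
        print('spec caught the typo')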
Example #15
#!/usr/bin/env python

from pyon.net.endpoint import Publisher
from pyon.net.messaging import make_node
import gevent

node, iowat = make_node()

pub = Publisher(node=node, name="hassan")
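
The script stops after constructing the Publisher. A minimal continuation, assuming the same publish signature the other examples on this page use:

    # Sketch: publish a few messages, then yield so gevent can flush them.
    for i in range(3):
        pub.publish('hello %d' % i)
        gevent.sleep(0)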
Example #16
    def test_replay_integration(self):
        '''
        test_replay_integration
        '''
        import numpy as np
        # Keep this import; it's used in the vector comparison below even
        # though PyCharm flags it as unused.

        cc = self.container
        XP = self.XP
        assertions = self.assertTrue

        ### Everything below here can be run as a script:
        log.debug('Got it')

        pubsub_management_service = PubsubManagementServiceClient(node=cc.node)
        ingestion_management_service = IngestionManagementServiceClient(
            node=cc.node)
        dataset_management_service = DatasetManagementServiceClient(
            node=cc.node)
        data_retriever_service = DataRetrieverServiceClient(node=cc.node)

        datastore_name = 'dm_test_replay_integration'

        producer = Publisher(name=(XP, 'stream producer'))

        ingestion_configuration_id = ingestion_management_service.create_ingestion_configuration(
            exchange_point_id=XP,
            couch_storage=CouchStorage(datastore_name=datastore_name,
                                       datastore_profile='SCIDATA'),
            hdf_storage=HdfStorage(),
            number_of_workers=1)

        ingestion_management_service.activate_ingestion_configuration(
            ingestion_configuration_id=ingestion_configuration_id)

        definition = SBE37_CDM_stream_definition()
        data_stream_id = definition.data_stream_id
        encoding_id = definition.identifiables[data_stream_id].encoding_id
        element_count_id = definition.identifiables[
            data_stream_id].element_count_id

        stream_def_id = pubsub_management_service.create_stream_definition(
            container=definition)
        stream_id = pubsub_management_service.create_stream(
            stream_definition_id=stream_def_id)

        dataset_id = dataset_management_service.create_dataset(
            stream_id=stream_id,
            datastore_name=datastore_name,
            view_name='datasets/dataset_by_id')
        ingestion_management_service.create_dataset_configuration(
            dataset_id=dataset_id,
            archive_data=True,
            archive_metadata=True,
            ingestion_configuration_id=ingestion_configuration_id)
        definition.stream_resource_id = stream_id

        packet = _create_packet(definition)
        input_file = FileSystem.mktemp()
        input_file.write(packet.identifiables[data_stream_id].values)
        input_file_path = input_file.name
        input_file.close()

        fields = [
            'conductivity', 'height', 'latitude', 'longitude', 'pressure',
            'temperature', 'time'
        ]

        input_vectors = acquire_data([input_file_path], fields, 2).next()

        producer.publish(msg=packet, to_name=(XP, '%s.data' % stream_id))

        replay_id, replay_stream_id = data_retriever_service.define_replay(
            dataset_id)
        ar = gevent.event.AsyncResult()

        def sub_listen(msg, headers):

            assertions(isinstance(msg, StreamGranuleContainer),
                       'replayed message is not a granule.')
            hdf_string = msg.identifiables[data_stream_id].values
            sha1 = hashlib.sha1(hdf_string).hexdigest().upper()
            assertions(sha1 == msg.identifiables[encoding_id].sha1,
                       'Checksum failed.')
            assertions(
                msg.identifiables[element_count_id].value == 1,
                'record replay count is incorrect %d.' %
                msg.identifiables[element_count_id].value)
            output_file = FileSystem.mktemp()
            output_file.write(msg.identifiables[data_stream_id].values)
            output_file_path = output_file.name
            output_file.close()
            output_vectors = acquire_data([output_file_path], fields, 2).next()
            for field in fields:
                comparison = (input_vectors[field]['values'] ==
                              output_vectors[field]['values'])
                assertions(
                    comparison.all(), 'vector mismatch: %s vs %s' %
                    (input_vectors[field]['values'],
                     output_vectors[field]['values']))
            FileSystem.unlink(output_file_path)
            ar.set(True)

        subscriber = Subscriber(name=(XP, 'replay listener'),
                                callback=sub_listen)

        g = gevent.Greenlet(subscriber.listen,
                            binding='%s.data' % replay_stream_id)
        g.start()

        data_retriever_service.start_replay(replay_id)

        ar.get(timeout=10)

        FileSystem.unlink(input_file_path)
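
The checksum step inside sub_listen stands on its own: the replayed HDF payload must hash to the SHA-1 recorded in the granule. That check, isolated (hashlib is stdlib):

    import hashlib

    def payload_matches(payload, expected_sha1):
        # True if payload hashes to the expected upper-case SHA-1 hexdigest.
        return hashlib.sha1(payload).hexdigest().upper() == expected_sha1

    # e.g. payload_matches(msg.identifiables[data_stream_id].values,
    #                      msg.identifiables[encoding_id].sha1)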
Example #17
    def test_consume_one_message_at_a_time(self):
        # see also pyon.net.test.test_channel:TestChannelInt.test_consume_one_message_at_a_time

        pub3 = Publisher(to_name=(self.container.ex_manager.default_xs.exchange, 'routed.3'))
        pub5 = Publisher(to_name=(self.container.ex_manager.default_xs.exchange, 'routed.5'))

        #
        # SETUP COMPLETE, BEGIN TESTING OF EXCHANGE OBJECTS
        #

        xq = self.container.ex_manager.create_xn_queue('random_queue')
        self.addCleanup(xq.delete)

        # recv'd messages from the subscriber
        self.recv_queue = Queue()

        sub = Subscriber(from_name=xq, callback=lambda m,h: self.recv_queue.put((m, h)))
        sub.prepare_listener()

        # publish 10 messages - we're not bound yet, so they'll just disappear
        for x in xrange(10):
            pub3.publish("3,%s" % str(x))

        # no messages yet
        self.assertFalse(sub.get_one_msg(timeout=0))

        # now, we'll bind the xq
        xq.bind('routed.3')

        # even though we are consuming, there are no messages - the previously published ones all disappeared
        self.assertFalse(sub.get_one_msg(timeout=0))

        # publish those messages again
        for x in xrange(10):
            pub3.publish("3,%s" % str(x))

        # NOW we have messages!
        for x in xrange(10):
            self.assertTrue(sub.get_one_msg(timeout=0))
            m,h = self.recv_queue.get(timeout=0)
            self.assertEquals(m, "3,%s" % str(x))

        # we've cleared it all
        self.assertFalse(sub.get_one_msg(timeout=0))

        # bind a wildcard and publish on both
        xq.bind('routed.*')

        for x in xrange(10):
            time.sleep(0.3)
            pub3.publish("3,%s" % str(x))
            time.sleep(0.3)
            pub5.publish("5,%s" % str(x))

        # should get all 20, interleaved
        for x in xrange(10):
            self.assertTrue(sub.get_one_msg(timeout=0))
            m, h = self.recv_queue.get(timeout=0)
            self.assertEquals(m, "3,%s" % str(x))

            self.assertTrue(sub.get_one_msg(timeout=0))
            m, h = self.recv_queue.get(timeout=0)
            self.assertEquals(m, "5,%s" % str(x))

        # add the 'routed.5' binding, remove all other bindings
        xq.bind('routed.5')
        xq.unbind('routed.3')
        xq.unbind('routed.*')

        # try publishing to 3, shouldn't arrive anymore
        pub3.publish("3")

        self.assertFalse(sub.get_one_msg(timeout=0))

        # let's turn off the consumer and let things build up a bit
        sub._chan.stop_consume()

        for x in xrange(10):
            pub5.publish("5,%s" % str(x))

        # 10 messages in the queue, no consumers
        self.assertTupleEqual((10, 0), sub._chan.get_stats())

        # drain queue
        sub._chan.start_consume()
        time.sleep(1)       # yield to allow delivery

        for x in xrange(10):
            self.assertTrue(sub.get_one_msg(timeout=0))
            self.recv_queue.get(timeout=0)

        sub.close()
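
Distilled, the lifecycle this test walks through is: publishes before a binding exists vanish, publishes after xq.bind queue up, and unbinding makes them vanish again. A minimal round trip under those semantics, reusing this page's API (container here is a hypothetical handle to a running pyon container):

    from Queue import Queue  # 'queue' on Python 3

    xq = container.ex_manager.create_xn_queue('demo_queue')
    xq.bind('routed.demo')
    pub = Publisher(to_name=(container.ex_manager.default_xs.exchange,
                             'routed.demo'))

    got = Queue()
    sub = Subscriber(from_name=xq, callback=lambda m, h: got.put(m))
    sub.prepare_listener()

    pub.publish('ping')
    assert sub.get_one_msg(timeout=1)       # consume exactly one message
    assert got.get(timeout=1) == 'ping'
    sub.close()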