Example No. 1
    def test_last_update_cache(self):
        handle = self.start_worker()
        queue = Queue()
        o_process = handle.process
        def new_process(msg):
            o_process(msg)
            queue.put(True)
        handle.process = new_process



        definition = SBE37_CDM_stream_definition()
        publisher = Publisher()

        stream_def_id = self.pubsub_cli.create_stream_definition(container=definition)
        stream_id = self.pubsub_cli.create_stream(stream_definition_id=stream_def_id)

        time = float(0.0)

        for granule in self.make_points(definition=definition, stream_id=stream_id, N=10):

            publisher.publish(granule, to_name=(self.XP, stream_id+'.data'))
            # Determinism sucks
            try:
                queue.get(timeout=5)
            except Empty:
                self.assertTrue(False, 'Process never received the message.')

            doc = self.db.read(stream_id)
            ntime = doc.variables['time'].value
            self.assertTrue(ntime >= time, 'The documents did not sequentially get updated correctly.')
            time = ntime
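
The handler-wrapping trick above generalizes to any push-style callback; a minimal sketch of the same synchronization pattern using gevent's Queue (the helper name is hypothetical):

from gevent.queue import Queue

def make_signaling_handler(original_handler):
    # Hypothetical helper: wrap a handler so a test can block until it has run.
    done = Queue()
    def handler(msg):
        original_handler(msg)
        done.put(True)   # signal the waiting test
    return handler, done

A test then swaps the wrapped handler in and calls done.get(timeout=5), catching gevent.queue.Empty on timeout, exactly as above.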
Example No. 2
    def __init__(self, event_type=None, xp=None, process=None, **kwargs):
        """
        Constructs a publisher of events for a specific type.

        @param  event_type  The name of the event type object
        @param  xp          Exchange (AMQP) name, can be none, will use events default.
        """

        self.event_type = event_type
        self.process = process
        self._events_xp = CFG.get_safe("exchange.core.events", DEFAULT_EVENTS_XP)

        if bootstrap.container_instance and getattr(bootstrap.container_instance, 'event_repository', None):
            self.event_repo = bootstrap.container_instance.event_repository
        else:
            self.event_repo = None

        # generate an exchange name to publish events to
        container = (hasattr(self, '_process') and hasattr(self._process, 'container') and self._process.container) or BaseEndpoint._get_container_instance()
        if container and container.has_capability(container.CCAP.EXCHANGE_MANAGER):   # might be too early in chain
            xp = xp or container.create_xp(self._events_xp)
            to_name = xp
        else:
            xp = xp or self.get_events_exchange_point()
            to_name = (xp, None)

        Publisher.__init__(self, to_name=to_name, **kwargs)
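
A minimal usage sketch for this constructor; publish_event with keyword fields matches the calls in the test examples further down the page:

ev_pub = EventPublisher(event_type="ResourceEvent")
ev_pub.publish_event(origin="my_origin", description="something happened")   # field values are illustrative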
Example No. 3
 def launch_benchmark(transform_number=1, primer=1, message_length=4):
     import gevent
     from gevent.greenlet import Greenlet
     from pyon.util.containers import DotDict
     from pyon.net.transport import NameTrio
     from pyon.net.endpoint import Publisher
     import uuid
     num = transform_number
     msg_len = message_length
     transforms = list()
     pids = 1
     TransformBenchTesting.message_length = message_length
     cc = Container.instance
     pub = Publisher(to_name=NameTrio(get_sys_name(),
                                      str(uuid.uuid4())[0:6]))
     for i in xrange(num):
         tbt = cc.proc_manager._create_service_instance(
             str(pids), 'tbt', 'prototype.transforms.linear',
             'TransformInPlace',
             DotDict({
                 'process': {
                     'name': 'tbt%d' % pids,
                     'transform_id': pids
                 }
             }))
         tbt.init()
         tbt.start()
         gevent.sleep(0.2)
         for i in xrange(primer):
             pub.publish(list(xrange(msg_len)))
         g = Greenlet(tbt.perf)
         g.start()
         transforms.append(tbt)
         pids += 1
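
The NameTrio above pairs the system exchange name with a short random queue name; a distilled sketch, assuming get_sys_name lives in pyon.core.bootstrap as in the pyon source:

import uuid
from pyon.net.transport import NameTrio
from pyon.net.endpoint import Publisher
from pyon.core.bootstrap import get_sys_name   # assumed import location

pub = Publisher(to_name=NameTrio(get_sys_name(), str(uuid.uuid4())[0:6]))
pub.publish(list(range(4)))   # a 4-element payload, mirroring message_length=4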
Example No. 4
    def test_xp_durable_send(self):
        xp = self.container.ex_manager.create_xp('an_xp')
        #self.addCleanup(xp.delete)

        xq = self.container.ex_manager.create_xn_queue('no_matter', xp)
        self.addCleanup(xq.delete)
        xq.bind('one')

        pub = Publisher(to_name=xp.create_route('one'))
        pub.publish('test')
        pub.close()


        try:
            url = self.container.ex_manager._get_management_url("queues", "%2f", xq.queue, "get")
            res = self.container.ex_manager._make_management_call(url,
                                                                  use_ems=False,
                                                                  method='post',
                                                                  data=json.dumps({'count':1, 'requeue':True,'encoding':'auto'}))

            self.assertEquals(len(res), 1)
            self.assertIn('properties', res[0])
            self.assertIn('delivery_mode', res[0]['properties'])
            self.assertEquals(2, res[0]['properties']['delivery_mode'])

        except Exception, e:
            # Rabbit 3.x does not support this command anymore apparently.
            self.assertIn('Method Not Allowed', e.message)
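
For reference, AMQP's delivery_mode property is 1 for transient and 2 for persistent messages, so the final assertion verifies that publishing through the durable exchange point marked the message persistent.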
Example No. 5
    def __init__(self, xp=None, **kwargs):

        # generate a name
        xp = xp or get_events_exchange_point()
        name = (xp, None)

        Publisher.__init__(self, name=name, **kwargs)
Example No. 6
    def __init__(self, process, stream, **kwargs):
        """
        Creates a StreamPublisher which publishes to the specified stream
        and is attached to the specified process.
        @param process   The IonProcess to attach to.
        @param stream    Name of the stream or StreamRoute object
        """
        super(StreamPublisher, self).__init__()
        if not isinstance(process, BaseService):
            raise BadRequest("No valid process provided.")
        if isinstance(stream, basestring):
            self.stream_route = StreamRoute(routing_key=stream)
        elif isinstance(stream, StreamRoute):
            self.stream_route = stream
        else:
            raise BadRequest("No valid stream information provided.")

        self.container = process.container
        self.xp_name = get_streaming_xp(
            self.stream_route.exchange_point)  # Fully qualified

        self.xp = self.container.ex_manager.create_xp(
            self.stream_route.exchange_point or DEFAULT_DATA_XP)
        self.xp_route = self.xp.create_route(self.stream_route.routing_key)

        Publisher.__init__(self, to_name=self.xp_route, **kwargs)
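
A minimal usage sketch for this constructor, assuming my_process is an already-running BaseService-derived process (the process and stream names are hypothetical):

pub = StreamPublisher(process=my_process, stream="ctd_parsed")   # my_process: hypothetical
pub.publish({"temperature": 10.5})                               # publish() comes from Publisher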
Example No. 7
    def test_sub(self):

        #start interaction observer
        io = InteractionObserver()
        io.start()

        #publish an event
        ev_pub = EventPublisher(event_type="ResourceEvent")
        ev_pub.publish_event(origin="specific", description="event")


        # publish a message
        msg_pub = Publisher()
        msg_pub.publish(to_name='anyone', msg="msg")

        # give 2 seconds for the messages to arrive
        time.sleep(2)

        #verify that two messages (an event and a message) are seen
        self.assertEquals(len(io.msg_log), 2)

        #iterate through the messages observed
        for item in io.msg_log:
            # if event
            if item[2]:
                #verify that the origin is what we sent
                self.assertEquals(item[1]['origin'], 'specific')
        dump = io._get_data(io.msg_log,{})
        sump = dump
Example No. 10
 def launch_benchmark(transform_number=1, primer=1,message_length=4):
     import gevent
     from gevent.greenlet import Greenlet
     from pyon.util.containers import DotDict
     from pyon.net.transport import NameTrio
     from pyon.net.endpoint import Publisher
     import uuid
     num = transform_number
     msg_len = message_length
     transforms = list()
     pids = 1
     TransformBenchTesting.message_length = message_length
     cc = Container.instance
     pub = Publisher(to_name=NameTrio(get_sys_name(),str(uuid.uuid4())[0:6]))
     for i in xrange(num):
         tbt=cc.proc_manager._create_service_instance(str(pids), 'tbt', 'prototype.transforms.linear', 'TransformInPlace', DotDict({'process':{'name':'tbt%d' % pids, 'transform_id':pids}}))
         tbt.init()
         tbt.start()
         gevent.sleep(0.2)
         for i in xrange(primer):
             pub.publish(list(xrange(msg_len)))
         g = Greenlet(tbt.perf)
         g.start()
         transforms.append(tbt)
         pids += 1
Example No. 11
    def __init__(self, event_type=None, xp=None, process=None, **kwargs):
        """
        Constructs a publisher of events for a specific type.

        @param  event_type  The name of the event type object
        @param  xp          Exchange (AMQP) name, can be none, will use events default.
        """

        self.event_type = event_type
        self.process = process
        self._events_xp = CFG.get_safe("exchange.core.events",
                                       DEFAULT_EVENTS_XP)

        if bootstrap.container_instance and getattr(
                bootstrap.container_instance, 'event_repository', None):
            self.event_repo = bootstrap.container_instance.event_repository
        else:
            self.event_repo = None

        # generate an exchange name to publish events to
        container = (hasattr(self, '_process') and hasattr(
            self._process, 'container') and self._process.container
                     ) or BaseEndpoint._get_container_instance()
        if container and container.has_capability(
                container.CCAP.EXCHANGE_MANAGER
        ):  # might be too early in chain
            xp = xp or container.create_xp(self._events_xp)
            to_name = xp
        else:
            xp = xp or self.get_events_exchange_point()
            to_name = (xp, None)

        Publisher.__init__(self, to_name=to_name, **kwargs)
Example No. 12
 def __init__(self, process=None, wait_name=None):
     self.process = process
     if not wait_name.startswith("asyncresult_"):
         raise BadRequest("Not a valid wait_name")
     self.wait_name = wait_name
     if self.process:
         self.pub = ProcessPublisher(process=self.process,
                                     to_name=wait_name)
     else:
         self.pub = Publisher(to_name=wait_name)
Example No. 13
    def start(self):
        from pyon.net.endpoint import Publisher
        from pyon.util.async import spawn
        self.heartbeat_quit = Event()
        self.heartbeat_interval = float(
            self.heartbeat_cfg.get("publish_interval", 60))
        self.heartbeat_topic = self.heartbeat_cfg.get("topic", "heartbeat")
        self.heartbeat_pub = Publisher(to_name=self.heartbeat_topic)

        # Directly spawn a greenlet - we don't want this to be a supervised IonProcessThread
        self.heartbeat_gl = spawn(self.heartbeat_loop)
        self.started = True
        log.info("Started container heartbeat (interval=%s, topic=%s)",
                 self.heartbeat_interval, self.heartbeat_topic)
Example No. 14
    def on_start(self):
        TransformDataProcess.on_start(self)

        # set up subscriber to *
        self._bt_sub = Subscriber(callback=lambda m, h: self.call_process(m),
                                  from_name=NameTrio(get_sys_name(),
                                                     'bench_queue', '*'))

        # spawn listener
        self._sub_gl = spawn(self._bt_sub.listen)

        # set up publisher to anything!
        self._bt_pub = Publisher(to_name=NameTrio(get_sys_name(),
                                                  str(uuid.uuid4())[0:6]))
Example No. 15
    def test_sub(self):
        ar = event.AsyncResult()
        def cb(*args, **kwargs):
            ar.set(args)

        sub = ConvSubscriber(callback=cb)
        pub = Publisher()
        self._listen(sub)
        pub.publish(to_name='anyone', msg="hello")


        evmsg, evheaders = ar.get(timeout=5)
        self.assertEquals(evmsg, "hello")
        self.assertAlmostEquals(int(evheaders['ts']), int(get_ion_ts()), delta=5000)
Example No. 16
class AsyncResultPublisher(object):
    """
    Class that helps sending async results.
    """
    def __init__(self, process=None, wait_name=None):
        self.process = process
        if not wait_name.startswith("asyncresult_"):
            raise BadRequest("Not a valid wait_name")
        self.wait_name = wait_name
        if self.process:
            self.pub = ProcessPublisher(process=self.process,
                                        to_name=wait_name)
        else:
            self.pub = Publisher(to_name=wait_name)

    def publish_result(self, request_id, result):
        async_res = AsyncResultMsg(result=result,
                                   request_id=request_id,
                                   ts=get_ion_ts())
        self.pub.publish(async_res)
        self.pub.close()

    def publish_error(self, request_id, error, error_code):
        async_res = AsyncResultMsg(result=error,
                                   request_id=request_id,
                                   ts=get_ion_ts(),
                                   status=error_code)
        self.pub.publish(async_res)
        self.pub.close()
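
A hypothetical round trip with this class; AsyncResultMsg and the waiter side appear in the neighboring examples on this page:

pub = AsyncResultPublisher(wait_name="asyncresult_1234")     # name must start with "asyncresult_"
pub.publish_result(request_id="req-1", result={"ok": True})  # publishes the result, then closes the publisher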
Example No. 17
    def launch_benchmark(transform_number=1, primer=1, message_length=4):
        import gevent
        from gevent.greenlet import Greenlet
        from pyon.util.containers import DotDict
        from pyon.net.transport import NameTrio
        from pyon.net.endpoint import Publisher
        import numpy
        from pyon.ion.granule.record_dictionary import RecordDictionaryTool
        from pyon.ion.granule.taxonomy import TaxyTool
        from pyon.ion.granule.granule import build_granule

        tt = TaxyTool()
        tt.add_taxonomy_set('a')

        import uuid
        num = transform_number
        msg_len = message_length
        transforms = list()
        pids = 1
        TransformBenchTesting.message_length = message_length
        cc = Container.instance
        pub = Publisher(to_name=NameTrio(get_sys_name(),
                                         str(uuid.uuid4())[0:6]))
        for i in xrange(num):
            tbt = cc.proc_manager._create_service_instance(
                str(pids), 'tbt', 'prototype.transforms.linear',
                'TransformInPlaceNewGranule',
                DotDict({
                    'process': {
                        'name': 'tbt%d' % pids,
                        'transform_id': pids
                    }
                }))
            tbt.init()
            tbt.start()
            gevent.sleep(0.2)
            for i in xrange(primer):
                rd = RecordDictionaryTool(tt, message_length)
                rd['a'] = numpy.arange(message_length)
                gran = build_granule(data_producer_id='dp_id',
                                     taxonomy=tt,
                                     record_dictionary=rd)
                pub.publish(gran)

            g = Greenlet(tbt.perf)
            g.start()
            transforms.append(tbt)
            pids += 1
Example No. 18
    def test_async_result(self):
        request_id = "request_foo"
        waiter = AsyncResultWaiter()
        self.assertFalse(waiter.async_res.ready())
        token = waiter.activate()
        self.assertFalse(waiter.async_res.ready())
        log.info("Wait token: %s", token)

        pub = Publisher(to_name=token)
        async_msg = AsyncResultMsg(request_id=request_id)
        pub.publish(async_msg)

        res = waiter.await(timeout=1, request_id=request_id)
        self.assertTrue(waiter.async_res.ready())
        self.assertIsInstance(res, AsyncResultMsg)
        self.assertEqual(res.__dict__, async_msg.__dict__)
Example No. 19
    def test_pub_speed(self):
        pub = Publisher(node=self.container.node, name="i_no_exist")

        print >> sys.stderr, ""

        self.counter = 0
        self.alive = True

        def sendem():
            while self.alive:
                self.counter += 1
                pub.publish('meh')

        start_time = time.time()

        sendgl = spawn(sendem)
        time.sleep(5)
        end_time = time.time()

        self.alive = False
        sendgl.join(timeout=2)
        sendgl.kill()

        diff = end_time - start_time
        mps = float(self.counter) / diff

        print >> sys.stderr, "Published messages per second:", mps, "(", self.counter, "messages in", diff, "seconds)"
Example No. 20
    def __init__(self, CFG, factory, log=logging):

        self._log = log
        self._log.log(logging.DEBUG, "Starting the heartbeat thread")
        self._CFG = CFG
        self._res = None
        self._interval = int(CFG.eeagent.heartbeat)
        self._res = None
        self._done = False
        self._factory = factory
        self._publisher = Publisher()
        self._pd_name = CFG.eeagent.get('heartbeat_queue', 'heartbeat_queue')

        self._factory.set_state_change_callback(self._state_change_callback,
                                                None)
        self._first_beat()
Example No. 21
class ContainerHeartbeater(object):
    """ Utility class that implements the container heartbeat publishing mechanism """
    def __init__(self, container, cfg):
        self.container = container
        self.heartbeat_cfg = cfg
        self.started = False

    def start(self):
        from pyon.net.endpoint import Publisher
        from pyon.util.async import spawn
        self.heartbeat_quit = Event()
        self.heartbeat_interval = float(
            self.heartbeat_cfg.get("publish_interval", 60))
        self.heartbeat_topic = self.heartbeat_cfg.get("topic", "heartbeat")
        self.heartbeat_pub = Publisher(to_name=self.heartbeat_topic)

        # Directly spawn a greenlet - we don't want this to be a supervised IonProcessThread
        self.heartbeat_gl = spawn(self.heartbeat_loop)
        self.started = True
        log.info("Started container heartbeat (interval=%s, topic=%s)",
                 self.heartbeat_interval, self.heartbeat_topic)

    def stop(self):
        if self.started:
            self.heartbeat_quit.set()
            self.heartbeat_gl.join(timeout=1)
            self.started = False

    def heartbeat_loop(self):
        self.publish_heartbeat()
        while not self.heartbeat_quit.wait(timeout=self.heartbeat_interval):
            self.publish_heartbeat()

    def publish_heartbeat(self):
        try:
            hb_msg = self.get_heartbeat_message()
            headers = dict(expiration=60000)
            self.heartbeat_pub.publish(hb_msg, headers=headers)
        except Exception:
            log.exception("Error publishing heatbeat")

    def get_heartbeat_message(self):
        from interface.objects import ContainerHeartbeat
        hb_msg = ContainerHeartbeat(container_id=self.container.id,
                                    ts=get_ion_ts())
        return hb_msg
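
A minimal start/stop sketch, assuming a container object exposing an id attribute (required by get_heartbeat_message) and a plain dict for the config:

hb = ContainerHeartbeater(my_container, {"publish_interval": 30, "topic": "heartbeat"})  # my_container: hypothetical
hb.start()
# ... container runs ...
hb.stop()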
Example No. 22
    def __init__(self, xp=None, event_repo=None, **kwargs):
        """
        Constructs a publisher of events.

        @param  xp          Exchange (AMQP) name, can be none, will use events default.
        @param  event_repo  An optional repository for published events. If None, will not store
                            published events. Use the Container.event_repository for this
                            parameter if you have one.
        """

        # generate a name
        xp = xp or get_events_exchange_point()
        name = (xp, None)

        self.event_repo = event_repo

        Publisher.__init__(self, to_name=name, **kwargs)
Example No. 23
    def __init__(self, CFG, factory, log=logging):

        self._log = log
        self._log.log(logging.DEBUG, "Starting the heartbeat thread")
        self._CFG = CFG
        self._res = None
        self._interval = int(CFG.eeagent.heartbeat)
        self._res = None
        self._done = False
        self._factory = factory
        self._next_beat(datetime.datetime.now())
        self._publisher = Publisher()
        self._pd_name = CFG.eeagent.get('process_dispatcher',
                                        'processdispatcher')

        self._factory.set_state_change_callback(self._state_change_callback,
                                                None)
Example No. 24
class TestPublisher(PyonTestCase):
    def setUp(self):
        self._node = Mock(spec=NodeB)
        self._pub = Publisher(node=self._node, name="testpub")
        self._ch = Mock(spec=SendChannel)
        self._node.channel.return_value = self._ch

    def test_publish(self):
        self.assertEquals(self._node.channel.call_count, 0)

        self._pub.publish("pub")

        self._node.channel.assert_called_once_with(self._pub.channel_type)
        self.assertEquals(self._ch.send.call_count, 1)

        self._pub.publish("pub2")
        self._node.channel.assert_called_once_with(self._pub.channel_type)
        self.assertEquals(self._ch.send.call_count, 2)
Example No. 25
class HeartBeater(object):
    def __init__(self, CFG, factory, log=logging):

        self._log = log
        self._log.log(logging.DEBUG, "Starting the heartbeat thread")
        self._CFG = CFG
        self._res = None
        self._interval = int(CFG.eeagent.heartbeat)
        self._res = None
        self._done = False
        self._factory = factory
        self._publisher = Publisher()
        self._pd_name = CFG.eeagent.get('heartbeat_queue', 'heartbeat_queue')

        self._factory.set_state_change_callback(self._state_change_callback,
                                                None)
        self._first_beat()

    def _first_beat(self):
        self._beat_time = datetime.datetime.now()

    def _next_beat(self, now):
        self._beat_time = now + datetime.timedelta(seconds=self._interval)

    def _state_change_callback(self, user_arg):
        # on state change set the beat time to now
        self._beat_time = datetime.datetime.now()

    def poll(self):

        now = datetime.datetime.now()
        if now > self._beat_time:
            self._next_beat(now)
            self.beat()

    def beat(self):
        try:
            beat = make_beat_msg(self._factory, self._CFG)
            message = dict(beat=beat, resource_id=self._CFG.agent.resource_id)
            to_name = self._pd_name
            self._log.debug("Send heartbeat: %s to %s", message, self._pd_name)
            self._publisher.publish(message, to_name=to_name)
        except:
            self._log.exception("beat failed")
Example No. 26
class ContainerHeartbeater(object):
    """ Utility class that implements the container heartbeat publishing mechanism """
    def __init__(self, container, cfg):
        self.container = container
        self.heartbeat_cfg = cfg
        self.started = False

    def start(self):
        from pyon.net.endpoint import Publisher
        from pyon.util.async import spawn
        self.heartbeat_quit = Event()
        self.heartbeat_interval = float(self.heartbeat_cfg.get("publish_interval", 60))
        self.heartbeat_topic = self.heartbeat_cfg.get("topic", "heartbeat")
        self.heartbeat_pub = Publisher(to_name=self.heartbeat_topic)

        # Directly spawn a greenlet - we don't want this to be a supervised IonProcessThread
        self.heartbeat_gl = spawn(self.heartbeat_loop)
        self.started = True
        log.info("Started container heartbeat (interval=%s, topic=%s)", self.heartbeat_interval, self.heartbeat_topic)

    def stop(self):
        if self.started:
            self.heartbeat_quit.set()
            self.heartbeat_gl.join(timeout=1)
            self.started = False

    def heartbeat_loop(self):
        self.publish_heartbeat()
        while not self.heartbeat_quit.wait(timeout=self.heartbeat_interval):
            self.publish_heartbeat()

    def publish_heartbeat(self):
        try:
            hb_msg = self.get_heartbeat_message()
            headers = dict(expiration=60000)
            self.heartbeat_pub.publish(hb_msg, headers=headers)
        except Exception:
            log.exception("Error publishing heatbeat")

    def get_heartbeat_message(self):
        from interface.objects import ContainerHeartbeat
        hb_msg = ContainerHeartbeat(container_id=self.container.id, ts=get_ion_ts())
        return hb_msg
Example No. 27
File: event.py Project: daf/pyon
    def __init__(self, event_type=None, xp=None, **kwargs):
        """
        Constructs a publisher of events for a specific type.

        @param  event_type  The name of the event type object
        @param  xp          Exchange (AMQP) name, can be none, will use events default.
        """

        self.event_type = event_type

        if bootstrap.container_instance and getattr(bootstrap.container_instance, 'event_repository', None):
            self.event_repo = bootstrap.container_instance.event_repository
        else:
            self.event_repo = None

        # generate an exchange name to publish events to
        xp = xp or get_events_exchange_point()
        name = (xp, None)

        Publisher.__init__(self, to_name=name, **kwargs)
Example No. 28
    def __init__(self, event_type=None, xp=None, **kwargs):
        """
        Constructs a publisher of events for a specific type.

        @param  event_type  The name of the event type object
        @param  xp          Exchange (AMQP) name, can be none, will use events default.
        """

        self.event_type = event_type

        if bootstrap.container_instance and getattr(bootstrap.container_instance, 'event_repository', None):
            self.event_repo = bootstrap.container_instance.event_repository
        else:
            self.event_repo = None

        # generate an exchange name to publish events to
        xp = xp or get_events_exchange_point()
        name = (xp, None)

        Publisher.__init__(self, to_name=name, **kwargs)
Example No. 29
    def on_start(self):
        TransformDataProcess.on_start(self)

        # set up subscriber to *
        self._bt_sub = Subscriber(callback=lambda m, h: self.call_process(m),
                                  from_name=NameTrio(get_sys_name(), 'bench_queue', '*'))

        # spawn listener
        self._sub_gl = spawn(self._bt_sub.listen)

        # set up publisher to anything!
        self._bt_pub = Publisher(to_name=NameTrio(get_sys_name(), str(uuid.uuid4())[0:6]))
Example No. 30
    def start(self):
        from pyon.net.endpoint import Publisher
        from pyon.util.async import spawn
        self.heartbeat_quit = Event()
        self.heartbeat_interval = float(self.heartbeat_cfg.get("publish_interval", 60))
        self.heartbeat_topic = self.heartbeat_cfg.get("topic", "heartbeat")
        self.heartbeat_pub = Publisher(to_name=self.heartbeat_topic)

        # Directly spawn a greenlet - we don't want this to be a supervised IonProcessThread
        self.heartbeat_gl = spawn(self.heartbeat_loop)
        self.started = True
        log.info("Started container heartbeat (interval=%s, topic=%s)", self.heartbeat_interval, self.heartbeat_topic)
Example No. 31
class HeartBeater(object):
    def __init__(self, CFG, factory, log=logging):

        self._log = log
        self._log.log(logging.DEBUG, "Starting the heartbeat thread")
        self._CFG = CFG
        self._res = None
        self._interval = int(CFG.eeagent.heartbeat)
        self._res = None
        self._done = False
        self._factory = factory
        self._next_beat(datetime.datetime.now())
        self._publisher = Publisher()
        self._pd_name = CFG.eeagent.get('heartbeat_queue', 'heartbeat_queue')

        self._factory.set_state_change_callback(self._state_change_callback, None)

    def _next_beat(self, now):
        self._beat_time = now + datetime.timedelta(seconds=self._interval)

    def _state_change_callback(self, user_arg):
        # on state change set the beat time to now
        self._beat_time = datetime.datetime.now()

    def poll(self):

        now = datetime.datetime.now()
        if now > self._beat_time:
            self._next_beat(now)
            self.beat()

    def beat(self):
        try:
            beat = make_beat_msg(self._factory, self._CFG)
            message = dict(beat=beat, resource_id=self._CFG.agent.resource_id)
            to_name = self._pd_name
            self._log.debug("Send heartbeat: %s to %s", message, self._pd_name)
            self._publisher.publish(message, to_name=to_name)
        except:
            self._log.exception("beat failed")
Example No. 32
    def test_xp_durable_send(self):
        xp = self.container.ex_manager.create_xp('an_xp')
        #self.addCleanup(xp.delete)

        xq = self.container.ex_manager.create_xn_queue('no_matter', xp)
        self.addCleanup(xq.delete)
        xq.bind('one')

        pub = Publisher(to_name=xp.create_route('one'))
        pub.publish('test')
        pub.close()

        url = self.container.ex_manager._get_management_url(
            "queues", "%2f", xq.queue, "get")
        res = self.container.ex_manager._make_management_call(url,
                                                              use_ems=False,
                                                              method='post',
                                                              data=json.dumps({'count': 1, 'requeue': True, 'encoding': 'auto'}))

        self.assertEquals(len(res), 1)
        self.assertIn('properties', res[0])
        self.assertIn('delivery_mode', res[0]['properties'])
        self.assertEquals(2, res[0]['properties']['delivery_mode'])
Example No. 33
    def test_sub(self):

        # publish 2 messages
        pub = Publisher()
        pub.publish(to_name='anyone', msg="hello1")
        pub.publish(to_name='anyone', msg="hello2")

        dsm = self.container.datastore_manager
        ds = dsm.get_datastore("conversations")

        # give at least 2 seconds for the persister to save in the repository
        # test may fail if it does not wait long enough for the persister
        no_of_conv = 0
        retried = 0

        while (no_of_conv != 2 and retried < 5):
            time.sleep(2)
            # assert that the 2 messages have been persisted
            no_of_conv = len(ds.list_objects())
            retried = retried + 1

        self.assertEquals(no_of_conv, 2)
Example No. 35
    def launch_benchmark(transform_number=1, primer=1,message_length=4):
        import gevent
        from gevent.greenlet import Greenlet
        from pyon.util.containers import DotDict
        from pyon.net.transport import NameTrio
        from pyon.net.endpoint import Publisher
        import numpy
        from pyon.ion.granule.record_dictionary import RecordDictionaryTool
        from pyon.ion.granule.taxonomy import TaxyTool
        from pyon.ion.granule.granule import build_granule

        tt = TaxyTool()
        tt.add_taxonomy_set('a')

        import uuid
        num = transform_number
        msg_len = message_length
        transforms = list()
        pids = 1
        TransformBenchTesting.message_length = message_length
        cc = Container.instance
        pub = Publisher(to_name=NameTrio(get_sys_name(),str(uuid.uuid4())[0:6]))
        for i in xrange(num):
            tbt=cc.proc_manager._create_service_instance(str(pids), 'tbt', 'prototype.transforms.linear', 'TransformInPlaceNewGranule', DotDict({'process':{'name':'tbt%d' % pids, 'transform_id':pids}}))
            tbt.init()
            tbt.start()
            gevent.sleep(0.2)
            for i in xrange(primer):
                rd = RecordDictionaryTool(tt, message_length)
                rd['a'] = numpy.arange(message_length)
                gran = build_granule(data_producer_id='dp_id',taxonomy=tt, record_dictionary=rd)
                pub.publish(gran)

            g = Greenlet(tbt.perf)
            g.start()
            transforms.append(tbt)
            pids += 1
Example No. 36
    def __init__(self, CFG, factory, log=logging):

        self._log = log
        self._log.log(logging.DEBUG, "Starting the heartbeat thread")
        self._CFG = CFG
        self._res = None
        self._interval = int(CFG.eeagent.heartbeat)
        self._res = None
        self._done = False
        self._factory = factory
        self._next_beat(datetime.datetime.now())
        self._publisher = Publisher()
        self._pd_name = CFG.eeagent.get('process_dispatcher', 'processdispatcher')

        self._factory.set_state_change_callback(self._state_change_callback, None)
Example No. 37
    def __init__(self, process, stream, **kwargs):
        """
        Creates a StreamPublisher which publishes to the specified stream
        and is attached to the specified process.
        @param process   The IonProcess to attach to.
        @param stream    Name of the stream or StreamRoute object
        """
        super(StreamPublisher, self).__init__()
        if not isinstance(process, BaseService):
            raise BadRequest("No valid process provided.")
        if isinstance(stream, basestring):
            self.stream_route = StreamRoute(routing_key=stream)
        elif isinstance(stream, StreamRoute):
            self.stream_route = stream
        else:
            raise BadRequest("No valid stream information provided.")

        self.container = process.container
        self.xp_name = get_streaming_xp(self.stream_route.exchange_point)   # Fully qualified

        self.xp = self.container.ex_manager.create_xp(self.stream_route.exchange_point or DEFAULT_DATA_XP)
        self.xp_route = self.xp.create_route(self.stream_route.routing_key)

        Publisher.__init__(self, to_name=self.xp_route, **kwargs)
Example No. 38
    def __init__(self, CFG, factory, process_id, process, log=logging):

        self._log = log
        self._log.log(logging.DEBUG, "Starting the heartbeat thread")
        self._CFG = CFG
        self._res = None
        self._interval = int(CFG.eeagent.heartbeat)
        self._res = None
        self._done = False
        self._started = False
        self._factory = factory
        self.process = process
        self.process_id = process_id
        self._publisher = Publisher()
        self._pd_name = CFG.eeagent.get('heartbeat_queue', 'heartbeat_queue')

        self._factory.set_state_change_callback(self._state_change_callback, None)
        self._first_beat()
Example No. 39
    def test_consume_one_message_at_a_time(self):
        # see also pyon.net.test.test_channel:TestChannelInt.test_consume_one_message_at_a_time

        pub3 = Publisher(to_name=(self.container.ex_manager.default_xs.exchange, 'routed.3'))
        pub5 = Publisher(to_name=(self.container.ex_manager.default_xs.exchange, 'routed.5'))

        #
        # SETUP COMPLETE, BEGIN TESTING OF EXCHANGE OBJECTS
        #

        xq = self.container.ex_manager.create_xn_queue('random_queue')
        self.addCleanup(xq.delete)

        # recv'd messages from the subscriber
        self.recv_queue = Queue()

        def cb(m, h):
            raise StandardError("Subscriber callback never gets called back!")

        sub = Subscriber(from_name=xq, callback=cb)
        sub.initialize()

        # publish 10 messages - we're not bound yet, so they'll just disappear
        for x in xrange(10):
            pub3.publish("3,%s" % str(x))

        # allow time for routing
        time.sleep(2)

        # no messages yet
        self.assertRaises(Timeout, sub.get_one_msg, timeout=0)

        # now, we'll bind the xq
        xq.bind('routed.3')

        # even though we are consuming, there are no messages - the previously published ones all disappeared
        self.assertRaises(Timeout, sub.get_one_msg, timeout=0)

        # publish those messages again
        for x in xrange(10):
            pub3.publish("3,%s" % str(x))

        # allow time for routing
        time.sleep(2)

        # NOW we have messages!
        for x in xrange(10):
            mo = sub.get_one_msg(timeout=10)
            self.assertEquals(mo.body, "3,%s" % str(x))
            mo.ack()

        # we've cleared it all
        self.assertRaises(Timeout, sub.get_one_msg, timeout=0)

        # bind a wildcard and publish on both
        xq.bind('routed.*')

        for x in xrange(10):
            time.sleep(0.3)
            pub3.publish("3,%s" % str(x))
            time.sleep(0.3)
            pub5.publish("5,%s" % str(x))

        # allow time for routing
        time.sleep(2)

        # should get all 20, interleaved
        for x in xrange(10):
            mo = sub.get_one_msg(timeout=1)
            self.assertEquals(mo.body, "3,%s" % str(x))
            mo.ack()

            mo = sub.get_one_msg(timeout=1)
            self.assertEquals(mo.body, "5,%s" % str(x))
            mo.ack()

        # add the 'routed.5' binding, remove all other bindings
        xq.bind('routed.5')
        xq.unbind('routed.3')
        xq.unbind('routed.*')

        # try publishing to 3, shouldn't arrive anymore
        pub3.publish("3")

        self.assertRaises(Timeout, sub.get_one_msg, timeout=0)

        # let's turn off the consumer and let things build up a bit
        sub._chan.stop_consume()

        for x in xrange(10):
            pub5.publish("5,%s" % str(x))

        # allow time for routing
        time.sleep(2)

        # 10 messages in the queue, no consumers
        self.assertTupleEqual((10, 0), sub._chan.get_stats())

        # drain queue
        sub._chan.start_consume()

        for x in xrange(10):
            mo = sub.get_one_msg(timeout=1)
            mo.ack()

        sub.close()
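
Distilled from the test above, the core bind/publish/consume pattern (container and cb are assumed to exist; every call is taken from the test itself):

xq = container.ex_manager.create_xn_queue('my_queue')
xq.bind('routed.3')            # route matching keys into the queue
sub = Subscriber(from_name=xq, callback=cb)
sub.initialize()

pub = Publisher(to_name=(container.ex_manager.default_xs.exchange, 'routed.3'))
pub.publish("payload")

mo = sub.get_one_msg(timeout=10)
mo.ack()
sub.close()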
Example No. 40
class TestPublisher(PyonTestCase):
    def setUp(self):
        self._node = Mock(spec=NodeB)
        self._pub = Publisher(node=self._node, to_name="testpub")
        self._ch = Mock(spec=SendChannel)
        self._node.channel.return_value = self._ch
        self._node.interceptors = {}

    def test_publish(self):
        self.assertEquals(self._node.channel.call_count, 0)

        self._pub.publish("pub")

        self._node.channel.assert_called_once_with(self._pub.channel_type,
                                                   transport=None)
        self.assertEquals(self._ch.send.call_count, 1)

        self._pub.publish("pub2")
        self._node.channel.assert_called_once_with(self._pub.channel_type,
                                                   transport=None)
        self.assertEquals(self._ch.send.call_count, 2)

    def test_publish_with_new_name(self):

        self.assertEquals(self._node.channel.call_count, 0)

        self._pub.publish(sentinel.msg, to_name=sentinel.to_name)
        self.assertEquals(self._ch.send.call_count, 1)

        self._pub.publish(sentinel.msg, to_name=sentinel.to_name)
        self.assertEquals(self._ch.send.call_count, 2)

    def test_close(self):
        self._pub.publish(sentinel.msg)
        self._pub._pub_ep.close = Mock()

        self._pub.close()
        self._pub._pub_ep.close.assert_called_once_with()
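
Note that Mock(spec=NodeB) and Mock(spec=SendChannel) constrain the mocks to the real classes' attribute sets, so a misspelled or removed method raises AttributeError in the test instead of silently returning another Mock.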
Example No. 41
 def setUp(self):
     self._node = Mock(spec=NodeB)
     self._pub = Publisher(node=self._node, to_name="testpub")
     self._ch = Mock(spec=SendChannel)
     self._node.channel.return_value = self._ch
     self._node.interceptors = {}
Example No. 42
 def __init__(self, process=None, **kwargs):
     self._process = process
     Publisher.__init__(self, **kwargs)
Example No. 43
    def test_consume_one_message_at_a_time(self):
        # see also pyon.net.test.test_channel:TestChannelInt.test_consume_one_message_at_a_time

        pub3 = Publisher(to_name=(self.container.ex_manager.default_xs.exchange, 'routed.3'))
        pub5 = Publisher(to_name=(self.container.ex_manager.default_xs.exchange, 'routed.5'))

        #
        # SETUP COMPLETE, BEGIN TESTING OF EXCHANGE OBJECTS
        #

        xq = self.container.ex_manager.create_xn_queue('random_queue')
        self.addCleanup(xq.delete)

        # recv'd messages from the subscriber
        self.recv_queue = Queue()

        sub = Subscriber(from_name=xq, callback=lambda m,h: self.recv_queue.put((m, h)))
        sub.prepare_listener()

        # publish 10 messages - we're not bound yet, so they'll just disappear
        for x in xrange(10):
            pub3.publish("3,%s" % str(x))

        # no messages yet
        self.assertFalse(sub.get_one_msg(timeout=0))

        # now, we'll bind the xq
        xq.bind('routed.3')

        # even though we are consuming, there are no messages - the previously published ones all disappeared
        self.assertFalse(sub.get_one_msg(timeout=0))

        # publish those messages again
        for x in xrange(10):
            pub3.publish("3,%s" % str(x))

        # NOW we have messages!
        for x in xrange(10):
            self.assertTrue(sub.get_one_msg(timeout=0))
            m,h = self.recv_queue.get(timeout=0)
            self.assertEquals(m, "3,%s" % str(x))

        # we've cleared it all
        self.assertFalse(sub.get_one_msg(timeout=0))

        # bind a wildcard and publish on both
        xq.bind('routed.*')

        for x in xrange(10):
            time.sleep(0.3)
            pub3.publish("3,%s" % str(x))
            time.sleep(0.3)
            pub5.publish("5,%s" % str(x))

        # should get all 20, interleaved
        for x in xrange(10):
            self.assertTrue(sub.get_one_msg(timeout=0))
            m, h = self.recv_queue.get(timeout=0)
            self.assertEquals(m, "3,%s" % str(x))

            self.assertTrue(sub.get_one_msg(timeout=0))
            m, h = self.recv_queue.get(timeout=0)
            self.assertEquals(m, "5,%s" % str(x))

        # add the 'routed.5' binding, remove all other bindings
        xq.bind('routed.5')
        xq.unbind('routed.3')
        xq.unbind('routed.*')

        # try publishing to 3, shouldn't arrive anymore
        pub3.publish("3")

        self.assertFalse(sub.get_one_msg(timeout=0))

        # let's turn off the consumer and let things build up a bit
        sub._chan.stop_consume()

        for x in xrange(10):
            pub5.publish("5,%s" % str(x))

        # 10 messages in the queue, no consumers
        self.assertTupleEqual((10, 0), sub._chan.get_stats())

        # drain queue
        sub._chan.start_consume()
        time.sleep(1)       # yield to allow delivery

        for x in xrange(10):
            self.assertTrue(sub.get_one_msg(timeout=0))
            self.recv_queue.get(timeout=0)

        sub.close()
Example No. 44
class HeartBeater(object):
    def __init__(self, CFG, factory, process_id, process, log=logging):

        self._log = log
        self._log.log(logging.DEBUG, "Starting the heartbeat thread")
        self._CFG = CFG
        self._res = None
        self._interval = float(CFG.eeagent.heartbeat)
        self._res = None
        self._done = False
        self._started = False
        self._factory = factory
        self.process = process
        self.process_id = process_id
        self._pd_name = CFG.eeagent.get('heartbeat_queue', 'heartbeat_queue')
        self._publisher = Publisher(to_name=self._pd_name)

        self._factory.set_state_change_callback(
            self._state_change_callback, None)
        self._first_beat()

    def _first_beat(self):
        self._beat_time = datetime.datetime.now()

    def _next_beat(self, now):
        self._beat_time = now + datetime.timedelta(seconds=self._interval)

    def _state_change_callback(self, user_arg):
        # on state change set the beat time to now
        self._beat_time = datetime.datetime.now()

    @property
    def _eea_started(self):
        """_eea_started
        We must ensure that the eea is listening before heartbeating to the PD.
        If the eea isn't listening, the PD's reply will be lost.

        So we must ensure that the Pyon process's listeners are created, and are ready
        """
        if self._started:
            return True

        if len(self.process._process.listeners) > 0 and all(self.process._process.heartbeat()):
            self._log.debug(
                "eeagent heartbeat started because len(self.process._process.listeners) > 0 (%s) "
                "and all(self.process._process.heartbeat()) == True (%s)" % (
                    len(self.process._process.listeners), str(self.process._process.heartbeat())))
            self._started = True
            return True
        else:
            return False

    def poll(self):

        if not self._eea_started:
            return

        now = datetime.datetime.now()
        if now > self._beat_time:
            self._next_beat(now)
            self.beat()

    def beat(self):
        try:
            beat = make_beat_msg(self._factory, self._CFG)
            message = dict(
                beat=beat, eeagent_id=self.process_id,
                resource_id=self._CFG.agent.resource_id)

            if self._log.isEnabledFor(logging.DEBUG):
                processes = beat.get('processes')
                if processes is not None:
                    processes_str = "processes=%d" % len(processes)
                else:
                    processes_str = ""
                self._log.debug("Sending heartbeat to %s %s",
                                self._pd_name, processes_str)

            self._publisher.publish(message)
        except Exception:
            self._log.exception("beat failed")
Example No. 45
class TestPublisher(PyonTestCase):
    def setUp(self):
        self._node = Mock(spec=NodeB)
        self._pub = Publisher(node=self._node, to_name="testpub")
        self._ch = Mock(spec=SendChannel)
        self._node.channel.return_value = self._ch
        self._node.interceptors = {}

    def test_publish(self):
        self.assertEquals(self._node.channel.call_count, 0)

        self._pub.publish("pub")

        self._node.channel.assert_called_once_with(self._pub.channel_type, transport=None)
        self.assertEquals(self._ch.send.call_count, 1)

        self._pub.publish("pub2")
        self._node.channel.assert_called_once_with(self._pub.channel_type, transport=None)
        self.assertEquals(self._ch.send.call_count, 2)

    def test_publish_with_new_name(self):

        self.assertEquals(self._node.channel.call_count, 0)

        self._pub.publish(sentinel.msg, to_name=sentinel.to_name)
        self.assertEquals(self._ch.send.call_count, 1)

        self._pub.publish(sentinel.msg, to_name=sentinel.to_name)
        self.assertEquals(self._ch.send.call_count, 2)

    def test_close(self):
        self._pub.publish(sentinel.msg)
        self._pub._pub_ep.close = Mock()

        self._pub.close()
        self._pub._pub_ep.close.assert_called_once_with()
Example No. 46
#!/usr/bin/env python

from pyon.net.endpoint import Publisher
from pyon.net.messaging import make_node
import gevent

node, iowat = make_node()

pub = Publisher(node=node, name="hassan")
Example No. 47
class TransformBenchTesting(TransformDataProcess):
    """
    Easiest way to run:
    from pyon.util.containers import DotDict
    tbt=cc.proc_manager._create_service_instance('55', 'tbt', 'pyon.ion.transform', 'TransformBenchTesting', DotDict({'process':{'name':'tbt', 'transform_id':'55'}}))
    tbt.init()
    tbt.start()
    """
    transform_number = 0
    message_length = 0

    def __init__(self):
        super(TransformBenchTesting, self).__init__()
        self.count = 0
        TransformBenchTesting.transform_number += 1

    def perf(self):

        with open('/tmp/pyon_performance.dat', 'a') as f:
            then = time.time()
            ocount = self.count
            while True:
                gevent.sleep(2.)
                now = time.time()
                count = self.count
                delta_t = now - then
                delta_c = count - ocount

                f.write(
                    '%s|%s\t%s\t%s\t%3.3f\n' %
                    (get_sys_name(), time.strftime("%H:%M:%S", time.gmtime()),
                     TransformBenchTesting.message_length,
                     TransformBenchTesting.transform_number,
                     float(delta_c) / delta_t))
                then = now
                ocount = count
                f.flush()

    @staticmethod
    def launch_benchmark(transform_number=1, primer=1, message_length=4):
        import gevent
        from gevent.greenlet import Greenlet
        from pyon.util.containers import DotDict
        from pyon.net.transport import NameTrio
        from pyon.net.endpoint import Publisher
        import uuid
        num = transform_number
        msg_len = message_length
        transforms = list()
        pids = 1
        TransformBenchTesting.message_length = message_length
        cc = Container.instance
        pub = Publisher(to_name=NameTrio(get_sys_name(),
                                         str(uuid.uuid4())[0:6]))
        for i in xrange(num):
            tbt = cc.proc_manager._create_service_instance(
                str(pids), 'tbt', 'prototype.transforms.linear',
                'TransformInPlace',
                DotDict({
                    'process': {
                        'name': 'tbt%d' % pids,
                        'transform_id': pids
                    }
                }))
            tbt.init()
            tbt.start()
            gevent.sleep(0.2)
            for i in xrange(primer):
                pub.publish(list(xrange(msg_len)))
            g = Greenlet(tbt.perf)
            g.start()
            transforms.append(tbt)
            pids += 1

    def on_start(self):
        TransformDataProcess.on_start(self)

        # set up subscriber to *
        self._bt_sub = Subscriber(callback=lambda m, h: self.call_process(m),
                                  from_name=NameTrio(get_sys_name(),
                                                     'bench_queue', '*'))

        # spawn listener
        self._sub_gl = spawn(self._bt_sub.listen)

        # set up publisher to anything!
        self._bt_pub = Publisher(to_name=NameTrio(get_sys_name(),
                                                  str(uuid.uuid4())[0:6]))

    def publish(self, msg):
        self._bt_pub.publish(msg)
        self.count += 1

    def _stop_listener(self):
        self._bt_sub.close()
        self._sub_gl.join(timeout=2)
        self._sub_gl.kill()

    def on_stop(self):
        TransformDataProcess.on_stop(self)
        self._stop_listener()

    def on_quit(self):
        TransformDataProcess.on_quit(self)
        self._stop_listener()
Example No. 48
class TransformBenchTesting(TransformDataProcess):
    """
    Easiest way to run:
    from pyon.util.containers import DotDict
    tbt=cc.proc_manager._create_service_instance('55', 'tbt', 'pyon.ion.transform', 'TransformBenchTesting', DotDict({'process':{'name':'tbt', 'transform_id':'55'}}))
    tbt.init()
    tbt.start()
    """
    transform_number = 0
    message_length = 0
    def __init__(self):
        super(TransformBenchTesting,self).__init__()
        self.count = 0
        TransformBenchTesting.transform_number += 1

    def perf(self):

        with open('/tmp/pyon_performance.dat','a') as f:
            then = time.time()
            ocount = self.count
            while True:
                gevent.sleep(2.)
                now = time.time()
                count = self.count
                delta_t = now - then
                delta_c = count - ocount

                f.write('%s|%s\t%s\t%s\t%3.3f\n' % (get_sys_name(),time.strftime("%H:%M:%S", time.gmtime()),TransformBenchTesting.message_length,TransformBenchTesting.transform_number, float(delta_c) / delta_t))
                then = now
                ocount = count
                f.flush()

    @staticmethod
    def launch_benchmark(transform_number=1, primer=1,message_length=4):
        import gevent
        from gevent.greenlet import Greenlet
        from pyon.util.containers import DotDict
        from pyon.net.transport import NameTrio
        from pyon.net.endpoint import Publisher
        import uuid
        num = transform_number
        msg_len = message_length
        transforms = list()
        pids = 1
        TransformBenchTesting.message_length = message_length
        cc = Container.instance
        pub = Publisher(to_name=NameTrio(get_sys_name(),str(uuid.uuid4())[0:6]))
        for i in xrange(num):
            tbt=cc.proc_manager._create_service_instance(str(pids), 'tbt', 'prototype.transforms.linear', 'TransformInPlace', DotDict({'process':{'name':'tbt%d' % pids, 'transform_id':pids}}))
            tbt.init()
            tbt.start()
            gevent.sleep(0.2)
            for i in xrange(primer):
                pub.publish(list(xrange(msg_len)))
            g = Greenlet(tbt.perf)
            g.start()
            transforms.append(tbt)
            pids += 1

    def on_start(self):
        TransformDataProcess.on_start(self)

        # set up subscriber to *
        self._bt_sub = Subscriber(callback=lambda m, h: self.call_process(m),
                                  from_name=NameTrio(get_sys_name(), 'bench_queue', '*'))

        # spawn listener
        self._sub_gl = spawn(self._bt_sub.listen)

        # set up publisher to anything!
        self._bt_pub = Publisher(to_name=NameTrio(get_sys_name(), str(uuid.uuid4())[0:6]))

    def publish(self, msg):
        self._bt_pub.publish(msg)
        self.count+=1

    def _stop_listener(self):
        self._bt_sub.close()
        self._sub_gl.join(timeout=2)
        self._sub_gl.kill()

    def on_stop(self):
        TransformDataProcess.on_stop(self)
        self._stop_listener()

    def on_quit(self):
        TransformDataProcess.on_quit(self)
        self._stop_listener()
Example No. 49
    def test_replay_integration(self):
        '''
        test_replay_integration
        '''
        import numpy as np
        # Keep the import; it's used in the vector comparison below even though PyCharm says it's unused.

        cc = self.container
        XP = self.XP
        assertions = self.assertTrue

        ### Everything below here can be run as a script:
        log.debug('Got it')

        pubsub_management_service = PubsubManagementServiceClient(node=cc.node)
        ingestion_management_service = IngestionManagementServiceClient(node=cc.node)
        dataset_management_service = DatasetManagementServiceClient(node=cc.node)
        data_retriever_service = DataRetrieverServiceClient(node=cc.node)

        datastore_name = 'dm_test_replay_integration'

        producer = Publisher(name=(XP, 'stream producer'))

        ingestion_configuration_id = ingestion_management_service.create_ingestion_configuration(
            exchange_point_id=XP,
            couch_storage=CouchStorage(datastore_name=datastore_name, datastore_profile='SCIDATA'),
            hdf_storage=HdfStorage(),
            number_of_workers=1
        )

        ingestion_management_service.activate_ingestion_configuration(
            ingestion_configuration_id=ingestion_configuration_id
        )

        definition = SBE37_CDM_stream_definition()
        data_stream_id = definition.data_stream_id
        encoding_id = definition.identifiables[data_stream_id].encoding_id
        element_count_id = definition.identifiables[data_stream_id].element_count_id

        stream_def_id = pubsub_management_service.create_stream_definition(
            container=definition
        )
        stream_id = pubsub_management_service.create_stream(
            stream_definition_id=stream_def_id
        )

        dataset_id = dataset_management_service.create_dataset(
            stream_id=stream_id,
            datastore_name=datastore_name,
            view_name='datasets/dataset_by_id'
        )
        ingestion_management_service.create_dataset_configuration(
            dataset_id=dataset_id,
            archive_data=True,
            archive_metadata=True,
            ingestion_configuration_id=ingestion_configuration_id
        )
        definition.stream_resource_id = stream_id

        packet = _create_packet(definition)
        input_file = FileSystem.mktemp()
        input_file.write(packet.identifiables[data_stream_id].values)
        input_file_path = input_file.name
        input_file.close()

        fields = [
            'conductivity',
            'height',
            'latitude',
            'longitude',
            'pressure',
            'temperature',
            'time'
        ]

        input_vectors = acquire_data([input_file_path], fields, 2).next()

        producer.publish(msg=packet, to_name=(XP, '%s.data' % stream_id))

        replay_id, replay_stream_id = data_retriever_service.define_replay(dataset_id)
        ar = gevent.event.AsyncResult()
        def sub_listen(msg, headers):
            assertions(isinstance(msg, StreamGranuleContainer), 'replayed message is not a granule.')
            hdf_string = msg.identifiables[data_stream_id].values
            sha1 = hashlib.sha1(hdf_string).hexdigest().upper()
            assertions(sha1 == msg.identifiables[encoding_id].sha1, 'Checksum failed.')
            assertions(msg.identifiables[element_count_id].value == 1,
                       'record replay count is incorrect %d.' % msg.identifiables[element_count_id].value)
            output_file = FileSystem.mktemp()
            output_file.write(msg.identifiables[data_stream_id].values)
            output_file_path = output_file.name
            output_file.close()
            output_vectors = acquire_data([output_file_path], fields, 2).next()
            for field in fields:
                comparison = (input_vectors[field]['values'] == output_vectors[field]['values'])
                assertions(comparison.all(), 'vector mismatch: %s vs %s' %
                           (input_vectors[field]['values'], output_vectors[field]['values']))
            FileSystem.unlink(output_file_path)
            ar.set(True)

        subscriber = Subscriber(name=(XP, 'replay listener'), callback=sub_listen)

        g = gevent.Greenlet(subscriber.listen, binding='%s.data' % replay_stream_id)
        g.start()

        data_retriever_service.start_replay(replay_id)

        ar.get(timeout=10)

        FileSystem.unlink(input_file_path)
Example #50
    def create_endpoint(self, *args, **kwargs):
        newkwargs = kwargs.copy()
        newkwargs['process'] = self._process
        return Publisher.create_endpoint(self, *args, **newkwargs)
Example #51
    def test_replay_integration(self):
        '''
        test_replay_integration
        '''
        import numpy as np
        # Keep this import; it's used in the vector comparison below even though PyCharm flags it as unused.

        cc = self.container
        XP = self.XP
        assertions = self.assertTrue

        ### Everything below here can be run as a script:
        log.debug('Got it')

        pubsub_management_service = PubsubManagementServiceClient(node=cc.node)
        ingestion_management_service = IngestionManagementServiceClient(
            node=cc.node)
        dataset_management_service = DatasetManagementServiceClient(
            node=cc.node)
        data_retriever_service = DataRetrieverServiceClient(node=cc.node)

        datastore_name = 'dm_test_replay_integration'

        producer = Publisher(name=(XP, 'stream producer'))

        ingestion_configuration_id = ingestion_management_service.create_ingestion_configuration(
            exchange_point_id=XP,
            couch_storage=CouchStorage(datastore_name=datastore_name,
                                       datastore_profile='SCIDATA'),
            hdf_storage=HdfStorage(),
            number_of_workers=1)

        ingestion_management_service.activate_ingestion_configuration(
            ingestion_configuration_id=ingestion_configuration_id)

        definition = SBE37_CDM_stream_definition()
        data_stream_id = definition.data_stream_id
        encoding_id = definition.identifiables[data_stream_id].encoding_id
        element_count_id = definition.identifiables[
            data_stream_id].element_count_id

        stream_def_id = pubsub_management_service.create_stream_definition(
            container=definition)
        stream_id = pubsub_management_service.create_stream(
            stream_definition_id=stream_def_id)

        dataset_id = dataset_management_service.create_dataset(
            stream_id=stream_id,
            datastore_name=datastore_name,
            view_name='datasets/dataset_by_id')
        ingestion_management_service.create_dataset_configuration(
            dataset_id=dataset_id,
            archive_data=True,
            archive_metadata=True,
            ingestion_configuration_id=ingestion_configuration_id)
        definition.stream_resource_id = stream_id

        packet = _create_packet(definition)
        input_file = FileSystem.mktemp()
        input_file.write(packet.identifiables[data_stream_id].values)
        input_file_path = input_file.name
        input_file.close()

        fields = [
            'conductivity', 'height', 'latitude', 'longitude', 'pressure',
            'temperature', 'time'
        ]

        input_vectors = acquire_data([input_file_path], fields, 2).next()

        producer.publish(msg=packet, to_name=(XP, '%s.data' % stream_id))

        replay_id, replay_stream_id = data_retriever_service.define_replay(
            dataset_id)
        ar = gevent.event.AsyncResult()

        def sub_listen(msg, headers):

            assertions(isinstance(msg, StreamGranuleContainer),
                       'replayed message is not a granule.')
            hdf_string = msg.identifiables[data_stream_id].values
            sha1 = hashlib.sha1(hdf_string).hexdigest().upper()
            assertions(sha1 == msg.identifiables[encoding_id].sha1,
                       'Checksum failed.')
            assertions(
                msg.identifiables[element_count_id].value == 1,
                'record replay count is incorrect %d.' %
                msg.identifiables[element_count_id].value)
            output_file = FileSystem.mktemp()
            output_file.write(msg.identifiables[data_stream_id].values)
            output_file_path = output_file.name
            output_file.close()
            output_vectors = acquire_data([output_file_path], fields, 2).next()
            for field in fields:
                comparison = (input_vectors[field]['values'] ==
                              output_vectors[field]['values'])
                assertions(
                    comparison.all(), 'vector mismatch: %s vs %s' %
                    (input_vectors[field]['values'],
                     output_vectors[field]['values']))
            FileSystem.unlink(output_file_path)
            ar.set(True)

        subscriber = Subscriber(name=(XP, 'replay listener'),
                                callback=sub_listen)

        g = gevent.Greenlet(subscriber.listen,
                            binding='%s.data' % replay_stream_id)
        g.start()

        data_retriever_service.start_replay(replay_id)

        ar.get(timeout=10)

        FileSystem.unlink(input_file_path)
Example #52
class HeartBeater(object):
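    # Assumed imports for this excerpt (not shown here): datetime, logging,
    # Publisher from pyon.net.endpoint, and a make_beat_msg helper from the
    # eeagent beat module.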
    def __init__(self, CFG, factory, process_id, process, log=logging):

        self._log = log
        self._log.log(logging.DEBUG, "Starting the heartbeat thread")
        self._CFG = CFG
        self._res = None
        self._interval = float(CFG.eeagent.heartbeat)
        self._done = False
        self._started = False
        self._factory = factory
        self.process = process
        self.process_id = process_id
        self._pd_name = CFG.eeagent.get('heartbeat_queue', 'heartbeat_queue')
        self._publisher = Publisher(to_name=self._pd_name)

        self._factory.set_state_change_callback(self._state_change_callback,
                                                None)
        self._first_beat()

    def _first_beat(self):
        self._beat_time = datetime.datetime.now()

    def _next_beat(self, now):
        self._beat_time = now + datetime.timedelta(seconds=self._interval)

    def _state_change_callback(self, user_arg):
        # on state change set the beat time to now
        self._beat_time = datetime.datetime.now()

    @property
    def _eea_started(self):
        """_eea_started
        We must ensure that the eea is listening before heartbeating to the PD.
        If the eea isn't listening, the PD's reply will be lost.

        So we must ensure that the Pyon process's listeners are created, and are ready
        """
        if self._started:
            return True

        if len(self.process._process.listeners) > 0 and all(
                self.process._process.heartbeat()):
            self._log.debug(
                "eeagent heartbeat started because len(self.process._process.listeners) > 0 (%s) "
                "and all(self.process._process.heartbeat()) == True (%s)" %
                (len(self.process._process.listeners),
                 str(self.process._process.heartbeat())))
            self._started = True
            return True
        else:
            return False

    def poll(self):

        if not self._eea_started:
            return

        now = datetime.datetime.now()
        if now > self._beat_time:
            self._next_beat(now)
            self.beat()

    def beat(self):
        try:
            beat = make_beat_msg(self._factory, self._CFG)
            message = dict(beat=beat,
                           eeagent_id=self.process_id,
                           resource_id=self._CFG.agent.resource_id)

            if self._log.isEnabledFor(logging.DEBUG):
                processes = beat.get('processes')
                if processes is not None:
                    processes_str = "processes=%d" % len(processes)
                else:
                    processes_str = ""
                self._log.debug("Sending heartbeat to %s %s", self._pd_name,
                                processes_str)

            self._publisher.publish(message)
        except Exception:
            self._log.exception("beat failed")
Example #53
class HeartBeater(object):
    def __init__(self, CFG, factory, process_id, process, log=logging):

        self._log = log
        self._log.log(logging.DEBUG, "Starting the heartbeat thread")
        self._CFG = CFG
        self._res = None
        self._interval = int(CFG.eeagent.heartbeat)
        self._done = False
        self._started = False
        self._factory = factory
        self.process = process
        self.process_id = process_id
        self._publisher = Publisher()
        self._pd_name = CFG.eeagent.get('heartbeat_queue', 'heartbeat_queue')

        self._factory.set_state_change_callback(self._state_change_callback, None)
        self._first_beat()

    def _first_beat(self):
        self._beat_time = datetime.datetime.now()

    def _next_beat(self, now):
        self._beat_time = now + datetime.timedelta(seconds=self._interval)

    def _state_change_callback(self, user_arg):
        # on state change set the beat time to now
        self._beat_time = datetime.datetime.now()

    @property
    def _eea_started(self):
        if self._started:
            return True

        try:
            _eea_pyon_client = SimpleResourceAgentClient(self.process_id, process=self.process)
            eea_client = ExecutionEngineAgentClient(_eea_pyon_client)
            eea_client.dump_state()
            self._started = True
            return True
        except NotFound:
            return False
        except Exception:
            self._log.exception("Couldn't get eeagent state. Perhaps it is broken?")
            return False

    def poll(self):

        if not self._eea_started:
            return

        now = datetime.datetime.now()
        if now > self._beat_time:
            self._next_beat(now)
            self.beat()

    def beat(self):
        try:
            beat = make_beat_msg(self._factory, self._CFG)
            message = dict(beat=beat, eeagent_id=self.process_id, resource_id=self._CFG.agent.resource_id)
            to_name = self._pd_name

            if self._log.isEnabledFor(logging.DEBUG):
                processes = beat.get('processes')
                if processes is not None:
                    processes_str = "processes=%d" % len(processes)
                else:
                    processes_str = ""
                self._log.debug("Sending heartbeat to %s %s", self._pd_name, processes_str)

            self._publisher.publish(message, to_name=to_name)
        except Exception:
            self._log.exception("beat failed")
Example #54
    def setUp(self):
        self._node = Mock(spec=NodeB)
        self._pub = Publisher(node=self._node, to_name="testpub")
        self._ch = Mock(spec=SendChannel)
        self._node.channel.return_value = self._ch
        self._node.interceptors = {}
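
Building on these mocks, a hypothetical follow-on test might look like the
sketch below (the test name is illustrative, and it assumes pyon's Publisher
routes publish() through node.channel() and the channel's send()):

    def test_publish_sends_on_channel(self):
        # With the node and SendChannel mocked in setUp, a publish should
        # open a channel on the node and send through it.
        self._pub.publish('test message')
        self.assertTrue(self._node.channel.called)
        self.assertTrue(self._ch.send.called)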
Example #55
class HeartBeater(object):
    def __init__(self, CFG, factory, process_id, process, log=logging):

        self._log = log
        self._log.log(logging.DEBUG, "Starting the heartbeat thread")
        self._CFG = CFG
        self._res = None
        self._interval = int(CFG.eeagent.heartbeat)
        self._done = False
        self._started = False
        self._factory = factory
        self.process = process
        self.process_id = process_id
        self._publisher = Publisher()
        self._pd_name = CFG.eeagent.get('heartbeat_queue', 'heartbeat_queue')

        self._factory.set_state_change_callback(self._state_change_callback,
                                                None)
        self._first_beat()

    def _first_beat(self):
        self._beat_time = datetime.datetime.now()

    def _next_beat(self, now):
        self._beat_time = now + datetime.timedelta(seconds=self._interval)

    def _state_change_callback(self, user_arg):
        # on state change set the beat time to now
        self._beat_time = datetime.datetime.now()

    @property
    def _eea_started(self):
        if self._started:
            return True

        if all(self.process._process.heartbeat()):
            self._started = True
            return True
        else:
            return False

    def poll(self):

        if not self._eea_started:
            return

        now = datetime.datetime.now()
        if now > self._beat_time:
            self._next_beat(now)
            self.beat()

    def beat(self):
        try:
            beat = make_beat_msg(self._factory, self._CFG)
            message = dict(beat=beat,
                           eeagent_id=self.process_id,
                           resource_id=self._CFG.agent.resource_id)
            to_name = self._pd_name

            if self._log.isEnabledFor(logging.DEBUG):
                processes = beat.get('processes')
                if processes is not None:
                    processes_str = "processes=%d" % len(processes)
                else:
                    processes_str = ""
                self._log.debug("Sending heartbeat to %s %s", self._pd_name,
                                processes_str)

            self._publisher.publish(message, to_name=to_name)
        except Exception:
            self._log.exception("beat failed")
Example #56
    def __init__(self, process=None, **kwargs):
        self._process = process
        Publisher.__init__(self, **kwargs)
Example #57
    def create_endpoint(self, *args, **kwargs):
        newkwargs = kwargs.copy()
        newkwargs['process'] = self._process
        return Publisher.create_endpoint(self, *args, **newkwargs)
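
Taken together, Examples 56 and 57 form a process-aware publisher subclass. A
minimal combined sketch (the class name ProcessPublisher is illustrative; only
the pyon Publisher base class is assumed):

class ProcessPublisher(Publisher):
    def __init__(self, process=None, **kwargs):
        # Remember the owning process so endpoints can be bound to it.
        self._process = process
        Publisher.__init__(self, **kwargs)

    def create_endpoint(self, *args, **kwargs):
        # Inject the owning process into every endpoint this publisher creates.
        newkwargs = kwargs.copy()
        newkwargs['process'] = self._process
        return Publisher.create_endpoint(self, *args, **newkwargs)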