Code example #1
def run_test_dispatcher(work_count, num_workers=1):
    # Set up temporary directories to save data
    import shutil
    BASE_DIR = tempfile.mkdtemp()
    PIDANTIC_DIR = tempfile.mkdtemp()

    WORK_KEYS = ['a', 'b', 'c', 'd', 'e']

    for x in [x for x in os.listdir(BASE_DIR) if x.endswith('.h5')]:
        os.remove(os.path.join(BASE_DIR, x))

    fps = {}
    for k in WORK_KEYS:
        fps[k] = os.path.join(BASE_DIR, '{0}.h5'.format(k))
#        with h5py.File(fps[k], 'a'):
#            pass

    bD = (50, )
    cD = (5, )
    fv = -9999
    dtype = 'f'

    def fcb(message, work):
        log.error('WORK DISCARDED!!!; %s: %s', message, work)

    disp = BrickWriterDispatcher(fcb,
                                 num_workers=num_workers,
                                 pidantic_dir=PIDANTIC_DIR)
    disp.run()

    def make_work():
        for x in xrange(work_count):
            bk = random.choice(WORK_KEYS)
            brick_metrics = (fps[bk], bD, cD, dtype, fv)
            if np.random.random_sample(1)[0] > 0.5:
                sl = int(np.random.randint(0, 10, 1)[0])
                w = np.random.random_sample(1)[0]
            else:
                strt = int(np.random.randint(0, bD[0] - 2, 1)[0])
                stp = int(np.random.randint(strt + 1, bD[0], 1)[0])
                sl = slice(strt, stp)
                w = np.random.random_sample(stp - strt)
            disp.put_work(work_key=bk,
                          work_metrics=brick_metrics,
                          work=([sl], w))
            time.sleep(0.1)

    spawn(make_work)

    # Remove temporary directories
    shutil.rmtree(BASE_DIR)
    shutil.rmtree(PIDANTIC_DIR)

    return disp
Code example #2
 def _es_call(es, *args, **kwargs):
     res = AsyncResult()
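     # Run the blocking ElasticSearch call in a background greenlet; the AsyncResult is resolved with its return value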
     def async_call(es, *args, **kwargs):
         res.set(es(*args,**kwargs))
     spawn(async_call,es,*args,**kwargs)
     try:
         retval = res.get(timeout=CFG.get_safe('server.elasticsearch.timeout', 10))
     except Timeout:
         raise exceptions.Timeout("Call to ElasticSearch timed out.")
     return retval
Code example #3
 def _es_call(es, *args, **kwargs):
     res = AsyncResult()
     def async_call(es, *args, **kwargs):
         res.set(es(*args,**kwargs))
     spawn(async_call,es,*args,**kwargs)
     try:
         retval = res.get(timeout=10)
     except Timeout:
         raise exceptions.Timeout("Call to ElasticSearch timed out.")
     return retval
Code example #4
File: process.py Project: oldpatricka/pyon
    def get_ready_event(self):
        """
        Returns an Event that is set when all the listeners in this Process are running.
        """
        ev = Event()
        def allready(ev):
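            # Block until every listener reports ready, then set the returned Event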
            waitall([x.get_ready_event() for x in self.listeners])
            ev.set()

        spawn(allready, ev)
        return ev
Code example #5
File: process.py Project: oldpatricka/pyon
    def get_ready_event(self):
        """
        Returns an Event that is set when all the listeners in this Process are running.
        """
        ev = Event()

        def allready(ev):
            waitall([x.get_ready_event() for x in self.listeners])
            ev.set()

        spawn(allready, ev)
        return ev
Code example #6
    def get_dirty_values_async_result(self):
        dirty_async_res = AsyncResult()
        def dirty_check(self, res):
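            # Poll every 0.1s until the object reports clean, then resolve the AsyncResult with True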
            while True:
                if self.is_dirty():
                    time.sleep(0.1)
                else:
                    res.set(True)
                    break

        spawn(dirty_check, self, dirty_async_res)

        return dirty_async_res
Code example #7
def run_test_dispatcher(work_count, num_workers=1):
    # Set up temporary directories to save data
    import shutil
    BASE_DIR = tempfile.mkdtemp()
    PIDANTIC_DIR = tempfile.mkdtemp()

    WORK_KEYS = ['a','b','c','d','e']

    for x in [x for x in os.listdir(BASE_DIR) if x.endswith('.h5')]:
        os.remove(os.path.join(BASE_DIR,x))

    fps = {}
    for k in WORK_KEYS:
        fps[k] = os.path.join(BASE_DIR, '{0}.h5'.format(k))
#        with h5py.File(fps[k], 'a'):
#            pass

    bD = (50,)
    cD = (5,)
    fv = -9999
    dtype = 'f'

    def fcb(message, work):
        log.error('WORK DISCARDED!!!; %s: %s', message, work)

    disp = BrickWriterDispatcher(fcb, num_workers=num_workers, pidantic_dir=PIDANTIC_DIR)
    disp.run()

    def make_work():
        for x in xrange(work_count):
            bk = random.choice(WORK_KEYS)
            brick_metrics = (fps[bk], bD, cD, dtype, fv)
            if np.random.random_sample(1)[0] > 0.5:
                sl = int(np.random.randint(0,10,1)[0])
                w = np.random.random_sample(1)[0]
            else:
                strt = int(np.random.randint(0,bD[0] - 2,1)[0])
                stp = int(np.random.randint(strt+1,bD[0],1)[0])
                sl = slice(strt, stp)
                w = np.random.random_sample(stp-strt)
            disp.put_work(work_key=bk, work_metrics=brick_metrics, work=([sl], w))
            time.sleep(0.1)

    spawn(make_work)

    # Remove temporary directories
    shutil.rmtree(BASE_DIR)
    shutil.rmtree(PIDANTIC_DIR)

    return disp
Code example #8
    def get_dirty_values_async_result(self):
        dirty_async_res = AsyncResult()

        def dirty_check(self, res):
            while True:
                if self.is_dirty():
                    time.sleep(0.1)
                else:
                    res.set(True)
                    break

        spawn(dirty_check, self, dirty_async_res)

        return dirty_async_res
Code example #9
    def on_start(self):
        # Persister thread
        self._persist_greenlet = spawn(self._persister_loop, self.persist_interval)
        log.debug('EventPersister persist greenlet started in "%s" (interval %s)', self.__class__.__name__, self.persist_interval)

        # View trigger thread
        self._refresh_greenlet = spawn(self._refresher_loop, self.refresh_interval)
        log.debug('EventPersister view refresher greenlet started in "%s" (interval %s)', self.__class__.__name__, self.refresh_interval)

        # Event subscription
        self.event_sub = EventSubscriber(pattern=EventSubscriber.ALL_EVENTS,
                                         callback=self._on_event,
                                         queue_name="event_persister")

        self.event_sub.start()
Code example #10
File: pd_registry.py Project: edwardhunter/scioncc
    def start(self):
        # Create our own queue for container heartbeats and broadcasts
        topic = get_safe(self._pd_core.pd_cfg, "aggregator.container_topic") or "bx_containers"
        queue_name = "pd_aggregator_%s_%s" % (topic, create_valid_identifier(self.container.id, dot_sub="_"))
        self.sub_cont = Subscriber(binding=topic, from_name=queue_name, auto_delete=True,
                                   callback=self._receive_container_info)
        self.sub_cont_gl = spawn(self.sub_cont.listen)
        self.sub_cont.get_ready_event().wait()

        self.evt_sub = EventSubscriber(event_type=OT.ContainerLifecycleEvent, callback=self._receive_event)
        self.evt_sub.add_event_subscription(event_type=OT.ProcessLifecycleEvent)
        self.evt_sub_gl = spawn(self.evt_sub.listen)
        self.evt_sub.get_ready_event().wait()

        log.info("PD Aggregator - event and heartbeat subscribers started")
Code example #11
File: leader.py Project: scion-network/scioncc
 def start(self):
     self._leader_quit = Event()
     if self.process:
         self._leader_thread = self.process._process.thread_manager.spawn(
             self._leader_loop)
     else:
         self._leader_thread = spawn(self._leader_loop)
Code example #12
    def test_rpc_speed(self):
        hsc = HelloServiceClient()

        print >> sys.stderr, ""

        self.counter = 0
        self.alive = True

        def sendem():
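            # Issue no-op RPC calls in a tight loop until the main greenlet clears self.alive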
            while self.alive:
                hsc.noop('data')
                self.counter += 1

        start_time = time.time()

        sendgl = spawn(sendem)
        time.sleep(5)
        end_time = time.time()

        self.alive = False
        sendgl.join(timeout=2)
        sendgl.kill()

        diff = end_time - start_time
        mps = float(self.counter) / diff

        print >> sys.stderr, "Requests per second (RPC):", mps, "(", self.counter, "messages in", diff, "seconds)"
Code example #13
File: test_process.py Project: mkl-/scioncc
    def test_known_error(self):

        # IonExceptions and TypeErrors get forwarded back intact
        svc = self._make_service()
        p = IonProcessThread(name=sentinel.name, listeners=[], service=svc)
        p.start()
        p.get_ready_event().wait(timeout=5)
        self.addCleanup(p.stop)

        def proc_call():
            raise NotFound("didn't find it")

        def client_call(p=None, ar=None):
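            # Route the failing call through the process and capture the forwarded IonException in the AsyncResult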
            try:
                ca = p._routing_call(proc_call, None)
                ca.get(timeout=5)

            except IonException as e:
                ar.set(e)

        ar = AsyncResult()
        gl_call = spawn(client_call, p=p, ar=ar)

        e = ar.get(timeout=5)

        self.assertIsInstance(e, NotFound)
Code example #14
File: test_speed.py Project: oldpatricka/pyon
    def test_pub_speed(self):
        pub = Publisher(node=self.container.node, name="i_no_exist")

        print >>sys.stderr, ""

        self.counter = 0
        self.alive = True
        def sendem():
            while self.alive:
                self.counter += 1
                pub.publish('meh')

        start_time = time.time()

        sendgl = spawn(sendem)
        time.sleep(5)
        end_time = time.time()

        self.alive = False
        sendgl.join(timeout=2)
        sendgl.kill()

        diff = end_time - start_time
        mps = float(self.counter) / diff

        print >>sys.stderr, "Published messages per second:", mps, "(", self.counter, "messages in", diff, "seconds)"
Code example #15
    def test_pub_speed(self):
        pub = Publisher(node=self.container.node, name="i_no_exist")

        print >> sys.stderr, ""

        self.counter = 0
        self.alive = True

        def sendem():
            while self.alive:
                self.counter += 1
                pub.publish('meh')

        start_time = time.time()

        sendgl = spawn(sendem)
        time.sleep(5)
        end_time = time.time()

        self.alive = False
        sendgl.join(timeout=2)
        sendgl.kill()

        diff = end_time - start_time
        mps = float(self.counter) / diff

        print >> sys.stderr, "Published messages per second:", mps, "(", self.counter, "messages in", diff, "seconds)"
Code example #16
File: test_process.py Project: pkediyal/pyon
    def test_known_error(self):

        # IonExceptions and TypeErrors get forwarded back intact
        svc = self._make_service()
        p = IonProcessThread(name=sentinel.name, listeners=[], service=svc)
        p.start()
        p.get_ready_event().wait(timeout=5)
        self.addCleanup(p.stop)

        def proc_call():
            raise NotFound("didn't find it")

        def client_call(p=None, ar=None):
            try:
                ca = p._routing_call(proc_call, None)
                ca.get(timeout=5)

            except IonException as e:
                ar.set(e)

        ar = AsyncResult()
        gl_call = spawn(client_call, p=p, ar=ar)

        e = ar.get(timeout=5)

        self.assertIsInstance(e, NotFound)
Code example #17
File: test_exchange.py Project: j2project/pyon
    def test_rpc_with_xn(self):
        # get an xn to use for send/recv
        xn = self.container.ex_manager.create_xn_service('hello')
        self.addCleanup(xn.delete)

        # create an RPCServer for a hello service
        hs = HelloService()
        rpcs = RPCServer(from_name=xn, service=hs)

        # spawn the listener, kill on test exit (success/fail/error should cover?)
        gl_listen = spawn(rpcs.listen)
        def cleanup():
            rpcs.close()
            gl_listen.join(timeout=2)
            gl_listen.kill()
        self.addCleanup(cleanup)

        # wait for listen to be ready
        rpcs.get_ready_event().wait(timeout=5)

        # ok, now create a client using same xn
        hsc = HelloServiceClient(to_name=xn)

        # try to message it!
        ret = hsc.hello('hi there')

        # did we get back what we expected?
        self.assertEquals(ret, 'BACK:hi there')
Code example #18
File: test_speed.py Project: oldpatricka/pyon
    def test_rpc_speed(self):
        hsc = HelloServiceClient()

        print >>sys.stderr, ""

        self.counter = 0
        self.alive = True
        def sendem():
            while self.alive:
                hsc.noop('data')
                self.counter += 1

        start_time = time.time()

        sendgl = spawn(sendem)
        time.sleep(5)
        end_time = time.time()

        self.alive = False
        sendgl.join(timeout=2)
        sendgl.kill()

        diff = end_time - start_time
        mps = float(self.counter) / diff

        print >>sys.stderr, "Requests per second (RPC):", mps, "(", self.counter, "messages in", diff, "seconds)"
Code example #19
File: pd_engine.py Project: scion-network/scioncc
    def start(self):
        queue_name = get_safe(self._pd_core.pd_cfg, "command_queue") or "pd_command"
        self.sub_cont = Subscriber(binding=queue_name, from_name=queue_name, callback=self._receive_command)
        self.sub_cont_gl = spawn(self.sub_cont.listen, activate=False)
        self.sub_cont.get_ready_event().wait()

        self.pub_result = Publisher()
Code example #20
    def test_rpc_with_xn(self):
        # get an xn to use for send/recv
        xn = self.container.ex_manager.create_xn_service('hello')
        self.addCleanup(xn.delete)

        # create an RPCServer for a hello service
        hs = HelloService()
        rpcs = RPCServer(from_name=xn, service=hs)

        # spawn the listener, kill on test exit (success/fail/error should cover?)
        gl_listen = spawn(rpcs.listen)

        def cleanup():
            rpcs.close()
            gl_listen.join(timeout=2)
            gl_listen.kill()

        self.addCleanup(cleanup)

        # wait for listen to be ready
        rpcs.get_ready_event().wait(timeout=5)

        # ok, now create a client using same xn
        hsc = HelloServiceClient(to_name=xn)

        # try to message it!
        ret = hsc.hello('hi there')

        # did we get back what we expected?
        self.assertEquals(ret, 'BACK:hi there')
Code example #21
    def execute_acquire_data(self, *args):
        """
        Spawns a greenlet to perform a data acquisition
        Calls BaseDataHandler._acquire_data
        Disallows multiple "new data" (unconstrained) requests using BaseDataHandler._semaphore lock
        Called from:
                      InstrumentAgent._handler_observatory_execute_resource
                       |-->  ExternalDataAgent._handler_streaming_execute_resource

        @parameter args First argument should be a config dictionary
        """
        try:
            config = args[0]

        except IndexError:
            raise ParameterError('\'acquire_data\' command requires a config dict.')

        if not isinstance(config, dict):
            raise TypeError('args[0] of \'acquire_data\' is not a dict.')
        else:
            if get_safe(config,'constraints') is None and not self._semaphore.acquire(blocking=False):
                log.warn('Already acquiring new data - action not duplicated')
                return

            g = spawn(self._acquire_data, config, self._unlock_new_data_callback)
            log.debug('** Spawned {0}'.format(g))
            self._glet_queue.append(g)
Code example #22
File: test_process.py Project: pkediyal/pyon
    def test_unknown_error(self):

        # Unhandled exceptions get handled and then converted to ContainerErrors
        svc = self._make_service()
        p = IonProcessThread(name=sentinel.name, listeners=[], service=svc)
        p.start()
        p.get_ready_event().wait(timeout=5)
        self.addCleanup(p.stop)

        def proc_call():
            raise self.ExpectedError("didn't find it")

        def client_call(p=None, ar=None):
            try:
                ca = p._routing_call(proc_call, None)
                ca.get(timeout=5)

            except IonException as e:
                ar.set(e)

        ar = AsyncResult()
        gl_call = spawn(client_call, p=p, ar=ar)

        e = ar.get(timeout=5)

        self.assertIsInstance(e, ContainerError)
        self.assertEquals(len(p._errors), 1)
Code example #23
    def on_start(self):
        super(IngestionWorker,self).on_start()
        #----------------------------------------------
        # Start up couch
        #----------------------------------------------


        self.couch_config = self.CFG.get('couch_storage')
        self.hdf_storage = self.CFG.get('hdf_storage')

        self.number_of_workers = self.CFG.get('number_of_workers')
        self.description = self.CFG.get('description')

        self.ingest_config_id = self.CFG.get('configuration_id')

        self.datastore_name = self.couch_config.get('datastore_name',None) or 'dm_datastore'
        try:
            self.datastore_profile = getattr(DataStore.DS_PROFILE, self.couch_config.get('datastore_profile','SCIDATA'))
        except AttributeError:
            log.exception('Invalid datastore profile passed to ingestion worker. Defaulting to SCIDATA')

            self.datastore_profile = DataStore.DS_PROFILE.SCIDATA
        log.debug('datastore_profile %s' % self.datastore_profile)
        self.db = self.container.datastore_manager.get_datastore(ds_name=self.datastore_name, profile = self.datastore_profile, config = self.CFG)

        self.resource_reg_client = ResourceRegistryServiceClient(node = self.container.node)

        self.dataset_configs = {}
        # update the policy
        def receive_dataset_config_event(event_msg, headers):
            log.info('Updating dataset config in ingestion worker: %s', event_msg)

            if event_msg.type != DatasetIngestionTypeEnum.DATASETINGESTIONBYSTREAM:
                raise IngestionWorkerException('Received invalid type in dataset config event.')

            stream_id = event_msg.configuration.stream_id

            if event_msg.deleted:
                try:
                    del self.dataset_configs[stream_id]
                except KeyError:
                    log.info('Tried to remove dataset config that does not exist!')
            else:
                self.dataset_configs[stream_id] = event_msg

            # Hook to override just before processing is complete
            self.dataset_configs_event_test_hook(event_msg, headers)


        #Start the event subscriber - really - what a mess!
        self.event_subscriber = EventSubscriber(
            event_type="DatasetIngestionConfigurationEvent",
            origin=self.ingest_config_id,
            callback=receive_dataset_config_event
            )

        self.gl = spawn(self.event_subscriber.listen)
        self.event_subscriber._ready_event.wait(timeout=5)

        log.info(str(self.db))
Code example #24
File: thread.py Project: scion-network/scioncc
 def _spawn(self):
     """ Spawn a gevent greenlet using defined target method and args.
     """
     gl = spawn(self.target, *self.spawn_args, **self.spawn_kwargs)
     gl.link(lambda _: self.ev_exit.set())  # Set exit event when we terminate
     gl._glname = "ION Thread %s" % str(self.target)
     return gl
Code example #25
File: test_process.py Project: mkl-/scioncc
    def test_unknown_error(self):

        # Unhandled exceptions get handled and then converted to ContainerErrors
        svc = self._make_service()
        p = IonProcessThread(name=sentinel.name, listeners=[], service=svc)
        p.start()
        p.get_ready_event().wait(timeout=5)
        self.addCleanup(p.stop)

        def proc_call():
            raise self.ExpectedError("didn't find it")

        def client_call(p=None, ar=None):
            try:
                ca = p._routing_call(proc_call, None)
                ca.get(timeout=5)

            except IonException as e:
                ar.set(e)

        ar = AsyncResult()
        gl_call = spawn(client_call, p=p, ar=ar)

        e = ar.get(timeout=5)

        self.assertIsInstance(e, ContainerError)
        self.assertEquals(len(p._errors), 1)
Code example #26
    def execute_acquire_data(self, *args):
        """
        Creates a copy of self._dh_config, creates a publisher, and spawns a greenlet to perform a data acquisition cycle
        If args[0] is a dict, any entries keyed with one of the 'PATCHABLE_CONFIG_KEYS' are used to patch the config
        Greenlet binds to BaseDataHandler._acquire_data and passes the publisher and config
        Disallows multiple "new data" (unconstrained) requests using BaseDataHandler._semaphore lock
        Called from:
                      InstrumentAgent._handler_observatory_execute_resource
                       |-->  ExternalDataAgent._handler_streaming_execute_resource

        @parameter args First argument can be a config dictionary
        """
        log.debug('Executing acquire_data: args = {0}'.format(args))

        # Make a copy of the config to ensure no cross-pollution
        config = self._dh_config.copy()

        # Patch the config if mods are passed in
        try:
            config_mods = args[0]
            if not isinstance(config_mods, dict):
                raise IndexError()

            log.debug('Configuration modifications provided: {0}'.format(config_mods))
            for k in self._params['PATCHABLE_CONFIG_KEYS']:
                p=get_safe(config_mods, k)
                if not p is None:
                    config[k] = p

        except IndexError:
            log.info('No configuration modifications were provided')

        # Verify that there is a stream_id member in the config
        stream_id = get_safe(config, 'stream_id')
        if not stream_id:
            raise ConfigurationError('Configuration does not contain required \'stream_id\' member')

        isNew = get_safe(config, 'constraints') is None

        if isNew and not self._semaphore.acquire(blocking=False):
            log.warn('Already acquiring new data - action not duplicated')
            return

        ndc = None
        if isNew:
            # Get the NewDataCheck attachment and add its content to the config
            ext_ds_id = get_safe(config,'external_dataset_res_id')
            if ext_ds_id:
                ndc = self._find_new_data_check_attachment(ext_ds_id)

        config['new_data_check'] = ndc

        # Create a publisher to pass into the greenlet
        publisher = self._stream_registrar.create_publisher(stream_id=stream_id)

        # Spawn a greenlet to do the data acquisition and publishing
        g = spawn(self._acquire_data, config, publisher, self._unlock_new_data_callback, self._update_new_data_check_attachment)
        log.debug('** Spawned {0}'.format(g))
        self._glet_queue.append(g)
Code example #27
    def on_start(self):
        # Persister thread
        self._persist_greenlet = spawn(self._trigger_func, self.persist_interval)
        log.debug('Publisher Greenlet started in "%s"' % self.__class__.__name__)

        # Event subscription
        self.event_sub = EventSubscriber(pattern=EventSubscriber.ALL_EVENTS, callback=self._on_event)
        self.event_sub.start()
Code example #28
File: data_agent.py Project: klawande-cci/scioncc
 def on_start_streaming(self, streaming_args=None):
     self.sampling_gl_quit = Event()
     self.sampling_interval = self.agent_config.get("sampling_interval", 5)
     self.sampling_gl = spawn(self._sample_data_loop,
                              self.sampling_interval)
     if self.agent_plugin and hasattr(self.agent_plugin,
                                      'on_start_streaming'):
         self.agent_plugin.on_start_streaming(streaming_args)
Code example #29
 def start(self):
     self._do_stop = False
     self._g = spawn(self._run, self.name)
     log.info(
         'Brick writer worker \'%s\' started: req_port=%s, resp_port=%s',
         self.name, 'tcp://localhost:{0}'.format(self.req_port),
         'tcp://localhost:{0}'.format(self.resp_port))
     return self._g
Code example #30
File: thread.py Project: scion-network/scioncc
 def _spawn(self):
     """ Spawn a gevent greenlet using defined target method and args.
     """
     gl = spawn(self.target, *self.spawn_args, **self.spawn_kwargs)
     gl.link(
         lambda _: self.ev_exit.set())  # Set exit event when we terminate
     gl._glname = "ION Thread %s" % str(self.target)
     return gl
Code example #31
    def start(self):
        log.debug("SFlowManager.start")

        if self._counter_interval > 0:
            self._gl_counter = spawn(self._counter)
        else:
            log.debug("Counter interval is 0, not spawning counter greenlet")

        self._udp_socket = socket(AF_INET, SOCK_DGRAM)
Code example #32
File: transport.py Project: pkediyal/pyon
    def start(self):
        """
        Starts all internal greenlets of this router device.
        """
        self._queue_incoming = Queue()
        self._gl_msgs = self._gl_pool.spawn(self._run_gl_msgs)
        self._gl_msgs.link_exception(self._child_failed)

        self.gl_ioloop = spawn(self._run_ioloop)
Code example #33
    def activate(self):
        if self.activated:
            raise BadRequest("Already active")
        self.listen_gl = spawn(self.wait_sub.listen)  # This initializes and activates the listener
        self.wait_sub.get_ready_event().wait(timeout=1)
        self.activated = True

        return self.wait_name
Code example #34
File: test_event.py Project: dstuebe/pyon
    def _listen(self, sub):
        """
        Pass in a subscriber here, this will make it listen in a background greenlet.
        """
        gl = spawn(sub.listen)
        self._listens.append(gl)
        sub._ready_event.wait(timeout=5)

        return gl
Code example #35
File: sflow.py Project: ooici/pyon
    def start(self):
        log.debug("SFlowManager.start")

        if self._counter_interval > 0:
            self._gl_counter = spawn(self._counter)
        else:
            log.debug("Counter interval is 0, not spawning counter greenlet")

        self._udp_socket = socket(AF_INET, SOCK_DGRAM)
Code example #36
File: transport.py Project: jamie-cyber1/pyon
    def start(self):
        """
        Starts all internal greenlets of this router device.
        """
        self._queue_incoming = Queue()
        self._gl_msgs = self._gl_pool.spawn(self._run_gl_msgs)
        self._gl_msgs.link_exception(self._child_failed)

        self.gl_ioloop = spawn(self._run_ioloop)
Code example #37
    def _listen(self, sub):
        """
        Pass in a subscriber here, this will make it listen in a background greenlet.
        """
        gl = spawn(sub.listen)
        self._listens.append(gl)
        sub._ready_event.wait(timeout=5)

        return gl
Code example #38
File: coverage.py Project: tgiguere/coverage-model
    def insert_timesteps(self, count, origin=None, oob=True):
        """
        Insert count # of timesteps beginning at the origin

        The specified # of timesteps are inserted into the temporal value array at the indicated origin.  This also
        expands the temporal dimension of the AbstractParameterValue for each parameter

        @param count    The number of timesteps to insert
        @param origin   The starting location, from which to begin the insertion
        @param oob      Out of band operations, True will use greenlets, False will be in-band.
        """
        if self.closed:
            raise IOError('I/O operation on closed file')

        if self.mode == 'r':
            raise IOError('Coverage not open for writing: mode == \'{0}\''.format(self.mode))

        # Get the current shape of the temporal_dimension
        shp = self.temporal_domain.shape

        # If not provided, set the origin to the end of the array
        if origin is None or not isinstance(origin, int):
            origin = shp.extents[0]

        # Expand the shape of the temporal_domain - following works if extents is a list or tuple
        shp.extents = (shp.extents[0]+count,)+tuple(shp.extents[1:])

        # Expand the temporal dimension of each of the parameters - the parameter determines how to apply the change
        for n in self._range_dictionary:
            pc = self._range_dictionary.get_context(n)
            # Update the dom of the parameter_context
            if pc.dom.tdom is not None:
                pc.dom.tdom = self.temporal_domain.shape.extents

            self._persistence_layer.expand_domain(pc)
            self._range_value[n].expand_content(VariabilityEnum.TEMPORAL, origin, count)

        # Update the temporal_domain in the master_manager, do NOT flush!!
        self._persistence_layer.update_domain(tdom=self.temporal_domain, do_flush=False)
        # Flush the master_manager & parameter_managers in a separate greenlet
        if oob:
            spawn(self._persistence_layer.flush)
        else:
            self._persistence_layer.flush()
Code example #39
File: conversation_log.py Project: ateranishi/pyon
 def start(self):
     """
     Pass in a subscriber here, this will make it listen in a background greenlet.
     """
     assert not self._cbthread, "start called twice on ConversationSubscriber"
     gl = spawn(self.listen)
     self._cbthread = gl
     self._ready_event.wait(timeout=5)
     log.info("ConversationSubscriber started; pattern=%s" % self.binding)
     return gl
Code example #40
File: event.py Project: oldpatricka/pyon
 def activate(self):
     """
     Pass in a subscriber here, this will make it listen in a background greenlet.
     """
     assert not self._cbthread, "activate called twice on EventSubscriber"
     gl = spawn(self.listen)
     self._cbthread = gl
     self._ready_event.wait(timeout=5)
     log.info("EventSubscriber activated. Event pattern=%s" % self.binding)
     return gl
Code example #41
 def activate(self):
     """
     Pass in a subscriber here, this will make it listen in a background greenlet.
     """
     assert not self._cbthread, "activate called twice on EventSubscriber"
     gl = spawn(self.listen)
     self._cbthread = gl
     self._ready_event.wait(timeout=5)
     log.info("EventSubscriber activated. Event pattern=%s" % self.binding)
     return gl
Code example #42
    def on_start(self):
        # Persister thread
        self._persist_greenlet = spawn(self._trigger_func, self.persist_interval)
        log.debug('EventPersister timer greenlet started in "%s" (interval %s)', self.__class__.__name__, self.persist_interval)

        # Event subscription
        self.event_sub = EventSubscriber(pattern=EventSubscriber.ALL_EVENTS,
                                         callback=self._on_event,
                                         queue_name="event_persister")

        self.event_sub.start()
Code example #43
 def start(self):
     """
     Pass in a subscriber here, this will make it listen in a background greenlet.
     """
     assert not self._cbthread, "start called twice on EventSubscriber"
     gl = spawn(self.listen)
     self._cbthread = gl
     if not self._ready_event.wait(timeout=5):
         log.warning('EventSubscriber start timed out.')
     log.info("EventSubscriber started. Event pattern=%s" % self.binding)
     return gl
Code example #44
    def on_start(self):
        # Persister thread
        self._persist_greenlet = spawn(self._trigger_func, self.persist_interval)
        log.debug('Publisher Greenlet started in "%s"' % self.__class__.__name__)

        # Conv subscription to as many as it takes
        self.conv_sub = ConvSubscriber(callback=self._on_message)
        self.conv_sub.start()

        # Open repository
        self.conv_repository = ConvRepository()
Code example #45
File: event.py Project: daf/pyon
 def start(self):
     """
     Pass in a subscriber here, this will make it listen in a background greenlet.
     """
     assert not self._cbthread, "start called twice on EventSubscriber"
     gl = spawn(self.listen)
     self._cbthread = gl
     if not self._ready_event.wait(timeout=5):
         log.warning('EventSubscriber start timed out.')
     log.info("EventSubscriber started. Event pattern=%s" % self.binding)
     return gl
Code example #46
File: stream.py Project: swarbhanu/pyon
    def start(self):
        """
        Start consuming from the queue
        """
        if self._chan is not None:
            try:
                self._chan.start_consume()
            except ChannelError:
                log.info('Subscriber is already started')

        else:
            self.gl = spawn(self.listen)
Code example #47
    def start(self):
        # Create our own queue for container heartbeats and broadcasts
        topic = get_safe(self._pd_core.pd_cfg,
                         "aggregator.container_topic") or "bx_containers"
        queue_name = "pd_aggregator_%s_%s" % (
            topic, create_valid_identifier(self.container.id, dot_sub="_"))
        self.sub_cont = Subscriber(binding=topic,
                                   from_name=queue_name,
                                   auto_delete=True,
                                   callback=self._receive_container_info)
        self.sub_cont_gl = spawn(self.sub_cont.listen)
        self.sub_cont.get_ready_event().wait()

        self.evt_sub = EventSubscriber(event_type=OT.ContainerLifecycleEvent,
                                       callback=self._receive_event)
        self.evt_sub.add_event_subscription(
            event_type=OT.ProcessLifecycleEvent)
        self.evt_sub_gl = spawn(self.evt_sub.listen)
        self.evt_sub.get_ready_event().wait()

        log.info("PD Aggregator - event and heartbeat subscribers started")
Code example #48
File: transform.py Project: swarbhanu/pyon
    def on_start(self):
        TransformDataProcess.on_start(self)

        # set up subscriber to *
        self._bt_sub = Subscriber(callback=lambda m, h: self.call_process(m),
                                  from_name=NameTrio(get_sys_name(), 'bench_queue', '*'))

        # spawn listener
        self._sub_gl = spawn(self._bt_sub.listen)

        # set up publisher to anything!
        self._bt_pub = Publisher(to_name=NameTrio(get_sys_name(), str(uuid.uuid4())[0:6]))
Code example #49
File: endpoint.py Project: blazetopher/pyon
    def start(self):
        """
        Start consuming from the queue
        """
        if hasattr(self, '_chan'):
            try:
                self._chan.start_consume()
            except ChannelError:
                log.info('Subscriber is already started')

        else:
            self.gl = spawn(self.listen)
Code example #50
File: cc.py Project: scion-network/scioncc
    def start(self):
        from pyon.net.endpoint import Publisher
        from pyon.util.async import spawn
        self.heartbeat_quit = Event()
        self.heartbeat_interval = float(self.heartbeat_cfg.get("publish_interval", 60))
        self.heartbeat_topic = self.heartbeat_cfg.get("topic", "heartbeat")
        self.heartbeat_pub = Publisher(to_name=self.heartbeat_topic)

        # Directly spawn a greenlet - we don't want this to be a supervised IonProcessThread
        self.heartbeat_gl = spawn(self.heartbeat_loop)
        self.started = True
        log.info("Started container heartbeat (interval=%s, topic=%s)", self.heartbeat_interval, self.heartbeat_topic)
Code example #51
def run_test_dispatcher(work_count, num_workers=1):

    BASE_DIR = 'test_data/masonry'
    WORK_KEYS = ['a','b','c','d','e']

    for x in [x for x in os.listdir(BASE_DIR) if x.endswith('.h5')]:
        os.remove(os.path.join(BASE_DIR,x))

    fps = {}
    for k in WORK_KEYS:
        fps[k] = os.path.join(BASE_DIR, '{0}.h5'.format(k))
#        with h5py.File(fps[k], 'a'):
#            pass

    bD = (50,)
    cD = (5,)
    fv = -9999
    dtype = 'f'

    disp = BrickWriterDispatcher(num_workers=num_workers, pidantic_dir='test_data/pid')
    disp.run()

    def make_work():
        for x in xrange(work_count):
            bk = random.choice(WORK_KEYS)
            brick_metrics = (fps[bk], bD, cD, dtype, fv)
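            # Half the time submit a single random index write, otherwise a random slice with matching values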
            if np.random.random_sample(1)[0] > 0.5:
                sl = int(np.random.randint(0,10,1)[0])
                w = np.random.random_sample(1)[0]
            else:
                strt = int(np.random.randint(0,bD[0] - 2,1)[0])
                stp = int(np.random.randint(strt+1,bD[0],1)[0])
                sl = slice(strt, stp)
                w = np.random.random_sample(stp-strt)
            disp.put_work(work_key=bk, work_metrics=brick_metrics, work=([sl], w))
            time.sleep(0.1)

    spawn(make_work)

    return disp
Code example #52
File: process.py Project: swarbhanu/pyon
    def start_listeners(self):
        """
        Starts all listeners in managed greenlets.

        This must be called after starting this IonProcess. Currently, the Container's ProcManager
        will handle this for you, but if using an IonProcess manually, you must remember to call
        this method or no attached listeners will run.
        """

        # spawn all listeners in startup listeners (from initializer, or added later)
        for listener in self._startup_listeners:
            self.add_endpoint(listener)

        ev = Event()

        def allready(ev):
            waitall([x.get_ready_event() for x in self.listeners])
            ev.set()

        spawn(allready, ev)

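        # Wait up to 10 seconds for all listeners to signal readiness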
        ev.wait(timeout=10)
Code example #53
    def on_start(self):
        # Persister thread
        self._persist_greenlet = spawn(self._trigger_func,
                                       self.persist_interval)
        log.debug('Publisher Greenlet started in "%s"' %
                  self.__class__.__name__)

        # Conv subscription to as many as it takes
        self.conv_sub = ConvSubscriber(callback=self._on_message)
        self.conv_sub.start()

        # Open repository
        self.conv_repository = ConvRepository()
Code example #54
    def start(self):
        from pyon.net.endpoint import Publisher
        from pyon.util.async import spawn
        self.heartbeat_quit = Event()
        self.heartbeat_interval = float(
            self.heartbeat_cfg.get("publish_interval", 60))
        self.heartbeat_topic = self.heartbeat_cfg.get("topic", "heartbeat")
        self.heartbeat_pub = Publisher(to_name=self.heartbeat_topic)

        # Directly spawn a greenlet - we don't want this to be a supervised IonProcessThread
        self.heartbeat_gl = spawn(self.heartbeat_loop)
        self.started = True
        log.info("Started container heartbeat (interval=%s, topic=%s)",
                 self.heartbeat_interval, self.heartbeat_topic)
Code example #55
File: transform.py Project: oldpatricka/pyon
    def on_start(self):
        TransformDataProcess.on_start(self)

        # set up subscriber to *
        self._bt_sub = Subscriber(callback=lambda m, h: self.call_process(m),
                                  from_name=NameTrio(get_sys_name(),
                                                     'bench_queue', '*'))

        # spawn listener
        self._sub_gl = spawn(self._bt_sub.listen)

        # set up publisher to anything!
        self._bt_pub = Publisher(to_name=NameTrio(get_sys_name(),
                                                  str(uuid.uuid4())[0:6]))
Code example #56
File: endpoint.py Project: ooici-dm/pyon
    def spawn_listener(self):
        def client_recv():
            while True:
                try:
                    log.debug("client_recv waiting for a message")
                    msg, headers, delivery_tag = self.channel.recv()
                    log.debug("client_recv got a message")
                    log_message(self.channel._send_name , msg, headers, delivery_tag)
                    try:
                        self._message_received(msg, headers)
                    finally:
                        # always ack a listener response
                        self.channel.ack(delivery_tag)
                except ChannelClosedError:
                    log.debug('Channel was closed during client_recv listen loop')
                    break

        # @TODO: spawn should be configurable to maybe the proc_sup in the container?
        self._recv_greenlet = spawn(client_recv)
Code example #57
    def execute_start_autosample(self, *args, **kwargs):
        #TODO: Add documentation
        #TODO: Fix raises statements
        """
        Put the DataHandler into streaming mode and start polling for new data
        Called from:
                      InstrumentAgent._handler_observatory_go_streaming

        @raises InstrumentTimeoutException:
        @raises InstrumentProtocolException:
        @raises NotImplementedException:
        @raises InstrumentParameterException:
        @retval Next ResourceAgentState (STREAMING)
        """
        log.debug('Entered execute_start_autosample with args={0} & kwargs={1}'.format(args, kwargs))
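        # Spawn the polling greenlet only once; if polling is already active this call is a no-op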
        if not self._polling and self._polling_glet is None:
            self._polling_glet = spawn(self._poll)

        return ResourceAgentState.STREAMING, None
Code example #58
File: pd_engine.py Project: scion-network/scioncc
    def _leader_callback(self, leader_info):
        if leader_info["action"] == "acquire_leader":
            def start_sub():
                if not self.registry.preconditions_true.is_set():
                    log.info("PD is leader - awaiting PD preconditions")
                    # Await preconditions
                    await_timeout = get_safe(self._pd_core.pd_cfg, "engine.await_preconditions.await_timeout")
                    precond_true = self.registry.preconditions_true.wait(timeout=await_timeout)
                    if not precond_true:
                        log.warn("PD preconditions not satisfied after timeout - continuing")

                if self._pd_core.is_leader() and self.sub_cont is not None and not self.sub_active:
                    # Are we still leader? Not activated?
                    num_msg, num_cons = self.sub_cont.get_stats()
                    log.info("PD is leader - starting to consume (%s pending commands, %s consumers)", num_msg, num_cons)
                    self.sub_cont.activate()
                    self.sub_active = True
            start_sub_gl = spawn(start_sub)
        elif leader_info["action"] == "release_leader":
            if self.sub_cont is not None and self.sub_active:
                self.sub_cont.deactivate()
                self.sub_active = False