Example #1
    def get_dataset_bounds(self, dataset_id=''):
        """@brief Get the bounding coordinates of the dataset using a couch map/reduce query
        @param dataset_id
        @result bounds is a dictionary containing spatial and temporal bounds of the dataset in standard units

        @param dataset_id    str
        @retval bounds    Unknown
        """
        dataset = self.read_dataset(dataset_id=dataset_id)
        key = dataset.primary_view_key  # stream_id
        ar = gevent.event.AsyncResult()

        def ar_timeout(db):
            opts = {'start_key': [key, 0], 'end_key': [key, 2]}
            try:
                results = db.query_view("datasets/bounds",
                                        opts=opts)[0]['value']
            except IndexError:
                # Means there are no results
                results = {}
            ar.set(results)

        db = self.container.datastore_manager.get_datastore(
            dataset.datastore_name)
        g = Greenlet(ar_timeout, db)
        g.start()
        bounds = ar.get(timeout=5)

        return bounds
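
The core pattern above — run a blocking query in a Greenlet and wait on an AsyncResult with a timeout — reduces to a small self-contained sketch (the names below are illustrative, not from the example):

from gevent import Greenlet
from gevent.event import AsyncResult

def fetch_with_timeout(query, timeout=5):
    """Run a blocking callable in a greenlet; wait at most `timeout` seconds."""
    ar = AsyncResult()

    def worker():
        ar.set(query())  # hand the result back to the waiting caller

    Greenlet(worker).start()
    return ar.get(timeout=timeout)  # raises gevent.Timeout on expiry

print(fetch_with_timeout(lambda: sum(range(10))))  # -> 45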
Example #2
 def launch_benchmark(transform_number=1, primer=1, message_length=4):
     import gevent
     from gevent.greenlet import Greenlet
     from pyon.util.containers import DotDict
     from pyon.net.transport import NameTrio
     from pyon.net.endpoint import Publisher
     import uuid
     num = transform_number
     msg_len = message_length
     transforms = list()
     pids = 1
     TransformBenchTesting.message_length = message_length
     cc = Container.instance
     pub = Publisher(to_name=NameTrio(get_sys_name(),
                                      str(uuid.uuid4())[0:6]))
     for i in xrange(num):
         tbt = cc.proc_manager._create_service_instance(
             str(pids), 'tbt', 'prototype.transforms.linear',
             'TransformInPlace',
             DotDict({
                 'process': {
                     'name': 'tbt%d' % pids,
                     'transform_id': pids
                 }
             }))
         tbt.init()
         tbt.start()
         gevent.sleep(0.2)
         for _ in xrange(primer):
             pub.publish(list(xrange(msg_len)))
         g = Greenlet(tbt.perf)
         g.start()
         transforms.append(tbt)
         pids += 1
Example #3
    def execute_replay(self):
        '''
        @brief Spawns a greenlet to take care of the query and work
        '''
        if not hasattr(self, 'output'):
            raise Inconsistent(
                'The replay process requires an output stream publisher named output. Invalid configuration!'
            )

        datastore_name = self.datastore_name
        key_id = self.key_id

        view_name = self.view_name

        opts = {
            'start_key': [key_id, 0],
            'end_key': [key_id, 2],
            'include_docs': True
        }

        g = Greenlet(self._query,
                     datastore_name=datastore_name,
                     view_name=view_name,
                     opts=opts,
                     callback=lambda results: self._publish_query(results))
        g.start()
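
Note that the callback passed to Greenlet above is not special to gevent: the constructor simply forwards every argument to the target, so callback arrives in self._query as an ordinary keyword argument. A minimal sketch of that forwarding (names here are made up):

from gevent import Greenlet

def query(view_name, opts=None, callback=None):
    results = [1, 2, 3]  # stand-in for the real view query
    if callback:
        callback(results)

g = Greenlet(query, view_name="datasets/bounds",
             opts={'include_docs': True},
             callback=lambda results: print("publish", results))
g.start()
g.join()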
Example #4
    def handle_multi_device_job(self, job_handler, result_handler):

        job_worker_pool = Pool(self.max_job_task)
        job_percent_per_task = \
            self.job_log_utils.calculate_job_percentage(
                len(self.device_json), buffer_task_percent=False,
                total_percent=self.job_percent)[0]
        for device_id in self.device_json:
            device_data = self.device_json.get(device_id)
            device_fqname = ':'.join(map(str,
                                         device_data.get('device_fqname')))
            job_template_fq_name = ':'.join(map(str,
                                                self.job_template.fq_name))
            pr_fabric_job_template_fq_name = device_fqname + ":" + \
                self.fabric_fq_name + ":" + \
                job_template_fq_name
            self.job_log_utils.send_prouter_job_uve(
                self.job_template.fq_name,
                pr_fabric_job_template_fq_name,
                self.job_execution_id,
                job_status="IN_PROGRESS")

            job_worker_pool.start(
                Greenlet(job_handler.handle_job, result_handler,
                         job_percent_per_task, device_id))
        job_worker_pool.join()
Example #5
    def handle_multi_device_job(self, job_handler, result_handler):

        job_worker_pool = Pool(self.max_job_task)
        job_percent_per_task = \
            self.job_log_utils.calculate_job_percentage(
                len(self.device_json), buffer_task_percent=False,
                total_percent=self.job_percent)[0]
        for device_id in self.device_json:
            if device_id in result_handler.failed_device_jobs:
                self._logger.debug("Not executing the next operation "
                                   "in the workflow for device: %s" %
                                   device_id)
                continue
            device_data = self.device_json.get(device_id)
            device_fqname = ':'.join(map(str,
                                         device_data.get('device_fqname')))
            device_name = device_data.get('device_fqname', [""])[-1]
            job_template_fq_name = ':'.join(map(str,
                                                self.job_template.fq_name))
            pr_fabric_job_template_fq_name = device_fqname + ":" + \
                self.fabric_fq_name + ":" + \
                job_template_fq_name
            self.job_log_utils.send_prouter_job_uve(
                self.job_template.fq_name,
                pr_fabric_job_template_fq_name,
                self.job_execution_id,
                job_status="IN_PROGRESS")

            job_worker_pool.start(
                Greenlet(job_handler.handle_job, result_handler,
                         job_percent_per_task, device_id, device_name))
        job_worker_pool.join()
Example #6
    def on_start(self):
        super(VizTransformProcForMatplotlibGraphs, self).on_start()
        #assert len(self.streams)==1
        self.initDataFlag = True
        self.graph_data = {}  # stores a dictionary of variables : [list of values]

        # Need some clients
        self.rr_cli = ResourceRegistryServiceProcessClient(
            process=self, node=self.container.node)
        self.pubsub_cli = PubsubManagementServiceClient(
            node=self.container.node)

        # extract the various parameters passed to the transform process
        self.out_stream_id = self.CFG.get('process').get(
            'publish_streams').get('visualization_service_submit_stream_id')

        # Create a publisher on the output stream
        #stream_route = self.pubsub_cli.register_producer(stream_id=self.out_stream_id)
        out_stream_pub_registrar = StreamPublisherRegistrar(
            process=self.container, node=self.container.node)
        self.out_stream_pub = out_stream_pub_registrar.create_publisher(
            stream_id=self.out_stream_id)

        self.data_product_id = self.CFG.get('data_product_id')
        self.stream_def_id = self.CFG.get("stream_def_id")
        self.stream_def = self.rr_cli.read(self.stream_def_id)

        # Start the thread responsible for keeping track of time and generating graphs
        # Mutex for ensuring proper concurrent communications between threads
        self.lock = RLock()
        self.rendering_proc = Greenlet(self.rendering_thread)
        self.rendering_proc.start()
Example #7
 def run(self, blog):
     '''
     Initiate the thread to query, organize and publish the data
     '''
     production = Greenlet(self._grab,
                           blog=blog,
                           callback=lambda: self._on_done())
     production.start()
     self.greenlet_queue.append(production)
Example #8
    def generate_worker(self, target: Callable, *args, **kwargs) -> Greenlet:

        @functools.wraps(target)
        @CoroutineStrategy.save_return_value
        def _target_function(*_args, **_kwargs):
            result_value = target(*_args, **_kwargs)
            return result_value

        # return gevent.spawn(_target_function, *args, **kwargs)
        return Greenlet(_target_function, *args, **kwargs)
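
The commented-out gevent.spawn line differs from the Greenlet constructor only in when the greenlet starts: spawn constructs and starts in one call, while the constructor leaves start() to the caller, which is why generate_worker can hand back an unstarted worker. A quick comparison:

import gevent
from gevent import Greenlet

g1 = Greenlet(print, "constructed, started explicitly")
g1.start()

g2 = gevent.spawn(print, "constructed and started in one call")
gevent.joinall([g1, g2])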
Example #9
    def start(self):

        log.debug("StreamProducer start")
        # Threads become efficient Greenlets with gevent
        streams = self.CFG.get('process', {}).get('publish_streams', None)
        if streams:
            self.output_streams = list(streams)
        else:
            self.output_streams = None

        self.producer_proc = Greenlet(self._trigger_func)
        self.producer_proc.start()
Example #10
    def on_start(self):
        super(ExampleDataProducer, self).on_start()

        stream_id = self.CFG.process.out_stream_id

        g = Greenlet(self._trigger_func, stream_id)
        log.debug('Starting publisher thread for simple ctd data.')
        g.start()
        log.info('Publisher Greenlet started in "%s"' %
                 self.__class__.__name__)
        self.greenlet_queue = []
        self.greenlet_queue.append(g)

        self.finished = gevent.event.Event()
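
The finished Event created at the end is presumably what the trigger loop checks to know when to stop. A minimal sketch of that shutdown handshake (the loop body is illustrative):

import gevent
from gevent.event import Event

finished = Event()

def trigger_loop():
    while not finished.is_set():  # run until shutdown is signalled
        print("publishing...")
        gevent.sleep(0.1)

g = gevent.spawn(trigger_loop)
gevent.sleep(0.35)
finished.set()  # request shutdown
g.join()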
Example #11
    def launch_benchmark(transform_number=1, primer=1, message_length=4):
        import gevent
        from gevent.greenlet import Greenlet
        from pyon.util.containers import DotDict
        from pyon.net.transport import NameTrio
        from pyon.net.endpoint import Publisher
        import numpy
        from pyon.ion.granule.record_dictionary import RecordDictionaryTool
        from pyon.ion.granule.taxonomy import TaxyTool
        from pyon.ion.granule.granule import build_granule

        tt = TaxyTool()
        tt.add_taxonomy_set('a')

        import uuid
        num = transform_number
        msg_len = message_length
        transforms = list()
        pids = 1
        TransformBenchTesting.message_length = message_length
        cc = Container.instance
        pub = Publisher(to_name=NameTrio(get_sys_name(),
                                         str(uuid.uuid4())[0:6]))
        for i in xrange(num):
            tbt = cc.proc_manager._create_service_instance(
                str(pids), 'tbt', 'prototype.transforms.linear',
                'TransformInPlaceNewGranule',
                DotDict({
                    'process': {
                        'name': 'tbt%d' % pids,
                        'transform_id': pids
                    }
                }))
            tbt.init()
            tbt.start()
            gevent.sleep(0.2)
            for _ in xrange(primer):
                rd = RecordDictionaryTool(tt, message_length)
                rd['a'] = numpy.arange(message_length)
                gran = build_granule(data_producer_id='dp_id',
                                     taxonomy=tt,
                                     record_dictionary=rd)
                pub.publish(gran)

            g = Greenlet(tbt.perf)
            g.start()
            transforms.append(tbt)
            pids += 1
Example #12
    def test_dispatcher(self):
        options = {
            'enabled': True,
            'server': '127.0.0.1',
            'active_range': '00:00 - 23:59',
            'sleep_interval': '1',
            'activation_probability': '1',
            'username': '******',
            'password': '******',
            'port': 8080}

        dispatcher = BaitDispatcher(Mock(), options)

        dispatcher_greenlet = Greenlet(dispatcher.start)
        dispatcher_greenlet.start()
        gevent.sleep(2)
        dispatcher_greenlet.kill()
Example #13
    def test_dispatcher(self):
        options = {
            'enabled': True,
            'server': '127.0.0.1',
            'active_range': '00:00 - 23:59',
            'sleep_interval': '1',
            'activation_probability': '1',
            'username': '******',
            'password': '******',
            'port': 8080
        }

        dispatcher = BaitDispatcher({}, None, options)

        dispatcher.bait_type = Mock()
        dispatcher_greenlet = Greenlet(dispatcher.start)
        dispatcher_greenlet.start()
        time.sleep(1)
        dispatcher_greenlet.kill()
        dispatcher.bait_type.start.assert_called()
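
Both dispatcher tests follow the same shape: start the unit under test in a Greenlet, let it run for a while, kill() the greenlet, then assert on the side effects. Stripped to the bare pattern (the worker is a stand-in):

import gevent
from gevent import Greenlet
from unittest.mock import Mock

side_effect = Mock()

def worker():
    while True:  # a long-running loop, like dispatcher.start
        side_effect()
        gevent.sleep(0.1)

g = Greenlet(worker)
g.start()
gevent.sleep(0.5)  # let it run a few iterations
g.kill()           # raises GreenletExit inside the greenlet
side_effect.assert_called()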
Example #14
    def on_start(self):
        '''
        Creates a publisher for each stream_id passed in as publish_streams.
        Creates an attribute named after each stream which holds that stream's publisher.
        ex: given publish_streams:{'output': my_output_stream_id }
          the instance gets an attribute `output` which is the publisher for the
          stream in my_output_stream_id
        '''

        # Get the stream(s)
        stream_id = self.CFG.get('process', {}).get('stream_id', '')

        self.greenlet_queue = []

        self._usgs_def = USGS_stream_definition()

        # Stream creation is done in SA, but to make the example runnable for demonstration, create one here if it is not provided...
        if not stream_id:

            pubsub_cli = PubsubManagementServiceClient(node=self.container.node)
            stream_id = pubsub_cli.create_stream(
                name='Example USGS Data',
                stream_definition=self._usgs_def,
                original=True,
                encoding='ION R2')

        self.stream_publisher_registrar = StreamPublisherRegistrar(
            process=self, node=self.container.node)
        # Needed to get the originator's stream_id
        self.stream_id = stream_id

        self.publisher = self.stream_publisher_registrar.create_publisher(
            stream_id=stream_id)

        self.last_time = 0

        g = Greenlet(self._trigger_func, stream_id)
        log.warn('Starting publisher thread for simple usgs data.')
        g.start()
        self.greenlet_queue.append(g)
Example #15
    def on_start(self):

        log.warn('Entering On Start!!!')
        # Get the stream(s)
        stream_id = self.CFG.get_safe('process.stream_id', {})

        self.greenlet_queue = []

        # Stream creation is done in SA, but to make the example runnable for demonstration, create one here if it is not provided...
        if not stream_id:

            pubsub_cli = PubsubManagementServiceClient(
                node=self.container.node)

            stream_def_id = pubsub_cli.create_stream_definition(
                name='Producer stream %s' % str(uuid4()),
                container=self.outgoing_stream_def)

            stream_id = pubsub_cli.create_stream(
                name='Example CTD Data',
                stream_definition_id=stream_def_id,
                original=True,
                encoding='ION R2')

        self.stream_publisher_registrar = StreamPublisherRegistrar(
            process=self, node=self.container.node)
        # Needed to get the originator's stream_id
        self.stream_id = stream_id

        self.publisher = self.stream_publisher_registrar.create_publisher(
            stream_id=stream_id)

        self.last_time = 0

        g = Greenlet(self._trigger_func, stream_id)
        log.debug('Starting publisher thread for simple ctd data.')
        g.start()
        log.warn('Publisher Greenlet started in "%s"' %
                 self.__class__.__name__)
        self.greenlet_queue.append(g)
Example #16
 def wrapper(run, *args, **kwargs):
     return Greenlet(app_context(app)(run), *args, **kwargs)
Example #17
 def handle_multi_device_job(self, job_handler, result_handler):
     job_worker_pool = Pool(POOL_SIZE)
     for device_id in self.job_params['device_list']:
         job_worker_pool.start(Greenlet(job_handler.handle_device_job,
                                        device_id, result_handler))
     job_worker_pool.join()
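
gevent.pool.Pool caps how many greenlets run at once: Pool.start adds a constructed Greenlet, blocking when the pool is full, and join() waits for everything to finish. A self-contained sketch of the same fan-out (device ids are made up):

from gevent import Greenlet
from gevent.pool import Pool

def handle_device(device_id):
    print("handling", device_id)

pool = Pool(2)  # at most two jobs in flight at a time
for device_id in ["dev-a", "dev-b", "dev-c", "dev-d"]:
    pool.start(Greenlet(handle_device, device_id))
pool.join()     # wait for every job to finish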
Example #18
 def on_start(self):
     log.debug("StreamProducer start")
     self.producer_proc = Greenlet(self._trigger_func)
     self.producer_proc.start()
Example #19
    def _go_greenlet(self,
                     greenlet_count,
                     put_count,
                     get_count,
                     bench_item_count,
                     watchdog_interval_ms=60000,
                     max_item=128000,
                     max_bytes=32 * 1024 * 1024,
                     max_single_item_bytes=1 * 1024 * 1024,
                     purge_min_bytes=8 * 1024 * 1024,
                     purge_min_count=1000):
        """
        Doc
        :param greenlet_count: greenlet_count
        :param put_count: put_count
        :param get_count: get_count
        :param bench_item_count: bench_item_count
        :param watchdog_interval_ms: watchdog_interval_ms
        :param max_item: max_item
        :param max_bytes: max_bytes
        :param max_single_item_bytes: max_single_item_bytes
        :param purge_min_bytes: purge_min_bytes
        :param purge_min_count: purge_min_count
        """

        g_event = None
        g_array = None
        try:
            # Settings
            g_count = greenlet_count
            g_ms = 10000

            # Continue callback loop
            self.callback_return = True

            # Go
            self.mem_cache = MemoryCache(
                watchdog_interval_ms=watchdog_interval_ms,
                max_item=max_item,
                max_bytes=max_bytes,
                max_single_item_bytes=max_single_item_bytes,
                purge_min_bytes=purge_min_bytes,
                purge_min_count=purge_min_count)

            # Item count
            self.bench_item_count = bench_item_count
            self.bench_put_weight = put_count
            self.bench_get_weight = get_count
            self.bench_ttl_min_ms = 1000
            self.bench_ttl_max_ms = int(g_ms / 2)

            # Go
            self.run_event = Event()
            self.exception_raised = 0
            self.open_count = 0
            self.thread_running = AtomicIntSafe()
            self.thread_running_ok = AtomicIntSafe()

            # Item per greenlet (integer division: these are index offsets)
            item_per_greenlet = self.bench_item_count // g_count

            # Signal
            self.gorun_event = Event()

            # Alloc greenlet
            g_array = list()
            g_event = list()
            for _ in range(0, g_count):
                greenlet = Greenlet()
                g_array.append(greenlet)
                g_event.append(Event())

            # Run them
            cur_idx = 0
            for idx in range(0, len(g_array)):
                greenlet = g_array[idx]
                event = g_event[idx]
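                # Note: Greenlet.spawn is a classmethod, so this call creates
                # and starts a *new* greenlet; the preallocated instance held
                # in g_array never actually runs.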
                greenlet.spawn(self._run_cache_bench, event, cur_idx,
                               cur_idx + item_per_greenlet)
                cur_idx += item_per_greenlet
                SolBase.sleep(0)

            # Signal
            self.gorun_event.set()

            # Wait a bit
            dt = SolBase.mscurrent()
            while SolBase.msdiff(dt) < g_ms:
                SolBase.sleep(500)
                # Stat
                ms = SolBase.msdiff(dt)
                sec = float(ms / 1000.0)
                total_put = Meters.aig("mcs.cache_put")
                per_sec_put = round(float(total_put) / sec, 2)
                total_get = Meters.aig("mcs.cache_get_hit") + Meters.aig(
                    "mcs.cache_get_miss")
                per_sec_get = round(float(total_get) / sec, 2)

                logger.info(
                    "Running..., count=%s, run=%s, ok=%s, put/sec=%s get/sec=%s, cache=%s",
                    self.open_count, self.thread_running.get(),
                    self.thread_running_ok.get(), per_sec_put, per_sec_get,
                    self.mem_cache)
                self.assertEqual(self.exception_raised, 0)

            # Over, signal
            logger.info("Signaling, count=%s", self.open_count)
            self.run_event.set()

            # Wait
            for g in g_event:
                g.wait(30.0)
                self.assertTrue(g.isSet())

            g_event = None
            g_array = None

            # Log
            Meters.write_to_logger()
        finally:
            self.run_event.set()
            if g_event:
                for g in g_event:
                    g.set()

            if g_array:
                for g in g_array:
                    g.kill()

            if self.mem_cache:
                max_count = 0
                total_size = 0
                i = 0
                for (k, v) in self.mem_cache._hash_key.items():
                    i += 1
                    total_size += len(k) + len(v[1])
                    if i < max_count:
                        logger.info("%s => %s", k, v)
                self.assertEqual(total_size,
                                 self.mem_cache._current_data_bytes.get())

                self.mem_cache.stop_cache()
                self.mem_cache = None
Example #20
    def _go_greenlet(self, greenlet_count, put_count, get_count,
                     bench_item_count):
        """
        Doc
        :param greenlet_count: greenlet_count
        :param put_count: put_count
        :param get_count: get_count
        :param bench_item_count: bench_item_count
        """

        g_event = None
        g_array = None
        try:
            # Settings
            g_count = greenlet_count
            g_ms = 10000

            # Continue callback loop
            self.callback_return = True

            # Go
            self.redis_cache = RedisCache()

            # Item count
            self.bench_item_count = bench_item_count
            self.bench_put_weight = put_count
            self.bench_get_weight = get_count
            self.bench_ttl_min_ms = 1000
            self.bench_ttl_max_ms = int(g_ms / 2)

            # Go
            self.run_event = Event()
            self.exception_raised = 0
            self.open_count = 0
            self.thread_running = AtomicIntSafe()
            self.thread_running_ok = AtomicIntSafe()

            # Item per greenlet (integer division: these are index offsets)
            item_per_greenlet = self.bench_item_count // g_count

            # Signal
            self.gorun_event = Event()

            # Alloc greenlet
            g_array = list()
            g_event = list()
            for _ in range(0, g_count):
                greenlet = Greenlet()
                g_array.append(greenlet)
                g_event.append(Event())

            # Run them
            cur_idx = 0
            for idx in range(0, len(g_array)):
                greenlet = g_array[idx]
                event = g_event[idx]
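                # Note: Greenlet.spawn is a classmethod, so this call creates
                # and starts a *new* greenlet; the preallocated instance held
                # in g_array never actually runs.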
                greenlet.spawn(self._run_cache_bench, event, cur_idx,
                               cur_idx + item_per_greenlet)
                cur_idx += item_per_greenlet
                SolBase.sleep(0)

            # Signal
            self.gorun_event.set()

            # Wait a bit
            dt = SolBase.mscurrent()
            while SolBase.msdiff(dt) < g_ms:
                SolBase.sleep(500)
                # Stat
                ms = SolBase.msdiff(dt)
                sec = float(ms / 1000.0)
                total_put = Meters.aig("rcs.cache_put")
                per_sec_put = round(float(total_put) / sec, 2)
                total_get = Meters.aig("rcs.cache_get_hit") + Meters.aig(
                    "rcs.cache_get_miss")
                per_sec_get = round(float(total_get) / sec, 2)

                logger.info(
                    "Running..., count=%s, run=%s, ok=%s, put/sec=%s get/sec=%s, cache=%s",
                    self.open_count, self.thread_running.get(),
                    self.thread_running_ok.get(), per_sec_put, per_sec_get,
                    self.redis_cache)
                self.assertEqual(self.exception_raised, 0)

            # Over, signal
            logger.info("Signaling, count=%s", self.open_count)
            self.run_event.set()

            # Wait
            for g in g_event:
                g.wait(30.0)
                self.assertTrue(g.isSet())

            g_event = None
            g_array = None

            # Log
            Meters.write_to_logger()
        finally:
            self.run_event.set()
            if g_event:
                for g in g_event:
                    g.set()

            if g_array:
                for g in g_array:
                    g.kill()

            if self.redis_cache:
                self.redis_cache.stop_cache()
                self.redis_cache = None
Example #21
 def handle_multi_device_job(self, job_handler, result_handler):
     job_worker_pool = Pool(self.max_job_task)
     for device_id in self.job_params['device_list']:
         job_worker_pool.start(
             Greenlet(job_handler.handle_job, result_handler, device_id))
     job_worker_pool.join()
Example #22
# gevent depends on the greenlet library; a Greenlet is a simple wrapper that ties
# a function and its arguments together.
# Let's see them in action.

# In[ ]:


# Creating a simple function and wrapping it in greenlet
def myfunction(arg1, arg2, **kwargs):
    print(arg1, arg2, kwargs)
    return 100


# In[ ]:

g = Greenlet(myfunction, 'One', 'Two',
             now='Buckle my shoe')  # create a Greenlet instance via the constructor
g.start()  # schedule the greenlet to run; start() always returns None
g.join()   # block until the greenlet finishes
print('Finished')
print('Greenlet.value', g.value)  # the function's return value is stored on .value

# In[ ]:

# The other way is to use the spawn() classmethod, which creates an instance
# and calls start() on it too, so it is a widely used shorthand.
jobs = [
    gevent.spawn(myfunction, '1', '2', now='Buckle my shoe')
    for i in range(0, 5)
]
gevent.joinall(jobs)  # wait for all five greenlets to finish
Example #23
 def spawn(self, function, *args, **kwargs):
     greenlet = Greenlet(function, *args, **kwargs)
     self.start(greenlet)
     return greenlet
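
This helper mirrors what gevent.pool.Group.spawn already provides: construct a Greenlet, start it, and track it in the group. For comparison, the stock API used directly:

from gevent.pool import Group

group = Group()
g = group.spawn(print, "hello from the group")  # construct + start + track
group.join()            # wait for all members to finish
print(g.successful())   # True once it completed without raising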