Example #1
 def launch_benchmark(transform_number=1, primer=1, message_length=4):
     import gevent
     from gevent.greenlet import Greenlet
     from pyon.util.containers import DotDict
     from pyon.net.transport import NameTrio
     from pyon.net.endpoint import Publisher
     import uuid
     num = transform_number
     msg_len = message_length
     transforms = list()
     pids = 1
     TransformBenchTesting.message_length = message_length
     cc = Container.instance
     pub = Publisher(to_name=NameTrio(get_sys_name(),str(uuid.uuid4())[0:6]))
     for i in xrange(num):
         tbt = cc.proc_manager._create_service_instance(
             str(pids), 'tbt', 'prototype.transforms.linear', 'TransformInPlace',
             DotDict({'process': {'name': 'tbt%d' % pids, 'transform_id': pids}}))
         tbt.init()
         tbt.start()
         gevent.sleep(0.2)
         for _ in xrange(primer):  # avoid shadowing the outer loop variable
             pub.publish(list(xrange(msg_len)))
         g = Greenlet(tbt.perf)
         g.start()
         transforms.append(tbt)
         pids += 1
Example #2
 def __init__(self, stats, reader_clients, event_push_client):
     Greenlet.__init__(self)
     self._log = logging.getLogger(str(self))
     self._stats = stats
     self._reader_clients = reader_clients
     self._event_push_client = event_push_client
     self._halt_event = Event()
Example #3
File: backdoor.py Project: strogo/pylibs
 def __init__(self, address, locals=None):
     Greenlet.__init__(self)
     if isinstance(address, socket.socket):
         self.socket = address
     else:
         self.socket = socket.tcp_listener(address)
     self.locals = locals
Example #4
    def __init__(
        self,
        context,
        server_node_name,
        server_address,
        client_tag,
        client_address,
        deliverator,
        connect_messages=list(),
    ):
        Greenlet.__init__(self)

        self._log = logging.getLogger("ResilientClient-%s" % (server_address,))

        self._context = context
        self._server_node_name = server_node_name
        self._server_address = server_address

        self._client_tag = client_tag
        self._client_address = client_address
        self._deliverator = deliverator

        self._send_queue = gevent.queue.Queue()

        # prime the send queue with messages to be sent as soon
        # as we connect
        for connect_message in connect_messages:
            if not "message-id" in connect_message:
                connect_message["message-id"] = uuid.uuid1().hex
            message = message_format(ident=None, control=connect_message, body=None)
            self._send_queue.put(message)

        self._req_socket = None
        self.connected = False
Example #5
def qoorate_determine_relevency(application, item):
    """schedule an indexing using concurrency"""
    logging.info("qoorate_determine_relevency, start: %s" % item)
    g = Greenlet(qoorate_generate_relevency, item)
    logging.info("qoorate_generate_relevency, greenlet, start(): %s" % item)
    g.start()
    logging.info("qoorate_generate_relevency, end: %s" % item)
Example #6
 def join(self, timeout=3.0):
     self._log.debug("joining")
     if self._req_socket is not None:
         self._req_socket.close()
         self._req_socket = None
     Greenlet.join(self, timeout)
     self._log.debug("join complete")
Example #7
 def init_stream_listeners(self, stream_id):
     self.event_listeners[stream_id] = {}
     self.event_queue[stream_id] = gevent.queue.Queue()
     self.last_few_events[stream_id] = StreamEvent.get_events(stream_id)

     Greenlet.spawn(EventListeners.start_publishing_events, self, stream_id)
Example #8
 def run(self, blog):
     '''
     Initiate the thread to query, organize and publish the data
     '''
     production = Greenlet(self._grab, blog=blog, callback=lambda: self._on_done())
     production.start()
     self.greenlet_queue.append(production)
Example #9
    def get_dataset_bounds(self, dataset_id=""):
        """@brief Get the bounding coordinates of the dataset using a couch map/reduce query
        @param dataset_id
        @result bounds is a dictionary containing spatial and temporal bounds of the dataset in standard units

        @param dataset_id    str
        @retval bounds    Unknown
        """
        dataset = self.read_dataset(dataset_id=dataset_id)
        key = dataset.primary_view_key  # stream_id
        ar = gevent.event.AsyncResult()

        def ar_timeout(db):
            opts = {"start_key": [key, 0], "end_key": [key, 2]}
            try:
                results = db.query_view("datasets/bounds", opts=opts)[0]["value"]
            except IndexError:
                # Means there are no results
                results = {}
            ar.set(results)

        db = self.container.datastore_manager.get_datastore(dataset.datastore_name)
        g = Greenlet(ar_timeout, db)
        g.start()
        bounds = ar.get(timeout=5)

        return bounds
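The snippet above shows a recurring gevent idiom: run a blocking query in a worker greenlet and bound the wait with AsyncResult.get(timeout=...). A minimal, self-contained sketch of just that pattern (slow_query is a hypothetical stand-in for the couch view query):

import gevent
from gevent.event import AsyncResult

def slow_query(ar):
    gevent.sleep(1)              # stand-in for a blocking datastore call
    ar.set({"answer": 42})       # deliver the result to the waiter

ar = AsyncResult()
gevent.spawn(slow_query, ar)
try:
    bounds = ar.get(timeout=5)   # raises gevent.Timeout if nothing arrives in time
except gevent.Timeout:
    bounds = {}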
Example #10
File: http.py Project: strogo/pylibs
 def serve_forever(self, *args, **kwargs):
     stop_timeout = kwargs.pop('stop_timeout', 0)
     self.start(*args, **kwargs)
     try:
         self._stopped_event.wait()
     finally:
         Greenlet.spawn(self.stop, timeout=stop_timeout).join()
Example #11
    def test_dispatcher(self):
        options = {
            'capabilities': {
                # NoneType because we're going to pass a None to the dispatcher.
                'NoneType': {
                    'enabled': True,
                    'server': '127.0.0.1',
                    'timing': {
                        'active_range': '00:00 - 23:59',
                        'sleep_interval': '1',
                        'activation_probability': '1'
                    },
                    'username': '******',
                    'password': '******',
                    'port': 8080
                },

            }
        }

        dispatcher = BeeDispatcher(options, None, '127.0.0.1')

        dispatcher.max_sessions = 1
        dispatcher.bee = Mock()
        dispatcher_greenlet = Greenlet(dispatcher.start)
        dispatcher_greenlet.start()
        time.sleep(1)
        dispatcher_greenlet.kill()
        dispatcher.bee.do_session.assert_called()
Example #12
 def __init__(self, spawn, func, iterable):
     from gevent.queue import Queue
     Greenlet.__init__(self)
     self.spawn = spawn
     self.func = func
     self.iterable = iterable
     self.queue = Queue()
     self.count = 0
Example #13
 def join(self, timeout=3.0):
     """
     Clean up and wait for the greenlet to shut down
     """
     self._log.debug("joining")
     self._pull_socket.close()
     Greenlet.join(self, timeout)
     self._log.debug("join complete")
Example #14
 def __init__(self, sock, service=None, close_callback=None):
     self._buff = ""
     self.sock = sock
     self._timeout = 10  # request timeout, in seconds
     self._id_iter = _id_generator()  # message ID generator
     self._request_table = {}  # AsyncResult for every pending RPC request, keyed by packet ID (30-bit range; within the timeout window it should never repeat)
     self.service = service
     self.close_callback = close_callback  # disconnect callback
     Greenlet.__init__(self)
Example #15
 def serve_forever(self, stop_timeout=None):
     """Start the server if it hasn't been already started and wait until it's stopped."""
     # add test that serve_forever exits on stop()
     if not self.started:
         self.start()
     try:
         self._stop_event.wait()
     finally:
         Greenlet.spawn(self.stop, timeout=stop_timeout).join()
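Examples #10 and #15 (both from gevent's base server) share one idea: serve_forever() blocks on an internal Event, and stop() runs in a freshly spawned greenlet so shutdown can be triggered safely even from inside the server's own handlers. A reduced sketch of that control flow, assuming nothing beyond gevent itself:

import gevent
from gevent import Greenlet
from gevent.event import Event

class MiniServer(object):
    def __init__(self):
        self._stop_event = Event()

    def stop(self, timeout=None):
        self._stop_event.set()        # wakes up serve_forever()

    def serve_forever(self):
        try:
            self._stop_event.wait()   # blocks until stop() is called
        finally:
            Greenlet.spawn(self.stop).join()

server = MiniServer()
gevent.spawn_later(0.1, server.stop)  # stop from another greenlet
server.serve_forever()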
Example #16
File: pool.py Project: renstrom/gevent
    def __init__(self, func, iterable, spawn=None, maxsize=None, _zipped=False):
        """
        An iterator that applies *func* to each item of *iterable*, yielding
        results as the greenlets finish.

        :keyword int maxsize: If given and not-None, specifies the maximum number of
            finished results that will be allowed to accumulate awaiting the reader;
            more than that number of results will cause map function greenlets to begin
            to block. This is most useful if there is a great disparity between the speed
            of the mapping code and that of the consumer, and the results consume a great
            deal of resources. Using a bound is more computationally expensive than not
            using a bound.

        .. versionchanged:: 1.1b3
            Added the *maxsize* parameter.
        """
        from gevent.queue import Queue
        Greenlet.__init__(self)
        if spawn is not None:
            self.spawn = spawn
        if _zipped:
            self._zipped = _zipped
        self.func = func
        self.iterable = iterable
        self.queue = Queue()
        if maxsize:
            # Bounding the queue is not enough if we want to keep from
            # accumulating objects; the result value will be around as
            # the greenlet's result, blocked on self.queue.put(), and
            # we'll go on to spawn another greenlet, which in turn can
            # create the result. So we need a semaphore to prevent a
            # greenlet from exiting while the queue is full so that we
            # don't spawn the next greenlet (assuming that self.spawn
            # is of course bounded). (Alternatively we could have the
            # greenlet itself do the insert into the pool, but that
            # takes some rework).
            #
            # Given the use of a semaphore at this level, sizing the queue becomes
            # redundant, and that lets us avoid having to use self.link() instead
            # of self.rawlink() to avoid having blocking methods called in the
            # hub greenlet.
            factory = Semaphore
        else:
            factory = DummySemaphore
        self._result_semaphore = factory(maxsize)

        self.count = 0
        self.finished = False
        # If the queue size is unbounded, then we want to call all
        # the links (_on_finish and _on_result) directly in the hub greenlet
        # for efficiency. However, if the queue is bounded, we can't do that if
        # the queue might block (because if there's no waiter the hub can switch to,
        # the queue simply raises Full). Therefore, in that case, we use
        # the safer, somewhat-slower (because it spawns a greenlet) link() methods.
        # This means that _on_finish and _on_result can be called and interleaved in any order
        # if the call to self.queue.put() blocks..
        # Note that right now we're not bounding the queue, instead using a semaphore.
        self.rawlink(self._on_finish)
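The comment block above explains why the bounded variant uses a semaphore rather than a bounded queue. From the caller's side, maxsize is simply backpressure on the mapper greenlets. A minimal usage sketch (assumes gevent 1.1+, where imap_unordered gained the maxsize keyword):

import gevent
from gevent.pool import Pool

def square(n):
    gevent.sleep(0.01)   # simulate I/O-bound work
    return n * n

pool = Pool(4)           # at most 4 mapper greenlets run concurrently
# maxsize=2: at most two finished-but-unread results accumulate before the
# mappers block, bounding memory when the consumer is slower than the mappers
for result in pool.imap_unordered(square, range(10), maxsize=2):
    print(result)        # results arrive in completion order, not input order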
Example #17
 def __init__(self, ctx, manager, config, thrashers):
     Greenlet.__init__(self)
     self.ctx = ctx
     self.config = config
     self.e = None
     self.logger = log.getChild('daemon_watchdog')
     self.manager = manager
     self.name = 'watchdog'
     self.stopping = Event()
     self.thrashers = thrashers
Example #18
 def __init__(self, func, iterable, spawn=None):
     from gevent.queue import Queue
     Greenlet.__init__(self)
     if spawn is not None:
         self.spawn = spawn
     self.func = func
     self.iterable = iterable
     self.queue = Queue()
     self.count = 0
     self.rawlink(self._on_finish)
Example #19
    def start(self):
        """Start accepting the connections.

        If an address was provided in the constructor, then also create a socket, bind it and put it into the listening mode.
        """
        self.pre_start()
        try:
            self.start_accepting()
        except Exception:
            Greenlet.spawn(self.kill).join()
            raise
Example #20
 def on_subscribe(self, message):
     # validate that user is actually allowed to perform these actions (we
     # should probably replace this with actual channel auth negotiation at
     # the namespace level)
     game_id = message.pop('game_id')
     if not self.validate(game_id):
         self.emit('error', {'error': 'unauthorized'})
         return
     # spawn a thread to listen for messages from redis
     channel_id = game_id + '-room'
     Greenlet.spawn(self.listener, channel_id)
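The comment in the snippet above describes the pattern: spawn one greenlet per subscription so the blocking pubsub loop never stalls the socket handler. A self-contained sketch of that listener (assumes the redis-py client, a local Redis server, and an illustrative channel name):

from gevent import monkey; monkey.patch_socket()  # so redis-py's blocking I/O yields to other greenlets

import gevent
import redis
from gevent import Greenlet

def listener(channel_id):
    pubsub = redis.StrictRedis().pubsub()
    pubsub.subscribe(channel_id)
    for message in pubsub.listen():       # blocks, but only inside this greenlet
        if message["type"] == "message":
            print(message["data"])

Greenlet.spawn(listener, "game42-room")   # one listener greenlet per channel
gevent.sleep(1)                           # give the listener a chance to run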
Example #21
    def on_start(self):
        super(ExampleDataProducer, self).on_start()

        stream_id = self.CFG.process.out_stream_id

        g = Greenlet(self._trigger_func, stream_id)
        log.debug('Starting publisher thread for simple ctd data.')
        g.start()
        log.warn('Publisher Greenlet started in "%s"' % self.__class__.__name__)
        self.greenlet_queue = []
        self.greenlet_queue.append(g)
Example #22
    def __init__(self, context, node_name, address):
        Greenlet.__init__(self)

        self._log = logging.getLogger("DealerClient-%s" % (node_name, ))

        self._dealer_socket = context.socket(zmq.XREQ)
        self._dealer_socket.setsockopt(zmq.LINGER, 1000)
        self._log.debug("connecting to %s" % (address, ))
        self._dealer_socket.connect(address)

        self._send_queue = Queue(maxsize=None)
        self._delivery_queues = dict()
Example #23
 def __init__(self, stream_id, bit_rate_in_kbps=128.0):
     Greenlet.__init__(self)
     self.stream_id = stream_id

     if not AudioStreamReader.stream_buffers.get(self.stream_id, None):
         buffer = Buffer()
         byte_rate = (bit_rate_in_kbps / 8) * 1024
         sleep_time = (buffer.chunk_byte_size * 1.0) / byte_rate
         AudioStreamReader.stream_buffers[stream_id] = [buffer, byte_rate, sleep_time]

     self.buffer, self.byte_rate, self.sleep_time = AudioStreamReader.stream_buffers[self.stream_id]
Example #24
 def __init__(self, func, iterable, spawn=None):
     from gevent.queue import Queue
     Greenlet.__init__(self)
     if spawn is not None:
         self.spawn = spawn
     self.func = func
     self.iterable = iterable
     self.queue = Queue()
     self.count = 0
     self.waiting = []  # QQQ maybe deque will work faster there?
     self.index = 0
     self.maxindex = -1
     self.rawlink(self._on_finish)
Example #25
    def __init__(self, context, address, deliverator):
        Greenlet.__init__(self)

        self._log = logging.getLogger("PULLServer-%s" % (address, ))

        # we need a valid path for IPC sockets
        if address.startswith("ipc://"):
            prepare_ipc_path(address)

        self._pull_socket = context.socket(zmq.PULL)
        self._log.debug("binding")
        self._pull_socket.bind(address)

        self._deliverator = deliverator
Example #26
File: server.py Project: mishto/ChatApp
    def add_connection(self, username, ws):
        """
        Adds ws under the key username.
        """

        subscriber = self.redis.pubsub()
        subscriber.subscribe(username)
        g_listener = Greenlet(self._listen_to_channel, subscriber, ws)
        g_listener.start()
        ws.greenlet_listener = g_listener

        if username in self.subscriptions:
            self.subscriptions[username].append(ws)
        else:
            self.subscriptions[username] = [ws]
Example #27
class TransformExampleProducer(StreamProcess):
    """
    Used as a data producer in examples.
    It publishes input for the following examples as {'num': <int>}.
    A message is published every 4 seconds, and the published value increments by 1.
    id_p = cc.spawn_process('myproducer', 'ion.processes.data.transforms.transform_example', 'TransformExampleProducer', {'process':{'type':'stream_process','publish_streams':{'out_stream':'forced'}},'stream_producer':{'interval':4000}})
    cc.proc_manager.procs['%s.%s' %(cc.id,id_p)].start()
    """

    def on_init(self):
        log.debug("StreamProducer init. Self.id=%s" % self.id)


    def start(self):

        log.debug("StreamProducer start")
        # Threads become efficient Greenlets with gevent
        streams = self.CFG.get('process',{}).get('publish_streams',None)
        if streams:
            self.output_streams = list(k for k in streams)
        else:
            self.output_streams = None

        self.producer_proc = Greenlet(self._trigger_func)
        self.producer_proc.start()


    def process(self, packet):
        pass

    def on_quit(self):
        log.debug("TransformExampleProducer quit")
        self.producer_proc.kill()

    def _trigger_func(self):
        interval = self.CFG.get('stream_producer').get('interval')
        stream_route = self.CFG.get('stream_producer').get('stream_route')
        if self.output_streams:
            pub = getattr(self,self.output_streams[0],None)
        else:
            pub = None
        num = 1
        while True:
            msg = dict(num=str(num))
            pub.publish(msg)
            log.debug("Message %s published", num)
            num += 1
            time.sleep(interval/1000.0)
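Stripped of the pyon plumbing, the producer above is just a periodic-publisher greenlet. A minimal sketch (publish is a stand-in for the real stream publisher; note it uses gevent.sleep rather than time.sleep, which would block the whole loop unless monkey-patched):

import gevent
from gevent import Greenlet

def publish(msg):                        # stand-in for pub.publish(...)
    print("published %r" % (msg,))

def trigger_func(interval_ms):
    num = 1
    while True:
        publish(dict(num=str(num)))
        num += 1
        gevent.sleep(interval_ms / 1000.0)

producer = Greenlet(trigger_func, 4000)
producer.start()
gevent.sleep(9)                          # let it publish a couple of messages
producer.kill()                          # same shutdown path as on_quit() above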
Example #28
 def listen(self, host, port, backlog=1):
     sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
     sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
     sock.bind((host, port))
     sock.listen(backlog)
     greenlet_accept = Greenlet.spawn(self._do_accept, sock)
     self._greenlet_accept[sock] = greenlet_accept
Example #29
 def start_publishing_events(self, stream_id):
     while True:
         event_data = self.event_queue[stream_id].get()
         event_id = event_data["event_id"]
         data_to_send = json_util.dumps(event_data).replace("\r\n", "\n\n")
         if event_id != Event.RESET_POLLS_AND_SONG:
             EventListeners.last_few_events[stream_id].append(data_to_send)
         else:
             self.last_reset_event[stream_id] = data_to_send

         if len(EventListeners.last_few_events[stream_id]) > 20:
             EventListeners.last_few_events[stream_id].pop(0)
         StreamEvent.add(stream_id, data_to_send)
         for socket in self.event_listeners[stream_id]:
             # send the event to each listener on its own greenlet, in parallel
             Greenlet.spawn(EventListeners.send_event, self, stream_id, socket, data_to_send)
Example #30
    def new_connection(self, sock):
        greenlet_recv = Greenlet.spawn(self._handle, sock)
        greenlet_send = Greenlet.spawn(self._write, sock)

        # closure
        def close(gr):
            greenlet_recv.kill()
            greenlet_send.kill()
            if sock not in self._send_queue:
                return
            sock.close()
            del self._send_queue[sock]

        greenlet_recv.link(close)
        greenlet_send.link(close)
        self._send_queue[sock] = Queue()
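The closure above leans on Greenlet.link(callback): the callback receives the finished greenlet whether it exited normally or was killed, so linking both greenlets to one close function tears the pair down together. A condensed sketch of that teardown:

import gevent
from gevent import Greenlet

def reader():
    gevent.sleep(0.1)    # pretend to read until the peer disconnects

def writer():
    gevent.sleep(10)     # would normally block on a send queue

g_recv = Greenlet.spawn(reader)
g_send = Greenlet.spawn(writer)

def close(finished_greenlet):
    # whichever side finishes first kills its sibling; kill() on an
    # already-dead greenlet is a no-op, so double invocation is harmless
    g_recv.kill()
    g_send.kill()

g_recv.link(close)
g_send.link(close)
gevent.sleep(0.2)        # reader returns; the link then kills the writer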
Example #31
 def __init__(self, current):
     # super(greenlet, self).__init__()
     Greenlet.__init__(self)
     self.current = current
Example #32
    def _go_greenlet(self, greenlet_count, put_count, get_count,
                     bench_item_count):
        """
        Doc
        :param greenlet_count: greenlet_count
        :param put_count: put_count
        :param get_count: get_count
        :param bench_item_count: bench_item_count
        """

        g_event = None
        g_array = None
        try:
            # Settings
            g_count = greenlet_count
            g_ms = 10000

            # Continue callback loop
            self.callback_return = True

            # Go
            self.redis_cache = RedisCache()

            # Item count
            self.bench_item_count = bench_item_count
            self.bench_put_weight = put_count
            self.bench_get_weight = get_count
            self.bench_ttl_min_ms = 1000
            self.bench_ttl_max_ms = int(g_ms / 2)

            # Go
            self.run_event = Event()
            self.exception_raised = 0
            self.open_count = 0
            self.thread_running = AtomicIntSafe()
            self.thread_running_ok = AtomicIntSafe()

            # Item per greenlet
            item_per_greenlet = self.bench_item_count / g_count

            # Signal
            self.gorun_event = Event()

            # Alloc events; greenlets are created by spawn() below
            g_array = list()
            g_event = list()
            for _ in range(0, g_count):
                g_event.append(Event())

            # Run them. Note: Greenlet.spawn is a classmethod that creates AND
            # starts a greenlet; calling it on a pre-allocated instance would
            # start a *new* greenlet and leave the stored one dead, so we keep
            # the spawned instance for the kill() in the finally block.
            cur_idx = 0
            for idx in range(0, g_count):
                event = g_event[idx]
                greenlet = Greenlet.spawn(self._run_cache_bench, event, cur_idx,
                                          cur_idx + item_per_greenlet)
                g_array.append(greenlet)
                cur_idx += item_per_greenlet
                SolBase.sleep(0)

            # Signal
            self.gorun_event.set()

            # Wait a bit
            dt = SolBase.mscurrent()
            while SolBase.msdiff(dt) < g_ms:
                SolBase.sleep(500)
                # Stat
                ms = SolBase.msdiff(dt)
                sec = float(ms / 1000.0)
                total_put = Meters.aig("rcs.cache_put")
                per_sec_put = round(float(total_put) / sec, 2)
                total_get = Meters.aig("rcs.cache_get_hit") + Meters.aig(
                    "rcs.cache_get_miss")
                per_sec_get = round(float(total_get) / sec, 2)

                logger.info(
                    "Running..., count=%s, run=%s, ok=%s, put/sec=%s get/sec=%s, cache=%s",
                    self.open_count, self.thread_running.get(),
                    self.thread_running_ok.get(), per_sec_put, per_sec_get,
                    self.redis_cache)
                self.assertEqual(self.exception_raised, 0)

            # Over, signal
            logger.info("Signaling, count=%s", self.open_count)
            self.run_event.set()

            # Wait
            for g in g_event:
                g.wait(30.0)
                self.assertTrue(g.isSet())

            g_event = None
            g_array = None

            # Log
            Meters.write_to_logger()
        finally:
            self.run_event.set()
            if g_event:
                for g in g_event:
                    g.set()

            if g_array:
                for g in g_array:
                    g.kill()

            if self.redis_cache:
                self.redis_cache.stop_cache()
                self.redis_cache = None
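Buried in the benchmark is a reusable coordination scheme: one shared "go" Event (gorun_event above) releases all workers at once, a second Event (run_event above) signals them to wind down, and one Event per worker lets the test wait for clean exits. A minimal sketch of just that scheme:

import gevent
from gevent import Greenlet
from gevent.event import Event

go_event = Event()
stop_event = Event()

def worker(done_event):
    go_event.wait()                   # all workers start together
    while not stop_event.is_set():
        gevent.sleep(0.01)            # the benchmarked work goes here
    done_event.set()                  # signal a clean exit

done_events = [Event() for _ in range(4)]
workers = [Greenlet.spawn(worker, ev) for ev in done_events]

go_event.set()                        # release the workers
gevent.sleep(0.1)                     # let the benchmark run
stop_event.set()                      # ask the workers to stop
for ev in done_events:
    ev.wait(5.0)
    assert ev.is_set()                # every worker exited cleanly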
Example #33
 def handle_multi_device_job(self, job_handler, result_handler):
     job_worker_pool = Pool(self.max_job_task)
     for device_id in self.job_params['device_list']:
         job_worker_pool.start(
             Greenlet(job_handler.handle_job, result_handler, device_id))
     job_worker_pool.join()
Example #34
    def mainSocket(self, ws, path):
        path = str(path).split("/")
        queryArgs = dict(request.args.items())

        sessionId = request.cookies.get("session")

        # wait for the other socket to close if we were bounced
        sleep(.25)

        sessionState = self._getSessionState(sessionId)

        self._logger.info("entering websocket with path %s", path)
        reader = None
        isFirstMessage = True

        # set up message tracking
        timestamps = []

        lastDumpTimestamp = time.time()
        lastDumpMessages = 0
        lastDumpFrames = 0
        lastDumpTimeSpentCalculating = 0.0

        # set up cells
        cells = Cells(self.db)

        # reset the session state. There's only one session state per Cells
        # instance (which is why we keep a list of sessions).
        sessionState._reset(cells)

        cells = cells.withRoot(
            Subscribed(
                lambda: self.displayForPathAndQueryArgs(path, queryArgs)),
            serialization_context=self.db.serializationContext,
            session_state=sessionState)

        # large messages (more than frames_per_ack frames) send an ack
        # after every frames_per_ackth message
        largeMessageAck = gevent.queue.Queue()
        reader = Greenlet.spawn(
            functools.partial(readThread, ws, cells, largeMessageAck,
                              self._logger))

        self._logger.info("Starting main websocket handler with %s", ws)

        while not ws.closed:
            t0 = time.time()
            try:
                # make sure user is authenticated
                user = self.login_plugin.load_user(current_user.username)
                if not user.is_authenticated:
                    ws.close()
                    return

                messages = cells.renderMessages()

                lastDumpTimeSpentCalculating += time.time() - t0

                if isFirstMessage:
                    self._logger.info("Completed first rendering loop")
                    isFirstMessage = False

                for message in messages:
                    gevent.socket.wait_write(ws.stream.handler.socket.fileno())

                    writeJsonMessage(message, ws, largeMessageAck,
                                     self._logger)

                    lastDumpMessages += 1

                lastDumpFrames += 1
                # log slow messages
                if time.time() - lastDumpTimestamp > 60.0:
                    self._logger.info(
                        "In the last %.2f seconds, spent %.2f seconds"
                        " calculating %s messages over %s frames",
                        time.time() - lastDumpTimestamp,
                        lastDumpTimeSpentCalculating, lastDumpMessages,
                        lastDumpFrames)

                    lastDumpFrames = 0
                    lastDumpMessages = 0
                    lastDumpTimeSpentCalculating = 0
                    lastDumpTimestamp = time.time()

                # tell the browser to execute the postscripts that it has built up
                writeJsonMessage("postscripts", ws, largeMessageAck,
                                 self._logger)

                # request an ACK from the browser before sending any more data
                # otherwise it can get overloaded and crash because it can't keep
                # up with the data volume
                writeJsonMessage("request_ack", ws, largeMessageAck,
                                 self._logger)

                ack = largeMessageAck.get()
                if ack is StopIteration:
                    raise Exception("Websocket closed.")

                cells.wait()

                timestamps.append(time.time())

                if len(timestamps) > MAX_FPS:
                    timestamps = timestamps[-MAX_FPS + 1:]
                    if (time.time() - timestamps[0]) < 1.0:
                        sleep(1.0 / MAX_FPS + .001)

            except Exception:
                self._logger.error("Websocket handler error: %s",
                                   traceback.format_exc())
                self.sessionStates[sessionId].append(sessionState)

                self._logger.info(
                    "Returning session state to pool for %s. Have %s",
                    sessionId, len(self.sessionStates[sessionId]))

                if reader:
                    reader.join()
Example #35
    def _start(self):

        try:
            max_queue_size = self.config["queue_max_size"]
        except KeyError:
            max_queue_size = 1000  # default queue bound

        self.gevent_queue = Queue(max_queue_size)

        self.intent_handler = IntentHandler(self.logger, self.config)
        self.greenlet = Greenlet.spawn(
            self.intent_handler.executeQueuedRequests, self.gevent_queue,
            self.api.handle_request_indication)
        self.logger.info("started greenlet")

        if self.config.get("enable_intent"):
            self.logger.info("starting intent server")
            from server import IntentServer
            self.intent_server = IntentServer(
                self.api.handle_request_indication, self.gevent_queue,
                self.config, self.logger)
            self.gevent_threadpool = ThreadPool(1)
            self.gevent_threadpool_worker = self.gevent_threadpool.spawn(
                self.intent_server.start)
            #self.intent_server.start()
            from .activate import PA_Activation
            pa_activation = PA_Activation(self.config, self.logger)
            pa_activation.start()

        for endpoint in self.config["endpoints"]:
            self.api.register_endpoint(
                endpoint["personality"],
                "%s://%s" % ("intent", endpoint["interface"]))

        if self.config.get("enable_test"):
            from .test import test_read_params, test_create_app, test_create_app_property, test_subscribe_pushed_data, test_push_data, test_destroy_app, test_subscribe_apps_with_search_str
            from .test import test_create_app_with_search_str, test_discover_apps_with_search_str, test_get_app, test_get_all_properties, test_get_latest_data_of_property
            contact = "intent://intent_test/m2m"
            from .test_retarget import test_retarget

            test_retarget(self.api.handle_request_indication, self.logger,
                          self.config, "retrieve", "/m2m")
            #            contact = "http://localhost:8080"
            test_read_params(self.api.handle_request_indication, self.logger,
                             self.config)
            self.logger.info(
                "============================================================")

            #           test_create_app(self.api.handle_request_indication, self.logger, self.config, "myApp")
            self.logger.info(
                "============================================================")

            #           test_create_app_property(self.api.handle_request_indication, self.logger, self.config, "myApp", "myProperty")
            self.logger.info(
                "============================================================")

            #           test_subscribe_pushed_data(self.api.handle_request_indication, self.logger, self.config, "myApp", "myProperty", contact)
            self.logger.info(
                "============================================================")

            #            test_push_data(self.api.handle_request_indication, self.logger, self.config, "myApp", "myProperty")
            self.logger.info(
                "============================================================")

            #            test_get_all_properties(self.api.handle_request_indication, self.logger, self.config, "myApp")
            self.logger.info(
                "============================================================")

            #            test_get_latest_data_of_property(self.api.handle_request_indication, self.logger, self.config, "myApp", "myProperty")
            self.logger.info(
                "============================================================")

            #            test_destroy_app(self.api.handle_request_indication, self.logger, self.config, "myApp")
            self.logger.info(
                "============================================================")

            #            test_subscribe_apps_with_search_str(self.api.handle_request_indication, self.logger, self.config, "healthDevice", contact)
            test_subscribe_apps_with_search_str(
                self.api.handle_request_indication, self.logger, self.config,
                None, contact)
            self.logger.info(
                "============================================================")

            #            test_create_app_with_search_str(self.api.handle_request_indication, self.logger, self.config, "myApp", "healthDevice")
            #            test_create_app_with_search_str(self.api.handle_request_indication, self.logger, self.config, "myApp", None)
            self.logger.info(
                "============================================================")

            #            test_discover_apps_with_search_str(self.api.handle_request_indication, self.logger, self.config, "healthDevice", "intent://test_action")
            self.logger.info(
                "============================================================")

            #            test_get_app(self.api.handle_request_indication, self.logger, self.config, "myApp", "intent://test_action")
            self.logger.info(
                "============================================================")

            #            test_destroy_app(self.api.handle_request_indication, self.logger, self.config, "myApp")
            self.logger.info(
                "============================================================")

        self._started()
Example #36
 def __init__(self, mobile, apid, aid):
     # super(greenlet, self).__init__()
     Greenlet.__init__(self)
     self.mobile = mobile
     self.apid = apid
     self.aid = aid
Example #37
 def __init__(self, _imsi):
     # super(greenlet, self).__init__()
     Greenlet.__init__(self)
     self._imsi = _imsi
Example #38
 def __init__(self, _user, _cmd_info, ivrConfig):
     # super(greenlet, self).__init__()
     Greenlet.__init__(self)
     self._user = _user
     self._cmd_info = _cmd_info
     self.ivrConfig = ivrConfig
Example #39
 def __init__(self, user, wxMoConfig, mobile):
     Greenlet.__init__(self)
     self.user = user
     self.wxMoConfig = wxMoConfig
     self.mobile = mobile
Example #40
 def rawlink(self, callback):
     if not self.result.ready() and not self._waiting:
         self._waiting = True
         Greenlet.spawn(self._wait)
     self.result.rawlink(linkproxy(callback, self))
Example #41
 def throw(self, *args, **kwargs):
     # pylint:disable=arguments-differ
     if self.saved is None and self._fileobj is not None:
         self.switch_in()
     Greenlet.throw(self, *args, **kwargs)
Example #42
File: pool.py Project: kienhung/phalconX
 def apply_cb(self, func, args=None, kwds=None, callback=None):
     result = self.apply(func, args, kwds)
     if callback is not None:
         Greenlet.spawn(callback, result)
     return result
Example #43
 def run(self):
     try:
         return Greenlet.run(self)
     finally:
         # Make sure to restore the originals.
         self.switch_out()
Example #44
 def adjust(self):
     self._adjust_step()
     if not self.manager and self._size > self._maxsize:
         # might need to feed more Nones into the pool
         self.manager = Greenlet.spawn(self._adjust_wait)
Example #45
 def __init__(self, imsi, apid, aid):
     # super(greenlet, self).__init__()
     Greenlet.__init__(self)
     self.imsi = imsi
     self.apid = apid
     self.aid = aid
Example #46
 def __init__(self, _user, _paraName):
     # super(greenlet, self).__init__()
     Greenlet.__init__(self)
     self._user = _user
     self._paraName = _paraName
Example #47
 def __init__(self, info):
     # super(greenlet, self).__init__()
     Greenlet.__init__(self)
     self.info = info
Example #48
 def switch(self, *args, **kw):
     if self._fileobj is not None:
         self.switch_in()
     Greenlet.switch(self, *args, **kw)
Example #49
 def switch(self, *args, **kw):
     if self.stdin is not None:
         self.switch_in()
     Greenlet.switch(self, *args, **kw)
Example #50
class VizTransformProcForMatplotlibGraphs(TransformDataProcess):
    """
    This class is used for instantiating worker processes that subscribe to data streams and convert
    incoming data from the CDM format into Matplotlib graphs

    """
    def on_start(self):
        super(VizTransformProcForMatplotlibGraphs, self).on_start()
        #assert len(self.streams)==1
        self.initDataFlag = True
        self.graph_data = {
        }  # Stores a dictionary of variables : [List of values]

        # Need some clients
        self.rr_cli = ResourceRegistryServiceProcessClient(
            process=self, node=self.container.node)
        self.pubsub_cli = PubsubManagementServiceClient(
            node=self.container.node)

        # extract the various parameters passed to the transform process
        self.out_stream_id = self.CFG.get('process').get(
            'publish_streams').get('visualization_service_submit_stream_id')

        # Create a publisher on the output stream
        #stream_route = self.pubsub_cli.register_producer(stream_id=self.out_stream_id)
        out_stream_pub_registrar = StreamPublisherRegistrar(
            process=self.container, node=self.container.node)
        self.out_stream_pub = out_stream_pub_registrar.create_publisher(
            stream_id=self.out_stream_id)

        self.data_product_id = self.CFG.get('data_product_id')
        self.stream_def_id = self.CFG.get("stream_def_id")
        self.stream_def = self.rr_cli.read(self.stream_def_id)

        # Start the thread responsible for keeping track of time and generating graphs
        # Mutex for ensuring proper concurrent communications between threads
        self.lock = RLock()
        self.rendering_proc = Greenlet(self.rendering_thread)
        self.rendering_proc.start()

    def process(self, packet):
        log.debug('(%s): Received Viz Data Packet' % self.name)
        #log.debug('(%s):   - Processing: %s' % (self.name,packet))

        # parse the incoming data
        psd = PointSupplementStreamParser(
            stream_definition=self.stream_def.container, stream_granule=packet)

        # re-arrange incoming data into an easy to parse dictionary
        vardict = {}
        arrLen = None
        for varname in psd.list_field_names():
            vardict[varname] = psd.get_values(varname)
            arrLen = len(vardict[varname])

        if self.initDataFlag:
            # look at the incoming packet and store
            for varname in psd.list_field_names():
                self.lock.acquire()
                self.graph_data[varname] = []
                self.lock.release()

            self.initDataFlag = False

        # If code reached here, the graph data storage has been initialized. Just add values
        # to the list
        with self.lock:
            for varname in psd.list_field_names():
                self.graph_data[varname].extend(vardict[varname])

    def rendering_thread(self):
        from copy import deepcopy
        # Service Client

        # init Matplotlib
        fig = Figure()
        ax = fig.add_subplot(111)
        canvas = FigureCanvas(fig)
        imgInMem = StringIO.StringIO()
        while True:

            # Sleep for a pre-decided interval. Should be specifiable in a YAML file
            gevent.sleep(20)

            # If there's no data, wait
            # Lock is used here to make sure the entire vector exists start to finish; this assures the data won't change while we copy it
            working_set = None
            with self.lock:
                if len(self.graph_data) == 0:
                    continue
                else:
                    working_set = deepcopy(self.graph_data)

            # For the simple case of testing, let's plot all time-variant variables one at a time
            xAxisVar = 'time'
            xAxisFloatData = working_set[xAxisVar]

            for varName, varData in working_set.iteritems():
                if varName == 'time' or varName == 'height' or varName == 'longitude' or varName == 'latitude':
                    continue

                yAxisVar = varName
                yAxisFloatData = working_set[varName]

                # Generate the plot

                ax.plot(xAxisFloatData, yAxisFloatData, 'ro')
                ax.set_xlabel(xAxisVar)
                ax.set_ylabel(yAxisVar)
                ax.set_title(yAxisVar + ' vs ' + xAxisVar)
                ax.set_autoscale_on(False)

                # generate filename for the output image
                fileName = yAxisVar + '_vs_' + xAxisVar + '.png'
                # Save the figure to the in memory file
                canvas.print_figure(imgInMem, format="png")
                imgInMem.seek(0)

                # submit resulting table back using the out stream publisher
                msg = {
                    "viz_product_type": "matplotlib_graphs",
                    "data_product_id": self.data_product_id,
                    "image_obj": imgInMem.getvalue(),
                    "image_name": fileName
                }
                self.out_stream_pub.publish(msg)

                #clear the canvas for the next image
                ax.clear()
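The transform above is a producer/renderer split: process() appends to shared state under a lock, and the rendering greenlet takes a deepcopy snapshot under the same lock so plotting never races the writer. The core of that pattern, detached from the pyon and Matplotlib machinery (gevent.lock.RLock stands in for the snippet's unshown RLock import):

import gevent
from copy import deepcopy
from gevent import Greenlet
from gevent.lock import RLock

graph_data = {"time": []}
lock = RLock()

def process(packet):                 # called for every incoming packet
    with lock:
        graph_data["time"].extend(packet)

def render(data):                    # stand-in for the Matplotlib code
    print("rendering %d points" % len(data["time"]))

def rendering_thread():
    while True:
        gevent.sleep(0.05)           # pre-decided render interval
        with lock:                   # snapshot under the lock...
            if not graph_data["time"]:
                continue
            working_set = deepcopy(graph_data)
        render(working_set)          # ...then render outside it

Greenlet(rendering_thread).start()
process([1.0, 2.0, 3.0])
gevent.sleep(0.1)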
Example #51
 def __init__(self, dev_eui, server, rx_window, *args):
     Greenlet.__init__(self)
     self.server = server
     self.dev_eui = dev_eui
     self.rx_window = rx_window
     self.args = list(args)
Example #52
 def __init__(self, arg):
     # super(greenlet, self).__init__()
     Greenlet.__init__(self)
     self.arg = arg
Example #53
def start_new_thread(function, args=(), kwargs=None):
    if kwargs is not None:
        greenlet = Greenlet.spawn(function, *args, **kwargs)
    else:
        greenlet = Greenlet.spawn(function, *args)
    return get_ident(greenlet)
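This is gevent's thread-module emulation in miniature: the "thread id" it returns is just the ident of the spawned greenlet. A usage sketch, assuming the start_new_thread above is in scope (worker is hypothetical):

import gevent

def worker(name, count=1):
    for _ in range(count):
        print("hello from %s" % name)

ident = start_new_thread(worker, ("greenlet-thread",), {"count": 2})
gevent.sleep(0)   # yield so the spawned greenlet actually runs
print(ident)      # the greenlet's ident, standing in for a thread id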
Example #54
 def __init__(self, _user, _cmd_info):
     # super(greenlet, self).__init__()
     Greenlet.__init__(self)
     self._user = _user
     self._cmd_info = _cmd_info
Example #55
 def __init__(self, _record, _user, _return):
     # super(greenlet, self).__init__()
     Greenlet.__init__(self)
     self._record = _record
     self._user = _user
     self._return = _return
Example #56
def start_new_thread(function, args=(), kwargs={}):
    greenlet = Greenlet.spawn(function, *args, **kwargs)
    return get_ident(greenlet)
Example #57
 def __init__(self, _record):
     # super(greenlet, self).__init__()
     Greenlet.__init__(self)
     self._record = _record
Example #58
 def __init__(self, imsi):
     # super(greenlet, self).__init__()
     Greenlet.__init__(self)
     self.imsi = imsi
Example #59
 def _apply_async_cb_spawn(self, callback, result):
     Greenlet.spawn(callback, result)
Example #60
 def throw(self, *args, **kwargs):
     # pylint:disable=arguments-differ
     if self.prev_stdin is None and self.stdin is not None:
         self.switch_in()
     Greenlet.throw(self, *args, **kwargs)