Example #1
 def serve_forever(self, *args, **kwargs):
     stop_timeout = kwargs.pop('stop_timeout', 0)
     self.start(*args, **kwargs)
     try:
         self._stopped_event.wait()
     finally:
         Greenlet.spawn(self.stop, timeout=stop_timeout).join()
Example #2
 def init_stream_listeners(self, stream_id):
     self.event_listeners[stream_id] = {}
     self.event_queue[stream_id] = gevent.queue.Queue()
     self.last_few_events[stream_id] = StreamEvent.get_events(stream_id)

     Greenlet.spawn(EventListeners.start_publishing_events, self, stream_id)
Example #3
    def init_stream_listeners(self, stream_id):
        self.event_listeners[stream_id] = {}
        self.event_queue[stream_id] = gevent.queue.Queue()
        self.last_few_events[stream_id] = StreamEvent.get_events(stream_id)

        Greenlet.spawn(EventListeners.start_publishing_events, self, stream_id)
Example #4
 def serve_forever(self, stop_timeout=None):
     """Start the server if it hasn't been already started and wait until it's stopped."""
     # add test that serve_forever exits on stop()
     if not self.started:
         self.start()
     try:
         self._stop_event.wait()
     finally:
         Greenlet.spawn(self.stop, timeout=stop_timeout).join()
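The serve_forever() excerpts above all follow the same gevent pattern: start the server if needed, block on a stop event, and always spawn stop() on the way out. A minimal usage sketch, assuming gevent's StreamServer (the handler and address below are illustrative, not part of the listing):

from gevent.server import StreamServer

def echo(sock, address):
    # Read one line from the client and echo it back.
    line = sock.makefile(mode='rb').readline()
    sock.sendall(line)

if __name__ == '__main__':
    server = StreamServer(('127.0.0.1', 6000), echo)
    # Blocks until stop() is called (or the process is interrupted),
    # then shuts down via the pattern shown above.
    server.serve_forever()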
Example #5
 def serve_forever(self, stop_timeout=None):
     """Start the server if it hasn't been already started and wait until it's stopped."""
     # add test that serve_forever exits on stop()
     if not self.started:
         self.start()
     try:
         self._stop_event.wait()
     finally:
         Greenlet.spawn(self.stop, timeout=stop_timeout).join()
Example #6
 def on_subscribe(self, message):
     # validate that user is actually allowed to perform these actions (we
     # should probably replace this with actual channel auth negotiation at
     # the namespace level)
     game_id = message.pop('game_id')
     if not self.validate(game_id):
         self.emit('error', {'error': 'unauthorized'})
         return
     # spawn a thread to listen for messages from redis
     channel_id = game_id + '-room'
     Greenlet.spawn(self.listener, channel_id)
Example #7
 def on_subscribe(self, message):
     # validate that user is actually allowed to perform these actions (we
     # should probably replace this with actual channel auth negotiation at
     # the namespace level)
     game_id = message.pop('game_id')
     if not self.validate(game_id):
         self.emit('error', {'error': 'unauthorized'})
         return
     # spawn a thread to listen for messages from redis
     channel_id = game_id + '-room'
     Greenlet.spawn(self.listener, channel_id)
Example #8
    def start(self):
        """Start accepting the connections.

        If an address was provided in the constructor, then also create a socket, bind it and put it into the listening mode.
        """
        self.pre_start()
        try:
            self.start_accepting()
        except Exception:
            Greenlet.spawn(self.kill).join()
            raise
Example #9
    def start(self):
        """Start accepting the connections.

        If an address was provided in the constructor, then also create a socket, bind it and put it into the listening mode.
        """
        self.pre_start()
        try:
            self.start_accepting()
        except Exception:
            Greenlet.spawn(self.kill).join()
            raise
Example #10
 def start_publishing_events(self, stream_id):
     while True:
         event_data = self.event_queue[stream_id].get()
         event_id = event_data["event_id"]
         data_to_send = json_util.dumps(event_data).replace("\r\n", "\n\n")
         if event_id != Event.RESET_POLLS_AND_SONG:
             EventListeners.last_few_events[stream_id].append(data_to_send)
         else:
             self.last_reset_event[stream_id] = data_to_send

         if len(EventListeners.last_few_events[stream_id]) > 20:
             EventListeners.last_few_events[stream_id].pop(0)
         StreamEvent.add(stream_id, data_to_send)
         for socket in self.event_listeners[stream_id]:
             # send data in parallel ?
             Greenlet.spawn(EventListeners.send_event, self, stream_id, socket, data_to_send)
Example #11
    def new_connection(self, sock):
        greenlet_recv = Greenlet.spawn(self._handle, sock)
        greenlet_send = Greenlet.spawn(self._write, sock)

        # closure
        def close(gr):
            greenlet_recv.kill()
            greenlet_send.kill()
            if sock not in self._send_queue:
                return
            sock.close()
            del self._send_queue[sock]

        greenlet_recv.link(close)
        greenlet_send.link(close)
        self._send_queue[sock] = Queue()
Example #12
 def listen(self, host, port, backlog=1):
     sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
     sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
     sock.bind((host, port))
     sock.listen(backlog)
     greenlet_accept = Greenlet.spawn(self._do_accept, sock)
     self._greenlet_accept[sock] = greenlet_accept
Example #13
 def listen(self, host, port, backlog=1):
     sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
     sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
     sock.bind((host, port))
     sock.listen(backlog)
     greenlet_accept = Greenlet.spawn(self._do_accept, sock)
     self._greenlet_accept[sock] = greenlet_accept
Example #14
    def new_connection(self, sock):
        greenlet_recv = Greenlet.spawn(self._handle, sock)
        greenlet_send = Greenlet.spawn(self._write, sock)

        # closure
        def close(gr):
            greenlet_recv.kill()
            greenlet_send.kill()
            if sock not in self._send_queue:
                return
            sock.close()
            del self._send_queue[sock]

        greenlet_recv.link(close)
        greenlet_send.link(close)
        self._send_queue[sock] = Queue()
Example #15
def test_close_connections():
    """
    A test that exposes the problem where connections are returned to the
    connection pool (and closed) before the caller reads the response.
    
    I couldn't think of a way to test it without greenlets, so this test
    doesn't run as part of the standard test suite.  That way, no more
    dependencies are added to the test suite.
    """
    
    print "Running test_close_connections"

    # Connect to S3
    s3 = boto.connect_s3()

    # Clean previous tests.
    for b in s3.get_all_buckets():
        if b.name.startswith('test-'):
            for key in b.get_all_keys():
                key.delete()
            b.delete()

    # Make a test bucket
    bucket = s3.create_bucket('test-%d' % int(time.time()))

    # Create 30 threads that each create an object in S3.  The number
    # 30 is chosen because it is larger than the connection pool size
    # (20). 
    names = [str(uuid.uuid4()) for _ in range(30)]  # uuid4() must be called to get distinct names
    threads = [
        Greenlet.spawn(put_object, bucket, name)
        for name in names
        ]
    for t in threads:
        t.get()

    # Create 30 threads to read the contents of the new objects.  This
    # is where closing the connection early is a problem, because
    # there is a response that needs to be read, and it can't be read
    # if the connection has already been closed.
    threads = [
        Greenlet.spawn(get_object, bucket, name)
        for name in names
        ]
    for t in threads:
        t.get()
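The test above assumes put_object and get_object helpers that are not shown in the listing. A hypothetical sketch of what they might look like against the boto 2 API:

def put_object(bucket, name):
    # Write one small object; each call runs in its own greenlet.
    key = bucket.new_key(name)
    key.set_contents_from_string('data for %s' % name)

def get_object(bucket, name):
    # Read the object back; a connection returned to the pool too early
    # would surface here as a failure to read the response.
    key = bucket.get_key(name)
    return key.get_contents_as_string()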
Example #16
    def map_async(self, func, iterable, callback=None):
        """
        A variant of the map() method which returns a Greenlet object.

        If callback is specified then it should be a callable which accepts a
        single argument.
        """
        return Greenlet.spawn(self.map_cb, func, iterable, callback)
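A short usage sketch (assumed, not from the listing): the greenlet returned by map_async() resolves to the full result list, and the optional callback receives that same list.

from gevent.pool import Pool

def square(x):
    return x * x

pool = Pool(4)
g = pool.map_async(square, [1, 2, 3], callback=print)
g.join()  # by now the callback has been called with [1, 4, 9]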
Example #17
    def start_publishing_events(self, stream_id):
        while (True):
            event_data = self.event_queue[stream_id].get()
            event_id = event_data["event_id"]
            data_to_send = json_util.dumps(event_data).replace("\r\n", "\n\n")
            if (event_id != Event.RESET_POLLS_AND_SONG):
                EventListeners.last_few_events[stream_id].append(data_to_send)
            else:
                self.last_reset_event[stream_id] = data_to_send

            if (len(EventListeners.last_few_events[stream_id]) > 20):
                EventListeners.last_few_events[stream_id].pop(0)
            StreamEvent.add(stream_id, data_to_send)
            for socket in self.event_listeners[stream_id]:
                # send data in parallel ?
                Greenlet.spawn(EventListeners.send_event, self, stream_id,
                               socket, data_to_send)
Example #18
def serve_forever(*servers, **opts):
    for server in servers:
        server.start()
    try:
        Event().wait()
    finally:
        stop_timeout = opts.get("stop_timeout")
        for th in [Greenlet.spawn(x.stop, timeout=stop_timeout) for x in servers]:
            th.join()
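A usage sketch for the helper above; the servers and handlers are assumptions for illustration, not part of the listing.

from gevent.pywsgi import WSGIServer
from gevent.server import StreamServer

def echo(sock, address):
    sock.sendall(b'hello\n')

def app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello\n']

# Run both servers until interrupted, then stop each with a 5-second timeout.
serve_forever(StreamServer(('0.0.0.0', 6000), echo),
              WSGIServer(('0.0.0.0', 8080), app),
              stop_timeout=5)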
Example #19
    def __init__(self, work_dir, config, curses_screen=None):
        """
        Main class which runs Beeswarm in Client mode.

        :param work_dir: Working directory (usually the current working directory).
        :param config: Beeswarm configuration dictionary.
        :param curses_screen: A curses screen object, if the UI is enabled. Default is None.
        """
        self.run_flag = True
        self.curses_screen = curses_screen

        with open('beeswarmcfg.json', 'r') as config_file:
            self.config = json.load(config_file, object_hook=asciify)

        # write ZMQ keys to files - as expected by pyzmq
        extract_keys(work_dir, config)

        BaitSession.client_id = self.config['general']['id']
        # TODO: Handle peering in other place
        BaitSession.honeypot_id = self.config['general']['id']

        if self.config['public_ip']['fetch_ip']:
            self.my_ip = urllib2.urlopen('http://api-sth01.exip.org/?call=ip').read()
            logger.info('Fetched {0} as my external ip.'.format(self.my_ip))
        else:
            self.my_ip = '127.0.0.1'

        self.status = {
            'mode': 'Client',
            'total_bees': 0,
            'active_bees': 0,
            'enabled_bees': [],
            'client_id': self.config['general']['client_id'],
            'managment_url': self.config['beeswarm_server']['managment_url'],
            'ip_address': self.my_ip
        }

        self.dispatchers = {}
        self.dispatcher_greenlets = []

        if self.curses_screen is not None:
            self.uihandler = ClientUIHandler(self.status, self.curses_screen)
            Greenlet.spawn(self.show_status_ui)
Example #20
    def apply_async(self, func, args=None, kwds=None, callback=None):
        """A variant of the apply() method which returns a Greenlet object.

        If callback is specified then it should be a callable which accepts a single argument. When the result becomes ready
        callback is applied to it (unless the call failed)."""
        if args is None:
            args = ()
        if kwds is None:
            kwds = {}
        return Greenlet.spawn(self.apply_cb, func, args, kwds, callback)
Example #21
def socketio(request):
    socketio = request.environ['socketio']

    while True:
        message = socketio.recv()

        if len(message) == 1:
            message = message[0].split(':')

            if message[0] == 'subscribe':
                print 'spawning sub listener'
                g = Greenlet.spawn(_sub_listener, socketio, message[1])

    return HttpResponse()
Example #22
def socketio(request):
    socketio = request.environ['socketio']

    while True:
        message = socketio.recv()

        if len(message) == 1:
            message = message[0].split(':')

            if message[0] == 'subscribe':
                print 'spawning sub listener'
                g = Greenlet.spawn(_sub_listener, socketio, message[1])

    return HttpResponse()
Example #23
    def apply_async(self, func, args=None, kwds=None, callback=None):
        """A variant of the apply() method which returns a Greenlet object.

        If callback is specified then it should be a callable which accepts a single argument. When the result becomes ready
        callback is applied to it (unless the call failed)."""
        if args is None:
            args = ()
        if kwds is None:
            kwds = {}
        if self.full():
            # cannot call spawn() directly because it will block
            return Greenlet.spawn(self.apply_cb, func, args, kwds, callback)
        else:
            greenlet = self.spawn(func, *args, **kwds)
            if callback is not None:
                greenlet.link(pass_value(callback))
            return greenlet
Example #24
    def apply_async(self, func, args=None, kwds=None, callback=None):
        """A variant of the apply() method which returns a Greenlet object.

        If callback is specified then it should be a callable which accepts a single argument. When the result becomes ready
        callback is applied to it (unless the call failed)."""
        if args is None:
            args = ()
        if kwds is None:
            kwds = {}
        if self.full():
            # cannot call spawn() directly because it will block
            return Greenlet.spawn(self.apply_cb, func, args, kwds, callback)
        else:
            greenlet = self.spawn(func, *args, **kwds)
            if callback is not None:
                greenlet.link(pass_value(callback))
            return greenlet
Example #25
def socketio(request):
    """
    Handles the appropriate subscribe message
    from the client and spawns off greenlet coroutines
    to monitor messages from redis.
    """
    socketio = request.environ['socketio']

    while True:
        message = socketio.recv()

        if len(message):
            message = message[0].split(':')

            if message[0] == 'subscribe':
                g = Greenlet.spawn(_subscribe_listener, socketio, message[1])

    return HttpResponse()
Example #26
    def apply_async(self, func, args=None, kwds=None, callback=None):
        """
        A variant of the :meth:`apply` method which returns a :class:`~.Greenlet` object.

        When the returned greenlet gets to run, it *will* call :meth:`apply`,
        passing in *func*, *args* and *kwds*.

        If *callback* is specified, then it should be a callable which
        accepts a single argument. When the result becomes ready
        callback is applied to it (unless the call failed).

        This method will never block, even if this group is full (that is,
        even if :meth:`spawn` would block, this method will not).

        .. caution:: The returned greenlet may or may not be tracked
           as part of this group, so :meth:`joining <join>` this group is
           not a reliable way to wait for the results to be available or
           for the returned greenlet to run; instead, join the returned
           greenlet.

        .. tip:: Because :class:`~.ThreadPool` objects do not track greenlets, the returned
           greenlet will never be a part of it. To reduce overhead and improve performance,
           :class:`Group` and :class:`Pool` may choose to track the returned
           greenlet. These are implementation details that may change.
        """
        if args is None:
            args = ()
        if kwds is None:
            kwds = {}
        if self._apply_async_use_greenlet():
            # cannot call self.spawn() directly because it will block
            # XXX: This is always the case for ThreadPool, but for Group/Pool
            # of greenlets, this is only the case when they are full...hence
            # the weasely language about "may or may not be tracked". Should we make
            # Group/Pool always return true as well so it's never tracked by any
            # implementation? That would simplify that logic, but could increase
            # the total number of greenlets in the system and add a layer of
            # overhead for the simple cases when the pool isn't full.
            return Greenlet.spawn(self.apply_cb, func, args, kwds, callback)

        greenlet = self.spawn(func, *args, **kwds)
        if callback is not None:
            greenlet.link(pass_value(callback))
        return greenlet
Example #27
    def apply_async(self, func, args=None, kwds=None, callback=None):
        """
        A variant of the :meth:`apply` method which returns a :class:`~.Greenlet` object.

        When the returned greenlet gets to run, it *will* call :meth:`apply`,
        passing in *func*, *args* and *kwds*.

        If *callback* is specified, then it should be a callable which
        accepts a single argument. When the result becomes ready
        callback is applied to it (unless the call failed).

        This method will never block, even if this group is full (that is,
        even if :meth:`spawn` would block, this method will not).

        .. caution:: The returned greenlet may or may not be tracked
           as part of this group, so :meth:`joining <join>` this group is
           not a reliable way to wait for the results to be available or
           for the returned greenlet to run; instead, join the returned
           greenlet.

        .. tip:: Because :class:`~.ThreadPool` objects do not track greenlets, the returned
           greenlet will never be a part of it. To reduce overhead and improve performance,
           :class:`Group` and :class:`Pool` may choose to track the returned
           greenlet. These are implementation details that may change.
        """
        if args is None:
            args = ()
        if kwds is None:
            kwds = {}
        if self._apply_async_use_greenlet():
            # cannot call self.spawn() directly because it will block
            # XXX: This is always the case for ThreadPool, but for Group/Pool
            # of greenlets, this is only the case when they are full...hence
            # the weasely language about "may or may not be tracked". Should we make
            # Group/Pool always return true as well so it's never tracked by any
            # implementation? That would simplify that logic, but could increase
            # the total number of greenlets in the system and add a layer of
            # overhead for the simple cases when the pool isn't full.
            return Greenlet.spawn(self.apply_cb, func, args, kwds, callback)

        greenlet = self.spawn(func, *args, **kwds)
        if callback is not None:
            greenlet.link(pass_value(callback))
        return greenlet
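A brief usage sketch (assumed): apply_async() returns a greenlet immediately, even when the pool is full, and the callback fires once the result is ready.

from gevent.pool import Pool

def add(a, b):
    return a + b

pool = Pool(2)
g = pool.apply_async(add, (1, 2), callback=print)  # prints 3 once computed
g.join()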
Example #28
def socketio(request):
    """
    This view will handle the 'subscribe' message
    from the client and spawn off greenlet coroutines
    to monitor messages on redis
    """
    socketio = request.environ["socketio"]

    while True:
        message = socketio.recv()

        if len(message) == 1:
            message = message[0].split(":")

            if message[0] == "subscribe":
                print "spawning sub listener"
                g = Greenlet.spawn(_sub_listener, socketio, message[1])

    return HttpResponse()
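The _sub_listener helper spawned by these views is not shown in the listing. A hypothetical sketch, assuming a Redis pub/sub backend and a gevent-socketio connection object:

import redis

def _sub_listener(socketio, channel):
    # Block on a Redis pub/sub channel and forward every message
    # to the connected socket.io client.
    client = redis.StrictRedis()
    pubsub = client.pubsub()
    pubsub.subscribe(channel)
    for item in pubsub.listen():
        if item['type'] == 'message':
            socketio.send(item['data'])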
Example #29
    def map(self, func, iterable):
        """Return a list made by applying the *func* to each element of
        the iterable.

        .. seealso:: :meth:`imap`
        """
        # We can't return until they're all done and in order. It
        # wouldn't seem to much matter what order we wait on them in,
        # so the simple, fast (50% faster than imap) solution would be:

        # return [g.get() for g in
        #           [self.spawn(func, i) for i in iterable]]

        # If the pool size is unlimited (or more than the len(iterable)), this
        # is equivalent to imap (spawn() will never block, all of them run concurrently,
        # we call get() in the order the iterable was given).

        # Now lets imagine the pool if is limited size. Suppose the
        # func is time.sleep, our pool is limited to 3 threads, and
        # our input is [10, 1, 10, 1, 1] We would start three threads,
        # one to sleep for 10, one to sleep for 1, and the last to
        # sleep for 10. We would block starting the fourth thread. At
        # time 1, we would finish the second thread and start another
        # one for time 1. At time 2, we would finish that one and
        # start the last thread, and then begin executing get() on the first
        # thread.

        # Because it's spawn that blocks, this is *also* equivalent to what
        # imap would do.

        # The one remaining difference is that imap runs in its own
        # greenlet, potentially changing the way the event loop runs.
        # That's easy enough to do.

        g = Greenlet.spawn(self.__map, func, iterable)
        return g.get()
Example #30
    def map(self, func, iterable):
        """Return a list made by applying the *func* to each element of
        the iterable.

        .. seealso:: :meth:`imap`
        """
        # We can't return until they're all done and in order. It
        # wouldn't seem to much matter what order we wait on them in,
        # so the simple, fast (50% faster than imap) solution would be:

        # return [g.get() for g in
        #           [self.spawn(func, i) for i in iterable]]

        # If the pool size is unlimited (or more than the len(iterable)), this
        # is equivalent to imap (spawn() will never block, all of them run concurrently,
        # we call get() in the order the iterable was given).

        # Now lets imagine the pool if is limited size. Suppose the
        # func is time.sleep, our pool is limited to 3 threads, and
        # our input is [10, 1, 10, 1, 1] We would start three threads,
        # one to sleep for 10, one to sleep for 1, and the last to
        # sleep for 10. We would block starting the fourth thread. At
        # time 1, we would finish the second thread and start another
        # one for time 1. At time 2, we would finish that one and
        # start the last thread, and then begin executing get() on the first
        # thread.

        # Because it's spawn that blocks, this is *also* equivalent to what
        # imap would do.

        # The one remaining difference is that imap runs in its own
        # greenlet, potentially changing the way the event loop runs.
        # That's easy enough to do.

        g = Greenlet.spawn(self.__map, func, iterable)
        return g.get()
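A small usage sketch (assumed): even with a bounded pool, map() returns results in input order.

from gevent.pool import Pool

pool = Pool(3)  # at most three greenlets run at once
results = pool.map(lambda n: n * n, range(5))
assert results == [0, 1, 4, 9, 16]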
Example #31
def start_new_thread(function, args=(), kwargs=None):
    if kwargs is not None:
        greenlet = Greenlet.spawn(function, *args, **kwargs)  # pylint:disable=not-a-mapping
    else:
        greenlet = Greenlet.spawn(function, *args)
    return get_ident(greenlet)
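For context, a sketch of how this emulation is typically exercised (assumed, not from the listing): once gevent's monkey patching is applied, the standard threading API ends up backed by greenlets through start_new_thread().

from gevent import monkey
monkey.patch_all()

import threading

def worker(name):
    print('hello from', name)

t = threading.Thread(target=worker, args=('a greenlet-backed thread',))
t.start()
t.join()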
Example #32
def start_new_thread(function, args=(), kwargs={}):
    greenlet = Greenlet.spawn(function, *args, **kwargs)
    return get_ident(greenlet)
Example #33
 def on_subscribe(self, message):
     # spawn a thread to listen for messages from redis
     Greenlet.spawn(self.listener, 'lobby')
Example #34
    def recv_message(self, message):
        action, pk = message.split(':')

        if action == 'subscribe':
            Greenlet.spawn(self.listener, pk)
Example #35
 def apply_cb(self, func, args=None, kwds=None, callback=None):
     result = self.apply(func, args, kwds)
     if callback is not None:
         Greenlet.spawn(callback, result)
     return result
Example #36
 def recv_connect(self):
     self.greenlet = Greenlet.spawn(self.listener)
Example #37
#print('Asynchronous:')
#asynchronous(start)
#print 'at %1.1f seconds' % (time.time() - start)

from gevent.coros import Semaphore
from gevent.greenlet import Greenlet
from gevent import sleep
import random
 
semaphore = Semaphore()
 
def test():
    #critical start!!!
    semaphore.acquire()
    for i in xrange(100):
        print "1"
        sleep(random.random())
        print "2"
    semaphore.release()
    #critical end!!!
 
if __name__ == "__main__":
    print "Hello World"
    print "Before ,"
    gs = []
    for i in xrange(100):
        gs.append(Greenlet.spawn(test))
    for g in gs:
        g.join()
    print "After ,"
Example #38
def start_new_thread(function, args=(), kwargs=None):
    if kwargs is not None:
        greenlet = Greenlet.spawn(function, *args, **kwargs)
    else:
        greenlet = Greenlet.spawn(function, *args)
    return get_ident(greenlet)
Example #39
 def recv_connect(self):
     self.greenlet = Greenlet.spawn(self.listener)
Example #40
    def _go_greenlet(self, greenlet_count, put_count, get_count,
                     bench_item_count):
        """
        Doc
        :param greenlet_count: greenlet_count
        :param put_count: put_count
        :param get_count: get_count
        :param bench_item_count: bench_item_count
        """

        g_event = None
        g_array = None
        try:
            # Settings
            g_count = greenlet_count
            g_ms = 10000

            # Continue callback loop
            self.callback_return = True

            # Go
            self.redis_cache = RedisCache()

            # Item count
            self.bench_item_count = bench_item_count
            self.bench_put_weight = put_count
            self.bench_get_weight = get_count
            self.bench_ttl_min_ms = 1000
            self.bench_ttl_max_ms = int(g_ms / 2)

            # Go
            self.run_event = Event()
            self.exception_raised = 0
            self.open_count = 0
            self.thread_running = AtomicIntSafe()
            self.thread_running_ok = AtomicIntSafe()

            # Item per greenlet
            item_per_greenlet = self.bench_item_count / g_count

            # Signal
            self.gorun_event = Event()

            # Alloc greenlet
            g_array = list()
            g_event = list()
            for _ in range(0, g_count):
                greenlet = Greenlet()
                g_array.append(greenlet)
                g_event.append(Event())

            # Run them
            cur_idx = 0
            for idx in range(0, len(g_array)):
                greenlet = g_array[idx]
                event = g_event[idx]
                greenlet.spawn(self._run_cache_bench, event, cur_idx,
                               cur_idx + item_per_greenlet)
                cur_idx += item_per_greenlet
                SolBase.sleep(0)

            # Signal
            self.gorun_event.set()

            # Wait a bit
            dt = SolBase.mscurrent()
            while SolBase.msdiff(dt) < g_ms:
                SolBase.sleep(500)
                # Stat
                ms = SolBase.msdiff(dt)
                sec = float(ms / 1000.0)
                total_put = Meters.aig("rcs.cache_put")
                per_sec_put = round(float(total_put) / sec, 2)
                total_get = Meters.aig("rcs.cache_get_hit") + Meters.aig(
                    "rcs.cache_get_miss")
                per_sec_get = round(float(total_get) / sec, 2)

                logger.info(
                    "Running..., count=%s, run=%s, ok=%s, put/sec=%s get/sec=%s, cache=%s",
                    self.open_count, self.thread_running.get(),
                    self.thread_running_ok.get(), per_sec_put, per_sec_get,
                    self.redis_cache)
                self.assertEqual(self.exception_raised, 0)

            # Over, signal
            logger.info("Signaling, count=%s", self.open_count)
            self.run_event.set()

            # Wait
            for g in g_event:
                g.wait(30.0)
                self.assertTrue(g.isSet())

            g_event = None
            g_array = None

            # Log
            Meters.write_to_logger()
        finally:
            self.run_event.set()
            if g_event:
                for g in g_event:
                    g.set()

            if g_array:
                for g in g_array:
                    g.kill()

            if self.redis_cache:
                self.redis_cache.stop_cache()
                self.redis_cache = None
Example #41
def start_new_thread(function, args=(), kwargs={}):
    greenlet = Greenlet.spawn(function, *args, **kwargs)
    return get_ident(greenlet)
Example #42
 def start_pubsub():
     Greenlet.spawn(InstrumentsPriceNamespace.pubsub_consumer)
Example #43
 def rawlink(self, callback):
     if not self.result.ready() and not self._waiting:
         self._waiting = True
         Greenlet.spawn(self._wait)
     self.result.rawlink(linkproxy(callback, self))
Example #44
 def adjust(self):
     if self.manager:
         return
     if self._adjust(self.maxsize):
         return
     self.manager = Greenlet.spawn(self._manage)
Example #45
 def adjust(self):
     self._adjust_step()
     if not self.manager and self._size > self._maxsize:
         # might need to feed more Nones into the pool
         self.manager = Greenlet.spawn(self._adjust_wait)
Example #46
 def _apply_async_cb_spawn(self, callback, result):
     Greenlet.spawn(callback, result)
Example #47
 def start_pubsub():
     Greenlet.spawn(InstrumentsPriceNamespace.pubsub_consumer)
Example #48
 def adjust(self):
     self._adjust_step()
     if not self.manager and len(self._worker_greenlets) > self._maxsize:
         # might need to feed more Nones into the pool to shutdown
         # threads.
         self.manager = Greenlet.spawn(self._adjust_wait)
Example #49
    def recv_message(self, message):
        action, pk = message.split(':')

        if action == 'subscribe':
            Greenlet.spawn(self.listener, pk)
Example #50
 def _apply_async_cb_spawn(self, callback, result):
     Greenlet.spawn(callback, result)
Example #51
 def rawlink(self, callback):
     if not self.result.ready() and not self._waiting:
         self._waiting = True
         Greenlet.spawn(self._wait)
     self.result.rawlink(linkproxy(callback, self))
Example #52
                http_is_redirect,http_redirect_number,http_ultimate_url, \
				http_ultimate_domainname,http_ultimate_protocol,https_is_redirect, \
				https_redirect_number,https_ultimate_url,https_ultimate_domainname, \
				https_ultimate_protocol) values('"                                            + domain_name + "','" + ip+ "','" +\
          website_property+ "'," +str(http_is_open)+ ",'" +\
                                      http_error+ "'," +str(https_is_open)+ ",'" +https_error\
          + "','" +\
          cert_name+ "'," +str(has_expired)+ ",'" +cert_chain_name+ "','" +\
          subject_cn+ "','" +issuer_cn+ "'," +str(cert_chain_depth)+ "," +\
                                      str(http_is_redirect)+ "," +str(http_redirect_number)+ ",'" +\
          http_ultimate_url+ "','" +\
                                      http_ultimate_domainname+ "','" +http_ultimate_protocol+ "'," +\
          str(https_is_redirect)+ "," +str(https_redirect_number)+ ",'" +\
          https_ultimate_url+ "','" +https_ultimate_domainname+"','"+\
          https_ultimate_protocol+"')"
        print insertsql
        db.execDB(insertsql)
        semaphore.release()


gs = []
for i in xrange(10):
    gs.append(Greenlet.spawn(task))
for g in gs:
    g.join()

db.closeDB()
endtime = datetime.datetime.now()
interval = (endtime - starttime).seconds
print interval
Example #53
 def apply_cb(self, func, args=None, kwds=None, callback=None):
     result = self.apply(func, args, kwds)
     if callback is not None:
         Greenlet.spawn(callback, result)
     return result
Example #54
def start_new_thread(function, args=(), kwargs=None):
    if kwargs is not None:
        greenlet = Greenlet.spawn(function, *args, **kwargs)
    else:
        greenlet = Greenlet.spawn(function, *args)
    return get_ident(greenlet)
Example #55
    def mainSocket(self, ws, path):
        path = str(path).split("/")
        queryArgs = dict(request.args.items())

        sessionId = request.cookies.get("session")

        # wait for the other socket to close if we were bounced
        sleep(.25)

        sessionState = self._getSessionState(sessionId)

        self._logger.info("entering websocket with path %s", path)
        reader = None
        isFirstMessage = True

        # set up message tracking
        timestamps = []

        lastDumpTimestamp = time.time()
        lastDumpMessages = 0
        lastDumpFrames = 0
        lastDumpTimeSpentCalculating = 0.0

        # set up cells
        cells = Cells(self.db)

        # reset the session state. There's only one per cells (which is why
        # we keep a list of sessions.)
        sessionState._reset(cells)

        cells = cells.withRoot(
            Subscribed(
                lambda: self.displayForPathAndQueryArgs(path, queryArgs)),
            serialization_context=self.db.serializationContext,
            session_state=sessionState)

        # large messages (more than frames_per_ack frames) send an ack
        # after every frames_per_ackth message
        largeMessageAck = gevent.queue.Queue()
        reader = Greenlet.spawn(
            functools.partial(readThread, ws, cells, largeMessageAck,
                              self._logger))

        self._logger.info("Starting main websocket handler with %s", ws)

        while not ws.closed:
            t0 = time.time()
            try:
                # make sure user is authenticated
                user = self.login_plugin.load_user(current_user.username)
                if not user.is_authenticated:
                    ws.close()
                    return

                messages = cells.renderMessages()

                lastDumpTimeSpentCalculating += time.time() - t0

                if isFirstMessage:
                    self._logger.info("Completed first rendering loop")
                    isFirstMessage = False

                for message in messages:
                    gevent.socket.wait_write(ws.stream.handler.socket.fileno())

                    writeJsonMessage(message, ws, largeMessageAck,
                                     self._logger)

                    lastDumpMessages += 1

                lastDumpFrames += 1
                # log slow messages
                if time.time() - lastDumpTimestamp > 60.0:
                    self._logger.info(
                        "In the last %.2f seconds, spent %.2f seconds"
                        " calculating %s messages over %s frames",
                        time.time() - lastDumpTimestamp,
                        lastDumpTimeSpentCalculating, lastDumpMessages,
                        lastDumpFrames)

                    lastDumpFrames = 0
                    lastDumpMessages = 0
                    lastDumpTimeSpentCalculating = 0
                    lastDumpTimestamp = time.time()

                # tell the browser to execute the postscripts that its built up
                writeJsonMessage("postscripts", ws, largeMessageAck,
                                 self._logger)

                # request an ACK from the browser before sending any more data
                # otherwise it can get overloaded and crash because it can't keep
                # up with the data volume
                writeJsonMessage("request_ack", ws, largeMessageAck,
                                 self._logger)

                ack = largeMessageAck.get()
                if ack is StopIteration:
                    raise Exception("Websocket closed.")

                cells.wait()

                timestamps.append(time.time())

                if len(timestamps) > MAX_FPS:
                    timestamps = timestamps[-MAX_FPS + 1:]
                    if (time.time() - timestamps[0]) < 1.0:
                        sleep(1.0 / MAX_FPS + .001)

            except Exception:
                self._logger.error("Websocket handler error: %s",
                                   traceback.format_exc())
                self.sessionStates[sessionId].append(sessionState)

                self._logger.info(
                    "Returning session state to pool for %s. Have %s",
                    sessionId, len(self.sessionStates[sessionId]))

                if reader:
                    reader.join()
Example #56
 def adjust(self):
     self._adjust_step()
     if not self.manager and self._size > self._maxsize:
         # might need to feed more Nones into the pool
         self.manager = Greenlet.spawn(self._adjust_wait)
Example #57
    def _start(self):

        try:
            max_queue_size = self.config["queue_max_size"]
        except:
            max_queue_size = 1000

        self.gevent_queue = Queue(max_queue_size)

        self.intent_handler = IntentHandler(self.logger, self.config)
        self.greenlet = Greenlet.spawn(
            self.intent_handler.executeQueuedRequests, self.gevent_queue,
            self.api.handle_request_indication)
        self.logger.info("started greenlet")

        if self.config.get("enable_intent"):
            self.logger.info("starting intent server")
            from server import IntentServer
            self.intent_server = IntentServer(
                self.api.handle_request_indication, self.gevent_queue,
                self.config, self.logger)
            self.gevent_threadpool = ThreadPool(1)
            self.gevent_threadpool_worker = self.gevent_threadpool.spawn(
                self.intent_server.start)
            #self.intent_server.start()
            from .activate import PA_Activation
            pa_activation = PA_Activation(self.config, self.logger)
            pa_activation.start()

        for endpoint in self.config["endpoints"]:
            self.api.register_endpoint(
                endpoint["personality"],
                "%s://%s" % ("intent", endpoint["interface"]))

        if self.config.get("enable_test"):
            from .test import test_read_params, test_create_app, test_create_app_property, test_subscribe_pushed_data, test_push_data, test_destroy_app, test_subscribe_apps_with_search_str
            from .test import test_create_app_with_search_str, test_discover_apps_with_search_str, test_get_app, test_get_all_properties, test_get_latest_data_of_property
            contact = "intent://intent_test/m2m"
            from .test_retarget import test_retarget

            test_retarget(self.api.handle_request_indication, self.logger,
                          self.config, "retrieve", "/m2m")
            #            contact = "http://localhost:8080"
            test_read_params(self.api.handle_request_indication, self.logger,
                             self.config)
            self.logger.info(
                "============================================================")

            #           test_create_app(self.api.handle_request_indication, self.logger, self.config, "myApp")
            self.logger.info(
                "============================================================")

            #           test_create_app_property(self.api.handle_request_indication, self.logger, self.config, "myApp", "myProperty")
            self.logger.info(
                "============================================================")

            #           test_subscribe_pushed_data(self.api.handle_request_indication, self.logger, self.config, "myApp", "myProperty", contact)
            self.logger.info(
                "============================================================")

            #            test_push_data(self.api.handle_request_indication, self.logger, self.config, "myApp", "myProperty")
            self.logger.info(
                "============================================================")

            #            test_get_all_properties(self.api.handle_request_indication, self.logger, self.config, "myApp")
            self.logger.info(
                "============================================================")

            #            test_get_latest_data_of_property(self.api.handle_request_indication, self.logger, self.config, "myApp", "myProperty")
            self.logger.info(
                "============================================================")

            #            test_destroy_app(self.api.handle_request_indication, self.logger, self.config, "myApp")
            self.logger.info(
                "============================================================")

            #            test_subscribe_apps_with_search_str(self.api.handle_request_indication, self.logger, self.config, "healthDevice", contact)
            test_subscribe_apps_with_search_str(
                self.api.handle_request_indication, self.logger, self.config,
                None, contact)
            self.logger.info(
                "============================================================")

            #            test_create_app_with_search_str(self.api.handle_request_indication, self.logger, self.config, "myApp", "healthDevice")
            #            test_create_app_with_search_str(self.api.handle_request_indication, self.logger, self.config, "myApp", None)
            self.logger.info(
                "============================================================")

            #            test_discover_apps_with_search_str(self.api.handle_request_indication, self.logger, self.config, "healthDevice", "intent://test_action")
            self.logger.info(
                "============================================================")

            #            test_get_app(self.api.handle_request_indication, self.logger, self.config, "myApp", "intent://test_action")
            self.logger.info(
                "============================================================")

            #            test_destroy_app(self.api.handle_request_indication, self.logger, self.config, "myApp")
            self.logger.info(
                "============================================================")

        self._started()