Example #1
def application(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/html')])

    gevent.spawn(level1)

    for i in range(100):
        yield "counter: %d<br/>" % i
Example #2
    def communicate(self, input=None):
        if self.stdin is not None:
            def _writer():
                self.stdin.write(input)
                self.stdin.close()
            writer = gevent.spawn(_writer)

        if self.stdout is not None:
            reader_stdout = gevent.spawn(self.stdout.read)

        if self.stderr is not None:
            reader_stderr = gevent.spawn(self.stderr.read)

        stdoutdata = None
        if self.stdout is not None:
            stdoutdata = reader_stdout.get()

        stderrdata = None
        if self.stderr is not None:
            stderrdata = reader_stderr.get()

        if self.stdin:
            writer.get()

        self.wait()
        return (stdoutdata, stderrdata)
Example #3
def main():
	gevent.joinall([
		gevent.spawn(setter),
		gevent.spawn(waiter),
		gevent.spawn(waiter),
		gevent.spawn(waiter)
	])
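# setter() and waiter() are not shown in this snippet; a minimal sketch of a
# common pairing, assuming a shared gevent.event.Event named evt:
#
#   import gevent
#   from gevent.event import Event
#
#   evt = Event()
#
#   def setter():
#       gevent.sleep(3)  # simulate some work
#       evt.set()        # wakes every blocked waiter at once
#
#   def waiter():
#       evt.wait()       # blocks until setter() calls evt.set()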
Example #4
 def startRedis():
     '''Connect to the Redis servers'''
     if not Gcore.startedRedis:
         from sgLib.pyRedis import MyRedis
         # connect to the local-server redis
         #print 'connect redisL',Gcore.config.REDISL_HOST,Gcore.config.REDISL_PORT
         Gcore.redisL = MyRedis(host=Gcore.config.REDISL_HOST, 
                                port=Gcore.config.REDISL_PORT,
                                password=Gcore.config.REDISL_PWD)
         Gcore.redisL.start('redisL')
         gevent.spawn(Gcore.redisL.loop)
         
         if Gcore.config.REDISL_HOST != Gcore.config.REDISM_HOST and Gcore.loadCoreCfg('CrossServer')!='0':
             # connect to the central-server redis
             print 'connecting to the central redis...'
             #print 'connect redisM',Gcore.config.REDISM_HOST,Gcore.config.REDISM_PORT
             Gcore.redisM = MyRedis(host=Gcore.config.REDISM_HOST, 
                                    port=Gcore.config.REDISM_PORT, 
                                    password=Gcore.config.REDISM_PWD)
             Gcore.redisM.start('redisM')
             gevent.spawn(Gcore.redisM.loop)
         else:
             Gcore.redisM = Gcore.redisL
         
         if 0:
             print 'redisL',Gcore.redisL.get('foo')
             print 'redisM',Gcore.redisM.get('foo')
         Gcore.startedRedis = True
Example #5
 def _do_socket(socket):
     socket.send('server started')
     
     socket.send(json.dumps(dict(request.cookies)))
     
     def _ping():
         while True:
             time.sleep(60)
             try:
                 socket.send('ping!')
             except websocket.Error:
                 break
     
     def _env():
         for k, v in sorted(socket.environ.iteritems()):
             time.sleep(1)
             try:
                 socket.send('%s: %r' % (k, v))
             except websocket.Error:
                 break
     
     def _echo():
         while True:
             msg = socket.recv()
             if not msg:
                 break
             socket.send(msg[::-1])
     
     tasks = [gevent.spawn(_ping), gevent.spawn(_echo)]
     for task in tasks:
         task.join()
Example #6
    def test_del_closes_socket(self):
        if PYPY:
            return
        timer = gevent.Timeout.start_new(0.5)

        def accept_once(listener):
            # delete/overwrite the original conn
            # object, only keeping the file object around
            # closing the file object should close everything
            try:
                conn, addr = listener.accept()
                conn = conn.makefile(mode='wb')
                conn.write(b'hello\n')
                conn.close()
                _write_to_closed(conn, b'a')
            finally:
                listener.close()

        server = tcp_listener(('0.0.0.0', 0))
        gevent.spawn(accept_once, server)
        client = socket.create_connection(('127.0.0.1', server.getsockname()[1]))
        fd = client.makefile()
        client.close()
        assert fd.read() == 'hello\n'
        assert fd.read() == ''

        timer.cancel()
Example #7
def main():
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(('', 80))
    s.listen(1)
    while True:
        conn, addr = s.accept()
        gevent.spawn(handle_client, conn, addr)  # pass args now; a bare lambda would capture conn/addr late
Example #8
    def needFile(self, inner_path, update=False, blocking=True, peer=None, priority=0):
        if self.storage.isFile(inner_path) and not update:  # File exist, no need to do anything
            return True
        elif self.settings["serving"] is False:  # Site not serving
            return False
        else:  # Wait until file downloaded
            self.bad_files[inner_path] = self.bad_files.get(inner_path, 0) + 1  # Mark as bad file
            if not self.content_manager.contents.get("content.json"):  # No content.json, download it first!
                self.log.debug("Need content.json first")
                gevent.spawn(self.announce)
                if inner_path != "content.json":  # Prevent double download
                    task = self.worker_manager.addTask("content.json", peer)
                    task.get()
                    self.content_manager.loadContent()
                    if not self.content_manager.contents.get("content.json"):
                        return False  # Content.json download failed

            if not inner_path.endswith("content.json") and not self.content_manager.getFileInfo(inner_path):
                # No info for file, download all content.json first
                self.log.debug("No info for %s, waiting for all content.json" % inner_path)
                success = self.downloadContent("content.json", download_files=False)
                if not success:
                    return False
                if not self.content_manager.getFileInfo(inner_path):
                    return False  # Still no info for file

            task = self.worker_manager.addTask(inner_path, peer, priority=priority)
            if blocking:
                return task.get()
            else:
                return task
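# A hedged usage sketch (the site object, file paths, and render_page are
# hypothetical):
#
#   if site.needFile("css/all.css"):                      # blocks until downloaded
#       render_page()
#   task = site.needFile("img/logo.png", blocking=False)  # returns the task
#   task.get()                                            # join it later if needed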
Example #9
    def send_email(self, from_address, to_addresses, subject, message):
        """
        RPC method allowing a platform to send an email.

        One can also send an email through the pubsub mechanism.

        :param from_address:
        :param to_addresses:
        :param subject:
        :param message:
        """
        _log.info('Sending email {}'.format(subject))
        _log.debug('Mail from: {}, to: {}'.format(from_address, to_addresses))
        recipients = to_addresses
        if isinstance(recipients, basestring):
            recipients = [recipients]

        # Use unicode to protect against encode errors
        # http://stackoverflow.com/questions/25891541/attributeerror-encode
        msg = MIMEText(unicode(message))
        msg['To'] = ', '.join(recipients)
        msg['FROM'] = from_address
        msg['Subject'] = subject

        gevent.spawn(self._send_email, from_address, recipients, msg)
        gevent.sleep(0.1)
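# A hedged sketch of invoking this RPC method from another VOLTTRON agent; the
# peer identity 'platform.emailer' is an assumption:
#
#   agent.vip.rpc.call('platform.emailer', 'send_email',
#                      'noreply@example.com', ['ops@example.com'],
#                      'Alert', 'Something happened').get(timeout=10)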
Example #10
    def get(self, protocol='default'):
        D = self.failsafe_json_decode
        args = dict([(k, D(v[0])) for k, v in self.request.arguments.iteritems()])

        fn = args.pop('fn')
        m = dict(kwargs=args, fn=fn, args=[])
        gevent.spawn(lambda: self._handle_call(self.request, fn, m, protocol))
Example #11
    def do_job(self, task_file, split_size, num_reducer, input_file, output_file):
        self.output_file = output_file
        # identical name for each task
        # use this name to generate intermediate filename
        task_name = time.strftime("%Y%m%d%H%M%S", time.localtime())
        self.task_name = task_name
        print "Task %s get" % task_file[0]
        start_time = datetime.now()
        split_size = int(split_size)
        num_reducer = int(num_reducer)
        split_infos, file_info = self.split_file(split_size, input_file)

        num_mapper = len(split_infos)
        # initialize jobs_tracker for task_name
        self.jobs_tracker[task_name] = {"mappers": {}, "reducers": {},
                                        "num_mapper": num_mapper, "num_reducer": num_reducer,
                                        "task_file": task_file, "split_infos": split_infos,
                                        "file_info": file_info, "output_file": output_file,
                                        "split_size": split_size, "done": False}

        print "Task " + task_name + " : assigning %d mappers, %d reducers" % (num_mapper, num_reducer)
        # Map task
        mapper_procs = gevent.spawn(self.assign_mappers, task_name)
        mapper_procs.join()
        # Reduce task
        reducer_procs = gevent.spawn(self.assign_reducers, task_name)
        reducer_procs.join()
        print "Task finished"
        end_time = datetime.now()
        print('Duration: {}'.format(end_time - start_time))
Example #12
    def _dispatch_send(self, message):
        """
        Dispatch the different steps of sending
        """

        if self.dryrun:
            return message

        if not self.socket:
            raise GraphiteSendException(
                "Socket was not created before send"
            )

        sending_function = self._send
        if self._autoreconnect:
            sending_function = self._send_and_reconnect

        try:
            if self.asynchronous and gevent:
                gevent.spawn(sending_function, message)
            else:
                sending_function(message)
        except Exception as e:
            self._handle_send_error(e)

        return "sent {0} long message: {1}".format(len(message), message[:75])
Example #13
	def start(self):

		# 1. Read all alarm configuration items
		db = self.getDBConn()
		cr = db.cursor()
#		sql="select * from giscore_AO_UserAlarmSettings where enable=true order by ao_id"
		sql = "select * from giscore_activeobject"
		cr.execute(sql)
		aos = dbconn.fetchallDict(cr)

		for ao in aos:
			sql = "select a.*,b.activeobject_id as aoid,c.name as aoname from \
					giscore_AO_UserAlarmSettings a,\
					giscore_activeobject_alarmsettings b, \
					giscore_activeobject c \
					where \
					a.id = b.ao_useralarmsettings_id \
					and b.activeobject_id=%s and \
					b.activeobject_id= c.id and \
			        a.enable=true"%(ao['id'])
			cr.execute(sql)

			rs = dbconn.fetchallDict(cr)
			for r in rs:  # process each alarm configuration item
				asi = AlarmSettingItem(self)
				if asi.init(r):
					self.asis.append( asi )
		###
#		thread = threading.Thread(target=self.thread_check)
#		thread.start()
		import gevent
		gevent.spawn(self.thread_check)
Example #14
def when_ready(server):
    def monitor():
        modify_times = {}
        while True:
            for module in sys.modules.values():
                path = getattr(module, "__file__", None)
                if not path: continue
                if path.endswith(".pyc") or path.endswith(".pyo"):
                    path = path[:-1]
                try:
                    modified = os.stat(path).st_mtime
                except:
                    continue
                if path not in modify_times:
                    modify_times[path] = modified
                    continue
                if modify_times[path] != modified:
                    logging.info("%s modified; restarting server", path)
                    os.kill(os.getpid(), signal.SIGHUP)
                    modify_times = {}
                    break
            gevent.sleep(1)

    import gevent
    gevent.spawn(monitor)
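# A hedged usage note: when_ready is a gunicorn server hook, so a sketch of
# using this monitor is to place the function in a gunicorn config module and
# launch with it (the module and app names are made up):
#
#   gunicorn -c gunicorn_conf.py myapp:application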
Example #15
    def test_fullduplex(self):

        def server():
            (client, addr) = self.listener.accept()
            # start reading, then, while reading, start writing. the reader should not hang forever
            N = 100000  # must be a big enough number so that sendall calls trampoline
            sender = gevent.spawn(client.sendall, 't' * N)
            result = client.recv(1000)
            assert result == 'hello world', result
            sender.join(timeout=0.2)
            sender.kill()
            sender.get()

        #print '%s: client' % getcurrent()

        server_proc = gevent.spawn(server)
        client = self.create_connection()
        client_reader = gevent.spawn(client.makefile().read)
        gevent.sleep(0.001)
        client.send('hello world')

        # close() used to hang
        client.close()

        # this tests "full duplex" bug;
        server_proc.get()

        client_reader.get()
Example #16
def test_concurrency(server_port):
    s = tcp.Command("127.0.0.1", server_port)
    s.connect()

    def task_function(msg, i):
        assert s.write_readline(msg + '\n') == msg

    def task_with_exception(msg, i):
        msg += '_exception'
        try:
            transaction = s._write(msg)
            s._readline(transaction, timeout=0.01, eol='\r',
                        clear_transaction=False)
        except RuntimeError:  # timeout
            rxmsg = s._read(transaction, size=len(msg))
            assert rxmsg == msg

    tasks = []
    for i, msg in enumerate(['HELLO', 'WORLD', 'HOUPPI',
                             'tagada', 'super', 'mario',
                             'ludgi']):
        tasks.append(gevent.spawn(task_function, msg, i))
        if i % 2:
            tasks.append(gevent.spawn(task_with_exception, msg, i))

    for t in tasks:
        t.join(3)
Example #17
def test_rpc_streaming():
    endpoint = random_ipc_endpoint()

    class MySrv(zerorpc.Server):

        @zerorpc.rep
        def range(self, max):
            return xrange(max)

        @zerorpc.stream
        def xrange(self, max):
            return xrange(max)

    srv = MySrv(heartbeat=2)
    srv.bind(endpoint)
    gevent.spawn(srv.run)

    client = zerorpc.Client(heartbeat=2)
    client.connect(endpoint)

    r = client.range(10)
    assert r == tuple(range(10))

    r = client.xrange(10)
    assert getattr(r, 'next', None) is not None
    l = []
    print 'wait 4s for fun'
    gevent.sleep(4)
    for x in r:
        l.append(x)
    assert l == range(10)
Example #18
 def __init__(self):
     self.log = logging.getLogger("SiteManager")
     self.log.debug("SiteManager created.")
     self.sites = None
     self.loaded = False
     gevent.spawn(self.saveTimer)
     atexit.register(self.save)
Example #19
    def put(self, *args, **kwargs):
        """
        This is designed to handle 3 situations:
            1. Pass just the key as the first argument and call put on the
            queue that is returned.
            2. Pass the key as the first argument and a callback as the
            second, which will be called in a coroutine (or thread).
            3. Pass the key as the first argument and a callback as the
            second; the rest of the arguments and keywords will be passed
            to the callback.

        """
        key = args[0]
        callback = None
        args = args[1:]
        if len(args) > 0:
            callback = args[0]
            args = args[1:]
        q = Queue()
        dict.__setitem__(self, key, q)
        if callback:
            def handle():
                # Pass positional and keyword arguments straight through;
                # this also covers situation 3 from the docstring when both
                # are present.
                q.put(callback(*args, **kwargs))
            spawn(handle)
        return q
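# A minimal sketch of the three call styles from the docstring, assuming this
# put() lives on a dict subclass (ResultDict is a made-up name) and that
# compute is some callable:
#
#   rd = ResultDict()
#   q1 = rd.put('k1')                       # 1. just a key; fill q1 yourself
#   q2 = rd.put('k2', compute)              # 2. compute() runs in a greenlet
#   q3 = rd.put('k3', compute, 40, base=2)  # 3. extra args go to compute
#   result = q3.get()                       # blocks until compute returns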
Example #20
def spawn_asyncresult(fn, *args, **kwargs):
    """
    Spawn a Greenlet and pass its result to an AsyncResult.

    This function is useful for shuffling data from a Greenlet into an
    AsyncResult. That, in turn, is useful because any Greenlet that raises
    an exception has its traceback shown on stderr by gevent, even when
    ``.link_exception`` has been called; routing the outcome through an
    AsyncResult avoids this.
    """
    r = gevent.event.AsyncResult()

    def wrapper():
        """
        Internal wrapper.
        """
        try:
            value = fn(*args, **kwargs)
        except Exception as e:
            r.set_exception(e)
        else:
            r.set(value)
    gevent.spawn(wrapper)

    return r
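# A minimal usage sketch (slow_square is a hypothetical worker):
#
#   def slow_square(x):
#       gevent.sleep(1)
#       return x * x
#
#   r = spawn_asyncresult(slow_square, 7)
#   value = r.get()  # blocks; re-raises slow_square's exception, if any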
Example #21
    def _disconnect_timeout(self):
        self.timeout.clear()

        if self.timeout.wait(10.0):
            gevent.spawn(self._disconnect_timeout)
        else:
            self.kill()
Example #22
    def get_response(self, parameters={}, url=SEARCH_ADRES_URL):
        """
        Actually do the HTTP API search call
        """
        # parameters = {'q': self.get_q()}
        async_r = grequests.get(url, params=parameters, session=self.session)
        # send a request and wait for results
        gevent.spawn(async_r.send).join()
        # Do something with the result count?

        if async_r.response is None:
            log.error('RESPONSE NONE %s %s', url, parameters)
            return {}

        if async_r.response.status_code == 404:
            log.error('404 %s %s', url, parameters)
            return {}

        if not async_r.response:
            log.error('NO RESPONSE %s %s', url, parameters)
            return {}

        result_json = async_r.response.json()

        return result_json
Example #23
def server(env, start_response):
    start_response("200 OK", [("Content-Type", "text/html")])
    path = env["PATH_INFO"]
    queryString = env["QUERY_STRING"]

    if path == GET_DOMAIN_URL:
        domain = redisConnection.spop(screenshotsQueueKey)
        if domain:
            return [domain]
        else:
            return ["empty"]

    if path == RESIZE_URL:
        query = parse_qs(urlparse("?" + queryString).query)
        screenshotsJson = query["screenshots"][0]
        domain = query["domain"][0]
        screenshotsJson = urllib2.unquote(screenshotsJson)
        redisConnection.sadd(resizeQueueKey, "%s@%s" % (domain, screenshotsJson))
        return ["okay"]

    if path == CREATE_SCREENSHOT:
        screenshots = queue.Queue()
        try:
            queryKey, domain = queryString.split("=")
        except Exception:
            return ["error"]
        redisConnection.sadd(screenshotsQueueKey, domain)
        gevent.spawn(waitForScreenshot, domain, screenshots)
        print "return screenshots to the client"
        return screenshots

    return ["error"]
Example #24
def main():

    if len(sys.argv) > 1:
        code = sys.argv[1]
        print 'stock code %s' % (code)
        global TID
        TID = code

    with open('robot.yaml') as f:
        cfg = yaml.load(f)
    rip = cfg['robot_listen']['rip']
    rport = cfg['robot_listen']['rport']
    addr = (rip, rport)
    print addr
    tester = Tester(TID, addr)

    pq_ip = cfg['pre_quo']['pqip']
    pq_port = cfg['pre_quo']['port']
    print pq_ip, pq_port
    pre_quo = pre_quotation.PreQuotation(tester, (pq_ip, pq_port))
    tester.pre_quo = pre_quo

    q_ip = cfg['quo_server']['qip']
    q_port = cfg['quo_server']['port']
    print q_ip, q_port
    quota = quotation.Quotation(tester, (q_ip, q_port))
    tester.quo = quota

    jobs = []
    jobs.append(gevent.spawn(pre_quo.recv_data))
    jobs.append(gevent.spawn(quota.recv_data))
    jobs.append(gevent.spawn(tester.recv_data))
    gevent.joinall(jobs)

    return 0
Example #25
        def on_message(self, msg):
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug('Got broker message:[{}]'.format(msg))

            if msg.type == 'message':

                # Replace payload with stuff read off the KVDB in case this is where the actual message happens to reside.
                if msg.channel in NEEDS_TMP_KEY:
                    tmp_key = '{}.tmp'.format(msg.data)

                    if self.lua_container.run_lua('zato.rename_if_exists', [msg.data, tmp_key]) == CODE_NO_SUCH_FROM_KEY:
                        payload = None
                    else:
                        payload = self.kvdb.conn.get(tmp_key)
                        self.kvdb.conn.delete(tmp_key)  # Note that it would've expired anyway
                        if not payload:
                            logger.warning('No KVDB payload for key [{}] (already expired?)'.format(tmp_key))
                        else:
                            payload = loads(payload)
                else:
                    payload = loads(msg.data)

                if payload:
                    payload = Bunch(payload)
                    if logger.isEnabledFor(logging.DEBUG):
                        logger.debug('Got broker message payload [{}]'.format(payload))
                        
                    callback = self.topic_callbacks[msg.channel]
                    spawn(callback, payload)

                else:
                    if logger.isEnabledFor(logging.DEBUG):
                        logger.debug('No payload in msg:[{}]'.format(msg))
Example #26
def InstanceClient(host, port):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((host, port))
    except:
        raise
    else:
        socketName = s.getsockname()

    def handle_incoming_data(client_socket):
        buffer = ""
        while True:
            data = client_socket.recv(1024)
            if data == "":
                INSTANCE_HO.serverClosed()
                break
            buffer += data
            msgs = buffer.split(TERMINATOR)
            buffer = msgs.pop()  # keep the trailing partial message, if any
            for msg in msgs:
                INSTANCE_HO.clientMessageReceived(msg)

    CLIENTS[socketName] = s
    gevent.spawn(handle_incoming_data, s)

    return socketName
Example #27
 def _run(self):
     """Main loop; reap and process pkts"""
     try:
         args = self.orbname, self.select, self.reject
         print repr(self.orbname)
         print repr(args)
         with OrbreapThr(*args, timeout=1, queuesize=8, after=self.tafter) as orbreapthr:
             log.info("Connected to ORB %s %s %s" % (self.orbname, self.select,
                                                     self.reject))
             self.timeoff = self.timeon = datetime.utcnow()
             spawn(self._status_printer).link_exception(self._janitor)
             threadpool = ThreadPool(maxsize=1)
             try:
                 while True:
                     try:
                         success, value = threadpool.spawn(
                                 wrap_errors, (Exception,), orbreapthr.get, [], {}).get()
                         timestamp = datetime.utcnow()
                         if not success:
                             raise value
                     except (Timeout, NoData), e:
                         log.debug("orbreapthr.get exception %r" % type(e))
                         pass
                     else:
                         if value is None:
                             raise Exception('Nothing to publish')
                         self._process(value, timestamp)
             finally:
                 # This blocks until all threads in the pool return. That's
                 # critical; if the orbreapthr dies before the get thread,
                 # segfaults ensue.
                 threadpool.kill()
     except Exception, e:
         log.error("OrbPktSrc terminating due to exception", exc_info=True)
         raise
Example #28
    def go():

        app = get_app()
        app.debug = debug

        if app.debug:
            app.config.update(SEND_FILE_MAX_AGE_DEFAULT=0)

        #  TODO: asset debug settings will cause bad YSLOW rating
        app.config['COMPRESS_DEBUG'] = False
        app.config['ASSETS_DEBUG'] = debug

        # Breaks web socket communication
        # (WebSocketConnectionClosedException in client)
        # app = DebuggedApplication(app, evalex=True)

        host = conf_global['serverHost']
        port = conf_global['serverPort']
        print('Server: {0}:{1}'.format(host, port))
        http_server = WSGIServer((host, port),
                                 app,
                                 handler_class=WebSocketHandler)

        # gevent.spawn(send_dummy_notifications)
        gevent.spawn(send_bulk)

        http_server.serve_forever()
Example #29
    def test_multiple_waiters(self):
        # tests that multiple waiters get their results back
        q = queue.Queue()

        def waiter(q, evt):
            evt.set(q.get())

        sendings = ['1', '2', '3', '4']
        evts = [AsyncResult() for x in sendings]
        for i, x in enumerate(sendings):
            gevent.spawn(waiter, q, evts[i])  # XXX use waitall for them

        gevent.sleep(0.01)  # get 'em all waiting

        results = set()

        def collect_pending_results():
            for e in evts:
                with gevent.Timeout(0.001, False):
                    x = e.get()
                    results.add(x)
            return len(results)

        q.put(sendings[0])
        self.assertEquals(collect_pending_results(), 1)
        q.put(sendings[1])
        self.assertEquals(collect_pending_results(), 2)
        q.put(sendings[2])
        q.put(sendings[3])
        self.assertEquals(collect_pending_results(), 4)
Example #30
    def test_multiple_listeners_error(self):
        # if there was an error while calling a callback
        # it should not prevent the other listeners from being called
        # also, all of the errors should be logged, check the output
        # manually that they are
        p = gevent.spawn(lambda: 5)
        results = []

        def listener1(*args):
            results.append(10)
            raise ExpectedError('listener1')

        def listener2(*args):
            results.append(20)
            raise ExpectedError('listener2')

        def listener3(*args):
            raise ExpectedError('listener3')

        p.link(listener1)
        p.link(listener2)
        p.link(listener3)
        sleep(DELAY * 10)
        assert results in [[10, 20], [20, 10]], results

        p = gevent.spawn(lambda: getcurrent().throw(ExpectedError('test_multiple_listeners_error')))
        results = []
        p.link(listener1)
        p.link(listener2)
        p.link(listener3)
        sleep(DELAY * 10)
        assert results in [[10, 20], [20, 10]], results
Example #31
import gevent
import requests as rq


def check(url):
    headers = {
        'Accept-Charset':
        'ZWNobyAiZWVTenh1OTJuSURBYiI7IA==',  # echoes eeSzxu92nIDAb
        'Accept-Encoding': 'gzip,deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9',
    }
    try:
        #print(url)
        res = rq.get(url, headers=headers, timeout=3)
        if res.status_code == 200:
            res.text.find('eeSzxu92nIDAb')
            #print(res.text.find('eeSzxu92nIDAb'))
            if res.text.find('eeSzxu92nIDAb') > -1:
                print("[vulnerable] " + url)
                f = open("ok.txt", "a")
                f.write("[vulnerable] " + url + "\n")
                f.close()
            else:
                print("[not vulnerable] " + url)
    except Exception as e:
        #raise e
        print("[timeout] " + url)


if __name__ == '__main__':
    print("phpStudy 批量检测 (需要 gevent,requests 库)")
    print("使用之前,请将URL保存为 url.txt 放置此程序同目录下")
    input("任意按键开始执行..")
    tasks = [gevent.spawn(check, url) for url in file_read()]
    print("正在执行...请等候")
    gevent.joinall(tasks)
    wait = input("执行完毕 任意键退出...")
Example #32
    def kill_game(self):
        if self.is_match and self.game.started:
            gevent.spawn(lambda: Subsystem.interconnect.publish(
                'speaker', [u'文文', u'"%s" terminated unexpectedly!' % self.game_name]))

        self.game.suicide = True  # game will kill itself in get_synctag()
Example #33
import gevent
from gevent import monkey
import time

monkey.patch_all()
def f(n):
    for i in range(n):
        print(gevent.getcurrent(), i)
        # simulate a time-consuming operation; note that after monkey.patch_all() this is no longer the blocking sleep from the time module
        time.sleep(1)

g1 = gevent.spawn(f, 5)
g2 = gevent.spawn(f, 5)
g3 = gevent.spawn(f, 5)
g1.join()
g2.join()
g3.join()
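# An equivalent sketch without monkey-patching: call gevent.sleep directly and
# the three greenlets interleave the same way.
#
#   def f2(n):
#       for i in range(n):
#           print(gevent.getcurrent(), i)
#           gevent.sleep(1)  # cooperative sleep; no patch_all() needed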
Example #34
def main_comment():
    greenlets = []
    for i in range(20):
        mm = MuMian()
        greenlets.append(gevent.spawn(mm.get_comment))
    gevent.joinall(greenlets)
Example #35
 def start(self) -> gevent.Greenlet:
     assert not self.main_loop_spawned, 'Tried to spawn the main loop twice'
     greenlet = gevent.spawn(self.main_loop)
     self.main_loop_spawned = True
     return greenlet
Example #36
 def stop(self, exit_exc=None, caller=None):
     spawn(self._stop)
Example #37
# wang
from urllib import request
import gevent, time
from gevent import monkey
monkey.patch_all()


def f(url):
    print('GET:{}'.format(url))
    res = request.urlopen(url)
    data = res.read()
    print(len(data), url)


sync_time = time.time()
f('https://www.python.org/')
#f('https://www.yahoo.com/')
f('https://github.com/')
print(time.time() - sync_time)

rsync_time = time.time()
gevent.joinall([
    gevent.spawn(f, 'https://www.python.org/'),
    #    gevent.spawn(f,'https://www.yahoo.com/'),
    gevent.spawn(f, 'https://github.com/')
])
print(time.time() - rsync_time)
Example #38
def task(singer_type_list, type_href_list):
    gevent_list = []
    for i in range(len(singer_type_list)):
        g = gevent.spawn(worker, *(singer_type_list[i], type_href_list[i]))
        gevent_list.append(g)
    gevent.joinall(gevent_list)
Example #39
import gevent
from gevent.queue import Queue

tasks = Queue()


def worker(user):
    while not tasks.empty():
        task = tasks.get()
        print('%s got task %s' % (user, task))
        gevent.sleep(0)
    print('Quitting worker!')


def boss():
    for i in xrange(4):
        tasks.put_nowait(i)


gevent.spawn(boss).join()

gevent.joinall([
    gevent.spawn(worker, 'steve'),
    gevent.spawn(worker, 'john'),
])
Example #40
    def start(self):
        """ Start the node synchronously. Raises directly if anything went wrong on startup """
        if not self.stop_event.ready():
            raise RuntimeError(f'{self!r} already started')
        self.stop_event.clear()

        if self.database_dir is not None:
            self.db_lock.acquire(timeout=0)
            assert self.db_lock.is_locked

        # start the registration early to speed up the start
        if self.config['transport_type'] == 'udp':
            endpoint_registration_greenlet = gevent.spawn(
                self.discovery.register,
                self.address,
                self.config['transport']['udp']['external_ip'],
                self.config['transport']['udp']['external_port'],
            )

        # The database may be :memory:
        storage = sqlite.SQLiteStorage(self.database_path,
                                       serialize.JSONSerializer())
        self.wal = wal.restore_from_latest_snapshot(
            node.state_transition,
            storage,
        )

        if self.wal.state_manager.current_state is None:
            log.debug(
                'No recoverable state available, created initial state',
                node=pex(self.address),
            )
            block_number = self.chain.block_number()

            state_change = ActionInitChain(
                random.Random(),
                block_number,
                self.chain.node_address,
                self.chain.network_id,
            )
            self.wal.log_and_dispatch(state_change)
            payment_network = PaymentNetworkState(
                self.default_registry.address,
                [],  # empty list of token network states as it's the node's startup
            )
            state_change = ContractReceiveNewPaymentNetwork(
                constants.EMPTY_HASH,
                payment_network,
            )
            self.handle_state_change(state_change)

            # On first run Raiden needs to fetch all events for the payment
            # network, to reconstruct all token network graphs and find opened
            # channels
            last_log_block_number = 0
        else:
            # The `Block` state change is dispatched only after all the events
            # for that given block have been processed, filters can be safely
            # installed starting from this position without losing events.
            last_log_block_number = views.block_number(
                self.wal.state_manager.current_state)
            log.debug(
                'Restored state from WAL',
                last_restored_block=last_log_block_number,
                node=pex(self.address),
            )

            known_networks = views.get_payment_network_identifiers(
                views.state_from_raiden(self))
            if known_networks and self.default_registry.address not in known_networks:
                configured_registry = pex(self.default_registry.address)
                known_registries = lpex(known_networks)
                raise RuntimeError(
                    f'Token network address mismatch.\n'
                    f'Raiden is configured to use the smart contract '
                    f'{configured_registry}, which conflicts with the current known '
                    f'smart contracts {known_registries}', )

        # Clear ref cache & disable caching
        serialize.RaidenJSONDecoder.ref_cache.clear()
        serialize.RaidenJSONDecoder.cache_object_references = False

        # Restore the current snapshot group
        state_change_qty = self.wal.storage.count_state_changes()
        self.snapshot_group = state_change_qty // SNAPSHOT_STATE_CHANGES_COUNT

        # Install the filters using the correct from_block value, otherwise
        # blockchain logs can be lost.
        self.install_all_blockchain_filters(
            self.default_registry,
            self.default_secret_registry,
            last_log_block_number,
        )

        # Complete the first_run of the alarm task and synchronize with the
        # blockchain since the last run.
        #
        # Notes about setup order:
        # - The filters must be polled after the node state has been primed,
        # otherwise the state changes won't have effect.
        # - The alarm must complete its first run before the transport is
        #   started, to avoid rejecting messages for unknown channels.
        self.alarm.register_callback(self._callback_new_block)

        # alarm.first_run may process some new channel, which would start_health_check_for
        # a partner, that's why transport needs to be already started at this point
        self.transport.start(self)

        self.alarm.first_run()

        chain_state = views.state_from_raiden(self)
        # Dispatch pending transactions
        pending_transactions = views.get_pending_transactions(chain_state)
        log.debug(
            'Processing pending transactions',
            num_pending_transactions=len(pending_transactions),
            node=pex(self.address),
        )
        with self.dispatch_events_lock:
            for transaction in pending_transactions:
                try:
                    self.raiden_event_handler.on_raiden_event(
                        self, transaction)
                except RaidenRecoverableError as e:
                    log.error(str(e))

        self.alarm.start()

        # after transport and alarm is started, send queued messages
        events_queues = views.get_all_messagequeues(chain_state)

        for queue_identifier, event_queue in events_queues.items():
            self.start_health_check_for(queue_identifier.recipient)

            # repopulate identifier_to_results for pending transfers
            for event in event_queue:
                if type(event) == SendDirectTransfer:
                    self.identifier_to_results[
                        event.payment_identifier] = AsyncResult()

                message = message_from_sendevent(event, self.address)
                self.sign(message)
                self.transport.send_async(queue_identifier, message)

        # exceptions on these subtasks should crash the app and bubble up
        self.alarm.link_exception(self.on_error)
        self.transport.link_exception(self.on_error)

        # Health check needs the transport layer
        self.start_neighbours_healthcheck()

        if self.config['transport_type'] == 'udp':
            endpoint_registration_greenlet.get(
            )  # re-raise if exception occurred

        super().start()
Example #41
    def testExecuteWorker(self):
        import mars.tensor as mt
        mock_scheduler_addr = '127.0.0.1:%d' % get_next_port()
        try:

            session_id = str(uuid.uuid4())
            with create_actor_pool(n_process=1,
                                   backend='gevent',
                                   address=mock_scheduler_addr) as pool:
                pool.create_actor(ClusterInfoActor,
                                  schedulers=[mock_scheduler_addr],
                                  uid=ClusterInfoActor.default_name())
                pool.create_actor(ChunkMetaActor,
                                  uid=ChunkMetaActor.default_name())
                resource_ref = pool.create_actor(
                    ResourceActor, uid=ResourceActor.default_name())

                proc = subprocess.Popen([
                    sys.executable, '-m', 'mars.worker', '-a', '127.0.0.1',
                    '--schedulers', mock_scheduler_addr, '--cpu-procs', '1',
                    '--cache-mem', '10m', '--ignore-avail-mem'
                ])
                worker_ips = []

                def waiter():
                    check_time = time.time()
                    while True:
                        if not resource_ref.get_workers_meta():
                            gevent.sleep(0.5)
                            if proc.poll() is not None:
                                raise SystemError('Worker dead. exit code %s' %
                                                  proc.poll())
                            if time.time() - check_time > 20:
                                raise SystemError(
                                    'Check meta_timestamp timeout')
                            continue
                        else:
                            break
                    val = resource_ref.get_workers_meta()
                    worker_ips.extend(val.keys())

                gl = gevent.spawn(waiter)
                gl.join()

                a = mt.ones((100, 50), chunk_size=30)
                b = mt.ones((50, 200), chunk_size=30)
                result = a.dot(b)

                graph = result.build_graph(tiled=True)

                reply_ref = pool.create_actor(PromiseReplyTestActor)
                reply_callback = ((reply_ref.uid, reply_ref.address), 'reply')

                executor_ref = pool.actor_ref(ExecutionActor.default_name(),
                                              address=worker_ips[0])
                io_meta = dict(chunks=[c.key for c in result.chunks])
                executor_ref.execute_graph(session_id,
                                           str(id(graph)),
                                           serialize_graph(graph),
                                           io_meta,
                                           None,
                                           callback=reply_callback)

                check_time = time.time()
                while not reply_ref.get_reply():
                    gevent.sleep(0.1)
                    if time.time() - check_time > 20:
                        raise SystemError('Check reply timeout')
        finally:
            if proc.poll() is None:
                proc.send_signal(signal.SIGINT)
                check_time = time.time()
                while True:
                    time.sleep(1)
                    if proc.poll(
                    ) is not None or time.time() - check_time >= 5:
                        break
                if proc.poll() is None:
                    proc.kill()
            if os.path.exists(options.worker.plasma_socket):
                os.unlink(options.worker.plasma_socket)
Example #42
import gevent
from gevent.queue import Queue, Empty

tasks = Queue(maxsize=3)


def worker(name):
    try:
        while True:
            task = tasks.get(timeout=1)
            print(('Worker %s got task %s' % (name, task)))
            gevent.sleep(0)
    except Empty:
        print(('Quitting time of %s!' % name))


def boss():
    for i in range(1, 11):
        tasks.put(i)
    print('Assigned all work in iteration 1')
    for i in range(11, 21):
        tasks.put(i)
    print('Assigned all work in iteration 2')


gevent.joinall([
    gevent.spawn(boss),
    gevent.spawn(worker, 'steve'),
    gevent.spawn(worker, 'john'),
    gevent.spawn(worker, 'bob'),
])
Example #43
def _update_tags_series_async_gevent(db, provider_name=None, dataset_code=None, 
                               max_bulk=100, update_only=False, dry_mode=False):
    

    import gevent
    from gevent.pool import Pool
    from gevent.queue import Queue

    pool = Pool(10)
    queue = Queue()

    count_errors = 0
    count_success = 0
    
    def _queue_process():

        _requests = []
        count_series = 0
        count_modified = 0
    
        def _process_requests():
            result = db[constants.COL_SERIES].bulk_write(_requests, ordered=False, bypass_document_validation=True)
            return result.modified_count

        try:
            while True:
                _id, _tags = queue.get()
                if not _id:
                    break
                 
                count_series += 1
                
                _requests.append(UpdateOne({'_id': _id}, {"$set": {'tags': _tags}}))
                
                if len(_requests) >= max_bulk:
                    count_modified += _process_requests()
                    _requests = []
        finally:
            if len(_requests) > 0:
                count_modified += _process_requests()

        return count_series, count_modified

    doc_provider = db[constants.COL_PROVIDERS].find_one({"enable": True,
                                                         "name": provider_name})

    if not doc_provider:
        logger.error("Provider [%s] not found or disable." % provider_name)
        return
    
    dataset_query = {'provider_name': provider_name, "enable": True}
    dataset_projection = {"doc_href": False, 
                          "dimension_list": False, "attribute_list": False}

    if dataset_code:
        dataset_query["dataset_code"] = dataset_code

    def _series_list_process(doc_dataset):
        
        series_query = { "provider_name": doc_dataset["provider_name"], 
                         "dataset_code": doc_dataset["dataset_code"]}
        series_projection = {"values": False}
    
        if update_only:
            series_query["tags.0"] = {"$exists": False}

        _modified_count = 0
        
        pool_series = Pool(200)

        def _serie_process(doc):
            select_for_tags = [tag for tag in generate_tags_series_async(doc, doc_provider, doc_dataset)]

            tags = []
            for value in select_for_tags:
                tags.extend(str_to_tags(value))

            queue.put((doc["_id"], sorted(list(set(tags)))))
            
        for doc in db[constants.COL_SERIES].find(series_query, series_projection):
            pool_series.spawn(_serie_process, doc)
            
        pool_series.join()
        
    def _process_ds():
        count_ds = 0
        
        for doc_dataset in db[constants.COL_DATASETS].find(dataset_query, 
                                                           dataset_projection):
    
            count_ds += 1
            pool.spawn(_series_list_process, doc_dataset)
            
        pool.join()

        queue.put((None, None))
        return count_ds

    queue_green = gevent.spawn(_queue_process) 
    ds_green = gevent.spawn(_process_ds)
    try:
        gevent.joinall([ds_green, queue_green])
    except KeyboardInterrupt:
        pass 

    count_stats = dict(
        count_ds = ds_green.value,
        count_series = queue_green.value[0],
        count_errors = count_errors,
        count_success = count_success,
        count_modified = queue_green.value[1]
    )

    msg = "modified[%(count_modified)s] - errors[%(count_errors)s] - success[%(count_success)s] - datasets[%(count_ds)s] - series[%(count_series)s]"
    logger.info(msg % count_stats)
Example #44
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import time
import gevent
from gevent import monkey

monkey.patch_all()

def create(n):
    print(n)
    time.sleep(0.5)

while True:
    g1 = gevent.spawn(create, 1)
    g2 = gevent.spawn(create, 2)
    g3 = gevent.spawn(create, 3)

    g1.join()
    g2.join()
    g3.join()
Example #45
    def execute_without_std(self, command, wait=False):
        """Execute the given command in the sandbox using
        subprocess.Popen and discarding standard input, output and
        error. More specifically, the standard input gets closed just
        after the execution has started; standard output and error are
        read until the end, in a way that prevents the execution from
        being blocked because of insufficient buffering.

        command ([string]): executable filename and arguments of the
            command.

        return (bool): True if the sandbox didn't report errors
            (caused by the sandbox itself), False otherwise

        """
        def preexec_fn(self):
            """Set limits for the child process.

            """
            if self.chdir:
                os.chdir(self.chdir)

            # TODO - We're not checking that setrlimit() returns
            # successfully (they may try to set to higher limits than
            # allowed to); anyway, this is just for testing
            # environment, not for real contests, so who cares.
            if self.timeout:
                rlimit_cpu = self.timeout
                if self.extra_timeout:
                    rlimit_cpu += self.extra_timeout
                rlimit_cpu = int(rlimit_cpu) + 1
                resource.setrlimit(resource.RLIMIT_CPU,
                                   (rlimit_cpu, rlimit_cpu))

            if self.address_space:
                rlimit_data = int(self.address_space * 1024)
                resource.setrlimit(resource.RLIMIT_DATA,
                                   (rlimit_data, rlimit_data))

            if self.stack_space:
                rlimit_stack = int(self.stack_space * 1024)
                resource.setrlimit(resource.RLIMIT_STACK,
                                   (rlimit_stack, rlimit_stack))

            # TODO - Doesn't work as expected
            #resource.setrlimit(resource.RLIMIT_NPROC, (1, 1))

        # Setup std*** redirection
        if self.stdin_file:
            stdin_fd = os.open(os.path.join(self.path, self.stdin_file),
                               os.O_RDONLY)
        else:
            stdin_fd = subprocess.PIPE
        if self.stdout_file:
            stdout_fd = os.open(os.path.join(self.path, self.stdout_file),
                                os.O_WRONLY | os.O_TRUNC | os.O_CREAT,
                                stat.S_IRUSR | stat.S_IRGRP |
                                stat.S_IROTH | stat.S_IWUSR)
        else:
            stdout_fd = subprocess.PIPE
        if self.stderr_file:
            stderr_fd = os.open(os.path.join(self.path, self.stderr_file),
                                os.O_WRONLY | os.O_TRUNC | os.O_CREAT,
                                stat.S_IRUSR | stat.S_IRGRP |
                                stat.S_IROTH | stat.S_IWUSR)
        else:
            stderr_fd = subprocess.PIPE

        # Note down execution time
        self.popen_time = monotonic_time()

        # Actually call the Popen
        self.popen = self._popen(command,
                                 stdin=stdin_fd,
                                 stdout=stdout_fd,
                                 stderr=stderr_fd,
                                 preexec_fn=partial(preexec_fn, self),
                                 close_fds=True)

        # Close file descriptors passed to the child
        if self.stdin_file:
            os.close(stdin_fd)
        if self.stdout_file:
            os.close(stdout_fd)
        if self.stderr_file:
            os.close(stderr_fd)

        if self.wallclock_timeout:
            # Kill the process after the wall clock time passed
            def timed_killer(timeout, popen):
                gevent.sleep(timeout)
                # TODO - Here we risk to kill some other process that gets
                # the same PID in the meantime; I don't know how to
                # properly solve this problem
                try:
                    popen.kill()
                except OSError:
                    # The process had died by itself
                    pass

            # Setup the killer
            full_wallclock_timeout = self.wallclock_timeout
            if self.extra_timeout:
                full_wallclock_timeout += self.extra_timeout
            gevent.spawn(timed_killer, full_wallclock_timeout, self.popen)

        # If the caller wants us to wait for completion, we also avoid
        # std*** to interfere with command. Otherwise we let the
        # caller handle these issues.
        if wait:
            return self.translate_box_exitcode(
                wait_without_std([self.popen])[0])
        else:
            return self.popen
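# A hedged usage sketch (the sandbox object and command are hypothetical):
#
#   ok = sandbox.execute_without_std(['./solution'], wait=True)  # bool exit status
#   popen = sandbox.execute_without_std(['./solution'])          # Popen; caller waits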
Example #46
    def run(self):
        context = zmq.Context()

        receiver = context.socket(zmq.SUB)
        receiver.setsockopt(zmq.SUBSCRIBE, '')

        for binding in Settings.MONITOR_RECEIVER_BINDINGS:
            receiver.connect(binding)

        def monitor_worker(message):
            db = sqlite3.connect(Settings.MONITOR_DB)

            # Separate topic from message
            message = message.split(' |-| ')

            # Handle gateway not sending topic
            if len(message) > 1:
                message = message[1]
            else:
                message = message[0]

            if Settings.RELAY_DUPLICATE_MAX_MINUTES:
                if duplicateMessages.isDuplicated(message):
                    schemaID = 'DUPLICATE MESSAGE'

                    c = db.cursor()
                    c.execute('UPDATE schemas SET hits = hits + 1 WHERE `name` = ? AND `dateStats` = DATE("now", "utc")', (schemaID, ))
                    c.execute('INSERT OR IGNORE INTO schemas (name, dateStats) VALUES (?, DATE("now", "utc"))', (schemaID, ))
                    db.commit()

                    return

            if Settings.MONITOR_DECOMPRESS_MESSAGES:
                message = zlib.decompress(message)

            json = simplejson.loads(message)

            # Update software count
            softwareID = json['header']['softwareName'].encode('utf8') + ' | ' + json['header']['softwareVersion'].encode('utf8')

            c = db.cursor()
            c.execute('UPDATE softwares SET hits = hits + 1 WHERE `name` = ? AND `dateStats` = DATE("now", "utc")', (softwareID, ))
            c.execute('INSERT OR IGNORE INTO softwares (name, dateStats) VALUES (?, DATE("now", "utc"))', (softwareID, ))
            db.commit()

            # Update uploader count
            uploaderID = json['header']['uploaderID'].encode('utf8')

            if uploaderID:  # Don't get empty uploaderID
                c = db.cursor()
                c.execute('UPDATE uploaders SET hits = hits + 1 WHERE `name` = ? AND `dateStats` = DATE("now", "utc")', (uploaderID, ))
                c.execute('INSERT OR IGNORE INTO uploaders (name, dateStats) VALUES (?, DATE("now", "utc"))', (uploaderID, ))
                db.commit()

            # Update schemas count
            schemaID = json['$schemaRef']

            c = db.cursor()
            c.execute('UPDATE schemas SET hits = hits + 1 WHERE `name` = ? AND `dateStats` = DATE("now", "utc")', (schemaID, ))
            c.execute('INSERT OR IGNORE INTO schemas (name, dateStats) VALUES (?, DATE("now", "utc"))', (schemaID, ))
            db.commit()

            db.close()

        while True:
            inboundMessage = receiver.recv()
            gevent.spawn(monitor_worker, inboundMessage)
Example #47
import subprocess

import gevent
import pandas as pd

jobs = pd.read_csv(
    "./job.csv",
    encoding="gbk",
    header=None,
    names=["panel", "PID", "sample_N", "sample_T", "chip", "name"])


def check_list(df):
    if df["sample_T"] == "yaml无T":
        return df["sample_N"]
    else:
        return df["sample_T"]


jobs["check_sample"] = jobs.apply(check_list, axis=1)


def func(sample):
    print("start", sample)
    # subprocess.call("hand ags list|grep -i W067750T",shell=True)
    sub = subprocess.Popen(f"hand ags list|grep -i {sample}", shell=True)
    out, err = sub.communicate()


job_list = []

for i in jobs["check_sample"]:
    job_list.append(gevent.spawn(func, i))

gevent.joinall(job_list)
Example #48
 def prep_task_queue(self):
     """prepare task_queue for another set of distributed tasks"""
     self._task_queue = JoinableQueue()
     spawn(self._consume_task_queue)
Example #49
def main():
    gevent.joinall(
        [gevent.spawn(setter),
         gevent.spawn(waiter),
         gevent.spawn(waiter)])
Example #50
def async_func():
    g_l = []
    for i in range(10):
        g_l.append(gevent.spawn(task, i))
    gevent.joinall(g_l)
Example #51
 def start(self):
     return gevent.spawn(self.serve_forever)
Example #52
import gevent
import gevent.wsgi
import gevent.queue
from tinyrpc.protocols.jsonrpc import JSONRPCProtocol
from tinyrpc.transports.wsgi import WsgiServerTransport
from tinyrpc.server.gevent import RPCServerGreenlets
from tinyrpc.dispatch import RPCDispatcher
from threading import Lock

dispatcher = RPCDispatcher()
transport = WsgiServerTransport(max_content_length=4096 * 1024,
                                queue_class=gevent.queue.Queue)

# start wsgi server as a background-greenlet
wsgi_server = gevent.wsgi.WSGIServer(('0.0.0.0', 10090), transport.handle)
gevent.spawn(wsgi_server.serve_forever)

rpc_server = RPCServerGreenlets(transport, JSONRPCProtocol(), dispatcher)

TFPublisher = rospy.Publisher('tf', tfMessage, queue_size=1)
rospy.init_node('UE_ROS_Bridge')
tfBroadcaster = tf.TransformBroadcaster()

serviceHandlers = {}
messagePages = [{}, {}]
messageSendingPage = [0]
pageMutex = Lock()

SetupListeners(pageMutex, messagePages, messageSendingPage)
SetupServiceListeners(pageMutex, messagePages, messageSendingPage,
                      serviceHandlers)
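# A minimal sketch of exposing a method through the dispatcher above and
# running the RPC server greenlet (the handler name is made up):
#
#   @dispatcher.public
#   def reverse_string(s):
#       return s[::-1]
#
#   gevent.spawn(rpc_server.serve_forever)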
Example #53
# -*-coding:utf-8-*-
import gevent
from gevent.queue import Queue

q = Queue()


def producer(name):
    print(f'producer({name}) is running')
    while not q.empty():
        task = q.get()
        print(f'producer({name}) get task: {task}')
        gevent.sleep(1)


def customer():
    print(f'customer({gevent.getcurrent()}) is running')
    for i in range(10):
        q.put_nowait(i)


if __name__ == '__main__':
    gevent.spawn(customer).join()

    gevent.joinall([
        gevent.spawn(producer, 'a'),
        gevent.spawn(producer, 'b'),
        gevent.spawn(producer, 'c'),
    ])
Example #54
 def start(self):
     self.greenlet = gevent.spawn(self.start_server)
     self.greenlet.link_exception(greenlet_exception_handler)
Example #55
 def test():
     tasks = [spawn(test_echo, proxy) for proxy in proxys]
     joinall(tasks)
Example #56
def run_task():
    symbols = ['btc_usd', 'bch_usd', 'eth_usd', 'ltc_usd', 'eos_usd']
    contract_types = ['this_week', 'next_week', 'quarter']
    try:
        # Pass the bound method itself to gevent.spawn(); writing
        # spawn(obj.download()) would call download() synchronously and
        # hand its return value -- not a callable -- to the greenlet.
        greenlets = [
            gevent.spawn(future_kline_download(symbol, contract).download)
            for symbol in symbols
            for contract in contract_types
        ]
        gevent.joinall(greenlets)
    except Exception as e:
        print(e)
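
The spawn(obj.download) versus spawn(obj.download()) distinction fixed above is easy to miss, so here is a self-contained demo with a stand-in class (future_kline_download itself is not shown in the snippet):

import gevent


class FakeDownloader(object):
    # Stand-in for future_kline_download, which is defined elsewhere.
    def __init__(self, symbol, contract):
        self.symbol, self.contract = symbol, contract

    def download(self):
        gevent.sleep(0.1)  # simulate network I/O
        print('downloaded %s/%s' % (self.symbol, self.contract))


# The greenlet calls download() itself because we pass the bound method.
gevent.joinall([gevent.spawn(FakeDownloader('btc_usd', 'this_week').download)])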
Example #57
0
import time

import gevent
from loqui.server import LoquiServer  # import path assumed from the loqui package

i = 0  # shared request counter, bumped by Server.handle_request below


def log_loop():
    last = time.time()
    last_i = 0
    while True:
        gevent.sleep(1)
        now = time.time()
        elapsed = now - last
        req_sec = (i - last_i) / elapsed

        print('%s total requests (%.2f/sec). last log %.2f sec ago.' %
              (i, req_sec, elapsed))
        last_i = i
        last = now


class Server(LoquiServer):
    def handle_request(self, request, session):
        global i
        i += 1
        if i and i % 50000 == 0:
            session.close()

        return 'm' * 1024

    def handle_push(self, push, session):
        print('pushed', push.data)
        return


if __name__ == '__main__':
    s = Server(('localhost', 8080))
    gevent.spawn(log_loop)
    s.serve_forever()
# -*- coding: utf-8 -*-

from gevent import monkey
monkey.patch_all()
import gevent
import requests


def get_page(url):
    print("GET:%s" % url)
    response = requests.get(url)
    print(url, len(response.text))
    return len(response.text)


g1 = gevent.spawn(get_page, 'https://www.python.org/doc')
g2 = gevent.spawn(get_page, 'http://www.163.com')
g3 = gevent.spawn(get_page, 'https://www.openstack.org')
gevent.joinall([
    g1,
    g2,
    g3,
])
print(g1.value, g2.value, g3.value)  # collect the return values

# Coroutine-pool variant (see the runnable sketch below):
# from gevent.pool import Pool
# pool = Pool(2)
# g1 = pool.spawn(get_page, 'https://www.python.org/doc')
# g2 = pool.spawn(get_page, 'http://www.163.com')
# g3 = pool.spawn(get_page, 'https://www.openstack.org')
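
The commented-out lines hint at gevent.pool.Pool, which caps how many greenlets run at once. A runnable sketch of the same fetcher bounded to two concurrent requests:

from gevent import monkey
monkey.patch_all()  # must run before importing requests
import gevent
from gevent.pool import Pool
import requests

pool = Pool(2)  # at most two fetches in flight at a time
urls = ['https://www.python.org/doc', 'http://www.163.com',
        'https://www.openstack.org']


def fetch(url):
    return len(requests.get(url).text)


jobs = [pool.spawn(fetch, url) for url in urls]
gevent.joinall(jobs)
print([job.value for job in jobs])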
Example #59
0
    def process_gym(self, gym):
        gym_id = gym['id']

        # Update Gym details (if they exist)
        if gym_id not in self.__gym_info or gym['name'] != 'unknown':
            self.__gym_info[gym_id] = {
                "name": gym['name'],
                "description": gym['description'],
                "url": gym['url']
            }

        if self.__gym_settings['enabled'] is False:
            log.debug("Gym ignored: notifications are disabled.")
            return

        # Extract some basic information
        to_team_id = gym['new_team_id']
        from_team_id = self.__gym_hist.get(gym_id)

        # Doesn't look like anything to me
        if to_team_id == from_team_id:
            log.debug("Gym ignored: no change detected")
            return
        # Ignore changes to neutral
        if self.__gym_settings['ignore_neutral'] and to_team_id == 0:
            log.debug("Gym update ignored: changed to neutral")
            return
        # Update gym's last known team
        self.__gym_hist[gym_id] = to_team_id

        # Ignore first time updates
        if from_team_id is None:
            log.debug("Gym update ignored: first time seeing this gym")
            return

        # Get some more info out used to check filters
        lat, lng = gym['lat'], gym['lng']
        dist = get_earth_dist([lat, lng], self.__location)
        cur_team = self.__locale.get_team_name(to_team_id)
        old_team = self.__locale.get_team_name(from_team_id)

        filters = self.__gym_settings['filters']
        passed = False
        for filt_ct, filt in enumerate(filters):
            # Check the distance from the set location
            if dist != 'unkn':
                if filt.check_dist(dist) is False:
                    if self.__quiet is False:
                        log.info(
                            ("Gym rejected: distance ({:.2f}) was not in"
                             " range {:.2f} to {:.2f} (F #{})").format(
                                 dist, filt.min_dist, filt.max_dist, filt_ct))
                    continue
            else:
                log.debug(
                    "Gym dist was not checked because the manager has no location set."
                )

            # Check the old team
            if filt.check_from_team(from_team_id) is False:
                if self.__quiet is False:
                    log.info(
                        "Gym rejected: {} as old team is not correct (F #{})".
                        format(old_team, filt_ct))
                continue
            # Check the new team
            if filt.check_to_team(to_team_id) is False:
                if self.__quiet is False:
                    log.info(
                        "Gym rejected: {} as current team is not correct (F #{})"
                        .format(cur_team, filt_ct))
                continue

            # Nothing left to check, so it must have passed
            passed = True
            log.debug("Gym passed filter #{}".format(filt_ct))
            break

        if not passed:
            return

        # Check the geofences; check_geofences() returns the name of the
        # containing geofence, or 'unknown' if the gym is outside all of them.
        gym['geofence'] = self.check_geofences('Gym', lat, lng)
        if len(self.__geofences) > 0 and gym['geofence'] == 'unknown':
            log.info("Gym rejected: not inside geofence(s)")
            return

        gym_info = self.__gym_info.get(gym_id, {})

        gym.update({
            "gym_name": gym_info.get('name', 'unknown'),
            "gym_description": gym_info.get('description', 'unknown'),
            "gym_url": gym_info.get(
                'url',
                'https://raw.githubusercontent.com/RocketMap/PokeAlarm/master/icons/gym_0.png'),
            "dist": get_dist_as_str(dist),
            'dir': get_cardinal_dir([lat, lng], self.__location),
            'new_team': cur_team,
            'new_team_id': to_team_id,
            'old_team': old_team,
            'old_team_id': from_team_id,
            'new_team_leader': self.__locale.get_leader_name(to_team_id),
            'old_team_leader': self.__locale.get_leader_name(from_team_id)
        })
        if self.__loc_service:
            self.__loc_service.add_optional_arguments(self.__location,
                                                      [lat, lng], gym)

        if self.__quiet is False:
            log.info(
                "Gym ({}) notification has been triggered!".format(gym_id))

        threads = []
        # Spawn notifications in separate greenlets so they run in the
        # background
        for alarm in self.__alarms:
            threads.append(gevent.spawn(alarm.gym_alert, gym))
            gevent.sleep(0)  # explicit context yield

        for thread in threads:
            thread.join()
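
The gevent.sleep(0) above is the idiomatic explicit yield: it gives freshly spawned greenlets a chance to start before the loop continues. A tiny demo of the effect:

import gevent


def task(n):
    print('task %d starts' % n)
    gevent.sleep(0)  # cooperative yield; lets the other greenlets run first
    print('task %d resumes' % n)


# All "starts" lines print before any "resumes" line.
gevent.joinall([gevent.spawn(task, n) for n in range(3)])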
Example #60
0
    def handle_one_response(self):
        path = self.environ.get('PATH_INFO')

        # Kick non-socket.io requests to our superclass
        if not path.lstrip('/').startswith(self.server.resource):
            return super(SocketIOHandler, self).handle_one_response()

        self.status = None
        self.headers_sent = False
        self.result = None
        self.response_length = 0
        self.response_use_chunked = False
        request_method = self.environ.get("REQUEST_METHOD")
        request_tokens = self.RE_REQUEST_URL.match(path)

        # Parse request URL and QUERY_STRING and do handshake
        if request_tokens:
            request_tokens = request_tokens.groupdict()
        else:
            handshake_tokens = self.RE_HANDSHAKE_URL.match(path)

            if handshake_tokens:
                return self._do_handshake(handshake_tokens.groupdict())
            else:
                # This is no socket.io request. Let the WSGI app handle it.
                return super(SocketIOHandler, self).handle_one_response()

        # Setup the transport and socket
        transport = self.handler_types.get(request_tokens["transport_id"])
        sessid = request_tokens["sessid"]
        socket = self.server.get_socket(sessid)

        # In case this is WebSocket request, switch to the WebSocketHandler
        # FIXME: fix this ugly class change
        if issubclass(transport, (transports.WebsocketTransport,
                                  transports.FlashSocketTransport)):
            self.__class__ = WebSocketHandler
            self.prevent_wsgi_call = True  # thank you
            # TODO: any errors, treat them ??
            self.handle_one_response()

        # Make the socket object available for WSGI apps
        self.environ['socketio'] = socket

        # Create a transport and handle the request likewise
        self.transport = transport(self)

        jobs = self.transport.connect(socket, request_method)
        # Keep track of those jobs (reading, writing and heartbeat jobs) so
        # that we can kill them later with Socket.kill()
        socket.jobs.extend(jobs)

        try:
            # We'll run the WSGI app if it wasn't already done.
            if socket.wsgi_app_greenlet is None:
                # TODO: why don't we spawn a call to handle_one_response here ?
                #       why call directly the WSGI machinery ?
                start_response = lambda status, headers, exc=None: None
                socket.wsgi_app_greenlet = gevent.spawn(
                    self.application, self.environ, start_response)
        except:
            self.handle_error(*sys.exc_info())

        # TODO DOUBLE-CHECK: do we need to joinall here ?
        gevent.joinall(jobs)