Example #1
File: g.py Project: tomsovic/Learn
def poolworker():
    index = 1
    pool = gevent.pool.Pool(5)
    while 1:
        if signal_stop: break
        pool.spawn(_poolworker, index)
        index += 1
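All of the snippets on this page assume import gevent.pool plus project-specific worker functions. A minimal, self-contained sketch of the shared spawn/join pattern (the worker below is a stand-in, not taken from the project above):

import gevent
import gevent.pool

def _worker(index):
    gevent.sleep(0.1)            # stand-in for cooperative I/O
    print('done', index)

pool = gevent.pool.Pool(5)       # at most 5 greenlets run at once
for index in range(20):
    pool.spawn(_worker, index)   # blocks while the pool is full
pool.join()                      # wait for every spawned greenlet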
Example #2
File: glacier.py Project: rymurr/tools
 def upload(self, passphrase=None, *filename):
     encrypt = passphrase is not None
     if not passphrase:
         pass
     elif isinstance(passphrase, str):
         self.passphrase = passphrase
     else:
         with open(passphrase) as f:
             self.passphrase = f.read()
     count = 0
     pool = gevent.pool.Pool(10)
     for file in filename:
         count += 1
         if self.shelve.has_upload(file):
             pool.spawn(continue_greenlet, vault=self.vault, file=file,
                        object_id=self.shelve.get_upload(file), shelf=self.shelve,
                        count=count, total=len(filename), encrypt=encrypt,
                        passphrase=passphrase)
             log.info(
                 'Spawning greenlet to finish filename {0}'.format(file))
         elif not self.shelve.has_archive(file):
             pool.spawn(
                 create_greenlet, vault=self.vault, file=file, callback=self.get_upload_id, shelf=self.shelve, count=count,
                 total=len(filename), encrypt=encrypt, passphrase=passphrase)
             log.info('Spawning greenlet for filename {0}'.format(file))
         else:
             log.info(
                 'File {0} is already at glacier, this is file {1} of {2}'.format(
                     file, count, len(filename)))
     gevent.sleep(0)
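Note that the trailing gevent.sleep(0) only yields once so the spawned greenlets can start running; unlike pool.join() in other examples, it does not wait for them to finish.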
Example #3
def customer_sendtime():
    """
    Send reports at a customer-defined time
    :return:
    """
    try:
        t3 = time.time()
        pool = gevent.pool.Pool(_MAXTHREAD)
        for s in CustomerSetting.objects.filter(
                is_spamrpt_sendtime=True,
                spamrpt_sendtime__contains=time.strftime("%H:%M:")).exclude(
                    customer__gateway_status='disabled'):
            # for s in CustomerSetting.objects.filter(is_spamrpt_sendtime=True).exclude(customer__gateway_status='disabled'):
            if s.spamrpt:
                mails = get_mails_from_sendtime(s.customer_id,
                                                s.spamrpt_sendtime)
                for mail_to in set([m.mail_to for m in mails]):
                    pool.spawn(work_send, s.customer_id, mail_to, 'customer',
                               s.spamrpt_sendtime)
            if s.m_spamrpt:
                pool.spawn(work_send, s.customer_id, '', 'manager',
                           s.spamrpt_sendtime)
        log.info('waiting stop...')
        pool.join()
        t4 = time.time()
        log.info('spam_rpt send total time={}'.format(t4 - t3))
        return
    except (DatabaseError, InterfaceError) as e:
        log.error(u'DatabaseError', exc_info=1)
        connection.close()
    except BaseException as e:
        log.error(u'spam_rpt: exception', exc_info=1)
Example #4
File: master.py Project: yujunz/ssbench
    def cleanup_containers(self, auth_kwargs, container_base, concurrency,
                           policy):
        storage_urls, token = self._authenticate(auth_kwargs)

        _, container_list = client.get_account(random.choice(storage_urls),
                                               token)

        our_container_re = re.compile(self.DELETER_RE %
                                      (container_base, policy))

        start_time = time.time()
        obj_count = 0
        container_count = 0
        pool = gevent.pool.Pool(concurrency)
        for container_info in container_list:
            # e.g. {'count': 41, 'bytes': 496485, 'name': 'doc'}
            if our_container_re.match(container_info['name']):
                pool.spawn(_container_deleter, concurrency, storage_urls,
                           token, container_info)
                container_count += 1
                obj_count += container_info['count']
            else:
                logging.debug('Ignoring non-ssbench container %r',
                              container_info['name'])
        pool.join()
        delta_t = time.time() - start_time
        logging.info('Deleted %.1f containers/s, %.1f objs/s',
                     container_count / delta_t, obj_count / delta_t)
Example #5
def scan():
    sql = "SELECT email, browser, os, country, simple_country, area, ip_first, ip_last, open_total, open_first, open_last FROM active_emails;"
    res = DB.query(REMOTE_PG, sql)
    pool = gevent.pool.Pool(50)
    for data in res:
        pool.spawn(worker, data)
    pool.join()
Example #6
def manager_main():
    """
    Send reports to customer managers
    :return:
    """
    try:
        t3 = time.time()
        mail_model = get_mail_model(get_mail_date())
        customer_list = mail_model.objects.exclude(customer__gateway_status='disabled') \
            .filter(state='reject', mail_to__isnull=False, review_result='reject',
                    customer__customersetting__m_spamrpt=True, customer__customersetting__is_spamrpt_sendtime=False) \
            .distinct('customer_id') \
            .values_list('customer_id', flat=True)

        pool = gevent.pool.Pool(_MAXTHREAD)
        for customer_id in customer_list:
            pool.spawn(work_send, customer_id, '', 'manager')

        log.info('waiting stop...')
        pool.join()
        t4 = time.time()
        log.info('m_spam_rpt send total time={}'.format(t4 - t3))
        return
    except (DatabaseError, InterfaceError) as e:
        log.error(u'DatabaseError', exc_info=1)
        connection.close()
    except BaseException as e:
        log.error(u'spam_rpt: exception', exc_info=1)
        gevent.sleep(10)
Example #7
File: master.py Project: charz/ssbench
    def cleanup_containers(self, auth_kwargs, container_base, concurrency):
        storage_urls, token = self._authenticate(auth_kwargs)

        _, container_list = client.get_account(
            random.choice(storage_urls), token)

        our_container_re = re.compile('%s_\d+$' % container_base)

        start_time = time.time()
        obj_count = 0
        container_count = 0
        pool = gevent.pool.Pool(concurrency)
        for container_info in container_list:
            # e.g. {'count': 41, 'bytes': 496485, 'name': 'doc'}
            if our_container_re.match(container_info['name']):
                pool.spawn(_container_deleter, concurrency, storage_urls,
                           token, container_info)
                container_count += 1
                obj_count += container_info['count']
            else:
                logging.debug('Ignoring non-ssbench container %r',
                              container_info['name'])
        pool.join()
        delta_t = time.time() - start_time
        logging.info('Deleted %.1f containers/s, %.1f objs/s',
                     container_count / delta_t, obj_count / delta_t)
Example #8
def schedule_green_jobs(fns,
                        concurrency=DEFAULT_THREADS,
                        progress=None,
                        total=None):
    import gevent.pool

    if total is None:
        try:
            total = len(fns)
        except TypeError:  # generators don't have len
            pass

    pbar = tqdm(total=total, desc=progress, disable=(not progress))
    results = []

    def updatefn(fn):
        def realupdatefn():
            res = fn()
            pbar.update(1)
            results.append(res)

        return realupdatefn

    pool = gevent.pool.Pool(concurrency)
    for fn in fns:
        pool.spawn(updatefn(fn))

    pool.join()
    pool.kill()
    pbar.close()

    return results
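A hypothetical call site for schedule_green_jobs above; functools.partial binds arguments because the pool runs zero-argument callables (the URLs and fetch function are illustrative only):

import functools

def fetch(url):
    return len(url)              # stand-in for real network I/O

urls = ['https://example.com/a', 'https://example.com/b']
fns = [functools.partial(fetch, u) for u in urls]
results = schedule_green_jobs(fns, concurrency=16, progress='fetching')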
Example #9
def worker_task(user_list):
    pool = gevent.pool.Pool(5)
    for user_id in user_list:
        pool.spawn(do_worker_task, user_id)
        gevent.sleep(0.01)
    pool.join()
    return
Example #10
def print_friend_username(http, friend_id):
    friend_url = URL('/' + str(friend_id))
    friend_url['access_token'] = Token

    # The greenlet blocks here until the request completes
    response = http.get(friend_url.request_uri)
    assert response.status_code == 200

    friend = json.load(response)

    # Check whether a username exists
    if 'username' in friend:
        print(f"{friend['username']} : {friend['name']}")
    else:
        print(f"{friend['name']}")

# Run at most 20 greenlets at a time
pool = gevent.pool.Pool(20)

# Loop over the records
for item in data:
    friend_id = item['id']
    pool.spawn(print_friend_username, http, friend_id)

pool.join()
# Close the connection
http.close()
Example #11
def scanurl():
    hosts = load_target()

    def find_plugins_by_name(search):
        for name, plugins in COMPONENT_PLUGIN_INFO.iteritems():
            if name in search:
                return plugins
        return False

    for i in hosts:
        url, server, title = i['url'], i['server'], i['title'].lower()
        parse = urlparse.urlparse(url)
        port = 80
        l = parse.netloc.split(':')
        if len(l) == 2:
            host, port = l
            port = int(port)
        else:
            host = l[0]

        plugins = find_plugins_by_name(title)
        if plugins:
            for plugin in plugins:
                pool.spawn(run_task, plugin, host, port)

    pool.join()
Example #12
File: ping.py Project: 9cat/bitnodes
def main(argv):
    if len(argv) < 2 or not os.path.exists(argv[1]):
        print("Usage: ping.py [config]")
        return 1

    # Initialize global settings
    init_settings(argv)

    # Initialize logger
    loglevel = logging.INFO
    if SETTINGS['debug']:
        loglevel = logging.DEBUG

    logformat = ("%(asctime)s,%(msecs)05.1f %(levelname)s (%(funcName)s) "
                 "%(message)s")
    logging.basicConfig(level=loglevel,
                        format=logformat,
                        filename=SETTINGS['logfile'],
                        filemode='w')
    print("Writing output to {}, press CTRL+C to terminate..".format(
          SETTINGS['logfile']))

    logging.info("Removing all keys")
    REDIS_CONN.delete('reachable')
    REDIS_CONN.delete('open')
    REDIS_CONN.delete('opendata')

    # Initialize a pool of workers (greenlets)
    pool = gevent.pool.Pool(SETTINGS['workers'])
    pool.spawn(cron, pool)
    pool.join()

    return 0
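Note that the pool itself is passed to cron(), so the periodic task can keep topping the pool up with connection workers; Example #59 shows a matching cron() body.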
Example #13
        def workon(iap):
            pool = gevent.pool.Pool(self.poolsize)

            for i in range(self.poolsize):
                pool.spawn(iap.work)

            pool.join()
Example #14
def interval_sendtime():
    """
    Send reports at each customer's configured interval
    :return:
    """
    try:
        t3 = time.time()
        pool = gevent.pool.Pool(_MAXTHREAD)
        for s in CustomerSetting.objects.filter(
                interval_spamrpt__gt=0).exclude(
                    customer__gateway_status='disabled'):
            interval = int(s.interval_spamrpt)
            customer_id = s.customer_id
            if check_interval(customer_id, interval):
                log.info(
                    'check interval fail(customer_id:{}, interval:{})'.format(
                        customer_id, interval))
                continue
            if s.spamrpt:
                mails = get_mails_from_interval(customer_id, interval)
                for mail_to in set([m.mail_to for m in mails]):
                    pool.spawn(work_send, customer_id, mail_to, 'customer',
                               interval)
            if s.m_spamrpt:
                pool.spawn(work_send, customer_id, '', 'manager', interval)
        log.info('waiting stop...')
        pool.join()
        t4 = time.time()
        log.info('spam_rpt send total time={}'.format(t4 - t3))
        return
    except (DatabaseError, InterfaceError) as e:
        log.error(u'DatabaseError', exc_info=1)
        connection.close()
    except BaseException as e:
        log.error(u'spam_rpt: exception', exc_info=1)
Example #15
def process_events():
    r = redis.from_url(State.config['redis'])
    queues = ['event:p%s' % i for i in xrange(State.config['max_level'] + 1)]

    while True:
        q, raw = r.blpop(queues)
        pool.spawn(process_single_event, json.loads(raw))
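This handler assumes a module-level pool = gevent.pool.Pool(...) created elsewhere; the near-identical Examples #16, #41 and #46 make the same assumption.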
Example #16
def process_events():
    r = redis.from_url(State.config['redis'])
    queues = ['satori-events:%s' % i for i in range(15)]

    while True:
        o, raw = r.blpop(queues)
        pool.spawn(process_single_event, json.loads(raw))
Example #17
def propogate():
    common = {'foo': 'bar', 'bar': 'foo'}
    pool = gevent.pool.Pool(10)
    pool.spawn(change_foo, common)
    pool.spawn(change_bar, common)
    pool.join()
    print common
Example #18
def main():
    init()

    # log.info('start worker_other.....')
    # worker_other()
    # log.info('finish worker_other.....')
    # gevent.sleep(0.1)

    log.info('start worker_day.....')
    pool = gevent.pool.Pool(10)
    for tablename in glb_maillog_tables:
        pool.spawn(worker_day, tablename)
        gevent.sleep(0.02)
    pool.join()
    gevent.sleep(0.1)
    log.info('finish worker_day.....')

    log.info('start worker_success.....')
    worker_success()
    log.info('finish worker_success.....')

    log.info('start worker_redis.....')
    worker_redis()
    log.info('finish worker_redis.....')

    return
Example #19
File: tests.py Project: 425776024/Learn
def async():
    index = 200000
    pool = gevent.pool.Pool(100)
    while index:
        index -= 1
        pool.spawn(_async)
    pool.join()
Example #20
def test_fox_cub(games_to_test, dataset, client):
    pool = gevent.pool.Pool(1024)
    # unique testing id
    session_id = str(uuid.uuid4())

    for game in games_to_test:
        _, features = dataset.prepare_observation(game)

        home_team_season = dataset.get_team_stats(game.HomeTeam,
                                                  game.Season)['season']
        away_team_season = dataset.get_team_stats(game.AwayTeam,
                                                  game.Season)['season']

        home_team_res = home_team_season.get_team_scores(game.HomeTeam)
        away_team_res = away_team_season.get_team_scores(game.AwayTeam)

        season_avg = {
            "avgScoredHome": features.avg_goals_home_team / 2,
            "avgScoredAway": features.avg_goals_away_team / 2
        }
        pool.spawn(client.get_stats, home_team_res, away_team_res, season_avg,
                   game.HomeTeam, game.AwayTeam, session_id)

    pool.join()
    return session_id
Example #21
def propogate():
    common = {'foo': 'bar', 'bar': 'foo'}
    pool = gevent.pool.Pool(10)
    pool.spawn(change_foo, common)
    pool.spawn(change_bar, common)
    pool.join()
    print common
Example #22
 def _regular_run(self):
     setproctitle.setproctitle('TDDC-ProxyAliveCheck-{}'.format(default_config.FEATURE))
     log.info('Proxy Alive Check Is Running.')
     pool = gevent.pool.Pool(self.REPEATED_CONCURRENT)
     gevent.sleep(5)
     while True:
         keys = RedisEx().keys('tddc:worker:config:common:proxy_check_list:*')
         items = [RedisEx().hgetall(key) for key in keys]
         platforms = {item.get('s_platform') for item in items
                      if item and item.get('s_platform') and item.get('s_feature')
                      and item.get('s_url') and item.get('b_valid')}
         pre_timestamp = RedisEx().hgetall('tddc:proxy:repeated')
         ts = int(time.time())
         for platform in platforms:
             if ts - int(pre_timestamp.get(platform, 0)) < self.REPEATED_INTERVAL:
                 continue
             coroutines = [
                 pool.spawn(self._check_handle, self.proxy_conf.pool, platform, proxy)
                 for proxy in RedisEx().smembers('{}:{}'.format(self.proxy_conf.pool, platform))
             ]
             gevent.joinall(coroutines)
             RedisEx().hset('tddc:proxy:repeated', platform, ts)
         for http_type in ['http', 'https']:
             if ts - int(pre_timestamp.get(http_type, 0)) < self.REPEATED_INTERVAL:
                 continue
             coroutines = [
                 pool.spawn(self._check_handle, self.proxy_conf.source, http_type, proxy)
                 for proxy in RedisEx().smembers('{}:{}'.format(self.proxy_conf.source, http_type))
             ]
             gevent.joinall(coroutines)
             RedisEx().hset('tddc:proxy:repeated', http_type, ts)
         gevent.sleep(10)
Example #23
def main_loop():
    # pool_size = config.getint('gevent', 'worker.pool.size')
    # (crimi) - Setting pool_size to 1 to avoid deadlocks. This is until we are able to demonstrate that
    #           the deadlocks are able to be avoided.
    #           An improvement would be to do the DB updates on single worker, allowing everything else to
    #           happen concurrently. But expected load for 1.0 isn't great .. more than manageable with 1 worker.
    #
    pool_size = 1
    pool = gevent.pool.Pool(pool_size)
    logger.info('Started gevent pool with size %d', pool_size)

    consumer = kafkareader.create_consumer(config)

    while True:
        try:
            raw_event = kafkareader.read_message(consumer)
            logger.debug('READ MESSAGE %s', raw_event)
            event = MessageItem(json.loads(raw_event))

            if event.get_command() in known_commands:
                pool.spawn(topology_event_handler, event)
            else:
                logger.debug('Received unknown type or command %s', raw_event)

        except Exception as e:
            logger.exception(e.message)
Example #24
 def go(self):
     logging.debug('Worker %s starting...', self.worker_id)
     gevent.spawn(self._result_writer)
     pool = gevent.pool.Pool(self.concurrency)
     jobs = self.work_pull.recv()
     if self.profile_count:
         import cProfile
         prof = cProfile.Profile()
         prof.enable()
     gotten = 1
     self.spawned = 0
     while jobs:
         job_data = msgpack.loads(jobs)
         for job_datum in job_data:
             if 'container' in job_datum:
                 logging.debug('WORK: %13s %s/%-17s',
                               job_datum['type'], job_datum['container'],
                               job_datum['name'])
             else:
                 logging.debug('CMD: %13s', job_datum['type'])
             if job_datum['type'] == 'SUICIDE':
                 logging.info('Got SUICIDE; closing sockets and exiting.')
                 self.work_pull.close()
                 self.results_push.close()
                 os._exit(88)
             pool.spawn(self.handle_job, job_datum)
             self.spawned += 1
             if self.profile_count and gotten >= self.profile_count:
                 prof.disable()
                 prof_output_path = '/tmp/worker_go.%d.prof' % os.getpid()
                 prof.dump_stats(prof_output_path)
                 logging.info('PROFILED worker go() to %s', prof_output_path)
                 self.profile_count = None
             gotten += 1
         jobs = self.work_pull.recv()
Example #25
        def workon(iap):
            pool = gevent.pool.Pool(self.poolsize)

            for i in range(self.poolsize):
                pool.spawn(iap.work)

            pool.join()
Example #26
def main(argv):
    if len(argv) < 2 or not os.path.exists(argv[1]):
        print("Usage: ping.py [config]")
        return 1

    # Initialize global settings
    init_settings(argv)

    # Initialize logger
    loglevel = logging.INFO
    if SETTINGS['debug']:
        loglevel = logging.DEBUG

    logformat = ("%(asctime)s,%(msecs)05.1f %(levelname)s (%(funcName)s) "
                 "%(message)s")
    logging.basicConfig(level=loglevel,
                        format=logformat,
                        filename=SETTINGS['logfile'],
                        filemode='w')
    print("Writing output to {}, press CTRL+C to terminate..".format(
        SETTINGS['logfile']))

    logging.info("Removing all keys")
    REDIS_CONN.delete('reachable')
    REDIS_CONN.delete('open')
    REDIS_CONN.delete('opendata')

    # Initialize a pool of workers (greenlets)
    pool = gevent.pool.Pool(SETTINGS['workers'])
    pool.spawn(cron, pool)
    pool.join()

    return 0
Example #27
File: test2.py Project: 425776024/Learn
def scan():
    index = 0
    pool = gevent.pool.Pool(10)
    while True:
        index += 1
        pool.spawn(woker1, index)
        gevent.sleep(1)
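    # Note: this join is never reached; the while True loop above has no break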
    pool.join()
Example #28
def woker_imap():
    smtp_dict = getSmtpData()
    pool = gevent.pool.Pool(10)
    for smtp_account_id in smtp_dict:
        smtp_list = smtp_dict[smtp_account_id]
        pool.spawn(do_woker_imap, smtp_account_id, smtp_list)
    pool.join()
    return
Example #29
def server(port, pool):
    s = socket.socket()
    s.bind(('0.0.0.0', port))
    s.listen()
    while True:
        cli, addr = s.accept()
        print('Welcome %s to SocketServer' % str(addr[0]))
        pool.spawn(handle_request, cli)
Example #30
def worker_redis(cr):
    pool = gevent.pool.Pool(10)
    for start in range(0, 100, 10):
        for domain in GLB_DOMAINS:
            end = start + 10
            pool.spawn(do_worker_redis, cr, domain, start, end)
    pool.join()
    return
Example #31
def scanner():
    pool = gevent.pool.Pool(_MAXTHREAD)
    for t in _TABLES:
        if signal_stop: break
        pool.spawn(worker, t)
        gevent.sleep(0.01)
    pool.join()
    return
Example #32
 def pooledDownloadContent(self, inner_paths, pool_size=100):
     self.log.debug("New downloadContent pool: len: %s" % len(inner_paths))
     self.worker_manager.started_task_num += len(inner_paths)
     pool = gevent.pool.Pool(pool_size)
     for inner_path in inner_paths:
         pool.spawn(self.downloadContent, inner_path)
         self.worker_manager.started_task_num -= 1
     self.log.debug("Ended downloadContent pool len: %s" % len(inner_paths))
Example #33
def worker_redis():
    T = ['163.com', 'qq.com', '*']
    pool = gevent.pool.Pool(10)
    for index in xrange(1, 11):
        for domain in T:
            pool.spawn(do_worker_redis, domain, index)
    pool.join()
    return
Example #34
def server(port, pool):
    s = socket.socket()
    s.bind(('0.0.0.0', port))
    s.listen()
    while True:
        cli, addr = s.accept()
        #print("Welcome %s to SocketServer" % str(addr[0]))
        pool.spawn(handle_request, cli)    # run the handler as a coroutine via pool.spawn()
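A runnable sketch around the server() above, adding the imports the snippet omits, with a trivial echo handler standing in for handle_request:

from gevent import monkey; monkey.patch_all()    # make sockets cooperative
import socket                                    # used by server() above
import gevent.pool

def handle_request(cli):
    data = cli.recv(1024)        # read one request chunk
    if data:
        cli.sendall(data)        # echo it back
    cli.close()

if __name__ == '__main__':
    server(8000, gevent.pool.Pool(100))          # at most 100 concurrent handlers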
Example #35
def worker_3():
    log.info('start worker_3...')
    pool = gevent.pool.Pool(5)
    while True:
        pool.spawn(do_worker_3, random.randint(1, 100000))
    pool.join()
    log.info('finish worker_3...')
    return
Example #36
 def pooledDownloadContent(self, inner_paths, pool_size=100):
     self.log.debug("New downloadContent pool: len: %s" % len(inner_paths))
     self.worker_manager.started_task_num += len(inner_paths)
     pool = gevent.pool.Pool(pool_size)
     for inner_path in inner_paths:
         pool.spawn(self.downloadContent, inner_path)
         self.worker_manager.started_task_num -= 1
     self.log.debug("Ended downloadContent pool len: %s" % len(inner_paths))
Example #37
File: defaultdict.py Project: Wc30/pyoop
def consumer():
    pool = gevent.pool.Pool(5)
    while 1:
        if not GLB_THREADS:
            gevent.sleep(0.2)
            continue
        ip, email = GLB_THREADS.pop()
        pool.spawn(__consumer, ip, email)
Example #38
File: Site.py Project: Emeraude/ZeroNet
 def pooledDownloadFile(self, inner_paths, pool_size=100, only_if_bad=False):
     self.log.debug("New downloadFile pool: len: %s" % len(inner_paths))
     self.worker_manager.started_task_num += len(inner_paths)
     pool = gevent.pool.Pool(pool_size)
     for inner_path in inner_paths:
         if not only_if_bad or inner_path in self.bad_files:
             pool.spawn(self.needFile, inner_path, update=True)
         self.worker_manager.started_task_num -= 1
     self.log.debug("Ended downloadFile pool len: %s" % len(inner_paths))
Example #39
File: search.py Project: aknott/scearch
	def dosearch(self):
		for offset in [i*200 for i in range(40)]:
			pool.spawn(self.getTracks,self.querystr,offset)
		pool.join()
		result = sorted(self.results, key=lambda x:x.tosort, reverse=True)[:20]
		for t in result:
			t.widget = makeWidget(t.id)
			print "(%s) %s - %s"%(t.tosort,t.username.encode('ascii', 'ignore'),t.title.encode('ascii', 'ignore'))
		return result
Example #40
    def run_with_gevent():
        from qs.misc import call_in_loop

        import gevent.pool
        pool = gevent.pool.Pool()
        for i in range(numgreenlets):
            pool.spawn(call_in_loop(1.0, start_worker))

        pool.join()
Example #41
File: main.py Project: fjghc/satori
def process_events():
    r = redis.from_url(State.config['redis'])
    queues = [
        'satori-events:%s' % i for i in range(15)
    ]

    while True:
        o, raw = r.blpop(queues)
        pool.spawn(process_single_event, json.loads(raw))
Example #42
def test_proxy_list(http_proxies, pool_size, server_port):
    pool = gevent.pool.Pool(pool_size)
    my_ip = whats_my_ip()
    globals()['LOCAL_SERVER'] = 'http://{}:{}/'.format(my_ip, server_port)
    for proxy in http_proxies:
        ip, port = proxy.rsplit(':')
        pool.spawn(test_single_proxy, my_ip, ip, port)
    pool.join()
    queue.put(StopIteration)
Example #43
 def resolve_hostname(self):
     """
     Concurrently resolves hostname for the unresolved addresses.
     """
     pool = gevent.pool.Pool(len(self.resolved['hostname']))
     with gevent.Timeout(15, False):
         for address in self.resolved['hostname']:
             pool.spawn(self.set_hostname, address)
     pool.join()
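Note that gevent.Timeout(15, False) only bounds the spawning loop; the pool.join() that follows waits with no timeout.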
Example #44
def upgrade_article(row):
	pool.spawn(_upgrade_article, row)

	global count
	count += 1
	if count > 50:
		count = 0
		for greenlet in list(pool):
			if greenlet.dead:
				pool.discard(greenlet)
Example #45
File: test.py Project: iainb/simkad
def spawn_clients(pool, network, n):
    clients = []
    last, kad_client = spawn_client(pool, network)
    clients.append(kad_client)
    for i in xrange(0,n-1):
        last, kad_client = spawn_client(pool, network, last)
        clients.append(kad_client)

    print "test: spawned %s nodes" % n
    pool.spawn(client_summary, clients)
Example #46
def process_events():
    r = redis.from_url(State.config['redis'])
    queues = [
        'event:p%s' % i for i in
        xrange(State.config['max_level'] + 1)
    ]

    while True:
        q, raw = r.blpop(queues)
        pool.spawn(process_single_event, json.loads(raw))
Example #47
    def start(self):
        pool = gevent.pool.Pool(size=self.concurrency)
        try:
            for i in xrange(1, self.num_connectors + 1):
                pool.spawn(self.connector)
                time.sleep(self.spawn_interval)

            pool.join()
        except KeyboardInterrupt:
            pass
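Note: time.sleep() here blocks the whole process between spawns unless the time module has been monkey-patched; gevent.sleep() is the cooperative equivalent.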
Example #48
def sync_table(table, fields):
    f1 = ", ".join(fields)
    pieces = {
        "ataobao2.item": 100,
        "ataobao2.item_by_date": 1000,
        "ataobao2.brand_by_date": 10,
        "ataobao2.shop_by_date": 10,
    }.get(table, 1)
    start = -2 ** 63
    step = 2 ** 64 / pieces
    print "migrating {} {}".format(table, f1)

    for i in range(pieces):
        start = -2 ** 63 + step * i
        end = min(2 ** 63 - 1, -2 ** 63 + step * (i + 1))
        with db1.connection() as cur:
            print "piece", i + 1
            # print 'select {} from {} where token({})>=:v1 and token({})<:v2'.format(f1, table, fields[0], fields[0]), dict(v1=start, v2=end)
            if table.endswith("_by_date") and "datestr" in fields:
                d0 = (datetime.utcnow() + timedelta(hours=8) - timedelta(days=2)).strftime("%Y-%m-%d")
                cur.execute(
                    "select {} from {} where token({})>=:v1 and token({})<:v2 and datestr>=:d0 allow filtering".format(
                        f1, table, fields[0], fields[0]
                    ),
                    dict(v1=start, v2=end, d0=d0),
                    consistency_level="ONE",
                )
            else:
                cur.execute(
                    "select {} from {} where token({})>=:v1 and token({})<:v2".format(f1, table, fields[0], fields[0]),
                    dict(v1=start, v2=end),
                    consistency_level="ONE",
                )
            for j, row in enumerate(cur):
                if j % 1000 == 0:
                    print "syncd {}".format(j)
                params = {}
                fs = list(fields)
                for k, v in zip(fields, row):
                    if k == "date":
                        if v is not None and len(v) == 8:
                            v = struct.unpack("!q", v)[0]
                        else:
                            continue
                    if v is not None:
                        params[k] = v
                fs = params.keys()
                fs1 = ", ".join(fs)
                fs2 = ", ".join([":" + f for f in fs])
                if "id" in params or "datestr" in params or "name" in params:
                    if table == "ataobao2.item_by_date" and "date" not in params:
                        continue
                    # print 'INSERT INTO {} ({}) VALUES ({})'.format(table, fs1, fs2), params
                    pool.spawn(db2.execute, "insert into {} ({}) values ({})".format(table, fs1, fs2), params)
Example #49
def download_images(posts):
    """Downloads images for the given posts"""
    pool = gevent.pool.Pool(size=96)
    for post in progress.bar(posts, width=60, every=100):
        if not post.static:
            continue

        pool.spawn(download_image, post)

    # wait for all jobs to finish
    pool.join()
Example #50
def crawler(u):
    global crawled

    response = requests.get(u)
    print response.status_code, u

    for link in re.findall('<a href="(http.*?)"', response.content):

        if crawled < 10 and not pool.full():
            crawled += 1
            pool.spawn(crawler, link)
Example #51
def make_nuwiki(fsdir, metabook, options, podclient=None, status=None):
    id2wiki = {}
    for x in metabook.wikis:
        id2wiki[x.ident] = (x, [])

    for x in metabook.articles():
        assert x.wikiident in id2wiki, "no wikiconf for %r (%s)" % (x.wikiident, x)
        id2wiki[x.wikiident][1].append(x)

    is_multiwiki = len(id2wiki) > 1

    if is_multiwiki:
        progress = fetch.shared_progress(status=status)
    else:
        progress = None

    fetchers = []
    for id, (wikiconf, articles) in id2wiki.items():
        if id is None:
            id = ""
            assert not is_multiwiki, "id must be set in multiwiki"

        if not is_multiwiki:
            id = ""

        assert "/" not in id, "bad id: %r" % (id,)
        my_fsdir = os.path.join(fsdir, id)

        if is_multiwiki:
            my_mb = collection()
            my_mb.items = articles
        else:
            my_mb = metabook

        wikitrust(wikiconf.baseurl, my_mb)

        fetchers.append(start_fetcher(fsdir=my_fsdir, progress=progress, base_url=wikiconf.baseurl,
                                      metabook=my_mb, options=options, podclient=podclient, status=status))

    if is_multiwiki:
        if not os.path.exists(fsdir):
            os.makedirs(fsdir)
        open(os.path.join(fsdir, "metabook.json"), "wb").write(metabook.dumps())
        myjson.dump(dict(format="multi-nuwiki"), open(os.path.join(fsdir, "nfo.json"), "wb"))

    pool = gevent.pool.Pool()
    for x in fetchers:
        pool.spawn(x.run)
    pool.join(raise_error=True)

    import signal
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    signal.signal(signal.SIGTERM, signal.SIG_DFL)
Example #52
def runworker(args):
    gevent.monkey.patch_all()
    initlog(optdict.get('-l', 'INFO'))

    app = apps.Application(args[0])
    size = int(optdict.get('-s', '100'))
    pool = gevent.pool.Pool(size)
    for n in xrange(size):
        pool.spawn(worker.BeanstalkWorker(
                app, optdict['-q'], optdict.get('-H', 'localhost'),
                optdict.get('-p', '11300'), int(optdict.get('-t', '10'))).run)
    pool.join()
Example #53
    def run(self):
        def update_latest_ids(cid):
            data = get_json(cid, page=1, sort='_oldstart')
            nids = get_ids(data)
            if nids:
                print 'found {} ids in category {}'.format(len(nids), cid)
                ai2.put(*list(nids))

        pool = gevent.pool.Pool(10)
        for cid in fecids:
            pool.spawn(update_latest_ids, cid)
        pool.join()
Example #54
File: test.py Project: iainb/simkad
def spawn_client(pool, network, initial_node=None):
    if initial_node is None:
        nodes = []
    else:
        nodes = [initial_node]

    rpc_chan = queue.Queue()
    rpc_client = Rpc_Client(network, rpc_chan)
    node = rpc_client.return_node()
    kad_client = Kad_Client(pool, rpc_client, rpc_chan)
    pool.spawn(client_actions, kad_client, nodes)
    return node, kad_client
Example #55
 def process_group(group):
     group_results = []
     pool = gevent.pool.Pool(urls_group_size)
     for url in group:
         if not is_valid_url(url, allow_no_protocol=True):
             completed_urls[url] = (False, "Invalid URL")
             if len(completed_urls) == len(urls): #all done, trigger callback
                 return completed_callback(completed_urls)
             else:
                 continue
         assert url.startswith('http://') or url.startswith('https://')
         pool.spawn(make_stream_request, url)
     pool.join()
Example #56
File: geventd.py Project: rmyers/daemons
    def run(self):
        """This method puts the daemon into a poll/action loop.

        This method should not be extended or overwritten. Instead,
        implementations of this daemon should implement the 'get_message()'
        and 'handle_message()' methods.

        This loop makes use of a gevent Pool to manage maximum greenthread
        concurrency. The behaviour of the pool, and greenthreads in general, is
        such that there must be a cooperative yield in order for gevent to
        switch context into another greenthread.

        This loop, by default, will only yield on an empty message and when the
        Pool has allocated the maximum allowed greenthreads. To yield the
        loop after each message, set the aggressive_yield bit to True.
        """

        pool = gevent.pool.Pool(size=self.pool_size)

        while True:

            message = self.get_message()
            LOG.debug(
                "Daemon (%r) got message (%r).",
                self.pidfile,
                message
            )

            if message is None:

                LOG.debug(
                    "Daemon (%r) received no message. Going idle for (%r).",
                    self.pidfile,
                    self.idle_time
                )

                self.sleep(self.idle_time)
                continue

            LOG.debug(
                "Daemon (%r) attempting to start new greenthread with (%r) "
                "active and (%r) free",
                self.pidfile,
                pool.size - pool.free_count(),
                pool.free_count()
            )
            pool.spawn(self.handle_message, message)

            if self.aggressive_yield is True:

                gevent.sleep(0)
Example #57
File: glacier.py Project: rymurr/tools
 def retrieve(self, topic=None, *filename):
     count = 0
     pool = gevent.pool.Pool(10)
     for file in filename:
         count += 1
         if self.shelve.has_archive(file):
             object_id = self.shelve.get_archive(file)
             arn = get_arn(self.key, self.secret, topic) if topic else None
             pool.spawn(
                 fetch_greenlet, vault=self.vault, object_id=object_id,
                 count=count, total=len(filename), arn=arn, shelf=self.shelve, file=file)
             log.info(
                 "File {0} is being fetched from glacier, will call back to {1}")
     gevent.sleep(0)
Example #58
File: glacier.py Project: rymurr/tools
 def delete(self, *filename):
     count = 0
     pool = gevent.pool.Pool(10)
     for file in filename:
         count += 1
         print file, self.shelve.has_archive(file)
         if self.shelve.has_archive(file):
             object_id = self.shelve.get_archive(file)
             pool.spawn(
                 delete_greenlet, vault=self.vault, object_id=object_id, count=count,
                 total=len(filename), shelf=self.shelve, file=file)
             log.info(
                 "File {0} is being fetched from glacier, will call back to {1}")
     gevent.sleep(0)
Example #59
File: ping.py Project: ayeowch/bitnodes
def cron(pool):
    """
    Assigned to a worker to perform the following tasks periodically to
    maintain continuous network-wide connections:

    [Master]
    1) Checks for a new snapshot
    2) Loads new reachable nodes into the reachable set in Redis
    3) Signals listener to get reachable nodes from opendata set
    4) Sets bestblockhash in Redis

    [Master/Slave]
    1) Spawns workers to establish and maintain connection with reachable nodes
    """
    publish_key = 'snapshot:{}'.format(hexlify(CONF['magic_number']))
    snapshot = None

    while True:
        if CONF['master']:
            new_snapshot = get_snapshot()

            if new_snapshot != snapshot:
                nodes = get_nodes(new_snapshot)
                if len(nodes) == 0:
                    continue

                logging.info("New snapshot: %s", new_snapshot)
                snapshot = new_snapshot

                logging.info("Nodes: %d", len(nodes))

                reachable_nodes = set_reachable(nodes)
                logging.info("New reachable nodes: %d", reachable_nodes)

                # Allow connections to stabilize before publishing snapshot
                gevent.sleep(CONF['socket_timeout'])
                REDIS_CONN.publish(publish_key, int(time.time()))

            connections = REDIS_CONN.scard('open')
            logging.info("Connections: %d", connections)

            set_bestblockhash()

        for _ in xrange(min(REDIS_CONN.scard('reachable'), pool.free_count())):
            pool.spawn(task)

        workers = CONF['workers'] - pool.free_count()
        logging.info("Workers: %d", workers)

        gevent.sleep(CONF['cron_delay'])
Example #60
    def test_contest(self):
        ns = NamespaceSemaphore()
        ns.acquire('/ex', 3, 0)
        self.q = gevent.queue.Queue()
        pool = gevent.pool.Pool(size=5)
        for i in xrange(5):
            pool.spawn(self.acquire, ns)

        pool.join()
        self.assertEquals(self.q.qsize(), 1)
        self.q.get()
        for i in xrange(5):
            pool.spawn(self.acquire, ns)
        pool.join()
        self.assertEquals(self.q.qsize(), 1)