Code example #1
File: views.py Project: skroutz/ganetimgr
def instance_owners(request):
    if request.user.is_superuser or request.user.has_perm('ganeti.view_instances'):
        p = Pool(20)
        instancesall = []
        bad_clusters = []

        def _get_instances(cluster):
            try:
                instancesall.extend(cluster.get_user_instances(request.user))
            except (GanetiApiError, Exception):
                bad_clusters.append(cluster)
        if not request.user.is_anonymous():
            p.imap(_get_instances, Cluster.objects.all())
            p.join()
        instances = [i for i in instancesall if i.users]

        def cmp_users(x, y):
            return cmp(",".join([ u.username for u in x.users]),
                       ",".join([ u.username for u in y.users]))
        instances.sort(cmp=cmp_users)

        return render_to_response("instance_owners.html",
                                  {"instances": instances},
                                  context_instance=RequestContext(request))
    else:
        return HttpResponseRedirect(reverse('user-instances'))
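The ganetimgr snippets in this listing all share one fan-out pattern: spawn a greenlet per cluster, let each worker append to shared lists (safe under gevent, where greenlets only yield at blocking calls), then wait on the pool before reading the results. A minimal, self-contained sketch of that pattern; fan_out, fetch_one and items are illustrative names, not part of the project:

from gevent.pool import Pool

def fan_out(items, fetch_one, concurrency=20):
    pool = Pool(concurrency)
    results, errors = [], []

    def worker(item):
        try:
            results.append(fetch_one(item))
        except Exception:
            errors.append(item)

    # consuming the imap iterator (or calling pool.join()) ensures every
    # worker has finished before the shared lists are read
    for _ in pool.imap(worker, items):
        pass
    return results, errors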
Code example #2
def prepare_clusternodes(cluster=None):
    if not cluster:
        clusters = Cluster.objects.all()
    else:
        clusters = Cluster.objects.filter(slug=cluster)
    p = Pool(15)
    nodes = []
    bad_clusters = []
    bad_nodes = []

    def _get_nodes(cluster):
        try:
            for node in cluster.get_cluster_nodes():
                nodes.append(node)
                if node['offline'] is True:
                    bad_nodes.append(node['name'])
        except (GanetiApiError, Exception):
            cluster._client = None
            bad_clusters.append(cluster)
        finally:
            close_connection()

    p.imap(_get_nodes, clusters)
    p.join()
    return nodes, bad_clusters, bad_nodes
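Note the close_connection() in the finally block: under gevent each greenlet can hold its own Django database connection, and closing it explicitly keeps idle connections from accumulating. (close_connection is the django.db helper from the Django versions this project targets; newer releases use close_old_connections instead.)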
Code example #3
def instance_owners(request):
    if request.user.is_superuser or request.user.has_perm('ganeti.view_instances'):
        p = Pool(20)
        instancesall = []
        bad_clusters = []

        def _get_instances(cluster):
            t = Timeout(RAPI_TIMEOUT)
            t.start()
            try:
                instancesall.extend(cluster.get_user_instances(request.user))
            except (GanetiApiError, Timeout):
                bad_clusters.append(cluster)
            finally:
                t.cancel()

        if not request.user.is_anonymous():
            p.imap(_get_instances, Cluster.objects.all())
            p.join()
        instances = [i for i in instancesall if i.users]
        def cmp_users(x, y):
            return cmp(",".join([ u.username for u in x.users]),
                       ",".join([ u.username for u in y.users]))
        instances.sort(cmp=cmp_users)
    
        return render_to_response("instance_owners.html",
                                  {"instances": instances},
                                  context_instance=RequestContext(request))
    else:
        return HttpResponseRedirect(reverse('user-instances'))
Code example #4
def match_reports():
    report_set = json.loads(request.data)
    coordinates = []
    coord_indices = {}
    matched_reports = []
    matching_result = []
    lookupPool = Pool(50)

    for found, matched, report in lookupPool.imap(lookup_and_match, 
                                        ((r, report_set['repository'], report_set['readsetId']) 
                                            for r in report_set['reports'])):
        if report['clinicalSignificance'] in ('Uncertain significance', 'not provided', 'conflicting data from submitters', 'other'):
            continue
        if not found:
            new_coord = Coordinate(report['chrom'], report['seqStart'], report['seqEnd'])
            coordinates, coord_index = push_coordinates(coordinates, new_coord)
            coord_indices.setdefault(coord_index, []).append(report)
        elif matched:
            matched_reports.append(report)


    read_search = partial(make_read_search,
                        report_set['repository'],
                        report_set['readsetId'])

    def search_and_match(coord_index):
        coord = coordinates[coord_index]
        result = read_search(coord, coord_index)
        reads = ijson.items(HTTPStream(result), 'reads.item')
        reports = coord_indices[coord_index]
        reports_visited = []
        for read in reads:
            read_coord = Coordinate(read['referenceSequenceName'],
                                    read['position'],
                                    read['position']+get_ref_length(read['cigar']))
            covered_reports = [report for report in reports
                                    if (read_coord.chrom == report['chrom'] and
                                        read_coord.start <= report['seqStart'] and
                                        read_coord.end > report['seqEnd'])]
            if covered_reports:
                new_matched_reports = [report for report in covered_reports 
                                        if (report['reportId'] not in reports_visited and
                                            match(report, read))]
                matched_reports.extend(new_matched_reports)
                reports_visited.extend([report['reportId'] for report in covered_reports
                                            if report['reportId'] not in reports_visited])
                #push read into cache:
                read['repository'] = report_set['repository']
                read['readsetId'] = report_set['readsetId']
                read['start'] = read['position']
                read['end'] = read['position'] + get_ref_length(read['cigar'])
                CachedReads.save(read)
            if len(reports_visited) >= len(reports):
                break

    searchPool = Pool(50)
    # iterate the results so every search completes before we respond;
    # a bare generator expression here would never be consumed
    for _ in searchPool.imap(search_and_match, coord_indices.keys()):
        pass

    return Response(json.dumps(matched_reports), content_type='application/json; charset=UTF-8')
Code example #5
File: models.py Project: skroutz/ganetimgr
    def all(self):
        users, orgs, groups, instanceapps, networks = preload_instance_data()
        p = Pool(20)
        instances = []

        def _get_instances(cluster):
            try:
                instances.extend(cluster.get_instances())
            except (GanetiApiError, Exception):
                pass
            finally:
                close_connection()
        clusters = Cluster.objects.all()
        p.imap(_get_instances, clusters)
        p.join()
        return instances
Code example #6
File: fetchurls.py Project: enanale/code-samples
def test():
	ids = image_search("moon")

	# let's be nice to archive.org and limit concurrent requests to 2
	gpool = Pool(2)
	for urls in gpool.imap(get_image_urls, ids):
		print urls
Code example #7
 def _path_to_xb(self, paths):
     d = self.embedding_service.dim()
     xb = np.ndarray(shape=(len(paths), d), dtype=np.float32)
     p = Pool(12)
     for i, emb in enumerate(p.imap(path_to_embedding, paths)):
         xb[i] = emb
     return xb
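This indexing works because Pool.imap, like the built-in map, yields results in the same order as its input, so enumerate lines each embedding up with its row in xb; Pool.imap_unordered, by contrast, yields results in completion order.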
Code example #8
def call_main():
    rawsql = None
    try:
        import sys
        import getopt
        options, args = getopt.getopt(sys.argv[1:], "hr:", ['help', "raw="])
        for name, value in options:
            if name in ('-h', '--help'):
                print("error")
            elif name in ('-r', '--raw'):
                rawsql = value
    except getopt.GetoptError:
        print("error")

    ora_rawsql = "select * from test where rownum < 10"
    ora_conn = OraPlumberConn(user=ora_con_dict['user'],
                              passwd=ora_con_dict['passwd'],
                              schema=ora_con_dict['schema'])
    ora_conn.execute(ora_rawsql)

    pg_rawsql = "select * from test limit 10"
    pg_conn = PgPlumberConn(database=pg_con_dict['database'],
                            user=pg_con_dict['user'],
                            password=pg_con_dict['password'],
                            host=pg_con_dict['host'],
                            port=pg_con_dict['port'])
    pg_conn.execute(pg_rawsql)

    insert_sql = 'insert into test values {}'
    process = ProcessInsert(pg_conn, insert_sql)

    from gevent.pool import Pool
    p = Pool(4)
    for _d in p.imap(process.handle, ora_conn.find_batch()):
        pass
Code example #9
File: simple.py Project: bynoting/python
def job():

	# gevent.joinall([gevent.spawn(dosomething1,2),gevent.spawn(dosomething2,2)])
	import time
	import gevent
	from gevent.pool import Pool
	from gevent import threadpool

	# TODO: note the difference between Pool and threadpool for map: with
	# CPU-bound (non-IO) work and no explicit gevent.sleep(), a Pool degrades
	# into a sequential program, so switch to a threadpool in that case
	pool = Pool(3)
	start = time.time()
	# for _ in xrange(2):
	# 	pool.add(gevent.spawn(time.sleep, 1))
	pool.imap(time.sleep,xrange(3))
	gevent.wait()
	delay = time.time() - start
	print 'Running time.sleep(n) for n in 0..2 on a pool of size 3. Should take about 2 seconds: %.3fs' % delay
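As the TODO hints, the sleep itself is only cooperative if the standard library has been monkey-patched; otherwise a plain time.sleep blocks the whole hub and the pool runs sequentially. A minimal sketch of the patched version, assuming a current gevent:

from gevent import monkey; monkey.patch_time()  # make time.sleep yield to the hub
import time
from gevent.pool import Pool

pool = Pool(3)
start = time.time()
for _ in pool.imap(time.sleep, range(3)):  # sleep 0s, 1s and 2s concurrently
    pass
print('took %.3fs, expected about 2 seconds' % (time.time() - start))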
Code example #10
def main():
    top_story_ids = requests.get(
        "{}/topstories.json".format(API_PREFIX)).json()
    pool = Pool(50)
    rust_stories = list(
        filter(lambda story: "Rust" in story.get("title", ""),
               pool.imap(fetch_story, top_story_ids)))[:MAX_COUNT]
    stories_length = len(rust_stories)
    if stories_length < MAX_COUNT:
        existed_story_ids = set(map(int, redis.lrange(REDIS_KEY, 0, -1)))
        existed_story_ids -= set(item["id"] for item in rust_stories)
        rust_stories.extend(
            pool.imap(fetch_story,
                      list(existed_story_ids)[:MAX_COUNT - stories_length]))
    redis.lpush(REDIS_KEY, *[item["id"] for item in rust_stories])
    redis.ltrim(REDIS_KEY, 0, MAX_COUNT - 1)
    render(rust_stories)
Code example #11
File: hacker_news.py Project: rust-cn/rust_lang_cn
def main():
    top_story_ids = requests.get(
        "{}/topstories.json".format(API_PREFIX)).json()
    pool = Pool(50)
    rust_stories = list(
        filter(lambda story: "Rust" in story.get("title", ""),
               pool.imap(fetch_story, top_story_ids)))[:MAX_COUNT]
    stories_length = len(rust_stories)
    if stories_length < MAX_COUNT:
        existed_story_ids = set(map(int, redis.lrange(REDIS_KEY, 0, -1)))
        existed_story_ids -= set(item["id"] for item in rust_stories)
        rust_stories.extend(
            pool.imap(fetch_story,
                      list(existed_story_ids)[:MAX_COUNT - stories_length]))
    redis.lpush(REDIS_KEY, *[item["id"] for item in rust_stories])
    redis.ltrim(REDIS_KEY, 0, MAX_COUNT - 1)
    render(rust_stories)
Code example #12
File: 4_gevent.py Project: AugustLONG/medusa
def calc_pi(tries, n):
    # ------------------------------------------------------------------------------------------
    from gevent.pool import Pool
    pool = Pool()
    result = pool.imap(test, [tries]*n)  # gevent
    # ------------------------------------------------------------------------------------------
    pi = 4.0 * sum(result)/(tries * n)
    return pi
Code example #13
File: 4_gevent.py Project: gaohaoning/medusa
def calc_pi(tries, n):
    # ------------------------------------------------------------------------------------------
    from gevent.pool import Pool

    pool = Pool()
    result = pool.imap(test, [tries] * n)  # gevent
    # ------------------------------------------------------------------------------------------
    pi = 4.0 * sum(result) / (tries * n)
    return pi
Code example #14
    def _load_records(self, options):
        pool_size = self.settings['__workers__']

        connection_pool = Pool(size=pool_size)

        for device_info in connection_pool.imap(self.prepare_device,
                                                self.get_all_devices(),
                                                maxsize=pool_size):
            yield device_info
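The maxsize argument is what keeps this pipeline lazy: it bounds how many finished results imap buffers, so get_all_devices() is advanced only as the caller consumes items rather than being drained up front. A sketch of that backpressure, with hypothetical slow_rpc, device_ids and handle names:

pool = Pool(size=4)
for info in pool.imap(slow_rpc, device_ids, maxsize=4):
    handle(info)  # each item consumed lets the pool pull roughly one more id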
Code example #15
File: models.py Project: akosiaris/ganetimgr
    def all(self):
        users, orgs, groups, instanceapps, networks = preload_instance_data()
        p = Pool(20)
        instances = []
        bad_clusters = []

        def _get_instances(cluster):
            t = Timeout(RAPI_TIMEOUT)
            t.start()
            try:
                instances.extend(cluster.get_instances())
            except (GanetiApiError, Timeout):
                pass
            finally:
                t.cancel()
        clusters = Cluster.objects.all()
        p.imap(_get_instances, clusters)
        p.join()
        return instances
Code example #16
 def run_with_imap(cls):
     __greenlet_number = 5
     pool = Pool(size=__greenlet_number)
     # result = pool.imap(cls.main_run_fun, ("index_1", "index_2", "index_3", "index_4", "index_5", "index_6"))
     # result = pool.imap(cls.main_run_fun, (("index_1", "index_2"), ("index_3", "index_4"), ("index_5", "index_6")))
     result = pool.imap(cls.main_run_fun_with_args,
                        (("index_1", "index_2"), ("index_3", "index_4"),
                         ("index_5", "index_6")))
     result_list = [r for r in result]
     print("result: ", result_list)
Code example #17
    def _load_records(self, options):

        pool_size = self.settings['__workers__']

        connection_pool = Pool(size=pool_size)

        for device_info in connection_pool.imap(self.fetch_asset_details, self.fetch_asset_ids(), maxsize=pool_size):
            if device_info:
                yield device_info
            else:
                return  # PEP 479: raising StopIteration inside a generator is a RuntimeError
Code example #18
File: models.py Project: redtocatta/ganetimgr
    def all(self):
        users, orgs, groups, instanceapps, networks = preload_instance_data()
        p = Pool(20)
        instances = []
        bad_clusters = []

        def _get_instances(cluster):
            t = Timeout(RAPI_TIMEOUT)
            t.start()
            try:
                instances.extend(cluster.get_instances())
            except (GanetiApiError, Timeout):
                pass
            finally:
                t.cancel()

        clusters = Cluster.objects.all()
        p.imap(_get_instances, clusters)
        p.join()
        return instances
Code example #19
def cmd_assistant_discovery_queue(args):
    with init_client(args) as s:
        web = s.get_web_session()

        if not web:
            LOG.error("Failed to get web session")
            return 1  # error

        sessionid = web.cookies.get('sessionid',
                                    domain='store.steampowered.com')

        LOG.info("Generating new discovery queue...")

        try:
            data = web.post(
                'https://store.steampowered.com/explore/generatenewdiscoveryqueue',
                {
                    'sessionid': sessionid,
                    'queuetype': 0
                }).json()
        except Exception as exp:
            LOG.debug("Exception: %s", str(exp))
            data = None

        if not isinstance(data, dict) or not data.get('queue', None):
            LOG.error("Invalid/empty discovery response")
            return 1  # error

        def explore_app(appid):
            for delay in (1, 3, 5, 8, 14):
                resp = web.post('https://store.steampowered.com/app/10', {
                    'appid_to_clear_from_queue': appid,
                    'sessionid': sessionid
                })

                if resp.status_code == 200:
                    return True

                LOG.warning(
                    'Failed to explore app %s, retrying in %s second(s)',
                    appid, delay)
                s.sleep(delay)

            return False

        pool = Pool(6)

        result = pool.imap(explore_app, data['queue'])

        if all(result):
            LOG.info("Discovery queue explored successfully")
        else:
            LOG.error("Failed to explore some apps, try again")
            return 1  #error
Code example #20
File: parser.py Project: datalater/class
    def periodic_parsing(self):

        major_name_list = []
        pool = Pool(70)

        for major in self.all_major:
            if major.on_off and major.should_parse():
                major_name_list.append(major.major_name)

        for _ in pool.imap(self.parsing_major_name, major_name_list):
            pass
Code example #21
File: __init__.py Project: mahmoud/picritic
    def fetch_package_infos(self):
        self.load()
        get_url = urllib2.urlopen
        pool = Pool(self.concurrency)
        pkg_idx = self.package_index
        if not pkg_idx:
            pkg_idx = self._fetch_package_index()
        pkg_info_map = self.pkg_info_map
        if not pkg_info_map:
            pkg_info_map = PackageInfoMap(pkg_idx=pkg_idx)
            pkg_info_map.path = self.package_info_path
        pb = ProgressBar(widgets=[Percentage(),
                                  ' ', Bar(),
                                  ' ', SimpleProgress()],
                         maxval=len(pkg_idx) + 1)
        pb.start()
        pb.update(len(pkg_info_map))

        to_fetch = sorted(set(pkg_idx.package_rel_urls) -
                          set(pkg_info_map.pkg_infos.viewkeys()))

        def _get_package_info(package_rel_url):
            pkg_url = self.default_pypi_url + 'pypi/%s/json' % package_rel_url
            try:
                resp = get_url(pkg_url)
            except Exception as e:
                ret = {'error': repr(e)}
            else:
                ret = json.loads(resp.read())
            ret['rel_url'] = package_rel_url
            return ret

        pkg_info_iter = pool.imap(_get_package_info, to_fetch)
        err_count = 0
        for pkg_info in pkg_info_iter:
            try:
                pkg_info_map.add_dict(pkg_info)
            except KeyError:
                err_count += 1
                pkg_info_map.add_dict(pkg_info)
            pb.update(len(pkg_info_map))
            if len(pkg_info_map) % self.concurrency == 0:
                pkg_info_map.save()

        pool.join(timeout=0.3, raise_error=True)
        print 'Done fetching. Saving', len(pkg_info_map), 'package infos.'
        try:
            pkg_info_map.save()
        except Exception:
            print ExceptionInfo.from_current().get_formatted()
            import pdb;pdb.post_mortem()
        import pdb;pdb.set_trace()
        return
Code example #22
 def load_devices_api_v1(self, *a, **kw):
     for partition in self.fetch_all_partitions():
         if self.settings['partitions'] == "All" or partition[
                 'name'] in self.settings['partitions']:
             pool_size = self.settings.get('__workers__', 2)
             connection_pool = Pool(size=pool_size)
             for device in connection_pool.imap(
                     self.load_hardware_and_serial_for_windows_devices,
                     self.fetch_all_devices_for_partition(partition['id']),
                     maxsize=pool_size):
                 yield device
         else:
             LOG.debug("Skipping partition %r", partition)
Code example #23
    def _load_records(self, options):
        pool_size = self.settings['__workers__']

        connection_pool = Pool(size=pool_size)

        # fetch all the assets ordered by unique sys_id value
        all_asset_url = self.settings['url'] + "/api/now/table/alm_asset?" \
                                               "sysparm_display_value=all&" \
                                               "sysparm_query=^ORDERBYsys_id"

        for asset in connection_pool.imap(self.prepare_asset_payload,
                                          self.paginator(all_asset_url),
                                          maxsize=pool_size):
            yield asset
Code example #24
def messages_p2p(config: Config) -> None:
    login_pool = Pool(size=config.number_of_users)
    number_of_senders = config.number_of_users // 2
    number_of_receivers = config.number_of_users // 2

    # login senders and receivers concurrently
    senders_users_results = login_pool.imap(
        new_user,
        repeat(config.sender_matrix_server_url, times=number_of_senders))
    receivers_users_results = login_pool.imap(
        new_user,
        repeat(config.receiver_matrix_server_url, times=number_of_receivers))
    senders = list(senders_users_results)
    receivers = list(receivers_users_results)

    invite_pool = Pool(size=config.number_of_concurrent_chat_rooms)
    rooms = list(login_pool.imap(init_clients_and_rooms, senders, receivers))
    invite_pool.join()

    message_pool = Pool(size=config.number_of_parallel_messages)
    for room in rooms:
        message_pool.spawn(send, room)
    message_pool.join()
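Note the two-iterable call: like the built-in map, Pool.imap accepts multiple iterables and takes one argument from each, so init_clients_and_rooms is effectively called as init_clients_and_rooms(sender, receiver) for each zipped pair.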
Code example #25
def generate_arrays_from_file():
    '''
    Load the images under ./data/image and scale them to [0, 1].
    :return: numpy array of image data
    '''
    files = glob("./data/image/*.png")
    x = []
    pool = Pool(size=100)
    res = pool.imap(lambda filename: img_to_array(load_img(filename)) / 255,
                    files)
    for img_area in res:
        x.append(img_area)
    x = np.array(x)
    return x
Code example #26
File: utils.py Project: bernardraditio/ganetimgr
def prepare_clusternodes(cluster=None):
    if not cluster:
        clusters = Cluster.objects.all()
    else:
        clusters = Cluster.objects.filter(slug=cluster)
    p = Pool(15)
    nodes = []
    bad_clusters = []
    bad_nodes = []

    def _get_nodes(cluster):
        try:
            for node in cluster.get_cluster_nodes():
                nodes.append(node)
                if node['offline'] is True:
                    bad_nodes.append(node['name'])
        except (GanetiApiError, Exception):
            cluster._client = None
            bad_clusters.append(cluster)
        finally:
            close_connection()
    p.imap(_get_nodes, clusters)
    p.join()
    return nodes, bad_clusters, bad_nodes
Code example #27
File: parser.py Project: datalater/class
    def parsing_all(self):

        # ----- select the options for the data to query ----- #
        pool = Pool(70)

        # self.course_info_list = list()
        params_list = []

        for i in range(len(self.gubun_list)):
            if i == 0:
                for j in range(len(self.major_code_list)):
                    params = {
                        'tab_lang': 'K',
                        'type': '',
                        'ag_ledg_year': self.default_year,  # year
                        'ag_ledgr_sessn':
                        self.default_session,  # 1=spring term, 2=summer session, 3=fall term, 4=winter session
                        'ag_org_sect':
                        'A',  # A=undergraduate, B=graduate school, D=interpretation/translation, E=education, G=politics/public admin./media, H=international and area studies, I=business (day), J=business (evening), L=law school, M=TESOL graduate school, T=TESOL professional institute
                        'campus_sect': 'H1',  # H1=Seoul, H2=Global
                        'gubun': self.gubun_list[i],  # 1=major/minor, 2=practical foreign language/liberal arts
                        'ag_crs_strct_cd': self.major_code_list[j],  # major codes
                        'ag_compt_fld_cd': ''  # liberal-arts codes
                    }
                    params_list.append(params)

                    # self.major_data = list(self.parsing(params))
            else:
                for k in range(len(self.liberal_code_list)):
                    params = {
                        'tab_lang': 'K',
                        'type': '',
                        'ag_ledg_year': self.default_year,  # year
                        'ag_ledgr_sessn':
                        self.default_session,  # 1=spring term, 2=summer session, 3=fall term, 4=winter session
                        'ag_org_sect':
                        'A',  # A=undergraduate, B=graduate school, D=interpretation/translation, E=education, G=politics/public admin./media, H=international and area studies, I=business (day), J=business (evening), L=law school, M=TESOL graduate school, T=TESOL professional institute
                        'campus_sect': 'H1',  # H1=Seoul, H2=Global
                        'gubun': self.gubun_list[i],  # 1=major/minor, 2=practical foreign language/liberal arts
                        'ag_crs_strct_cd': '',  # major codes
                        'ag_compt_fld_cd': self.liberal_code_list[k]  # liberal-arts codes
                    }

                    params_list.append(params)

        for _ in pool.imap(self.parsing, params_list):
            pass
Code example #28
    def device_page_generator(self, options):
        """
        Page info generator
        :param options:
        :return:
        """

        pool_size = self.settings['__workers__']

        connection_pool = Pool(size=pool_size)

        for page in connection_pool.imap(
                self.get_device_page_info,
                self.device_page_url_generator(options),
                maxsize=pool_size):
            if not page:
                return  # PEP 479: don't raise StopIteration inside a generator
            yield page
Code example #29
    def collect_friends(self, uid=None, level=1):
        slot = []
        slot_dict = {}
        
        uid = uid or self.renren.uid
        uid = str(uid)
        
        def get_fs_by_level(lv):
            return filter(lambda s: s.level == lv, slot)
        
        @gtimeout(360, mute=True)
        def _collect(fo):
            friends = set(self.renren.get_friends(fo.uid))
            if uid in friends:
                friends.remove(uid)
            fo.friends = friends
            return fo
        
        

        fs = FriendsStore(uid, 0)
        slot.append(fs)
        
        def find_fs(uid, lv, parent):
            if uid not in slot_dict:
                slot_dict[uid] = FriendsStore(uid, lv, parent)
            return slot_dict[uid]

        pool = Pool(30)
        for l in range(level):
            pool_jobs = pool.imap(_collect, get_fs_by_level(l))
            for fo in pool_jobs:
                if not fo:
                    continue
                
                slot.extend(
                    [find_fs(u, l+1, fo.uid) for u in fo.friends]
                )
                
                
        print 'collect done'
        slot.pop(0)
        return slot
Code example #30
def main():

    net_devices = get_devices()
    start_time = datetime.now()

    results = []
    pool = Pool(4)
    for a_result in pool.imap(run_task, (a_device for a_device in net_devices.values())):
        results.append(a_result)

    elapsed_time = datetime.now() - start_time

    print('\n--- Host task times ---')
    for a_result in results:
        if 'exception' in a_result:
            print(f"{a_result['host']} - {a_result['exception']}")
            #print(a_result['traceback'])
        else:
            print(f"{a_result['host']} - {a_result['ssh_runtime']}")
    print(f"\nTotal Elapsed time: {format(elapsed_time)}")
Code example #31
def main():
    p = Pool(20)

    r = s.get("https://api.epandda.org/annotations?limit=1")
    r.raise_for_status()

    total = r.json()["counts"]["totalCount"]
    print(total)

    fetched = 0

    result_iter = p.imap(get_url, urls(total))
    with jsonlines.open("/home/godfoder/Downloads/annotations.jsonl",
                        mode="w") as writer:
        for res in result_iter:
            fetched += len(res)
            writer.write_all(res)

            if fetched % 10000 == 0:
                print(fetched)

    print(fetched)
Code example #32
File: plumber.py Project: jaiminpan/MYWIKI
def call_main():
    rawsql = None
    try:
        import sys
        import getopt
        options, args = getopt.getopt(sys.argv[1:], "hr:", ['help', "raw="])
        for name, value in options:
            if name in ('-h', '--help'):
                print("error")
            elif name in ('-r', '--raw'):
                rawsql = value
    except getopt.GetoptError:
        print("error")

    ora_rawsql = "select * from test where rownum < 10"
    ora_conn = OraPlumberConn(
        user=ora_con_dict['user'],
        passwd=ora_con_dict['passwd'],
        schema=ora_con_dict['schema'])
    ora_conn.execute(ora_rawsql)

    pg_rawsql = "select * from test limit 10"
    pg_conn = PgPlumberConn(
        database=pg_con_dict['database'],
        user=pg_con_dict['user'],
        password=pg_con_dict['password'],
        host=pg_con_dict['host'],
        port=pg_con_dict['port'])
    pg_conn.execute(pg_rawsql)

    insert_sql = 'insert into test values {}'
    process = ProcessInsert(pg_conn, insert_sql)

    from gevent.pool import Pool
    p = Pool(4)
    for _d in p.imap(process.handle, ora_conn.find_batch()):
        pass
Code example #33
    def _load_records(self, options):

        if self.settings.get('dep_uuid'):
            # if the dep_uuid is given, we have to retrieve the different subset of devices from the separate API
            # it is not clear from the docs if the API supports pagination, looks like not
            # also this API is supported only by the AirWatch starting from 9.2(?)
            dep_api_url = '%s/api/mdm/dep/groups/%s/devices' % (
                self.settings['url'], self.settings['dep_uuid'])
            self.dep_devices = {
                _['deviceSerialNumber']: _
                for _ in self.get(dep_api_url).json()
            }

        pool_size = self.settings['__workers__']

        connection_pool = Pool(size=pool_size)

        for device_info in connection_pool.imap(
                self.retrieve_device_info,
                self.device_page_generator(options),
                maxsize=pool_size):
            if not device_info:
                return  # PEP 479: don't raise StopIteration inside a generator
            yield device_info
Code example #34
            data = str(random.randint(10, 10000))
            w.write('%s, from client %d\n' % (data, cid))
            w.flush()
            print 'client', cid, 'send:', data

    def recv():
        while True:
            line = r.readline()
            print 'client', cid, 'receive:', line,
            if not line:
                break

    send_job = gevent.spawn_later(1, send)
    recv_job = gevent.spawn(recv)

    def clear(*args):
        gevent.killall([send_job, recv_job])
        s.close()

    send_job.link(clear)
    recv_job.link(clear)
    gevent.joinall([send_job, recv_job])
    print 'client', cid, 'finish'


clients = pool.imap(client, xrange(1000))
gevent.spawn_later(60, lambda: clients.kill()).start()
clients.start()
gevent.run()

Code example #35
File: views.py Project: akosiaris/ganetimgr
def get_user_group_list(request):
    if request.user.is_superuser or request.user.has_perm('ganeti.view_instances'):
        q_params = None
        try:
            q_params = request.GET['q']
        except:
            pass
        users = User.objects.all()
        groups = Group.objects.all()
        instances = []
        clusters = Cluster.objects.all()
        p = Pool(20)
        bad_clusters = []

        def _get_instances(cluster):
            t = Timeout(RAPI_TIMEOUT)
            t.start()
            try:
                instances.extend(cluster.get_user_instances(request.user))
            except (GanetiApiError, Timeout):
                bad_clusters.append(cluster)
            finally:
                t.cancel()
        if not request.user.is_anonymous():
            p.imap(_get_instances, Cluster.objects.all())
            p.join()
        if q_params:
            users = users.filter(username__icontains=q_params)
            groups = groups.filter(name__icontains=q_params)
            instances = Instance.objects.filter(name__icontains=q_params)
            clusters = clusters.filter(slug__icontains=q_params)
        ret_list = []
        for user in users:
            userd = {}
            userd['text']=user.username
            userd['email']=user.email
            userd['id']="u_%s"%user.pk
            userd['type']="user"
            ret_list.append(userd)
        for group in groups:
            groupd = {}
            groupd['text']=group.name
            groupd['id']="g_%s"%group.pk
            groupd['type']="group"
            ret_list.append(groupd)
        for instance in instances:
            instd = {}
            instd['text']=instance.name
            instd['id']="i_%s"%instance.name
            instd['type']="vm"
            ret_list.append(instd)
        for cluster in clusters:
            cld = {}
            cld['text']=cluster.slug
            cld['id']="c_%s"%cluster.pk
            cld['type']="cluster"
            ret_list.append(cld)
        action = ret_list
        return HttpResponse(json.dumps(action), mimetype='application/json')
    else:
        action = {'error':"Permissions' violation. This action has been logged and our admins will be notified about it"}
        return HttpResponse(json.dumps(action), mimetype='application/json')
Code example #36
class OssUtils(object):
    def __init__(self, public_net=True, max_conn_pool=10):
        # Default connection pool size; it must be at least as large as the
        # number of concurrent put/get workers.
        """
        init function
        :param public_net: use public or aliyun private net.
        :param max_conn_pool: max num of threads
        """
        oss2.defaults.connection_pool_size = max_conn_pool
        self.is_public_net = public_net
        self.access_key_id = 'LTAIipB2qbeW3V99'
        self.access_key_secret = 'Dtyx9XKvFdQvmniSlns18j7lXpmA03'
        self.bucket_name = 'mojimeteo'
        if public_net:
            # public net like bj29 use this.
            # DO NOT USE THIS ADDRESS TO DOWNLOAD.
            self.endpoint = 'oss-cn-beijing.aliyuncs.com'
        else:
            # ali private net use this.
            self.endpoint = 'vpc100-oss-cn-beijing.aliyuncs.com'
        self.bucket = oss2.Bucket(
            oss2.Auth(self.access_key_id, self.access_key_secret),
            self.endpoint, self.bucket_name)
        self.greenlet_pool = Pool(max_conn_pool)

    def get_obj(self,
                key,
                dst,
                use_resume=True,
                part_size=(20 * 1024 * 1024),
                num_threads=4):
        """
        get files from oss,
        :param key: oss key
        :param dst: The path to save obj.
        :return obj save path at last
        """
        try:
            if self.is_public_net:
                raise Exception('Do not download from public')
            if use_resume:
                oss2.resumable_download(
                    self.bucket,
                    key,
                    dst,
                    store=oss2.ResumableDownloadStore(root='/tmp'),
                    multiget_threshold=20 * 1024 * 1024,
                    part_size=part_size,
                    num_threads=num_threads)
            else:
                self.bucket.get_object_to_file(key, dst)
            return dst
        except Exception as ex:
            print ex.message
            return None

    def put_obj(self,
                key,
                src,
                use_resume=True,
                part_size=(20 * 1024 * 1024),
                num_threads=4):
        """
        put file to oss
        :param key:
        :param src:
        """
        # use resume
        try:
            if use_resume:
                oss2.resumable_upload(self.bucket,
                                      key,
                                      src,
                                      store=oss2.ResumableStore(root='/tmp'),
                                      multipart_threshold=100 * 1024,
                                      part_size=part_size,
                                      num_threads=num_threads)
            else:
                self.bucket.put_object_from_file(key, src)
        except Exception as ex:
            print ex.message

    def exists(self, key):
        """
        judge if the key exists
        :param key:
        :return:
        """
        return self.bucket.object_exists(key)

    def copy_obj(self, src_key, dst_key):
        """
        copy obj from src_key to dst_key
        :param key:
        :return:
        """
        return self.bucket.copy_object(self.bucket_name, src_key, dst_key)

    def list_obj(self, prefix='', delimiter='/', max_count=1000):
        """
        return a iterator in bucket
        :param prefix:
        :param max_count:
        :return:
        """
        return islice(
            oss2.ObjectIterator(self.bucket,
                                delimiter=delimiter,
                                prefix=prefix), max_count)

    def get_objs(self, src_root, dst_root):
        """
        get objs in oss
        """
        monkey.patch_socket()
        task_list = []
        if src_root.startswith('/'):
            src_root = src_root[1:]
        if not src_root.endswith('/'):
            src_root += '/'

        # make local dir if not exist
        def _touch_dir(path):
            result = False
            try:
                path = path.strip().rstrip("\\")
                if not os.path.exists(path):
                    os.makedirs(path)
                    result = True
                else:
                    result = True
            except:
                result = False
            return result

        _touch_dir(dst_root)

        def _get_objs(key):
            for obj in self.list_obj(key):
                # remove prefix of src_root
                _rel_path = obj.key[obj.key.index(src_root) + len(src_root):]
                local_obj = os.path.join(dst_root, _rel_path)
                if obj.is_prefix():  # directory
                    _touch_dir(local_obj)
                    _get_objs(obj.key)
                else:  # file
                    task_list.append((obj.key, local_obj, False))
                    print 'file: %s->%s' % (obj.key, local_obj)

        _get_objs(src_root)
        # each entry is a (key, dst, use_resume) tuple; imap passes a single
        # argument, so unpack it in the worker
        tasks = self.greenlet_pool.imap(lambda args: self.get_obj(*args),
                                        task_list)
        tasks.join()

    def put_objs(self, src_root, dst_root):
        monkey.patch_all()
        task_list = []
        if dst_root.startswith('/'):
            dst_root = dst_root[1:]

        def _put_objs(_rel_path):
            local_path = os.path.join(src_root, _rel_path)
            filelist = os.listdir(local_path)
            for filename in filelist:
                oss_path = os.path.join(dst_root, _rel_path)
                key = os.path.join(oss_path, filename)
                local_obj = os.path.join(local_path, filename)
                _rel_path_obj = os.path.join(_rel_path, filename)
                if os.path.isdir(local_obj):
                    _put_objs(_rel_path_obj)
                else:
                    self.greenlet_pool.add(
                        gevent.spawn(self.put_obj, key, local_obj, False))

        _put_objs('')
        self.greenlet_pool.join()
Code example #37
File: ftp2.py Project: bearnard/ftptest
    zfile = '%s.zip' % site_id
    with zipfile.ZipFile(zfile) as z:
        z.extractall('tmp')

    file_workers = [
        pool.spawn(upload_files, i, worker_id, file_queue) for i in xrange(concurrency)
    ]

    for dirname, dirnames, filenames in os.walk('tmp/%s' % site_id):
        # print path to all subdirectories first.
        files = []
        for filename in filenames:
            files.append(os.path.join(dirname, filename))
        for f in files:
            file_queue.put(f, block=False)
        print "START_DIRS"
        dirs = []
        for subdirname in dirnames:
            dirs.append(os.path.join(dirname, subdirname))
        if dirs:
            print "POOLING:", dirs
            dir_pool.imap(mkdirs, dirs)
        print "END"
    #joinall(dir_jobs)
    #joinall([
    #    spawn([s_dir] + dirs) for s_dir, dirs in skel_dirs.iteritems()
    #])

    file_queue.join()
Code example #38
File: crawler.py Project: AMorporkian/tagprostats
                         'support', 'tags', 'wins')
        h_stats = {k+"_per_hour": float(data[k])/float(data['hours']) for k in per_stat_cols if float(data['hours']) > 0}
        g_stats = {k+"_per_game": float(data[k])/float(data['games']) for k in per_stat_cols if float(data['games']) > 0}
        return dict(data.items()+h_stats.items()+g_stats.items())
    player.set(last_updated=datetime.now(), **extract_data(all))
    if player.monthly is not None:
        player.monthly.set(**extract_data(monthly))
    else:
        Monthly(player=player, **extract_data(monthly))
    if player.weekly is not None:
        player.weekly.set(**extract_data(weekly))
    else:
        Weekly(player=player, **extract_data(weekly))
    if player.daily is not None:
        player.daily.set(**extract_data(daily))
    else:
        Daily(player=player, **extract_data(daily))
    c+=1
    if not c%100:
        print "{}/{} done.".format(c, total)

with db_session:
    players = select(x for x in Players)
    p = Pool(10)
    total = len(players)
    res = p.imap(update_player, players)
    gevent.wait([res])
    print "Committing..."
    commit()
    print "Done."
Code example #39
File: uptime_gevent.py Project: bearnard/challenge
    client = paramiko.SSHClient()
    client.get_host_keys()
    client.set_missing_host_key_policy(paramiko.WarningPolicy())
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(host, port=22)
    stdin, stdout, stderr = client.exec_command('uptime')
    result = stdout.read()
    client.close()
    return host, result


if __name__ == '__main__':

    if len(sys.argv) < 2:
        sys.exit('Usage: %s filename' % sys.argv[0])

    file_name = sys.argv[1]
    if not os.path.exists(file_name):
        sys.exit('ERROR: data file %s was not found!' % sys.argv[1])

    pool = Pool(25)
    hosts = []

    with open(file_name) as fh:
        for line in fh:
            hosts.append(line.strip())

    for host, result in pool.imap(uptime, hosts):
        print "Host: %s" % host, "Response: ", result

Code example #40
File: streamworker_client.py Project: ownermz/abchat
            fmt = struct.Struct('>i%ds' % data_len)
            data = fmt.pack(data_len, data)
            s.sendall(data)
            print 'client', cid, 'send:', nums

    def recv():
        while True:
            data = s.recv(4)
            data = head_fmt.unpack(data)
            length = data[0]
            data = s.recv(length)
            print 'client', cid, 'receive:', data

    send_job = gevent.spawn_later(1, send)
    recv_job = gevent.spawn(recv)

    def clear(*args):
        gevent.killall([send_job, recv_job])
        s.close()

    send_job.link(clear)
    recv_job.link(clear)
    gevent.joinall([send_job, recv_job])
    print 'client', cid, 'finish'


clients = pool.imap(client, xrange(1000))
gevent.spawn_later(60, lambda: clients.kill()).start()
clients.start()
gevent.run()
Code example #41
File: views.py Project: redtocatta/ganetimgr
def get_user_group_list(request):
    if request.user.is_superuser or request.user.has_perm(
            'ganeti.view_instances'):
        q_params = None
        try:
            q_params = request.GET['q']
        except:
            pass
        users = User.objects.all()
        groups = Group.objects.all()
        instances = []
        clusters = Cluster.objects.all()
        p = Pool(20)
        bad_clusters = []

        def _get_instances(cluster):
            t = Timeout(RAPI_TIMEOUT)
            t.start()
            try:
                instances.extend(cluster.get_user_instances(request.user))
            except (GanetiApiError, Timeout):
                bad_clusters.append(cluster)
            finally:
                t.cancel()

        if not request.user.is_anonymous():
            p.imap(_get_instances, Cluster.objects.all())
            p.join()
        if q_params:
            users = users.filter(username__icontains=q_params)
            groups = groups.filter(name__icontains=q_params)
            instances = Instance.objects.filter(name__icontains=q_params)
            clusters = clusters.filter(slug__icontains=q_params)
        ret_list = []
        for user in users:
            userd = {}
            userd['text'] = user.username
            userd['email'] = user.email
            userd['id'] = "u_%s" % user.pk
            userd['type'] = "user"
            ret_list.append(userd)
        for group in groups:
            groupd = {}
            groupd['text'] = group.name
            groupd['id'] = "g_%s" % group.pk
            groupd['type'] = "group"
            ret_list.append(groupd)
        for instance in instances:
            instd = {}
            instd['text'] = instance.name
            instd['id'] = "i_%s" % instance.name
            instd['type'] = "vm"
            ret_list.append(instd)
        for cluster in clusters:
            cld = {}
            cld['text'] = cluster.slug
            cld['id'] = "c_%s" % cluster.pk
            cld['type'] = "cluster"
            ret_list.append(cld)
        action = ret_list
        return HttpResponse(json.dumps(action), mimetype='application/json')
    else:
        action = {
            'error':
            "Permissions' violation. This action has been logged and our admins will be notified about it"
        }
        return HttpResponse(json.dumps(action), mimetype='application/json')
Code example #42
            break
    #print "User: {user}; playtime: {playtime}; WaitTime: {wait}; Action:{action}".format(user=uid, playtime=playing_time,


#                                                                        wait=sleep_time, action=action)
    gevent.sleep(sleep_time)
    if action == "stop":
        inst.stop_instance()
    elif action == "noinput":
        inst.notify_instance('20')
    elif action == "crash":
        inst.notify_instance('11')
    else:
        pass
    print getcurrent()

pool = Pool(parallen)
pool.imap(cloud_play, range(1, 400))

weight['overtime'] = 0

while now_time < end_time - 30000:
    time.sleep(2)

    free_num = pool.free_count()
    print "==========", free_num
    if free_num > 0:
        pool.imap(cloud_play, range(end_num, end_num + free_num))
        end_num += free_num
    now_time = int(time.time() * 1000)
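The refill loop relies on pool.free_count(), which reports how many more greenlets the pool can hold; topping the pool back up with imap whenever slots open keeps a steady number of simulated players running until the end time.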
Code example #43
	def multi(self, F, iterable):
		pool = Pool(size=self.pool_size)
		res = pool.imap(F, iterable)
		return res
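multi hands back the imap iterator without consuming it: the pool begins working through the iterable on its own, but the caller only observes results by iterating, in input order, with at most pool_size tasks in flight. A hypothetical usage sketch (TaskRunner, parse_page, urls and save are stand-ins, not names from the source):

runner = TaskRunner(pool_size=8)   # hypothetical class defining multi()
for parsed in runner.multi(parse_page, urls):
    save(parsed)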
Code example #44
File: prepare.py Project: hasadna/mkmap
      links.append({'source':source,
                    'target':target,
                    'value':value,
                    'count':rec['count'],
                    'count2':rec['count2'],
                    'percentile': percentile
                   })
  return links


if __name__=="__main__":
  members = get_all_members()
  members = ( member for member in members if member['is_current'] )

  reduced_member_info=[]
  full_member_info = pool.imap(get_single_member,members)
  for info in full_member_info:
      reduced_member_info.append( {
        'id': info['id'],
        'party':int(info['party_url'].split('/')[-2]),
        'name':"%(name)s" % info,
        'fullname':"%(name)s - %(party_name)s" % info
      } )
  reduced_member_info = dict((x['id'],x) for x in reduced_member_info)

  simple_bill_info = get_simple_bill_info()
  full_bill_info = pool.imap(get_full_bill,itertools.chain.from_iterable(simple_bill_info))
  full_bill_info = (bill for bill in full_bill_info if int(bill['stage_date'].split('-')[0])>=2013)

  proposer_list = ([ int(x.split('/')[-2]) for x in bill['proposers'] ] for bill in full_bill_info)
  links = process_links(proposer_list,reduced_member_info)