Example #1
def main():

    pool = Pool(processes=3)

    for i in range(30):

        pool.apply(f, (i, ))
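
The worker f and the pool shutdown are not shown in Example #1. A minimal self-contained sketch, assuming f is just a worker that reports its argument and process id:

from multiprocessing import Pool
import os


def f(i):
    # hypothetical worker: report which process handled which task
    print("task %d handled by pid %d" % (i, os.getpid()))


def main():
    pool = Pool(processes=3)
    for i in range(30):
        # apply() blocks, so the 30 tasks run strictly one after another
        pool.apply(f, (i, ))
    pool.close()
    pool.join()


if __name__ == '__main__':
    main()
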
Example #2
def parse_nji(source,
              output_c,
              output_h,
              classpath=None,
              use_pyjavap=False,
              SCONS_AWEFUL_HACK=False):
    global pool
    if pool is None:
        if SCONS_AWEFUL_HACK is True:
            #Oh god scons2 why do you have to be this way...
            #See https://stackoverflow.com/questions/24453387/scons-attributeerror-builtin-function-or-method-object-has-no-attribute-disp
            #This is only needed on Ubuntu 18.04's SCons 3.0.1, which does nasty things to pickle and cPickle SIGH
            import imp

            del sys.modules['pickle']
            del sys.modules['cPickle']

            sys.modules['pickle'] = imp.load_module('pickle',
                                                    *imp.find_module('pickle'))
            sys.modules['cPickle'] = imp.load_module(
                'cPickle', *imp.find_module('cPickle'))

            import pickle
            import cPickle
        pool = Pool(processes=4)
    #Use multiprocessing for both pyjavap and javap; it speeds up the Jinja templating
    pool.apply(_internal_parse_nji,
               args=(source, output_c, output_h, classpath, use_pyjavap))
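
Example #2 relies on module-level state that is not part of the snippet. A sketch of the assumed preamble (names taken from the calls above):

import sys
from multiprocessing import Pool

pool = None  # created lazily inside parse_nji
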
Example #3
    def handle(self, *args, **options):
        replace = options.get('replace')
        for route in args:
            app_label, model_name, field_name = route.rsplit('.')
            model_class = get_model(app_label, model_name)
            field = model_class._meta.get_field(field_name)

            queryset = model_class.objects \
                .exclude(**{'%s__isnull' % field_name: True}) \
                .exclude(**{field_name: ''})
            images = queryset.values_list(field_name, flat=True)

            pool = Pool(
                initializer=init_progressbar,
                initargs=[queryset.count()]
            )
            args = [
                dict(
                    file_name=file_name,
                    variations=field.variations,
                    replace=replace,
                )
                for file_name in images
            ]
            pool.map(render_field_variations, args)
            pool.apply(finish_progressbar)
            pool.close()
            pool.join()
Example #4
 def start_service(self):
     self.Socket.listen()
     while True:
         newSocket, add = self.Socket.accept()
         pool = Pool(5)
         pool.apply(func=self.request, args=(newSocket, ))
         newSocket.close()
def run(years, months):
    manager = Manager()
    article_list = manager.list()
    pool = Pool()

    for year in years:
        for month in months:
            pool.apply(func=fetch_data, args=(
                year,
                month,
                article_list,
            ))

    pool.close()
    pool.join()

    # Sort the article based on the published date
    article_list = sorted(article_list, key=lambda article: article.pub_date)

    with open('result/articles.csv', 'w+') as csvFile:
        fieldNames = ['web_url', 'snippet', 'keywords', 'pub_date']
        writer = csv.DictWriter(csvFile, fieldnames=fieldNames)
        writer.writeheader()

        for article in article_list:
            try:
                article.pub_date = str(
                    article.pub_date)[:10]  # Write only YYYY-MM-DD
                writer.writerow(article.__dict__)

            except UnicodeEncodeError:
                # Sometimes this error happens and I couldn't figure out why, but it happens very rarely.
                pass
Example #6
def detection_blink():
    num = 0
    left_blink, right_blink = (False, False)
    infile = 'temp.webm'
    outfile = 'temp.mp4'
    translate(infile, outfile)
    video2frame(outfile)
    pool = Pool(processes=4)
    d = Manager().dict()

    files = glob.glob(os.path.join('.', "*.jpg"))
    print('files', files)
    for f, i in zip(files, range(len(files))):
        pool.apply(piece_state, (f, i, d))  # _async

    pool.close()
    pool.join()

    print('dict:', d)
    for i in sorted(d):
        left_ear, right_ear = d[i]
        if left_ear < 0.20:
            left_blink = True
        if right_ear < 0.20:
            right_blink = True

        if left_ear >= 0.20 and right_ear >= 0.20 and left_blink and right_blink:
            num += 1
            right_blink = False
            left_blink = False

    return num
Example #7
def main():
    # 1. Parent-child communication without a Pool: a multiprocessing.Queue can be used directly
    queue = Queue()
    p = Process(target=childfunc, args=(queue, ), name="child-process")
    p.start()
    p.join()
    print("parent process get data:{}".format(queue.get()))

    # 2. Parent-child communication with a Pool: multiprocessing.Queue cannot be used; use multiprocessing.Manager().Queue() instead
    queue = multiprocessing.Manager().Queue()
    pool = Pool()
    pool.apply(childfunc, args=(queue, ))
    print("parent process get data:{}".format(queue.get()))

    # 3. A Pipe() can also be used; it represents the two ends of a connection (duplex = bidirectional)
    parent, child = multiprocessing.Pipe(duplex=True)
    p = Process(target=child_pip, args=(child, ))
    parent.send('parent info')
    p.start()
    p.join()
    print("parent process get data:{}".format(parent.recv()))

    # 4. Use Value or Array to store shared data (it can be modified by either side)
    v = Value("d", 0.0)
    a = Array("i", list(range(10)))
    p = Process(target=child_share, args=(v, a))
    p.start()
    p.join()
    print("parent get shared data:{}={}".format(v.value, a[:]))
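
The helpers called in Example #7 (childfunc, child_pip, child_share) are not shown. A minimal sketch of what they might look like, matching how they are used above; the bodies are assumptions, not the original code:

def childfunc(queue):
    # child puts a value on the queue for the parent to read
    queue.put("child info")


def child_pip(conn):
    # child end of the Pipe: read what the parent sent, then reply
    print("child process get data:{}".format(conn.recv()))
    conn.send("child info")


def child_share(v, a):
    # modify the shared Value and Array in place
    v.value = 3.14
    for i in range(len(a)):
        a[i] = -a[i]
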
Example #8
def main():
    print(os.getpid())

    pool = Pool(4)
    # pool.apply_async(process)
    # pool.apply_async(process)
    # pool.apply_async(process)
    # pool.apply_async(process)

    # pool.apply(process)
    # pool.close()
    # pool.join()
    # pool.apply(process)
    # pool.apply(process)
    # pool.apply(process)

    for i in range(20):
        pool.apply(process, args=(i, ))
        print("hello")

    # start = time.time()
    # for i in range(20):
    #     pool.apply_async(process,(i,))
    # end = time.time()
    # time.sleep(1)
    # print(end-start)
    pool.close()
    time.sleep(2)
    # pool.terminate()
    pool.join()

    print("finish")
Example #9
def main():
    pool = Pool(processes=3)
    for i in range(30):
        # assign 30 tasks to the process pool; each prints the argument passed to it
        pool.apply(f, (i, ))
    pool.close()
    pool.join()
Example #10
def runBamHash(obj):
    pool = Pool(processes=10)
    for element in obj.data_files:
        pool.apply(runBamHashWorker, (element, ))
       #pool.apply_async(runBamHashWorker, (element, ))
    pool.close()
    pool.join()
Example #11
def main():
    # define a pool of 3 processes
    pool = Pool(processes=3)
    for i in range(30):
        pool.apply(f, (i, ))
    pool.close()
    pool.join()
Example #12
def main():
    lock = Lock()
    pool = Pool()
    for i in range(0, 10):
        pool.apply(write_file, (lock, ))
        # pool.apply_async(write_file(lock))
    pool.close()
Example #13
def test_pool():
    pool = Pool(8 * 2)

    for i in range(NUM):
        pool.apply(task, ('id0', ))

    pool.close()
    pool.join()
Example #14
def main():
    po = Pool()
    q = Manager().Queue()  # use Manager's Queue for initialization
    # Blocking mode: read_2_q runs only after write_2_q has finished
    po.apply(write_2_q, (q, 'n', 'a', 'm', 'e'))
    po.apply(read_2_q, (q, ))
    po.close()
    po.join()
Example #15
def main():
    p = Pool(3)

    for i in range(10):
        print(i)
        p.apply(test, (i, ))  # runs in blocking mode

    p.close()  # close the pool: no new tasks can be added
    p.join()  # by default the main process does not wait for its children; join() makes it wait for them to finish
Example #16
def static_analysis(pcap_name):
    pool = Pool(processes=10)
    pcap_detil = parsing_pcap(pcap_name)
    for parent, dirnames, filenames in os.walk('./plugins', followlinks=True):
        for filename in filenames:
            file_path = os.path.join(parent, filename)
            pool.apply(base_loader, args=(pcap_detil, file_path))
    pool.close()
    pool.join()
Example #17
def main2():
    pool = Pool(processes=4)

    for i in range(10):
        msg = 'hello' + str(i)
        pool.apply(func=worker, args=(msg, ))

    pool.close()
    pool.join()
Example #18
def main():
    # initialize a pool of 3 processes
    pool = Pool(processes=3)

    for i in range(30):
        # call apply to start the task: f is the task function, i is its argument
        pool.apply(f, (i, ))
    pool.close()
    pool.join()
Example #19
def main():
    print('{} start'.format(os.getpid()))
    q = Manager().Queue()
    po = Pool()
    po.apply(writer, (q,))
    po.apply(reader, (q,))
    po.close()
    po.join()
    print('{} end'.format(os.getpid()))
Example #20
def update_collections_uniqe_keys():
    p = Pool(len(collections))
    for col in collections:
        p.apply(update_data, ('house', col))
    p.close()
    p.join()

# update_collections_uniqe_keys()
# update_data('house','house_chengdu')
def main():
    q = Manager().Queue()
    pool = Pool()
    # Use blocking calls so the reader does not need an infinite loop: the writer finishes completely before the reader starts reading
    pool.apply(write, (q, ))
    pool.apply(read, (q, ))
    pool.close()
    pool.join()
    print("(%s) End" % os.getpid())
Example #22
def block_style():
    po = Pool(3)  # create a process pool with at most 3 processes
    for i in range(0, 10):
        print("----apply---- >", i)
        po.apply(worker, (i, ))

    print("----start----")
    po.close()  # close the pool; after this, po accepts no new requests
    po.join()  # wait for all workers in po to finish; must come after close()
    print("-----end-----")
Example #23
def main():
    print("Main process started >>> pid={}".format(os.getpid()))
    p = Pool(5)  # create multiple processes with a Pool
    for i in range(10):
        p.apply(run, args=(i, ))  # synchronous execution
        # p.apply_async(run, args=(i, ))    # asynchronous execution

    p.close()  # close the pool; stop accepting new tasks
    p.join()  # block until the workers finish
    print("Main process finished")
 def render_in_parallel(field, images, count, replace):
     pool = Pool(initializer=init_progressbar, initargs=[count])
     args = [
         dict(file_name=file_name, variations=field.variations, replace=replace, storage=field.storage)
         for file_name in images
     ]
     pool.map(render_field_variations, args)
     pool.apply(finish_progressbar)
     pool.close()
     pool.join()
Example #25
def main():
    # results = requests.get("http://www.xiami.com/artist/1", headers={'User-Agent': user_agent})
    # print results.content

    star_pool = Pool(10)
    for i in range(1630, 10000):
        star_pool.apply(get_star, (i, ))
        # print star_pool.apply(get_star, (i, ))
    star_pool.close()
    star_pool.join()
Example #26
def run():
    ensure_index_built()

    activate_queue = Queue(1)
    keybind_process = Process(target=_start_keybind_process, args=(activate_queue,))
    keybind_process.start()

    translate_pool = Pool(processes=1, initializer=_init_translate_process,
                          maxtasksperchild=1)

    stop = []

    def stop_now(sig, *_):
        stop.append(sig)
        activate_queue.close()
        debug('stop due to signal %s' % sig)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        signal.signal(signal.SIGUSR1, signal.SIG_DFL)

    signal.signal(signal.SIGTERM, stop_now)
    signal.signal(signal.SIGINT, stop_now)
    signal.signal(signal.SIGUSR1, stop_now)

    while not stop:
        got = None
        try:
            got = activate_queue.get()
        except:
            if not stop:
                raise
        debug('parent got: %s' % got)

        if not got:
            break

        debug('invoke translate')
        try:
            translate_pool.apply(_run_translate_process)
        except Exception as e:
            debug('failed: %s' % e)

    if stop[0] == signal.SIGUSR1:
        # keybind child signaled an error
        keybind_process.join(10)
        os._exit(7)

    debug('exiting normally')
    keybind_process.terminate()

    # FIXME: this always hangs.  Why?
    # That's why we use _exit instead.
    #translate_pool.terminate()

    os._exit(0)
Example #27
def run():
    ensure_index_built()

    activate_queue = Queue(1)
    keybind_process = Process(target=_start_keybind_process, args=(activate_queue,))
    keybind_process.start()

    translate_pool = Pool(processes=1, initializer=_init_translate_process,
                          maxtasksperchild=1)

    stop = []

    def stop_now(sig, *_):
        stop.append(sig)
        activate_queue.close()
        debug('stop due to signal %s' % sig)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        signal.signal(signal.SIGUSR1, signal.SIG_DFL)

    signal.signal(signal.SIGTERM, stop_now)
    signal.signal(signal.SIGINT, stop_now)
    signal.signal(signal.SIGUSR1, stop_now)

    while not stop:
        got = None
        try:
            got = activate_queue.get()
        except:
            if not stop:
                raise
        debug('parent got: %s' % got)

        if not got:
            break

        debug('invoke translate')
        try:
            translate_pool.apply(_run_translate_process)
        except StandardError as e:
            debug('failed: %s' % e)

    if stop[0] == signal.SIGUSR1:
        # keybind child signaled an error
        keybind_process.join(10)
        os._exit(7)

    debug('exiting normally')
    keybind_process.terminate()

    # FIXME: this always hangs.  Why?
    # That's why we use _exit instead.
    #translate_pool.terminate()

    os._exit(0)
def main(ticker):
    start = datetime.now()
    pool = Pool(processes=cpu_count())

    manager = Manager()
    words = manager.list()
    bags = manager.dict()

    with open(('result/%s.csv' % ticker), 'r') as ms_equity:
        reader = csv.reader(ms_equity)

        for row in list(reader):
            date = row[1]
            bags[(ticker, date)] = dict()

    header = True
    with open('articles.csv', 'r') as ms_articles:
        reader = csv.reader(ms_articles)
        for row in list(reader):
            if header:
                header = False
                continue

            url = row[0]
            date = row[3]

            if (ticker, date) in bags:
                pool.apply(func=get_sumbag, args=(ticker, url, bags, words, date, ))

    pool.close()
    pool.join()

    words = list(set(words))

    # Write to CSV file
    with open(('result/%s_result.csv' % ticker), 'w+') as csv_file:
        field_names = ['ticker', 'date']
        field_names += words
        writer = csv.DictWriter(csv_file, fieldnames=field_names)
        writer.writeheader()

        for key, bag in bags.items():
            ticker = key[0]
            date = key[1]

            # Count 0 word, that are in the other articles
            for word in words:
                if word not in bag:
                    bag[word] = 0

            bag['ticker'] = ticker
            bag['date'] = date
            writer.writerow(bag)

    print('Total Operation took: %s' % str(datetime.now() - start))
Example #29
def main():

    #getincome()
    #computetax()
    #writefile()
    Process(target=getincome).start()
    #Process(target=computetax).start()

    pool = Pool(processes=3)
    pool.apply(computetax)
    Process(target=writefile).start()
Example #30
 def render_context_to_html(self):
     """This method uses the multiprocessing package to accelerate rendering
     the ORM objects of a Django queryset into their respective HTML
     templates. Tasks are dispatched with the blocking apply() call.
     """
     pool = Pool(processes=len(self.context_dict))
     for barcode, context in self.context_dict.items():
         pool.apply(self._render_context, args=(barcode, context))
     pool.close()
     pool.join()
     self.is_render_end = True
Example #31
def main():
    """
    Process the tasks with multiple processes.
    pool is the process pool object.
    """
    pool = Pool()
    for key in table_dict:
        pool.apply(read_info_new_file, (key, ))
        pool.apply(read_error_new_file, (key, ))
    pool.close()
    pool.join()
def waiter_synchro():
    '''
    Synchronous method
    :return:
    '''
    pool2 = Pool(processes=2)
    for x in Philosophers:
        pool2.apply(Eating, (x,))

    pool2.close()
    pool2.join()
def main():
    print("%s start" % os.getpid())
    q = Manager().Queue()  # use Manager's Queue for initialization
    mypool = Pool()
    mypool.apply(write, (q, ))  # blocking call, so reading does not start before writing has finished
    mypool.apply(read, (q, ))
    mypool.close()
    mypool.join()
    print("%s stop" % os.getpid())
    print('\nAll data has been written and read')
    read(q)
Example #34
def get_links_from(channel):
    for i in range(1, ):
        get_link_form(channel, i)


if __name__ == '__main__':
    pool = Pool(4)
    # iterate the urls out of the database
    for link in url_list.find({}, {'_id': 0, 'url': 1}):
        pool.apply(bs_get_item_info, args=(link['url'],))
    pool.close()
    pool.join()
Example #35
def find_dups_dev(ref_scores, length, max_plog, min_hlog, clusters,
                  processors):
    from multiprocessing import Manager, Pool
    m = Manager()
    q = m.Queue()
    my_dict_o = m.dict()
    p = Pool(processors)
    curr_dir = os.getcwd()
    dup_dict = {}
    duplicate_file = open("duplicate_ids.txt", "w")
    genome_specific_list_of_lists = []
    files = os.listdir(curr_dir)
    files_and_temp_names = []
    for idx, f in enumerate(files):
        files_and_temp_names.append([
            str(idx),
            os.path.join(curr_dir, f), ref_scores, length, max_plog, min_hlog,
            clusters, processors
        ])
    # Multiprocessing here (mp_shell for Ctrl+F)
    """How to test this function???"""
    for process in files_and_temp_names:
        p.apply(_perform_workflow_fdd, args=(q, my_dict_o, process))
    # Get rid of any duplicate values in queue
    unique = set()
    while q.empty() == False:
        unique.add(q.get())
    """This generates the list of all possible CDSs"""
    with open("dup_refs.txt", "a") as ref_file:
        ref_file.write("ID" + "\n")
        ref_file.write("\n".join(clusters) + "\n")
    ref_file.close()
    try:
        generate_dup_matrix()
        os.system("paste dup_refs.txt dup_values > dup_matrix.txt")
    except:
        print("problem generating duplicate matrix")
    """new way to report duplicates"""
    duplicate_IDs = []
    for line in open("dup_matrix.txt"):
        fields = line.split()
        if fields[0] == "ID":
            pass
        else:
            for field in fields[1:]:
                if float(field) > 1:
                    if fields[0] in duplicate_IDs:
                        pass
                    else:
                        duplicate_IDs.append(fields[0])
    duplicate_file.write("\n".join(duplicate_IDs))
    duplicate_file.close()
    return duplicate_IDs
Example #36
def main():
    usage = """
    Copy data from one MongoDB instance to another.

    Example:
        arctic_copy_data --log "Copying data" --src user.library@host1 --dest user.library@host2 symbol1 symbol2
    """
    setup_logging()
    p = argparse.ArgumentParser(usage=usage)
    p.add_argument("--src", required=True, help="Source MongoDB like: library@hostname:port")
    p.add_argument("--dest", required=True, help="Destination MongoDB like: library@hostname:port")
    p.add_argument("--log", required=True, help="Data CR")
    p.add_argument("--force", default=False, action='store_true', help="Force overwrite of existing data for symbol.")
    p.add_argument("--splice", default=False, action='store_true', help="Keep existing data before and after the new data.")
    p.add_argument("--parallel", default=1, type=int, help="Number of imports to run in parallel.")
    p.add_argument("symbols", nargs='+', type=str, help="List of symbol regexes to copy from source to dest.")

    opts = p.parse_args()

    src = get_arctic_lib(opts.src)
    dest = get_arctic_lib(opts.dest)

    logger.info("Copying data from %s -> %s" % (opts.src, opts.dest))

    # Prune the list of symbols from the library according to the list of symbols.
    required_symbols = set()
    for symbol in opts.symbols:
        required_symbols.update(src.list_symbols(regex=symbol))
    required_symbols = sorted(required_symbols)

    logger.info("Copying: {} symbols".format(len(required_symbols)))
    if len(required_symbols) < 1:
        logger.warn("No symbols found that matched those provided.")
        return

    # Function we'll call to do the data copying
    copy_symbol = copy_symbols_helper(src, dest, opts.log, opts.force, opts.splice)

    if opts.parallel > 1:
        logger.info("Starting: {} jobs".format(opts.parallel))
        pool = Pool(processes=opts.parallel)
        # Break the jobs into chunks for multiprocessing
        chunk_size = len(required_symbols) / opts.parallel
        chunk_size = max(chunk_size, 1)
        chunks = [required_symbols[offs:offs + chunk_size] for offs in
                  range(0, len(required_symbols), chunk_size)]
        assert sum(len(x) for x in chunks) == len(required_symbols)
        pool.apply(copy_symbol, chunks)
    else:
        copy_symbol(required_symbols)
Example #37
def main():
    page_id = 1
    artist_pool = Pool(pool_size)
    while 1:
        print "Current page: %s , processing..." % page_id
        artists, has_next = get_page_stars(page_id)
        for artist in artists:
            artist_pool.apply(get_star, (artist[1], ))
        if not has_next:
            break
        page_id += 1
    artist_pool.close()
    artist_pool.join()
    print "total page: %s " % page_id
Example #38
def testing():
    readBlob = ['12'] * 100
    print readBlob
    freeze_support()
    pool = Pool(processes=cpu_count(), maxtasksperchild=1)
    step = 10
    for i in xrange(0, len(readBlob), step):
        print i
        pool.apply(recfromreadblob,
                   args=(readBlob[i:min(len(readBlob), i+step)],
                   len(readBlob)-i))
    pool.close()
    pool.join()
    pool.terminate()
    return
Example #39
def find_dups_dev(ref_scores, length, max_plog, min_hlog, clusters, processors):
    from multiprocessing import Manager, Pool
    m = Manager()
    q = m.Queue()
    my_dict_o = m.dict()
    p = Pool(processors)
    curr_dir=os.getcwd()
    dup_dict = {}
    duplicate_file = open("duplicate_ids.txt", "w")
    genome_specific_list_of_lists = []
    files = os.listdir(curr_dir)
    files_and_temp_names = []
    for idx, f in enumerate(files):
        files_and_temp_names.append([str(idx), os.path.join(curr_dir, f), ref_scores, length, max_plog, min_hlog, clusters, processors])
    # Multiprocessing here (mp_shell for Ctrl+F)
    """How to test this function???"""
    for process in files_and_temp_names:
        p.apply(_perform_workflow_fdd, args=(q,my_dict_o,process))
    # Get rid of any duplicate values in queue
    unique = set()
    while q.empty() == False:
        unique.add(q.get())
    """This generates the list of all possible CDSs"""
    with open("dup_refs.txt", "a") as ref_file:
        ref_file.write("ID"+"\n")
        ref_file.write("\n".join(clusters)+"\n")
    ref_file.close()
    try:
        generate_dup_matrix()
        os.system("paste dup_refs.txt dup_values > dup_matrix.txt")
    except:
        print("problem generating duplicate matrix")
    """new way to report duplicates"""
    duplicate_IDs = []
    for line in open("dup_matrix.txt"):
        fields = line.split()
        if fields[0] == "ID":
            pass
        else:
            for field in fields[1:]:
                if float(field)>1:
                    if fields[0] in duplicate_IDs:
                        pass
                    else:
                        duplicate_IDs.append(fields[0])
    duplicate_file.write("\n".join(duplicate_IDs))
    duplicate_file.close()
    return duplicate_IDs
Example #40
def usingMultiprocess():
    ''' using multiprocessing module for python concurrent programming '''
    num = 100
    processes = []

    print '************ using original process ***********'
    input_conn, output_conn = Pipe()
    for m in [-1,1,2,3]:
        p = Process(target=obtainQuadraticSumByPipe, args=(input_conn, num,m,))
        p.start()
        print output_conn.recv()

    print '------------- using Pool -------------'
    pool = Pool(processes=4)
    for m in [-1,1,2,3]:
        pool.apply(printQuadraticSum, (num,m))
def multiprocess_pool_sync():
    # In this function the apply() method creates a lock that prevents more than the specified number of
    # processes to take place at the same time. In this case, only 10 processes can be active at the same
    # time.
    jobs = []
    pool = Pool(processes=10)
    results = [pool.apply(func=freqsPerText, args=(text,)) for text in texts]
    print("Finished processing texts with Pool")
    print("Pool returned ", len(results), "results")
    return results
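
For comparison, an asynchronous variant of multiprocess_pool_sync, assuming the same texts list and freqsPerText worker: apply_async submits every job up front and the results are collected afterwards, so up to 10 of them can actually run at the same time.

def multiprocess_pool_async():
    pool = Pool(processes=10)
    # apply_async returns an AsyncResult immediately; get() fetches the value later
    async_results = [pool.apply_async(func=freqsPerText, args=(text,)) for text in texts]
    pool.close()
    pool.join()
    results = [r.get() for r in async_results]
    print("Pool returned ", len(results), "results")
    return results
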
 def render(field, images, count, replace, do_render):
     pool = Pool(
         initializer=init_progressbar,
         initargs=[count]
     )
     args = [
         dict(
             file_name=file_name,
             do_render=do_render,
             variations=field.variations,
             replace=replace,
             storage=field.storage.deconstruct()[0],
         )
         for file_name in images
     ]
     pool.map(render_field_variations, args)
     pool.apply(finish_progressbar)
     pool.close()
     pool.join()
Example #43
class MultiprocessingView(Singleton):
    """Provides a parallel view (similar to IPython)"""

    def __init__(self, *args, **kwargs):
        self._args = args
        self._kwargs = kwargs
        if not hasattr(self, 'pool'):
            self.pool = None

    def map(self, *args, **kwargs):
        if self.pool is None:
            self.pool = Pool(*self._args, **self._kwargs)
        return self.pool.map(*args, **kwargs)

    def apply(self, func, *args, **kwargs):
        if self.pool is None:
            self.pool = Pool(*self._args, **self._kwargs)
        return self.pool.apply(func, args=args, **kwargs)

    def apply_async(self, func, *args, **kwargs):
        if self.pool is None:
            self.pool = Pool(*self._args, **self._kwargs)
        self.pool.apply_async(func, args=args, **kwargs)

    def imap(self, func, *args, **kwargs):
        if self.pool is None:
            self.pool = Pool(*self._args, **self._kwargs)
        return self.pool.imap(func, *args, **kwargs)

    def __len__(self):
        if len(self._args) > 0:
            return self._args[0]
        elif "processes" in self._kwargs:
            return self._kwargs["processes"]
        else:
            return cpu_count()

    def shutdown(self):
        if self.pool is not None:
            logger.debug('Terminating multiprocessing pool')
            try:
                self.pool.terminate()
            except Exception as e:
                logger.debug('Could not terminate multiprocessing pool.')
                raise e
            else:
                self.pool = None
        else:
            logger.debug('No multiprocessing pool to shut down.')

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.shutdown()
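
A hypothetical usage sketch for the view above (the worker function and pool size are not part of the original):

def square(x):
    return x * x


if __name__ == '__main__':
    # back the view with a 4-process pool, run some work, then shut the pool down
    with MultiprocessingView(4) as view:
        print(view.map(square, range(10)))
        print(view.apply(square, 7))
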
Example #44
 def runTest(self):
     pool = Pool(initializer=initfunc)
     results = resultset.load(self.filename)
     self.settings.update(results.meta())
     self.settings.load_test(informational=True)
     plotters.init_matplotlib("-", False, True)
     for p in self.settings.PLOTS.keys():
         plot = pool.apply(plot_one, (self.settings, p, results))
         if not plot.verify() and p not in PLOTS_MAY_FAIL:
             raise self.failureException(
                 "Verification of plot '%s' failed" % p)
Example #45
def run_all_tests():
    to_run = []
    for solution in find_solutions():
        test = find_test_for_solution(solution)
        if test:
            to_run.append((solution, test))

    pool = Pool(maxtasksperchild=1)
    failed_solutions = []
    for solution, test in sorted(to_run):
        print("----------------------------------------------------------------------")
        print("----------------------------------------------------------------------")
        print("----------------------------------------------------------------------")
        print("\n\t{1}\n\tRunning tests for {0}\n".format(solution, test))
        if not pool.apply(run_one_testsuite_in_isolated_environment, args=(solution, test)):
            failed_solutions.append(solution)

    return failed_solutions
Example #46
def find_max_path(input_table):
    N = len(input_table)
    path = max_path()
    pool = Pool()
    # for i in range(0, N):
    #     for j in range(0, N):
    #         print("Creating table [" + repr(i) + ', ' + repr(j) + ']')

    new_path = [pool.apply(create_max_path, args=(input_table, i, j,))
                for i in range(0, N) for j in range(0, N)]

    for i in range(0, len(new_path)):
        # if(is_int(new_path.max_value) and is_int(path.max_value)):
        if new_path[i].max_value > path.max_value:
            path = new_path[i]
            # print("Found new max")
        elif path.max_value == "-Inf":
            path = new_path[i]

    return path
Example #47
def infoGain(X, y, n = 2):
	sampleNum, featureSize = X.shape #get the parameters
	scoreVector = []
	Y = preprocessing.LabelBinarizer().fit_transform(y)
	if Y.shape[1] == 1:                         ### if two classes, then transform it into two column
		Y = np.append(1 - Y, Y, axis=1)
	classNum = Y.shape[1]
	X_T = X
	X_T = np.transpose(X_T)
	diff = array('d')
	empty = array('L')
	symbol = 2147483647
	oldScore = 0.0
	X_T[X_T == 0] = symbol
	p = Pool(8)
	for i in range(X_T.shape[0]):
		subtotal = p.apply(func, args = (X_T[i], Y))
		diff.append(oldScore - subtotal)
		empty.append(0)
	return diff, empty
Example #48
def run_collect(opts):
    distutils.dir_util.copy_tree("css", opts.reports_dir + "/css")
    distutils.dir_util.copy_tree("fonts", opts.reports_dir + "/fonts")
    distutils.dir_util.copy_tree("js", opts.reports_dir + "/js")
    task_dir_list = glob.glob(opts.results_dir + '/*/*/*/*/*/*/*/*/*')
    pool = Pool(processes=processes_count)
    results = [pool.apply(collect_iteration, args=(opts, task_dir)) for task_dir in task_dir_list]
    #for task_dir in task_dir_list:
    #   collect_iteration(opts, task_dir)
    # creating reference page
    
    html_dir_list = glob.glob(opts.reports_dir + '/*/*/*/*/*/*/*/*/*/*.html')
    tmp_tree = []
    for report_file in html_dir_list:
        tmp_tree.append(report_file[(len(opts.reports_dir) + 1):])
    result_tree = build_tree(tmp_tree)
    f = open(opts.reports_dir + '/index.html', 'w')
    helper.write_head(f, "", "reference.html")
    f.write(
        '<body>\n' +
        '<div class="container">' +
        '<div class="row">' +
        '<h2>Reference</h2>' +
        '<hr>' +
        '<div id="includedContent"></div>' +
        '</div>' +
        '</div>' +
        '</body>' +
        '</html>')
    f.close()
    f = open(opts.reports_dir + '/reference.html', 'w')
    print_tree(result_tree, f, "", "", True)
    f.close()
    f = open(opts.reports_dir + '/reference_for_leafs.html', 'w')
    print_tree(result_tree, f, "", "../../../../../../../../../", True)
    f.close()
Example #49
def getSimilarityMatrixMultiProcess(rawDataFrame):
	from multiprocessing import Pool
	rows = rawDataFrame.shape[0]

	if pv.outputDebugMsg:
		Utils.logMessage("\nBuild similarity matrix of size %d x %d started" %(rows, rows))
		Utils.logTime()

	indexes = [i for i in xrange(rows)]
	simMat = []
	pool = Pool(4)
	for idx in indexes:
		simMat.append(pool.apply(computeSim, (idx, rawDataFrame)))

	pool.close()
	pool.join()

	if pv.outputDebugMsg:
		Utils.logMessage("\nBuild similarity matrix finished")
		Utils.logTime()

	mat = np.matrix(simMat)

	return np.add(mat, mat.T)
Example #50
if __name__ == '__main__':
    print "CPU Core: " + str(multiprocessing.cpu_count())
    print "Process:"
    sum_p = Process(target=sum_print, args=(2, 3, 4))
    square_p = Process(target=print_square, args=(5,))
    sum_p.start()
    square_p.start()
    sum_p.join()
    square_p.join()
    
    pool = Pool()
    print "Pool: apply()"
    for i in xrange(3, 6):
        list = [i-1, i, i+1]
        # apply blocks until the result is returned
        pool.apply(sum_print, list)
        
    print "Pool: apply_async()"
    result_list = []
    for i in xrange(3, 6):
        list = [i-1, i, i+1]
        # apply_async returns an AsyncResult without blocking
        result_list.append(pool.apply_async(sum, list))
    for result in result_list:
        print result.get()
        
    print "Pool: map()"
    list = range(1, 5)
    pool.map(print_square, list)
    print "blocks until map() finishes"
    
Example #51
# -*- coding:utf-8 -*-
# Author: Zhoutao
#create_date = 2016/8/8/22:57

import time
from multiprocessing import Pool
# 5 processes by default

def f1(arg):
    time.sleep(0.1)
    print(arg)

if __name__ == "__main__":
    pool = Pool(5)
    for i in range(30):
        pool.apply(func=f1,args=(i,)) # apply runs serially: one task finishes before the next one starts

import time
from multiprocessing import Pool
# 5 processes by default

def f1(arg):
    time.sleep(0.1)
    print(arg)

if __name__ == "__main__":
    pool = Pool(5)
    for i in range(30):
        pool.apply_async(func=f1,args=(i,))  # asynchronous variant of apply
    # pool.close()  # once all tasks have finished; without close() an assertion error is raised
    time.sleep(0.5)
Example #52
#input file must be fasta format, the rgx is here to distinguish between the fasta file and the blastdb files
rgx=re.compile('fasta$')
FileDir='C:\Users\gt41237\Cluster_on_RxLRs_only\JackKnife_dict_Pi_Pc_Ps_EER_trunc' #input files directory
OutDir='C:\Users\gt41237\Cluster_on_RxLRs_only' #output files directory

def SerialBlast(FileDir, OutDir):
  FDir=FileDir
  ODir=OutDir
  for element in os.listdir(FDir):
    if rgx.search(element):
      print element
      filepath=FDir+'\\'+element
      fileoutpath=ODir+'\\'+'SelfBLASTp'+element+'.csv'
      testsub=subprocess.Popen(['blastp', '-query',filepath, '-db', filepath, '-evalue', '1e-5','-outfmt', '6','-out', fileoutpath])
      print (element+ ' done!')
      return None
        

if __name__ == '__main__':
    pool = Pool()
    pool.apply(SerialBlast, (FileDir, OutDir))

#if __name__ == '__main__':
#  r=Process(target=SerialBlast)
#  r.start()

    


Example #53
#!/usr/bin/python3

import sys
import requests
from multiprocessing import Pool


def bombard():
    while True:
        try:
            requests.get("http://localhost/")
        except KeyboardInterrupt:
            pass


try:
    N_THREADS = int(sys.argv[1])
except IndexError:
    N_THREADS = 4

e = Pool(processes=N_THREADS)
try:
    f = e.apply(bombard)
except KeyboardInterrupt:
    pass
Example #54
fcntl.lockf(f, fcntl.LOCK_EX, 0, 0, os.SEEK_SET)   # lock entire file
fcntl.lockf(f, fcntl.LOCK_UN)                      # unlock


###############################################################################
#                           high level module
###############################################################################

# multiprocessing#################################################
# Using Pipe for IPC. Tasks are cached in Queue.Queue.
def f(x):
    return x*x

processes = Pool(2)
print(processes.map(f, range(5)))
print(processes.apply(f, (10,)))

r = processes.apply_async(f, (15,))
print(r.get())

processes.close()
processes.join()


# using futures ##################################################


###############################################################################
#                              other
###############################################################################
from multiprocessing import Pool
import os
import random
import time

def worker(num):
    for i in range(5):
        print("===pid=%d==num=%d="%(os.getpid(), num))
        time.sleep(1)

# 3 means at most 3 processes in the pool run at the same time
pool = Pool(3)

for i in range(10):
    print("---%d---"%i)
    pool.apply(worker, (i,))  # blocking call


pool.close()  # close the pool: no new tasks can be added
pool.join()   # after adding tasks, the main process does not wait for the pool tasks by default;
              # it exits as soon as its own work is done. Without this join() the
              # pool tasks would not get a chance to run.
Example #56
                err.put((i, e))
            ### put it back in the queue
            if i:
                q.put(i)
            ### Or mark it as done
            else:
                done.put(i)
        if retries==10:
            break

def process(i):
    sleep(2) ###simulate work
    return i+1

if __name__ == '__main__':
    ###Dummy data
    workq.put(-1)
    workq.put(120)
    workq.put(0)
    workq.put(-1)
    workq.put(1)

    pool = Pool(processes=4)
    pool.apply(f)
    pool.close()
    pool.join()

    #p = Process(target=f, args=(workq, doneq, errq))
    #p.start()
    #print q.get()    # prints "[42, None, 'hello']"
    #p.join()
Example #57
from multiprocessing import Pool

def f(x):
    return x*x

pool = Pool(4)
print(pool.apply(f, (10,)))
Example #58
            whois.query(dn)
            log.debug("[TAKEN] " + dn)
        except Exception:
            log.info("[FREE] " + dn)


    log.info("Permutations are " + str(calculate_permutations(ascii_lowercase, int(parser.options.length))))

    # Setup pool
    p = Pool(int(parser.options.workers))

    for e in PermutationIterator(ascii_lowercase, int(parser.options.length)):
        domain_name = str("".join(e) + parser.options.tld)
        result = None

        log.debug("Trying: " + domain_name)

        try:
            # Give it to an available worker
            p.apply(run_name, args=(domain_name,))

        except KeyboardInterrupt:
            p.terminate()


    # End the busy indicator
    stop_busy_indicator(bi)

    # Salute!
    log.info("Bye bye! :-)")
Example #59
print "making shared p2"
p2 = pow(p.n, p.seed, share = p)
print "done shared p2"

def void(wire):
    global p2
    print "im in %d" % os.getpid()
    req = pow_req(pow_obj = p2, wire = wire)
    print "creating res"
    sys.stdout.flush()
    res = req.create_res()

    print "verifying res"
    sys.stdout.flush()
    print req.verify_res(res)
    sys.stdout.flush()

    return

pool = Pool(processes=100)
for i in range(100):
    pool.apply(void, (wire,))

print "memory usage:"
u = resource.getrusage(resource.RUSAGE_SELF)
mb = u.ru_maxrss / (1.0 * 2**10)
print "TOTAL: %d MB" % mb

if len(sys.argv) > 3:
    raw_input("paused... press enter to continue\n")
Example #60
def apply_worker():
    pool = Pool(processes=8)
    res = [pool.apply(job, (x,)) for x in range(10)]
    print(res)
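
Because apply blocks, the list comprehension in Example #60 runs the ten jobs strictly one after another. An equivalent that overlaps the work would use apply_async and collect the results afterwards (a sketch, assuming the same job worker):

def apply_async_worker():
    pool = Pool(processes=8)
    async_res = [pool.apply_async(job, (x,)) for x in range(10)]
    pool.close()
    pool.join()
    res = [r.get() for r in async_res]
    print(res)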