from concurrent.futures import ProcessPoolExecutor

def processpool(streams, area_len):
    # Multiple process pool workers to fetch and upload to multiple streams
    pool = ProcessPoolExecutor(max_workers=area_len)
    results = pool.map(main_cfg, streams)
    for result in results:
        print(result)
    # ProcessPoolExecutor has no close()/terminate()/join(); shutdown() replaces them
    pool.shutdown(wait=True)
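# The same function reads more safely with the executor as a context manager, which
# calls shutdown() automatically even if a worker raises. A minimal sketch, assuming
# main_cfg and streams are defined as in the snippet above.
from concurrent.futures import ProcessPoolExecutor

def processpool_ctx(streams, area_len):
    with ProcessPoolExecutor(max_workers=area_len) as pool:
        for result in pool.map(main_cfg, streams):
            print(result)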
class ProcessPoolEvaluator(SubmitEvaluator):

    def __init__(self, processes=None):
        try:
            from concurrent.futures import ProcessPoolExecutor
            self.executor = ProcessPoolExecutor(processes)
            super(ProcessPoolEvaluator, self).__init__(self.executor.submit)
            # %s rather than %d so the default processes=None also logs cleanly
            LOGGER.log(logging.INFO, "Started process pool evaluator with %s processes", processes)
        except ImportError:
            # prevent error from showing in Eclipse
            raise

    def close(self):
        LOGGER.log(logging.DEBUG, "Closing process pool evaluator")
        # executors expose shutdown(), not close()
        self.executor.shutdown()
        LOGGER.log(logging.INFO, "Closed process pool evaluator")
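# A minimal usage sketch for the evaluator above. SubmitEvaluator and LOGGER come
# from the surrounding module and are not shown here; the stand-ins below are
# assumptions, not the original definitions, and would need to precede the class
# definition above in a real module.
import logging

LOGGER = logging.getLogger(__name__)

class SubmitEvaluator:
    # assumed stand-in: stores the submit callable handed in by the subclass
    def __init__(self, submit):
        self.submit = submit

def slow_square(x):
    return x * x

if __name__ == '__main__':
    evaluator = ProcessPoolEvaluator(processes=2)
    futures = [evaluator.submit(slow_square, i) for i in range(5)]
    print([f.result() for f in futures])   # [0, 1, 4, 9, 16]
    evaluator.close()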
p = ProcessPoolExecutor(max_workers=threads)
#with ThreadPoolExecutor(max_workers=threads) as p:
#print path2SAGs
SAGfiles = glob.glob(path2SAGs)
metafiles = glob.glob(path2Metas)
#print SAGfiles
# all possible combos of SAGs vs Metagenomes
inlist = []
for SAG in SAGfiles:
    os.system('makeblastdb -in ' + SAG + ' -out ' + SAG + '.db' + ' -dbtype nucl')
    for meta in metafiles:
        inlist.append([SAG, meta])
#print inlist
p.map(call_blast, inlist)
# ProcessPoolExecutor has no close(); shutdown() waits for the mapped tasks to finish
p.shutdown(wait=True)

'''
The (approximate) size of these chunks can be specified by setting chunksize to
a positive integer. For very long iterables, using a large value for chunksize
can significantly improve performance compared to the default size of 1.
With ThreadPoolExecutor, chunksize has no effect.
'''

# Larger than memory issue
from concurrent.futures import ProcessPoolExecutor

def sum_row(line):
    return sum([int(x) for x in line.split()])

with ProcessPoolExecutor(max_workers=4) as executor:
    with open('numbers.txt') as fh:
        # assumed completion -- the original snippet breaks off here: map sum_row
        # over the file's lines and combine the per-row sums
        results = executor.map(sum_row, fh, chunksize=1000)
        print(sum(results))
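# A self-contained sketch illustrating the chunksize note quoted above: with a long
# iterable, a larger chunksize sends work to the pool in batches instead of one item
# per task, which cuts per-task overhead. Worker count, data size, and chunksize
# values here are arbitrary, illustrative choices.
import time
from concurrent.futures import ProcessPoolExecutor

def square(x):
    return x * x

if __name__ == '__main__':
    data = range(200_000)
    for chunksize in (1, 1000):
        start = time.perf_counter()
        with ProcessPoolExecutor(max_workers=4) as executor:
            total = sum(executor.map(square, data, chunksize=chunksize))
        print('chunksize=%d took %.2fs (total=%d)' % (chunksize, time.perf_counter() - start, total))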