Example #1
    def run(self, initial_objective, minimize_initial, gamma=1 - 1e-6):

        M, N = self.ls.get_stoich_matrix_shape()
        result = {i: [0, 0] for i in range(N)}
        opt = LinearSystemOptimizer(self.ls, build=False)
        c = zeros(N)
        c[initial_objective] = 1
        self.ls.set_objective(c, minimize_initial)

        v0 = opt.optimize()
        z0 = v0.objective_value()

        self.ls.add_rows_to_model(c.reshape([1, N]), [z0 * gamma], [None],
                                  only_nonzero=True,
                                  names=['FASTFVAINITIALCONSTRAINT'])

        for sense in [True, False]:
            rx_per_job = max(1, N // self.n_jobs)  # chunksize for imap_unordered must be >= 1
            self.pool = _ProcessPool(processes=self.n_jobs,
                                     initializer=_fva_initializer,
                                     initargs=(self.ls, sense, gamma))
            for i, value in self.pool.imap_unordered(_fva_iteration,
                                                     range(N),
                                                     chunksize=rx_per_job):
                result[i][int(not sense)] = value

            self.pool.close()
            self.pool.join()

        self.ls.remove_from_model([M], 'const')
        return FluxVariabilityAnalysisResult([result[i] for i in range(N)])
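The module-level pair _fva_initializer/_fva_iteration is referenced above but not shown. Below is a minimal sketch of what it could look like, assuming the initializer caches the pickled linear system in per-process globals and each iteration re-targets the objective at a single reaction; only the names and call signatures come from run(), the bodies are assumptions.

from numpy import zeros

_mp_ls = _mp_sense = _mp_opt = None  # per-worker-process state

def _fva_initializer(linear_system, sense, gamma):
    # Runs once per worker: build a single optimizer per process instead of
    # one per reaction. gamma is accepted only to match the initargs in run().
    global _mp_ls, _mp_sense, _mp_opt
    _mp_ls, _mp_sense = linear_system, sense
    _mp_opt = LinearSystemOptimizer(linear_system, build=False)  # build flag mirrors run(); an assumption

def _fva_iteration(i):
    # Optimize flux through reaction i in the cached sense; return (index, value)
    # so the unordered imap output can be slotted back into place.
    _, n = _mp_ls.get_stoich_matrix_shape()
    c = zeros(n)
    c[i] = 1
    _mp_ls.set_objective(c, _mp_sense)
    return i, _mp_opt.optimize().objective_value()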
Example #2
 def greedy_search(self,beam_size=1,cutoff=0.9,min_cutoff=3,max_round=20,nworker=20,bs=100):
     current_max_coverage=0.0
     beams=[]
     curr_beam=[]
     cnt=0
     pool=pp._ProcessPool(processes=nworker,initializer=initializer,initargs=())
     curr_min=min_cutoff
     up=float('inf')
     while current_max_coverage<cutoff and cnt<max_round:
         t0 = time.time()
         self.next_beam=[]
         if not len(curr_beam):
             curr_candidates=[[]]
         else:
             curr_candidates=[x[1].split('_') for x in curr_beam]
         all_args=[]
         test_epitopes=[x+[y] for x in curr_candidates for y in self.input_epitope_affinity.index if y not in x]
         for i in range(0,len(test_epitopes),bs):
             all_args.append((copy(test_epitopes[i:min(i+bs,len(test_epitopes))]),curr_min))
         del test_epitopes
         r=pool.map_async(self.batch_scan,all_args,callback=self.batch_update)
         r.get()
         scores=[x[0][x[0]['count']>=curr_min]['freq'].sum() for x in self.next_beam]
         topidx=np.argwhere(scores == np.amax(scores)).flatten()
         curr_beam=[self.next_beam[np.random.choice(topidx,1)[0]]]
         h=curr_beam[0][0]
         current_max_coverage=h[h['count']>=curr_min]['freq'].sum()
         beams.append((current_max_coverage,curr_beam[0][1]))
         cnt+=1
         t1 = time.time()
     pool.close()
     return curr_beam,beams
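Neither batch_scan nor the batch_update callback appears in these excerpts. A minimal sketch of the pair, assuming batch_scan scores one batch of candidate sets and returns (coverage_frame, 'epi1_epi2_...') pairs (the greedy loop above reads results exactly that way, via x[0]['count'], x[0]['freq'] and x[1].split('_')), and that batch_update simply flattens the per-batch lists into self.next_beam; map_async calls it once with the list of all batch results, and coverage_frame is a hypothetical helper:

 def batch_scan(self, args):
     # args = (batch of candidate epitope sets, current count lower bound)
     epitope_sets, curr_min = args
     out = []
     for epitopes in epitope_sets:
         frame = self.coverage_frame(epitopes)  # hypothetical: per-individual 'count'/'freq'
         out.append((frame, '_'.join(epitopes)))
     return out

 def batch_update(self, batch_results):
     # map_async invokes the callback once, with one return value per batch
     for batch in batch_results:
         self.next_beam.extend(batch)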
Example #3
 def exhaust_search_cover(self,beam_size=1,cutoff=0.9,min_cutoff=3,max_round=20,nworker=20,bs=100):
     current_max_coverage=0.0
     beams=[]
     curr_beam=[]
     cnt=0
     pool=pp._ProcessPool(processes=nworker,initializer=initializer,initargs=())
     curr_min=min_cutoff
     up=float('inf')
     while current_max_coverage<cutoff and cnt<max_round:
         t0 = time.time()
         print('round', cnt)
         self.next_beam=[]
         if not len(curr_beam):
             curr_candidates=[[]]
         else:
             curr_candidates=[x[1].split('_') for x in curr_beam]
         all_args=[]
         test_epitopes=[x+[y] for x in curr_candidates for y in self.input_epitope_affinity.index if y not in x]
         for i in range(0,len(test_epitopes),bs):
             all_args.append((copy(test_epitopes[i:min(i+bs,len(test_epitopes))]),curr_min))
         del test_epitopes
         r=pool.map_async(self.batch_scan,all_args,callback=self.batch_update)
         r.get()
         scores=[x[0] for x in self.next_beam]
         curr_beam=self.next_beam
         current_max_coverage=np.max(scores)
         beams.append((current_max_coverage,self.next_beam[np.argmax(scores)]))
         cnt+=1
         t1 = time.time()
         print('time passed: {}'.format(t1-t0))
     pool.close()
     return curr_beam,beams
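A hedged call sketch for the two variants above; the object holding them (searcher here) and its construction are outside these excerpts:

# Greedy: one (coverage_frame, name) survivor per round; trail logs (coverage, name).
best_beam, trail = searcher.greedy_search(cutoff=0.9, min_cutoff=3, nworker=8, bs=100)
coverage, name = trail[-1]
print('final coverage {:.3f} with set {}'.format(coverage, name))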
Example #4
    def load_and_process_new_logs(self, filename=None):
        if self.data_source == 'file':
            if self.n_mprocessing > 1:
                error("Multiprocessing on files is not supported yet")
                pool = _ProcessPool(self.n_mprocessing)
            funclist = []
            reader = open(filename, 'r')
            self.part = 0
            while True:
                nlines = [x.strip() for x in islice(reader, 10000)]
                if not nlines:
                    logging.debug("breaking " + filename + " date ")
                    break
                self.part = self.part + 1

                # multiprocessing not working
                if self.n_mprocessing == 1:
                    self.process_frame(nlines, self.part, self.encapsulate_dest_path, filename)  # self.interval_tickList
                else:
                    f = pool.apply_async(self.process_frame, [nlines, self.part, self.encapsulate_dest_path, filename])
                    funclist.append(f)

            if self.n_mprocessing > 1:
                result = 0
                for f in funclist:
                    result += f.get(timeout=100000)  # timeout after 100000 seconds

                LOG.debug("There are {} rows of data in file {}".format(result, filename))
        elif self.data_source == 'stream':
            self.part = self.part + 1
            self.process_frame(self.msg_list, self.part, self.encapsulate_dest_path, filename)
Example #5
    def beam_search_parallel2(self,beam_size=20,cutoff=0.9,min_cutoff=5,
                            max_round=20,curr_beam={},curr_min=0,nworker=20,bs=50,diverse_cut=3):
        print('Using %d workers' % nworker)
        current_max_coverage=list(curr_beam.items())[0][1] if len(curr_beam)>0 else 0.0
        beams=[]
        curr_length=len(list(curr_beam.items())[0][0].split('_')) if len(curr_beam)>0 else 0
        pool=pp._ProcessPool(processes=nworker,initializer=initializer,initargs=())
        outdir=join(self.outdir,'plots')
        print('current beam:',list(curr_beam.items()))
        print('current lower bound:',curr_min)
        while (current_max_coverage<cutoff or curr_min<min_cutoff) and curr_length<max_round:
            print('beamsearch round',curr_length)
            t0 = time.time()
            self.next_beam=KthLargest(k=beam_size)
            if not len(curr_beam):
                curr_candidates=[[]]
            else:
                curr_candidates=[x.split('_') for x in curr_beam.keys()]
            all_args=[]
            print('current candidates:',curr_candidates)
            test_epitopes=[x+[y] for x in curr_candidates for y in self.input_epitope_affinity.index if diff1d(x,y,cut=diverse_cut)]
            for i in range(0,len(test_epitopes),bs):
                all_args.append((copy(test_epitopes[i:min(i+bs,len(test_epitopes))]),curr_min))
            print(len(test_epitopes),len(all_args))
            del test_epitopes
            r=pool.map_async(self.batch_scan,all_args,callback=self.batch_update)
            r.get()
            curr_beam=self.next_beam.return_dict()
            current_max_coverage=list(curr_beam.items())[0][1]
            beams.append(curr_beam)
            # check histogram here
            curr_length=len(list(curr_beam.items())[0][0].split('_'))
            current_median=np.median([v[1] for v in curr_beam.items()])
            old_min=curr_min
            if current_median > cutoff:
                print('median min_coverage of {} reached {}, raising min_coverage'.format(curr_min,current_median))
                curr_min+=1
            print('current beam:',list(curr_beam.items()))
            print('current lower bound:',curr_min)
            save_pickle(join(self.outdir,'beam_'+str(curr_length-1)+'.p'),list(curr_beam.items()))
            save_beams(join(self.outdir,'beam_'+str(curr_length-1)),curr_beam,curr_min,old_min)
            t1 = time.time()
            print('time passed: {}'.format(t1-t0))

        pool.close()
        print('Coverage cutoff reached, final solution:',list(curr_beam.items())[0])
        print('Per region details:')
        details=self.overall_coverage(epitopes=list(curr_beam.items())[0][0].split('_'),lower=min_cutoff,pre_map=True,verbose=True)
        plot_hist(details[1],'final {:.2f} for lb={} #pept={}'.format(details[0],min_cutoff,curr_length),join(outdir,'final_hist.png'))
        return list(curr_beam.items())[0],details,beams
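KthLargest, the beam container used from this variant onward, is also not defined in these excerpts. A heap-based sketch consistent with how it is consumed: return_dict() must yield the beam as {name: score} with the best candidate first, so that list(beam.items())[0] picks the optimum. The add() signature is an assumption:

import heapq

class KthLargest(object):
    """Keep the k highest-scoring (name, score) candidates seen so far."""

    def __init__(self, k):
        self.k = k
        self._heap = []  # min-heap of (score, name); weakest kept candidate on top

    def add(self, name, score):
        # Assumed entry point, called from batch_update for each scored candidate.
        if len(self._heap) < self.k:
            heapq.heappush(self._heap, (score, name))
        elif score > self._heap[0][0]:
            heapq.heapreplace(self._heap, (score, name))

    def return_dict(self):
        # Best candidate first, so list(beam.items())[0] is the current optimum.
        ranked = sorted(self._heap, reverse=True)
        return {name: score for score, name in ranked}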
Example #6
 def beam_search_parallel(self,
                          beam_size=20,
                          cutoff=0.9,
                          max_round=20,
                          curr_beam={},
                          nworker=20,
                          diverse_cut=3):
     print('Using %d workers' % nworker)
     current_max_coverage = list(curr_beam.items())[0][1] if len(curr_beam) > 0 else 0.0
     beams = []
     curr_length = len(list(curr_beam.items())[0][0].split('_')) if len(curr_beam) > 0 else 0
     pool = pp._ProcessPool(processes=nworker)
     while current_max_coverage < cutoff and curr_length < max_round:
         cnt = curr_length
         print('beamsearch round', curr_length)
         t0 = time.time()
         self.next_beam = KthLargest(k=beam_size)
         if not len(curr_beam):
             curr_candidates = [[]]
         else:
             curr_candidates = [x.split('_') for x in curr_beam.keys()]
         print('current candidates:', curr_candidates)
         test_epitopes = [
             x + [y] for x in curr_candidates
             for y in self.input_epitope_affinity.index
             if diff1d(x, y, cut=diverse_cut)
         ]
         print(len(test_epitopes))
         r = pool.map_async(self.batch_scan,
                            test_epitopes,
                            callback=self.batch_update)
         r.get()
         curr_beam = self.next_beam.return_dict()
         current_max_coverage = list(curr_beam.items())[0][1]
         beams.append(curr_beam)
         save_pickle(join(self.outdir, 'beam_' + str(cnt) + '.p'),
                     list(curr_beam.items()))
         save_beams(join(self.outdir, 'beam_' + str(cnt)), curr_beam)
         print('current beam:', list(curr_beam.items()))
         t1 = time.time()
         print('time passed: {}'.format(t1 - t0))
         curr_length = len(list(curr_beam.items())[0][0].split('_'))
     pool.close()
     print('Coverage cutoff reached, final solution:', list(curr_beam.items())[0])
     print('Per region details:')
     details = self.overall_coverage(
         epitopes=list(curr_beam.items())[0][0].split('_'))
     return list(curr_beam.items())[0], details, beams
Example #7
 def single_search(self,beam_size=1,cutoff=0.9,min_cutoff=3,max_round=20,nworker=20,bs=100,look=True):
     current_max_coverage=0.0
     beams=[]
     curr_beam=[]
     cnt=0
     pool=pp._ProcessPool(processes=nworker,initargs=())
     curr_min=1
     up=float('inf')
     while cnt<max_round:
         t0 = time.time()
         # print('round', cnt)
         self.next_beam=[]
         if not len(curr_beam):
             curr_candidates=[[]]
         else:
             curr_candidates=[x[1].split('_') for x in curr_beam]
         all_args=[]
         test_epitopes=[x+[y] for x in curr_candidates for y in self.input_epitope_affinity.index if y not in x]
         for i in range(0,len(test_epitopes),bs):
             all_args.append((copy(test_epitopes[i:min(i+bs,len(test_epitopes))]),curr_min))
         del test_epitopes
         self.progress=0
         r=pool.map_async(self.batch_scan,all_args,callback=self.batch_update)
         r.get()
         scores=[x[0][x[0]['count']>=curr_min]['freq'].sum() for x in self.next_beam]
         topidx=np.argwhere(scores == np.amax(scores)).flatten()
         if look:
             tb_min=curr_min
             while len(topidx)>1 and tb_min<=min_cutoff:
                 self.next_beam=[self.next_beam[j] for j in topidx]  # plain list: index element by element, no fancy indexing
                 tb_min+=1
                 scores=[x[0][x[0]['count']>=tb_min]['freq'].sum() for x in self.next_beam]
                 topidx=np.argwhere(scores == np.amax(scores)).flatten()
         curr_beam=[self.next_beam[np.random.choice(topidx,1)[0]]]
         beams.append(curr_beam[0])
         h=curr_beam[0][0]
         current_max_coverage=h[h['count']>=curr_min]['freq'].sum()
         if current_max_coverage >= cutoff:
             if curr_min==min_cutoff:
                 break
             else:
                 curr_min+=1
         cnt+=1
         t1 = time.time()
     pool.close()
     return curr_beam,beams
Example #8
def _batch_run(params, threads):
    jobs = len(params['iterable'])
    res_map = [None for _ in range(jobs)]
    true_threads = min((jobs // 2) + 1, threads)
    it_per_job = max(1, jobs // threads)  # chunksize must be >= 1
    pool = _ProcessPool(processes=true_threads,
                        initializer=_pool_initializer,
                        initargs=(params,))
    for i, value in pool.imap_unordered(_batch_function,
                                        list(range(jobs)),
                                        chunksize=it_per_job):
        res_map[i] = value

    pool.close()
    pool.join()

    return res_map
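A hedged usage sketch for _batch_run. It assumes _pool_initializer caches params in each worker and that _batch_function returns (index, result) pairs for indices drawn from params['iterable'], a protocol implied by the res_map bookkeeping above; the extra key is invented:

# Hypothetical params: _batch_run itself only requires the 'iterable' key;
# everything else is up to the worker functions to interpret.
params = {'iterable': list(range(100)), 'exponent': 2}
results = _batch_run(params, threads=4)  # results[i] corresponds to iterable[i]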
Example #9
		def _corda_find_all_dependencies(reaction_list):
			res_map = {r:i for i,r in enumerate(reaction_list)}
			true_threads = min((len(reaction_list)//2)+1, threads)
			result = [None] * len(reaction_list)
			rx_per_job = max(1, len(reaction_list) // threads)  # chunksize must be >= 1
			pool = _ProcessPool(
				processes=true_threads,
				initializer=_init_corda_worker,
				initargs=(self.corso_fba, constraint, constrainby, costfx, costbase, ntimes, 1e-6, self.lb)
			)
			for i, value in pool.imap_unordered(_corda_dependent_reactions_iteration, reaction_list,
												chunksize=rx_per_job):
				result[res_map[i]] = value

			pool.close()
			pool.join()

			return result
Example #10
 def preprocess(self,test_epitopes,beam_size=20,cutoff=0.9,min_cutoff=5,
                max_round=20,curr_beam={},beams=[],curr_min=0,nworker=20,bs=50,diverse_cut=3):
     pool=pp._ProcessPool(processes=nworker,initializer=initializer,initargs=())
     all_args=[]
     # pick the smallest multiple-of-nworker batch count whose batch size drops below 60
     for divide in range(10):
         bs=len(test_epitopes)//((divide+1)*nworker)+int(len(test_epitopes)%((divide+1)*nworker)>0)
         if bs<60:
             print('using batch size =',bs)
             break
     for i in range(0,len(test_epitopes),bs):
         all_args.append((copy(test_epitopes[i:min(i+bs,len(test_epitopes))]),beam_size,cutoff,min_cutoff,max_round,diverse_cut))
     print(len(test_epitopes),len(all_args))
     del test_epitopes
     r=pool.map_async(self.batch_pre,all_args)
     results=r.get()
     final=pd.concat(results)
     final.columns=['compressed','allwindow','coverage']
     pool.close()
     return final
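The divide loop in preprocess is a batch-size heuristic: it tries 1x, 2x, 3x (and so on) times nworker batches and stops at the first split whose rounded-up per-batch size drops below 60. With numbers chosen purely for illustration, 2400 epitopes and nworker=20 give ceil(2400/20)=120 at divide=0 and ceil(2400/40)=60 at divide=1, both too large, so the loop settles on divide=2 with 60 batches of ceil(2400/60)=40 items each.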
Example #11
def task_pool(model, task_components, task_fail_status, flux_constraints, task_added_reactions):
	threads = MP_THREADS
	task_list = list(task_components.keys())
	res_map = {}  # filled below with task -> result; an index map is not needed here
	true_threads = min((len(task_list) // 2) + 1, threads)
	it_per_job = max(1, len(task_list) // threads)  # chunksize must be >= 1
	pool = _ProcessPool(
		processes=true_threads,
		initializer=_init_task_solver,
		initargs=(model, task_components, task_fail_status, flux_constraints, task_added_reactions)
	)
	for i, value in pool.imap_unordered(_task_iteration, task_list,
										chunksize=it_per_job):
		res_map[i] = value

	pool.close()
	pool.join()

	return res_map
Example #12
def optimization_pool(lsystem,
                      bound_change_list,
                      objective_coef_list,
                      objective_sense_list,
                      threads=MP_THREADS):
    res_map = [None for _ in range(len(bound_change_list))]
    true_threads = min((len(bound_change_list) // 2) + 1, threads)
    it_per_job = max(1, len(bound_change_list) // threads)  # chunksize must be >= 1
    pool = _ProcessPool(processes=true_threads,
                        initializer=_pool_initializer,
                        initargs=(lsystem, bound_change_list,
                                  objective_coef_list, objective_sense_list))
    for i, value in pool.imap_unordered(_optimize_function,
                                        list(range(len(bound_change_list))),
                                        chunksize=it_per_job):
        res_map[i] = value

    pool.close()
    pool.join()
    return res_map
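A hedged call sketch for optimization_pool. The element layout of the three per-job lists is dictated by how _pool_initializer and _optimize_function unpack them, so the formats below are illustrative assumptions only:

# Two jobs over one shared linear system, each with its own bound changes,
# objective coefficients, and optimization sense.
solutions = optimization_pool(
    lsystem,
    bound_change_list=[{3: (0.0, 10.0)}, {3: (0.0, 0.0)}],  # assumed {index: (lb, ub)}
    objective_coef_list=[{0: 1.0}, {0: 1.0}],               # assumed {index: coefficient}
    objective_sense_list=[False, True],                     # sense flags as in Example #1
    threads=2)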
Example #13
    model = read_sbml_model(
        '/home/skapur/MEOCloud/Projectos/cobamp/examples/iAF1260_resources/original_model/Ec_iAF1260_flux2.xml'
    )
    mor = COBRAModelObjectReader(model)

    cbm_mp = mor.to_cobamp_cbm('CPLEX')
    cbm_fast = mor.to_cobamp_cbm('CPLEX')

    init_sol = cbm_mp.optimize({1004: 1}, False)
    Z0 = (1 - 1e-6) * init_sol.objective_value()
    cbm_mp.set_reaction_bounds(1004, lb=Z0)

    c1_time = time.time()

    pp = _ProcessPool(cpu_count())

    limits_mp = list(
        pp.map(cbm_mp.flux_limits, range(len(cbm_mp.reaction_names))))
    pp.close()
    pp.join()
    c2_time = time.time()
    print('Multi-threaded:', c2_time - c1_time, 'seconds')

    fva = FluxVariabilityAnalysis(cbm_fast.model)
    limits_fast = fva.run(1004, False)
    c3_time = time.time()
    print('Multi-threaded fast FVA:', c3_time - c2_time, 'seconds')

    error = 1e-6
    error_rx = []
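The fragment breaks off right after declaring the tolerance and the mismatch list. The comparison it sets up plausibly continues along these lines, assuming both limits_mp and limits_fast iterate as one (min, max) pair per reaction:

    for i, (lim_mp, lim_fast) in enumerate(zip(limits_mp, limits_fast)):
        if any(abs(a - b) > error for a, b in zip(lim_mp, lim_fast)):
            error_rx.append(i)
    print(len(error_rx), 'reactions disagree beyond the', error, 'tolerance')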
Example #14
    def beam_search_parallel(self,beam_size=20,cutoff=0.9,min_cutoff=5,
                            max_round=20,curr_beam={},curr_min=0,nworker=20,bs=50,diverse_cut=3,augment=False,upper=None,lower=None):
        print('Using %d workers' % nworker)
        if len(curr_beam)>0:
            current_max_coverage=self.overall_coverage(epitopes=list(curr_beam.items())[0][0].split('_'),lower=curr_min,pre_map=True,verbose=False)
        else:
            current_max_coverage=self.overall_coverage(epitopes=[],lower=curr_min,pre_map=True,verbose=False)
        beams=[]
        curr_length=max([len(k[0].split('_')) for k in curr_beam.items()]) if len(curr_beam)>0 else 0
        outdir=join(self.outdir,'plots')
        print('current beam:',list(curr_beam.items()))
        print('current lower bound:',curr_min)
        if augment:
            if curr_min in list(range(0,min_cutoff,4))+[min_cutoff]:
                iter_cutoff=max(upper['average'].iloc[curr_min]-0.01,(args.ratio*upper['average'].iloc[curr_min]+(1-args.ratio)*current_max_coverage))
            else:
                iter_cutoff=max(lower['average'].iloc[curr_min]+0.005,(args.ratio_low*max(current_max_coverage,lower['average'].iloc[curr_min])+(1-args.ratio_low)*upper['average'].iloc[curr_min]))
        else:
            if curr_min>0:
                iter_cutoff=cutoff
            elif args.initial_cut:
                iter_cutoff=max(args.initial_cut,cutoff)
            elif args.type.split('_')[0][-1]=='1':
                iter_cutoff=max(0.97,cutoff)
            else:
                iter_cutoff=max(0.93,cutoff)
        old_max=current_max_coverage
        while (current_max_coverage<iter_cutoff or curr_min<min_cutoff) and curr_length<max_round:
            pool=pp._ProcessPool(processes=nworker,initializer=initializer,initargs=())
            print('beamsearch round',curr_length,'current min:',curr_min,'cutoff',iter_cutoff,'curr_max',current_max_coverage)
            t0 = time.time()
            self.next_beam=KthLargest(k=beam_size)
            if not len(curr_beam):
                curr_candidates=[[]]
            else:
                curr_candidates=[x.split('_') for x in curr_beam.keys()]
            all_args=[]
            test_epitopes=[]
            test_epitopes_backup=[]
            seen_curr={}
            for c_keys in curr_candidates:
                ids=[x for x in self.input_epitope_affinity.index if diff1d(c_keys,x,cut=diverse_cut)]
                if len(ids)>0:
                    part=self.input_epitope_affinity.loc[ids].copy()
                    part['code']=part.apply(lambda x:''.join([str(k) for k in x]),axis=1)
                    for name, group in part.groupby('code'):
                        if len(group)>2 and curr_length>0:
                            group=group.sample(n=2)
                        test_epitopes.append((c_keys,group.index))
                else:
                    print("no more valid peptides, using current set")
                    test_epitopes_backup.append((c_keys,[[]]))
            if len(test_epitopes)==0:
                print("Used up all peptides!")
                break
            test_epitopes=test_epitopes+test_epitopes_backup
            # pick the smallest multiple-of-nworker batch count whose batch size drops below 80
            for divide in range(10):
                bs=len(test_epitopes)//((divide+1)*nworker)+int(len(test_epitopes)%((divide+1)*nworker)>0)
                if bs<80:
                    print('using batch size =',bs)
                    break
            for i in range(0,len(test_epitopes),bs):
                all_args.append((copy(test_epitopes[i:min(i+bs,len(test_epitopes))]),curr_min,beam_size))
            print(len(test_epitopes),len(all_args))
            del test_epitopes
            r=pool.map_async(self.batch_scan,all_args,callback=self.batch_update)
            r.get()
            curr_beam=self.next_beam.return_dict()
            current_max_coverage=list(curr_beam.items())[0][1]
            beams.append(curr_beam)
            # check histogram here
            curr_length=max([len(k[0].split('_')) for k in curr_beam.items()])
            current_median=np.median([v[1] for v in curr_beam.items()])
            old_min=curr_min
            if current_median > iter_cutoff or current_max_coverage-old_max<1e-5:
                if current_max_coverage-old_max<1e-5:
                    print("converged, cannot further optimize!")
                else:
                    print('median min_coverage of {} reached {}, raising min_coverage'.format(curr_min,current_median))
                curr_min+=1
                current_max_coverage=self.overall_coverage(epitopes=list(curr_beam.items())[0][0].split('_'),lower=curr_min,pre_map=True,verbose=False)
                if curr_min in list(range(0,min_cutoff,4))+[min_cutoff]:
                    iter_cutoff=max(upper['average'].iloc[curr_min]-0.01,(args.ratio*upper['average'].iloc[curr_min]+(1-args.ratio)*current_max_coverage))
                else:
                    iter_cutoff=max(lower['average'].iloc[curr_min]+0.01,(args.ratio_low*max(current_max_coverage,lower['average'].iloc[curr_min])+(1-args.ratio_low)*upper['average'].iloc[curr_min]))
            old_max=current_max_coverage
            print('current beam:',list(curr_beam.items()))
            print('current lower bound:',curr_min,iter_cutoff,current_max_coverage)
            save_pickle(join(self.outdir,'beam_'+str(curr_length-1)+'.p'),list(curr_beam.items()))
            save_beams(join(self.outdir,'beam_'+str(curr_length-1)),curr_beam,curr_min,old_min)
            t1 = time.time()
            print('time passed: {}'.format(t1-t0))
            pool.close()

        print('Coverage cutoff reached, final solution:',list(curr_beam.items())[0])
        print('Per region details:')
        details=self.overall_coverage(epitopes=list(curr_beam.items())[0][0].split('_'),lower=min_cutoff,pre_map=True,verbose=True)
        result_hist=compute_probs(details[1])
        result_hist.to_csv(join(outdir,'best_result_histogram.csv'))
        np.savetxt(join(outdir,'final_sequences.txt'),list(curr_beam.items())[0][0].split('_'),fmt='%s')
        return list(curr_beam.items())[0],details,beams
Example #15
    def beam_search_parallel(self,beam_size=20,cutoff=0.9,min_cutoff=5,\
                            max_round=20,curr_beam={},curr_min=0,nworker=20,bs=50,diverse_cut=3):
        print('Using %d workers' % nworker)
        current_max_coverage = list(curr_beam.items())[0][1] if len(curr_beam) > 0 else 0.0
        beams = []
        curr_length = len(list(curr_beam.items())[0][0].split('_')) if len(curr_beam) > 0 else 0
        outdir = join(self.outdir, 'plots')
        print('current beam:', list(curr_beam.items()))
        print('current lower bound:', curr_min)
        if curr_min > 0:
            iter_cutoff = cutoff
        else:
            if args.initial_cut:
                iter_cutoff = max(args.initial_cut, cutoff)
            else:
                if args.type.split('_')[0][-1] == '1':
                    iter_cutoff = max(0.97, cutoff)
                else:
                    iter_cutoff = max(0.93, cutoff)
        while (current_max_coverage < iter_cutoff
               or curr_min < min_cutoff) and curr_length < max_round:
            pool = pp._ProcessPool(processes=nworker,
                                   initializer=initializer,
                                   initargs=())
            print('beamsearch round', curr_length, 'cutoff', iter_cutoff)
            t0 = time.time()
            self.next_beam = KthLargest(k=beam_size)
            if not len(curr_beam):
                curr_candidates = [[]]
            else:
                curr_candidates = [x.split('_') for x in curr_beam.keys()]
            all_args = []
            print('current candidates:', curr_candidates)
            test_epitopes = []
            seen_curr = {}
            for c_keys in curr_candidates:
                ids = [
                    x for x in self.input_epitope_affinity.index
                    if diff1d(c_keys, x, cut=diverse_cut)
                ]
                part = self.input_epitope_affinity.loc[ids].copy()
                part['code'] = part.apply(
                    lambda x: ''.join([str(k) for k in x]), axis=1)
                for name, group in part.groupby('code'):
                    if len(group) > 2 and curr_length > 0:
                        group = group.sample(n=2)
                    test_epitopes.append((c_keys, group.index))
            for divide in range(10):
                bs = len(test_epitopes) // ((divide + 1) * nworker) + int(
                    len(test_epitopes) % ((divide + 1) * nworker) > 0)
                if bs < 60:
                    print('using batch size =', bs)
                    break
            for i in range(0, len(test_epitopes), bs):
                all_args.append(
                    (copy(test_epitopes[i:min(i + bs, len(test_epitopes))]),
                     curr_min, beam_size))
            print(len(test_epitopes), len(all_args))
            del test_epitopes
            r = pool.map_async(self.batch_scan,
                               all_args,
                               callback=self.batch_update)
            r.get()
            curr_beam = self.next_beam.return_dict()
            current_max_coverage = list(curr_beam.items())[0][1]
            beams.append(curr_beam)
            # check histogram here
            curr_length = len(list(curr_beam.items())[0][0].split('_'))
            current_median = np.median([v[1] for v in curr_beam.items()])
            old_min = curr_min
            if current_median > iter_cutoff:
                print('median min_coverage of {} reached {}, raising min_coverage'.format(
                    curr_min, current_median))
                curr_min += 1
            print('current beam:', list(curr_beam.items()))
            print('current lower bound:', curr_min)
            save_pickle(
                join(self.outdir, 'beam_' + str(curr_length - 1) + '.p'),
                list(curr_beam.items()))
            save_beams(join(self.outdir, 'beam_' + str(curr_length - 1)),
                       curr_beam, curr_min, old_min)
            t1 = time.time()
            print('time passed: {}'.format(t1 - t0))
            pool.close()
            if curr_min > 0:
                iter_cutoff = cutoff

        print('Coverage cutoff reached, final solution:', list(curr_beam.items())[0])
        print('Per region details:')
        details = self.overall_coverage(
            epitopes=list(curr_beam.items())[0][0].split('_'),
            lower=min_cutoff,
            pre_map=True,
            verbose=True)
        plot_hist(
            details[1],
            'final {:.2f} for lb={} #pept={}'.format(details[0], min_cutoff,
                                                     curr_length),
            join(outdir, 'final_hist.png'))
        return list(curr_beam.items())[0], details, beams
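Finally, a hedged call sketch for this last variant; the searcher instance, the global args namespace, and the helpers it touches all live outside these excerpts:

# searcher is an instance of the class defining beam_search_parallel;
# curr_min is raised internally once the median beam coverage clears iter_cutoff.
best, details, beams = searcher.beam_search_parallel(
    beam_size=20, cutoff=0.9, min_cutoff=5,
    max_round=20, nworker=8, diverse_cut=3)
print('best set:', best[0].split('_'), 'coverage:', best[1])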