def do_proc_F2(dinp, doutp):
    """scan all rows of dinp, apply proc() and store into doutp

    Each row of dinp is apodized to the output size, real-FFT'd, and written
    into the corresponding row of doutp.
    FIX: removed the leftover debug block (ten print() calls dumping axis
    itypes/sizes, doutp.report() and dir(doutp)) that spammed stdout on every run.
    """
    size = doutp.axis2.size     # processed-row length, set by the output F2 axis
    #scan = min(dinp.size1, doutp.size1)     # min() because no need to do extra work !
    scan = dinp.size1
    widgets = ['Processing F2: ', pg.Percentage(), ' ', pg.Bar(marker='-',left='[',right=']'), pg.ETA()]
    pbar = pg.ProgressBar(widgets=widgets, maxval=scan) #, fd=sys.stdout)
    for i in xrange(scan):
        r = dinp.row(i)         # fetch, apodize, real FFT, store back
        apod(r, size)
        r.rfft()
        doutp.set_row(i, r)
        pbar.update(i+1)
    pbar.finish()
def do_proc_F1_modu(dinp, doutp):
    "as do_proc_F1, but applies hypercomplex modulus() at the end"
    size = 2 * doutp.axis1.size             # processed length in F1 (hypercomplex: twice the output size)
    scan = doutp.size2                      # number of column pairs to process
    # scan = doutp.size2
    widgets = [ 'Processing F1 modu: ', pg.Percentage(), ' ', pg.Bar(marker='-', left='[', right=']'), pg.ETA() ]
    pbar = pg.ProgressBar(widgets=widgets, maxval=scan) #, fd=sys.stdout)
    for icol in xrange(scan):
        # two adjacent input columns form one hypercomplex pair
        hc = FTICRData(buffer=np.zeros( (2 * doutp.size1, 2)))  # 2 columns - used for hypercomplex modulus
        for channel in (0, 1):
            column = dinp.col(2 * icol + channel)
            apod(column, size)
            column.rfft()
            hc.set_col(channel, column)
        # mark both axes complex so modulus() combines the pair hypercomplex-wise
        hc.axis1.itype = 1
        hc.axis2.itype = 1
        hc.modulus()
        doutp.set_col(icol, hc.col(0))      # only the first (modulus) column is kept
        pbar.update(icol + 1)
    pbar.finish()
def do_proc_F1_flip_modu(dinp, doutp, parameter, nproc=None):
    """as do_proc_F1, but applies flip and then complex modulus() at the end

    Dispatches on nproc:
      None  -> serial in-process loop
      >1    -> multiprocessing pool with nproc workers
      0     -> MPI (also forced when mpiutil.MPI_size > 1)
    NOTE(review): parameter is expected to carry do_rqrd / rqrd_rank attributes
    (used by the rQRd denoising step) — confirm against caller.
    """
    size = 2*doutp.axis1.size
    scan = min(dinp.size2, doutp.size2)
    # scan = doutp.size2
    widgets = ['Processing F1 flip-modu: ', pg.Percentage(), ' ', pg.Bar(marker='-',left='[',right=']'), pg.ETA()]
    pbar= pg.ProgressBar(widgets=widgets, maxval=scan) #, fd=sys.stdout)
    shift = doutp.axis1.mztoi( doutp.axis1.highmass )   # frequency shift in points, computed from location of highmass
    hshift = doutp.axis1.itoh(shift)                    # the same in Hz
    rot = dinp.axis1.mztoi( dinp.axis1.highmass )       # but correction is applied in the starting space
    print("LEFT_POINT", shift)
    doutp.axis1.left_point = shift
    doutp.axis1.specwidth += hshift                     # correction of specwidth
    if mpiutil.MPI_size>1:      # MAD : KLUDGE — running under MPI forces the nproc==0 branch below
        nproc = 0
    if nproc==None:
        # serial path: process column pairs one at a time in this process
        print("doutp.axis1.itype ",doutp.axis1.itype)
        d = FTICRData( buffer=np.zeros((dinp.size1,2)) )    # 2 columns - used for hypercomplex modulus
        for i in xrange( scan ):
            for off in (0,1):
                p = dinp.col(2*i+off)
                d.set_col(off, p )
            d.axis1.itype = 0
            d.axis2.itype = 1
            d.f1demodu(rot)     # demodulate / flip by 'rot' points before the FFT
            p = d.col(0)
            apod(p, size)       # before or after ??? (original note: "avant ou apres ???")
            if parameter.do_rqrd:
                p.buffer = urQRd(p.buffer,parameter.rqrd_rank)  # integrated rQRd denoising
            p.rfft()
            p.modulus()
            #print "p.axis1.itype ", p.axis1.itype
            doutp.set_col(i,p)
            pbar.update(i+1)
    elif nproc >1:
        # multiprocessing path: pool.map preserves input order, so enumerate() gives the column index
        import multiprocessing as mp
        xarg = iterarg(dinp, rot, size, parameter.do_rqrd, parameter.rqrd_rank)
        pool = mp.Pool(nproc)
        res = pool.map(_do_proc_F1_flip_modu, xarg)
        for i,p in enumerate(res):
            doutp.set_col(i,p)
            pbar.update(i+1)
    elif nproc == 0:    # 0 means MPI
        mpiutil.mprint('MPI NEW STYLE')
        xarg = iterarg(dinp, rot, size, parameter.do_rqrd, parameter.rqrd_rank)
        res = mpiutil.enum_imap(_do_proc_F1_flip_modu, xarg)    # apply it
        for i,p in res:                                         # and get results (index, column) pairs
            doutp.set_col(i,p)
            pbar.update(i+1)
    else:
        raise Exception("We have an internal problem n.1 here !")
    pbar.finish()
def do_proc_F1(dinp, doutp):
    """scan all cols of dinp, apply proc() and store into doutp

    Each column is apodized to the output size, real-FFT'd, and written into
    the corresponding column of doutp.
    FIX: progress bar updated with i+1 instead of i — the original never
    reached maxval=scan (last call was scan-1) and was inconsistent with the
    sibling do_proc_* routines, which all use i+1.
    """
    size = doutp.axis1.size     # processed-column length, set by the output F1 axis
    scan = min(dinp.size2, doutp.size2)     # min() because no need to do extra work !
    widgets = ['Processing F1: ', pg.Percentage(), ' ', pg.Bar(marker='-',left='[',right=']'), pg.ETA()]
    pbar = pg.ProgressBar(widgets=widgets, maxval=scan) #, fd=sys.stdout)
    for i in xrange(scan):
        c = dinp.col(i)         # fetch, apodize, real FFT, store back
        apod(c, size)
        c.rfft()
        doutp.set_col(i, c)
        pbar.update(i+1)
    pbar.finish()
def __init__(self, board_id, interface, debug): Bootloader.__init__(self, board_id, interface, debug) # create a progressbar to show the progress while programming self.progressbar = progressbar.ProgressBar(max=1.0, width=60)
def main():
    """does the whole job,
    if we are running in MPI, this is only called by job #0
    all other jobs are running mpi.slave()

    Reads the .mscf config file named on the command line, loads the 2D input,
    prepares the HDF5 output, then applies the chosen denoising method
    (Cadzow or rQRd) column by column — via MPI when available, serially
    otherwise.
    FIX: `raise ("wrong algo")` raised a bare str, which is itself a
    TypeError in Python 3 (and deprecated in Python 2); replaced with
    `raise ValueError("wrong algo")`.
    """
    argv = sys.argv
    if len(argv) != 2:
        print("""
syntax is :
(mpirun -np N) python program configfile.mscf
""")
        sys.exit(1)
    # get parameters from the config file
    configfile = argv[1]
    cp = NPKConfigParser()
    cp.readfp(open(configfile))
    infile = cp.getword("Cadzow", "namein")
    print("infile", infile)
    outfile = cp.getword("Cadzow", "nameout")
    print("outfile", outfile)
    algo = cp.getword("Cadzow", "algorithm")
    print("algorithm", algo)
    n_of_line = cp.getint("Cadzow", "n_of_lines", 70)
    print("n_of_line", n_of_line)
    n_of_iter = cp.getint("Cadzow", "n_of_iters", 1)
    print("n_of_iter", n_of_iter)
    orda = cp.getint("Cadzow", "order", 500)
    print("order", orda)
    n_of_column = cp.getint("Cadzow", "n_of_column", 100)
    print("n_of_column", n_of_column)
    progress = cp.getboolean("Cadzow", "progress", True)

    d0 = load_input(infile)
    d0.check2D()                    # raise error if not a 2D
    Set_Table_Param()
    hfar = HDF5File(outfile, "w", debug=0)  # OUTFILE
    d1 = FTICRData(dim=2)           # create dummy 2D
    copyaxes(d0, d1)                # copy axes from d0 to d1
    group = 'resol1'
    hfar.create_from_template(d1, group)
    # prepare index and method
    if n_of_column == 0:
        indexes = range(d0.size2)               # process all
    else:
        indexes = selectcol(d0, n_of_column)    # selections
    if algo == "Cadzow":
        meth = cadz
    elif algo == "rQRd":
        meth = rqr
    else:
        raise ValueError("wrong algo")  # FIX: was `raise ("wrong algo")` — a bare str is not raisable
    # then loop
    t0 = time.time()
    if progress:
        widgets = ['Processing %s: ' % (algo), pg.Percentage(), ' ', pg.Bar(marker='-', left='[', right=']'), pg.ETA()]
        pbar = pg.ProgressBar(widgets=widgets, maxval=len(indexes)) #, fd=sys.stdout)
    d1D = d0.col(0)     # template column, reused as a buffer holder for each result
    xarg = iterarg(indexes, d0, n_of_line, n_of_iter, orda)
    if mpiutil.MPI_size > 1:        # means we are running under MPI !
        mpiutil.mprint('MPI Master job - starting slave jobs - ')
        res = mpiutil.enum_imap(meth, xarg)     # apply it
        for i, p in res:                        # and get results as (index, buffer) pairs
            d1D.buffer = p
            d1.set_col(indexes[i], d1D)
            if progress:
                pbar.update(i + 1)
    else:
        # serial fallback: lazy map keeps memory flat, results arrive in order
        import itertools
        res = itertools.imap(meth, xarg)        # apply it
        for i, p in enumerate(res):             # and get results
            d1D.buffer = p
            d1.set_col(indexes[i], d1D)
            if progress:
                pbar.update(i + 1)
    print("Processing time : ", time.time() - t0)