def main():

    def params_3D_2D_NEW(phi, theta, psi, s2x, s2y, mirror):
        if mirror:
            m = 1
            alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0 - psi, 0, 0, 1.0)
        else:
            m = 0
            alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0 - psi, 0, 0, 1.0)
        return alpha, sx, sy, m

    progname = os.path.basename(sys.argv[0])
    usage = progname + " prj_stack --ave2D= --var2D= --ave3D= --var3D= --img_per_grp= --fl=0.2 --aa=0.1 --sym=symmetry --CTF"
    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option("--ave2D", type="string", default=False, help="write to the disk a stack of 2D averages")
    parser.add_option("--var2D", type="string", default=False, help="write to the disk a stack of 2D variances")
    parser.add_option("--ave3D", type="string", default=False, help="write to the disk reconstructed 3D average")
    parser.add_option("--var3D", type="string", default=False, help="compute 3D variability (time consuming!)")
    parser.add_option("--img_per_grp", type="int", default=10, help="number of neighbouring projections")
    parser.add_option("--no_norm", action="store_true", default=False, help="do not use normalization")
    parser.add_option("--radius", type="int", default=-1, help="radius for 3D variability")
    parser.add_option("--npad", type="int", default=2, help="number of times to pad the original images")
    parser.add_option("--sym", type="string", default="c1", help="symmetry")
    parser.add_option("--fl", type="float", default=0.0, help="stop-band frequency (Default - no filtration)")
    parser.add_option("--aa", type="float", default=0.0, help="fall off of the filter (Default - no filtration)")
    parser.add_option("--CTF", action="store_true", default=False, help="use CTF correction")
    parser.add_option("--VERBOSE", action="store_true", default=False, help="Long output for debugging")
    #parser.add_option("--MPI" , action="store_true", default=False, help="use MPI version")
    #parser.add_option("--radiuspca", type="int" , default=-1 , help="radius for PCA" )
    #parser.add_option("--iter", type="int" , default=40 , help="maximum number of iterations (stop criterion of reconstruction process)" )
    #parser.add_option("--abs", type="float" , default=0.0 , help="minimum average absolute change of voxels' values (stop criterion of reconstruction process)" )
    #parser.add_option("--squ", type="float" , default=0.0 , help="minimum average squared change of voxels' values (stop criterion of reconstruction process)" )
    parser.add_option("--VAR", action="store_true", default=False, help="input stack consists of 2D variances (Default False)")
    parser.add_option("--decimate", type="float", default=1.0, help="image decimate rate, a number larger than 1. Default is 1")
    parser.add_option("--window", type="int", default=0,
                      help="reduce images to a small image size without changing pixel_size. Default value is zero."
) #parser.add_option("--SND", action="store_true", default=False, help="compute squared normalized differences (Default False)") parser.add_option( "--nvec", type="int", default=0, help="number of eigenvectors, default = 0 meaning no PCA calculated") parser.add_option( "--symmetrize", action="store_true", default=False, help="Prepare input stack for handling symmetry (Default False)") (options, args) = parser.parse_args() ##### from mpi import mpi_init, mpi_comm_rank, mpi_comm_size, mpi_recv, MPI_COMM_WORLD from mpi import mpi_barrier, mpi_reduce, mpi_bcast, mpi_send, MPI_FLOAT, MPI_SUM, MPI_INT, MPI_MAX from applications import MPI_start_end from reconstruction import recons3d_em, recons3d_em_MPI from reconstruction import recons3d_4nn_MPI, recons3d_4nn_ctf_MPI from utilities import print_begin_msg, print_end_msg, print_msg from utilities import read_text_row, get_image, get_im from utilities import bcast_EMData_to_all, bcast_number_to_all from utilities import get_symt # This is code for handling symmetries by the above program. To be incorporated. PAP 01/27/2015 from EMAN2db import db_open_dict # Set up global variables related to bdb cache if global_def.CACHE_DISABLE: from utilities import disable_bdb_cache disable_bdb_cache() # Set up global variables related to ERROR function global_def.BATCH = True # detect if program is running under MPI RUNNING_UNDER_MPI = "OMPI_COMM_WORLD_SIZE" in os.environ if RUNNING_UNDER_MPI: global_def.MPI = True if options.symmetrize: if RUNNING_UNDER_MPI: try: sys.argv = mpi_init(len(sys.argv), sys.argv) try: number_of_proc = mpi_comm_size(MPI_COMM_WORLD) if (number_of_proc > 1): ERROR( "Cannot use more than one CPU for symmetry prepration", "sx3dvariability", 1) except: pass except: pass # Input #instack = "Clean_NORM_CTF_start_wparams.hdf" #instack = "bdb:data" instack = args[0] sym = options.sym if (sym == "c1"): ERROR("Thre is no need to symmetrize stack for C1 symmetry", "sx3dvariability", 1) if (instack[:4] != "bdb:"): stack = "bdb:data" delete_bdb(stack) junk = cmdexecute("sxcpy.py " + instack + " " + stack) else: stack = instack qt = EMUtil.get_all_attributes(stack, 'xform.projection') na = len(qt) ts = get_symt(sym) ks = len(ts) angsa = [None] * na for k in xrange(ks): delete_bdb("bdb:Q%1d" % k) junk = cmdexecute("e2bdb.py " + stack + " --makevstack=bdb:Q%1d" % k) DB = db_open_dict("bdb:Q%1d" % k) for i in xrange(na): ut = qt[i] * ts[k] DB.set_attr(i, "xform.projection", ut) #bt = ut.get_params("spider") #angsa[i] = [round(bt["phi"],3)%360.0, round(bt["theta"],3)%360.0, bt["psi"], -bt["tx"], -bt["ty"]] #write_text_row(angsa, 'ptsma%1d.txt'%k) #junk = cmdexecute("e2bdb.py "+stack+" --makevstack=bdb:Q%1d"%k) #junk = cmdexecute("sxheader.py bdb:Q%1d --params=xform.projection --import=ptsma%1d.txt"%(k,k)) DB.close() delete_bdb("bdb:sdata") junk = cmdexecute("e2bdb.py . 
--makevstack=bdb:sdata --filt=Q") #junk = cmdexecute("ls EMAN2DB/sdata*") a = get_im("bdb:sdata") a.set_attr("variabilitysymmetry", sym) a.write_image("bdb:sdata") else: sys.argv = mpi_init(len(sys.argv), sys.argv) myid = mpi_comm_rank(MPI_COMM_WORLD) number_of_proc = mpi_comm_size(MPI_COMM_WORLD) main_node = 0 if len(args) == 1: stack = args[0] else: print("usage: " + usage) print("Please run '" + progname + " -h' for detailed options") return 1 t0 = time() # obsolete flags options.MPI = True options.nvec = 0 options.radiuspca = -1 options.iter = 40 options.abs = 0.0 options.squ = 0.0 if options.fl > 0.0 and options.aa == 0.0: ERROR("Fall off has to be given for the low-pass filter", "sx3dvariability", 1, myid) if options.VAR and options.SND: ERROR("Only one of var and SND can be set!", "sx3dvariability", myid) exit() if options.VAR and (options.ave2D or options.ave3D or options.var2D): ERROR( "When VAR is set, the program cannot output ave2D, ave3D or var2D", "sx3dvariability", 1, myid) exit() #if options.SND and (options.ave2D or options.ave3D): # ERROR("When SND is set, the program cannot output ave2D or ave3D", "sx3dvariability", 1, myid) # exit() if options.nvec > 0: ERROR("PCA option not implemented", "sx3dvariability", 1, myid) exit() if options.nvec > 0 and options.ave3D == None: ERROR("When doing PCA analysis, one must set ave3D", "sx3dvariability", myid=myid) exit() import string options.sym = options.sym.lower() # if global_def.CACHE_DISABLE: # from utilities import disable_bdb_cache # disable_bdb_cache() # global_def.BATCH = True if myid == main_node: print_begin_msg("sx3dvariability") print_msg("%-70s: %s\n" % ("Input stack", stack)) img_per_grp = options.img_per_grp nvec = options.nvec radiuspca = options.radiuspca symbaselen = 0 if myid == main_node: nima = EMUtil.get_image_count(stack) img = get_image(stack) nx = img.get_xsize() ny = img.get_ysize() if options.sym != "c1": imgdata = get_im(stack) try: i = imgdata.get_attr("variabilitysymmetry") if (i != options.sym): ERROR( "The symmetry provided does not agree with the symmetry of the input stack", "sx3dvariability", myid=myid) except: ERROR( "Input stack is not prepared for symmetry, please follow instructions", "sx3dvariability", myid=myid) from utilities import get_symt i = len(get_symt(options.sym)) if ((nima / i) * i != nima): ERROR( "The length of the input stack is incorrect for symmetry processing", "sx3dvariability", myid=myid) symbaselen = nima / i else: symbaselen = nima else: nima = 0 nx = 0 ny = 0 nima = bcast_number_to_all(nima) nx = bcast_number_to_all(nx) ny = bcast_number_to_all(ny) Tracker = {} Tracker["total_stack"] = nima if options.decimate == 1.: if options.window != 0: nx = options.window ny = options.window else: if options.window == 0: nx = int(nx / options.decimate) ny = int(ny / options.decimate) else: nx = int(options.window / options.decimate) ny = nx Tracker["nx"] = nx Tracker["ny"] = ny Tracker["nz"] = nx symbaselen = bcast_number_to_all(symbaselen) if radiuspca == -1: radiuspca = nx / 2 - 2 if myid == main_node: print_msg("%-70s: %d\n" % ("Number of projection", nima)) img_begin, img_end = MPI_start_end(nima, number_of_proc, myid) """ if options.SND: from projection import prep_vol, prgs from statistics import im_diff from utilities import get_im, model_circle, get_params_proj, set_params_proj from utilities import get_ctf, generate_ctf from filter import filt_ctf imgdata = EMData.read_images(stack, range(img_begin, img_end)) if options.CTF: vol = recons3d_4nn_ctf_MPI(myid, imgdata, 1.0, 
symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1) else: vol = recons3d_4nn_MPI(myid, imgdata, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1) bcast_EMData_to_all(vol, myid) volft, kb = prep_vol(vol) mask = model_circle(nx/2-2, nx, ny) varList = [] for i in xrange(img_begin, img_end): phi, theta, psi, s2x, s2y = get_params_proj(imgdata[i-img_begin]) ref_prj = prgs(volft, kb, [phi, theta, psi, -s2x, -s2y]) if options.CTF: ctf_params = get_ctf(imgdata[i-img_begin]) ref_prj = filt_ctf(ref_prj, generate_ctf(ctf_params)) diff, A, B = im_diff(ref_prj, imgdata[i-img_begin], mask) diff2 = diff*diff set_params_proj(diff2, [phi, theta, psi, s2x, s2y]) varList.append(diff2) mpi_barrier(MPI_COMM_WORLD) """ if options.VAR: #varList = EMData.read_images(stack, range(img_begin, img_end)) varList = [] this_image = EMData() for index_of_particle in xrange(img_begin, img_end): this_image.read_image(stack, index_of_particle) varList.append( image_decimate_window_xform_ctf(this_image, options.decimate, options.window, options.CTF)) else: from utilities import bcast_number_to_all, bcast_list_to_all, send_EMData, recv_EMData from utilities import set_params_proj, get_params_proj, params_3D_2D, get_params2D, set_params2D, compose_transform2 from utilities import model_blank, nearest_proj, model_circle from applications import pca from statistics import avgvar, avgvar_ctf, ccc from filter import filt_tanl from morphology import threshold, square_root from projection import project, prep_vol, prgs from sets import Set if myid == main_node: t1 = time() proj_angles = [] aveList = [] tab = EMUtil.get_all_attributes(stack, 'xform.projection') for i in xrange(nima): t = tab[i].get_params('spider') phi = t['phi'] theta = t['theta'] psi = t['psi'] x = theta if x > 90.0: x = 180.0 - x x = x * 10000 + psi proj_angles.append([x, t['phi'], t['theta'], t['psi'], i]) t2 = time() print_msg("%-70s: %d\n" % ("Number of neighboring projections", img_per_grp)) print_msg("...... Finding neighboring projections\n") if options.VERBOSE: print "Number of images per group: ", img_per_grp print "Now grouping projections" proj_angles.sort() proj_angles_list = [0.0] * (nima * 4) if myid == main_node: for i in xrange(nima): proj_angles_list[i * 4] = proj_angles[i][1] proj_angles_list[i * 4 + 1] = proj_angles[i][2] proj_angles_list[i * 4 + 2] = proj_angles[i][3] proj_angles_list[i * 4 + 3] = proj_angles[i][4] proj_angles_list = bcast_list_to_all(proj_angles_list, myid, main_node) proj_angles = [] for i in xrange(nima): proj_angles.append([ proj_angles_list[i * 4], proj_angles_list[i * 4 + 1], proj_angles_list[i * 4 + 2], int(proj_angles_list[i * 4 + 3]) ]) del proj_angles_list proj_list, mirror_list = nearest_proj(proj_angles, img_per_grp, range(img_begin, img_end)) all_proj = Set() for im in proj_list: for jm in im: all_proj.add(proj_angles[jm][3]) all_proj = list(all_proj) if options.VERBOSE: print "On node %2d, number of images needed to be read = %5d" % ( myid, len(all_proj)) index = {} for i in xrange(len(all_proj)): index[all_proj[i]] = i mpi_barrier(MPI_COMM_WORLD) if myid == main_node: print_msg("%-70s: %.2f\n" % ("Finding neighboring projections lasted [s]", time() - t2)) print_msg("%-70s: %d\n" % ("Number of groups processed on the main node", len(proj_list))) if options.VERBOSE: print "Grouping projections took: ", (time() - t2) / 60, "[min]" print "Number of groups on main node: ", len(proj_list) mpi_barrier(MPI_COMM_WORLD) if myid == main_node: print_msg("...... 
calculating the stack of 2D variances \n") if options.VERBOSE: print "Now calculating the stack of 2D variances" proj_params = [0.0] * (nima * 5) aveList = [] varList = [] if nvec > 0: eigList = [[] for i in xrange(nvec)] if options.VERBOSE: print "Begin to read images on processor %d" % (myid) ttt = time() #imgdata = EMData.read_images(stack, all_proj) imgdata = [] for index_of_proj in xrange(len(all_proj)): img = EMData() img.read_image(stack, all_proj[index_of_proj]) dmg = image_decimate_window_xform_ctf(img, options.decimate, options.window, options.CTF) #print dmg.get_xsize(), "init" imgdata.append(dmg) if options.VERBOSE: print "Reading images on processor %d done, time = %.2f" % ( myid, time() - ttt) print "On processor %d, we got %d images" % (myid, len(imgdata)) mpi_barrier(MPI_COMM_WORLD) ''' imgdata2 = EMData.read_images(stack, range(img_begin, img_end)) if options.fl > 0.0: for k in xrange(len(imgdata2)): imgdata2[k] = filt_tanl(imgdata2[k], options.fl, options.aa) if options.CTF: vol = recons3d_4nn_ctf_MPI(myid, imgdata2, 1.0, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1) else: vol = recons3d_4nn_MPI(myid, imgdata2, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1) if myid == main_node: vol.write_image("vol_ctf.hdf") print_msg("Writing to the disk volume reconstructed from averages as : %s\n"%("vol_ctf.hdf")) del vol, imgdata2 mpi_barrier(MPI_COMM_WORLD) ''' from applications import prepare_2d_forPCA from utilities import model_blank for i in xrange(len(proj_list)): ki = proj_angles[proj_list[i][0]][3] if ki >= symbaselen: continue mi = index[ki] phiM, thetaM, psiM, s2xM, s2yM = get_params_proj(imgdata[mi]) grp_imgdata = [] for j in xrange(img_per_grp): mj = index[proj_angles[proj_list[i][j]][3]] phi, theta, psi, s2x, s2y = get_params_proj(imgdata[mj]) alpha, sx, sy, mirror = params_3D_2D_NEW( phi, theta, psi, s2x, s2y, mirror_list[i][j]) if thetaM <= 90: if mirror == 0: alpha, sx, sy, scale = compose_transform2( alpha, sx, sy, 1.0, phiM - phi, 0.0, 0.0, 1.0) else: alpha, sx, sy, scale = compose_transform2( alpha, sx, sy, 1.0, 180 - (phiM - phi), 0.0, 0.0, 1.0) else: if mirror == 0: alpha, sx, sy, scale = compose_transform2( alpha, sx, sy, 1.0, -(phiM - phi), 0.0, 0.0, 1.0) else: alpha, sx, sy, scale = compose_transform2( alpha, sx, sy, 1.0, -(180 - (phiM - phi)), 0.0, 0.0, 1.0) set_params2D(imgdata[mj], [alpha, sx, sy, mirror, 1.0]) grp_imgdata.append(imgdata[mj]) #print grp_imgdata[j].get_xsize(), imgdata[mj].get_xsize() if not options.no_norm: #print grp_imgdata[j].get_xsize() mask = model_circle(nx / 2 - 2, nx, nx) for k in xrange(img_per_grp): ave, std, minn, maxx = Util.infomask( grp_imgdata[k], mask, False) grp_imgdata[k] -= ave grp_imgdata[k] /= std del mask if options.fl > 0.0: from filter import filt_ctf, filt_table from fundamentals import fft, window2d nx2 = 2 * nx ny2 = 2 * ny if options.CTF: from utilities import pad for k in xrange(img_per_grp): grp_imgdata[k] = window2d( fft( filt_tanl( filt_ctf( fft( pad(grp_imgdata[k], nx2, ny2, 1, 0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa)), nx, ny) #grp_imgdata[k] = window2d(fft( filt_table( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa), fifi) ),nx,ny) #grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa) else: for k in xrange(img_per_grp): grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa) #grp_imgdata[k] = window2d(fft( filt_table( filt_tanl( 
filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa), fifi) ),nx,ny) #grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa) else: from utilities import pad, read_text_file from filter import filt_ctf, filt_table from fundamentals import fft, window2d nx2 = 2 * nx ny2 = 2 * ny if options.CTF: from utilities import pad for k in xrange(img_per_grp): grp_imgdata[k] = window2d( fft( filt_ctf(fft( pad(grp_imgdata[k], nx2, ny2, 1, 0.0)), grp_imgdata[k].get_attr("ctf"), binary=1)), nx, ny) #grp_imgdata[k] = window2d(fft( filt_table( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa), fifi) ),nx,ny) #grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa) ''' if i < 10 and myid == main_node: for k in xrange(10): grp_imgdata[k].write_image("grp%03d.hdf"%i, k) ''' """ if myid == main_node and i==0: for pp in xrange(len(grp_imgdata)): grp_imgdata[pp].write_image("pp.hdf", pp) """ ave, grp_imgdata = prepare_2d_forPCA(grp_imgdata) """ if myid == main_node and i==0: for pp in xrange(len(grp_imgdata)): grp_imgdata[pp].write_image("qq.hdf", pp) """ var = model_blank(nx, ny) for q in grp_imgdata: Util.add_img2(var, q) Util.mul_scalar(var, 1.0 / (len(grp_imgdata) - 1)) # Switch to std dev var = square_root(threshold(var)) #if options.CTF: ave, var = avgvar_ctf(grp_imgdata, mode="a") #else: ave, var = avgvar(grp_imgdata, mode="a") """ if myid == main_node: ave.write_image("avgv.hdf",i) var.write_image("varv.hdf",i) """ set_params_proj(ave, [phiM, thetaM, 0.0, 0.0, 0.0]) set_params_proj(var, [phiM, thetaM, 0.0, 0.0, 0.0]) aveList.append(ave) varList.append(var) if options.VERBOSE: print "%5.2f%% done on processor %d" % ( i * 100.0 / len(proj_list), myid) if nvec > 0: eig = pca(input_stacks=grp_imgdata, subavg="", mask_radius=radiuspca, nvec=nvec, incore=True, shuffle=False, genbuf=True) for k in xrange(nvec): set_params_proj(eig[k], [phiM, thetaM, 0.0, 0.0, 0.0]) eigList[k].append(eig[k]) """ if myid == 0 and i == 0: for k in xrange(nvec): eig[k].write_image("eig.hdf", k) """ del imgdata # To this point, all averages, variances, and eigenvectors are computed if options.ave2D: from fundamentals import fpol if myid == main_node: km = 0 for i in xrange(number_of_proc): if i == main_node: for im in xrange(len(aveList)): aveList[im].write_image(options.ave2D, km) km += 1 else: nl = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) nl = int(nl[0]) for im in xrange(nl): ave = recv_EMData(i, im + i + 70000) """ nm = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) nm = int(nm[0]) members = mpi_recv(nm, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) ave.set_attr('members', map(int, members)) members = mpi_recv(nm, MPI_FLOAT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) ave.set_attr('pix_err', map(float, members)) members = mpi_recv(3, MPI_FLOAT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) ave.set_attr('refprojdir', map(float, members)) """ tmpvol = fpol(ave, Tracker["nx"], Tracker["nx"], 1) tmpvol.write_image(options.ave2D, km) km += 1 else: mpi_send(len(aveList), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) for im in xrange(len(aveList)): send_EMData(aveList[im], main_node, im + myid + 70000) """ members = aveList[im].get_attr('members') mpi_send(len(members), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) mpi_send(members, len(members), MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, 
MPI_COMM_WORLD) members = aveList[im].get_attr('pix_err') mpi_send(members, len(members), MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) try: members = aveList[im].get_attr('refprojdir') mpi_send(members, 3, MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) except: mpi_send([-999.0,-999.0,-999.0], 3, MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) """ if options.ave3D: from fundamentals import fpol if options.VERBOSE: print "Reconstructing 3D average volume" ave3D = recons3d_4nn_MPI(myid, aveList, symmetry=options.sym, npad=options.npad) bcast_EMData_to_all(ave3D, myid) if myid == main_node: ave3D = fpol(ave3D, Tracker["nx"], Tracker["nx"], Tracker["nx"]) ave3D.write_image(options.ave3D) print_msg("%-70s: %s\n" % ( "Writing to the disk volume reconstructed from averages as", options.ave3D)) del ave, var, proj_list, stack, phi, theta, psi, s2x, s2y, alpha, sx, sy, mirror, aveList if nvec > 0: for k in xrange(nvec): if options.VERBOSE: print "Reconstruction eigenvolumes", k cont = True ITER = 0 mask2d = model_circle(radiuspca, nx, nx) while cont: #print "On node %d, iteration %d"%(myid, ITER) eig3D = recons3d_4nn_MPI(myid, eigList[k], symmetry=options.sym, npad=options.npad) bcast_EMData_to_all(eig3D, myid, main_node) if options.fl > 0.0: eig3D = filt_tanl(eig3D, options.fl, options.aa) if myid == main_node: eig3D.write_image("eig3d_%03d.hdf" % k, ITER) Util.mul_img(eig3D, model_circle(radiuspca, nx, nx, nx)) eig3Df, kb = prep_vol(eig3D) del eig3D cont = False icont = 0 for l in xrange(len(eigList[k])): phi, theta, psi, s2x, s2y = get_params_proj( eigList[k][l]) proj = prgs(eig3Df, kb, [phi, theta, psi, s2x, s2y]) cl = ccc(proj, eigList[k][l], mask2d) if cl < 0.0: icont += 1 cont = True eigList[k][l] *= -1.0 u = int(cont) u = mpi_reduce([u], 1, MPI_INT, MPI_MAX, main_node, MPI_COMM_WORLD) icont = mpi_reduce([icont], 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD) if myid == main_node: u = int(u[0]) print " Eigenvector: ", k, " number changed ", int( icont[0]) else: u = 0 u = bcast_number_to_all(u, main_node) cont = bool(u) ITER += 1 del eig3Df, kb mpi_barrier(MPI_COMM_WORLD) del eigList, mask2d if options.ave3D: del ave3D if options.var2D: from fundamentals import fpol if myid == main_node: km = 0 for i in xrange(number_of_proc): if i == main_node: for im in xrange(len(varList)): tmpvol = fpol(varList[im], Tracker["nx"], Tracker["nx"], 1) tmpvol.write_image(options.var2D, km) km += 1 else: nl = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) nl = int(nl[0]) for im in xrange(nl): ave = recv_EMData(i, im + i + 70000) tmpvol = fpol(ave, Tracker["nx"], Tracker["nx"], 1) tmpvol.write_image(options.var2D, km) km += 1 else: mpi_send(len(varList), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) for im in xrange(len(varList)): send_EMData(varList[im], main_node, im + myid + 70000) # What with the attributes?? 
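                # Note: the ave2D/var2D output above uses a simple master/worker gather.
                # Every non-master rank first reports how many local images it holds via
                # mpi_send(..., MPI_INT, ...), then ships each EMData object with
                # send_EMData() using the tag im + myid + 70000; the main node mirrors
                # this with mpi_recv()/recv_EMData() and writes the images sequentially,
                # so the running output index km exists only on the main node.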
        mpi_barrier(MPI_COMM_WORLD)

        if options.var3D:
            if myid == main_node and options.VERBOSE:
                print "Reconstructing 3D variability volume"
            t6 = time()
            radiusvar = options.radius
            if (radiusvar < 0):
                radiusvar = nx // 2 - 3
            res = recons3d_4nn_MPI(myid, varList, symmetry=options.sym, npad=options.npad)
            #res = recons3d_em_MPI(varList, vol_stack, options.iter, radiusvar, options.abs, True, options.sym, options.squ)
            if myid == main_node:
                from fundamentals import fpol
                res = fpol(res, Tracker["nx"], Tracker["nx"], Tracker["nx"])
                res.write_image(options.var3D)

            if myid == main_node:
                print_msg("%-70s: %.2f\n" % ("Reconstructing 3D variability took [s]", time() - t6))
                if options.VERBOSE:
                    print "Reconstruction took: %.2f [min]" % ((time() - t6) / 60)

        if myid == main_node:
            print_msg("%-70s: %.2f\n" % ("Total time for these computations [s]", time() - t0))
            if options.VERBOSE:
                print "Total time for these computations: %.2f [min]" % ((time() - t0) / 60)
            print_end_msg("sx3dvariability")

    from mpi import mpi_finalize
    mpi_finalize()

    if RUNNING_UNDER_MPI:
        global_def.MPI = False
    global_def.BATCH = False
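# Illustrative sketch (not part of the SPARX codebase): in the per-group loop of the main()
# above, each 2D variance image is built with prepare_2d_forPCA(), Util.add_img2(),
# threshold() and square_root(), which for plain arrays reduces to the unbiased per-pixel
# sample variance of the aligned group members followed by a square root (a standard-deviation
# map). The helper name below is hypothetical and uses numpy only to restate that arithmetic.
def _group_average_and_stddev_sketch(group_arrays):
    """Return (average, per-pixel std dev) for a list of equally sized 2D arrays."""
    import numpy as np
    stack = np.stack(group_arrays, axis=0)                        # shape (N, ny, nx)
    ave = stack.mean(axis=0)                                      # 2D group average
    var = ((stack - ave) ** 2).sum(axis=0) / (stack.shape[0] - 1) # unbiased sample variance
    return ave, np.sqrt(np.clip(var, 0.0, None))                  # clip negatives, then sqrt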
def main(): def params_3D_2D_NEW(phi, theta, psi, s2x, s2y, mirror): # the final ali2d parameters already combine shifts operation first and rotation operation second for parameters converted from 3D if mirror: m = 1 alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0) else: m = 0 alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0) return alpha, sx, sy, m progname = os.path.basename(sys.argv[0]) usage = progname + " prj_stack --ave2D= --var2D= --ave3D= --var3D= --img_per_grp= --fl= --aa= --sym=symmetry --CTF" parser = OptionParser(usage, version=SPARXVERSION) parser.add_option("--output_dir", type="string" , default="./", help="Output directory") parser.add_option("--ave2D", type="string" , default=False, help="Write to the disk a stack of 2D averages") parser.add_option("--var2D", type="string" , default=False, help="Write to the disk a stack of 2D variances") parser.add_option("--ave3D", type="string" , default=False, help="Write to the disk reconstructed 3D average") parser.add_option("--var3D", type="string" , default=False, help="Compute 3D variability (time consuming!)") parser.add_option("--img_per_grp", type="int" , default=100, help="Number of neighbouring projections.(Default is 100)") parser.add_option("--no_norm", action="store_true", default=False, help="Do not use normalization.(Default is to apply normalization)") #parser.add_option("--radius", type="int" , default=-1 , help="radius for 3D variability" ) parser.add_option("--npad", type="int" , default=2 , help="Number of time to pad the original images.(Default is 2 times padding)") parser.add_option("--sym" , type="string" , default="c1", help="Symmetry. (Default is no symmetry)") parser.add_option("--fl", type="float" , default=0.0, help="Low pass filter cutoff in absolute frequency (0.0 - 0.5) and is applied to decimated images. (Default - no filtration)") parser.add_option("--aa", type="float" , default=0.02 , help="Fall off of the filter. Use default value if user has no clue about falloff (Default value is 0.02)") parser.add_option("--CTF", action="store_true", default=False, help="Use CFT correction.(Default is no CTF correction)") #parser.add_option("--MPI" , action="store_true", default=False, help="use MPI version") #parser.add_option("--radiuspca", type="int" , default=-1 , help="radius for PCA" ) #parser.add_option("--iter", type="int" , default=40 , help="maximum number of iterations (stop criterion of reconstruction process)" ) #parser.add_option("--abs", type="float" , default=0.0 , help="minimum average absolute change of voxels' values (stop criterion of reconstruction process)" ) #parser.add_option("--squ", type="float" , default=0.0 , help="minimum average squared change of voxels' values (stop criterion of reconstruction process)" ) parser.add_option("--VAR" , action="store_true", default=False, help="Stack of input consists of 2D variances (Default False)") parser.add_option("--decimate", type ="float", default=0.25, help="Image decimate rate, a number less than 1. (Default is 0.25)") parser.add_option("--window", type ="int", default=0, help="Target image size relative to original image size. 
(Default value is zero.)") #parser.add_option("--SND", action="store_true", default=False, help="compute squared normalized differences (Default False)") #parser.add_option("--nvec", type="int" , default=0 , help="Number of eigenvectors, (Default = 0 meaning no PCA calculated)") parser.add_option("--symmetrize", action="store_true", default=False, help="Prepare input stack for handling symmetry (Default False)") parser.add_option("--overhead", type ="float", default=0.5, help="python overhead per CPU.") (options,args) = parser.parse_args() ##### from mpi import mpi_init, mpi_comm_rank, mpi_comm_size, mpi_recv, MPI_COMM_WORLD from mpi import mpi_barrier, mpi_reduce, mpi_bcast, mpi_send, MPI_FLOAT, MPI_SUM, MPI_INT, MPI_MAX #from mpi import * from applications import MPI_start_end from reconstruction import recons3d_em, recons3d_em_MPI from reconstruction import recons3d_4nn_MPI, recons3d_4nn_ctf_MPI from utilities import print_begin_msg, print_end_msg, print_msg from utilities import read_text_row, get_image, get_im, wrap_mpi_send, wrap_mpi_recv from utilities import bcast_EMData_to_all, bcast_number_to_all from utilities import get_symt # This is code for handling symmetries by the above program. To be incorporated. PAP 01/27/2015 from EMAN2db import db_open_dict # Set up global variables related to bdb cache if global_def.CACHE_DISABLE: from utilities import disable_bdb_cache disable_bdb_cache() # Set up global variables related to ERROR function global_def.BATCH = True # detect if program is running under MPI RUNNING_UNDER_MPI = "OMPI_COMM_WORLD_SIZE" in os.environ if RUNNING_UNDER_MPI: global_def.MPI = True if options.output_dir =="./": current_output_dir = os.path.abspath(options.output_dir) else: current_output_dir = options.output_dir if options.symmetrize : if RUNNING_UNDER_MPI: try: sys.argv = mpi_init(len(sys.argv), sys.argv) try: number_of_proc = mpi_comm_size(MPI_COMM_WORLD) if( number_of_proc > 1 ): ERROR("Cannot use more than one CPU for symmetry preparation","sx3dvariability",1) except: pass except: pass if not os.path.exists(current_output_dir): os.mkdir(current_output_dir) # Input #instack = "Clean_NORM_CTF_start_wparams.hdf" #instack = "bdb:data" from logger import Logger,BaseLogger_Files if os.path.exists(os.path.join(current_output_dir, "log.txt")): os.remove(os.path.join(current_output_dir, "log.txt")) log_main=Logger(BaseLogger_Files()) log_main.prefix = os.path.join(current_output_dir, "./") instack = args[0] sym = options.sym.lower() if( sym == "c1" ): ERROR("There is no need to symmetrize stack for C1 symmetry","sx3dvariability",1) line ="" for a in sys.argv: line +=" "+a log_main.add(line) if(instack[:4] !="bdb:"): #if output_dir =="./": stack = "bdb:data" stack = "bdb:"+current_output_dir+"/data" delete_bdb(stack) junk = cmdexecute("sxcpy.py "+instack+" "+stack) else: stack = instack qt = EMUtil.get_all_attributes(stack,'xform.projection') na = len(qt) ts = get_symt(sym) ks = len(ts) angsa = [None]*na for k in range(ks): #Qfile = "Q%1d"%k #if options.output_dir!="./": Qfile = os.path.join(options.output_dir,"Q%1d"%k) Qfile = os.path.join(current_output_dir, "Q%1d"%k) #delete_bdb("bdb:Q%1d"%k) delete_bdb("bdb:"+Qfile) #junk = cmdexecute("e2bdb.py "+stack+" --makevstack=bdb:Q%1d"%k) junk = cmdexecute("e2bdb.py "+stack+" --makevstack=bdb:"+Qfile) #DB = db_open_dict("bdb:Q%1d"%k) DB = db_open_dict("bdb:"+Qfile) for i in range(na): ut = qt[i]*ts[k] DB.set_attr(i, "xform.projection", ut) #bt = ut.get_params("spider") #angsa[i] = [round(bt["phi"],3)%360.0, 
round(bt["theta"],3)%360.0, bt["psi"], -bt["tx"], -bt["ty"]] #write_text_row(angsa, 'ptsma%1d.txt'%k) #junk = cmdexecute("e2bdb.py "+stack+" --makevstack=bdb:Q%1d"%k) #junk = cmdexecute("sxheader.py bdb:Q%1d --params=xform.projection --import=ptsma%1d.txt"%(k,k)) DB.close() #if options.output_dir =="./": delete_bdb("bdb:sdata") delete_bdb("bdb:" + current_output_dir + "/"+"sdata") #junk = cmdexecute("e2bdb.py . --makevstack=bdb:sdata --filt=Q") sdata = "bdb:"+current_output_dir+"/"+"sdata" print(sdata) junk = cmdexecute("e2bdb.py " + current_output_dir +" --makevstack="+sdata +" --filt=Q") #junk = cmdexecute("ls EMAN2DB/sdata*") #a = get_im("bdb:sdata") a = get_im(sdata) a.set_attr("variabilitysymmetry",sym) #a.write_image("bdb:sdata") a.write_image(sdata) else: from fundamentals import window2d sys.argv = mpi_init(len(sys.argv), sys.argv) myid = mpi_comm_rank(MPI_COMM_WORLD) number_of_proc = mpi_comm_size(MPI_COMM_WORLD) main_node = 0 shared_comm = mpi_comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL) myid_on_node = mpi_comm_rank(shared_comm) no_of_processes_per_group = mpi_comm_size(shared_comm) masters_from_groups_vs_everything_else_comm = mpi_comm_split(MPI_COMM_WORLD, main_node == myid_on_node, myid_on_node) color, no_of_groups, balanced_processor_load_on_nodes = get_colors_and_subsets(main_node, MPI_COMM_WORLD, myid, \ shared_comm, myid_on_node, masters_from_groups_vs_everything_else_comm) overhead_loading = options.overhead*number_of_proc #memory_per_node = options.memory_per_node #if memory_per_node == -1.: memory_per_node = 2.*no_of_processes_per_group keepgoing = 1 current_window = options.window current_decimate = options.decimate if len(args) == 1: stack = args[0] else: print(( "usage: " + usage)) print(( "Please run '" + progname + " -h' for detailed options")) return 1 t0 = time() # obsolete flags options.MPI = True #options.nvec = 0 options.radiuspca = -1 options.iter = 40 options.abs = 0.0 options.squ = 0.0 if options.fl > 0.0 and options.aa == 0.0: ERROR("Fall off has to be given for the low-pass filter", "sx3dvariability", 1, myid) #if options.VAR and options.SND: # ERROR("Only one of var and SND can be set!", "sx3dvariability", myid) if options.VAR and (options.ave2D or options.ave3D or options.var2D): ERROR("When VAR is set, the program cannot output ave2D, ave3D or var2D", "sx3dvariability", 1, myid) #if options.SND and (options.ave2D or options.ave3D): # ERROR("When SND is set, the program cannot output ave2D or ave3D", "sx3dvariability", 1, myid) #if options.nvec > 0 : # ERROR("PCA option not implemented", "sx3dvariability", 1, myid) #if options.nvec > 0 and options.ave3D == None: # ERROR("When doing PCA analysis, one must set ave3D", "sx3dvariability", 1, myid) if current_decimate>1.0 or current_decimate<0.0: ERROR("Decimate rate should be a value between 0.0 and 1.0", "sx3dvariability", 1, myid) if current_window < 0.0: ERROR("Target window size should be always larger than zero", "sx3dvariability", 1, myid) if myid == main_node: img = get_image(stack, 0) nx = img.get_xsize() ny = img.get_ysize() if(min(nx, ny) < current_window): keepgoing = 0 keepgoing = bcast_number_to_all(keepgoing, main_node, MPI_COMM_WORLD) if keepgoing == 0: ERROR("The target window size cannot be larger than the size of decimated image", "sx3dvariability", 1, myid) import string options.sym = options.sym.lower() # if global_def.CACHE_DISABLE: # from utilities import disable_bdb_cache # disable_bdb_cache() # global_def.BATCH = True if myid == main_node: if not 
os.path.exists(current_output_dir): os.mkdir(current_output_dir)# Never delete output_dir in the program! img_per_grp = options.img_per_grp #nvec = options.nvec radiuspca = options.radiuspca from logger import Logger,BaseLogger_Files #if os.path.exists(os.path.join(options.output_dir, "log.txt")): os.remove(os.path.join(options.output_dir, "log.txt")) log_main=Logger(BaseLogger_Files()) log_main.prefix = os.path.join(current_output_dir, "./") if myid == main_node: line = "" for a in sys.argv: line +=" "+a log_main.add(line) log_main.add("-------->>>Settings given by all options<<<-------") log_main.add("Symmetry : %s"%options.sym) log_main.add("Input stack : %s"%stack) log_main.add("Output_dir : %s"%current_output_dir) if options.ave3D: log_main.add("Ave3d : %s"%options.ave3D) if options.var3D: log_main.add("Var3d : %s"%options.var3D) if options.ave2D: log_main.add("Ave2D : %s"%options.ave2D) if options.var2D: log_main.add("Var2D : %s"%options.var2D) if options.VAR: log_main.add("VAR : True") else: log_main.add("VAR : False") if options.CTF: log_main.add("CTF correction : True ") else: log_main.add("CTF correction : False ") log_main.add("Image per group : %5d"%options.img_per_grp) log_main.add("Image decimate rate : %4.3f"%current_decimate) log_main.add("Low pass filter : %4.3f"%options.fl) current_fl = options.fl if current_fl == 0.0: current_fl = 0.5 log_main.add("Current low pass filter is equivalent to cutoff frequency %4.3f for original image size"%round((current_fl*current_decimate),3)) log_main.add("Window size : %5d "%current_window) log_main.add("sx3dvariability begins") symbaselen = 0 if myid == main_node: nima = EMUtil.get_image_count(stack) img = get_image(stack) nx = img.get_xsize() ny = img.get_ysize() nnxo = nx nnyo = ny if options.sym != "c1" : imgdata = get_im(stack) try: i = imgdata.get_attr("variabilitysymmetry").lower() if(i != options.sym): ERROR("The symmetry provided does not agree with the symmetry of the input stack", "sx3dvariability", 1, myid) except: ERROR("Input stack is not prepared for symmetry, please follow instructions", "sx3dvariability", 1, myid) from utilities import get_symt i = len(get_symt(options.sym)) if((nima/i)*i != nima): ERROR("The length of the input stack is incorrect for symmetry processing", "sx3dvariability", 1, myid) symbaselen = nima/i else: symbaselen = nima else: nima = 0 nx = 0 ny = 0 nnxo = 0 nnyo = 0 nima = bcast_number_to_all(nima) nx = bcast_number_to_all(nx) ny = bcast_number_to_all(ny) nnxo = bcast_number_to_all(nnxo) nnyo = bcast_number_to_all(nnyo) if current_window > max(nx, ny): ERROR("Window size is larger than the original image size", "sx3dvariability", 1) if current_decimate == 1.: if current_window !=0: nx = current_window ny = current_window else: if current_window == 0: nx = int(nx*current_decimate+0.5) ny = int(ny*current_decimate+0.5) else: nx = int(current_window*current_decimate+0.5) ny = nx symbaselen = bcast_number_to_all(symbaselen) # check FFT prime number from fundamentals import smallprime is_fft_friendly = (nx == smallprime(nx)) if not is_fft_friendly: if myid == main_node: log_main.add("The target image size is not a product of small prime numbers") log_main.add("Program adjusts the input settings!") ### two cases if current_decimate == 1.: nx = smallprime(nx) ny = nx current_window = nx # update if myid == main_node: log_main.add("The window size is updated to %d."%current_window) else: if current_window == 0: nx = smallprime(int(nx*current_decimate+0.5)) current_decimate = float(nx)/nnxo ny = nx if 
(myid == main_node): log_main.add("The decimate rate is updated to %f."%current_decimate) else: nx = smallprime(int(current_window*current_decimate+0.5)) ny = nx current_window = int(nx/current_decimate+0.5) if (myid == main_node): log_main.add("The window size is updated to %d."%current_window) if myid == main_node: log_main.add("The target image size is %d"%nx) if radiuspca == -1: radiuspca = nx/2-2 if myid == main_node: log_main.add("%-70s: %d\n"%("Number of projection", nima)) img_begin, img_end = MPI_start_end(nima, number_of_proc, myid) """ if options.SND: from projection import prep_vol, prgs from statistics import im_diff from utilities import get_im, model_circle, get_params_proj, set_params_proj from utilities import get_ctf, generate_ctf from filter import filt_ctf imgdata = EMData.read_images(stack, range(img_begin, img_end)) if options.CTF: vol = recons3d_4nn_ctf_MPI(myid, imgdata, 1.0, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1) else: vol = recons3d_4nn_MPI(myid, imgdata, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1) bcast_EMData_to_all(vol, myid) volft, kb = prep_vol(vol) mask = model_circle(nx/2-2, nx, ny) varList = [] for i in xrange(img_begin, img_end): phi, theta, psi, s2x, s2y = get_params_proj(imgdata[i-img_begin]) ref_prj = prgs(volft, kb, [phi, theta, psi, -s2x, -s2y]) if options.CTF: ctf_params = get_ctf(imgdata[i-img_begin]) ref_prj = filt_ctf(ref_prj, generate_ctf(ctf_params)) diff, A, B = im_diff(ref_prj, imgdata[i-img_begin], mask) diff2 = diff*diff set_params_proj(diff2, [phi, theta, psi, s2x, s2y]) varList.append(diff2) mpi_barrier(MPI_COMM_WORLD) """ if options.VAR: # 2D variance images have no shifts #varList = EMData.read_images(stack, range(img_begin, img_end)) from EMAN2 import Region for index_of_particle in range(img_begin,img_end): image = get_im(stack, index_of_proj) if current_window > 0: varList.append(fdecimate(window2d(image,current_window,current_window), nx,ny)) else: varList.append(fdecimate(image, nx,ny)) else: from utilities import bcast_number_to_all, bcast_list_to_all, send_EMData, recv_EMData from utilities import set_params_proj, get_params_proj, params_3D_2D, get_params2D, set_params2D, compose_transform2 from utilities import model_blank, nearest_proj, model_circle, write_text_row, wrap_mpi_gatherv from applications import pca from statistics import avgvar, avgvar_ctf, ccc from filter import filt_tanl from morphology import threshold, square_root from projection import project, prep_vol, prgs from sets import Set from utilities import wrap_mpi_recv, wrap_mpi_bcast, wrap_mpi_send import numpy as np if myid == main_node: t1 = time() proj_angles = [] aveList = [] tab = EMUtil.get_all_attributes(stack, 'xform.projection') for i in range(nima): t = tab[i].get_params('spider') phi = t['phi'] theta = t['theta'] psi = t['psi'] x = theta if x > 90.0: x = 180.0 - x x = x*10000+psi proj_angles.append([x, t['phi'], t['theta'], t['psi'], i]) t2 = time() log_main.add( "%-70s: %d\n"%("Number of neighboring projections", img_per_grp)) log_main.add("...... 
Finding neighboring projections\n") log_main.add( "Number of images per group: %d"%img_per_grp) log_main.add( "Now grouping projections") proj_angles.sort() proj_angles_list = np.full((nima, 4), 0.0, dtype=np.float32) for i in range(nima): proj_angles_list[i][0] = proj_angles[i][1] proj_angles_list[i][1] = proj_angles[i][2] proj_angles_list[i][2] = proj_angles[i][3] proj_angles_list[i][3] = proj_angles[i][4] else: proj_angles_list = 0 proj_angles_list = wrap_mpi_bcast(proj_angles_list, main_node, MPI_COMM_WORLD) proj_angles = [] for i in range(nima): proj_angles.append([proj_angles_list[i][0], proj_angles_list[i][1], proj_angles_list[i][2], int(proj_angles_list[i][3])]) del proj_angles_list proj_list, mirror_list = nearest_proj(proj_angles, img_per_grp, range(img_begin, img_end)) all_proj = Set() for im in proj_list: for jm in im: all_proj.add(proj_angles[jm][3]) all_proj = list(all_proj) index = {} for i in range(len(all_proj)): index[all_proj[i]] = i mpi_barrier(MPI_COMM_WORLD) if myid == main_node: log_main.add("%-70s: %.2f\n"%("Finding neighboring projections lasted [s]", time()-t2)) log_main.add("%-70s: %d\n"%("Number of groups processed on the main node", len(proj_list))) log_main.add("Grouping projections took: %12.1f [m]"%((time()-t2)/60.)) log_main.add("Number of groups on main node: ", len(proj_list)) mpi_barrier(MPI_COMM_WORLD) if myid == main_node: log_main.add("...... Calculating the stack of 2D variances \n") # Memory estimation. There are two memory consumption peaks # peak 1. Compute ave, var; # peak 2. Var volume reconstruction; # proj_params = [0.0]*(nima*5) aveList = [] varList = [] #if nvec > 0: eigList = [[] for i in range(nvec)] dnumber = len(all_proj)# all neighborhood set for assigned to myid pnumber = len(proj_list)*2. + img_per_grp # aveList and varList tnumber = dnumber+pnumber vol_size2 = nx**3*4.*8/1.e9 vol_size1 = 2.*nnxo**3*4.*8/1.e9 proj_size = nnxo*nnyo*len(proj_list)*4.*2./1.e9 # both aveList and varList orig_data_size = nnxo*nnyo*4.*tnumber/1.e9 reduced_data_size = nx*nx*4.*tnumber/1.e9 full_data = np.full((number_of_proc, 2), -1., dtype=np.float16) full_data[myid] = orig_data_size, reduced_data_size if myid != main_node: wrap_mpi_send(full_data, main_node, MPI_COMM_WORLD) if myid == main_node: for iproc in range(number_of_proc): if iproc != main_node: dummy = wrap_mpi_recv(iproc, MPI_COMM_WORLD) full_data[np.where(dummy>-1)] = dummy[np.where(dummy>-1)] del dummy mpi_barrier(MPI_COMM_WORLD) full_data = wrap_mpi_bcast(full_data, main_node, MPI_COMM_WORLD) # find the CPU with heaviest load minindx = np.argsort(full_data, 0) heavy_load_myid = minindx[-1][1] total_mem = sum(full_data) if myid == main_node: if current_window == 0: log_main.add("Nx: current image size = %d. Decimated by %f from %d"%(nx, current_decimate, nnxo)) else: log_main.add("Nx: current image size = %d. 
Windowed to %d, and decimated by %f from %d"%(nx, current_window, current_decimate, nnxo)) log_main.add("Nproj: number of particle images.") log_main.add("Navg: number of 2D average images.") log_main.add("Nvar: number of 2D variance images.") log_main.add("Img_per_grp: user defined image per group for averaging = %d"%img_per_grp) log_main.add("Overhead: total python overhead memory consumption = %f"%overhead_loading) log_main.add("Total memory) = 4.0*nx^2*(nproj + navg +nvar+ img_per_grp)/1.0e9 + overhead: %12.3f [GB]"%\ (total_mem[1] + overhead_loading)) del full_data mpi_barrier(MPI_COMM_WORLD) if myid == heavy_load_myid: log_main.add("Begin reading and preprocessing images on processor. Wait... ") ttt = time() #imgdata = EMData.read_images(stack, all_proj) imgdata = [ None for im in range(len(all_proj))] for index_of_proj in range(len(all_proj)): #image = get_im(stack, all_proj[index_of_proj]) if( current_window > 0): imgdata[index_of_proj] = fdecimate(window2d(get_im(stack, all_proj[index_of_proj]),current_window,current_window), nx, ny) else: imgdata[index_of_proj] = fdecimate(get_im(stack, all_proj[index_of_proj]), nx, ny) if (current_decimate> 0.0 and options.CTF): ctf = imgdata[index_of_proj].get_attr("ctf") ctf.apix = ctf.apix/current_decimate imgdata[index_of_proj].set_attr("ctf", ctf) if myid == heavy_load_myid and index_of_proj%100 == 0: log_main.add(" ...... %6.2f%% "%(index_of_proj/float(len(all_proj))*100.)) mpi_barrier(MPI_COMM_WORLD) if myid == heavy_load_myid: log_main.add("All_proj preprocessing cost %7.2f m"%((time()-ttt)/60.)) log_main.add("Wait untill reading on all CPUs done...") ''' imgdata2 = EMData.read_images(stack, range(img_begin, img_end)) if options.fl > 0.0: for k in xrange(len(imgdata2)): imgdata2[k] = filt_tanl(imgdata2[k], options.fl, options.aa) if options.CTF: vol = recons3d_4nn_ctf_MPI(myid, imgdata2, 1.0, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1) else: vol = recons3d_4nn_MPI(myid, imgdata2, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1) if myid == main_node: vol.write_image("vol_ctf.hdf") print_msg("Writing to the disk volume reconstructed from averages as : %s\n"%("vol_ctf.hdf")) del vol, imgdata2 mpi_barrier(MPI_COMM_WORLD) ''' from applications import prepare_2d_forPCA from utilities import model_blank from EMAN2 import Transform if not options.no_norm: mask = model_circle(nx/2-2, nx, nx) if options.CTF: from utilities import pad from filter import filt_ctf from filter import filt_tanl if myid == heavy_load_myid: log_main.add("Start computing 2D aveList and varList. 
Wait...") ttt = time() inner=nx//2-4 outer=inner+2 xform_proj_for_2D = [ None for i in range(len(proj_list))] for i in range(len(proj_list)): ki = proj_angles[proj_list[i][0]][3] if ki >= symbaselen: continue mi = index[ki] dpar = Util.get_transform_params(imgdata[mi], "xform.projection", "spider") phiM, thetaM, psiM, s2xM, s2yM = dpar["phi"],dpar["theta"],dpar["psi"],-dpar["tx"]*current_decimate,-dpar["ty"]*current_decimate grp_imgdata = [] for j in range(img_per_grp): mj = index[proj_angles[proj_list[i][j]][3]] cpar = Util.get_transform_params(imgdata[mj], "xform.projection", "spider") alpha, sx, sy, mirror = params_3D_2D_NEW(cpar["phi"], cpar["theta"],cpar["psi"], -cpar["tx"]*current_decimate, -cpar["ty"]*current_decimate, mirror_list[i][j]) if thetaM <= 90: if mirror == 0: alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, phiM - cpar["phi"], 0.0, 0.0, 1.0) else: alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, 180-(phiM - cpar["phi"]), 0.0, 0.0, 1.0) else: if mirror == 0: alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, -(phiM- cpar["phi"]), 0.0, 0.0, 1.0) else: alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, -(180-(phiM - cpar["phi"])), 0.0, 0.0, 1.0) imgdata[mj].set_attr("xform.align2d", Transform({"type":"2D","alpha":alpha,"tx":sx,"ty":sy,"mirror":mirror,"scale":1.0})) grp_imgdata.append(imgdata[mj]) if not options.no_norm: for k in range(img_per_grp): ave, std, minn, maxx = Util.infomask(grp_imgdata[k], mask, False) grp_imgdata[k] -= ave grp_imgdata[k] /= std if options.fl > 0.0: for k in range(img_per_grp): grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa) # Because of background issues, only linear option works. if options.CTF: ave, var = aves_wiener(grp_imgdata, SNR = 1.0e5, interpolation_method = "linear") else: ave, var = ave_var(grp_imgdata) # Switch to std dev # threshold is not really needed,it is just in case due to numerical accuracy something turns out negative. var = square_root(threshold(var)) set_params_proj(ave, [phiM, thetaM, 0.0, 0.0, 0.0]) set_params_proj(var, [phiM, thetaM, 0.0, 0.0, 0.0]) aveList.append(ave) varList.append(var) xform_proj_for_2D[i] = [phiM, thetaM, 0.0, 0.0, 0.0] ''' if nvec > 0: eig = pca(input_stacks=grp_imgdata, subavg="", mask_radius=radiuspca, nvec=nvec, incore=True, shuffle=False, genbuf=True) for k in range(nvec): set_params_proj(eig[k], [phiM, thetaM, 0.0, 0.0, 0.0]) eigList[k].append(eig[k]) """ if myid == 0 and i == 0: for k in xrange(nvec): eig[k].write_image("eig.hdf", k) """ ''' if (myid == heavy_load_myid) and (i%100 == 0): log_main.add(" ......%6.2f%% "%(i/float(len(proj_list))*100.)) del imgdata, grp_imgdata, cpar, dpar, all_proj, proj_angles, index if not options.no_norm: del mask if myid == main_node: del tab # At this point, all averages and variances are computed mpi_barrier(MPI_COMM_WORLD) if (myid == heavy_load_myid): log_main.add("Computing aveList and varList took %12.1f [m]"%((time()-ttt)/60.)) xform_proj_for_2D = wrap_mpi_gatherv(xform_proj_for_2D, main_node, MPI_COMM_WORLD) if (myid == main_node): write_text_row(xform_proj_for_2D, os.path.join(current_output_dir, "params.txt")) del xform_proj_for_2D mpi_barrier(MPI_COMM_WORLD) if options.ave2D: from fundamentals import fpol from applications import header if myid == main_node: log_main.add("Compute ave2D ... 
") km = 0 for i in range(number_of_proc): if i == main_node : for im in range(len(aveList)): aveList[im].write_image(os.path.join(current_output_dir, options.ave2D), km) km += 1 else: nl = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) nl = int(nl[0]) for im in range(nl): ave = recv_EMData(i, im+i+70000) """ nm = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) nm = int(nm[0]) members = mpi_recv(nm, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) ave.set_attr('members', map(int, members)) members = mpi_recv(nm, MPI_FLOAT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) ave.set_attr('pix_err', map(float, members)) members = mpi_recv(3, MPI_FLOAT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) ave.set_attr('refprojdir', map(float, members)) """ tmpvol=fpol(ave, nx, nx,1) tmpvol.write_image(os.path.join(current_output_dir, options.ave2D), km) km += 1 else: mpi_send(len(aveList), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) for im in range(len(aveList)): send_EMData(aveList[im], main_node,im+myid+70000) """ members = aveList[im].get_attr('members') mpi_send(len(members), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) mpi_send(members, len(members), MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) members = aveList[im].get_attr('pix_err') mpi_send(members, len(members), MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) try: members = aveList[im].get_attr('refprojdir') mpi_send(members, 3, MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) except: mpi_send([-999.0,-999.0,-999.0], 3, MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) """ if myid == main_node: header(os.path.join(current_output_dir, options.ave2D), params='xform.projection', fimport = os.path.join(current_output_dir, "params.txt")) mpi_barrier(MPI_COMM_WORLD) if options.ave3D: from fundamentals import fpol t5 = time() if myid == main_node: log_main.add("Reconstruct ave3D ... 
") ave3D = recons3d_4nn_MPI(myid, aveList, symmetry=options.sym, npad=options.npad) bcast_EMData_to_all(ave3D, myid) if myid == main_node: if current_decimate != 1.0: ave3D = resample(ave3D, 1./current_decimate) ave3D = fpol(ave3D, nnxo, nnxo, nnxo) # always to the orignal image size set_pixel_size(ave3D, 1.0) ave3D.write_image(os.path.join(current_output_dir, options.ave3D)) log_main.add("Ave3D reconstruction took %12.1f [m]"%((time()-t5)/60.0)) log_main.add("%-70s: %s\n"%("The reconstructed ave3D is saved as ", options.ave3D)) mpi_barrier(MPI_COMM_WORLD) del ave, var, proj_list, stack, alpha, sx, sy, mirror, aveList ''' if nvec > 0: for k in range(nvec): if myid == main_node:log_main.add("Reconstruction eigenvolumes", k) cont = True ITER = 0 mask2d = model_circle(radiuspca, nx, nx) while cont: #print "On node %d, iteration %d"%(myid, ITER) eig3D = recons3d_4nn_MPI(myid, eigList[k], symmetry=options.sym, npad=options.npad) bcast_EMData_to_all(eig3D, myid, main_node) if options.fl > 0.0: eig3D = filt_tanl(eig3D, options.fl, options.aa) if myid == main_node: eig3D.write_image(os.path.join(options.outpout_dir, "eig3d_%03d.hdf"%(k, ITER))) Util.mul_img( eig3D, model_circle(radiuspca, nx, nx, nx) ) eig3Df, kb = prep_vol(eig3D) del eig3D cont = False icont = 0 for l in range(len(eigList[k])): phi, theta, psi, s2x, s2y = get_params_proj(eigList[k][l]) proj = prgs(eig3Df, kb, [phi, theta, psi, s2x, s2y]) cl = ccc(proj, eigList[k][l], mask2d) if cl < 0.0: icont += 1 cont = True eigList[k][l] *= -1.0 u = int(cont) u = mpi_reduce([u], 1, MPI_INT, MPI_MAX, main_node, MPI_COMM_WORLD) icont = mpi_reduce([icont], 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD) if myid == main_node: u = int(u[0]) log_main.add(" Eigenvector: ",k," number changed ",int(icont[0])) else: u = 0 u = bcast_number_to_all(u, main_node) cont = bool(u) ITER += 1 del eig3Df, kb mpi_barrier(MPI_COMM_WORLD) del eigList, mask2d ''' if options.ave3D: del ave3D if options.var2D: from fundamentals import fpol from applications import header if myid == main_node: log_main.add("Compute var2D...") km = 0 for i in range(number_of_proc): if i == main_node : for im in range(len(varList)): tmpvol=fpol(varList[im], nx, nx,1) tmpvol.write_image(os.path.join(current_output_dir, options.var2D), km) km += 1 else: nl = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) nl = int(nl[0]) for im in range(nl): ave = recv_EMData(i, im+i+70000) tmpvol=fpol(ave, nx, nx,1) tmpvol.write_image(os.path.join(current_output_dir, options.var2D), km) km += 1 else: mpi_send(len(varList), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) for im in range(len(varList)): send_EMData(varList[im], main_node, im+myid+70000)# What with the attributes?? 
            mpi_barrier(MPI_COMM_WORLD)
            if myid == main_node:
                from applications import header
                header(os.path.join(current_output_dir, options.var2D), params='xform.projection', fimport=os.path.join(current_output_dir, "params.txt"))
            mpi_barrier(MPI_COMM_WORLD)

        if options.var3D:
            if myid == main_node:
                log_main.add("Reconstruct var3D ...")
            t6 = time()
            # radiusvar = options.radius
            # if( radiusvar < 0 ): radiusvar = nx//2 -3
            res = recons3d_4nn_MPI(myid, varList, symmetry=options.sym, npad=options.npad)
            #res = recons3d_em_MPI(varList, vol_stack, options.iter, radiusvar, options.abs, True, options.sym, options.squ)
            if myid == main_node:
                from fundamentals import fpol
                if current_decimate != 1.0:
                    res = resample(res, 1./current_decimate)
                res = fpol(res, nnxo, nnxo, nnxo)
                set_pixel_size(res, 1.0)
                res.write_image(os.path.join(current_output_dir, options.var3D))
                log_main.add("%-70s: %s\n" % ("The reconstructed var3D is saved as ", options.var3D))
                log_main.add("Var3D reconstruction took %12.1f [m]" % ((time() - t6)/60.0))
                log_main.add("Total computation time %12.1f [m]" % ((time() - t0)/60.0))
                log_main.add("sx3dvariability finishes")

    from mpi import mpi_finalize
    mpi_finalize()

    if RUNNING_UNDER_MPI:
        global_def.MPI = False
    global_def.BATCH = False
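# Illustrative sketch (not part of the SPARX codebase): the memory bookkeeping in the main()
# above amounts to counting 4-byte pixels for the neighbourhood images plus the per-group
# averages and variances, as stated in its log message:
#     total [GB] ~= 4.0 * nx^2 * (nproj + navg + nvar + img_per_grp) / 1.0e9 + overhead
# The helper below restates that estimate; its name and arguments are hypothetical.
def _estimate_variability_memory_gb_sketch(nx, nproj, ngroups, img_per_grp, overhead_gb=0.5):
    """Rough per-node memory estimate for the 2D average/variance stage, in GB."""
    navg = ngroups                      # one 2D average per group processed on this node
    nvar = ngroups                      # one 2D variance per group processed on this node
    n_images = nproj + navg + nvar + img_per_grp
    return 4.0 * nx * nx * n_images / 1.0e9 + overhead_gb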
def main(): progname = os.path.basename(sys.argv[0]) usage = progname + """ input_micrograph_list_file input_micrograph_pattern input_coordinates_pattern output_directory --coordinates_format --box_size=box_size --invert --import_ctf=ctf_file --limit_ctf --resample_ratio=resample_ratio --defocus_error=defocus_error --astigmatism_error=astigmatism_error Window particles from micrographs in input list file. The coordinates of the particles should be given as input. Please specify name pattern of input micrographs and coordinates files with a wild card (*). Use the wild card to indicate the place of micrograph ID (e.g. serial number, time stamp, and etc). The name patterns must be enclosed by single quotes (') or double quotes ("). (Note: sxgui.py automatically adds single quotes (')). BDB files can not be selected as input micrographs. sxwindow.py mic_list.txt ./mic*.hdf info/mic*_info.json particles --coordinates_format=eman2 --box_size=64 --invert --import_ctf=outdir_cter/partres/partres.txt If micrograph list file name is not provided, all files matched with the micrograph name pattern will be processed. sxwindow.py ./mic*.hdf info/mic*_info.json particles --coordinates_format=eman2 --box_size=64 --invert --import_ctf=outdir_cter/partres/partres.txt """ parser = OptionParser(usage, version=SPARXVERSION) parser.add_option( "--coordinates_format", type="string", default="eman1", help= "format of input coordinates files: 'sparx', 'eman1', 'eman2', or 'spider'. the coordinates of sparx, eman2, and spider format is particle center. the coordinates of eman1 format is particle box conner associated with the original box size. (default eman1)" ) parser.add_option( "--box_size", type="int", default=256, help= "x and y dimension of square area to be windowed (in pixels): pixel size after resampling is assumed when resample_ratio < 1.0 (default 256)" ) parser.add_option( "--invert", action="store_true", default=False, help="invert image contrast: recommended for cryo data (default False)" ) parser.add_option( "--import_ctf", type="string", default="", help="file name of sxcter output: normally partres.txt (default none)") parser.add_option( "--limit_ctf", action="store_true", default=False, help= "filter micrographs based on the CTF limit: this option requires --import_ctf. (default False)" ) parser.add_option( "--resample_ratio", type="float", default=1.0, help= "ratio of new to old image size (or old to new pixel size) for resampling: Valid range is 0.0 < resample_ratio <= 1.0. (default 1.0)" ) parser.add_option( "--defocus_error", type="float", default=1000000.0, help= "defocus errror limit: exclude micrographs whose relative defocus error as estimated by sxcter is larger than defocus_error percent. the error is computed as (std dev defocus)/defocus*100%. (default 1000000.0)" ) parser.add_option( "--astigmatism_error", type="float", default=360.0, help= "astigmatism error limit: Set to zero astigmatism for micrographs whose astigmatism angular error as estimated by sxcter is larger than astigmatism_error degrees. 
(default 360.0)" ) ### detect if program is running under MPI RUNNING_UNDER_MPI = "OMPI_COMM_WORLD_SIZE" in os.environ main_node = 0 if RUNNING_UNDER_MPI: from mpi import mpi_init from mpi import MPI_COMM_WORLD, mpi_comm_rank, mpi_comm_size, mpi_barrier, mpi_reduce, MPI_INT, MPI_SUM mpi_init(0, []) myid = mpi_comm_rank(MPI_COMM_WORLD) number_of_processes = mpi_comm_size(MPI_COMM_WORLD) else: number_of_processes = 1 myid = 0 (options, args) = parser.parse_args(sys.argv[1:]) mic_list_file_path = None mic_pattern = None coords_pattern = None error_status = None while True: if len(args) < 3 or len(args) > 4: error_status = ( "Please check usage for number of arguments.\n Usage: " + usage + "\n" + "Please run %s -h for help." % (progname), getframeinfo(currentframe())) break if len(args) == 3: mic_pattern = args[0] coords_pattern = args[1] out_dir = args[2] else: # assert(len(args) == 4) mic_list_file_path = args[0] mic_pattern = args[1] coords_pattern = args[2] out_dir = args[3] if mic_list_file_path != None: if os.path.splitext(mic_list_file_path)[1] != ".txt": error_status = ( "Extension of input micrograph list file must be \".txt\". Please check input_micrograph_list_file argument. Run %s -h for help." % (progname), getframeinfo(currentframe())) break if mic_pattern[:len("bdb:")].lower() == "bdb:": error_status = ( "BDB file can not be selected as input micrographs. Please convert the format, and restart the program. Run %s -h for help." % (progname), getframeinfo(currentframe())) break if mic_pattern.find("*") == -1: error_status = ( "Input micrograph file name pattern must contain wild card (*). Please check input_micrograph_pattern argument. Run %s -h for help." % (progname), getframeinfo(currentframe())) break if coords_pattern.find("*") == -1: error_status = ( "Input coordinates file name pattern must contain wild card (*). Please check input_coordinates_pattern argument. Run %s -h for help." % (progname), getframeinfo(currentframe())) break if myid == main_node: if os.path.exists(out_dir): error_status = ( "Output directory exists. Please change the name and restart the program.", getframeinfo(currentframe())) break break if_error_then_all_processes_exit_program(error_status) # Check invalid conditions of options check_options(options, progname) mic_name_list = None error_status = None if myid == main_node: if mic_list_file_path != None: print("Loading micrograph list from %s file ..." % (mic_list_file_path)) mic_name_list = read_text_file(mic_list_file_path) if len(mic_name_list) > 0: print("Directory of first micrograph entry is %s" % (os.path.dirname(mic_name_list[0]))) else: # assert (mic_list_file_path == None) print("Generating micrograph list in %s directory..." % (os.path.dirname(mic_pattern))) mic_name_list = glob.glob(mic_pattern) if len(mic_name_list) == 0: error_status = ( "No micrograph file is found. Please check input_micrograph_pattern and/or input_micrograph_list_file argument. Run %s -h for help." % (progname), getframeinfo(currentframe())) else: print("Found %d micrographs" % len(mic_name_list)) if_error_then_all_processes_exit_program(error_status) if RUNNING_UNDER_MPI: mic_name_list = wrap_mpi_bcast(mic_name_list, main_node) coords_name_list = None error_status = None if myid == main_node: coords_name_list = glob.glob(coords_pattern) if len(coords_name_list) == 0: error_status = ( "No coordinates file is found. Please check input_coordinates_pattern argument. Run %s -h for help."
% (progname), getframeinfo(currentframe())) if_error_then_all_processes_exit_program(error_status) if RUNNING_UNDER_MPI: coords_name_list = wrap_mpi_bcast(coords_name_list, main_node) ################################################################################################################################################################################################################## ################################################################################################################################################################################################################## ################################################################################################################################################################################################################## # all processes must have access to indices if options.import_ctf: i_enum = -1 i_enum += 1 idx_cter_def = i_enum # defocus [um]; index must be same as ctf object format i_enum += 1 idx_cter_cs = i_enum # Cs [mm]; index must be same as ctf object format i_enum += 1 idx_cter_vol = i_enum # voltage[kV]; index must be same as ctf object format i_enum += 1 idx_cter_apix = i_enum # pixel size [A]; index must be same as ctf object format i_enum += 1 idx_cter_bfactor = i_enum # B-factor [A^2]; index must be same as ctf object format i_enum += 1 idx_cter_ac = i_enum # amplitude contrast [%]; index must be same as ctf object format i_enum += 1 idx_cter_astig_amp = i_enum # astigmatism amplitude [um]; index must be same as ctf object format i_enum += 1 idx_cter_astig_ang = i_enum # astigmatism angle [degree]; index must be same as ctf object format i_enum += 1 idx_cter_sd_def = i_enum # std dev of defocus [um] i_enum += 1 idx_cter_sd_astig_amp = i_enum # std dev of ast amp [A] i_enum += 1 idx_cter_sd_astig_ang = i_enum # std dev of ast angle [degree] i_enum += 1 idx_cter_cv_def = i_enum # coefficient of variation of defocus [%] i_enum += 1 idx_cter_cv_astig_amp = i_enum # coefficient of variation of ast amp [%] i_enum += 1 idx_cter_spectra_diff = i_enum # average of differences between with- and without-astig. 
experimental 1D spectra at extrema i_enum += 1 idx_cter_error_def = i_enum # frequency at which signal drops by 50% due to estimated error of defocus alone [1/A] i_enum += 1 idx_cter_error_astig = i_enum # frequency at which signal drops by 50% due to estimated error of defocus and astigmatism [1/A] i_enum += 1 idx_cter_error_ctf = i_enum # limit frequency by CTF error [1/A] i_enum += 1 idx_cter_mic_name = i_enum # micrograph name i_enum += 1 n_idx_cter = i_enum # Prepare loop variables mic_basename_pattern = os.path.basename( mic_pattern) # file pattern without path mic_baseroot_pattern = os.path.splitext(mic_basename_pattern)[ 0] # file pattern without path and extension coords_format = options.coordinates_format.lower() box_size = options.box_size box_half = box_size // 2 mask2d = model_circle( box_size // 2, box_size, box_size ) # Create circular 2D mask to Util.infomask of particle images resample_ratio = options.resample_ratio n_mic_process = 0 n_mic_reject_no_coords = 0 n_mic_reject_no_cter_entry = 0 n_global_coords_detect = 0 n_global_coords_process = 0 n_global_coords_reject_out_of_boundary = 0 serial_id_list = [] error_status = None ## not a real while, an if with the opportunity to use break when errors need to be reported while myid == main_node: # # NOTE: 2016/05/24 Toshio Moriya # Now, ignores the path in mic_pattern and entries of mic_name_list to create serial ID # Only the basename (file name) in micrograph path must be match # # Create list of micrograph serial ID # Break micrograph name pattern into prefix and suffix to find the head index of the micrograph serial id # mic_basename_tokens = mic_basename_pattern.split('*') # assert (len(mic_basename_tokens) == 2) serial_id_head_index = len(mic_basename_tokens[0]) # Loop through micrograph names for mic_name in mic_name_list: # Find the tail index of the serial id and extract serial id from the micrograph name mic_basename = os.path.basename(mic_name) serial_id_tail_index = mic_basename.index(mic_basename_tokens[1]) serial_id = mic_basename[serial_id_head_index:serial_id_tail_index] serial_id_list.append(serial_id) # assert (len(serial_id_list) == len(mic_name)) del mic_name_list # Do not need this anymore # Load CTFs if necessary if options.import_ctf: ctf_list = read_text_row(options.import_ctf) # print("Detected CTF entries : %6d ..." % (len(ctf_list))) if len(ctf_list) == 0: error_status = ( "No CTF entry is found in %s. Please check --import_ctf option. Run %s -h for help." % (options.import_ctf, progname), getframeinfo(currentframe())) break if (len(ctf_list[0]) != n_idx_cter): error_status = ( "Number of columns (%d) must be %d in %s. The format might be old. Please run sxcter.py again." % (len(ctf_list[0]), n_idx_cter, options.import_ctf), getframeinfo(currentframe())) break ctf_dict = {} n_reject_defocus_error = 0 ctf_error_limit = [ options.defocus_error / 100.0, options.astigmatism_error ] for ctf_params in ctf_list: assert (len(ctf_params) == n_idx_cter) # mic_baseroot is name of micrograph minus the path and extension mic_baseroot = os.path.splitext( os.path.basename(ctf_params[idx_cter_mic_name]))[0] if (ctf_params[idx_cter_sd_def] / ctf_params[idx_cter_def] > ctf_error_limit[0]): print( "Defocus error %f exceeds the threshold. Micrograph %s is rejected." 
% (ctf_params[idx_cter_sd_def] / ctf_params[idx_cter_def], mic_baseroot)) n_reject_defocus_error += 1 else: if (ctf_params[idx_cter_sd_astig_ang] > ctf_error_limit[1]): ctf_params[idx_cter_astig_amp] = 0.0 ctf_params[idx_cter_astig_ang] = 0.0 ctf_dict[mic_baseroot] = ctf_params del ctf_list # Do not need this anymore break if_error_then_all_processes_exit_program(error_status) if options.import_ctf: if options.limit_ctf: cutoff_histogram = [ ] #@ming compute the histogram for micrographs cut of by ctf_params limit. ################################################################################################################################################################################################################## ################################################################################################################################################################################################################## ################################################################################################################################################################################################################## restricted_serial_id_list = [] if myid == main_node: # Loop over serial IDs of micrographs for serial_id in serial_id_list: # mic_baseroot is name of micrograph minus the path and extension mic_baseroot = mic_baseroot_pattern.replace("*", serial_id) mic_name = mic_pattern.replace("*", serial_id) coords_name = coords_pattern.replace("*", serial_id) ########### # CHECKS: BEGIN if coords_name not in coords_name_list: print(" Cannot read %s. Skipping %s ..." % (coords_name, mic_baseroot)) n_mic_reject_no_coords += 1 continue # IF mic is in CTER results if options.import_ctf: if mic_baseroot not in ctf_dict: print( " Is not listed in CTER results. Skipping %s ..." 
% (mic_baseroot)) n_mic_reject_no_cter_entry += 1 continue else: ctf_params = ctf_dict[mic_baseroot] # CHECKS: END n_mic_process += 1 restricted_serial_id_list.append(serial_id) # restricted_serial_id_list = restricted_serial_id_list[:128] ## for testing against the nonMPI version if myid != main_node: if options.import_ctf: ctf_dict = None error_status = None if len(restricted_serial_id_list) < number_of_processes: error_status = ( 'Number of processes (%d) supplied by --np in mpirun cannot be greater than %d (number of micrographs that satisfy all criteria to be processed) ' % (number_of_processes, len(restricted_serial_id_list)), getframeinfo(currentframe())) if_error_then_all_processes_exit_program(error_status) ## keep a copy of the original output directory where the final bdb will be created original_out_dir = out_dir if RUNNING_UNDER_MPI: mpi_barrier(MPI_COMM_WORLD) restricted_serial_id_list = wrap_mpi_bcast(restricted_serial_id_list, main_node) mic_start, mic_end = MPI_start_end(len(restricted_serial_id_list), number_of_processes, myid) restricted_serial_id_list_not_sliced = restricted_serial_id_list restricted_serial_id_list = restricted_serial_id_list[ mic_start:mic_end] if options.import_ctf: ctf_dict = wrap_mpi_bcast(ctf_dict, main_node) # generate subdirectories of out_dir, one for each process out_dir = os.path.join(out_dir, "%03d" % myid) if myid == main_node: print( "Micrographs processed by main process (including percent complete):" ) len_processed_by_main_node_divided_by_100 = len( restricted_serial_id_list) / 100.0 ################################################################################################################################################################################################################## ################################################################################################################################################################################################################## ################################################################################################################################################################################################################## ##### Starting main parallel execution for my_idx, serial_id in enumerate(restricted_serial_id_list): mic_baseroot = mic_baseroot_pattern.replace("*", serial_id) mic_name = mic_pattern.replace("*", serial_id) coords_name = coords_pattern.replace("*", serial_id) if myid == main_node: print( mic_name, " ---> % 2.2f%%" % (my_idx / len_processed_by_main_node_divided_by_100)) mic_img = get_im(mic_name) # Read coordinates according to the specified format and # make the coordinates the center of particle image if coords_format == "sparx": coords_list = read_text_row(coords_name) elif coords_format == "eman1": coords_list = read_text_row(coords_name) for i in xrange(len(coords_list)): coords_list[i] = [(coords_list[i][0] + coords_list[i][2] // 2), (coords_list[i][1] + coords_list[i][3] // 2)] elif coords_format == "eman2": coords_list = js_open_dict(coords_name)["boxes"] for i in xrange(len(coords_list)): coords_list[i] = [coords_list[i][0], coords_list[i][1]] elif coords_format == "spider": coords_list = read_text_row(coords_name) for i in xrange(len(coords_list)): coords_list[i] = [coords_list[i][2], coords_list[i][3]] # else: assert (False) # Unreachable code # Calculate the new pixel size if options.import_ctf: ctf_params = ctf_dict[mic_baseroot] pixel_size_origin = ctf_params[idx_cter_apix] if resample_ratio < 1.0: # assert (resample_ratio 
> 0.0) new_pixel_size = pixel_size_origin / resample_ratio print( "Resample micrograph to pixel size %6.4f and window segments from resampled micrograph." % new_pixel_size) else: # assert (resample_ratio == 1.0) new_pixel_size = pixel_size_origin # Set ctf along with new pixel size in resampled micrograph ctf_params[idx_cter_apix] = new_pixel_size else: # assert (not options.import_ctf) if resample_ratio < 1.0: # assert (resample_ratio > 0.0) print( "Resample micrograph with ratio %6.4f and window segments from resampled micrograph." % resample_ratio) # else: # assert (resample_ratio == 1.0) # Apply filters to micrograph fftip(mic_img) if options.limit_ctf: # assert (options.import_ctf) # Cut off frequency components higher than CTF limit q1, q2 = ctflimit(box_size, ctf_params[idx_cter_def], ctf_params[idx_cter_cs], ctf_params[idx_cter_vol], new_pixel_size) # This is absolute frequency of CTF limit in scale of original micrograph if resample_ratio < 1.0: # assert (resample_ratio > 0.0) q1 = resample_ratio * q1 / float( box_size ) # q1 = (pixel_size_origin / new_pixel_size) * q1/float(box_size) else: # assert (resample_ratio == 1.0) -> pixel_size_origin == new_pixel_size -> pixel_size_origin / new_pixel_size == 1.0 q1 = q1 / float(box_size) if q1 < 0.5: mic_img = filt_tanl(mic_img, q1, 0.01) cutoff_histogram.append(q1) # Cut off frequency components lower than the box size can express mic_img = fft(filt_gaussh(mic_img, resample_ratio / box_size)) # Resample micrograph, map coordinates, and window segments from resampled micrograph using new coordinates # after resampling by resample_ratio, new pixel size will be pixel_size/resample_ratio = new_pixel_size # NOTE: 2015/04/13 Toshio Moriya # resample() efficiently takes care of the case resample_ratio = 1.0 but # it does not set apix_*. Even though it sets apix_* when resample_ratio < 1.0 ... mic_img = resample(mic_img, resample_ratio) if options.invert: mic_stats = Util.infomask( mic_img, None, True) # mic_stat[0:mean, 1:SD, 2:min, 3:max] Util.mul_scalar(mic_img, -1.0) mic_img += 2 * mic_stats[0] if options.import_ctf: from utilities import generate_ctf ctf_obj = generate_ctf( ctf_params ) # indexes 0 to 7 (idx_cter_def to idx_cter_astig_ang) must be same in cter format & ctf object format. # Prepare loop variables nx = mic_img.get_xsize() ny = mic_img.get_ysize() x0 = nx // 2 y0 = ny // 2 n_coords_reject_out_of_boundary = 0 local_stack_name = "bdb:%s#" % out_dir + mic_baseroot + '_ptcls' local_particle_id = 0 # can be different from coordinates_id # Loop over coordinates for coords_id in xrange(len(coords_list)): x = int(coords_list[coords_id][0]) y = int(coords_list[coords_id][1]) if resample_ratio < 1.0: # assert (resample_ratio > 0.0) x = int(x * resample_ratio) y = int(y * resample_ratio) # else: # assert(resample_ratio == 1.0) if ((0 <= x - box_half) and (x + box_half <= nx) and (0 <= y - box_half) and (y + box_half <= ny)): particle_img = Util.window(mic_img, box_size, box_size, 1, x - x0, y - y0) else: print( "In %s, coordinates ID = %04d (x = %4d, y = %4d, box_size = %4d) is out of micrograph bound, skipping ..." % (mic_baseroot, coords_id, x, y, box_size)) n_coords_reject_out_of_boundary += 1 continue particle_img = ramp(particle_img) particle_stats = Util.infomask( particle_img, mask2d, False) # particle_stats[0:mean, 1:SD, 2:min, 3:max] particle_img -= particle_stats[0] particle_img /= particle_stats[1] # NOTE: 2015/04/09 Toshio Moriya # ptcl_source_image might be redundant information ... 
# Consider re-organizing header entries... particle_img.set_attr("ptcl_source_image", mic_name) particle_img.set_attr("ptcl_source_coord_id", coords_id) particle_img.set_attr("ptcl_source_coord", [ int(coords_list[coords_id][0]), int(coords_list[coords_id][1]) ]) particle_img.set_attr("resample_ratio", resample_ratio) # NOTE: 2015/04/13 Toshio Moriya # apix_* attributes are updated by resample() only when resample_ratio != 1.0 # Let's make sure header info is consistent by setting apix_* = 1.0 # regardless of options, so it is not passed down the processing line particle_img.set_attr("apix_x", 1.0) particle_img.set_attr("apix_y", 1.0) particle_img.set_attr("apix_z", 1.0) if options.import_ctf: particle_img.set_attr("ctf", ctf_obj) particle_img.set_attr("ctf_applied", 0) particle_img.set_attr("pixel_size_origin", pixel_size_origin) # particle_img.set_attr("apix_x", new_pixel_size) # particle_img.set_attr("apix_y", new_pixel_size) # particle_img.set_attr("apix_z", new_pixel_size) # NOTE: 2015/04/13 Toshio Moriya # Pawel Comment: Micrograph is not supposed to have CTF header info. # So, let's assume it does not exist & ignore its presence. # Note that resample() "correctly" updates pixel size of CTF header info if it exists # elif (particle_img.has_ctff()): # assert(not options.import_ctf) # ctf_origin = particle_img.get_attr("ctf_obj") # pixel_size_origin = round(ctf_origin.apix, 5) # Because SXCTER ouputs up to 5 digits # particle_img.set_attr("apix_x",pixel_size_origin) # particle_img.set_attr("apix_y",pixel_size_origin) # particle_img.set_attr("apix_z",pixel_size_origin) # print("local_stack_name, local_particle_id", local_stack_name, local_particle_id) particle_img.write_image(local_stack_name, local_particle_id) local_particle_id += 1 n_global_coords_detect += len(coords_list) n_global_coords_process += local_particle_id n_global_coords_reject_out_of_boundary += n_coords_reject_out_of_boundary # # MRK_DEBUG: Toshio Moriya 2016/05/03 # # Following codes are for debugging bdb. Delete in future # result = db_check_dict(local_stack_name) # print('# MRK_DEBUG: result = db_check_dict(local_stack_name): %s' % (result)) # result = db_list_dicts('bdb:%s' % out_dir) # print('# MRK_DEBUG: result = db_list_dicts(out_dir): %s' % (result)) # result = db_get_image_info(local_stack_name) # print('# MRK_DEBUG: result = db_get_image_info(local_stack_name)', result) # Release the data base of local stack from this process # so that the subprocess can access to the data base db_close_dict(local_stack_name) # # MRK_DEBUG: Toshio Moriya 2016/05/03 # # Following codes are for debugging bdb. Delete in future # cmd_line = "e2iminfo.py %s" % (local_stack_name) # print('# MRK_DEBUG: Executing the command: %s' % (cmd_line)) # cmdexecute(cmd_line) # # MRK_DEBUG: Toshio Moriya 2016/05/03 # # Following codes are for debugging bdb. 
Delete in future # cmd_line = "e2iminfo.py bdb:%s#data" % (out_dir) # print('# MRK_DEBUG: Executing the command: %s' % (cmd_line)) # cmdexecute(cmd_line) if RUNNING_UNDER_MPI: if options.import_ctf: if options.limit_ctf: cutoff_histogram = wrap_mpi_gatherv(cutoff_histogram, main_node) if myid == main_node: if options.limit_ctf: # Print out the summary of CTF-limit filtering print(" ") print("Global summary of CTF-limit filtering (--limit_ctf) ...") print("Percentage of filtered micrographs: %8.2f\n" % (len(cutoff_histogram) * 100.0 / len(restricted_serial_id_list_not_sliced))) n_bins = 10 if len(cutoff_histogram) >= n_bins: from statistics import hist_list cutoff_region, cutoff_counts = hist_list( cutoff_histogram, n_bins) print(" Histogram of cut-off frequency") print(" cut-off counts") for bin_id in xrange(n_bins): print(" %14.7f %7d" % (cutoff_region[bin_id], cutoff_counts[bin_id])) else: print( "The number of filtered micrographs (%d) is less than the number of bins (%d). No histogram is produced." % (len(cutoff_histogram), n_bins)) n_mic_process = mpi_reduce(n_mic_process, 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD) n_mic_reject_no_coords = mpi_reduce(n_mic_reject_no_coords, 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD) n_mic_reject_no_cter_entry = mpi_reduce(n_mic_reject_no_cter_entry, 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD) n_global_coords_detect = mpi_reduce(n_global_coords_detect, 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD) n_global_coords_process = mpi_reduce(n_global_coords_process, 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD) n_global_coords_reject_out_of_boundary = mpi_reduce( n_global_coords_reject_out_of_boundary, 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD) # Print out the summary of all micrographs if main_node == myid: print(" ") print("Global summary of micrographs ...") print("Detected : %6d" % (len(restricted_serial_id_list_not_sliced))) print("Processed : %6d" % (n_mic_process)) print("Rejected by no coordinates file : %6d" % (n_mic_reject_no_coords)) print("Rejected by no CTER entry : %6d" % (n_mic_reject_no_cter_entry)) print(" ") print("Global summary of coordinates ...") print("Detected : %6d" % (n_global_coords_detect)) print("Processed : %6d" % (n_global_coords_process)) print("Rejected by out of boundary : %6d" % (n_global_coords_reject_out_of_boundary)) # print(" ") # print("DONE!!!") mpi_barrier(MPI_COMM_WORLD) if main_node == myid: import time time.sleep(1) print("\n Creating bdb:%s/data\n" % original_out_dir) for proc_i in range(number_of_processes): mic_start, mic_end = MPI_start_end( len(restricted_serial_id_list_not_sliced), number_of_processes, proc_i) for serial_id in restricted_serial_id_list_not_sliced[ mic_start:mic_end]: e2bdb_command = "e2bdb.py " mic_baseroot = mic_baseroot_pattern.replace("*", serial_id) if RUNNING_UNDER_MPI: e2bdb_command += "bdb:" + os.path.join( original_out_dir, "%03d/" % proc_i) + mic_baseroot + "_ptcls " else: e2bdb_command += "bdb:" + os.path.join( original_out_dir, mic_baseroot + "_ptcls ") e2bdb_command += " --appendvstack=bdb:%s/data 1>/dev/null" % original_out_dir cmdexecute(e2bdb_command, printing_on_success=False) print("Done!\n") if RUNNING_UNDER_MPI: mpi_barrier(MPI_COMM_WORLD) from mpi import mpi_finalize mpi_finalize() sys.stdout.flush() sys.exit(0)
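# Illustrative sketch of the coordinate handling in the windowing loop above:
# eman1 coordinate files store the box corner together with the original box size,
# so the particle center is recovered as corner + box//2, while sparx/eman2/spider
# coordinates are already centers; a center is then accepted only if the whole
# box fits inside the micrograph. The helper names are hypothetical.
def _center_from_eman1_sketch(x_corner, y_corner, w, h):
    # Convert an eman1 (corner, box size) entry to a particle center.
    return x_corner + w // 2, y_corner + h // 2

def _box_fits_sketch(x, y, box_size, nx, ny):
    # True if a box_size x box_size window centered at (x, y)
    # lies entirely inside an nx x ny micrograph.
    box_half = box_size // 2
    return (0 <= x - box_half and x + box_half <= nx and
            0 <= y - box_half and y + box_half <= ny)

# Example: _box_fits_sketch(20, 500, 64, 1024, 1024) is False because x - 32 < 0,
# which corresponds to the "out of micrograph bound" rejection above.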
def shiftali_MPI(stack, maskfile=None, maxit=100, CTF=False, snr=1.0, Fourvar=False, search_rng=-1, oneDx=False, search_rng_y=-1): from applications import MPI_start_end from utilities import model_circle, model_blank, get_image, peak_search, get_im from utilities import reduce_EMData_to_root, bcast_EMData_to_all, send_attr_dict, file_type, bcast_number_to_all, bcast_list_to_all from statistics import varf2d_MPI from fundamentals import fft, ccf, rot_shift3D, rot_shift2D from utilities import get_params2D, set_params2D from utilities import print_msg, print_begin_msg, print_end_msg import os import sys from mpi import mpi_init, mpi_comm_size, mpi_comm_rank, MPI_COMM_WORLD from mpi import mpi_reduce, mpi_bcast, mpi_barrier, mpi_gatherv from mpi import MPI_SUM, MPI_FLOAT, MPI_INT from EMAN2 import Processor from time import time number_of_proc = mpi_comm_size(MPI_COMM_WORLD) myid = mpi_comm_rank(MPI_COMM_WORLD) main_node = 0 ftp = file_type(stack) if myid == main_node: print_begin_msg("shiftali_MPI") max_iter = int(maxit) if myid == main_node: if ftp == "bdb": from EMAN2db import db_open_dict dummy = db_open_dict(stack, True) nima = EMUtil.get_image_count(stack) else: nima = 0 nima = bcast_number_to_all(nima, source_node=main_node) list_of_particles = list(range(nima)) image_start, image_end = MPI_start_end(nima, number_of_proc, myid) list_of_particles = list_of_particles[image_start:image_end] # read nx and ctf_app (if CTF) and broadcast to all nodes if myid == main_node: ima = EMData() ima.read_image(stack, list_of_particles[0], True) nx = ima.get_xsize() ny = ima.get_ysize() if CTF: ctf_app = ima.get_attr_default('ctf_applied', 2) del ima else: nx = 0 ny = 0 if CTF: ctf_app = 0 nx = bcast_number_to_all(nx, source_node=main_node) ny = bcast_number_to_all(ny, source_node=main_node) if CTF: ctf_app = bcast_number_to_all(ctf_app, source_node=main_node) if ctf_app > 0: ERROR("data cannot be ctf-applied", "shiftali_MPI", 1, myid) if maskfile == None: mrad = min(nx, ny) mask = model_circle(mrad // 2 - 2, nx, ny) else: mask = get_im(maskfile) if CTF: from filter import filt_ctf from morphology import ctf_img ctf_abs_sum = EMData(nx, ny, 1, False) ctf_2_sum = EMData(nx, ny, 1, False) else: ctf_2_sum = None from global_def import CACHE_DISABLE if CACHE_DISABLE: data = EMData.read_images(stack, list_of_particles) else: for i in range(number_of_proc): if myid == i: data = EMData.read_images(stack, list_of_particles) if ftp == "bdb": mpi_barrier(MPI_COMM_WORLD) for im in range(len(data)): data[im].set_attr('ID', list_of_particles[im]) st = Util.infomask(data[im], mask, False) data[im] -= st[0] if CTF: ctf_params = data[im].get_attr("ctf") ctfimg = ctf_img(nx, ctf_params, ny=ny) Util.add_img2(ctf_2_sum, ctfimg) Util.add_img_abs(ctf_abs_sum, ctfimg) if CTF: reduce_EMData_to_root(ctf_2_sum, myid, main_node) reduce_EMData_to_root(ctf_abs_sum, myid, main_node) else: ctf_2_sum = None if CTF: if myid != main_node: del ctf_2_sum del ctf_abs_sum else: temp = EMData(nx, ny, 1, False) for i in range(0, nx, 2): for j in range(ny): temp.set_value_at(i, j, snr) Util.add_img(ctf_2_sum, temp) del temp total_iter = 0 # apply initial xform.align2d parameters stored in header init_params = [] for im in range(len(data)): t = data[im].get_attr('xform.align2d') init_params.append(t) p = t.get_params("2d") data[im] = rot_shift2D(data[im], p['alpha'], sx=p['tx'], sy=p['ty'], mirror=p['mirror'], scale=p['scale']) # fourier transform all images, and apply ctf if CTF for im in range(len(data)): if CTF: ctf_params = 
data[im].get_attr("ctf") data[im] = filt_ctf(fft(data[im]), ctf_params) else: data[im] = fft(data[im]) sx_sum = 0 sy_sum = 0 sx_sum_total = 0 sy_sum_total = 0 shift_x = [0.0] * len(data) shift_y = [0.0] * len(data) ishift_x = [0.0] * len(data) ishift_y = [0.0] * len(data) for Iter in range(max_iter): if myid == main_node: start_time = time() print_msg("Iteration #%4d\n" % (total_iter)) total_iter += 1 avg = EMData(nx, ny, 1, False) for im in data: Util.add_img(avg, im) reduce_EMData_to_root(avg, myid, main_node) if myid == main_node: if CTF: tavg = Util.divn_filter(avg, ctf_2_sum) else: tavg = Util.mult_scalar(avg, 1.0 / float(nima)) else: tavg = EMData(nx, ny, 1, False) if Fourvar: bcast_EMData_to_all(tavg, myid, main_node) vav, rvar = varf2d_MPI(myid, data, tavg, mask, "a", CTF) if myid == main_node: if Fourvar: tavg = fft(Util.divn_img(fft(tavg), vav)) vav_r = Util.pack_complex_to_real(vav) # normalize and mask tavg in real space tavg = fft(tavg) stat = Util.infomask(tavg, mask, False) tavg -= stat[0] Util.mul_img(tavg, mask) # For testing purposes: shift tavg to some random place and see if the centering is still correct #tavg = rot_shift3D(tavg,sx=3,sy=-4) tavg = fft(tavg) if Fourvar: del vav bcast_EMData_to_all(tavg, myid, main_node) sx_sum = 0 sy_sum = 0 if search_rng > 0: nwx = 2 * search_rng + 1 else: nwx = nx if search_rng_y > 0: nwy = 2 * search_rng_y + 1 else: nwy = ny not_zero = 0 for im in range(len(data)): if oneDx: ctx = Util.window(ccf(data[im], tavg), nwx, 1) p1 = peak_search(ctx) p1_x = -int(p1[0][3]) ishift_x[im] = p1_x sx_sum += p1_x else: p1 = peak_search(Util.window(ccf(data[im], tavg), nwx, nwy)) p1_x = -int(p1[0][4]) p1_y = -int(p1[0][5]) ishift_x[im] = p1_x ishift_y[im] = p1_y sx_sum += p1_x sy_sum += p1_y if not_zero == 0: if (not (ishift_x[im] == 0.0)) or (not (ishift_y[im] == 0.0)): not_zero = 1 sx_sum = mpi_reduce(sx_sum, 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD) if not oneDx: sy_sum = mpi_reduce(sy_sum, 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD) if myid == main_node: sx_sum_total = int(sx_sum[0]) if not oneDx: sy_sum_total = int(sy_sum[0]) else: sx_sum_total = 0 sy_sum_total = 0 sx_sum_total = bcast_number_to_all(sx_sum_total, source_node=main_node) if not oneDx: sy_sum_total = bcast_number_to_all(sy_sum_total, source_node=main_node) sx_ave = round(float(sx_sum_total) / nima) sy_ave = round(float(sy_sum_total) / nima) for im in range(len(data)): p1_x = ishift_x[im] - sx_ave p1_y = ishift_y[im] - sy_ave params2 = { "filter_type": Processor.fourier_filter_types.SHIFT, "x_shift": p1_x, "y_shift": p1_y, "z_shift": 0.0 } data[im] = Processor.EMFourierFilter(data[im], params2) shift_x[im] += p1_x shift_y[im] += p1_y # stop if all shifts are zero not_zero = mpi_reduce(not_zero, 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD) if myid == main_node: not_zero_all = int(not_zero[0]) else: not_zero_all = 0 not_zero_all = bcast_number_to_all(not_zero_all, source_node=main_node) if myid == main_node: print_msg("Time of iteration = %12.2f\n" % (time() - start_time)) start_time = time() if not_zero_all == 0: break #for im in xrange(len(data)): data[im] = fft(data[im]) This should not be required as only header information is used # combine shifts found with the original parameters for im in range(len(data)): t0 = init_params[im] t1 = Transform() t1.set_params({ "type": "2D", "alpha": 0, "scale": t0.get_scale(), "mirror": 0, "tx": shift_x[im], "ty": shift_y[im] }) # combine t0 and t1 tt = t1 * t0 data[im].set_attr("xform.align2d", tt) # write out headers and STOP, 
under MPI writing has to be done sequentially mpi_barrier(MPI_COMM_WORLD) par_str = ["xform.align2d", "ID"] if myid == main_node: from utilities import file_type if (file_type(stack) == "bdb"): from utilities import recv_attr_dict_bdb recv_attr_dict_bdb(main_node, stack, data, par_str, image_start, image_end, number_of_proc) else: from utilities import recv_attr_dict recv_attr_dict(main_node, stack, data, par_str, image_start, image_end, number_of_proc) else: send_attr_dict(main_node, data, par_str, image_start, image_end) if myid == main_node: print_end_msg("shiftali_MPI")
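# Illustrative sketch of the shift bookkeeping in shiftali_MPI above: in every
# iteration the integer shifts found by cross-correlation against the current
# average are reduced by their global mean, so individual images are aligned
# while the ensemble as a whole is not dragged off-center. Plain lists replace
# the MPI-reduced sums here; the helper is hypothetical.
def _remove_average_shift_sketch(ishift):
    # Subtract the rounded mean shift from every per-image shift.
    ave = round(float(sum(ishift)) / len(ishift))
    return [s - ave for s in ishift]

# Example: shifts [2, 2, 2, -2] have mean 1.0, so the applied shifts become
# [1.0, 1.0, 1.0, -3.0]; their sum is zero and the average image stays centered.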
def expand_step(projections, stable_subset, stable_threshold, options, tries_per_unstable=4, mpi_env=None, log=None, iteration=-1): from applications import MPI_start_end from multi_shc import multi_shc from utilities import wrap_mpi_recv, wrap_mpi_send, wrap_mpi_bcast, wrap_mpi_gatherv import os if log == None: from logger import Logger log = Logger() if mpi_env.main_rank == 0: log.add("-------------> Expanding step - BEGIN") # generate subsets (on main root) if mpi_env.main_rank == 0: nprojs = len(projections) stable_subset = set(stable_subset) unstable_subset = set(range(nprojs)) - stable_subset trg_subset_size = max([len(unstable_subset) / 4, 1]) subsets = generate_subsets(unstable_subset, trg_subset_size, tries_per_unstable) for i in xrange(len(subsets)): subsets[i] = list(stable_subset | set(subsets[i])) log.add("Count of subsets: ", len(subsets)) # scatter subsets between sub-communicators (fragments of subsets are sent to roots of sub-communicators) assigned_subsets = None if mpi_env.main_rank == 0: for iSC in xrange(mpi_env.subcomms_count): subset_begin, subset_end = MPI_start_end(len(subsets), mpi_env.subcomms_count, iSC) dest = mpi_env.subcomms_roots[iSC] if mpi_env.main_rank != dest: wrap_mpi_send(subsets[subset_begin:subset_end], dest, mpi_env.main_comm) else: assigned_subsets = subsets[subset_begin:subset_end] else: if mpi_env.sub_rank == 0: assigned_subsets = wrap_mpi_recv(0, mpi_env.main_comm) # broadcast subsets among sub-communicators (from roots to other processes) assigned_subsets = wrap_mpi_bcast(assigned_subsets, 0, mpi_env.sub_comm) # run SHC (in each sub-communicator separately) params = [] for iAS in xrange(len(assigned_subsets)): out_dir = str(iteration) + "_expanding_" + str( mpi_env.subcomm_id) + "_" + str(iAS) if mpi_env.sub_rank == 0: os.mkdir(log.prefix + out_dir) log.add("3SHC --> " + log.prefix + out_dir) subset = assigned_subsets[iAS] out_p, out_vol, out_peaks = multi_shc(projections, subset, 3, options, mpi_comm=mpi_env.sub_comm, log=log.sublog(out_dir + "/")) if mpi_env.sub_rank == 0: assert (len(subset) == len(out_p)) if mpi_env.sub_rank == 0: params.append(out_p) # gather obtained parameters to main root if mpi_env.sub_rank == 0: params = wrap_mpi_gatherv(params, 0, mpi_env.main_comm) else: params = wrap_mpi_gatherv([], 0, mpi_env.main_comm) # calculate new stable subset if mpi_env.main_rank == 0: log.add("Calculate new common subset") #if prefix != None: # for pi in xrange(len(params)): # p = params[pi] # s = subsets[pi] # write_text_row(p, prefix + "_params_" + str(pi) + "_" + log.prefix + ".txt") # write_text_file(s, prefix + "_subset_" + str(pi) + "_" + log.prefix + ".txt") #new_stable_subset = recalculate_subset(stable_threshold, params, subsets) new_stable_subset = find_new_stable_projections(params, subsets, stable_threshold, stable_subset, log=log) new_stable_subset = list(set(stable_subset) | set(new_stable_subset)) log.add("New subset:", len(new_stable_subset), new_stable_subset) else: new_stable_subset = None if mpi_env.main_rank == 0: log.add("-------------> Expanding step - END") return new_stable_subset
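# Illustrative sketch of how expand_step() builds its trial subsets: every subset
# handed to multi_shc() is the current stable core plus one chunk of unstable
# projections, mirroring "subsets[i] = list(stable_subset | set(subsets[i]))" above.
# generate_subsets() itself is defined elsewhere; the helper below is hypothetical.
def _merge_with_stable_sketch(stable_subset, unstable_chunks):
    # Return one trial subset per chunk, each containing the full stable core.
    stable = set(stable_subset)
    return [sorted(stable | set(chunk)) for chunk in unstable_chunks]

# Example: stable core {0, 1} and unstable chunks [[5, 7], [6]] give the trial
# subsets [[0, 1, 5, 7], [0, 1, 6]].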
def main(): import global_def from optparse import OptionParser from EMAN2 import EMUtil import os import sys from time import time progname = os.path.basename(sys.argv[0]) usage = progname + " proj_stack output_averages --MPI" parser = OptionParser(usage, version=SPARXVERSION) parser.add_option("--img_per_group", type="int", default=100, help="number of images per group") parser.add_option("--radius", type="int", default=-1, help="radius for alignment") parser.add_option( "--xr", type="string", default="2 1", help="range for translation search in x direction, search is +/xr") parser.add_option( "--yr", type="string", default="-1", help= "range for translation search in y direction, search is +/yr (default = same as xr)" ) parser.add_option( "--ts", type="string", default="1 0.5", help= "step size of the translation search in both directions, search is -xr, -xr+ts, 0, xr-ts, xr, can be fractional" ) parser.add_option( "--iter", type="int", default=30, help="number of iterations within alignment (default = 30)") parser.add_option( "--num_ali", type="int", default=5, help="number of alignments performed for stability (default = 5)") parser.add_option("--thld_err", type="float", default=1.0, help="threshold of pixel error (default = 1.732)") parser.add_option( "--grouping", type="string", default="GRP", help= "do grouping of projections: PPR - per projection, GRP - different size groups, exclusive (default), GEV - grouping equal size" ) parser.add_option( "--delta", type="float", default=-1.0, help="angular step for reference projections (required for GEV method)" ) parser.add_option( "--fl", type="float", default=0.3, help="cut-off frequency of hyperbolic tangent low-pass Fourier filter") parser.add_option( "--aa", type="float", default=0.2, help="fall-off of hyperbolic tangent low-pass Fourier filter") parser.add_option("--CTF", action="store_true", default=False, help="Consider CTF correction during the alignment ") parser.add_option("--MPI", action="store_true", default=False, help="use MPI version") (options, args) = parser.parse_args() from mpi import mpi_init, mpi_comm_rank, mpi_comm_size, MPI_COMM_WORLD from mpi import mpi_barrier, mpi_send, mpi_recv, mpi_bcast, MPI_INT, mpi_finalize, MPI_FLOAT from applications import MPI_start_end, within_group_refinement, ali2d_ras from pixel_error import multi_align_stability from utilities import send_EMData, recv_EMData from utilities import get_image, bcast_number_to_all, set_params2D, get_params2D from utilities import group_proj_by_phitheta, model_circle, get_input_from_string sys.argv = mpi_init(len(sys.argv), sys.argv) myid = mpi_comm_rank(MPI_COMM_WORLD) number_of_proc = mpi_comm_size(MPI_COMM_WORLD) main_node = 0 if len(args) == 2: stack = args[0] outdir = args[1] else: ERROR("incomplete list of arguments", "sxproj_stability", 1, myid=myid) exit() if not options.MPI: ERROR("Non-MPI not supported!", "sxproj_stability", myid=myid) exit() if global_def.CACHE_DISABLE: from utilities import disable_bdb_cache disable_bdb_cache() global_def.BATCH = True #if os.path.exists(outdir): ERROR('Output directory exists, please change the name and restart the program', "sxproj_stability", 1, myid) #mpi_barrier(MPI_COMM_WORLD) img_per_grp = options.img_per_group radius = options.radius ite = options.iter num_ali = options.num_ali thld_err = options.thld_err xrng = get_input_from_string(options.xr) if options.yr == "-1": yrng = xrng else: yrng = get_input_from_string(options.yr) step = get_input_from_string(options.ts) if myid == main_node: nima = 
EMUtil.get_image_count(stack) img = get_image(stack) nx = img.get_xsize() ny = img.get_ysize() else: nima = 0 nx = 0 ny = 0 nima = bcast_number_to_all(nima) nx = bcast_number_to_all(nx) ny = bcast_number_to_all(ny) if radius == -1: radius = nx / 2 - 2 mask = model_circle(radius, nx, nx) st = time() if options.grouping == "GRP": if myid == main_node: print(" A ", myid, " ", time() - st) proj_attr = EMUtil.get_all_attributes(stack, "xform.projection") proj_params = [] for i in range(nima): dp = proj_attr[i].get_params("spider") phi, theta, psi, s2x, s2y = dp["phi"], dp["theta"], dp[ "psi"], -dp["tx"], -dp["ty"] proj_params.append([phi, theta, psi, s2x, s2y]) # Here is where the grouping is done, I didn't put enough annotation in the group_proj_by_phitheta, # So I will briefly explain it here # proj_list : Returns a list of list of particle numbers, each list contains img_per_grp particle numbers # except for the last one. Depending on the number of particles left, they will either form a # group or append themselves to the last group # angle_list : Also returns a list of list, each list contains three numbers (phi, theta, delta), (phi, # theta) is the projection angle of the center of the group, delta is the range of this group # mirror_list: Also returns a list of list, each list contains img_per_grp True or False, which indicates # whether it should take mirror position. # In this program angle_list and mirror list are not of interest. proj_list_all, angle_list, mirror_list = group_proj_by_phitheta( proj_params, img_per_grp=img_per_grp) del proj_params print(" B number of groups ", myid, " ", len(proj_list_all), time() - st) mpi_barrier(MPI_COMM_WORLD) # Number of groups, actually there could be one or two more groups, since the size of the remaining group varies # we will simply assign them to main node. 
n_grp = nima / img_per_grp - 1 # Divide proj_list_all equally to all nodes, and becomes proj_list proj_list = [] for i in range(n_grp): proc_to_stay = i % number_of_proc if proc_to_stay == main_node: if myid == main_node: proj_list.append(proj_list_all[i]) elif myid == main_node: mpi_send(len(proj_list_all[i]), 1, MPI_INT, proc_to_stay, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) mpi_send(proj_list_all[i], len(proj_list_all[i]), MPI_INT, proc_to_stay, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) elif myid == proc_to_stay: img_per_grp = mpi_recv(1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) img_per_grp = int(img_per_grp[0]) temp = mpi_recv(img_per_grp, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) proj_list.append(list(map(int, temp))) del temp mpi_barrier(MPI_COMM_WORLD) print(" C ", myid, " ", time() - st) if myid == main_node: # Assign the remaining groups to main_node for i in range(n_grp, len(proj_list_all)): proj_list.append(proj_list_all[i]) del proj_list_all, angle_list, mirror_list # Compute stability per projection projection direction, equal number assigned, thus overlaps elif options.grouping == "GEV": if options.delta == -1.0: ERROR( "Angular step for reference projections is required for GEV method", "sxproj_stability", 1) from utilities import even_angles, nearestk_to_refdir, getvec refproj = even_angles(options.delta) img_begin, img_end = MPI_start_end(len(refproj), number_of_proc, myid) # Now each processor keeps its own share of reference projections refprojdir = refproj[img_begin:img_end] del refproj ref_ang = [0.0] * (len(refprojdir) * 2) for i in range(len(refprojdir)): ref_ang[i * 2] = refprojdir[0][0] ref_ang[i * 2 + 1] = refprojdir[0][1] + i * 0.1 print(" A ", myid, " ", time() - st) proj_attr = EMUtil.get_all_attributes(stack, "xform.projection") # the solution below is very slow, do not use it unless there is a problem with the i/O """ for i in xrange(number_of_proc): if myid == i: proj_attr = EMUtil.get_all_attributes(stack, "xform.projection") mpi_barrier(MPI_COMM_WORLD) """ print(" B ", myid, " ", time() - st) proj_ang = [0.0] * (nima * 2) for i in range(nima): dp = proj_attr[i].get_params("spider") proj_ang[i * 2] = dp["phi"] proj_ang[i * 2 + 1] = dp["theta"] print(" C ", myid, " ", time() - st) asi = Util.nearestk_to_refdir(proj_ang, ref_ang, img_per_grp) del proj_ang, ref_ang proj_list = [] for i in range(len(refprojdir)): proj_list.append(asi[i * img_per_grp:(i + 1) * img_per_grp]) del asi print(" D ", myid, " ", time() - st) #from sys import exit #exit() # Compute stability per projection elif options.grouping == "PPR": print(" A ", myid, " ", time() - st) proj_attr = EMUtil.get_all_attributes(stack, "xform.projection") print(" B ", myid, " ", time() - st) proj_params = [] for i in range(nima): dp = proj_attr[i].get_params("spider") phi, theta, psi, s2x, s2y = dp["phi"], dp["theta"], dp[ "psi"], -dp["tx"], -dp["ty"] proj_params.append([phi, theta, psi, s2x, s2y]) img_begin, img_end = MPI_start_end(nima, number_of_proc, myid) print(" C ", myid, " ", time() - st) from utilities import nearest_proj proj_list, mirror_list = nearest_proj( proj_params, img_per_grp, list(range(img_begin, img_begin + 1))) #range(img_begin, img_end)) refprojdir = proj_params[img_begin:img_end] del proj_params, mirror_list print(" D ", myid, " ", time() - st) else: ERROR("Incorrect projection grouping option", "sxproj_stability", 1) """ from utilities import write_text_file for i in xrange(len(proj_list)): 
write_text_file(proj_list[i],"projlist%06d_%04d"%(i,myid)) """ ########################################################################################################### # Begin stability test from utilities import get_params_proj, read_text_file #if myid == 0: # from utilities import read_text_file # proj_list[0] = map(int, read_text_file("lggrpp0.txt")) from utilities import model_blank aveList = [model_blank(nx, ny)] * len(proj_list) if options.grouping == "GRP": refprojdir = [[0.0, 0.0, -1.0]] * len(proj_list) for i in range(len(proj_list)): print(" E ", myid, " ", time() - st) class_data = EMData.read_images(stack, proj_list[i]) #print " R ",myid," ",time()-st if options.CTF: from filter import filt_ctf for im in range(len(class_data)): # MEM LEAK!! atemp = class_data[im].copy() btemp = filt_ctf(atemp, atemp.get_attr("ctf"), binary=1) class_data[im] = btemp #class_data[im] = filt_ctf(class_data[im], class_data[im].get_attr("ctf"), binary=1) for im in class_data: try: t = im.get_attr( "xform.align2d") # if they are there, no need to set them! except: try: t = im.get_attr("xform.projection") d = t.get_params("spider") set_params2D(im, [0.0, -d["tx"], -d["ty"], 0, 1.0]) except: set_params2D(im, [0.0, 0.0, 0.0, 0, 1.0]) #print " F ",myid," ",time()-st # Here, we perform realignment num_ali times all_ali_params = [] for j in range(num_ali): if (xrng[0] == 0.0 and yrng[0] == 0.0): avet = ali2d_ras(class_data, randomize=True, ir=1, ou=radius, rs=1, step=1.0, dst=90.0, maxit=ite, check_mirror=True, FH=options.fl, FF=options.aa) else: avet = within_group_refinement(class_data, mask, True, 1, radius, 1, xrng, yrng, step, 90.0, ite, options.fl, options.aa) ali_params = [] for im in range(len(class_data)): alpha, sx, sy, mirror, scale = get_params2D(class_data[im]) ali_params.extend([alpha, sx, sy, mirror]) all_ali_params.append(ali_params) #aveList[i] = avet #print " G ",myid," ",time()-st del ali_params # We determine the stability of this group here. 
# stable_set contains all particles deemed stable, it is a list of list # each list has two elements, the first is the pixel error, the second is the image number # stable_set is sorted based on pixel error #from utilities import write_text_file #write_text_file(all_ali_params, "all_ali_params%03d.txt"%myid) stable_set, mir_stab_rate, average_pix_err = multi_align_stability( all_ali_params, 0.0, 10000.0, thld_err, False, 2 * radius + 1) #print " H ",myid," ",time()-st if (len(stable_set) > 5): stable_set_id = [] members = [] pix_err = [] # First put the stable members into attr 'members' and 'pix_err' for s in stable_set: # s[1] - number in this subset stable_set_id.append(s[1]) # the original image number members.append(proj_list[i][s[1]]) pix_err.append(s[0]) # Then put the unstable members into attr 'members' and 'pix_err' from fundamentals import rot_shift2D avet.to_zero() if options.grouping == "GRP": aphi = 0.0 atht = 0.0 vphi = 0.0 vtht = 0.0 l = -1 for j in range(len(proj_list[i])): # Here it will only work if stable_set_id is sorted in the increasing number, see how l progresses if j in stable_set_id: l += 1 avet += rot_shift2D(class_data[j], stable_set[l][2][0], stable_set[l][2][1], stable_set[l][2][2], stable_set[l][2][3]) if options.grouping == "GRP": phi, theta, psi, sxs, sys = get_params_proj( class_data[j]) if (theta > 90.0): phi = (phi + 540.0) % 360.0 theta = 180.0 - theta aphi += phi atht += theta vphi += phi * phi vtht += theta * theta else: members.append(proj_list[i][j]) pix_err.append(99999.99) aveList[i] = avet.copy() if l > 1: l += 1 aveList[i] /= l if options.grouping == "GRP": aphi /= l atht /= l vphi = (vphi - l * aphi * aphi) / l vtht = (vtht - l * atht * atht) / l from math import sqrt refprojdir[i] = [ aphi, atht, (sqrt(max(vphi, 0.0)) + sqrt(max(vtht, 0.0))) / 2.0 ] # Here more information has to be stored, PARTICULARLY WHAT IS THE REFERENCE DIRECTION aveList[i].set_attr('members', members) aveList[i].set_attr('refprojdir', refprojdir[i]) aveList[i].set_attr('pixerr', pix_err) else: print(" empty group ", i, refprojdir[i]) aveList[i].set_attr('members', [-1]) aveList[i].set_attr('refprojdir', refprojdir[i]) aveList[i].set_attr('pixerr', [99999.]) del class_data if myid == main_node: km = 0 for i in range(number_of_proc): if i == main_node: for im in range(len(aveList)): aveList[im].write_image(args[1], km) km += 1 else: nl = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) nl = int(nl[0]) for im in range(nl): ave = recv_EMData(i, im + i + 70000) nm = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) nm = int(nm[0]) members = mpi_recv(nm, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) ave.set_attr('members', list(map(int, members))) members = mpi_recv(nm, MPI_FLOAT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) ave.set_attr('pixerr', list(map(float, members))) members = mpi_recv(3, MPI_FLOAT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) ave.set_attr('refprojdir', list(map(float, members))) ave.write_image(args[1], km) km += 1 else: mpi_send(len(aveList), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) for im in range(len(aveList)): send_EMData(aveList[im], main_node, im + myid + 70000) members = aveList[im].get_attr('members') mpi_send(len(members), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) mpi_send(members, len(members), MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) members = aveList[im].get_attr('pixerr') mpi_send(members, len(members), MPI_FLOAT, main_node, 
SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) try: members = aveList[im].get_attr('refprojdir') mpi_send(members, 3, MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) except: mpi_send([-999.0, -999.0, -999.0], 3, MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) global_def.BATCH = False mpi_barrier(MPI_COMM_WORLD) from mpi import mpi_finalize mpi_finalize()
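# Illustrative sketch of the group distribution used in the "GRP" branch above:
# the first n_grp projection groups are dealt out round-robin over the MPI ranks
# (proc_to_stay = i % number_of_proc) and the leftover groups are kept on the
# main node. The helper below only reproduces that assignment logic and is
# hypothetical; no MPI communication is performed.
def _round_robin_groups_sketch(n_groups_total, n_grp, number_of_proc, main_node=0):
    # Map every rank to the list of group indices it will process.
    assignment = dict((rank, []) for rank in range(number_of_proc))
    for i in range(n_grp):
        assignment[i % number_of_proc].append(i)
    for i in range(n_grp, n_groups_total):
        assignment[main_node].append(i)  # remaining groups go to the main node
    return assignment

# Example: 7 groups with n_grp = 5 over 2 ranks ->
# {0: [0, 2, 4, 5, 6], 1: [1, 3]}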