def vomq(classavgstack, classmap, classdoc, log=None, verbose=False):
    """
    Separate particles according to class assignment.

    Arguments:
        classavgstack : Input image stack
        classmap : Output class-to-particle lookup table. Each (long) line contains
            particles assigned to a class, one file for all classes
        classdoc : Output lists of particles assigned to a class, one file per class
            (filename template with a format placeholder for the class number)
        log : instance of Logger class
        verbose : (boolean) Whether to write additional information to screen
    """
    # Generate class-to-particle lookup table
    print_log_msg(
        "Exporting members of stack %s to class map %s" % (classavgstack, classmap),
        log, verbose)
    cmd = "sp_header.py %s --params=members --export=%s" % (classavgstack, classmap)
    print_log_msg(cmd, log, verbose)
    header(classavgstack, 'members', fexport=classmap)

    class_counter = 0
    part_counter = 0  # BUGFIX: was never initialized, raising NameError on first use

    # Loop through classes
    with open(classmap) as r:
        for class_num, line in enumerate(r.readlines()):
            # Strip surrounding brackets/newline from the members list.
            # NOTE(review): assumes each line looks like "[n1, n2, ...]" plus
            # trailing characters -- confirm against sp_header.py output format.
            class_list = line[1:-3].split(', ')
            class_counter += 1
            part_counter += len(class_list)
            write_text_row(class_list, classdoc.format(class_num))

    # BUGFIX: message previously used undefined name "counter"
    print_log_msg("Wrote %s class selection files\n" % class_counter, log, verbose)
def output_volume(freqvol, resolut, apix, outdir, prefix, fsc, out_ang_res, nx, ny, nz, res_overall):
    # Write the local-resolution volume (and optional derived volumes) to disk.
    #
    # freqvol     : EMData volume of local resolution in absolute frequency units
    # resolut     : FSC table (list of [freq, value, ...] rows); entries beyond
    #               res_overall are zeroed in place before optional writing
    # apix        : pixel size used to convert frequency to Angstrom resolution
    # outdir      : output directory
    # prefix      : basename for the output .hdf files
    # fsc         : output filename for the (possibly modified) FSC table, or None
    # out_ang_res : (bool) also write volumes converted to Angstrom resolution
    # nx, ny, nz  : volume dimensions, passed through to makeAngRes
    # res_overall : overall resolution in absolute frequency; -1.0 disables the
    #               mean-shifted output volumes
    #
    # NOTE: "optparse.os.path" works because optparse imports os; it is kept
    # unchanged here to preserve the file's existing (odd) convention.
    outvol = optparse.os.path.join(outdir, "{0}.hdf".format(prefix))
    outvol_ang = optparse.os.path.splitext(outvol)[0] + "_ang.hdf"
    outvol_shifted = optparse.os.path.splitext(outvol)[0] + "_shift.hdf"
    outvol_shifted_ang = optparse.os.path.splitext(outvol_shifted)[0] + "_ang.hdf"

    freqvol.write_image(outvol)

    if out_ang_res:
        # Convert frequency units to Angstrom resolution for easier viewing
        outAngResVol = makeAngRes(freqvol, nx, ny, nz, apix)
        outAngResVol.write_image(outvol_ang)

    if res_overall != -1.0:
        # Zero the FSC values at all frequencies beyond the overall resolution.
        # NOTE(review): deliberately relies on the loop variable ifreq surviving
        # the loop; if no entry exceeds res_overall, only the last row is zeroed.
        for ifreq in range(len(resolut)):
            if resolut[ifreq][0] > res_overall:
                break
        for jfreq in range(ifreq, len(resolut)):
            resolut[jfreq][1] = 0.0

        # Robust mean of the positive local-resolution values: clip outliers
        # with a 1.5*IQR fence before averaging.
        data_freqvol = freqvol.get_3dview()
        mask = data_freqvol > 0
        percentile_25 = numpy.percentile(data_freqvol[mask], 25)
        percentile_75 = numpy.percentile(data_freqvol[mask], 75)
        iqr = percentile_75 - percentile_25
        mask_low_pass = data_freqvol < percentile_75 + 1.5 * iqr
        mask_high_pass = data_freqvol > percentile_25 - 1.5 * iqr
        # old_div: py2-style true division of 1 by the clipped mean (real-space units)
        mean_real = old_div(1, float(numpy.mean(data_freqvol[mask & mask_low_pass & mask_high_pass])))
        overall_res_real = old_div(1, float(res_overall))
        # mean_ang = options.apix / float(EMAN2_cppwrap.Util.infomask(freqvol, m, True)[0])

        # Shift the volume so its robust mean matches the overall resolution,
        # then convert back before writing.
        volume_out_real = makeAngRes(freqvol, nx, ny, nz, 1)
        volume_out_real += overall_res_real - mean_real
        volume_out = makeAngRes(volume_out_real, nx, ny, nz, 1, False)
        volume_out.write_image(outvol_shifted)

        if out_ang_res:
            outAngResVol = makeAngRes(volume_out, nx, ny, nz, apix)
            outAngResVol.write_image(outvol_shifted_ang)

    if fsc != None:
        sp_utilities.write_text_row(resolut, fsc)
def mode_meridien(reconfile, classavgstack, classdocs, partangles, selectdoc,
                  maxshift, outerrad, outanglesdoc, outaligndoc,
                  interpolation_method=1, outliers=None,
                  goodclassparttemplate=None, alignopt='apsh', ringstep=1,
                  log=None, verbose=False):
    # Align each class average to a re-projection of the reference volume and
    # write combined 3D orientations (outanglesdoc) plus 2D alignment
    # parameters (outaligndoc), one row per class.
    #
    # reconfile      : reference reconstruction (volume file)
    # classavgstack  : stack of class averages
    # classdocs      : glob pattern matching per-class particle-list doc files
    # partangles     : doc file of per-particle refinement angles
    # selectdoc      : optional substack selection file (see average_angles)
    # maxshift       : maximum allowed shift during 2D alignment
    # outerrad       : outer alignment radius
    # outanglesdoc   : output doc file of combined Euler angles
    # outaligndoc    : output doc file of 2D alignment parameters
    # interpolation_method : passed to prep_vol (1 used for prgl below)
    # outliers       : optional angular threshold (degrees) for outlier removal
    # goodclassparttemplate : filename template for retained-particle lists
    # alignopt       : 2D alignment method: 'apsh' (default), 'scf',
    #                  'align2d', or 'direct3'
    # ringstep       : alignment radius step size
    # log, verbose   : logging controls

    # Resample reference
    recondata = EMAN2.EMData(reconfile)
    idim = recondata['nx']
    reconprep = prep_vol(recondata, npad=2, interpolation_method=interpolation_method)

    # Initialize output angles
    outangleslist = []
    outalignlist = []

    # Read class lists
    classdoclist = glob.glob(classdocs)
    partangleslist = read_text_row(partangles)

    # Loop through class lists
    for classdoc in classdoclist:  # [classdoclist[32]]: #
        # Strip out three-digit filenumber from the doc filename
        classexample = os.path.splitext(classdoc)
        classnum = int(classexample[0][-3:])

        # Initial average orientation of the class members
        [avg_phi_init, avg_theta_init] = average_angles(partangleslist, classdoc,
                                                        selectdoc=selectdoc)

        # Look for outliers: recompute the average excluding particles that
        # deviate more than `outliers` degrees from the initial estimate
        if outliers:
            [avg_phi_final, avg_theta_final] = average_angles(
                partangleslist, classdoc, selectdoc=selectdoc,
                init_angles=[avg_phi_init, avg_theta_init], threshold=outliers,
                goodpartdoc=goodclassparttemplate.format(classnum),
                log=log, verbose=verbose)
        else:
            [avg_phi_final, avg_theta_final] = [avg_phi_init, avg_theta_init]

        # Compute re-projection of the reference at the averaged orientation
        refprjreal = prgl(reconprep, [avg_phi_final, avg_theta_final, 0, 0, 0],
                          interpolation_method=1, return_real=True)

        # Align to class average
        classavg = get_im(classavgstack, classnum)

        # Alignment using self-correlation function
        if alignopt == 'scf':
            ang_align2d, sxs, sys, mirrorflag, peak = align2d_scf(
                classavg, refprjreal, maxshift, maxshift, ou=outerrad)

        # Weird results
        elif alignopt == 'align2d':
            # Set search range
            currshift = 0
            txrng = tyrng = search_range(idim, outerrad, currshift, maxshift)

            # Perform alignment
            ang_align2d, sxs, sys, mirrorflag, peak = align2d(
                classavg, refprjreal, txrng, tyrng, last_ring=outerrad)

        # Direct3 (angles seemed to be quantized)
        elif alignopt == 'direct3':
            [[ang_align2d, sxs, sys, mirrorflag, peak]] = \
                align2d_direct3([classavg], refprjreal, maxshift, maxshift,
                                ou=outerrad)

        # APSH-like alignment (default)
        else:
            [[ang_align2d, sxs, sys, mirrorflag, scale]] = apsh(
                refprjreal, classavg, outerradius=outerrad, maxshift=maxshift,
                ringstep=ringstep)

        # Scale slot is fixed at 1 regardless of the alignment branch
        outalignlist.append([ang_align2d, sxs, sys, mirrorflag, 1])
        msg = "Particle list %s: ang_align2d=%s sx=%s sy=%s mirror=%s\n" % (
            classdoc, ang_align2d, sxs, sys, mirrorflag)
        print_log_msg(msg, log, verbose)

        # Check for mirroring: fold a 180-degree flip into the Euler angles
        # before removing the in-plane rotation found by 2D alignment
        if mirrorflag == 1:
            tempeulers = list(
                compose_transform3(avg_phi_final, avg_theta_final, 0, 0, 0, 0, 1,
                                   0, 180, 0, 0, 0, 0, 1))
            combinedparams = list(
                compose_transform3(tempeulers[0], tempeulers[1], tempeulers[2],
                                   tempeulers[3], tempeulers[4], 0, 1,
                                   0, 0, -ang_align2d, 0, 0, 0, 1))
        else:
            combinedparams = list(
                compose_transform3(avg_phi_final, avg_theta_final, 0, 0, 0, 0, 1,
                                   0, 0, -ang_align2d, 0, 0, 0, 1))
        # compose_transform3: returns phi,theta,psi, tx,ty,tz, scale

        outangleslist.append(combinedparams)
    # End class-loop

    write_text_row(outangleslist, outanglesdoc)
    write_text_row(outalignlist, outaligndoc)
    print_log_msg(
        'Wrote alignment parameters to %s and %s\n' % (outanglesdoc, outaligndoc),
        log, verbose)

    del recondata  # Clean up
def apsh(refimgs, imgs2align, outangles=None, refanglesdoc=None, outaligndoc=None,
         outerradius=-1, maxshift=0, ringstep=1, mode="F", log=None, verbose=False):
    """
    Generates polar representations of a series of images to be used as alignment references.

    Arguments:
        refimgs : Input reference image stack (filename or EMData object)
        imgs2align : Image stack to be aligned (filename or EMData object)
        outangles : Output Euler angles doc file
        refanglesdoc : Input Euler angles for reference projections
        outaligndoc : Output 2D alignment doc file
        outerradius : Outer alignment radius
        maxshift : Maximum shift allowed
        ringstep : Alignment radius step size
        mode : Mode, full circle ("F") vs. half circle ("H")
        log : Logger object
        verbose : (boolean) Whether to write additional information to screen
    Returns:
        outalignlist : list of 2D alignment parameters [angle, sx, sy, mirror, scale]
    """
    # Generate polar representation(s) of reference(s)
    alignrings, polarrefs = mref2polar(refimgs, outerradius=outerradius,
                                       ringstep=ringstep, log=log, verbose=verbose)

    # Read image stack (as a filename or already an EMData object)
    if isinstance(imgs2align, str):
        imagestacklist = EMData.read_images(imgs2align)
    else:
        imagestacklist = [imgs2align]

    # Get number of images
    numimg = len(imagestacklist)

    # Get image dimensions (assuming square, and that images and references
    # have the same dimension)
    idim = imagestacklist[0]['nx']

    # Calculate image center.
    # BUGFIX: use floor division -- under Python 3, "/" yields a float, but
    # halfdim is used as a ring center argument and as a radius below.
    halfdim = idim // 2 + 1

    # Set constants
    currshift = 0
    scale = 1

    # Initialize output angles
    outangleslist = []
    outalignlist = []

    if outerradius <= 0:
        outerradius = halfdim - 3

    # PERF: hoisted out of the image loop -- the reference-angles doc only
    # needs to be read once (it was previously re-read for every image).
    if refanglesdoc:
        refangleslist = read_text_row(refanglesdoc)

    # Set search range
    txrng = tyrng = search_range(idim, outerradius, currshift, maxshift)

    print_log_msg(
        'Running multireference alignment allowing a maximum shift of %s\n'
        % maxshift, log, verbose)

    # Loop through images
    for imgindex in range(numimg):
        currimg = imagestacklist[imgindex]

        # Perform multi-reference alignment (adapted from alignment.mref_ali2d)
        best2dparamslist = [angt, sxst, syst, mirrorfloat, bestreffloat, peakt] = \
            Util.multiref_polar_ali_2d(currimg, polarrefs, txrng, tyrng, ringstep,
                                       mode, alignrings, halfdim, halfdim)
        bestref = int(bestreffloat)
        mirrorflag = int(mirrorfloat)

        # Store parameters
        params2dlist = [angt, sxst, syst, mirrorflag, scale]
        outalignlist.append(params2dlist)

        if refanglesdoc:
            besteulers = refangleslist[bestref]
        else:
            besteulers = [0] * 5

        # Check for mirroring: fold a 180-degree flip into the Euler angles
        # before removing the in-plane rotation found by the 2D alignment
        if mirrorflag == 1:
            tempeulers = list(
                compose_transform3(besteulers[0], besteulers[1], besteulers[2],
                                   besteulers[3], besteulers[4], 0, 1,
                                   0, 180, 0, 0, 0, 0, 1))
            combinedparams = list(
                compose_transform3(tempeulers[0], tempeulers[1], tempeulers[2],
                                   tempeulers[3], tempeulers[4], 0, 1,
                                   0, 0, -angt, 0, 0, 0, 1))
        else:
            combinedparams = list(
                compose_transform3(besteulers[0], besteulers[1], besteulers[2],
                                   besteulers[3], besteulers[4], 0, 1,
                                   0, 0, -angt, 0, 0, 0, 1))
        # compose_transform3: returns phi,theta,psi, tx,ty,tz, scale

        outangleslist.append(combinedparams)

        # Set transformations as image attribute
        set_params2D(currimg, params2dlist,
                     xform="xform.align2d")  # sometimes I get a vector error with sxheader
        set_params_proj(currimg, besteulers, xform="xform.projection")  # use shifts

    if outangles or outaligndoc:
        # BUGFIX: each message is now logged exactly once (previously the
        # angles message was logged alone and then again as the prefix of the
        # 2D-parameters message).
        if outangles:
            write_text_row(outangleslist, outangles)
            print_log_msg('Wrote alignment angles to %s\n' % outangles, log, verbose)
        if outaligndoc:
            write_text_row(outalignlist, outaligndoc)
            print_log_msg('Wrote 2D alignment parameters to %s\n' % outaligndoc,
                          log, verbose)

    return outalignlist
def average_angles(alignlist, partdoc, selectdoc=None, init_angles=None,
                   threshold=None, goodpartdoc=None, log=None, verbose=False):
    """
    Computes a vector average of a set of particles' Euler angles phi and theta.

    Arguments:
        alignlist : Alignment parameter doc file (or pre-read list of rows),
            i.e., from MERIDIEN refinement
        partdoc : List of particle indices whose angles should be averaged
        selectdoc : Input substack selection file if particles removed before
            refinement (e.g., Substack/isac_substack_particle_id_list.txt)
        init_angles : List (2 elements) with initial phi and theta angles,
            for excluding outliers
        threshold : Angular threshold (degrees) beyond which particles exceeding
            this angular difference from init_angles will be excluded
        goodpartdoc : Output list of retained particles if a threshold was specified
        log : Logger object
        verbose : (boolean) Whether to write additional information to screen
    Returns:
        list of 2 elements:
            avg_phi
            avg_theta
    """
    # Read alignment parameters.
    # BUGFIX: was read_text_row(outaligndoc) -- an undefined name, which raised
    # NameError whenever a filename was passed instead of a pre-read list.
    # (If loading the same parameters repeatedly, better to read the file once
    # externally and pass only the list.)
    if isinstance(alignlist, str):
        alignlist = read_text_row(alignlist)

    # Read class list
    partlist = read_text_row(partdoc)

    if selectdoc:
        selectlist = read_text_row(selectdoc)
    else:
        selectlist = None

    # Unit-vector accumulators (cos, sin) for each angle: averaging on the
    # unit circle avoids wraparound artifacts at 0/360 degrees
    sum_phi = np.array([0.0, 0.0])
    sum_theta = np.array([0.0, 0.0])
    totparts = 0
    num_outliers = 0
    goodpartlist = []
    goodpartcounter = 0

    # Loop through particles
    for totpartnum in partlist:
        if selectlist:
            # Map full-stack particle number to its position in the substack
            goodpartnum = selectlist.index(totpartnum)
        else:
            goodpartnum = totpartnum[0]

        try:
            phi_deg = alignlist[goodpartnum][0]
            theta_deg = alignlist[goodpartnum][1]
            phi_rad = np.deg2rad(phi_deg)
            theta_rad = np.deg2rad(theta_deg)
        except IndexError:
            msg = "\nERROR!! %s tries to access particle #%s" % (partdoc, goodpartnum)
            numalignparts = len(alignlist)
            msg += "\nAlignment doc file has only %s entries" % (numalignparts)
            msg += "\nMaybe try substack selection file with flag '--select <substack_select>'?"
            sp_global_def.ERROR(msg, __file__, 1)
            exit()

        if init_angles:
            angdif = angle_diff(init_angles, [phi_deg, theta_deg])
            if angdif > 180:
                angdif = 360.0 - angdif

        totparts += 1

        # Exclude particles exceeding optional threshold.
        # NOTE(review): passing threshold without init_angles raises NameError
        # on angdif -- callers always supply both together.
        if threshold == None or angdif < threshold:
            sum_phi += (np.cos(phi_rad), np.sin(phi_rad))
            sum_theta += (np.cos(theta_rad), np.sin(theta_rad))
            goodpartlist.append(goodpartnum)
            goodpartcounter += 1
        else:
            num_outliers += 1

    # Compute final average from the accumulated unit vectors
    avg_phi = degrees(atan2(sum_phi[1], sum_phi[0]))
    avg_theta = degrees(atan2(sum_theta[1], sum_theta[0]))

    # Clean up, might reuse
    del alignlist
    del partlist
    del selectlist

    msg = "Particle list %s: average angles (%s, %s)" % (partdoc, avg_phi, avg_theta)
    print_log_msg(msg, log, verbose)

    if threshold:
        msg = "Found %s out of %s outliers exceeding an angle difference of %s degrees from initial estimate" % (
            num_outliers, totparts, threshold)
        print_log_msg(msg, log, verbose)

    if goodpartdoc:
        if goodpartcounter > 0:
            write_text_row(goodpartlist, goodpartdoc)
            msg = "Wrote %s particles to %s" % (goodpartcounter, goodpartdoc)
            print_log_msg(msg, log, verbose)
        else:
            msg = "WARNING!! Kept 0 particles from class %s" % partdoc
            print_log_msg(msg, log, verbose)
            # Fall back to the initial estimate when every particle was rejected
            [avg_phi, avg_theta] = init_angles

    return [avg_phi, avg_theta]
def main(): def params_3D_2D_NEW(phi, theta, psi, s2x, s2y, mirror): # the final ali2d parameters already combine shifts operation first and rotation operation second for parameters converted from 3D if mirror: m = 1 alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0 - psi, 0, 0, 1.0) else: m = 0 alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0 - psi, 0, 0, 1.0) return alpha, sx, sy, m progname = os.path.basename(sys.argv[0]) usage = progname + " prj_stack --ave2D= --var2D= --ave3D= --var3D= --img_per_grp= --fl= --aa= --sym=symmetry --CTF" parser = OptionParser(usage, version=SPARXVERSION) parser.add_option("--output_dir", type="string", default="./", help="Output directory") parser.add_option("--ave2D", type="string", default=False, help="Write to the disk a stack of 2D averages") parser.add_option("--var2D", type="string", default=False, help="Write to the disk a stack of 2D variances") parser.add_option("--ave3D", type="string", default=False, help="Write to the disk reconstructed 3D average") parser.add_option("--var3D", type="string", default=False, help="Compute 3D variability (time consuming!)") parser.add_option( "--img_per_grp", type="int", default=100, help="Number of neighbouring projections.(Default is 100)") parser.add_option( "--no_norm", action="store_true", default=False, help="Do not use normalization.(Default is to apply normalization)") #parser.add_option("--radius", type="int" , default=-1 , help="radius for 3D variability" ) parser.add_option( "--npad", type="int", default=2, help= "Number of time to pad the original images.(Default is 2 times padding)" ) parser.add_option("--sym", type="string", default="c1", help="Symmetry. (Default is no symmetry)") parser.add_option( "--fl", type="float", default=0.0, help= "Low pass filter cutoff in absolute frequency (0.0 - 0.5) and is applied to decimated images. 
(Default - no filtration)" ) parser.add_option( "--aa", type="float", default=0.02, help= "Fall off of the filter. Use default value if user has no clue about falloff (Default value is 0.02)" ) parser.add_option("--CTF", action="store_true", default=False, help="Use CFT correction.(Default is no CTF correction)") #parser.add_option("--MPI" , action="store_true", default=False, help="use MPI version") #parser.add_option("--radiuspca", type="int" , default=-1 , help="radius for PCA" ) #parser.add_option("--iter", type="int" , default=40 , help="maximum number of iterations (stop criterion of reconstruction process)" ) #parser.add_option("--abs", type="float" , default=0.0 , help="minimum average absolute change of voxels' values (stop criterion of reconstruction process)" ) #parser.add_option("--squ", type="float" , default=0.0 , help="minimum average squared change of voxels' values (stop criterion of reconstruction process)" ) parser.add_option( "--VAR", action="store_true", default=False, help="Stack of input consists of 2D variances (Default False)") parser.add_option( "--decimate", type="float", default=0.25, help="Image decimate rate, a number less than 1. (Default is 0.25)") parser.add_option( "--window", type="int", default=0, help= "Target image size relative to original image size. 
(Default value is zero.)" ) #parser.add_option("--SND", action="store_true", default=False, help="compute squared normalized differences (Default False)") #parser.add_option("--nvec", type="int" , default=0 , help="Number of eigenvectors, (Default = 0 meaning no PCA calculated)") parser.add_option( "--symmetrize", action="store_true", default=False, help="Prepare input stack for handling symmetry (Default False)") parser.add_option("--overhead", type="float", default=0.5, help="python overhead per CPU.") (options, args) = parser.parse_args() ##### from mpi import mpi_comm_rank, mpi_comm_size, mpi_recv, MPI_COMM_WORLD from mpi import mpi_barrier, mpi_reduce, mpi_bcast, mpi_send, MPI_FLOAT, MPI_SUM, MPI_INT, MPI_MAX #from mpi import * from sp_applications import MPI_start_end from sp_reconstruction import recons3d_em, recons3d_em_MPI from sp_reconstruction import recons3d_4nn_MPI, recons3d_4nn_ctf_MPI from sp_utilities import print_begin_msg, print_end_msg, print_msg from sp_utilities import read_text_row, get_image, get_im, wrap_mpi_send, wrap_mpi_recv from sp_utilities import bcast_EMData_to_all, bcast_number_to_all from sp_utilities import get_symt # This is code for handling symmetries by the above program. To be incorporated. 
PAP 01/27/2015 from EMAN2db import db_open_dict # Set up global variables related to bdb cache if sp_global_def.CACHE_DISABLE: from sp_utilities import disable_bdb_cache disable_bdb_cache() # Set up global variables related to ERROR function sp_global_def.BATCH = True # detect if program is running under MPI RUNNING_UNDER_MPI = "OMPI_COMM_WORLD_SIZE" in os.environ if RUNNING_UNDER_MPI: sp_global_def.MPI = True if options.output_dir == "./": current_output_dir = os.path.abspath(options.output_dir) else: current_output_dir = options.output_dir if options.symmetrize: if mpi.mpi_comm_size(MPI_COMM_WORLD) > 1: ERROR("Cannot use more than one CPU for symmetry preparation") if not os.path.exists(current_output_dir): os.makedirs(current_output_dir) sp_global_def.write_command(current_output_dir) from sp_logger import Logger, BaseLogger_Files if os.path.exists(os.path.join(current_output_dir, "log.txt")): os.remove(os.path.join(current_output_dir, "log.txt")) log_main = Logger(BaseLogger_Files()) log_main.prefix = os.path.join(current_output_dir, "./") instack = args[0] sym = options.sym.lower() if (sym == "c1"): ERROR("There is no need to symmetrize stack for C1 symmetry") line = "" for a in sys.argv: line += " " + a log_main.add(line) if (instack[:4] != "bdb:"): #if output_dir =="./": stack = "bdb:data" stack = "bdb:" + current_output_dir + "/data" delete_bdb(stack) junk = cmdexecute("sp_cpy.py " + instack + " " + stack) else: stack = instack qt = EMUtil.get_all_attributes(stack, 'xform.projection') na = len(qt) ts = get_symt(sym) ks = len(ts) angsa = [None] * na for k in range(ks): #Qfile = "Q%1d"%k #if options.output_dir!="./": Qfile = os.path.join(options.output_dir,"Q%1d"%k) Qfile = os.path.join(current_output_dir, "Q%1d" % k) #delete_bdb("bdb:Q%1d"%k) delete_bdb("bdb:" + Qfile) #junk = cmdexecute("e2bdb.py "+stack+" --makevstack=bdb:Q%1d"%k) junk = cmdexecute("e2bdb.py " + stack + " --makevstack=bdb:" + Qfile) #DB = db_open_dict("bdb:Q%1d"%k) DB = db_open_dict("bdb:" 
+ Qfile) for i in range(na): ut = qt[i] * ts[k] DB.set_attr(i, "xform.projection", ut) #bt = ut.get_params("spider") #angsa[i] = [round(bt["phi"],3)%360.0, round(bt["theta"],3)%360.0, bt["psi"], -bt["tx"], -bt["ty"]] #write_text_row(angsa, 'ptsma%1d.txt'%k) #junk = cmdexecute("e2bdb.py "+stack+" --makevstack=bdb:Q%1d"%k) #junk = cmdexecute("sxheader.py bdb:Q%1d --params=xform.projection --import=ptsma%1d.txt"%(k,k)) DB.close() #if options.output_dir =="./": delete_bdb("bdb:sdata") delete_bdb("bdb:" + current_output_dir + "/" + "sdata") #junk = cmdexecute("e2bdb.py . --makevstack=bdb:sdata --filt=Q") sdata = "bdb:" + current_output_dir + "/" + "sdata" sxprint(sdata) junk = cmdexecute("e2bdb.py " + current_output_dir + " --makevstack=" + sdata + " --filt=Q") #junk = cmdexecute("ls EMAN2DB/sdata*") #a = get_im("bdb:sdata") a = get_im(sdata) a.set_attr("variabilitysymmetry", sym) #a.write_image("bdb:sdata") a.write_image(sdata) else: from sp_fundamentals import window2d myid = mpi_comm_rank(MPI_COMM_WORLD) number_of_proc = mpi_comm_size(MPI_COMM_WORLD) main_node = 0 shared_comm = mpi_comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL) myid_on_node = mpi_comm_rank(shared_comm) no_of_processes_per_group = mpi_comm_size(shared_comm) masters_from_groups_vs_everything_else_comm = mpi_comm_split( MPI_COMM_WORLD, main_node == myid_on_node, myid_on_node) color, no_of_groups, balanced_processor_load_on_nodes = get_colors_and_subsets(main_node, MPI_COMM_WORLD, myid, \ shared_comm, myid_on_node, masters_from_groups_vs_everything_else_comm) overhead_loading = options.overhead * number_of_proc #memory_per_node = options.memory_per_node #if memory_per_node == -1.: memory_per_node = 2.*no_of_processes_per_group keepgoing = 1 current_window = options.window current_decimate = options.decimate if len(args) == 1: stack = args[0] else: sxprint("Usage: " + usage) sxprint("Please run \'" + progname + " -h\' for detailed options") ERROR( "Invalid number of parameters 
used. Please see usage information above." ) return t0 = time() # obsolete flags options.MPI = True #options.nvec = 0 options.radiuspca = -1 options.iter = 40 options.abs = 0.0 options.squ = 0.0 if options.fl > 0.0 and options.aa == 0.0: ERROR("Fall off has to be given for the low-pass filter", myid=myid) #if options.VAR and options.SND: # ERROR( "Only one of var and SND can be set!",myid=myid ) if options.VAR and (options.ave2D or options.ave3D or options.var2D): ERROR( "When VAR is set, the program cannot output ave2D, ave3D or var2D", myid=myid) #if options.SND and (options.ave2D or options.ave3D): # ERROR( "When SND is set, the program cannot output ave2D or ave3D", myid=myid ) #if options.nvec > 0 : # ERROR( "PCA option not implemented", myid=myid ) #if options.nvec > 0 and options.ave3D == None: # ERROR( "When doing PCA analysis, one must set ave3D", myid=myid ) if current_decimate > 1.0 or current_decimate < 0.0: ERROR("Decimate rate should be a value between 0.0 and 1.0", myid=myid) if current_window < 0.0: ERROR("Target window size should be always larger than zero", myid=myid) if myid == main_node: img = get_image(stack, 0) nx = img.get_xsize() ny = img.get_ysize() if (min(nx, ny) < current_window): keepgoing = 0 keepgoing = bcast_number_to_all(keepgoing, main_node, MPI_COMM_WORLD) if keepgoing == 0: ERROR( "The target window size cannot be larger than the size of decimated image", myid=myid) import string options.sym = options.sym.lower() # if global_def.CACHE_DISABLE: # from utilities import disable_bdb_cache # disable_bdb_cache() # global_def.BATCH = True if myid == main_node: if not os.path.exists(current_output_dir): os.makedirs(current_output_dir ) # Never delete output_dir in the program! 
img_per_grp = options.img_per_grp #nvec = options.nvec radiuspca = options.radiuspca from sp_logger import Logger, BaseLogger_Files #if os.path.exists(os.path.join(options.output_dir, "log.txt")): os.remove(os.path.join(options.output_dir, "log.txt")) log_main = Logger(BaseLogger_Files()) log_main.prefix = os.path.join(current_output_dir, "./") if myid == main_node: line = "" for a in sys.argv: line += " " + a log_main.add(line) log_main.add("-------->>>Settings given by all options<<<-------") log_main.add("Symmetry : %s" % options.sym) log_main.add("Input stack : %s" % stack) log_main.add("Output_dir : %s" % current_output_dir) if options.ave3D: log_main.add("Ave3d : %s" % options.ave3D) if options.var3D: log_main.add("Var3d : %s" % options.var3D) if options.ave2D: log_main.add("Ave2D : %s" % options.ave2D) if options.var2D: log_main.add("Var2D : %s" % options.var2D) if options.VAR: log_main.add("VAR : True") else: log_main.add("VAR : False") if options.CTF: log_main.add("CTF correction : True ") else: log_main.add("CTF correction : False ") log_main.add("Image per group : %5d" % options.img_per_grp) log_main.add("Image decimate rate : %4.3f" % current_decimate) log_main.add("Low pass filter : %4.3f" % options.fl) current_fl = options.fl if current_fl == 0.0: current_fl = 0.5 log_main.add( "Current low pass filter is equivalent to cutoff frequency %4.3f for original image size" % round((current_fl * current_decimate), 3)) log_main.add("Window size : %5d " % current_window) log_main.add("sx3dvariability begins") symbaselen = 0 if myid == main_node: nima = EMUtil.get_image_count(stack) img = get_image(stack) nx = img.get_xsize() ny = img.get_ysize() nnxo = nx nnyo = ny if options.sym != "c1": imgdata = get_im(stack) try: i = imgdata.get_attr("variabilitysymmetry").lower() if (i != options.sym): ERROR( "The symmetry provided does not agree with the symmetry of the input stack", myid=myid) except: ERROR( "Input stack is not prepared for symmetry, please follow 
instructions", myid=myid) from sp_utilities import get_symt i = len(get_symt(options.sym)) if ((nima / i) * i != nima): ERROR( "The length of the input stack is incorrect for symmetry processing", myid=myid) symbaselen = nima / i else: symbaselen = nima else: nima = 0 nx = 0 ny = 0 nnxo = 0 nnyo = 0 nima = bcast_number_to_all(nima) nx = bcast_number_to_all(nx) ny = bcast_number_to_all(ny) nnxo = bcast_number_to_all(nnxo) nnyo = bcast_number_to_all(nnyo) if current_window > max(nx, ny): ERROR("Window size is larger than the original image size") if current_decimate == 1.: if current_window != 0: nx = current_window ny = current_window else: if current_window == 0: nx = int(nx * current_decimate + 0.5) ny = int(ny * current_decimate + 0.5) else: nx = int(current_window * current_decimate + 0.5) ny = nx symbaselen = bcast_number_to_all(symbaselen) # check FFT prime number from sp_fundamentals import smallprime is_fft_friendly = (nx == smallprime(nx)) if not is_fft_friendly: if myid == main_node: log_main.add( "The target image size is not a product of small prime numbers" ) log_main.add("Program adjusts the input settings!") ### two cases if current_decimate == 1.: nx = smallprime(nx) ny = nx current_window = nx # update if myid == main_node: log_main.add("The window size is updated to %d." % current_window) else: if current_window == 0: nx = smallprime(int(nx * current_decimate + 0.5)) current_decimate = float(nx) / nnxo ny = nx if (myid == main_node): log_main.add("The decimate rate is updated to %f." % current_decimate) else: nx = smallprime( int(current_window * current_decimate + 0.5)) ny = nx current_window = int(nx / current_decimate + 0.5) if (myid == main_node): log_main.add("The window size is updated to %d." 
% current_window) if myid == main_node: log_main.add("The target image size is %d" % nx) if radiuspca == -1: radiuspca = nx / 2 - 2 if myid == main_node: log_main.add("%-70s: %d\n" % ("Number of projection", nima)) img_begin, img_end = MPI_start_end(nima, number_of_proc, myid) """ if options.SND: from sp_projection import prep_vol, prgs from sp_statistics import im_diff from sp_utilities import get_im, model_circle, get_params_proj, set_params_proj from sp_utilities import get_ctf, generate_ctf from sp_filter import filt_ctf imgdata = EMData.read_images(stack, range(img_begin, img_end)) if options.CTF: vol = recons3d_4nn_ctf_MPI(myid, imgdata, 1.0, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1) else: vol = recons3d_4nn_MPI(myid, imgdata, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1) bcast_EMData_to_all(vol, myid) volft, kb = prep_vol(vol) mask = model_circle(nx/2-2, nx, ny) varList = [] for i in xrange(img_begin, img_end): phi, theta, psi, s2x, s2y = get_params_proj(imgdata[i-img_begin]) ref_prj = prgs(volft, kb, [phi, theta, psi, -s2x, -s2y]) if options.CTF: ctf_params = get_ctf(imgdata[i-img_begin]) ref_prj = filt_ctf(ref_prj, generate_ctf(ctf_params)) diff, A, B = im_diff(ref_prj, imgdata[i-img_begin], mask) diff2 = diff*diff set_params_proj(diff2, [phi, theta, psi, s2x, s2y]) varList.append(diff2) mpi_barrier(MPI_COMM_WORLD) """ if options.VAR: # 2D variance images have no shifts #varList = EMData.read_images(stack, range(img_begin, img_end)) from EMAN2 import Region for index_of_particle in range(img_begin, img_end): image = get_im(stack, index_of_proj) if current_window > 0: varList.append( fdecimate( window2d(image, current_window, current_window), nx, ny)) else: varList.append(fdecimate(image, nx, ny)) else: from sp_utilities import bcast_number_to_all, bcast_list_to_all, send_EMData, recv_EMData from sp_utilities import set_params_proj, get_params_proj, params_3D_2D, get_params2D, set_params2D, compose_transform2 from 
sp_utilities import model_blank, nearest_proj, model_circle, write_text_row, wrap_mpi_gatherv from sp_applications import pca from sp_statistics import avgvar, avgvar_ctf, ccc from sp_filter import filt_tanl from sp_morphology import threshold, square_root from sp_projection import project, prep_vol, prgs from sets import Set from sp_utilities import wrap_mpi_recv, wrap_mpi_bcast, wrap_mpi_send import numpy as np if myid == main_node: t1 = time() proj_angles = [] aveList = [] tab = EMUtil.get_all_attributes(stack, 'xform.projection') for i in range(nima): t = tab[i].get_params('spider') phi = t['phi'] theta = t['theta'] psi = t['psi'] x = theta if x > 90.0: x = 180.0 - x x = x * 10000 + psi proj_angles.append([x, t['phi'], t['theta'], t['psi'], i]) t2 = time() log_main.add( "%-70s: %d\n" % ("Number of neighboring projections", img_per_grp)) log_main.add("...... Finding neighboring projections\n") log_main.add("Number of images per group: %d" % img_per_grp) log_main.add("Now grouping projections") proj_angles.sort() proj_angles_list = np.full((nima, 4), 0.0, dtype=np.float32) for i in range(nima): proj_angles_list[i][0] = proj_angles[i][1] proj_angles_list[i][1] = proj_angles[i][2] proj_angles_list[i][2] = proj_angles[i][3] proj_angles_list[i][3] = proj_angles[i][4] else: proj_angles_list = 0 proj_angles_list = wrap_mpi_bcast(proj_angles_list, main_node, MPI_COMM_WORLD) proj_angles = [] for i in range(nima): proj_angles.append([ proj_angles_list[i][0], proj_angles_list[i][1], proj_angles_list[i][2], int(proj_angles_list[i][3]) ]) del proj_angles_list proj_list, mirror_list = nearest_proj(proj_angles, img_per_grp, range(img_begin, img_end)) all_proj = Set() for im in proj_list: for jm in im: all_proj.add(proj_angles[jm][3]) all_proj = list(all_proj) index = {} for i in range(len(all_proj)): index[all_proj[i]] = i mpi_barrier(MPI_COMM_WORLD) if myid == main_node: log_main.add("%-70s: %.2f\n" % ("Finding neighboring projections lasted [s]", time() - t2)) 
log_main.add("%-70s: %d\n" % ("Number of groups processed on the main node", len(proj_list))) log_main.add("Grouping projections took: %12.1f [m]" % ((time() - t2) / 60.)) log_main.add("Number of groups on main node: ", len(proj_list)) mpi_barrier(MPI_COMM_WORLD) if myid == main_node: log_main.add("...... Calculating the stack of 2D variances \n") # Memory estimation. There are two memory consumption peaks # peak 1. Compute ave, var; # peak 2. Var volume reconstruction; # proj_params = [0.0]*(nima*5) aveList = [] varList = [] #if nvec > 0: eigList = [[] for i in range(nvec)] dnumber = len( all_proj) # all neighborhood set for assigned to myid pnumber = len(proj_list) * 2. + img_per_grp # aveList and varList tnumber = dnumber + pnumber vol_size2 = nx**3 * 4. * 8 / 1.e9 vol_size1 = 2. * nnxo**3 * 4. * 8 / 1.e9 proj_size = nnxo * nnyo * len( proj_list) * 4. * 2. / 1.e9 # both aveList and varList orig_data_size = nnxo * nnyo * 4. * tnumber / 1.e9 reduced_data_size = nx * nx * 4. * tnumber / 1.e9 full_data = np.full((number_of_proc, 2), -1., dtype=np.float16) full_data[myid] = orig_data_size, reduced_data_size if myid != main_node: wrap_mpi_send(full_data, main_node, MPI_COMM_WORLD) if myid == main_node: for iproc in range(number_of_proc): if iproc != main_node: dummy = wrap_mpi_recv(iproc, MPI_COMM_WORLD) full_data[np.where(dummy > -1)] = dummy[np.where( dummy > -1)] del dummy mpi_barrier(MPI_COMM_WORLD) full_data = wrap_mpi_bcast(full_data, main_node, MPI_COMM_WORLD) # find the CPU with heaviest load minindx = np.argsort(full_data, 0) heavy_load_myid = minindx[-1][1] total_mem = sum(full_data) if myid == main_node: if current_window == 0: log_main.add( "Nx: current image size = %d. Decimated by %f from %d" % (nx, current_decimate, nnxo)) else: log_main.add( "Nx: current image size = %d. 
Windowed to %d, and decimated by %f from %d" % (nx, current_window, current_decimate, nnxo)) log_main.add("Nproj: number of particle images.") log_main.add("Navg: number of 2D average images.") log_main.add("Nvar: number of 2D variance images.") log_main.add( "Img_per_grp: user defined image per group for averaging = %d" % img_per_grp) log_main.add( "Overhead: total python overhead memory consumption = %f" % overhead_loading) log_main.add("Total memory) = 4.0*nx^2*(nproj + navg +nvar+ img_per_grp)/1.0e9 + overhead: %12.3f [GB]"%\ (total_mem[1] + overhead_loading)) del full_data mpi_barrier(MPI_COMM_WORLD) if myid == heavy_load_myid: log_main.add( "Begin reading and preprocessing images on processor. Wait... " ) ttt = time() #imgdata = EMData.read_images(stack, all_proj) imgdata = [None for im in range(len(all_proj))] for index_of_proj in range(len(all_proj)): #image = get_im(stack, all_proj[index_of_proj]) if (current_window > 0): imgdata[index_of_proj] = fdecimate( window2d(get_im(stack, all_proj[index_of_proj]), current_window, current_window), nx, ny) else: imgdata[index_of_proj] = fdecimate( get_im(stack, all_proj[index_of_proj]), nx, ny) if (current_decimate > 0.0 and options.CTF): ctf = imgdata[index_of_proj].get_attr("ctf") ctf.apix = ctf.apix / current_decimate imgdata[index_of_proj].set_attr("ctf", ctf) if myid == heavy_load_myid and index_of_proj % 100 == 0: log_main.add(" ...... 
%6.2f%% " % (index_of_proj / float(len(all_proj)) * 100.)) mpi_barrier(MPI_COMM_WORLD) if myid == heavy_load_myid: log_main.add("All_proj preprocessing cost %7.2f m" % ((time() - ttt) / 60.)) log_main.add("Wait untill reading on all CPUs done...") ''' imgdata2 = EMData.read_images(stack, range(img_begin, img_end)) if options.fl > 0.0: for k in xrange(len(imgdata2)): imgdata2[k] = filt_tanl(imgdata2[k], options.fl, options.aa) if options.CTF: vol = recons3d_4nn_ctf_MPI(myid, imgdata2, 1.0, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1) else: vol = recons3d_4nn_MPI(myid, imgdata2, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1) if myid == main_node: vol.write_image("vol_ctf.hdf") print_msg("Writing to the disk volume reconstructed from averages as : %s\n"%("vol_ctf.hdf")) del vol, imgdata2 mpi_barrier(MPI_COMM_WORLD) ''' from sp_applications import prepare_2d_forPCA from sp_utilities import model_blank from EMAN2 import Transform if not options.no_norm: mask = model_circle(nx / 2 - 2, nx, nx) if options.CTF: from sp_utilities import pad from sp_filter import filt_ctf from sp_filter import filt_tanl if myid == heavy_load_myid: log_main.add("Start computing 2D aveList and varList. 
Wait...") ttt = time() inner = nx // 2 - 4 outer = inner + 2 xform_proj_for_2D = [None for i in range(len(proj_list))] for i in range(len(proj_list)): ki = proj_angles[proj_list[i][0]][3] if ki >= symbaselen: continue mi = index[ki] dpar = Util.get_transform_params(imgdata[mi], "xform.projection", "spider") phiM, thetaM, psiM, s2xM, s2yM = dpar["phi"], dpar[ "theta"], dpar[ "psi"], -dpar["tx"] * current_decimate, -dpar[ "ty"] * current_decimate grp_imgdata = [] for j in range(img_per_grp): mj = index[proj_angles[proj_list[i][j]][3]] cpar = Util.get_transform_params(imgdata[mj], "xform.projection", "spider") alpha, sx, sy, mirror = params_3D_2D_NEW( cpar["phi"], cpar["theta"], cpar["psi"], -cpar["tx"] * current_decimate, -cpar["ty"] * current_decimate, mirror_list[i][j]) if thetaM <= 90: if mirror == 0: alpha, sx, sy, scale = compose_transform2( alpha, sx, sy, 1.0, phiM - cpar["phi"], 0.0, 0.0, 1.0) else: alpha, sx, sy, scale = compose_transform2( alpha, sx, sy, 1.0, 180 - (phiM - cpar["phi"]), 0.0, 0.0, 1.0) else: if mirror == 0: alpha, sx, sy, scale = compose_transform2( alpha, sx, sy, 1.0, -(phiM - cpar["phi"]), 0.0, 0.0, 1.0) else: alpha, sx, sy, scale = compose_transform2( alpha, sx, sy, 1.0, -(180 - (phiM - cpar["phi"])), 0.0, 0.0, 1.0) imgdata[mj].set_attr( "xform.align2d", Transform({ "type": "2D", "alpha": alpha, "tx": sx, "ty": sy, "mirror": mirror, "scale": 1.0 })) grp_imgdata.append(imgdata[mj]) if not options.no_norm: for k in range(img_per_grp): ave, std, minn, maxx = Util.infomask( grp_imgdata[k], mask, False) grp_imgdata[k] -= ave grp_imgdata[k] /= std if options.fl > 0.0: for k in range(img_per_grp): grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa) # Because of background issues, only linear option works. 
if options.CTF: ave, var = aves_wiener(grp_imgdata, SNR=1.0e5, interpolation_method="linear") else: ave, var = ave_var(grp_imgdata) # Switch to std dev # threshold is not really needed,it is just in case due to numerical accuracy something turns out negative. var = square_root(threshold(var)) set_params_proj(ave, [phiM, thetaM, 0.0, 0.0, 0.0]) set_params_proj(var, [phiM, thetaM, 0.0, 0.0, 0.0]) aveList.append(ave) varList.append(var) xform_proj_for_2D[i] = [phiM, thetaM, 0.0, 0.0, 0.0] ''' if nvec > 0: eig = pca(input_stacks=grp_imgdata, subavg="", mask_radius=radiuspca, nvec=nvec, incore=True, shuffle=False, genbuf=True) for k in range(nvec): set_params_proj(eig[k], [phiM, thetaM, 0.0, 0.0, 0.0]) eigList[k].append(eig[k]) """ if myid == 0 and i == 0: for k in xrange(nvec): eig[k].write_image("eig.hdf", k) """ ''' if (myid == heavy_load_myid) and (i % 100 == 0): log_main.add(" ......%6.2f%% " % (i / float(len(proj_list)) * 100.)) del imgdata, grp_imgdata, cpar, dpar, all_proj, proj_angles, index if not options.no_norm: del mask if myid == main_node: del tab # At this point, all averages and variances are computed mpi_barrier(MPI_COMM_WORLD) if (myid == heavy_load_myid): log_main.add("Computing aveList and varList took %12.1f [m]" % ((time() - ttt) / 60.)) xform_proj_for_2D = wrap_mpi_gatherv(xform_proj_for_2D, main_node, MPI_COMM_WORLD) if (myid == main_node): write_text_row([str(entry) for entry in xform_proj_for_2D], os.path.join(current_output_dir, "params.txt")) del xform_proj_for_2D mpi_barrier(MPI_COMM_WORLD) if options.ave2D: from sp_fundamentals import fpol from sp_applications import header if myid == main_node: log_main.add("Compute ave2D ... 
") km = 0 for i in range(number_of_proc): if i == main_node: for im in range(len(aveList)): aveList[im].write_image( os.path.join(current_output_dir, options.ave2D), km) km += 1 else: nl = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) nl = int(nl[0]) for im in range(nl): ave = recv_EMData(i, im + i + 70000) """ nm = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) nm = int(nm[0]) members = mpi_recv(nm, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) ave.set_attr('members', map(int, members)) members = mpi_recv(nm, MPI_FLOAT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) ave.set_attr('pix_err', map(float, members)) members = mpi_recv(3, MPI_FLOAT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) ave.set_attr('refprojdir', map(float, members)) """ tmpvol = fpol(ave, nx, nx, 1) tmpvol.write_image( os.path.join(current_output_dir, options.ave2D), km) km += 1 else: mpi_send(len(aveList), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) for im in range(len(aveList)): send_EMData(aveList[im], main_node, im + myid + 70000) """ members = aveList[im].get_attr('members') mpi_send(len(members), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) mpi_send(members, len(members), MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) members = aveList[im].get_attr('pix_err') mpi_send(members, len(members), MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) try: members = aveList[im].get_attr('refprojdir') mpi_send(members, 3, MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) except: mpi_send([-999.0,-999.0,-999.0], 3, MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) """ if myid == main_node: header(os.path.join(current_output_dir, options.ave2D), params='xform.projection', fimport=os.path.join(current_output_dir, "params.txt")) mpi_barrier(MPI_COMM_WORLD) if options.ave3D: from sp_fundamentals import fpol t5 = time() if myid == main_node: log_main.add("Reconstruct ave3D ... 
") ave3D = recons3d_4nn_MPI(myid, aveList, symmetry=options.sym, npad=options.npad) bcast_EMData_to_all(ave3D, myid) if myid == main_node: if current_decimate != 1.0: ave3D = resample(ave3D, 1. / current_decimate) ave3D = fpol(ave3D, nnxo, nnxo, nnxo) # always to the orignal image size set_pixel_size(ave3D, 1.0) ave3D.write_image( os.path.join(current_output_dir, options.ave3D)) log_main.add("Ave3D reconstruction took %12.1f [m]" % ((time() - t5) / 60.0)) log_main.add("%-70s: %s\n" % ("The reconstructed ave3D is saved as ", options.ave3D)) mpi_barrier(MPI_COMM_WORLD) del ave, var, proj_list, stack, alpha, sx, sy, mirror, aveList ''' if nvec > 0: for k in range(nvec): if myid == main_node:log_main.add("Reconstruction eigenvolumes", k) cont = True ITER = 0 mask2d = model_circle(radiuspca, nx, nx) while cont: #print "On node %d, iteration %d"%(myid, ITER) eig3D = recons3d_4nn_MPI(myid, eigList[k], symmetry=options.sym, npad=options.npad) bcast_EMData_to_all(eig3D, myid, main_node) if options.fl > 0.0: eig3D = filt_tanl(eig3D, options.fl, options.aa) if myid == main_node: eig3D.write_image(os.path.join(options.outpout_dir, "eig3d_%03d.hdf"%(k, ITER))) Util.mul_img( eig3D, model_circle(radiuspca, nx, nx, nx) ) eig3Df, kb = prep_vol(eig3D) del eig3D cont = False icont = 0 for l in range(len(eigList[k])): phi, theta, psi, s2x, s2y = get_params_proj(eigList[k][l]) proj = prgs(eig3Df, kb, [phi, theta, psi, s2x, s2y]) cl = ccc(proj, eigList[k][l], mask2d) if cl < 0.0: icont += 1 cont = True eigList[k][l] *= -1.0 u = int(cont) u = mpi_reduce([u], 1, MPI_INT, MPI_MAX, main_node, MPI_COMM_WORLD) icont = mpi_reduce([icont], 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD) if myid == main_node: u = int(u[0]) log_main.add(" Eigenvector: ",k," number changed ",int(icont[0])) else: u = 0 u = bcast_number_to_all(u, main_node) cont = bool(u) ITER += 1 del eig3Df, kb mpi_barrier(MPI_COMM_WORLD) del eigList, mask2d ''' if options.ave3D: del ave3D if options.var2D: from sp_fundamentals 
import fpol from sp_applications import header if myid == main_node: log_main.add("Compute var2D...") km = 0 for i in range(number_of_proc): if i == main_node: for im in range(len(varList)): tmpvol = fpol(varList[im], nx, nx, 1) tmpvol.write_image( os.path.join(current_output_dir, options.var2D), km) km += 1 else: nl = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) nl = int(nl[0]) for im in range(nl): ave = recv_EMData(i, im + i + 70000) tmpvol = fpol(ave, nx, nx, 1) tmpvol.write_image( os.path.join(current_output_dir, options.var2D), km) km += 1 else: mpi_send(len(varList), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD) for im in range(len(varList)): send_EMData(varList[im], main_node, im + myid + 70000) # What with the attributes?? mpi_barrier(MPI_COMM_WORLD) if myid == main_node: from sp_applications import header header(os.path.join(current_output_dir, options.var2D), params='xform.projection', fimport=os.path.join(current_output_dir, "params.txt")) mpi_barrier(MPI_COMM_WORLD) if options.var3D: if myid == main_node: log_main.add("Reconstruct var3D ...") t6 = time() # radiusvar = options.radius # if( radiusvar < 0 ): radiusvar = nx//2 -3 res = recons3d_4nn_MPI(myid, varList, symmetry=options.sym, npad=options.npad) #res = recons3d_em_MPI(varList, vol_stack, options.iter, radiusvar, options.abs, True, options.sym, options.squ) if myid == main_node: from sp_fundamentals import fpol if current_decimate != 1.0: res = resample(res, 1. / current_decimate) res = fpol(res, nnxo, nnxo, nnxo) set_pixel_size(res, 1.0) res.write_image(os.path.join(current_output_dir, options.var3D)) log_main.add( "%-70s: %s\n" % ("The reconstructed var3D is saved as ", options.var3D)) log_main.add("Var3D reconstruction took %f12.1 [m]" % ((time() - t6) / 60.0)) log_main.add("Total computation time %f12.1 [m]" % ((time() - t0) / 60.0)) log_main.add("sx3dvariability finishes") if RUNNING_UNDER_MPI: sp_global_def.MPI = False sp_global_def.BATCH = False
def main(): def params_3D_2D_NEW(phi, theta, psi, s2x, s2y, mirror): # the final ali2d parameters already combine shifts operation first and rotation operation second for parameters converted from 3D if mirror: m = 1 alpha, sx, sy, scalen = sp_utilities.compose_transform2( 0, s2x, s2y, 1.0, 540.0 - psi, 0, 0, 1.0) else: m = 0 alpha, sx, sy, scalen = sp_utilities.compose_transform2( 0, s2x, s2y, 1.0, 360.0 - psi, 0, 0, 1.0) return alpha, sx, sy, m progname = optparse.os.path.basename(sys.argv[0]) usage = ( progname + " prj_stack --ave2D= --var2D= --ave3D= --var3D= --img_per_grp= --fl= --aa= --sym=symmetry --CTF" ) parser = optparse.OptionParser(usage, version=sp_global_def.SPARXVERSION) parser.add_option("--output_dir", type="string", default="./", help="Output directory") parser.add_option( "--ave2D", type="string", default=False, help="Write to the disk a stack of 2D averages", ) parser.add_option( "--var2D", type="string", default=False, help="Write to the disk a stack of 2D variances", ) parser.add_option( "--ave3D", type="string", default=False, help="Write to the disk reconstructed 3D average", ) parser.add_option( "--var3D", type="string", default=False, help="Compute 3D variability (time consuming!)", ) parser.add_option( "--img_per_grp", type="int", default=100, help="Number of neighbouring projections.(Default is 100)", ) parser.add_option( "--no_norm", action="store_true", default=False, help="Do not use normalization.(Default is to apply normalization)", ) # parser.add_option("--radius", type="int" , default=-1 , help="radius for 3D variability" ) parser.add_option( "--npad", type="int", default=2, help= "Number of time to pad the original images.(Default is 2 times padding)", ) parser.add_option("--sym", type="string", default="c1", help="Symmetry. (Default is no symmetry)") parser.add_option( "--fl", type="float", default=0.0, help= "Low pass filter cutoff in absolute frequency (0.0 - 0.5) and is applied to decimated images. 
(Default - no filtration)", ) parser.add_option( "--aa", type="float", default=0.02, help= "Fall off of the filter. Use default value if user has no clue about falloff (Default value is 0.02)", ) parser.add_option( "--CTF", action="store_true", default=False, help="Use CFT correction.(Default is no CTF correction)", ) # parser.add_option("--MPI" , action="store_true", default=False, help="use MPI version") # parser.add_option("--radiuspca", type="int" , default=-1 , help="radius for PCA" ) # parser.add_option("--iter", type="int" , default=40 , help="maximum number of iterations (stop criterion of reconstruction process)" ) # parser.add_option("--abs", type="float" , default=0.0 , help="minimum average absolute change of voxels' values (stop criterion of reconstruction process)" ) # parser.add_option("--squ", type="float" , default=0.0 , help="minimum average squared change of voxels' values (stop criterion of reconstruction process)" ) parser.add_option( "--VAR", action="store_true", default=False, help="Stack of input consists of 2D variances (Default False)", ) parser.add_option( "--decimate", type="float", default=0.25, help="Image decimate rate, a number less than 1. (Default is 0.25)", ) parser.add_option( "--window", type="int", default=0, help= "Target image size relative to original image size. (Default value is zero.)", ) # parser.add_option("--SND", action="store_true", default=False, help="compute squared normalized differences (Default False)") # parser.add_option("--nvec", type="int" , default=0 , help="Number of eigenvectors, (Default = 0 meaning no PCA calculated)") parser.add_option( "--symmetrize", action="store_true", default=False, help="Prepare input stack for handling symmetry (Default False)", ) parser.add_option("--overhead", type="float", default=0.5, help="python overhead per CPU.") (options, args) = parser.parse_args() ##### # from mpi import * # This is code for handling symmetries by the above program. To be incorporated. 
PAP 01/27/2015 # Set up global variables related to bdb cache if sp_global_def.CACHE_DISABLE: sp_utilities.disable_bdb_cache() # Set up global variables related to ERROR function sp_global_def.BATCH = True # detect if program is running under MPI RUNNING_UNDER_MPI = "OMPI_COMM_WORLD_SIZE" in optparse.os.environ if RUNNING_UNDER_MPI: sp_global_def.MPI = True if options.output_dir == "./": current_output_dir = optparse.os.path.abspath(options.output_dir) else: current_output_dir = options.output_dir if options.symmetrize: if mpi.mpi_comm_size(mpi.MPI_COMM_WORLD) > 1: sp_global_def.ERROR( "Cannot use more than one CPU for symmetry preparation") if not optparse.os.path.exists(current_output_dir): optparse.os.makedirs(current_output_dir) sp_global_def.write_command(current_output_dir) if optparse.os.path.exists( optparse.os.path.join(current_output_dir, "log.txt")): optparse.os.remove( optparse.os.path.join(current_output_dir, "log.txt")) log_main = sp_logger.Logger(sp_logger.BaseLogger_Files()) log_main.prefix = optparse.os.path.join(current_output_dir, "./") instack = args[0] sym = options.sym.lower() if sym == "c1": sp_global_def.ERROR( "There is no need to symmetrize stack for C1 symmetry") line = "" for a in sys.argv: line += " " + a log_main.add(line) if instack[:4] != "bdb:": # if output_dir =="./": stack = "bdb:data" stack = "bdb:" + current_output_dir + "/data" sp_utilities.delete_bdb(stack) junk = sp_utilities.cmdexecute("sp_cpy.py " + instack + " " + stack) else: stack = instack qt = EMAN2_cppwrap.EMUtil.get_all_attributes(stack, "xform.projection") na = len(qt) ts = sp_utilities.get_symt(sym) ks = len(ts) angsa = [None] * na for k in range(ks): # Qfile = "Q%1d"%k # if options.output_dir!="./": Qfile = os.path.join(options.output_dir,"Q%1d"%k) Qfile = optparse.os.path.join(current_output_dir, "Q%1d" % k) # delete_bdb("bdb:Q%1d"%k) sp_utilities.delete_bdb("bdb:" + Qfile) # junk = cmdexecute("e2bdb.py "+stack+" --makevstack=bdb:Q%1d"%k) junk = 
sp_utilities.cmdexecute("e2bdb.py " + stack + " --makevstack=bdb:" + Qfile) # DB = db_open_dict("bdb:Q%1d"%k) DB = EMAN2db.db_open_dict("bdb:" + Qfile) for i in range(na): ut = qt[i] * ts[k] DB.set_attr(i, "xform.projection", ut) # bt = ut.get_params("spider") # angsa[i] = [round(bt["phi"],3)%360.0, round(bt["theta"],3)%360.0, bt["psi"], -bt["tx"], -bt["ty"]] # write_text_row(angsa, 'ptsma%1d.txt'%k) # junk = cmdexecute("e2bdb.py "+stack+" --makevstack=bdb:Q%1d"%k) # junk = cmdexecute("sxheader.py bdb:Q%1d --params=xform.projection --import=ptsma%1d.txt"%(k,k)) DB.close() # if options.output_dir =="./": delete_bdb("bdb:sdata") sp_utilities.delete_bdb("bdb:" + current_output_dir + "/" + "sdata") # junk = cmdexecute("e2bdb.py . --makevstack=bdb:sdata --filt=Q") sdata = "bdb:" + current_output_dir + "/" + "sdata" sp_global_def.sxprint(sdata) junk = sp_utilities.cmdexecute("e2bdb.py " + current_output_dir + " --makevstack=" + sdata + " --filt=Q") # junk = cmdexecute("ls EMAN2DB/sdata*") # a = get_im("bdb:sdata") a = sp_utilities.get_im(sdata) a.set_attr("variabilitysymmetry", sym) # a.write_image("bdb:sdata") a.write_image(sdata) else: myid = mpi.mpi_comm_rank(mpi.MPI_COMM_WORLD) number_of_proc = mpi.mpi_comm_size(mpi.MPI_COMM_WORLD) main_node = 0 shared_comm = mpi.mpi_comm_split_type(mpi.MPI_COMM_WORLD, mpi.MPI_COMM_TYPE_SHARED, 0, mpi.MPI_INFO_NULL) myid_on_node = mpi.mpi_comm_rank(shared_comm) no_of_processes_per_group = mpi.mpi_comm_size(shared_comm) masters_from_groups_vs_everything_else_comm = mpi.mpi_comm_split( mpi.MPI_COMM_WORLD, main_node == myid_on_node, myid_on_node) color, no_of_groups, balanced_processor_load_on_nodes = sp_utilities.get_colors_and_subsets( main_node, mpi.MPI_COMM_WORLD, myid, shared_comm, myid_on_node, masters_from_groups_vs_everything_else_comm, ) overhead_loading = options.overhead * number_of_proc # memory_per_node = options.memory_per_node # if memory_per_node == -1.: memory_per_node = 2.*no_of_processes_per_group keepgoing = 1 
current_window = options.window current_decimate = options.decimate if len(args) == 1: stack = args[0] else: sp_global_def.sxprint("Usage: " + usage) sp_global_def.sxprint("Please run '" + progname + " -h' for detailed options") sp_global_def.ERROR( "Invalid number of parameters used. Please see usage information above." ) return t0 = time.time() # obsolete flags options.MPI = True # options.nvec = 0 options.radiuspca = -1 options.iter = 40 options.abs = 0.0 options.squ = 0.0 if options.fl > 0.0 and options.aa == 0.0: sp_global_def.ERROR( "Fall off has to be given for the low-pass filter", myid=myid) # if options.VAR and options.SND: # ERROR( "Only one of var and SND can be set!",myid=myid ) if options.VAR and (options.ave2D or options.ave3D or options.var2D): sp_global_def.ERROR( "When VAR is set, the program cannot output ave2D, ave3D or var2D", myid=myid, ) # if options.SND and (options.ave2D or options.ave3D): # ERROR( "When SND is set, the program cannot output ave2D or ave3D", myid=myid ) # if options.nvec > 0 : # ERROR( "PCA option not implemented", myid=myid ) # if options.nvec > 0 and options.ave3D == None: # ERROR( "When doing PCA analysis, one must set ave3D", myid=myid ) if current_decimate > 1.0 or current_decimate < 0.0: sp_global_def.ERROR( "Decimate rate should be a value between 0.0 and 1.0", myid=myid) if current_window < 0.0: sp_global_def.ERROR( "Target window size should be always larger than zero", myid=myid) if myid == main_node: img = sp_utilities.get_image(stack, 0) nx = img.get_xsize() ny = img.get_ysize() if min(nx, ny) < current_window: keepgoing = 0 keepgoing = sp_utilities.bcast_number_to_all(keepgoing, main_node, mpi.MPI_COMM_WORLD) if keepgoing == 0: sp_global_def.ERROR( "The target window size cannot be larger than the size of decimated image", myid=myid, ) options.sym = options.sym.lower() # if global_def.CACHE_DISABLE: # from utilities import disable_bdb_cache # disable_bdb_cache() # global_def.BATCH = True if myid == main_node: 
if not optparse.os.path.exists(current_output_dir): optparse.os.makedirs( current_output_dir ) # Never delete output_dir in the program! img_per_grp = options.img_per_grp # nvec = options.nvec radiuspca = options.radiuspca # if os.path.exists(os.path.join(options.output_dir, "log.txt")): os.remove(os.path.join(options.output_dir, "log.txt")) log_main = sp_logger.Logger(sp_logger.BaseLogger_Files()) log_main.prefix = optparse.os.path.join(current_output_dir, "./") if myid == main_node: line = "" for a in sys.argv: line += " " + a log_main.add(line) log_main.add("-------->>>Settings given by all options<<<-------") log_main.add("Symmetry : %s" % options.sym) log_main.add("Input stack : %s" % stack) log_main.add("Output_dir : %s" % current_output_dir) if options.ave3D: log_main.add("Ave3d : %s" % options.ave3D) if options.var3D: log_main.add("Var3d : %s" % options.var3D) if options.ave2D: log_main.add("Ave2D : %s" % options.ave2D) if options.var2D: log_main.add("Var2D : %s" % options.var2D) if options.VAR: log_main.add("VAR : True") else: log_main.add("VAR : False") if options.CTF: log_main.add("CTF correction : True ") else: log_main.add("CTF correction : False ") log_main.add("Image per group : %5d" % options.img_per_grp) log_main.add("Image decimate rate : %4.3f" % current_decimate) log_main.add("Low pass filter : %4.3f" % options.fl) current_fl = options.fl if current_fl == 0.0: current_fl = 0.5 log_main.add( "Current low pass filter is equivalent to cutoff frequency %4.3f for original image size" % round((current_fl * current_decimate), 3)) log_main.add("Window size : %5d " % current_window) log_main.add("sx3dvariability begins") symbaselen = 0 if myid == main_node: nima = EMAN2_cppwrap.EMUtil.get_image_count(stack) img = sp_utilities.get_image(stack) nx = img.get_xsize() ny = img.get_ysize() nnxo = nx nnyo = ny if options.sym != "c1": imgdata = sp_utilities.get_im(stack) try: i = imgdata.get_attr("variabilitysymmetry").lower() if i != options.sym: 
sp_global_def.ERROR( "The symmetry provided does not agree with the symmetry of the input stack", myid=myid, ) except: sp_global_def.ERROR( "Input stack is not prepared for symmetry, please follow instructions", myid=myid, ) i = len(sp_utilities.get_symt(options.sym)) if (old_div(nima, i)) * i != nima: sp_global_def.ERROR( "The length of the input stack is incorrect for symmetry processing", myid=myid, ) symbaselen = old_div(nima, i) else: symbaselen = nima else: nima = 0 nx = 0 ny = 0 nnxo = 0 nnyo = 0 nima = sp_utilities.bcast_number_to_all(nima) nx = sp_utilities.bcast_number_to_all(nx) ny = sp_utilities.bcast_number_to_all(ny) nnxo = sp_utilities.bcast_number_to_all(nnxo) nnyo = sp_utilities.bcast_number_to_all(nnyo) if current_window > max(nx, ny): sp_global_def.ERROR( "Window size is larger than the original image size") if current_decimate == 1.0: if current_window != 0: nx = current_window ny = current_window else: if current_window == 0: nx = int(nx * current_decimate + 0.5) ny = int(ny * current_decimate + 0.5) else: nx = int(current_window * current_decimate + 0.5) ny = nx symbaselen = sp_utilities.bcast_number_to_all(symbaselen) # check FFT prime number is_fft_friendly = nx == sp_fundamentals.smallprime(nx) if not is_fft_friendly: if myid == main_node: log_main.add( "The target image size is not a product of small prime numbers" ) log_main.add("Program adjusts the input settings!") ### two cases if current_decimate == 1.0: nx = sp_fundamentals.smallprime(nx) ny = nx current_window = nx # update if myid == main_node: log_main.add("The window size is updated to %d." % current_window) else: if current_window == 0: nx = sp_fundamentals.smallprime( int(nx * current_decimate + 0.5)) current_decimate = old_div(float(nx), nnxo) ny = nx if myid == main_node: log_main.add("The decimate rate is updated to %f." 
% current_decimate) else: nx = sp_fundamentals.smallprime( int(current_window * current_decimate + 0.5)) ny = nx current_window = int(old_div(nx, current_decimate) + 0.5) if myid == main_node: log_main.add("The window size is updated to %d." % current_window) if myid == main_node: log_main.add("The target image size is %d" % nx) if radiuspca == -1: radiuspca = old_div(nx, 2) - 2 if myid == main_node: log_main.add("%-70s: %d\n" % ("Number of projection", nima)) img_begin, img_end = sp_applications.MPI_start_end( nima, number_of_proc, myid) """Multiline Comment0""" """ Comments from adnan, replace index_of_proj to index_of_particle, index_of_proj was not defined also varList is not defined not made an empty list there """ if options.VAR: # 2D variance images have no shifts varList = [] # varList = EMData.read_images(stack, range(img_begin, img_end)) for index_of_particle in range(img_begin, img_end): image = sp_utilities.get_im(stack, index_of_particle) if current_window > 0: varList.append( sp_fundamentals.fdecimate( sp_fundamentals.window2d(image, current_window, current_window), nx, ny, )) else: varList.append(sp_fundamentals.fdecimate(image, nx, ny)) else: if myid == main_node: t1 = time.time() proj_angles = [] aveList = [] tab = EMAN2_cppwrap.EMUtil.get_all_attributes( stack, "xform.projection") for i in range(nima): t = tab[i].get_params("spider") phi = t["phi"] theta = t["theta"] psi = t["psi"] x = theta if x > 90.0: x = 180.0 - x x = x * 10000 + psi proj_angles.append([x, t["phi"], t["theta"], t["psi"], i]) t2 = time.time() log_main.add( "%-70s: %d\n" % ("Number of neighboring projections", img_per_grp)) log_main.add("...... 
Finding neighboring projections\n") log_main.add("Number of images per group: %d" % img_per_grp) log_main.add("Now grouping projections") proj_angles.sort() proj_angles_list = numpy.full((nima, 4), 0.0, dtype=numpy.float32) for i in range(nima): proj_angles_list[i][0] = proj_angles[i][1] proj_angles_list[i][1] = proj_angles[i][2] proj_angles_list[i][2] = proj_angles[i][3] proj_angles_list[i][3] = proj_angles[i][4] else: proj_angles_list = 0 proj_angles_list = sp_utilities.wrap_mpi_bcast( proj_angles_list, main_node, mpi.MPI_COMM_WORLD) proj_angles = [] for i in range(nima): proj_angles.append([ proj_angles_list[i][0], proj_angles_list[i][1], proj_angles_list[i][2], int(proj_angles_list[i][3]), ]) del proj_angles_list proj_list, mirror_list = sp_utilities.nearest_proj( proj_angles, img_per_grp, range(img_begin, img_end)) all_proj = [] for im in proj_list: for jm in im: all_proj.append(proj_angles[jm][3]) all_proj = list(set(all_proj)) index = {} for i in range(len(all_proj)): index[all_proj[i]] = i mpi.mpi_barrier(mpi.MPI_COMM_WORLD) if myid == main_node: log_main.add("%-70s: %.2f\n" % ("Finding neighboring projections lasted [s]", time.time() - t2)) log_main.add("%-70s: %d\n" % ("Number of groups processed on the main node", len(proj_list))) log_main.add("Grouping projections took: %12.1f [m]" % (old_div((time.time() - t2), 60.0))) log_main.add("Number of groups on main node: ", len(proj_list)) mpi.mpi_barrier(mpi.MPI_COMM_WORLD) if myid == main_node: log_main.add("...... Calculating the stack of 2D variances \n") # Memory estimation. There are two memory consumption peaks # peak 1. Compute ave, var; # peak 2. 
Var volume reconstruction; # proj_params = [0.0]*(nima*5) aveList = [] varList = [] # if nvec > 0: eigList = [[] for i in range(nvec)] dnumber = len( all_proj) # all neighborhood set for assigned to myid pnumber = len(proj_list) * 2.0 + img_per_grp # aveList and varList tnumber = dnumber + pnumber vol_size2 = old_div(nx**3 * 4.0 * 8, 1.0e9) vol_size1 = old_div(2.0 * nnxo**3 * 4.0 * 8, 1.0e9) proj_size = old_div(nnxo * nnyo * len(proj_list) * 4.0 * 2.0, 1.0e9) # both aveList and varList orig_data_size = old_div(nnxo * nnyo * 4.0 * tnumber, 1.0e9) reduced_data_size = old_div(nx * nx * 4.0 * tnumber, 1.0e9) full_data = numpy.full((number_of_proc, 2), -1.0, dtype=numpy.float16) full_data[myid] = orig_data_size, reduced_data_size if myid != main_node: sp_utilities.wrap_mpi_send(full_data, main_node, mpi.MPI_COMM_WORLD) if myid == main_node: for iproc in range(number_of_proc): if iproc != main_node: dummy = sp_utilities.wrap_mpi_recv( iproc, mpi.MPI_COMM_WORLD) full_data[numpy.where(dummy > -1)] = dummy[numpy.where( dummy > -1)] del dummy mpi.mpi_barrier(mpi.MPI_COMM_WORLD) full_data = sp_utilities.wrap_mpi_bcast(full_data, main_node, mpi.MPI_COMM_WORLD) # find the CPU with heaviest load minindx = numpy.argsort(full_data, 0) heavy_load_myid = minindx[-1][1] total_mem = sum(full_data) if myid == main_node: if current_window == 0: log_main.add( "Nx: current image size = %d. Decimated by %f from %d" % (nx, current_decimate, nnxo)) else: log_main.add( "Nx: current image size = %d. 
Windowed to %d, and decimated by %f from %d" % (nx, current_window, current_decimate, nnxo)) log_main.add("Nproj: number of particle images.") log_main.add("Navg: number of 2D average images.") log_main.add("Nvar: number of 2D variance images.") log_main.add( "Img_per_grp: user defined image per group for averaging = %d" % img_per_grp) log_main.add( "Overhead: total python overhead memory consumption = %f" % overhead_loading) log_main.add( "Total memory) = 4.0*nx^2*(nproj + navg +nvar+ img_per_grp)/1.0e9 + overhead: %12.3f [GB]" % (total_mem[1] + overhead_loading)) del full_data mpi.mpi_barrier(mpi.MPI_COMM_WORLD) if myid == heavy_load_myid: log_main.add( "Begin reading and preprocessing images on processor. Wait... " ) ttt = time.time() # imgdata = EMData.read_images(stack, all_proj) imgdata = [None for im in range(len(all_proj))] for index_of_proj in range(len(all_proj)): # image = get_im(stack, all_proj[index_of_proj]) if current_window > 0: imgdata[index_of_proj] = sp_fundamentals.fdecimate( sp_fundamentals.window2d( sp_utilities.get_im(stack, all_proj[index_of_proj]), current_window, current_window, ), nx, ny, ) else: imgdata[index_of_proj] = sp_fundamentals.fdecimate( sp_utilities.get_im(stack, all_proj[index_of_proj]), nx, ny) if current_decimate > 0.0 and options.CTF: ctf = imgdata[index_of_proj].get_attr("ctf") ctf.apix = old_div(ctf.apix, current_decimate) imgdata[index_of_proj].set_attr("ctf", ctf) if myid == heavy_load_myid and index_of_proj % 100 == 0: log_main.add( " ...... %6.2f%% " % (old_div(index_of_proj, float(len(all_proj))) * 100.0)) mpi.mpi_barrier(mpi.MPI_COMM_WORLD) if myid == heavy_load_myid: log_main.add("All_proj preprocessing cost %7.2f m" % (old_div( (time.time() - ttt), 60.0))) log_main.add("Wait untill reading on all CPUs done...") """Multiline Comment1""" if not options.no_norm: mask = sp_utilities.model_circle(old_div(nx, 2) - 2, nx, nx) if myid == heavy_load_myid: log_main.add("Start computing 2D aveList and varList. 
Wait...") ttt = time.time() inner = old_div(nx, 2) - 4 outer = inner + 2 xform_proj_for_2D = [None for i in range(len(proj_list))] for i in range(len(proj_list)): ki = proj_angles[proj_list[i][0]][3] if ki >= symbaselen: continue mi = index[ki] dpar = EMAN2_cppwrap.Util.get_transform_params( imgdata[mi], "xform.projection", "spider") phiM, thetaM, psiM, s2xM, s2yM = ( dpar["phi"], dpar["theta"], dpar["psi"], -dpar["tx"] * current_decimate, -dpar["ty"] * current_decimate, ) grp_imgdata = [] for j in range(img_per_grp): mj = index[proj_angles[proj_list[i][j]][3]] cpar = EMAN2_cppwrap.Util.get_transform_params( imgdata[mj], "xform.projection", "spider") alpha, sx, sy, mirror = params_3D_2D_NEW( cpar["phi"], cpar["theta"], cpar["psi"], -cpar["tx"] * current_decimate, -cpar["ty"] * current_decimate, mirror_list[i][j], ) if thetaM <= 90: if mirror == 0: alpha, sx, sy, scale = sp_utilities.compose_transform2( alpha, sx, sy, 1.0, phiM - cpar["phi"], 0.0, 0.0, 1.0) else: alpha, sx, sy, scale = sp_utilities.compose_transform2( alpha, sx, sy, 1.0, 180 - (phiM - cpar["phi"]), 0.0, 0.0, 1.0, ) else: if mirror == 0: alpha, sx, sy, scale = sp_utilities.compose_transform2( alpha, sx, sy, 1.0, -(phiM - cpar["phi"]), 0.0, 0.0, 1.0) else: alpha, sx, sy, scale = sp_utilities.compose_transform2( alpha, sx, sy, 1.0, -(180 - (phiM - cpar["phi"])), 0.0, 0.0, 1.0, ) imgdata[mj].set_attr( "xform.align2d", EMAN2_cppwrap.Transform({ "type": "2D", "alpha": alpha, "tx": sx, "ty": sy, "mirror": mirror, "scale": 1.0, }), ) grp_imgdata.append(imgdata[mj]) if not options.no_norm: for k in range(img_per_grp): ave, std, minn, maxx = EMAN2_cppwrap.Util.infomask( grp_imgdata[k], mask, False) grp_imgdata[k] -= ave grp_imgdata[k] = old_div(grp_imgdata[k], std) if options.fl > 0.0: for k in range(img_per_grp): grp_imgdata[k] = sp_filter.filt_tanl( grp_imgdata[k], options.fl, options.aa) # Because of background issues, only linear option works. 
if options.CTF: ave, var = sp_statistics.aves_wiener( grp_imgdata, SNR=1.0e5, interpolation_method="linear") else: ave, var = sp_statistics.ave_var(grp_imgdata) # Switch to std dev # threshold is not really needed,it is just in case due to numerical accuracy something turns out negative. var = sp_morphology.square_root(sp_morphology.threshold(var)) sp_utilities.set_params_proj(ave, [phiM, thetaM, 0.0, 0.0, 0.0]) sp_utilities.set_params_proj(var, [phiM, thetaM, 0.0, 0.0, 0.0]) aveList.append(ave) varList.append(var) xform_proj_for_2D[i] = [phiM, thetaM, 0.0, 0.0, 0.0] """Multiline Comment2""" if (myid == heavy_load_myid) and (i % 100 == 0): log_main.add(" ......%6.2f%% " % (old_div(i, float(len(proj_list))) * 100.0)) del imgdata, grp_imgdata, cpar, dpar, all_proj, proj_angles, index if not options.no_norm: del mask if myid == main_node: del tab # At this point, all averages and variances are computed mpi.mpi_barrier(mpi.MPI_COMM_WORLD) if myid == heavy_load_myid: log_main.add("Computing aveList and varList took %12.1f [m]" % (old_div((time.time() - ttt), 60.0))) xform_proj_for_2D = sp_utilities.wrap_mpi_gatherv( xform_proj_for_2D, main_node, mpi.MPI_COMM_WORLD) if myid == main_node: sp_utilities.write_text_row( [str(entry) for entry in xform_proj_for_2D], optparse.os.path.join(current_output_dir, "params.txt"), ) del xform_proj_for_2D mpi.mpi_barrier(mpi.MPI_COMM_WORLD) if options.ave2D: if myid == main_node: log_main.add("Compute ave2D ... 
") km = 0 for i in range(number_of_proc): if i == main_node: for im in range(len(aveList)): aveList[im].write_image( optparse.os.path.join( current_output_dir, options.ave2D), km, ) km += 1 else: nl = mpi.mpi_recv( 1, mpi.MPI_INT, i, sp_global_def.SPARX_MPI_TAG_UNIVERSAL, mpi.MPI_COMM_WORLD, ) nl = int(nl[0]) for im in range(nl): ave = sp_utilities.recv_EMData( i, im + i + 70000) """Multiline Comment3""" tmpvol = sp_fundamentals.fpol(ave, nx, nx, 1) tmpvol.write_image( optparse.os.path.join( current_output_dir, options.ave2D), km, ) km += 1 else: mpi.mpi_send( len(aveList), 1, mpi.MPI_INT, main_node, sp_global_def.SPARX_MPI_TAG_UNIVERSAL, mpi.MPI_COMM_WORLD, ) for im in range(len(aveList)): sp_utilities.send_EMData(aveList[im], main_node, im + myid + 70000) """Multiline Comment4""" if myid == main_node: sp_applications.header( optparse.os.path.join(current_output_dir, options.ave2D), params="xform.projection", fimport=optparse.os.path.join(current_output_dir, "params.txt"), ) mpi.mpi_barrier(mpi.MPI_COMM_WORLD) if options.ave3D: t5 = time.time() if myid == main_node: log_main.add("Reconstruct ave3D ... 
") ave3D = sp_reconstruction.recons3d_4nn_MPI( myid, aveList, symmetry=options.sym, npad=options.npad) sp_utilities.bcast_EMData_to_all(ave3D, myid) if myid == main_node: if current_decimate != 1.0: ave3D = sp_fundamentals.resample( ave3D, old_div(1.0, current_decimate)) ave3D = sp_fundamentals.fpol( ave3D, nnxo, nnxo, nnxo) # always to the orignal image size sp_utilities.set_pixel_size(ave3D, 1.0) ave3D.write_image( optparse.os.path.join(current_output_dir, options.ave3D)) log_main.add("Ave3D reconstruction took %12.1f [m]" % (old_div((time.time() - t5), 60.0))) log_main.add("%-70s: %s\n" % ("The reconstructed ave3D is saved as ", options.ave3D)) mpi.mpi_barrier(mpi.MPI_COMM_WORLD) del ave, var, proj_list, stack, alpha, sx, sy, mirror, aveList """Multiline Comment5""" if options.ave3D: del ave3D if options.var2D: if myid == main_node: log_main.add("Compute var2D...") km = 0 for i in range(number_of_proc): if i == main_node: for im in range(len(varList)): tmpvol = sp_fundamentals.fpol( varList[im], nx, nx, 1) tmpvol.write_image( optparse.os.path.join( current_output_dir, options.var2D), km, ) km += 1 else: nl = mpi.mpi_recv( 1, mpi.MPI_INT, i, sp_global_def.SPARX_MPI_TAG_UNIVERSAL, mpi.MPI_COMM_WORLD, ) nl = int(nl[0]) for im in range(nl): ave = sp_utilities.recv_EMData( i, im + i + 70000) tmpvol = sp_fundamentals.fpol(ave, nx, nx, 1) tmpvol.write_image( optparse.os.path.join( current_output_dir, options.var2D), km, ) km += 1 else: mpi.mpi_send( len(varList), 1, mpi.MPI_INT, main_node, sp_global_def.SPARX_MPI_TAG_UNIVERSAL, mpi.MPI_COMM_WORLD, ) for im in range(len(varList)): sp_utilities.send_EMData( varList[im], main_node, im + myid + 70000) # What with the attributes?? 
mpi.mpi_barrier(mpi.MPI_COMM_WORLD) if myid == main_node: sp_applications.header( optparse.os.path.join(current_output_dir, options.var2D), params="xform.projection", fimport=optparse.os.path.join(current_output_dir, "params.txt"), ) mpi.mpi_barrier(mpi.MPI_COMM_WORLD) if options.var3D: if myid == main_node: log_main.add("Reconstruct var3D ...") t6 = time.time() # radiusvar = options.radius # if( radiusvar < 0 ): radiusvar = nx//2 -3 res = sp_reconstruction.recons3d_4nn_MPI(myid, varList, symmetry=options.sym, npad=options.npad) # res = recons3d_em_MPI(varList, vol_stack, options.iter, radiusvar, options.abs, True, options.sym, options.squ) if myid == main_node: if current_decimate != 1.0: res = sp_fundamentals.resample( res, old_div(1.0, current_decimate)) res = sp_fundamentals.fpol(res, nnxo, nnxo, nnxo) sp_utilities.set_pixel_size(res, 1.0) res.write_image(os.path.join(current_output_dir, options.var3D)) log_main.add( "%-70s: %s\n" % ("The reconstructed var3D is saved as ", options.var3D)) log_main.add("Var3D reconstruction took %f12.1 [m]" % (old_div( (time.time() - t6), 60.0))) log_main.add("Total computation time %f12.1 [m]" % (old_div( (time.time() - t0), 60.0))) log_main.add("sx3dvariability finishes") if RUNNING_UNDER_MPI: sp_global_def.MPI = False sp_global_def.BATCH = False
def main():
    """
    Command-line entry point: post-process ("sharpen") ISAC 2D class averages under MPI.

    Reads an ISAC run directory, recombines per-particle alignment parameters,
    recomputes/refines each class average on a subset of MPI ranks, optionally
    adjusts the averages' power spectra (analytic model / user 1D PW / B-factor),
    writes class_averages.hdf plus parameter doc files into the output directory,
    and finally calls sp_chains.py to produce ordered_class_averages.hdf.

    Side effects: mutates the module globals Tracker and Blockdata, sets
    sp_global_def.BATCH/MPI, creates the output directory tree, and performs
    collective MPI communication (every rank must call this together).
    """
    global Tracker, Blockdata

    # ---------------- Command-line interface ----------------
    progname = os.path.basename(sys.argv[0])
    usage = progname + " --output_dir=output_dir --isac_dir=output_dir_of_isac "
    parser = optparse.OptionParser(usage, version=sp_global_def.SPARXVERSION)
    parser.add_option(
        "--pw_adjustment",
        type="string",
        default="analytical_model",
        help=
        "adjust power spectrum of 2-D averages to an analytic model. Other opions: no_adjustment; bfactor; a text file of 1D rotationally averaged PW",
    )
    #### Four options for --pw_adjustment:
    # 1> analytical_model(default);
    # 2> no_adjustment;
    # 3> bfactor;
    # 4> adjust_to_given_pw2(user has to provide a text file that contains 1D rotationally averaged PW)

    # options in common
    parser.add_option(
        "--isac_dir",
        type="string",
        default="",
        help="ISAC run output directory, input directory for this command",
    )
    parser.add_option(
        "--output_dir",
        type="string",
        default="",
        help="output directory where computed averages are saved",
    )
    parser.add_option(
        "--pixel_size",
        type="float",
        default=-1.0,
        help=
        "pixel_size of raw images. one can put 1.0 in case of negative stain data",
    )
    parser.add_option(
        "--fl",
        type="float",
        default=-1.0,
        help=
        "low pass filter, = -1.0, not applied; =0.0, using FH1 (initial resolution), = 1.0 using FH2 (resolution after local alignment), or user provided value in absolute freqency [0.0:0.5]",
    )
    parser.add_option("--stack",
                      type="string",
                      default="",
                      help="data stack used in ISAC")
    parser.add_option("--radius", type="int", default=-1, help="radius")
    parser.add_option("--xr",
                      type="float",
                      default=-1.0,
                      help="local alignment search range")
    # parser.add_option("--ts", type ="float", default =1.0, help= "local alignment search step")
    parser.add_option(
        "--fh",
        type="float",
        default=-1.0,
        help="local alignment high frequencies limit",
    )
    # parser.add_option("--maxit", type ="int", default =5, help= "local alignment iterations")
    parser.add_option("--navg",
                      type="int",
                      default=1000000,
                      help="number of aveages")
    parser.add_option(
        "--local_alignment",
        action="store_true",
        default=False,
        help="do local alignment",
    )
    parser.add_option(
        "--noctf",
        action="store_true",
        default=False,
        help=
        "no ctf correction, useful for negative stained data. always ctf for cryo data",
    )
    parser.add_option(
        "--B_start",
        type="float",
        default=45.0,
        help=
        "start frequency (Angstrom) of power spectrum for B_factor estimation",
    )
    parser.add_option(
        "--Bfactor",
        type="float",
        default=-1.0,
        help=
        "User defined bactors (e.g. 25.0[A^2]). By default, the program automatically estimates B-factor. ",
    )

    (options, args) = parser.parse_args(sys.argv[1:])

    # Decode --pw_adjustment into four mutually exclusive boolean mode flags.
    # Any value other than the three keywords is treated as a path to a user
    # 1D power-spectrum file (adjust_to_given_pw2).
    adjust_to_analytic_model = (True if options.pw_adjustment
                                == "analytical_model" else False)
    no_adjustment = True if options.pw_adjustment == "no_adjustment" else False
    B_enhance = True if options.pw_adjustment == "bfactor" else False
    adjust_to_given_pw2 = (
        True if not (adjust_to_analytic_model or no_adjustment or B_enhance)
        else False)

    # ---------------- MPI topology setup ----------------
    # mpi
    nproc = mpi.mpi_comm_size(mpi.MPI_COMM_WORLD)
    myid = mpi.mpi_comm_rank(mpi.MPI_COMM_WORLD)

    Blockdata = {}
    Blockdata["nproc"] = nproc
    Blockdata["myid"] = myid
    Blockdata["main_node"] = 0
    # Split ranks by shared-memory node so per-node groups can be formed.
    Blockdata["shared_comm"] = mpi.mpi_comm_split_type(
        mpi.MPI_COMM_WORLD, mpi.MPI_COMM_TYPE_SHARED, 0, mpi.MPI_INFO_NULL)
    Blockdata["myid_on_node"] = mpi.mpi_comm_rank(Blockdata["shared_comm"])
    Blockdata["no_of_processes_per_group"] = mpi.mpi_comm_size(
        Blockdata["shared_comm"])
    masters_from_groups_vs_everything_else_comm = mpi.mpi_comm_split(
        mpi.MPI_COMM_WORLD,
        Blockdata["main_node"] == Blockdata["myid_on_node"],
        Blockdata["myid_on_node"],
    )
    Blockdata["color"], Blockdata[
        "no_of_groups"], balanced_processor_load_on_nodes = sp_utilities.get_colors_and_subsets(
            Blockdata["main_node"],
            mpi.MPI_COMM_WORLD,
            Blockdata["myid"],
            Blockdata["shared_comm"],
            Blockdata["myid_on_node"],
            masters_from_groups_vs_everything_else_comm,
        )
    # We need two nodes for processing of volumes
    Blockdata["node_volume"] = [
        Blockdata["no_of_groups"] - 3,
        Blockdata["no_of_groups"] - 2,
        Blockdata["no_of_groups"] - 1,
    ]  # For 3D stuff take three last nodes
    # We need two CPUs for processing of volumes, they are taken to be main CPUs on each volume
    # We have to send the two myids to all nodes so we can identify main nodes on two selected groups.
    Blockdata["nodes"] = [
        Blockdata["node_volume"][0] * Blockdata["no_of_processes_per_group"],
        Blockdata["node_volume"][1] * Blockdata["no_of_processes_per_group"],
        Blockdata["node_volume"][2] * Blockdata["no_of_processes_per_group"],
    ]
    # End of Blockdata: sorting requires at least three nodes, and the used number of nodes be integer times of three
    sp_global_def.BATCH = True
    sp_global_def.MPI = True

    # If the user supplied a 1D PW file, verify it exists on the main node and
    # broadcast the result so all ranks abort together if it is missing.
    if adjust_to_given_pw2:
        checking_flag = 0
        if Blockdata["myid"] == Blockdata["main_node"]:
            if not os.path.exists(options.pw_adjustment):
                checking_flag = 1
        checking_flag = sp_utilities.bcast_number_to_all(
            checking_flag, Blockdata["main_node"], mpi.MPI_COMM_WORLD)
        if checking_flag == 1:
            sp_global_def.ERROR("User provided power spectrum does not exist",
                                myid=Blockdata["myid"])

    # ---------------- Tracker / constants ----------------
    Tracker = {}
    Constants = {}
    Constants["isac_dir"] = options.isac_dir
    Constants["masterdir"] = options.output_dir
    Constants["pixel_size"] = options.pixel_size
    Constants["orgstack"] = options.stack
    Constants["radius"] = options.radius
    Constants["xrange"] = options.xr
    Constants["FH"] = options.fh
    Constants["low_pass_filter"] = options.fl
    # Constants["maxit"] = options.maxit
    Constants["navg"] = options.navg
    Constants["B_start"] = options.B_start
    Constants["Bfactor"] = options.Bfactor
    if adjust_to_given_pw2:
        Constants["modelpw"] = options.pw_adjustment
    Tracker["constants"] = Constants
    # -------------------------------------------------------------
    #
    # Create and initialize Tracker dictionary with input options  # State Variables
    # <<<---------------------->>>imported functions<<<---------------------------------------------
    # x_range = max(Tracker["constants"]["xrange"], int(1./Tracker["ini_shrink"])+1)
    # y_range = x_range
    ####-----------------------------------------------------------
    # Create Master directory and associated subdirectories
    line = time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime()) + " =>"
    if Tracker["constants"]["masterdir"] == Tracker["constants"]["isac_dir"]:
        masterdir = os.path.join(Tracker["constants"]["isac_dir"], "sharpen")
    else:
        masterdir = Tracker["constants"]["masterdir"]

    # Main node creates the output directory tree; its path length is then
    # broadcast so every rank can receive the (character-array) path itself.
    if Blockdata["myid"] == Blockdata["main_node"]:
        msg = "Postprocessing ISAC 2D averages starts"
        sp_global_def.sxprint(line, "Postprocessing ISAC 2D averages starts")
        if not masterdir:
            timestring = time.strftime("_%d_%b_%Y_%H_%M_%S", time.localtime())
            masterdir = "sharpen_" + Tracker["constants"]["isac_dir"]
            os.makedirs(masterdir)
        else:
            if os.path.exists(masterdir):
                sp_global_def.sxprint("%s already exists" % masterdir)
            else:
                os.makedirs(masterdir)
        sp_global_def.write_command(masterdir)
        subdir_path = os.path.join(masterdir, "ali2d_local_params_avg")
        if not os.path.exists(subdir_path):
            os.mkdir(subdir_path)
        subdir_path = os.path.join(masterdir, "params_avg")
        if not os.path.exists(subdir_path):
            os.mkdir(subdir_path)
        li = len(masterdir)
    else:
        li = 0
    li = mpi.mpi_bcast(li, 1, mpi.MPI_INT, Blockdata["main_node"],
                       mpi.MPI_COMM_WORLD)[0]
    masterdir = mpi.mpi_bcast(masterdir, li, mpi.MPI_CHAR,
                              Blockdata["main_node"], mpi.MPI_COMM_WORLD)
    # mpi_bcast of MPI_CHAR yields a sequence of single bytes; reassemble.
    masterdir = b"".join(masterdir).decode('latin1')
    Tracker["constants"]["masterdir"] = masterdir
    log_main = sp_logger.Logger(sp_logger.BaseLogger_Files())
    log_main.prefix = Tracker["constants"]["masterdir"] + "/"

    # Spin until the shared filesystem shows the directory on every rank.
    while not os.path.exists(Tracker["constants"]["masterdir"]):
        sp_global_def.sxprint(
            "Node ",
            Blockdata["myid"],
            " waiting...",
            Tracker["constants"]["masterdir"],
        )
        time.sleep(1)
    mpi.mpi_barrier(mpi.MPI_COMM_WORLD)

    # ---------------- Read ISAC initial 2D params (main node), broadcast ----------------
    if Blockdata["myid"] == Blockdata["main_node"]:
        init_dict = {}
        sp_global_def.sxprint(Tracker["constants"]["isac_dir"])
        Tracker["directory"] = os.path.join(Tracker["constants"]["isac_dir"],
                                            "2dalignment")
        core = sp_utilities.read_text_row(
            os.path.join(Tracker["directory"], "initial2Dparams.txt"))
        for im in range(len(core)):
            init_dict[im] = core[im]
        del core
    else:
        init_dict = 0
    init_dict = sp_utilities.wrap_mpi_bcast(init_dict,
                                            Blockdata["main_node"],
                                            communicator=mpi.MPI_COMM_WORLD)
    ###
    do_ctf = True
    if options.noctf:
        do_ctf = False
    if Blockdata["myid"] == Blockdata["main_node"]:
        if do_ctf:
            sp_global_def.sxprint("CTF correction is on")
        else:
            sp_global_def.sxprint("CTF correction is off")
        if options.local_alignment:
            sp_global_def.sxprint("local refinement is on")
        else:
            sp_global_def.sxprint("local refinement is off")
        if B_enhance:
            sp_global_def.sxprint("Bfactor is to be applied on averages")
        elif adjust_to_given_pw2:
            sp_global_def.sxprint(
                "PW of averages is adjusted to a given 1D PW curve")
        elif adjust_to_analytic_model:
            sp_global_def.sxprint(
                "PW of averages is adjusted to analytical model")
        else:
            sp_global_def.sxprint("PW of averages is not adjusted")
        # Tracker["constants"]["orgstack"] = "bdb:"+ os.path.join(Tracker["constants"]["isac_dir"],"../","sparx_stack")
        image = sp_utilities.get_im(Tracker["constants"]["orgstack"], 0)
        Tracker["constants"]["nnxo"] = image.get_xsize()
        # Fall back to the CTF header of the first image when no pixel size
        # was given on the command line.
        if Tracker["constants"]["pixel_size"] == -1.0:
            sp_global_def.sxprint(
                "Pixel size value is not provided by user. extracting it from ctf header entry of the original stack."
            )
            try:
                ctf_params = image.get_attr("ctf")
                Tracker["constants"]["pixel_size"] = ctf_params.apix
            except:
                sp_global_def.ERROR(
                    "Pixel size could not be extracted from the original stack.",
                    myid=Blockdata["myid"],
                )
        ## Now fill in low-pass filter
        isac_shrink_path = os.path.join(Tracker["constants"]["isac_dir"],
                                        "README_shrink_ratio.txt")
        if not os.path.exists(isac_shrink_path):
            sp_global_def.ERROR(
                "%s does not exist in the specified ISAC run output directory"
                % (isac_shrink_path),
                myid=Blockdata["myid"],
            )
        isac_shrink_file = open(isac_shrink_path, "r")
        isac_shrink_lines = isac_shrink_file.readlines()
        isac_shrink_ratio = float(
            isac_shrink_lines[5]
        )  # 6th line: shrink ratio (= [target particle radius]/[particle radius]) used in the ISAC run
        isac_radius = float(
            isac_shrink_lines[6]
        )  # 7th line: particle radius at original pixel size used in the ISAC run
        isac_shrink_file.close()
        print("Extracted parameter values")
        print("ISAC shrink ratio : {0}".format(isac_shrink_ratio))
        print("ISAC particle radius : {0}".format(isac_radius))
        Tracker["ini_shrink"] = isac_shrink_ratio
    else:
        Tracker["ini_shrink"] = 0.0
    Tracker = sp_utilities.wrap_mpi_bcast(Tracker,
                                          Blockdata["main_node"],
                                          communicator=mpi.MPI_COMM_WORLD)

    # print(Tracker["constants"]["pixel_size"], "pixel_size")
    # Search range: at least one unshrunk pixel (ceil of 1/shrink ratio).
    x_range = max(
        Tracker["constants"]["xrange"],
        int(old_div(1.0, Tracker["ini_shrink"]) + 0.99999),
    )
    a_range = y_range = x_range

    # ---------------- Combine per-particle params, grouped by class average ----------------
    if Blockdata["myid"] == Blockdata["main_node"]:
        parameters = sp_utilities.read_text_row(
            os.path.join(Tracker["constants"]["isac_dir"],
                         "all_parameters.txt"))
    else:
        parameters = 0
    parameters = sp_utilities.wrap_mpi_bcast(parameters,
                                             Blockdata["main_node"],
                                             communicator=mpi.MPI_COMM_WORLD)
    params_dict = {}
    list_dict = {}
    # parepare params_dict
    # navg = min(Tracker["constants"]["navg"]*Blockdata["nproc"], EMUtil.get_image_count(os.path.join(Tracker["constants"]["isac_dir"], "class_averages.hdf")))
    navg = min(
        Tracker["constants"]["navg"],
        EMAN2_cppwrap.EMUtil.get_image_count(
            os.path.join(Tracker["constants"]["isac_dir"],
                         "class_averages.hdf")),
    )
    global_dict = {}
    ptl_list = []
    memlist = []
    if Blockdata["myid"] == Blockdata["main_node"]:
        sp_global_def.sxprint("Number of averages computed in this run is %d" %
                              navg)
        for iavg in range(navg):
            params_of_this_average = []
            image = sp_utilities.get_im(
                os.path.join(Tracker["constants"]["isac_dir"],
                             "class_averages.hdf"),
                iavg,
            )
            members = sorted(image.get_attr("members"))
            memlist.append(members)
            for im in range(len(members)):
                abs_id = members[im]
                # global_dict maps particle id -> (class index, position in class)
                global_dict[abs_id] = [iavg, im]
                # Compose initial 2D params with ISAC params (shifts rescaled
                # back to the original pixel size via the shrink ratio).
                P = sp_utilities.combine_params2(
                    init_dict[abs_id][0],
                    init_dict[abs_id][1],
                    init_dict[abs_id][2],
                    init_dict[abs_id][3],
                    parameters[abs_id][0],
                    old_div(parameters[abs_id][1], Tracker["ini_shrink"]),
                    old_div(parameters[abs_id][2], Tracker["ini_shrink"]),
                    parameters[abs_id][3],
                )
                if parameters[abs_id][3] == -1:
                    sp_global_def.sxprint(
                        "WARNING: Image #{0} is an unaccounted particle with invalid 2D alignment parameters and should not be the member of any classes. Please check the consitency of input dataset."
                        .format(abs_id)
                    )  # How to check what is wrong about mirror = -1 (Toshio 2018/01/11)
                params_of_this_average.append([P[0], P[1], P[2], P[3], 1.0])
                ptl_list.append(abs_id)
            params_dict[iavg] = params_of_this_average
            list_dict[iavg] = members
            sp_utilities.write_text_row(
                params_of_this_average,
                os.path.join(
                    Tracker["constants"]["masterdir"],
                    "params_avg",
                    "params_avg_%03d.txt" % iavg,
                ),
            )
        ptl_list.sort()
        init_params = [None for im in range(len(ptl_list))]
        for im in range(len(ptl_list)):
            init_params[im] = [ptl_list[im]] + params_dict[global_dict[
                ptl_list[im]][0]][global_dict[ptl_list[im]][1]]
        sp_utilities.write_text_row(
            init_params,
            os.path.join(Tracker["constants"]["masterdir"],
                         "init_isac_params.txt"),
        )
    else:
        params_dict = 0
        list_dict = 0
        memlist = 0
    params_dict = sp_utilities.wrap_mpi_bcast(params_dict,
                                              Blockdata["main_node"],
                                              communicator=mpi.MPI_COMM_WORLD)
    list_dict = sp_utilities.wrap_mpi_bcast(list_dict,
                                            Blockdata["main_node"],
                                            communicator=mpi.MPI_COMM_WORLD)
    memlist = sp_utilities.wrap_mpi_bcast(memlist,
                                          Blockdata["main_node"],
                                          communicator=mpi.MPI_COMM_WORLD)
    # Now computing!
    del init_dict
    tag_sharpen_avg = 1000
    ## always apply low pass filter to B_enhanced images to suppress noise in high frequencies
    enforced_to_H1 = False
    if B_enhance:
        if Tracker["constants"]["low_pass_filter"] == -1.0:
            enforced_to_H1 = True

    # ---------------- Distribute classes over ranks ----------------
    # distribute workload among mpi processes
    image_start, image_end = sp_applications.MPI_start_end(
        navg, Blockdata["nproc"], Blockdata["myid"])
    if Blockdata["myid"] == Blockdata["main_node"]:
        # cpu_dict maps class index -> owning rank, so the gather phase below
        # knows which rank produced each refined average.
        cpu_dict = {}
        for iproc in range(Blockdata["nproc"]):
            local_image_start, local_image_end = sp_applications.MPI_start_end(
                navg, Blockdata["nproc"], iproc)
            for im in range(local_image_start, local_image_end):
                cpu_dict[im] = iproc
    else:
        cpu_dict = 0
    cpu_dict = sp_utilities.wrap_mpi_bcast(cpu_dict,
                                           Blockdata["main_node"],
                                           communicator=mpi.MPI_COMM_WORLD)

    slist = [None for im in range(navg)]
    ini_list = [None for im in range(navg)]
    avg1_list = [None for im in range(navg)]
    avg2_list = [None for im in range(navg)]
    data_list = [None for im in range(navg)]
    plist_dict = {}

    if Blockdata["myid"] == Blockdata["main_node"]:
        if B_enhance:
            sp_global_def.sxprint(
                "Avg ID B-factor FH1(Res before ali) FH2(Res after ali)")
        else:
            sp_global_def.sxprint(
                "Avg ID FH1(Res before ali) FH2(Res after ali)")

    # ---------------- Per-class refinement loop (local to each rank) ----------------
    FH_list = [[0, 0.0, 0.0] for im in range(navg)]
    for iavg in range(image_start, image_end):
        mlist = EMAN2_cppwrap.EMData.read_images(
            Tracker["constants"]["orgstack"], list_dict[iavg])
        for im in range(len(mlist)):
            sp_utilities.set_params2D(mlist[im],
                                      params_dict[iavg][im],
                                      xform="xform.align2d")
        if options.local_alignment:
            new_avg, plist, FH2 = sp_applications.refinement_2d_local(
                mlist,
                Tracker["constants"]["radius"],
                a_range,
                x_range,
                y_range,
                CTF=do_ctf,
                SNR=1.0e10,
            )
            plist_dict[iavg] = plist
            FH1 = -1.0
        else:
            new_avg, frc, plist = compute_average(
                mlist, Tracker["constants"]["radius"], do_ctf)
            FH1 = get_optimistic_res(frc)
            FH2 = -1.0
        FH_list[iavg] = [iavg, FH1, FH2]

        # Power-spectrum adjustment according to the selected mode.
        if B_enhance:
            new_avg, gb = apply_enhancement(
                new_avg,
                Tracker["constants"]["B_start"],
                Tracker["constants"]["pixel_size"],
                Tracker["constants"]["Bfactor"],
            )
            sp_global_def.sxprint(" %6d %6.3f %4.3f %4.3f" %
                                  (iavg, gb, FH1, FH2))
        elif adjust_to_given_pw2:
            roo = sp_utilities.read_text_file(Tracker["constants"]["modelpw"],
                                              -1)
            roo = roo[0]  # always on the first column
            new_avg = adjust_pw_to_model(new_avg,
                                         Tracker["constants"]["pixel_size"],
                                         roo)
            sp_global_def.sxprint(" %6d %4.3f %4.3f " % (iavg, FH1, FH2))
        elif adjust_to_analytic_model:
            new_avg = adjust_pw_to_model(new_avg,
                                         Tracker["constants"]["pixel_size"],
                                         None)
            sp_global_def.sxprint(" %6d %4.3f %4.3f " % (iavg, FH1, FH2))
        elif no_adjustment:
            pass

        # Low-pass filtering: --fl of 0.0/1.0 select FH1/FH2; any other
        # positive value is used directly (capped at 0.45 absolute frequency).
        if Tracker["constants"]["low_pass_filter"] != -1.0:
            if Tracker["constants"]["low_pass_filter"] == 0.0:
                low_pass_filter = FH1
            elif Tracker["constants"]["low_pass_filter"] == 1.0:
                low_pass_filter = FH2
                if not options.local_alignment:
                    low_pass_filter = FH1
            else:
                low_pass_filter = Tracker["constants"]["low_pass_filter"]
                if low_pass_filter >= 0.45:
                    low_pass_filter = 0.45
            new_avg = sp_filter.filt_tanl(new_avg, low_pass_filter, 0.02)
        else:  # No low pass filter but if enforced
            if enforced_to_H1:
                new_avg = sp_filter.filt_tanl(new_avg, FH1, 0.02)
        if B_enhance:
            new_avg = sp_fundamentals.fft(new_avg)
        new_avg.set_attr("members", list_dict[iavg])
        new_avg.set_attr("n_objects", len(list_dict[iavg]))
        slist[iavg] = new_avg
        sp_global_def.sxprint(
            time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime()) + " =>",
            "Refined average %7d" % iavg,
        )

    # ---------------- Gather refined averages/params on the main node ----------------
    ## send to main node to write
    mpi.mpi_barrier(mpi.MPI_COMM_WORLD)
    for im in range(navg):
        # avg
        if (cpu_dict[im] == Blockdata["myid"]
                and Blockdata["myid"] != Blockdata["main_node"]):
            sp_utilities.send_EMData(slist[im], Blockdata["main_node"],
                                     tag_sharpen_avg)
        elif (cpu_dict[im] == Blockdata["myid"]
              and Blockdata["myid"] == Blockdata["main_node"]):
            slist[im].set_attr("members", memlist[im])
            slist[im].set_attr("n_objects", len(memlist[im]))
            slist[im].write_image(
                os.path.join(Tracker["constants"]["masterdir"],
                             "class_averages.hdf"),
                im,
            )
        elif (cpu_dict[im] != Blockdata["myid"]
              and Blockdata["myid"] == Blockdata["main_node"]):
            new_avg_other_cpu = sp_utilities.recv_EMData(
                cpu_dict[im], tag_sharpen_avg)
            new_avg_other_cpu.set_attr("members", memlist[im])
            new_avg_other_cpu.set_attr("n_objects", len(memlist[im]))
            new_avg_other_cpu.write_image(
                os.path.join(Tracker["constants"]["masterdir"],
                             "class_averages.hdf"),
                im,
            )

        if options.local_alignment:
            if cpu_dict[im] == Blockdata["myid"]:
                sp_utilities.write_text_row(
                    plist_dict[im],
                    os.path.join(
                        Tracker["constants"]["masterdir"],
                        "ali2d_local_params_avg",
                        "ali2d_local_params_avg_%03d.txt" % im,
                    ),
                )
            if (cpu_dict[im] == Blockdata["myid"]
                    and cpu_dict[im] != Blockdata["main_node"]):
                sp_utilities.wrap_mpi_send(plist_dict[im],
                                           Blockdata["main_node"],
                                           mpi.MPI_COMM_WORLD)
                sp_utilities.wrap_mpi_send(FH_list, Blockdata["main_node"],
                                           mpi.MPI_COMM_WORLD)
            elif (cpu_dict[im] != Blockdata["main_node"]
                  and Blockdata["myid"] == Blockdata["main_node"]):
                dummy = sp_utilities.wrap_mpi_recv(cpu_dict[im],
                                                   mpi.MPI_COMM_WORLD)
                plist_dict[im] = dummy
                dummy = sp_utilities.wrap_mpi_recv(cpu_dict[im],
                                                   mpi.MPI_COMM_WORLD)
                FH_list[im] = dummy[im]
        else:
            if (cpu_dict[im] == Blockdata["myid"]
                    and cpu_dict[im] != Blockdata["main_node"]):
                sp_utilities.wrap_mpi_send(FH_list, Blockdata["main_node"],
                                           mpi.MPI_COMM_WORLD)
            elif (cpu_dict[im] != Blockdata["main_node"]
                  and Blockdata["myid"] == Blockdata["main_node"]):
                dummy = sp_utilities.wrap_mpi_recv(cpu_dict[im],
                                                   mpi.MPI_COMM_WORLD)
                FH_list[im] = dummy[im]
        # Per-class barrier keeps the send/recv pairs above in lockstep.
        mpi.mpi_barrier(mpi.MPI_COMM_WORLD)
    mpi.mpi_barrier(mpi.MPI_COMM_WORLD)

    # ---------------- Write consolidated parameter files ----------------
    if options.local_alignment:
        if Blockdata["myid"] == Blockdata["main_node"]:
            ali3d_local_params = [None for im in range(len(ptl_list))]
            for im in range(len(ptl_list)):
                ali3d_local_params[im] = [ptl_list[im]] + plist_dict[
                    global_dict[ptl_list[im]][0]][global_dict[ptl_list[im]][1]]
            sp_utilities.write_text_row(
                ali3d_local_params,
                os.path.join(Tracker["constants"]["masterdir"],
                             "ali2d_local_params.txt"),
            )
            sp_utilities.write_text_row(
                FH_list,
                os.path.join(Tracker["constants"]["masterdir"],
                             "FH_list.txt"))
    else:
        if Blockdata["myid"] == Blockdata["main_node"]:
            sp_utilities.write_text_row(
                FH_list,
                os.path.join(Tracker["constants"]["masterdir"],
                             "FH_list.txt"))
    mpi.mpi_barrier(mpi.MPI_COMM_WORLD)

    # ---------------- Order the averages with sp_chains.py (rank 0 only) ----------------
    target_xr = 3
    target_yr = 3
    if Blockdata["myid"] == 0:
        cmd = "{} {} {} {} {} {} {} {} {} {}".format(
            "sp_chains.py",
            os.path.join(Tracker["constants"]["masterdir"],
                         "class_averages.hdf"),
            os.path.join(Tracker["constants"]["masterdir"], "junk.hdf"),
            os.path.join(Tracker["constants"]["masterdir"],
                         "ordered_class_averages.hdf"),
            "--circular",
            "--radius=%d" % Tracker["constants"]["radius"],
            "--xr=%d" % (target_xr + 1),
            "--yr=%d" % (target_yr + 1),
            "--align",
            ">/dev/null",
        )
        junk = sp_utilities.cmdexecute(cmd)
        cmd = "{} {}".format(
            "rm -rf",
            os.path.join(Tracker["constants"]["masterdir"], "junk.hdf"))
        junk = sp_utilities.cmdexecute(cmd)

    return
def combine_isac_params(isac_dir, classavgstack, chains_params_file, old_combined_parts, classdoc, combined_params_file, log=False, verbose=False):
    """
    Combines initial and all_params from ISAC.

    Arguments:
        isac_dir : ISAC directory
        classavgstack : Input image stack
        chains_params_file : Input alignment parameters applied to averages in sp_chains
        old_combined_parts : Input file with particle ordering before reordering by sp_chains
        classdoc : Class-membership doc-file template (formatted with the class number)
        combined_params_file : Output combined alignment parameters
        log : instance of Logger class
        verbose : (boolean) Whether to write to screen

    Returns:
        combined_params : per-particle combined 2D alignment parameters
    """
    from sp_utilities import combine_params2, read_text_row

    # File-handling
    init_params_file = os.path.join(isac_dir, "2dalignment", "initial2Dparams.txt")
    all_params_file = os.path.join(isac_dir, "all_parameters.txt")
    init_params_list = read_text_row(init_params_file)
    all_params = read_text_row(all_params_file)
    isac_shrink_path = os.path.join(isac_dir, "README_shrink_ratio.txt")
    # BUGFIX: use a context manager so the README file handle is closed
    # (the previous version opened it and never closed it).
    with open(isac_shrink_path, "r") as isac_shrink_file:
        isac_shrink_lines = isac_shrink_file.readlines()
    # 6th line: shrink ratio used in the ISAC run; ISAC shifts are divided by
    # it to rescale them back to the original pixel size.
    isac_shrink_ratio = float(isac_shrink_lines[5])

    # Three cases:
    #   1) Using class_averages.hdf
    #   2) Using ordered_class_averages.hdf, but chains_params.txt doesn't exist
    #   3) Using ordered_class_averages.hdf and chains_params.txt
    msg = "Combining alignment parameters from %s and %s, dividing by %s, and writing to %s" % \
        (init_params_file, all_params_file, isac_shrink_ratio, combined_params_file)

    # Check if using ordered class averages and whether chains_params exists
    if os.path.basename(classavgstack) == 'ordered_class_averages.hdf':
        if not os.path.exists(chains_params_file):
            msg += "WARNING: '%s' does not exist. " % chains_params_file
            msg += " Using '%s' but alignment parameters correspond to 'class_averages.hdf'.\n" % classavgstack
        else:
            msg = "Combining alignment parameters from %s, %s, and %s, dividing by %s, and writing to %s" % \
                (init_params_file, all_params_file, chains_params_file, isac_shrink_ratio, combined_params_file)

    print_log_msg(msg, log, verbose)

    if os.path.basename(classavgstack) == 'ordered_class_averages.hdf' and os.path.exists(chains_params_file):
        chains_params_list = read_text_row(chains_params_file)
        old_combined_list = read_text_row(old_combined_parts)
        num_classes = EMUtil.get_image_count(classavgstack)
        tmp_combined = []

        # Loop through classes
        for class_num in range(num_classes):
            # Extract members
            image = get_im(classavgstack, class_num)
            members = sorted(image.get_attr("members"))
            old_class_list = read_text_row(classdoc.format(class_num))
            new_class_list = []

            # Loop through particles
            for idx, im in enumerate(members):
                # Combine initial ISAC params with final ones, rescaling the
                # ISAC shifts back to the original pixel size.
                tmp_par = combine_params2(
                    init_params_list[im][0], init_params_list[im][1],
                    init_params_list[im][2], init_params_list[im][3],
                    all_params[im][0], all_params[im][1] / isac_shrink_ratio,
                    all_params[im][2] / isac_shrink_ratio, all_params[im][3])

                # Combine with class-average parameters
                P = combine_params2(tmp_par[0], tmp_par[1], tmp_par[2],
                                    tmp_par[3],
                                    chains_params_list[class_num][2],
                                    chains_params_list[class_num][3],
                                    chains_params_list[class_num][4],
                                    chains_params_list[class_num][5])

                tmp_combined.append([im, P[0], P[1], P[2], P[3]])

                # Need to update class number in class docs
                old_part_num = old_class_list[idx]
                try:
                    new_part_num = old_combined_list.index(old_part_num)
                except ValueError:
                    # BUGFIX: the old handler printed new_part_num, which is
                    # unbound when the lookup fails, raising UnboundLocalError
                    # and masking the real problem. Record a -1 sentinel and
                    # report only what is known.
                    new_part_num = -1
                    print(
                        "Couldn't find particle: class_num %s, old_part_num %s"
                        % (class_num, old_part_num[0]))

                new_class_list.append(new_part_num)
            # End particle-loop

            # Overwrite pre-existing class doc
            write_text_row(new_class_list, classdoc.format(class_num))
        # End class-loop

        # Sort by particle number
        combined_params = sorted(tmp_combined, key=itemgetter(0))

        # Remove first column (the particle number used only for sorting)
        for row in combined_params:
            del row[0]

    # Not applying alignments of ordered_class_averages
    else:
        combined_params = []

        # Loop through images
        for im in range(len(all_params)):
            P = combine_params2(
                init_params_list[im][0], init_params_list[im][1],
                init_params_list[im][2], init_params_list[im][3],
                all_params[im][0], all_params[im][1] / isac_shrink_ratio,
                all_params[im][2] / isac_shrink_ratio, all_params[im][3])
            combined_params.append([P[0], P[1], P[2], P[3], 1.0])

    write_text_row(combined_params, combined_params_file)
    print_log_msg(
        'Wrote %s entries to %s\n' % (len(combined_params), combined_params_file),
        log, verbose)

    return combined_params
def main(): progname = os.path.basename(sys.argv[0]) usage = progname + """ [options] <inputfile> <outputfile> Forms chains of 2D images based on their similarities. Functionality: Order a 2-D stack of image based on pair-wise similarity (computed as a cross-correlation coefficent). Options 1-3 require image stack to be aligned. The program will apply orientation parameters if present in headers. The ways to use the program: 1. Use option initial to specify which image will be used as an initial seed to form the chain. sp_chains.py input_stack.hdf output_stack.hdf --initial=23 --radius=25 2. If options initial is omitted, the program will determine which image best serves as initial seed to form the chain sp_chains.py input_stack.hdf output_stack.hdf --radius=25 3. Use option circular to form a circular chain. sp_chains.py input_stack.hdf output_stack.hdf --circular--radius=25 4. New circular code based on pairwise alignments sp_chains.py aclf.hdf chain.hdf circle.hdf --align --radius=25 --xr=2 --pairwiseccc=lcc.txt 5. Circular ordering based on pairwise alignments sp_chains.py vols.hdf chain.hdf mask.hdf --dd --radius=25 """ parser = OptionParser(usage, version=SPARXVERSION) parser.add_option( "--dd", action="store_true", help="Circular ordering without adjustment of orientations", default=False) parser.add_option( "--circular", action="store_true", help= "Select circular ordering (first image has to be similar to the last)", default=False) parser.add_option( "--align", action="store_true", help= "Compute all pairwise alignments and from the table of image similarities find the best chain", default=False) parser.add_option( "--initial", type="int", default=-1, help= "Specifies which image will be used as an initial seed to form the chain. 
(default = 0, means the first image)" ) parser.add_option( "--radius", type="int", default=-1, help="Radius of a circular mask for similarity based ordering") # import params for 2D alignment parser.add_option( "--ou", type="int", default=-1, help= "outer radius for 2D alignment < nx/2-1 (set to the radius of the particle)" ) parser.add_option( "--xr", type="int", default=0, help="range for translation search in x direction, search is +/xr (0)") parser.add_option( "--yr", type="int", default=0, help="range for translation search in y direction, search is +/yr (0)") # parser.add_option("--nomirror", action="store_true", default=False, help="Disable checking mirror orientations of images (default False)") parser.add_option("--pairwiseccc", type="string", default=" ", help="Input/output pairwise ccc file") (options, args) = parser.parse_args() sp_global_def.BATCH = True if options.dd: nargs = len(args) if nargs != 3: ERROR("Must provide name of input and two output files!") return stack = args[0] new_stack = args[1] from sp_utilities import model_circle from sp_statistics import ccc from sp_statistics import mono lend = EMUtil.get_image_count(stack) lccc = [None] * (old_div(lend * (lend - 1), 2)) for i in range(lend - 1): v1 = get_im(stack, i) if (i == 0 and nargs == 2): nx = v1.get_xsize() ny = v1.get_ysize() nz = v1.get_ysize() if options.ou < 1: radius = old_div(nx, 2) - 2 else: radius = options.ou mask = model_circle(radius, nx, ny, nz) else: mask = get_im(args[2]) for j in range(i + 1, lend): lccc[mono(i, j)] = [ccc(v1, get_im(stack, j), mask), 0] order = tsp(lccc) if (len(order) != lend): ERROR("Problem with data length") return sxprint("Total sum of cccs :", TotalDistance(order, lccc)) sxprint("ordering :", order) for i in range(lend): get_im(stack, order[i]).write_image(new_stack, i) elif options.align: nargs = len(args) if nargs != 3: ERROR("Must provide name of input and two output files!") return from sp_utilities import get_params2D, model_circle from 
sp_fundamentals import rot_shift2D from sp_statistics import ccc from time import time from sp_alignment import align2d, align2d_scf stack = args[0] new_stack = args[1] d = EMData.read_images(stack) if (len(d) < 6): ERROR( "Chains requires at least six images in the input stack to be executed" ) return """ # will align anyway try: ttt = d[0].get_attr('xform.params2d') for i in xrange(len(d)): alpha, sx, sy, mirror, scale = get_params2D(d[i]) d[i] = rot_shift2D(d[i], alpha, sx, sy, mirror) except: pass """ nx = d[0].get_xsize() ny = d[0].get_ysize() if options.ou < 1: radius = old_div(nx, 2) - 2 else: radius = options.ou mask = model_circle(radius, nx, ny) if (options.xr < 0): xrng = 0 else: xrng = options.xr if (options.yr < 0): yrng = xrng else: yrng = options.yr initial = max(options.initial, 0) from sp_statistics import mono lend = len(d) lccc = [None] * (old_div(lend * (lend - 1), 2)) from sp_utilities import read_text_row if options.pairwiseccc == " " or not os.path.exists( options.pairwiseccc): st = time() for i in range(lend - 1): for j in range(i + 1, lend): # j>i meaning mono entry (i,j) or (j,i) indicates T i->j (from smaller index to larger) # alpha, sx, sy, mir, peak = align2d(d[i],d[j], xrng, yrng, step=options.ts, first_ring=options.ir, last_ring=radius, mode = "F") alpha, sx, sy, mir, peak = align2d_scf(d[i], d[j], xrng, yrng, ou=radius) lccc[mono(i, j)] = [ ccc(d[j], rot_shift2D(d[i], alpha, sx, sy, mir, 1.0), mask), alpha, sx, sy, mir ] # print " %4d %10.1f"%(i,time()-st) if ((not os.path.exists(options.pairwiseccc)) and (options.pairwiseccc != " ")): write_text_row([[initial, 0, 0, 0, 0]] + lccc, options.pairwiseccc) elif (os.path.exists(options.pairwiseccc)): lccc = read_text_row(options.pairwiseccc) initial = int(lccc[0][0] + 0.1) del lccc[0] for i in range(len(lccc)): T = Transform({ "type": "2D", "alpha": lccc[i][1], "tx": lccc[i][2], "ty": lccc[i][3], "mirror": int(lccc[i][4] + 0.1) }) lccc[i] = [lccc[i][0], T] tdummy = Transform({"type": 
"2D"}) maxsum = -1.023 for m in range(0, lend): # initial, initial+1): indc = list(range(lend)) lsnake = [[m, tdummy, 0.0]] del indc[m] lsum = 0.0 while len(indc) > 1: maxcit = -111. for i in range(len(indc)): cuc = lccc[mono(indc[i], lsnake[-1][0])][0] if cuc > maxcit: maxcit = cuc qi = indc[i] # Here we need transformation from the current to the previous, # meaning indc[i] -> lsnake[-1][0] T = lccc[mono(indc[i], lsnake[-1][0])][1] # If direction is from larger to smaller index, the transformation has to be inverted if (indc[i] > lsnake[-1][0]): T = T.inverse() lsnake.append([qi, T, maxcit]) lsum += maxcit del indc[indc.index(qi)] T = lccc[mono(indc[-1], lsnake[-1][0])][1] if (indc[-1] > lsnake[-1][0]): T = T.inverse() lsnake.append( [indc[-1], T, lccc[mono(indc[-1], lsnake[-1][0])][0]]) sxprint(" initial image and lsum ", m, lsum) # print lsnake if (lsum > maxsum): maxsum = lsum init = m snake = [lsnake[i] for i in range(lend)] sxprint(" Initial image selected : ", init, maxsum, " ", TotalDistance([snake[m][0] for m in range(lend)], lccc)) # for q in snake: print q from copy import deepcopy trans = deepcopy([snake[i][1] for i in range(len(snake))]) sxprint([snake[i][0] for i in range(len(snake))]) """ for m in xrange(lend): prms = trans[m].get_params("2D") print " %3d %7.1f %7.1f %7.1f %2d %6.2f"%(snake[m][0], prms["alpha"], prms["tx"], prms["ty"], prms["mirror"], snake[m][2]) """ for k in range(lend - 2, 0, -1): T = snake[k][1] for i in range(k + 1, lend): trans[i] = T * trans[i] # To add - apply all transformations and do the overall centering. 
for m in range(lend): prms = trans[m].get_params("2D") # print(" %3d %7.1f %7.1f %7.1f %2d %6.2f"%(snake[m][0], prms["alpha"], prms["tx"], prms["ty"], prms["mirror"], snake[m][2]) ) # rot_shift2D(d[snake[m][0]], prms["alpha"], prms["tx"], prms["ty"], prms["mirror"]).write_image(new_stack, m) rot_shift2D(d[snake[m][0]], prms["alpha"], 0.0, 0.0, prms["mirror"]).write_image(new_stack, m) order = tsp(lccc) if (len(order) != lend): ERROR("Problem with data length") return sxprint(TotalDistance(order, lccc)) sxprint(order) ibeg = order.index(init) order = [order[(i + ibeg) % lend] for i in range(lend)] sxprint(TotalDistance(order, lccc)) sxprint(order) snake = [tdummy] for i in range(1, lend): # Here we need transformation from the current to the previous, # meaning order[i] -> order[i-1]] T = lccc[mono(order[i], order[i - 1])][1] # If direction is from larger to smaller index, the transformation has to be inverted if (order[i] > order[i - 1]): T = T.inverse() snake.append(T) assert (len(snake) == lend) from copy import deepcopy trans = deepcopy(snake) for k in range(lend - 2, 0, -1): T = snake[k] for i in range(k + 1, lend): trans[i] = T * trans[i] # Try to smooth the angles - complicated, I am afraid one would have to use angles forward and backwards # and find their average?? 
# In addition, one would have to recenter them """ trms = [] for m in xrange(lend): prms = trans[m].get_params("2D") trms.append([prms["alpha"], prms["mirror"]]) for i in xrange(3): for m in xrange(lend): mb = (m-1)%lend me = (m+1)%lend # angles order mb,m,me # calculate predicted angles mb->m """ best_params = [] for m in range(lend): prms = trans[m].get_params("2D") # rot_shift2D(d[order[m]], prms["alpha"], prms["tx"], prms["ty"], prms["mirror"]).write_image("metro.hdf", m) rot_shift2D(d[order[m]], prms["alpha"], 0.0, 0.0, prms["mirror"]).write_image(args[2], m) best_params.append( [m, order[m], prms["alpha"], 0.0, 0.0, prms["mirror"]]) # Write alignment parameters outdir = os.path.dirname(args[2]) aligndoc = os.path.join(outdir, "chains_params.txt") write_text_row(best_params, aligndoc) """ # This was an effort to get number of loops, inconclusive, to say the least from numpy import outer, zeros, float32, sqrt lend = len(d) cor = zeros(lend,float32) cor = outer(cor, cor) for i in xrange(lend): cor[i][i] = 1.0 for i in xrange(lend-1): for j in xrange(i+1, lend): cor[i,j] = lccc[mono(i,j)][0] cor[j,i] = cor[i,j] lmbd, eigvec = pca(cor) from sp_utilities import write_text_file nvec=20 print [lmbd[j] for j in xrange(nvec)] print " G" mm = [-1]*lend for i in xrange(lend): # row mi = -1.0e23 for j in xrange(nvec): qt = eigvec[j][i] if(abs(qt)>mi): mi = abs(qt) mm[i] = j for j in xrange(nvec): qt = eigvec[j][i] print round(qt,3), # eigenvector print mm[i] print for j in xrange(nvec): qt = [] for i in xrange(lend): if(mm[i] == j): qt.append(i) if(len(qt)>0): write_text_file(qt,"loop%02d.txt"%j) """ """ print [lmbd[j] for j in xrange(nvec)] print " B" mm = [-1]*lend for i in xrange(lend): # row mi = -1.0e23 for j in xrange(nvec): qt = eigvec[j][i]/sqrt(lmbd[j]) if(abs(qt)>mi): mi = abs(qt) mm[i] = j for j in xrange(nvec): qt = eigvec[j][i]/sqrt(lmbd[j]) print round(qt,3), # eigenvector print mm[i] print """ """ lend=3 cor = zeros(lend,float32) cor = outer(cor, cor) 
cor[0][0] =136.77 cor[0][1] = 79.15 cor[0][2] = 37.13 cor[1][0] = 79.15 cor[2][0] = 37.13 cor[1][1] = 50.04 cor[1][2] = 21.65 cor[2][1] = 21.65 cor[2][2] = 13.26 lmbd, eigvec = pca(cor) print lmbd print eigvec for i in xrange(lend): # row for j in xrange(lend): print eigvec[j][i], # eigenvector print print " B" for i in xrange(lend): # row for j in xrange(lend): print eigvec[j][i]/sqrt(lmbd[j]), # eigenvector print print " G" for i in xrange(lend): # row for j in xrange(lend): print eigvec[j][i]*sqrt(lmbd[j]), # eigenvector print """ else: nargs = len(args) if nargs != 2: ERROR("Must provide name of input and output file!") return from sp_utilities import get_params2D, model_circle from sp_fundamentals import rot_shift2D from sp_statistics import ccc from time import time from sp_alignment import align2d stack = args[0] new_stack = args[1] d = EMData.read_images(stack) try: sxprint("Using 2D alignment parameters from header.") ttt = d[0].get_attr('xform.params2d') for i in range(len(d)): alpha, sx, sy, mirror, scale = get_params2D(d[i]) d[i] = rot_shift2D(d[i], alpha, sx, sy, mirror) except: pass nx = d[0].get_xsize() ny = d[0].get_ysize() if options.radius < 1: radius = old_div(nx, 2) - 2 else: radius = options.radius mask = model_circle(radius, nx, ny) init = options.initial if init > -1: sxprint(" initial image: %d" % init) temp = d[init].copy() temp.write_image(new_stack, 0) del d[init] k = 1 lsum = 0.0 while len(d) > 1: maxcit = -111. 
for i in range(len(d)): cuc = ccc(d[i], temp, mask) if cuc > maxcit: maxcit = cuc qi = i # sxprint k, maxcit lsum += maxcit temp = d[qi].copy() del d[qi] temp.write_image(new_stack, k) k += 1 sxprint(lsum) d[0].write_image(new_stack, k) else: if options.circular: sxprint("Using options.circular, no alignment") # figure the "best circular" starting image maxsum = -1.023 for m in range(len(d)): indc = list(range(len(d))) lsnake = [-1] * (len(d) + 1) lsnake[0] = m lsnake[-1] = m del indc[m] temp = d[m].copy() lsum = 0.0 direction = +1 k = 1 while len(indc) > 1: maxcit = -111. for i in range(len(indc)): cuc = ccc(d[indc[i]], temp, mask) if cuc > maxcit: maxcit = cuc qi = indc[i] lsnake[k] = qi lsum += maxcit del indc[indc.index(qi)] direction = -direction for i in range(1, len(d)): if (direction > 0): if (lsnake[i] == -1): temp = d[lsnake[i - 1]].copy() # print " forw ",lsnake[i-1] k = i break else: if (lsnake[len(d) - i] == -1): temp = d[lsnake[len(d) - i + 1]].copy() # print " back ",lsnake[len(d) - i +1] k = len(d) - i break lsnake[lsnake.index(-1)] = indc[-1] # print " initial image and lsum ",m,lsum # print lsnake if (lsum > maxsum): maxsum = lsum init = m snake = [lsnake[i] for i in range(len(d))] sxprint(" Initial image selected : ", init, maxsum) sxprint(lsnake) for m in range(len(d)): d[snake[m]].write_image(new_stack, m) else: # figure the "best" starting image sxprint("Straight chain, no alignment") maxsum = -1.023 for m in range(len(d)): indc = list(range(len(d))) lsnake = [m] del indc[m] temp = d[m].copy() lsum = 0.0 while len(indc) > 1: maxcit = -111. 
for i in range(len(indc)): cuc = ccc(d[indc[i]], temp, mask) if cuc > maxcit: maxcit = cuc qi = indc[i] lsnake.append(qi) lsum += maxcit temp = d[qi].copy() del indc[indc.index(qi)] lsnake.append(indc[-1]) # sxprint " initial image and lsum ",m,lsum # sxprint lsnake if (lsum > maxsum): maxsum = lsum init = m snake = [lsnake[i] for i in range(len(d))] sxprint(" Initial image selected : ", init, maxsum) sxprint(lsnake) for m in range(len(d)): d[snake[m]].write_image(new_stack, m)
def main():
    """
    MPI entry point for an initial 3-D projection alignment run.

    Parses CLI options, broadcasts stack geometry/pixel size to all ranks,
    creates the master directory on the main node, builds the alignment
    parameter dictionary, and delegates to run3Dalignment().

    NOTE(review): original line structure was lost in extraction; indentation
    below is reconstructed from syntax and should be confirmed upstream.
    """
    progname = os.path.basename(sys.argv[0])
    usage = progname + " stack  [output_directory] initial_volume  --ir=inner_radius --ou=outer_radius --rs=ring_step --xr=x_range --yr=y_range  --ts=translational_search_step  --delta=angular_step --an=angular_neighborhood  --CTF  --fl --aa --ref_a=S --sym=c1"
    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option("--ir", type="int", default=1, help="inner radius for rotational correlation > 0 (set to 1)")
    parser.add_option("--ou", type="int", default=-1, help="outer radius for rotational correlation < int(nx/2)-1 (set to the radius of the particle)")
    parser.add_option("--rs", type="int", default=1, help="step between rings in rotational correlation >0  (set to 1)")
    parser.add_option("--xr", type="string", default="-1", help="range for translation search in x direction, search is +/xr (default 0)")
    parser.add_option("--yr", type="string", default="-1", help="range for translation search in y direction, search is +/yr (default = same as xr)")
    parser.add_option("--ts", type="string", default="1", help="step size of the translation search in both directions, search is -xr, -xr+ts, 0, xr-ts, xr, can be fractional")
    parser.add_option("--delta", type="string", default="-1", help="angular step of reference projections during initialization step (default automatically selected based on radius of the structure.)")
    parser.add_option("--an", type="string", default="-1", help="angular neighborhood for local searches (phi and theta) (Default exhaustive searches)")
    parser.add_option("--CTF", action="store_true", default=False, help="Use CTF (Default no CTF correction)")
    parser.add_option("--shrink", type="float", default=1.0, help="Reduce data size by shrink factor (default 1.0)")
    parser.add_option("--snr", type="float", default=1.0, help="Signal-to-Noise Ratio of the data (default 1.0)")
    parser.add_option("--ref_a", type="string", default="S", help="method for generating the quasi-uniformly distributed projection directions (default S)")
    parser.add_option("--sym", type="string", default="c1", help="symmetry of the refined structure")
    parser.add_option("--npad", type="int", default=2, help="padding size for 3D reconstruction (default=2)")
    # options introduced for the do_volume function
    parser.add_option("--fl", type="float", default=0.12, help="cut-off frequency of hyperbolic tangent low-pass Fourier filte (default 0.12)")
    parser.add_option("--aa", type="float", default=0.1, help="fall-off of hyperbolic tangent low-pass Fourier filter (default 0.1)")
    parser.add_option("--pwreference", type="string", default="", help="text file with a reference power spectrum (default no power spectrum adjustment)")
    parser.add_option("--mask3D", type="string", default=None, help="3D mask file (default a sphere  WHAT RADIUS??)")

    (options, args) = parser.parse_args(sys.argv[1:])

    # print( "  args  ",args)
    if (len(args) == 3):
        volinit = args[2]
        masterdir = args[1]
    elif (len(args) == 2):
        volinit = args[1]
        masterdir = ""
    else:
        sxprint("Usage: " + usage)
        sxprint("Please run \'" + progname + " -h\' for detailed options")
        ERROR("Invalid number of parameters used. Please see usage information above.")
        return

    stack = args[0]

    #  INPUT PARAMETERS
    radi = options.ou
    sp_global_def.BATCH = True
    # Mirror the CLI options into the module-level ali3d_options object that
    # run3Dalignment()/center_projections_3D read from.
    ali3d_options.ir = options.ir
    ali3d_options.rs = options.rs
    ali3d_options.ou = options.ou
    ali3d_options.xr = options.xr
    ali3d_options.yr = options.yr
    ali3d_options.ts = options.ts
    ali3d_options.an = "-1"
    ali3d_options.sym = options.sym
    ali3d_options.delta = options.delta
    ali3d_options.npad = options.npad
    ali3d_options.CTF = options.CTF
    ali3d_options.ref_a = options.ref_a
    ali3d_options.snr = options.snr
    ali3d_options.mask3D = options.mask3D
    ali3d_options.pwreference = ""  #  It will have to be turned on after exhaustive done by setting to options.pwreference
    ali3d_options.fl = 0.4
    ali3d_options.initfl = 0.4
    ali3d_options.aa = 0.1

    nproc = mpi_comm_size(MPI_COMM_WORLD)
    myid = mpi_comm_rank(MPI_COMM_WORLD)
    main_node = 0

    #  Get the pixel size, if none set to 1.0, and the original image size
    if (myid == main_node):
        total_stack = EMUtil.get_image_count(stack)
        a = get_im(stack)
        nxinit = a.get_xsize()
        if ali3d_options.CTF:
            i = a.get_attr('ctf')
            pixel_size = i.apix
            # NOTE(review): 'fq' is read here before any assignment in this
            # branch — with --CTF this raises NameError. The else-branch sets
            # fq = 5.0/nxinit (fusing frequency of 5 Fourier pixels); the
            # intended expression here presumably converts a resolution in
            # Angstroms to absolute frequency — TODO confirm against upstream.
            fq = pixel_size / fq
        else:
            pixel_size = 1.0
            #  No pixel size, fusing computed as 5 Fourier pixels
            fq = 5.0 / nxinit
        del a
    else:
        total_stack = 0
        nxinit = 0
        pixel_size = 1.0

    total_stack = bcast_number_to_all(total_stack, source_node=main_node)
    pixel_size = bcast_number_to_all(pixel_size, source_node=main_node)
    nxinit = bcast_number_to_all(nxinit, source_node=main_node)

    if (radi < 1):
        radi = nxinit // 2 - 2
    elif ((2 * radi + 2) > nxinit):
        ERROR("Particle radius set too large!", myid=myid)
    ali3d_options.ou = radi

    shrink = options.shrink
    # NOTE(review): nxshrink is computed but not referenced later in this
    # function as visible here.
    nxshrink = int(nxinit * shrink + 0.5)
    angular_neighborhood = "-1"

    #  MASTER DIRECTORY
    if (myid == main_node):
        sxprint("   masterdir   ", masterdir)
        if (masterdir == ""):
            # Auto-generate a timestamped master directory name.
            timestring = strftime("_%d_%b_%Y_%H_%M_%S", localtime())
            masterdir = "master" + timestring
        li = len(masterdir)
        cmd = "{} {}".format("mkdir -p", masterdir)
        junk = cmdexecute(cmd)
        sp_global_def.write_command(masterdir)
    else:
        li = 0

    # Broadcast the master directory name (length first, then characters).
    li = mpi_bcast(li, 1, MPI_INT, main_node, MPI_COMM_WORLD)[0]
    if (li > 0):
        masterdir = mpi_bcast(masterdir, li, MPI_CHAR, main_node, MPI_COMM_WORLD)
        masterdir = string.join(masterdir, "")

    nnxo = nxinit

    #  INITIALIZATION
    initdir = masterdir

    #  This is initial setting, has to be initialized here, we do not want it to run too long.
    #    INITIALIZATION THAT FOLLOWS WILL HAVE TO BE CHANGED SO THE USER CAN PROVIDE INITIAL GUESS OF RESOLUTION
    #  If we new the initial resolution, it could be done more densely
    if (options.xr == "-1"):
        xr = "%d" % ((nnxo - (2 * radi - 1)) // 2)
    else:
        xr = options.xr
    # NOTE(review): yr is computed here but not included in paramsdict below
    # — appears unused in the visible code; confirm upstream.
    if (options.yr == "-1"):
        yr = xr
    else:
        yr = options.yr

    delta = float(options.delta)
    if (delta <= 0.0):
        # Automatic angular step from the particle radius.
        delta = "%f" % round(degrees(atan(1.0 / float(radi))), 2)
    else:
        delta = "%f" % delta

    paramsdict = {"stack": stack, "delta": delta, "ts": "1.0", "xr": xr, "an": angular_neighborhood, \
        "center": "0", "maxit": 1, "local": False,\
        "lowpass": options.fl, "initialfl": 0.4, "falloff": options.aa, "radius": radi, \
        "nsoft": 0, "delpreviousmax": True, "shrink": options.shrink, "saturatecrit": 1.0, "pixercutoff": 2.0,\
        "refvol": volinit, "mask3D": options.mask3D}

    partids = os.path.join(masterdir, "ids.txt")
    partstack = os.path.join(masterdir, "paramszero.txt")

    if (myid == main_node):
        # Seed particle id list and all-zero starting parameters.
        write_text_file(list(range(total_stack)), partids)
        write_text_row([[0.0, 0.0, 0.0, 0.0, 0.0] for i in range(total_stack)], partstack)

    run3Dalignment(paramsdict, partids, partstack, initdir, 0, myid, main_node, nproc)
    mpi_barrier(MPI_COMM_WORLD)
def run3Dalignment(paramsdict, partids, partstack, outputdir, procid, myid, main_node, nproc):
    """
    Perform one 3-D alignment pass over a subset of particles (MPI).

    Reads from paramsdict["stack"] the particles listed in partids, applies
    the starting parameters from partstack, preprocesses (shift removal,
    optional CTF, optional shrink, normalization), runs the alignment, and
    on the main node writes the gathered parameters to outputdir/params.txt.

    Arguments:
        paramsdict : dict of alignment settings (delta, ts, xr, lowpass, ...)
        partids    : text file with particle ids to process
        partstack  : text file with starting projection parameters per particle
        outputdir  : output directory (must exist; used as logger prefix)
        procid     : chunk/process id — not referenced in the visible body
        myid       : this rank's MPI id
        main_node  : rank that does I/O and gathers results
        nproc      : total number of MPI processes

    NOTE(review): original line structure was lost in extraction; indentation
    below is reconstructed from syntax and should be confirmed upstream.
    """
    #  Reads from paramsdict["stack"] particles partids set parameters in partstack
    #    and do refinement as specified in paramsdict
    #
    #  Will create outputdir
    #  Will write to outputdir output parameters: params-chunk0.txt and params-chunk1.txt
    if (myid == main_node):
        #  Create output directory
        log = Logger(BaseLogger_Files())
        log.prefix = os.path.join(outputdir)
        # cmd = "mkdir "+log.prefix
        # junk = cmdexecute(cmd)
        log.prefix += "/"
    else:
        log = None
    mpi_barrier(MPI_COMM_WORLD)

    ali3d_options.delta = paramsdict["delta"]
    ali3d_options.ts = paramsdict["ts"]
    ali3d_options.xr = paramsdict["xr"]
    #  low pass filter is applied to shrank data, so it has to be adjusted
    ali3d_options.fl = paramsdict["lowpass"] / paramsdict["shrink"]
    ali3d_options.initfl = paramsdict["initialfl"] / paramsdict["shrink"]
    ali3d_options.aa = paramsdict["falloff"]
    ali3d_options.maxit = paramsdict["maxit"]
    ali3d_options.mask3D = paramsdict["mask3D"]
    ali3d_options.an = paramsdict["an"]
    ali3d_options.ou = paramsdict["radius"]  # This is changed in ali3d_base, but the shrank value is needed in vol recons, fixt it!
    shrinkage = paramsdict["shrink"]

    projdata = getindexdata(paramsdict["stack"], partids, partstack, myid, nproc)
    onx = projdata[0].get_xsize()
    last_ring = ali3d_options.ou
    if last_ring < 0:
        last_ring = int(onx / 2) - 2
    # Annular mask between inner and outer alignment radii, at original size.
    mask2D = model_circle(last_ring, onx, onx) - model_circle(ali3d_options.ir, onx, onx)
    if (shrinkage < 1.0):
        # get the new size
        masks2D = resample(mask2D, shrinkage)
        nx = masks2D.get_xsize()
        masks2D = model_circle(int(last_ring * shrinkage + 0.5), nx, nx) - model_circle(max(int(ali3d_options.ir * shrinkage + 0.5), 1), nx, nx)

    nima = len(projdata)
    # NOTE(review): [0.0,0.0]*nima builds a flat list of length 2*nima; the
    # loop below overwrites entries [0..nima-1] with [sx,sy] pairs, so the
    # trailing half is never used — harmless but odd; confirm upstream.
    oldshifts = [0.0, 0.0] * nima
    for im in range(nima):
        # data[im].set_attr('ID', list_of_particles[im])
        ctf_applied = projdata[im].get_attr_default('ctf_applied', 0)
        phi, theta, psi, sx, sy = get_params_proj(projdata[im])
        # Bake the current shifts into the image, then zero them in the header.
        projdata[im] = fshift(projdata[im], sx, sy)
        set_params_proj(projdata[im], [phi, theta, psi, 0.0, 0.0])
        #  For local SHC set anchor
        # if(nsoft == 1 and an[0] > -1):
        #     set_params_proj(data[im],[phi,tetha,psi,0.0,0.0], "xform.anchor")
        oldshifts[im] = [sx, sy]
        if ali3d_options.CTF:
            ctf_params = projdata[im].get_attr("ctf")
            if ctf_applied == 0:
                # Subtract background mean, then apply the CTF once.
                st = Util.infomask(projdata[im], mask2D, False)
                projdata[im] -= st[0]
                projdata[im] = filt_ctf(projdata[im], ctf_params)
                projdata[im].set_attr('ctf_applied', 1)
        if (shrinkage < 1.0):
            # phi,theta,psi,sx,sy = get_params_proj(projdata[im])
            projdata[im] = resample(projdata[im], shrinkage)
            # Normalize: zero mean, unit sigma inside the shrunk annulus.
            st = Util.infomask(projdata[im], None, True)
            projdata[im] -= st[0]
            st = Util.infomask(projdata[im], masks2D, True)
            projdata[im] /= st[1]
            # sx *= shrinkage
            # sy *= shrinkage
            # set_params_proj(projdata[im], [phi,theta,psi,sx,sy])
            if ali3d_options.CTF:
                # Keep the CTF pixel size consistent with the shrunk image.
                ctf_params.apix /= shrinkage
                projdata[im].set_attr('ctf', ctf_params)
        else:
            st = Util.infomask(projdata[im], None, True)
            projdata[im] -= st[0]
            st = Util.infomask(projdata[im], mask2D, True)
            projdata[im] /= st[1]
    del mask2D
    if (shrinkage < 1.0):
        del masks2D

    """
	if(paramsdict["delpreviousmax"]):
		for i in xrange(len(projdata)):
			try:  projdata[i].del_attr("previousmax")
			except:  pass
	"""

    if (myid == main_node):
        print_dict(paramsdict, "3D alignment parameters")
        # NOTE(review): the following span was garbled in the extracted
        # source ('******' scrubber artifact between the sxprint calls);
        # reconstructed minimally — confirm exact labels/values upstream.
        sxprint("  => actual lowpass      :  ", ali3d_options.fl)
        sxprint("  => actual init lowpass :  ", ali3d_options.initfl)
        sxprint("  => PW adjustment       :  ", ali3d_options.pwreference)
        sxprint("  => partids             :  ", partids)
        sxprint("  => partstack           :  ", partstack)

    if (ali3d_options.fl > 0.46):
        ERROR("Low pass filter in 3D alignment > 0.46 on the scale of shrank data", myid=myid)

    #  Run alignment command, it returns params per CPU
    params = center_projections_3D(projdata, paramsdict["refvol"], \
        ali3d_options, onx, shrinkage, \
        mpi_comm = MPI_COMM_WORLD,  myid = myid, main_node = main_node, log = log )
    del log, projdata

    params = wrap_mpi_gatherv(params, main_node, MPI_COMM_WORLD)

    #  store params
    if (myid == main_node):
        for im in range(nima):
            # Rescale shifts back to the original (unshrunk) pixel grid and
            # restore the shifts that were baked into the images above.
            # NOTE(review): indices [0] and [1] are assumed to be the shift
            # components of the returned parameter rows — confirm the row
            # layout of center_projections_3D upstream.
            params[im][0] = params[im][0] / shrinkage + oldshifts[im][0]
            params[im][1] = params[im][1] / shrinkage + oldshifts[im][1]
        line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
        sxprint(line, "Executed successfully: ", "3D alignment", "  number of images:%7d" % len(params))
        write_text_row(params, os.path.join(outputdir, "params.txt"))
def main():
    """
    Command-line entry point: assess 2-D alignment stability of a class.

    Runs num_ali independent 2-D alignments of the input stack, feeds the
    collected parameters to multi_align_stability(), and (optionally) writes
    the stable-particle list and an average built from the stable members.

    NOTE(review): original line structure was lost in extraction; indentation
    below is reconstructed from syntax and should be confirmed upstream.
    """
    from sp_utilities import get_input_from_string
    progname = os.path.basename(sys.argv[0])
    usage = progname + " stack outdir <maskfile> --K=2 --nb_part=5  --F=0.9  --T0=2.0 --init_method=Random --normalize --CTF --SA2 --Fl --rand_seed=10"
    usage = progname + " stack output_average --radius=particle_radius --xr=xr --yr=yr --ts=ts --thld_err=thld_err --num_ali=num_ali --fl=fl --aa=aa --CTF --verbose --stables"
    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option("--radius", type="int", default=-1, help=" particle radius for alignment")
    parser.add_option(
        "--xr",
        type="string",
        default="2 1",
        help=
        "range for translation search in x direction, search is +/xr (default 2,1)"
    )
    parser.add_option(
        "--yr",
        type="string",
        default="-1",
        help=
        "range for translation search in y direction, search is +/yr (default = same as xr)"
    )
    parser.add_option(
        "--ts",
        type="string",
        default="1 0.5",
        help=
        "step size of the translation search in both directions, search is -xr, -xr+ts, 0, xr-ts, xr, can be fractional (default: 1,0.5)"
    )
    parser.add_option("--thld_err",
                      type="float",
                      default=0.7,
                      help="threshld of pixel error (default = 0.75)")
    parser.add_option(
        "--num_ali",
        type="int",
        default=5,
        help="number of alignments performed for stability (default = 5)")
    parser.add_option("--maxit",
                      type="int",
                      default=30,
                      help="number of iterations for each xr (default = 30)")
    parser.add_option(
        "--fl",
        type="float",
        default=0.45,
        help=
        "cut-off frequency of hyperbolic tangent low-pass Fourier filter (default = 0.3)"
    )
    parser.add_option(
        "--aa",
        type="float",
        default=0.2,
        help=
        "fall-off of hyperbolic tangent low-pass Fourier filter (default = 0.2)"
    )
    parser.add_option("--CTF",
                      action="store_true",
                      default=False,
                      help="Use CTF correction during the alignment ")
    parser.add_option("--verbose",
                      action="store_true",
                      default=False,
                      help="print individual pixel error (default = False)")
    parser.add_option(
        "--stables",
        action="store_true",
        default=False,
        help="output the stable particles number in file (default = False)")
    parser.add_option(
        "--method",
        type="string",
        default=" ",
        help="SHC (standard method is default when flag is ommitted)")

    (options, args) = parser.parse_args()

    if len(args) != 1 and len(args) != 2:
        sxprint("Usage: " + usage)
        sxprint("Please run \'" + progname + " -h\' for detailed options")
        ERROR(
            "Invalid number of parameters used. Please see usage information above."
        )
        return
    else:
        if sp_global_def.CACHE_DISABLE:
            from sp_utilities import disable_bdb_cache
            disable_bdb_cache()

        from sp_applications import within_group_refinement, ali2d_ras
        from sp_pixel_error import multi_align_stability
        from sp_utilities import write_text_file, write_text_row

        sp_global_def.BATCH = True

        xrng = get_input_from_string(options.xr)
        if options.yr == "-1":
            yrng = xrng
        else:
            yrng = get_input_from_string(options.yr)
        step = get_input_from_string(options.ts)

        class_data = EMData.read_images(args[0])

        nx = class_data[0].get_xsize()
        ou = options.radius
        num_ali = options.num_ali
        if ou == -1:
            ou = nx / 2 - 2
        from sp_utilities import model_circle, get_params2D, set_params2D
        mask = model_circle(ou, nx, nx)

        if options.CTF:
            from sp_filter import filt_ctf
            for im in range(len(class_data)):
                #  Flip phases
                class_data[im] = filt_ctf(class_data[im],
                                          class_data[im].get_attr("ctf"),
                                          binary=1)
        for im in class_data:
            im.set_attr("previousmax", -1.0e10)
            try:
                t = im.get_attr(
                    "xform.align2d")  # if they are there, no need to set them!
            except:
                try:
                    # Fall back to projection shifts as initial 2-D params.
                    t = im.get_attr("xform.projection")
                    d = t.get_params("spider")
                    set_params2D(im, [0.0, -d["tx"], -d["ty"], 0, 1.0])
                except:
                    set_params2D(im, [0.0, 0.0, 0.0, 0, 1.0])

        all_ali_params = []

        # Run num_ali independent randomized alignments; collect the
        # per-particle parameters from each run for the stability analysis.
        for ii in range(num_ali):
            ali_params = []
            if options.verbose:
                ALPHA = []
                SX = []
                SY = []
                MIRROR = []
            if (xrng[0] == 0.0 and yrng[0] == 0.0):
                # Rotational-only alignment when no translational search.
                avet = ali2d_ras(class_data, randomize = True, ir = 1, ou = ou, rs = 1, step = 1.0, dst = 90.0, \
                    maxit = options.maxit, check_mirror = True, FH=options.fl, FF=options.aa)
            else:
                avet = within_group_refinement(class_data, mask, True, 1, ou, 1, xrng, yrng, step, 90.0, \
                    maxit = options.maxit, FH=options.fl, FF=options.aa, method = options.method)
                from sp_utilities import info
                # print "  avet  ",info(avet)
            for im in class_data:
                alpha, sx, sy, mirror, scale = get_params2D(im)
                ali_params.extend([alpha, sx, sy, mirror])
                if options.verbose:
                    ALPHA.append(alpha)
                    SX.append(sx)
                    SY.append(sy)
                    MIRROR.append(mirror)
            all_ali_params.append(ali_params)
            if options.verbose:
                write_text_file([ALPHA, SX, SY, MIRROR],
                                "ali_params_run_%d" % ii)
        """
		avet = class_data[0]
		from sp_utilities import read_text_file
		all_ali_params = []
		for ii in xrange(5):
			temp = read_text_file( "ali_params_run_%d"%ii,-1)
			uuu = []
			for k in xrange(len(temp[0])):
				uuu.extend([temp[0][k],temp[1][k],temp[2][k],temp[3][k]])
			all_ali_params.append(uuu)
		"""

        stable_set, mir_stab_rate, pix_err = multi_align_stability(
            all_ali_params, 0.0, 10000.0, options.thld_err, options.verbose,
            2 * ou + 1)

        sxprint("%4s %20s %20s %20s %30s %6.2f" %
                ("", "Size of set", "Size of stable set", "Mirror stab rate",
                 "Pixel error prior to pruning the set above threshold of",
                 options.thld_err))
        sxprint("Average stat: %10d %20d %20.2f   %15.2f" %
                (len(class_data), len(stable_set), mir_stab_rate, pix_err))
        if (len(stable_set) > 0):
            if options.stables:
                # Write [particle_id, pixel_error, rank] per stable particle.
                stab_mem = [[0, 0.0, 0] for j in range(len(stable_set))]
                for j in range(len(stable_set)):
                    stab_mem[j] = [int(stable_set[j][1]), stable_set[j][0], j]
                write_text_row(stab_mem, "stable_particles.txt")

            stable_set_id = []
            particle_pixerr = []
            for s in stable_set:
                stable_set_id.append(s[1])
                particle_pixerr.append(s[0])
            from sp_fundamentals import rot_shift2D
            # Build the average from the stable members only, applying each
            # particle's consensus alignment parameters.
            avet.to_zero()
            l = -1
            sxprint("average parameters:  angle, x-shift, y-shift, mirror")
            for j in stable_set_id:
                l += 1
                sxprint(" %4d  %4d  %12.2f %12.2f %12.2f        %1d" %
                        (l, j, stable_set[l][2][0], stable_set[l][2][1],
                         stable_set[l][2][2], int(stable_set[l][2][3])))
                avet += rot_shift2D(class_data[j], stable_set[l][2][0],
                                    stable_set[l][2][1], stable_set[l][2][2],
                                    stable_set[l][2][3])
            avet /= (l + 1)
            avet.set_attr('members', stable_set_id)
            avet.set_attr('pix_err', pix_err)
            avet.set_attr('pixerr', particle_pixerr)
            avet.write_image(args[1])

        sp_global_def.BATCH = False