def defocus_gett(roo, voltage=300.0, Pixel_size=1.0, Cs=2.0, wgh=0.1, f_start=0.0, f_stop=-1.0, round_off=1.0, nr1=3, nr2=6, parent=None):
    """
        1. Estimate the envelope function and baseline noise using a constrained simplex method
           so as to extract CTF imprints from the 1D power spectrum.
        2. Based on the extracted CTF imprints, perform an exhaustive defocus search to find the
           defocus that matches the extracted CTF imprints.
    """
    from utilities import generate_ctf
    #print "CTF params:", voltage, Pixel_size, Cs, wgh, f_start, f_stop, round_off, nr1, nr2, parent
    res     = []
    Res_roo = []
    Res_TE  = []
    if f_start == 0:
        i_start = 0
    else:
        i_start = int(Pixel_size*2.*len(roo)*f_start)
    if f_stop <= f_start:
        i_stop = len(roo)
    else:
        i_stop = int(Pixel_size*2.*len(roo)*f_stop)
        if i_stop > len(roo):
            i_stop = len(roo)
    #print "f_start, i_start, f_stop, i_stop:", f_start, i_start, f_stop, i_stop
    TE  = defocus_env_baseline_fit(roo, i_start, i_stop, int(nr1), 4)  # envelope
    Pn1 = defocus_env_baseline_fit(roo, i_start, i_stop, int(nr2), 3)  # baseline noise
    Res_roo = []
    Res_TE  = []
    for i in xrange(len(roo)):
        Res_roo.append(roo[i] - Pn1[i])
        Res_TE.append( TE[i] - Pn1[i])
    defocus = defocus_guess(Res_roo, Res_TE, voltage, Cs, Pixel_size, wgh, i_start, i_stop, 2, round_off)
    nx  = int(len(Res_roo)*2)
    #ctf = ctf_1d(nx, generate_ctf([defocus, Cs, voltage, Pixel_size, 0.0, wgh]))
    ctf = ctf_2(nx, generate_ctf([defocus, Cs, voltage, Pixel_size, 0.0, wgh]))
    if (parent is not None):
        parent.ctf_data = [roo, Res_roo, Res_TE]
        parent.i_start  = i_start
        parent.i_stop   = i_stop
        from utilities import write_text_file
        write_text_file([roo, Res_roo, Res_TE, ctf], "procpw.txt")
    else:
        from utilities import write_text_file
        write_text_file([roo, Res_roo, Res_TE, ctf], "procpw.txt")
    return defocus
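# Hedged sketch of the idea behind defocus_gett(): subtract a smooth baseline from the 1-D
# rotationally averaged power spectrum, then scan candidate defocus values and keep the one
# whose model CTF^2 best correlates with the residual oscillations. All names below are
# illustrative stand-ins; the real code uses defocus_env_baseline_fit()/defocus_guess() from
# utilities and a constrained simplex fit rather than a polynomial baseline.
import numpy as np

def _wavelength_angstrom(voltage_kv):
    # relativistic electron wavelength in Angstrom
    return 12.2643 / np.sqrt(voltage_kv * 1.0e3 * (1.0 + voltage_kv * 0.978466e-3))

def _model_ctf2(s, defocus_um, voltage_kv=300.0, cs_mm=2.0, w=0.1):
    # squared CTF on spatial frequencies s (1/A); defocus in microns, Cs in mm
    lam = _wavelength_angstrom(voltage_kv)
    gamma = np.pi * lam * (defocus_um * 1.0e4) * s**2 - 0.5 * np.pi * (cs_mm * 1.0e7) * lam**3 * s**4
    return (np.sqrt(1.0 - w * w) * np.sin(gamma) + w * np.cos(gamma))**2

def _guess_defocus_sketch(roo, pixel_size, candidates=np.arange(0.2, 6.0, 0.01)):
    roo = np.asarray(roo, dtype=float)
    s = np.arange(len(roo)) / (2.0 * pixel_size * len(roo))   # spatial frequency axis (1/A)
    baseline = np.poly1d(np.polyfit(s, roo, 3))(s)            # crude stand-in for the simplex baseline fit
    residual = roo - baseline                                 # CTF imprints left after baseline removal
    scores = [np.corrcoef(residual, _model_ctf2(s, d))[0, 1] for d in candidates]
    return candidates[int(np.argmax(scores))]                 # exhaustive search over defocus candidates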
def main():
    # open a text file
    dirty_text = read_text_file('alice.txt')
    # clean the text and create a list of words
    word_list = scrub_text(dirty_text)
    # loop through the list of words and count the number of occurrences of each
    word_counts = tally_words(word_list)
    # number of words to show
    n = 15
    # find the top n most frequent words and their counts
    top_n = find_most_frequent(word_counts, n)
    # write the top n words to a text file
    print(write_text_file('top_n_words.txt', top_n, n))
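# Minimal sketch of the helpers this word-count main() assumes (read_text_file, scrub_text,
# tally_words, find_most_frequent, write_text_file are not shown in this file). These are
# hypothetical implementations built on collections.Counter, not the originals.
import re
from collections import Counter

def read_text_file(path):
    with open(path) as f:
        return f.read()

def scrub_text(text):
    # lowercase and keep only alphabetic words (apostrophes allowed)
    return re.findall(r"[a-z']+", text.lower())

def tally_words(words):
    return Counter(words)

def find_most_frequent(counts, n):
    return counts.most_common(n)

def write_text_file(path, top_n, n):
    with open(path, 'w') as f:
        for word, count in top_n:
            f.write("%s %d\n" % (word, count))
    # return a short status string so main() has something meaningful to print
    return "Wrote top %d words to %s" % (n, path)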
def isac_substack(args):
    from utilities import get_im, write_text_file
    from EMAN2db import db_open_dict
    from e2bdb import makerelpath

    # To make the execution exit upon fatal error by ERROR in global_def.py
    global_def.BATCH = True

    # Check error conditions
    subcommand_name = 'isac_substack'
    if not os.path.exists(args.input_isac_class_avgs_path):
        ERROR('Input ISAC class average stack file does not exist. Please check the file path and restart the program.', subcommand_name)  # action=1 - fatal error, exit
    if os.path.exists(args.output_directory):
        ERROR('Output directory exists. Please change the name and restart the program.', subcommand_name)  # action=1 - fatal error, exit
    assert(os.path.exists(args.input_isac_class_avgs_path))
    assert(not os.path.exists(args.output_directory))

    # Create output directory
    os.mkdir(args.output_directory)

    # Retrieve original particle IDs of member particles listed in ISAC class average stack
    n_img_processed = EMUtil.get_image_count(args.input_isac_class_avgs_path)
    isac_substack_particle_id_list = []
    for i_img in xrange(n_img_processed):
        isac_substack_particle_id_list += get_im(args.input_isac_class_avgs_path, i_img).get_attr('members')
    isac_substack_particle_id_list.sort()

    # Save the substack particle id list
    isac_substack_particle_id_list_file_path = os.path.join(args.output_directory, 'isac_substack_particle_id_list.txt')
    write_text_file(isac_substack_particle_id_list, isac_substack_particle_id_list_file_path)

    # Open the output BDB dictionary
    assert(args.output_directory != '')
    output_virtual_bdb_stack_real_path = 'bdb:%s#isac_substack' % args.output_directory
    output_virtual_bdb_stack = db_open_dict(output_virtual_bdb_stack_real_path)

    # Convert an absolute path to the actual output data to a relative path by eliminating any symbolic links
    output_virtual_bdb_stack_real_path = os.path.realpath(output_virtual_bdb_stack.path) + "/"

    # Open the input BDB dictionary
    input_bdb_stack = db_open_dict(args.input_bdb_stack_path, ro=True)  # Read only

    # Copy the header from input to output BDB dictionary
    n_img_detected = len(isac_substack_particle_id_list)
    print_progress("Detected %d ISAC validated particles in %s" % (n_img_detected, args.input_isac_class_avgs_path))
    print(" ")

    # Loop through all ISAC validated particles
    n_img_processed = 0
    for i_img_detected, isac_substack_particle_id in enumerate(isac_substack_particle_id_list):
        # Print progress
        if i_img_detected % 1000 == 0:
            try:
                print_progress("Progress %5.2f%%: Processing %6dth entry (Particle ID %6d)." % (float(i_img_detected)/n_img_detected*100.0, i_img_detected, isac_substack_particle_id))
                sys.stdout.flush()
            except:
                pass

        # Read a particle image header from input bdb stack
        try:
            img_header = input_bdb_stack.get(isac_substack_particle_id, nodata=1).get_attr_dict()  # Need only header information
        except:
            ERROR('Failed to read image header of particle #%d from %s. Skipping this image...' % (isac_substack_particle_id, args.input_bdb_stack_path), subcommand_name, action=0)  # action=0 - non-fatal, print a warning
            continue

        # Convert an absolute path to the actual input data to a relative path by eliminating any symbolic links
        try:
            input_bdb_stack_real_path = os.path.realpath(input_bdb_stack.get_data_path(isac_substack_particle_id))
            # Convert the path to OS-specific format
            if os.name == 'nt':
                output_virtual_bdb_stack_real_path = output_virtual_bdb_stack_real_path.replace("\\", '/')
                input_bdb_stack_real_path = input_bdb_stack_real_path.replace('\\', '/')
            # Takes a pair of paths /a/b/c/d and /a/b/e/f/g and returns a relative path to the second from the first, e.g. ../../e/f/g
            common_relative_path = makerelpath(output_virtual_bdb_stack_real_path, input_bdb_stack_real_path)
        except:
            ERROR('Failed to find a common relative data path for particle image #%d. Skipping this image...' % (isac_substack_particle_id), subcommand_name, action=0)  # action=0 - non-fatal, print a warning
            continue
        assert(img_header["data_path"] != None)

        # Update the image header for output
        img_header["data_path"]   = common_relative_path
        img_header["data_n"]      = isac_substack_particle_id
        img_header["data_source"] = args.input_bdb_stack_path

        # Register the image header to the output virtual bdb stack
        output_virtual_bdb_stack[n_img_processed] = img_header

        # Increment processed image count
        n_img_processed += 1

    # Close input and output bdb stacks
    output_virtual_bdb_stack.close()
    input_bdb_stack.close()

    # Print summary of processing
    print(" ")
    print_progress("Summary of processing...")
    print_progress("Detected  : %6d" % (n_img_detected))
    print_progress("Processed : %6d" % (n_img_processed))
    print(" ")
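# Hedged illustration of why the data_path stored in the virtual stack must be relative: the
# BDB header is read from inside the output directory, so the path to the real image data is
# expressed relative to it. makerelpath() from e2bdb does this in the code above; the paths
# below are made up, and os.path.relpath gives the same kind of result for simple cases.
import os

_output_dir = "/project/run1/outdir/EMAN2DB/"              # hypothetical output BDB location
_input_data = "/project/run1/particles/EMAN2DB/data.bdb"   # hypothetical real data file
print(os.path.relpath(_input_data, _output_dir))           # -> ../../particles/EMAN2DB/data.bdb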
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# ========================================================================================
# Imports
# ========================================================================================
# Python Standard Libraries
from __future__ import print_function
import sys
import os
import argparse

# SPHIRE/EMAN2 Libraries
import global_def
from global_def import *
from utilities import get_im, write_text_file

#=========================================================================================
input_isac_class_avgs_path = sys.argv[1]  # the only command-line argument is the saved class averages file

# Retrieve original particle IDs of member particles listed in the ISAC class average stack
n_img_processed = EMUtil.get_image_count(input_isac_class_avgs_path)
isac_substack_particle_id_list = []
for i_img in xrange(n_img_processed):
    isac_substack_particle_id_list += get_im(input_isac_class_avgs_path, i_img).get_attr('members')
isac_substack_particle_id_list.sort()

# Save the substack particle id list
isac_substack_particle_id_list_file_path = 'isac_substack_particle_id_list.txt'
write_text_file(isac_substack_particle_id_list, isac_substack_particle_id_list_file_path)
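# Quick sanity check (illustrative only, assuming read_text_file from utilities): reload the
# saved id list and report how many member ids the class averages reference.
from utilities import read_text_file
_ids = read_text_file(isac_substack_particle_id_list_file_path)
print("%d member ids, %d unique original particles" % (len(_ids), len(set(_ids))))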
def main(): from utilities import get_input_from_string progname = os.path.basename(sys.argv[0]) usage = progname + " stack output_average --radius=particle_radius --xr=xr --yr=yr --ts=ts --thld_err=thld_err --num_ali=num_ali --fl=fl --aa=aa --CTF --verbose --stables" parser = OptionParser(usage,version=SPARXVERSION) parser.add_option("--radius", type="int", default=-1, help=" particle radius for alignment") parser.add_option("--xr", type="string" , default="2 1", help="range for translation search in x direction, search is +/xr (default 2,1)") parser.add_option("--yr", type="string" , default="-1", help="range for translation search in y direction, search is +/yr (default = same as xr)") parser.add_option("--ts", type="string" , default="1 0.5", help="step size of the translation search in both directions, search is -xr, -xr+ts, 0, xr-ts, xr, can be fractional (default: 1,0.5)") parser.add_option("--thld_err", type="float", default=0.75, help="threshld of pixel error (default = 0.75)") parser.add_option("--num_ali", type="int", default=5, help="number of alignments performed for stability (default = 5)") parser.add_option("--maxit", type="int", default=30, help="number of iterations for each xr (default = 30)") parser.add_option("--fl", type="float" , default=0.3, help="cut-off frequency of hyperbolic tangent low-pass Fourier filter (default = 0.3)") parser.add_option("--aa", type="float" , default=0.2, help="fall-off of hyperbolic tangent low-pass Fourier filter (default = 0.2)") parser.add_option("--CTF", action="store_true", default=False, help="Use CTF correction during the alignment ") parser.add_option("--verbose", action="store_true", default=False, help="print individual pixel error (default = False)") parser.add_option("--stables", action="store_true", default=False, help="output the stable particles number in file (default = False)") parser.add_option("--method", type="string" , default=" ", help="SHC (standard method is default when flag is ommitted)") (options, args) = parser.parse_args() if len(args) != 1 and len(args) != 2: print "usage: " + usage print "Please run '" + progname + " -h' for detailed options" else: if global_def.CACHE_DISABLE: from utilities import disable_bdb_cache disable_bdb_cache() from applications import within_group_refinement, ali2d_ras from pixel_error import multi_align_stability from utilities import write_text_file, write_text_row global_def.BATCH = True xrng = get_input_from_string(options.xr) if options.yr == "-1": yrng = xrng else : yrng = get_input_from_string(options.yr) step = get_input_from_string(options.ts) class_data = EMData.read_images(args[0]) nx = class_data[0].get_xsize() ou = options.radius num_ali = options.num_ali if ou == -1: ou = nx/2-2 from utilities import model_circle, get_params2D, set_params2D mask = model_circle(ou, nx, nx) if options.CTF : from filter import filt_ctf for im in xrange(len(class_data)): # Flip phases class_data[im] = filt_ctf(class_data[im], class_data[im].get_attr("ctf"), binary=1) for im in class_data: im.set_attr("previousmax", -1.0e10) try: t = im.get_attr("xform.align2d") # if they are there, no need to set them! 
except: try: t = im.get_attr("xform.projection") d = t.get_params("spider") set_params2D(im, [0.0, -d["tx"], -d["ty"], 0, 1.0]) except: set_params2D(im, [0.0, 0.0, 0.0, 0, 1.0]) all_ali_params = [] for ii in xrange(num_ali): ali_params = [] if options.verbose: ALPHA = [] SX = [] SY = [] MIRROR = [] if( xrng[0] == 0.0 and yrng[0] == 0.0 ): avet = ali2d_ras(class_data, randomize = True, ir = 1, ou = ou, rs = 1, step = 1.0, dst = 90.0, \ maxit = options.maxit, check_mirror = True, FH=options.fl, FF=options.aa) else: avet = within_group_refinement(class_data, mask, True, 1, ou, 1, xrng, yrng, step, 90.0, \ maxit = options.maxit, FH=options.fl, FF=options.aa, method = options.method) from utilities import info #print " avet ",info(avet) for im in class_data: alpha, sx, sy, mirror, scale = get_params2D(im) ali_params.extend([alpha, sx, sy, mirror]) if options.verbose: ALPHA.append(alpha) SX.append(sx) SY.append(sy) MIRROR.append(mirror) all_ali_params.append(ali_params) if options.verbose: write_text_file([ALPHA, SX, SY, MIRROR], "ali_params_run_%d"%ii) """ avet = class_data[0] from utilities import read_text_file all_ali_params = [] for ii in xrange(5): temp = read_text_file( "ali_params_run_%d"%ii,-1) uuu = [] for k in xrange(len(temp[0])): uuu.extend([temp[0][k],temp[1][k],temp[2][k],temp[3][k]]) all_ali_params.append(uuu) """ stable_set, mir_stab_rate, pix_err = multi_align_stability(all_ali_params, 0.0, 10000.0, options.thld_err, options.verbose, 2*ou+1) print "%4s %20s %20s %20s %30s %6.2f"%("", "Size of set", "Size of stable set", "Mirror stab rate", "Pixel error prior to pruning the set above threshold of",options.thld_err) print "Average stat: %10d %20d %20.2f %15.2f"%( len(class_data), len(stable_set), mir_stab_rate, pix_err) if( len(stable_set) > 0): if options.stables: stab_mem = [[0,0.0,0] for j in xrange(len(stable_set))] for j in xrange(len(stable_set)): stab_mem[j] = [int(stable_set[j][1]), stable_set[j][0], j] write_text_row(stab_mem, "stable_particles.txt") stable_set_id = [] particle_pixerr = [] for s in stable_set: stable_set_id.append(s[1]) particle_pixerr.append(s[0]) from fundamentals import rot_shift2D avet.to_zero() l = -1 print "average parameters: angle, x-shift, y-shift, mirror" for j in stable_set_id: l += 1 print " %4d %4d %12.2f %12.2f %12.2f %1d"%(l,j, stable_set[l][2][0], stable_set[l][2][1], stable_set[l][2][2], int(stable_set[l][2][3])) avet += rot_shift2D(class_data[j], stable_set[l][2][0], stable_set[l][2][1], stable_set[l][2][2], stable_set[l][2][3] ) avet /= (l+1) avet.set_attr('members', stable_set_id) avet.set_attr('pix_err', pix_err) avet.set_attr('pixerr', particle_pixerr) avet.write_image(args[1]) global_def.BATCH = False
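# Illustration (not part of the program above) of the flat parameter layout that
# multi_align_stability() receives: each alignment run contributes one list
# [alpha_0, sx_0, sy_0, mirror_0, alpha_1, sx_1, sy_1, mirror_1, ...], i.e. four entries per image.
def _unpack_ali_params(flat):
    # regroup one flat run into per-image (alpha, sx, sy, mirror) tuples
    return [tuple(flat[4 * i:4 * i + 4]) for i in range(len(flat) // 4)]

_run = [12.0, 1.5, -0.5, 0, 271.3, -2.0, 0.0, 1]   # two images, hypothetical values
print(_unpack_ali_params(_run))                    # [(12.0, 1.5, -0.5, 0), (271.3, -2.0, 0.0, 1)]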
def main(): from logger import Logger, BaseLogger_Files arglist = [] i = 0 while( i < len(sys.argv) ): if sys.argv[i]=='-p4pg': i = i+2 elif sys.argv[i]=='-p4wd': i = i+2 else: arglist.append( sys.argv[i] ) i = i+1 progname = os.path.basename(arglist[0]) usage = progname + " stack outdir <mask> --focus=3Dmask --radius=outer_radius --delta=angular_step" +\ "--an=angular_neighborhood --maxit=max_iter --CTF --sym=c1 --function=user_function --independent=indenpendent_runs --number_of_images_per_group=number_of_images_per_group --low_pass_frequency=.25 --seed=random_seed" parser = OptionParser(usage,version=SPARXVERSION) parser.add_option("--focus", type ="string", default ='', help="bineary 3D mask for focused clustering ") parser.add_option("--ir", type = "int", default =1, help="inner radius for rotational correlation > 0 (set to 1)") parser.add_option("--radius", type = "int", default =-1, help="particle radius in pixel for rotational correlation <nx-1 (set to the radius of the particle)") parser.add_option("--maxit", type = "int", default =25, help="maximum number of iteration") parser.add_option("--rs", type = "int", default =1, help="step between rings in rotational correlation >0 (set to 1)" ) parser.add_option("--xr", type ="string", default ='1', help="range for translation search in x direction, search is +/-xr ") parser.add_option("--yr", type ="string", default ='-1', help="range for translation search in y direction, search is +/-yr (default = same as xr)") parser.add_option("--ts", type ="string", default ='0.25', help="step size of the translation search in both directions direction, search is -xr, -xr+ts, 0, xr-ts, xr ") parser.add_option("--delta", type ="string", default ='2', help="angular step of reference projections") parser.add_option("--an", type ="string", default ='-1', help="angular neighborhood for local searches") parser.add_option("--center", type ="int", default =0, help="0 - if you do not want the volume to be centered, 1 - center the volume using cog (default=0)") parser.add_option("--nassign", type ="int", default =1, help="number of reassignment iterations performed for each angular step (set to 3) ") parser.add_option("--nrefine", type ="int", default =0, help="number of alignment iterations performed for each angular step (set to 0)") parser.add_option("--CTF", action ="store_true", default =False, help="do CTF correction during clustring") parser.add_option("--stoprnct", type ="float", default =3.0, help="Minimum percentage of assignment change to stop the program") parser.add_option("--sym", type ="string", default ='c1', help="symmetry of the structure ") parser.add_option("--function", type ="string", default ='do_volume_mrk05', help="name of the reference preparation function") parser.add_option("--independent", type ="int", default = 3, help="number of independent run") parser.add_option("--number_of_images_per_group", type ="int", default =1000, help="number of groups") parser.add_option("--low_pass_filter", type ="float", default =-1.0, help="absolute frequency of low-pass filter for 3d sorting on the original image size" ) parser.add_option("--nxinit", type ="int", default =64, help="initial image size for sorting" ) parser.add_option("--unaccounted", action ="store_true", default =False, help="reconstruct the unaccounted images") parser.add_option("--seed", type ="int", default =-1, help="random seed for create initial random assignment for EQ Kmeans") parser.add_option("--smallest_group", type ="int", default =500, help="minimum members for 
identified group") parser.add_option("--sausage", action ="store_true", default =False, help="way of filter volume") parser.add_option("--chunkdir", type ="string", default ='', help="chunkdir for computing margin of error") parser.add_option("--PWadjustment", type ="string", default ='', help="1-D power spectrum of PDB file used for EM volume power spectrum correction") parser.add_option("--protein_shape", type ="string", default ='g', help="protein shape. It defines protein preferred orientation angles. Currently it has g and f two types ") parser.add_option("--upscale", type ="float", default =0.5, help=" scaling parameter to adjust the power spectrum of EM volumes") parser.add_option("--wn", type ="int", default =0, help="optimal window size for data processing") parser.add_option("--interpolation", type ="string", default ="4nn", help="3-d reconstruction interpolation method, two options trl and 4nn") (options, args) = parser.parse_args(arglist[1:]) if len(args) < 1 or len(args) > 4: print "usage: " + usage print "Please run '" + progname + " -h' for detailed options" else: if len(args)>2: mask_file = args[2] else: mask_file = None orgstack =args[0] masterdir =args[1] global_def.BATCH = True #---initialize MPI related variables from mpi import mpi_init, mpi_comm_size, MPI_COMM_WORLD, mpi_comm_rank,mpi_barrier,mpi_bcast, mpi_bcast, MPI_INT,MPI_CHAR sys.argv = mpi_init(len(sys.argv),sys.argv) nproc = mpi_comm_size(MPI_COMM_WORLD) myid = mpi_comm_rank(MPI_COMM_WORLD) mpi_comm = MPI_COMM_WORLD main_node= 0 # import some utilities from utilities import get_im,bcast_number_to_all,cmdexecute,write_text_file,read_text_file,wrap_mpi_bcast, get_params_proj, write_text_row from applications import recons3d_n_MPI, mref_ali3d_MPI, Kmref_ali3d_MPI from statistics import k_means_match_clusters_asg_new,k_means_stab_bbenum from applications import mref_ali3d_EQ_Kmeans, ali3d_mref_Kmeans_MPI # Create the main log file from logger import Logger,BaseLogger_Files if myid ==main_node: log_main=Logger(BaseLogger_Files()) log_main.prefix = masterdir+"/" else: log_main =None #--- fill input parameters into dictionary named after Constants Constants ={} Constants["stack"] = args[0] Constants["masterdir"] = masterdir Constants["mask3D"] = mask_file Constants["focus3Dmask"] = options.focus Constants["indep_runs"] = options.independent Constants["stoprnct"] = options.stoprnct Constants["number_of_images_per_group"] = options.number_of_images_per_group Constants["CTF"] = options.CTF Constants["maxit"] = options.maxit Constants["ir"] = options.ir Constants["radius"] = options.radius Constants["nassign"] = options.nassign Constants["rs"] = options.rs Constants["xr"] = options.xr Constants["yr"] = options.yr Constants["ts"] = options.ts Constants["delta"] = options.delta Constants["an"] = options.an Constants["sym"] = options.sym Constants["center"] = options.center Constants["nrefine"] = options.nrefine #Constants["fourvar"] = options.fourvar Constants["user_func"] = options.function Constants["low_pass_filter"] = options.low_pass_filter # enforced low_pass_filter #Constants["debug"] = options.debug Constants["main_log_prefix"] = args[1] #Constants["importali3d"] = options.importali3d Constants["myid"] = myid Constants["main_node"] = main_node Constants["nproc"] = nproc Constants["log_main"] = log_main Constants["nxinit"] = options.nxinit Constants["unaccounted"] = options.unaccounted Constants["seed"] = options.seed Constants["smallest_group"] = options.smallest_group Constants["sausage"] = options.sausage 
Constants["chunkdir"] = options.chunkdir Constants["PWadjustment"] = options.PWadjustment Constants["upscale"] = options.upscale Constants["wn"] = options.wn Constants["3d-interpolation"] = options.interpolation Constants["protein_shape"] = options.protein_shape # ----------------------------------------------------- # # Create and initialize Tracker dictionary with input options Tracker = {} Tracker["constants"] = Constants Tracker["maxit"] = Tracker["constants"]["maxit"] Tracker["radius"] = Tracker["constants"]["radius"] #Tracker["xr"] = "" #Tracker["yr"] = "-1" # Do not change! #Tracker["ts"] = 1 #Tracker["an"] = "-1" #Tracker["delta"] = "2.0" #Tracker["zoom"] = True #Tracker["nsoft"] = 0 #Tracker["local"] = False #Tracker["PWadjustment"] = Tracker["constants"]["PWadjustment"] Tracker["upscale"] = Tracker["constants"]["upscale"] #Tracker["upscale"] = 0.5 Tracker["applyctf"] = False # Should the data be premultiplied by the CTF. Set to False for local continuous. #Tracker["refvol"] = None Tracker["nxinit"] = Tracker["constants"]["nxinit"] #Tracker["nxstep"] = 32 Tracker["icurrentres"] = -1 #Tracker["ireachedres"] = -1 #Tracker["lowpass"] = 0.4 #Tracker["falloff"] = 0.2 #Tracker["inires"] = options.inires # Now in A, convert to absolute before using Tracker["fuse_freq"] = 50 # Now in A, convert to absolute before using #Tracker["delpreviousmax"] = False #Tracker["anger"] = -1.0 #Tracker["shifter"] = -1.0 #Tracker["saturatecrit"] = 0.95 #Tracker["pixercutoff"] = 2.0 #Tracker["directory"] = "" #Tracker["previousoutputdir"] = "" #Tracker["eliminated-outliers"] = False #Tracker["mainiteration"] = 0 #Tracker["movedback"] = False #Tracker["state"] = Tracker["constants"]["states"][0] #Tracker["global_resolution"] =0.0 Tracker["orgstack"] = orgstack #-------------------------------------------------------------------- # import from utilities from utilities import sample_down_1D_curve,get_initial_ID,remove_small_groups,print_upper_triangular_matrix,print_a_line_with_timestamp from utilities import print_dict,get_resolution_mrk01,partition_to_groups,partition_independent_runs,get_outliers from utilities import merge_groups, save_alist, margin_of_error, get_margin_of_error, do_two_way_comparison, select_two_runs, get_ali3d_params from utilities import counting_projections, unload_dict, load_dict, get_stat_proj, create_random_list, get_number_of_groups, recons_mref from utilities import apply_low_pass_filter, get_groups_from_partition, get_number_of_groups, get_complementary_elements_total, update_full_dict from utilities import count_chunk_members, set_filter_parameters_from_adjusted_fsc, adjust_fsc_down, get_two_chunks_from_stack ####------------------------------------------------------------------ # # Get the pixel size; if none, set to 1.0, and the original image size from utilities import get_shrink_data_huang if(myid == main_node): line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>" print(line+"Initialization of 3-D sorting") a = get_im(orgstack) nnxo = a.get_xsize() if( Tracker["nxinit"] > nnxo ): ERROR("Image size less than minimum permitted $d"%Tracker["nxinit"],"sxsort3d.py",1) nnxo = -1 else: if Tracker["constants"]["CTF"]: i = a.get_attr('ctf') pixel_size = i.apix fq = pixel_size/Tracker["fuse_freq"] else: pixel_size = 1.0 # No pixel size, fusing computed as 5 Fourier pixels fq = 5.0/nnxo del a else: nnxo = 0 fq = 0.0 pixel_size = 1.0 nnxo = bcast_number_to_all(nnxo, source_node = main_node) if( nnxo < 0 ): mpi_finalize() exit() pixel_size = bcast_number_to_all(pixel_size, 
source_node = main_node) fq = bcast_number_to_all(fq, source_node = main_node) if Tracker["constants"]["wn"]==0: Tracker["constants"]["nnxo"] = nnxo else: Tracker["constants"]["nnxo"] = Tracker["constants"]["wn"] nnxo = Tracker["constants"]["nnxo"] Tracker["constants"]["pixel_size"] = pixel_size Tracker["fuse_freq"] = fq del fq, nnxo, pixel_size if(Tracker["constants"]["radius"] < 1): Tracker["constants"]["radius"] = Tracker["constants"]["nnxo"]//2-2 elif((2*Tracker["constants"]["radius"] +2) > Tracker["constants"]["nnxo"]): ERROR("Particle radius set too large!","sxsort3d.py",1,myid) ####----------------------------------------------------------------------------------------- # Master directory if myid == main_node: if masterdir =="": timestring = strftime("_%d_%b_%Y_%H_%M_%S", localtime()) masterdir ="master_sort3d"+timestring li =len(masterdir) cmd="{} {}".format("mkdir", masterdir) os.system(cmd) else: li=0 li = mpi_bcast(li,1,MPI_INT,main_node,MPI_COMM_WORLD)[0] if li>0: masterdir = mpi_bcast(masterdir,li,MPI_CHAR,main_node,MPI_COMM_WORLD) import string masterdir = string.join(masterdir,"") if myid ==main_node: print_dict(Tracker["constants"],"Permanent settings of 3-D sorting program") ######### create a vstack from input stack to the local stack in masterdir # stack name set to default Tracker["constants"]["stack"] = "bdb:"+masterdir+"/rdata" Tracker["constants"]["ali3d"] = os.path.join(masterdir, "ali3d_init.txt") Tracker["constants"]["ctf_params"] = os.path.join(masterdir, "ctf_params.txt") Tracker["constants"]["partstack"] = Tracker["constants"]["ali3d"] # also serves for refinement if myid == main_node: total_stack = EMUtil.get_image_count(Tracker["orgstack"]) else: total_stack = 0 total_stack = bcast_number_to_all(total_stack, source_node = main_node) mpi_barrier(MPI_COMM_WORLD) from time import sleep while not os.path.exists(masterdir): print "Node ",myid," waiting..." 
sleep(5) mpi_barrier(MPI_COMM_WORLD) if myid == main_node: log_main.add("Sphire sort3d ") log_main.add("the sort3d master directory is "+masterdir) ##### ###---------------------------------------------------------------------------------- # Initial data analysis and handle two chunk files from random import shuffle # Compute the resolution #### make chunkdir dictionary for computing margin of error import user_functions user_func = user_functions.factory[Tracker["constants"]["user_func"]] chunk_dict = {} chunk_list = [] if myid == main_node: chunk_one = read_text_file(os.path.join(Tracker["constants"]["chunkdir"],"chunk0.txt")) chunk_two = read_text_file(os.path.join(Tracker["constants"]["chunkdir"],"chunk1.txt")) else: chunk_one = 0 chunk_two = 0 chunk_one = wrap_mpi_bcast(chunk_one, main_node) chunk_two = wrap_mpi_bcast(chunk_two, main_node) mpi_barrier(MPI_COMM_WORLD) ######################## Read/write bdb: data on main node ############################ if myid==main_node: if(orgstack[:4] == "bdb:"): cmd = "{} {} {}".format("e2bdb.py", orgstack,"--makevstack="+Tracker["constants"]["stack"]) else: cmd = "{} {} {}".format("sxcpy.py", orgstack, Tracker["constants"]["stack"]) cmdexecute(cmd) cmd = "{} {} {}".format("sxheader.py --params=xform.projection", "--export="+Tracker["constants"]["ali3d"],orgstack) cmdexecute(cmd) cmd = "{} {} {}".format("sxheader.py --params=ctf", "--export="+Tracker["constants"]["ctf_params"],orgstack) cmdexecute(cmd) mpi_barrier(MPI_COMM_WORLD) ########----------------------------------------------------------------------------- Tracker["total_stack"] = total_stack Tracker["constants"]["total_stack"] = total_stack Tracker["shrinkage"] = float(Tracker["nxinit"])/Tracker["constants"]["nnxo"] Tracker["radius"] = Tracker["constants"]["radius"]*Tracker["shrinkage"] if Tracker["constants"]["mask3D"]: Tracker["mask3D"] = os.path.join(masterdir,"smask.hdf") else: Tracker["mask3D"] = None if Tracker["constants"]["focus3Dmask"]: Tracker["focus3D"] = os.path.join(masterdir,"sfocus.hdf") else: Tracker["focus3D"] = None if myid == main_node: if Tracker["constants"]["mask3D"]: mask_3D = get_shrink_3dmask(Tracker["nxinit"],Tracker["constants"]["mask3D"]) mask_3D.write_image(Tracker["mask3D"]) if Tracker["constants"]["focus3Dmask"]: mask_3D = get_shrink_3dmask(Tracker["nxinit"],Tracker["constants"]["focus3Dmask"]) st = Util.infomask(mask_3D, None, True) if( st[0] == 0.0 ): ERROR("sxrsort3d","incorrect focused mask, after binarize all values zero",1) mask_3D.write_image(Tracker["focus3D"]) del mask_3D if Tracker["constants"]["PWadjustment"] !='': PW_dict = {} nxinit_pwsp = sample_down_1D_curve(Tracker["constants"]["nxinit"],Tracker["constants"]["nnxo"],Tracker["constants"]["PWadjustment"]) Tracker["nxinit_PW"] = os.path.join(masterdir,"spwp.txt") if myid == main_node: write_text_file(nxinit_pwsp,Tracker["nxinit_PW"]) PW_dict[Tracker["constants"]["nnxo"]] = Tracker["constants"]["PWadjustment"] PW_dict[Tracker["constants"]["nxinit"]] = Tracker["nxinit_PW"] Tracker["PW_dict"] = PW_dict mpi_barrier(MPI_COMM_WORLD) #-----------------------From two chunks to FSC, and low pass filter-----------------------------------------### for element in chunk_one: chunk_dict[element] = 0 for element in chunk_two: chunk_dict[element] = 1 chunk_list =[chunk_one, chunk_two] Tracker["chunk_dict"] = chunk_dict Tracker["P_chunk0"] = len(chunk_one)/float(total_stack) Tracker["P_chunk1"] = len(chunk_two)/float(total_stack) ### create two volumes to estimate resolution if myid == main_node: for index in 
xrange(2): write_text_file(chunk_list[index],os.path.join(masterdir,"chunk%01d.txt"%index)) mpi_barrier(MPI_COMM_WORLD) vols = [] for index in xrange(2): data,old_shifts = get_shrink_data_huang(Tracker,Tracker["constants"]["nxinit"], os.path.join(masterdir,"chunk%01d.txt"%index), Tracker["constants"]["partstack"],myid,main_node,nproc,preshift=True) vol = recons3d_4nn_ctf_MPI(myid=myid, prjlist=data,symmetry=Tracker["constants"]["sym"], finfo=None) if myid == main_node: vol.write_image(os.path.join(masterdir, "vol%d.hdf"%index)) vols.append(vol) mpi_barrier(MPI_COMM_WORLD) if myid ==main_node: low_pass, falloff,currentres = get_resolution_mrk01(vols,Tracker["constants"]["radius"],Tracker["constants"]["nxinit"],masterdir,Tracker["mask3D"]) if low_pass >Tracker["constants"]["low_pass_filter"]: low_pass= Tracker["constants"]["low_pass_filter"] else: low_pass =0.0 falloff =0.0 currentres =0.0 bcast_number_to_all(currentres,source_node = main_node) bcast_number_to_all(low_pass,source_node = main_node) bcast_number_to_all(falloff,source_node = main_node) Tracker["currentres"] = currentres Tracker["falloff"] = falloff if Tracker["constants"]["low_pass_filter"] ==-1.0: Tracker["low_pass_filter"] = min(.45,low_pass/Tracker["shrinkage"]) # no better than .45 else: Tracker["low_pass_filter"] = min(.45,Tracker["constants"]["low_pass_filter"]/Tracker["shrinkage"]) Tracker["lowpass"] = Tracker["low_pass_filter"] Tracker["falloff"] =.1 Tracker["global_fsc"] = os.path.join(masterdir, "fsc.txt") ############################################################################################ if myid == main_node: log_main.add("The command-line inputs are as following:") log_main.add("**********************************************************") for a in sys.argv: if myid == main_node:log_main.add(a) if myid == main_node: log_main.add("number of cpus used in this run is %d"%Tracker["constants"]["nproc"]) log_main.add("**********************************************************") from filter import filt_tanl ### START 3-D sorting if myid ==main_node: log_main.add("----------3-D sorting program------- ") log_main.add("current resolution %6.3f for images of original size in terms of absolute frequency"%Tracker["currentres"]) log_main.add("equivalent to %f Angstrom resolution"%(Tracker["constants"]["pixel_size"]/Tracker["currentres"]/Tracker["shrinkage"])) log_main.add("the user provided enforced low_pass_filter is %f"%Tracker["constants"]["low_pass_filter"]) #log_main.add("equivalent to %f Angstrom resolution"%(Tracker["constants"]["pixel_size"]/Tracker["constants"]["low_pass_filter"])) for index in xrange(2): filt_tanl(get_im(os.path.join(masterdir,"vol%01d.hdf"%index)), Tracker["low_pass_filter"],Tracker["falloff"]).write_image(os.path.join(masterdir, "volf%01d.hdf"%index)) mpi_barrier(MPI_COMM_WORLD) from utilities import get_input_from_string delta = get_input_from_string(Tracker["constants"]["delta"]) delta = delta[0] from utilities import even_angles n_angles = even_angles(delta, 0, 180) this_ali3d = Tracker["constants"]["ali3d"] sampled = get_stat_proj(Tracker,delta,this_ali3d) if myid ==main_node: nc = 0 for a in sampled: if len(sampled[a])>0: nc += 1 log_main.add("total sampled direction %10d at angle step %6.3f"%(len(n_angles), delta)) log_main.add("captured sampled directions %10d percentage covered by data %6.3f"%(nc,float(nc)/len(n_angles)*100)) number_of_images_per_group = Tracker["constants"]["number_of_images_per_group"] if myid ==main_node: log_main.add("user provided number_of_images_per_group 
%d"%number_of_images_per_group) Tracker["number_of_images_per_group"] = number_of_images_per_group number_of_groups = get_number_of_groups(total_stack,number_of_images_per_group) Tracker["number_of_groups"] = number_of_groups generation =0 partition_dict ={} full_dict ={} workdir =os.path.join(masterdir,"generation%03d"%generation) Tracker["this_dir"] = workdir if myid ==main_node: log_main.add("---- generation %5d"%generation) log_main.add("number of images per group is set as %d"%number_of_images_per_group) log_main.add("the initial number of groups is %10d "%number_of_groups) cmd="{} {}".format("mkdir",workdir) os.system(cmd) mpi_barrier(MPI_COMM_WORLD) list_to_be_processed = range(Tracker["constants"]["total_stack"]) Tracker["this_data_list"] = list_to_be_processed create_random_list(Tracker) ################################# full_dict ={} for iptl in xrange(Tracker["constants"]["total_stack"]): full_dict[iptl] = iptl Tracker["full_ID_dict"] = full_dict ################################# for indep_run in xrange(Tracker["constants"]["indep_runs"]): Tracker["this_particle_list"] = Tracker["this_indep_list"][indep_run] ref_vol = recons_mref(Tracker) if myid == main_node: log_main.add("independent run %10d"%indep_run) mpi_barrier(MPI_COMM_WORLD) Tracker["this_data_list"] = list_to_be_processed Tracker["total_stack"] = len(Tracker["this_data_list"]) Tracker["this_particle_text_file"] = os.path.join(workdir,"independent_list_%03d.txt"%indep_run) # for get_shrink_data if myid == main_node: write_text_file(Tracker["this_data_list"], Tracker["this_particle_text_file"]) mpi_barrier(MPI_COMM_WORLD) outdir = os.path.join(workdir, "EQ_Kmeans%03d"%indep_run) ref_vol = apply_low_pass_filter(ref_vol,Tracker) mref_ali3d_EQ_Kmeans(ref_vol, outdir, Tracker["this_particle_text_file"], Tracker) partition_dict[indep_run]=Tracker["this_partition"] Tracker["partition_dict"] = partition_dict Tracker["total_stack"] = len(Tracker["this_data_list"]) Tracker["this_total_stack"] = Tracker["total_stack"] ############################### do_two_way_comparison(Tracker) ############################### ref_vol_list = [] from time import sleep number_of_ref_class = [] for igrp in xrange(len(Tracker["two_way_stable_member"])): Tracker["this_data_list"] = Tracker["two_way_stable_member"][igrp] Tracker["this_data_list_file"] = os.path.join(workdir,"stable_class%d.txt"%igrp) if myid == main_node: write_text_file(Tracker["this_data_list"], Tracker["this_data_list_file"]) data,old_shifts = get_shrink_data_huang(Tracker,Tracker["nxinit"], Tracker["this_data_list_file"], Tracker["constants"]["partstack"], myid, main_node, nproc, preshift = True) volref = recons3d_4nn_ctf_MPI(myid=myid, prjlist = data, symmetry=Tracker["constants"]["sym"], finfo = None) ref_vol_list.append(volref) number_of_ref_class.append(len(Tracker["this_data_list"])) if myid == main_node: log_main.add("group %d members %d "%(igrp,len(Tracker["this_data_list"]))) Tracker["number_of_ref_class"] = number_of_ref_class nx_of_image = ref_vol_list[0].get_xsize() if Tracker["constants"]["PWadjustment"]: Tracker["PWadjustment"] = Tracker["PW_dict"][nx_of_image] else: Tracker["PWadjustment"] = Tracker["constants"]["PWadjustment"] # no PW adjustment if myid == main_node: for iref in xrange(len(ref_vol_list)): refdata = [None]*4 refdata[0] = ref_vol_list[iref] refdata[1] = Tracker refdata[2] = Tracker["constants"]["myid"] refdata[3] = Tracker["constants"]["nproc"] volref = user_func(refdata) volref.write_image(os.path.join(workdir,"volf_stable.hdf"),iref) 
mpi_barrier(MPI_COMM_WORLD) Tracker["this_data_list"] = Tracker["this_accounted_list"] outdir = os.path.join(workdir,"Kmref") empty_group, res_groups, final_list = ali3d_mref_Kmeans_MPI(ref_vol_list,outdir,Tracker["this_accounted_text"],Tracker) Tracker["this_unaccounted_list"] = get_complementary_elements(list_to_be_processed,final_list) if myid == main_node: log_main.add("the number of particles not processed is %d"%len(Tracker["this_unaccounted_list"])) write_text_file(Tracker["this_unaccounted_list"],Tracker["this_unaccounted_text"]) update_full_dict(Tracker["this_unaccounted_list"], Tracker) ####################################### number_of_groups = len(res_groups) vol_list = [] number_of_ref_class = [] for igrp in xrange(number_of_groups): data,old_shifts = get_shrink_data_huang(Tracker, Tracker["constants"]["nnxo"], os.path.join(outdir,"Class%d.txt"%igrp), Tracker["constants"]["partstack"],myid,main_node,nproc,preshift = True) volref = recons3d_4nn_ctf_MPI(myid=myid, prjlist = data, symmetry=Tracker["constants"]["sym"], finfo=None) vol_list.append(volref) if( myid == main_node ): npergroup = len(read_text_file(os.path.join(outdir,"Class%d.txt"%igrp))) else: npergroup = 0 npergroup = bcast_number_to_all(npergroup, main_node ) number_of_ref_class.append(npergroup) Tracker["number_of_ref_class"] = number_of_ref_class mpi_barrier(MPI_COMM_WORLD) nx_of_image = vol_list[0].get_xsize() if Tracker["constants"]["PWadjustment"]: Tracker["PWadjustment"]=Tracker["PW_dict"][nx_of_image] else: Tracker["PWadjustment"]=Tracker["constants"]["PWadjustment"] if myid == main_node: for ivol in xrange(len(vol_list)): refdata =[None]*4 refdata[0] = vol_list[ivol] refdata[1] = Tracker refdata[2] = Tracker["constants"]["myid"] refdata[3] = Tracker["constants"]["nproc"] volref = user_func(refdata) volref.write_image(os.path.join(workdir,"volf_of_Classes.hdf"),ivol) log_main.add("number of unaccounted particles %10d"%len(Tracker["this_unaccounted_list"])) log_main.add("number of accounted particles %10d"%len(Tracker["this_accounted_list"])) Tracker["this_data_list"] = Tracker["this_unaccounted_list"] # reset parameters for the next round calculation Tracker["total_stack"] = len(Tracker["this_unaccounted_list"]) Tracker["this_total_stack"] = Tracker["total_stack"] number_of_groups = get_number_of_groups(len(Tracker["this_unaccounted_list"]),number_of_images_per_group) Tracker["number_of_groups"] = number_of_groups while number_of_groups >= 2 : generation +=1 partition_dict ={} workdir =os.path.join(masterdir,"generation%03d"%generation) Tracker["this_dir"] = workdir if myid ==main_node: log_main.add("*********************************************") log_main.add("----- generation %5d "%generation) log_main.add("number of images per group is set as %10d "%number_of_images_per_group) log_main.add("the number of groups is %10d "%number_of_groups) log_main.add(" number of particles for clustering is %10d"%Tracker["total_stack"]) cmd ="{} {}".format("mkdir",workdir) os.system(cmd) mpi_barrier(MPI_COMM_WORLD) create_random_list(Tracker) for indep_run in xrange(Tracker["constants"]["indep_runs"]): Tracker["this_particle_list"] = Tracker["this_indep_list"][indep_run] ref_vol = recons_mref(Tracker) if myid == main_node: log_main.add("independent run %10d"%indep_run) outdir = os.path.join(workdir, "EQ_Kmeans%03d"%indep_run) Tracker["this_data_list"] = Tracker["this_unaccounted_list"] #ref_vol=apply_low_pass_filter(ref_vol,Tracker) mref_ali3d_EQ_Kmeans(ref_vol,outdir,Tracker["this_unaccounted_text"],Tracker) 
partition_dict[indep_run] = Tracker["this_partition"] Tracker["this_data_list"] = Tracker["this_unaccounted_list"] Tracker["total_stack"] = len(Tracker["this_unaccounted_list"]) Tracker["partition_dict"] = partition_dict Tracker["this_total_stack"] = Tracker["total_stack"] total_list_of_this_run = Tracker["this_unaccounted_list"] ############################### do_two_way_comparison(Tracker) ############################### ref_vol_list = [] number_of_ref_class = [] for igrp in xrange(len(Tracker["two_way_stable_member"])): Tracker["this_data_list"] = Tracker["two_way_stable_member"][igrp] Tracker["this_data_list_file"] = os.path.join(workdir,"stable_class%d.txt"%igrp) if myid == main_node: write_text_file(Tracker["this_data_list"], Tracker["this_data_list_file"]) mpi_barrier(MPI_COMM_WORLD) data,old_shifts = get_shrink_data_huang(Tracker,Tracker["constants"]["nxinit"],Tracker["this_data_list_file"],Tracker["constants"]["partstack"],myid,main_node,nproc,preshift = True) volref = recons3d_4nn_ctf_MPI(myid=myid, prjlist = data, symmetry=Tracker["constants"]["sym"],finfo= None) #volref = filt_tanl(volref, Tracker["constants"]["low_pass_filter"],.1) if myid == main_node:volref.write_image(os.path.join(workdir,"vol_stable.hdf"),iref) #volref = resample(volref,Tracker["shrinkage"]) ref_vol_list.append(volref) number_of_ref_class.append(len(Tracker["this_data_list"])) mpi_barrier(MPI_COMM_WORLD) Tracker["number_of_ref_class"] = number_of_ref_class Tracker["this_data_list"] = Tracker["this_accounted_list"] outdir = os.path.join(workdir,"Kmref") empty_group, res_groups, final_list = ali3d_mref_Kmeans_MPI(ref_vol_list,outdir,Tracker["this_accounted_text"],Tracker) # calculate the 3-D structure of original image size for each group number_of_groups = len(res_groups) Tracker["this_unaccounted_list"] = get_complementary_elements(total_list_of_this_run,final_list) if myid == main_node: log_main.add("the number of particles not processed is %d"%len(Tracker["this_unaccounted_list"])) write_text_file(Tracker["this_unaccounted_list"],Tracker["this_unaccounted_text"]) mpi_barrier(MPI_COMM_WORLD) update_full_dict(Tracker["this_unaccounted_list"],Tracker) vol_list = [] for igrp in xrange(number_of_groups): data,old_shifts = get_shrink_data_huang(Tracker,Tracker["constants"]["nnxo"], os.path.join(outdir,"Class%d.txt"%igrp), Tracker["constants"]["partstack"], myid, main_node, nproc,preshift = True) volref = recons3d_4nn_ctf_MPI(myid=myid, prjlist = data, symmetry=Tracker["constants"]["sym"],finfo= None) vol_list.append(volref) mpi_barrier(MPI_COMM_WORLD) nx_of_image=ref_vol_list[0].get_xsize() if Tracker["constants"]["PWadjustment"]: Tracker["PWadjustment"] = Tracker["PW_dict"][nx_of_image] else: Tracker["PWadjustment"] = Tracker["constants"]["PWadjustment"] if myid == main_node: for ivol in xrange(len(vol_list)): refdata = [None]*4 refdata[0] = vol_list[ivol] refdata[1] = Tracker refdata[2] = Tracker["constants"]["myid"] refdata[3] = Tracker["constants"]["nproc"] volref = user_func(refdata) volref.write_image(os.path.join(workdir, "volf_of_Classes.hdf"),ivol) log_main.add("number of unaccounted particles %10d"%len(Tracker["this_unaccounted_list"])) log_main.add("number of accounted particles %10d"%len(Tracker["this_accounted_list"])) del vol_list mpi_barrier(MPI_COMM_WORLD) number_of_groups = get_number_of_groups(len(Tracker["this_unaccounted_list"]),number_of_images_per_group) Tracker["number_of_groups"] = number_of_groups Tracker["this_data_list"] = Tracker["this_unaccounted_list"] Tracker["total_stack"] = 
len(Tracker["this_unaccounted_list"]) if Tracker["constants"]["unaccounted"]: data,old_shifts = get_shrink_data_huang(Tracker,Tracker["constants"]["nnxo"],Tracker["this_unaccounted_text"],Tracker["constants"]["partstack"],myid,main_node,nproc,preshift = True) volref = recons3d_4nn_ctf_MPI(myid=myid, prjlist = data, symmetry=Tracker["constants"]["sym"],finfo= None) nx_of_image = volref.get_xsize() if Tracker["constants"]["PWadjustment"]: Tracker["PWadjustment"]=Tracker["PW_dict"][nx_of_image] else: Tracker["PWadjustment"]=Tracker["constants"]["PWadjustment"] if( myid == main_node ): refdata = [None]*4 refdata[0] = volref refdata[1] = Tracker refdata[2] = Tracker["constants"]["myid"] refdata[3] = Tracker["constants"]["nproc"] volref = user_func(refdata) #volref = filt_tanl(volref, Tracker["constants"]["low_pass_filter"],.1) volref.write_image(os.path.join(workdir,"volf_unaccounted.hdf")) # Finish program if myid ==main_node: log_main.add("sxsort3d finishes") mpi_barrier(MPI_COMM_WORLD) from mpi import mpi_finalize mpi_finalize() exit()
def found_outliers(list_of_projection_indices, outlier_percentile, rviper_iter, masterdir, bdb_stack_location, outlier_index_threshold_method, angle_threshold):
    # sxheader.py bdb:nj --consecutive --params=OID
    import numpy as np

    mainoutputdir = masterdir + DIR_DELIM + NAME_OF_MAIN_DIR + ("%03d" + DIR_DELIM) % (rviper_iter)

    # if this data analysis step was already performed in the past then return
    for check_run in list_of_projection_indices:
        if not (os.path.exists(mainoutputdir + DIR_DELIM + NAME_OF_RUN_DIR + "%03d" % (check_run) + DIR_DELIM + "rotated_reduced_params.txt")):
            break
    else:
        return

    print "identify_outliers"
    projs = []
    for i1 in list_of_projection_indices:
        projs.append(read_text_row(mainoutputdir + NAME_OF_RUN_DIR + "%03d" % (i1) + DIR_DELIM + "params.txt"))

    # ti1, ti3, out = find_common_subset(projs, 1.0)
    subset, avg_diff_per_image, rotated_params = find_common_subset(projs, target_threshold=0)
    # subset, avg_diff_per_image, rotated_params = find_common_subset(projs, target_threshold=1.0)

    error_values_and_indices = []
    for i in xrange(len(avg_diff_per_image)):
        error_values_and_indices.append([avg_diff_per_image[i], i])
    del subset, avg_diff_per_image
    error_values_and_indices.sort()

    if outlier_index_threshold_method == "discontinuity_in_derivative":
        outlier_index_threshold = find_index_of_discontinuity_in_derivative([i[0] for i in error_values_and_indices], list_of_projection_indices, mainoutputdir, outlier_percentile)
    elif outlier_index_threshold_method == "percentile":
        outlier_index_threshold = int(outlier_percentile * (len(error_values_and_indices) - 1) / 100.0)
    elif outlier_index_threshold_method == "angle_measure":
        error_values = [i[0] for i in error_values_and_indices]
        outlier_index_threshold = min(range(len(error_values)), key=lambda i: abs(error_values[i] - angle_threshold))
    elif outlier_index_threshold_method == "use all images":
        outlier_index_threshold = len(error_values_and_indices)

    index_keep_images = [i[1] for i in error_values_and_indices[:outlier_index_threshold]]
    index_outliers = [i[1] for i in error_values_and_indices[outlier_index_threshold:]]
    # print "error_values_and_indices: %f"%error_values_and_indices
    print "index_outliers: ", index_outliers

    import copy
    reversed_sorted_index_outliers = copy.deepcopy(index_outliers)
    reversed_sorted_index_outliers.sort(reverse=True)
    for k in xrange(len(projs)):
        for l in reversed_sorted_index_outliers:
            del rotated_params[k][l]

    index_outliers.sort()
    index_keep_images.sort()
    write_text_file(index_outliers, mainoutputdir + "this_iteration_index_outliers.txt")
    write_text_file(index_keep_images, mainoutputdir + "this_iteration_index_keep_images.txt")

    #if len(index_outliers) < 3:
    #	return False

    if len(index_outliers) > 0:
        cmd = "{} {} {} {}".format("e2bdb.py ", bdb_stack_location + "_%03d" % (rviper_iter - 1), "--makevstack=" + bdb_stack_location + "_outliers_%03d" % (rviper_iter), "--list=" + mainoutputdir + "this_iteration_index_outliers.txt")
        cmdexecute(cmd)
    cmd = "{} {} {} {}".format("e2bdb.py ", bdb_stack_location + "_%03d" % (rviper_iter - 1), "--makevstack=" + bdb_stack_location + "_%03d" % (rviper_iter), "--list=" + mainoutputdir + "this_iteration_index_keep_images.txt")
    cmdexecute(cmd)

    dat = EMData.read_images(bdb_stack_location + "_%03d" % (rviper_iter - 1))
    write_text_file([dat[i].get_attr("original_image_index") for i in index_outliers], mainoutputdir + "index_outliers.txt")
    write_text_file([dat[i].get_attr("original_image_index") for i in index_keep_images], mainoutputdir + "index_keep_images.txt")
    print "index_outliers:: " + str(index_outliers)

    # write rotated param files
    for i1 in range(len(list_of_projection_indices)):
        write_text_row(rotated_params[i1], mainoutputdir + NAME_OF_RUN_DIR + "%03d" % (list_of_projection_indices[i1]) + DIR_DELIM + "rotated_reduced_params.txt")
    return True
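# Small illustration of the "percentile" thresholding branch in found_outliers(): images are
# sorted by their average angular error and everything above the percentile index is treated
# as an outlier. The error values below are made up.
_avg_diff_per_image = [0.4, 2.1, 0.7, 9.5, 1.2]     # hypothetical per-image errors
_outlier_percentile = 80.0
_pairs = sorted((err, idx) for idx, err in enumerate(_avg_diff_per_image))
_cut = int(_outlier_percentile * (len(_pairs) - 1) / 100.0)
_keep = sorted(idx for _, idx in _pairs[:_cut])
_outliers = sorted(idx for _, idx in _pairs[_cut:])
print("keep: %s  outliers: %s" % (_keep, _outliers))   # keep: [0, 2, 4]  outliers: [1, 3]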
def kernel(projections, stable_subset, target_threshold, options, minimal_subset_size, number_of_runs, number_of_winners, mpi_env, log, prefix=""):
    from multi_shc import multi_shc, find_common_subset_3
    from utilities import wrap_mpi_bcast, write_text_row, write_text_file, wrap_mpi_gatherv, average_angles
    from itertools import combinations
    import os

    if log == None:
        from logger import Logger
        log = Logger()

    stable_subset = wrap_mpi_bcast(stable_subset, 0, mpi_env.main_comm)

    if mpi_env.main_rank == 0:
        log.add("Start ", number_of_runs, "* 3SHC")
        for i in xrange(number_of_runs):
            log.add("3SHC --> " + log.prefix + prefix + "_" + str(i))

    completed_mshc = 0
    params = []
    while completed_mshc < number_of_runs:
        runs_to_do = min([(number_of_runs - completed_mshc), mpi_env.subcomms_count])
        if mpi_env.subcomm_id < runs_to_do:
            out_dir = prefix + "_" + str(completed_mshc + mpi_env.subcomm_id)
            if mpi_env.sub_rank == 0:
                os.mkdir(log.prefix + out_dir)
            out_params, out_vol, out_peaks = multi_shc(projections, stable_subset, 3, options, mpi_env.sub_comm, log=log.sublog(out_dir + "/"))
        else:
            out_params = None
        if mpi_env.main_rank in mpi_env.subcomms_roots and mpi_env.subcomm_id < runs_to_do:
            params_temp = wrap_mpi_gatherv([out_params], 0, mpi_env.main_comm)
        else:
            params_temp = wrap_mpi_gatherv([], 0, mpi_env.main_comm)
        if mpi_env.main_rank == 0:
            params.extend(params_temp)
        completed_mshc += runs_to_do

    # find common subset
    if mpi_env.main_rank == 0:
        log.add("Calculate common subset")
        best_confs = []
        largest_subset = []
        largest_subset_error = 999.0
        msg = ""
        for it in combinations(range(number_of_runs), number_of_winners):
            confs = list(it)
            input_params = []
            for c in confs:
                input_params.append(params[c])
            subset_thr, subset_size, err_thr, err_size = find_common_subset_3(input_params, target_threshold, minimal_subset_size, thresholds=True)
            msg += str(len(subset_size)) + "(" + str(err_size) + ") "
            if len(subset_size) > len(largest_subset) or (len(subset_size) == len(largest_subset) and err_size < largest_subset_error):
                best_confs = confs
                largest_subset = subset_size
                largest_subset_error = err_size
        log.add(msg)

        subset = largest_subset
        threshold = largest_subset_error
        new_stable_subset = []
        for i in subset:
            new_stable_subset.append(stable_subset[i])
        log.add("Best solutions (winners): ", best_confs)
        average_params = []
        for i in subset:
            temp = [params[j][i] for j in best_confs]
            average_params.append(average_angles(temp))
        write_text_file(new_stable_subset, log.prefix + prefix + "_indexes.txt")
        write_text_row(average_params, log.prefix + prefix + "_params.txt")
    else:
        threshold = None
        new_stable_subset = None

    # broadcast threshold and new stable subset and exit
    threshold = wrap_mpi_bcast(threshold, 0, mpi_env.main_comm)
    new_stable_subset = wrap_mpi_bcast(new_stable_subset, 0, mpi_env.main_comm)
    return threshold, new_stable_subset
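# Toy illustration of the winner-selection loop in kernel(): every combination of
# number_of_winners runs is scored by the size of the common subset it yields (larger is
# better; ties broken by smaller error). find_common_subset_3 is replaced here by a
# hypothetical stub with made-up results.
from itertools import combinations

def _fake_subset(confs):
    # hypothetical (subset, error) per combination of run indices
    table = {(0, 1): ([2, 3, 5], 1.2), (0, 2): ([1, 2, 3, 5], 0.9), (1, 2): ([1, 2, 3, 5], 1.1)}
    return table[confs]

_best, _best_subset, _best_err = None, [], 999.0
for _confs in combinations(range(3), 2):            # number_of_runs=3, number_of_winners=2
    _subset, _err = _fake_subset(_confs)
    if len(_subset) > len(_best_subset) or (len(_subset) == len(_best_subset) and _err < _best_err):
        _best, _best_subset, _best_err = _confs, _subset, _err
print("winners: %s  subset: %s  error: %s" % (str(_best), str(_best_subset), str(_best_err)))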
def main(): import sys import os import math import random import pyemtbx.options import time from random import random, seed, randint from optparse import OptionParser progname = os.path.basename(sys.argv[0]) usage = progname + """ [options] <inputfile> <outputfile> Generic 2-D image processing programs. Functionality: 1. Phase flip a stack of images and write output to new file: sxprocess.py input_stack.hdf output_stack.hdf --phase_flip 2. Resample (decimate or interpolate up) images (2D or 3D) in a stack to change the pixel size. The window size will change accordingly. sxprocess input.hdf output.hdf --changesize --ratio=0.5 3. Compute average power spectrum of a stack of 2D images with optional padding (option wn) with zeroes or a 3-D volume. sxprocess.py input_stack.hdf powerspectrum.hdf --pw [--wn=1024] 4. Generate a stack of projections bdb:data and micrographs with prefix mic (i.e., mic0.hdf, mic1.hdf etc) from structure input_structure.hdf, with CTF applied to both projections and micrographs: sxprocess.py input_structure.hdf data mic --generate_projections format="bdb":apix=5.2:CTF=True:boxsize=64 5. Retrieve original image numbers in the selected ISAC group (here group 12 from generation 3): sxprocess.py bdb:test3 class_averages_generation_3.hdf list3_12.txt --isacgroup=12 --params=originalid 6. Retrieve original image numbers of images listed in ISAC output stack of averages: sxprocess.py select1.hdf ohk.txt 7. Adjust rotationally averaged power spectrum of an image to that of a reference image or a reference 1D power spectrum stored in an ASCII file. Optionally use a tangent low-pass filter. Also works for a stack of images, in which case the output is also a stack. sxprocess.py vol.hdf ref.hdf avol.hdf < 0.25 0.2> --adjpw sxprocess.py vol.hdf pw.txt avol.hdf < 0.25 0.2> --adjpw 8. Generate a 1D rotationally averaged power spectrum of an image. sxprocess.py vol.hdf --rotwp=rotpw.txt # Output will contain three columns: (1) rotationally averaged power spectrum (2) logarithm of the rotationally averaged power spectrum (3) integer line number (from zero to approximately to half the image size) 9. Apply 3D transformation (rotation and/or shift) to a set of orientation parameters associated with projection data. sxprocess.py --transfromparams=phi,theta,psi,tx,ty,tz input.txt output.txt The output file is then imported and 3D transformed volume computed: sxheader.py bdb:p --params=xform.projection --import=output.txt mpirun -np 2 sxrecons3d_n.py bdb:p tvol.hdf --MPI The reconstructed volume is in the position of the volume computed using the input.txt parameters and then transformed with rot_shift3D(vol, phi,theta,psi,tx,ty,tz) 10. Import ctf parameters from the output of sxcter into windowed particle headers. There are three possible input files formats: (1) all particles are in one stack, (2 aor 3) particles are in stacks, each stack corresponds to a single micrograph. In each case the particles should contain a name of the micrograph of origin stores using attribute name 'ptcl_source_image'. Normally this is done by e2boxer.py during windowing. Particles whose defocus or astigmatism error exceed set thresholds will be skipped, otherwise, virtual stacks with the original way preceded by G will be created. 
sxprocess.py --input=bdb:data --importctf=outdir/partres --defocuserror=10.0 --astigmatismerror=5.0 # Output will be a vritual stack bdb:Gdata sxprocess.py --input="bdb:directory/stacks*" --importctf=outdir/partres --defocuserror=10.0 --astigmatismerror=5.0 To concatenate output files: cd directory e2bdb.py . --makevstack=bdb:allparticles --filt=G IMPORTANT: Please do not move (or remove!) any input/intermediate EMAN2DB files as the information is linked between them. 11. Scale 3D shifts. The shifts in the input five columns text file with 3D orientation parameters will be DIVIDED by the scale factor sxprocess.py orientationparams.txt scaledparams.txt scale=0.5 12. Generate 3D mask from a given 3-D volume automatically or using threshold provided by user. 13. Postprocess 3-D or 2-D images: for 3-D volumes: calculate FSC with provided mask; weight summed volume with FSC; estimate B-factor from FSC weighted summed two volumes; apply negative B-factor to the weighted volume. for 2-D images: calculate B-factor and apply negative B-factor to 2-D images. 14. Winow stack file -reduce size of images without changing the pixel size. """ parser = OptionParser(usage,version=SPARXVERSION) parser.add_option("--order", action="store_true", help="Two arguments are required: name of input stack and desired name of output stack. The output stack is the input stack sorted by similarity in terms of cross-correlation coefficent.", default=False) parser.add_option("--order_lookup", action="store_true", help="Test/Debug.", default=False) parser.add_option("--order_metropolis", action="store_true", help="Test/Debug.", default=False) parser.add_option("--order_pca", action="store_true", help="Test/Debug.", default=False) parser.add_option("--initial", type="int", default=-1, help="Specifies which image will be used as an initial seed to form the chain. (default = 0, means the first image)") parser.add_option("--circular", action="store_true", help="Select circular ordering (fisr image has to be similar to the last", default=False) parser.add_option("--radius", type="int", default=-1, help="Radius of a circular mask for similarity based ordering") parser.add_option("--changesize", action="store_true", help="resample (decimate or interpolate up) images (2D or 3D) in a stack to change the pixel size.", default=False) parser.add_option("--ratio", type="float", default=1.0, help="The ratio of new to old image size (if <1 the pixel size will increase and image size decrease, if>1, the other way round") parser.add_option("--pw", action="store_true", help="compute average power spectrum of a stack of 2-D images with optional padding (option wn) with zeroes", default=False) parser.add_option("--wn", type="int", default=-1, help="Size of window to use (should be larger/equal than particle box size, default padding to max(nx,ny))") parser.add_option("--phase_flip", action="store_true", help="Phase flip the input stack", default=False) parser.add_option("--makedb", metavar="param1=value1:param2=value2", type="string", action="append", help="One argument is required: name of key with which the database will be created. Fill in database with parameters specified as follows: --makedb param1=value1:param2=value2, e.g. 
'gauss_width'=1.0:'pixel_input'=5.2:'pixel_output'=5.2:'thr_low'=1.0") parser.add_option("--generate_projections", metavar="param1=value1:param2=value2", type="string", action="append", help="Three arguments are required: name of input structure from which to generate projections, desired name of output projection stack, and desired prefix for micrographs (e.g. if prefix is 'mic', then micrographs mic0.hdf, mic1.hdf etc will be generated). Optional arguments specifying format, apix, box size and whether to add CTF effects can be entered as follows after --generate_projections: format='bdb':apix=5.2:CTF=True:boxsize=100, or format='hdf', etc., where format is bdb or hdf, apix (pixel size) is a float, CTF is True or False, and boxsize denotes the dimension of the box (assumed to be a square). If an optional parameter is not specified, it will default as follows: format='bdb', apix=2.5, CTF=False, boxsize=64.") parser.add_option("--isacgroup", type="int", help="Retrieve original image numbers in the selected ISAC group. See ISAC documentation for details.", default=-1) parser.add_option("--isacselect", action="store_true", help="Retrieve original image numbers of images listed in ISAC output stack of averages. See ISAC documentation for details.", default=False) parser.add_option("--params", type="string", default=None, help="Name of header of parameter, which one depends on specific option") parser.add_option("--adjpw", action="store_true", help="Adjust rotationally averaged power spectrum of an image", default=False) parser.add_option("--rotpw", type="string", default=None, help="Name of the text file to contain rotationally averaged power spectrum of the input image.") parser.add_option("--transformparams", type="string", default=None, help="Transform 3D projection orientation parameters using six 3D parameters (phi, theta,psi,sx,sy,sz). Input: --transformparams=45.,66.,12.,-2,3,-5.5 desired six transformation of the reconstructed structure. Output: file with modified orientation parameters.") # import ctf estimates done using cter parser.add_option("--input", type="string", default= None, help="Input particles.") parser.add_option("--importctf", type="string", default= None, help="Name of the file containing CTF parameters produced by sxcter.") parser.add_option("--defocuserror", type="float", default=1000000.0, help="Exclude micrographs whose relative defocus error as estimated by sxcter is larger than defocuserror percent. 
The error is computed as (std dev defocus)/defocus*100%") parser.add_option("--astigmatismerror", type="float", default=360.0, help="Set to zero astigmatism for micrographs whose astigmatism angular error as estimated by sxcter is larger than astigmatismerror degrees.") # import ctf estimates done using cter parser.add_option("--scale", type="float", default=-1.0, help="Divide shifts in the input 3D orientation parameters text file by the scale factor.") # generate adaptive mask from a given 3-D volume parser.add_option("--adaptive_mask", action="store_true", help="create adaptive 3-D mask from a given volume", default=False) parser.add_option("--nsigma", type="float", default= 1., help="number of times of sigma of the input volume to obtain the large density cluster") parser.add_option("--ndilation", type="int", default= 3, help="number of times of dilation applied to the largest cluster of density") parser.add_option("--kernel_size", type="int", default= 11, help="convolution kernel for smoothing the edge of the mask") parser.add_option("--gauss_standard_dev", type="int", default= 9, help="standard deviation value to generate Gaussian edge") parser.add_option("--threshold", type="float", default= 9999., help="threshold provided by user to binarize input volume") parser.add_option("--ne", type="int", default= 0, help="number of times to erode the binarized input image") parser.add_option("--nd", type="int", default= 0, help="number of times to dilate the binarized input image") parser.add_option("--postprocess", action="store_true", help="postprocess unfiltered odd, even 3-D volumes",default=False) parser.add_option("--fsc_weighted", action="store_true", help="postprocess unfiltered odd, even 3-D volumes") parser.add_option("--low_pass_filter", action="store_true", default=False, help="postprocess unfiltered odd, even 3-D volumes") parser.add_option("--ff", type="float", default=.25, help="low-pass filter stop-band frequency in absolute units") parser.add_option("--aa", type="float", default=.1, help="low-pass filter falloff" ) parser.add_option("--mask", type="string", help="input mask file", default=None) parser.add_option("--output", type="string", help="output file name", default="postprocessed.hdf") parser.add_option("--pixel_size", type="float", help="pixel size of the data", default=1.0) parser.add_option("--B_start", type="float", help="starting resolution in Angstrom for B-factor estimation", default=10.) parser.add_option("--FSC_cutoff", type="float", help="FSC cutoff value below which the FSC weighting is set to zero", default=0.143) parser.add_option("--2d", action="store_true", help="postprocess isac 2-D averaged images",default=False) parser.add_option("--window_stack", action="store_true", help="window stack images using a smaller window size", default=False) parser.add_option("--box", type="int", default= 0, help="the new window size ") (options, args) = parser.parse_args() global_def.BATCH = True if options.phase_flip: nargs = len(args) if nargs != 2: print "must provide name of input and output file!" return from EMAN2 import Processor instack = args[0] outstack = args[1] nima = EMUtil.get_image_count(instack) from filter import filt_ctf for i in xrange(nima): img = EMData() img.read_image(instack, i) try: ctf = img.get_attr('ctf') except: print "no ctf information in input stack! Exiting..." 
return dopad = True sign = 1 binary = 1 # phase flip assert img.get_ysize() > 1 dict = ctf.to_dict() dz = dict["defocus"] cs = dict["cs"] voltage = dict["voltage"] pixel_size = dict["apix"] b_factor = dict["bfactor"] ampcont = dict["ampcont"] dza = dict["dfdiff"] azz = dict["dfang"] if dopad and not img.is_complex(): ip = 1 else: ip = 0 params = {"filter_type": Processor.fourier_filter_types.CTF_, "defocus" : dz, "Cs": cs, "voltage": voltage, "Pixel_size": pixel_size, "B_factor": b_factor, "amp_contrast": ampcont, "dopad": ip, "binary": binary, "sign": sign, "dza": dza, "azz":azz} tmp = Processor.EMFourierFilter(img, params) tmp.set_attr_dict({"ctf": ctf}) tmp.write_image(outstack, i) elif options.changesize: nargs = len(args) if nargs != 2: ERROR("must provide name of input and output file!", "change size", 1) return from utilities import get_im instack = args[0] outstack = args[1] sub_rate = float(options.ratio) nima = EMUtil.get_image_count(instack) from fundamentals import resample for i in xrange(nima): resample(get_im(instack, i), sub_rate).write_image(outstack, i) elif options.isacgroup>-1: nargs = len(args) if nargs != 3: ERROR("Three files needed on input!", "isacgroup", 1) return from utilities import get_im instack = args[0] m=get_im(args[1],int(options.isacgroup)).get_attr("members") l = [] for k in m: l.append(int(get_im(args[0],k).get_attr(options.params))) from utilities import write_text_file write_text_file(l, args[2]) elif options.isacselect: nargs = len(args) if nargs != 2: ERROR("Two files needed on input!", "isacgroup", 1) return from utilities import get_im nima = EMUtil.get_image_count(args[0]) m = [] for k in xrange(nima): m += get_im(args[0],k).get_attr("members") m.sort() from utilities import write_text_file write_text_file(m, args[1]) elif options.pw: nargs = len(args) if nargs < 2: ERROR("must provide name of input and output file!", "pw", 1) return from utilities import get_im, write_text_file from fundamentals import rops_table d = get_im(args[0]) ndim = d.get_ndim() if ndim ==3: pw = rops_table(d) write_text_file(pw, args[1]) else: nx = d.get_xsize() ny = d.get_ysize() if nargs ==3: mask = get_im(args[2]) wn = int(options.wn) if wn == -1: wn = max(nx, ny) else: if( (wn<nx) or (wn<ny) ): ERROR("window size cannot be smaller than the image size","pw",1) n = EMUtil.get_image_count(args[0]) from utilities import model_blank, model_circle, pad from EMAN2 import periodogram p = model_blank(wn,wn) for i in xrange(n): d = get_im(args[0], i) if nargs==3: d *=mask st = Util.infomask(d, None, True) d -= st[0] p += periodogram(pad(d, wn, wn, 1, 0.)) p /= n p.write_image(args[1]) elif options.adjpw: if len(args) < 3: ERROR("filt_by_rops input target output fl aa (the last two are optional parameters of a low-pass filter)","adjpw",1) return img_stack = args[0] from math import sqrt from fundamentals import rops_table, fft from utilities import read_text_file, get_im from filter import filt_tanl, filt_table if( args[1][-3:] == 'txt'): rops_dst = read_text_file( args[1] ) else: rops_dst = rops_table(get_im( args[1] )) out_stack = args[2] if(len(args) >4): fl = float(args[3]) aa = float(args[4]) else: fl = -1.0 aa = 0.0 nimage = EMUtil.get_image_count( img_stack ) for i in xrange(nimage): img = fft(get_im(img_stack, i) ) rops_src = rops_table(img) assert len(rops_dst) == len(rops_src) table = [0.0]*len(rops_dst) for j in xrange( len(rops_dst) ): table[j] = sqrt( rops_dst[j]/rops_src[j] ) if( fl > 0.0): img = filt_tanl(img, fl, aa) img = fft(filt_table(img, table)) 
img.write_image(out_stack, i) elif options.rotpw != None: if len(args) != 1: ERROR("Only one input permitted","rotpw",1) return from utilities import write_text_file, get_im from fundamentals import rops_table from math import log10 t = rops_table(get_im(args[0])) x = range(len(t)) r = [0.0]*len(x) for i in x: r[i] = log10(t[i]) write_text_file([t,r,x],options.rotpw) elif options.transformparams != None: if len(args) != 2: ERROR("Please provide names of input and output files with orientation parameters","transformparams",1) return from utilities import read_text_row, write_text_row transf = [0.0]*6 spl=options.transformparams.split(',') for i in xrange(len(spl)): transf[i] = float(spl[i]) write_text_row( rotate_shift_params(read_text_row(args[0]), transf) , args[1]) elif options.makedb != None: nargs = len(args) if nargs != 1: print "must provide exactly one argument denoting database key under which the input params will be stored" return dbkey = args[0] print "database key under which params will be stored: ", dbkey gbdb = js_open_dict("e2boxercache/gauss_box_DB.json") parmstr = 'dummy:'+options.makedb[0] (processorname, param_dict) = parsemodopt(parmstr) dbdict = {} for pkey in param_dict: if (pkey == 'invert_contrast') or (pkey == 'use_variance'): if param_dict[pkey] == 'True': dbdict[pkey] = True else: dbdict[pkey] = False else: dbdict[pkey] = param_dict[pkey] gbdb[dbkey] = dbdict elif options.generate_projections: nargs = len(args) if nargs != 3: ERROR("Must provide name of input structure(s) from which to generate projections, name of output projection stack, and prefix for output micrographs."\ "sxprocess - generate projections",1) return inpstr = args[0] outstk = args[1] micpref = args[2] parmstr = 'dummy:'+options.generate_projections[0] (processorname, param_dict) = parsemodopt(parmstr) parm_CTF = False parm_format = 'bdb' parm_apix = 2.5 if 'CTF' in param_dict: if param_dict['CTF'] == 'True': parm_CTF = True if 'format' in param_dict: parm_format = param_dict['format'] if 'apix' in param_dict: parm_apix = float(param_dict['apix']) boxsize = 64 if 'boxsize' in param_dict: boxsize = int(param_dict['boxsize']) print "pixel size: ", parm_apix, " format: ", parm_format, " add CTF: ", parm_CTF, " box size: ", boxsize scale_mult = 2500 sigma_add = 1.5 sigma_proj = 30.0 sigma2_proj = 17.5 sigma_gauss = 0.3 sigma_mic = 30.0 sigma2_mic = 17.5 sigma_gauss_mic = 0.3 if 'scale_mult' in param_dict: scale_mult = float(param_dict['scale_mult']) if 'sigma_add' in param_dict: sigma_add = float(param_dict['sigma_add']) if 'sigma_proj' in param_dict: sigma_proj = float(param_dict['sigma_proj']) if 'sigma2_proj' in param_dict: sigma2_proj = float(param_dict['sigma2_proj']) if 'sigma_gauss' in param_dict: sigma_gauss = float(param_dict['sigma_gauss']) if 'sigma_mic' in param_dict: sigma_mic = float(param_dict['sigma_mic']) if 'sigma2_mic' in param_dict: sigma2_mic = float(param_dict['sigma2_mic']) if 'sigma_gauss_mic' in param_dict: sigma_gauss_mic = float(param_dict['sigma_gauss_mic']) from filter import filt_gaussl, filt_ctf from utilities import drop_spider_doc, even_angles, model_gauss, delete_bdb, model_blank,pad,model_gauss_noise,set_params2D, set_params_proj from projection import prep_vol,prgs seed(14567) delta = 29 angles = even_angles(delta, 0.0, 89.9, 0.0, 359.9, "S") nangle = len(angles) modelvol = [] nvlms = EMUtil.get_image_count(inpstr) from utilities import get_im for k in xrange(nvlms): modelvol.append(get_im(inpstr,k)) nx = modelvol[0].get_xsize() if nx != boxsize: 
ERROR("Requested box dimension does not match dimension of the input model.", \ "sxprocess - generate projections",1) nvol = 10 volfts = [[] for k in xrange(nvlms)] for k in xrange(nvlms): for i in xrange(nvol): sigma = sigma_add + random() # 1.5-2.5 addon = model_gauss(sigma, boxsize, boxsize, boxsize, sigma, sigma, 38, 38, 40 ) scale = scale_mult * (0.5+random()) vf, kb = prep_vol(modelvol[k] + scale*addon) volfts[k].append(vf) del vf, modelvol if parm_format == "bdb": stack_data = "bdb:"+outstk delete_bdb(stack_data) else: stack_data = outstk + ".hdf" Cs = 2.0 pixel = parm_apix voltage = 120.0 ampcont = 10.0 ibd = 4096/2-boxsize iprj = 0 width = 240 xstart = 8 + boxsize/2 ystart = 8 + boxsize/2 rowlen = 17 from random import randint params = [] for idef in xrange(3, 8): irow = 0 icol = 0 mic = model_blank(4096, 4096) defocus = idef * 0.5#0.2 if parm_CTF: astampl=defocus*0.15 astangl=50.0 ctf = generate_ctf([defocus, Cs, voltage, pixel, ampcont, 0.0, astampl, astangl]) for i in xrange(nangle): for k in xrange(12): dphi = 8.0*(random()-0.5) dtht = 8.0*(random()-0.5) psi = 360.0*random() phi = angles[i][0]+dphi tht = angles[i][1]+dtht s2x = 4.0*(random()-0.5) s2y = 4.0*(random()-0.5) params.append([phi, tht, psi, s2x, s2y]) ivol = iprj % nvol #imgsrc = randint(0,nvlms-1) imgsrc = iprj % nvlms proj = prgs(volfts[imgsrc][ivol], kb, [phi, tht, psi, -s2x, -s2y]) x = xstart + irow * width y = ystart + icol * width mic += pad(proj, 4096, 4096, 1, 0.0, x-2048, y-2048, 0) proj = proj + model_gauss_noise( sigma_proj, nx, nx ) if parm_CTF: proj = filt_ctf(proj, ctf) proj.set_attr_dict({"ctf":ctf, "ctf_applied":0}) proj = proj + filt_gaussl(model_gauss_noise(sigma2_proj, nx, nx), sigma_gauss) proj.set_attr("origimgsrc",imgsrc) proj.set_attr("test_id", iprj) # flags describing the status of the image (1 = true, 0 = false) set_params2D(proj, [0.0, 0.0, 0.0, 0, 1.0]) set_params_proj(proj, [phi, tht, psi, s2x, s2y]) proj.write_image(stack_data, iprj) icol += 1 if icol == rowlen: icol = 0 irow += 1 iprj += 1 mic += model_gauss_noise(sigma_mic,4096,4096) if parm_CTF: #apply CTF mic = filt_ctf(mic, ctf) mic += filt_gaussl(model_gauss_noise(sigma2_mic, 4096, 4096), sigma_gauss_mic) mic.write_image(micpref + "%1d.hdf" % (idef-3), 0) drop_spider_doc("params.txt", params) elif options.importctf != None: print ' IMPORTCTF ' from utilities import read_text_row,write_text_row from random import randint import subprocess grpfile = 'groupid%04d'%randint(1000,9999) ctfpfile = 'ctfpfile%04d'%randint(1000,9999) cterr = [options.defocuserror/100.0, options.astigmatismerror] ctfs = read_text_row(options.importctf) for kk in xrange(len(ctfs)): root,name = os.path.split(ctfs[kk][-1]) ctfs[kk][-1] = name[:-4] if(options.input[:4] != 'bdb:'): ERROR('Sorry, only bdb files implemented','importctf',1) d = options.input[4:] #try: str = d.index('*') #except: str = -1 from string import split import glob uu = os.path.split(d) uu = os.path.join(uu[0],'EMAN2DB',uu[1]+'.bdb') flist = glob.glob(uu) for i in xrange(len(flist)): root,name = os.path.split(flist[i]) root = root[:-7] name = name[:-4] fil = 'bdb:'+os.path.join(root,name) sourcemic = EMUtil.get_all_attributes(fil,'ptcl_source_image') nn = len(sourcemic) gctfp = [] groupid = [] for kk in xrange(nn): junk,name2 = os.path.split(sourcemic[kk]) name2 = name2[:-4] ctfp = [-1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0] for ll in xrange(len(ctfs)): if(name2 == ctfs[ll][-1]): # found correct if(ctfs[ll][8]/ctfs[ll][0] <= cterr[0]): # acceptable defocus error ctfp = ctfs[ll][:8] 
if(ctfs[ll][10] > cterr[1] ): # error of astigmatism exceed the threshold, set astigmatism to zero. ctfp[6] = 0.0 ctfp[7] = 0.0 gctfp.append(ctfp) groupid.append(kk) break if(len(groupid) > 0): write_text_row(groupid, grpfile) write_text_row(gctfp, ctfpfile) cmd = "{} {} {} {}".format('e2bdb.py',fil,'--makevstack=bdb:'+root+'G'+name,'--list='+grpfile) #print cmd subprocess.call(cmd, shell=True) cmd = "{} {} {} {}".format('sxheader.py','bdb:'+root+'G'+name,'--params=ctf','--import='+ctfpfile) #print cmd subprocess.call(cmd, shell=True) else: print ' >>> Group ',name,' skipped.' cmd = "{} {} {}".format("rm -f",grpfile,ctfpfile) subprocess.call(cmd, shell=True) elif options.scale > 0.0: from utilities import read_text_row,write_text_row scale = options.scale nargs = len(args) if nargs != 2: print "Please provide names of input and output file!" return p = read_text_row(args[0]) for i in xrange(len(p)): p[i][3] /= scale p[i][4] /= scale write_text_row(p, args[1]) elif options.adaptive_mask: from utilities import get_im from morphology import adaptive_mask, binarize, erosion, dilation nsigma = options.nsigma ndilation = options.ndilation kernel_size = options.kernel_size gauss_standard_dev = options.gauss_standard_dev nargs = len(args) if nargs ==0: print " Create 3D mask from a given volume, either automatically or from the user provided threshold." elif nargs > 2: print "Too many inputs are given, try again!" return else: inputvol = get_im(args[0]) input_path, input_file_name = os.path.split(args[0]) input_file_name_root,ext=os.path.splitext(input_file_name) if nargs == 2: mask_file_name = args[1] else: mask_file_name = "adaptive_mask_for_"+input_file_name_root+".hdf" # Only hdf file is output. if options.threshold !=9999.: mask3d = binarize(inputvol, options.threshold) for i in xrange(options.ne): mask3d = erosion(mask3d) for i in xrange(options.nd): mask3d = dilation(mask3d) else: mask3d = adaptive_mask(inputvol, nsigma, ndilation, kernel_size, gauss_standard_dev) mask3d.write_image(mask_file_name) elif options.postprocess: from utilities import get_im from fundamentals import rot_avg_table from morphology import compute_bfactor,power from statistics import fsc from filter import filt_table, filt_gaussinv from EMAN2 import periodogram e1 = get_im(args[0],0) if e1.get_zsize()==1: nimage = EMUtil.get_image_count(args[0]) if options.mask !=None: m = get_im(options.mask) else: m = None for i in xrange(nimage): e1 = get_im(args[0],i) if m: e1 *=m guinerline = rot_avg_table(power(periodogram(e1),.5)) freq_max = 1/(2.*pixel_size) freq_min = 1./options.B_start b,junk=compute_bfactor(guinerline, freq_min, freq_max, pixel_size) tmp = b/pixel_size**2 sigma_of_inverse=sqrt(2./tmp) e1 = filt_gaussinv(e1,sigma_of_inverse) if options.low_pass_filter: from filter import filt_tanl e1 =filt_tanl(e1,options.ff, options.aa) e1.write_image(options.output) else: nargs = len(args) e1 = get_im(args[0]) if nargs >1: e2 = get_im(args[1]) if options.mask !=None: m = get_im(options.mask) else: m =None pixel_size = options.pixel_size from math import sqrt if m !=None: e1 *=m if nargs >1 :e2 *=m if options.fsc_weighted: frc = fsc(e1,e2,1) ## FSC is done on masked two images #### FSC weighting sqrt((2.*fsc)/(1+fsc)); fil = len(frc[1])*[None] for i in xrange(len(fil)): if frc[1][i]>=options.FSC_cutoff: tmp = frc[1][i] else: tmp = 0.0 fil[i] = sqrt(2.*tmp/(1.+tmp)) if nargs>1: e1 +=e2 if options.fsc_weighted: e1=filt_table(e1,fil) guinerline = rot_avg_table(power(periodogram(e1),.5)) freq_max = 1/(2.*pixel_size) freq_min = 
1./options.B_start b,junk = compute_bfactor(guinerline, freq_min, freq_max, pixel_size) tmp = b/pixel_size**2 sigma_of_inverse=sqrt(2./tmp) e1 = filt_gaussinv(e1,sigma_of_inverse) if options.low_pass_filter: from filter import filt_tanl e1 =filt_tanl(e1,options.ff, options.aa) e1.write_image(options.output) elif options.window_stack: nargs = len(args) if nargs ==0: print " Reduce image size of a stack" return else: output_stack_name = None inputstack = args[0] if nargs ==2:output_stack_name = args[1] input_path,input_file_name=os.path.split(inputstack) input_file_name_root,ext=os.path.splitext(input_file_name) if input_file_name_root[0:3]=="bdb":stack_is_bdb= True else: stack_is_bdb= False if output_stack_name is None: if stack_is_bdb: output_stack_name ="bdb:reduced_"+input_file_name_root[4:] else:output_stack_name = "reduced_"+input_file_name_root+".hdf" # Only hdf file is output. nimage = EMUtil.get_image_count(inputstack) from fundamentals import window2d for i in xrange(nimage): image = EMData() image.read_image(inputstack,i) w = window2d(image,options.box,options.box) w.write_image(output_stack_name,i) else: ERROR("Please provide option name","sxprocess.py",1)
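# The --pw and --rotpw branches above rely on SPARX's periodogram and rops_table
# to obtain 1-D rotationally averaged power spectra. The helper below is only a
# minimal NumPy sketch of that idea (radial averaging of |FFT|^2 over integer
# frequency shells); it is not the SPARX implementation, and the function name
# rotavg_power_numpy is made up for this illustration.
def rotavg_power_numpy(img2d):
    """img2d: 2-D NumPy array; returns the rotationally averaged power spectrum."""
    import numpy
    ny, nx = img2d.shape
    # centered 2-D power spectrum
    pw = numpy.abs(numpy.fft.fftshift(numpy.fft.fft2(img2d)))**2
    # integer radius of every Fourier pixel, measured from the spectrum center
    y, x = numpy.indices((ny, nx))
    r = numpy.rint(numpy.hypot(x - nx//2, y - ny//2)).astype(int)
    nshell = min(nx, ny)//2
    # average the power over each radial shell 0..nshell-1
    sums = numpy.bincount(r.ravel(), weights=pw.ravel(), minlength=nshell)
    counts = numpy.bincount(r.ravel(), minlength=nshell)
    return sums[:nshell] / numpy.maximum(counts[:nshell], 1)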
def main(): import sys import os import math import random import pyemtbx.options import time from random import random, seed, randint from optparse import OptionParser progname = os.path.basename(sys.argv[0]) usage = progname + """ [options] <inputfile> <outputfile> Generic 2-D image processing programs. Functionality: 1. Phase flip a stack of images and write output to new file: sxprocess.py input_stack.hdf output_stack.hdf --phase_flip 2. Resample (decimate or interpolate up) images (2D or 3D) in a stack to change the pixel size. The window size will change accordingly. sxprocess input.hdf output.hdf --changesize --ratio=0.5 3. Compute average power spectrum of a stack of 2D images with optional padding (option wn) with zeroes. sxprocess.py input_stack.hdf powerspectrum.hdf --pw [--wn=1024] 4. Generate a stack of projections bdb:data and micrographs with prefix mic (i.e., mic0.hdf, mic1.hdf etc) from structure input_structure.hdf, with CTF applied to both projections and micrographs: sxprocess.py input_structure.hdf data mic --generate_projections format="bdb":apix=5.2:CTF=True:boxsize=64 5. Retrieve original image numbers in the selected ISAC group (here group 12 from generation 3): sxprocess.py bdb:test3 class_averages_generation_3.hdf list3_12.txt --isacgroup=12 --params=originalid 6. Retrieve original image numbers of images listed in ISAC output stack of averages: sxprocess.py select1.hdf ohk.txt 7. Adjust rotationally averaged power spectrum of an image to that of a reference image or a reference 1D power spectrum stored in an ASCII file. Optionally use a tangent low-pass filter. Also works for a stack of images, in which case the output is also a stack. sxprocess.py vol.hdf ref.hdf avol.hdf < 0.25 0.2> --adjpw sxprocess.py vol.hdf pw.txt avol.hdf < 0.25 0.2> --adjpw 8. Generate a 1D rotationally averaged power spectrum of an image. sxprocess.py vol.hdf --rotwp=rotpw.txt # Output will contain three columns: (1) rotationally averaged power spectrum (2) logarithm of the rotationally averaged power spectrum (3) integer line number (from zero to approximately to half the image size) 9. Apply 3D transformation (rotation and/or shift) to a set of orientation parameters associated with projection data. sxprocess.py --transfromparams=phi,theta,psi,tx,ty,tz input.txt output.txt The output file is then imported and 3D transformed volume computed: sxheader.py bdb:p --params=xform.projection --import=output.txt mpirun -np 2 sxrecons3d_n.py bdb:p tvol.hdf --MPI The reconstructed volume is in the position of the volume computed using the input.txt parameters and then transformed with rot_shift3D(vol, phi,theta,psi,tx,ty,tz) 10. Import ctf parameters from the output of sxcter into windowed particle headers. There are three possible input files formats: (1) all particles are in one stack, (2 aor 3) particles are in stacks, each stack corresponds to a single micrograph. In each case the particles should contain a name of the micrograph of origin stores using attribute name 'ptcl_source_image'. Normally this is done by e2boxer.py during windowing. Particles whose defocus or astigmatism error exceed set thresholds will be skipped, otherwise, virtual stacks with the original way preceded by G will be created. 
sxprocess.py --input=bdb:data --importctf=outdir/partres --defocuserror=10.0 --astigmatismerror=5.0 # Output will be a vritual stack bdb:Gdata sxprocess.py --input="bdb:directory/stacks*" --importctf=outdir/partres --defocuserror=10.0 --astigmatismerror=5.0 To concatenate output files: cd directory e2bdb.py . --makevstack=bdb:allparticles --filt=G IMPORTANT: Please do not move (or remove!) any input/intermediate EMAN2DB files as the information is linked between them. 11. Scale 3D shifts. The shifts in the input five columns text file with 3D orientation parameters will be DIVIDED by the scale factor sxprocess.py orientationparams.txt scaledparams.txt scale=0.5 12. Generate adaptive mask from a given 3-D volume. """ parser = OptionParser(usage, version=SPARXVERSION) parser.add_option( "--order", action="store_true", help= "Two arguments are required: name of input stack and desired name of output stack. The output stack is the input stack sorted by similarity in terms of cross-correlation coefficent.", default=False) parser.add_option("--order_lookup", action="store_true", help="Test/Debug.", default=False) parser.add_option("--order_metropolis", action="store_true", help="Test/Debug.", default=False) parser.add_option("--order_pca", action="store_true", help="Test/Debug.", default=False) parser.add_option( "--initial", type="int", default=-1, help= "Specifies which image will be used as an initial seed to form the chain. (default = 0, means the first image)" ) parser.add_option( "--circular", action="store_true", help= "Select circular ordering (fisr image has to be similar to the last", default=False) parser.add_option( "--radius", type="int", default=-1, help="Radius of a circular mask for similarity based ordering") parser.add_option( "--changesize", action="store_true", help= "resample (decimate or interpolate up) images (2D or 3D) in a stack to change the pixel size.", default=False) parser.add_option( "--ratio", type="float", default=1.0, help= "The ratio of new to old image size (if <1 the pixel size will increase and image size decrease, if>1, the other way round" ) parser.add_option( "--pw", action="store_true", help= "compute average power spectrum of a stack of 2-D images with optional padding (option wn) with zeroes", default=False) parser.add_option( "--wn", type="int", default=-1, help= "Size of window to use (should be larger/equal than particle box size, default padding to max(nx,ny))" ) parser.add_option("--phase_flip", action="store_true", help="Phase flip the input stack", default=False) parser.add_option( "--makedb", metavar="param1=value1:param2=value2", type="string", action="append", help= "One argument is required: name of key with which the database will be created. Fill in database with parameters specified as follows: --makedb param1=value1:param2=value2, e.g. 'gauss_width'=1.0:'pixel_input'=5.2:'pixel_output'=5.2:'thr_low'=1.0" ) parser.add_option( "--generate_projections", metavar="param1=value1:param2=value2", type="string", action="append", help= "Three arguments are required: name of input structure from which to generate projections, desired name of output projection stack, and desired prefix for micrographs (e.g. if prefix is 'mic', then micrographs mic0.hdf, mic1.hdf etc will be generated). 
Optional arguments specifying format, apix, box size and whether to add CTF effects can be entered as follows after --generate_projections: format='bdb':apix=5.2:CTF=True:boxsize=100, or format='hdf', etc., where format is bdb or hdf, apix (pixel size) is a float, CTF is True or False, and boxsize denotes the dimension of the box (assumed to be a square). If an optional parameter is not specified, it will default as follows: format='bdb', apix=2.5, CTF=False, boxsize=64." ) parser.add_option( "--isacgroup", type="int", help= "Retrieve original image numbers in the selected ISAC group. See ISAC documentation for details.", default=-1) parser.add_option( "--isacselect", action="store_true", help= "Retrieve original image numbers of images listed in ISAC output stack of averages. See ISAC documentation for details.", default=False) parser.add_option( "--params", type="string", default=None, help="Name of header of parameter, which one depends on specific option" ) parser.add_option( "--adjpw", action="store_true", help="Adjust rotationally averaged power spectrum of an image", default=False) parser.add_option( "--rotpw", type="string", default=None, help= "Name of the text file to contain rotationally averaged power spectrum of the input image." ) parser.add_option( "--transformparams", type="string", default=None, help= "Transform 3D projection orientation parameters using six 3D parameters (phi, theta,psi,sx,sy,sz). Input: --transformparams=45.,66.,12.,-2,3,-5.5 desired six transformation of the reconstructed structure. Output: file with modified orientation parameters." ) # import ctf estimates done using cter parser.add_option("--input", type="string", default=None, help="Input particles.") parser.add_option( "--importctf", type="string", default=None, help="Name of the file containing CTF parameters produced by sxcter.") parser.add_option( "--defocuserror", type="float", default=1000000.0, help= "Exclude micrographs whose relative defocus error as estimated by sxcter is larger than defocuserror percent. The error is computed as (std dev defocus)/defocus*100%" ) parser.add_option( "--astigmatismerror", type="float", default=360.0, help= "Set to zero astigmatism for micrographs whose astigmatism angular error as estimated by sxcter is larger than astigmatismerror degrees." ) # import ctf estimates done using cter parser.add_option( "--scale", type="float", default=-1.0, help= "Divide shifts in the input 3D orientation parameters text file by the scale factor." ) # generate adaptive mask from an given 3-Db volue parser.add_option("--adaptive_mask", action="store_true", help="create adavptive 3-D mask from a given volume", default=False) parser.add_option( "--nsigma", type="float", default=1., help= "number of times of sigma of the input volume to obtain the the large density cluster" ) parser.add_option( "--ndilation", type="int", default=3, help= "number of times of dilation applied to the largest cluster of density" ) parser.add_option( "--kernel_size", type="int", default=11, help="convolution kernel for smoothing the edge of the mask") parser.add_option( "--gauss_standard_dev", type="int", default=9, help="stanadard deviation value to generate Gaussian edge") (options, args) = parser.parse_args() global_def.BATCH = True if options.phase_flip: nargs = len(args) if nargs != 2: print "must provide name of input and output file!" 
return from EMAN2 import Processor instack = args[0] outstack = args[1] nima = EMUtil.get_image_count(instack) from filter import filt_ctf for i in xrange(nima): img = EMData() img.read_image(instack, i) try: ctf = img.get_attr('ctf') except: print "no ctf information in input stack! Exiting..." return dopad = True sign = 1 binary = 1 # phase flip assert img.get_ysize() > 1 dict = ctf.to_dict() dz = dict["defocus"] cs = dict["cs"] voltage = dict["voltage"] pixel_size = dict["apix"] b_factor = dict["bfactor"] ampcont = dict["ampcont"] dza = dict["dfdiff"] azz = dict["dfang"] if dopad and not img.is_complex(): ip = 1 else: ip = 0 params = { "filter_type": Processor.fourier_filter_types.CTF_, "defocus": dz, "Cs": cs, "voltage": voltage, "Pixel_size": pixel_size, "B_factor": b_factor, "amp_contrast": ampcont, "dopad": ip, "binary": binary, "sign": sign, "dza": dza, "azz": azz } tmp = Processor.EMFourierFilter(img, params) tmp.set_attr_dict({"ctf": ctf}) tmp.write_image(outstack, i) elif options.changesize: nargs = len(args) if nargs != 2: ERROR("must provide name of input and output file!", "change size", 1) return from utilities import get_im instack = args[0] outstack = args[1] sub_rate = float(options.ratio) nima = EMUtil.get_image_count(instack) from fundamentals import resample for i in xrange(nima): resample(get_im(instack, i), sub_rate).write_image(outstack, i) elif options.isacgroup > -1: nargs = len(args) if nargs != 3: ERROR("Three files needed on input!", "isacgroup", 1) return from utilities import get_im instack = args[0] m = get_im(args[1], int(options.isacgroup)).get_attr("members") l = [] for k in m: l.append(int(get_im(args[0], k).get_attr(options.params))) from utilities import write_text_file write_text_file(l, args[2]) elif options.isacselect: nargs = len(args) if nargs != 2: ERROR("Two files needed on input!", "isacgroup", 1) return from utilities import get_im nima = EMUtil.get_image_count(args[0]) m = [] for k in xrange(nima): m += get_im(args[0], k).get_attr("members") m.sort() from utilities import write_text_file write_text_file(m, args[1]) elif options.pw: nargs = len(args) if nargs < 2: ERROR("must provide name of input and output file!", "pw", 1) return from utilities import get_im d = get_im(args[0]) nx = d.get_xsize() ny = d.get_ysize() if nargs == 3: mask = get_im(args[2]) wn = int(options.wn) if wn == -1: wn = max(nx, ny) else: if ((wn < nx) or (wn < ny)): ERROR("window size cannot be smaller than the image size", "pw", 1) n = EMUtil.get_image_count(args[0]) from utilities import model_blank, model_circle, pad from EMAN2 import periodogram p = model_blank(wn, wn) for i in xrange(n): d = get_im(args[0], i) if nargs == 3: d *= mask st = Util.infomask(d, None, True) d -= st[0] p += periodogram(pad(d, wn, wn, 1, 0.)) p /= n p.write_image(args[1]) elif options.adjpw: if len(args) < 3: ERROR( "filt_by_rops input target output fl aa (the last two are optional parameters of a low-pass filter)", "adjpw", 1) return img_stack = args[0] from math import sqrt from fundamentals import rops_table, fft from utilities import read_text_file, get_im from filter import filt_tanl, filt_table if (args[1][-3:] == 'txt'): rops_dst = read_text_file(args[1]) else: rops_dst = rops_table(get_im(args[1])) out_stack = args[2] if (len(args) > 4): fl = float(args[3]) aa = float(args[4]) else: fl = -1.0 aa = 0.0 nimage = EMUtil.get_image_count(img_stack) for i in xrange(nimage): img = fft(get_im(img_stack, i)) rops_src = rops_table(img) assert len(rops_dst) == len(rops_src) table = [0.0] * 
len(rops_dst) for j in xrange(len(rops_dst)): table[j] = sqrt(rops_dst[j] / rops_src[j]) if (fl > 0.0): img = filt_tanl(img, fl, aa) img = fft(filt_table(img, table)) img.write_image(out_stack, i) elif options.rotpw != None: if len(args) != 1: ERROR("Only one input permitted", "rotpw", 1) return from utilities import write_text_file, get_im from fundamentals import rops_table from math import log10 t = rops_table(get_im(args[0])) x = range(len(t)) r = [0.0] * len(x) for i in x: r[i] = log10(t[i]) write_text_file([t, r, x], options.rotpw) elif options.transformparams != None: if len(args) != 2: ERROR( "Please provide names of input and output files with orientation parameters", "transformparams", 1) return from utilities import read_text_row, write_text_row transf = [0.0] * 6 spl = options.transformparams.split(',') for i in xrange(len(spl)): transf[i] = float(spl[i]) write_text_row(rotate_shift_params(read_text_row(args[0]), transf), args[1]) elif options.makedb != None: nargs = len(args) if nargs != 1: print "must provide exactly one argument denoting database key under which the input params will be stored" return dbkey = args[0] print "database key under which params will be stored: ", dbkey gbdb = js_open_dict("e2boxercache/gauss_box_DB.json") parmstr = 'dummy:' + options.makedb[0] (processorname, param_dict) = parsemodopt(parmstr) dbdict = {} for pkey in param_dict: if (pkey == 'invert_contrast') or (pkey == 'use_variance'): if param_dict[pkey] == 'True': dbdict[pkey] = True else: dbdict[pkey] = False else: dbdict[pkey] = param_dict[pkey] gbdb[dbkey] = dbdict elif options.generate_projections: nargs = len(args) if nargs != 3: ERROR("Must provide name of input structure(s) from which to generate projections, name of output projection stack, and prefix for output micrographs."\ "sxprocess - generate projections",1) return inpstr = args[0] outstk = args[1] micpref = args[2] parmstr = 'dummy:' + options.generate_projections[0] (processorname, param_dict) = parsemodopt(parmstr) parm_CTF = False parm_format = 'bdb' parm_apix = 2.5 if 'CTF' in param_dict: if param_dict['CTF'] == 'True': parm_CTF = True if 'format' in param_dict: parm_format = param_dict['format'] if 'apix' in param_dict: parm_apix = float(param_dict['apix']) boxsize = 64 if 'boxsize' in param_dict: boxsize = int(param_dict['boxsize']) print "pixel size: ", parm_apix, " format: ", parm_format, " add CTF: ", parm_CTF, " box size: ", boxsize scale_mult = 2500 sigma_add = 1.5 sigma_proj = 30.0 sigma2_proj = 17.5 sigma_gauss = 0.3 sigma_mic = 30.0 sigma2_mic = 17.5 sigma_gauss_mic = 0.3 if 'scale_mult' in param_dict: scale_mult = float(param_dict['scale_mult']) if 'sigma_add' in param_dict: sigma_add = float(param_dict['sigma_add']) if 'sigma_proj' in param_dict: sigma_proj = float(param_dict['sigma_proj']) if 'sigma2_proj' in param_dict: sigma2_proj = float(param_dict['sigma2_proj']) if 'sigma_gauss' in param_dict: sigma_gauss = float(param_dict['sigma_gauss']) if 'sigma_mic' in param_dict: sigma_mic = float(param_dict['sigma_mic']) if 'sigma2_mic' in param_dict: sigma2_mic = float(param_dict['sigma2_mic']) if 'sigma_gauss_mic' in param_dict: sigma_gauss_mic = float(param_dict['sigma_gauss_mic']) from filter import filt_gaussl, filt_ctf from utilities import drop_spider_doc, even_angles, model_gauss, delete_bdb, model_blank, pad, model_gauss_noise, set_params2D, set_params_proj from projection import prep_vol, prgs seed(14567) delta = 29 angles = even_angles(delta, 0.0, 89.9, 0.0, 359.9, "S") nangle = len(angles) modelvol = [] 
nvlms = EMUtil.get_image_count(inpstr) from utilities import get_im for k in xrange(nvlms): modelvol.append(get_im(inpstr, k)) nx = modelvol[0].get_xsize() if nx != boxsize: ERROR("Requested box dimension does not match dimension of the input model.", \ "sxprocess - generate projections",1) nvol = 10 volfts = [[] for k in xrange(nvlms)] for k in xrange(nvlms): for i in xrange(nvol): sigma = sigma_add + random() # 1.5-2.5 addon = model_gauss(sigma, boxsize, boxsize, boxsize, sigma, sigma, 38, 38, 40) scale = scale_mult * (0.5 + random()) vf, kb = prep_vol(modelvol[k] + scale * addon) volfts[k].append(vf) del vf, modelvol if parm_format == "bdb": stack_data = "bdb:" + outstk delete_bdb(stack_data) else: stack_data = outstk + ".hdf" Cs = 2.0 pixel = parm_apix voltage = 120.0 ampcont = 10.0 ibd = 4096 / 2 - boxsize iprj = 0 width = 240 xstart = 8 + boxsize / 2 ystart = 8 + boxsize / 2 rowlen = 17 from random import randint params = [] for idef in xrange(3, 8): irow = 0 icol = 0 mic = model_blank(4096, 4096) defocus = idef * 0.5 #0.2 if parm_CTF: astampl = defocus * 0.15 astangl = 50.0 ctf = generate_ctf([ defocus, Cs, voltage, pixel, ampcont, 0.0, astampl, astangl ]) for i in xrange(nangle): for k in xrange(12): dphi = 8.0 * (random() - 0.5) dtht = 8.0 * (random() - 0.5) psi = 360.0 * random() phi = angles[i][0] + dphi tht = angles[i][1] + dtht s2x = 4.0 * (random() - 0.5) s2y = 4.0 * (random() - 0.5) params.append([phi, tht, psi, s2x, s2y]) ivol = iprj % nvol #imgsrc = randint(0,nvlms-1) imgsrc = iprj % nvlms proj = prgs(volfts[imgsrc][ivol], kb, [phi, tht, psi, -s2x, -s2y]) x = xstart + irow * width y = ystart + icol * width mic += pad(proj, 4096, 4096, 1, 0.0, x - 2048, y - 2048, 0) proj = proj + model_gauss_noise(sigma_proj, nx, nx) if parm_CTF: proj = filt_ctf(proj, ctf) proj.set_attr_dict({"ctf": ctf, "ctf_applied": 0}) proj = proj + filt_gaussl( model_gauss_noise(sigma2_proj, nx, nx), sigma_gauss) proj.set_attr("origimgsrc", imgsrc) proj.set_attr("test_id", iprj) # flags describing the status of the image (1 = true, 0 = false) set_params2D(proj, [0.0, 0.0, 0.0, 0, 1.0]) set_params_proj(proj, [phi, tht, psi, s2x, s2y]) proj.write_image(stack_data, iprj) icol += 1 if icol == rowlen: icol = 0 irow += 1 iprj += 1 mic += model_gauss_noise(sigma_mic, 4096, 4096) if parm_CTF: #apply CTF mic = filt_ctf(mic, ctf) mic += filt_gaussl(model_gauss_noise(sigma2_mic, 4096, 4096), sigma_gauss_mic) mic.write_image(micpref + "%1d.hdf" % (idef - 3), 0) drop_spider_doc("params.txt", params) elif options.importctf != None: print ' IMPORTCTF ' from utilities import read_text_row, write_text_row from random import randint import subprocess grpfile = 'groupid%04d' % randint(1000, 9999) ctfpfile = 'ctfpfile%04d' % randint(1000, 9999) cterr = [options.defocuserror / 100.0, options.astigmatismerror] ctfs = read_text_row(options.importctf) for kk in xrange(len(ctfs)): root, name = os.path.split(ctfs[kk][-1]) ctfs[kk][-1] = name[:-4] if (options.input[:4] != 'bdb:'): ERROR('Sorry, only bdb files implemented', 'importctf', 1) d = options.input[4:] #try: str = d.index('*') #except: str = -1 from string import split import glob uu = os.path.split(d) uu = os.path.join(uu[0], 'EMAN2DB', uu[1] + '.bdb') flist = glob.glob(uu) for i in xrange(len(flist)): root, name = os.path.split(flist[i]) root = root[:-7] name = name[:-4] fil = 'bdb:' + os.path.join(root, name) sourcemic = EMUtil.get_all_attributes(fil, 'ptcl_source_image') nn = len(sourcemic) gctfp = [] groupid = [] for kk in xrange(nn): junk, name2 = 
os.path.split(sourcemic[kk]) name2 = name2[:-4] ctfp = [-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] for ll in xrange(len(ctfs)): if (name2 == ctfs[ll][-1]): # found correct if (ctfs[ll][8] / ctfs[ll][0] <= cterr[0]): # acceptable defocus error ctfp = ctfs[ll][:8] if (ctfs[ll][10] > cterr[1]): # error of astigmatism exceed the threshold, set astigmatism to zero. ctfp[6] = 0.0 ctfp[7] = 0.0 gctfp.append(ctfp) groupid.append(kk) break if (len(groupid) > 0): write_text_row(groupid, grpfile) write_text_row(gctfp, ctfpfile) cmd = "{} {} {} {}".format( 'e2bdb.py', fil, '--makevstack=bdb:' + root + 'G' + name, '--list=' + grpfile) #print cmd subprocess.call(cmd, shell=True) cmd = "{} {} {} {}".format('sxheader.py', 'bdb:' + root + 'G' + name, '--params=ctf', '--import=' + ctfpfile) #print cmd subprocess.call(cmd, shell=True) else: print ' >>> Group ', name, ' skipped.' cmd = "{} {} {}".format("rm -f", grpfile, ctfpfile) subprocess.call(cmd, shell=True) elif options.scale > 0.0: from utilities import read_text_row, write_text_row scale = options.scale nargs = len(args) if nargs != 2: print "Please provide names of input and output file!" return p = read_text_row(args[0]) for i in xrange(len(p)): p[i][3] /= scale p[i][4] /= scale write_text_row(p, args[1]) elif options.adaptive_mask: from utilities import get_im from morphology import adaptive_mask nsigma = options.nsigma ndilation = options.ndilation kernel_size = options.kernel_size gauss_standard_dev = options.gauss_standard_dev nargs = len(args) if nargs > 2: print "Too many inputs are given, try again!" return else: inputvol = get_im(args[0]) input_path, input_file_name = os.path.split(args[0]) input_file_name_root, ext = os.path.splitext(input_file_name) if nargs == 2: mask_file_name = args[1] else: mask_file_name = "adaptive_mask_for" + input_file_name_root + ".hdf" # Only hdf file is output. mask3d = adaptive_mask(inputvol, nsigma, ndilation, kernel_size, gauss_standard_dev) mask3d.write_image(mask_file_name) else: ERROR("Please provide option name", "sxprocess.py", 1)
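# The --adaptive_mask / --threshold path in the mains above builds a binary 3-D
# mask by thresholding the volume and then eroding/dilating it. The function
# below is a rough SciPy-based sketch of that thresholding route only; it does
# not reproduce SPARX's adaptive_mask (no largest-cluster selection, no soft
# Gaussian edge), and the name threshold_mask_sketch is hypothetical.
def threshold_mask_sketch(vol3d, threshold, n_erode=0, n_dilate=0):
    import numpy
    from scipy import ndimage
    # binarize the volume at the user-provided threshold
    mask = numpy.asarray(vol3d) >= threshold
    # shrink, then grow, the binary mask the requested number of times
    for _ in range(n_erode):
        mask = ndimage.binary_erosion(mask)
    for _ in range(n_dilate):
        mask = ndimage.binary_dilation(mask)
    return mask.astype(numpy.float32)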
def main(): from utilities import get_input_from_string progname = os.path.basename(sys.argv[0]) usage = ( progname + " stack output_average --radius=particle_radius --xr=xr --yr=yr --ts=ts --thld_err=thld_err --num_ali=num_ali --fl=fl --aa=aa --CTF --verbose --stables" ) parser = OptionParser(usage, version=SPARXVERSION) parser.add_option("--radius", type="int", default=-1, help=" particle radius for alignment") parser.add_option( "--xr", type="string", default="2 1", help="range for translation search in x direction, search is +/xr (default 2,1)", ) parser.add_option( "--yr", type="string", default="-1", help="range for translation search in y direction, search is +/yr (default = same as xr)", ) parser.add_option( "--ts", type="string", default="1 0.5", help="step size of the translation search in both directions, search is -xr, -xr+ts, 0, xr-ts, xr, can be fractional (default: 1,0.5)", ) parser.add_option("--thld_err", type="float", default=0.75, help="threshld of pixel error (default = 0.75)") parser.add_option( "--num_ali", type="int", default=5, help="number of alignments performed for stability (default = 5)" ) parser.add_option("--maxit", type="int", default=30, help="number of iterations for each xr (default = 30)") parser.add_option( "--fl", type="float", default=0.3, help="cut-off frequency of hyperbolic tangent low-pass Fourier filter (default = 0.3)", ) parser.add_option( "--aa", type="float", default=0.2, help="fall-off of hyperbolic tangent low-pass Fourier filter (default = 0.2)" ) parser.add_option("--CTF", action="store_true", default=False, help="Use CTF correction during the alignment ") parser.add_option( "--verbose", action="store_true", default=False, help="print individual pixel error (default = False)" ) parser.add_option( "--stables", action="store_true", default=False, help="output the stable particles number in file (default = False)", ) parser.add_option( "--method", type="string", default=" ", help="SHC (standard method is default when flag is ommitted)" ) (options, args) = parser.parse_args() if len(args) != 1 and len(args) != 2: print "usage: " + usage print "Please run '" + progname + " -h' for detailed options" else: if global_def.CACHE_DISABLE: from utilities import disable_bdb_cache disable_bdb_cache() from applications import within_group_refinement, ali2d_ras from pixel_error import multi_align_stability from utilities import write_text_file, write_text_row global_def.BATCH = True xrng = get_input_from_string(options.xr) if options.yr == "-1": yrng = xrng else: yrng = get_input_from_string(options.yr) step = get_input_from_string(options.ts) class_data = EMData.read_images(args[0]) nx = class_data[0].get_xsize() ou = options.radius num_ali = options.num_ali if ou == -1: ou = nx / 2 - 2 from utilities import model_circle, get_params2D, set_params2D mask = model_circle(ou, nx, nx) if options.CTF: from filter import filt_ctf for im in xrange(len(class_data)): # Flip phases class_data[im] = filt_ctf(class_data[im], class_data[im].get_attr("ctf"), binary=1) for im in class_data: im.set_attr("previousmax", -1.0e10) try: t = im.get_attr("xform.align2d") # if they are there, no need to set them! 
except: try: t = im.get_attr("xform.projection") d = t.get_params("spider") set_params2D(im, [0.0, -d["tx"], -d["ty"], 0, 1.0]) except: set_params2D(im, [0.0, 0.0, 0.0, 0, 1.0]) all_ali_params = [] for ii in xrange(num_ali): ali_params = [] if options.verbose: ALPHA = [] SX = [] SY = [] MIRROR = [] if xrng[0] == 0.0 and yrng[0] == 0.0: avet = ali2d_ras( class_data, randomize=True, ir=1, ou=ou, rs=1, step=1.0, dst=90.0, maxit=options.maxit, check_mirror=True, FH=options.fl, FF=options.aa, ) else: avet = within_group_refinement( class_data, mask, True, 1, ou, 1, xrng, yrng, step, 90.0, maxit=options.maxit, FH=options.fl, FF=options.aa, method=options.method, ) from utilities import info # print " avet ",info(avet) for im in class_data: alpha, sx, sy, mirror, scale = get_params2D(im) ali_params.extend([alpha, sx, sy, mirror]) if options.verbose: ALPHA.append(alpha) SX.append(sx) SY.append(sy) MIRROR.append(mirror) all_ali_params.append(ali_params) if options.verbose: write_text_file([ALPHA, SX, SY, MIRROR], "ali_params_run_%d" % ii) """ avet = class_data[0] from utilities import read_text_file all_ali_params = [] for ii in xrange(5): temp = read_text_file( "ali_params_run_%d"%ii,-1) uuu = [] for k in xrange(len(temp[0])): uuu.extend([temp[0][k],temp[1][k],temp[2][k],temp[3][k]]) all_ali_params.append(uuu) """ stable_set, mir_stab_rate, pix_err = multi_align_stability( all_ali_params, 0.0, 10000.0, options.thld_err, options.verbose, 2 * ou + 1 ) print "%4s %20s %20s %20s %30s %6.2f" % ( "", "Size of set", "Size of stable set", "Mirror stab rate", "Pixel error prior to pruning the set above threshold of", options.thld_err, ) print "Average stat: %10d %20d %20.2f %15.2f" % (len(class_data), len(stable_set), mir_stab_rate, pix_err) if len(stable_set) > 0: if options.stables: stab_mem = [[0, 0.0, 0] for j in xrange(len(stable_set))] for j in xrange(len(stable_set)): stab_mem[j] = [int(stable_set[j][1]), stable_set[j][0], j] write_text_row(stab_mem, "stable_particles.txt") stable_set_id = [] particle_pixerr = [] for s in stable_set: stable_set_id.append(s[1]) particle_pixerr.append(s[0]) from fundamentals import rot_shift2D avet.to_zero() l = -1 print "average parameters: angle, x-shift, y-shift, mirror" for j in stable_set_id: l += 1 print " %4d %4d %12.2f %12.2f %12.2f %1d" % ( l, j, stable_set[l][2][0], stable_set[l][2][1], stable_set[l][2][2], int(stable_set[l][2][3]), ) avet += rot_shift2D( class_data[j], stable_set[l][2][0], stable_set[l][2][1], stable_set[l][2][2], stable_set[l][2][3] ) avet /= l + 1 avet.set_attr("members", stable_set_id) avet.set_attr("pix_err", pix_err) avet.set_attr("pixerr", particle_pixerr) avet.write_image(args[1]) global_def.BATCH = False
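# The stability test above prunes particles by thresholding a pixel error
# computed between independent 2-D alignment runs (multi_align_stability).
# The sketch below shows one common way such a pixel error can be defined:
# apply two (rotation, shift) parameter sets to points on a circle of the
# particle radius and average the resulting displacements. The convention
# (rotation about the center followed by a shift, mirror ignored) is an
# assumption for illustration and may differ from SPARX's exact definition.
def pixel_error_2d_sketch(params1, params2, radius):
    """params = (alpha_deg, sx, sy); returns the mean displacement in pixels."""
    import numpy
    a1, sx1, sy1 = params1
    a2, sx2, sy2 = params2
    t = numpy.linspace(0.0, 2.0*numpy.pi, 360, endpoint=False)
    pts = numpy.stack([radius*numpy.cos(t), radius*numpy.sin(t)], axis=1)
    def apply_2d(p, alpha, sx, sy):
        a = numpy.deg2rad(alpha)
        rot = numpy.array([[numpy.cos(a), -numpy.sin(a)],
                           [numpy.sin(a),  numpy.cos(a)]])
        return p.dot(rot.T) + numpy.array([sx, sy])
    d = numpy.linalg.norm(apply_2d(pts, a1, sx1, sy1) - apply_2d(pts, a2, sx2, sy2), axis=1)
    return d.mean()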
def main(): from EMAN2 import EMData from utilities import write_text_file from mpi import mpi_init, mpi_finalize, MPI_COMM_WORLD, mpi_comm_rank, mpi_comm_size, mpi_comm_split, mpi_barrier from logger import Logger, BaseLogger_Files from air import air import sys import os import user_functions from optparse import OptionParser from global_def import SPARXVERSION progname = os.path.basename(sys.argv[0]) usage = progname + " projections minimal_subset_size target_threshold output_directory --ir=inner_radius --ou=outer_radius --rs=ring_step --xr=x_range --yr=y_range --ts=translational_search_step --delta=angular_step --an=angular_neighborhood --center=center_type --maxit=max_iter --CTF --snr=SNR --ref_a=S --sym=c1 --function=user_function --MPI" parser = OptionParser(usage, version=SPARXVERSION) parser.add_option( "--ir", type="int", default=1, help="inner radius for rotational correlation > 0 (set to 1)") parser.add_option( "--ou", type="int", default=-1, help= "outer radius for rotational correlation < int(nx/2)-1 (set to the radius of the particle)" ) parser.add_option( "--rs", type="int", default=1, help="step between rings in rotational correlation >0 (set to 1)") parser.add_option( "--xr", type="string", default="0", help="range for translation search in x direction, search is +/xr") parser.add_option( "--yr", type="string", default="-1", help= "range for translation search in y direction, search is +/yr (default = same as xr)" ) parser.add_option( "--ts", type="string", default="1", help= "step size of the translation search in both directions, search is -xr, -xr+ts, 0, xr-ts, xr, can be fractional" ) parser.add_option("--delta", type="string", default="2", help="angular step of reference projections") parser.add_option( "--an", type="string", default="-1", help="angular neighborhood for local searches (phi and theta)") parser.add_option( "--center", type="float", default=-1, help= "-1: average shift method; 0: no centering; 1: center of gravity (default=-1)" ) parser.add_option( "--maxit", type="float", default=50, help= "maximum number of iterations performed for each angular step (set to 50) " ) parser.add_option("--CTF", action="store_true", default=False, help="Consider CTF correction during the alignment ") parser.add_option("--snr", type="float", default=1.0, help="Signal-to-Noise Ratio of the data") parser.add_option( "--ref_a", type="string", default="S", help= "method for generating the quasi-uniformly distributed projection directions (default S)" ) parser.add_option("--sym", type="string", default="c1", help="symmetry of the refined structure") parser.add_option( "--function", type="string", default="ref_ali3d", help="name of the reference preparation function (ref_ali3d by default)" ) parser.add_option("--npad", type="int", default=2, help="padding size for 3D reconstruction (default=2)") parser.add_option( "--MPI", action="store_true", default=True, help="whether to use MPI version - this is always set to True") parser.add_option( "--proc_mshc", type="int", default=3, help="number of MPI processes per multiSHC, 3 is minimum (default=3)") (options, args) = parser.parse_args(sys.argv[1:]) if len(args) < 4: print "usage: " + usage print "Please run '" + progname + " -h' for detailed options" return 1 mpi_init(0, []) mpi_size = mpi_comm_size(MPI_COMM_WORLD) mpi_rank = mpi_comm_rank(MPI_COMM_WORLD) proc_per_mshc = int(options.proc_mshc) if mpi_size < proc_per_mshc: print "Number of processes can't be smaller than value given as the parameter --proc_mshc" mpi_finalize() return 
log = Logger(BaseLogger_Files()) projs = EMData.read_images(args[0]) minimal_subset_size = int(args[1]) target_threshold = float(args[2]) outdir = args[3] if mpi_rank == 0: if os.path.exists(outdir): ERROR( 'Output directory exists, please change the name and restart the program', "sxmulti_shc", 1) mpi_finalize() return os.mkdir(outdir) import global_def global_def.LOGFILE = os.path.join(outdir, global_def.LOGFILE) mpi_barrier(MPI_COMM_WORLD) if outdir[-1] != "/": outdir += "/" log.prefix = outdir me = wrap_mpi_split(MPI_COMM_WORLD, mpi_size / proc_per_mshc) options.user_func = user_functions.factory[options.function] new_subset, new_threshold = air(projs, minimal_subset_size, target_threshold, options, number_of_runs=6, number_of_winners=3, mpi_env=me, log=log) if mpi_rank == 0: log.add("Output threshold =", new_threshold) log.add("Output subset: ", len(new_subset), new_subset) write_text_file(new_subset, log.prefix + "final_subset.txt") mpi_finalize()
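# The multi-SHC driver above partitions MPI_COMM_WORLD into groups of
# --proc_mshc processes via SPARX's wrap_mpi_split so that each SHC run gets
# its own sub-communicator. The helper below is a hedged mpi4py sketch of the
# same grouping idea (color = rank // group_size); it only illustrates the
# communicator split and is not SPARX's mpi_env wrapper.
def split_world_into_groups(group_size):
    from mpi4py import MPI
    world = MPI.COMM_WORLD
    # ranks 0..group_size-1 form group 0, the next group_size ranks group 1, ...
    color = world.Get_rank() // group_size
    group_comm = world.Split(color=color, key=world.Get_rank())
    return color, group_comm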