Example no. 1
def measure_for_outlier_criterion(criterion_name, masterdir, rviper_iter, list_of_viper_run_indices):

	# main_iterations = [NAME_OF_MAIN_DIR + "%03d" % i for i in range(1, rviper_iter + 1)]
	main_iterations = [NAME_OF_MAIN_DIR + "%03d" % i for i in range(rviper_iter, rviper_iter + 1)]
	mainoutputdir = masterdir + DIR_DELIM + main_iterations[0] + DIR_DELIM

	p = []
	for i1 in list_of_viper_run_indices:
		p.append(read_text_row(mainoutputdir + NAME_OF_RUN_DIR + "%03d" % (i1) + DIR_DELIM + "params.txt"))
	subset, avg_diff_per_image, outp = find_common_subset(p, 0)
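	# avg_diff_per_image[i] is, presumably, the average angular disagreement of image i
	# across the selected VIPER runs; sorting it lets both criteria below look at the
	# upper tail of the distribution.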

	avg_diff_per_image.sort()
	x1 = len(avg_diff_per_image)
	y1 = avg_diff_per_image[-1]
	
	if y1 <= ANGLE_ERROR_THRESHOLD:
		return TRIPLET_WITH_ANGLE_ERROR_LESS_THAN_THRESHOLD_HAS_BEEN_FOUND

	if criterion_name == "80th percentile":
		return avg_diff_per_image[int(x1*PERCENT_THRESHOLD_X)]/y1
	elif criterion_name == "fastest increase in the last quartile":
		for k in range(5,6):
			avg_diff_per_image_diff = [x - avg_diff_per_image[i - k] for i, x in enumerate(avg_diff_per_image)][k:]
			
			avg_diff_per_image_diff_max = max(avg_diff_per_image_diff)
			avg_diff_per_image_diff_max_normalized = max(avg_diff_per_image_diff)/y1
			
			if avg_diff_per_image_diff.index(avg_diff_per_image_diff_max) >= int(x1*0.75):
				return avg_diff_per_image_diff_max_normalized
			return 0.0
	else:
		print "Error, no criterion name is specified!"
		mpi_finalize()
		sys.exit()
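A minimal usage sketch for the criterion function above. The directory name and run indices are hypothetical, and the module-level constants and helpers it relies on (NAME_OF_MAIN_DIR, NAME_OF_RUN_DIR, DIR_DELIM, ANGLE_ERROR_THRESHOLD, PERCENT_THRESHOLD_X, read_text_row, find_common_subset) are assumed to be defined/imported as in the original SPARX module.

# Hypothetical call: score the "80th percentile" criterion for VIPER runs 0-2
# of the current main iteration inside a made-up master directory.
score = measure_for_outlier_criterion("80th percentile", "rviper_master", 1, [0, 1, 2])
print "outlier criterion score:", score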
Example no. 2
def f1():

    myid = mpi_comm_rank(MPI_COMM_WORLD)
    program_state_stack(locals(), getframeinfo(currentframe()), "my_state.json")

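    # Presumably program_state_stack(...) returns a truthy value only for steps that
    # have not yet been completed, so each guarded block below is skipped when the
    # run is resumed from the saved state in "my_state.json".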
    for local_var_i in range(2):
        for local_var_j in range(2):
            if program_state_stack(locals(), getframeinfo(currentframe())):
                my_s = "%s_f1_%d_%d_%03d_%03d.txt" % (
                    mydir,
                    local_var_i,
                    local_var_j,
                    myid,
                    getframeinfo(currentframe()).lineno,
                )
                f = open(my_s, "w")
                f.write(my_s[5:])
                f.flush()
                f.close()
            if program_state_stack(locals(), getframeinfo(currentframe())):
                my_s = "%s_f1_%d_%d_%03d_%03d.txt" % (
                    mydir,
                    local_var_i,
                    local_var_j,
                    myid,
                    getframeinfo(currentframe()).lineno,
                )
                f = open(my_s, "w")
                f.write(my_s[5:])
                f.flush()
                f.close()
            if program_state_stack(locals(), getframeinfo(currentframe())):
                my_s = "%s_f1_%d_%d_%03d_%03d.txt" % (
                    mydir,
                    local_var_i,
                    local_var_j,
                    myid,
                    getframeinfo(currentframe()).lineno,
                )
                f = open(my_s, "w")
                f.write(my_s[5:])
                f.flush()
                f.close()
                a(local_var_i, local_var_j)
            if program_state_stack(locals(), getframeinfo(currentframe())):
                my_s = "%s_f1_%d_%d_%03d_%03d.txt" % (
                    mydir,
                    local_var_i,
                    local_var_j,
                    myid,
                    getframeinfo(currentframe()).lineno,
                )
                f = open(my_s, "w")
                f.write(my_s[5:])
                f.flush()
                f.close()

    program_state_stack(locals(), getframeinfo(currentframe()), last_call="LastCall")

    mpi_finalize()
Example no. 3
def get_already_processed_viper_runs(run_get_already_processed_viper_runs):

	import random

	if run_get_already_processed_viper_runs:
		location_location = "/Users/hvoicu/Analysis/rrviper/particle__PIC_ISAC_g1_clean/0001__sim_r_viper_pool_001/"
		location_location = "/Users/hvoicu/Analysis/rrviper/particle__sp_MED_isac_clean_v1/0001__sim_r_viper_pool_001/"
		
		if "counter" not in get_already_processed_viper_runs.__dict__:
			# function needs to be called once before being used !
			get_already_processed_viper_runs.counter = -2
	
			path, dirs, files = os.walk(location_location).next()
			# dirs = filter(lambda x:'run' in x, dirs)
			import re
			dirs = filter(lambda x:re.search(r'run\d\d\d$', x), dirs)
			get_already_processed_viper_runs.r_permutation = range(len(dirs))
			random.shuffle(get_already_processed_viper_runs.r_permutation)
			print str(get_already_processed_viper_runs.r_permutation)
		get_already_processed_viper_runs.counter += 1
		print "get_already_processed_viper_runs.counter: " + str(get_already_processed_viper_runs.counter)
		# if get_already_processed_viper_runs.counter > 9:
		if get_already_processed_viper_runs.counter > (MAXIMUM_NO_OF_VIPER_RUNS_ANALYZED_TOGETHER - 1):
			print "get_already_processed_viper_runs.counter > 9"
			mpi_finalize()
			sys.exit()
	
		return location_location + NAME_OF_RUN_DIR + "%03d"%get_already_processed_viper_runs.r_permutation[get_already_processed_viper_runs.counter]
	else:
		get_already_processed_viper_runs.r_permutation = [0]*20
Example no. 4
def main():
	
	progname = os.path.basename(sys.argv[0])
	usage = progname + " stack outdir <maskfile> --K=2 --nb_part=5  --th_nobj=10 --rand_seed=10 --opt_method=SSE --maxit=1000 --normalize --CTF  --MPI"
	parser = OptionParser(usage,version=SPARXVERSION)
	parser.add_option("--K",              type="int",          default=2,         help="Number of classes for K-means (default 2)")
	parser.add_option("--nb_part",        type="int",          default=5,         help="Number of partitions used to calculate the stability (default 5)")
	#parser.add_option("--F",              type="float",        default=0.0,       help="Cooling factor in simulated annealing, <1.0")
	#parser.add_option("--T0",             type="float",        default=0.0,       help="Simulated annealing first temperature")
	parser.add_option("--th_nobj",        type="int",          default=1,         help="Cleanning threshold, classes with number of images < th_nobj are removed (default 1)")
	parser.add_option("--rand_seed",      type="int",          default=0,         help="Random seed")
	#parser.add_option("--opt_method",     type='string',       default='SSE',     help="K-means method: SSE (default), cla")
	#parser.add_option("--match",          type='string',       default='bbenum',     help='Algorithm to match partitions: pwa, pair-wise agreement (default), or hh, hierarchical Hungarian algorithm, or bbenum')
	parser.add_option("--maxit",          type="int",          default=1e9,       help="Maximum number of iterations for k-means")
	parser.add_option("--normalize",      action="store_true", default=False,     help="Normalize images under the mask")
	parser.add_option("--CTF",            action="store_true", default=False,     help="Perform classification using CTF information")
	#parser.add_option("--CUDA",           action="store_true", default=False,     help="CUDA version")
	parser.add_option("--MPI",            action="store_true", default=False,     help="Use MPI version ")	
	(options, args) = parser.parse_args()
	if len(args) < 2 or len(args) > 3:
		print "usage: " + usage
		print "Please run '" + progname + " -h' for detailed options"
	else:
		if len(args) == 2: mask = None
		else:              mask = args[2]

		if options.K < 2:
			sys.stderr.write('ERROR: K must be > 1 group\n\n')
			sys.exit()

		if options.nb_part < 2:
			sys.stderr.write('ERROR: nb_part must be > 1 partition\n\n')
			sys.exit()
		if global_def.CACHE_DISABLE:
			from utilities import disable_bdb_cache
			disable_bdb_cache()

		global_def.BATCH = True
		if options.MPI:
			from mpi import mpi_init
			sys.argv = mpi_init(len(sys.argv), sys.argv)
			'''if options.CUDA:
				from  development import  k_means_stab_MPICUDA_stream_YANG
				k_means_stab_MPICUDA_stream_YANG(args[0], args[1], mask, options.K, options.nb_part, options.F, options.T0, options.th_nobj, options.rand_seed, options.maxit)
			else:'''
			from  statistics import  k_means_stab_MPI_stream
			k_means_stab_MPI_stream(args[0], args[1], mask, options.K, options.nb_part, 0.0, 0.0, options.th_nobj, options.rand_seed, "SSE", options.CTF, options.maxit)
		else:
			'''if options.CUDA:
				from  development  import  k_means_stab_CUDA_stream
				k_means_stab_CUDA_stream(args[0], args[1], mask, options.K, options.nb_part, options.F, options.T0, options.th_nobj, options.rand_seed, options.maxit)
			else:'''
			
			from  statistics  import  k_means_stab_stream
			k_means_stab_stream(args[0], args[1], mask, options.K, options.nb_part, 0.0, 0.0, options.th_nobj, options.rand_seed, "SSE", options.CTF, options.maxit)
		global_def.BATCH = False

		if options.MPI:
			from mpi import mpi_finalize
			mpi_finalize()
Example no. 5
def main():
	arglist = []
	for arg in sys.argv:
		arglist.append( arg )
	progname = os.path.basename(sys.argv[0])
	usage = progname + " stack outdir <maskfile> --ou=outer_radius --delta=angular_bracket --maxit=max_iter --chunk=data_chunk_for_update --center --CTF --snr=SNR --sym=symmetry  --function=user_function --MPI"
	parser = OptionParser(usage,version=SPARXVERSION)
	parser.add_option("--ou",       type="float",        default=-1,      help="outer radius of a circular mask that should encompass the particle< int(nx/2)-1 (set to int(nx/2)-1)")
	parser.add_option("--delta",    type="float",        default=2,       help="angular bracket (set to 2)")
	parser.add_option("--ts",       type="float",        default=2,       help="shift bracket (set to 2)")
	parser.add_option("--center",   type="float",        default=0,       help="-1 - average centering method; 0 - no cetnering of template volume (default), 1 - center the volume using center of gravity")
	parser.add_option("--maxit",    type="int",          default=10,      help="maximum number of iterations (set to 10)")
	parser.add_option("--chunk",    type="float",        default=1.0,     help="chunk of data after which the 3-D structure will be updated 0<chunk<=1.0 (set to 1.0)")
	parser.add_option("--CTF",      action="store_true", default=False,   help="Consider CTF correction during the alignments")
	parser.add_option("--snr",      type="float", 	     default=1,       help="SNR > 0.0 (set to 1.0)")
	parser.add_option("--sym",      type="string",       default="c1",    help="symmetry group (set to c1)")
	parser.add_option("--function", type="string",       default="ref_ali3d", help="name of the user-supplied reference preparation function")
	parser.add_option("--npad",     type="int",          default= 2,      help="padding size for 3D reconstruction")
	parser.add_option("--debug",    action="store_true", default=False,   help="Debug printout")
	parser.add_option("--MPI",      action="store_true", default=False,   help="use MPI version")
	parser.add_option("--fourvar",  action="store_true", default=False,   help="compute Fourier variance")
	(options, args) = parser.parse_args(arglist[1:])
	if(len(args) < 2 or len(args) > 3):
		print "usage: " + usage
		print "Please run '" + progname + " -h' for detailed options"
	else:
	
		if(len(args) == 2):
			mask = None
		else:
			mask = args[2]

		if options.MPI:
			from mpi import mpi_init
			sys.argv = mpi_init(len(sys.argv), sys.argv)

		if global_def.CACHE_DISABLE:
			from utilities import disable_bdb_cache
			disable_bdb_cache()

		
		global_def.BATCH = True
		if options.fourvar:
			from development import nlocal_ali3d_MPI
			nlocal_ali3d_MPI(args[0], args[1], mask, options.ou, options.delta, options.ts, options.center, options.maxit,
			options.CTF, options.snr, options.sym, options.chunk, options.function, options.fourvar,
			options.npad, options.debug)
		else:
			from applications import local_ali3d
			local_ali3d(args[0], args[1], mask, options.ou, options.delta, options.ts, options.center, options.maxit,
			options.CTF, options.snr, options.sym, options.chunk, options.function, options.fourvar,
			options.npad, options.debug, options.MPI)
		global_def.BATCH = False

		if options.MPI:
			from mpi import mpi_finalize
			mpi_finalize()
Example no. 6
def main():
	from   optparse       import OptionParser
	progname = os.path.basename(sys.argv[0])
	usage = progname + " filelist outdir  --fl=filter_low_value --aa=filter_fall_off --radccc=radius_ccc  -repair=repairfile --pca --pcamask --pcanvec --MPI"
	parser = OptionParser(usage,version=SPARXVERSION)
	parser.add_option("--fl",             type="float",        default=0.0,       help="cut-off frequency of hyperbolic tangent low-pass Fourier filter")
	parser.add_option("--aa",             type="float",        default=0.0,       help="fall-off of hyperbolic tangent low-pass Fourier filter")
	parser.add_option("--radccc",         type="int",          default=-1,        help="radius for ccc calculation")
	parser.add_option("--MPI",            action="store_true", default=False,     help="use MPI version" )
	parser.add_option("--repair",         type="string",       default="default", help="repair original bootstrap volumes: None or repair file name")
	parser.add_option("--pca",            action="store_true", default=False,     help="run pca" )
	parser.add_option("--pcamask",        type="string",       default=None,      help="mask for pca" )
	parser.add_option("--pcanvec",        type="int",          default=2,         help="number of eigvectors computed in PCA")
	parser.add_option("--n",              action="store_true", default=False,     help="new")
	parser.add_option("--scratch",        type="string",       default="./",      help="scratch directory")
	(options, args) = parser.parse_args(sys.argv[1:])

	if len(args)<2 :
		print "usage: " + usage
		print "Please run '" + progname + " -h' for detailed options"
	else:
		files = args[0:-1]
		outdir = args[-1]

		if global_def.CACHE_DISABLE:
			from utilities import disable_bdb_cache
			disable_bdb_cache()
		if options.MPI:
			from mpi import mpi_init
			sys.argv = mpi_init( len(sys.argv), sys.argv )


			arglist = []
			for arg in sys.argv:
				arglist.append( arg )

			global_def.BATCH = True
			
			if(options.n):
				from development import var_mpi_new
				var_mpi_new( files[0], outdir, options.scratch, options.fl, options.aa, options.radccc, False, False, options.repair, options.pca, options.pcamask, options.pcanvec)
			else:
				from applications import var_mpi
				var_mpi( files, outdir, options.fl, options.aa, options.radccc, options.repair, options.pca, options.pcamask, options.pcanvec)

			global_def.BATCH = False
			from mpi import mpi_finalize
			mpi_finalize()
		else:
			global_def.BATCH = True
			ERROR("Please use MPI version","sxvar",1)
			from applications import defvar
			defvar(  files, outdir, options.fl, options.aa, options.radccc, options.repair, options.pca, options.pcamask, options.pcanvec)
			global_def.BATCH = False
Example no. 7
def main():
	arglist = []
	for arg in sys.argv:
		arglist.append( arg )
	progname = os.path.basename(sys.argv[0])
	usage = progname + " data_stack reference_stack outdir <maskfile> --ir=inner_radius --ou=outer_radius --rs=ring_step --xr=x_range --yr=y_range  --ts=translation_step --center=center_type --maxit=max_iteration --CTF --snr=SNR --function=user_function_name --rand_seed=random_seed --MPI"
	parser = OptionParser(usage,version=SPARXVERSION)
	parser.add_option("--ir", type="float", default=1, help="  inner radius for rotational correlation > 0 (set to 1)")
	parser.add_option("--ou", type="float", default=-1, help="  outer radius for rotational correlation < nx/2-1 (set to the radius of the particle)")
	parser.add_option("--rs", type="float", default=1, help="  step between rings in rotational correlation > 0 (set to 1)" )
	parser.add_option("--xr", type="float", default=0, help="  range for translation search in x direction, search is +/-xr ")
	parser.add_option("--yr", type="float", default=0, help="  range for translation search in y direction, search is +/-yr ")
	parser.add_option("--ts", type="float", default=1, help="  step of translation search in both directions")
	parser.add_option("--center", type="float", default=1, help="  0 - if you do not want the average to be centered, 1 - center the average (default=1)")
	parser.add_option("--maxit", type="float", default=10, help="  maximum number of iterations (set to 10) ")
	parser.add_option("--CTF", action="store_true", default=False, help=" Consider CTF correction during multiple reference alignment")
	parser.add_option("--snr", type="float",  default= 1.0, help="  signal-to-noise ratio of the data (set to 1.0)")
	parser.add_option("--function", type="string", default="ref_ali2d", help="  name of the reference preparation function")
	parser.add_option("--rand_seed", type="int", default=1000, help=" random seed of initial (set to 1000)" )
	parser.add_option("--MPI", action="store_true", default=False,     help="  whether to use MPI version ")
	parser.add_option("--EQ", action="store_true", default=False,     help="  equal version ")
	(options, args) = parser.parse_args(arglist[1:])
	if len(args) < 3 or len(args) > 4:
    		print "usage: " + usage
    		print "Please run '" + progname + " -h' for detailed options"
	else:
	
		if len(args) == 3:
			mask = None
		else:
			mask = args[3]

		if global_def.CACHE_DISABLE:
			from utilities import disable_bdb_cache
			disable_bdb_cache()
		
		if options.MPI:
			from mpi import mpi_init
			sys.argv = mpi_init(len(sys.argv), sys.argv)

		global_def.BATCH = True
		if options.EQ:
			from development import mrefeq_ali2df
			#print  "  calling MPI",options.MPI,options.function,options.rand_seed
			#print  args
			mrefeq_ali2df(args[0], args[1], mask, options.ir, options.ou, options.rs, options.xr, options.yr, options.ts, options.center, options.maxit, options.CTF, options.snr, options.function, options.rand_seed, options.MPI)
		else:
			from applications import mref_ali2d
			mref_ali2d(args[0], args[1], args[2], mask, options.ir, options.ou, options.rs, options.xr, options.yr, options.ts, options.center, options.maxit, options.CTF, options.snr, options.function, options.rand_seed, options.MPI)
		global_def.BATCH = False
		if options.MPI:
			from mpi import mpi_finalize
			mpi_finalize()
Example no. 8
def main():

	import sys

	arglist = []
	for arg in sys.argv:
		arglist.append( arg )

	progname = os.path.basename(arglist[0])
	usage = progname + " prjstack outdir bufprefix --delta --d --nvol --nbufvol --seedbase --snr --npad --CTF --MPI --verbose"
	parser = OptionParser(usage,version=SPARXVERSION)
	parser.add_option("--nvol",     type="int",                         help="number of resample volumes to be generated")
	parser.add_option("--nbufvol",  type="int",          default=1,     help="number of fftvols in the memory")
	parser.add_option("--delta",    type="float",        default=10.0,  help="angular step for cones")
	parser.add_option("--d",        type="float",        default=0.1,   help="fraction of projections to leave out")
	parser.add_option("--CTF",      action="store_true", default=False, help="use CTF")
	parser.add_option("--snr",      type="float",        default=1.0,   help="Signal-to-Noise Ratio")
	parser.add_option("--npad",     type="int",          default=2,     help="times of padding")
	parser.add_option("--seedbase", type="int",          default=-1,    help="random seed base")
	parser.add_option("--MPI",      action="store_true", default=False, help="use MPI")
	parser.add_option("--verbose",  type="int",          default=0,     help="verbose level: 0 no, 1 yes")

	(options, args) = parser.parse_args( arglist[1:] )

	if( len(args) !=1 and len(args) != 3):
		print "usage: " + usage
		return None

	prjfile = args[0]

	if options.MPI:
		from mpi import mpi_barrier, mpi_comm_rank, mpi_comm_size, mpi_comm_split, MPI_COMM_WORLD
		from mpi import mpi_init
		sys.argv = mpi_init( len(sys.argv), sys.argv )
		myid = mpi_comm_rank( MPI_COMM_WORLD )
		ncpu = mpi_comm_size( MPI_COMM_WORLD )
	else:
		myid = 0
		ncpu = 1

	if global_def.CACHE_DISABLE:
		from utilities import disable_bdb_cache
		disable_bdb_cache()

	outdir = args[1]
	bufprefix = args[2]
	resample( prjfile, outdir, bufprefix, options.nbufvol, options.nvol, options.seedbase,\
	           options.delta, options.d, options.snr, options.CTF, options.npad,\
		   options.MPI, myid, ncpu, options.verbose )
	if options.MPI:
		from mpi import mpi_finalize
		mpi_finalize()
Example no. 9
def main():
	progname = os.path.basename(sys.argv[0])
	usage = progname + " stack outdir <maskfile> --ir=inner_radius --ou=outer_radius --rs=ring_step --xr=x_range --yr=y_range --ts=translation_step --dst=delta --center=center --maxit=max_iteration --CTF --snr=SNR --Fourvar=Fourier_variance --Ng=group_number --Function=user_function_name --CUDA --GPUID --MPI"
	parser = OptionParser(usage,version=SPARXVERSION)
	parser.add_option("--ir",       type="float",  default=1,             help="inner radius for rotational correlation > 0 (set to 1)")
	parser.add_option("--ou",       type="float",  default=-1,            help="outer radius for rotational correlation < nx/2-1 (set to the radius of the particle)")
	parser.add_option("--rs",       type="float",  default=1,             help="step between rings in rotational correlation > 0 (set to 1)" ) 
	parser.add_option("--xr",       type="string", default="4 2 1 1",     help="range for translation search in x direction, search is +/xr ")
	parser.add_option("--yr",       type="string", default="-1",          help="range for translation search in y direction, search is +/yr ")
	parser.add_option("--ts",       type="string", default="2 1 0.5 0.25",help="step of translation search in both directions")
	parser.add_option("--dst",      type="float",  default=0.0,           help="delta")
	parser.add_option("--center",   type="float",  default=-1,            help="-1.average center method; 0.not centered; 1.phase approximation; 2.cc with Gaussian function; 3.cc with donut-shaped image 4.cc with user-defined reference 5.cc with self-rotated average")
	parser.add_option("--maxit",    type="float",  default=0,             help="maximum number of iterations (0 means the maximum iterations is 10, but it will automatically stop should the criterion falls")
	parser.add_option("--CTF",      action="store_true", default=False,   help="use CTF correction during alignment ")
	parser.add_option("--snr",      type="float",  default=1.0,           help="signal-to-noise ratio of the data (set to 1.0)")
	parser.add_option("--Fourvar",  action="store_true", default=False,   help="compute Fourier variance")
	parser.add_option("--Ng",       type="int",    default=-1,            help="number of groups in the new CTF filteration")
	parser.add_option("--num_ali",  type="int",    default=3,             help="number of independent alignments to do")
	parser.add_option("--function", type="string", default="ref_ali2d",   help="name of the reference preparation function (default ref_ali2d)")
	parser.add_option("--CUDA",     action="store_true", default=False,   help="use CUDA program")
	parser.add_option("--GPUID",    type="string",    default="",         help="ID of GPUs available")
	parser.add_option("--MPI",      action="store_true", default=False,   help="use MPI version ")
	(options, args) = parser.parse_args()
	if len(args) < 2 or len(args) > 3:
    		print "usage: " + usage
    		print "Please run '" + progname + " -h' for detailed options"
	else:
		if args[1] == 'None': outdir = None
		else:		      outdir = args[1]

		if len(args) == 2: mask = None
		else:              mask = args[2]
		
		from development import multi_ali2d

		if global_def.CACHE_DISABLE:
			from utilities import disable_bdb_cache
			disable_bdb_cache()
		
		if options.MPI:
			from mpi import mpi_init
			sys.argv = mpi_init(len(sys.argv),sys.argv)

		global_def.BATCH = True
		multi_ali2d(args[0], outdir, mask, options.ir, options.ou, options.rs, options.xr, options.yr, options.ts, options.dst, options.center, \
			options.maxit, options.CTF, options.snr, options.Fourvar, options.Ng, options.num_ali, options.function, options.CUDA, options.GPUID, options.MPI)
		global_def.BATCH = False

		if options.MPI:
			from mpi import mpi_finalize
			mpi_finalize()
Example no. 10
def main():
	progname = os.path.basename(sys.argv[0])
	usage = progname + " out_averages outdir --ou=outer_radius --xr=x_range --ts=translation_step --maxit=max_iteration --CTF --snr=SNR --function=user_function_name --Fourvar --ali=kind_of_alignment --center=center_type"
	parser = OptionParser(usage,version=SPARXVERSION)
	parser.add_option("--ou",       type="int",        default=-1,             help="outer radius for rotational correlation < nx/2-1 (set to the radius of the particle)")
	parser.add_option("--xr",       type="string",       default="4 2",      help="range for translation search in x direction, search is +/xr ")
	parser.add_option("--ts",       type="string",       default="2 1", help="step of translation search in both directions")
	parser.add_option("--maxit",    type="float",        default=0,              help="maximum number of iterations (0 means the maximum iterations is 10, but it will automatically stop should the criterion falls")
	parser.add_option("--CTF",      action="store_true", default=False,          help="Consider CTF correction during the alignment ")
	parser.add_option("--snr",      type="float",        default=1.0,            help="signal-to-noise ratio of the data (set to 1.0)")
	parser.add_option("--Fourvar",  action="store_true", default=False,          help="compute Fourier variance")
	parser.add_option("--function", type="string",       default="ref_ali2d",    help="name of the reference preparation function")
	parser.add_option('--Ng',	type='int',		default=-1,		help='Ng')
	parser.add_option('--num_ali',	type='int',		default=2,		help='number of alignments')
	parser.add_option('--err_th',	type='float',		default=1.0,		help='')
	parser.add_option('--th_mir',	type='float',		default=0.5,		help='')
	parser.add_option('--th_err',	type='float',		default=1.0,		help='')
	parser.add_option('--K',        type='int',            default=100,             help='number of clusters')
	parser.add_option('--dst',	type='float',		default=0.0,		help='')
	parser.add_option("--center",   type="float",  default=-1,            help="-1.average center method; 0.not centered; 1.phase approximation; 2.cc with Gaussian function; 3.cc with donut-shaped image 4.cc with user-defined reference 5.cc with self-rotated average")
	parser.add_option("--CUDA",     action="store_true", default=False,          help="whether to use CUDA ")
	parser.add_option("--GPUID",      type="string",        default="",            help="the IDs of GPU to use")
	parser.add_option('--MPI',      action='store_true',   default=False,          help='MPI')
	parser.add_option('--old',      action='store_true',   default=False,          help='old')

	(options, args) = parser.parse_args()
	if (not options.old and len(args) != 3) or (options.old and len(args) != 4):
		print "usage: " + usage
		print "Please run '" + progname + " -h' for detailed options"
	else:
		if global_def.CACHE_DISABLE:
			from utilities import disable_bdb_cache
			disable_bdb_cache()

		if options.MPI:
			from mpi import mpi_init
			sys.argv = mpi_init(len(sys.argv),sys.argv)

		global_def.BATCH = True
		if options.old:
			from development import realid
			realid(args[0], args[1], args[2], args[3], options.ou, options.xr, options.ts, options.maxit, options.function, options.snr, options.CTF, options.Fourvar, options.Ng, options.num_ali, options.th_mir, options.th_err, options.dst, options.center, options.CUDA, options.GPUID, options.MPI)
		else:
			from development import realignment
			realignment(args[0], args[1], args[2], options.ou, options.xr, options.ts, options.maxit, options.function, options.snr, options.CTF, options.Fourvar, options.Ng, options.num_ali, options.err_th, options.K, options.dst, options.center, options.CUDA, options.GPUID, options.MPI)
		global_def.BATCH = False
		
		if options.MPI:
			from mpi import mpi_finalize
			mpi_finalize()
Example no. 11
def main():
	progname = os.path.basename(sys.argv[0])
	usage = progname + " configure_file.cfg"
	
	parser = OptionParser(usage,version=SPARXVERSION)
	parser.add_option("--ir", type="float", default=1, help="  inner radius for rotational correlation (set to 1)")
	parser.add_option("--ou", type="float", default=-1, help="  outer radius for rotational correlation (set to the radius of the particle)")
	parser.add_option("--rs", type="float", default=1, help="  step between rings in rotational correlation (set to 1)" )
	parser.add_option("--xr", type="float", default=0, help="  range for translation search in x direction, search is +/-xr ")
	parser.add_option("--yr", type="float", default=-1, help="  range for translation search in y direction, search is +/-yr ")
	parser.add_option("--ts", type="float", default=1, help="  step of translation search in both directions")
	parser.add_option("--CTF", action="store_true", default=False, help=" Consider CTF correction during multiple reference assignment")
	parser.add_option("--CUDA", action="store_true", default=False, help=" whether to use CUDA")
	parser.add_option("--GPUID", type="string", default="0 1 2 3",  help=" the IDs of GPU to use")
	parser.add_option("--SA",   action="store_true", default=False,  help=" whether to use simulated annealing")
	parser.add_option("--T",   type="float",  default=0.001,  help=" the temperature of simulated annealing")
	parser.add_option("--F",   type="float",  default=0.995,  help=" the temperature cooling rate")
	parser.add_option("--heads_up",   action="store_true", default=False,  help=" whether to give a heads up")
	parser.add_option("--MPI", action="store_true", default=False, help="  whether to use MPI version ")

	(options, args) = parser.parse_args()
	if len(args) < 3 or len(args) > 4:
    		print "usage: " + usage
    		print "Please run '" + progname + " -h' for detailed options"
		sys.exit()
	
	if len(args) == 4:	mask = args[3]
	else:	mask = None

	if global_def.CACHE_DISABLE:
		from utilities import disable_bdb_cache
		disable_bdb_cache()

	if options.MPI:
		from mpi import mpi_init
		sys.argv = mpi_init(len(sys.argv),sys.argv)		
		
	from development import multi_assign
	global_def.BATCH = True
	multi_assign(args[0], args[1], args[2], mask, options.ir, options.ou, options.rs, options.xr, options.yr, options.ts,  
			options.CTF, options.CUDA, options.GPUID, options.SA, options.T, options.F, options.heads_up, options.MPI)
	global_def.BATCH = False

	if options.MPI:
		from mpi import mpi_finalize
		mpi_finalize()
Example no. 12
def main():
	progname = os.path.basename(sys.argv[0])
	usage = progname + " stack <maskfile> --search_rng=10 --maxit=max_iteration --CTF --snr=SNR --Fourvar=Fourier_variance --oneDx --MPI"
	parser = OptionParser(usage,version=SPARXVERSION)
	parser.add_option("--search_rng",       type="int",           default=-1,      help="Search range for x-shift")
	parser.add_option("--search_ang",       type="int",           default=-1,      help="Search range for inplane rotation angle")
	parser.add_option("--search_rng_y",     type="int",           default=-1,      help="Search range for x-shift. Not used for 1D search (oneDx flag set).")
	parser.add_option("--maxit",            type="int",           default=100,     help="Maximum number of iterations program will perform")
	parser.add_option("--CTF",              action="store_true",  default=False,   help="Use CTF correction")
	parser.add_option("--snr",              type="float",         default=1.0,     help="signal-to-noise ratio of the data (default is 1.0)")
	parser.add_option("--Fourvar",          action="store_true",  default=False,   help="compute Fourier variance")
	parser.add_option("--oneDx",            action="store_true",  default=False,   help="1D search along x-axis")
	parser.add_option("--MPI",              action="store_true",  default=False,   help="use MPI")
	parser.add_option("--curvature",        action="store_true",  default=False,   help="for curved filament alignment")
	(options, args) = parser.parse_args()
	
	if not(options.MPI):
		print "Only MPI version is currently implemented."
		print "Please run '" + progname + " -h' for detailed options"
		return
			
	if len(args) < 1 or len(args) > 2:
		print "usage: " + usage
		print "Please run '" + progname + " -h' for detailed options"
	else:
	
		if len(args) == 1: mask = None
		else:              mask = args[1]
			
		if global_def.CACHE_DISABLE:
			from utilities import disable_bdb_cache
			disable_bdb_cache()
		
		from mpi import mpi_init
		sys.argv = mpi_init(len(sys.argv),sys.argv)

		global_def.BATCH = True
		if options.oneDx:
			helicalshiftali_MPI(args[0], mask, options.maxit, options.CTF, options.snr, options.Fourvar, options.search_rng)		
		else:
			shiftali_MPI(args[0], mask, options.maxit, options.CTF, options.snr, options.Fourvar,options.search_rng,options.oneDx,options.search_rng_y)
		global_def.BATCH = False
		
		from mpi import mpi_finalize
		mpi_finalize()
Example no. 13
def main():
	arglist = []
	for arg in sys.argv:
		arglist.append( arg )
	progname = os.path.basename(arglist[0])
	usage = progname + " stack ref_vol outdir --dp=rise --dphi=rotation --apix=pixel_size --phistep=phi_step --zstep=z_step --fract=helicising_fraction --rmax=maximum_radius --rmin=min_radius --CTF --sym=c1 --function=user_function --maxit=max_iter --MPI"
	parser = OptionParser(usage,version=SPARXVERSION)
	parser.add_option("--dp",       type="float",        default= 1.0,                help="delta z - translation in Angstroms")   
	parser.add_option("--dphi",     type="float",        default= 1.0,                help="delta phi - rotation in degrees")  
	parser.add_option("--apix",     type="float",        default= 1.84,               help="pixel size in Angstroms")
	parser.add_option("--rmin",     type="int",          default= 0,                  help="minimal radial extent of structure")   
	parser.add_option("--rmax",     type="int",          default= 70,                 help="maximal radial extent of structure")
	parser.add_option("--fract",    type="float",        default= 0.66,               help="fraction of the volume used for helical search")
	parser.add_option("--sym",      type="string",       default="c1",                help="symmetry of the structure")
	parser.add_option("--function", type="string",       default="helical",  	      help="name of the reference preparation function")
	parser.add_option("--zstep",    type="int",          default= 1,                  help="Step size for translational search along z")   
	parser.add_option("--CTF",      action="store_true", default=False,               help="CTF correction")
	parser.add_option("--maxit",    type="int",          default=5,                   help="maximum number of iterations performed")
	parser.add_option("--MPI",      action="store_true", default=False,               help="use MPI version")
	(options, args) = parser.parse_args(arglist[1:])
	if len(args) != 3:
		print "usage: " + usage
		print "Please run '" + progname + " -h' for detailed options"
	else:
		if options.MPI:
			from mpi import mpi_init, mpi_finalize
			sys.argv = mpi_init(len(sys.argv), sys.argv)
		else:
			print "There is only MPI version of sxfilrecons3d.py. See SPARX wiki page for downloading MyMPI details."
			sys.exit()
			
		if global_def.CACHE_DISABLE:
			from utilities import disable_bdb_cache
			disable_bdb_cache()

		from development import filrecons3D_MPI
		global_def.BATCH = True
		filrecons3D_MPI(args[0], args[1], args[2], options.dp, options.dphi, options.apix, options.function, options.zstep, options.fract, options.rmax, options.rmin,
		                options.CTF, options.maxit, options.sym)
		
		global_def.BATCH = False

		if options.MPI:  mpi_finalize()
Example no. 14
def main():
	progname = os.path.basename(sys.argv[0])
	usage = progname + "  input_stack output_stack --subavg=average_image --rad=mask_radius --nvec=number_of_eigenvectors --incore --mask=maskfile --shuffle --usebuf --MPI"
	parser = OptionParser(usage, version=SPARXVERSION)
	parser.add_option("--subavg",  type="string",       default="",    help="subtract average")
	parser.add_option("--rad",     type="int",          default=-1,    help="radius of mask")
	parser.add_option("--nvec",    type="int",          default=1,     help="number of eigenvectors")
	parser.add_option("--mask",    type="string",       default="",    help="mask file" )
	parser.add_option("--genbuf",  action="store_true", default=False, help="use existing buffer")
	parser.add_option("--shuffle", action="store_true", default=False, help="use shuffle")
	parser.add_option("--incore",  action="store_true", default=False, help="no buffer on a disk" )
	parser.add_option("--MPI",     action="store_true", default=False, help="run mpi version" )

	(options, args) = parser.parse_args()

	input_stacks = args[0:-1]
	output_stack = args[-1]

	if options.nvec is None:
		print "Error: number of components is not given"
		sys.exit(-2) 

	isRoot = True
	if options.MPI:
		from mpi import mpi_init, mpi_comm_rank, MPI_COMM_WORLD
		sys.argv = mpi_init( len(sys.argv), sys.argv )
		isRoot = (mpi_comm_rank(MPI_COMM_WORLD) == 0)
		
	if global_def.CACHE_DISABLE:
		from utilities import disable_bdb_cache
		disable_bdb_cache()
	from applications import pca
	global_def.BATCH = True
	vecs = []
	vecs = pca(input_stacks, options.subavg, options.rad, options.nvec, options.incore, options.shuffle, not(options.genbuf), options.mask, options.MPI)
	if isRoot:
		for i in xrange(len(vecs)):
			vecs[i].write_image(output_stack, i)
	
	global_def.BATCH = False
	if options.MPI:
		from mpi import mpi_finalize
		mpi_finalize()
Example no. 15
def main():
	progname = os.path.basename(sys.argv[0])
	usage = progname + " tifdir <micdir> --inx=tif --foc=f --ext=spi --cst=1 pixel_size=2 --sca_a=1 --sca_b=1 --step=63.5 --mag=40 --MPI"
	parser = OptionParser(usage,version=SPARXVERSION)
	parser.add_option("--inx",        type = "string", default="tif", help =" input extension ")
	parser.add_option("--foc",        type = "string", default="f",   help =" film or CCD frames ")
	parser.add_option("--ext",        type = "string", default="spi", help =" extenstion of output file")
	parser.add_option("--cst",        type = "float",  default=1,     help =" contrast invert or not, -1=invert ")
	parser.add_option("--pixel_size", type = "float",  default=1,     help =" the dimension adjusted output image pixel size")
	parser.add_option("--sca_a",      type = "float",  default=1,     help =" scanner OD converting parameter a, check manual of the scanner ")
	parser.add_option("--sca_b",      type = "float",  default=1,     help =" scanner OD converting parameter b, check manual of the scanner ")
	parser.add_option("--step",       type = "float",  default=63.5,  help =" scan step size of scanner or CCD camera ")
 	parser.add_option("--mag",        type = "float",  default=40,    help =" magnification at which the images are taken ")		
	parser.add_option("--MPI", action="store_true", default=False,     help="  whether using MPI version ")
	(options, args) = parser.parse_args()
	if len(args) < 1:
		print "usage: "      + usage
		print "Please run '" + progname + " -h' for detailed options"
	else:
	
		if len(args) == 1: 
			outdir = None
		else:
			outdir = args[1]

		from applications import copyfromtif

		if global_def.CACHE_DISABLE:
			from utilities import disable_bdb_cache
			disable_bdb_cache()

		if options.MPI:
			from mpi import mpi_init
			sys.argv = mpi_init(len(sys.argv),sys.argv)		

		global_def.BATCH = True

		copyfromtif(args[0], outdir, options.inx, options.foc, options.ext, options.cst, options.pixel_size, options.sca_a, options.sca_b, options.step, options.mag, options.MPI)
		global_def.BATCH = False
		
		if options.MPI:
			from mpi import mpi_finalize
			mpi_finalize()
Example no. 16
def main():

	progname = os.path.basename(sys.argv[0])
	usage = progname + " stackfile outdir  <maskfile> --K1=Min_number_of_Cluster --K2=Max_number_of_Clusters --opt_method=K-means_method --trials=Number_of_trials_of_K-means --CTF --rand_seed=1000 --maxit=Maximum_number_of_iterations --F=simulated_annealing --T0=simulated_annealing --MPI --CUDA --debug"
	parser = OptionParser(usage,version=SPARXVERSION)
	parser.add_option("--K1",          type="int",          default=2,          help="Mimimum number of clusters")
	parser.add_option("--K2",          type="int",          default=3,          help="Maximum number of clusters")
	parser.add_option("--trials",      type="int",          default=1,          help="Number of trials in K-means (default 1)")
	parser.add_option("--CTF",         action="store_true", default=False,      help="Perform clustering using CTF information")
	parser.add_option("--rand_seed",   type="int",          default=-1,         help="Random seed of initial (default random)" )
	parser.add_option("--maxit",       type="int",          default=100,        help="Mimimum number of iterations within K-means")
	#parser.add_option("--F",           type="float",        default=0.0,        help="Factor to decrease temperature in simulated annealing, ex.: 0.9")
	#parser.add_option("--T0",          type="float",        default=0.0,        help="Initial temperature in simulated annealing, ex: 100")
	parser.add_option("--MPI",         action="store_true", default=False,      help="Use MPI version")
	parser.add_option("--debug",       action="store_true", default=False,      help="Debug output")

	(options, args) = parser.parse_args()
	if len(args) < 2 or len(args) > 3:
		print "usage: " + usage
		print "Please run '" + progname + " -h' for detailed options"
	elif options.trials < 1:
		sys.stderr.write("ERROR: Number of trials should be at least 1.\n\n")
		sys.exit()
	
	else: 
		if len(args)==2: mask = None
		else:            mask = args[2]

		if options.K1 < 2:
			sys.stderr.write('ERROR: K1 must be > 1 group\n\n')
			sys.exit()

		if global_def.CACHE_DISABLE:
			from utilities import disable_bdb_cache
			disable_bdb_cache()
		from applications import k_means_groups
		global_def.BATCH = True
		k_means_groups(args[0], args[1], mask, "SSE", options.K1, options.K2, options.rand_seed, options.maxit, options.trials, options.CTF, 0.0, 0.0, options.MPI, False, options.debug)
		global_def.BATCH = False
		
		if options.MPI:
			from mpi import mpi_finalize
			mpi_finalize()
Example no. 17
def if_error_then_all_processes_exit_program(error_status):
	import sys, os
	from utilities import print_msg

	if "OMPI_COMM_WORLD_SIZE" not in os.environ:
		def mpi_comm_rank(n): return 0
		def mpi_bcast(*largs):
			return [largs[0]]
		def mpi_finalize():
			return None
		MPI_INT, MPI_COMM_WORLD = 0, 0
	else:
		from mpi import mpi_comm_rank, mpi_bcast, mpi_finalize, MPI_INT, MPI_COMM_WORLD

	myid = mpi_comm_rank(MPI_COMM_WORLD)
	if error_status != None and error_status != 0:
		error_status_info = error_status
		error_status = 1
	else:
		error_status = 0

	error_status = mpi_bcast(error_status, 1, MPI_INT, 0, MPI_COMM_WORLD)
	error_status = int(error_status[0])

	if error_status > 0:
		if myid == 0:
			if type(error_status_info) == type((1,1)):
				if len(error_status_info) == 2:
					frameinfo = error_status_info[1]
					print_msg("***********************************\n")
					print_msg("** Error: %s\n"%error_status_info[0])
					print_msg("***********************************\n")
					print_msg("** Location: %s\n"%(frameinfo.filename + ":" + str(frameinfo.lineno)))
					print_msg("***********************************\n")
		sys.stdout.flush()
		mpi_finalize()
		sys.exit(1)
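A minimal usage sketch for the helper above, assuming the caller packs the error as the (message, frameinfo) tuple that the root process unpacks, and that os is already imported at module level as in these scripts; the check and the message are hypothetical.

# Hypothetical usage: every MPI rank must reach this call so the broadcast completes.
from inspect import getframeinfo, currentframe

error_status = None
if not os.path.exists("my_input_stack.hdf"):  # hypothetical check
	error_status = ("Input stack could not be found", getframeinfo(currentframe()))
if_error_then_all_processes_exit_program(error_status)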
Example no. 18
def main():

    progname = os.path.basename(sys.argv[0])
    usage = progname + " stackfile outdir  <maskfile> --K1=Min_number_of_Cluster --K2=Max_number_of_Clusters --opt_method=K-means_method --trials=Number_of_trials_of_K-means --CTF --rand_seed=1000 --maxit=Maximum_number_of_iterations --F=simulated_annealing --T0=simulated_annealing --MPI --CUDA --debug"
    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option("--K1",
                      type="int",
                      default=2,
                      help="Mimimum number of clusters")
    parser.add_option("--K2",
                      type="int",
                      default=3,
                      help="Maximum number of clusters")
    parser.add_option("--trials",
                      type="int",
                      default=1,
                      help="Number of trials in K-means (default 1)")
    parser.add_option("--CTF",
                      action="store_true",
                      default=False,
                      help="Perform clustering using CTF information")
    parser.add_option("--rand_seed",
                      type="int",
                      default=-1,
                      help="Random seed of initial (default random)")
    parser.add_option("--maxit",
                      type="int",
                      default=100,
                      help="Mimimum number of iterations within K-means")
    #parser.add_option("--F",           type="float",        default=0.0,        help="Factor to decrease temperature in simulated annealing, ex.: 0.9")
    #parser.add_option("--T0",          type="float",        default=0.0,        help="Initial temperature in simulated annealing, ex: 100")
    parser.add_option("--MPI",
                      action="store_true",
                      default=False,
                      help="Use MPI version")
    parser.add_option("--debug",
                      action="store_true",
                      default=False,
                      help="Debug output")

    (options, args) = parser.parse_args()
    if len(args) < 2 or len(args) > 3:
        print("usage: " + usage)
        print("Please run '" + progname + " -h' for detailed options")
    elif options.trials < 1:
        sys.stderr.write("ERROR: Number of trials should be at least 1.\n\n")
        sys.exit()

    else:
        if len(args) == 2: mask = None
        else: mask = args[2]

        if options.K1 < 2:
            sys.stderr.write('ERROR: K1 must be > 1 group\n\n')
            sys.exit()

        if global_def.CACHE_DISABLE:
            from utilities import disable_bdb_cache
            disable_bdb_cache()
        from applications import k_means_groups
        global_def.BATCH = True
        k_means_groups(args[0], args[1], mask, "SSE", options.K1, options.K2,
                       options.rand_seed, options.maxit, options.trials,
                       options.CTF, 0.0, 0.0, options.MPI, False,
                       options.debug)
        global_def.BATCH = False

        if options.MPI:
            from mpi import mpi_finalize
            mpi_finalize()
Example no. 19
def main():
	arglist = []
	for arg in sys.argv:
		arglist.append( arg )

	progname = os.path.basename( arglist[0] )
	usage = progname + " prj_stack volume [begin end step] --CTF --npad=ntimes_padding --list=file --group=ID --snr=SNR --sym=symmetry --verbose=(0|1) --xysize --MPI"
	parser = OptionParser(usage, version=SPARXVERSION)

	parser.add_option("--CTF",     action="store_true", default=False, help="apply CTF correction")
	parser.add_option("--snr",     type="float",	    default=1.0,   help="Signal-to-Noise Ratio" )
	parser.add_option("--sym",     type="string",	    default="c1",  help="symmetry" )
	parser.add_option("--list",    type="string",                      help="file with list of images to be used in the first column" )
	parser.add_option("--group",   type="int",          default=-1,    help="perform reconstruction using images for a given group number (group is attribute in the header)" )
	parser.add_option("--MPI",     action="store_true", default=False, help="use MPI version ")
	parser.add_option("--npad",    type="int",	        default=2,     help="number of times padding (default 2)" )
	parser.add_option("--verbose", type="int",          default=0,     help="verbose level: 0 no verbose, 1 verbose" )
	parser.add_option("--xysize",  type="int",	        default=-1,    help="user expected size at xy direction" )
	parser.add_option("--zsize",   type="int",	        default=-1,    help="user expected size at z direction" )
	parser.add_option("--smearstep",   type="float",	default=0.0,   help="Rotational smear step (default 0.0, no smear)" )

	(options,args) = parser.parse_args(arglist[1:])


	if options.MPI:
		from mpi import mpi_init
		sys.argv = mpi_init(len(sys.argv), sys.argv)

	if global_def.CACHE_DISABLE:
		from utilities import disable_bdb_cache
		disable_bdb_cache()

	if len(args) == 2:
		prj_stack = args[0]
		vol_stack = args[1]
		nimage = EMUtil.get_image_count( prj_stack )
		pid_list = range(0, nimage)
	elif len(args) == 5:
		prj_stack = args[0]
		vol_stack = args[1]
		begin = atoi( args[2] )
		end   = atoi( args[3] )
		step  = atoi( args[4] )
		pid_list = range(begin, end, step)
	else:
		ERROR("incomplete list of arguments","recon3d_n",1)
		exit()

	if(options.list and options.group > -1):
		ERROR("options group and list cannot be used together","recon3d_n",1)
		sys.exit()

	from applications import recons3d_n

	global_def.BATCH = True
	recons3d_n(prj_stack, pid_list, vol_stack, options.CTF, options.snr, 1, options.npad,\
		 options.sym, options.list, options.group, options.verbose, options.MPI,options.xysize, options.zsize, options.smearstep)
	global_def.BATCH = False

	if options.MPI:
		from mpi import mpi_finalize
		mpi_finalize()
Example no. 20
def main():
    progname = os.path.basename(sys.argv[0])
    usage = progname + "  input_stack output_stack --subavg=average_image --rad=mask_radius --nvec=number_of_eigenvectors --incore --mask=maskfile --shuffle --usebuf --MPI"
    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option("--subavg",
                      type="string",
                      default="",
                      help="subtract average")
    parser.add_option("--rad", type="int", default=-1, help="radius of mask")
    parser.add_option("--nvec",
                      type="int",
                      default=1,
                      help="number of eigenvectors")
    parser.add_option("--mask", type="string", default="", help="mask file")
    parser.add_option("--genbuf",
                      action="store_true",
                      default=False,
                      help="use existing buffer")
    parser.add_option("--shuffle",
                      action="store_true",
                      default=False,
                      help="use shuffle")
    parser.add_option("--incore",
                      action="store_true",
                      default=False,
                      help="no buffer on a disk")
    parser.add_option("--MPI",
                      action="store_true",
                      default=False,
                      help="run mpi version")

    (options, args) = parser.parse_args()

    input_stacks = args[0:-1]
    output_stack = args[-1]

    if options.nvec is None:
        print("Error: number of components is not given")
        sys.exit(-2)

    isRoot = True
    if options.MPI:
        from mpi import mpi_init, mpi_comm_rank, MPI_COMM_WORLD
        sys.argv = mpi_init(len(sys.argv), sys.argv)
        isRoot = (mpi_comm_rank(MPI_COMM_WORLD) == 0)

    if global_def.CACHE_DISABLE:
        from utilities import disable_bdb_cache
        disable_bdb_cache()
    from applications import pca
    global_def.BATCH = True
    vecs = []
    vecs = pca(input_stacks, options.subavg, options.rad, options.nvec,
               options.incore, options.shuffle, not (options.genbuf),
               options.mask, options.MPI)
    if isRoot:
        for i in range(len(vecs)):
            vecs[i].write_image(output_stack, i)

    global_def.BATCH = False
    if options.MPI:
        from mpi import mpi_finalize
        mpi_finalize()
Example no. 21
def main():
    arglist = []
    for arg in sys.argv:
        arglist.append(arg)
    progname = os.path.basename(arglist[0])
    usage = progname + " stack <output_volume> <ssnr_text_file> <reference_structure> <2Dmaskfile> --ou=outer_radius --rw=ring_width --npad=padding_times --CTF --MPI --sign=CTF_sign --sym=symmetry --random_angles=0"
    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option("--ou",
                      type="int",
                      default=-1,
                      help="  radius of particle (set to int(nx/2)-1)")
    parser.add_option(
        "--rw",
        type="float",
        default=1.0,
        help=
        "  ring width for calculating Fourier shell/ring correlation (set to 1)"
    )
    parser.add_option("--npad",
                      type="int",
                      default=1,
                      help="  image padding for 3D reconstruction (set to 1)")
    parser.add_option(
        "--CTF",
        action="store_true",
        default=False,
        help=
        "  Consider CTF correction during the reconstruction (set to False)")
    parser.add_option("--sign",
                      type="int",
                      default=1,
                      help="  sign of the CTF (set to 1)")
    parser.add_option("--sym",
                      type="string",
                      default="c1",
                      help="  symmetry of the structure (set to c1)")
    parser.add_option("--MPI",
                      action="store_true",
                      default=False,
                      help="  use MPI version ")
    parser.add_option(
        "--random_angles",
        type="int",
        default="0",
        help="  randomize Euler angles: 0 - no, 1 - only psi, 2 - all three")
    (options, args) = parser.parse_args(arglist[1:])
    if len(args) < 1 or len(args) > 4:
        print("usage: " + usage)
        print("Please run '" + progname + " -h' for detailed options")
    else:
        stack = args[0]
        if len(args) == 1:
            out_vol = "SSNR.spi"
            ssnr_file = "ssnr"
            reference = None
            mask = None
        elif len(args) == 2:
            out_vol = args[1]
            ssnr_file = "ssnr"
            reference = None
            mask = None
        elif len(args) == 3:
            out_vol = args[1]
            ssnr_file = args[2]
            reference = None
            mask = None
        elif len(args) == 4:
            out_vol = args[1]
            ssnr_file = args[2]
            reference = args[3]
            mask = None
        elif len(args) == 5:
            out_vol = args[1]
            ssnr_file = args[2]
            reference = args[3]
            mask = args[4]

        if options.MPI:
            from mpi import mpi_init
            sys.argv = mpi_init(len(sys.argv), sys.argv)

        if global_def.CACHE_DISABLE:
            from utilities import disable_bdb_cache
            disable_bdb_cache()

        from applications import ssnr3d
        global_def.BATCH = True
        ssnr3d(stack, out_vol, ssnr_file, mask, reference, options.ou,
               options.rw, options.npad, options.CTF, options.sign,
               options.sym, options.MPI, options.random_angles)
        global_def.BATCH = False

        if options.MPI:
            from mpi import mpi_finalize
            mpi_finalize()
Example no. 22
def main():
    progname = os.path.basename(sys.argv[0])
    usage = progname + " stack outdir <maskfile> --ir=inner_radius --ou=outer_radius --rs=ring_step --xr=x_range --yr=y_range --ts=translation_step --dst=delta --center=center --maxit=max_iteration --CTF --snr=SNR --Fourvar=Fourier_variance --Ng=group_number --Function=user_function_name --CUDA --GPUID --MPI"
    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option(
        "--ir",
        type="float",
        default=1,
        help="inner radius for rotational correlation > 0 (set to 1)")
    parser.add_option(
        "--ou",
        type="float",
        default=-1,
        help=
        "outer radius for rotational correlation < nx/2-1 (set to the radius of the particle)"
    )
    parser.add_option(
        "--rs",
        type="float",
        default=1,
        help="step between rings in rotational correlation > 0 (set to 1)")
    parser.add_option(
        "--xr",
        type="string",
        default="4 2 1 1",
        help="range for translation search in x direction, search is +/xr ")
    parser.add_option(
        "--yr",
        type="string",
        default="-1",
        help="range for translation search in y direction, search is +/yr ")
    parser.add_option("--ts",
                      type="string",
                      default="2 1 0.5 0.25",
                      help="step of translation search in both directions")
    parser.add_option("--dst", type="float", default=0.0, help="delta")
    parser.add_option(
        "--center",
        type="float",
        default=-1,
        help=
        "-1: average center method; 0: not centered; 1: phase approximation; 2: cc with Gaussian function; 3: cc with donut-shaped image; 4: cc with user-defined reference; 5: cc with self-rotated average"
    )
    parser.add_option(
        "--maxit",
        type="float",
        default=0,
        help=
        "maximum number of iterations (0 means a maximum of 10 iterations, but the program stops automatically once the convergence criterion is met)"
    )
    parser.add_option("--CTF",
                      action="store_true",
                      default=False,
                      help="use CTF correction during alignment ")
    parser.add_option("--snr",
                      type="float",
                      default=1.0,
                      help="signal-to-noise ratio of the data (set to 1.0)")
    parser.add_option("--Fourvar",
                      action="store_true",
                      default=False,
                      help="compute Fourier variance")
    parser.add_option("--Ng",
                      type="int",
                      default=-1,
                      help="number of groups in the new CTF filtration")
    parser.add_option("--num_ali",
                      type="int",
                      default=3,
                      help="number of independent alignments to do")
    parser.add_option(
        "--function",
        type="string",
        default="ref_ali2d",
        help="name of the reference preparation function (default ref_ali2d)")
    parser.add_option("--CUDA",
                      action="store_true",
                      default=False,
                      help="use CUDA program")
    parser.add_option("--GPUID",
                      type="string",
                      default="",
                      help="ID of GPUs available")
    parser.add_option("--MPI",
                      action="store_true",
                      default=False,
                      help="use MPI version ")
    (options, args) = parser.parse_args()
    if len(args) < 2 or len(args) > 3:
        print("usage: " + usage)
        print("Please run '" + progname + " -h' for detailed options")
    else:
        if args[1] == 'None': outdir = None
        else: outdir = args[1]

        if len(args) == 2: mask = None
        else: mask = args[2]

        from development import multi_ali2d

        if global_def.CACHE_DISABLE:
            from utilities import disable_bdb_cache
            disable_bdb_cache()

        if options.MPI:
            from mpi import mpi_init
            sys.argv = mpi_init(len(sys.argv), sys.argv)

        global_def.BATCH = True
        multi_ali2d(args[0], outdir, mask, options.ir, options.ou, options.rs, options.xr, options.yr, options.ts, options.dst, options.center, \
         options.maxit, options.CTF, options.snr, options.Fourvar, options.Ng, options.num_ali, options.function, options.CUDA, options.GPUID, options.MPI)
        global_def.BATCH = False

        if options.MPI:
            from mpi import mpi_finalize
            mpi_finalize()
Esempio n. 23
0
def main():
    progname = os.path.basename(sys.argv[0])
    usage = progname + """  input_micrograph_list_file  input_micrograph_pattern  input_coordinates_pattern  output_directory  --coordinates_format  --box_size=box_size  --invert  --import_ctf=ctf_file  --limit_ctf  --resample_ratio=resample_ratio  --defocus_error=defocus_error  --astigmatism_error=astigmatism_error
	
Window particles from micrographs in input list file. The coordinates of the particles should be given as input.
Please specify the name patterns of input micrographs and coordinates files with a wild card (*). Use the wild card to indicate the place of the micrograph ID (e.g. serial number, time stamp, etc.). 
The name patterns must be enclosed by single quotes (') or double quotes ("). (Note: sxgui.py automatically adds single quotes (')). 
BDB files can not be selected as input micrographs.
	
	sxwindow.py  mic_list.txt  ./mic*.hdf  info/mic*_info.json  particles  --coordinates_format=eman2  --box_size=64  --invert  --import_ctf=outdir_cter/partres/partres.txt
	
If micrograph list file name is not provided, all files matched with the micrograph name pattern will be processed.
	
	sxwindow.py  ./mic*.hdf  info/mic*_info.json  particles  --coordinates_format=eman2  --box_size=64  --invert  --import_ctf=outdir_cter/partres/partres.txt
	
"""
    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option(
        "--coordinates_format",
        type="string",
        default="eman1",
        help=
        "format of input coordinates files: 'sparx', 'eman1', 'eman2', or 'spider'. the coordinates in sparx, eman2, and spider formats give the particle center. the coordinates in eman1 format give the particle box corner associated with the original box size. (default eman1)"
    )
    parser.add_option(
        "--box_size",
        type="int",
        default=256,
        help=
        "x and y dimension of square area to be windowed (in pixels): pixel size after resampling is assumed when resample_ratio < 1.0 (default 256)"
    )
    parser.add_option(
        "--invert",
        action="store_true",
        default=False,
        help="invert image contrast: recommended for cryo data (default False)"
    )
    parser.add_option(
        "--import_ctf",
        type="string",
        default="",
        help="file name of sxcter output: normally partres.txt (default none)")
    parser.add_option(
        "--limit_ctf",
        action="store_true",
        default=False,
        help=
        "filter micrographs based on the CTF limit: this option requires --import_ctf. (default False)"
    )
    parser.add_option(
        "--resample_ratio",
        type="float",
        default=1.0,
        help=
        "ratio of new to old image size (or old to new pixel size) for resampling: Valid range is 0.0 < resample_ratio <= 1.0. (default 1.0)"
    )
    parser.add_option(
        "--defocus_error",
        type="float",
        default=1000000.0,
        help=
        "defocus error limit: exclude micrographs whose relative defocus error as estimated by sxcter is larger than defocus_error percent. the error is computed as (std dev defocus)/defocus*100%. (default 1000000.0)"
    )
    parser.add_option(
        "--astigmatism_error",
        type="float",
        default=360.0,
        help=
        "astigmatism error limit: Set to zero astigmatism for micrographs whose astigmatism angular error as estimated by sxcter is larger than astigmatism_error degrees. (default 360.0)"
    )

    ### detect if program is running under MPI
    RUNNING_UNDER_MPI = "OMPI_COMM_WORLD_SIZE" in os.environ

    main_node = 0

    if RUNNING_UNDER_MPI:
        from mpi import mpi_init
        from mpi import MPI_COMM_WORLD, mpi_comm_rank, mpi_comm_size, mpi_barrier, mpi_reduce, MPI_INT, MPI_SUM

        mpi_init(0, [])
        myid = mpi_comm_rank(MPI_COMM_WORLD)
        number_of_processes = mpi_comm_size(MPI_COMM_WORLD)
    else:
        number_of_processes = 1
        myid = 0

    (options, args) = parser.parse_args(sys.argv[1:])

    mic_list_file_path = None
    mic_pattern = None
    coords_pattern = None
    error_status = None
    while True:
        if len(args) < 3 or len(args) > 4:
            error_status = (
                "Please check usage for number of arguments.\n Usage: " +
                usage + "\n" + "Please run %s -h for help." % (progname),
                getframeinfo(currentframe()))
            break

        if len(args) == 3:
            mic_pattern = args[0]
            coords_pattern = args[1]
            out_dir = args[2]
        else:  # assert(len(args) == 4)
            mic_list_file_path = args[0]
            mic_pattern = args[1]
            coords_pattern = args[2]
            out_dir = args[3]

        if mic_list_file_path != None:
            if os.path.splitext(mic_list_file_path)[1] != ".txt":
                error_status = (
                    "Extension of input micrograph list file must be \".txt\". Please check input_micrograph_list_file argument. Run %s -h for help."
                    % (progname), getframeinfo(currentframe()))
                break

        if mic_pattern[:len("bdb:")].lower() == "bdb:":
            error_status = (
                "BDB file can not be selected as input micrographs. Please convert the format, and restart the program. Run %s -h for help."
                % (progname), getframeinfo(currentframe()))
            break

        if mic_pattern.find("*") == -1:
            error_status = (
                "Input micrograph file name pattern must contain wild card (*). Please check input_micrograph_pattern argument. Run %s -h for help."
                % (progname), getframeinfo(currentframe()))
            break

        if coords_pattern.find("*") == -1:
            error_status = (
                "Input coordinates file name pattern must contain wild card (*). Please check input_coordinates_pattern argument. Run %s -h for help."
                % (progname), getframeinfo(currentframe()))
            break

        if myid == main_node:
            if os.path.exists(out_dir):
                error_status = (
                    "Output directory exists. Please change the name and restart the program.",
                    getframeinfo(currentframe()))
                break

        break
    if_error_then_all_processes_exit_program(error_status)

    # Check invalid conditions of options
    check_options(options, progname)

    mic_name_list = None
    error_status = None
    if myid == main_node:
        if mic_list_file_path != None:
            print("Loading micrograph list from %s file ..." %
                  (mic_list_file_path))
            mic_name_list = read_text_file(mic_list_file_path)
            if len(mic_name_list) != 0:
                print("Directory of first micrograph entry is %s" %
                      (os.path.dirname(mic_name_list[0])))
        else:  # assert (mic_list_file_path == None)
            print("Generating micrograph list in %s directory..." %
                  (os.path.dirname(mic_pattern)))
            mic_name_list = glob.glob(mic_pattern)
        if len(mic_name_list) == 0:
            error_status = (
                "No micrograph file is found. Please check input_micrograph_pattern and/or input_micrograph_list_file argument. Run %s -h for help."
                % (progname), getframeinfo(currentframe()))
        else:
            print("Found %d micrographs" % len(mic_name_list))

    if_error_then_all_processes_exit_program(error_status)
    if RUNNING_UNDER_MPI:
        mic_name_list = wrap_mpi_bcast(mic_name_list, main_node)

    coords_name_list = None
    error_status = None
    if myid == main_node:
        coords_name_list = glob.glob(coords_pattern)
        if len(coords_name_list) == 0:
            error_status = (
                "No coordinates file is found. Please check input_coordinates_pattern argument. Run %s -h for help."
                % (progname), getframeinfo(currentframe()))
    if_error_then_all_processes_exit_program(error_status)
    if RUNNING_UNDER_MPI:
        coords_name_list = wrap_mpi_bcast(coords_name_list, main_node)

##################################################################################################################################################################################################################
##################################################################################################################################################################################################################
##################################################################################################################################################################################################################

# all processes must have access to indices
    if options.import_ctf:
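        # Build zero-based column indices for the sxcter output table (partres);
        # the first eight fields must match the CTF object parameter order (see
        # the per-field comments below).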
        i_enum = -1
        i_enum += 1
        idx_cter_def = i_enum  # defocus [um]; index must be same as ctf object format
        i_enum += 1
        idx_cter_cs = i_enum  # Cs [mm]; index must be same as ctf object format
        i_enum += 1
        idx_cter_vol = i_enum  # voltage[kV]; index must be same as ctf object format
        i_enum += 1
        idx_cter_apix = i_enum  # pixel size [A]; index must be same as ctf object format
        i_enum += 1
        idx_cter_bfactor = i_enum  # B-factor [A^2]; index must be same as ctf object format
        i_enum += 1
        idx_cter_ac = i_enum  # amplitude contrast [%]; index must be same as ctf object format
        i_enum += 1
        idx_cter_astig_amp = i_enum  # astigmatism amplitude [um]; index must be same as ctf object format
        i_enum += 1
        idx_cter_astig_ang = i_enum  # astigmatism angle [degree]; index must be same as ctf object format
        i_enum += 1
        idx_cter_sd_def = i_enum  # std dev of defocus [um]
        i_enum += 1
        idx_cter_sd_astig_amp = i_enum  # std dev of ast amp [A]
        i_enum += 1
        idx_cter_sd_astig_ang = i_enum  # std dev of ast angle [degree]
        i_enum += 1
        idx_cter_cv_def = i_enum  # coefficient of variation of defocus [%]
        i_enum += 1
        idx_cter_cv_astig_amp = i_enum  # coefficient of variation of ast amp [%]
        i_enum += 1
        idx_cter_spectra_diff = i_enum  # average of differences between with- and without-astig. experimental 1D spectra at extrema
        i_enum += 1
        idx_cter_error_def = i_enum  # frequency at which signal drops by 50% due to estimated error of defocus alone [1/A]
        i_enum += 1
        idx_cter_error_astig = i_enum  # frequency at which signal drops by 50% due to estimated error of defocus and astigmatism [1/A]
        i_enum += 1
        idx_cter_error_ctf = i_enum  # limit frequency by CTF error [1/A]
        i_enum += 1
        idx_cter_mic_name = i_enum  # micrograph name
        i_enum += 1
        n_idx_cter = i_enum

    # Prepare loop variables
    mic_basename_pattern = os.path.basename(
        mic_pattern)  # file pattern without path
    mic_baseroot_pattern = os.path.splitext(mic_basename_pattern)[
        0]  # file pattern without path and extension
    coords_format = options.coordinates_format.lower()
    box_size = options.box_size
    box_half = box_size // 2
    mask2d = model_circle(
        box_size // 2, box_size, box_size
    )  # Create circular 2D mask to Util.infomask of particle images
    resample_ratio = options.resample_ratio

    n_mic_process = 0
    n_mic_reject_no_coords = 0
    n_mic_reject_no_cter_entry = 0
    n_global_coords_detect = 0
    n_global_coords_process = 0
    n_global_coords_reject_out_of_boundary = 0

    serial_id_list = []
    error_status = None
    ## not a real loop: a single-pass while-block so that break can be used to bail out when errors need to be reported
    while myid == main_node:
        #
        # NOTE: 2016/05/24 Toshio Moriya
        # Now, ignores the path in mic_pattern and entries of mic_name_list to create serial ID
        # Only the basename (file name) in micrograph path must be match
        #
        # Create list of micrograph serial ID
        # Break micrograph name pattern into prefix and suffix to find the head index of the micrograph serial id
        #
        mic_basename_tokens = mic_basename_pattern.split('*')
        # assert (len(mic_basename_tokens) == 2)
        serial_id_head_index = len(mic_basename_tokens[0])
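        # Illustration (hypothetical names): with pattern "mic*.hdf" the tokens are
        # ["mic", ".hdf"], so for a file "mic0001.hdf" the slice below gives serial_id = "0001".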
        # Loop through micrograph names
        for mic_name in mic_name_list:
            # Find the tail index of the serial id and extract serial id from the micrograph name
            mic_basename = os.path.basename(mic_name)
            serial_id_tail_index = mic_basename.index(mic_basename_tokens[1])
            serial_id = mic_basename[serial_id_head_index:serial_id_tail_index]
            serial_id_list.append(serial_id)
        # assert (len(serial_id_list) == len(mic_name_list))
        del mic_name_list  # Do not need this anymore

        # Load CTFs if necessary
        if options.import_ctf:

            ctf_list = read_text_row(options.import_ctf)
            # print("Detected CTF entries : %6d ..." % (len(ctf_list)))

            if len(ctf_list) == 0:
                error_status = (
                    "No CTF entry is found in %s. Please check --import_ctf option. Run %s -h for help."
                    % (options.import_ctf, progname),
                    getframeinfo(currentframe()))
                break

            if (len(ctf_list[0]) != n_idx_cter):
                error_status = (
                    "Number of columns (%d) must be %d in %s. The format might be old. Please run sxcter.py again."
                    % (len(ctf_list[0]), n_idx_cter, options.import_ctf),
                    getframeinfo(currentframe()))
                break

            ctf_dict = {}
            n_reject_defocus_error = 0
            ctf_error_limit = [
                options.defocus_error / 100.0, options.astigmatism_error
            ]
            for ctf_params in ctf_list:
                assert (len(ctf_params) == n_idx_cter)
                # mic_baseroot is name of micrograph minus the path and extension
                mic_baseroot = os.path.splitext(
                    os.path.basename(ctf_params[idx_cter_mic_name]))[0]
                if (ctf_params[idx_cter_sd_def] / ctf_params[idx_cter_def] >
                        ctf_error_limit[0]):
                    print(
                        "Defocus error %f exceeds the threshold. Micrograph %s is rejected."
                        % (ctf_params[idx_cter_sd_def] /
                           ctf_params[idx_cter_def], mic_baseroot))
                    n_reject_defocus_error += 1
                else:
                    if (ctf_params[idx_cter_sd_astig_ang] >
                            ctf_error_limit[1]):
                        ctf_params[idx_cter_astig_amp] = 0.0
                        ctf_params[idx_cter_astig_ang] = 0.0
                    ctf_dict[mic_baseroot] = ctf_params
            del ctf_list  # Do not need this anymore

        break

    if_error_then_all_processes_exit_program(error_status)

    if options.import_ctf:
        if options.limit_ctf:
            cutoff_histogram = []  #@ming compute the histogram for micrographs cut off by the ctf_params limit.

##################################################################################################################################################################################################################
##################################################################################################################################################################################################################
##################################################################################################################################################################################################################

    restricted_serial_id_list = []
    if myid == main_node:
        # Loop over serial IDs of micrographs
        for serial_id in serial_id_list:
            # mic_baseroot is name of micrograph minus the path and extension
            mic_baseroot = mic_baseroot_pattern.replace("*", serial_id)
            mic_name = mic_pattern.replace("*", serial_id)
            coords_name = coords_pattern.replace("*", serial_id)

            ########### # CHECKS: BEGIN
            if coords_name not in coords_name_list:
                print("    Cannot read %s. Skipping %s ..." %
                      (coords_name, mic_baseroot))
                n_mic_reject_no_coords += 1
                continue

            # IF mic is in CTER results
            if options.import_ctf:
                if mic_baseroot not in ctf_dict:
                    print(
                        "    Is not listed in CTER results. Skipping %s ..." %
                        (mic_baseroot))
                    n_mic_reject_no_cter_entry += 1
                    continue
                else:
                    ctf_params = ctf_dict[mic_baseroot]
            # CHECKS: END

            n_mic_process += 1

            restricted_serial_id_list.append(serial_id)
        # restricted_serial_id_list = restricted_serial_id_list[:128]  ## for testing against the nonMPI version

    if myid != main_node:
        if options.import_ctf:
            ctf_dict = None

    error_status = None
    if len(restricted_serial_id_list) < number_of_processes:
        error_status = (
            'Number of processes (%d) supplied by --np in mpirun cannot be greater than %d (number of micrographs that satisfy all criteria to be processed) '
            % (number_of_processes, len(restricted_serial_id_list)),
            getframeinfo(currentframe()))
    if_error_then_all_processes_exit_program(error_status)

    ## keep a copy of the original output directory where the final bdb will be created
    original_out_dir = out_dir
    if RUNNING_UNDER_MPI:
        mpi_barrier(MPI_COMM_WORLD)
        restricted_serial_id_list = wrap_mpi_bcast(restricted_serial_id_list,
                                                   main_node)
        mic_start, mic_end = MPI_start_end(len(restricted_serial_id_list),
                                           number_of_processes, myid)
        restricted_serial_id_list_not_sliced = restricted_serial_id_list
        restricted_serial_id_list = restricted_serial_id_list[
            mic_start:mic_end]

        if options.import_ctf:
            ctf_dict = wrap_mpi_bcast(ctf_dict, main_node)

        # generate subdirectories of out_dir, one for each process
        out_dir = os.path.join(out_dir, "%03d" % myid)

    if myid == main_node:
        print(
            "Micrographs processed by main process (including percent complete):"
        )

    len_processed_by_main_node_divided_by_100 = len(
        restricted_serial_id_list) / 100.0

    ##################################################################################################################################################################################################################
    ##################################################################################################################################################################################################################
    ##################################################################################################################################################################################################################
    #####  Starting main parallel execution

    for my_idx, serial_id in enumerate(restricted_serial_id_list):
        mic_baseroot = mic_baseroot_pattern.replace("*", serial_id)
        mic_name = mic_pattern.replace("*", serial_id)
        coords_name = coords_pattern.replace("*", serial_id)

        if myid == main_node:
            print(
                mic_name, " ---> % 2.2f%%" %
                (my_idx / len_processed_by_main_node_divided_by_100))
        mic_img = get_im(mic_name)

        # Read coordinates according to the specified format and
        # make the coordinates the center of particle image
        if coords_format == "sparx":
            coords_list = read_text_row(coords_name)
        elif coords_format == "eman1":
            coords_list = read_text_row(coords_name)
            for i in xrange(len(coords_list)):
                coords_list[i] = [(coords_list[i][0] + coords_list[i][2] // 2),
                                  (coords_list[i][1] + coords_list[i][3] // 2)]
        elif coords_format == "eman2":
            coords_list = js_open_dict(coords_name)["boxes"]
            for i in xrange(len(coords_list)):
                coords_list[i] = [coords_list[i][0], coords_list[i][1]]
        elif coords_format == "spider":
            coords_list = read_text_row(coords_name)
            for i in xrange(len(coords_list)):
                coords_list[i] = [coords_list[i][2], coords_list[i][3]]
            # else: assert (False) # Unreachable code

        # Calculate the new pixel size
        if options.import_ctf:
            ctf_params = ctf_dict[mic_baseroot]
            pixel_size_origin = ctf_params[idx_cter_apix]

            if resample_ratio < 1.0:
                # assert (resample_ratio > 0.0)
                new_pixel_size = pixel_size_origin / resample_ratio
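                # e.g. (hypothetical values) resample_ratio = 0.5 halves the micrograph
                # dimensions, so the pixel size doubles: new_pixel_size = pixel_size_origin / 0.5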
                print(
                    "Resample micrograph to pixel size %6.4f and window segments from resampled micrograph."
                    % new_pixel_size)
            else:
                # assert (resample_ratio == 1.0)
                new_pixel_size = pixel_size_origin

            # Set ctf along with new pixel size in resampled micrograph
            ctf_params[idx_cter_apix] = new_pixel_size
        else:
            # assert (not options.import_ctf)
            if resample_ratio < 1.0:
                # assert (resample_ratio > 0.0)
                print(
                    "Resample micrograph with ratio %6.4f and window segments from resampled micrograph."
                    % resample_ratio)
            # else:
            #	assert (resample_ratio == 1.0)

        # Apply filters to micrograph
        fftip(mic_img)
        if options.limit_ctf:
            # assert (options.import_ctf)
            # Cut off frequency components higher than CTF limit
            q1, q2 = ctflimit(box_size, ctf_params[idx_cter_def],
                              ctf_params[idx_cter_cs],
                              ctf_params[idx_cter_vol], new_pixel_size)

            # This is absolute frequency of CTF limit in scale of original micrograph
            if resample_ratio < 1.0:
                # assert (resample_ratio > 0.0)
                q1 = resample_ratio * q1 / float(
                    box_size
                )  # q1 = (pixel_size_origin / new_pixel_size) * q1/float(box_size)
            else:
                # assert (resample_ratio == 1.0) -> pixel_size_origin == new_pixel_size -> pixel_size_origin / new_pixel_size == 1.0
                q1 = q1 / float(box_size)

            if q1 < 0.5:
                mic_img = filt_tanl(mic_img, q1, 0.01)
                cutoff_histogram.append(q1)

        # Cut off frequency components lower than the box size can express
        mic_img = fft(filt_gaussh(mic_img, resample_ratio / box_size))

        # Resample micrograph, map coordinates, and window segments from resampled micrograph using new coordinates
        # after resampling by resample_ratio, new pixel size will be pixel_size/resample_ratio = new_pixel_size
        # NOTE: 2015/04/13 Toshio Moriya
        # resample() efficiently takes care of the case resample_ratio = 1.0, but in that
        # case it does not set apix_*, even though it does set apix_* when resample_ratio < 1.0 ...
        mic_img = resample(mic_img, resample_ratio)

        if options.invert:
            mic_stats = Util.infomask(
                mic_img, None, True)  # mic_stat[0:mean, 1:SD, 2:min, 3:max]
            Util.mul_scalar(mic_img, -1.0)
            mic_img += 2 * mic_stats[0]

        if options.import_ctf:
            from utilities import generate_ctf
            ctf_obj = generate_ctf(
                ctf_params
            )  # indexes 0 to 7 (idx_cter_def to idx_cter_astig_ang) must be same in cter format & ctf object format.

        # Prepare loop variables
        nx = mic_img.get_xsize()
        ny = mic_img.get_ysize()
        x0 = nx // 2
        y0 = ny // 2

        n_coords_reject_out_of_boundary = 0
        local_stack_name = "bdb:%s#" % out_dir + mic_baseroot + '_ptcls'
        local_particle_id = 0  # can be different from coordinates_id
        # Loop over coordinates
        for coords_id in xrange(len(coords_list)):

            x = int(coords_list[coords_id][0])
            y = int(coords_list[coords_id][1])

            if resample_ratio < 1.0:
                # assert (resample_ratio > 0.0)
                x = int(x * resample_ratio)
                y = int(y * resample_ratio)
            # else:
            # 	assert(resample_ratio == 1.0)

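            # Keep the particle only if the full box fits inside the micrograph;
            # the offsets passed to Util.window() are measured from the image center (x0, y0).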
            if ((0 <= x - box_half) and (x + box_half <= nx)
                    and (0 <= y - box_half) and (y + box_half <= ny)):
                particle_img = Util.window(mic_img, box_size, box_size, 1,
                                           x - x0, y - y0)
            else:
                print(
                    "In %s, coordinates ID = %04d (x = %4d, y = %4d, box_size = %4d) is out of micrograph bound, skipping ..."
                    % (mic_baseroot, coords_id, x, y, box_size))
                n_coords_reject_out_of_boundary += 1
                continue

            particle_img = ramp(particle_img)
            particle_stats = Util.infomask(
                particle_img, mask2d,
                False)  # particle_stats[0:mean, 1:SD, 2:min, 3:max]
            particle_img -= particle_stats[0]
            particle_img /= particle_stats[1]

            # NOTE: 2015/04/09 Toshio Moriya
            # ptcl_source_image might be redundant information ...
            # Consider re-organizing header entries...
            particle_img.set_attr("ptcl_source_image", mic_name)
            particle_img.set_attr("ptcl_source_coord_id", coords_id)
            particle_img.set_attr("ptcl_source_coord", [
                int(coords_list[coords_id][0]),
                int(coords_list[coords_id][1])
            ])
            particle_img.set_attr("resample_ratio", resample_ratio)

            # NOTE: 2015/04/13 Toshio Moriya
            # apix_* attributes are updated by resample() only when resample_ratio != 1.0
            # Let's make sure header info is consistent by setting apix_* = 1.0
            # regardless of options, so it is not passed down the processing line
            particle_img.set_attr("apix_x", 1.0)
            particle_img.set_attr("apix_y", 1.0)
            particle_img.set_attr("apix_z", 1.0)
            if options.import_ctf:
                particle_img.set_attr("ctf", ctf_obj)
                particle_img.set_attr("ctf_applied", 0)
                particle_img.set_attr("pixel_size_origin", pixel_size_origin)
                # particle_img.set_attr("apix_x", new_pixel_size)
                # particle_img.set_attr("apix_y", new_pixel_size)
                # particle_img.set_attr("apix_z", new_pixel_size)
            # NOTE: 2015/04/13 Toshio Moriya
            # Pawel Comment: Micrograph is not supposed to have CTF header info.
            # So, let's assume it does not exist & ignore its presence.
            # Note that resample() "correctly" updates pixel size of CTF header info if it exists
            # elif (particle_img.has_ctff()):
            # 	assert(not options.import_ctf)
            # 	ctf_origin = particle_img.get_attr("ctf_obj")
            # 	pixel_size_origin = round(ctf_origin.apix, 5) # Because SXCTER ouputs up to 5 digits
            # 	particle_img.set_attr("apix_x",pixel_size_origin)
            # 	particle_img.set_attr("apix_y",pixel_size_origin)
            # 	particle_img.set_attr("apix_z",pixel_size_origin)

            # print("local_stack_name, local_particle_id", local_stack_name, local_particle_id)
            particle_img.write_image(local_stack_name, local_particle_id)
            local_particle_id += 1

        n_global_coords_detect += len(coords_list)
        n_global_coords_process += local_particle_id
        n_global_coords_reject_out_of_boundary += n_coords_reject_out_of_boundary

        #		# MRK_DEBUG: Toshio Moriya 2016/05/03
        #		# Following codes are for debugging bdb. Delete in future
        #		result = db_check_dict(local_stack_name)
        #		print('# MRK_DEBUG: result = db_check_dict(local_stack_name): %s' % (result))
        #		result = db_list_dicts('bdb:%s' % out_dir)
        #		print('# MRK_DEBUG: result = db_list_dicts(out_dir): %s' % (result))
        #		result = db_get_image_info(local_stack_name)
        #		print('# MRK_DEBUG: result = db_get_image_info(local_stack_name)', result)

        # Release the data base of local stack from this process
        # so that the subprocess can access to the data base
        db_close_dict(local_stack_name)


#		# MRK_DEBUG: Toshio Moriya 2016/05/03
#		# Following codes are for debugging bdb. Delete in future
#		cmd_line = "e2iminfo.py %s" % (local_stack_name)
#		print('# MRK_DEBUG: Executing the command: %s' % (cmd_line))
#		cmdexecute(cmd_line)

#		# MRK_DEBUG: Toshio Moriya 2016/05/03
#		# Following codes are for debugging bdb. Delete in future
#		cmd_line = "e2iminfo.py bdb:%s#data" % (out_dir)
#		print('# MRK_DEBUG: Executing the command: %s' % (cmd_line))
#		cmdexecute(cmd_line)

    if RUNNING_UNDER_MPI:
        if options.import_ctf:
            if options.limit_ctf:
                cutoff_histogram = wrap_mpi_gatherv(cutoff_histogram,
                                                    main_node)

    if myid == main_node:
        if options.limit_ctf:
            # Print out the summary of CTF-limit filtering
            print(" ")
            print("Global summary of CTF-limit filtering (--limit_ctf) ...")
            print("Percentage of filtered micrographs: %8.2f\n" %
                  (len(cutoff_histogram) * 100.0 /
                   len(restricted_serial_id_list_not_sliced)))

            n_bins = 10
            if len(cutoff_histogram) >= n_bins:
                from statistics import hist_list
                cutoff_region, cutoff_counts = hist_list(
                    cutoff_histogram, n_bins)
                print("      Histogram of cut-off frequency")
                print("      cut-off       counts")
                for bin_id in xrange(n_bins):
                    print(" %14.7f     %7d" %
                          (cutoff_region[bin_id], cutoff_counts[bin_id]))
            else:
                print(
                    "The number of filtered micrographs (%d) is less than the number of bins (%d). No histogram is produced."
                    % (len(cutoff_histogram), n_bins))

    n_mic_process = mpi_reduce(n_mic_process, 1, MPI_INT, MPI_SUM, main_node,
                               MPI_COMM_WORLD)
    n_mic_reject_no_coords = mpi_reduce(n_mic_reject_no_coords, 1, MPI_INT,
                                        MPI_SUM, main_node, MPI_COMM_WORLD)
    n_mic_reject_no_cter_entry = mpi_reduce(n_mic_reject_no_cter_entry, 1,
                                            MPI_INT, MPI_SUM, main_node,
                                            MPI_COMM_WORLD)
    n_global_coords_detect = mpi_reduce(n_global_coords_detect, 1, MPI_INT,
                                        MPI_SUM, main_node, MPI_COMM_WORLD)
    n_global_coords_process = mpi_reduce(n_global_coords_process, 1, MPI_INT,
                                         MPI_SUM, main_node, MPI_COMM_WORLD)
    n_global_coords_reject_out_of_boundary = mpi_reduce(
        n_global_coords_reject_out_of_boundary, 1, MPI_INT, MPI_SUM, main_node,
        MPI_COMM_WORLD)

    # Print out the summary of all micrographs
    if main_node == myid:
        print(" ")
        print("Global summary of micrographs ...")
        print("Detected                        : %6d" %
              (len(restricted_serial_id_list_not_sliced)))
        print("Processed                       : %6d" % (n_mic_process))
        print("Rejected by no coordinates file : %6d" %
              (n_mic_reject_no_coords))
        print("Rejected by no CTER entry       : %6d" %
              (n_mic_reject_no_cter_entry))
        print(" ")
        print("Global summary of coordinates ...")
        print("Detected                        : %6d" %
              (n_global_coords_detect))
        print("Processed                       : %6d" %
              (n_global_coords_process))
        print("Rejected by out of boundary     : %6d" %
              (n_global_coords_reject_out_of_boundary))
        # print(" ")
        # print("DONE!!!")

    mpi_barrier(MPI_COMM_WORLD)

    if main_node == myid:

        import time
        time.sleep(1)
        print("\n Creating bdb:%s/data\n" % original_out_dir)
        for proc_i in range(number_of_processes):
            mic_start, mic_end = MPI_start_end(
                len(restricted_serial_id_list_not_sliced), number_of_processes,
                proc_i)
            for serial_id in restricted_serial_id_list_not_sliced[
                    mic_start:mic_end]:
                e2bdb_command = "e2bdb.py "
                mic_baseroot = mic_baseroot_pattern.replace("*", serial_id)
                if RUNNING_UNDER_MPI:
                    e2bdb_command += "bdb:" + os.path.join(
                        original_out_dir,
                        "%03d/" % proc_i) + mic_baseroot + "_ptcls "
                else:
                    e2bdb_command += "bdb:" + os.path.join(
                        original_out_dir, mic_baseroot + "_ptcls ")

                e2bdb_command += " --appendvstack=bdb:%s/data  1>/dev/null" % original_out_dir
                cmdexecute(e2bdb_command, printing_on_success=False)

        print("Done!\n")

    if RUNNING_UNDER_MPI:
        mpi_barrier(MPI_COMM_WORLD)
        from mpi import mpi_finalize
        mpi_finalize()

    sys.stdout.flush()
    sys.exit(0)
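
Note: the example above splits the micrograph list across MPI ranks with MPI_start_end, whose definition is not shown in this snippet. A minimal sketch of such an even partition follows, assuming MPI_start_end returns a contiguous [start, end) slice per rank; the helper name split_evenly is hypothetical and used only for illustration.

def split_evenly(total, nproc, myid):
    # Distribute `total` items over `nproc` ranks; ranks with a lower id absorb
    # the remainder, so slice sizes differ by at most one item.
    base, rem = divmod(total, nproc)
    start = myid * base + min(myid, rem)
    end = start + base + (1 if myid < rem else 0)
    return start, end

# e.g. 10 micrographs over 4 ranks -> [(0, 3), (3, 6), (6, 8), (8, 10)]
print([split_evenly(10, 4, r) for r in range(4)])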
Esempio n. 24
0
def main():
    from logger import Logger, BaseLogger_Files
    arglist = []
    i = 0
    while (i < len(sys.argv)):
        if sys.argv[i] == '-p4pg':
            i = i + 2
        elif sys.argv[i] == '-p4wd':
            i = i + 2
        else:
            arglist.append(sys.argv[i])
            i = i + 1
    progname = os.path.basename(arglist[0])
    usage = progname + " stack  outdir  <mask> --focus=3Dmask --radius=outer_radius --delta=angular_step" +\
    " --an=angular_neighborhood --maxit=max_iter  --CTF --sym=c1 --function=user_function --independent=independent_runs  --number_of_images_per_group=number_of_images_per_group  --low_pass_filter=.25  --seed=random_seed"
    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option("--focus",
                      type="string",
                      default='',
                      help="binary 3D mask for focused clustering ")
    parser.add_option(
        "--ir",
        type="int",
        default=1,
        help="inner radius for rotational correlation > 0 (set to 1)")
    parser.add_option(
        "--radius",
        type="int",
        default=-1,
        help=
        "particle radius in pixel for rotational correlation <nx-1 (set to the radius of the particle)"
    )
    parser.add_option("--maxit",
                      type="int",
                      default=25,
                      help="maximum number of iterations")
    parser.add_option(
        "--rs",
        type="int",
        default=1,
        help="step between rings in rotational correlation >0 (set to 1)")
    parser.add_option(
        "--xr",
        type="string",
        default='1',
        help="range for translation search in x direction, search is +/-xr ")
    parser.add_option(
        "--yr",
        type="string",
        default='-1',
        help=
        "range for translation search in y direction, search is +/-yr (default = same as xr)"
    )
    parser.add_option(
        "--ts",
        type="string",
        default='0.25',
        help=
        "step size of the translation search in both directions, search is -xr, -xr+ts, 0, xr-ts, xr "
    )
    parser.add_option("--delta",
                      type="string",
                      default='2',
                      help="angular step of reference projections")
    parser.add_option("--an",
                      type="string",
                      default='-1',
                      help="angular neighborhood for local searches")
    parser.add_option(
        "--center",
        type="int",
        default=0,
        help=
        "0 - if you do not want the volume to be centered, 1 - center the volume using cog (default=0)"
    )
    parser.add_option(
        "--nassign",
        type="int",
        default=1,
        help=
        "number of reassignment iterations performed for each angular step (set to 1) "
    )
    parser.add_option(
        "--nrefine",
        type="int",
        default=0,
        help=
        "number of alignment iterations performed for each angular step (set to 0)"
    )
    parser.add_option("--CTF",
                      action="store_true",
                      default=False,
                      help="do CTF correction during clustering")
    parser.add_option(
        "--stoprnct",
        type="float",
        default=3.0,
        help="Minimum percentage of assignment change to stop the program")
    parser.add_option("--sym",
                      type="string",
                      default='c1',
                      help="symmetry of the structure ")
    parser.add_option("--function",
                      type="string",
                      default='do_volume_mrk05',
                      help="name of the reference preparation function")
    parser.add_option("--independent",
                      type="int",
                      default=3,
                      help="number of independent runs")
    parser.add_option("--number_of_images_per_group",
                      type="int",
                      default=1000,
                      help="number of images per group")
    parser.add_option(
        "--low_pass_filter",
        type="float",
        default=-1.0,
        help=
        "absolute frequency of low-pass filter for 3d sorting on the original image size"
    )
    parser.add_option("--nxinit",
                      type="int",
                      default=64,
                      help="initial image size for sorting")
    parser.add_option("--unaccounted",
                      action="store_true",
                      default=False,
                      help="reconstruct the unaccounted images")
    parser.add_option(
        "--seed",
        type="int",
        default=-1,
        help="random seed used to create the initial random assignment for EQ-Kmeans")
    parser.add_option("--smallest_group",
                      type="int",
                      default=500,
                      help="minimum members for identified group")
    parser.add_option("--sausage",
                      action="store_true",
                      default=False,
                      help="way of filtering the volume")
    parser.add_option("--chunk0",
                      type="string",
                      default='',
                      help="chunk0 for computing margin of error")
    parser.add_option("--chunk1",
                      type="string",
                      default='',
                      help="chunk1 for computing margin of error")
    parser.add_option(
        "--PWadjustment",
        type="string",
        default='',
        help=
        "1-D power spectrum of PDB file used for EM volume power spectrum correction"
    )
    parser.add_option(
        "--protein_shape",
        type="string",
        default='g',
        help=
        "protein shape. It defines the protein's preferred orientation angles. Currently two types are supported: g and f "
    )
    parser.add_option(
        "--upscale",
        type="float",
        default=0.5,
        help=" scaling parameter to adjust the power spectrum of EM volumes")
    parser.add_option("--wn",
                      type="int",
                      default=0,
                      help="optimal window size for data processing")
    parser.add_option(
        "--interpolation",
        type="string",
        default="4nn",
        help="3-D reconstruction interpolation method; two options: trl and 4nn"
    )
    (options, args) = parser.parse_args(arglist[1:])
    if len(args) < 1 or len(args) > 4:
        print("usage: " + usage)
        print("Please run '" + progname + " -h' for detailed options")
    else:

        if len(args) > 2:
            mask_file = args[2]
        else:
            mask_file = None

        orgstack = args[0]
        masterdir = args[1]
        global_def.BATCH = True
        #---initialize MPI related variables
        from mpi import mpi_init, mpi_comm_size, MPI_COMM_WORLD, mpi_comm_rank, mpi_barrier, mpi_bcast, MPI_INT, MPI_CHAR
        sys.argv = mpi_init(len(sys.argv), sys.argv)
        nproc = mpi_comm_size(MPI_COMM_WORLD)
        myid = mpi_comm_rank(MPI_COMM_WORLD)
        mpi_comm = MPI_COMM_WORLD
        main_node = 0
        # import some utilities
        from utilities import get_im, bcast_number_to_all, cmdexecute, write_text_file, read_text_file, wrap_mpi_bcast, get_params_proj, write_text_row
        from applications import recons3d_n_MPI, mref_ali3d_MPI, Kmref_ali3d_MPI
        from statistics import k_means_match_clusters_asg_new, k_means_stab_bbenum
        from applications import mref_ali3d_EQ_Kmeans, ali3d_mref_Kmeans_MPI
        # Create the main log file
        from logger import Logger, BaseLogger_Files
        if myid == main_node:
            log_main = Logger(BaseLogger_Files())
            log_main.prefix = masterdir + "/"
        else:
            log_main = None
        #--- fill input parameters into dictionary named after Constants
        Constants = {}
        Constants["stack"] = args[0]
        Constants["masterdir"] = masterdir
        Constants["mask3D"] = mask_file
        Constants["focus3Dmask"] = options.focus
        Constants["indep_runs"] = options.independent
        Constants["stoprnct"] = options.stoprnct
        Constants[
            "number_of_images_per_group"] = options.number_of_images_per_group
        Constants["CTF"] = options.CTF
        Constants["maxit"] = options.maxit
        Constants["ir"] = options.ir
        Constants["radius"] = options.radius
        Constants["nassign"] = options.nassign
        Constants["rs"] = options.rs
        Constants["xr"] = options.xr
        Constants["yr"] = options.yr
        Constants["ts"] = options.ts
        Constants["delta"] = options.delta
        Constants["an"] = options.an
        Constants["sym"] = options.sym
        Constants["center"] = options.center
        Constants["nrefine"] = options.nrefine
        #Constants["fourvar"]            		 = options.fourvar
        Constants["user_func"] = options.function
        Constants[
            "low_pass_filter"] = options.low_pass_filter  # enforced low_pass_filter
        #Constants["debug"]              		 = options.debug
        Constants["main_log_prefix"] = args[1]
        #Constants["importali3d"]        		 = options.importali3d
        Constants["myid"] = myid
        Constants["main_node"] = main_node
        Constants["nproc"] = nproc
        Constants["log_main"] = log_main
        Constants["nxinit"] = options.nxinit
        Constants["unaccounted"] = options.unaccounted
        Constants["seed"] = options.seed
        Constants["smallest_group"] = options.smallest_group
        Constants["sausage"] = options.sausage
        Constants["chunk0"] = options.chunk0
        Constants["chunk1"] = options.chunk1
        Constants["PWadjustment"] = options.PWadjustment
        Constants["upscale"] = options.upscale
        Constants["wn"] = options.wn
        Constants["3d-interpolation"] = options.interpolation
        Constants["protein_shape"] = options.protein_shape
        # -----------------------------------------------------
        #
        # Create and initialize Tracker dictionary with input options
        Tracker = {}
        Tracker["constants"] = Constants
        Tracker["maxit"] = Tracker["constants"]["maxit"]
        Tracker["radius"] = Tracker["constants"]["radius"]
        #Tracker["xr"]             = ""
        #Tracker["yr"]             = "-1"  # Do not change!
        #Tracker["ts"]             = 1
        #Tracker["an"]             = "-1"
        #Tracker["delta"]          = "2.0"
        #Tracker["zoom"]           = True
        #Tracker["nsoft"]          = 0
        #Tracker["local"]          = False
        #Tracker["PWadjustment"]   = Tracker["constants"]["PWadjustment"]
        Tracker["upscale"] = Tracker["constants"]["upscale"]
        #Tracker["upscale"]        = 0.5
        Tracker[
            "applyctf"] = False  #  Should the data be premultiplied by the CTF.  Set to False for local continuous.
        #Tracker["refvol"]         = None
        Tracker["nxinit"] = Tracker["constants"]["nxinit"]
        #Tracker["nxstep"]         = 32
        Tracker["icurrentres"] = -1
        #Tracker["ireachedres"]    = -1
        #Tracker["lowpass"]        = 0.4
        #Tracker["falloff"]        = 0.2
        #Tracker["inires"]         = options.inires  # Now in A, convert to absolute before using
        Tracker["fuse_freq"] = 50  # Now in A, convert to absolute before using
        #Tracker["delpreviousmax"] = False
        #Tracker["anger"]          = -1.0
        #Tracker["shifter"]        = -1.0
        #Tracker["saturatecrit"]   = 0.95
        #Tracker["pixercutoff"]    = 2.0
        #Tracker["directory"]      = ""
        #Tracker["previousoutputdir"] = ""
        #Tracker["eliminated-outliers"] = False
        #Tracker["mainiteration"]  = 0
        #Tracker["movedback"]      = False
        #Tracker["state"]          = Tracker["constants"]["states"][0]
        #Tracker["global_resolution"] =0.0
        Tracker["orgstack"] = orgstack
        #--------------------------------------------------------------------
        # import from utilities
        from utilities import sample_down_1D_curve, get_initial_ID, remove_small_groups, print_upper_triangular_matrix, print_a_line_with_timestamp
        from utilities import print_dict, get_resolution_mrk01, partition_to_groups, partition_independent_runs, get_outliers
        from utilities import merge_groups, save_alist, margin_of_error, get_margin_of_error, do_two_way_comparison, select_two_runs, get_ali3d_params
        from utilities import counting_projections, unload_dict, load_dict, get_stat_proj, create_random_list, get_number_of_groups, recons_mref
        from utilities import apply_low_pass_filter, get_groups_from_partition, get_number_of_groups, get_complementary_elements_total, update_full_dict
        from utilities import count_chunk_members, set_filter_parameters_from_adjusted_fsc, get_two_chunks_from_stack
        ####------------------------------------------------------------------
        #
        # Get the pixel size; if none, set to 1.0, and the original image size
        from utilities import get_shrink_data_huang
        if (myid == main_node):
            line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
            print((line + "Initialization of 3-D sorting"))
            a = get_im(orgstack)
            nnxo = a.get_xsize()
            if (Tracker["nxinit"] > nnxo):
                ERROR(
                    "Image size less than minimum permitted %d" %
                    Tracker["nxinit"], "sxsort3d.py", 1)
                nnxo = -1
            else:
                if Tracker["constants"]["CTF"]:
                    i = a.get_attr('ctf')
                    pixel_size = i.apix
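                    # fuse_freq is given in Angstroms; dividing the pixel size by it
                    # converts it to an absolute (dimensionless) spatial frequency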
                    fq = pixel_size / Tracker["fuse_freq"]
                else:
                    pixel_size = 1.0
                    #  No pixel size, fusing computed as 5 Fourier pixels
                    fq = 5.0 / nnxo
                    del a
        else:
            nnxo = 0
            fq = 0.0
            pixel_size = 1.0
        nnxo = bcast_number_to_all(nnxo, source_node=main_node)
        if (nnxo < 0):
            mpi_finalize()
            exit()
        pixel_size = bcast_number_to_all(pixel_size, source_node=main_node)
        fq = bcast_number_to_all(fq, source_node=main_node)
        if Tracker["constants"]["wn"] == 0:
            Tracker["constants"]["nnxo"] = nnxo
        else:
            Tracker["constants"]["nnxo"] = Tracker["constants"]["wn"]
            nnxo = Tracker["constants"]["nnxo"]
        Tracker["constants"]["pixel_size"] = pixel_size
        Tracker["fuse_freq"] = fq
        del fq, nnxo, pixel_size
        if (Tracker["constants"]["radius"] < 1):
            Tracker["constants"][
                "radius"] = Tracker["constants"]["nnxo"] // 2 - 2
        elif ((2 * Tracker["constants"]["radius"] + 2) >
              Tracker["constants"]["nnxo"]):
            ERROR("Particle radius set too large!", "sxsort3d.py", 1, myid)


####-----------------------------------------------------------------------------------------
# Master directory
        if myid == main_node:
            if masterdir == "":
                timestring = strftime("_%d_%b_%Y_%H_%M_%S", localtime())
                masterdir = "master_sort3d" + timestring
            li = len(masterdir)
            cmd = "{} {}".format("mkdir", masterdir)
            os.system(cmd)
        else:
            li = 0
        li = mpi_bcast(li, 1, MPI_INT, main_node, MPI_COMM_WORLD)[0]
        if li > 0:
            masterdir = mpi_bcast(masterdir, li, MPI_CHAR, main_node,
                                  MPI_COMM_WORLD)
            import string
            masterdir = string.join(masterdir, "")
        if myid == main_node:
            print_dict(Tracker["constants"],
                       "Permanent settings of 3-D sorting program")
        ######### create a vstack from input stack to the local stack in masterdir
        # stack name set to default
        Tracker["constants"]["stack"] = "bdb:" + masterdir + "/rdata"
        Tracker["constants"]["ali3d"] = os.path.join(masterdir,
                                                     "ali3d_init.txt")
        Tracker["constants"]["ctf_params"] = os.path.join(
            masterdir, "ctf_params.txt")
        Tracker["constants"]["partstack"] = Tracker["constants"][
            "ali3d"]  # also serves for refinement
        if myid == main_node:
            total_stack = EMUtil.get_image_count(Tracker["orgstack"])
        else:
            total_stack = 0
        total_stack = bcast_number_to_all(total_stack, source_node=main_node)
        mpi_barrier(MPI_COMM_WORLD)
        from time import sleep
        while not os.path.exists(masterdir):
            print("Node ", myid, "  waiting...")
            sleep(5)
        mpi_barrier(MPI_COMM_WORLD)
        if myid == main_node:
            log_main.add("Sphire sort3d ")
            log_main.add("the sort3d master directory is " + masterdir)
        #####
        ###----------------------------------------------------------------------------------
        # Initial data analysis and handle two chunk files
        from random import shuffle
        # Compute the resolution
        #### make chunkdir dictionary for computing margin of error
        import user_functions
        user_func = user_functions.factory[Tracker["constants"]["user_func"]]
        chunk_dict = {}
        chunk_list = []
        if myid == main_node:
            chunk_one = read_text_file(Tracker["constants"]["chunk0"])
            chunk_two = read_text_file(Tracker["constants"]["chunk1"])
        else:
            chunk_one = 0
            chunk_two = 0
        chunk_one = wrap_mpi_bcast(chunk_one, main_node)
        chunk_two = wrap_mpi_bcast(chunk_two, main_node)
        mpi_barrier(MPI_COMM_WORLD)
        ######################## Read/write bdb: data on main node ############################
        if myid == main_node:
            if (orgstack[:4] == "bdb:"):
                cmd = "{} {} {}".format(
                    "e2bdb.py", orgstack,
                    "--makevstack=" + Tracker["constants"]["stack"])
            else:
                cmd = "{} {} {}".format("sxcpy.py", orgstack,
                                        Tracker["constants"]["stack"])
            junk = cmdexecute(cmd)
            cmd = "{} {} {}".format(
                "sxheader.py  --params=xform.projection",
                "--export=" + Tracker["constants"]["ali3d"], orgstack)
            junk = cmdexecute(cmd)
            cmd = "{} {} {}".format(
                "sxheader.py  --params=ctf",
                "--export=" + Tracker["constants"]["ctf_params"], orgstack)
            junk = cmdexecute(cmd)
        mpi_barrier(MPI_COMM_WORLD)
        ########-----------------------------------------------------------------------------
        Tracker["total_stack"] = total_stack
        Tracker["constants"]["total_stack"] = total_stack
        Tracker["shrinkage"] = float(
            Tracker["nxinit"]) / Tracker["constants"]["nnxo"]
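        # scale the particle radius to the shrunken (nxinit-sized) images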
        Tracker[
            "radius"] = Tracker["constants"]["radius"] * Tracker["shrinkage"]
        if Tracker["constants"]["mask3D"]:
            Tracker["mask3D"] = os.path.join(masterdir, "smask.hdf")
        else:
            Tracker["mask3D"] = None
        if Tracker["constants"]["focus3Dmask"]:
            Tracker["focus3D"] = os.path.join(masterdir, "sfocus.hdf")
        else:
            Tracker["focus3D"] = None
        if myid == main_node:
            if Tracker["constants"]["mask3D"]:
                mask_3D = get_shrink_3dmask(Tracker["nxinit"],
                                            Tracker["constants"]["mask3D"])
                mask_3D.write_image(Tracker["mask3D"])
            if Tracker["constants"]["focus3Dmask"]:
                mask_3D = get_shrink_3dmask(
                    Tracker["nxinit"], Tracker["constants"]["focus3Dmask"])
                st = Util.infomask(mask_3D, None, True)
                if (st[0] == 0.0):
                    ERROR(
                        "sxrsort3d",
                        "incorrect focused mask, after binarize all values zero",
                        1)
                mask_3D.write_image(Tracker["focus3D"])
                del mask_3D
        if Tracker["constants"]["PWadjustment"] != '':
            PW_dict = {}
            nxinit_pwsp = sample_down_1D_curve(
                Tracker["constants"]["nxinit"], Tracker["constants"]["nnxo"],
                Tracker["constants"]["PWadjustment"])
            Tracker["nxinit_PW"] = os.path.join(masterdir, "spwp.txt")
            if myid == main_node:
                write_text_file(nxinit_pwsp, Tracker["nxinit_PW"])
            PW_dict[Tracker["constants"]
                    ["nnxo"]] = Tracker["constants"]["PWadjustment"]
            PW_dict[Tracker["constants"]["nxinit"]] = Tracker["nxinit_PW"]
            Tracker["PW_dict"] = PW_dict
        mpi_barrier(MPI_COMM_WORLD)
        #-----------------------From two chunks to FSC, and low pass filter-----------------------------------------###
        for element in chunk_one:
            chunk_dict[element] = 0
        for element in chunk_two:
            chunk_dict[element] = 1
        chunk_list = [chunk_one, chunk_two]
        Tracker["chunk_dict"] = chunk_dict
        Tracker["P_chunk0"] = len(chunk_one) / float(total_stack)
        Tracker["P_chunk1"] = len(chunk_two) / float(total_stack)
        ### create two volumes to estimate resolution
        if myid == main_node:
            for index in xrange(2):
                write_text_file(
                    chunk_list[index],
                    os.path.join(masterdir, "chunk%01d.txt" % index))
        mpi_barrier(MPI_COMM_WORLD)
        vols = []
        for index in xrange(2):
            data, old_shifts = get_shrink_data_huang(
                Tracker,
                Tracker["constants"]["nxinit"],
                os.path.join(masterdir, "chunk%01d.txt" % index),
                Tracker["constants"]["partstack"],
                myid,
                main_node,
                nproc,
                preshift=True)
            vol = recons3d_4nn_ctf_MPI(myid=myid,
                                       prjlist=data,
                                       symmetry=Tracker["constants"]["sym"],
                                       finfo=None)
            if myid == main_node:
                vol.write_image(os.path.join(masterdir, "vol%d.hdf" % index))
            vols.append(vol)
            mpi_barrier(MPI_COMM_WORLD)
        if myid == main_node:
            low_pass, falloff, currentres = get_resolution_mrk01(
                vols, Tracker["constants"]["radius"],
                Tracker["constants"]["nxinit"], masterdir, Tracker["mask3D"])
            if low_pass > Tracker["constants"]["low_pass_filter"]:
                low_pass = Tracker["constants"]["low_pass_filter"]
        else:
            low_pass = 0.0
            falloff = 0.0
            currentres = 0.0
        currentres = bcast_number_to_all(currentres, source_node=main_node)
        low_pass = bcast_number_to_all(low_pass, source_node=main_node)
        falloff = bcast_number_to_all(falloff, source_node=main_node)
        Tracker["currentres"] = currentres
        Tracker["falloff"] = falloff
        if Tracker["constants"]["low_pass_filter"] == -1.0:
            Tracker["low_pass_filter"] = min(
                .45, low_pass / Tracker["shrinkage"])  # no better than .45
        else:
            Tracker["low_pass_filter"] = min(
                .45,
                Tracker["constants"]["low_pass_filter"] / Tracker["shrinkage"])
        Tracker["lowpass"] = Tracker["low_pass_filter"]
        Tracker["falloff"] = .1
        Tracker["global_fsc"] = os.path.join(masterdir, "fsc.txt")
        ############################################################################################
        if myid == main_node:
            log_main.add("The command-line inputs are as following:")
            log_main.add(
                "**********************************************************")
        for a in sys.argv:
            if myid == main_node: log_main.add(a)
        if myid == main_node:
            log_main.add("number of cpus used in this run is %d" %
                         Tracker["constants"]["nproc"])
            log_main.add(
                "**********************************************************")
        from filter import filt_tanl
        ### START 3-D sorting
        if myid == main_node:
            log_main.add("----------3-D sorting  program------- ")
            log_main.add(
                "current resolution %6.3f for images of original size in terms of absolute frequency"
                % Tracker["currentres"])
            log_main.add("equivalent to %f Angstrom resolution" %
                         (Tracker["constants"]["pixel_size"] /
                          Tracker["currentres"] / Tracker["shrinkage"]))
            log_main.add("the user provided enforced low_pass_filter is %f" %
                         Tracker["constants"]["low_pass_filter"])
            #log_main.add("equivalent to %f Angstrom resolution"%(Tracker["constants"]["pixel_size"]/Tracker["constants"]["low_pass_filter"]))
            for index in xrange(2):
                filt_tanl(
                    get_im(os.path.join(masterdir, "vol%01d.hdf" % index)),
                    Tracker["low_pass_filter"],
                    Tracker["falloff"]).write_image(
                        os.path.join(masterdir, "volf%01d.hdf" % index))
        mpi_barrier(MPI_COMM_WORLD)
        from utilities import get_input_from_string
        delta = get_input_from_string(Tracker["constants"]["delta"])
        delta = delta[0]
        from utilities import even_angles
        n_angles = even_angles(delta, 0, 180)
        this_ali3d = Tracker["constants"]["ali3d"]
        sampled = get_stat_proj(Tracker, delta, this_ali3d)
        if myid == main_node:
            nc = 0
            for a in sampled:
                if len(sampled[a]) > 0:
                    nc += 1
            log_main.add("total sampled direction %10d  at angle step %6.3f" %
                         (len(n_angles), delta))
            log_main.add(
                "captured sampled directions %10d percentage covered by data  %6.3f"
                % (nc, float(nc) / len(n_angles) * 100))
        number_of_images_per_group = Tracker["constants"][
            "number_of_images_per_group"]
        if myid == main_node:
            log_main.add("user provided number_of_images_per_group %d" %
                         number_of_images_per_group)
        Tracker["number_of_images_per_group"] = number_of_images_per_group
        number_of_groups = get_number_of_groups(total_stack,
                                                number_of_images_per_group)
        Tracker["number_of_groups"] = number_of_groups
        generation = 0
        partition_dict = {}
        full_dict = {}
        workdir = os.path.join(masterdir, "generation%03d" % generation)
        Tracker["this_dir"] = workdir
        if myid == main_node:
            log_main.add("---- generation         %5d" % generation)
            log_main.add("number of images per group is set as %d" %
                         number_of_images_per_group)
            log_main.add("the initial number of groups is  %10d " %
                         number_of_groups)
            cmd = "{} {}".format("mkdir", workdir)
            os.system(cmd)
        mpi_barrier(MPI_COMM_WORLD)
        list_to_be_processed = range(Tracker["constants"]["total_stack"])
        Tracker["this_data_list"] = list_to_be_processed
        create_random_list(Tracker)
        #################################
        full_dict = {}
        for iptl in xrange(Tracker["constants"]["total_stack"]):
            full_dict[iptl] = iptl
        Tracker["full_ID_dict"] = full_dict
        #################################
        for indep_run in xrange(Tracker["constants"]["indep_runs"]):
            Tracker["this_particle_list"] = Tracker["this_indep_list"][
                indep_run]
            ref_vol = recons_mref(Tracker)
            if myid == main_node:
                log_main.add("independent run  %10d" % indep_run)
            mpi_barrier(MPI_COMM_WORLD)
            Tracker["this_data_list"] = list_to_be_processed
            Tracker["total_stack"] = len(Tracker["this_data_list"])
            Tracker["this_particle_text_file"] = os.path.join(
                workdir,
                "independent_list_%03d.txt" % indep_run)  # for get_shrink_data
            if myid == main_node:
                write_text_file(Tracker["this_data_list"],
                                Tracker["this_particle_text_file"])
            mpi_barrier(MPI_COMM_WORLD)
            outdir = os.path.join(workdir, "EQ_Kmeans%03d" % indep_run)
            ref_vol = apply_low_pass_filter(ref_vol, Tracker)
            mref_ali3d_EQ_Kmeans(ref_vol, outdir,
                                 Tracker["this_particle_text_file"], Tracker)
            partition_dict[indep_run] = Tracker["this_partition"]
        Tracker["partition_dict"] = partition_dict
        Tracker["total_stack"] = len(Tracker["this_data_list"])
        Tracker["this_total_stack"] = Tracker["total_stack"]
        ###############################
        do_two_way_comparison(Tracker)
        ###############################
        ref_vol_list = []
        from time import sleep
        number_of_ref_class = []
        for igrp in xrange(len(Tracker["two_way_stable_member"])):
            Tracker["this_data_list"] = Tracker["two_way_stable_member"][igrp]
            Tracker["this_data_list_file"] = os.path.join(
                workdir, "stable_class%d.txt" % igrp)
            if myid == main_node:
                write_text_file(Tracker["this_data_list"],
                                Tracker["this_data_list_file"])
            data, old_shifts = get_shrink_data_huang(
                Tracker,
                Tracker["nxinit"],
                Tracker["this_data_list_file"],
                Tracker["constants"]["partstack"],
                myid,
                main_node,
                nproc,
                preshift=True)
            volref = recons3d_4nn_ctf_MPI(myid=myid,
                                          prjlist=data,
                                          symmetry=Tracker["constants"]["sym"],
                                          finfo=None)
            ref_vol_list.append(volref)
            number_of_ref_class.append(len(Tracker["this_data_list"]))
            if myid == main_node:
                log_main.add("group  %d  members %d " %
                             (igrp, len(Tracker["this_data_list"])))
        Tracker["number_of_ref_class"] = number_of_ref_class
        nx_of_image = ref_vol_list[0].get_xsize()
        if Tracker["constants"]["PWadjustment"]:
            Tracker["PWadjustment"] = Tracker["PW_dict"][nx_of_image]
        else:
            Tracker["PWadjustment"] = Tracker["constants"][
                "PWadjustment"]  # no PW adjustment
        if myid == main_node:
            for iref in xrange(len(ref_vol_list)):
                refdata = [None] * 4
                refdata[0] = ref_vol_list[iref]
                refdata[1] = Tracker
                refdata[2] = Tracker["constants"]["myid"]
                refdata[3] = Tracker["constants"]["nproc"]
                volref = user_func(refdata)
                volref.write_image(os.path.join(workdir, "volf_stable.hdf"),
                                   iref)
        mpi_barrier(MPI_COMM_WORLD)
        Tracker["this_data_list"] = Tracker["this_accounted_list"]
        outdir = os.path.join(workdir, "Kmref")
        empty_group, res_groups, final_list = ali3d_mref_Kmeans_MPI(
            ref_vol_list, outdir, Tracker["this_accounted_text"], Tracker)
        Tracker["this_unaccounted_list"] = get_complementary_elements(
            list_to_be_processed, final_list)
        if myid == main_node:
            log_main.add("the number of particles not processed is %d" %
                         len(Tracker["this_unaccounted_list"]))
            write_text_file(Tracker["this_unaccounted_list"],
                            Tracker["this_unaccounted_text"])
        update_full_dict(Tracker["this_unaccounted_list"], Tracker)
        #######################################
        number_of_groups = len(res_groups)
        vol_list = []
        number_of_ref_class = []
        for igrp in xrange(number_of_groups):
            data, old_shifts = get_shrink_data_huang(
                Tracker,
                Tracker["constants"]["nnxo"],
                os.path.join(outdir, "Class%d.txt" % igrp),
                Tracker["constants"]["partstack"],
                myid,
                main_node,
                nproc,
                preshift=True)
            volref = recons3d_4nn_ctf_MPI(myid=myid,
                                          prjlist=data,
                                          symmetry=Tracker["constants"]["sym"],
                                          finfo=None)
            vol_list.append(volref)

            if (myid == main_node):
                npergroup = len(
                    read_text_file(os.path.join(outdir, "Class%d.txt" % igrp)))
            else:
                npergroup = 0
            npergroup = bcast_number_to_all(npergroup, main_node)
            number_of_ref_class.append(npergroup)

        Tracker["number_of_ref_class"] = number_of_ref_class

        mpi_barrier(MPI_COMM_WORLD)
        nx_of_image = vol_list[0].get_xsize()
        if Tracker["constants"]["PWadjustment"]:
            Tracker["PWadjustment"] = Tracker["PW_dict"][nx_of_image]
        else:
            Tracker["PWadjustment"] = Tracker["constants"]["PWadjustment"]

        if myid == main_node:
            for ivol in xrange(len(vol_list)):
                refdata = [None] * 4
                refdata[0] = vol_list[ivol]
                refdata[1] = Tracker
                refdata[2] = Tracker["constants"]["myid"]
                refdata[3] = Tracker["constants"]["nproc"]
                volref = user_func(refdata)
                volref.write_image(
                    os.path.join(workdir, "volf_of_Classes.hdf"), ivol)
                log_main.add("number of unaccounted particles  %10d" %
                             len(Tracker["this_unaccounted_list"]))
                log_main.add("number of accounted particles  %10d" %
                             len(Tracker["this_accounted_list"]))

        Tracker["this_data_list"] = Tracker[
            "this_unaccounted_list"]  # reset parameters for the next round calculation
        Tracker["total_stack"] = len(Tracker["this_unaccounted_list"])
        Tracker["this_total_stack"] = Tracker["total_stack"]
        number_of_groups = get_number_of_groups(
            len(Tracker["this_unaccounted_list"]), number_of_images_per_group)
        Tracker["number_of_groups"] = number_of_groups
        while number_of_groups >= 2:
            generation += 1
            partition_dict = {}
            workdir = os.path.join(masterdir, "generation%03d" % generation)
            Tracker["this_dir"] = workdir
            if myid == main_node:
                log_main.add("*********************************************")
                log_main.add("-----    generation             %5d    " %
                             generation)
                log_main.add("number of images per group is set as %10d " %
                             number_of_images_per_group)
                log_main.add("the number of groups is  %10d " %
                             number_of_groups)
                log_main.add(" number of particles for clustering is %10d" %
                             Tracker["total_stack"])
                cmd = "{} {}".format("mkdir", workdir)
                os.system(cmd)
            mpi_barrier(MPI_COMM_WORLD)
            create_random_list(Tracker)
            for indep_run in xrange(Tracker["constants"]["indep_runs"]):
                Tracker["this_particle_list"] = Tracker["this_indep_list"][
                    indep_run]
                ref_vol = recons_mref(Tracker)
                if myid == main_node:
                    log_main.add("independent run  %10d" % indep_run)
                    outdir = os.path.join(workdir, "EQ_Kmeans%03d" % indep_run)
                Tracker["this_data_list"] = Tracker["this_unaccounted_list"]
                #ref_vol=apply_low_pass_filter(ref_vol,Tracker)
                mref_ali3d_EQ_Kmeans(ref_vol, outdir,
                                     Tracker["this_unaccounted_text"], Tracker)
                partition_dict[indep_run] = Tracker["this_partition"]
                Tracker["this_data_list"] = Tracker["this_unaccounted_list"]
                Tracker["total_stack"] = len(Tracker["this_unaccounted_list"])
                Tracker["partition_dict"] = partition_dict
                Tracker["this_total_stack"] = Tracker["total_stack"]
            total_list_of_this_run = Tracker["this_unaccounted_list"]
            ###############################
            do_two_way_comparison(Tracker)
            ###############################
            ref_vol_list = []
            number_of_ref_class = []
            for igrp in xrange(len(Tracker["two_way_stable_member"])):
                Tracker["this_data_list"] = Tracker["two_way_stable_member"][
                    igrp]
                Tracker["this_data_list_file"] = os.path.join(
                    workdir, "stable_class%d.txt" % igrp)
                if myid == main_node:
                    write_text_file(Tracker["this_data_list"],
                                    Tracker["this_data_list_file"])
                mpi_barrier(MPI_COMM_WORLD)
                data, old_shifts = get_shrink_data_huang(
                    Tracker,
                    Tracker["constants"]["nxinit"],
                    Tracker["this_data_list_file"],
                    Tracker["constants"]["partstack"],
                    myid,
                    main_node,
                    nproc,
                    preshift=True)
                volref = recons3d_4nn_ctf_MPI(
                    myid=myid,
                    prjlist=data,
                    symmetry=Tracker["constants"]["sym"],
                    finfo=None)
                #volref = filt_tanl(volref, Tracker["constants"]["low_pass_filter"],.1)
                if myid == main_node:
                    volref.write_image(os.path.join(workdir, "vol_stable.hdf"),
                                       iref)
                #volref = resample(volref,Tracker["shrinkage"])
                ref_vol_list.append(volref)
                number_of_ref_class.append(len(Tracker["this_data_list"]))
                mpi_barrier(MPI_COMM_WORLD)
            Tracker["number_of_ref_class"] = number_of_ref_class
            Tracker["this_data_list"] = Tracker["this_accounted_list"]
            outdir = os.path.join(workdir, "Kmref")
            empty_group, res_groups, final_list = ali3d_mref_Kmeans_MPI(
                ref_vol_list, outdir, Tracker["this_accounted_text"], Tracker)
            # calculate the 3-D structure of original image size for each group
            number_of_groups = len(res_groups)
            Tracker["this_unaccounted_list"] = get_complementary_elements(
                total_list_of_this_run, final_list)
            if myid == main_node:
                log_main.add("the number of particles not processed is %d" %
                             len(Tracker["this_unaccounted_list"]))
                write_text_file(Tracker["this_unaccounted_list"],
                                Tracker["this_unaccounted_text"])
            mpi_barrier(MPI_COMM_WORLD)
            update_full_dict(Tracker["this_unaccounted_list"], Tracker)
            vol_list = []
            for igrp in xrange(number_of_groups):
                data, old_shifts = get_shrink_data_huang(
                    Tracker,
                    Tracker["constants"]["nnxo"],
                    os.path.join(outdir, "Class%d.txt" % igrp),
                    Tracker["constants"]["partstack"],
                    myid,
                    main_node,
                    nproc,
                    preshift=True)
                volref = recons3d_4nn_ctf_MPI(
                    myid=myid,
                    prjlist=data,
                    symmetry=Tracker["constants"]["sym"],
                    finfo=None)
                vol_list.append(volref)

            mpi_barrier(MPI_COMM_WORLD)
            nx_of_image = ref_vol_list[0].get_xsize()
            if Tracker["constants"]["PWadjustment"]:
                Tracker["PWadjustment"] = Tracker["PW_dict"][nx_of_image]
            else:
                Tracker["PWadjustment"] = Tracker["constants"]["PWadjustment"]

            if myid == main_node:
                for ivol in xrange(len(vol_list)):
                    refdata = [None] * 4
                    refdata[0] = vol_list[ivol]
                    refdata[1] = Tracker
                    refdata[2] = Tracker["constants"]["myid"]
                    refdata[3] = Tracker["constants"]["nproc"]
                    volref = user_func(refdata)
                    volref.write_image(
                        os.path.join(workdir, "volf_of_Classes.hdf"), ivol)
                log_main.add("number of unaccounted particles  %10d" %
                             len(Tracker["this_unaccounted_list"]))
                log_main.add("number of accounted particles  %10d" %
                             len(Tracker["this_accounted_list"]))
            del vol_list
            mpi_barrier(MPI_COMM_WORLD)
            number_of_groups = get_number_of_groups(
                len(Tracker["this_unaccounted_list"]),
                number_of_images_per_group)
            Tracker["number_of_groups"] = number_of_groups
            Tracker["this_data_list"] = Tracker["this_unaccounted_list"]
            Tracker["total_stack"] = len(Tracker["this_unaccounted_list"])
        if Tracker["constants"]["unaccounted"]:
            data, old_shifts = get_shrink_data_huang(
                Tracker,
                Tracker["constants"]["nnxo"],
                Tracker["this_unaccounted_text"],
                Tracker["constants"]["partstack"],
                myid,
                main_node,
                nproc,
                preshift=True)
            volref = recons3d_4nn_ctf_MPI(myid=myid,
                                          prjlist=data,
                                          symmetry=Tracker["constants"]["sym"],
                                          finfo=None)
            nx_of_image = volref.get_xsize()
            if Tracker["constants"]["PWadjustment"]:
                Tracker["PWadjustment"] = Tracker["PW_dict"][nx_of_image]
            else:
                Tracker["PWadjustment"] = Tracker["constants"]["PWadjustment"]
            if (myid == main_node):
                refdata = [None] * 4
                refdata[0] = volref
                refdata[1] = Tracker
                refdata[2] = Tracker["constants"]["myid"]
                refdata[3] = Tracker["constants"]["nproc"]
                volref = user_func(refdata)
                #volref    = filt_tanl(volref, Tracker["constants"]["low_pass_filter"],.1)
                volref.write_image(
                    os.path.join(workdir, "volf_unaccounted.hdf"))
        # Finish program
        if myid == main_node: log_main.add("sxsort3d finishes")
        mpi_barrier(MPI_COMM_WORLD)
        from mpi import mpi_finalize
        mpi_finalize()
        exit()
Example n. 25
def main():
    import os
    import sys
    from optparse import OptionParser
    arglist = []
    for arg in sys.argv:
        arglist.append(arg)
    progname = os.path.basename(arglist[0])
    usage = progname + """ firstvolume  secondvolume maskfile outputfile --wn --step --cutoff  --radius  --fsc  --res_overall  --MPI

	Compute local resolution in real space within the area outlined by the maskfile, using windows of size wn x wn x wn
	"""
    parser = OptionParser(usage, version=SPARXVERSION)

    parser.add_option(
        "--wn",
        type="int",
        default=7,
        help=
        "Size of window within which local real-space FSC is computed (default 7"
    )
    parser.add_option(
        "--step",
        type="float",
        default=1.0,
        help="Shell step in Fourier size in pixels (default 1.0)")
    parser.add_option("--cutoff",
                      type="float",
                      default=0.5,
                      help="resolution cut-off for FSC (default 0.5)")
    parser.add_option(
        "--radius",
        type="int",
        default=-1,
        help=
        "if there is no maskfile, sphere with r=radius will be used, by default the radius is nx/2-wn"
    )
    parser.add_option(
        "--fsc",
        type="string",
        default=None,
        help="overall FSC curve (might be truncated) (default no curve)")
    parser.add_option("--res_overall",
                      type="float",
                      default=-1.0,
                      help="overall resolution estimated by users")
    parser.add_option("--MPI",
                      action="store_true",
                      default=False,
                      help="use MPI version")

    (options, args) = parser.parse_args(arglist[1:])

    if len(args) < 3 or len(args) > 4:
        print "See usage " + usage
        sys.exit()

    if global_def.CACHE_DISABLE:
        from utilities import disable_bdb_cache
        disable_bdb_cache()

    res_overall = options.res_overall

    if options.MPI:
        from mpi import mpi_init, mpi_comm_size, mpi_comm_rank, MPI_COMM_WORLD
        from mpi import mpi_reduce, mpi_bcast, mpi_barrier, mpi_gatherv, mpi_send, mpi_recv
        from mpi import MPI_SUM, MPI_FLOAT, MPI_INT
        sys.argv = mpi_init(len(sys.argv), sys.argv)

        number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
        myid = mpi_comm_rank(MPI_COMM_WORLD)
        main_node = 0
        cutoff = options.cutoff

        nk = int(options.wn)

        if (myid == main_node):
            #print sys.argv
            vi = get_im(sys.argv[1])
            ui = get_im(sys.argv[2])

            nx = vi.get_xsize()
            ny = vi.get_ysize()
            nz = vi.get_zsize()
            dis = [nx, ny, nz]
        else:
            dis = [0, 0, 0]

        dis = bcast_list_to_all(dis, myid, source_node=main_node)

        if (myid != main_node):
            nx = int(dis[0])
            ny = int(dis[1])
            nz = int(dis[2])

            vi = model_blank(nx, ny, nz)
            ui = model_blank(nx, ny, nz)

        if len(args) == 3:
            m = model_circle((min(nx, ny, nz) - nk) // 2, nx, ny, nz)
            outvol = args[2]

        elif len(args) == 4:
            if (myid == main_node):
                m = binarize(get_im(args[2]), 0.5)
            else:
                m = model_blank(nx, ny, nz)
            outvol = args[3]
        bcast_EMData_to_all(m, myid, main_node)

        from statistics import locres
        """
		res_overall = 0.5
		if myid ==main_node:
			fsc_curve = fsc(vi, ui)
			for ifreq in xrange(len(fsc_curve[0])-1, -1, -1):
				if fsc_curve[1][ifreq] > options.cutoff:
					res_overall = fsc_curve[0][ifreq]
					break
		res_overall = bcast_number_to_all(res_overall, main_node)
		"""
        freqvol, resolut = locres(vi, ui, m, nk, cutoff, options.step, myid,
                                  main_node, number_of_proc)
        if (myid == 0):
            if res_overall != -1.0:
                freqvol += (res_overall - Util.infomask(freqvol, m, True)[0])
                for ifreq in xrange(len(resolut)):
                    if resolut[ifreq][0] > res_overall:
                        break
                for jfreq in xrange(ifreq, len(resolut)):
                    resolut[jfreq][1] = 0.0
            freqvol.write_image(outvol)
            if (options.fsc != None): write_text_row(resolut, options.fsc)
        from mpi import mpi_finalize
        mpi_finalize()

    else:
        cutoff = options.cutoff
        vi = get_im(args[0])
        ui = get_im(args[1])

        nn = vi.get_xsize()
        nk = int(options.wn)

        if len(args) == 3:
            m = model_circle((nn - nk) // 2, nn, nn, nn)
            outvol = args[2]

        elif len(args) == 4:
            m = binarize(get_im(args[2]), 0.5)
            outvol = args[3]

        mc = model_blank(nn, nn, nn, 1.0) - m

        vf = fft(vi)
        uf = fft(ui)
        """		
		res_overall = 0.5
		fsc_curve = fsc(vi, ui)
		for ifreq in xrange(len(fsc_curve[0])-1, -1, -1):
			if fsc_curve[1][ifreq] > options.cutoff:
				res_overall = fsc_curve[0][ifreq]
				break
		"""
        lp = int(nn / 2 / options.step + 0.5)
        step = 0.5 / lp
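        # Illustrative arithmetic only: for a 128-pixel volume with --step=1.0,
        # lp = int(128/2/1.0 + 0.5) = 64 and the normalized shell width is step = 0.5/64 = 0.0078125.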

        freqvol = model_blank(nn, nn, nn)
        resolut = []
        for i in xrange(1, lp):
            fl = step * i
            fh = fl + step
            print lp, i, step, fl, fh
            v = fft(filt_tophatb(vf, fl, fh))
            u = fft(filt_tophatb(uf, fl, fh))
            tmp1 = Util.muln_img(v, v)
            tmp2 = Util.muln_img(u, u)

            do = Util.infomask(square_root(Util.muln_img(tmp1, tmp2)), m,
                               True)[0]

            tmp3 = Util.muln_img(u, v)
            dp = Util.infomask(tmp3, m, True)[0]
            resolut.append([i, (fl + fh) / 2.0, dp / do])

            tmp1 = Util.box_convolution(tmp1, nk)
            tmp2 = Util.box_convolution(tmp2, nk)
            tmp3 = Util.box_convolution(tmp3, nk)

            Util.mul_img(tmp1, tmp2)

            tmp1 = square_root(tmp1)

            Util.mul_img(tmp1, m)
            Util.add_img(tmp1, mc)

            Util.mul_img(tmp3, m)
            Util.add_img(tmp3, mc)

            Util.div_img(tmp3, tmp1)

            Util.mul_img(tmp3, m)
            freq = (fl + fh) / 2.0
            bailout = True
            for x in xrange(nn):
                for y in xrange(nn):
                    for z in xrange(nn):
                        if (m.get_value_at(x, y, z) > 0.5):
                            if (freqvol.get_value_at(x, y, z) == 0.0):
                                if (tmp3.get_value_at(x, y, z) < cutoff):
                                    freqvol.set_value_at(x, y, z, freq)
                                    bailout = False
                                else:
                                    bailout = False
            if (bailout): break
        print len(resolut)
        if res_overall != -1.0:
            freqvol += (res_overall - Util.infomask(freqvol, m, True)[0])
            for ifreq in xrange(len(resolut)):
                if resolut[ifreq][1] > res_overall:
                    break
            for jfreq in xrange(ifreq, len(resolut)):
                resolut[jfreq][2] = 0.0
        freqvol.write_image(outvol)
        if (options.fsc != None): write_text_row(resolut, options.fsc)
Example n. 26
def main():
    arglist = []
    for arg in sys.argv:
        arglist.append(arg)

    progname = os.path.basename(arglist[0])
    usage = progname + " prj_stack volume [begin end step] --CTF --npad=ntimes_padding --list=file --group=ID --snr=SNR --sym=symmetry --verbose=(0|1) --xysize --MPI"
    parser = OptionParser(usage, version=SPARXVERSION)

    parser.add_option("--CTF",
                      action="store_true",
                      default=False,
                      help="apply CTF correction")
    parser.add_option("--snr",
                      type="float",
                      default=1.0,
                      help="Signal-to-Noise Ratio")
    parser.add_option("--sym", type="string", default="c1", help="symmetry")
    parser.add_option(
        "--list",
        type="string",
        help="file with list of images to be used in the first column")
    parser.add_option(
        "--group",
        type="int",
        default=-1,
        help=
        "perform reconstruction using images for a given group number (group is attribute in the header)"
    )
    parser.add_option("--MPI",
                      action="store_true",
                      default=False,
                      help="use MPI version ")
    parser.add_option("--npad",
                      type="int",
                      default=2,
                      help="number of times padding (default 2)")
    parser.add_option("--verbose",
                      type="int",
                      default=0,
                      help="verbose level: 0 no verbose, 1 verbose")
    parser.add_option("--xysize",
                      type="int",
                      default=-1,
                      help="user expected size at xy direction")
    parser.add_option("--zsize",
                      type="int",
                      default=-1,
                      help="user expected size at z direction")
    parser.add_option("--smearstep",
                      type="float",
                      default=0.0,
                      help="Rotational smear step (default 0.0, no smear)")

    (options, args) = parser.parse_args(arglist[1:])

    if options.MPI:
        from mpi import mpi_init
        sys.argv = mpi_init(len(sys.argv), sys.argv)

    if global_def.CACHE_DISABLE:
        from utilities import disable_bdb_cache
        disable_bdb_cache()

    if len(args) == 2:
        prj_stack = args[0]
        vol_stack = args[1]
        nimage = EMUtil.get_image_count(prj_stack)
        pid_list = range(0, nimage)
    elif len(args) == 5:
        prj_stack = args[0]
        vol_stack = args[1]
        begin = int(args[2])
        end = int(args[3])
        step = int(args[4])
        pid_list = range(begin, end, step)
    else:
        ERROR("incomplete list of arguments", "recon3d_n", 1)
        exit()

    if (options.list and options.group > -1):
        ERROR("options group and list cannot be used together", "recon3d_n", 1)
        sys.exit()

    from applications import recons3d_n

    global_def.BATCH = True
    recons3d_n(prj_stack, pid_list, vol_stack, options.CTF, options.snr, 1, options.npad,\
      options.sym, options.list, options.group, options.verbose, options.MPI,options.xysize, options.zsize, options.smearstep)
    global_def.BATCH = False

    if options.MPI:
        from mpi import mpi_finalize
        mpi_finalize()
Example n. 27
def main():

	arglist = []
	for arg in sys.argv:
		arglist.append(arg)
	progname = os.path.basename(arglist[0])
	usage = progname + " stack ref_vol outdir <maskfile> --ir=inner_radius --ou=outer_radius --rs=ring_step --xr=x_range --yr=y_range  --ts=translational_search_step  --delta=angular_step --an=angular_neighborhood --deltapsi=Delta_psi --startpsi=Start_psi --maxit=max_iter --stoprnct=percentage_to_stop --CTF --snr=SNR  --ref_a=S --sym=c1 --function=user_function --Fourvar=Fourier_variance --debug --MPI"
	parser = OptionParser(usage,version=SPARXVERSION)
	parser.add_option("--ir",       type= "int",         default= 1,                  help="inner radius for rotational correlation > 0 (set to 1)")
	parser.add_option("--ou",       type= "int",         default= -1,                 help="outer radius for rotational correlation < int(nx/2)-1 (set to the radius of the particle)")
	parser.add_option("--rs",       type= "int",         default= 1,                  help="step between rings in rotational correlation >0  (set to 1)" )
	parser.add_option("--xr",       type="string",       default= "4 2 1 1 1",        help="range for translation search in x direction, search is +/xr")
	parser.add_option("--yr",       type="string",       default= "-1",               help="range for translation search in y direction, search is +/yr (default = same as xr)")
	parser.add_option("--ts",       type="string",       default= "1 1 1 0.5 0.25",   help="step size of the translation search in both directions, search is -xr, -xr+ts, 0, xr-ts, xr, can be fractional")
	parser.add_option("--delta",    type="string",       default= "10 6 4 3 2",       help="angular step of reference projections, (default is a sequence: 10 6 4 3 2")
	parser.add_option("--an",       type="string",       default= "-1",               help="angular neighborhood for local searches (phi and theta)")
	parser.add_option("--apsi",     type="string",       default= "-1",               help="angular neighborhood for local searches (psi)")
	parser.add_option("--deltapsi", type="string",       default= "-1",               help="Delta psi for coarse search")
	parser.add_option("--startpsi", type="string",       default= "-1",               help="Start psi for coarse search")
	#parser.add_option("--center",   type="float",        default= -1,                 help="-1: average shift method; 0: no centering; 1: center of gravity (default=-1)")
	parser.add_option("--maxit",    type="float",        default= 5,                  help="maximum number of iterations performed for each angular step (set to 5) ")
	parser.add_option("--stoprnct", type="float",        default=0.0,                 help="Minimum percentage of particles that change orientation to stop the program")
	parser.add_option("--CTF",      action="store_true", default=False,               help="Consider CTF correction during the alignment ")
	parser.add_option("--snr",      type="float",        default= 1.0,                help="Signal-to-Noise Ratio of the data")
	parser.add_option("--ref_a",    type="string",       default= "S",                help="method for generating the quasi-uniformly distributed projection directions (default S)")
	parser.add_option("--sym",      type="string",       default= "c1",               help="symmetry of the refined structure")
	parser.add_option("--function", type="string",       default="ref_ali3d",         help="name of the reference preparation function (ref_ali3d)")
	parser.add_option("--MPI",      action="store_true", default=False,               help="whether to use MPI version")
	parser.add_option("--Fourvar",  action="store_true", default=False,               help="compute Fourier variance")
	parser.add_option("--npad",     type="int",          default= 2,                  help="padding size for 3D reconstruction (default=2)")
	parser.add_option("--debug",    action="store_true", default=False,               help="debug")
	parser.add_option("--shc",      action="store_true", default=False,               help="use SHC algorithm")
	parser.add_option("--nsoft",    type="int",          default= 1,                  help="number of SHC soft assignments (default=1)")
	parser.add_option("--nh2",      action="store_true", default=False,               help="new - SHC2")
	parser.add_option("--ns",       action="store_true", default=False,               help="new - saturn")
	parser.add_option("--ns2",      action="store_true", default=False,               help="new - saturn2")
	parser.add_option("--chunk",    type="float",        default= 0.2,                help="percentage of data used for alignment")
	parser.add_option("--rantest",  action="store_true", default=False,               help="rantest")
	parser.add_option("--searchpsi",action="store_true", default= False,              help="psi refinement")
	parser.add_option("--gamma",    type="float",        default= -1.0,               help="gamma")
	(options, args) = parser.parse_args(arglist[1:])
	if len(args) < 3 or len(args) > 4:
		print "usage: " + usage
		print "Please run '" + progname + " -h' for detailed options"
	else:
		if len(args) == 3 :
			mask = None
		else:
			mask = args[3]
		if options.MPI:
			from mpi import mpi_init, mpi_finalize
			sys.argv = mpi_init(len(sys.argv), sys.argv)

		if global_def.CACHE_DISABLE:
			from utilities import disable_bdb_cache
			disable_bdb_cache()
		#  centering permanently disabled due to the way new polar searches are done
		center = 0
		if(options.ns):
			global_def.BATCH = True
			from development import  ali3d_saturn
			ali3d_saturn(args[0], args[1], args[2], mask, options.ir, options.ou, options.rs, options.xr,
				options.yr, options.ts, options.delta, options.an, options.apsi, options.deltapsi, options.startpsi,
				center, options.maxit, options.CTF, options.snr, options.ref_a, options.sym,
				options.function, options.Fourvar, options.npad, options.debug, options.MPI, options.stoprnct, gamma=options.gamma)
			global_def.BATCH = False
		elif(options.ns2):
			global_def.BATCH = True
			from development import  ali3d_saturn2
			ali3d_saturn2(args[0], args[1], args[2], mask, options.ir, options.ou, options.rs, options.xr,
				options.yr, options.ts, options.delta, options.an, options.apsi, options.deltapsi, options.startpsi,
				center, options.maxit, options.CTF, options.snr, options.ref_a, options.sym,
				options.function, options.Fourvar, options.npad, options.debug, options.MPI, options.stoprnct)
			global_def.BATCH = False
		elif(options.shc):
			if not options.MPI:
				print "Only MPI version is implemented!!!"
			else:
				global_def.BATCH = True
				if(options.nsoft == 1):
					from applications import ali3d_shcMPI
					ali3d_shcMPI(args[0], args[1], args[2], mask, options.ir, options.ou, options.rs, options.xr,
					options.yr, options.ts, options.delta, options.an, options.apsi, options.deltapsi, options.startpsi,
					center, options.maxit, options.CTF, options.snr, options.ref_a, options.sym,
					options.function, options.Fourvar, options.npad, options.debug, options.stoprnct, gamma=options.gamma)
				elif(options.nsoft == 0):
					from applications import ali3d_shc0MPI
					ali3d_shc0MPI(args[0], args[1], args[2], mask, options.ir, options.ou, options.rs, options.xr,
					options.yr, options.ts, options.delta, options.an, options.apsi, options.deltapsi, options.startpsi,
					center, options.maxit, options.CTF, options.snr, options.ref_a, options.sym,
					options.function, options.Fourvar, options.npad, options.debug, options.stoprnct, gamma=options.gamma)
				else:
					from multi_shc import ali3d_multishc_soft
					import user_functions
					options.user_func = user_functions.factory[options.function]
					ali3d_multishc_soft(args[0], args[1], options, mpi_comm = None, log = None, nsoft = options.nsoft )
				global_def.BATCH = False
		elif(options.nh2):
			global_def.BATCH = True
			from development import ali3d_shc2
			ali3d_shc2(args[0], args[1], args[2], mask, options.ir, options.ou, options.rs, options.xr,
				options.yr, options.ts, options.delta, options.an, options.apsi, options.deltapsi, options.startpsi,
				center, options.maxit, options.CTF, options.snr, options.ref_a, options.sym,
				options.function, options.Fourvar, options.npad, options.debug, options.MPI, options.stoprnct)
			global_def.BATCH = False
		elif options.searchpsi:
			from applications import ali3dpsi_MPI
			global_def.BATCH = True
			ali3dpsi_MPI(args[0], args[1], args[2], mask, options.ir, options.ou, options.rs, options.xr,
			options.yr, options.ts, options.delta, options.an, options.apsi, options.deltapsi, options.startpsi,
			center, options.maxit, options.CTF, options.snr, options.ref_a, options.sym,
			options.function, options.Fourvar, options.npad, options.debug, options.stoprnct)
			global_def.BATCH = False
		else:
			if options.rantest:
				from development import ali3d_rantest
				global_def.BATCH = True
				ali3d_rantest(args[0], args[1], args[2], mask, options.ir, options.ou, options.rs, options.xr,
				options.yr, options.ts, options.delta, options.an, options.deltapsi, options.startpsi,
				center, options.maxit, options.CTF, options.snr, options.ref_a, options.sym,
				options.function, options.Fourvar, options.npad, options.debug, options.stoprnct)
				global_def.BATCH = False
			else:
				from applications import ali3d
				global_def.BATCH = True
				ali3d(args[0], args[1], args[2], mask, options.ir, options.ou, options.rs, options.xr,
				options.yr, options.ts, options.delta, options.an, options.apsi, options.deltapsi, options.startpsi,
				center, options.maxit, options.CTF, options.snr, options.ref_a, options.sym,
				options.function, options.Fourvar, options.npad, options.debug, options.MPI, options.stoprnct)
				global_def.BATCH = False

		if options.MPI:  mpi_finalize()
Example n. 28
def main():
	import	global_def
	from	optparse 	import OptionParser
	from	EMAN2 		import EMUtil
	import	os
	import	sys
	from time import time

	progname = os.path.basename(sys.argv[0])
	usage = progname + " proj_stack output_averages --MPI"
	parser = OptionParser(usage, version=SPARXVERSION)

	parser.add_option("--img_per_group",type="int"         ,	default=100  ,				help="number of images per group" )
	parser.add_option("--radius", 		type="int"         ,	default=-1   ,				help="radius for alignment" )
	parser.add_option("--xr",           type="string"      ,    default="2 1",              help="range for translation search in x direction, search is +/xr")
	parser.add_option("--yr",           type="string"      ,    default="-1",               help="range for translation search in y direction, search is +/yr (default = same as xr)")
	parser.add_option("--ts",           type="string"      ,    default="1 0.5",            help="step size of the translation search in both directions, search is -xr, -xr+ts, 0, xr-ts, xr, can be fractional")
	parser.add_option("--iter", 		type="int"         ,	default=30,                 help="number of iterations within alignment (default = 30)" )
	parser.add_option("--num_ali",      type="int"     	   ,    default=5,         			help="number of alignments performed for stability (default = 5)" )
	parser.add_option("--thld_err",     type="float"       ,    default=1.0,         		help="threshold of pixel error (default = 1.732)" )
	parser.add_option("--grouping" , 	type="string"      ,	default="GRP",				help="do grouping of projections: PPR - per projection, GRP - different size groups, exclusive (default), GEV - grouping equal size")
	parser.add_option("--delta",        type="float"       ,    default=-1.0,         		help="angular step for reference projections (required for GEV method)")
	parser.add_option("--fl",           type="float"       ,    default=0.3,                help="cut-off frequency of hyperbolic tangent low-pass Fourier filter")
	parser.add_option("--aa",           type="float"       ,    default=0.2,                help="fall-off of hyperbolic tangent low-pass Fourier filter")
	parser.add_option("--CTF",          action="store_true",    default=False,              help="Consider CTF correction during the alignment ")
	parser.add_option("--MPI" , 		action="store_true",	default=False,				help="use MPI version")

	(options,args) = parser.parse_args()
	
	from mpi          import mpi_init, mpi_comm_rank, mpi_comm_size, MPI_COMM_WORLD, MPI_TAG_UB
	from mpi          import mpi_barrier, mpi_send, mpi_recv, mpi_bcast, MPI_INT, mpi_finalize, MPI_FLOAT
	from applications import MPI_start_end, within_group_refinement, ali2d_ras
	from pixel_error  import multi_align_stability
	from utilities    import send_EMData, recv_EMData
	from utilities    import get_image, bcast_number_to_all, set_params2D, get_params2D
	from utilities    import group_proj_by_phitheta, model_circle, get_input_from_string

	sys.argv = mpi_init(len(sys.argv), sys.argv)
	myid = mpi_comm_rank(MPI_COMM_WORLD)
	number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
	main_node = 0

	if len(args) == 2:
		stack  = args[0]
		outdir = args[1]
	else:
		ERROR("incomplete list of arguments", "sxproj_stability", 1, myid=myid)
		exit()
	if not options.MPI:
		ERROR("Non-MPI not supported!", "sxproj_stability", myid=myid)
		exit()		 

	if global_def.CACHE_DISABLE:
		from utilities import disable_bdb_cache
		disable_bdb_cache()
	global_def.BATCH = True

	#if os.path.exists(outdir):  ERROR('Output directory exists, please change the name and restart the program', "sxproj_stability", 1, myid)
	#mpi_barrier(MPI_COMM_WORLD)

	
	img_per_grp = options.img_per_group
	radius = options.radius
	ite = options.iter
	num_ali = options.num_ali
	thld_err = options.thld_err

	xrng        = get_input_from_string(options.xr)
	if  options.yr == "-1":  yrng = xrng
	else          :  yrng = get_input_from_string(options.yr)
	step        = get_input_from_string(options.ts)


	if myid == main_node:
		nima = EMUtil.get_image_count(stack)
		img  = get_image(stack)
		nx   = img.get_xsize()
		ny   = img.get_ysize()
	else:
		nima = 0
		nx = 0
		ny = 0
	nima = bcast_number_to_all(nima)
	nx   = bcast_number_to_all(nx)
	ny   = bcast_number_to_all(ny)
	if radius == -1: radius = nx/2-2
	mask = model_circle(radius, nx, nx)

	st = time()
	if options.grouping == "GRP":
		if myid == main_node:
			print "  A  ",myid,"  ",time()-st
			proj_attr = EMUtil.get_all_attributes(stack, "xform.projection")
			proj_params = []
			for i in xrange(nima):
				dp = proj_attr[i].get_params("spider")
				phi, theta, psi, s2x, s2y = dp["phi"], dp["theta"], dp["psi"], -dp["tx"], -dp["ty"]
				proj_params.append([phi, theta, psi, s2x, s2y])

			# Here is where the grouping is done. group_proj_by_phitheta is not well annotated,
			# so its return values are briefly explained here:
			# proj_list  : a list of lists of particle numbers; each inner list contains img_per_grp particle numbers,
			#              except possibly the last one. Depending on how many particles are left over, they either
			#              form their own group or are appended to the last group.
			# angle_list : also a list of lists; each inner list contains three numbers (phi, theta, delta), where
			#              (phi, theta) is the projection angle of the center of the group and delta is its angular range.
			# mirror_list: also a list of lists; each inner list contains img_per_grp True/False flags indicating
			#              whether the corresponding particle should take the mirror position.
			# In this program angle_list and mirror_list are not of interest.
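			# Illustrative shapes only (the values below are made up, not from a real run); with
			# img_per_grp=100 a call might return something like:
			#   proj_list_all = [[0, 17, 42, ...], [5, 88, ...], ...]        # particle ids, one inner list per group
			#   angle_list    = [[12.5, 77.0, 3.2], [48.0, 15.5, 2.9], ...]  # (phi, theta, delta) per group
			#   mirror_list   = [[False, True, ...], [True, False, ...], ...]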

			proj_list_all, angle_list, mirror_list = group_proj_by_phitheta(proj_params, img_per_grp=img_per_grp)
			del proj_params
			print "  B  number of groups  ",myid,"  ",len(proj_list_all),time()-st
		mpi_barrier(MPI_COMM_WORLD)

		# Number of groups; there could actually be one or two more, since the size of the
		# remaining group varies. The leftover groups are simply assigned to the main node.
		n_grp = nima/img_per_grp-1
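		# Worked example (illustrative numbers only): with nima = 10000 and img_per_grp = 100,
		# n_grp = 10000/100 - 1 = 99; these groups are distributed round-robin over the MPI ranks
		# (proc_to_stay = i % number_of_proc below), and any remaining groups stay on the main node.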

		# Divide proj_list_all equally to all nodes, and becomes proj_list
		proj_list = []
		for i in xrange(n_grp):
			proc_to_stay = i%number_of_proc
			if proc_to_stay == main_node:
				if myid == main_node: 	proj_list.append(proj_list_all[i])
			elif myid == main_node:
				mpi_send(len(proj_list_all[i]), 1, MPI_INT, proc_to_stay, MPI_TAG_UB, MPI_COMM_WORLD)
				mpi_send(proj_list_all[i], len(proj_list_all[i]), MPI_INT, proc_to_stay, MPI_TAG_UB, MPI_COMM_WORLD)
			elif myid == proc_to_stay:
				img_per_grp = mpi_recv(1, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
				img_per_grp = int(img_per_grp[0])
				temp = mpi_recv(img_per_grp, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
				proj_list.append(map(int, temp))
				del temp
			mpi_barrier(MPI_COMM_WORLD)
		print "  C  ",myid,"  ",time()-st
		if myid == main_node:
			# Assign the remaining groups to main_node
			for i in xrange(n_grp, len(proj_list_all)):
				proj_list.append(proj_list_all[i])
			del proj_list_all, angle_list, mirror_list


	#   Compute stability per projection direction; an equal number of images is assigned to each direction, thus groups overlap
	elif options.grouping == "GEV":
		if options.delta == -1.0: ERROR("Angular step for reference projections is required for GEV method","sxproj_stability",1)
		from utilities import even_angles, nearestk_to_refdir, getvec
		refproj = even_angles(options.delta)
		img_begin, img_end = MPI_start_end(len(refproj), number_of_proc, myid)
		# Now each processor keeps its own share of reference projections
		refprojdir = refproj[img_begin: img_end]
		del refproj

		ref_ang = [0.0]*(len(refprojdir)*2)
		for i in xrange(len(refprojdir)):
			ref_ang[i*2]   = refprojdir[0][0]
			ref_ang[i*2+1] = refprojdir[0][1]+i*0.1

		print "  A  ",myid,"  ",time()-st
		proj_attr = EMUtil.get_all_attributes(stack, "xform.projection")
		#  the solution below is very slow, do not use it unless there is a problem with the I/O
		"""
		for i in xrange(number_of_proc):
			if myid == i:
				proj_attr = EMUtil.get_all_attributes(stack, "xform.projection")
			mpi_barrier(MPI_COMM_WORLD)
		"""
		print "  B  ",myid,"  ",time()-st

		proj_ang = [0.0]*(nima*2)
		for i in xrange(nima):
			dp = proj_attr[i].get_params("spider")
			proj_ang[i*2]   = dp["phi"]
			proj_ang[i*2+1] = dp["theta"]
		print "  C  ",myid,"  ",time()-st
		asi = Util.nearestk_to_refdir(proj_ang, ref_ang, img_per_grp)
		del proj_ang, ref_ang
		proj_list = []
		for i in xrange(len(refprojdir)):
			proj_list.append(asi[i*img_per_grp:(i+1)*img_per_grp])
		del asi
		print "  D  ",myid,"  ",time()-st
		#from sys import exit
		#exit()


	#   Compute stability per projection
	elif options.grouping == "PPR":
		print "  A  ",myid,"  ",time()-st
		proj_attr = EMUtil.get_all_attributes(stack, "xform.projection")
		print "  B  ",myid,"  ",time()-st
		proj_params = []
		for i in xrange(nima):
			dp = proj_attr[i].get_params("spider")
			phi, theta, psi, s2x, s2y = dp["phi"], dp["theta"], dp["psi"], -dp["tx"], -dp["ty"]
			proj_params.append([phi, theta, psi, s2x, s2y])
		img_begin, img_end = MPI_start_end(nima, number_of_proc, myid)
		print "  C  ",myid,"  ",time()-st
		from utilities import nearest_proj
		proj_list, mirror_list = nearest_proj(proj_params, img_per_grp, range(img_begin, img_begin+1))#range(img_begin, img_end))
		refprojdir = proj_params[img_begin: img_end]
		del proj_params, mirror_list
		print "  D  ",myid,"  ",time()-st
	else:  ERROR("Incorrect projection grouping option","sxproj_stability",1)
	"""
	from utilities import write_text_file
	for i in xrange(len(proj_list)):
		write_text_file(proj_list[i],"projlist%06d_%04d"%(i,myid))
	"""

	###########################################################################################################
	# Begin stability test
	from utilities import get_params_proj, read_text_file
	#if myid == 0:
	#	from utilities import read_text_file
	#	proj_list[0] = map(int, read_text_file("lggrpp0.txt"))


	from utilities import model_blank
	aveList = [model_blank(nx,ny) for _ in xrange(len(proj_list))]
	if options.grouping == "GRP":  refprojdir = [[0.0,0.0,-1.0]]*len(proj_list)
	for i in xrange(len(proj_list)):
		print "  E  ",myid,"  ",time()-st
		class_data = EMData.read_images(stack, proj_list[i])
		#print "  R  ",myid,"  ",time()-st
		if options.CTF :
			from filter import filt_ctf
			for im in xrange(len(class_data)):  #  MEM LEAK!!
				atemp = class_data[im].copy()
				btemp = filt_ctf(atemp, atemp.get_attr("ctf"), binary=1)
				class_data[im] = btemp
				#class_data[im] = filt_ctf(class_data[im], class_data[im].get_attr("ctf"), binary=1)
		for im in class_data:
			try:
				t = im.get_attr("xform.align2d") # if they are there, no need to set them!
			except:
				try:
					t = im.get_attr("xform.projection")
					d = t.get_params("spider")
					set_params2D(im, [0.0,-d["tx"],-d["ty"],0,1.0])
				except:
					set_params2D(im, [0.0, 0.0, 0.0, 0, 1.0])
		#print "  F  ",myid,"  ",time()-st
		# Here, we perform realignment num_ali times
		all_ali_params = []
		for j in xrange(num_ali):
			if( xrng[0] == 0.0 and yrng[0] == 0.0 ):
				avet = ali2d_ras(class_data, randomize = True, ir = 1, ou = radius, rs = 1, step = 1.0, dst = 90.0, maxit = ite, check_mirror = True, FH=options.fl, FF=options.aa)
			else:
				avet = within_group_refinement(class_data, mask, True, 1, radius, 1, xrng, yrng, step, 90.0, ite, options.fl, options.aa)
			ali_params = []
			for im in xrange(len(class_data)):
				alpha, sx, sy, mirror, scale = get_params2D(class_data[im])
				ali_params.extend( [alpha, sx, sy, mirror] )
			all_ali_params.append(ali_params)
		#aveList[i] = avet
		#print "  G  ",myid,"  ",time()-st
		del ali_params
		# We determine the stability of this group here.
		# stable_set contains all particles deemed stable; it is a list of lists, sorted by pixel error.
		# Each inner list contains the pixel error, the image number within this subset, and the
		# corresponding 2D alignment parameters (used below via stable_set[l][2]).
		#from utilities import write_text_file
		#write_text_file(all_ali_params, "all_ali_params%03d.txt"%myid)
		stable_set, mir_stab_rate, average_pix_err = multi_align_stability(all_ali_params, 0.0, 10000.0, thld_err, False, 2*radius+1)
		#print "  H  ",myid,"  ",time()-st
		if(len(stable_set) > 5):
			stable_set_id = []
			members = []
			pix_err = []
			# First put the stable members into attr 'members' and 'pix_err'
			for s in stable_set:
				# s[1] - number in this subset
				stable_set_id.append(s[1])
				# the original image number
				members.append(proj_list[i][s[1]])
				pix_err.append(s[0])
			# Then put the unstable members into attr 'members' and 'pix_err'
			from fundamentals import rot_shift2D
			avet.to_zero()
			if options.grouping == "GRP":
				aphi = 0.0
				atht = 0.0
				vphi = 0.0
				vtht = 0.0
			l = -1
			for j in xrange(len(proj_list[i])):
				#  This works only if stable_set_id is sorted in increasing order; note how l advances in step with it
				if j in stable_set_id:
					l += 1
					avet += rot_shift2D(class_data[j], stable_set[l][2][0], stable_set[l][2][1], stable_set[l][2][2], stable_set[l][2][3] )
					if options.grouping == "GRP":
						phi, theta, psi, sxs, sys = get_params_proj(class_data[j])
						if( theta > 90.0):
							phi = (phi+540.0)%360.0
							theta = 180.0 - theta
						aphi += phi
						atht += theta
						vphi += phi*phi
						vtht += theta*theta
				else:
					members.append(proj_list[i][j])
					pix_err.append(99999.99)
			aveList[i] = avet.copy()
			if l>1 :
				l += 1
				aveList[i] /= l
				if options.grouping == "GRP":
					aphi /= l
					atht /= l
					vphi = (vphi - l*aphi*aphi)/l
					vtht = (vtht - l*atht*atht)/l
					from math import sqrt
					refprojdir[i] = [aphi, atht, (sqrt(max(vphi,0.0))+sqrt(max(vtht,0.0)))/2.0]

			# Here more information has to be stored, PARTICULARLY WHAT IS THE REFERENCE DIRECTION
			aveList[i].set_attr('members', members)
			aveList[i].set_attr('refprojdir',refprojdir[i])
			aveList[i].set_attr('pixerr', pix_err)
		else:
			print  " empty group ",i, refprojdir[i]
			aveList[i].set_attr('members',[-1])
			aveList[i].set_attr('refprojdir',refprojdir[i])
			aveList[i].set_attr('pixerr', [99999.])

	del class_data
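
	#  Gather the results on the main node: each worker first sends how many averages it holds,
	#  then for every average the image itself followed by its 'members', 'pixerr' and
	#  'refprojdir' attributes; the main node writes everything sequentially to args[1].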

	if myid == main_node:
		km = 0
		for i in xrange(number_of_proc):
			if i == main_node :
				for im in xrange(len(aveList)):
					aveList[im].write_image(args[1], km)
					km += 1
			else:
				nl = mpi_recv(1, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)
				nl = int(nl[0])
				for im in xrange(nl):
					ave = recv_EMData(i, im+i+70000)
					nm = mpi_recv(1, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)
					nm = int(nm[0])
					members = mpi_recv(nm, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)
					ave.set_attr('members', map(int, members))
					members = mpi_recv(nm, MPI_FLOAT, i, MPI_TAG_UB, MPI_COMM_WORLD)
					ave.set_attr('pixerr', map(float, members))
					members = mpi_recv(3, MPI_FLOAT, i, MPI_TAG_UB, MPI_COMM_WORLD)
					ave.set_attr('refprojdir', map(float, members))
					ave.write_image(args[1], km)
					km += 1
	else:
		mpi_send(len(aveList), 1, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
		for im in xrange(len(aveList)):
			send_EMData(aveList[im], main_node,im+myid+70000)
			members = aveList[im].get_attr('members')
			mpi_send(len(members), 1, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
			mpi_send(members, len(members), MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
			members = aveList[im].get_attr('pixerr')
			mpi_send(members, len(members), MPI_FLOAT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
			try:
				members = aveList[im].get_attr('refprojdir')
				mpi_send(members, 3, MPI_FLOAT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
			except:
				mpi_send([-999.0,-999.0,-999.0], 3, MPI_FLOAT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)

	global_def.BATCH = False
	mpi_barrier(MPI_COMM_WORLD)
	from mpi import mpi_finalize
	mpi_finalize()
Esempio n. 29
0
def main():
    import os
    import sys
    from optparse import OptionParser
    from global_def import SPARXVERSION
    import global_def
    arglist = []
    for arg in sys.argv:
        arglist.append(arg)
    progname = os.path.basename(arglist[0])
    usage = progname + " stack ref_vol outdir  <maskfile> --ir=inner_radius --ou=outer_radius --rs=ring_step --xr=x_range --ynumber=y_numbers  --txs=translational_search_stepx  --delta=angular_step --an=angular_neighborhood --center=1 --maxit=max_iter --CTF --snr=1.0  --ref_a=S --sym=c1 --datasym=symdoc --new"

    parser = OptionParser(usage, version=SPARXVERSION)
    #parser.add_option("--ir",                 type="float", 	     default= -1,                 help="inner radius for rotational correlation > 0 (set to 1) (Angstroms)")
    parser.add_option(
        "--ou",
        type="float",
        default=-1,
        help=
        "outer radius for rotational 2D correlation < int(nx/2)-1 (set to the radius of the particle) (Angstroms)"
    )
    parser.add_option(
        "--rs",
        type="int",
        default=1,
        help="step between rings in rotational correlation >0  (set to 1)")
    parser.add_option(
        "--xr",
        type="string",
        default=" 4  2 1  1   1",
        help=
        "range for translation search in x direction, search is +/-xr (Angstroms) "
    )
    parser.add_option(
        "--txs",
        type="string",
        default="1 1 1 0.5 0.25",
        help=
        "step size of the translation search in x directions, search is -xr, -xr+ts, 0, xr-ts, xr (Angstroms)"
    )
    parser.add_option(
        "--y_restrict",
        type="string",
        default="-1 -1 -1 -1 -1",
        help=
        "range for translational search in y-direction, search is +/-y_restrict in Angstroms. This only applies to local search, i.e., when an is not -1. If y_restrict < 0, then for ihrsrlocalcons (option --localcons local search with consistency), the y search range is set such that it is the same ratio to dp as angular search range is to dphi. For regular ihrsr, y search range is the full range when y_restrict< 0. Default is -1."
    )
    parser.add_option(
        "--ynumber",
        type="string",
        default="4 8 16 32 32",
        help=
        "even number of the translation search in y direction, search is (-dpp/2,-dpp/2+dpp/ny,,..,0,..,dpp/2-dpp/ny dpp/2]"
    )
    parser.add_option("--delta",
                      type="string",
                      default=" 10 6 4  3   2",
                      help="angular step of reference projections")
    parser.add_option(
        "--an",
        type="string",
        default="-1",
        help=
        "angular neighborhood for local searches (default -1, meaning do exhaustive search)"
    )
    parser.add_option(
        "--maxit",
        type="int",
        default=30,
        help=
        "maximum number of iterations performed for each angular step (default 30) "
    )
    parser.add_option("--CTF",
                      action="store_true",
                      default=False,
                      help="CTF correction")
    parser.add_option("--snr",
                      type="float",
                      default=1.0,
                      help="Signal-to-Noise Ratio of the data (default 1)")
    parser.add_option("--MPI",
                      action="store_true",
                      default=True,
                      help="use MPI version")
    #parser.add_option("--fourvar",           action="store_true",   default=False,               help="compute Fourier variance")
    parser.add_option("--apix",
                      type="float",
                      default=-1.0,
                      help="pixel size in Angstroms")
    parser.add_option("--dp",
                      type="float",
                      default=-1.0,
                      help="delta z - translation in Angstroms")
    parser.add_option("--dphi",
                      type="float",
                      default=-1.0,
                      help="delta phi - rotation in degrees")

    parser.add_option(
        "--ndp",
        type="int",
        default=12,
        help=
        "In symmetrization search, number of delta z steps equals to 2*ndp+1")
    parser.add_option(
        "--ndphi",
        type="int",
        default=12,
        help="In symmetrization search,number of dphi steps equas to 2*ndphi+1"
    )
    parser.add_option("--dp_step",
                      type="float",
                      default=0.1,
                      help="delta z (Angstroms) step  for symmetrization")
    parser.add_option("--dphi_step",
                      type="float",
                      default=0.1,
                      help="dphi step for symmetrization")

    parser.add_option(
        "--psi_max",
        type="float",
        default=10.0,
        help=
        "maximum psi - how far rotation in plane can can deviate from 90 or 270 degrees (default 10)"
    )
    parser.add_option("--rmin",
                      type="float",
                      default=0.0,
                      help="minimal radius for hsearch (Angstroms)")
    parser.add_option("--rmax",
                      type="float",
                      default=80.0,
                      help="maximal radius for hsearch (Angstroms)")
    parser.add_option("--fract",
                      type="float",
                      default=0.7,
                      help="fraction of the volume used for helical search")
    parser.add_option("--sym",
                      type="string",
                      default="c1",
                      help="symmetry of the structure")
    parser.add_option("--function",
                      type="string",
                      default="helical",
                      help="name of the reference preparation function")
    parser.add_option("--datasym",
                      type="string",
                      default="datasym.txt",
                      help="symdoc")
    parser.add_option(
        "--nise",
        type="int",
        default=200,
        help="start symmetrization after nise steps (default 200)")
    parser.add_option("--npad",
                      type="int",
                      default=2,
                      help="padding size for 3D reconstruction, (default 2)")
    parser.add_option("--debug",
                      action="store_true",
                      default=False,
                      help="debug")
    parser.add_option("--new",
                      action="store_true",
                      default=False,
                      help="use rectangular recon and projection version")
    parser.add_option(
        "--initial_theta",
        type="float",
        default=90.0,
        help="intial theta for reference projection (default 90)")
    parser.add_option(
        "--delta_theta",
        type="float",
        default=1.0,
        help="delta theta for reference projection (default 1.0)")
    parser.add_option("--WRAP",
                      type="int",
                      default=1,
                      help="do helical wrapping (default 1, meaning yes)")

    (options, args) = parser.parse_args(arglist[1:])
    if len(args) < 1 or len(args) > 5:
        print("usage: " + usage + "\n")
        print("Please run '" + progname + " -h' for detailed options")
    else:
        # Convert input arguments in the units/format as expected by ihrsr_MPI in applications.
        if options.apix < 0:
            print("Please enter pixel size")
            sys.exit()

        rminp = int((float(options.rmin) / options.apix) + 0.5)
        rmaxp = int((float(options.rmax) / options.apix) + 0.5)
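        # rmin/rmax are given in Angstroms and converted to pixels by rounding;
        # e.g. rmin=10 A at apix=2.29 A/px gives rminp = int(10/2.29 + 0.5) = 4 px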

        from utilities import get_input_from_string, get_im

        xr = get_input_from_string(options.xr)
        txs = get_input_from_string(options.txs)
        y_restrict = get_input_from_string(options.y_restrict)

        irp = 1
        if options.ou < 0: oup = -1
        else: oup = int((options.ou / options.apix) + 0.5)
        xrp = ''
        txsp = ''
        y_restrict2 = ''

        for i in xrange(len(xr)):
            xrp += " " + str(float(xr[i]) / options.apix)
        for i in xrange(len(txs)):
            txsp += " " + str(float(txs[i]) / options.apix)
        # now y_restrict has the same format as x search range .... has to change ihrsr accordingly
        for i in xrange(len(y_restrict)):
            y_restrict2 += " " + str(float(y_restrict[i]) / options.apix)

        if options.MPI:
            from mpi import mpi_init, mpi_finalize
            sys.argv = mpi_init(len(sys.argv), sys.argv)

        if global_def.CACHE_DISABLE:
            from utilities import disable_bdb_cache
            disable_bdb_cache()

        from applications import ihrsr
        global_def.BATCH = True
        if len(args) < 4: mask = None
        else: mask = args[3]
        ihrsr(args[0], args[1], args[2], mask, irp, oup, options.rs, xrp,
              options.ynumber, txsp, options.delta, options.initial_theta,
              options.delta_theta, options.an, options.maxit, options.CTF,
              options.snr, options.dp, options.ndp, options.dp_step,
              options.dphi, options.ndphi, options.dphi_step, options.psi_max,
              rminp, rmaxp, options.fract, options.nise, options.npad,
              options.sym, options.function, options.datasym, options.apix,
              options.debug, options.MPI, options.WRAP, y_restrict2)
        global_def.BATCH = False

        if options.MPI:
            from mpi import mpi_finalize
            mpi_finalize()
Esempio n. 30
0
def main():
    import os
    import sys
    from optparse import OptionParser
    from global_def import SPARXVERSION
    import global_def
    arglist = []
    for arg in sys.argv:
        arglist.append(arg)
    progname = os.path.basename(arglist[0])
    usage = progname + " stack ref_vol outdir --dp=rise --dphi=rotation --apix=pixel_size --phistep=phi_step --zstep=z_step --fract=helicising_fraction --rmax=maximum_radius --rmin=min_radius --CTF --sym=c1 --function=user_function --maxit=max_iter --MPI"
    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option("--dp",
                      type="float",
                      default=1.0,
                      help="delta z - translation in Angstroms")
    parser.add_option("--dphi",
                      type="float",
                      default=1.0,
                      help="delta phi - rotation in degrees")
    parser.add_option("--apix",
                      type="float",
                      default=1.84,
                      help="pixel size in Angstroms")
    parser.add_option("--rmin",
                      type="int",
                      default=0,
                      help="minimal radial extent of structure")
    parser.add_option("--rmax",
                      type="int",
                      default=70,
                      help="maximal radial extent of structure")
    parser.add_option("--fract",
                      type="float",
                      default=0.66,
                      help="fraction of the volume used for helical search")
    parser.add_option("--sym",
                      type="string",
                      default="c1",
                      help="symmetry of the structure")
    parser.add_option("--function",
                      type="string",
                      default="helical",
                      help="name of the reference preparation function")
    parser.add_option("--zstep",
                      type="int",
                      default=1,
                      help="Step size for translational search along z")
    parser.add_option("--CTF",
                      action="store_true",
                      default=False,
                      help="CTF correction")
    parser.add_option("--maxit",
                      type="int",
                      default=5,
                      help="maximum number of iterations performed")
    parser.add_option("--MPI",
                      action="store_true",
                      default=False,
                      help="use MPI version")
    (options, args) = parser.parse_args(arglist[1:])
    if len(args) != 3:
        print "usage: " + usage
        print "Please run '" + progname + " -h' for detailed options"
    else:
        if options.MPI:
            from mpi import mpi_init, mpi_finalize
            sys.argv = mpi_init(len(sys.argv), sys.argv)
        else:
            print "There is only MPI version of sxfilrecons3d.py. See SPARX wiki page for downloading MyMPI details."
            sys.exit()

        if global_def.CACHE_DISABLE:
            from utilities import disable_bdb_cache
            disable_bdb_cache()

        from development import filrecons3D_MPI
        global_def.BATCH = True
        filrecons3D_MPI(args[0], args[1], args[2], options.dp, options.dphi,
                        options.apix, options.function, options.zstep,
                        options.fract, options.rmax, options.rmin, options.CTF,
                        options.maxit, options.sym)

        global_def.BATCH = False

        if options.MPI: mpi_finalize()
Esempio n. 31
0
def main(args):
    from utilities import if_error_then_all_processes_exit_program, write_text_row, drop_image, model_gauss_noise, get_im, set_params_proj, wrap_mpi_bcast, model_circle
    from logger import Logger, BaseLogger_Files
    from mpi import mpi_init, mpi_finalize, MPI_COMM_WORLD, mpi_comm_rank, mpi_comm_size, mpi_barrier
    import user_functions
    import sys
    import os
    from applications import MPI_start_end
    from optparse import OptionParser, SUPPRESS_HELP
    from global_def import SPARXVERSION, ERROR
    from EMAN2 import EMData
    from multi_shc import multi_shc

    progname = os.path.basename(sys.argv[0])
    usage = progname + " stack  [output_directory] --ir=inner_radius --rs=ring_step --xr=x_range --yr=y_range  --ts=translational_search_step  --delta=angular_step --center=center_type --maxit1=max_iter1 --maxit2=max_iter2 --L2threshold=0.1 --ref_a=S --sym=c1"
    usage += """

stack			2D images in a stack file: (default required string)
directory		output directory name: into which the results will be written (if it does not exist, it will be created, if it does exist, the results will be written possibly overwriting previous results) (default required string)
"""

    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option(
        "--radius",
        type="int",
        help=
        "radius of the particle: has to be less than < int(nx/2)-1 (default required int)"
    )

    parser.add_option(
        "--xr",
        type="string",
        default='0',
        help=
        "range for translation search in x direction: search is +/xr in pixels (default '0')"
    )
    parser.add_option(
        "--yr",
        type="string",
        default='0',
        help=
        "range for translation search in y direction: if omitted will be set to xr, search is +/yr in pixels (default '0')"
    )
    parser.add_option("--mask3D",
                      type="string",
                      default=None,
                      help="3D mask file: (default sphere)")
    parser.add_option(
        "--moon_elimination",
        type="string",
        default='',
        help=
        "elimination of disconnected pieces: two arguments: mass in KDa and pixel size in px/A separated by comma, no space (default none)"
    )
    parser.add_option(
        "--ir",
        type="int",
        default=1,
        help="inner radius for rotational search: > 0 (default 1)")

    # 'radius' and 'ou' are the same as per Pawel's request; 'ou' is hidden from the user
    # the 'ou' variable is not changed to 'radius' in the 'sparx' program. This change is at interface level only for sxviper.
    ##### XXXXXXXXXXXXXXXXXXXXXX option does not exist in docs XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    parser.add_option("--ou", type="int", default=-1, help=SUPPRESS_HELP)
    parser.add_option(
        "--rs",
        type="int",
        default=1,
        help="step between rings in rotational search: >0 (default 1)")
    parser.add_option(
        "--ts",
        type="string",
        default='1.0',
        help=
        "step size of the translation search in x-y directions: search is -xr, -xr+ts, 0, xr-ts, xr, can be fractional (default '1.0')"
    )
    parser.add_option(
        "--delta",
        type="string",
        default='2.0',
        help="angular step of reference projections: (default '2.0')")
    parser.add_option(
        "--center",
        type="float",
        default=-1.0,
        help=
        "centering of 3D template: average shift method; 0: no centering; 1: center of gravity (default -1.0)"
    )
    parser.add_option(
        "--maxit1",
        type="int",
        default=400,
        help=
        "maximum number of iterations performed for the GA part: (default 400)"
    )
    parser.add_option(
        "--maxit2",
        type="int",
        default=50,
        help=
        "maximum number of iterations performed for the finishing up part: (default 50)"
    )
    parser.add_option(
        "--L2threshold",
        type="float",
        default=0.03,
        help=
        "stopping criterion of GA: given as a maximum relative dispersion of volumes' L2 norms: (default 0.03)"
    )
    parser.add_option(
        "--ref_a",
        type="string",
        default='S',
        help=
        "method for generating the quasi-uniformly distributed projection directions: (default S)"
    )
    parser.add_option(
        "--sym",
        type="string",
        default='c1',
        help="point-group symmetry of the structure: (default c1)")

    # parser.add_option("--function", type="string", default="ref_ali3d",         help="name of the reference preparation function (ref_ali3d by default)")
    ##### XXXXXXXXXXXXXXXXXXXXXX option does not exist in docs XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    parser.add_option("--function",
                      type="string",
                      default="ref_ali3d",
                      help=SUPPRESS_HELP)

    parser.add_option(
        "--nruns",
        type="int",
        default=6,
        help=
        "GA population: aka number of quasi-independent volumes (default 6)")
    parser.add_option(
        "--doga",
        type="float",
        default=0.1,
        help=
        "do GA when fraction of orientation changes less than 1.0 degrees is at least doga: (default 0.1)"
    )
    ##### XXXXXXXXXXXXXXXXXXXXXX option does not exist in docs XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    parser.add_option("--npad",
                      type="int",
                      default=2,
                      help="padding size for 3D reconstruction (default=2)")
    parser.add_option(
        "--fl",
        type="float",
        default=0.25,
        help=
        "cut-off frequency applied to the template volume: using a hyperbolic tangent low-pass filter (default 0.25)"
    )
    parser.add_option(
        "--aa",
        type="float",
        default=0.1,
        help="fall-off of hyperbolic tangent low-pass filter: (default 0.1)")
    parser.add_option(
        "--pwreference",
        type="string",
        default='',
        help="text file with a reference power spectrum: (default none)")
    parser.add_option("--debug",
                      action="store_true",
                      default=False,
                      help="debug info printout: (default False)")

    ##### XXXXXXXXXXXXXXXXXXXXXX option does not exist in docs XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    parser.add_option("--return_options",
                      action="store_true",
                      dest="return_options",
                      default=False,
                      help=SUPPRESS_HELP)

    #parser.add_option("--an",       type="string", default= "-1",               help="NOT USED angular neighborhood for local searches (phi and theta)")
    #parser.add_option("--CTF",      action="store_true", default=False,         help="NOT USED Consider CTF correction during the alignment ")
    #parser.add_option("--snr",      type="float",  default= 1.0,                help="NOT USED Signal-to-Noise Ratio of the data (default 1.0)")
    # (options, args) = parser.parse_args(sys.argv[1:])

    required_option_list = ['radius']
    (options, args) = parser.parse_args(args)
    # option_dict = vars(options)
    # print parser

    if options.return_options:
        return parser

    if options.moon_elimination == "":
        options.moon_elimination = []
    else:
        options.moon_elimination = map(float,
                                       options.moon_elimination.split(","))

    # Making sure all required options appeared.
    for required_option in required_option_list:
        if not options.__dict__[required_option]:
            print "\n ==%s== mandatory option is missing.\n" % required_option
            print "Please run '" + progname + " -h' for detailed options"
            return 1

    if len(args) < 2 or len(args) > 3:
        print "usage: " + usage
        print "Please run '" + progname + " -h' for detailed options"
        return 1

    mpi_init(0, [])

    log = Logger(BaseLogger_Files())

    # 'radius' and 'ou' are the same as per Pawel's request; 'ou' is hidden from the user
    # the 'ou' variable is not changed to 'radius' in the 'sparx' program. This change is at interface level only for sxviper.
    options.ou = options.radius
    runs_count = options.nruns
    mpi_rank = mpi_comm_rank(MPI_COMM_WORLD)
    mpi_size = mpi_comm_size(
        MPI_COMM_WORLD)  # Total number of processes, passed by --np option.
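    # mpi_size must be a multiple of --nruns (checked below); e.g. with --nruns=6,
    # "mpirun -np 12" leaves 2 processes for each quasi-independent run.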

    if mpi_rank == 0:
        all_projs = EMData.read_images(args[0])
        subset = range(len(all_projs))
        # if mpi_size > len(all_projs):
        # 	ERROR('Number of processes supplied by --np needs to be less than or equal to %d (total number of images) ' % len(all_projs), 'sxviper', 1)
        # 	mpi_finalize()
        # 	return
    else:
        all_projs = None
        subset = None

    outdir = args[1]
    if mpi_rank == 0:
        if mpi_size % options.nruns != 0:
            ERROR(
                'Number of processes needs to be a multiple of the total number of runs. The default number of runs is 6; you can change it with the --nruns option.',
                'sxviper', 1)
            mpi_finalize()
            return

        if os.path.exists(outdir):
            ERROR(
                'Output directory exists, please change the name and restart the program',
                "sxviper", 1)
            mpi_finalize()
            return

        os.mkdir(outdir)
        import global_def
        global_def.LOGFILE = os.path.join(outdir, global_def.LOGFILE)

    mpi_barrier(MPI_COMM_WORLD)

    if outdir[-1] != "/":
        outdir += "/"
    log.prefix = outdir

    # if len(args) > 2:
    # 	ref_vol = get_im(args[2])
    # else:
    ref_vol = None

    options.user_func = user_functions.factory[options.function]

    options.CTF = False
    options.snr = 1.0
    options.an = -1.0
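    # CTF correction, snr and the angular neighbourhood are not exposed by sxviper;
    # fixed values are passed to multi_shc through the options object.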
    from multi_shc import multi_shc
    out_params, out_vol, out_peaks = multi_shc(all_projs,
                                               subset,
                                               runs_count,
                                               options,
                                               mpi_comm=MPI_COMM_WORLD,
                                               log=log,
                                               ref_vol=ref_vol)

    mpi_finalize()
Esempio n. 32
0
    else:
        if args[1] == 'None':
            outdir = None
        else:
            outdir = args[1]

        if len(args) == 2:
            mask = None
        else:
            mask = args[2]

        from sp_development import multi_ali2d

        if sp_global_def.CACHE_DISABLE:
            from sp_utilities import disable_bdb_cache
            disable_bdb_cache()

        sp_global_def.BATCH = True
        multi_ali2d(args[0], outdir, mask, options.ir, options.ou, options.rs, options.xr, options.yr, options.ts, options.dst, options.center, \
         options.maxit, options.CTF, options.snr, options.Fourvar, options.Ng, options.num_ali, options.function, options.CUDA, options.GPUID, options.MPI)
        sp_global_def.BATCH = False


if __name__ == "__main__":
    sp_global_def.print_timestamp("Start")
    sp_global_def.write_command()
    main()
    sp_global_def.print_timestamp("Finish")
    mpi.mpi_finalize()
Esempio n. 33
0
def main():
    import os
    import sys
    from optparse import OptionParser
    from global_def import SPARXVERSION, ERROR
    import global_def
    arglist = []
    for arg in sys.argv:
        arglist.append(arg)
    progname = os.path.basename(arglist[0])
    usage = progname + " stack ref_vol outdir  <maskfile> --ir=inner_radius --ou=outer_radius --rs=ring_step --xr=x_range --ynumber=y_numbers  --txs=translational_search_stepx  --delta=angular_step --an=angular_neighborhood --maxit=max_iter --CTF --snr=1.0  --sym=c1 --datasym=symdoc"

    parser = OptionParser(usage, version=SPARXVERSION)
    #parser.add_option("--ir",                 type="float", 	     default= -1,                 help="Inner radius for psi angle search > 0 (set to 1) (Angstroms)")
    parser.add_option(
        "--ou",
        type="float",
        default=-1,
        help=
        "Outer radius for psi angle search < int(nx*pixel_size/2)-1 (Angstroms)"
    )
    parser.add_option(
        "--rs",
        type="int",
        default=1,
        help="Step between rings in rotational correlation >0  (set to 1)")
    parser.add_option(
        "--xr",
        type="string",
        default=" 4  2 1  1   1",
        help=
        "Range for translation search in x direction, search within +/-xr (Angstroms) "
    )
    parser.add_option(
        "--txs",
        type="string",
        default="1 1 1 0.5 0.25",
        help=
        "Step size of the translation search in x directions, search is -xr, -xr+ts, 0, xr-ts, xr (Angstroms)"
    )
    parser.add_option(
        "--y_restrict",
        type="string",
        default="-1 -1 -1 -1 -1",
        help=
        "Range for translational search in y-direction, search is +/-y_restrict in Angstroms. This only applies to local search, i.e., when an is not -1. If y_restrict < 0, then the y search range is set such that it is the same ratio to dp as angular search range is to dphi. For regular ihrsr, y search range is the full range when y_restrict< 0. Default is -1."
    )
    parser.add_option(
        "--ynumber",
        type="string",
        default="4 8 16 32 32",
        help=
        "Even number of the steps for the search in y direction, search is (-dpp/2,-dpp/2+dpp/ny,,..,0,..,dpp/2-dpp/ny dpp/2]"
    )
    parser.add_option("--delta",
                      type="string",
                      default="10 6 4  3  2",
                      help="Angular step of reference projections")
    parser.add_option("--an",
                      type="string",
                      default="-1",
                      help="Angular neighborhood for local searches")
    parser.add_option(
        "--maxit",
        type="int",
        default=30,
        help=
        "Maximum number of iterations performed for each angular step (set to 30) "
    )
    parser.add_option(
        "--searchit",
        type="int",
        default=1,
        help=
        "Number of iterations to predict/search before doing reconstruction and updating of reference volume. Default is 1. If maxit=3 and searchit=2, then for each of the 3 inner iterations, 2 iterations of prediction/search will be performed before generating reconstruction."
    )
    parser.add_option("--CTF",
                      action="store_true",
                      default=False,
                      help="CTF correction")
    parser.add_option("--snr",
                      type="float",
                      default=1.0,
                      help="Signal-to-Noise Ratio of the data")
    #parser.add_option("--MPI",                action="store_true",   default=False,               help="use MPI version")
    #parser.add_option("--fourvar",           action="store_true",   default=False,               help="compute Fourier variance")
    parser.add_option("--apix",
                      type="float",
                      default=-1.0,
                      help="Pixel size in Angstroms")
    parser.add_option("--dp",
                      type="float",
                      default=-1.0,
                      help="Helical symmetry axial rise (Angstroms)")
    parser.add_option("--dphi",
                      type="float",
                      default=-1.0,
                      help="Helical symmetry azimuthal angle")
    #parser.add_option("--MA",                 action="store_true",   default=False,      		  help="predict consistent parameters based on moving average")

    parser.add_option(
        "--psi_max",
        type="float",
        default=10.0,
        help=
        "Maximum psi - how far rotation in plane can can deviate from 90 or 270 degrees"
    )
    parser.add_option(
        "--rmin",
        type="float",
        default=0.0,
        help="Min radius for application of helical symmetry (Angstroms)")
    parser.add_option(
        "--rmax",
        type="float",
        default=80.0,
        help="Max radius for application of helical symmetry (Angstroms)")
    parser.add_option(
        "--fract",
        type="float",
        default=0.7,
        help="Fraction of volume used for application of helical symmetry")
    parser.add_option("--sym",
                      type="string",
                      default="c1",
                      help="Point-group symmetry of the filament")
    parser.add_option(
        "--function",
        type="string",
        default="helical",
        help="Name of the reference preparation function (Default: helical)")
    parser.add_option("--npad",
                      type="int",
                      default=2,
                      help="Padding size for 3D reconstruction (default=2)")
    parser.add_option("--debug",
                      action="store_true",
                      default=False,
                      help="debug")
    parser.add_option(
        "--initial_theta",
        type="float",
        default=90.0,
        help=
        "Intial theta for out-of-plane tilt search, the range will be (initial theta to 90.0 in steps of delta) (default = 90, no out-of-plane tilt)"
    )
    parser.add_option(
        "--delta_theta",
        type="float",
        default=1.0,
        help="Delta theta for out-of-plane tilt search (default = 1)")
    #parser.add_option("--boundaryavg",        action="store_true",   default=False,      		  help="boundaryavg")
    #parser.add_option("--MA_WRAP",            type="int",            default= 0,                  help="do wrapping in MA if MA_WRAP=1, else no wrapping in MA. Default is 0.")
    parser.add_option(
        "--seg_ny",
        type="int",
        default=256,
        help=
        "y dimension of desired segment size, should be related to fract in that fract ~ seg_ny/ny, where ny is dimension of input projections. (pixels)"
    )
    parser.add_option("--new",
                      action="store_true",
                      default=False,
                      help="use new version")
    parser.add_option("--snake",
                      action="store_true",
                      default=False,
                      help="use snake method")
    parser.add_option(
        "--snakeknots",
        type="int",
        default=-1,
        help=
        "maximal number of knots for each filament snake. If take default value -1, it will take nseg//2+1, where nseg is the number of segments in the filament"
    )

    (options, args) = parser.parse_args(arglist[1:])
    if len(args) < 3 or len(args) > 4:
        print "usage: " + usage + "\n"
        print "Please run '" + progname + " -h' for detailed options"
    else:
        global_def.BATCH = True
        # Convert input arguments in the units/format as expected by ihrsr_MPI in applications.
        if options.apix < 0:
            ERROR("Please specify pixel size apix", "sxheliconlocal", 1)
        if options.dp < 0 or options.dphi < 0:
            ERROR("Please specify helical symmetry parameters dp and dphi",
                  "sxheliconlocal", 1)
        if options.an <= 0:
            ERROR(
                "Angular search range (an) has to be given.  Only local searches are permitted.",
                "sxheliconlocal", 1)

        print " This code is under development, some instabilities are possible 12/28/2014"

        rminp = int((float(options.rmin) / options.apix) + 0.5)
        rmaxp = int((float(options.rmax) / options.apix) + 0.5)

        from utilities import get_input_from_string, get_im

        xr = get_input_from_string(options.xr)
        txs = get_input_from_string(options.txs)
        y_restrict = get_input_from_string(options.y_restrict)

        irp = 1
        if options.ou < 0: oup = -1
        else: oup = int((options.ou / options.apix) + 0.5)
        xrp = ""
        txsp = ""
        y_restrict2 = ""

        for i in xrange(len(xr)):
            xrp += str(float(xr[i]) / options.apix) + " "
        xrp = xrp[:-1]
        for i in xrange(len(txs)):
            txsp += str(float(txs[i]) / options.apix) + " "
        txsp = txsp[:-1]
        # now y_restrict has the same format as x search range .... has to change ihrsr accordingly
        for i in xrange(len(y_restrict)):
            y_restrict2 += str(float(y_restrict[i]) / options.apix) + " "
        y_restrict2 = y_restrict2[:-1]

        from mpi import mpi_init, mpi_finalize
        sys.argv = mpi_init(len(sys.argv), sys.argv)

        if global_def.CACHE_DISABLE:
            from utilities import disable_bdb_cache
            disable_bdb_cache()

        from applications import localhelicon_MPI, localhelicon_MPInew, localhelicon_MPIming
        if len(args) < 4: mask = None
        else: mask = args[3]
        if options.new:            localhelicon_MPInew(args[0], args[1], args[2], options.seg_ny, mask, irp, oup, options.rs, xrp, options.ynumber, \
txsp, options.delta, options.initial_theta, options.delta_theta, options.an, options.maxit, options.CTF, options.snr, \
options.dp, options.dphi, options.psi_max, \
rminp, rmaxp, options.fract, options.npad,options.sym, options.function,\
options.apix, options.debug, y_restrict2, options.searchit)
        elif options.snake:            localhelicon_MPIming(args[0], args[1], args[2], options.seg_ny, mask, irp, oup, options.rs, xrp, options.ynumber, \
txsp, options.delta, options.initial_theta, options.delta_theta, options.an, options.maxit, options.CTF, options.snr, \
options.dp, options.dphi, options.psi_max, \
rminp, rmaxp, options.fract, options.npad,options.sym, options.function,\
options.apix, options.debug, y_restrict2, options.searchit, options.snakeknots)
        else:            localhelicon_MPI(args[0], args[1], args[2], options.seg_ny, mask, irp, oup, options.rs, xrp, options.ynumber, \
      txsp, options.delta, options.initial_theta, options.delta_theta, options.an, options.maxit, options.CTF, options.snr, \
       options.dp, options.dphi, options.psi_max, \
      rminp, rmaxp, options.fract, options.npad,options.sym, options.function,\
      options.apix, options.debug, y_restrict2, options.searchit)
        global_def.BATCH = False

        from mpi import mpi_finalize
        mpi_finalize()
Esempio n. 34
0
def main():
    import os
    import sys
    from optparse import OptionParser
    from inspect import getframeinfo, currentframe
    from global_def import SPARXVERSION, ERROR
    import global_def
    from utilities import if_error_then_all_processes_exit_program

    program_name = os.path.basename(sys.argv[0])
    usage = program_name + """  input_image_path  output_directory  --selection_list=selection_list  --wn=CTF_WINDOW_SIZE --apix=PIXEL_SIZE  --Cs=CS  --voltage=VOLTAGE  --ac=AMP_CONTRAST  --f_start=FREA_START  --f_stop=FREQ_STOP  --vpp  --kboot=KBOOT  --overlap_x=OVERLAP_X  --overlap_y=OVERLAP_Y  --edge_x=EDGE_X  --edge_y=EDGE_Y  --check_consistency  --stack_mode  --debug_mode

Automated estimation of CTF parameters with error assessment.

All Micrographs Mode - Process all micrographs in a directory: 
	Specify a list of input micrographs using a wild card (*), called here input micrographs path pattern. 
	Use the wild card to indicate the variable part of the file names (e.g. serial number, time stamp, etc.). 
	Running from the command line requires enclosing the string in single quotes (') or double quotes ("). 
	sxgui.py will automatically add single quotes to the string. 
	BDB files cannot be selected as input micrographs. 
	Then, specify output directory where all outputs should be saved. 
	In this mode, all micrographs matching the path pattern will be processed.

	mpirun -np 16 sxcter.py './mic*.hdf' outdir_cter --wn=512 --apix=2.29 --Cs=2.0 --voltage=300 --ac=10.0

Selected Micrographs Mode - Process all micrographs in a selection list file:
	In addition to the input micrographs path pattern and output directory arguments, 
	specify the name of a micrograph selection list text file using the --selection_list option 
	(e.g. output of sxgui_unblur.py or sxgui_cter.py). The file extension must be ".txt". 
	In this mode, only micrographs in the selection list that match the file name part of the pattern (ignoring the directory paths) will be processed. 
	If a micrograph name in the selection list does not exist in the directory specified by the micrograph path pattern, processing of that micrograph will be skipped.

	mpirun -np 16 sxcter.py './mic*.hdf' outdir_cter --selection_list=mic_list.txt --wn=512 --apix=2.29 --Cs=2.0 --voltage=300 --ac=10.0

Single Micrograph Mode - Process a single micrograph: 
	In addition to the input micrographs path pattern and output directory arguments, 
	specify a single micrograph name using the --selection_list option. 
	In this mode, only the specified single micrograph will be processed. 
	If this micrograph name does not match the file name part of the pattern (ignoring the directory paths), the process will exit without processing it. 
	If this micrograph name matches the file name part of the pattern but does not exist in the directory specified by the micrograph path pattern, again the process will exit without processing it. 
	Use single processor for this mode.

	sxcter.py './mic*.hdf' outdir_cter --selection_list=mic0.hdf --wn=512 --apix=2.29 --Cs=2.0 --voltage=300 --ac=10.0

Stack Mode - Process a particle stack (not supported by SPHIRE GUI): 
	Use the --stack_mode option, then specify the path of a particle stack file (without wild card "*") and the output directory as arguments. 
	This mode ignores the --selection_list, --wn, --overlap_x, --overlap_y, --edge_x, and --edge_y options. 
	Use a single processor for this mode. Not supported by SPHIRE GUI (sxgui.py). 

	sxcter.py bdb:stack outdir_cter --apix=2.29 --Cs=2.0 --voltage=300 --ac=10.0 --stack_mode

"""
    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option(
        "--selection_list",
        type="string",
        default=None,
        help=
        "Micrograph selecting list: Specify path of a micrograph selection list text file for Selected Micrographs Mode. The file extension must be \'.txt\'. Alternatively, the file name of a single micrograph can be specified for Single Micrograph Mode. (default none)"
    )
    parser.add_option(
        "--wn",
        type="int",
        default=512,
        help=
        "CTF window size [pixels]: The size should be slightly larger than particle box size. This will be ignored in Stack Mode. (default 512)"
    )
    parser.add_option(
        "--apix",
        type="float",
        default=-1.0,
        help=
        "Pixel size [A/Pixels]: The pixel size of input micrograph(s) or images in input particle stack. (default -1.0)"
    )
    parser.add_option(
        "--Cs",
        type="float",
        default=2.0,
        help=
        "Microscope spherical aberration (Cs) [mm]: The spherical aberration (Cs) of microscope used for imaging. (default 2.0)"
    )
    parser.add_option(
        "--voltage",
        type="float",
        default=300.0,
        help=
        "Microscope voltage [kV]: The acceleration voltage of microscope used for imaging. (default 300.0)"
    )
    parser.add_option(
        "--ac",
        type="float",
        default=10.0,
        help=
        "Amplitude contrast [%]: The typical amplitude contrast is in the range of 7% - 14%. The value mainly depends on the thickness of the ice embedding the particles. (default 10.0)"
    )
    parser.add_option(
        "--f_start",
        type="float",
        default=-1.0,
        help=
        "Lowest resolution [A]: Lowest resolution to be considered in the CTF estimation. Determined automatically by default. (default -1.0)"
    )
    parser.add_option(
        "--f_stop",
        type="float",
        default=-1.0,
        help=
        "Highest resolution [A]: Highest resolution to be considered in the CTF estimation. Determined automatically by default. (default -1.0)"
    )
    parser.add_option(
        "--kboot",
        type="int",
        default=16,
        help=
        "Number of CTF estimates per micrograph: Used for error assessment. (default 16)"
    )
    parser.add_option(
        "--overlap_x",
        type="int",
        default=50,
        help=
        "X overlap [%]: Overlap between the windows in the x direction. This will be ignored in Stack Mode. (default 50)"
    )
    parser.add_option(
        "--overlap_y",
        type="int",
        default=50,
        help=
        "Y overlap [%]: Overlap between the windows in the y direction. This will be ignored in Stack Mode. (default 50)"
    )
    parser.add_option(
        "--edge_x",
        type="int",
        default=0,
        help=
        "Edge x [pixels]: Defines the edge of the tiling area in the x direction. Normally it does not need to be modified. This will be ignored in Stack Mode. (default 0)"
    )
    parser.add_option(
        "--edge_y",
        type="int",
        default=0,
        help=
        "Edge y [pixels]: Defines the edge of the tiling area in the y direction. Normally it does not need to be modified. This will be ignored in Stack Mode. (default 0)"
    )
    parser.add_option(
        "--check_consistency",
        action="store_true",
        default=False,
        help=
        "Check consistency of inputs: Create a text file containing the list of inconsistent Micrograph ID entries (i.e. inconsist_mic_list_file.txt). (default False)"
    )
    parser.add_option(
        "--stack_mode",
        action="store_true",
        default=False,
        help=
        "Use stack mode: Use a stack as the input. Please set the file path of a stack as the first argument and output directory for the second argument. This is advanced option. Not supported by sxgui. (default False)"
    )
    parser.add_option(
        "--debug_mode",
        action="store_true",
        default=False,
        help="Enable debug mode: Print out debug information. (default False)")
    parser.add_option(
        "--vpp",
        action="store_true",
        default=False,
        help="Volta Phase Plate - fit smplitude contrast. (default False)")
    parser.add_option("--defocus_min",
                      type="float",
                      default=0.3,
                      help="Minimum defocus search [um] (default 0.3)")
    parser.add_option("--defocus_max",
                      type="float",
                      default=9.0,
                      help="Maximum defocus search [um] (default 9.0)")
    parser.add_option("--defocus_step",
                      type="float",
                      default=0.1,
                      help="Step defocus search [um] (default 0.1)")
    parser.add_option("--phase_min",
                      type="float",
                      default=5.0,
                      help="Minimum phase search [degrees] (default 5.0)")
    parser.add_option("--phase_max",
                      type="float",
                      default=175.0,
                      help="Maximum phase search [degrees] (default 175.0)")
    parser.add_option("--phase_step",
                      type="float",
                      default=5.0,
                      help="Step phase search [degrees] (default 5.0)")
    parser.add_option("--pap",
                      action="store_true",
                      default=False,
                      help="Use power spectrum for fitting. (default False)")

    (options, args) = parser.parse_args(sys.argv[1:])

    # ====================================================================================
    # Prepare processing
    # ====================================================================================
    # ------------------------------------------------------------------------------------
    # Set up MPI related variables
    # ------------------------------------------------------------------------------------
    # Detect if program is running under MPI
    RUNNING_UNDER_MPI = "OMPI_COMM_WORLD_SIZE" in os.environ

    main_mpi_proc = 0
    if RUNNING_UNDER_MPI:
        from mpi import mpi_init, mpi_comm_rank, mpi_comm_size, mpi_barrier, MPI_COMM_WORLD

        sys.argv = mpi_init(len(sys.argv), sys.argv)
        my_mpi_proc_id = mpi_comm_rank(MPI_COMM_WORLD)
        n_mpi_procs = mpi_comm_size(MPI_COMM_WORLD)
        global_def.MPI = True

    else:
        my_mpi_proc_id = 0
        n_mpi_procs = 1

    # ------------------------------------------------------------------------------------
    # Set up SPHIRE global definitions
    # ------------------------------------------------------------------------------------
    if global_def.CACHE_DISABLE:
        from utilities import disable_bdb_cache
        disable_bdb_cache()

    # Change the name log file for error message
    original_logfilename = global_def.LOGFILE
    global_def.LOGFILE = os.path.splitext(
        program_name)[0] + '_' + original_logfilename + '.txt'

    # ------------------------------------------------------------------------------------
    # Check error conditions of arguments and options, then prepare variables for arguments
    # ------------------------------------------------------------------------------------
    input_image_path = None
    output_directory = None
    # not a real loop: a single-pass "while True" used below so that "break" can bail out when errors need to be reported
    error_status = None
    # change input unit
    freq_start = -1.0
    freq_stop = -1.0
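    # f_start/f_stop are resolutions in Angstroms; they are converted below to absolute
    # spatial frequencies (1/A), e.g. f_start=20 A gives freq_start = 1/20 = 0.05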

    if options.f_start > 0.0:
        if options.f_start <= 0.5:
            ERROR("f_start should be in Angstrom", "sxcter",
                  1)  # exclude abs frequencies and spatial frequencies
        else:
            freq_start = 1. / options.f_start

    if options.f_stop > 0.0:
        if options.f_stop <= 0.5:
            ERROR("f_stop should be in Angstrom", "sxcter",
                  1)  # exclude abs frequencies and spatial frequencies
        else:
            freq_stop = 1. / options.f_stop

    while True:
        # --------------------------------------------------------------------------------
        # Check the number of arguments. If OK, then prepare variables for them
        # --------------------------------------------------------------------------------
        if len(args) != 2:
            error_status = (
                "Please check usage for number of arguments.\n Usage: " +
                usage + "\n" + "Please run %s -h for help." % (program_name),
                getframeinfo(currentframe()))
            break

        # NOTE: 2015/11/27 Toshio Moriya
    # Require single quotes (') or double quotes (") when an input micrograph pattern is given for input_image_path
        #  so that sys.argv does not automatically expand wild card and create a list of file names
        #
        input_image_path = args[0]
        output_directory = args[1]

        # --------------------------------------------------------------------------------
        # NOTE: 2016/03/17 Toshio Moriya
        # cter_mrk() will take care of all the error conditions
        # --------------------------------------------------------------------------------

        break
    if_error_then_all_processes_exit_program(error_status)
    #  Toshio, please see how to make it informative
    assert input_image_path != None, " directory  missing  input_image_path"
    assert output_directory != None, " directory  missing  output_directory"

    if options.vpp == False:
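        # the --defocus_* and --phase_* options only make sense together with --vpp;
        # flag the command line as wrong if any of them was given without --vpp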
        wrong_params = False
        vpp_options = [
            "--defocus_min", "--defocus_max", "--defocus_step", "--phase_min",
            "--phase_max", "--phase_step"
        ]
        for command_token in sys.argv:
            for vppo in vpp_options:
                if command_token.find(vppo) > -1: wrong_params = True
                if wrong_params: break
            if wrong_params: break
        if wrong_params:
            ERROR(
                "Some options are valid only for Volta Phase Plate command  %s"
                % command_token, "sxcter", 1, my_mpi_proc_id)

    if my_mpi_proc_id == main_mpi_proc:
        command_line = ""
        for command_token in sys.argv:
            command_line += command_token + "  "
        print(" ")
        print("Shell line command:")
        print(command_line)

    if options.vpp:
        vpp_options = [
            options.defocus_min, options.defocus_max, options.defocus_step,
            options.phase_min, options.phase_max, options.phase_step
        ]
        from morphology import cter_vpp
        result = cter_vpp(input_image_path, output_directory,
                          options.selection_list, options.wn, options.apix,
                          options.Cs, options.voltage, options.ac, freq_start,
                          freq_stop, options.kboot, options.overlap_x,
                          options.overlap_y, options.edge_x, options.edge_y,
                          options.check_consistency, options.stack_mode,
                          options.debug_mode, program_name, vpp_options,
                          RUNNING_UNDER_MPI, main_mpi_proc, my_mpi_proc_id,
                          n_mpi_procs)
    elif options.pap:
        from morphology import cter_pap
        result = cter_pap(input_image_path, output_directory,
                          options.selection_list, options.wn, options.apix,
                          options.Cs, options.voltage, options.ac, freq_start,
                          freq_stop, options.kboot, options.overlap_x,
                          options.overlap_y, options.edge_x, options.edge_y,
                          options.check_consistency, options.stack_mode,
                          options.debug_mode, program_name, RUNNING_UNDER_MPI,
                          main_mpi_proc, my_mpi_proc_id, n_mpi_procs)
    else:
        from morphology import cter_mrk
        result = cter_mrk(input_image_path, output_directory,
                          options.selection_list, options.wn, options.apix,
                          options.Cs, options.voltage, options.ac, freq_start,
                          freq_stop, options.kboot, options.overlap_x,
                          options.overlap_y, options.edge_x, options.edge_y,
                          options.check_consistency, options.stack_mode,
                          options.debug_mode, program_name, RUNNING_UNDER_MPI,
                          main_mpi_proc, my_mpi_proc_id, n_mpi_procs)

    if RUNNING_UNDER_MPI:
        mpi_barrier(MPI_COMM_WORLD)

    if main_mpi_proc == my_mpi_proc_id:
        if options.debug_mode:
            print("Returned value from cter_mrk() := ", result)
        print(" ")
        print("DONE!!!")
        print(" ")

    # ====================================================================================
    # Clean up
    # ====================================================================================
    # ------------------------------------------------------------------------------------
    # Reset SPHIRE global definitions
    # ------------------------------------------------------------------------------------
    global_def.LOGFILE = original_logfilename

    # ------------------------------------------------------------------------------------
    # Clean up MPI related variables
    # ------------------------------------------------------------------------------------
    if RUNNING_UNDER_MPI:
        mpi_barrier(MPI_COMM_WORLD)
        from mpi import mpi_finalize
        mpi_finalize()

    sys.stdout.flush()
    sys.exit(0)
Esempio n. 35
0
def main():

	import os
	import sys
	from optparse import OptionParser
	from global_def import SPARXVERSION

	def params_3D_2D_NEW(phi, theta, psi, s2x, s2y, mirror):
		# the final ali2d parameters already combine the shift operation (applied first) and the rotation operation (applied second) for parameters converted from 3D
		if mirror:
			m = 1
			alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)
		else:
			m = 0
			alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)
		return  alpha, sx, sy, m
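	# Illustrative (hypothetical) use of the helper above: given the 3D projection
	# parameters of an image, obtain the equivalent 2D alignment parameters, e.g.
	#   alpha, sx, sy, m = params_3D_2D_NEW(phi=10., theta=80., psi=25., s2x=1.5, s2y=-0.5, mirror=0)
	# which composes the shift (s2x, s2y) with the in-plane rotation 360-psi
	# (or 540-psi for mirrored images), exactly as done for every group member further below.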
	
	progname = os.path.basename(sys.argv[0])
	usage = progname + " prj_stack  --ave2D= --var2D=  --ave3D= --var3D= --img_per_grp= --fl=  --aa=   --sym=symmetry --CTF"
	parser = OptionParser(usage, version=SPARXVERSION)
	
	parser.add_option("--output_dir",   type="string"	   ,	default="./",				    help="Output directory")
	parser.add_option("--ave2D",		type="string"	   ,	default=False,				help="Write to the disk a stack of 2D averages")
	parser.add_option("--var2D",		type="string"	   ,	default=False,				help="Write to the disk a stack of 2D variances")
	parser.add_option("--ave3D",		type="string"	   ,	default=False,				help="Write to the disk reconstructed 3D average")
	parser.add_option("--var3D",		type="string"	   ,	default=False,				help="Compute 3D variability (time consuming!)")
	parser.add_option("--img_per_grp",	type="int"         ,	default=100,	     	    help="Number of neighbouring projections.(Default is 100)")
	parser.add_option("--no_norm",		action="store_true",	default=False,				help="Do not use normalization.(Default is to apply normalization)")
	#parser.add_option("--radius", 	    type="int"         ,	default=-1   ,				help="radius for 3D variability" )
	parser.add_option("--npad",			type="int"         ,	default=2    ,				help="Number of time to pad the original images.(Default is 2 times padding)")
	parser.add_option("--sym" , 		type="string"      ,	default="c1",				help="Symmetry. (Default is no symmetry)")
	parser.add_option("--fl",			type="float"       ,	default=0.0,				help="Low pass filter cutoff in absolute frequency (0.0 - 0.5) and is applied to decimated images. (Default - no filtration)")
	parser.add_option("--aa",			type="float"       ,	default=0.02 ,				help="Fall off of the filter. Use default value if user has no clue about falloff (Default value is 0.02)")
	parser.add_option("--CTF",			action="store_true",	default=False,				help="Use CFT correction.(Default is no CTF correction)")
	#parser.add_option("--MPI" , 		action="store_true",	default=False,				help="use MPI version")
	#parser.add_option("--radiuspca", 	type="int"         ,	default=-1   ,				help="radius for PCA" )
	#parser.add_option("--iter", 		type="int"         ,	default=40   ,				help="maximum number of iterations (stop criterion of reconstruction process)" )
	#parser.add_option("--abs", 		type="float"   ,        default=0.0  ,				help="minimum average absolute change of voxels' values (stop criterion of reconstruction process)" )
	#parser.add_option("--squ", 		type="float"   ,	    default=0.0  ,				help="minimum average squared change of voxels' values (stop criterion of reconstruction process)" )
	parser.add_option("--VAR" , 		action="store_true",	default=False,				help="Stack of input consists of 2D variances (Default False)")
	parser.add_option("--decimate",     type  ="float",         default=0.25,               help="Image decimate rate, a number less than 1. (Default is 0.25)")
	parser.add_option("--window",       type  ="int",           default=0,                  help="Target image size relative to original image size. (Default value is zero.)")
	#parser.add_option("--SND",			action="store_true",	default=False,				help="compute squared normalized differences (Default False)")
	#parser.add_option("--nvec",			type="int"         ,	default=0    ,				help="Number of eigenvectors, (Default = 0 meaning no PCA calculated)")
	parser.add_option("--symmetrize",	action="store_true",	default=False,				help="Prepare input stack for handling symmetry (Default False)")
	parser.add_option("--overhead",     type  ="float",         default=0.5,                help="python overhead per CPU.")

	(options,args) = parser.parse_args()
	#####
	from mpi import mpi_init, mpi_comm_rank, mpi_comm_size, mpi_recv, MPI_COMM_WORLD
	from mpi import mpi_barrier, mpi_reduce, mpi_bcast, mpi_send, MPI_FLOAT, MPI_SUM, MPI_INT, MPI_MAX
	from mpi import mpi_comm_split, mpi_comm_split_type, MPI_COMM_TYPE_SHARED, MPI_INFO_NULL
	#from mpi import *
	from applications   import MPI_start_end
	from reconstruction import recons3d_em, recons3d_em_MPI
	from reconstruction	import recons3d_4nn_MPI, recons3d_4nn_ctf_MPI
	from utilities      import print_begin_msg, print_end_msg, print_msg
	from utilities      import read_text_row, get_image, get_im, wrap_mpi_send, wrap_mpi_recv
	from utilities      import bcast_EMData_to_all, bcast_number_to_all
	from utilities      import get_symt

	#  This is code for handling symmetries by the above program.  To be incorporated. PAP 01/27/2015

	from EMAN2db import db_open_dict

	# Set up global variables related to bdb cache 
	if global_def.CACHE_DISABLE:
		from utilities import disable_bdb_cache
		disable_bdb_cache()
	
	# Set up global variables related to ERROR function
	global_def.BATCH = True
	
	# detect if program is running under MPI
	RUNNING_UNDER_MPI = "OMPI_COMM_WORLD_SIZE" in os.environ
	if RUNNING_UNDER_MPI: global_def.MPI = True
	if options.output_dir =="./": current_output_dir = os.path.abspath(options.output_dir)
	else: current_output_dir = options.output_dir
	if options.symmetrize :
		if RUNNING_UNDER_MPI:
			try:
				sys.argv = mpi_init(len(sys.argv), sys.argv)
				try:	
					number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
					if( number_of_proc > 1 ):
						ERROR("Cannot use more than one CPU for symmetry preparation","sx3dvariability",1)
				except:
					pass
			except:
				pass
		if not os.path.exists(current_output_dir): os.mkdir(current_output_dir)
		
		#  Input
		#instack = "Clean_NORM_CTF_start_wparams.hdf"
		#instack = "bdb:data"
		
		
		from logger import Logger,BaseLogger_Files
		if os.path.exists(os.path.join(current_output_dir, "log.txt")): os.remove(os.path.join(current_output_dir, "log.txt"))
		log_main=Logger(BaseLogger_Files())
		log_main.prefix = os.path.join(current_output_dir, "./")
		
		instack = args[0]
		sym = options.sym.lower()
		if( sym == "c1" ):
			ERROR("There is no need to symmetrize stack for C1 symmetry","sx3dvariability",1)
		
		line =""
		for a in sys.argv:
			line +=" "+a
		log_main.add(line)
	
		if(instack[:4] !="bdb:"):
			#if output_dir =="./": stack = "bdb:data"
			stack = "bdb:"+current_output_dir+"/data"
			delete_bdb(stack)
			junk = cmdexecute("sxcpy.py  "+instack+"  "+stack)
		else: stack = instack
		
		qt = EMUtil.get_all_attributes(stack,'xform.projection')

		na = len(qt)
		ts = get_symt(sym)
		ks = len(ts)
		angsa = [None]*na
		
		for k in range(ks):
			#Qfile = "Q%1d"%k
			#if options.output_dir!="./": Qfile = os.path.join(options.output_dir,"Q%1d"%k)
			Qfile = os.path.join(current_output_dir, "Q%1d"%k)
			#delete_bdb("bdb:Q%1d"%k)
			delete_bdb("bdb:"+Qfile)
			#junk = cmdexecute("e2bdb.py  "+stack+"  --makevstack=bdb:Q%1d"%k)
			junk = cmdexecute("e2bdb.py  "+stack+"  --makevstack=bdb:"+Qfile)
			#DB = db_open_dict("bdb:Q%1d"%k)
			DB = db_open_dict("bdb:"+Qfile)
			for i in range(na):
				ut = qt[i]*ts[k]
				DB.set_attr(i, "xform.projection", ut)
				#bt = ut.get_params("spider")
				#angsa[i] = [round(bt["phi"],3)%360.0, round(bt["theta"],3)%360.0, bt["psi"], -bt["tx"], -bt["ty"]]
			#write_text_row(angsa, 'ptsma%1d.txt'%k)
			#junk = cmdexecute("e2bdb.py  "+stack+"  --makevstack=bdb:Q%1d"%k)
			#junk = cmdexecute("sxheader.py  bdb:Q%1d  --params=xform.projection  --import=ptsma%1d.txt"%(k,k))
			DB.close()
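		# At this point there are ks virtual stacks Q0..Q(ks-1); in stack Qk every
		# projection transform has been multiplied by the k-th symmetry operator ts[k].
		# They are now merged into a single virtual stack "sdata", ks times the length
		# of the input stack, which is the layout the non-symmetrize branch expects.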
		#if options.output_dir =="./": delete_bdb("bdb:sdata")
		delete_bdb("bdb:" + current_output_dir + "/"+"sdata")
		#junk = cmdexecute("e2bdb.py . --makevstack=bdb:sdata --filt=Q")
		sdata = "bdb:"+current_output_dir+"/"+"sdata"
		print(sdata)
		junk = cmdexecute("e2bdb.py   " + current_output_dir +"  --makevstack="+sdata +" --filt=Q")
		#junk = cmdexecute("ls  EMAN2DB/sdata*")
		#a = get_im("bdb:sdata")
		a = get_im(sdata)
		a.set_attr("variabilitysymmetry",sym)
		#a.write_image("bdb:sdata")
		a.write_image(sdata)

	else:

		from fundamentals import window2d, fdecimate, resample
		sys.argv       = mpi_init(len(sys.argv), sys.argv)
		myid           = mpi_comm_rank(MPI_COMM_WORLD)
		number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
		main_node      = 0
		shared_comm  = mpi_comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED,  0, MPI_INFO_NULL)
		myid_on_node = mpi_comm_rank(shared_comm)
		no_of_processes_per_group = mpi_comm_size(shared_comm)
		masters_from_groups_vs_everything_else_comm = mpi_comm_split(MPI_COMM_WORLD, main_node == myid_on_node, myid_on_node)
		color, no_of_groups, balanced_processor_load_on_nodes = get_colors_and_subsets(main_node, MPI_COMM_WORLD, myid, \
		    shared_comm, myid_on_node, masters_from_groups_vs_everything_else_comm)
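		# Note: mpi_comm_split_type with MPI_COMM_TYPE_SHARED groups the ranks that can
		# share memory (i.e. ranks on the same node); the second split collects the one
		# node-local rank 0 ("master") from every node into its own communicator, used
		# below for node-level bookkeeping.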
		overhead_loading = options.overhead*number_of_proc
		#memory_per_node  = options.memory_per_node
		#if memory_per_node == -1.: memory_per_node = 2.*no_of_processes_per_group
		keepgoing = 1
		
		current_window   = options.window
		current_decimate = options.decimate
		
		if len(args) == 1: stack = args[0]
		else:
			print(( "usage: " + usage))
			print(( "Please run '" + progname + " -h' for detailed options"))
			return 1

		t0 = time()	
		# obsolete flags
		options.MPI  = True
		#options.nvec = 0
		options.radiuspca = -1
		options.iter = 40
		options.abs  = 0.0
		options.squ  = 0.0

		if options.fl > 0.0 and options.aa == 0.0:
			ERROR("Fall off has to be given for the low-pass filter", "sx3dvariability", 1, myid)
			
		#if options.VAR and options.SND:
		#	ERROR("Only one of var and SND can be set!", "sx3dvariability", myid)
			
		if options.VAR and (options.ave2D or options.ave3D or options.var2D): 
			ERROR("When VAR is set, the program cannot output ave2D, ave3D or var2D", "sx3dvariability", 1, myid)
			
		#if options.SND and (options.ave2D or options.ave3D):
		#	ERROR("When SND is set, the program cannot output ave2D or ave3D", "sx3dvariability", 1, myid)
		
		#if options.nvec > 0 :
		#	ERROR("PCA option not implemented", "sx3dvariability", 1, myid)
			
		#if options.nvec > 0 and options.ave3D == None:
		#	ERROR("When doing PCA analysis, one must set ave3D", "sx3dvariability", 1, myid)
		
		if current_decimate>1.0 or current_decimate<0.0:
			ERROR("Decimate rate should be a value between 0.0 and 1.0", "sx3dvariability", 1, myid)
		
		if current_window < 0.0:
			ERROR("Target window size should be always larger than zero", "sx3dvariability", 1, myid)
			
		if myid == main_node:
			img  = get_image(stack, 0)
			nx   = img.get_xsize()
			ny   = img.get_ysize()
			if(min(nx, ny) < current_window):   keepgoing = 0
		keepgoing = bcast_number_to_all(keepgoing, main_node, MPI_COMM_WORLD)
		if keepgoing == 0: ERROR("The target window size cannot be larger than the size of decimated image", "sx3dvariability", 1, myid)

		import string
		options.sym = options.sym.lower()
		# if global_def.CACHE_DISABLE:
		# 	from utilities import disable_bdb_cache
		# 	disable_bdb_cache()
		# global_def.BATCH = True
		
		if myid == main_node:
			if not os.path.exists(current_output_dir): os.mkdir(current_output_dir)# Never delete output_dir in the program!
	
		img_per_grp = options.img_per_grp
		#nvec        = options.nvec
		radiuspca   = options.radiuspca
		from logger import Logger,BaseLogger_Files
		#if os.path.exists(os.path.join(options.output_dir, "log.txt")): os.remove(os.path.join(options.output_dir, "log.txt"))
		log_main=Logger(BaseLogger_Files())
		log_main.prefix = os.path.join(current_output_dir, "./")

		if myid == main_node:
			line = ""
			for a in sys.argv: line +=" "+a
			log_main.add(line)
			log_main.add("-------->>>Settings given by all options<<<-------")
			log_main.add("Symmetry             : %s"%options.sym)
			log_main.add("Input stack          : %s"%stack)
			log_main.add("Output_dir           : %s"%current_output_dir)
			
			if options.ave3D: log_main.add("Ave3d                : %s"%options.ave3D)
			if options.var3D: log_main.add("Var3d                : %s"%options.var3D)
			if options.ave2D: log_main.add("Ave2D                : %s"%options.ave2D)
			if options.var2D: log_main.add("Var2D                : %s"%options.var2D)
			if options.VAR:   log_main.add("VAR                  : True")
			else:             log_main.add("VAR                  : False")
			if options.CTF:   log_main.add("CTF correction       : True  ")
			else:             log_main.add("CTF correction       : False ")
			
			log_main.add("Image per group      : %5d"%options.img_per_grp)
			log_main.add("Image decimate rate  : %4.3f"%current_decimate)
			log_main.add("Low pass filter      : %4.3f"%options.fl)
			current_fl = options.fl
			if current_fl == 0.0: current_fl = 0.5
			log_main.add("Current low pass filter is equivalent to cutoff frequency %4.3f for original image size"%round((current_fl*current_decimate),3))
			log_main.add("Window size          : %5d "%current_window)
			log_main.add("sx3dvariability begins")
	
		symbaselen = 0
		if myid == main_node:
			nima = EMUtil.get_image_count(stack)
			img  = get_image(stack)
			nx   = img.get_xsize()
			ny   = img.get_ysize()
			nnxo = nx
			nnyo = ny
			if options.sym != "c1" :
				imgdata = get_im(stack)
				try:
					i = imgdata.get_attr("variabilitysymmetry").lower()
					if(i != options.sym):
						ERROR("The symmetry provided does not agree with the symmetry of the input stack", "sx3dvariability", 1, myid)
				except:
					ERROR("Input stack is not prepared for symmetry, please follow instructions", "sx3dvariability", 1, myid)
				from utilities import get_symt
				i = len(get_symt(options.sym))
				if((nima/i)*i != nima):
					ERROR("The length of the input stack is incorrect for symmetry processing", "sx3dvariability", 1, myid)
				symbaselen = nima/i
			else:  symbaselen = nima
		else:
			nima = 0
			nx = 0
			ny = 0
			nnxo = 0
			nnyo = 0
		nima    = bcast_number_to_all(nima)
		nx      = bcast_number_to_all(nx)
		ny      = bcast_number_to_all(ny)
		nnxo    = bcast_number_to_all(nnxo)
		nnyo    = bcast_number_to_all(nnyo)
		if current_window > max(nx, ny):
			ERROR("Window size is larger than the original image size", "sx3dvariability", 1)
		
		if current_decimate == 1.:
			if current_window !=0:
				nx = current_window
				ny = current_window
		else:
			if current_window == 0:
				nx = int(nx*current_decimate+0.5)
				ny = int(ny*current_decimate+0.5)
			else:
				nx = int(current_window*current_decimate+0.5)
				ny = nx
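		# Worked example of the sizing logic above (values are illustrative only):
		# with an original size nnxo = 200, --decimate=0.25 and --window=0 the target
		# size becomes nx = int(200*0.25 + 0.5) = 50; with --window=180 it would be
		# nx = int(180*0.25 + 0.5) = 45.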
		symbaselen = bcast_number_to_all(symbaselen)
		
		# check FFT prime number
		from fundamentals import smallprime
		is_fft_friendly = (nx == smallprime(nx))
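		# smallprime(n) is assumed here to return the largest integer <= n whose prime
		# factors are all small (an FFT-friendly size); if nx already has that property
		# nothing changes, otherwise window/decimate are adjusted in the branch below.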
		
		if not is_fft_friendly:
			if myid == main_node:
				log_main.add("The target image size is not a product of small prime numbers")
				log_main.add("Program adjusts the input settings!")
			### two cases
			if current_decimate == 1.:
				nx = smallprime(nx)
				ny = nx
				current_window = nx # update
				if myid == main_node:
					log_main.add("The window size is updated to %d."%current_window)
			else:
				if current_window == 0:
					nx = smallprime(int(nx*current_decimate+0.5))
					current_decimate = float(nx)/nnxo
					ny = nx
					if (myid == main_node):
						log_main.add("The decimate rate is updated to %f."%current_decimate)
				else:
					nx = smallprime(int(current_window*current_decimate+0.5))
					ny = nx
					current_window = int(nx/current_decimate+0.5)
					if (myid == main_node):
						log_main.add("The window size is updated to %d."%current_window)
						
		if myid == main_node:
			log_main.add("The target image size is %d"%nx)
						
		if radiuspca == -1: radiuspca = nx/2-2
		if myid == main_node: log_main.add("%-70s:  %d\n"%("Number of projection", nima))
		img_begin, img_end = MPI_start_end(nima, number_of_proc, myid)
		
		"""
		if options.SND:
			from projection		import prep_vol, prgs
			from statistics		import im_diff
			from utilities		import get_im, model_circle, get_params_proj, set_params_proj
			from utilities		import get_ctf, generate_ctf
			from filter			import filt_ctf
		
			imgdata = EMData.read_images(stack, range(img_begin, img_end))

			if options.CTF:
				vol = recons3d_4nn_ctf_MPI(myid, imgdata, 1.0, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)
			else:
				vol = recons3d_4nn_MPI(myid, imgdata, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)

			bcast_EMData_to_all(vol, myid)
			volft, kb = prep_vol(vol)

			mask = model_circle(nx/2-2, nx, ny)
			varList = []
			for i in xrange(img_begin, img_end):
				phi, theta, psi, s2x, s2y = get_params_proj(imgdata[i-img_begin])
				ref_prj = prgs(volft, kb, [phi, theta, psi, -s2x, -s2y])
				if options.CTF:
					ctf_params = get_ctf(imgdata[i-img_begin])
					ref_prj = filt_ctf(ref_prj, generate_ctf(ctf_params))
				diff, A, B = im_diff(ref_prj, imgdata[i-img_begin], mask)
				diff2 = diff*diff
				set_params_proj(diff2, [phi, theta, psi, s2x, s2y])
				varList.append(diff2)
			mpi_barrier(MPI_COMM_WORLD)
		"""
		
		if options.VAR: # 2D variance images have no shifts
			#varList   = EMData.read_images(stack, range(img_begin, img_end))
			from EMAN2 import Region
			varList = []
			for index_of_particle in range(img_begin,img_end):
				image = get_im(stack, index_of_particle)
				if current_window > 0: varList.append(fdecimate(window2d(image,current_window,current_window), nx,ny))
				else:   varList.append(fdecimate(image, nx,ny))
				
		else:
			from utilities		import bcast_number_to_all, bcast_list_to_all, send_EMData, recv_EMData
			from utilities		import set_params_proj, get_params_proj, params_3D_2D, get_params2D, set_params2D, compose_transform2
			from utilities		import model_blank, nearest_proj, model_circle, write_text_row, wrap_mpi_gatherv
			from applications	import pca
			from statistics		import avgvar, avgvar_ctf, ccc, ave_var, aves_wiener
			from filter		    import filt_tanl
			from morphology		import threshold, square_root
			from projection 	import project, prep_vol, prgs
			from sets		    import Set
			from utilities      import wrap_mpi_recv, wrap_mpi_bcast, wrap_mpi_send
			import numpy as np
			if myid == main_node:
				t1          = time()
				proj_angles = []
				aveList     = []
				tab = EMUtil.get_all_attributes(stack, 'xform.projection')	
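				# The loop below appears to build a composite sort key: theta is folded
				# about 90 degrees and psi is packed in as a minor key (x*10000 + psi),
				# so that sorting brings projections with similar (or mirror-related)
				# orientations next to each other before grouping.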
				for i in range(nima):
					t     = tab[i].get_params('spider')
					phi   = t['phi']
					theta = t['theta']
					psi   = t['psi']
					x     = theta
					if x > 90.0: x = 180.0 - x
					x = x*10000+psi
					proj_angles.append([x, t['phi'], t['theta'], t['psi'], i])
				t2 = time()
				log_main.add( "%-70s:  %d\n"%("Number of neighboring projections", img_per_grp))
				log_main.add("...... Finding neighboring projections\n")
				log_main.add( "Number of images per group: %d"%img_per_grp)
				log_main.add( "Now grouping projections")
				proj_angles.sort()
				proj_angles_list = np.full((nima, 4), 0.0, dtype=np.float32)	
				for i in range(nima):
					proj_angles_list[i][0] = proj_angles[i][1]
					proj_angles_list[i][1] = proj_angles[i][2]
					proj_angles_list[i][2] = proj_angles[i][3]
					proj_angles_list[i][3] = proj_angles[i][4]
			else: proj_angles_list = 0
			proj_angles_list = wrap_mpi_bcast(proj_angles_list, main_node, MPI_COMM_WORLD)
			proj_angles      = []
			for i in range(nima):
				proj_angles.append([proj_angles_list[i][0], proj_angles_list[i][1], proj_angles_list[i][2], int(proj_angles_list[i][3])])
			del proj_angles_list
			proj_list, mirror_list = nearest_proj(proj_angles, img_per_grp, range(img_begin, img_end))
			all_proj = Set()
			for im in proj_list:
				for jm in im:
					all_proj.add(proj_angles[jm][3])
			all_proj = list(all_proj)
			index = {}
			for i in range(len(all_proj)): index[all_proj[i]] = i
			mpi_barrier(MPI_COMM_WORLD)
			if myid == main_node:
				log_main.add("%-70s:  %.2f\n"%("Finding neighboring projections lasted [s]", time()-t2))
				log_main.add("%-70s:  %d\n"%("Number of groups processed on the main node", len(proj_list)))
				log_main.add("Grouping projections took:  %12.1f [m]"%((time()-t2)/60.))
				log_main.add("Number of groups on main node: ", len(proj_list))
			mpi_barrier(MPI_COMM_WORLD)

			if myid == main_node:
				log_main.add("...... Calculating the stack of 2D variances \n")
			# Memory estimation. There are two memory consumption peaks
			# peak 1. Compute ave, var; 
			# peak 2. Var volume reconstruction;
			# proj_params = [0.0]*(nima*5)
			aveList = []
			varList = []				
			#if nvec > 0: eigList = [[] for i in range(nvec)]
			dnumber   = len(all_proj)  # number of images in all neighborhood sets assigned to myid
			pnumber   = len(proj_list)*2. + img_per_grp # aveList and varList 
			tnumber   = dnumber+pnumber
			vol_size2 = nx**3*4.*8/1.e9
			vol_size1 = 2.*nnxo**3*4.*8/1.e9
			proj_size         = nnxo*nnyo*len(proj_list)*4.*2./1.e9 # both aveList and varList
			orig_data_size    = nnxo*nnyo*4.*tnumber/1.e9
			reduced_data_size = nx*nx*4.*tnumber/1.e9
			full_data         = np.full((number_of_proc, 2), -1., dtype=np.float16)
			full_data[myid]   = orig_data_size, reduced_data_size
			if myid != main_node: wrap_mpi_send(full_data, main_node, MPI_COMM_WORLD)
			if myid == main_node:
				for iproc in range(number_of_proc):
					if iproc != main_node:
						dummy = wrap_mpi_recv(iproc, MPI_COMM_WORLD)
						full_data[np.where(dummy>-1)] = dummy[np.where(dummy>-1)]
				del dummy
			mpi_barrier(MPI_COMM_WORLD)
			full_data = wrap_mpi_bcast(full_data, main_node, MPI_COMM_WORLD)
			# find the CPU with heaviest load
			minindx         = np.argsort(full_data, 0)
			heavy_load_myid = minindx[-1][1]
			total_mem       = sum(full_data)
			if myid == main_node:
				if current_window == 0:
					log_main.add("Nx:   current image size = %d. Decimated by %f from %d"%(nx, current_decimate, nnxo))
				else:
					log_main.add("Nx:   current image size = %d. Windowed to %d, and decimated by %f from %d"%(nx, current_window, current_decimate, nnxo))
				log_main.add("Nproj:       number of particle images.")
				log_main.add("Navg:        number of 2D average images.")
				log_main.add("Nvar:        number of 2D variance images.")
				log_main.add("Img_per_grp: user defined image per group for averaging = %d"%img_per_grp)
				log_main.add("Overhead:    total python overhead memory consumption   = %f"%overhead_loading)
				log_main.add("Total memory) = 4.0*nx^2*(nproj + navg +nvar+ img_per_grp)/1.0e9 + overhead: %12.3f [GB]"%\
				   (total_mem[1] + overhead_loading))
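				# Rough worked example of the estimate above (illustrative numbers only):
				# with nx = 100 and tnumber = 1000 images, reduced_data_size is
				# 100*100*4*1000/1e9 ~ 0.04 GB per process, before the Python overhead term.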
			del full_data
			mpi_barrier(MPI_COMM_WORLD)
			if myid == heavy_load_myid:
				log_main.add("Begin reading and preprocessing images on processor. Wait... ")
				ttt = time()
			#imgdata = EMData.read_images(stack, all_proj)			
			imgdata = [ None for im in range(len(all_proj))]
			for index_of_proj in range(len(all_proj)):
				#image = get_im(stack, all_proj[index_of_proj])
				if( current_window > 0): imgdata[index_of_proj] = fdecimate(window2d(get_im(stack, all_proj[index_of_proj]),current_window,current_window), nx, ny)
				else:                    imgdata[index_of_proj] = fdecimate(get_im(stack, all_proj[index_of_proj]), nx, ny)
				
				if (current_decimate> 0.0 and options.CTF):
					ctf = imgdata[index_of_proj].get_attr("ctf")
					ctf.apix = ctf.apix/current_decimate
					imgdata[index_of_proj].set_attr("ctf", ctf)
					
				if myid == heavy_load_myid and index_of_proj%100 == 0:
					log_main.add(" ...... %6.2f%% "%(index_of_proj/float(len(all_proj))*100.))
			mpi_barrier(MPI_COMM_WORLD)
			if myid == heavy_load_myid:
				log_main.add("All_proj preprocessing cost %7.2f m"%((time()-ttt)/60.))
				log_main.add("Wait untill reading on all CPUs done...")
			'''	
			imgdata2 = EMData.read_images(stack, range(img_begin, img_end))
			if options.fl > 0.0:
				for k in xrange(len(imgdata2)):
					imgdata2[k] = filt_tanl(imgdata2[k], options.fl, options.aa)
			if options.CTF:
				vol = recons3d_4nn_ctf_MPI(myid, imgdata2, 1.0, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)
			else:
				vol = recons3d_4nn_MPI(myid, imgdata2, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)
			if myid == main_node:
				vol.write_image("vol_ctf.hdf")
				print_msg("Writing to the disk volume reconstructed from averages as		:  %s\n"%("vol_ctf.hdf"))
			del vol, imgdata2
			mpi_barrier(MPI_COMM_WORLD)
			'''
			from applications import prepare_2d_forPCA
			from utilities    import model_blank
			from EMAN2        import Transform
			if not options.no_norm: 
				mask = model_circle(nx/2-2, nx, nx)
			if options.CTF: 
				from utilities import pad
				from filter import filt_ctf
			from filter import filt_tanl
			if myid == heavy_load_myid:
				log_main.add("Start computing 2D aveList and varList. Wait...")
				ttt = time()
			inner=nx//2-4
			outer=inner+2
			xform_proj_for_2D = [ None for i in range(len(proj_list))]
			for i in range(len(proj_list)):
				ki = proj_angles[proj_list[i][0]][3]
				if ki >= symbaselen:  continue
				mi = index[ki]
				dpar = Util.get_transform_params(imgdata[mi], "xform.projection", "spider")
				phiM, thetaM, psiM, s2xM, s2yM  = dpar["phi"],dpar["theta"],dpar["psi"],-dpar["tx"]*current_decimate,-dpar["ty"]*current_decimate
				grp_imgdata = []
				for j in range(img_per_grp):
					mj = index[proj_angles[proj_list[i][j]][3]]
					cpar = Util.get_transform_params(imgdata[mj], "xform.projection", "spider")
					alpha, sx, sy, mirror = params_3D_2D_NEW(cpar["phi"], cpar["theta"],cpar["psi"], -cpar["tx"]*current_decimate, -cpar["ty"]*current_decimate, mirror_list[i][j])
					if thetaM <= 90:
						if mirror == 0:  alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, phiM - cpar["phi"], 0.0, 0.0, 1.0)
						else:            alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, 180-(phiM - cpar["phi"]), 0.0, 0.0, 1.0)
					else:
						if mirror == 0:  alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, -(phiM- cpar["phi"]), 0.0, 0.0, 1.0)
						else:            alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, -(180-(phiM - cpar["phi"])), 0.0, 0.0, 1.0)
					imgdata[mj].set_attr("xform.align2d", Transform({"type":"2D","alpha":alpha,"tx":sx,"ty":sy,"mirror":mirror,"scale":1.0}))
					grp_imgdata.append(imgdata[mj])
				if not options.no_norm:
					for k in range(img_per_grp):
						ave, std, minn, maxx = Util.infomask(grp_imgdata[k], mask, False)
						grp_imgdata[k] -= ave
						grp_imgdata[k] /= std
				if options.fl > 0.0:
					for k in range(img_per_grp):
						grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa)

				#  Because of background issues, only linear option works.
				if options.CTF:  ave, var = aves_wiener(grp_imgdata, SNR = 1.0e5, interpolation_method = "linear")
				else:  ave, var = ave_var(grp_imgdata)
				# Switch to std dev
				# threshold is not really needed,it is just in case due to numerical accuracy something turns out negative.
				var = square_root(threshold(var))

				set_params_proj(ave, [phiM, thetaM, 0.0, 0.0, 0.0])
				set_params_proj(var, [phiM, thetaM, 0.0, 0.0, 0.0])

				aveList.append(ave)
				varList.append(var)
				xform_proj_for_2D[i] = [phiM, thetaM, 0.0, 0.0, 0.0]

				'''
				if nvec > 0:
					eig = pca(input_stacks=grp_imgdata, subavg="", mask_radius=radiuspca, nvec=nvec, incore=True, shuffle=False, genbuf=True)
					for k in range(nvec):
						set_params_proj(eig[k], [phiM, thetaM, 0.0, 0.0, 0.0])
						eigList[k].append(eig[k])
					"""
					if myid == 0 and i == 0:
						for k in xrange(nvec):
							eig[k].write_image("eig.hdf", k)
					"""
				'''
				if (myid == heavy_load_myid) and (i%100 == 0):
					log_main.add(" ......%6.2f%%  "%(i/float(len(proj_list))*100.))		
			del imgdata, grp_imgdata, cpar, dpar, all_proj, proj_angles, index
			if not options.no_norm: del mask
			if myid == main_node: del tab
			#  At this point, all averages and variances are computed
			mpi_barrier(MPI_COMM_WORLD)
			
			if (myid == heavy_load_myid):
				log_main.add("Computing aveList and varList took %12.1f [m]"%((time()-ttt)/60.))
			
			xform_proj_for_2D = wrap_mpi_gatherv(xform_proj_for_2D, main_node, MPI_COMM_WORLD)
			if (myid == main_node):
				write_text_row(xform_proj_for_2D, os.path.join(current_output_dir, "params.txt"))
			del xform_proj_for_2D
			mpi_barrier(MPI_COMM_WORLD)
			if options.ave2D:
				from fundamentals import fpol
				from applications import header
				if myid == main_node:
					log_main.add("Compute ave2D ... ")
					km = 0
					for i in range(number_of_proc):
						if i == main_node :
							for im in range(len(aveList)):
								aveList[im].write_image(os.path.join(current_output_dir, options.ave2D), km)
								km += 1
						else:
							nl = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
							nl = int(nl[0])
							for im in range(nl):
								ave = recv_EMData(i, im+i+70000)
								"""
								nm = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
								nm = int(nm[0])
								members = mpi_recv(nm, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
								ave.set_attr('members', map(int, members))
								members = mpi_recv(nm, MPI_FLOAT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
								ave.set_attr('pix_err', map(float, members))
								members = mpi_recv(3, MPI_FLOAT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
								ave.set_attr('refprojdir', map(float, members))
								"""
								tmpvol=fpol(ave, nx, nx,1)								
								tmpvol.write_image(os.path.join(current_output_dir, options.ave2D), km)
								km += 1
				else:
					mpi_send(len(aveList), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
					for im in range(len(aveList)):
						send_EMData(aveList[im], main_node,im+myid+70000)
						"""
						members = aveList[im].get_attr('members')
						mpi_send(len(members), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						mpi_send(members, len(members), MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						members = aveList[im].get_attr('pix_err')
						mpi_send(members, len(members), MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						try:
							members = aveList[im].get_attr('refprojdir')
							mpi_send(members, 3, MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						except:
							mpi_send([-999.0,-999.0,-999.0], 3, MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						"""
				if myid == main_node:
					header(os.path.join(current_output_dir, options.ave2D), params='xform.projection', fimport = os.path.join(current_output_dir, "params.txt"))
				mpi_barrier(MPI_COMM_WORLD)	
			if options.ave3D:
				from fundamentals import fpol
				t5 = time()
				if myid == main_node: log_main.add("Reconstruct ave3D ... ")
				ave3D = recons3d_4nn_MPI(myid, aveList, symmetry=options.sym, npad=options.npad)
				bcast_EMData_to_all(ave3D, myid)
				if myid == main_node:
					if current_decimate != 1.0: ave3D = resample(ave3D, 1./current_decimate)
					ave3D = fpol(ave3D, nnxo, nnxo, nnxo) # always back to the original image size
					set_pixel_size(ave3D, 1.0)
					ave3D.write_image(os.path.join(current_output_dir, options.ave3D))
					log_main.add("Ave3D reconstruction took %12.1f [m]"%((time()-t5)/60.0))
					log_main.add("%-70s:  %s\n"%("The reconstructed ave3D is saved as ", options.ave3D))
					
			mpi_barrier(MPI_COMM_WORLD)		
			del ave, var, proj_list, stack, alpha, sx, sy, mirror, aveList
			'''
			if nvec > 0:
				for k in range(nvec):
					if myid == main_node:log_main.add("Reconstruction eigenvolumes", k)
					cont = True
					ITER = 0
					mask2d = model_circle(radiuspca, nx, nx)
					while cont:
						#print "On node %d, iteration %d"%(myid, ITER)
						eig3D = recons3d_4nn_MPI(myid, eigList[k], symmetry=options.sym, npad=options.npad)
						bcast_EMData_to_all(eig3D, myid, main_node)
						if options.fl > 0.0:
							eig3D = filt_tanl(eig3D, options.fl, options.aa)
						if myid == main_node:
							eig3D.write_image(os.path.join(options.outpout_dir, "eig3d_%03d.hdf"%(k, ITER)))
						Util.mul_img( eig3D, model_circle(radiuspca, nx, nx, nx) )
						eig3Df, kb = prep_vol(eig3D)
						del eig3D
						cont = False
						icont = 0
						for l in range(len(eigList[k])):
							phi, theta, psi, s2x, s2y = get_params_proj(eigList[k][l])
							proj = prgs(eig3Df, kb, [phi, theta, psi, s2x, s2y])
							cl = ccc(proj, eigList[k][l], mask2d)
							if cl < 0.0:
								icont += 1
								cont = True
								eigList[k][l] *= -1.0
						u = int(cont)
						u = mpi_reduce([u], 1, MPI_INT, MPI_MAX, main_node, MPI_COMM_WORLD)
						icont = mpi_reduce([icont], 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD)

						if myid == main_node:
							u = int(u[0])
							log_main.add(" Eigenvector: ",k," number changed ",int(icont[0]))
						else: u = 0
						u = bcast_number_to_all(u, main_node)
						cont = bool(u)
						ITER += 1

					del eig3Df, kb
					mpi_barrier(MPI_COMM_WORLD)
				del eigList, mask2d
			'''
			if options.ave3D: del ave3D
			if options.var2D:
				from fundamentals import fpol 
				from applications import header
				if myid == main_node:
					log_main.add("Compute var2D...")
					km = 0
					for i in range(number_of_proc):
						if i == main_node :
							for im in range(len(varList)):
								tmpvol=fpol(varList[im], nx, nx,1)
								tmpvol.write_image(os.path.join(current_output_dir, options.var2D), km)
								km += 1
						else:
							nl = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
							nl = int(nl[0])
							for im in range(nl):
								ave = recv_EMData(i, im+i+70000)
								tmpvol=fpol(ave, nx, nx,1)
								tmpvol.write_image(os.path.join(current_output_dir, options.var2D), km)
								km += 1
				else:
					mpi_send(len(varList), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
					for im in range(len(varList)):
						send_EMData(varList[im], main_node, im+myid+70000)#  What with the attributes??
				mpi_barrier(MPI_COMM_WORLD)
				if myid == main_node:
					from applications import header
					header(os.path.join(current_output_dir, options.var2D), params = 'xform.projection',fimport = os.path.join(current_output_dir, "params.txt"))
				mpi_barrier(MPI_COMM_WORLD)
		if options.var3D:
			if myid == main_node: log_main.add("Reconstruct var3D ...")
			t6 = time()
			# radiusvar = options.radius
			# if( radiusvar < 0 ):  radiusvar = nx//2 -3
			res = recons3d_4nn_MPI(myid, varList, symmetry = options.sym, npad=options.npad)
			#res = recons3d_em_MPI(varList, vol_stack, options.iter, radiusvar, options.abs, True, options.sym, options.squ)
			if myid == main_node:
				from fundamentals import fpol
				if current_decimate != 1.0: res	= resample(res, 1./current_decimate)
				res = fpol(res, nnxo, nnxo, nnxo)
				set_pixel_size(res, 1.0)
				res.write_image(os.path.join(current_output_dir, options.var3D))
				log_main.add("%-70s:  %s\n"%("The reconstructed var3D is saved as ", options.var3D))
				log_main.add("Var3D reconstruction took %f12.1 [m]"%((time()-t6)/60.0))
				log_main.add("Total computation time %f12.1 [m]"%((time()-t0)/60.0))
				log_main.add("sx3dvariability finishes")
		from mpi import mpi_finalize
		mpi_finalize()
		
		if RUNNING_UNDER_MPI: global_def.MPI = False

		global_def.BATCH = False
Esempio n. 36
0
def main():
    progname = os.path.basename(sys.argv[0])
    usage = progname + " stack <maskfile> --search_rng=10 --maxit=max_iteration --CTF --snr=SNR --Fourvar=Fourier_variance --oneDx --MPI"
    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option("--search_rng",
                      type="int",
                      default=-1,
                      help="Search range for x-shift")
    parser.add_option("--search_ang",
                      type="int",
                      default=-1,
                      help="Search range for inplane rotation angle")
    parser.add_option(
        "--search_rng_y",
        type="int",
        default=-1,
        help=
        "Search range for x-shift. Not used for 1D search (oneDx flag set).")
    parser.add_option("--maxit",
                      type="int",
                      default=100,
                      help="Maximum number of iterations program will perform")
    parser.add_option("--CTF",
                      action="store_true",
                      default=False,
                      help="Use CTF correction")
    parser.add_option(
        "--snr",
        type="float",
        default=1.0,
        help="signal-to-noise ratio of the data (default is 1.0)")
    parser.add_option("--Fourvar",
                      action="store_true",
                      default=False,
                      help="compute Fourier variance")
    parser.add_option("--oneDx",
                      action="store_true",
                      default=False,
                      help="1D search along x-axis")
    parser.add_option("--MPI",
                      action="store_true",
                      default=False,
                      help="use MPI")
    parser.add_option("--curvature",
                      action="store_true",
                      default=False,
                      help="for curved filament alignment")
    (options, args) = parser.parse_args()

    if not (options.MPI):
        print("Only MPI version is currently implemented.")
        print("Please run '" + progname + " -h' for detailed options")
        return

    if len(args) < 1 or len(args) > 2:
        print("usage: " + usage)
        print("Please run '" + progname + " -h' for detailed options")
    else:

        if len(args) == 1: mask = None
        else: mask = args[1]

        if global_def.CACHE_DISABLE:
            from utilities import disable_bdb_cache
            disable_bdb_cache()

        from mpi import mpi_init
        sys.argv = mpi_init(len(sys.argv), sys.argv)

        global_def.BATCH = True
        if options.oneDx:
            helicalshiftali_MPI(args[0], mask, options.maxit, options.CTF,
                                options.snr, options.Fourvar,
                                options.search_rng)
        else:
            shiftali_MPI(args[0], mask, options.maxit, options.CTF,
                         options.snr, options.Fourvar, options.search_rng,
                         options.oneDx, options.search_rng_y)
        global_def.BATCH = False

        from mpi import mpi_finalize
        mpi_finalize()
Esempio n. 37
0
def main():

    progname = os.path.basename(sys.argv[0])
    usage = (
        progname
        + " stack outdir <maskfile> --K=10 --trials=2 --debug --maxit=100 --rand_seed=10 --crit='all' --F=0.9 --T0=2.0 --init_method='rnd' --normalize --CTF --MPI --CUDA"
    )
    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option("--K", type="int", default=2, help="Number of classes (default 2)")
    parser.add_option("--trials", type="int", default=1, help="Number of trials of K-means (default 1)")
    parser.add_option("--maxit", type="int", default=100, help="Maximum number of iterations within K-means")
    parser.add_option("--CTF", action="store_true", default=False, help="Perform classification using CTF information")
    parser.add_option("--rand_seed", type="int", default=-1, help="Random seed of initial (default random)")
    parser.add_option(
        "--crit", type="string", default="D", help="Criterions: Coleman [C], Harabasz[H], Davies-Bouldin[D], All [all]"
    )
    # parser.add_option("--F",          type="float",        default=0.0,       help="Cooling in simulated annealing, ex.: 0.9")
    # parser.add_option("--T0",         type="float",        default=0.0,       help="Initial temperature in simulated annealing, ex: 100")
    parser.add_option("--MPI", action="store_true", default=False, help="Use MPI version")
    parser.add_option("--debug", action="store_true", default=False, help="")
    parser.add_option("--normalize", action="store_true", default=False, help="Normalize images under the mask")
    parser.add_option(
        "--init_method",
        type="string",
        default="rnd",
        help='Method used to initialize partition: "rnd" randomize or "d2w" for d2 weighting initialization (default is rnd)',
    )

    (options, args) = parser.parse_args()
    if len(args) < 2 or len(args) > 3:
        print "usage: " + usage
        print "Please run '" + progname + " -h' for detailed options"
    elif options.trials < 1:
        sys.stderr.write("ERROR: Number of trials should be at least 1.\n\n")
        sys.exit()
    else:
        if len(args) == 2:
            mask = None
        else:
            mask = args[2]

        if options.K < 2:
            sys.stderr.write("ERROR: K must be > 1 group\n\n")
            sys.exit()

        if options.CTF:
            sys.stderr.write("ERROR: CTF option not implemented\n\n")
            sys.exit()

        if global_def.CACHE_DISABLE:
            from utilities import disable_bdb_cache

            disable_bdb_cache()
        from applications import k_means_main

        global_def.BATCH = True
        k_means_main(
            args[0],
            args[1],
            mask,
            "SSE",
            options.K,
            options.rand_seed,
            options.maxit,
            options.trials,
            options.crit,
            options.CTF,
            0.0,
            0.0,
            options.MPI,
            False,
            options.debug,
            options.normalize,
            options.init_method,
        )
        global_def.BATCH = False
        if options.MPI:
            from mpi import mpi_finalize

            mpi_finalize()
Esempio n. 38
0
def main():
    sp_global_def.print_timestamp("Start")
    sp_global_def.write_command()
    run()
    sp_global_def.print_timestamp("Finish")
    mpi.mpi_finalize()
Esempio n. 39
0
def main():
	arglist = []
	for arg in sys.argv:
		arglist.append( arg )
	progname = os.path.basename(arglist[0])
	usage = progname + """ firstvolume  secondvolume  maskfile  outputfile  --wn  --step  --cutoff  --radius  --fsc  --res_overall  --out_ang_res  --apix  --MPI

	Compute local resolution in real space within area outlined by the maskfile and within regions wn x wn x wn
	"""
	parser = optparse.OptionParser(usage,version=global_def.SPARXVERSION)
	
	parser.add_option("--wn",           type="int",           default=7,      help="Size of window within which local real-space FSC is computed. (default 7)")
	parser.add_option("--step",         type="float",         default= 1.0,   help="Shell step in Fourier size in pixels. (default 1.0)")   
	parser.add_option("--cutoff",       type="float",         default= 0.5,   help="Resolution cut-off for FSC. (default 0.5)")
	parser.add_option("--radius",       type="int",           default=-1,     help="If there is no maskfile, sphere with r=radius will be used. By default, the radius is nx/2-wn (default -1)")
	parser.add_option("--fsc",          type="string",        default= None,  help="Save overall FSC curve (might be truncated). By default, the program does not save the FSC curve. (default none)")
	parser.add_option("--res_overall",  type="float",         default= -1.0,  help="Overall resolution at the cutoff level estimated by the user [abs units]. (default None)")
	parser.add_option("--out_ang_res",  action="store_true",  default=False,  help="Additionally creates a local resolution file in Angstroms. (default False)")
	parser.add_option("--apix",         type="float",         default= 1.0,   help="Pixel size in Angstrom. Effective only with --out_ang_res options. (default 1.0)")
	parser.add_option("--MPI",          action="store_true",  default=False,  help="Use MPI version.")

	(options, args) = parser.parse_args(arglist[1:])

	if len(args) <3 or len(args) > 4:
		print("See usage " + usage)
		sys.exit()

	if global_def.CACHE_DISABLE:
		utilities.disable_bdb_cache()

	res_overall = options.res_overall

	if options.MPI:
		sys.argv = mpi.mpi_init(len(sys.argv),sys.argv)		

		number_of_proc = mpi.mpi_comm_size(mpi.MPI_COMM_WORLD)
		myid = mpi.mpi_comm_rank(mpi.MPI_COMM_WORLD)
		main_node = 0
		global_def.MPI = True
		cutoff = options.cutoff

		nk = int(options.wn)

		if(myid == main_node):
			#print sys.argv
			vi = utilities.get_im(sys.argv[1])
			ui = utilities.get_im(sys.argv[2])
			
			nx = vi.get_xsize()
			ny = vi.get_ysize()
			nz = vi.get_zsize()
			dis = [nx, ny, nz]
		else:
			dis = [0, 0, 0]

		global_def.BATCH = True

		dis = utilities.bcast_list_to_all(dis, myid, source_node = main_node)

		if(myid != main_node):
			nx = int(dis[0])
			ny = int(dis[1])
			nz = int(dis[2])

			vi = utilities.model_blank(nx,ny,nz)
			ui = utilities.model_blank(nx,ny,nz)


		if len(args) == 3:
			m = utilities.model_circle((min(nx,ny,nz)-nk)//2,nx,ny,nz)
			outvol = args[2]
		
		elif len(args) == 4:
			if(myid == main_node):
				m = morphology.binarize(utilities.get_im(args[2]), 0.5)
			else:
				m = utilities.model_blank(nx, ny, nz)
			outvol = args[3]
		utilities.bcast_EMData_to_all(m, myid, main_node)

		"""Multiline Comment0"""
		freqvol, resolut = statistics.locres(vi, ui, m, nk, cutoff, options.step, myid, main_node, number_of_proc)

		if(myid == 0):
			# Remove outliers based on the Interquartile range
			output_volume(freqvol, resolut, options.apix, outvol, options.fsc, options.out_ang_res, nx, ny, nz, res_overall)
		mpi.mpi_finalize()

	else:
		cutoff = options.cutoff
		vi = utilities.get_im(args[0])
		ui = utilities.get_im(args[1])

		nn = vi.get_xsize()
		nk = int(options.wn)
	
		if len(args) == 3:
			m = utilities.model_circle((nn-nk)//2,nn,nn,nn)
			outvol = args[2]
		
		elif len(args) == 4:
			m = morphology.binarize(utilities.get_im(args[2]), 0.5)
			outvol = args[3]

		mc = utilities.model_blank(nn,nn,nn,1.0)-m

		vf = fundamentals.fft(vi)
		uf = fundamentals.fft(ui)
		"""Multiline Comment1"""
		lp = int(old_div(old_div(nn,2),options.step)+0.5)
		step = 0.5/lp
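		# The loop below computes, shell by shell in Fourier space, a local correlation
		# map: tmp3 ~ sum(u*v), tmp1 ~ sum(v*v), tmp2 ~ sum(u*u) accumulated over an
		# nk-sized box (box_convolution), so tmp3/sqrt(tmp1*tmp2) is a local FSC value
		# for the shell [fl, fh]; a voxel's local resolution is set to the first shell
		# at which this value drops below the cutoff.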

		freqvol = utilities.model_blank(nn,nn,nn)
		resolut = []
		for i in range(1,lp):
			fl = step*i
			fh = fl+step
			#print(lp,i,step,fl,fh)
			v = fundamentals.fft(filter.filt_tophatb( vf, fl, fh))
			u = fundamentals.fft(filter.filt_tophatb( uf, fl, fh))
			tmp1 = EMAN2_cppwrap.Util.muln_img(v,v)
			tmp2 = EMAN2_cppwrap.Util.muln_img(u,u)

			do = EMAN2_cppwrap.Util.infomask(morphology.square_root(morphology.threshold(EMAN2_cppwrap.Util.muln_img(tmp1,tmp2))),m,True)[0]


			tmp3 = EMAN2_cppwrap.Util.muln_img(u,v)
			dp = EMAN2_cppwrap.Util.infomask(tmp3,m,True)[0]
			resolut.append([i,(fl+fh)/2.0, old_div(dp,do)])

			tmp1 = EMAN2_cppwrap.Util.box_convolution(tmp1, nk)
			tmp2 = EMAN2_cppwrap.Util.box_convolution(tmp2, nk)
			tmp3 = EMAN2_cppwrap.Util.box_convolution(tmp3, nk)

			EMAN2_cppwrap.Util.mul_img(tmp1,tmp2)

			tmp1 = morphology.square_root(morphology.threshold(tmp1))

			EMAN2_cppwrap.Util.mul_img(tmp1,m)
			EMAN2_cppwrap.Util.add_img(tmp1,mc)

			EMAN2_cppwrap.Util.mul_img(tmp3,m)
			EMAN2_cppwrap.Util.add_img(tmp3,mc)

			EMAN2_cppwrap.Util.div_img(tmp3,tmp1)

			EMAN2_cppwrap.Util.mul_img(tmp3,m)
			freq=(fl+fh)/2.0
			bailout = True
			for x in range(nn):
				for y in range(nn):
					for z in range(nn):
						if(m.get_value_at(x,y,z) > 0.5):
							if(freqvol.get_value_at(x,y,z) == 0.0):
								if(tmp3.get_value_at(x,y,z) < cutoff):
									freqvol.set_value_at(x,y,z, freq)
									bailout = False
								else:
									bailout = False
			if(bailout):  break
		#print(len(resolut))
		# remove outliers
		output_volume(freqvol, resolut, options.apix, outvol, options.fsc, options.out_ang_res, nn, nn, nn, res_overall)
Esempio n. 40
0
def main():
	import os
	import sys
	from optparse import OptionParser
	from global_def import SPARXVERSION
	import global_def
	arglist = []
	for arg in sys.argv:
		arglist.append( arg )
	progname = os.path.basename(arglist[0])
	usage = progname + " stack ref_vol outdir  <maskfile> parameters listed below"
	
	parser = OptionParser(usage,version=SPARXVERSION)
	parser.add_option("--delta",              type="string",		 default= " 10 6 4  3   2",   help="angular step of reference projections")
	parser.add_option("--maxit",              type="int",            default= 30,                 help="maximum number of iterations performed for each angular step (set to 30) ")
	parser.add_option("--CTF",                action="store_true",   default=False,      		  help="CTF correction")
	parser.add_option("--snr",                type="float",          default= 1.0,                help="Signal-to-Noise Ratio of the data")	
	#parser.add_option("--MPI",                action="store_true",   default=False,               help="use MPI version")
	#parser.add_option("--fourvar",           action="store_true",   default=False,               help="compute Fourier variance")
	parser.add_option("--apix",               type="float",			 default= -1.0,               help="pixel size [Angstroms]")   
	parser.add_option("--dp",                 type="float",			 default= -1.0,               help="rise of helical symmetry [Angstroms]")   
	parser.add_option("--dphi",               type="float",			 default= -1.0,               help="azimuthal angle of helical symmetry [degrees]")  
	parser.add_option("--symdoc",             type="string",		 default="",      	    	  help="text file containing helical symmetry parameters dp and dphi")
	
	parser.add_option("--psi_max",            type="float", 		 default= 10.0,               help="maximum psi - how far rotation in plane can can deviate from 90 or 270 degrees")   
	parser.add_option("--rmin",               type="float", 		 default= 0.0,                help="minimal radius for hsearch (Angstroms)")   
	parser.add_option("--rmax",               type="float", 		 default= 80.0,               help="maximal radius for hsearch (Angstroms)")
	parser.add_option("--fract",              type="float", 		 default= 0.7,                help="fraction of the volume used for helical search. Default 0.7.")
	parser.add_option("--sym",                type="string",		 default= "c1",               help="Point-group symmetry of the structure. Default c1.")
	parser.add_option("--function",           type="string",		 default="helical",  	      help="name of the reference preparation function (Default: helical)")
	parser.add_option("--npad",               type="int",   		 default= 2,                  help="padding size for 3D reconstruction (Default: 2)")
	parser.add_option("--debug",              action="store_true",   default=False,               help="debug")
	parser.add_option("--seg_ny",             type="int",            default= 45,                 help="Desired y dimension of segments.  Only central part of segments nseg_ny pixels long will be used in calculations.")
	parser.add_option("--searchxshift",       type="float",		     default= 0.0,                help="search range for x-shift determination: +/- searchxshift (Angstroms)")
	parser.add_option("--xwobble",            type="float",		     default=0.0,                 help="wobble in x-directions (default = 0.0) (Angstroms)")
	parser.add_option("--ywobble",            type="float",          default=0.0,                 help="wobble in y-directions (default = 0.0) (Angstroms)")
	parser.add_option("--ystep",              type="float",          default=0.0,                 help="step is in y-directions (default = pixel size) (Angstroms)")
	parser.add_option("--phiwobble",          type="float",          default=0.0,                 help="wobble of azimuthal angle (default = 0.0) (degrees)")
	parser.add_option("--nopsisearch",        action="store_true",   default=False,               help="Block searching for in-plane angle (default False)")
	(options, args) = parser.parse_args(arglist[1:])
	if len(args) < 3 or len(args) > 4:
		print "usage: " + usage + "\n"
		print "Please run '" + progname + " -h' for detailed options"
	else:
		
		# Convert input arguments in the units/format as expected by ihrsr_MPI in applications.
		if options.apix < 0:
			print "Please enter pixel size"
			sys.exit()
		
		if len(options.symdoc) < 1:
			if options.dp < 0 or options.dphi < 0:
				print "Enter helical symmetry parameters either using --symdoc or --dp and --dphi"
				sys.exit()
			
		if options.dp < 0 or options.dphi < 0:
			# read helical symmetry parameters from symdoc
			from utilities import read_text_row
			hparams=read_text_row(options.symdoc)
			dp  = hparams[0][0]
			dphi = hparams[0][1]
		else:
			dp   = options.dp
			dphi = options.dphi
		
		rminp = int((float(options.rmin)/options.apix) + 0.5)
		rmaxp = int((float(options.rmax)/options.apix) + 0.5)

		from utilities import get_input_from_string, get_im

		searchxshiftp = int( (options.searchxshift/options.apix) + 0.5)
		xwobblep = int( (options.xwobble/options.apix) + 0.5)
		ywobble = options.ywobble/options.apix
		if( options.ystep <= 0.0 ):  ystep = 1.0
		else:                        ystep = options.ystep/options.apix
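		# All search ranges given in Angstroms are converted to pixels here; for example
		# (illustrative values only) with --rmax=80 and --apix=1.2 the pixel radius
		# becomes rmaxp = int(80/1.2 + 0.5) = 67.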
		if( dp/2.0 < ywobble):
			ERROR('ywobble has to be smaller than dp/2.', 'sxhelicon')
			sys.exit()

		try:
			from mpi import mpi_init, mpi_finalize
			sys.argv = mpi_init(len(sys.argv), sys.argv)
		except:
			ERROR('This program has only MPI version.  Please install MPI library.', 'sxhelicon')
			sys.exit()

		if global_def.CACHE_DISABLE:
			from utilities import disable_bdb_cache
			disable_bdb_cache()


		if len(args) < 4:  mask = None
		else:              mask = args[3]
		from applications import ehelix_MPI
		global_def.BATCH = True
		ehelix_MPI(args[0], args[1], args[2], options.seg_ny, options.delta, options.phiwobble, options.psi_max,\
		 searchxshiftp, xwobblep, ywobble, ystep, options.apix, dp, dphi, options.fract, rmaxp, rminp, not options.nopsisearch,\
		  mask, options.maxit, options.CTF, options.snr, options.sym,  options.function, options.npad, options.debug)
		global_def.BATCH = False


		from mpi import mpi_finalize
		mpi_finalize()
Esempio n. 41
0
def main():

    arglist = []
    for arg in sys.argv:
        arglist.append(arg)

    progname = os.path.basename(arglist[0])
    usage = (
        progname
        + " prj_stack volume fsc_curve <mask> --CTF --snr=signal_noise_ratio --list=file --group=ID --sym=symmetry -verbose=(0|1) --MPI"
    )
    parser = OptionParser(usage, version=SPARXVERSION)

    parser.add_option("--CTF", action="store_true", default=False, help="perform ctf correction")
    parser.add_option("--snr", type="float", default=1.0, help="Signal-to-Noise Ratio in the data")
    parser.add_option("--sym", type="string", default="c1", help="symmetry")
    parser.add_option("--list", type="string", help="file with list of images to be used in the first column")
    parser.add_option(
        "--group",
        type="int",
        default=-1,
        help="perform reconstruction using images for a given group number (group is attribute in the header)",
    )
    parser.add_option("--MPI", action="store_true", default=False, help="use MPI version ")
    parser.add_option("--npad", type="int", default=2, help="number of times padding (default 2)")
    parser.add_option("--verbose", type="int", default=0, help="verbose level: 0 no, 1 yes")
    (options, args) = parser.parse_args(arglist[1:])

    if len(args) != 3 and len(args) != 4:
        print usage
        sys.exit(-1)

    prj_stack = args[0]
    vol_stack = args[1]
    fsc_curve = args[2]

    if len(args) == 3:
        mask = None
    else:
        mask = get_image(args[3])

    if options.MPI:
        from mpi import mpi_init

        sys.argv = mpi_init(len(sys.argv), sys.argv)

    if global_def.CACHE_DISABLE:
        from utilities import disable_bdb_cache

        disable_bdb_cache()

    if options.list and options.group > -1:
        ERROR("options group and list cannot be used together", "recon3d_n", 1)
        sys.exit()

    from applications import recons3d_f

    global_def.BATCH = True
    recons3d_f(
        prj_stack,
        vol_stack,
        fsc_curve,
        mask,
        options.CTF,
        options.snr,
        options.sym,
        options.list,
        options.group,
        options.npad,
        options.verbose,
        options.MPI,
    )
    global_def.BATCH = False

    if options.MPI:
        from mpi import mpi_finalize

        mpi_finalize()
Esempio n. 42
0
def main(args):
	from utilities import if_error_then_all_processes_exit_program, write_text_row, drop_image, model_gauss_noise, get_im, set_params_proj, wrap_mpi_bcast, model_circle
	from logger import Logger, BaseLogger_Files
	from mpi import mpi_init, mpi_finalize, MPI_COMM_WORLD, mpi_comm_rank, mpi_comm_size, mpi_barrier
	import user_functions
	import sys
	import os
	from applications import MPI_start_end
	from optparse import OptionParser, SUPPRESS_HELP
	from global_def import SPARXVERSION
	from EMAN2 import EMData
	from multi_shc import multi_shc

	progname = os.path.basename(sys.argv[0])
	usage = progname + " stack  [output_directory] --ir=inner_radius --rs=ring_step --xr=x_range --yr=y_range  --ts=translational_search_step  --delta=angular_step --center=center_type --maxit1=max_iter1 --maxit2=max_iter2 --L2threshold=0.1 --ref_a=S --sym=c1"
	usage += """

stack			2D images in a stack file: (default required string)
directory		output directory name: into which the results will be written (if it does not exist, it will be created; if it already exists, the program exits with an error) (default required string)
"""
	
	parser = OptionParser(usage,version=SPARXVERSION)
	parser.add_option("--radius",                type="int",           help="radius of the particle: has to be less than < int(nx/2)-1 (default required int)")

	parser.add_option("--xr",                    type="string",        default='0',        help="range for translation search in x direction: search is +/xr in pixels (default '0')")
	parser.add_option("--yr",                    type="string",        default='0',        help="range for translation search in y direction: if omitted will be set to xr, search is +/yr in pixels (default '0')")
	parser.add_option("--mask3D",                type="string",        default=None,       help="3D mask file: (default sphere)")
	parser.add_option("--moon_elimination",      type="string",        default='',         help="elimination of disconnected pieces: two arguments: mass in KDa and pixel size in px/A separated by comma, no space (default none)")
	parser.add_option("--ir",                    type="int",           default=1,          help="inner radius for rotational search: > 0 (default 1)")
	
	# 'radius' and 'ou' are the same as per Pawel's request; 'ou' is hidden from the user
	# the 'ou' variable is not changed to 'radius' in the 'sparx' program. This change is at interface level only for sxviper.
	##### XXXXXXXXXXXXXXXXXXXXXX option does not exist in docs XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
	parser.add_option("--ou",                    type="int",           default=-1,         help=SUPPRESS_HELP)
	parser.add_option("--rs",                    type="int",           default=1,          help="step between rings in rotational search: >0 (default 1)")
	parser.add_option("--ts",                    type="string",        default='1.0',      help="step size of the translation search in x-y directions: search is -xr, -xr+ts, 0, xr-ts, xr, can be fractional (default '1.0')")
	parser.add_option("--delta",                 type="string",        default='2.0',      help="angular step of reference projections: (default '2.0')")
	parser.add_option("--center",                type="float",         default=-1.0,       help="centering of 3D template: average shift method; 0: no centering; 1: center of gravity (default -1.0)")
	parser.add_option("--maxit1",                type="int",           default=400,        help="maximum number of iterations performed for the GA part: (default 400)")
	parser.add_option("--maxit2",                type="int",           default=50,         help="maximum number of iterations performed for the finishing up part: (default 50)")
	parser.add_option("--L2threshold",           type="float",         default=0.03,       help="stopping criterion of GA: given as a maximum relative dispersion of volumes' L2 norms: (default 0.03)")
	parser.add_option("--ref_a",                 type="string",        default='S',        help="method for generating the quasi-uniformly distributed projection directions: (default S)")
	parser.add_option("--sym",                   type="string",        default='c1',       help="point-group symmetry of the structure: (default c1)")
	
	# parser.add_option("--function", type="string", default="ref_ali3d",         help="name of the reference preparation function (ref_ali3d by default)")
	##### XXXXXXXXXXXXXXXXXXXXXX option does not exist in docs XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
	parser.add_option("--function", type="string", default="ref_ali3d",         help= SUPPRESS_HELP)
	
	parser.add_option("--nruns",                 type="int",           default=6,          help="GA population: aka number of quasi-independent volumes (default 6)")
	parser.add_option("--doga",                  type="float",         default=0.1,        help="do GA when fraction of orientation changes less than 1.0 degrees is at least doga: (default 0.1)")
	##### XXXXXXXXXXXXXXXXXXXXXX option does not exist in docs XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
	parser.add_option("--npad",     type="int",    default= 2,                  help="padding size for 3D reconstruction (default=2)")
	parser.add_option("--fl",                    type="float",         default=0.25,       help="cut-off frequency applied to the template volume: using a hyperbolic tangent low-pass filter (default 0.25)")
	parser.add_option("--aa",                    type="float",         default=0.1,        help="fall-off of hyperbolic tangent low-pass filter: (default 0.1)")
	parser.add_option("--pwreference",           type="string",        default='',         help="text file with a reference power spectrum: (default none)")
	parser.add_option("--debug",                 action="store_true",  default=False,      help="debug info printout: (default False)")
	
	##### XXXXXXXXXXXXXXXXXXXXXX option does not exist in docs XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
	parser.add_option("--return_options", action="store_true", dest="return_options", default=False, help = SUPPRESS_HELP)	
	
	#parser.add_option("--an",       type="string", default= "-1",               help="NOT USED angular neighborhood for local searches (phi and theta)")
	#parser.add_option("--CTF",      action="store_true", default=False,         help="NOT USED Consider CTF correction during the alignment ")
	#parser.add_option("--snr",      type="float",  default= 1.0,                help="NOT USED Signal-to-Noise Ratio of the data (default 1.0)")
	# (options, args) = parser.parse_args(sys.argv[1:])

	required_option_list = ['radius']
	(options, args) = parser.parse_args(args)
	# option_dict = vars(options)
	# print parser
	
	if options.return_options:
		return parser
	
	if options.moon_elimination == "":
		options.moon_elimination = []
	else:
		options.moon_elimination = map(float, options.moon_elimination.split(","))

	# Making sure all required options appeared.
	for required_option in required_option_list:
		if not options.__dict__[required_option]:
			print "\n ==%s== mandatory option is missing.\n"%required_option
			print "Please run '" + progname + " -h' for detailed options"
			return 1



	if len(args) < 2 or len(args) > 3:
		print "usage: " + usage
		print "Please run '" + progname + " -h' for detailed options"
		return 1

	mpi_init(0, [])

	log = Logger(BaseLogger_Files())

	# 'radius' and 'ou' are the same as per Pawel's request; 'ou' is hidden from the user
	# the 'ou' variable is not changed to 'radius' in the 'sparx' program. This change is at interface level only for sxviper.
	options.ou = options.radius 
	runs_count = options.nruns
	mpi_rank = mpi_comm_rank(MPI_COMM_WORLD)
	mpi_size = mpi_comm_size(MPI_COMM_WORLD)	# Total number of processes, passed by --np option.
	
	if mpi_rank == 0:
		all_projs = EMData.read_images(args[0])
		subset = range(len(all_projs))
		# if mpi_size > len(all_projs):
		# 	ERROR('Number of processes supplied by --np needs to be less than or equal to %d (total number of images) ' % len(all_projs), 'sxviper', 1)
		# 	mpi_finalize()
		# 	return
	else:
		all_projs = None
		subset = None

	outdir = args[1]
	if mpi_rank == 0:
		if mpi_size % options.nruns != 0:
			ERROR('Number of processes needs to be a multiple of the total number of runs. The default number of runs is 6; you can change it with the --nruns option.', 'sxviper', 1)
			mpi_finalize()
			return

		if os.path.exists(outdir):
			ERROR('Output directory exists, please change the name and restart the program', "sxviper", 1)
			mpi_finalize()
			return

		os.mkdir(outdir)
		import global_def
		global_def.LOGFILE =  os.path.join(outdir, global_def.LOGFILE)

	mpi_barrier(MPI_COMM_WORLD)

	if outdir[-1] != "/":
		outdir += "/"
	log.prefix = outdir
	
	# if len(args) > 2:
	# 	ref_vol = get_im(args[2])
	# else:
	ref_vol = None

	options.user_func = user_functions.factory[options.function]

	options.CTF = False
	options.snr = 1.0
	options.an  = -1.0
	from multi_shc import multi_shc
	out_params, out_vol, out_peaks = multi_shc(all_projs, subset, runs_count, options, mpi_comm=MPI_COMM_WORLD, log=log, ref_vol=ref_vol)

	mpi_finalize()
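
The run above requires the MPI process count to be divisible by --nruns, presumably so each quasi-independent run gets an equal share of processes. A tiny illustrative pre-flight check of that constraint (hypothetical helper, not part of the original program):

def processes_per_run(mpi_size, nruns):
    # mpi_size must split evenly into nruns groups of equal size,
    # mirroring the check performed on the main node above.
    if mpi_size % nruns != 0:
        raise ValueError("process count %d is not a multiple of --nruns=%d"
                         % (mpi_size, nruns))
    return mpi_size // nruns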
Example n. 43
0
def main():

    arglist = []
    for arg in sys.argv:
        arglist.append(arg)
    progname = os.path.basename(arglist[0])
    usage = progname + " stack ref_vol outdir <maskfile> --ir=inner_radius --ou=outer_radius --rs=ring_step --xr=x_range --yr=y_range  --ts=translational_search_step  --delta=angular_step --an=angular_neighborhood --deltapsi=Delta_psi --startpsi=Start_psi --maxit=max_iter --stoprnct=percentage_to_stop --CTF --snr=SNR  --ref_a=S --sym=c1 --function=user_function --Fourvar=Fourier_variance --debug --MPI"
    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option(
        "--ir",
        type="int",
        default=1,
        help="inner radius for rotational correlation > 0 (set to 1)")
    parser.add_option(
        "--ou",
        type="int",
        default=-1,
        help=
        "outer radius for rotational correlation < int(nx/2)-1 (set to the radius of the particle)"
    )
    parser.add_option(
        "--rs",
        type="int",
        default=1,
        help="step between rings in rotational correlation >0  (set to 1)")
    parser.add_option(
        "--xr",
        type="string",
        default="4 2 1 1 1",
        help="range for translation search in x direction, search is +/xr")
    parser.add_option(
        "--yr",
        type="string",
        default="-1",
        help=
        "range for translation search in y direction, search is +/yr (default = same as xr)"
    )
    parser.add_option(
        "--ts",
        type="string",
        default="1 1 1 0.5 0.25",
        help=
        "step size of the translation search in both directions, search is -xr, -xr+ts, 0, xr-ts, xr, can be fractional"
    )
    parser.add_option(
        "--delta",
        type="string",
        default="10 6 4 3 2",
        help=
        "angular step of reference projections, (default is a sequence: 10 6 4 3 2"
    )
    parser.add_option(
        "--an",
        type="string",
        default="-1",
        help="angular neighborhood for local searches (phi and theta)")
    parser.add_option("--apsi",
                      type="string",
                      default="-1",
                      help="angular neighborhood for local searches (psi)")
    parser.add_option("--deltapsi",
                      type="string",
                      default="-1",
                      help="Delta psi for coarse search")
    parser.add_option("--startpsi",
                      type="string",
                      default="-1",
                      help="Start psi for coarse search")
    #parser.add_option("--center",   type="float",        default= -1,                 help="-1: average shift method; 0: no centering; 1: center of gravity (default=-1)")
    parser.add_option(
        "--maxit",
        type="float",
        default=5,
        help=
        "maximum number of iterations performed for each angular step (set to 5) "
    )
    parser.add_option(
        "--stoprnct",
        type="float",
        default=0.0,
        help=
        "Minimum percentage of particles that change orientation to stop the program"
    )
    parser.add_option("--CTF",
                      action="store_true",
                      default=False,
                      help="Consider CTF correction during the alignment ")
    parser.add_option("--snr",
                      type="float",
                      default=1.0,
                      help="Signal-to-Noise Ratio of the data")
    parser.add_option(
        "--ref_a",
        type="string",
        default="S",
        help=
        "method for generating the quasi-uniformly distributed projection directions (default S)"
    )
    parser.add_option("--sym",
                      type="string",
                      default="c1",
                      help="symmetry of the refined structure")
    parser.add_option(
        "--function",
        type="string",
        default="ref_ali3d",
        help="name of the reference preparation function (ref_ali3d)")
    parser.add_option("--MPI",
                      action="store_true",
                      default=False,
                      help="whether to use MPI version")
    parser.add_option("--Fourvar",
                      action="store_true",
                      default=False,
                      help="compute Fourier variance")
    parser.add_option("--npad",
                      type="int",
                      default=2,
                      help="padding size for 3D reconstruction (default=2)")
    parser.add_option("--debug",
                      action="store_true",
                      default=False,
                      help="debug")
    parser.add_option("--shc",
                      action="store_true",
                      default=False,
                      help="use SHC algorithm")
    parser.add_option("--nsoft",
                      type="int",
                      default=1,
                      help="number of SHC soft assignments (default=1)")
    parser.add_option("--nh2",
                      action="store_true",
                      default=False,
                      help="new - SHC2")
    parser.add_option("--ns",
                      action="store_true",
                      default=False,
                      help="new - saturn")
    parser.add_option("--ns2",
                      action="store_true",
                      default=False,
                      help="new - saturn2")
    parser.add_option("--chunk",
                      type="float",
                      default=0.2,
                      help="percentage of data used for alignment")
    parser.add_option("--rantest",
                      action="store_true",
                      default=False,
                      help="rantest")
    parser.add_option("--searchpsi",
                      action="store_true",
                      default=False,
                      help="psi refinement")
    parser.add_option("--gamma", type="float", default=-1.0, help="gamma")
    (options, args) = parser.parse_args(arglist[1:])
    if len(args) < 3 or len(args) > 4:
        print("usage: " + usage)
        print("Please run '" + progname + " -h' for detailed options")
    else:
        if len(args) == 3:
            mask = None
        else:
            mask = args[3]
        if options.MPI:
            from mpi import mpi_init, mpi_finalize
            sys.argv = mpi_init(len(sys.argv), sys.argv)

        if global_def.CACHE_DISABLE:
            from utilities import disable_bdb_cache
            disable_bdb_cache()
        #  centering permanently disabled due to the way new polar searches are done
        center = 0
        if (options.ns):
            global_def.BATCH = True
            from development import ali3d_saturn
            ali3d_saturn(args[0],
                         args[1],
                         args[2],
                         mask,
                         options.ir,
                         options.ou,
                         options.rs,
                         options.xr,
                         options.yr,
                         options.ts,
                         options.delta,
                         options.an,
                         options.apsi,
                         options.deltapsi,
                         options.startpsi,
                         center,
                         options.maxit,
                         options.CTF,
                         options.snr,
                         options.ref_a,
                         options.sym,
                         options.function,
                         options.Fourvar,
                         options.npad,
                         options.debug,
                         options.MPI,
                         options.stoprnct,
                         gamma=options.gamma)
            global_def.BATCH = False
        elif (options.ns2):
            global_def.BATCH = True
            from development import ali3d_saturn2
            ali3d_saturn2(args[0], args[1], args[2], mask, options.ir,
                          options.ou, options.rs, options.xr, options.yr,
                          options.ts, options.delta, options.an, options.apsi,
                          options.deltapsi, options.startpsi, center,
                          options.maxit, options.CTF, options.snr,
                          options.ref_a, options.sym, options.function,
                          options.Fourvar, options.npad, options.debug,
                          options.MPI, options.stoprnct)
            global_def.BATCH = False
        elif (options.shc):
            if not options.MPI:
                print("Only MPI version is implemented!!!")
            else:
                global_def.BATCH = True
                if (options.nsoft == 1):
                    from applications import ali3d_shcMPI
                    ali3d_shcMPI(args[0],
                                 args[1],
                                 args[2],
                                 mask,
                                 options.ir,
                                 options.ou,
                                 options.rs,
                                 options.xr,
                                 options.yr,
                                 options.ts,
                                 options.delta,
                                 options.an,
                                 options.apsi,
                                 options.deltapsi,
                                 options.startpsi,
                                 center,
                                 options.maxit,
                                 options.CTF,
                                 options.snr,
                                 options.ref_a,
                                 options.sym,
                                 options.function,
                                 options.Fourvar,
                                 options.npad,
                                 options.debug,
                                 options.stoprnct,
                                 gamma=options.gamma)
                elif (options.nsoft == 0):
                    from applications import ali3d_shc0MPI
                    ali3d_shc0MPI(args[0],
                                  args[1],
                                  args[2],
                                  mask,
                                  options.ir,
                                  options.ou,
                                  options.rs,
                                  options.xr,
                                  options.yr,
                                  options.ts,
                                  options.delta,
                                  options.an,
                                  options.apsi,
                                  options.deltapsi,
                                  options.startpsi,
                                  center,
                                  options.maxit,
                                  options.CTF,
                                  options.snr,
                                  options.ref_a,
                                  options.sym,
                                  options.function,
                                  options.Fourvar,
                                  options.npad,
                                  options.debug,
                                  options.stoprnct,
                                  gamma=options.gamma)
                else:
                    from multi_shc import ali3d_multishc_soft
                    import user_functions
                    options.user_func = user_functions.factory[
                        options.function]
                    ali3d_multishc_soft(args[0],
                                        args[1],
                                        options,
                                        mpi_comm=None,
                                        log=None,
                                        nsoft=options.nsoft)
                global_def.BATCH = False
        elif (options.nh2):
            global_def.BATCH = True
            from development import ali3d_shc2
            ali3d_shc2(args[0], args[1], args[2], mask, options.ir, options.ou,
                       options.rs, options.xr, options.yr, options.ts,
                       options.delta, options.an, options.apsi,
                       options.deltapsi, options.startpsi, center,
                       options.maxit, options.CTF, options.snr, options.ref_a,
                       options.sym, options.function, options.Fourvar,
                       options.npad, options.debug, options.MPI,
                       options.stoprnct)
            global_def.BATCH = False
        elif options.searchpsi:
            from applications import ali3dpsi_MPI
            global_def.BATCH = True
            ali3dpsi_MPI(args[0], args[1], args[2], mask, options.ir,
                         options.ou, options.rs, options.xr, options.yr,
                         options.ts, options.delta, options.an, options.apsi,
                         options.deltapsi, options.startpsi, center,
                         options.maxit, options.CTF, options.snr,
                         options.ref_a, options.sym, options.function,
                         options.Fourvar, options.npad, options.debug,
                         options.stoprnct)
            global_def.BATCH = False
        else:
            if options.rantest:
                from development import ali3d_rantest
                global_def.BATCH = True
                ali3d_rantest(args[0], args[1], args[2], mask, options.ir,
                              options.ou, options.rs, options.xr, options.yr,
                              options.ts, options.delta, options.an,
                              options.deltapsi, options.startpsi, center,
                              options.maxit, options.CTF, options.snr,
                              options.ref_a, options.sym, options.function,
                              options.Fourvar, options.npad, options.debug,
                              options.stoprnct)
                global_def.BATCH = False
            else:
                from applications import ali3d
                global_def.BATCH = True
                ali3d(args[0], args[1], args[2], mask, options.ir, options.ou,
                      options.rs, options.xr, options.yr, options.ts,
                      options.delta, options.an, options.apsi,
                      options.deltapsi, options.startpsi, center,
                      options.maxit, options.CTF, options.snr, options.ref_a,
                      options.sym, options.function, options.Fourvar,
                      options.npad, options.debug, options.MPI,
                      options.stoprnct)
                global_def.BATCH = False

        if options.MPI: mpi_finalize()
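
main() above is essentially a flag dispatcher: each boolean option selects a different alignment back end. A compact, purely illustrative summary of that mapping (routine names exactly as imported above; the table itself is not part of the original script):

# flag checked first wins; "(default)" applies when none of the flags is set
ALI3D_BACKENDS = [
    ("--ns",        "development.ali3d_saturn"),
    ("--ns2",       "development.ali3d_saturn2"),
    ("--shc",       "applications.ali3d_shcMPI (nsoft=1), "
                    "applications.ali3d_shc0MPI (nsoft=0), "
                    "multi_shc.ali3d_multishc_soft (otherwise)"),
    ("--nh2",       "development.ali3d_shc2"),
    ("--searchpsi", "applications.ali3dpsi_MPI"),
    ("--rantest",   "development.ali3d_rantest"),
    ("(default)",   "applications.ali3d"),
]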
Example n. 44
0
def main():
	progname = os.path.basename(sys.argv[0])
	usage = progname + " stack outdir <maskfile> --ir=inner_radius --ou=outer_radius --rs=ring_step --xr=x_range --yr=y_range --ts=translation_step --dst=delta --center=center --maxit=max_iteration --CTF --snr=SNR --Fourvar=Fourier_variance --Ng=group_number --Function=user_function_name --CUDA --GPUID --MPI"
	parser = OptionParser(usage,version=SPARXVERSION)
	parser.add_option("--ir",       type="float",  default=1,             help="inner radius for rotational correlation > 0 (set to 1)")
	parser.add_option("--ou",       type="float",  default=-1,            help="outer radius for rotational correlation < nx/2-1 (set to the radius of the particle)")
	parser.add_option("--rs",       type="float",  default=1,             help="step between rings in rotational correlation > 0 (set to 1)" ) 
	parser.add_option("--xr",       type="string", default="4 2 1 1",     help="range for translation search in x direction, search is +/xr ")
	parser.add_option("--yr",       type="string", default="-1",          help="range for translation search in y direction, search is +/yr ")
	parser.add_option("--ts",       type="string", default="2 1 0.5 0.25",help="step of translation search in both directions")
	parser.add_option("--nomirror", action="store_true", default=False,   help="Disable checking mirror orientations of images (default False)")
	parser.add_option("--dst",      type="float",  default=0.0,           help="delta")
	parser.add_option("--center",   type="float",  default=-1,            help="-1.average center method; 0.not centered; 1.phase approximation; 2.cc with Gaussian function; 3.cc with donut-shaped image 4.cc with user-defined reference 5.cc with self-rotated average")
	parser.add_option("--maxit",    type="float",  default=0,             help="maximum number of iterations (0 means the maximum iterations is 10, but it will automatically stop should the criterion falls")
	parser.add_option("--CTF",      action="store_true", default=False,   help="use CTF correction during alignment")
	parser.add_option("--snr",      type="float",  default=1.0,           help="signal-to-noise ratio of the data (set to 1.0)")
	parser.add_option("--Fourvar",  action="store_true", default=False,   help="compute Fourier variance")
	#parser.add_option("--Ng",       type="int",          default=-1,      help="number of groups in the new CTF filteration")
	parser.add_option("--function", type="string",       default="ref_ali2d",  help="name of the reference preparation function (default ref_ali2d)")
	#parser.add_option("--CUDA",     action="store_true", default=False,   help="use CUDA program")
	#parser.add_option("--GPUID",    type="string",    default="",         help="ID of GPUs available")
	parser.add_option("--MPI",      action="store_true", default=False,   help="use MPI version ")
	parser.add_option("--rotational", action="store_true", default=False, help="rotational alignment with optional limited in-plane angle, the parameters are: ir, ou, rs, psi_max, mode(F or H), maxit, orient, randomize")
	parser.add_option("--psi_max",  type="float",        default=180.0,   help="psi_max")
	parser.add_option("--mode",     type="string",       default="F",     help="Full or Half rings, default F")
	parser.add_option("--randomize",action="store_true", default=False,   help="randomize initial rotations (suboption of friedel, default False)")
	parser.add_option("--orient",   action="store_true", default=False,   help="orient images such that the average is symmetric about x-axis, for layer lines (suboption of friedel, default False)")
	parser.add_option("--template", type="string",       default=None,    help="2D alignment will be initialized using the template provided (only non-MPI version, default None)")
	parser.add_option("--random_method",   type="string", default="",   help="use SHC or SCF (default standard method)")

	(options, args) = parser.parse_args()

	if len(args) < 2 or len(args) > 3:
		print "usage: " + usage
		print "Please run '" + progname + " -h' for detailed options"
	elif(options.rotational):
		from applications import ali2d_rotationaltop
		global_def.BATCH = True
		ali2d_rotationaltop(args[1], args[0], options.randomize, options.orient, options.ir, options.ou, options.rs, options.psi_max, options.mode, options.maxit)
	else:
		if args[1] == 'None': outdir = None
		else:		          outdir = args[1]

		if len(args) == 2: mask = None
		else:              mask = args[2]
		

		if global_def.CACHE_DISABLE:
			from utilities import disable_bdb_cache
			disable_bdb_cache()
		
		global_def.BATCH = True
		if  options.MPI:
			from applications import ali2d_base
			from mpi import mpi_init, mpi_comm_size, mpi_comm_rank, MPI_COMM_WORLD
			sys.argv = mpi_init(len(sys.argv),sys.argv)

			number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
			myid = mpi_comm_rank(MPI_COMM_WORLD)
			main_node = 0

			if(myid == main_node):
				import subprocess
				from logger import Logger, BaseLogger_Files
				#  Create output directory
				log = Logger(BaseLogger_Files())
				log.prefix = os.path.join(outdir)
				cmd = "mkdir "+log.prefix
				outcome = subprocess.call(cmd, shell=True)
				log.prefix += "/"
			else:
				outcome = 0
				log = None
			from utilities       import bcast_number_to_all
			outcome  = bcast_number_to_all(outcome, source_node = main_node)
			if(outcome == 1):
				ERROR('Output directory exists, please change the name and restart the program', "ali2d_MPI", 1, myid)

			dummy = ali2d_base(args[0], outdir, mask, options.ir, options.ou, options.rs, options.xr, options.yr, \
				options.ts, options.nomirror, options.dst, \
				options.center, options.maxit, options.CTF, options.snr, options.Fourvar, \
				options.function, random_method = options.random_method, log = log, \
				number_of_proc = number_of_proc, myid = myid, main_node = main_node, mpi_comm = MPI_COMM_WORLD,\
				write_headers = True)
		else:
			print " Non-MPI is no more in use, try MPI option, please."
			"""
			from applications import ali2d
			ali2d(args[0], outdir, mask, options.ir, options.ou, options.rs, options.xr, options.yr, \
				options.ts, options.nomirror, options.dst, \
				options.center, options.maxit, options.CTF, options.snr, options.Fourvar, \
				-1, options.function, False, "", options.MPI, \
				options.template, random_method = options.random_method)
	    	"""
		global_def.BATCH = False

		if options.MPI:
			from mpi import mpi_finalize
			mpi_finalize()
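
Example n. 44 relies on a small idiom worth noting: only the main MPI rank creates the output directory, and the outcome is broadcast so that every rank aborts consistently when the directory already exists. A minimal sketch of that idiom, assuming the same SPARX helper bcast_number_to_all is available; illustrative only:

import os

def make_outdir_on_main(outdir, myid, main_node=0):
    from utilities import bcast_number_to_all
    if myid == main_node:
        if os.path.exists(outdir):
            outcome = 1          # already there -> every rank should abort
        else:
            os.mkdir(outdir)
            outcome = 0
    else:
        outcome = 0
    # broadcast the result so all ranks agree on whether to continue
    outcome = bcast_number_to_all(outcome, source_node=main_node)
    return outcome == 0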
Example n. 45
0
def main():
    arglist = []
    for arg in sys.argv:
        arglist.append(arg)
    progname = os.path.basename(sys.argv[0])
    usage = progname + " data_stack reference_stack outdir <maskfile> --ir=inner_radius --ou=outer_radius --rs=ring_step --xr=x_range --yr=y_range  --ts=translation_step --center=center_type --maxit=max_iteration --CTF --snr=SNR --function=user_function_name --rand_seed=random_seed --MPI"
    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option(
        "--ir",
        type="float",
        default=1,
        help="  inner radius for rotational correlation > 0 (set to 1)")
    parser.add_option(
        "--ou",
        type="float",
        default=-1,
        help=
        "  outer radius for rotational correlation < nx/2-1 (set to the radius of the particle)"
    )
    parser.add_option(
        "--rs",
        type="float",
        default=1,
        help="  step between rings in rotational correlation > 0 (set to 1)")
    parser.add_option(
        "--xr",
        type="float",
        default=0,
        help="  range for translation search in x direction, search is +/-xr ")
    parser.add_option(
        "--yr",
        type="float",
        default=0,
        help="  range for translation search in y direction, search is +/-yr ")
    parser.add_option("--ts",
                      type="float",
                      default=1,
                      help="  step of translation search in both directions")
    parser.add_option(
        "--center",
        type="float",
        default=1,
        help=
        "  0 - if you do not want the average to be centered, 1 - center the average (default=1)"
    )
    parser.add_option("--maxit",
                      type="float",
                      default=10,
                      help="  maximum number of iterations (set to 10) ")
    parser.add_option(
        "--CTF",
        action="store_true",
        default=False,
        help=" Consider CTF correction during multiple reference alignment")
    parser.add_option("--snr",
                      type="float",
                      default=1.0,
                      help="  signal-to-noise ratio of the data (set to 1.0)")
    parser.add_option("--function",
                      type="string",
                      default="ref_ali2d",
                      help="  name of the reference preparation function")
    parser.add_option("--rand_seed",
                      type="int",
                      default=1000,
                      help=" random seed of initial (set to 1000)")
    parser.add_option("--MPI",
                      action="store_true",
                      default=False,
                      help="  whether to use MPI version ")
    parser.add_option("--EQ",
                      action="store_true",
                      default=False,
                      help="  equal version ")
    (options, args) = parser.parse_args(arglist[1:])
    if len(args) < 3 or len(args) > 4:
        print("usage: " + usage)
        print("Please run '" + progname + " -h' for detailed options")
    else:

        if len(args) == 3:
            mask = None
        else:
            mask = args[3]

        if global_def.CACHE_DISABLE:
            from utilities import disable_bdb_cache
            disable_bdb_cache()

        if options.MPI:
            from mpi import mpi_init
            sys.argv = mpi_init(len(sys.argv), sys.argv)

        global_def.BATCH = True
        if options.EQ:
            from development import mrefeq_ali2df
            #print  "  calling MPI",options.MPI,options.function,options.rand_seed
            #print  args
            mrefeq_ali2df(args[0], args[1], mask, options.ir, options.ou,
                          options.rs, options.xr, options.yr, options.ts,
                          options.center, options.maxit, options.CTF,
                          options.snr, options.function, options.rand_seed,
                          options.MPI)
        else:
            from applications import mref_ali2d
            mref_ali2d(args[0], args[1], args[2], mask, options.ir, options.ou,
                       options.rs, options.xr, options.yr, options.ts,
                       options.center, options.maxit, options.CTF, options.snr,
                       options.function, options.rand_seed, options.MPI)
        global_def.BATCH = False
        if options.MPI:
            from mpi import mpi_finalize
            mpi_finalize()
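
As in the other 2-D wrappers above, the optional fourth positional argument names the mask; everything else is forwarded to mref_ali2d (or mrefeq_ali2df when --EQ is given). A tiny illustrative helper for that positional-mask convention (hypothetical, not part of the original code):

def optional_mask(args, n_required=3):
    # the first n_required positional arguments are mandatory;
    # one extra argument, if present, names the mask
    return args[n_required] if len(args) > n_required else None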
Example n. 46
0
def main():
	from logger import Logger, BaseLogger_Files
	arglist = []
	i = 0
	while( i < len(sys.argv) ):
		if sys.argv[i]=='-p4pg':
			i = i+2
		elif sys.argv[i]=='-p4wd':
			i = i+2
		else:
			arglist.append( sys.argv[i] )
			i = i+1
	progname = os.path.basename(arglist[0])
	usage = progname + " stack  outdir  <mask> --focus=3Dmask --radius=outer_radius --delta=angular_step" +\
	"--an=angular_neighborhood --maxit=max_iter  --CTF --sym=c1 --function=user_function --independent=indenpendent_runs  --number_of_images_per_group=number_of_images_per_group  --low_pass_frequency=.25  --seed=random_seed"
	parser = OptionParser(usage,version=SPARXVERSION)
	parser.add_option("--focus",                         type   ="string",        default ='',                    help="bineary 3D mask for focused clustering ")
	parser.add_option("--ir",                            type   = "int",          default =1, 	                  help="inner radius for rotational correlation > 0 (set to 1)")
	parser.add_option("--radius",                        type   = "int",          default =-1,	                  help="particle radius in pixel for rotational correlation <nx-1 (set to the radius of the particle)")
	parser.add_option("--maxit",	                     type   = "int",          default =25, 	                  help="maximum number of iteration")
	parser.add_option("--rs",                            type   = "int",          default =1,	                  help="step between rings in rotational correlation >0 (set to 1)" ) 
	parser.add_option("--xr",                            type   ="string",        default ='1',                   help="range for translation search in x direction, search is +/-xr ")
	parser.add_option("--yr",                            type   ="string",        default ='-1',	              help="range for translation search in y direction, search is +/-yr (default = same as xr)")
	parser.add_option("--ts",                            type   ="string",        default ='0.25',                help="step size of the translation search in both directions direction, search is -xr, -xr+ts, 0, xr-ts, xr ")
	parser.add_option("--delta",                         type   ="string",        default ='2',                   help="angular step of reference projections")
	parser.add_option("--an",                            type   ="string",        default ='-1',	              help="angular neighborhood for local searches")
	parser.add_option("--center",                        type   ="int",           default =0,	                  help="0 - if you do not want the volume to be centered, 1 - center the volume using cog (default=0)")
	parser.add_option("--nassign",                       type   ="int",           default =1, 	                  help="number of reassignment iterations performed for each angular step (set to 3) ")
	parser.add_option("--nrefine",                       type   ="int",           default =0, 	                  help="number of alignment iterations performed for each angular step (set to 0)")
	parser.add_option("--CTF",                           action ="store_true",    default =False,                 help="do CTF correction during clustring")
	parser.add_option("--stoprnct",                      type   ="float",         default =3.0,                   help="Minimum percentage of assignment change to stop the program")
	parser.add_option("--sym",                           type   ="string",        default ='c1',                  help="symmetry of the structure ")
	parser.add_option("--function",                      type   ="string",        default ='do_volume_mrk05',     help="name of the reference preparation function")
	parser.add_option("--independent",                   type   ="int",           default = 3,                    help="number of independent run")
	parser.add_option("--number_of_images_per_group",    type   ="int",           default =1000,                  help="number of groups")
	parser.add_option("--low_pass_filter",               type   ="float",         default =-1.0,                  help="absolute frequency of low-pass filter for 3d sorting on the original image size" )
	parser.add_option("--nxinit",                        type   ="int",           default =64,                    help="initial image size for sorting" )
	parser.add_option("--unaccounted",                   action ="store_true",    default =False,                 help="reconstruct the unaccounted images")
	parser.add_option("--seed",                          type   ="int",           default =-1,                    help="random seed for create initial random assignment for EQ Kmeans")
	parser.add_option("--smallest_group",                type   ="int",           default =500,                   help="minimum members for identified group")
	parser.add_option("--sausage",                       action ="store_true",    default =False,                 help="way of filter volume")
	parser.add_option("--chunkdir",                      type   ="string",        default ='',                    help="chunkdir for computing margin of error")
	parser.add_option("--PWadjustment",                  type   ="string",        default ='',                    help="1-D power spectrum of PDB file used for EM volume power spectrum correction")
	parser.add_option("--protein_shape",                 type   ="string",        default ='g',                   help="protein shape. It defines protein preferred orientation angles. Currently it has g and f two types ")
	parser.add_option("--upscale",                       type   ="float",         default =0.5,                   help=" scaling parameter to adjust the power spectrum of EM volumes")
	parser.add_option("--wn",                            type   ="int",           default =0,                     help="optimal window size for data processing")
	parser.add_option("--interpolation",                 type   ="string",        default ="4nn",                 help="3-d reconstruction interpolation method, two options trl and 4nn")
	(options, args) = parser.parse_args(arglist[1:])
	if len(args) < 1  or len(args) > 4:
    		print "usage: " + usage
    		print "Please run '" + progname + " -h' for detailed options"
	else:

		if len(args)>2:
			mask_file = args[2]
		else:
			mask_file = None

		orgstack                        =args[0]
		masterdir                       =args[1]
		global_def.BATCH = True
		#---initialize MPI related variables
		from mpi import mpi_init, mpi_comm_size, MPI_COMM_WORLD, mpi_comm_rank, mpi_barrier, mpi_bcast, MPI_INT, MPI_CHAR
		sys.argv = mpi_init(len(sys.argv),sys.argv)
		nproc    = mpi_comm_size(MPI_COMM_WORLD)
		myid     = mpi_comm_rank(MPI_COMM_WORLD)
		mpi_comm = MPI_COMM_WORLD
		main_node= 0
		# import some utilities
		from utilities import get_im,bcast_number_to_all,cmdexecute,write_text_file,read_text_file,wrap_mpi_bcast, get_params_proj, write_text_row
		from applications import recons3d_n_MPI, mref_ali3d_MPI, Kmref_ali3d_MPI
		from statistics import k_means_match_clusters_asg_new,k_means_stab_bbenum
		from applications import mref_ali3d_EQ_Kmeans, ali3d_mref_Kmeans_MPI  
		# Create the main log file
		from logger import Logger,BaseLogger_Files
		if myid ==main_node:
			log_main=Logger(BaseLogger_Files())
			log_main.prefix = masterdir+"/"
		else:
			log_main =None
		#--- fill input parameters into dictionary named after Constants
		Constants		                         ={}
		Constants["stack"]                       = args[0]
		Constants["masterdir"]                   = masterdir
		Constants["mask3D"]                      = mask_file
		Constants["focus3Dmask"]                 = options.focus
		Constants["indep_runs"]                  = options.independent
		Constants["stoprnct"]                    = options.stoprnct
		Constants["number_of_images_per_group"]  = options.number_of_images_per_group
		Constants["CTF"]                         = options.CTF
		Constants["maxit"]                       = options.maxit
		Constants["ir"]                          = options.ir 
		Constants["radius"]                      = options.radius 
		Constants["nassign"]                     = options.nassign
		Constants["rs"]                          = options.rs 
		Constants["xr"]                          = options.xr
		Constants["yr"]                          = options.yr
		Constants["ts"]                          = options.ts
		Constants["delta"]               		 = options.delta
		Constants["an"]                  		 = options.an
		Constants["sym"]                 		 = options.sym
		Constants["center"]              		 = options.center
		Constants["nrefine"]             		 = options.nrefine
		#Constants["fourvar"]            		 = options.fourvar 
		Constants["user_func"]           		 = options.function
		Constants["low_pass_filter"]     		 = options.low_pass_filter # enforced low_pass_filter
		#Constants["debug"]              		 = options.debug
		Constants["main_log_prefix"]     		 = args[1]
		#Constants["importali3d"]        		 = options.importali3d
		Constants["myid"]	             		 = myid
		Constants["main_node"]           		 = main_node
		Constants["nproc"]               		 = nproc
		Constants["log_main"]            		 = log_main
		Constants["nxinit"]              		 = options.nxinit
		Constants["unaccounted"]         		 = options.unaccounted
		Constants["seed"]                		 = options.seed
		Constants["smallest_group"]      		 = options.smallest_group
		Constants["sausage"]             		 = options.sausage
		Constants["chunkdir"]            		 = options.chunkdir
		Constants["PWadjustment"]        		 = options.PWadjustment
		Constants["upscale"]             		 = options.upscale
		Constants["wn"]                  		 = options.wn
		Constants["3d-interpolation"]    		 = options.interpolation
		Constants["protein_shape"]    		     = options.protein_shape 
		# -----------------------------------------------------
		#
		# Create and initialize Tracker dictionary with input options
		Tracker = 			    		{}
		Tracker["constants"]       = Constants
		Tracker["maxit"]           = Tracker["constants"]["maxit"]
		Tracker["radius"]          = Tracker["constants"]["radius"]
		#Tracker["xr"]             = ""
		#Tracker["yr"]             = "-1"  # Do not change!
		#Tracker["ts"]             = 1
		#Tracker["an"]             = "-1"
		#Tracker["delta"]          = "2.0"
		#Tracker["zoom"]           = True
		#Tracker["nsoft"]          = 0
		#Tracker["local"]          = False
		#Tracker["PWadjustment"]   = Tracker["constants"]["PWadjustment"]
		Tracker["upscale"]         = Tracker["constants"]["upscale"]
		#Tracker["upscale"]        = 0.5
		Tracker["applyctf"]        = False  #  Should the data be premultiplied by the CTF.  Set to False for local continuous.
		#Tracker["refvol"]         = None
		Tracker["nxinit"]          = Tracker["constants"]["nxinit"]
		#Tracker["nxstep"]         = 32
		Tracker["icurrentres"]     = -1
		#Tracker["ireachedres"]    = -1
		#Tracker["lowpass"]        = 0.4
		#Tracker["falloff"]        = 0.2
		#Tracker["inires"]         = options.inires  # Now in A, convert to absolute before using
		Tracker["fuse_freq"]       = 50  # Now in A, convert to absolute before using
		#Tracker["delpreviousmax"] = False
		#Tracker["anger"]          = -1.0
		#Tracker["shifter"]        = -1.0
		#Tracker["saturatecrit"]   = 0.95
		#Tracker["pixercutoff"]    = 2.0
		#Tracker["directory"]      = ""
		#Tracker["previousoutputdir"] = ""
		#Tracker["eliminated-outliers"] = False
		#Tracker["mainiteration"]  = 0
		#Tracker["movedback"]      = False
		#Tracker["state"]          = Tracker["constants"]["states"][0] 
		#Tracker["global_resolution"] =0.0
		Tracker["orgstack"]        = orgstack
		#--------------------------------------------------------------------
		# import from utilities
		from utilities import sample_down_1D_curve,get_initial_ID,remove_small_groups,print_upper_triangular_matrix,print_a_line_with_timestamp
		from utilities import print_dict,get_resolution_mrk01,partition_to_groups,partition_independent_runs,get_outliers
		from utilities import merge_groups, save_alist, margin_of_error, get_margin_of_error, do_two_way_comparison, select_two_runs, get_ali3d_params
		from utilities import counting_projections, unload_dict, load_dict, get_stat_proj, create_random_list, get_number_of_groups, recons_mref
		from utilities import apply_low_pass_filter, get_groups_from_partition, get_number_of_groups, get_complementary_elements_total, update_full_dict
		from utilities import count_chunk_members, set_filter_parameters_from_adjusted_fsc, adjust_fsc_down, get_two_chunks_from_stack
		####------------------------------------------------------------------
		#
		# Get the pixel size; if none, set to 1.0, and the original image size
		from utilities import get_shrink_data_huang
		if(myid == main_node):
			line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
			print(line+"Initialization of 3-D sorting")
			a = get_im(orgstack)
			nnxo = a.get_xsize()
			if( Tracker["nxinit"] > nnxo ):
				ERROR("Image size less than minimum permitted $d"%Tracker["nxinit"],"sxsort3d.py",1)
				nnxo = -1
			else:
				if Tracker["constants"]["CTF"]:
					i = a.get_attr('ctf')
					pixel_size = i.apix
					fq = pixel_size/Tracker["fuse_freq"]
				else:
					pixel_size = 1.0
					#  No pixel size, fusing computed as 5 Fourier pixels
					fq = 5.0/nnxo
				del a
		else:
			nnxo = 0
			fq = 0.0
			pixel_size = 1.0
		nnxo = bcast_number_to_all(nnxo, source_node = main_node)
		if( nnxo < 0 ):
			mpi_finalize()
			exit()
		pixel_size = bcast_number_to_all(pixel_size, source_node = main_node)
		fq         = bcast_number_to_all(fq, source_node = main_node)
		if Tracker["constants"]["wn"]==0:
			Tracker["constants"]["nnxo"]          = nnxo
		else:
			Tracker["constants"]["nnxo"]          = Tracker["constants"]["wn"]
			nnxo                                  = Tracker["constants"]["nnxo"]
		Tracker["constants"]["pixel_size"]        = pixel_size
		Tracker["fuse_freq"]                      = fq
		del fq, nnxo, pixel_size
		if(Tracker["constants"]["radius"] < 1):
			Tracker["constants"]["radius"]  = Tracker["constants"]["nnxo"]//2-2
		elif((2*Tracker["constants"]["radius"] +2) > Tracker["constants"]["nnxo"]):
			ERROR("Particle radius set too large!","sxsort3d.py",1,myid)
####-----------------------------------------------------------------------------------------
		# Master directory
		if myid == main_node:
			if masterdir =="":
				timestring = strftime("_%d_%b_%Y_%H_%M_%S", localtime())
				masterdir ="master_sort3d"+timestring
			li =len(masterdir)
			cmd="{} {}".format("mkdir", masterdir)
			os.system(cmd)
		else:
			li=0
		li = mpi_bcast(li,1,MPI_INT,main_node,MPI_COMM_WORLD)[0]
		if li>0:
			masterdir = mpi_bcast(masterdir,li,MPI_CHAR,main_node,MPI_COMM_WORLD)
			import string
			masterdir = string.join(masterdir,"")
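		# Note: the SPARX mpi_bcast used above returns the broadcast string as a
		# sequence of single characters, hence the string.join to rebuild the
		# master directory name on the non-main ranks.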
		if myid ==main_node:
			print_dict(Tracker["constants"],"Permanent settings of 3-D sorting program")
		######### create a vstack from input stack to the local stack in masterdir
		# stack name set to default
		Tracker["constants"]["stack"]       = "bdb:"+masterdir+"/rdata"
		Tracker["constants"]["ali3d"]       = os.path.join(masterdir, "ali3d_init.txt")
		Tracker["constants"]["ctf_params"]  = os.path.join(masterdir, "ctf_params.txt")
		Tracker["constants"]["partstack"]   = Tracker["constants"]["ali3d"]  # also serves for refinement
		if myid == main_node:
			total_stack = EMUtil.get_image_count(Tracker["orgstack"])
		else:
			total_stack = 0
		total_stack = bcast_number_to_all(total_stack, source_node = main_node)
		mpi_barrier(MPI_COMM_WORLD)
		from time import sleep
		while not os.path.exists(masterdir):
				print  "Node ",myid,"  waiting..."
				sleep(5)
		mpi_barrier(MPI_COMM_WORLD)
		if myid == main_node:
			log_main.add("Sphire sort3d ")
			log_main.add("the sort3d master directory is "+masterdir)
		#####
		###----------------------------------------------------------------------------------
		# Initial data analysis and handle two chunk files
		from random import shuffle
		# Compute the resolution 
		#### make chunkdir dictionary for computing margin of error
		import user_functions
		user_func  = user_functions.factory[Tracker["constants"]["user_func"]]
		chunk_dict = {}
		chunk_list = []
		if myid == main_node:
			chunk_one = read_text_file(os.path.join(Tracker["constants"]["chunkdir"],"chunk0.txt"))
			chunk_two = read_text_file(os.path.join(Tracker["constants"]["chunkdir"],"chunk1.txt"))
		else:
			chunk_one = 0
			chunk_two = 0
		chunk_one = wrap_mpi_bcast(chunk_one, main_node)
		chunk_two = wrap_mpi_bcast(chunk_two, main_node)
		mpi_barrier(MPI_COMM_WORLD)
		######################## Read/write bdb: data on main node ############################
		if myid==main_node:
			if(orgstack[:4] == "bdb:"):	cmd = "{} {} {}".format("e2bdb.py", orgstack,"--makevstack="+Tracker["constants"]["stack"])
			else:  cmd = "{} {} {}".format("sxcpy.py", orgstack, Tracker["constants"]["stack"])
			cmdexecute(cmd)
			cmd = "{} {} {}".format("sxheader.py  --params=xform.projection", "--export="+Tracker["constants"]["ali3d"],orgstack)
			cmdexecute(cmd)
			cmd = "{} {} {}".format("sxheader.py  --params=ctf", "--export="+Tracker["constants"]["ctf_params"],orgstack)
			cmdexecute(cmd)
		mpi_barrier(MPI_COMM_WORLD)
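		# The three commands above copy the input particles into a local bdb
		# vstack inside the master directory and export the xform.projection and
		# ctf header parameters to text files for later use by the sorting steps.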
		########-----------------------------------------------------------------------------
		Tracker["total_stack"]              = total_stack
		Tracker["constants"]["total_stack"] = total_stack
		Tracker["shrinkage"]                = float(Tracker["nxinit"])/Tracker["constants"]["nnxo"]
		Tracker["radius"]                   = Tracker["constants"]["radius"]*Tracker["shrinkage"]
		if Tracker["constants"]["mask3D"]:
			Tracker["mask3D"] = os.path.join(masterdir,"smask.hdf")
		else:
			Tracker["mask3D"]  = None
		if Tracker["constants"]["focus3Dmask"]:
			Tracker["focus3D"] = os.path.join(masterdir,"sfocus.hdf")
		else:
			Tracker["focus3D"] = None
		if myid == main_node:
			if Tracker["constants"]["mask3D"]:
				mask_3D = get_shrink_3dmask(Tracker["nxinit"],Tracker["constants"]["mask3D"])
				mask_3D.write_image(Tracker["mask3D"])
			if Tracker["constants"]["focus3Dmask"]:
				mask_3D = get_shrink_3dmask(Tracker["nxinit"],Tracker["constants"]["focus3Dmask"])
				st = Util.infomask(mask_3D, None, True)
				if( st[0] == 0.0 ):  ERROR("sxrsort3d","incorrect focused mask, after binarize all values zero",1)
				mask_3D.write_image(Tracker["focus3D"])
				del mask_3D
		if Tracker["constants"]["PWadjustment"] !='':
			PW_dict              = {}
			nxinit_pwsp          = sample_down_1D_curve(Tracker["constants"]["nxinit"],Tracker["constants"]["nnxo"],Tracker["constants"]["PWadjustment"])
			Tracker["nxinit_PW"] = os.path.join(masterdir,"spwp.txt")
			if myid == main_node:  write_text_file(nxinit_pwsp,Tracker["nxinit_PW"])
			PW_dict[Tracker["constants"]["nnxo"]]   = Tracker["constants"]["PWadjustment"]
			PW_dict[Tracker["constants"]["nxinit"]] = Tracker["nxinit_PW"]
			Tracker["PW_dict"]                      = PW_dict
		mpi_barrier(MPI_COMM_WORLD)
		#-----------------------From two chunks to FSC, and low pass filter-----------------------------------------###
		for element in chunk_one: chunk_dict[element] = 0
		for element in chunk_two: chunk_dict[element] = 1
		chunk_list =[chunk_one, chunk_two]
		Tracker["chunk_dict"] = chunk_dict
		Tracker["P_chunk0"]   = len(chunk_one)/float(total_stack)
		Tracker["P_chunk1"]   = len(chunk_two)/float(total_stack)
		### create two volumes to estimate resolution
		if myid == main_node:
			for index in xrange(2): write_text_file(chunk_list[index],os.path.join(masterdir,"chunk%01d.txt"%index))
		mpi_barrier(MPI_COMM_WORLD)
		vols = []
		for index in xrange(2):
			data,old_shifts = get_shrink_data_huang(Tracker,Tracker["constants"]["nxinit"], os.path.join(masterdir,"chunk%01d.txt"%index), Tracker["constants"]["partstack"],myid,main_node,nproc,preshift=True)
			vol             = recons3d_4nn_ctf_MPI(myid=myid, prjlist=data,symmetry=Tracker["constants"]["sym"], finfo=None)
			if myid == main_node:
				vol.write_image(os.path.join(masterdir, "vol%d.hdf"%index))
			vols.append(vol)
			mpi_barrier(MPI_COMM_WORLD)
		if myid ==main_node:
			low_pass, falloff,currentres = get_resolution_mrk01(vols,Tracker["constants"]["radius"],Tracker["constants"]["nxinit"],masterdir,Tracker["mask3D"])
			if low_pass >Tracker["constants"]["low_pass_filter"]: low_pass= Tracker["constants"]["low_pass_filter"]
		else:
			low_pass    =0.0
			falloff     =0.0
			currentres  =0.0
		currentres = bcast_number_to_all(currentres, source_node = main_node)
		low_pass   = bcast_number_to_all(low_pass,   source_node = main_node)
		falloff    = bcast_number_to_all(falloff,    source_node = main_node)
		Tracker["currentres"]                      = currentres
		Tracker["falloff"]                         = falloff
		if Tracker["constants"]["low_pass_filter"] ==-1.0:
			Tracker["low_pass_filter"] = min(.45,low_pass/Tracker["shrinkage"]) # no better than .45
		else:
			Tracker["low_pass_filter"] = min(.45,Tracker["constants"]["low_pass_filter"]/Tracker["shrinkage"])
		Tracker["lowpass"]             = Tracker["low_pass_filter"]
		Tracker["falloff"]             =.1
		Tracker["global_fsc"]          = os.path.join(masterdir, "fsc.txt")
		############################################################################################
		if myid == main_node:
			log_main.add("The command-line inputs are as following:")
			log_main.add("**********************************************************")
		for a in sys.argv:
			if myid == main_node:log_main.add(a)
		if myid == main_node:
			log_main.add("number of cpus used in this run is %d"%Tracker["constants"]["nproc"])
			log_main.add("**********************************************************")
		from filter import filt_tanl
		### START 3-D sorting
		if myid ==main_node:
			log_main.add("----------3-D sorting  program------- ")
			log_main.add("current resolution %6.3f for images of original size in terms of absolute frequency"%Tracker["currentres"])
			log_main.add("equivalent to %f Angstrom resolution"%(Tracker["constants"]["pixel_size"]/Tracker["currentres"]/Tracker["shrinkage"]))
			log_main.add("the user provided enforced low_pass_filter is %f"%Tracker["constants"]["low_pass_filter"])
			#log_main.add("equivalent to %f Angstrom resolution"%(Tracker["constants"]["pixel_size"]/Tracker["constants"]["low_pass_filter"]))
			for index in xrange(2):
				filt_tanl(get_im(os.path.join(masterdir,"vol%01d.hdf"%index)), Tracker["low_pass_filter"],Tracker["falloff"]).write_image(os.path.join(masterdir, "volf%01d.hdf"%index))
		mpi_barrier(MPI_COMM_WORLD)
		from utilities import get_input_from_string
		delta       = get_input_from_string(Tracker["constants"]["delta"])
		delta       = delta[0]
		from utilities import even_angles
		n_angles    = even_angles(delta, 0, 180)
		this_ali3d  = Tracker["constants"]["ali3d"]
		sampled     = get_stat_proj(Tracker,delta,this_ali3d)
		if myid ==main_node:
			nc = 0
			for a in sampled:
				if len(sampled[a])>0:
					nc += 1
			log_main.add("total sampled direction %10d  at angle step %6.3f"%(len(n_angles), delta)) 
			log_main.add("captured sampled directions %10d percentage covered by data  %6.3f"%(nc,float(nc)/len(n_angles)*100))
		number_of_images_per_group = Tracker["constants"]["number_of_images_per_group"]
		if myid ==main_node: log_main.add("user provided number_of_images_per_group %d"%number_of_images_per_group)
		Tracker["number_of_images_per_group"] = number_of_images_per_group
		number_of_groups = get_number_of_groups(total_stack,number_of_images_per_group)
		Tracker["number_of_groups"] =  number_of_groups
		generation     =0
		partition_dict ={}
		full_dict      ={}
		workdir =os.path.join(masterdir,"generation%03d"%generation)
		Tracker["this_dir"] = workdir
		if myid ==main_node:
			log_main.add("---- generation         %5d"%generation)
			log_main.add("number of images per group is set as %d"%number_of_images_per_group)
			log_main.add("the initial number of groups is  %10d "%number_of_groups)
			cmd="{} {}".format("mkdir",workdir)
			os.system(cmd)
		mpi_barrier(MPI_COMM_WORLD)
		list_to_be_processed = range(Tracker["constants"]["total_stack"])
		Tracker["this_data_list"] = list_to_be_processed
		create_random_list(Tracker)
		#################################
		full_dict ={}
		for iptl in xrange(Tracker["constants"]["total_stack"]):
			full_dict[iptl] = iptl
		Tracker["full_ID_dict"] = full_dict
		################################# 	
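		# Run indep_runs independent EQ-Kmeans classifications, each seeded with its own
		# random partition (created by create_random_list above), so that their mutual
		# agreement can be measured afterwards.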
		for indep_run in xrange(Tracker["constants"]["indep_runs"]):
			Tracker["this_particle_list"] = Tracker["this_indep_list"][indep_run]
			ref_vol =  recons_mref(Tracker)
			if myid == main_node: log_main.add("independent run  %10d"%indep_run)
			mpi_barrier(MPI_COMM_WORLD)
			Tracker["this_data_list"]          = list_to_be_processed
			Tracker["total_stack"]             = len(Tracker["this_data_list"])
			Tracker["this_particle_text_file"] = os.path.join(workdir,"independent_list_%03d.txt"%indep_run) # for get_shrink_data
			if myid == main_node: write_text_file(Tracker["this_data_list"], Tracker["this_particle_text_file"])
			mpi_barrier(MPI_COMM_WORLD)
			outdir  = os.path.join(workdir, "EQ_Kmeans%03d"%indep_run)
			ref_vol = apply_low_pass_filter(ref_vol,Tracker)
			mref_ali3d_EQ_Kmeans(ref_vol, outdir, Tracker["this_particle_text_file"], Tracker)
			partition_dict[indep_run]=Tracker["this_partition"]
		Tracker["partition_dict"]    = partition_dict
		Tracker["total_stack"]       = len(Tracker["this_data_list"])
		Tracker["this_total_stack"]  = Tracker["total_stack"]
		###############################
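		# Compare the independent partitions pairwise: particles that land in matching
		# groups across runs are kept as the "two_way_stable_member" sets used below,
		# the remainder are treated as unaccounted.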
		do_two_way_comparison(Tracker)
		###############################
		ref_vol_list = []
		from time import sleep
		number_of_ref_class = []
		for igrp in xrange(len(Tracker["two_way_stable_member"])):
			Tracker["this_data_list"]      = Tracker["two_way_stable_member"][igrp]
			Tracker["this_data_list_file"] = os.path.join(workdir,"stable_class%d.txt"%igrp)
			if myid == main_node:
				write_text_file(Tracker["this_data_list"], Tracker["this_data_list_file"])
			data,old_shifts = get_shrink_data_huang(Tracker,Tracker["nxinit"], Tracker["this_data_list_file"], Tracker["constants"]["partstack"], myid, main_node, nproc, preshift = True)
			volref          = recons3d_4nn_ctf_MPI(myid=myid, prjlist = data, symmetry=Tracker["constants"]["sym"], finfo = None)
			ref_vol_list.append(volref)
			number_of_ref_class.append(len(Tracker["this_data_list"]))
			if myid == main_node:
				log_main.add("group  %d  members %d "%(igrp,len(Tracker["this_data_list"])))
		Tracker["number_of_ref_class"] = number_of_ref_class
		nx_of_image = ref_vol_list[0].get_xsize()
		if Tracker["constants"]["PWadjustment"]:
			Tracker["PWadjustment"] = Tracker["PW_dict"][nx_of_image]
		else:
			Tracker["PWadjustment"] = Tracker["constants"]["PWadjustment"]	 # no PW adjustment
		if myid == main_node:
			for iref in xrange(len(ref_vol_list)):
				refdata    = [None]*4
				refdata[0] = ref_vol_list[iref]
				refdata[1] = Tracker
				refdata[2] = Tracker["constants"]["myid"]
				refdata[3] = Tracker["constants"]["nproc"]
				volref     = user_func(refdata)
				volref.write_image(os.path.join(workdir,"volf_stable.hdf"),iref)
		mpi_barrier(MPI_COMM_WORLD)
		Tracker["this_data_list"]           = Tracker["this_accounted_list"]
		outdir                              = os.path.join(workdir,"Kmref")  
		empty_group, res_groups, final_list = ali3d_mref_Kmeans_MPI(ref_vol_list,outdir,Tracker["this_accounted_text"],Tracker)
		Tracker["this_unaccounted_list"]    = get_complementary_elements(list_to_be_processed,final_list)
		if myid == main_node:
			log_main.add("the number of particles not processed is %d"%len(Tracker["this_unaccounted_list"]))
			write_text_file(Tracker["this_unaccounted_list"],Tracker["this_unaccounted_text"])
		update_full_dict(Tracker["this_unaccounted_list"], Tracker)
		#######################################
		number_of_groups    = len(res_groups)
		vol_list            = []
		number_of_ref_class = []
		for igrp in xrange(number_of_groups):
			data,old_shifts = get_shrink_data_huang(Tracker, Tracker["constants"]["nnxo"], os.path.join(outdir,"Class%d.txt"%igrp), Tracker["constants"]["partstack"],myid,main_node,nproc,preshift = True)
			volref          = recons3d_4nn_ctf_MPI(myid=myid, prjlist = data, symmetry=Tracker["constants"]["sym"], finfo=None)
			vol_list.append(volref)

			if( myid == main_node ):  npergroup = len(read_text_file(os.path.join(outdir,"Class%d.txt"%igrp)))
			else:  npergroup = 0
			npergroup = bcast_number_to_all(npergroup, main_node )
			number_of_ref_class.append(npergroup)

		Tracker["number_of_ref_class"] = number_of_ref_class
		
		mpi_barrier(MPI_COMM_WORLD)
		nx_of_image = vol_list[0].get_xsize()
		if Tracker["constants"]["PWadjustment"]:
			Tracker["PWadjustment"]=Tracker["PW_dict"][nx_of_image]
		else:
			Tracker["PWadjustment"]=Tracker["constants"]["PWadjustment"]	

		if myid == main_node:
			for ivol in xrange(len(vol_list)):
				refdata     =[None]*4
				refdata[0] = vol_list[ivol]
				refdata[1] = Tracker
				refdata[2] = Tracker["constants"]["myid"]
				refdata[3] = Tracker["constants"]["nproc"] 
				volref = user_func(refdata)
				volref.write_image(os.path.join(workdir,"volf_of_Classes.hdf"),ivol)
				log_main.add("number of unaccounted particles  %10d"%len(Tracker["this_unaccounted_list"]))
				log_main.add("number of accounted particles  %10d"%len(Tracker["this_accounted_list"]))
				
		Tracker["this_data_list"]    = Tracker["this_unaccounted_list"]   # reset parameters for the next round calculation
		Tracker["total_stack"]       = len(Tracker["this_unaccounted_list"])
		Tracker["this_total_stack"]  = Tracker["total_stack"]
		number_of_groups             = get_number_of_groups(len(Tracker["this_unaccounted_list"]),number_of_images_per_group)
		Tracker["number_of_groups"]  =  number_of_groups
		while number_of_groups >= 2 :
			generation     +=1
			partition_dict ={}
			workdir =os.path.join(masterdir,"generation%03d"%generation)
			Tracker["this_dir"] = workdir
			if myid ==main_node:
				log_main.add("*********************************************")
				log_main.add("-----    generation             %5d    "%generation)
				log_main.add("number of images per group is set as %10d "%number_of_images_per_group)
				log_main.add("the number of groups is  %10d "%number_of_groups)
				log_main.add(" number of particles for clustering is %10d"%Tracker["total_stack"])
				cmd ="{} {}".format("mkdir",workdir)
				os.system(cmd)
			mpi_barrier(MPI_COMM_WORLD)
			create_random_list(Tracker)
			for indep_run in xrange(Tracker["constants"]["indep_runs"]):
				Tracker["this_particle_list"] = Tracker["this_indep_list"][indep_run]
				ref_vol                       = recons_mref(Tracker)
				if myid == main_node:
					log_main.add("independent run  %10d"%indep_run)
					outdir = os.path.join(workdir, "EQ_Kmeans%03d"%indep_run)
				Tracker["this_data_list"]   = Tracker["this_unaccounted_list"]
				#ref_vol=apply_low_pass_filter(ref_vol,Tracker)
				mref_ali3d_EQ_Kmeans(ref_vol,outdir,Tracker["this_unaccounted_text"],Tracker)
				partition_dict[indep_run]   = Tracker["this_partition"]
				Tracker["this_data_list"]   = Tracker["this_unaccounted_list"]
				Tracker["total_stack"]      = len(Tracker["this_unaccounted_list"])
				Tracker["partition_dict"]   = partition_dict
				Tracker["this_total_stack"] = Tracker["total_stack"]
			total_list_of_this_run          = Tracker["this_unaccounted_list"]
			###############################
			do_two_way_comparison(Tracker)
			###############################
			ref_vol_list        = []
			number_of_ref_class = []
			for igrp in xrange(len(Tracker["two_way_stable_member"])):
				Tracker["this_data_list"]      = Tracker["two_way_stable_member"][igrp]
				Tracker["this_data_list_file"] = os.path.join(workdir,"stable_class%d.txt"%igrp)
				if myid == main_node: write_text_file(Tracker["this_data_list"], Tracker["this_data_list_file"])
				mpi_barrier(MPI_COMM_WORLD)
				data,old_shifts  = get_shrink_data_huang(Tracker,Tracker["constants"]["nxinit"],Tracker["this_data_list_file"],Tracker["constants"]["partstack"],myid,main_node,nproc,preshift = True)
				volref           = recons3d_4nn_ctf_MPI(myid=myid, prjlist = data, symmetry=Tracker["constants"]["sym"],finfo= None)
				#volref = filt_tanl(volref, Tracker["constants"]["low_pass_filter"],.1)
				if myid == main_node: volref.write_image(os.path.join(workdir,"vol_stable.hdf"),igrp)
				#volref = resample(volref,Tracker["shrinkage"])
				ref_vol_list.append(volref)
				number_of_ref_class.append(len(Tracker["this_data_list"]))
				mpi_barrier(MPI_COMM_WORLD)
			Tracker["number_of_ref_class"]      = number_of_ref_class
			Tracker["this_data_list"]           = Tracker["this_accounted_list"]
			outdir                              = os.path.join(workdir,"Kmref")
			empty_group, res_groups, final_list = ali3d_mref_Kmeans_MPI(ref_vol_list,outdir,Tracker["this_accounted_text"],Tracker)
			# calculate the 3-D structure of original image size for each group
			number_of_groups                    =  len(res_groups)
			Tracker["this_unaccounted_list"]    = get_complementary_elements(total_list_of_this_run,final_list)
			if myid == main_node:
				log_main.add("the number of particles not processed is %d"%len(Tracker["this_unaccounted_list"]))
				write_text_file(Tracker["this_unaccounted_list"],Tracker["this_unaccounted_text"])
			mpi_barrier(MPI_COMM_WORLD)
			update_full_dict(Tracker["this_unaccounted_list"],Tracker)
			vol_list = []
			for igrp in xrange(number_of_groups):
				data,old_shifts = get_shrink_data_huang(Tracker,Tracker["constants"]["nnxo"], os.path.join(outdir,"Class%d.txt"%igrp), Tracker["constants"]["partstack"], myid, main_node, nproc,preshift = True)
				volref = recons3d_4nn_ctf_MPI(myid=myid, prjlist = data, symmetry=Tracker["constants"]["sym"],finfo= None)
				vol_list.append(volref)

			mpi_barrier(MPI_COMM_WORLD)
			nx_of_image = vol_list[0].get_xsize()
			if Tracker["constants"]["PWadjustment"]:
				Tracker["PWadjustment"] = Tracker["PW_dict"][nx_of_image]
			else:
				Tracker["PWadjustment"] = Tracker["constants"]["PWadjustment"]	

			if myid == main_node:
				for ivol in xrange(len(vol_list)):
					refdata    = [None]*4
					refdata[0] = vol_list[ivol]
					refdata[1] = Tracker
					refdata[2] = Tracker["constants"]["myid"]
					refdata[3] = Tracker["constants"]["nproc"] 
					volref     = user_func(refdata)
					volref.write_image(os.path.join(workdir, "volf_of_Classes.hdf"),ivol)
				log_main.add("number of unaccounted particles  %10d"%len(Tracker["this_unaccounted_list"]))
				log_main.add("number of accounted particles  %10d"%len(Tracker["this_accounted_list"]))
			del vol_list
			mpi_barrier(MPI_COMM_WORLD)
			number_of_groups            = get_number_of_groups(len(Tracker["this_unaccounted_list"]),number_of_images_per_group)
			Tracker["number_of_groups"] =  number_of_groups
			Tracker["this_data_list"]   = Tracker["this_unaccounted_list"]
			Tracker["total_stack"]      = len(Tracker["this_unaccounted_list"])
		if Tracker["constants"]["unaccounted"]:
			data,old_shifts = get_shrink_data_huang(Tracker,Tracker["constants"]["nnxo"],Tracker["this_unaccounted_text"],Tracker["constants"]["partstack"],myid,main_node,nproc,preshift = True)
			volref          = recons3d_4nn_ctf_MPI(myid=myid, prjlist = data, symmetry=Tracker["constants"]["sym"],finfo= None)
			nx_of_image     = volref.get_xsize()
			if Tracker["constants"]["PWadjustment"]:
				Tracker["PWadjustment"]=Tracker["PW_dict"][nx_of_image]
			else:
				Tracker["PWadjustment"]=Tracker["constants"]["PWadjustment"]	
			if( myid == main_node ):
				refdata    = [None]*4
				refdata[0] = volref
				refdata[1] = Tracker
				refdata[2] = Tracker["constants"]["myid"]
				refdata[3] = Tracker["constants"]["nproc"]
				volref     = user_func(refdata)
				#volref    = filt_tanl(volref, Tracker["constants"]["low_pass_filter"],.1)
				volref.write_image(os.path.join(workdir,"volf_unaccounted.hdf"))
		# Finish program
		if myid ==main_node: log_main.add("sxsort3d finishes")
		mpi_barrier(MPI_COMM_WORLD)
		from mpi import mpi_finalize
		mpi_finalize()
		exit()
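
The resolution values in the section above are computed on the main node only and must reach every rank before they are stored in Tracker. Below is a minimal, self-contained sketch of that compute-on-one-rank-then-broadcast pattern, written with mpi4py instead of the SPARX wrappers; the variable names and the 0.23 value are illustrative only.

# Hypothetical stand-alone sketch (mpi4py), not part of SPARX/SPHIRE.
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
main_node = 0

if rank == main_node:
    # Pretend this was measured from the two half-set volumes (e.g. an FSC cutoff).
    low_pass = 0.23
else:
    low_pass = 0.0  # placeholder on every other rank

# bcast returns the broadcast value, so it must be assigned on every rank --
# the same reason the bcast_number_to_all() calls above assign their result.
low_pass = comm.bcast(low_pass, root=main_node)
print("rank %d sees low_pass = %.2f" % (rank, low_pass))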
Example no. 47
def _main_():
    # Entry-point wrapper (name illustrative): initialize MPI, delegate to the
    # module-level main(args) defined elsewhere, then shut MPI down. The wrapper
    # must not share the name main(), or the call below would recurse.
    mpi.mpi_init(0, [])
    sp_global_def.print_timestamp("Start")
    main(sys.argv[1:])
    sp_global_def.print_timestamp("Finish")
    mpi.mpi_finalize()
Example no. 48
def main():

    progname = os.path.basename(sys.argv[0])
    usage = progname + " stack outdir <maskfile> --K=10 --trials=2 --debug --maxit=100 --rand_seed=10 --crit='all' --F=0.9 --T0=2.0 --init_method='rnd' --normalize --CTF --MPI --CUDA"
    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option("--K",
                      type="int",
                      default=2,
                      help="Number of classes (default 2)")
    parser.add_option("--trials",
                      type="int",
                      default=1,
                      help="Number of trials of K-means (default 1)")
    parser.add_option("--maxit",
                      type="int",
                      default=100,
                      help="Maximum number of iterations within K-means")
    parser.add_option("--CTF",
                      action="store_true",
                      default=False,
                      help="Perform classification using CTF information")
    parser.add_option("--rand_seed",
                      type="int",
                      default=-1,
                      help="Random seed of initial (default random)")
    parser.add_option(
        "--crit",
        type="string",
        default="D",
        help=
        "Criterions: Coleman [C], Harabasz[H], Davies-Bouldin[D], All [all]")
    #parser.add_option("--F",          type="float",        default=0.0,       help="Cooling in simulated annealing, ex.: 0.9")
    #parser.add_option("--T0",         type="float",        default=0.0,       help="Initial temperature in simulated annealing, ex: 100")
    parser.add_option("--MPI",
                      action="store_true",
                      default=False,
                      help="Use MPI version")
    parser.add_option("--debug", action="store_true", default=False, help="")
    parser.add_option("--normalize",
                      action="store_true",
                      default=False,
                      help="Normalize images under the mask")
    parser.add_option(
        '--init_method',
        type='string',
        default='rnd',
        help=
        'Method used to initialize partition: "rnd" randomize or "d2w" for d2 weighting initialization (default is rnd)'
    )

    (options, args) = parser.parse_args()
    if len(args) < 2 or len(args) > 3:
        print("usage: " + usage)
        print("Please run '" + progname + " -h' for detailed options")
    elif options.trials < 1:
        sys.stderr.write("ERROR: Number of trials should be at least 1.\n\n")
        sys.exit()
    else:
        if len(args) == 2: mask = None
        else: mask = args[2]

        if options.K < 2:
            sys.stderr.write('ERROR: K must be > 1 group\n\n')
            sys.exit()

        if options.CTF:
            sys.stderr.write('ERROR: CTF option not implemented\n\n')
            sys.exit()

        if global_def.CACHE_DISABLE:
            from utilities import disable_bdb_cache
            disable_bdb_cache()
        from applications import k_means_main
        global_def.BATCH = True
        k_means_main(args[0], args[1], mask, "SSE", options.K,
                     options.rand_seed, options.maxit, options.trials,
                     options.crit, options.CTF, 0.0, 0.0, options.MPI, False,
                     options.debug, options.normalize, options.init_method)
        global_def.BATCH = False
        if options.MPI:
            from mpi import mpi_finalize
            mpi_finalize()
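
The --crit option above selects a cluster-validity index used to score the K-means result. As a reference point for what such an index measures, here is a small self-contained NumPy computation of the Davies-Bouldin index (the [D] choice); this is the textbook definition and is not guaranteed to match the exact formula used inside k_means_main.

# Hypothetical illustration: textbook Davies-Bouldin index for a hard partition.
import numpy as np

def davies_bouldin(data, labels):
    """data: (n_samples, n_features) array; labels: integer cluster ids starting at 0."""
    k = labels.max() + 1
    centroids = np.array([data[labels == i].mean(axis=0) for i in range(k)])
    # Cluster scatter: mean distance of each cluster's members to its centroid.
    scatter = np.array([np.linalg.norm(data[labels == i] - centroids[i], axis=1).mean()
                        for i in range(k)])
    worst = []
    for i in range(k):
        worst.append(max((scatter[i] + scatter[j]) / np.linalg.norm(centroids[i] - centroids[j])
                         for j in range(k) if j != i))
    return sum(worst) / k  # lower values indicate tighter, better-separated clusters

if __name__ == "__main__":
    rng = np.random.RandomState(0)
    data = np.vstack([rng.randn(50, 2), rng.randn(50, 2) + 5.0])
    labels = np.array([0] * 50 + [1] * 50)
    print("Davies-Bouldin index: %.3f" % davies_bouldin(data, labels))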
Example no. 49
def main():

    import sys

    arglist = []
    for arg in sys.argv:
        arglist.append(arg)

    progname = os.path.basename(arglist[0])
    usage = progname + " prjstack outdir bufprefix --delta --d --nvol --nbufvol --seedbase --snr --npad --CTF --MPI --verbose"
    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option("--nvol",
                      type="int",
                      help="number of resample volumes to be generated")
    parser.add_option("--nbufvol",
                      type="int",
                      default=1,
                      help="number of fftvols in the memory")
    parser.add_option("--delta",
                      type="float",
                      default=10.0,
                      help="angular step for cones")
    parser.add_option("--d",
                      type="float",
                      default=0.1,
                      help="fraction of projections to leave out")
    parser.add_option("--CTF",
                      action="store_true",
                      default=False,
                      help="use CTF")
    parser.add_option("--snr",
                      type="float",
                      default=1.0,
                      help="Signal-to-Noise Ratio")
    parser.add_option("--npad", type="int", default=2, help="times of padding")
    parser.add_option("--seedbase",
                      type="int",
                      default=-1,
                      help="random seed base")
    parser.add_option("--MPI",
                      action="store_true",
                      default=False,
                      help="use MPI")
    parser.add_option("--verbose",
                      type="int",
                      default=0,
                      help="verbose level: 0 no, 1 yes")

    (options, args) = parser.parse_args(arglist[1:])

    if (len(args) != 1 and len(args) != 3):
        print("usage: " + usage)
        return None

    prjfile = args[0]

    if options.MPI:
        from mpi import mpi_barrier, mpi_comm_rank, mpi_comm_size, mpi_comm_split, MPI_COMM_WORLD
        from mpi import mpi_init
        sys.argv = mpi_init(len(sys.argv), sys.argv)
        myid = mpi_comm_rank(MPI_COMM_WORLD)
        ncpu = mpi_comm_size(MPI_COMM_WORLD)
    else:
        myid = 0
        ncpu = 1

    if global_def.CACHE_DISABLE:
        from utilities import disable_bdb_cache
        disable_bdb_cache()

    outdir = args[1]
    bufprefix = args[2]
    resample( prjfile, outdir, bufprefix, options.nbufvol, options.nvol, options.seedbase,\
               options.delta, options.d, options.snr, options.CTF, options.npad,\
        options.MPI, myid, ncpu, options.verbose )
    if options.MPI:
        from mpi import mpi_finalize
        mpi_finalize()
Example no. 50
def main():
    import os
    import sys
    from optparse import OptionParser
    from global_def import SPARXVERSION
    import global_def
    arglist = []
    for arg in sys.argv:
        arglist.append(arg)
    progname = os.path.basename(arglist[0])
    usage2 = progname + """ inputfile outputfile [options]
        Functionalities:

        1. Helicise input volume and save the result to output volume:
            sxhelicon_utils.py input_vol.hdf output_vol.hdf --helicise --dp=27.6 --dphi=166.5 --fract=0.65 --rmax=70 --rmin=1 --apix=1.84 --sym=D1        

        2. Helicise pdb file and save the result to a new pdb file:
            sxhelicon_utils.py input.pdb output.pdb --helicisepdb --dp=27.6 --dphi=166.5 --nrepeats --apix=1.84         

        3. Generate two lists of image indices used to split segment stack into halves for helical fsc calculation.			
            sxhelicon_utils.py bdb:big_stack --hfsc='flst' --filament_attr=filament

        4. Map of filament distribution in the stack
            sxhelicon_utils.py bdb:big_stack --filinfo=info.txt
            The output file will contain four columns:
                     1                    2                     3                         4
            first image number     last image number      number of images         in the filament name

        5. Predict segments' orientation parameters based on distances between segments and known helical symmetry
            sxhelicon_utils.py bdb:big_stack --predict_helical=helical_params.txt --dp=27.6 --dphi=166.5 --apix=1.84
            
        6. Generate disks from filament based reconstructions:		
            sxheader.py stk.hdf --params=xform.projection --import=params.txt
            mpirun -np 2 sxhelicon_utils.py stk.hdf --gendisk='bdb:disk' --ref_nx=100 --ref_ny=100 --ref_nz=200 --apix=1.84 --dp=27.6 --dphi=166.715 --fract=0.67 --rmin=0 --rmax=64 --function="[.,nofunc,helical3c]" --sym="c1" --MPI

        7. Stack disks based on helical symmetry parameters
            sxhelicon_utils.py disk_to_stack.hdf --stackdisk=stacked_disks.hdf --dphi=166.5 --dp=27.6 --ref_nx=160 --ref_ny=160 --ref_nz=225 --apix=1.84
		
        8. Helical symmetry search:
            mpirun -np 3 sxhelicon_utils.py volf0010.hdf outsymsearch --symsearch --dp=27.6 --dphi=166.715 --apix=1.84 --fract=0.65 --rmin=0 --rmax=92.0 --datasym=datasym.txt  --dp_step=0.92 --ndp=3 --dphi_step=1.0 --ndphi=10 --MPI
"""
    parser = OptionParser(usage2, version=SPARXVERSION)
    #parser.add_option("--ir",                 type="float", 	     default= -1,                 help="inner radius for rotational correlation > 0 (set to 1) (Angstroms)")
    parser.add_option(
        "--ou",
        type="float",
        default=-1,
        help=
        "outer radius for rotational 2D correlation < int(nx/2)-1 (set to the radius of the particle) (Angstroms)"
    )
    parser.add_option(
        "--rs",
        type="int",
        default=1,
        help="step between rings in rotational correlation >0  (set to 1)")
    parser.add_option(
        "--xr",
        type="string",
        default="4 2 1 1 1",
        help=
        "range for translation search in x direction, search is +/-xr (Angstroms) "
    )
    parser.add_option(
        "--txs",
        type="string",
        default="1 1 1 0.5 0.25",
        help=
        "step size of the translation search in x directions, search is -xr, -xr+ts, 0, xr-ts, xr (Angstroms)"
    )
    parser.add_option("--delta",
                      type="string",
                      default="10 6 4 3 2",
                      help="angular step of reference projections")
    parser.add_option("--an",
                      type="string",
                      default="-1",
                      help="angular neighborhood for local searches")
    parser.add_option(
        "--maxit",
        type="int",
        default=30,
        help=
        "maximum number of iterations performed for each angular step (set to 30) "
    )
    parser.add_option("--CTF",
                      action="store_true",
                      default=False,
                      help="CTF correction")
    parser.add_option("--snr",
                      type="float",
                      default=1.0,
                      help="Signal-to-Noise Ratio of the data")
    parser.add_option("--MPI",
                      action="store_true",
                      default=False,
                      help="use MPI version")
    #parser.add_option("--fourvar",           action="store_true",   default=False,               help="compute Fourier variance")
    parser.add_option("--apix",
                      type="float",
                      default=-1.0,
                      help="pixel size in Angstroms")
    parser.add_option("--dp",
                      type="float",
                      default=-1.0,
                      help="delta z - translation in Angstroms")
    parser.add_option("--dphi",
                      type="float",
                      default=-1.0,
                      help="delta phi - rotation in degrees")

    parser.add_option("--rmin",
                      type="float",
                      default=0.0,
                      help="minimal radius for hsearch (Angstroms)")
    parser.add_option("--rmax",
                      type="float",
                      default=80.0,
                      help="maximal radius for hsearch (Angstroms)")
    parser.add_option("--fract",
                      type="float",
                      default=0.7,
                      help="fraction of the volume used for helical search")
    parser.add_option("--sym",
                      type="string",
                      default="c1",
                      help="symmetry of the structure")
    parser.add_option("--function",
                      type="string",
                      default="helical",
                      help="name of the reference preparation function")
    parser.add_option("--npad",
                      type="int",
                      default=2,
                      help="padding size for 3D reconstruction")
    parser.add_option("--debug",
                      action="store_true",
                      default=False,
                      help="debug")

    parser.add_option("--volalixshift",
                      action="store_true",
                      default=False,
                      help="Use volalixshift refinement")
    parser.add_option(
        "--searchxshift",
        type="float",
        default=0.0,
        help=
        "search range for x-shift determination: +/- searchxshift (Angstroms)")
    parser.add_option(
        "--nearby",
        type="float",
        default=6.0,
        help=
        "neighborhood within which to search for peaks in 1D ccf for x-shift search (Angstroms)"
    )

    # filinfo
    parser.add_option(
        "--filinfo",
        type="string",
        default="",
        help=
        "Store in an output text file infomration about distribution of filaments in the stack."
    )

    # diskali
    parser.add_option("--diskali",
                      action="store_true",
                      default=False,
                      help="volume alignment")
    parser.add_option(
        "--zstep",
        type="float",
        default=1,
        help="Step size for translational search along z (Angstroms)")

    # helicise
    parser.add_option(
        "--helicise",
        action="store_true",
        default=False,
        help="helicise input volume and save results to output volume")
    parser.add_option(
        "--hfsc",
        type="string",
        default="",
        help=
        "Generate two lists of image indices used to split segment stack into halves for helical fsc calculation. The lists will be stored in two text files named using file_prefix with '_even' and '_odd' suffixes, respectively."
    )
    parser.add_option(
        "--filament_attr",
        type="string",
        default="filament",
        help="attribute under which filament identification is stored")
    parser.add_option(
        "--predict_helical",
        type="string",
        default="",
        help="Generate projection parameters consistent with helical symmetry")

    # helicise pdb
    parser.add_option(
        "--helicisepdb",
        action="store_true",
        default=False,
        help="Helicise pdb file and save the result to a new pdb file")
    parser.add_option(
        "--nrepeats",
        type="int",
        default=50,
        help=
        "Number of time the helical symmetry will be applied to the input file"
    )

    # input options for generating disks
    parser.add_option(
        "--gendisk",
        type="string",
        default="",
        help="Name of file under which generated disks will be saved to")
    parser.add_option("--ref_nx",
                      type="int",
                      default=-1,
                      help="nx=ny volume size")
    parser.add_option(
        "--ref_nz",
        type="int",
        default=-1,
        help="nz volume size - computed disks will be nx x ny x rise/apix")
    parser.add_option(
        "--new_pixel_size",
        type="float",
        default=-1,
        help=
        "desired pixel size of the output disks. The default is -1, in which case there is no resampling (unless --match_pixel_rise flag is True)."
    )
    parser.add_option(
        "--maxerror",
        type="float",
        default=0.1,
        help=
        "proportional to the maximum amount of error to tolerate between (dp/new_pixel_size) and int(dp/new_pixel_size ), where new_pixel_size is the pixel size calculated when the option --match_pixel_rise flag is True."
    )
    parser.add_option(
        "--match_pixel_rise",
        action="store_true",
        default=False,
        help=
        "calculate new pixel size such that the rise is approximately integer number of pixels given the new pixel size. This will be the pixel size of the output disks."
    )

    # get consistency
    parser.add_option(
        "--consistency",
        type="string",
        default="",
        help="Name of parameters to get consistency statistics for")
    parser.add_option("--phithr",
                      type="float",
                      default=2.0,
                      help="phi threshold for consistency check")
    parser.add_option("--ythr",
                      type="float",
                      default=2.0,
                      help="y threshold (in Angstroms) for consistency check")
    parser.add_option(
        "--segthr",
        type="int",
        default=3,
        help="minimum number of segments/filament for consistency check")

    # stack disks
    parser.add_option(
        "--stackdisk",
        type="string",
        default="",
        help="Name of file under which output volume will be saved to.")
    parser.add_option("--ref_ny",
                      type="int",
                      default=-1,
                      help="ny of output volume size. Default is ref_nx")

    # symmetry search
    parser.add_option("--symsearch",
                      action="store_true",
                      default=False,
                      help="Do helical symmetry search.")
    parser.add_option(
        "--ndp",
        type="int",
        default=12,
        help=
        "In symmetrization search, number of delta z steps equals to 2*ndp+1")
    parser.add_option(
        "--ndphi",
        type="int",
        default=12,
        help=
        "In symmetrization search, number of dphi steps equals to 2*ndphi+1")
    parser.add_option(
        "--dp_step",
        type="float",
        default=0.1,
        help="delta z step  for symmetrization [Angstroms] (default 0.1)")
    parser.add_option(
        "--dphi_step",
        type="float",
        default=0.1,
        help="dphi step for symmetrization [degrees] (default 0.1)")
    parser.add_option("--datasym",
                      type="string",
                      default="datasym.txt",
                      help="symdoc")
    parser.add_option(
        "--symdoc",
        type="string",
        default="",
        help="text file containing helical symmetry parameters dp and dphi")

    # filament statistics in the stack

    (options, args) = parser.parse_args(arglist[1:])
    if len(args) < 1 or len(args) > 5:
        print("Various helical reconstruction related functionalities: " +
              usage2)
        print("Please run '" + progname + " -h' for detailed options")
    else:

        if len(options.hfsc) > 0:
            if len(args) != 1:
                print("Incorrect number of parameters")
                sys.exit()
            from applications import imgstat_hfsc
            imgstat_hfsc(args[0], options.hfsc, options.filament_attr)
            sys.exit()
        elif len(options.filinfo) > 0:
            if len(args) != 1:
                print("Incorrect number of parameters")
                sys.exit()
            from EMAN2 import EMUtil
            filams = EMUtil.get_all_attributes(args[0], "filament")
            ibeg = 0
            filcur = filams[0]
            n = len(filams)
            inf = []
            i = 1
            while (i <= n):
                if (i < n): fis = filams[i]
                else: fis = ""
                if (fis != filcur):
                    iend = i - 1
                    inf.append([ibeg, iend, iend - ibeg + 1, filcur])
                    ibeg = i
                    filcur = fis
                i += 1
            from utilities import write_text_row
            write_text_row(inf, options.filinfo)
            sys.exit()

        if len(options.stackdisk) > 0:
            if len(args) != 1:
                print("Incorrect number of parameters")
                sys.exit()
            dpp = (float(options.dp) / options.apix)
            rise = int(dpp)
            if (abs(float(rise) - dpp) > 1.0e-3):
                print("  dpp has to be integer multiplicity of the pixel size")
                sys.exit()
            from utilities import get_im
            v = get_im(args[0])
            from applications import stack_disks
            ref_ny = options.ref_ny
            if ref_ny < 0:
                ref_ny = options.ref_nx
            sv = stack_disks(v, options.ref_nx, ref_ny, options.ref_nz,
                             options.dphi, rise)
            sv.write_image(options.stackdisk)
            sys.exit()

        if len(options.consistency) > 0:
            if len(args) != 1:
                print("Incorrect number of parameters")
                sys.exit()
            from development import consistency_params
            consistency_params(args[0],
                               options.consistency,
                               options.dphi,
                               options.dp,
                               options.apix,
                               phithr=options.phithr,
                               ythr=options.ythr,
                               THR=options.segthr)
            sys.exit()

        rminp = int((float(options.rmin) / options.apix) + 0.5)
        rmaxp = int((float(options.rmax) / options.apix) + 0.5)

        from utilities import get_input_from_string, get_im

        xr = get_input_from_string(options.xr)
        txs = get_input_from_string(options.txs)

        irp = 1
        if options.ou < 0: oup = -1
        else: oup = int((old_div(options.ou, options.apix)) + 0.5)
        xrp = ''
        txsp = ''

        for i in range(len(xr)):
            xrp += " " + str(float(xr[i]) / options.apix)
        for i in range(len(txs)):
            txsp += " " + str(float(txs[i]) / options.apix)

        searchxshiftp = int((old_div(options.searchxshift, options.apix)) +
                            0.5)
        nearbyp = int((old_div(options.nearby, options.apix)) + 0.5)
        zstepp = int((old_div(options.zstep, options.apix)) + 0.5)

        if options.MPI:
            from mpi import mpi_init, mpi_finalize
            sys.argv = mpi_init(len(sys.argv), sys.argv)

        if len(options.predict_helical) > 0:
            if len(args) != 1:
                print("Incorrect number of parameters")
                sys.exit()
            if options.dp < 0:
                print(
                    "Helical symmetry paramter rise --dp should not be negative"
                )
                sys.exit()
            from applications import predict_helical_params
            predict_helical_params(args[0], options.dp, options.dphi,
                                   options.apix, options.predict_helical)
            sys.exit()

        if options.helicise:
            if len(args) != 2:
                print("Incorrect number of parameters")
                sys.exit()
            if options.dp < 0:
                print(
                    "Helical symmetry paramter rise --dp should not be negative"
                )
                sys.exit()
            from utilities import get_im, sym_vol
            vol = get_im(args[0])
            vol = sym_vol(vol, options.sym)
            hvol = vol.helicise(options.apix, options.dp, options.dphi,
                                options.fract, rmaxp, rminp)
            hvol = sym_vol(hvol, options.sym)
            hvol.write_image(args[1])
            sys.exit()

        if options.helicisepdb:
            if len(args) != 2:
                print("Incorrect number of parameters")
                sys.exit()
            if options.dp < 0:
                print(
                    "Helical symmetry paramter rise --dp should not be negative"
                )
                sys.exit()
            from math import cos, sin, radians
            from copy import deepcopy
            import numpy
            from numpy import zeros, dot, float32

            dp = options.dp
            dphi = options.dphi
            nperiod = options.nrepeats

            infile = open(args[0], "r")
            pall = infile.readlines()
            infile.close()

            p = []

            pos = []
            lkl = -1
            for i in range(len(pall)):
                if ((pall[i])[:4] == 'ATOM'):
                    if (lkl == -1): lkl = i
                    p.append(pall[i])
                    pos.append(i)
            n = len(p)

            X = zeros((3, len(p)), dtype=float32)
            X_new = zeros((3, len(p)), dtype=float32)

            for i in range(len(p)):
                element = deepcopy(p[i])
                X[0, i] = float(element[30:38])
                X[1, i] = float(element[38:46])
                X[2, i] = float(element[46:54])

            pnew = []
            for j in range(-nperiod, nperiod + 1):
                for i in range(n):
                    pnew.append(deepcopy(p[i]))

            dphi = radians(dphi)
            m = zeros((3, 3), dtype=float32)
            t = zeros((3, 1), dtype=float32)
            m[2][2] = 1.0
            t[0, 0] = 0.0
            t[1, 0] = 0.0

            for j in range(-nperiod, nperiod + 1):
                if j != 0:
                    rd = j * dphi
                    m[0][0] = cos(rd)
                    m[0][1] = sin(rd)
                    m[1][0] = -m[0][1]
                    m[1][1] = m[0][0]
                    t[2, 0] = j * dp
                    X_new = dot(m, X) + t
                    for i in range(n):
                        pnew[j * n +
                             i] = pnew[j * n + i][:30] + "%8.3f" % (float(
                                 X_new[0, i])) + "%8.3f" % (float(
                                     X_new[1, i])) + "%8.3f" % (float(
                                         X_new[2, i])) + pnew[j * n + i][54:]

            outfile = open(args[1], "w")
            outfile.writelines(pall[0:lkl])
            outfile.writelines(pnew)
            outfile.writelines("END\n")
            outfile.close()
            sys.exit()

        if options.volalixshift:
            if options.maxit > 1:
                print(
                    "Inner iteration for x-shift determinatin is restricted to 1"
                )
                sys.exit()
            if len(args) < 4: mask = None
            else: mask = args[3]
            from applications import volalixshift_MPI
            global_def.BATCH = True
            volalixshift_MPI(args[0], args[1], args[2], searchxshiftp,
                             options.apix, options.dp, options.dphi,
                             options.fract, rmaxp, rminp, mask, options.maxit,
                             options.CTF, options.snr, options.sym,
                             options.function, options.npad, options.debug,
                             nearbyp)
            global_def.BATCH = False

        if options.diskali:
            #if options.maxit > 1:
            #	print "Inner iteration for disk alignment is restricted to 1"
            #	sys.exit()
            if len(args) < 4: mask = None
            else: mask = args[3]
            global_def.BATCH = True
            if (options.sym[:1] == "d" or options.sym[:1] == "D"):
                from development import diskaliD_MPI
                diskaliD_MPI(args[0], args[1], args[2], mask, options.dp,
                             options.dphi, options.apix, options.function,
                             zstepp, options.fract, rmaxp, rminp, options.CTF,
                             options.maxit, options.sym)
            else:
                from applications import diskali_MPI
                diskali_MPI(args[0], args[1], args[2], mask, options.dp,
                            options.dphi, options.apix, options.function,
                            zstepp, options.fract, rmaxp, rminp, options.CTF,
                            options.maxit, options.sym)
            global_def.BATCH = False

        if options.symsearch:

            if len(options.symdoc) < 1:
                if options.dp < 0 or options.dphi < 0:
                    print(
                        "Enter helical symmetry parameters either using --symdoc or --dp and --dphi"
                    )
                    sys.exit()

            if options.dp < 0 or options.dphi < 0:
                # read helical symmetry parameters from symdoc
                from utilities import read_text_row
                hparams = read_text_row(options.symdoc)
                dp = hparams[0][0]
                dphi = hparams[0][1]
            else:
                dp = options.dp
                dphi = options.dphi

            from applications import symsearch_MPI
            if len(args) < 3:
                mask = None
            else:
                mask = args[2]
            global_def.BATCH = True
            symsearch_MPI(args[0], args[1], mask, dp, options.ndp,
                          options.dp_step, dphi, options.ndphi,
                          options.dphi_step, rminp, rmaxp, options.fract,
                          options.sym, options.function, options.datasym,
                          options.apix, options.debug)
            global_def.BATCH = False

        elif len(options.gendisk) > 0:
            from applications import gendisks_MPI
            global_def.BATCH = True
            if len(args) == 1: mask3d = None
            else: mask3d = args[1]
            if options.dp < 0:
                print(
                    "Helical symmetry paramter rise --dp must be explictly set!"
                )
                sys.exit()
            gendisks_MPI(args[0], mask3d, options.ref_nx, options.apix,
                         options.dp, options.dphi, options.fract, rmaxp, rminp,
                         options.CTF, options.function, options.sym,
                         options.gendisk, options.maxerror,
                         options.new_pixel_size, options.match_pixel_rise)
            global_def.BATCH = False

        if options.MPI:
            from mpi import mpi_finalize
            mpi_finalize()
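
The --helicisepdb branch above applies the helical operator (rotate by j*dphi about the z axis, then translate by j*dp along z) to every atom for j = -nrepeats..nrepeats. A stripped-down NumPy sketch of that coordinate transform is shown below; the dp/dphi values are taken from the usage examples in the docstring and the atom positions are made up.

# Hypothetical sketch of the helical symmetry operator used by --helicisepdb.
import numpy as np

def helical_copies(coords, dp, dphi_deg, nrepeats):
    """coords: (n_atoms, 3) positions in Angstroms; returns copies for j = -nrepeats..nrepeats."""
    dphi = np.radians(dphi_deg)
    copies = []
    for j in range(-nrepeats, nrepeats + 1):
        c, s = np.cos(j * dphi), np.sin(j * dphi)
        # Rotation about z by j*dphi (same layout as the m[][] matrix above),
        # followed by a translation of j*dp along z.
        rot = np.array([[c,   s,   0.0],
                        [-s,  c,   0.0],
                        [0.0, 0.0, 1.0]])
        copies.append(coords.dot(rot.T) + np.array([0.0, 0.0, j * dp]))
    return np.vstack(copies)

if __name__ == "__main__":
    atoms = np.array([[10.0, 0.0, 0.0], [0.0, 10.0, 5.0]])   # two made-up atom positions
    out = helical_copies(atoms, dp=27.6, dphi_deg=166.5, nrepeats=2)
    print(out.shape)   # (10, 3): 2 atoms x (2*2 + 1) symmetry copies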
Example no. 51
def calculate_volumes_after_rotation_and_save_them(ali3d_options, rviper_iter, masterdir, bdb_stack_location, mpi_rank, mpi_size,
												   no_of_viper_runs_analyzed_together, no_of_viper_runs_analyzed_together_from_user_options, mpi_comm = -1):
	
	# This function takes into account the case in which there are more processors than images

	if mpi_comm == -1:
		mpi_comm = MPI_COMM_WORLD

	# some arguments are for debugging purposes

	mainoutputdir = masterdir + DIR_DELIM + NAME_OF_MAIN_DIR + ("%03d" + DIR_DELIM) %(rviper_iter)

	# list_of_projection_indices_used_for_outlier_elimination = map(int, read_text_file(mainoutputdir + DIR_DELIM + "list_of_viper_runs_included_in_outlier_elimination.txt"))
	import json; f = open(mainoutputdir + "list_of_viper_runs_included_in_outlier_elimination.json", 'r')
	list_of_independent_viper_run_indices_used_for_outlier_elimination  = json.load(f); f.close()

	if len(list_of_independent_viper_run_indices_used_for_outlier_elimination)==0:
		print "Error: len(list_of_independent_viper_run_indices_used_for_outlier_elimination)==0"
		mpi_finalize()
		sys.exit()

	# if this data analysis step was already performed in the past then return
	# for future changes make sure that the file checked is the last one to be processed !!!
	
	# if(os.path.exists(mainoutputdir + DIR_DELIM + NAME_OF_RUN_DIR + "%03d"%(no_of_viper_runs_analyzed_together - 1) + DIR_DELIM + "rotated_volume.hdf")):
	# check_last_run = max(get_latest_directory_increment_value(mainoutputdir, NAME_OF_RUN_DIR, start_value=0), no_of_viper_runs_analyzed_together_from_user_options)
	# if(os.path.exists(mainoutputdir + DIR_DELIM + NAME_OF_RUN_DIR + "%03d"%(check_last_run) + DIR_DELIM + "rotated_volume.hdf")):
	# 	return

	# if this data analysis step was already performed in the past then return
	for check_run in list_of_independent_viper_run_indices_used_for_outlier_elimination:
		if not (os.path.exists(mainoutputdir + DIR_DELIM + NAME_OF_RUN_DIR + "%03d"%(check_run) + DIR_DELIM + "rotated_volume.hdf")):
			break
	else:
		return

	partstack = []
	# for i1 in range(0,no_of_viper_runs_analyzed_together):
	for i1 in list_of_independent_viper_run_indices_used_for_outlier_elimination:
		partstack.append(mainoutputdir + NAME_OF_RUN_DIR + "%03d"%(i1) + DIR_DELIM + "rotated_reduced_params.txt")
	partids_file_name = mainoutputdir + "this_iteration_index_keep_images.txt"

	lpartids = map(int, read_text_file(partids_file_name) )
	n_projs = len(lpartids)


	if (mpi_size > n_projs):
		# if there are more processors than images
		working = int(not(mpi_rank < n_projs))
		mpi_subcomm = mpi_comm_split(mpi_comm, working,  mpi_rank - working*n_projs)
		mpi_subsize = mpi_comm_size(mpi_subcomm)
		mpi_subrank = mpi_comm_rank(mpi_subcomm)
		if (mpi_rank < n_projs):

			# for i in xrange(no_of_viper_runs_analyzed_together):
			for idx, i in enumerate(list_of_independent_viper_run_indices_used_for_outlier_elimination):
				projdata = getindexdata(bdb_stack_location + "_%03d"%(rviper_iter - 1), partids_file_name, partstack[idx], mpi_rank, mpi_subsize)
				vol = do_volume(projdata, ali3d_options, 0, mpi_comm = mpi_subcomm)
				del projdata
				if( mpi_rank == 0):
					vol.write_image(mainoutputdir + DIR_DELIM + NAME_OF_RUN_DIR + "%03d"%(i) + DIR_DELIM + "rotated_volume.hdf")
					line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " => "
					print line  + "Generated rec_ref_volume_run #%01d \n"%i
				del vol

		mpi_barrier(mpi_comm)
	else:
		for idx, i in enumerate(list_of_independent_viper_run_indices_used_for_outlier_elimination):
			projdata = getindexdata(bdb_stack_location + "_%03d"%(rviper_iter - 1), partids_file_name, partstack[idx], mpi_rank, mpi_size)
			vol = do_volume(projdata, ali3d_options, 0, mpi_comm = mpi_comm)
			del projdata
			if( mpi_rank == 0):
				vol.write_image(mainoutputdir + DIR_DELIM + NAME_OF_RUN_DIR + "%03d"%(i) + DIR_DELIM + "rotated_volume.hdf")
				line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " => "
				print line + "Generated rec_ref_volume_run #%01d"%i
			del vol

	if( mpi_rank == 0):
		# Align all rotated volumes, calculate their average and save as an overall result
		from utilities import get_params3D, set_params3D, get_im, model_circle
		from statistics import ave_var
		from applications import ali_vol
		# vls = [None]*no_of_viper_runs_analyzed_together
		vls = [None]*len(list_of_independent_viper_run_indices_used_for_outlier_elimination)
		# for i in xrange(no_of_viper_runs_analyzed_together):
		for idx, i in enumerate(list_of_independent_viper_run_indices_used_for_outlier_elimination):
			vls[idx] = get_im(mainoutputdir + DIR_DELIM + NAME_OF_RUN_DIR + "%03d"%(i) + DIR_DELIM + "rotated_volume.hdf")
			set_params3D(vls[idx],[0.,0.,0.,0.,0.,0.,0,1.0])
		asa,sas = ave_var(vls)
		# do the alignment
		nx = asa.get_xsize()
		radius = nx/2 - .5
		st = Util.infomask(asa*asa, model_circle(radius,nx,nx,nx), True)
		goal = st[0]
		going = True
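		# Iteratively re-align every rotated volume to the running average; keep going
		# while the masked power of the average (st[0]) still increases, i.e. until the
		# mutual alignment stops improving.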
		while(going):
			set_params3D(asa,[0.,0.,0.,0.,0.,0.,0,1.0])
			# for i in xrange(no_of_viper_runs_analyzed_together):
			for idx, i in enumerate(list_of_independent_viper_run_indices_used_for_outlier_elimination):
				o = ali_vol(vls[idx],asa,7.0,5.,radius)  # range of angles and shifts, maybe should be adjusted
				p = get_params3D(o)
				del o
				set_params3D(vls[idx],p)
			asa,sas = ave_var(vls)
			st = Util.infomask(asa*asa, model_circle(radius,nx,nx,nx), True)
			if(st[0] > goal):  goal = st[0]
			else:  going = False
		# over and out
		asa.write_image(mainoutputdir + DIR_DELIM + "average_volume.hdf")
		sas.write_image(mainoutputdir + DIR_DELIM + "variance_volume.hdf")
	return
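
When there are more MPI ranks than projections, the function above splits the communicator so that only the first n_projs ranks take part in the reconstruction while the surplus ranks idle. A minimal mpi4py sketch of that split is given below (mpi4py rather than the SPARX mpi wrappers; n_projs is a made-up value).

# Hypothetical stand-alone sketch (mpi4py) of the "more ranks than images" split.
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
n_projs = 3   # pretend only 3 projections are available

# color 0 = ranks that will work, color 1 = surplus ranks that sit out,
# mirroring  working = int(not(mpi_rank < n_projs))  above.
working = int(not (rank < n_projs))
subcomm = comm.Split(color=working, key=rank - working * n_projs)

if rank < n_projs:
    print("rank %d works as subrank %d of %d" % (rank, subcomm.Get_rank(), subcomm.Get_size()))
else:
    print("rank %d sits this reconstruction out" % rank)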
Example no. 52
def main():
    arglist = []
    for arg in sys.argv:
        arglist.append(arg)

    progname = os.path.basename(arglist[0])
    usage = progname + " prj_stack volume [begin end step] --CTF --npad=ntimes_padding --list=file --group=ID --snr=SNR --sym=symmetry --verbose=(0|1) --xysize --MPI"
    parser = OptionParser(usage, version=SPARXVERSION)

    parser.add_option("--CTF",
                      action="store_true",
                      default=False,
                      help="apply CTF correction")
    parser.add_option("--snr",
                      type="float",
                      default=1.0,
                      help="Signal-to-Noise Ratio")
    parser.add_option("--sym", type="string", default="c1", help="symmetry")
    parser.add_option(
        "--list",
        type="string",
        help="file with list of images to be used in the first column")
    parser.add_option(
        "--group",
        type="int",
        default=-1,
        help=
        "perform reconstruction using images for a given group number (group is attribute in the header)"
    )
    parser.add_option("--MPI",
                      action="store_true",
                      default=False,
                      help="use MPI version ")
    parser.add_option("--npad",
                      type="int",
                      default=2,
                      help="number of times padding (default 2)")
    parser.add_option("--verbose",
                      type="int",
                      default=0,
                      help="verbose level: 0 no verbose, 1 verbose")
    parser.add_option("--xysize",
                      type="int",
                      default=-1,
                      help="user expected size at xy direction")
    parser.add_option("--zsize",
                      type="int",
                      default=-1,
                      help="user expected size at z direction")
    parser.add_option("--smearstep",
                      type="float",
                      default=0.0,
                      help="Rotational smear step (default 0.0, no smear)")
    parser.add_option(
        "--interpolation_method",
        type="string",
        default="4nn",
        help="4nn, or tril: nearest neighbor, or trillinear interpolation")
    parser.add_option("--niter",
                      type="int",
                      default=10,
                      help="number of iterations for iterative reconstruction")
    parser.add_option("--upweighted",
                      action="store_true",
                      default=False,
                      help="apply background noise")
    parser.add_option("--compensate",
                      action="store_true",
                      default=False,
                      help="compensate in reconstruction")
    parser.add_option("--chunk_id",
                      type="int",
                      default=-1,
                      help="reconstruct both odd and even groups of particles")
    parser.add_option("--target_window_size",
                      type="int",
                      default=-1,
                      help=" size of the targeted reconstruction ")
    (options, args) = parser.parse_args(arglist[1:])

    if options.MPI:
        from mpi import mpi_init
        sys.argv = mpi_init(len(sys.argv), sys.argv)

    if global_def.CACHE_DISABLE:
        from utilities import disable_bdb_cache
        disable_bdb_cache()

    if len(args) == 2:
        prj_stack = args[0]
        vol_stack = args[1]
        nimage = EMUtil.get_image_count(prj_stack)
        pid_list = list(range(0, nimage))
    elif len(args) == 5:
        prj_stack = args[0]
        vol_stack = args[1]
        begin = atoi(args[2])
        end = atoi(args[3])
        step = atoi(args[4])
        pid_list = list(range(begin, end, step))
    else:
        ERROR("incomplete list of arguments", "recon3d_n", 1)
        exit()

    if (options.list and options.group > -1):
        ERROR("options group and list cannot be used together", "recon3d_n", 1)
        sys.exit()

    from applications import recons3d_n, recons3d_trl_MPI

    global_def.BATCH = True
    if options.interpolation_method == "4nn":
        recons3d_n(prj_stack, pid_list, vol_stack, options.CTF, options.snr, 1, options.npad,\
         options.sym, options.list, options.group, options.verbose, options.MPI,options.xysize, options.zsize, options.smearstep, options.upweighted, options.compensate,options.chunk_id)
    elif options.interpolation_method == "tril":
        if options.MPI is False:
            ERROR(
                " trillinear interpolation reconstruction has MPI version only!"
            )
            sys.exit()
        recons3d_trl_MPI(prj_stack, pid_list, vol_stack, options.CTF, options.snr, 1, options.npad,\
         options.sym, options.verbose, options.niter, options.compensate, options.target_window_size)

    else:
        ERROR(
            " Wrong interpolation method. The current options are 4nn, and tril. 4nn is the defalut one. "
        )

    global_def.BATCH = False

    if options.MPI:
        from mpi import mpi_finalize
        mpi_finalize()
Example no. 53
def main():

	from logger import Logger, BaseLogger_Files
	import user_functions
	from optparse import OptionParser, SUPPRESS_HELP
	from global_def import SPARXVERSION
	from EMAN2 import EMData

	main_node = 0
	mpi_init(0, [])
	mpi_comm = MPI_COMM_WORLD
	myid = mpi_comm_rank(MPI_COMM_WORLD)
	mpi_size = mpi_comm_size(MPI_COMM_WORLD)	# Total number of processes, passed by --np option.

	# mpi_barrier(mpi_comm)
	# from mpi import mpi_finalize
	# mpi_finalize()
	# print "mpi finalize"
	# from sys import exit
	# exit()

	progname = os.path.basename(sys.argv[0])
	usage = progname + " stack  [output_directory] --ir=inner_radius --radius=outer_radius --rs=ring_step --xr=x_range --yr=y_range  --ts=translational_search_step  --delta=angular_step --an=angular_neighborhood  --center=center_type --maxit1=max_iter1 --maxit2=max_iter2 --L2threshold=0.1  --fl --aa --ref_a=S --sym=c1"
	usage += """

stack			2D images in a stack file: (default required string)
output_directory: directory name into which the output files will be written.  If it does not exist, the directory will be created.  If it does exist, the program will continue executing from where it stopped (if it did not already reach the end). The "--use_latest_master_directory" option can be used to choose the most recent directory that starts with "master".
"""

	parser = OptionParser(usage,version=SPARXVERSION)
	parser.add_option("--radius",                type="int",           help="radius of the particle: has to be less than < int(nx/2)-1 (default required int)")

	parser.add_option("--ir",                    type="int",           default=1,          help="inner radius for rotational search: > 0 (default 1)")
	parser.add_option("--rs",                    type="int",           default=1,          help="step between rings in rotational search: >0 (default 1)")
	parser.add_option("--xr",                    type="string",        default='0',        help="range for translation search in x direction: search is +/xr in pixels (default '0')")
	parser.add_option("--yr",                    type="string",        default='0',        help="range for translation search in y direction: if omitted will be set to xr, search is +/yr in pixels (default '0')")
	parser.add_option("--ts",                    type="string",        default='1.0',      help="step size of the translation search in x-y directions: search is -xr, -xr+ts, 0, xr-ts, xr, can be fractional (default '1.0')")
	parser.add_option("--delta",                 type="string",        default='2.0',      help="angular step of reference projections: (default '2.0')")
	#parser.add_option("--an",       type="string", default= "-1",              help="angular neighborhood for local searches (phi and theta)")
	parser.add_option("--center",                type="float",         default=-1.0,       help="centering of 3D template: average shift method; 0: no centering; 1: center of gravity (default -1.0)")
	parser.add_option("--maxit1",                type="int",           default=400,        help="maximum number of iterations performed for the GA part: (default 400)")
	parser.add_option("--maxit2",                type="int",           default=50,         help="maximum number of iterations performed for the finishing up part: (default 50)")
	parser.add_option("--L2threshold",           type="float",         default=0.03,       help="stopping criterion of GA: given as a maximum relative dispersion of volumes' L2 norms: (default 0.03)")
	parser.add_option("--doga",                  type="float",         default=0.1,        help="do GA when fraction of orientation changes less than 1.0 degrees is at least doga: (default 0.1)")
	parser.add_option("--n_shc_runs",            type="int",           default=4,          help="number of quasi-independent shc runs (same as '--nruns' parameter from sxviper.py): (default 4)")
	parser.add_option("--n_rv_runs",             type="int",           default=10,         help="number of rviper iterations: (default 10)")
	parser.add_option("--n_v_runs",              type="int",           default=3,          help="number of viper runs for each r_viper cycle: (default 3)")
	parser.add_option("--outlier_percentile",    type="float",         default=95.0,       help="percentile above which outliers are removed every rviper iteration: (default 95.0)")
	parser.add_option("--iteration_start",       type="int",           default=0,          help="starting iteration for rviper: 0 means go to the most recent one (default 0)")
	#parser.add_option("--CTF",      action="store_true", default=False,        help="NOT IMPLEMENTED Consider CTF correction during the alignment ")
	#parser.add_option("--snr",      type="float",  default= 1.0,               help="Signal-to-Noise Ratio of the data (default 1.0)")
	parser.add_option("--ref_a",                 type="string",        default='S',        help="method for generating the quasi-uniformly distributed projection directions: (default S)")
	parser.add_option("--sym",                   type="string",        default='c1',       help="point-group symmetry of the structure: (default c1)")
	# parser.add_option("--function", type="string", default="ref_ali3d",         help="name of the reference preparation function (ref_ali3d by default)")
	##### XXXXXXXXXXXXXXXXXXXXXX option does not exist in docs XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
	parser.add_option("--function", type="string", default="ref_ali3d",         help=SUPPRESS_HELP)
	parser.add_option("--npad",                  type="int",           default=2,          help="padding size for 3D reconstruction: (default 2)")
	# parser.add_option("--npad", type="int",  default= 2,            help="padding size for 3D reconstruction (default 2)")

	#options introduced for the do_volume function
	parser.add_option("--fl",                    type="float",         default=0.25,       help="cut-off frequency applied to the template volume: using a hyperbolic tangent low-pass filter (default 0.25)")
	parser.add_option("--aa",                    type="float",         default=0.1,        help="fall-off of hyperbolic tangent low-pass filter: (default 0.1)")
	parser.add_option("--pwreference",           type="string",        default='',         help="text file with a reference power spectrum: (default none)")
	parser.add_option("--mask3D",                type="string",        default=None,       help="3D mask file: (default sphere)")
	parser.add_option("--moon_elimination",      type="string",        default='',         help="elimination of disconnected pieces: two arguments: mass in KDa and pixel size in px/A separated by comma, no space (default none)")

	# used for debugging, help is suppressed with SUPPRESS_HELP
	##### XXXXXXXXXXXXXXXXXXXXXX option does not exist in docs XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
	parser.add_option("--my_random_seed",      type="int",  default=123,  help = SUPPRESS_HELP)
	##### XXXXXXXXXXXXXXXXXXXXXX option does not exist in docs XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
	parser.add_option("--run_get_already_processed_viper_runs", action="store_true", dest="run_get_already_processed_viper_runs", default=False, help = SUPPRESS_HELP)
	##### XXXXXXXXXXXXXXXXXXXXXX option does not exist in docs XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
	parser.add_option("--use_latest_master_directory", action="store_true", dest="use_latest_master_directory", default=False, help = SUPPRESS_HELP)
	
	parser.add_option("--criterion_name",        type="string",        default='80th percentile',help="criterion deciding if volumes have a core set of stable projections: '80th percentile', other options:'fastest increase in the last quartile' (default '80th percentile')")
	parser.add_option("--outlier_index_threshold_method",type="string",        default='discontinuity_in_derivative',help="method that decides which images to keep: discontinuity_in_derivative, other options:percentile, angle_measure (default discontinuity_in_derivative)")
	parser.add_option("--angle_threshold",       type="int",           default=30,         help="angle threshold for projection removal if using 'angle_measure': (default 30)")
	

	required_option_list = ['radius']
	(options, args) = parser.parse_args(sys.argv[1:])

	options.CTF = False
	options.snr = 1.0
	options.an = -1

	if options.moon_elimination == "":
		options.moon_elimination = []
	else:
		options.moon_elimination = map(float, options.moon_elimination.split(","))

	# Making sure all required options appeared.
	for required_option in required_option_list:
		if not options.__dict__[required_option]:
			print "\n ==%s== mandatory option is missing.\n"%required_option
			print "Please run '" + progname + " -h' for detailed options"
			return 1

	mpi_barrier(MPI_COMM_WORLD)
	if(myid == main_node):
		print "****************************************************************"
		Util.version()
		print "****************************************************************"
		sys.stdout.flush()
	mpi_barrier(MPI_COMM_WORLD)

	# this is just for benefiting from a user friendly parameter name
	options.ou = options.radius 
	my_random_seed = options.my_random_seed
	criterion_name = options.criterion_name
	outlier_index_threshold_method = options.outlier_index_threshold_method
	use_latest_master_directory = options.use_latest_master_directory
	iteration_start_default = options.iteration_start
	number_of_rrr_viper_runs = options.n_rv_runs
	no_of_viper_runs_analyzed_together_from_user_options = options.n_v_runs
	no_of_shc_runs_analyzed_together = options.n_shc_runs 
	outlier_percentile = options.outlier_percentile 
	angle_threshold = options.angle_threshold 
	
	run_get_already_processed_viper_runs = options.run_get_already_processed_viper_runs
	get_already_processed_viper_runs(run_get_already_processed_viper_runs)

	import random
	random.seed(my_random_seed)

	if len(args) < 1 or len(args) > 3:
		print "usage: " + usage
		print "Please run '" + progname + " -h' for detailed options"
		return 1

	# if len(args) > 2:
	# 	ref_vol = get_im(args[2])
	# else:
	ref_vol = None
	
	# error_status = None
	# if myid == 0:
	# 	number_of_images = EMUtil.get_image_count(args[0])
	# 	if mpi_size > number_of_images:
	# 		error_status = ('Number of processes supplied by --np in mpirun needs to be less than or equal to %d (total number of images) ' % number_of_images, getframeinfo(currentframe()))
	# if_error_then_all_processes_exit_program(error_status)
	
	bdb_stack_location = ""

	masterdir = ""
	if len(args) == 2:
		masterdir = args[1]
		if masterdir[-1] != DIR_DELIM:
			masterdir += DIR_DELIM
	elif len(args) == 1:
		if use_latest_master_directory:
			all_dirs = [d for d in os.listdir(".") if os.path.isdir(d)]
			import re; r = re.compile("^master.*$")
			all_dirs = filter(r.match, all_dirs)
			if len(all_dirs)>0:
				# all_dirs = max(all_dirs, key=os.path.getctime)
				masterdir = max(all_dirs, key=os.path.getmtime)
				masterdir += DIR_DELIM

	log = Logger(BaseLogger_Files())

	error_status = 0	
	if mpi_size % no_of_shc_runs_analyzed_together != 0:
		ERROR('Number of processes needs to be a multiple of the number of quasi-independent runs (shc) within each viper run. '
		'The number of quasi-independent runs is 4 by default; you can change it by specifying the '
		'--n_shc_runs option (in sxviper this option is called --nruns). Also, to improve communication time it is recommended that '
		'the number of processes divided by the number of quasi-independent runs is a power '
		'of 2 (e.g. 2, 4, 8 or 16 depending on how many physical cores each node has).', 'sxviper', 1)
		error_status = 1
	if_error_then_all_processes_exit_program(error_status)

	#Create folder for all results or check if there is one created already
	if(myid == main_node):
		#cmd = "{}".format("Rmycounter ccc")
		#cmdexecute(cmd)

		if( masterdir == ""):
			timestring = strftime("%Y_%m_%d__%H_%M_%S" + DIR_DELIM, localtime())
			masterdir = "master"+timestring

		if not os.path.exists(masterdir):
			cmd = "{} {}".format("mkdir", masterdir)
			cmdexecute(cmd)

		if ':' in args[0]:
			bdb_stack_location = args[0].split(":")[0] + ":" + masterdir + args[0].split(":")[1]
			org_stack_location = args[0]

			if(not os.path.exists(os.path.join(masterdir,"EMAN2DB" + DIR_DELIM))):
				# cmd = "{} {}".format("cp -rp EMAN2DB", masterdir, "EMAN2DB" DIR_DELIM)
				# cmdexecute(cmd)
				cmd = "{} {} {}".format("e2bdb.py", org_stack_location,"--makevstack=" + bdb_stack_location + "_000")
				cmdexecute(cmd)

				from applications import header
				try:
					header(bdb_stack_location + "_000", params='original_image_index', fprint=True)
					print "Images were already indexed!"
				except KeyError:
					print "Indexing images"
					header(bdb_stack_location + "_000", params='original_image_index', consecutive=True)
		else:
			filename = os.path.basename(args[0])
			bdb_stack_location = "bdb:" + masterdir + os.path.splitext(filename)[0]
			if(not os.path.exists(os.path.join(masterdir,"EMAN2DB" + DIR_DELIM))):
				cmd = "{} {} {}".format("sxcpy.py  ", args[0], bdb_stack_location + "_000")
				cmdexecute(cmd)

				from applications import header
				try:
					header(bdb_stack_location + "_000", params='original_image_index', fprint=True)
					print "Images were already indexed!"
				except KeyError:
					print "Indexing images"
					header(bdb_stack_location + "_000", params='original_image_index', consecutive=True)

	# send masterdir to all processes
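	# Only the main node knows the directory name at this point; mpi_bcast cannot send a
	# Python string of unknown length, so the length is broadcast first (MPI_INT), then the
	# characters (MPI_CHAR), and the received character array is joined back into a string.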
	dir_len  = len(masterdir)*int(myid == main_node)
	dir_len = mpi_bcast(dir_len,1,MPI_INT,0,MPI_COMM_WORLD)[0]
	masterdir = mpi_bcast(masterdir,dir_len,MPI_CHAR,main_node,MPI_COMM_WORLD)
	masterdir = string.join(masterdir,"")
	if masterdir[-1] != DIR_DELIM:
		masterdir += DIR_DELIM
		
	global_def.LOGFILE =  os.path.join(masterdir, global_def.LOGFILE)
	print_program_start_information()
	

	# mpi_barrier(mpi_comm)
	# from mpi import mpi_finalize
	# mpi_finalize()
	# print "mpi finalize"
	# from sys import exit
	# exit()
		
	
	# send bdb_stack_location to all processes
	dir_len  = len(bdb_stack_location)*int(myid == main_node)
	dir_len = mpi_bcast(dir_len,1,MPI_INT,0,MPI_COMM_WORLD)[0]
	bdb_stack_location = mpi_bcast(bdb_stack_location,dir_len,MPI_CHAR,main_node,MPI_COMM_WORLD)
	bdb_stack_location = string.join(bdb_stack_location,"")

	iteration_start = get_latest_directory_increment_value(masterdir, "main")

	if (myid == main_node):
		if (iteration_start < iteration_start_default):
			ERROR('Starting iteration provided is greater than last iteration performed. Quitting program', 'sxviper', 1)
			error_status = 1
	if iteration_start_default!=0:
		iteration_start = iteration_start_default
	if (myid == main_node):
		if (number_of_rrr_viper_runs < iteration_start):
			ERROR('Please provide number of rviper runs (--n_rv_runs) greater than number of iterations already performed.', 'sxviper', 1)
			error_status = 1

	if_error_then_all_processes_exit_program(error_status)
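	# Each pass of the loop below reads the stack that survived outlier removal in the
	# previous iteration (suffix _%03d), launches independent VIPER runs, calls
	# identify_outliers to decide whether more runs are needed, and finally reconstructs
	# and stores the volumes for this rviper iteration.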

	for rviper_iter in range(iteration_start, number_of_rrr_viper_runs + 1):
		if(myid == main_node):
			all_projs = EMData.read_images(bdb_stack_location + "_%03d"%(rviper_iter - 1))
			print "XXXXXXXXXXXXXXXXX"
			print "Number of projections (in loop): " + str(len(all_projs))
			print "XXXXXXXXXXXXXXXXX"
			subset = range(len(all_projs))
		else:
			all_projs = None
			subset = None

		runs_iter = get_latest_directory_increment_value(masterdir + NAME_OF_MAIN_DIR + "%03d"%rviper_iter, DIR_DELIM + NAME_OF_RUN_DIR, start_value=0) - 1
		no_of_viper_runs_analyzed_together = max(runs_iter + 2, no_of_viper_runs_analyzed_together_from_user_options)

		first_time_entering_the_loop_need_to_do_full_check_up = True
		while True:
			runs_iter += 1

			if not first_time_entering_the_loop_need_to_do_full_check_up:
				if runs_iter >= no_of_viper_runs_analyzed_together:
					break
			first_time_entering_the_loop_need_to_do_full_check_up = False

			this_run_is_NOT_complete = 0
			if (myid == main_node):
				independent_run_dir = masterdir + DIR_DELIM + NAME_OF_MAIN_DIR + ('%03d' + DIR_DELIM + NAME_OF_RUN_DIR + "%03d" + DIR_DELIM)%(rviper_iter, runs_iter)
				if run_get_already_processed_viper_runs:
					cmd = "{} {}".format("mkdir -p", masterdir + DIR_DELIM + NAME_OF_MAIN_DIR + ('%03d' + DIR_DELIM)%(rviper_iter)); cmdexecute(cmd)
					cmd = "{} {}".format("rm -rf", independent_run_dir); cmdexecute(cmd)
					cmd = "{} {}".format("cp -r", get_already_processed_viper_runs() + " " +  independent_run_dir); cmdexecute(cmd)

				if os.path.exists(independent_run_dir + "log.txt") and (string_found_in_file("Finish VIPER2", independent_run_dir + "log.txt")):
					this_run_is_NOT_complete = 0
				else:
					this_run_is_NOT_complete = 1
					cmd = "{} {}".format("rm -rf", independent_run_dir); cmdexecute(cmd)
					cmd = "{} {}".format("mkdir -p", independent_run_dir); cmdexecute(cmd)

				this_run_is_NOT_complete = mpi_bcast(this_run_is_NOT_complete,1,MPI_INT,main_node,MPI_COMM_WORLD)[0]
				dir_len = len(independent_run_dir)
				dir_len = mpi_bcast(dir_len,1,MPI_INT,main_node,MPI_COMM_WORLD)[0]
				independent_run_dir = mpi_bcast(independent_run_dir,dir_len,MPI_CHAR,main_node,MPI_COMM_WORLD)
				independent_run_dir = string.join(independent_run_dir,"")
			else:
				this_run_is_NOT_complete = mpi_bcast(this_run_is_NOT_complete,1,MPI_INT,main_node,MPI_COMM_WORLD)[0]
				dir_len = 0
				independent_run_dir = ""
				dir_len = mpi_bcast(dir_len,1,MPI_INT,main_node,MPI_COMM_WORLD)[0]
				independent_run_dir = mpi_bcast(independent_run_dir,dir_len,MPI_CHAR,main_node,MPI_COMM_WORLD)
				independent_run_dir = string.join(independent_run_dir,"")

			if this_run_is_NOT_complete:
				mpi_barrier(MPI_COMM_WORLD)

				if independent_run_dir[-1] != DIR_DELIM:
					independent_run_dir += DIR_DELIM

				log.prefix = independent_run_dir

				options.user_func = user_functions.factory[options.function]

				# for debugging purposes
				#if (myid == main_node):
					#cmd = "{} {}".format("cp ~/log.txt ", independent_run_dir)
					#cmdexecute(cmd)
					#cmd = "{} {}{}".format("cp ~/paramdir/params$(mycounter ccc).txt ", independent_run_dir, "param%03d.txt"%runs_iter)
					#cmd = "{} {}{}".format("cp ~/paramdir/params$(mycounter ccc).txt ", independent_run_dir, "params.txt")
					#cmdexecute(cmd)

				if (myid == main_node):
					store_value_of_simple_vars_in_json_file(masterdir + 'program_state_stack.json', locals(), exclude_list_of_vars=["usage"], 
						vars_that_will_show_only_size = ["subset"])
					store_value_of_simple_vars_in_json_file(masterdir + 'program_state_stack.json', options.__dict__, write_or_append='a')

				# mpi_barrier(mpi_comm)
				# from mpi import mpi_finalize
				# mpi_finalize()
				# print "mpi finalize"
				# from sys import exit
				# exit()

				out_params, out_vol, out_peaks = multi_shc(all_projs, subset, no_of_shc_runs_analyzed_together, options,
				mpi_comm=mpi_comm, log=log, ref_vol=ref_vol)

				# end of: if this_run_is_NOT_complete:

			if runs_iter >= (no_of_viper_runs_analyzed_together_from_user_options - 1):
				increment_for_current_iteration = identify_outliers(myid, main_node, rviper_iter,
				no_of_viper_runs_analyzed_together, no_of_viper_runs_analyzed_together_from_user_options, masterdir,
				bdb_stack_location, outlier_percentile, criterion_name, outlier_index_threshold_method, angle_threshold)

				if increment_for_current_iteration == MUST_END_PROGRAM_THIS_ITERATION:
					break

				no_of_viper_runs_analyzed_together += increment_for_current_iteration

		# end of independent viper loop

		calculate_volumes_after_rotation_and_save_them(options, rviper_iter, masterdir, bdb_stack_location, myid,
		mpi_size, no_of_viper_runs_analyzed_together, no_of_viper_runs_analyzed_together_from_user_options)

		if increment_for_current_iteration == MUST_END_PROGRAM_THIS_ITERATION:
			if (myid == main_node):
				print "RVIPER found a core set of stable projections for the current RVIPER iteration (%d), the maximum angle difference between corresponding projections from different VIPER volumes is less than %.2f. Finishing."%(rviper_iter, ANGLE_ERROR_THRESHOLD)
			break
	else:
		if (myid == main_node):
			print "After running the last iteration (%d), RVIPER did not find a set of projections with the maximum angle difference between corresponding projections from different VIPER volumes less than %.2f Finishing."%(rviper_iter, ANGLE_ERROR_THRESHOLD)
		
			
	# end of RVIPER loop

	#mpi_finalize()
	#sys.exit()

	mpi_barrier(MPI_COMM_WORLD)
	mpi_finalize()
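# A hedged usage sketch (not part of the original script): assuming the file above is saved
# as sxrviper.py and "bdb:stack" is a placeholder input, a run on 16 MPI processes (a
# multiple of --n_shc_runs, as required above) might look like:
#   mpirun -np 16 sxrviper.py bdb:stack master_outdir --radius=29 --sym=c1 --n_shc_runs=4 --n_v_runs=3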
Esempio n. 54
0
def main():
    import os
    import sys
    from optparse import OptionParser
    arglist = []
    for arg in sys.argv:
        arglist.append(arg)
    progname = os.path.basename(arglist[0])
    usage = progname + """ inputvolume  locresvolume maskfile outputfile   --radius --falloff  --MPI

	    Locally filter a volume based on a local resolution volume (sxlocres.py) within the area outlined by the maskfile
	"""
    parser = OptionParser(usage, version=SPARXVERSION)

    parser.add_option(
        "--radius",
        type="int",
        default=-1,
        help=
        "if there is no maskfile, sphere with r=radius will be used, by default the radius is nx/2-1"
    )
    parser.add_option("--falloff",
                      type="float",
                      default=0.1,
                      help="falloff of tanl filter (default 0.1)")
    parser.add_option("--MPI",
                      action="store_true",
                      default=False,
                      help="use MPI version")

    (options, args) = parser.parse_args(arglist[1:])

    if len(args) < 3 or len(args) > 4:
        print("See usage " + usage)
        sys.exit()

    if global_def.CACHE_DISABLE:
        from utilities import disable_bdb_cache
        disable_bdb_cache()

    if options.MPI:
        from mpi import mpi_init, mpi_comm_size, mpi_comm_rank, MPI_COMM_WORLD
        from mpi import mpi_reduce, mpi_bcast, mpi_barrier, mpi_gatherv, mpi_send, mpi_recv
        from mpi import MPI_SUM, MPI_FLOAT, MPI_INT
        sys.argv = mpi_init(len(sys.argv), sys.argv)

        number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
        myid = mpi_comm_rank(MPI_COMM_WORLD)
        main_node = 0

        if (myid == main_node):
            #print sys.argv
            vi = get_im(sys.argv[1])
            ui = get_im(sys.argv[2])
            #print   Util.infomask(ui, None, True)
            radius = options.radius
            nx = vi.get_xsize()
            ny = vi.get_ysize()
            nz = vi.get_zsize()
            dis = [nx, ny, nz]
        else:
            falloff = 0.0
            radius = 0
            dis = [0, 0, 0]
            vi = None
            ui = None
        dis = bcast_list_to_all(dis, myid, source_node=main_node)

        if (myid != main_node):
            nx = int(dis[0])
            ny = int(dis[1])
            nz = int(dis[2])
        radius = bcast_number_to_all(radius, main_node)
        if len(args) == 3:
            if (radius == -1): radius = min(nx, ny, nz) // 2 - 1
            m = model_circle(radius, nx, ny, nz)
            outvol = args[2]

        elif len(args) == 4:
            if (myid == main_node): m = binarize(get_im(args[2]), 0.5)
            else: m = model_blank(nx, ny, nz)
            outvol = args[3]
            bcast_EMData_to_all(m, myid, main_node)

        from filter import filterlocal
        filteredvol = filterlocal(ui, vi, m, options.falloff, myid, main_node,
                                  number_of_proc)

        if (myid == 0): filteredvol.write_image(outvol)

        from mpi import mpi_finalize
        mpi_finalize()

    else:
        vi = get_im(args[0])
        ui = get_im(
            args[1]
        )  # resolution volume, values are assumed to be from 0 to 0.5

        nn = vi.get_xsize()

        falloff = options.falloff

        if len(args) == 3:
            radius = options.radius
            if (radius == -1): radius = nn // 2 - 1
            m = model_circle(radius, nn, nn, nn)
            outvol = args[2]

        elif len(args) == 4:
            m = binarize(get_im(args[2]), 0.5)
            outvol = args[3]

        fftip(vi)  # this is the volume to be filtered

        #  Round all resolution numbers to two digits
        for x in xrange(nn):
            for y in xrange(nn):
                for z in xrange(nn):
                    ui.set_value_at_fast(x, y, z,
                                         round(ui.get_value_at(x, y, z), 2))
        st = Util.infomask(ui, m, True)

        filteredvol = model_blank(nn, nn, nn)
        cutoff = max(st[2] - 0.01, 0.0)
        while (cutoff < st[3]):
            cutoff = round(cutoff + 0.01, 2)
            pt = Util.infomask(
                threshold_outside(ui, cutoff - 0.00501, cutoff + 0.005), m,
                True)
            if (pt[0] != 0.0):
                vovo = fft(filt_tanl(vi, cutoff, falloff))
                for x in xrange(nn):
                    for y in xrange(nn):
                        for z in xrange(nn):
                            if (m.get_value_at(x, y, z) > 0.5):
                                if (round(ui.get_value_at(x, y, z),
                                          2) == cutoff):
                                    filteredvol.set_value_at_fast(
                                        x, y, z, vovo.get_value_at(x, y, z))

        filteredvol.write_image(outvol)
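# A hedged usage sketch (not part of the original script), with placeholder file names:
#   sxfilterlocal.py vol.hdf locres.hdf mask.hdf vol_filtered.hdf --falloff=0.1
# or, without a mask file, using a sphere of the given radius and the MPI version:
#   mpirun -np 4 sxfilterlocal.py vol.hdf locres.hdf vol_filtered.hdf --radius=30 --MPI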
Esempio n. 55
0
def main():
	import os
	import sys
	from optparse import OptionParser
	from global_def import SPARXVERSION
	import global_def
	arglist = []
	for arg in sys.argv:
		arglist.append(arg)
	progname = os.path.basename(arglist[0])
	usage = progname + " stack ref_vol outdir  <maskfile> --ir=inner_radius --ou=outer_radius --rs=ring_step --xr=x_range --ynumber=y_numbers  --txs=translational_search_stepx  --delta=angular_step --an=angular_neighborhood --center=1 --maxit=max_iter --CTF --snr=1.0  --ref_a=S --sym=c1 --datasym=symdoc --new"
	
	parser = OptionParser(usage,version=SPARXVERSION)
	#parser.add_option("--ir",                 type="float", 	     default= -1,                 help="inner radius for rotational correlation > 0 (set to 1) (Angstroms)")
	parser.add_option("--ou",                 type="float", 	     default= -1,                 help="outer radius for rotational 2D correlation < int(nx/2)-1 (set to the radius of the particle) (Angstroms)")
	parser.add_option("--rs",                 type="int",   		 default= 1,                  help="step between rings in rotational correlation >0  (set to 1)" ) 
	parser.add_option("--xr",                 type="string",		 default= " 4  2 1  1   1",   help="range for translation search in x direction, search is +/-xr (Angstroms) ")
	parser.add_option("--txs",                type="string",		 default= "1 1 1 0.5 0.25",   help="step size of the translation search in x directions, search is -xr, -xr+ts, 0, xr-ts, xr (Angstroms)")
	parser.add_option("--y_restrict",         type="string",		 default= "-1 -1 -1 -1 -1",   help="range for translational search in y-direction, search is +/-y_restrict in Angstroms. This only applies to local search, i.e., when an is not -1. If y_restrict < 0, then for ihrsrlocalcons (option --localcons local search with consistency), the y search range is set such that it is the same ratio to dp as angular search range is to dphi. For regular ihrsr, y search range is the full range when y_restrict< 0. Default is -1.")
	parser.add_option("--ynumber",            type="string",		 default= "4 8 16 32 32",     help="even number of the translation search in y direction, search is (-dpp/2,-dpp/2+dpp/ny,,..,0,..,dpp/2-dpp/ny dpp/2]")
	parser.add_option("--delta",              type="string",		 default= " 10 6 4  3   2",   help="angular step of reference projections")
	parser.add_option("--an",                 type="string",		 default= "-1",               help="angular neighborhood for local searches (default -1, meaning do exhaustive search)")
	parser.add_option("--maxit",              type="int",            default= 30,                 help="maximum number of iterations performed for each angular step (default 30) ")
	parser.add_option("--CTF",                action="store_true",   default=False,      		  help="CTF correction")
	parser.add_option("--snr",                type="float",          default= 1.0,                help="Signal-to-Noise Ratio of the data (default 1)")	
	parser.add_option("--MPI",                action="store_true",   default=True,               help="use MPI version")
	#parser.add_option("--fourvar",           action="store_true",   default=False,               help="compute Fourier variance")
	parser.add_option("--apix",               type="float",			 default= -1.0,               help="pixel size in Angstroms")   
	parser.add_option("--dp",                 type="float",			 default= -1.0,               help="delta z - translation in Angstroms")   
	parser.add_option("--dphi",               type="float",			 default= -1.0,               help="delta phi - rotation in degrees")  
		
	parser.add_option("--ndp",                type="int",            default= 12,                 help="In symmetrization search, number of delta z steps equals to 2*ndp+1") 
	parser.add_option("--ndphi",              type="int",            default= 12,                 help="In symmetrization search,number of dphi steps equas to 2*ndphi+1")  
	parser.add_option("--dp_step",            type="float",          default= 0.1,                help="delta z (Angstroms) step  for symmetrization")  
	parser.add_option("--dphi_step",          type="float",          default= 0.1,                help="dphi step for symmetrization")
	   
	parser.add_option("--psi_max",            type="float", 		 default= 10.0,               help="maximum psi - how far rotation in plane can can deviate from 90 or 270 degrees (default 10)")   
	parser.add_option("--rmin",               type="float", 		 default= 0.0,                help="minimal radius for hsearch (Angstroms)")   
	parser.add_option("--rmax",               type="float", 		 default= 80.0,               help="maximal radius for hsearch (Angstroms)")
	parser.add_option("--fract",              type="float", 		 default= 0.7,                help="fraction of the volume used for helical search")
	parser.add_option("--sym",                type="string",		 default= "c1",               help="symmetry of the structure")
	parser.add_option("--function",           type="string",		 default="helical",  	      help="name of the reference preparation function")
	parser.add_option("--datasym",            type="string",		 default="datasym.txt",       help="symdoc")
	parser.add_option("--nise",               type="int",   		 default= 200,                help="start symmetrization after nise steps (default 200)")
	parser.add_option("--npad",               type="int",   		 default= 2,                  help="padding size for 3D reconstruction, (default 2)")
	parser.add_option("--debug",              action="store_true",   default=False,               help="debug")
	parser.add_option("--new",                action="store_true",   default=False,               help="use rectangular recon and projection version")
	parser.add_option("--initial_theta",      type="float",		     default=90.0,                help="intial theta for reference projection (default 90)")
	parser.add_option("--delta_theta",        type="float",		     default=1.0,                 help="delta theta for reference projection (default 1.0)")
	parser.add_option("--WRAP",               type="int",  		     default= 1,                  help="do helical wrapping (default 1, meaning yes)")

	(options, args) = parser.parse_args(arglist[1:])
	if len(args) < 1 or len(args) > 5:
		print "usage: " + usage + "\n"
		print "Please run '" + progname + " -h' for detailed options"
	else:
		# Convert input arguments in the units/format as expected by ihrsr_MPI in applications.
		if options.apix < 0:
			print "Please enter pixel size"
			sys.exit()

		rminp = int((float(options.rmin)/options.apix) + 0.5)
		rmaxp = int((float(options.rmax)/options.apix) + 0.5)
		
		from utilities import get_input_from_string, get_im

		xr = get_input_from_string(options.xr)
		txs = get_input_from_string(options.txs)
		y_restrict = get_input_from_string(options.y_restrict)

		irp = 1
		if options.ou < 0:  oup = -1
		else:               oup = int( (options.ou/options.apix) + 0.5)
		xrp = ''
		txsp = ''
		y_restrict2 = ''
		
		for i in xrange(len(xr)):
			xrp += " "+str(float(xr[i])/options.apix)
		for i in xrange(len(txs)):
			txsp += " "+str(float(txs[i])/options.apix)
		# now y_restrict has the same format as x search range .... has to change ihrsr accordingly
		for i in xrange(len(y_restrict)):
			y_restrict2 += " "+str(float(y_restrict[i])/options.apix)

		if options.MPI:
			from mpi import mpi_init, mpi_finalize
			sys.argv = mpi_init(len(sys.argv), sys.argv)

		if global_def.CACHE_DISABLE:
			from utilities import disable_bdb_cache
			disable_bdb_cache()

		from applications import ihrsr
		global_def.BATCH = True
		if len(args) < 4:  mask = None
		else:               mask = args[3]
		ihrsr(args[0], args[1], args[2], mask, irp, oup, options.rs, xrp, options.ynumber, txsp, options.delta, options.initial_theta, options.delta_theta, options.an, options.maxit, options.CTF, options.snr, options.dp, options.ndp, options.dp_step, options.dphi, options.ndphi, options.dphi_step, options.psi_max, rminp, rmaxp, options.fract, options.nise, options.npad,options.sym, options.function, options.datasym, options.apix, options.debug, options.MPI, options.WRAP, y_restrict2) 
		global_def.BATCH = False

		if options.MPI:
			from mpi import mpi_finalize
			mpi_finalize()
Esempio n. 56
0
def main():
    progname = os.path.basename(sys.argv[0])
    usage = progname + " out_averages outdir --ou=outer_radius --xr=x_range --ts=translation_step --maxit=max_iteration --CTF --snr=SNR --function=user_function_name --Fourvar --th_err=threshold_cutoff --ali=kind_of_alignment --center=center_type"
    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option(
        "--ou",
        type="int",
        default=-1,
        help=
        "outer radius for rotational correlation < nx/2-1 (set to the radius of the particle)"
    )
    parser.add_option(
        "--xr",
        type="string",
        default="4 2",
        help="range for translation search in x direction, search is +/xr ")
    parser.add_option("--ts",
                      type="string",
                      default="2 1",
                      help="step of translation search in both directions")
    parser.add_option(
        "--maxit",
        type="float",
        default=0,
        help=
        "maximum number of iterations (0 means the maximum iterations is 10, but it will automatically stop should the criterion falls"
    )
    parser.add_option("--CTF",
                      action="store_true",
                      default=False,
                      help="Consider CTF correction during the alignment ")
    parser.add_option("--snr",
                      type="float",
                      default=1.0,
                      help="signal-to-noise ratio of the data (set to 1.0)")
    parser.add_option("--Fourvar",
                      action="store_true",
                      default=False,
                      help="compute Fourier variance")
    parser.add_option("--function",
                      type="string",
                      default="ref_ali2d",
                      help="name of the reference preparation function")
    parser.add_option('--Ng', type='int', default=-1, help='Ng')
    parser.add_option('--K', type='int', default=-1, help='K')
    parser.add_option("--dst", type="float", default=0.0, help="")
    parser.add_option(
        "--center",
        type="float",
        default=-1,
        help=
        "-1.average center method; 0.not centered; 1.phase approximation; 2.cc with Gaussian function; 3.cc with donut-shaped image 4.cc with user-defined reference 5.cc with self-rotated average"
    )
    parser.add_option("--CUDA",
                      action="store_true",
                      default=False,
                      help=" whether to use CUDA ")
    parser.add_option("--GPUID",
                      type="string",
                      default="",
                      help=" ID of GPUs to use")
    parser.add_option('--MPI', action='store_true', default=False, help='MPI')

    (options, args) = parser.parse_args()
    if len(args) != 3:
        print("usage: " + usage)
        print("Please run '" + progname + " -h' for detailed options")
    else:
        if global_def.CACHE_DISABLE:
            from utilities import disable_bdb_cache
            disable_bdb_cache()

        if options.MPI:
            from mpi import mpi_init
            sys.argv = mpi_init(len(sys.argv), sys.argv)

        global_def.BATCH = True
        from development import mref_alignment
        mref_alignment(args[0], args[1], args[2], options.ou, options.xr,
                       options.ts, options.maxit, options.function,
                       options.snr, options.CTF, options.Fourvar, options.Ng,
                       options.K, options.dst, options.center, options.CUDA,
                       options.GPUID, options.MPI)
        global_def.BATCH = False

        if options.MPI:
            from mpi import mpi_finalize
            mpi_finalize()
Esempio n. 57
0
def main():
    arglist = []
    i = 0
    while i < len(sys.argv):
        if sys.argv[i] == "-p4pg":
            i = i + 2
        elif sys.argv[i] == "-p4wd":
            i = i + 2
        else:
            arglist.append(sys.argv[i])
            i = i + 1
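    # The -p4pg and -p4wd flags (together with their values) are added by some MPI launchers
    # (e.g. MPICH's ch_p4 device); the loop above strips them so that only the user's own
    # arguments reach the option parser.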
    progname = os.path.basename(arglist[0])
    usage = (
        progname
        + " stack ref_vols outdir <mask> --focus=3Dmask --ir=inner_radius --ou=outer_radius --rs=ring_step --xr=x_range --yr=y_range  --ts=translational_searching_step "
        + " --delta=angular_step --an=angular_neighborhood --center=1 --nassign=reassignment_number --nrefine=alignment_number --maxit=max_iter --stoprnct=percentage_to_stop "
        + " --debug --fourvar=fourier_variance --CTF --snr=1.0 --ref_a=S --sym=c1 --function=user_function --MPI --kmeans"
    )
    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option("--focus", type="string", default=None, help="3D mask for focused clustering ")
    parser.add_option("--ir", type="int", default=1, help="inner radius for rotational correlation > 0 (set to 1)")
    parser.add_option(
        "--ou",
        type="int",
        default="-1",
        help="outer radius for rotational correlation <nx-1 (set to the radius of the particle)",
    )
    parser.add_option("--maxit", type="int", default=5, help="maximum number of iteration")
    parser.add_option(
        "--rs", type="int", default="1", help="step between rings in rotational correlation >0 (set to 1)"
    )
    parser.add_option(
        "--xr", type="string", default="4 2 1 1 1", help="range for translation search in x direction, search is +/-xr "
    )
    parser.add_option(
        "--yr",
        type="string",
        default="-1",
        help="range for translation search in y direction, search is +/-yr (default = same as xr)",
    )
    parser.add_option(
        "--ts",
        type="string",
        default="0.25",
        help="step size of the translation search in both directions direction, search is -xr, -xr+ts, 0, xr-ts, xr ",
    )
    parser.add_option("--delta", type="string", default="10 6 4  3   2", help="angular step of reference projections")
    parser.add_option("--an", type="string", default="-1", help="angular neighborhood for local searches")
    parser.add_option(
        "--center",
        type="int",
        default=0,
        help="0 - if you do not want the volume to be centered, 1 - center the volume using cog (default=0)",
    )
    parser.add_option(
        "--nassign",
        type="int",
        default=0,
        help="number of reassignment iterations performed for each angular step (set to 3) ",
    )
    parser.add_option(
        "--nrefine",
        type="int",
        default=1,
        help="number of alignment iterations performed for each angular step (set to 1) ",
    )
    parser.add_option("--CTF", action="store_true", default=False, help="Consider CTF correction during the alignment ")
    parser.add_option("--snr", type="float", default=1.0, help="Signal-to-Noise Ratio of the data")
    parser.add_option(
        "--stoprnct", type="float", default=0.0, help="Minimum percentage of assignment change to stop the program"
    )
    parser.add_option(
        "--ref_a",
        type="string",
        default="S",
        help="method for generating the quasi-uniformly distributed projection directions (default S) ",
    )
    parser.add_option("--sym", type="string", default="c1", help="symmetry of the structure ")
    parser.add_option(
        "--function", type="string", default="ref_ali3dm", help="name of the reference preparation function"
    )
    parser.add_option("--MPI", action="store_true", default=False, help="Use MPI version ")
    parser.add_option("--npad", type="int", default=2, help="padding size for 3D reconstruction")
    parser.add_option("--debug", action="store_true", default=False, help="debug ")
    parser.add_option("--fourvar", action="store_true", default=False, help="compute and use fourier variance")
    parser.add_option("--kmeans", action="store_true", default=False, help="use kmeansmref instead of equalmref")

    (options, args) = parser.parse_args(arglist[1:])
    if len(args) < 3 or len(args) > 4:
        print "usage: " + usage
        print "Please run '" + progname + " -h' for detailed options"
    else:

        if len(args) == 3:
            maskfile = None
        else:
            maskfile = args[3]

        if global_def.CACHE_DISABLE:
            from utilities import disable_bdb_cache

            disable_bdb_cache()

        global_def.BATCH = True
        if options.MPI:
            from mpi import mpi_init

            sys.argv = mpi_init(len(sys.argv), sys.argv)
            if options.kmeans:
                from applications import Kmref_ali3d_MPI

                Kmref_ali3d_MPI(
                    args[0],
                    args[1],
                    args[2],
                    maskfile,
                    options.focus,
                    options.maxit,
                    options.ir,
                    options.ou,
                    options.rs,
                    options.xr,
                    options.yr,
                    options.ts,
                    options.delta,
                    options.an,
                    options.center,
                    options.nassign,
                    options.nrefine,
                    options.CTF,
                    options.snr,
                    options.ref_a,
                    options.sym,
                    options.function,
                    options.npad,
                    options.debug,
                    options.fourvar,
                    options.stoprnct,
                    mpi_comm=None,
                    log=None,
                )
            else:
                from applications import mref_ali3d_MPI

                mref_ali3d_MPI(
                    args[0],
                    args[1],
                    args[2],
                    maskfile,
                    options.focus,
                    options.maxit,
                    options.ir,
                    options.ou,
                    options.rs,
                    options.xr,
                    options.yr,
                    options.ts,
                    options.delta,
                    options.an,
                    options.center,
                    options.nassign,
                    options.nrefine,
                    options.CTF,
                    options.snr,
                    options.ref_a,
                    options.sym,
                    options.function,
                    options.npad,
                    options.debug,
                    options.fourvar,
                    options.stoprnct,
                    mpi_comm=None,
                    log=None,
                )
        else:
            from applications import mref_ali3d

            mref_ali3d(
                args[0],
                args[1],
                args[2],
                maskfile,
                options.focus,
                options.maxit,
                options.ir,
                options.ou,
                options.rs,
                options.xr,
                options.yr,
                options.ts,
                options.delta,
                options.an,
                options.center,
                options.nassign,
                options.nrefine,
                options.CTF,
                options.snr,
                options.ref_a,
                options.sym,
                options.function,
                options.npad,
                options.debug,
                options.fourvar,
                options.stoprnct,
            )
        global_def.BATCH = False

        if options.MPI:
            from mpi import mpi_finalize

            mpi_finalize()
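# A hedged usage sketch (not part of the original script): assuming the file above is saved
# as sxmref_ali3d.py, with placeholder file names:
#   mpirun -np 8 sxmref_ali3d.py bdb:data ref_vols.hdf outdir_mref --CTF --MPI --kmeans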
Esempio n. 58
0
def main():
	progname = os.path.basename(sys.argv[0])
	usage    = progname + " stack outdir --ir --ou --delta --dpsi --lf --hf --rand_seed --maxit --debug --noweights --trials --given --first_zero --MPIGA --pcross --pmut --maxgen --MPI"
	parser   = OptionParser(usage, version = SPARXVERSION)
	parser.add_option("--ir",         type="float",        default=-1,       help="Inner radius of particle (set to 1)")
	parser.add_option("--ou",         type="float",        default=-1,       help="Outer radius of particle < int(nx/2)-1")
	parser.add_option("--delta",      type="float",        default=5.0,      help="Angle step" )
	parser.add_option("--dpsi",       type="int",          default=1,        help="Angle accuracy for sinogram (set to 1)")
	parser.add_option("--lf",         type="float",        default=0.0,      help="Filter, minimum frequency (set to 0.0)")
	parser.add_option("--hf",         type="float",        default=0.5,      help="Filter, maximum frequency (set to 0.5)")
	parser.add_option("--given",      action="store_true", default=False,    help="Start from given projections orientation (set to False, means start with randomize orientations)")
	parser.add_option("--rand_seed",  type="int",          default=-1,       help="Random seed of initial orientations (if set to randomly)")
	parser.add_option("--maxit",      type="int",          default=100,      help="Maximum number of iterations ")
	parser.add_option("--debug",      action="store_true", default=False,    help="Help to debug")
	parser.add_option("--first_zero", action="store_true", default=False,    help="Assign the first projection orientation to 0")
	parser.add_option("--noweights",  action="store_true", default=False,    help="Use Voronoi weighting (by default use weights)")
	parser.add_option("--MPI",        action="store_true", default=False,    help="MPI version")
	parser.add_option("--trials",     type="int",          default=1,        help="Number of trials for the MPI version")
	parser.add_option("--MPIGA",      action="store_true", default=False,    help="MPI version (Genetic algorithm)")
	parser.add_option("--pcross",     type="float",        default=0.95,     help="Cross-over probability (set to 0.95)")
	parser.add_option("--pmut",       type="float",        default=0.05,     help="Mutation probability (set to 0.05)")
	parser.add_option("--maxgen",     type="int",          default=10,       help="Maximum number of generations (set to 10)")
	(options, args) = parser.parse_args()
	if len(args) != 2:
		print("usage: " + usage)
		print("Please run '" + progname + " -h' for detailed options")
	else:
		if options.maxit < 1: options.maxit = 1
		if options.noweights: weights = False
		else:                 weights = True

		if global_def.CACHE_DISABLE:
			from utilities import disable_bdb_cache
			disable_bdb_cache()
		if options.MPIGA:
			from development import cml2_main_mpi
			global_def.BATCH = True
			cml2_main_mpi(args[0], args[1], options.ir, options.ou, options.delta, options.dpsi, 
				      options.lf, options.hf, options.rand_seed, options.maxit, options.given, options.first_zero, 
				      weights, options.debug, options.maxgen, options.pcross, options.pmut)
			global_def.BATCH = False
		elif options.MPI:
			from mpi import mpi_init
			sys.argv = mpi_init(len(sys.argv),sys.argv)

			from applications import cml_find_structure_MPI2
			global_def.BATCH = True
			cml_find_structure_MPI2(args[0], args[1], options.ir, options.ou, options.delta, options.dpsi, 
				    options.lf, options.hf, options.rand_seed, options.maxit, options.given, options.first_zero, 
				    weights, options.debug, options.trials)
			global_def.BATCH = False
		else:
			from applications import cml_find_structure_main
			global_def.BATCH = True
			cml_find_structure_main(args[0], args[1], options.ir, options.ou, options.delta, options.dpsi, 
				    options.lf, options.hf, options.rand_seed, options.maxit, options.given, options.first_zero, 
				    weights, options.debug, options.trials)
			global_def.BATCH = False
		if options.MPI:
			from mpi import mpi_finalize
			mpi_finalize()
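# A hedged usage sketch (not part of the original script), with a placeholder script name
# and input stack; exactly two positional arguments are expected:
#   sxfind_struct.py class_averages.hdf outdir_cml --ou=28 --delta=5.0 --rand_seed=10 --trials=5 --MPI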
Esempio n. 59
0
def main():
	import os
	import sys
	from optparse import OptionParser
	arglist = []
	for arg in sys.argv:
		arglist.append(arg)
	progname = os.path.basename(arglist[0])
	usage = progname + """ firstvolume  secondvolume maskfile outputfile --wn --step --cutoff  --radius  --fsc --MPI

	Compute local resolution in real space within the area outlined by the maskfile, in regions of wn x wn x wn voxels
	"""
	parser = OptionParser(usage,version=SPARXVERSION)
	
	parser.add_option("--wn",		type="int",		default=7, 			help="Size of window within which local real-space FSC is computed (default 7")
	parser.add_option("--step",     type="float",	default= 1.0,       help="Shell step in Fourier size in pixels (default 1.0)")   
	parser.add_option("--cutoff",   type="float",	default= 0.5,       help="resolution cut-off for FSC (default 0.5)")
	parser.add_option("--radius",	type="int",		default=-1, 		help="if there is no maskfile, sphere with r=radius will be used, by default the radius is nx/2-wn")
	parser.add_option("--fsc",      type="string",	default= None,      help="overall FSC curve (might be truncated) (default no curve)")
	parser.add_option("--MPI",      action="store_true",   	default=False,  help="use MPI version")

	(options, args) = parser.parse_args(arglist[1:])
	
	if len(args) <3 or len(args) > 4:
		print "See usage " + usage
		sys.exit()

	if global_def.CACHE_DISABLE:
		from utilities import disable_bdb_cache
		disable_bdb_cache()


	if options.MPI:
		from mpi 	  	  import mpi_init, mpi_comm_size, mpi_comm_rank, MPI_COMM_WORLD
		from mpi 	  	  import mpi_reduce, mpi_bcast, mpi_barrier, mpi_gatherv, mpi_send, mpi_recv
		from mpi 	  	  import MPI_SUM, MPI_FLOAT, MPI_INT, MPI_TAG_UB
		sys.argv = mpi_init(len(sys.argv),sys.argv)		
	
		number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
		myid = mpi_comm_rank(MPI_COMM_WORLD)
		main_node = 0
		cutoff = options.cutoff

		nk = int(options.wn)

		if(myid == main_node):
			#print sys.argv
			vi = get_im(sys.argv[1])
			ui = get_im(sys.argv[2])

			nx = vi.get_xsize()
			ny = vi.get_ysize()
			nz = vi.get_zsize()
			dis = [nx, ny, nz]
		else:
			dis = [0,0,0,0]


		dis = bcast_list_to_all(dis, myid, source_node = main_node)

		if(myid != main_node):
			nx = int(dis[0])
			ny = int(dis[1])
			nz = int(dis[2])

			vi = model_blank(nx,ny,nz)
			ui = model_blank(nx,ny,nz)


		if len(args) == 3:
			m = model_circle((min(nx,ny,nz)-nk)//2,nx,ny,nz)
			outvol = args[2]
		
		elif len(args) == 4:
			if(myid == main_node):
				m = binarize(get_im(args[2]), 0.5)
			else:
				m = model_blank(nx, ny, nz)
			outvol = args[3]
		bcast_EMData_to_all(m, myid, main_node)

		from statistics import locres
		freqvol, resolut = locres(vi, ui, m, nk, cutoff, options.step, myid, main_node, number_of_proc)
		if(myid == 0):
			freqvol.write_image(outvol)
			if(options.fsc != None): write_text_row(resolut, options.fsc)

		from mpi import mpi_finalize
		mpi_finalize()

	else:
		cutoff = options.cutoff
		vi = get_im(args[0])
		ui = get_im(args[1])

		nn = vi.get_xsize()
		nk = int(options.wn)
	
		if len(args) == 3:
			m = model_circle((nn-nk)//2,nn,nn,nn)
			outvol = args[2]
		
		elif len(args) == 4:
			m = binarize(get_im(args[2]), 0.5)
			outvol = args[3]

		mc = model_blank(nn,nn,nn,1.0)-m

		vf = fft(vi)
		uf = fft(ui)

		lp = int(nn/2/options.step+0.5)
		step = 0.5/lp

		freqvol = model_blank(nn,nn,nn)
		resolut = []
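		# The loop below scans resolution shells of width "step": both half-volumes are
		# band-pass filtered with filt_tophatb, the FSC-like ratio <u*v>/sqrt(<u*u><v*v>) is
		# evaluated locally inside an nk x nk x nk window via Util.box_convolution, and the
		# first shell at which this local ratio drops below "cutoff" is stored in freqvol as
		# that voxel's local resolution.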
		for i in xrange(1,lp):
			fl = step*i
			fh = fl+step
			print lp,i,step,fl,fh
			v = fft(filt_tophatb( vf, fl, fh))
			u = fft(filt_tophatb( uf, fl, fh))
			tmp1 = Util.muln_img(v,v)
			tmp2 = Util.muln_img(u,u)

			do = Util.infomask(square_root(Util.muln_img(tmp1,tmp2)),m,True)[0]


			tmp3 = Util.muln_img(u,v)
			dp = Util.infomask(tmp3,m,True)[0]
			resolut.append([i,(fl+fh)/2.0, dp/do])

			tmp1 = Util.box_convolution(tmp1, nk)
			tmp2 = Util.box_convolution(tmp2, nk)
			tmp3 = Util.box_convolution(tmp3, nk)

			Util.mul_img(tmp1,tmp2)

			tmp1 = square_root(tmp1)

			Util.mul_img(tmp1,m)
			Util.add_img(tmp1,mc)

			Util.mul_img(tmp3,m)
			Util.add_img(tmp3,mc)

			Util.div_img(tmp3,tmp1)

			Util.mul_img(tmp3,m)
			freq=(fl+fh)/2.0
			bailout = True
			for x in xrange(nn):
				for y in xrange(nn):
					for z in xrange(nn):
						if(m.get_value_at(x,y,z) > 0.5):
							if(freqvol.get_value_at(x,y,z) == 0.0):
								if(tmp3.get_value_at(x,y,z) < cutoff):
									freqvol.set_value_at(x,y,z,freq)
									bailout = False
								else:
									bailout = False
			if(bailout):  break

		freqvol.write_image(outvol)
		if(options.fsc != None): write_text_row(resolut, options.fsc)
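# A hedged usage sketch (not part of the original script): assuming the file above is saved
# as sxlocres.py, with placeholder half-map and mask names:
#   mpirun -np 8 sxlocres.py halfmap1.hdf halfmap2.hdf mask.hdf locres.hdf --wn=7 --cutoff=0.5 --MPI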
Esempio n. 60
0
def main():
    progname = os.path.basename(sys.argv[0])
    usage = progname + " stack outdir <maskfile> --ir=inner_radius --ou=outer_radius --rs=ring_step --xr=x_range --yr=y_range --ts=translation_step --dst=delta --center=center --maxit=max_iteration --CTF --snr=SNR --Fourvar=Fourier_variance --Ng=group_number --Function=user_function_name --CUDA --GPUID --MPI"
    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option(
        "--ir",
        type="float",
        default=1,
        help="inner radius for rotational correlation > 0 (set to 1)")
    parser.add_option(
        "--ou",
        type="float",
        default=-1,
        help=
        "outer radius for rotational correlation < nx/2-1 (set to the radius of the particle)"
    )
    parser.add_option(
        "--rs",
        type="float",
        default=1,
        help="step between rings in rotational correlation > 0 (set to 1)")
    parser.add_option(
        "--xr",
        type="string",
        default="4 2 1 1",
        help="range for translation search in x direction, search is +/xr ")
    parser.add_option(
        "--yr",
        type="string",
        default="-1",
        help="range for translation search in y direction, search is +/yr ")
    parser.add_option("--ts",
                      type="string",
                      default="2 1 0.5 0.25",
                      help="step of translation search in both directions")
    parser.add_option(
        "--nomirror",
        action="store_true",
        default=False,
        help="Disable checking mirror orientations of images (default False)")
    parser.add_option("--dst", type="float", default=0.0, help="delta")
    parser.add_option(
        "--center",
        type="float",
        default=-1,
        help=
        "-1.average center method; 0.not centered; 1.phase approximation; 2.cc with Gaussian function; 3.cc with donut-shaped image 4.cc with user-defined reference 5.cc with self-rotated average"
    )
    parser.add_option(
        "--maxit",
        type="float",
        default=0,
        help=
        "maximum number of iterations (0 means the maximum iterations is 10, but it will automatically stop should the criterion falls"
    )
    parser.add_option("--CTF",
                      action="store_true",
                      default=False,
                      help="use CTF correction during alignment")
    parser.add_option("--snr",
                      type="float",
                      default=1.0,
                      help="signal-to-noise ratio of the data (set to 1.0)")
    parser.add_option("--Fourvar",
                      action="store_true",
                      default=False,
                      help="compute Fourier variance")
    #parser.add_option("--Ng",       type="int",          default=-1,      help="number of groups in the new CTF filteration")
    parser.add_option(
        "--function",
        type="string",
        default="ref_ali2d",
        help="name of the reference preparation function (default ref_ali2d)")
    #parser.add_option("--CUDA",     action="store_true", default=False,   help="use CUDA program")
    #parser.add_option("--GPUID",    type="string",    default="",         help="ID of GPUs available")
    parser.add_option("--MPI",
                      action="store_true",
                      default=False,
                      help="use MPI version ")
    parser.add_option(
        "--rotational",
        action="store_true",
        default=False,
        help=
        "rotational alignment with optional limited in-plane angle, the parameters are: ir, ou, rs, psi_max, mode(F or H), maxit, orient, randomize"
    )
    parser.add_option("--psi_max", type="float", default=180.0, help="psi_max")
    parser.add_option("--mode",
                      type="string",
                      default="F",
                      help="Full or Half rings, default F")
    parser.add_option(
        "--randomize",
        action="store_true",
        default=False,
        help="randomize initial rotations (suboption of friedel, default False)"
    )
    parser.add_option(
        "--orient",
        action="store_true",
        default=False,
        help=
        "orient images such that the average is symmetric about x-axis, for layer lines (suboption of friedel, default False)"
    )
    parser.add_option(
        "--template",
        type="string",
        default=None,
        help=
        "2D alignment will be initialized using the template provided (only non-MPI version, default None)"
    )
    parser.add_option("--random_method",
                      type="string",
                      default="",
                      help="use SHC or SCF (default standard method)")

    (options, args) = parser.parse_args()

    if len(args) < 2 or len(args) > 3:
        print("usage: " + usage)
        print("Please run '" + progname + " -h' for detailed options")
    elif (options.rotational):
        from applications import ali2d_rotationaltop
        global_def.BATCH = True
        ali2d_rotationaltop(args[1], args[0], options.randomize,
                            options.orient, options.ir, options.ou, options.rs,
                            options.psi_max, options.mode, options.maxit)
    else:
        if args[1] == 'None': outdir = None
        else: outdir = args[1]

        if len(args) == 2: mask = None
        else: mask = args[2]

        if global_def.CACHE_DISABLE:
            from utilities import disable_bdb_cache
            disable_bdb_cache()

        global_def.BATCH = True
        if options.MPI:
            from applications import ali2d_base
            from mpi import mpi_init, mpi_comm_size, mpi_comm_rank, MPI_COMM_WORLD
            sys.argv = mpi_init(len(sys.argv), sys.argv)

            number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
            myid = mpi_comm_rank(MPI_COMM_WORLD)
            main_node = 0

            if (myid == main_node):
                import subprocess
                from logger import Logger, BaseLogger_Files
                #  Create output directory
                log = Logger(BaseLogger_Files())
                log.prefix = os.path.join(outdir)
                cmd = "mkdir " + log.prefix
                outcome = subprocess.call(cmd, shell=True)
                log.prefix += "/"
            else:
                outcome = 0
                log = None
            from utilities import bcast_number_to_all
            outcome = bcast_number_to_all(outcome, source_node=main_node)
            if (outcome == 1):
                ERROR(
                    'Output directory exists, please change the name and restart the program',
                    "ali2d_MPI", 1, myid)

            dummy = ali2d_base(args[0], outdir, mask, options.ir, options.ou, options.rs, options.xr, options.yr, \
             options.ts, options.nomirror, options.dst, \
             options.center, options.maxit, options.CTF, options.snr, options.Fourvar, \
             options.function, random_method = options.random_method, log = log, \
             number_of_proc = number_of_proc, myid = myid, main_node = main_node, mpi_comm = MPI_COMM_WORLD,\
             write_headers = True)
        else:
            print(" Non-MPI is no more in use, try MPI option, please.")
            """
			from applications import ali2d
			ali2d(args[0], outdir, mask, options.ir, options.ou, options.rs, options.xr, options.yr, \
				options.ts, options.nomirror, options.dst, \
				options.center, options.maxit, options.CTF, options.snr, options.Fourvar, \
				-1, options.function, False, "", options.MPI, \
				options.template, random_method = options.random_method)
	    	"""
        global_def.BATCH = False

        if options.MPI:
            from mpi import mpi_finalize
            mpi_finalize()
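# A hedged usage sketch (not part of the original script): assuming the file above is saved
# as sxali2d.py, with a placeholder input stack:
#   mpirun -np 8 sxali2d.py bdb:data outdir_ali2d --ou=29 --xr="4 2 1 1" --ts="2 1 0.5 0.25" --CTF --MPI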