Example #1
def ave_ali_err_params_new(ali_params1, ali_params2, r=25):
	'''
	This function determines the relative angle, shifts and mirrorness between
	the two sets of alignment parameters. It also calculates the mirror-consistency
	rate and the average pixel error between the two sets of parameters.
	'''
	from utilities import combine_params2
	from math import sqrt, sin, pi
	
	# Determine relative angle, shift and mirror
	alphai, sxi, syi, mirror = align_diff_params(ali_params1, ali_params2)
	#sxi = syi = 0.
	# Determine the average pixel error
	nima = len(ali_params1)/4
	mirror_same = 0
	err = 0.0
	difi = []
	for i in xrange(nima):
		alpha1, sx1, sy1, mirror1 = ali_params1[i*4:i*4+4]
		alpha2, sx2, sy2, mirror2 = ali_params2[i*4:i*4+4]

		if abs(mirror1-mirror2) == mirror: 
			mirror_same += 1
			alpha12, sx12, sy12, mirror12 = combine_params2(alpha1, sx1, sy1, int(mirror1), alphai, sxi, syi, 0)
			qerr = max_2D_pixel_error([alpha12, sx12, sy12], [alpha2, sx2, sy2], r)
			err += qerr
			difi.append([alpha12, sx12, sy12, mirror12,alpha2, sx2, sy2,qerr])
		else:
			difi.append([-1., -1., -1., -1, 9999.0])

	return alphai, sxi, syi, mirror, float(mirror_same)/nima, err/mirror_same, difi
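
A minimal usage sketch for the function above, assuming the helpers align_diff_params and max_2D_pixel_error are available in the same module; the two inputs (hypothetical names params_run1 and params_run2) are flat lists with four entries per image, i.e. [alpha, sx, sy, mirror] repeated nima times:

# Hypothetical inputs: flat [alpha, sx, sy, mirror] lists from two alignment runs.
alphai, sxi, syi, mirror, rate, avg_err, difi = ave_ali_err_params_new(params_run1, params_run2, r=25)
print "relative rotation:", alphai, " mirror-consistent rate:", rate, " average pixel error:", avg_err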
Example #2
def ali_stable_list(ali_params1, ali_params2, pixel_error_threshold, r=25):
	'''
	This function first determines the relative angle, shifts and mirrorness between
	the two sets of alignment parameters. It then determines whether each image is
	stable or not and returns this information as an int list (1 is stable, 0 is unstable).
	'''
	from utilities import combine_params2
	from math import sqrt, sin, pi
	
	# Determine relative angle, shift and mirror
	alphai, sxi, syi, mirror = align_diff_params(ali_params1, ali_params2)

	# Determine the average pixel error
	nima = len(ali_params1)/4
	ali_list = []
	for i in xrange(nima):
		alpha1, sx1, sy1, mirror1 = ali_params1[i*4:i*4+4]
		alpha2, sx2, sy2, mirror2 = ali_params2[i*4:i*4+4]
		if abs(mirror1-mirror2) == mirror:
			alpha12, sx12, sy12, mirror12 = combine_params2(alpha1, sx1, sy1, int(mirror1), alphai, sxi, syi, 0)
			if max_2D_pixel_error([alpha12, sx12, sy12], [alpha2, sx2, sy2], r) < pixel_error_threshold: ali_list.append(1)
			else: ali_list.append(0)
		else: ali_list.append(0)

	return ali_list
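
A brief usage sketch for the stability check above, with the same kind of hypothetical flat parameter lists as in Example #1:

# Mark each image as stable (1) or unstable (0) under a 1-pixel error threshold.
stability_flags = ali_stable_list(params_run1, params_run2, pixel_error_threshold=1.0, r=25)
n_stable = sum(stability_flags)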
Example #3
def ave_ali_err(data1, data2=None, r=25, suffix="_ideal"):
	'''
	This function determines the relative angle, shifts and mirrorness between
	the two lists of data. It also calculates the mirror-consistency
	rate and the average pixel error between the two sets of parameters.
	'''
	from utilities import get_params2D, combine_params2
	from math import sqrt, sin, pi
	
	# Determine relative angle, shifts and mirror
	alphai, sxi, syi, mirror = align_diff(data1, data2, suffix)

	# Determine the average pixel error
	err = 0.0
	nima = len(data1)
	mirror_same = 0
	for i in xrange(nima):
		alpha1, sx1, sy1, mirror1, scale1 = get_params2D(data1[i])
		if data2 != None:
			alpha2, sx2, sy2, mirror2, scale2 = get_params2D(data2[i])
		else:
			alpha2, sx2, sy2, mirror2, scale2 = get_params2D(data1[i], "xform.align2d"+suffix)
		
		if abs(mirror1-mirror2) == mirror: 
			mirror_same += 1
			alpha12, sx12, sy12, mirror12 = combine_params2(alpha1, sx1, sy1, int(mirror1), alphai, sxi, syi, 0)
			err += max_2D_pixel_error([alpha12, sx12, sy12], [alpha2, sx2, sy2], r)
	
	return alphai, sxi, syi, mirror, float(mirror_same)/nima, err/mirror_same
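
A usage sketch under stated assumptions: data1 is a list of EMData images carrying 2D alignment parameters in their headers, and with data2=None the second parameter set is read from the same headers under the key "xform.align2d" + suffix (default "_ideal"). The image list name below is a placeholder:

# Hypothetical call: compare the header parameters of one image list against the
# "_ideal" parameter set stored in the same headers.
alphai, sxi, syi, mirror, rate, avg_err = ave_ali_err(image_list, data2=None, r=25, suffix="_ideal")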
Example #4
	def func(args, data, return_avg_pixel_error=True):
		# Computes pixel error per particle given transformation parameters (G_l)
		from math import pi, sin, cos, sqrt
		from utilities import combine_params2
	
		ali_params = data[0]
		d = data[1]

		#print ali_params

		L = len(ali_params)
		N = len(ali_params[0])/4
		#print  "        FUNC",N,L,d
	
		args_list= [0.0]*(L*3)
		for i in xrange(L*3-3):  args_list[i] = args[i]

		pt = Transform({"type":"2D"})
		sqr_pixel_error = [0.0]*N
		ave_params =[]
		hmir = 0
		for i in xrange(N):
			sum_cosa = 0.0
			sum_sina = 0.0
			sx       = [0.0]*L
			sy       = [0.0]*L
			alpha    = [0.0]*L
			for l in xrange(L):
				alpha[l], sx[l], sy[l], mirror12 = combine_params2(ali_params[l][i*4+0], ali_params[l][i*4+1], ali_params[l][i*4+2], int(ali_params[l][i*4+3]), args_list[l*3+0],args_list[l*3+1],args_list[l*3+2],0)
				hmir += mirror12
				sum_cosa += cos(alpha[l]*pi/180.0)
				sum_sina += sin(alpha[l]*pi/180.0)
			sqrtP = sqrt(sum_cosa**2+sum_sina**2)
			sum_cosa /= sqrtP
			sum_sina /= sqrtP
			#  This completes calculation of matrix H_i
			"""
			anger = 0.0
			for l in xrange(L):
				anger += (cos(alpha[l]*pi/180.0)-sum_cosa)**2
				anger += (sin(alpha[l]*pi/180.0)-sum_sina)**2
			anger *= 2
			sqr_pixel_error[i] = d*d/4.*anger/L/4.+sqerr(sx)+sqerr(sy)
			"""
			sqr_pixel_error[i] = d*d/4*(1.0-sqrtP/L) + sqerr(sx) + sqerr(sy)
			#  Get ave transform params
			pt.set_matrix([sum_cosa, sum_sina, 0.0, sum(sx)/L, -sum_sina, sum_cosa, 0.0, sum(sy)/L, 0.0, 0.0, 1.0, 0.0])
			dd = pt.get_params("2D")
			#  We are using here mirror of the FIRST SET.
			pt = Transform({"type":"2D","alpha":dd[ "alpha" ],"tx":dd[ "tx" ],"ty": dd[ "ty" ],"mirror":int(ali_params[0][i*4+3]),"scale":1.0})
			dd = pt.get_params("2D")
			ave_params.append([dd[ "alpha" ], dd[ "tx" ], dd[ "ty" ], dd[ "mirror" ]])
			#three different approaches give the same solution:
			#print i,d*d/4*(1.0-sqrtP/L) + sqerr(sx) + sqerr(sy),sqr_pixel_error[i]#, (sin((alpha[0]-alpha[1])*pi/180.0/4.0)*(d))**2/2  + ((sx[0]-sx[1])/2)**2 +  ((sy[0]-sy[1])/2)**2
		# Warning: the value returned here is the squared pixel error; this makes the derivative easier to express.
		# Remember to take the square root after obtaining the value.
		if return_avg_pixel_error:         return sum(sqr_pixel_error)/N
		else: return sqr_pixel_error, ave_params
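
The function above relies on a helper sqerr() that is not included in this snippet; the squared pixel error it accumulates combines a rotational term, d*d/4*(1.0-sqrtP/L), with the shift terms sqerr(sx) and sqerr(sy). A minimal sketch, assuming sqerr returns the population variance of a list of shifts (consistent with the two-image check in the commented-out print, which reduces to ((sx[0]-sx[1])/2)**2 for L = 2):

def sqerr(a):
	# Population variance of a list of shifts (assumed behavior of the missing helper).
	n = len(a)
	avg = sum(a)/n
	return sum([(x - avg)**2 for x in a])/n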
Example #5
def ave_ali_err_textfile(textfile1, textfile2, r=25):
	'''
	This function determines the relative angle, shifts and mirrorness between
	the two sets of alignment parameters. It also calculates the mirror-consistency
	rate and the average pixel error between the two sets of parameters.
	'''
	from utilities import combine_params2
	from math import sqrt, sin, pi
	from utilities import read_text_row
	
	ali1 = read_text_row(textfile1, "", "")
	ali2 = read_text_row(textfile2, "", "")

	nima = len(ali1)
	nima2 = len(ali2)
	if nima2 != nima:
		print "Error: Number of images don't agree!"
		return 0.0, 0.0, 0.0, 0, 0.0, 0.0
	else:
		del nima2

	# Read the alignment parameters
	ali_params1 = []
	ali_params2 = []
	for i in xrange(nima):
		ali_params1.extend(ali1[i][0:4])
		ali_params2.extend(ali2[i][0:4])

	# Determine relative angle, shift and mirror
	alphai, sxi, syi, mirror = align_diff_params(ali_params1, ali_params2)

	# Determine the average pixel error
	nima = len(ali_params1)/4
	mirror_same = 0
	err = 0.0
	for i in xrange(nima):
		alpha1, sx1, sy1, mirror1 = ali_params1[i*4:i*4+4]
		alpha2, sx2, sy2, mirror2 = ali_params2[i*4:i*4+4]
		
		if abs(mirror1-mirror2) == mirror: 
			mirror_same += 1
			alpha12, sx12, sy12, mirror12 = combine_params2(alpha1, sx1, sy1, int(mirror1), alphai, sxi, syi, 0)
			err += max_2D_pixel_error([alpha12, sx12, sy12], [alpha2, sx2, sy2], r)
	
	return alphai, sxi, syi, mirror, float(mirror_same)/nima, err/mirror_same
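
A usage sketch with placeholder file names: each text file is expected to contain one row per image, with the first four columns read as alpha, sx, sy, mirror, and both files must hold the same number of rows:

# Hypothetical file names for two alignment runs stored as text rows.
alphai, sxi, syi, mirror, rate, avg_err = ave_ali_err_textfile("params_run1.txt", "params_run2.txt", r=25)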
Example #6
File: sxisac.py  Project: cryoem/eman2
def main(args):
	progname = os.path.basename(sys.argv[0])
	usage = ( progname + " stack_file  output_directory --radius=particle_radius --img_per_grp=img_per_grp --CTF --restart_section <The remaining parameters are optional --ir=ir --rs=rs --xr=xr --yr=yr --ts=ts --maxit=maxit --dst=dst --FL=FL --FH=FH --FF=FF --init_iter=init_iter --main_iter=main_iter" +
			" --iter_reali=iter_reali --match_first=match_first --max_round=max_round --match_second=match_second --stab_ali=stab_ali --thld_err=thld_err --indep_run=indep_run --thld_grp=thld_grp" +
			"  --generation=generation  --rand_seed=rand_seed>" )
	
	parser = OptionParser(usage,version=SPARXVERSION)
	parser.add_option("--radius",                type="int",           help="particle radius: there is no default, a sensible number has to be provided, units - pixels (default required int)")
	parser.add_option("--target_radius",         type="int",           default=29,         help="target particle radius: actual particle radius on which isac will process data. Images will be shrinked/enlarged to achieve this radius (default 29)")
	parser.add_option("--target_nx",             type="int",           default=76,         help="target particle image size: actual image size on which isac will process data. Images will be shrinked/enlarged according to target particle radius and then cut/padded to achieve target_nx size. When xr > 0, the final image size for isac processing is 'target_nx + xr - 1'  (default 76)")
	parser.add_option("--img_per_grp",           type="int",           default=100,        help="number of images per class: in the ideal case (essentially maximum size of class) (default 100)")
	parser.add_option("--CTF",                   action="store_true",  default=False,      help="apply phase-flip for CTF correction: if set the data will be phase-flipped using CTF information included in image headers (default False)")
	parser.add_option("--ir",                    type="int",           default=1,          help="inner ring: of the resampling to polar coordinates. units - pixels (default 1)")
	parser.add_option("--rs",                    type="int",           default=1,          help="ring step: of the resampling to polar coordinates. units - pixels (default 1)")
	parser.add_option("--xr",                    type="int",           default=1,         help="x range: of translational search. By default, set by the program. (default 1)")
	parser.add_option("--yr",                    type="int",           default=-1,         help="y range: of translational search. By default, same as xr. (default -1)")
	parser.add_option("--ts",                    type="float",         default=1.0,        help="search step: of translational search: units - pixels (default 1.0)")
	parser.add_option("--maxit",                 type="int",           default=30,         help="number of iterations for reference-free alignment: (default 30)")
	#parser.add_option("--snr",            type="float",        default=1.0,     help="signal-to-noise ratio (only meaningful when CTF is enabled, currently not supported)")
	parser.add_option("--center_method",         type="int",           default=-1,         help="method for centering: of global 2D average during initial prealignment of data (0 : no centering; -1 : average shift method; please see center_2D in utilities.py for methods 1-7) (default -1)")
	parser.add_option("--dst",                   type="float",         default=90.0,       help="discrete angle used in within group alignment: (default 90.0)")
	parser.add_option("--FL",                    type="float",         default=0.2,        help="lowest stopband: frequency used in the tangent filter (default 0.2)")
	parser.add_option("--FH",                    type="float",         default=0.3,        help="highest stopband: frequency used in the tangent filter (default 0.3)")
	parser.add_option("--FF",                    type="float",         default=0.2,        help="fall-off of the tangent filter: (default 0.2)")
	parser.add_option("--init_iter",             type="int",           default=3,          help="SAC initialization iterations: number of runs of ab-initio within-cluster alignment for stability evaluation in SAC initialization (default 3)")
	parser.add_option("--main_iter",             type="int",           default=3,          help="SAC main iterations: number of runs of ab-initio within-cluster alignment for stability evaluation in SAC (default 3)")
	parser.add_option("--iter_reali",            type="int",           default=1,          help="SAC stability check interval: every iter_reali iterations of SAC stability checking is performed (default 1)")
	parser.add_option("--match_first",           type="int",           default=1,          help="number of iterations to run 2-way matching in the first phase: (default 1)")
	parser.add_option("--max_round",             type="int",           default=20,         help="maximum rounds: of generating candidate class averages in the first phase (default 20)")
	parser.add_option("--match_second",          type="int",           default=5,          help="number of iterations to run 2-way (or 3-way) matching in the second phase: (default 5)")
	parser.add_option("--stab_ali",              type="int",           default=5,          help="number of alignments when checking stability: (default 5)")
	parser.add_option("--thld_err",              type="float",         default=0.7,        help="threshold of pixel error when checking stability: equals root mean square of distances between corresponding pixels from set of found transformations and theirs average transformation, depends linearly on square of radius (parameter ou). units - pixels. (default 0.7)")
	parser.add_option("--indep_run",             type="int",           default=4,          help="level of m-way matching for reproducibility tests: By default, perform full ISAC to 4-way matching. Value indep_run=2 will restrict ISAC to 2-way matching and 3 to 3-way matching.  Note the number of used MPI processes requested in mpirun must be a multiplicity of indep_run. (default 4)")
	parser.add_option("--thld_grp",              type="int",           default=10,         help="minimum size of reproducible class (default 10)")
	parser.add_option("--n_generations",         type="int",           default=100,        help="maximum number of generations: program stops when reaching this total number of generations: (default 100)")
	#parser.add_option("--candidatesexist",action="store_true", default=False,   help="Candidate class averages exist use them (default False)")
	parser.add_option("--rand_seed",             type="int",           help="random seed set before calculations: useful for testing purposes. By default, total randomness (type int)")
	parser.add_option("--new",                   action="store_true",  default=False,      help="use new code: (default False)")
	parser.add_option("--debug",                 action="store_true",  default=False,      help="debug info printout: (default False)")

	# must be switched off in production
	parser.add_option("--use_latest_master_directory",action="store_true",  default=False,      help="use latest master directory: when active, the program looks for the latest directory that starts with the word 'master', so the user does not need to provide a directory name. (default False)")
	
	parser.add_option("--restart_section",       type="string",        default=' ',        help="restart section: each generation (iteration) contains three sections: 'restart', 'candidate_class_averages', and 'reproducible_class_averages'. To restart from a particular step, for example, generation 4 and section 'candidate_class_averages' the following option is needed: '--restart_section=candidate_class_averages,4'. The option requires no white space before or after the comma. The default behavior is to restart execution from where it stopped intentionally or unintentionally. For default restart, it is assumed that the name of the directory is provided as argument. Alternatively, the '--use_latest_master_directory' option can be used. (default ' ')")
	parser.add_option("--stop_after_candidates", action="store_true",  default=False,      help="stop after candidates: stops after the 'candidate_class_averages' section. (default False)")

	##### XXXXXXXXXXXXXXXXXXXXXX option does not exist in docs XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
	parser.add_option("--return_options",        action="store_true", dest="return_options", default=False, help = SUPPRESS_HELP)
	parser.add_option("--skip_prealignment",     action="store_true",  default=False,      help="skip pre-alignment step: to be used if images are already centered. 2dalignment directory will still be generated but the parameters will be zero. (default False)")

	required_option_list = ['radius']
	(options, args) = parser.parse_args(args)

	if options.return_options:
		return parser
	
	if len(args) > 2:
		print "usage: " + usage
		print "Please run '" + progname + " -h' for detailed options"
		sys.exit()
	
	if global_def.CACHE_DISABLE:
		from utilities import disable_bdb_cache
		disable_bdb_cache()
	global_def.BATCH = True
	
	from isac import iter_isac
	from fundamentals import rot_shift2D, resample
	from utilities import pad, combine_params2

	command_line_provided_stack_filename = args[0]
	
	main_node = 0
	mpi_init(0, [])
	myid = mpi_comm_rank(MPI_COMM_WORLD)
	nproc = mpi_comm_size(MPI_COMM_WORLD)
	
	mpi_barrier(MPI_COMM_WORLD)
	if(myid == main_node):
		print "****************************************************************"
		Util.version()
		print "****************************************************************"
		sys.stdout.flush()
	mpi_barrier(MPI_COMM_WORLD)
	

	# Making sure all required options appeared.
	for required_option in required_option_list:
		if not options.__dict__[required_option]:
			print "\n ==%s== mandatory option is missing.\n"%required_option
			print "Please run '" + progname + " -h' for detailed options"
			return 1

	radi  = options.radius
	target_radius  = options.target_radius
	target_nx  = options.target_nx
	center_method  = options.center_method
	if(radi < 1):  ERROR("Particle radius has to be provided!","sxisac",1,myid)

	
	use_latest_master_directory = options.use_latest_master_directory
	stop_after_candidates = options.stop_after_candidates
	# program_state_stack.restart_location_title_from_command_line = options.restart_section
	
	from utilities import qw
	program_state_stack.PROGRAM_STATE_VARIABLES = set(qw("""
		isac_generation
	"""))

	# create or reuse master directory
	masterdir = ""
	stack_processed_by_ali2d_base__filename = ""
	stack_processed_by_ali2d_base__filename__without_master_dir = ""
	error_status = 0
	if len(args) == 2:
		masterdir = args[1]
	elif len(args) == 1:
		if use_latest_master_directory:
			all_dirs = [d for d in os.listdir(".") if os.path.isdir(d)]
			import re; r = re.compile("^master.*$")
			all_dirs = filter(r.match, all_dirs)
			if len(all_dirs)>0:
				# all_dirs = max(all_dirs, key=os.path.getctime)
				masterdir = max(all_dirs, key=os.path.getmtime)
				
	#Create folder for all results or check if there is one created already
	if(myid == main_node):
		if( masterdir == ""):
			timestring = strftime("%Y_%m_%d__%H_%M_%S" + DIR_DELIM, localtime())
			masterdir = "master"+timestring
			cmd = "{} {}".format("mkdir", masterdir)
			cmdexecute(cmd)
		elif not os.path.exists(masterdir):
			# masterdir does not exist, create it
			masterdir = args[1]
			cmd = "{} {}".format("mkdir", masterdir)
			cmdexecute(cmd)

		if(args[0][:4] == "bdb:"): filename = args[0][4:]
		else:                      filename = args[0][:-4]
		filename = os.path.basename(filename)
		stack_processed_by_ali2d_base__filename = "bdb:" + os.path.join(masterdir, filename )
		stack_processed_by_ali2d_base__filename__without_master_dir  = "bdb:" + filename
	if_error_then_all_processes_exit_program(error_status)

	# send masterdir to all processes
	masterdir = send_string_to_all(masterdir)

	if myid == 0:
		if options.restart_section != " ":
			if os.path.exists(os.path.join(masterdir,NAME_OF_JSON_STATE_FILE)):
				stored_stack, stored_state = restore_program_stack_and_state(os.path.join(masterdir,NAME_OF_JSON_STATE_FILE))
				import re
				if "," in options.restart_section:
					parsed_restart_section_option = options.restart_section.split(",")
					stored_state[-1]["location_in_program"] = re.sub(r"___.*$", "___%s"%parsed_restart_section_option[0], stored_state[-1]["location_in_program"])
					generation_str_format = parsed_restart_section_option[1]
					if generation_str_format != "":
						isac_generation_from_command_line = int(generation_str_format)
						stored_state[-1]["isac_generation"] = isac_generation_from_command_line 
					else:
						isac_generation_from_command_line = 1
						if "isac_generation" in stored_state[-1]:
							del stored_state[-1]["isac_generation"]
				else:
					isac_generation_from_command_line = -1
					stored_state[-1]["location_in_program"] = re.sub(r"___.*$", "___%s"%options.restart_section, stored_state[-1]["location_in_program"])
					if "isac_generation" in stored_state[-1]:
						del stored_state[-1]["isac_generation"]
				store_program_state(os.path.join(masterdir,NAME_OF_JSON_STATE_FILE), stored_state, stored_stack)
			else:
				print "Please remove the restart_section option from the command line. The program must be started from the beginning."			
				mpi_finalize()
				sys.exit()
		else:
			isac_generation_from_command_line = -1
	
	program_state_stack(locals(), getframeinfo(currentframe()), os.path.join(masterdir,NAME_OF_JSON_STATE_FILE))
	

	stack_processed_by_ali2d_base__filename = send_string_to_all(stack_processed_by_ali2d_base__filename)
	stack_processed_by_ali2d_base__filename__without_master_dir = \
		send_string_to_all(stack_processed_by_ali2d_base__filename__without_master_dir)

	# previous code 2016-05-05--20-14-12-153
	# #  PARAMETERS OF THE PROCEDURE
	# if( options.xr == -1 ):
	# 	#  Default values
	# 	# target_nx = 76
	# 	# target_radius = 29
	# 	target_xr = 1
	# else:  #  nx//2
	# 	#  Check below!
	# 	target_xr = options.xr
	# 	# target_nx = 76 + target_xr - 1 # subtract one, which is default
	# 	target_nx += target_xr - 1 # subtract one, which is default
	# 	# target_radius = 29

	target_xr = options.xr
	target_nx += target_xr - 1 # add xr and subtract one (the default xr)
	
	if (options.yr == -1):
		yr = options.xr
	else:
		yr = options.yr


	mpi_barrier(MPI_COMM_WORLD)

	# Initialization of stacks
	if(myid == main_node):
		print "command_line_provided_stack_filename", command_line_provided_stack_filename
		number_of_images_in_stack = EMUtil.get_image_count(command_line_provided_stack_filename)
	else:
		number_of_images_in_stack = 0

	number_of_images_in_stack = bcast_number_to_all(number_of_images_in_stack, source_node = main_node)
	
	nxrsteps = 4
	
	init2dir = os.path.join(masterdir,"2dalignment")
	
	# from mpi import mpi_finalize
	# mpi_finalize()
	# sys.stdout.flush()
	# sys.exit()
	
	
	if not os.path.exists(os.path.join(init2dir, "Finished_initial_2d_alignment.txt")):
	
		if(myid == 0):
			import subprocess
			from logger import Logger, BaseLogger_Files
			#  Create output directory
			log2d = Logger(BaseLogger_Files())
			log2d.prefix = os.path.join(init2dir)
			cmd = "mkdir -p "+log2d.prefix
			outcome = subprocess.call(cmd, shell=True)
			log2d.prefix += "/"
			# outcome = subprocess.call("sxheader.py  "+command_line_provided_stack_filename+"   --params=xform.align2d  --zero", shell=True)
		else:
			outcome = 0
			log2d = None

		if(myid == main_node):
			a = get_im(command_line_provided_stack_filename)
			nnxo = a.get_xsize()
		else:
			nnxo = 0
		nnxo = bcast_number_to_all(nnxo, source_node = main_node)

		image_start, image_end = MPI_start_end(number_of_images_in_stack, nproc, myid)

		if options.skip_prealignment:
			params2d = [[0.0,0.0,0.0,0] for i in xrange(image_start, image_end)]
		else:

			original_images = EMData.read_images(command_line_provided_stack_filename, range(image_start,image_end))
			#  We assume the target radius will be 29, and xr = 1.  
			shrink_ratio = float(target_radius)/float(radi)

			for im in xrange(len(original_images)):
				if(shrink_ratio != 1.0):
					original_images[im]  = resample(original_images[im], shrink_ratio)

			nx = original_images[0].get_xsize()
			# nx = int(nx*shrink_ratio + 0.5)

			txrm = (nx - 2*(target_radius+1))//2
			if(txrm < 0):  			ERROR( "ERROR!!   Radius of the structure is larger than the data window size permits   %d"%(radi), "sxisac",1, myid)
			if(txrm/nxrsteps>0):
				tss = ""
				txr = ""
				while(txrm/nxrsteps>0):
					tts=txrm/nxrsteps
					tss += "  %d"%tts
					txr += "  %d"%(tts*nxrsteps)
					txrm =txrm//2
			else:
				tss = "1"
				txr = "%d"%txrm
			
			# print "nx, txr, txrm, tss", nx, txr, txrm, tss
		# from mpi import mpi_finalize
		# mpi_finalize()
		# sys.stdout.flush()
		# sys.exit()



			# section ali2d_base

			params2d = ali2d_base(original_images, init2dir, None, 1, target_radius, 1, txr, txr, tss, \
				False, 90.0, center_method, 14, options.CTF, 1.0, False, \
				"ref_ali2d", "", log2d, nproc, myid, main_node, MPI_COMM_WORLD, write_headers = False)
			
			del original_images
			
			for i in xrange(len(params2d)):
				alpha, sx, sy, mirror = combine_params2(0, params2d[i][1],params2d[i][2], 0, -params2d[i][0], 0, 0, 0)
				sx /= shrink_ratio
				sy /= shrink_ratio
				params2d[i][0] = 0.0
				params2d[i][1] = sx
				params2d[i][2] = sy
				params2d[i][3] = 0
				#set_params2D(aligned_images[i],[0.0, sx,sy,0.,1.0])

		mpi_barrier(MPI_COMM_WORLD)
		tmp = params2d[:]
		tmp = wrap_mpi_gatherv(tmp, main_node, MPI_COMM_WORLD)
		if( myid == main_node ):		
			if options.skip_prealignment:
				print "========================================="
				print "Even though there is no alignment step, '%s' params are set to zero for later use."%os.path.join(init2dir, "initial2Dparams.txt")
				print "========================================="
			write_text_row(tmp,os.path.join(init2dir, "initial2Dparams.txt"))
		del tmp
		mpi_barrier(MPI_COMM_WORLD)
	
		#  We assume the target image size will be target_nx, radius will be 29, and xr = 1.  
		#  Note images can be also padded, in which case shrink_ratio > 1.
		shrink_ratio = float(target_radius)/float(radi)
		
		aligned_images = EMData.read_images(command_line_provided_stack_filename, range(image_start,image_end))
		nx = aligned_images[0].get_xsize()
		nima = len(aligned_images)
		newx = int(nx*shrink_ratio + 0.5)


		
		while not os.path.exists(os.path.join(init2dir, "initial2Dparams.txt")):
			import time
			time.sleep(1)
		mpi_barrier(MPI_COMM_WORLD)
		
		params = read_text_row(os.path.join(init2dir, "initial2Dparams.txt"))
		params = params[image_start:image_end]


		msk = model_circle(radi, nx, nx)
		for im in xrange(nima):
			st = Util.infomask(aligned_images[im], msk, False)
			aligned_images[im] -= st[0]
			if options.CTF:
				aligned_images[im] = filt_ctf(aligned_images[im], aligned_images[im].get_attr("ctf"), binary = True)
	
		if(shrink_ratio < 1.0):
			if    newx > target_nx  :
				msk = model_circle(target_radius, target_nx, target_nx)
				for im in xrange(nima):
					#  Here we should use only shifts
					#alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
					#alpha, sx, sy, mirror = combine_params2(0, sx,sy, 0, -alpha, 0, 0, 0)
					#aligned_images[im] = rot_shift2D(aligned_images[im], 0, sx, sy, 0)
					aligned_images[im] = rot_shift2D(aligned_images[im], 0, params[im][1], params[im][2], 0)
					aligned_images[im]  = resample(aligned_images[im], shrink_ratio)
					aligned_images[im] = Util.window(aligned_images[im], target_nx, target_nx, 1)
					p = Util.infomask(aligned_images[im], msk, False)
					aligned_images[im] -= p[0]
					p = Util.infomask(aligned_images[im], msk, True)
					aligned_images[im] /= p[1]
			elif  newx == target_nx :
				msk = model_circle(target_radius, target_nx, target_nx)
				for im in xrange(nima):
					#  Here we should use only shifts
					#alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
					#alpha, sx, sy, mirror = combine_params2(0, sx,sy, 0, -alpha, 0, 0, 0)
					aligned_images[im] = rot_shift2D(aligned_images[im], 0, params[im][1], params[im][2], 0)
					aligned_images[im]  = resample(aligned_images[im], shrink_ratio)
					p = Util.infomask(aligned_images[im], msk, False)
					aligned_images[im] -= p[0]
					p = Util.infomask(aligned_images[im], msk, True)
					aligned_images[im] /= p[1]
			elif  newx < target_nx  :	
				msk = model_circle(newx//2-2, newx,  newx)
				for im in xrange(nima):
					#  Here we should use only shifts
					#alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
					#alpha, sx, sy, mirror = combine_params2(0, sx,sy, 0, -alpha, 0, 0, 0)
					aligned_images[im] = rot_shift2D(aligned_images[im], 0, params[im][1], params[im][2], 0)
					aligned_images[im]  = resample(aligned_images[im], shrink_ratio)
					p = Util.infomask(aligned_images[im], msk, False)
					aligned_images[im] -= p[0]
					p = Util.infomask(aligned_images[im], msk, True)
					aligned_images[im] /= p[1]
					aligned_images[im] = pad(aligned_images[im], target_nx, target_nx, 1, 0.0)
		elif(shrink_ratio == 1.0):
			if    newx > target_nx  :
				msk = model_circle(target_radius, target_nx, target_nx)
				for im in xrange(nima):
					#  Here we should use only shifts
					#alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
					#alpha, sx, sy, mirror = combine_params2(0, sx,sy, 0, -alpha, 0, 0, 0)
					aligned_images[im] = rot_shift2D(aligned_images[im], 0, params[im][1], params[im][2], 0)
					aligned_images[im] = Util.window(aligned_images[im], target_nx, target_nx, 1)
					p = Util.infomask(aligned_images[im], msk, False)
					aligned_images[im] -= p[0]
					p = Util.infomask(aligned_images[im], msk, True)
					aligned_images[im] /= p[1]
			elif  newx == target_nx :
				msk = model_circle(target_radius, target_nx, target_nx)
				for im in xrange(nima):
					#  Here we should use only shifts
					#alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
					#alpha, sx, sy, mirror = combine_params2(0, sx,sy, 0, -alpha, 0, 0, 0)
					aligned_images[im] = rot_shift2D(aligned_images[im], 0, params[im][1], params[im][2], 0)
					p = Util.infomask(aligned_images[im], msk, False)
					aligned_images[im] -= p[0]
					p = Util.infomask(aligned_images[im], msk, True)
					aligned_images[im] /= p[1]
			elif  newx < target_nx  :			
				msk = model_circle(newx//2-2, newx,  newx)
				for im in xrange(nima):
					#  Here we should use only shifts
					#alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
					#alpha, sx, sy, mirror = combine_params2(0, sx,sy, 0, -alpha, 0, 0, 0)
					aligned_images[im] = rot_shift2D(aligned_images[im], 0, params[im][1], params[im][2], 0)
					#aligned_images[im]  = resample(aligned_images[im], shrink_ratio)
					p = Util.infomask(aligned_images[im], msk, False)
					aligned_images[im] -= p[0]
					p = Util.infomask(aligned_images[im], msk, True)
					aligned_images[im] /= p[1]
					aligned_images[im] = pad(aligned_images[im], target_nx, target_nx, 1, 0.0)
		elif(shrink_ratio > 1.0):
			if    newx > target_nx  :
				msk = model_circle(target_radius, target_nx, target_nx)
				for im in xrange(nima):
					#  Here we should use only shifts
					#alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
					#alpha, sx, sy, mirror = combine_params2(0, sx,sy, 0, -alpha, 0, 0, 0)
					aligned_images[im] = rot_shift2D(aligned_images[im], 0, params[im][1], params[im][2], 0)
					aligned_images[im]  = resample(aligned_images[im], shrink_ratio)
					aligned_images[im] = Util.window(aligned_images[im], target_nx, target_nx, 1)
					p = Util.infomask(aligned_images[im], msk, False)
					aligned_images[im] -= p[0]
					p = Util.infomask(aligned_images[im], msk, True)
					aligned_images[im] /= p[1]
			elif  newx == target_nx :
				msk = model_circle(target_radius, target_nx, target_nx)
				for im in xrange(nima):
					#  Here we should use only shifts
					#alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
					#alpha, sx, sy, mirror = combine_params2(0, sx,sy, 0, -alpha, 0, 0, 0)
					aligned_images[im] = rot_shift2D(aligned_images[im], 0, params[im][1], params[im][2], 0)
					aligned_images[im]  = resample(aligned_images[im], shrink_ratio)
					p = Util.infomask(aligned_images[im], msk, False)
					aligned_images[im] -= p[0]
					p = Util.infomask(aligned_images[im], msk, True)
					aligned_images[im] /= p[1]
			elif  newx < target_nx  :
				msk = model_circle(newx//2-2, newx,  newx)
				for im in xrange(nima):
					#  Here we should use only shifts
					#alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
					#alpha, sx, sy, mirror = combine_params2(0, sx,sy, 0, -alpha, 0, 0, 0)
					aligned_images[im] = rot_shift2D(aligned_images[im], 0, params[im][1], params[im][2], 0)
					aligned_images[im]  = resample(aligned_images[im], shrink_ratio)
					p = Util.infomask(aligned_images[im], msk, False)
					aligned_images[im] -= p[0]
					p = Util.infomask(aligned_images[im], msk, True)
					aligned_images[im] /= p[1]
					aligned_images[im] = pad(aligned_images[im], target_nx, target_nx, 1, 0.0)
		del msk
	
		gather_compacted_EMData_to_root(number_of_images_in_stack, aligned_images, myid)
		number_of_images_in_stack = bcast_number_to_all(number_of_images_in_stack, source_node = main_node)
	
		if( myid == main_node ):
			for i in range(number_of_images_in_stack):  aligned_images[i].write_image(stack_processed_by_ali2d_base__filename,i)
			#  It has to be explicitly closed
			from EMAN2db import db_open_dict
			DB = db_open_dict(stack_processed_by_ali2d_base__filename)
			DB.close()
	
			fp = open(os.path.join(masterdir,"README_shrink_ratio.txt"), "w")
			output_text = """
			Since isac changes the image dimensions for processing purposes,
			the pixel size needs to be adjusted in subsequent steps (e.g. when
			running sxviper.py). The shrink ratio and radius used for this particular isac run are
			--------
			%.5f
			%.5f
			--------
			To get the pixel size for the isac output, the user needs to divide
			the original pixel size by the above shrink ratio. This info is saved in
			the following file: README_shrink_ratio.txt
			"""%(shrink_ratio, radi)
			fp.write(output_text); fp.flush() ;fp.close()
			print output_text
			fp = open(os.path.join(init2dir, "Finished_initial_2d_alignment.txt"), "w"); fp.flush() ;fp.close()
	else:
		if( myid == main_node ):
			print "Skipping 2d alignment since it was already done!"

	mpi_barrier(MPI_COMM_WORLD)
	
	
	# from mpi import mpi_finalize
	# mpi_finalize()
	# sys.stdout.flush()
	# sys.exit()
	

	os.chdir(masterdir)
	
	if program_state_stack(locals(), getframeinfo(currentframe())):
	# if 1:
		pass
		if (myid == main_node):
			cmdexecute("sxheader.py  --consecutive  --params=originalid   %s"%stack_processed_by_ali2d_base__filename__without_master_dir)
			cmdexecute("e2bdb.py %s --makevstack=%s_000"%(stack_processed_by_ali2d_base__filename__without_master_dir, stack_processed_by_ali2d_base__filename__without_master_dir))

	if (myid == main_node):
		main_dir_no = get_latest_directory_increment_value("./", NAME_OF_MAIN_DIR, myformat="%04d")
		print "isac_generation_from_command_line", isac_generation_from_command_line, main_dir_no
		if isac_generation_from_command_line < 0:
			if os.path.exists(NAME_OF_JSON_STATE_FILE):
				stored_stack, stored_state = restore_program_stack_and_state(NAME_OF_JSON_STATE_FILE)
				if "isac_generation" in stored_state[-1]:
					isac_generation_from_command_line = stored_state[-1]["isac_generation"]
				else:
					isac_generation_from_command_line = -1
		if isac_generation_from_command_line >= 0 and isac_generation_from_command_line <= main_dir_no: 
			for i in xrange(isac_generation_from_command_line+1, main_dir_no + 1):
				if i == isac_generation_from_command_line+1:
					backup_dir_no = get_nonexistent_directory_increment_value("./", "000_backup", myformat="%05d", start_value=1)
					cmdexecute("mkdir -p " + "000_backup" + "%05d"%backup_dir_no)
				cmdexecute("mv  " + NAME_OF_MAIN_DIR + "%04d"%i +  " 000_backup" + "%05d"%backup_dir_no)
				cmdexecute("rm  " + "EMAN2DB/"+stack_processed_by_ali2d_base__filename__without_master_dir[4:]+"_%03d.bdb"%i)
				
			# it includes both command line and json file
			my_restart_section = stored_state[-1]["location_in_program"].split("___")[-1]
			if "restart" in my_restart_section:
				if "backup_dir_no" not in locals():
					backup_dir_no = get_nonexistent_directory_increment_value("./", "000_backup", myformat="%05d", start_value=1)
					cmdexecute("mkdir -p " + "000_backup" + "%05d"%backup_dir_no)
				cmdexecute("mv  " + NAME_OF_MAIN_DIR + "%04d"%isac_generation_from_command_line +  " 000_backup" + "%05d"%backup_dir_no)
				cmdexecute("rm  " + "EMAN2DB/"+stack_processed_by_ali2d_base__filename__without_master_dir[4:]+"_%03d.bdb"%isac_generation_from_command_line )
			elif "candidate_class_averages" in my_restart_section:
				if "backup_dir_no" not in locals():
					backup_dir_no = get_nonexistent_directory_increment_value("./", "000_backup", myformat="%05d", start_value=1)
					cmdexecute("mkdir -p " + "000_backup" + "%05d"%backup_dir_no)
				cmdexecute("mv  " + NAME_OF_MAIN_DIR + "%04d"%isac_generation_from_command_line +  " 000_backup" + "%05d"%backup_dir_no)
				cmdexecute("mkdir -p " + NAME_OF_MAIN_DIR + "%04d"%isac_generation_from_command_line)
				# cmdexecute("rm -f " + NAME_OF_MAIN_DIR + "%04d/class_averages_candidate*"%isac_generation_from_command_line)
			elif "reproducible_class_averages" in my_restart_section:
				cmdexecute("rm -rf " + NAME_OF_MAIN_DIR + "%04d/ali_params_generation_*"%isac_generation_from_command_line)
				cmdexecute("rm -f " + NAME_OF_MAIN_DIR + "%04d/class_averages_generation*"%isac_generation_from_command_line)
		else:
			if os.path.exists(NAME_OF_JSON_STATE_FILE):
				stored_stack, stored_state = restore_program_stack_and_state(NAME_OF_JSON_STATE_FILE)
				if "isac_generation" in stored_state[-1]:
					isac_generation_from_command_line = stored_state[-1]["isac_generation"]
				else:
					isac_generation_from_command_line = 1
			else:
				isac_generation_from_command_line = 1
	else:
		isac_generation_from_command_line = 0
		
		
		
	isac_generation_from_command_line = mpi_bcast(isac_generation_from_command_line, 1, MPI_INT, 0, MPI_COMM_WORLD)[0]
	isac_generation = isac_generation_from_command_line - 1
	
	if (myid == main_node):
		if isac_generation == 0:
			cmdexecute("mkdir -p " + NAME_OF_MAIN_DIR + "%04d"%isac_generation)
			write_text_file([1], os.path.join(NAME_OF_MAIN_DIR + "%04d"%isac_generation, "generation_%d_accounted.txt"%isac_generation))
			write_text_file(range(number_of_images_in_stack), os.path.join(NAME_OF_MAIN_DIR + "%04d"%isac_generation, "generation_%d_unaccounted.txt"%isac_generation))

	#  Stopping criterion should be inside the program.
	while True:
		isac_generation += 1
		if isac_generation > options.n_generations:
			break

		data64_stack_current = "bdb:../"+stack_processed_by_ali2d_base__filename__without_master_dir[4:]+"_%03d"%isac_generation

		program_state_stack.restart_location_title = "restart"
		if program_state_stack(locals(), getframeinfo(currentframe())):
			if (myid == main_node):
				cmdexecute("mkdir -p " + NAME_OF_MAIN_DIR + "%04d"%isac_generation)
				# reference the original stack
				list_file = os.path.join(NAME_OF_MAIN_DIR + "%04d"%(isac_generation - 1), "generation_%d_unaccounted.txt"%(isac_generation - 1))
				cmdexecute("e2bdb.py %s --makevstack=%s --list=%s"%(stack_processed_by_ali2d_base__filename__without_master_dir,\
						stack_processed_by_ali2d_base__filename__without_master_dir + "_%03d"%isac_generation, list_file))
			mpi_barrier(MPI_COMM_WORLD)

		os.chdir(NAME_OF_MAIN_DIR + "%04d"%isac_generation)

		program_state_stack.restart_location_title = "candidate_class_averages"
		if program_state_stack(locals(), getframeinfo(currentframe())):

			iter_isac(data64_stack_current, options.ir, target_radius, options.rs, target_xr, yr, options.ts, options.maxit, False, 1.0,\
				options.dst, options.FL, options.FH, options.FF, options.init_iter, options.main_iter, options.iter_reali, options.match_first, \
				options.max_round, options.match_second, options.stab_ali, options.thld_err, options.indep_run, options.thld_grp, \
				options.img_per_grp, isac_generation, False, random_seed=options.rand_seed, new=False)#options.new)

		# program_state_stack.restart_location_title = "stopped_program1"
		# program_state_stack(locals(), getframeinfo(currentframe()))
		
		program_state_stack.restart_location_title = "stop_after_candidates"
		program_state_stack(locals(), getframeinfo(currentframe()))
		if stop_after_candidates:
			mpi_finalize()
			sys.exit()

		exit_program = 0
		if(myid == main_node):
			if not os.path.exists("class_averages_candidate_generation_%d.hdf"%isac_generation):
				print "This generation (%d) no class average candidates were generated! Finishing."%isac_generation
				exit_program = 1
		exit_program = int(mpi_bcast(exit_program, 1, MPI_INT, 0, MPI_COMM_WORLD)[0])
		if exit_program:
			os.chdir("..")
			break

		program_state_stack.restart_location_title = "reproducible_class_averages"
		if program_state_stack(locals(), getframeinfo(currentframe())):


			iter_isac(data64_stack_current, options.ir, target_radius, options.rs, target_xr, yr, options.ts, options.maxit, False, 1.0,\
				options.dst, options.FL, options.FH, options.FF, options.init_iter, options.main_iter, options.iter_reali, options.match_first, \
				options.max_round, options.match_second, options.stab_ali, options.thld_err, options.indep_run, options.thld_grp, \
				options.img_per_grp, isac_generation, True, random_seed=options.rand_seed, new=False)#options.new)
			pass

		os.chdir("..")

		if(myid == main_node):
			accounted_images = read_text_file(os.path.join(NAME_OF_MAIN_DIR + "%04d"%(isac_generation),"generation_%d_accounted.txt"%(isac_generation)))
			number_of_accounted_images = len(accounted_images)
			un_accounted_images = read_text_file(os.path.join(NAME_OF_MAIN_DIR + "%04d"%(isac_generation),"generation_%d_unaccounted.txt"%(isac_generation)))
			number_of_un_accounted_images = len(un_accounted_images)
		else:
			number_of_accounted_images = 0
			number_of_un_accounted_images = 0

		number_of_accounted_images = int(mpi_bcast(number_of_accounted_images, 1, MPI_INT, 0, MPI_COMM_WORLD)[0])
		number_of_un_accounted_images = int(mpi_bcast(number_of_un_accounted_images, 1, MPI_INT, 0, MPI_COMM_WORLD)[0])
		
		if number_of_accounted_images == 0:
			if(myid == main_node):
				print "This generation (%d) there are no accounted images! Finishing."%isac_generation
			break

		while (myid == main_node):
			def files_are_missing(isac_generation):
				for i in xrange(1, isac_generation + 1):
					if not os.path.exists("generation_%04d/class_averages_generation_%d.hdf"%(i,i)):
						print "Error: generation_%04d/class_averages_generation_%d.hdf is missing! Exiting."%(i,i)
						return 1
				return 0
			
			if files_are_missing(isac_generation):
				break
				
			cmdexecute("rm -f class_averages.hdf")
			cpy(["generation_%04d/class_averages_generation_%d.hdf"%(i,i) for i in xrange(1, isac_generation + 1)], "class_averages.hdf")
			
			break

		if number_of_un_accounted_images == 0:
			if(myid == main_node):
				print "This generation (%d) there are no un accounted images! Finishing."%isac_generation
			break


	program_state_stack(locals(), getframeinfo(currentframe()), last_call="__LastCall")

	mpi_barrier(MPI_COMM_WORLD)
	mpi_finalize()
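
A hypothetical invocation of the program above, following its usage string; the stack name, output directory, and particle radius are placeholders, and the MPI process count is chosen as a multiple of the default indep_run=4, as the --indep_run help text requires:

mpirun -np 8 sxisac.py bdb:particles master_outdir --radius=29 --img_per_grp=100 --CTF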
Example #7
def compare(compare_ref_free, outfile_repro,ref_free_output,yrng, xrng, rstep,nx,apix,ref_free_cutoff, nproc, myid, main_node):

	from alignment      import   Numrinit, ringwe,  Applyws
	from random	 import   seed, randint
	from utilities      import   get_params2D, set_params2D, model_circle, inverse_transform2, combine_params2
	from fundamentals   import   rot_shift2D
	from mpi	    import   MPI_COMM_WORLD, mpi_barrier, mpi_bcast, MPI_INT
	from statistics     import   fsc_mask
	from filter	 import   fit_tanh
	from numpy	  import   array	

	fout = "%s.hdf" % ref_free_output
	frc_out = "%s_frc" % ref_free_output
	res_out = "%s_res" % ref_free_output
	
	
	nima = EMUtil.get_image_count(compare_ref_free)
	image_start, image_end = MPI_start_end(nima, nproc, myid)
	ima = EMData()
	ima.read_image(compare_ref_free, image_start)
	
	last_ring = nx/2-2
	first_ring = 1
	mask = model_circle(last_ring, nx, nx)

	refi = []
	numref = EMUtil.get_image_count(outfile_repro)
	cnx = nx/2 +1
	cny = cnx
	
	mode = "F"
	numr = Numrinit(first_ring, last_ring, rstep, mode)	
	wr = ringwe(numr, mode)

	ima.to_zero()
	for j in xrange(numref):
		temp = EMData()
		temp.read_image(outfile_repro, j)
		#  even, odd, number of even, number of images.  After frc, totav
		refi.append(temp)
	#  for each node read its share of data
	data = EMData.read_images(compare_ref_free, range(image_start, image_end))
	for im in xrange(image_start, image_end):
		data[im-image_start].set_attr('ID', im)
		set_params2D(data[im-image_start],[0,0,0,0,1])
	ringref = []
	for j in xrange(numref):
			refi[j].process_inplace("normalize.mask", {"mask":mask, "no_sigma":1}) # normalize reference images to N(0,1)
			cimage = Util.Polar2Dm(refi[j], cnx, cny, numr, mode)
			Util.Frngs(cimage, numr)
			Applyws(cimage, numr, wr)
			ringref.append(cimage)
	
	if myid == main_node: seed(1000)
	data_shift = []	
	frc = []
	res = []
	for im in xrange(image_start, image_end):
		alpha, sx, sy, mirror, scale = get_params2D(data[im-image_start])
		alphai, sxi, syi, scalei = inverse_transform2(alpha, sx, sy, 1.0)
		# normalize
		data[im-image_start].process_inplace("normalize.mask", {"mask":mask, "no_sigma":1}) # subtract average under the mask
		# align current image to the reference
		[angt, sxst, syst, mirrort, xiref, peakt] = Util.multiref_polar_ali_2d(data[im-image_start], ringref, xrng, yrng, 1, mode, numr, cnx+sxi, cny+syi)
		iref = int(xiref)
		[alphan, sxn, syn, mn] = combine_params2(0.0, -sxi, -syi, 0, angt, sxst, syst, (int)(mirrort))
		set_params2D(data[im-image_start], [alphan, sxn, syn, int(mn), scale])
		temp = rot_shift2D(data[im-image_start], alphan, sxn, syn, mn)
		temp.set_attr('assign',iref)
		tfrc = fsc_mask(temp,refi[iref],mask = mask)
		temp.set_attr('frc',tfrc[1])
		res = fit_tanh(tfrc)
		temp.set_attr('res',res)
		data_shift.append(temp)
	
	for node in xrange(nproc):
		if myid == node:
			for image in data_shift:
				image.write_image(fout,-1)
				refindex = image.get_attr('assign')
				refi[refindex].write_image(fout,-1)	
		mpi_barrier(MPI_COMM_WORLD)
	rejects = []
	if myid == main_node:
		a = EMData()
		index = 0
		frc = []
		res = []
		temp = []
		classes = []
		for im in xrange(nima):
			a.read_image(fout, index)
			frc.append(a.get_attr("frc"))
			if ref_free_cutoff != -1: classes.append(a.get_attr("class_ptcl_idxs"))
			tmp = a.get_attr("res")
			temp.append(tmp[0])
			res.append("%12f" %(apix/tmp[0]))
			res.append("\n")
			index = index + 2
		res_num = array(temp)
		mean_score = res_num.mean(axis=0)
		std_score = res_num.std(axis=0)
		std = std_score / 2
		if ref_free_cutoff !=-1:
			cutoff = mean_score - std * ref_free_cutoff
			reject = res_num < cutoff
			index = 0
			for i in reject:
				if i: rejects.extend(classes[index])
				index = index + 1
			rejects.sort()
			length = mpi_bcast(len(rejects),1,MPI_INT,main_node, MPI_COMM_WORLD)	
			rejects = mpi_bcast(rejects,length , MPI_INT, main_node, MPI_COMM_WORLD)
		del a
		fout_frc = open(frc_out,'w')
		fout_res = open(res_out,'w')
		fout_res.write("".join(res))
		temp = zip(*frc)
		datstrings = []
		for i in temp:
			for j in i:
				datstrings.append("  %12f" % (j))
			datstrings.append("\n")
		fout_frc.write("".join(datstrings))
		fout_frc.close()
	
	del refi		
	del ringref
	return rejects
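
A hypothetical call to the function above; the file names and geometry values are placeholders, and it assumes MPI has already been initialized and nproc/myid obtained elsewhere, as in the ISAC driver shown earlier on this page:

# Placeholder arguments: compare a prepared particle stack against reprojection
# averages; ref_free_cutoff=-1 disables the rejection step.
rejects = compare("ref_free_stack.hdf", "reprojections.hdf", "ref_free_output",
                  yrng=2, xrng=2, rstep=1, nx=64, apix=1.2,
                  ref_free_cutoff=-1, nproc=nproc, myid=myid, main_node=0)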
Example #8
def compare(compare_ref_free, outfile_repro, ref_free_output, yrng, xrng,
            rstep, nx, apix, ref_free_cutoff, nproc, myid, main_node):

    from alignment import Numrinit, ringwe, Applyws
    from random import seed, randint
    from utilities import get_params2D, set_params2D, model_circle, inverse_transform2, combine_params2
    from fundamentals import rot_shift2D
    from mpi import MPI_COMM_WORLD, mpi_barrier, mpi_bcast, MPI_INT
    from statistics import fsc_mask
    from filter import fit_tanh
    from numpy import array

    fout = "%s.hdf" % ref_free_output
    frc_out = "%s_frc" % ref_free_output
    res_out = "%s_res" % ref_free_output

    nima = EMUtil.get_image_count(compare_ref_free)
    image_start, image_end = MPI_start_end(nima, nproc, myid)
    ima = EMData()
    ima.read_image(compare_ref_free, image_start)

    last_ring = nx / 2 - 2
    first_ring = 1
    mask = model_circle(last_ring, nx, nx)

    refi = []
    numref = EMUtil.get_image_count(outfile_repro)
    cnx = nx / 2 + 1
    cny = cnx

    mode = "F"
    numr = Numrinit(first_ring, last_ring, rstep, mode)
    wr = ringwe(numr, mode)

    ima.to_zero()
    for j in xrange(numref):
        temp = EMData()
        temp.read_image(outfile_repro, j)
        #  even, odd, number of even, number of images.  After frc, totav
        refi.append(temp)
    #  for each node read its share of data
    data = EMData.read_images(compare_ref_free, range(image_start, image_end))
    for im in xrange(image_start, image_end):
        data[im - image_start].set_attr('ID', im)
        set_params2D(data[im - image_start], [0, 0, 0, 0, 1])
    ringref = []
    for j in xrange(numref):
        refi[j].process_inplace("normalize.mask", {
            "mask": mask,
            "no_sigma": 1
        })  # normalize reference images to N(0,1)
        cimage = Util.Polar2Dm(refi[j], cnx, cny, numr, mode)
        Util.Frngs(cimage, numr)
        Applyws(cimage, numr, wr)
        ringref.append(cimage)

    if myid == main_node: seed(1000)
    data_shift = []
    frc = []
    res = []
    for im in xrange(image_start, image_end):
        alpha, sx, sy, mirror, scale = get_params2D(data[im - image_start])
        alphai, sxi, syi, scalei = inverse_transform2(alpha, sx, sy, 1.0)
        # normalize
        data[im - image_start].process_inplace("normalize.mask", {
            "mask": mask,
            "no_sigma": 1
        })  # subtract average under the mask
        # align current image to the reference
        [angt, sxst, syst, mirrort, xiref,
         peakt] = Util.multiref_polar_ali_2d(data[im - image_start], ringref,
                                             xrng, yrng, 1, mode, numr,
                                             cnx + sxi, cny + syi)
        iref = int(xiref)
        [alphan, sxn, syn, mn] = combine_params2(0.0, -sxi, -syi, 0, angt,
                                                 sxst, syst, (int)(mirrort))
        set_params2D(data[im - image_start],
                     [alphan, sxn, syn, int(mn), scale])
        temp = rot_shift2D(data[im - image_start], alphan, sxn, syn, mn)
        temp.set_attr('assign', iref)
        tfrc = fsc_mask(temp, refi[iref], mask=mask)
        temp.set_attr('frc', tfrc[1])
        res = fit_tanh(tfrc)
        temp.set_attr('res', res)
        data_shift.append(temp)

    for node in xrange(nproc):
        if myid == node:
            for image in data_shift:
                image.write_image(fout, -1)
                refindex = image.get_attr('assign')
                refi[refindex].write_image(fout, -1)
        mpi_barrier(MPI_COMM_WORLD)
    rejects = []
    if myid == main_node:
        a = EMData()
        index = 0
        frc = []
        res = []
        temp = []
        classes = []
        for im in xrange(nima):
            a.read_image(fout, index)
            frc.append(a.get_attr("frc"))
            if ref_free_cutoff != -1:
                classes.append(a.get_attr("class_ptcl_idxs"))
            tmp = a.get_attr("res")
            temp.append(tmp[0])
            res.append("%12f" % (apix / tmp[0]))
            res.append("\n")
            index = index + 2
        res_num = array(temp)
        mean_score = res_num.mean(axis=0)
        std_score = res_num.std(axis=0)
        std = std_score / 2
        if ref_free_cutoff != -1:
            cutoff = mean_score - std * ref_free_cutoff
            reject = res_num < cutoff
            index = 0
            for i in reject:
                if i: rejects.extend(classes[index])
                index = index + 1
            rejects.sort()
            length = mpi_bcast(len(rejects), 1, MPI_INT, main_node,
                               MPI_COMM_WORLD)
            rejects = mpi_bcast(rejects, length, MPI_INT, main_node,
                                MPI_COMM_WORLD)
        del a
        fout_frc = open(frc_out, 'w')
        fout_res = open(res_out, 'w')
        fout_res.write("".join(res))
        temp = zip(*frc)
        datstrings = []
        for i in temp:
            for j in i:
                datstrings.append("  %12f" % (j))
            datstrings.append("\n")
        fout_frc.write("".join(datstrings))
        fout_frc.close()

    del refi
    del ringref
    return rejects
Example #9
File: sxisac.py  Project: cryoem/test
def main(args):
	progname = os.path.basename(sys.argv[0])
	usage = ( progname + " stack_file  output_directory --radius=particle_radius --img_per_grp=img_per_grp --CTF --restart_section <The remaining parameters are optional --ir=ir --rs=rs --xr=xr --yr=yr --ts=ts --maxit=maxit --dst=dst --FL=FL --FH=FH --FF=FF --init_iter=init_iter --main_iter=main_iter" +
			" --iter_reali=iter_reali --match_first=match_first --max_round=max_round --match_second=match_second --stab_ali=stab_ali --thld_err=thld_err --indep_run=indep_run --thld_grp=thld_grp" +
			"  --generation=generation  --rand_seed=rand_seed>" )
	
	parser = OptionParser(usage,version=SPARXVERSION)
	parser.add_option("--radius",         type="int",          default=-1,      help="<Particle radius>, it has to be provided.")
	parser.add_option("--img_per_grp",    type="int",          default=100,     help="<number of images per group> in the ideal case (essentially maximum size of class) (100)")
	parser.add_option("--CTF",            action="store_true", default=False,   help="<CTF flag>, if set the data will be phase-flipped")
	parser.add_option("--ir",             type="int",          default=1,       help="<inner ring> of the resampling to polar coordinates (1)")
	parser.add_option("--rs",             type="int",          default=1,       help="<ring step> of the resampling to polar coordinates (1)")
	parser.add_option("--xr",             type="int",          default=-1,      help="<x range> of translational search (By default set by the program) (advanced)")
	parser.add_option("--yr",             type="int",          default=-1,      help="<y range> of translational search (same as xr) (advanced)")
	parser.add_option("--ts",             type="float",        default=1.0,     help="<search step> of translational search (1.0)")
	parser.add_option("--maxit",          type="int",          default=30,      help="number of iterations for reference-free alignment (30)")
	#parser.add_option("--snr",            type="float",        default=1.0,     help="signal-to-noise ratio (only meaningful when CTF is enabled, currently not supported)")
	parser.add_option("--center_method",  type="int",          default=7,       help="<Method for centering> of global 2D average during initial prealignment of data (default : 7; 0 : no centering; -1 : average shift method; please see center_2D in utilities.py for methods 1-7)")
	parser.add_option("--dst",            type="float",        default=90.0,    help="discrete angle used in within group alignment ")
	parser.add_option("--FL",             type="float",        default=0.2,     help="<lowest stopband> frequency used in the tangent filter (0.2)")
	parser.add_option("--FH",             type="float",        default=0.3,     help="<highest stopband> frequency used in the tangent filter (0.3)")
	parser.add_option("--FF",             type="float",        default=0.2,     help="<fall-off of the tangent> filter (0.2)")
	parser.add_option("--init_iter",      type="int",          default=3,       help="<init_iter> number of iterations of ISAC program in initialization (3)")
	parser.add_option("--main_iter",      type="int",          default=3,       help="<main_iter> number of iterations of ISAC program in main part (3)")
	parser.add_option("--iter_reali",     type="int",          default=1,       help="<iter_reali> number of iterations in ISAC before checking stability (1)")
	parser.add_option("--match_first",    type="int",          default=1,       help="number of iterations to run 2-way matching in the first phase (1)")
	parser.add_option("--max_round",      type="int",          default=20,      help="maximum rounds of generating candidate averages in the first phase (20)")
	parser.add_option("--match_second",   type="int",          default=5,       help="number of iterations to run 2-way (or 3-way) matching in the second phase (5)")
	parser.add_option("--stab_ali",       type="int",          default=5,       help="number of alignments when checking stability (5)")
	parser.add_option("--thld_err",       type="float",        default=0.7,     help="the threshold of pixel error when checking stability (0.7)")
	parser.add_option("--indep_run",      type="int",          default=4,       help="number of independent runs for reproducibility (default=4, only values 2, 3 and 4 are supported (4)")
	parser.add_option("--thld_grp",       type="int",          default=10,      help="minimum size of class (10)")
	parser.add_option("--n_generations",     type="int",          default=100,       help="<n_generations> program stops when reaching this total number of generations (advanced)")
	#parser.add_option("--candidatesexist",action="store_true", default=False,   help="Candidate class averages exist use them (default False)")
	parser.add_option("--rand_seed",      type="int",          default=None,    help="random seed set before calculations, useful for testing purposes (default None - total randomness)")
	parser.add_option("--new",            action="store_true", default=False,   help="use new code (default = False)")
	parser.add_option("--debug",          action="store_true", default=False,   help="debug info printout (default = False)")

	# must be switched off in production
	parser.add_option("--use_latest_master_directory", action="store_true", dest="use_latest_master_directory", default=False)
	
	parser.add_option("--restart_section", type="string", default="", help="<restart section name> (no spaces) followed immediately by comma, followed immediately by generation to restart, example: \n--restart_section=candidate_class_averages,1         (Sections: restart, candidate_class_averages, reproducible_class_averages)")
	parser.add_option("--stop_after_candidates",          action="store_true", default=False,   help="<stop_after_candidates> stops after the 'candidate_class_averages' section")

	parser.add_option("--return_options", action="store_true", dest="return_options", default=False, help = SUPPRESS_HELP)

	(options, args) = parser.parse_args(args)

	if options.return_options:
		return parser
	
	if len(args) > 2:
		print "usage: " + usage
		print "Please run '" + progname + " -h' for detailed options"
		sys.exit()
	
	if global_def.CACHE_DISABLE:
		from utilities import disable_bdb_cache
		disable_bdb_cache()
	
	from isac import iter_isac
	global_def.BATCH = True

	command_line_provided_stack_filename = args[0]

	main_node = 0
	mpi_init(0, [])
	myid = mpi_comm_rank(MPI_COMM_WORLD)
	nproc = mpi_comm_size(MPI_COMM_WORLD)

	radi  = options.radius
	center_method  = options.center_method
	if(radi < 1):  ERROR("Particle radius has to be provided!","sxisac",1,myid)

	
	use_latest_master_directory = options.use_latest_master_directory
	stop_after_candidates = options.stop_after_candidates
	program_state_stack.restart_location_title_from_command_line = options.restart_section
	
	from utilities import qw
	program_state_stack.PROGRAM_STATE_VARIABLES = set(qw("""
		isac_generation
	"""))

	# create or reuse master directory
	masterdir = ""
	stack_processed_by_ali2d_base__filename = ""
	stack_processed_by_ali2d_base__filename__without_master_dir = ""
	error_status = 0
	if len(args) == 2:
		masterdir = args[1]
	elif len(args) == 1:
		if use_latest_master_directory:
			all_dirs = [d for d in os.listdir(".") if os.path.isdir(d)]
			import re; r = re.compile("^master.*$")
			all_dirs = filter(r.match, all_dirs)
			if len(all_dirs)>0:
				# all_dirs = max(all_dirs, key=os.path.getctime)
				masterdir = max(all_dirs, key=os.path.getmtime)
				
	#Create folder for all results or check if there is one created already
	if(myid == main_node):
		if( masterdir == ""):
			timestring = strftime("%Y_%m_%d__%H_%M_%S" + DIR_DELIM, localtime())
			masterdir = "master"+timestring
			cmd = "{} {}".format("mkdir", masterdir)
			cmdexecute(cmd)
		elif not os.path.exists(masterdir):
			# masterdir was given on the command line but does not exist yet, so create it
			masterdir = args[1]
			cmd = "{} {}".format("mkdir", masterdir)
			cmdexecute(cmd)

		if(args[0][:4] == "bdb:"): filename = args[0][4:]
		else:                      filename = args[0][:-4]
		filename = os.path.basename(filename)
		stack_processed_by_ali2d_base__filename  = "bdb:" + os.path.join(masterdir, filename )
		stack_processed_by_ali2d_base__filename__without_master_dir  = "bdb:" + filename
	if_error_all_processes_quit_program(error_status)

	# send masterdir to all processes
	masterdir = send_string_to_all(masterdir)

	if myid == 0:
		if options.restart_section != "":
			if os.path.exists(os.path.join(masterdir,NAME_OF_JSON_STATE_FILE)):
				stored_stack, stored_state = restore_program_stack_and_state(os.path.join(masterdir,NAME_OF_JSON_STATE_FILE))
				import re
				if "," in options.restart_section:
					parsed_restart_section_option = options.restart_section.split(",")
					stored_state[-1]["location_in_program"] = re.sub(r"___.*$", "___%s"%parsed_restart_section_option[0], stored_state[-1]["location_in_program"])
					generation_str_format = parsed_restart_section_option[1]
					if generation_str_format != "":
						isac_generation_from_command_line = int(generation_str_format)
						stored_state[-1]["isac_generation"] = isac_generation_from_command_line 
					else:
						isac_generation_from_command_line = 1
						if "isac_generation" in stored_state[-1]:
							del stored_state[-1]["isac_generation"]
				else:
					isac_generation_from_command_line = -1
					stored_state[-1]["location_in_program"] = re.sub(r"___.*$", "___%s"%options.restart_section, stored_state[-1]["location_in_program"])
					if "isac_generation" in stored_state[-1]:
						del stored_state[-1]["isac_generation"]
				store_program_state(os.path.join(masterdir,NAME_OF_JSON_STATE_FILE), stored_state, stored_stack)
			else:
				print "Please remove the restart_section option from the command line. The program must be started from the beginning."			
				mpi_finalize()
				sys.exit()
		else:
			isac_generation_from_command_line = -1
	
	program_state_stack(locals(), getframeinfo(currentframe()), os.path.join(masterdir,NAME_OF_JSON_STATE_FILE))	

	stack_processed_by_ali2d_base__filename = send_string_to_all(stack_processed_by_ali2d_base__filename)
	stack_processed_by_ali2d_base__filename__without_master_dir = \
		send_string_to_all(stack_processed_by_ali2d_base__filename__without_master_dir)

	#  PARAMETERS OF THE PROCEDURE
	if( options.xr == -1 ):
		#  Default values
		target_nx = 76
		target_radius = 29
		target_xr = 1
	else:  #  nx//2
		#  Check below!
		target_xr = options.xr
		target_nx = 76 + target_xr - 1  # xr = 1 is the default, hence the "- 1"
		target_radius = 29
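
	#  In both branches the data are later rescaled so that the particle radius becomes
	#  target_radius (29 px) inside a target_nx x target_nx (76 px) box; only the
	#  translational search range target_xr differs.  For example (illustrative numbers
	#  only): radi = 58 would give shrink_ratio = 29/58 = 0.5 further below.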

	mpi_barrier(MPI_COMM_WORLD)

	# Initialization of stacks
	if(myid == main_node):
		number_of_images_in_stack = EMUtil.get_image_count(command_line_provided_stack_filename)
	else:
		number_of_images_in_stack = 0

	number_of_images_in_stack = bcast_number_to_all(number_of_images_in_stack, source_node = main_node)
	
	nxrsteps = 4
	
	init2dir = os.path.join(masterdir,"2dalignment")

	if(myid == 0):
		import subprocess
		from logger import Logger, BaseLogger_Files
		#  Create output directory
		log2d = Logger(BaseLogger_Files())
		log2d.prefix = os.path.join(init2dir)
		cmd = "mkdir -p "+log2d.prefix
		outcome = subprocess.call(cmd, shell=True)
		log2d.prefix += "/"
		# outcome = subprocess.call("sxheader.py  "+command_line_provided_stack_filename+"   --params=xform.align2d  --zero", shell=True)
	else:
		outcome = 0
		log2d = None

	if(myid == main_node):
		a = get_im(command_line_provided_stack_filename)
		nnxo = a.get_xsize()
	else:
		nnxo = 0
	nnxo = bcast_number_to_all(nnxo, source_node = main_node)

	txrm = (nnxo - 2*(radi+1))//2
	if(txrm < 0):  			ERROR("Radius of the structure (%d) is larger than the window size permits"%(radi), "sxisac", 1, myid)
	if(txrm/nxrsteps>0):
		tss = ""
		txr = ""
		while(txrm/nxrsteps>0):
			tts=txrm/nxrsteps
			tss += "  %d"%tts
			txr += "  %d"%(tts*nxrsteps)
			txrm = txrm//2
	else:
		tss = "1"
		txr = "%d"%txrm

	# section ali2d_base

	#  centering method is set to #7
	params2d, aligned_images = ali2d_base(command_line_provided_stack_filename, init2dir, None, 1, radi, 1, txr, txr, tss, \
				False, 90.0, center_method, 14, options.CTF, 1.0, False, \
				"ref_ali2d", "", log2d, nproc, myid, main_node, MPI_COMM_WORLD, write_headers = False)

	if( myid == main_node ):
		write_text_row(params2d,os.path.join(init2dir, "initial2Dparams.txt"))
	del params2d
	mpi_barrier(MPI_COMM_WORLD)

	#  We assume the target image size will be target_nx, radius will be 29, and xr = 1.  
	#  Note images can be also padded, in which case shrink_ratio > 1.
	shrink_ratio = float(target_radius)/float(radi)
	nx = aligned_images[0].get_xsize()
	nima = len(aligned_images)
	newx = int(nx*shrink_ratio + 0.5)

	from fundamentals import rot_shift2D, resample
	from utilities import pad, combine_params2
	if(shrink_ratio < 1.0):
		if    newx > target_nx  :
			msk = model_circle(target_radius, target_nx, target_nx)
			for im in xrange(nima):
				#  Here we should use only shifts
				alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
				alpha, sx, sy, mirror = combine_params2(0, sx,sy, 0, -alpha, 0, 0, 0)
				aligned_images[im] = rot_shift2D(aligned_images[im], 0, sx, sy, 0)
				aligned_images[im]  = resample(aligned_images[im], shrink_ratio)
				aligned_images[im] = Util.window(aligned_images[im], target_nx, target_nx, 1)
				p = Util.infomask(aligned_images[im], msk, False)
				aligned_images[im] -= p[0]
				p = Util.infomask(aligned_images[im], msk, True)
				aligned_images[im] /= p[1]
		elif  newx == target_nx :
			msk = model_circle(target_radius, target_nx, target_nx)
			for im in xrange(nima):
				#  Here we should use only shifts
				alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
				alpha, sx, sy, mirror = combine_params2(0, sx,sy, 0, -alpha, 0, 0, 0)
				aligned_images[im] = rot_shift2D(aligned_images[im], 0, sx, sy, 0)
				aligned_images[im]  = resample(aligned_images[im], shrink_ratio)
				p = Util.infomask(aligned_images[im], msk, False)
				aligned_images[im] -= p[0]
				p = Util.infomask(aligned_images[im], msk, True)
				aligned_images[im] /= p[1]
		elif  newx < target_nx  :	
			msk = model_circle(newx//2-2, newx,  newx)
			for im in xrange(nima):
				#  Here we should use only shifts
				alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
				alpha, sx, sy, mirror = combine_params2(0, sx,sy, 0, -alpha, 0, 0, 0)
				aligned_images[im] = rot_shift2D(aligned_images[im], 0, sx, sy, 0)
				aligned_images[im]  = resample(aligned_images[im], shrink_ratio)
				p = Util.infomask(aligned_images[im], msk, False)
				aligned_images[im] -= p[0]
				p = Util.infomask(aligned_images[im], msk, True)
				aligned_images[im] /= p[1]
				aligned_images[im] = pad(aligned_images[im], target_nx, target_nx, 1, 0.0)
	elif(shrink_ratio == 1.0):
		if    newx > target_nx  :
			msk = model_circle(target_radius, target_nx, target_nx)
			for im in xrange(nima):
				#  Here we should use only shifts
				alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
				alpha, sx, sy, mirror = combine_params2(0, sx,sy, 0, -alpha, 0, 0, 0)
				aligned_images[im] = rot_shift2D(aligned_images[im], 0, sx, sy, 0)
				aligned_images[im] = Util.window(aligned_images[im], target_nx, target_nx, 1)
				p = Util.infomask(aligned_images[im], msk, False)
				aligned_images[im] -= p[0]
				p = Util.infomask(aligned_images[im], msk, True)
				aligned_images[im] /= p[1]
		elif  newx == target_nx :
			msk = model_circle(target_radius, target_nx, target_nx)
			for im in xrange(nima):
				#  Here we should use only shifts
				alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
				alpha, sx, sy, mirror = combine_params2(0, sx,sy, 0, -alpha, 0, 0, 0)
				aligned_images[im] = rot_shift2D(aligned_images[im], 0, sx, sy, 0)
				p = Util.infomask(aligned_images[im], msk, False)
				aligned_images[im] -= p[0]
				p = Util.infomask(aligned_images[im], msk, True)
				aligned_images[im] /= p[1]
		elif  newx < target_nx  :			
			msk = model_circle(nx//2-2, newx,  newx)
			for im in xrange(nima):
				#  Here we should use only shifts
				alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
				alpha, sx, sy, mirror = combine_params2(0, sx,sy, 0, -alpha, 0, 0, 0)
				aligned_images[im] = rot_shift2D(aligned_images[im], 0, sx, sy, 0)
				#aligned_images[im]  = resample(aligned_images[im], shrink_ratio)
				p = Util.infomask(aligned_images[im], msk, False)
				aligned_images[im] -= p[0]
				p = Util.infomask(aligned_images[im], msk, True)
				aligned_images[im] /= p[1]
				aligned_images[im] = pad(aligned_images[im], target_nx, target_nx, 1, 0.0)
	elif(shrink_ratio > 1.0):
		target_radius = radi
		msk = model_circle(target_radius, nx, nx)
		for im in xrange(nima):
			#  Here we should use only shifts
			alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
			alpha, sx, sy, mirror = combine_params2(0, sx,sy, 0, -alpha, 0, 0, 0)
			aligned_images[im] = rot_shift2D(aligned_images[im], 0, sx, sy, 0)
			p = Util.infomask(aligned_images[im], msk, False)
			aligned_images[im] -= p[0]
			p = Util.infomask(aligned_images[im], msk, True)
			aligned_images[im] /= p[1]
			aligned_images[im] = pad(aligned_images[im], target_nx, target_nx, 1, 0.0)
	del msk
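
	#  Summary of the three branches above: shrink_ratio < 1 resamples the images down and
	#  then windows/pads them to target_nx as needed; shrink_ratio == 1 skips resampling and
	#  only windows/pads; shrink_ratio > 1 keeps the original scale (target_radius is reset
	#  to radi) and simply pads to target_nx.  In every branch only the shifts from the
	#  prealignment are applied (the rotations are dropped), and each image is normalized
	#  with the statistics returned by Util.infomask under a circular mask (the first value
	#  is subtracted, the second divided out, i.e. a mean/sigma normalization).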

	gather_compacted_EMData_to_root(number_of_images_in_stack, aligned_images, myid)
	number_of_images_in_stack = bcast_number_to_all(number_of_images_in_stack, source_node = main_node)

	if( myid == main_node ):
		for i in range(number_of_images_in_stack):  aligned_images[i].write_image(stack_processed_by_ali2d_base__filename,i)
		#  It has to be explicitly closed
		from EMAN2db import db_open_dict
		DB = db_open_dict(stack_processed_by_ali2d_base__filename)
		DB.close()
		
		fp = open("README_shrink_ratio.txt", "w")
		output_text = """
		Since, for processing purposes, isac changes the image dimensions,
		the pixel size needs to be adjusted in subsequent steps (e.g. when
		running sxviper.py). The shrink ratio for this particular isac run is
		--------
		%.5f
		--------
		To get the pixel size for the isac output the user needs to divide
		the original pixel size by the above value. This info is saved in
		the following file: README_shrink_ratio.txt
		"""%shrink_ratio
		fp.write(output_text); fp.flush() ;fp.close()
		print output_text
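
		#  Example of the adjustment described above (illustrative numbers only): with an
		#  original pixel size of 1.32 A/px and shrink_ratio = 0.40000, the isac output
		#  corresponds to 1.32 / 0.4 = 3.3 A/px.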

	mpi_barrier(MPI_COMM_WORLD)

	global_def.BATCH = True

	os.chdir(masterdir)

	if program_state_stack(locals(), getframeinfo(currentframe())):
		if (myid == main_node):
			cmdexecute("sxheader.py  --consecutive  --params=originalid   %s"%stack_processed_by_ali2d_base__filename__without_master_dir)
			cmdexecute("e2bdb.py %s --makevstack=%s_000"%(stack_processed_by_ali2d_base__filename__without_master_dir, stack_processed_by_ali2d_base__filename__without_master_dir))

	if (myid == main_node):
		main_dir_no = get_latest_directory_increment_value("./", NAME_OF_MAIN_DIR, myformat="%04d")
		print "isac_generation_from_command_line", isac_generation_from_command_line, main_dir_no
		if isac_generation_from_command_line < 0:
			if os.path.exists(NAME_OF_JSON_STATE_FILE):
				stored_stack, stored_state = restore_program_stack_and_state(NAME_OF_JSON_STATE_FILE)
				if "isac_generation" in stored_state[-1]:
					isac_generation_from_command_line = stored_state[-1]["isac_generation"]
				else:
					isac_generation_from_command_line = -1
		if isac_generation_from_command_line >= 0 and isac_generation_from_command_line <= main_dir_no: 
			for i in xrange(isac_generation_from_command_line+1, main_dir_no + 1):
				if i == isac_generation_from_command_line+1:
					backup_dir_no = get_nonexistent_directory_increment_value("./", "000_backup", myformat="%05d", start_value=1)
					cmdexecute("mkdir -p " + "000_backup" + "%05d"%backup_dir_no)
				cmdexecute("mv  " + NAME_OF_MAIN_DIR + "%04d"%i +  " 000_backup" + "%05d"%backup_dir_no)
				cmdexecute("rm  " + "EMAN2DB/"+stack_processed_by_ali2d_base__filename__without_master_dir[4:]+"_%03d.bdb"%i)
				
			# it includes both command line and json file
			my_restart_section = stored_state[-1]["location_in_program"].split("___")[-1]
			if "restart" in my_restart_section:
				if "backup_dir_no" not in locals():
					backup_dir_no = get_nonexistent_directory_increment_value("./", "000_backup", myformat="%05d", start_value=1)
					cmdexecute("mkdir -p " + "000_backup" + "%05d"%backup_dir_no)
				cmdexecute("mv  " + NAME_OF_MAIN_DIR + "%04d"%isac_generation_from_command_line +  " 000_backup" + "%05d"%backup_dir_no)
				cmdexecute("rm  " + "EMAN2DB/"+stack_processed_by_ali2d_base__filename__without_master_dir[4:]+"_%03d.bdb"%isac_generation_from_command_line )
			elif "candidate_class_averages" in my_restart_section:
				if "backup_dir_no" not in locals():
					backup_dir_no = get_nonexistent_directory_increment_value("./", "000_backup", myformat="%05d", start_value=1)
					cmdexecute("mkdir -p " + "000_backup" + "%05d"%backup_dir_no)
				cmdexecute("mv  " + NAME_OF_MAIN_DIR + "%04d"%isac_generation_from_command_line +  " 000_backup" + "%05d"%backup_dir_no)
				cmdexecute("mkdir -p " + NAME_OF_MAIN_DIR + "%04d"%isac_generation_from_command_line)
				# cmdexecute("rm -f " + NAME_OF_MAIN_DIR + "%04d/class_averages_candidate*"%isac_generation_from_command_line)
			elif "reproducible_class_averages" in my_restart_section:
				cmdexecute("rm -rf " + NAME_OF_MAIN_DIR + "%04d/ali_params_generation_*"%isac_generation_from_command_line)
				cmdexecute("rm -f " + NAME_OF_MAIN_DIR + "%04d/class_averages_generation*"%isac_generation_from_command_line)
		else:
			if os.path.exists(NAME_OF_JSON_STATE_FILE):
				stored_stack, stored_state = restore_program_stack_and_state(NAME_OF_JSON_STATE_FILE)
				if "isac_generation" in stored_state[-1]:
					isac_generation_from_command_line = stored_state[-1]["isac_generation"]
				else:
					isac_generation_from_command_line = 1
			else:
				isac_generation_from_command_line = 1
	else:
		isac_generation_from_command_line = 0
		
		
		
	isac_generation_from_command_line = mpi_bcast(isac_generation_from_command_line, 1, MPI_INT, 0, MPI_COMM_WORLD)[0]
	isac_generation = isac_generation_from_command_line - 1
	
	if (myid == main_node):
		if isac_generation == 0:
			cmdexecute("mkdir -p " + NAME_OF_MAIN_DIR + "%04d"%isac_generation)
			write_text_file([1], os.path.join(NAME_OF_MAIN_DIR + "%04d"%isac_generation, "generation_%d_accounted.txt"%isac_generation))
			write_text_file(range(number_of_images_in_stack), os.path.join(NAME_OF_MAIN_DIR + "%04d"%isac_generation, "generation_%d_unaccounted.txt"%isac_generation))
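
	#  Bookkeeping note: each generation directory holds a generation_<n>_accounted.txt
	#  (apparently the particles absorbed into reproducible classes) and a
	#  generation_<n>_unaccounted.txt (the leftovers, which seed the next generation's
	#  virtual stack via e2bdb.py --list below); the while loop stops when no accounted
	#  particles remain or n_generations is reached.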

	#  Stopping criterion should be inside the program.
	while True:
		isac_generation += 1
		if isac_generation > options.n_generations:
			break

		data64_stack_current = "bdb:../"+stack_processed_by_ali2d_base__filename__without_master_dir[4:]+"_%03d"%isac_generation

		if(myid == main_node):
			accounted_images = read_text_file(os.path.join(NAME_OF_MAIN_DIR + "%04d"%(isac_generation - 1),"generation_%d_accounted.txt"%(isac_generation - 1)))
			number_of_accounted_images = len(accounted_images)
			# unaccounted_images = read_text_file(os.path.join(NAME_OF_MAIN_DIR + "%04d"%(isac_generation - 1),"generation_%d_unaccounted.txt"%(isac_generation - 1)))
			# number_of_unaccounted_images = len(unaccounted_images)
		else:
			number_of_accounted_images = 0

		number_of_accounted_images = int(mpi_bcast(number_of_accounted_images, 1, MPI_INT, 0, MPI_COMM_WORLD)[0])
		if number_of_accounted_images == 0:
			os.chdir("..")
			break

		program_state_stack.restart_location_title = "restart"
		if program_state_stack(locals(), getframeinfo(currentframe())):
			if (myid == main_node):
				cmdexecute("mkdir -p " + NAME_OF_MAIN_DIR + "%04d"%isac_generation)
				# reference the original stack
				list_file = os.path.join(NAME_OF_MAIN_DIR + "%04d"%(isac_generation - 1), "generation_%d_unaccounted.txt"%(isac_generation - 1))
				cmdexecute("e2bdb.py %s --makevstack=%s --list=%s"%(stack_processed_by_ali2d_base__filename__without_master_dir,\
						stack_processed_by_ali2d_base__filename__without_master_dir + "_%03d"%isac_generation, list_file))
			mpi_barrier(MPI_COMM_WORLD)

		os.chdir(NAME_OF_MAIN_DIR + "%04d"%isac_generation)

		program_state_stack.restart_location_title = "candidate_class_averages"
		if program_state_stack(locals(), getframeinfo(currentframe())):

			iter_isac(data64_stack_current, options.ir, target_radius, options.rs, target_xr, target_xr, options.ts, options.maxit, False, 1.0,\
				options.dst, options.FL, options.FH, options.FF, options.init_iter, options.main_iter, options.iter_reali, options.match_first, \
				options.max_round, options.match_second, options.stab_ali, options.thld_err, options.indep_run, options.thld_grp, \
				options.img_per_grp, isac_generation, False, random_seed=options.rand_seed, new=False)#options.new)

		# program_state_stack.restart_location_title = "stopped_program1"
		# program_state_stack(locals(), getframeinfo(currentframe()))
		
		program_state_stack.restart_location_title = "stop_after_candidates"
		program_state_stack(locals(), getframeinfo(currentframe()))
		if stop_after_candidates:
			mpi_finalize()
			sys.exit()

		exit_program = 0
		if(myid == main_node):
			if not os.path.exists("class_averages_candidate_generation_%d.hdf"%isac_generation):
				print "This generation (%d) no class averages were generated!"%isac_generation
				exit_program = 1
		exit_program = int(mpi_bcast(exit_program, 1, MPI_INT, 0, MPI_COMM_WORLD)[0])
		if exit_program:
			os.chdir("..")
			break

		program_state_stack.restart_location_title = "reproducible_class_averages"
		if program_state_stack(locals(), getframeinfo(currentframe())):


			iter_isac(data64_stack_current, options.ir, target_radius, options.rs, target_xr, target_xr, options.ts, options.maxit, False, 1.0,\
				options.dst, options.FL, options.FH, options.FF, options.init_iter, options.main_iter, options.iter_reali, options.match_first, \
				options.max_round, options.match_second, options.stab_ali, options.thld_err, options.indep_run, options.thld_grp, \
				options.img_per_grp, isac_generation, True, random_seed=options.rand_seed, new=False)#options.new)
			pass

		os.chdir("..")

		if (myid == main_node):
			cmdexecute("rm -f class_averages.hdf")
			cpy(["generation_%04d/class_averages_generation_%d.hdf"%(i,i) for i in xrange(1, isac_generation)], "class_averages.hdf")

		# program_state_stack.restart_location_title = "stopped_program2"
		# program_state_stack(locals(), getframeinfo(currentframe()))

	program_state_stack(locals(), getframeinfo(currentframe()), last_call="__LastCall")


	mpi_finalize()
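
A minimal, self-contained sketch of the translational search schedule that both examples build inline from txrm and nxrsteps (plain Python, no EMAN2 dependency; the function name and the sample numbers are illustrative, not part of the original scripts):

def search_schedule(txrm, nxrsteps=4):
	# Halve the allowed shift range repeatedly, emitting a search range (xr)
	# and a matching step size (ts) per level, exactly as in the loop above.
	tss, txr = "", ""
	if txrm // nxrsteps > 0:
		while txrm // nxrsteps > 0:
			tts = txrm // nxrsteps
			tss += "  %d" % tts
			txr += "  %d" % (tts * nxrsteps)
			txrm //= 2
	else:
		tss, txr = "1", "%d" % txrm
	return txr.strip(), tss.strip()

# Illustrative use: search_schedule(20) returns ("20  8  4", "5  2  1").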
Example #10
0
def main(args):
    progname = os.path.basename(sys.argv[0])
    usage = (
        progname +
        " stack_file  output_directory --radius=particle_radius --img_per_grp=img_per_grp --CTF --restart_section<The remaining parameters are optional --ir=ir --rs=rs --xr=xr --yr=yr --ts=ts --maxit=maxit --dst=dst --FL=FL --FH=FH --FF=FF --init_iter=init_iter --main_maxit=main_iter"
        +
        " --iter_reali=iter_reali --match_first=match_first --max_round=max_round --match_second=match_second --stab_ali=stab_ali --thld_err=thld_err --indep_run=indep_run --thld_grp=thld_grp"
        + "  --generation=generation  --rand_seed=rand_seed>")

    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option(
        "--radius",
        type="int",
        help=
        "particle radius: there is no default, a sensible number has to be provided, units - pixels (default required int)"
    )
    parser.add_option(
        "--target_radius",
        type="int",
        default=29,
        help=
        "target particle radius: actual particle radius on which isac will process data. Images will be shrinked/enlarged to achieve this radius (default 29)"
    )
    parser.add_option(
        "--target_nx",
        type="int",
        default=76,
        help=
        "target particle image size: actual image size on which isac will process data. Images will be shrinked/enlarged according to target particle radius and then cut/padded to achieve target_nx size. When xr > 0, the final image size for isac processing is 'target_nx + xr - 1'  (default 76)"
    )
    parser.add_option(
        "--img_per_grp",
        type="int",
        default=100,
        help=
        "number of images per class: in the ideal case (essentially maximum size of class) (default 100)"
    )
    parser.add_option(
        "--CTF",
        action="store_true",
        default=False,
        help=
        "apply phase-flip for CTF correction: if set the data will be phase-flipped using CTF information included in image headers (default False)"
    )
    parser.add_option(
        "--ir",
        type="int",
        default=1,
        help=
        "inner ring: of the resampling to polar coordinates. units - pixels (default 1)"
    )
    parser.add_option(
        "--rs",
        type="int",
        default=1,
        help=
        "ring step: of the resampling to polar coordinates. units - pixels (default 1)"
    )
    parser.add_option(
        "--xr",
        type="int",
        default=1,
        help=
        "x range: of translational search. By default, set by the program. (default 1)"
    )
    parser.add_option(
        "--yr",
        type="int",
        default=-1,
        help=
        "y range: of translational search. By default, same as xr. (default -1)"
    )
    parser.add_option(
        "--ts",
        type="float",
        default=1.0,
        help=
        "search step: of translational search: units - pixels (default 1.0)")
    parser.add_option(
        "--maxit",
        type="int",
        default=30,
        help="number of iterations for reference-free alignment: (default 30)")
    #parser.add_option("--snr",            type="float",        default=1.0,     help="signal-to-noise ratio (only meaningful when CTF is enabled, currently not supported)")
    parser.add_option(
        "--center_method",
        type="int",
        default=-1,
        help=
        "method for centering: of global 2D average during initial prealignment of data (0 : no centering; -1 : average shift method; please see center_2D in utilities.py for methods 1-7) (default -1)"
    )
    parser.add_option(
        "--dst",
        type="float",
        default=90.0,
        help="discrete angle used in within group alignment: (default 90.0)")
    parser.add_option(
        "--FL",
        type="float",
        default=0.2,
        help=
        "lowest stopband: frequency used in the tangent filter (default 0.2)")
    parser.add_option(
        "--FH",
        type="float",
        default=0.3,
        help=
        "highest stopband: frequency used in the tangent filter (default 0.3)")
    parser.add_option("--FF",
                      type="float",
                      default=0.2,
                      help="fall-off of the tangent filter: (default 0.2)")
    parser.add_option(
        "--init_iter",
        type="int",
        default=3,
        help=
        "SAC initialization iterations: number of runs of ab-initio within-cluster alignment for stability evaluation in SAC initialization (default 3)"
    )
    parser.add_option(
        "--main_iter",
        type="int",
        default=3,
        help=
        "SAC main iterations: number of runs of ab-initio within-cluster alignment for stability evaluation in SAC (default 3)"
    )
    parser.add_option(
        "--iter_reali",
        type="int",
        default=1,
        help=
        "SAC stability check interval: every iter_reali iterations of SAC stability checking is performed (default 1)"
    )
    parser.add_option(
        "--match_first",
        type="int",
        default=1,
        help=
        "number of iterations to run 2-way matching in the first phase: (default 1)"
    )
    parser.add_option(
        "--max_round",
        type="int",
        default=20,
        help=
        "maximum rounds: of generating candidate class averages in the first phase (default 20)"
    )
    parser.add_option(
        "--match_second",
        type="int",
        default=5,
        help=
        "number of iterations to run 2-way (or 3-way) matching in the second phase: (default 5)"
    )
    parser.add_option(
        "--stab_ali",
        type="int",
        default=5,
        help="number of alignments when checking stability: (default 5)")
    parser.add_option(
        "--thld_err",
        type="float",
        default=0.7,
        help=
        "threshold of pixel error when checking stability: equals root mean square of distances between corresponding pixels from set of found transformations and theirs average transformation, depends linearly on square of radius (parameter ou). units - pixels. (default 0.7)"
    )
    parser.add_option(
        "--indep_run",
        type="int",
        default=4,
        help=
        "level of m-way matching for reproducibility tests: By default, perform full ISAC to 4-way matching. Value indep_run=2 will restrict ISAC to 2-way matching and 3 to 3-way matching.  Note the number of used MPI processes requested in mpirun must be a multiplicity of indep_run. (default 4)"
    )
    parser.add_option("--thld_grp",
                      type="int",
                      default=10,
                      help="minimum size of reproducible class (default 10)")
    parser.add_option(
        "--n_generations",
        type="int",
        default=10,
        help=
        "maximum number of generations: program stops when reaching this total number of generations: (default 10)"
    )
    #parser.add_option("--candidatesexist",action="store_true", default=False,   help="Candidate class averages exist use them (default False)")
    parser.add_option(
        "--rand_seed",
        type="int",
        help=
        "random seed set before calculations: useful for testing purposes. By default, total randomness (type int)"
    )
    parser.add_option("--new",
                      action="store_true",
                      default=False,
                      help="use new code: (default False)")
    parser.add_option("--debug",
                      action="store_true",
                      default=False,
                      help="debug info printout: (default False)")

    # must be switched off in production
    parser.add_option(
        "--use_latest_master_directory",
        action="store_true",
        default=False,
        help=
        "use latest master directory: when active, the program looks for the latest directory that starts with the word 'master', so the user does not need to provide a directory name. (default False)"
    )

    parser.add_option(
        "--restart_section",
        type="string",
        default=' ',
        help=
        "restart section: each generation (iteration) contains three sections: 'restart', 'candidate_class_averages', and 'reproducible_class_averages'. To restart from a particular step, for example, generation 4 and section 'candidate_class_averages' the following option is needed: '--restart_section=candidate_class_averages,4'. The option requires no white space before or after the comma. The default behavior is to restart execution from where it stopped intentionally or unintentionally. For default restart, it is assumed that the name of the directory is provided as argument. Alternatively, the '--use_latest_master_directory' option can be used. (default ' ')"
    )
    parser.add_option(
        "--stop_after_candidates",
        action="store_true",
        default=False,
        help=
        "stop after candidates: stops after the 'candidate_class_averages' section. (default False)"
    )

    ##### XXXXXXXXXXXXXXXXXXXXXX option does not exist in docs XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    parser.add_option("--return_options",
                      action="store_true",
                      dest="return_options",
                      default=False,
                      help=SUPPRESS_HELP)
    parser.add_option(
        "--skip_prealignment",
        action="store_true",
        default=False,
        help=
        "skip pre-alignment step: to be used if images are already centered. 2dalignment directory will still be generated but the parameters will be zero. (default False)"
    )

    required_option_list = ['radius']
    (options, args) = parser.parse_args(args)

    if options.return_options:
        return parser

    if len(args) > 2:
        print "usage: " + usage
        print "Please run '" + progname + " -h' for detailed options"
        sys.exit()

    if global_def.CACHE_DISABLE:
        from utilities import disable_bdb_cache
        disable_bdb_cache()
    global_def.BATCH = True

    from isac import iter_isac
    from fundamentals import rot_shift2D, resample
    from utilities import pad, combine_params2

    command_line_provided_stack_filename = args[0]

    main_node = 0
    mpi_init(0, [])
    myid = mpi_comm_rank(MPI_COMM_WORLD)
    nproc = mpi_comm_size(MPI_COMM_WORLD)

    mpi_barrier(MPI_COMM_WORLD)
    if (myid == main_node):
        print "****************************************************************"
        Util.version()
        print "****************************************************************"
        sys.stdout.flush()
    mpi_barrier(MPI_COMM_WORLD)

    # Making sure all required options appeared.
    for required_option in required_option_list:
        if not options.__dict__[required_option]:
            print "\n ==%s== mandatory option is missing.\n" % required_option
            print "Please run '" + progname + " -h' for detailed options"
            return 1

    radi = options.radius
    target_radius = options.target_radius
    target_nx = options.target_nx
    center_method = options.center_method
    if (radi < 1):
        ERROR("Particle radius has to be provided!", "sxisac", 1, myid)

    use_latest_master_directory = options.use_latest_master_directory
    stop_after_candidates = options.stop_after_candidates
    # program_state_stack.restart_location_title_from_command_line = options.restart_section

    from utilities import qw
    program_state_stack.PROGRAM_STATE_VARIABLES = set(
        qw("""
		isac_generation
	"""))

    # create or reuse master directory
    masterdir = ""
    stack_processed_by_ali2d_base__filename = ""
    stack_processed_by_ali2d_base__filename__without_master_dir = ""
    error_status = 0
    if len(args) == 2:
        masterdir = args[1]
    elif len(args) == 1:
        if use_latest_master_directory:
            all_dirs = [d for d in os.listdir(".") if os.path.isdir(d)]
            import re
            r = re.compile("^master.*$")
            all_dirs = filter(r.match, all_dirs)
            if len(all_dirs) > 0:
                # all_dirs = max(all_dirs, key=os.path.getctime)
                masterdir = max(all_dirs, key=os.path.getmtime)

    #Create folder for all results or check if there is one created already
    if (myid == main_node):
        if (masterdir == ""):
            timestring = strftime("%Y_%m_%d__%H_%M_%S" + DIR_DELIM,
                                  localtime())
            masterdir = "master" + timestring
            cmd = "{} {}".format("mkdir", masterdir)
            junk = cmdexecute(cmd)
        elif not os.path.exists(masterdir):
            # masterdir was given on the command line but does not exist yet, so create it
            masterdir = args[1]
            cmd = "{} {}".format("mkdir", masterdir)
            junk = cmdexecute(cmd)

        if (args[0][:4] == "bdb:"): filename = args[0][4:]
        else: filename = args[0][:-4]
        filename = os.path.basename(filename)
        stack_processed_by_ali2d_base__filename = "bdb:" + os.path.join(
            masterdir, filename)
        stack_processed_by_ali2d_base__filename__without_master_dir = "bdb:" + filename
    if_error_then_all_processes_exit_program(error_status)

    # send masterdir to all processes
    masterdir = send_string_to_all(masterdir)

    if myid == 0:
        if options.restart_section != " ":
            if os.path.exists(os.path.join(masterdir,
                                           NAME_OF_JSON_STATE_FILE)):
                stored_stack, stored_state = restore_program_stack_and_state(
                    os.path.join(masterdir, NAME_OF_JSON_STATE_FILE))
                import re
                if "," in options.restart_section:
                    parsed_restart_section_option = options.restart_section.split(
                        ",")
                    stored_state[-1]["location_in_program"] = re.sub(
                        r"___.*$", "___%s" % parsed_restart_section_option[0],
                        stored_state[-1]["location_in_program"])
                    generation_str_format = parsed_restart_section_option[1]
                    if generation_str_format != "":
                        isac_generation_from_command_line = int(
                            generation_str_format)
                        stored_state[-1][
                            "isac_generation"] = isac_generation_from_command_line
                    else:
                        isac_generation_from_command_line = 1
                        if "isac_generation" in stored_state[-1]:
                            del stored_state[-1]["isac_generation"]
                else:
                    isac_generation_from_command_line = -1
                    stored_state[-1]["location_in_program"] = re.sub(
                        r"___.*$", "___%s" % options.restart_section,
                        stored_state[-1]["location_in_program"])
                    if "isac_generation" in stored_state[-1]:
                        del stored_state[-1]["isac_generation"]
                store_program_state(
                    os.path.join(masterdir, NAME_OF_JSON_STATE_FILE),
                    stored_state, stored_stack)
            else:
                print "Please remove the restart_section option from the command line. The program must be started from the beginning."
                mpi_finalize()
                sys.exit()
        else:
            isac_generation_from_command_line = -1

    program_state_stack(locals(), getframeinfo(currentframe()),
                        os.path.join(masterdir, NAME_OF_JSON_STATE_FILE))

    stack_processed_by_ali2d_base__filename = send_string_to_all(
        stack_processed_by_ali2d_base__filename)
    stack_processed_by_ali2d_base__filename__without_master_dir = \
     send_string_to_all(stack_processed_by_ali2d_base__filename__without_master_dir)

    # previous code 2016-05-05--20-14-12-153
    # #  PARAMETERS OF THE PROCEDURE
    # if( options.xr == -1 ):
    # 	#  Default values
    # 	# target_nx = 76
    # 	# target_radius = 29
    # 	target_xr = 1
    # else:  #  nx//2
    # 	#  Check below!
    # 	target_xr = options.xr
    # 	# target_nx = 76 + target_xr - 1 # subtract one, which is default
    # 	target_nx += target_xr - 1 # subtract one, which is default
    # 	# target_radius = 29

    target_xr = options.xr
    target_nx += target_xr - 1  # xr = 1 is the default, hence the "- 1"
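
    # Example (illustrative numbers only): with the default target_nx = 76 and xr = 1
    # the box stays 76; with xr = 3 the box used by isac becomes 78, matching the
    # 'target_nx + xr - 1' rule quoted in the --target_nx help above.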

    if (options.yr == -1):
        yr = options.xr
    else:
        yr = options.yr

    mpi_barrier(MPI_COMM_WORLD)

    # Initialization of stacks
    if (myid == main_node):
        print "command_line_provided_stack_filename", command_line_provided_stack_filename
        number_of_images_in_stack = EMUtil.get_image_count(
            command_line_provided_stack_filename)
    else:
        number_of_images_in_stack = 0

    number_of_images_in_stack = bcast_number_to_all(number_of_images_in_stack,
                                                    source_node=main_node)

    nxrsteps = 4

    init2dir = os.path.join(masterdir, "2dalignment")

    # from mpi import mpi_finalize
    # mpi_finalize()
    # sys.stdout.flush()
    # sys.exit()

    if not os.path.exists(
            os.path.join(init2dir, "Finished_initial_2d_alignment.txt")):

        if (myid == 0):
            import subprocess
            from logger import Logger, BaseLogger_Files
            #  Create output directory
            log2d = Logger(BaseLogger_Files())
            log2d.prefix = os.path.join(init2dir)
            cmd = "mkdir -p " + log2d.prefix
            outcome = subprocess.call(cmd, shell=True)
            log2d.prefix += "/"
            # outcome = subprocess.call("sxheader.py  "+command_line_provided_stack_filename+"   --params=xform.align2d  --zero", shell=True)
        else:
            outcome = 0
            log2d = None

        if (myid == main_node):
            a = get_im(command_line_provided_stack_filename)
            nnxo = a.get_xsize()
        else:
            nnxo = 0
        nnxo = bcast_number_to_all(nnxo, source_node=main_node)

        image_start, image_end = MPI_start_end(number_of_images_in_stack,
                                               nproc, myid)

        if options.skip_prealignment:
            params2d = [[0.0, 0.0, 0.0, 0]
                        for i in xrange(image_start, image_end)]
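
            # Each entry is [alpha, sx, sy, mirror]; with --skip_prealignment they are
            # all zero, so the shift application further below is effectively a no-op.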
        else:

            original_images = EMData.read_images(
                command_line_provided_stack_filename,
                range(image_start, image_end))
            #  We assume the target radius will be 29, and xr = 1.
            shrink_ratio = float(target_radius) / float(radi)

            for im in xrange(len(original_images)):
                if (shrink_ratio != 1.0):
                    original_images[im] = resample(original_images[im],
                                                   shrink_ratio)

            nx = original_images[0].get_xsize()
            # nx = int(nx*shrink_ratio + 0.5)

            txrm = (nx - 2 * (target_radius + 1)) // 2
            if (txrm < 0):
                ERROR(
                    "Radius of the structure (%d) is larger than the window size permits"
                    % (radi), "sxisac", 1, myid)
            if (txrm / nxrsteps > 0):
                tss = ""
                txr = ""
                while (txrm / nxrsteps > 0):
                    tts = txrm / nxrsteps
                    tss += "  %d" % tts
                    txr += "  %d" % (tts * nxrsteps)
                    txrm = txrm // 2
            else:
                tss = "1"
                txr = "%d" % txrm

            # print "nx, txr, txrm, tss", nx, txr, txrm, tss
        # from mpi import mpi_finalize
        # mpi_finalize()
        # sys.stdout.flush()
        # sys.exit()

        # section ali2d_base

            params2d = ali2d_base(original_images, init2dir, None, 1, target_radius, 1, txr, txr, tss, \
             False, 90.0, center_method, 14, options.CTF, 1.0, False, \
             "ref_ali2d", "", log2d, nproc, myid, main_node, MPI_COMM_WORLD, write_headers = False)

            del original_images
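
            # The loop below converts each (alpha, sx, sy) prealignment into a pure shift:
            # combine_params2(0, sx, sy, 0, -alpha, 0, 0, 0) effectively rotates the
            # (sx, sy) shift back by -alpha so only a translation remains, and the rotation
            # itself is discarded; the shifts are then divided by shrink_ratio because
            # ali2d_base ran on resampled images while the stored parameters must refer to
            # the original pixel size.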

            for i in xrange(len(params2d)):
                alpha, sx, sy, mirror = combine_params2(
                    0, params2d[i][1], params2d[i][2], 0, -params2d[i][0], 0,
                    0, 0)
                sx /= shrink_ratio
                sy /= shrink_ratio
                params2d[i][0] = 0.0
                params2d[i][1] = sx
                params2d[i][2] = sy
                params2d[i][3] = 0
                #set_params2D(aligned_images[i],[0.0, sx,sy,0.,1.0])

        mpi_barrier(MPI_COMM_WORLD)
        tmp = params2d[:]
        tmp = wrap_mpi_gatherv(tmp, main_node, MPI_COMM_WORLD)
        if (myid == main_node):
            if options.skip_prealignment:
                print "========================================="
                print "Even though there is no alignment step, '%s' params are set to zero for later use." % os.path.join(
                    init2dir, "initial2Dparams.txt")
                print "========================================="
            write_text_row(tmp, os.path.join(init2dir, "initial2Dparams.txt"))
        del tmp
        mpi_barrier(MPI_COMM_WORLD)

        #  We assume the target image size will be target_nx, radius will be 29, and xr = 1.
        #  Note images can be also padded, in which case shrink_ratio > 1.
        shrink_ratio = float(target_radius) / float(radi)

        aligned_images = EMData.read_images(
            command_line_provided_stack_filename,
            range(image_start, image_end))
        nx = aligned_images[0].get_xsize()
        nima = len(aligned_images)
        newx = int(nx * shrink_ratio + 0.5)

        while not os.path.exists(os.path.join(init2dir,
                                              "initial2Dparams.txt")):
            import time
            time.sleep(1)
        mpi_barrier(MPI_COMM_WORLD)

        params = read_text_row(os.path.join(init2dir, "initial2Dparams.txt"))
        params = params[image_start:image_end]

        msk = model_circle(radi, nx, nx)
        for im in xrange(nima):
            st = Util.infomask(aligned_images[im], msk, False)
            aligned_images[im] -= st[0]
            if options.CTF:
                aligned_images[im] = filt_ctf(
                    aligned_images[im],
                    aligned_images[im].get_attr("ctf"),
                    binary=True)
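
        # Note: with binary=True the CTF filter above presumably acts as a pure phase flip
        # (sign correction only, no amplitude weighting), matching the --CTF help text
        # "the data will be phase-flipped using CTF information included in image headers".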

        if (shrink_ratio < 1.0):
            if newx > target_nx:
                msk = model_circle(target_radius, target_nx, target_nx)
                for im in xrange(nima):
                    #  Here we should use only shifts
                    #alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
                    #alpha, sx, sy, mirror = combine_params2(0, sx,sy, 0, -alpha, 0, 0, 0)
                    #aligned_images[im] = rot_shift2D(aligned_images[im], 0, sx, sy, 0)
                    aligned_images[im] = rot_shift2D(aligned_images[im], 0,
                                                     params[im][1],
                                                     params[im][2], 0)
                    aligned_images[im] = resample(aligned_images[im],
                                                  shrink_ratio)
                    aligned_images[im] = Util.window(aligned_images[im],
                                                     target_nx, target_nx, 1)
                    p = Util.infomask(aligned_images[im], msk, False)
                    aligned_images[im] -= p[0]
                    p = Util.infomask(aligned_images[im], msk, True)
                    aligned_images[im] /= p[1]
            elif newx == target_nx:
                msk = model_circle(target_radius, target_nx, target_nx)
                for im in xrange(nima):
                    #  Here we should use only shifts
                    #alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
                    #alpha, sx, sy, mirror = combine_params2(0, sx,sy, 0, -alpha, 0, 0, 0)
                    aligned_images[im] = rot_shift2D(aligned_images[im], 0,
                                                     params[im][1],
                                                     params[im][2], 0)
                    aligned_images[im] = resample(aligned_images[im],
                                                  shrink_ratio)
                    p = Util.infomask(aligned_images[im], msk, False)
                    aligned_images[im] -= p[0]
                    p = Util.infomask(aligned_images[im], msk, True)
                    aligned_images[im] /= p[1]
            elif newx < target_nx:
                msk = model_circle(newx // 2 - 2, newx, newx)
                for im in xrange(nima):
                    #  Here we should use only shifts
                    #alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
                    #alpha, sx, sy, mirror = combine_params2(0, sx,sy, 0, -alpha, 0, 0, 0)
                    aligned_images[im] = rot_shift2D(aligned_images[im], 0,
                                                     params[im][1],
                                                     params[im][2], 0)
                    aligned_images[im] = resample(aligned_images[im],
                                                  shrink_ratio)
                    p = Util.infomask(aligned_images[im], msk, False)
                    aligned_images[im] -= p[0]
                    p = Util.infomask(aligned_images[im], msk, True)
                    aligned_images[im] /= p[1]
                    aligned_images[im] = pad(aligned_images[im], target_nx,
                                             target_nx, 1, 0.0)
        elif (shrink_ratio == 1.0):
            if newx > target_nx:
                msk = model_circle(target_radius, target_nx, target_nx)
                for im in xrange(nima):
                    #  Here we should use only shifts
                    #alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
                    #alpha, sx, sy, mirror = combine_params2(0, sx,sy, 0, -alpha, 0, 0, 0)
                    aligned_images[im] = rot_shift2D(aligned_images[im], 0,
                                                     params[im][1],
                                                     params[im][2], 0)
                    aligned_images[im] = Util.window(aligned_images[im],
                                                     target_nx, target_nx, 1)
                    p = Util.infomask(aligned_images[im], msk, False)
                    aligned_images[im] -= p[0]
                    p = Util.infomask(aligned_images[im], msk, True)
                    aligned_images[im] /= p[1]
            elif newx == target_nx:
                msk = model_circle(target_radius, target_nx, target_nx)
                for im in xrange(nima):
                    #  Here we should use only shifts
                    #alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
                    #alpha, sx, sy, mirror = combine_params2(0, sx,sy, 0, -alpha, 0, 0, 0)
                    aligned_images[im] = rot_shift2D(aligned_images[im], 0,
                                                     params[im][1],
                                                     params[im][2], 0)
                    p = Util.infomask(aligned_images[im], msk, False)
                    aligned_images[im] -= p[0]
                    p = Util.infomask(aligned_images[im], msk, True)
                    aligned_images[im] /= p[1]
            elif newx < target_nx:
                msk = model_circle(newx // 2 - 2, newx, newx)
                for im in xrange(nima):
                    #  Here we should use only shifts
                    #alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
                    #alpha, sx, sy, mirror = combine_params2(0, sx,sy, 0, -alpha, 0, 0, 0)
                    aligned_images[im] = rot_shift2D(aligned_images[im], 0,
                                                     params[im][1],
                                                     params[im][2], 0)
                    #aligned_images[im]  = resample(aligned_images[im], shrink_ratio)
                    p = Util.infomask(aligned_images[im], msk, False)
                    aligned_images[im] -= p[0]
                    p = Util.infomask(aligned_images[im], msk, True)
                    aligned_images[im] /= p[1]
                    aligned_images[im] = pad(aligned_images[im], target_nx,
                                             target_nx, 1, 0.0)
        elif (shrink_ratio > 1.0):
            if newx > target_nx:
                msk = model_circle(target_radius, target_nx, target_nx)
                for im in xrange(nima):
                    #  Here we should use only shifts
                    #alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
                    #alpha, sx, sy, mirror = combine_params2(0, sx,sy, 0, -alpha, 0, 0, 0)
                    aligned_images[im] = rot_shift2D(aligned_images[im], 0,
                                                     params[im][1],
                                                     params[im][2], 0)
                    aligned_images[im] = resample(aligned_images[im],
                                                  shrink_ratio)
                    aligned_images[im] = Util.window(aligned_images[im],
                                                     target_nx, target_nx, 1)
                    p = Util.infomask(aligned_images[im], msk, False)
                    aligned_images[im] -= p[0]
                    p = Util.infomask(aligned_images[im], msk, True)
                    aligned_images[im] /= p[1]
            elif newx == target_nx:
                msk = model_circle(target_radius, target_nx, target_nx)
                for im in xrange(nima):
                    #  Here we should use only shifts
                    #alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
                    #alpha, sx, sy, mirror = combine_params2(0, sx,sy, 0, -alpha, 0, 0, 0)
                    aligned_images[im] = rot_shift2D(aligned_images[im], 0,
                                                     params[im][1],
                                                     params[im][2], 0)
                    aligned_images[im] = resample(aligned_images[im],
                                                  shrink_ratio)
                    p = Util.infomask(aligned_images[im], msk, False)
                    aligned_images[im] -= p[0]
                    p = Util.infomask(aligned_images[im], msk, True)
                    aligned_images[im] /= p[1]
            elif newx < target_nx:
                msk = model_circle(newx // 2 - 2, newx, newx)
                for im in xrange(nima):
                    #  Here we should use only shifts
                    #alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
                    #alpha, sx, sy, mirror = combine_params2(0, sx,sy, 0, -alpha, 0, 0, 0)
                    aligned_images[im] = rot_shift2D(aligned_images[im], 0,
                                                     params[im][1],
                                                     params[im][2], 0)
                    aligned_images[im] = resample(aligned_images[im],
                                                  shrink_ratio)
                    p = Util.infomask(aligned_images[im], msk, False)
                    aligned_images[im] -= p[0]
                    p = Util.infomask(aligned_images[im], msk, True)
                    aligned_images[im] /= p[1]
                    aligned_images[im] = pad(aligned_images[im], target_nx,
                                             target_nx, 1, 0.0)
        del msk
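
        # As in the branches above, each image is normalized using the values returned by
        # Util.infomask under the current circular mask (the first value, apparently the
        # mean, is subtracted; the second, apparently the sigma, is divided out) before the
        # stack is gathered and written out for ISAC proper.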

        gather_compacted_EMData_to_root(number_of_images_in_stack,
                                        aligned_images, myid)
        number_of_images_in_stack = bcast_number_to_all(
            number_of_images_in_stack, source_node=main_node)

        if (myid == main_node):
            for i in range(number_of_images_in_stack):
                aligned_images[i].write_image(
                    stack_processed_by_ali2d_base__filename, i)
            #  It has to be explicitly closed
            from EMAN2db import db_open_dict
            DB = db_open_dict(stack_processed_by_ali2d_base__filename)
            DB.close()

            fp = open(os.path.join(masterdir, "README_shrink_ratio.txt"), "w")
            output_text = """
			Since, for processing purposes, isac changes the image dimensions,
			adjustment of pixel size needs to be made in subsequent steps, (e.g.
			running sxviper.py). The shrink ratio for this particular isac run is
			--------
			%.5f
			%.5f
			--------
			To get the pixel size for the isac output the user needs to divide
			the original pixel size by the above value. This info is saved in
			the following file: README_shrink_ratio.txt
			""" % (shrink_ratio, radi)
            fp.write(output_text)
            fp.flush()
            fp.close()
            print output_text
            fp = open(
                os.path.join(init2dir, "Finished_initial_2d_alignment.txt"),
                "w")
            fp.flush()
            fp.close()
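
            # The empty Finished_initial_2d_alignment.txt written above serves as a sentinel:
            # the os.path.exists check near the top of this block skips the whole 2D
            # prealignment on a restart once this file is present.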
    else:
        if (myid == main_node):
            print "Skipping 2d alignment since it was already done!"

    mpi_barrier(MPI_COMM_WORLD)

    # from mpi import mpi_finalize
    # mpi_finalize()
    # sys.stdout.flush()
    # sys.exit()

    os.chdir(masterdir)

    if program_state_stack(locals(), getframeinfo(currentframe())):
        if (myid == main_node):
            junk = cmdexecute(
                "sxheader.py  --consecutive  --params=originalid   %s" %
                stack_processed_by_ali2d_base__filename__without_master_dir)
            junk = cmdexecute(
                "e2bdb.py %s --makevstack=%s_000" %
                (stack_processed_by_ali2d_base__filename__without_master_dir,
                 stack_processed_by_ali2d_base__filename__without_master_dir))

    if (myid == main_node):
        main_dir_no = get_latest_directory_increment_value("./",
                                                           NAME_OF_MAIN_DIR,
                                                           myformat="%04d")
        print "isac_generation_from_command_line", isac_generation_from_command_line, main_dir_no
        if isac_generation_from_command_line < 0:
            if os.path.exists(NAME_OF_JSON_STATE_FILE):
                stored_stack, stored_state = restore_program_stack_and_state(
                    NAME_OF_JSON_STATE_FILE)
                if "isac_generation" in stored_state[-1]:
                    isac_generation_from_command_line = stored_state[-1][
                        "isac_generation"]
                else:
                    isac_generation_from_command_line = -1
        if isac_generation_from_command_line >= 0 and isac_generation_from_command_line <= main_dir_no:
            for i in xrange(isac_generation_from_command_line + 1,
                            main_dir_no + 1):
                if i == isac_generation_from_command_line + 1:
                    backup_dir_no = get_nonexistent_directory_increment_value(
                        "./", "000_backup", myformat="%05d", start_value=1)
                    junk = cmdexecute("mkdir -p " + "000_backup" +
                                      "%05d" % backup_dir_no)
                junk = cmdexecute("mv  " + NAME_OF_MAIN_DIR + "%04d" % i +
                                  " 000_backup" + "%05d" % backup_dir_no)
                junk = cmdexecute(
                    "rm  " + "EMAN2DB/" +
                    stack_processed_by_ali2d_base__filename__without_master_dir[
                        4:] + "_%03d.bdb" % i)

            # it includes both command line and json file
            my_restart_section = stored_state[-1]["location_in_program"].split(
                "___")[-1]
            if "restart" in my_restart_section:
                if "backup_dir_no" not in locals():
                    backup_dir_no = get_nonexistent_directory_increment_value(
                        "./", "000_backup", myformat="%05d", start_value=1)
                    junk = cmdexecute("mkdir -p " + "000_backup" +
                                      "%05d" % backup_dir_no)
                junk = cmdexecute("mv  " + NAME_OF_MAIN_DIR +
                                  "%04d" % isac_generation_from_command_line +
                                  " 000_backup" + "%05d" % backup_dir_no)
                junk = cmdexecute(
                    "rm  " + "EMAN2DB/" +
                    stack_processed_by_ali2d_base__filename__without_master_dir[
                        4:] + "_%03d.bdb" % isac_generation_from_command_line)
            elif "candidate_class_averages" in my_restart_section:
                if "backup_dir_no" not in locals():
                    backup_dir_no = get_nonexistent_directory_increment_value(
                        "./", "000_backup", myformat="%05d", start_value=1)
                    junk = cmdexecute("mkdir -p " + "000_backup" +
                                      "%05d" % backup_dir_no)
                junk = cmdexecute("mv  " + NAME_OF_MAIN_DIR +
                                  "%04d" % isac_generation_from_command_line +
                                  " 000_backup" + "%05d" % backup_dir_no)
                junk = cmdexecute("mkdir -p " + NAME_OF_MAIN_DIR +
                                  "%04d" % isac_generation_from_command_line)
                # junk = cmdexecute("rm -f " + NAME_OF_MAIN_DIR + "%04d/class_averages_candidate*"%isac_generation_from_command_line)
            elif "reproducible_class_averages" in my_restart_section:
                junk = cmdexecute("rm -rf " + NAME_OF_MAIN_DIR +
                                  "%04d/ali_params_generation_*" %
                                  isac_generation_from_command_line)
                junk = cmdexecute("rm -f " + NAME_OF_MAIN_DIR +
                                  "%04d/class_averages_generation*" %
                                  isac_generation_from_command_line)
        else:
            if os.path.exists(NAME_OF_JSON_STATE_FILE):
                stored_stack, stored_state = restore_program_stack_and_state(
                    NAME_OF_JSON_STATE_FILE)
                if "isac_generation" in stored_state[-1]:
                    isac_generation_from_command_line = stored_state[-1][
                        "isac_generation"]
                else:
                    isac_generation_from_command_line = 1
            else:
                isac_generation_from_command_line = 1
    else:
        isac_generation_from_command_line = 0

    isac_generation_from_command_line = mpi_bcast(
        isac_generation_from_command_line, 1, MPI_INT, 0, MPI_COMM_WORLD)[0]
    isac_generation = isac_generation_from_command_line - 1

    if (myid == main_node):
        if isac_generation == 0:
            junk = cmdexecute("mkdir -p " + NAME_OF_MAIN_DIR +
                              "%04d" % isac_generation)
            write_text_file(
                [1],
                os.path.join(NAME_OF_MAIN_DIR + "%04d" % isac_generation,
                             "generation_%d_accounted.txt" % isac_generation))
            write_text_file(
                range(number_of_images_in_stack),
                os.path.join(NAME_OF_MAIN_DIR + "%04d" % isac_generation,
                             "generation_%d_unaccounted.txt" %
                             isac_generation))

    #  Stopping criterion should be inside the program.
    while True:
        isac_generation += 1
        if isac_generation > options.n_generations:
            break

        data64_stack_current = "bdb:../" + stack_processed_by_ali2d_base__filename__without_master_dir[
            4:] + "_%03d" % isac_generation

        program_state_stack.restart_location_title = "restart"
        if program_state_stack(locals(), getframeinfo(currentframe())):
            if (myid == main_node):
                junk = cmdexecute("mkdir -p " + NAME_OF_MAIN_DIR +
                                  "%04d" % isac_generation)
                # reference the original stack
                list_file = os.path.join(
                    NAME_OF_MAIN_DIR + "%04d" % (isac_generation - 1),
                    "generation_%d_unaccounted.txt" % (isac_generation - 1))
                junk = cmdexecute("e2bdb.py %s --makevstack=%s --list=%s"%(stack_processed_by_ali2d_base__filename__without_master_dir,\
                  stack_processed_by_ali2d_base__filename__without_master_dir + "_%03d"%isac_generation, list_file))
            mpi_barrier(MPI_COMM_WORLD)

        os.chdir(NAME_OF_MAIN_DIR + "%04d" % isac_generation)

        program_state_stack.restart_location_title = "candidate_class_averages"
        if program_state_stack(locals(), getframeinfo(currentframe())):

            iter_isac(data64_stack_current, options.ir, target_radius, options.rs, target_xr, yr, options.ts, options.maxit, False, 1.0,\
             options.dst, options.FL, options.FH, options.FF, options.init_iter, options.main_iter, options.iter_reali, options.match_first, \
             options.max_round, options.match_second, options.stab_ali, options.thld_err, options.indep_run, options.thld_grp, \
             options.img_per_grp, isac_generation, False, random_seed=options.rand_seed, new=False)#options.new)

        # program_state_stack.restart_location_title = "stopped_program1"
        # program_state_stack(locals(), getframeinfo(currentframe()))

        program_state_stack.restart_location_title = "stop_after_candidates"
        program_state_stack(locals(), getframeinfo(currentframe()))
        if stop_after_candidates:
            mpi_finalize()
            sys.exit()

        exit_program = 0
        if (myid == main_node):
            if not os.path.exists(
                    "class_averages_candidate_generation_%d.hdf" %
                    isac_generation):
                print "This generation (%d) no class average candidates were generated! Finishing." % isac_generation
                exit_program = 1
        exit_program = int(
            mpi_bcast(exit_program, 1, MPI_INT, 0, MPI_COMM_WORLD)[0])
        if exit_program:
            os.chdir("..")
            break

        program_state_stack.restart_location_title = "reproducible_class_averages"
        if program_state_stack(locals(), getframeinfo(currentframe())):


            iter_isac(data64_stack_current, options.ir, target_radius, options.rs, target_xr, yr, options.ts, options.maxit, False, 1.0,\
             options.dst, options.FL, options.FH, options.FF, options.init_iter, options.main_iter, options.iter_reali, options.match_first, \
             options.max_round, options.match_second, options.stab_ali, options.thld_err, options.indep_run, options.thld_grp, \
             options.img_per_grp, isac_generation, True, random_seed=options.rand_seed, new=False)#options.new)
            pass

        os.chdir("..")

        if (myid == main_node):
            accounted_images = read_text_file(
                os.path.join(NAME_OF_MAIN_DIR + "%04d" % (isac_generation),
                             "generation_%d_accounted.txt" %
                             (isac_generation)))
            number_of_accounted_images = len(accounted_images)
            un_accounted_images = read_text_file(
                os.path.join(
                    NAME_OF_MAIN_DIR + "%04d" % (isac_generation),
                    "generation_%d_unaccounted.txt" % (isac_generation)))
            number_of_un_accounted_images = len(un_accounted_images)
        else:
            number_of_accounted_images = 0
            number_of_un_accounted_images = 0

        number_of_accounted_images = int(
            mpi_bcast(number_of_accounted_images, 1, MPI_INT, 0,
                      MPI_COMM_WORLD)[0])
        number_of_un_accounted_images = int(
            mpi_bcast(number_of_un_accounted_images, 1, MPI_INT, 0,
                      MPI_COMM_WORLD)[0])

        if number_of_accounted_images == 0:
            if (myid == main_node):
                print "This generation (%d) there are no accounted images! Finishing." % isac_generation
            break

        while (myid == main_node):

            def files_are_missing(isac_generation):
                for i in xrange(1, isac_generation + 1):
                    if not os.path.exists(
                            "generation_%04d/class_averages_generation_%d.hdf"
                            % (i, i)):
                        print "Error: generation_%04d/class_averages_generation_%d.hdf is missing! Exiting." % (
                            i, i)
                        return 1
                return 0

            if files_are_missing(isac_generation):
                break

            junk = cmdexecute("rm -f class_averages.hdf")
            cpy([
                "generation_%04d/class_averages_generation_%d.hdf" % (i, i)
                for i in xrange(1, isac_generation + 1)
            ], "class_averages.hdf")

            break

        if number_of_un_accounted_images == 0:
            if (myid == main_node):
                print "This generation (%d) there are no un accounted images! Finishing." % isac_generation
            break

    program_state_stack(locals(),
                        getframeinfo(currentframe()),
                        last_call="__LastCall")

    mpi_barrier(MPI_COMM_WORLD)
    mpi_finalize()
Example #11
0
def main(args):
    progname = os.path.basename(sys.argv[0])
    usage = (
        progname +
        " stack_file  output_directory --radius=particle_radius --img_per_grp=img_per_grp --CTF --restart_section<The remaining parameters are optional --ir=ir --rs=rs --xr=xr --yr=yr --ts=ts --maxit=maxit --dst=dst --FL=FL --FH=FH --FF=FF --init_iter=init_iter --main_maxit=main_iter"
        +
        " --iter_reali=iter_reali --match_first=match_first --max_round=max_round --match_second=match_second --stab_ali=stab_ali --thld_err=thld_err --indep_run=indep_run --thld_grp=thld_grp"
        + "  --generation=generation  --rand_seed=rand_seed>")

    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option("--radius",
                      type="int",
                      default=-1,
                      help="<Particle radius>, it has to be provided.")
    parser.add_option(
        "--img_per_grp",
        type="int",
        default=100,
        help=
        "<number of images per group> in the ideal case (essentially maximum size of class) (100)"
    )
    parser.add_option("--CTF",
                      action="store_true",
                      default=False,
                      help="<CTF flag>, if set the data will be phase-flipped")
    parser.add_option(
        "--ir",
        type="int",
        default=1,
        help="<inner ring> of the resampling to polar coordinates (1)")
    parser.add_option(
        "--rs",
        type="int",
        default=1,
        help="<ring step> of the resampling to polar coordinates (1)")
    parser.add_option(
        "--xr",
        type="int",
        default=-1,
        help=
        "<x range> of translational search (By default set by the program) (advanced)"
    )
    parser.add_option(
        "--yr",
        type="int",
        default=-1,
        help="<y range> of translational search (same as xr) (advanced)")
    parser.add_option("--ts",
                      type="float",
                      default=1.0,
                      help="<search step> of translational search (1.0)")
    parser.add_option(
        "--maxit",
        type="int",
        default=30,
        help="number of iterations for reference-free alignment (30)")
    #parser.add_option("--snr",            type="float",        default=1.0,     help="signal-to-noise ratio (only meaningful when CTF is enabled, currently not supported)")
    parser.add_option(
        "--center_method",
        type="int",
        default=7,
        help=
        "<Method for centering> of global 2D average during initial prealignment of data (default : 7; 0 : no centering; -1 : average shift method; please see center_2D in utilities.py for methods 1-7)"
    )
    parser.add_option("--dst",
                      type="float",
                      default=90.0,
                      help="discrete angle used in within group alignment ")
    parser.add_option(
        "--FL",
        type="float",
        default=0.2,
        help="<lowest stopband> frequency used in the tangent filter (0.2)")
    parser.add_option(
        "--FH",
        type="float",
        default=0.3,
        help="<highest stopband> frequency used in the tangent filter (0.3)")
    parser.add_option("--FF",
                      type="float",
                      default=0.2,
                      help="<fall-off of the tangent> filter (0.2)")
    parser.add_option(
        "--init_iter",
        type="int",
        default=3,
        help=
        "<init_iter> number of iterations of ISAC program in initialization (3)"
    )
    parser.add_option(
        "--main_iter",
        type="int",
        default=3,
        help="<main_iter> number of iterations of ISAC program in main part (3)"
    )
    parser.add_option(
        "--iter_reali",
        type="int",
        default=1,
        help=
        "<iter_reali> number of iterations in ISAC before checking stability (1)"
    )
    parser.add_option(
        "--match_first",
        type="int",
        default=1,
        help="number of iterations to run 2-way matching in the first phase (1)"
    )
    parser.add_option(
        "--max_round",
        type="int",
        default=20,
        help=
        "maximum rounds of generating candidate averages in the first phase (20)"
    )
    parser.add_option(
        "--match_second",
        type="int",
        default=5,
        help=
        "number of iterations to run 2-way (or 3-way) matching in the second phase (5)"
    )
    parser.add_option("--stab_ali",
                      type="int",
                      default=5,
                      help="number of alignments when checking stability (5)")
    parser.add_option(
        "--thld_err",
        type="float",
        default=0.7,
        help="the threshold of pixel error when checking stability (0.7)")
    parser.add_option(
        "--indep_run",
        type="int",
        default=4,
        help=
        "number of independent runs for reproducibility (default=4, only values 2, 3 and 4 are supported (4)"
    )
    parser.add_option("--thld_grp",
                      type="int",
                      default=10,
                      help="minimum size of class (10)")
    parser.add_option(
        "--n_generations",
        type="int",
        default=100,
        help=
        "<n_generations> program stops when reaching this total number of generations (advanced)"
    )
    #parser.add_option("--candidatesexist",action="store_true", default=False,   help="Candidate class averages exist use them (default False)")
    parser.add_option(
        "--rand_seed",
        type="int",
        default=None,
        help=
        "random seed set before calculations, useful for testing purposes (default None - total randomness)"
    )
    parser.add_option("--new",
                      action="store_true",
                      default=False,
                      help="use new code (default = False)")
    parser.add_option("--debug",
                      action="store_true",
                      default=False,
                      help="debug info printout (default = False)")

    # must be switched off in production
    parser.add_option("--use_latest_master_directory",
                      action="store_true",
                      dest="use_latest_master_directory",
                      default=False)

    parser.add_option(
        "--restart_section",
        type="string",
        default="",
        help=
        "<restart section name> (no spaces) followed immediately by comma, followed immediately by generation to restart, example: \n--restart_section=candidate_class_averages,1         (Sections: restart, candidate_class_averages, reproducible_class_averages)"
    )
    parser.add_option(
        "--stop_after_candidates",
        action="store_true",
        default=False,
        help=
        "<stop_after_candidates> stops after the 'candidate_class_averages' section"
    )

    parser.add_option("--return_options",
                      action="store_true",
                      dest="return_options",
                      default=False,
                      help=SUPPRESS_HELP)

    (options, args) = parser.parse_args(args)

    if options.return_options:
        return parser

    if len(args) > 2:
        print "usage: " + usage
        print "Please run '" + progname + " -h' for detailed options"
        sys.exit()

    if global_def.CACHE_DISABLE:
        from utilities import disable_bdb_cache
        disable_bdb_cache()

    from isac import iter_isac
    global_def.BATCH = True

    command_line_provided_stack_filename = args[0]

    main_node = 0
    mpi_init(0, [])
    myid = mpi_comm_rank(MPI_COMM_WORLD)
    nproc = mpi_comm_size(MPI_COMM_WORLD)

    radi = options.radius
    center_method = options.center_method
    if (radi < 1):
        ERROR("Particle radius has to be provided!", "sxisac", 1, myid)

    use_latest_master_directory = options.use_latest_master_directory
    stop_after_candidates = options.stop_after_candidates
    program_state_stack.restart_location_title_from_command_line = options.restart_section

    from utilities import qw
    program_state_stack.PROGRAM_STATE_VARIABLES = set(
        qw("""
		isac_generation
	"""))

    # create or reuse master directory
    masterdir = ""
    stack_processed_by_ali2d_base__filename = ""
    stack_processed_by_ali2d_base__filename__without_master_dir = ""
    error_status = 0
    if len(args) == 2:
        masterdir = args[1]
    elif len(args) == 1:
        if use_latest_master_directory:
            all_dirs = [d for d in os.listdir(".") if os.path.isdir(d)]
            import re
            r = re.compile("^master.*$")
            all_dirs = filter(r.match, all_dirs)
            if len(all_dirs) > 0:
                # all_dirs = max(all_dirs, key=os.path.getctime)
                masterdir = max(all_dirs, key=os.path.getmtime)

    #Create folder for all results or check if there is one created already
    if (myid == main_node):
        if (masterdir == ""):
            timestring = strftime("%Y_%m_%d__%H_%M_%S" + DIR_DELIM,
                                  localtime())
            masterdir = "master" + timestring
            cmd = "{} {}".format("mkdir", masterdir)
            cmdexecute(cmd)
        elif not os.path.exists(masterdir):
            # masterdir was given on the command line but does not exist yet
            masterdir = args[1]
            cmd = "{} {}".format("mkdir", masterdir)
            cmdexecute(cmd)

        if (args[0][:4] == "bdb:"): filename = args[0][4:]
        else: filename = args[0][:-4]
        filename = os.path.basename(filename)
        stack_processed_by_ali2d_base__filename = "bdb:" + os.path.join(
            masterdir, filename)
        stack_processed_by_ali2d_base__filename__without_master_dir = "bdb:" + filename
    if_error_all_processes_quit_program(error_status)

    # send masterdir to all processes
    masterdir = send_string_to_all(masterdir)

    if myid == 0:
        if options.restart_section != "":
            if os.path.exists(os.path.join(masterdir,
                                           NAME_OF_JSON_STATE_FILE)):
                stored_stack, stored_state = restore_program_stack_and_state(
                    os.path.join(masterdir, NAME_OF_JSON_STATE_FILE))
                import re
                if "," in options.restart_section:
                    parsed_restart_section_option = options.restart_section.split(
                        ",")
                    stored_state[-1]["location_in_program"] = re.sub(
                        r"___.*$", "___%s" % parsed_restart_section_option[0],
                        stored_state[-1]["location_in_program"])
                    generation_str_format = parsed_restart_section_option[1]
                    if generation_str_format != "":
                        isac_generation_from_command_line = int(
                            generation_str_format)
                        stored_state[-1][
                            "isac_generation"] = isac_generation_from_command_line
                    else:
                        isac_generation_from_command_line = 1
                        if "isac_generation" in stored_state[-1]:
                            del stored_state[-1]["isac_generation"]
                else:
                    isac_generation_from_command_line = -1
                    stored_state[-1]["location_in_program"] = re.sub(
                        r"___.*$", "___%s" % options.restart_section,
                        stored_state[-1]["location_in_program"])
                    if "isac_generation" in stored_state[-1]:
                        del stored_state[-1]["isac_generation"]
                store_program_state(
                    os.path.join(masterdir, NAME_OF_JSON_STATE_FILE),
                    stored_state, stored_stack)
            else:
                print "Please remove the restart_section option from the command line. The program must be started from the beginning."
                mpi_finalize()
                sys.exit()
        else:
            isac_generation_from_command_line = -1

    program_state_stack(locals(), getframeinfo(currentframe()),
                        os.path.join(masterdir, NAME_OF_JSON_STATE_FILE))

    stack_processed_by_ali2d_base__filename = send_string_to_all(
        stack_processed_by_ali2d_base__filename)
    stack_processed_by_ali2d_base__filename__without_master_dir = \
     send_string_to_all(stack_processed_by_ali2d_base__filename__without_master_dir)

    #  PARAMETERS OF THE PROCEDURE
    if (options.xr == -1):
        #  Default values
        target_nx = 76
        target_radius = 29
        target_xr = 1
    else:  #  nx//2
        #  Check below!
        target_xr = options.xr
        target_nx = 76 + target_xr - 1  # subtract one because the default xr of 1 is already included in 76
        target_radius = 29
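    # Example (hypothetical): running with --xr=3 gives target_xr = 3 and
    # target_nx = 76 + 3 - 1 = 78, while target_radius stays 29.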

    mpi_barrier(MPI_COMM_WORLD)

    # Initialization of stacks
    if (myid == main_node):
        number_of_images_in_stack = EMUtil.get_image_count(
            command_line_provided_stack_filename)
    else:
        number_of_images_in_stack = 0

    number_of_images_in_stack = bcast_number_to_all(number_of_images_in_stack,
                                                    source_node=main_node)

    nxrsteps = 4

    init2dir = os.path.join(masterdir, "2dalignment")

    if (myid == 0):
        import subprocess
        from logger import Logger, BaseLogger_Files
        #  Create output directory
        log2d = Logger(BaseLogger_Files())
        log2d.prefix = os.path.join(init2dir)
        cmd = "mkdir -p " + log2d.prefix
        outcome = subprocess.call(cmd, shell=True)
        log2d.prefix += "/"
        # outcome = subprocess.call("sxheader.py  "+command_line_provided_stack_filename+"   --params=xform.align2d  --zero", shell=True)
    else:
        outcome = 0
        log2d = None

    if (myid == main_node):
        a = get_im(command_line_provided_stack_filename)
        nnxo = a.get_xsize()
    else:
        nnxo = 0
    nnxo = bcast_number_to_all(nnxo, source_node=main_node)

    txrm = (nnxo - 2 * (radi + 1)) // 2
    if (txrm < 0):
        ERROR(
            "ERROR!!   Radius of the structure larger than the window data size permits   %d"
            % (radi), "sxisac", 1, myid)
    if (txrm / nxrsteps > 0):
        tss = ""
        txr = ""
        while (txrm / nxrsteps > 0):
            tts = txrm / nxrsteps
            tss += "  %d" % tts
            txr += "  %d" % (tts * nxrsteps)
            txrm = txrm // 2
    else:
        tss = "1"
        txr = "%d" % txrm

    # section ali2d_base

    #  centering method defaults to 7 (can be changed with --center_method)
    params2d, aligned_images = ali2d_base(command_line_provided_stack_filename, init2dir, None, 1, radi, 1, txr, txr, tss, \
       False, 90.0, center_method, 14, options.CTF, 1.0, False, \
       "ref_ali2d", "", log2d, nproc, myid, main_node, MPI_COMM_WORLD, write_headers = False)

    if (myid == main_node):
        write_text_row(params2d, os.path.join(init2dir, "initial2Dparams.txt"))
    del params2d
    mpi_barrier(MPI_COMM_WORLD)

    #  We assume the target image size will be target_nx, radius will be 29, and xr = 1.
    #  Note images can be also padded, in which case shrink_ratio > 1.
    shrink_ratio = float(target_radius) / float(radi)
    nx = aligned_images[0].get_xsize()
    nima = len(aligned_images)
    newx = int(nx * shrink_ratio + 0.5)
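    # Worked example (hypothetical numbers): with radi = 58 and target_radius = 29,
    # shrink_ratio = 29.0/58.0 = 0.5, so a 160-pixel image is resampled to
    # newx = int(160*0.5 + 0.5) = 80 pixels before windowing/padding to target_nx.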

    from fundamentals import rot_shift2D, resample
    from utilities import pad, combine_params2
    if (shrink_ratio < 1.0):
        if newx > target_nx:
            msk = model_circle(target_radius, target_nx, target_nx)
            for im in xrange(nima):
                #  Here we should use only shifts
                alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
                alpha, sx, sy, mirror = combine_params2(
                    0, sx, sy, 0, -alpha, 0, 0, 0)
                aligned_images[im] = rot_shift2D(aligned_images[im], 0, sx, sy,
                                                 0)
                aligned_images[im] = resample(aligned_images[im], shrink_ratio)
                aligned_images[im] = Util.window(aligned_images[im], target_nx,
                                                 target_nx, 1)
                p = Util.infomask(aligned_images[im], msk, False)
                aligned_images[im] -= p[0]
                p = Util.infomask(aligned_images[im], msk, True)
                aligned_images[im] /= p[1]
        elif newx == target_nx:
            msk = model_circle(target_radius, target_nx, target_nx)
            for im in xrange(nima):
                #  Here we should use only shifts
                alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
                alpha, sx, sy, mirror = combine_params2(
                    0, sx, sy, 0, -alpha, 0, 0, 0)
                aligned_images[im] = rot_shift2D(aligned_images[im], 0, sx, sy,
                                                 0)
                aligned_images[im] = resample(aligned_images[im], shrink_ratio)
                p = Util.infomask(aligned_images[im], msk, False)
                aligned_images[im] -= p[0]
                p = Util.infomask(aligned_images[im], msk, True)
                aligned_images[im] /= p[1]
        elif newx < target_nx:
            msk = model_circle(nx // 2 - 2, newx, newx)
            for im in xrange(nima):
                #  Here we should use only shifts
                alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
                alpha, sx, sy, mirror = combine_params2(
                    0, sx, sy, 0, -alpha, 0, 0, 0)
                aligned_images[im] = rot_shift2D(aligned_images[im], 0, sx, sy,
                                                 0)
                aligned_images[im] = resample(aligned_images[im], shrink_ratio)
                p = Util.infomask(aligned_images[im], msk, False)
                aligned_images[im] -= p[0]
                p = Util.infomask(aligned_images[im], msk, True)
                aligned_images[im] /= p[1]
                aligned_images[im] = pad(aligned_images[im], target_nx,
                                         target_nx, 1, 0.0)
    elif (shrink_ratio == 1.0):
        if newx > target_nx:
            msk = model_circle(target_radius, target_nx, target_nx)
            for im in xrange(nima):
                #  Here we should use only shifts
                alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
                alpha, sx, sy, mirror = combine_params2(
                    0, sx, sy, 0, -alpha, 0, 0, 0)
                aligned_images[im] = rot_shift2D(aligned_images[im], 0, sx, sy,
                                                 0)
                aligned_images[im] = Util.window(aligned_images[im], target_nx,
                                                 target_nx, 1)
                p = Util.infomask(aligned_images[im], msk, False)
                aligned_images[im] -= p[0]
                p = Util.infomask(aligned_images[im], msk, True)
                aligned_images[im] /= p[1]
        elif newx == target_nx:
            msk = model_circle(target_radius, target_nx, target_nx)
            for im in xrange(nima):
                #  Here we should use only shifts
                alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
                alpha, sx, sy, mirror = combine_params2(
                    0, sx, sy, 0, -alpha, 0, 0, 0)
                aligned_images[im] = rot_shift2D(aligned_images[im], 0, sx, sy,
                                                 0)
                p = Util.infomask(aligned_images[im], msk, False)
                aligned_images[im] -= p[0]
                p = Util.infomask(aligned_images[im], msk, True)
                aligned_images[im] /= p[1]
        elif newx < target_nx:
            msk = model_circle(nx // 2 - 2, newx, newx)
            for im in xrange(nima):
                #  Here we should use only shifts
                alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
                alpha, sx, sy, mirror = combine_params2(
                    0, sx, sy, 0, -alpha, 0, 0, 0)
                aligned_images[im] = rot_shift2D(aligned_images[im], 0, sx, sy,
                                                 0)
                aligned_images[im] = resample(aligned_images[im], shrink_ratio)
                p = Util.infomask(aligned_images[im], msk, False)
                aligned_images[im] -= p[0]
                p = Util.infomask(aligned_images[im], msk, True)
                aligned_images[im] /= p[1]
                aligned_images[im] = pad(aligned_images[im], target_nx,
                                         target_nx, 1, 0.0)
    elif (shrink_ratio > 1.0):
        if newx > target_nx:
            msk = model_circle(target_radius, target_nx, target_nx)
            for im in xrange(nima):
                #  Here we should use only shifts
                alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
                alpha, sx, sy, mirror = combine_params2(
                    0, sx, sy, 0, -alpha, 0, 0, 0)
                aligned_images[im] = rot_shift2D(aligned_images[im], 0, sx, sy,
                                                 0)
                aligned_images[im] = Util.window(aligned_images[im], target_nx,
                                                 target_nx, 1)
                p = Util.infomask(aligned_images[im], msk, False)
                aligned_images[im] -= p[0]
                p = Util.infomask(aligned_images[im], msk, True)
                aligned_images[im] /= p[1]
        elif newx == target_nx:
            msk = model_circle(target_radius, target_nx, target_nx)
            for im in xrange(nima):
                #  Here we should use only shifts
                alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
                alpha, sx, sy, mirror = combine_params2(
                    0, sx, sy, 0, -alpha, 0, 0, 0)
                aligned_images[im] = rot_shift2D(aligned_images[im], 0, sx, sy,
                                                 0)
                p = Util.infomask(aligned_images[im], msk, False)
                aligned_images[im] -= p[0]
                p = Util.infomask(aligned_images[im], msk, True)
                aligned_images[im] /= p[1]
        elif newx < target_nx:
            msk = model_circle(target_radius, nx, nx)
            for im in xrange(nima):
                #  Here we should use only shifts
                alpha, sx, sy, mirror, scale = get_params2D(aligned_images[im])
                alpha, sx, sy, mirror = combine_params2(
                    0, sx, sy, 0, -alpha, 0, 0, 0)
                aligned_images[im] = rot_shift2D(aligned_images[im], 0, sx, sy,
                                                 0)
                p = Util.infomask(aligned_images[im], msk, False)
                aligned_images[im] -= p[0]
                p = Util.infomask(aligned_images[im], msk, True)
                aligned_images[im] /= p[1]
                aligned_images[im] = pad(aligned_images[im], target_nx,
                                         target_nx, 1, 0.0)
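    # Whatever branch was taken, each image is now normalized and has size
    # target_nx x target_nx (windowed down or padded up as needed).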
    del msk

    gather_compacted_EMData_to_root(number_of_images_in_stack, aligned_images,
                                    myid)
    number_of_images_in_stack = bcast_number_to_all(number_of_images_in_stack,
                                                    source_node=main_node)

    if (myid == main_node):
        for i in range(number_of_images_in_stack):
            aligned_images[i].write_image(
                stack_processed_by_ali2d_base__filename, i)
        #  It has to be explicitly closed
        from EMAN2db import db_open_dict
        DB = db_open_dict(stack_processed_by_ali2d_base__filename)
        DB.close()

    mpi_barrier(MPI_COMM_WORLD)

    global_def.BATCH = True

    os.chdir(masterdir)

    if program_state_stack(locals(), getframeinfo(currentframe())):
        # if 1:
        pass
        if (myid == main_node):
            cmdexecute(
                "sxheader.py  --consecutive  --params=originalid   %s" %
                stack_processed_by_ali2d_base__filename__without_master_dir)
            cmdexecute(
                "e2bdb.py %s --makevstack=%s_000" %
                (stack_processed_by_ali2d_base__filename__without_master_dir,
                 stack_processed_by_ali2d_base__filename__without_master_dir))

    if (myid == main_node):
        main_dir_no = get_latest_directory_increment_value("./",
                                                           NAME_OF_MAIN_DIR,
                                                           myformat="%04d")
        print "isac_generation_from_command_line", isac_generation_from_command_line, main_dir_no
        if isac_generation_from_command_line < 0:
            if os.path.exists(NAME_OF_JSON_STATE_FILE):
                stored_stack, stored_state = restore_program_stack_and_state(
                    NAME_OF_JSON_STATE_FILE)
                if "isac_generation" in stored_state[-1]:
                    isac_generation_from_command_line = stored_state[-1][
                        "isac_generation"]
                else:
                    isac_generation_from_command_line = -1
        if isac_generation_from_command_line >= 0 and isac_generation_from_command_line <= main_dir_no:
            for i in xrange(isac_generation_from_command_line + 1,
                            main_dir_no + 1):
                if i == isac_generation_from_command_line + 1:
                    backup_dir_no = get_nonexistent_directory_increment_value(
                        "./", "000_backup", myformat="%05d", start_value=1)
                    cmdexecute("mkdir -p " + "000_backup" +
                               "%05d" % backup_dir_no)
                cmdexecute("mv  " + NAME_OF_MAIN_DIR + "%04d" % i +
                           " 000_backup" + "%05d" % backup_dir_no)
                cmdexecute(
                    "rm  " + "EMAN2DB/" +
                    stack_processed_by_ali2d_base__filename__without_master_dir[
                        4:] + "_%03d.bdb" % i)

            # it includes both command line and json file
            my_restart_section = stored_state[-1]["location_in_program"].split(
                "___")[-1]
            if "restart" in my_restart_section:
                if "backup_dir_no" not in locals():
                    backup_dir_no = get_nonexistent_directory_increment_value(
                        "./", "000_backup", myformat="%05d", start_value=1)
                    cmdexecute("mkdir -p " + "000_backup" +
                               "%05d" % backup_dir_no)
                cmdexecute("mv  " + NAME_OF_MAIN_DIR +
                           "%04d" % isac_generation_from_command_line +
                           " 000_backup" + "%05d" % backup_dir_no)
                cmdexecute(
                    "rm  " + "EMAN2DB/" +
                    stack_processed_by_ali2d_base__filename__without_master_dir[
                        4:] + "_%03d.bdb" % isac_generation_from_command_line)
            elif "candidate_class_averages" in my_restart_section:
                if "backup_dir_no" not in locals():
                    backup_dir_no = get_nonexistent_directory_increment_value(
                        "./", "000_backup", myformat="%05d", start_value=1)
                    cmdexecute("mkdir -p " + "000_backup" +
                               "%05d" % backup_dir_no)
                cmdexecute("mv  " + NAME_OF_MAIN_DIR +
                           "%04d" % isac_generation_from_command_line +
                           " 000_backup" + "%05d" % backup_dir_no)
                cmdexecute("mkdir -p " + NAME_OF_MAIN_DIR +
                           "%04d" % isac_generation_from_command_line)
                # cmdexecute("rm -f " + NAME_OF_MAIN_DIR + "%04d/class_averages_candidate*"%isac_generation_from_command_line)
            elif "reproducible_class_averages" in my_restart_section:
                cmdexecute("rm -rf " + NAME_OF_MAIN_DIR +
                           "%04d/ali_params_generation_*" %
                           isac_generation_from_command_line)
                cmdexecute("rm -f " + NAME_OF_MAIN_DIR +
                           "%04d/class_averages_generation*" %
                           isac_generation_from_command_line)
        else:
            if os.path.exists(NAME_OF_JSON_STATE_FILE):
                stored_stack, stored_state = restore_program_stack_and_state(
                    NAME_OF_JSON_STATE_FILE)
                if "isac_generation" in stored_state[-1]:
                    isac_generation_from_command_line = stored_state[-1][
                        "isac_generation"]
                else:
                    isac_generation_from_command_line = 1
            else:
                isac_generation_from_command_line = 1
    else:
        isac_generation_from_command_line = 0

    isac_generation_from_command_line = mpi_bcast(
        isac_generation_from_command_line, 1, MPI_INT, 0, MPI_COMM_WORLD)[0]
    isac_generation = isac_generation_from_command_line - 1
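    # isac_generation is incremented at the top of the while loop below, so we
    # start one generation below the requested (or restored) value.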

    if (myid == main_node):
        if isac_generation == 0:
            cmdexecute("mkdir -p " + NAME_OF_MAIN_DIR +
                       "%04d" % isac_generation)
            write_text_file(
                [1],
                os.path.join(NAME_OF_MAIN_DIR + "%04d" % isac_generation,
                             "generation_%d_accounted.txt" % isac_generation))
            write_text_file(
                range(number_of_images_in_stack),
                os.path.join(NAME_OF_MAIN_DIR + "%04d" % isac_generation,
                             "generation_%d_unaccounted.txt" %
                             isac_generation))

    #  Stopping criterion should be inside the program.
    while True:
        isac_generation += 1
        if isac_generation > options.n_generations:
            break

        data64_stack_current = "bdb:../" + stack_processed_by_ali2d_base__filename__without_master_dir[
            4:] + "_%03d" % isac_generation

        if (myid == main_node):
            accounted_images = read_text_file(
                os.path.join(
                    NAME_OF_MAIN_DIR + "%04d" % (isac_generation - 1),
                    "generation_%d_accounted.txt" % (isac_generation - 1)))
            number_of_accounted_images = len(accounted_images)
            # unaccounted_images = read_text_file(os.path.join(NAME_OF_MAIN_DIR + "%04d"%(isac_generation - 1),"generation_%d_unaccounted.txt"%(isac_generation - 1)))
            # number_of_unaccounted_images = len(unaccounted_images)
        else:
            number_of_accounted_images = 0

        number_of_accounted_images = int(
            mpi_bcast(number_of_accounted_images, 1, MPI_INT, 0,
                      MPI_COMM_WORLD)[0])
        if number_of_accounted_images == 0:
            os.chdir("..")
            break

        program_state_stack.restart_location_title = "restart"
        if program_state_stack(locals(), getframeinfo(currentframe())):
            if (myid == main_node):
                cmdexecute("mkdir -p " + NAME_OF_MAIN_DIR +
                           "%04d" % isac_generation)
                # reference the original stack
                list_file = os.path.join(
                    NAME_OF_MAIN_DIR + "%04d" % (isac_generation - 1),
                    "generation_%d_unaccounted.txt" % (isac_generation - 1))
                cmdexecute("e2bdb.py %s --makevstack=%s --list=%s"%(stack_processed_by_ali2d_base__filename__without_master_dir,\
                  stack_processed_by_ali2d_base__filename__without_master_dir + "_%03d"%isac_generation, list_file))
            mpi_barrier(MPI_COMM_WORLD)

        os.chdir(NAME_OF_MAIN_DIR + "%04d" % isac_generation)

        program_state_stack.restart_location_title = "candidate_class_averages"
        if program_state_stack(locals(), getframeinfo(currentframe())):

            iter_isac(data64_stack_current, options.ir, target_radius, options.rs, target_xr, target_xr, options.ts, options.maxit, False, 1.0,\
             options.dst, options.FL, options.FH, options.FF, options.init_iter, options.main_iter, options.iter_reali, options.match_first, \
             options.max_round, options.match_second, options.stab_ali, options.thld_err, options.indep_run, options.thld_grp, \
             options.img_per_grp, isac_generation, False, random_seed=options.rand_seed, new=False)#options.new)

        # program_state_stack.restart_location_title = "stopped_program1"
        # program_state_stack(locals(), getframeinfo(currentframe()))

        program_state_stack.restart_location_title = "stop_after_candidates"
        program_state_stack(locals(), getframeinfo(currentframe()))
        if stop_after_candidates:
            mpi_finalize()
            sys.exit()

        exit_program = 0
        if (myid == main_node):
            if not os.path.exists(
                    "class_averages_candidate_generation_%d.hdf" %
                    isac_generation):
                print "This generation (%d) no class averages were generated!" % isac_generation
                exit_program = 1
        exit_program = int(
            mpi_bcast(exit_program, 1, MPI_INT, 0, MPI_COMM_WORLD)[0])
        if exit_program:
            os.chdir("..")
            break

        program_state_stack.restart_location_title = "reproducible_class_averages"
        if program_state_stack(locals(), getframeinfo(currentframe())):


            iter_isac(data64_stack_current, options.ir, target_radius, options.rs, target_xr, target_xr, options.ts, options.maxit, False, 1.0,\
             options.dst, options.FL, options.FH, options.FF, options.init_iter, options.main_iter, options.iter_reali, options.match_first, \
             options.max_round, options.match_second, options.stab_ali, options.thld_err, options.indep_run, options.thld_grp, \
             options.img_per_grp, isac_generation, True, random_seed=options.rand_seed, new=False)#options.new)
            pass

        os.chdir("..")

        if (myid == main_node):
            cmdexecute("rm -f class_averages.hdf")
            cpy([
                "generation_%04d/class_averages_generation_%d.hdf" % (i, i)
                for i in xrange(1, isac_generation)
            ], "class_averages.hdf")

        # program_state_stack.restart_location_title = "stopped_program2"
        # program_state_stack(locals(), getframeinfo(currentframe()))

    program_state_stack(locals(),
                        getframeinfo(currentframe()),
                        last_call="__LastCall")

    mpi_finalize()
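# A hypothetical invocation of this driver (stack and directory names are
# placeholders); it is launched under MPI because the script calls mpi_init itself:
#   mpirun -np 8 sxisac.py bdb:particles master_run_01 --radius=29 --img_per_grp=100 --CTF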
Example #12
0
def align_diff_params(ali_params1, ali_params2):
	'''
	This function determines the relative angle, shifts and mirrorness between
	two sets of alignment parameters.	
	'''
	from math import cos, sin, pi, atan
	from utilities import combine_params2
	
	nima = len(ali_params1)
	nima2 = len(ali_params2)
	if nima2 != nima:
		print "Error: Number of images do not agree!"
		return 0.0, 0.0, 0.0, 0
	else:
		nima /= 4
		del nima2

	# Read the alignment parameters and determine the relative mirrorness
	mirror_same = 0
	for i in xrange(nima):
		if ali_params1[i*4+3] == ali_params2[i*4+3]: mirror_same += 1
	if mirror_same > nima/2:
		mirror = 0
	else:
		mirror_same = nima-mirror_same
		mirror = 1

	# Determine the relative angle
	cosi = 0.0
	sini = 0.0
	angle1 = []
	angle2 = []
	for i in xrange(nima):
		mirror1 = ali_params1[i*4+3]
		mirror2 = ali_params2[i*4+3]
		if abs(mirror1-mirror2) == mirror: 
			alpha1 = ali_params1[i*4]
			alpha2 = ali_params2[i*4]
			if mirror1 == 1:
				alpha1 = -alpha1
				alpha2 = -alpha2
			angle1.append(alpha1)
			angle2.append(alpha2)
	alphai = angle_diff(angle1, angle2)

	# Determine the relative shift
	sxi = 0.0
	syi = 0.0
	for i in xrange(nima):
		mirror1 = ali_params1[i*4+3]
		mirror2 = ali_params2[i*4+3]
		if abs(mirror1-mirror2) == mirror: 
			alpha1 = ali_params1[i*4]
			#alpha2 = ali_params2[i*4]
			sx1 = ali_params1[i*4+1]
			sx2 = ali_params2[i*4+1]
			sy1 = ali_params1[i*4+2]
			sy2 = ali_params2[i*4+2]
			alpha12, sx12, sy12, mirror12 = combine_params2(alpha1, sx1, sy1, int(mirror1), alphai, 0.0, 0.0, 0)
			# The x-shift changes sign under the mirror operation, hence the subtraction for mirrored images
			if mirror1 == 0: sxi += sx2-sx12
			else: sxi -= sx2-sx12
			syi += sy2-sy12

	sxi /= mirror_same
	syi /= mirror_same

	return alphai, sxi, syi, mirror
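# A minimal usage sketch (not part of the original example; all numbers are
# made up). Each input is a flat list with four entries [alpha, sx, sy, mirror]
# per image, e.g. from two independent 2D alignment runs. It assumes angle_diff
# (referenced above) and the SPARX utilities module are importable.
ali_run1 = [12.0, 1.5, -0.5, 0, 85.0, -2.0, 0.3, 1]
ali_run2 = [14.1, 1.2, -0.1, 0, 86.9, -2.3, 0.6, 1]
alphai, sxi, syi, mirror = align_diff_params(ali_run1, ali_run2)
print "relative angle %.2f, shifts (%.2f, %.2f), mirror %d" % (alphai, sxi, syi, mirror)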