Example #1
File: sxrviper.py  Project: raj347/eman2
def calculate_list_of_independent_viper_run_indices_used_for_outlier_elimination(no_of_viper_runs_analyzed_together, 
	no_of_viper_runs_analyzed_together_from_user_options, masterdir, rviper_iter, criterion_name):

	from utilities import combinations_of_n_taken_by_k
	# The module-level constants (DIR_DELIM, NAME_OF_MAIN_DIR, MUST_END_PROGRAM_THIS_ITERATION, ...)
	# and the helpers measure_for_outlier_criterion / plot_errors_between_any_number_of_projections
	# used below are defined elsewhere in the project; the mpi import is added here so the
	# snippet is self-contained.
	from mpi import mpi_comm_size, mpi_comm_rank, mpi_reduce, mpi_barrier
	from mpi import MPI_COMM_WORLD, MPI_FLOAT, MPI_SUM

	# generate all possible combinations of (no_of_viper_runs_analyzed_together - 1) taken (3 - 1) at a time
	import itertools

	number_of_additional_combinations_for_this_viper_iteration = combinations_of_n_taken_by_k(no_of_viper_runs_analyzed_together - 1,
																		  no_of_viper_runs_analyzed_together_from_user_options - 1)

	criterion_measure = [0.0] * number_of_additional_combinations_for_this_viper_iteration
	all_n_minus_1_combinations_taken_k_minus_1_at_a_time = list(itertools.combinations(range(no_of_viper_runs_analyzed_together - 1),
																  no_of_viper_runs_analyzed_together_from_user_options - 1))

	no_of_processors = mpi_comm_size(MPI_COMM_WORLD)
	my_rank = mpi_comm_rank(MPI_COMM_WORLD)

	for idx, tuple_of_projection_indices in enumerate(all_n_minus_1_combinations_taken_k_minus_1_at_a_time):
		if (my_rank == idx % no_of_processors):
			list_of_viper_run_indices = list(tuple_of_projection_indices) + [no_of_viper_runs_analyzed_together - 1]
			criterion_measure[idx] = measure_for_outlier_criterion(criterion_name, masterdir, rviper_iter, list_of_viper_run_indices)
			plot_errors_between_any_number_of_projections(masterdir, rviper_iter, list_of_viper_run_indices, criterion_measure[idx])

	criterion_measure = mpi_reduce(criterion_measure, number_of_additional_combinations_for_this_viper_iteration, MPI_FLOAT, MPI_SUM, 0, MPI_COMM_WORLD)

	if (my_rank == 0):
		index_of_sorted_criterion_measure_list = [i[0] for i in sorted(enumerate(criterion_measure), reverse=False, key=lambda x: x[1])]

		list_of_viper_run_indices_for_the_current_rrr_viper_iteration = list(all_n_minus_1_combinations_taken_k_minus_1_at_a_time[index_of_sorted_criterion_measure_list[0]]) + \
																		[no_of_viper_runs_analyzed_together - 1]

		mainoutputdir = masterdir + DIR_DELIM + NAME_OF_MAIN_DIR + ("%03d" + DIR_DELIM) % (rviper_iter)

		if criterion_measure[index_of_sorted_criterion_measure_list[0]] == TRIPLET_WITH_ANGLE_ERROR_LESS_THAN_THRESHOLD_HAS_BEEN_FOUND:
			list_of_viper_run_indices_for_the_current_rrr_viper_iteration.insert(0,MUST_END_PROGRAM_THIS_ITERATION)
		else:
			list_of_viper_run_indices_for_the_current_rrr_viper_iteration.insert(0,DUMMY_INDEX_USED_AS_BUFFER)
			if criterion_name == "80th percentile":
				pass_criterion = criterion_measure[index_of_sorted_criterion_measure_list[0]] < PERCENT_THRESHOLD_Y
			elif criterion_name == "fastest increase in the last quartile":
				pass_criterion = criterion_measure[index_of_sorted_criterion_measure_list[-1]] > PERCENT_THRESHOLD_Y
			else:
				pass_criterion = False
	
			if not pass_criterion:
				list_of_viper_run_indices_for_the_current_rrr_viper_iteration = [EMPTY_VIPER_RUN_INDICES_LIST]

		import json; f = open(mainoutputdir + "list_of_viper_runs_included_in_outlier_elimination.json", 'w')
		json.dump(list_of_viper_run_indices_for_the_current_rrr_viper_iteration[1:],f); f.close()

		mpi_barrier(MPI_COMM_WORLD)
		return list_of_viper_run_indices_for_the_current_rrr_viper_iteration

	mpi_barrier(MPI_COMM_WORLD)

	return [EMPTY_VIPER_RUN_INDICES_LIST]
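The core pattern above is a round-robin split of the criterion evaluations followed by a single collective: every rank fills only the slots of a fixed-size list selected by idx % no_of_processors, and mpi_reduce with MPI_SUM assembles the complete list on rank 0, because the untouched slots stay at 0.0. Below is a minimal sketch of that pattern (not project code), assuming the same EMAN2/SPARX "mpi" wrapper used throughout these examples and a launch under mpirun.

# Minimal sketch: round-robin task ownership + MPI_SUM reduce onto rank 0.
from mpi import mpi_init, mpi_finalize, mpi_comm_size, mpi_comm_rank, mpi_reduce
from mpi import MPI_COMM_WORLD, MPI_FLOAT, MPI_SUM

mpi_init(0, [])
no_of_processors = mpi_comm_size(MPI_COMM_WORLD)
my_rank = mpi_comm_rank(MPI_COMM_WORLD)

number_of_tasks = 10
partial_measure = [0.0] * number_of_tasks
for idx in range(number_of_tasks):
	if my_rank == idx % no_of_processors:
		# stand-in for measure_for_outlier_criterion(...); each rank fills only its own slots
		partial_measure[idx] = float(idx * idx)

# element-wise sum onto rank 0; slots left at 0.0 on the other ranks do not disturb the result
full_measure = mpi_reduce(partial_measure, number_of_tasks, MPI_FLOAT, MPI_SUM, 0, MPI_COMM_WORLD)
if my_rank == 0:
	print map(float, full_measure)   # the complete list is only meaningful on the root rank
mpi_finalize()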
Example #2
def comp_rep(refrings, data, itout, modout, vol, group, nima, nx, myid,
             main_node, outdir):
    import os
    from fundamentals import rot_shift2D
    from utilities import get_params_proj, params_3D_2D, reduce_EMData_to_root
    from mpi import mpi_reduce, MPI_COMM_WORLD, MPI_FLOAT, MPI_SUM
    # EMData and Transform are provided by the EMAN2 core module (imported at module level)
    avg = [EMData() for i in xrange(len(refrings))]
    avg_csum = [0.0 for i in xrange(len(refrings))]
    for i in xrange(len(refrings)):
        avg[i] = EMData()
        avg[i].set_size(nx, nx)
        phi = refrings[i].get_attr("phi")
        theta = refrings[i].get_attr("theta")
        t = Transform({
            "type": "spider",
            "phi": phi,
            "theta": theta,
            "psi": 0.0
        })
        avg[i].set_attr("xform.projection", t)

    for im in xrange(nima):
        iref = data[im].get_attr("assign")
        gim = data[im].get_attr("group")
        if gim == group:
            [phi, theta, psi, s2x, s2y] = get_params_proj(data[im])
            [alpha, sx, sy, mirror] = params_3D_2D(phi, theta, psi, s2x, s2y)
            temp = rot_shift2D(data[im], alpha, sx, sy, mirror, 1.0)
            avg[iref] = avg[iref] + temp
            avg_csum[iref] = avg_csum[iref] + 1
    for i in xrange(len(refrings)):
        reduce_EMData_to_root(avg[i], myid, main_node)
        avg_sum = mpi_reduce(avg_csum[i], 1, MPI_FLOAT, MPI_SUM, 0,
                             MPI_COMM_WORLD)
        outfile_repro = os.path.join(outdir,
                                     "repro_%s%s.hdf" % (itout, modout))
        if myid == 0:
            outfile = os.path.join(outdir,
                                   "compare_repro_%s%s.hdf" % (itout, modout))
            avg[i].write_image(outfile, -1)
            t = avg[i].get_attr("xform.projection")
            proj = vol.project("pawel", t)
            proj.set_attr("xform.projection", t)
            proj.set_attr("Raw_im_count", float(avg_sum))
            proj.write_image(outfile, -1)
            proj.write_image(outfile_repro, -1)
    return outfile_repro
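comp_rep pairs two reductions per class average: reduce_EMData_to_root sums the partial average images pixel-wise onto the main node, while a plain mpi_reduce with MPI_SUM totals the per-class particle counts. The stripped-down sketch below shows just that pairing, assuming EMAN2's EMData and the SPARX utilities/mpi modules are importable as in the examples.

# Sketch only: combine an image reduction with a scalar count reduction, as comp_rep does.
from EMAN2 import EMData                      # EMData is assumed to live in the EMAN2 core module
from utilities import reduce_EMData_to_root
from mpi import mpi_reduce, MPI_COMM_WORLD, MPI_FLOAT, MPI_SUM


def reduce_class_average(avg_img, local_count, myid, main_node=0):
    """Sum partial class-average images and their particle counts onto main_node."""
    reduce_EMData_to_root(avg_img, myid, main_node)      # pixel-wise sum of the EMData images
    total = mpi_reduce(local_count, 1, MPI_FLOAT, MPI_SUM, main_node, MPI_COMM_WORLD)
    if myid == main_node:
        return avg_img, float(total[0])                  # totals are only meaningful on the root
    return avg_img, None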
Example #3
def cml_find_structure2(Prj, Ori, Rot, outdir, outname, maxit, first_zero, flag_weights, myid, main_node, number_of_proc):
	from projection import cml_export_progress, cml_disc, cml_export_txtagls
	import time, sys
	from random import shuffle,random

	from mpi import MPI_FLOAT, MPI_INT, MPI_SUM, MPI_COMM_WORLD
	from mpi import mpi_reduce, mpi_bcast, mpi_barrier

	# module-level globals (initialized elsewhere in this module before the search starts)
	global g_i_prj, g_n_prj, g_n_anglst, g_anglst, g_d_psi, g_n_psi, g_debug, g_n_lines, g_seq

	# list of free orientation
	ocp = [-1] * g_n_anglst

	if first_zero:
		listprj = range(1, g_n_prj)
		ocp[0]  = 0 
	else:   listprj = range(g_n_prj)

	# to stop when the solution oscillates
	period_disc = [0, 0, 0]
	period_ct   = 0
	period_th   = 2
	#if not flag_weights:   weights = [1.0] * g_n_lines

	# iteration loop
	for ite in xrange(maxit):
		#print ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>    ite = ", ite, "   myid = ", myid
		t_start = time.time()

		# loop over i prj
		change = False
		tlistprj = listprj[:]
		shuffle(tlistprj)
		nnn = len(tlistprj)
		tlistprj = mpi_bcast(tlistprj, nnn, MPI_INT, main_node, MPI_COMM_WORLD)
		tlistprj = map(int, tlistprj)
		"""
		if(ite>1 and ite%5 == 0  and ite<140):
			if(myid == main_node):
				for i in xrange(0,len(tlistprj),5):
					ind          = 4*i
					Ori[ind]      =  360.*random()
					Ori[ind+1]    =  180.*random()
					Ori[ind+2]    =  360.*random()
					Ori[ind+3]    =  -1
				for i in xrange(len(tlistprj)):
					ind          = 4*i
					Ori[ind+3]    = float(Ori[ind+3])
			nnn = len(Ori)
			Ori = mpi_bcast(Ori, nnn, MPI_FLOAT, main_node, MPI_COMM_WORLD)
			Ori = map(float, Ori)
			for i in xrange(len(tlistprj)):
				ind          = 4*i
				Ori[ind+3]    = int(Ori[ind+3])
		"""

		for iprj in tlistprj:
			#print "**********************************  iprj = ", iprj, g_n_anglst

			# Store current the current orientation
			ind          = 4*iprj
			store_phi    = Ori[ind]
			store_theta  = Ori[ind+1]
			store_psi    = Ori[ind+2]
			cur_agl      = Ori[ind+3]
			if cur_agl  != -1: ocp[cur_agl] = -1

			# prepare active index of cml for weighting in order to earn time later
			iw = [0] * (g_n_prj - 1)
			c  = 0
			ct = 0
			for i in xrange(g_n_prj):
				for j in xrange(i+1, g_n_prj):
					if i == iprj or j == iprj:
						iw[ct] = c
						ct += 1
					c += 1

			# loop over all angles
			best_disc_list = [0]*g_n_anglst
			best_psi_list  = [0]*g_n_anglst
			for iagl in xrange(myid, g_n_anglst, number_of_proc):
				# if orientation is free
				if ocp[iagl] == -1:
					# assign new orientation
					Ori[ind]   = g_anglst[iagl][0]
					Ori[ind+1] = g_anglst[iagl][1]
					Rot        = Util.cml_update_rot(Rot, iprj, Ori[ind], Ori[ind+1], 0.0)
					# weights
					if flag_weights:
						cml = Util.cml_line_in3d(Ori, g_seq, g_n_prj, g_n_lines)
						weights = Util.cml_weights(cml)
						mw  = max(weights)
						for i in xrange(g_n_lines): weights[i]  = mw - weights[i]
						sw = sum(weights)
						if sw == 0:
							weights = [6.28 / float(g_n_lines)] * g_n_lines
						else:
							for i in xrange(g_n_lines):
								weights[i] /= sw
								weights[i] *= weights[i]

					# spin all psi
					com = Util.cml_line_insino(Rot, iprj, g_n_prj)
					if flag_weights:
						res = Util.cml_spin_psi(Prj, com, weights, iprj, iw, g_n_psi, g_d_psi, g_n_prj)
					else:
						res = Util.cml_spin_psi_now(Prj, com, iprj, iw, g_n_psi, g_d_psi, g_n_prj)

					# select the best
					best_disc_list[iagl] = res[0]
					best_psi_list[iagl]  = res[1]

					if g_debug: cml_export_progress(outdir, ite, iprj, iagl, res[1], res[0], 'progress')
				else:
					if g_debug: cml_export_progress(outdir, ite, iprj, iagl, -1, -1, 'progress')
			best_disc_list = mpi_reduce(best_disc_list, g_n_anglst, MPI_FLOAT, MPI_SUM, main_node, MPI_COMM_WORLD)
			best_psi_list = mpi_reduce(best_psi_list, g_n_anglst, MPI_FLOAT, MPI_SUM, main_node, MPI_COMM_WORLD)

			best_psi  = -1
			best_iagl = -1
			best_disc = -1.0  # defined on every rank so the g_debug export below cannot hit an undefined name

			if myid == main_node:
				best_disc = 1.0e20
				for iagl in xrange(g_n_anglst):
					if best_disc_list[iagl] > 0.0 and best_disc_list[iagl] < best_disc:
						best_disc = best_disc_list[iagl]
						best_psi = best_psi_list[iagl]
						best_iagl = iagl
			best_psi = mpi_bcast(best_psi, 1, MPI_FLOAT, main_node, MPI_COMM_WORLD)
			best_iagl = mpi_bcast(best_iagl, 1, MPI_INT, main_node, MPI_COMM_WORLD)
			best_psi = float(best_psi[0])
			best_iagl =  int(best_iagl[0])
			
			#print "xxxxx myid = ", myid, "    best_psi = ", best_psi, "   best_ialg = ", best_iagl

			# if change, assign
			if best_iagl != cur_agl:
				ocp[best_iagl] = iprj
				Ori[ind]       = g_anglst[best_iagl][0] # phi
				Ori[ind+1]     = g_anglst[best_iagl][1] # theta
				Ori[ind+2]     = best_psi * g_d_psi     # psi
				Ori[ind+3]     = best_iagl              # index
				change = True
			else:
				if cur_agl != -1: ocp[cur_agl] = iprj
				Ori[ind]    = store_phi
				Ori[ind+1]  = store_theta
				Ori[ind+2]  = store_psi
				Ori[ind+3]  = cur_agl

			Rot = Util.cml_update_rot(Rot, iprj, Ori[ind], Ori[ind+1], Ori[ind+2])

			if g_debug: cml_export_progress(outdir, ite, iprj, best_iagl, best_psi * g_d_psi, best_disc, 'choose')

		# if one change, compute new full disc
		disc = cml_disc(Prj, Ori, Rot, flag_weights)

		# display in the progress file
		if myid == main_node:
			cml_export_txtagls(outdir, outname, Ori, disc, 'Ite: %03i' % (ite + 1))

		if not change: break

		# to stop when the solution oscillates
		period_disc.pop(0)
		period_disc.append(disc)
		if period_disc[0] == period_disc[2]:
			period_ct += 1
			if period_ct >= period_th and min(period_disc) == disc and myid == main_node:
				angfile = open(outdir + '/' + outname, 'a')
				angfile.write('\nSTOP SOLUTION UNSTABLE\n')
				angfile.write('Discrepancy period: %s\n' % period_disc)
				angfile.close()
				break
		else:
			period_ct = 0
		mpi_barrier(MPI_COMM_WORLD)

	return Ori, disc, ite
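The inner loop of cml_find_structure2 is a reduce-then-broadcast selection: each rank scores a strided subset of the candidate orientations into zero-initialized lists, mpi_reduce with MPI_SUM merges the lists on the main node, the main node picks the best entry, and mpi_bcast sends the winning index back so every rank applies the same update. Below is a compact sketch of that selection step, assuming the same mpi wrapper; scores_for is a hypothetical per-candidate scoring callback.

# Sketch only: strided scoring + MPI_SUM reduce + broadcast of the chosen index.
from mpi import mpi_comm_rank, mpi_comm_size, mpi_reduce, mpi_bcast
from mpi import MPI_COMM_WORLD, MPI_FLOAT, MPI_INT, MPI_SUM

def pick_best_candidate(scores_for, n_candidates, main_node=0):
	myid  = mpi_comm_rank(MPI_COMM_WORLD)
	nproc = mpi_comm_size(MPI_COMM_WORLD)
	disc = [0.0] * n_candidates
	for i in xrange(myid, n_candidates, nproc):   # strided ownership, like the iagl loop above
		disc[i] = scores_for(i)                   # hypothetical stand-in for Util.cml_spin_psi(...)
	disc = mpi_reduce(disc, n_candidates, MPI_FLOAT, MPI_SUM, main_node, MPI_COMM_WORLD)
	best = -1
	if myid == main_node:                         # only the root sees the merged scores
		best = min(xrange(n_candidates), key=lambda i: disc[i])
	best = mpi_bcast(best, 1, MPI_INT, main_node, MPI_COMM_WORLD)
	return int(best[0])                           # every rank now agrees on the winner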
Example #4
# Excerpt from an MPI Jacobi relaxation script: the grids psi/new_psi, the helpers
# do_force, bc, do_transfer, do_jacobi and walltime, and the variables vals, myid, t1
# and i1/i2/j1/j2 are all set up earlier in the full script.
# set initial guess for the value of the grid
psi[:, :] = 1.0
do_force(forf, i1, i2, j1, j2)
#set boundary conditions
bc(psi, i1, i2, j1, j2)

new_psi[:, :] = psi[:, :]
iout = vals.steps / 100
if (iout == 0):
    iout = 1
iw = 0
r1 = range(1, (i2 - i1) + 2)
r2 = range(1, (j2 - j1) + 2)
ttot = 0
for i in range(0, vals.steps):
    do_transfer(psi, i1, i2, j1, j2)
    diff = do_jacobi(psi, new_psi, i1, i2, j1, j2)
    diff = mpi.mpi_reduce(diff, 1, mpi.MPI_DOUBLE, mpi.MPI_SUM, 0,
                          mpi.MPI_COMM_WORLD)
    if (myid == 0):
        if ((i + 1) % iout) == 0:
            print(i + 1, diff[0])
t2 = walltime()
if (myid == 0):
    print("total time=", t2 - t1, "  time spent in do_jacobi=", ttot)
mpi.mpi_finalize()

# if this file is run as the executable, call main
#if __name__ == '__main__':
#	main()
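The excerpt's convergence bookkeeping is the usual pattern for iterative stencil solvers: each rank sweeps its own slab, the local residual is summed into a global value with mpi_reduce, and only rank 0 sees and reports it. The small sketch below isolates that monitoring step, written against the same module-style mpi bindings as the excerpt; do_local_sweep is a hypothetical stand-in for do_jacobi and is assumed to return the local residual.

# Sketch only: per-iteration global residual via mpi_reduce, reported by rank 0.
import mpi


def jacobi_residual_monitor(do_local_sweep, steps, myid, report_every=1):
    for step in range(steps):
        local_diff = do_local_sweep()    # hypothetical stand-in for do_jacobi(psi, new_psi, ...)
        global_diff = mpi.mpi_reduce(local_diff, 1, mpi.MPI_DOUBLE, mpi.MPI_SUM,
                                     0, mpi.MPI_COMM_WORLD)
        if myid == 0 and (step + 1) % report_every == 0:
            print(step + 1, global_diff[0])   # the reduced value is only valid on rank 0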
Example #5
def ali3d_MPI(stack, ref_vol, outdir, maskfile = None, ir = 1, ou = -1, rs = 1, 
	    xr = "4 2 2 1", yr = "-1", ts = "1 1 0.5 0.25", delta = "10 6 4 4", an = "-1",
	    center = 0, maxit = 5, term = 95, CTF = False, fourvar = False, snr = 1.0,  ref_a = "S", sym = "c1", 
	    sort=True, cutoff=999.99, pix_cutoff="0", two_tail=False, model_jump="1 1 1 1 1", restart=False, save_half=False,
	    protos=None, oplane=None, lmask=-1, ilmask=-1, findseam=False, vertstep=None, hpars="-1", hsearch="73.0 170.0",
	    full_output = False, compare_repro = False, compare_ref_free = "-1", ref_free_cutoff= "-1 -1 -1 -1",
	    wcmask = None, debug = False, recon_pad = 4):

	from alignment      import Numrinit, prepare_refrings
	from utilities      import model_circle, get_image, drop_image, get_input_from_string
	from utilities      import bcast_list_to_all, bcast_number_to_all, reduce_EMData_to_root, bcast_EMData_to_all 
	from utilities      import send_attr_dict
	from utilities      import get_params_proj, file_type
	from fundamentals   import rot_avg_image
	import os
	import sys   # needed for the sys.exit() calls below
	import types
	from utilities      import print_begin_msg, print_end_msg, print_msg
	from mpi	    import mpi_bcast, mpi_comm_size, mpi_comm_rank, MPI_FLOAT, MPI_COMM_WORLD, mpi_barrier, mpi_reduce
	from mpi	    import mpi_reduce, MPI_INT, MPI_SUM, mpi_finalize
	from filter	 import filt_ctf
	from projection     import prep_vol, prgs
	from statistics     import hist_list, varf3d_MPI, fsc_mask
	from numpy	  import array, bincount, array2string, ones

	number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
	myid	   = mpi_comm_rank(MPI_COMM_WORLD)
	main_node = 0
	if myid == main_node:
		if os.path.exists(outdir):  ERROR('Output directory exists, please change the name and restart the program', "ali3d_MPI", 1)
		os.mkdir(outdir)
	mpi_barrier(MPI_COMM_WORLD)

	if debug:
		from time import sleep
		while not os.path.exists(outdir):
			print  "Node ",myid,"  waiting..."
			sleep(5)

		info_file = os.path.join(outdir, "progress%04d"%myid)
		finfo = open(info_file, 'w')
	else:
		finfo = None
	mjump = get_input_from_string(model_jump)
	xrng	= get_input_from_string(xr)
	if  yr == "-1":  yrng = xrng
	else	  :  yrng = get_input_from_string(yr)
	step	= get_input_from_string(ts)
	delta       = get_input_from_string(delta)
	ref_free_cutoff = get_input_from_string(ref_free_cutoff)	
	pix_cutoff = get_input_from_string(pix_cutoff)
	
	lstp = min(len(xrng), len(yrng), len(step), len(delta))
	if an == "-1":
		an = [-1] * lstp
	else:
		an = get_input_from_string(an)
	# make sure pix_cutoff is set for all iterations
	if len(pix_cutoff)<lstp:
		for i in xrange(len(pix_cutoff),lstp):
			pix_cutoff.append(pix_cutoff[-1])
	# don't waste time on sub-pixel alignment for low-resolution ang incr
	for i in range(len(step)):
		if (delta[i] > 4 or delta[i] == -1) and step[i] < 1:
			step[i] = 1

	first_ring  = int(ir)
	rstep       = int(rs)
	last_ring   = int(ou)
	max_iter    = int(maxit)
	center      = int(center)

	nrefs   = EMUtil.get_image_count( ref_vol )
	nmasks = 0
	if maskfile:
		# read number of masks within each maskfile (mc)
		nmasks   = EMUtil.get_image_count( maskfile )
		# open masks within maskfile (mc)
		maskF   = EMData.read_images(maskfile, xrange(nmasks))
	vol     = EMData.read_images(ref_vol, xrange(nrefs))
	nx      = vol[0].get_xsize()

	## make sure box sizes are the same
	if myid == main_node:
		im=EMData.read_images(stack,[0])
		bx = im[0].get_xsize()
		if bx!=nx:
			print_msg("Error: Stack box size (%i) differs from initial model (%i)\n"%(bx,nx))
			sys.exit()
		del im,bx
	
	# for helical processing:
	helicalrecon = False
	if protos is not None or hpars != "-1" or findseam is True:
		helicalrecon = True
		# if no out-of-plane param set, use 5 degrees
		if oplane is None:
			oplane=5.0
	if protos is not None:
		proto = get_input_from_string(protos)
		if len(proto) != nrefs:
			print_msg("Error: insufficient protofilament numbers supplied")
			sys.exit()
	if hpars != "-1":
		hpars = get_input_from_string(hpars)
		if len(hpars) != 2*nrefs:
			print_msg("Error: insufficient helical parameters supplied")
			sys.exit()
	## create helical parameter file for helical reconstruction
	if helicalrecon is True and myid == main_node:
		from hfunctions import createHpar
		# create initial helical parameter files
		dp=[0]*nrefs
		dphi=[0]*nrefs
		vdp=[0]*nrefs
		vdphi=[0]*nrefs
		for iref in xrange(nrefs):
			hpar = os.path.join(outdir,"hpar%02d.spi"%(iref))
			params = False
			if hpars != "-1":
				# if helical parameters explicitly given, set twist & rise
				params = [float(hpars[iref*2]),float(hpars[(iref*2)+1])]
			dp[iref],dphi[iref],vdp[iref],vdphi[iref] = createHpar(hpar,proto[iref],params,vertstep)

	# get values for helical search parameters
	hsearch = get_input_from_string(hsearch)
	if len(hsearch) != 2:
		print_msg("Error: specify outer and inner radii for helical search")
		sys.exit()

	if last_ring < 0 or last_ring > int(nx/2)-2 :	last_ring = int(nx/2) - 2

	if myid == main_node:
	#	import user_functions
	#	user_func = user_functions.factory[user_func_name]

		print_begin_msg("ali3d_MPI")
		print_msg("Input stack		 : %s\n"%(stack))
		print_msg("Reference volume	    : %s\n"%(ref_vol))	
		print_msg("Output directory	    : %s\n"%(outdir))
		if nmasks > 0:
			print_msg("Maskfile (number of masks)  : %s (%i)\n"%(maskfile,nmasks))
		print_msg("Inner radius		: %i\n"%(first_ring))
		print_msg("Outer radius		: %i\n"%(last_ring))
		print_msg("Ring step		   : %i\n"%(rstep))
		print_msg("X search range	      : %s\n"%(xrng))
		print_msg("Y search range	      : %s\n"%(yrng))
		print_msg("Translational step	  : %s\n"%(step))
		print_msg("Angular step		: %s\n"%(delta))
		print_msg("Angular search range	: %s\n"%(an))
		print_msg("Maximum iteration	   : %i\n"%(max_iter))
		print_msg("Center type		 : %i\n"%(center))
		print_msg("CTF correction	      : %s\n"%(CTF))
		print_msg("Signal-to-Noise Ratio       : %f\n"%(snr))
		print_msg("Reference projection method : %s\n"%(ref_a))
		print_msg("Symmetry group	      : %s\n"%(sym))
		print_msg("Fourier padding for 3D      : %i\n"%(recon_pad))
		print_msg("Number of reference models  : %i\n"%(nrefs))
		print_msg("Sort images between models  : %s\n"%(sort))
		print_msg("Allow images to jump	: %s\n"%(mjump))
		print_msg("CC cutoff standard dev      : %f\n"%(cutoff))
		print_msg("Two tail cutoff	     : %s\n"%(two_tail))
		print_msg("Termination pix error       : %f\n"%(term))
		print_msg("Pixel error cutoff	  : %s\n"%(pix_cutoff))
		print_msg("Restart		     : %s\n"%(restart))
		print_msg("Full output		 : %s\n"%(full_output))
		print_msg("Compare reprojections       : %s\n"%(compare_repro))
		print_msg("Compare ref free class avgs : %s\n"%(compare_ref_free))
		print_msg("Use cutoff from ref free    : %s\n"%(ref_free_cutoff))
		if protos:
			print_msg("Protofilament numbers	: %s\n"%(proto))
			print_msg("Using helical search range   : %s\n"%hsearch) 
		if findseam is True:
			print_msg("Using seam-based reconstruction\n")
		if hpars != "-1":
			print_msg("Using hpars		  : %s\n"%hpars)
		if vertstep != None:
			print_msg("Using vertical step    : %.2f\n"%vertstep)
		if save_half is True:
			print_msg("Saving even/odd halves\n")
		for i in xrange(100) : print_msg("*")
		print_msg("\n\n")
	if maskfile:
		if type(maskfile) is types.StringType: mask3D = get_image(maskfile)
		else:				  mask3D = maskfile
	else: mask3D = model_circle(last_ring, nx, nx, nx)

	numr	= Numrinit(first_ring, last_ring, rstep, "F")
	mask2D  = model_circle(last_ring,nx,nx) - model_circle(first_ring,nx,nx)

	fscmask = model_circle(last_ring,nx,nx,nx)
	if CTF:
		from filter	 import filt_ctf
	from reconstruction_rjh import rec3D_MPI_noCTF

	if myid == main_node:
		active = EMUtil.get_all_attributes(stack, 'active')
		list_of_particles = []
		for im in xrange(len(active)):
			if active[im]:  list_of_particles.append(im)
		del active
		nima = len(list_of_particles)
	else:
		nima = 0
	total_nima = bcast_number_to_all(nima, source_node = main_node)

	if myid != main_node:
		list_of_particles = [-1]*total_nima
	list_of_particles = bcast_list_to_all(list_of_particles, source_node = main_node)

	image_start, image_end = MPI_start_end(total_nima, number_of_proc, myid)

	# create a list of images for each node
	list_of_particles = list_of_particles[image_start: image_end]
	nima = len(list_of_particles)
	if debug:
		finfo.write("image_start, image_end: %d %d\n" %(image_start, image_end))
		finfo.flush()

	data = EMData.read_images(stack, list_of_particles)

	t_zero = Transform({"type":"spider","phi":0,"theta":0,"psi":0,"tx":0,"ty":0})
	transmulti = [[t_zero for i in xrange(nrefs)] for j in xrange(nima)]

	for iref,im in ((iref,im) for iref in xrange(nrefs) for im in xrange(nima)):
		if nrefs == 1:
			transmulti[im][iref] = data[im].get_attr("xform.projection")
		else:
			# if multi models, keep track of eulers for all models
			try:
				transmulti[im][iref] = data[im].get_attr("eulers_txty.%i"%iref)
			except:
				data[im].set_attr("eulers_txty.%i"%iref,t_zero)

	scoremulti = [[0.0 for i in xrange(nrefs)] for j in xrange(nima)] 
	pixelmulti = [[0.0 for i in xrange(nrefs)] for j in xrange(nima)] 
	ref_res = [0.0 for x in xrange(nrefs)] 
	apix = data[0].get_attr('apix_x')

	# for oplane parameter, create cylindrical mask
	if oplane is not None and myid == main_node:
		from hfunctions import createCylMask
		cmaskf=os.path.join(outdir, "mask3D_cyl.mrc")
		mask3D = createCylMask(data,ou,lmask,ilmask,cmaskf)
		# if finding seam of helix, create wedge masks
		if findseam is True:
			wedgemask=[]
			for pf in xrange(nrefs):
				wedgemask.append(EMData())
			# wedgemask option
			if wcmask is not None:
				wcmask = get_input_from_string(wcmask)
				if len(wcmask) != 3:
					print_msg("Error: wcmask option requires 3 values: x y radius")
					sys.exit()

	# determine if particles have helix info:
	try:
		data[0].get_attr('h_angle')
		original_data = []
		boxmask = True
		from hfunctions import createBoxMask
	except:
		boxmask = False

	# prepare particles
	for im in xrange(nima):
		data[im].set_attr('ID', list_of_particles[im])
		data[im].set_attr('pix_score', int(0))
		if CTF:
			# only phaseflip particles, not full CTF correction
			ctf_params = data[im].get_attr("ctf")
			st = Util.infomask(data[im], mask2D, False)
			data[im] -= st[0]
			data[im] = filt_ctf(data[im], ctf_params, sign = -1, binary=1)
			data[im].set_attr('ctf_applied', 1)
		# for window mask:
		if boxmask is True:
			h_angle = data[im].get_attr("h_angle")
			original_data.append(data[im].copy())
			bmask = createBoxMask(nx,apix,ou,lmask,h_angle)
			data[im]*=bmask
			del bmask
	if debug:
		finfo.write( '%d loaded  \n' % nima )
		finfo.flush()
	if myid == main_node:
		# initialize data for the reference preparation function
		ref_data = [ mask3D, max(center,0), None, None, None, None ]
		# for method -1, switch off centering in user function

	from time import time	

	#  this is needed for gathering of pixel errors
	disps = []
	recvcount = []
	disps_score = []
	recvcount_score = []
	for im in xrange(number_of_proc):
		if( im == main_node ):  
			disps.append(0)
			disps_score.append(0)
		else:		  
			disps.append(disps[im-1] + recvcount[im-1])
			disps_score.append(disps_score[im-1] + recvcount_score[im-1])
		ib, ie = MPI_start_end(total_nima, number_of_proc, im)
		recvcount.append( ie - ib )
		recvcount_score.append((ie-ib)*nrefs)

	pixer = [0.0]*nima
	cs = [0.0]*3
	total_iter = 0
	volodd = EMData.read_images(ref_vol, xrange(nrefs))
	voleve = EMData.read_images(ref_vol, xrange(nrefs))

	if restart:
		# recreate initial volumes from alignments stored in header
		itout = "000_00"
		for iref in xrange(nrefs):
			if(nrefs == 1):
				modout = ""
			else:
				modout = "_model_%02d"%(iref)	
	
			if(sort): 
				group = iref
				for im in xrange(nima):
					imgroup = data[im].get_attr('group')
					if imgroup == iref:
						data[im].set_attr('xform.projection',transmulti[im][iref])
			else: 
				group = int(999) 
				for im in xrange(nima):
					data[im].set_attr('xform.projection',transmulti[im][iref])
			
			fscfile = os.path.join(outdir, "fsc_%s%s"%(itout,modout))

			vol[iref], fscc, volodd[iref], voleve[iref] = rec3D_MPI_noCTF(data, sym, fscmask, fscfile, myid, main_node, index = group, npad = recon_pad)

			if myid == main_node:
				if helicalrecon:
					from hfunctions import processHelicalVol

					vstep=None
					if vertstep is not None:
						vstep=(vdp[iref],vdphi[iref])
					print_msg("Old rise and twist for model %i     : %8.3f, %8.3f\n"%(iref,dp[iref],dphi[iref]))
					hvals=processHelicalVol(vol[iref],voleve[iref],volodd[iref],iref,outdir,itout,
								dp[iref],dphi[iref],apix,hsearch,findseam,vstep,wcmask)
					(vol[iref],voleve[iref],volodd[iref],dp[iref],dphi[iref],vdp[iref],vdphi[iref])=hvals
					print_msg("New rise and twist for model %i     : %8.3f, %8.3f\n"%(iref,dp[iref],dphi[iref]))
					# get new FSC from symmetrized half volumes
					fscc = fsc_mask( volodd[iref], voleve[iref], mask3D, rstep, fscfile)
				else:
					vol[iref].write_image(os.path.join(outdir, "vol_%s.hdf"%itout),-1)

				if save_half is True:
					volodd[iref].write_image(os.path.join(outdir, "volodd_%s.hdf"%itout),-1)
					voleve[iref].write_image(os.path.join(outdir, "voleve_%s.hdf"%itout),-1)

				if nmasks > 1:
					# Read mask for multiplying
					ref_data[0] = maskF[iref]
				ref_data[2] = vol[iref]
				ref_data[3] = fscc
				#  call user-supplied function to prepare reference image, i.e., center and filter it
				vol[iref], cs,fl = ref_ali3d(ref_data)
				vol[iref].write_image(os.path.join(outdir, "volf_%s.hdf"%(itout)),-1)
				if (apix == 1):
					res_msg = "Models filtered at spatial frequency of:\t"
					res = fl
				else:
					res_msg = "Models filtered at resolution of:       \t"
					res = apix / fl	
				ares = array2string(array(res), precision = 2)
				print_msg("%s%s\n\n"%(res_msg,ares))	
			
			bcast_EMData_to_all(vol[iref], myid, main_node)
			# write out headers, under MPI writing has to be done sequentially
			mpi_barrier(MPI_COMM_WORLD)

	# projection matching	
	for N_step in xrange(lstp):
		terminate = 0
		Iter = -1
		while(Iter < max_iter-1 and terminate == 0):
			Iter += 1
			total_iter += 1
			itout = "%03g_%02d" %(delta[N_step], Iter)
			if myid == main_node:
				print_msg("ITERATION #%3d, inner iteration #%3d\nDelta = %4.1f, an = %5.2f, xrange = %5.2f, yrange = %5.2f, step = %5.2f\n\n"%(N_step, Iter, delta[N_step], an[N_step], xrng[N_step],yrng[N_step],step[N_step]))
	
			for iref in xrange(nrefs):
				if myid == main_node: start_time = time()
				volft,kb = prep_vol( vol[iref] )

				## constrain projections to out of plane parameter
				theta1 = None
				theta2 = None
				if oplane is not None:
					theta1 = 90-oplane
					theta2 = 90+oplane
				refrings = prepare_refrings( volft, kb, nx, delta[N_step], ref_a, sym, numr, MPI=True, phiEqpsi = "Minus", initial_theta=theta1, delta_theta=theta2)
				
				del volft,kb

				if myid== main_node:
					print_msg( "Time to prepare projections for model %i: %s\n" % (iref, legibleTime(time()-start_time)) )
					start_time = time()
	
				for im in xrange( nima ):
					data[im].set_attr("xform.projection", transmulti[im][iref])
					if an[N_step] == -1:
						t1, peak, pixer[im] = proj_ali_incore(data[im],refrings,numr,xrng[N_step],yrng[N_step],step[N_step],finfo)
					else:
						t1, peak, pixer[im] = proj_ali_incore_local(data[im],refrings,numr,xrng[N_step],yrng[N_step],step[N_step],an[N_step],finfo)
					#data[im].set_attr("xform.projection"%iref, t1)
					if nrefs > 1: data[im].set_attr("eulers_txty.%i"%iref,t1)
					scoremulti[im][iref] = peak
					from pixel_error import max_3D_pixel_error
					# t1 is the current param, t2 is old
					t2 = transmulti[im][iref]
					pixelmulti[im][iref] = max_3D_pixel_error(t1,t2,numr[-3])
					transmulti[im][iref] = t1

				if myid == main_node:
					print_msg("Time of alignment for model %i: %s\n"%(iref, legibleTime(time()-start_time)))
					start_time = time()


			# gather scoring data from all processors
			from mpi import mpi_gatherv
			scoremultisend = sum(scoremulti,[])
			pixelmultisend = sum(pixelmulti,[])
			tmp = mpi_gatherv(scoremultisend,len(scoremultisend),MPI_FLOAT, recvcount_score, disps_score, MPI_FLOAT, main_node,MPI_COMM_WORLD)
			tmp1 = mpi_gatherv(pixelmultisend,len(pixelmultisend),MPI_FLOAT, recvcount_score, disps_score, MPI_FLOAT, main_node,MPI_COMM_WORLD)
			tmp = mpi_bcast(tmp,(total_nima * nrefs), MPI_FLOAT,0, MPI_COMM_WORLD)
			tmp1 = mpi_bcast(tmp1,(total_nima * nrefs), MPI_FLOAT,0, MPI_COMM_WORLD)
			tmp = map(float,tmp)
			tmp1 = map(float,tmp1)
			score = array(tmp).reshape(-1,nrefs)
			pixelerror = array(tmp1).reshape(-1,nrefs) 
			score_local = array(scoremulti)
			mean_score = score.mean(axis=0)
			std_score = score.std(axis=0)
			cut = mean_score - (cutoff * std_score)
			cut2 = mean_score + (cutoff * std_score)
			res_max = score_local.argmax(axis=1)
			minus_cc = [0.0 for x in xrange(nrefs)]
			minus_pix = [0.0 for x in xrange(nrefs)]
			minus_ref = [0.0 for x in xrange(nrefs)]
			
			#output pixel errors
			if(myid == main_node):
				from statistics import hist_list
				lhist = 20
				pixmin = pixelerror.min(axis=1)
				region, histo = hist_list(pixmin, lhist)
				if(region[0] < 0.0):  region[0] = 0.0
				print_msg("Histogram of pixel errors\n      ERROR       number of particles\n")
				for lhx in xrange(lhist):
					print_msg(" %10.3f     %7d\n"%(region[lhx], histo[lhx]))
				# Terminate if 95% within 1 pixel error
				im = 0
				for lhx in xrange(lhist):
					if(region[lhx] > 1.0): break
					im += histo[lhx]
				print_msg( "Percent of particles with pixel error < 1: %f\n\n"% (im/float(total_nima)*100))
				term_cond = float(term)/100
				if(im/float(total_nima) > term_cond): 
					terminate = 1
					print_msg("Terminating internal loop\n")
				del region, histo
			terminate = mpi_bcast(terminate, 1, MPI_INT, 0, MPI_COMM_WORLD)
			terminate = int(terminate[0])	
			
			for im in xrange(nima):
				if(sort==False):
					data[im].set_attr('group',999)
				elif (mjump[N_step]==1):
					data[im].set_attr('group',int(res_max[im]))
				
				pix_run = data[im].get_attr('pix_score')			
				if (pix_cutoff[N_step]==1 and (terminate==1 or Iter == max_iter-1)):
					if (pixelmulti[im][int(res_max[im])] > 1):
						data[im].set_attr('pix_score',int(777))

				if (score_local[im][int(res_max[im])]<cut[int(res_max[im])]) or (two_tail and score_local[im][int(res_max[im])]>cut2[int(res_max[im])]):
					data[im].set_attr('group',int(888))
					minus_cc[int(res_max[im])] = minus_cc[int(res_max[im])] + 1

				if(pix_run == 777):
					data[im].set_attr('group',int(777))
					minus_pix[int(res_max[im])] = minus_pix[int(res_max[im])] + 1

				if (compare_ref_free != "-1") and (ref_free_cutoff[N_step] != -1) and (total_iter > 1):
					id = data[im].get_attr('ID')
					if id in rejects:
						data[im].set_attr('group',int(666))
						minus_ref[int(res_max[im])] = minus_ref[int(res_max[im])] + 1	
						
				
			minus_cc_tot = mpi_reduce(minus_cc,nrefs,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD)	
			minus_pix_tot = mpi_reduce(minus_pix,nrefs,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD) 	
			minus_ref_tot = mpi_reduce(minus_ref,nrefs,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD)
			if (myid == main_node):
				if(sort):
					tot_max = score.argmax(axis=1)
					res = bincount(tot_max)
				else:
					res = ones(nrefs) * total_nima
				print_msg("Particle distribution:	     \t\t%s\n"%(res*1.0))
				afcut1 = res - minus_cc_tot
				afcut2 = afcut1 - minus_pix_tot
				afcut3 = afcut2 - minus_ref_tot
				print_msg("Particle distribution after cc cutoff:\t\t%s\n"%(afcut1))
				print_msg("Particle distribution after pix cutoff:\t\t%s\n"%(afcut2)) 
				print_msg("Particle distribution after ref cutoff:\t\t%s\n\n"%(afcut3)) 
					
						
			res = [0.0 for i in xrange(nrefs)]
			for iref in xrange(nrefs):
				if(center == -1):
					from utilities      import estimate_3D_center_MPI, rotate_3D_shift
					dummy=EMData()
					cs[0], cs[1], cs[2], dummy, dummy = estimate_3D_center_MPI(data, total_nima, myid, number_of_proc, main_node)				
					cs = mpi_bcast(cs, 3, MPI_FLOAT, main_node, MPI_COMM_WORLD)
					cs = [-float(cs[0]), -float(cs[1]), -float(cs[2])]
					rotate_3D_shift(data, cs)


				if(sort): 
					group = iref
					for im in xrange(nima):
						imgroup = data[im].get_attr('group')
						if imgroup == iref:
							data[im].set_attr('xform.projection',transmulti[im][iref])
				else: 
					group = int(999) 
					for im in xrange(nima):
						data[im].set_attr('xform.projection',transmulti[im][iref])
				if(nrefs == 1):
					modout = ""
				else:
					modout = "_model_%02d"%(iref)	
				
				fscfile = os.path.join(outdir, "fsc_%s%s"%(itout,modout))
				vol[iref], fscc, volodd[iref], voleve[iref] = rec3D_MPI_noCTF(data, sym, fscmask, fscfile, myid, main_node, index=group, npad=recon_pad)
	
				if myid == main_node:
					print_msg("3D reconstruction time for model %i: %s\n"%(iref, legibleTime(time()-start_time)))
					start_time = time()
	
				# Compute Fourier variance
				if fourvar:
					outvar = os.path.join(outdir, "volVar_%s.hdf"%(itout))
					ssnr_file = os.path.join(outdir, "ssnr_%s"%(itout))
					varf = varf3d_MPI(data, ssnr_text_file=ssnr_file, mask2D=None, reference_structure=vol[iref], ou=last_ring, rw=1.0, npad=1, CTF=None, sign=1, sym=sym, myid=myid)
					if myid == main_node:
						print_msg("Time to calculate 3D Fourier variance for model %i: %s\n"%(iref, legibleTime(time()-start_time)))
						start_time = time()
						varf = 1.0/varf
						varf.write_image(outvar,-1)
				else:  varf = None

				if myid == main_node:
					if helicalrecon:
						from hfunctions import processHelicalVol

						vstep=None
						if vertstep is not None:
							vstep=(vdp[iref],vdphi[iref])
						print_msg("Old rise and twist for model %i     : %8.3f, %8.3f\n"%(iref,dp[iref],dphi[iref]))
						hvals=processHelicalVol(vol[iref],voleve[iref],volodd[iref],iref,outdir,itout,
									dp[iref],dphi[iref],apix,hsearch,findseam,vstep,wcmask)
						(vol[iref],voleve[iref],volodd[iref],dp[iref],dphi[iref],vdp[iref],vdphi[iref])=hvals
						print_msg("New rise and twist for model %i     : %8.3f, %8.3f\n"%(iref,dp[iref],dphi[iref]))
						# get new FSC from symmetrized half volumes
						fscc = fsc_mask( volodd[iref], voleve[iref], mask3D, rstep, fscfile)

						print_msg("Time to search and apply helical symmetry for model %i: %s\n\n"%(iref, legibleTime(time()-start_time)))
						start_time = time()
					else:
						vol[iref].write_image(os.path.join(outdir, "vol_%s.hdf"%(itout)),-1)

					if save_half is True:
						volodd[iref].write_image(os.path.join(outdir, "volodd_%s.hdf"%(itout)),-1)
						voleve[iref].write_image(os.path.join(outdir, "voleve_%s.hdf"%(itout)),-1)

					if nmasks > 1:
						# Read mask for multiplying
						ref_data[0] = maskF[iref]
					ref_data[2] = vol[iref]
					ref_data[3] = fscc
					ref_data[4] = varf
					#  call user-supplied function to prepare reference image, i.e., center and filter it
					vol[iref], cs,fl = ref_ali3d(ref_data)
					vol[iref].write_image(os.path.join(outdir, "volf_%s.hdf"%(itout)),-1)
					if (apix == 1):
						res_msg = "Models filtered at spatial frequency of:\t"
						res[iref] = fl
					else:
						res_msg = "Models filtered at resolution of:       \t"
						res[iref] = apix / fl	
	
				del varf
				bcast_EMData_to_all(vol[iref], myid, main_node)
				
				if compare_ref_free != "-1": compare_repro = True
				if compare_repro:
					outfile_repro = comp_rep(refrings, data, itout, modout, vol[iref], group, nima, nx, myid, main_node, outdir)
					mpi_barrier(MPI_COMM_WORLD)
					if compare_ref_free != "-1":
						ref_free_output = os.path.join(outdir,"ref_free_%s%s"%(itout,modout))
						rejects = compare(compare_ref_free, outfile_repro,ref_free_output,yrng[N_step], xrng[N_step], rstep,nx,apix,ref_free_cutoff[N_step], number_of_proc, myid, main_node)

			# retrieve alignment params from all processors
			par_str = ['xform.projection','ID','group']
			if nrefs > 1:
				for iref in xrange(nrefs):
					par_str.append('eulers_txty.%i'%iref)

			if myid == main_node:
				from utilities import recv_attr_dict
				recv_attr_dict(main_node, stack, data, par_str, image_start, image_end, number_of_proc)
				
			else:	send_attr_dict(main_node, data, par_str, image_start, image_end)

			if myid == main_node:
				ares = array2string(array(res), precision = 2)
				print_msg("%s%s\n\n"%(res_msg,ares))
				dummy = EMData()
				if full_output:
					nimat = EMUtil.get_image_count(stack)
					output_file = os.path.join(outdir, "paramout_%s"%itout)
					foutput = open(output_file, 'w')
					for im in xrange(nimat):
						# save the parameters for each of the models
						outstring = ""
						dummy.read_image(stack,im,True)
						param3d = dummy.get_attr('xform.projection')
						g = dummy.get_attr("group")
						# retrieve alignments in EMAN-format
						pE = param3d.get_params('eman')
						outstring += "%f\t%f\t%f\t%f\t%f\t%i\n" %(pE["az"], pE["alt"], pE["phi"], pE["tx"], pE["ty"],g)
						foutput.write(outstring)
					foutput.close()
				del dummy
			mpi_barrier(MPI_COMM_WORLD)


#	mpi_finalize()	

	if myid == main_node: print_end_msg("ali3d_MPI")
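Besides the reductions, ali3d_MPI gathers variable-length per-particle score arrays with mpi_gatherv: recvcount and disps are derived from the same MPI_start_end partitioning that assigned particles to ranks, and the gathered array is then broadcast back so every rank can apply the same cutoffs. The condensed sketch below shows only that gather step, assuming the SPARX helper MPI_start_end and the mpi wrapper are in scope as in the example.

# Sketch only: mpi_gatherv with counts/displacements derived from MPI_start_end.
from mpi import mpi_gatherv, mpi_bcast, MPI_FLOAT, MPI_COMM_WORLD

def gather_scores(local_scores, total_nima, number_of_proc, main_node=0):
	# MPI_start_end(...) is the SPARX partitioning helper used in the example above (assumed in scope)
	recvcount = []
	disps = []
	for ip in xrange(number_of_proc):
		ib, ie = MPI_start_end(total_nima, number_of_proc, ip)
		if ip == main_node:
			disps.append(0)
		else:
			disps.append(disps[ip - 1] + recvcount[ip - 1])
		recvcount.append(ie - ib)
	gathered = mpi_gatherv(local_scores, len(local_scores), MPI_FLOAT,
	                       recvcount, disps, MPI_FLOAT, main_node, MPI_COMM_WORLD)
	# as in the example, broadcast the assembled array so all ranks see the full score list
	gathered = mpi_bcast(gathered, total_nima, MPI_FLOAT, main_node, MPI_COMM_WORLD)
	return map(float, gathered)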
Example #6
def main():
	progname = os.path.basename(sys.argv[0])
	usage = progname + """  input_micrograph_list_file  input_micrograph_pattern  input_coordinates_pattern  output_directory  --coordinates_format  --box_size=box_size  --invert  --import_ctf=ctf_file  --limit_ctf  --resample_ratio=resample_ratio  --defocus_error=defocus_error  --astigmatism_error=astigmatism_error
	
Window particles from micrographs in input list file. The coordinates of the particles should be given as input.
Please specify name pattern of input micrographs and coordinates files with a wild card (*). Use the wild card to indicate the place of the micrograph ID (e.g. serial number, time stamp, etc.). 
The name patterns must be enclosed by single quotes (') or double quotes ("). (Note: sxgui.py automatically adds single quotes (')). 
BDB files cannot be selected as input micrographs.
	
	sxwindow.py  mic_list.txt  ./mic*.hdf  info/mic*_info.json  particles  --coordinates_format=eman2  --box_size=64  --invert  --import_ctf=outdir_cter/partres/partres.txt
	
If micrograph list file name is not provided, all files matched with the micrograph name pattern will be processed.
	
	sxwindow.py  ./mic*.hdf  info/mic*_info.json  particles  --coordinates_format=eman2  --box_size=64  --invert  --import_ctf=outdir_cter/partres/partres.txt
	
"""
	parser = OptionParser(usage, version=SPARXVERSION)
	parser.add_option("--coordinates_format",  type="string",        default="eman1",   help="format of input coordinates files: 'sparx', 'eman1', 'eman2', or 'spider'. the coordinates of the sparx, eman2, and spider formats are the particle center. the coordinates of the eman1 format are the particle box corner associated with the original box size. (default eman1)")
	parser.add_option("--box_size",            type="int",           default=256,       help="x and y dimension of square area to be windowed (in pixels): pixel size after resampling is assumed when resample_ratio < 1.0 (default 256)")
	parser.add_option("--invert",              action="store_true",  default=False,     help="invert image contrast: recommended for cryo data (default False)")
	parser.add_option("--import_ctf",          type="string",        default="",        help="file name of sxcter output: normally partres.txt (default none)") 
	parser.add_option("--limit_ctf",           action="store_true",  default=False,     help="filter micrographs based on the CTF limit: this option requires --import_ctf. (default False)")	
	parser.add_option("--resample_ratio",      type="float",         default=1.0,       help="ratio of new to old image size (or old to new pixel size) for resampling: Valid range is 0.0 < resample_ratio <= 1.0. (default 1.0)")
	parser.add_option("--defocus_error",       type="float",         default=1000000.0, help="defocus error limit: exclude micrographs whose relative defocus error as estimated by sxcter is larger than defocus_error percent. the error is computed as (std dev defocus)/defocus*100%. (default 1000000.0)" )
	parser.add_option("--astigmatism_error",   type="float",         default=360.0,     help="astigmatism error limit: set astigmatism to zero for micrographs whose astigmatism angular error as estimated by sxcter is larger than astigmatism_error degrees. (default 360.0)")

	### detect if program is running under MPI
	RUNNING_UNDER_MPI = "OMPI_COMM_WORLD_SIZE" in os.environ
	
	main_node = 0
	
	if RUNNING_UNDER_MPI:
		from mpi import mpi_init
		from mpi import MPI_COMM_WORLD, mpi_comm_rank, mpi_comm_size, mpi_barrier, mpi_reduce, MPI_INT, MPI_SUM
		
		
		mpi_init(0, [])
		myid = mpi_comm_rank(MPI_COMM_WORLD)
		number_of_processes = mpi_comm_size(MPI_COMM_WORLD)
	else:
		number_of_processes = 1
		myid = 0
	
	(options, args) = parser.parse_args(sys.argv[1:])
	
	mic_list_file_path = None
	mic_pattern = None
	coords_pattern = None
	error_status = None
	while True:
		if len(args) < 3 or len(args) > 4:
			error_status = ("Please check usage for number of arguments.\n Usage: " + usage + "\n" + "Please run %s -h for help." % (progname), getframeinfo(currentframe()))
			break
		
		if len(args) == 3:
			mic_pattern = args[0]
			coords_pattern = args[1]
			out_dir = args[2]
		else: # assert(len(args) == 4)
			mic_list_file_path = args[0]
			mic_pattern = args[1]
			coords_pattern = args[2]
			out_dir = args[3]
		
		if mic_list_file_path != None:
			if os.path.splitext(mic_list_file_path)[1] != ".txt":
				error_status = ("Extension of input micrograph list file must be \".txt\". Please check input_micrograph_list_file argument. Run %s -h for help." % (progname), getframeinfo(currentframe()))
				break
		
		if mic_pattern[:len("bdb:")].lower() == "bdb:":
			error_status = ("BDB file can not be selected as input micrographs. Please convert the format, and restart the program. Run %s -h for help." % (progname), getframeinfo(currentframe()))
			break
		
		if mic_pattern.find("*") == -1:
			error_status = ("Input micrograph file name pattern must contain wild card (*). Please check input_micrograph_pattern argument. Run %s -h for help." % (progname), getframeinfo(currentframe()))
			break
		
		if coords_pattern.find("*") == -1:
			error_status = ("Input coordinates file name pattern must contain wild card (*). Please check input_coordinates_pattern argument. Run %s -h for help." % (progname), getframeinfo(currentframe()))
			break
		
		if myid == main_node:
			if os.path.exists(out_dir):
				error_status = ("Output directory exists. Please change the name and restart the program.", getframeinfo(currentframe()))
				break

		break
	if_error_then_all_processes_exit_program(error_status)
	
	# Check invalid conditions of options
	check_options(options, progname)
	
	mic_name_list = None
	error_status = None
	if myid == main_node:
		if mic_list_file_path != None:
			print("Loading micrograph list from %s file ..." % (mic_list_file_path))
			mic_name_list = read_text_file(mic_list_file_path)
			if len(mic_name_list) > 0:
				print("Directory of first micrograph entry is %s" % (os.path.dirname(mic_name_list[0])))
		else: # assert (mic_list_file_path == None)
			print("Generating micrograph list in %s directory..." % (os.path.dirname(mic_pattern)))
			mic_name_list = glob.glob(mic_pattern)
		if len(mic_name_list) == 0:
			error_status = ("No micrograph file is found. Please check input_micrograph_pattern and/or input_micrograph_list_file argument. Run %s -h for help." % (progname), getframeinfo(currentframe()))
		else:
			print("Found %d micrographs" % len(mic_name_list))
			
	if_error_then_all_processes_exit_program(error_status)
	if RUNNING_UNDER_MPI:
		mic_name_list = wrap_mpi_bcast(mic_name_list, main_node)
	
	coords_name_list = None
	error_status = None
	if myid == main_node:
		coords_name_list = glob.glob(coords_pattern)
		if len(coords_name_list) == 0:
			error_status = ("No coordinates file is found. Please check input_coordinates_pattern argument. Run %s -h for help." % (progname), getframeinfo(currentframe()))
	if_error_then_all_processes_exit_program(error_status)
	if RUNNING_UNDER_MPI:
		coords_name_list = wrap_mpi_bcast(coords_name_list, main_node)
	
##################################################################################################################################################################################################################	
##################################################################################################################################################################################################################	
##################################################################################################################################################################################################################	

	# all processes must have access to indices
	if options.import_ctf:
		i_enum = -1
		i_enum += 1; idx_cter_def          = i_enum # defocus [um]; index must be same as ctf object format
		i_enum += 1; idx_cter_cs           = i_enum # Cs [mm]; index must be same as ctf object format
		i_enum += 1; idx_cter_vol          = i_enum # voltage[kV]; index must be same as ctf object format
		i_enum += 1; idx_cter_apix         = i_enum # pixel size [A]; index must be same as ctf object format
		i_enum += 1; idx_cter_bfactor      = i_enum # B-factor [A^2]; index must be same as ctf object format
		i_enum += 1; idx_cter_ac           = i_enum # amplitude contrast [%]; index must be same as ctf object format
		i_enum += 1; idx_cter_astig_amp    = i_enum # astigmatism amplitude [um]; index must be same as ctf object format
		i_enum += 1; idx_cter_astig_ang    = i_enum # astigmatism angle [degree]; index must be same as ctf object format
		i_enum += 1; idx_cter_sd_def       = i_enum # std dev of defocus [um]
		i_enum += 1; idx_cter_sd_astig_amp = i_enum # std dev of ast amp [A]
		i_enum += 1; idx_cter_sd_astig_ang = i_enum # std dev of ast angle [degree]
		i_enum += 1; idx_cter_cv_def       = i_enum # coefficient of variation of defocus [%]
		i_enum += 1; idx_cter_cv_astig_amp = i_enum # coefficient of variation of ast amp [%]
		i_enum += 1; idx_cter_spectra_diff = i_enum # average of differences between with- and without-astig. experimental 1D spectra at extrema
		i_enum += 1; idx_cter_error_def    = i_enum # frequency at which signal drops by 50% due to estimated error of defocus alone [1/A]
		i_enum += 1; idx_cter_error_astig  = i_enum # frequency at which signal drops by 50% due to estimated error of defocus and astigmatism [1/A]
		i_enum += 1; idx_cter_error_ctf    = i_enum # limit frequency by CTF error [1/A]
		i_enum += 1; idx_cter_mic_name     = i_enum # micrograph name
		i_enum += 1; n_idx_cter            = i_enum
	
	
	# Prepare loop variables
	mic_basename_pattern = os.path.basename(mic_pattern)              # file pattern without path
	mic_baseroot_pattern = os.path.splitext(mic_basename_pattern)[0]  # file pattern without path and extension
	coords_format = options.coordinates_format.lower()
	box_size = options.box_size
	box_half = box_size // 2
	mask2d = model_circle(box_size//2, box_size, box_size) # Create circular 2D mask to Util.infomask of particle images
	resample_ratio = options.resample_ratio
	
	n_mic_process = 0
	n_mic_reject_no_coords = 0
	n_mic_reject_no_cter_entry = 0
	n_global_coords_detect = 0
	n_global_coords_process = 0
	n_global_coords_reject_out_of_boundary = 0
	
	serial_id_list = []
	error_status = None
	## not a real while, an if with the opportunity to use break when errors need to be reported
	while myid == main_node:
		# 
		# NOTE: 2016/05/24 Toshio Moriya
		# Now, ignores the path in mic_pattern and entries of mic_name_list to create serial ID
		# Only the basename (file name) in micrograph path must be match
		# 
		# Create list of micrograph serial ID
		# Break micrograph name pattern into prefix and suffix to find the head index of the micrograph serial id
		# 
		mic_basename_tokens = mic_basename_pattern.split('*')
		# assert (len(mic_basename_tokens) == 2)
		serial_id_head_index = len(mic_basename_tokens[0])
		# Loop through micrograph names
		for mic_name in mic_name_list:
			# Find the tail index of the serial id and extract serial id from the micrograph name
			mic_basename = os.path.basename(mic_name)
			serial_id_tail_index = mic_basename.index(mic_basename_tokens[1])
			serial_id = mic_basename[serial_id_head_index:serial_id_tail_index]
			serial_id_list.append(serial_id)
		# assert (len(serial_id_list) == len(mic_name))
		del mic_name_list # Do not need this anymore
		
		# Load CTFs if necessary
		if options.import_ctf:
			
			ctf_list = read_text_row(options.import_ctf)
			# print("Detected CTF entries : %6d ..." % (len(ctf_list)))
			
			if len(ctf_list) == 0:
				error_status = ("No CTF entry is found in %s. Please check --import_ctf option. Run %s -h for help." % (options.import_ctf, progname), getframeinfo(currentframe()))
				break
			
			if (len(ctf_list[0]) != n_idx_cter):
				error_status = ("Number of columns (%d) must be %d in %s. The format might be old. Please run sxcter.py again." % (len(ctf_list[0]), n_idx_cter, options.import_ctf), getframeinfo(currentframe()))
				break
			
			ctf_dict={}
			n_reject_defocus_error = 0
			ctf_error_limit = [options.defocus_error/100.0, options.astigmatism_error]
			for ctf_params in ctf_list:
				assert(len(ctf_params) == n_idx_cter)
				# mic_baseroot is name of micrograph minus the path and extension
				mic_baseroot = os.path.splitext(os.path.basename(ctf_params[idx_cter_mic_name]))[0]
				if(ctf_params[idx_cter_sd_def] / ctf_params[idx_cter_def] > ctf_error_limit[0]):
					print("Defocus error %f exceeds the threshold. Micrograph %s is rejected." % (ctf_params[idx_cter_sd_def] / ctf_params[idx_cter_def], mic_baseroot))
					n_reject_defocus_error += 1
				else:
					if(ctf_params[idx_cter_sd_astig_ang] > ctf_error_limit[1]):
						ctf_params[idx_cter_astig_amp] = 0.0
						ctf_params[idx_cter_astig_ang] = 0.0
					ctf_dict[mic_baseroot] = ctf_params
			del ctf_list # Do not need this anymore
		
		break
		
	if_error_then_all_processes_exit_program(error_status)

	if options.import_ctf:
		if options.limit_ctf:
			cutoff_histogram = []  #@ming compute the histogram for micrographs cut of by ctf_params limit.
	
##################################################################################################################################################################################################################	
##################################################################################################################################################################################################################	
##################################################################################################################################################################################################################	
	
	restricted_serial_id_list = []
	if myid == main_node:
		# Loop over serial IDs of micrographs
		for serial_id in serial_id_list:
			# mic_baseroot is name of micrograph minus the path and extension
			mic_baseroot = mic_baseroot_pattern.replace("*", serial_id)
			mic_name = mic_pattern.replace("*", serial_id)
			coords_name = coords_pattern.replace("*", serial_id)
			
			########### # CHECKS: BEGIN
			if coords_name not in coords_name_list:
				print("    Cannot read %s. Skipping %s ..." % (coords_name, mic_baseroot))
				n_mic_reject_no_coords += 1
				continue
			
			# IF mic is in CTER results
			if options.import_ctf:
				if mic_baseroot not in ctf_dict:
					print("    Is not listed in CTER results. Skipping %s ..." % (mic_baseroot))
					n_mic_reject_no_cter_entry += 1
					continue
				else:
					ctf_params = ctf_dict[mic_baseroot]
			# CHECKS: END
			
			n_mic_process += 1
			
			restricted_serial_id_list.append(serial_id)
		# restricted_serial_id_list = restricted_serial_id_list[:128]  ## for testing against the nonMPI version

	
	if myid != main_node:
		if options.import_ctf:
			ctf_dict = None

	error_status = None
	if len(restricted_serial_id_list) < number_of_processes:
		error_status = ('Number of processes (%d) supplied by --np in mpirun cannot be greater than %d (number of micrographs that satisfy all criteria to be processed) ' % (number_of_processes, len(restricted_serial_id_list)), getframeinfo(currentframe()))
	if_error_then_all_processes_exit_program(error_status)

	## keep a copy of the original output directory where the final bdb will be created
	original_out_dir = out_dir
	if RUNNING_UNDER_MPI:
		mpi_barrier(MPI_COMM_WORLD)
		restricted_serial_id_list = wrap_mpi_bcast(restricted_serial_id_list, main_node)
		mic_start, mic_end = MPI_start_end(len(restricted_serial_id_list), number_of_processes, myid)
		restricted_serial_id_list_not_sliced = restricted_serial_id_list
		restricted_serial_id_list = restricted_serial_id_list[mic_start:mic_end]
	
		if options.import_ctf:
			ctf_dict = wrap_mpi_bcast(ctf_dict, main_node)

		# generate subdirectories of out_dir, one for each process
		out_dir = os.path.join(out_dir,"%03d"%myid)
	
	if myid == main_node:
		print("Micrographs processed by main process (including percent complete):")

	len_processed_by_main_node_divided_by_100 = len(restricted_serial_id_list)/100.0

##################################################################################################################################################################################################################	
##################################################################################################################################################################################################################	
##################################################################################################################################################################################################################	
#####  Starting main parallel execution

	for my_idx, serial_id in enumerate(restricted_serial_id_list):
		mic_baseroot = mic_baseroot_pattern.replace("*", serial_id)
		mic_name = mic_pattern.replace("*", serial_id)
		coords_name = coords_pattern.replace("*", serial_id)

		if myid == main_node:
			print(mic_name, " ---> % 2.2f%%"%(my_idx/len_processed_by_main_node_divided_by_100))
		mic_img = get_im(mic_name)

		# Read coordinates according to the specified format and 
		# make the coordinates the center of particle image 
		if coords_format == "sparx":
			coords_list = read_text_row(coords_name)
		elif coords_format == "eman1":
			coords_list = read_text_row(coords_name)
			for i in xrange(len(coords_list)):
				coords_list[i] = [(coords_list[i][0] + coords_list[i][2] // 2), (coords_list[i][1] + coords_list[i][3] // 2)]
		elif coords_format == "eman2":
			coords_list = js_open_dict(coords_name)["boxes"]
			for i in xrange(len(coords_list)):
				coords_list[i] = [coords_list[i][0], coords_list[i][1]]
		elif coords_format == "spider":
			coords_list = read_text_row(coords_name)
			for i in xrange(len(coords_list)):
				coords_list[i] = [coords_list[i][2], coords_list[i][3]]
			# else: assert (False) # Unreachable code
		
		# Calculate the new pixel size
		if options.import_ctf:
			ctf_params = ctf_dict[mic_baseroot]
			pixel_size_origin = ctf_params[idx_cter_apix]
			
			if resample_ratio < 1.0:
				# assert (resample_ratio > 0.0)
				new_pixel_size = pixel_size_origin / resample_ratio
				print("Resample micrograph to pixel size %6.4f and window segments from resampled micrograph." % new_pixel_size)
			else:
				# assert (resample_ratio == 1.0)
				new_pixel_size = pixel_size_origin
		
			# Set ctf along with new pixel size in resampled micrograph
			ctf_params[idx_cter_apix] = new_pixel_size
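			# For example (assumed numbers): with pixel_size_origin = 1.2 A/pixel and resample_ratio = 0.5,
			# the resampled micrograph has new_pixel_size = 1.2 / 0.5 = 2.4 A/pixel.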
		else:
			# assert (not options.import_ctf)
			if resample_ratio < 1.0:
				# assert (resample_ratio > 0.0)
				print("Resample micrograph with ratio %6.4f and window segments from resampled micrograph." % resample_ratio)
			# else:
			#	assert (resample_ratio == 1.0)
		
		# Apply filters to micrograph
		fftip(mic_img)
		if options.limit_ctf:
			# assert (options.import_ctf)
			# Cut off frequency components higher than CTF limit 
			q1, q2 = ctflimit(box_size, ctf_params[idx_cter_def], ctf_params[idx_cter_cs], ctf_params[idx_cter_vol], new_pixel_size)
			
			# This is absolute frequency of CTF limit in scale of original micrograph
			if resample_ratio < 1.0:
				# assert (resample_ratio > 0.0)
				q1 = resample_ratio * q1 / float(box_size) # q1 = (pixel_size_origin / new_pixel_size) * q1/float(box_size)
			else:
				# assert (resample_ratio == 1.0) -> pixel_size_origin == new_pixel_size -> pixel_size_origin / new_pixel_size == 1.0
				q1 = q1 / float(box_size)
			
			if q1 < 0.5:
				mic_img = filt_tanl(mic_img, q1, 0.01)
				cutoff_histogram.append(q1)
		
		# Cut off frequency components lower than those the box size can express
		mic_img = fft(filt_gaussh(mic_img, resample_ratio / box_size))
		
		# Resample micrograph, map coordinates, and window segments from resampled micrograph using new coordinates
		# after resampling by resample_ratio, new pixel size will be pixel_size/resample_ratio = new_pixel_size
		# NOTE: 2015/04/13 Toshio Moriya
		# resample() efficiently handles the case resample_ratio = 1.0, but then it does not set apix_*,
		# even though it does set apix_* when resample_ratio < 1.0 ...
		mic_img = resample(mic_img, resample_ratio)
		
		if options.invert:
			mic_stats = Util.infomask(mic_img, None, True) # mic_stat[0:mean, 1:SD, 2:min, 3:max]
			Util.mul_scalar(mic_img, -1.0)
			mic_img += 2 * mic_stats[0]
		
		if options.import_ctf:
			from utilities import generate_ctf
			ctf_obj = generate_ctf(ctf_params) # indexes 0 to 7 (idx_cter_def to idx_cter_astig_ang) must be same in cter format & ctf object format.
		
		# Prepare loop variables
		nx = mic_img.get_xsize() 
		ny = mic_img.get_ysize()
		x0 = nx//2
		y0 = ny//2

		n_coords_reject_out_of_boundary = 0
		local_stack_name  = "bdb:%s#" % out_dir + mic_baseroot + '_ptcls'
		local_particle_id = 0 # can be different from coordinates_id
		# Loop over coordinates
		for coords_id in xrange(len(coords_list)):
			
			x = int(coords_list[coords_id][0])
			y = int(coords_list[coords_id][1])
			
			if resample_ratio < 1.0:
				# assert (resample_ratio > 0.0)
				x = int(x * resample_ratio)	
				y = int(y * resample_ratio)
			# else:
			# 	assert(resample_ratio == 1.0)
				
			if( (0 <= x - box_half) and ( x + box_half <= nx ) and (0 <= y - box_half) and ( y + box_half <= ny ) ):
				particle_img = Util.window(mic_img, box_size, box_size, 1, x-x0, y-y0)
			else:
				print("In %s, coordinates ID = %04d (x = %4d, y = %4d, box_size = %4d) is out of micrograph bound, skipping ..." % (mic_baseroot, coords_id, x, y, box_size))
				n_coords_reject_out_of_boundary += 1
				continue
			
			particle_img = ramp(particle_img)
			particle_stats = Util.infomask(particle_img, mask2d, False) # particle_stats[0:mean, 1:SD, 2:min, 3:max]
			particle_img -= particle_stats[0]
			particle_img /= particle_stats[1]
			
			# NOTE: 2015/04/09 Toshio Moriya
			# ptcl_source_image might be redundant information ...
			# Consider re-organizing header entries...
			particle_img.set_attr("ptcl_source_image", mic_name)
			particle_img.set_attr("ptcl_source_coord_id", coords_id)
			particle_img.set_attr("ptcl_source_coord", [int(coords_list[coords_id][0]), int(coords_list[coords_id][1])])
			particle_img.set_attr("resample_ratio", resample_ratio)
			
			# NOTE: 2015/04/13 Toshio Moriya
			# apix_* attributes are updated by resample() only when resample_ratio != 1.0
			# Let's make sure header info is consistent by setting apix_* = 1.0 
			# regardless of options, so it is not passed down the processing line
			particle_img.set_attr("apix_x", 1.0)
			particle_img.set_attr("apix_y", 1.0)
			particle_img.set_attr("apix_z", 1.0)
			if options.import_ctf:
				particle_img.set_attr("ctf",ctf_obj)
				particle_img.set_attr("ctf_applied", 0)
				particle_img.set_attr("pixel_size_origin", pixel_size_origin)
				# particle_img.set_attr("apix_x", new_pixel_size)
				# particle_img.set_attr("apix_y", new_pixel_size)
				# particle_img.set_attr("apix_z", new_pixel_size)
			# NOTE: 2015/04/13 Toshio Moriya 
			# Pawel Comment: Micrograph is not supposed to have CTF header info.
			# So, let's assume it does not exist & ignore its presence.
			# Note that resample() "correctly" updates pixel size of CTF header info if it exists
			# elif (particle_img.has_ctff()):
			# 	assert(not options.import_ctf)
			# 	ctf_origin = particle_img.get_attr("ctf_obj")
			# 	pixel_size_origin = round(ctf_origin.apix, 5) # Because SXCTER outputs up to 5 digits
			# 	particle_img.set_attr("apix_x",pixel_size_origin)
			# 	particle_img.set_attr("apix_y",pixel_size_origin)
			# 	particle_img.set_attr("apix_z",pixel_size_origin)	
			
			# print("local_stack_name, local_particle_id", local_stack_name, local_particle_id)
			particle_img.write_image(local_stack_name, local_particle_id)
			local_particle_id += 1
		
		n_global_coords_detect += len(coords_list)
		n_global_coords_process += local_particle_id
		n_global_coords_reject_out_of_boundary += n_coords_reject_out_of_boundary
		
#		# MRK_DEBUG: Toshio Moriya 2016/05/03
#		# Following codes are for debugging bdb. Delete in future
#		result = db_check_dict(local_stack_name)
#		print('# MRK_DEBUG: result = db_check_dict(local_stack_name): %s' % (result))
#		result = db_list_dicts('bdb:%s' % out_dir)
#		print('# MRK_DEBUG: result = db_list_dicts(out_dir): %s' % (result))
#		result = db_get_image_info(local_stack_name)
#		print('# MRK_DEBUG: result = db_get_image_info(local_stack_name)', result)
		
		# Release the database of the local stack from this process
		# so that the subprocess can access the database
		db_close_dict(local_stack_name)
		
#		# MRK_DEBUG: Toshio Moriya 2016/05/03
#		# Following codes are for debugging bdb. Delete in future
#		cmd_line = "e2iminfo.py %s" % (local_stack_name)
#		print('# MRK_DEBUG: Executing the command: %s' % (cmd_line))
#		cmdexecute(cmd_line)
		
#		# MRK_DEBUG: Toshio Moriya 2016/05/03
#		# Following codes are for debugging bdb. Delete in future
#		cmd_line = "e2iminfo.py bdb:%s#data" % (out_dir)
#		print('# MRK_DEBUG: Executing the command: %s' % (cmd_line))
#		cmdexecute(cmd_line)
		
	if RUNNING_UNDER_MPI:
		if options.import_ctf:
			if options.limit_ctf:
				cutoff_histogram = wrap_mpi_gatherv(cutoff_histogram, main_node)

	if myid == main_node:
		if options.limit_ctf:
			# Print out the summary of CTF-limit filtering
			print(" ")
			print("Global summary of CTF-limit filtering (--limit_ctf) ...")
			print("Percentage of filtered micrographs: %8.2f\n" % (len(cutoff_histogram) * 100.0 / len(restricted_serial_id_list_not_sliced)))

			n_bins = 10
			if len(cutoff_histogram) >= n_bins:
				from statistics import hist_list
				cutoff_region, cutoff_counts = hist_list(cutoff_histogram, n_bins)
				print("      Histogram of cut-off frequency")
				print("      cut-off       counts")
				for bin_id in xrange(n_bins):
					print(" %14.7f     %7d" % (cutoff_region[bin_id], cutoff_counts[bin_id]))
			else:
				print("The number of filtered micrographs (%d) is less than the number of bins (%d). No histogram is produced." % (len(cutoff_histogram), n_bins))
	
	n_mic_process = mpi_reduce(n_mic_process, 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD)
	n_mic_reject_no_coords = mpi_reduce(n_mic_reject_no_coords, 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD)
	n_mic_reject_no_cter_entry = mpi_reduce(n_mic_reject_no_cter_entry, 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD)
	n_global_coords_detect = mpi_reduce(n_global_coords_detect, 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD)
	n_global_coords_process = mpi_reduce(n_global_coords_process, 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD)
	n_global_coords_reject_out_of_boundary = mpi_reduce(n_global_coords_reject_out_of_boundary, 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD)
	
	# Print out the summary of all micrographs
	if main_node == myid:
		print(" ")
		print("Global summary of micrographs ...")
		print("Detected                        : %6d" % (len(restricted_serial_id_list_not_sliced)))
		print("Processed                       : %6d" % (n_mic_process))
		print("Rejected by no coordinates file : %6d" % (n_mic_reject_no_coords))
		print("Rejected by no CTER entry       : %6d" % (n_mic_reject_no_cter_entry))
		print(" ")
		print("Global summary of coordinates ...")
		print("Detected                        : %6d" % (n_global_coords_detect))
		print("Processed                       : %6d" % (n_global_coords_process))
		print("Rejected by out of boundary     : %6d" % (n_global_coords_reject_out_of_boundary))
		# print(" ")
		# print("DONE!!!")
	
	mpi_barrier(MPI_COMM_WORLD)
	
	if main_node == myid:
	
		import time
		time.sleep(1)
		print("\n Creating bdb:%s/data\n"%original_out_dir)
		for proc_i in range(number_of_processes):
			mic_start, mic_end = MPI_start_end(len(restricted_serial_id_list_not_sliced), number_of_processes, proc_i)
			for serial_id in restricted_serial_id_list_not_sliced[mic_start:mic_end]:
				e2bdb_command = "e2bdb.py "
				mic_baseroot = mic_baseroot_pattern.replace("*", serial_id)
				if RUNNING_UNDER_MPI:
					e2bdb_command += "bdb:" + os.path.join(original_out_dir,"%03d/"%proc_i) + mic_baseroot + "_ptcls "
				else:
					e2bdb_command += "bdb:" + os.path.join(original_out_dir, mic_baseroot + "_ptcls ") 
				
				e2bdb_command += " --appendvstack=bdb:%s/data  1>/dev/null"%original_out_dir
				cmdexecute(e2bdb_command, printing_on_success = False)
				
		print("Done!\n")
				
	if RUNNING_UNDER_MPI:
		mpi_barrier(MPI_COMM_WORLD)
		from mpi import mpi_finalize
		mpi_finalize()

	sys.stdout.flush()
	sys.exit(0)
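A minimal, self-contained sketch of the coordinate scaling and boundary test used above when windowing particles (plain Python; box_fits is a hypothetical helper name, not part of EMAN2/SPARX). Coordinates are scaled by resample_ratio, and a box of box_size pixels centred on them must fit entirely inside the resampled micrograph.

def box_fits(x, y, box_size, nx, ny, resample_ratio=1.0):
    # Scale coordinates into the resampled micrograph, as done above.
    if resample_ratio < 1.0:
        x = int(x * resample_ratio)
        y = int(y * resample_ratio)
    box_half = box_size // 2
    # The box must lie entirely inside the nx x ny micrograph.
    return (0 <= x - box_half) and (x + box_half <= nx) and (0 <= y - box_half) and (y + box_half <= ny)

# A 256-pixel box at (200, 300) in a 4096 x 4096 micrograph fits; at (100, 300) it does not (100 - 128 < 0).
print(box_fits(200, 300, 256, 4096, 4096))  # True
print(box_fits(100, 300, 256, 4096, 4096))  # False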
Example #8
def shiftali_MPI(stack,
                 maskfile=None,
                 maxit=100,
                 CTF=False,
                 snr=1.0,
                 Fourvar=False,
                 search_rng=-1,
                 oneDx=False,
                 search_rng_y=-1):

    number_of_proc = mpi.mpi_comm_size(mpi.MPI_COMM_WORLD)
    myid = mpi.mpi_comm_rank(mpi.MPI_COMM_WORLD)
    main_node = 0

    ftp = file_type(stack)

    if myid == main_node:
        print_begin_msg("shiftali_MPI")

    max_iter = int(maxit)

    if myid == main_node:
        if ftp == "bdb":
            from EMAN2db import db_open_dict
            dummy = db_open_dict(stack, True)
        nima = EMUtil.get_image_count(stack)
    else:
        nima = 0
    nima = bcast_number_to_all(nima, source_node=main_node)
    list_of_particles = list(range(nima))

    image_start, image_end = MPI_start_end(nima, number_of_proc, myid)
    list_of_particles = list_of_particles[image_start:image_end]

    # read nx and ctf_app (if CTF) and broadcast to all nodes
    if myid == main_node:
        ima = EMData()
        ima.read_image(stack, list_of_particles[0], True)
        nx = ima.get_xsize()
        ny = ima.get_ysize()
        if CTF: ctf_app = ima.get_attr_default('ctf_applied', 2)
        del ima
    else:
        nx = 0
        ny = 0
        if CTF: ctf_app = 0
    nx = bcast_number_to_all(nx, source_node=main_node)
    ny = bcast_number_to_all(ny, source_node=main_node)
    if CTF:
        ctf_app = bcast_number_to_all(ctf_app, source_node=main_node)
        if ctf_app > 0:
            ERROR("data cannot be ctf-applied", myid=myid)

    if maskfile == None:
        mrad = min(nx, ny)
        mask = model_circle(mrad // 2 - 2, nx, ny)
    else:
        mask = get_im(maskfile)

    if CTF:
        from sp_filter import filt_ctf
        from sp_morphology import ctf_img
        ctf_abs_sum = EMData(nx, ny, 1, False)
        ctf_2_sum = EMData(nx, ny, 1, False)
    else:
        ctf_2_sum = None

    from sp_global_def import CACHE_DISABLE
    if CACHE_DISABLE:
        data = EMData.read_images(stack, list_of_particles)
    else:
        for i in range(number_of_proc):
            if myid == i:
                data = EMData.read_images(stack, list_of_particles)
            if ftp == "bdb": mpi.mpi_barrier(mpi.MPI_COMM_WORLD)

    for im in range(len(data)):
        data[im].set_attr('ID', list_of_particles[im])
        st = Util.infomask(data[im], mask, False)
        data[im] -= st[0]
        if CTF:
            ctf_params = data[im].get_attr("ctf")
            ctfimg = ctf_img(nx, ctf_params, ny=ny)
            Util.add_img2(ctf_2_sum, ctfimg)
            Util.add_img_abs(ctf_abs_sum, ctfimg)

    if CTF:
        reduce_EMData_to_root(ctf_2_sum, myid, main_node)
        reduce_EMData_to_root(ctf_abs_sum, myid, main_node)
    else:
        ctf_2_sum = None
    if CTF:
        if myid != main_node:
            del ctf_2_sum
            del ctf_abs_sum
        else:
            temp = EMData(nx, ny, 1, False)
            for i in range(0, nx, 2):
                for j in range(ny):
                    temp.set_value_at(i, j, snr)
            Util.add_img(ctf_2_sum, temp)
            del temp

    total_iter = 0

    # apply initial xform.align2d parameters stored in header
    init_params = []
    for im in range(len(data)):
        t = data[im].get_attr('xform.align2d')
        init_params.append(t)
        p = t.get_params("2d")
        data[im] = rot_shift2D(data[im],
                               p['alpha'],
                               sx=p['tx'],
                               sy=p['ty'],
                               mirror=p['mirror'],
                               scale=p['scale'])

    # fourier transform all images, and apply ctf if CTF
    for im in range(len(data)):
        if CTF:
            ctf_params = data[im].get_attr("ctf")
            data[im] = filt_ctf(fft(data[im]), ctf_params)
        else:
            data[im] = fft(data[im])

    sx_sum = 0
    sy_sum = 0
    sx_sum_total = 0
    sy_sum_total = 0
    shift_x = [0.0] * len(data)
    shift_y = [0.0] * len(data)
    ishift_x = [0.0] * len(data)
    ishift_y = [0.0] * len(data)

    for Iter in range(max_iter):
        if myid == main_node:
            start_time = time()
            print_msg("Iteration #%4d\n" % (total_iter))
        total_iter += 1
        avg = EMData(nx, ny, 1, False)
        for im in data:
            Util.add_img(avg, im)

        reduce_EMData_to_root(avg, myid, main_node)

        if myid == main_node:
            if CTF:
                tavg = Util.divn_filter(avg, ctf_2_sum)
            else:
                tavg = Util.mult_scalar(avg, 1.0 / float(nima))
        else:
            tavg = EMData(nx, ny, 1, False)

        if Fourvar:
            bcast_EMData_to_all(tavg, myid, main_node)
            vav, rvar = varf2d_MPI(myid, data, tavg, mask, "a", CTF)

        if myid == main_node:
            if Fourvar:
                tavg = fft(Util.divn_img(fft(tavg), vav))
                vav_r = Util.pack_complex_to_real(vav)

            # normalize and mask tavg in real space
            tavg = fft(tavg)
            stat = Util.infomask(tavg, mask, False)
            tavg -= stat[0]
            Util.mul_img(tavg, mask)
            # For testing purposes: shift tavg to some random place and see if the centering is still correct
            #tavg = rot_shift3D(tavg,sx=3,sy=-4)
            tavg = fft(tavg)

        if Fourvar: del vav
        bcast_EMData_to_all(tavg, myid, main_node)

        sx_sum = 0
        sy_sum = 0
        if search_rng > 0: nwx = 2 * search_rng + 1
        else: nwx = nx

        if search_rng_y > 0: nwy = 2 * search_rng_y + 1
        else: nwy = ny

        not_zero = 0
        for im in range(len(data)):
            if oneDx:
                ctx = Util.window(ccf(data[im], tavg), nwx, 1)
                p1 = peak_search(ctx)
                p1_x = -int(p1[0][3])
                ishift_x[im] = p1_x
                sx_sum += p1_x
            else:
                p1 = peak_search(Util.window(ccf(data[im], tavg), nwx, nwy))
                p1_x = -int(p1[0][4])
                p1_y = -int(p1[0][5])
                ishift_x[im] = p1_x
                ishift_y[im] = p1_y
                sx_sum += p1_x
                sy_sum += p1_y

            if not_zero == 0:
                if (not (ishift_x[im] == 0.0)) or (not (ishift_y[im] == 0.0)):
                    not_zero = 1

        sx_sum = mpi.mpi_reduce(sx_sum, 1, mpi.MPI_INT, mpi.MPI_SUM, main_node,
                                mpi.MPI_COMM_WORLD)

        if not oneDx:
            sy_sum = mpi.mpi_reduce(sy_sum, 1, mpi.MPI_INT, mpi.MPI_SUM,
                                    main_node, mpi.MPI_COMM_WORLD)

        if myid == main_node:
            sx_sum_total = int(sx_sum[0])
            if not oneDx:
                sy_sum_total = int(sy_sum[0])
        else:
            sx_sum_total = 0
            sy_sum_total = 0

        sx_sum_total = bcast_number_to_all(sx_sum_total, source_node=main_node)

        if not oneDx:
            sy_sum_total = bcast_number_to_all(sy_sum_total,
                                               source_node=main_node)

        sx_ave = round(float(sx_sum_total) / nima)
        sy_ave = round(float(sy_sum_total) / nima)
        for im in range(len(data)):
            p1_x = ishift_x[im] - sx_ave
            p1_y = ishift_y[im] - sy_ave
            params2 = {
                "filter_type": Processor.fourier_filter_types.SHIFT,
                "x_shift": p1_x,
                "y_shift": p1_y,
                "z_shift": 0.0
            }
            data[im] = Processor.EMFourierFilter(data[im], params2)
            shift_x[im] += p1_x
            shift_y[im] += p1_y
        # stop if all shifts are zero
        not_zero = mpi.mpi_reduce(not_zero, 1, mpi.MPI_INT, mpi.MPI_SUM,
                                  main_node, mpi.MPI_COMM_WORLD)
        if myid == main_node:
            not_zero_all = int(not_zero[0])
        else:
            not_zero_all = 0
        not_zero_all = bcast_number_to_all(not_zero_all, source_node=main_node)

        if myid == main_node:
            print_msg("Time of iteration = %12.2f\n" % (time() - start_time))
            start_time = time()

        if not_zero_all == 0: break

    #for im in xrange(len(data)): data[im] = fft(data[im])  This should not be required as only header information is used
    # combine shifts found with the original parameters
    for im in range(len(data)):
        t0 = init_params[im]
        t1 = Transform()
        t1.set_params({
            "type": "2D",
            "alpha": 0,
            "scale": t0.get_scale(),
            "mirror": 0,
            "tx": shift_x[im],
            "ty": shift_y[im]
        })
        # combine t0 and t1
        tt = t1 * t0
        data[im].set_attr("xform.align2d", tt)

    # write out headers and STOP, under MPI writing has to be done sequentially
    mpi.mpi_barrier(mpi.MPI_COMM_WORLD)
    par_str = ["xform.align2d", "ID"]
    if myid == main_node:
        from sp_utilities import file_type
        if (file_type(stack) == "bdb"):
            from sp_utilities import recv_attr_dict_bdb
            recv_attr_dict_bdb(main_node, stack, data, par_str, image_start,
                               image_end, number_of_proc)
        else:
            from sp_utilities import recv_attr_dict
            recv_attr_dict(main_node, stack, data, par_str, image_start,
                           image_end, number_of_proc)

    else:
        send_attr_dict(main_node, data, par_str, image_start, image_end)
    if myid == main_node: print_end_msg("shiftali_MPI")
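The particles are divided among ranks with MPI_start_end before the loop above. A hypothetical block-partition helper (block_start_end is an illustrative name; the actual MPI_start_end in sp_applications may round the split differently) conveys the idea: each rank gets a contiguous slice of roughly nima/number_of_proc images.

def block_start_end(total, nproc, myid):
    # Contiguous block partition: each rank gets about total/nproc items,
    # with the first (total % nproc) ranks taking one extra.
    base, rem = divmod(total, nproc)
    start = myid * base + min(myid, rem)
    end = start + base + (1 if myid < rem else 0)
    return start, end

# 10 particles on 3 ranks -> [(0, 4), (4, 7), (7, 10)]
print([block_start_end(10, 3, r) for r in range(3)])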
Example #9
parent = mpi.mpi_comm_get_parent()
parentSize = mpi.mpi_comm_size(parent)
print "parentSize", parentSize

tod = stamp()
s = sys.argv[1] + "%2.2d" % myid
print "hello from python worker", myid, " writing to ", s

x = array([5, 3, 4, 2], 'i')
print "starting bcast"
buffer = mpi.mpi_bcast(x, 4, mpi.MPI_INT, 0, parent)
out = open(s, "w")
out.write(str(buffer))
out.write(tod + "\n")
out.close()

print myid, " got ", buffer
junk = mpi.mpi_scatter(x, 1, mpi.MPI_INT, 1, mpi.MPI_INT, 0, parent)
print myid, " got scatter ", junk

back = mpi.mpi_recv(1, mpi.MPI_INT, 0, 1234, parent)
back[0] = back[0] + 1
mpi.mpi_send(back, 1, mpi.MPI_INT, 0, 5678, parent)

dummy = myid
final = mpi.mpi_reduce(dummy, 1, mpi.MPI_INT, mpi.MPI_SUM, 0, parent)

sleep(10)
mpi.mpi_comm_free(parent)
mpi.mpi_finalize()
Example #10
mpi_root = 0

#each processor will get count elements from the root
count = 4
# In Python we do not need to preallocate the array myray,
# but we do need to assign a dummy value to send_ray
send_ray = zeros(0, "i")
if myid == mpi_root:
    size = count * numnodes
    send_ray = zeros(size, "i")
    for i in range(0, size):
        send_ray[i] = i

#send different data to each processor
myray = mpi.mpi_scatter(send_ray, count, mpi.MPI_INT, count, mpi.MPI_INT,
                        mpi_root, mpi.MPI_COMM_WORLD)

#each processor does a local sum
total = 0
for i in range(0, count):
    total = total + myray[i]
print "myid=", myid, "total=", total

#reduce  back to the root and print
back_ray = mpi.mpi_reduce(total, 1, mpi.MPI_INT, mpi.MPI_SUM, mpi_root,
                          mpi.MPI_COMM_WORLD)
if myid == mpi_root:
    print "results from all processors=", back_ray

mpi.mpi_finalize()
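A quick pure-Python check of the arithmetic in the scatter/reduce demo above, assuming count = 4 and numnodes ranks: rank r receives elements 4r..4r+3, so its local total is 16r + 6, and the value returned by mpi_reduce at the root is the grand total (4*numnodes)*(4*numnodes - 1)/2.

count, numnodes = 4, 3                        # assumed example size
send_ray = list(range(count * numnodes))      # what the root scatters
local_totals = [sum(send_ray[r * count:(r + 1) * count]) for r in range(numnodes)]
print(local_totals)        # [6, 22, 38], i.e. 16*r + 6
print(sum(local_totals))   # 66 == 12*11/2, the value gathered at the root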
Example #11
def shiftali_MPI(stack, maskfile=None, maxit=100, CTF=False, snr=1.0, Fourvar=False, search_rng=-1, oneDx=False, search_rng_y=-1):  
	from applications import MPI_start_end
	from utilities    import model_circle, model_blank, get_image, peak_search, get_im
	from utilities    import reduce_EMData_to_root, bcast_EMData_to_all, send_attr_dict, file_type, bcast_number_to_all, bcast_list_to_all
	from statistics   import varf2d_MPI
	from fundamentals import fft, ccf, rot_shift3D, rot_shift2D
	from utilities    import get_params2D, set_params2D
	from utilities    import print_msg, print_begin_msg, print_end_msg
	import os
	import sys
	from mpi 	  	  import mpi_init, mpi_comm_size, mpi_comm_rank, MPI_COMM_WORLD
	from mpi 	  	  import mpi_reduce, mpi_bcast, mpi_barrier, mpi_gatherv
	from mpi 	  	  import MPI_SUM, MPI_FLOAT, MPI_INT
	from EMAN2	  	  import Processor
	from time         import time	
	
	number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
	myid = mpi_comm_rank(MPI_COMM_WORLD)
	main_node = 0
		
	ftp = file_type(stack)

	if myid == main_node:
		print_begin_msg("shiftali_MPI")

	max_iter=int(maxit)

	if myid == main_node:
		if ftp == "bdb":
			from EMAN2db import db_open_dict
			dummy = db_open_dict(stack, True)
		nima = EMUtil.get_image_count(stack)
	else:
		nima = 0
	nima = bcast_number_to_all(nima, source_node = main_node)
	list_of_particles = range(nima)
	
	image_start, image_end = MPI_start_end(nima, number_of_proc, myid)
	list_of_particles = list_of_particles[image_start: image_end]

	# read nx and ctf_app (if CTF) and broadcast to all nodes
	if myid == main_node:
		ima = EMData()
		ima.read_image(stack, list_of_particles[0], True)
		nx = ima.get_xsize()
		ny = ima.get_ysize()
		if CTF:	ctf_app = ima.get_attr_default('ctf_applied', 2)
		del ima
	else:
		nx = 0
		ny = 0
		if CTF:	ctf_app = 0
	nx = bcast_number_to_all(nx, source_node = main_node)
	ny = bcast_number_to_all(ny, source_node = main_node)
	if CTF:
		ctf_app = bcast_number_to_all(ctf_app, source_node = main_node)
		if ctf_app > 0:	ERROR("data cannot be ctf-applied", "shiftali_MPI", 1, myid)

	if maskfile == None:
		mrad = min(nx, ny)
		mask = model_circle(mrad//2-2, nx, ny)
	else:
		mask = get_im(maskfile)

	if CTF:
		from filter import filt_ctf
		from morphology   import ctf_img
		ctf_abs_sum = EMData(nx, ny, 1, False)
		ctf_2_sum = EMData(nx, ny, 1, False)
	else:
		ctf_2_sum = None

	from global_def import CACHE_DISABLE
	if CACHE_DISABLE:
		data = EMData.read_images(stack, list_of_particles)
	else:
		for i in xrange(number_of_proc):
			if myid == i:
				data = EMData.read_images(stack, list_of_particles)
			if ftp == "bdb": mpi_barrier(MPI_COMM_WORLD)


	for im in xrange(len(data)):
		data[im].set_attr('ID', list_of_particles[im])
		st = Util.infomask(data[im], mask, False)
		data[im] -= st[0]
		if CTF:
			ctf_params = data[im].get_attr("ctf")
			ctfimg = ctf_img(nx, ctf_params, ny=ny)
			Util.add_img2(ctf_2_sum, ctfimg)
			Util.add_img_abs(ctf_abs_sum, ctfimg)

	if CTF:
		reduce_EMData_to_root(ctf_2_sum, myid, main_node)
		reduce_EMData_to_root(ctf_abs_sum, myid, main_node)
	else:  ctf_2_sum = None
	if CTF:
		if myid != main_node:
			del ctf_2_sum
			del ctf_abs_sum
		else:
			temp = EMData(nx, ny, 1, False)
			for i in xrange(0,nx,2):
				for j in xrange(ny):
					temp.set_value_at(i,j,snr)
			Util.add_img(ctf_2_sum, temp)
			del temp

	total_iter = 0

	# apply initial xform.align2d parameters stored in header
	init_params = []
	for im in xrange(len(data)):
		t = data[im].get_attr('xform.align2d')
		init_params.append(t)
		p = t.get_params("2d")
		data[im] = rot_shift2D(data[im], p['alpha'], sx=p['tx'], sy=p['ty'], mirror=p['mirror'], scale=p['scale'])

	# fourier transform all images, and apply ctf if CTF
	for im in xrange(len(data)):
		if CTF:
			ctf_params = data[im].get_attr("ctf")
			data[im] = filt_ctf(fft(data[im]), ctf_params)
		else:
			data[im] = fft(data[im])

	sx_sum=0
	sy_sum=0
	sx_sum_total=0
	sy_sum_total=0
	shift_x = [0.0]*len(data)
	shift_y = [0.0]*len(data)
	ishift_x = [0.0]*len(data)
	ishift_y = [0.0]*len(data)

	for Iter in xrange(max_iter):
		if myid == main_node:
			start_time = time()
			print_msg("Iteration #%4d\n"%(total_iter))
		total_iter += 1
		avg = EMData(nx, ny, 1, False)
		for im in data:  Util.add_img(avg, im)

		reduce_EMData_to_root(avg, myid, main_node)

		if myid == main_node:
			if CTF:
				tavg = Util.divn_filter(avg, ctf_2_sum)
			else:	 tavg = Util.mult_scalar(avg, 1.0/float(nima))
		else:
			tavg = EMData(nx, ny, 1, False)                               

		if Fourvar:
			bcast_EMData_to_all(tavg, myid, main_node)
			vav, rvar = varf2d_MPI(myid, data, tavg, mask, "a", CTF)

		if myid == main_node:
			if Fourvar:
				tavg    = fft(Util.divn_img(fft(tavg), vav))
				vav_r	= Util.pack_complex_to_real(vav)

			# normalize and mask tavg in real space
			tavg = fft(tavg)
			stat = Util.infomask( tavg, mask, False ) 
			tavg -= stat[0]
			Util.mul_img(tavg, mask)
			# For testing purposes: shift tavg to some random place and see if the centering is still correct
			#tavg = rot_shift3D(tavg,sx=3,sy=-4)
			tavg = fft(tavg)

		if Fourvar:  del vav
		bcast_EMData_to_all(tavg, myid, main_node)

		sx_sum=0 
		sy_sum=0 
		if search_rng > 0: nwx = 2*search_rng+1
		else:              nwx = nx
		
		if search_rng_y > 0: nwy = 2*search_rng_y+1
		else:                nwy = ny

		not_zero = 0
		for im in xrange(len(data)):
			if oneDx:
				ctx = Util.window(ccf(data[im],tavg),nwx,1)
				p1  = peak_search(ctx)
				p1_x = -int(p1[0][3])
				ishift_x[im] = p1_x
				sx_sum += p1_x
			else:
				p1 = peak_search(Util.window(ccf(data[im],tavg), nwx,nwy))
				p1_x = -int(p1[0][4])
				p1_y = -int(p1[0][5])
				ishift_x[im] = p1_x
				ishift_y[im] = p1_y
				sx_sum += p1_x
				sy_sum += p1_y

			if not_zero == 0:
				if (not(ishift_x[im] == 0.0)) or (not(ishift_y[im] == 0.0)):
					not_zero = 1

		sx_sum = mpi_reduce(sx_sum, 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD)  

		if not oneDx:
			sy_sum = mpi_reduce(sy_sum, 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD)

		if myid == main_node:
			sx_sum_total = int(sx_sum[0])
			if not oneDx:
				sy_sum_total = int(sy_sum[0])
		else:
			sx_sum_total = 0	
			sy_sum_total = 0

		sx_sum_total = bcast_number_to_all(sx_sum_total, source_node = main_node)

		if not oneDx:
			sy_sum_total = bcast_number_to_all(sy_sum_total, source_node = main_node)

		sx_ave = round(float(sx_sum_total)/nima)
		sy_ave = round(float(sy_sum_total)/nima)
		for im in xrange(len(data)): 
			p1_x = ishift_x[im] - sx_ave
			p1_y = ishift_y[im] - sy_ave
			params2 = {"filter_type" : Processor.fourier_filter_types.SHIFT, "x_shift" : p1_x, "y_shift" : p1_y, "z_shift" : 0.0}
			data[im] = Processor.EMFourierFilter(data[im], params2)
			shift_x[im] += p1_x
			shift_y[im] += p1_y
		# stop if all shifts are zero
		not_zero = mpi_reduce(not_zero, 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD)  
		if myid == main_node:
			not_zero_all = int(not_zero[0])
		else:
			not_zero_all = 0
		not_zero_all = bcast_number_to_all(not_zero_all, source_node = main_node)

		if myid == main_node:
			print_msg("Time of iteration = %12.2f\n"%(time()-start_time))
			start_time = time()

		if not_zero_all == 0:  break

	#for im in xrange(len(data)): data[im] = fft(data[im])  This should not be required as only header information is used
	# combine shifts found with the original parameters
	for im in xrange(len(data)):		
		t0 = init_params[im]
		t1 = Transform()
		t1.set_params({"type":"2D","alpha":0,"scale":t0.get_scale(),"mirror":0,"tx":shift_x[im],"ty":shift_y[im]})
		# combine t0 and t1
		tt = t1*t0
		data[im].set_attr("xform.align2d", tt)  

	# write out headers and STOP, under MPI writing has to be done sequentially
	mpi_barrier(MPI_COMM_WORLD)
	par_str = ["xform.align2d", "ID"]
	if myid == main_node:
		from utilities import file_type
		if(file_type(stack) == "bdb"):
			from utilities import recv_attr_dict_bdb
			recv_attr_dict_bdb(main_node, stack, data, par_str, image_start, image_end, number_of_proc)
		else:
			from utilities import recv_attr_dict
			recv_attr_dict(main_node, stack, data, par_str, image_start, image_end, number_of_proc)
		
	else:           send_attr_dict(main_node, data, par_str, image_start, image_end)
	if myid == main_node: print_end_msg("shiftali_MPI")				
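The shift estimate inside the iteration above comes from cross-correlating each image with the current average (ccf), locating the peak (peak_search), and taking minus the peak offset. A minimal NumPy sketch of the same idea in 1D (not the EMAN2 calls; purely illustrative):

import numpy as np

def estimate_shift_1d(img, ref):
    # Circular cross-correlation via FFT; returns the displacement s such that
    # img is (approximately) ref rolled by s pixels, i.e. np.roll(ref, s) ~ img.
    cc = np.fft.ifft(np.fft.fft(ref) * np.conj(np.fft.fft(img))).real
    k = int(np.argmax(cc))
    n = len(ref)
    if k > n // 2:      # map peaks in the upper half to negative offsets
        k -= n
    return -k

ref = np.zeros(64); ref[30] = 1.0
img = np.roll(ref, 5)                # img is ref displaced by +5 pixels
print(estimate_shift_1d(img, ref))   # 5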
Example #12
def helicalshiftali_MPI(stack, maskfile=None, maxit=100, CTF=False, snr=1.0, Fourvar=False, search_rng=-1):
	from applications import MPI_start_end
	from utilities    import model_circle, model_blank, get_image, peak_search, get_im, pad
	from utilities    import reduce_EMData_to_root, bcast_EMData_to_all, send_attr_dict, file_type, bcast_number_to_all, bcast_list_to_all
	from statistics   import varf2d_MPI
	from fundamentals import fft, ccf, rot_shift3D, rot_shift2D, fshift
	from utilities    import get_params2D, set_params2D, chunks_distribution
	from utilities    import print_msg, print_begin_msg, print_end_msg
	import os
	import sys
	from mpi 	  	  import mpi_init, mpi_comm_size, mpi_comm_rank, MPI_COMM_WORLD
	from mpi 	  	  import mpi_reduce, mpi_bcast, mpi_barrier, mpi_gatherv
	from mpi 	  	  import MPI_SUM, MPI_FLOAT, MPI_INT
	from time         import time	
	from pixel_error  import ordersegments
	from math         import sqrt, atan2, tan, pi
	
	nproc = mpi_comm_size(MPI_COMM_WORLD)
	myid = mpi_comm_rank(MPI_COMM_WORLD)
	main_node = 0
		
	ftp = file_type(stack)

	if myid == main_node:
		print_begin_msg("helical-shiftali_MPI")

	max_iter=int(maxit)
	if( myid == main_node):
		infils = EMUtil.get_all_attributes(stack, "filament")
		ptlcoords = EMUtil.get_all_attributes(stack, 'ptcl_source_coord')
		filaments = ordersegments(infils, ptlcoords)
		total_nfils = len(filaments)
		inidl = [0]*total_nfils
		for i in xrange(total_nfils):  inidl[i] = len(filaments[i])
		linidl = sum(inidl)
		nima = linidl
		tfilaments = []
		for i in xrange(total_nfils):  tfilaments += filaments[i]
		del filaments
	else:
		total_nfils = 0
		linidl = 0
	total_nfils = bcast_number_to_all(total_nfils, source_node = main_node)
	if myid != main_node:
		inidl = [-1]*total_nfils
	inidl = bcast_list_to_all(inidl, myid, source_node = main_node)
	linidl = bcast_number_to_all(linidl, source_node = main_node)
	if myid != main_node:
		tfilaments = [-1]*linidl
	tfilaments = bcast_list_to_all(tfilaments, myid, source_node = main_node)
	filaments = []
	iendi = 0
	for i in xrange(total_nfils):
		isti = iendi
		iendi = isti+inidl[i]
		filaments.append(tfilaments[isti:iendi])
	del tfilaments,inidl

	if myid == main_node:
		print_msg( "total number of filaments: %d"%total_nfils)
	if total_nfils< nproc:
		ERROR('number of CPUs (%i) is larger than the number of filaments (%i), please reduce the number of CPUs used'%(nproc, total_nfils), "ehelix_MPI", 1,myid)

	#  balanced load
	temp = chunks_distribution([[len(filaments[i]), i] for i in xrange(len(filaments))], nproc)[myid:myid+1][0]
	filaments = [filaments[temp[i][1]] for i in xrange(len(temp))]
	nfils     = len(filaments)

	#filaments = [[0,1]]
	#print "filaments",filaments
	list_of_particles = []
	indcs = []
	k = 0
	for i in xrange(nfils):
		list_of_particles += filaments[i]
		k1 = k+len(filaments[i])
		indcs.append([k,k1])
		k = k1
	data = EMData.read_images(stack, list_of_particles)
	ldata = len(data)
	print "ldata=", ldata
	nx = data[0].get_xsize()
	ny = data[0].get_ysize()
	if maskfile == None:
		mrad = min(nx, ny)//2-2
		mask = pad( model_blank(2*mrad+1, ny, 1, 1.0), nx, ny, 1, 0.0)
	else:
		mask = get_im(maskfile)

	# apply initial xform.align2d parameters stored in header
	init_params = []
	for im in xrange(ldata):
		t = data[im].get_attr('xform.align2d')
		init_params.append(t)
		p = t.get_params("2d")
		data[im] = rot_shift2D(data[im], p['alpha'], p['tx'], p['ty'], p['mirror'], p['scale'])

	if CTF:
		from filter import filt_ctf
		from morphology   import ctf_img
		ctf_abs_sum = EMData(nx, ny, 1, False)
		ctf_2_sum = EMData(nx, ny, 1, False)
	else:
		ctf_2_sum = None
		ctf_abs_sum = None



	from utilities import info

	for im in xrange(ldata):
		data[im].set_attr('ID', list_of_particles[im])
		st = Util.infomask(data[im], mask, False)
		data[im] -= st[0]
		if CTF:
			ctf_params = data[im].get_attr("ctf")
			qctf = data[im].get_attr("ctf_applied")
			if qctf == 0:
				data[im] = filt_ctf(fft(data[im]), ctf_params)
				data[im].set_attr('ctf_applied', 1)
			elif qctf != 1:
				ERROR('Incorrectly set qctf flag', "helicalshiftali_MPI", 1,myid)
			ctfimg = ctf_img(nx, ctf_params, ny=ny)
			Util.add_img2(ctf_2_sum, ctfimg)
			Util.add_img_abs(ctf_abs_sum, ctfimg)
		else:  data[im] = fft(data[im])

	del list_of_particles		

	if CTF:
		reduce_EMData_to_root(ctf_2_sum, myid, main_node)
		reduce_EMData_to_root(ctf_abs_sum, myid, main_node)
	if CTF:
		if myid != main_node:
			del ctf_2_sum
			del ctf_abs_sum
		else:
			temp = EMData(nx, ny, 1, False)
			tsnr = 1./snr
			for i in xrange(0,nx+2,2):
				for j in xrange(ny):
					temp.set_value_at(i,j,tsnr)
					temp.set_value_at(i+1,j,0.0)
			#info(ctf_2_sum)
			Util.add_img(ctf_2_sum, temp)
			#info(ctf_2_sum)
			del temp

	total_iter = 0
	shift_x = [0.0]*ldata

	for Iter in xrange(max_iter):
		if myid == main_node:
			start_time = time()
			print_msg("Iteration #%4d\n"%(total_iter))
		total_iter += 1
		avg = EMData(nx, ny, 1, False)
		for im in xrange(ldata):
			Util.add_img(avg, fshift(data[im], shift_x[im]))

		reduce_EMData_to_root(avg, myid, main_node)

		if myid == main_node:
			if CTF:  tavg = Util.divn_filter(avg, ctf_2_sum)
			else:    tavg = Util.mult_scalar(avg, 1.0/float(nima))
		else:
			tavg = model_blank(nx,ny)

		if Fourvar:
			bcast_EMData_to_all(tavg, myid, main_node)
			vav, rvar = varf2d_MPI(myid, data, tavg, mask, "a", CTF)

		if myid == main_node:
			if Fourvar:
				tavg    = fft(Util.divn_img(fft(tavg), vav))
				vav_r	= Util.pack_complex_to_real(vav)
			# normalize and mask tavg in real space
			tavg = fft(tavg)
			stat = Util.infomask( tavg, mask, False )
			tavg -= stat[0]
			Util.mul_img(tavg, mask)
			tavg.write_image("tavg.hdf",Iter)
			# For testing purposes: shift tavg to some random place and see if the centering is still correct
			#tavg = rot_shift3D(tavg,sx=3,sy=-4)

		if Fourvar:  del vav
		bcast_EMData_to_all(tavg, myid, main_node)
		tavg = fft(tavg)

		sx_sum = 0.0
		nxc = nx//2
		
		for ifil in xrange(nfils):
			"""
			# Calculate filament average
			avg = EMData(nx, ny, 1, False)
			filnima = 0
			for im in xrange(indcs[ifil][0], indcs[ifil][1]):
				Util.add_img(avg, data[im])
				filnima += 1
			tavg = Util.mult_scalar(avg, 1.0/float(filnima))
			"""
			# Calculate 1D ccf between each segment and filament average
			nsegms = indcs[ifil][1]-indcs[ifil][0]
			ctx = [None]*nsegms
			pcoords = [None]*nsegms
			for im in xrange(indcs[ifil][0], indcs[ifil][1]):
				ctx[im-indcs[ifil][0]] = Util.window(ccf(tavg, data[im]), nx, 1)
				pcoords[im-indcs[ifil][0]] = data[im].get_attr('ptcl_source_coord')
				#ctx[im-indcs[ifil][0]].write_image("ctx.hdf",im-indcs[ifil][0])
				#print "  CTX  ",myid,im,Util.infomask(ctx[im-indcs[ifil][0]], None, True)
			# search for best x-shift
			cents = nsegms//2
			
			dst = sqrt(max((pcoords[cents][0] - pcoords[0][0])**2 + (pcoords[cents][1] - pcoords[0][1])**2, (pcoords[cents][0] - pcoords[-1][0])**2 + (pcoords[cents][1] - pcoords[-1][1])**2))
			maxincline = atan2(ny//2-2-float(search_rng),dst)
			kang = int(dst*tan(maxincline)+0.5)
			#print  "  settings ",nsegms,cents,dst,search_rng,maxincline,kang
			
			# ## C code for alignment. @ming
			results = [0.0]*3
			results = Util.helixshiftali(ctx, pcoords, nsegms, maxincline, kang, search_rng, nxc)
			sib = int(results[0])
			bang = results[1]
			qm = results[2]
			#print qm, sib, bang
			
			# qm = -1.e23	
# 				
# 			for six in xrange(-search_rng, search_rng+1,1):
# 				q0 = ctx[cents].get_value_at(six+nxc)
# 				for incline in xrange(kang+1):
# 					qt = q0
# 					qu = q0
# 					if(kang>0):  tang = tan(maxincline/kang*incline)
# 					else:        tang = 0.0
# 					for kim in xrange(cents+1,nsegms):
# 						dst = sqrt((pcoords[cents][0] - pcoords[kim][0])**2 + (pcoords[cents][1] - pcoords[kim][1])**2)
# 						xl = dst*tang+six+nxc
# 						ixl = int(xl)
# 						dxl = xl - ixl
# 						#print "  A  ", ifil,six,incline,kim,xl,ixl,dxl
# 						qt += (1.0-dxl)*ctx[kim].get_value_at(ixl) + dxl*ctx[kim].get_value_at(ixl+1)
# 						xl = -dst*tang+six+nxc
# 						ixl = int(xl)
# 						dxl = xl - ixl
# 						qu += (1.0-dxl)*ctx[kim].get_value_at(ixl) + dxl*ctx[kim].get_value_at(ixl+1)
# 					for kim in xrange(cents):
# 						dst = sqrt((pcoords[cents][0] - pcoords[kim][0])**2 + (pcoords[cents][1] - pcoords[kim][1])**2)
# 						xl = -dst*tang+six+nxc
# 						ixl = int(xl)
# 						dxl = xl - ixl
# 						qt += (1.0-dxl)*ctx[kim].get_value_at(ixl) + dxl*ctx[kim].get_value_at(ixl+1)
# 						xl =  dst*tang+six+nxc
# 						ixl = int(xl)
# 						dxl = xl - ixl
# 						qu += (1.0-dxl)*ctx[kim].get_value_at(ixl) + dxl*ctx[kim].get_value_at(ixl+1)
# 					if( qt > qm ):
# 						qm = qt
# 						sib = six
# 						bang = tang
# 					if( qu > qm ):
# 						qm = qu
# 						sib = six
# 						bang = -tang
					#if incline == 0:  print  "incline = 0  ",six,tang,qt,qu
			#print qm,six,sib,bang
			#print " got results   ",indcs[ifil][0], indcs[ifil][1], ifil,myid,qm,sib,tang,bang,len(ctx),Util.infomask(ctx[0], None, True)
			for im in xrange(indcs[ifil][0], indcs[ifil][1]):
				kim = im-indcs[ifil][0]
				dst = sqrt((pcoords[cents][0] - pcoords[kim][0])**2 + (pcoords[cents][1] - pcoords[kim][1])**2)
				if(kim < cents):  xl = -dst*bang+sib
				else:             xl =  dst*bang+sib
				shift_x[im] = xl
							
			# Average shift
			sx_sum += shift_x[indcs[ifil][0]+cents]
			
			
		# #print myid,sx_sum,total_nfils
		sx_sum = mpi_reduce(sx_sum, 1, MPI_FLOAT, MPI_SUM, main_node, MPI_COMM_WORLD)
		if myid == main_node:
			sx_sum = float(sx_sum[0])/total_nfils
			print_msg("Average shift  %6.2f\n"%(sx_sum))
		else:
			sx_sum = 0.0
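		# NOTE: as written, the unconditional reset on the next line means the value broadcast
		# below is always 0.0, so the average shift printed above is not subtracted from shift_x.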
		sx_sum = 0.0
		sx_sum = bcast_number_to_all(sx_sum, source_node = main_node)
		for im in xrange(ldata):
			shift_x[im] -= sx_sum
			#print  "   %3d  %6.3f"%(im,shift_x[im])
		#exit()


			
	# combine shifts found with the original parameters
	for im in xrange(ldata):		
		t1 = Transform()
		##import random
		##shix=random.randint(-10, 10)
		##t1.set_params({"type":"2D","tx":shix})
		t1.set_params({"type":"2D","tx":shift_x[im]})
		# combine t0 and t1
		tt = t1*init_params[im]
		data[im].set_attr("xform.align2d", tt)
	# write out headers and STOP, under MPI writing has to be done sequentially
	mpi_barrier(MPI_COMM_WORLD)
	par_str = ["xform.align2d", "ID"]
	if myid == main_node:
		from utilities import file_type
		if(file_type(stack) == "bdb"):
			from utilities import recv_attr_dict_bdb
			recv_attr_dict_bdb(main_node, stack, data, par_str, 0, ldata, nproc)
		else:
			from utilities import recv_attr_dict
			recv_attr_dict(main_node, stack, data, par_str, 0, ldata, nproc)
	else:           send_attr_dict(main_node, data, par_str, 0, ldata)
	if myid == main_node: print_end_msg("helical-shiftali_MPI")				
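The inclination search above is bounded by maxincline = atan2(ny//2 - 2 - search_rng, dst) and sampled in kang steps, where dst is the distance from the central segment to the farthest segment of the filament. A small sketch of just that geometry (assumed numbers, plain Python):

from math import atan2, tan

def incline_search_grid(ny, search_rng, dst):
    # Largest inclination for which a segment dst pixels away is displaced laterally
    # by at most ny//2 - 2 - search_rng pixels, and the number of inclination steps
    # (roughly one per pixel of that maximal displacement).
    maxincline = atan2(ny // 2 - 2 - float(search_rng), dst)
    kang = int(dst * tan(maxincline) + 0.5)
    return maxincline, kang

# 200-pixel-high boxes, a +/-10 pixel shift search, farthest segment 150 pixels away.
print(incline_search_grid(200, 10, 150.0))   # roughly (0.53 rad, 88 steps)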
Example #13
print "head did bcast"

##### scatter ####
scat=array([10,20,30],"i")
junk=mpi.mpi_scatter(scat,1,mpi.MPI_INT,1,mpi.MPI_INT,mpi.MPI_ROOT,newcom1)

##### send/recv ####
for i in range(0,copies):
	k=(i+1)*100
	mpi.mpi_send(k,1,mpi.MPI_INT,i,1234,newcom1)
	back=mpi.mpi_recv(1,mpi.MPI_INT,i,5678,newcom1)
	print "from ",i,back

##### reduce ####
dummy=1000
final=mpi.mpi_reduce(dummy,1,mpi.MPI_INT,mpi.MPI_SUM,mpi.MPI_ROOT,newcom1)


sleep(5)

print "the final answer is=",final

toRun=getcwd()+"/worker"
print mpi.mpi_get_processor_name(),"starting",toRun
newcom2=mpi.mpi_comm_spawn(toRun,"from_C_",copies,mpi.MPI_INFO_NULL,0,mpi.MPI_COMM_WORLD)
errors=mpi.mpi_array_of_errcodes()
print "errors=",errors
newcom2Size=mpi.mpi_comm_size(newcom2)
print "newcom2Size",newcom2Size
sleep(15)
Example #14
def mref_ali2d_MPI(stack,
                   refim,
                   outdir,
                   maskfile=None,
                   ir=1,
                   ou=-1,
                   rs=1,
                   xrng=0,
                   yrng=0,
                   step=1,
                   center=1,
                   maxit=10,
                   CTF=False,
                   snr=1.0,
                   user_func_name="ref_ali2d",
                   rand_seed=1000):
    # 2D multi-reference alignment using rotational ccf in polar coordinates and quadratic interpolation

    from sp_utilities import model_circle, combine_params2, inverse_transform2, drop_image, get_image, get_im
    from sp_utilities import reduce_EMData_to_root, bcast_EMData_to_all, bcast_number_to_all
    from sp_utilities import send_attr_dict
    from sp_utilities import center_2D
    from sp_statistics import fsc_mask
    from sp_alignment import Numrinit, ringwe, search_range
    from sp_fundamentals import rot_shift2D, fshift
    from sp_utilities import get_params2D, set_params2D
    from random import seed, randint
    from sp_morphology import ctf_2
    from sp_filter import filt_btwl, filt_params
    from numpy import reshape, shape
    from sp_utilities import print_msg, print_begin_msg, print_end_msg
    import os
    import sys
    import shutil
    from sp_applications import MPI_start_end
    from mpi import mpi_comm_size, mpi_comm_rank, MPI_COMM_WORLD
    from mpi import mpi_reduce, mpi_bcast, mpi_barrier, mpi_recv, mpi_send
    from mpi import MPI_SUM, MPI_FLOAT, MPI_INT

    number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
    myid = mpi_comm_rank(MPI_COMM_WORLD)
    main_node = 0

    # create the output directory, if it does not exist

    if os.path.exists(outdir):
        ERROR(
            'Output directory exists, please change the name and restart the program',
            "mref_ali2d_MPI ", 1, myid)
    mpi_barrier(MPI_COMM_WORLD)

    import sp_global_def
    if myid == main_node:
        os.mkdir(outdir)
        sp_global_def.LOGFILE = os.path.join(outdir, sp_global_def.LOGFILE)
        print_begin_msg("mref_ali2d_MPI")

    nima = EMUtil.get_image_count(stack)

    image_start, image_end = MPI_start_end(nima, number_of_proc, myid)

    nima = EMUtil.get_image_count(stack)
    ima = EMData()
    ima.read_image(stack, image_start)

    first_ring = int(ir)
    last_ring = int(ou)
    rstep = int(rs)
    max_iter = int(maxit)

    if max_iter == 0:
        max_iter = 10
        auto_stop = True
    else:
        auto_stop = False

    if myid == main_node:
        print_msg("Input stack                 : %s\n" % (stack))
        print_msg("Reference stack             : %s\n" % (refim))
        print_msg("Output directory            : %s\n" % (outdir))
        print_msg("Maskfile                    : %s\n" % (maskfile))
        print_msg("Inner radius                : %i\n" % (first_ring))

    nx = ima.get_xsize()
    # default value for the last ring
    if last_ring == -1: last_ring = nx // 2 - 2

    if myid == main_node:
        print_msg("Outer radius                : %i\n" % (last_ring))
        print_msg("Ring step                   : %i\n" % (rstep))
        print_msg("X search range              : %f\n" % (xrng))
        print_msg("Y search range              : %f\n" % (yrng))
        print_msg("Translational step          : %f\n" % (step))
        print_msg("Center type                 : %i\n" % (center))
        print_msg("Maximum iteration           : %i\n" % (max_iter))
        print_msg("CTF correction              : %s\n" % (CTF))
        print_msg("Signal-to-Noise Ratio       : %f\n" % (snr))
        print_msg("Random seed                 : %i\n\n" % (rand_seed))
        print_msg("User function               : %s\n" % (user_func_name))
    import sp_user_functions
    user_func = sp_user_functions.factory[user_func_name]

    if maskfile:
        import types
        if type(maskfile) is bytes: mask = get_image(maskfile)
        else: mask = maskfile
    else: mask = model_circle(last_ring, nx, nx)
    #  references, do them on all processors...
    refi = []
    numref = EMUtil.get_image_count(refim)

    # IMAGES ARE SQUARES! center is in SPIDER convention
    cnx = nx // 2 + 1
    cny = cnx

    mode = "F"
    #precalculate rings
    numr = Numrinit(first_ring, last_ring, rstep, mode)
    wr = ringwe(numr, mode)

    # prepare reference images on all nodes
    ima.to_zero()
    for j in range(numref):
        #  even, odd, number of even, number of images.  After frc, totav
        refi.append([get_im(refim, j), ima.copy(), 0])
    #  for each node read its share of data
    data = EMData.read_images(stack, list(range(image_start, image_end)))
    for im in range(image_start, image_end):
        data[im - image_start].set_attr('ID', im)

    if myid == main_node: seed(rand_seed)

    a0 = -1.0
    again = True
    Iter = 0

    ref_data = [mask, center, None, None]

    while Iter < max_iter and again:
        ringref = []
        mashi = cnx - last_ring - 2
        for j in range(numref):
            refi[j][0].process_inplace("normalize.mask", {
                "mask": mask,
                "no_sigma": 1
            })  # normalize reference images to N(0,1)
            cimage = Util.Polar2Dm(refi[j][0], cnx, cny, numr, mode)
            Util.Frngs(cimage, numr)
            Util.Applyws(cimage, numr, wr)
            ringref.append(cimage)
            # zero refi
            refi[j][0].to_zero()
            refi[j][1].to_zero()
            refi[j][2] = 0

        assign = [[] for i in range(numref)]
        # begin MPI section
        for im in range(image_start, image_end):
            alpha, sx, sy, mirror, scale = get_params2D(data[im - image_start])
            #  Why inverse?  07/11/2015 PAP
            alphai, sxi, syi, scalei = inverse_transform2(alpha, sx, sy)
            # normalize
            data[im - image_start].process_inplace("normalize.mask", {
                "mask": mask,
                "no_sigma": 0
            })  # subtract average under the mask
            # If shifts are outside of the permissible range, reset them
            if (abs(sxi) > mashi or abs(syi) > mashi):
                sxi = 0.0
                syi = 0.0
                set_params2D(data[im - image_start], [0.0, 0.0, 0.0, 0, 1.0])
            ny = nx
            txrng = search_range(nx, last_ring, sxi, xrng, "mref_ali2d_MPI")
            txrng = [txrng[1], txrng[0]]
            tyrng = search_range(ny, last_ring, syi, yrng, "mref_ali2d_MPI")
            tyrng = [tyrng[1], tyrng[0]]
            # align current image to the reference
            [angt, sxst, syst, mirrort, xiref,
             peakt] = Util.multiref_polar_ali_2d(data[im - image_start],
                                                 ringref, txrng, tyrng, step,
                                                 mode, numr, cnx + sxi,
                                                 cny + syi)

            iref = int(xiref)
            # combine parameters and set them to the header, ignore previous angle and mirror
            [alphan, sxn, syn,
             mn] = combine_params2(0.0, -sxi, -syi, 0, angt, sxst, syst,
                                   (int)(mirrort))
            set_params2D(data[im - image_start],
                         [alphan, sxn, syn, int(mn), scale])
            data[im - image_start].set_attr('assign', iref)
            # apply current parameters and add to the average
            temp = rot_shift2D(data[im - image_start], alphan, sxn, syn, mn)
            it = im % 2
            Util.add_img(refi[iref][it], temp)
            assign[iref].append(im)
            #assign[im] = iref
            refi[iref][2] += 1.0
        del ringref
        # end MPI section, bring partial things together, calculate new reference images, broadcast them back

        for j in range(numref):
            reduce_EMData_to_root(refi[j][0], myid, main_node)
            reduce_EMData_to_root(refi[j][1], myid, main_node)
            refi[j][2] = mpi_reduce(refi[j][2], 1, MPI_FLOAT, MPI_SUM,
                                    main_node, MPI_COMM_WORLD)
            if (myid == main_node): refi[j][2] = int(refi[j][2][0])
        # gather assignments
        for j in range(numref):
            if myid == main_node:
                for n in range(number_of_proc):
                    if n != main_node:
                        import sp_global_def
                        ln = mpi_recv(1, MPI_INT, n,
                                      sp_global_def.SPARX_MPI_TAG_UNIVERSAL,
                                      MPI_COMM_WORLD)
                        lis = mpi_recv(ln[0], MPI_INT, n,
                                       sp_global_def.SPARX_MPI_TAG_UNIVERSAL,
                                       MPI_COMM_WORLD)
                        for l in range(ln[0]):
                            assign[j].append(int(lis[l]))
            else:
                import sp_global_def
                mpi_send(len(assign[j]), 1, MPI_INT, main_node,
                         sp_global_def.SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
                mpi_send(assign[j], len(assign[j]), MPI_INT, main_node,
                         sp_global_def.SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)

        if myid == main_node:
            # replace the name of the stack with reference with the current one
            refim = os.path.join(outdir, "aqm%03d.hdf" % Iter)
            a1 = 0.0
            ave_fsc = []
            for j in range(numref):
                if refi[j][2] < 4:
                    #ERROR("One of the references vanished","mref_ali2d_MPI",1)
                    #  if vanished, put a random image (only from main node!) there
                    assign[j] = []
                    assign[j].append(
                        randint(image_start, image_end - 1) - image_start)
                    refi[j][0] = data[assign[j][0]].copy()
                    #print 'ERROR', j
                else:
                    #frsc = fsc_mask(refi[j][0], refi[j][1], mask, 1.0, os.path.join(outdir,"drm%03d%04d"%(Iter, j)))
                    from sp_statistics import fsc
                    frsc = fsc(
                        refi[j][0], refi[j][1], 1.0,
                        os.path.join(outdir, "drm%03d%04d.txt" % (Iter, j)))
                    Util.add_img(refi[j][0], refi[j][1])
                    Util.mul_scalar(refi[j][0], 1.0 / float(refi[j][2]))

                    if ave_fsc == []:
                        for i in range(len(frsc[1])):
                            ave_fsc.append(frsc[1][i])
                        c_fsc = 1
                    else:
                        for i in range(len(frsc[1])):
                            ave_fsc[i] += frsc[1][i]
                        c_fsc += 1
                    #print 'OK', j, len(frsc[1]), frsc[1][0:5], ave_fsc[0:5]

            #print 'sum', sum(ave_fsc)
            if sum(ave_fsc) != 0:
                for i in range(len(ave_fsc)):
                    ave_fsc[i] /= float(c_fsc)
                    frsc[1][i] = ave_fsc[i]

            for j in range(numref):
                ref_data[2] = refi[j][0]
                ref_data[3] = frsc
                refi[j][0], cs = user_func(ref_data)

                # write the current average
                TMP = []
                for i_tmp in range(len(assign[j])):
                    TMP.append(float(assign[j][i_tmp]))
                TMP.sort()
                refi[j][0].set_attr_dict({'ave_n': refi[j][2], 'members': TMP})
                del TMP
                refi[j][0].process_inplace("normalize.mask", {
                    "mask": mask,
                    "no_sigma": 1
                })
                refi[j][0].write_image(refim, j)

            Iter += 1
            msg = "ITERATION #%3d        %d\n\n" % (Iter, again)
            print_msg(msg)
            for j in range(numref):
                msg = "   group #%3d   number of particles = %7d\n" % (
                    j, refi[j][2])
                print_msg(msg)
        Iter = bcast_number_to_all(Iter, main_node)  # need to tell all
        if again:
            for j in range(numref):
                bcast_EMData_to_all(refi[j][0], myid, main_node)

    #  clean up
    del assign
    # write out headers and STOP, under MPI writing has to be done sequentially (time-consuming)
    mpi_barrier(MPI_COMM_WORLD)
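    # NOTE: data_had_ctf is not defined in this function as shown; if CTF is True, this
    # check would raise a NameError unless data_had_ctf is set elsewhere.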
    if CTF and data_had_ctf == 0:
        for im in range(len(data)):
            data[im].set_attr('ctf_applied', 0)
    par_str = ['xform.align2d', 'assign', 'ID']
    if myid == main_node:
        from sp_utilities import file_type
        if (file_type(stack) == "bdb"):
            from sp_utilities import recv_attr_dict_bdb
            recv_attr_dict_bdb(main_node, stack, data, par_str, image_start,
                               image_end, number_of_proc)
        else:
            from sp_utilities import recv_attr_dict
            recv_attr_dict(main_node, stack, data, par_str, image_start,
                           image_end, number_of_proc)
    else:
        send_attr_dict(main_node, data, par_str, image_start, image_end)
    if myid == main_node:
        print_end_msg("mref_ali2d_MPI")
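A minimal sketch (plain Python, made-up numbers) of the FSC averaging done above: the per-reference FSC curves are summed element-wise, divided by the number of references that contributed, and the averaged curve is then passed to the user function for every reference. Here all three references are assumed to contribute; in the code, references with fewer than 4 assigned images are skipped and replaced by a random image.

fsc_curves = [
    [1.0, 0.95, 0.80, 0.55],   # reference 0
    [1.0, 0.90, 0.70, 0.45],   # reference 1
    [1.0, 0.85, 0.75, 0.50],   # reference 2
]
ave_fsc = [0.0] * len(fsc_curves[0])
for curve in fsc_curves:
    for i, v in enumerate(curve):
        ave_fsc[i] += v
ave_fsc = [v / len(fsc_curves) for v in ave_fsc]
print(ave_fsc)   # approximately [1.0, 0.9, 0.75, 0.5]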
Example #15
def main():

	def params_3D_2D_NEW(phi, theta, psi, s2x, s2y, mirror):
		if mirror:
			m = 1
			alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)
		else:
			m = 0
			alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)
		return  alpha, sx, sy, m
	
	progname = os.path.basename(sys.argv[0])
	usage = progname + " prj_stack  --ave2D= --var2D=  --ave3D= --var3D= --img_per_grp= --fl=0.2 --aa=0.1  --sym=symmetry --CTF"
	parser = OptionParser(usage, version=SPARXVERSION)

	parser.add_option("--ave2D",		type="string"	   ,	default=False,				help="write to the disk a stack of 2D averages")
	parser.add_option("--var2D",		type="string"	   ,	default=False,				help="write to the disk a stack of 2D variances")
	parser.add_option("--ave3D",		type="string"	   ,	default=False,				help="write to the disk reconstructed 3D average")
	parser.add_option("--var3D",		type="string"	   ,	default=False,				help="compute 3D variability (time consuming!)")
	parser.add_option("--img_per_grp",	type="int"         ,	default=10   ,				help="number of neighbouring projections")
	parser.add_option("--no_norm",		action="store_true",	default=False,				help="do not use normalization")
	parser.add_option("--radiusvar", 	type="int"         ,	default=-1   ,				help="radius for 3D var" )
	parser.add_option("--npad",			type="int"         ,	default=2    ,				help="number of times to pad the original images")
	parser.add_option("--sym" , 		type="string"      ,	default="c1" ,				help="symmetry")
	parser.add_option("--fl",			type="float"       ,	default=0.0  ,				help="stop-band frequency (Default - no filtration)")
	parser.add_option("--aa",			type="float"       ,	default=0.0  ,				help="fall off of the filter (Default - no filtration)")
	parser.add_option("--CTF",			action="store_true",	default=False,				help="use CTF correction")
	parser.add_option("--VERBOSE",		action="store_true",	default=False,				help="Long output for debugging")
	#parser.add_option("--MPI" , 		action="store_true",	default=False,				help="use MPI version")
	#parser.add_option("--radiuspca", 	type="int"         ,	default=-1   ,				help="radius for PCA" )
	#parser.add_option("--iter", 		type="int"         ,	default=40   ,				help="maximum number of iterations (stop criterion of reconstruction process)" )
	#parser.add_option("--abs", 			type="float"       ,	default=0.0  ,				help="minimum average absolute change of voxels' values (stop criterion of reconstruction process)" )
	#parser.add_option("--squ", 			type="float"       ,	default=0.0  ,				help="minimum average squared change of voxels' values (stop criterion of reconstruction process)" )
	parser.add_option("--VAR" , 		action="store_true",	default=False,				help="stack on input consists of 2D variances (Default False)")
	parser.add_option("--decimate",     type="float",           default=1.0,                 help="image decimation rate, a number larger than 1. Default is 1")
	parser.add_option("--window",       type="int",             default=0,                   help="reduce images to a smaller image size without changing pixel_size. Default value is zero.")
	#parser.add_option("--SND",			action="store_true",	default=False,				help="compute squared normalized differences (Default False)")
	parser.add_option("--nvec",			type="int"         ,	default=0    ,				help="number of eigenvectors, default = 0 meaning no PCA calculated")
	parser.add_option("--symmetrize",	action="store_true",	default=False,				help="Prepare input stack for handling symmetry (Default False)")
	
	(options,args) = parser.parse_args()
	#####
	from mpi import mpi_init, mpi_comm_rank, mpi_comm_size, mpi_recv, MPI_COMM_WORLD, MPI_TAG_UB
	from mpi import mpi_barrier, mpi_reduce, mpi_bcast, mpi_send, MPI_FLOAT, MPI_SUM, MPI_INT, MPI_MAX
	from applications import MPI_start_end
	from reconstruction import recons3d_em, recons3d_em_MPI
	from reconstruction	import recons3d_4nn_MPI, recons3d_4nn_ctf_MPI
	from utilities import print_begin_msg, print_end_msg, print_msg
	from utilities import read_text_row, get_image, get_im
	from utilities import bcast_EMData_to_all, bcast_number_to_all
	from utilities import get_symt

	#  This is code for handling symmetries by the above program.  To be incorporated. PAP 01/27/2015

	from EMAN2db import db_open_dict
	
	if options.symmetrize :
		try:
			sys.argv = mpi_init(len(sys.argv), sys.argv)
			try:	
				number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
				if( number_of_proc > 1 ):
					ERROR("Cannot use more than one CPU for symmetry preparation","sx3dvariability",1)
			except:
				pass
		except:
			pass

		#  Input
		#instack = "Clean_NORM_CTF_start_wparams.hdf"
		#instack = "bdb:data"
		instack = args[0]
		sym = options.sym
		if( sym == "c1" ):
			ERROR("There is no need to symmetrize stack for C1 symmetry","sx3dvariability",1)

		if(instack[:4] !="bdb:"):
			stack = "bdb:data"
			delete_bdb(stack)
			cmdexecute("sxcpy.py  "+instack+"  "+stack)
		else:
			stack = instack

		qt = EMUtil.get_all_attributes(stack,'xform.projection')

		na = len(qt)
		ts = get_symt(sym)
		ks = len(ts)
		angsa = [None]*na
		for k in xrange(ks):
			delete_bdb("bdb:Q%1d"%k)
			cmdexecute("e2bdb.py  "+stack+"  --makevstack=bdb:Q%1d"%k)
			DB = db_open_dict("bdb:Q%1d"%k)
			for i in xrange(na):
				ut = qt[i]*ts[k]
				DB.set_attr(i, "xform.projection", ut)
				#bt = ut.get_params("spider")
				#angsa[i] = [round(bt["phi"],3)%360.0, round(bt["theta"],3)%360.0, bt["psi"], -bt["tx"], -bt["ty"]]
			#write_text_row(angsa, 'ptsma%1d.txt'%k)
			#cmdexecute("e2bdb.py  "+stack+"  --makevstack=bdb:Q%1d"%k)
			#cmdexecute("sxheader.py  bdb:Q%1d  --params=xform.projection  --import=ptsma%1d.txt"%(k,k))
			DB.close()
		delete_bdb("bdb:sdata")
		cmdexecute("e2bdb.py . --makevstack=bdb:sdata --filt=Q")
		#cmdexecute("ls  EMAN2DB/sdata*")
		a = get_im("bdb:sdata")
		a.set_attr("variabilitysymmetry",sym)
		a.write_image("bdb:sdata")


	else:

		sys.argv = mpi_init(len(sys.argv), sys.argv)
		myid     = mpi_comm_rank(MPI_COMM_WORLD)
		number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
		main_node = 0

		if len(args) == 1:
			stack = args[0]
		else:
			print( "usage: " + usage)
			print( "Please run '" + progname + " -h' for detailed options")
			return 1

		t0 = time()
	
		# obsolete flags
		options.MPI = True
		options.nvec = 0
		options.radiuspca = -1
		options.iter = 40
		options.abs = 0.0
		options.squ = 0.0
		options.SND = False

		if options.fl > 0.0 and options.aa == 0.0:
			ERROR("Fall off has to be given for the low-pass filter", "sx3dvariability", 1, myid)
		if options.VAR and options.SND:
			ERROR("Only one of var and SND can be set!", "sx3dvariability", myid)
			exit()
		if options.VAR and (options.ave2D or options.ave3D or options.var2D): 
			ERROR("When VAR is set, the program cannot output ave2D, ave3D or var2D", "sx3dvariability", 1, myid)
			exit()
		#if options.SND and (options.ave2D or options.ave3D):
		#	ERROR("When SND is set, the program cannot output ave2D or ave3D", "sx3dvariability", 1, myid)
		#	exit()
		if options.nvec > 0 :
			ERROR("PCA option not implemented", "sx3dvariability", 1, myid)
			exit()
		if options.nvec > 0 and options.ave3D == None:
			ERROR("When doing PCA analysis, one must set ave3D", "sx3dvariability", myid=myid)
			exit()
		import string
		options.sym = options.sym.lower()
		 
		if global_def.CACHE_DISABLE:
			from utilities import disable_bdb_cache
			disable_bdb_cache()
		global_def.BATCH = True

		if myid == main_node:
			print_begin_msg("sx3dvariability")
			print_msg("%-70s:  %s\n"%("Input stack", stack))
	
		img_per_grp = options.img_per_grp
		nvec = options.nvec
		radiuspca = options.radiuspca

		symbaselen = 0
		if myid == main_node:
			nima = EMUtil.get_image_count(stack)
			img  = get_image(stack)
			nx   = img.get_xsize()
			ny   = img.get_ysize()
			if options.sym != "c1" :
				imgdata = get_im(stack)
				try:
					i = imgdata.get_attr("variabilitysymmetry")
					if(i != options.sym):
						ERROR("The symmetry provided does not agree with the symmetry of the input stack", "sx3dvariability", myid=myid)
				except:
					ERROR("Input stack is not prepared for symmetry, please follow instructions", "sx3dvariability", myid=myid)
				from utilities import get_symt
				i = len(get_symt(options.sym))
				if((nima/i)*i != nima):
					ERROR("The length of the input stack is incorrect for symmetry processing", "sx3dvariability", myid=myid)
				symbaselen = nima/i
			else:  symbaselen = nima
		else:
			nima = 0
			nx = 0
			ny = 0
		nima = bcast_number_to_all(nima)
		nx   = bcast_number_to_all(nx)
		ny   = bcast_number_to_all(ny)
		Tracker ={}
		Tracker["nx"]  =nx
		Tracker["ny"]  =ny
		Tracker["total_stack"]=nima
		if options.decimate==1.:
			if options.window !=0:
				nx = options.window
				ny = options.window
		else:
			if options.window ==0:
				nx = int(nx/options.decimate)
				ny = int(ny/options.decimate)
			else:
				nx = int(options.window/options.decimate)
				ny = nx
		symbaselen = bcast_number_to_all(symbaselen)
		if radiuspca == -1: radiuspca = nx/2-2

		if myid == main_node:
			print_msg("%-70s:  %d\n"%("Number of projections", nima))
		
		img_begin, img_end = MPI_start_end(nima, number_of_proc, myid)
		"""
		if options.SND:
			from projection		import prep_vol, prgs
			from statistics		import im_diff
			from utilities		import get_im, model_circle, get_params_proj, set_params_proj
			from utilities		import get_ctf, generate_ctf
			from filter			import filt_ctf
		
			imgdata = EMData.read_images(stack, range(img_begin, img_end))

			if options.CTF:
				vol = recons3d_4nn_ctf_MPI(myid, imgdata, 1.0, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)
			else:
				vol = recons3d_4nn_MPI(myid, imgdata, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)

			bcast_EMData_to_all(vol, myid)
			volft, kb = prep_vol(vol)

			mask = model_circle(nx/2-2, nx, ny)
			varList = []
			for i in xrange(img_begin, img_end):
				phi, theta, psi, s2x, s2y = get_params_proj(imgdata[i-img_begin])
				ref_prj = prgs(volft, kb, [phi, theta, psi, -s2x, -s2y])
				if options.CTF:
					ctf_params = get_ctf(imgdata[i-img_begin])
					ref_prj = filt_ctf(ref_prj, generate_ctf(ctf_params))
				diff, A, B = im_diff(ref_prj, imgdata[i-img_begin], mask)
				diff2 = diff*diff
				set_params_proj(diff2, [phi, theta, psi, s2x, s2y])
				varList.append(diff2)
			mpi_barrier(MPI_COMM_WORLD)
		"""
		if options.VAR:
			#varList = EMData.read_images(stack, range(img_begin, img_end))
			varList = []
			this_image = EMData()
			for index_of_particle in xrange(img_begin,img_end):
				this_image.read_image(stack,index_of_particle)
				varList.append(image_decimate_window_xform_ctf(this_image,options.decimate,options.window,options.CTF))
		else:
			from utilities		import bcast_number_to_all, bcast_list_to_all, send_EMData, recv_EMData
			from utilities		import set_params_proj, get_params_proj, params_3D_2D, get_params2D, set_params2D, compose_transform2
			from utilities		import model_blank, nearest_proj, model_circle
			from applications	import pca
			from statistics		import avgvar, avgvar_ctf, ccc
			from filter		    import filt_tanl
			from morphology		import threshold, square_root
			from projection 	import project, prep_vol, prgs
			from sets		    import Set

			if myid == main_node:
				t1 = time()
				proj_angles = []
				aveList = []
				tab = EMUtil.get_all_attributes(stack, 'xform.projection')
				for i in xrange(nima):
					t     = tab[i].get_params('spider')
					phi   = t['phi']
					theta = t['theta']
					psi   = t['psi']
					x     = theta
					if x > 90.0: x = 180.0 - x
					x = x*10000+psi
					proj_angles.append([x, t['phi'], t['theta'], t['psi'], i])
				t2 = time()
				print_msg("%-70s:  %d\n"%("Number of neighboring projections", img_per_grp))
				print_msg("...... Finding neighboring projections\n")
				if options.VERBOSE:
					print "Number of images per group: ", img_per_grp
					print "Now grouping projections"
				proj_angles.sort()
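			# Each projection gets a composite sort key x*10000+psi, where x is the
			# tilt angle theta folded into [0, 90], so projections with similar
			# orientations end up next to each other after the sort.  The sorted
			# angles are then flattened into proj_angles_list and broadcast below,
			# because only the main node read the headers.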

			proj_angles_list = [0.0]*(nima*4)
			if myid == main_node:
				for i in xrange(nima):
					proj_angles_list[i*4]   = proj_angles[i][1]
					proj_angles_list[i*4+1] = proj_angles[i][2]
					proj_angles_list[i*4+2] = proj_angles[i][3]
					proj_angles_list[i*4+3] = proj_angles[i][4]
			proj_angles_list = bcast_list_to_all(proj_angles_list, myid, main_node)
			proj_angles = []
			for i in xrange(nima):
				proj_angles.append([proj_angles_list[i*4], proj_angles_list[i*4+1], proj_angles_list[i*4+2], int(proj_angles_list[i*4+3])])
			del proj_angles_list

			proj_list, mirror_list = nearest_proj(proj_angles, img_per_grp, range(img_begin, img_end))

			all_proj = Set()
			for im in proj_list:
				for jm in im:
					all_proj.add(proj_angles[jm][3])

			all_proj = list(all_proj)
			if options.VERBOSE:
				print "On node %2d, number of images needed to be read = %5d"%(myid, len(all_proj))

			index = {}
			for i in xrange(len(all_proj)): index[all_proj[i]] = i
			mpi_barrier(MPI_COMM_WORLD)

			if myid == main_node:
				print_msg("%-70s:  %.2f\n"%("Finding neighboring projections lasted [s]", time()-t2))
				print_msg("%-70s:  %d\n"%("Number of groups processed on the main node", len(proj_list)))
				if options.VERBOSE:
					print "Grouping projections took: ", (time()-t2)/60	, "[min]"
					print "Number of groups on main node: ", len(proj_list)
			mpi_barrier(MPI_COMM_WORLD)

			if myid == main_node:
				print_msg("...... calculating the stack of 2D variances \n")
				if options.VERBOSE:
					print "Now calculating the stack of 2D variances"

			proj_params = [0.0]*(nima*5)
			aveList = []
			varList = []				
			if nvec > 0:
				eigList = [[] for i in xrange(nvec)]

			if options.VERBOSE: 	print "Begin to read images on processor %d"%(myid)
			ttt = time()
			#imgdata = EMData.read_images(stack, all_proj)
			img     = EMData()
			imgdata = []
			for index_of_proj in xrange(len(all_proj)):
				img.read_image(stack, all_proj[index_of_proj])
				dmg = image_decimate_window_xform_ctf(img,options.decimate,options.window,options.CTF)
				#print dmg.get_xsize(), "init"
				imgdata.append(dmg)
			if options.VERBOSE:
				print "Reading images on processor %d done, time = %.2f"%(myid, time()-ttt)
				print "On processor %d, we got %d images"%(myid, len(imgdata))
			mpi_barrier(MPI_COMM_WORLD)

			'''	
			imgdata2 = EMData.read_images(stack, range(img_begin, img_end))
			if options.fl > 0.0:
				for k in xrange(len(imgdata2)):
					imgdata2[k] = filt_tanl(imgdata2[k], options.fl, options.aa)
			if options.CTF:
				vol = recons3d_4nn_ctf_MPI(myid, imgdata2, 1.0, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)
			else:
				vol = recons3d_4nn_MPI(myid, imgdata2, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)
			if myid == main_node:
				vol.write_image("vol_ctf.hdf")
				print_msg("Writing to the disk volume reconstructed from averages as		:  %s\n"%("vol_ctf.hdf"))
			del vol, imgdata2
			mpi_barrier(MPI_COMM_WORLD)
			'''
			from applications import prepare_2d_forPCA
			from utilities import model_blank
			for i in xrange(len(proj_list)):
				ki = proj_angles[proj_list[i][0]][3]
				if ki >= symbaselen:  continue
				mi = index[ki]
				phiM, thetaM, psiM, s2xM, s2yM = get_params_proj(imgdata[mi])

				grp_imgdata = []
				for j in xrange(img_per_grp):
					mj = index[proj_angles[proj_list[i][j]][3]]
					phi, theta, psi, s2x, s2y = get_params_proj(imgdata[mj])
					alpha, sx, sy, mirror = params_3D_2D_NEW(phi, theta, psi, s2x, s2y, mirror_list[i][j])
					if thetaM <= 90:
						if mirror == 0:  alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, phiM-phi, 0.0, 0.0, 1.0)
						else:            alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, 180-(phiM-phi), 0.0, 0.0, 1.0)
					else:
						if mirror == 0:  alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, -(phiM-phi), 0.0, 0.0, 1.0)
						else:            alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, -(180-(phiM-phi)), 0.0, 0.0, 1.0)
					set_params2D(imgdata[mj], [alpha, sx, sy, mirror, 1.0])
					grp_imgdata.append(imgdata[mj])
					#print grp_imgdata[j].get_xsize(), imgdata[mj].get_xsize()

				if not options.no_norm:
					#print grp_imgdata[j].get_xsize()
					mask = model_circle(nx/2-2, nx, nx)
					for k in xrange(img_per_grp):
						ave, std, minn, maxx = Util.infomask(grp_imgdata[k], mask, False)
						grp_imgdata[k] -= ave
						grp_imgdata[k] /= std
					del mask
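				# Low-pass filtering step (the if/else below): when --fl is given, every
				# group image is padded to twice its size, optionally multiplied by the
				# binary CTF (binary=1 appears to keep only the sign of the CTF, i.e.
				# phase flipping), filtered with a hyperbolic tangent low-pass filter
				# (filt_tanl) and windowed back to nx x ny.  Without --fl only the CTF
				# phase flip is applied.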

				if options.fl > 0.0:
					from filter import filt_ctf, filt_table
					from fundamentals import fft, window2d
					nx2 = 2*nx
					ny2 = 2*ny
					if options.CTF:
						from utilities import pad
						for k in xrange(img_per_grp):
							grp_imgdata[k] = window2d(fft( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa) ),nx,ny)
							#grp_imgdata[k] = window2d(fft( filt_table( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa), fifi) ),nx,ny)
							#grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa)
					else:
						for k in xrange(img_per_grp):
							grp_imgdata[k] = filt_tanl( grp_imgdata[k], options.fl, options.aa)
							#grp_imgdata[k] = window2d(fft( filt_table( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa), fifi) ),nx,ny)
							#grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa)
				else:
					from utilities import pad, read_text_file
					from filter import filt_ctf, filt_table
					from fundamentals import fft, window2d
					nx2 = 2*nx
					ny2 = 2*ny
					if options.CTF:
						from utilities import pad
						for k in xrange(img_per_grp):
							grp_imgdata[k] = window2d( fft( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1) ) , nx,ny)
							#grp_imgdata[k] = window2d(fft( filt_table( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa), fifi) ),nx,ny)
							#grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa)

				'''
				if i < 10 and myid == main_node:
					for k in xrange(10):
						grp_imgdata[k].write_image("grp%03d.hdf"%i, k)
				'''
				"""
				if myid == main_node and i==0:
					for pp in xrange(len(grp_imgdata)):
						grp_imgdata[pp].write_image("pp.hdf", pp)
				"""
				ave, grp_imgdata = prepare_2d_forPCA(grp_imgdata)
				"""
				if myid == main_node and i==0:
					for pp in xrange(len(grp_imgdata)):
						grp_imgdata[pp].write_image("qq.hdf", pp)
				"""

				var = model_blank(nx,ny)
				for q in grp_imgdata:  Util.add_img2( var, q )
				Util.mul_scalar( var, 1.0/(len(grp_imgdata)-1))
				# Switch to std dev
				var = square_root(threshold(var))
				#if options.CTF:	ave, var = avgvar_ctf(grp_imgdata, mode="a")
				#else:	            ave, var = avgvar(grp_imgdata, mode="a")
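				# The 'var' image computed above is the pixel-wise sum of squares of the
				# group images (which prepare_2d_forPCA appears to return with the group
				# average removed) divided by n-1, thresholded to non-negative values and
				# converted to a standard deviation via square_root.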
				"""
				if myid == main_node:
					ave.write_image("avgv.hdf",i)
					var.write_image("varv.hdf",i)
				"""
			
				set_params_proj(ave, [phiM, thetaM, 0.0, 0.0, 0.0])
				set_params_proj(var, [phiM, thetaM, 0.0, 0.0, 0.0])

				aveList.append(ave)
				varList.append(var)

				if options.VERBOSE:
					print "%5.2f%% done on processor %d"%(i*100.0/len(proj_list), myid)
				if nvec > 0:
					eig = pca(input_stacks=grp_imgdata, subavg="", mask_radius=radiuspca, nvec=nvec, incore=True, shuffle=False, genbuf=True)
					for k in xrange(nvec):
						set_params_proj(eig[k], [phiM, thetaM, 0.0, 0.0, 0.0])
						eigList[k].append(eig[k])
					"""
					if myid == 0 and i == 0:
						for k in xrange(nvec):
							eig[k].write_image("eig.hdf", k)
					"""

			del imgdata
			#  To this point, all averages, variances, and eigenvectors are computed
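			# Gather pattern used for the 2D output stacks below: the main node first
			# writes its own averages, then, for every other rank, receives the count
			# of images (mpi_recv) followed by the images themselves (recv_EMData,
			# message tag im+i+70000), resampling each received image to Tracker["nx"]
			# with fpol before writing.  Worker ranks do the matching
			# mpi_send/send_EMData calls.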

			if options.ave2D:
				from fundamentals import fpol
				if myid == main_node:
					km = 0
					for i in xrange(number_of_proc):
						if i == main_node :
							for im in xrange(len(aveList)):
								aveList[im].write_image(options.ave2D, km)
								km += 1
						else:
							nl = mpi_recv(1, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)
							nl = int(nl[0])
							for im in xrange(nl):
								ave = recv_EMData(i, im+i+70000)
								"""
								nm = mpi_recv(1, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)
								nm = int(nm[0])
								members = mpi_recv(nm, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)
								ave.set_attr('members', map(int, members))
								members = mpi_recv(nm, MPI_FLOAT, i, MPI_TAG_UB, MPI_COMM_WORLD)
								ave.set_attr('pix_err', map(float, members))
								members = mpi_recv(3, MPI_FLOAT, i, MPI_TAG_UB, MPI_COMM_WORLD)
								ave.set_attr('refprojdir', map(float, members))
								"""
								tmpvol=fpol(ave, Tracker["nx"],Tracker["nx"],Tracker["nx"])								
								tmpvol.write_image(options.ave2D, km)
								km += 1
				else:
					mpi_send(len(aveList), 1, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
					for im in xrange(len(aveList)):
						send_EMData(aveList[im], main_node,im+myid+70000)
						"""
						members = aveList[im].get_attr('members')
						mpi_send(len(members), 1, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
						mpi_send(members, len(members), MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
						members = aveList[im].get_attr('pix_err')
						mpi_send(members, len(members), MPI_FLOAT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
						try:
							members = aveList[im].get_attr('refprojdir')
							mpi_send(members, 3, MPI_FLOAT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
						except:
							mpi_send([-999.0,-999.0,-999.0], 3, MPI_FLOAT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
						"""

			if options.ave3D:
				from fundamentals import fpol
				if options.VERBOSE:
					print "Reconstructing 3D average volume"
				ave3D = recons3d_4nn_MPI(myid, aveList, symmetry=options.sym, npad=options.npad)
				bcast_EMData_to_all(ave3D, myid)
				if myid == main_node:
					ave3D=fpol(ave3D,Tracker["nx"],Tracker["nx"],Tracker["nx"])
					ave3D.write_image(options.ave3D)
					print_msg("%-70s:  %s\n"%("Writing to the disk volume reconstructed from averages as", options.ave3D))
			del ave, var, proj_list, stack, phi, theta, psi, s2x, s2y, alpha, sx, sy, mirror, aveList

			if nvec > 0:
				for k in xrange(nvec):
					if options.VERBOSE:
						print "Reconstruction eigenvolumes", k
					cont = True
					ITER = 0
					mask2d = model_circle(radiuspca, nx, nx)
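					# Sign-consistency loop for the eigenimages: reconstruct the
					# eigenvolume, reproject it along each member's direction and flip
					# the sign of any 2D eigenimage whose correlation (ccc) with its
					# reprojection is negative; repeat until no rank flips anything
					# (the per-rank 'cont' flags are combined with an MPI_MAX reduction).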
					while cont:
						#print "On node %d, iteration %d"%(myid, ITER)
						eig3D = recons3d_4nn_MPI(myid, eigList[k], symmetry=options.sym, npad=options.npad)
						bcast_EMData_to_all(eig3D, myid, main_node)
						if options.fl > 0.0:
							eig3D = filt_tanl(eig3D, options.fl, options.aa)
						if myid == main_node:
							eig3D.write_image("eig3d_%03d.hdf"%k, ITER)
						Util.mul_img( eig3D, model_circle(radiuspca, nx, nx, nx) )
						eig3Df, kb = prep_vol(eig3D)
						del eig3D
						cont = False
						icont = 0
						for l in xrange(len(eigList[k])):
							phi, theta, psi, s2x, s2y = get_params_proj(eigList[k][l])
							proj = prgs(eig3Df, kb, [phi, theta, psi, s2x, s2y])
							cl = ccc(proj, eigList[k][l], mask2d)
							if cl < 0.0:
								icont += 1
								cont = True
								eigList[k][l] *= -1.0
						u = int(cont)
						u = mpi_reduce([u], 1, MPI_INT, MPI_MAX, main_node, MPI_COMM_WORLD)
						icont = mpi_reduce([icont], 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD)

						if myid == main_node:
							u = int(u[0])
							print " Eigenvector: ",k," number changed ",int(icont[0])
						else: u = 0
						u = bcast_number_to_all(u, main_node)
						cont = bool(u)
						ITER += 1

					del eig3Df, kb
					mpi_barrier(MPI_COMM_WORLD)
				del eigList, mask2d

			if options.ave3D: del ave3D
			if options.var2D:
				from fundamentals import fpol 
				if myid == main_node:
					km = 0
					for i in xrange(number_of_proc):
						if i == main_node :
							for im in xrange(len(varList)):
								tmpvol=fpol(varList[im], Tracker["nx"], Tracker["nx"],1)
								tmpvol.write_image(options.var2D, km)
								km += 1
						else:
							nl = mpi_recv(1, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)
							nl = int(nl[0])
							for im in xrange(nl):
								ave = recv_EMData(i, im+i+70000)
								tmpvol=fpol(ave, Tracker["nx"], Tracker["nx"],1)
								tmpvol.write_image(options.var2D, km)
								km += 1
				else:
					mpi_send(len(varList), 1, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
					for im in xrange(len(varList)):
						send_EMData(varList[im], main_node, im+myid+70000)#  What with the attributes??

			mpi_barrier(MPI_COMM_WORLD)

		if  options.var3D:
			if myid == main_node and options.VERBOSE:
				print "Reconstructing 3D variability volume"

			t6 = time()
			radiusvar = options.radiusvar
			if( radiusvar < 0 ):  radiusvar = nx//2 -3
			res = recons3d_4nn_MPI(myid, varList, symmetry=options.sym, npad=options.npad)
			#res = recons3d_em_MPI(varList, vol_stack, options.iter, radiusvar, options.abs, True, options.sym, options.squ)
			if myid == main_node:
				from fundamentals import fpol
				res =fpol(res, Tracker["nx"], Tracker["nx"], Tracker["nx"])
				res.write_image(options.var3D)

			if myid == main_node:
				print_msg("%-70s:  %.2f\n"%("Reconstructing 3D variability took [s]", time()-t6))
				if options.VERBOSE:
					print "Reconstruction took: %.2f [min]"%((time()-t6)/60)

			if myid == main_node:
				print_msg("%-70s:  %.2f\n"%("Total time for these computations [s]", time()-t0))
				if options.VERBOSE:
					print "Total time for these computations: %.2f [min]"%((time()-t0)/60)
				print_end_msg("sx3dvariability")

		global_def.BATCH = False

		from mpi import mpi_finalize
		mpi_finalize()
Example #16
0
def main():
    def params_3D_2D_NEW(phi, theta, psi, s2x, s2y, mirror):
        if mirror:
            m = 1
            alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0,
                                                       540.0 - psi, 0, 0, 1.0)
        else:
            m = 0
            alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0,
                                                       360.0 - psi, 0, 0, 1.0)
        return alpha, sx, sy, m

    progname = os.path.basename(sys.argv[0])
    usage = progname + " prj_stack  --ave2D= --var2D=  --ave3D= --var3D= --img_per_grp= --fl=15. --aa=0.01  --sym=symmetry --CTF"
    parser = OptionParser(usage, version=SPARXVERSION)

    parser.add_option("--output_dir",
                      type="string",
                      default="./",
                      help="output directory")
    parser.add_option("--ave2D",
                      type="string",
                      default=False,
                      help="write to the disk a stack of 2D averages")
    parser.add_option("--var2D",
                      type="string",
                      default=False,
                      help="write to the disk a stack of 2D variances")
    parser.add_option("--ave3D",
                      type="string",
                      default=False,
                      help="write to the disk reconstructed 3D average")
    parser.add_option("--var3D",
                      type="string",
                      default=False,
                      help="compute 3D variability (time consuming!)")
    parser.add_option("--img_per_grp",
                      type="int",
                      default=10,
                      help="number of neighbouring projections")
    parser.add_option("--no_norm",
                      action="store_true",
                      default=False,
                      help="do not use normalization")
    #parser.add_option("--radius", 	    type="int"         ,	default=-1   ,				help="radius for 3D variability" )
    parser.add_option("--npad",
                      type="int",
                      default=2,
                      help="number of time to pad the original images")
    parser.add_option("--sym", type="string", default="c1", help="symmetry")
    parser.add_option(
        "--fl",
        type="float",
        default=0.0,
        help=
        "cutoff frequency in absolute frequency units (0.0-0.5). (Default - no filtration)"
    )
    parser.add_option(
        "--aa",
        type="float",
        default=0.0,
        help=
        "fall off of the filter. Put 0.01 if user has no clue about falloff (Default - no filtration)"
    )
    parser.add_option("--CTF",
                      action="store_true",
                      default=False,
                      help="use CTF correction")
    parser.add_option("--VERBOSE",
                      action="store_true",
                      default=False,
                      help="Long output for debugging")
    #parser.add_option("--MPI" , 		action="store_true",	default=False,				help="use MPI version")
    #parser.add_option("--radiuspca", 	type="int"         ,	default=-1   ,				help="radius for PCA" )
    #parser.add_option("--iter", 		type="int"         ,	default=40   ,				help="maximum number of iterations (stop criterion of reconstruction process)" )
    #parser.add_option("--abs", 		type="float"   ,        default=0.0  ,				help="minimum average absolute change of voxels' values (stop criterion of reconstruction process)" )
    #parser.add_option("--squ", 		type="float"   ,	    default=0.0  ,				help="minimum average squared change of voxels' values (stop criterion of reconstruction process)" )
    parser.add_option(
        "--VAR",
        action="store_true",
        default=False,
        help="stack on input consists of 2D variances (Default False)")
    parser.add_option(
        "--decimate",
        type="float",
        default=1.0,
        help=
        "image decimation rate: a number larger than 1 expands the image, smaller than 1 shrinks it. Default is 1"
    )
    parser.add_option(
        "--window",
        type="int",
        default=0,
        help=
        "reduce images to a smaller image size without changing pixel_size. Default value is zero."
    )
    #parser.add_option("--SND",			action="store_true",	default=False,				help="compute squared normalized differences (Default False)")
    parser.add_option(
        "--nvec",
        type="int",
        default=0,
        help="number of eigenvectors, default = 0 meaning no PCA calculated")
    parser.add_option(
        "--symmetrize",
        action="store_true",
        default=False,
        help="Prepare input stack for handling symmetry (Default False)")

    (options, args) = parser.parse_args()
    #####
    from mpi import mpi_init, mpi_comm_rank, mpi_comm_size, mpi_recv, MPI_COMM_WORLD
    from mpi import mpi_barrier, mpi_reduce, mpi_bcast, mpi_send, MPI_FLOAT, MPI_SUM, MPI_INT, MPI_MAX
    from applications import MPI_start_end
    from reconstruction import recons3d_em, recons3d_em_MPI
    from reconstruction import recons3d_4nn_MPI, recons3d_4nn_ctf_MPI
    from utilities import print_begin_msg, print_end_msg, print_msg
    from utilities import read_text_row, get_image, get_im
    from utilities import bcast_EMData_to_all, bcast_number_to_all
    from utilities import get_symt

    #  This is code for handling symmetries by the above program.  To be incorporated. PAP 01/27/2015

    from EMAN2db import db_open_dict

    # Set up global variables related to bdb cache
    if global_def.CACHE_DISABLE:
        from utilities import disable_bdb_cache
        disable_bdb_cache()

    # Set up global variables related to ERROR function
    global_def.BATCH = True

    # detect if program is running under MPI
    RUNNING_UNDER_MPI = "OMPI_COMM_WORLD_SIZE" in os.environ
    if RUNNING_UNDER_MPI:
        global_def.MPI = True

    if options.symmetrize:
        if RUNNING_UNDER_MPI:
            try:
                sys.argv = mpi_init(len(sys.argv), sys.argv)
                try:
                    number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
                    if (number_of_proc > 1):
                        ERROR(
                            "Cannot use more than one CPU for symmetry preparation",
                            "sx3dvariability", 1)
                except:
                    pass
            except:
                pass
        if options.output_dir != "./" and not os.path.exists(
                options.output_dir):
            os.mkdir(options.output_dir)
        #  Input
        #instack = "Clean_NORM_CTF_start_wparams.hdf"
        #instack = "bdb:data"

        from logger import Logger, BaseLogger_Files
        if os.path.exists(os.path.join(options.output_dir, "log.txt")):
            os.remove(os.path.join(options.output_dir, "log.txt"))
        log_main = Logger(BaseLogger_Files())
        log_main.prefix = os.path.join(options.output_dir, "./")

        instack = args[0]
        sym = options.sym.lower()
        if (sym == "c1"):
            ERROR("There is no need to symmetrize stack for C1 symmetry",
                  "sx3dvariability", 1)

        line = ""
        for a in sys.argv:
            line += " " + a
        log_main.add(line)

        if (instack[:4] != "bdb:"):
            if options.output_dir == "./": stack = "bdb:data"
            else: stack = "bdb:" + options.output_dir + "/data"
            delete_bdb(stack)
            junk = cmdexecute("sxcpy.py  " + instack + "  " + stack)
        else:
            stack = instack

        qt = EMUtil.get_all_attributes(stack, 'xform.projection')

        na = len(qt)
        ts = get_symt(sym)
        ks = len(ts)
        angsa = [None] * na
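        # For every symmetry operator ts[k], the loop below creates a virtual stack
        # bdb:Q<k> (a vstack of the input) and multiplies each particle's
        # xform.projection by the operator.  The Q stacks are then merged into the
        # symmetrized stack 'sdata', so each particle appears once per
        # symmetry-related orientation, and the 'variabilitysymmetry' attribute
        # records which symmetry was applied.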

        for k in xrange(ks):
            #Qfile = "Q%1d"%k
            Qfile = os.path.join(options.output_dir, "Q%1d" % k)
            #delete_bdb("bdb:Q%1d"%k)
            delete_bdb("bdb:" + Qfile)
            #junk = cmdexecute("e2bdb.py  "+stack+"  --makevstack=bdb:Q%1d"%k)
            junk = cmdexecute("e2bdb.py  " + stack + "  --makevstack=bdb:" +
                              Qfile)
            #DB = db_open_dict("bdb:Q%1d"%k)
            DB = db_open_dict("bdb:" + Qfile)
            for i in xrange(na):
                ut = qt[i] * ts[k]
                DB.set_attr(i, "xform.projection", ut)
                #bt = ut.get_params("spider")
                #angsa[i] = [round(bt["phi"],3)%360.0, round(bt["theta"],3)%360.0, bt["psi"], -bt["tx"], -bt["ty"]]
            #write_text_row(angsa, 'ptsma%1d.txt'%k)
            #junk = cmdexecute("e2bdb.py  "+stack+"  --makevstack=bdb:Q%1d"%k)
            #junk = cmdexecute("sxheader.py  bdb:Q%1d  --params=xform.projection  --import=ptsma%1d.txt"%(k,k))
            DB.close()
        if options.output_dir == "./": delete_bdb("bdb:sdata")
        else: delete_bdb("bdb:" + options.output_dir + "/" + "sdata")
        #junk = cmdexecute("e2bdb.py . --makevstack=bdb:sdata --filt=Q")
        sdata = "bdb:" + options.output_dir + "/" + "sdata"
        print(sdata)
        junk = cmdexecute("e2bdb.py   " + options.output_dir +
                          "  --makevstack=" + sdata + " --filt=Q")
        #junk = cmdexecute("ls  EMAN2DB/sdata*")
        #a = get_im("bdb:sdata")
        a = get_im(sdata)
        a.set_attr("variabilitysymmetry", sym)
        #a.write_image("bdb:sdata")
        a.write_image(sdata)

    else:

        sys.argv = mpi_init(len(sys.argv), sys.argv)
        myid = mpi_comm_rank(MPI_COMM_WORLD)
        number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
        main_node = 0

        if len(args) == 1:
            stack = args[0]
        else:
            print(("usage: " + usage))
            print(("Please run '" + progname + " -h' for detailed options"))
            return 1

        t0 = time()
        # obsolete flags
        options.MPI = True
        options.nvec = 0
        options.radiuspca = -1
        options.iter = 40
        options.abs = 0.0
        options.squ = 0.0
        options.SND = False

        if options.fl > 0.0 and options.aa == 0.0:
            ERROR("Fall off has to be given for the low-pass filter",
                  "sx3dvariability", 1, myid)
        if options.VAR and options.SND:
            ERROR("Only one of var and SND can be set!", "sx3dvariability",
                  myid)
            exit()
        if options.VAR and (options.ave2D or options.ave3D or options.var2D):
            ERROR(
                "When VAR is set, the program cannot output ave2D, ave3D or var2D",
                "sx3dvariability", 1, myid)
            exit()
        #if options.SND and (options.ave2D or options.ave3D):
        #	ERROR("When SND is set, the program cannot output ave2D or ave3D", "sx3dvariability", 1, myid)
        #	exit()
        if options.nvec > 0:
            ERROR("PCA option not implemented", "sx3dvariability", 1, myid)
            exit()
        if options.nvec > 0 and options.ave3D == None:
            ERROR("When doing PCA analysis, one must set ave3D",
                  "sx3dvariability",
                  myid=myid)
            exit()
        import string
        options.sym = options.sym.lower()

        # if global_def.CACHE_DISABLE:
        # 	from utilities import disable_bdb_cache
        # 	disable_bdb_cache()
        # global_def.BATCH = True

        if myid == main_node:
            if options.output_dir != "./" and not os.path.exists(
                    options.output_dir):
                os.mkdir(options.output_dir)

        img_per_grp = options.img_per_grp
        nvec = options.nvec
        radiuspca = options.radiuspca

        from logger import Logger, BaseLogger_Files
        #if os.path.exists(os.path.join(options.output_dir, "log.txt")): os.remove(os.path.join(options.output_dir, "log.txt"))
        log_main = Logger(BaseLogger_Files())
        log_main.prefix = os.path.join(options.output_dir, "./")

        if myid == main_node:
            line = ""
            for a in sys.argv:
                line += " " + a
            log_main.add(line)
            log_main.add("-------->>>Settings given by all options<<<-------")
            log_main.add("instack  		    :" + stack)
            log_main.add("output_dir        :" + options.output_dir)
            log_main.add("var3d             :" + str(options.var3D))

        if myid == main_node:
            line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
            #print_begin_msg("sx3dvariability")
            msg = "sx3dvariability"
            log_main.add(msg)
            print(line, msg)
            msg = ("%-70s:  %s\n" % ("Input stack", stack))
            log_main.add(msg)
            print(line, msg)

        symbaselen = 0
        if myid == main_node:
            nima = EMUtil.get_image_count(stack)
            img = get_image(stack)
            nx = img.get_xsize()
            ny = img.get_ysize()
            if options.sym != "c1":
                imgdata = get_im(stack)
                try:
                    i = imgdata.get_attr("variabilitysymmetry").lower()
                    if (i != options.sym):
                        ERROR(
                            "The symmetry provided does not agree with the symmetry of the input stack",
                            "sx3dvariability",
                            myid=myid)
                except:
                    ERROR(
                        "Input stack is not prepared for symmetry, please follow instructions",
                        "sx3dvariability",
                        myid=myid)
                from utilities import get_symt
                i = len(get_symt(options.sym))
                if ((nima / i) * i != nima):
                    ERROR(
                        "The length of the input stack is incorrect for symmetry processing",
                        "sx3dvariability",
                        myid=myid)
                symbaselen = nima / i
            else:
                symbaselen = nima
        else:
            nima = 0
            nx = 0
            ny = 0
        nima = bcast_number_to_all(nima)
        nx = bcast_number_to_all(nx)
        ny = bcast_number_to_all(ny)
        Tracker = {}
        Tracker["total_stack"] = nima
        if options.decimate == 1.:
            if options.window != 0:
                nx = options.window
                ny = options.window
        else:
            if options.window == 0:
                nx = int(nx * options.decimate)
                ny = int(ny * options.decimate)
            else:
                nx = int(options.window * options.decimate)
                ny = nx
        Tracker["nx"] = nx
        Tracker["ny"] = ny
        Tracker["nz"] = nx
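        # Working image size set above: --window crops images to the given size
        # without changing the pixel size, while --decimate rescales nx/ny by the
        # given factor; the resulting dimensions are stored in the Tracker
        # dictionary and used for all later resampling (fpol) calls.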
        symbaselen = bcast_number_to_all(symbaselen)
        if radiuspca == -1: radiuspca = nx / 2 - 2

        if myid == main_node:
            line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
            msg = "%-70s:  %d\n" % ("Number of projections", nima)
            log_main.add(msg)
            print(line, msg)
        img_begin, img_end = MPI_start_end(nima, number_of_proc, myid)
        """
		if options.SND:
			from projection		import prep_vol, prgs
			from statistics		import im_diff
			from utilities		import get_im, model_circle, get_params_proj, set_params_proj
			from utilities		import get_ctf, generate_ctf
			from filter			import filt_ctf
		
			imgdata = EMData.read_images(stack, range(img_begin, img_end))

			if options.CTF:
				vol = recons3d_4nn_ctf_MPI(myid, imgdata, 1.0, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)
			else:
				vol = recons3d_4nn_MPI(myid, imgdata, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)

			bcast_EMData_to_all(vol, myid)
			volft, kb = prep_vol(vol)

			mask = model_circle(nx/2-2, nx, ny)
			varList = []
			for i in xrange(img_begin, img_end):
				phi, theta, psi, s2x, s2y = get_params_proj(imgdata[i-img_begin])
				ref_prj = prgs(volft, kb, [phi, theta, psi, -s2x, -s2y])
				if options.CTF:
					ctf_params = get_ctf(imgdata[i-img_begin])
					ref_prj = filt_ctf(ref_prj, generate_ctf(ctf_params))
				diff, A, B = im_diff(ref_prj, imgdata[i-img_begin], mask)
				diff2 = diff*diff
				set_params_proj(diff2, [phi, theta, psi, s2x, s2y])
				varList.append(diff2)
			mpi_barrier(MPI_COMM_WORLD)
		"""
        if options.VAR:
            #varList   = EMData.read_images(stack, range(img_begin, img_end))
            varList = []
            this_image = EMData()
            for index_of_particle in xrange(img_begin, img_end):
                this_image.read_image(stack, index_of_particle)
                varList.append(
                    image_decimate_window_xform_ctf(this_image,
                                                    options.decimate,
                                                    options.window,
                                                    options.CTF))
        else:
            from utilities import bcast_number_to_all, bcast_list_to_all, send_EMData, recv_EMData
            from utilities import set_params_proj, get_params_proj, params_3D_2D, get_params2D, set_params2D, compose_transform2
            from utilities import model_blank, nearest_proj, model_circle
            from applications import pca
            from statistics import avgvar, avgvar_ctf, ccc
            from filter import filt_tanl
            from morphology import threshold, square_root
            from projection import project, prep_vol, prgs
            from sets import Set

            if myid == main_node:
                t1 = time()
                proj_angles = []
                aveList = []
                tab = EMUtil.get_all_attributes(stack, 'xform.projection')
                for i in xrange(nima):
                    t = tab[i].get_params('spider')
                    phi = t['phi']
                    theta = t['theta']
                    psi = t['psi']
                    x = theta
                    if x > 90.0: x = 180.0 - x
                    x = x * 10000 + psi
                    proj_angles.append([x, t['phi'], t['theta'], t['psi'], i])
                t2 = time()
                line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
                msg = "%-70s:  %d\n" % ("Number of neighboring projections",
                                        img_per_grp)
                log_main.add(msg)
                print(line, msg)
                msg = "...... Finding neighboring projections\n"
                log_main.add(msg)
                print(line, msg)
                if options.VERBOSE:
                    msg = "Number of images per group: %d" % img_per_grp
                    log_main.add(msg)
                    print(line, msg)
                    msg = "Now grouping projections"
                    log_main.add(msg)
                    print(line, msg)
                proj_angles.sort()
            proj_angles_list = [0.0] * (nima * 4)
            if myid == main_node:
                for i in xrange(nima):
                    proj_angles_list[i * 4] = proj_angles[i][1]
                    proj_angles_list[i * 4 + 1] = proj_angles[i][2]
                    proj_angles_list[i * 4 + 2] = proj_angles[i][3]
                    proj_angles_list[i * 4 + 3] = proj_angles[i][4]
            proj_angles_list = bcast_list_to_all(proj_angles_list, myid,
                                                 main_node)
            proj_angles = []
            for i in xrange(nima):
                proj_angles.append([
                    proj_angles_list[i * 4], proj_angles_list[i * 4 + 1],
                    proj_angles_list[i * 4 + 2],
                    int(proj_angles_list[i * 4 + 3])
                ])
            del proj_angles_list
            proj_list, mirror_list = nearest_proj(proj_angles, img_per_grp,
                                                  range(img_begin, img_end))

            all_proj = Set()
            for im in proj_list:
                for jm in im:
                    all_proj.add(proj_angles[jm][3])

            all_proj = list(all_proj)
            if options.VERBOSE:
                print("On node %2d, number of images needed to be read = %5d" %
                      (myid, len(all_proj)))

            index = {}
            for i in xrange(len(all_proj)):
                index[all_proj[i]] = i
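            # 'index' built above maps a global particle number (as referenced in
            # proj_list / proj_angles) to its position in the locally read imgdata
            # list.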
            mpi_barrier(MPI_COMM_WORLD)

            if myid == main_node:
                line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
                msg = ("%-70s:  %.2f\n" %
                       ("Finding neighboring projections lasted [s]",
                        time() - t2))
                log_main.add(msg)
                print(msg)
                msg = ("%-70s:  %d\n" %
                       ("Number of groups processed on the main node",
                        len(proj_list)))
                log_main.add(msg)
                print(line, msg)
                if options.VERBOSE:
                    print("Grouping projections took: ", (time() - t2) / 60,
                          "[min]")
                    print("Number of groups on main node: ", len(proj_list))
            mpi_barrier(MPI_COMM_WORLD)

            if myid == main_node:
                line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
                msg = ("...... calculating the stack of 2D variances \n")
                log_main.add(msg)
                print(line, msg)
                if options.VERBOSE:
                    print("Now calculating the stack of 2D variances")

            proj_params = [0.0] * (nima * 5)
            aveList = []
            varList = []
            if nvec > 0:
                eigList = [[] for i in xrange(nvec)]

            if options.VERBOSE:
                print("Begin to read images on processor %d" % (myid))
            ttt = time()
            #imgdata = EMData.read_images(stack, all_proj)
            imgdata = []
            for index_of_proj in xrange(len(all_proj)):
                #img     = EMData()
                #img.read_image(stack, all_proj[index_of_proj])
                dmg = image_decimate_window_xform_ctf(
                    get_im(stack, all_proj[index_of_proj]), options.decimate,
                    options.window, options.CTF)
                #print dmg.get_xsize(), "init"
                imgdata.append(dmg)
            if options.VERBOSE:
                print("Reading images on processor %d done, time = %.2f" %
                      (myid, time() - ttt))
                print("On processor %d, we got %d images" %
                      (myid, len(imgdata)))
            mpi_barrier(MPI_COMM_WORLD)
            '''	
			imgdata2 = EMData.read_images(stack, range(img_begin, img_end))
			if options.fl > 0.0:
				for k in xrange(len(imgdata2)):
					imgdata2[k] = filt_tanl(imgdata2[k], options.fl, options.aa)
			if options.CTF:
				vol = recons3d_4nn_ctf_MPI(myid, imgdata2, 1.0, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)
			else:
				vol = recons3d_4nn_MPI(myid, imgdata2, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)
			if myid == main_node:
				vol.write_image("vol_ctf.hdf")
				print_msg("Writing to the disk volume reconstructed from averages as		:  %s\n"%("vol_ctf.hdf"))
			del vol, imgdata2
			mpi_barrier(MPI_COMM_WORLD)
			'''
            from applications import prepare_2d_forPCA
            from utilities import model_blank
            for i in xrange(len(proj_list)):
                ki = proj_angles[proj_list[i][0]][3]
                if ki >= symbaselen: continue
                mi = index[ki]
                phiM, thetaM, psiM, s2xM, s2yM = get_params_proj(imgdata[mi])

                grp_imgdata = []
                for j in xrange(img_per_grp):
                    mj = index[proj_angles[proj_list[i][j]][3]]
                    phi, theta, psi, s2x, s2y = get_params_proj(imgdata[mj])
                    alpha, sx, sy, mirror = params_3D_2D_NEW(
                        phi, theta, psi, s2x, s2y, mirror_list[i][j])
                    if thetaM <= 90:
                        if mirror == 0:
                            alpha, sx, sy, scale = compose_transform2(
                                alpha, sx, sy, 1.0, phiM - phi, 0.0, 0.0, 1.0)
                        else:
                            alpha, sx, sy, scale = compose_transform2(
                                alpha, sx, sy, 1.0, 180 - (phiM - phi), 0.0,
                                0.0, 1.0)
                    else:
                        if mirror == 0:
                            alpha, sx, sy, scale = compose_transform2(
                                alpha, sx, sy, 1.0, -(phiM - phi), 0.0, 0.0,
                                1.0)
                        else:
                            alpha, sx, sy, scale = compose_transform2(
                                alpha, sx, sy, 1.0, -(180 - (phiM - phi)), 0.0,
                                0.0, 1.0)
                    set_params2D(imgdata[mj], [alpha, sx, sy, mirror, 1.0])
                    grp_imgdata.append(imgdata[mj])
                    #print grp_imgdata[j].get_xsize(), imgdata[mj].get_xsize()

                if not options.no_norm:
                    #print grp_imgdata[j].get_xsize()
                    mask = model_circle(nx / 2 - 2, nx, nx)
                    for k in xrange(img_per_grp):
                        ave, std, minn, maxx = Util.infomask(
                            grp_imgdata[k], mask, False)
                        grp_imgdata[k] -= ave
                        grp_imgdata[k] /= std
                    del mask

                if options.fl > 0.0:
                    from filter import filt_ctf, filt_table
                    from fundamentals import fft, window2d
                    nx2 = 2 * nx
                    ny2 = 2 * ny
                    if options.CTF:
                        from utilities import pad
                        for k in xrange(img_per_grp):
                            grp_imgdata[k] = window2d(
                                fft(
                                    filt_tanl(
                                        filt_ctf(
                                            fft(
                                                pad(grp_imgdata[k], nx2, ny2,
                                                    1, 0.0)),
                                            grp_imgdata[k].get_attr("ctf"),
                                            binary=1), options.fl,
                                        options.aa)), nx, ny)
                            #grp_imgdata[k] = window2d(fft( filt_table( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa), fifi) ),nx,ny)
                            #grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa)
                    else:
                        for k in xrange(img_per_grp):
                            grp_imgdata[k] = filt_tanl(grp_imgdata[k],
                                                       options.fl, options.aa)
                            #grp_imgdata[k] = window2d(fft( filt_table( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa), fifi) ),nx,ny)
                            #grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa)
                else:
                    from utilities import pad, read_text_file
                    from filter import filt_ctf, filt_table
                    from fundamentals import fft, window2d
                    nx2 = 2 * nx
                    ny2 = 2 * ny
                    if options.CTF:
                        from utilities import pad
                        for k in xrange(img_per_grp):
                            grp_imgdata[k] = window2d(
                                fft(
                                    filt_ctf(fft(
                                        pad(grp_imgdata[k], nx2, ny2, 1, 0.0)),
                                             grp_imgdata[k].get_attr("ctf"),
                                             binary=1)), nx, ny)
                            #grp_imgdata[k] = window2d(fft( filt_table( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa), fifi) ),nx,ny)
                            #grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa)
                '''
				if i < 10 and myid == main_node:
					for k in xrange(10):
						grp_imgdata[k].write_image("grp%03d.hdf"%i, k)
				'''
                """
				if myid == main_node and i==0:
					for pp in xrange(len(grp_imgdata)):
						grp_imgdata[pp].write_image("pp.hdf", pp)
				"""
                ave, grp_imgdata = prepare_2d_forPCA(grp_imgdata)
                """
				if myid == main_node and i==0:
					for pp in xrange(len(grp_imgdata)):
						grp_imgdata[pp].write_image("qq.hdf", pp)
				"""

                var = model_blank(nx, ny)
                for q in grp_imgdata:
                    Util.add_img2(var, q)
                Util.mul_scalar(var, 1.0 / (len(grp_imgdata) - 1))
                # Switch to std dev
                var = square_root(threshold(var))
                #if options.CTF:	ave, var = avgvar_ctf(grp_imgdata, mode="a")
                #else:	            ave, var = avgvar(grp_imgdata, mode="a")
                """
				if myid == main_node:
					ave.write_image("avgv.hdf",i)
					var.write_image("varv.hdf",i)
				"""

                set_params_proj(ave, [phiM, thetaM, 0.0, 0.0, 0.0])
                set_params_proj(var, [phiM, thetaM, 0.0, 0.0, 0.0])

                aveList.append(ave)
                varList.append(var)

                if options.VERBOSE:
                    print("%5.2f%% done on processor %d" %
                          (i * 100.0 / len(proj_list), myid))
                if nvec > 0:
                    eig = pca(input_stacks=grp_imgdata,
                              subavg="",
                              mask_radius=radiuspca,
                              nvec=nvec,
                              incore=True,
                              shuffle=False,
                              genbuf=True)
                    for k in xrange(nvec):
                        set_params_proj(eig[k], [phiM, thetaM, 0.0, 0.0, 0.0])
                        eigList[k].append(eig[k])
                    """
					if myid == 0 and i == 0:
						for k in xrange(nvec):
							eig[k].write_image("eig.hdf", k)
					"""

            del imgdata
            #  To this point, all averages, variances, and eigenvectors are computed

            if options.ave2D:
                from fundamentals import fpol
                if myid == main_node:
                    km = 0
                    for i in xrange(number_of_proc):
                        if i == main_node:
                            for im in xrange(len(aveList)):
                                aveList[im].write_image(
                                    os.path.join(options.output_dir,
                                                 options.ave2D), km)
                                km += 1
                        else:
                            nl = mpi_recv(1, MPI_INT, i,
                                          SPARX_MPI_TAG_UNIVERSAL,
                                          MPI_COMM_WORLD)
                            nl = int(nl[0])
                            for im in xrange(nl):
                                ave = recv_EMData(i, im + i + 70000)
                                """
								nm = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
								nm = int(nm[0])
								members = mpi_recv(nm, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
								ave.set_attr('members', map(int, members))
								members = mpi_recv(nm, MPI_FLOAT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
								ave.set_attr('pix_err', map(float, members))
								members = mpi_recv(3, MPI_FLOAT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
								ave.set_attr('refprojdir', map(float, members))
								"""
                                tmpvol = fpol(ave, Tracker["nx"],
                                              Tracker["nx"], 1)
                                tmpvol.write_image(
                                    os.path.join(options.output_dir,
                                                 options.ave2D), km)
                                km += 1
                else:
                    mpi_send(len(aveList), 1, MPI_INT, main_node,
                             SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
                    for im in xrange(len(aveList)):
                        send_EMData(aveList[im], main_node, im + myid + 70000)
                        """
						members = aveList[im].get_attr('members')
						mpi_send(len(members), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						mpi_send(members, len(members), MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						members = aveList[im].get_attr('pix_err')
						mpi_send(members, len(members), MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						try:
							members = aveList[im].get_attr('refprojdir')
							mpi_send(members, 3, MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						except:
							mpi_send([-999.0,-999.0,-999.0], 3, MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						"""

            if options.ave3D:
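                # Reconstruct a 3D volume from the projection averages with the
                # 4nn MPI reconstructor; only the main node interpolates it to
                # Tracker["nx"] and writes it to disk.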
                from fundamentals import fpol
                if options.VERBOSE:
                    print("Reconstructing 3D average volume")
                ave3D = recons3d_4nn_MPI(myid,
                                         aveList,
                                         symmetry=options.sym,
                                         npad=options.npad)
                bcast_EMData_to_all(ave3D, myid)
                if myid == main_node:
                    line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
                    ave3D = fpol(ave3D, Tracker["nx"], Tracker["nx"],
                                 Tracker["nx"])
                    ave3D.write_image(
                        os.path.join(options.output_dir, options.ave3D))
                    msg = ("%-70s:  %s\n" % (
                        "Writing to the disk volume reconstructed from averages as",
                        options.ave3D))
                    log_main.add(msg)
                    print(line, msg)
            del ave, var, proj_list, stack, phi, theta, psi, s2x, s2y, alpha, sx, sy, mirror, aveList

            if nvec > 0:
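                # Eigenvolume refinement: reconstruct each eigenvolume from its
                # 2D eigenimages, reproject it, flip the sign of eigenimages
                # that correlate negatively with their reprojection, and repeat
                # until no rank reports a sign change.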
                for k in xrange(nvec):
                    if options.VERBOSE:
                        print("Reconstruction eigenvolumes", k)
                    cont = True
                    ITER = 0
                    mask2d = model_circle(radiuspca, nx, nx)
                    while cont:
                        #print "On node %d, iteration %d"%(myid, ITER)
                        eig3D = recons3d_4nn_MPI(myid,
                                                 eigList[k],
                                                 symmetry=options.sym,
                                                 npad=options.npad)
                        bcast_EMData_to_all(eig3D, myid, main_node)
                        if options.fl > 0.0:
                            eig3D = filt_tanl(eig3D, options.fl, options.aa)
                        if myid == main_node:
                            eig3D.write_image(
                                os.path.join(options.output_dir,
                                             "eig3d_%03d_%03d.hdf" % (k, ITER)))
                        Util.mul_img(eig3D,
                                     model_circle(radiuspca, nx, nx, nx))
                        eig3Df, kb = prep_vol(eig3D)
                        del eig3D
                        cont = False
                        icont = 0
                        for l in xrange(len(eigList[k])):
                            phi, theta, psi, s2x, s2y = get_params_proj(
                                eigList[k][l])
                            proj = prgs(eig3Df, kb,
                                        [phi, theta, psi, s2x, s2y])
                            cl = ccc(proj, eigList[k][l], mask2d)
                            if cl < 0.0:
                                icont += 1
                                cont = True
                                eigList[k][l] *= -1.0
                        u = int(cont)
                        u = mpi_reduce([u], 1, MPI_INT, MPI_MAX, main_node,
                                       MPI_COMM_WORLD)
                        icont = mpi_reduce([icont], 1, MPI_INT, MPI_SUM,
                                           main_node, MPI_COMM_WORLD)

                        if myid == main_node:
                            line = strftime("%Y-%m-%d_%H:%M:%S",
                                            localtime()) + " =>"
                            u = int(u[0])
                            msg = ("Eigenvector: %d  number changed %d" %
                                   (k, int(icont[0])))
                            log_main.add(msg)
                            print(line, msg)
                        else:
                            u = 0
                        u = bcast_number_to_all(u, main_node)
                        cont = bool(u)
                        ITER += 1

                    del eig3Df, kb
                    mpi_barrier(MPI_COMM_WORLD)
                del eigList, mask2d

            if options.ave3D: del ave3D
            if options.var2D:
                from fundamentals import fpol
                if myid == main_node:
                    km = 0
                    for i in xrange(number_of_proc):
                        if i == main_node:
                            for im in xrange(len(varList)):
                                tmpvol = fpol(varList[im], Tracker["nx"],
                                              Tracker["nx"], 1)
                                tmpvol.write_image(
                                    os.path.join(options.output_dir,
                                                 options.var2D), km)
                                km += 1
                        else:
                            nl = mpi_recv(1, MPI_INT, i,
                                          SPARX_MPI_TAG_UNIVERSAL,
                                          MPI_COMM_WORLD)
                            nl = int(nl[0])
                            for im in xrange(nl):
                                ave = recv_EMData(i, im + i + 70000)
                                tmpvol = fpol(ave, Tracker["nx"],
                                              Tracker["nx"], 1)
                                tmpvol.write_image(
                                    os.path.join(options.output_dir,
                                                 options.var2D), km)
                                km += 1
                else:
                    mpi_send(len(varList), 1, MPI_INT, main_node,
                             SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
                    for im in xrange(len(varList)):
                        send_EMData(varList[im], main_node, im + myid +
                                    70000)  #  What about the attributes?

            mpi_barrier(MPI_COMM_WORLD)

        if options.var3D:
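            # Reconstruct the 3D variability volume from the per-projection
            # variance images; the main node writes the result and logs timing.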
            if myid == main_node and options.VERBOSE:
                line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
                msg = ("Reconstructing 3D variability volume")
                log_main.add(msg)
                print(line, msg)
            t6 = time()
            # radiusvar = options.radius
            # if( radiusvar < 0 ):  radiusvar = nx//2 -3
            res = recons3d_4nn_MPI(myid,
                                   varList,
                                   symmetry=options.sym,
                                   npad=options.npad)
            #res = recons3d_em_MPI(varList, vol_stack, options.iter, radiusvar, options.abs, True, options.sym, options.squ)
            if myid == main_node:
                from fundamentals import fpol
                res = fpol(res, Tracker["nx"], Tracker["nx"], Tracker["nx"])
                res.write_image(os.path.join(options.output_dir,
                                             options.var3D))

            if myid == main_node:
                line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
                msg = ("%-70s:  %.2f\n" %
                       ("Reconstructing 3D variability took [s]", time() - t6))
                log_main.add(msg)
                print(line, msg)
                if options.VERBOSE:
                    print("Reconstruction took: %.2f [min]" %
                          ((time() - t6) / 60))

            if myid == main_node:
                line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
                msg = ("%-70s:  %.2f\n" %
                       ("Total time for these computations [s]", time() - t0))
                print(line, msg)
                log_main.add(msg)
                if options.VERBOSE:
                    print("Total time for these computations: %.2f [min]" %
                          ((time() - t0) / 60))
                line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
                msg = ("sx3dvariability")
                print(line, msg)
                log_main.add(msg)

        from mpi import mpi_finalize
        mpi_finalize()

        if RUNNING_UNDER_MPI:
            global_def.MPI = False

        global_def.BATCH = False
示例#17
0
def ali3d_MPI(stack,
              ref_vol,
              outdir,
              maskfile=None,
              ir=1,
              ou=-1,
              rs=1,
              xr="4 2 2 1",
              yr="-1",
              ts="1 1 0.5 0.25",
              delta="10 6 4 4",
              an="-1",
              center=0,
              maxit=5,
              term=95,
              CTF=False,
              fourvar=False,
              snr=1.0,
              ref_a="S",
              sym="c1",
              sort=True,
              cutoff=999.99,
              pix_cutoff="0",
              two_tail=False,
              model_jump="1 1 1 1 1",
              restart=False,
              save_half=False,
              protos=None,
              oplane=None,
              lmask=-1,
              ilmask=-1,
              findseam=False,
              vertstep=None,
              hpars="-1",
              hsearch="0.0 50.0",
              full_output=False,
              compare_repro=False,
              compare_ref_free="-1",
              ref_free_cutoff="-1 -1 -1 -1",
              wcmask=None,
              debug=False,
              recon_pad=4,
              olmask=75):

    from alignment import Numrinit, prepare_refrings
    from utilities import model_circle, get_image, drop_image, get_input_from_string
    from utilities import bcast_list_to_all, bcast_number_to_all, reduce_EMData_to_root, bcast_EMData_to_all
    from utilities import send_attr_dict
    from utilities import get_params_proj, file_type
    from fundamentals import rot_avg_image
    import os
    import types
    from utilities import print_begin_msg, print_end_msg, print_msg
    from mpi import mpi_bcast, mpi_comm_size, mpi_comm_rank, MPI_FLOAT, MPI_COMM_WORLD, mpi_barrier, mpi_reduce
    from mpi import mpi_reduce, MPI_INT, MPI_SUM, mpi_finalize
    from filter import filt_ctf
    from projection import prep_vol, prgs
    from statistics import hist_list, varf3d_MPI, fsc_mask
    from numpy import array, bincount, array2string, ones

    number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
    myid = mpi_comm_rank(MPI_COMM_WORLD)
    main_node = 0
    if myid == main_node:
        if os.path.exists(outdir):
            ERROR(
                'Output directory exists, please change the name and restart the program',
                "ali3d_MPI", 1)
        os.mkdir(outdir)
    mpi_barrier(MPI_COMM_WORLD)

    if debug:
        from time import sleep
        while not os.path.exists(outdir):
            print("Node ", myid, "  waiting...")
            sleep(5)

        info_file = os.path.join(outdir, "progress%04d" % myid)
        finfo = open(info_file, 'w')
    else:
        finfo = None
    mjump = get_input_from_string(model_jump)
    xrng = get_input_from_string(xr)
    if yr == "-1": yrng = xrng
    else: yrng = get_input_from_string(yr)
    step = get_input_from_string(ts)
    delta = get_input_from_string(delta)
    ref_free_cutoff = get_input_from_string(ref_free_cutoff)
    pix_cutoff = get_input_from_string(pix_cutoff)

    lstp = min(len(xrng), len(yrng), len(step), len(delta))
    if an == "-1":
        an = [-1] * lstp
    else:
        an = get_input_from_string(an)
    # make sure pix_cutoff is set for all iterations
    if len(pix_cutoff) < lstp:
        for i in xrange(len(pix_cutoff), lstp):
            pix_cutoff.append(pix_cutoff[-1])
    # don't waste time on sub-pixel alignment for low-resolution ang incr
    for i in range(len(step)):
        if (delta[i] > 4 or delta[i] == -1) and step[i] < 1:
            step[i] = 1

    first_ring = int(ir)
    rstep = int(rs)
    last_ring = int(ou)
    max_iter = int(maxit)
    center = int(center)

    nrefs = EMUtil.get_image_count(ref_vol)
    nmasks = 0
    if maskfile:
        # read number of masks within each maskfile (mc)
        nmasks = EMUtil.get_image_count(maskfile)
        # open masks within maskfile (mc)
        maskF = EMData.read_images(maskfile, xrange(nmasks))
    vol = EMData.read_images(ref_vol, xrange(nrefs))
    nx = vol[0].get_xsize()

    ## make sure box sizes are the same
    if myid == main_node:
        im = EMData.read_images(stack, [0])
        bx = im[0].get_xsize()
        if bx != nx:
            print_msg(
                "Error: Stack box size (%i) differs from initial model (%i)\n"
                % (bx, nx))
            sys.exit()
        del im, bx

    # for helical processing:
    helicalrecon = False
    if protos is not None or hpars != "-1" or findseam is True:
        helicalrecon = True
        # if no out-of-plane param set, use 5 degrees
        if oplane is None:
            oplane = 5.0
    if protos is not None:
        proto = get_input_from_string(protos)
        if len(proto) != nrefs:
            print_msg("Error: insufficient protofilament numbers supplied")
            sys.exit()
    if hpars != "-1":
        hpars = get_input_from_string(hpars)
        if len(hpars) != 2 * nrefs:
            print_msg("Error: insufficient helical parameters supplied")
            sys.exit()
    ## create helical parameter file for helical reconstruction
    if helicalrecon is True and myid == main_node:
        from hfunctions import createHpar
        # create initial helical parameter files
        dp = [0] * nrefs
        dphi = [0] * nrefs
        vdp = [0] * nrefs
        vdphi = [0] * nrefs
        for iref in xrange(nrefs):
            hpar = os.path.join(outdir, "hpar%02d.spi" % (iref))
            params = False
            if hpars != "-1":
                # if helical parameters explicitly given, set twist & rise
                params = [float(hpars[iref * 2]), float(hpars[(iref * 2) + 1])]
            dp[iref], dphi[iref], vdp[iref], vdphi[iref] = createHpar(
                hpar, proto[iref], params, vertstep)

    # get values for helical search parameters
    hsearch = get_input_from_string(hsearch)
    if len(hsearch) != 2:
        print_msg("Error: specify outer and inner radii for helical search")
        sys.exit()

    if last_ring < 0 or last_ring > int(nx / 2) - 2:
        last_ring = int(nx / 2) - 2

    if myid == main_node:
        #	import user_functions
        #	user_func = user_functions.factory[user_func_name]

        print_begin_msg("ali3d_MPI")
        print_msg("Input stack		 : %s\n" % (stack))
        print_msg("Reference volume	    : %s\n" % (ref_vol))
        print_msg("Output directory	    : %s\n" % (outdir))
        if nmasks > 0:
            print_msg("Maskfile (number of masks)  : %s (%i)\n" %
                      (maskfile, nmasks))
        print_msg("Inner radius		: %i\n" % (first_ring))
        print_msg("Outer radius		: %i\n" % (last_ring))
        print_msg("Ring step		   : %i\n" % (rstep))
        print_msg("X search range	      : %s\n" % (xrng))
        print_msg("Y search range	      : %s\n" % (yrng))
        print_msg("Translational step	  : %s\n" % (step))
        print_msg("Angular step		: %s\n" % (delta))
        print_msg("Angular search range	: %s\n" % (an))
        print_msg("Maximum iteration	   : %i\n" % (max_iter))
        print_msg("Center type		 : %i\n" % (center))
        print_msg("CTF correction	      : %s\n" % (CTF))
        print_msg("Signal-to-Noise Ratio       : %f\n" % (snr))
        print_msg("Reference projection method : %s\n" % (ref_a))
        print_msg("Symmetry group	      : %s\n" % (sym))
        print_msg("Fourier padding for 3D      : %i\n" % (recon_pad))
        print_msg("Number of reference models  : %i\n" % (nrefs))
        print_msg("Sort images between models  : %s\n" % (sort))
        print_msg("Allow images to jump	: %s\n" % (mjump))
        print_msg("CC cutoff standard dev      : %f\n" % (cutoff))
        print_msg("Two tail cutoff	     : %s\n" % (two_tail))
        print_msg("Termination pix error       : %f\n" % (term))
        print_msg("Pixel error cutoff	  : %s\n" % (pix_cutoff))
        print_msg("Restart		     : %s\n" % (restart))
        print_msg("Full output		 : %s\n" % (full_output))
        print_msg("Compare reprojections       : %s\n" % (compare_repro))
        print_msg("Compare ref free class avgs : %s\n" % (compare_ref_free))
        print_msg("Use cutoff from ref free    : %s\n" % (ref_free_cutoff))
        if protos:
            print_msg("Protofilament numbers	: %s\n" % (proto))
            print_msg("Using helical search range   : %s\n" % hsearch)
        if findseam is True:
            print_msg("Using seam-based reconstruction\n")
        if hpars != "-1":
            print_msg("Using hpars		  : %s\n" % hpars)
        if vertstep != None:
            print_msg("Using vertical step    : %.2f\n" % vertstep)
        if save_half is True:
            print_msg("Saving even/odd halves\n")
        for i in xrange(100):
            print_msg("*")
        print_msg("\n\n")
    if maskfile:
        if type(maskfile) is types.StringType: mask3D = get_image(maskfile)
        else: mask3D = maskfile
    else: mask3D = model_circle(last_ring, nx, nx, nx)

    numr = Numrinit(first_ring, last_ring, rstep, "F")
    mask2D = model_circle(last_ring, nx, nx) - model_circle(first_ring, nx, nx)

    fscmask = model_circle(last_ring, nx, nx, nx)
    if CTF:
        from filter import filt_ctf
    from reconstruction_rjh import rec3D_MPI_noCTF

    if myid == main_node:
        active = EMUtil.get_all_attributes(stack, 'active')
        list_of_particles = []
        for im in xrange(len(active)):
            if active[im]: list_of_particles.append(im)
        del active
        nima = len(list_of_particles)
    else:
        nima = 0
    total_nima = bcast_number_to_all(nima, source_node=main_node)

    if myid != main_node:
        list_of_particles = [-1] * total_nima
    list_of_particles = bcast_list_to_all(list_of_particles,
                                          source_node=main_node)

    image_start, image_end = MPI_start_end(total_nima, number_of_proc, myid)

    # create a list of images for each node
    list_of_particles = list_of_particles[image_start:image_end]
    nima = len(list_of_particles)
    if debug:
        finfo.write("image_start, image_end: %d %d\n" %
                    (image_start, image_end))
        finfo.flush()

    data = EMData.read_images(stack, list_of_particles)

    t_zero = Transform({
        "type": "spider",
        "phi": 0,
        "theta": 0,
        "psi": 0,
        "tx": 0,
        "ty": 0
    })
    transmulti = [[t_zero for i in xrange(nrefs)] for j in xrange(nima)]
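    # transmulti[im][iref] holds the current projection transform of image im
    # for reference model iref, seeded from the image header (single model)
    # or from the per-model 'eulers_txty.%i' attributes (multiple models).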

    for iref, im in ((iref, im) for iref in xrange(nrefs)
                     for im in xrange(nima)):
        if nrefs == 1:
            transmulti[im][iref] = data[im].get_attr("xform.projection")
        else:
            # if multi models, keep track of eulers for all models
            try:
                transmulti[im][iref] = data[im].get_attr("eulers_txty.%i" %
                                                         iref)
            except:
                data[im].set_attr("eulers_txty.%i" % iref, t_zero)

    scoremulti = [[0.0 for i in xrange(nrefs)] for j in xrange(nima)]
    pixelmulti = [[0.0 for i in xrange(nrefs)] for j in xrange(nima)]
    ref_res = [0.0 for x in xrange(nrefs)]
    apix = data[0].get_attr('apix_x')

    # for oplane parameter, create cylindrical mask
    if oplane is not None and myid == main_node:
        from hfunctions import createCylMask
        cmaskf = os.path.join(outdir, "mask3D_cyl.mrc")
        mask3D = createCylMask(data, olmask, lmask, ilmask, cmaskf)
        # if finding seam of helix, create wedge masks
        if findseam is True:
            wedgemask = []
            for pf in xrange(nrefs):
                wedgemask.append(EMData())
            # wedgemask option
            if wcmask is not None:
                wcmask = get_input_from_string(wcmask)
                if len(wcmask) != 3:
                    print_msg(
                        "Error: wcmask option requires 3 values: x y radius")
                    sys.exit()

    # determine if particles have helix info:
    try:
        data[0].get_attr('h_angle')
        original_data = []
        boxmask = True
        from hfunctions import createBoxMask
    except:
        boxmask = False

    # prepare particles
    for im in xrange(nima):
        data[im].set_attr('ID', list_of_particles[im])
        data[im].set_attr('pix_score', int(0))
        if CTF:
            # only phaseflip particles, not full CTF correction
            ctf_params = data[im].get_attr("ctf")
            st = Util.infomask(data[im], mask2D, False)
            data[im] -= st[0]
            data[im] = filt_ctf(data[im], ctf_params, sign=-1, binary=1)
            data[im].set_attr('ctf_applied', 1)
        # for window mask:
        if boxmask is True:
            h_angle = data[im].get_attr("h_angle")
            original_data.append(data[im].copy())
            bmask = createBoxMask(nx, apix, ou, lmask, h_angle)
            data[im] *= bmask
            del bmask
    if debug:
        finfo.write('%d loaded  \n' % nima)
        finfo.flush()
    if myid == main_node:
        # initialize data for the reference preparation function
        ref_data = [mask3D, max(center, 0), None, None, None, None]
        # for method -1, switch off centering in user function

    from time import time

    #  this is needed for gathering of pixel errors
    disps = []
    recvcount = []
    disps_score = []
    recvcount_score = []
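    # Build per-rank displacement/count tables for mpi_gatherv: recvcount has
    # one slot per image, recvcount_score has nrefs slots per image to match
    # the flattened per-model score and pixel-error buffers gathered later.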
    for im in xrange(number_of_proc):
        if (im == main_node):
            disps.append(0)
            disps_score.append(0)
        else:
            disps.append(disps[im - 1] + recvcount[im - 1])
            disps_score.append(disps_score[im - 1] + recvcount_score[im - 1])
        ib, ie = MPI_start_end(total_nima, number_of_proc, im)
        recvcount.append(ie - ib)
        recvcount_score.append((ie - ib) * nrefs)

    pixer = [0.0] * nima
    cs = [0.0] * 3
    total_iter = 0
    volodd = EMData.read_images(ref_vol, xrange(nrefs))
    voleve = EMData.read_images(ref_vol, xrange(nrefs))

    if restart:
        # recreate initial volumes from alignments stored in header
        itout = "000_00"
        for iref in xrange(nrefs):
            if (nrefs == 1):
                modout = ""
            else:
                modout = "_model_%02d" % (iref)

            if (sort):
                group = iref
                for im in xrange(nima):
                    imgroup = data[im].get_attr('group')
                    if imgroup == iref:
                        data[im].set_attr('xform.projection',
                                          transmulti[im][iref])
            else:
                group = int(999)
                for im in xrange(nima):
                    data[im].set_attr('xform.projection', transmulti[im][iref])

            fscfile = os.path.join(outdir, "fsc_%s%s" % (itout, modout))

            vol[iref], fscc, volodd[iref], voleve[iref] = rec3D_MPI_noCTF(
                data,
                sym,
                fscmask,
                fscfile,
                myid,
                main_node,
                index=group,
                npad=recon_pad)

            if myid == main_node:
                if helicalrecon:
                    from hfunctions import processHelicalVol
                    vstep = None
                    if vertstep is not None:
                        vstep = (vdp[iref], vdphi[iref])
                    print_msg(
                        "Old rise and twist for model %i     : %8.3f, %8.3f\n"
                        % (iref, dp[iref], dphi[iref]))
                    hvals = processHelicalVol(vol[iref], voleve[iref],
                                              volodd[iref], iref, outdir,
                                              itout, dp[iref], dphi[iref],
                                              apix, hsearch, findseam, vstep,
                                              wcmask)
                    (vol[iref], voleve[iref], volodd[iref], dp[iref],
                     dphi[iref], vdp[iref], vdphi[iref]) = hvals
                    print_msg(
                        "New rise and twist for model %i     : %8.3f, %8.3f\n"
                        % (iref, dp[iref], dphi[iref]))
                    # get new FSC from symmetrized half volumes
                    fscc = fsc_mask(volodd[iref], voleve[iref], mask3D, rstep,
                                    fscfile)
                else:
                    vol[iref].write_image(
                        os.path.join(outdir, "vol_%s.hdf" % itout), -1)

                if save_half is True:
                    volodd[iref].write_image(
                        os.path.join(outdir, "volodd_%s.hdf" % itout), -1)
                    voleve[iref].write_image(
                        os.path.join(outdir, "voleve_%s.hdf" % itout), -1)

                if nmasks > 1:
                    # Read mask for multiplying
                    ref_data[0] = maskF[iref]
                ref_data[2] = vol[iref]
                ref_data[3] = fscc
                #  call user-supplied function to prepare reference image, i.e., center and filter it
                vol[iref], cs, fl = ref_ali3d(ref_data)
                vol[iref].write_image(
                    os.path.join(outdir, "volf_%s.hdf" % (itout)), -1)
                if (apix == 1):
                    res_msg = "Models filtered at spatial frequency of:\t"
                    res = fl
                else:
                    res_msg = "Models filtered at resolution of:       \t"
                    res = apix / fl
                ares = array2string(array(res), precision=2)
                print_msg("%s%s\n\n" % (res_msg, ares))

            bcast_EMData_to_all(vol[iref], myid, main_node)
            # write out headers, under MPI writing has to be done sequentially
            mpi_barrier(MPI_COMM_WORLD)

    # projection matching
    for N_step in xrange(lstp):
        terminate = 0
        Iter = -1
        while (Iter < max_iter - 1 and terminate == 0):
            Iter += 1
            total_iter += 1
            itout = "%03g_%02d" % (delta[N_step], Iter)
            if myid == main_node:
                print_msg(
                    "ITERATION #%3d, inner iteration #%3d\nDelta = %4.1f, an = %5.2f, xrange = %5.2f, yrange = %5.2f, step = %5.2f\n\n"
                    % (N_step, Iter, delta[N_step], an[N_step], xrng[N_step],
                       yrng[N_step], step[N_step]))

            for iref in xrange(nrefs):
                if myid == main_node: start_time = time()
                volft, kb = prep_vol(vol[iref])

                ## constrain projections to out of plane parameter
                theta1 = None
                theta2 = None
                if oplane is not None:
                    theta1 = 90 - oplane
                    theta2 = 90 + oplane
                refrings = prepare_refrings(volft,
                                            kb,
                                            nx,
                                            delta[N_step],
                                            ref_a,
                                            sym,
                                            numr,
                                            MPI=True,
                                            phiEqpsi="Minus",
                                            initial_theta=theta1,
                                            delta_theta=theta2)

                del volft, kb

                if myid == main_node:
                    print_msg(
                        "Time to prepare projections for model %i: %s\n" %
                        (iref, legibleTime(time() - start_time)))
                    start_time = time()

                for im in xrange(nima):
                    data[im].set_attr("xform.projection", transmulti[im][iref])
                    if an[N_step] == -1:
                        t1, peak, pixer[im] = proj_ali_incore(
                            data[im], refrings, numr, xrng[N_step],
                            yrng[N_step], step[N_step], finfo)
                    else:
                        t1, peak, pixer[im] = proj_ali_incore_local(
                            data[im], refrings, numr, xrng[N_step],
                            yrng[N_step], step[N_step], an[N_step], finfo)
                    #data[im].set_attr("xform.projection"%iref, t1)
                    if nrefs > 1:
                        data[im].set_attr("eulers_txty.%i" % iref, t1)
                    scoremulti[im][iref] = peak
                    from pixel_error import max_3D_pixel_error
                    # t1 is the current param, t2 is old
                    t2 = transmulti[im][iref]
                    pixelmulti[im][iref] = max_3D_pixel_error(t1, t2, numr[-3])
                    transmulti[im][iref] = t1

                if myid == main_node:
                    print_msg("Time of alignment for model %i: %s\n" %
                              (iref, legibleTime(time() - start_time)))
                    start_time = time()

            # gather scoring data from all processors
            from mpi import mpi_gatherv
            scoremultisend = sum(scoremulti, [])
            pixelmultisend = sum(pixelmulti, [])
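            # sum(list_of_lists, []) flattens the per-image, per-model lists
            # into 1D buffers; after gatherv and bcast every rank holds the
            # full (total_nima x nrefs) score and pixel-error arrays.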
            tmp = mpi_gatherv(scoremultisend, len(scoremultisend), MPI_FLOAT,
                              recvcount_score, disps_score, MPI_FLOAT,
                              main_node, MPI_COMM_WORLD)
            tmp1 = mpi_gatherv(pixelmultisend, len(pixelmultisend), MPI_FLOAT,
                               recvcount_score, disps_score, MPI_FLOAT,
                               main_node, MPI_COMM_WORLD)
            tmp = mpi_bcast(tmp, (total_nima * nrefs), MPI_FLOAT, 0,
                            MPI_COMM_WORLD)
            tmp1 = mpi_bcast(tmp1, (total_nima * nrefs), MPI_FLOAT, 0,
                             MPI_COMM_WORLD)
            tmp = map(float, tmp)
            tmp1 = map(float, tmp1)
            score = array(tmp).reshape(-1, nrefs)
            pixelerror = array(tmp1).reshape(-1, nrefs)
            score_local = array(scoremulti)
            mean_score = score.mean(axis=0)
            std_score = score.std(axis=0)
            cut = mean_score - (cutoff * std_score)
            cut2 = mean_score + (cutoff * std_score)
            res_max = score_local.argmax(axis=1)
            minus_cc = [0.0 for x in xrange(nrefs)]
            minus_pix = [0.0 for x in xrange(nrefs)]
            minus_ref = [0.0 for x in xrange(nrefs)]

            #output pixel errors
            if (myid == main_node):
                from statistics import hist_list
                lhist = 20
                pixmin = pixelerror.min(axis=1)
                region, histo = hist_list(pixmin, lhist)
                if (region[0] < 0.0): region[0] = 0.0
                print_msg(
                    "Histogram of pixel errors\n      ERROR       number of particles\n"
                )
                for lhx in xrange(lhist):
                    print_msg(" %10.3f     %7d\n" % (region[lhx], histo[lhx]))
                # Terminate if 95% within 1 pixel error
                im = 0
                for lhx in xrange(lhist):
                    if (region[lhx] > 1.0): break
                    im += histo[lhx]
                print_msg("Percent of particles with pixel error < 1: %f\n\n" %
                          (im / float(total_nima) * 100))
                term_cond = float(term) / 100
                if (im / float(total_nima) > term_cond):
                    terminate = 1
                    print_msg("Terminating internal loop\n")
                del region, histo
            terminate = mpi_bcast(terminate, 1, MPI_INT, 0, MPI_COMM_WORLD)
            terminate = int(terminate[0])

            for im in xrange(nima):
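                # Assign each particle a group label: the best-scoring model
                # index, or a rejection code (888 = cc cutoff, 777 = pixel-error
                # cutoff, 666 = reference-free cutoff, 999 = sorting disabled).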
                if (sort == False):
                    data[im].set_attr('group', 999)
                elif (mjump[N_step] == 1):
                    data[im].set_attr('group', int(res_max[im]))

                pix_run = data[im].get_attr('pix_score')
                if (pix_cutoff[N_step] == 1
                        and (terminate == 1 or Iter == max_iter - 1)):
                    if (pixelmulti[im][int(res_max[im])] > 1):
                        data[im].set_attr('pix_score', int(777))

                if (score_local[im][int(res_max[im])] < cut[int(
                        res_max[im])]) or (two_tail and score_local[im][int(
                            res_max[im])] > cut2[int(res_max[im])]):
                    data[im].set_attr('group', int(888))
                    minus_cc[int(res_max[im])] = minus_cc[int(res_max[im])] + 1

                if (pix_run == 777):
                    data[im].set_attr('group', int(777))
                    minus_pix[int(
                        res_max[im])] = minus_pix[int(res_max[im])] + 1

                if (compare_ref_free != "-1") and (ref_free_cutoff[N_step] !=
                                                   -1) and (total_iter > 1):
                    id = data[im].get_attr('ID')
                    if id in rejects:
                        data[im].set_attr('group', int(666))
                        minus_ref[int(
                            res_max[im])] = minus_ref[int(res_max[im])] + 1

            minus_cc_tot = mpi_reduce(minus_cc, nrefs, MPI_FLOAT, MPI_SUM, 0,
                                      MPI_COMM_WORLD)
            minus_pix_tot = mpi_reduce(minus_pix, nrefs, MPI_FLOAT, MPI_SUM, 0,
                                       MPI_COMM_WORLD)
            minus_ref_tot = mpi_reduce(minus_ref, nrefs, MPI_FLOAT, MPI_SUM, 0,
                                       MPI_COMM_WORLD)
            if (myid == main_node):
                if (sort):
                    tot_max = score.argmax(axis=1)
                    res = bincount(tot_max)
                else:
                    res = ones(nrefs) * total_nima
                print_msg("Particle distribution:	     \t\t%s\n" % (res * 1.0))
                afcut1 = res - minus_cc_tot
                afcut2 = afcut1 - minus_pix_tot
                afcut3 = afcut2 - minus_ref_tot
                print_msg("Particle distribution after cc cutoff:\t\t%s\n" %
                          (afcut1))
                print_msg("Particle distribution after pix cutoff:\t\t%s\n" %
                          (afcut2))
                print_msg("Particle distribution after ref cutoff:\t\t%s\n\n" %
                          (afcut3))

            res = [0.0 for i in xrange(nrefs)]
            for iref in xrange(nrefs):
                if (center == -1):
                    from utilities import estimate_3D_center_MPI, rotate_3D_shift
                    dummy = EMData()
                    cs[0], cs[1], cs[2], dummy, dummy = estimate_3D_center_MPI(
                        data, total_nima, myid, number_of_proc, main_node)
                    cs = mpi_bcast(cs, 3, MPI_FLOAT, main_node, MPI_COMM_WORLD)
                    cs = [-float(cs[0]), -float(cs[1]), -float(cs[2])]
                    rotate_3D_shift(data, cs)

                if (sort):
                    group = iref
                    for im in xrange(nima):
                        imgroup = data[im].get_attr('group')
                        if imgroup == iref:
                            data[im].set_attr('xform.projection',
                                              transmulti[im][iref])
                else:
                    group = int(999)
                    for im in xrange(nima):
                        data[im].set_attr('xform.projection',
                                          transmulti[im][iref])
                if (nrefs == 1):
                    modout = ""
                else:
                    modout = "_model_%02d" % (iref)

                fscfile = os.path.join(outdir, "fsc_%s%s" % (itout, modout))
                vol[iref], fscc, volodd[iref], voleve[iref] = rec3D_MPI_noCTF(
                    data,
                    sym,
                    fscmask,
                    fscfile,
                    myid,
                    main_node,
                    index=group,
                    npad=recon_pad)

                if myid == main_node:
                    print_msg("3D reconstruction time for model %i: %s\n" %
                              (iref, legibleTime(time() - start_time)))
                    start_time = time()

                # Compute Fourier variance
                if fourvar:
                    outvar = os.path.join(outdir, "volVar_%s.hdf" % (itout))
                    ssnr_file = os.path.join(outdir, "ssnr_%s" % (itout))
                    varf = varf3d_MPI(data,
                                      ssnr_text_file=ssnr_file,
                                      mask2D=None,
                                      reference_structure=vol[iref],
                                      ou=last_ring,
                                      rw=1.0,
                                      npad=1,
                                      CTF=None,
                                      sign=1,
                                      sym=sym,
                                      myid=myid)
                    if myid == main_node:
                        print_msg(
                            "Time to calculate 3D Fourier variance for model %i: %s\n"
                            % (iref, legibleTime(time() - start_time)))
                        start_time = time()
                        varf = 1.0 / varf
                        varf.write_image(outvar, -1)
                else:
                    varf = None

                if myid == main_node:
                    if helicalrecon:
                        from hfunctions import processHelicalVol

                        vstep = None
                        if vertstep is not None:
                            vstep = (vdp[iref], vdphi[iref])
                        print_msg(
                            "Old rise and twist for model %i     : %8.3f, %8.3f\n"
                            % (iref, dp[iref], dphi[iref]))
                        hvals = processHelicalVol(vol[iref], voleve[iref],
                                                  volodd[iref], iref, outdir,
                                                  itout, dp[iref], dphi[iref],
                                                  apix, hsearch, findseam,
                                                  vstep, wcmask)
                        (vol[iref], voleve[iref], volodd[iref], dp[iref],
                         dphi[iref], vdp[iref], vdphi[iref]) = hvals
                        print_msg(
                            "New rise and twist for model %i     : %8.3f, %8.3f\n"
                            % (iref, dp[iref], dphi[iref]))
                        # get new FSC from symmetrized half volumes
                        fscc = fsc_mask(volodd[iref], voleve[iref], mask3D,
                                        rstep, fscfile)

                        print_msg(
                            "Time to search and apply helical symmetry for model %i: %s\n\n"
                            % (iref, legibleTime(time() - start_time)))
                        start_time = time()
                    else:
                        vol[iref].write_image(
                            os.path.join(outdir, "vol_%s.hdf" % (itout)), -1)

                    if save_half is True:
                        volodd[iref].write_image(
                            os.path.join(outdir, "volodd_%s.hdf" % (itout)),
                            -1)
                        voleve[iref].write_image(
                            os.path.join(outdir, "voleve_%s.hdf" % (itout)),
                            -1)

                    if nmasks > 1:
                        # Read mask for multiplying
                        ref_data[0] = maskF[iref]
                    ref_data[2] = vol[iref]
                    ref_data[3] = fscc
                    ref_data[4] = varf
                    #  call user-supplied function to prepare reference image, i.e., center and filter it
                    vol[iref], cs, fl = ref_ali3d(ref_data)
                    vol[iref].write_image(
                        os.path.join(outdir, "volf_%s.hdf" % (itout)), -1)
                    if (apix == 1):
                        res_msg = "Models filtered at spatial frequency of:\t"
                        res[iref] = fl
                    else:
                        res_msg = "Models filtered at resolution of:       \t"
                        res[iref] = apix / fl

                del varf
                bcast_EMData_to_all(vol[iref], myid, main_node)

                if compare_ref_free != "-1": compare_repro = True
                if compare_repro:
                    outfile_repro = comp_rep(refrings, data, itout, modout,
                                             vol[iref], group, nima, nx, myid,
                                             main_node, outdir)
                    mpi_barrier(MPI_COMM_WORLD)
                    if compare_ref_free != "-1":
                        ref_free_output = os.path.join(
                            outdir, "ref_free_%s%s" % (itout, modout))
                        rejects = compare(compare_ref_free, outfile_repro,
                                          ref_free_output, yrng[N_step],
                                          xrng[N_step], rstep, nx, apix,
                                          ref_free_cutoff[N_step],
                                          number_of_proc, myid, main_node)

            # retrieve alignment params from all processors
            par_str = ['xform.projection', 'ID', 'group']
            if nrefs > 1:
                for iref in xrange(nrefs):
                    par_str.append('eulers_txty.%i' % iref)

            if myid == main_node:
                from utilities import recv_attr_dict
                recv_attr_dict(main_node, stack, data, par_str, image_start,
                               image_end, number_of_proc)

            else:
                send_attr_dict(main_node, data, par_str, image_start,
                               image_end)

            if myid == main_node:
                ares = array2string(array(res), precision=2)
                print_msg("%s%s\n\n" % (res_msg, ares))
                dummy = EMData()
                if full_output:
                    nimat = EMUtil.get_image_count(stack)
                    output_file = os.path.join(outdir, "paramout_%s" % itout)
                    foutput = open(output_file, 'w')
                    for im in xrange(nimat):
                        # save the parameters for each of the models
                        outstring = ""
                        dummy.read_image(stack, im, True)
                        param3d = dummy.get_attr('xform.projection')
                        g = dummy.get_attr("group")
                        # retrieve alignments in EMAN-format
                        pE = param3d.get_params('eman')
                        outstring += "%f\t%f\t%f\t%f\t%f\t%i\n" % (
                            pE["az"], pE["alt"], pE["phi"], pE["tx"], pE["ty"],
                            g)
                        foutput.write(outstring)
                    foutput.close()
                del dummy
            mpi_barrier(MPI_COMM_WORLD)


#	mpi_finalize()

    if myid == main_node: print_end_msg("ali3d_MPI")
示例#18
0
def cml_find_structure2(Prj, Ori, Rot, outdir, outname, maxit, first_zero,
                        flag_weights, myid, main_node, number_of_proc):
    from projection import cml_export_progress, cml_disc, cml_export_txtagls
    import time, sys
    from random import shuffle, random

    from mpi import MPI_FLOAT, MPI_INT, MPI_SUM, MPI_COMM_WORLD
    from mpi import mpi_reduce, mpi_bcast, mpi_barrier

    # global vars
    global g_i_prj, g_n_prj, g_n_anglst, g_anglst, g_d_psi, g_debug, g_n_lines, g_seq

    # list of free orientation
    ocp = [-1] * g_n_anglst

    if first_zero:
        listprj = range(1, g_n_prj)
        ocp[0] = 0
    else:
        listprj = range(g_n_prj)

    # to stop when the solution oscillates
    period_disc = [0, 0, 0]
    period_ct = 0
    period_th = 2
    #if not flag_weights:   weights = [1.0] * g_n_lines

    # iteration loop
    for ite in xrange(maxit):
        #print ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>    ite = ", ite, "   myid = ", myid
        t_start = time.time()

        # loop over i prj
        change = False
        tlistprj = listprj[:]
        shuffle(tlistprj)
        nnn = len(tlistprj)
        tlistprj = mpi_bcast(tlistprj, nnn, MPI_INT, main_node, MPI_COMM_WORLD)
        tlistprj = map(int, tlistprj)
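        # Broadcast the main node's shuffled projection order so every rank
        # visits the projections in the same sequence this iteration.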
        """
		if(ite>1 and ite%5 == 0  and ite<140):
			if(myid == main_node):
				for i in xrange(0,len(tlistprj),5):
					ind          = 4*i
					Ori[ind]      =  360.*random()
					Ori[ind+1]    =  180.*random()
					Ori[ind+2]    =  360.*random()
					Ori[ind+3]    =  -1
				for i in xrange(len(tlistprj)):
					ind          = 4*i
					Ori[ind+3]    = float(Ori[ind+3])
			nnn = len(Ori)
			Ori = mpi_bcast(Ori, nnn, MPI_FLOAT, main_node, MPI_COMM_WORLD)
			Ori = map(float, Ori)
			for i in xrange(len(tlistprj)):
				ind          = 4*i
				Ori[ind+3]    = int(Ori[ind+3])
		"""

        for iprj in tlistprj:
            #print "**********************************  iprj = ", iprj, g_n_anglst

            # Store current the current orientation
            ind = 4 * iprj
            store_phi = Ori[ind]
            store_theta = Ori[ind + 1]
            store_psi = Ori[ind + 2]
            cur_agl = Ori[ind + 3]
            if cur_agl != -1: ocp[cur_agl] = -1

            # prepare active index of cml for weighting in order to earn time later
            iw = [0] * (g_n_prj - 1)
            c = 0
            ct = 0
            for i in xrange(g_n_prj):
                for j in xrange(i + 1, g_n_prj):
                    if i == iprj or j == iprj:
                        iw[ct] = c
                        ct += 1
                    c += 1

            # loop over all angles
            best_disc_list = [0] * g_n_anglst
            best_psi_list = [0] * g_n_anglst
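            # Each rank scores a strided subset of the angular grid; the partial
            # discrepancy/psi lists are combined with mpi_reduce and the main
            # node picks the orientation with the smallest discrepancy.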
            for iagl in xrange(myid, g_n_anglst, number_of_proc):
                # if orientation is free
                if ocp[iagl] == -1:
                    # assign new orientation
                    Ori[ind] = g_anglst[iagl][0]
                    Ori[ind + 1] = g_anglst[iagl][1]
                    Rot = Util.cml_update_rot(Rot, iprj, Ori[ind],
                                              Ori[ind + 1], 0.0)
                    # weights
                    if flag_weights:
                        cml = Util.cml_line_in3d(Ori, g_seq, g_n_prj,
                                                 g_n_lines)
                        weights = Util.cml_weights(cml)
                        mw = max(weights)
                        for i in xrange(g_n_lines):
                            weights[i] = mw - weights[i]
                        sw = sum(weights)
                        if sw == 0:
                            weights = [6.28 / float(g_n_lines)] * g_n_lines
                        else:
                            for i in xrange(g_n_lines):
                                weights[i] /= sw
                                weights[i] *= weights[i]

                    # spin all psi
                    com = Util.cml_line_insino(Rot, iprj, g_n_prj)
                    if flag_weights:
                        res = Util.cml_spin_psi(Prj, com, weights, iprj, iw,
                                                g_n_psi, g_d_psi, g_n_prj)
                    else:
                        res = Util.cml_spin_psi_now(Prj, com, iprj, iw,
                                                    g_n_psi, g_d_psi, g_n_prj)

                    # select the best
                    best_disc_list[iagl] = res[0]
                    best_psi_list[iagl] = res[1]

                    if g_debug:
                        cml_export_progress(outdir, ite, iprj, iagl, res[1],
                                            res[0], 'progress')
                else:
                    if g_debug:
                        cml_export_progress(outdir, ite, iprj, iagl, -1, -1,
                                            'progress')
            best_disc_list = mpi_reduce(best_disc_list, g_n_anglst, MPI_FLOAT,
                                        MPI_SUM, main_node, MPI_COMM_WORLD)
            best_psi_list = mpi_reduce(best_psi_list, g_n_anglst, MPI_FLOAT,
                                       MPI_SUM, main_node, MPI_COMM_WORLD)

            best_psi = -1
            best_iagl = -1

            if myid == main_node:
                best_disc = 1.0e20
                for iagl in xrange(g_n_anglst):
                    if best_disc_list[iagl] > 0.0 and best_disc_list[
                            iagl] < best_disc:
                        best_disc = best_disc_list[iagl]
                        best_psi = best_psi_list[iagl]
                        best_iagl = iagl
            best_psi = mpi_bcast(best_psi, 1, MPI_FLOAT, main_node,
                                 MPI_COMM_WORLD)
            best_iagl = mpi_bcast(best_iagl, 1, MPI_INT, main_node,
                                  MPI_COMM_WORLD)
            best_psi = float(best_psi[0])
            best_iagl = int(best_iagl[0])

            #print "xxxxx myid = ", myid, "    best_psi = ", best_psi, "   best_ialg = ", best_iagl

            # if change, assign
            if best_iagl != cur_agl:
                ocp[best_iagl] = iprj
                Ori[ind] = g_anglst[best_iagl][0]  # phi
                Ori[ind + 1] = g_anglst[best_iagl][1]  # theta
                Ori[ind + 2] = best_psi * g_d_psi  # psi
                Ori[ind + 3] = best_iagl  # index
                change = True
            else:
                if cur_agl != -1: ocp[cur_agl] = iprj
                Ori[ind] = store_phi
                Ori[ind + 1] = store_theta
                Ori[ind + 2] = store_psi
                Ori[ind + 3] = cur_agl

            Rot = Util.cml_update_rot(Rot, iprj, Ori[ind], Ori[ind + 1],
                                      Ori[ind + 2])

            if g_debug:
                cml_export_progress(outdir, ite, iprj, best_iagl,
                                    best_psi * g_d_psi, best_disc, 'choose')

        # if one change, compute new full disc
        disc = cml_disc(Prj, Ori, Rot, flag_weights)

        # display in the progress file
        if myid == main_node:
            cml_export_txtagls(outdir, outname, Ori, disc,
                               'Ite: %03i' % (ite + 1))

        if not change: break

        # to stop when the solution oscillates
        period_disc.pop(0)
        period_disc.append(disc)
        if period_disc[0] == period_disc[2]:
            period_ct += 1
            if period_ct >= period_th and min(
                    period_disc) == disc and myid == main_node:
                angfile = open(outdir + '/' + outname, 'a')
                angfile.write('\nSTOP SOLUTION UNSTABLE\n')
                angfile.write('Discrepancy period: %s\n' % period_disc)
                angfile.close()
                break
        else:
            period_ct = 0
        mpi_barrier(MPI_COMM_WORLD)

    return Ori, disc, ite
示例#19
0
def main():
    progname = os.path.basename(sys.argv[0])
    usage = progname + """  input_micrograph_list_file  input_micrograph_pattern  input_coordinates_pattern  output_directory  --coordinates_format  --box_size=box_size  --invert  --import_ctf=ctf_file  --limit_ctf  --resample_ratio=resample_ratio  --defocus_error=defocus_error  --astigmatism_error=astigmatism_error
	
Window particles from micrographs in input list file. The coordinates of the particles should be given as input.
Please specify the name pattern of input micrographs and coordinates files with a wild card (*). Use the wild card to indicate the place of the micrograph ID (e.g. serial number, time stamp, etc.). 
The name patterns must be enclosed by single quotes (') or double quotes ("). (Note: sxgui.py automatically adds single quotes (')). 
BDB files can not be selected as input micrographs.
	
	sxwindow.py  mic_list.txt  ./mic*.hdf  info/mic*_info.json  particles  --coordinates_format=eman2  --box_size=64  --invert  --import_ctf=outdir_cter/partres/partres.txt
	
If micrograph list file name is not provided, all files matched with the micrograph name pattern will be processed.
	
	sxwindow.py  ./mic*.hdf  info/mic*_info.json  particles  --coordinates_format=eman2  --box_size=64  --invert  --import_ctf=outdir_cter/partres/partres.txt
	
"""
    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option(
        "--coordinates_format",
        type="string",
        default="eman1",
        help=
        "format of input coordinates files: 'sparx', 'eman1', 'eman2', or 'spider'. the coordinates of the sparx, eman2, and spider formats are the particle center. the coordinates of the eman1 format are the particle box corner associated with the original box size. (default eman1)"
    )
    parser.add_option(
        "--box_size",
        type="int",
        default=256,
        help=
        "x and y dimension of square area to be windowed (in pixels): pixel size after resampling is assumed when resample_ratio < 1.0 (default 256)"
    )
    parser.add_option(
        "--invert",
        action="store_true",
        default=False,
        help="invert image contrast: recommended for cryo data (default False)"
    )
    parser.add_option(
        "--import_ctf",
        type="string",
        default="",
        help="file name of sxcter output: normally partres.txt (default none)")
    parser.add_option(
        "--limit_ctf",
        action="store_true",
        default=False,
        help=
        "filter micrographs based on the CTF limit: this option requires --import_ctf. (default False)"
    )
    parser.add_option(
        "--resample_ratio",
        type="float",
        default=1.0,
        help=
        "ratio of new to old image size (or old to new pixel size) for resampling: Valid range is 0.0 < resample_ratio <= 1.0. (default 1.0)"
    )
    parser.add_option(
        "--defocus_error",
        type="float",
        default=1000000.0,
        help=
        "defocus error limit: exclude micrographs whose relative defocus error as estimated by sxcter is larger than defocus_error percent. the error is computed as (std dev defocus)/defocus*100%. (default 1000000.0)"
    )
    parser.add_option(
        "--astigmatism_error",
        type="float",
        default=360.0,
        help=
        "astigmatism error limit: Set to zero astigmatism for micrographs whose astigmatism angular error as estimated by sxcter is larger than astigmatism_error degrees. (default 360.0)"
    )

    ### detect if program is running under MPI
    RUNNING_UNDER_MPI = "OMPI_COMM_WORLD_SIZE" in os.environ

    main_node = 0

    if RUNNING_UNDER_MPI:
        from mpi import mpi_init
        from mpi import MPI_COMM_WORLD, mpi_comm_rank, mpi_comm_size, mpi_barrier, mpi_reduce, MPI_INT, MPI_SUM

        mpi_init(0, [])
        myid = mpi_comm_rank(MPI_COMM_WORLD)
        number_of_processes = mpi_comm_size(MPI_COMM_WORLD)
    else:
        number_of_processes = 1
        myid = 0

    (options, args) = parser.parse_args(sys.argv[1:])

    mic_list_file_path = None
    mic_pattern = None
    coords_pattern = None
    error_status = None
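    # Argument validation uses a single-pass "while True ... break" block: the
    # first failing check records error_status, and all processes exit together
    # via if_error_then_all_processes_exit_program below.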
    while True:
        if len(args) < 3 or len(args) > 4:
            error_status = (
                "Please check usage for number of arguments.\n Usage: " +
                usage + "\n" + "Please run %s -h for help." % (progname),
                getframeinfo(currentframe()))
            break

        if len(args) == 3:
            mic_pattern = args[0]
            coords_pattern = args[1]
            out_dir = args[2]
        else:  # assert(len(args) == 4)
            mic_list_file_path = args[0]
            mic_pattern = args[1]
            coords_pattern = args[2]
            out_dir = args[3]

        if mic_list_file_path != None:
            if os.path.splitext(mic_list_file_path)[1] != ".txt":
                error_status = (
                    "Extension of input micrograph list file must be \".txt\". Please check input_micrograph_list_file argument. Run %s -h for help."
                    % (progname), getframeinfo(currentframe()))
                break

        if mic_pattern[:len("bdb:")].lower() == "bdb:":
            error_status = (
                "BDB file can not be selected as input micrographs. Please convert the format, and restart the program. Run %s -h for help."
                % (progname), getframeinfo(currentframe()))
            break

        if mic_pattern.find("*") == -1:
            error_status = (
                "Input micrograph file name pattern must contain wild card (*). Please check input_micrograph_pattern argument. Run %s -h for help."
                % (progname), getframeinfo(currentframe()))
            break

        if coords_pattern.find("*") == -1:
            error_status = (
                "Input coordinates file name pattern must contain wild card (*). Please check input_coordinates_pattern argument. Run %s -h for help."
                % (progname), getframeinfo(currentframe()))
            break

        if myid == main_node:
            if os.path.exists(out_dir):
                error_status = (
                    "Output directory exists. Please change the name and restart the program.",
                    getframeinfo(currentframe()))
                break

        break
    if_error_then_all_processes_exit_program(error_status)

    # Check invalid conditions of options
    check_options(options, progname)

    mic_name_list = None
    error_status = None
    if myid == main_node:
        if mic_list_file_path != None:
            print("Loading micrograph list from %s file ..." %
                  (mic_list_file_path))
            mic_name_list = read_text_file(mic_list_file_path)
            if len(mic_name_list) > 0:
                print("Directory of first micrograph entry is %s" %
                      (os.path.dirname(mic_name_list[0])))
        else:  # assert (mic_list_file_path == None)
            print("Generating micrograph list in %s directory..." %
                  (os.path.dirname(mic_pattern)))
            mic_name_list = glob.glob(mic_pattern)
        if len(mic_name_list) == 0:
            error_status = (
                "No micrograph file is found. Please check input_micrograph_pattern and/or input_micrograph_list_file argument. Run %s -h for help."
                % (progname), getframeinfo(currentframe()))
        else:
            print("Found %d microgarphs" % len(mic_name_list))

    if_error_then_all_processes_exit_program(error_status)
    if RUNNING_UNDER_MPI:
        mic_name_list = wrap_mpi_bcast(mic_name_list, main_node)

    coords_name_list = None
    error_status = None
    if myid == main_node:
        coords_name_list = glob.glob(coords_pattern)
        if len(coords_name_list) == 0:
            error_status = (
                "No coordinates file is found. Please check input_coordinates_pattern argument. Run %s -h for help."
                % (progname), getframeinfo(currentframe()))
    if_error_then_all_processes_exit_program(error_status)
    if RUNNING_UNDER_MPI:
        coords_name_list = wrap_mpi_bcast(coords_name_list, main_node)

##################################################################################################################################################################################################################
##################################################################################################################################################################################################################
##################################################################################################################################################################################################################

# all processes must have access to indices
    if options.import_ctf:
        i_enum = -1
        i_enum += 1
        idx_cter_def = i_enum  # defocus [um]; index must be same as ctf object format
        i_enum += 1
        idx_cter_cs = i_enum  # Cs [mm]; index must be same as ctf object format
        i_enum += 1
        idx_cter_vol = i_enum  # voltage[kV]; index must be same as ctf object format
        i_enum += 1
        idx_cter_apix = i_enum  # pixel size [A]; index must be same as ctf object format
        i_enum += 1
        idx_cter_bfactor = i_enum  # B-factor [A^2]; index must be same as ctf object format
        i_enum += 1
        idx_cter_ac = i_enum  # amplitude contrast [%]; index must be same as ctf object format
        i_enum += 1
        idx_cter_astig_amp = i_enum  # astigmatism amplitude [um]; index must be same as ctf object format
        i_enum += 1
        idx_cter_astig_ang = i_enum  # astigmatism angle [degree]; index must be same as ctf object format
        i_enum += 1
        idx_cter_sd_def = i_enum  # std dev of defocus [um]
        i_enum += 1
        idx_cter_sd_astig_amp = i_enum  # std dev of ast amp [A]
        i_enum += 1
        idx_cter_sd_astig_ang = i_enum  # std dev of ast angle [degree]
        i_enum += 1
        idx_cter_cv_def = i_enum  # coefficient of variation of defocus [%]
        i_enum += 1
        idx_cter_cv_astig_amp = i_enum  # coefficient of variation of ast amp [%]
        i_enum += 1
        idx_cter_spectra_diff = i_enum  # average of differences between with- and without-astig. experimental 1D spectra at extrema
        i_enum += 1
        idx_cter_error_def = i_enum  # frequency at which signal drops by 50% due to estimated error of defocus alone [1/A]
        i_enum += 1
        idx_cter_error_astig = i_enum  # frequency at which signal drops by 50% due to estimated error of defocus and astigmatism [1/A]
        i_enum += 1
        idx_cter_error_ctf = i_enum  # limit frequency by CTF error [1/A]
        i_enum += 1
        idx_cter_mic_name = i_enum  # micrograph name
        i_enum += 1
        n_idx_cter = i_enum

    # Prepare loop variables
    mic_basename_pattern = os.path.basename(
        mic_pattern)  # file pattern without path
    mic_baseroot_pattern = os.path.splitext(mic_basename_pattern)[
        0]  # file pattern without path and extension
    coords_format = options.coordinates_format.lower()
    box_size = options.box_size
    box_half = box_size // 2
    mask2d = model_circle(
        box_size // 2, box_size, box_size
    )  # Create circular 2D mask to Util.infomask of particle images
    resample_ratio = options.resample_ratio

    n_mic_process = 0
    n_mic_reject_no_coords = 0
    n_mic_reject_no_cter_entry = 0
    n_global_coords_detect = 0
    n_global_coords_process = 0
    n_global_coords_reject_out_of_boundary = 0

    serial_id_list = []
    error_status = None
    ## not a real while loop; a single-pass block so that break can be used when errors need to be reported
    while myid == main_node:
        #
        # NOTE: 2016/05/24 Toshio Moriya
        # Now, ignores the path in mic_pattern and entries of mic_name_list to create serial ID
        # Only the basename (file name) in micrograph path must be match
        #
        # Create list of micrograph serial ID
        # Break micrograph name pattern into prefix and suffix to find the head index of the micrograph serial id
        #
        mic_basename_tokens = mic_basename_pattern.split('*')
        # assert (len(mic_basename_tokens) == 2)
        serial_id_head_index = len(mic_basename_tokens[0])
        # Loop through micrograph names
        for mic_name in mic_name_list:
            # Find the tail index of the serial id and extract serial id from the micrograph name
            mic_basename = os.path.basename(mic_name)
            serial_id_tail_index = mic_basename.index(mic_basename_tokens[1])
            serial_id = mic_basename[serial_id_head_index:serial_id_tail_index]
            serial_id_list.append(serial_id)
        # assert (len(serial_id_list) == len(mic_name))
        del mic_name_list  # Do not need this anymore

        # Load CTFs if necessary
        if options.import_ctf:

            ctf_list = read_text_row(options.import_ctf)
            # print("Detected CTF entries : %6d ..." % (len(ctf_list)))

            if len(ctf_list) == 0:
                error_status = (
                    "No CTF entry is found in %s. Please check --import_ctf option. Run %s -h for help."
                    % (options.import_ctf, progname),
                    getframeinfo(currentframe()))
                break

            if (len(ctf_list[0]) != n_idx_cter):
                error_status = (
                    "Number of columns (%d) must be %d in %s. The format might be old. Please run sxcter.py again."
                    % (len(ctf_list[0]), n_idx_cter, options.import_ctf),
                    getframeinfo(currentframe()))
                break

            ctf_dict = {}
            n_reject_defocus_error = 0
            ctf_error_limit = [
                options.defocus_error / 100.0, options.astigmatism_error
            ]
            for ctf_params in ctf_list:
                assert (len(ctf_params) == n_idx_cter)
                # mic_baseroot is name of micrograph minus the path and extension
                mic_baseroot = os.path.splitext(
                    os.path.basename(ctf_params[idx_cter_mic_name]))[0]
                if (ctf_params[idx_cter_sd_def] / ctf_params[idx_cter_def] >
                        ctf_error_limit[0]):
                    print(
                        "Defocus error %f exceeds the threshold. Micrograph %s is rejected."
                        % (ctf_params[idx_cter_sd_def] /
                           ctf_params[idx_cter_def], mic_baseroot))
                    n_reject_defocus_error += 1
                else:
                    if (ctf_params[idx_cter_sd_astig_ang] >
                            ctf_error_limit[1]):
                        ctf_params[idx_cter_astig_amp] = 0.0
                        ctf_params[idx_cter_astig_ang] = 0.0
                    ctf_dict[mic_baseroot] = ctf_params
            del ctf_list  # Do not need this anymore

        break

    if_error_then_all_processes_exit_program(error_status)

    if options.import_ctf:
        if options.limit_ctf:
            cutoff_histogram = []  #@ming compute the histogram for micrographs cut off by the ctf_params limit.

##################################################################################################################################################################################################################
##################################################################################################################################################################################################################
##################################################################################################################################################################################################################

    restricted_serial_id_list = []
    if myid == main_node:
        # Loop over serial IDs of micrographs
        for serial_id in serial_id_list:
            # mic_baseroot is name of micrograph minus the path and extension
            mic_baseroot = mic_baseroot_pattern.replace("*", serial_id)
            mic_name = mic_pattern.replace("*", serial_id)
            coords_name = coords_pattern.replace("*", serial_id)

            ########### # CHECKS: BEGIN
            if coords_name not in coords_name_list:
                print("    Cannot read %s. Skipping %s ..." %
                      (coords_name, mic_baseroot))
                n_mic_reject_no_coords += 1
                continue

            # IF mic is in CTER results
            if options.import_ctf:
                if mic_baseroot not in ctf_dict:
                    print(
                        "    Is not listed in CTER results. Skipping %s ..." %
                        (mic_baseroot))
                    n_mic_reject_no_cter_entry += 1
                    continue
                else:
                    ctf_params = ctf_dict[mic_baseroot]
            # CHECKS: END

            n_mic_process += 1

            restricted_serial_id_list.append(serial_id)
        # restricted_serial_id_list = restricted_serial_id_list[:128]  ## for testing against the nonMPI version

    if myid != main_node:
        if options.import_ctf:
            ctf_dict = None

    error_status = None
    if myid == main_node and len(restricted_serial_id_list) < number_of_processes:
        error_status = (
            'Number of processes (%d) supplied by --np in mpirun cannot be greater than %d (number of micrographs that satisfy all criteria to be processed) '
            % (number_of_processes, len(restricted_serial_id_list)),
            getframeinfo(currentframe()))
    if_error_then_all_processes_exit_program(error_status)

    ## keep a copy of the original output directory where the final bdb will be created
    original_out_dir = out_dir
    if RUNNING_UNDER_MPI:
        mpi_barrier(MPI_COMM_WORLD)
        restricted_serial_id_list = wrap_mpi_bcast(restricted_serial_id_list,
                                                   main_node)
        mic_start, mic_end = MPI_start_end(len(restricted_serial_id_list),
                                           number_of_processes, myid)
        restricted_serial_id_list_not_sliced = restricted_serial_id_list
        restricted_serial_id_list = restricted_serial_id_list[
            mic_start:mic_end]

        if options.import_ctf:
            ctf_dict = wrap_mpi_bcast(ctf_dict, main_node)

        # generate subdirectories of out_dir, one for each process
        out_dir = os.path.join(out_dir, "%03d" % myid)

    if myid == main_node:
        print(
            "Micrographs processed by main process (including percent complete):"
        )

    len_processed_by_main_node_divided_by_100 = len(
        restricted_serial_id_list) / 100.0

    ##################################################################################################################################################################################################################
    ##################################################################################################################################################################################################################
    ##################################################################################################################################################################################################################
    #####  Starting main parallel execution

    for my_idx, serial_id in enumerate(restricted_serial_id_list):
        mic_baseroot = mic_baseroot_pattern.replace("*", serial_id)
        mic_name = mic_pattern.replace("*", serial_id)
        coords_name = coords_pattern.replace("*", serial_id)

        if myid == main_node:
            print(
                mic_name, " ---> % 2.2f%%" %
                (my_idx / len_processed_by_main_node_divided_by_100))
        mic_img = get_im(mic_name)

        # Read coordinates according to the specified format and
        # make the coordinates the center of particle image
        if coords_format == "sparx":
            coords_list = read_text_row(coords_name)
        elif coords_format == "eman1":
            coords_list = read_text_row(coords_name)
            for i in xrange(len(coords_list)):
                coords_list[i] = [(coords_list[i][0] + coords_list[i][2] // 2),
                                  (coords_list[i][1] + coords_list[i][3] // 2)]
        elif coords_format == "eman2":
            coords_list = js_open_dict(coords_name)["boxes"]
            for i in xrange(len(coords_list)):
                coords_list[i] = [coords_list[i][0], coords_list[i][1]]
        elif coords_format == "spider":
            coords_list = read_text_row(coords_name)
            for i in xrange(len(coords_list)):
                coords_list[i] = [coords_list[i][2], coords_list[i][3]]
            # else: assert (False) # Unreachable code

        # Calculate the new pixel size
        if options.import_ctf:
            ctf_params = ctf_dict[mic_baseroot]
            pixel_size_origin = ctf_params[idx_cter_apix]

            if resample_ratio < 1.0:
                # assert (resample_ratio > 0.0)
                new_pixel_size = pixel_size_origin / resample_ratio
                print(
                    "Resample micrograph to pixel size %6.4f and window segments from resampled micrograph."
                    % new_pixel_size)
            else:
                # assert (resample_ratio == 1.0)
                new_pixel_size = pixel_size_origin

            # Set ctf along with new pixel size in resampled micrograph
            ctf_params[idx_cter_apix] = new_pixel_size
        else:
            # assert (not options.import_ctf)
            if resample_ratio < 1.0:
                # assert (resample_ratio > 0.0)
                print(
                    "Resample micrograph with ratio %6.4f and window segments from resampled micrograph."
                    % resample_ratio)
            # else:
            #	assert (resample_ratio == 1.0)

        # Apply filters to micrograph
        fftip(mic_img)
        if options.limit_ctf:
            # assert (options.import_ctf)
            # Cut off frequency components higher than CTF limit
            q1, q2 = ctflimit(box_size, ctf_params[idx_cter_def],
                              ctf_params[idx_cter_cs],
                              ctf_params[idx_cter_vol], new_pixel_size)

            # This is absolute frequency of CTF limit in scale of original micrograph
            if resample_ratio < 1.0:
                # assert (resample_ratio > 0.0)
                q1 = resample_ratio * q1 / float(
                    box_size
                )  # q1 = (pixel_size_origin / new_pixel_size) * q1/float(box_size)
            else:
                # assert (resample_ratio == 1.0) -> pixel_size_origin == new_pixel_size -> pixel_size_origin / new_pixel_size == 1.0
                q1 = q1 / float(box_size)

            if q1 < 0.5:
                mic_img = filt_tanl(mic_img, q1, 0.01)
                cutoff_histogram.append(q1)

        # Cut off frequency components lower than the box size can express
        mic_img = fft(filt_gaussh(mic_img, resample_ratio / box_size))

        # Resample micrograph, map coordinates, and window segments from resampled micrograph using new coordinates
        # after resampling by resample_ratio, new pixel size will be pixel_size/resample_ratio = new_pixel_size
        # NOTE: 2015/04/13 Toshio Moriya
        # resample() efficiently handles the case resample_ratio = 1.0, but
        # in that case it does not set apix_*, even though it does set apix_* when resample_ratio < 1.0 ...
        mic_img = resample(mic_img, resample_ratio)

        if options.invert:
            mic_stats = Util.infomask(
                mic_img, None, True)  # mic_stat[0:mean, 1:SD, 2:min, 3:max]
            Util.mul_scalar(mic_img, -1.0)
            mic_img += 2 * mic_stats[0]

        if options.import_ctf:
            from utilities import generate_ctf
            ctf_obj = generate_ctf(
                ctf_params
            )  # indexes 0 to 7 (idx_cter_def to idx_cter_astig_ang) must be same in cter format & ctf object format.

        # Prepare loop variables
        nx = mic_img.get_xsize()
        ny = mic_img.get_ysize()
        x0 = nx // 2
        y0 = ny // 2

        n_coords_reject_out_of_boundary = 0
        local_stack_name = "bdb:%s#" % out_dir + mic_baseroot + '_ptcls'
        local_particle_id = 0  # can be different from coordinates_id
        # Loop over coordinates
        for coords_id in xrange(len(coords_list)):

            x = int(coords_list[coords_id][0])
            y = int(coords_list[coords_id][1])

            if resample_ratio < 1.0:
                # assert (resample_ratio > 0.0)
                x = int(x * resample_ratio)
                y = int(y * resample_ratio)
            # else:
            # 	assert(resample_ratio == 1.0)

            if ((0 <= x - box_half) and (x + box_half <= nx)
                    and (0 <= y - box_half) and (y + box_half <= ny)):
                particle_img = Util.window(mic_img, box_size, box_size, 1,
                                           x - x0, y - y0)
            else:
                print(
                    "In %s, coordinates ID = %04d (x = %4d, y = %4d, box_size = %4d) is out of micrograph bound, skipping ..."
                    % (mic_baseroot, coords_id, x, y, box_size))
                n_coords_reject_out_of_boundary += 1
                continue

            particle_img = ramp(particle_img)
            particle_stats = Util.infomask(
                particle_img, mask2d,
                False)  # particle_stats[0:mean, 1:SD, 2:min, 3:max]
            particle_img -= particle_stats[0]
            particle_img /= particle_stats[1]

            # NOTE: 2015/04/09 Toshio Moriya
            # ptcl_source_image might be redundant information ...
            # Consider re-organizing header entries...
            particle_img.set_attr("ptcl_source_image", mic_name)
            particle_img.set_attr("ptcl_source_coord_id", coords_id)
            particle_img.set_attr("ptcl_source_coord", [
                int(coords_list[coords_id][0]),
                int(coords_list[coords_id][1])
            ])
            particle_img.set_attr("resample_ratio", resample_ratio)

            # NOTE: 2015/04/13 Toshio Moriya
            # apix_* attributes are updated by resample() only when resample_ratio != 1.0
            # Let's make sure header info is consistent by setting apix_* = 1.0
            # regardless of options, so it is not passed down the processing line
            particle_img.set_attr("apix_x", 1.0)
            particle_img.set_attr("apix_y", 1.0)
            particle_img.set_attr("apix_z", 1.0)
            if options.import_ctf:
                particle_img.set_attr("ctf", ctf_obj)
                particle_img.set_attr("ctf_applied", 0)
                particle_img.set_attr("pixel_size_origin", pixel_size_origin)
                # particle_img.set_attr("apix_x", new_pixel_size)
                # particle_img.set_attr("apix_y", new_pixel_size)
                # particle_img.set_attr("apix_z", new_pixel_size)
            # NOTE: 2015/04/13 Toshio Moriya
            # Pawel Comment: Micrograph is not supposed to have CTF header info.
            # So, let's assume it does not exist & ignore its presence.
            # Note that resample() "correctly" updates pixel size of CTF header info if it exists
            # elif (particle_img.has_ctff()):
            # 	assert(not options.import_ctf)
            # 	ctf_origin = particle_img.get_attr("ctf_obj")
            # 	pixel_size_origin = round(ctf_origin.apix, 5) # Because SXCTER outputs up to 5 digits
            # 	particle_img.set_attr("apix_x",pixel_size_origin)
            # 	particle_img.set_attr("apix_y",pixel_size_origin)
            # 	particle_img.set_attr("apix_z",pixel_size_origin)

            # print("local_stack_name, local_particle_id", local_stack_name, local_particle_id)
            particle_img.write_image(local_stack_name, local_particle_id)
            local_particle_id += 1

        n_global_coords_detect += len(coords_list)
        n_global_coords_process += local_particle_id
        n_global_coords_reject_out_of_boundary += n_coords_reject_out_of_boundary

        #		# MRK_DEBUG: Toshio Moriya 2016/05/03
        #		# Following codes are for debugging bdb. Delete in future
        #		result = db_check_dict(local_stack_name)
        #		print('# MRK_DEBUG: result = db_check_dict(local_stack_name): %s' % (result))
        #		result = db_list_dicts('bdb:%s' % out_dir)
        #		print('# MRK_DEBUG: result = db_list_dicts(out_dir): %s' % (result))
        #		result = db_get_image_info(local_stack_name)
        #		print('# MRK_DEBUG: result = db_get_image_info(local_stack_name)', result)

        # Release the database of the local stack from this process
        # so that the subprocess can access the database
        db_close_dict(local_stack_name)


#		# MRK_DEBUG: Toshio Moriya 2016/05/03
#		# Following codes are for debugging bdb. Delete in future
#		cmd_line = "e2iminfo.py %s" % (local_stack_name)
#		print('# MRK_DEBUG: Executing the command: %s' % (cmd_line))
#		cmdexecute(cmd_line)

#		# MRK_DEBUG: Toshio Moriya 2016/05/03
#		# Following codes are for debugging bdb. Delete in future
#		cmd_line = "e2iminfo.py bdb:%s#data" % (out_dir)
#		print('# MRK_DEBUG: Executing the command: %s' % (cmd_line))
#		cmdexecute(cmd_line)

    if RUNNING_UNDER_MPI:
        if options.import_ctf:
            if options.limit_ctf:
                cutoff_histogram = wrap_mpi_gatherv(cutoff_histogram,
                                                    main_node)

    if myid == main_node:
        if options.limit_ctf:
            # Print out the summary of CTF-limit filtering
            print(" ")
            print("Global summary of CTF-limit filtering (--limit_ctf) ...")
            print("Percentage of filtered micrographs: %8.2f\n" %
                  (len(cutoff_histogram) * 100.0 /
                   len(restricted_serial_id_list_not_sliced)))

            n_bins = 10
            if len(cutoff_histogram) >= n_bins:
                from statistics import hist_list
                cutoff_region, cutoff_counts = hist_list(
                    cutoff_histogram, n_bins)
                print("      Histogram of cut-off frequency")
                print("      cut-off       counts")
                for bin_id in xrange(n_bins):
                    print(" %14.7f     %7d" %
                          (cutoff_region[bin_id], cutoff_counts[bin_id]))
            else:
                print(
                    "The number of filtered micrographs (%d) is less than the number of bins (%d). No histogram is produced."
                    % (len(cutoff_histogram), n_bins))

    n_mic_process = mpi_reduce(n_mic_process, 1, MPI_INT, MPI_SUM, main_node,
                               MPI_COMM_WORLD)
    n_mic_reject_no_coords = mpi_reduce(n_mic_reject_no_coords, 1, MPI_INT,
                                        MPI_SUM, main_node, MPI_COMM_WORLD)
    n_mic_reject_no_cter_entry = mpi_reduce(n_mic_reject_no_cter_entry, 1,
                                            MPI_INT, MPI_SUM, main_node,
                                            MPI_COMM_WORLD)
    n_global_coords_detect = mpi_reduce(n_global_coords_detect, 1, MPI_INT,
                                        MPI_SUM, main_node, MPI_COMM_WORLD)
    n_global_coords_process = mpi_reduce(n_global_coords_process, 1, MPI_INT,
                                         MPI_SUM, main_node, MPI_COMM_WORLD)
    n_global_coords_reject_out_of_boundary = mpi_reduce(
        n_global_coords_reject_out_of_boundary, 1, MPI_INT, MPI_SUM, main_node,
        MPI_COMM_WORLD)

    # Print out the summary of all micrographs
    if main_node == myid:
        print(" ")
        print("Global summary of micrographs ...")
        print("Detected                        : %6d" %
              (len(restricted_serial_id_list_not_sliced)))
        print("Processed                       : %6d" % (n_mic_process))
        print("Rejected by no coordinates file : %6d" %
              (n_mic_reject_no_coords))
        print("Rejected by no CTER entry       : %6d" %
              (n_mic_reject_no_cter_entry))
        print(" ")
        print("Global summary of coordinates ...")
        print("Detected                        : %6d" %
              (n_global_coords_detect))
        print("Processed                       : %6d" %
              (n_global_coords_process))
        print("Rejected by out of boundary     : %6d" %
              (n_global_coords_reject_out_of_boundary))
        # print(" ")
        # print("DONE!!!")

    mpi_barrier(MPI_COMM_WORLD)

    if main_node == myid:

        import time
        time.sleep(1)
        print("\n Creating bdb:%s/data\n" % original_out_dir)
        for proc_i in range(number_of_processes):
            mic_start, mic_end = MPI_start_end(
                len(restricted_serial_id_list_not_sliced), number_of_processes,
                proc_i)
            for serial_id in restricted_serial_id_list_not_sliced[
                    mic_start:mic_end]:
                e2bdb_command = "e2bdb.py "
                mic_baseroot = mic_baseroot_pattern.replace("*", serial_id)
                if RUNNING_UNDER_MPI:
                    e2bdb_command += "bdb:" + os.path.join(
                        original_out_dir,
                        "%03d/" % proc_i) + mic_baseroot + "_ptcls "
                else:
                    e2bdb_command += "bdb:" + os.path.join(
                        original_out_dir, mic_baseroot + "_ptcls ")

                e2bdb_command += " --appendvstack=bdb:%s/data  1>/dev/null" % original_out_dir
                cmdexecute(e2bdb_command, printing_on_success=False)

        print("Done!\n")

    if RUNNING_UNDER_MPI:
        mpi_barrier(MPI_COMM_WORLD)
        from mpi import mpi_finalize
        mpi_finalize()

    sys.stdout.flush()
    sys.exit(0)
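
The serial-ID bookkeeping above (splitting the micrograph basename pattern on '*' and slicing each micrograph basename between the resulting prefix and suffix) can be tried in isolation. The sketch below is not part of sxwindow.py; the file names and the helper extract_serial_id are hypothetical and only illustrate the same slicing logic.

import os

# Hypothetical basename pattern; sxwindow.py derives this from the command-line argument.
mic_basename_pattern = "mic*.hdf"
prefix, suffix = mic_basename_pattern.split('*')  # assumes exactly one '*'

def extract_serial_id(mic_name):
    # Slice out the part of the basename that the '*' matched.
    basename = os.path.basename(mic_name)
    return basename[len(prefix):basename.index(suffix)]

for name in ["/data/mic0001.hdf", "/data/mic0002.hdf"]:
    serial_id = extract_serial_id(name)
    # The same id is then substituted into the other patterns,
    # e.g. "info/mic*_info.json".replace("*", serial_id)
    print(name, "->", serial_id)

Running this prints "0001" and "0002", which is the id the program later substitutes back into both the micrograph and the coordinates patterns.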
Example #20
0
def helicalshiftali_MPI(stack,
                        maskfile=None,
                        maxit=100,
                        CTF=False,
                        snr=1.0,
                        Fourvar=False,
                        search_rng=-1):

    nproc = mpi.mpi_comm_size(mpi.MPI_COMM_WORLD)
    myid = mpi.mpi_comm_rank(mpi.MPI_COMM_WORLD)
    main_node = 0

    ftp = file_type(stack)

    if myid == main_node:
        print_begin_msg("helical-shiftali_MPI")

    max_iter = int(maxit)
    if (myid == main_node):
        infils = EMUtil.get_all_attributes(stack, "filament")
        ptlcoords = EMUtil.get_all_attributes(stack, 'ptcl_source_coord')
        filaments = ordersegments(infils, ptlcoords)
        total_nfils = len(filaments)
        inidl = [0] * total_nfils
        for i in range(total_nfils):
            inidl[i] = len(filaments[i])
        linidl = sum(inidl)
        nima = linidl
        tfilaments = []
        for i in range(total_nfils):
            tfilaments += filaments[i]
        del filaments
    else:
        total_nfils = 0
        linidl = 0
    total_nfils = bcast_number_to_all(total_nfils, source_node=main_node)
    if myid != main_node:
        inidl = [-1] * total_nfils
    inidl = bcast_list_to_all(inidl, myid, source_node=main_node)
    linidl = bcast_number_to_all(linidl, source_node=main_node)
    if myid != main_node:
        tfilaments = [-1] * linidl
    tfilaments = bcast_list_to_all(tfilaments, myid, source_node=main_node)
    filaments = []
    iendi = 0
    for i in range(total_nfils):
        isti = iendi
        iendi = isti + inidl[i]
        filaments.append(tfilaments[isti:iendi])
    del tfilaments, inidl

    if myid == main_node:
        print_msg("total number of filaments: %d" % total_nfils)
    if total_nfils < nproc:
        ERROR(
            'number of CPUs (%i) is larger than the number of filaments (%i), please reduce the number of CPUs used'
            % (nproc, total_nfils),
            myid=myid)

    #  balanced load
    temp = chunks_distribution([[len(filaments[i]), i]
                                for i in range(len(filaments))],
                               nproc)[myid:myid + 1][0]
    filaments = [filaments[temp[i][1]] for i in range(len(temp))]
    nfils = len(filaments)

    #filaments = [[0,1]]
    #print "filaments",filaments
    list_of_particles = []
    indcs = []
    k = 0
    for i in range(nfils):
        list_of_particles += filaments[i]
        k1 = k + len(filaments[i])
        indcs.append([k, k1])
        k = k1
    data = EMData.read_images(stack, list_of_particles)
    ldata = len(data)
    sxprint("ldata=", ldata)
    nx = data[0].get_xsize()
    ny = data[0].get_ysize()
    if maskfile == None:
        mrad = min(nx, ny) // 2 - 2
        mask = pad(model_blank(2 * mrad + 1, ny, 1, 1.0), nx, ny, 1, 0.0)
    else:
        mask = get_im(maskfile)

    # apply initial xform.align2d parameters stored in header
    init_params = []
    for im in range(ldata):
        t = data[im].get_attr('xform.align2d')
        init_params.append(t)
        p = t.get_params("2d")
        data[im] = rot_shift2D(data[im], p['alpha'], p['tx'], p['ty'],
                               p['mirror'], p['scale'])

    if CTF:
        from sp_filter import filt_ctf
        from sp_morphology import ctf_img
        ctf_abs_sum = EMData(nx, ny, 1, False)
        ctf_2_sum = EMData(nx, ny, 1, False)
    else:
        ctf_2_sum = None
        ctf_abs_sum = None

    from sp_utilities import info

    for im in range(ldata):
        data[im].set_attr('ID', list_of_particles[im])
        st = Util.infomask(data[im], mask, False)
        data[im] -= st[0]
        if CTF:
            ctf_params = data[im].get_attr("ctf")
            qctf = data[im].get_attr("ctf_applied")
            if qctf == 0:
                data[im] = filt_ctf(fft(data[im]), ctf_params)
                data[im].set_attr('ctf_applied', 1)
            elif qctf != 1:
                ERROR('Incorrectly set qctf flag', myid=myid)
            ctfimg = ctf_img(nx, ctf_params, ny=ny)
            Util.add_img2(ctf_2_sum, ctfimg)
            Util.add_img_abs(ctf_abs_sum, ctfimg)
        else:
            data[im] = fft(data[im])

    del list_of_particles

    if CTF:
        reduce_EMData_to_root(ctf_2_sum, myid, main_node)
        reduce_EMData_to_root(ctf_abs_sum, myid, main_node)
    if CTF:
        if myid != main_node:
            del ctf_2_sum
            del ctf_abs_sum
        else:
            temp = EMData(nx, ny, 1, False)
            tsnr = 1. / snr
            for i in range(0, nx + 2, 2):
                for j in range(ny):
                    temp.set_value_at(i, j, tsnr)
                    temp.set_value_at(i + 1, j, 0.0)
            #info(ctf_2_sum)
            Util.add_img(ctf_2_sum, temp)
            #info(ctf_2_sum)
            del temp

    total_iter = 0
    shift_x = [0.0] * ldata

    for Iter in range(max_iter):
        if myid == main_node:
            start_time = time()
            print_msg("Iteration #%4d\n" % (total_iter))
        total_iter += 1
        avg = EMData(nx, ny, 1, False)
        for im in range(ldata):
            Util.add_img(avg, fshift(data[im], shift_x[im]))

        reduce_EMData_to_root(avg, myid, main_node)

        if myid == main_node:
            if CTF: tavg = Util.divn_filter(avg, ctf_2_sum)
            else: tavg = Util.mult_scalar(avg, 1.0 / float(nima))
        else:
            tavg = model_blank(nx, ny)

        if Fourvar:
            bcast_EMData_to_all(tavg, myid, main_node)
            vav, rvar = varf2d_MPI(myid, data, tavg, mask, "a", CTF)

        if myid == main_node:
            if Fourvar:
                tavg = fft(Util.divn_img(fft(tavg), vav))
                vav_r = Util.pack_complex_to_real(vav)
            # normalize and mask tavg in real space
            tavg = fft(tavg)
            stat = Util.infomask(tavg, mask, False)
            tavg -= stat[0]
            Util.mul_img(tavg, mask)
            tavg.write_image("tavg.hdf", Iter)
            # For testing purposes: shift tavg to some random place and see if the centering is still correct
            #tavg = rot_shift3D(tavg,sx=3,sy=-4)

        if Fourvar: del vav
        bcast_EMData_to_all(tavg, myid, main_node)
        tavg = fft(tavg)

        sx_sum = 0.0
        nxc = nx // 2

        for ifil in range(nfils):
            """
			# Calculate filament average
			avg = EMData(nx, ny, 1, False)
			filnima = 0
			for im in xrange(indcs[ifil][0], indcs[ifil][1]):
				Util.add_img(avg, data[im])
				filnima += 1
			tavg = Util.mult_scalar(avg, 1.0/float(filnima))
			"""
            # Calculate 1D ccf between each segment and filament average
            nsegms = indcs[ifil][1] - indcs[ifil][0]
            ctx = [None] * nsegms
            pcoords = [None] * nsegms
            for im in range(indcs[ifil][0], indcs[ifil][1]):
                ctx[im - indcs[ifil][0]] = Util.window(ccf(tavg, data[im]), nx,
                                                       1)
                pcoords[im - indcs[ifil][0]] = data[im].get_attr(
                    'ptcl_source_coord')
                #ctx[im-indcs[ifil][0]].write_image("ctx.hdf",im-indcs[ifil][0])
                #print "  CTX  ",myid,im,Util.infomask(ctx[im-indcs[ifil][0]], None, True)
            # search for best x-shift
            cents = nsegms // 2

            dst = sqrt(
                max((pcoords[cents][0] - pcoords[0][0])**2 +
                    (pcoords[cents][1] - pcoords[0][1])**2,
                    (pcoords[cents][0] - pcoords[-1][0])**2 +
                    (pcoords[cents][1] - pcoords[-1][1])**2))
            maxincline = atan2(ny // 2 - 2 - float(search_rng), dst)
            kang = int(dst * tan(maxincline) + 0.5)
            #print  "  settings ",nsegms,cents,dst,search_rng,maxincline,kang

            # ## C code for alignment. @ming
            results = [0.0] * 3
            results = Util.helixshiftali(ctx, pcoords, nsegms, maxincline,
                                         kang, search_rng, nxc)
            sib = int(results[0])
            bang = results[1]
            qm = results[2]
            #print qm, sib, bang

            # qm = -1.e23
            #
            # 			for six in xrange(-search_rng, search_rng+1,1):
            # 				q0 = ctx[cents].get_value_at(six+nxc)
            # 				for incline in xrange(kang+1):
            # 					qt = q0
            # 					qu = q0
            # 					if(kang>0):  tang = tan(maxincline/kang*incline)
            # 					else:        tang = 0.0
            # 					for kim in xrange(cents+1,nsegms):
            # 						dst = sqrt((pcoords[cents][0] - pcoords[kim][0])**2 + (pcoords[cents][1] - pcoords[kim][1])**2)
            # 						xl = dst*tang+six+nxc
            # 						ixl = int(xl)
            # 						dxl = xl - ixl
            # 						#print "  A  ", ifil,six,incline,kim,xl,ixl,dxl
            # 						qt += (1.0-dxl)*ctx[kim].get_value_at(ixl) + dxl*ctx[kim].get_value_at(ixl+1)
            # 						xl = -dst*tang+six+nxc
            # 						ixl = int(xl)
            # 						dxl = xl - ixl
            # 						qu += (1.0-dxl)*ctx[kim].get_value_at(ixl) + dxl*ctx[kim].get_value_at(ixl+1)
            # 					for kim in xrange(cents):
            # 						dst = sqrt((pcoords[cents][0] - pcoords[kim][0])**2 + (pcoords[cents][1] - pcoords[kim][1])**2)
            # 						xl = -dst*tang+six+nxc
            # 						ixl = int(xl)
            # 						dxl = xl - ixl
            # 						qt += (1.0-dxl)*ctx[kim].get_value_at(ixl) + dxl*ctx[kim].get_value_at(ixl+1)
            # 						xl =  dst*tang+six+nxc
            # 						ixl = int(xl)
            # 						dxl = xl - ixl
            # 						qu += (1.0-dxl)*ctx[kim].get_value_at(ixl) + dxl*ctx[kim].get_value_at(ixl+1)
            # 					if( qt > qm ):
            # 						qm = qt
            # 						sib = six
            # 						bang = tang
            # 					if( qu > qm ):
            # 						qm = qu
            # 						sib = six
            # 						bang = -tang
            #if incline == 0:  print  "incline = 0  ",six,tang,qt,qu
            #print qm,six,sib,bang
            #print " got results   ",indcs[ifil][0], indcs[ifil][1], ifil,myid,qm,sib,tang,bang,len(ctx),Util.infomask(ctx[0], None, True)
            for im in range(indcs[ifil][0], indcs[ifil][1]):
                kim = im - indcs[ifil][0]
                dst = sqrt((pcoords[cents][0] - pcoords[kim][0])**2 +
                           (pcoords[cents][1] - pcoords[kim][1])**2)
                if (kim < cents): xl = -dst * bang + sib
                else: xl = dst * bang + sib
                shift_x[im] = xl

            # Average shift
            sx_sum += shift_x[indcs[ifil][0] + cents]

        # #print myid,sx_sum,total_nfils
        sx_sum = mpi.mpi_reduce(sx_sum, 1, mpi.MPI_FLOAT, mpi.MPI_SUM,
                                main_node, mpi.MPI_COMM_WORLD)
        if myid == main_node:
            sx_sum = float(sx_sum[0]) / total_nfils
            print_msg("Average shift  %6.2f\n" % (sx_sum))
        else:
            sx_sum = 0.0
        sx_sum = 0.0
        sx_sum = bcast_number_to_all(sx_sum, source_node=main_node)
        for im in range(ldata):
            shift_x[im] -= sx_sum
            #print  "   %3d  %6.3f"%(im,shift_x[im])
        #exit()

    # combine shifts found with the original parameters
    for im in range(ldata):
        t1 = Transform()
        ##import random
        ##shix=random.randint(-10, 10)
        ##t1.set_params({"type":"2D","tx":shix})
        t1.set_params({"type": "2D", "tx": shift_x[im]})
        # combine t0 and t1
        tt = t1 * init_params[im]
        data[im].set_attr("xform.align2d", tt)
    # write out headers and STOP, under MPI writing has to be done sequentially
    mpi.mpi_barrier(mpi.MPI_COMM_WORLD)
    par_str = ["xform.align2d", "ID"]
    if myid == main_node:
        from sp_utilities import file_type
        if (file_type(stack) == "bdb"):
            from sp_utilities import recv_attr_dict_bdb
            recv_attr_dict_bdb(main_node, stack, data, par_str, 0, ldata,
                               nproc)
        else:
            from sp_utilities import recv_attr_dict
            recv_attr_dict(main_node, stack, data, par_str, 0, ldata, nproc)
    else:
        send_attr_dict(main_node, data, par_str, 0, ldata)
    if myid == main_node: print_end_msg("helical-shiftali_MPI")
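
The per-segment shift assignment near the end of the filament loop (xl = ±dst*bang + sib, with the sign flipped for segments before the central one) treats the filament as a straight line tilted by the best incline returned by Util.helixshiftali. Below is a minimal standalone sketch of just that step; the names cents, sib and bang mirror the variables above, while the segment coordinates and numeric values are invented for illustration.

from math import sqrt

pcoords = [(0.0, 0.0), (10.0, 0.0), (20.0, 0.0), (30.0, 0.0), (40.0, 0.0)]  # invented segment centers
cents = len(pcoords) // 2   # index of the central segment
sib = 1.5                   # best shift found for the central segment [pixels]
bang = 0.05                 # tangent of the best incline

shift_x = []
for kim in range(len(pcoords)):
    dst = sqrt((pcoords[cents][0] - pcoords[kim][0]) ** 2 +
               (pcoords[cents][1] - pcoords[kim][1]) ** 2)
    # segments before the central one get the incline with the opposite sign
    if kim < cents:
        shift_x.append(-dst * bang + sib)
    else:
        shift_x.append(dst * bang + sib)

print(shift_x)  # a linear ramp of shifts centered on the middle segment

With these numbers the result is [0.5, 1.0, 1.5, 2.0, 2.5]: each segment is shifted by the central shift plus an offset proportional to its distance from the central segment.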
Example #21
0
def main():

	def params_3D_2D_NEW(phi, theta, psi, s2x, s2y, mirror):
		# the final ali2d parameters already combine the shift operation (applied first) and the rotation operation (applied second) for parameters converted from 3D
		if mirror:
			m = 1
			alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)
		else:
			m = 0
			alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)
		return  alpha, sx, sy, m
	
	progname = os.path.basename(sys.argv[0])
	usage = progname + " prj_stack  --ave2D= --var2D=  --ave3D= --var3D= --img_per_grp= --fl=  --aa=   --sym=symmetry --CTF"
	parser = OptionParser(usage, version=SPARXVERSION)
	
	parser.add_option("--output_dir",   type="string"	   ,	default="./",				    help="Output directory")
	parser.add_option("--ave2D",		type="string"	   ,	default=False,				help="Write to the disk a stack of 2D averages")
	parser.add_option("--var2D",		type="string"	   ,	default=False,				help="Write to the disk a stack of 2D variances")
	parser.add_option("--ave3D",		type="string"	   ,	default=False,				help="Write to the disk reconstructed 3D average")
	parser.add_option("--var3D",		type="string"	   ,	default=False,				help="Compute 3D variability (time consuming!)")
	parser.add_option("--img_per_grp",	type="int"         ,	default=100,	     	    help="Number of neighbouring projections.(Default is 100)")
	parser.add_option("--no_norm",		action="store_true",	default=False,				help="Do not use normalization.(Default is to apply normalization)")
	#parser.add_option("--radius", 	    type="int"         ,	default=-1   ,				help="radius for 3D variability" )
	parser.add_option("--npad",			type="int"         ,	default=2    ,				help="Number of time to pad the original images.(Default is 2 times padding)")
	parser.add_option("--sym" , 		type="string"      ,	default="c1",				help="Symmetry. (Default is no symmetry)")
	parser.add_option("--fl",			type="float"       ,	default=0.0,				help="Low pass filter cutoff in absolute frequency (0.0 - 0.5) and is applied to decimated images. (Default - no filtration)")
	parser.add_option("--aa",			type="float"       ,	default=0.02 ,				help="Fall off of the filter. Use default value if user has no clue about falloff (Default value is 0.02)")
	parser.add_option("--CTF",			action="store_true",	default=False,				help="Use CFT correction.(Default is no CTF correction)")
	#parser.add_option("--MPI" , 		action="store_true",	default=False,				help="use MPI version")
	#parser.add_option("--radiuspca", 	type="int"         ,	default=-1   ,				help="radius for PCA" )
	#parser.add_option("--iter", 		type="int"         ,	default=40   ,				help="maximum number of iterations (stop criterion of reconstruction process)" )
	#parser.add_option("--abs", 		type="float"   ,        default=0.0  ,				help="minimum average absolute change of voxels' values (stop criterion of reconstruction process)" )
	#parser.add_option("--squ", 		type="float"   ,	    default=0.0  ,				help="minimum average squared change of voxels' values (stop criterion of reconstruction process)" )
	parser.add_option("--VAR" , 		action="store_true",	default=False,				help="Stack of input consists of 2D variances (Default False)")
	parser.add_option("--decimate",     type  ="float",         default=0.25,               help="Image decimate rate, a number less than 1. (Default is 0.25)")
	parser.add_option("--window",       type  ="int",           default=0,                  help="Target image size relative to original image size. (Default value is zero.)")
	#parser.add_option("--SND",			action="store_true",	default=False,				help="compute squared normalized differences (Default False)")
	#parser.add_option("--nvec",			type="int"         ,	default=0    ,				help="Number of eigenvectors, (Default = 0 meaning no PCA calculated)")
	parser.add_option("--symmetrize",	action="store_true",	default=False,				help="Prepare input stack for handling symmetry (Default False)")
	parser.add_option("--overhead",     type  ="float",         default=0.5,                help="python overhead per CPU.")

	(options,args) = parser.parse_args()
	#####
	from mpi import mpi_init, mpi_comm_rank, mpi_comm_size, mpi_recv, MPI_COMM_WORLD
	from mpi import mpi_barrier, mpi_reduce, mpi_bcast, mpi_send, MPI_FLOAT, MPI_SUM, MPI_INT, MPI_MAX
	from mpi import mpi_comm_split, mpi_comm_split_type, MPI_COMM_TYPE_SHARED, MPI_INFO_NULL  # used by the shared-memory communicator setup below
	#from mpi import *
	from applications   import MPI_start_end
	from reconstruction import recons3d_em, recons3d_em_MPI
	from reconstruction	import recons3d_4nn_MPI, recons3d_4nn_ctf_MPI
	from utilities      import print_begin_msg, print_end_msg, print_msg
	from utilities      import read_text_row, get_image, get_im, wrap_mpi_send, wrap_mpi_recv
	from utilities      import bcast_EMData_to_all, bcast_number_to_all
	from utilities      import get_symt

	#  This is code for handling symmetries by the above program.  To be incorporated. PAP 01/27/2015

	from EMAN2db import db_open_dict

	# Set up global variables related to bdb cache 
	if global_def.CACHE_DISABLE:
		from utilities import disable_bdb_cache
		disable_bdb_cache()
	
	# Set up global variables related to ERROR function
	global_def.BATCH = True
	
	# detect if program is running under MPI
	RUNNING_UNDER_MPI = "OMPI_COMM_WORLD_SIZE" in os.environ
	if RUNNING_UNDER_MPI: global_def.MPI = True
	if options.output_dir =="./": current_output_dir = os.path.abspath(options.output_dir)
	else: current_output_dir = options.output_dir
	if options.symmetrize :
		if RUNNING_UNDER_MPI:
			try:
				sys.argv = mpi_init(len(sys.argv), sys.argv)
				try:	
					number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
					if( number_of_proc > 1 ):
						ERROR("Cannot use more than one CPU for symmetry preparation","sx3dvariability",1)
				except:
					pass
			except:
				pass
		if not os.path.exists(current_output_dir): os.mkdir(current_output_dir)
		
		#  Input
		#instack = "Clean_NORM_CTF_start_wparams.hdf"
		#instack = "bdb:data"
		
		
		from logger import Logger,BaseLogger_Files
		if os.path.exists(os.path.join(current_output_dir, "log.txt")): os.remove(os.path.join(current_output_dir, "log.txt"))
		log_main=Logger(BaseLogger_Files())
		log_main.prefix = os.path.join(current_output_dir, "./")
		
		instack = args[0]
		sym = options.sym.lower()
		if( sym == "c1" ):
			ERROR("There is no need to symmetrize stack for C1 symmetry","sx3dvariability",1)
		
		line =""
		for a in sys.argv:
			line +=" "+a
		log_main.add(line)
	
		if(instack[:4] !="bdb:"):
			#if output_dir =="./": stack = "bdb:data"
			stack = "bdb:"+current_output_dir+"/data"
			delete_bdb(stack)
			junk = cmdexecute("sxcpy.py  "+instack+"  "+stack)
		else: stack = instack
		
		qt = EMUtil.get_all_attributes(stack,'xform.projection')

		na = len(qt)
		ts = get_symt(sym)
		ks = len(ts)
		angsa = [None]*na
		
		for k in range(ks):
			#Qfile = "Q%1d"%k
			#if options.output_dir!="./": Qfile = os.path.join(options.output_dir,"Q%1d"%k)
			Qfile = os.path.join(current_output_dir, "Q%1d"%k)
			#delete_bdb("bdb:Q%1d"%k)
			delete_bdb("bdb:"+Qfile)
			#junk = cmdexecute("e2bdb.py  "+stack+"  --makevstack=bdb:Q%1d"%k)
			junk = cmdexecute("e2bdb.py  "+stack+"  --makevstack=bdb:"+Qfile)
			#DB = db_open_dict("bdb:Q%1d"%k)
			DB = db_open_dict("bdb:"+Qfile)
			for i in range(na):
				ut = qt[i]*ts[k]
				DB.set_attr(i, "xform.projection", ut)
				#bt = ut.get_params("spider")
				#angsa[i] = [round(bt["phi"],3)%360.0, round(bt["theta"],3)%360.0, bt["psi"], -bt["tx"], -bt["ty"]]
			#write_text_row(angsa, 'ptsma%1d.txt'%k)
			#junk = cmdexecute("e2bdb.py  "+stack+"  --makevstack=bdb:Q%1d"%k)
			#junk = cmdexecute("sxheader.py  bdb:Q%1d  --params=xform.projection  --import=ptsma%1d.txt"%(k,k))
			DB.close()
		#if options.output_dir =="./": delete_bdb("bdb:sdata")
		delete_bdb("bdb:" + current_output_dir + "/"+"sdata")
		#junk = cmdexecute("e2bdb.py . --makevstack=bdb:sdata --filt=Q")
		sdata = "bdb:"+current_output_dir+"/"+"sdata"
		print(sdata)
		junk = cmdexecute("e2bdb.py   " + current_output_dir +"  --makevstack="+sdata +" --filt=Q")
		#junk = cmdexecute("ls  EMAN2DB/sdata*")
		#a = get_im("bdb:sdata")
		a = get_im(sdata)
		a.set_attr("variabilitysymmetry",sym)
		#a.write_image("bdb:sdata")
		a.write_image(sdata)

	else:

		from fundamentals import window2d
		sys.argv       = mpi_init(len(sys.argv), sys.argv)
		myid           = mpi_comm_rank(MPI_COMM_WORLD)
		number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
		main_node      = 0
		shared_comm  = mpi_comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED,  0, MPI_INFO_NULL)
		myid_on_node = mpi_comm_rank(shared_comm)
		no_of_processes_per_group = mpi_comm_size(shared_comm)
		masters_from_groups_vs_everything_else_comm = mpi_comm_split(MPI_COMM_WORLD, main_node == myid_on_node, myid_on_node)
		color, no_of_groups, balanced_processor_load_on_nodes = get_colors_and_subsets(main_node, MPI_COMM_WORLD, myid, \
		    shared_comm, myid_on_node, masters_from_groups_vs_everything_else_comm)
		overhead_loading = options.overhead*number_of_proc
		#memory_per_node  = options.memory_per_node
		#if memory_per_node == -1.: memory_per_node = 2.*no_of_processes_per_group
		keepgoing = 1
		
		current_window   = options.window
		current_decimate = options.decimate
		
		if len(args) == 1: stack = args[0]
		else:
			print(( "usage: " + usage))
			print(( "Please run '" + progname + " -h' for detailed options"))
			return 1

		t0 = time()	
		# obsolete flags
		options.MPI  = True
		#options.nvec = 0
		options.radiuspca = -1
		options.iter = 40
		options.abs  = 0.0
		options.squ  = 0.0

		if options.fl > 0.0 and options.aa == 0.0:
			ERROR("Fall off has to be given for the low-pass filter", "sx3dvariability", 1, myid)
			
		#if options.VAR and options.SND:
		#	ERROR("Only one of var and SND can be set!", "sx3dvariability", myid)
			
		if options.VAR and (options.ave2D or options.ave3D or options.var2D): 
			ERROR("When VAR is set, the program cannot output ave2D, ave3D or var2D", "sx3dvariability", 1, myid)
			
		#if options.SND and (options.ave2D or options.ave3D):
		#	ERROR("When SND is set, the program cannot output ave2D or ave3D", "sx3dvariability", 1, myid)
		
		#if options.nvec > 0 :
		#	ERROR("PCA option not implemented", "sx3dvariability", 1, myid)
			
		#if options.nvec > 0 and options.ave3D == None:
		#	ERROR("When doing PCA analysis, one must set ave3D", "sx3dvariability", 1, myid)
		
		if current_decimate>1.0 or current_decimate<0.0:
			ERROR("Decimate rate should be a value between 0.0 and 1.0", "sx3dvariability", 1, myid)
		
		if current_window < 0.0:
			ERROR("Target window size cannot be negative", "sx3dvariability", 1, myid)
			
		if myid == main_node:
			img  = get_image(stack, 0)
			nx   = img.get_xsize()
			ny   = img.get_ysize()
			if(min(nx, ny) < current_window):   keepgoing = 0
		keepgoing = bcast_number_to_all(keepgoing, main_node, MPI_COMM_WORLD)
		if keepgoing == 0: ERROR("The target window size cannot be larger than the size of decimated image", "sx3dvariability", 1, myid)

		import string
		options.sym = options.sym.lower()
		# if global_def.CACHE_DISABLE:
		# 	from utilities import disable_bdb_cache
		# 	disable_bdb_cache()
		# global_def.BATCH = True
		
		if myid == main_node:
			if not os.path.exists(current_output_dir): os.mkdir(current_output_dir)# Never delete output_dir in the program!
	
		img_per_grp = options.img_per_grp
		#nvec        = options.nvec
		radiuspca   = options.radiuspca
		from logger import Logger,BaseLogger_Files
		#if os.path.exists(os.path.join(options.output_dir, "log.txt")): os.remove(os.path.join(options.output_dir, "log.txt"))
		log_main=Logger(BaseLogger_Files())
		log_main.prefix = os.path.join(current_output_dir, "./")

		if myid == main_node:
			line = ""
			for a in sys.argv: line +=" "+a
			log_main.add(line)
			log_main.add("-------->>>Settings given by all options<<<-------")
			log_main.add("Symmetry             : %s"%options.sym)
			log_main.add("Input stack          : %s"%stack)
			log_main.add("Output_dir           : %s"%current_output_dir)
			
			if options.ave3D: log_main.add("Ave3d                : %s"%options.ave3D)
			if options.var3D: log_main.add("Var3d                : %s"%options.var3D)
			if options.ave2D: log_main.add("Ave2D                : %s"%options.ave2D)
			if options.var2D: log_main.add("Var2D                : %s"%options.var2D)
			if options.VAR:   log_main.add("VAR                  : True")
			else:             log_main.add("VAR                  : False")
			if options.CTF:   log_main.add("CTF correction       : True  ")
			else:             log_main.add("CTF correction       : False ")
			
			log_main.add("Image per group      : %5d"%options.img_per_grp)
			log_main.add("Image decimate rate  : %4.3f"%current_decimate)
			log_main.add("Low pass filter      : %4.3f"%options.fl)
			current_fl = options.fl
			if current_fl == 0.0: current_fl = 0.5
			log_main.add("Current low pass filter is equivalent to cutoff frequency %4.3f for original image size"%round((current_fl*current_decimate),3))
			log_main.add("Window size          : %5d "%current_window)
			log_main.add("sx3dvariability begins")
	
		symbaselen = 0
		if myid == main_node:
			nima = EMUtil.get_image_count(stack)
			img  = get_image(stack)
			nx   = img.get_xsize()
			ny   = img.get_ysize()
			nnxo = nx
			nnyo = ny
			if options.sym != "c1" :
				imgdata = get_im(stack)
				try:
					i = imgdata.get_attr("variabilitysymmetry").lower()
					if(i != options.sym):
						ERROR("The symmetry provided does not agree with the symmetry of the input stack", "sx3dvariability", 1, myid)
				except:
					ERROR("Input stack is not prepared for symmetry, please follow instructions", "sx3dvariability", 1, myid)
				from utilities import get_symt
				i = len(get_symt(options.sym))
				if((nima/i)*i != nima):
					ERROR("The length of the input stack is incorrect for symmetry processing", "sx3dvariability", 1, myid)
				symbaselen = nima/i
			else:  symbaselen = nima
		else:
			nima = 0
			nx = 0
			ny = 0
			nnxo = 0
			nnyo = 0
		nima    = bcast_number_to_all(nima)
		nx      = bcast_number_to_all(nx)
		ny      = bcast_number_to_all(ny)
		nnxo    = bcast_number_to_all(nnxo)
		nnyo    = bcast_number_to_all(nnyo)
		if current_window > max(nx, ny):
			ERROR("Window size is larger than the original image size", "sx3dvariability", 1)
		
		if current_decimate == 1.:
			if current_window !=0:
				nx = current_window
				ny = current_window
		else:
			if current_window == 0:
				nx = int(nx*current_decimate+0.5)
				ny = int(ny*current_decimate+0.5)
			else:
				nx = int(current_window*current_decimate+0.5)
				ny = nx
		symbaselen = bcast_number_to_all(symbaselen)
		
		# check FFT prime number
		from fundamentals import smallprime
		is_fft_friendly = (nx == smallprime(nx))
		
		if not is_fft_friendly:
			if myid == main_node:
				log_main.add("The target image size is not a product of small prime numbers")
				log_main.add("Program adjusts the input settings!")
			# Two cases: without decimation, snap the window size itself to an FFT-friendly
			# value; with decimation, snap the decimated size and update the decimation rate
			# (or the window size) accordingly.
			if current_decimate == 1.:
				nx = smallprime(nx)
				ny = nx
				current_window = nx # update
				if myid == main_node:
					log_main.add("The window size is updated to %d."%current_window)
			else:
				if current_window == 0:
					nx = smallprime(int(nx*current_decimate+0.5))
					current_decimate = float(nx)/nnxo
					ny = nx
					if (myid == main_node):
						log_main.add("The decimate rate is updated to %f."%current_decimate)
				else:
					nx = smallprime(int(current_window*current_decimate+0.5))
					ny = nx
					current_window = int(nx/current_decimate+0.5)
					if (myid == main_node):
						log_main.add("The window size is updated to %d."%current_window)
						
		if myid == main_node:
			log_main.add("The target image size is %d"%nx)
						
		if radiuspca == -1: radiuspca = nx/2-2
		if myid == main_node: log_main.add("%-70s:  %d\n"%("Number of projection", nima))
		img_begin, img_end = MPI_start_end(nima, number_of_proc, myid)
		
		"""
		if options.SND:
			from projection		import prep_vol, prgs
			from statistics		import im_diff
			from utilities		import get_im, model_circle, get_params_proj, set_params_proj
			from utilities		import get_ctf, generate_ctf
			from filter			import filt_ctf
		
			imgdata = EMData.read_images(stack, range(img_begin, img_end))

			if options.CTF:
				vol = recons3d_4nn_ctf_MPI(myid, imgdata, 1.0, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)
			else:
				vol = recons3d_4nn_MPI(myid, imgdata, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)

			bcast_EMData_to_all(vol, myid)
			volft, kb = prep_vol(vol)

			mask = model_circle(nx/2-2, nx, ny)
			varList = []
			for i in xrange(img_begin, img_end):
				phi, theta, psi, s2x, s2y = get_params_proj(imgdata[i-img_begin])
				ref_prj = prgs(volft, kb, [phi, theta, psi, -s2x, -s2y])
				if options.CTF:
					ctf_params = get_ctf(imgdata[i-img_begin])
					ref_prj = filt_ctf(ref_prj, generate_ctf(ctf_params))
				diff, A, B = im_diff(ref_prj, imgdata[i-img_begin], mask)
				diff2 = diff*diff
				set_params_proj(diff2, [phi, theta, psi, s2x, s2y])
				varList.append(diff2)
			mpi_barrier(MPI_COMM_WORLD)
		"""
		
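		# With --VAR the input stack already contains 2D variance images, so each image
		# is only windowed (window2d) and resized (fdecimate) to the working size here.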
		if options.VAR: # 2D variance images have no shifts
			#varList   = EMData.read_images(stack, range(img_begin, img_end))
			from EMAN2 import Region
			varList = []
			for index_of_particle in range(img_begin, img_end):
				image = get_im(stack, index_of_particle)
				if current_window > 0: varList.append(fdecimate(window2d(image, current_window, current_window), nx, ny))
				else:                  varList.append(fdecimate(image, nx, ny))
				
		else:
			from utilities		import bcast_number_to_all, bcast_list_to_all, send_EMData, recv_EMData
			from utilities		import set_params_proj, get_params_proj, params_3D_2D, get_params2D, set_params2D, compose_transform2
			from utilities		import model_blank, nearest_proj, model_circle, write_text_row
			from applications	import pca
			from statistics		import avgvar, avgvar_ctf, ccc
			from filter		    import filt_tanl
			from morphology		import threshold, square_root
			from projection 	import project, prep_vol, prgs
			from sets		    import Set
			from utilities      import wrap_mpi_recv, wrap_mpi_bcast, wrap_mpi_send
			import numpy as np
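			# The main node reads every particle's 'xform.projection' transform and builds
			# [sort_key, phi, theta, psi, index] entries, where the key folds theta into
			# [0, 90] and combines it with psi (theta*10000 + psi) so that similar
			# orientations end up adjacent after sorting; the sorted list is then broadcast
			# and used for nearest-neighbor grouping (nearest_proj) below.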
			if myid == main_node:
				t1          = time()
				proj_angles = []
				aveList     = []
				tab = EMUtil.get_all_attributes(stack, 'xform.projection')	
				for i in range(nima):
					t     = tab[i].get_params('spider')
					phi   = t['phi']
					theta = t['theta']
					psi   = t['psi']
					x     = theta
					if x > 90.0: x = 180.0 - x
					x = x*10000+psi
					proj_angles.append([x, t['phi'], t['theta'], t['psi'], i])
				t2 = time()
				log_main.add( "%-70s:  %d\n"%("Number of neighboring projections", img_per_grp))
				log_main.add("...... Finding neighboring projections\n")
				log_main.add( "Number of images per group: %d"%img_per_grp)
				log_main.add( "Now grouping projections")
				proj_angles.sort()
				proj_angles_list = np.full((nima, 4), 0.0, dtype=np.float32)	
				for i in range(nima):
					proj_angles_list[i][0] = proj_angles[i][1]
					proj_angles_list[i][1] = proj_angles[i][2]
					proj_angles_list[i][2] = proj_angles[i][3]
					proj_angles_list[i][3] = proj_angles[i][4]
			else: proj_angles_list = 0
			proj_angles_list = wrap_mpi_bcast(proj_angles_list, main_node, MPI_COMM_WORLD)
			proj_angles      = []
			for i in range(nima):
				proj_angles.append([proj_angles_list[i][0], proj_angles_list[i][1], proj_angles_list[i][2], int(proj_angles_list[i][3])])
			del proj_angles_list
			proj_list, mirror_list = nearest_proj(proj_angles, img_per_grp, range(img_begin, img_end))
			all_proj = Set()
			for im in proj_list:
				for jm in im:
					all_proj.add(proj_angles[jm][3])
			all_proj = list(all_proj)
			index = {}
			for i in range(len(all_proj)): index[all_proj[i]] = i
			mpi_barrier(MPI_COMM_WORLD)
			if myid == main_node:
				log_main.add("%-70s:  %.2f\n"%("Finding neighboring projections lasted [s]", time()-t2))
				log_main.add("%-70s:  %d\n"%("Number of groups processed on the main node", len(proj_list)))
				log_main.add("Grouping projections took:  %12.1f [m]"%((time()-t2)/60.))
				log_main.add("Number of groups on main node: ", len(proj_list))
			mpi_barrier(MPI_COMM_WORLD)

			if myid == main_node:
				log_main.add("...... Calculating the stack of 2D variances \n")
			# Memory estimation. There are two memory consumption peaks
			# peak 1. Compute ave, var; 
			# peak 2. Var volume reconstruction;
			# proj_params = [0.0]*(nima*5)
			aveList = []
			varList = []				
			#if nvec > 0: eigList = [[] for i in range(nvec)]
			dnumber   = len(all_proj)  # number of images in all neighborhood sets assigned to myid
			pnumber   = len(proj_list)*2. + img_per_grp # aveList and varList 
			tnumber   = dnumber+pnumber
			vol_size2 = nx**3*4.*8/1.e9
			vol_size1 = 2.*nnxo**3*4.*8/1.e9
			proj_size         = nnxo*nnyo*len(proj_list)*4.*2./1.e9 # both aveList and varList
			orig_data_size    = nnxo*nnyo*4.*tnumber/1.e9
			reduced_data_size = nx*nx*4.*tnumber/1.e9
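			# Gather the per-process memory estimates on the main node (rows stay at -1
			# until the corresponding rank's data arrive), broadcast the completed table,
			# and identify the most heavily loaded process from the reduced-data column.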
			full_data         = np.full((number_of_proc, 2), -1., dtype=np.float16)
			full_data[myid]   = orig_data_size, reduced_data_size
			if myid != main_node: wrap_mpi_send(full_data, main_node, MPI_COMM_WORLD)
			if myid == main_node:
				for iproc in range(number_of_proc):
					if iproc != main_node:
						dummy = wrap_mpi_recv(iproc, MPI_COMM_WORLD)
						full_data[np.where(dummy>-1)] = dummy[np.where(dummy>-1)]
				del dummy
			mpi_barrier(MPI_COMM_WORLD)
			full_data = wrap_mpi_bcast(full_data, main_node, MPI_COMM_WORLD)
			# find the CPU with heaviest load
			minindx         = np.argsort(full_data, 0)
			heavy_load_myid = minindx[-1][1]
			total_mem       = sum(full_data)
			if myid == main_node:
				if current_window == 0:
					log_main.add("Nx:   current image size = %d. Decimated by %f from %d"%(nx, current_decimate, nnxo))
				else:
					log_main.add("Nx:   current image size = %d. Windowed to %d, and decimated by %f from %d"%(nx, current_window, current_decimate, nnxo))
				log_main.add("Nproj:       number of particle images.")
				log_main.add("Navg:        number of 2D average images.")
				log_main.add("Nvar:        number of 2D variance images.")
				log_main.add("Img_per_grp: user defined image per group for averaging = %d"%img_per_grp)
				log_main.add("Overhead:    total python overhead memory consumption   = %f"%overhead_loading)
				log_main.add("Total memory) = 4.0*nx^2*(nproj + navg +nvar+ img_per_grp)/1.0e9 + overhead: %12.3f [GB]"%\
				   (total_mem[1] + overhead_loading))
			del full_data
			mpi_barrier(MPI_COMM_WORLD)
			if myid == heavy_load_myid:
				log_main.add("Begin reading and preprocessing images on processor. Wait... ")
				ttt = time()
			#imgdata = EMData.read_images(stack, all_proj)			
			imgdata = [ None for im in range(len(all_proj))]
			for index_of_proj in range(len(all_proj)):
				#image = get_im(stack, all_proj[index_of_proj])
				if( current_window > 0): imgdata[index_of_proj] = fdecimate(window2d(get_im(stack, all_proj[index_of_proj]),current_window,current_window), nx, ny)
				else:                    imgdata[index_of_proj] = fdecimate(get_im(stack, all_proj[index_of_proj]), nx, ny)
				
				if (current_decimate> 0.0 and options.CTF):
					ctf = imgdata[index_of_proj].get_attr("ctf")
					ctf.apix = ctf.apix/current_decimate
					imgdata[index_of_proj].set_attr("ctf", ctf)
					
				if myid == heavy_load_myid and index_of_proj%100 == 0:
					log_main.add(" ...... %6.2f%% "%(index_of_proj/float(len(all_proj))*100.))
			mpi_barrier(MPI_COMM_WORLD)
			if myid == heavy_load_myid:
				log_main.add("All_proj preprocessing cost %7.2f m"%((time()-ttt)/60.))
				log_main.add("Wait untill reading on all CPUs done...")
			'''	
			imgdata2 = EMData.read_images(stack, range(img_begin, img_end))
			if options.fl > 0.0:
				for k in xrange(len(imgdata2)):
					imgdata2[k] = filt_tanl(imgdata2[k], options.fl, options.aa)
			if options.CTF:
				vol = recons3d_4nn_ctf_MPI(myid, imgdata2, 1.0, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)
			else:
				vol = recons3d_4nn_MPI(myid, imgdata2, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)
			if myid == main_node:
				vol.write_image("vol_ctf.hdf")
				print_msg("Writing to the disk volume reconstructed from averages as		:  %s\n"%("vol_ctf.hdf"))
			del vol, imgdata2
			mpi_barrier(MPI_COMM_WORLD)
			'''
			from applications import prepare_2d_forPCA
			from utilities    import model_blank
			from EMAN2        import Transform
			if not options.no_norm: 
				mask = model_circle(nx/2-2, nx, nx)
			if options.CTF: 
				from utilities import pad
				from filter import filt_ctf
			from filter import filt_tanl
			if myid == heavy_load_myid:
				log_main.add("Start computing 2D aveList and varList. Wait...")
				ttt = time()
			inner=nx//2-4
			outer=inner+2
			xform_proj_for_2D = [ None for i in range(len(proj_list))]
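			# For each group of neighboring projections: convert every member's 3D projection
			# parameters into a 2D alignment relative to the group's central projection
			# (params_3D_2D_NEW + compose_transform2), optionally normalize and low-pass
			# filter the images, and compute the group's 2D average and variance.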
			for i in range(len(proj_list)):
				ki = proj_angles[proj_list[i][0]][3]
				if ki >= symbaselen:  continue
				mi = index[ki]
				dpar = Util.get_transform_params(imgdata[mi], "xform.projection", "spider")
				phiM, thetaM, psiM, s2xM, s2yM  = dpar["phi"],dpar["theta"],dpar["psi"],-dpar["tx"]*current_decimate,-dpar["ty"]*current_decimate
				grp_imgdata = []
				for j in range(img_per_grp):
					mj = index[proj_angles[proj_list[i][j]][3]]
					cpar = Util.get_transform_params(imgdata[mj], "xform.projection", "spider")
					alpha, sx, sy, mirror = params_3D_2D_NEW(cpar["phi"], cpar["theta"],cpar["psi"], -cpar["tx"]*current_decimate, -cpar["ty"]*current_decimate, mirror_list[i][j])
					if thetaM <= 90:
						if mirror == 0:  alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, phiM - cpar["phi"], 0.0, 0.0, 1.0)
						else:            alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, 180-(phiM - cpar["phi"]), 0.0, 0.0, 1.0)
					else:
						if mirror == 0:  alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, -(phiM- cpar["phi"]), 0.0, 0.0, 1.0)
						else:            alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, -(180-(phiM - cpar["phi"])), 0.0, 0.0, 1.0)
					imgdata[mj].set_attr("xform.align2d", Transform({"type":"2D","alpha":alpha,"tx":sx,"ty":sy,"mirror":mirror,"scale":1.0}))
					grp_imgdata.append(imgdata[mj])
				if not options.no_norm:
					for k in range(img_per_grp):
						ave, std, minn, maxx = Util.infomask(grp_imgdata[k], mask, False)
						grp_imgdata[k] -= ave
						grp_imgdata[k] /= std
				if options.fl > 0.0:
					for k in range(img_per_grp):
						grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa)

				#  Because of background issues, only linear option works.
				if options.CTF:  ave, var = aves_wiener(grp_imgdata, SNR = 1.0e5, interpolation_method = "linear")
				else:  ave, var = ave_var(grp_imgdata)
				# Switch to std dev
				# thresholding is not strictly needed; it only guards against small negative values caused by numerical inaccuracy.
				var = square_root(threshold(var))

				set_params_proj(ave, [phiM, thetaM, 0.0, 0.0, 0.0])
				set_params_proj(var, [phiM, thetaM, 0.0, 0.0, 0.0])

				aveList.append(ave)
				varList.append(var)
				xform_proj_for_2D[i] = [phiM, thetaM, 0.0, 0.0, 0.0]

				'''
				if nvec > 0:
					eig = pca(input_stacks=grp_imgdata, subavg="", mask_radius=radiuspca, nvec=nvec, incore=True, shuffle=False, genbuf=True)
					for k in range(nvec):
						set_params_proj(eig[k], [phiM, thetaM, 0.0, 0.0, 0.0])
						eigList[k].append(eig[k])
					"""
					if myid == 0 and i == 0:
						for k in xrange(nvec):
							eig[k].write_image("eig.hdf", k)
					"""
				'''
				if (myid == heavy_load_myid) and (i%100 == 0):
					log_main.add(" ......%6.2f%%  "%(i/float(len(proj_list))*100.))		
			del imgdata, grp_imgdata, cpar, dpar, all_proj, proj_angles, index
			if not options.no_norm: del mask
			if myid == main_node: del tab
			#  At this point, all averages and variances are computed
			mpi_barrier(MPI_COMM_WORLD)
			
			if (myid == heavy_load_myid):
				log_main.add("Computing aveList and varList took %12.1f [m]"%((time()-ttt)/60.))
			
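			# Collect the per-group projection parameters from all ranks on the main node
			# and write them to params.txt; they are later imported into the headers of
			# the 2D average/variance stacks.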
			nproj = len(xform_proj_for_2D)
			nproj = mpi_reduce(nproj, 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD)
			if myid == main_node:
				nproj = int(nproj[0])  # mpi_reduce returns an array; convert it as done elsewhere in this program
				txform_proj = [ None for i in range(nproj)]
				txform_proj[0:len(xform_proj_for_2D)] = xform_proj_for_2D[:]
				nc = len(xform_proj_for_2D)
			else:
				wrap_mpi_send(xform_proj_for_2D, main_node, MPI_COMM_WORLD)
			if (myid == main_node):
				for iproc in range(1, number_of_proc):
					dummy = wrap_mpi_recv(iproc, MPI_COMM_WORLD)
					for im in range(len(dummy)):
						txform_proj[nc] = dummy[im]
						nc +=1
				write_text_row(txform_proj, os.path.join(current_output_dir, "params.txt"))
				del txform_proj
			del xform_proj_for_2D
			mpi_barrier(MPI_COMM_WORLD)
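			# Write the 2D averages: worker ranks send their aveList images to the main node
			# (send_EMData), which writes them sequentially to the ave2D stack and finally
			# imports the projection parameters into its headers.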
			if options.ave2D:
				from fundamentals import fpol
				from applications import header
				if myid == main_node:
					log_main.add("Compute ave2D ... ")
					km = 0
					for i in range(number_of_proc):
						if i == main_node :
							for im in range(len(aveList)):
								aveList[im].write_image(os.path.join(current_output_dir, options.ave2D), km)
								km += 1
						else:
							nl = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
							nl = int(nl[0])
							for im in range(nl):
								ave = recv_EMData(i, im+i+70000)
								"""
								nm = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
								nm = int(nm[0])
								members = mpi_recv(nm, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
								ave.set_attr('members', map(int, members))
								members = mpi_recv(nm, MPI_FLOAT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
								ave.set_attr('pix_err', map(float, members))
								members = mpi_recv(3, MPI_FLOAT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
								ave.set_attr('refprojdir', map(float, members))
								"""
								tmpvol=fpol(ave, nx, nx,1)								
								tmpvol.write_image(os.path.join(current_output_dir, options.ave2D), km)
								km += 1
				else:
					mpi_send(len(aveList), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
					for im in range(len(aveList)):
						send_EMData(aveList[im], main_node,im+myid+70000)
						"""
						members = aveList[im].get_attr('members')
						mpi_send(len(members), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						mpi_send(members, len(members), MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						members = aveList[im].get_attr('pix_err')
						mpi_send(members, len(members), MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						try:
							members = aveList[im].get_attr('refprojdir')
							mpi_send(members, 3, MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						except:
							mpi_send([-999.0,-999.0,-999.0], 3, MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						"""
				if myid == main_node:
					header(os.path.join(current_output_dir, options.ave2D), params='xform.projection', fimport = os.path.join(current_output_dir, "params.txt"))
				mpi_barrier(MPI_COMM_WORLD)	
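			# Reconstruct a 3D average from the 2D class averages; on the main node undo the
			# decimation (resample), restore the original image size (fpol), reset the pixel
			# size and write ave3D.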
			if options.ave3D:
				from fundamentals import fpol
				t5 = time()
				if myid == main_node: log_main.add("Reconstruct ave3D ... ")
				ave3D = recons3d_4nn_MPI(myid, aveList, symmetry=options.sym, npad=options.npad)
				bcast_EMData_to_all(ave3D, myid)
				if myid == main_node:
					if current_decimate != 1.0: ave3D = resample(ave3D, 1./current_decimate)
					ave3D = fpol(ave3D, nnxo, nnxo, nnxo) # always restore the original image size
					set_pixel_size(ave3D, 1.0)
					ave3D.write_image(os.path.join(current_output_dir, options.ave3D))
					log_main.add("Ave3D reconstruction took %12.1f [m]"%((time()-t5)/60.0))
					log_main.add("%-70s:  %s\n"%("The reconstructed ave3D is saved as ", options.ave3D))
					
			mpi_barrier(MPI_COMM_WORLD)		
			del ave, var, proj_list, stack, alpha, sx, sy, mirror, aveList
			'''
			if nvec > 0:
				for k in range(nvec):
					if myid == main_node:log_main.add("Reconstruction eigenvolumes", k)
					cont = True
					ITER = 0
					mask2d = model_circle(radiuspca, nx, nx)
					while cont:
						#print "On node %d, iteration %d"%(myid, ITER)
						eig3D = recons3d_4nn_MPI(myid, eigList[k], symmetry=options.sym, npad=options.npad)
						bcast_EMData_to_all(eig3D, myid, main_node)
						if options.fl > 0.0:
							eig3D = filt_tanl(eig3D, options.fl, options.aa)
						if myid == main_node:
							eig3D.write_image(os.path.join(options.output_dir, "eig3d_%03d_%03d.hdf"%(k, ITER)))
						Util.mul_img( eig3D, model_circle(radiuspca, nx, nx, nx) )
						eig3Df, kb = prep_vol(eig3D)
						del eig3D
						cont = False
						icont = 0
						for l in range(len(eigList[k])):
							phi, theta, psi, s2x, s2y = get_params_proj(eigList[k][l])
							proj = prgs(eig3Df, kb, [phi, theta, psi, s2x, s2y])
							cl = ccc(proj, eigList[k][l], mask2d)
							if cl < 0.0:
								icont += 1
								cont = True
								eigList[k][l] *= -1.0
						u = int(cont)
						u = mpi_reduce([u], 1, MPI_INT, MPI_MAX, main_node, MPI_COMM_WORLD)
						icont = mpi_reduce([icont], 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD)

						if myid == main_node:
							u = int(u[0])
							log_main.add(" Eigenvector: ",k," number changed ",int(icont[0]))
						else: u = 0
						u = bcast_number_to_all(u, main_node)
						cont = bool(u)
						ITER += 1

					del eig3Df, kb
					mpi_barrier(MPI_COMM_WORLD)
				del eigList, mask2d
			'''
			if options.ave3D: del ave3D
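			# Write the 2D variances with the same gather pattern as ave2D: workers send
			# their varList images to the main node, which writes the var2D stack and
			# imports the projection parameters into its headers.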
			if options.var2D:
				from fundamentals import fpol 
				from applications import header
				if myid == main_node:
					log_main.add("Compute var2D...")
					km = 0
					for i in range(number_of_proc):
						if i == main_node :
							for im in range(len(varList)):
								tmpvol=fpol(varList[im], nx, nx,1)
								tmpvol.write_image(os.path.join(current_output_dir, options.var2D), km)
								km += 1
						else:
							nl = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
							nl = int(nl[0])
							for im in range(nl):
								ave = recv_EMData(i, im+i+70000)
								tmpvol=fpol(ave, nx, nx,1)
								tmpvol.write_image(os.path.join(current_output_dir, options.var2D), km)
								km += 1
				else:
					mpi_send(len(varList), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
					for im in range(len(varList)):
						send_EMData(varList[im], main_node, im+myid+70000)  # what about the attributes??
				mpi_barrier(MPI_COMM_WORLD)
				if myid == main_node:
					from applications import header
					header(os.path.join(current_output_dir, options.var2D), params = 'xform.projection',fimport = os.path.join(current_output_dir, "params.txt"))
				mpi_barrier(MPI_COMM_WORLD)
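		# Reconstruct the 3D variability map from the 2D variance images; on the main node
		# undo the decimation, restore the original image size, and write var3D.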
		if options.var3D:
			if myid == main_node: log_main.add("Reconstruct var3D ...")
			t6 = time()
			# radiusvar = options.radius
			# if( radiusvar < 0 ):  radiusvar = nx//2 -3
			res = recons3d_4nn_MPI(myid, varList, symmetry = options.sym, npad=options.npad)
			#res = recons3d_em_MPI(varList, vol_stack, options.iter, radiusvar, options.abs, True, options.sym, options.squ)
			if myid == main_node:
				from fundamentals import fpol
				if current_decimate != 1.0: res	= resample(res, 1./current_decimate)
				res = fpol(res, nnxo, nnxo, nnxo)
				set_pixel_size(res, 1.0)
				res.write_image(os.path.join(current_output_dir, options.var3D))
				log_main.add("%-70s:  %s\n"%("The reconstructed var3D is saved as ", options.var3D))
				log_main.add("Var3D reconstruction took %f12.1 [m]"%((time()-t6)/60.0))
				log_main.add("Total computation time %f12.1 [m]"%((time()-t0)/60.0))
				log_main.add("sx3dvariability finishes")
		from mpi import mpi_finalize
		mpi_finalize()
		
		if RUNNING_UNDER_MPI: global_def.MPI = False

		global_def.BATCH = False