Example #1
def gen_rings_ctf( prjref, nx, ctf, numr):
	"""
	  Convert a set of FFTs of projections to Fourier rings, additionally multiplied by a CTF.
	  Returns a list of ring images.
	"""
	from math         import sin, cos, pi
	from fundamentals import fft
	from alignment    import ringwe
	from filter       import filt_ctf
	mode = "F"
	wr_four  = ringwe(numr, "F")
	cnx = nx//2 + 1
	cny = nx//2 + 1
	qv = pi/180.0

	refrings = []     # list of (image objects) reference projections in Fourier representation

	for i in xrange( len(prjref) ):
		cimage = Util.Polar2Dm(filt_ctf(prjref[i], ctf, True) , cnx, cny, numr, mode)  # currently set to quadratic....
		Util.Normalize_ring(cimage, numr)

		Util.Frngs(cimage, numr)
		Util.Applyws(cimage, numr, wr_four)
		refrings.append(cimage)
		phi   = prjref[i].get_attr('phi')
		theta = prjref[i].get_attr('theta')
		psi   = prjref[i].get_attr('psi')
		n1 = sin(theta*qv)*cos(phi*qv)
		n2 = sin(theta*qv)*sin(phi*qv)
		n3 = cos(theta*qv)
		refrings[i].set_attr_dict( {"n1":n1, "n2":n2, "n3":n3, "phi": phi, "theta": theta,"psi": psi} )

	return refrings
Example #2
def gen_rings_ctf( prjref, nx, ctf, numr):
	"""
	  Convert a set of FFTs of projections to Fourier rings, additionally multiplied by a CTF.
	  Returns a list of ring images.
	"""
	from math         import sin, cos, pi
	from fundamentals import fft
	from alignment    import ringwe
	from filter       import filt_ctf
	mode = "F"
	wr_four  = ringwe(numr, "F")
	cnx = nx//2 + 1
	cny = nx//2 + 1
	qv = pi/180.0

	refrings = []     # list of (image objects) reference projections in Fourier representation

	for i in range( len(prjref) ):
		cimage = Util.Polar2Dm(filt_ctf(prjref[i], ctf, True) , cnx, cny, numr, mode)  # currently set to quadratic....
		Util.Normalize_ring(cimage, numr, 0 )

		Util.Frngs(cimage, numr)
		Util.Applyws(cimage, numr, wr_four)
		refrings.append(cimage)
		phi   = prjref[i].get_attr('phi')
		theta = prjref[i].get_attr('theta')
		psi   = prjref[i].get_attr('psi')
		n1 = sin(theta*qv)*cos(phi*qv)
		n2 = sin(theta*qv)*sin(phi*qv)
		n3 = cos(theta*qv)
		refrings[i].set_attr_dict( {"n1":n1, "n2":n2, "n3":n3, "phi": phi, "theta": theta,"psi": psi} )

	return refrings
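A minimal usage sketch for gen_rings_ctf. This is illustrative only: "refprojs.hdf" is a placeholder stack of reference projections carrying phi/theta/psi attributes, and Numrinit (from the SPARX alignment module) is assumed to produce the ring description numr in the same "F" mode used above.

from EMAN2      import EMData
from alignment  import Numrinit
from utilities  import generate_ctf

prjref = EMData.read_images("refprojs.hdf")      # placeholder stack of reference projections
nx     = prjref[0].get_xsize()
numr   = Numrinit(1, nx//2 - 2, 1, "F")          # rings: first, last, step, full ("F") mode
# CTF parameter order as in the other examples: [defocus, Cs, voltage, apix, bfactor, ampcont, astig amp, astig angle]
ctf      = generate_ctf([2.0, 2.0, 300.0, 1.0, 0.0, 10.0, 0.0, 0.0])
refrings = gen_rings_ctf(prjref, nx, ctf, numr)  # list of CTF-multiplied, weighted Fourier rings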
Example #3
def main():
    import os
    import sys

    import global_def
    from global_def import SPARXVERSION      # assumed SPARX module-level import
    from EMAN2 import EMData
    from optparse import OptionParser
    arglist = []
    for arg in sys.argv:
        arglist.append(arg)

    progname = os.path.basename(arglist[0])
    usage = progname + """ input_ctf_params 

        input_ctf_params: output file from sxcter.py, which was run in Multi-micrograph mode
        Note: files to be flipped should be in same directory.
        Outputs: phase-flipped files in hdf format, with identical filenames as input
"""
    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option("--input_ctf_params",
                      type="string",
                      default="partres.txt",
                      help="output from sxcter.py")

    (options, args) = parser.parse_args(arglist[1:])
    infile = options.input_ctf_params
    padimg = True  #ctf correction -- pad image
    sign = 1  # ctf correction - sign
    binary = 1  # ctf correction: phaseflip only
    from utilities import generate_ctf
    from filter import filt_ctf
    global_def.BATCH = True

    file_content = open(infile)
    ctflist = file_content.readlines()
    file_content.close()
    img = EMData()
    for line in ctflist:
        data = line.split()
        imgfile = data[17]
        print "Working on file %s\n" % (imgfile)
        name = os.path.splitext(imgfile)[0]  # strip the extension; robust to extra dots in the path
        outfile = name + '.hdf'  # Assumes input files are NOT in hdf format! Otherwise they will be overwritten.
        df = float(data[0])
        cs = float(data[1])
        voltage = float(data[2])
        apix = float(data[3])
        bfactor = float(data[4])
        amp_contrast = float(data[5])
        astig = float(data[6])
        astig_ang = float(data[7])
        ctf = generate_ctf(
            [df, cs, voltage, apix, bfactor, amp_contrast, astig, astig_ang])
        img.read_image(imgfile)
        corr_img = filt_ctf(img, ctf, padimg, sign, binary)
        corr_img.write_image(outfile)
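The loop above boils down to one phase flip per micrograph. A hedged single-image sketch (file names are placeholders; the positional filt_ctf arguments mirror padimg, sign and binary defined earlier):

from EMAN2     import EMData
from utilities import generate_ctf
from filter    import filt_ctf

img = EMData()
img.read_image("micrograph.mrc")                                  # placeholder input file
ctf = generate_ctf([2.5, 2.0, 300.0, 1.06, 0.0, 10.0, 0.0, 0.0])  # df, cs, voltage, apix, bfactor, ampcont, astig amp, astig angle
flipped = filt_ctf(img, ctf, True, 1, 1)                          # pad image, sign=1, binary=1 -> phase flip only
flipped.write_image("micrograph.hdf")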
Example #4
def generate_helimic(refvol, outdir, pixel, CTF=False, Cs=2.0,voltage = 200.0, ampcont = 10.0, nonoise = False, rand_seed=14567):
	
	import os
	from utilities	 import model_blank, model_gauss, model_gauss_noise, pad, get_im
	from random 	 import random, seed
	from projection  import prgs, prep_vol
	from filter	     import filt_gaussl, filt_ctf
	from EMAN2 	     import EMAN2Ctf, Util
	from global_def  import ERROR   # ERROR and Util are used below; assumed to come from global_def / EMAN2 as in other SPARX scripts
	
	if os.path.exists(outdir):   ERROR('Output directory exists, please change the name and restart the program', "sxhelical_demo", 1)
	os.mkdir(outdir)
	seed(rand_seed)
	Util.set_randnum_seed(rand_seed)
	angles =[]
	for i in xrange(3):
		angles.append( [0.0+60.0*i, 90.0-i*5, 0.0, 0.0, 0.0] )

	nangle   = len(angles)

	volfts = get_im(refvol)
	nx = volfts.get_xsize()
	ny = volfts.get_ysize()
	nz = volfts.get_zsize()
	volfts, kbx, kby, kbz = prep_vol( volfts )
	iprj   = 0
	width  = 500
	xstart = 0
	ystart = 0

	for idef in xrange(3,6):
		mic = model_blank(2048, 2048)
		#defocus = idef*0.2
		defocus = idef*0.6     ##@ming
		if CTF :
			#ctf = EMAN2Ctf()
			#ctf.from_dict( {"defocus":defocus, "cs":Cs, "voltage":voltage, "apix":pixel, "ampcont":ampcont, "bfactor":0.0} )
			from utilities import generate_ctf
			ctf = generate_ctf([defocus,2,200,1.84,0.0,ampcont,defocus*0.2,80])   ##@ming   the range of astigmatism amplitude is between 10 percent and 22 percent. 20 percent is a good choice.
		i = idef - 4
		for k in xrange(1):
			psi  = 90 + 10*i			
			proj = prgs(volfts, kbz, [angles[idef-3][0], angles[idef-3][1], psi, 0.0, 0.0], kbx, kby)
			proj = Util.window(proj, 320, nz)		
			mic += pad(proj, 2048, 2048, 1, 0.0, 750*i, 20*i, 0)

		if not nonoise:  mic += model_gauss_noise(30.0,2048,2048)
		if CTF :
			#apply CTF
			mic = filt_ctf(mic, ctf)

		if not nonoise:  mic += filt_gaussl(model_gauss_noise(17.5,2048,2048), 0.3)

		mic.write_image("%s/mic%1d.hdf"%(outdir, idef-3),0)
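A hedged call of the generator above; "refvol.hdf" and the output directory are placeholders, and the keyword values simply echo the defaults in the signature.

# Produces mic0.hdf, mic1.hdf and mic2.hdf in "helimic_out", with CTF and noise applied.
generate_helimic("refvol.hdf", "helimic_out", pixel=1.84, CTF=True,
                 Cs=2.0, voltage=200.0, ampcont=10.0, nonoise=False, rand_seed=14567)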
Example #5
def main():
	import os, sys
	from optparse import OptionParser
	import global_def
	from global_def import SPARXVERSION  # assumed SPARX module-level import
	from EMAN2 import EMData
	from utilities import get_input_from_string
	progname = os.path.basename(sys.argv[0])
	usage = progname + " stack output_average --radius=particle_radius --xr=xr --yr=yr --ts=ts --thld_err=thld_err --num_ali=num_ali --fl=fl --aa=aa --CTF --verbose --stables"
	parser = OptionParser(usage,version=SPARXVERSION)
	parser.add_option("--radius",       type="int",              default=-1,          help=" particle radius for alignment")
	parser.add_option("--xr",           type="string"      ,     default="2 1",       help="range for translation search in x direction, search is +/xr (default 2,1)")
	parser.add_option("--yr",           type="string"      ,     default="-1",        help="range for translation search in y direction, search is +/yr (default = same as xr)")
	parser.add_option("--ts",           type="string"      ,     default="1 0.5",     help="step size of the translation search in both directions, search is -xr, -xr+ts, 0, xr-ts, xr, can be fractional (default: 1,0.5)")
	parser.add_option("--thld_err",     type="float",            default=0.75,        help="threshld of pixel error (default = 0.75)")
	parser.add_option("--num_ali",      type="int",              default=5,           help="number of alignments performed for stability (default = 5)")
	parser.add_option("--maxit",        type="int",              default=30,          help="number of iterations for each xr (default = 30)")
	parser.add_option("--fl",           type="float"       ,     default=0.3,         help="cut-off frequency of hyperbolic tangent low-pass Fourier filter (default = 0.3)")
	parser.add_option("--aa",           type="float"       ,     default=0.2,         help="fall-off of hyperbolic tangent low-pass Fourier filter (default = 0.2)")
	parser.add_option("--CTF",          action="store_true",     default=False,       help="Use CTF correction during the alignment ")
	parser.add_option("--verbose",      action="store_true",     default=False,       help="print individual pixel error (default = False)")
	parser.add_option("--stables",		action="store_true",	 default=False,	      help="output the stable particles number in file (default = False)")
	parser.add_option("--method",		type="string"      ,	 default=" ",	      help="SHC (standard method is default when flag is ommitted)")
	(options, args) = parser.parse_args()
	if len(args) != 1 and len(args) != 2:
    		print "usage: " + usage
    		print "Please run '" + progname + " -h' for detailed options"
	else:
		if global_def.CACHE_DISABLE:
			from utilities import disable_bdb_cache
			disable_bdb_cache()

		from applications   import within_group_refinement, ali2d_ras
		from pixel_error    import multi_align_stability
		from utilities      import write_text_file, write_text_row

		global_def.BATCH = True

		xrng        = get_input_from_string(options.xr)
		if  options.yr == "-1":  yrng = xrng
		else          :  yrng = get_input_from_string(options.yr)
		step        = get_input_from_string(options.ts)

		class_data = EMData.read_images(args[0])

		nx = class_data[0].get_xsize()
		ou = options.radius
		num_ali = options.num_ali
		if ou == -1: ou = nx/2-2
		from utilities import model_circle, get_params2D, set_params2D
		mask = model_circle(ou, nx, nx)

		if options.CTF :
			from filter import filt_ctf
			for im in xrange(len(class_data)):
				#  Flip phases
				class_data[im] = filt_ctf(class_data[im], class_data[im].get_attr("ctf"), binary=1)
		for im in class_data:
			im.set_attr("previousmax", -1.0e10)
			try:
				t = im.get_attr("xform.align2d") # if they are there, no need to set them!
			except:
				try:
					t = im.get_attr("xform.projection")
					d = t.get_params("spider")
					set_params2D(im, [0.0, -d["tx"], -d["ty"], 0, 1.0])
				except:
					set_params2D(im, [0.0, 0.0, 0.0, 0, 1.0])
		all_ali_params = []

		for ii in xrange(num_ali):
			ali_params = []
			if options.verbose:
				ALPHA = []
				SX = []
				SY = []
				MIRROR = []
			if( xrng[0] == 0.0 and yrng[0] == 0.0 ):
				avet = ali2d_ras(class_data, randomize = True, ir = 1, ou = ou, rs = 1, step = 1.0, dst = 90.0, \
						maxit = options.maxit, check_mirror = True, FH=options.fl, FF=options.aa)
			else:
				avet = within_group_refinement(class_data, mask, True, 1, ou, 1, xrng, yrng, step, 90.0, \
						maxit = options.maxit, FH=options.fl, FF=options.aa, method = options.method)
				from utilities import info
				#print "  avet  ",info(avet)
			for im in class_data:
				alpha, sx, sy, mirror, scale = get_params2D(im)
				ali_params.extend([alpha, sx, sy, mirror])
				if options.verbose:
					ALPHA.append(alpha)
					SX.append(sx)
					SY.append(sy)
					MIRROR.append(mirror)
			all_ali_params.append(ali_params)
			if options.verbose:
				write_text_file([ALPHA, SX, SY, MIRROR], "ali_params_run_%d"%ii)
		"""
		avet = class_data[0]
		from utilities import read_text_file
		all_ali_params = []
		for ii in xrange(5):
			temp = read_text_file( "ali_params_run_%d"%ii,-1)
			uuu = []
			for k in xrange(len(temp[0])):
				uuu.extend([temp[0][k],temp[1][k],temp[2][k],temp[3][k]])
			all_ali_params.append(uuu)


		"""

		stable_set, mir_stab_rate, pix_err = multi_align_stability(all_ali_params, 0.0, 10000.0, options.thld_err, options.verbose, 2*ou+1)
		print "%4s %20s %20s %20s %30s %6.2f"%("", "Size of set", "Size of stable set", "Mirror stab rate", "Pixel error prior to pruning the set above threshold of",options.thld_err)
		print "Average stat: %10d %20d %20.2f   %15.2f"%( len(class_data), len(stable_set), mir_stab_rate, pix_err)
		if( len(stable_set) > 0):
			if options.stables:
				stab_mem = [[0,0.0,0] for j in xrange(len(stable_set))]
				for j in xrange(len(stable_set)): stab_mem[j] = [int(stable_set[j][1]), stable_set[j][0], j]
				write_text_row(stab_mem, "stable_particles.txt")

			stable_set_id = []
			particle_pixerr = []
			for s in stable_set:
				stable_set_id.append(s[1])
				particle_pixerr.append(s[0])
			from fundamentals import rot_shift2D
			avet.to_zero()
			l = -1
			print "average parameters:  angle, x-shift, y-shift, mirror"
			for j in stable_set_id:
				l += 1
				print " %4d  %4d  %12.2f %12.2f %12.2f        %1d"%(l,j, stable_set[l][2][0], stable_set[l][2][1], stable_set[l][2][2], int(stable_set[l][2][3]))
				avet += rot_shift2D(class_data[j], stable_set[l][2][0], stable_set[l][2][1], stable_set[l][2][2], stable_set[l][2][3] )
			avet /= (l+1)
			avet.set_attr('members', stable_set_id)
			avet.set_attr('pix_err', pix_err)
			avet.set_attr('pixerr', particle_pixerr)
			avet.write_image(args[1])



		global_def.BATCH = False
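Based on the usage string above, a typical invocation could look like the line below. The script name is whatever this main() is installed as (it is only read back from sys.argv[0]); the stack, output average and option values are placeholders.

	<script_name>  class0001.hdf  class0001_avg.hdf  --radius=29 --xr="2 1" --ts="1 0.5" --num_ali=5 --thld_err=0.75 --CTF --stables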
Example #6
def main():
    def params_3D_2D_NEW(phi, theta, psi, s2x, s2y, mirror):
        if mirror:
            m = 1
            alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0,
                                                       540.0 - psi, 0, 0, 1.0)
        else:
            m = 0
            alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0,
                                                       360.0 - psi, 0, 0, 1.0)
        return alpha, sx, sy, m

    progname = os.path.basename(sys.argv[0])
    usage = progname + " prj_stack  --ave2D= --var2D=  --ave3D= --var3D= --img_per_grp= --fl=15. --aa=0.01  --sym=symmetry --CTF"
    parser = OptionParser(usage, version=SPARXVERSION)

    parser.add_option("--output_dir",
                      type="string",
                      default="./",
                      help="output directory")
    parser.add_option("--ave2D",
                      type="string",
                      default=False,
                      help="write to the disk a stack of 2D averages")
    parser.add_option("--var2D",
                      type="string",
                      default=False,
                      help="write to the disk a stack of 2D variances")
    parser.add_option("--ave3D",
                      type="string",
                      default=False,
                      help="write to the disk reconstructed 3D average")
    parser.add_option("--var3D",
                      type="string",
                      default=False,
                      help="compute 3D variability (time consuming!)")
    parser.add_option("--img_per_grp",
                      type="int",
                      default=10,
                      help="number of neighbouring projections")
    parser.add_option("--no_norm",
                      action="store_true",
                      default=False,
                      help="do not use normalization")
    #parser.add_option("--radius", 	    type="int"         ,	default=-1   ,				help="radius for 3D variability" )
    parser.add_option("--npad",
                      type="int",
                      default=2,
                      help="number of time to pad the original images")
    parser.add_option("--sym", type="string", default="c1", help="symmetry")
    parser.add_option(
        "--fl",
        type="float",
        default=0.0,
        help=
        "cutoff freqency in absolute frequency (0.0-0.5). (Default - no filtration)"
    )
    parser.add_option(
        "--aa",
        type="float",
        default=0.0,
        help=
        "fall off of the filter. Put 0.01 if user has no clue about falloff (Default - no filtration)"
    )
    parser.add_option("--CTF",
                      action="store_true",
                      default=False,
                      help="use CFT correction")
    parser.add_option("--VERBOSE",
                      action="store_true",
                      default=False,
                      help="Long output for debugging")
    #parser.add_option("--MPI" , 		action="store_true",	default=False,				help="use MPI version")
    #parser.add_option("--radiuspca", 	type="int"         ,	default=-1   ,				help="radius for PCA" )
    #parser.add_option("--iter", 		type="int"         ,	default=40   ,				help="maximum number of iterations (stop criterion of reconstruction process)" )
    #parser.add_option("--abs", 		type="float"   ,        default=0.0  ,				help="minimum average absolute change of voxels' values (stop criterion of reconstruction process)" )
    #parser.add_option("--squ", 		type="float"   ,	    default=0.0  ,				help="minimum average squared change of voxels' values (stop criterion of reconstruction process)" )
    parser.add_option(
        "--VAR",
        action="store_true",
        default=False,
        help="stack on input consists of 2D variances (Default False)")
    parser.add_option(
        "--decimate",
        type="float",
        default=1.0,
        help=
        "image decimate rate, a number larger (expand image) or less (shrink image) than 1. default is 1"
    )
    parser.add_option(
        "--window",
        type="int",
        default=0,
        help=
        "reduce images to a small image size without changing pixel_size. Default value is zero."
    )
    #parser.add_option("--SND",			action="store_true",	default=False,				help="compute squared normalized differences (Default False)")
    parser.add_option(
        "--nvec",
        type="int",
        default=0,
        help="number of eigenvectors, default = 0 meaning no PCA calculated")
    parser.add_option(
        "--symmetrize",
        action="store_true",
        default=False,
        help="Prepare input stack for handling symmetry (Default False)")

    (options, args) = parser.parse_args()
    #####
    from mpi import mpi_init, mpi_comm_rank, mpi_comm_size, mpi_recv, MPI_COMM_WORLD
    from mpi import mpi_barrier, mpi_reduce, mpi_bcast, mpi_send, MPI_FLOAT, MPI_SUM, MPI_INT, MPI_MAX
    from applications import MPI_start_end
    from reconstruction import recons3d_em, recons3d_em_MPI
    from reconstruction import recons3d_4nn_MPI, recons3d_4nn_ctf_MPI
    from utilities import print_begin_msg, print_end_msg, print_msg
    from utilities import read_text_row, get_image, get_im
    from utilities import bcast_EMData_to_all, bcast_number_to_all
    from utilities import get_symt
    from time import time, strftime, localtime
    from EMAN2 import EMData, EMUtil
    # delete_bdb, cmdexecute, ERROR, image_decimate_window_xform_ctf and SPARX_MPI_TAG_UNIVERSAL are
    # assumed to come from the enclosing script's SPARX module-level imports (utilities / global_def).

    #  This is code for handling symmetries by the above program.  To be incorporated. PAP 01/27/2015

    from EMAN2db import db_open_dict

    # Set up global variables related to bdb cache
    if global_def.CACHE_DISABLE:
        from utilities import disable_bdb_cache
        disable_bdb_cache()

    # Set up global variables related to ERROR function
    global_def.BATCH = True

    # detect if program is running under MPI
    RUNNING_UNDER_MPI = "OMPI_COMM_WORLD_SIZE" in os.environ
    if RUNNING_UNDER_MPI:
        global_def.MPI = True

    if options.symmetrize:
        if RUNNING_UNDER_MPI:
            try:
                sys.argv = mpi_init(len(sys.argv), sys.argv)
                try:
                    number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
                    if (number_of_proc > 1):
                        ERROR(
                            "Cannot use more than one CPU for symmetry prepration",
                            "sx3dvariability", 1)
                except:
                    pass
            except:
                pass
        if options.output_dir != "./" and not os.path.exists(
                options.output_dir):
            os.mkdir(options.output_dir)
        #  Input
        #instack = "Clean_NORM_CTF_start_wparams.hdf"
        #instack = "bdb:data"

        from logger import Logger, BaseLogger_Files
        if os.path.exists(os.path.join(options.output_dir, "log.txt")):
            os.remove(os.path.join(options.output_dir, "log.txt"))
        log_main = Logger(BaseLogger_Files())
        log_main.prefix = os.path.join(options.output_dir, "./")

        instack = args[0]
        sym = options.sym.lower()
        if (sym == "c1"):
            ERROR("There is no need to symmetrize stack for C1 symmetry",
                  "sx3dvariability", 1)

        line = ""
        for a in sys.argv:
            line += " " + a
        log_main.add(line)

        if (instack[:4] != "bdb:"):
            if options.output_dir == "./": stack = "bdb:data"
            else: stack = "bdb:" + options.output_dir + "/data"
            delete_bdb(stack)
            junk = cmdexecute("sxcpy.py  " + instack + "  " + stack)
        else:
            stack = instack

        qt = EMUtil.get_all_attributes(stack, 'xform.projection')

        na = len(qt)
        ts = get_symt(sym)
        ks = len(ts)
        angsa = [None] * na

        for k in xrange(ks):
            #Qfile = "Q%1d"%k
            Qfile = os.path.join(options.output_dir, "Q%1d" % k)
            #delete_bdb("bdb:Q%1d"%k)
            delete_bdb("bdb:" + Qfile)
            #junk = cmdexecute("e2bdb.py  "+stack+"  --makevstack=bdb:Q%1d"%k)
            junk = cmdexecute("e2bdb.py  " + stack + "  --makevstack=bdb:" +
                              Qfile)
            #DB = db_open_dict("bdb:Q%1d"%k)
            DB = db_open_dict("bdb:" + Qfile)
            for i in xrange(na):
                ut = qt[i] * ts[k]
                DB.set_attr(i, "xform.projection", ut)
                #bt = ut.get_params("spider")
                #angsa[i] = [round(bt["phi"],3)%360.0, round(bt["theta"],3)%360.0, bt["psi"], -bt["tx"], -bt["ty"]]
            #write_text_row(angsa, 'ptsma%1d.txt'%k)
            #junk = cmdexecute("e2bdb.py  "+stack+"  --makevstack=bdb:Q%1d"%k)
            #junk = cmdexecute("sxheader.py  bdb:Q%1d  --params=xform.projection  --import=ptsma%1d.txt"%(k,k))
            DB.close()
        if options.output_dir == "./": delete_bdb("bdb:sdata")
        else: delete_bdb("bdb:" + options.output_dir + "/" + "sdata")
        #junk = cmdexecute("e2bdb.py . --makevstack=bdb:sdata --filt=Q")
        sdata = "bdb:" + options.output_dir + "/" + "sdata"
        print(sdata)
        junk = cmdexecute("e2bdb.py   " + options.output_dir +
                          "  --makevstack=" + sdata + " --filt=Q")
        #junk = cmdexecute("ls  EMAN2DB/sdata*")
        #a = get_im("bdb:sdata")
        a = get_im(sdata)
        a.set_attr("variabilitysymmetry", sym)
        #a.write_image("bdb:sdata")
        a.write_image(sdata)

    else:

        sys.argv = mpi_init(len(sys.argv), sys.argv)
        myid = mpi_comm_rank(MPI_COMM_WORLD)
        number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
        main_node = 0

        if len(args) == 1:
            stack = args[0]
        else:
            print(("usage: " + usage))
            print(("Please run '" + progname + " -h' for detailed options"))
            return 1

        t0 = time()
        # obsolete flags
        options.MPI = True
        options.nvec = 0
        options.radiuspca = -1
        options.iter = 40
        options.abs = 0.0
        options.squ = 0.0
        options.SND = False  # --SND is commented out above; define it so the sanity check below does not fail

        if options.fl > 0.0 and options.aa == 0.0:
            ERROR("Fall off has to be given for the low-pass filter",
                  "sx3dvariability", 1, myid)
        if options.VAR and options.SND:
            ERROR("Only one of VAR and SND can be set!", "sx3dvariability",
                  1, myid)
            exit()
        if options.VAR and (options.ave2D or options.ave3D or options.var2D):
            ERROR(
                "When VAR is set, the program cannot output ave2D, ave3D or var2D",
                "sx3dvariability", 1, myid)
            exit()
        #if options.SND and (options.ave2D or options.ave3D):
        #	ERROR("When SND is set, the program cannot output ave2D or ave3D", "sx3dvariability", 1, myid)
        #	exit()
        if options.nvec > 0:
            ERROR("PCA option not implemented", "sx3dvariability", 1, myid)
            exit()
        if options.nvec > 0 and options.ave3D == None:
            ERROR("When doing PCA analysis, one must set ave3D",
                  "sx3dvariability",
                  myid=myid)
            exit()
        import string
        options.sym = options.sym.lower()

        # if global_def.CACHE_DISABLE:
        # 	from utilities import disable_bdb_cache
        # 	disable_bdb_cache()
        # global_def.BATCH = True

        if myid == main_node:
            if options.output_dir != "./" and not os.path.exists(
                    options.output_dir):
                os.mkdir(options.output_dir)

        img_per_grp = options.img_per_grp
        nvec = options.nvec
        radiuspca = options.radiuspca

        from logger import Logger, BaseLogger_Files
        #if os.path.exists(os.path.join(options.output_dir, "log.txt")): os.remove(os.path.join(options.output_dir, "log.txt"))
        log_main = Logger(BaseLogger_Files())
        log_main.prefix = os.path.join(options.output_dir, "./")

        if myid == main_node:
            line = ""
            for a in sys.argv:
                line += " " + a
            log_main.add(line)
            log_main.add("-------->>>Settings given by all options<<<-------")
            log_main.add("instack  		    :" + stack)
            log_main.add("output_dir        :" + options.output_dir)
            log_main.add("var3d   		    :" + options.var3D)

        if myid == main_node:
            line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
            #print_begin_msg("sx3dvariability")
            msg = "sx3dvariability"
            log_main.add(msg)
            print(line, msg)
            msg = ("%-70s:  %s\n" % ("Input stack", stack))
            log_main.add(msg)
            print(line, msg)

        symbaselen = 0
        if myid == main_node:
            nima = EMUtil.get_image_count(stack)
            img = get_image(stack)
            nx = img.get_xsize()
            ny = img.get_ysize()
            if options.sym != "c1":
                imgdata = get_im(stack)
                try:
                    i = imgdata.get_attr("variabilitysymmetry").lower()
                    if (i != options.sym):
                        ERROR(
                            "The symmetry provided does not agree with the symmetry of the input stack",
                            "sx3dvariability",
                            myid=myid)
                except:
                    ERROR(
                        "Input stack is not prepared for symmetry, please follow instructions",
                        "sx3dvariability",
                        myid=myid)
                from utilities import get_symt
                i = len(get_symt(options.sym))
                if ((nima / i) * i != nima):
                    ERROR(
                        "The length of the input stack is incorrect for symmetry processing",
                        "sx3dvariability",
                        myid=myid)
                symbaselen = nima / i
            else:
                symbaselen = nima
        else:
            nima = 0
            nx = 0
            ny = 0
        nima = bcast_number_to_all(nima)
        nx = bcast_number_to_all(nx)
        ny = bcast_number_to_all(ny)
        Tracker = {}
        Tracker["total_stack"] = nima
        if options.decimate == 1.:
            if options.window != 0:
                nx = options.window
                ny = options.window
        else:
            if options.window == 0:
                nx = int(nx * options.decimate)
                ny = int(ny * options.decimate)
            else:
                nx = int(options.window * options.decimate)
                ny = nx
        Tracker["nx"] = nx
        Tracker["ny"] = ny
        Tracker["nz"] = nx
        symbaselen = bcast_number_to_all(symbaselen)
        if radiuspca == -1: radiuspca = nx / 2 - 2

        if myid == main_node:
            line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
            msg = "%-70s:  %d\n" % ("Number of projection", nima)
            log_main.add(msg)
            print(line, msg)
        img_begin, img_end = MPI_start_end(nima, number_of_proc, myid)
        """
		if options.SND:
			from projection		import prep_vol, prgs
			from statistics		import im_diff
			from utilities		import get_im, model_circle, get_params_proj, set_params_proj
			from utilities		import get_ctf, generate_ctf
			from filter			import filt_ctf
		
			imgdata = EMData.read_images(stack, range(img_begin, img_end))

			if options.CTF:
				vol = recons3d_4nn_ctf_MPI(myid, imgdata, 1.0, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)
			else:
				vol = recons3d_4nn_MPI(myid, imgdata, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)

			bcast_EMData_to_all(vol, myid)
			volft, kb = prep_vol(vol)

			mask = model_circle(nx/2-2, nx, ny)
			varList = []
			for i in xrange(img_begin, img_end):
				phi, theta, psi, s2x, s2y = get_params_proj(imgdata[i-img_begin])
				ref_prj = prgs(volft, kb, [phi, theta, psi, -s2x, -s2y])
				if options.CTF:
					ctf_params = get_ctf(imgdata[i-img_begin])
					ref_prj = filt_ctf(ref_prj, generate_ctf(ctf_params))
				diff, A, B = im_diff(ref_prj, imgdata[i-img_begin], mask)
				diff2 = diff*diff
				set_params_proj(diff2, [phi, theta, psi, s2x, s2y])
				varList.append(diff2)
			mpi_barrier(MPI_COMM_WORLD)
		"""
        if options.VAR:
            #varList   = EMData.read_images(stack, range(img_begin, img_end))
            varList = []
            this_image = EMData()
            for index_of_particle in xrange(img_begin, img_end):
                this_image.read_image(stack, index_of_particle)
                varList.append(
                    image_decimate_window_xform_ctf(this_image,
                                                    options.decimate,
                                                    options.window,
                                                    options.CTF))
        else:
            from utilities import bcast_number_to_all, bcast_list_to_all, send_EMData, recv_EMData
            from utilities import set_params_proj, get_params_proj, params_3D_2D, get_params2D, set_params2D, compose_transform2
            from utilities import model_blank, nearest_proj, model_circle
            from applications import pca
            from statistics import avgvar, avgvar_ctf, ccc
            from filter import filt_tanl
            from morphology import threshold, square_root
            from projection import project, prep_vol, prgs
            from sets import Set

            if myid == main_node:
                t1 = time()
                proj_angles = []
                aveList = []
                tab = EMUtil.get_all_attributes(stack, 'xform.projection')
                for i in xrange(nima):
                    t = tab[i].get_params('spider')
                    phi = t['phi']
                    theta = t['theta']
                    psi = t['psi']
                    x = theta
                    if x > 90.0: x = 180.0 - x
                    x = x * 10000 + psi
                    proj_angles.append([x, t['phi'], t['theta'], t['psi'], i])
                t2 = time()
                line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
                msg = "%-70s:  %d\n" % ("Number of neighboring projections",
                                        img_per_grp)
                log_main.add(msg)
                print(line, msg)
                msg = "...... Finding neighboring projections\n"
                log_main.add(msg)
                print(line, msg)
                if options.VERBOSE:
                    msg = "Number of images per group: %d" % img_per_grp
                    log_main.add(msg)
                    print(line, msg)
                    msg = "Now grouping projections"
                    log_main.add(msg)
                    print(line, msg)
                proj_angles.sort()
            proj_angles_list = [0.0] * (nima * 4)
            if myid == main_node:
                for i in xrange(nima):
                    proj_angles_list[i * 4] = proj_angles[i][1]
                    proj_angles_list[i * 4 + 1] = proj_angles[i][2]
                    proj_angles_list[i * 4 + 2] = proj_angles[i][3]
                    proj_angles_list[i * 4 + 3] = proj_angles[i][4]
            proj_angles_list = bcast_list_to_all(proj_angles_list, myid,
                                                 main_node)
            proj_angles = []
            for i in xrange(nima):
                proj_angles.append([
                    proj_angles_list[i * 4], proj_angles_list[i * 4 + 1],
                    proj_angles_list[i * 4 + 2],
                    int(proj_angles_list[i * 4 + 3])
                ])
            del proj_angles_list
            proj_list, mirror_list = nearest_proj(proj_angles, img_per_grp,
                                                  range(img_begin, img_end))

            all_proj = Set()
            for im in proj_list:
                for jm in im:
                    all_proj.add(proj_angles[jm][3])

            all_proj = list(all_proj)
            if options.VERBOSE:
                print("On node %2d, number of images needed to be read = %5d" %
                      (myid, len(all_proj)))

            index = {}
            for i in xrange(len(all_proj)):
                index[all_proj[i]] = i
            mpi_barrier(MPI_COMM_WORLD)

            if myid == main_node:
                line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
                msg = ("%-70s:  %.2f\n" %
                       ("Finding neighboring projections lasted [s]",
                        time() - t2))
                log_main.add(msg)
                print(msg)
                msg = ("%-70s:  %d\n" %
                       ("Number of groups processed on the main node",
                        len(proj_list)))
                log_main.add(msg)
                print(line, msg)
                if options.VERBOSE:
                    print("Grouping projections took: ", (time() - t2) / 60,
                          "[min]")
                    print("Number of groups on main node: ", len(proj_list))
            mpi_barrier(MPI_COMM_WORLD)

            if myid == main_node:
                line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
                msg = ("...... calculating the stack of 2D variances \n")
                log_main.add(msg)
                print(line, msg)
                if options.VERBOSE:
                    print("Now calculating the stack of 2D variances")

            proj_params = [0.0] * (nima * 5)
            aveList = []
            varList = []
            if nvec > 0:
                eigList = [[] for i in xrange(nvec)]

            if options.VERBOSE:
                print("Begin to read images on processor %d" % (myid))
            ttt = time()
            #imgdata = EMData.read_images(stack, all_proj)
            imgdata = []
            for index_of_proj in xrange(len(all_proj)):
                #img     = EMData()
                #img.read_image(stack, all_proj[index_of_proj])
                dmg = image_decimate_window_xform_ctf(
                    get_im(stack, all_proj[index_of_proj]), options.decimate,
                    options.window, options.CTF)
                #print dmg.get_xsize(), "init"
                imgdata.append(dmg)
            if options.VERBOSE:
                print("Reading images on processor %d done, time = %.2f" %
                      (myid, time() - ttt))
                print("On processor %d, we got %d images" %
                      (myid, len(imgdata)))
            mpi_barrier(MPI_COMM_WORLD)
            '''	
			imgdata2 = EMData.read_images(stack, range(img_begin, img_end))
			if options.fl > 0.0:
				for k in xrange(len(imgdata2)):
					imgdata2[k] = filt_tanl(imgdata2[k], options.fl, options.aa)
			if options.CTF:
				vol = recons3d_4nn_ctf_MPI(myid, imgdata2, 1.0, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)
			else:
				vol = recons3d_4nn_MPI(myid, imgdata2, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)
			if myid == main_node:
				vol.write_image("vol_ctf.hdf")
				print_msg("Writing to the disk volume reconstructed from averages as		:  %s\n"%("vol_ctf.hdf"))
			del vol, imgdata2
			mpi_barrier(MPI_COMM_WORLD)
			'''
            from applications import prepare_2d_forPCA
            from utilities import model_blank
            for i in xrange(len(proj_list)):
                ki = proj_angles[proj_list[i][0]][3]
                if ki >= symbaselen: continue
                mi = index[ki]
                phiM, thetaM, psiM, s2xM, s2yM = get_params_proj(imgdata[mi])

                grp_imgdata = []
                for j in xrange(img_per_grp):
                    mj = index[proj_angles[proj_list[i][j]][3]]
                    phi, theta, psi, s2x, s2y = get_params_proj(imgdata[mj])
                    alpha, sx, sy, mirror = params_3D_2D_NEW(
                        phi, theta, psi, s2x, s2y, mirror_list[i][j])
                    if thetaM <= 90:
                        if mirror == 0:
                            alpha, sx, sy, scale = compose_transform2(
                                alpha, sx, sy, 1.0, phiM - phi, 0.0, 0.0, 1.0)
                        else:
                            alpha, sx, sy, scale = compose_transform2(
                                alpha, sx, sy, 1.0, 180 - (phiM - phi), 0.0,
                                0.0, 1.0)
                    else:
                        if mirror == 0:
                            alpha, sx, sy, scale = compose_transform2(
                                alpha, sx, sy, 1.0, -(phiM - phi), 0.0, 0.0,
                                1.0)
                        else:
                            alpha, sx, sy, scale = compose_transform2(
                                alpha, sx, sy, 1.0, -(180 - (phiM - phi)), 0.0,
                                0.0, 1.0)
                    set_params2D(imgdata[mj], [alpha, sx, sy, mirror, 1.0])
                    grp_imgdata.append(imgdata[mj])
                    #print grp_imgdata[j].get_xsize(), imgdata[mj].get_xsize()

                if not options.no_norm:
                    #print grp_imgdata[j].get_xsize()
                    mask = model_circle(nx / 2 - 2, nx, nx)
                    for k in xrange(img_per_grp):
                        ave, std, minn, maxx = Util.infomask(
                            grp_imgdata[k], mask, False)
                        grp_imgdata[k] -= ave
                        grp_imgdata[k] /= std
                    del mask

                if options.fl > 0.0:
                    from filter import filt_ctf, filt_table
                    from fundamentals import fft, window2d
                    nx2 = 2 * nx
                    ny2 = 2 * ny
                    if options.CTF:
                        from utilities import pad
                        for k in xrange(img_per_grp):
                            grp_imgdata[k] = window2d(
                                fft(
                                    filt_tanl(
                                        filt_ctf(
                                            fft(
                                                pad(grp_imgdata[k], nx2, ny2,
                                                    1, 0.0)),
                                            grp_imgdata[k].get_attr("ctf"),
                                            binary=1), options.fl,
                                        options.aa)), nx, ny)
                            #grp_imgdata[k] = window2d(fft( filt_table( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa), fifi) ),nx,ny)
                            #grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa)
                    else:
                        for k in xrange(img_per_grp):
                            grp_imgdata[k] = filt_tanl(grp_imgdata[k],
                                                       options.fl, options.aa)
                            #grp_imgdata[k] = window2d(fft( filt_table( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa), fifi) ),nx,ny)
                            #grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa)
                else:
                    from utilities import pad, read_text_file
                    from filter import filt_ctf, filt_table
                    from fundamentals import fft, window2d
                    nx2 = 2 * nx
                    ny2 = 2 * ny
                    if options.CTF:
                        from utilities import pad
                        for k in xrange(img_per_grp):
                            grp_imgdata[k] = window2d(
                                fft(
                                    filt_ctf(fft(
                                        pad(grp_imgdata[k], nx2, ny2, 1, 0.0)),
                                             grp_imgdata[k].get_attr("ctf"),
                                             binary=1)), nx, ny)
                            #grp_imgdata[k] = window2d(fft( filt_table( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa), fifi) ),nx,ny)
                            #grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa)
                '''
				if i < 10 and myid == main_node:
					for k in xrange(10):
						grp_imgdata[k].write_image("grp%03d.hdf"%i, k)
				'''
                """
				if myid == main_node and i==0:
					for pp in xrange(len(grp_imgdata)):
						grp_imgdata[pp].write_image("pp.hdf", pp)
				"""
                ave, grp_imgdata = prepare_2d_forPCA(grp_imgdata)
                """
				if myid == main_node and i==0:
					for pp in xrange(len(grp_imgdata)):
						grp_imgdata[pp].write_image("qq.hdf", pp)
				"""

                var = model_blank(nx, ny)
                for q in grp_imgdata:
                    Util.add_img2(var, q)
                Util.mul_scalar(var, 1.0 / (len(grp_imgdata) - 1))
                # Switch to std dev
                var = square_root(threshold(var))
                #if options.CTF:	ave, var = avgvar_ctf(grp_imgdata, mode="a")
                #else:	            ave, var = avgvar(grp_imgdata, mode="a")
                """
				if myid == main_node:
					ave.write_image("avgv.hdf",i)
					var.write_image("varv.hdf",i)
				"""

                set_params_proj(ave, [phiM, thetaM, 0.0, 0.0, 0.0])
                set_params_proj(var, [phiM, thetaM, 0.0, 0.0, 0.0])

                aveList.append(ave)
                varList.append(var)

                if options.VERBOSE:
                    print("%5.2f%% done on processor %d" %
                          (i * 100.0 / len(proj_list), myid))
                if nvec > 0:
                    eig = pca(input_stacks=grp_imgdata,
                              subavg="",
                              mask_radius=radiuspca,
                              nvec=nvec,
                              incore=True,
                              shuffle=False,
                              genbuf=True)
                    for k in xrange(nvec):
                        set_params_proj(eig[k], [phiM, thetaM, 0.0, 0.0, 0.0])
                        eigList[k].append(eig[k])
                    """
					if myid == 0 and i == 0:
						for k in xrange(nvec):
							eig[k].write_image("eig.hdf", k)
					"""

            del imgdata
            #  To this point, all averages, variances, and eigenvectors are computed

            if options.ave2D:
                from fundamentals import fpol
                if myid == main_node:
                    km = 0
                    for i in xrange(number_of_proc):
                        if i == main_node:
                            for im in xrange(len(aveList)):
                                aveList[im].write_image(
                                    os.path.join(options.output_dir,
                                                 options.ave2D), km)
                                km += 1
                        else:
                            nl = mpi_recv(1, MPI_INT, i,
                                          SPARX_MPI_TAG_UNIVERSAL,
                                          MPI_COMM_WORLD)
                            nl = int(nl[0])
                            for im in xrange(nl):
                                ave = recv_EMData(i, im + i + 70000)
                                """
								nm = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
								nm = int(nm[0])
								members = mpi_recv(nm, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
								ave.set_attr('members', map(int, members))
								members = mpi_recv(nm, MPI_FLOAT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
								ave.set_attr('pix_err', map(float, members))
								members = mpi_recv(3, MPI_FLOAT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
								ave.set_attr('refprojdir', map(float, members))
								"""
                                tmpvol = fpol(ave, Tracker["nx"],
                                              Tracker["nx"], 1)
                                tmpvol.write_image(
                                    os.path.join(options.output_dir,
                                                 options.ave2D), km)
                                km += 1
                else:
                    mpi_send(len(aveList), 1, MPI_INT, main_node,
                             SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
                    for im in xrange(len(aveList)):
                        send_EMData(aveList[im], main_node, im + myid + 70000)
                        """
						members = aveList[im].get_attr('members')
						mpi_send(len(members), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						mpi_send(members, len(members), MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						members = aveList[im].get_attr('pix_err')
						mpi_send(members, len(members), MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						try:
							members = aveList[im].get_attr('refprojdir')
							mpi_send(members, 3, MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						except:
							mpi_send([-999.0,-999.0,-999.0], 3, MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						"""

            if options.ave3D:
                from fundamentals import fpol
                if options.VERBOSE:
                    print("Reconstructing 3D average volume")
                ave3D = recons3d_4nn_MPI(myid,
                                         aveList,
                                         symmetry=options.sym,
                                         npad=options.npad)
                bcast_EMData_to_all(ave3D, myid)
                if myid == main_node:
                    line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
                    ave3D = fpol(ave3D, Tracker["nx"], Tracker["nx"],
                                 Tracker["nx"])
                    ave3D.write_image(
                        os.path.join(options.output_dir, options.ave3D))
                    msg = ("%-70s:  %s\n" % (
                        "Writing to the disk volume reconstructed from averages as",
                        options.ave3D))
                    log_main.add(msg)
                    print(line, msg)
            del ave, var, proj_list, stack, phi, theta, psi, s2x, s2y, alpha, sx, sy, mirror, aveList

            if nvec > 0:
                for k in xrange(nvec):
                    if options.VERBOSE:
                        print("Reconstruction eigenvolumes", k)
                    cont = True
                    ITER = 0
                    mask2d = model_circle(radiuspca, nx, nx)
                    while cont:
                        #print "On node %d, iteration %d"%(myid, ITER)
                        eig3D = recons3d_4nn_MPI(myid,
                                                 eigList[k],
                                                 symmetry=options.sym,
                                                 npad=options.npad)
                        bcast_EMData_to_all(eig3D, myid, main_node)
                        if options.fl > 0.0:
                            eig3D = filt_tanl(eig3D, options.fl, options.aa)
                        if myid == main_node:
                            eig3D.write_image(
                                os.path.join(options.output_dir,
                                             "eig3d_%03d_%03d.hdf" % (k, ITER)))  # one volume per eigenvector and iteration
                        Util.mul_img(eig3D,
                                     model_circle(radiuspca, nx, nx, nx))
                        eig3Df, kb = prep_vol(eig3D)
                        del eig3D
                        cont = False
                        icont = 0
                        for l in xrange(len(eigList[k])):
                            phi, theta, psi, s2x, s2y = get_params_proj(
                                eigList[k][l])
                            proj = prgs(eig3Df, kb,
                                        [phi, theta, psi, s2x, s2y])
                            cl = ccc(proj, eigList[k][l], mask2d)
                            if cl < 0.0:
                                icont += 1
                                cont = True
                                eigList[k][l] *= -1.0
                        u = int(cont)
                        u = mpi_reduce([u], 1, MPI_INT, MPI_MAX, main_node,
                                       MPI_COMM_WORLD)
                        icont = mpi_reduce([icont], 1, MPI_INT, MPI_SUM,
                                           main_node, MPI_COMM_WORLD)

                        if myid == main_node:
                            line = strftime("%Y-%m-%d_%H:%M:%S",
                                            localtime()) + " =>"
                            u = int(u[0])
                            msg = (" Eigenvector: ", k, " number changed ",
                                   int(icont[0]))
                            log_main.add(msg)
                            print(line, msg)
                        else:
                            u = 0
                        u = bcast_number_to_all(u, main_node)
                        cont = bool(u)
                        ITER += 1

                    del eig3Df, kb
                    mpi_barrier(MPI_COMM_WORLD)
                del eigList, mask2d

            if options.ave3D: del ave3D
            if options.var2D:
                from fundamentals import fpol
                if myid == main_node:
                    km = 0
                    for i in xrange(number_of_proc):
                        if i == main_node:
                            for im in xrange(len(varList)):
                                tmpvol = fpol(varList[im], Tracker["nx"],
                                              Tracker["nx"], 1)
                                tmpvol.write_image(
                                    os.path.join(options.output_dir,
                                                 options.var2D), km)
                                km += 1
                        else:
                            nl = mpi_recv(1, MPI_INT, i,
                                          SPARX_MPI_TAG_UNIVERSAL,
                                          MPI_COMM_WORLD)
                            nl = int(nl[0])
                            for im in xrange(nl):
                                ave = recv_EMData(i, im + i + 70000)
                                tmpvol = fpol(ave, Tracker["nx"],
                                              Tracker["nx"], 1)
                                tmpvol.write_image(
                                    os.path.join(options.output_dir,
                                                 options.var2D), km)
                                km += 1
                else:
                    mpi_send(len(varList), 1, MPI_INT, main_node,
                             SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
                    for im in xrange(len(varList)):
                        send_EMData(varList[im], main_node, im + myid +
                                    70000)  #  What with the attributes??

            mpi_barrier(MPI_COMM_WORLD)

        if options.var3D:
            if myid == main_node and options.VERBOSE:
                line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
                msg = ("Reconstructing 3D variability volume")
                log_main.add(msg)
                print(line, msg)
            t6 = time()
            # radiusvar = options.radius
            # if( radiusvar < 0 ):  radiusvar = nx//2 -3
            res = recons3d_4nn_MPI(myid,
                                   varList,
                                   symmetry=options.sym,
                                   npad=options.npad)
            #res = recons3d_em_MPI(varList, vol_stack, options.iter, radiusvar, options.abs, True, options.sym, options.squ)
            if myid == main_node:
                from fundamentals import fpol
                res = fpol(res, Tracker["nx"], Tracker["nx"], Tracker["nx"])
                res.write_image(os.path.join(options.output_dir,
                                             options.var3D))

            if myid == main_node:
                line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
                msg = ("%-70s:  %.2f\n" %
                       ("Reconstructing 3D variability took [s]", time() - t6))
                log_main.add(msg)
                print(line, msg)
                if options.VERBOSE:
                    print("Reconstruction took: %.2f [min]" %
                          ((time() - t6) / 60))

            if myid == main_node:
                line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
                msg = ("%-70s:  %.2f\n" %
                       ("Total time for these computations [s]", time() - t0))
                print(line, msg)
                log_main.add(msg)
                if options.VERBOSE:
                    print("Total time for these computations: %.2f [min]" %
                          ((time() - t0) / 60))
                line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
                msg = ("sx3dvariability")
                print(line, msg)
                log_main.add(msg)

        from mpi import mpi_finalize
        mpi_finalize()

        if RUNNING_UNDER_MPI:
            global_def.MPI = False

        global_def.BATCH = False
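The ERROR messages identify this program as sx3dvariability. Following the usage string and the option help above, a hedged MPI invocation (stack and output names are placeholders; --fl is given in absolute frequency per the option help, rather than the "--fl=15." shown in the usage string):

	mpirun -np 8 sx3dvariability.py bdb:sdata --output_dir=var3d --ave2D=ave2D.hdf --var2D=var2D.hdf --ave3D=ave3D.hdf --var3D=var3D.hdf --img_per_grp=10 --fl=0.25 --aa=0.01 --sym=c2 --CTF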
Example #7
def main():
    import sys
    import os
    import math
    import random
    import pyemtbx.options
    import time
    from random import random, seed, randint
    from optparse import OptionParser

    progname = os.path.basename(sys.argv[0])
    usage = progname + """ [options] <inputfile> <outputfile>

	Generic 2-D image processing programs.

	Functionality:

	1.  Phase flip a stack of images and write output to new file:
		sxprocess.py input_stack.hdf output_stack.hdf --phase_flip
	
	2.  Resample (decimate or interpolate up) images (2D or 3D) in a stack to change the pixel size.
	    The window size will change accordingly.
		sxprocess input.hdf output.hdf  --changesize --ratio=0.5

	3.  Compute average power spectrum of a stack of 2D images with optional padding (option wn) with zeroes.
		sxprocess.py input_stack.hdf powerspectrum.hdf --pw [--wn=1024]

	4.  Generate a stack of projections bdb:data and micrographs with prefix mic (i.e., mic0.hdf, mic1.hdf etc) from structure input_structure.hdf, with CTF applied to both projections and micrographs:
		sxprocess.py input_structure.hdf data mic --generate_projections format="bdb":apix=5.2:CTF=True:boxsize=64

    5.  Retrieve original image numbers in the selected ISAC group (here group 12 from generation 3):
    	sxprocess.py  bdb:test3 class_averages_generation_3.hdf  list3_12.txt --isacgroup=12 --params=originalid

    6.  Retrieve original image numbers of images listed in ISAC output stack of averages:
    	sxprocess.py  select1.hdf  ohk.txt

    7.  Adjust rotationally averaged power spectrum of an image to that of a reference image or a reference 1D power spectrum stored in an ASCII file.
    	Optionally use a tangent low-pass filter.  Also works for a stack of images, in which case the output is also a stack.
    	sxprocess.py  vol.hdf ref.hdf  avol.hdf < 0.25 0.2> --adjpw
   	 	sxprocess.py  vol.hdf pw.txt   avol.hdf < 0.25 0.2> --adjpw

    8.  Generate a 1D rotationally averaged power spectrum of an image.
		sxprocess.py  vol.hdf --rotpw=rotpw.txt
    	# Output will contain three columns:
       (1) rotationally averaged power spectrum
       (2) logarithm of the rotationally averaged power spectrum
       (3) integer line number (from zero to approximately half the image size)

    9.  Apply 3D transformation (rotation and/or shift) to a set of orientation parameters associated with projection data.
    	sxprocess.py  --transformparams=phi,theta,psi,tx,ty,tz      input.txt  output.txt
    	The output file is then imported and 3D transformed volume computed:
    	sxheader.py  bdb:p  --params=xform.projection  --import=output.txt
    	mpirun -np 2 sxrecons3d_n.py  bdb:p tvol.hdf --MPI
    	The reconstructed volume is in the position of the volume computed using the input.txt parameters and then
    	transformed with rot_shift3D(vol, phi,theta,psi,tx,ty,tz)

   10.  Import ctf parameters from the output of sxcter into windowed particle headers.
	    There are three possible input file formats:  (1) all particles are in one stack, (2 or 3) particles are in stacks, each stack corresponding to a single micrograph.
	    In each case the particles should contain the name of the micrograph of origin stored in the attribute 'ptcl_source_image'.
        Normally this is done by e2boxer.py during windowing.
	    Particles whose defocus or astigmatism error exceeds the set thresholds will be skipped; otherwise, virtual stacks with the original name preceded by G will be created.
		sxprocess.py  --input=bdb:data  --importctf=outdir/partres  --defocuserror=10.0  --astigmatismerror=5.0
		#  Output will be a virtual stack bdb:Gdata
		sxprocess.py  --input="bdb:directory/stacks*"  --importctf=outdir/partres  --defocuserror=10.0  --astigmatismerror=5.0
		To concatenate output files:
		cd directory
		e2bdb.py . --makevstack=bdb:allparticles  --filt=G
		IMPORTANT:  Please do not move (or remove!) any input/intermediate EMAN2DB files as the information is linked between them.

   11. Scale 3D shifts.  The shifts in the input five-column text file with 3D orientation parameters will be DIVIDED by the scale factor
		sxprocess.py  orientationparams.txt  scaledparams.txt  --scale=0.5
   
   12. Generate adaptive mask from a given 3-D volume. 
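        For example (file names are placeholders; the mask options shown are the defaults):
		sxprocess.py  vol3d.hdf  mask3d.hdf  --adaptive_mask  --nsigma=1.0  --ndilation=3  --kernel_size=11  --gauss_standard_dev=9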


"""

    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option(
        "--order",
        action="store_true",
        help=
        "Two arguments are required: name of input stack and desired name of output stack. The output stack is the input stack sorted by similarity in terms of cross-correlation coefficent.",
        default=False)
    parser.add_option("--order_lookup",
                      action="store_true",
                      help="Test/Debug.",
                      default=False)
    parser.add_option("--order_metropolis",
                      action="store_true",
                      help="Test/Debug.",
                      default=False)
    parser.add_option("--order_pca",
                      action="store_true",
                      help="Test/Debug.",
                      default=False)
    parser.add_option(
        "--initial",
        type="int",
        default=-1,
        help=
        "Specifies which image will be used as an initial seed to form the chain. (default = 0, means the first image)"
    )
    parser.add_option(
        "--circular",
        action="store_true",
        help=
        "Select circular ordering (fisr image has to be similar to the last",
        default=False)
    parser.add_option(
        "--radius",
        type="int",
        default=-1,
        help="Radius of a circular mask for similarity based ordering")
    parser.add_option(
        "--changesize",
        action="store_true",
        help=
        "resample (decimate or interpolate up) images (2D or 3D) in a stack to change the pixel size.",
        default=False)
    parser.add_option(
        "--ratio",
        type="float",
        default=1.0,
        help=
        "The ratio of new to old image size (if <1 the pixel size will increase and image size decrease, if>1, the other way round"
    )
    parser.add_option(
        "--pw",
        action="store_true",
        help=
        "compute average power spectrum of a stack of 2-D images with optional padding (option wn) with zeroes",
        default=False)
    parser.add_option(
        "--wn",
        type="int",
        default=-1,
        help=
        "Size of window to use (should be larger/equal than particle box size, default padding to max(nx,ny))"
    )
    parser.add_option("--phase_flip",
                      action="store_true",
                      help="Phase flip the input stack",
                      default=False)
    parser.add_option(
        "--makedb",
        metavar="param1=value1:param2=value2",
        type="string",
        action="append",
        help=
        "One argument is required: name of key with which the database will be created. Fill in database with parameters specified as follows: --makedb param1=value1:param2=value2, e.g. 'gauss_width'=1.0:'pixel_input'=5.2:'pixel_output'=5.2:'thr_low'=1.0"
    )
    parser.add_option(
        "--generate_projections",
        metavar="param1=value1:param2=value2",
        type="string",
        action="append",
        help=
        "Three arguments are required: name of input structure from which to generate projections, desired name of output projection stack, and desired prefix for micrographs (e.g. if prefix is 'mic', then micrographs mic0.hdf, mic1.hdf etc will be generated). Optional arguments specifying format, apix, box size and whether to add CTF effects can be entered as follows after --generate_projections: format='bdb':apix=5.2:CTF=True:boxsize=100, or format='hdf', etc., where format is bdb or hdf, apix (pixel size) is a float, CTF is True or False, and boxsize denotes the dimension of the box (assumed to be a square). If an optional parameter is not specified, it will default as follows: format='bdb', apix=2.5, CTF=False, boxsize=64."
    )
    parser.add_option(
        "--isacgroup",
        type="int",
        help=
        "Retrieve original image numbers in the selected ISAC group. See ISAC documentation for details.",
        default=-1)
    parser.add_option(
        "--isacselect",
        action="store_true",
        help=
        "Retrieve original image numbers of images listed in ISAC output stack of averages. See ISAC documentation for details.",
        default=False)
    parser.add_option(
        "--params",
        type="string",
        default=None,
        help="Name of header of parameter, which one depends on specific option"
    )
    parser.add_option(
        "--adjpw",
        action="store_true",
        help="Adjust rotationally averaged power spectrum of an image",
        default=False)
    parser.add_option(
        "--rotpw",
        type="string",
        default=None,
        help=
        "Name of the text file to contain rotationally averaged power spectrum of the input image."
    )
    parser.add_option(
        "--transformparams",
        type="string",
        default=None,
        help=
        "Transform 3D projection orientation parameters using six 3D parameters (phi, theta,psi,sx,sy,sz).  Input: --transformparams=45.,66.,12.,-2,3,-5.5 desired six transformation of the reconstructed structure. Output: file with modified orientation parameters."
    )

    # import ctf estimates done using cter
    parser.add_option("--input",
                      type="string",
                      default=None,
                      help="Input particles.")
    parser.add_option(
        "--importctf",
        type="string",
        default=None,
        help="Name of the file containing CTF parameters produced by sxcter.")
    parser.add_option(
        "--defocuserror",
        type="float",
        default=1000000.0,
        help=
        "Exclude micrographs whose relative defocus error as estimated by sxcter is larger than defocuserror percent.  The error is computed as (std dev defocus)/defocus*100%"
    )
    parser.add_option(
        "--astigmatismerror",
        type="float",
        default=360.0,
        help=
        "Set to zero astigmatism for micrographs whose astigmatism angular error as estimated by sxcter is larger than astigmatismerror degrees."
    )

    # import ctf estimates done using cter
    parser.add_option(
        "--scale",
        type="float",
        default=-1.0,
        help=
        "Divide shifts in the input 3D orientation parameters text file by the scale factor."
    )

    # generate adaptive mask from a given 3-D volume
    parser.add_option("--adaptive_mask",
                      action="store_true",
                      help="create adavptive 3-D mask from a given volume",
                      default=False)
    parser.add_option(
        "--nsigma",
        type="float",
        default=1.,
        help=
        "number of times of sigma of the input volume to obtain the the large density cluster"
    )
    parser.add_option(
        "--ndilation",
        type="int",
        default=3,
        help=
        "number of times of dilation applied to the largest cluster of density"
    )
    parser.add_option(
        "--kernel_size",
        type="int",
        default=11,
        help="convolution kernel for smoothing the edge of the mask")
    parser.add_option(
        "--gauss_standard_dev",
        type="int",
        default=9,
        help="stanadard deviation value to generate Gaussian edge")

    (options, args) = parser.parse_args()

    global_def.BATCH = True

    if options.phase_flip:
        nargs = len(args)
        if nargs != 2:
            print "must provide name of input and output file!"
            return
        from EMAN2 import Processor
        instack = args[0]
        outstack = args[1]
        nima = EMUtil.get_image_count(instack)
        from filter import filt_ctf
        for i in xrange(nima):
            img = EMData()
            img.read_image(instack, i)
            try:
                ctf = img.get_attr('ctf')
            except:
                print "no ctf information in input stack! Exiting..."
                return

            dopad = True
            sign = 1
            binary = 1  # phase flip

            assert img.get_ysize() > 1
            dict = ctf.to_dict()
            dz = dict["defocus"]
            cs = dict["cs"]
            voltage = dict["voltage"]
            pixel_size = dict["apix"]
            b_factor = dict["bfactor"]
            ampcont = dict["ampcont"]
            dza = dict["dfdiff"]
            azz = dict["dfang"]

            if dopad and not img.is_complex(): ip = 1
            else: ip = 0
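            # Assemble the parameter dictionary for EMAN2's Fourier-space CTF processor.
            # binary = 1 means only the sign of the CTF is applied (pure phase flip, no amplitude
            # correction), sign selects the flip convention, and dopad/ip = 1 pads real-space
            # images before the transform.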

            params = {
                "filter_type": Processor.fourier_filter_types.CTF_,
                "defocus": dz,
                "Cs": cs,
                "voltage": voltage,
                "Pixel_size": pixel_size,
                "B_factor": b_factor,
                "amp_contrast": ampcont,
                "dopad": ip,
                "binary": binary,
                "sign": sign,
                "dza": dza,
                "azz": azz
            }

            tmp = Processor.EMFourierFilter(img, params)
            tmp.set_attr_dict({"ctf": ctf})

            tmp.write_image(outstack, i)

    elif options.changesize:
        nargs = len(args)
        if nargs != 2:
            ERROR("must provide name of input and output file!", "change size",
                  1)
            return
        from utilities import get_im
        instack = args[0]
        outstack = args[1]
        sub_rate = float(options.ratio)

        nima = EMUtil.get_image_count(instack)
        from fundamentals import resample
        for i in xrange(nima):
            resample(get_im(instack, i), sub_rate).write_image(outstack, i)

    elif options.isacgroup > -1:
        nargs = len(args)
        if nargs != 3:
            ERROR("Three files needed on input!", "isacgroup", 1)
            return
        from utilities import get_im
        instack = args[0]
        m = get_im(args[1], int(options.isacgroup)).get_attr("members")
        l = []
        for k in m:
            l.append(int(get_im(args[0], k).get_attr(options.params)))
        from utilities import write_text_file
        write_text_file(l, args[2])

    elif options.isacselect:
        nargs = len(args)
        if nargs != 2:
            ERROR("Two files needed on input!", "isacgroup", 1)
            return
        from utilities import get_im
        nima = EMUtil.get_image_count(args[0])
        m = []
        for k in xrange(nima):
            m += get_im(args[0], k).get_attr("members")
        m.sort()
        from utilities import write_text_file
        write_text_file(m, args[1])

    elif options.pw:
        nargs = len(args)
        if nargs < 2:
            ERROR("must provide name of input and output file!", "pw", 1)
            return
        from utilities import get_im
        d = get_im(args[0])
        nx = d.get_xsize()
        ny = d.get_ysize()
        if nargs == 3: mask = get_im(args[2])
        wn = int(options.wn)
        if wn == -1:
            wn = max(nx, ny)
        else:
            if ((wn < nx) or (wn < ny)):
                ERROR("window size cannot be smaller than the image size",
                      "pw", 1)
        n = EMUtil.get_image_count(args[0])
        from utilities import model_blank, model_circle, pad
        from EMAN2 import periodogram
        p = model_blank(wn, wn)
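        # p accumulates the periodograms: each image is optionally masked, has its average
        # (st[0]) subtracted, is padded with zeroes to wn x wn, and its periodogram is added;
        # dividing by the number of images gives the average power spectrum.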

        for i in xrange(n):
            d = get_im(args[0], i)
            if nargs == 3:
                d *= mask
            st = Util.infomask(d, None, True)
            d -= st[0]
            p += periodogram(pad(d, wn, wn, 1, 0.))
        p /= n
        p.write_image(args[1])

    elif options.adjpw:

        if len(args) < 3:
            ERROR(
                "filt_by_rops input target output fl aa (the last two are optional parameters of a low-pass filter)",
                "adjpw", 1)
            return
        img_stack = args[0]
        from math import sqrt
        from fundamentals import rops_table, fft
        from utilities import read_text_file, get_im
        from filter import filt_tanl, filt_table
        if (args[1][-3:] == 'txt'):
            rops_dst = read_text_file(args[1])
        else:
            rops_dst = rops_table(get_im(args[1]))

        out_stack = args[2]
        if (len(args) > 4):
            fl = float(args[3])
            aa = float(args[4])
        else:
            fl = -1.0
            aa = 0.0

        nimage = EMUtil.get_image_count(img_stack)
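        # For each image: take its FFT, compute the rotationally averaged power spectrum
        # (rops_src), build a per-frequency scaling table sqrt(rops_dst / rops_src), optionally
        # apply a tangent low-pass filter (fl, aa), and apply the table with filt_table before
        # writing the result.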

        for i in xrange(nimage):
            img = fft(get_im(img_stack, i))
            rops_src = rops_table(img)

            assert len(rops_dst) == len(rops_src)

            table = [0.0] * len(rops_dst)
            for j in xrange(len(rops_dst)):
                table[j] = sqrt(rops_dst[j] / rops_src[j])

            if (fl > 0.0):
                img = filt_tanl(img, fl, aa)
            img = fft(filt_table(img, table))
            img.write_image(out_stack, i)

    elif options.rotpw != None:

        if len(args) != 1:
            ERROR("Only one input permitted", "rotpw", 1)
            return
        from utilities import write_text_file, get_im
        from fundamentals import rops_table
        from math import log10
        t = rops_table(get_im(args[0]))
        x = range(len(t))
        r = [0.0] * len(x)
        for i in x:
            r[i] = log10(t[i])
        write_text_file([t, r, x], options.rotpw)

    elif options.transformparams != None:
        if len(args) != 2:
            ERROR(
                "Please provide names of input and output files with orientation parameters",
                "transformparams", 1)
            return
        from utilities import read_text_row, write_text_row
        transf = [0.0] * 6
        spl = options.transformparams.split(',')
        for i in xrange(len(spl)):
            transf[i] = float(spl[i])

        write_text_row(rotate_shift_params(read_text_row(args[0]), transf),
                       args[1])

    elif options.makedb != None:
        nargs = len(args)
        if nargs != 1:
            print "must provide exactly one argument denoting database key under which the input params will be stored"
            return
        dbkey = args[0]
        print "database key under which params will be stored: ", dbkey
        gbdb = js_open_dict("e2boxercache/gauss_box_DB.json")

        parmstr = 'dummy:' + options.makedb[0]
        (processorname, param_dict) = parsemodopt(parmstr)
        dbdict = {}
        for pkey in param_dict:
            if (pkey == 'invert_contrast') or (pkey == 'use_variance'):
                if param_dict[pkey] == 'True':
                    dbdict[pkey] = True
                else:
                    dbdict[pkey] = False
            else:
                dbdict[pkey] = param_dict[pkey]
        gbdb[dbkey] = dbdict

    elif options.generate_projections:
        nargs = len(args)
        if nargs != 3:
            ERROR("Must provide name of input structure(s) from which to generate projections, name of output projection stack, and prefix for output micrographs."\
            "sxprocess - generate projections",1)
            return
        inpstr = args[0]
        outstk = args[1]
        micpref = args[2]

        parmstr = 'dummy:' + options.generate_projections[0]
        (processorname, param_dict) = parsemodopt(parmstr)

        parm_CTF = False
        parm_format = 'bdb'
        parm_apix = 2.5

        if 'CTF' in param_dict:
            if param_dict['CTF'] == 'True':
                parm_CTF = True

        if 'format' in param_dict:
            parm_format = param_dict['format']

        if 'apix' in param_dict:
            parm_apix = float(param_dict['apix'])

        boxsize = 64
        if 'boxsize' in param_dict:
            boxsize = int(param_dict['boxsize'])

        print "pixel size: ", parm_apix, " format: ", parm_format, " add CTF: ", parm_CTF, " box size: ", boxsize

        scale_mult = 2500
        sigma_add = 1.5
        sigma_proj = 30.0
        sigma2_proj = 17.5
        sigma_gauss = 0.3
        sigma_mic = 30.0
        sigma2_mic = 17.5
        sigma_gauss_mic = 0.3

        if 'scale_mult' in param_dict:
            scale_mult = float(param_dict['scale_mult'])
        if 'sigma_add' in param_dict:
            sigma_add = float(param_dict['sigma_add'])
        if 'sigma_proj' in param_dict:
            sigma_proj = float(param_dict['sigma_proj'])
        if 'sigma2_proj' in param_dict:
            sigma2_proj = float(param_dict['sigma2_proj'])
        if 'sigma_gauss' in param_dict:
            sigma_gauss = float(param_dict['sigma_gauss'])
        if 'sigma_mic' in param_dict:
            sigma_mic = float(param_dict['sigma_mic'])
        if 'sigma2_mic' in param_dict:
            sigma2_mic = float(param_dict['sigma2_mic'])
        if 'sigma_gauss_mic' in param_dict:
            sigma_gauss_mic = float(param_dict['sigma_gauss_mic'])

        from filter import filt_gaussl, filt_ctf
        from utilities import drop_spider_doc, even_angles, model_gauss, delete_bdb, model_blank, pad, model_gauss_noise, set_params2D, set_params_proj
        from projection import prep_vol, prgs
        seed(14567)
        delta = 29
        angles = even_angles(delta, 0.0, 89.9, 0.0, 359.9, "S")
        nangle = len(angles)

        modelvol = []
        nvlms = EMUtil.get_image_count(inpstr)
        from utilities import get_im
        for k in xrange(nvlms):
            modelvol.append(get_im(inpstr, k))

        nx = modelvol[0].get_xsize()

        if nx != boxsize:
            ERROR("Requested box dimension does not match dimension of the input model.", \
            "sxprocess - generate projections",1)
        nvol = 10
        volfts = [[] for k in xrange(nvlms)]
        for k in xrange(nvlms):
            for i in xrange(nvol):
                sigma = sigma_add + random()  # 1.5-2.5
                addon = model_gauss(sigma, boxsize, boxsize, boxsize, sigma,
                                    sigma, 38, 38, 40)
                scale = scale_mult * (0.5 + random())
                vf, kb = prep_vol(modelvol[k] + scale * addon)
                volfts[k].append(vf)
        del vf, modelvol

        if parm_format == "bdb":
            stack_data = "bdb:" + outstk
            delete_bdb(stack_data)
        else:
            stack_data = outstk + ".hdf"
        Cs = 2.0
        pixel = parm_apix
        voltage = 120.0
        ampcont = 10.0
        ibd = 4096 / 2 - boxsize
        iprj = 0

        width = 240
        xstart = 8 + boxsize / 2
        ystart = 8 + boxsize / 2
        rowlen = 17
        from random import randint
        params = []
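        # Five synthetic micrographs are generated (idef = 3..7), each 4096 x 4096 pixels with
        # defocus = idef * 0.5; if CTF is requested, 15% astigmatism at 50 degrees is added.
        # Within each micrograph, projections are pasted on a grid starting at (xstart, ystart)
        # with 'width'-pixel spacing and 'rowlen' positions before moving to the next row.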
        for idef in xrange(3, 8):

            irow = 0
            icol = 0

            mic = model_blank(4096, 4096)
            defocus = idef * 0.5  #0.2
            if parm_CTF:
                astampl = defocus * 0.15
                astangl = 50.0
                ctf = generate_ctf([
                    defocus, Cs, voltage, pixel, ampcont, 0.0, astampl, astangl
                ])

            for i in xrange(nangle):
                for k in xrange(12):
                    dphi = 8.0 * (random() - 0.5)
                    dtht = 8.0 * (random() - 0.5)
                    psi = 360.0 * random()

                    phi = angles[i][0] + dphi
                    tht = angles[i][1] + dtht

                    s2x = 4.0 * (random() - 0.5)
                    s2y = 4.0 * (random() - 0.5)

                    params.append([phi, tht, psi, s2x, s2y])

                    ivol = iprj % nvol
                    #imgsrc = randint(0,nvlms-1)
                    imgsrc = iprj % nvlms
                    proj = prgs(volfts[imgsrc][ivol], kb,
                                [phi, tht, psi, -s2x, -s2y])

                    x = xstart + irow * width
                    y = ystart + icol * width

                    mic += pad(proj, 4096, 4096, 1, 0.0, x - 2048, y - 2048, 0)

                    proj = proj + model_gauss_noise(sigma_proj, nx, nx)
                    if parm_CTF:
                        proj = filt_ctf(proj, ctf)
                        proj.set_attr_dict({"ctf": ctf, "ctf_applied": 0})

                    proj = proj + filt_gaussl(
                        model_gauss_noise(sigma2_proj, nx, nx), sigma_gauss)
                    proj.set_attr("origimgsrc", imgsrc)
                    proj.set_attr("test_id", iprj)
                    # flags describing the status of the image (1 = true, 0 = false)
                    set_params2D(proj, [0.0, 0.0, 0.0, 0, 1.0])
                    set_params_proj(proj, [phi, tht, psi, s2x, s2y])

                    proj.write_image(stack_data, iprj)

                    icol += 1
                    if icol == rowlen:
                        icol = 0
                        irow += 1

                    iprj += 1

            mic += model_gauss_noise(sigma_mic, 4096, 4096)
            if parm_CTF:
                #apply CTF
                mic = filt_ctf(mic, ctf)
            mic += filt_gaussl(model_gauss_noise(sigma2_mic, 4096, 4096),
                               sigma_gauss_mic)

            mic.write_image(micpref + "%1d.hdf" % (idef - 3), 0)

        drop_spider_doc("params.txt", params)

    elif options.importctf != None:
        print ' IMPORTCTF  '
        from utilities import read_text_row, write_text_row
        from random import randint
        import subprocess
        grpfile = 'groupid%04d' % randint(1000, 9999)
        ctfpfile = 'ctfpfile%04d' % randint(1000, 9999)
        cterr = [options.defocuserror / 100.0, options.astigmatismerror]
        ctfs = read_text_row(options.importctf)
        for kk in xrange(len(ctfs)):
            root, name = os.path.split(ctfs[kk][-1])
            ctfs[kk][-1] = name[:-4]
        if (options.input[:4] != 'bdb:'):
            ERROR('Sorry, only bdb files implemented', 'importctf', 1)
        d = options.input[4:]
        #try:     str = d.index('*')
        #except:  str = -1
        from string import split
        import glob
        uu = os.path.split(d)
        uu = os.path.join(uu[0], 'EMAN2DB', uu[1] + '.bdb')
        flist = glob.glob(uu)
        for i in xrange(len(flist)):
            root, name = os.path.split(flist[i])
            root = root[:-7]
            name = name[:-4]
            fil = 'bdb:' + os.path.join(root, name)
            sourcemic = EMUtil.get_all_attributes(fil, 'ptcl_source_image')
            nn = len(sourcemic)
            gctfp = []
            groupid = []
            for kk in xrange(nn):
                junk, name2 = os.path.split(sourcemic[kk])
                name2 = name2[:-4]
                ctfp = [-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
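                # One row of the sxcter output per micrograph, as used here: the first eight columns
                # are the CTF parameters written to the particle headers, column 8 is treated as the
                # defocus spread (compared against the relative defocus-error threshold cterr[0]) and
                # column 10 as the astigmatism angle error; entries 6 and 7 (astigmatism) are zeroed
                # when that error exceeds cterr[1].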
                for ll in xrange(len(ctfs)):
                    if (name2 == ctfs[ll][-1]):
                        #  found correct
                        if (ctfs[ll][8] / ctfs[ll][0] <= cterr[0]):
                            #  acceptable defocus error
                            ctfp = ctfs[ll][:8]
                            if (ctfs[ll][10] > cterr[1]):
                                # error of astigmatism exceed the threshold, set astigmatism to zero.
                                ctfp[6] = 0.0
                                ctfp[7] = 0.0
                            gctfp.append(ctfp)
                            groupid.append(kk)
                        break
            if (len(groupid) > 0):
                write_text_row(groupid, grpfile)
                write_text_row(gctfp, ctfpfile)
                cmd = "{} {} {} {}".format(
                    'e2bdb.py', fil, '--makevstack=bdb:' + root + 'G' + name,
                    '--list=' + grpfile)
                #print cmd
                subprocess.call(cmd, shell=True)
                cmd = "{} {} {} {}".format('sxheader.py',
                                           'bdb:' + root + 'G' + name,
                                           '--params=ctf',
                                           '--import=' + ctfpfile)
                #print cmd
                subprocess.call(cmd, shell=True)
            else:
                print ' >>>  Group ', name, '  skipped.'

        cmd = "{} {} {}".format("rm -f", grpfile, ctfpfile)
        subprocess.call(cmd, shell=True)

    elif options.scale > 0.0:
        from utilities import read_text_row, write_text_row
        scale = options.scale
        nargs = len(args)
        if nargs != 2:
            print "Please provide names of input and output file!"
            return
        p = read_text_row(args[0])
        for i in xrange(len(p)):
            p[i][3] /= scale
            p[i][4] /= scale
        write_text_row(p, args[1])

    elif options.adaptive_mask:
        from utilities import get_im
        from morphology import adaptive_mask
        nsigma = options.nsigma
        ndilation = options.ndilation
        kernel_size = options.kernel_size
        gauss_standard_dev = options.gauss_standard_dev
        nargs = len(args)
        if nargs > 2:
            print "Too many inputs are given, try again!"
            return
        else:
            inputvol = get_im(args[0])
            input_path, input_file_name = os.path.split(args[0])
            input_file_name_root, ext = os.path.splitext(input_file_name)
            if nargs == 2: mask_file_name = args[1]
            else:
                mask_file_name = "adaptive_mask_for" + input_file_name_root + ".hdf"  # Only hdf file is output.
            mask3d = adaptive_mask(inputvol, nsigma, ndilation, kernel_size,
                                   gauss_standard_dev)
            mask3d.write_image(mask_file_name)

    else:
        ERROR("Please provide option name", "sxprocess.py", 1)
Example #8
0
def main():
    import global_def
    from optparse import OptionParser
    from EMAN2 import EMUtil
    import os
    import sys
    from time import time

    progname = os.path.basename(sys.argv[0])
    usage = progname + " proj_stack output_averages --MPI"
    parser = OptionParser(usage, version=SPARXVERSION)

    parser.add_option("--img_per_group",
                      type="int",
                      default=100,
                      help="number of images per group")
    parser.add_option("--radius",
                      type="int",
                      default=-1,
                      help="radius for alignment")
    parser.add_option(
        "--xr",
        type="string",
        default="2 1",
        help="range for translation search in x direction, search is +/xr")
    parser.add_option(
        "--yr",
        type="string",
        default="-1",
        help=
        "range for translation search in y direction, search is +/yr (default = same as xr)"
    )
    parser.add_option(
        "--ts",
        type="string",
        default="1 0.5",
        help=
        "step size of the translation search in both directions, search is -xr, -xr+ts, 0, xr-ts, xr, can be fractional"
    )
    parser.add_option(
        "--iter",
        type="int",
        default=30,
        help="number of iterations within alignment (default = 30)")
    parser.add_option(
        "--num_ali",
        type="int",
        default=5,
        help="number of alignments performed for stability (default = 5)")
    parser.add_option("--thld_err",
                      type="float",
                      default=1.0,
                      help="threshold of pixel error (default = 1.732)")
    parser.add_option(
        "--grouping",
        type="string",
        default="GRP",
        help=
        "do grouping of projections: PPR - per projection, GRP - different size groups, exclusive (default), GEV - grouping equal size"
    )
    parser.add_option(
        "--delta",
        type="float",
        default=-1.0,
        help="angular step for reference projections (required for GEV method)"
    )
    parser.add_option(
        "--fl",
        type="float",
        default=0.3,
        help="cut-off frequency of hyperbolic tangent low-pass Fourier filter")
    parser.add_option(
        "--aa",
        type="float",
        default=0.2,
        help="fall-off of hyperbolic tangent low-pass Fourier filter")
    parser.add_option("--CTF",
                      action="store_true",
                      default=False,
                      help="Consider CTF correction during the alignment ")
    parser.add_option("--MPI",
                      action="store_true",
                      default=False,
                      help="use MPI version")

    (options, args) = parser.parse_args()

    from mpi import mpi_init, mpi_comm_rank, mpi_comm_size, MPI_COMM_WORLD
    from mpi import mpi_barrier, mpi_send, mpi_recv, mpi_bcast, MPI_INT, mpi_finalize, MPI_FLOAT
    from applications import MPI_start_end, within_group_refinement, ali2d_ras
    from pixel_error import multi_align_stability
    from utilities import send_EMData, recv_EMData
    from utilities import get_image, bcast_number_to_all, set_params2D, get_params2D
    from utilities import group_proj_by_phitheta, model_circle, get_input_from_string

    sys.argv = mpi_init(len(sys.argv), sys.argv)
    myid = mpi_comm_rank(MPI_COMM_WORLD)
    number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
    main_node = 0

    if len(args) == 2:
        stack = args[0]
        outdir = args[1]
    else:
        ERROR("incomplete list of arguments", "sxproj_stability", 1, myid=myid)
        exit()
    if not options.MPI:
        ERROR("Non-MPI not supported!", "sxproj_stability", myid=myid)
        exit()

    if global_def.CACHE_DISABLE:
        from utilities import disable_bdb_cache
        disable_bdb_cache()
    global_def.BATCH = True

    #if os.path.exists(outdir):  ERROR('Output directory exists, please change the name and restart the program', "sxproj_stability", 1, myid)
    #mpi_barrier(MPI_COMM_WORLD)

    img_per_grp = options.img_per_group
    radius = options.radius
    ite = options.iter
    num_ali = options.num_ali
    thld_err = options.thld_err

    xrng = get_input_from_string(options.xr)
    if options.yr == "-1": yrng = xrng
    else: yrng = get_input_from_string(options.yr)
    step = get_input_from_string(options.ts)

    if myid == main_node:
        nima = EMUtil.get_image_count(stack)
        img = get_image(stack)
        nx = img.get_xsize()
        ny = img.get_ysize()
    else:
        nima = 0
        nx = 0
        ny = 0
    nima = bcast_number_to_all(nima)
    nx = bcast_number_to_all(nx)
    ny = bcast_number_to_all(ny)
    if radius == -1: radius = nx / 2 - 2
    mask = model_circle(radius, nx, nx)

    st = time()
    if options.grouping == "GRP":
        if myid == main_node:
            print "  A  ", myid, "  ", time() - st
            proj_attr = EMUtil.get_all_attributes(stack, "xform.projection")
            proj_params = []
            for i in xrange(nima):
                dp = proj_attr[i].get_params("spider")
                phi, theta, psi, s2x, s2y = dp["phi"], dp["theta"], dp[
                    "psi"], -dp["tx"], -dp["ty"]
                proj_params.append([phi, theta, psi, s2x, s2y])

            # Here is where the grouping is done. group_proj_by_phitheta is not well annotated,
            # so it is briefly explained here:
            # proj_list  (proj_list_all here): a list of lists of particle numbers; each list contains
            #              img_per_grp particle numbers, except possibly the last one. Depending on the
            #              number of particles left over, they either form their own group or are
            #              appended to the last group.
            # angle_list : also a list of lists; each list contains three numbers (phi, theta, delta),
            #              where (phi, theta) is the projection direction of the center of the group
            #              and delta is the angular range of the group.
            # mirror_list: also a list of lists; each list contains img_per_grp True/False values
            #              indicating whether the corresponding particle should take the mirrored position.
            # In this program angle_list and mirror_list are not of interest.
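            # A hypothetical illustration of the returned structures (values are made up), e.g. for
            # 250 particles and img_per_grp = 100:
            #   proj_list_all = [[12, 40, 77, ...], [3, 9, 151, ...]]      # last group absorbs the remainder
            #   angle_list    = [[35.2, 64.1, 8.7], [211.0, 88.3, 9.2]]    # (phi, theta, delta) per group
            #   mirror_list   = [[False, True, ...], [True, False, ...]]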

            proj_list_all, angle_list, mirror_list = group_proj_by_phitheta(
                proj_params, img_per_grp=img_per_grp)
            del proj_params
            print "  B  number of groups  ", myid, "  ", len(
                proj_list_all), time() - st
        mpi_barrier(MPI_COMM_WORLD)

        # Number of groups; there could actually be one or two more, since the size of the remaining
        # group varies. The leftover groups are simply assigned to the main node.
        n_grp = nima / img_per_grp - 1

        # Divide proj_list_all equally to all nodes, and becomes proj_list
        proj_list = []
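        # Groups are dealt out round-robin: group i goes to node (i % number_of_proc). The main node
        # keeps its own groups directly and, for every other node, first sends the group length and
        # then the list of particle ids; the leftover (irregularly sized) groups are appended to the
        # main node's proj_list afterwards.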
        for i in xrange(n_grp):
            proc_to_stay = i % number_of_proc
            if proc_to_stay == main_node:
                if myid == main_node: proj_list.append(proj_list_all[i])
            elif myid == main_node:
                mpi_send(len(proj_list_all[i]), 1, MPI_INT, proc_to_stay,
                         SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
                mpi_send(proj_list_all[i], len(proj_list_all[i]), MPI_INT,
                         proc_to_stay, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
            elif myid == proc_to_stay:
                img_per_grp = mpi_recv(1, MPI_INT, main_node,
                                       SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
                img_per_grp = int(img_per_grp[0])
                temp = mpi_recv(img_per_grp, MPI_INT, main_node,
                                SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
                proj_list.append(map(int, temp))
                del temp
            mpi_barrier(MPI_COMM_WORLD)
        print "  C  ", myid, "  ", time() - st
        if myid == main_node:
            # Assign the remaining groups to main_node
            for i in xrange(n_grp, len(proj_list_all)):
                proj_list.append(proj_list_all[i])
            del proj_list_all, angle_list, mirror_list

    #   Compute stability per projection direction; an equal number of projections is assigned to each direction, so the groups overlap
    elif options.grouping == "GEV":
        if options.delta == -1.0:
            ERROR(
                "Angular step for reference projections is required for GEV method",
                "sxproj_stability", 1)
        from utilities import even_angles, nearestk_to_refdir, getvec
        refproj = even_angles(options.delta)
        img_begin, img_end = MPI_start_end(len(refproj), number_of_proc, myid)
        # Now each processor keeps its own share of reference projections
        refprojdir = refproj[img_begin:img_end]
        del refproj

        ref_ang = [0.0] * (len(refprojdir) * 2)
        for i in xrange(len(refprojdir)):
            ref_ang[i * 2] = refprojdir[0][0]
            ref_ang[i * 2 + 1] = refprojdir[0][1] + i * 0.1

        print "  A  ", myid, "  ", time() - st
        proj_attr = EMUtil.get_all_attributes(stack, "xform.projection")
        #  the solution below is very slow, do not use it unless there is a problem with the I/O
        """
		for i in xrange(number_of_proc):
			if myid == i:
				proj_attr = EMUtil.get_all_attributes(stack, "xform.projection")
			mpi_barrier(MPI_COMM_WORLD)
		"""
        print "  B  ", myid, "  ", time() - st

        proj_ang = [0.0] * (nima * 2)
        for i in xrange(nima):
            dp = proj_attr[i].get_params("spider")
            proj_ang[i * 2] = dp["phi"]
            proj_ang[i * 2 + 1] = dp["theta"]
        print "  C  ", myid, "  ", time() - st
        asi = Util.nearestk_to_refdir(proj_ang, ref_ang, img_per_grp)
        del proj_ang, ref_ang
        proj_list = []
        for i in xrange(len(refprojdir)):
            proj_list.append(asi[i * img_per_grp:(i + 1) * img_per_grp])
        del asi
        print "  D  ", myid, "  ", time() - st
        #from sys import exit
        #exit()

    #   Compute stability per projection
    elif options.grouping == "PPR":
        print "  A  ", myid, "  ", time() - st
        proj_attr = EMUtil.get_all_attributes(stack, "xform.projection")
        print "  B  ", myid, "  ", time() - st
        proj_params = []
        for i in xrange(nima):
            dp = proj_attr[i].get_params("spider")
            phi, theta, psi, s2x, s2y = dp["phi"], dp["theta"], dp[
                "psi"], -dp["tx"], -dp["ty"]
            proj_params.append([phi, theta, psi, s2x, s2y])
        img_begin, img_end = MPI_start_end(nima, number_of_proc, myid)
        print "  C  ", myid, "  ", time() - st
        from utilities import nearest_proj
        proj_list, mirror_list = nearest_proj(
            proj_params, img_per_grp,
            range(img_begin, img_begin + 1))  #range(img_begin, img_end))
        refprojdir = proj_params[img_begin:img_end]
        del proj_params, mirror_list
        print "  D  ", myid, "  ", time() - st
    else:
        ERROR("Incorrect projection grouping option", "sxproj_stability", 1)
    """
	from utilities import write_text_file
	for i in xrange(len(proj_list)):
		write_text_file(proj_list[i],"projlist%06d_%04d"%(i,myid))
	"""

    ###########################################################################################################
    # Begin stability test
    from utilities import get_params_proj, read_text_file
    #if myid == 0:
    #	from utilities import read_text_file
    #	proj_list[0] = map(int, read_text_file("lggrpp0.txt"))

    from utilities import model_blank
    aveList = [model_blank(nx, ny) for i in xrange(len(proj_list))]  # one independent blank image per group
    if options.grouping == "GRP":
        refprojdir = [[0.0, 0.0, -1.0]] * len(proj_list)
    for i in xrange(len(proj_list)):
        print "  E  ", myid, "  ", time() - st
        class_data = EMData.read_images(stack, proj_list[i])
        #print "  R  ",myid,"  ",time()-st
        if options.CTF:
            from filter import filt_ctf
            for im in xrange(len(class_data)):  #  MEM LEAK!!
                atemp = class_data[im].copy()
                btemp = filt_ctf(atemp, atemp.get_attr("ctf"), binary=1)
                class_data[im] = btemp
                #class_data[im] = filt_ctf(class_data[im], class_data[im].get_attr("ctf"), binary=1)
        for im in class_data:
            try:
                t = im.get_attr(
                    "xform.align2d")  # if they are there, no need to set them!
            except:
                try:
                    t = im.get_attr("xform.projection")
                    d = t.get_params("spider")
                    set_params2D(im, [0.0, -d["tx"], -d["ty"], 0, 1.0])
                except:
                    set_params2D(im, [0.0, 0.0, 0.0, 0, 1.0])
        #print "  F  ",myid,"  ",time()-st
        # Here, we perform realignment num_ali times
        all_ali_params = []
        for j in xrange(num_ali):
            if (xrng[0] == 0.0 and yrng[0] == 0.0):
                avet = ali2d_ras(class_data,
                                 randomize=True,
                                 ir=1,
                                 ou=radius,
                                 rs=1,
                                 step=1.0,
                                 dst=90.0,
                                 maxit=ite,
                                 check_mirror=True,
                                 FH=options.fl,
                                 FF=options.aa)
            else:
                avet = within_group_refinement(class_data, mask, True, 1,
                                               radius, 1, xrng, yrng, step,
                                               90.0, ite, options.fl,
                                               options.aa)
            ali_params = []
            for im in xrange(len(class_data)):
                alpha, sx, sy, mirror, scale = get_params2D(class_data[im])
                ali_params.extend([alpha, sx, sy, mirror])
            all_ali_params.append(ali_params)
        #aveList[i] = avet
        #print "  G  ",myid,"  ",time()-st
        del ali_params
        # We determine the stability of this group here.
        # stable_set contains all particles deemed stable; it is a list of lists, where each entry
        # holds the pixel error, the image number within this subset, and the 2-D alignment
        # parameters used below (alpha, sx, sy, mirror). stable_set is sorted by pixel error.
        #from utilities import write_text_file
        #write_text_file(all_ali_params, "all_ali_params%03d.txt"%myid)
        stable_set, mir_stab_rate, average_pix_err = multi_align_stability(
            all_ali_params, 0.0, 10000.0, thld_err, False, 2 * radius + 1)
        #print "  H  ",myid,"  ",time()-st
        if (len(stable_set) > 5):
            stable_set_id = []
            members = []
            pix_err = []
            # First put the stable members into attr 'members' and 'pix_err'
            for s in stable_set:
                # s[1] - number in this subset
                stable_set_id.append(s[1])
                # the original image number
                members.append(proj_list[i][s[1]])
                pix_err.append(s[0])
            # Then put the unstable members into attr 'members' and 'pix_err'
            from fundamentals import rot_shift2D
            avet.to_zero()
            if options.grouping == "GRP":
                aphi = 0.0
                atht = 0.0
                vphi = 0.0
                vtht = 0.0
            l = -1
            for j in xrange(len(proj_list[i])):
                #  Here it only works if stable_set_id is sorted in increasing order; note how l progresses
                if j in stable_set_id:
                    l += 1
                    avet += rot_shift2D(class_data[j], stable_set[l][2][0],
                                        stable_set[l][2][1],
                                        stable_set[l][2][2],
                                        stable_set[l][2][3])
                    if options.grouping == "GRP":
                        phi, theta, psi, sxs, sys = get_params_proj(
                            class_data[j])
                        if (theta > 90.0):
                            phi = (phi + 540.0) % 360.0
                            theta = 180.0 - theta
                        aphi += phi
                        atht += theta
                        vphi += phi * phi
                        vtht += theta * theta
                else:
                    members.append(proj_list[i][j])
                    pix_err.append(99999.99)
            aveList[i] = avet.copy()
            if l > 1:
                l += 1
                aveList[i] /= l
                if options.grouping == "GRP":
                    aphi /= l
                    atht /= l
                    vphi = (vphi - l * aphi * aphi) / l
                    vtht = (vtht - l * atht * atht) / l
                    from math import sqrt
                    refprojdir[i] = [
                        aphi, atht,
                        (sqrt(max(vphi, 0.0)) + sqrt(max(vtht, 0.0))) / 2.0
                    ]

            # Here more information has to be stored, PARTICULARLY WHAT IS THE REFERENCE DIRECTION
            aveList[i].set_attr('members', members)
            aveList[i].set_attr('refprojdir', refprojdir[i])
            aveList[i].set_attr('pixerr', pix_err)
        else:
            print " empty group ", i, refprojdir[i]
            aveList[i].set_attr('members', [-1])
            aveList[i].set_attr('refprojdir', refprojdir[i])
            aveList[i].set_attr('pixerr', [99999.])

    del class_data

    if myid == main_node:
        km = 0
        for i in xrange(number_of_proc):
            if i == main_node:
                for im in xrange(len(aveList)):
                    aveList[im].write_image(args[1], km)
                    km += 1
            else:
                nl = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL,
                              MPI_COMM_WORLD)
                nl = int(nl[0])
                for im in xrange(nl):
                    ave = recv_EMData(i, im + i + 70000)
                    nm = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL,
                                  MPI_COMM_WORLD)
                    nm = int(nm[0])
                    members = mpi_recv(nm, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL,
                                       MPI_COMM_WORLD)
                    ave.set_attr('members', map(int, members))
                    members = mpi_recv(nm, MPI_FLOAT, i,
                                       SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
                    ave.set_attr('pixerr', map(float, members))
                    members = mpi_recv(3, MPI_FLOAT, i,
                                       SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
                    ave.set_attr('refprojdir', map(float, members))
                    ave.write_image(args[1], km)
                    km += 1
    else:
        mpi_send(len(aveList), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL,
                 MPI_COMM_WORLD)
        for im in xrange(len(aveList)):
            send_EMData(aveList[im], main_node, im + myid + 70000)
            members = aveList[im].get_attr('members')
            mpi_send(len(members), 1, MPI_INT, main_node,
                     SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
            mpi_send(members, len(members), MPI_INT, main_node,
                     SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
            members = aveList[im].get_attr('pixerr')
            mpi_send(members, len(members), MPI_FLOAT, main_node,
                     SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
            try:
                members = aveList[im].get_attr('refprojdir')
                mpi_send(members, 3, MPI_FLOAT, main_node,
                         SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
            except:
                mpi_send([-999.0, -999.0, -999.0], 3, MPI_FLOAT, main_node,
                         SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)

    global_def.BATCH = False
    mpi_barrier(MPI_COMM_WORLD)
    from mpi import mpi_finalize
    mpi_finalize()
Example #9
0
def ali3d_MPI(stack,
              ref_vol,
              outdir,
              maskfile=None,
              ir=1,
              ou=-1,
              rs=1,
              xr="4 2 2 1",
              yr="-1",
              ts="1 1 0.5 0.25",
              delta="10 6 4 4",
              an="-1",
              center=0,
              maxit=5,
              term=95,
              CTF=False,
              fourvar=False,
              snr=1.0,
              ref_a="S",
              sym="c1",
              sort=True,
              cutoff=999.99,
              pix_cutoff="0",
              two_tail=False,
              model_jump="1 1 1 1 1",
              restart=False,
              save_half=False,
              protos=None,
              oplane=None,
              lmask=-1,
              ilmask=-1,
              findseam=False,
              vertstep=None,
              hpars="-1",
              hsearch="0.0 50.0",
              full_output=False,
              compare_repro=False,
              compare_ref_free="-1",
              ref_free_cutoff="-1 -1 -1 -1",
              wcmask=None,
              debug=False,
              recon_pad=4,
              olmask=75):

    from alignment import Numrinit, prepare_refrings
    from utilities import model_circle, get_image, drop_image, get_input_from_string
    from utilities import bcast_list_to_all, bcast_number_to_all, reduce_EMData_to_root, bcast_EMData_to_all
    from utilities import send_attr_dict
    from utilities import get_params_proj, file_type
    from fundamentals import rot_avg_image
    import os
    import types
    from utilities import print_begin_msg, print_end_msg, print_msg
    from mpi import mpi_bcast, mpi_comm_size, mpi_comm_rank, MPI_FLOAT, MPI_COMM_WORLD, mpi_barrier, mpi_reduce
    from mpi import mpi_reduce, MPI_INT, MPI_SUM, mpi_finalize
    from filter import filt_ctf
    from projection import prep_vol, prgs
    from statistics import hist_list, varf3d_MPI, fsc_mask
    from numpy import array, bincount, array2string, ones

    number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
    myid = mpi_comm_rank(MPI_COMM_WORLD)
    main_node = 0
    if myid == main_node:
        if os.path.exists(outdir):
            ERROR(
                'Output directory exists, please change the name and restart the program',
                "ali3d_MPI", 1)
        os.mkdir(outdir)
    mpi_barrier(MPI_COMM_WORLD)

    if debug:
        from time import sleep
        while not os.path.exists(outdir):
            print "Node ", myid, "  waiting..."
            sleep(5)

        info_file = os.path.join(outdir, "progress%04d" % myid)
        finfo = open(info_file, 'w')
    else:
        finfo = None
    mjump = get_input_from_string(model_jump)
    xrng = get_input_from_string(xr)
    if yr == "-1": yrng = xrng
    else: yrng = get_input_from_string(yr)
    step = get_input_from_string(ts)
    delta = get_input_from_string(delta)
    ref_free_cutoff = get_input_from_string(ref_free_cutoff)
    pix_cutoff = get_input_from_string(pix_cutoff)

    lstp = min(len(xrng), len(yrng), len(step), len(delta))
    if an == "-1":
        an = [-1] * lstp
    else:
        an = get_input_from_string(an)
    # make sure pix_cutoff is set for all iterations
    if len(pix_cutoff) < lstp:
        for i in xrange(len(pix_cutoff), lstp):
            pix_cutoff.append(pix_cutoff[-1])
    # don't waste time on sub-pixel alignment for low-resolution ang incr
    for i in range(len(step)):
        if (delta[i] > 4 or delta[i] == -1) and step[i] < 1:
            step[i] = 1

    first_ring = int(ir)
    rstep = int(rs)
    last_ring = int(ou)
    max_iter = int(maxit)
    center = int(center)

    nrefs = EMUtil.get_image_count(ref_vol)
    nmasks = 0
    if maskfile:
        # read number of masks within each maskfile (mc)
        nmasks = EMUtil.get_image_count(maskfile)
        # open masks within maskfile (mc)
        maskF = EMData.read_images(maskfile, xrange(nmasks))
    vol = EMData.read_images(ref_vol, xrange(nrefs))
    nx = vol[0].get_xsize()

    ## make sure box sizes are the same
    if myid == main_node:
        im = EMData.read_images(stack, [0])
        bx = im[0].get_xsize()
        if bx != nx:
            print_msg(
                "Error: Stack box size (%i) differs from initial model (%i)\n"
                % (bx, nx))
            sys.exit()
        del im, bx

    # for helical processing:
    helicalrecon = False
    if protos is not None or hpars != "-1" or findseam is True:
        helicalrecon = True
        # if no out-of-plane param set, use 5 degrees
        if oplane is None:
            oplane = 5.0
    if protos is not None:
        proto = get_input_from_string(protos)
        if len(proto) != nrefs:
            print_msg("Error: insufficient protofilament numbers supplied")
            sys.exit()
    if hpars != "-1":
        hpars = get_input_from_string(hpars)
        if len(hpars) != 2 * nrefs:
            print_msg("Error: insufficient helical parameters supplied")
            sys.exit()
    ## create helical parameter file for helical reconstruction
    if helicalrecon is True and myid == main_node:
        from hfunctions import createHpar
        # create initial helical parameter files
        dp = [0] * nrefs
        dphi = [0] * nrefs
        vdp = [0] * nrefs
        vdphi = [0] * nrefs
        for iref in xrange(nrefs):
            hpar = os.path.join(outdir, "hpar%02d.spi" % (iref))
            params = False
            if hpars != "-1":
                # if helical parameters explicitly given, set twist & rise
                params = [float(hpars[iref * 2]), float(hpars[(iref * 2) + 1])]
            dp[iref], dphi[iref], vdp[iref], vdphi[iref] = createHpar(
                hpar, proto[iref], params, vertstep)

    # get values for helical search parameters
    hsearch = get_input_from_string(hsearch)
    if len(hsearch) != 2:
        print_msg("Error: specify outer and inner radii for helical search")
        sys.exit()

    if last_ring < 0 or last_ring > int(nx / 2) - 2:
        last_ring = int(nx / 2) - 2

    if myid == main_node:
        #	import user_functions
        #	user_func = user_functions.factory[user_func_name]

        print_begin_msg("ali3d_MPI")
        print_msg("Input stack		 : %s\n" % (stack))
        print_msg("Reference volume	    : %s\n" % (ref_vol))
        print_msg("Output directory	    : %s\n" % (outdir))
        if nmasks > 0:
            print_msg("Maskfile (number of masks)  : %s (%i)\n" %
                      (maskfile, nmasks))
        print_msg("Inner radius		: %i\n" % (first_ring))
        print_msg("Outer radius		: %i\n" % (last_ring))
        print_msg("Ring step		   : %i\n" % (rstep))
        print_msg("X search range	      : %s\n" % (xrng))
        print_msg("Y search range	      : %s\n" % (yrng))
        print_msg("Translational step	  : %s\n" % (step))
        print_msg("Angular step		: %s\n" % (delta))
        print_msg("Angular search range	: %s\n" % (an))
        print_msg("Maximum iteration	   : %i\n" % (max_iter))
        print_msg("Center type		 : %i\n" % (center))
        print_msg("CTF correction	      : %s\n" % (CTF))
        print_msg("Signal-to-Noise Ratio       : %f\n" % (snr))
        print_msg("Reference projection method : %s\n" % (ref_a))
        print_msg("Symmetry group	      : %s\n" % (sym))
        print_msg("Fourier padding for 3D      : %i\n" % (recon_pad))
        print_msg("Number of reference models  : %i\n" % (nrefs))
        print_msg("Sort images between models  : %s\n" % (sort))
        print_msg("Allow images to jump	: %s\n" % (mjump))
        print_msg("CC cutoff standard dev      : %f\n" % (cutoff))
        print_msg("Two tail cutoff	     : %s\n" % (two_tail))
        print_msg("Termination pix error       : %f\n" % (term))
        print_msg("Pixel error cutoff	  : %s\n" % (pix_cutoff))
        print_msg("Restart		     : %s\n" % (restart))
        print_msg("Full output		 : %s\n" % (full_output))
        print_msg("Compare reprojections       : %s\n" % (compare_repro))
        print_msg("Compare ref free class avgs : %s\n" % (compare_ref_free))
        print_msg("Use cutoff from ref free    : %s\n" % (ref_free_cutoff))
        if protos:
            print_msg("Protofilament numbers	: %s\n" % (proto))
            print_msg("Using helical search range   : %s\n" % hsearch)
        if findseam is True:
            print_msg("Using seam-based reconstruction\n")
        if hpars != "-1":
            print_msg("Using hpars		  : %s\n" % hpars)
        if vertstep != None:
            print_msg("Using vertical step    : %.2f\n" % vertstep)
        if save_half is True:
            print_msg("Saving even/odd halves\n")
        for i in xrange(100):
            print_msg("*")
        print_msg("\n\n")
    if maskfile:
        if type(maskfile) is types.StringType: mask3D = get_image(maskfile)
        else: mask3D = maskfile
    else: mask3D = model_circle(last_ring, nx, nx, nx)

    numr = Numrinit(first_ring, last_ring, rstep, "F")
    mask2D = model_circle(last_ring, nx, nx) - model_circle(first_ring, nx, nx)

    fscmask = model_circle(last_ring, nx, nx, nx)
    if CTF:
        from filter import filt_ctf
    from reconstruction_rjh import rec3D_MPI_noCTF

    if myid == main_node:
        active = EMUtil.get_all_attributes(stack, 'active')
        list_of_particles = []
        for im in xrange(len(active)):
            if active[im]: list_of_particles.append(im)
        del active
        nima = len(list_of_particles)
    else:
        nima = 0
    total_nima = bcast_number_to_all(nima, source_node=main_node)

    if myid != main_node:
        list_of_particles = [-1] * total_nima
    list_of_particles = bcast_list_to_all(list_of_particles,
                                          source_node=main_node)

    image_start, image_end = MPI_start_end(total_nima, number_of_proc, myid)

    # create a list of images for each node
    list_of_particles = list_of_particles[image_start:image_end]
    nima = len(list_of_particles)
    if debug:
        finfo.write("image_start, image_end: %d %d\n" %
                    (image_start, image_end))
        finfo.flush()

    data = EMData.read_images(stack, list_of_particles)

    t_zero = Transform({
        "type": "spider",
        "phi": 0,
        "theta": 0,
        "psi": 0,
        "tx": 0,
        "ty": 0
    })
    transmulti = [[t_zero for i in xrange(nrefs)] for j in xrange(nima)]

    for iref, im in ((iref, im) for iref in xrange(nrefs)
                     for im in xrange(nima)):
        if nrefs == 1:
            transmulti[im][iref] = data[im].get_attr("xform.projection")
        else:
            # if multi models, keep track of eulers for all models
            try:
                transmulti[im][iref] = data[im].get_attr("eulers_txty.%i" %
                                                         iref)
            except:
                data[im].set_attr("eulers_txty.%i" % iref, t_zero)

    scoremulti = [[0.0 for i in xrange(nrefs)] for j in xrange(nima)]
    pixelmulti = [[0.0 for i in xrange(nrefs)] for j in xrange(nima)]
    ref_res = [0.0 for x in xrange(nrefs)]
    apix = data[0].get_attr('apix_x')

    # for oplane parameter, create cylindrical mask
    if oplane is not None and myid == main_node:
        from hfunctions import createCylMask
        cmaskf = os.path.join(outdir, "mask3D_cyl.mrc")
        mask3D = createCylMask(data, olmask, lmask, ilmask, cmaskf)
        # if finding seam of helix, create wedge masks
        if findseam is True:
            wedgemask = []
            for pf in xrange(nrefs):
                wedgemask.append(EMData())
            # wedgemask option
            if wcmask is not None:
                wcmask = get_input_from_string(wcmask)
                if len(wcmask) != 3:
                    print_msg(
                        "Error: wcmask option requires 3 values: x y radius")
                    sys.exit()

    # determine if particles have helix info:
    try:
        data[0].get_attr('h_angle')
        original_data = []
        boxmask = True
        from hfunctions import createBoxMask
    except:
        boxmask = False

    # prepare particles
    for im in xrange(nima):
        data[im].set_attr('ID', list_of_particles[im])
        data[im].set_attr('pix_score', int(0))
        if CTF:
            # only phaseflip particles, not full CTF correction
            ctf_params = data[im].get_attr("ctf")
            st = Util.infomask(data[im], mask2D, False)
            data[im] -= st[0]
            data[im] = filt_ctf(data[im], ctf_params, sign=-1, binary=1)
            data[im].set_attr('ctf_applied', 1)
        # for window mask:
        if boxmask is True:
            h_angle = data[im].get_attr("h_angle")
            original_data.append(data[im].copy())
            bmask = createBoxMask(nx, apix, ou, lmask, h_angle)
            data[im] *= bmask
            del bmask
    if debug:
        finfo.write('%d loaded  \n' % nima)
        finfo.flush()
    if myid == main_node:
        # initialize data for the reference preparation function
        ref_data = [mask3D, max(center, 0), None, None, None, None]
        # for method -1, switch off centering in user function

    from time import time

    #  this is needed for gathering of pixel errors
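    #  disps/recvcount give each node's offset and count for mpi_gatherv:
    #  one pixel error per particle, and nrefs scores per particle for the *_score variants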
    disps = []
    recvcount = []
    disps_score = []
    recvcount_score = []
    for im in xrange(number_of_proc):
        if (im == main_node):
            disps.append(0)
            disps_score.append(0)
        else:
            disps.append(disps[im - 1] + recvcount[im - 1])
            disps_score.append(disps_score[im - 1] + recvcount_score[im - 1])
        ib, ie = MPI_start_end(total_nima, number_of_proc, im)
        recvcount.append(ie - ib)
        recvcount_score.append((ie - ib) * nrefs)

    pixer = [0.0] * nima
    cs = [0.0] * 3
    total_iter = 0
    volodd = EMData.read_images(ref_vol, xrange(nrefs))
    voleve = EMData.read_images(ref_vol, xrange(nrefs))

    if restart:
        # recreate initial volumes from alignments stored in header
        itout = "000_00"
        for iref in xrange(nrefs):
            if (nrefs == 1):
                modout = ""
            else:
                modout = "_model_%02d" % (iref)

            if (sort):
                group = iref
                for im in xrange(nima):
                    imgroup = data[im].get_attr('group')
                    if imgroup == iref:
                        data[im].set_attr('xform.projection',
                                          transmulti[im][iref])
            else:
                group = int(999)
                for im in xrange(nima):
                    data[im].set_attr('xform.projection', transmulti[im][iref])

            fscfile = os.path.join(outdir, "fsc_%s%s" % (itout, modout))

            vol[iref], fscc, volodd[iref], voleve[iref] = rec3D_MPI_noCTF(
                data,
                sym,
                fscmask,
                fscfile,
                myid,
                main_node,
                index=group,
                npad=recon_pad)

            if myid == main_node:
                if helicalrecon:
                    from hfunctions import processHelicalVol
                    vstep = None
                    if vertstep is not None:
                        vstep = (vdp[iref], vdphi[iref])
                    print_msg(
                        "Old rise and twist for model %i     : %8.3f, %8.3f\n"
                        % (iref, dp[iref], dphi[iref]))
                    hvals = processHelicalVol(vol[iref], voleve[iref],
                                              volodd[iref], iref, outdir,
                                              itout, dp[iref], dphi[iref],
                                              apix, hsearch, findseam, vstep,
                                              wcmask)
                    (vol[iref], voleve[iref], volodd[iref], dp[iref],
                     dphi[iref], vdp[iref], vdphi[iref]) = hvals
                    print_msg(
                        "New rise and twist for model %i     : %8.3f, %8.3f\n"
                        % (iref, dp[iref], dphi[iref]))
                    # get new FSC from symmetrized half volumes
                    fscc = fsc_mask(volodd[iref], voleve[iref], mask3D, rstep,
                                    fscfile)
                else:
                    vol[iref].write_image(
                        os.path.join(outdir, "vol_%s.hdf" % itout), -1)

                if save_half is True:
                    volodd[iref].write_image(
                        os.path.join(outdir, "volodd_%s.hdf" % itout), -1)
                    voleve[iref].write_image(
                        os.path.join(outdir, "voleve_%s.hdf" % itout), -1)

                if nmasks > 1:
                    # Read mask for multiplying
                    ref_data[0] = maskF[iref]
                ref_data[2] = vol[iref]
                ref_data[3] = fscc
                #  call user-supplied function to prepare reference image, i.e., center and filter it
                vol[iref], cs, fl = ref_ali3d(ref_data)
                vol[iref].write_image(
                    os.path.join(outdir, "volf_%s.hdf" % (itout)), -1)
                if (apix == 1):
                    res_msg = "Models filtered at spatial frequency of:\t"
                    res = fl
                else:
                    res_msg = "Models filtered at resolution of:       \t"
                    res = apix / fl
                ares = array2string(array(res), precision=2)
                print_msg("%s%s\n\n" % (res_msg, ares))

            bcast_EMData_to_all(vol[iref], myid, main_node)
            # write out headers, under MPI writing has to be done sequentially
            mpi_barrier(MPI_COMM_WORLD)

    # projection matching
    for N_step in xrange(lstp):
        terminate = 0
        Iter = -1
        while (Iter < max_iter - 1 and terminate == 0):
            Iter += 1
            total_iter += 1
            itout = "%03g_%02d" % (delta[N_step], Iter)
            if myid == main_node:
                print_msg(
                    "ITERATION #%3d, inner iteration #%3d\nDelta = %4.1f, an = %5.2f, xrange = %5.2f, yrange = %5.2f, step = %5.2f\n\n"
                    % (N_step, Iter, delta[N_step], an[N_step], xrng[N_step],
                       yrng[N_step], step[N_step]))

            for iref in xrange(nrefs):
                if myid == main_node: start_time = time()
                volft, kb = prep_vol(vol[iref])

                ## constrain projections to out of plane parameter
                theta1 = None
                theta2 = None
                if oplane is not None:
                    theta1 = 90 - oplane
                    theta2 = 90 + oplane
                refrings = prepare_refrings(volft,
                                            kb,
                                            nx,
                                            delta[N_step],
                                            ref_a,
                                            sym,
                                            numr,
                                            MPI=True,
                                            phiEqpsi="Minus",
                                            initial_theta=theta1,
                                            delta_theta=theta2)

                del volft, kb

                if myid == main_node:
                    print_msg(
                        "Time to prepare projections for model %i: %s\n" %
                        (iref, legibleTime(time() - start_time)))
                    start_time = time()

                for im in xrange(nima):
                    data[im].set_attr("xform.projection", transmulti[im][iref])
                    if an[N_step] == -1:
                        t1, peak, pixer[im] = proj_ali_incore(
                            data[im], refrings, numr, xrng[N_step],
                            yrng[N_step], step[N_step], finfo)
                    else:
                        t1, peak, pixer[im] = proj_ali_incore_local(
                            data[im], refrings, numr, xrng[N_step],
                            yrng[N_step], step[N_step], an[N_step], finfo)
                    #data[im].set_attr("xform.projection"%iref, t1)
                    if nrefs > 1:
                        data[im].set_attr("eulers_txty.%i" % iref, t1)
                    scoremulti[im][iref] = peak
                    from pixel_error import max_3D_pixel_error
                    # t1 is the current param, t2 is old
                    t2 = transmulti[im][iref]
                    pixelmulti[im][iref] = max_3D_pixel_error(t1, t2, numr[-3])
                    transmulti[im][iref] = t1

                if myid == main_node:
                    print_msg("Time of alignment for model %i: %s\n" %
                              (iref, legibleTime(time() - start_time)))
                    start_time = time()

            # gather scoring data from all processors
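            # each node sends nima*nrefs values; after gatherv and broadcast every node holds
            # the full score and pixel-error tables, reshaped below to (total_nima, nrefs)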
            from mpi import mpi_gatherv
            scoremultisend = sum(scoremulti, [])
            pixelmultisend = sum(pixelmulti, [])
            tmp = mpi_gatherv(scoremultisend, len(scoremultisend), MPI_FLOAT,
                              recvcount_score, disps_score, MPI_FLOAT,
                              main_node, MPI_COMM_WORLD)
            tmp1 = mpi_gatherv(pixelmultisend, len(pixelmultisend), MPI_FLOAT,
                               recvcount_score, disps_score, MPI_FLOAT,
                               main_node, MPI_COMM_WORLD)
            tmp = mpi_bcast(tmp, (total_nima * nrefs), MPI_FLOAT, 0,
                            MPI_COMM_WORLD)
            tmp1 = mpi_bcast(tmp1, (total_nima * nrefs), MPI_FLOAT, 0,
                             MPI_COMM_WORLD)
            tmp = map(float, tmp)
            tmp1 = map(float, tmp1)
            score = array(tmp).reshape(-1, nrefs)
            pixelerror = array(tmp1).reshape(-1, nrefs)
            score_local = array(scoremulti)
            mean_score = score.mean(axis=0)
            std_score = score.std(axis=0)
            cut = mean_score - (cutoff * std_score)
            cut2 = mean_score + (cutoff * std_score)
            res_max = score_local.argmax(axis=1)
            minus_cc = [0.0 for x in xrange(nrefs)]
            minus_pix = [0.0 for x in xrange(nrefs)]
            minus_ref = [0.0 for x in xrange(nrefs)]

            #output pixel errors
            if (myid == main_node):
                from statistics import hist_list
                lhist = 20
                pixmin = pixelerror.min(axis=1)
                region, histo = hist_list(pixmin, lhist)
                if (region[0] < 0.0): region[0] = 0.0
                print_msg(
                    "Histogram of pixel errors\n      ERROR       number of particles\n"
                )
                for lhx in xrange(lhist):
                    print_msg(" %10.3f     %7d\n" % (region[lhx], histo[lhx]))
                # Terminate if the requested percentage (term) of particles is within 1 pixel error
                im = 0
                for lhx in xrange(lhist):
                    if (region[lhx] > 1.0): break
                    im += histo[lhx]
                print_msg("Percent of particles with pixel error < 1: %f\n\n" %
                          (im / float(total_nima) * 100))
                term_cond = float(term) / 100
                if (im / float(total_nima) > term_cond):
                    terminate = 1
                    print_msg("Terminating internal loop\n")
                del region, histo
            terminate = mpi_bcast(terminate, 1, MPI_INT, 0, MPI_COMM_WORLD)
            terminate = int(terminate[0])
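            # assign a group label to each particle: 999 = kept/unsorted (or the best model index when sorting),
            # 888 = rejected by cc cutoff, 777 = rejected by pixel-error cutoff, 666 = rejected by ref-free comparison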

            for im in xrange(nima):
                if (sort == False):
                    data[im].set_attr('group', 999)
                elif (mjump[N_step] == 1):
                    data[im].set_attr('group', int(res_max[im]))

                pix_run = data[im].get_attr('pix_score')
                if (pix_cutoff[N_step] == 1
                        and (terminate == 1 or Iter == max_iter - 1)):
                    if (pixelmulti[im][int(res_max[im])] > 1):
                        data[im].set_attr('pix_score', int(777))

                if (score_local[im][int(res_max[im])] < cut[int(
                        res_max[im])]) or (two_tail and score_local[im][int(
                            res_max[im])] > cut2[int(res_max[im])]):
                    data[im].set_attr('group', int(888))
                    minus_cc[int(res_max[im])] = minus_cc[int(res_max[im])] + 1

                if (pix_run == 777):
                    data[im].set_attr('group', int(777))
                    minus_pix[int(
                        res_max[im])] = minus_pix[int(res_max[im])] + 1

                if (compare_ref_free != "-1") and (ref_free_cutoff[N_step] !=
                                                   -1) and (total_iter > 1):
                    id = data[im].get_attr('ID')
                    if id in rejects:
                        data[im].set_attr('group', int(666))
                        minus_ref[int(
                            res_max[im])] = minus_ref[int(res_max[im])] + 1

            minus_cc_tot = mpi_reduce(minus_cc, nrefs, MPI_FLOAT, MPI_SUM, 0,
                                      MPI_COMM_WORLD)
            minus_pix_tot = mpi_reduce(minus_pix, nrefs, MPI_FLOAT, MPI_SUM, 0,
                                       MPI_COMM_WORLD)
            minus_ref_tot = mpi_reduce(minus_ref, nrefs, MPI_FLOAT, MPI_SUM, 0,
                                       MPI_COMM_WORLD)
            if (myid == main_node):
                if (sort):
                    tot_max = score.argmax(axis=1)
                    res = bincount(tot_max)
                else:
                    res = ones(nrefs) * total_nima
                print_msg("Particle distribution:	     \t\t%s\n" % (res * 1.0))
                afcut1 = res - minus_cc_tot
                afcut2 = afcut1 - minus_pix_tot
                afcut3 = afcut2 - minus_ref_tot
                print_msg("Particle distribution after cc cutoff:\t\t%s\n" %
                          (afcut1))
                print_msg("Particle distribution after pix cutoff:\t\t%s\n" %
                          (afcut2))
                print_msg("Particle distribution after ref cutoff:\t\t%s\n\n" %
                          (afcut3))

            res = [0.0 for i in xrange(nrefs)]
            for iref in xrange(nrefs):
                if (center == -1):
                    from utilities import estimate_3D_center_MPI, rotate_3D_shift
                    dummy = EMData()
                    cs[0], cs[1], cs[2], dummy, dummy = estimate_3D_center_MPI(
                        data, total_nima, myid, number_of_proc, main_node)
                    cs = mpi_bcast(cs, 3, MPI_FLOAT, main_node, MPI_COMM_WORLD)
                    cs = [-float(cs[0]), -float(cs[1]), -float(cs[2])]
                    rotate_3D_shift(data, cs)

                if (sort):
                    group = iref
                    for im in xrange(nima):
                        imgroup = data[im].get_attr('group')
                        if imgroup == iref:
                            data[im].set_attr('xform.projection',
                                              transmulti[im][iref])
                else:
                    group = int(999)
                    for im in xrange(nima):
                        data[im].set_attr('xform.projection',
                                          transmulti[im][iref])
                if (nrefs == 1):
                    modout = ""
                else:
                    modout = "_model_%02d" % (iref)

                fscfile = os.path.join(outdir, "fsc_%s%s" % (itout, modout))
                vol[iref], fscc, volodd[iref], voleve[iref] = rec3D_MPI_noCTF(
                    data,
                    sym,
                    fscmask,
                    fscfile,
                    myid,
                    main_node,
                    index=group,
                    npad=recon_pad)

                if myid == main_node:
                    print_msg("3D reconstruction time for model %i: %s\n" %
                              (iref, legibleTime(time() - start_time)))
                    start_time = time()

                # Compute Fourier variance
                if fourvar:
                    outvar = os.path.join(outdir, "volVar_%s.hdf" % (itout))
                    ssnr_file = os.path.join(outdir, "ssnr_%s" % (itout))
                    varf = varf3d_MPI(data,
                                      ssnr_text_file=ssnr_file,
                                      mask2D=None,
                                      reference_structure=vol[iref],
                                      ou=last_ring,
                                      rw=1.0,
                                      npad=1,
                                      CTF=None,
                                      sign=1,
                                      sym=sym,
                                      myid=myid)
                    if myid == main_node:
                        print_msg(
                            "Time to calculate 3D Fourier variance for model %i: %s\n"
                            % (iref, legibleTime(time() - start_time)))
                        start_time = time()
                        varf = 1.0 / varf
                        varf.write_image(outvar, -1)
                else:
                    varf = None

                if myid == main_node:
                    if helicalrecon:
                        from hfunctions import processHelicalVol

                        vstep = None
                        if vertstep is not None:
                            vstep = (vdp[iref], vdphi[iref])
                        print_msg(
                            "Old rise and twist for model %i     : %8.3f, %8.3f\n"
                            % (iref, dp[iref], dphi[iref]))
                        hvals = processHelicalVol(vol[iref], voleve[iref],
                                                  volodd[iref], iref, outdir,
                                                  itout, dp[iref], dphi[iref],
                                                  apix, hsearch, findseam,
                                                  vstep, wcmask)
                        (vol[iref], voleve[iref], volodd[iref], dp[iref],
                         dphi[iref], vdp[iref], vdphi[iref]) = hvals
                        print_msg(
                            "New rise and twist for model %i     : %8.3f, %8.3f\n"
                            % (iref, dp[iref], dphi[iref]))
                        # get new FSC from symmetrized half volumes
                        fscc = fsc_mask(volodd[iref], voleve[iref], mask3D,
                                        rstep, fscfile)

                        print_msg(
                            "Time to search and apply helical symmetry for model %i: %s\n\n"
                            % (iref, legibleTime(time() - start_time)))
                        start_time = time()
                    else:
                        vol[iref].write_image(
                            os.path.join(outdir, "vol_%s.hdf" % (itout)), -1)

                    if save_half is True:
                        volodd[iref].write_image(
                            os.path.join(outdir, "volodd_%s.hdf" % (itout)),
                            -1)
                        voleve[iref].write_image(
                            os.path.join(outdir, "voleve_%s.hdf" % (itout)),
                            -1)

                    if nmasks > 1:
                        # Read mask for multiplying
                        ref_data[0] = maskF[iref]
                    ref_data[2] = vol[iref]
                    ref_data[3] = fscc
                    ref_data[4] = varf
                    #  call user-supplied function to prepare reference image, i.e., center and filter it
                    vol[iref], cs, fl = ref_ali3d(ref_data)
                    vol[iref].write_image(
                        os.path.join(outdir, "volf_%s.hdf" % (itout)), -1)
                    if (apix == 1):
                        res_msg = "Models filtered at spatial frequency of:\t"
                        res[iref] = fl
                    else:
                        res_msg = "Models filtered at resolution of:       \t"
                        res[iref] = apix / fl

                del varf
                bcast_EMData_to_all(vol[iref], myid, main_node)

                if compare_ref_free != "-1": compare_repro = True
                if compare_repro:
                    outfile_repro = comp_rep(refrings, data, itout, modout,
                                             vol[iref], group, nima, nx, myid,
                                             main_node, outdir)
                    mpi_barrier(MPI_COMM_WORLD)
                    if compare_ref_free != "-1":
                        ref_free_output = os.path.join(
                            outdir, "ref_free_%s%s" % (itout, modout))
                        rejects = compare(compare_ref_free, outfile_repro,
                                          ref_free_output, yrng[N_step],
                                          xrng[N_step], rstep, nx, apix,
                                          ref_free_cutoff[N_step],
                                          number_of_proc, myid, main_node)

            # retrieve alignment params from all processors
            par_str = ['xform.projection', 'ID', 'group']
            if nrefs > 1:
                for iref in xrange(nrefs):
                    par_str.append('eulers_txty.%i' % iref)

            if myid == main_node:
                from utilities import recv_attr_dict
                recv_attr_dict(main_node, stack, data, par_str, image_start,
                               image_end, number_of_proc)

            else:
                send_attr_dict(main_node, data, par_str, image_start,
                               image_end)

            if myid == main_node:
                ares = array2string(array(res), precision=2)
                print_msg("%s%s\n\n" % (res_msg, ares))
                dummy = EMData()
                if full_output:
                    nimat = EMUtil.get_image_count(stack)
                    output_file = os.path.join(outdir, "paramout_%s" % itout)
                    foutput = open(output_file, 'w')
                    for im in xrange(nimat):
                        # save the parameters for each of the models
                        outstring = ""
                        dummy.read_image(stack, im, True)
                        param3d = dummy.get_attr('xform.projection')
                        g = dummy.get_attr("group")
                        # retrieve alignments in EMAN-format
                        pE = param3d.get_params('eman')
                        outstring += "%f\t%f\t%f\t%f\t%f\t%i\n" % (
                            pE["az"], pE["alt"], pE["phi"], pE["tx"], pE["ty"],
                            g)
                        foutput.write(outstring)
                    foutput.close()
                del dummy
            mpi_barrier(MPI_COMM_WORLD)


#	mpi_finalize()

    if myid == main_node: print_end_msg("ali3d_MPI")
Example #10
0
def main():
	import sys
	import os
	import math
	import random
	import pyemtbx.options
	import time
	from   random   import random, seed, randint
	from   optparse import OptionParser

	progname = os.path.basename(sys.argv[0])
	usage = progname + """ [options] <inputfile> <outputfile>

	Generic 2-D image processing programs.

	Functionality:

	1.  Phase flip a stack of images and write output to new file:
		sxprocess.py input_stack.hdf output_stack.hdf --phase_flip
	
	2.  Resample (decimate or interpolate up) images (2D or 3D) in a stack to change the pixel size.
	    The window size will change accordingly.
		sxprocess input.hdf output.hdf  --changesize --ratio=0.5

	3.  Compute the average power spectrum of a stack of 2D images, with optional padding (option wn) with zeroes, or of a 3-D volume.
		sxprocess.py input_stack.hdf powerspectrum.hdf --pw [--wn=1024]

	4.  Generate a stack of projections bdb:data and micrographs with prefix mic (i.e., mic0.hdf, mic1.hdf etc) from structure input_structure.hdf, with CTF applied to both projections and micrographs:
		sxprocess.py input_structure.hdf data mic --generate_projections format="bdb":apix=5.2:CTF=True:boxsize=64

    5.  Retrieve original image numbers in the selected ISAC group (here group 12 from generation 3):
    	sxprocess.py  bdb:test3 class_averages_generation_3.hdf  list3_12.txt --isacgroup=12 --params=originalid

    6.  Retrieve original image numbers of images listed in ISAC output stack of averages:
    	sxprocess.py  select1.hdf  ohk.txt

    7.  Adjust rotationally averaged power spectrum of an image to that of a reference image or a reference 1D power spectrum stored in an ASCII file.
    	Optionally use a tangent low-pass filter.  Also works for a stack of images, in which case the output is also a stack.
    	sxprocess.py  vol.hdf ref.hdf  avol.hdf < 0.25 0.2> --adjpw
   	 	sxprocess.py  vol.hdf pw.txt   avol.hdf < 0.25 0.2> --adjpw

    8.  Generate a 1D rotationally averaged power spectrum of an image.
		sxprocess.py  vol.hdf --rotpw=rotpw.txt
    	# Output will contain three columns:
       (1) rotationally averaged power spectrum
       (2) logarithm of the rotationally averaged power spectrum
       (3) integer line number (from zero to approximately half the image size)

    9.  Apply 3D transformation (rotation and/or shift) to a set of orientation parameters associated with projection data.
    	sxprocess.py  --transformparams=phi,theta,psi,tx,ty,tz      input.txt  output.txt
    	The output file is then imported and 3D transformed volume computed:
    	sxheader.py  bdb:p  --params=xform.projection  --import=output.txt
    	mpirun -np 2 sxrecons3d_n.py  bdb:p tvol.hdf --MPI
    	The reconstructed volume is in the position of the volume computed using the input.txt parameters and then
    	transformed with rot_shift3D(vol, phi,theta,psi,tx,ty,tz)

   10.  Import ctf parameters from the output of sxcter into windowed particle headers.
	    There are three possible input file formats:  (1) all particles are in one stack, (2 or 3) particles are in multiple stacks, each corresponding to a single micrograph.
	    In each case the particles should contain the name of the micrograph of origin stored using the attribute name 'ptcl_source_image'.
        Normally this is done by e2boxer.py during windowing.
	    Particles whose defocus or astigmatism error exceeds the set thresholds will be skipped; otherwise, virtual stacks with the original name preceded by G will be created.
		sxprocess.py  --input=bdb:data  --importctf=outdir/partres  --defocuserror=10.0  --astigmatismerror=5.0
		#  Output will be a virtual stack bdb:Gdata
		sxprocess.py  --input="bdb:directory/stacks*"  --importctf=outdir/partres  --defocuserror=10.0  --astigmatismerror=5.0
		To concatenate output files:
		cd directory
		e2bdb.py . --makevstack=bdb:allparticles  --filt=G
		IMPORTANT:  Please do not move (or remove!) any input/intermediate EMAN2DB files as the information is linked between them.

   11. Scale 3D shifts.  The shifts in the input five-column text file with 3D orientation parameters will be DIVIDED by the scale factor
		sxprocess.py  orientationparams.txt  scaledparams.txt  scale=0.5
   
   12. Generate 3D mask from a given 3-D volume automatically or using a threshold provided by the user.
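   	Illustrative invocations (file names are placeholders; the options are defined below):
   	sxprocess.py  vol.hdf  mask3d.hdf  --adaptive_mask  --nsigma=1.0  --ndilation=3
   	sxprocess.py  vol.hdf  mask3d.hdf  --adaptive_mask  --threshold=0.5  --ne=1  --nd=2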
   
   13. Postprocess 3-D or 2-D images: 
   			for 3-D volumes: calculate FSC with the provided mask; weight the summed volume with the FSC; estimate the B-factor from the FSC-weighted sum of the two volumes; apply the negative B-factor to the weighted volume.
   			for 2-D images:  calculate B-factor and apply negative B-factor to 2-D images.
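   			Illustrative invocation (file names are placeholders; the options are defined below):
   			sxprocess.py  vol_0_unfil.hdf  vol_1_unfil.hdf  --postprocess  --fsc_weighted  --mask=mask3d.hdf  --pixel_size=1.2  --output=postprocessed.hdf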
   14. Window stack file - reduce the size of images without changing the pixel size.
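   			Illustrative invocation (file names are placeholders; the options are defined below):
   			sxprocess.py  input_stack.hdf  reduced_stack.hdf  --window_stack  --box=64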


"""

	parser = OptionParser(usage,version=SPARXVERSION)
	parser.add_option("--order", 				action="store_true", help="Two arguments are required: name of input stack and desired name of output stack. The output stack is the input stack sorted by similarity in terms of cross-correlation coefficent.", default=False)
	parser.add_option("--order_lookup", 		action="store_true", help="Test/Debug.", default=False)
	parser.add_option("--order_metropolis", 	action="store_true", help="Test/Debug.", default=False)
	parser.add_option("--order_pca", 			action="store_true", help="Test/Debug.", default=False)
	parser.add_option("--initial",				type="int", 		default=-1, help="Specifies which image will be used as an initial seed to form the chain. (default = 0, means the first image)")
	parser.add_option("--circular", 			action="store_true", help="Select circular ordering (fisr image has to be similar to the last", default=False)
	parser.add_option("--radius", 				type="int", 		default=-1, help="Radius of a circular mask for similarity based ordering")
	parser.add_option("--changesize", 			action="store_true", help="resample (decimate or interpolate up) images (2D or 3D) in a stack to change the pixel size.", default=False)
	parser.add_option("--ratio", 				type="float", 		default=1.0, help="The ratio of new to old image size (if <1 the pixel size will increase and image size decrease, if>1, the other way round")
	parser.add_option("--pw", 					action="store_true", help="compute average power spectrum of a stack of 2-D images with optional padding (option wn) with zeroes", default=False)
	parser.add_option("--wn", 					type="int", 		default=-1, help="Size of window to use (should be larger/equal than particle box size, default padding to max(nx,ny))")
	parser.add_option("--phase_flip", 			action="store_true", help="Phase flip the input stack", default=False)
	parser.add_option("--makedb", 				metavar="param1=value1:param2=value2", type="string",
					action="append",  help="One argument is required: name of key with which the database will be created. Fill in database with parameters specified as follows: --makedb param1=value1:param2=value2, e.g. 'gauss_width'=1.0:'pixel_input'=5.2:'pixel_output'=5.2:'thr_low'=1.0")
	parser.add_option("--generate_projections", metavar="param1=value1:param2=value2", type="string",
					action="append", help="Three arguments are required: name of input structure from which to generate projections, desired name of output projection stack, and desired prefix for micrographs (e.g. if prefix is 'mic', then micrographs mic0.hdf, mic1.hdf etc will be generated). Optional arguments specifying format, apix, box size and whether to add CTF effects can be entered as follows after --generate_projections: format='bdb':apix=5.2:CTF=True:boxsize=100, or format='hdf', etc., where format is bdb or hdf, apix (pixel size) is a float, CTF is True or False, and boxsize denotes the dimension of the box (assumed to be a square). If an optional parameter is not specified, it will default as follows: format='bdb', apix=2.5, CTF=False, boxsize=64.")
	parser.add_option("--isacgroup", 			type="int", 		help="Retrieve original image numbers in the selected ISAC group. See ISAC documentation for details.", default=-1)
	parser.add_option("--isacselect", 			action="store_true", 		help="Retrieve original image numbers of images listed in ISAC output stack of averages. See ISAC documentation for details.", default=False)
	parser.add_option("--params",	   			type="string",      default=None,    help="Name of header of parameter, which one depends on specific option")
	parser.add_option("--adjpw", 				action="store_true",	help="Adjust rotationally averaged power spectrum of an image", default=False)
	parser.add_option("--rotpw", 				type="string",   	default=None,    help="Name of the text file to contain rotationally averaged power spectrum of the input image.")
	parser.add_option("--transformparams",		type="string",   	default=None,    help="Transform 3D projection orientation parameters using six 3D parameters (phi, theta,psi,sx,sy,sz).  Input: --transformparams=45.,66.,12.,-2,3,-5.5 desired six transformation of the reconstructed structure. Output: file with modified orientation parameters.")

	
	# import ctf estimates done using cter
	parser.add_option("--input",              	type="string",		default= None,     		  help="Input particles.")
	parser.add_option("--importctf",          	type="string",		default= None,     		  help="Name of the file containing CTF parameters produced by sxcter.")
	parser.add_option("--defocuserror",       	type="float",  		default=1000000.0,        help="Exclude micrographs whose relative defocus error as estimated by sxcter is larger than defocuserror percent.  The error is computed as (std dev defocus)/defocus*100%")
	parser.add_option("--astigmatismerror",   	type="float",  		default=360.0,            help="Set to zero astigmatism for micrographs whose astigmatism angular error as estimated by sxcter is larger than astigmatismerror degrees.")

	# import ctf estimates done using cter
	parser.add_option("--scale",              	type="float", 		default=-1.0,      		  help="Divide shifts in the input 3D orientation parameters text file by the scale factor.")
	
	# generate adaptive mask from a given 3-D volume
	parser.add_option("--adaptive_mask",        action="store_true",                      help="create adaptive 3-D mask from a given volume", default=False)
	parser.add_option("--nsigma",              	type="float",	default= 1.,     	      help="number of times of sigma of the input volume to obtain the large density cluster")
	parser.add_option("--ndilation",            type="int",		default= 3,     		  help="number of times of dilation applied to the largest cluster of density")
	parser.add_option("--kernel_size",          type="int",		default= 11,     		  help="convolution kernel for smoothing the edge of the mask")
	parser.add_option("--gauss_standard_dev",   type="int",		default= 9,     		  help="stanadard deviation value to generate Gaussian edge")
	parser.add_option("--threshold",            type="float",	default= 9999.,           help="threshold provided by user to binarize input volume")
	parser.add_option("--ne",                   type="int",		default= 0,     		  help="number of times to erode the binarized  input image")
	parser.add_option("--nd",                   type="int",		default= 0,     		  help="number of times to dilate the binarized input image")
	parser.add_option("--postprocess",          action="store_true",                      help="postprocess unfiltered odd, even 3-D volumes",default=False)
	parser.add_option("--fsc_weighted",         action="store_true",                      help="postprocess unfiltered odd, even 3-D volumes")
	parser.add_option("--low_pass_filter",      action="store_true",      default=False,  help="postprocess unfiltered odd, even 3-D volumes")
	parser.add_option("--ff",                   type="float", default=.25,                help="low pass filter stop band frequency in absolute unit")
	parser.add_option("--aa",                   type="float", default=.1,                 help="low pass filter falloff" )
	parser.add_option("--mask",           type="string",                                  help="input mask file",  default=None)
	parser.add_option("--output",         type="string",                                  help="output file name", default="postprocessed.hdf")
	parser.add_option("--pixel_size",     type="float",                                   help="pixel size of the data", default=1.0)
	parser.add_option("--B_start",     type="float",                                      help="starting frequency in Angstrom for B-factor estimation", default=10.)
	parser.add_option("--FSC_cutoff",     type="float",                                   help="stop frequency in Angstrom for B-factor estimation", default=0.143)
	parser.add_option("--2d",          action="store_true",                      help="postprocess isac 2-D averaged images",default=False)
	parser.add_option("--window_stack",                     action="store_true",          help="window stack images using a smaller window size", default=False)
	parser.add_option("--box",           type="int",		default= 0,                   help="the new window size ") 
	(options, args) = parser.parse_args()

	global_def.BATCH = True
		
	if options.phase_flip:
		nargs = len(args)
		if nargs != 2:
			print "must provide name of input and output file!"
			return
		from EMAN2 import Processor
		instack = args[0]
		outstack = args[1]
		nima = EMUtil.get_image_count(instack)
		from filter import filt_ctf
		for i in xrange(nima):
			img = EMData()
			img.read_image(instack, i)
			try:
				ctf = img.get_attr('ctf')
			except:
				print "no ctf information in input stack! Exiting..."
				return
			
			dopad = True
			sign = 1
			binary = 1  # phase flip
				
			assert img.get_ysize() > 1	
			dict = ctf.to_dict()
			dz = dict["defocus"]
			cs = dict["cs"]
			voltage = dict["voltage"]
			pixel_size = dict["apix"]
			b_factor = dict["bfactor"]
			ampcont = dict["ampcont"]
			dza = dict["dfdiff"]
			azz = dict["dfang"]
			
			if dopad and not img.is_complex(): ip = 1
			else:                             ip = 0
	
	
			params = {"filter_type": Processor.fourier_filter_types.CTF_,
	 			"defocus" : dz,
				"Cs": cs,
				"voltage": voltage,
				"Pixel_size": pixel_size,
				"B_factor": b_factor,
				"amp_contrast": ampcont,
				"dopad": ip,
				"binary": binary,
				"sign": sign,
				"dza": dza,
				"azz":azz}
			
			tmp = Processor.EMFourierFilter(img, params)
			tmp.set_attr_dict({"ctf": ctf})
			
			tmp.write_image(outstack, i)

	elif options.changesize:
		nargs = len(args)
		if nargs != 2:
			ERROR("must provide name of input and output file!", "change size", 1)
			return
		from utilities import get_im
		instack = args[0]
		outstack = args[1]
		sub_rate = float(options.ratio)
			
		nima = EMUtil.get_image_count(instack)
		from fundamentals import resample
		for i in xrange(nima):
			resample(get_im(instack, i), sub_rate).write_image(outstack, i)

	elif options.isacgroup>-1:
		nargs = len(args)
		if nargs != 3:
			ERROR("Three files needed on input!", "isacgroup", 1)
			return
		from utilities import get_im
		instack = args[0]
		m=get_im(args[1],int(options.isacgroup)).get_attr("members")
		l = []
		for k in m:
			l.append(int(get_im(args[0],k).get_attr(options.params)))
		from utilities import write_text_file
		write_text_file(l, args[2])

	elif options.isacselect:
		nargs = len(args)
		if nargs != 2:
			ERROR("Two files needed on input!", "isacgroup", 1)
			return
		from utilities import get_im
		nima = EMUtil.get_image_count(args[0])
		m = []
		for k in xrange(nima):
			m += get_im(args[0],k).get_attr("members")
		m.sort()
		from utilities import write_text_file
		write_text_file(m, args[1])

	elif options.pw:
		nargs = len(args)
		if nargs < 2:
			ERROR("must provide name of input and output file!", "pw", 1)
			return
		from utilities import get_im, write_text_file
		from fundamentals import rops_table
		d = get_im(args[0])
		ndim = d.get_ndim()
		if ndim ==3:
			pw = rops_table(d)
			write_text_file(pw, args[1])			
		else:
			nx = d.get_xsize()
			ny = d.get_ysize()
			if nargs ==3: mask = get_im(args[2])
			wn = int(options.wn)
			if wn == -1:
				wn = max(nx, ny)
			else:
				if( (wn<nx) or (wn<ny) ):  ERROR("window size cannot be smaller than the image size","pw",1)
			n = EMUtil.get_image_count(args[0])
			from utilities import model_blank, model_circle, pad
			from EMAN2 import periodogram
			p = model_blank(wn,wn)
		
			for i in xrange(n):
				d = get_im(args[0], i)
				if nargs==3:
					d *=mask
				st = Util.infomask(d, None, True)
				d -= st[0]
				p += periodogram(pad(d, wn, wn, 1, 0.))
			p /= n
			p.write_image(args[1])

	elif options.adjpw:

		if len(args) < 3:
			ERROR("filt_by_rops input target output fl aa (the last two are optional parameters of a low-pass filter)","adjpw",1)
			return
		img_stack = args[0]
		from math         import sqrt
		from fundamentals import rops_table, fft
		from utilities    import read_text_file, get_im
		from filter       import  filt_tanl, filt_table
		if(  args[1][-3:] == 'txt'):
			rops_dst = read_text_file( args[1] )
		else:
			rops_dst = rops_table(get_im( args[1] ))

		out_stack = args[2]
		if(len(args) >4):
			fl = float(args[3])
			aa = float(args[4])
		else:
			fl = -1.0
			aa = 0.0

		nimage = EMUtil.get_image_count( img_stack )

		for i in xrange(nimage):
			img = fft(get_im(img_stack, i) )
			rops_src = rops_table(img)

			assert len(rops_dst) == len(rops_src)

			table = [0.0]*len(rops_dst)
			for j in xrange( len(rops_dst) ):
				table[j] = sqrt( rops_dst[j]/rops_src[j] )

			if( fl > 0.0):
				img = filt_tanl(img, fl, aa)
			img = fft(filt_table(img, table))
			img.write_image(out_stack, i)

	elif options.rotpw != None:

		if len(args) != 1:
			ERROR("Only one input permitted","rotpw",1)
			return
		from utilities import write_text_file, get_im
		from fundamentals import rops_table
		from math import log10
		t = rops_table(get_im(args[0]))
		x = range(len(t))
		r = [0.0]*len(x)
		for i in x:  r[i] = log10(t[i])
		write_text_file([t,r,x],options.rotpw)

	elif options.transformparams != None:
		if len(args) != 2:
			ERROR("Please provide names of input and output files with orientation parameters","transformparams",1)
			return
		from utilities import read_text_row, write_text_row
		transf = [0.0]*6
		spl=options.transformparams.split(',')
		for i in xrange(len(spl)):  transf[i] = float(spl[i])

		write_text_row( rotate_shift_params(read_text_row(args[0]), transf)	, args[1])

	elif options.makedb != None:
		nargs = len(args)
		if nargs != 1:
			print "must provide exactly one argument denoting database key under which the input params will be stored"
			return
		dbkey = args[0]
		print "database key under which params will be stored: ", dbkey
		gbdb = js_open_dict("e2boxercache/gauss_box_DB.json")
				
		parmstr = 'dummy:'+options.makedb[0]
		(processorname, param_dict) = parsemodopt(parmstr)
		dbdict = {}
		for pkey in param_dict:
			if (pkey == 'invert_contrast') or (pkey == 'use_variance'):
				if param_dict[pkey] == 'True':
					dbdict[pkey] = True
				else:
					dbdict[pkey] = False
			else:		
				dbdict[pkey] = param_dict[pkey]
		gbdb[dbkey] = dbdict

	elif options.generate_projections:
		nargs = len(args)
		if nargs != 3:
			ERROR("Must provide name of input structure(s) from which to generate projections, name of output projection stack, and prefix for output micrographs."\
			"sxprocess - generate projections",1)
			return
		inpstr  = args[0]
		outstk  = args[1]
		micpref = args[2]

		parmstr = 'dummy:'+options.generate_projections[0]
		(processorname, param_dict) = parsemodopt(parmstr)

		parm_CTF    = False
		parm_format = 'bdb'
		parm_apix   = 2.5

		if 'CTF' in param_dict:
			if param_dict['CTF'] == 'True':
				parm_CTF = True

		if 'format' in param_dict:
			parm_format = param_dict['format']

		if 'apix' in param_dict:
			parm_apix = float(param_dict['apix'])

		boxsize = 64
		if 'boxsize' in param_dict:
			boxsize = int(param_dict['boxsize'])

		print "pixel size: ", parm_apix, " format: ", parm_format, " add CTF: ", parm_CTF, " box size: ", boxsize

		scale_mult      = 2500
		sigma_add       = 1.5
		sigma_proj      = 30.0
		sigma2_proj     = 17.5
		sigma_gauss     = 0.3
		sigma_mic       = 30.0
		sigma2_mic      = 17.5
		sigma_gauss_mic = 0.3
		
		if 'scale_mult' in param_dict:
			scale_mult = float(param_dict['scale_mult'])
		if 'sigma_add' in param_dict:
			sigma_add = float(param_dict['sigma_add'])
		if 'sigma_proj' in param_dict:
			sigma_proj = float(param_dict['sigma_proj'])
		if 'sigma2_proj' in param_dict:
			sigma2_proj = float(param_dict['sigma2_proj'])
		if 'sigma_gauss' in param_dict:
			sigma_gauss = float(param_dict['sigma_gauss'])	
		if 'sigma_mic' in param_dict:
			sigma_mic = float(param_dict['sigma_mic'])
		if 'sigma2_mic' in param_dict:
			sigma2_mic = float(param_dict['sigma2_mic'])
		if 'sigma_gauss_mic' in param_dict:
			sigma_gauss_mic = float(param_dict['sigma_gauss_mic'])	
			
		from filter import filt_gaussl, filt_ctf
		from utilities import drop_spider_doc, even_angles, model_gauss, delete_bdb, model_blank,pad,model_gauss_noise,set_params2D, set_params_proj
		from projection import prep_vol,prgs
		seed(14567)
		delta = 29
		angles = even_angles(delta, 0.0, 89.9, 0.0, 359.9, "S")
		nangle = len(angles)
		
		modelvol = []
		nvlms = EMUtil.get_image_count(inpstr)
		from utilities import get_im
		for k in xrange(nvlms):  modelvol.append(get_im(inpstr,k))
		
		nx = modelvol[0].get_xsize()
		
		if nx != boxsize:
			ERROR("Requested box dimension does not match dimension of the input model.", \
			"sxprocess - generate projections",1)
		nvol = 10
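		# for each input model build nvol Fourier-interpolated copies, each perturbed by a randomly scaled Gaussian blob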
		volfts = [[] for k in xrange(nvlms)]
		for k in xrange(nvlms):
			for i in xrange(nvol):
				sigma = sigma_add + random()  # 1.5-2.5
				addon = model_gauss(sigma, boxsize, boxsize, boxsize, sigma, sigma, 38, 38, 40 )
				scale = scale_mult * (0.5+random())
				vf, kb = prep_vol(modelvol[k] + scale*addon)
				volfts[k].append(vf)
		del vf, modelvol

		if parm_format == "bdb":
			stack_data = "bdb:"+outstk
			delete_bdb(stack_data)
		else:
			stack_data = outstk + ".hdf"
		Cs      = 2.0
		pixel   = parm_apix
		voltage = 120.0
		ampcont = 10.0
		ibd     = 4096/2-boxsize
		iprj    = 0

		width = 240
		xstart = 8 + boxsize/2
		ystart = 8 + boxsize/2
		rowlen = 17
		from random import randint
		params = []
		for idef in xrange(3, 8):

			irow = 0
			icol = 0

			mic = model_blank(4096, 4096)
			defocus = idef * 0.5#0.2
			if parm_CTF:
				astampl=defocus*0.15
				astangl=50.0
				ctf = generate_ctf([defocus, Cs, voltage,  pixel, ampcont, 0.0, astampl, astangl])

			for i in xrange(nangle):
				for k in xrange(12):
					dphi = 8.0*(random()-0.5)
					dtht = 8.0*(random()-0.5)
					psi  = 360.0*random()

					phi = angles[i][0]+dphi
					tht = angles[i][1]+dtht

					s2x = 4.0*(random()-0.5)
					s2y = 4.0*(random()-0.5)

					params.append([phi, tht, psi, s2x, s2y])

					ivol = iprj % nvol
					#imgsrc = randint(0,nvlms-1)
					imgsrc = iprj % nvlms
					proj = prgs(volfts[imgsrc][ivol], kb, [phi, tht, psi, -s2x, -s2y])

					x = xstart + irow * width
					y = ystart + icol * width

					mic += pad(proj, 4096, 4096, 1, 0.0, x-2048, y-2048, 0)

					proj = proj + model_gauss_noise( sigma_proj, nx, nx )
					if parm_CTF:
						proj = filt_ctf(proj, ctf)
						proj.set_attr_dict({"ctf":ctf, "ctf_applied":0})

					proj = proj + filt_gaussl(model_gauss_noise(sigma2_proj, nx, nx), sigma_gauss)
					proj.set_attr("origimgsrc",imgsrc)
					proj.set_attr("test_id", iprj)
					# flags describing the status of the image (1 = true, 0 = false)
					set_params2D(proj, [0.0, 0.0, 0.0, 0, 1.0])
					set_params_proj(proj, [phi, tht, psi, s2x, s2y])

					proj.write_image(stack_data, iprj)
			
					icol += 1
					if icol == rowlen:
						icol = 0
						irow += 1

					iprj += 1

			mic += model_gauss_noise(sigma_mic,4096,4096)
			if parm_CTF:
				#apply CTF
				mic = filt_ctf(mic, ctf)
			mic += filt_gaussl(model_gauss_noise(sigma2_mic, 4096, 4096), sigma_gauss_mic)
	
			mic.write_image(micpref + "%1d.hdf" % (idef-3), 0)
		
		drop_spider_doc("params.txt", params)

	elif options.importctf != None:
		print ' IMPORTCTF  '
		from utilities import read_text_row,write_text_row
		from random import randint
		import subprocess
		grpfile = 'groupid%04d'%randint(1000,9999)
		ctfpfile = 'ctfpfile%04d'%randint(1000,9999)
		cterr = [options.defocuserror/100.0, options.astigmatismerror]
		ctfs = read_text_row(options.importctf)
		for kk in xrange(len(ctfs)):
			root,name = os.path.split(ctfs[kk][-1])
			ctfs[kk][-1] = name[:-4]
		if(options.input[:4] != 'bdb:'):
			ERROR('Sorry, only bdb files implemented','importctf',1)
		d = options.input[4:]
		#try:     str = d.index('*')
		#except:  str = -1
		from string import split
		import glob
		uu = os.path.split(d)
		uu = os.path.join(uu[0],'EMAN2DB',uu[1]+'.bdb')
		flist = glob.glob(uu)
		for i in xrange(len(flist)):
			root,name = os.path.split(flist[i])
			root = root[:-7]
			name = name[:-4]
			fil = 'bdb:'+os.path.join(root,name)
			sourcemic = EMUtil.get_all_attributes(fil,'ptcl_source_image')
			nn = len(sourcemic)
			gctfp = []
			groupid = []
			for kk in xrange(nn):
				junk,name2 = os.path.split(sourcemic[kk])
				name2 = name2[:-4]
				ctfp = [-1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]
				for ll in xrange(len(ctfs)):
					if(name2 == ctfs[ll][-1]):
						#  found correct
						if(ctfs[ll][8]/ctfs[ll][0] <= cterr[0]):
							#  acceptable defocus error
							ctfp = ctfs[ll][:8]
							if(ctfs[ll][10] > cterr[1] ):
								# error of astigmatism exceed the threshold, set astigmatism to zero.
								ctfp[6] = 0.0
								ctfp[7] = 0.0
							gctfp.append(ctfp)
							groupid.append(kk)
						break
			if(len(groupid) > 0):
				write_text_row(groupid, grpfile)
				write_text_row(gctfp, ctfpfile)
				cmd = "{} {} {} {}".format('e2bdb.py',fil,'--makevstack=bdb:'+root+'G'+name,'--list='+grpfile)
				#print cmd
				subprocess.call(cmd, shell=True)
				cmd = "{} {} {} {}".format('sxheader.py','bdb:'+root+'G'+name,'--params=ctf','--import='+ctfpfile)
				#print cmd
				subprocess.call(cmd, shell=True)
			else:
				print  ' >>>  Group ',name,'  skipped.'
				
		cmd = "{} {} {}".format("rm -f",grpfile,ctfpfile)
		subprocess.call(cmd, shell=True)

	elif options.scale > 0.0:
		from utilities import read_text_row,write_text_row
		scale = options.scale
		nargs = len(args)
		if nargs != 2:
			print "Please provide names of input and output file!"
			return
		p = read_text_row(args[0])
		for i in xrange(len(p)):
			p[i][3] /= scale
			p[i][4] /= scale
		write_text_row(p, args[1])
		
	elif options.adaptive_mask:
		from utilities import get_im
		from morphology import adaptive_mask, binarize, erosion, dilation
		nsigma             = options.nsigma
		ndilation          = options.ndilation
		kernel_size        = options.kernel_size
		gauss_standard_dev = options.gauss_standard_dev
		nargs = len(args)
		if nargs ==0:
			print " Create 3D mask from a given volume, either automatically or from the user provided threshold."
		elif nargs > 2:
			print "Too many inputs are given, try again!"
			return
		else:
			inputvol = get_im(args[0])
			input_path, input_file_name = os.path.split(args[0])
			input_file_name_root,ext=os.path.splitext(input_file_name)
			if nargs == 2:  mask_file_name = args[1]
			else:           mask_file_name = "adaptive_mask_for_"+input_file_name_root+".hdf" # Only hdf file is output.
			if options.threshold !=9999.:
				mask3d = binarize(inputvol, options.threshold)
				for i in xrange(options.ne): mask3d = erosion(mask3d)
				for i in xrange(options.nd): mask3d = dilation(mask3d)
			else: 
				mask3d = adaptive_mask(inputvol, nsigma, ndilation, kernel_size, gauss_standard_dev)
			mask3d.write_image(mask_file_name)
			
	elif options.postprocess:
		from utilities    import get_im
		from fundamentals import rot_avg_table
		from morphology   import compute_bfactor,power
		from statistics   import fsc
		from filter       import filt_table, filt_gaussinv
		from EMAN2 import periodogram
		e1   = get_im(args[0],0)
		if e1.get_zsize()==1:
			# 2-D input: sharpen each image with a negative B-factor estimated from its rotationally averaged power spectrum
			nimage = EMUtil.get_image_count(args[0])
			pixel_size = options.pixel_size
			from math import sqrt
			if options.mask !=None: m = get_im(options.mask)
			else: m = None
			for i in xrange(nimage):
				e1 = get_im(args[0],i)
				if m: e1 *=m
				guinerline = rot_avg_table(power(periodogram(e1),.5))
				freq_max   =  1/(2.*pixel_size)
				freq_min   =  1./options.B_start
				b,junk=compute_bfactor(guinerline, freq_min, freq_max, pixel_size)
				tmp = b/pixel_size**2
				sigma_of_inverse=sqrt(2./tmp)
				e1 = filt_gaussinv(e1,sigma_of_inverse)
				if options.low_pass_filter:
					from filter import filt_tanl
					e1 =filt_tanl(e1,options.ff, options.aa)
				e1.write_image(options.output, i)
		else:
			nargs = len(args)
			e1    = get_im(args[0])
			if nargs >1: e2 = get_im(args[1])
			if options.mask !=None: m = get_im(options.mask)
			else: m =None
			pixel_size = options.pixel_size
			from math import sqrt
			if m !=None:
				e1 *=m
				if nargs >1 :e2 *=m
			if options.fsc_weighted:
				frc = fsc(e1,e2,1)
				## FSC is done on masked two images
				#### FSC weighting sqrt((2.*fsc)/(1+fsc));
				fil = len(frc[1])*[None]
				for i in xrange(len(fil)):
					if frc[1][i]>=options.FSC_cutoff: tmp = frc[1][i]
					else: tmp = 0.0
					fil[i] = sqrt(2.*tmp/(1.+tmp))
			if nargs>1: e1 +=e2
			if options.fsc_weighted: e1=filt_table(e1,fil) 
			guinerline = rot_avg_table(power(periodogram(e1),.5))
			freq_max   = 1/(2.*pixel_size)
			freq_min   = 1./options.B_start
			b,junk     = compute_bfactor(guinerline, freq_min, freq_max, pixel_size)
			tmp        = b/pixel_size**2
			sigma_of_inverse=sqrt(2./tmp)
			e1  = filt_gaussinv(e1,sigma_of_inverse)
			if options.low_pass_filter:
				from filter       import filt_tanl
				e1 =filt_tanl(e1,options.ff, options.aa)
			e1.write_image(options.output)
		 
	elif options.window_stack:
		nargs = len(args)
		if nargs ==0:
			print "  Reduce image size of a stack"
			return
		else:
			output_stack_name = None
			inputstack = args[0]
			if nargs ==2:output_stack_name = args[1]
			input_path,input_file_name=os.path.split(inputstack)
			input_file_name_root,ext=os.path.splitext(input_file_name)
			if input_file_name_root[0:3]=="bdb":stack_is_bdb= True
			else: stack_is_bdb= False
			if output_stack_name is None:
				if stack_is_bdb: output_stack_name ="bdb:reduced_"+input_file_name_root[4:]
				else:output_stack_name = "reduced_"+input_file_name_root+".hdf" # Only hdf file is output.
			nimage = EMUtil.get_image_count(inputstack)
			from fundamentals import window2d
			for i in xrange(nimage):
				image = EMData()
				image.read_image(inputstack,i)
				w = window2d(image,options.box,options.box)
				w.write_image(output_stack_name,i)
	else:  ERROR("Please provide option name","sxprocess.py",1)	
Example #11
0
def shiftali_MPI(stack, maskfile=None, maxit=100, CTF=False, snr=1.0, Fourvar=False, search_rng=-1, oneDx=False, search_rng_y=-1):  
	from applications import MPI_start_end
	from utilities    import model_circle, model_blank, get_image, peak_search, get_im
	from utilities    import reduce_EMData_to_root, bcast_EMData_to_all, send_attr_dict, file_type, bcast_number_to_all, bcast_list_to_all
	from statistics   import varf2d_MPI
	from fundamentals import fft, ccf, rot_shift3D, rot_shift2D
	from utilities    import get_params2D, set_params2D
	from utilities    import print_msg, print_begin_msg, print_end_msg
	import os
	import sys
	from mpi 	  	  import mpi_init, mpi_comm_size, mpi_comm_rank, MPI_COMM_WORLD
	from mpi 	  	  import mpi_reduce, mpi_bcast, mpi_barrier, mpi_gatherv
	from mpi 	  	  import MPI_SUM, MPI_FLOAT, MPI_INT
	from EMAN2	  	  import Processor
	from time         import time	
	
	number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
	myid = mpi_comm_rank(MPI_COMM_WORLD)
	main_node = 0
		
	ftp = file_type(stack)

	if myid == main_node:
		print_begin_msg("shiftali_MPI")

	max_iter=int(maxit)

	if myid == main_node:
		if ftp == "bdb":
			from EMAN2db import db_open_dict
			dummy = db_open_dict(stack, True)
		nima = EMUtil.get_image_count(stack)
	else:
		nima = 0
	nima = bcast_number_to_all(nima, source_node = main_node)
	list_of_particles = range(nima)
	
	image_start, image_end = MPI_start_end(nima, number_of_proc, myid)
	list_of_particles = list_of_particles[image_start: image_end]

	# read nx and ctf_app (if CTF) and broadcast to all nodes
	if myid == main_node:
		ima = EMData()
		ima.read_image(stack, list_of_particles[0], True)
		nx = ima.get_xsize()
		ny = ima.get_ysize()
		if CTF:	ctf_app = ima.get_attr_default('ctf_applied', 2)
		del ima
	else:
		nx = 0
		ny = 0
		if CTF:	ctf_app = 0
	nx = bcast_number_to_all(nx, source_node = main_node)
	ny = bcast_number_to_all(ny, source_node = main_node)
	if CTF:
		ctf_app = bcast_number_to_all(ctf_app, source_node = main_node)
		if ctf_app > 0:	ERROR("data cannot be ctf-applied", "shiftali_MPI", 1, myid)

	if maskfile == None:
		mrad = min(nx, ny)
		mask = model_circle(mrad//2-2, nx, ny)
	else:
		mask = get_im(maskfile)

	if CTF:
		from filter import filt_ctf
		from morphology   import ctf_img
		ctf_abs_sum = EMData(nx, ny, 1, False)
		ctf_2_sum = EMData(nx, ny, 1, False)
	else:
		ctf_2_sum = None

	from global_def import CACHE_DISABLE
	if CACHE_DISABLE:
		data = EMData.read_images(stack, list_of_particles)
	else:
		for i in xrange(number_of_proc):
			if myid == i:
				data = EMData.read_images(stack, list_of_particles)
			if ftp == "bdb": mpi_barrier(MPI_COMM_WORLD)


	for im in xrange(len(data)):
		data[im].set_attr('ID', list_of_particles[im])
		st = Util.infomask(data[im], mask, False)
		data[im] -= st[0]
		if CTF:
			ctf_params = data[im].get_attr("ctf")
			ctfimg = ctf_img(nx, ctf_params, ny=ny)
			Util.add_img2(ctf_2_sum, ctfimg)
			Util.add_img_abs(ctf_abs_sum, ctfimg)

	if CTF:
		reduce_EMData_to_root(ctf_2_sum, myid, main_node)
		reduce_EMData_to_root(ctf_abs_sum, myid, main_node)
	else:  ctf_2_sum = None
	if CTF:
		if myid != main_node:
			del ctf_2_sum
			del ctf_abs_sum
		else:
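			# fill the real (even-index) elements with the constant snr and add to the summed CTF^2,
			# regularizing the division of the average by ctf_2_sum below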
			temp = EMData(nx, ny, 1, False)
			for i in xrange(0,nx,2):
				for j in xrange(ny):
					temp.set_value_at(i,j,snr)
			Util.add_img(ctf_2_sum, temp)
			del temp

	total_iter = 0

	# apply initial xform.align2d parameters stored in header
	init_params = []
	for im in xrange(len(data)):
		t = data[im].get_attr('xform.align2d')
		init_params.append(t)
		p = t.get_params("2d")
		data[im] = rot_shift2D(data[im], p['alpha'], sx=p['tx'], sy=p['ty'], mirror=p['mirror'], scale=p['scale'])

	# fourier transform all images, and apply ctf if CTF
	for im in xrange(len(data)):
		if CTF:
			ctf_params = data[im].get_attr("ctf")
			data[im] = filt_ctf(fft(data[im]), ctf_params)
		else:
			data[im] = fft(data[im])

	sx_sum=0
	sy_sum=0
	sx_sum_total=0
	sy_sum_total=0
	shift_x = [0.0]*len(data)
	shift_y = [0.0]*len(data)
	ishift_x = [0.0]*len(data)
	ishift_y = [0.0]*len(data)

	for Iter in xrange(max_iter):
		if myid == main_node:
			start_time = time()
			print_msg("Iteration #%4d\n"%(total_iter))
		total_iter += 1
		avg = EMData(nx, ny, 1, False)
		for im in data:  Util.add_img(avg, im)

		reduce_EMData_to_root(avg, myid, main_node)

		if myid == main_node:
			if CTF:
				tavg = Util.divn_filter(avg, ctf_2_sum)
			else:	 tavg = Util.mult_scalar(avg, 1.0/float(nima))
		else:
			tavg = EMData(nx, ny, 1, False)                               

		if Fourvar:
			bcast_EMData_to_all(tavg, myid, main_node)
			vav, rvar = varf2d_MPI(myid, data, tavg, mask, "a", CTF)

		if myid == main_node:
			if Fourvar:
				tavg    = fft(Util.divn_img(fft(tavg), vav))
				vav_r	= Util.pack_complex_to_real(vav)

			# normalize and mask tavg in real space
			tavg = fft(tavg)
			stat = Util.infomask( tavg, mask, False ) 
			tavg -= stat[0]
			Util.mul_img(tavg, mask)
			# For testing purposes: shift tavg to some random place and see if the centering is still correct
			#tavg = rot_shift3D(tavg,sx=3,sy=-4)
			tavg = fft(tavg)

		if Fourvar:  del vav
		bcast_EMData_to_all(tavg, myid, main_node)

		sx_sum=0 
		sy_sum=0 
		if search_rng > 0: nwx = 2*search_rng+1
		else:              nwx = nx
		
		if search_rng_y > 0: nwy = 2*search_rng_y+1
		else:                nwy = ny
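		# the cross-correlation map is windowed to the search range (or to the full image if no range is given) before peak search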

		not_zero = 0
		for im in xrange(len(data)):
			if oneDx:
				ctx = Util.window(ccf(data[im],tavg),nwx,1)
				p1  = peak_search(ctx)
				p1_x = -int(p1[0][3])
				ishift_x[im] = p1_x
				sx_sum += p1_x
			else:
				p1 = peak_search(Util.window(ccf(data[im],tavg), nwx,nwy))
				p1_x = -int(p1[0][4])
				p1_y = -int(p1[0][5])
				ishift_x[im] = p1_x
				ishift_y[im] = p1_y
				sx_sum += p1_x
				sy_sum += p1_y

			if not_zero == 0:
				if (not(ishift_x[im] == 0.0)) or (not(ishift_y[im] == 0.0)):
					not_zero = 1

		sx_sum = mpi_reduce(sx_sum, 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD)  

		if not oneDx:
			sy_sum = mpi_reduce(sy_sum, 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD)

		if myid == main_node:
			sx_sum_total = int(sx_sum[0])
			if not oneDx:
				sy_sum_total = int(sy_sum[0])
		else:
			sx_sum_total = 0	
			sy_sum_total = 0

		sx_sum_total = bcast_number_to_all(sx_sum_total, source_node = main_node)

		if not oneDx:
			sy_sum_total = bcast_number_to_all(sy_sum_total, source_node = main_node)

		sx_ave = round(float(sx_sum_total)/nima)
		sy_ave = round(float(sy_sum_total)/nima)
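		# subtract the average shift so the global average stays centered; per-image shifts are applied as Fourier phase shifts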
		for im in xrange(len(data)): 
			p1_x = ishift_x[im] - sx_ave
			p1_y = ishift_y[im] - sy_ave
			params2 = {"filter_type" : Processor.fourier_filter_types.SHIFT, "x_shift" : p1_x, "y_shift" : p1_y, "z_shift" : 0.0}
			data[im] = Processor.EMFourierFilter(data[im], params2)
			shift_x[im] += p1_x
			shift_y[im] += p1_y
		# stop if all shifts are zero
		not_zero = mpi_reduce(not_zero, 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD)  
		if myid == main_node:
			not_zero_all = int(not_zero[0])
		else:
			not_zero_all = 0
		not_zero_all = bcast_number_to_all(not_zero_all, source_node = main_node)

		if myid == main_node:
			print_msg("Time of iteration = %12.2f\n"%(time()-start_time))
			start_time = time()

		if not_zero_all == 0:  break

	#for im in xrange(len(data)): data[im] = fft(data[im])  This should not be required as only header information is used
	# combine shifts found with the original parameters
	for im in xrange(len(data)):		
		t0 = init_params[im]
		t1 = Transform()
		t1.set_params({"type":"2D","alpha":0,"scale":t0.get_scale(),"mirror":0,"tx":shift_x[im],"ty":shift_y[im]})
		# combine t0 and t1
		tt = t1*t0
		data[im].set_attr("xform.align2d", tt)  

	# write out headers and STOP, under MPI writing has to be done sequentially
	mpi_barrier(MPI_COMM_WORLD)
	par_str = ["xform.align2d", "ID"]
	if myid == main_node:
		from utilities import file_type
		if(file_type(stack) == "bdb"):
			from utilities import recv_attr_dict_bdb
			recv_attr_dict_bdb(main_node, stack, data, par_str, image_start, image_end, number_of_proc)
		else:
			from utilities import recv_attr_dict
			recv_attr_dict(main_node, stack, data, par_str, image_start, image_end, number_of_proc)
		
	else:           send_attr_dict(main_node, data, par_str, image_start, image_end)
	if myid == main_node: print_end_msg("shiftali_MPI")				
Example #12
0
def helicalshiftali_MPI(stack, maskfile=None, maxit=100, CTF=False, snr=1.0, Fourvar=False, search_rng=-1):
	from applications import MPI_start_end
	from utilities    import model_circle, model_blank, get_image, peak_search, get_im, pad
	from utilities    import reduce_EMData_to_root, bcast_EMData_to_all, send_attr_dict, file_type, bcast_number_to_all, bcast_list_to_all
	from statistics   import varf2d_MPI
	from fundamentals import fft, ccf, rot_shift3D, rot_shift2D, fshift
	from utilities    import get_params2D, set_params2D, chunks_distribution
	from utilities    import print_msg, print_begin_msg, print_end_msg
	import os
	import sys
	from mpi 	  	  import mpi_init, mpi_comm_size, mpi_comm_rank, MPI_COMM_WORLD
	from mpi 	  	  import mpi_reduce, mpi_bcast, mpi_barrier, mpi_gatherv
	from mpi 	  	  import MPI_SUM, MPI_FLOAT, MPI_INT
	from time         import time	
	from pixel_error  import ordersegments
	from math         import sqrt, atan2, tan, pi
	
	nproc = mpi_comm_size(MPI_COMM_WORLD)
	myid = mpi_comm_rank(MPI_COMM_WORLD)
	main_node = 0
		
	ftp = file_type(stack)

	if myid == main_node:
		print_begin_msg("helical-shiftali_MPI")

	max_iter=int(maxit)
	if( myid == main_node):
		infils = EMUtil.get_all_attributes(stack, "filament")
		ptlcoords = EMUtil.get_all_attributes(stack, 'ptcl_source_coord')
		filaments = ordersegments(infils, ptlcoords)
		total_nfils = len(filaments)
		inidl = [0]*total_nfils
		for i in xrange(total_nfils):  inidl[i] = len(filaments[i])
		linidl = sum(inidl)
		nima = linidl
		tfilaments = []
		for i in xrange(total_nfils):  tfilaments += filaments[i]
		del filaments
	else:
		total_nfils = 0
		linidl = 0
	total_nfils = bcast_number_to_all(total_nfils, source_node = main_node)
	if myid != main_node:
		inidl = [-1]*total_nfils
	inidl = bcast_list_to_all(inidl, myid, source_node = main_node)
	linidl = bcast_number_to_all(linidl, source_node = main_node)
	if myid != main_node:
		tfilaments = [-1]*linidl
	tfilaments = bcast_list_to_all(tfilaments, myid, source_node = main_node)
	filaments = []
	iendi = 0
	for i in xrange(total_nfils):
		isti = iendi
		iendi = isti+inidl[i]
		filaments.append(tfilaments[isti:iendi])
	del tfilaments,inidl

	if myid == main_node:
		print_msg( "total number of filaments: %d"%total_nfils)
	if total_nfils< nproc:
		ERROR('number of CPUs (%i) is larger than the number of filaments (%i), please reduce the number of CPUs used'%(nproc, total_nfils), "ehelix_MPI", 1,myid)

	#  balanced load
	temp = chunks_distribution([[len(filaments[i]), i] for i in xrange(len(filaments))], nproc)[myid:myid+1][0]
	filaments = [filaments[temp[i][1]] for i in xrange(len(temp))]
	nfils     = len(filaments)

	#filaments = [[0,1]]
	#print "filaments",filaments
	list_of_particles = []
	indcs = []
	k = 0
	for i in xrange(nfils):
		list_of_particles += filaments[i]
		k1 = k+len(filaments[i])
		indcs.append([k,k1])
		k = k1
	data = EMData.read_images(stack, list_of_particles)
	ldata = len(data)
	print "ldata=", ldata
	nx = data[0].get_xsize()
	ny = data[0].get_ysize()
	if maskfile == None:
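		# default mask: a vertical band of ones (width 2*mrad+1) spanning the full segment height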
		mrad = min(nx, ny)//2-2
		mask = pad( model_blank(2*mrad+1, ny, 1, 1.0), nx, ny, 1, 0.0)
	else:
		mask = get_im(maskfile)

	# apply initial xform.align2d parameters stored in header
	init_params = []
	for im in xrange(ldata):
		t = data[im].get_attr('xform.align2d')
		init_params.append(t)
		p = t.get_params("2d")
		data[im] = rot_shift2D(data[im], p['alpha'], p['tx'], p['ty'], p['mirror'], p['scale'])

	if CTF:
		from filter import filt_ctf
		from morphology   import ctf_img
		ctf_abs_sum = EMData(nx, ny, 1, False)
		ctf_2_sum = EMData(nx, ny, 1, False)
	else:
		ctf_2_sum = None
		ctf_abs_sum = None



	from utilities import info

	for im in xrange(ldata):
		data[im].set_attr('ID', list_of_particles[im])
		st = Util.infomask(data[im], mask, False)
		data[im] -= st[0]
		if CTF:
			ctf_params = data[im].get_attr("ctf")
			qctf = data[im].get_attr("ctf_applied")
			if qctf == 0:
				data[im] = filt_ctf(fft(data[im]), ctf_params)
				data[im].set_attr('ctf_applied', 1)
			elif qctf != 1:
				ERROR('Incorrectly set qctf flag', "helicalshiftali_MPI", 1,myid)
			ctfimg = ctf_img(nx, ctf_params, ny=ny)
			Util.add_img2(ctf_2_sum, ctfimg)
			Util.add_img_abs(ctf_abs_sum, ctfimg)
		else:  data[im] = fft(data[im])

	del list_of_particles		

	if CTF:
		reduce_EMData_to_root(ctf_2_sum, myid, main_node)
		reduce_EMData_to_root(ctf_abs_sum, myid, main_node)
	if CTF:
		if myid != main_node:
			del ctf_2_sum
			del ctf_abs_sum
		else:
			temp = EMData(nx, ny, 1, False)
			tsnr = 1./snr
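			# fill the real (even-index) elements with 1/snr and add to the summed CTF^2, regularizing the division below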
			for i in xrange(0,nx+2,2):
				for j in xrange(ny):
					temp.set_value_at(i,j,tsnr)
					temp.set_value_at(i+1,j,0.0)
			#info(ctf_2_sum)
			Util.add_img(ctf_2_sum, temp)
			#info(ctf_2_sum)
			del temp

	total_iter = 0
	shift_x = [0.0]*ldata

	for Iter in xrange(max_iter):
		if myid == main_node:
			start_time = time()
			print_msg("Iteration #%4d\n"%(total_iter))
		total_iter += 1
		avg = EMData(nx, ny, 1, False)
		for im in xrange(ldata):
			Util.add_img(avg, fshift(data[im], shift_x[im]))

		reduce_EMData_to_root(avg, myid, main_node)

		if myid == main_node:
			if CTF:  tavg = Util.divn_filter(avg, ctf_2_sum)
			else:    tavg = Util.mult_scalar(avg, 1.0/float(nima))
		else:
			tavg = model_blank(nx,ny)

		if Fourvar:
			bcast_EMData_to_all(tavg, myid, main_node)
			vav, rvar = varf2d_MPI(myid, data, tavg, mask, "a", CTF)

		if myid == main_node:
			if Fourvar:
				tavg    = fft(Util.divn_img(fft(tavg), vav))
				vav_r	= Util.pack_complex_to_real(vav)
			# normalize and mask tavg in real space
			tavg = fft(tavg)
			stat = Util.infomask( tavg, mask, False )
			tavg -= stat[0]
			Util.mul_img(tavg, mask)
			tavg.write_image("tavg.hdf",Iter)
			# For testing purposes: shift tavg to some random place and see if the centering is still correct
			#tavg = rot_shift3D(tavg,sx=3,sy=-4)

		if Fourvar:  del vav
		bcast_EMData_to_all(tavg, myid, main_node)
		tavg = fft(tavg)

		sx_sum = 0.0
		nxc = nx//2
		
		for ifil in xrange(nfils):
			"""
			# Calculate filament average
			avg = EMData(nx, ny, 1, False)
			filnima = 0
			for im in xrange(indcs[ifil][0], indcs[ifil][1]):
				Util.add_img(avg, data[im])
				filnima += 1
			tavg = Util.mult_scalar(avg, 1.0/float(filnima))
			"""
			# Calculate 1D ccf between each segment and filament average
			nsegms = indcs[ifil][1]-indcs[ifil][0]
			ctx = [None]*nsegms
			pcoords = [None]*nsegms
			for im in xrange(indcs[ifil][0], indcs[ifil][1]):
				ctx[im-indcs[ifil][0]] = Util.window(ccf(tavg, data[im]), nx, 1)
				pcoords[im-indcs[ifil][0]] = data[im].get_attr('ptcl_source_coord')
				#ctx[im-indcs[ifil][0]].write_image("ctx.hdf",im-indcs[ifil][0])
				#print "  CTX  ",myid,im,Util.infomask(ctx[im-indcs[ifil][0]], None, True)
			# search for best x-shift
			cents = nsegms//2
			
			dst = sqrt(max((pcoords[cents][0] - pcoords[0][0])**2 + (pcoords[cents][1] - pcoords[0][1])**2, (pcoords[cents][0] - pcoords[-1][0])**2 + (pcoords[cents][1] - pcoords[-1][1])**2))
			maxincline = atan2(ny//2-2-float(search_rng),dst)
			kang = int(dst*tan(maxincline)+0.5)
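			# the search combines an integer shift of the central segment (within search_rng) with a linear incline along the filament;
			# kang is the number of discrete incline steps up to maxincline (see the commented-out reference loop below)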
			#print  "  settings ",nsegms,cents,dst,search_rng,maxincline,kang
			
			# ## C code for alignment. @ming
			results = [0.0]*3
			results = Util.helixshiftali(ctx, pcoords, nsegms, maxincline, kang, search_rng, nxc)
			sib  = int(results[0])
			bang = results[1]
			qm   = results[2]
			#print qm, sib, bang
			
			# qm = -1.e23	
# 				
# 			for six in xrange(-search_rng, search_rng+1,1):
# 				q0 = ctx[cents].get_value_at(six+nxc)
# 				for incline in xrange(kang+1):
# 					qt = q0
# 					qu = q0
# 					if(kang>0):  tang = tan(maxincline/kang*incline)
# 					else:        tang = 0.0
# 					for kim in xrange(cents+1,nsegms):
# 						dst = sqrt((pcoords[cents][0] - pcoords[kim][0])**2 + (pcoords[cents][1] - pcoords[kim][1])**2)
# 						xl = dst*tang+six+nxc
# 						ixl = int(xl)
# 						dxl = xl - ixl
# 						#print "  A  ", ifil,six,incline,kim,xl,ixl,dxl
# 						qt += (1.0-dxl)*ctx[kim].get_value_at(ixl) + dxl*ctx[kim].get_value_at(ixl+1)
# 						xl = -dst*tang+six+nxc
# 						ixl = int(xl)
# 						dxl = xl - ixl
# 						qu += (1.0-dxl)*ctx[kim].get_value_at(ixl) + dxl*ctx[kim].get_value_at(ixl+1)
# 					for kim in xrange(cents):
# 						dst = sqrt((pcoords[cents][0] - pcoords[kim][0])**2 + (pcoords[cents][1] - pcoords[kim][1])**2)
# 						xl = -dst*tang+six+nxc
# 						ixl = int(xl)
# 						dxl = xl - ixl
# 						qt += (1.0-dxl)*ctx[kim].get_value_at(ixl) + dxl*ctx[kim].get_value_at(ixl+1)
# 						xl =  dst*tang+six+nxc
# 						ixl = int(xl)
# 						dxl = xl - ixl
# 						qu += (1.0-dxl)*ctx[kim].get_value_at(ixl) + dxl*ctx[kim].get_value_at(ixl+1)
# 					if( qt > qm ):
# 						qm = qt
# 						sib = six
# 						bang = tang
# 					if( qu > qm ):
# 						qm = qu
# 						sib = six
# 						bang = -tang
					#if incline == 0:  print  "incline = 0  ",six,tang,qt,qu
			#print qm,six,sib,bang
			#print " got results   ",indcs[ifil][0], indcs[ifil][1], ifil,myid,qm,sib,tang,bang,len(ctx),Util.infomask(ctx[0], None, True)
			for im in xrange(indcs[ifil][0], indcs[ifil][1]):
				kim = im-indcs[ifil][0]
				dst = sqrt((pcoords[cents][0] - pcoords[kim][0])**2 + (pcoords[cents][1] - pcoords[kim][1])**2)
				if(kim < cents):  xl = -dst*bang+sib
				else:             xl =  dst*bang+sib
				shift_x[im] = xl
							
			# Average shift
			sx_sum += shift_x[indcs[ifil][0]+cents]
			
			
		# #print myid,sx_sum,total_nfils
		sx_sum = mpi_reduce(sx_sum, 1, MPI_FLOAT, MPI_SUM, main_node, MPI_COMM_WORLD)
		if myid == main_node:
			sx_sum = float(sx_sum[0])/total_nfils
			print_msg("Average shift  %6.2f\n"%(sx_sum))
		else:
			sx_sum = 0.0
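		# note: the unconditional reset below means the average shift is only reported, not subtracted from shift_x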
		sx_sum = 0.0
		sx_sum = bcast_number_to_all(sx_sum, source_node = main_node)
		for im in xrange(ldata):
			shift_x[im] -= sx_sum
			#print  "   %3d  %6.3f"%(im,shift_x[im])
		#exit()


			
	# combine shifts found with the original parameters
	for im in xrange(ldata):		
		t1 = Transform()
		##import random
		##shix=random.randint(-10, 10)
		##t1.set_params({"type":"2D","tx":shix})
		t1.set_params({"type":"2D","tx":shift_x[im]})
		# combine t0 and t1
		tt = t1*init_params[im]
		data[im].set_attr("xform.align2d", tt)
	# write out headers and STOP, under MPI writing has to be done sequentially
	mpi_barrier(MPI_COMM_WORLD)
	par_str = ["xform.align2d", "ID"]
	if myid == main_node:
		from utilities import file_type
		if(file_type(stack) == "bdb"):
			from utilities import recv_attr_dict_bdb
			recv_attr_dict_bdb(main_node, stack, data, par_str, 0, ldata, nproc)
		else:
			from utilities import recv_attr_dict
			recv_attr_dict(main_node, stack, data, par_str, 0, ldata, nproc)
	else:           send_attr_dict(main_node, data, par_str, 0, ldata)
	if myid == main_node: print_end_msg("helical-shiftali_MPI")				
Example #13
0
def main():

	def params_3D_2D_NEW(phi, theta, psi, s2x, s2y, mirror):
		if mirror:
			m = 1
			alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)
		else:
			m = 0
			alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)
		return  alpha, sx, sy, m
	
	progname = os.path.basename(sys.argv[0])
	usage = progname + " prj_stack  --ave2D= --var2D=  --ave3D= --var3D= --img_per_grp= --fl=0.2 --aa=0.1  --sym=symmetry --CTF"
	parser = OptionParser(usage, version=SPARXVERSION)

	parser.add_option("--ave2D",		type="string"	   ,	default=False,				help="write to the disk a stack of 2D averages")
	parser.add_option("--var2D",		type="string"	   ,	default=False,				help="write to the disk a stack of 2D variances")
	parser.add_option("--ave3D",		type="string"	   ,	default=False,				help="write to the disk reconstructed 3D average")
	parser.add_option("--var3D",		type="string"	   ,	default=False,				help="compute 3D variability (time consuming!)")
	parser.add_option("--img_per_grp",	type="int"         ,	default=10   ,				help="number of neighbouring projections")
	parser.add_option("--no_norm",		action="store_true",	default=False,				help="do not use normalization")
	parser.add_option("--radiusvar", 	type="int"         ,	default=-1   ,				help="radius for 3D var" )
	parser.add_option("--npad",			type="int"         ,	default=2    ,				help="number of time to pad the original images")
	parser.add_option("--sym" , 		type="string"      ,	default="c1" ,				help="symmetry")
	parser.add_option("--fl",			type="float"       ,	default=0.0  ,				help="stop-band frequency (Default - no filtration)")
	parser.add_option("--aa",			type="float"       ,	default=0.0  ,				help="fall off of the filter (Default - no filtration)")
	parser.add_option("--CTF",			action="store_true",	default=False,				help="use CFT correction")
	parser.add_option("--VERBOSE",		action="store_true",	default=False,				help="Long output for debugging")
	#parser.add_option("--MPI" , 		action="store_true",	default=False,				help="use MPI version")
	#parser.add_option("--radiuspca", 	type="int"         ,	default=-1   ,				help="radius for PCA" )
	#parser.add_option("--iter", 		type="int"         ,	default=40   ,				help="maximum number of iterations (stop criterion of reconstruction process)" )
	#parser.add_option("--abs", 			type="float"       ,	default=0.0  ,				help="minimum average absolute change of voxels' values (stop criterion of reconstruction process)" )
	#parser.add_option("--squ", 			type="float"       ,	default=0.0  ,				help="minimum average squared change of voxels' values (stop criterion of reconstruction process)" )
	parser.add_option("--VAR" , 		action="store_true",	default=False,				help="stack on input consists of 2D variances (Default False)")
	parser.add_option("--decimate",     type="float",           default=1.0,                 help="image decimate rate, a number large than 1. default is 1")
	parser.add_option("--window",       type="int",             default=0,                   help="reduce images to a small image size without changing pixel_size. Default value is zero.")
	#parser.add_option("--SND",			action="store_true",	default=False,				help="compute squared normalized differences (Default False)")
	parser.add_option("--nvec",			type="int"         ,	default=0    ,				help="number of eigenvectors, default = 0 meaning no PCA calculated")
	parser.add_option("--symmetrize",	action="store_true",	default=False,				help="Prepare input stack for handling symmetry (Default False)")
	
	(options,args) = parser.parse_args()
	#####
	from mpi import mpi_init, mpi_comm_rank, mpi_comm_size, mpi_recv, MPI_COMM_WORLD, MPI_TAG_UB
	from mpi import mpi_barrier, mpi_reduce, mpi_bcast, mpi_send, MPI_FLOAT, MPI_SUM, MPI_INT, MPI_MAX
	from applications import MPI_start_end
	from reconstruction import recons3d_em, recons3d_em_MPI
	from reconstruction	import recons3d_4nn_MPI, recons3d_4nn_ctf_MPI
	from utilities import print_begin_msg, print_end_msg, print_msg
	from utilities import read_text_row, get_image, get_im
	from utilities import bcast_EMData_to_all, bcast_number_to_all
	from utilities import get_symt

	#  This is code for handling symmetries by the above program.  To be incorporated. PAP 01/27/2015

	from EMAN2db import db_open_dict
	
	if options.symmetrize :
		try:
			sys.argv = mpi_init(len(sys.argv), sys.argv)
			try:	
				number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
				if( number_of_proc > 1 ):
					ERROR("Cannot use more than one CPU for symmetry prepration","sx3dvariability",1)
			except:
				pass
		except:
			pass

		#  Input
		#instack = "Clean_NORM_CTF_start_wparams.hdf"
		#instack = "bdb:data"
		instack = args[0]
		sym = options.sym
		if( sym == "c1" ):
			ERROR("Thre is no need to symmetrize stack for C1 symmetry","sx3dvariability",1)

		if(instack[:4] !="bdb:"):
			stack = "bdb:data"
			delete_bdb(stack)
			cmdexecute("sxcpy.py  "+instack+"  "+stack)
		else:
			stack = instack

		qt = EMUtil.get_all_attributes(stack,'xform.projection')

		na = len(qt)
		ts = get_symt(sym)
		ks = len(ts)
		angsa = [None]*na
		for k in xrange(ks):
			delete_bdb("bdb:Q%1d"%k)
			cmdexecute("e2bdb.py  "+stack+"  --makevstack=bdb:Q%1d"%k)
			DB = db_open_dict("bdb:Q%1d"%k)
			for i in xrange(na):
				ut = qt[i]*ts[k]
				DB.set_attr(i, "xform.projection", ut)
				#bt = ut.get_params("spider")
				#angsa[i] = [round(bt["phi"],3)%360.0, round(bt["theta"],3)%360.0, bt["psi"], -bt["tx"], -bt["ty"]]
			#write_text_row(angsa, 'ptsma%1d.txt'%k)
			#cmdexecute("e2bdb.py  "+stack+"  --makevstack=bdb:Q%1d"%k)
			#cmdexecute("sxheader.py  bdb:Q%1d  --params=xform.projection  --import=ptsma%1d.txt"%(k,k))
			DB.close()
		delete_bdb("bdb:sdata")
		cmdexecute("e2bdb.py . --makevstack=bdb:sdata --filt=Q")
		#cmdexecute("ls  EMAN2DB/sdata*")
		a = get_im("bdb:sdata")
		a.set_attr("variabilitysymmetry",sym)
		a.write_image("bdb:sdata")


	else:

		sys.argv = mpi_init(len(sys.argv), sys.argv)
		myid     = mpi_comm_rank(MPI_COMM_WORLD)
		number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
		main_node = 0

		if len(args) == 1:
			stack = args[0]
		else:
			print( "usage: " + usage)
			print( "Please run '" + progname + " -h' for detailed options")
			return 1

		t0 = time()
	
		# obsolete flags
		options.MPI = True
		options.nvec = 0
		options.radiuspca = -1
		options.iter = 40
		options.abs = 0.0
		options.squ = 0.0

		if options.fl > 0.0 and options.aa == 0.0:
			ERROR("Fall off has to be given for the low-pass filter", "sx3dvariability", 1, myid)
		if options.VAR and getattr(options, "SND", False):  # the --SND option is currently commented out in the parser
			ERROR("Only one of VAR and SND can be set!", "sx3dvariability", 1, myid)
			exit()
		if options.VAR and (options.ave2D or options.ave3D or options.var2D): 
			ERROR("When VAR is set, the program cannot output ave2D, ave3D or var2D", "sx3dvariability", 1, myid)
			exit()
		#if options.SND and (options.ave2D or options.ave3D):
		#	ERROR("When SND is set, the program cannot output ave2D or ave3D", "sx3dvariability", 1, myid)
		#	exit()
		if options.nvec > 0 :
			ERROR("PCA option not implemented", "sx3dvariability", 1, myid)
			exit()
		if options.nvec > 0 and options.ave3D == None:
			ERROR("When doing PCA analysis, one must set ave3D", "sx3dvariability", myid=myid)
			exit()
		import string
		options.sym = options.sym.lower()
		 
		if global_def.CACHE_DISABLE:
			from utilities import disable_bdb_cache
			disable_bdb_cache()
		global_def.BATCH = True

		if myid == main_node:
			print_begin_msg("sx3dvariability")
			print_msg("%-70s:  %s\n"%("Input stack", stack))
	
		img_per_grp = options.img_per_grp
		nvec = options.nvec
		radiuspca = options.radiuspca

		symbaselen = 0
		if myid == main_node:
			nima = EMUtil.get_image_count(stack)
			img  = get_image(stack)
			nx   = img.get_xsize()
			ny   = img.get_ysize()
			if options.sym != "c1" :
				imgdata = get_im(stack)
				try:
					i = imgdata.get_attr("variabilitysymmetry")
					if(i != options.sym):
						ERROR("The symmetry provided does not agree with the symmetry of the input stack", "sx3dvariability", myid=myid)
				except:
					ERROR("Input stack is not prepared for symmetry, please follow instructions", "sx3dvariability", myid=myid)
				from utilities import get_symt
				i = len(get_symt(options.sym))
				if((nima/i)*i != nima):
					ERROR("The length of the input stack is incorrect for symmetry processing", "sx3dvariability", myid=myid)
				symbaselen = nima/i
			else:  symbaselen = nima
		else:
			nima = 0
			nx = 0
			ny = 0
		nima = bcast_number_to_all(nima)
		nx   = bcast_number_to_all(nx)
		ny   = bcast_number_to_all(ny)
		Tracker ={}
		Tracker["nx"]  =nx
		Tracker["ny"]  =ny
		Tracker["total_stack"]=nima
		if options.decimate==1.:
			if options.window !=0:
				nx = options.window
				ny = options.window
		else:
			if options.window ==0:
				nx = int(nx/options.decimate)
				ny = int(ny/options.decimate)
			else:
				nx = int(options.window/options.decimate)
				ny = nx
		symbaselen = bcast_number_to_all(symbaselen)
		if radiuspca == -1: radiuspca = nx/2-2

		if myid == main_node:
			print_msg("%-70s:  %d\n"%("Number of projection", nima))
		
		img_begin, img_end = MPI_start_end(nima, number_of_proc, myid)
		"""
		if options.SND:
			from projection		import prep_vol, prgs
			from statistics		import im_diff
			from utilities		import get_im, model_circle, get_params_proj, set_params_proj
			from utilities		import get_ctf, generate_ctf
			from filter			import filt_ctf
		
			imgdata = EMData.read_images(stack, range(img_begin, img_end))

			if options.CTF:
				vol = recons3d_4nn_ctf_MPI(myid, imgdata, 1.0, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)
			else:
				vol = recons3d_4nn_MPI(myid, imgdata, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)

			bcast_EMData_to_all(vol, myid)
			volft, kb = prep_vol(vol)

			mask = model_circle(nx/2-2, nx, ny)
			varList = []
			for i in xrange(img_begin, img_end):
				phi, theta, psi, s2x, s2y = get_params_proj(imgdata[i-img_begin])
				ref_prj = prgs(volft, kb, [phi, theta, psi, -s2x, -s2y])
				if options.CTF:
					ctf_params = get_ctf(imgdata[i-img_begin])
					ref_prj = filt_ctf(ref_prj, generate_ctf(ctf_params))
				diff, A, B = im_diff(ref_prj, imgdata[i-img_begin], mask)
				diff2 = diff*diff
				set_params_proj(diff2, [phi, theta, psi, s2x, s2y])
				varList.append(diff2)
			mpi_barrier(MPI_COMM_WORLD)
		"""
		if options.VAR:
			#varList = EMData.read_images(stack, range(img_begin, img_end))
			varList = []
			this_image = EMData()
			for index_of_particle in xrange(img_begin,img_end):
				this_image.read_image(stack,index_of_particle)
				varList.append(image_decimate_window_xform_ctf(this_image,options.decimate,options.window,options.CTF))
		else:
			from utilities		import bcast_number_to_all, bcast_list_to_all, send_EMData, recv_EMData
			from utilities		import set_params_proj, get_params_proj, params_3D_2D, get_params2D, set_params2D, compose_transform2
			from utilities		import model_blank, nearest_proj, model_circle
			from applications	import pca
			from statistics		import avgvar, avgvar_ctf, ccc
			from filter		    import filt_tanl
			from morphology		import threshold, square_root
			from projection 	import project, prep_vol, prgs
			from sets		    import Set

			if myid == main_node:
				t1 = time()
				proj_angles = []
				aveList = []
				tab = EMUtil.get_all_attributes(stack, 'xform.projection')
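				# build a scalar sort key from theta (folded to <= 90 degrees) and psi so that projections with similar directions become adjacent after sorting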
				for i in xrange(nima):
					t     = tab[i].get_params('spider')
					phi   = t['phi']
					theta = t['theta']
					psi   = t['psi']
					x     = theta
					if x > 90.0: x = 180.0 - x
					x = x*10000+psi
					proj_angles.append([x, t['phi'], t['theta'], t['psi'], i])
				t2 = time()
				print_msg("%-70s:  %d\n"%("Number of neighboring projections", img_per_grp))
				print_msg("...... Finding neighboring projections\n")
				if options.VERBOSE:
					print "Number of images per group: ", img_per_grp
					print "Now grouping projections"
				proj_angles.sort()

			proj_angles_list = [0.0]*(nima*4)
			if myid == main_node:
				for i in xrange(nima):
					proj_angles_list[i*4]   = proj_angles[i][1]
					proj_angles_list[i*4+1] = proj_angles[i][2]
					proj_angles_list[i*4+2] = proj_angles[i][3]
					proj_angles_list[i*4+3] = proj_angles[i][4]
			proj_angles_list = bcast_list_to_all(proj_angles_list, myid, main_node)
			proj_angles = []
			for i in xrange(nima):
				proj_angles.append([proj_angles_list[i*4], proj_angles_list[i*4+1], proj_angles_list[i*4+2], int(proj_angles_list[i*4+3])])
			del proj_angles_list

			proj_list, mirror_list = nearest_proj(proj_angles, img_per_grp, range(img_begin, img_end))

			all_proj = Set()
			for im in proj_list:
				for jm in im:
					all_proj.add(proj_angles[jm][3])

			all_proj = list(all_proj)
			if options.VERBOSE:
				print "On node %2d, number of images needed to be read = %5d"%(myid, len(all_proj))

			index = {}
			for i in xrange(len(all_proj)): index[all_proj[i]] = i
			mpi_barrier(MPI_COMM_WORLD)

			if myid == main_node:
				print_msg("%-70s:  %.2f\n"%("Finding neighboring projections lasted [s]", time()-t2))
				print_msg("%-70s:  %d\n"%("Number of groups processed on the main node", len(proj_list)))
				if options.VERBOSE:
					print "Grouping projections took: ", (time()-t2)/60	, "[min]"
					print "Number of groups on main node: ", len(proj_list)
			mpi_barrier(MPI_COMM_WORLD)

			if myid == main_node:
				print_msg("...... calculating the stack of 2D variances \n")
				if options.VERBOSE:
					print "Now calculating the stack of 2D variances"

			proj_params = [0.0]*(nima*5)
			aveList = []
			varList = []				
			if nvec > 0:
				eigList = [[] for i in xrange(nvec)]

			if options.VERBOSE: 	print "Begin to read images on processor %d"%(myid)
			ttt = time()
			#imgdata = EMData.read_images(stack, all_proj)
			img     = EMData()
			imgdata = []
			for index_of_proj in xrange(len(all_proj)):
				img.read_image(stack, all_proj[index_of_proj])
				dmg = image_decimate_window_xform_ctf(img,options.decimate,options.window,options.CTF)
				#print dmg.get_xsize(), "init"
				imgdata.append(dmg)
			if options.VERBOSE:
				print "Reading images on processor %d done, time = %.2f"%(myid, time()-ttt)
				print "On processor %d, we got %d images"%(myid, len(imgdata))
			mpi_barrier(MPI_COMM_WORLD)

			'''	
			imgdata2 = EMData.read_images(stack, range(img_begin, img_end))
			if options.fl > 0.0:
				for k in xrange(len(imgdata2)):
					imgdata2[k] = filt_tanl(imgdata2[k], options.fl, options.aa)
			if options.CTF:
				vol = recons3d_4nn_ctf_MPI(myid, imgdata2, 1.0, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)
			else:
				vol = recons3d_4nn_MPI(myid, imgdata2, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)
			if myid == main_node:
				vol.write_image("vol_ctf.hdf")
				print_msg("Writing to the disk volume reconstructed from averages as		:  %s\n"%("vol_ctf.hdf"))
			del vol, imgdata2
			mpi_barrier(MPI_COMM_WORLD)
			'''
			from applications import prepare_2d_forPCA
			from utilities import model_blank
			for i in xrange(len(proj_list)):
				ki = proj_angles[proj_list[i][0]][3]
				if ki >= symbaselen:  continue
				mi = index[ki]
				phiM, thetaM, psiM, s2xM, s2yM = get_params_proj(imgdata[mi])

				grp_imgdata = []
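				# bring every group member into the in-plane frame of the central projection (phiM, thetaM) by composing its 2D parameters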
				for j in xrange(img_per_grp):
					mj = index[proj_angles[proj_list[i][j]][3]]
					phi, theta, psi, s2x, s2y = get_params_proj(imgdata[mj])
					alpha, sx, sy, mirror = params_3D_2D_NEW(phi, theta, psi, s2x, s2y, mirror_list[i][j])
					if thetaM <= 90:
						if mirror == 0:  alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, phiM-phi, 0.0, 0.0, 1.0)
						else:            alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, 180-(phiM-phi), 0.0, 0.0, 1.0)
					else:
						if mirror == 0:  alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, -(phiM-phi), 0.0, 0.0, 1.0)
						else:            alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, -(180-(phiM-phi)), 0.0, 0.0, 1.0)
					set_params2D(imgdata[mj], [alpha, sx, sy, mirror, 1.0])
					grp_imgdata.append(imgdata[mj])
					#print grp_imgdata[j].get_xsize(), imgdata[mj].get_xsize()

				if not options.no_norm:
					#print grp_imgdata[j].get_xsize()
					mask = model_circle(nx/2-2, nx, nx)
					for k in xrange(img_per_grp):
						ave, std, minn, maxx = Util.infomask(grp_imgdata[k], mask, False)
						grp_imgdata[k] -= ave
						grp_imgdata[k] /= std
					del mask

				if options.fl > 0.0:
					from filter import filt_ctf, filt_table
					from fundamentals import fft, window2d
					nx2 = 2*nx
					ny2 = 2*ny
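					# pad to twice the size before the CTF phase flip (binary=1), low-pass with a tangent filter, then window back to the original size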
					if options.CTF:
						from utilities import pad
						for k in xrange(img_per_grp):
							grp_imgdata[k] = window2d(fft( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa) ),nx,ny)
							#grp_imgdata[k] = window2d(fft( filt_table( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa), fifi) ),nx,ny)
							#grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa)
					else:
						for k in xrange(img_per_grp):
							grp_imgdata[k] = filt_tanl( grp_imgdata[k], options.fl, options.aa)
							#grp_imgdata[k] = window2d(fft( filt_table( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa), fifi) ),nx,ny)
							#grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa)
				else:
					from utilities import pad, read_text_file
					from filter import filt_ctf, filt_table
					from fundamentals import fft, window2d
					nx2 = 2*nx
					ny2 = 2*ny
					if options.CTF:
						from utilities import pad
						for k in xrange(img_per_grp):
							grp_imgdata[k] = window2d( fft( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1) ) , nx,ny)
							#grp_imgdata[k] = window2d(fft( filt_table( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa), fifi) ),nx,ny)
							#grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa)

				'''
				if i < 10 and myid == main_node:
					for k in xrange(10):
						grp_imgdata[k].write_image("grp%03d.hdf"%i, k)
				'''
				"""
				if myid == main_node and i==0:
					for pp in xrange(len(grp_imgdata)):
						grp_imgdata[pp].write_image("pp.hdf", pp)
				"""
				ave, grp_imgdata = prepare_2d_forPCA(grp_imgdata)
				"""
				if myid == main_node and i==0:
					for pp in xrange(len(grp_imgdata)):
						grp_imgdata[pp].write_image("qq.hdf", pp)
				"""

				var = model_blank(nx,ny)
				for q in grp_imgdata:  Util.add_img2( var, q )
				Util.mul_scalar( var, 1.0/(len(grp_imgdata)-1))
				# Switch to std dev
				var = square_root(threshold(var))
				#if options.CTF:	ave, var = avgvar_ctf(grp_imgdata, mode="a")
				#else:	            ave, var = avgvar(grp_imgdata, mode="a")
				"""
				if myid == main_node:
					ave.write_image("avgv.hdf",i)
					var.write_image("varv.hdf",i)
				"""
			
				set_params_proj(ave, [phiM, thetaM, 0.0, 0.0, 0.0])
				set_params_proj(var, [phiM, thetaM, 0.0, 0.0, 0.0])

				aveList.append(ave)
				varList.append(var)

				if options.VERBOSE:
					print "%5.2f%% done on processor %d"%(i*100.0/len(proj_list), myid)
				if nvec > 0:
					eig = pca(input_stacks=grp_imgdata, subavg="", mask_radius=radiuspca, nvec=nvec, incore=True, shuffle=False, genbuf=True)
					for k in xrange(nvec):
						set_params_proj(eig[k], [phiM, thetaM, 0.0, 0.0, 0.0])
						eigList[k].append(eig[k])
					"""
					if myid == 0 and i == 0:
						for k in xrange(nvec):
							eig[k].write_image("eig.hdf", k)
					"""

			del imgdata
			#  To this point, all averages, variances, and eigenvectors are computed

			if options.ave2D:
				from fundamentals import fpol
				if myid == main_node:
					km = 0
					for i in xrange(number_of_proc):
						if i == main_node :
							for im in xrange(len(aveList)):
								aveList[im].write_image(options.ave2D, km)
								km += 1
						else:
							nl = mpi_recv(1, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)
							nl = int(nl[0])
							for im in xrange(nl):
								ave = recv_EMData(i, im+i+70000)
								"""
								nm = mpi_recv(1, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)
								nm = int(nm[0])
								members = mpi_recv(nm, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)
								ave.set_attr('members', map(int, members))
								members = mpi_recv(nm, MPI_FLOAT, i, MPI_TAG_UB, MPI_COMM_WORLD)
								ave.set_attr('pix_err', map(float, members))
								members = mpi_recv(3, MPI_FLOAT, i, MPI_TAG_UB, MPI_COMM_WORLD)
								ave.set_attr('refprojdir', map(float, members))
								"""
								tmpvol=fpol(ave, Tracker["nx"],Tracker["nx"],Tracker["nx"])								
								tmpvol.write_image(options.ave2D, km)
								km += 1
				else:
					mpi_send(len(aveList), 1, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
					for im in xrange(len(aveList)):
						send_EMData(aveList[im], main_node,im+myid+70000)
						"""
						members = aveList[im].get_attr('members')
						mpi_send(len(members), 1, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
						mpi_send(members, len(members), MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
						members = aveList[im].get_attr('pix_err')
						mpi_send(members, len(members), MPI_FLOAT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
						try:
							members = aveList[im].get_attr('refprojdir')
							mpi_send(members, 3, MPI_FLOAT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
						except:
							mpi_send([-999.0,-999.0,-999.0], 3, MPI_FLOAT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
						"""

			if options.ave3D:
				from fundamentals import fpol
				if options.VERBOSE:
					print "Reconstructing 3D average volume"
				ave3D = recons3d_4nn_MPI(myid, aveList, symmetry=options.sym, npad=options.npad)
				bcast_EMData_to_all(ave3D, myid)
				if myid == main_node:
					ave3D=fpol(ave3D,Tracker["nx"],Tracker["nx"],Tracker["nx"])
					ave3D.write_image(options.ave3D)
					print_msg("%-70s:  %s\n"%("Writing to the disk volume reconstructed from averages as", options.ave3D))
			del ave, var, proj_list, stack, phi, theta, psi, s2x, s2y, alpha, sx, sy, mirror, aveList

			if nvec > 0:
				for k in xrange(nvec):
					if options.VERBOSE:
						print "Reconstruction eigenvolumes", k
					cont = True
					ITER = 0
					mask2d = model_circle(radiuspca, nx, nx)
					while cont:
						#print "On node %d, iteration %d"%(myid, ITER)
						eig3D = recons3d_4nn_MPI(myid, eigList[k], symmetry=options.sym, npad=options.npad)
						bcast_EMData_to_all(eig3D, myid, main_node)
						if options.fl > 0.0:
							eig3D = filt_tanl(eig3D, options.fl, options.aa)
						if myid == main_node:
							eig3D.write_image("eig3d_%03d.hdf"%k, ITER)
						Util.mul_img( eig3D, model_circle(radiuspca, nx, nx, nx) )
						eig3Df, kb = prep_vol(eig3D)
						del eig3D
						cont = False
						icont = 0
						for l in xrange(len(eigList[k])):
							phi, theta, psi, s2x, s2y = get_params_proj(eigList[k][l])
							proj = prgs(eig3Df, kb, [phi, theta, psi, s2x, s2y])
							cl = ccc(proj, eigList[k][l], mask2d)
							if cl < 0.0:
								icont += 1
								cont = True
								eigList[k][l] *= -1.0
						u = int(cont)
						u = mpi_reduce([u], 1, MPI_INT, MPI_MAX, main_node, MPI_COMM_WORLD)
						icont = mpi_reduce([icont], 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD)

						if myid == main_node:
							u = int(u[0])
							print " Eigenvector: ",k," number changed ",int(icont[0])
						else: u = 0
						u = bcast_number_to_all(u, main_node)
						cont = bool(u)
						ITER += 1

					del eig3Df, kb
					mpi_barrier(MPI_COMM_WORLD)
				del eigList, mask2d

			if options.ave3D: del ave3D
			if options.var2D:
				from fundamentals import fpol 
				if myid == main_node:
					km = 0
					for i in xrange(number_of_proc):
						if i == main_node :
							for im in xrange(len(varList)):
								tmpvol=fpol(varList[im], Tracker["nx"], Tracker["nx"],1)
								tmpvol.write_image(options.var2D, km)
								km += 1
						else:
							nl = mpi_recv(1, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)
							nl = int(nl[0])
							for im in xrange(nl):
								ave = recv_EMData(i, im+i+70000)
								tmpvol=fpol(ave, Tracker["nx"], Tracker["nx"],1)
								tmpvol.write_image(options.var2D, km)
								km += 1
				else:
					mpi_send(len(varList), 1, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
					for im in xrange(len(varList)):
						send_EMData(varList[im], main_node, im+myid+70000)#  What with the attributes??

			mpi_barrier(MPI_COMM_WORLD)

		if  options.var3D:
			if myid == main_node and options.VERBOSE:
				print "Reconstructing 3D variability volume"

			t6 = time()
			radiusvar = options.radiusvar
			if( radiusvar < 0 ):  radiusvar = nx//2 -3
			res = recons3d_4nn_MPI(myid, varList, symmetry=options.sym, npad=options.npad)
			#res = recons3d_em_MPI(varList, vol_stack, options.iter, radiusvar, options.abs, True, options.sym, options.squ)
			if myid == main_node:
				from fundamentals import fpol
				res =fpol(res, Tracker["nx"], Tracker["nx"], Tracker["nx"])
				res.write_image(options.var3D)

			if myid == main_node:
				print_msg("%-70s:  %.2f\n"%("Reconstructing 3D variability took [s]", time()-t6))
				if options.VERBOSE:
					print "Reconstruction took: %.2f [min]"%((time()-t6)/60)

			if myid == main_node:
				print_msg("%-70s:  %.2f\n"%("Total time for these computations [s]", time()-t0))
				if options.VERBOSE:
					print "Total time for these computations: %.2f [min]"%((time()-t0)/60)
				print_end_msg("sx3dvariability")

		global_def.BATCH = False

		from mpi import mpi_finalize
		mpi_finalize()
Example #14
0
def shiftali_MPI(stack,
                 maskfile=None,
                 maxit=100,
                 CTF=False,
                 snr=1.0,
                 Fourvar=False,
                 search_rng=-1,
                 oneDx=False,
                 search_rng_y=-1):
    from applications import MPI_start_end
    from utilities import model_circle, model_blank, get_image, peak_search, get_im
    from utilities import reduce_EMData_to_root, bcast_EMData_to_all, send_attr_dict, file_type, bcast_number_to_all, bcast_list_to_all
    from pap_statistics import varf2d_MPI
    from fundamentals import fft, ccf, rot_shift3D, rot_shift2D
    from utilities import get_params2D, set_params2D
    from utilities import print_msg, print_begin_msg, print_end_msg
    import os
    import sys
    from mpi import mpi_init, mpi_comm_size, mpi_comm_rank, MPI_COMM_WORLD
    from mpi import mpi_reduce, mpi_bcast, mpi_barrier, mpi_gatherv
    from mpi import MPI_SUM, MPI_FLOAT, MPI_INT
    from EMAN2 import Processor
    from time import time

    number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
    myid = mpi_comm_rank(MPI_COMM_WORLD)
    main_node = 0

    ftp = file_type(stack)

    if myid == main_node:
        print_begin_msg("shiftali_MPI")

    max_iter = int(maxit)

    if myid == main_node:
        if ftp == "bdb":
            from EMAN2db import db_open_dict
            dummy = db_open_dict(stack, True)
        nima = EMUtil.get_image_count(stack)
    else:
        nima = 0
    nima = bcast_number_to_all(nima, source_node=main_node)
    list_of_particles = list(range(nima))

    image_start, image_end = MPI_start_end(nima, number_of_proc, myid)
    list_of_particles = list_of_particles[image_start:image_end]

    # read nx and ctf_app (if CTF) and broadcast to all nodes
    if myid == main_node:
        ima = EMData()
        ima.read_image(stack, list_of_particles[0], True)
        nx = ima.get_xsize()
        ny = ima.get_ysize()
        if CTF: ctf_app = ima.get_attr_default('ctf_applied', 2)
        del ima
    else:
        nx = 0
        ny = 0
        if CTF: ctf_app = 0
    nx = bcast_number_to_all(nx, source_node=main_node)
    ny = bcast_number_to_all(ny, source_node=main_node)
    if CTF:
        ctf_app = bcast_number_to_all(ctf_app, source_node=main_node)
        if ctf_app > 0:
            ERROR("data cannot be ctf-applied", "shiftali_MPI", 1, myid)

    if maskfile == None:
        mrad = min(nx, ny)
        mask = model_circle(mrad // 2 - 2, nx, ny)
    else:
        mask = get_im(maskfile)

    if CTF:
        from filter import filt_ctf
        from morphology import ctf_img
        ctf_abs_sum = EMData(nx, ny, 1, False)
        ctf_2_sum = EMData(nx, ny, 1, False)
    else:
        ctf_2_sum = None

    from global_def import CACHE_DISABLE
    if CACHE_DISABLE:
        data = EMData.read_images(stack, list_of_particles)
    else:
        for i in range(number_of_proc):
            if myid == i:
                data = EMData.read_images(stack, list_of_particles)
            if ftp == "bdb": mpi_barrier(MPI_COMM_WORLD)

    for im in range(len(data)):
        data[im].set_attr('ID', list_of_particles[im])
        st = Util.infomask(data[im], mask, False)
        data[im] -= st[0]
        if CTF:
            ctf_params = data[im].get_attr("ctf")
            ctfimg = ctf_img(nx, ctf_params, ny=ny)
            Util.add_img2(ctf_2_sum, ctfimg)
            Util.add_img_abs(ctf_abs_sum, ctfimg)

    if CTF:
        reduce_EMData_to_root(ctf_2_sum, myid, main_node)
        reduce_EMData_to_root(ctf_abs_sum, myid, main_node)
    else:
        ctf_2_sum = None
    if CTF:
        if myid != main_node:
            del ctf_2_sum
            del ctf_abs_sum
        else:
            temp = EMData(nx, ny, 1, False)
            for i in range(0, nx, 2):
                for j in range(ny):
                    temp.set_value_at(i, j, snr)
            Util.add_img(ctf_2_sum, temp)
            del temp

    total_iter = 0

    # apply initial xform.align2d parameters stored in header
    init_params = []
    for im in range(len(data)):
        t = data[im].get_attr('xform.align2d')
        init_params.append(t)
        p = t.get_params("2d")
        data[im] = rot_shift2D(data[im],
                               p['alpha'],
                               sx=p['tx'],
                               sy=p['ty'],
                               mirror=p['mirror'],
                               scale=p['scale'])

    # fourier transform all images, and apply ctf if CTF
    for im in range(len(data)):
        if CTF:
            ctf_params = data[im].get_attr("ctf")
            data[im] = filt_ctf(fft(data[im]), ctf_params)
        else:
            data[im] = fft(data[im])

    sx_sum = 0
    sy_sum = 0
    sx_sum_total = 0
    sy_sum_total = 0
    shift_x = [0.0] * len(data)
    shift_y = [0.0] * len(data)
    ishift_x = [0.0] * len(data)
    ishift_y = [0.0] * len(data)

    for Iter in range(max_iter):
        if myid == main_node:
            start_time = time()
            print_msg("Iteration #%4d\n" % (total_iter))
        total_iter += 1
        avg = EMData(nx, ny, 1, False)
        for im in data:
            Util.add_img(avg, im)

        reduce_EMData_to_root(avg, myid, main_node)

        if myid == main_node:
            if CTF:
                tavg = Util.divn_filter(avg, ctf_2_sum)
            else:
                tavg = Util.mult_scalar(avg, 1.0 / float(nima))
        else:
            tavg = EMData(nx, ny, 1, False)

        if Fourvar:
            bcast_EMData_to_all(tavg, myid, main_node)
            vav, rvar = varf2d_MPI(myid, data, tavg, mask, "a", CTF)

        if myid == main_node:
            if Fourvar:
                tavg = fft(Util.divn_img(fft(tavg), vav))
                vav_r = Util.pack_complex_to_real(vav)

            # normalize and mask tavg in real space
            tavg = fft(tavg)
            stat = Util.infomask(tavg, mask, False)
            tavg -= stat[0]
            Util.mul_img(tavg, mask)
            # For testing purposes: shift tavg to some random place and see if the centering is still correct
            #tavg = rot_shift3D(tavg,sx=3,sy=-4)
            tavg = fft(tavg)

        if Fourvar: del vav
        bcast_EMData_to_all(tavg, myid, main_node)

        sx_sum = 0
        sy_sum = 0
        if search_rng > 0: nwx = 2 * search_rng + 1
        else: nwx = nx

        if search_rng_y > 0: nwy = 2 * search_rng_y + 1
        else: nwy = ny

        not_zero = 0
        for im in range(len(data)):
            if oneDx:
                ctx = Util.window(ccf(data[im], tavg), nwx, 1)
                p1 = peak_search(ctx)
                p1_x = -int(p1[0][3])
                ishift_x[im] = p1_x
                sx_sum += p1_x
            else:
                p1 = peak_search(Util.window(ccf(data[im], tavg), nwx, nwy))
                p1_x = -int(p1[0][4])
                p1_y = -int(p1[0][5])
                ishift_x[im] = p1_x
                ishift_y[im] = p1_y
                sx_sum += p1_x
                sy_sum += p1_y

            if not_zero == 0:
                if (not (ishift_x[im] == 0.0)) or (not (ishift_y[im] == 0.0)):
                    not_zero = 1

        sx_sum = mpi_reduce(sx_sum, 1, MPI_INT, MPI_SUM, main_node,
                            MPI_COMM_WORLD)

        if not oneDx:
            sy_sum = mpi_reduce(sy_sum, 1, MPI_INT, MPI_SUM, main_node,
                                MPI_COMM_WORLD)

        if myid == main_node:
            sx_sum_total = int(sx_sum[0])
            if not oneDx:
                sy_sum_total = int(sy_sum[0])
        else:
            sx_sum_total = 0
            sy_sum_total = 0

        sx_sum_total = bcast_number_to_all(sx_sum_total, source_node=main_node)

        if not oneDx:
            sy_sum_total = bcast_number_to_all(sy_sum_total,
                                               source_node=main_node)

        sx_ave = round(float(sx_sum_total) / nima)
        sy_ave = round(float(sy_sum_total) / nima)
        for im in range(len(data)):
            p1_x = ishift_x[im] - sx_ave
            p1_y = ishift_y[im] - sy_ave
            params2 = {
                "filter_type": Processor.fourier_filter_types.SHIFT,
                "x_shift": p1_x,
                "y_shift": p1_y,
                "z_shift": 0.0
            }
            data[im] = Processor.EMFourierFilter(data[im], params2)
            shift_x[im] += p1_x
            shift_y[im] += p1_y
        # stop if all shifts are zero
        not_zero = mpi_reduce(not_zero, 1, MPI_INT, MPI_SUM, main_node,
                              MPI_COMM_WORLD)
        if myid == main_node:
            not_zero_all = int(not_zero[0])
        else:
            not_zero_all = 0
        not_zero_all = bcast_number_to_all(not_zero_all, source_node=main_node)

        if myid == main_node:
            print_msg("Time of iteration = %12.2f\n" % (time() - start_time))
            start_time = time()

        if not_zero_all == 0: break

    #for im in xrange(len(data)): data[im] = fft(data[im])  This should not be required as only header information is used
    # combine shifts found with the original parameters
    for im in range(len(data)):
        t0 = init_params[im]
        t1 = Transform()
        t1.set_params({
            "type": "2D",
            "alpha": 0,
            "scale": t0.get_scale(),
            "mirror": 0,
            "tx": shift_x[im],
            "ty": shift_y[im]
        })
        # combine t0 and t1
        tt = t1 * t0
        data[im].set_attr("xform.align2d", tt)

    # write out headers and STOP, under MPI writing has to be done sequentially
    mpi_barrier(MPI_COMM_WORLD)
    par_str = ["xform.align2d", "ID"]
    if myid == main_node:
        from utilities import file_type
        if (file_type(stack) == "bdb"):
            from utilities import recv_attr_dict_bdb
            recv_attr_dict_bdb(main_node, stack, data, par_str, image_start,
                               image_end, number_of_proc)
        else:
            from utilities import recv_attr_dict
            recv_attr_dict(main_node, stack, data, par_str, image_start,
                           image_end, number_of_proc)

    else:
        send_attr_dict(main_node, data, par_str, image_start, image_end)
    if myid == main_node: print_end_msg("shiftali_MPI")
Example #15
0
def main():
	import	global_def
	from	optparse 	import OptionParser
	from	EMAN2 		import EMUtil
	import	os
	import	sys
	from time import time

	progname = os.path.basename(sys.argv[0])
	usage = progname + " proj_stack output_averages --MPI"
	parser = OptionParser(usage, version=SPARXVERSION)

	parser.add_option("--img_per_group",type="int"         ,	default=100  ,				help="number of images per group" )
	parser.add_option("--radius", 		type="int"         ,	default=-1   ,				help="radius for alignment" )
	parser.add_option("--xr",           type="string"      ,    default="2 1",              help="range for translation search in x direction, search is +/xr")
	parser.add_option("--yr",           type="string"      ,    default="-1",               help="range for translation search in y direction, search is +/yr (default = same as xr)")
	parser.add_option("--ts",           type="string"      ,    default="1 0.5",            help="step size of the translation search in both directions, search is -xr, -xr+ts, 0, xr-ts, xr, can be fractional")
	parser.add_option("--iter", 		type="int"         ,	default=30,                 help="number of iterations within alignment (default = 30)" )
	parser.add_option("--num_ali",      type="int"     	   ,    default=5,         			help="number of alignments performed for stability (default = 5)" )
	parser.add_option("--thld_err",     type="float"       ,    default=1.0,         		help="threshold of pixel error (default = 1.732)" )
	parser.add_option("--grouping" , 	type="string"      ,	default="GRP",				help="do grouping of projections: PPR - per projection, GRP - different size groups, exclusive (default), GEV - grouping equal size")
	parser.add_option("--delta",        type="float"       ,    default=-1.0,         		help="angular step for reference projections (required for GEV method)")
	parser.add_option("--fl",           type="float"       ,    default=0.3,                help="cut-off frequency of hyperbolic tangent low-pass Fourier filter")
	parser.add_option("--aa",           type="float"       ,    default=0.2,                help="fall-off of hyperbolic tangent low-pass Fourier filter")
	parser.add_option("--CTF",          action="store_true",    default=False,              help="Consider CTF correction during the alignment ")
	parser.add_option("--MPI" , 		action="store_true",	default=False,				help="use MPI version")

	(options,args) = parser.parse_args()
	
	from mpi          import mpi_init, mpi_comm_rank, mpi_comm_size, MPI_COMM_WORLD, MPI_TAG_UB
	from mpi          import mpi_barrier, mpi_send, mpi_recv, mpi_bcast, MPI_INT, mpi_finalize, MPI_FLOAT
	from applications import MPI_start_end, within_group_refinement, ali2d_ras
	from pixel_error  import multi_align_stability
	from utilities    import send_EMData, recv_EMData
	from utilities    import get_image, bcast_number_to_all, set_params2D, get_params2D
	from utilities    import group_proj_by_phitheta, model_circle, get_input_from_string

	sys.argv = mpi_init(len(sys.argv), sys.argv)
	myid = mpi_comm_rank(MPI_COMM_WORLD)
	number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
	main_node = 0

	if len(args) == 2:
		stack  = args[0]
		outdir = args[1]
	else:
		ERROR("incomplete list of arguments", "sxproj_stability", 1, myid=myid)
		exit()
	if not options.MPI:
		ERROR("Non-MPI not supported!", "sxproj_stability", myid=myid)
		exit()		 

	if global_def.CACHE_DISABLE:
		from utilities import disable_bdb_cache
		disable_bdb_cache()
	global_def.BATCH = True

	#if os.path.exists(outdir):  ERROR('Output directory exists, please change the name and restart the program', "sxproj_stability", 1, myid)
	#mpi_barrier(MPI_COMM_WORLD)

	
	img_per_grp = options.img_per_group
	radius = options.radius
	ite = options.iter
	num_ali = options.num_ali
	thld_err = options.thld_err

	xrng        = get_input_from_string(options.xr)
	if  options.yr == "-1":  yrng = xrng
	else          :  yrng = get_input_from_string(options.yr)
	step        = get_input_from_string(options.ts)


	if myid == main_node:
		nima = EMUtil.get_image_count(stack)
		img  = get_image(stack)
		nx   = img.get_xsize()
		ny   = img.get_ysize()
	else:
		nima = 0
		nx = 0
		ny = 0
	nima = bcast_number_to_all(nima)
	nx   = bcast_number_to_all(nx)
	ny   = bcast_number_to_all(ny)
	if radius == -1: radius = nx/2-2
	mask = model_circle(radius, nx, nx)

	st = time()
	if options.grouping == "GRP":
		if myid == main_node:
			print "  A  ",myid,"  ",time()-st
			proj_attr = EMUtil.get_all_attributes(stack, "xform.projection")
			proj_params = []
			for i in xrange(nima):
				dp = proj_attr[i].get_params("spider")
				phi, theta, psi, s2x, s2y = dp["phi"], dp["theta"], dp["psi"], -dp["tx"], -dp["ty"]
				proj_params.append([phi, theta, psi, s2x, s2y])

			# Here is where the grouping is done, I didn't put enough annotation in the group_proj_by_phitheta,
			# So I will briefly explain it here
			# proj_list  : Returns a list of list of particle numbers, each list contains img_per_grp particle numbers
			#              except for the last one. Depending on the number of particles left, they will either form a
			#              group or append themselves to the last group
			# angle_list : Also returns a list of list, each list contains three numbers (phi, theta, delta), (phi, 
			#              theta) is the projection angle of the center of the group, delta is the range of this group
			# mirror_list: Also returns a list of list, each list contains img_per_grp True or False, which indicates
			#              whether it should take mirror position.
			# In this program angle_list and mirror list are not of interest.
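			# For example (hypothetical numbers): with nima = 1000 and img_per_grp = 100,
			# proj_list_all would hold roughly 10 lists of particle numbers, angle_list one
			# [phi, theta, delta] triplet per group, and mirror_list one boolean per group member.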

			proj_list_all, angle_list, mirror_list = group_proj_by_phitheta(proj_params, img_per_grp=img_per_grp)
			del proj_params
			print "  B  number of groups  ",myid,"  ",len(proj_list_all),time()-st
		mpi_barrier(MPI_COMM_WORLD)

		# Number of groups, actually there could be one or two more groups, since the size of the remaining group varies
		# we will simply assign them to main node.
		n_grp = nima/img_per_grp-1

		# Divide proj_list_all equally to all nodes, and becomes proj_list
		proj_list = []
		for i in xrange(n_grp):
			proc_to_stay = i%number_of_proc
			if proc_to_stay == main_node:
				if myid == main_node: 	proj_list.append(proj_list_all[i])
			elif myid == main_node:
				mpi_send(len(proj_list_all[i]), 1, MPI_INT, proc_to_stay, MPI_TAG_UB, MPI_COMM_WORLD)
				mpi_send(proj_list_all[i], len(proj_list_all[i]), MPI_INT, proc_to_stay, MPI_TAG_UB, MPI_COMM_WORLD)
			elif myid == proc_to_stay:
				img_per_grp = mpi_recv(1, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
				img_per_grp = int(img_per_grp[0])
				temp = mpi_recv(img_per_grp, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
				proj_list.append(map(int, temp))
				del temp
			mpi_barrier(MPI_COMM_WORLD)
		print "  C  ",myid,"  ",time()-st
		if myid == main_node:
			# Assign the remaining groups to main_node
			for i in xrange(n_grp, len(proj_list_all)):
				proj_list.append(proj_list_all[i])
			del proj_list_all, angle_list, mirror_list


	#   Compute stability per projection projection direction, equal number assigned, thus overlaps
	elif options.grouping == "GEV":
		if options.delta == -1.0: ERROR("Angular step for reference projections is required for GEV method","sxproj_stability",1)
		from utilities import even_angles, nearestk_to_refdir, getvec
		refproj = even_angles(options.delta)
		img_begin, img_end = MPI_start_end(len(refproj), number_of_proc, myid)
		# Now each processor keeps its own share of reference projections
		refprojdir = refproj[img_begin: img_end]
		del refproj

		ref_ang = [0.0]*(len(refprojdir)*2)
		for i in xrange(len(refprojdir)):
			ref_ang[i*2]   = refprojdir[0][0]
			ref_ang[i*2+1] = refprojdir[0][1]+i*0.1

		print "  A  ",myid,"  ",time()-st
		proj_attr = EMUtil.get_all_attributes(stack, "xform.projection")
		#  the solution below is very slow, do not use it unless there is a problem with the i/O
		"""
		for i in xrange(number_of_proc):
			if myid == i:
				proj_attr = EMUtil.get_all_attributes(stack, "xform.projection")
			mpi_barrier(MPI_COMM_WORLD)
		"""
		print "  B  ",myid,"  ",time()-st

		proj_ang = [0.0]*(nima*2)
		for i in xrange(nima):
			dp = proj_attr[i].get_params("spider")
			proj_ang[i*2]   = dp["phi"]
			proj_ang[i*2+1] = dp["theta"]
		print "  C  ",myid,"  ",time()-st
		asi = Util.nearestk_to_refdir(proj_ang, ref_ang, img_per_grp)
		del proj_ang, ref_ang
		proj_list = []
		for i in xrange(len(refprojdir)):
			proj_list.append(asi[i*img_per_grp:(i+1)*img_per_grp])
		del asi
		print "  D  ",myid,"  ",time()-st
		#from sys import exit
		#exit()


	#   Compute stability per projection
	elif options.grouping == "PPR":
		print "  A  ",myid,"  ",time()-st
		proj_attr = EMUtil.get_all_attributes(stack, "xform.projection")
		print "  B  ",myid,"  ",time()-st
		proj_params = []
		for i in xrange(nima):
			dp = proj_attr[i].get_params("spider")
			phi, theta, psi, s2x, s2y = dp["phi"], dp["theta"], dp["psi"], -dp["tx"], -dp["ty"]
			proj_params.append([phi, theta, psi, s2x, s2y])
		img_begin, img_end = MPI_start_end(nima, number_of_proc, myid)
		print "  C  ",myid,"  ",time()-st
		from utilities import nearest_proj
		proj_list, mirror_list = nearest_proj(proj_params, img_per_grp, range(img_begin, img_begin+1))#range(img_begin, img_end))
		refprojdir = proj_params[img_begin: img_end]
		del proj_params, mirror_list
		print "  D  ",myid,"  ",time()-st
	else:  ERROR("Incorrect projection grouping option","sxproj_stability",1)
	"""
	from utilities import write_text_file
	for i in xrange(len(proj_list)):
		write_text_file(proj_list[i],"projlist%06d_%04d"%(i,myid))
	"""

	###########################################################################################################
	# Begin stability test
	from utilities import get_params_proj, read_text_file
	#if myid == 0:
	#	from utilities import read_text_file
	#	proj_list[0] = map(int, read_text_file("lggrpp0.txt"))


	from utilities import model_blank
	aveList = [model_blank(nx,ny) for i in xrange(len(proj_list))]  # independent blank images; list multiplication would alias a single object
	if options.grouping == "GRP":  refprojdir = [[0.0,0.0,-1.0]]*len(proj_list)
	for i in xrange(len(proj_list)):
		print "  E  ",myid,"  ",time()-st
		class_data = EMData.read_images(stack, proj_list[i])
		#print "  R  ",myid,"  ",time()-st
		if options.CTF :
			from filter import filt_ctf
			for im in xrange(len(class_data)):  #  MEM LEAK!!
				atemp = class_data[im].copy()
				btemp = filt_ctf(atemp, atemp.get_attr("ctf"), binary=1)
				class_data[im] = btemp
				#class_data[im] = filt_ctf(class_data[im], class_data[im].get_attr("ctf"), binary=1)
		for im in class_data:
			try:
				t = im.get_attr("xform.align2d") # if they are there, no need to set them!
			except:
				try:
					t = im.get_attr("xform.projection")
					d = t.get_params("spider")
					set_params2D(im, [0.0,-d["tx"],-d["ty"],0,1.0])
				except:
					set_params2D(im, [0.0, 0.0, 0.0, 0, 1.0])
		#print "  F  ",myid,"  ",time()-st
		# Here, we perform realignment num_ali times
		all_ali_params = []
		for j in xrange(num_ali):
			if( xrng[0] == 0.0 and yrng[0] == 0.0 ):
				avet = ali2d_ras(class_data, randomize = True, ir = 1, ou = radius, rs = 1, step = 1.0, dst = 90.0, maxit = ite, check_mirror = True, FH=options.fl, FF=options.aa)
			else:
				avet = within_group_refinement(class_data, mask, True, 1, radius, 1, xrng, yrng, step, 90.0, ite, options.fl, options.aa)
			ali_params = []
			for im in xrange(len(class_data)):
				alpha, sx, sy, mirror, scale = get_params2D(class_data[im])
				ali_params.extend( [alpha, sx, sy, mirror] )
			all_ali_params.append(ali_params)
		#aveList[i] = avet
		#print "  G  ",myid,"  ",time()-st
		del ali_params
		# We determine the stability of this group here.
		# stable_set contains all particles deemed stable, it is a list of list
		# each list has two elements, the first is the pixel error, the second is the image number
		# stable_set is sorted based on pixel error
		#from utilities import write_text_file
		#write_text_file(all_ali_params, "all_ali_params%03d.txt"%myid)
		stable_set, mir_stab_rate, average_pix_err = multi_align_stability(all_ali_params, 0.0, 10000.0, thld_err, False, 2*radius+1)
		#print "  H  ",myid,"  ",time()-st
		if(len(stable_set) > 5):
			stable_set_id = []
			members = []
			pix_err = []
			# First put the stable members into attr 'members' and 'pix_err'
			for s in stable_set:
				# s[1] - number in this subset
				stable_set_id.append(s[1])
				# the original image number
				members.append(proj_list[i][s[1]])
				pix_err.append(s[0])
			# Then put the unstable members into attr 'members' and 'pix_err'
			from fundamentals import rot_shift2D
			avet.to_zero()
			if options.grouping == "GRP":
				aphi = 0.0
				atht = 0.0
				vphi = 0.0
				vtht = 0.0
			l = -1
			for j in xrange(len(proj_list[i])):
				#  Here it will only work if stable_set_id is sorted in the increasing number, see how l progresses
				if j in stable_set_id:
					l += 1
					avet += rot_shift2D(class_data[j], stable_set[l][2][0], stable_set[l][2][1], stable_set[l][2][2], stable_set[l][2][3] )
					if options.grouping == "GRP":
						phi, theta, psi, sxs, sys = get_params_proj(class_data[j])
						if( theta > 90.0):
							phi = (phi+540.0)%360.0
							theta = 180.0 - theta
						aphi += phi
						atht += theta
						vphi += phi*phi
						vtht += theta*theta
				else:
					members.append(proj_list[i][j])
					pix_err.append(99999.99)
			aveList[i] = avet.copy()
			if l>1 :
				l += 1
				aveList[i] /= l
				if options.grouping == "GRP":
					aphi /= l
					atht /= l
					vphi = (vphi - l*aphi*aphi)/l
					vtht = (vtht - l*atht*atht)/l
					from math import sqrt
					refprojdir[i] = [aphi, atht, (sqrt(max(vphi,0.0))+sqrt(max(vtht,0.0)))/2.0]

			# Here more information has to be stored, PARTICULARLY WHAT IS THE REFERENCE DIRECTION
			aveList[i].set_attr('members', members)
			aveList[i].set_attr('refprojdir',refprojdir[i])
			aveList[i].set_attr('pixerr', pix_err)
		else:
			print  " empty group ",i, refprojdir[i]
			aveList[i].set_attr('members',[-1])
			aveList[i].set_attr('refprojdir',refprojdir[i])
			aveList[i].set_attr('pixerr', [99999.])

	del class_data

	if myid == main_node:
		km = 0
		for i in xrange(number_of_proc):
			if i == main_node :
				for im in xrange(len(aveList)):
					aveList[im].write_image(args[1], km)
					km += 1
			else:
				nl = mpi_recv(1, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)
				nl = int(nl[0])
				for im in xrange(nl):
					ave = recv_EMData(i, im+i+70000)
					nm = mpi_recv(1, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)
					nm = int(nm[0])
					members = mpi_recv(nm, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)
					ave.set_attr('members', map(int, members))
					members = mpi_recv(nm, MPI_FLOAT, i, MPI_TAG_UB, MPI_COMM_WORLD)
					ave.set_attr('pixerr', map(float, members))
					members = mpi_recv(3, MPI_FLOAT, i, MPI_TAG_UB, MPI_COMM_WORLD)
					ave.set_attr('refprojdir', map(float, members))
					ave.write_image(args[1], km)
					km += 1
	else:
		mpi_send(len(aveList), 1, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
		for im in xrange(len(aveList)):
			send_EMData(aveList[im], main_node,im+myid+70000)
			members = aveList[im].get_attr('members')
			mpi_send(len(members), 1, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
			mpi_send(members, len(members), MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
			members = aveList[im].get_attr('pixerr')
			mpi_send(members, len(members), MPI_FLOAT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
			try:
				members = aveList[im].get_attr('refprojdir')
				mpi_send(members, 3, MPI_FLOAT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
			except:
				mpi_send([-999.0,-999.0,-999.0], 3, MPI_FLOAT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)

	global_def.BATCH = False
	mpi_barrier(MPI_COMM_WORLD)
	from mpi import mpi_finalize
	mpi_finalize()
Example #16
0
def helicalshiftali_MPI(stack,
                        maskfile=None,
                        maxit=100,
                        CTF=False,
                        snr=1.0,
                        Fourvar=False,
                        search_rng=-1):
    from applications import MPI_start_end
    from utilities import model_circle, model_blank, get_image, peak_search, get_im, pad
    from utilities import reduce_EMData_to_root, bcast_EMData_to_all, send_attr_dict, file_type, bcast_number_to_all, bcast_list_to_all
    from pap_statistics import varf2d_MPI
    from fundamentals import fft, ccf, rot_shift3D, rot_shift2D, fshift
    from utilities import get_params2D, set_params2D, chunks_distribution
    from utilities import print_msg, print_begin_msg, print_end_msg
    import os
    import sys
    from mpi import mpi_init, mpi_comm_size, mpi_comm_rank, MPI_COMM_WORLD
    from mpi import mpi_reduce, mpi_bcast, mpi_barrier, mpi_gatherv
    from mpi import MPI_SUM, MPI_FLOAT, MPI_INT
    from time import time
    from pixel_error import ordersegments
    from math import sqrt, atan2, tan, pi

    nproc = mpi_comm_size(MPI_COMM_WORLD)
    myid = mpi_comm_rank(MPI_COMM_WORLD)
    main_node = 0

    ftp = file_type(stack)

    if myid == main_node:
        print_begin_msg("helical-shiftali_MPI")

    max_iter = int(maxit)
    if (myid == main_node):
        infils = EMUtil.get_all_attributes(stack, "filament")
        ptlcoords = EMUtil.get_all_attributes(stack, 'ptcl_source_coord')
        filaments = ordersegments(infils, ptlcoords)
        total_nfils = len(filaments)
        inidl = [0] * total_nfils
        for i in range(total_nfils):
            inidl[i] = len(filaments[i])
        linidl = sum(inidl)
        nima = linidl
        tfilaments = []
        for i in range(total_nfils):
            tfilaments += filaments[i]
        del filaments
    else:
        total_nfils = 0
        linidl = 0
    total_nfils = bcast_number_to_all(total_nfils, source_node=main_node)
    if myid != main_node:
        inidl = [-1] * total_nfils
    inidl = bcast_list_to_all(inidl, myid, source_node=main_node)
    linidl = bcast_number_to_all(linidl, source_node=main_node)
    if myid != main_node:
        tfilaments = [-1] * linidl
    tfilaments = bcast_list_to_all(tfilaments, myid, source_node=main_node)
    filaments = []
    iendi = 0
    for i in range(total_nfils):
        isti = iendi
        iendi = isti + inidl[i]
        filaments.append(tfilaments[isti:iendi])
    del tfilaments, inidl

    if myid == main_node:
        print_msg("total number of filaments: %d" % total_nfils)
    if total_nfils < nproc:
        ERROR(
            'number of CPUs (%i) is larger than the number of filaments (%i), please reduce the number of CPUs used'
            % (nproc, total_nfils), "ehelix_MPI", 1, myid)

    #  balanced load
    temp = chunks_distribution([[len(filaments[i]), i]
                                for i in range(len(filaments))],
                               nproc)[myid:myid + 1][0]
    filaments = [filaments[temp[i][1]] for i in range(len(temp))]
    nfils = len(filaments)

    #filaments = [[0,1]]
    #print "filaments",filaments
    list_of_particles = []
    indcs = []
    k = 0
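    # indcs[i] stores the [start, end) range of filament i's segments within the flat data list read below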
    for i in range(nfils):
        list_of_particles += filaments[i]
        k1 = k + len(filaments[i])
        indcs.append([k, k1])
        k = k1
    data = EMData.read_images(stack, list_of_particles)
    ldata = len(data)
    print("ldata=", ldata)
    nx = data[0].get_xsize()
    ny = data[0].get_ysize()
    if maskfile == None:
        mrad = min(nx, ny) // 2 - 2
        mask = pad(model_blank(2 * mrad + 1, ny, 1, 1.0), nx, ny, 1, 0.0)
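        # default mask: a vertical strip of ones, 2*mrad+1 pixels wide and ny pixels high, zero-padded to the full nx x ny frame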
    else:
        mask = get_im(maskfile)

    # apply initial xform.align2d parameters stored in header
    init_params = []
    for im in range(ldata):
        t = data[im].get_attr('xform.align2d')
        init_params.append(t)
        p = t.get_params("2d")
        data[im] = rot_shift2D(data[im], p['alpha'], p['tx'], p['ty'],
                               p['mirror'], p['scale'])

    if CTF:
        from filter import filt_ctf
        from morphology import ctf_img
        ctf_abs_sum = EMData(nx, ny, 1, False)
        ctf_2_sum = EMData(nx, ny, 1, False)
    else:
        ctf_2_sum = None
        ctf_abs_sum = None

    from utilities import info

    for im in range(ldata):
        data[im].set_attr('ID', list_of_particles[im])
        st = Util.infomask(data[im], mask, False)
        data[im] -= st[0]
        if CTF:
            ctf_params = data[im].get_attr("ctf")
            qctf = data[im].get_attr("ctf_applied")
            if qctf == 0:
                data[im] = filt_ctf(fft(data[im]), ctf_params)
                data[im].set_attr('ctf_applied', 1)
            elif qctf != 1:
                ERROR('Incorrectly set qctf flag', "helicalshiftali_MPI", 1,
                      myid)
            ctfimg = ctf_img(nx, ctf_params, ny=ny)
            Util.add_img2(ctf_2_sum, ctfimg)
            Util.add_img_abs(ctf_abs_sum, ctfimg)
        else:
            data[im] = fft(data[im])

    del list_of_particles

    if CTF:
        reduce_EMData_to_root(ctf_2_sum, myid, main_node)
        reduce_EMData_to_root(ctf_abs_sum, myid, main_node)
    if CTF:
        if myid != main_node:
            del ctf_2_sum
            del ctf_abs_sum
        else:
            temp = EMData(nx, ny, 1, False)
            tsnr = 1. / snr
            for i in range(0, nx + 2, 2):
                for j in range(ny):
                    temp.set_value_at(i, j, tsnr)
                    temp.set_value_at(i + 1, j, 0.0)
            #info(ctf_2_sum)
            Util.add_img(ctf_2_sum, temp)
            #info(ctf_2_sum)
            del temp

    total_iter = 0
    shift_x = [0.0] * ldata

    for Iter in range(max_iter):
        if myid == main_node:
            start_time = time()
            print_msg("Iteration #%4d\n" % (total_iter))
        total_iter += 1
        avg = EMData(nx, ny, 1, False)
        for im in range(ldata):
            Util.add_img(avg, fshift(data[im], shift_x[im]))

        reduce_EMData_to_root(avg, myid, main_node)

        if myid == main_node:
            if CTF: tavg = Util.divn_filter(avg, ctf_2_sum)
            else: tavg = Util.mult_scalar(avg, 1.0 / float(nima))
        else:
            tavg = model_blank(nx, ny)

        if Fourvar:
            bcast_EMData_to_all(tavg, myid, main_node)
            vav, rvar = varf2d_MPI(myid, data, tavg, mask, "a", CTF)

        if myid == main_node:
            if Fourvar:
                tavg = fft(Util.divn_img(fft(tavg), vav))
                vav_r = Util.pack_complex_to_real(vav)
            # normalize and mask tavg in real space
            tavg = fft(tavg)
            stat = Util.infomask(tavg, mask, False)
            tavg -= stat[0]
            Util.mul_img(tavg, mask)
            tavg.write_image("tavg.hdf", Iter)
            # For testing purposes: shift tavg to some random place and see if the centering is still correct
            #tavg = rot_shift3D(tavg,sx=3,sy=-4)

        if Fourvar: del vav
        bcast_EMData_to_all(tavg, myid, main_node)
        tavg = fft(tavg)

        sx_sum = 0.0
        nxc = nx // 2

        for ifil in range(nfils):
            """
			# Calculate filament average
			avg = EMData(nx, ny, 1, False)
			filnima = 0
			for im in xrange(indcs[ifil][0], indcs[ifil][1]):
				Util.add_img(avg, data[im])
				filnima += 1
			tavg = Util.mult_scalar(avg, 1.0/float(filnima))
			"""
            # Calculate 1D ccf between each segment and filament average
            nsegms = indcs[ifil][1] - indcs[ifil][0]
            ctx = [None] * nsegms
            pcoords = [None] * nsegms
            for im in range(indcs[ifil][0], indcs[ifil][1]):
                ctx[im - indcs[ifil][0]] = Util.window(ccf(tavg, data[im]), nx,
                                                       1)
                pcoords[im - indcs[ifil][0]] = data[im].get_attr(
                    'ptcl_source_coord')
                #ctx[im-indcs[ifil][0]].write_image("ctx.hdf",im-indcs[ifil][0])
                #print "  CTX  ",myid,im,Util.infomask(ctx[im-indcs[ifil][0]], None, True)
            # search for best x-shift
            cents = nsegms // 2

            dst = sqrt(
                max((pcoords[cents][0] - pcoords[0][0])**2 +
                    (pcoords[cents][1] - pcoords[0][1])**2,
                    (pcoords[cents][0] - pcoords[-1][0])**2 +
                    (pcoords[cents][1] - pcoords[-1][1])**2))
            maxincline = atan2(ny // 2 - 2 - float(search_rng), dst)
            kang = int(dst * tan(maxincline) + 0.5)
            #print  "  settings ",nsegms,cents,dst,search_rng,maxincline,kang

            # ## C code for alignment. @ming
            results = [0.0] * 3
            results = Util.helixshiftali(ctx, pcoords, nsegms, maxincline,
                                         kang, search_rng, nxc)
            sib = int(results[0])
            bang = results[1]
            qm = results[2]
            #print qm, sib, bang
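            # The commented-out loop below appears to be the original pure-Python version of the
            # incline/shift search now performed in C by Util.helixshiftali; it is kept for reference.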

            # qm = -1.e23
            #
            # 			for six in xrange(-search_rng, search_rng+1,1):
            # 				q0 = ctx[cents].get_value_at(six+nxc)
            # 				for incline in xrange(kang+1):
            # 					qt = q0
            # 					qu = q0
            # 					if(kang>0):  tang = tan(maxincline/kang*incline)
            # 					else:        tang = 0.0
            # 					for kim in xrange(cents+1,nsegms):
            # 						dst = sqrt((pcoords[cents][0] - pcoords[kim][0])**2 + (pcoords[cents][1] - pcoords[kim][1])**2)
            # 						xl = dst*tang+six+nxc
            # 						ixl = int(xl)
            # 						dxl = xl - ixl
            # 						#print "  A  ", ifil,six,incline,kim,xl,ixl,dxl
            # 						qt += (1.0-dxl)*ctx[kim].get_value_at(ixl) + dxl*ctx[kim].get_value_at(ixl+1)
            # 						xl = -dst*tang+six+nxc
            # 						ixl = int(xl)
            # 						dxl = xl - ixl
            # 						qu += (1.0-dxl)*ctx[kim].get_value_at(ixl) + dxl*ctx[kim].get_value_at(ixl+1)
            # 					for kim in xrange(cents):
            # 						dst = sqrt((pcoords[cents][0] - pcoords[kim][0])**2 + (pcoords[cents][1] - pcoords[kim][1])**2)
            # 						xl = -dst*tang+six+nxc
            # 						ixl = int(xl)
            # 						dxl = xl - ixl
            # 						qt += (1.0-dxl)*ctx[kim].get_value_at(ixl) + dxl*ctx[kim].get_value_at(ixl+1)
            # 						xl =  dst*tang+six+nxc
            # 						ixl = int(xl)
            # 						dxl = xl - ixl
            # 						qu += (1.0-dxl)*ctx[kim].get_value_at(ixl) + dxl*ctx[kim].get_value_at(ixl+1)
            # 					if( qt > qm ):
            # 						qm = qt
            # 						sib = six
            # 						bang = tang
            # 					if( qu > qm ):
            # 						qm = qu
            # 						sib = six
            # 						bang = -tang
            #if incline == 0:  print  "incline = 0  ",six,tang,qt,qu
            #print qm,six,sib,bang
            #print " got results   ",indcs[ifil][0], indcs[ifil][1], ifil,myid,qm,sib,tang,bang,len(ctx),Util.infomask(ctx[0], None, True)
            for im in range(indcs[ifil][0], indcs[ifil][1]):
                kim = im - indcs[ifil][0]
                dst = sqrt((pcoords[cents][0] - pcoords[kim][0])**2 +
                           (pcoords[cents][1] - pcoords[kim][1])**2)
                if (kim < cents): xl = -dst * bang + sib
                else: xl = dst * bang + sib
                shift_x[im] = xl

            # Average shift
            sx_sum += shift_x[indcs[ifil][0] + cents]

        # #print myid,sx_sum,total_nfils
        sx_sum = mpi_reduce(sx_sum, 1, MPI_FLOAT, MPI_SUM, main_node,
                            MPI_COMM_WORLD)
        if myid == main_node:
            sx_sum = float(sx_sum[0]) / total_nfils
            print_msg("Average shift  %6.2f\n" % (sx_sum))
        else:
            sx_sum = 0.0
        sx_sum = 0.0
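        # NOTE: sx_sum is reset to 0.0 on all nodes just before the broadcast, so the average-shift
        # correction below is effectively a no-op and the per-segment shifts are left uncentered.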
        sx_sum = bcast_number_to_all(sx_sum, source_node=main_node)
        for im in range(ldata):
            shift_x[im] -= sx_sum
            #print  "   %3d  %6.3f"%(im,shift_x[im])
        #exit()

    # combine shifts found with the original parameters
    for im in range(ldata):
        t1 = Transform()
        ##import random
        ##shix=random.randint(-10, 10)
        ##t1.set_params({"type":"2D","tx":shix})
        t1.set_params({"type": "2D", "tx": shift_x[im]})
        # combine t0 and t1
        tt = t1 * init_params[im]
        data[im].set_attr("xform.align2d", tt)
    # write out headers and STOP, under MPI writing has to be done sequentially
    mpi_barrier(MPI_COMM_WORLD)
    par_str = ["xform.align2d", "ID"]
    if myid == main_node:
        from utilities import file_type
        if (file_type(stack) == "bdb"):
            from utilities import recv_attr_dict_bdb
            recv_attr_dict_bdb(main_node, stack, data, par_str, 0, ldata,
                               nproc)
        else:
            from utilities import recv_attr_dict
            recv_attr_dict(main_node, stack, data, par_str, 0, ldata, nproc)
    else:
        send_attr_dict(main_node, data, par_str, 0, ldata)
    if myid == main_node: print_end_msg("helical-shiftali_MPI")
Example #17
0
def ali3d_MPI(stack, ref_vol, outdir, maskfile = None, ir = 1, ou = -1, rs = 1, 
	    xr = "4 2 2 1", yr = "-1", ts = "1 1 0.5 0.25", delta = "10 6 4 4", an = "-1",
	    center = 0, maxit = 5, term = 95, CTF = False, fourvar = False, snr = 1.0,  ref_a = "S", sym = "c1", 
	    sort=True, cutoff=999.99, pix_cutoff="0", two_tail=False, model_jump="1 1 1 1 1", restart=False, save_half=False,
	    protos=None, oplane=None, lmask=-1, ilmask=-1, findseam=False, vertstep=None, hpars="-1", hsearch="73.0 170.0",
	    full_output = False, compare_repro = False, compare_ref_free = "-1", ref_free_cutoff= "-1 -1 -1 -1",
	    wcmask = None, debug = False, recon_pad = 4):

	from alignment      import Numrinit, prepare_refrings
	from utilities      import model_circle, get_image, drop_image, get_input_from_string
	from utilities      import bcast_list_to_all, bcast_number_to_all, reduce_EMData_to_root, bcast_EMData_to_all 
	from utilities      import send_attr_dict
	from utilities      import get_params_proj, file_type
	from fundamentals   import rot_avg_image
	import os
	import types
	from utilities      import print_begin_msg, print_end_msg, print_msg
	from mpi	    import mpi_bcast, mpi_comm_size, mpi_comm_rank, MPI_FLOAT, MPI_COMM_WORLD, mpi_barrier, mpi_reduce
	from mpi	    import mpi_reduce, MPI_INT, MPI_SUM, mpi_finalize
	from filter	 import filt_ctf
	from projection     import prep_vol, prgs
	from statistics     import hist_list, varf3d_MPI, fsc_mask
	from numpy	  import array, bincount, array2string, ones

	number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
	myid	   = mpi_comm_rank(MPI_COMM_WORLD)
	main_node = 0
	if myid == main_node:
		if os.path.exists(outdir):  ERROR('Output directory exists, please change the name and restart the program', "ali3d_MPI", 1)
		os.mkdir(outdir)
	mpi_barrier(MPI_COMM_WORLD)

	if debug:
		from time import sleep
		while not os.path.exists(outdir):
			print  "Node ",myid,"  waiting..."
			sleep(5)

		info_file = os.path.join(outdir, "progress%04d"%myid)
		finfo = open(info_file, 'w')
	else:
		finfo = None
	mjump = get_input_from_string(model_jump)
	xrng	= get_input_from_string(xr)
	if  yr == "-1":  yrng = xrng
	else	  :  yrng = get_input_from_string(yr)
	step	= get_input_from_string(ts)
	delta       = get_input_from_string(delta)
	ref_free_cutoff = get_input_from_string(ref_free_cutoff)	
	pix_cutoff = get_input_from_string(pix_cutoff)
	
	lstp = min(len(xrng), len(yrng), len(step), len(delta))
	if an == "-1":
		an = [-1] * lstp
	else:
		an = get_input_from_string(an)
	# make sure pix_cutoff is set for all iterations
	if len(pix_cutoff)<lstp:
		for i in xrange(len(pix_cutoff),lstp):
			pix_cutoff.append(pix_cutoff[-1])
	# don't waste time on sub-pixel alignment for low-resolution ang incr
	for i in range(len(step)):
		if (delta[i] > 4 or delta[i] == -1) and step[i] < 1:
			step[i] = 1

	first_ring  = int(ir)
	rstep       = int(rs)
	last_ring   = int(ou)
	max_iter    = int(maxit)
	center      = int(center)

	nrefs   = EMUtil.get_image_count( ref_vol )
	nmasks = 0
	if maskfile:
		# read number of masks within each maskfile (mc)
		nmasks   = EMUtil.get_image_count( maskfile )
		# open masks within maskfile (mc)
		maskF   = EMData.read_images(maskfile, xrange(nmasks))
	vol     = EMData.read_images(ref_vol, xrange(nrefs))
	nx      = vol[0].get_xsize()

	## make sure box sizes are the same
	if myid == main_node:
		im=EMData.read_images(stack,[0])
		bx = im[0].get_xsize()
		if bx!=nx:
			print_msg("Error: Stack box size (%i) differs from initial model (%i)\n"%(bx,nx))
			sys.exit()
		del im,bx
	
	# for helical processing:
	helicalrecon = False
	if protos is not None or hpars != "-1" or findseam is True:
		helicalrecon = True
		# if no out-of-plane param set, use 5 degrees
		if oplane is None:
			oplane=5.0
	if protos is not None:
		proto = get_input_from_string(protos)
		if len(proto) != nrefs:
			print_msg("Error: insufficient protofilament numbers supplied")
			sys.exit()
	if hpars != "-1":
		hpars = get_input_from_string(hpars)
		if len(hpars) != 2*nrefs:
			print_msg("Error: insufficient helical parameters supplied")
			sys.exit()
	## create helical parameter file for helical reconstruction
	if helicalrecon is True and myid == main_node:
		from hfunctions import createHpar
		# create initial helical parameter files
		dp=[0]*nrefs
		dphi=[0]*nrefs
		vdp=[0]*nrefs
		vdphi=[0]*nrefs
		for iref in xrange(nrefs):
			hpar = os.path.join(outdir,"hpar%02d.spi"%(iref))
			params = False
			if hpars != "-1":
				# if helical parameters explicitly given, set twist & rise
				params = [float(hpars[iref*2]),float(hpars[(iref*2)+1])]
			dp[iref],dphi[iref],vdp[iref],vdphi[iref] = createHpar(hpar,proto[iref],params,vertstep)

	# get values for helical search parameters
	hsearch = get_input_from_string(hsearch)
	if len(hsearch) != 2:
		print_msg("Error: specify outer and inner radii for helical search")
		sys.exit()

	if last_ring < 0 or last_ring > int(nx/2)-2 :	last_ring = int(nx/2) - 2

	if myid == main_node:
	#	import user_functions
	#	user_func = user_functions.factory[user_func_name]

		print_begin_msg("ali3d_MPI")
		print_msg("Input stack		 : %s\n"%(stack))
		print_msg("Reference volume	    : %s\n"%(ref_vol))	
		print_msg("Output directory	    : %s\n"%(outdir))
		if nmasks > 0:
			print_msg("Maskfile (number of masks)  : %s (%i)\n"%(maskfile,nmasks))
		print_msg("Inner radius		: %i\n"%(first_ring))
		print_msg("Outer radius		: %i\n"%(last_ring))
		print_msg("Ring step		   : %i\n"%(rstep))
		print_msg("X search range	      : %s\n"%(xrng))
		print_msg("Y search range	      : %s\n"%(yrng))
		print_msg("Translational step	  : %s\n"%(step))
		print_msg("Angular step		: %s\n"%(delta))
		print_msg("Angular search range	: %s\n"%(an))
		print_msg("Maximum iteration	   : %i\n"%(max_iter))
		print_msg("Center type		 : %i\n"%(center))
		print_msg("CTF correction	      : %s\n"%(CTF))
		print_msg("Signal-to-Noise Ratio       : %f\n"%(snr))
		print_msg("Reference projection method : %s\n"%(ref_a))
		print_msg("Symmetry group	      : %s\n"%(sym))
		print_msg("Fourier padding for 3D      : %i\n"%(recon_pad))
		print_msg("Number of reference models  : %i\n"%(nrefs))
		print_msg("Sort images between models  : %s\n"%(sort))
		print_msg("Allow images to jump	: %s\n"%(mjump))
		print_msg("CC cutoff standard dev      : %f\n"%(cutoff))
		print_msg("Two tail cutoff	     : %s\n"%(two_tail))
		print_msg("Termination pix error       : %f\n"%(term))
		print_msg("Pixel error cutoff	  : %s\n"%(pix_cutoff))
		print_msg("Restart		     : %s\n"%(restart))
		print_msg("Full output		 : %s\n"%(full_output))
		print_msg("Compare reprojections       : %s\n"%(compare_repro))
		print_msg("Compare ref free class avgs : %s\n"%(compare_ref_free))
		print_msg("Use cutoff from ref free    : %s\n"%(ref_free_cutoff))
		if protos:
			print_msg("Protofilament numbers	: %s\n"%(proto))
			print_msg("Using helical search range   : %s\n"%hsearch) 
		if findseam is True:
			print_msg("Using seam-based reconstruction\n")
		if hpars != "-1":
			print_msg("Using hpars		  : %s\n"%hpars)
		if vertstep != None:
			print_msg("Using vertical step    : %.2f\n"%vertstep)
		if save_half is True:
			print_msg("Saving even/odd halves\n")
		for i in xrange(100) : print_msg("*")
		print_msg("\n\n")
	if maskfile:
		if type(maskfile) is types.StringType: mask3D = get_image(maskfile)
		else:				  mask3D = maskfile
	else: mask3D = model_circle(last_ring, nx, nx, nx)

	numr	= Numrinit(first_ring, last_ring, rstep, "F")
	mask2D  = model_circle(last_ring,nx,nx) - model_circle(first_ring,nx,nx)

	fscmask = model_circle(last_ring,nx,nx,nx)
	if CTF:
		from filter	 import filt_ctf
	from reconstruction_rjh import rec3D_MPI_noCTF

	if myid == main_node:
		active = EMUtil.get_all_attributes(stack, 'active')
		list_of_particles = []
		for im in xrange(len(active)):
			if active[im]:  list_of_particles.append(im)
		del active
		nima = len(list_of_particles)
	else:
		nima = 0
	total_nima = bcast_number_to_all(nima, source_node = main_node)

	if myid != main_node:
		list_of_particles = [-1]*total_nima
	list_of_particles = bcast_list_to_all(list_of_particles, source_node = main_node)

	image_start, image_end = MPI_start_end(total_nima, number_of_proc, myid)

	# create a list of images for each node
	list_of_particles = list_of_particles[image_start: image_end]
	nima = len(list_of_particles)
	if debug:
		finfo.write("image_start, image_end: %d %d\n" %(image_start, image_end))
		finfo.flush()

	data = EMData.read_images(stack, list_of_particles)

	t_zero = Transform({"type":"spider","phi":0,"theta":0,"psi":0,"tx":0,"ty":0})
	transmulti = [[t_zero for i in xrange(nrefs)] for j in xrange(nima)]

	for iref,im in ((iref,im) for iref in xrange(nrefs) for im in xrange(nima)):
		if nrefs == 1:
			transmulti[im][iref] = data[im].get_attr("xform.projection")
		else:
			# if multi models, keep track of eulers for all models
			try:
				transmulti[im][iref] = data[im].get_attr("eulers_txty.%i"%iref)
			except:
				data[im].set_attr("eulers_txty.%i"%iref,t_zero)

	scoremulti = [[0.0 for i in xrange(nrefs)] for j in xrange(nima)] 
	pixelmulti = [[0.0 for i in xrange(nrefs)] for j in xrange(nima)] 
	ref_res = [0.0 for x in xrange(nrefs)] 
	apix = data[0].get_attr('apix_x')

	# for oplane parameter, create cylindrical mask
	if oplane is not None and myid == main_node:
		from hfunctions import createCylMask
		cmaskf=os.path.join(outdir, "mask3D_cyl.mrc")
		mask3D = createCylMask(data,ou,lmask,ilmask,cmaskf)
		# if finding seam of helix, create wedge masks
		if findseam is True:
			wedgemask=[]
			for pf in xrange(nrefs):
				wedgemask.append(EMData())
			# wedgemask option
			if wcmask is not None:
				wcmask = get_input_from_string(wcmask)
				if len(wcmask) != 3:
					print_msg("Error: wcmask option requires 3 values: x y radius")
					sys.exit()

	# determine if particles have helix info:
	try:
		data[0].get_attr('h_angle')
		original_data = []
		boxmask = True
		from hfunctions import createBoxMask
	except:
		boxmask = False

	# prepare particles
	for im in xrange(nima):
		data[im].set_attr('ID', list_of_particles[im])
		data[im].set_attr('pix_score', int(0))
		if CTF:
			# only phaseflip particles, not full CTF correction
			ctf_params = data[im].get_attr("ctf")
			st = Util.infomask(data[im], mask2D, False)
			data[im] -= st[0]
			data[im] = filt_ctf(data[im], ctf_params, sign = -1, binary=1)
			data[im].set_attr('ctf_applied', 1)
		# for window mask:
		if boxmask is True:
			h_angle = data[im].get_attr("h_angle")
			original_data.append(data[im].copy())
			bmask = createBoxMask(nx,apix,ou,lmask,h_angle)
			data[im]*=bmask
			del bmask
	if debug:
		finfo.write( '%d loaded  \n' % nima )
		finfo.flush()
	if myid == main_node:
		# initialize data for the reference preparation function
		ref_data = [ mask3D, max(center,0), None, None, None, None ]
		# for method -1, switch off centering in user function

	from time import time	

	#  this is needed for gathering of pixel errors
	disps = []
	recvcount = []
	disps_score = []
	recvcount_score = []
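	# recvcount/disps give per-process particle counts and offsets for mpi_gatherv; the *_score
	# variants are nrefs times larger because each particle contributes one value per reference model.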
	for im in xrange(number_of_proc):
		if( im == main_node ):  
			disps.append(0)
			disps_score.append(0)
		else:		  
			disps.append(disps[im-1] + recvcount[im-1])
			disps_score.append(disps_score[im-1] + recvcount_score[im-1])
		ib, ie = MPI_start_end(total_nima, number_of_proc, im)
		recvcount.append( ie - ib )
		recvcount_score.append((ie-ib)*nrefs)

	pixer = [0.0]*nima
	cs = [0.0]*3
	total_iter = 0
	volodd = EMData.read_images(ref_vol, xrange(nrefs))
	voleve = EMData.read_images(ref_vol, xrange(nrefs))

	if restart:
		# recreate initial volumes from alignments stored in header
		itout = "000_00"
		for iref in xrange(nrefs):
			if(nrefs == 1):
				modout = ""
			else:
				modout = "_model_%02d"%(iref)	
	
			if(sort): 
				group = iref
				for im in xrange(nima):
					imgroup = data[im].get_attr('group')
					if imgroup == iref:
						data[im].set_attr('xform.projection',transmulti[im][iref])
			else: 
				group = int(999) 
				for im in xrange(nima):
					data[im].set_attr('xform.projection',transmulti[im][iref])
			
			fscfile = os.path.join(outdir, "fsc_%s%s"%(itout,modout))

			vol[iref], fscc, volodd[iref], voleve[iref] = rec3D_MPI_noCTF(data, sym, fscmask, fscfile, myid, main_node, index = group, npad = recon_pad)

			if myid == main_node:
				if helicalrecon:
					from hfunctions import processHelicalVol

					vstep=None
					if vertstep is not None:
						vstep=(vdp[iref],vdphi[iref])
					print_msg("Old rise and twist for model %i     : %8.3f, %8.3f\n"%(iref,dp[iref],dphi[iref]))
					hvals=processHelicalVol(vol[iref],voleve[iref],volodd[iref],iref,outdir,itout,
								dp[iref],dphi[iref],apix,hsearch,findseam,vstep,wcmask)
					(vol[iref],voleve[iref],volodd[iref],dp[iref],dphi[iref],vdp[iref],vdphi[iref])=hvals
					print_msg("New rise and twist for model %i     : %8.3f, %8.3f\n"%(iref,dp[iref],dphi[iref]))
					# get new FSC from symmetrized half volumes
					fscc = fsc_mask( volodd[iref], voleve[iref], mask3D, rstep, fscfile)
				else:
					vol[iref].write_image(os.path.join(outdir, "vol_%s.hdf"%itout),-1)

				if save_half is True:
					volodd[iref].write_image(os.path.join(outdir, "volodd_%s.hdf"%itout),-1)
					voleve[iref].write_image(os.path.join(outdir, "voleve_%s.hdf"%itout),-1)

				if nmasks > 1:
					# Read mask for multiplying
					ref_data[0] = maskF[iref]
				ref_data[2] = vol[iref]
				ref_data[3] = fscc
				#  call user-supplied function to prepare reference image, i.e., center and filter it
				vol[iref], cs,fl = ref_ali3d(ref_data)
				vol[iref].write_image(os.path.join(outdir, "volf_%s.hdf"%(itout)),-1)
				if (apix == 1):
					res_msg = "Models filtered at spatial frequency of:\t"
					res = fl
				else:
					res_msg = "Models filtered at resolution of:       \t"
					res = apix / fl	
				ares = array2string(array(res), precision = 2)
				print_msg("%s%s\n\n"%(res_msg,ares))	
			
			bcast_EMData_to_all(vol[iref], myid, main_node)
			# write out headers, under MPI writing has to be done sequentially
			mpi_barrier(MPI_COMM_WORLD)

	# projection matching	
	for N_step in xrange(lstp):
		terminate = 0
		Iter = -1
		while(Iter < max_iter-1 and terminate == 0):
			Iter += 1
			total_iter += 1
			itout = "%03g_%02d" %(delta[N_step], Iter)
			if myid == main_node:
				print_msg("ITERATION #%3d, inner iteration #%3d\nDelta = %4.1f, an = %5.2f, xrange = %5.2f, yrange = %5.2f, step = %5.2f\n\n"%(N_step, Iter, delta[N_step], an[N_step], xrng[N_step],yrng[N_step],step[N_step]))
	
			for iref in xrange(nrefs):
				if myid == main_node: start_time = time()
				volft,kb = prep_vol( vol[iref] )

				## constrain projections to out of plane parameter
				theta1 = None
				theta2 = None
				if oplane is not None:
					theta1 = 90-oplane
					theta2 = 90+oplane
				refrings = prepare_refrings( volft, kb, nx, delta[N_step], ref_a, sym, numr, MPI=True, phiEqpsi = "Minus", initial_theta=theta1, delta_theta=theta2)
				
				del volft,kb

				if myid== main_node:
					print_msg( "Time to prepare projections for model %i: %s\n" % (iref, legibleTime(time()-start_time)) )
					start_time = time()
	
				for im in xrange( nima ):
					data[im].set_attr("xform.projection", transmulti[im][iref])
					if an[N_step] == -1:
						t1, peak, pixer[im] = proj_ali_incore(data[im],refrings,numr,xrng[N_step],yrng[N_step],step[N_step],finfo)
					else:
						t1, peak, pixer[im] = proj_ali_incore_local(data[im],refrings,numr,xrng[N_step],yrng[N_step],step[N_step],an[N_step],finfo)
					#data[im].set_attr("xform.projection"%iref, t1)
					if nrefs > 1: data[im].set_attr("eulers_txty.%i"%iref,t1)
					scoremulti[im][iref] = peak
					from pixel_error import max_3D_pixel_error
					# t1 is the current param, t2 is old
					t2 = transmulti[im][iref]
					pixelmulti[im][iref] = max_3D_pixel_error(t1,t2,numr[-3])
					transmulti[im][iref] = t1

				if myid == main_node:
					print_msg("Time of alignment for model %i: %s\n"%(iref, legibleTime(time()-start_time)))
					start_time = time()


			# gather scoring data from all processors
			from mpi import mpi_gatherv
			scoremultisend = sum(scoremulti,[])
			pixelmultisend = sum(pixelmulti,[])
			tmp = mpi_gatherv(scoremultisend,len(scoremultisend),MPI_FLOAT, recvcount_score, disps_score, MPI_FLOAT, main_node,MPI_COMM_WORLD)
			tmp1 = mpi_gatherv(pixelmultisend,len(pixelmultisend),MPI_FLOAT, recvcount_score, disps_score, MPI_FLOAT, main_node,MPI_COMM_WORLD)
			tmp = mpi_bcast(tmp,(total_nima * nrefs), MPI_FLOAT,0, MPI_COMM_WORLD)
			tmp1 = mpi_bcast(tmp1,(total_nima * nrefs), MPI_FLOAT,0, MPI_COMM_WORLD)
			tmp = map(float,tmp)
			tmp1 = map(float,tmp1)
			score = array(tmp).reshape(-1,nrefs)
			pixelerror = array(tmp1).reshape(-1,nrefs) 
			score_local = array(scoremulti)
			mean_score = score.mean(axis=0)
			std_score = score.std(axis=0)
			cut = mean_score - (cutoff * std_score)
			cut2 = mean_score + (cutoff * std_score)
			res_max = score_local.argmax(axis=1)
			minus_cc = [0.0 for x in xrange(nrefs)]
			minus_pix = [0.0 for x in xrange(nrefs)]
			minus_ref = [0.0 for x in xrange(nrefs)]
			
			#output pixel errors
			if(myid == main_node):
				from statistics import hist_list
				lhist = 20
				pixmin = pixelerror.min(axis=1)
				region, histo = hist_list(pixmin, lhist)
				if(region[0] < 0.0):  region[0] = 0.0
				print_msg("Histogram of pixel errors\n      ERROR       number of particles\n")
				for lhx in xrange(lhist):
					print_msg(" %10.3f     %7d\n"%(region[lhx], histo[lhx]))
				# Terminate if 95% within 1 pixel error
				im = 0
				for lhx in xrange(lhist):
					if(region[lhx] > 1.0): break
					im += histo[lhx]
				print_msg( "Percent of particles with pixel error < 1: %f\n\n"% (im/float(total_nima)*100))
				term_cond = float(term)/100
				if(im/float(total_nima) > term_cond): 
					terminate = 1
					print_msg("Terminating internal loop\n")
				del region, histo
			terminate = mpi_bcast(terminate, 1, MPI_INT, 0, MPI_COMM_WORLD)
			terminate = int(terminate[0])	
			
			for im in xrange(nima):
				if(sort==False):
					data[im].set_attr('group',999)
				elif (mjump[N_step]==1):
					data[im].set_attr('group',int(res_max[im]))
				
				pix_run = data[im].get_attr('pix_score')			
				if (pix_cutoff[N_step]==1 and (terminate==1 or Iter == max_iter-1)):
					if (pixelmulti[im][int(res_max[im])] > 1):
						data[im].set_attr('pix_score',int(777))

				if (score_local[im][int(res_max[im])]<cut[int(res_max[im])]) or (two_tail and score_local[im][int(res_max[im])]>cut2[int(res_max[im])]):
					data[im].set_attr('group',int(888))
					minus_cc[int(res_max[im])] = minus_cc[int(res_max[im])] + 1

				if(pix_run == 777):
					data[im].set_attr('group',int(777))
					minus_pix[int(res_max[im])] = minus_pix[int(res_max[im])] + 1

				if (compare_ref_free != "-1") and (ref_free_cutoff[N_step] != -1) and (total_iter > 1):
					id = data[im].get_attr('ID')
					if id in rejects:
						data[im].set_attr('group',int(666))
						minus_ref[int(res_max[im])] = minus_ref[int(res_max[im])] + 1	
						
				
			minus_cc_tot = mpi_reduce(minus_cc,nrefs,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD)	
			minus_pix_tot = mpi_reduce(minus_pix,nrefs,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD) 	
			minus_ref_tot = mpi_reduce(minus_ref,nrefs,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD)
			if (myid == main_node):
				if(sort):
					tot_max = score.argmax(axis=1)
					res = bincount(tot_max)
				else:
					res = ones(nrefs) * total_nima
				print_msg("Particle distribution:	     \t\t%s\n"%(res*1.0))
				afcut1 = res - minus_cc_tot
				afcut2 = afcut1 - minus_pix_tot
				afcut3 = afcut2 - minus_ref_tot
				print_msg("Particle distribution after cc cutoff:\t\t%s\n"%(afcut1))
				print_msg("Particle distribution after pix cutoff:\t\t%s\n"%(afcut2)) 
				print_msg("Particle distribution after ref cutoff:\t\t%s\n\n"%(afcut3)) 
					
						
			res = [0.0 for i in xrange(nrefs)]
			for iref in xrange(nrefs):
				if(center == -1):
					from utilities      import estimate_3D_center_MPI, rotate_3D_shift
					dummy=EMData()
					cs[0], cs[1], cs[2], dummy, dummy = estimate_3D_center_MPI(data, total_nima, myid, number_of_proc, main_node)				
					cs = mpi_bcast(cs, 3, MPI_FLOAT, main_node, MPI_COMM_WORLD)
					cs = [-float(cs[0]), -float(cs[1]), -float(cs[2])]
					rotate_3D_shift(data, cs)


				if(sort): 
					group = iref
					for im in xrange(nima):
						imgroup = data[im].get_attr('group')
						if imgroup == iref:
							data[im].set_attr('xform.projection',transmulti[im][iref])
				else: 
					group = int(999) 
					for im in xrange(nima):
						data[im].set_attr('xform.projection',transmulti[im][iref])
				if(nrefs == 1):
					modout = ""
				else:
					modout = "_model_%02d"%(iref)	
				
				fscfile = os.path.join(outdir, "fsc_%s%s"%(itout,modout))
				vol[iref], fscc, volodd[iref], voleve[iref] = rec3D_MPI_noCTF(data, sym, fscmask, fscfile, myid, main_node, index=group, npad=recon_pad)
	
				if myid == main_node:
					print_msg("3D reconstruction time for model %i: %s\n"%(iref, legibleTime(time()-start_time)))
					start_time = time()
	
				# Compute Fourier variance
				if fourvar:
					outvar = os.path.join(outdir, "volVar_%s.hdf"%(itout))
					ssnr_file = os.path.join(outdir, "ssnr_%s"%(itout))
					varf = varf3d_MPI(data, ssnr_text_file=ssnr_file, mask2D=None, reference_structure=vol[iref], ou=last_ring, rw=1.0, npad=1, CTF=None, sign=1, sym=sym, myid=myid)
					if myid == main_node:
						print_msg("Time to calculate 3D Fourier variance for model %i: %s\n"%(iref, legibleTime(time()-start_time)))
						start_time = time()
						varf = 1.0/varf
						varf.write_image(outvar,-1)
				else:  varf = None

				if myid == main_node:
					if helicalrecon:
						from hfunctions import processHelicalVol

						vstep=None
						if vertstep is not None:
							vstep=(vdp[iref],vdphi[iref])
						print_msg("Old rise and twist for model %i     : %8.3f, %8.3f\n"%(iref,dp[iref],dphi[iref]))
						hvals=processHelicalVol(vol[iref],voleve[iref],volodd[iref],iref,outdir,itout,
									dp[iref],dphi[iref],apix,hsearch,findseam,vstep,wcmask)
						(vol[iref],voleve[iref],volodd[iref],dp[iref],dphi[iref],vdp[iref],vdphi[iref])=hvals
						print_msg("New rise and twist for model %i     : %8.3f, %8.3f\n"%(iref,dp[iref],dphi[iref]))
						# get new FSC from symmetrized half volumes
						fscc = fsc_mask( volodd[iref], voleve[iref], mask3D, rstep, fscfile)

						print_msg("Time to search and apply helical symmetry for model %i: %s\n\n"%(iref, legibleTime(time()-start_time)))
						start_time = time()
					else:
						vol[iref].write_image(os.path.join(outdir, "vol_%s.hdf"%(itout)),-1)

					if save_half is True:
						volodd[iref].write_image(os.path.join(outdir, "volodd_%s.hdf"%(itout)),-1)
						voleve[iref].write_image(os.path.join(outdir, "voleve_%s.hdf"%(itout)),-1)

					if nmasks > 1:
						# Read mask for multiplying
						ref_data[0] = maskF[iref]
					ref_data[2] = vol[iref]
					ref_data[3] = fscc
					ref_data[4] = varf
					#  call user-supplied function to prepare reference image, i.e., center and filter it
					vol[iref], cs,fl = ref_ali3d(ref_data)
					vol[iref].write_image(os.path.join(outdir, "volf_%s.hdf"%(itout)),-1)
					if (apix == 1):
						res_msg = "Models filtered at spatial frequency of:\t"
						res[iref] = fl
					else:
						res_msg = "Models filtered at resolution of:       \t"
						res[iref] = apix / fl	
	
				del varf
				bcast_EMData_to_all(vol[iref], myid, main_node)
				
				if compare_ref_free != "-1": compare_repro = True
				if compare_repro:
					outfile_repro = comp_rep(refrings, data, itout, modout, vol[iref], group, nima, nx, myid, main_node, outdir)
					mpi_barrier(MPI_COMM_WORLD)
					if compare_ref_free != "-1":
						ref_free_output = os.path.join(outdir,"ref_free_%s%s"%(itout,modout))
						rejects = compare(compare_ref_free, outfile_repro,ref_free_output,yrng[N_step], xrng[N_step], rstep,nx,apix,ref_free_cutoff[N_step], number_of_proc, myid, main_node)

			# retrieve alignment params from all processors
			par_str = ['xform.projection','ID','group']
			if nrefs > 1:
				for iref in xrange(nrefs):
					par_str.append('eulers_txty.%i'%iref)

			if myid == main_node:
				from utilities import recv_attr_dict
				recv_attr_dict(main_node, stack, data, par_str, image_start, image_end, number_of_proc)
				
			else:	send_attr_dict(main_node, data, par_str, image_start, image_end)

			if myid == main_node:
				ares = array2string(array(res), precision = 2)
				print_msg("%s%s\n\n"%(res_msg,ares))
				dummy = EMData()
				if full_output:
					nimat = EMUtil.get_image_count(stack)
					output_file = os.path.join(outdir, "paramout_%s"%itout)
					foutput = open(output_file, 'w')
					for im in xrange(nimat):
						# save the parameters for each of the models
						outstring = ""
						dummy.read_image(stack,im,True)
						param3d = dummy.get_attr('xform.projection')
						g = dummy.get_attr("group")
						# retrieve alignments in EMAN-format
						pE = param3d.get_params('eman')
						outstring += "%f\t%f\t%f\t%f\t%f\t%i\n" %(pE["az"], pE["alt"], pE["phi"], pE["tx"], pE["ty"],g)
						foutput.write(outstring)
					foutput.close()
				del dummy
			mpi_barrier(MPI_COMM_WORLD)


#	mpi_finalize()	

	if myid == main_node: print_end_msg("ali3d_MPI")
Example #18
0
def generate_helimic(refvol,
                     outdir,
                     pixel,
                     CTF=False,
                     Cs=2.0,
                     voltage=200.0,
                     ampcont=10.0,
                     nonoise=False,
                     rand_seed=14567):

    from utilities import model_blank, model_gauss, model_gauss_noise, pad, get_im
    import os
    from random import random, seed
    from projection import prgs, prep_vol
    from filter import filt_gaussl, filt_ctf
    from EMAN2 import EMAN2Ctf

    if os.path.exists(outdir):
        ERROR(
            'Output directory exists, please change the name and restart the program',
            "sxhelical_demo", 1)
    os.mkdir(outdir)
    seed(rand_seed)
    Util.set_randnum_seed(rand_seed)
    angles = []
    for i in range(3):
        angles.append([0.0 + 60.0 * i, 90.0 - i * 5, 0.0, 0.0, 0.0])

    nangle = len(angles)

    volfts = get_im(refvol)
    nx = volfts.get_xsize()
    ny = volfts.get_ysize()
    nz = volfts.get_zsize()
    volfts, kbx, kby, kbz = prep_vol(volfts)
    iprj = 0
    width = 500
    xstart = 0
    ystart = 0

    for idef in range(3, 6):
        mic = model_blank(2048, 2048)
        #defocus = idef*0.2
        defocus = idef * 0.6  ##@ming
        if CTF:
            #ctf = EMAN2Ctf()
            #ctf.from_dict( {"defocus":defocus, "cs":Cs, "voltage":voltage, "apix":pixel, "ampcont":ampcont, "bfactor":0.0} )
            from utilities import generate_ctf
            ctf = generate_ctf(
                [defocus, 2, 200, 1.84, 0.0, ampcont, defocus * 0.2, 80]
            )  ##@ming   the range of astigmatism amplitude is between 10 percent and 22 percent. 20 percent is a good choice.
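            # assumed parameter order: [defocus, Cs, voltage, apix, bfactor, amp. contrast, astig. amplitude, astig. angle];
            # note that Cs, voltage and pixel size are hard-coded here instead of taken from the function arguments.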
        i = idef - 4
        for k in range(1):
            psi = 90 + 10 * i
            proj = prgs(
                volfts, kbz,
                [angles[idef - 3][0], angles[idef - 3][1], psi, 0.0, 0.0], kbx,
                kby)
            proj = Util.window(proj, 320, nz)
            mic += pad(proj, 2048, 2048, 1, 0.0, 750 * i, 20 * i, 0)

        if not nonoise: mic += model_gauss_noise(30.0, 2048, 2048)
        if CTF:
            #apply CTF
            mic = filt_ctf(mic, ctf)

        if not nonoise:
            mic += filt_gaussl(model_gauss_noise(17.5, 2048, 2048), 0.3)

        mic.write_image("%s/mic%1d.hdf" % (outdir, idef - 3), 0)
Example #19
0
def main():
    from utilities import get_input_from_string

    progname = os.path.basename(sys.argv[0])
    usage = (
        progname
        + " stack output_average --radius=particle_radius --xr=xr --yr=yr --ts=ts --thld_err=thld_err --num_ali=num_ali --fl=fl --aa=aa --CTF --verbose --stables"
    )
    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option("--radius", type="int", default=-1, help=" particle radius for alignment")
    parser.add_option(
        "--xr",
        type="string",
        default="2 1",
        help="range for translation search in x direction, search is +/xr (default 2,1)",
    )
    parser.add_option(
        "--yr",
        type="string",
        default="-1",
        help="range for translation search in y direction, search is +/yr (default = same as xr)",
    )
    parser.add_option(
        "--ts",
        type="string",
        default="1 0.5",
        help="step size of the translation search in both directions, search is -xr, -xr+ts, 0, xr-ts, xr, can be fractional (default: 1,0.5)",
    )
    parser.add_option("--thld_err", type="float", default=0.75, help="threshld of pixel error (default = 0.75)")
    parser.add_option(
        "--num_ali", type="int", default=5, help="number of alignments performed for stability (default = 5)"
    )
    parser.add_option("--maxit", type="int", default=30, help="number of iterations for each xr (default = 30)")
    parser.add_option(
        "--fl",
        type="float",
        default=0.3,
        help="cut-off frequency of hyperbolic tangent low-pass Fourier filter (default = 0.3)",
    )
    parser.add_option(
        "--aa", type="float", default=0.2, help="fall-off of hyperbolic tangent low-pass Fourier filter (default = 0.2)"
    )
    parser.add_option("--CTF", action="store_true", default=False, help="Use CTF correction during the alignment ")
    parser.add_option(
        "--verbose", action="store_true", default=False, help="print individual pixel error (default = False)"
    )
    parser.add_option(
        "--stables",
        action="store_true",
        default=False,
        help="output the stable particles number in file (default = False)",
    )
    parser.add_option(
        "--method", type="string", default=" ", help="SHC (standard method is default when flag is ommitted)"
    )
    (options, args) = parser.parse_args()
    if len(args) != 1 and len(args) != 2:
        print "usage: " + usage
        print "Please run '" + progname + " -h' for detailed options"
    else:
        if global_def.CACHE_DISABLE:
            from utilities import disable_bdb_cache

            disable_bdb_cache()

        from applications import within_group_refinement, ali2d_ras
        from pixel_error import multi_align_stability
        from utilities import write_text_file, write_text_row

        global_def.BATCH = True

        xrng = get_input_from_string(options.xr)
        if options.yr == "-1":
            yrng = xrng
        else:
            yrng = get_input_from_string(options.yr)
        step = get_input_from_string(options.ts)

        class_data = EMData.read_images(args[0])

        nx = class_data[0].get_xsize()
        ou = options.radius
        num_ali = options.num_ali
        if ou == -1:
            ou = nx // 2 - 2
        from utilities import model_circle, get_params2D, set_params2D

        mask = model_circle(ou, nx, nx)

        if options.CTF:
            from filter import filt_ctf

            for im in xrange(len(class_data)):
                #  Flip phases
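                #  (binary=1 in filt_ctf applies phase flipping only, not amplitude correction)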
                class_data[im] = filt_ctf(class_data[im], class_data[im].get_attr("ctf"), binary=1)
        for im in class_data:
            im.set_attr("previousmax", -1.0e10)
            try:
                t = im.get_attr("xform.align2d")  # if they are there, no need to set them!
            except:
                try:
                    t = im.get_attr("xform.projection")
                    d = t.get_params("spider")
                    set_params2D(im, [0.0, -d["tx"], -d["ty"], 0, 1.0])
                except:
                    set_params2D(im, [0.0, 0.0, 0.0, 0, 1.0])
        all_ali_params = []

        for ii in xrange(num_ali):
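            # Each pass realigns the class from randomized starting parameters; the
            # resulting per-particle parameters are kept for the stability test below.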
            ali_params = []
            if options.verbose:
                ALPHA = []
                SX = []
                SY = []
                MIRROR = []
            if xrng[0] == 0.0 and yrng[0] == 0.0:
                avet = ali2d_ras(
                    class_data,
                    randomize=True,
                    ir=1,
                    ou=ou,
                    rs=1,
                    step=1.0,
                    dst=90.0,
                    maxit=options.maxit,
                    check_mirror=True,
                    FH=options.fl,
                    FF=options.aa,
                )
            else:
                avet = within_group_refinement(
                    class_data,
                    mask,
                    True,
                    1,
                    ou,
                    1,
                    xrng,
                    yrng,
                    step,
                    90.0,
                    maxit=options.maxit,
                    FH=options.fl,
                    FF=options.aa,
                    method=options.method,
                )
                from utilities import info

                # print "  avet  ",info(avet)
            for im in class_data:
                alpha, sx, sy, mirror, scale = get_params2D(im)
                ali_params.extend([alpha, sx, sy, mirror])
                if options.verbose:
                    ALPHA.append(alpha)
                    SX.append(sx)
                    SY.append(sy)
                    MIRROR.append(mirror)
            all_ali_params.append(ali_params)
            if options.verbose:
                write_text_file([ALPHA, SX, SY, MIRROR], "ali_params_run_%d" % ii)
        """
		avet = class_data[0]
		from utilities import read_text_file
		all_ali_params = []
		for ii in xrange(5):
			temp = read_text_file( "ali_params_run_%d"%ii,-1)
			uuu = []
			for k in xrange(len(temp[0])):
				uuu.extend([temp[0][k],temp[1][k],temp[2][k],temp[3][k]])
			all_ali_params.append(uuu)


		"""

        stable_set, mir_stab_rate, pix_err = multi_align_stability(
            all_ali_params, 0.0, 10000.0, options.thld_err, options.verbose, 2 * ou + 1
        )
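        # stable_set holds (pixel error, particle id, [alpha, sx, sy, mirror]) for the
        # particles whose alignments agree across the num_ali runs within thld_err pixels.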
        print "%4s %20s %20s %20s %30s %6.2f" % (
            "",
            "Size of set",
            "Size of stable set",
            "Mirror stab rate",
            "Pixel error prior to pruning the set above threshold of",
            options.thld_err,
        )
        print "Average stat: %10d %20d %20.2f   %15.2f" % (len(class_data), len(stable_set), mir_stab_rate, pix_err)
        if len(stable_set) > 0:
            if options.stables:
                stab_mem = [[0, 0.0, 0] for j in xrange(len(stable_set))]
                for j in xrange(len(stable_set)):
                    stab_mem[j] = [int(stable_set[j][1]), stable_set[j][0], j]
                write_text_row(stab_mem, "stable_particles.txt")

            stable_set_id = []
            particle_pixerr = []
            for s in stable_set:
                stable_set_id.append(s[1])
                particle_pixerr.append(s[0])
            from fundamentals import rot_shift2D

            avet.to_zero()
            l = -1
            print "average parameters:  angle, x-shift, y-shift, mirror"
            for j in stable_set_id:
                l += 1
                print " %4d  %4d  %12.2f %12.2f %12.2f        %1d" % (
                    l,
                    j,
                    stable_set[l][2][0],
                    stable_set[l][2][1],
                    stable_set[l][2][2],
                    int(stable_set[l][2][3]),
                )
                avet += rot_shift2D(
                    class_data[j], stable_set[l][2][0], stable_set[l][2][1], stable_set[l][2][2], stable_set[l][2][3]
                )
            avet /= l + 1
            avet.set_attr("members", stable_set_id)
            avet.set_attr("pix_err", pix_err)
            avet.set_attr("pixerr", particle_pixerr)
            avet.write_image(args[1])

        global_def.BATCH = False
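
A hypothetical command line for this program (the script name and file names are placeholders; the positional arguments and options are those defined in the usage string above):

#   python sx_class_stability.py class_stack.hdf class_avg.hdf --radius=29 --xr="2 1" \
#       --ts="1 0.5" --thld_err=0.75 --num_ali=5 --CTF --stables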