Example #1
def spruce_up_var_m( refdata ):
	from utilities  import print_msg
	from utilities  import model_circle, get_im
	from filter     import filt_tanl, filt_gaussl
	from morphology import threshold
	import os

	numref     = refdata[0]
	outdir     = refdata[1]
	fscc       = refdata[2]
	total_iter = refdata[3]
	varf       = refdata[4]
	mask       = refdata[5]
	ali50S     = refdata[6]

	if ali50S:
		mask_50S = get_im( "mask-50S.spi" )


	if fscc is None:
		flmin = 0.4
		aamin = 0.1
	else:
		flmin, aamin, idmin = minfilt( fscc )

	msg = "Minimum tangent filter:  cut-off frequency = %10.3f     fall-off = %10.3f\n"%(fflmin, aamin)
	print_msg(msg)

	for i in xrange(numref):
		volf = get_im( os.path.join(outdir, "vol%04d.hdf"% total_iter) , i )
		if(not (varf is None) ):   volf = volf.filter_by_image( varf )
		volf = filt_tanl(volf, flmin, aamin)
		stat = Util.infomask(volf, mask, True)
		volf -= stat[0]
		Util.mul_scalar(volf, 1.0/stat[1])

		nx = volf.get_xsize()
		stat = Util.infomask(volf,model_circle(nx//2-2,nx,nx,nx)-model_circle(nx//2-6,nx,nx,nx), True)
		volf -= stat[0]
		Util.mul_img( volf, mask )

		volf = threshold(volf)
		volf = filt_gaussl( volf, 0.4)

		if ali50S:
			if i==0:
				v50S_0 = volf.copy()
				v50S_0 *= mask_50S
			else:
				from applications import ali_vol_3
				from fundamentals import rot_shift3D
				v50S_i = volf.copy()
				v50S_i *= mask_50S

				params = ali_vol_3(v50S_i, v50S_0, 10.0, 0.5, mask=mask_50S)
				volf = rot_shift3D( volf, params[0], params[1], params[2], params[3], params[4], params[5], 1.0)

		volf.write_image( os.path.join(outdir, "volf%04d.hdf"%total_iter), i )
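A minimal usage sketch for spruce_up_var_m (all values below are hypothetical; it assumes the function and its helper minfilt are importable from the enclosing module and that the vol%04d.hdf stack already exists in outdir):

from utilities import model_circle

nx      = 64
refdata = [ 2,           # numref: number of references stored in the vol%04d.hdf stack
            "refine01",  # outdir: directory holding vol%04d.hdf and receiving volf%04d.hdf
            None,        # fscc: FSC curve; None falls back to flmin = 0.4, aamin = 0.1
            5,           # total_iter: iteration number used in the file names
            None,        # varf: 1/variance image, or None to skip variance filtering
            model_circle(nx//2 - 2, nx, nx, nx),  # 3D mask
            False ]      # ali50S: skip the extra 50S subunit alignment
spruce_up_var_m(refdata)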
Example #2
	def test_ccf_circle_2D_31x20(self):
		"""test ccf*: circle 2D, 31x20.........................."""
		from utilities import model_circle
		from fundamentals import ccf, ccfn, ccfnp, ccfnpl, ccfp, ccfpl
		A = model_circle(7, 31, 20)
		B = model_circle(4, 31, 20)
		self.internal_check_ccf(A, B, ccf(A,B,False), ccfn(A,B,False), ccfp(A,B,False), ccfnp(A,B,False), ccfpl(A,B,False), ccfnpl(A,B,False)
									, ccf(A,B,True ), ccfn(A,B,True ), ccfp(A,B,True ), ccfnp(A,B,True ), ccfpl(A,B,True ), ccfnpl(A,B,True ) )
Example #3
def spruce_up_variance(ref_data):
    from utilities import print_msg
    from filter import filt_tanl, fit_tanh, filt_gaussl
    from morphology import threshold
    #  Prepare the reference in 3D alignment, i.e., low-pass filter and center.
    #  Input: list ref_data
    #   0 - mask
    #   1 - center flag
    #   2 - raw average
    #   3 - fsc result
    #   4 - 1.0/variance
    #  Output: filtered, centered, and masked reference image
    #  apply filtration (FSC) to reference image:
    mask = ref_data[0]
    center = ref_data[1]
    vol = ref_data[2]
    fscc = ref_data[3]
    varf = ref_data[4]

    print_msg("spruce_up with variance\n")
    cs = [0.0] * 3

    if not (varf is None):
        volf = vol.filter_by_image(varf)
    else:
        # no 1/variance image supplied; fall back to the raw average
        volf = vol.copy()

    #fl, aa = fit_tanh(ref_data[3])
    fl = 0.22
    aa = 0.15
    msg = "Tangent filter:  cut-off frequency = %10.3f        fall-off = %10.3f\n" % (
        fl, aa)
    print_msg(msg)
    volf = filt_tanl(volf, fl, aa)

    stat = Util.infomask(volf, None, True)
    volf = volf - stat[0]
    Util.mul_scalar(volf, 1.0 / stat[1])

    from utilities import model_circle
    nx = volf.get_xsize()
    stat = Util.infomask(
        volf,
        model_circle(nx // 2 - 2, nx, nx, nx) -
        model_circle(nx // 2 - 6, nx, nx, nx), True)

    volf -= stat[0]
    Util.mul_img(volf, mask)

    volf = threshold(volf)

    volf = filt_gaussl(volf, 0.4)
    return volf, cs
Example #4
	def test_acf_circle_2D_31x20(self):
		"""test acf*: circle 2D, 31x20.........................."""
		from utilities import model_circle
		from fundamentals import acf, acfn, acfnp, acfnpl, acfp, acfpl
		A = model_circle(7, 31, 20)
		self.internal_check_ccf(A, A, acf(A,False), acfn(A,False), acfp(A,False), acfnp(A,False), acfpl(A, False), acfnpl(A, False)
									, acf(A,True ), acfn(A,True ), acfp(A,True ), acfnp(A,True ), acfpl(A, True ), acfnpl(A, True ) )
Example #5
def constant(ref_data):
    from utilities import print_msg
    from filter import fit_tanh, filt_tanl
    from utilities import center_2D
    from morphology import threshold
    #  Prepare the reference in 2D alignment, i.e., low-pass filter and center.
    #  Input: list ref_data
    #   0 - mask
    #   1 - center flag
    #   2 - raw average
    #   3 - fsc result
    #  Output: filtered, centered, and masked reference image
    #  apply filtration (FRC) to reference image:
    global ref_ali2d_counter
    ref_ali2d_counter += 1
    #print_msg("steady   #%6d\n"%(ref_ali2d_counter))
    fl = 0.4
    aa = 0.1
    #msg = "Tangent filter:  cut-off frequency = %10.3f        fall-off = %10.3f\n"%(fl, aa)
    #print_msg(msg)
    from utilities import model_circle
    nx = ref_data[2].get_xsize()
    stat = Util.infomask(ref_data[2], model_circle(nx // 2 - 2, nx, nx), False)
    ref_data[2] -= stat[0]
    #tavg = filt_tanl(threshold(ref_data[2]), fl, aa)
    tavg = filt_tanl(ref_data[2], fl, aa)
    cs = [0.0] * 2
    return tavg, cs
Example #6
File: sxpetite.py  Project: cpsemmens/eman2
def volshiftali(vv, mask3d=None):
	# imports as used elsewhere in these examples (fftip is assumed to come from fundamentals)
	from EMAN2        import EMData
	from utilities    import model_circle, peak_search
	from fundamentals import ccf, fftip
	nv = len(vv)
	ni = vv[0].get_xsize()
	if mask3d is None:
		mask3d = model_circle(ni//2-2, ni, ni, ni)

	for i in xrange(nv):
		Util.mul_img(vv[i], mask3d)
		fftip(vv[i])

	del mask3d
	ps = [[0.,0.,0.] for i in xrange(nv)]
	changed = True
	while(changed):
		a = EMData(ni, ni, ni, False)
		for i in vv:
			Util.add_img(a,i)

		changed = False
		for i in xrange(nv):
			pp = peak_search(ccf(vv[i],a), 1)
			for j in xrange(3):
				if(pp[0][5+j] != ps[i][j]):
					ps[i][j] = pp[0][5+j]
					changed = True

	return ps
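A minimal usage sketch for volshiftali (the file names are hypothetical); note that the input volumes are masked and Fourier transformed in place, and one [sx, sy, sz] shift triple is returned per volume:

from utilities import get_im

vols   = [ get_im("vol%04d.hdf" % i) for i in xrange(4) ]
shifts = volshiftali(vols)      # default mask: sphere of radius ni//2 - 2
for i in xrange(len(vols)):
	print("volume %d  shift %s" % (i, shifts[i]))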
Example #7
def constant( ref_data ):
	from utilities    import print_msg
	from filter       import fit_tanh, filt_tanl
	from utilities    import center_2D
	from morphology   import threshold
	#  Prepare the reference in 2D alignment, i.e., low-pass filter and center.
	#  Input: list ref_data
	#   0 - mask
	#   1 - center flag
	#   2 - raw average
	#   3 - fsc result
	#  Output: filtered, centered, and masked reference image
	#  apply filtration (FRC) to reference image:
	global  ref_ali2d_counter
	ref_ali2d_counter += 1
	#print_msg("steady   #%6d\n"%(ref_ali2d_counter))
	fl = 0.4
	aa = 0.1
	#msg = "Tangent filter:  cut-off frequency = %10.3f        fall-off = %10.3f\n"%(fl, aa)
	#print_msg(msg)
	from utilities import model_circle
	nx = ref_data[2].get_xsize()
	stat = Util.infomask(ref_data[2], model_circle(nx//2-2,nx,nx), False)
	ref_data[2] -= stat[0]
	#tavg = filt_tanl(threshold(ref_data[2]), fl, aa)
	tavg = filt_tanl(ref_data[2], fl, aa)
	cs = [0.0]*2
	return  tavg, cs
Example #8
def spruce_up_variance( ref_data ):
	from utilities      import print_msg
	from filter         import filt_tanl, fit_tanh, filt_gaussl
	from morphology     import threshold
	#  Prepare the reference in 3D alignment, i.e., low-pass filter and center.
	#  Input: list ref_data
	#   0 - mask
	#   1 - center flag
	#   2 - raw average
	#   3 - fsc result
	#   4 - 1.0/variance
	#  Output: filtered, centered, and masked reference image
	#  apply filtration (FSC) to reference image:
	mask   = ref_data[0]
	center = ref_data[1]
	vol    = ref_data[2]
	fscc   = ref_data[3]
	varf   = ref_data[4]

	print_msg("spruce_up with variance\n")
	cs = [0.0]*3

	if not(varf is None):
		volf = vol.filter_by_image(varf)
	else:
		# no 1/variance image supplied; fall back to the raw average
		volf = vol.copy()

	#fl, aa = fit_tanh(ref_data[3])
	fl = 0.22
	aa = 0.15
	msg = "Tangent filter:  cut-off frequency = %10.3f        fall-off = %10.3f\n"%(fl, aa)
	print_msg(msg)
	volf = filt_tanl(volf, fl, aa)

	stat = Util.infomask(volf, None, True)
	volf = volf - stat[0]
	Util.mul_scalar(volf, 1.0/stat[1])

	from utilities import model_circle
	nx = volf.get_xsize()
	stat = Util.infomask(volf, model_circle(nx//2-2,nx,nx,nx)-model_circle(nx//2-6,nx,nx,nx), True)

	volf -= stat[0]
	Util.mul_img(volf, mask)

	volf = threshold(volf)
	
	volf = filt_gaussl(volf, 0.4)
	return  volf, cs
Example #9
def spruce_up(ref_data):
    from utilities import print_msg
    from filter import filt_tanl, fit_tanh
    from morphology import threshold
    #  Prepare the reference in 3D alignment, i.e., low-pass filter and center.
    #  Input: list ref_data
    #   0 - mask
    #   1 - center flag
    #   2 - raw average
    #   3 - fsc result
    #  Output: filtered, centered, and masked reference image
    #  apply filtration (FSC) to reference image:

    print_msg("Changed4 spruce_up\n")
    cs = [0.0] * 3

    stat = Util.infomask(ref_data[2], None, True)
    volf = ref_data[2] - stat[0]
    Util.mul_scalar(volf, 1.0 / stat[1])
    volf = threshold(volf)
    # Apply B-factor
    from filter import filt_gaussinv
    from math import sqrt
    B = 1.0 / sqrt(2. * 14.0)
    volf = filt_gaussinv(volf, B, False)
    nx = volf.get_xsize()
    from utilities import model_circle
    stat = Util.infomask(
        volf,
        model_circle(nx // 2 - 2, nx, nx, nx) -
        model_circle(nx // 2 - 6, nx, nx, nx), True)

    volf -= stat[0]
    Util.mul_img(volf, ref_data[0])
    fl, aa = fit_tanh(ref_data[3])
    #fl = 0.35
    #aa = 0.1
    aa /= 2
    msg = "Tangent filter:  cut-off frequency = %10.3f        fall-off = %10.3f\n" % (
        fl, aa)
    print_msg(msg)
    volf = filt_tanl(volf, fl, aa)
    return volf, cs
Example #10
def adaptive_mask2D(img, nsigma = 1.0, ndilation = 3, kernel_size = 11, gauss_standard_dev =9):
	"""
		Name
			adaptive_mask - create a mask from a given image.
		Input
			img: input image
			nsigma: value for initial thresholding of the image.
		Output
			mask: The mask will have values one, zero, with Gaussian smooth transition between two regions.
	"""
	from utilities  import gauss_edge, model_circle
	from morphology import binarize, dilation
	nx = img.get_xsize()
	ny = img.get_ysize()
	mc = model_circle(nx//2, nx, ny) - model_circle(nx//3, nx, ny)
	s1 = Util.infomask(img, mc, True)
	mask = Util.get_biggest_cluster(binarize(img, s1[0]+s1[1]*nsigma))
	for i in xrange(ndilation):   mask = dilation(mask)
	#mask = gauss_edge(mask, kernel_size, gauss_standard_dev)
	return mask
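A minimal usage sketch for adaptive_mask2D (the file name is hypothetical); with the gauss_edge step commented out above, the returned mask is purely binary:

from utilities import get_im

avg  = get_im("class_average.hdf")
mask = adaptive_mask2D(avg, nsigma=1.0, ndilation=3)
Util.mul_img(avg, mask)      # zero out everything outside the mask, as done in the other examples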
Example #11
def spruce_up( ref_data ):
	from utilities      import print_msg
	from filter         import filt_tanl, fit_tanh
	from morphology     import threshold
	#  Prepare the reference in 3D alignment, i.e., low-pass filter and center.
	#  Input: list ref_data
	#   0 - mask
	#   1 - center flag
	#   2 - raw average
	#   3 - fsc result
	#  Output: filtered, centered, and masked reference image
	#  apply filtration (FSC) to reference image:

	print_msg("Changed4 spruce_up\n")
	cs = [0.0]*3

	stat = Util.infomask(ref_data[2], None, True)
	volf = ref_data[2] - stat[0]
	Util.mul_scalar(volf, 1.0/stat[1])
	volf = threshold(volf)
	# Apply B-factor
	from filter import filt_gaussinv
	from math import sqrt
	B = 1.0/sqrt(2.*14.0)
	volf = filt_gaussinv(volf, B, False)
	nx = volf.get_xsize()
	from utilities import model_circle
	stat = Util.infomask(volf, model_circle(nx//2-2,nx,nx,nx)-model_circle(nx//2-6,nx,nx,nx), True)

	volf -= stat[0]
	Util.mul_img(volf, ref_data[0])
	fl, aa = fit_tanh(ref_data[3])
	#fl = 0.35
	#aa = 0.1
	aa /= 2
	msg = "Tangent filter:  cut-off frequency = %10.3f        fall-off = %10.3f\n"%(fl, aa)
	print_msg(msg)
	volf = filt_tanl(volf, fl, aa)
	return  volf, cs
Example #12
File: sxpetite.py  Project: cpsemmens/eman2
def get_resolution(vol, radi, nnxo, fscoutputdir):
	# this function is single processor
	#  Get updated FSC curves
	if(ali3d_options.mask3D is None):  mask = model_circle(radi,nnxo,nnxo,nnxo)
	else:                              mask = get_im(ali3d_options.mask3D)
	nfsc = fsc(vol[0]*mask,vol[1]*mask, 1.0,os.path.join(fscoutputdir,"fsc.txt") )
	currentres = 0.5
	ns = len(nfsc[1])
	for i in xrange(1,ns-1):
		if ( (2*nfsc[1][i]/(1.0+nfsc[1][i]) ) < 0.5):
			currentres = nfsc[0][i-1]
			break
	#print("  Current resolution ",i,currentres)
	if(currentres < 0.0):
		print("  Something wrong with the resolution, cannot continue")
		mpi_finalize()
		exit()

	return  currentres
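The loop above implements a simple resolution criterion: report the last spatial frequency before the corrected curve 2*fsc/(1+fsc) drops below 0.5. A plain-Python sketch on a toy curve (the numbers are made up for illustration):

freqs  = [0.00, 0.05, 0.10, 0.15, 0.20, 0.25]
fscval = [1.00, 0.98, 0.90, 0.60, 0.30, 0.10]
currentres = 0.5
for i in xrange(1, len(fscval) - 1):
	if (2 * fscval[i] / (1.0 + fscval[i])) < 0.5:
		currentres = freqs[i - 1]
		break
print(currentres)    # 0.15 for this toy curve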
Example #13
def model_circle(rad, x, y):
    ''' Create a model circle
    
    * has alternative
    
    :Parameters:
    
    rad : int
          Radius
    x : int
        Width of the image
    y : int
        Height of the image
        
    :Returns:
    
    model : array
            Image with disk of radius `rad`
    '''
    
    if utilities is None: raise ImportError, "EMAN2/Sparx library not available, model_circle requires EMAN2/Sparx"
    emdata =  utilities.model_circle(rad, x, y)
    return em2numpy(emdata).copy()
Example #14
def model_circle(rad, x, y):
    ''' Create a model circle
    
    * has alternative
    
    :Parameters:
    
    rad : int
          Radius
    x : int
        Width of the image
    y : int
        Height of the image
        
    :Returns:
    
    model : array
            Image with disk of radius `rad`
    '''

    if utilities is None:
        raise ImportError, "EMAN2/Sparx library not available, model_circle requires EMAN2/Sparx"
    emdata = utilities.model_circle(rad, x, y)
    return em2numpy(emdata).copy()
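A small usage sketch of the wrapper above (assuming the EMAN2/Sparx library is importable): the difference of two disks gives a ring (annulus), the same pattern the earlier examples pass to Util.infomask to estimate background statistics near the edge.

nx   = 64
ring = model_circle(nx // 2 - 2, nx, nx) - model_circle(nx // 2 - 6, nx, nx)   # numpy annulus, 4 pixels wide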
Example #15
def shiftali_MPI(stack,
                 maskfile=None,
                 maxit=100,
                 CTF=False,
                 snr=1.0,
                 Fourvar=False,
                 search_rng=-1,
                 oneDx=False,
                 search_rng_y=-1):
    from applications import MPI_start_end
    from utilities import model_circle, model_blank, get_image, peak_search, get_im
    from utilities import reduce_EMData_to_root, bcast_EMData_to_all, send_attr_dict, file_type, bcast_number_to_all, bcast_list_to_all
    from pap_statistics import varf2d_MPI
    from fundamentals import fft, ccf, rot_shift3D, rot_shift2D
    from utilities import get_params2D, set_params2D
    from utilities import print_msg, print_begin_msg, print_end_msg
    import os
    import sys
    from mpi import mpi_init, mpi_comm_size, mpi_comm_rank, MPI_COMM_WORLD
    from mpi import mpi_reduce, mpi_bcast, mpi_barrier, mpi_gatherv
    from mpi import MPI_SUM, MPI_FLOAT, MPI_INT
    from EMAN2 import Processor
    from time import time

    number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
    myid = mpi_comm_rank(MPI_COMM_WORLD)
    main_node = 0

    ftp = file_type(stack)

    if myid == main_node:
        print_begin_msg("shiftali_MPI")

    max_iter = int(maxit)

    if myid == main_node:
        if ftp == "bdb":
            from EMAN2db import db_open_dict
            dummy = db_open_dict(stack, True)
        nima = EMUtil.get_image_count(stack)
    else:
        nima = 0
    nima = bcast_number_to_all(nima, source_node=main_node)
    list_of_particles = list(range(nima))

    image_start, image_end = MPI_start_end(nima, number_of_proc, myid)
    list_of_particles = list_of_particles[image_start:image_end]

    # read nx and ctf_app (if CTF) and broadcast to all nodes
    if myid == main_node:
        ima = EMData()
        ima.read_image(stack, list_of_particles[0], True)
        nx = ima.get_xsize()
        ny = ima.get_ysize()
        if CTF: ctf_app = ima.get_attr_default('ctf_applied', 2)
        del ima
    else:
        nx = 0
        ny = 0
        if CTF: ctf_app = 0
    nx = bcast_number_to_all(nx, source_node=main_node)
    ny = bcast_number_to_all(ny, source_node=main_node)
    if CTF:
        ctf_app = bcast_number_to_all(ctf_app, source_node=main_node)
        if ctf_app > 0:
            ERROR("data cannot be ctf-applied", "shiftali_MPI", 1, myid)

    if maskfile == None:
        mrad = min(nx, ny)
        mask = model_circle(mrad // 2 - 2, nx, ny)
    else:
        mask = get_im(maskfile)

    if CTF:
        from filter import filt_ctf
        from morphology import ctf_img
        ctf_abs_sum = EMData(nx, ny, 1, False)
        ctf_2_sum = EMData(nx, ny, 1, False)
    else:
        ctf_2_sum = None

    from global_def import CACHE_DISABLE
    if CACHE_DISABLE:
        data = EMData.read_images(stack, list_of_particles)
    else:
        for i in range(number_of_proc):
            if myid == i:
                data = EMData.read_images(stack, list_of_particles)
            if ftp == "bdb": mpi_barrier(MPI_COMM_WORLD)

    for im in range(len(data)):
        data[im].set_attr('ID', list_of_particles[im])
        st = Util.infomask(data[im], mask, False)
        data[im] -= st[0]
        if CTF:
            ctf_params = data[im].get_attr("ctf")
            ctfimg = ctf_img(nx, ctf_params, ny=ny)
            Util.add_img2(ctf_2_sum, ctfimg)
            Util.add_img_abs(ctf_abs_sum, ctfimg)

    if CTF:
        reduce_EMData_to_root(ctf_2_sum, myid, main_node)
        reduce_EMData_to_root(ctf_abs_sum, myid, main_node)
    else:
        ctf_2_sum = None
    if CTF:
        if myid != main_node:
            del ctf_2_sum
            del ctf_abs_sum
        else:
            temp = EMData(nx, ny, 1, False)
            for i in range(0, nx, 2):
                for j in range(ny):
                    temp.set_value_at(i, j, snr)
            Util.add_img(ctf_2_sum, temp)
            del temp

    total_iter = 0

    # apply initial xform.align2d parameters stored in header
    init_params = []
    for im in range(len(data)):
        t = data[im].get_attr('xform.align2d')
        init_params.append(t)
        p = t.get_params("2d")
        data[im] = rot_shift2D(data[im],
                               p['alpha'],
                               sx=p['tx'],
                               sy=p['ty'],
                               mirror=p['mirror'],
                               scale=p['scale'])

    # fourier transform all images, and apply ctf if CTF
    for im in range(len(data)):
        if CTF:
            ctf_params = data[im].get_attr("ctf")
            data[im] = filt_ctf(fft(data[im]), ctf_params)
        else:
            data[im] = fft(data[im])

    sx_sum = 0
    sy_sum = 0
    sx_sum_total = 0
    sy_sum_total = 0
    shift_x = [0.0] * len(data)
    shift_y = [0.0] * len(data)
    ishift_x = [0.0] * len(data)
    ishift_y = [0.0] * len(data)

    for Iter in range(max_iter):
        if myid == main_node:
            start_time = time()
            print_msg("Iteration #%4d\n" % (total_iter))
        total_iter += 1
        avg = EMData(nx, ny, 1, False)
        for im in data:
            Util.add_img(avg, im)

        reduce_EMData_to_root(avg, myid, main_node)

        if myid == main_node:
            if CTF:
                tavg = Util.divn_filter(avg, ctf_2_sum)
            else:
                tavg = Util.mult_scalar(avg, 1.0 / float(nima))
        else:
            tavg = EMData(nx, ny, 1, False)

        if Fourvar:
            bcast_EMData_to_all(tavg, myid, main_node)
            vav, rvar = varf2d_MPI(myid, data, tavg, mask, "a", CTF)

        if myid == main_node:
            if Fourvar:
                tavg = fft(Util.divn_img(fft(tavg), vav))
                vav_r = Util.pack_complex_to_real(vav)

            # normalize and mask tavg in real space
            tavg = fft(tavg)
            stat = Util.infomask(tavg, mask, False)
            tavg -= stat[0]
            Util.mul_img(tavg, mask)
            # For testing purposes: shift tavg to some random place and see if the centering is still correct
            #tavg = rot_shift3D(tavg,sx=3,sy=-4)
            tavg = fft(tavg)

        if Fourvar: del vav
        bcast_EMData_to_all(tavg, myid, main_node)

        sx_sum = 0
        sy_sum = 0
        if search_rng > 0: nwx = 2 * search_rng + 1
        else: nwx = nx

        if search_rng_y > 0: nwy = 2 * search_rng_y + 1
        else: nwy = ny

        not_zero = 0
        for im in range(len(data)):
            if oneDx:
                ctx = Util.window(ccf(data[im], tavg), nwx, 1)
                p1 = peak_search(ctx)
                p1_x = -int(p1[0][3])
                ishift_x[im] = p1_x
                sx_sum += p1_x
            else:
                p1 = peak_search(Util.window(ccf(data[im], tavg), nwx, nwy))
                p1_x = -int(p1[0][4])
                p1_y = -int(p1[0][5])
                ishift_x[im] = p1_x
                ishift_y[im] = p1_y
                sx_sum += p1_x
                sy_sum += p1_y

            if not_zero == 0:
                if (not (ishift_x[im] == 0.0)) or (not (ishift_y[im] == 0.0)):
                    not_zero = 1

        sx_sum = mpi_reduce(sx_sum, 1, MPI_INT, MPI_SUM, main_node,
                            MPI_COMM_WORLD)

        if not oneDx:
            sy_sum = mpi_reduce(sy_sum, 1, MPI_INT, MPI_SUM, main_node,
                                MPI_COMM_WORLD)

        if myid == main_node:
            sx_sum_total = int(sx_sum[0])
            if not oneDx:
                sy_sum_total = int(sy_sum[0])
        else:
            sx_sum_total = 0
            sy_sum_total = 0

        sx_sum_total = bcast_number_to_all(sx_sum_total, source_node=main_node)

        if not oneDx:
            sy_sum_total = bcast_number_to_all(sy_sum_total,
                                               source_node=main_node)

        sx_ave = round(float(sx_sum_total) / nima)
        sy_ave = round(float(sy_sum_total) / nima)
        for im in range(len(data)):
            p1_x = ishift_x[im] - sx_ave
            p1_y = ishift_y[im] - sy_ave
            params2 = {
                "filter_type": Processor.fourier_filter_types.SHIFT,
                "x_shift": p1_x,
                "y_shift": p1_y,
                "z_shift": 0.0
            }
            data[im] = Processor.EMFourierFilter(data[im], params2)
            shift_x[im] += p1_x
            shift_y[im] += p1_y
        # stop if all shifts are zero
        not_zero = mpi_reduce(not_zero, 1, MPI_INT, MPI_SUM, main_node,
                              MPI_COMM_WORLD)
        if myid == main_node:
            not_zero_all = int(not_zero[0])
        else:
            not_zero_all = 0
        not_zero_all = bcast_number_to_all(not_zero_all, source_node=main_node)

        if myid == main_node:
            print_msg("Time of iteration = %12.2f\n" % (time() - start_time))
            start_time = time()

        if not_zero_all == 0: break

    #for im in xrange(len(data)): data[im] = fft(data[im])  This should not be required as only header information is used
    # combine shifts found with the original parameters
    for im in range(len(data)):
        t0 = init_params[im]
        t1 = Transform()
        t1.set_params({
            "type": "2D",
            "alpha": 0,
            "scale": t0.get_scale(),
            "mirror": 0,
            "tx": shift_x[im],
            "ty": shift_y[im]
        })
        # combine t0 and t1
        tt = t1 * t0
        data[im].set_attr("xform.align2d", tt)

    # write out headers and STOP, under MPI writing has to be done sequentially
    mpi_barrier(MPI_COMM_WORLD)
    par_str = ["xform.align2d", "ID"]
    if myid == main_node:
        from utilities import file_type
        if (file_type(stack) == "bdb"):
            from utilities import recv_attr_dict_bdb
            recv_attr_dict_bdb(main_node, stack, data, par_str, image_start,
                               image_end, number_of_proc)
        else:
            from utilities import recv_attr_dict
            recv_attr_dict(main_node, stack, data, par_str, image_start,
                           image_end, number_of_proc)

    else:
        send_attr_dict(main_node, data, par_str, image_start, image_end)
    if myid == main_node: print_end_msg("shiftali_MPI")
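A hedged invocation sketch (the driver and stack names are hypothetical): shiftali_MPI assumes MPI has already been initialized and is typically called once per rank from a small driver launched under mpirun, e.g. "mpirun -np 8 python driver.py":

# driver.py
import sys
from mpi import mpi_init, mpi_finalize

sys.argv = mpi_init(len(sys.argv), sys.argv)
shiftali_MPI("bdb:particles", maskfile=None, maxit=100, CTF=False,
             search_rng=10, oneDx=False, search_rng_y=10)
mpi_finalize()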
Example #16
def main():
    def params_3D_2D_NEW(phi, theta, psi, s2x, s2y, mirror):
        if mirror:
            m = 1
            alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0,
                                                       540.0 - psi, 0, 0, 1.0)
        else:
            m = 0
            alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0,
                                                       360.0 - psi, 0, 0, 1.0)
        return alpha, sx, sy, m

    progname = os.path.basename(sys.argv[0])
    usage = progname + " prj_stack  --ave2D= --var2D=  --ave3D= --var3D= --img_per_grp= --fl=15. --aa=0.01  --sym=symmetry --CTF"
    parser = OptionParser(usage, version=SPARXVERSION)

    parser.add_option("--output_dir",
                      type="string",
                      default="./",
                      help="output directory")
    parser.add_option("--ave2D",
                      type="string",
                      default=False,
                      help="write to the disk a stack of 2D averages")
    parser.add_option("--var2D",
                      type="string",
                      default=False,
                      help="write to the disk a stack of 2D variances")
    parser.add_option("--ave3D",
                      type="string",
                      default=False,
                      help="write to the disk reconstructed 3D average")
    parser.add_option("--var3D",
                      type="string",
                      default=False,
                      help="compute 3D variability (time consuming!)")
    parser.add_option("--img_per_grp",
                      type="int",
                      default=10,
                      help="number of neighbouring projections")
    parser.add_option("--no_norm",
                      action="store_true",
                      default=False,
                      help="do not use normalization")
    #parser.add_option("--radius", 	    type="int"         ,	default=-1   ,				help="radius for 3D variability" )
    parser.add_option("--npad",
                      type="int",
                      default=2,
                      help="number of time to pad the original images")
    parser.add_option("--sym", type="string", default="c1", help="symmetry")
    parser.add_option(
        "--fl",
        type="float",
        default=0.0,
        help=
        "cutoff freqency in absolute frequency (0.0-0.5). (Default - no filtration)"
    )
    parser.add_option(
        "--aa",
        type="float",
        default=0.0,
        help=
        "fall off of the filter. Put 0.01 if user has no clue about falloff (Default - no filtration)"
    )
    parser.add_option("--CTF",
                      action="store_true",
                      default=False,
                      help="use CFT correction")
    parser.add_option("--VERBOSE",
                      action="store_true",
                      default=False,
                      help="Long output for debugging")
    #parser.add_option("--MPI" , 		action="store_true",	default=False,				help="use MPI version")
    #parser.add_option("--radiuspca", 	type="int"         ,	default=-1   ,				help="radius for PCA" )
    #parser.add_option("--iter", 		type="int"         ,	default=40   ,				help="maximum number of iterations (stop criterion of reconstruction process)" )
    #parser.add_option("--abs", 		type="float"   ,        default=0.0  ,				help="minimum average absolute change of voxels' values (stop criterion of reconstruction process)" )
    #parser.add_option("--squ", 		type="float"   ,	    default=0.0  ,				help="minimum average squared change of voxels' values (stop criterion of reconstruction process)" )
    parser.add_option(
        "--VAR",
        action="store_true",
        default=False,
        help="stack on input consists of 2D variances (Default False)")
    parser.add_option(
        "--decimate",
        type="float",
        default=1.0,
        help=
        "image decimate rate, a number larger (expand image) or less (shrink image) than 1. default is 1"
    )
    parser.add_option(
        "--window",
        type="int",
        default=0,
        help=
        "reduce images to a small image size without changing pixel_size. Default value is zero."
    )
    #parser.add_option("--SND",			action="store_true",	default=False,				help="compute squared normalized differences (Default False)")
    parser.add_option(
        "--nvec",
        type="int",
        default=0,
        help="number of eigenvectors, default = 0 meaning no PCA calculated")
    parser.add_option(
        "--symmetrize",
        action="store_true",
        default=False,
        help="Prepare input stack for handling symmetry (Default False)")

    (options, args) = parser.parse_args()
    #####
    from mpi import mpi_init, mpi_comm_rank, mpi_comm_size, mpi_recv, MPI_COMM_WORLD
    from mpi import mpi_barrier, mpi_reduce, mpi_bcast, mpi_send, MPI_FLOAT, MPI_SUM, MPI_INT, MPI_MAX
    from applications import MPI_start_end
    from reconstruction import recons3d_em, recons3d_em_MPI
    from reconstruction import recons3d_4nn_MPI, recons3d_4nn_ctf_MPI
    from utilities import print_begin_msg, print_end_msg, print_msg
    from utilities import read_text_row, get_image, get_im
    from utilities import bcast_EMData_to_all, bcast_number_to_all
    from utilities import get_symt

    #  This is code for handling symmetries by the above program.  To be incorporated. PAP 01/27/2015

    from EMAN2db import db_open_dict

    # Set up global variables related to bdb cache
    if global_def.CACHE_DISABLE:
        from utilities import disable_bdb_cache
        disable_bdb_cache()

    # Set up global variables related to ERROR function
    global_def.BATCH = True

    # detect if program is running under MPI
    RUNNING_UNDER_MPI = "OMPI_COMM_WORLD_SIZE" in os.environ
    if RUNNING_UNDER_MPI:
        global_def.MPI = True

    if options.symmetrize:
        if RUNNING_UNDER_MPI:
            try:
                sys.argv = mpi_init(len(sys.argv), sys.argv)
                try:
                    number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
                    if (number_of_proc > 1):
                        ERROR(
                            "Cannot use more than one CPU for symmetry prepration",
                            "sx3dvariability", 1)
                except:
                    pass
            except:
                pass
        if options.output_dir != "./" and not os.path.exists(
                options.output_dir):
            os.mkdir(options.output_dir)
        #  Input
        #instack = "Clean_NORM_CTF_start_wparams.hdf"
        #instack = "bdb:data"

        from logger import Logger, BaseLogger_Files
        if os.path.exists(os.path.join(options.output_dir, "log.txt")):
            os.remove(os.path.join(options.output_dir, "log.txt"))
        log_main = Logger(BaseLogger_Files())
        log_main.prefix = os.path.join(options.output_dir, "./")

        instack = args[0]
        sym = options.sym.lower()
        if (sym == "c1"):
            ERROR("There is no need to symmetrize stack for C1 symmetry",
                  "sx3dvariability", 1)

        line = ""
        for a in sys.argv:
            line += " " + a
        log_main.add(line)

        if (instack[:4] != "bdb:"):
            if options.output_dir == "./": stack = "bdb:data"
            else: stack = "bdb:" + options.output_dir + "/data"
            delete_bdb(stack)
            junk = cmdexecute("sxcpy.py  " + instack + "  " + stack)
        else:
            stack = instack

        qt = EMUtil.get_all_attributes(stack, 'xform.projection')

        na = len(qt)
        ts = get_symt(sym)
        ks = len(ts)
        angsa = [None] * na

        for k in xrange(ks):
            #Qfile = "Q%1d"%k
            Qfile = os.path.join(options.output_dir, "Q%1d" % k)
            #delete_bdb("bdb:Q%1d"%k)
            delete_bdb("bdb:" + Qfile)
            #junk = cmdexecute("e2bdb.py  "+stack+"  --makevstack=bdb:Q%1d"%k)
            junk = cmdexecute("e2bdb.py  " + stack + "  --makevstack=bdb:" +
                              Qfile)
            #DB = db_open_dict("bdb:Q%1d"%k)
            DB = db_open_dict("bdb:" + Qfile)
            for i in xrange(na):
                ut = qt[i] * ts[k]
                DB.set_attr(i, "xform.projection", ut)
                #bt = ut.get_params("spider")
                #angsa[i] = [round(bt["phi"],3)%360.0, round(bt["theta"],3)%360.0, bt["psi"], -bt["tx"], -bt["ty"]]
            #write_text_row(angsa, 'ptsma%1d.txt'%k)
            #junk = cmdexecute("e2bdb.py  "+stack+"  --makevstack=bdb:Q%1d"%k)
            #junk = cmdexecute("sxheader.py  bdb:Q%1d  --params=xform.projection  --import=ptsma%1d.txt"%(k,k))
            DB.close()
        if options.output_dir == "./": delete_bdb("bdb:sdata")
        else: delete_bdb("bdb:" + options.output_dir + "/" + "sdata")
        #junk = cmdexecute("e2bdb.py . --makevstack=bdb:sdata --filt=Q")
        sdata = "bdb:" + options.output_dir + "/" + "sdata"
        print(sdata)
        junk = cmdexecute("e2bdb.py   " + options.output_dir +
                          "  --makevstack=" + sdata + " --filt=Q")
        #junk = cmdexecute("ls  EMAN2DB/sdata*")
        #a = get_im("bdb:sdata")
        a = get_im(sdata)
        a.set_attr("variabilitysymmetry", sym)
        #a.write_image("bdb:sdata")
        a.write_image(sdata)

    else:

        sys.argv = mpi_init(len(sys.argv), sys.argv)
        myid = mpi_comm_rank(MPI_COMM_WORLD)
        number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
        main_node = 0

        if len(args) == 1:
            stack = args[0]
        else:
            print(("usage: " + usage))
            print(("Please run '" + progname + " -h' for detailed options"))
            return 1

        t0 = time()
        # obsolete flags
        options.MPI = True
        options.nvec = 0
        options.radiuspca = -1
        options.iter = 40
        options.abs = 0.0
        options.squ = 0.0

        if options.fl > 0.0 and options.aa == 0.0:
            ERROR("Fall off has to be given for the low-pass filter",
                  "sx3dvariability", 1, myid)
        if options.VAR and options.SND:
            ERROR("Only one of var and SND can be set!", "sx3dvariability",
                  1, myid)
            exit()
        if options.VAR and (options.ave2D or options.ave3D or options.var2D):
            ERROR(
                "When VAR is set, the program cannot output ave2D, ave3D or var2D",
                "sx3dvariability", 1, myid)
            exit()
        #if options.SND and (options.ave2D or options.ave3D):
        #	ERROR("When SND is set, the program cannot output ave2D or ave3D", "sx3dvariability", 1, myid)
        #	exit()
        if options.nvec > 0:
            ERROR("PCA option not implemented", "sx3dvariability", 1, myid)
            exit()
        if options.nvec > 0 and options.ave3D == None:
            ERROR("When doing PCA analysis, one must set ave3D",
                  "sx3dvariability",
                  myid=myid)
            exit()
        import string
        options.sym = options.sym.lower()

        # if global_def.CACHE_DISABLE:
        # 	from utilities import disable_bdb_cache
        # 	disable_bdb_cache()
        # global_def.BATCH = True

        if myid == main_node:
            if options.output_dir != "./" and not os.path.exists(
                    options.output_dir):
                os.mkdir(options.output_dir)

        img_per_grp = options.img_per_grp
        nvec = options.nvec
        radiuspca = options.radiuspca

        from logger import Logger, BaseLogger_Files
        #if os.path.exists(os.path.join(options.output_dir, "log.txt")): os.remove(os.path.join(options.output_dir, "log.txt"))
        log_main = Logger(BaseLogger_Files())
        log_main.prefix = os.path.join(options.output_dir, "./")

        if myid == main_node:
            line = ""
            for a in sys.argv:
                line += " " + a
            log_main.add(line)
            log_main.add("-------->>>Settings given by all options<<<-------")
            log_main.add("instack  		    :" + stack)
            log_main.add("output_dir        :" + options.output_dir)
            log_main.add("var3d   		    :" + options.var3D)

        if myid == main_node:
            line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
            #print_begin_msg("sx3dvariability")
            msg = "sx3dvariability"
            log_main.add(msg)
            print(line, msg)
            msg = ("%-70s:  %s\n" % ("Input stack", stack))
            log_main.add(msg)
            print(line, msg)

        symbaselen = 0
        if myid == main_node:
            nima = EMUtil.get_image_count(stack)
            img = get_image(stack)
            nx = img.get_xsize()
            ny = img.get_ysize()
            if options.sym != "c1":
                imgdata = get_im(stack)
                try:
                    i = imgdata.get_attr("variabilitysymmetry").lower()
                    if (i != options.sym):
                        ERROR(
                            "The symmetry provided does not agree with the symmetry of the input stack",
                            "sx3dvariability",
                            myid=myid)
                except:
                    ERROR(
                        "Input stack is not prepared for symmetry, please follow instructions",
                        "sx3dvariability",
                        myid=myid)
                from utilities import get_symt
                i = len(get_symt(options.sym))
                if ((nima / i) * i != nima):
                    ERROR(
                        "The length of the input stack is incorrect for symmetry processing",
                        "sx3dvariability",
                        myid=myid)
                symbaselen = nima / i
            else:
                symbaselen = nima
        else:
            nima = 0
            nx = 0
            ny = 0
        nima = bcast_number_to_all(nima)
        nx = bcast_number_to_all(nx)
        ny = bcast_number_to_all(ny)
        Tracker = {}
        Tracker["total_stack"] = nima
        if options.decimate == 1.:
            if options.window != 0:
                nx = options.window
                ny = options.window
        else:
            if options.window == 0:
                nx = int(nx * options.decimate)
                ny = int(ny * options.decimate)
            else:
                nx = int(options.window * options.decimate)
                ny = nx
        Tracker["nx"] = nx
        Tracker["ny"] = ny
        Tracker["nz"] = nx
        symbaselen = bcast_number_to_all(symbaselen)
        if radiuspca == -1: radiuspca = nx / 2 - 2

        if myid == main_node:
            line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
            msg = "%-70s:  %d\n" % ("Number of projection", nima)
            log_main.add(msg)
            print(line, msg)
        img_begin, img_end = MPI_start_end(nima, number_of_proc, myid)
        """
		if options.SND:
			from projection		import prep_vol, prgs
			from statistics		import im_diff
			from utilities		import get_im, model_circle, get_params_proj, set_params_proj
			from utilities		import get_ctf, generate_ctf
			from filter			import filt_ctf
		
			imgdata = EMData.read_images(stack, range(img_begin, img_end))

			if options.CTF:
				vol = recons3d_4nn_ctf_MPI(myid, imgdata, 1.0, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)
			else:
				vol = recons3d_4nn_MPI(myid, imgdata, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)

			bcast_EMData_to_all(vol, myid)
			volft, kb = prep_vol(vol)

			mask = model_circle(nx/2-2, nx, ny)
			varList = []
			for i in xrange(img_begin, img_end):
				phi, theta, psi, s2x, s2y = get_params_proj(imgdata[i-img_begin])
				ref_prj = prgs(volft, kb, [phi, theta, psi, -s2x, -s2y])
				if options.CTF:
					ctf_params = get_ctf(imgdata[i-img_begin])
					ref_prj = filt_ctf(ref_prj, generate_ctf(ctf_params))
				diff, A, B = im_diff(ref_prj, imgdata[i-img_begin], mask)
				diff2 = diff*diff
				set_params_proj(diff2, [phi, theta, psi, s2x, s2y])
				varList.append(diff2)
			mpi_barrier(MPI_COMM_WORLD)
		"""
        if options.VAR:
            #varList   = EMData.read_images(stack, range(img_begin, img_end))
            varList = []
            this_image = EMData()
            for index_of_particle in xrange(img_begin, img_end):
                this_image.read_image(stack, index_of_particle)
                varList.append(
                    image_decimate_window_xform_ctf(this_image,
                                                    options.decimate,
                                                    options.window,
                                                    options.CTF))
        else:
            from utilities import bcast_number_to_all, bcast_list_to_all, send_EMData, recv_EMData
            from utilities import set_params_proj, get_params_proj, params_3D_2D, get_params2D, set_params2D, compose_transform2
            from utilities import model_blank, nearest_proj, model_circle
            from applications import pca
            from statistics import avgvar, avgvar_ctf, ccc
            from filter import filt_tanl
            from morphology import threshold, square_root
            from projection import project, prep_vol, prgs
            from sets import Set

            if myid == main_node:
                t1 = time()
                proj_angles = []
                aveList = []
                tab = EMUtil.get_all_attributes(stack, 'xform.projection')
                for i in xrange(nima):
                    t = tab[i].get_params('spider')
                    phi = t['phi']
                    theta = t['theta']
                    psi = t['psi']
                    x = theta
                    if x > 90.0: x = 180.0 - x
                    x = x * 10000 + psi
                    proj_angles.append([x, t['phi'], t['theta'], t['psi'], i])
                t2 = time()
                line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
                msg = "%-70s:  %d\n" % ("Number of neighboring projections",
                                        img_per_grp)
                log_main.add(msg)
                print(line, msg)
                msg = "...... Finding neighboring projections\n"
                log_main.add(msg)
                print(line, msg)
                if options.VERBOSE:
                    msg = "Number of images per group: %d" % img_per_grp
                    log_main.add(msg)
                    print(line, msg)
                    msg = "Now grouping projections"
                    log_main.add(msg)
                    print(line, msg)
                proj_angles.sort()
            proj_angles_list = [0.0] * (nima * 4)
            if myid == main_node:
                for i in xrange(nima):
                    proj_angles_list[i * 4] = proj_angles[i][1]
                    proj_angles_list[i * 4 + 1] = proj_angles[i][2]
                    proj_angles_list[i * 4 + 2] = proj_angles[i][3]
                    proj_angles_list[i * 4 + 3] = proj_angles[i][4]
            proj_angles_list = bcast_list_to_all(proj_angles_list, myid,
                                                 main_node)
            proj_angles = []
            for i in xrange(nima):
                proj_angles.append([
                    proj_angles_list[i * 4], proj_angles_list[i * 4 + 1],
                    proj_angles_list[i * 4 + 2],
                    int(proj_angles_list[i * 4 + 3])
                ])
            del proj_angles_list
            proj_list, mirror_list = nearest_proj(proj_angles, img_per_grp,
                                                  range(img_begin, img_end))

            all_proj = Set()
            for im in proj_list:
                for jm in im:
                    all_proj.add(proj_angles[jm][3])

            all_proj = list(all_proj)
            if options.VERBOSE:
                print("On node %2d, number of images needed to be read = %5d" %
                      (myid, len(all_proj)))

            index = {}
            for i in xrange(len(all_proj)):
                index[all_proj[i]] = i
            mpi_barrier(MPI_COMM_WORLD)

            if myid == main_node:
                line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
                msg = ("%-70s:  %.2f\n" %
                       ("Finding neighboring projections lasted [s]",
                        time() - t2))
                log_main.add(msg)
                print(msg)
                msg = ("%-70s:  %d\n" %
                       ("Number of groups processed on the main node",
                        len(proj_list)))
                log_main.add(msg)
                print(line, msg)
                if options.VERBOSE:
                    print("Grouping projections took: ", (time() - t2) / 60,
                          "[min]")
                    print("Number of groups on main node: ", len(proj_list))
            mpi_barrier(MPI_COMM_WORLD)

            if myid == main_node:
                line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
                msg = ("...... calculating the stack of 2D variances \n")
                log_main.add(msg)
                print(line, msg)
                if options.VERBOSE:
                    print("Now calculating the stack of 2D variances")

            proj_params = [0.0] * (nima * 5)
            aveList = []
            varList = []
            if nvec > 0:
                eigList = [[] for i in xrange(nvec)]

            if options.VERBOSE:
                print("Begin to read images on processor %d" % (myid))
            ttt = time()
            #imgdata = EMData.read_images(stack, all_proj)
            imgdata = []
            for index_of_proj in xrange(len(all_proj)):
                #img     = EMData()
                #img.read_image(stack, all_proj[index_of_proj])
                dmg = image_decimate_window_xform_ctf(
                    get_im(stack, all_proj[index_of_proj]), options.decimate,
                    options.window, options.CTF)
                #print dmg.get_xsize(), "init"
                imgdata.append(dmg)
            if options.VERBOSE:
                print("Reading images on processor %d done, time = %.2f" %
                      (myid, time() - ttt))
                print("On processor %d, we got %d images" %
                      (myid, len(imgdata)))
            mpi_barrier(MPI_COMM_WORLD)
            '''	
			imgdata2 = EMData.read_images(stack, range(img_begin, img_end))
			if options.fl > 0.0:
				for k in xrange(len(imgdata2)):
					imgdata2[k] = filt_tanl(imgdata2[k], options.fl, options.aa)
			if options.CTF:
				vol = recons3d_4nn_ctf_MPI(myid, imgdata2, 1.0, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)
			else:
				vol = recons3d_4nn_MPI(myid, imgdata2, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)
			if myid == main_node:
				vol.write_image("vol_ctf.hdf")
				print_msg("Writing to the disk volume reconstructed from averages as		:  %s\n"%("vol_ctf.hdf"))
			del vol, imgdata2
			mpi_barrier(MPI_COMM_WORLD)
			'''
            from applications import prepare_2d_forPCA
            from utilities import model_blank
            for i in xrange(len(proj_list)):
                ki = proj_angles[proj_list[i][0]][3]
                if ki >= symbaselen: continue
                mi = index[ki]
                phiM, thetaM, psiM, s2xM, s2yM = get_params_proj(imgdata[mi])

                grp_imgdata = []
                for j in xrange(img_per_grp):
                    mj = index[proj_angles[proj_list[i][j]][3]]
                    phi, theta, psi, s2x, s2y = get_params_proj(imgdata[mj])
                    alpha, sx, sy, mirror = params_3D_2D_NEW(
                        phi, theta, psi, s2x, s2y, mirror_list[i][j])
                    if thetaM <= 90:
                        if mirror == 0:
                            alpha, sx, sy, scale = compose_transform2(
                                alpha, sx, sy, 1.0, phiM - phi, 0.0, 0.0, 1.0)
                        else:
                            alpha, sx, sy, scale = compose_transform2(
                                alpha, sx, sy, 1.0, 180 - (phiM - phi), 0.0,
                                0.0, 1.0)
                    else:
                        if mirror == 0:
                            alpha, sx, sy, scale = compose_transform2(
                                alpha, sx, sy, 1.0, -(phiM - phi), 0.0, 0.0,
                                1.0)
                        else:
                            alpha, sx, sy, scale = compose_transform2(
                                alpha, sx, sy, 1.0, -(180 - (phiM - phi)), 0.0,
                                0.0, 1.0)
                    set_params2D(imgdata[mj], [alpha, sx, sy, mirror, 1.0])
                    grp_imgdata.append(imgdata[mj])
                    #print grp_imgdata[j].get_xsize(), imgdata[mj].get_xsize()

                if not options.no_norm:
                    #print grp_imgdata[j].get_xsize()
                    mask = model_circle(nx / 2 - 2, nx, nx)
                    for k in xrange(img_per_grp):
                        ave, std, minn, maxx = Util.infomask(
                            grp_imgdata[k], mask, False)
                        grp_imgdata[k] -= ave
                        grp_imgdata[k] /= std
                    del mask

                if options.fl > 0.0:
                    from filter import filt_ctf, filt_table
                    from fundamentals import fft, window2d
                    nx2 = 2 * nx
                    ny2 = 2 * ny
                    if options.CTF:
                        from utilities import pad
                        for k in xrange(img_per_grp):
                            grp_imgdata[k] = window2d(
                                fft(
                                    filt_tanl(
                                        filt_ctf(
                                            fft(
                                                pad(grp_imgdata[k], nx2, ny2,
                                                    1, 0.0)),
                                            grp_imgdata[k].get_attr("ctf"),
                                            binary=1), options.fl,
                                        options.aa)), nx, ny)
                            #grp_imgdata[k] = window2d(fft( filt_table( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa), fifi) ),nx,ny)
                            #grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa)
                    else:
                        for k in xrange(img_per_grp):
                            grp_imgdata[k] = filt_tanl(grp_imgdata[k],
                                                       options.fl, options.aa)
                            #grp_imgdata[k] = window2d(fft( filt_table( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa), fifi) ),nx,ny)
                            #grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa)
                else:
                    from utilities import pad, read_text_file
                    from filter import filt_ctf, filt_table
                    from fundamentals import fft, window2d
                    nx2 = 2 * nx
                    ny2 = 2 * ny
                    if options.CTF:
                        from utilities import pad
                        for k in xrange(img_per_grp):
                            grp_imgdata[k] = window2d(
                                fft(
                                    filt_ctf(fft(
                                        pad(grp_imgdata[k], nx2, ny2, 1, 0.0)),
                                             grp_imgdata[k].get_attr("ctf"),
                                             binary=1)), nx, ny)
                            #grp_imgdata[k] = window2d(fft( filt_table( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa), fifi) ),nx,ny)
                            #grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa)
                '''
				if i < 10 and myid == main_node:
					for k in xrange(10):
						grp_imgdata[k].write_image("grp%03d.hdf"%i, k)
				'''
                """
				if myid == main_node and i==0:
					for pp in xrange(len(grp_imgdata)):
						grp_imgdata[pp].write_image("pp.hdf", pp)
				"""
                ave, grp_imgdata = prepare_2d_forPCA(grp_imgdata)
                """
				if myid == main_node and i==0:
					for pp in xrange(len(grp_imgdata)):
						grp_imgdata[pp].write_image("qq.hdf", pp)
				"""

                var = model_blank(nx, ny)
                for q in grp_imgdata:
                    Util.add_img2(var, q)
                Util.mul_scalar(var, 1.0 / (len(grp_imgdata) - 1))
                # Switch to std dev
                var = square_root(threshold(var))
                #if options.CTF:	ave, var = avgvar_ctf(grp_imgdata, mode="a")
                #else:	            ave, var = avgvar(grp_imgdata, mode="a")
                """
				if myid == main_node:
					ave.write_image("avgv.hdf",i)
					var.write_image("varv.hdf",i)
				"""

                set_params_proj(ave, [phiM, thetaM, 0.0, 0.0, 0.0])
                set_params_proj(var, [phiM, thetaM, 0.0, 0.0, 0.0])

                aveList.append(ave)
                varList.append(var)

                if options.VERBOSE:
                    print("%5.2f%% done on processor %d" %
                          (i * 100.0 / len(proj_list), myid))
                if nvec > 0:
                    eig = pca(input_stacks=grp_imgdata,
                              subavg="",
                              mask_radius=radiuspca,
                              nvec=nvec,
                              incore=True,
                              shuffle=False,
                              genbuf=True)
                    for k in xrange(nvec):
                        set_params_proj(eig[k], [phiM, thetaM, 0.0, 0.0, 0.0])
                        eigList[k].append(eig[k])
                    """
					if myid == 0 and i == 0:
						for k in xrange(nvec):
							eig[k].write_image("eig.hdf", k)
					"""

            del imgdata
            #  To this point, all averages, variances, and eigenvectors are computed

            if options.ave2D:
                from fundamentals import fpol
                if myid == main_node:
                    km = 0
                    for i in xrange(number_of_proc):
                        if i == main_node:
                            for im in xrange(len(aveList)):
                                aveList[im].write_image(
                                    os.path.join(options.output_dir,
                                                 options.ave2D), km)
                                km += 1
                        else:
                            nl = mpi_recv(1, MPI_INT, i,
                                          SPARX_MPI_TAG_UNIVERSAL,
                                          MPI_COMM_WORLD)
                            nl = int(nl[0])
                            for im in xrange(nl):
                                ave = recv_EMData(i, im + i + 70000)
                                """
								nm = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
								nm = int(nm[0])
								members = mpi_recv(nm, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
								ave.set_attr('members', map(int, members))
								members = mpi_recv(nm, MPI_FLOAT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
								ave.set_attr('pix_err', map(float, members))
								members = mpi_recv(3, MPI_FLOAT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
								ave.set_attr('refprojdir', map(float, members))
								"""
                                tmpvol = fpol(ave, Tracker["nx"],
                                              Tracker["nx"], 1)
                                tmpvol.write_image(
                                    os.path.join(options.output_dir,
                                                 options.ave2D), km)
                                km += 1
                else:
                    mpi_send(len(aveList), 1, MPI_INT, main_node,
                             SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
                    for im in xrange(len(aveList)):
                        send_EMData(aveList[im], main_node, im + myid + 70000)
                        """
						members = aveList[im].get_attr('members')
						mpi_send(len(members), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						mpi_send(members, len(members), MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						members = aveList[im].get_attr('pix_err')
						mpi_send(members, len(members), MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						try:
							members = aveList[im].get_attr('refprojdir')
							mpi_send(members, 3, MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						except:
							mpi_send([-999.0,-999.0,-999.0], 3, MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						"""

            if options.ave3D:
                from fundamentals import fpol
                if options.VERBOSE:
                    print("Reconstructing 3D average volume")
                ave3D = recons3d_4nn_MPI(myid,
                                         aveList,
                                         symmetry=options.sym,
                                         npad=options.npad)
                bcast_EMData_to_all(ave3D, myid)
                if myid == main_node:
                    line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
                    ave3D = fpol(ave3D, Tracker["nx"], Tracker["nx"],
                                 Tracker["nx"])
                    ave3D.write_image(
                        os.path.join(options.output_dir, options.ave3D))
                    msg = ("%-70s:  %s\n" % (
                        "Writing to the disk volume reconstructed from averages as",
                        options.ave3D))
                    log_main.add(msg)
                    print(line, msg)
            del ave, var, proj_list, stack, phi, theta, psi, s2x, s2y, alpha, sx, sy, mirror, aveList

            if nvec > 0:
                for k in xrange(nvec):
                    if options.VERBOSE:
                        print("Reconstruction eigenvolumes", k)
                    cont = True
                    ITER = 0
                    mask2d = model_circle(radiuspca, nx, nx)
                    while cont:
                        #print "On node %d, iteration %d"%(myid, ITER)
                        eig3D = recons3d_4nn_MPI(myid,
                                                 eigList[k],
                                                 symmetry=options.sym,
                                                 npad=options.npad)
                        bcast_EMData_to_all(eig3D, myid, main_node)
                        if options.fl > 0.0:
                            eig3D = filt_tanl(eig3D, options.fl, options.aa)
                        if myid == main_node:
                            eig3D.write_image(
                                os.path.join(options.output_dir,
                                             "eig3d_%03d_%03d.hdf" % (k, ITER)))
                        Util.mul_img(eig3D,
                                     model_circle(radiuspca, nx, nx, nx))
                        eig3Df, kb = prep_vol(eig3D)
                        del eig3D
                        cont = False
                        icont = 0
                        for l in xrange(len(eigList[k])):
                            phi, theta, psi, s2x, s2y = get_params_proj(
                                eigList[k][l])
                            proj = prgs(eig3Df, kb,
                                        [phi, theta, psi, s2x, s2y])
                            cl = ccc(proj, eigList[k][l], mask2d)
                            if cl < 0.0:
                                icont += 1
                                cont = True
                                eigList[k][l] *= -1.0
                        u = int(cont)
                        u = mpi_reduce([u], 1, MPI_INT, MPI_MAX, main_node,
                                       MPI_COMM_WORLD)
                        icont = mpi_reduce([icont], 1, MPI_INT, MPI_SUM,
                                           main_node, MPI_COMM_WORLD)

                        if myid == main_node:
                            line = strftime("%Y-%m-%d_%H:%M:%S",
                                            localtime()) + " =>"
                            u = int(u[0])
                            msg = (" Eigenvector: ", k, " number changed ",
                                   int(icont[0]))
                            log_main.add(msg)
                            print(line, msg)
                        else:
                            u = 0
                        u = bcast_number_to_all(u, main_node)
                        cont = bool(u)
                        ITER += 1

                    del eig3Df, kb
                    mpi_barrier(MPI_COMM_WORLD)
                del eigList, mask2d

            if options.ave3D: del ave3D
            if options.var2D:
                from fundamentals import fpol
                if myid == main_node:
                    km = 0
                    for i in xrange(number_of_proc):
                        if i == main_node:
                            for im in xrange(len(varList)):
                                tmpvol = fpol(varList[im], Tracker["nx"],
                                              Tracker["nx"], 1)
                                tmpvol.write_image(
                                    os.path.join(options.output_dir,
                                                 options.var2D), km)
                                km += 1
                        else:
                            nl = mpi_recv(1, MPI_INT, i,
                                          SPARX_MPI_TAG_UNIVERSAL,
                                          MPI_COMM_WORLD)
                            nl = int(nl[0])
                            for im in xrange(nl):
                                ave = recv_EMData(i, im + i + 70000)
                                tmpvol = fpol(ave, Tracker["nx"],
                                              Tracker["nx"], 1)
                                tmpvol.write_image(
                                    os.path.join(options.output_dir,
                                                 options.var2D), km)
                                km += 1
                else:
                    mpi_send(len(varList), 1, MPI_INT, main_node,
                             SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
                    for im in xrange(len(varList)):
                        send_EMData(varList[im], main_node, im + myid +
                                    70000)  #  What with the attributes??

            mpi_barrier(MPI_COMM_WORLD)

        if options.var3D:
            if myid == main_node and options.VERBOSE:
                line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
                msg = ("Reconstructing 3D variability volume")
                log_main.add(msg)
                print(line, msg)
            t6 = time()
            # radiusvar = options.radius
            # if( radiusvar < 0 ):  radiusvar = nx//2 -3
            res = recons3d_4nn_MPI(myid,
                                   varList,
                                   symmetry=options.sym,
                                   npad=options.npad)
            #res = recons3d_em_MPI(varList, vol_stack, options.iter, radiusvar, options.abs, True, options.sym, options.squ)
            if myid == main_node:
                from fundamentals import fpol
                res = fpol(res, Tracker["nx"], Tracker["nx"], Tracker["nx"])
                res.write_image(os.path.join(options.output_dir,
                                             options.var3D))

            if myid == main_node:
                line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
                msg = ("%-70s:  %.2f\n" %
                       ("Reconstructing 3D variability took [s]", time() - t6))
                log_main.add(msg)
                print(line, msg)
                if options.VERBOSE:
                    print("Reconstruction took: %.2f [min]" %
                          ((time() - t6) / 60))

            if myid == main_node:
                line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
                msg = ("%-70s:  %.2f\n" %
                       ("Total time for these computations [s]", time() - t0))
                print(line, msg)
                log_main.add(msg)
                if options.VERBOSE:
                    print("Total time for these computations: %.2f [min]" %
                          ((time() - t0) / 60))
                line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
                msg = ("sx3dvariability")
                print(line, msg)
                log_main.add(msg)

        from mpi import mpi_finalize
        mpi_finalize()

        if RUNNING_UNDER_MPI:
            global_def.MPI = False

        global_def.BATCH = False
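
The eigenvolume refinement above repeatedly reconstructs eig3D from eigList[k], reprojects it, and flips the sign of every eigenprojection whose correlation with its reprojection is negative, iterating until no sign changes remain. Below is a minimal NumPy sketch of that sign-consistency idea, with plain vectors standing in for the projections and their running mean standing in for the reconstruction; the names are illustrative only, not SPARX API.

import numpy as np

def align_signs(vecs, max_iter=50):
    """Flip individual vectors until all of them correlate non-negatively
    with the current consensus (a toy stand-in for the eigenvolume)."""
    vecs = [np.asarray(v, dtype=float).copy() for v in vecs]
    for _ in range(max_iter):
        consensus = np.mean(vecs, axis=0)       # stands in for recons3d + reprojection
        changed = 0
        for i, v in enumerate(vecs):
            if np.dot(v, consensus) < 0.0:      # ccc(proj, eigList[k][l]) < 0 above
                vecs[i] = -v                    # eigList[k][l] *= -1.0
                changed += 1
        if changed == 0:                        # nothing flipped -> stop iterating
            break
    return vecs

rng = np.random.default_rng(0)
base = rng.normal(size=32)
noisy = [s * base + 0.1 * rng.normal(size=32) for s in (1, -1, 1, -1, 1)]
aligned = align_signs(noisy)
print([int(np.sign(np.dot(v, base))) for v in aligned])   # expect all 1
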
Example #17
0
def main():
	from utilities import get_input_from_string
	progname = os.path.basename(sys.argv[0])
	usage = progname + " stack output_average --radius=particle_radius --xr=xr --yr=yr --ts=ts --thld_err=thld_err --num_ali=num_ali --fl=fl --aa=aa --CTF --verbose --stables"
	parser = OptionParser(usage,version=SPARXVERSION)
	parser.add_option("--radius",       type="int",              default=-1,          help=" particle radius for alignment")
	parser.add_option("--xr",           type="string"      ,     default="2 1",       help="range for translation search in x direction, search is +/xr (default 2,1)")
	parser.add_option("--yr",           type="string"      ,     default="-1",        help="range for translation search in y direction, search is +/yr (default = same as xr)")
	parser.add_option("--ts",           type="string"      ,     default="1 0.5",     help="step size of the translation search in both directions, search is -xr, -xr+ts, 0, xr-ts, xr, can be fractional (default: 1,0.5)")
	parser.add_option("--thld_err",     type="float",            default=0.75,        help="threshld of pixel error (default = 0.75)")
	parser.add_option("--num_ali",      type="int",              default=5,           help="number of alignments performed for stability (default = 5)")
	parser.add_option("--maxit",        type="int",              default=30,          help="number of iterations for each xr (default = 30)")
	parser.add_option("--fl",           type="float"       ,     default=0.3,         help="cut-off frequency of hyperbolic tangent low-pass Fourier filter (default = 0.3)")
	parser.add_option("--aa",           type="float"       ,     default=0.2,         help="fall-off of hyperbolic tangent low-pass Fourier filter (default = 0.2)")
	parser.add_option("--CTF",          action="store_true",     default=False,       help="Use CTF correction during the alignment ")
	parser.add_option("--verbose",      action="store_true",     default=False,       help="print individual pixel error (default = False)")
	parser.add_option("--stables",		action="store_true",	 default=False,	      help="output the stable particles number in file (default = False)")
	parser.add_option("--method",		type="string"      ,	 default=" ",	      help="SHC (standard method is default when flag is ommitted)")
	(options, args) = parser.parse_args()
	if len(args) != 1 and len(args) != 2:
    		print "usage: " + usage
    		print "Please run '" + progname + " -h' for detailed options"
	else:
		if global_def.CACHE_DISABLE:
			from utilities import disable_bdb_cache
			disable_bdb_cache()

		from applications   import within_group_refinement, ali2d_ras
		from pixel_error    import multi_align_stability
		from utilities      import write_text_file, write_text_row

		global_def.BATCH = True

		xrng        = get_input_from_string(options.xr)
		if  options.yr == "-1":  yrng = xrng
		else          :  yrng = get_input_from_string(options.yr)
		step        = get_input_from_string(options.ts)

		class_data = EMData.read_images(args[0])

		nx = class_data[0].get_xsize()
		ou = options.radius
		num_ali = options.num_ali
		if ou == -1: ou = nx/2-2
		from utilities import model_circle, get_params2D, set_params2D
		mask = model_circle(ou, nx, nx)

		if options.CTF :
			from filter import filt_ctf
			for im in xrange(len(class_data)):
				#  Flip phases
				class_data[im] = filt_ctf(class_data[im], class_data[im].get_attr("ctf"), binary=1)
		for im in class_data:
			im.set_attr("previousmax", -1.0e10)
			try:
				t = im.get_attr("xform.align2d") # if they are there, no need to set them!
			except:
				try:
					t = im.get_attr("xform.projection")
					d = t.get_params("spider")
					set_params2D(im, [0.0, -d["tx"], -d["ty"], 0, 1.0])
				except:
					set_params2D(im, [0.0, 0.0, 0.0, 0, 1.0])
		all_ali_params = []

		for ii in xrange(num_ali):
			ali_params = []
			if options.verbose:
				ALPHA = []
				SX = []
				SY = []
				MIRROR = []
			if( xrng[0] == 0.0 and yrng[0] == 0.0 ):
				avet = ali2d_ras(class_data, randomize = True, ir = 1, ou = ou, rs = 1, step = 1.0, dst = 90.0, \
						maxit = options.maxit, check_mirror = True, FH=options.fl, FF=options.aa)
			else:
				avet = within_group_refinement(class_data, mask, True, 1, ou, 1, xrng, yrng, step, 90.0, \
						maxit = options.maxit, FH=options.fl, FF=options.aa, method = options.method)
				from utilities import info
				#print "  avet  ",info(avet)
			for im in class_data:
				alpha, sx, sy, mirror, scale = get_params2D(im)
				ali_params.extend([alpha, sx, sy, mirror])
				if options.verbose:
					ALPHA.append(alpha)
					SX.append(sx)
					SY.append(sy)
					MIRROR.append(mirror)
			all_ali_params.append(ali_params)
			if options.verbose:
				write_text_file([ALPHA, SX, SY, MIRROR], "ali_params_run_%d"%ii)
		"""
		avet = class_data[0]
		from utilities import read_text_file
		all_ali_params = []
		for ii in xrange(5):
			temp = read_text_file( "ali_params_run_%d"%ii,-1)
			uuu = []
			for k in xrange(len(temp[0])):
				uuu.extend([temp[0][k],temp[1][k],temp[2][k],temp[3][k]])
			all_ali_params.append(uuu)


		"""

		stable_set, mir_stab_rate, pix_err = multi_align_stability(all_ali_params, 0.0, 10000.0, options.thld_err, options.verbose, 2*ou+1)
		print "%4s %20s %20s %20s %30s %6.2f"%("", "Size of set", "Size of stable set", "Mirror stab rate", "Pixel error prior to pruning the set above threshold of",options.thld_err)
		print "Average stat: %10d %20d %20.2f   %15.2f"%( len(class_data), len(stable_set), mir_stab_rate, pix_err)
		if( len(stable_set) > 0):
			if options.stables:
				stab_mem = [[0,0.0,0] for j in xrange(len(stable_set))]
				for j in xrange(len(stable_set)): stab_mem[j] = [int(stable_set[j][1]), stable_set[j][0], j]
				write_text_row(stab_mem, "stable_particles.txt")

			stable_set_id = []
			particle_pixerr = []
			for s in stable_set:
				stable_set_id.append(s[1])
				particle_pixerr.append(s[0])
			from fundamentals import rot_shift2D
			avet.to_zero()
			l = -1
			print "average parameters:  angle, x-shift, y-shift, mirror"
			for j in stable_set_id:
				l += 1
				print " %4d  %4d  %12.2f %12.2f %12.2f        %1d"%(l,j, stable_set[l][2][0], stable_set[l][2][1], stable_set[l][2][2], int(stable_set[l][2][3]))
				avet += rot_shift2D(class_data[j], stable_set[l][2][0], stable_set[l][2][1], stable_set[l][2][2], stable_set[l][2][3] )
			avet /= (l+1)
			avet.set_attr('members', stable_set_id)
			avet.set_attr('pix_err', pix_err)
			avet.set_attr('pixerr', particle_pixerr)
			avet.write_image(args[1])



		global_def.BATCH = False
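
Example #17 above runs the 2-D alignment num_ali times, collects one (alpha, sx, sy, mirror) quadruple per particle from each run, and keeps the particles that pixel_error.multi_align_stability reports as stable. The toy sketch below illustrates the idea with a deliberately simplified stability test (shift spread and mirror consistency across runs); the real routine computes a rotation-aware pixel error over the particle radius, so treat this only as an illustration of the bookkeeping.

import numpy as np

def simple_stability(all_ali_params, thld_err):
    """all_ali_params: one flat [alpha, sx, sy, mirror, ...] list per run.
    Keep particles whose shift spread across runs stays below thld_err and
    whose mirror flag never changes (toy stand-in for multi_align_stability)."""
    runs = np.asarray(all_ali_params, dtype=float).reshape(len(all_ali_params), -1, 4)
    stable = []
    for j in range(runs.shape[1]):
        mirrors = runs[:, j, 3]
        shifts = runs[:, j, 1:3]
        spread = float(np.max(np.ptp(shifts, axis=0)))
        if np.all(mirrors == mirrors[0]) and spread <= thld_err:
            stable.append(j)
    return stable

# two runs, three particles; particle 1 drifts by 2 pixels in x between runs
run_a = [10.0, 0.0, 0.0, 0,   20.0, 1.0, 0.0, 0,   5.0, 0.0, 0.5, 1]
run_b = [12.0, 0.2, 0.1, 0,   20.0, 3.0, 0.0, 0,   6.0, 0.1, 0.4, 1]
print(simple_stability([run_a, run_b], thld_err=0.75))   # -> [0, 2]
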
Example #19
0
def main():
	import sys
	import os
	import math
	import random
	import pyemtbx.options
	import time
	from   random   import random, seed, randint
	from   optparse import OptionParser

	progname = os.path.basename(sys.argv[0])
	usage = progname + """ [options] <inputfile> <outputfile>

	Forms chains of 2D images based on their similarities.

	Functionality:


	4.  Order a 2-D stack of images based on pair-wise similarity (computed as a cross-correlation coefficient).
		Options 1-3 require the image stack to be aligned.  The program will apply orientation parameters if present in headers.
	    The ways to use the program:
	   4.1  Use option initial to specify which image will be used as an initial seed to form the chain.
	        sxprocess.py input_stack.hdf output_stack.hdf --initial=23 --radius=25
	   4.2  If option initial is omitted, the program will determine which image best serves as the initial seed to form the chain
	        sxprocess.py input_stack.hdf output_stack.hdf --radius=25
	   4.3  Use option circular to form a circular chain.
	        sxprocess.py input_stack.hdf output_stack.hdf --circular --radius=25
	   4.4  New circular code based on pairwise alignments
			sxprocess.py aclf.hdf chain.hdf circle.hdf --align  --radius=25 --xr=2 --pairwiseccc=lcc.txt

	   4.5  Circular ordering based on pairwise alignments
			sxprocess.py vols.hdf chain.hdf mask.hdf --dd  --radius=25


"""

	parser = OptionParser(usage,version=SPARXVERSION)
	parser.add_option("--dd", action="store_true", help="Circular ordering without adjustment of orientations", default=False)
	parser.add_option("--circular", action="store_true", help="Select circular ordering (first image has to be similar to the last)", default=False)
	parser.add_option("--align", action="store_true", help="Compute all pairwise alignments and for the table of their similarities find the best chain", default=False)
	parser.add_option("--initial", type="int", default=-1, help="Specifies which image will be used as an initial seed to form the chain. (default = 0, means the first image)")
	parser.add_option("--radius", type="int", default=-1, help="Radius of a circular mask for similarity based ordering")
	#  import params for 2D alignment
	parser.add_option("--ou",           type="int",    default=-1,          help="outer radius for 2D alignment < nx/2-1 (set to the radius of the particle)")
	parser.add_option("--xr",           type="int",    default=0,     		help="range for translation search in x direction, search is +/xr (0)")
	parser.add_option("--yr",           type="int",    default=0,          	help="range for translation search in y direction, search is +/yr (0)")
	#parser.add_option("--nomirror",     action="store_true", default=False,   help="Disable checking mirror orientations of images (default False)")
	parser.add_option("--pairwiseccc",  type="string",	default= None,      help="Input/output pairwise ccc file")


	(options, args) = parser.parse_args()

	global_def.BATCH = True

					
	if options.dd:
		nargs = len(args)
		if nargs != 3:
			print "must provide name of input and two output files!"
			return
		stack = args[0]
		new_stack = args[1]


		from utilities import model_circle
		from statistics import ccc
		from statistics import mono
		lend = EMUtil.get_image_count(stack)
		lccc = [None]*(lend*(lend-1)/2)

		for i in xrange(lend-1):
			v1 = get_im( stack, i )
			if( i == 0 and nargs == 2):
				nx = v1.get_xsize()
				ny = v1.get_ysize()
				nz = v1.get_zsize()
				if options.ou < 1 : radius = nx//2-2
				else:  radius = options.ou
				mask = model_circle(radius, nx, ny, nz)
			else:
				mask = get_im(args[2])
				
			for j in xrange(i+1, lend):
				lccc[mono(i,j)] = [ccc(v1, get_im( stack, j ), mask), 0]


		order = tsp(lccc)
		if(len(order) != lend):
			print  " problem with data length"
			from sys import exit
			exit()
		print  "Total sum of cccs :",TotalDistance(order, lccc)
		print "ordering :",order
		for i in xrange(lend):  get_im(stack, order[i]).write_image( new_stack, i )

	elif options.align:
		nargs = len(args)
		if nargs != 3:
			print "must provide name of input and two output files!"
			return

		from utilities import get_params2D, model_circle
		from fundamentals import rot_shift2D
		from statistics import ccc
		from time import time
		from alignment import align2d, align2d_scf
		from multi_shc import mult_transform 
		
		stack = args[0]
		new_stack = args[1]
		
		d = EMData.read_images(stack)

		"""
		# will align anyway
		try:
			ttt = d[0].get_attr('xform.params2d')
			for i in xrange(len(d)):
				alpha, sx, sy, mirror, scale = get_params2D(d[i])
				d[i] = rot_shift2D(d[i], alpha, sx, sy, mirror)
		except:
			pass
		"""

		nx = d[0].get_xsize()
		ny = d[0].get_ysize()
		if options.ou < 1 : radius = nx//2-2
		else:  radius = options.ou
		mask = model_circle(radius, nx, ny)

		if(options.xr < 0):	xrng = 0
		else:					xrng = options.xr
		if(options.yr < 0):	yrng = xrng
		else:					yrng = options.yr
			
		initial = max(options.initial, 0)

		from statistics import mono
		lend = len(d)
		lccc = [None]*(lend*(lend-1)/2)
		from utilities import read_text_row

		if  options.pairwiseccc == None or not os.path.exists(options.pairwiseccc) :
			st = time()
			for i in xrange(lend-1):
				for j in xrange(i+1, lend):
					#  j>i meaning mono entry (i,j) or (j,i) indicates T i->j (from smaller index to larger)
					#alpha, sx, sy, mir, peak = align2d(d[i],d[j], xrng, yrng, step=options.ts, first_ring=options.ir, last_ring=radius, mode = "F")
					alpha, sx, sy, mir, peak = align2d_scf(d[i],d[j], xrng, yrng, ou=radius)
					lccc[mono(i,j)] = [ccc(d[j], rot_shift2D(d[i], alpha, sx, sy, mir, 1.0), mask), alpha, sx, sy, mir]
				#print "  %4d   %10.1f"%(i,time()-st)

			if(options.pairwiseccc is not None and not os.path.exists(options.pairwiseccc)):
				from utilities import write_text_row
				write_text_row([[initial,0,0,0,0]]+lccc,options.pairwiseccc)
		elif(os.path.exists(options.pairwiseccc)):
			lccc = read_text_row(options.pairwiseccc)
			initial = int(lccc[0][0] + 0.1)
			del lccc[0]


		for i in xrange(len(lccc)):
			T = Transform({"type":"2D","alpha":lccc[i][1],"tx":lccc[i][2],"ty":lccc[i][3],"mirror":int(lccc[i][4]+0.1)})
			lccc[i] = [lccc[i][0],T]

		tdummy = Transform({"type":"2D"})
		maxsum = -1.023
		for m in xrange(0,lend):#initial, initial+1):
			indc = range( lend )
			lsnake = [[m, tdummy, 0.0]]
			del indc[m]

			lsum = 0.0
			while len(indc) > 1:
				maxcit = -111.
				for i in xrange(len(indc)):
						cuc = lccc[mono(indc[i], lsnake[-1][0])][0]
						if cuc > maxcit:
								maxcit = cuc
								qi = indc[i]
								#  Here we need transformation from the current to the previous,
								#     meaning indc[i] -> lsnake[-1][0]
								T = lccc[mono(indc[i], lsnake[-1][0])][1]
								#  If direction is from larger to smaller index, the transformation has to be inverted
								if( indc[i] > lsnake[-1][0] ):  T = T.inverse()
								
								
				lsnake.append([qi,T, maxcit])
				lsum += maxcit

				del indc[indc.index(qi)]

			T = lccc[mono(indc[-1], lsnake[-1][0])][1]
			if( indc[-1] > lsnake[-1][0]):  T = T.inverse()
			lsnake.append([indc[-1], T, lccc[mono(indc[-1], lsnake[-1][0])][0]])
			print  " initial image and lsum  ",m,lsum
			#print lsnake
			if(lsum > maxsum):
				maxsum = lsum
				init = m
				snake = [lsnake[i] for i in xrange(lend)]
		print  "  Initial image selected : ",init,maxsum,"    ",TotalDistance([snake[m][0] for m in xrange(lend)], lccc)
		#for q in snake: print q

		from copy import deepcopy
		trans=deepcopy([snake[i][1] for i in xrange(len(snake))])
		print  [snake[i][0] for i in xrange(len(snake))]
		"""
		for m in xrange(lend):
			prms = trans[m].get_params("2D")
			print " %3d   %7.1f   %7.1f   %7.1f   %2d  %6.2f"%(snake[m][0], prms["alpha"], prms["tx"], prms["ty"], prms["mirror"], snake[m][2])
		"""
		for k in xrange(lend-2,0,-1):
			T = snake[k][1]
			for i in xrange(k+1, lend):
				trans[i] = T*trans[i]
		#  To add - apply all transformations and do the overall centering.
		for m in xrange(lend):
			prms = trans[m].get_params("2D")
			#print " %3d   %7.1f   %7.1f   %7.1f   %2d  %6.2f"%(snake[m][0], prms["alpha"], prms["tx"], prms["ty"], prms["mirror"], snake[m][2])
			#rot_shift2D(d[snake[m][0]], prms["alpha"], prms["tx"], prms["ty"], prms["mirror"]).write_image(new_stack, m)
			rot_shift2D(d[snake[m][0]], prms["alpha"], 0.0,0.0, prms["mirror"]).write_image(new_stack, m)

		order = tsp(lccc)
		if(len(order) != lend):
			print  " problem with data length"
			from sys import exit
			exit()
		print  TotalDistance(order, lccc)
		print order
		ibeg = order.index(init)
		order = [order[(i+ibeg)%lend] for i in xrange(lend)]
		print  TotalDistance(order, lccc)
		print order


		snake = [tdummy]
		for i in xrange(1,lend):
			#  Here we need transformation from the current to the previous,
			#     meaning order[i] -> order[i-1]]
			T = lccc[mono(order[i], order[i-1])][1]
			#  If direction is from larger to smaller index, the transformation has to be inverted
			if( order[i] > order[i-1] ):  T = T.inverse()
			snake.append(T)
		assert(len(snake) == lend)
		from copy import deepcopy
		trans = deepcopy(snake)
		for k in xrange(lend-2,0,-1):
			T = snake[k]
			for i in xrange(k+1, lend):
				trans[i] = T*trans[i]

		#  Try to smooth the angles - complicated, I am afraid one would have to use angles forward and backwards
		#     and find their average??
		#  In addition, one would have to recenter them
		"""
		trms = []
		for m in xrange(lend):
			prms = trans[m].get_params("2D")
			trms.append([prms["alpha"], prms["mirror"]])
		for i in xrange(3):
			for m in xrange(lend):
				mb = (m-1)%lend
				me = (m+1)%lend
				#  angles order mb,m,me
				# calculate predicted angles mb->m 
		"""

		for m in xrange(lend):
			prms = trans[m].get_params("2D")
			#rot_shift2D(d[order[m]], prms["alpha"], prms["tx"], prms["ty"], prms["mirror"]).write_image("metro.hdf", m)
			rot_shift2D(d[order[m]], prms["alpha"], 0.0,0.0, prms["mirror"]).write_image(args[2], m)

		"""
		#  This was an effort to get number of loops, inconclusive, to say the least
		from numpy import outer, zeros, float32, sqrt
		lend = len(d)
 		cor = zeros(lend,float32)
 		cor = outer(cor, cor)
		for i in xrange(lend):  cor[i][i] = 1.0
		for i in xrange(lend-1):
			for j in xrange(i+1, lend):
				cor[i,j] = lccc[mono(i,j)][0]
				cor[j,i] = cor[i,j]

		lmbd, eigvec = pca(cor)

		from utilities import write_text_file

		nvec=20
		print  [lmbd[j] for j in xrange(nvec)]
		print  " G"
		mm = [-1]*lend
		for i in xrange(lend):  # row
			mi = -1.0e23
			for j in xrange(nvec):
				qt = eigvec[j][i]
				if(abs(qt)>mi):
					mi = abs(qt)
					mm[i] = j
			for j in xrange(nvec):
				qt = eigvec[j][i]
				print  round(qt,3),   #  eigenvector
			print  mm[i]
		print
		for j in xrange(nvec):
			qt = []
			for i in xrange(lend):
				if(mm[i] == j):  qt.append(i)
			if(len(qt)>0):  write_text_file(qt,"loop%02d.txt"%j)
		"""
		"""
		print  [lmbd[j] for j in xrange(nvec)]
		print  " B"
		mm = [-1]*lend
		for i in xrange(lend):  # row
			mi = -1.0e23
			for j in xrange(nvec):
				qt = eigvec[j][i]/sqrt(lmbd[j])
				if(abs(qt)>mi):
					mi = abs(qt)
					mm[i] = j
			for j in xrange(nvec):
				qt = eigvec[j][i]/sqrt(lmbd[j])
				print  round(qt,3),   #  eigenvector
			print  mm[i]
		print
		"""

		"""
		lend=3
 		cor = zeros(lend,float32)
 		
 		cor = outer(cor, cor)
 		
 		
 		cor[0][0] =136.77
 		cor[0][1] = 79.15
 		cor[0][2] = 37.13
 		
 		cor[1][0] = 79.15
 		cor[2][0] = 37.13
 		
 		
 		cor[1][1] = 50.04
 		cor[1][2] = 21.65
 		
 		cor[2][1] = 21.65
 		
 		
 		cor[2][2] = 13.26

		lmbd, eigvec = pca(cor)
		print  lmbd
		print  eigvec
		for i in xrange(lend):  # row
			for j in xrange(lend):  print  eigvec[j][i],   #  eigenvector
			print
		print  " B"
		for i in xrange(lend):  # row
			for j in xrange(lend):  print  eigvec[j][i]/sqrt(lmbd[j]),   #  eigenvector
			print
		print  " G"
		for i in xrange(lend):  # row
			for j in xrange(lend):  print  eigvec[j][i]*sqrt(lmbd[j]),   #  eigenvector
			print
		"""
	else:
		nargs = len(args)
		if nargs != 2:
			print "must provide name of input and output file!"
			return
		
		from utilities import get_params2D, model_circle
		from fundamentals import rot_shift2D
		from statistics import ccc
		from time import time
		from alignment import align2d
		from multi_shc import mult_transform 
		
		stack = args[0]
		new_stack = args[1]
		
		d = EMData.read_images(stack)
		try:
			ttt = d[0].get_attr('xform.params2d')
			for i in xrange(len(d)):
				alpha, sx, sy, mirror, scale = get_params2D(d[i])
				d[i] = rot_shift2D(d[i], alpha, sx, sy, mirror)
		except:
			pass

		nx = d[0].get_xsize()
		ny = d[0].get_ysize()
		if options.radius < 1 : radius = nx//2-2
		else:  radius = options.radius
		mask = model_circle(radius, nx, ny)

		init = options.initial
		
		if init > -1 :
			print "      initial image: %d" % init
			temp = d[init].copy()
			temp.write_image(new_stack, 0)
			del d[init]
			k = 1
			lsum = 0.0
			while len(d) > 1:
				maxcit = -111.
				for i in xrange(len(d)):
						cuc = ccc(d[i], temp, mask)
						if cuc > maxcit:
								maxcit = cuc
								qi = i
				# 	print k, maxcit
				lsum += maxcit
				temp = d[qi].copy()
				del d[qi]
				temp.write_image(new_stack, k)
				k += 1
			print  lsum
			d[0].write_image(new_stack, k)
		else:			
			if options.circular :
				print "Using options.circular"
				#  figure the "best circular" starting image
				maxsum = -1.023
				for m in xrange(len(d)):
					indc = range(len(d) )
					lsnake = [-1]*(len(d)+1)
					lsnake[0]  = m
					lsnake[-1] = m
					del indc[m]
					temp = d[m].copy()
					lsum = 0.0
					direction = +1
					k = 1
					while len(indc) > 1:
						maxcit = -111.
						for i in xrange(len(indc)):
								cuc = ccc(d[indc[i]], temp, mask)
								if cuc > maxcit:
										maxcit = cuc
										qi = indc[i]
						lsnake[k] = qi
						lsum += maxcit
						del indc[indc.index(qi)]
						direction = -direction
						for i in xrange( 1,len(d) ):
							if( direction > 0 ):
								if(lsnake[i] == -1):
									temp = d[lsnake[i-1]].copy()
									#print  "  forw  ",lsnake[i-1]
									k = i
									break
							else:
								if(lsnake[len(d) - i] == -1):
									temp = d[lsnake[len(d) - i +1]].copy()
									#print  "  back  ",lsnake[len(d) - i +1]
									k = len(d) - i
									break

					lsnake[lsnake.index(-1)] = indc[-1]
					#print  " initial image and lsum  ",m,lsum
					#print lsnake
					if(lsum > maxsum):
						maxsum = lsum
						init = m
						snake = [lsnake[i] for i in xrange(len(d))]
				print  "  Initial image selected : ",init,maxsum
				print lsnake
				for m in xrange(len(d)):  d[snake[m]].write_image(new_stack, m)
			else:
				#  figure the "best" starting image
				maxsum = -1.023
				for m in xrange(len(d)):
					indc = range(len(d) )
					lsnake = [m]
					del indc[m]
					temp = d[m].copy()
					lsum = 0.0
					while len(indc) > 1:
						maxcit = -111.
						for i in xrange(len(indc)):
								cuc = ccc(d[indc[i]], temp, mask)
								if cuc > maxcit:
										maxcit = cuc
										qi = indc[i]
						lsnake.append(qi)
						lsum += maxcit
						temp = d[qi].copy()
						del indc[indc.index(qi)]

					lsnake.append(indc[-1])
					#print  " initial image and lsum  ",m,lsum
					#print lsnake
					if(lsum > maxsum):
						maxsum = lsum
						init = m
						snake = [lsnake[i] for i in xrange(len(d))]
				print  "  Initial image selected : ",init,maxsum
				print lsnake
				for m in xrange(len(d)):  d[snake[m]].write_image(new_stack, m)
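
Both ordering branches above build a flat pairwise similarity table lccc indexed by statistics.mono(i, j) and grow a chain greedily: starting from a seed image, keep appending the unused image most similar to the current chain end, try every seed, and keep the chain with the largest summed similarity (lsum). Below is a self-contained sketch of that pattern with random numbers standing in for the ccc values; pair_index is only assumed to behave like mono, i.e. it maps an unordered pair to a unique slot in the flat list.

import random

def pair_index(i, j):
    """Map an unordered pair (i, j), i != j, to a unique slot in a flat
    list of length n*(n-1)//2 (assumed analogue of statistics.mono)."""
    if i > j:
        i, j = j, i
    return j * (j - 1) // 2 + i

def greedy_chain(similarity, n, seed):
    """Grow a chain from `seed`, always appending the unused image most
    similar to the current chain end (the lsnake loop above)."""
    chain, remaining, total = [seed], set(range(n)) - {seed}, 0.0
    while remaining:
        last = chain[-1]
        best = max(remaining, key=lambda q: similarity[pair_index(q, last)])
        total += similarity[pair_index(best, last)]
        chain.append(best)
        remaining.remove(best)
    return chain, total

random.seed(1)
n = 6
sim = [random.random() for _ in range(n * (n - 1) // 2)]   # toy pairwise "ccc" table
best_chain, best_sum = max((greedy_chain(sim, n, m) for m in range(n)),
                           key=lambda cs: cs[1])           # try every seed, keep best lsum
print(best_chain, round(best_sum, 3))
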
Example #20
0
def spruce_up_var_m(refdata):
    from utilities import print_msg
    from utilities import model_circle, get_im
    from filter import filt_tanl, filt_gaussl
    from morphology import threshold
    import os

    numref = refdata[0]
    outdir = refdata[1]
    fscc = refdata[2]
    total_iter = refdata[3]
    varf = refdata[4]
    mask = refdata[5]
    ali50S = refdata[6]

    if ali50S:
        mask_50S = get_im("mask-50S.spi")

    if fscc is None:
        flmin = 0.4
        aamin = 0.1
    else:
        flmin, aamin, idmin = minfilt(fscc)
        aamin = aamin

    msg = "Minimum tangent filter:  cut-off frequency = %10.3f     fall-off = %10.3f\n" % (
        fflmin, aamin)
    print_msg(msg)

    for i in xrange(numref):
        volf = get_im(os.path.join(outdir, "vol%04d.hdf" % total_iter), i)
        if (not (varf is None)): volf = volf.filter_by_image(varf)
        volf = filt_tanl(volf, flmin, aamin)
        stat = Util.infomask(volf, mask, True)
        volf -= stat[0]
        Util.mul_scalar(volf, 1.0 / stat[1])

        nx = volf.get_xsize()
        stat = Util.infomask(
            volf,
            model_circle(nx // 2 - 2, nx, nx, nx) -
            model_circle(nx // 2 - 6, nx, nx, nx), True)
        volf -= stat[0]
        Util.mul_img(volf, mask)

        volf = threshold(volf)
        volf = filt_gaussl(volf, 0.4)

        if ali50S:
            if i == 0:
                v50S_0 = volf.copy()
                v50S_0 *= mask_50S
            else:
                from applications import ali_vol_3
                from fundamentals import rot_shift3D
                v50S_i = volf.copy()
                v50S_i *= mask_50S

                params = ali_vol_3(v50S_i, v50S_0, 10.0, 0.5, mask=mask_50S)
                volf = rot_shift3D(volf, params[0], params[1], params[2],
                                   params[3], params[4], params[5], 1.0)

        volf.write_image(os.path.join(outdir, "volf%04d.hdf" % total_iter), i)
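
Example #20 brings each filtered volume to zero mean and unit variance inside the mask, then subtracts the mean of a thin spherical shell just inside the box edge (the difference of the two model_circle calls) so that the background level sits at zero before masking, thresholding, and Gaussian filtering. Below is a NumPy sketch of that shell-referenced normalization, with a hand-rolled spherical mask standing in for utilities.model_circle; it is a sketch of the idea, not the SPARX routine.

import numpy as np

def sphere(radius, n):
    """Binary sphere of the given radius centred in an n^3 volume."""
    ax = np.arange(n) - n // 2
    x, y, z = np.meshgrid(ax, ax, ax, indexing="ij")
    return (x * x + y * y + z * z <= radius * radius).astype(np.float32)

def shell_normalize(vol, mask):
    """Zero-mean / unit-std inside `mask`, then subtract the mean of a thin
    outer shell so the background sits at zero, then apply the mask."""
    n = vol.shape[0]
    inside = mask > 0
    vol = (vol - vol[inside].mean()) / vol[inside].std()
    shell = sphere(n // 2 - 2, n) - sphere(n // 2 - 6, n)
    vol = vol - vol[shell > 0].mean()
    return vol * mask

n = 32
vol = np.random.default_rng(0).normal(loc=3.0, scale=2.0, size=(n, n, n)).astype(np.float32)
mask = sphere(n // 2 - 4, n)
out = shell_normalize(vol, mask)
print(round(float(out[mask > 0].mean()), 3))
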
Example #22
0
def main():
    arglist = []
    for arg in sys.argv:
        arglist.append(arg)
    progname = os.path.basename(arglist[0])
    usage = progname + """ firstvolume  secondvolume  maskfile  outputfile  --wn  --step  --cutoff  --radius  --fsc  --res_overall  --out_ang_res  --apix  --MPI

	Compute local resolution in real space within the area outlined by the maskfile, using windows of size wn x wn x wn
	"""
    parser = optparse.OptionParser(usage, version=global_def.SPARXVERSION)

    parser.add_option(
        "--wn",
        type="int",
        default=7,
        help=
        "Size of window within which local real-space FSC is computed. (default 7)"
    )
    parser.add_option(
        "--step",
        type="float",
        default=1.0,
        help="Shell step in Fourier size in pixels. (default 1.0)")
    parser.add_option("--cutoff",
                      type="float",
                      default=0.5,
                      help="Resolution cut-off for FSC. (default 0.5)")
    parser.add_option(
        "--radius",
        type="int",
        default=-1,
        help=
        "If there is no maskfile, sphere with r=radius will be used. By default, the radius is nx/2-wn (default -1)"
    )
    parser.add_option(
        "--fsc",
        type="string",
        default=None,
        help=
        "Save overall FSC curve (might be truncated). By default, the program does not save the FSC curve. (default none)"
    )
    parser.add_option(
        "--res_overall",
        type="float",
        default=-1.0,
        help=
        "Overall resolution at the cutoff level estimated by the user [abs units]. (default None)"
    )
    parser.add_option(
        "--out_ang_res",
        action="store_true",
        default=False,
        help=
        "Additionally creates a local resolution file in Angstroms. (default False)"
    )
    parser.add_option(
        "--apix",
        type="float",
        default=1.0,
        help=
        "Pixel size in Angstrom. Effective only with --out_ang_res options. (default 1.0)"
    )
    parser.add_option("--MPI",
                      action="store_true",
                      default=False,
                      help="Use MPI version.")

    (options, args) = parser.parse_args(arglist[1:])

    if len(args) < 3 or len(args) > 4:
        print("See usage " + usage)
        sys.exit()

    if global_def.CACHE_DISABLE:
        utilities.disable_bdb_cache()

    res_overall = options.res_overall

    if options.MPI:
        sys.argv = mpi.mpi_init(len(sys.argv), sys.argv)

        number_of_proc = mpi.mpi_comm_size(mpi.MPI_COMM_WORLD)
        myid = mpi.mpi_comm_rank(mpi.MPI_COMM_WORLD)
        main_node = 0
        global_def.MPI = True
        cutoff = options.cutoff

        nk = int(options.wn)

        if (myid == main_node):
            #print sys.argv
            vi = utilities.get_im(sys.argv[1])
            ui = utilities.get_im(sys.argv[2])

            nx = vi.get_xsize()
            ny = vi.get_ysize()
            nz = vi.get_zsize()
            dis = [nx, ny, nz]
        else:
            dis = [0, 0, 0]

        global_def.BATCH = True

        dis = utilities.bcast_list_to_all(dis, myid, source_node=main_node)

        if (myid != main_node):
            nx = int(dis[0])
            ny = int(dis[1])
            nz = int(dis[2])

            vi = utilities.model_blank(nx, ny, nz)
            ui = utilities.model_blank(nx, ny, nz)

        if len(args) == 3:
            m = utilities.model_circle((min(nx, ny, nz) - nk) // 2, nx, ny, nz)
            outvol = args[2]

        elif len(args) == 4:
            if (myid == main_node):
                m = morphology.binarize(utilities.get_im(args[2]), 0.5)
            else:
                m = utilities.model_blank(nx, ny, nz)
            outvol = args[3]
        utilities.bcast_EMData_to_all(m, myid, main_node)
        """Multiline Comment0"""
        freqvol, resolut = statistics.locres(vi, ui, m, nk, cutoff,
                                             options.step, myid, main_node,
                                             number_of_proc)

        if (myid == 0):
            # Remove outliers based on the Interquartile range
            output_volume(freqvol, resolut, options.apix, outvol, options.fsc,
                          options.out_ang_res, nx, ny, nz, res_overall)
        mpi.mpi_finalize()

    else:
        cutoff = options.cutoff
        vi = utilities.get_im(args[0])
        ui = utilities.get_im(args[1])

        nn = vi.get_xsize()
        nk = int(options.wn)

        if len(args) == 3:
            m = utilities.model_circle((nn - nk) // 2, nn, nn, nn)
            outvol = args[2]

        elif len(args) == 4:
            m = morphology.binarize(utilities.get_im(args[2]), 0.5)
            outvol = args[3]

        mc = utilities.model_blank(nn, nn, nn, 1.0) - m

        vf = fundamentals.fft(vi)
        uf = fundamentals.fft(ui)
        """Multiline Comment1"""
        lp = int(nn / 2 / options.step + 0.5)
        step = 0.5 / lp
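        # The loop below walks Fourier shells of width `step` (absolute
        # frequency units) up to the Nyquist frequency 0.5; each pass isolates
        # one shell with a top-hat band-pass and accumulates the windowed
        # correlation terms used for the local resolution map.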

        freqvol = utilities.model_blank(nn, nn, nn)
        resolut = []
        for i in range(1, lp):
            fl = step * i
            fh = fl + step
            #print(lp,i,step,fl,fh)
            v = fundamentals.fft(filter.filt_tophatb(vf, fl, fh))
            u = fundamentals.fft(filter.filt_tophatb(uf, fl, fh))
            tmp1 = EMAN2_cppwrap.Util.muln_img(v, v)
            tmp2 = EMAN2_cppwrap.Util.muln_img(u, u)

            do = EMAN2_cppwrap.Util.infomask(
                morphology.square_root(
                    morphology.threshold(
                        EMAN2_cppwrap.Util.muln_img(tmp1, tmp2))), m, True)[0]

            tmp3 = EMAN2_cppwrap.Util.muln_img(u, v)
            dp = EMAN2_cppwrap.Util.infomask(tmp3, m, True)[0]
            resolut.append([i, (fl + fh) / 2.0, dp / do])

            tmp1 = EMAN2_cppwrap.Util.box_convolution(tmp1, nk)
            tmp2 = EMAN2_cppwrap.Util.box_convolution(tmp2, nk)
            tmp3 = EMAN2_cppwrap.Util.box_convolution(tmp3, nk)

            EMAN2_cppwrap.Util.mul_img(tmp1, tmp2)

            tmp1 = morphology.square_root(morphology.threshold(tmp1))

            EMAN2_cppwrap.Util.mul_img(tmp1, m)
            EMAN2_cppwrap.Util.add_img(tmp1, mc)

            EMAN2_cppwrap.Util.mul_img(tmp3, m)
            EMAN2_cppwrap.Util.add_img(tmp3, mc)

            EMAN2_cppwrap.Util.div_img(tmp3, tmp1)

            EMAN2_cppwrap.Util.mul_img(tmp3, m)
            freq = (fl + fh) / 2.0
            bailout = True
            for x in range(nn):
                for y in range(nn):
                    for z in range(nn):
                        if (m.get_value_at(x, y, z) > 0.5):
                            if (freqvol.get_value_at(x, y, z) == 0.0):
                                if (tmp3.get_value_at(x, y, z) < cutoff):
                                    freqvol.set_value_at(x, y, z, freq)
                                    bailout = False
                                else:
                                    bailout = False
            if (bailout): break
        #print(len(resolut))
        # remove outliers
        output_volume(freqvol, resolut, options.apix, outvol, options.fsc,
                      options.out_ang_res, nn, nn, nn, res_overall)
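
Each entry of resolut above is an overall correlation value for one Fourier shell, computed from the band-passed volumes under the mask. For reference, here is a compact NumPy sketch of the textbook Fourier Shell Correlation between two volumes, which is roughly what those per-shell values approximate; it is not the SPARX implementation and it ignores masking and windowing.

import numpy as np

def fsc(vol1, vol2, nshells):
    """Fourier Shell Correlation between two equally sized cubic volumes."""
    f1, f2 = np.fft.fftn(vol1), np.fft.fftn(vol2)
    n = vol1.shape[0]
    freq = np.fft.fftfreq(n)
    fx, fy, fz = np.meshgrid(freq, freq, freq, indexing="ij")
    r = np.sqrt(fx * fx + fy * fy + fz * fz)
    edges = np.linspace(0.0, 0.5, nshells + 1)
    curve = []
    for lo, hi in zip(edges[:-1], edges[1:]):
        sel = (r >= lo) & (r < hi)
        num = np.sum(f1[sel] * np.conj(f2[sel])).real
        den = np.sqrt(np.sum(np.abs(f1[sel]) ** 2) * np.sum(np.abs(f2[sel]) ** 2))
        curve.append(((lo + hi) / 2.0, float(num / den) if den > 0 else 0.0))
    return curve

rng = np.random.default_rng(0)
signal = rng.normal(size=(32, 32, 32))
v1 = signal + 0.5 * rng.normal(size=signal.shape)
v2 = signal + 0.5 * rng.normal(size=signal.shape)
for f, c in fsc(v1, v2, 8):
    print("%.3f  %.3f" % (f, c))
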
Example #23
0
def ali3d_MPI(stack,
              ref_vol,
              outdir,
              maskfile=None,
              ir=1,
              ou=-1,
              rs=1,
              xr="4 2 2 1",
              yr="-1",
              ts="1 1 0.5 0.25",
              delta="10 6 4 4",
              an="-1",
              center=0,
              maxit=5,
              term=95,
              CTF=False,
              fourvar=False,
              snr=1.0,
              ref_a="S",
              sym="c1",
              sort=True,
              cutoff=999.99,
              pix_cutoff="0",
              two_tail=False,
              model_jump="1 1 1 1 1",
              restart=False,
              save_half=False,
              protos=None,
              oplane=None,
              lmask=-1,
              ilmask=-1,
              findseam=False,
              vertstep=None,
              hpars="-1",
              hsearch="0.0 50.0",
              full_output=False,
              compare_repro=False,
              compare_ref_free="-1",
              ref_free_cutoff="-1 -1 -1 -1",
              wcmask=None,
              debug=False,
              recon_pad=4,
              olmask=75):

    from alignment import Numrinit, prepare_refrings
    from utilities import model_circle, get_image, drop_image, get_input_from_string
    from utilities import bcast_list_to_all, bcast_number_to_all, reduce_EMData_to_root, bcast_EMData_to_all
    from utilities import send_attr_dict
    from utilities import get_params_proj, file_type
    from fundamentals import rot_avg_image
    import os
    import types
    from utilities import print_begin_msg, print_end_msg, print_msg
    from mpi import mpi_bcast, mpi_comm_size, mpi_comm_rank, MPI_FLOAT, MPI_COMM_WORLD, mpi_barrier, mpi_reduce
    from mpi import mpi_reduce, MPI_INT, MPI_SUM, mpi_finalize
    from filter import filt_ctf
    from projection import prep_vol, prgs
    from statistics import hist_list, varf3d_MPI, fsc_mask
    from numpy import array, bincount, array2string, ones

    number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
    myid = mpi_comm_rank(MPI_COMM_WORLD)
    main_node = 0
    if myid == main_node:
        if os.path.exists(outdir):
            ERROR(
                'Output directory exists, please change the name and restart the program',
                "ali3d_MPI", 1)
        os.mkdir(outdir)
    mpi_barrier(MPI_COMM_WORLD)

    if debug:
        from time import sleep
        while not os.path.exists(outdir):
            print "Node ", myid, "  waiting..."
            sleep(5)

        info_file = os.path.join(outdir, "progress%04d" % myid)
        finfo = open(info_file, 'w')
    else:
        finfo = None
    mjump = get_input_from_string(model_jump)
    xrng = get_input_from_string(xr)
    if yr == "-1": yrng = xrng
    else: yrng = get_input_from_string(yr)
    step = get_input_from_string(ts)
    delta = get_input_from_string(delta)
    ref_free_cutoff = get_input_from_string(ref_free_cutoff)
    pix_cutoff = get_input_from_string(pix_cutoff)

    lstp = min(len(xrng), len(yrng), len(step), len(delta))
    if an == "-1":
        an = [-1] * lstp
    else:
        an = get_input_from_string(an)
    # make sure pix_cutoff is set for all iterations
    if len(pix_cutoff) < lstp:
        for i in xrange(len(pix_cutoff), lstp):
            pix_cutoff.append(pix_cutoff[-1])
    # don't waste time on sub-pixel alignment for low-resolution ang incr
    for i in range(len(step)):
        if (delta[i] > 4 or delta[i] == -1) and step[i] < 1:
            step[i] = 1

    first_ring = int(ir)
    rstep = int(rs)
    last_ring = int(ou)
    max_iter = int(maxit)
    center = int(center)

    nrefs = EMUtil.get_image_count(ref_vol)
    nmasks = 0
    if maskfile:
        # read number of masks within each maskfile (mc)
        nmasks = EMUtil.get_image_count(maskfile)
        # open masks within maskfile (mc)
        maskF = EMData.read_images(maskfile, xrange(nmasks))
    vol = EMData.read_images(ref_vol, xrange(nrefs))
    nx = vol[0].get_xsize()

    ## make sure box sizes are the same
    if myid == main_node:
        im = EMData.read_images(stack, [0])
        bx = im[0].get_xsize()
        if bx != nx:
            print_msg(
                "Error: Stack box size (%i) differs from initial model (%i)\n"
                % (bx, nx))
            sys.exit()
        del im, bx

    # for helical processing:
    helicalrecon = False
    if protos is not None or hpars != "-1" or findseam is True:
        helicalrecon = True
        # if no out-of-plane param set, use 5 degrees
        if oplane is None:
            oplane = 5.0
    if protos is not None:
        proto = get_input_from_string(protos)
        if len(proto) != nrefs:
            print_msg("Error: insufficient protofilament numbers supplied")
            sys.exit()
    if hpars != "-1":
        hpars = get_input_from_string(hpars)
        if len(hpars) != 2 * nrefs:
            print_msg("Error: insufficient helical parameters supplied")
            sys.exit()
    ## create helical parameter file for helical reconstruction
    if helicalrecon is True and myid == main_node:
        from hfunctions import createHpar
        # create initial helical parameter files
        dp = [0] * nrefs
        dphi = [0] * nrefs
        vdp = [0] * nrefs
        vdphi = [0] * nrefs
        for iref in xrange(nrefs):
            hpar = os.path.join(outdir, "hpar%02d.spi" % (iref))
            params = False
            if hpars != "-1":
                # if helical parameters explicitly given, set twist & rise
                params = [float(hpars[iref * 2]), float(hpars[(iref * 2) + 1])]
            dp[iref], dphi[iref], vdp[iref], vdphi[iref] = createHpar(
                hpar, proto[iref], params, vertstep)

    # get values for helical search parameters
    hsearch = get_input_from_string(hsearch)
    if len(hsearch) != 2:
        print_msg("Error: specify outer and inner radii for helical search")
        sys.exit()

    if last_ring < 0 or last_ring > int(nx / 2) - 2:
        last_ring = int(nx / 2) - 2

    if myid == main_node:
        #	import user_functions
        #	user_func = user_functions.factory[user_func_name]

        print_begin_msg("ali3d_MPI")
        print_msg("Input stack		 : %s\n" % (stack))
        print_msg("Reference volume	    : %s\n" % (ref_vol))
        print_msg("Output directory	    : %s\n" % (outdir))
        if nmasks > 0:
            print_msg("Maskfile (number of masks)  : %s (%i)\n" %
                      (maskfile, nmasks))
        print_msg("Inner radius		: %i\n" % (first_ring))
        print_msg("Outer radius		: %i\n" % (last_ring))
        print_msg("Ring step		   : %i\n" % (rstep))
        print_msg("X search range	      : %s\n" % (xrng))
        print_msg("Y search range	      : %s\n" % (yrng))
        print_msg("Translational step	  : %s\n" % (step))
        print_msg("Angular step		: %s\n" % (delta))
        print_msg("Angular search range	: %s\n" % (an))
        print_msg("Maximum iteration	   : %i\n" % (max_iter))
        print_msg("Center type		 : %i\n" % (center))
        print_msg("CTF correction	      : %s\n" % (CTF))
        print_msg("Signal-to-Noise Ratio       : %f\n" % (snr))
        print_msg("Reference projection method : %s\n" % (ref_a))
        print_msg("Symmetry group	      : %s\n" % (sym))
        print_msg("Fourier padding for 3D      : %i\n" % (recon_pad))
        print_msg("Number of reference models  : %i\n" % (nrefs))
        print_msg("Sort images between models  : %s\n" % (sort))
        print_msg("Allow images to jump	: %s\n" % (mjump))
        print_msg("CC cutoff standard dev      : %f\n" % (cutoff))
        print_msg("Two tail cutoff	     : %s\n" % (two_tail))
        print_msg("Termination pix error       : %f\n" % (term))
        print_msg("Pixel error cutoff	  : %s\n" % (pix_cutoff))
        print_msg("Restart		     : %s\n" % (restart))
        print_msg("Full output		 : %s\n" % (full_output))
        print_msg("Compare reprojections       : %s\n" % (compare_repro))
        print_msg("Compare ref free class avgs : %s\n" % (compare_ref_free))
        print_msg("Use cutoff from ref free    : %s\n" % (ref_free_cutoff))
        if protos:
            print_msg("Protofilament numbers	: %s\n" % (proto))
            print_msg("Using helical search range   : %s\n" % hsearch)
        if findseam is True:
            print_msg("Using seam-based reconstruction\n")
        if hpars != "-1":
            print_msg("Using hpars		  : %s\n" % hpars)
        if vertstep != None:
            print_msg("Using vertical step    : %.2f\n" % vertstep)
        if save_half is True:
            print_msg("Saving even/odd halves\n")
        for i in xrange(100):
            print_msg("*")
        print_msg("\n\n")
    if maskfile:
        if type(maskfile) is types.StringType: mask3D = get_image(maskfile)
        else: mask3D = maskfile
    else: mask3D = model_circle(last_ring, nx, nx, nx)

    numr = Numrinit(first_ring, last_ring, rstep, "F")
    mask2D = model_circle(last_ring, nx, nx) - model_circle(first_ring, nx, nx)

    fscmask = model_circle(last_ring, nx, nx, nx)
    if CTF:
        from filter import filt_ctf
    from reconstruction_rjh import rec3D_MPI_noCTF

    if myid == main_node:
        active = EMUtil.get_all_attributes(stack, 'active')
        list_of_particles = []
        for im in xrange(len(active)):
            if active[im]: list_of_particles.append(im)
        del active
        nima = len(list_of_particles)
    else:
        nima = 0
    total_nima = bcast_number_to_all(nima, source_node=main_node)

    if myid != main_node:
        list_of_particles = [-1] * total_nima
    list_of_particles = bcast_list_to_all(list_of_particles,
                                          source_node=main_node)

    image_start, image_end = MPI_start_end(total_nima, number_of_proc, myid)

    # create a list of images for each node
    list_of_particles = list_of_particles[image_start:image_end]
    nima = len(list_of_particles)
    if debug:
        finfo.write("image_start, image_end: %d %d\n" %
                    (image_start, image_end))
        finfo.flush()

    data = EMData.read_images(stack, list_of_particles)

    t_zero = Transform({
        "type": "spider",
        "phi": 0,
        "theta": 0,
        "psi": 0,
        "tx": 0,
        "ty": 0
    })
    transmulti = [[t_zero for i in xrange(nrefs)] for j in xrange(nima)]

    for iref, im in ((iref, im) for iref in xrange(nrefs)
                     for im in xrange(nima)):
        if nrefs == 1:
            transmulti[im][iref] = data[im].get_attr("xform.projection")
        else:
            # if multi models, keep track of eulers for all models
            try:
                transmulti[im][iref] = data[im].get_attr("eulers_txty.%i" %
                                                         iref)
            except:
                data[im].set_attr("eulers_txty.%i" % iref, t_zero)

    scoremulti = [[0.0 for i in xrange(nrefs)] for j in xrange(nima)]
    pixelmulti = [[0.0 for i in xrange(nrefs)] for j in xrange(nima)]
    ref_res = [0.0 for x in xrange(nrefs)]
    apix = data[0].get_attr('apix_x')

    # for oplane parameter, create cylindrical mask
    if oplane is not None and myid == main_node:
        from hfunctions import createCylMask
        cmaskf = os.path.join(outdir, "mask3D_cyl.mrc")
        mask3D = createCylMask(data, olmask, lmask, ilmask, cmaskf)
        # if finding seam of helix, create wedge masks
        if findseam is True:
            wedgemask = []
            for pf in xrange(nrefs):
                wedgemask.append(EMData())
            # wedgemask option
            if wcmask is not None:
                wcmask = get_input_from_string(wcmask)
                if len(wcmask) != 3:
                    print_msg(
                        "Error: wcmask option requires 3 values: x y radius")
                    sys.exit()

    # determine if particles have helix info:
    try:
        data[0].get_attr('h_angle')
        original_data = []
        boxmask = True
        from hfunctions import createBoxMask
    except:
        boxmask = False

    # prepare particles
    for im in xrange(nima):
        data[im].set_attr('ID', list_of_particles[im])
        data[im].set_attr('pix_score', int(0))
        if CTF:
            # only phaseflip particles, not full CTF correction
            ctf_params = data[im].get_attr("ctf")
            st = Util.infomask(data[im], mask2D, False)
            data[im] -= st[0]
            data[im] = filt_ctf(data[im], ctf_params, sign=-1, binary=1)
            data[im].set_attr('ctf_applied', 1)
        # for window mask:
        if boxmask is True:
            h_angle = data[im].get_attr("h_angle")
            original_data.append(data[im].copy())
            bmask = createBoxMask(nx, apix, ou, lmask, h_angle)
            data[im] *= bmask
            del bmask
    if debug:
        finfo.write('%d loaded  \n' % nima)
        finfo.flush()
    if myid == main_node:
        # initialize data for the reference preparation function
        ref_data = [mask3D, max(center, 0), None, None, None, None]
        # for method -1, switch off centering in user function

    from time import time

    #  this is needed for gathering of pixel errors
    disps = []
    recvcount = []
    disps_score = []
    recvcount_score = []
    for im in xrange(number_of_proc):
        if (im == main_node):
            disps.append(0)
            disps_score.append(0)
        else:
            disps.append(disps[im - 1] + recvcount[im - 1])
            disps_score.append(disps_score[im - 1] + recvcount_score[im - 1])
        ib, ie = MPI_start_end(total_nima, number_of_proc, im)
        recvcount.append(ie - ib)
        recvcount_score.append((ie - ib) * nrefs)
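    # For mpi_gatherv, recvcount/disps hold each rank's element count and its
    # offset in the gathered buffer; the *_score variants carry one entry per
    # particle per reference model.  Hypothetical layout: 3 ranks holding
    # 10/10/8 particles with nrefs=2 gives
    #   recvcount       = [10, 10, 8]    disps       = [0, 10, 20]
    #   recvcount_score = [20, 20, 16]   disps_score = [0, 20, 40]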

    pixer = [0.0] * nima
    cs = [0.0] * 3
    total_iter = 0
    volodd = EMData.read_images(ref_vol, xrange(nrefs))
    voleve = EMData.read_images(ref_vol, xrange(nrefs))

    if restart:
        # recreate initial volumes from alignments stored in header
        itout = "000_00"
        for iref in xrange(nrefs):
            if (nrefs == 1):
                modout = ""
            else:
                modout = "_model_%02d" % (iref)

            if (sort):
                group = iref
                for im in xrange(nima):
                    imgroup = data[im].get_attr('group')
                    if imgroup == iref:
                        data[im].set_attr('xform.projection',
                                          transmulti[im][iref])
            else:
                group = int(999)
                for im in xrange(nima):
                    data[im].set_attr('xform.projection', transmulti[im][iref])

            fscfile = os.path.join(outdir, "fsc_%s%s" % (itout, modout))

            vol[iref], fscc, volodd[iref], voleve[iref] = rec3D_MPI_noCTF(
                data,
                sym,
                fscmask,
                fscfile,
                myid,
                main_node,
                index=group,
                npad=recon_pad)

            if myid == main_node:
                if helicalrecon:
                    from hfunctions import processHelicalVol
                    vstep = None
                    if vertstep is not None:
                        vstep = (vdp[iref], vdphi[iref])
                    print_msg(
                        "Old rise and twist for model %i     : %8.3f, %8.3f\n"
                        % (iref, dp[iref], dphi[iref]))
                    hvals = processHelicalVol(vol[iref], voleve[iref],
                                              volodd[iref], iref, outdir,
                                              itout, dp[iref], dphi[iref],
                                              apix, hsearch, findseam, vstep,
                                              wcmask)
                    (vol[iref], voleve[iref], volodd[iref], dp[iref],
                     dphi[iref], vdp[iref], vdphi[iref]) = hvals
                    print_msg(
                        "New rise and twist for model %i     : %8.3f, %8.3f\n"
                        % (iref, dp[iref], dphi[iref]))
                    # get new FSC from symmetrized half volumes
                    fscc = fsc_mask(volodd[iref], voleve[iref], mask3D, rstep,
                                    fscfile)
                else:
                    vol[iref].write_image(
                        os.path.join(outdir, "vol_%s.hdf" % itout), -1)

                if save_half is True:
                    volodd[iref].write_image(
                        os.path.join(outdir, "volodd_%s.hdf" % itout), -1)
                    voleve[iref].write_image(
                        os.path.join(outdir, "voleve_%s.hdf" % itout), -1)

                if nmasks > 1:
                    # Read mask for multiplying
                    ref_data[0] = maskF[iref]
                ref_data[2] = vol[iref]
                ref_data[3] = fscc
                #  call user-supplied function to prepare reference image, i.e., center and filter it
                vol[iref], cs, fl = ref_ali3d(ref_data)
                vol[iref].write_image(
                    os.path.join(outdir, "volf_%s.hdf" % (itout)), -1)
                if (apix == 1):
                    res_msg = "Models filtered at spatial frequency of:\t"
                    res = fl
                else:
                    res_msg = "Models filtered at resolution of:       \t"
                    res = apix / fl
                ares = array2string(array(res), precision=2)
                print_msg("%s%s\n\n" % (res_msg, ares))

            bcast_EMData_to_all(vol[iref], myid, main_node)
            # write out headers, under MPI writing has to be done sequentially
            mpi_barrier(MPI_COMM_WORLD)

    # projection matching
    for N_step in xrange(lstp):
        terminate = 0
        Iter = -1
        while (Iter < max_iter - 1 and terminate == 0):
            Iter += 1
            total_iter += 1
            itout = "%03g_%02d" % (delta[N_step], Iter)
            if myid == main_node:
                print_msg(
                    "ITERATION #%3d, inner iteration #%3d\nDelta = %4.1f, an = %5.2f, xrange = %5.2f, yrange = %5.2f, step = %5.2f\n\n"
                    % (N_step, Iter, delta[N_step], an[N_step], xrng[N_step],
                       yrng[N_step], step[N_step]))

            for iref in xrange(nrefs):
                if myid == main_node: start_time = time()
                volft, kb = prep_vol(vol[iref])

                ## constrain projections to out of plane parameter
                theta1 = None
                theta2 = None
                if oplane is not None:
                    theta1 = 90 - oplane
                    theta2 = 90 + oplane
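                # theta1/theta2 are handed to prepare_refrings as
                # initial_theta/delta_theta, which appears to restrict the
                # reference projections to theta in [90-oplane, 90+oplane]
                # degrees, i.e. a band around the equatorial views expected
                # for filaments.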
                refrings = prepare_refrings(volft,
                                            kb,
                                            nx,
                                            delta[N_step],
                                            ref_a,
                                            sym,
                                            numr,
                                            MPI=True,
                                            phiEqpsi="Minus",
                                            initial_theta=theta1,
                                            delta_theta=theta2)

                del volft, kb

                if myid == main_node:
                    print_msg(
                        "Time to prepare projections for model %i: %s\n" %
                        (iref, legibleTime(time() - start_time)))
                    start_time = time()

                for im in xrange(nima):
                    data[im].set_attr("xform.projection", transmulti[im][iref])
                    if an[N_step] == -1:
                        t1, peak, pixer[im] = proj_ali_incore(
                            data[im], refrings, numr, xrng[N_step],
                            yrng[N_step], step[N_step], finfo)
                    else:
                        t1, peak, pixer[im] = proj_ali_incore_local(
                            data[im], refrings, numr, xrng[N_step],
                            yrng[N_step], step[N_step], an[N_step], finfo)
                    #data[im].set_attr("xform.projection"%iref, t1)
                    if nrefs > 1:
                        data[im].set_attr("eulers_txty.%i" % iref, t1)
                    scoremulti[im][iref] = peak
                    from pixel_error import max_3D_pixel_error
                    # t1 is the current param, t2 is old
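                    # numr stores [radius, start, length] triplets per ring, so
                    # numr[-3] is the outermost ring radius; the orientation
                    # change from t2 to t1 is converted into a displacement in
                    # pixels at that radius.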
                    t2 = transmulti[im][iref]
                    pixelmulti[im][iref] = max_3D_pixel_error(t1, t2, numr[-3])
                    transmulti[im][iref] = t1

                if myid == main_node:
                    print_msg("Time of alignment for model %i: %s\n" %
                              (iref, legibleTime(time() - start_time)))
                    start_time = time()

            # gather scoring data from all processors
            from mpi import mpi_gatherv
            scoremultisend = sum(scoremulti, [])
            pixelmultisend = sum(pixelmulti, [])
            tmp = mpi_gatherv(scoremultisend, len(scoremultisend), MPI_FLOAT,
                              recvcount_score, disps_score, MPI_FLOAT,
                              main_node, MPI_COMM_WORLD)
            tmp1 = mpi_gatherv(pixelmultisend, len(pixelmultisend), MPI_FLOAT,
                               recvcount_score, disps_score, MPI_FLOAT,
                               main_node, MPI_COMM_WORLD)
            tmp = mpi_bcast(tmp, (total_nima * nrefs), MPI_FLOAT, 0,
                            MPI_COMM_WORLD)
            tmp1 = mpi_bcast(tmp1, (total_nima * nrefs), MPI_FLOAT, 0,
                             MPI_COMM_WORLD)
            tmp = map(float, tmp)
            tmp1 = map(float, tmp1)
            score = array(tmp).reshape(-1, nrefs)
            pixelerror = array(tmp1).reshape(-1, nrefs)
            score_local = array(scoremulti)
            mean_score = score.mean(axis=0)
            std_score = score.std(axis=0)
            cut = mean_score - (cutoff * std_score)
            cut2 = mean_score + (cutoff * std_score)
            res_max = score_local.argmax(axis=1)
            minus_cc = [0.0 for x in xrange(nrefs)]
            minus_pix = [0.0 for x in xrange(nrefs)]
            minus_ref = [0.0 for x in xrange(nrefs)]

            #output pixel errors
            if (myid == main_node):
                from statistics import hist_list
                lhist = 20
                pixmin = pixelerror.min(axis=1)
                region, histo = hist_list(pixmin, lhist)
                if (region[0] < 0.0): region[0] = 0.0
                print_msg(
                    "Histogram of pixel errors\n      ERROR       number of particles\n"
                )
                for lhx in xrange(lhist):
                    print_msg(" %10.3f     %7d\n" % (region[lhx], histo[lhx]))
                # Terminate when more than term percent of particles have pixel error < 1
                im = 0
                for lhx in xrange(lhist):
                    if (region[lhx] > 1.0): break
                    im += histo[lhx]
                print_msg("Percent of particles with pixel error < 1: %f\n\n" %
                          (im / float(total_nima) * 100))
                term_cond = float(term) / 100
                if (im / float(total_nima) > term_cond):
                    terminate = 1
                    print_msg("Terminating internal loop\n")
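                # e.g. term=95 gives term_cond=0.95: once more than 95% of all
                # particles moved by less than one pixel, the inner loop stops
                # (terminate is broadcast to the other ranks below).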
                del region, histo
            terminate = mpi_bcast(terminate, 1, MPI_INT, 0, MPI_COMM_WORLD)
            terminate = int(terminate[0])

            for im in xrange(nima):
                if (sort == False):
                    data[im].set_attr('group', 999)
                elif (mjump[N_step] == 1):
                    data[im].set_attr('group', int(res_max[im]))

                pix_run = data[im].get_attr('pix_score')
                if (pix_cutoff[N_step] == 1
                        and (terminate == 1 or Iter == max_iter - 1)):
                    if (pixelmulti[im][int(res_max[im])] > 1):
                        data[im].set_attr('pix_score', int(777))

                if (score_local[im][int(res_max[im])] < cut[int(
                        res_max[im])]) or (two_tail and score_local[im][int(
                            res_max[im])] > cut2[int(res_max[im])]):
                    data[im].set_attr('group', int(888))
                    minus_cc[int(res_max[im])] = minus_cc[int(res_max[im])] + 1

                if (pix_run == 777):
                    data[im].set_attr('group', int(777))
                    minus_pix[int(
                        res_max[im])] = minus_pix[int(res_max[im])] + 1

                if (compare_ref_free != "-1") and (ref_free_cutoff[N_step] !=
                                                   -1) and (total_iter > 1):
                    id = data[im].get_attr('ID')
                    if id in rejects:
                        data[im].set_attr('group', int(666))
                        minus_ref[int(
                            res_max[im])] = minus_ref[int(res_max[im])] + 1

            minus_cc_tot = mpi_reduce(minus_cc, nrefs, MPI_FLOAT, MPI_SUM, 0,
                                      MPI_COMM_WORLD)
            minus_pix_tot = mpi_reduce(minus_pix, nrefs, MPI_FLOAT, MPI_SUM, 0,
                                       MPI_COMM_WORLD)
            minus_ref_tot = mpi_reduce(minus_ref, nrefs, MPI_FLOAT, MPI_SUM, 0,
                                       MPI_COMM_WORLD)
            if (myid == main_node):
                if (sort):
                    tot_max = score.argmax(axis=1)
                    res = bincount(tot_max)
                else:
                    res = ones(nrefs) * total_nima
                print_msg("Particle distribution:	     \t\t%s\n" % (res * 1.0))
                afcut1 = res - minus_cc_tot
                afcut2 = afcut1 - minus_pix_tot
                afcut3 = afcut2 - minus_ref_tot
                print_msg("Particle distribution after cc cutoff:\t\t%s\n" %
                          (afcut1))
                print_msg("Particle distribution after pix cutoff:\t\t%s\n" %
                          (afcut2))
                print_msg("Particle distribution after ref cutoff:\t\t%s\n\n" %
                          (afcut3))

            res = [0.0 for i in xrange(nrefs)]
            for iref in xrange(nrefs):
                if (center == -1):
                    from utilities import estimate_3D_center_MPI, rotate_3D_shift
                    dummy = EMData()
                    cs[0], cs[1], cs[2], dummy, dummy = estimate_3D_center_MPI(
                        data, total_nima, myid, number_of_proc, main_node)
                    cs = mpi_bcast(cs, 3, MPI_FLOAT, main_node, MPI_COMM_WORLD)
                    cs = [-float(cs[0]), -float(cs[1]), -float(cs[2])]
                    rotate_3D_shift(data, cs)

                if (sort):
                    group = iref
                    for im in xrange(nima):
                        imgroup = data[im].get_attr('group')
                        if imgroup == iref:
                            data[im].set_attr('xform.projection',
                                              transmulti[im][iref])
                else:
                    group = int(999)
                    for im in xrange(nima):
                        data[im].set_attr('xform.projection',
                                          transmulti[im][iref])
                if (nrefs == 1):
                    modout = ""
                else:
                    modout = "_model_%02d" % (iref)

                fscfile = os.path.join(outdir, "fsc_%s%s" % (itout, modout))
                vol[iref], fscc, volodd[iref], voleve[iref] = rec3D_MPI_noCTF(
                    data,
                    sym,
                    fscmask,
                    fscfile,
                    myid,
                    main_node,
                    index=group,
                    npad=recon_pad)

                if myid == main_node:
                    print_msg("3D reconstruction time for model %i: %s\n" %
                              (iref, legibleTime(time() - start_time)))
                    start_time = time()

                # Compute Fourier variance
                if fourvar:
                    outvar = os.path.join(outdir, "volVar_%s.hdf" % (itout))
                    ssnr_file = os.path.join(outdir, "ssnr_%s" % (itout))
                    varf = varf3d_MPI(data,
                                      ssnr_text_file=ssnr_file,
                                      mask2D=None,
                                      reference_structure=vol[iref],
                                      ou=last_ring,
                                      rw=1.0,
                                      npad=1,
                                      CTF=None,
                                      sign=1,
                                      sym=sym,
                                      myid=myid)
                    if myid == main_node:
                        print_msg(
                            "Time to calculate 3D Fourier variance for model %i: %s\n"
                            % (iref, legibleTime(time() - start_time)))
                        start_time = time()
                        varf = 1.0 / varf
                        varf.write_image(outvar, -1)
                else:
                    varf = None
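                # varf (the 3D Fourier variance) is inverted on the main node,
                # presumably so the user-supplied reference function can apply a
                # reciprocal-variance (SSNR-like) weighting when it receives it
                # via ref_data[4].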

                if myid == main_node:
                    if helicalrecon:
                        from hfunctions import processHelicalVol

                        vstep = None
                        if vertstep is not None:
                            vstep = (vdp[iref], vdphi[iref])
                        print_msg(
                            "Old rise and twist for model %i     : %8.3f, %8.3f\n"
                            % (iref, dp[iref], dphi[iref]))
                        hvals = processHelicalVol(vol[iref], voleve[iref],
                                                  volodd[iref], iref, outdir,
                                                  itout, dp[iref], dphi[iref],
                                                  apix, hsearch, findseam,
                                                  vstep, wcmask)
                        (vol[iref], voleve[iref], volodd[iref], dp[iref],
                         dphi[iref], vdp[iref], vdphi[iref]) = hvals
                        print_msg(
                            "New rise and twist for model %i     : %8.3f, %8.3f\n"
                            % (iref, dp[iref], dphi[iref]))
                        # get new FSC from symmetrized half volumes
                        fscc = fsc_mask(volodd[iref], voleve[iref], mask3D,
                                        rstep, fscfile)

                        print_msg(
                            "Time to search and apply helical symmetry for model %i: %s\n\n"
                            % (iref, legibleTime(time() - start_time)))
                        start_time = time()
                    else:
                        vol[iref].write_image(
                            os.path.join(outdir, "vol_%s.hdf" % (itout)), -1)

                    if save_half is True:
                        volodd[iref].write_image(
                            os.path.join(outdir, "volodd_%s.hdf" % (itout)),
                            -1)
                        voleve[iref].write_image(
                            os.path.join(outdir, "voleve_%s.hdf" % (itout)),
                            -1)

                    if nmasks > 1:
                        # Read mask for multiplying
                        ref_data[0] = maskF[iref]
                    ref_data[2] = vol[iref]
                    ref_data[3] = fscc
                    ref_data[4] = varf
                    #  call user-supplied function to prepare reference image, i.e., center and filter it
                    vol[iref], cs, fl = ref_ali3d(ref_data)
                    vol[iref].write_image(
                        os.path.join(outdir, "volf_%s.hdf" % (itout)), -1)
                    if (apix == 1):
                        res_msg = "Models filtered at spatial frequency of:\t"
                        res[iref] = fl
                    else:
                        res_msg = "Models filtered at resolution of:       \t"
                        res[iref] = apix / fl

                del varf
                bcast_EMData_to_all(vol[iref], myid, main_node)

                if compare_ref_free != "-1": compare_repro = True
                if compare_repro:
                    outfile_repro = comp_rep(refrings, data, itout, modout,
                                             vol[iref], group, nima, nx, myid,
                                             main_node, outdir)
                    mpi_barrier(MPI_COMM_WORLD)
                    if compare_ref_free != "-1":
                        ref_free_output = os.path.join(
                            outdir, "ref_free_%s%s" % (itout, modout))
                        rejects = compare(compare_ref_free, outfile_repro,
                                          ref_free_output, yrng[N_step],
                                          xrng[N_step], rstep, nx, apix,
                                          ref_free_cutoff[N_step],
                                          number_of_proc, myid, main_node)

            # retrieve alignment params from all processors
            par_str = ['xform.projection', 'ID', 'group']
            if nrefs > 1:
                for iref in xrange(nrefs):
                    par_str.append('eulers_txty.%i' % iref)

            if myid == main_node:
                from utilities import recv_attr_dict
                recv_attr_dict(main_node, stack, data, par_str, image_start,
                               image_end, number_of_proc)

            else:
                send_attr_dict(main_node, data, par_str, image_start,
                               image_end)

            if myid == main_node:
                ares = array2string(array(res), precision=2)
                print_msg("%s%s\n\n" % (res_msg, ares))
                dummy = EMData()
                if full_output:
                    nimat = EMUtil.get_image_count(stack)
                    output_file = os.path.join(outdir, "paramout_%s" % itout)
                    foutput = open(output_file, 'w')
                    for im in xrange(nimat):
                        # save the parameters for each of the models
                        outstring = ""
                        dummy.read_image(stack, im, True)
                        param3d = dummy.get_attr('xform.projection')
                        g = dummy.get_attr("group")
                        # retrieve alignments in EMAN-format
                        pE = param3d.get_params('eman')
                        outstring += "%f\t%f\t%f\t%f\t%f\t%i\n" % (
                            pE["az"], pE["alt"], pE["phi"], pE["tx"], pE["ty"],
                            g)
                        foutput.write(outstring)
                    foutput.close()
                del dummy
            mpi_barrier(MPI_COMM_WORLD)


#	mpi_finalize()

    if myid == main_node: print_end_msg("ali3d_MPI")
Example #24
0
def run3Dalignment(paramsdict, partids, partstack, outputdir, procid, myid, main_node, nproc):
	#  Reads from paramsdict["stack"] particles partids set parameters in partstack
	#    and do refinement as specified in paramsdict
	#
	#  Will create outputdir
	#  Will write to outputdir output parameters: params-chunk0.txt and params-chunk1.txt
	if(myid == main_node):
		#  Create output directory
		log = Logger(BaseLogger_Files())
		log.prefix = os.path.join(outputdir)
		#cmd = "mkdir "+log.prefix
		#junk = cmdexecute(cmd)
		log.prefix += "/"
	else:  log = None
	mpi_barrier(MPI_COMM_WORLD)

	ali3d_options.delta  = paramsdict["delta"]
	ali3d_options.ts     = paramsdict["ts"]
	ali3d_options.xr     = paramsdict["xr"]
	#  low pass filter is applied to shrank data, so it has to be adjusted
	ali3d_options.fl     = paramsdict["lowpass"]/paramsdict["shrink"]
	ali3d_options.initfl = paramsdict["initialfl"]/paramsdict["shrink"]
	ali3d_options.aa     = paramsdict["falloff"]
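	#  e.g. lowpass=0.12 (in absolute frequency units of the original grid) with
	#  shrink=0.5 becomes fl=0.24 on the resampled grid, i.e. the same physical
	#  resolution expressed relative to the new, larger pixel size.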
	ali3d_options.maxit  = paramsdict["maxit"]
	ali3d_options.mask3D = paramsdict["mask3D"]
	ali3d_options.an	 = paramsdict["an"]
	ali3d_options.ou     = paramsdict["radius"]  #  This is changed in ali3d_base, but the shrunken value is needed in vol recons, fix it!
	shrinkage            = paramsdict["shrink"]

	projdata = getindexdata(paramsdict["stack"], partids, partstack, myid, nproc)
	onx = projdata[0].get_xsize()
	last_ring = ali3d_options.ou
	if last_ring < 0:	last_ring = int(onx/2) - 2
	mask2D  = model_circle(last_ring,onx,onx) - model_circle(ali3d_options.ir,onx,onx)
	if(shrinkage < 1.0):
		# get the new size
		masks2D = resample(mask2D, shrinkage)
		nx = masks2D.get_xsize()
		masks2D  = model_circle(int(last_ring*shrinkage+0.5),nx,nx) - model_circle(max(int(ali3d_options.ir*shrinkage+0.5),1),nx,nx)
	nima = len(projdata)
	oldshifts = [[0.0,0.0] for i in xrange(nima)]  # per-particle [sx,sy] shifts, restored after alignment
	for im in xrange(nima):
		#data[im].set_attr('ID', list_of_particles[im])
		ctf_applied = projdata[im].get_attr_default('ctf_applied', 0)
		phi,theta,psi,sx,sy = get_params_proj(projdata[im])
		projdata[im] = fshift(projdata[im], sx, sy)
		set_params_proj(projdata[im],[phi,theta,psi,0.0,0.0])
		#  For local SHC set anchor
		#if(nsoft == 1 and an[0] > -1):
		#	set_params_proj(data[im],[phi,tetha,psi,0.0,0.0], "xform.anchor")
		oldshifts[im] = [sx,sy]
		if ali3d_options.CTF :
			ctf_params = projdata[im].get_attr("ctf")
			if ctf_applied == 0:
				st = Util.infomask(projdata[im], mask2D, False)
				projdata[im] -= st[0]
				projdata[im] = filt_ctf(projdata[im], ctf_params)
				projdata[im].set_attr('ctf_applied', 1)
		if(shrinkage < 1.0):
			#phi,theta,psi,sx,sy = get_params_proj(projdata[im])
			projdata[im] = resample(projdata[im], shrinkage)
			st = Util.infomask(projdata[im], None, True)
			projdata[im] -= st[0]
			st = Util.infomask(projdata[im], masks2D, True)
			projdata[im] /= st[1]
			#sx *= shrinkage
			#sy *= shrinkage
			#set_params_proj(projdata[im], [phi,theta,psi,sx,sy])
			if ali3d_options.CTF :
				ctf_params.apix /= shrinkage
				projdata[im].set_attr('ctf', ctf_params)
		else:
			st = Util.infomask(projdata[im], None, True)
			projdata[im] -= st[0]
			st = Util.infomask(projdata[im], mask2D, True)
			projdata[im] /= st[1]
	del mask2D
	if(shrinkage < 1.0): del masks2D

	"""
	if(paramsdict["delpreviousmax"]):
		for i in xrange(len(projdata)):
			try:  projdata[i].del_attr("previousmax")
			except:  pass
	"""
	if(myid == main_node):
		print_dict(paramsdict,"3D alignment parameters")
		print("                    =>  actual lowpass      :  ",ali3d_options.fl)
		print("                    =>  actual init lowpass :  ",ali3d_options.initfl)
		print("                    =>  PW adjustment       :  ",ali3d_options.pwreference)
		print("                    =>  partids             :  ",partids)
		print("                    =>  partstack           :  ",partstack)
		
	if(ali3d_options.fl > 0.46):  ERROR("Low pass filter in 3D alignment > 0.46 on the scale of shrank data","sxcenter_projections",1,myid) 

	#  Run alignment command, it returns params per CPU
	params = center_projections_3D(projdata, paramsdict["refvol"], \
									ali3d_options, onx, shrinkage, \
									mpi_comm = MPI_COMM_WORLD,  myid = myid, main_node = main_node, log = log )
	del log, projdata

	params = wrap_mpi_gatherv(params, main_node, MPI_COMM_WORLD)

	#  store params
	if(myid == main_node):
		for im in xrange(nima):
			params[im][0] = params[im][0]/shrinkage +oldshifts[im][0]
			params[im][1] = params[im][1]/shrinkage +oldshifts[im][1]
		line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
		print(line,"Executed successfully: ","3D alignment","  number of images:%7d"%len(params))
		write_text_row(params, os.path.join(outputdir,"params.txt") )
Example #25
0
File: sxpetite.py  Project: cpsemmens/eman2
def main():

	from utilities import write_text_row, drop_image, model_gauss_noise, get_im, set_params_proj, wrap_mpi_bcast, model_circle
	import user_functions
	from applications import MPI_start_end
	from optparse import OptionParser
	from global_def import SPARXVERSION
	from EMAN2 import EMData
	from multi_shc import multi_shc, do_volume
	from logger import Logger, BaseLogger_Files
	import sys
	import os
	import time
	import socket

	progname = os.path.basename(sys.argv[0])
	usage = progname + " stack  [output_directory]  initial_volume  --ir=inner_radius --ou=outer_radius --rs=ring_step --xr=x_range --yr=y_range  --ts=translational_search_step  --delta=angular_step --an=angular_neighborhood  --center=center_type --fl --aa --ref_a=S --sym=c1"
	parser = OptionParser(usage,version=SPARXVERSION)
	parser.add_option("--ir",      		type= "int",   default= 1,			help="inner radius for rotational correlation > 0 (set to 1)")
	parser.add_option("--ou",      		type= "int",   default= -1,			help="outer radius for rotational correlation < int(nx/2)-1 (set to the radius of the particle)")
	parser.add_option("--rs",      		type= "int",   default= 1,			help="step between rings in rotational correlation >0  (set to 1)" ) 
	parser.add_option("--xr",      		type="string", default= "-1",		help="range for translation search in x direction, search is +/-xr (default 0)")
	parser.add_option("--yr",      		type="string", default= "-1",		help="range for translation search in y direction, search is +/-yr (default = same as xr)")
	parser.add_option("--ts",      		type="string", default= "1",		help="step size of the translation search in both directions, search is -xr, -xr+ts, 0, xr-ts, xr, can be fractional")
	parser.add_option("--delta",   		type="string", default= "-1",		help="angular step of reference projections during initialization step (default automatically selected based on radius of the structure.)")
	#parser.add_option("--an",      	type="string", default= "-1",		help="angular neighborhood for local searches (phi and theta)")
	parser.add_option("--center",  		type="float",  default= -1,			help="-1: average shift method; 0: no centering; 1: center of gravity (default=-1)")
	parser.add_option("--maxit",   		type="int",  	default= 400,		help="maximum number of iterations performed for the GA part (set to 400) ")
	parser.add_option("--outlier_percentile",type="float",    default= 95,	help="percentile above which outliers are removed every iteration")
	parser.add_option("--iteration_start",type="int",    default= 0,		help="starting iteration for rviper, 0 means go to the most recent one (default).")
	parser.add_option("--CTF",     		action="store_true", default=False,	help="Use CTF (Default no CTF correction)")
	parser.add_option("--snr",     		type="float",  default= 1.0,		help="Signal-to-Noise Ratio of the data (default 1.0)")
	parser.add_option("--ref_a",   		type="string", default= "S",		help="method for generating the quasi-uniformly distributed projection directions (default S)")
	parser.add_option("--sym",     		type="string", default= "c1",		help="symmetry of the refined structure")
	parser.add_option("--npad",    		type="int",    default= 2,			help="padding size for 3D reconstruction (default=2)")
	parser.add_option("--startangles",  action="store_true", default=False,	help="Use orientation parameters in the input file header to jumpstart the procedure")

	#options introduced for the do_volume function
	parser.add_option("--fl",			type="float",	default=0.12,		help="cut-off frequency of hyperbolic tangent low-pass Fourier filter (default 0.12)")
	parser.add_option("--aa",			type="float",	default=0.1,		help="fall-off of hyperbolic tangent low-pass Fourier filter (default 0.1)")
	parser.add_option("--pwreference",	type="string",	default="",			help="text file with a reference power spectrum (default no power spectrum adjustment)")
	parser.add_option("--mask3D",		type="string",	default=None,		help="3D mask file (default a sphere  WHAT RADIUS??)")
			


	(options, args) = parser.parse_args(sys.argv[1:])

	#print( "  args  ",args)
	if( len(args) == 3):
		volinit = args[2]
		masterdir = args[1]
	elif(len(args) == 2):
		volinit = args[1]
		masterdir = ""
	else:
		print( "usage: " + usage)
		print( "Please run '" + progname + " -h' for detailed options")
		return 1

	orgstack = args[0]
	#print(  orgstack,masterdir,volinit )

	#  INPUT PARAMETERS
	radi  = options.ou
	global_def.BATCH = True
	ali3d_options.ir     = options.ir
	ali3d_options.rs     = options.rs
	ali3d_options.ou     = options.ou
	ali3d_options.xr     = options.xr
	ali3d_options.yr     = options.yr
	ali3d_options.ts     = options.ts
	ali3d_options.an     = "-1"
	ali3d_options.sym    = options.sym
	ali3d_options.delta  = options.delta
	ali3d_options.npad   = options.npad
	ali3d_options.center = options.center
	ali3d_options.CTF    = options.CTF
	ali3d_options.ref_a  = options.ref_a
	ali3d_options.snr    = options.snr
	ali3d_options.mask3D = options.mask3D
	ali3d_options.pwreference = options.pwreference
	ali3d_options.fl     = 0.4
	ali3d_options.aa     = 0.1

	if( ali3d_options.xr == "-1" ):  ali3d_options.xr = "2"
	"""
	print( options)

	print( 'ali3d_options',  ali3d_options.ir    ,\
	ali3d_options.rs        ,\
	ali3d_options.ou        ,\
	ali3d_options.xr        ,\
	ali3d_options.yr        ,\
	ali3d_options.ts        ,\
	ali3d_options.an        ,\
	ali3d_options.sym       ,\
	ali3d_options.delta     ,\
	ali3d_options.npad      ,\
	ali3d_options.center    ,\
	ali3d_options.CTF       ,\
	ali3d_options.ref_a     ,\
	ali3d_options.snr       ,\
	ali3d_options.mask3D    ,\
	ali3d_options.fl        ,\
	ali3d_options.aa    \
	)

		#exit()
"""



	mpi_init(0, [])



	nproc     = mpi_comm_size(MPI_COMM_WORLD)
	myid      = mpi_comm_rank(MPI_COMM_WORLD)
	main_node = 0

	#mpi_finalize()
	#exit()

	nxinit = -1  #int(280*0.3*2)
	nsoft = 0

	mempernode = 4.0e9


	#  PARAMETERS OF THE PROCEDURE 
	#  threshold error
	thresherr = 0
	fq = 0.11 # low-freq limit to which fuse ref volumes.  Should it be estimated somehow?

	# Get the pixel size, if none set to 1.0, and the original image size
	if(myid == main_node):
		a = get_im(orgstack)
		nnxo = a.get_xsize()
		if ali3d_options.CTF:
			i = a.get_attr('ctf')
			pixel_size = i.apix
		else:
			pixel_size = 1.0
		del a
	else:
		nnxo = 0
		pixel_size = 1.0
	pixel_size = bcast_number_to_all(pixel_size, source_node = main_node)
	nnxo = bcast_number_to_all(nnxo, source_node = main_node)

	if(radi < 1):  radi = nnxo//2-2
	elif((2*radi+2)>nnxo):  ERROR("HERE","particle radius set too large!",1)
	ali3d_options.ou = radi
	if(nxinit < 0):  nxinit = min(32, nnxo)

	nxshrink = nxinit
	minshrink = 32.0/float(nnxo)
	shrink = max(float(nxshrink)/float(nnxo),minshrink)

	#  MASTER DIRECTORY
	if(myid == main_node):
		print( "   masterdir   ",masterdir)
		if( masterdir == ""):
			timestring = strftime("_%d_%b_%Y_%H_%M_%S", localtime())
			masterdir = "master"+timestring
			li = len(masterdir)
			cmd = "{} {}".format("mkdir", masterdir)
			cmdexecute(cmd)
			keepchecking = 0
		else:
			li = 0
			keepchecking = 1
	else:
		li = 0
		keepchecking = 1

	li = mpi_bcast(li,1,MPI_INT,main_node,MPI_COMM_WORLD)[0]

	if( li > 0 ):
		masterdir = mpi_bcast(masterdir,li,MPI_CHAR,main_node,MPI_COMM_WORLD)
		masterdir = string.join(masterdir,"")

	#  create a vstack from input stack to the local stack in masterdir
	#  Stack name set to default
	stack = "bdb:"+masterdir+"/rdata"
	# Initialization of stacks
	if(myid == main_node):
		if keepchecking:
			if(os.path.exists(os.path.join(masterdir,"EMAN2DB/rdata.bdb"))):  doit = False
			else:  doit = True
		else:  doit = True
		if  doit:
			if(orgstack[:4] == "bdb:"):	cmd = "{} {} {}".format("e2bdb.py", orgstack,"--makevstack="+stack)
			else:  cmd = "{} {} {}".format("sxcpy.py", orgstack, stack)
			cmdexecute(cmd)
			cmd = "{} {}".format("sxheader.py  --consecutive  --params=originalid", stack)
			cmdexecute(cmd)
			keepchecking = False
		total_stack = EMUtil.get_image_count(stack)
		junk = get_im(stack)
		nnxo = junk.get_xsize()
		del junk
	else:
		total_stack = 0
		nnxo = 0

	total_stack = bcast_number_to_all(total_stack, source_node = main_node)
	nnxo        = bcast_number_to_all(nnxo, source_node = main_node)

	#  INITIALIZATION

	#  Run exhaustive projection matching to get initial orientation parameters
	#  Estimate initial resolution
	initdir = os.path.join(masterdir,"main000")
	#  make sure the initial volume is not set to zero outside of the mask, as that would crash the program
	if( myid == main_node and (not options.startangles)):
		viv = get_im(volinit)
		if(options.mask3D == None):  mask33d = model_circle(radi,nnxo,nnxo,nnxo)
		else:  mask33d = (options.mask3D).copy()
		st = Util.infomask(viv, mask33d, False)
		if( st[0] == 0.0 ):
			viv += (model_blank(nnxo,nnxo,nnxo,1.0) - mask33d)*model_gauss_noise(st[1]/1000.0,nnxo,nnxo,nnxo)
			viv.write_image(volinit)
		del mask33d, viv

	doit, keepchecking = checkstep(initdir, keepchecking, myid, main_node)
	if  doit:
		partids = os.path.join(masterdir, "ids.txt")
		partstack = os.path.join(masterdir, "paramszero.txt")
		xr = min(8,(nnxo - (2*radi+1))//2)
		if(xr > 3):  ts = "2"
		else:  ts = "1"

		delta = int(options.delta)
		if(delta <= 0.0):
			delta = "%f"%round(degrees(atan(1.0/float(radi))), 2)

		paramsdict = {	"stack":stack,"delta":"2.0", "ts":ts, "xr":"%f"%xr, "an":"-1", "center":options.center, "maxit":1, \
						"currentres":0.4, "aa":0.1, "radius":radi, "nsoft":0, "delpreviousmax":True, "shrink":1.0, "saturatecrit":1.0, \
						"refvol":volinit, "mask3D":options.mask3D}

		if(options.startangles):

			if( myid == main_node ):
				cmd = "mkdir "+initdir
				cmdexecute(cmd)
				line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
				print(line,"INITIALIZATION")
				cmd = "{} {}".format("sxheader.py --params=xform.projection  --export="+os.path.join(initdir,"params-chunk0.txt"), stack)
				cmdexecute(cmd)
				print(line,"Executed successfully: ","Imported initial parameters from the input stack")

		else:
	
			if( myid == main_node ):
				line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
				print(line,"INITIALIZATION")
				write_text_file(range(total_stack), partids)
				write_text_row([[0.0,0.0,0.0,0.0,0.0] for i in xrange(total_stack) ], partstack)

			metamove(paramsdict, partids, partstack, initdir, 0, myid, main_node, nproc)
			if(myid == main_node):
				print(line,"Executed successfully: ","initialization ali3d_base_MPI  %d"%nsoft)

			

		#  store params
		partids = [None]*2
		for procid in xrange(2):  partids[procid] = os.path.join(initdir,"chunk%01d.txt"%procid)
		partstack = [None]*2
		for procid in xrange(2):  partstack[procid] = os.path.join(initdir,"params-chunk%01d.txt"%procid)
		from random import shuffle
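		#  The particle set is split at random into two halves (chunk0/chunk1);
		#  each half is refined and reconstructed independently, and the FSC
		#  between the two half-volumes is used later to estimate the current
		#  resolution.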
		if(myid == main_node):
			#  split randomly
			params = read_text_row(os.path.join(initdir,"params-chunk0.txt"))
			assert(len(params) == total_stack)
			ll = range(total_stack)
			shuffle(ll)
			l1 = ll[:total_stack//2]
			l2 = ll[total_stack//2:]
			del ll
			l1.sort()
			l2.sort()
			write_text_file(l1,partids[0])
			write_text_file(l2,partids[1])
			write_text_row([params[i] for i in l1], partstack[0])
			write_text_row([params[i] for i in l2], partstack[1])
			del params, l1, l2
		mpi_barrier(MPI_COMM_WORLD)

		#  Now parallel
		vol = [None]*2
		for procid in xrange(2):
			projdata = getindexdata(stack, partids[procid], partstack[procid], myid, nproc)
			if ali3d_options.CTF:  vol[procid] = recons3d_4nn_ctf_MPI(myid, projdata, symmetry=ali3d_options.sym, npad = 2)
			else:                  vol[procid] = recons3d_4nn_MPI(myid, projdata, symmetry=ali3d_options.sym, npad = 2)
			del projdata
			if( myid == main_node):
				vol[procid].write_image(os.path.join(initdir,"vol%01d.hdf"%procid) )
				line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
				print(  line,"Generated inivol #%01d "%procid)


		if(myid == main_node):
			currentres = get_resolution(vol, radi, nnxo, initdir)		
			line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
			print(  line,"Initial resolution %6.4f"%currentres)
			write_text_file([currentres],os.path.join(initdir,"current_resolution.txt"))
		else:  currentres = 0.0
		currentres = bcast_number_to_all(currentres, source_node = main_node)
	else:
		if(myid == main_node): currentres = read_text_file(os.path.join(initdir,"current_resolution.txt"))[0]		
		else:  currentres = 0.0
		currentres = bcast_number_to_all(currentres, source_node = main_node)

	# set for the first iteration
	nxshrink = min(max(32, int((currentres+paramsdict["aa"]/2.)*2*nnxo + 0.5)), nnxo)
	shrink = float(nxshrink)/nnxo
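	#  e.g. (hypothetical values) currentres=0.15, aa=0.1, nnxo=200:
	#  nxshrink = min(max(32, int((0.15 + 0.05)*2*200 + 0.5)), 200) = 80
	#  shrink   = 80/200 = 0.4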
	tracker = {"previous-resolution":currentres, "movedup":False,"eliminated-outliers":False,\
				"previous-nx":nxshrink, "previous-shrink":shrink, "extension":0, "bestsolution":0}
	
	previousoutputdir = initdir
	#  MAIN ITERATION
	mainiteration = 0
	keepgoing = 1
	while(keepgoing):
		mainiteration += 1


		#  prepare output directory
		mainoutputdir = os.path.join(masterdir,"main%03d"%mainiteration)

		if(myid == main_node):
			line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
			print(line,"MAIN ITERATION #",mainiteration, shrink, nxshrink)
			if keepchecking:
				if(os.path.exists(mainoutputdir)):
					doit = 0
					print("Directory  ",mainoutputdir,"  exists!")
				else:
					doit = 1
					keepchecking = False
			else:
				doit = 1

			if doit:
				cmd = "{} {}".format("mkdir", mainoutputdir)
				cmdexecute(cmd)

		# prepare names of input file names, they are in main directory, 
		#   log subdirectories contain outputs from specific refinements
		partids = [None]*2
		for procid in xrange(2):  partids[procid] = os.path.join(previousoutputdir,"chunk%01d.txt"%procid)
		partstack = [None]*2
		for procid in xrange(2):  partstack[procid] = os.path.join(previousoutputdir,"params-chunk%01d.txt"%procid)

		mpi_barrier(MPI_COMM_WORLD)


		#mpi_finalize()
		#exit()

		#print("RACING  A ",myid)
		outvol = [os.path.join(previousoutputdir,"vol%01d.hdf"%procid) for procid in xrange(2)]
		for procid in xrange(2):
			doit, keepchecking = checkstep(outvol[procid], keepchecking, myid, main_node)

			if  doit:
				from multi_shc import do_volume
				projdata = getindexdata(stack, partids[procid], partstack[procid], myid, nproc)
				if ali3d_options.CTF:  vol = recons3d_4nn_ctf_MPI(myid, projdata, symmetry=ali3d_options.sym, npad = 2)
				else:                  vol = recons3d_4nn_MPI(myid, projdata, symmetry=ali3d_options.sym, npad = 2)
				del projdata
				if( myid == main_node):
					vol.write_image(outvol[procid])
					line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
					print(  line,"Generated inivol #%01d "%procid)
				del vol

		if(myid == main_node):
			if keepchecking:
				procid = 1
				if(os.path.exists(os.path.join(mainoutputdir,"fusevol%01d.hdf"%procid))):
					doit = 0
				else:
					doit = 1
					keepchecking = False
			else:
				doit = 1
			if doit:
				vol = [get_im(outvol[procid]) for procid in xrange(2) ]
				fq = 0.11 # which part to fuse
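				#  fuselowf presumably averages the Fourier components of the two
				#  half-volumes below frequency fq, so both subsequent refinements
				#  start from a common low-frequency core while remaining
				#  independent at higher frequencies.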
				fuselowf(vol, fq)
				for procid in xrange(2):  vol[procid].write_image(os.path.join(mainoutputdir,"fusevol%01d.hdf"%procid) )
				del vol
		else:  doit = 0
		mpi_barrier(MPI_COMM_WORLD)
		doit = bcast_number_to_all(doit, source_node = main_node)

		#  Refine two groups at a current resolution
		lastring = int(shrink*radi + 0.5)
		if(lastring < 2):
			print(  line,"ERROR!!   lastring too small  ", radi, shrink, lastring)
			break

		#  REFINEMENT
		#  Part "a"  SHC
		for procid in xrange(2):
			coutdir = os.path.join(mainoutputdir,"loga%01d"%procid)
			doit, keepchecking = checkstep(coutdir  , keepchecking, myid, main_node)

			paramsdict = {	"stack":stack,"delta":"%f"%round(degrees(atan(1.0/lastring)), 2) , "ts":"1", "xr":"2", "an":"-1", "center":options.center, "maxit":1500,  \
							"currentres":currentres, "aa":0.1, "radius":radi, "nsoft":1, "saturatecrit":0.75, "delpreviousmax":True, "shrink":shrink, \
							"refvol":os.path.join(mainoutputdir,"fusevol%01d.hdf"%procid),"mask3D":options.mask3D }

			if  doit:

				metamove(paramsdict, partids[procid], partstack[procid], coutdir, procid, myid, main_node, nproc)

		partstack = [None]*2
		for procid in xrange(2):  partstack[procid] = os.path.join(mainoutputdir, "loga%01d"%procid, "params-chunk%01d.txt"%procid)

		for procid in xrange(2):
			outvol = os.path.join(mainoutputdir,"loga%01d"%procid,"shcvol%01d.hdf"%procid)
			doit, keepchecking = checkstep(outvol, keepchecking, myid, main_node)

			if  doit:
				from multi_shc import do_volume
				projdata = getindexdata(stack, partids[procid], partstack[procid], myid, nproc)
				if ali3d_options.CTF:  vol = recons3d_4nn_ctf_MPI(myid, projdata, symmetry=ali3d_options.sym, npad = 2)
				else:                  vol = recons3d_4nn_MPI(myid, projdata, symmetry=ali3d_options.sym, npad = 2)
				del projdata
				if( myid == main_node):
					vol.write_image(outvol)
					line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
					print(  line,"Generated shcvol #%01d "%procid)
				del vol

		if(myid == main_node):
			if keepchecking:
				procid = 1
				if(os.path.exists(os.path.join(mainoutputdir,"loga%01d"%procid,"fusevol%01d.hdf"%procid))):
					doit = 0
				else:
					doit = 1
					keepchecking = False
			else:
				doit = 1
			if doit:
				vol = []
				for procid in xrange(2):  vol.append(get_im(os.path.join(mainoutputdir,"loga%01d"%procid,"shcvol%01d.hdf"%procid) ))
				fq = 0.11 # which part to fuse
				fuselowf(vol, fq)
				for procid in xrange(2):  vol[procid].write_image( os.path.join(mainoutputdir,"loga%01d"%procid,"fusevol%01d.hdf"%procid) )
				del vol
		else:  doit = 0
		mpi_barrier(MPI_COMM_WORLD)
		doit = bcast_number_to_all(doit, source_node = main_node)


		#  Part "b"  deterministic			
		partstack = [None]*2
		for procid in xrange(2):  partstack[procid] = os.path.join(mainoutputdir,"loga%01d"%procid,"params-chunk%01d.txt"%procid)

		for procid in xrange(2):
			coutdir = os.path.join(mainoutputdir,"logb%01d"%procid)
			doit, keepchecking = checkstep(coutdir, keepchecking, myid, main_node)

			#  Run exhaustive to finish up matching
			paramsdict = {	"stack":stack,"delta":"%f"%round(degrees(atan(1.0/lastring)), 2) , "ts":"1", "xr":"2", "an":"-1", "center":options.center, "maxit":10, \
							"currentres":currentres, "aa":0.1, "radius":radi, "nsoft":0, "saturatecrit":0.95, "delpreviousmax":True, "shrink":shrink, \
							"refvol":os.path.join(mainoutputdir,"loga%01d"%procid,"fusevol%01d.hdf"%procid), "mask3D":options.mask3D }

			if  doit:
				metamove(paramsdict, partids[procid], partstack[procid], coutdir, procid, myid, main_node, nproc)

		partstack = [None]*2
		for procid in xrange(2):  partstack[procid] = os.path.join(mainoutputdir,"logb%01d"%procid,"params-chunk%01d.txt"%procid)

		#  Compute current resolution, store result in main directory
		doit, keepchecking = checkstep(os.path.join(mainoutputdir,"current_resolution.txt"), keepchecking, myid, main_node)
		newres = 0.0
		if doit:
			newres = compute_resolution(stack, mainoutputdir, partids, partstack, radi, nnxo, ali3d_options.CTF, myid, main_node, nproc)
		else:
			if(myid == main_node): newres = read_text_file( os.path.join(mainoutputdir,"current_resolution.txt") )[0]		
		newres = bcast_number_to_all(newres, source_node = main_node)

		#  Here I have code to generate presentable results.  IDs and params have to be merged and stored and an overall volume computed.
		doit, keepchecking = checkstep(os.path.join(mainoutputdir,"volf.hdf"), keepchecking, myid, main_node)
		if  doit:
			if( myid == main_node ):
				pinids = map(int, read_text_file(partids[0]) ) + map(int, read_text_file(partids[1]) )
				params = read_text_row(partstack[0]) + read_text_row(partstack[1])

				assert(len(pinids) == len(params))

				for i in xrange(len(pinids)):
					pinids[i] = [ pinids[i], params[i] ]
				del params
				pinids.sort()

				write_text_file([pinids[i][0] for i in xrange(len(pinids))], os.path.join(mainoutputdir,"indexes.txt"))
				write_text_row( [pinids[i][1] for i in xrange(len(pinids))], os.path.join(mainoutputdir,"params.txt"))
			mpi_barrier(MPI_COMM_WORLD)
			ali3d_options.fl = newres
			ali3d_options.ou = radi
			projdata = getindexdata(stack, os.path.join(mainoutputdir,"indexes.txt"), os.path.join(mainoutputdir,"params.txt"), myid, nproc)
			volf = do_volume(projdata, ali3d_options, mainiteration, mpi_comm = MPI_COMM_WORLD)
			if(myid == main_node): volf.write_image(os.path.join(mainoutputdir,"volf.hdf"))

		mpi_barrier(MPI_COMM_WORLD)

		#print("RACING  X ",myid)
		if(newres == currentres):

			for procid in xrange(2):
				coutdir = os.path.join(mainoutputdir,"logc%01d"%procid)
				doit, keepchecking = checkstep(coutdir, keepchecking, myid, main_node)

				if  doit:
					#  Do cross-check of the results
					paramsdict = {	"stack":stack,"delta":"%f"%round(degrees(atan(1.0/lastring)), 2) , "ts":"1", "xr":"2", "an":"-1", "center":options.center, "maxit":1,  \
									"currentres":newres, "aa":0.1, "radius":radi, "nsoft":0, "saturatecrit":0.95, "delpreviousmax":True, "shrink":shrink, \
									"refvol":os.path.join(mainoutputdir,"vol%01d.hdf"%(1-procid)), "mask3D":options.mask3D }
					#  The cross-check uses parameters from step "b" to make sure shifts are correct.  
					#  As the check is exhaustive, angles are ignored
					metamove(paramsdict, partids[procid], partstack[procid], coutdir, procid, myid, main_node, nproc)

			# identify bad apples
			doit, keepchecking = checkstep(os.path.join(mainoutputdir,"badapples.txt"), keepchecking, myid, main_node)
			if  doit:
				if(myid == main_node):
					from utilities import get_symt
					from pixel_error import max_3D_pixel_error
					ts = get_symt(ali3d_options.sym)
					badapples = []
					deltaerror = 2.0
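					#  deltaerror is the rejection threshold in pixels (on the
					#  shrunken grid, evaluated at radius lastring): particles whose
					#  orientation changed by more than this between the step "b"
					#  parameters and the cross-check are discarded as unreliable.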
					total_images_now = 0
					for procid in xrange(2):
						bad = []
						ids  = map(int,read_text_file( partids[procid] ))
						total_images_now += len(ids)
						oldp = read_text_row(partstack[procid])
						newp = read_text_row(os.path.join(mainoutputdir,"logc%01d"%procid,"params-chunk%01d.txt"%procid))

						for i in xrange(len(ids)):
							t1 = Transform({"type":"spider","phi":oldp[i][0],"theta":oldp[i][1],"psi":oldp[i][2]})
							t1.set_trans(Vec2f(-oldp[i][3]*shrink, -oldp[i][4]*shrink))
							t2 = Transform({"type":"spider","phi":newp[i][0],"theta":newp[i][1],"psi":newp[i][2]})
							t2.set_trans(Vec2f(-newp[i][3]*shrink, -newp[i][4]*shrink))
							if(len(ts) > 1):
								# only do it if it is not c1
								pixel_error = +1.0e23
								for kts in ts:
									ut = t2*kts
									# we do not care which position minimizes the error
									pixel_error = min(max_3D_pixel_error(t1, ut, lastring), pixel_error)
							else:
								pixel_error = max_3D_pixel_error(t1, t2, lastring)

							if(pixel_error > deltaerror):
								bad.append(i)
						if(len(bad)>0):
							badapples += [ids[bad[i]] for i in xrange(len(bad))]
							for i in xrange(len(bad)-1,-1,-1):
								del oldp[bad[i]],ids[bad[i]]
						if(len(ids) == 0):
							ERROR("program diverged, all images have large angular errors, most likely the initial model is badly off","sxpetite",1)
						else:
							#  This generate new parameters, hopefully to be used as starting ones in the new iteration
							write_text_file(ids,os.path.join(mainoutputdir,"chunk%01d.txt"%procid))
							write_text_row(oldp,os.path.join(mainoutputdir,"params-chunk%01d.txt"%procid))
					if(len(badapples)>0):
						badapples.sort()
						write_text_file(badapples,os.path.join(mainoutputdir,"badapples.txt"))
						eli = 100*float(len(badapples))/float(total_images_now)
						line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
						print(line,"Elimination of outliers: %5.1f percent"%eli )
					else:  eli = 0.0
					del badapples, oldp,ids,bad,newp,ts
				else:  eli =0.0
				eli = bcast_number_to_all(eli, source_node = main_node)
				
				#  This part under MPI
				if(eli > 0.0):
					#  Compute current resolution
					depres = compute_resolution(stack, mainoutputdir, \
							[os.path.join(mainoutputdir,"chunk%01d.txt"%procid) for procid in xrange(2)], \
							[os.path.join(mainoutputdir,"params-chunk%01d.txt"%procid) for procid in xrange(2)], \
							radi, nnxo, ali3d_options.CTF, myid, main_node, nproc)
					depres = bcast_number_to_all(depres, source_node = main_node)
					if(depres < newres):
						#  elimination of outliers decreased resolution, ignore the effort
						eliminated_outliers = False
					else:
						eliminated_outliers = True
						newres = depres
						"""
						#  It does not seem to be needed, as data is there, we just point to the directory
						for procid in xrange(2):
							#  set pointers to current parameters in main, which are for the reduced set stored above
							partids[procid]   = os.path.join(mainoutputdir,"chunk%01d.txt"%procid
							partstack[procid] = os.path.join(mainoutputdir,"params-chunk%01d.txt"%procid)
						"""
				else:
					eliminated_outliers = False
		else:
			eliminated_outliers = False

		if(myid == main_node and not eliminated_outliers):
			for procid in xrange(2):
				#  This is standard path, copy parameters to be used to the main
				cmd = "{} {} {}".format("cp -p ", partids[procid] , os.path.join(mainoutputdir,"chunk%01d.txt"%procid))
				cmdexecute(cmd)
				cmd = "{} {} {}".format("cp -p ", partstack[procid], os.path.join(mainoutputdir,"params-chunk%01d.txt"%procid))
				cmdexecute(cmd)

		keepgoing = 0
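		#  Decision logic (summary of the branches below): if the resolution improved, or outliers were
		#  eliminated for the first time, enlarge the working window and continue; if the resolution dropped,
		#  fall back to the best previous main directory and reduce the extension; if it stayed the same,
		#  relax the extension once and stop when the working image size has already reached nnxo.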
		if( newres > currentres or (eliminated_outliers and not tracker["eliminated-outliers"])):
			if(myid == main_node):  print("  Resolution improved, full steam ahead!")

			if( newres > currentres ):  tracker["movedup"] = True
			else:   tracker["movedup"] = False
			shrink = max(min(2*newres + paramsdict["aa"], 1.0),minshrink)
			tracker["extension"] = 4
			nxshrink = min(int(nnxo*shrink + 0.5) + tracker["extension"],nnxo)
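			#  Hypothetical numbers for illustration: newres = 0.30, aa = 0.1, minshrink = 0.3, nnxo = 256
			#  give shrink = max(min(0.70, 1.0), 0.3) = 0.70 and nxshrink = min(int(256*0.70 + 0.5) + 4, 256) = 183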
			tracker["previous-resolution"] = newres
			currentres = newres
			tracker["bestsolution"] = mainiteration
			bestoutputdir = mainoutputdir
			tracker["eliminated-outliers"] = eliminated_outliers
			keepgoing = 1
		
		elif(newres < currentres):
			if(not tracker["movedup"] and tracker["extension"] < 2 and mainiteration > 1):
				keepgoing = 0
				if(myid == main_node):  print("  Cannot improve resolution, the best result is in the directory main%03d"%tracker["bestsolution"])
			else:
				if(not tracker["movedup"] and tracker["extension"] > 1 and mainiteration > 1):
					if(myid == main_node):  print("  Resolution decreased.  Will decrease target resolution and will fall back on the best so far:  main%03d"%tracker["bestsolution"])
					bestoutputdir = os.path.join(masterdir,"main%03d"%tracker["bestsolution"])
				elif( tracker["movedup"] and tracker["extension"] > 1 and mainiteration > 1):
					if(myid == main_node):  print("  Resolution decreased.  Will decrease target resolution and will try starting from previous stage:  main%03d"%(mainiteration - 1))
					bestoutputdir = os.path.join(masterdir,"main%03d"%(mainiteration-1))
				elif( mainiteration == 1):
					if(myid == main_node):  print("  Resolution decreased in the first iteration.  It is expected, not to worry")
					bestoutputdir = mainoutputdir
					tracker["extension"] += 1
				else:  # this branch should never be reached
					if(myid == main_node):  print(" Should not be here, ERROR 175!")
					mpi_finalize()
					exit()
				if( bestoutputdir != mainoutputdir ):
					#  This is the key, we just reset the main to previous, so it will be eventually used as a starting in the next iteration
					mainoutputdir = bestoutputdir
					"""
					#  Set data from the main previous best to the current.
					for procid in xrange(2):
						partids[procid]   = os.path.join(bestoutputdir,"chunk%01d.txt"%procid)
						partstack[procid] = os.path.join(bestoutputdir,"params-chunk%01d.txt"%procid)
				"""
				if(myid == main_node):
					currentres = read_text_file( os.path.join(bestoutputdir,"current_resolution.txt") )[0]
				currentres = bcast_number_to_all(currentres, source_node = main_node)

				shrink = max(min(2*currentres + paramsdict["aa"], 1.0), minshrink)
				tracker["extension"] -= 1
				nxshrink = min(int(nnxo*shrink + 0.5) + tracker["extension"],nnxo)
				tracker["previous-resolution"] = newres
				tracker["eliminated-outliers"] = eliminated_outliers
				tracker["movedup"] = False
				keepgoing = 1
			

		elif(newres == currentres):
			if( tracker["extension"] > 0 ):
				if(myid == main_node):  print("The resolution did not improve. This is a look-ahead move; let us relax slightly and hope for the best")
				tracker["extension"] -= 1

				tracker["movedup"] = False

				shrink = max(min(2*currentres + paramsdict["aa"], 1.0), minshrink)
				nxshrink = min(int(nnxo*shrink + 0.5) + tracker["extension"],nnxo)
				if( tracker["previous-nx"] == nnxo ):
					keepgoing = 0
				else:
					tracker["previous-resolution"] = newres
					currentres = newres
					tracker["eliminated-outliers"] = eliminated_outliers
					tracker["movedup"] = False
					keepgoing = 1
			else:
				if(myid == main_node):  print("The resolution did not improve.")
				keepgoing = 0

			

		if( keepgoing == 1 ):
			if(myid == main_node):
				print("  New shrink and image dimension :",shrink,nxshrink)
				"""
				#  It does not look like it is necessary, we just have to point to the directory as the files should be there.
				#  Will continue, so update the params files
				for procid in xrange(2):
				#  partids and partstack contain parameters to be used as starting ones in the next iteration
					if(not os.path.exists(os.path.join(mainoutputdir,"chunk%01d.txt"%procid))):
						cmd = "{} {} {}".format("cp -p ", partids[procid] , os.path.join(mainoutputdir,"chunk%01d.txt"%procid))
						cmdexecute(cmd)
					if(not os.path.exists(os.path.join(mainoutputdir,"params-chunk%01d.txt"%procid))):
						cmd = "{} {} {}".format("cp -p ", partstack[procid], os.path.join(mainoutputdir,"params-chunk%01d.txt"%procid))
						cmdexecute(cmd)
				"""
			previousoutputdir = mainoutputdir
			tracker["previous-shrink"]     = shrink
			tracker["previous-nx"]         = nxshrink

		else:
			if(myid == main_node):
				print("  Terminating, the best solution is in the directory main%03d"%tracker["bestsolution"])
		mpi_barrier(MPI_COMM_WORLD)

	mpi_finalize()
Example #26
0
def main():
	import	global_def
	from	optparse 	import OptionParser
	from	EMAN2 		import EMUtil, EMData, Util
	from	global_def 	import SPARXVERSION, ERROR
	import	os
	import	sys
	from time import time

	progname = os.path.basename(sys.argv[0])
	usage = progname + " proj_stack output_averages --MPI"
	parser = OptionParser(usage, version=SPARXVERSION)

	parser.add_option("--img_per_group",type="int"         ,	default=100  ,				help="number of images per group" )
	parser.add_option("--radius", 		type="int"         ,	default=-1   ,				help="radius for alignment" )
	parser.add_option("--xr",           type="string"      ,    default="2 1",              help="range for translation search in x direction, search is +/-xr")
	parser.add_option("--yr",           type="string"      ,    default="-1",               help="range for translation search in y direction, search is +/-yr (default = same as xr)")
	parser.add_option("--ts",           type="string"      ,    default="1 0.5",            help="step size of the translation search in both directions, search is -xr, -xr+ts, 0, xr-ts, xr, can be fractional")
	parser.add_option("--iter", 		type="int"         ,	default=30,                 help="number of iterations within alignment (default = 30)" )
	parser.add_option("--num_ali",      type="int"     	   ,    default=5,         			help="number of alignments performed for stability (default = 5)" )
	parser.add_option("--thld_err",     type="float"       ,    default=1.0,         		help="threshold of pixel error (default = 1.0)" )
	parser.add_option("--grouping" , 	type="string"      ,	default="GRP",				help="do grouping of projections: PPR - per projection, GRP - different size groups, exclusive (default), GEV - grouping equal size")
	parser.add_option("--delta",        type="float"       ,    default=-1.0,         		help="angular step for reference projections (required for GEV method)")
	parser.add_option("--fl",           type="float"       ,    default=0.3,                help="cut-off frequency of hyperbolic tangent low-pass Fourier filter")
	parser.add_option("--aa",           type="float"       ,    default=0.2,                help="fall-off of hyperbolic tangent low-pass Fourier filter")
	parser.add_option("--CTF",          action="store_true",    default=False,              help="Consider CTF correction during the alignment ")
	parser.add_option("--MPI" , 		action="store_true",	default=False,				help="use MPI version")

	(options,args) = parser.parse_args()
	
	from mpi          import mpi_init, mpi_comm_rank, mpi_comm_size, MPI_COMM_WORLD, MPI_TAG_UB
	from mpi          import mpi_barrier, mpi_send, mpi_recv, mpi_bcast, MPI_INT, mpi_finalize, MPI_FLOAT
	from applications import MPI_start_end, within_group_refinement, ali2d_ras
	from pixel_error  import multi_align_stability
	from utilities    import send_EMData, recv_EMData
	from utilities    import get_image, bcast_number_to_all, set_params2D, get_params2D
	from utilities    import group_proj_by_phitheta, model_circle, get_input_from_string

	sys.argv = mpi_init(len(sys.argv), sys.argv)
	myid = mpi_comm_rank(MPI_COMM_WORLD)
	number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
	main_node = 0

	if len(args) == 2:
		stack  = args[0]
		outdir = args[1]
	else:
		ERROR("incomplete list of arguments", "sxproj_stability", 1, myid=myid)
		exit()
	if not options.MPI:
		ERROR("Non-MPI not supported!", "sxproj_stability", myid=myid)
		exit()		 

	if global_def.CACHE_DISABLE:
		from utilities import disable_bdb_cache
		disable_bdb_cache()
	global_def.BATCH = True

	#if os.path.exists(outdir):  ERROR('Output directory exists, please change the name and restart the program', "sxproj_stability", 1, myid)
	#mpi_barrier(MPI_COMM_WORLD)

	
	img_per_grp = options.img_per_group
	radius = options.radius
	ite = options.iter
	num_ali = options.num_ali
	thld_err = options.thld_err

	xrng        = get_input_from_string(options.xr)
	if  options.yr == "-1":  yrng = xrng
	else          :  yrng = get_input_from_string(options.yr)
	step        = get_input_from_string(options.ts)


	if myid == main_node:
		nima = EMUtil.get_image_count(stack)
		img  = get_image(stack)
		nx   = img.get_xsize()
		ny   = img.get_ysize()
	else:
		nima = 0
		nx = 0
		ny = 0
	nima = bcast_number_to_all(nima)
	nx   = bcast_number_to_all(nx)
	ny   = bcast_number_to_all(ny)
	if radius == -1: radius = nx/2-2
	mask = model_circle(radius, nx, nx)

	st = time()
	if options.grouping == "GRP":
		if myid == main_node:
			print "  A  ",myid,"  ",time()-st
			proj_attr = EMUtil.get_all_attributes(stack, "xform.projection")
			proj_params = []
			for i in xrange(nima):
				dp = proj_attr[i].get_params("spider")
				phi, theta, psi, s2x, s2y = dp["phi"], dp["theta"], dp["psi"], -dp["tx"], -dp["ty"]
				proj_params.append([phi, theta, psi, s2x, s2y])

			# Here is where the grouping is done.  group_proj_by_phitheta is not well annotated,
			# so here is a brief explanation of its three return values:
			# proj_list  : a list of lists of particle numbers; each list contains img_per_grp particle
			#              numbers except possibly the last one.  Depending on how many particles are
			#              left over, they either form their own group or are appended to the last group.
			# angle_list : also a list of lists; each list contains three numbers (phi, theta, delta),
			#              where (phi, theta) is the projection angle of the center of the group and
			#              delta is the angular range of the group.
			# mirror_list: also a list of lists; each list contains img_per_grp True/False values
			#              indicating whether the corresponding particle should take the mirror position.
			# In this program angle_list and mirror_list are not of interest.
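			# Hypothetical illustration of the returned structures (values are made up), assuming
			# img_per_grp = 3 and seven particles in the stack:
			#   proj_list_all = [[0, 4, 2], [1, 5, 3, 6]]                 # last group absorbs the leftovers
			#   angle_list    = [[35.0, 70.0, 8.5], [210.0, 45.0, 9.1]]   # (phi, theta, delta) per group
			#   mirror_list   = [[False, True, False], [False, False, True, True]]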

			proj_list_all, angle_list, mirror_list = group_proj_by_phitheta(proj_params, img_per_grp=img_per_grp)
			del proj_params
			print "  B  number of groups  ",myid,"  ",len(proj_list_all),time()-st
		mpi_barrier(MPI_COMM_WORLD)

		# Number of groups; there could actually be one or two more groups, and since the size of the
		# remaining group varies, we simply assign those to the main node.
		n_grp = nima/img_per_grp-1

		# Divide proj_list_all equally to all nodes, and becomes proj_list
		proj_list = []
		for i in xrange(n_grp):
			proc_to_stay = i%number_of_proc
			if proc_to_stay == main_node:
				if myid == main_node: 	proj_list.append(proj_list_all[i])
			elif myid == main_node:
				mpi_send(len(proj_list_all[i]), 1, MPI_INT, proc_to_stay, MPI_TAG_UB, MPI_COMM_WORLD)
				mpi_send(proj_list_all[i], len(proj_list_all[i]), MPI_INT, proc_to_stay, MPI_TAG_UB, MPI_COMM_WORLD)
			elif myid == proc_to_stay:
				img_per_grp = mpi_recv(1, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
				img_per_grp = int(img_per_grp[0])
				temp = mpi_recv(img_per_grp, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
				proj_list.append(map(int, temp))
				del temp
			mpi_barrier(MPI_COMM_WORLD)
		print "  C  ",myid,"  ",time()-st
		if myid == main_node:
			# Assign the remaining groups to main_node
			for i in xrange(n_grp, len(proj_list_all)):
				proj_list.append(proj_list_all[i])
			del proj_list_all, angle_list, mirror_list


	#   Compute stability per projection direction; an equal number of projections is assigned to each direction, so the groups overlap
	elif options.grouping == "GEV":
		if options.delta == -1.0: ERROR("Angular step for reference projections is required for GEV method","sxproj_stability",1)
		from utilities import even_angles, nearestk_to_refdir, getvec
		refproj = even_angles(options.delta)
		img_begin, img_end = MPI_start_end(len(refproj), number_of_proc, myid)
		# Now each processor keeps its own share of reference projections
		refprojdir = refproj[img_begin: img_end]
		del refproj

		ref_ang = [0.0]*(len(refprojdir)*2)
		for i in xrange(len(refprojdir)):
			ref_ang[i*2]   = refprojdir[i][0]
			ref_ang[i*2+1] = refprojdir[i][1]

		print "  A  ",myid,"  ",time()-st
		proj_attr = EMUtil.get_all_attributes(stack, "xform.projection")
		#  the solution below is very slow, do not use it unless there is a problem with the I/O
		"""
		for i in xrange(number_of_proc):
			if myid == i:
				proj_attr = EMUtil.get_all_attributes(stack, "xform.projection")
			mpi_barrier(MPI_COMM_WORLD)
		"""
		print "  B  ",myid,"  ",time()-st

		proj_ang = [0.0]*(nima*2)
		for i in xrange(nima):
			dp = proj_attr[i].get_params("spider")
			proj_ang[i*2]   = dp["phi"]
			proj_ang[i*2+1] = dp["theta"]
		print "  C  ",myid,"  ",time()-st
		asi = Util.nearestk_to_refdir(proj_ang, ref_ang, img_per_grp)
		del proj_ang, ref_ang
		proj_list = []
		for i in xrange(len(refprojdir)):
			proj_list.append(asi[i*img_per_grp:(i+1)*img_per_grp])
		del asi
		print "  D  ",myid,"  ",time()-st
		#from sys import exit
		#exit()


	#   Compute stability per projection
	elif options.grouping == "PPR":
		print "  A  ",myid,"  ",time()-st
		proj_attr = EMUtil.get_all_attributes(stack, "xform.projection")
		print "  B  ",myid,"  ",time()-st
		proj_params = []
		for i in xrange(nima):
			dp = proj_attr[i].get_params("spider")
			phi, theta, psi, s2x, s2y = dp["phi"], dp["theta"], dp["psi"], -dp["tx"], -dp["ty"]
			proj_params.append([phi, theta, psi, s2x, s2y])
		img_begin, img_end = MPI_start_end(nima, number_of_proc, myid)
		print "  C  ",myid,"  ",time()-st
		from utilities import nearest_proj
		proj_list, mirror_list = nearest_proj(proj_params, img_per_grp, range(img_begin, img_end))
		refprojdir = proj_params[img_begin: img_end]
		del proj_params, mirror_list
		print "  D  ",myid,"  ",time()-st
	else:  ERROR("Incorrect projection grouping option","sxproj_stability",1)
	"""
	from utilities import write_text_file
	for i in xrange(len(proj_list)):
		write_text_file(proj_list[i],"projlist%06d_%04d"%(i,myid))
	"""

	###########################################################################################################
	# Begin stability test
	from utilities import get_params_proj, read_text_file
	#if myid == 0:
	#	from utilities import read_text_file
	#	proj_list[0] = map(int, read_text_file("lggrpp0.txt"))


	from utilities import model_blank
	aveList = [model_blank(nx,ny) for i in xrange(len(proj_list))]  # one independent blank image per group
	if options.grouping == "GRP":  refprojdir = [[0.0,0.0,-1.0]]*len(proj_list)
	for i in xrange(len(proj_list)):
		print "  E  ",myid,"  ",time()-st
		class_data = EMData.read_images(stack, proj_list[i])
		#print "  R  ",myid,"  ",time()-st
		if options.CTF :
			from filter import filt_ctf
			for im in xrange(len(class_data)):  #  MEM LEAK!!
				atemp = class_data[im].copy()
				btemp = filt_ctf(atemp, atemp.get_attr("ctf"), binary=1)
				class_data[im] = btemp
				#class_data[im] = filt_ctf(class_data[im], class_data[im].get_attr("ctf"), binary=1)
		for im in class_data:
			try:
				t = im.get_attr("xform.align2d") # if they are there, no need to set them!
			except:
				try:
					t = im.get_attr("xform.projection")
					d = t.get_params("spider")
					set_params2D(im, [0.0,-d["tx"],-d["ty"],0,1.0])
				except:
					set_params2D(im, [0.0, 0.0, 0.0, 0, 1.0])
		#print "  F  ",myid,"  ",time()-st
		# Here, we perform realignment num_ali times
		all_ali_params = []
		for j in xrange(num_ali):
			if( xrng[0] == 0.0 and yrng[0] == 0.0 ):
				avet = ali2d_ras(class_data, randomize = True, ir = 1, ou = radius, rs = 1, step = 1.0, dst = 90.0, maxit = ite, check_mirror = True, FH=options.fl, FF=options.aa)
			else:
				avet = within_group_refinement(class_data, mask, True, 1, radius, 1, xrng, yrng, step, 90.0, ite, options.fl, options.aa)
			ali_params = []
			for im in xrange(len(class_data)):
				alpha, sx, sy, mirror, scale = get_params2D(class_data[im])
				ali_params.extend( [alpha, sx, sy, mirror] )
			all_ali_params.append(ali_params)
		#aveList[i] = avet
		#print "  G  ",myid,"  ",time()-st
		del ali_params
		# We determine the stability of this group here.
		# stable_set contains all particles deemed stable; it is a list of lists,
		# each entry holding the pixel error, the particle's index within this subset,
		# and its 2D alignment parameters.  stable_set is sorted by pixel error.
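		# Hypothetical example of the structure (made-up values):
		#   stable_set = [[0.4, 7, [ 12.3,  1.0, -0.5, 0]],
		#                 [0.9, 2, [171.8, -0.2,  0.3, 1]]]
		# i.e. [pixel_error, index_within_this_group, [alpha, sx, sy, mirror]], sorted by pixel_error.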
		#from utilities import write_text_file
		#write_text_file(all_ali_params, "all_ali_params%03d.txt"%myid)
		stable_set, mir_stab_rate, average_pix_err = multi_align_stability(all_ali_params, 0.0, 10000.0, thld_err, False, 2*radius+1)
		#print "  H  ",myid,"  ",time()-st
		if(len(stable_set) > 5):
			stable_set_id = []
			members = []
			pix_err = []
			# First put the stable members into attr 'members' and 'pix_err'
			for s in stable_set:
				# s[1] - number in this subset
				stable_set_id.append(s[1])
				# the original image number
				members.append(proj_list[i][s[1]])
				pix_err.append(s[0])
			# Then put the unstable members into attr 'members' and 'pix_err'
			from fundamentals import rot_shift2D
			avet.to_zero()
			if options.grouping == "GRP":
				aphi = 0.0
				atht = 0.0
				vphi = 0.0
				vtht = 0.0
			l = -1
			for j in xrange(len(proj_list[i])):
				#  Here it will only work if stable_set_id is sorted in the increasing number, see how l progresses
				if j in stable_set_id:
					l += 1
					avet += rot_shift2D(class_data[j], stable_set[l][2][0], stable_set[l][2][1], stable_set[l][2][2], stable_set[l][2][3] )
					if options.grouping == "GRP":
						phi, theta, psi, sxs, sys = get_params_proj(class_data[j])
						if( theta > 90.0):
							phi = (phi+540.0)%360.0
							theta = 180.0 - theta
						aphi += phi
						atht += theta
						vphi += phi*phi
						vtht += theta*theta
				else:
					members.append(proj_list[i][j])
					pix_err.append(99999.99)
			aveList[i] = avet.copy()
			if l>1 :
				l += 1
				aveList[i] /= l
				if options.grouping == "GRP":
					aphi /= l
					atht /= l
					vphi = (vphi - l*aphi*aphi)/l
					vtht = (vtht - l*atht*atht)/l
					from math import sqrt
					refprojdir[i] = [aphi, atht, (sqrt(max(vphi,0.0))+sqrt(max(vtht,0.0)))/2.0]

			# Here more information has to be stored, PARTICULARLY WHAT IS THE REFERENCE DIRECTION
			aveList[i].set_attr('members', members)
			aveList[i].set_attr('refprojdir',refprojdir[i])
			aveList[i].set_attr('pixerr', pix_err)
		else:
			print  " empty group ",i, refprojdir[i]
			aveList[i].set_attr('members',[-1])
			aveList[i].set_attr('refprojdir',refprojdir[i])
			aveList[i].set_attr('pixerr', [99999.])

	del class_data

	if myid == main_node:
		km = 0
		for i in xrange(number_of_proc):
			if i == main_node :
				for im in xrange(len(aveList)):
					aveList[im].write_image(args[1], km)
					km += 1
			else:
				nl = mpi_recv(1, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)
				nl = int(nl[0])
				for im in xrange(nl):
					ave = recv_EMData(i, im+i+70000)
					nm = mpi_recv(1, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)
					nm = int(nm[0])
					members = mpi_recv(nm, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)
					ave.set_attr('members', map(int, members))
					members = mpi_recv(nm, MPI_FLOAT, i, MPI_TAG_UB, MPI_COMM_WORLD)
					ave.set_attr('pixerr', map(float, members))
					members = mpi_recv(3, MPI_FLOAT, i, MPI_TAG_UB, MPI_COMM_WORLD)
					ave.set_attr('refprojdir', map(float, members))
					ave.write_image(args[1], km)
					km += 1
	else:
		mpi_send(len(aveList), 1, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
		for im in xrange(len(aveList)):
			send_EMData(aveList[im], main_node,im+myid+70000)
			members = aveList[im].get_attr('members')
			mpi_send(len(members), 1, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
			mpi_send(members, len(members), MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
			members = aveList[im].get_attr('pixerr')
			mpi_send(members, len(members), MPI_FLOAT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
			try:
				members = aveList[im].get_attr('refprojdir')
				mpi_send(members, 3, MPI_FLOAT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
			except:
				mpi_send([-999.0,-999.0,-999.0], 3, MPI_FLOAT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)

	global_def.BATCH = False
	mpi_barrier(MPI_COMM_WORLD)
	from mpi import mpi_finalize
	mpi_finalize()
Example #27
0
def do_volume_mrk02(ref_data):
    """
    ref_data[0] - projections (scattered between cpus) or the volume; if a volume, just do the volume processing
    ref_data[1] - Tracker dictionary, the same for all cpus
    ref_data[2] - iteration number
    ref_data[3] - mpi communicator
    return      - volume, the same on all cpus
    """
    from EMAN2 import Util
    from mpi import mpi_comm_rank, mpi_comm_size, MPI_COMM_WORLD
    from filter import filt_table
    from reconstruction import recons3d_4nn_MPI, recons3d_4nn_ctf_MPI
    from utilities import bcast_EMData_to_all, bcast_number_to_all, model_blank
    from fundamentals import rops_table, fftip, fft
    from global_def import ERROR
    import types

    # Retrieve the function specific input arguments from ref_data
    data = ref_data[0]
    Tracker = ref_data[1]
    iter = ref_data[2]
    mpi_comm = ref_data[3]
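
    #  Overall flow of this user function (summary of the code below):
    #    1) reconstruct the volume from the projections (CTF-aware or plain 4nn), unless a volume was passed in
    #    2) on the main node: mask, normalize, threshold, optionally adjust the rotational power spectrum
    #       ("PWadjustment", plus the two-Gaussian "sausage" weighting) and low-pass filter
    #    3) optionally apply local filtration driven by a local-resolution map (Tracker["local_filter"])
    #    4) broadcast the final volume to all MPI processes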

    # # For DEBUG
    # print "Type of data %s" % (type(data))
    # print "Type of Tracker %s" % (type(Tracker))
    # print "Type of iter %s" % (type(iter))
    # print "Type of mpi_comm %s" % (type(mpi_comm))

    if (mpi_comm == None): mpi_comm = MPI_COMM_WORLD
    myid = mpi_comm_rank(mpi_comm)
    nproc = mpi_comm_size(mpi_comm)

    try:
        local_filter = Tracker["local_filter"]
    except:
        local_filter = False
    #=========================================================================
    # volume reconstruction
    if (type(data) == types.ListType):
        if Tracker["constants"]["CTF"]:
            vol = recons3d_4nn_ctf_MPI(myid, data, Tracker["constants"]["snr"], \
              symmetry=Tracker["constants"]["sym"], npad=Tracker["constants"]["npad"], mpi_comm=mpi_comm, smearstep = Tracker["smearstep"])
        else:
            vol = recons3d_4nn_MPI    (myid, data,\
              symmetry=Tracker["constants"]["sym"], npad=Tracker["constants"]["npad"], mpi_comm=mpi_comm)
    else:
        vol = data

    if myid == 0:
        from morphology import threshold
        from filter import filt_tanl, filt_btwl
        from utilities import model_circle, get_im
        import types
        nx = vol.get_xsize()
        if (Tracker["constants"]["mask3D"] == None):
            mask3D = model_circle(
                int(Tracker["constants"]["radius"] * float(nx) /
                    float(Tracker["constants"]["nnxo"]) + 0.5), nx, nx, nx)
        elif (Tracker["constants"]["mask3D"] == "auto"):
            from utilities import adaptive_mask
            mask3D = adaptive_mask(vol)
        else:
            if (type(Tracker["constants"]["mask3D"]) == types.StringType):
                mask3D = get_im(Tracker["constants"]["mask3D"])
            else:
                mask3D = (Tracker["constants"]["mask3D"]).copy()
            nxm = mask3D.get_xsize()
            if (nx != nxm):
                from fundamentals import rot_shift3D
                mask3D = Util.window(
                    rot_shift3D(mask3D, scale=float(nx) / float(nxm)), nx, nx,
                    nx)
                nxm = mask3D.get_xsize()
                assert (nx == nxm)

        stat = Util.infomask(vol, mask3D, False)
        vol -= stat[0]
        Util.mul_scalar(vol, 1.0 / stat[1])
        vol = threshold(vol)
        Util.mul_img(vol, mask3D)
        if (Tracker["PWadjustment"]):
            from utilities import read_text_file, write_text_file
            rt = read_text_file(Tracker["PWadjustment"])
            fftip(vol)
            ro = rops_table(vol)
            #  Here unless I am mistaken it is enough to take the beginning of the reference pw.
            for i in xrange(1, len(ro)):
                ro[i] = (rt[i] / ro[i])**Tracker["upscale"]
            #write_text_file(rops_table(filt_table( vol, ro),1),"foo.txt")
            if Tracker["constants"]["sausage"]:
                ny = vol.get_ysize()
                y = float(ny)
                from math import exp
                for i in xrange(len(ro)):
                    ro[i] *= (1.0 + 1.0*exp(-(((i/y/Tracker["constants"]["pixel_size"]) - 0.10 )/0.025)**2)
                                  + 1.0*exp(-(((i/y/Tracker["constants"]["pixel_size"]) - 0.215)/0.025)**2))

            if local_filter:
                # skip low-pass filtration
                vol = fft(filt_table(vol, ro))
            else:
                if (type(Tracker["lowpass"]) == types.ListType):
                    vol = fft(
                        filt_table(filt_table(vol, Tracker["lowpass"]), ro))
                else:
                    vol = fft(
                        filt_table(
                            filt_tanl(vol, Tracker["lowpass"],
                                      Tracker["falloff"]), ro))
            del ro
        else:
            if Tracker["constants"]["sausage"]:
                ny = vol.get_ysize()
                y = float(ny)
                ro = [0.0] * (ny // 2 + 2)
                from math import exp
                for i in xrange(len(ro)):
                    ro[i] = (1.0 + 1.0*exp(-(((i/y/Tracker["constants"]["pixel_size"]) - 0.10 )/0.025)**2)
                                 + 1.0*exp(-(((i/y/Tracker["constants"]["pixel_size"]) - 0.215)/0.025)**2))
                fftip(vol)
                vol = filt_table(vol, ro)
                del ro
            if not local_filter:
                if (type(Tracker["lowpass"]) == types.ListType):
                    vol = filt_table(vol, Tracker["lowpass"])
                else:
                    vol = filt_tanl(vol, Tracker["lowpass"],
                                    Tracker["falloff"])
            if Tracker["constants"]["sausage"]: vol = fft(vol)

    if local_filter:
        from morphology import binarize
        if (myid == 0): nx = mask3D.get_xsize()
        else: nx = 0
        nx = bcast_number_to_all(nx, source_node=0)
        #  only main processor needs the two input volumes
        if (myid == 0):
            mask = binarize(mask3D, 0.5)
            locres = get_im(Tracker["local_filter"])
            lx = locres.get_xsize()
            if (lx != nx):
                if (lx < nx):
                    from fundamentals import fdecimate, rot_shift3D
                    mask = Util.window(
                        rot_shift3D(mask, scale=float(lx) / float(nx)), lx, lx,
                        lx)
                    vol = fdecimate(vol, lx, lx, lx)
                else:
                    ERROR("local filter cannot be larger than input volume",
                          "user function", 1)
            stat = Util.infomask(vol, mask, False)
            vol -= stat[0]
            Util.mul_scalar(vol, 1.0 / stat[1])
        else:
            lx = 0
            locres = model_blank(1, 1, 1)
            vol = model_blank(1, 1, 1)
        lx = bcast_number_to_all(lx, source_node=0)
        if (myid != 0): mask = model_blank(lx, lx, lx)
        bcast_EMData_to_all(mask, myid, 0, comm=mpi_comm)
        from filter import filterlocal
        vol = filterlocal(locres, vol, mask, Tracker["falloff"], myid, 0,
                          nproc)

        if myid == 0:
            if (lx < nx):
                from fundamentals import fpol
                vol = fpol(vol, nx, nx, nx)
            vol = threshold(vol)
            vol = filt_btwl(vol, 0.38, 0.5)  #  This will have to be corrected.
            Util.mul_img(vol, mask3D)
            del mask3D
            # vol.write_image('toto%03d.hdf'%iter)
        else:
            vol = model_blank(nx, nx, nx)
    else:
        if myid == 0:
            #from utilities import write_text_file
            #write_text_file(rops_table(vol,1),"goo.txt")
            stat = Util.infomask(vol, mask3D, False)
            vol -= stat[0]
            Util.mul_scalar(vol, 1.0 / stat[1])
            vol = threshold(vol)
            vol = filt_btwl(vol, 0.38, 0.5)  #  This will have to be corrected.
            Util.mul_img(vol, mask3D)
            del mask3D
            # vol.write_image('toto%03d.hdf'%iter)
    # broadcast volume
    bcast_EMData_to_all(vol, myid, 0, comm=mpi_comm)
    #=========================================================================
    return vol
Example #28
0
def main():
	#  Imports needed by this snippet; helpers such as cmdexecute, delete_bdb and
	#  image_decimate_window_xform_ctf are assumed to be defined elsewhere in the original module.
	import	os
	import	sys
	import	global_def
	from	global_def	import SPARXVERSION, ERROR
	from	optparse	import OptionParser
	from	EMAN2		import EMUtil, EMData, Util
	from	utilities	import get_symt
	from	time		import time

	def params_3D_2D_NEW(phi, theta, psi, s2x, s2y, mirror):
		if mirror:
			m = 1
			alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)
		else:
			m = 0
			alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)
		return  alpha, sx, sy, m
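	# Hypothetical usage (values made up): for an unmirrored projection with psi = 123.0 and shifts (1.5, -2.0),
	#   alpha, sx, sy, m = params_3D_2D_NEW(20.0, 75.0, 123.0, 1.5, -2.0, False)
	# returns the equivalent 2D alignment: alpha = (360.0 - psi) % 360.0, the shifts rotated accordingly
	# by compose_transform2, and m = 0.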
	
	progname = os.path.basename(sys.argv[0])
	usage = progname + " prj_stack  --ave2D= --var2D=  --ave3D= --var3D= --img_per_grp= --fl=0.2 --aa=0.1  --sym=symmetry --CTF"
	parser = OptionParser(usage, version=SPARXVERSION)

	parser.add_option("--ave2D",		type="string"	   ,	default=False,				help="write to the disk a stack of 2D averages")
	parser.add_option("--var2D",		type="string"	   ,	default=False,				help="write to the disk a stack of 2D variances")
	parser.add_option("--ave3D",		type="string"	   ,	default=False,				help="write to the disk reconstructed 3D average")
	parser.add_option("--var3D",		type="string"	   ,	default=False,				help="compute 3D variability (time consuming!)")
	parser.add_option("--img_per_grp",	type="int"         ,	default=10   ,				help="number of neighbouring projections")
	parser.add_option("--no_norm",		action="store_true",	default=False,				help="do not use normalization")
	parser.add_option("--radiusvar", 	type="int"         ,	default=-1   ,				help="radius for 3D var" )
	parser.add_option("--npad",			type="int"         ,	default=2    ,				help="number of time to pad the original images")
	parser.add_option("--sym" , 		type="string"      ,	default="c1" ,				help="symmetry")
	parser.add_option("--fl",			type="float"       ,	default=0.0  ,				help="stop-band frequency (Default - no filtration)")
	parser.add_option("--aa",			type="float"       ,	default=0.0  ,				help="fall off of the filter (Default - no filtration)")
	parser.add_option("--CTF",			action="store_true",	default=False,				help="use CTF correction")
	parser.add_option("--VERBOSE",		action="store_true",	default=False,				help="Long output for debugging")
	#parser.add_option("--MPI" , 		action="store_true",	default=False,				help="use MPI version")
	#parser.add_option("--radiuspca", 	type="int"         ,	default=-1   ,				help="radius for PCA" )
	#parser.add_option("--iter", 		type="int"         ,	default=40   ,				help="maximum number of iterations (stop criterion of reconstruction process)" )
	#parser.add_option("--abs", 			type="float"       ,	default=0.0  ,				help="minimum average absolute change of voxels' values (stop criterion of reconstruction process)" )
	#parser.add_option("--squ", 			type="float"       ,	default=0.0  ,				help="minimum average squared change of voxels' values (stop criterion of reconstruction process)" )
	parser.add_option("--VAR" , 		action="store_true",	default=False,				help="stack on input consists of 2D variances (Default False)")
	parser.add_option("--decimate",     type="float",           default=1.0,                 help="image decimation rate, a number larger than 1 (default = 1, i.e. no decimation)")
	parser.add_option("--window",       type="int",             default=0,                   help="reduce images to a smaller size without changing pixel_size (default = 0, i.e. no windowing)")
	#parser.add_option("--SND",			action="store_true",	default=False,				help="compute squared normalized differences (Default False)")
	parser.add_option("--nvec",			type="int"         ,	default=0    ,				help="number of eigenvectors, default = 0 meaning no PCA calculated")
	parser.add_option("--symmetrize",	action="store_true",	default=False,				help="Prepare input stack for handling symmetry (Default False)")
	
	(options,args) = parser.parse_args()
	#####
	from mpi import mpi_init, mpi_comm_rank, mpi_comm_size, mpi_recv, MPI_COMM_WORLD, MPI_TAG_UB
	from mpi import mpi_barrier, mpi_reduce, mpi_bcast, mpi_send, MPI_FLOAT, MPI_SUM, MPI_INT, MPI_MAX
	from applications import MPI_start_end
	from reconstruction import recons3d_em, recons3d_em_MPI
	from reconstruction	import recons3d_4nn_MPI, recons3d_4nn_ctf_MPI
	from utilities import print_begin_msg, print_end_msg, print_msg
	from utilities import read_text_row, get_image, get_im
	from utilities import bcast_EMData_to_all, bcast_number_to_all
	from utilities import get_symt

	#  This is code for handling symmetries by the above program.  To be incorporated. PAP 01/27/2015

	from EMAN2db import db_open_dict
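	#  --symmetrize (sketch of what the branch below does): make one virtual copy of the stack per
	#  symmetry transform, multiply each projection orientation by that transform, and merge the
	#  copies into bdb:sdata, so that the variability computation can treat the symmetrized data as C1.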
	
	if options.symmetrize :
		try:
			sys.argv = mpi_init(len(sys.argv), sys.argv)
			try:	
				number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
				if( number_of_proc > 1 ):
					ERROR("Cannot use more than one CPU for symmetry preparation","sx3dvariability",1)
			except:
				pass
		except:
			pass

		#  Input
		#instack = "Clean_NORM_CTF_start_wparams.hdf"
		#instack = "bdb:data"
		instack = args[0]
		sym = options.sym
		if( sym == "c1" ):
			ERROR("There is no need to symmetrize stack for C1 symmetry","sx3dvariability",1)

		if(instack[:4] !="bdb:"):
			stack = "bdb:data"
			delete_bdb(stack)
			cmdexecute("sxcpy.py  "+instack+"  "+stack)
		else:
			stack = instack

		qt = EMUtil.get_all_attributes(stack,'xform.projection')

		na = len(qt)
		ts = get_symt(sym)
		ks = len(ts)
		angsa = [None]*na
		for k in xrange(ks):
			delete_bdb("bdb:Q%1d"%k)
			cmdexecute("e2bdb.py  "+stack+"  --makevstack=bdb:Q%1d"%k)
			DB = db_open_dict("bdb:Q%1d"%k)
			for i in xrange(na):
				ut = qt[i]*ts[k]
				DB.set_attr(i, "xform.projection", ut)
				#bt = ut.get_params("spider")
				#angsa[i] = [round(bt["phi"],3)%360.0, round(bt["theta"],3)%360.0, bt["psi"], -bt["tx"], -bt["ty"]]
			#write_text_row(angsa, 'ptsma%1d.txt'%k)
			#cmdexecute("e2bdb.py  "+stack+"  --makevstack=bdb:Q%1d"%k)
			#cmdexecute("sxheader.py  bdb:Q%1d  --params=xform.projection  --import=ptsma%1d.txt"%(k,k))
			DB.close()
		delete_bdb("bdb:sdata")
		cmdexecute("e2bdb.py . --makevstack=bdb:sdata --filt=Q")
		#cmdexecute("ls  EMAN2DB/sdata*")
		a = get_im("bdb:sdata")
		a.set_attr("variabilitysymmetry",sym)
		a.write_image("bdb:sdata")


	else:

		sys.argv = mpi_init(len(sys.argv), sys.argv)
		myid     = mpi_comm_rank(MPI_COMM_WORLD)
		number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
		main_node = 0

		if len(args) == 1:
			stack = args[0]
		else:
			print( "usage: " + usage)
			print( "Please run '" + progname + " -h' for detailed options")
			return 1

		t0 = time()
	
		# obsolete flags
		options.MPI = True
		options.nvec = 0
		options.radiuspca = -1
		options.iter = 40
		options.abs = 0.0
		options.squ = 0.0
		options.SND = False    # --SND is disabled above but is still referenced below

		if options.fl > 0.0 and options.aa == 0.0:
			ERROR("Fall off has to be given for the low-pass filter", "sx3dvariability", 1, myid)
		if options.VAR and options.SND:
			ERROR("Only one of VAR and SND can be set!", "sx3dvariability", 1, myid)
			exit()
		if options.VAR and (options.ave2D or options.ave3D or options.var2D): 
			ERROR("When VAR is set, the program cannot output ave2D, ave3D or var2D", "sx3dvariability", 1, myid)
			exit()
		#if options.SND and (options.ave2D or options.ave3D):
		#	ERROR("When SND is set, the program cannot output ave2D or ave3D", "sx3dvariability", 1, myid)
		#	exit()
		if options.nvec > 0 :
			ERROR("PCA option not implemented", "sx3dvariability", 1, myid)
			exit()
		if options.nvec > 0 and options.ave3D == None:
			ERROR("When doing PCA analysis, one must set ave3D", "sx3dvariability", myid=myid)
			exit()
		import string
		options.sym = options.sym.lower()
		 
		if global_def.CACHE_DISABLE:
			from utilities import disable_bdb_cache
			disable_bdb_cache()
		global_def.BATCH = True

		if myid == main_node:
			print_begin_msg("sx3dvariability")
			print_msg("%-70s:  %s\n"%("Input stack", stack))
	
		img_per_grp = options.img_per_grp
		nvec = options.nvec
		radiuspca = options.radiuspca

		symbaselen = 0
		if myid == main_node:
			nima = EMUtil.get_image_count(stack)
			img  = get_image(stack)
			nx   = img.get_xsize()
			ny   = img.get_ysize()
			if options.sym != "c1" :
				imgdata = get_im(stack)
				try:
					i = imgdata.get_attr("variabilitysymmetry")
					if(i != options.sym):
						ERROR("The symmetry provided does not agree with the symmetry of the input stack", "sx3dvariability", myid=myid)
				except:
					ERROR("Input stack is not prepared for symmetry, please follow instructions", "sx3dvariability", myid=myid)
				from utilities import get_symt
				i = len(get_symt(options.sym))
				if((nima/i)*i != nima):
					ERROR("The length of the input stack is incorrect for symmetry processing", "sx3dvariability", myid=myid)
				symbaselen = nima/i
			else:  symbaselen = nima
		else:
			nima = 0
			nx = 0
			ny = 0
		nima = bcast_number_to_all(nima)
		nx   = bcast_number_to_all(nx)
		ny   = bcast_number_to_all(ny)
		Tracker ={}
		Tracker["nx"]  =nx
		Tracker["ny"]  =ny
		Tracker["total_stack"]=nima
		if options.decimate==1.:
			if options.window !=0:
				nx = options.window
				ny = options.window
		else:
			if options.window ==0:
				nx = int(nx/options.decimate)
				ny = int(ny/options.decimate)
			else:
				nx = int(options.window/options.decimate)
				ny = nx
		symbaselen = bcast_number_to_all(symbaselen)
		if radiuspca == -1: radiuspca = nx/2-2

		if myid == main_node:
			print_msg("%-70s:  %d\n"%("Number of projections", nima))
		
		img_begin, img_end = MPI_start_end(nima, number_of_proc, myid)
		"""
		if options.SND:
			from projection		import prep_vol, prgs
			from statistics		import im_diff
			from utilities		import get_im, model_circle, get_params_proj, set_params_proj
			from utilities		import get_ctf, generate_ctf
			from filter			import filt_ctf
		
			imgdata = EMData.read_images(stack, range(img_begin, img_end))

			if options.CTF:
				vol = recons3d_4nn_ctf_MPI(myid, imgdata, 1.0, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)
			else:
				vol = recons3d_4nn_MPI(myid, imgdata, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)

			bcast_EMData_to_all(vol, myid)
			volft, kb = prep_vol(vol)

			mask = model_circle(nx/2-2, nx, ny)
			varList = []
			for i in xrange(img_begin, img_end):
				phi, theta, psi, s2x, s2y = get_params_proj(imgdata[i-img_begin])
				ref_prj = prgs(volft, kb, [phi, theta, psi, -s2x, -s2y])
				if options.CTF:
					ctf_params = get_ctf(imgdata[i-img_begin])
					ref_prj = filt_ctf(ref_prj, generate_ctf(ctf_params))
				diff, A, B = im_diff(ref_prj, imgdata[i-img_begin], mask)
				diff2 = diff*diff
				set_params_proj(diff2, [phi, theta, psi, s2x, s2y])
				varList.append(diff2)
			mpi_barrier(MPI_COMM_WORLD)
		"""
		if options.VAR:
			#varList = EMData.read_images(stack, range(img_begin, img_end))
			varList = []
			this_image = EMData()
			for index_of_particle in xrange(img_begin,img_end):
				this_image.read_image(stack,index_of_particle)
				varList.append(image_decimate_window_xform_ctf(this_image,options.decimate,options.window,options.CTF))
		else:
			from utilities		import bcast_number_to_all, bcast_list_to_all, send_EMData, recv_EMData
			from utilities		import set_params_proj, get_params_proj, params_3D_2D, get_params2D, set_params2D, compose_transform2
			from utilities		import model_blank, nearest_proj, model_circle
			from applications	import pca
			from statistics		import avgvar, avgvar_ctf, ccc
			from filter		    import filt_tanl
			from morphology		import threshold, square_root
			from projection 	import project, prep_vol, prgs
			from sets		    import Set

			if myid == main_node:
				t1 = time()
				proj_angles = []
				aveList = []
				tab = EMUtil.get_all_attributes(stack, 'xform.projection')
				for i in xrange(nima):
					t     = tab[i].get_params('spider')
					phi   = t['phi']
					theta = t['theta']
					psi   = t['psi']
					x     = theta
					if x > 90.0: x = 180.0 - x
					x = x*10000+psi
					proj_angles.append([x, t['phi'], t['theta'], t['psi'], i])
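				#  The first element packed above is a single sortable key combining the out-of-plane angle
				#  (theta folded into [0, 90]) and psi; e.g. theta = 110, psi = 35 gives
				#  x = (180 - 110)*10000 + 35 = 700035, so projections sort by theta first and psi second.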
				t2 = time()
				print_msg("%-70s:  %d\n"%("Number of neighboring projections", img_per_grp))
				print_msg("...... Finding neighboring projections\n")
				if options.VERBOSE:
					print "Number of images per group: ", img_per_grp
					print "Now grouping projections"
				proj_angles.sort()

			proj_angles_list = [0.0]*(nima*4)
			if myid == main_node:
				for i in xrange(nima):
					proj_angles_list[i*4]   = proj_angles[i][1]
					proj_angles_list[i*4+1] = proj_angles[i][2]
					proj_angles_list[i*4+2] = proj_angles[i][3]
					proj_angles_list[i*4+3] = proj_angles[i][4]
			proj_angles_list = bcast_list_to_all(proj_angles_list, myid, main_node)
			proj_angles = []
			for i in xrange(nima):
				proj_angles.append([proj_angles_list[i*4], proj_angles_list[i*4+1], proj_angles_list[i*4+2], int(proj_angles_list[i*4+3])])
			del proj_angles_list

			proj_list, mirror_list = nearest_proj(proj_angles, img_per_grp, range(img_begin, img_end))

			all_proj = Set()
			for im in proj_list:
				for jm in im:
					all_proj.add(proj_angles[jm][3])

			all_proj = list(all_proj)
			if options.VERBOSE:
				print "On node %2d, number of images needed to be read = %5d"%(myid, len(all_proj))

			index = {}
			for i in xrange(len(all_proj)): index[all_proj[i]] = i
			mpi_barrier(MPI_COMM_WORLD)

			if myid == main_node:
				print_msg("%-70s:  %.2f\n"%("Finding neighboring projections lasted [s]", time()-t2))
				print_msg("%-70s:  %d\n"%("Number of groups processed on the main node", len(proj_list)))
				if options.VERBOSE:
					print "Grouping projections took: ", (time()-t2)/60	, "[min]"
					print "Number of groups on main node: ", len(proj_list)
			mpi_barrier(MPI_COMM_WORLD)

			if myid == main_node:
				print_msg("...... calculating the stack of 2D variances \n")
				if options.VERBOSE:
					print "Now calculating the stack of 2D variances"

			proj_params = [0.0]*(nima*5)
			aveList = []
			varList = []				
			if nvec > 0:
				eigList = [[] for i in xrange(nvec)]

			if options.VERBOSE: 	print "Begin to read images on processor %d"%(myid)
			ttt = time()
			#imgdata = EMData.read_images(stack, all_proj)
			img     = EMData()
			imgdata = []
			for index_of_proj in xrange(len(all_proj)):
				img.read_image(stack, all_proj[index_of_proj])
				dmg = image_decimate_window_xform_ctf(img,options.decimate,options.window,options.CTF)
				#print dmg.get_xsize(), "init"
				imgdata.append(dmg)
			if options.VERBOSE:
				print "Reading images on processor %d done, time = %.2f"%(myid, time()-ttt)
				print "On processor %d, we got %d images"%(myid, len(imgdata))
			mpi_barrier(MPI_COMM_WORLD)

			'''	
			imgdata2 = EMData.read_images(stack, range(img_begin, img_end))
			if options.fl > 0.0:
				for k in xrange(len(imgdata2)):
					imgdata2[k] = filt_tanl(imgdata2[k], options.fl, options.aa)
			if options.CTF:
				vol = recons3d_4nn_ctf_MPI(myid, imgdata2, 1.0, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)
			else:
				vol = recons3d_4nn_MPI(myid, imgdata2, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)
			if myid == main_node:
				vol.write_image("vol_ctf.hdf")
				print_msg("Writing to the disk volume reconstructed from averages as		:  %s\n"%("vol_ctf.hdf"))
			del vol, imgdata2
			mpi_barrier(MPI_COMM_WORLD)
			'''
			from applications import prepare_2d_forPCA
			from utilities import model_blank
			for i in xrange(len(proj_list)):
				ki = proj_angles[proj_list[i][0]][3]
				if ki >= symbaselen:  continue
				mi = index[ki]
				phiM, thetaM, psiM, s2xM, s2yM = get_params_proj(imgdata[mi])

				grp_imgdata = []
				for j in xrange(img_per_grp):
					mj = index[proj_angles[proj_list[i][j]][3]]
					phi, theta, psi, s2x, s2y = get_params_proj(imgdata[mj])
					alpha, sx, sy, mirror = params_3D_2D_NEW(phi, theta, psi, s2x, s2y, mirror_list[i][j])
					if thetaM <= 90:
						if mirror == 0:  alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, phiM-phi, 0.0, 0.0, 1.0)
						else:            alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, 180-(phiM-phi), 0.0, 0.0, 1.0)
					else:
						if mirror == 0:  alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, -(phiM-phi), 0.0, 0.0, 1.0)
						else:            alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, -(180-(phiM-phi)), 0.0, 0.0, 1.0)
					set_params2D(imgdata[mj], [alpha, sx, sy, mirror, 1.0])
					grp_imgdata.append(imgdata[mj])
					#print grp_imgdata[j].get_xsize(), imgdata[mj].get_xsize()

				if not options.no_norm:
					#print grp_imgdata[j].get_xsize()
					mask = model_circle(nx/2-2, nx, nx)
					for k in xrange(img_per_grp):
						ave, std, minn, maxx = Util.infomask(grp_imgdata[k], mask, False)
						grp_imgdata[k] -= ave
						grp_imgdata[k] /= std
					del mask

				if options.fl > 0.0:
					from filter import filt_ctf, filt_table
					from fundamentals import fft, window2d
					nx2 = 2*nx
					ny2 = 2*ny
					if options.CTF:
						from utilities import pad
						for k in xrange(img_per_grp):
							grp_imgdata[k] = window2d(fft( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa) ),nx,ny)
							#grp_imgdata[k] = window2d(fft( filt_table( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa), fifi) ),nx,ny)
							#grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa)
					else:
						for k in xrange(img_per_grp):
							grp_imgdata[k] = filt_tanl( grp_imgdata[k], options.fl, options.aa)
							#grp_imgdata[k] = window2d(fft( filt_table( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa), fifi) ),nx,ny)
							#grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa)
				else:
					from utilities import pad, read_text_file
					from filter import filt_ctf, filt_table
					from fundamentals import fft, window2d
					nx2 = 2*nx
					ny2 = 2*ny
					if options.CTF:
						from utilities import pad
						for k in xrange(img_per_grp):
							grp_imgdata[k] = window2d( fft( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1) ) , nx,ny)
							#grp_imgdata[k] = window2d(fft( filt_table( filt_tanl( filt_ctf(fft(pad(grp_imgdata[k], nx2, ny2, 1,0.0)), grp_imgdata[k].get_attr("ctf"), binary=1), options.fl, options.aa), fifi) ),nx,ny)
							#grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa)

				'''
				if i < 10 and myid == main_node:
					for k in xrange(10):
						grp_imgdata[k].write_image("grp%03d.hdf"%i, k)
				'''
				"""
				if myid == main_node and i==0:
					for pp in xrange(len(grp_imgdata)):
						grp_imgdata[pp].write_image("pp.hdf", pp)
				"""
				ave, grp_imgdata = prepare_2d_forPCA(grp_imgdata)
				"""
				if myid == main_node and i==0:
					for pp in xrange(len(grp_imgdata)):
						grp_imgdata[pp].write_image("qq.hdf", pp)
				"""

				var = model_blank(nx,ny)
				for q in grp_imgdata:  Util.add_img2( var, q )
				Util.mul_scalar( var, 1.0/(len(grp_imgdata)-1))
				# Switch to std dev
				var = square_root(threshold(var))
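				# Note (assuming prepare_2d_forPCA above returns average-subtracted images, as its name
				# suggests): the sum of squared images divided by (n-1) is the per-pixel sample variance,
				# and thresholding before the square root guards against negative values from round-off.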
				#if options.CTF:	ave, var = avgvar_ctf(grp_imgdata, mode="a")
				#else:	            ave, var = avgvar(grp_imgdata, mode="a")
				"""
				if myid == main_node:
					ave.write_image("avgv.hdf",i)
					var.write_image("varv.hdf",i)
				"""
			
				set_params_proj(ave, [phiM, thetaM, 0.0, 0.0, 0.0])
				set_params_proj(var, [phiM, thetaM, 0.0, 0.0, 0.0])

				aveList.append(ave)
				varList.append(var)

				if options.VERBOSE:
					print "%5.2f%% done on processor %d"%(i*100.0/len(proj_list), myid)
				if nvec > 0:
					eig = pca(input_stacks=grp_imgdata, subavg="", mask_radius=radiuspca, nvec=nvec, incore=True, shuffle=False, genbuf=True)
					for k in xrange(nvec):
						set_params_proj(eig[k], [phiM, thetaM, 0.0, 0.0, 0.0])
						eigList[k].append(eig[k])
					"""
					if myid == 0 and i == 0:
						for k in xrange(nvec):
							eig[k].write_image("eig.hdf", k)
					"""

			del imgdata
			#  To this point, all averages, variances, and eigenvectors are computed

			if options.ave2D:
				from fundamentals import fpol
				if myid == main_node:
					km = 0
					for i in xrange(number_of_proc):
						if i == main_node :
							for im in xrange(len(aveList)):
								tmpvol=fpol(aveList[im], Tracker["nx"],Tracker["nx"],1)
								tmpvol.write_image(options.ave2D, km)
								km += 1
						else:
							nl = mpi_recv(1, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)
							nl = int(nl[0])
							for im in xrange(nl):
								ave = recv_EMData(i, im+i+70000)
								"""
								nm = mpi_recv(1, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)
								nm = int(nm[0])
								members = mpi_recv(nm, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)
								ave.set_attr('members', map(int, members))
								members = mpi_recv(nm, MPI_FLOAT, i, MPI_TAG_UB, MPI_COMM_WORLD)
								ave.set_attr('pix_err', map(float, members))
								members = mpi_recv(3, MPI_FLOAT, i, MPI_TAG_UB, MPI_COMM_WORLD)
								ave.set_attr('refprojdir', map(float, members))
								"""
								tmpvol=fpol(ave, Tracker["nx"],Tracker["nx"],1)
								tmpvol.write_image(options.ave2D, km)
								km += 1
				else:
					mpi_send(len(aveList), 1, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
					for im in xrange(len(aveList)):
						send_EMData(aveList[im], main_node,im+myid+70000)
						"""
						members = aveList[im].get_attr('members')
						mpi_send(len(members), 1, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
						mpi_send(members, len(members), MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
						members = aveList[im].get_attr('pix_err')
						mpi_send(members, len(members), MPI_FLOAT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
						try:
							members = aveList[im].get_attr('refprojdir')
							mpi_send(members, 3, MPI_FLOAT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
						except:
							mpi_send([-999.0,-999.0,-999.0], 3, MPI_FLOAT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
						"""

			if options.ave3D:
				from fundamentals import fpol
				if options.VERBOSE:
					print "Reconstructing 3D average volume"
				ave3D = recons3d_4nn_MPI(myid, aveList, symmetry=options.sym, npad=options.npad)
				bcast_EMData_to_all(ave3D, myid)
				if myid == main_node:
					ave3D=fpol(ave3D,Tracker["nx"],Tracker["nx"],Tracker["nx"])
					ave3D.write_image(options.ave3D)
					print_msg("%-70s:  %s\n"%("Writing to the disk volume reconstructed from averages as", options.ave3D))
			del ave, var, proj_list, stack, phi, theta, psi, s2x, s2y, alpha, sx, sy, mirror, aveList

			if nvec > 0:
				for k in xrange(nvec):
					if options.VERBOSE:
						print "Reconstructing eigenvolume", k
					cont = True
					ITER = 0
					mask2d = model_circle(radiuspca, nx, nx)
					while cont:
						#print "On node %d, iteration %d"%(myid, ITER)
						eig3D = recons3d_4nn_MPI(myid, eigList[k], symmetry=options.sym, npad=options.npad)
						bcast_EMData_to_all(eig3D, myid, main_node)
						if options.fl > 0.0:
							eig3D = filt_tanl(eig3D, options.fl, options.aa)
						if myid == main_node:
							eig3D.write_image("eig3d_%03d.hdf"%k, ITER)
						Util.mul_img( eig3D, model_circle(radiuspca, nx, nx, nx) )
						eig3Df, kb = prep_vol(eig3D)
						del eig3D
						cont = False
						icont = 0
						for l in xrange(len(eigList[k])):
							phi, theta, psi, s2x, s2y = get_params_proj(eigList[k][l])
							proj = prgs(eig3Df, kb, [phi, theta, psi, s2x, s2y])
							cl = ccc(proj, eigList[k][l], mask2d)
							if cl < 0.0:
								icont += 1
								cont = True
								eigList[k][l] *= -1.0
						u = int(cont)
						u = mpi_reduce([u], 1, MPI_INT, MPI_MAX, main_node, MPI_COMM_WORLD)
						icont = mpi_reduce([icont], 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD)

						if myid == main_node:
							u = int(u[0])
							print " Eigenvector: ",k," number changed ",int(icont[0])
						else: u = 0
						u = bcast_number_to_all(u, main_node)
						cont = bool(u)
						ITER += 1

					del eig3Df, kb
					mpi_barrier(MPI_COMM_WORLD)
				del eigList, mask2d

			if options.ave3D: del ave3D
			if options.var2D:
				from fundamentals import fpol 
				if myid == main_node:
					km = 0
					for i in xrange(number_of_proc):
						if i == main_node :
							for im in xrange(len(varList)):
								tmpvol=fpol(varList[im], Tracker["nx"], Tracker["nx"],1)
								tmpvol.write_image(options.var2D, km)
								km += 1
						else:
							nl = mpi_recv(1, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)
							nl = int(nl[0])
							for im in xrange(nl):
								ave = recv_EMData(i, im+i+70000)
								tmpvol=fpol(ave, Tracker["nx"], Tracker["nx"],1)
								tmpvol.write_image(options.var2D, km)
								km += 1
				else:
					mpi_send(len(varList), 1, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
					for im in xrange(len(varList)):
						send_EMData(varList[im], main_node, im+myid+70000)#  What with the attributes??

			mpi_barrier(MPI_COMM_WORLD)

		if  options.var3D:
			if myid == main_node and options.VERBOSE:
				print "Reconstructing 3D variability volume"

			t6 = time()
			radiusvar = options.radiusvar
			if( radiusvar < 0 ):  radiusvar = nx//2 -3
			res = recons3d_4nn_MPI(myid, varList, symmetry=options.sym, npad=options.npad)
			#res = recons3d_em_MPI(varList, vol_stack, options.iter, radiusvar, options.abs, True, options.sym, options.squ)
			if myid == main_node:
				from fundamentals import fpol
				res =fpol(res, Tracker["nx"], Tracker["nx"], Tracker["nx"])
				res.write_image(options.var3D)

			if myid == main_node:
				print_msg("%-70s:  %.2f\n"%("Reconstructing 3D variability took [s]", time()-t6))
				if options.VERBOSE:
					print "Reconstruction took: %.2f [min]"%((time()-t6)/60)

			if myid == main_node:
				print_msg("%-70s:  %.2f\n"%("Total time for these computations [s]", time()-t0))
				if options.VERBOSE:
					print "Total time for these computations: %.2f [min]"%((time()-t0)/60)
				print_end_msg("sx3dvariability")

		global_def.BATCH = False

		from mpi import mpi_finalize
		mpi_finalize()
Example #29
0
File: sxpetite.py Project: cpsemmens/eman2
def compute_fscs(stack, outputdir, chunkname, newgoodname, fscoutputdir, doit, keepchecking, nproc, myid, main_node):
	#  Compute reconstructions per group from good particles only to get FSC curves
	#  We will compute two FSC curves - from not averaged parameters and from averaged parameters
	#     So, we have to build two sets:
	#    not averaged  (A2+C3) versus (B0+D5)
	#          averaged  (A0+C1) versus (B3+D4)
	#    This requires pulling the good subsets given by goodX*;  I am not sure why "good", as sxconsistency above produced the newgood text files.
	#                                                                 Otherwise, I am not sure what newbad will contain.
	# Input that should vary:  
	#    "bdb:"+os.path.join(outputdir,"chunk%01d%01d"%(procid,i))
	#    os.path.join(outputdir,"newgood%01d.txt"%procid)
	#  Output should be in a separate directory "fscoutputdir"
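	#  Pairing used below (read off the file names): each FSC pair reconstructs chunks 0+2 versus 1+3;
	#  the "fn" volumes use the not-averaged parameter files (02+22 vs 10+32) and the "fa" volumes
	#  use the averaged ones (00+20 vs 11+31).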

	if(myid == main_node):
		if keepchecking:
			if(os.path.exists(fscoutputdir)):
				doit = 0
				print("Directory  ",fscoutputdir,"  exists!")
			else:
				doit = 1
				keepchecking = False
		else:
			doit = 1
		if doit:
			cmd = "{} {}".format("mkdir", fscoutputdir)
			cmdexecute(cmd)
	mpi_barrier(MPI_COMM_WORLD)
	
	#  not averaged
	doit, keepchecking = checkstep(os.path.join(fscoutputdir,"volfscn0.hdf"), keepchecking, myid, main_node)
	if doit:
		if(myid == main_node):
			#  A2+C3
			#     indices
			write_text_file( \
				map(int, read_text_file(os.path.join(outputdir,chunkname+"0.txt")))+map(int, read_text_file(os.path.join(outputdir,chunkname+"2.txt"))),   \
				os.path.join(fscoutputdir,"chunkfn0.txt"))
			#  params
			write_text_row( \
				read_text_row(os.path.join(outputdir,newgoodname+"02.txt"))+read_text_row(os.path.join(outputdir,newgoodname+"22.txt")), \
				os.path.join(fscoutputdir,"params-chunkfn0.txt"))

		mpi_barrier(MPI_COMM_WORLD)

		projdata = getindexdata(stack, os.path.join(fscoutputdir,"chunkfn0.txt"), os.path.join(fscoutputdir,"params-chunkfn0.txt"), myid, nproc)
		if ali3d_options.CTF:  vol = recons3d_4nn_ctf_MPI(myid, projdata, symmetry=ali3d_options.sym, npad = 2)
		else:                  vol = recons3d_4nn_MPI(myid, projdata, symmetry=ali3d_options.sym, npad = 2)
		del projdata
		if(myid == main_node):
			vol.write_image(os.path.join(fscoutputdir,"volfscn0.hdf"))
			line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
			print(line,"Executed successfully: 3D reconstruction of", os.path.join(fscoutputdir,"volfscn0.hdf"))
		del vol


	doit, keepchecking = checkstep(os.path.join(fscoutputdir,"volfscn1.hdf"), keepchecking, myid, main_node)
	if doit:
		if(myid == main_node):
			#  B0+D5
			#     indices
			write_text_file( \
				map(int, read_text_file(os.path.join(outputdir,chunkname+"1.txt")))+map(int, read_text_file(os.path.join(outputdir,chunkname+"3.txt"))),   \
				os.path.join(fscoutputdir,"chunkfn1.txt"))
			#  params
			write_text_row( \
				read_text_row(os.path.join(outputdir,newgoodname+"10.txt"))+read_text_row(os.path.join(outputdir,newgoodname+"32.txt")), \
				os.path.join(fscoutputdir,"params-chunkfn1.txt"))

		mpi_barrier(MPI_COMM_WORLD)

		projdata = getindexdata(stack, os.path.join(fscoutputdir,"chunkfn1.txt"), os.path.join(fscoutputdir,"params-chunkfn1.txt"), myid, nproc)
		if ali3d_options.CTF:  vol = recons3d_4nn_ctf_MPI(myid, projdata, symmetry=ali3d_options.sym, npad = 2)
		else:                  vol = recons3d_4nn_MPI(myid, projdata, symmetry=ali3d_options.sym, npad = 2)
		del projdata
		if(myid == main_node):
			vol.write_image(os.path.join(fscoutputdir,"volfscn1.hdf"))
			line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
			print(line,"Executed successfully: 3D reconstruction of", os.path.join(fscoutputdir,"volfscn1.hdf"))
		del vol

	#      averaged
	doit, keepchecking = checkstep(os.path.join(fscoutputdir,"volfsca0.hdf"), keepchecking, myid, main_node)
	if doit:
		if(myid == main_node):
			#  A0+C1
			#     indices
			write_text_file( \
				map(int, read_text_file(os.path.join(outputdir,chunkname+"0.txt")))+map(int, read_text_file(os.path.join(outputdir,chunkname+"2.txt"))),   \
				os.path.join(fscoutputdir,"chunkfa0.txt"))
			#  params
			write_text_row( \
				read_text_row(os.path.join(outputdir,newgoodname+"00.txt"))+read_text_row(os.path.join(outputdir,newgoodname+"20.txt")), \
				os.path.join(fscoutputdir,"params-chunkfa0.txt"))
		mpi_barrier(MPI_COMM_WORLD)

		projdata = getindexdata(stack, os.path.join(fscoutputdir,"chunkfa0.txt"), os.path.join(fscoutputdir,"params-chunkfa0.txt"), myid, nproc)
		if ali3d_options.CTF:  vol = recons3d_4nn_ctf_MPI(myid, projdata, symmetry=ali3d_options.sym, npad = 2)
		else:                  vol = recons3d_4nn_MPI(myid, projdata, symmetry=ali3d_options.sym, npad = 2)
		del projdata
		if(myid == main_node):
			vol.write_image(os.path.join(fscoutputdir,"volfsca0.hdf"))
			line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
			print(line,"Executed successfully: 3D reconstruction of", os.path.join(fscoutputdir,"volfsca0.hdf"))
		del vol


	doit, keepchecking = checkstep(os.path.join(fscoutputdir,"volfsca1.hdf"), keepchecking, myid, main_node)
	if doit:
		if(myid == main_node):
			#  B3+D4
			write_text_file( \
				map(int, read_text_file(os.path.join(outputdir,chunkname+"1.txt")))+map(int, read_text_file(os.path.join(outputdir,chunkname+"3.txt"))),   \
				os.path.join(fscoutputdir,"chunkfa1.txt"))
			#  params
			write_text_row( \
				read_text_row(os.path.join(outputdir,newgoodname+"11.txt"))+read_text_row(os.path.join(outputdir,newgoodname+"31.txt")), \
				os.path.join(fscoutputdir,"params-chunkfa1.txt"))
		mpi_barrier(MPI_COMM_WORLD)

		projdata = getindexdata(stack, os.path.join(fscoutputdir,"chunkfa1.txt"), os.path.join(fscoutputdir,"params-chunkfa1.txt"), myid, nproc)
		if ali3d_options.CTF:  vol = recons3d_4nn_ctf_MPI(myid, projdata, symmetry=ali3d_options.sym, npad = 2)
		else:                  vol = recons3d_4nn_MPI(myid, projdata, symmetry=ali3d_options.sym, npad = 2)
		del projdata
		if(myid == main_node):
			vol.write_image(os.path.join(fscoutputdir,"volfsca1.hdf"))
			line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
			print(line,"Executed successfully: 3D reconstruction of", os.path.join(fscoutputdir,"volfsca1.hdf"))
		del vol


 
	#  Get updated FSC curves
	if(myid == main_node):
		if(ali3d_options.mask3D is None):  mask = model_circle(radi,nnxo,nnxo,nnxo)
		else:
			mask = get_im(ali3d_options.mask3D)
		if keepchecking:
			if(os.path.exists(os.path.join(fscoutputdir,"fscn.txt"))):
				doit = 0
			else:
				doit = 1
				keepchecking = False
		else:  doit = 1
		if  doit:  fsc(get_im(os.path.join(fscoutputdir,"volfscn0.hdf"))*mask,\
				get_im(os.path.join(fscoutputdir,"volfscn1.hdf"))*mask,\
				1.0,os.path.join(fscoutputdir,"fscn.txt") )
		if keepchecking:
			if(os.path.exists(os.path.join(fscoutputdir,"fsca.txt"))):
				doit = 0
			else:
				doit = 1
				keepchecking = False
		else:  doit = 1
		if  doit:  fsc(get_im(os.path.join(fscoutputdir,"volfsca0.hdf"))*mask,\
				get_im(os.path.join(fscoutputdir,"volfsca1.hdf"))*mask,\
				1.0,os.path.join(fscoutputdir,"fsca.txt") )

		nfsc = read_text_file(os.path.join(fscoutputdir,"fscn.txt") ,-1)
		currentres = 0.5
		ns = len(nfsc[1])
		for i in xrange(1,ns-1):
			if ( (2*nfsc[1][i]/(1.0+nfsc[1][i]) ) < 0.5):
				currentres = nfsc[0][i-1]
				break
		print("  Current resolution ",i,currentres)
	else:
		currentres = 0.0
	currentres = bcast_number_to_all(currentres, source_node = main_node)
	if(currentres < 0.0):
		if(myid == main_node):
			print("  Something wrong with the resolution, cannot continue")
		mpi_finalize()
		exit()

	mpi_barrier(MPI_COMM_WORLD)
	return  currentres, doit, keepchecking
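
The resolution test at the end of compute_fscs converts the half-set FSC to a full-dataset estimate with 2*FSC/(1+FSC) and reports the last frequency before that estimate drops below 0.5. A minimal, self-contained sketch of the same criterion (plain Python; the helper name and the toy FSC curve below are illustrative, not part of the SPARX code):

def resolution_from_fsc(freqs, fsc, threshold=0.5):
    # 2*FSC/(1+FSC) converts the half-set FSC into a full-dataset estimate
    current = freqs[0]
    for i in range(1, len(fsc) - 1):
        if 2.0 * fsc[i] / (1.0 + fsc[i]) < threshold:
            return freqs[i - 1]
        current = freqs[i]
    return current

# toy curve: absolute frequencies 0 ... 0.49 with a smoothly decaying FSC
freqs = [0.01 * k for k in range(50)]
fsc = [1.0 / (1.0 + (f / 0.15) ** 4) for f in freqs]
print(resolution_from_fsc(freqs, fsc))
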
Example #30
0
def runcheck(classavgstack, reconfile, outdir, inangles=None, selectdoc=None, prjmethod='trilinear', displayYN=False, 
			 projstack='proj.hdf', outangles='angles.txt', outstack='comp-proj-reproj.hdf', normstack='comp-proj-reproj-norm.hdf'):
	
	print("\n%s, Modified 2018-12-07\n" % __file__)
	
	# Check if inputs exist
	check(classavgstack)
	check(reconfile)
	
	# Create directory if it doesn't exist
	if not os.path.isdir(outdir):
		os.makedirs(outdir)  # os.mkdir() can only operate one directory deep
		print("mkdir -p %s" % outdir)

	# Expand path for outputs
	projstack = os.path.join(outdir, projstack)
	outangles = os.path.join(outdir, outangles)
	outstack  = os.path.join(outdir, outstack)
	normstack = os.path.join(outdir, normstack)
	
	# Get number of images
	nimg0 = EMAN2_cppwrap.EMUtil.get_image_count(classavgstack)
	recon = EMAN2_cppwrap.EMData(reconfile)
	nx = recon.get_xsize()
	
	# In case class averages include discarded images, apply selection file
	if selectdoc:
		goodavgs, extension = os.path.splitext(classavgstack)
		newclasses = goodavgs + "_kept" + extension
		
		# e2proc2d appends to existing files, so rename existing output
		if os.path.exists(newclasses):
			renamefile = newclasses + '.bak'
			os.rename(newclasses, renamefile)
			print("mv %s %s" % (newclasses, renamefile))
		
		cmd7="e2proc2d.py %s %s --list=%s" % (classavgstack, newclasses, selectdoc)
		print(cmd7)
		os.system(cmd7)
		
		# Update class-averages
		classavgstack = newclasses
	
	# Import Euler angles
	if inangles:
		cmd6 = "sxheader.py %s --params=xform.projection --import=%s" % (classavgstack, inangles)
		print(cmd6)
		header(classavgstack, 'xform.projection', fimport=inangles)
	
	try:
		header(classavgstack, 'xform.projection', fexport=outangles)
		cmd1 = "sxheader.py %s --params=xform.projection --export=%s" % (classavgstack, outangles) 
		print(cmd1)
	except RuntimeError:
		print("\nERROR!! No projection angles found in class-average stack header!\n")
		print('Usage:', USAGE)
		exit()
	
	#cmd2="sxproject3d.py %s %s --angles=%s" % (recon, projstack, outangles)
	#print(cmd2)
	#os.system(cmd2)
	
	#  Here if you want to be fancy, there should be an option to choose the projection method,
	#  the mechanism can be copied from sxproject3d.py  PAP
	if prjmethod=='trilinear':
		method_num = 1
	elif prjmethod=='gridding':
		method_num = -1
	elif prjmethod=='nn':
		method_num = 0
	else:
		print("\nERROR!! Valid projection methods are: trilinear (default), gridding, and nn (nearest neighbor).")
		print('Usage:', USAGE)
		exit()
	
	#project3d(recon, stack=projstack, listagls=outangles)
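	#  Note: method_num selected above is not passed on here; the volume is
	#  prepared with trilinear interpolation (interpolation_method = 1).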
	recon = prep_vol(recon, npad = 2, interpolation_method = 1)

	result=[]
	#  Here you need actual radius to compute proper ccc's, but if you do, you have to deal with translations, PAP
	mask = model_circle(nx//2-2,nx,nx)
	
	# Number of images may have changed
	nimg1   = EMAN2_cppwrap.EMUtil.get_image_count(classavgstack)
	outangles = read_text_row(outangles)
	for imgnum in range(nimg1):
		# get class average
		classimg = get_im(classavgstack, imgnum)
		
		# compute re-projection
		prjimg = prgl(recon, outangles[imgnum], 1, False)
		
		# calculate 1D power spectra
		rops_dst = rops_table(classimg*mask)  
		rops_src = rops_table(prjimg)
		
		#  Set power spectrum of reprojection to the data.
		#  Since data has an envelope, it would make more sense to set data to reconstruction,
		#  but to do it one would have to know the actual resolution of the data. 
		#  You can check sxprocess.py --adjpw to see how this is done properly.  PAP
		table = [0.0]*len(rops_dst)  # initialize table
		for j in range( len(rops_dst) ):
			table[j] = sqrt( old_div(rops_dst[j],rops_src[j]) )
		prjimg = fft(filt_table(prjimg, table))  # match FFT amplitudes of re-projection and class average

		cccoeff = ccc(prjimg, classimg, mask)
		#print(imgnum, cccoeff)
		classimg.set_attr_dict({'cross-corr':cccoeff})
		prjimg.set_attr_dict({'cross-corr':cccoeff})
		prjimg.write_image(outstack,2*imgnum)
		classimg.write_image(outstack, 2*imgnum+1)
		result.append(cccoeff)
	del outangles
	meanccc = old_div(sum(result),nimg1)
	print("Average CCC is %s" % meanccc)

	nimg2 = EMAN2_cppwrap.EMUtil.get_image_count(outstack)
	
	for imgnum in range(nimg2):
		if (imgnum % 2 ==0):
			prjimg = get_im(outstack,imgnum)
			meanccc1 = prjimg.get_attr_default('mean-cross-corr', -1.0)
			prjimg.set_attr_dict({'mean-cross-corr':meanccc})
			write_header(outstack,prjimg,imgnum)
		if (imgnum % 100) == 0:
			print(imgnum)
	
	# e2proc2d appends to existing files, so delete existing output
	if os.path.exists(normstack):
		os.remove(normstack)
		print("rm %s" % normstack)
		


	#  Why would you want to do it?  If you do, it should have been done during ccc calculations,
	#  otherwise what is see is not corresponding to actual data, thus misleading.  PAP
	#cmd5="e2proc2d.py %s %s --process=normalize" % (outstack, normstack)
	#print(cmd5)
	#os.system(cmd5)
	
	# Optionally pop up e2display
	if displayYN:
		cmd8 = "e2display.py %s" % outstack
		print(cmd8)
		os.system(cmd8)
	
	print("Done!")
Example #31
0
def do_volume_mrk02(ref_data):
	"""
		data - projections (scattered between cpus) or the volume.  If volume, just do the volume processing
		options - the same for all cpus
		return - volume the same for all cpus
	"""
	from EMAN2          import Util
	from mpi            import mpi_comm_rank, mpi_comm_size, MPI_COMM_WORLD
	from filter         import filt_table
	from reconstruction import recons3d_4nn_MPI, recons3d_4nn_ctf_MPI
	from utilities      import bcast_EMData_to_all, bcast_number_to_all, model_blank
	from fundamentals import rops_table, fftip, fft
	import types

	# Retrieve the function specific input arguments from ref_data
	data     = ref_data[0]
	Tracker  = ref_data[1]
	iter     = ref_data[2]
	mpi_comm = ref_data[3]
	
	# # For DEBUG
	# print "Type of data %s" % (type(data))
	# print "Type of Tracker %s" % (type(Tracker))
	# print "Type of iter %s" % (type(iter))
	# print "Type of mpi_comm %s" % (type(mpi_comm))
	
	if(mpi_comm == None):  mpi_comm = MPI_COMM_WORLD
	myid  = mpi_comm_rank(mpi_comm)
	nproc = mpi_comm_size(mpi_comm)
	
	try:     local_filter = Tracker["local_filter"]
	except:  local_filter = False
	#=========================================================================
	# volume reconstruction
	if( type(data) == types.ListType ):
		if Tracker["constants"]["CTF"]:
			vol = recons3d_4nn_ctf_MPI(myid, data, Tracker["constants"]["snr"], \
					symmetry=Tracker["constants"]["sym"], npad=Tracker["constants"]["npad"], mpi_comm=mpi_comm, smearstep = Tracker["smearstep"])
		else:
			vol = recons3d_4nn_MPI    (myid, data,\
					symmetry=Tracker["constants"]["sym"], npad=Tracker["constants"]["npad"], mpi_comm=mpi_comm)
	else:
		vol = data

	if myid == 0:
		from morphology import threshold
		from filter     import filt_tanl, filt_btwl
		from utilities  import model_circle, get_im
		import types
		nx = vol.get_xsize()
		if(Tracker["constants"]["mask3D"] == None):
			mask3D = model_circle(int(Tracker["constants"]["radius"]*float(nx)/float(Tracker["constants"]["nnxo"])+0.5), nx, nx, nx)
		elif(Tracker["constants"]["mask3D"] == "auto"):
			from utilities import adaptive_mask
			mask3D = adaptive_mask(vol)
		else:
			if( type(Tracker["constants"]["mask3D"]) == types.StringType ):  mask3D = get_im(Tracker["constants"]["mask3D"])
			else:  mask3D = (Tracker["constants"]["mask3D"]).copy()
			nxm = mask3D.get_xsize()
			if( nx != nxm):
				from fundamentals import rot_shift3D
				mask3D = Util.window(rot_shift3D(mask3D,scale=float(nx)/float(nxm)),nx,nx,nx)
				nxm = mask3D.get_xsize()
				assert(nx == nxm)

		stat = Util.infomask(vol, mask3D, False)
		vol -= stat[0]
		Util.mul_scalar(vol, 1.0/stat[1])
		vol = threshold(vol)
		Util.mul_img(vol, mask3D)
		if( Tracker["PWadjustment"] ):
			from utilities    import read_text_file, write_text_file
			rt = read_text_file( Tracker["PWadjustment"] )
			fftip(vol)
			ro = rops_table(vol)
			#  Here unless I am mistaken it is enough to take the beginning of the reference pw.
			for i in xrange(1,len(ro)):  ro[i] = (rt[i]/ro[i])**Tracker["upscale"]
			#write_text_file(rops_table(filt_table( vol, ro),1),"foo.txt")
			if Tracker["constants"]["sausage"]:
				ny = vol.get_ysize()
				y = float(ny)
				from math import exp
				for i in xrange(len(ro)):  ro[i] *= \
				  (1.0+1.0*exp(-(((i/y/Tracker["constants"]["pixel_size"])-0.10)/0.025)**2)+1.0*exp(-(((i/y/Tracker["constants"]["pixel_size"])-0.215)/0.025)**2))

			if local_filter:
				# skip low-pass filtration
				vol = fft( filt_table( vol, ro) )
			else:
				if( type(Tracker["lowpass"]) == types.ListType ):
					vol = fft( filt_table( filt_table(vol, Tracker["lowpass"]), ro) )
				else:
					vol = fft( filt_table( filt_tanl(vol, Tracker["lowpass"], Tracker["falloff"]), ro) )
			del ro
		else:
			if Tracker["constants"]["sausage"]:
				ny = vol.get_ysize()
				y = float(ny)
				ro = [0.0]*(ny//2+2)
				from math import exp
				for i in xrange(len(ro)):  ro[i] = \
				  (1.0+1.0*exp(-(((i/y/Tracker["constants"]["pixel_size"])-0.10)/0.025)**2)+1.0*exp(-(((i/y/Tracker["constants"]["pixel_size"])-0.215)/0.025)**2))
				fftip(vol)
				vol = filt_table(vol, ro)  # filt_table returns a new image; keep the filtered volume
				del ro
			if not local_filter:
				if( type(Tracker["lowpass"]) == types.ListType ):
					vol = filt_table(vol, Tracker["lowpass"])
				else:
					vol = filt_tanl(vol, Tracker["lowpass"], Tracker["falloff"])
			if Tracker["constants"]["sausage"]: vol = fft(vol)

	if local_filter:
		from morphology import binarize
		if(myid == 0): nx = mask3D.get_xsize()
		else:  nx = 0
		nx = bcast_number_to_all(nx, source_node = 0)
		#  only main processor needs the two input volumes
		if(myid == 0):
			mask = binarize(mask3D, 0.5)
			locres = get_im(Tracker["local_filter"])
			lx = locres.get_xsize()
			if(lx != nx):
				if(lx < nx):
					from fundamentals import fdecimate, rot_shift3D
					mask = Util.window(rot_shift3D(mask,scale=float(lx)/float(nx)),lx,lx,lx)
					vol = fdecimate(vol, lx,lx,lx)
				else:  ERROR("local filter cannot be larger than input volume","user function",1)
			stat = Util.infomask(vol, mask, False)
			vol -= stat[0]
			Util.mul_scalar(vol, 1.0/stat[1])
		else:
			lx = 0
			locres = model_blank(1,1,1)
			vol = model_blank(1,1,1)
		lx = bcast_number_to_all(lx, source_node = 0)
		if( myid != 0 ):  mask = model_blank(lx,lx,lx)
		bcast_EMData_to_all(mask, myid, 0, comm=mpi_comm)
		from filter import filterlocal
		vol = filterlocal( locres, vol, mask, Tracker["falloff"], myid, 0, nproc)

		if myid == 0:
			if(lx < nx):
				from fundamentals import fpol
				vol = fpol(vol, nx,nx,nx)
			vol = threshold(vol)
			vol = filt_btwl(vol, 0.38, 0.5)#  This will have to be corrected.
			Util.mul_img(vol, mask3D)
			del mask3D
			# vol.write_image('toto%03d.hdf'%iter)
		else:
			vol = model_blank(nx,nx,nx)
	else:
		if myid == 0:
			#from utilities import write_text_file
			#write_text_file(rops_table(vol,1),"goo.txt")
			stat = Util.infomask(vol, mask3D, False)
			vol -= stat[0]
			Util.mul_scalar(vol, 1.0/stat[1])
			vol = threshold(vol)
			vol = filt_btwl(vol, 0.38, 0.5)#  This will have to be corrected.
			Util.mul_img(vol, mask3D)
			del mask3D
			# vol.write_image('toto%03d.hdf'%iter)
	# broadcast volume
	bcast_EMData_to_all(vol, myid, 0, comm=mpi_comm)
	#=========================================================================
	return vol
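
When Tracker["PWadjustment"] is set, do_volume_mrk02 whitens the volume against a reference 1D spectrum and, with the "sausage" option, boosts two frequency bands near 0.10 and 0.215 1/Angstrom. A minimal sketch of how such a weight table could be assembled (illustrative only; in the function above the reference spectrum comes from the PWadjustment text file and the table is applied with filt_table):

from math import exp

def pw_adjust_table(vol_pw, ref_pw, upscale=1.0, sausage=False, pixel_size=1.0, ny=None):
    # ratio of reference to measured rotational power spectrum, raised to "upscale"
    ro = [1.0] * len(vol_pw)
    for i in range(1, len(vol_pw)):
        ro[i] = (ref_pw[i] / vol_pw[i]) ** upscale
    if sausage:
        # two Gaussian boosts at ~0.10 and ~0.215 1/Angstrom, as in the "sausage" branch
        y = float(ny if ny is not None else 2 * (len(vol_pw) - 1))
        for i in range(len(ro)):
            s = i / y / pixel_size
            ro[i] *= (1.0 + exp(-((s - 0.10) / 0.025) ** 2)
                          + exp(-((s - 0.215) / 0.025) ** 2))
    return ro
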
Example #32
0
def compare(compare_ref_free, outfile_repro, ref_free_output, yrng, xrng,
            rstep, nx, apix, ref_free_cutoff, nproc, myid, main_node):

    from alignment import Numrinit, ringwe, Applyws
    from random import seed, randint
    from utilities import get_params2D, set_params2D, model_circle, inverse_transform2, combine_params2
    from fundamentals import rot_shift2D
    from mpi import MPI_COMM_WORLD, mpi_barrier, mpi_bcast, MPI_INT
    from statistics import fsc_mask
    from filter import fit_tanh
    from numpy import array

    fout = "%s.hdf" % ref_free_output
    frc_out = "%s_frc" % ref_free_output
    res_out = "%s_res" % ref_free_output

    nima = EMUtil.get_image_count(compare_ref_free)
    image_start, image_end = MPI_start_end(nima, nproc, myid)
    ima = EMData()
    ima.read_image(compare_ref_free, image_start)

    last_ring = nx / 2 - 2
    first_ring = 1
    mask = model_circle(last_ring, nx, nx)

    refi = []
    numref = EMUtil.get_image_count(outfile_repro)
    cnx = nx / 2 + 1
    cny = cnx

    mode = "F"
    numr = Numrinit(first_ring, last_ring, rstep, mode)
    wr = ringwe(numr, mode)

    ima.to_zero()
    for j in xrange(numref):
        temp = EMData()
        temp.read_image(outfile_repro, j)
        #  even, odd, number of even, number of images.  After frc, totav
        refi.append(temp)
    #  for each node read its share of data
    data = EMData.read_images(compare_ref_free, range(image_start, image_end))
    for im in xrange(image_start, image_end):
        data[im - image_start].set_attr('ID', im)
        set_params2D(data[im - image_start], [0, 0, 0, 0, 1])
    ringref = []
    for j in xrange(numref):
        refi[j].process_inplace("normalize.mask", {
            "mask": mask,
            "no_sigma": 1
        })  # normalize reference images to N(0,1)
        cimage = Util.Polar2Dm(refi[j], cnx, cny, numr, mode)
        Util.Frngs(cimage, numr)
        Applyws(cimage, numr, wr)
        ringref.append(cimage)

    if myid == main_node: seed(1000)
    data_shift = []
    frc = []
    res = []
    for im in xrange(image_start, image_end):
        alpha, sx, sy, mirror, scale = get_params2D(data[im - image_start])
        alphai, sxi, syi, scalei = inverse_transform2(alpha, sx, sy, 1.0)
        # normalize
        data[im - image_start].process_inplace("normalize.mask", {
            "mask": mask,
            "no_sigma": 1
        })  # subtract average under the mask
        # align current image to the reference
        [angt, sxst, syst, mirrort, xiref,
         peakt] = Util.multiref_polar_ali_2d(data[im - image_start], ringref,
                                             xrng, yrng, 1, mode, numr,
                                             cnx + sxi, cny + syi)
        iref = int(xiref)
        [alphan, sxn, syn, mn] = combine_params2(0.0, -sxi, -syi, 0, angt,
                                                 sxst, syst, (int)(mirrort))
        set_params2D(data[im - image_start],
                     [alphan, sxn, syn, int(mn), scale])
        temp = rot_shift2D(data[im - image_start], alphan, sxn, syn, mn)
        temp.set_attr('assign', iref)
        tfrc = fsc_mask(temp, refi[iref], mask=mask)
        temp.set_attr('frc', tfrc[1])
        res = fit_tanh(tfrc)
        temp.set_attr('res', res)
        data_shift.append(temp)

    for node in xrange(nproc):
        if myid == node:
            for image in data_shift:
                image.write_image(fout, -1)
                refindex = image.get_attr('assign')
                refi[refindex].write_image(fout, -1)
        mpi_barrier(MPI_COMM_WORLD)
    rejects = []
    if myid == main_node:
        a = EMData()
        index = 0
        frc = []
        res = []
        temp = []
        classes = []
        for im in xrange(nima):
            a.read_image(fout, index)
            frc.append(a.get_attr("frc"))
            if ref_free_cutoff != -1:
                classes.append(a.get_attr("class_ptcl_idxs"))
            tmp = a.get_attr("res")
            temp.append(tmp[0])
            res.append("%12f" % (apix / tmp[0]))
            res.append("\n")
            index = index + 2
        res_num = array(temp)
        mean_score = res_num.mean(axis=0)
        std_score = res_num.std(axis=0)
        std = std_score / 2
        if ref_free_cutoff != -1:
            cutoff = mean_score - std * ref_free_cutoff
            reject = res_num < cutoff
            index = 0
            for i in reject:
                if i: rejects.extend(classes[index])
                index = index + 1
            rejects.sort()
            length = mpi_bcast(len(rejects), 1, MPI_INT, main_node,
                               MPI_COMM_WORLD)
            rejects = mpi_bcast(rejects, length, MPI_INT, main_node,
                                MPI_COMM_WORLD)
        del a
        fout_frc = open(frc_out, 'w')
        fout_res = open(res_out, 'w')
        fout_res.write("".join(res))
        temp = zip(*frc)
        datstrings = []
        for i in temp:
            for j in i:
                datstrings.append("  %12f" % (j))
            datstrings.append("\n")
        fout_frc.write("".join(datstrings))
        fout_frc.close()

    del refi
    del ringref
    return rejects
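
The rejection rule in compare keeps a class average only if its fitted cut-off frequency is no more than ref_free_cutoff times half the standard deviation below the mean. A NumPy-only sketch of that rule (function and argument names are illustrative):

import numpy as np

def reject_classes(cutoff_freqs, class_ptcl_idxs, ref_free_cutoff):
    # reject classes whose cut-off frequency falls more than
    # ref_free_cutoff * (std/2) below the mean, and pool their particle indices
    scores = np.asarray(cutoff_freqs, dtype=float)
    threshold = scores.mean() - (scores.std() / 2.0) * ref_free_cutoff
    rejects = []
    for bad, members in zip(scores < threshold, class_ptcl_idxs):
        if bad:
            rejects.extend(members)
    return sorted(rejects)

# e.g. reject_classes([0.30, 0.28, 0.12], [[0, 1], [2, 3], [4, 5]], 1.0) -> [4, 5]
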
Example #33
0
def run3Dalignment(paramsdict, partids, partstack, outputdir, procid, myid, main_node, nproc):
	#  Reads from paramsdict["stack"] particles partids set parameters in partstack
	#    and do refinement as specified in paramsdict
	#
	#  Will create outputdir
	#  Will write to outputdir output parameters: params-chunk0.txt and params-chunk1.txt
	if(myid == main_node):
		#  Create output directory
		log = Logger(BaseLogger_Files())
		log.prefix = os.path.join(outputdir)
		#cmd = "mkdir "+log.prefix
		#cmdexecute(cmd)
		log.prefix += "/"
	else:  log = None
	mpi_barrier(MPI_COMM_WORLD)

	ali3d_options.delta  = paramsdict["delta"]
	ali3d_options.ts     = paramsdict["ts"]
	ali3d_options.xr     = paramsdict["xr"]
	#  the low-pass filter is applied to the shrunken data, so it has to be adjusted
	ali3d_options.fl     = paramsdict["lowpass"]/paramsdict["shrink"]
	ali3d_options.initfl = paramsdict["initialfl"]/paramsdict["shrink"]
	ali3d_options.aa     = paramsdict["falloff"]
	ali3d_options.maxit  = paramsdict["maxit"]
	ali3d_options.mask3D = paramsdict["mask3D"]
	ali3d_options.an	 = paramsdict["an"]
	ali3d_options.ou     = paramsdict["radius"]  #  This is changed in ali3d_base, but the shrunken value is needed in vol recons, fix it!
	shrinkage            = paramsdict["shrink"]

	projdata = getindexdata(paramsdict["stack"], partids, partstack, myid, nproc)
	onx = projdata[0].get_xsize()
	last_ring = ali3d_options.ou
	if last_ring < 0:	last_ring = int(onx/2) - 2
	mask2D  = model_circle(last_ring,onx,onx) - model_circle(ali3d_options.ir,onx,onx)
	if(shrinkage < 1.0):
		# get the new size
		masks2D = resample(mask2D, shrinkage)
		nx = masks2D.get_xsize()
		masks2D  = model_circle(int(last_ring*shrinkage+0.5),nx,nx) - model_circle(max(int(ali3d_options.ir*shrinkage+0.5),1),nx,nx)
	nima = len(projdata)
	oldshifts = [[0.0, 0.0] for i in xrange(nima)]  # one [sx, sy] pair per image
	for im in xrange(nima):
		#data[im].set_attr('ID', list_of_particles[im])
		ctf_applied = projdata[im].get_attr_default('ctf_applied', 0)
		phi,theta,psi,sx,sy = get_params_proj(projdata[im])
		projdata[im] = fshift(projdata[im], sx, sy)
		set_params_proj(projdata[im],[phi,theta,psi,0.0,0.0])
		#  For local SHC set anchor
		#if(nsoft == 1 and an[0] > -1):
		#	set_params_proj(data[im],[phi,tetha,psi,0.0,0.0], "xform.anchor")
		oldshifts[im] = [sx,sy]
		if ali3d_options.CTF :
			ctf_params = projdata[im].get_attr("ctf")
			if ctf_applied == 0:
				st = Util.infomask(projdata[im], mask2D, False)
				projdata[im] -= st[0]
				projdata[im] = filt_ctf(projdata[im], ctf_params)
				projdata[im].set_attr('ctf_applied', 1)
		if(shrinkage < 1.0):
			#phi,theta,psi,sx,sy = get_params_proj(projdata[im])
			projdata[im] = resample(projdata[im], shrinkage)
			st = Util.infomask(projdata[im], None, True)
			projdata[im] -= st[0]
			st = Util.infomask(projdata[im], masks2D, True)
			projdata[im] /= st[1]
			#sx *= shrinkage
			#sy *= shrinkage
			#set_params_proj(projdata[im], [phi,theta,psi,sx,sy])
			if ali3d_options.CTF :
				ctf_params.apix /= shrinkage
				projdata[im].set_attr('ctf', ctf_params)
		else:
			st = Util.infomask(projdata[im], None, True)
			projdata[im] -= st[0]
			st = Util.infomask(projdata[im], mask2D, True)
			projdata[im] /= st[1]
	del mask2D
	if(shrinkage < 1.0): del masks2D

	"""
	if(paramsdict["delpreviousmax"]):
		for i in xrange(len(projdata)):
			try:  projdata[i].del_attr("previousmax")
			except:  pass
	"""
	if(myid == main_node):
		print_dict(paramsdict,"3D alignment parameters")
		print("                    =>  actual lowpass      :  "******"                    =>  actual init lowpass :  "******"                    =>  PW adjustment       :  ",ali3d_options.pwreference)
		print("                    =>  partids             :  ",partids)
		print("                    =>  partstack           :  ",partstack)
		
	if(ali3d_options.fl > 0.46):  ERROR("Low pass filter in 3D alignment > 0.46 on the scale of shrank data","sxcenter_projections",1,myid) 

	#  Run alignment command, it returns params per CPU
	params = center_projections_3D(projdata, paramsdict["refvol"], \
									ali3d_options, onx, shrinkage, \
									mpi_comm = MPI_COMM_WORLD,  myid = myid, main_node = main_node, log = log )
	del log, projdata

	params = wrap_mpi_gatherv(params, main_node, MPI_COMM_WORLD)

	#  store params
	if(myid == main_node):
		for im in xrange(nima):
			params[im][0] = params[im][0]/shrinkage +oldshifts[im][0]
			params[im][1] = params[im][1]/shrinkage +oldshifts[im][1]
		line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " =>"
		print(line,"Executed successfully: ","3D alignment","  number of images:%7d"%len(params))
		write_text_row(params, os.path.join(outputdir,"params.txt") )
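
Alignment above runs on shrunken, pre-centered images, so the returned translations are divided by the shrink factor and the removed shifts are added back, as in the final loop. A small sketch of that bookkeeping, assuming the translation columns have been extracted from params as the loop suggests:

def restore_shifts(shifts, oldshifts, shrinkage):
    # shifts: [sx, sy] per image found on the shrunken data;
    # oldshifts: the shifts removed before shrinking;
    # result is expressed on the original (unshrunken) pixel grid
    return [[sx / shrinkage + osx, sy / shrinkage + osy]
            for (sx, sy), (osx, osy) in zip(shifts, oldshifts)]
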
Example #34
0
def main():
    """
	Main function.

	Arguments:
	None

	Returns:
	None
	"""

    command_args = parse_command_line()

    # Import volume
    print('Import volume.')
    input_vol = utilities.get_im(command_args.input_volume)

    # Sanity checks
    sanity_checks(command_args, input_vol)

    try:
        os.makedirs(command_args.output_dir)
    except OSError:
        print('Output directory already exists. No need to create it.')
    else:
        print('Created output directory.')
    output_prefix = os.path.join(command_args.output_dir, command_args.prefix)

    # Filter volume if specified
    if command_args.low_pass_filter_resolution is not None:
        print('Filter volume to {0}A.'.format(
            command_args.low_pass_filter_resolution))
        input_vol = sparx_filter.filt_tanl(
            input_vol,
            command_args.pixel_size / command_args.low_pass_filter_resolution,
            command_args.low_pass_filter_falloff)
        input_vol.write_image(output_prefix + '_filtered_volume.hdf')
    else:
        print('Skip filter volume.')

    # Create a mask based on the filtered volume
    print('Create mask')
    density_threshold = -9999.0
    nsigma = 1.0
    if command_args.mol_mass:
        density_threshold = input_vol.find_3d_threshold(
            command_args.mol_mass, command_args.pixel_size)
    elif command_args.threshold:
        density_threshold = command_args.threshold
    elif command_args.nsigma:
        nsigma = command_args.nsigma
    else:
        assert False

    if command_args.edge_type == 'cosine':
        mode = 'C'
    elif command_args.edge_type == 'gaussian':
        mode = 'G'
    else:
        assert False

    mask_first = morphology.adaptive_mask_scipy(
        input_vol,
        nsigma=nsigma,
        threshold=density_threshold,
        ndilation=command_args.ndilation,
        nerosion=command_args.nerosion,
        edge_width=command_args.edge_width,
        allow_disconnected=command_args.allow_disconnected,
        mode=mode,
        do_approx=command_args.do_old,
    )

    # Create a second mask based on the filtered volume
    s_mask = None
    s_density_threshold = 1
    s_nsigma = 1.0
    if command_args.second_mask is not None:
        s_mask = utilities.get_im(command_args.second_mask)
        density_threshold = -9999.0
        nsigma = 1.0
        if command_args.s_mol_mass:
            s_density_threshold = input_vol.find_3d_threshold(
                command_args.s_mol_mass, command_args.s_pixel_size)
        elif command_args.s_threshold:
            s_density_threshold = command_args.s_threshold
        elif command_args.s_nsigma:
            s_nsigma = command_args.s_nsigma
        else:
            assert False
    elif command_args.second_mask_shape is not None:
        nx = mask_first.get_xsize()
        ny = mask_first.get_ysize()
        nz = mask_first.get_zsize()
        if command_args.second_mask_shape == 'cube':
            s_nx = command_args.s_nx
            s_ny = command_args.s_ny
            s_nz = command_args.s_nz
            s_mask = utilities.model_blank(s_nx, s_ny, s_nz, 1)
        elif command_args.second_mask_shape == 'cylinder':
            s_radius = command_args.s_radius
            s_nx = command_args.s_nx
            s_ny = command_args.s_ny
            s_nz = command_args.s_nz
            s_mask = utilities.model_cylinder(s_radius, s_nx, s_ny, s_nz)
        elif command_args.second_mask_shape == 'sphere':
            s_radius = command_args.s_radius
            s_nx = command_args.s_nx
            s_ny = command_args.s_ny
            s_nz = command_args.s_nz
            s_mask = utilities.model_circle(s_radius, s_nx, s_ny, s_nz)
        else:
            assert False
        s_mask = utilities.pad(s_mask, nx, ny, nz, 0)

    if s_mask is not None:
        print('Create second mask')

        if command_args.s_edge_type == 'cosine':
            mode = 'C'
        elif command_args.s_edge_type == 'gaussian':
            mode = 'G'
        else:
            assert False

        s_mask = morphology.adaptive_mask_scipy(
            s_mask,
            nsigma=s_nsigma,
            threshold=s_density_threshold,
            ndilation=command_args.s_ndilation,
            nerosion=command_args.s_nerosion,
            edge_width=command_args.s_edge_width,
            allow_disconnected=command_args.s_allow_disconnected,
            mode=mode,
            do_approx=command_args.s_do_old)
        if command_args.s_invert:
            s_mask = 1 - s_mask
        mask_first.write_image(output_prefix + '_mask_first.hdf')
        s_mask.write_image(output_prefix + '_mask_second.hdf')
        masked_combined = mask_first * s_mask
        masked_combined.write_image(output_prefix + '_mask.hdf')
    else:
        mask_first.write_image(output_prefix + '_mask.hdf')
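
The script above optionally pads a second mask to the size of the first, inverts it if requested, and multiplies the two masks together. A NumPy stand-in for that combination step (utilities.pad and the EMData arithmetic do the same on EMAN2 images; names here are illustrative):

import numpy as np

def pad_to(mask, shape, background=0.0):
    # center a smaller mask in a larger box, as utilities.pad does above
    out = np.full(shape, background, dtype=float)
    slices = tuple(slice((s - m) // 2, (s - m) // 2 + m)
                   for s, m in zip(shape, mask.shape))
    out[slices] = mask
    return out

def combine_masks(primary, secondary=None, invert_secondary=False):
    combined = np.asarray(primary, dtype=float)
    if secondary is not None:
        secondary = pad_to(np.asarray(secondary, dtype=float), combined.shape)
        if invert_secondary:
            secondary = 1.0 - secondary
        combined = combined * secondary
    return combined
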
Example #35
0
def do_volume_mrk03(ref_data):
	"""
		data - projections (scattered between cpus) or the volume.  If volume, just do the volume processing
		options - the same for all cpus
		return - volume the same for all cpus
	"""
	from EMAN2          import Util
	from mpi            import mpi_comm_rank, mpi_comm_size, MPI_COMM_WORLD
	from filter         import filt_table
	from reconstruction import recons3d_4nn_MPI, recons3d_4nnw_MPI  #  recons3d_4nn_ctf_MPI
	from utilities      import bcast_EMData_to_all, bcast_number_to_all, model_blank
	from fundamentals import rops_table, fftip, fft
	import types

	# Retrieve the function specific input arguments from ref_data
	data     = ref_data[0]
	Tracker  = ref_data[1]
	iter     = ref_data[2]
	mpi_comm = ref_data[3]
	
	# # For DEBUG
	# print "Type of data %s" % (type(data))
	# print "Type of Tracker %s" % (type(Tracker))
	# print "Type of iter %s" % (type(iter))
	# print "Type of mpi_comm %s" % (type(mpi_comm))
	
	if(mpi_comm == None):  mpi_comm = MPI_COMM_WORLD
	myid  = mpi_comm_rank(mpi_comm)
	nproc = mpi_comm_size(mpi_comm)
	
	try:     local_filter = Tracker["local_filter"]
	except:  local_filter = False
	#=========================================================================
	# volume reconstruction
	if( type(data) == types.ListType ):
		if Tracker["constants"]["CTF"]:
			#vol = recons3d_4nn_ctf_MPI(myid, data, Tracker["constants"]["snr"], \
			#		symmetry=Tracker["constants"]["sym"], npad=Tracker["constants"]["npad"], mpi_comm=mpi_comm, smearstep = Tracker["smearstep"])
			vol = recons3d_4nnw_MPI(myid, data, Tracker["bckgnoise"], Tracker["constants"]["snr"], \
				symmetry=Tracker["constants"]["sym"], npad=Tracker["constants"]["npad"], mpi_comm=mpi_comm, smearstep = Tracker["smearstep"])
		else:
			vol = recons3d_4nn_MPI    (myid, data,\
					symmetry=Tracker["constants"]["sym"], npad=Tracker["constants"]["npad"], mpi_comm=mpi_comm)
	else:
		vol = data

	if myid == 0:
		from morphology import threshold
		from filter     import filt_tanl, filt_btwl
		from utilities  import model_circle, get_im
		import types
		nx = vol.get_xsize()
		if(Tracker["constants"]["mask3D"] == None):
			mask3D = model_circle(int(Tracker["constants"]["radius"]*float(nx)/float(Tracker["constants"]["nnxo"])+0.5), nx, nx, nx)
		elif(Tracker["constants"]["mask3D"] == "auto"):
			from utilities import adaptive_mask
			mask3D = adaptive_mask(vol)
		else:
			if( type(Tracker["constants"]["mask3D"]) == types.StringType ):  mask3D = get_im(Tracker["constants"]["mask3D"])
			else:  mask3D = (Tracker["constants"]["mask3D"]).copy()
			nxm = mask3D.get_xsize()
			if( nx != nxm):
				from fundamentals import rot_shift3D
				mask3D = Util.window(rot_shift3D(mask3D,scale=float(nx)/float(nxm)),nx,nx,nx)
				nxm = mask3D.get_xsize()
				assert(nx == nxm)

		stat = Util.infomask(vol, mask3D, False)
		vol -= stat[0]
		Util.mul_scalar(vol, 1.0/stat[1])
		vol = threshold(vol)
		Util.mul_img(vol, mask3D)
		if not local_filter:
			if( type(Tracker["lowpass"]) == types.ListType ):
				vol = filt_table(vol, Tracker["lowpass"])
			else:
				vol = filt_tanl(vol, Tracker["lowpass"], Tracker["falloff"])

	if local_filter:
		from morphology import binarize
		if(myid == 0): nx = mask3D.get_xsize()
		else:  nx = 0
		nx = bcast_number_to_all(nx, source_node = 0)
		#  only main processor needs the two input volumes
		if(myid == 0):
			mask = binarize(mask3D, 0.5)
			locres = get_im(Tracker["local_filter"])
			lx = locres.get_xsize()
			if(lx != nx):
				if(lx < nx):
					from fundamentals import fdecimate, rot_shift3D
					mask = Util.window(rot_shift3D(mask,scale=float(lx)/float(nx)),lx,lx,lx)
					vol = fdecimate(vol, lx,lx,lx)
				else:  ERROR("local filter cannot be larger than input volume","user function",1)
			stat = Util.infomask(vol, mask, False)
			vol -= stat[0]
			Util.mul_scalar(vol, 1.0/stat[1])
		else:
			lx = 0
			locres = model_blank(1,1,1)
			vol = model_blank(1,1,1)
		lx = bcast_number_to_all(lx, source_node = 0)
		if( myid != 0 ):  mask = model_blank(lx,lx,lx)
		bcast_EMData_to_all(mask, myid, 0, comm=mpi_comm)
		from filter import filterlocal
		vol = filterlocal( locres, vol, mask, Tracker["falloff"], myid, 0, nproc)

		if myid == 0:
			if(lx < nx):
				from fundamentals import fpol
				vol = fpol(vol, nx,nx,nx)
			vol = threshold(vol)
			Util.mul_img(vol, mask3D)
			del mask3D
			# vol.write_image('toto%03d.hdf'%iter)
		else:
			vol = model_blank(nx,nx,nx)
	"""
	else:
		if myid == 0:
			#from utilities import write_text_file
			#write_text_file(rops_table(vol,1),"goo.txt")
			stat = Util.infomask(vol, mask3D, False)
			vol -= stat[0]
			Util.mul_scalar(vol, 1.0/stat[1])
			vol = threshold(vol)
			Util.mul_img(vol, mask3D)
			del mask3D
			# vol.write_image('toto%03d.hdf'%iter)
	"""
	# broadcast volume
	bcast_EMData_to_all(vol, myid, 0, comm=mpi_comm)
	#=========================================================================
	return vol
Example #36
0
File: sxrviper.py Project: raj347/eman2
def calculate_volumes_after_rotation_and_save_them(ali3d_options, rviper_iter, masterdir, bdb_stack_location, mpi_rank, mpi_size,
												   no_of_viper_runs_analyzed_together, no_of_viper_runs_analyzed_together_from_user_options, mpi_comm = -1):
	
	# This function takes into account the case in which there are more processors than images

	if mpi_comm == -1:
		mpi_comm = MPI_COMM_WORLD

	# some arguments are for debugging purposes

	mainoutputdir = masterdir + DIR_DELIM + NAME_OF_MAIN_DIR + ("%03d" + DIR_DELIM) %(rviper_iter)

	# list_of_projection_indices_used_for_outlier_elimination = map(int, read_text_file(mainoutputdir + DIR_DELIM + "list_of_viper_runs_included_in_outlier_elimination.txt"))
	import json; f = open(mainoutputdir + "list_of_viper_runs_included_in_outlier_elimination.json", 'r')
	list_of_independent_viper_run_indices_used_for_outlier_elimination  = json.load(f); f.close()

	if len(list_of_independent_viper_run_indices_used_for_outlier_elimination)==0:
		print "Error: len(list_of_independent_viper_run_indices_used_for_outlier_elimination)==0"
		mpi_finalize()
		sys.exit()

	# if this data analysis step was already performed in the past then return
	# for future changes make sure that the file checked is the last one to be processed !!!
	
	# if(os.path.exists(mainoutputdir + DIR_DELIM + NAME_OF_RUN_DIR + "%03d"%(no_of_viper_runs_analyzed_together - 1) + DIR_DELIM + "rotated_volume.hdf")):
	# check_last_run = max(get_latest_directory_increment_value(mainoutputdir, NAME_OF_RUN_DIR, start_value=0), no_of_viper_runs_analyzed_together_from_user_options)
	# if(os.path.exists(mainoutputdir + DIR_DELIM + NAME_OF_RUN_DIR + "%03d"%(check_last_run) + DIR_DELIM + "rotated_volume.hdf")):
	# 	return

	# if this data analysis step was already performed in the past then return
	for check_run in list_of_independent_viper_run_indices_used_for_outlier_elimination:
		if not (os.path.exists(mainoutputdir + DIR_DELIM + NAME_OF_RUN_DIR + "%03d"%(check_run) + DIR_DELIM + "rotated_volume.hdf")):
			break
	else:
		return

	partstack = []
	# for i1 in range(0,no_of_viper_runs_analyzed_together):
	for i1 in list_of_independent_viper_run_indices_used_for_outlier_elimination:
		partstack.append(mainoutputdir + NAME_OF_RUN_DIR + "%03d"%(i1) + DIR_DELIM + "rotated_reduced_params.txt")
	partids_file_name = mainoutputdir + "this_iteration_index_keep_images.txt"

	lpartids = map(int, read_text_file(partids_file_name) )
	n_projs = len(lpartids)


	if (mpi_size > n_projs):
		# if there are more processors than images
		working = int(not(mpi_rank < n_projs))
		mpi_subcomm = mpi_comm_split(mpi_comm, working,  mpi_rank - working*n_projs)
		mpi_subsize = mpi_comm_size(mpi_subcomm)
		mpi_subrank = mpi_comm_rank(mpi_subcomm)
		if (mpi_rank < n_projs):

			# for i in xrange(no_of_viper_runs_analyzed_together):
			for idx, i in enumerate(list_of_independent_viper_run_indices_used_for_outlier_elimination):
				projdata = getindexdata(bdb_stack_location + "_%03d"%(rviper_iter - 1), partids_file_name, partstack[idx], mpi_rank, mpi_subsize)
				vol = do_volume(projdata, ali3d_options, 0, mpi_comm = mpi_subcomm)
				del projdata
				if( mpi_rank == 0):
					vol.write_image(mainoutputdir + DIR_DELIM + NAME_OF_RUN_DIR + "%03d"%(i) + DIR_DELIM + "rotated_volume.hdf")
					line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " => "
					print line  + "Generated rec_ref_volume_run #%01d \n"%i
				del vol

		mpi_barrier(mpi_comm)
	else:
		for idx, i in enumerate(list_of_independent_viper_run_indices_used_for_outlier_elimination):
			projdata = getindexdata(bdb_stack_location + "_%03d"%(rviper_iter - 1), partids_file_name, partstack[idx], mpi_rank, mpi_size)
			vol = do_volume(projdata, ali3d_options, 0, mpi_comm = mpi_comm)
			del projdata
			if( mpi_rank == 0):
				vol.write_image(mainoutputdir + DIR_DELIM + NAME_OF_RUN_DIR + "%03d"%(i) + DIR_DELIM + "rotated_volume.hdf")
				line = strftime("%Y-%m-%d_%H:%M:%S", localtime()) + " => "
				print line + "Generated rec_ref_volume_run #%01d"%i
			del vol

	if( mpi_rank == 0):
		# Align all rotated volumes, calculate their average and save as an overall result
		from utilities import get_params3D, set_params3D, get_im, model_circle
		from statistics import ave_var
		from applications import ali_vol
		# vls = [None]*no_of_viper_runs_analyzed_together
		vls = [None]*len(list_of_independent_viper_run_indices_used_for_outlier_elimination)
		# for i in xrange(no_of_viper_runs_analyzed_together):
		for idx, i in enumerate(list_of_independent_viper_run_indices_used_for_outlier_elimination):
			vls[idx] = get_im(mainoutputdir + DIR_DELIM + NAME_OF_RUN_DIR + "%03d"%(i) + DIR_DELIM + "rotated_volume.hdf")
			set_params3D(vls[idx],[0.,0.,0.,0.,0.,0.,0,1.0])
		asa,sas = ave_var(vls)
		# do the alignment
		nx = asa.get_xsize()
		radius = nx/2 - .5
		st = Util.infomask(asa*asa, model_circle(radius,nx,nx,nx), True)
		goal = st[0]
		going = True
		while(going):
			set_params3D(asa,[0.,0.,0.,0.,0.,0.,0,1.0])
			# for i in xrange(no_of_viper_runs_analyzed_together):
			for idx, i in enumerate(list_of_independent_viper_run_indices_used_for_outlier_elimination):
				o = ali_vol(vls[idx],asa,7.0,5.,radius)  # range of angles and shifts, maybe should be adjusted
				p = get_params3D(o)
				del o
				set_params3D(vls[idx],p)
			asa,sas = ave_var(vls)
			st = Util.infomask(asa*asa, model_circle(radius,nx,nx,nx), True)
			if(st[0] > goal):  goal = st[0]
			else:  going = False
		# over and out
		asa.write_image(mainoutputdir + DIR_DELIM + "average_volume.hdf")
		sas.write_image(mainoutputdir + DIR_DELIM + "variance_volume.hdf")
	return
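
The main-node block above repeatedly aligns each rotated volume to the running average and re-averages, stopping once the masked power of the average no longer increases. A skeleton of that convergence loop with the EMAN2-specific calls abstracted into caller-supplied functions (align_to, average_of and masked_power stand in for ali_vol, ave_var and Util.infomask):

def iterative_average(volumes, align_to, average_of, masked_power):
    # keep realigning to the running average while the masked power improves
    avg = average_of(volumes)
    goal = masked_power(avg)
    improving = True
    while improving:
        volumes = [align_to(v, avg) for v in volumes]
        avg = average_of(volumes)
        score = masked_power(avg)
        if score > goal:
            goal = score
        else:
            improving = False
    return avg
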
Example #37
0
def ali3d_MPI(stack, ref_vol, outdir, maskfile = None, ir = 1, ou = -1, rs = 1, 
	    xr = "4 2 2 1", yr = "-1", ts = "1 1 0.5 0.25", delta = "10 6 4 4", an = "-1",
	    center = 0, maxit = 5, term = 95, CTF = False, fourvar = False, snr = 1.0,  ref_a = "S", sym = "c1", 
	    sort=True, cutoff=999.99, pix_cutoff="0", two_tail=False, model_jump="1 1 1 1 1", restart=False, save_half=False,
	    protos=None, oplane=None, lmask=-1, ilmask=-1, findseam=False, vertstep=None, hpars="-1", hsearch="73.0 170.0",
	    full_output = False, compare_repro = False, compare_ref_free = "-1", ref_free_cutoff= "-1 -1 -1 -1",
	    wcmask = None, debug = False, recon_pad = 4):

	from alignment      import Numrinit, prepare_refrings
	from utilities      import model_circle, get_image, drop_image, get_input_from_string
	from utilities      import bcast_list_to_all, bcast_number_to_all, reduce_EMData_to_root, bcast_EMData_to_all 
	from utilities      import send_attr_dict
	from utilities      import get_params_proj, file_type
	from fundamentals   import rot_avg_image
	import os
	import types
	from utilities      import print_begin_msg, print_end_msg, print_msg
	from mpi	    import mpi_bcast, mpi_comm_size, mpi_comm_rank, MPI_FLOAT, MPI_COMM_WORLD, mpi_barrier, mpi_reduce
	from mpi	    import mpi_reduce, MPI_INT, MPI_SUM, mpi_finalize
	from filter	 import filt_ctf
	from projection     import prep_vol, prgs
	from statistics     import hist_list, varf3d_MPI, fsc_mask
	from numpy	  import array, bincount, array2string, ones

	number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
	myid	   = mpi_comm_rank(MPI_COMM_WORLD)
	main_node = 0
	if myid == main_node:
		if os.path.exists(outdir):  ERROR('Output directory exists, please change the name and restart the program', "ali3d_MPI", 1)
		os.mkdir(outdir)
	mpi_barrier(MPI_COMM_WORLD)

	if debug:
		from time import sleep
		while not os.path.exists(outdir):
			print  "Node ",myid,"  waiting..."
			sleep(5)

		info_file = os.path.join(outdir, "progress%04d"%myid)
		finfo = open(info_file, 'w')
	else:
		finfo = None
	mjump = get_input_from_string(model_jump)
	xrng	= get_input_from_string(xr)
	if  yr == "-1":  yrng = xrng
	else	  :  yrng = get_input_from_string(yr)
	step	= get_input_from_string(ts)
	delta       = get_input_from_string(delta)
	ref_free_cutoff = get_input_from_string(ref_free_cutoff)	
	pix_cutoff = get_input_from_string(pix_cutoff)
	
	lstp = min(len(xrng), len(yrng), len(step), len(delta))
	if an == "-1":
		an = [-1] * lstp
	else:
		an = get_input_from_string(an)
	# make sure pix_cutoff is set for all iterations
	if len(pix_cutoff)<lstp:
		for i in xrange(len(pix_cutoff),lstp):
			pix_cutoff.append(pix_cutoff[-1])
	# don't waste time on sub-pixel alignment for low-resolution angular increments
	for i in range(len(step)):
		if (delta[i] > 4 or delta[i] == -1) and step[i] < 1:
			step[i] = 1

	first_ring  = int(ir)
	rstep       = int(rs)
	last_ring   = int(ou)
	max_iter    = int(maxit)
	center      = int(center)

	nrefs   = EMUtil.get_image_count( ref_vol )
	nmasks = 0
	if maskfile:
		# read number of masks within each maskfile (mc)
		nmasks   = EMUtil.get_image_count( maskfile )
		# open masks within maskfile (mc)
		maskF   = EMData.read_images(maskfile, xrange(nmasks))
	vol     = EMData.read_images(ref_vol, xrange(nrefs))
	nx      = vol[0].get_xsize()

	## make sure box sizes are the same
	if myid == main_node:
		im=EMData.read_images(stack,[0])
		bx = im[0].get_xsize()
		if bx!=nx:
			print_msg("Error: Stack box size (%i) differs from initial model (%i)\n"%(bx,nx))
			sys.exit()
		del im,bx
	
	# for helical processing:
	helicalrecon = False
	if protos is not None or hpars != "-1" or findseam is True:
		helicalrecon = True
		# if no out-of-plane param set, use 5 degrees
		if oplane is None:
			oplane=5.0
	if protos is not None:
		proto = get_input_from_string(protos)
		if len(proto) != nrefs:
			print_msg("Error: insufficient protofilament numbers supplied")
			sys.exit()
	if hpars != "-1":
		hpars = get_input_from_string(hpars)
		if len(hpars) != 2*nrefs:
			print_msg("Error: insufficient helical parameters supplied")
			sys.exit()
	## create helical parameter file for helical reconstruction
	if helicalrecon is True and myid == main_node:
		from hfunctions import createHpar
		# create initial helical parameter files
		dp=[0]*nrefs
		dphi=[0]*nrefs
		vdp=[0]*nrefs
		vdphi=[0]*nrefs
		for iref in xrange(nrefs):
			hpar = os.path.join(outdir,"hpar%02d.spi"%(iref))
			params = False
			if hpars != "-1":
				# if helical parameters explicitly given, set twist & rise
				params = [float(hpars[iref*2]),float(hpars[(iref*2)+1])]
			dp[iref],dphi[iref],vdp[iref],vdphi[iref] = createHpar(hpar,proto[iref],params,vertstep)

	# get values for helical search parameters
	hsearch = get_input_from_string(hsearch)
	if len(hsearch) != 2:
		print_msg("Error: specify outer and inner radii for helical search")
		sys.exit()

	if last_ring < 0 or last_ring > int(nx/2)-2 :	last_ring = int(nx/2) - 2

	if myid == main_node:
	#	import user_functions
	#	user_func = user_functions.factory[user_func_name]

		print_begin_msg("ali3d_MPI")
		print_msg("Input stack		 : %s\n"%(stack))
		print_msg("Reference volume	    : %s\n"%(ref_vol))	
		print_msg("Output directory	    : %s\n"%(outdir))
		if nmasks > 0:
			print_msg("Maskfile (number of masks)  : %s (%i)\n"%(maskfile,nmasks))
		print_msg("Inner radius		: %i\n"%(first_ring))
		print_msg("Outer radius		: %i\n"%(last_ring))
		print_msg("Ring step		   : %i\n"%(rstep))
		print_msg("X search range	      : %s\n"%(xrng))
		print_msg("Y search range	      : %s\n"%(yrng))
		print_msg("Translational step	  : %s\n"%(step))
		print_msg("Angular step		: %s\n"%(delta))
		print_msg("Angular search range	: %s\n"%(an))
		print_msg("Maximum iteration	   : %i\n"%(max_iter))
		print_msg("Center type		 : %i\n"%(center))
		print_msg("CTF correction	      : %s\n"%(CTF))
		print_msg("Signal-to-Noise Ratio       : %f\n"%(snr))
		print_msg("Reference projection method : %s\n"%(ref_a))
		print_msg("Symmetry group	      : %s\n"%(sym))
		print_msg("Fourier padding for 3D      : %i\n"%(recon_pad))
		print_msg("Number of reference models  : %i\n"%(nrefs))
		print_msg("Sort images between models  : %s\n"%(sort))
		print_msg("Allow images to jump	: %s\n"%(mjump))
		print_msg("CC cutoff standard dev      : %f\n"%(cutoff))
		print_msg("Two tail cutoff	     : %s\n"%(two_tail))
		print_msg("Termination pix error       : %f\n"%(term))
		print_msg("Pixel error cutoff	  : %s\n"%(pix_cutoff))
		print_msg("Restart		     : %s\n"%(restart))
		print_msg("Full output		 : %s\n"%(full_output))
		print_msg("Compare reprojections       : %s\n"%(compare_repro))
		print_msg("Compare ref free class avgs : %s\n"%(compare_ref_free))
		print_msg("Use cutoff from ref free    : %s\n"%(ref_free_cutoff))
		if protos:
			print_msg("Protofilament numbers	: %s\n"%(proto))
			print_msg("Using helical search range   : %s\n"%hsearch) 
		if findseam is True:
			print_msg("Using seam-based reconstruction\n")
		if hpars != "-1":
			print_msg("Using hpars		  : %s\n"%hpars)
		if vertstep != None:
			print_msg("Using vertical step    : %.2f\n"%vertstep)
		if save_half is True:
			print_msg("Saving even/odd halves\n")
		for i in xrange(100) : print_msg("*")
		print_msg("\n\n")
	if maskfile:
		if type(maskfile) is types.StringType: mask3D = get_image(maskfile)
		else:				  mask3D = maskfile
	else: mask3D = model_circle(last_ring, nx, nx, nx)

	numr	= Numrinit(first_ring, last_ring, rstep, "F")
	mask2D  = model_circle(last_ring,nx,nx) - model_circle(first_ring,nx,nx)

	fscmask = model_circle(last_ring,nx,nx,nx)
	if CTF:
		from filter	 import filt_ctf
	from reconstruction_rjh import rec3D_MPI_noCTF

	if myid == main_node:
		active = EMUtil.get_all_attributes(stack, 'active')
		list_of_particles = []
		for im in xrange(len(active)):
			if active[im]:  list_of_particles.append(im)
		del active
		nima = len(list_of_particles)
	else:
		nima = 0
	total_nima = bcast_number_to_all(nima, source_node = main_node)

	if myid != main_node:
		list_of_particles = [-1]*total_nima
	list_of_particles = bcast_list_to_all(list_of_particles, source_node = main_node)

	image_start, image_end = MPI_start_end(total_nima, number_of_proc, myid)

	# create a list of images for each node
	list_of_particles = list_of_particles[image_start: image_end]
	nima = len(list_of_particles)
	if debug:
		finfo.write("image_start, image_end: %d %d\n" %(image_start, image_end))
		finfo.flush()

	data = EMData.read_images(stack, list_of_particles)

	t_zero = Transform({"type":"spider","phi":0,"theta":0,"psi":0,"tx":0,"ty":0})
	transmulti = [[t_zero for i in xrange(nrefs)] for j in xrange(nima)]

	for iref,im in ((iref,im) for iref in xrange(nrefs) for im in xrange(nima)):
		if nrefs == 1:
			transmulti[im][iref] = data[im].get_attr("xform.projection")
		else:
			# if multi models, keep track of eulers for all models
			try:
				transmulti[im][iref] = data[im].get_attr("eulers_txty.%i"%iref)
			except:
				data[im].set_attr("eulers_txty.%i"%iref,t_zero)

	scoremulti = [[0.0 for i in xrange(nrefs)] for j in xrange(nima)] 
	pixelmulti = [[0.0 for i in xrange(nrefs)] for j in xrange(nima)] 
	ref_res = [0.0 for x in xrange(nrefs)] 
	apix = data[0].get_attr('apix_x')

	# for oplane parameter, create cylindrical mask
	if oplane is not None and myid == main_node:
		from hfunctions import createCylMask
		cmaskf=os.path.join(outdir, "mask3D_cyl.mrc")
		mask3D = createCylMask(data,ou,lmask,ilmask,cmaskf)
		# if finding seam of helix, create wedge masks
		if findseam is True:
			wedgemask=[]
			for pf in xrange(nrefs):
				wedgemask.append(EMData())
			# wedgemask option
			if wcmask is not None:
				wcmask = get_input_from_string(wcmask)
				if len(wcmask) != 3:
					print_msg("Error: wcmask option requires 3 values: x y radius")
					sys.exit()

	# determine if particles have helix info:
	try:
		data[0].get_attr('h_angle')
		original_data = []
		boxmask = True
		from hfunctions import createBoxMask
	except:
		boxmask = False

	# prepare particles
	for im in xrange(nima):
		data[im].set_attr('ID', list_of_particles[im])
		data[im].set_attr('pix_score', int(0))
		if CTF:
			# only phaseflip particles, not full CTF correction
			ctf_params = data[im].get_attr("ctf")
			st = Util.infomask(data[im], mask2D, False)
			data[im] -= st[0]
			data[im] = filt_ctf(data[im], ctf_params, sign = -1, binary=1)
			data[im].set_attr('ctf_applied', 1)
		# for window mask:
		if boxmask is True:
			h_angle = data[im].get_attr("h_angle")
			original_data.append(data[im].copy())
			bmask = createBoxMask(nx,apix,ou,lmask,h_angle)
			data[im]*=bmask
			del bmask
	if debug:
		finfo.write( '%d loaded  \n' % nima )
		finfo.flush()
	if myid == main_node:
		# initialize data for the reference preparation function
		ref_data = [ mask3D, max(center,0), None, None, None, None ]
		# for method -1, switch off centering in user function

	from time import time	

	#  this is needed for gathering of pixel errors
	disps = []
	recvcount = []
	disps_score = []
	recvcount_score = []
	for im in xrange(number_of_proc):
		if( im == main_node ):  
			disps.append(0)
			disps_score.append(0)
		else:		  
			disps.append(disps[im-1] + recvcount[im-1])
			disps_score.append(disps_score[im-1] + recvcount_score[im-1])
		ib, ie = MPI_start_end(total_nima, number_of_proc, im)
		recvcount.append( ie - ib )
		recvcount_score.append((ie-ib)*nrefs)
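	#  disps / recvcount give, for mpi_gatherv, the offset and length of each rank's
	#  block of per-particle values; the *_score variants carry nrefs values per
	#  particle, hence the counts scaled by nrefs.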

	pixer = [0.0]*nima
	cs = [0.0]*3
	total_iter = 0
	volodd = EMData.read_images(ref_vol, xrange(nrefs))
	voleve = EMData.read_images(ref_vol, xrange(nrefs))

	if restart:
		# recreate initial volumes from alignments stored in header
		itout = "000_00"
		for iref in xrange(nrefs):
			if(nrefs == 1):
				modout = ""
			else:
				modout = "_model_%02d"%(iref)	
	
			if(sort): 
				group = iref
				for im in xrange(nima):
					imgroup = data[im].get_attr('group')
					if imgroup == iref:
						data[im].set_attr('xform.projection',transmulti[im][iref])
			else: 
				group = int(999) 
				for im in xrange(nima):
					data[im].set_attr('xform.projection',transmulti[im][iref])
			
			fscfile = os.path.join(outdir, "fsc_%s%s"%(itout,modout))

			vol[iref], fscc, volodd[iref], voleve[iref] = rec3D_MPI_noCTF(data, sym, fscmask, fscfile, myid, main_node, index = group, npad = recon_pad)

			if myid == main_node:
				if helicalrecon:
					from hfunctions import processHelicalVol

					vstep=None
					if vertstep is not None:
						vstep=(vdp[iref],vdphi[iref])
					print_msg("Old rise and twist for model %i     : %8.3f, %8.3f\n"%(iref,dp[iref],dphi[iref]))
					hvals=processHelicalVol(vol[iref],voleve[iref],volodd[iref],iref,outdir,itout,
								dp[iref],dphi[iref],apix,hsearch,findseam,vstep,wcmask)
					(vol[iref],voleve[iref],volodd[iref],dp[iref],dphi[iref],vdp[iref],vdphi[iref])=hvals
					print_msg("New rise and twist for model %i     : %8.3f, %8.3f\n"%(iref,dp[iref],dphi[iref]))
					# get new FSC from symmetrized half volumes
					fscc = fsc_mask( volodd[iref], voleve[iref], mask3D, rstep, fscfile)
				else:
					vol[iref].write_image(os.path.join(outdir, "vol_%s.hdf"%itout),-1)

				if save_half is True:
					volodd[iref].write_image(os.path.join(outdir, "volodd_%s.hdf"%itout),-1)
					voleve[iref].write_image(os.path.join(outdir, "voleve_%s.hdf"%itout),-1)

				if nmasks > 1:
					# Read mask for multiplying
					ref_data[0] = maskF[iref]
				ref_data[2] = vol[iref]
				ref_data[3] = fscc
				#  call user-supplied function to prepare reference image, i.e., center and filter it
				vol[iref], cs,fl = ref_ali3d(ref_data)
				vol[iref].write_image(os.path.join(outdir, "volf_%s.hdf"%(itout)),-1)
				if (apix == 1):
					res_msg = "Models filtered at spatial frequency of:\t"
					res = fl
				else:
					res_msg = "Models filtered at resolution of:       \t"
					res = apix / fl	
				ares = array2string(array(res), precision = 2)
				print_msg("%s%s\n\n"%(res_msg,ares))	
			
			bcast_EMData_to_all(vol[iref], myid, main_node)
			# write out headers, under MPI writing has to be done sequentially
			mpi_barrier(MPI_COMM_WORLD)

	# projection matching	
	for N_step in xrange(lstp):
		terminate = 0
		Iter = -1
		while(Iter < max_iter-1 and terminate == 0):
			Iter += 1
			total_iter += 1
			itout = "%03g_%02d" %(delta[N_step], Iter)
			if myid == main_node:
				print_msg("ITERATION #%3d, inner iteration #%3d\nDelta = %4.1f, an = %5.2f, xrange = %5.2f, yrange = %5.2f, step = %5.2f\n\n"%(N_step, Iter, delta[N_step], an[N_step], xrng[N_step],yrng[N_step],step[N_step]))
	
			for iref in xrange(nrefs):
				if myid == main_node: start_time = time()
				volft,kb = prep_vol( vol[iref] )

				## constrain projections to out of plane parameter
				theta1 = None
				theta2 = None
				if oplane is not None:
					theta1 = 90-oplane
					theta2 = 90+oplane
				refrings = prepare_refrings( volft, kb, nx, delta[N_step], ref_a, sym, numr, MPI=True, phiEqpsi = "Minus", initial_theta=theta1, delta_theta=theta2)
				
				del volft,kb

				if myid== main_node:
					print_msg( "Time to prepare projections for model %i: %s\n" % (iref, legibleTime(time()-start_time)) )
					start_time = time()
	
				for im in xrange( nima ):
					data[im].set_attr("xform.projection", transmulti[im][iref])
					if an[N_step] == -1:
						t1, peak, pixer[im] = proj_ali_incore(data[im],refrings,numr,xrng[N_step],yrng[N_step],step[N_step],finfo)
					else:
						t1, peak, pixer[im] = proj_ali_incore_local(data[im],refrings,numr,xrng[N_step],yrng[N_step],step[N_step],an[N_step],finfo)
					#data[im].set_attr("xform.projection"%iref, t1)
					if nrefs > 1: data[im].set_attr("eulers_txty.%i"%iref,t1)
					scoremulti[im][iref] = peak
					from pixel_error import max_3D_pixel_error
					# t1 is the current param, t2 is old
					t2 = transmulti[im][iref]
					pixelmulti[im][iref] = max_3D_pixel_error(t1,t2,numr[-3])
					transmulti[im][iref] = t1

				if myid == main_node:
					print_msg("Time of alignment for model %i: %s\n"%(iref, legibleTime(time()-start_time)))
					start_time = time()


			# gather scoring data from all processors
			from mpi import mpi_gatherv
			scoremultisend = sum(scoremulti,[])
			pixelmultisend = sum(pixelmulti,[])
			tmp = mpi_gatherv(scoremultisend,len(scoremultisend),MPI_FLOAT, recvcount_score, disps_score, MPI_FLOAT, main_node,MPI_COMM_WORLD)
			tmp1 = mpi_gatherv(pixelmultisend,len(pixelmultisend),MPI_FLOAT, recvcount_score, disps_score, MPI_FLOAT, main_node,MPI_COMM_WORLD)
			tmp = mpi_bcast(tmp,(total_nima * nrefs), MPI_FLOAT,0, MPI_COMM_WORLD)
			tmp1 = mpi_bcast(tmp1,(total_nima * nrefs), MPI_FLOAT,0, MPI_COMM_WORLD)
			tmp = map(float,tmp)
			tmp1 = map(float,tmp1)
			score = array(tmp).reshape(-1,nrefs)
			pixelerror = array(tmp1).reshape(-1,nrefs) 
			score_local = array(scoremulti)
			mean_score = score.mean(axis=0)
			std_score = score.std(axis=0)
			cut = mean_score - (cutoff * std_score)
			cut2 = mean_score + (cutoff * std_score)
			res_max = score_local.argmax(axis=1)
			minus_cc = [0.0 for x in xrange(nrefs)]
			minus_pix = [0.0 for x in xrange(nrefs)]
			minus_ref = [0.0 for x in xrange(nrefs)]
			
			#output pixel errors
			if(myid == main_node):
				from statistics import hist_list
				lhist = 20
				pixmin = pixelerror.min(axis=1)
				region, histo = hist_list(pixmin, lhist)
				if(region[0] < 0.0):  region[0] = 0.0
				print_msg("Histogram of pixel errors\n      ERROR       number of particles\n")
				for lhx in xrange(lhist):
					print_msg(" %10.3f     %7d\n"%(region[lhx], histo[lhx]))
				# Terminate if 95% within 1 pixel error
				im = 0
				for lhx in xrange(lhist):
					if(region[lhx] > 1.0): break
					im += histo[lhx]
				print_msg( "Percent of particles with pixel error < 1: %f\n\n"% (im/float(total_nima)*100))
				term_cond = float(term)/100
				if(im/float(total_nima) > term_cond): 
					terminate = 1
					print_msg("Terminating internal loop\n")
				del region, histo
			terminate = mpi_bcast(terminate, 1, MPI_INT, 0, MPI_COMM_WORLD)
			terminate = int(terminate[0])	
			
			for im in xrange(nima):
				if(sort==False):
					data[im].set_attr('group',999)
				elif (mjump[N_step]==1):
					data[im].set_attr('group',int(res_max[im]))
				
				pix_run = data[im].get_attr('pix_score')			
				if (pix_cutoff[N_step]==1 and (terminate==1 or Iter == max_iter-1)):
					if (pixelmulti[im][int(res_max[im])] > 1):
						data[im].set_attr('pix_score',int(777))

				if (score_local[im][int(res_max[im])]<cut[int(res_max[im])]) or (two_tail and score_local[im][int(res_max[im])]>cut2[int(res_max[im])]):
					data[im].set_attr('group',int(888))
					minus_cc[int(res_max[im])] = minus_cc[int(res_max[im])] + 1

				if(pix_run == 777):
					data[im].set_attr('group',int(777))
					minus_pix[int(res_max[im])] = minus_pix[int(res_max[im])] + 1

				if (compare_ref_free != "-1") and (ref_free_cutoff[N_step] != -1) and (total_iter > 1):
					id = data[im].get_attr('ID')
					if id in rejects:
						data[im].set_attr('group',int(666))
						minus_ref[int(res_max[im])] = minus_ref[int(res_max[im])] + 1	
						
				
			minus_cc_tot = mpi_reduce(minus_cc,nrefs,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD)	
			minus_pix_tot = mpi_reduce(minus_pix,nrefs,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD) 	
			minus_ref_tot = mpi_reduce(minus_ref,nrefs,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD)
			if (myid == main_node):
				if(sort):
					tot_max = score.argmax(axis=1)
					res = bincount(tot_max)
				else:
					res = ones(nrefs) * total_nima
				print_msg("Particle distribution:	     \t\t%s\n"%(res*1.0))
				afcut1 = res - minus_cc_tot
				afcut2 = afcut1 - minus_pix_tot
				afcut3 = afcut2 - minus_ref_tot
				print_msg("Particle distribution after cc cutoff:\t\t%s\n"%(afcut1))
				print_msg("Particle distribution after pix cutoff:\t\t%s\n"%(afcut2)) 
				print_msg("Particle distribution after ref cutoff:\t\t%s\n\n"%(afcut3)) 
					
						
			res = [0.0 for i in xrange(nrefs)]
			for iref in xrange(nrefs):
				if(center == -1):
					from utilities      import estimate_3D_center_MPI, rotate_3D_shift
					dummy=EMData()
					cs[0], cs[1], cs[2], dummy, dummy = estimate_3D_center_MPI(data, total_nima, myid, number_of_proc, main_node)				
					cs = mpi_bcast(cs, 3, MPI_FLOAT, main_node, MPI_COMM_WORLD)
					cs = [-float(cs[0]), -float(cs[1]), -float(cs[2])]
					rotate_3D_shift(data, cs)


				if(sort): 
					group = iref
					for im in xrange(nima):
						imgroup = data[im].get_attr('group')
						if imgroup == iref:
							data[im].set_attr('xform.projection',transmulti[im][iref])
				else: 
					group = int(999) 
					for im in xrange(nima):
						data[im].set_attr('xform.projection',transmulti[im][iref])
				if(nrefs == 1):
					modout = ""
				else:
					modout = "_model_%02d"%(iref)	
				
				fscfile = os.path.join(outdir, "fsc_%s%s"%(itout,modout))
				vol[iref], fscc, volodd[iref], voleve[iref] = rec3D_MPI_noCTF(data, sym, fscmask, fscfile, myid, main_node, index=group, npad=recon_pad)
	
				if myid == main_node:
					print_msg("3D reconstruction time for model %i: %s\n"%(iref, legibleTime(time()-start_time)))
					start_time = time()
	
				# Compute Fourier variance
				if fourvar:
					outvar = os.path.join(outdir, "volVar_%s.hdf"%(itout))
					ssnr_file = os.path.join(outdir, "ssnr_%s"%(itout))
					varf = varf3d_MPI(data, ssnr_text_file=ssnr_file, mask2D=None, reference_structure=vol[iref], ou=last_ring, rw=1.0, npad=1, CTF=None, sign=1, sym=sym, myid=myid)
					if myid == main_node:
						print_msg("Time to calculate 3D Fourier variance for model %i: %s\n"%(iref, legibleTime(time()-start_time)))
						start_time = time()
						varf = 1.0/varf
						varf.write_image(outvar,-1)
				else:  varf = None

				if myid == main_node:
					if helicalrecon:
						from hfunctions import processHelicalVol

						vstep=None
						if vertstep is not None:
							vstep=(vdp[iref],vdphi[iref])
						print_msg("Old rise and twist for model %i     : %8.3f, %8.3f\n"%(iref,dp[iref],dphi[iref]))
						hvals=processHelicalVol(vol[iref],voleve[iref],volodd[iref],iref,outdir,itout,
									dp[iref],dphi[iref],apix,hsearch,findseam,vstep,wcmask)
						(vol[iref],voleve[iref],volodd[iref],dp[iref],dphi[iref],vdp[iref],vdphi[iref])=hvals
						print_msg("New rise and twist for model %i     : %8.3f, %8.3f\n"%(iref,dp[iref],dphi[iref]))
						# get new FSC from symmetrized half volumes
						fscc = fsc_mask( volodd[iref], voleve[iref], mask3D, rstep, fscfile)

						print_msg("Time to search and apply helical symmetry for model %i: %s\n\n"%(iref, legibleTime(time()-start_time)))
						start_time = time()
					else:
						vol[iref].write_image(os.path.join(outdir, "vol_%s.hdf"%(itout)),-1)

					if save_half is True:
						volodd[iref].write_image(os.path.join(outdir, "volodd_%s.hdf"%(itout)),-1)
						voleve[iref].write_image(os.path.join(outdir, "voleve_%s.hdf"%(itout)),-1)

					if nmasks > 1:
						# Read mask for multiplying
						ref_data[0] = maskF[iref]
					ref_data[2] = vol[iref]
					ref_data[3] = fscc
					ref_data[4] = varf
					#  call user-supplied function to prepare reference image, i.e., center and filter it
					vol[iref], cs,fl = ref_ali3d(ref_data)
					vol[iref].write_image(os.path.join(outdir, "volf_%s.hdf"%(itout)),-1)
					if (apix == 1):
						res_msg = "Models filtered at spatial frequency of:\t"
						res[iref] = fl
					else:
						res_msg = "Models filtered at resolution of:       \t"
						res[iref] = apix / fl	
	
				del varf
				bcast_EMData_to_all(vol[iref], myid, main_node)
				
				if compare_ref_free != "-1": compare_repro = True
				if compare_repro:
					outfile_repro = comp_rep(refrings, data, itout, modout, vol[iref], group, nima, nx, myid, main_node, outdir)
					mpi_barrier(MPI_COMM_WORLD)
					if compare_ref_free != "-1":
						ref_free_output = os.path.join(outdir,"ref_free_%s%s"%(itout,modout))
						rejects = compare(compare_ref_free, outfile_repro,ref_free_output,yrng[N_step], xrng[N_step], rstep,nx,apix,ref_free_cutoff[N_step], number_of_proc, myid, main_node)

			# retrieve alignment params from all processors
			par_str = ['xform.projection','ID','group']
			if nrefs > 1:
				for iref in xrange(nrefs):
					par_str.append('eulers_txty.%i'%iref)

			if myid == main_node:
				from utilities import recv_attr_dict
				recv_attr_dict(main_node, stack, data, par_str, image_start, image_end, number_of_proc)
				
			else:	send_attr_dict(main_node, data, par_str, image_start, image_end)

			if myid == main_node:
				ares = array2string(array(res), precision = 2)
				print_msg("%s%s\n\n"%(res_msg,ares))
				dummy = EMData()
				if full_output:
					nimat = EMUtil.get_image_count(stack)
					output_file = os.path.join(outdir, "paramout_%s"%itout)
					foutput = open(output_file, 'w')
					for im in xrange(nimat):
						# save the parameters for each of the models
						outstring = ""
						dummy.read_image(stack,im,True)
						param3d = dummy.get_attr('xform.projection')
						g = dummy.get_attr("group")
						# retrieve alignments in EMAN-format
						pE = param3d.get_params('eman')
						outstring += "%f\t%f\t%f\t%f\t%f\t%i\n" %(pE["az"], pE["alt"], pE["phi"], pE["tx"], pE["ty"],g)
						foutput.write(outstring)
					foutput.close()
				del dummy
			mpi_barrier(MPI_COMM_WORLD)


#	mpi_finalize()	

	if myid == main_node: print_end_msg("ali3d_MPI")
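
The selection step above flags a particle as an outlier (group 888) when its best cross-correlation score falls below mean - cutoff*std for the model it matches best, or above mean + cutoff*std when two_tail is enabled. A minimal numpy sketch of that test, using invented scores and a hypothetical cutoff value:

import numpy

scores = numpy.array([[0.91, 0.55],          # rows: particles, columns: reference models (invented values)
                      [0.42, 0.60],
                      [0.88, 0.47]])
cutoff = 2.0                                  # hypothetical sigma multiplier, stands in for the cutoff argument
mean_score = scores.mean(axis=0)
std_score  = scores.std(axis=0)
low  = mean_score - cutoff * std_score        # lower bound per model
high = mean_score + cutoff * std_score        # upper bound, only checked when two_tail is set
best = scores.argmax(axis=1)                  # model each particle matches best
for im in range(scores.shape[0]):
    m = int(best[im])
    keep = (scores[im][m] >= low[m]) and (scores[im][m] <= high[m])
    print("particle %d -> model %d, keep = %s" % (im, m, keep))
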
Example #38
0
def shiftali_MPI(stack, maskfile=None, maxit=100, CTF=False, snr=1.0, Fourvar=False, search_rng=-1, oneDx=False, search_rng_y=-1):  
	from applications import MPI_start_end
	from utilities    import model_circle, model_blank, get_image, peak_search, get_im
	from utilities    import reduce_EMData_to_root, bcast_EMData_to_all, send_attr_dict, file_type, bcast_number_to_all, bcast_list_to_all
	from statistics   import varf2d_MPI
	from fundamentals import fft, ccf, rot_shift3D, rot_shift2D
	from utilities    import get_params2D, set_params2D
	from utilities    import print_msg, print_begin_msg, print_end_msg
	import os
	import sys
	from mpi 	  	  import mpi_init, mpi_comm_size, mpi_comm_rank, MPI_COMM_WORLD
	from mpi 	  	  import mpi_reduce, mpi_bcast, mpi_barrier, mpi_gatherv
	from mpi 	  	  import MPI_SUM, MPI_FLOAT, MPI_INT
	from EMAN2	  	  import Processor
	from time         import time	
	
	number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
	myid = mpi_comm_rank(MPI_COMM_WORLD)
	main_node = 0
		
	ftp = file_type(stack)

	if myid == main_node:
		print_begin_msg("shiftali_MPI")

	max_iter=int(maxit)

	if myid == main_node:
		if ftp == "bdb":
			from EMAN2db import db_open_dict
			dummy = db_open_dict(stack, True)
		nima = EMUtil.get_image_count(stack)
	else:
		nima = 0
	nima = bcast_number_to_all(nima, source_node = main_node)
	list_of_particles = range(nima)
	
	image_start, image_end = MPI_start_end(nima, number_of_proc, myid)
	list_of_particles = list_of_particles[image_start: image_end]

	# read nx and ctf_app (if CTF) and broadcast to all nodes
	if myid == main_node:
		ima = EMData()
		ima.read_image(stack, list_of_particles[0], True)
		nx = ima.get_xsize()
		ny = ima.get_ysize()
		if CTF:	ctf_app = ima.get_attr_default('ctf_applied', 2)
		del ima
	else:
		nx = 0
		ny = 0
		if CTF:	ctf_app = 0
	nx = bcast_number_to_all(nx, source_node = main_node)
	ny = bcast_number_to_all(ny, source_node = main_node)
	if CTF:
		ctf_app = bcast_number_to_all(ctf_app, source_node = main_node)
		if ctf_app > 0:	ERROR("data cannot be ctf-applied", "shiftali_MPI", 1, myid)

	if maskfile == None:
		mrad = min(nx, ny)
		mask = model_circle(mrad//2-2, nx, ny)
	else:
		mask = get_im(maskfile)

	if CTF:
		from filter import filt_ctf
		from morphology   import ctf_img
		ctf_abs_sum = EMData(nx, ny, 1, False)
		ctf_2_sum = EMData(nx, ny, 1, False)
	else:
		ctf_2_sum = None

	from global_def import CACHE_DISABLE
	if CACHE_DISABLE:
		data = EMData.read_images(stack, list_of_particles)
	else:
		for i in xrange(number_of_proc):
			if myid == i:
				data = EMData.read_images(stack, list_of_particles)
			if ftp == "bdb": mpi_barrier(MPI_COMM_WORLD)


	for im in xrange(len(data)):
		data[im].set_attr('ID', list_of_particles[im])
		st = Util.infomask(data[im], mask, False)
		data[im] -= st[0]
		if CTF:
			ctf_params = data[im].get_attr("ctf")
			ctfimg = ctf_img(nx, ctf_params, ny=ny)
			Util.add_img2(ctf_2_sum, ctfimg)
			Util.add_img_abs(ctf_abs_sum, ctfimg)

	if CTF:
		reduce_EMData_to_root(ctf_2_sum, myid, main_node)
		reduce_EMData_to_root(ctf_abs_sum, myid, main_node)
	else:  ctf_2_sum = None
	if CTF:
		if myid != main_node:
			del ctf_2_sum
			del ctf_abs_sum
		else:
			temp = EMData(nx, ny, 1, False)
			for i in xrange(0,nx,2):
				for j in xrange(ny):
					temp.set_value_at(i,j,snr)
			Util.add_img(ctf_2_sum, temp)
			del temp

	total_iter = 0

	# apply initial xform.align2d parameters stored in header
	init_params = []
	for im in xrange(len(data)):
		t = data[im].get_attr('xform.align2d')
		init_params.append(t)
		p = t.get_params("2d")
		data[im] = rot_shift2D(data[im], p['alpha'], sx=p['tx'], sy=p['ty'], mirror=p['mirror'], scale=p['scale'])

	# fourier transform all images, and apply ctf if CTF
	for im in xrange(len(data)):
		if CTF:
			ctf_params = data[im].get_attr("ctf")
			data[im] = filt_ctf(fft(data[im]), ctf_params)
		else:
			data[im] = fft(data[im])

	sx_sum=0
	sy_sum=0
	sx_sum_total=0
	sy_sum_total=0
	shift_x = [0.0]*len(data)
	shift_y = [0.0]*len(data)
	ishift_x = [0.0]*len(data)
	ishift_y = [0.0]*len(data)

	for Iter in xrange(max_iter):
		if myid == main_node:
			start_time = time()
			print_msg("Iteration #%4d\n"%(total_iter))
		total_iter += 1
		avg = EMData(nx, ny, 1, False)
		for im in data:  Util.add_img(avg, im)

		reduce_EMData_to_root(avg, myid, main_node)

		if myid == main_node:
			if CTF:
				tavg = Util.divn_filter(avg, ctf_2_sum)
			else:	 tavg = Util.mult_scalar(avg, 1.0/float(nima))
		else:
			tavg = EMData(nx, ny, 1, False)                               

		if Fourvar:
			bcast_EMData_to_all(tavg, myid, main_node)
			vav, rvar = varf2d_MPI(myid, data, tavg, mask, "a", CTF)

		if myid == main_node:
			if Fourvar:
				tavg    = fft(Util.divn_img(fft(tavg), vav))
				vav_r	= Util.pack_complex_to_real(vav)

			# normalize and mask tavg in real space
			tavg = fft(tavg)
			stat = Util.infomask( tavg, mask, False ) 
			tavg -= stat[0]
			Util.mul_img(tavg, mask)
			# For testing purposes: shift tavg to some random place and see if the centering is still correct
			#tavg = rot_shift3D(tavg,sx=3,sy=-4)
			tavg = fft(tavg)

		if Fourvar:  del vav
		bcast_EMData_to_all(tavg, myid, main_node)

		sx_sum=0 
		sy_sum=0 
		if search_rng > 0: nwx = 2*search_rng+1
		else:              nwx = nx
		
		if search_rng_y > 0: nwy = 2*search_rng_y+1
		else:                nwy = ny

		not_zero = 0
		for im in xrange(len(data)):
			if oneDx:
				ctx = Util.window(ccf(data[im],tavg),nwx,1)
				p1  = peak_search(ctx)
				p1_x = -int(p1[0][3])
				ishift_x[im] = p1_x
				sx_sum += p1_x
			else:
				p1 = peak_search(Util.window(ccf(data[im],tavg), nwx,nwy))
				p1_x = -int(p1[0][4])
				p1_y = -int(p1[0][5])
				ishift_x[im] = p1_x
				ishift_y[im] = p1_y
				sx_sum += p1_x
				sy_sum += p1_y

			if not_zero == 0:
				if (not(ishift_x[im] == 0.0)) or (not(ishift_y[im] == 0.0)):
					not_zero = 1

		sx_sum = mpi_reduce(sx_sum, 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD)  

		if not oneDx:
			sy_sum = mpi_reduce(sy_sum, 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD)

		if myid == main_node:
			sx_sum_total = int(sx_sum[0])
			if not oneDx:
				sy_sum_total = int(sy_sum[0])
		else:
			sx_sum_total = 0	
			sy_sum_total = 0

		sx_sum_total = bcast_number_to_all(sx_sum_total, source_node = main_node)

		if not oneDx:
			sy_sum_total = bcast_number_to_all(sy_sum_total, source_node = main_node)

		sx_ave = round(float(sx_sum_total)/nima)
		sy_ave = round(float(sy_sum_total)/nima)
		for im in xrange(len(data)): 
			p1_x = ishift_x[im] - sx_ave
			p1_y = ishift_y[im] - sy_ave
			params2 = {"filter_type" : Processor.fourier_filter_types.SHIFT, "x_shift" : p1_x, "y_shift" : p1_y, "z_shift" : 0.0}
			data[im] = Processor.EMFourierFilter(data[im], params2)
			shift_x[im] += p1_x
			shift_y[im] += p1_y
		# stop if all shifts are zero
		not_zero = mpi_reduce(not_zero, 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD)  
		if myid == main_node:
			not_zero_all = int(not_zero[0])
		else:
			not_zero_all = 0
		not_zero_all = bcast_number_to_all(not_zero_all, source_node = main_node)

		if myid == main_node:
			print_msg("Time of iteration = %12.2f\n"%(time()-start_time))
			start_time = time()

		if not_zero_all == 0:  break

	#for im in xrange(len(data)): data[im] = fft(data[im])  This should not be required as only header information is used
	# combine shifts found with the original parameters
	for im in xrange(len(data)):		
		t0 = init_params[im]
		t1 = Transform()
		t1.set_params({"type":"2D","alpha":0,"scale":t0.get_scale(),"mirror":0,"tx":shift_x[im],"ty":shift_y[im]})
		# combine t0 and t1
		tt = t1*t0
		data[im].set_attr("xform.align2d", tt)  

	# write out headers and STOP, under MPI writing has to be done sequentially
	mpi_barrier(MPI_COMM_WORLD)
	par_str = ["xform.align2d", "ID"]
	if myid == main_node:
		from utilities import file_type
		if(file_type(stack) == "bdb"):
			from utilities import recv_attr_dict_bdb
			recv_attr_dict_bdb(main_node, stack, data, par_str, image_start, image_end, number_of_proc)
		else:
			from utilities import recv_attr_dict
			recv_attr_dict(main_node, stack, data, par_str, image_start, image_end, number_of_proc)
		
	else:           send_attr_dict(main_node, data, par_str, image_start, image_end)
	if myid == main_node: print_end_msg("shiftali_MPI")				
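
shiftali_MPI must be called by every MPI rank; it writes the refined shifts back into the xform.align2d header of the input stack rather than returning them. A hedged driver sketch (stack name and search ranges are placeholders, and shiftali_MPI is assumed to be importable from applications):

# run with something like:  mpirun -np 4 python shift_driver.py
import sys
from mpi import mpi_init, mpi_finalize
from applications import shiftali_MPI

sys.argv = mpi_init(len(sys.argv), sys.argv)
shiftali_MPI("bdb:particles",      # hypothetical input stack
             maskfile=None,        # default: circular mask of radius min(nx,ny)/2 - 2
             maxit=50,
             CTF=False,
             search_rng=8,         # +/- 8 pixel translation search
             oneDx=False,
             search_rng_y=8)
mpi_finalize()
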
Example #39
0
def compare(compare_ref_free, outfile_repro,ref_free_output,yrng, xrng, rstep,nx,apix,ref_free_cutoff, nproc, myid, main_node):

	from alignment      import   Numrinit, ringwe,  Applyws
	from random	 import   seed, randint
	from utilities      import   get_params2D, set_params2D, model_circle, inverse_transform2, combine_params2
	from fundamentals   import   rot_shift2D
	from mpi	    import   MPI_COMM_WORLD, mpi_barrier, mpi_bcast, MPI_INT
	from statistics     import   fsc_mask
	from filter	 import   fit_tanh
	from numpy	  import   array	

	fout = "%s.hdf" % ref_free_output
	frc_out = "%s_frc" % ref_free_output
	res_out = "%s_res" % ref_free_output
	
	
	nima = EMUtil.get_image_count(compare_ref_free)
	image_start, image_end = MPI_start_end(nima, nproc, myid)
	ima = EMData()
	ima.read_image(compare_ref_free, image_start)
	
	last_ring = nx/2-2
	first_ring = 1
	mask = model_circle(last_ring, nx, nx)

	refi = []
	numref = EMUtil.get_image_count(outfile_repro)
	cnx = nx/2 +1
	cny = cnx
	
	mode = "F"
	numr = Numrinit(first_ring, last_ring, rstep, mode)	
	wr = ringwe(numr, mode)

	ima.to_zero()
	for j in xrange(numref):
		temp = EMData()
		temp.read_image(outfile_repro, j)
		#  even, odd, number of even, number of images.  After frc, totav
		refi.append(temp)
	#  for each node read its share of data
	data = EMData.read_images(compare_ref_free, range(image_start, image_end))
	for im in xrange(image_start, image_end):
		data[im-image_start].set_attr('ID', im)
		set_params2D(data[im-image_start],[0,0,0,0,1])
	ringref = []
	for j in xrange(numref):
			refi[j].process_inplace("normalize.mask", {"mask":mask, "no_sigma":1}) # normalize reference images to N(0,1)
			cimage = Util.Polar2Dm(refi[j], cnx, cny, numr, mode)
			Util.Frngs(cimage, numr)
			Applyws(cimage, numr, wr)
			ringref.append(cimage)
	
	if myid == main_node: seed(1000)
	data_shift = []	
	frc = []
	res = []
	for im in xrange(image_start, image_end):
		alpha, sx, sy, mirror, scale = get_params2D(data[im-image_start])
		alphai, sxi, syi, scalei = inverse_transform2(alpha, sx, sy, 1.0)
		# normalize
		data[im-image_start].process_inplace("normalize.mask", {"mask":mask, "no_sigma":1}) # subtract average under the mask
		# align current image to the reference
		[angt, sxst, syst, mirrort, xiref, peakt] = Util.multiref_polar_ali_2d(data[im-image_start], ringref, xrng, yrng, 1, mode, numr, cnx+sxi, cny+syi)
		iref = int(xiref)
		[alphan, sxn, syn, mn] = combine_params2(0.0, -sxi, -syi, 0, angt, sxst, syst, (int)(mirrort))
		set_params2D(data[im-image_start], [alphan, sxn, syn, int(mn), scale])
		temp = rot_shift2D(data[im-image_start], alphan, sxn, syn, mn)
		temp.set_attr('assign',iref)
		tfrc = fsc_mask(temp,refi[iref],mask = mask)
		temp.set_attr('frc',tfrc[1])
		res = fit_tanh(tfrc)
		temp.set_attr('res',res)
		data_shift.append(temp)
	
	for node in xrange(nproc):
		if myid == node:
			for image in data_shift:
				image.write_image(fout,-1)
				refindex = image.get_attr('assign')
				refi[refindex].write_image(fout,-1)	
		mpi_barrier(MPI_COMM_WORLD)
	rejects = []
	if myid == main_node:
		a = EMData()
		index = 0
		frc = []
		res = []
		temp = []
		classes = []
		for im in xrange(nima):
			a.read_image(fout, index)
			frc.append(a.get_attr("frc"))
			if ref_free_cutoff != -1: classes.append(a.get_attr("class_ptcl_idxs"))
			tmp = a.get_attr("res")
			temp.append(tmp[0])
			res.append("%12f" %(apix/tmp[0]))
			res.append("\n")
			index = index + 2
		res_num = array(temp)
		mean_score = res_num.mean(axis=0)
		std_score = res_num.std(axis=0)
		std = std_score / 2
		if ref_free_cutoff != -1:
			cutoff = mean_score - std * ref_free_cutoff
			reject = res_num < cutoff
			index = 0
			for i in reject:
				if i: rejects.extend(classes[index])
				index = index + 1
			rejects.sort()
		del a
		fout_frc = open(frc_out,'w')
		fout_res = open(res_out,'w')
		fout_res.write("".join(res))
		fout_res.close()
		temp = zip(*frc)
		datstrings = []
		for i in temp:
			for j in i:
				datstrings.append("  %12f" % (j))
			datstrings.append("\n")
		fout_frc.write("".join(datstrings))
		fout_frc.close()

	if ref_free_cutoff != -1:
		# broadcast the rejected class members to every rank; MPI collectives must be called by all processes
		length  = mpi_bcast(len(rejects), 1, MPI_INT, main_node, MPI_COMM_WORLD)
		length  = int(length[0])
		rejects = mpi_bcast(rejects, length, MPI_INT, main_node, MPI_COMM_WORLD)
		rejects = map(int, rejects)

	del refi
	del ringref
	return rejects
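
The rejection criterion above works on the tangent-filter cut-off frequency that fit_tanh returns for each class average: members are rejected when that frequency drops more than ref_free_cutoff half-sigmas below the mean. A small numpy sketch with invented values:

from numpy import array

fl_vals = array([0.31, 0.28, 0.12, 0.30, 0.29])   # invented per-class cut-off frequencies (tmp[0] above)
ref_free_cutoff = 2.0                             # hypothetical command-line value
mean_fl    = fl_vals.mean()
half_sigma = fl_vals.std() / 2.0
threshold  = mean_fl - half_sigma * ref_free_cutoff
reject = fl_vals < threshold                      # True entries mark classes whose members join `rejects`
print(reject)
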
Example #40
0
def cml_open_proj(stack, ir, ou, lf, hf, dpsi = 1):
	from projection   import cml_sinogram
	from utilities    import model_circle, get_params_proj, model_blank, get_im
	from fundamentals import fftip
	from filter       import filt_tanh

	# number of projections
	if  type(stack) == type(""): nprj = EMUtil.get_image_count(stack)
	else:                       nprj = len(stack)
	Prj  = []                                          # list of projections
	Ori  = [-1] * 4 * nprj                             # initial orientation (phi, theta, psi, index) for each projection

	for i in xrange(nprj):
		image = get_im(stack, i)

		# read initial angles if given
		try:	Ori[4*i], Ori[4*i+1], Ori[4*i+2], s2x, s2y = get_params_proj(image)
		except:	pass
		
		if(i == 0):
			nx = image.get_xsize()
			if(ou < 1): ou = nx // 2 - 1
			diameter = int(2 * ou)
			mask2D   = model_circle(ou, nx, nx)
			if ir > 0:  mask2D -= model_circle(ir, nx, nx)

		# normalize under the mask
		[mean_a, sigma, imin, imax] = Util.infomask(image, mask2D, True)
		image -= mean_a
		Util.mul_scalar(image, 1.0/sigma)
		Util.mul_img(image, mask2D)

		# sinogram
		sino = cml_sinogram(image, diameter, dpsi)

		# prepare the cut positions in order to filter (lf: low freq; hf: high freq)
		ihf = min(int(2 * hf * diameter), diameter + (diameter + 1) % 2)
		ihf = ihf + (ihf + 1) % 2    # index ihf must be odd to land on the imaginary part
		ilf = max(int(2 * lf * diameter), 0)
		ilf = ilf + ilf % 2          # index ilf must be even to fall in the real part
		bdf = ihf - ilf + 1

		# process lines
		nxe = sino.get_xsize()
		nye = sino.get_ysize()
		prj = model_blank(bdf, 2*nye)
		pp = model_blank(nxe, 2*nye)
		for li in xrange(nye):
			# get the line li
			line = Util.window(sino, nxe, 1, 1, 0, li-nye//2, 0)
			# optional tangent low-pass filter (u2); it did not improve the results
			#line = filt_tanh(line, ou / float(nx), ou / float(nx))
			# normalize this line
			[mean_l, sigma_l, imin, imax] = Util.infomask(line, None, True)
			line = (line - mean_l) / sigma_l
			# fft
			fftip(line)
			# filter (cut part of coef) and create mirror line
			Util.cml_prepare_line(prj, line, ilf, ihf, li, nye)

		# store the projection
		Prj.append(prj)

	return Prj, Ori
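
cml_open_proj returns one prepared sinogram per projection plus a flat orientation list with four slots (phi, theta, psi, index) per image, initialized to -1 when the stack carries no angles. A hedged usage sketch (the stack name is a placeholder):

# prepare common-lines data from a projection stack (file name is hypothetical)
Prj, Ori = cml_open_proj("proj_stack.hdf", ir=1, ou=-1, lf=0.0, hf=0.45, dpsi=1)
print("number of projections: %d" % len(Prj))
print("initial orientation of projection 0: phi=%s theta=%s psi=%s" % (Ori[0], Ori[1], Ori[2]))
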
Example #41
0
def main():
    import global_def
    from optparse import OptionParser
    from EMAN2 import EMUtil
    import os
    import sys
    from time import time

    progname = os.path.basename(sys.argv[0])
    usage = progname + " proj_stack output_averages --MPI"
    parser = OptionParser(usage, version=SPARXVERSION)

    parser.add_option("--img_per_group",
                      type="int",
                      default=100,
                      help="number of images per group")
    parser.add_option("--radius",
                      type="int",
                      default=-1,
                      help="radius for alignment")
    parser.add_option(
        "--xr",
        type="string",
        default="2 1",
        help="range for translation search in x direction, search is +/xr")
    parser.add_option(
        "--yr",
        type="string",
        default="-1",
        help=
        "range for translation search in y direction, search is +/yr (default = same as xr)"
    )
    parser.add_option(
        "--ts",
        type="string",
        default="1 0.5",
        help=
        "step size of the translation search in both directions, search is -xr, -xr+ts, 0, xr-ts, xr, can be fractional"
    )
    parser.add_option(
        "--iter",
        type="int",
        default=30,
        help="number of iterations within alignment (default = 30)")
    parser.add_option(
        "--num_ali",
        type="int",
        default=5,
        help="number of alignments performed for stability (default = 5)")
    parser.add_option("--thld_err",
                      type="float",
                      default=1.0,
                      help="threshold of pixel error (default = 1.732)")
    parser.add_option(
        "--grouping",
        type="string",
        default="GRP",
        help=
        "do grouping of projections: PPR - per projection, GRP - different size groups, exclusive (default), GEV - grouping equal size"
    )
    parser.add_option(
        "--delta",
        type="float",
        default=-1.0,
        help="angular step for reference projections (required for GEV method)"
    )
    parser.add_option(
        "--fl",
        type="float",
        default=0.3,
        help="cut-off frequency of hyperbolic tangent low-pass Fourier filter")
    parser.add_option(
        "--aa",
        type="float",
        default=0.2,
        help="fall-off of hyperbolic tangent low-pass Fourier filter")
    parser.add_option("--CTF",
                      action="store_true",
                      default=False,
                      help="Consider CTF correction during the alignment ")
    parser.add_option("--MPI",
                      action="store_true",
                      default=False,
                      help="use MPI version")

    (options, args) = parser.parse_args()

    from mpi import mpi_init, mpi_comm_rank, mpi_comm_size, MPI_COMM_WORLD
    from mpi import mpi_barrier, mpi_send, mpi_recv, mpi_bcast, MPI_INT, mpi_finalize, MPI_FLOAT
    from applications import MPI_start_end, within_group_refinement, ali2d_ras
    from pixel_error import multi_align_stability
    from utilities import send_EMData, recv_EMData
    from utilities import get_image, bcast_number_to_all, set_params2D, get_params2D
    from utilities import group_proj_by_phitheta, model_circle, get_input_from_string

    sys.argv = mpi_init(len(sys.argv), sys.argv)
    myid = mpi_comm_rank(MPI_COMM_WORLD)
    number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
    main_node = 0

    if len(args) == 2:
        stack = args[0]
        outdir = args[1]
    else:
        ERROR("incomplete list of arguments", "sxproj_stability", 1, myid=myid)
        exit()
    if not options.MPI:
        ERROR("Non-MPI not supported!", "sxproj_stability", myid=myid)
        exit()

    if global_def.CACHE_DISABLE:
        from utilities import disable_bdb_cache
        disable_bdb_cache()
    global_def.BATCH = True

    #if os.path.exists(outdir):  ERROR('Output directory exists, please change the name and restart the program', "sxproj_stability", 1, myid)
    #mpi_barrier(MPI_COMM_WORLD)

    img_per_grp = options.img_per_group
    radius = options.radius
    ite = options.iter
    num_ali = options.num_ali
    thld_err = options.thld_err

    xrng = get_input_from_string(options.xr)
    if options.yr == "-1": yrng = xrng
    else: yrng = get_input_from_string(options.yr)
    step = get_input_from_string(options.ts)

    if myid == main_node:
        nima = EMUtil.get_image_count(stack)
        img = get_image(stack)
        nx = img.get_xsize()
        ny = img.get_ysize()
    else:
        nima = 0
        nx = 0
        ny = 0
    nima = bcast_number_to_all(nima)
    nx = bcast_number_to_all(nx)
    ny = bcast_number_to_all(ny)
    if radius == -1: radius = nx / 2 - 2
    mask = model_circle(radius, nx, nx)

    st = time()
    if options.grouping == "GRP":
        if myid == main_node:
            print "  A  ", myid, "  ", time() - st
            proj_attr = EMUtil.get_all_attributes(stack, "xform.projection")
            proj_params = []
            for i in xrange(nima):
                dp = proj_attr[i].get_params("spider")
                phi, theta, psi, s2x, s2y = dp["phi"], dp["theta"], dp[
                    "psi"], -dp["tx"], -dp["ty"]
                proj_params.append([phi, theta, psi, s2x, s2y])

            # Here is where the grouping is done.  group_proj_by_phitheta is not annotated in detail,
            # so its return values are briefly explained here:
            # proj_list  : a list of lists of particle numbers; each list contains img_per_grp particle numbers,
            #              except possibly the last one.  Depending on how many particles are left over, they
            #              either form their own group or are appended to the last group.
            # angle_list : also a list of lists; each list contains three numbers (phi, theta, delta), where (phi,
            #              theta) is the projection angle of the center of the group and delta is the angular
            #              range of the group.
            # mirror_list: also a list of lists; each list contains img_per_grp True/False flags indicating whether
            #              the particle should take the mirrored position.
            # In this program angle_list and mirror_list are not of interest; a hypothetical example of the
            # returned structures is sketched below.
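            # Example (invented values, not from a real run), for 250 particles and img_per_grp = 100:
            #   proj_list_all -> [[12, 37, ...100 ids...], [5, 91, ...100 ids...], [8, 63, ...50 leftover ids...]]
            #   angle_list    -> [[48.2, 63.1, 7.5], [130.0, 88.4, 8.1], [11.7, 24.9, 9.3]]
            #   mirror_list   -> [[True, False, ...], [False, False, ...], [True, True, ...]]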

            proj_list_all, angle_list, mirror_list = group_proj_by_phitheta(
                proj_params, img_per_grp=img_per_grp)
            del proj_params
            print "  B  number of groups  ", myid, "  ", len(
                proj_list_all), time() - st
        mpi_barrier(MPI_COMM_WORLD)

        # Number of groups, actually there could be one or two more groups, since the size of the remaining group varies
        # we will simply assign them to main node.
        n_grp = nima / img_per_grp - 1

        # Divide proj_list_all equally to all nodes, and becomes proj_list
        proj_list = []
        for i in xrange(n_grp):
            proc_to_stay = i % number_of_proc
            if proc_to_stay == main_node:
                if myid == main_node: proj_list.append(proj_list_all[i])
            elif myid == main_node:
                mpi_send(len(proj_list_all[i]), 1, MPI_INT, proc_to_stay,
                         SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
                mpi_send(proj_list_all[i], len(proj_list_all[i]), MPI_INT,
                         proc_to_stay, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
            elif myid == proc_to_stay:
                img_per_grp = mpi_recv(1, MPI_INT, main_node,
                                       SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
                img_per_grp = int(img_per_grp[0])
                temp = mpi_recv(img_per_grp, MPI_INT, main_node,
                                SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
                proj_list.append(map(int, temp))
                del temp
            mpi_barrier(MPI_COMM_WORLD)
        print "  C  ", myid, "  ", time() - st
        if myid == main_node:
            # Assign the remaining groups to main_node
            for i in xrange(n_grp, len(proj_list_all)):
                proj_list.append(proj_list_all[i])
            del proj_list_all, angle_list, mirror_list

    #   Compute stability per projection direction, equal number assigned, thus overlaps
    elif options.grouping == "GEV":
        if options.delta == -1.0:
            ERROR(
                "Angular step for reference projections is required for GEV method",
                "sxproj_stability", 1)
        from utilities import even_angles, nearestk_to_refdir, getvec
        refproj = even_angles(options.delta)
        img_begin, img_end = MPI_start_end(len(refproj), number_of_proc, myid)
        # Now each processor keeps its own share of reference projections
        refprojdir = refproj[img_begin:img_end]
        del refproj

        ref_ang = [0.0] * (len(refprojdir) * 2)
        for i in xrange(len(refprojdir)):
            ref_ang[i * 2] = refprojdir[0][0]
            ref_ang[i * 2 + 1] = refprojdir[0][1] + i * 0.1

        print "  A  ", myid, "  ", time() - st
        proj_attr = EMUtil.get_all_attributes(stack, "xform.projection")
        #  the solution below is very slow, do not use it unless there is a problem with the i/O
        """
		for i in xrange(number_of_proc):
			if myid == i:
				proj_attr = EMUtil.get_all_attributes(stack, "xform.projection")
			mpi_barrier(MPI_COMM_WORLD)
		"""
        print "  B  ", myid, "  ", time() - st

        proj_ang = [0.0] * (nima * 2)
        for i in xrange(nima):
            dp = proj_attr[i].get_params("spider")
            proj_ang[i * 2] = dp["phi"]
            proj_ang[i * 2 + 1] = dp["theta"]
        print "  C  ", myid, "  ", time() - st
        asi = Util.nearestk_to_refdir(proj_ang, ref_ang, img_per_grp)
        del proj_ang, ref_ang
        proj_list = []
        for i in xrange(len(refprojdir)):
            proj_list.append(asi[i * img_per_grp:(i + 1) * img_per_grp])
        del asi
        print "  D  ", myid, "  ", time() - st
        #from sys import exit
        #exit()

    #   Compute stability per projection
    elif options.grouping == "PPR":
        print "  A  ", myid, "  ", time() - st
        proj_attr = EMUtil.get_all_attributes(stack, "xform.projection")
        print "  B  ", myid, "  ", time() - st
        proj_params = []
        for i in xrange(nima):
            dp = proj_attr[i].get_params("spider")
            phi, theta, psi, s2x, s2y = dp["phi"], dp["theta"], dp[
                "psi"], -dp["tx"], -dp["ty"]
            proj_params.append([phi, theta, psi, s2x, s2y])
        img_begin, img_end = MPI_start_end(nima, number_of_proc, myid)
        print "  C  ", myid, "  ", time() - st
        from utilities import nearest_proj
        proj_list, mirror_list = nearest_proj(
            proj_params, img_per_grp,
            range(img_begin, img_begin + 1))  #range(img_begin, img_end))
        refprojdir = proj_params[img_begin:img_end]
        del proj_params, mirror_list
        print "  D  ", myid, "  ", time() - st
    else:
        ERROR("Incorrect projection grouping option", "sxproj_stability", 1)
    """
	from utilities import write_text_file
	for i in xrange(len(proj_list)):
		write_text_file(proj_list[i],"projlist%06d_%04d"%(i,myid))
	"""

    ###########################################################################################################
    # Begin stability test
    from utilities import get_params_proj, read_text_file
    #if myid == 0:
    #	from utilities import read_text_file
    #	proj_list[0] = map(int, read_text_file("lggrpp0.txt"))

    from utilities import model_blank
    aveList = [model_blank(nx, ny) for i in xrange(len(proj_list))]  # one blank image per group; list multiplication would alias a single image
    if options.grouping == "GRP":
        refprojdir = [[0.0, 0.0, -1.0]] * len(proj_list)
    for i in xrange(len(proj_list)):
        print "  E  ", myid, "  ", time() - st
        class_data = EMData.read_images(stack, proj_list[i])
        #print "  R  ",myid,"  ",time()-st
        if options.CTF:
            from filter import filt_ctf
            for im in xrange(len(class_data)):  #  MEM LEAK!!
                atemp = class_data[im].copy()
                btemp = filt_ctf(atemp, atemp.get_attr("ctf"), binary=1)
                class_data[im] = btemp
                #class_data[im] = filt_ctf(class_data[im], class_data[im].get_attr("ctf"), binary=1)
        for im in class_data:
            try:
                t = im.get_attr(
                    "xform.align2d")  # if they are there, no need to set them!
            except:
                try:
                    t = im.get_attr("xform.projection")
                    d = t.get_params("spider")
                    set_params2D(im, [0.0, -d["tx"], -d["ty"], 0, 1.0])
                except:
                    set_params2D(im, [0.0, 0.0, 0.0, 0, 1.0])
        #print "  F  ",myid,"  ",time()-st
        # Here, we perform realignment num_ali times
        all_ali_params = []
        for j in xrange(num_ali):
            if (xrng[0] == 0.0 and yrng[0] == 0.0):
                avet = ali2d_ras(class_data,
                                 randomize=True,
                                 ir=1,
                                 ou=radius,
                                 rs=1,
                                 step=1.0,
                                 dst=90.0,
                                 maxit=ite,
                                 check_mirror=True,
                                 FH=options.fl,
                                 FF=options.aa)
            else:
                avet = within_group_refinement(class_data, mask, True, 1,
                                               radius, 1, xrng, yrng, step,
                                               90.0, ite, options.fl,
                                               options.aa)
            ali_params = []
            for im in xrange(len(class_data)):
                alpha, sx, sy, mirror, scale = get_params2D(class_data[im])
                ali_params.extend([alpha, sx, sy, mirror])
            all_ali_params.append(ali_params)
        #aveList[i] = avet
        #print "  G  ",myid,"  ",time()-st
        del ali_params
        # We determine the stability of this group here.
        # stable_set contains all particles deemed stable; it is a list of lists,
        # each entry holding the pixel error, the image number within the group, and the 2D alignment parameters
        # stable_set is sorted by pixel error
        #from utilities import write_text_file
        #write_text_file(all_ali_params, "all_ali_params%03d.txt"%myid)
        stable_set, mir_stab_rate, average_pix_err = multi_align_stability(
            all_ali_params, 0.0, 10000.0, thld_err, False, 2 * radius + 1)
        #print "  H  ",myid,"  ",time()-st
        if (len(stable_set) > 5):
            stable_set_id = []
            members = []
            pix_err = []
            # First put the stable members into attr 'members' and 'pix_err'
            for s in stable_set:
                # s[1] - number in this subset
                stable_set_id.append(s[1])
                # the original image number
                members.append(proj_list[i][s[1]])
                pix_err.append(s[0])
            # Then put the unstable members into attr 'members' and 'pix_err'
            from fundamentals import rot_shift2D
            avet.to_zero()
            if options.grouping == "GRP":
                aphi = 0.0
                atht = 0.0
                vphi = 0.0
                vtht = 0.0
            l = -1
            for j in xrange(len(proj_list[i])):
                #  Here it will only work if stable_set_id is sorted in the increasing number, see how l progresses
                if j in stable_set_id:
                    l += 1
                    avet += rot_shift2D(class_data[j], stable_set[l][2][0],
                                        stable_set[l][2][1],
                                        stable_set[l][2][2],
                                        stable_set[l][2][3])
                    if options.grouping == "GRP":
                        phi, theta, psi, sxs, sys = get_params_proj(
                            class_data[j])
                        if (theta > 90.0):
                            phi = (phi + 540.0) % 360.0
                            theta = 180.0 - theta
                        aphi += phi
                        atht += theta
                        vphi += phi * phi
                        vtht += theta * theta
                else:
                    members.append(proj_list[i][j])
                    pix_err.append(99999.99)
            aveList[i] = avet.copy()
            if l > 1:
                l += 1
                aveList[i] /= l
                if options.grouping == "GRP":
                    aphi /= l
                    atht /= l
                    vphi = (vphi - l * aphi * aphi) / l
                    vtht = (vtht - l * atht * atht) / l
                    from math import sqrt
                    refprojdir[i] = [
                        aphi, atht,
                        (sqrt(max(vphi, 0.0)) + sqrt(max(vtht, 0.0))) / 2.0
                    ]

            # Here more information has to be stored, PARTICULARLY WHAT IS THE REFERENCE DIRECTION
            aveList[i].set_attr('members', members)
            aveList[i].set_attr('refprojdir', refprojdir[i])
            aveList[i].set_attr('pixerr', pix_err)
        else:
            print " empty group ", i, refprojdir[i]
            aveList[i].set_attr('members', [-1])
            aveList[i].set_attr('refprojdir', refprojdir[i])
            aveList[i].set_attr('pixerr', [99999.])

    del class_data

    if myid == main_node:
        km = 0
        for i in xrange(number_of_proc):
            if i == main_node:
                for im in xrange(len(aveList)):
                    aveList[im].write_image(args[1], km)
                    km += 1
            else:
                nl = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL,
                              MPI_COMM_WORLD)
                nl = int(nl[0])
                for im in xrange(nl):
                    ave = recv_EMData(i, im + i + 70000)
                    nm = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL,
                                  MPI_COMM_WORLD)
                    nm = int(nm[0])
                    members = mpi_recv(nm, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL,
                                       MPI_COMM_WORLD)
                    ave.set_attr('members', map(int, members))
                    members = mpi_recv(nm, MPI_FLOAT, i,
                                       SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
                    ave.set_attr('pixerr', map(float, members))
                    members = mpi_recv(3, MPI_FLOAT, i,
                                       SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
                    ave.set_attr('refprojdir', map(float, members))
                    ave.write_image(args[1], km)
                    km += 1
    else:
        mpi_send(len(aveList), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL,
                 MPI_COMM_WORLD)
        for im in xrange(len(aveList)):
            send_EMData(aveList[im], main_node, im + myid + 70000)
            members = aveList[im].get_attr('members')
            mpi_send(len(members), 1, MPI_INT, main_node,
                     SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
            mpi_send(members, len(members), MPI_INT, main_node,
                     SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
            members = aveList[im].get_attr('pixerr')
            mpi_send(members, len(members), MPI_FLOAT, main_node,
                     SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
            try:
                members = aveList[im].get_attr('refprojdir')
                mpi_send(members, 3, MPI_FLOAT, main_node,
                         SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
            except:
                mpi_send([-999.0, -999.0, -999.0], 3, MPI_FLOAT, main_node,
                         SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)

    global_def.BATCH = False
    mpi_barrier(MPI_COMM_WORLD)
    from mpi import mpi_finalize
    mpi_finalize()
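
Every average written by this stability test carries 'members', 'pixerr' and 'refprojdir' header attributes. A hedged sketch of reading them back from the output stack (the file name stands in for args[1]):

from EMAN2 import EMData, EMUtil

ave  = EMData()
navg = EMUtil.get_image_count("averages.hdf")      # placeholder for the output stack passed as args[1]
for k in xrange(navg):
    ave.read_image("averages.hdf", k, True)        # header only
    print("group %d: %d members, refprojdir = %s" % (k, len(ave.get_attr("members")), ave.get_attr("refprojdir")))
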
Example #42
0
def main():

	def params_3D_2D_NEW(phi, theta, psi, s2x, s2y, mirror):
		# the final ali2d parameters apply the shift first and the rotation second when converting from 3D projection parameters
		if mirror:
			m = 1
			alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)
		else:
			m = 0
			alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)
		return  alpha, sx, sy, m
	
	progname = os.path.basename(sys.argv[0])
	usage = progname + " prj_stack  --ave2D= --var2D=  --ave3D= --var3D= --img_per_grp= --fl=  --aa=   --sym=symmetry --CTF"
	parser = OptionParser(usage, version=SPARXVERSION)
	
	parser.add_option("--output_dir",   type="string"	   ,	default="./",				    help="Output directory")
	parser.add_option("--ave2D",		type="string"	   ,	default=False,				help="Write to the disk a stack of 2D averages")
	parser.add_option("--var2D",		type="string"	   ,	default=False,				help="Write to the disk a stack of 2D variances")
	parser.add_option("--ave3D",		type="string"	   ,	default=False,				help="Write to the disk reconstructed 3D average")
	parser.add_option("--var3D",		type="string"	   ,	default=False,				help="Compute 3D variability (time consuming!)")
	parser.add_option("--img_per_grp",	type="int"         ,	default=100,	     	    help="Number of neighbouring projections.(Default is 100)")
	parser.add_option("--no_norm",		action="store_true",	default=False,				help="Do not use normalization.(Default is to apply normalization)")
	#parser.add_option("--radius", 	    type="int"         ,	default=-1   ,				help="radius for 3D variability" )
	parser.add_option("--npad",			type="int"         ,	default=2    ,				help="Number of time to pad the original images.(Default is 2 times padding)")
	parser.add_option("--sym" , 		type="string"      ,	default="c1",				help="Symmetry. (Default is no symmetry)")
	parser.add_option("--fl",			type="float"       ,	default=0.0,				help="Low pass filter cutoff in absolute frequency (0.0 - 0.5) and is applied to decimated images. (Default - no filtration)")
	parser.add_option("--aa",			type="float"       ,	default=0.02 ,				help="Fall off of the filter. Use default value if user has no clue about falloff (Default value is 0.02)")
	parser.add_option("--CTF",			action="store_true",	default=False,				help="Use CFT correction.(Default is no CTF correction)")
	#parser.add_option("--MPI" , 		action="store_true",	default=False,				help="use MPI version")
	#parser.add_option("--radiuspca", 	type="int"         ,	default=-1   ,				help="radius for PCA" )
	#parser.add_option("--iter", 		type="int"         ,	default=40   ,				help="maximum number of iterations (stop criterion of reconstruction process)" )
	#parser.add_option("--abs", 		type="float"   ,        default=0.0  ,				help="minimum average absolute change of voxels' values (stop criterion of reconstruction process)" )
	#parser.add_option("--squ", 		type="float"   ,	    default=0.0  ,				help="minimum average squared change of voxels' values (stop criterion of reconstruction process)" )
	parser.add_option("--VAR" , 		action="store_true",	default=False,				help="Stack of input consists of 2D variances (Default False)")
	parser.add_option("--decimate",     type  ="float",         default=0.25,               help="Image decimate rate, a number less than 1. (Default is 0.25)")
	parser.add_option("--window",       type  ="int",           default=0,                  help="Target image size relative to original image size. (Default value is zero.)")
	#parser.add_option("--SND",			action="store_true",	default=False,				help="compute squared normalized differences (Default False)")
	#parser.add_option("--nvec",			type="int"         ,	default=0    ,				help="Number of eigenvectors, (Default = 0 meaning no PCA calculated)")
	parser.add_option("--symmetrize",	action="store_true",	default=False,				help="Prepare input stack for handling symmetry (Default False)")
	parser.add_option("--overhead",     type  ="float",         default=0.5,                help="python overhead per CPU.")

	(options,args) = parser.parse_args()
	#####
	from mpi import mpi_init, mpi_comm_rank, mpi_comm_size, mpi_recv, MPI_COMM_WORLD
	from mpi import mpi_barrier, mpi_reduce, mpi_bcast, mpi_send, MPI_FLOAT, MPI_SUM, MPI_INT, MPI_MAX
	#from mpi import *
	from applications   import MPI_start_end
	from reconstruction import recons3d_em, recons3d_em_MPI
	from reconstruction	import recons3d_4nn_MPI, recons3d_4nn_ctf_MPI
	from utilities      import print_begin_msg, print_end_msg, print_msg
	from utilities      import read_text_row, get_image, get_im, wrap_mpi_send, wrap_mpi_recv
	from utilities      import bcast_EMData_to_all, bcast_number_to_all
	from utilities      import get_symt

	#  This is code for handling symmetries by the above program.  To be incorporated. PAP 01/27/2015

	from EMAN2db import db_open_dict

	# Set up global variables related to bdb cache 
	if global_def.CACHE_DISABLE:
		from utilities import disable_bdb_cache
		disable_bdb_cache()
	
	# Set up global variables related to ERROR function
	global_def.BATCH = True
	
	# detect if program is running under MPI
	RUNNING_UNDER_MPI = "OMPI_COMM_WORLD_SIZE" in os.environ
	if RUNNING_UNDER_MPI: global_def.MPI = True
	if options.output_dir =="./": current_output_dir = os.path.abspath(options.output_dir)
	else: current_output_dir = options.output_dir
	if options.symmetrize :
		if RUNNING_UNDER_MPI:
			try:
				sys.argv = mpi_init(len(sys.argv), sys.argv)
				try:	
					number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
					if( number_of_proc > 1 ):
						ERROR("Cannot use more than one CPU for symmetry preparation","sx3dvariability",1)
				except:
					pass
			except:
				pass
		if not os.path.exists(current_output_dir): os.mkdir(current_output_dir)
		
		#  Input
		#instack = "Clean_NORM_CTF_start_wparams.hdf"
		#instack = "bdb:data"
		
		
		from logger import Logger,BaseLogger_Files
		if os.path.exists(os.path.join(current_output_dir, "log.txt")): os.remove(os.path.join(current_output_dir, "log.txt"))
		log_main=Logger(BaseLogger_Files())
		log_main.prefix = os.path.join(current_output_dir, "./")
		
		instack = args[0]
		sym = options.sym.lower()
		if( sym == "c1" ):
			ERROR("There is no need to symmetrize stack for C1 symmetry","sx3dvariability",1)
		
		line =""
		for a in sys.argv:
			line +=" "+a
		log_main.add(line)
	
		if(instack[:4] !="bdb:"):
			#if output_dir =="./": stack = "bdb:data"
			stack = "bdb:"+current_output_dir+"/data"
			delete_bdb(stack)
			junk = cmdexecute("sxcpy.py  "+instack+"  "+stack)
		else: stack = instack
		
		qt = EMUtil.get_all_attributes(stack,'xform.projection')

		na = len(qt)
		ts = get_symt(sym)
		ks = len(ts)
		angsa = [None]*na
		
		for k in range(ks):
			#Qfile = "Q%1d"%k
			#if options.output_dir!="./": Qfile = os.path.join(options.output_dir,"Q%1d"%k)
			Qfile = os.path.join(current_output_dir, "Q%1d"%k)
			#delete_bdb("bdb:Q%1d"%k)
			delete_bdb("bdb:"+Qfile)
			#junk = cmdexecute("e2bdb.py  "+stack+"  --makevstack=bdb:Q%1d"%k)
			junk = cmdexecute("e2bdb.py  "+stack+"  --makevstack=bdb:"+Qfile)
			#DB = db_open_dict("bdb:Q%1d"%k)
			DB = db_open_dict("bdb:"+Qfile)
			for i in range(na):
				ut = qt[i]*ts[k]
				DB.set_attr(i, "xform.projection", ut)
				#bt = ut.get_params("spider")
				#angsa[i] = [round(bt["phi"],3)%360.0, round(bt["theta"],3)%360.0, bt["psi"], -bt["tx"], -bt["ty"]]
			#write_text_row(angsa, 'ptsma%1d.txt'%k)
			#junk = cmdexecute("e2bdb.py  "+stack+"  --makevstack=bdb:Q%1d"%k)
			#junk = cmdexecute("sxheader.py  bdb:Q%1d  --params=xform.projection  --import=ptsma%1d.txt"%(k,k))
			DB.close()
		#if options.output_dir =="./": delete_bdb("bdb:sdata")
		delete_bdb("bdb:" + current_output_dir + "/"+"sdata")
		#junk = cmdexecute("e2bdb.py . --makevstack=bdb:sdata --filt=Q")
		sdata = "bdb:"+current_output_dir+"/"+"sdata"
		print(sdata)
		junk = cmdexecute("e2bdb.py   " + current_output_dir +"  --makevstack="+sdata +" --filt=Q")
		#junk = cmdexecute("ls  EMAN2DB/sdata*")
		#a = get_im("bdb:sdata")
		a = get_im(sdata)
		a.set_attr("variabilitysymmetry",sym)
		#a.write_image("bdb:sdata")
		a.write_image(sdata)

	else:

		from fundamentals import window2d
		sys.argv       = mpi_init(len(sys.argv), sys.argv)
		myid           = mpi_comm_rank(MPI_COMM_WORLD)
		number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
		main_node      = 0
		shared_comm  = mpi_comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED,  0, MPI_INFO_NULL)
		myid_on_node = mpi_comm_rank(shared_comm)
		no_of_processes_per_group = mpi_comm_size(shared_comm)
		masters_from_groups_vs_everything_else_comm = mpi_comm_split(MPI_COMM_WORLD, main_node == myid_on_node, myid_on_node)
		color, no_of_groups, balanced_processor_load_on_nodes = get_colors_and_subsets(main_node, MPI_COMM_WORLD, myid, \
		    shared_comm, myid_on_node, masters_from_groups_vs_everything_else_comm)
		overhead_loading = options.overhead*number_of_proc
		#memory_per_node  = options.memory_per_node
		#if memory_per_node == -1.: memory_per_node = 2.*no_of_processes_per_group
		keepgoing = 1
		
		current_window   = options.window
		current_decimate = options.decimate
		
		if len(args) == 1: stack = args[0]
		else:
			print(( "usage: " + usage))
			print(( "Please run '" + progname + " -h' for detailed options"))
			return 1

		t0 = time()	
		# obsolete flags
		options.MPI  = True
		#options.nvec = 0
		options.radiuspca = -1
		options.iter = 40
		options.abs  = 0.0
		options.squ  = 0.0

		if options.fl > 0.0 and options.aa == 0.0:
			ERROR("Fall off has to be given for the low-pass filter", "sx3dvariability", 1, myid)
			
		#if options.VAR and options.SND:
		#	ERROR("Only one of var and SND can be set!", "sx3dvariability", myid)
			
		if options.VAR and (options.ave2D or options.ave3D or options.var2D): 
			ERROR("When VAR is set, the program cannot output ave2D, ave3D or var2D", "sx3dvariability", 1, myid)
			
		#if options.SND and (options.ave2D or options.ave3D):
		#	ERROR("When SND is set, the program cannot output ave2D or ave3D", "sx3dvariability", 1, myid)
		
		#if options.nvec > 0 :
		#	ERROR("PCA option not implemented", "sx3dvariability", 1, myid)
			
		#if options.nvec > 0 and options.ave3D == None:
		#	ERROR("When doing PCA analysis, one must set ave3D", "sx3dvariability", 1, myid)
		
		if current_decimate>1.0 or current_decimate<0.0:
			ERROR("Decimate rate should be a value between 0.0 and 1.0", "sx3dvariability", 1, myid)
		
		if current_window < 0.0:
			ERROR("Target window size should be always larger than zero", "sx3dvariability", 1, myid)
			
		if myid == main_node:
			img  = get_image(stack, 0)
			nx   = img.get_xsize()
			ny   = img.get_ysize()
			if(min(nx, ny) < current_window):   keepgoing = 0
		keepgoing = bcast_number_to_all(keepgoing, main_node, MPI_COMM_WORLD)
		if keepgoing == 0: ERROR("The target window size cannot be larger than the size of the input image", "sx3dvariability", 1, myid)

		import string
		options.sym = options.sym.lower()
		# if global_def.CACHE_DISABLE:
		# 	from utilities import disable_bdb_cache
		# 	disable_bdb_cache()
		# global_def.BATCH = True
		
		if myid == main_node:
			if not os.path.exists(current_output_dir): os.mkdir(current_output_dir)# Never delete output_dir in the program!
	
		img_per_grp = options.img_per_grp
		#nvec        = options.nvec
		radiuspca   = options.radiuspca
		from logger import Logger,BaseLogger_Files
		#if os.path.exists(os.path.join(options.output_dir, "log.txt")): os.remove(os.path.join(options.output_dir, "log.txt"))
		log_main=Logger(BaseLogger_Files())
		log_main.prefix = os.path.join(current_output_dir, "./")

		if myid == main_node:
			line = ""
			for a in sys.argv: line +=" "+a
			log_main.add(line)
			log_main.add("-------->>>Settings given by all options<<<-------")
			log_main.add("Symmetry             : %s"%options.sym)
			log_main.add("Input stack          : %s"%stack)
			log_main.add("Output_dir           : %s"%current_output_dir)
			
			if options.ave3D: log_main.add("Ave3d                : %s"%options.ave3D)
			if options.var3D: log_main.add("Var3d                : %s"%options.var3D)
			if options.ave2D: log_main.add("Ave2D                : %s"%options.ave2D)
			if options.var2D: log_main.add("Var2D                : %s"%options.var2D)
			if options.VAR:   log_main.add("VAR                  : True")
			else:             log_main.add("VAR                  : False")
			if options.CTF:   log_main.add("CTF correction       : True  ")
			else:             log_main.add("CTF correction       : False ")
			
			log_main.add("Image per group      : %5d"%options.img_per_grp)
			log_main.add("Image decimate rate  : %4.3f"%current_decimate)
			log_main.add("Low pass filter      : %4.3f"%options.fl)
			current_fl = options.fl
			if current_fl == 0.0: current_fl = 0.5
			log_main.add("Current low pass filter is equivalent to cutoff frequency %4.3f for original image size"%round((current_fl*current_decimate),3))
			log_main.add("Window size          : %5d "%current_window)
			log_main.add("sx3dvariability begins")
	
		symbaselen = 0
		if myid == main_node:
			nima = EMUtil.get_image_count(stack)
			img  = get_image(stack)
			nx   = img.get_xsize()
			ny   = img.get_ysize()
			nnxo = nx
			nnyo = ny
			if options.sym != "c1" :
				imgdata = get_im(stack)
				try:
					i = imgdata.get_attr("variabilitysymmetry").lower()
					if(i != options.sym):
						ERROR("The symmetry provided does not agree with the symmetry of the input stack", "sx3dvariability", 1, myid)
				except:
					ERROR("Input stack is not prepared for symmetry, please follow instructions", "sx3dvariability", 1, myid)
				from utilities import get_symt
				i = len(get_symt(options.sym))
				if((nima/i)*i != nima):
					ERROR("The length of the input stack is incorrect for symmetry processing", "sx3dvariability", 1, myid)
				symbaselen = nima/i
			else:  symbaselen = nima
		else:
			nima = 0
			nx = 0
			ny = 0
			nnxo = 0
			nnyo = 0
		nima    = bcast_number_to_all(nima)
		nx      = bcast_number_to_all(nx)
		ny      = bcast_number_to_all(ny)
		nnxo    = bcast_number_to_all(nnxo)
		nnyo    = bcast_number_to_all(nnyo)
		if current_window > max(nx, ny):
			ERROR("Window size is larger than the original image size", "sx3dvariability", 1)
		
		if current_decimate == 1.:
			if current_window !=0:
				nx = current_window
				ny = current_window
		else:
			if current_window == 0:
				nx = int(nx*current_decimate+0.5)
				ny = int(ny*current_decimate+0.5)
			else:
				nx = int(current_window*current_decimate+0.5)
				ny = nx
		symbaselen = bcast_number_to_all(symbaselen)
		
		# check FFT prime number
		from fundamentals import smallprime
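		# smallprime(nx) returns an FFT-friendly size (a product of small primes) no larger
		# than nx; if the requested size is not FFT-friendly, the settings are adjusted below.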
		is_fft_friendly = (nx == smallprime(nx))
		
		if not is_fft_friendly:
			if myid == main_node:
				log_main.add("The target image size is not a product of small prime numbers")
				log_main.add("Program adjusts the input settings!")
			### two cases
			if current_decimate == 1.:
				nx = smallprime(nx)
				ny = nx
				current_window = nx # update
				if myid == main_node:
					log_main.add("The window size is updated to %d."%current_window)
			else:
				if current_window == 0:
					nx = smallprime(int(nx*current_decimate+0.5))
					current_decimate = float(nx)/nnxo
					ny = nx
					if (myid == main_node):
						log_main.add("The decimate rate is updated to %f."%current_decimate)
				else:
					nx = smallprime(int(current_window*current_decimate+0.5))
					ny = nx
					current_window = int(nx/current_decimate+0.5)
					if (myid == main_node):
						log_main.add("The window size is updated to %d."%current_window)
						
		if myid == main_node:
			log_main.add("The target image size is %d"%nx)
						
		if radiuspca == -1: radiuspca = nx/2-2
		if myid == main_node: log_main.add("%-70s:  %d\n"%("Number of projections", nima))
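		# Distribute the particle index range evenly over the MPI processes.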
		img_begin, img_end = MPI_start_end(nima, number_of_proc, myid)
		
		"""
		if options.SND:
			from projection		import prep_vol, prgs
			from statistics		import im_diff
			from utilities		import get_im, model_circle, get_params_proj, set_params_proj
			from utilities		import get_ctf, generate_ctf
			from filter			import filt_ctf
		
			imgdata = EMData.read_images(stack, range(img_begin, img_end))

			if options.CTF:
				vol = recons3d_4nn_ctf_MPI(myid, imgdata, 1.0, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)
			else:
				vol = recons3d_4nn_MPI(myid, imgdata, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)

			bcast_EMData_to_all(vol, myid)
			volft, kb = prep_vol(vol)

			mask = model_circle(nx/2-2, nx, ny)
			varList = []
			for i in xrange(img_begin, img_end):
				phi, theta, psi, s2x, s2y = get_params_proj(imgdata[i-img_begin])
				ref_prj = prgs(volft, kb, [phi, theta, psi, -s2x, -s2y])
				if options.CTF:
					ctf_params = get_ctf(imgdata[i-img_begin])
					ref_prj = filt_ctf(ref_prj, generate_ctf(ctf_params))
				diff, A, B = im_diff(ref_prj, imgdata[i-img_begin], mask)
				diff2 = diff*diff
				set_params_proj(diff2, [phi, theta, psi, s2x, s2y])
				varList.append(diff2)
			mpi_barrier(MPI_COMM_WORLD)
		"""
		
		if options.VAR: # 2D variance images have no shifts
			#varList   = EMData.read_images(stack, range(img_begin, img_end))
			from EMAN2 import Region
			varList = []
			for index_of_particle in range(img_begin, img_end):
				image = get_im(stack, index_of_particle)
				if current_window > 0: varList.append(fdecimate(window2d(image, current_window, current_window), nx, ny))
				else:                  varList.append(fdecimate(image, nx, ny))
				
		else:
			from utilities		import bcast_number_to_all, bcast_list_to_all, send_EMData, recv_EMData
			from utilities		import set_params_proj, get_params_proj, params_3D_2D_NEW, get_params2D, set_params2D, compose_transform2
			from utilities		import model_blank, nearest_proj, model_circle, write_text_row, wrap_mpi_gatherv
			from applications	import pca
			from statistics		import avgvar, avgvar_ctf, ccc, ave_var, aves_wiener
			from filter		    import filt_tanl
			from morphology		import threshold, square_root
			from projection 	import project, prep_vol, prgs
			from sets		    import Set
			from utilities      import wrap_mpi_recv, wrap_mpi_bcast, wrap_mpi_send
			import numpy as np
			if myid == main_node:
				t1          = time()
				proj_angles = []
				aveList     = []
				tab = EMUtil.get_all_attributes(stack, 'xform.projection')	
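				# Build a sort key from the projection direction: theta is folded into [0, 90]
				# and combined with psi (key = theta*10000 + psi), so that after sorting,
				# projections with similar directions are adjacent and easy to group.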
				for i in range(nima):
					t     = tab[i].get_params('spider')
					phi   = t['phi']
					theta = t['theta']
					psi   = t['psi']
					x     = theta
					if x > 90.0: x = 180.0 - x
					x = x*10000+psi
					proj_angles.append([x, t['phi'], t['theta'], t['psi'], i])
				t2 = time()
				log_main.add( "%-70s:  %d\n"%("Number of neighboring projections", img_per_grp))
				log_main.add("...... Finding neighboring projections\n")
				log_main.add( "Number of images per group: %d"%img_per_grp)
				log_main.add( "Now grouping projections")
				proj_angles.sort()
				proj_angles_list = np.full((nima, 4), 0.0, dtype=np.float32)	
				for i in range(nima):
					proj_angles_list[i][0] = proj_angles[i][1]
					proj_angles_list[i][1] = proj_angles[i][2]
					proj_angles_list[i][2] = proj_angles[i][3]
					proj_angles_list[i][3] = proj_angles[i][4]
			else: proj_angles_list = 0
			proj_angles_list = wrap_mpi_bcast(proj_angles_list, main_node, MPI_COMM_WORLD)
			proj_angles      = []
			for i in range(nima):
				proj_angles.append([proj_angles_list[i][0], proj_angles_list[i][1], proj_angles_list[i][2], int(proj_angles_list[i][3])])
			del proj_angles_list
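			# For every projection assigned to this process, find the img_per_grp angularly
			# nearest projections (and whether each neighbour has to be mirrored).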
			proj_list, mirror_list = nearest_proj(proj_angles, img_per_grp, range(img_begin, img_end))
			all_proj = Set()
			for im in proj_list:
				for jm in im:
					all_proj.add(proj_angles[jm][3])
			all_proj = list(all_proj)
			index = {}
			for i in range(len(all_proj)): index[all_proj[i]] = i
			mpi_barrier(MPI_COMM_WORLD)
			if myid == main_node:
				log_main.add("%-70s:  %.2f\n"%("Finding neighboring projections lasted [s]", time()-t2))
				log_main.add("%-70s:  %d\n"%("Number of groups processed on the main node", len(proj_list)))
				log_main.add("Grouping projections took:  %12.1f [m]"%((time()-t2)/60.))
				log_main.add("Number of groups on main node: ", len(proj_list))
			mpi_barrier(MPI_COMM_WORLD)

			if myid == main_node:
				log_main.add("...... Calculating the stack of 2D variances \n")
			# Memory estimation. There are two memory consumption peaks
			# peak 1. Compute ave, var; 
			# peak 2. Var volume reconstruction;
			# proj_params = [0.0]*(nima*5)
			aveList = []
			varList = []				
			#if nvec > 0: eigList = [[] for i in range(nvec)]
			dnumber   = len(all_proj)# all neighborhood set for assigned to myid
			pnumber   = len(proj_list)*2. + img_per_grp # aveList and varList 
			tnumber   = dnumber+pnumber
			vol_size2 = nx**3*4.*8/1.e9
			vol_size1 = 2.*nnxo**3*4.*8/1.e9
			proj_size         = nnxo*nnyo*len(proj_list)*4.*2./1.e9 # both aveList and varList
			orig_data_size    = nnxo*nnyo*4.*tnumber/1.e9
			reduced_data_size = nx*nx*4.*tnumber/1.e9
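			# Exchange the per-process memory estimates so every rank knows the total and
			# which CPU carries the heaviest load (used below for progress reporting).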
			full_data         = np.full((number_of_proc, 2), -1., dtype=np.float16)
			full_data[myid]   = orig_data_size, reduced_data_size
			if myid != main_node: wrap_mpi_send(full_data, main_node, MPI_COMM_WORLD)
			if myid == main_node:
				for iproc in range(number_of_proc):
					if iproc != main_node:
						dummy = wrap_mpi_recv(iproc, MPI_COMM_WORLD)
						full_data[np.where(dummy>-1)] = dummy[np.where(dummy>-1)]
				del dummy
			mpi_barrier(MPI_COMM_WORLD)
			full_data = wrap_mpi_bcast(full_data, main_node, MPI_COMM_WORLD)
			# find the CPU with heaviest load
			minindx         = np.argsort(full_data, 0)
			heavy_load_myid = minindx[-1][1]
			total_mem       = sum(full_data)
			if myid == main_node:
				if current_window == 0:
					log_main.add("Nx:   current image size = %d. Decimated by %f from %d"%(nx, current_decimate, nnxo))
				else:
					log_main.add("Nx:   current image size = %d. Windowed to %d, and decimated by %f from %d"%(nx, current_window, current_decimate, nnxo))
				log_main.add("Nproj:       number of particle images.")
				log_main.add("Navg:        number of 2D average images.")
				log_main.add("Nvar:        number of 2D variance images.")
				log_main.add("Img_per_grp: user defined image per group for averaging = %d"%img_per_grp)
				log_main.add("Overhead:    total python overhead memory consumption   = %f"%overhead_loading)
				log_main.add("Total memory) = 4.0*nx^2*(nproj + navg +nvar+ img_per_grp)/1.0e9 + overhead: %12.3f [GB]"%\
				   (total_mem[1] + overhead_loading))
			del full_data
			mpi_barrier(MPI_COMM_WORLD)
			if myid == heavy_load_myid:
				log_main.add("Begin reading and preprocessing images on processor. Wait... ")
				ttt = time()
			#imgdata = EMData.read_images(stack, all_proj)			
			imgdata = [ None for im in range(len(all_proj))]
			for index_of_proj in range(len(all_proj)):
				#image = get_im(stack, all_proj[index_of_proj])
				if( current_window > 0): imgdata[index_of_proj] = fdecimate(window2d(get_im(stack, all_proj[index_of_proj]),current_window,current_window), nx, ny)
				else:                    imgdata[index_of_proj] = fdecimate(get_im(stack, all_proj[index_of_proj]), nx, ny)
				
				if (current_decimate> 0.0 and options.CTF):
					ctf = imgdata[index_of_proj].get_attr("ctf")
					ctf.apix = ctf.apix/current_decimate
					imgdata[index_of_proj].set_attr("ctf", ctf)
					
				if myid == heavy_load_myid and index_of_proj%100 == 0:
					log_main.add(" ...... %6.2f%% "%(index_of_proj/float(len(all_proj))*100.))
			mpi_barrier(MPI_COMM_WORLD)
			if myid == heavy_load_myid:
				log_main.add("All_proj preprocessing cost %7.2f m"%((time()-ttt)/60.))
				log_main.add("Wait untill reading on all CPUs done...")
			'''	
			imgdata2 = EMData.read_images(stack, range(img_begin, img_end))
			if options.fl > 0.0:
				for k in xrange(len(imgdata2)):
					imgdata2[k] = filt_tanl(imgdata2[k], options.fl, options.aa)
			if options.CTF:
				vol = recons3d_4nn_ctf_MPI(myid, imgdata2, 1.0, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)
			else:
				vol = recons3d_4nn_MPI(myid, imgdata2, symmetry=options.sym, npad=options.npad, xysize=-1, zsize=-1)
			if myid == main_node:
				vol.write_image("vol_ctf.hdf")
				print_msg("Writing to the disk volume reconstructed from averages as		:  %s\n"%("vol_ctf.hdf"))
			del vol, imgdata2
			mpi_barrier(MPI_COMM_WORLD)
			'''
			from applications import prepare_2d_forPCA
			from utilities    import model_blank
			from EMAN2        import Transform
			if not options.no_norm: 
				mask = model_circle(nx/2-2, nx, nx)
			if options.CTF: 
				from utilities import pad
				from filter import filt_ctf
			from filter import filt_tanl
			if myid == heavy_load_myid:
				log_main.add("Start computing 2D aveList and varList. Wait...")
				ttt = time()
			inner=nx//2-4
			outer=inner+2
			xform_proj_for_2D = [ None for i in range(len(proj_list))]
			for i in range(len(proj_list)):
				ki = proj_angles[proj_list[i][0]][3]
				if ki >= symbaselen:  continue
				mi = index[ki]
				dpar = Util.get_transform_params(imgdata[mi], "xform.projection", "spider")
				phiM, thetaM, psiM, s2xM, s2yM  = dpar["phi"],dpar["theta"],dpar["psi"],-dpar["tx"]*current_decimate,-dpar["ty"]*current_decimate
				grp_imgdata = []
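				# Convert each neighbour's 3D projection parameters into 2D alignment
				# parameters and compose them with the in-plane rotation that brings the
				# image into the frame of the group's central projection (phiM, thetaM).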
				for j in range(img_per_grp):
					mj = index[proj_angles[proj_list[i][j]][3]]
					cpar = Util.get_transform_params(imgdata[mj], "xform.projection", "spider")
					alpha, sx, sy, mirror = params_3D_2D_NEW(cpar["phi"], cpar["theta"],cpar["psi"], -cpar["tx"]*current_decimate, -cpar["ty"]*current_decimate, mirror_list[i][j])
					if thetaM <= 90:
						if mirror == 0:  alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, phiM - cpar["phi"], 0.0, 0.0, 1.0)
						else:            alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, 180-(phiM - cpar["phi"]), 0.0, 0.0, 1.0)
					else:
						if mirror == 0:  alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, -(phiM- cpar["phi"]), 0.0, 0.0, 1.0)
						else:            alpha, sx, sy, scale = compose_transform2(alpha, sx, sy, 1.0, -(180-(phiM - cpar["phi"])), 0.0, 0.0, 1.0)
					imgdata[mj].set_attr("xform.align2d", Transform({"type":"2D","alpha":alpha,"tx":sx,"ty":sy,"mirror":mirror,"scale":1.0}))
					grp_imgdata.append(imgdata[mj])
				if not options.no_norm:
					for k in range(img_per_grp):
						ave, std, minn, maxx = Util.infomask(grp_imgdata[k], mask, False)
						grp_imgdata[k] -= ave
						grp_imgdata[k] /= std
				if options.fl > 0.0:
					for k in range(img_per_grp):
						grp_imgdata[k] = filt_tanl(grp_imgdata[k], options.fl, options.aa)

				#  Because of background issues, only linear option works.
				if options.CTF:  ave, var = aves_wiener(grp_imgdata, SNR = 1.0e5, interpolation_method = "linear")
				else:  ave, var = ave_var(grp_imgdata)
				# Switch to std dev
				# threshold is not really needed,it is just in case due to numerical accuracy something turns out negative.
				var = square_root(threshold(var))

				set_params_proj(ave, [phiM, thetaM, 0.0, 0.0, 0.0])
				set_params_proj(var, [phiM, thetaM, 0.0, 0.0, 0.0])

				aveList.append(ave)
				varList.append(var)
				xform_proj_for_2D[i] = [phiM, thetaM, 0.0, 0.0, 0.0]

				'''
				if nvec > 0:
					eig = pca(input_stacks=grp_imgdata, subavg="", mask_radius=radiuspca, nvec=nvec, incore=True, shuffle=False, genbuf=True)
					for k in range(nvec):
						set_params_proj(eig[k], [phiM, thetaM, 0.0, 0.0, 0.0])
						eigList[k].append(eig[k])
					"""
					if myid == 0 and i == 0:
						for k in xrange(nvec):
							eig[k].write_image("eig.hdf", k)
					"""
				'''
				if (myid == heavy_load_myid) and (i%100 == 0):
					log_main.add(" ......%6.2f%%  "%(i/float(len(proj_list))*100.))		
			del imgdata, grp_imgdata, cpar, dpar, all_proj, proj_angles, index
			if not options.no_norm: del mask
			if myid == main_node: del tab
			#  At this point, all averages and variances are computed
			mpi_barrier(MPI_COMM_WORLD)
			
			if (myid == heavy_load_myid):
				log_main.add("Computing aveList and varList took %12.1f [m]"%((time()-ttt)/60.))
			
			xform_proj_for_2D = wrap_mpi_gatherv(xform_proj_for_2D, main_node, MPI_COMM_WORLD)
			if (myid == main_node):
				write_text_row(xform_proj_for_2D, os.path.join(current_output_dir, "params.txt"))
			del xform_proj_for_2D
			mpi_barrier(MPI_COMM_WORLD)
			if options.ave2D:
				from fundamentals import fpol
				from applications import header
				if myid == main_node:
					log_main.add("Compute ave2D ... ")
					km = 0
					for i in range(number_of_proc):
						if i == main_node :
							for im in range(len(aveList)):
								aveList[im].write_image(os.path.join(current_output_dir, options.ave2D), km)
								km += 1
						else:
							nl = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
							nl = int(nl[0])
							for im in range(nl):
								ave = recv_EMData(i, im+i+70000)
								"""
								nm = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
								nm = int(nm[0])
								members = mpi_recv(nm, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
								ave.set_attr('members', map(int, members))
								members = mpi_recv(nm, MPI_FLOAT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
								ave.set_attr('pix_err', map(float, members))
								members = mpi_recv(3, MPI_FLOAT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
								ave.set_attr('refprojdir', map(float, members))
								"""
								tmpvol=fpol(ave, nx, nx,1)								
								tmpvol.write_image(os.path.join(current_output_dir, options.ave2D), km)
								km += 1
				else:
					mpi_send(len(aveList), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
					for im in range(len(aveList)):
						send_EMData(aveList[im], main_node,im+myid+70000)
						"""
						members = aveList[im].get_attr('members')
						mpi_send(len(members), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						mpi_send(members, len(members), MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						members = aveList[im].get_attr('pix_err')
						mpi_send(members, len(members), MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						try:
							members = aveList[im].get_attr('refprojdir')
							mpi_send(members, 3, MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						except:
							mpi_send([-999.0,-999.0,-999.0], 3, MPI_FLOAT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
						"""
				if myid == main_node:
					header(os.path.join(current_output_dir, options.ave2D), params='xform.projection', fimport = os.path.join(current_output_dir, "params.txt"))
				mpi_barrier(MPI_COMM_WORLD)	
			if options.ave3D:
				from fundamentals import fpol
				t5 = time()
				if myid == main_node: log_main.add("Reconstruct ave3D ... ")
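				# Reconstruct a 3D volume from the 2D averages; on the main node the result is
				# resampled to undo the decimation and padded back to the original box size.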
				ave3D = recons3d_4nn_MPI(myid, aveList, symmetry=options.sym, npad=options.npad)
				bcast_EMData_to_all(ave3D, myid)
				if myid == main_node:
					if current_decimate != 1.0: ave3D = resample(ave3D, 1./current_decimate)
					ave3D = fpol(ave3D, nnxo, nnxo, nnxo) # always back to the original image size
					set_pixel_size(ave3D, 1.0)
					ave3D.write_image(os.path.join(current_output_dir, options.ave3D))
					log_main.add("Ave3D reconstruction took %12.1f [m]"%((time()-t5)/60.0))
					log_main.add("%-70s:  %s\n"%("The reconstructed ave3D is saved as ", options.ave3D))
					
			mpi_barrier(MPI_COMM_WORLD)		
			del ave, var, proj_list, stack, alpha, sx, sy, mirror, aveList
			'''
			if nvec > 0:
				for k in range(nvec):
					if myid == main_node:log_main.add("Reconstruction eigenvolumes", k)
					cont = True
					ITER = 0
					mask2d = model_circle(radiuspca, nx, nx)
					while cont:
						#print "On node %d, iteration %d"%(myid, ITER)
						eig3D = recons3d_4nn_MPI(myid, eigList[k], symmetry=options.sym, npad=options.npad)
						bcast_EMData_to_all(eig3D, myid, main_node)
						if options.fl > 0.0:
							eig3D = filt_tanl(eig3D, options.fl, options.aa)
						if myid == main_node:
							eig3D.write_image(os.path.join(options.outpout_dir, "eig3d_%03d.hdf"%(k, ITER)))
						Util.mul_img( eig3D, model_circle(radiuspca, nx, nx, nx) )
						eig3Df, kb = prep_vol(eig3D)
						del eig3D
						cont = False
						icont = 0
						for l in range(len(eigList[k])):
							phi, theta, psi, s2x, s2y = get_params_proj(eigList[k][l])
							proj = prgs(eig3Df, kb, [phi, theta, psi, s2x, s2y])
							cl = ccc(proj, eigList[k][l], mask2d)
							if cl < 0.0:
								icont += 1
								cont = True
								eigList[k][l] *= -1.0
						u = int(cont)
						u = mpi_reduce([u], 1, MPI_INT, MPI_MAX, main_node, MPI_COMM_WORLD)
						icont = mpi_reduce([icont], 1, MPI_INT, MPI_SUM, main_node, MPI_COMM_WORLD)

						if myid == main_node:
							u = int(u[0])
							log_main.add(" Eigenvector: ",k," number changed ",int(icont[0]))
						else: u = 0
						u = bcast_number_to_all(u, main_node)
						cont = bool(u)
						ITER += 1

					del eig3Df, kb
					mpi_barrier(MPI_COMM_WORLD)
				del eigList, mask2d
			'''
			if options.ave3D: del ave3D
			if options.var2D:
				from fundamentals import fpol 
				from applications import header
				if myid == main_node:
					log_main.add("Compute var2D...")
					km = 0
					for i in range(number_of_proc):
						if i == main_node :
							for im in range(len(varList)):
								tmpvol=fpol(varList[im], nx, nx,1)
								tmpvol.write_image(os.path.join(current_output_dir, options.var2D), km)
								km += 1
						else:
							nl = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
							nl = int(nl[0])
							for im in range(nl):
								ave = recv_EMData(i, im+i+70000)
								tmpvol=fpol(ave, nx, nx,1)
								tmpvol.write_image(os.path.join(current_output_dir, options.var2D), km)
								km += 1
				else:
					mpi_send(len(varList), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
					for im in range(len(varList)):
						send_EMData(varList[im], main_node, im+myid+70000)  # What about the attributes?
				mpi_barrier(MPI_COMM_WORLD)
				if myid == main_node:
					from applications import header
					header(os.path.join(current_output_dir, options.var2D), params = 'xform.projection',fimport = os.path.join(current_output_dir, "params.txt"))
				mpi_barrier(MPI_COMM_WORLD)
		if options.var3D:
			if myid == main_node: log_main.add("Reconstruct var3D ...")
			t6 = time()
			# radiusvar = options.radius
			# if( radiusvar < 0 ):  radiusvar = nx//2 -3
			res = recons3d_4nn_MPI(myid, varList, symmetry = options.sym, npad=options.npad)
			#res = recons3d_em_MPI(varList, vol_stack, options.iter, radiusvar, options.abs, True, options.sym, options.squ)
			if myid == main_node:
				from fundamentals import fpol
				if current_decimate != 1.0: res	= resample(res, 1./current_decimate)
				res = fpol(res, nnxo, nnxo, nnxo)
				set_pixel_size(res, 1.0)
				res.write_image(os.path.join(current_output_dir, options.var3D))
				log_main.add("%-70s:  %s\n"%("The reconstructed var3D is saved as ", options.var3D))
				log_main.add("Var3D reconstruction took %f12.1 [m]"%((time()-t6)/60.0))
				log_main.add("Total computation time %f12.1 [m]"%((time()-t0)/60.0))
				log_main.add("sx3dvariability finishes")
		from mpi import mpi_finalize
		mpi_finalize()
		
		if RUNNING_UNDER_MPI: global_def.MPI = False

		global_def.BATCH = False
Example #43
0
def cml_open_proj(stack, ir, ou, lf, hf, dpsi=1):
    from projection import cml_sinogram
    from utilities import model_circle, get_params_proj, model_blank, get_im
    from fundamentals import fftip
    from filter import filt_tanh

    # number of projections
    if type(stack) == type(""): nprj = EMUtil.get_image_count(stack)
    else: nprj = len(stack)
    Prj = []  # list of projections
    Ori = [-1] * 4 * nprj  # initial orientation (phi, theta, psi, index) for each projection

    for i in xrange(nprj):
        image = get_im(stack, i)

        # read initial angles if given
        try:
            Ori[4 * i], Ori[4 * i +
                            1], Ori[4 * i +
                                    2], s2x, s2y = get_params_proj(image)
        except:
            pass

        if (i == 0):
            nx = image.get_xsize()
            if (ou < 1): ou = nx // 2 - 1
            diameter = int(2 * ou)
            mask2D = model_circle(ou, nx, nx)
            if ir > 0: mask2D -= model_circle(ir, nx, nx)

        # normalize under the mask
        [mean_a, sigma, imin, imax] = Util.infomask(image, mask2D, True)
        image -= mean_a
        Util.mul_scalar(image, 1.0 / sigma)
        Util.mul_img(image, mask2D)

        # sinogram
        sino = cml_sinogram(image, diameter, dpsi)

        # prepare the cut positions in order to filter (lf: low freq; hf: high freq)
        ihf = min(int(2 * hf * diameter), diameter + (diameter + 1) % 2)
        ihf = ihf + (ihf + 1) % 2  # index ihf must be odd to take the imaginary part
        ilf = max(int(2 * lf * diameter), 0)
        ilf = ilf + ilf % 2  # index ilf must be even to fall in the real part
        bdf = ihf - ilf + 1

        # process lines
        nxe = sino.get_xsize()
        nye = sino.get_ysize()
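        # prj stores, for each of the nye sinogram lines, the retained Fourier
        # coefficients of the line and of its mirrored counterpart (hence 2*nye rows).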
        prj = model_blank(bdf, 2 * nye)
        pp = model_blank(nxe, 2 * nye)
        for li in xrange(nye):
            # get the line li
            line = Util.window(sino, nxe, 1, 1, 0, li - nye // 2, 0)
            # optional tanh filtering (did not improve the results)
            #line = filt_tanh(line, ou / float(nx), ou / float(nx))
            # normalize this line
            [mean_l, sigma_l, imin, imax] = Util.infomask(line, None, True)
            line = (line - mean_l) / sigma_l
            # fft
            fftip(line)
            # filter (cut part of coef) and create mirror line
            Util.cml_prepare_line(prj, line, ilf, ihf, li, nye)

        # store the projection
        Prj.append(prj)

    return Prj, Ori
Example #44
0
File: sxstability.py Project: cryoem/test
def main():
    from utilities import get_input_from_string

    progname = os.path.basename(sys.argv[0])
    usage = (
        progname
        + " stack output_average --radius=particle_radius --xr=xr --yr=yr --ts=ts --thld_err=thld_err --num_ali=num_ali --fl=fl --aa=aa --CTF --verbose --stables"
    )
    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option("--radius", type="int", default=-1, help=" particle radius for alignment")
    parser.add_option(
        "--xr",
        type="string",
        default="2 1",
        help="range for translation search in x direction, search is +/xr (default 2,1)",
    )
    parser.add_option(
        "--yr",
        type="string",
        default="-1",
        help="range for translation search in y direction, search is +/yr (default = same as xr)",
    )
    parser.add_option(
        "--ts",
        type="string",
        default="1 0.5",
        help="step size of the translation search in both directions, search is -xr, -xr+ts, 0, xr-ts, xr, can be fractional (default: 1,0.5)",
    )
    parser.add_option("--thld_err", type="float", default=0.75, help="threshld of pixel error (default = 0.75)")
    parser.add_option(
        "--num_ali", type="int", default=5, help="number of alignments performed for stability (default = 5)"
    )
    parser.add_option("--maxit", type="int", default=30, help="number of iterations for each xr (default = 30)")
    parser.add_option(
        "--fl",
        type="float",
        default=0.3,
        help="cut-off frequency of hyperbolic tangent low-pass Fourier filter (default = 0.3)",
    )
    parser.add_option(
        "--aa", type="float", default=0.2, help="fall-off of hyperbolic tangent low-pass Fourier filter (default = 0.2)"
    )
    parser.add_option("--CTF", action="store_true", default=False, help="Use CTF correction during the alignment ")
    parser.add_option(
        "--verbose", action="store_true", default=False, help="print individual pixel error (default = False)"
    )
    parser.add_option(
        "--stables",
        action="store_true",
        default=False,
        help="output the stable particles number in file (default = False)",
    )
    parser.add_option(
        "--method", type="string", default=" ", help="SHC (standard method is default when flag is ommitted)"
    )
    (options, args) = parser.parse_args()
    if len(args) != 1 and len(args) != 2:
        print "usage: " + usage
        print "Please run '" + progname + " -h' for detailed options"
    else:
        if global_def.CACHE_DISABLE:
            from utilities import disable_bdb_cache

            disable_bdb_cache()

        from applications import within_group_refinement, ali2d_ras
        from pixel_error import multi_align_stability
        from utilities import write_text_file, write_text_row

        global_def.BATCH = True

        xrng = get_input_from_string(options.xr)
        if options.yr == "-1":
            yrng = xrng
        else:
            yrng = get_input_from_string(options.yr)
        step = get_input_from_string(options.ts)

        class_data = EMData.read_images(args[0])

        nx = class_data[0].get_xsize()
        ou = options.radius
        num_ali = options.num_ali
        if ou == -1:
            ou = nx / 2 - 2
        from utilities import model_circle, get_params2D, set_params2D

        mask = model_circle(ou, nx, nx)

        if options.CTF:
            from filter import filt_ctf

            for im in xrange(len(class_data)):
                #  Flip phases
                class_data[im] = filt_ctf(class_data[im], class_data[im].get_attr("ctf"), binary=1)
        for im in class_data:
            im.set_attr("previousmax", -1.0e10)
            try:
                t = im.get_attr("xform.align2d")  # if they are there, no need to set them!
            except:
                try:
                    t = im.get_attr("xform.projection")
                    d = t.get_params("spider")
                    set_params2D(im, [0.0, -d["tx"], -d["ty"], 0, 1.0])
                except:
                    set_params2D(im, [0.0, 0.0, 0.0, 0, 1.0])
        all_ali_params = []
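        # Run num_ali independent 2D alignments; particle stability is later judged from
        # the consistency of its alignment parameters across these runs.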

        for ii in xrange(num_ali):
            ali_params = []
            if options.verbose:
                ALPHA = []
                SX = []
                SY = []
                MIRROR = []
            if xrng[0] == 0.0 and yrng[0] == 0.0:
                avet = ali2d_ras(
                    class_data,
                    randomize=True,
                    ir=1,
                    ou=ou,
                    rs=1,
                    step=1.0,
                    dst=90.0,
                    maxit=options.maxit,
                    check_mirror=True,
                    FH=options.fl,
                    FF=options.aa,
                )
            else:
                avet = within_group_refinement(
                    class_data,
                    mask,
                    True,
                    1,
                    ou,
                    1,
                    xrng,
                    yrng,
                    step,
                    90.0,
                    maxit=options.maxit,
                    FH=options.fl,
                    FF=options.aa,
                    method=options.method,
                )
                from utilities import info

                # print "  avet  ",info(avet)
            for im in class_data:
                alpha, sx, sy, mirror, scale = get_params2D(im)
                ali_params.extend([alpha, sx, sy, mirror])
                if options.verbose:
                    ALPHA.append(alpha)
                    SX.append(sx)
                    SY.append(sy)
                    MIRROR.append(mirror)
            all_ali_params.append(ali_params)
            if options.verbose:
                write_text_file([ALPHA, SX, SY, MIRROR], "ali_params_run_%d" % ii)
        """
		avet = class_data[0]
		from utilities import read_text_file
		all_ali_params = []
		for ii in xrange(5):
			temp = read_text_file( "ali_params_run_%d"%ii,-1)
			uuu = []
			for k in xrange(len(temp[0])):
				uuu.extend([temp[0][k],temp[1][k],temp[2][k],temp[3][k]])
			all_ali_params.append(uuu)


		"""

        stable_set, mir_stab_rate, pix_err = multi_align_stability(
            all_ali_params, 0.0, 10000.0, options.thld_err, options.verbose, 2 * ou + 1
        )
        print "%4s %20s %20s %20s %30s %6.2f" % (
            "",
            "Size of set",
            "Size of stable set",
            "Mirror stab rate",
            "Pixel error prior to pruning the set above threshold of",
            options.thld_err,
        )
        print "Average stat: %10d %20d %20.2f   %15.2f" % (len(class_data), len(stable_set), mir_stab_rate, pix_err)
        if len(stable_set) > 0:
            if options.stables:
                stab_mem = [[0, 0.0, 0] for j in xrange(len(stable_set))]
                for j in xrange(len(stable_set)):
                    stab_mem[j] = [int(stable_set[j][1]), stable_set[j][0], j]
                write_text_row(stab_mem, "stable_particles.txt")

            stable_set_id = []
            particle_pixerr = []
            for s in stable_set:
                stable_set_id.append(s[1])
                particle_pixerr.append(s[0])
            from fundamentals import rot_shift2D

            avet.to_zero()
            l = -1
            print "average parameters:  angle, x-shift, y-shift, mirror"
            for j in stable_set_id:
                l += 1
                print " %4d  %4d  %12.2f %12.2f %12.2f        %1d" % (
                    l,
                    j,
                    stable_set[l][2][0],
                    stable_set[l][2][1],
                    stable_set[l][2][2],
                    int(stable_set[l][2][3]),
                )
                avet += rot_shift2D(
                    class_data[j], stable_set[l][2][0], stable_set[l][2][1], stable_set[l][2][2], stable_set[l][2][3]
                )
            avet /= l + 1
            avet.set_attr("members", stable_set_id)
            avet.set_attr("pix_err", pix_err)
            avet.set_attr("pixerr", particle_pixerr)
            avet.write_image(args[1])

        global_def.BATCH = False