Example #1
def main():

    progname = os.path.basename(sys.argv[0])
    usage = progname + " proj_stack output_averages --MPI"
    parser = OptionParser(usage, version=SPARXVERSION)

    parser.add_option("--img_per_group",
                      type="int",
                      default=100,
                      help="number of images per group")
    parser.add_option("--radius",
                      type="int",
                      default=-1,
                      help="radius for alignment")
    parser.add_option(
        "--xr",
        type="string",
        default="2 1",
        help="range for translation search in x direction, search is +/xr")
    parser.add_option(
        "--yr",
        type="string",
        default="-1",
        help=
        "range for translation search in y direction, search is +/yr (default = same as xr)"
    )
    parser.add_option(
        "--ts",
        type="string",
        default="1 0.5",
        help=
        "step size of the translation search in both directions, search is -xr, -xr+ts, 0, xr-ts, xr, can be fractional"
    )
    parser.add_option(
        "--iter",
        type="int",
        default=30,
        help="number of iterations within alignment (default = 30)")
    parser.add_option(
        "--num_ali",
        type="int",
        default=5,
        help="number of alignments performed for stability (default = 5)")
    parser.add_option("--thld_err",
                      type="float",
                      default=1.0,
                      help="threshold of pixel error (default = 1.732)")
    parser.add_option(
        "--grouping",
        type="string",
        default="GRP",
        help=
        "do grouping of projections: PPR - per projection, GRP - different size groups, exclusive (default), GEV - grouping equal size"
    )
    parser.add_option(
        "--delta",
        type="float",
        default=-1.0,
        help="angular step for reference projections (required for GEV method)"
    )
    parser.add_option(
        "--fl",
        type="float",
        default=0.3,
        help="cut-off frequency of hyperbolic tangent low-pass Fourier filter")
    parser.add_option(
        "--aa",
        type="float",
        default=0.2,
        help="fall-off of hyperbolic tangent low-pass Fourier filter")
    parser.add_option("--CTF",
                      action="store_true",
                      default=False,
                      help="Consider CTF correction during the alignment ")
    parser.add_option("--MPI",
                      action="store_true",
                      default=False,
                      help="use MPI version")

    (options, args) = parser.parse_args()

    myid = mpi.mpi_comm_rank(MPI_COMM_WORLD)
    number_of_proc = mpi.mpi_comm_size(MPI_COMM_WORLD)
    main_node = 0

    if len(args) == 2:
        stack = args[0]
        outdir = args[1]
    else:
        sp_global_def.ERROR("Incomplete list of arguments",
                            "sxproj_stability.main",
                            1,
                            myid=myid)
        return
    if not options.MPI:
        sp_global_def.ERROR("Non-MPI not supported!",
                            "sxproj_stability.main",
                            1,
                            myid=myid)
        return

    if sp_global_def.CACHE_DISABLE:
        from sp_utilities import disable_bdb_cache
        disable_bdb_cache()
    sp_global_def.BATCH = True

    img_per_grp = options.img_per_group
    radius = options.radius
    ite = options.iter
    num_ali = options.num_ali
    thld_err = options.thld_err

    xrng = get_input_from_string(options.xr)
    if options.yr == "-1":
        yrng = xrng
    else:
        yrng = get_input_from_string(options.yr)

    step = get_input_from_string(options.ts)

    if myid == main_node:
        nima = EMUtil.get_image_count(stack)
        img = get_image(stack)
        nx = img.get_xsize()
        ny = img.get_ysize()
    else:
        nima = 0
        nx = 0
        ny = 0
    nima = bcast_number_to_all(nima)
    nx = bcast_number_to_all(nx)
    ny = bcast_number_to_all(ny)
    if radius == -1: radius = nx // 2 - 2
    mask = model_circle(radius, nx, nx)

    st = time()
    if options.grouping == "GRP":
        if myid == main_node:
            sxprint("  A  ", myid, "  ", time() - st)
            proj_attr = EMUtil.get_all_attributes(stack, "xform.projection")
            proj_params = []
            for i in range(nima):
                dp = proj_attr[i].get_params("spider")
                phi, theta, psi, s2x, s2y = dp["phi"], dp["theta"], dp[
                    "psi"], -dp["tx"], -dp["ty"]
                proj_params.append([phi, theta, psi, s2x, s2y])

            # The grouping is done here. group_proj_by_phitheta is not well annotated,
            # so its return values are briefly explained:
            # proj_list  : a list of lists of particle numbers; each inner list contains img_per_grp
            #              particle numbers, except possibly the last one. Depending on how many
            #              particles remain, they either form their own group or are appended to
            #              the last group.
            # angle_list : a list of lists; each inner list contains three numbers (phi, theta, delta),
            #              where (phi, theta) is the projection angle of the group center and delta is
            #              the angular range of the group.
            # mirror_list: a list of lists; each inner list contains img_per_grp True/False flags
            #              indicating whether the corresponding particle should take the mirror position.
            # In this program angle_list and mirror_list are not used.
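            # Hypothetical illustration (not real data): with img_per_grp = 3 and 8 particles,
            # proj_list_all might look like [[4, 7, 2], [0, 5, 1], [3, 6]], i.e. the trailing,
            # smaller group simply absorbs the leftover particles.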

            proj_list_all, angle_list, mirror_list = group_proj_by_phitheta(
                proj_params, img_per_grp=img_per_grp)
            del proj_params
            sxprint("  B  number of groups  ", myid, "  ", len(proj_list_all),
                    time() - st)
        mpi_barrier(MPI_COMM_WORLD)

        # Number of groups to distribute. There may actually be one or two more groups (the size of
        # the remaining group varies); those are simply assigned to the main node below.
        n_grp = nima // img_per_grp - 1

        # Distribute proj_list_all round-robin over all processes; each process's share becomes its proj_list
        proj_list = []
        for i in range(n_grp):
            proc_to_stay = i % number_of_proc
            if proc_to_stay == main_node:
                if myid == main_node: proj_list.append(proj_list_all[i])
            elif myid == main_node:
                mpi_send(len(proj_list_all[i]), 1, MPI_INT, proc_to_stay,
                         SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
                mpi_send(proj_list_all[i], len(proj_list_all[i]), MPI_INT,
                         proc_to_stay, SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
            elif myid == proc_to_stay:
                img_per_grp = mpi_recv(1, MPI_INT, main_node,
                                       SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
                img_per_grp = int(img_per_grp[0])
                temp = mpi_recv(img_per_grp, MPI_INT, main_node,
                                SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
                proj_list.append(list(map(int, temp)))
                del temp
            mpi_barrier(MPI_COMM_WORLD)
        sxprint("  C  ", myid, "  ", time() - st)
        if myid == main_node:
            # Assign the remaining groups to main_node
            for i in range(n_grp, len(proj_list_all)):
                proj_list.append(proj_list_all[i])
            del proj_list_all, angle_list, mirror_list

    #   Compute stability per reference projection direction; an equal number of images is assigned to each direction, so groups overlap
    elif options.grouping == "GEV":

        if options.delta == -1.0:
            ERROR(
                "Angular step for reference projections is required for GEV method"
            )
            return

        from sp_utilities import even_angles, nearestk_to_refdir, getvec
        refproj = even_angles(options.delta)
        img_begin, img_end = MPI_start_end(len(refproj), number_of_proc, myid)
        # Now each processor keeps its own share of reference projections
        refprojdir = refproj[img_begin:img_end]
        del refproj

        ref_ang = [0.0] * (len(refprojdir) * 2)
        for i in range(len(refprojdir)):
            ref_ang[i * 2] = refprojdir[0][0]
            ref_ang[i * 2 + 1] = refprojdir[0][1] + i * 0.1

        sxprint("  A  ", myid, "  ", time() - st)
        proj_attr = EMUtil.get_all_attributes(stack, "xform.projection")
        #  the solution below is very slow, do not use it unless there is a problem with the I/O
        """
		for i in xrange(number_of_proc):
			if myid == i:
				proj_attr = EMUtil.get_all_attributes(stack, "xform.projection")
			mpi_barrier(MPI_COMM_WORLD)
		"""
        sxprint("  B  ", myid, "  ", time() - st)

        proj_ang = [0.0] * (nima * 2)
        for i in range(nima):
            dp = proj_attr[i].get_params("spider")
            proj_ang[i * 2] = dp["phi"]
            proj_ang[i * 2 + 1] = dp["theta"]
        sxprint("  C  ", myid, "  ", time() - st)
        asi = Util.nearestk_to_refdir(proj_ang, ref_ang, img_per_grp)
        del proj_ang, ref_ang
        proj_list = []
        for i in range(len(refprojdir)):
            proj_list.append(asi[i * img_per_grp:(i + 1) * img_per_grp])
        del asi
        sxprint("  D  ", myid, "  ", time() - st)
        #from sys import exit
        #exit()

    #   Compute stability per projection
    elif options.grouping == "PPR":
        sxprint("  A  ", myid, "  ", time() - st)
        proj_attr = EMUtil.get_all_attributes(stack, "xform.projection")
        sxprint("  B  ", myid, "  ", time() - st)
        proj_params = []
        for i in range(nima):
            dp = proj_attr[i].get_params("spider")
            phi, theta, psi, s2x, s2y = dp["phi"], dp["theta"], dp[
                "psi"], -dp["tx"], -dp["ty"]
            proj_params.append([phi, theta, psi, s2x, s2y])
        img_begin, img_end = MPI_start_end(nima, number_of_proc, myid)
        sxprint("  C  ", myid, "  ", time() - st)
        from sp_utilities import nearest_proj
        proj_list, mirror_list = nearest_proj(
            proj_params, img_per_grp,
            list(range(img_begin, img_begin + 1)))  #range(img_begin, img_end))
        refprojdir = proj_params[img_begin:img_end]
        del proj_params, mirror_list
        sxprint("  D  ", myid, "  ", time() - st)

    else:
        ERROR("Incorrect projection grouping option")
        return

    ###########################################################################################################
    # Begin stability test
    from sp_utilities import get_params_proj, read_text_file
    #if myid == 0:
    #	from utilities import read_text_file
    #	proj_list[0] = map(int, read_text_file("lggrpp0.txt"))

    from sp_utilities import model_blank
    aveList = [model_blank(nx, ny) for _ in range(len(proj_list))]  # one independent blank image per group
    if options.grouping == "GRP":
        refprojdir = [[0.0, 0.0, -1.0]] * len(proj_list)
    for i in range(len(proj_list)):
        sxprint("  E  ", myid, "  ", time() - st)
        class_data = EMData.read_images(stack, proj_list[i])
        #print "  R  ",myid,"  ",time()-st
        if options.CTF:
            from sp_filter import filt_ctf
            for im in range(len(class_data)):  #  MEM LEAK!!
                atemp = class_data[im].copy()
                btemp = filt_ctf(atemp, atemp.get_attr("ctf"), binary=1)
                class_data[im] = btemp
                #class_data[im] = filt_ctf(class_data[im], class_data[im].get_attr("ctf"), binary=1)
        for im in class_data:
            try:
                t = im.get_attr(
                    "xform.align2d")  # if they are there, no need to set them!
            except:
                try:
                    t = im.get_attr("xform.projection")
                    d = t.get_params("spider")
                    set_params2D(im, [0.0, -d["tx"], -d["ty"], 0, 1.0])
                except:
                    set_params2D(im, [0.0, 0.0, 0.0, 0, 1.0])
        #print "  F  ",myid,"  ",time()-st
        # Here, we perform realignment num_ali times
        all_ali_params = []
        for j in range(num_ali):
            if (xrng[0] == 0.0 and yrng[0] == 0.0):
                avet = ali2d_ras(class_data,
                                 randomize=True,
                                 ir=1,
                                 ou=radius,
                                 rs=1,
                                 step=1.0,
                                 dst=90.0,
                                 maxit=ite,
                                 check_mirror=True,
                                 FH=options.fl,
                                 FF=options.aa)
            else:
                avet = within_group_refinement(class_data, mask, True, 1,
                                               radius, 1, xrng, yrng, step,
                                               90.0, ite, options.fl,
                                               options.aa)
            ali_params = []
            for im in range(len(class_data)):
                alpha, sx, sy, mirror, scale = get_params2D(class_data[im])
                ali_params.extend([alpha, sx, sy, mirror])
            all_ali_params.append(ali_params)
        #aveList[i] = avet
        #print "  G  ",myid,"  ",time()-st
        del ali_params
        # Determine the stability of this group.
        # stable_set contains all particles deemed stable; it is a list of lists, where each entry
        # holds the pixel error, the image number within this group, and the 2D alignment parameters.
        # stable_set is sorted by pixel error.
        #from utilities import write_text_file
        #write_text_file(all_ali_params, "all_ali_params%03d.txt"%myid)
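        # Hypothetical illustration (not actual output): stable_set might look like
        # [[0.31, 7, [12.5, 0.4, -0.2, 0]], [0.78, 2, [98.1, -1.0, 0.3, 1]], ...],
        # i.e. entries sorted by pixel error, each carrying the in-group image index and [alpha, sx, sy, mirror].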
        stable_set, mir_stab_rate, average_pix_err = multi_align_stability(
            all_ali_params, 0.0, 10000.0, thld_err, False, 2 * radius + 1)
        #print "  H  ",myid,"  ",time()-st
        if (len(stable_set) > 5):
            stable_set_id = []
            members = []
            pix_err = []
            # First put the stable members into attr 'members' and 'pix_err'
            for s in stable_set:
                # s[1] - number in this subset
                stable_set_id.append(s[1])
                # the original image number
                members.append(proj_list[i][s[1]])
                pix_err.append(s[0])
            # Then put the unstable members into attr 'members' and 'pix_err'
            from sp_fundamentals import rot_shift2D
            avet.to_zero()
            if options.grouping == "GRP":
                aphi = 0.0
                atht = 0.0
                vphi = 0.0
                vtht = 0.0
            l = -1
            for j in range(len(proj_list[i])):
                #  This only works if stable_set_id is sorted in increasing order; note how l advances
                if j in stable_set_id:
                    l += 1
                    avet += rot_shift2D(class_data[j], stable_set[l][2][0],
                                        stable_set[l][2][1],
                                        stable_set[l][2][2],
                                        stable_set[l][2][3])
                    if options.grouping == "GRP":
                        phi, theta, psi, sxs, sy_s = get_params_proj(
                            class_data[j])
                        if (theta > 90.0):
                            phi = (phi + 540.0) % 360.0
                            theta = 180.0 - theta
                        aphi += phi
                        atht += theta
                        vphi += phi * phi
                        vtht += theta * theta
                else:
                    members.append(proj_list[i][j])
                    pix_err.append(99999.99)
            aveList[i] = avet.copy()
            if l > 1:
                l += 1
                aveList[i] /= l
                if options.grouping == "GRP":
                    aphi /= l
                    atht /= l
                    vphi = (vphi - l * aphi * aphi) / l
                    vtht = (vtht - l * atht * atht) / l
                    from math import sqrt
                    refprojdir[i] = [
                        aphi, atht,
                        (sqrt(max(vphi, 0.0)) + sqrt(max(vtht, 0.0))) / 2.0
                    ]

            # More information has to be stored here, particularly the reference direction
            aveList[i].set_attr('members', members)
            aveList[i].set_attr('refprojdir', refprojdir[i])
            aveList[i].set_attr('pixerr', pix_err)
        else:
            sxprint(" empty group ", i, refprojdir[i])
            aveList[i].set_attr('members', [-1])
            aveList[i].set_attr('refprojdir', refprojdir[i])
            aveList[i].set_attr('pixerr', [99999.])

    del class_data

    if myid == main_node:
        km = 0
        for i in range(number_of_proc):
            if i == main_node:
                for im in range(len(aveList)):
                    aveList[im].write_image(args[1], km)
                    km += 1
            else:
                nl = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL,
                              MPI_COMM_WORLD)
                nl = int(nl[0])
                for im in range(nl):
                    ave = recv_EMData(i, im + i + 70000)
                    nm = mpi_recv(1, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL,
                                  MPI_COMM_WORLD)
                    nm = int(nm[0])
                    members = mpi_recv(nm, MPI_INT, i, SPARX_MPI_TAG_UNIVERSAL,
                                       MPI_COMM_WORLD)
                    ave.set_attr('members', list(map(int, members)))
                    members = mpi_recv(nm, MPI_FLOAT, i,
                                       SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
                    ave.set_attr('pixerr', list(map(float, members)))
                    members = mpi_recv(3, MPI_FLOAT, i,
                                       SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
                    ave.set_attr('refprojdir', list(map(float, members)))
                    ave.write_image(args[1], km)
                    km += 1
    else:
        mpi_send(len(aveList), 1, MPI_INT, main_node, SPARX_MPI_TAG_UNIVERSAL,
                 MPI_COMM_WORLD)
        for im in range(len(aveList)):
            send_EMData(aveList[im], main_node, im + myid + 70000)
            members = aveList[im].get_attr('members')
            mpi_send(len(members), 1, MPI_INT, main_node,
                     SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
            mpi_send(members, len(members), MPI_INT, main_node,
                     SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
            members = aveList[im].get_attr('pixerr')
            mpi_send(members, len(members), MPI_FLOAT, main_node,
                     SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
            try:
                members = aveList[im].get_attr('refprojdir')
                mpi_send(members, 3, MPI_FLOAT, main_node,
                         SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
            except:
                mpi_send([-999.0, -999.0, -999.0], 3, MPI_FLOAT, main_node,
                         SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)

    sp_global_def.BATCH = False
    mpi_barrier(MPI_COMM_WORLD)
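
A minimal invocation sketch (hedged: the script and stack names below are placeholders; only the positional arguments and --MPI are fixed by the usage string above, and the program is launched through an MPI runner such as mpirun):

    mpirun -np 8 sxproj_stability.py bdb:proj_stack averages.hdf --MPI --img_per_group=100 --thld_err=1.0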
Example #2
def apsh(refimgs,
         imgs2align,
         outangles=None,
         refanglesdoc=None,
         outaligndoc=None,
         outerradius=-1,
         maxshift=0,
         ringstep=1,
         mode="F",
         log=None,
         verbose=False):
    """
	Performs multi-reference alignment of an image stack against a set of reference images, using polar representations of the references.
	
	Arguments:
		refimgs : Input reference image stack (filename or EMData object)
		imgs2align : Image stack to be aligned (filename or EMData object)
		outangles : Output Euler angles doc file
		refanglesdoc : Input Euler angles for reference projections
		outaligndoc : Output 2D alignment doc file
		outerradius : Outer alignment radius
		maxshift : Maximum shift allowed
		ringstep : Alignment radius step size
		mode : Mode, full circle ("F") vs. half circle ("H")
		log : Logger object
		verbose : (boolean) Whether to write additional information to screen
	"""

    # Generate polar representation(s) of reference(s)
    alignrings, polarrefs = mref2polar(refimgs,
                                       outerradius=outerradius,
                                       ringstep=ringstep,
                                       log=log,
                                       verbose=verbose)

    # Read image stack (either a filename or an EMData object)
    if isinstance(imgs2align, str):
        imagestacklist = EMData.read_images(imgs2align)
    else:
        imagestacklist = [imgs2align]

    # Get number of images
    numimg = len(imagestacklist)

    # Get image dimensions (assuming square, and that images and references have the same dimension)
    idim = imagestacklist[0]['nx']

    # Calculate image center
    halfdim = idim // 2 + 1

    # Set constants
    currshift = 0
    scale = 1

    # Initialize output angles
    outangleslist = []
    outalignlist = []

    if outerradius <= 0:
        outerradius = halfdim - 3

    # Set search range
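    # (Assumption, for readability) search_range limits the x/y translation search so that the
    # alignment radius plus the allowed shift stays inside an image of dimension idim.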
    txrng = tyrng = search_range(idim, outerradius, currshift, maxshift)

    print_log_msg(
        'Running multireference alignment allowing a maximum shift of %s\n' %
        maxshift, log, verbose)

    # Loop through images
    for imgindex in range(numimg):
        currimg = imagestacklist[imgindex]

        # Perform multi-reference alignment (adapted from alignment.mref_ali2d)
        best2dparamslist = [
            angt, sxst, syst, mirrorfloat, bestreffloat, peakt
        ] = Util.multiref_polar_ali_2d(currimg, polarrefs, txrng, tyrng,
                                       ringstep, mode, alignrings, halfdim,
                                       halfdim)
        bestref = int(bestreffloat)
        mirrorflag = int(mirrorfloat)

        # Store parameters
        params2dlist = [angt, sxst, syst, mirrorflag, scale]
        outalignlist.append(params2dlist)

        if refanglesdoc:
            refangleslist = read_text_row(refanglesdoc)
            besteulers = refangleslist[bestref]
        else:
            besteulers = [0] * 5

        # Check for mirroring
        if mirrorflag == 1:
            tempeulers = list(
                compose_transform3(besteulers[0], besteulers[1], besteulers[2],
                                   besteulers[3], besteulers[4], 0, 1, 0, 180,
                                   0, 0, 0, 0, 1))
            combinedparams = list(
                compose_transform3(tempeulers[0], tempeulers[1], tempeulers[2],
                                   tempeulers[3], tempeulers[4], 0, 1, 0, 0,
                                   -angt, 0, 0, 0, 1))
        else:
            combinedparams = list(
                compose_transform3(besteulers[0], besteulers[1], besteulers[2],
                                   besteulers[3], besteulers[4], 0, 1, 0, 0,
                                   -angt, 0, 0, 0, 1))
        # compose_transform3: returns phi,theta,psi, tx,ty,tz, scale

        outangleslist.append(combinedparams)

        # Set transformations as image attribute
        set_params2D(currimg, params2dlist, xform="xform.align2d"
                     )  # sometimes I get a vector error with sxheader
        set_params_proj(currimg, besteulers,
                        xform="xform.projection")  # use shifts

    if outangles or outaligndoc:
        if outangles:
            write_text_row(outangleslist, outangles)
            msg = 'Wrote alignment angles to %s\n' % outangles
            print_log_msg(msg, log, verbose)
        if outaligndoc:
            write_text_row(outalignlist, outaligndoc)
            msg = 'Wrote 2D alignment parameters to %s\n' % outaligndoc
            print_log_msg(msg, log, verbose)

    return outalignlist
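
A minimal call sketch, assuming the surrounding SPARX/SPHIRE environment already provides the names used above (EMData, mref2polar, Util, write_text_row, etc.); the stack names and numeric values below are placeholders:

    # Hypothetical usage of apsh(); file names and radii are made up for illustration.
    refs = "references.hdf"        # reference image stack
    expimgs = "bdb:particles"      # stack of images to align
    params2d = apsh(refs, expimgs,
                    outangles="angles.txt",
                    outaligndoc="align2d.txt",
                    outerradius=29, maxshift=3,
                    verbose=True)
    # params2d holds one [alpha, sx, sy, mirror, scale] list per aligned image.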
Example #3
def main():
    progname = os.path.basename(sys.argv[0])
    usage = progname + """ Input Output [options]
	
	Generate three micrographs, each micrograph contains one projection of a long filament.
	Input: Reference Volume, output directory 
	Output: Three micrographs stored in output directory		
				 
		sxhelical_demo.py tmp.hdf  mic --generate_micrograph --CTF --apix=1.84	
	
	Generate noisy cylinder ini.hdf with radius 35 pixels and box size 100 by 100 by 200
	
		sxhelical_demo.py ini.hdf --generate_noisycyl --boxsize="100,100,200" --rad=35
	
	Generate rectangular 2D mask mask2d.hdf with width 60 pixels and image size 200 by 200 pixels
	
		sxhelical_demo.py mask2d.hdf --generate_mask --masksize="200,200" --maskwidth=60
	
	Apply the centering parameters to bdb:adata, normalize using average and standard deviation outside the mask, and output the new images to bdb:data
		
		sxhelical_demo.py bdb:adata bdb:data mask2d.hdf --applyparams
	
	Generate a run-through example script for helicon
	
		sxhelical_demo.py --generate_script --filename=run --seg_ny=180 --ptcl_dist=15 --fract=0.35
	"""
    parser = OptionParser(usage, version=SPARXVERSION)

    # helicise the Atom coordinates

    # generate micrographs of helical filament
    parser.add_option(
        "--generate_micrograph",
        action="store_true",
        default=False,
        help=
        "Generate three micrographs where each micrograph contains one projection of a long filament. \n Input: Reference Volume, output directory \n Output: Three micrographs containing helical filament projections stored in output directory"
    )
    parser.add_option("--CTF",
                      action="store_true",
                      default=False,
                      help="Use CTF correction")
    parser.add_option("--apix",
                      type="float",
                      default=-1,
                      help="pixel size in Angstroms")
    parser.add_option(
        "--rand_seed",
        type="int",
        default=14567,
        help=
        "the seed used for generating random numbers (default 14567) for adding noise to the generated micrographs."
    )
    parser.add_option("--Cs",
                      type="float",
                      default=2.0,
                      help="Microscope Cs (spherical aberation)")
    parser.add_option("--voltage",
                      type="float",
                      default=200.0,
                      help="Microscope voltage in KV")
    parser.add_option("--ac",
                      type="float",
                      default=10.0,
                      help="Amplitude contrast (percentage, default=10)")
    parser.add_option("--nonoise",
                      action="store_true",
                      default=False,
                      help="Do not add noise to the micrograph.")

    # generate initial volume
    parser.add_option("--generate_noisycyl",
                      action="store_true",
                      default=False,
                      help="Generate initial volume of noisy cylinder.")
    parser.add_option(
        "--boxsize",
        type="string",
        default="100,100,200",
        help=
        "String containing x , y, z dimensions (separated by comma) in pixels")
    parser.add_option("--rad",
                      type="int",
                      default=35,
                      help="Radius of initial volume in pixels")

    # generate 2D mask
    parser.add_option("--generate_mask",
                      action="store_true",
                      default=False,
                      help="Generate 2D rectangular mask.")
    parser.add_option(
        "--masksize",
        type="string",
        default="200,200",
        help=
        "String containing x and y dimensions (separated by comma) in pixels")
    parser.add_option("--maskwidth",
                      type="int",
                      default=60,
                      help="Width of rectangular mask")

    # Apply 2D alignment parameters to input stack and output new images to output stack
    parser.add_option(
        "--applyparams",
        action="store_true",
        default=False,
        help=
        "Apply the centering parameters to input stack, normalize using average and standard deviation outside the mask, and output the new images to output stack"
    )

    # Generate run script
    parser.add_option("--generate_script",
                      action="store_true",
                      default=False,
                      help="Generate script for helicon run through example")
    parser.add_option("--filename",
                      type="string",
                      default="runhelicon",
                      help="Name of run script to generate")
    parser.add_option("--seg_ny",
                      type="int",
                      default=180,
                      help="y-dimension of segment used for refinement")
    parser.add_option(
        "--ptcl_dist",
        type="int",
        default=15,
        help=
        "Distance in pixels between adjacent segments windowed from same filament"
    )
    parser.add_option(
        "--fract",
        type="float",
        default=0.35,
        help="Fraction of the volume used for applying helical symmetry.")

    (options, args) = parser.parse_args()
    if len(args) > 3:
        sxprint("usage: " + usage)
        sxprint("Please run '" + progname + " -h' for detailed options")
        ERROR(
            "Invalid number of parameters. Please see usage information above."
        )
        return

    else:
        if options.generate_script:
            generate_runscript(options.filename, options.seg_ny,
                               options.ptcl_dist, options.fract)

        if options.generate_micrograph:
            if options.apix <= 0:
                ERROR("Please enter pixel size.")
                return

            generate_helimic(args[0], args[1], options.apix, options.CTF,
                             options.Cs, options.voltage, options.ac,
                             options.nonoise, options.rand_seed)

        if options.generate_noisycyl:

            from sp_utilities import model_cylinder, model_gauss_noise
            outvol = args[0]
            boxdims = options.boxsize.split(',')

            if len(boxdims) not in (1, 3):
                ERROR(
                    "Enter box size as a string containing either one value or the x, y, z dimensions (separated by commas) in pixels. E.g.: --boxsize=\'100,100,200\'"
                )
                return

            nx = int(boxdims[0])

            if len(boxdims) == 1:
                ny = nx
                nz = nx
            else:
                ny = int(boxdims[1])
                nz = int(boxdims[2])

            (model_cylinder(options.rad, nx, ny, nz) *
             model_gauss_noise(1.0, nx, ny, nz)).write_image(outvol)

        if options.generate_mask:
            from sp_utilities import model_blank, pad
            outvol = args[0]
            maskdims = options.masksize.split(',')

            if len(maskdims) not in (1, 2):
                ERROR(
                    "Enter mask size as a string containing the x and y dimensions (separated by a comma) in pixels. E.g.: --masksize=\'200,200\'"
                )
                return

            nx = int(maskdims[0])

            if len(maskdims) == 1:
                ny = nx
            else:
                ny = int(maskdims[1])

            mask = pad(model_blank(options.maskwidth, ny, 1, 1.0), nx, ny, 1,
                       0.0)
            mask.write_image(outvol)

        if options.applyparams:
            from sp_utilities import get_im, get_params2D, set_params2D
            from sp_fundamentals import cyclic_shift
            stack = args[0]
            newstack = args[1]
            mask = get_im(args[2])
            nima = EMUtil.get_image_count(stack)
            for im in range(nima):
                prj = get_im(stack, im)
                alpha, sx, sy, mirror, scale = get_params2D(prj)
                prj = cyclic_shift(prj, int(sx))
                set_params2D(prj, [0.0, 0., 0.0, 0, 1])
                stat = Util.infomask(prj, mask, False)
                prj = (prj - stat[0]) / stat[1]
                ctf_params = prj.get_attr("ctf")
                prj.set_attr('ctf_applied', 0)
                prj.write_image(newstack, im)
Example #4
def main():
    global Tracker, Blockdata
    progname = os.path.basename(sys.argv[0])
    usage = progname + " --output_dir=output_dir  --isac_dir=output_dir_of_isac "
    parser = optparse.OptionParser(usage, version=sp_global_def.SPARXVERSION)
    parser.add_option(
        "--pw_adjustment",
        type="string",
        default="analytical_model",
        help=
        "adjust power spectrum of 2-D averages to an analytic model. Other opions: no_adjustment; bfactor; a text file of 1D rotationally averaged PW",
    )
    #### Four options for --pw_adjustment:
    # 1> analytical_model(default);
    # 2> no_adjustment;
    # 3> bfactor;
    # 4> adjust_to_given_pw2(user has to provide a text file that contains 1D rotationally averaged PW)

    # options in common
    parser.add_option(
        "--isac_dir",
        type="string",
        default="",
        help="ISAC run output directory, input directory for this command",
    )
    parser.add_option(
        "--output_dir",
        type="string",
        default="",
        help="output directory where computed averages are saved",
    )
    parser.add_option(
        "--pixel_size",
        type="float",
        default=-1.0,
        help=
        "pixel_size of raw images. one can put 1.0 in case of negative stain data",
    )
    parser.add_option(
        "--fl",
        type="float",
        default=-1.0,
        help=
        "low pass filter, = -1.0, not applied; =0.0, using FH1 (initial resolution), = 1.0 using FH2 (resolution after local alignment), or user provided value in absolute freqency [0.0:0.5]",
    )
    parser.add_option("--stack",
                      type="string",
                      default="",
                      help="data stack used in ISAC")
    parser.add_option("--radius", type="int", default=-1, help="radius")
    parser.add_option("--xr",
                      type="float",
                      default=-1.0,
                      help="local alignment search range")
    # parser.add_option("--ts",                    type   ="float",          default =1.0,    help= "local alignment search step")
    parser.add_option(
        "--fh",
        type="float",
        default=-1.0,
        help="local alignment high frequencies limit",
    )
    # parser.add_option("--maxit",                 type   ="int",            default =5,      help= "local alignment iterations")
    parser.add_option("--navg",
                      type="int",
                      default=1000000,
                      help="number of aveages")
    parser.add_option(
        "--local_alignment",
        action="store_true",
        default=False,
        help="do local alignment",
    )
    parser.add_option(
        "--noctf",
        action="store_true",
        default=False,
        help=
        "no ctf correction, useful for negative stained data. always ctf for cryo data",
    )
    parser.add_option(
        "--B_start",
        type="float",
        default=45.0,
        help=
        "start frequency (Angstrom) of power spectrum for B_factor estimation",
    )
    parser.add_option(
        "--Bfactor",
        type="float",
        default=-1.0,
        help=
        "User defined bactors (e.g. 25.0[A^2]). By default, the program automatically estimates B-factor. ",
    )

    (options, args) = parser.parse_args(sys.argv[1:])

    adjust_to_analytic_model = options.pw_adjustment == "analytical_model"
    no_adjustment = options.pw_adjustment == "no_adjustment"
    B_enhance = options.pw_adjustment == "bfactor"
    adjust_to_given_pw2 = not (adjust_to_analytic_model or no_adjustment
                               or B_enhance)

    # mpi
    nproc = mpi.mpi_comm_size(mpi.MPI_COMM_WORLD)
    myid = mpi.mpi_comm_rank(mpi.MPI_COMM_WORLD)

    Blockdata = {}
    Blockdata["nproc"] = nproc
    Blockdata["myid"] = myid
    Blockdata["main_node"] = 0
    Blockdata["shared_comm"] = mpi.mpi_comm_split_type(
        mpi.MPI_COMM_WORLD, mpi.MPI_COMM_TYPE_SHARED, 0, mpi.MPI_INFO_NULL)
    Blockdata["myid_on_node"] = mpi.mpi_comm_rank(Blockdata["shared_comm"])
    Blockdata["no_of_processes_per_group"] = mpi.mpi_comm_size(
        Blockdata["shared_comm"])
    masters_from_groups_vs_everything_else_comm = mpi.mpi_comm_split(
        mpi.MPI_COMM_WORLD,
        Blockdata["main_node"] == Blockdata["myid_on_node"],
        Blockdata["myid_on_node"],
    )
    Blockdata["color"], Blockdata[
        "no_of_groups"], balanced_processor_load_on_nodes = sp_utilities.get_colors_and_subsets(
            Blockdata["main_node"],
            mpi.MPI_COMM_WORLD,
            Blockdata["myid"],
            Blockdata["shared_comm"],
            Blockdata["myid_on_node"],
            masters_from_groups_vs_everything_else_comm,
        )
    #  We need three nodes for processing of volumes
    Blockdata["node_volume"] = [
        Blockdata["no_of_groups"] - 3,
        Blockdata["no_of_groups"] - 2,
        Blockdata["no_of_groups"] - 1,
    ]  # For 3D stuff take three last nodes
    #  We need three CPUs for processing of volumes; they are taken to be the main CPUs on each volume node.
    #  We have to send these myids to all nodes so we can identify the main nodes of the selected groups.
    Blockdata["nodes"] = [
        Blockdata["node_volume"][0] * Blockdata["no_of_processes_per_group"],
        Blockdata["node_volume"][1] * Blockdata["no_of_processes_per_group"],
        Blockdata["node_volume"][2] * Blockdata["no_of_processes_per_group"],
    ]
    # End of Blockdata setup: sorting requires at least three nodes, and the number of nodes used must be a multiple of three
    sp_global_def.BATCH = True
    sp_global_def.MPI = True

    if adjust_to_given_pw2:
        checking_flag = 0
        if Blockdata["myid"] == Blockdata["main_node"]:
            if not os.path.exists(options.pw_adjustment):
                checking_flag = 1
        checking_flag = sp_utilities.bcast_number_to_all(
            checking_flag, Blockdata["main_node"], mpi.MPI_COMM_WORLD)

        if checking_flag == 1:
            sp_global_def.ERROR("User provided power spectrum does not exist",
                                myid=Blockdata["myid"])

    Tracker = {}
    Constants = {}
    Constants["isac_dir"] = options.isac_dir
    Constants["masterdir"] = options.output_dir
    Constants["pixel_size"] = options.pixel_size
    Constants["orgstack"] = options.stack
    Constants["radius"] = options.radius
    Constants["xrange"] = options.xr
    Constants["FH"] = options.fh
    Constants["low_pass_filter"] = options.fl
    # Constants["maxit"]                        = options.maxit
    Constants["navg"] = options.navg
    Constants["B_start"] = options.B_start
    Constants["Bfactor"] = options.Bfactor

    if adjust_to_given_pw2:
        Constants["modelpw"] = options.pw_adjustment
    Tracker["constants"] = Constants
    # -------------------------------------------------------------
    #
    # Create and initialize Tracker dictionary with input options  # State Variables

    # <<<---------------------->>>imported functions<<<---------------------------------------------

    # x_range = max(Tracker["constants"]["xrange"], int(1./Tracker["ini_shrink"])+1)
    # y_range =  x_range

    ####-----------------------------------------------------------
    # Create Master directory and associated subdirectories
    line = time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime()) + " =>"
    if Tracker["constants"]["masterdir"] == Tracker["constants"]["isac_dir"]:
        masterdir = os.path.join(Tracker["constants"]["isac_dir"], "sharpen")
    else:
        masterdir = Tracker["constants"]["masterdir"]

    if Blockdata["myid"] == Blockdata["main_node"]:
        msg = "Postprocessing ISAC 2D averages starts"
        sp_global_def.sxprint(line, "Postprocessing ISAC 2D averages starts")
        if not masterdir:
            timestring = time.strftime("_%d_%b_%Y_%H_%M_%S", time.localtime())
            masterdir = "sharpen_" + Tracker["constants"]["isac_dir"]
            os.makedirs(masterdir)
        else:
            if os.path.exists(masterdir):
                sp_global_def.sxprint("%s already exists" % masterdir)
            else:
                os.makedirs(masterdir)
        sp_global_def.write_command(masterdir)
        subdir_path = os.path.join(masterdir, "ali2d_local_params_avg")
        if not os.path.exists(subdir_path):
            os.mkdir(subdir_path)
        subdir_path = os.path.join(masterdir, "params_avg")
        if not os.path.exists(subdir_path):
            os.mkdir(subdir_path)
        li = len(masterdir)
    else:
        li = 0
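    # Broadcast the master directory name from the main node: first its length, then the
    # characters, which each rank reassembles into a string below.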
    li = mpi.mpi_bcast(li, 1, mpi.MPI_INT, Blockdata["main_node"],
                       mpi.MPI_COMM_WORLD)[0]
    masterdir = mpi.mpi_bcast(masterdir, li, mpi.MPI_CHAR,
                              Blockdata["main_node"], mpi.MPI_COMM_WORLD)
    masterdir = b"".join(masterdir).decode('latin1')
    Tracker["constants"]["masterdir"] = masterdir
    log_main = sp_logger.Logger(sp_logger.BaseLogger_Files())
    log_main.prefix = Tracker["constants"]["masterdir"] + "/"

    while not os.path.exists(Tracker["constants"]["masterdir"]):
        sp_global_def.sxprint(
            "Node ",
            Blockdata["myid"],
            "  waiting...",
            Tracker["constants"]["masterdir"],
        )
        time.sleep(1)
    mpi.mpi_barrier(mpi.MPI_COMM_WORLD)

    if Blockdata["myid"] == Blockdata["main_node"]:
        init_dict = {}
        sp_global_def.sxprint(Tracker["constants"]["isac_dir"])
        Tracker["directory"] = os.path.join(Tracker["constants"]["isac_dir"],
                                            "2dalignment")
        core = sp_utilities.read_text_row(
            os.path.join(Tracker["directory"], "initial2Dparams.txt"))
        for im in range(len(core)):
            init_dict[im] = core[im]
        del core
    else:
        init_dict = 0
    init_dict = sp_utilities.wrap_mpi_bcast(init_dict,
                                            Blockdata["main_node"],
                                            communicator=mpi.MPI_COMM_WORLD)
    ###
    do_ctf = True
    if options.noctf:
        do_ctf = False
    if Blockdata["myid"] == Blockdata["main_node"]:
        if do_ctf:
            sp_global_def.sxprint("CTF correction is on")
        else:
            sp_global_def.sxprint("CTF correction is off")
        if options.local_alignment:
            sp_global_def.sxprint("local refinement is on")
        else:
            sp_global_def.sxprint("local refinement is off")
        if B_enhance:
            sp_global_def.sxprint("Bfactor is to be applied on averages")
        elif adjust_to_given_pw2:
            sp_global_def.sxprint(
                "PW of averages is adjusted to a given 1D PW curve")
        elif adjust_to_analytic_model:
            sp_global_def.sxprint(
                "PW of averages is adjusted to analytical model")
        else:
            sp_global_def.sxprint("PW of averages is not adjusted")
        # Tracker["constants"]["orgstack"] = "bdb:"+ os.path.join(Tracker["constants"]["isac_dir"],"../","sparx_stack")
        image = sp_utilities.get_im(Tracker["constants"]["orgstack"], 0)
        Tracker["constants"]["nnxo"] = image.get_xsize()
        if Tracker["constants"]["pixel_size"] == -1.0:
            sp_global_def.sxprint(
                "Pixel size value is not provided by user. extracting it from ctf header entry of the original stack."
            )
            try:
                ctf_params = image.get_attr("ctf")
                Tracker["constants"]["pixel_size"] = ctf_params.apix
            except:
                sp_global_def.ERROR(
                    "Pixel size could not be extracted from the original stack.",
                    myid=Blockdata["myid"],
                )
        ## Now fill in low-pass filter

        isac_shrink_path = os.path.join(Tracker["constants"]["isac_dir"],
                                        "README_shrink_ratio.txt")

        if not os.path.exists(isac_shrink_path):
            sp_global_def.ERROR(
                "%s does not exist in the specified ISAC run output directory"
                % (isac_shrink_path),
                myid=Blockdata["myid"],
            )

        isac_shrink_file = open(isac_shrink_path, "r")
        isac_shrink_lines = isac_shrink_file.readlines()
        isac_shrink_ratio = float(
            isac_shrink_lines[5]
        )  # 6th line: shrink ratio (= [target particle radius]/[particle radius]) used in the ISAC run
        isac_radius = float(
            isac_shrink_lines[6]
        )  # 7th line: particle radius at original pixel size used in the ISAC run
        isac_shrink_file.close()
        print("Extracted parameter values")
        print("ISAC shrink ratio    : {0}".format(isac_shrink_ratio))
        print("ISAC particle radius : {0}".format(isac_radius))
        Tracker["ini_shrink"] = isac_shrink_ratio
    else:
        Tracker["ini_shrink"] = 0.0
    Tracker = sp_utilities.wrap_mpi_bcast(Tracker,
                                          Blockdata["main_node"],
                                          communicator=mpi.MPI_COMM_WORLD)

    # print(Tracker["constants"]["pixel_size"], "pixel_size")
    x_range = max(
        Tracker["constants"]["xrange"],
        int(old_div(1.0, Tracker["ini_shrink"]) + 0.99999),
    )
    a_range = y_range = x_range

    if Blockdata["myid"] == Blockdata["main_node"]:
        parameters = sp_utilities.read_text_row(
            os.path.join(Tracker["constants"]["isac_dir"],
                         "all_parameters.txt"))
    else:
        parameters = 0
    parameters = sp_utilities.wrap_mpi_bcast(parameters,
                                             Blockdata["main_node"],
                                             communicator=mpi.MPI_COMM_WORLD)
    params_dict = {}
    list_dict = {}
    # prepare params_dict

    # navg = min(Tracker["constants"]["navg"]*Blockdata["nproc"], EMUtil.get_image_count(os.path.join(Tracker["constants"]["isac_dir"], "class_averages.hdf")))
    navg = min(
        Tracker["constants"]["navg"],
        EMAN2_cppwrap.EMUtil.get_image_count(
            os.path.join(Tracker["constants"]["isac_dir"],
                         "class_averages.hdf")),
    )
    global_dict = {}
    ptl_list = []
    memlist = []
    if Blockdata["myid"] == Blockdata["main_node"]:
        sp_global_def.sxprint("Number of averages computed in this run is %d" %
                              navg)
        for iavg in range(navg):
            params_of_this_average = []
            image = sp_utilities.get_im(
                os.path.join(Tracker["constants"]["isac_dir"],
                             "class_averages.hdf"),
                iavg,
            )
            members = sorted(image.get_attr("members"))
            memlist.append(members)
            for im in range(len(members)):
                abs_id = members[im]
                global_dict[abs_id] = [iavg, im]
                P = sp_utilities.combine_params2(
                    init_dict[abs_id][0],
                    init_dict[abs_id][1],
                    init_dict[abs_id][2],
                    init_dict[abs_id][3],
                    parameters[abs_id][0],
                    old_div(parameters[abs_id][1], Tracker["ini_shrink"]),
                    old_div(parameters[abs_id][2], Tracker["ini_shrink"]),
                    parameters[abs_id][3],
                )
                if parameters[abs_id][3] == -1:
                    sp_global_def.sxprint(
                        "WARNING: Image #{0} is an unaccounted particle with invalid 2D alignment parameters and should not be the member of any classes. Please check the consitency of input dataset."
                        .format(abs_id)
                    )  # How to check what is wrong about mirror = -1 (Toshio 2018/01/11)
                params_of_this_average.append([P[0], P[1], P[2], P[3], 1.0])
                ptl_list.append(abs_id)
            params_dict[iavg] = params_of_this_average
            list_dict[iavg] = members
            sp_utilities.write_text_row(
                params_of_this_average,
                os.path.join(
                    Tracker["constants"]["masterdir"],
                    "params_avg",
                    "params_avg_%03d.txt" % iavg,
                ),
            )
        ptl_list.sort()
        init_params = [None for im in range(len(ptl_list))]
        for im in range(len(ptl_list)):
            init_params[im] = [ptl_list[im]] + params_dict[global_dict[
                ptl_list[im]][0]][global_dict[ptl_list[im]][1]]
        sp_utilities.write_text_row(
            init_params,
            os.path.join(Tracker["constants"]["masterdir"],
                         "init_isac_params.txt"),
        )
    else:
        params_dict = 0
        list_dict = 0
        memlist = 0
    params_dict = sp_utilities.wrap_mpi_bcast(params_dict,
                                              Blockdata["main_node"],
                                              communicator=mpi.MPI_COMM_WORLD)
    list_dict = sp_utilities.wrap_mpi_bcast(list_dict,
                                            Blockdata["main_node"],
                                            communicator=mpi.MPI_COMM_WORLD)
    memlist = sp_utilities.wrap_mpi_bcast(memlist,
                                          Blockdata["main_node"],
                                          communicator=mpi.MPI_COMM_WORLD)
    # Now computing!
    del init_dict
    tag_sharpen_avg = 1000
    ## always apply low pass filter to B_enhanced images to suppress noise in high frequencies
    enforced_to_H1 = False
    if B_enhance:
        if Tracker["constants"]["low_pass_filter"] == -1.0:
            enforced_to_H1 = True

    # distribute workload among mpi processes
    image_start, image_end = sp_applications.MPI_start_end(
        navg, Blockdata["nproc"], Blockdata["myid"])

    if Blockdata["myid"] == Blockdata["main_node"]:
        cpu_dict = {}
        for iproc in range(Blockdata["nproc"]):
            local_image_start, local_image_end = sp_applications.MPI_start_end(
                navg, Blockdata["nproc"], iproc)
            for im in range(local_image_start, local_image_end):
                cpu_dict[im] = iproc
    else:
        cpu_dict = 0

    cpu_dict = sp_utilities.wrap_mpi_bcast(cpu_dict,
                                           Blockdata["main_node"],
                                           communicator=mpi.MPI_COMM_WORLD)

    slist = [None for im in range(navg)]
    ini_list = [None for im in range(navg)]
    avg1_list = [None for im in range(navg)]
    avg2_list = [None for im in range(navg)]
    data_list = [None for im in range(navg)]
    plist_dict = {}

    if Blockdata["myid"] == Blockdata["main_node"]:
        if B_enhance:
            sp_global_def.sxprint(
                "Avg ID   B-factor  FH1(Res before ali) FH2(Res after ali)")
        else:
            sp_global_def.sxprint(
                "Avg ID   FH1(Res before ali)  FH2(Res after ali)")

    FH_list = [[0, 0.0, 0.0] for im in range(navg)]
    for iavg in range(image_start, image_end):

        mlist = EMAN2_cppwrap.EMData.read_images(
            Tracker["constants"]["orgstack"], list_dict[iavg])

        for im in range(len(mlist)):
            sp_utilities.set_params2D(mlist[im],
                                      params_dict[iavg][im],
                                      xform="xform.align2d")

        if options.local_alignment:
            new_avg, plist, FH2 = sp_applications.refinement_2d_local(
                mlist,
                Tracker["constants"]["radius"],
                a_range,
                x_range,
                y_range,
                CTF=do_ctf,
                SNR=1.0e10,
            )
            plist_dict[iavg] = plist
            FH1 = -1.0

        else:
            new_avg, frc, plist = compute_average(
                mlist, Tracker["constants"]["radius"], do_ctf)
            FH1 = get_optimistic_res(frc)
            FH2 = -1.0

        FH_list[iavg] = [iavg, FH1, FH2]

        if B_enhance:
            new_avg, gb = apply_enhancement(
                new_avg,
                Tracker["constants"]["B_start"],
                Tracker["constants"]["pixel_size"],
                Tracker["constants"]["Bfactor"],
            )
            sp_global_def.sxprint("  %6d      %6.3f  %4.3f  %4.3f" %
                                  (iavg, gb, FH1, FH2))

        elif adjust_to_given_pw2:
            roo = sp_utilities.read_text_file(Tracker["constants"]["modelpw"],
                                              -1)
            roo = roo[0]  # always on the first column
            new_avg = adjust_pw_to_model(new_avg,
                                         Tracker["constants"]["pixel_size"],
                                         roo)
            sp_global_def.sxprint("  %6d      %4.3f  %4.3f  " %
                                  (iavg, FH1, FH2))

        elif adjust_to_analytic_model:
            new_avg = adjust_pw_to_model(new_avg,
                                         Tracker["constants"]["pixel_size"],
                                         None)
            sp_global_def.sxprint("  %6d      %4.3f  %4.3f   " %
                                  (iavg, FH1, FH2))

        elif no_adjustment:
            pass

        if Tracker["constants"]["low_pass_filter"] != -1.0:
            if Tracker["constants"]["low_pass_filter"] == 0.0:
                low_pass_filter = FH1
            elif Tracker["constants"]["low_pass_filter"] == 1.0:
                low_pass_filter = FH2
                if not options.local_alignment:
                    low_pass_filter = FH1
            else:
                low_pass_filter = Tracker["constants"]["low_pass_filter"]
                if low_pass_filter >= 0.45:
                    low_pass_filter = 0.45
            new_avg = sp_filter.filt_tanl(new_avg, low_pass_filter, 0.02)
        else:  # No low-pass filter requested; apply FH1 only when enforced for B-factor mode
            if enforced_to_H1:
                new_avg = sp_filter.filt_tanl(new_avg, FH1, 0.02)
        if B_enhance:
            new_avg = sp_fundamentals.fft(new_avg)

        new_avg.set_attr("members", list_dict[iavg])
        new_avg.set_attr("n_objects", len(list_dict[iavg]))
        slist[iavg] = new_avg
        sp_global_def.sxprint(
            time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime()) + " =>",
            "Refined average %7d" % iavg,
        )

    ## send to main node to write
    mpi.mpi_barrier(mpi.MPI_COMM_WORLD)

    for im in range(navg):
        # avg
        if (cpu_dict[im] == Blockdata["myid"]
                and Blockdata["myid"] != Blockdata["main_node"]):
            sp_utilities.send_EMData(slist[im], Blockdata["main_node"],
                                     tag_sharpen_avg)

        elif (cpu_dict[im] == Blockdata["myid"]
              and Blockdata["myid"] == Blockdata["main_node"]):
            slist[im].set_attr("members", memlist[im])
            slist[im].set_attr("n_objects", len(memlist[im]))
            slist[im].write_image(
                os.path.join(Tracker["constants"]["masterdir"],
                             "class_averages.hdf"),
                im,
            )

        elif (cpu_dict[im] != Blockdata["myid"]
              and Blockdata["myid"] == Blockdata["main_node"]):
            new_avg_other_cpu = sp_utilities.recv_EMData(
                cpu_dict[im], tag_sharpen_avg)
            new_avg_other_cpu.set_attr("members", memlist[im])
            new_avg_other_cpu.set_attr("n_objects", len(memlist[im]))
            new_avg_other_cpu.write_image(
                os.path.join(Tracker["constants"]["masterdir"],
                             "class_averages.hdf"),
                im,
            )

        if options.local_alignment:
            if cpu_dict[im] == Blockdata["myid"]:
                sp_utilities.write_text_row(
                    plist_dict[im],
                    os.path.join(
                        Tracker["constants"]["masterdir"],
                        "ali2d_local_params_avg",
                        "ali2d_local_params_avg_%03d.txt" % im,
                    ),
                )

            if (cpu_dict[im] == Blockdata["myid"]
                    and cpu_dict[im] != Blockdata["main_node"]):
                sp_utilities.wrap_mpi_send(plist_dict[im],
                                           Blockdata["main_node"],
                                           mpi.MPI_COMM_WORLD)
                sp_utilities.wrap_mpi_send(FH_list, Blockdata["main_node"],
                                           mpi.MPI_COMM_WORLD)

            elif (cpu_dict[im] != Blockdata["main_node"]
                  and Blockdata["myid"] == Blockdata["main_node"]):
                dummy = sp_utilities.wrap_mpi_recv(cpu_dict[im],
                                                   mpi.MPI_COMM_WORLD)
                plist_dict[im] = dummy
                dummy = sp_utilities.wrap_mpi_recv(cpu_dict[im],
                                                   mpi.MPI_COMM_WORLD)
                FH_list[im] = dummy[im]
        else:
            if (cpu_dict[im] == Blockdata["myid"]
                    and cpu_dict[im] != Blockdata["main_node"]):
                sp_utilities.wrap_mpi_send(FH_list, Blockdata["main_node"],
                                           mpi.MPI_COMM_WORLD)

            elif (cpu_dict[im] != Blockdata["main_node"]
                  and Blockdata["myid"] == Blockdata["main_node"]):
                dummy = sp_utilities.wrap_mpi_recv(cpu_dict[im],
                                                   mpi.MPI_COMM_WORLD)
                FH_list[im] = dummy[im]

        mpi.mpi_barrier(mpi.MPI_COMM_WORLD)
    mpi.mpi_barrier(mpi.MPI_COMM_WORLD)

    if options.local_alignment:
        if Blockdata["myid"] == Blockdata["main_node"]:
            ali3d_local_params = [None for im in range(len(ptl_list))]
            for im in range(len(ptl_list)):
                ali3d_local_params[im] = [ptl_list[im]] + plist_dict[
                    global_dict[ptl_list[im]][0]][global_dict[ptl_list[im]][1]]
            sp_utilities.write_text_row(
                ali3d_local_params,
                os.path.join(Tracker["constants"]["masterdir"],
                             "ali2d_local_params.txt"),
            )
            sp_utilities.write_text_row(
                FH_list,
                os.path.join(Tracker["constants"]["masterdir"], "FH_list.txt"))
    else:
        if Blockdata["myid"] == Blockdata["main_node"]:
            sp_utilities.write_text_row(
                FH_list,
                os.path.join(Tracker["constants"]["masterdir"], "FH_list.txt"))

    mpi.mpi_barrier(mpi.MPI_COMM_WORLD)
    target_xr = 3
    target_yr = 3
    if Blockdata["myid"] == 0:
        cmd = "{} {} {} {} {} {} {} {} {} {}".format(
            "sp_chains.py",
            os.path.join(Tracker["constants"]["masterdir"],
                         "class_averages.hdf"),
            os.path.join(Tracker["constants"]["masterdir"], "junk.hdf"),
            os.path.join(Tracker["constants"]["masterdir"],
                         "ordered_class_averages.hdf"),
            "--circular",
            "--radius=%d" % Tracker["constants"]["radius"],
            "--xr=%d" % (target_xr + 1),
            "--yr=%d" % (target_yr + 1),
            "--align",
            ">/dev/null",
        )
        junk = sp_utilities.cmdexecute(cmd)
        cmd = "{} {}".format(
            "rm -rf",
            os.path.join(Tracker["constants"]["masterdir"], "junk.hdf"))
        junk = sp_utilities.cmdexecute(cmd)

    return
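The send/receive loop above relies on a per-average ownership map: cpu_dict[im] names the MPI rank that refined average im, and only the main node writes to class_averages.hdf. A minimal sketch of such a round-robin assignment follows; the helper name build_cpu_dict is an illustrative assumption, since the actual map is constructed earlier in the program.

def build_cpu_dict(navg, number_of_processes):
    # Hypothetical helper: assign each class average to one MPI rank, round-robin.
    # It only illustrates the ownership pattern the send/receive loop above assumes.
    return {im: im % number_of_processes for im in range(navg)}

# Example: with 10 averages on 4 ranks, rank 0 owns averages 0, 4 and 8.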
Beispiel #5
0
def ali2d_single_iter(
    data,
    numr,
    wr,
    cs,
    tavg,
    cnx,
    cny,
    xrng,
    yrng,
    step,
    nomirror=False,
    mode="F",
    CTF=False,
    random_method="",
    T=1.0,
    ali_params="xform.align2d",
    delta=0.0,
):
    """
		single iteration of 2D alignment using ormq
		if CTF = True, apply CTF to data (not to reference!)
	"""

    maxrin = numr[-1]  #  length
    ou = numr[-3]  #  maximum radius
    if random_method == "SCF":
        frotim = [sp_fundamentals.fft(tavg)]
        xrng = int(xrng + 0.5)
        yrng = int(yrng + 0.5)
        cimage = EMAN2_cppwrap.Util.Polar2Dm(sp_fundamentals.scf(tavg), cnx,
                                             cny, numr, mode)
        EMAN2_cppwrap.Util.Frngs(cimage, numr)
        EMAN2_cppwrap.Util.Applyws(cimage, numr, wr)
    else:
        # 2D alignment using rotational ccf in polar coords and quadratic interpolation
        cimage = EMAN2_cppwrap.Util.Polar2Dm(tavg, cnx, cny, numr, mode)
        EMAN2_cppwrap.Util.Frngs(cimage, numr)
        EMAN2_cppwrap.Util.Applyws(cimage, numr, wr)

    sx_sum = 0.0
    sy_sum = 0.0
    sxn = 0.0
    syn = 0.0
    mn = 0
    nope = 0
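    # mashi: the largest in-plane shift that still keeps the outer alignment radius (ou) inside the image frame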
    mashi = cnx - ou - 2
    for im in range(len(data)):
        if CTF:
            # Apply CTF to image
            ctf_params = data[im].get_attr("ctf")
            ima = sp_filter.filt_ctf(data[im], ctf_params, True)
        else:
            ima = data[im]

        if random_method == "PCP":
            sxi = data[im][0][0].get_attr("sxi")
            syi = data[im][0][0].get_attr("syi")
            nx = ny = data[im][0][0].get_attr("inx")
        else:
            nx = ima.get_xsize()
            ny = ima.get_ysize()
            alpha, sx, sy, mirror, dummy = sp_utilities.get_params2D(
                data[im], ali_params)
            alpha, sx, sy, dummy = sp_utilities.combine_params2(
                alpha, sx, sy, mirror, 0.0, -cs[0], -cs[1], 0)
            alphai, sxi, syi, scalei = sp_utilities.inverse_transform2(
                alpha, sx, sy)
            #  introduce constraints on parameters to accommodate the use of cs centering
            sxi = min(max(sxi, -mashi), mashi)
            syi = min(max(syi, -mashi), mashi)

        #  The search range procedure was adjusted for 3D searches; since in 2D the order of operations is inverted, we have to invert the ranges
        txrng = search_range(nx, ou, sxi, xrng, "ali2d_single_iter")
        txrng = [txrng[1], txrng[0]]
        tyrng = search_range(ny, ou, syi, yrng, "ali2d_single_iter")
        tyrng = [tyrng[1], tyrng[0]]
        # print im, "B",cnx,sxi,syi,txrng, tyrng
        # align current image to the reference
        if random_method == "SHC":
            """Multiline Comment0"""
            #  For shc combining of shifts is problematic as the image may randomly slide away and never come back.
            #  A possibility would be to reject moves that results in too large departure from the center.
            #  On the other hand, one cannot simply do searches around the proper center all the time,
            #    as if xr is decreased, the image cannot be brought back if the established shifts are further than new range
            olo = EMAN2_cppwrap.Util.shc(
                ima,
                [cimage],
                txrng,
                tyrng,
                step,
                -1.0,
                mode,
                numr,
                cnx + sxi,
                cny + syi,
                "c1",
            )
            ##olo = Util.shc(ima, [cimage], xrng, yrng, step, -1.0, mode, numr, cnx, cny, "c1")
            if data[im].get_attr("previousmax") < olo[5]:
                # [angt, sxst, syst, mirrort, peakt] = ormq(ima, cimage, xrng, yrng, step, mode, numr, cnx+sxi, cny+syi, delta)
                # print  angt, sxst, syst, mirrort, peakt,olo
                angt = olo[0]
                sxst = olo[1]
                syst = olo[2]
                mirrort = int(olo[3])
                # combine parameters and set them to the header, ignore previous angle and mirror
                [alphan, sxn, syn,
                 mn] = sp_utilities.combine_params2(0.0, -sxi, -syi, 0, angt,
                                                    sxst, syst, mirrort)
                sp_utilities.set_params2D(data[im],
                                          [alphan, sxn, syn, mn, 1.0],
                                          ali_params)
                ##set_params2D(data[im], [angt, sxst, syst, mirrort, 1.0], ali_params)
                data[im].set_attr("previousmax", olo[5])
            else:
                # Did not find a better peak, but we still have to set the shifted parameters, as the average has shifted
                sp_utilities.set_params2D(data[im],
                                          [alpha, sx, sy, mirror, 1.0],
                                          ali_params)
                nope += 1
                mn = 0
                sxn = 0.0
                syn = 0.0
        elif random_method == "SCF":
            sxst, syst, iref, angt, mirrort, totpeak = multalign2d_scf(
                data[im], [cimage], frotim, numr, xrng, yrng, ou=ou)
            [alphan, sxn, syn,
             mn] = sp_utilities.combine_params2(0.0, -sxi, -syi, 0, angt, sxst,
                                                syst, mirrort)
            sp_utilities.set_params2D(data[im], [alphan, sxn, syn, mn, 1.0],
                                      ali_params)
        elif random_method == "PCP":
            [angt, sxst, syst, mirrort,
             peakt] = ormq_fast(data[im], cimage, txrng, tyrng, step, numr,
                                mode, delta)
            sxst = rings[0][0][0].get_attr("sxi")
            syst = rings[0][0][0].get_attr("syi")
            sp_global_def.sxprint(sxst, syst, sx, sy)
            dummy, sxs, sys, dummy = sp_utilities.inverse_transform2(
                -angt, sx + sxst, sy + syst)
            sp_utilities.set_params2D(data[im][0][0],
                                      [angt, sxs, sys, mirrort, 1.0],
                                      ali_params)
        else:
            if nomirror:
                [angt, sxst, syst, mirrort,
                 peakt] = ornq(ima, cimage, txrng, tyrng, step, mode, numr,
                               cnx + sxi, cny + syi)
            else:
                [angt, sxst, syst, mirrort, peakt] = ormq(
                    ima,
                    cimage,
                    txrng,
                    tyrng,
                    step,
                    mode,
                    numr,
                    cnx + sxi,
                    cny + syi,
                    delta,
                )
            # combine parameters and set them to the header, ignore previous angle and mirror
            [alphan, sxn, syn,
             mn] = sp_utilities.combine_params2(0.0, -sxi, -syi, 0, angt, sxst,
                                                syst, mirrort)
            sp_utilities.set_params2D(data[im], [alphan, sxn, syn, mn, 1.0],
                                      ali_params)

        if mn == 0:
            sx_sum += sxn
        else:
            sx_sum -= sxn
        sy_sum += syn

    return sx_sum, sy_sum, nope
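A hedged usage sketch of ali2d_single_iter follows. It only illustrates how the function defined above is typically driven from an outer loop: the polar ring description is precomputed once, then alignment of all images to the current average and the rebuilding of that average alternate. The loop body, the parameter values, and the averaging step are assumptions, not part of the original program.

from sp_alignment import Numrinit, ringwe

def iterate_alignment(data, tavg, last_ring, nx, n_iter=10, xrng=2.0, yrng=2.0, step=1.0):
    # Sketch only: drive ali2d_single_iter (defined above) from an outer iteration loop.
    cnx = cny = nx // 2 + 1                 # image center in SPIDER convention
    numr = Numrinit(1, last_ring, 1, "F")   # ring description (first, last, step, mode)
    wr = ringwe(numr, "F")                  # ring weights
    cs = [0.0, 0.0]                         # no additional centering applied here
    for it in range(n_iter):
        sx_sum, sy_sum, nope = ali2d_single_iter(
            data, numr, wr, cs, tavg, cnx, cny, xrng, yrng, step)
        # In a real program the average tavg would be rebuilt here from the updated
        # 2D parameters (e.g. with an averaging helper such as sp_statistics.ave_series,
        # assumed here and not shown).
    return tavg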
Beispiel #6
0
def mref_ali2d_MPI(stack,
                   refim,
                   outdir,
                   maskfile=None,
                   ir=1,
                   ou=-1,
                   rs=1,
                   xrng=0,
                   yrng=0,
                   step=1,
                   center=1,
                   maxit=10,
                   CTF=False,
                   snr=1.0,
                   user_func_name="ref_ali2d",
                   rand_seed=1000):
    # 2D multi-reference alignment using rotational ccf in polar coordinates and quadratic interpolation

    from sp_utilities import model_circle, combine_params2, inverse_transform2, drop_image, get_image, get_im
    from sp_utilities import reduce_EMData_to_root, bcast_EMData_to_all, bcast_number_to_all
    from sp_utilities import send_attr_dict
    from sp_utilities import center_2D
    from sp_statistics import fsc_mask
    from sp_alignment import Numrinit, ringwe, search_range
    from sp_fundamentals import rot_shift2D, fshift
    from sp_utilities import get_params2D, set_params2D
    from random import seed, randint
    from sp_morphology import ctf_2
    from sp_filter import filt_btwl, filt_params
    from numpy import reshape, shape
    from sp_utilities import print_msg, print_begin_msg, print_end_msg
    import os
    import sys
    import shutil
    from sp_applications import MPI_start_end
    from mpi import mpi_comm_size, mpi_comm_rank, MPI_COMM_WORLD
    from mpi import mpi_reduce, mpi_bcast, mpi_barrier, mpi_recv, mpi_send
    from mpi import MPI_SUM, MPI_FLOAT, MPI_INT

    number_of_proc = mpi_comm_size(MPI_COMM_WORLD)
    myid = mpi_comm_rank(MPI_COMM_WORLD)
    main_node = 0

    # create the output directory; abort if it already exists

    if os.path.exists(outdir):
        ERROR(
            'Output directory exists, please change the name and restart the program',
            "mref_ali2d_MPI ", 1, myid)
    mpi_barrier(MPI_COMM_WORLD)

    import sp_global_def
    if myid == main_node:
        os.mkdir(outdir)
        sp_global_def.LOGFILE = os.path.join(outdir, sp_global_def.LOGFILE)
        print_begin_msg("mref_ali2d_MPI")

    nima = EMUtil.get_image_count(stack)

    image_start, image_end = MPI_start_end(nima, number_of_proc, myid)

    ima = EMData()
    ima.read_image(stack, image_start)

    first_ring = int(ir)
    last_ring = int(ou)
    rstep = int(rs)
    max_iter = int(maxit)

    if max_iter == 0:
        max_iter = 10
        auto_stop = True
    else:
        auto_stop = False

    if myid == main_node:
        print_msg("Input stack                 : %s\n" % (stack))
        print_msg("Reference stack             : %s\n" % (refim))
        print_msg("Output directory            : %s\n" % (outdir))
        print_msg("Maskfile                    : %s\n" % (maskfile))
        print_msg("Inner radius                : %i\n" % (first_ring))

    nx = ima.get_xsize()
    # default value for the last ring
    if last_ring == -1: last_ring = nx // 2 - 2  # integer division keeps the radius an int under Python 3

    if myid == main_node:
        print_msg("Outer radius                : %i\n" % (last_ring))
        print_msg("Ring step                   : %i\n" % (rstep))
        print_msg("X search range              : %f\n" % (xrng))
        print_msg("Y search range              : %f\n" % (yrng))
        print_msg("Translational step          : %f\n" % (step))
        print_msg("Center type                 : %i\n" % (center))
        print_msg("Maximum iteration           : %i\n" % (max_iter))
        print_msg("CTF correction              : %s\n" % (CTF))
        print_msg("Signal-to-Noise Ratio       : %f\n" % (snr))
        print_msg("Random seed                 : %i\n\n" % (rand_seed))
        print_msg("User function               : %s\n" % (user_func_name))
    import sp_user_functions
    user_func = sp_user_functions.factory[user_func_name]

    if maskfile:
        import types
        if isinstance(maskfile, (str, bytes)): mask = get_image(maskfile)  # a filename was given, load the mask image
        else: mask = maskfile
    else: mask = model_circle(last_ring, nx, nx)
    #  references, do them on all processors...
    refi = []
    numref = EMUtil.get_image_count(refim)

    # IMAGES ARE SQUARES! center is in SPIDER convention
    cnx = nx // 2 + 1
    cny = cnx

    mode = "F"
    #precalculate rings
    numr = Numrinit(first_ring, last_ring, rstep, mode)
    wr = ringwe(numr, mode)

    # prepare reference images on all nodes
    ima.to_zero()
    for j in range(numref):
        #  even, odd, number of even, number of images.  After frc, totav
        refi.append([get_im(refim, j), ima.copy(), 0])
    #  for each node read its share of data
    data = EMData.read_images(stack, list(range(image_start, image_end)))
    for im in range(image_start, image_end):
        data[im - image_start].set_attr('ID', im)

    if myid == main_node: seed(rand_seed)

    a0 = -1.0
    again = True
    Iter = 0

    ref_data = [mask, center, None, None]

    while Iter < max_iter and again:
        ringref = []
        mashi = cnx - last_ring - 2
        for j in range(numref):
            refi[j][0].process_inplace("normalize.mask", {
                "mask": mask,
                "no_sigma": 1
            })  # normalize reference images to N(0,1)
            cimage = Util.Polar2Dm(refi[j][0], cnx, cny, numr, mode)
            Util.Frngs(cimage, numr)
            Util.Applyws(cimage, numr, wr)
            ringref.append(cimage)
            # zero refi
            refi[j][0].to_zero()
            refi[j][1].to_zero()
            refi[j][2] = 0

        assign = [[] for i in range(numref)]
        # begin MPI section
        for im in range(image_start, image_end):
            alpha, sx, sy, mirror, scale = get_params2D(data[im - image_start])
            #  Why inverse?  07/11/2015 PAP
            alphai, sxi, syi, scalei = inverse_transform2(alpha, sx, sy)
            # normalize
            data[im - image_start].process_inplace("normalize.mask", {
                "mask": mask,
                "no_sigma": 0
            })  # subtract average under the mask
            # If shifts are outside of the permissible range, reset them
            if (abs(sxi) > mashi or abs(syi) > mashi):
                sxi = 0.0
                syi = 0.0
                set_params2D(data[im - image_start], [0.0, 0.0, 0.0, 0, 1.0])
            ny = nx
            txrng = search_range(nx, last_ring, sxi, xrng, "mref_ali2d_MPI")
            txrng = [txrng[1], txrng[0]]
            tyrng = search_range(ny, last_ring, syi, yrng, "mref_ali2d_MPI")
            tyrng = [tyrng[1], tyrng[0]]
            # align current image to the reference
            [angt, sxst, syst, mirrort, xiref,
             peakt] = Util.multiref_polar_ali_2d(data[im - image_start],
                                                 ringref, txrng, tyrng, step,
                                                 mode, numr, cnx + sxi,
                                                 cny + syi)

            iref = int(xiref)
            # combine parameters and set them to the header, ignore previous angle and mirror
            [alphan, sxn, syn,
             mn] = combine_params2(0.0, -sxi, -syi, 0, angt, sxst, syst,
                                   (int)(mirrort))
            set_params2D(data[im - image_start],
                         [alphan, sxn, syn, int(mn), scale])
            data[im - image_start].set_attr('assign', iref)
            # apply current parameters and add to the average
            temp = rot_shift2D(data[im - image_start], alphan, sxn, syn, mn)
            it = im % 2
            Util.add_img(refi[iref][it], temp)
            assign[iref].append(im)
            #assign[im] = iref
            refi[iref][2] += 1.0
        del ringref
        # end MPI section, bring partial things together, calculate new reference images, broadcast them back

        for j in range(numref):
            reduce_EMData_to_root(refi[j][0], myid, main_node)
            reduce_EMData_to_root(refi[j][1], myid, main_node)
            refi[j][2] = mpi_reduce(refi[j][2], 1, MPI_FLOAT, MPI_SUM,
                                    main_node, MPI_COMM_WORLD)
            if (myid == main_node): refi[j][2] = int(refi[j][2][0])
        # gather assignments
        for j in range(numref):
            if myid == main_node:
                for n in range(number_of_proc):
                    if n != main_node:
                        import sp_global_def
                        ln = mpi_recv(1, MPI_INT, n,
                                      sp_global_def.SPARX_MPI_TAG_UNIVERSAL,
                                      MPI_COMM_WORLD)
                        lis = mpi_recv(ln[0], MPI_INT, n,
                                       sp_global_def.SPARX_MPI_TAG_UNIVERSAL,
                                       MPI_COMM_WORLD)
                        for l in range(ln[0]):
                            assign[j].append(int(lis[l]))
            else:
                import sp_global_def
                mpi_send(len(assign[j]), 1, MPI_INT, main_node,
                         sp_global_def.SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)
                mpi_send(assign[j], len(assign[j]), MPI_INT, main_node,
                         sp_global_def.SPARX_MPI_TAG_UNIVERSAL, MPI_COMM_WORLD)

        if myid == main_node:
            # replace the name of the stack with reference with the current one
            refim = os.path.join(outdir, "aqm%03d.hdf" % Iter)
            a1 = 0.0
            ave_fsc = []
            for j in range(numref):
                if refi[j][2] < 4:
                    #ERROR("One of the references vanished","mref_ali2d_MPI",1)
                    #  if vanished, put a random image (only from main node!) there
                    assign[j] = []
                    assign[j].append(
                        randint(image_start, image_end - 1) - image_start)
                    refi[j][0] = data[assign[j][0]].copy()
                    #print 'ERROR', j
                else:
                    #frsc = fsc_mask(refi[j][0], refi[j][1], mask, 1.0, os.path.join(outdir,"drm%03d%04d"%(Iter, j)))
                    from sp_statistics import fsc
                    frsc = fsc(
                        refi[j][0], refi[j][1], 1.0,
                        os.path.join(outdir, "drm%03d%04d.txt" % (Iter, j)))
                    Util.add_img(refi[j][0], refi[j][1])
                    Util.mul_scalar(refi[j][0], 1.0 / float(refi[j][2]))

                    if ave_fsc == []:
                        for i in range(len(frsc[1])):
                            ave_fsc.append(frsc[1][i])
                        c_fsc = 1
                    else:
                        for i in range(len(frsc[1])):
                            ave_fsc[i] += frsc[1][i]
                        c_fsc += 1
                    #print 'OK', j, len(frsc[1]), frsc[1][0:5], ave_fsc[0:5]

            #print 'sum', sum(ave_fsc)
            if sum(ave_fsc) != 0:
                for i in range(len(ave_fsc)):
                    ave_fsc[i] /= float(c_fsc)
                    frsc[1][i] = ave_fsc[i]

            for j in range(numref):
                ref_data[2] = refi[j][0]
                ref_data[3] = frsc
                refi[j][0], cs = user_func(ref_data)

                # write the current average
                TMP = []
                for i_tmp in range(len(assign[j])):
                    TMP.append(float(assign[j][i_tmp]))
                TMP.sort()
                refi[j][0].set_attr_dict({'ave_n': refi[j][2], 'members': TMP})
                del TMP
                refi[j][0].process_inplace("normalize.mask", {
                    "mask": mask,
                    "no_sigma": 1
                })
                refi[j][0].write_image(refim, j)

            Iter += 1
            msg = "ITERATION #%3d        %d\n\n" % (Iter, again)
            print_msg(msg)
            for j in range(numref):
                msg = "   group #%3d   number of particles = %7d\n" % (
                    j, refi[j][2])
                print_msg(msg)
        Iter = bcast_number_to_all(Iter, main_node)  # need to tell all
        if again:
            for j in range(numref):
                bcast_EMData_to_all(refi[j][0], myid, main_node)

    #  clean up
    del assign
    # write out headers and STOP; under MPI, writing has to be done sequentially (time-consuming)
    mpi_barrier(MPI_COMM_WORLD)
    if CTF and data_had_ctf == 0:
        for im in range(len(data)):
            data[im].set_attr('ctf_applied', 0)
    par_str = ['xform.align2d', 'assign', 'ID']
    if myid == main_node:
        from sp_utilities import file_type
        if (file_type(stack) == "bdb"):
            from sp_utilities import recv_attr_dict_bdb
            recv_attr_dict_bdb(main_node, stack, data, par_str, image_start,
                               image_end, number_of_proc)
        else:
            from sp_utilities import recv_attr_dict
            recv_attr_dict(main_node, stack, data, par_str, image_start,
                           image_end, number_of_proc)
    else:
        send_attr_dict(main_node, data, par_str, image_start, image_end)
    if myid == main_node:
        print_end_msg("mref_ali2d_MPI")
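mref_ali2d_MPI distributes the particle stack with MPI_start_end(nima, number_of_proc, myid); each rank then reads and aligns only its contiguous block of images. A minimal sketch of such an even block partition is given below; it is illustrative only, and the exact rounding used by sp_applications.MPI_start_end may differ.

def block_partition(n_items, n_procs, rank):
    # Illustrative even block partition: the first (n_items % n_procs) ranks get one extra item.
    base, extra = divmod(n_items, n_procs)
    start = rank * base + min(rank, extra)
    end = start + base + (1 if rank < extra else 0)
    return start, end

# Example: 10 images on 4 ranks -> (0, 3), (3, 6), (6, 8), (8, 10)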
Beispiel #7
0
def mref_ali2d(stack,
               refim,
               outdir,
               maskfile=None,
               ir=1,
               ou=-1,
               rs=1,
               xrng=0,
               yrng=0,
               step=1,
               center=1,
               maxit=0,
               CTF=False,
               snr=1.0,
               user_func_name="ref_ali2d",
               rand_seed=1000,
               MPI=False):
    """
        Name
            mref_ali2d - Perform 2-D multi-reference alignment of an image series
        Input
            stack: set of 2-D images in a stack file, images have to be squares
            refim: set of initial reference 2-D images in a stack file 
            maskfile: optional maskfile to be used in the alignment
            inner_radius: inner radius for rotational correlation > 0
            outer_radius: outer radius for rotational correlation < nx/2-1
            ring_step: step between rings in rotational correlation >0
            x_range: range for translation search in x direction, search is +/-xr
            y_range: range for translation search in y direction, search is +/-yr
            translation_step: step of translation search in both directions
            center: center the average
            max_iter: maximum number of iterations the program will perform
            CTF: if this flag is set, the program will use CTF information provided in file headers
            snr: signal-to-noise ratio of the data
            rand_seed: the seed used for generating random numbers
            MPI: whether to use MPI version
        Output
            output_directory: directory name into which the output files will be written.
            header: the alignment parameters are stored in the headers of input files as 'xform.align2d'.
    """
    # 2D multi-reference alignment using rotational ccf in polar coordinates and quadratic interpolation
    if MPI:
        mref_ali2d_MPI(stack, refim, outdir, maskfile, ir, ou, rs, xrng, yrng,
                       step, center, maxit, CTF, snr, user_func_name,
                       rand_seed)
        return

    from sp_utilities import model_circle, combine_params2, inverse_transform2, drop_image, get_image
    from sp_utilities import center_2D, get_im, get_params2D, set_params2D
    from sp_statistics import fsc
    from sp_alignment import Numrinit, ringwe, fine_2D_refinement, search_range
    from sp_fundamentals import rot_shift2D, fshift
    from random import seed, randint
    import os
    import sys

    from sp_utilities import print_begin_msg, print_end_msg, print_msg
    import shutil

    # create the output directory; remove an existing one first
    if os.path.exists(outdir):
        shutil.rmtree(
            outdir
        )  #ERROR('Output directory exists, please change the name and restart the program', "mref_ali2d", 1)
    os.mkdir(outdir)
    import sp_global_def
    sp_global_def.LOGFILE = os.path.join(outdir, sp_global_def.LOGFILE)

    first_ring = int(ir)
    last_ring = int(ou)
    rstep = int(rs)
    max_iter = int(maxit)

    print_begin_msg("mref_ali2d")

    print_msg("Input stack                 : %s\n" % (stack))
    print_msg("Reference stack             : %s\n" % (refim))
    print_msg("Output directory            : %s\n" % (outdir))
    print_msg("Maskfile                    : %s\n" % (maskfile))
    print_msg("Inner radius                : %i\n" % (first_ring))

    ima = EMData()
    ima.read_image(stack, 0)
    nx = ima.get_xsize()
    # default value for the last ring
    if last_ring == -1: last_ring = nx // 2 - 2  # integer division keeps the radius an int under Python 3

    print_msg("Outer radius                : %i\n" % (last_ring))
    print_msg("Ring step                   : %i\n" % (rstep))
    print_msg("X search range              : %i\n" % (xrng))
    print_msg("Y search range              : %i\n" % (yrng))
    print_msg("Translational step          : %i\n" % (step))
    print_msg("Center type                 : %i\n" % (center))
    print_msg("Maximum iteration           : %i\n" % (max_iter))
    print_msg("CTF correction              : %s\n" % (CTF))
    print_msg("Signal-to-Noise Ratio       : %f\n" % (snr))
    print_msg("Random seed                 : %i\n\n" % (rand_seed))
    print_msg("User function               : %s\n" % (user_func_name))
    output = sys.stdout

    import sp_user_functions
    user_func = sp_user_functions.factory[user_func_name]

    if maskfile:
        import types
        if isinstance(maskfile, (str, bytes)): mask = get_image(maskfile)  # a filename was given, load the mask image
        else: mask = maskfile
    else: mask = model_circle(last_ring, nx, nx)
    #  references
    refi = []
    numref = EMUtil.get_image_count(refim)

    # IMAGES ARE SQUARES! center is in SPIDER convention
    cnx = nx // 2 + 1
    cny = cnx

    mode = "F"
    #precalculate rings
    numr = Numrinit(first_ring, last_ring, rstep, mode)
    wr = ringwe(numr, mode)
    # reference images
    params = []
    #read all data
    data = EMData.read_images(stack)
    nima = len(data)
    # prepare the reference
    ima.to_zero()
    for j in range(numref):
        temp = EMData()
        temp.read_image(refim, j)
        #  even, odd, number of even, number of images.  After frc, totav
        refi.append([temp, ima.copy(), 0])

    seed(rand_seed)
    again = True

    ref_data = [mask, center, None, None]

    Iter = 0

    while Iter < max_iter and again:
        ringref = []
        #print "numref",numref

        ### Reference ###
        mashi = cnx - last_ring - 2
        for j in range(numref):
            refi[j][0].process_inplace("normalize.mask", {
                "mask": mask,
                "no_sigma": 1
            })
            cimage = Util.Polar2Dm(refi[j][0], cnx, cny, numr, mode)
            Util.Frngs(cimage, numr)
            Util.Applyws(cimage, numr, wr)
            ringref.append(cimage)

        assign = [[] for i in range(numref)]
        sx_sum = [0.0] * numref
        sy_sum = [0.0] * numref
        for im in range(nima):
            alpha, sx, sy, mirror, scale = get_params2D(data[im])
            #  Why inverse?  07/11/2015  PAP
            alphai, sxi, syi, scalei = inverse_transform2(alpha, sx, sy)
            # normalize
            data[im].process_inplace("normalize.mask", {
                "mask": mask,
                "no_sigma": 0
            })
            # If shifts are outside of the permissible range, reset them
            if (abs(sxi) > mashi or abs(syi) > mashi):
                sxi = 0.0
                syi = 0.0
                set_params2D(data[im], [0.0, 0.0, 0.0, 0, 1.0])
            ny = nx
            txrng = search_range(nx, last_ring, sxi, xrng, "mref_ali2d")
            txrng = [txrng[1], txrng[0]]
            tyrng = search_range(ny, last_ring, syi, yrng, "mref_ali2d")
            tyrng = [tyrng[1], tyrng[0]]
            # align current image to the reference
            #[angt, sxst, syst, mirrort, xiref, peakt] = Util.multiref_polar_ali_2d_p(data[im],
            #    ringref, txrng, tyrng, step, mode, numr, cnx+sxi, cny+syi)
            #print(angt, sxst, syst, mirrort, xiref, peakt)
            [angt, sxst, syst, mirrort, xiref,
             peakt] = Util.multiref_polar_ali_2d(data[im], ringref, txrng,
                                                 tyrng, step, mode, numr,
                                                 cnx + sxi, cny + syi)

            iref = int(xiref)
            # combine parameters and set them to the header, ignore previous angle and mirror
            [alphan, sxn, syn, mn] = combine_params2(0.0, -sxi, -syi, 0, angt,
                                                     sxst, syst, int(mirrort))
            set_params2D(data[im], [alphan, sxn, syn, int(mn), scale])
            if mn == 0: sx_sum[iref] += sxn
            else: sx_sum[iref] -= sxn
            sy_sum[iref] += syn
            data[im].set_attr('assign', iref)
            # apply current parameters and add to the average
            temp = rot_shift2D(data[im], alphan, sxn, syn, mn)
            it = im % 2
            Util.add_img(refi[iref][it], temp)

            assign[iref].append(im)
            refi[iref][2] += 1
        del ringref
        if again:
            a1 = 0.0
            for j in range(numref):
                msg = "   group #%3d   number of particles = %7d\n" % (
                    j, refi[j][2])
                print_msg(msg)
                if refi[j][2] < 4:
                    #ERROR("One of the references vanished","mref_ali2d",1)
                    #  if vanished, put a random image there
                    assign[j] = []
                    assign[j].append(randint(0, nima - 1))
                    refi[j][0] = data[assign[j][0]].copy()
                else:
                    max_inter = 0  # switch off fine refi.
                    br = 1.75
                    #  the loop has to
                    for INter in range(max_inter + 1):
                        # Calculate averages at least once, even if no within-group refinement was requested
                        frsc = fsc(
                            refi[j][0], refi[j][1], 1.0,
                            os.path.join(outdir,
                                         "drm_%03d_%04d.txt" % (Iter, j)))
                        Util.add_img(refi[j][0], refi[j][1])
                        Util.mul_scalar(refi[j][0], 1.0 / float(refi[j][2]))

                        ref_data[2] = refi[j][0]
                        ref_data[3] = frsc
                        refi[j][0], cs = user_func(ref_data)
                        if center == -1:
                            cs[0] = sx_sum[j] / len(assign[j])
                            cs[1] = sy_sum[j] / len(assign[j])
                            refi[j][0] = fshift(refi[j][0], -cs[0], -cs[1])
                        for i in range(len(assign[j])):
                            im = assign[j][i]
                            alpha, sx, sy, mirror, scale = get_params2D(
                                data[im])
                            alphan, sxn, syn, mirrorn = combine_params2(
                                alpha, sx, sy, mirror, 0.0, -cs[0], -cs[1], 0)
                            set_params2D(
                                data[im],
                                [alphan, sxn, syn,
                                 int(mirrorn), scale])
                        # refine images within the group
                        #  Do the refinement only if max_inter>0, but skip it for the last iteration.
                        if INter < max_inter:
                            fine_2D_refinement(data, br, mask, refi[j][0], j)
                            #  Calculate updated average
                            refi[j][0].to_zero()
                            refi[j][1].to_zero()
                            for i in range(len(assign[j])):
                                im = assign[j][i]
                                alpha, sx, sy, mirror, scale = get_params2D(
                                    data[im])
                                # apply current parameters and add to the average
                                temp = rot_shift2D(data[im], alpha, sx, sy, mirror)  # use this image's own mirror flag
                                it = im % 2
                                Util.add_img(refi[j][it], temp)
                # write the current average
                TMP = []
                for i_tmp in range(len(assign[j])):
                    TMP.append(float(assign[j][i_tmp]))
                TMP.sort()
                refi[j][0].set_attr_dict({'ave_n': refi[j][2], 'members': TMP})
                del TMP
                # replace the name of the stack with reference with the current one
                newrefim = os.path.join(outdir, "aqm%03d.hdf" % Iter)
                refi[j][0].write_image(newrefim, j)
            Iter += 1
            msg = "ITERATION #%3d        \n" % (Iter)
            print_msg(msg)

    newrefim = os.path.join(outdir, "multi_ref.hdf")
    for j in range(numref):
        refi[j][0].write_image(newrefim, j)
    from sp_utilities import write_headers
    write_headers(stack, data, list(range(nima)))
    print_end_msg("mref_ali2d")
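A hedged usage sketch follows; it simply shows how mref_ali2d might be invoked on a particle stack with an initial reference stack. All file names and parameter values are illustrative assumptions, not recommendations.

if __name__ == "__main__":
    # Hypothetical input/output names; adjust to the data at hand.
    mref_ali2d("bdb:particles", "initial_refs.hdf", "mref_out",
               maskfile=None, ir=1, ou=-1, rs=1,
               xrng=2, yrng=2, step=1, center=1, maxit=10,
               CTF=False, snr=1.0, user_func_name="ref_ali2d",
               rand_seed=1000, MPI=False)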
Beispiel #8
0
def main():
    from sp_utilities import get_input_from_string
    progname = os.path.basename(sys.argv[0])
    usage = progname + " stack output_average --radius=particle_radius --xr=xr --yr=yr --ts=ts --thld_err=thld_err --num_ali=num_ali --fl=fl --aa=aa --CTF --verbose --stables"
    parser = OptionParser(usage, version=SPARXVERSION)
    parser.add_option("--radius",
                      type="int",
                      default=-1,
                      help=" particle radius for alignment")
    parser.add_option(
        "--xr",
        type="string",
        default="2 1",
        help=
        "range for translation search in x direction, search is +/xr (default 2,1)"
    )
    parser.add_option(
        "--yr",
        type="string",
        default="-1",
        help=
        "range for translation search in y direction, search is +/yr (default = same as xr)"
    )
    parser.add_option(
        "--ts",
        type="string",
        default="1 0.5",
        help=
        "step size of the translation search in both directions, search is -xr, -xr+ts, 0, xr-ts, xr, can be fractional (default: 1,0.5)"
    )
    parser.add_option("--thld_err",
                      type="float",
                      default=0.7,
                      help="threshold of pixel error (default = 0.7)")
    parser.add_option(
        "--num_ali",
        type="int",
        default=5,
        help="number of alignments performed for stability (default = 5)")
    parser.add_option("--maxit",
                      type="int",
                      default=30,
                      help="number of iterations for each xr (default = 30)")
    parser.add_option(
        "--fl",
        type="float",
        default=0.45,
        help=
        "cut-off frequency of hyperbolic tangent low-pass Fourier filter (default = 0.3)"
    )
    parser.add_option(
        "--aa",
        type="float",
        default=0.2,
        help=
        "fall-off of hyperbolic tangent low-pass Fourier filter (default = 0.2)"
    )
    parser.add_option("--CTF",
                      action="store_true",
                      default=False,
                      help="Use CTF correction during the alignment ")
    parser.add_option("--verbose",
                      action="store_true",
                      default=False,
                      help="print individual pixel error (default = False)")
    parser.add_option(
        "--stables",
        action="store_true",
        default=False,
        help="output the stable particles number in file (default = False)")
    parser.add_option(
        "--method",
        type="string",
        default=" ",
        help="SHC (standard method is the default when the flag is omitted)")

    (options, args) = parser.parse_args()

    if len(args) != 1 and len(args) != 2:
        sxprint("Usage: " + usage)
        sxprint("Please run \'" + progname + " -h\' for detailed options")
        ERROR(
            "Invalid number of parameters used. Please see usage information above."
        )
        return
    else:
        if sp_global_def.CACHE_DISABLE:
            from sp_utilities import disable_bdb_cache
            disable_bdb_cache()

        from sp_applications import within_group_refinement, ali2d_ras
        from sp_pixel_error import multi_align_stability
        from sp_utilities import write_text_file, write_text_row

        sp_global_def.BATCH = True

        xrng = get_input_from_string(options.xr)

        if options.yr == "-1":
            yrng = xrng
        else:
            yrng = get_input_from_string(options.yr)

        step = get_input_from_string(options.ts)

        class_data = EMData.read_images(args[0])

        nx = class_data[0].get_xsize()
        ou = options.radius
        num_ali = options.num_ali
        if ou == -1: ou = nx // 2 - 2
        from sp_utilities import model_circle, get_params2D, set_params2D
        mask = model_circle(ou, nx, nx)

        if options.CTF:
            from sp_filter import filt_ctf
            for im in range(len(class_data)):
                #  Flip phases
                class_data[im] = filt_ctf(class_data[im],
                                          class_data[im].get_attr("ctf"),
                                          binary=1)
        for im in class_data:
            im.set_attr("previousmax", -1.0e10)
            try:
                t = im.get_attr(
                    "xform.align2d")  # if they are there, no need to set them!
            except:
                try:
                    t = im.get_attr("xform.projection")
                    d = t.get_params("spider")
                    set_params2D(im, [0.0, -d["tx"], -d["ty"], 0, 1.0])
                except:
                    set_params2D(im, [0.0, 0.0, 0.0, 0, 1.0])
        all_ali_params = []

        for ii in range(num_ali):
            ali_params = []
            if options.verbose:
                ALPHA = []
                SX = []
                SY = []
                MIRROR = []
            if (xrng[0] == 0.0 and yrng[0] == 0.0):
                avet = ali2d_ras(class_data, randomize = True, ir = 1, ou = ou, rs = 1, step = 1.0, dst = 90.0, \
                  maxit = options.maxit, check_mirror = True, FH=options.fl, FF=options.aa)
            else:
                avet = within_group_refinement(class_data, mask, True, 1, ou, 1, xrng, yrng, step, 90.0, \
                  maxit = options.maxit, FH=options.fl, FF=options.aa, method = options.method)
                from sp_utilities import info
                #print "  avet  ",info(avet)
            for im in class_data:
                alpha, sx, sy, mirror, scale = get_params2D(im)
                ali_params.extend([alpha, sx, sy, mirror])
                if options.verbose:
                    ALPHA.append(alpha)
                    SX.append(sx)
                    SY.append(sy)
                    MIRROR.append(mirror)
            all_ali_params.append(ali_params)
            if options.verbose:
                write_text_file([ALPHA, SX, SY, MIRROR],
                                "ali_params_run_%d" % ii)
        """
		avet = class_data[0]
		from sp_utilities import read_text_file
		all_ali_params = []
		for ii in xrange(5):
			temp = read_text_file( "ali_params_run_%d"%ii,-1)
			uuu = []
			for k in xrange(len(temp[0])):
				uuu.extend([temp[0][k],temp[1][k],temp[2][k],temp[3][k]])
			all_ali_params.append(uuu)


		"""

        stable_set, mir_stab_rate, pix_err = multi_align_stability(
            all_ali_params, 0.0, 10000.0, options.thld_err, options.verbose,
            2 * ou + 1)
        sxprint("%4s %20s %20s %20s %30s %6.2f" %
                ("", "Size of set", "Size of stable set", "Mirror stab rate",
                 "Pixel error prior to pruning the set above threshold of",
                 options.thld_err))
        sxprint("Average stat: %10d %20d %20.2f   %15.2f" %
                (len(class_data), len(stable_set), mir_stab_rate, pix_err))
        if (len(stable_set) > 0):
            if options.stables:
                stab_mem = [[0, 0.0, 0] for j in range(len(stable_set))]
                for j in range(len(stable_set)):
                    stab_mem[j] = [int(stable_set[j][1]), stable_set[j][0], j]
                write_text_row(stab_mem, "stable_particles.txt")

            stable_set_id = []
            particle_pixerr = []
            for s in stable_set:
                stable_set_id.append(s[1])
                particle_pixerr.append(s[0])
            from sp_fundamentals import rot_shift2D
            avet.to_zero()
            l = -1
            sxprint("average parameters:  angle, x-shift, y-shift, mirror")
            for j in stable_set_id:
                l += 1
                sxprint(" %4d  %4d  %12.2f %12.2f %12.2f        %1d" %
                        (l, j, stable_set[l][2][0], stable_set[l][2][1],
                         stable_set[l][2][2], int(stable_set[l][2][3])))
                avet += rot_shift2D(class_data[j], stable_set[l][2][0],
                                    stable_set[l][2][1], stable_set[l][2][2],
                                    stable_set[l][2][3])
            avet /= (l + 1)
            avet.set_attr('members', stable_set_id)
            avet.set_attr('pix_err', pix_err)
            avet.set_attr('pixerr', particle_pixerr)
            avet.write_image(args[1])

        sp_global_def.BATCH = False
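For reference, the all_ali_params structure handed to multi_align_stability above is one flat list per alignment run, with four entries per particle in the order alpha, sx, sy, mirror. A small illustrative helper (not part of the original script) for reading one particle's parameters back out of such a run:

def params_of_particle(flat_params, particle_index):
    # flat_params holds [alpha, sx, sy, mirror] for particle 0, then particle 1, and so on.
    k = 4 * particle_index
    return tuple(flat_params[k:k + 4])

# Example: run = [10.0, 1.5, -0.5, 0,  350.0, -1.0, 0.2, 1]
# params_of_particle(run, 1) -> (350.0, -1.0, 0.2, 1), i.e. particle 1 is mirrored.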