# Example #1
# 0
def cml_refine_agls(Prj, Ori, delta):
    """Refine the orientation of every projection with a simplex search.

    Prj   - projection data handed through to the discrepancy wrapper
    Ori   - flat list of orientations, 4 entries per projection
            (indices 4*i .. 4*i+3); updated in place and also returned
    delta - initial simplex step used for every search dimension
    """
    from copy import deepcopy
    from sp_utilities import amoeba
    global g_n_prj

    # One bracketing step per simplex dimension; amoeba uses what it needs.
    simplex_steps = [delta] * (g_n_prj + 2)

    for idx in range(g_n_prj):
        base = 4 * idx
        # starting point: the current three angles of this projection
        start_angles = list(Ori[base:base + 3])
        # wrapper payload: projections, a private copy of all
        # orientations, and the index of the one being refined
        payload = [Prj, deepcopy(Ori), idx]
        # downhill-simplex refinement
        best, disc, n_iter = amoeba(start_angles,
                                    simplex_steps,
                                    cml_refine_agls_wrap_dev,
                                    data=payload)
        # store the refined angles, first one wrapped into [0, 360)
        Ori[base] = (best[0] + 360) % 360
        Ori[base + 1] = best[1]
        Ori[base + 2] = best[2]
        sxprint('refine:', idx, 'angles:', Ori[base:base + 4],
                'disc:', -disc)

    return Ori
# Example #2
# 0
def fit_tanh(dres, low=0.1):
    """Fit a hyperbolic tangent filter to an FSC curve.

    dres - list produced by the fsc function:
           dres[0] - absolute frequencies
           dres[1] - fsc values; computed from the dataset split into
                     halves, so converted to full-set values here with
                     rn = 2r/(1+r)
           dres[2] - number of points used to calculate each fsc coefficient
    low  - low cutoff of the fsc curve; from the first point whose full
           fsc drops below it onward, dres[1] is zeroed IN PLACE

    Returns (freq, fall_off): freq is the cutoff frequency at which the
    filter value is 0.5, fall_off is the 'width' of the filter.
    """
    from math import pi, tanh

    def fit_tanh_func(args, data):
        # Negative sum of squared residuals between the full fsc and a
        # tanh low-pass with cutoff args[0] and fall-off args[1]; amoeba
        # maximizes, so the best fit gives the largest (least negative) value.
        v = 0.0
        # guard against a (spurious) negative first fsc value
        if data[1][0] < 0.0:
            data[1][0] *= -1.0
        for i in range(len(data[0])):
            fsc = 2 * data[1][i] / (1.0 + data[1][i])
            if args[0] == 0 or args[1] == 0:
                qt = 0
            else:
                qt = fsc - 0.5 * (tanh(pi * (data[0][i] + args[0]) /
                                       2.0 / args[1] / args[0]) -
                                  tanh(pi * (data[0][i] - args[0]) /
                                       2.0 / args[1] / args[0]))
            v -= qt * qt
        return v

    # Zero the tail of the curve (in place) once the full fsc falls below `low`.
    setzero = False
    for i in range(1, len(dres[0])):
        if not setzero and 2 * dres[1][i] / (1.0 + dres[1][i]) < low:
            setzero = True
        if setzero:
            dres[1][i] = 0.0

    # Initial guess: frequency just before the full fsc first drops below 0.5.
    freq = -1.0
    for i in range(1, len(dres[0]) - 1):
        if 2 * dres[1][i] / (1.0 + dres[1][i]) < 0.5:
            freq = dres[0][i - 1]
            break
    if freq < 0.0:
        # The curve never falls below 0.5 - most likely something is wrong;
        # however, return reasonable values.
        return 0.4, 0.2

    # Refine (freq, fall_off) with a downhill-simplex search; local import
    # keeps the function self-contained, matching fit_tanh1.
    from sp_utilities import amoeba
    result = amoeba([freq, 0.1], [0.05, 0.05], fit_tanh_func, data=dres)
    return result[0][0], result[0][1]
# Example #3
# 0
def fit_tanh1(dres, low=0.1):
    """Fit a hyperbolic tangent filter to an FSC curve (conservative variant).

    dres - list produced by the fsc function:
           dres[0] - absolute frequencies
           dres[1] - fsc values; to be conservative, the factor-of-2
                     half-set correction is NOT applied here
           dres[2] - number of points used to calculate each fsc coefficient
    low  - low cutoff of the fsc curve; from the first point whose fsc
           drops below it onward, dres[1] is zeroed IN PLACE

    Returns (freq, fall_off): freq is the cutoff frequency at which the
    filter value is 0.5, fall_off is the 'width' of the filter.
    """
    def fit_tanh_func(args, data):
        from math import pi, tanh
        # Negative sum of squared residuals between the fsc and a tanh
        # low-pass with cutoff args[0] and fall-off args[1]; amoeba
        # maximizes, so the best fit yields the largest value.
        v = 0.0
        for i in range(len(data[0])):
            fsc = data[1][i]
            if args[0] == 0 or args[1] == 0:
                qt = 0
            else:
                qt = fsc - 0.5 * (
                    tanh(pi * (data[0][i] + args[0]) / 2.0 / args[1] / args[0]) -
                    tanh(pi * (data[0][i] - args[0]) / 2.0 / args[1] / args[0]))
            v -= qt * qt
        return v

    # Zero the tail of the curve (in place) once the fsc falls below `low`.
    setzero = False
    for i in range(1, len(dres[0])):
        if not setzero and dres[1][i] < low:
            setzero = True
        if setzero:
            dres[1][i] = 0.0

    # Initial guess: frequency just before the fsc first drops below 0.5.
    freq = -1.0
    for i in range(1, len(dres[0]) - 1):
        if dres[1][i] < 0.5:
            freq = dres[0][i - 1]
            break
    if freq < 0.0:
        # The curve never falls below 0.5 - most likely something is wrong;
        # however, return reasonable values.
        return 0.2, 0.2

    # Refine (freq, fall_off) with a downhill-simplex search.
    from sp_utilities import amoeba
    result = amoeba([freq, 0.1], [0.05, 0.05], fit_tanh_func, data=dres)
    return result[0][0], result[0][1]
# Example #4
# 0
def fine_2D_refinement(data, br, mask, tavg, group=-1):
    """Refine the 2D alignment (alpha, sx, sy) of each image in `data`
    against the running average `tavg` using a simplex (amoeba) search.

    data  - list of square images; the "alpha", "sx", "sy", "mirror"
            header attributes are updated in place
    br    - initial bracketing step for each of the three search dimensions
    mask  - mask passed through to the criterion function via `stuff`
    tavg  - current average image; rebuilt incrementally as images move
    group - when > -1, only images whose "ref_num" header equals `group`
            are refined

    NOTE(review): this view ends inside the per-image loop; any trailing
    statements (e.g. a return) are outside the visible source.
    """

    # IMAGES ARE SQUARES!
    nx = data[0].get_xsize()
    #  center is in SPIDER convention
    cnx = int(old_div(nx, 2)) + 1
    cny = cnx

    # Count how many images actually take part in the refinement.
    if group > -1:
        nima = 0
        for im in range(len(data)):
            if data[im].get_attr("ref_num") == group:
                nima += 1
    else:
        nima = len(data)

    # prepare KB interpolants
    kb = kbt(nx)
    # load stuff for amoeba
    stuff = []
    stuff.insert(0, kb)
    stuff.insert(1, mask)
    stuff.insert(2, nima)
    # stuff.insert(3,tave)  # current average
    # stuff.insert(4,data)  # current image in the gridding format
    weights = [
        br
    ] * 3  # weights define initial bracketing, so one would have to figure how to set them correctly

    for im in range(len(data)):
        # skip images outside the requested group, if one was given
        if group > -1:
            if data[im].get_attr("ref_num") != group:
                continue
        # subtract current image from the average
        alpha = data[im].get_attr("alpha")
        sx = data[im].get_attr("sx")
        sy = data[im].get_attr("sy")
        mirror = data[im].get_attr("mirror")
        # gridding-format copy of the image, carrying the same header params
        ddata = sp_fundamentals.prepg(data[im], kb)
        ddata.set_attr_dict({
            "alpha": alpha,
            "sx": sx,
            "sy": sy,
            "mirror": mirror
        })
        temp = sp_fundamentals.rtshgkb(ddata, alpha, sx, sy, kb)
        if mirror:
            temp.process_inplace("xform.mirror", {"axis": "x"})
        #  Subtract current image from the average
        refim = EMAN2_cppwrap.Util.madn_scalar(tavg, temp,
                                               old_div(-1.0, float(nima)))
        stuff.append(refim)  # curent ave-1
        stuff.append(ddata)  # curent image
        # perform amoeba alignment
        params = [alpha, sx, sy]
        outparams = sp_utilities.amoeba(params, weights, crit2d, 1.0e-4,
                                        1.0e-4, 500, stuff)
        # drop the two per-image entries (refim, ddata) appended above so
        # `stuff` is clean for the next image
        del stuff[3]
        del stuff[3]
        # set parameters to the header
        data[im].set_attr_dict({
            "alpha": outparams[0][0],
            "sx": outparams[0][1],
            "sy": outparams[0][2],
            "mirror": mirror,
        })
        # update the average
        temp = sp_fundamentals.rtshgkb(ddata, outparams[0][0], outparams[0][1],
                                       outparams[0][2], kb)
        if mirror:
            temp.process_inplace("xform.mirror", {"axis": "x"})
        # check whether the criterion actually increased
        # add current image to the average
        tavg = EMAN2_cppwrap.Util.madn_scalar(refim, temp,
                                              old_div(1.0, float(nima)))