Example #1
# numpy supplies concatenate and sqrt; Minimizer, FeffitDataSet, isParameter and
# larch.Group are assumed to come from the surrounding larch package (not shown here).
from numpy import concatenate, sqrt

def feffit(params, datasets, _larch=None, rmax_out=10,
           path_outputs=True, **kws):

    def _resid(params, datasets=None, _larch=None, **kws):
        """residual function: concatenate the residuals of all datasets"""
        return concatenate([d.residual() for d in datasets])

    if isinstance(datasets, FeffitDataSet):
        datasets = [datasets]
    for ds in datasets:
        if not isinstance(ds, FeffitDataSet):
            print "feffit needs a list of FeffitDataSets"
            return
    fitkws = dict(datasets=datasets)
    fit = Minimizer(_resid, params, fcn_kws=fitkws, _larch=_larch)
    fit.leastsq()
    # scale uncertainties to sqrt(n_idp - n_varys)
    n_idp = 0
    for ds in datasets:
        n_idp += ds.transform.n_idp
    err_scale = sqrt(n_idp - params.nvarys)
    for name in dir(params):
        p = getattr(params, name)
        if isParameter(p) and p.vary:
            p.stderr *= err_scale

    # here we create outputs:
    for ds in datasets:
        ds.save_ffts(rmax_out=rmax_out, path_outputs=path_outputs)

    out = larch.Group(name='feffit fit results',
                      fit = fit,
                      params = params,
                      datasets = datasets)

    return out
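
The stderr rescaling in feffit above multiplies each varied parameter's uncertainty by sqrt(n_idp - nvarys). A self-contained numerical sketch of that step, with made-up values:

import numpy as np

n_idp = 18.2        # total independent points summed over the datasets (made-up value)
nvarys = 7          # number of varied parameters (made-up value)
raw_stderr = 0.012  # stderr reported by leastsq for one parameter (made-up value)

err_scale = np.sqrt(n_idp - nvarys)
print(raw_stderr * err_scale)   # rescaled uncertainty, about 0.040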
Example #2
# np is numpy and splrep comes from scipy.interpolate; ETOK, FMT_COEF, remove_dups,
# index_nearest, pre_edge, ftwindow, spline_eval, Group, Parameter and Minimizer are
# assumed to be defined elsewhere in the larch package / this module.
import numpy as np
from scipy.interpolate import splrep

def autobk(energy, mu, group=None, rbkg=1, nknots=None, e0=None,
           edge_step=None, kmin=0, kmax=None, kweight=1, dk=0,
           win='hanning', k_std=None, chi_std=None, nfft=2048, kstep=0.05,
           pre_edge_kws=None, debug=False, _larch=None, **kws):

    """Use Autobk algorithm to remove XAFS background
    Options are:
      rbkg -- distance out to which the chi(R) is minimized
    """
    if _larch is None:
        raise Warning("cannot calculate autobk spline -- larch broken?")

    if 'kw' in kws:
        kweight = kws['kw']

    energy = remove_dups(energy)

    # if e0 or edge_step are not specified, get them, either from the
    # passed-in group or from running pre_edge()
    if edge_step is None:
        if _larch.symtable.isgroup(group) and hasattr(group, 'edge_step'):
            edge_step = group.edge_step
    if e0 is None:
        if _larch.symtable.isgroup(group) and hasattr(group, 'e0'):
            e0 = group.e0
    if e0 is None or edge_step is None:
        # need to run pre_edge:
        pre_kws = dict(nnorm=3, nvict=0, pre1=None,
                       pre2=-50., norm1=100., norm2=None)
        if pre_edge_kws is not None:
            pre_kws.update(pre_edge_kws)
        edge_step, e0 = pre_edge(energy, mu, group=group,
                                 _larch=_larch, **pre_kws)

    # get array indices for rbkg and e0: irbkg, ie0
    ie0 = index_nearest(energy, e0)
    rgrid = np.pi/(kstep*nfft)
    if rbkg < 2*rgrid: rbkg = 2*rgrid
    irbkg = int(1.01 + rbkg/rgrid)

    # save ungridded k (kraw) and gridded k (kout),
    # and the k-weighted FT window (ftwin) for the FT in the residual
    kraw = np.sqrt(ETOK*(energy[ie0:] - e0))
    if kmax is None:
        kmax = max(kraw)
    kout  = kstep * np.arange(int(1.01+kmax/kstep), dtype='float64')

    # interpolate provided chi(k) onto the kout grid
    if chi_std is not None and k_std is not None:
        chi_std = np.interp(kout, k_std, chi_std)

    ftwin = kout**kweight * ftwindow(kout, xmin=kmin, xmax=kmax,
                                     window=win, dx=dk)

    # calculate k-values and initial guesses for the y-values of the spline params
    nspline = max(4, min(60, 2*int(rbkg*(kmax-kmin)/np.pi) + 1))
    spl_y  = np.zeros(nspline)
    spl_k  = np.zeros(nspline)
    spl_e  = np.zeros(nspline)
    for i in range(nspline):
        q = kmin + i*(kmax-kmin)/(nspline - 1)
        ik = index_nearest(kraw, q)

        i1 = min(len(kraw)-1, ik + 5)
        i2 = max(0, ik - 5)
        spl_k[i] = kraw[ik]
        spl_e[i] = energy[ik+ie0]
        spl_y[i] = (2*mu[ik+ie0] + mu[i1+ie0] + mu[i2+ie0] ) / 4.0

    # get spline representation: knots, coefs, order=3
    # coefs will be varied in fit.
    knots, coefs, order = splrep(spl_k, spl_y)

    # set fit parameters from initial coefficients
    ncoefs = len(coefs)
    params = Group()
    for i in range(ncoefs):
        name = FMT_COEF % i
        p = Parameter(coefs[i], name=name, vary=i<len(spl_y))
        p._getval()
        setattr(params, name, p)

    initbkg, initchi = spline_eval(kraw, mu[ie0:], knots, coefs, order, kout)

    fitkws = dict(ncoefs=len(coefs), chi_std=chi_std,
                  knots=knots, order=order, kraw=kraw, mu=mu[ie0:],
                  irbkg=irbkg, kout=kout, ftwin=ftwin, nfft=nfft)
    # do the fit (__resid is the module-level residual function, defined elsewhere in this module)
    fit = Minimizer(__resid, params, fcn_kws=fitkws, _larch=_larch, toler=1.e-4)
    fit.leastsq()

    # write final results
    coefs = [getattr(params, FMT_COEF % i) for i in range(ncoefs)]

    bkg, chi = spline_eval(kraw, mu[ie0:], knots, coefs, order, kout)
    obkg  = np.zeros(len(mu))
    obkg[:ie0] = mu[:ie0]
    obkg[ie0:] = bkg
    if _larch.symtable.isgroup(group):
        group.bkg  = obkg
        group.chie = (mu-obkg)/edge_step
        group.k    = kout
        group.chi  = chi/edge_step
        if debug:
            group.spline_params = params
            ix_bkg = np.zeros(len(mu))
            ix_bkg[:ie0] = mu[:ie0]
            ix_bkg[ie0:] = initbkg
            group.init_bkg = ix_bkg
            group.init_chi = initchi/edge_step
            group.spline_e = spl_e
            group.spline_y = np.array([coefs[i] for i in range(nspline)])
            group.spline_yinit = spl_y
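
For intuition on autobk's grid setup, the R-grid spacing is pi/(kstep*nfft) and irbkg is the index of rbkg on that grid; the k array comes from k = sqrt(ETOK*(E - e0)). A small check with the function's default kstep and nfft (ETOK is approximated here; its exact value lives elsewhere in larch):

import numpy as np

kstep, nfft, rbkg = 0.05, 2048, 1.0    # defaults from the signature above, rbkg = 1
ETOK = 0.2625                          # approximate 2*m_e/hbar^2 in 1/(eV*Angstrom^2), assumed value

rgrid = np.pi / (kstep * nfft)         # R-grid spacing, about 0.0307 Angstrom
irbkg = int(1.01 + rbkg / rgrid)       # index of rbkg on the R grid, gives 33
k_at_100eV = np.sqrt(ETOK * 100.0)     # k of about 5.1 inverse Angstrom at 100 eV above e0
print(rgrid, irbkg, k_at_100eV)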
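
The knot count nspline follows the snippet's Nyquist-style rule 2*int(rbkg*(kmax - kmin)/pi) + 1, clamped to the range 4..60. A quick check with assumed typical values:

import numpy as np

rbkg, kmin, kmax = 1.0, 0.0, 15.0      # assumed typical background range and k range
nspline = max(4, min(60, 2*int(rbkg*(kmax - kmin)/np.pi) + 1))
print(nspline)                         # gives 9 knots, spaced roughly 1.9 inverse Angstrom apart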