Example #1
def xas_deconvolve(energy, norm=None, group=None, form='gaussian',
                   esigma=1.0, eshift=0.0, _larch=None):
    """XAS spectral deconvolution

    This function de-convolves a normalized mu(E) spectrum with a
    peak shape, enhancing separation of XANES features.

    This can be unstable -- Use results with caution!

    Arguments
    ----------
    energy:   array of x-ray energies, in eV or group
    norm:     array of normalized mu(E)
    group:    output group
    form:     form of deconvolution function. One of
              'gaussian' (default) or 'lorentzian'
    esigma    energy sigma to pass to gaussian() or lorentzian()
              [in eV, default=1.0]
    eshift    energy shift to apply to result. [in eV, default=0]

    Returns
    -------
    None
       The array 'deconv' will be written to the output group.

    Notes
    -----
       Supports the First Argument Group convention, requiring group
       members 'energy' and 'norm'
    """
    if _larch is None:
        raise Warning("cannot deconvolve -- larch broken?")

    energy, mu, group = parse_group_args(energy, members=('energy', 'norm'),
                                         defaults=(norm,), group=group,
                                         fcn_name='xas_deconv')
    eshift = eshift + 0.5 * esigma

    en  = remove_dups(energy)
    en  = en - en[0]
    estep = max(0.001, 0.001*int(min(en[1:]-en[:-1])*1000.0))
    npts = 1  + int(max(en) / estep)

    x = np.arange(npts)*estep
    y = _interp(en, mu, x, kind='linear', _larch=_larch)

    kernel = gaussian
    if form.lower().startswith('lor'):
        kernel = lorentzian

    yext = np.concatenate((y, np.arange(len(y))*y[-1]))
    ret, err = deconvolve(yext, kernel(x, 0, esigma))
    nret = min(len(x), len(ret))

    ret = ret[:nret]*yext[nret-1]/ret[nret-1]
    out = _interp(x+eshift, ret, en, kind='linear', _larch=_larch)

    group = set_xafsGroup(group, _larch=_larch)
    group.deconv = out
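A minimal usage sketch for xas_deconvolve() (hypothetical: it assumes a running Larch interpreter and the module-level helpers this example relies on, such as parse_group_args and _interp; the synthetic 'norm' array is only a stand-in for a real normalized mu(E)):

from larch import Interpreter, Group
import numpy as np

session = Interpreter()
e = np.linspace(7100, 7200, 401)
norm = 0.5*(1.0 + np.tanh((e - 7125.0)/2.0))      # placeholder normalized mu(E)
dat = Group(energy=e, norm=norm)

# deconvolve with a 1.5 eV Lorentzian; the result is written to dat.deconv
xas_deconvolve(dat, group=dat, form='lorentzian', esigma=1.5, _larch=session)
print(dat.deconv.shape)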
Example #2
def xas_deconvolve(energy, norm=None, group=None, form='gaussian',
                   esigma=1.0, eshift=0.0, _larch=None):
    """XAS spectral deconvolution

    This function de-convolves a normalized mu(E) spectrum with a
    peak shape, enhancing separation of XANES features.

    This can be unstable -- Use results with caution!

    Arguments
    ----------
    energy:   array of x-ray energies, in eV or group
    norm:     array of normalized mu(E)
    group:    output group
    form:     form of deconvolution function. One of
              'gaussian' (default) or 'lorentzian'
    esigma    energy sigma to pass to gaussian() or lorentzian()
              [in eV, default=1.0]
    eshift    energy shift to apply to result. [in eV, default=0]

    Returns
    -------
    None
       The array 'deconv' will be written to the output group.

    Notes
    -----
       Supports the First Argument Group convention, requiring group
       members 'energy' and 'norm'
    """
    if _larch is None:
        raise Warning("cannot deconvolve -- larch broken?")

    energy, mu, group = parse_group_args(energy, members=('energy', 'norm'),
                                         defaults=(norm,), group=group,
                                         fcn_name='xas_deconv')
    eshift = eshift + 0.5 * esigma

    en  = remove_dups(energy)
    en  = en - en[0]
    estep = max(0.001, 0.001*int(min(en[1:]-en[:-1])*1000.0))
    npts = 1  + int(max(en) / estep)

    x = np.arange(npts)*estep
    y = _interp(en, mu, x, kind='linear', _larch=_larch)

    kernel = gaussian
    if form.lower().startswith('lor'):
        kernel = lorentzian

    yext = np.concatenate((y, np.arange(len(y))*y[-1]))
    ret, err = deconvolve(yext, kernel(x, 0, esigma))
    nret = min(len(x), len(ret))

    ret = ret[:nret]*yext[nret-1]/ret[nret-1]
    out = _interp(x+eshift, ret, en, kind='linear', _larch=_larch)

    group = set_xafsGroup(group, _larch=_larch)
    group.deconv = out
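The heavy lifting above is done by deconvolve(), presumably scipy.signal.deconvolve imported at module level in the original source. It performs polynomial long division, so the quotient has len(signal) - len(kernel) + 1 samples, which is why yext is extended to roughly twice the data length before deconvolving. A toy, self-contained illustration of that behaviour:

import numpy as np
from scipy.signal import deconvolve

signal = np.array([0., 1., 4., 6., 4., 1., 0.])   # convolution of [0,1,2,1,0] with [1,2,1]
kernel = np.array([1., 2., 1.])

quot, rem = deconvolve(signal, kernel)
print(quot)                                # [0. 1. 2. 1. 0.] -- len(signal)-len(kernel)+1 samples
print(np.convolve(quot, kernel) + rem)     # reconstructs the original signal exactly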
Example #3
def find_e0(energy, mu=None, group=None, _larch=None):
    """calculate E0 given mu(energy)

    This finds the point with maximum derivative with some
    checks to avoid spurious glitches.

    Arguments
    ----------
    energy:  array of x-ray energies, in eV or group
    mu:      array of mu(E)
    group:   output group

    Returns
    -------
    Value of e0.  If a group is provided, group.e0 will be set to this value.

    Notes
    -----
       Supports First Argument Group convention, requiring group
       members 'energy' and 'mu'
    """
    energy, mu, group = parse_group_args(energy,
                                         members=('energy', 'mu'),
                                         defaults=(mu, ),
                                         group=group,
                                         fcn_name='find_e0')

    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    energy = remove_dups(energy)
    dmu = np.gradient(mu) / np.gradient(energy)
    # find points of high derivative
    high_deriv_pts = np.where(dmu > max(dmu) * 0.05)[0]
    idmu_max, dmu_max = 0, 0
    for i in high_deriv_pts:
        if (dmu[i] > dmu_max and (i + 1 in high_deriv_pts)
                and (i - 1 in high_deriv_pts)):
            idmu_max, dmu_max = i, dmu[i]

    e0 = energy[idmu_max]
    if group is not None:
        group = set_xafsGroup(group, _larch=_larch)
        group.e0 = e0
    return e0
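A minimal sketch of calling find_e0() directly on arrays (no Larch session or output group is needed in that case; it assumes the module-level helpers parse_group_args and remove_dups are importable alongside it, and the arctan step is only a stand-in for a measured edge):

import numpy as np

e = np.linspace(8300, 8400, 501)
mu = np.arctan((e - 8333.0)/1.5)/np.pi + 0.5    # synthetic edge centered at 8333 eV

e0 = find_e0(e, mu=mu)
print(e0)                                       # roughly 8333, the point of maximum derivative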
Example #4
def find_e0(energy, mu=None, group=None, _larch=None):
    """calculate E0 given mu(energy)

    This finds the point with maximum derivative with some
    checks to avoid spurious glitches.

    Arguments
    ----------
    energy:  array of x-ray energies, in eV or group
    mu:      array of mu(E)
    group:   output group

    Returns
    -------
    Value of e0.  If a group is provided, group.e0 will be set to this value.

    Notes
    -----
       Supports First Argument Group convention, requiring group
       members 'energy' and 'mu'
    """
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='find_e0')

    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    energy = remove_dups(energy)
    dmu = np.gradient(mu)/np.gradient(energy)
    # find points of high derivative
    high_deriv_pts = np.where(dmu >  max(dmu)*0.05)[0]
    idmu_max, dmu_max = 0, 0
    for i in high_deriv_pts:
        if (dmu[i] > dmu_max and
            (i+1 in high_deriv_pts) and
            (i-1 in high_deriv_pts)):
            idmu_max, dmu_max = i, dmu[i]

    e0 = energy[idmu_max]
    if group is not None:
        group = set_xafsGroup(group, _larch=_larch)
        group.e0 = e0
    return e0
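The neighbor test (i+1 and i-1 must also be high-derivative points) is what rejects single-point glitches. A standalone numpy illustration of that check on a derivative array with an isolated spike:

import numpy as np

dmu = np.array([0.0, 0.1, 0.2, 5.0, 0.2, 1.0, 3.0, 4.0, 3.0, 1.0, 0.1])
high = np.where(dmu > dmu.max()*0.05)[0]

idmax, dmax = 0, 0.0
for i in high:
    if dmu[i] > dmax and (i+1 in high) and (i-1 in high):
        idmax, dmax = i, dmu[i]

print(idmax)   # 7: the broad maximum wins; the isolated spike at index 3 is ignored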
Example #5
def preedge(energy,
            mu,
            e0=None,
            step=None,
            nnorm=3,
            nvict=0,
            pre1=None,
            pre2=-50,
            norm1=100,
            norm2=None):
    """pre edge subtraction, normalization for XAFS (straight python)

    This performs a number of steps:
       1. determine E0 (if not supplied) from max of deriv(mu)
       2. fit a line of polymonial to the region below the edge
       3. fit a polymonial to the region above the edge
       4. extrapolae the two curves to E0 to determine the edge jump

    Arguments
    ----------
    energy:  array of x-ray energies, in eV
    mu:      array of mu(E)
    e0:      edge energy, in eV.  If None, it will be determined here.
    step:    edge jump.  If None, it will be determined here.
    pre1:    low E range (relative to E0) for pre-edge fit
    pre2:    high E range (relative to E0) for pre-edge fit
    nvict:   energy exponent to use for pre-edge fit.  See Note
    norm1:   low E range (relative to E0) for post-edge fit
    norm2:   high E range (relative to E0) for post-edge fit
    nnorm:   degree of polynomial (ie, nnorm+1 coefficients will be found) for
             post-edge normalization curve. Default=3 (cubic), max=5
    Returns
    -------
      dictionary with elements (among others)
          e0          energy origin in eV
          edge_step   edge step
          norm        normalized mu(E)
          pre_edge    determined pre-edge curve
          post_edge   determined post-edge, normalization curve

    Notes
    -----
     1 nvict gives an exponent to the energy term for the fits to the pre-edge
       and the post-edge region.  For the pre-edge, a line (m * energy + b) is
       fit to mu(energy)*energy**nvict over the pre-edge region,
       energy=[e0+pre1, e0+pre2].  For the post-edge, a polynomial of order
       nnorm will be fit to mu(energy)*energy**nvict of the post-edge region
       energy=[e0+norm1, e0+norm2].

    """
    energy = remove_dups(energy)

    if e0 is None or e0 < energy[0] or e0 > energy[-1]:
        energy = remove_dups(energy)
        dmu = np.gradient(mu) / np.gradient(energy)
        # find points of high derivative
        high_deriv_pts = np.where(dmu > max(dmu) * 0.05)[0]
        idmu_max, dmu_max = 0, 0
        for i in high_deriv_pts:
            if (dmu[i] > dmu_max and (i + 1 in high_deriv_pts)
                    and (i - 1 in high_deriv_pts)):
                idmu_max, dmu_max = i, dmu[i]

        e0 = energy[idmu_max]
    nnorm = max(min(nnorm, MAX_NNORM), 0)
    ie0 = index_nearest(energy, e0)
    e0 = energy[ie0]

    if pre1 is None: pre1 = min(energy) - e0
    if norm2 is None: norm2 = max(energy) - e0
    if norm2 < 0: norm2 = max(energy) - e0 - norm2
    pre1 = max(pre1, (min(energy) - e0))
    norm2 = min(norm2, (max(energy) - e0))

    if pre1 > pre2:
        pre1, pre2 = pre2, pre1
    if norm1 > norm2:
        norm1, norm2 = norm2, norm1

    p1 = index_of(energy, pre1 + e0)
    p2 = index_nearest(energy, pre2 + e0)
    if p2 - p1 < 2:
        p2 = min(len(energy), p1 + 2)

    omu = mu * energy**nvict
    ex, mx = remove_nans2(energy[p1:p2], omu[p1:p2])
    precoefs = polyfit(ex, mx, 1)
    pre_edge = (precoefs[0] * energy + precoefs[1]) * energy**(-nvict)
    # normalization
    p1 = index_of(energy, norm1 + e0)
    p2 = index_nearest(energy, norm2 + e0)
    if p2 - p1 < 2:
        p2 = min(len(energy), p1 + 2)

    coefs = polyfit(energy[p1:p2], omu[p1:p2], nnorm)
    post_edge = 0
    norm_coefs = []
    for n, c in enumerate(reversed(list(coefs))):
        post_edge += c * energy**(n - nvict)
        norm_coefs.append(c)
    edge_step = step
    if edge_step is None:
        edge_step = post_edge[ie0] - pre_edge[ie0]

    norm = (mu - pre_edge) / edge_step
    out = {
        'e0': e0,
        'edge_step': edge_step,
        'norm': norm,
        'pre_edge': pre_edge,
        'post_edge': post_edge,
        'norm_coefs': norm_coefs,
        'nvict': nvict,
        'nnorm': nnorm,
        'norm1': norm1,
        'norm2': norm2,
        'pre1': pre1,
        'pre2': pre2,
        'precoefs': precoefs
    }

    return out
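A minimal usage sketch (hypothetical: it assumes the module-level helpers preedge() relies on, e.g. remove_dups, index_of, index_nearest, polyfit, remove_nans2 and MAX_NNORM, are importable; the logistic step below stands in for measured mu(E)):

import numpy as np

e = np.linspace(8200, 8800, 1201)
mu = 0.02 + 1.0/(1.0 + np.exp(-(e - 8333.0)/2.0)) - 2.0e-4*(e - 8200)

res = preedge(e, mu)                 # e0, pre/norm ranges and the edge step are all determined here
print(res['e0'], res['edge_step'])   # near 8333 eV and an edge step near 1
norm = res['norm']                   # (mu - pre_edge) / edge_step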
Example #6
def preedge(energy, mu, e0=None, step=None,
            nnorm=3, nvict=0, pre1=None, pre2=-50,
            norm1=100, norm2=None):
    """pre edge subtraction, normalization for XAFS (straight python)

    This performs a number of steps:
       1. determine E0 (if not supplied) from max of deriv(mu)
       2. fit a line of polymonial to the region below the edge
       3. fit a polymonial to the region above the edge
       4. extrapolae the two curves to E0 to determine the edge jump

    Arguments
    ----------
    energy:  array of x-ray energies, in eV
    mu:      array of mu(E)
    e0:      edge energy, in eV.  If None, it will be determined here.
    step:    edge jump.  If None, it will be determined here.
    pre1:    low E range (relative to E0) for pre-edge fit
    pre2:    high E range (relative to E0) for pre-edge fit
    nvict:   energy exponent to use for pre-edge fit.  See Note
    norm1:   low E range (relative to E0) for post-edge fit
    norm2:   high E range (relative to E0) for post-edge fit
    nnorm:   degree of polynomial (ie, nnorm+1 coefficients will be found) for
             post-edge normalization curve. Default=3 (cubic), max=5
    Returns
    -------
      dictionary with elements (among others)
          e0          energy origin in eV
          edge_step   edge step
          norm        normalized mu(E)
          pre_edge    determined pre-edge curve
          post_edge   determined post-edge, normalization curve

    Notes
    -----
     1 nvict gives an exponent to the energy term for the fits to the pre-edge
       and the post-edge region.  For the pre-edge, a line (m * energy + b) is
       fit to mu(energy)*energy**nvict over the pre-edge region,
       energy=[e0+pre1, e0+pre2].  For the post-edge, a polynomial of order
       nnorm will be fit to mu(energy)*energy**nvict of the post-edge region
       energy=[e0+norm1, e0+norm2].

    """
    energy = remove_dups(energy)

    if e0 is None or e0 < energy[0] or e0 > energy[-1]:
        energy = remove_dups(energy)
        dmu = np.gradient(mu)/np.gradient(energy)
        # find points of high derivative
        high_deriv_pts = np.where(dmu >  max(dmu)*0.05)[0]
        idmu_max, dmu_max = 0, 0
        for i in high_deriv_pts:
            if (dmu[i] > dmu_max and
                (i+1 in high_deriv_pts) and
                (i-1 in high_deriv_pts)):
                idmu_max, dmu_max = i, dmu[i]

        e0 = energy[idmu_max]
    nnorm = max(min(nnorm, MAX_NNORM), 1)
    ie0 = index_nearest(energy, e0)
    e0 = energy[ie0]

    if pre1 is None:  pre1  = min(energy) - e0
    if norm2 is None: norm2 = max(energy) - e0
    if norm2 < 0:     norm2 = max(energy) - e0 - norm2
    pre1  = max(pre1,  (min(energy) - e0))
    norm2 = min(norm2, (max(energy) - e0))

    if pre1 > pre2:
        pre1, pre2 = pre2, pre1
    if norm1 > norm2:
        norm1, norm2 = norm2, norm1

    p1 = index_of(energy, pre1+e0)
    p2 = index_nearest(energy, pre2+e0)
    if p2-p1 < 2:
        p2 = min(len(energy), p1 + 2)

    omu  = mu*energy**nvict
    ex, mx = remove_nans2(energy[p1:p2], omu[p1:p2])
    precoefs = polyfit(ex, mx, 1)
    pre_edge = (precoefs[0] * energy + precoefs[1]) * energy**(-nvict)
    # normalization
    p1 = index_of(energy, norm1+e0)
    p2 = index_nearest(energy, norm2+e0)
    if p2-p1 < 2:
        p2 = min(len(energy), p1 + 2)
    coefs = polyfit(energy[p1:p2], omu[p1:p2], nnorm)
    post_edge = 0
    norm_coefs = []
    for n, c in enumerate(reversed(list(coefs))):
        post_edge += c * energy**(n-nvict)
        norm_coefs.append(c)
    edge_step = step
    if edge_step is None:
        edge_step = post_edge[ie0] - pre_edge[ie0]

    norm = (mu - pre_edge)/edge_step
    out = {'e0': e0, 'edge_step': edge_step, 'norm': norm,
           'pre_edge': pre_edge, 'post_edge': post_edge,
           'norm_coefs': norm_coefs, 'nvict': nvict,
           'nnorm': nnorm, 'norm1': norm1, 'norm2': norm2,
           'pre1': pre1, 'pre2': pre2, 'precoefs': precoefs}

    return out
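The nvict weighting described in the Note can be illustrated on its own: the pre-edge line is fit to mu(E)*E**nvict, and the weighting is divided back out when the line is evaluated. A self-contained sketch on a synthetic 1/E-shaped pre-edge:

import numpy as np

e = np.linspace(8200, 8280, 161)            # pre-edge region only
mu = 50.0/e + 0.001                         # toy background shaped roughly like 1/E
nvict = 1

coefs = np.polyfit(e, mu*e**nvict, 1)       # straight-line fit to mu * E**nvict
pre_edge = np.polyval(coefs, e)*e**(-nvict) # evaluate the line, then undo the weighting
print(np.abs(pre_edge - mu).max())          # tiny: the 1/E shape is captured exactly by the weighted fit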
Example #7
def autobk(energy, mu=None, group=None, rbkg=1, nknots=None, e0=None,
           edge_step=None, kmin=0, kmax=None, kweight=1, dk=0,
           win='hanning', k_std=None, chi_std=None, nfft=2048, kstep=0.05,
           pre_edge_kws=None, nclamp=4, clamp_lo=1, clamp_hi=1,
           calc_uncertainties=False, _larch=None, **kws):
    """Use Autobk algorithm to remove XAFS background

    Parameters:
    -----------
      energy:    1-d array of x-ray energies, in eV, or group
      mu:        1-d array of mu(E)
      group:     output group (and input group for e0 and edge_step).
      rbkg:      distance (in Ang) for chi(R) above
                 which the signal is ignored. Default = 1.
      e0:        edge energy, in eV.  If None, it will be determined.
      edge_step: edge step.  If None, it will be determined.
      pre_edge_kws:  keyword arguments to pass to pre_edge()
      nknots:    number of knots in spline.  If None, it will be determined.
      kmin:      minimum k value   [0]
      kmax:      maximum k value   [full data range].
      kweight:   k weight for FFT.  [1]
      dk:        FFT window parameter.  [0]
      win:       FFT window function name.     ['hanning']
      nfft:      array size to use for FFT [2048]
      kstep:     k step size to use for FFT [0.05]
      k_std:     optional k array for standard chi(k).
      chi_std:   optional chi array for standard chi(k).
      nclamp:    number of energy end-points for clamp [4]
      clamp_lo:  weight of low-energy clamp [1]
      clamp_hi:  weight of high-energy clamp [1]
      calc_uncertainties:  Flag to calculate uncertainties in
                           mu_0(E) and chi(k) [False]

    Output arrays are written to the provided group.

    Follows the 'First Argument Group' convention.
    """
    msg = _larch.writer.write
    if 'kw' in kws:
        kweight = kws.pop('kw')
    if len(kws) > 0:
        msg('Unrecognized arguments for autobk():\n')
        msg('    %s\n' % (', '.join(kws.keys())))
        return

    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='autobk')

    energy = remove_dups(energy)
    # if e0 or edge_step are not specified, get them, either from the
    # passed-in group or from running pre_edge()
    group = set_xafsGroup(group, _larch=_larch)

    if edge_step is None and isgroup(group, 'edge_step'):
        edge_step = group.edge_step
    if e0 is None and isgroup(group, 'e0'):
        e0 = group.e0
    if e0 is None or edge_step is None:
        # need to run pre_edge:
        pre_kws = dict(nnorm=3, nvict=0, pre1=None,
                       pre2=-50., norm1=100., norm2=None)
        if pre_edge_kws is not None:
            pre_kws.update(pre_edge_kws)
        pre_edge(energy, mu, group=group, _larch=_larch, **pre_kws)
        if e0 is None:
            e0 = group.e0
        if edge_step is None:
            edge_step = group.edge_step
    if e0 is None or edge_step is None:
        msg('autobk() could not determine e0 or edge_step: try running pre_edge first\n')
        return

    # get array indices for rbkg and e0: irbkg, ie0
    ie0 = index_of(energy, e0)
    rgrid = np.pi/(kstep*nfft)
    if rbkg < 2*rgrid: rbkg = 2*rgrid
    irbkg = int(1.01 + rbkg/rgrid)

    # save ungridded k (kraw) and gridded k (kout)
    # and ftwin (*k-weighting) for FT in residual
    enpe = energy[ie0:] - e0
    kraw = np.sign(enpe)*np.sqrt(ETOK*abs(enpe))
    if kmax is None:
        kmax = max(kraw)
    else:
        kmax = max(0, min(max(kraw), kmax))
    kout  = kstep * np.arange(int(1.01+kmax/kstep), dtype='float64')
    iemax = min(len(energy), 2+index_of(energy, e0+kmax*kmax/ETOK)) - 1

    # interpolate provided chi(k) onto the kout grid
    if chi_std is not None and k_std is not None:
        chi_std = np.interp(kout, k_std, chi_std)
    # pre-load FT window
    ftwin = kout**kweight * ftwindow(kout, xmin=kmin, xmax=kmax,
                                     window=win, dx=dk)
    # calc k-value and initial guess for y-values of spline params
    nspl = max(4, min(128, 2*int(rbkg*(kmax-kmin)/np.pi) + 1))
    spl_y, spl_k, spl_e  = np.zeros(nspl), np.zeros(nspl), np.zeros(nspl)
    for i in range(nspl):
        q  = kmin + i*(kmax-kmin)/(nspl - 1)
        ik = index_nearest(kraw, q)
        i1 = min(len(kraw)-1, ik + 5)
        i2 = max(0, ik - 5)
        spl_k[i] = kraw[ik]
        spl_e[i] = energy[ik+ie0]
        spl_y[i] = (2*mu[ik+ie0] + mu[i1+ie0] + mu[i2+ie0] ) / 4.0

    # get spline representation: knots, coefs, order=3
    # coefs will be varied in fit.
    knots, coefs, order = splrep(spl_k, spl_y)

    # set fit parameters from initial coefficients
    params = Group()
    for i in range(len(coefs)):
        name = FMT_COEF % i
        p = Parameter(coefs[i], name=name, vary=i<len(spl_y))
        p._getval()
        setattr(params, name, p)

    initbkg, initchi = spline_eval(kraw[:iemax-ie0+1], mu[ie0:iemax+1],
                                   knots, coefs, order, kout)

    # do fit
    fit = Minimizer(__resid, params, _larch=_larch, toler=1.e-4,
                    fcn_kws = dict(ncoefs=len(coefs), chi_std=chi_std,
                                   knots=knots, order=order,
                                   kraw=kraw[:iemax-ie0+1],
                                   mu=mu[ie0:iemax+1], irbkg=irbkg, kout=kout,
                                   ftwin=ftwin, kweight=kweight,
                                   nfft=nfft, nclamp=nclamp,
                                   clamp_lo=clamp_lo, clamp_hi=clamp_hi))
    fit.leastsq()

    # write final results
    coefs = [getattr(params, FMT_COEF % i) for i in range(len(coefs))]
    bkg, chi = spline_eval(kraw[:iemax-ie0+1], mu[ie0:iemax+1],
                           knots, coefs, order, kout)
    obkg = np.copy(mu)
    obkg[ie0:ie0+len(bkg)] = bkg

    # outputs to group
    group = set_xafsGroup(group, _larch=_larch)
    group.bkg  = obkg
    group.chie = (mu-obkg)/edge_step
    group.k    = kout
    group.chi  = chi/edge_step

    # now fill in 'autobk_details' group
    params.init_bkg = np.copy(mu)
    params.init_bkg[ie0:ie0+len(bkg)] = initbkg
    params.init_chi = initchi/edge_step
    params.knots_e  = spl_e
    params.knots_y  = np.array([coefs[i] for i in range(nspl)])
    params.init_knots_y = spl_y
    params.nfev = params.fit_details.nfev
    params.kmin = kmin
    params.kmax = kmax  
    group.autobk_details = params

    # uncertainties in mu0 and chi:  fairly slow!!
    if HAS_UNCERTAIN and calc_uncertainties:
        vbest, vstd = [], []
        for n in fit.var_names:
            par = getattr(params, n)
            vbest.append(par.value)
            vstd.append(par.stderr)
        uvars = uncertainties.correlated_values(vbest, params.covar)
        # uncertainty in bkg (aka mu0)
        # note that much of this is working around
        # limitations in the uncertainty package that make it
        #  1. take an argument list (not array)
        #  2. work on returned scalars (but not arrays)
        #  3. not handle kw args and *args well (so use
        #     of global "index" is important here)
        nkx = iemax-ie0 + 1
        def my_dsplev(*args):
            coefs = np.array(args)
            return splev(kraw[:nkx], [knots, coefs, order])[index]
        fdbkg = uncertainties.wrap(my_dsplev)
        dmu0  = [fdbkg(*uvars).std_dev() for index in range(len(bkg))]
        group.delta_bkg = np.zeros(len(mu))
        group.delta_bkg[ie0:ie0+len(bkg)] = np.array(dmu0)

        # uncertainty in chi (see notes above)
        def my_dchi(*args):
            coefs = np.array(args)
            b,chi = spline_eval(kraw[:nkx], mu[ie0:iemax+1],
                                knots, coefs, order, kout)
            return chi[index]
        fdchi = uncertainties.wrap(my_dchi)
        dchi  = [fdchi(*uvars).std_dev() for index in range(len(kout))]
        group.delta_chi = np.array(dchi)/edge_step
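A minimal usage sketch for this autobk() (hypothetical: a Larch interpreter is required because the function writes messages through _larch.writer, and the module-level helpers must be importable; the smooth synthetic step below stands in for measured mu(E), so the resulting chi(k) is essentially zero):

from larch import Interpreter, Group
import numpy as np

session = Interpreter()
e = np.linspace(8200, 8900, 1401)
mu = 0.02 + 1.0/(1.0 + np.exp(-(e - 8333.0)/2.0))   # placeholder for measured mu(E)
dat = Group(energy=e, mu=mu)

autobk(dat, group=dat, rbkg=1.0, kweight=2, _larch=session)
print(dat.k[:5])      # uniform k grid (kstep = 0.05)
print(dat.chi[:5])    # background-subtracted chi(k), normalized by the edge step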
Example #8
def autobk(energy,
           mu=None,
           group=None,
           rbkg=1,
           nknots=None,
           e0=None,
           edge_step=None,
           kmin=0,
           kmax=None,
           kweight=1,
           dk=0,
           win='hanning',
           k_std=None,
           chi_std=None,
           nfft=2048,
           kstep=0.05,
           pre_edge_kws=None,
           nclamp=4,
           clamp_lo=1,
           clamp_hi=1,
           calc_uncertainties=False,
           _larch=None,
           **kws):
    """Use Autobk algorithm to remove XAFS background

    Parameters:
    -----------
      energy:    1-d array of x-ray energies, in eV, or group
      mu:        1-d array of mu(E)
      group:     output group (and input group for e0 and edge_step).
      rbkg:      distance (in Ang) for chi(R) above
                 which the signal is ignored. Default = 1.
      e0:        edge energy, in eV.  If None, it will be determined.
      edge_step: edge step.  If None, it will be determined.
      pre_edge_kws:  keyword arguments to pass to pre_edge()
      nknots:    number of knots in spline.  If None, it will be determined.
      kmin:      minimum k value   [0]
      kmax:      maximum k value   [full data range].
      kweight:   k weight for FFT.  [1]
      dk:        FFT window parameter.  [0]
      win:       FFT window function name.     ['hanning']
      nfft:      array size to use for FFT [2048]
      kstep:     k step size to use for FFT [0.05]
      k_std:     optional k array for standard chi(k).
      chi_std:   optional chi array for standard chi(k).
      nclamp:    number of energy end-points for clamp [4]
      clamp_lo:  weight of low-energy clamp [1]
      clamp_hi:  weight of high-energy clamp [1]
      calc_uncertainties:  Flag to calculate uncertainties in
                           mu_0(E) and chi(k) [False]

    Output arrays are written to the provided group.

    Follows the 'First Argument Group' convention.
    """
    msg = _larch.writer.write
    if 'kw' in kws:
        kweight = kws.pop('kw')
    if len(kws) > 0:
        msg('Unrecognized arguments for autobk():\n')
        msg('    %s\n' % (', '.join(kws.keys())))
        return
    energy, mu, group = parse_group_args(energy,
                                         members=('energy', 'mu'),
                                         defaults=(mu, ),
                                         group=group,
                                         fcn_name='autobk')

    energy = remove_dups(energy)
    # if e0 or edge_step are not specified, get them, either from the
    # passed-in group or from running pre_edge()
    group = set_xafsGroup(group, _larch=_larch)

    if edge_step is None and isgroup(group, 'edge_step'):
        edge_step = group.edge_step
    if e0 is None and isgroup(group, 'e0'):
        e0 = group.e0
    if e0 is None or edge_step is None:
        # need to run pre_edge:
        pre_kws = dict(nnorm=3,
                       nvict=0,
                       pre1=None,
                       pre2=-50.,
                       norm1=100.,
                       norm2=None)
        if pre_edge_kws is not None:
            pre_kws.update(pre_edge_kws)
        pre_edge(energy, mu, group=group, _larch=_larch, **pre_kws)
        if e0 is None:
            e0 = group.e0
        if edge_step is None:
            edge_step = group.edge_step
    if e0 is None or edge_step is None:
        msg('autobk() could not determine e0 or edge_step: try running pre_edge first\n')
        return

    # get array indices for rbkg and e0: irbkg, ie0
    ie0 = index_of(energy, e0)
    rgrid = np.pi / (kstep * nfft)
    if rbkg < 2 * rgrid: rbkg = 2 * rgrid
    irbkg = int(1.01 + rbkg / rgrid)

    # save ungridded k (kraw) and gridded k (kout)
    # and ftwin (*k-weighting) for FT in residual
    enpe = energy[ie0:] - e0
    kraw = np.sign(enpe) * np.sqrt(ETOK * abs(enpe))
    if kmax is None:
        kmax = max(kraw)
    else:
        kmax = max(0, min(max(kraw), kmax))
    kout = kstep * np.arange(int(1.01 + kmax / kstep), dtype='float64')
    iemax = min(len(energy), 2 + index_of(energy, e0 + kmax * kmax / ETOK)) - 1

    # interpolate provided chi(k) onto the kout grid
    if chi_std is not None and k_std is not None:
        chi_std = np.interp(kout, k_std, chi_std)
    # pre-load FT window
    ftwin = kout**kweight * ftwindow(
        kout, xmin=kmin, xmax=kmax, window=win, dx=dk)
    # calc k-value and initial guess for y-values of spline params
    nspl = max(4, min(128, 2 * int(rbkg * (kmax - kmin) / np.pi) + 1))
    spl_y, spl_k, spl_e = np.zeros(nspl), np.zeros(nspl), np.zeros(nspl)
    for i in range(nspl):
        q = kmin + i * (kmax - kmin) / (nspl - 1)
        ik = index_nearest(kraw, q)
        i1 = min(len(kraw) - 1, ik + 5)
        i2 = max(0, ik - 5)
        spl_k[i] = kraw[ik]
        spl_e[i] = energy[ik + ie0]
        spl_y[i] = (2 * mu[ik + ie0] + mu[i1 + ie0] + mu[i2 + ie0]) / 4.0

    # get spline representation: knots, coefs, order=3
    # coefs will be varied in fit.
    knots, coefs, order = splrep(spl_k, spl_y)

    # set fit parameters from initial coefficients
    params = Group()
    for i in range(len(coefs)):
        name = FMT_COEF % i
        p = Parameter(coefs[i], name=name, vary=i < len(spl_y))
        p._getval()
        setattr(params, name, p)

    initbkg, initchi = spline_eval(kraw[:iemax - ie0 + 1], mu[ie0:iemax + 1],
                                   knots, coefs, order, kout)

    # do fit
    fit = Minimizer(__resid,
                    params,
                    _larch=_larch,
                    toler=1.e-4,
                    fcn_kws=dict(ncoefs=len(coefs),
                                 chi_std=chi_std,
                                 knots=knots,
                                 order=order,
                                 kraw=kraw[:iemax - ie0 + 1],
                                 mu=mu[ie0:iemax + 1],
                                 irbkg=irbkg,
                                 kout=kout,
                                 ftwin=ftwin,
                                 kweight=kweight,
                                 nfft=nfft,
                                 nclamp=nclamp,
                                 clamp_lo=clamp_lo,
                                 clamp_hi=clamp_hi))
    fit.leastsq()

    # write final results
    coefs = [getattr(params, FMT_COEF % i) for i in range(len(coefs))]
    bkg, chi = spline_eval(kraw[:iemax - ie0 + 1], mu[ie0:iemax + 1], knots,
                           coefs, order, kout)
    obkg = np.copy(mu)
    obkg[ie0:ie0 + len(bkg)] = bkg

    # outputs to group
    group = set_xafsGroup(group, _larch=_larch)
    group.bkg = obkg
    group.chie = (mu - obkg) / edge_step
    group.k = kout
    group.chi = chi / edge_step

    # now fill in 'autobk_details' group
    params.init_bkg = np.copy(mu)
    params.init_bkg[ie0:ie0 + len(bkg)] = initbkg
    params.init_chi = initchi / edge_step
    params.knots_e = spl_e
    params.knots_y = np.array([coefs[i] for i in range(nspl)])
    params.init_knots_y = spl_y
    params.nfev = params.fit_details.nfev
    params.kmin = kmin
    params.kmax = kmax
    group.autobk_details = params

    # uncertainties in mu0 and chi:  fairly slow!!
    if HAS_UNCERTAIN and calc_uncertainties:
        vbest, vstd = [], []
        for n in fit.var_names:
            par = getattr(params, n)
            vbest.append(par.value)
            vstd.append(par.stderr)
        uvars = uncertainties.correlated_values(vbest, params.covar)
        # uncertainty in bkg (aka mu0)
        # note that much of this is working around
        # limitations in the uncertainty package that make it
        #  1. take an argument list (not array)
        #  2. work on returned scalars (but not arrays)
        #  3. not handle kw args and *args well (so use
        #     of global "index" is important here)
        nkx = iemax - ie0 + 1

        def my_dsplev(*args):
            coefs = np.array(args)
            return splev(kraw[:nkx], [knots, coefs, order])[index]

        fdbkg = uncertainties.wrap(my_dsplev)
        dmu0 = [fdbkg(*uvars).std_dev() for index in range(len(bkg))]
        group.delta_bkg = np.zeros(len(mu))
        group.delta_bkg[ie0:ie0 + len(bkg)] = np.array(dmu0)

        # uncertainty in chi (see notes above)
        def my_dchi(*args):
            coefs = np.array(args)
            b, chi = spline_eval(kraw[:nkx], mu[ie0:iemax + 1], knots, coefs,
                                 order, kout)
            return chi[index]

        fdchi = uncertainties.wrap(my_dchi)
        dchi = [fdchi(*uvars).std_dev() for index in range(len(kout))]
        group.delta_chi = np.array(dchi) / edge_step
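The kraw array converts photoelectron energy above E0 into wavenumber k via k = sqrt(ETOK*(E - E0)). A standalone sketch of that conversion (the ETOK value used here, roughly 0.2625 per eV per square Angstrom, is the usual 2*m_e/hbar**2 constant and is an assumption, since the constant itself is defined elsewhere in the module):

import numpy as np

ETOK = 0.2625          # approx 2*m_e/hbar^2 in 1/(eV*Angstrom^2); assumed value
e0 = 8333.0
energy = np.array([8333.0, 8350.0, 8433.0, 8713.0])

enpe = energy - e0
kraw = np.sign(enpe)*np.sqrt(ETOK*np.abs(enpe))
print(kraw)            # about [0.0, 2.1, 5.1, 10.0] inverse Angstroms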
Example #9
def autobk(energy,
           mu=None,
           group=None,
           rbkg=1,
           nknots=None,
           e0=None,
           edge_step=None,
           kmin=0,
           kmax=None,
           kweight=1,
           dk=0,
           win='hanning',
           k_std=None,
           chi_std=None,
           nfft=2048,
           kstep=0.05,
           pre_edge_kws=None,
           nclamp=4,
           clamp_lo=1,
           clamp_hi=1,
           calc_uncertainties=True,
           err_sigma=1,
           _larch=None,
           **kws):
    """Use Autobk algorithm to remove XAFS background

    Parameters:
    -----------
      energy:    1-d array of x-ray energies, in eV, or group
      mu:        1-d array of mu(E)
      group:     output group (and input group for e0 and edge_step).
      rbkg:      distance (in Ang) for chi(R) above
                 which the signal is ignored. Default = 1.
      e0:        edge energy, in eV.  If None, it will be determined.
      edge_step: edge step.  If None, it will be determined.
      pre_edge_kws:  keyword arguments to pass to pre_edge()
      nknots:    number of knots in spline.  If None, it will be determined.
      kmin:      minimum k value   [0]
      kmax:      maximum k value   [full data range].
      kweight:   k weight for FFT.  [1]
      dk:        FFT window parameter.  [0]
      win:       FFT window function name.     ['hanning']
      nfft:      array size to use for FFT [2048]
      kstep:     k step size to use for FFT [0.05]
      k_std:     optional k array for standard chi(k).
      chi_std:   optional chi array for standard chi(k).
      nclamp:    number of energy end-points for clamp [4]
      clamp_lo:  weight of low-energy clamp [1]
      clamp_hi:  weight of high-energy clamp [1]
      calc_uncertainties:  Flag to calculate uncertainties in
                           mu_0(E) and chi(k) [True]
      err_sigma: sigma level for uncertainties in mu_0(E) and chi(k) [1]

    Output arrays are written to the provided group.

    Follows the 'First Argument Group' convention.
    """
    msg = _larch.writer.write
    if 'kw' in kws:
        kweight = kws.pop('kw')
    if len(kws) > 0:
        msg('Unrecognized arguments for autobk():\n')
        msg('    %s\n' % (', '.join(kws.keys())))
        return
    energy, mu, group = parse_group_args(energy,
                                         members=('energy', 'mu'),
                                         defaults=(mu, ),
                                         group=group,
                                         fcn_name='autobk')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    energy = remove_dups(energy)
    # if e0 or edge_step are not specified, get them, either from the
    # passed-in group or from running pre_edge()
    group = set_xafsGroup(group, _larch=_larch)

    if edge_step is None and isgroup(group, 'edge_step'):
        edge_step = group.edge_step
    if e0 is None and isgroup(group, 'e0'):
        e0 = group.e0
    if e0 is None or edge_step is None:
        # need to run pre_edge:
        pre_kws = dict(nnorm=3,
                       nvict=0,
                       pre1=None,
                       pre2=-50.,
                       norm1=100.,
                       norm2=None)
        if pre_edge_kws is not None:
            pre_kws.update(pre_edge_kws)
        pre_edge(energy, mu, group=group, _larch=_larch, **pre_kws)
        if e0 is None:
            e0 = group.e0
        if edge_step is None:
            edge_step = group.edge_step
    if e0 is None or edge_step is None:
        msg('autobk() could not determine e0 or edge_step: try running pre_edge first\n')
        return

    # get array indices for rbkg and e0: irbkg, ie0
    ie0 = index_of(energy, e0)
    rgrid = np.pi / (kstep * nfft)
    if rbkg < 2 * rgrid: rbkg = 2 * rgrid
    irbkg = int(1.01 + rbkg / rgrid)

    # save ungridded k (kraw) and gridded k (kout)
    # and ftwin (*k-weighting) for FT in residual
    enpe = energy[ie0:] - e0
    kraw = np.sign(enpe) * np.sqrt(ETOK * abs(enpe))
    if kmax is None:
        kmax = max(kraw)
    else:
        kmax = max(0, min(max(kraw), kmax))
    kout = kstep * np.arange(int(1.01 + kmax / kstep), dtype='float64')
    iemax = min(len(energy), 2 + index_of(energy, e0 + kmax * kmax / ETOK)) - 1

    # interpolate provided chi(k) onto the kout grid
    if chi_std is not None and k_std is not None:
        chi_std = np.interp(kout, k_std, chi_std)
    # pre-load FT window
    ftwin = kout**kweight * ftwindow(
        kout, xmin=kmin, xmax=kmax, window=win, dx=dk)
    # calc k-value and initial guess for y-values of spline params
    nspl = max(4, min(128, 2 * int(rbkg * (kmax - kmin) / np.pi) + 1))
    spl_y, spl_k, spl_e = np.zeros(nspl), np.zeros(nspl), np.zeros(nspl)
    for i in range(nspl):
        q = kmin + i * (kmax - kmin) / (nspl - 1)
        ik = index_nearest(kraw, q)
        i1 = min(len(kraw) - 1, ik + 5)
        i2 = max(0, ik - 5)
        spl_k[i] = kraw[ik]
        spl_e[i] = energy[ik + ie0]
        spl_y[i] = (2 * mu[ik + ie0] + mu[i1 + ie0] + mu[i2 + ie0]) / 4.0

    # get spline representation: knots, coefs, order=3
    # coefs will be varied in fit.
    knots, coefs, order = splrep(spl_k, spl_y)

    # set fit parameters from initial coefficients
    params = Group()
    for i in range(len(coefs)):
        name = FMT_COEF % i
        p = Parameter(coefs[i], name=name, vary=i < len(spl_y))
        p._getval()
        setattr(params, name, p)

    initbkg, initchi = spline_eval(kraw[:iemax - ie0 + 1], mu[ie0:iemax + 1],
                                   knots, coefs, order, kout)

    # do fit
    fit = Minimizer(__resid,
                    params,
                    _larch=_larch,
                    toler=1.e-4,
                    fcn_kws=dict(ncoefs=len(coefs),
                                 chi_std=chi_std,
                                 knots=knots,
                                 order=order,
                                 kraw=kraw[:iemax - ie0 + 1],
                                 mu=mu[ie0:iemax + 1],
                                 irbkg=irbkg,
                                 kout=kout,
                                 ftwin=ftwin,
                                 kweight=kweight,
                                 nfft=nfft,
                                 nclamp=nclamp,
                                 clamp_lo=clamp_lo,
                                 clamp_hi=clamp_hi))
    fit.leastsq()

    # write final results
    coefs = [getattr(params, FMT_COEF % i) for i in range(len(coefs))]
    bkg, chi = spline_eval(kraw[:iemax - ie0 + 1], mu[ie0:iemax + 1], knots,
                           coefs, order, kout)
    obkg = np.copy(mu)
    obkg[ie0:ie0 + len(bkg)] = bkg

    # outputs to group
    group = set_xafsGroup(group, _larch=_larch)
    group.bkg = obkg
    group.chie = (mu - obkg) / edge_step
    group.k = kout
    group.chi = chi / edge_step

    # now fill in 'autobk_details' group
    params.init_bkg = np.copy(mu)
    params.init_bkg[ie0:ie0 + len(bkg)] = initbkg
    params.init_chi = initchi / edge_step
    params.knots_e = spl_e
    params.knots_y = np.array([coefs[i] for i in range(nspl)])
    params.init_knots_y = spl_y
    params.nfev = params.fit_details.nfev
    params.kmin = kmin
    params.kmax = kmax
    group.autobk_details = params

    # uncertainties in mu0 and chi: can be fairly slow.
    if calc_uncertainties:
        nchi = len(chi)
        nmue = iemax - ie0 + 1
        redchi = params.chi_reduced
        covar = params.covar / redchi
        jac_chi = np.zeros(nchi * nspl).reshape((nspl, nchi))
        jac_bkg = np.zeros(nmue * nspl).reshape((nspl, nmue))

        cvals, cerrs = [], []
        for i in range(len(coefs)):
            par = getattr(params, FMT_COEF % i)
            cvals.append(getattr(par, 'value', 0.0))
            cdel = getattr(par, 'stderr', 0.0)
            if cdel is None:
                cdel = 0.0
            cerrs.append(cdel / 2.0)
        cvals = np.array(cvals)
        cerrs = np.array(cerrs)

        # find derivatives by hand!
        _k = kraw[:nmue]
        _m = mu[ie0:iemax + 1]
        for i in range(nspl):
            cval0 = cvals[i]
            cvals[i] = cval0 + cerrs[i]
            bkg1, chi1 = spline_eval(_k, _m, knots, cvals, order, kout)

            cvals[i] = cval0 - cerrs[i]
            bkg2, chi2 = spline_eval(_k, _m, knots, cvals, order, kout)

            cvals[i] = cval0
            jac_chi[i] = (chi1 - chi2) / (2 * cerrs[i])
            jac_bkg[i] = (bkg1 - bkg2) / (2 * cerrs[i])

        dfchi = np.zeros(nchi)
        dfbkg = np.zeros(nmue)
        for i in range(nspl):
            for j in range(nspl):
                dfchi += jac_chi[i] * jac_chi[j] * covar[i, j]
                dfbkg += jac_bkg[i] * jac_bkg[j] * covar[i, j]

        prob = 0.5 * (1.0 + erf(err_sigma / np.sqrt(2.0)))
        dchi = t.ppf(prob, nchi - nspl) * np.sqrt(dfchi * redchi)
        dbkg = t.ppf(prob, nmue - nspl) * np.sqrt(dfbkg * redchi)

        group.delta_chi = dchi
        group.delta_bkg = 0.0 * mu
        group.delta_bkg[ie0:ie0 + len(dbkg)] = dbkg
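The double loop above is standard first-order propagation of the parameter covariance through a finite-difference Jacobian: the variance of the output at each point is the sum over i, j of J_i * J_j * C_ij. A standalone numpy sketch of the same arithmetic on a toy model:

import numpy as np

def model(p, x):
    return p[0]*np.exp(-x/p[1])               # toy model standing in for the spline

x = np.linspace(0.0, 5.0, 50)
pbest = np.array([2.0, 1.5])                  # best-fit parameter values
covar = np.array([[0.04, 0.01],
                  [0.01, 0.09]])              # toy parameter covariance matrix
steps = np.sqrt(np.diag(covar))/2.0           # finite-difference step per parameter

jac = np.zeros((len(pbest), len(x)))
for i, h in enumerate(steps):
    p_up, p_dn = pbest.copy(), pbest.copy()
    p_up[i] += h
    p_dn[i] -= h
    jac[i] = (model(p_up, x) - model(p_dn, x))/(2.0*h)   # central difference

var_f = np.einsum('ix,jx,ij->x', jac, jac, covar)        # sum_ij J_i J_j C_ij, per x point
sigma_f = np.sqrt(var_f)                                 # 1-sigma uncertainty of the model curve
print(sigma_f[:3])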
Example #10
def autobk(energy, mu=None, group=None, rbkg=1, nknots=None, e0=None,
           edge_step=None, kmin=0, kmax=None, kweight=1, dk=0,
           win='hanning', k_std=None, chi_std=None, nfft=2048, kstep=0.05,
           pre_edge_kws=None, nclamp=4, clamp_lo=1, clamp_hi=1,
           calc_uncertainties=True, err_sigma=1, _larch=None, **kws):
    """Use Autobk algorithm to remove XAFS background

    Parameters:
    -----------
      energy:    1-d array of x-ray energies, in eV, or group
      mu:        1-d array of mu(E)
      group:     output group (and input group for e0 and edge_step).
      rbkg:      distance (in Ang) for chi(R) above
                 which the signal is ignored. Default = 1.
      e0:        edge energy, in eV.  If None, it will be determined.
      edge_step: edge step.  If None, it will be determined.
      pre_edge_kws:  keyword arguments to pass to pre_edge()
      nknots:    number of knots in spline.  If None, it will be determined.
      kmin:      minimum k value   [0]
      kmax:      maximum k value   [full data range].
      kweight:   k weight for FFT.  [1]
      dk:        FFT window parameter.  [0]
      win:       FFT window function name.     ['hanning']
      nfft:      array size to use for FFT [2048]
      kstep:     k step size to use for FFT [0.05]
      k_std:     optional k array for standard chi(k).
      chi_std:   optional chi array for standard chi(k).
      nclamp:    number of energy end-points for clamp [4]
      clamp_lo:  weight of low-energy clamp [1]
      clamp_hi:  weight of high-energy clamp [1]
      calc_uncertainties:  Flag to calculate uncertainties in
                           mu_0(E) and chi(k) [True]
      err_sigma: sigma level for uncertainties in mu_0(E) and chi(k) [1]

    Output arrays are written to the provided group.

    Follows the 'First Argument Group' convention.
    """
    msg = _larch.writer.write
    if 'kw' in kws:
        kweight = kws.pop('kw')
    if len(kws) > 0:
        msg('Unrecognized arguments for autobk():\n')
        msg('    %s\n' % (', '.join(kws.keys())))
        return
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='autobk')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    energy = remove_dups(energy)
    # if e0 or edge_step are not specified, get them, either from the
    # passed-in group or from running pre_edge()
    group = set_xafsGroup(group, _larch=_larch)

    if edge_step is None and isgroup(group, 'edge_step'):
        edge_step = group.edge_step
    if e0 is None and isgroup(group, 'e0'):
        e0 = group.e0
    if e0 is None or edge_step is None:
        # need to run pre_edge:
        pre_kws = dict(nnorm=3, nvict=0, pre1=None,
                       pre2=-50., norm1=100., norm2=None)
        if pre_edge_kws is not None:
            pre_kws.update(pre_edge_kws)
        pre_edge(energy, mu, group=group, _larch=_larch, **pre_kws)
        if e0 is None:
            e0 = group.e0
        if edge_step is None:
            edge_step = group.edge_step
    if e0 is None or edge_step is None:
        msg('autobk() could not determine e0 or edge_step: try running pre_edge first\n')
        return

    # get array indices for rbkg and e0: irbkg, ie0
    ie0 = index_of(energy, e0)
    rgrid = np.pi/(kstep*nfft)
    if rbkg < 2*rgrid: rbkg = 2*rgrid
    irbkg = int(1.01 + rbkg/rgrid)

    # save ungridded k (kraw) and gridded k (kout)
    # and ftwin (*k-weighting) for FT in residual
    enpe = energy[ie0:] - e0
    kraw = np.sign(enpe)*np.sqrt(ETOK*abs(enpe))
    if kmax is None:
        kmax = max(kraw)
    else:
        kmax = max(0, min(max(kraw), kmax))
    kout  = kstep * np.arange(int(1.01+kmax/kstep), dtype='float64')
    iemax = min(len(energy), 2+index_of(energy, e0+kmax*kmax/ETOK)) - 1

    # interpolate provided chi(k) onto the kout grid
    if chi_std is not None and k_std is not None:
        chi_std = np.interp(kout, k_std, chi_std)
    # pre-load FT window
    ftwin = kout**kweight * ftwindow(kout, xmin=kmin, xmax=kmax,
                                     window=win, dx=dk)
    # calc k-value and initial guess for y-values of spline params
    nspl = max(4, min(128, 2*int(rbkg*(kmax-kmin)/np.pi) + 1))
    spl_y, spl_k, spl_e  = np.zeros(nspl), np.zeros(nspl), np.zeros(nspl)
    for i in range(nspl):
        q  = kmin + i*(kmax-kmin)/(nspl - 1)
        ik = index_nearest(kraw, q)
        i1 = min(len(kraw)-1, ik + 5)
        i2 = max(0, ik - 5)
        spl_k[i] = kraw[ik]
        spl_e[i] = energy[ik+ie0]
        spl_y[i] = (2*mu[ik+ie0] + mu[i1+ie0] + mu[i2+ie0] ) / 4.0

    # get spline representation: knots, coefs, order=3
    # coefs will be varied in fit.
    knots, coefs, order = splrep(spl_k, spl_y)

    # set fit parameters from initial coefficients
    params = Group()
    for i in range(len(coefs)):
        name = FMT_COEF % i
        p = Parameter(coefs[i], name=name, vary=i<len(spl_y))
        p._getval()
        setattr(params, name, p)

    initbkg, initchi = spline_eval(kraw[:iemax-ie0+1], mu[ie0:iemax+1],
                                   knots, coefs, order, kout)

    # do fit
    fit = Minimizer(__resid, params, _larch=_larch, toler=1.e-4,
                    fcn_kws = dict(ncoefs=len(coefs), chi_std=chi_std,
                                   knots=knots, order=order,
                                   kraw=kraw[:iemax-ie0+1],
                                   mu=mu[ie0:iemax+1], irbkg=irbkg, kout=kout,
                                   ftwin=ftwin, kweight=kweight,
                                   nfft=nfft, nclamp=nclamp,
                                   clamp_lo=clamp_lo, clamp_hi=clamp_hi))
    fit.leastsq()

    # write final results
    coefs = [getattr(params, FMT_COEF % i) for i in range(len(coefs))]
    bkg, chi = spline_eval(kraw[:iemax-ie0+1], mu[ie0:iemax+1],
                           knots, coefs, order, kout)
    obkg = np.copy(mu)
    obkg[ie0:ie0+len(bkg)] = bkg

    # outputs to group
    group = set_xafsGroup(group, _larch=_larch)
    group.bkg  = obkg
    group.chie = (mu-obkg)/edge_step
    group.k    = kout
    group.chi  = chi/edge_step

    # now fill in 'autobk_details' group
    params.init_bkg = np.copy(mu)
    params.init_bkg[ie0:ie0+len(bkg)] = initbkg
    params.init_chi = initchi/edge_step
    params.knots_e  = spl_e
    params.knots_y  = np.array([coefs[i] for i in range(nspl)])
    params.init_knots_y = spl_y
    params.nfev = params.fit_details.nfev
    params.kmin = kmin
    params.kmax = kmax
    group.autobk_details = params

    # uncertainties in mu0 and chi: can be fairly slow.
    if calc_uncertainties:
        nchi = len(chi)
        nmue = iemax-ie0 + 1
        redchi = params.chi_reduced
        covar = params.covar / redchi
        jac_chi = np.zeros(nchi*nspl).reshape((nspl, nchi))
        jac_bkg = np.zeros(nmue*nspl).reshape((nspl, nmue))

        cvals, cerrs = [], []
        for i in range(len(coefs)):
            par = getattr(params, FMT_COEF % i)
            cvals.append(getattr(par, 'value', 0.0))
            cdel = getattr(par, 'stderr', 0.0)
            if cdel is None:
                cdel = 0.0
            cerrs.append(cdel/2.0)
        cvals = np.array(cvals)
        cerrs = np.array(cerrs)

        # find derivatives by hand!
        _k = kraw[:nmue]
        _m = mu[ie0:iemax+1]
        for i in range(nspl):
            cval0 = cvals[i]
            cvals[i] = cval0 + cerrs[i]
            bkg1, chi1 = spline_eval(_k, _m, knots, cvals, order, kout)

            cvals[i] = cval0 - cerrs[i]
            bkg2, chi2 = spline_eval(_k, _m, knots, cvals, order, kout)

            cvals[i] = cval0
            jac_chi[i] = (chi1 - chi2) / (2*cerrs[i])
            jac_bkg[i] = (bkg1 - bkg2) / (2*cerrs[i])

        dfchi = np.zeros(nchi)
        dfbkg = np.zeros(nmue)
        for i in range(nspl):
            for j in range(nspl):
                dfchi += jac_chi[i]*jac_chi[j]*covar[i,j]
                dfbkg += jac_bkg[i]*jac_bkg[j]*covar[i,j]

        prob = 0.5*(1.0 + erf(err_sigma/np.sqrt(2.0)))
        dchi = t.ppf(prob, nchi-nspl) * np.sqrt(dfchi*redchi)
        dbkg = t.ppf(prob, nmue-nspl) * np.sqrt(dfbkg*redchi)

        group.delta_chi = dchi
        group.delta_bkg = 0.0*mu
        group.delta_bkg[ie0:ie0+len(dbkg)] = dbkg
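The last few lines turn err_sigma into a coverage probability with erf() and then into a Student-t multiplier for the propagated standard error. A self-contained sketch of just that scaling (erf and t are presumed to come from scipy.special and scipy.stats, as in the original module):

import numpy as np
from scipy.special import erf
from scipy.stats import t

err_sigma = 2             # request a "2-sigma" band
ndata, npar = 300, 20     # e.g. number of chi(k) points and of spline coefficients

prob = 0.5*(1.0 + erf(err_sigma/np.sqrt(2.0)))   # one-sided coverage, ~0.977 for 2 sigma
tval = t.ppf(prob, ndata - npar)                 # multiplier applied to sqrt(dfchi*redchi)
print(round(prob, 4), round(tval, 3))            # ~0.9772 and ~2.0 for many degrees of freedom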