def xrf_resid(pars, peaks=None, mca=None, det=None, bgr=None,
              sig=None, filters=None, flyield=None, use_bgr=False,
              xray_en=30.0, emin=0.0, emax=30.0, **kws):

    sig_o = pars.sig_o.value
    sig_s = pars.sig_s.value
    sig_q = pars.sig_q.value
    model = mca.data * 0.0
    if use_bgr:
        model += mca.bgr
    for use, pcen, psig, pamp in peaks:
        amp = pamp.value
        cen = pcen.value
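        # peak width varies quadratically with energy:
        #   sigma(E) = sig_o + sig_s*E + sig_q*E**2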
        sig = sig_o + cen * (sig_s + sig_q * cen)
        psig.value = sig
        if use: model += amp*gaussian(mca.energy, cen, sig)

    mca.model = model
    imin = index_of(mca.energy, emin)
    imax = index_of(mca.energy, emax)
    resid = (mca.data - model)
    # if det['use']:
    #     resid = resid / np.maximum(1.e-29, (1.0 - mca.det_atten))

    return resid[imin:imax]
Example #2
def xrf_resid(pars,
              peaks=None,
              mca=None,
              det=None,
              bgr=None,
              sig=None,
              filters=None,
              flyield=None,
              use_bgr=False,
              xray_en=30.0,
              emin=0.0,
              emax=30.0,
              **kws):

    sig_o = pars.sig_o.value
    sig_s = pars.sig_s.value
    sig_q = pars.sig_q.value
    model = mca.data * 0.0
    if use_bgr:
        model += mca.bgr
    for use, pcen, psig, pamp in peaks:
        amp = pamp.value
        cen = pcen.value
        sig = sig_o + cen * (sig_s + sig_q * cen)
        psig.value = sig
        if use: model += amp * gaussian(mca.energy, cen, sig)

    mca.model = model
    imin = index_of(mca.energy, emin)
    imax = index_of(mca.energy, emax)
    resid = (mca.data - model)
    # if det['use']:
    #     resid = resid / np.maximum(1.e-29, (1.0 - mca.det_atten))

    return resid[imin:imax]
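
A minimal sketch of how a residual function like xrf_resid could be wired into a fit, following the Minimizer/leastsq pattern used by pre_edge() and autobk() later in these examples. The pars parameter group, the peaks list of (use, center, sigma, amplitude) entries, the mca object, and the _larch session are assumed to exist already; the names are illustrative, not part of the snippet above.

# hedged wiring sketch; `pars`, `peaks`, `mca`, and `_larch` are assumed inputs
fit = Minimizer(xrf_resid, pars, _larch=_larch, toler=1.e-4,
                fcn_kws=dict(peaks=peaks, mca=mca, use_bgr=True,
                             emin=1.5, emax=12.0))
fit.leastsq()
print(pars.sig_o.value, pars.sig_s.value, pars.sig_q.value)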
Example #3
    def get_xranges(self, x):
        xmin, xmax = min(x), max(x)
        i1, i2 = 0, len(x)
        _xmin = self.xmin.GetValue()
        _xmax = self.xmax.GetValue()
        if _xmin > min(x):
            i1 = index_of(x, _xmin)
            xmin = x[i1]
        if _xmax < max(x):
            i2 = index_of(x, _xmax) + 1
            xmax = x[i2]
        xv1 = max(min(x), xmin - (xmax - xmin) / 5.0)
        xv2 = min(max(x), xmax + (xmax - xmin) / 5.0)
        return i1, i2, xv1, xv2
Example #4
    def on_cursor(self, event=None, side='left'):
        if event is None:
            return
        x, y  = event.xdata, event.ydata
        if len(self.panel.fig.get_axes()) > 1:
            try:
                x, y = self.panel.axes.transData.inverted().transform((event.x, event.y))
            except:
                pass
        ix = x
        if self.xrd is not None:
            if self.xunit == '2th':
                q = calc_2th_to_q(x,self.xrd.wavelength*1e10)
            elif self.xunit == 'd':
                q = calc_d_to_q(x)
            else:
                q = x
            ix = index_of(self.xrd.data1D[0], q)

        if side == 'right':
            self.xmarker_right = ix
        elif side == 'left':
            self.xmarker_left = ix
        self.eventx = x
        self.eventy = y

        if self.xmarker_left is not None and self.xmarker_right is not None:
            ix1, ix2 = self.xmarker_left, self.xmarker_right
            self.xmarker_left  = min(ix1, ix2)
            self.xmarker_right = max(ix1, ix2)

        if side == 'left':
            self.x_for_zoom = np.array(self.xrd.data1D)[0,ix]
        self.update_status()
        self.draw()
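
For reference, the unit conversions this handler relies on are the standard scattering relations; below is a hedged sketch of what calc_2th_to_q and calc_d_to_q are assumed to compute (the larch implementations themselves are not shown in these examples).

import numpy as np

def twotheta_to_q(twotheta_deg, wavelength_ang):
    # q = 4*pi*sin(theta)/lambda, with 2theta given in degrees
    theta = np.radians(twotheta_deg) / 2.0
    return 4.0*np.pi*np.sin(theta) / wavelength_ang

def d_to_q(d_ang):
    # q = 2*pi/d
    return 2.0*np.pi / d_ang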
Example #5
    def on_cursor(self, event=None, side='left'):
        if event is None:
            return
        x, y  = event.xdata, event.ydata
        if len(self.panel.fig.get_axes()) > 1:
            try:
                x, y = self.panel.axes.transData.inverted().transform((event.x, event.y))
            except:
                pass
        ix = x
        if self.mca is not None:
            ix = index_of(self.mca.energy, x)

        if side == 'right':
            self.xmarker_right = ix
        elif side == 'left':
            self.xmarker_left = ix

        if self.xmarker_left is not None and self.xmarker_right is not None:
            ix1, ix2 = self.xmarker_left, self.xmarker_right
            self.xmarker_left  = min(ix1, ix2)
            self.xmarker_right = max(ix1, ix2)

        if side == 'left':
            self.energy_for_zoom = self.mca.energy[ix]
        self.update_status()
        self.draw()
Example #6
    def on_cursor(self, event=None, side='left'):
        if event is None:
            return
        x, y  = event.xdata, event.ydata
        if len(self.panel.fig.get_axes()) > 1:
            try:
                x, y = self.panel.axes.transData.inverted().transform((event.x, event.y))
            except:
                pass
        ix = x
        if self.xrd is not None:
            if self.xunit == '2th':
                q = calc_2th_to_q(x,self.xrd.wavelength*1e10)
            elif self.xunit == 'd':
                q = calc_d_to_q(x)
            else:
                q = x
            ix = index_of(self.xrd.data1D[0], q)

        if side == 'right':
            self.xmarker_right = ix
        elif side == 'left':
            self.xmarker_left = ix
        self.eventx = x
        self.eventy = y

        if self.xmarker_left is not None and self.xmarker_right is not None:
            ix1, ix2 = self.xmarker_left, self.xmarker_right
            self.xmarker_left  = min(ix1, ix2)
            self.xmarker_right = max(ix1, ix2)

        if side == 'left':
            self.x_for_zoom = np.array(self.xrd.data1D)[0,ix]
        self.update_status()
        self.draw()
Example #7
    def on_cursor(self, event=None, side='left'):
        if event is None:
            return
        x, y  = event.xdata, event.ydata
        if len(self.panel.fig.get_axes()) > 1:
            try:
                x, y = self.panel.axes.transData.inverted().transform((event.x, event.y))
            except:
                pass
        ix = x
        if self.mca is not None:
            ix = index_of(self.mca.energy, x)

        if side == 'right':
            self.xmarker_right = ix
        elif side == 'left':
            self.xmarker_left = ix

        if self.xmarker_left is not None and self.xmarker_right is not None:
            ix1, ix2 = self.xmarker_left, self.xmarker_right
            self.xmarker_left  = min(ix1, ix2)
            self.xmarker_right = max(ix1, ix2)

        if side == 'left':
            self.energy_for_zoom = self.mca.energy[ix]
        self.update_status()
        self.draw()
Example #8
    def onPick2Timer(self, evt=None):
        try:
            curhist = self.larch.symtable._plotter.plot1.cursor_hist[:]
        except:
            return
        if (time.time() - self.pick2_t0) > self.pick2_timeout:
            msg = self.pick2_group.pick2_msg.SetLabel(" ")
            self.larch.symtable._plotter.plot1.cursor_hist = []
            self.pick2_timer.Stop()
            return

        if len(curhist) < 2:
            self.pick2_group.pick2_msg.SetLabel("%i/2" % (len(curhist)))
            return

        self.pick2_group.pick2_msg.SetLabel("done.")
        self.pick2_timer.Stop()

        # guess param values
        xcur = (curhist[0][0], curhist[1][0])
        xmin, xmax = min(xcur), max(xcur)

        dgroup = getattr(self.larch.symtable, self.controller.groupname)
        x, y = dgroup.x, dgroup.y
        i0 = index_of(dgroup.x, xmin)
        i1 = index_of(dgroup.x, xmax)
        x, y = dgroup.x[i0 : i1 + 1], dgroup.y[i0 : i1 + 1]

        mod = self.pick2_group.mclass(prefix=self.pick2_group.prefix)
        parwids = self.pick2_group.parwids
        try:
            guesses = mod.guess(y, x=x)
        except:
            return

        for name, param in guesses.items():
            if name in parwids:
                parwids[name].value.SetValue(param.value)

        dgroup._tmp = mod.eval(guesses, x=dgroup.x)
        self.larch.symtable._plotter.plot1.oplot(dgroup.x, dgroup._tmp)
        self.larch.symtable._plotter.plot1.cursor_hist = []
Example #9
    def guess_starting_values(self, y, x):
        """could probably improve this"""
        ymax, ymin = max(y), min(y)
        yex = ymax
        self.set_initval(self.params.amplitude, 5.*(ymax-ymin))
        if self.negative:
            yex = ymin
            self.params.amplitude.value = -(ymax - ymin)*5.0
        iyex = index_nearest(y, yex)
        self.set_initval(self.params.center, x[iyex])

        halfy = yex /2.0
        ihalfy = index_of(y, halfy)
        sig0 = abs(x[iyex] - x[ihalfy])
        sig1 = 0.15*(max(x) - min(x))
        if sig1 < sig0 : sig0 = sig1
        self.set_initval(self.params.sigma,  sig0)
        bkg0 = ymin
        if self.negative: bkg0 = ymax
        self.set_init_bkg(bkg0)
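
The same starting-value heuristic, restated as a hedged standalone sketch on plain numpy arrays (no Parameter objects); the index_nearest/index_of lookups are replaced by simple argmin calls for illustration.

import numpy as np

def guess_peak_start(x, y, negative=False):
    x, y = np.asarray(x), np.asarray(y)
    ymax, ymin = y.max(), y.min()
    yex = ymin if negative else ymax                  # y at the peak extremum
    amplitude = (-1.0 if negative else 1.0) * 5.0 * (ymax - ymin)
    iyex = np.abs(y - yex).argmin()                   # index of the extremum
    center = x[iyex]
    ihalfy = np.abs(y - yex/2.0).argmin()             # crude half-height point
    sigma = min(abs(x[iyex] - x[ihalfy]), 0.15*(x.max() - x.min()))
    background = ymax if negative else ymin
    return dict(amplitude=amplitude, center=center,
                sigma=sigma, background=background)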
Example #10
def preedge(energy, mu, e0=None, step=None,
            nnorm=3, nvict=0, pre1=None, pre2=-50,
            norm1=100, norm2=None):
    """pre edge subtraction, normalization for XAFS (straight python)

    This performs a number of steps:
       1. determine E0 (if not supplied) from max of deriv(mu)
       2. fit a line or polynomial to the region below the edge
       3. fit a polynomial to the region above the edge
       4. extrapolate the two curves to E0 to determine the edge jump

    Arguments
    ----------
    energy:  array of x-ray energies, in eV
    mu:      array of mu(E)
    e0:      edge energy, in eV.  If None, it will be determined here.
    step:    edge jump.  If None, it will be determined here.
    pre1:    low E range (relative to E0) for pre-edge fit
    pre2:    high E range (relative to E0) for pre-edge fit
    nvict:   energy exponent to use for pre-edge fit.  See Note
    norm1:   low E range (relative to E0) for post-edge fit
    norm2:   high E range (relative to E0) for post-edge fit
    nnorm:   degree of polynomial (ie, nnorm+1 coefficients will be found) for
             post-edge normalization curve. Default=3 (cubic), max=5
    Returns
    -------
      dictionary with elements (among others)
          e0          energy origin in eV
          edge_step   edge step
          norm        normalized mu(E)
          pre_edge    determined pre-edge curve
          post_edge   determined post-edge, normalization curve

    Notes
    -----
     1 nvict gives an exponent to the energy term for the fits to the pre-edge
       and the post-edge region.  For the pre-edge, a line (m * energy + b) is
       fit to mu(energy)*energy**nvict over the pre-edge region,
       energy=[e0+pre1, e0+pre2].  For the post-edge, a polynomial of order
       nnorm will be fit to mu(energy)*energy**nvict of the post-edge region
       energy=[e0+norm1, e0+norm2].

    """
    energy = remove_dups(energy)

    if e0 is None or e0 < energy[0] or e0 > energy[-1]:
        dmu = np.gradient(mu)/np.gradient(energy)
        # find points of high derivative
        high_deriv_pts = np.where(dmu >  max(dmu)*0.05)[0]
        idmu_max, dmu_max = 0, 0
        for i in high_deriv_pts:
            if (dmu[i] > dmu_max and
                (i+1 in high_deriv_pts) and
                (i-1 in high_deriv_pts)):
                idmu_max, dmu_max = i, dmu[i]

        e0 = energy[idmu_max]
    nnorm = max(min(nnorm, MAX_NNORM), 1)
    ie0 = index_nearest(energy, e0)
    e0 = energy[ie0]

    if pre1 is None:  pre1  = min(energy) - e0
    if norm2 is None: norm2 = max(energy) - e0
    if norm2 < 0:     norm2 = max(energy) - e0 - norm2
    pre1  = max(pre1,  (min(energy) - e0))
    norm2 = min(norm2, (max(energy) - e0))

    if pre1 > pre2:
        pre1, pre2 = pre2, pre1
    if norm1 > norm2:
        norm1, norm2 = norm2, norm1

    p1 = index_of(energy, pre1+e0)
    p2 = index_nearest(energy, pre2+e0)
    if p2-p1 < 2:
        p2 = min(len(energy), p1 + 2)

    omu  = mu*energy**nvict
    ex, mx = remove_nans2(energy[p1:p2], omu[p1:p2])
    precoefs = polyfit(ex, mx, 1)
    pre_edge = (precoefs[0] * energy + precoefs[1]) * energy**(-nvict)
    # normalization
    p1 = index_of(energy, norm1+e0)
    p2 = index_nearest(energy, norm2+e0)
    if p2-p1 < 2:
        p2 = min(len(energy), p1 + 2)
    coefs = polyfit(energy[p1:p2], omu[p1:p2], nnorm)
    post_edge = 0
    norm_coefs = []
    for n, c in enumerate(reversed(list(coefs))):
        post_edge += c * energy**(n-nvict)
        norm_coefs.append(c)
    edge_step = step
    if edge_step is None:
        edge_step = post_edge[ie0] - pre_edge[ie0]

    norm = (mu - pre_edge)/edge_step
    out = {'e0': e0, 'edge_step': edge_step, 'norm': norm,
           'pre_edge': pre_edge, 'post_edge': post_edge,
           'norm_coefs': norm_coefs, 'nvict': nvict,
           'nnorm': nnorm, 'norm1': norm1, 'norm2': norm2,
           'pre1': pre1, 'pre2': pre2, 'precoefs': precoefs}

    return out
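
A short usage sketch for preedge() on synthetic data. The arrays and the 7112 eV edge energy are invented for illustration, and the helpers preedge() depends on (remove_dups, index_of, index_nearest, remove_nans2) are assumed to be importable as in larch.

import numpy as np

energy = np.linspace(6900.0, 7500.0, 601)                    # eV
mu = np.arctan((energy - 7112.0)/2.0) + 1.5 + 2e-4*(energy - 7112.0)

result = preedge(energy, mu, pre2=-50, norm1=100)
print(result['e0'], result['edge_step'])
norm = result['norm']            # normalized mu(E), same length as energy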
Example #11
    def process(self, gname,  **kws):
        """ handle process (pre-edge/normalize) XAS data from XAS form, overwriting
        larch group '_y1_' attribute to be plotted
        """
        dgroup = self.controller.get_group(gname)
        proc_opts = {}

        proc_opts['xshift'] = self.xshift.GetValue()
        proc_opts['yshift'] = self.yshift.GetValue()
        proc_opts['xscale'] = self.xscale.GetValue()
        proc_opts['yscale'] = self.yscale.GetValue()
        proc_opts['smooth_op'] = self.smooth_op.GetStringSelection()
        proc_opts['smooth_c0'] = int(self.smooth_c0.GetValue())
        proc_opts['smooth_c1'] = int(self.smooth_c1.GetValue())
        proc_opts['smooth_sig'] = float(self.smooth_sig.GetValue())
        proc_opts['smooth_conv'] = self.smooth_conv.GetStringSelection()

        self.xaspanel.Enable(dgroup.datatype.startswith('xas'))
        if dgroup.datatype.startswith('xas'):
            proc_opts['datatype'] = 'xas'
            proc_opts['e0'] = self.xas_e0.GetValue()
            proc_opts['step'] = self.xas_step.GetValue()
            proc_opts['pre1']  = self.xas_pre1.GetValue()
            proc_opts['pre2']  = self.xas_pre2.GetValue()
            proc_opts['norm1'] = self.xas_nor1.GetValue()
            proc_opts['norm2'] = self.xas_nor2.GetValue()
            proc_opts['nvict'] = self.xas_vict.GetSelection()
            proc_opts['nnorm'] = self.xas_nnor.GetSelection()

            proc_opts['auto_e0'] = self.xas_autoe0.IsChecked()
            proc_opts['show_e0'] = self.xas_showe0.IsChecked()
            proc_opts['auto_step'] = self.xas_autostep.IsChecked()
            proc_opts['nnorm'] = int(self.xas_nnor.GetSelection())
            proc_opts['nvict'] = int(self.xas_vict.GetSelection())
            proc_opts['xas_op'] = self.xas_op.GetStringSelection()

        self.controller.process(dgroup, proc_opts=proc_opts)

        if dgroup.datatype.startswith('xas'):

            if self.xas_autoe0.IsChecked():
                self.xas_e0.SetValue(dgroup.proc_opts['e0'])
            if self.xas_autostep.IsChecked():
                self.xas_step.SetValue(dgroup.proc_opts['edge_step'])

            self.xas_pre1.SetValue(dgroup.proc_opts['pre1'])
            self.xas_pre2.SetValue(dgroup.proc_opts['pre2'])
            self.xas_nor1.SetValue(dgroup.proc_opts['norm1'])
            self.xas_nor2.SetValue(dgroup.proc_opts['norm2'])

            dgroup.orig_ylabel = dgroup.plot_ylabel
            dgroup.plot_ylabel = '$\mu$'
            dgroup.plot_y2label = None
            dgroup.plot_xlabel = '$E \,\mathrm{(eV)}$'
            dgroup.plot_yarrays = [(dgroup.mu, PLOTOPTS_1, dgroup.plot_ylabel)]
            y4e0 = dgroup.mu

            out = self.xas_op.GetStringSelection().lower() # raw, pre, norm, flat
            if out.startswith('raw data + pre'):
                dgroup.plot_yarrays = [(dgroup.mu,        PLOTOPTS_1, '$\mu$'),
                                       (dgroup.pre_edge,  PLOTOPTS_2, 'pre edge'),
                                       (dgroup.post_edge, PLOTOPTS_2, 'post edge')]
            elif out.startswith('pre'):
                dgroup.pre_edge_sub = dgroup.norm * dgroup.edge_step
                dgroup.plot_yarrays = [(dgroup.pre_edge_sub, PLOTOPTS_1,
                                        'pre-edge subtracted $\mu$')]
                y4e0 = dgroup.pre_edge_sub
                dgroup.plot_ylabel = 'pre-edge subtracted $\mu$'
            elif 'norm' in out and 'deriv' in out:
                dgroup.plot_yarrays = [(dgroup.norm, PLOTOPTS_1, 'normalized $\mu$'),
                                       (dgroup.dmude, PLOTOPTS_D, '$d\mu/dE$')]
                y4e0 = dgroup.norm
                dgroup.plot_ylabel = 'normalized $\mu$'
                dgroup.plot_y2label = '$d\mu/dE$'
                dgroup.y = dgroup.norm

            elif out.startswith('norm'):
                dgroup.plot_yarrays = [(dgroup.norm, PLOTOPTS_1, 'normalized $\mu$')]
                y4e0 = dgroup.norm
                dgroup.plot_ylabel = 'normalized $\mu$'
                dgroup.y = dgroup.norm

            elif out.startswith('deriv'):
                dgroup.plot_yarrays = [(dgroup.dmude, PLOTOPTS_1, '$d\mu/dE$')]
                y4e0 = dgroup.dmude
                dgroup.plot_ylabel = '$d\mu/dE$'
                dgroup.y = dgroup.dmude

            dgroup.plot_ymarkers = []
            if self.xas_showe0.IsChecked():
                ie0 = index_of(dgroup.xdat, dgroup.e0)
                dgroup.plot_ymarkers = [(dgroup.e0, y4e0[ie0], {'label': 'e0'})]
Example #12
def pre_edge(energy, mu=None, group=None, e0=None, step=None,
             nnorm=3, nvict=0, pre1=None, pre2=-50,
             norm1=100, norm2=None, make_flat=True, _larch=None):
    """pre edge subtraction, normalization for XAFS

    This performs a number of steps:
       1. determine E0 (if not supplied) from max of deriv(mu)
       2. fit a line or polynomial to the region below the edge
       3. fit a polynomial to the region above the edge
       4. extrapolate the two curves to E0 to determine the edge jump

    Arguments
    ----------
    energy:  array of x-ray energies, in eV, or group (see note)
    mu:      array of mu(E)
    group:   output group
    e0:      edge energy, in eV.  If None, it will be determined here.
    step:    edge jump.  If None, it will be determined here.
    pre1:    low E range (relative to E0) for pre-edge fit
    pre2:    high E range (relative to E0) for pre-edge fit
    nvict:   energy exponent to use for pre-edge fit.  See Note
    norm1:   low E range (relative to E0) for post-edge fit
    norm2:   high E range (relative to E0) for post-edge fit
    nnorm:   degree of polynomial (ie, nnorm+1 coefficients will be found) for
             post-edge normalization curve. Default=3 (cubic), max=5
    make_flat: boolean (Default True) to calculate flattened output.


    Returns
    -------
      None

    The following attributes will be written to the output group:
        e0          energy origin
        edge_step   edge step
        norm        normalized mu(E)
        flat        flattened, normalized mu(E)
        pre_edge    determined pre-edge curve
        post_edge   determined post-edge, normalization curve
        dmude       derivative of mu(E)

    (if the output group is None, _sys.xafsGroup will be written to)

    Notes
    -----
     1 nvict gives an exponent to the energy term for the fits to the pre-edge
       and the post-edge region.  For the pre-edge, a line (m * energy + b) is
       fit to mu(energy)*energy**nvict over the pre-edge region,
       energy=[e0+pre1, e0+pre2].  For the post-edge, a polynomial of order
       nnorm will be fit to mu(energy)*energy**nvict of the post-edge region
       energy=[e0+norm1, e0+norm2].

     2 If the first argument is a Group, it must contain 'energy' and 'mu'.
       If it exists, group.e0 will be used as e0.
       See First Argument Group in Documentation
    """



    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='pre_edge')
    pre_dat = preedge(energy, mu, e0=e0, step=step, nnorm=nnorm,
                      nvict=nvict, pre1=pre1, pre2=pre2, norm1=norm1,
                      norm2=norm2)


    group = set_xafsGroup(group, _larch=_larch)

    e0    = pre_dat['e0']
    norm  = pre_dat['norm']
    norm1 = pre_dat['norm1']
    norm2 = pre_dat['norm2']
    # generate flattened spectra, by fitting a quadratic to .norm
    # and removing that.
    flat = norm
    ie0 = index_nearest(energy, e0)
    p1 = index_of(energy, norm1+e0)
    p2 = index_nearest(energy, norm2+e0)
    if p2-p1 < 2:
        p2 = min(len(energy), p1 + 2)

    if make_flat and p2-p1 > 4:
        enx, mux = remove_nans2(energy[p1:p2], norm[p1:p2])
        # enx, mux = (energy[p1:p2], norm[p1:p2])
        fpars = Group(c0 = Parameter(0, vary=True),
                      c1 = Parameter(0, vary=True),
                      c2 = Parameter(0, vary=True),
                      en=enx, mu=mux)
        fit = Minimizer(flat_resid, fpars, _larch=_larch, toler=1.e-5)
        try:
            fit.leastsq()
        except (TypeError, ValueError):
            pass
        fc0, fc1, fc2  = fpars.c0.value, fpars.c1.value, fpars.c2.value
        flat_diff   = fc0 + energy * (fc1 + energy * fc2)
        flat        = norm - flat_diff  + flat_diff[ie0]
        flat[:ie0]  = norm[:ie0]


    group.e0 = e0
    group.norm = norm
    group.flat = flat
    group.dmude = np.gradient(mu)/np.gradient(energy)
    group.edge_step  = pre_dat['edge_step']
    group.pre_edge   = pre_dat['pre_edge']
    group.post_edge  = pre_dat['post_edge']

    group.pre_edge_details = Group()
    group.pre_edge_details.pre1   = pre_dat['pre1']
    group.pre_edge_details.pre2   = pre_dat['pre2']
    group.pre_edge_details.norm1  = pre_dat['norm1']
    group.pre_edge_details.norm2  = pre_dat['norm2']
    group.pre_edge_details.pre_slope  = pre_dat['precoefs'][0]
    group.pre_edge_details.pre_offset = pre_dat['precoefs'][1]

    for i in range(MAX_NNORM):
        if hasattr(group, 'norm_c%i' % i):
            delattr(group, 'norm_c%i' % i)
    for i, c in enumerate(pre_dat['norm_coefs']):
        setattr(group.pre_edge_details, 'norm_c%i' % i, c)
    return
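
A hedged sketch of a pre_edge() call writing into an output Group; the arrays are the synthetic ones from the preedge() sketch above, and _larch stands for the running larch session used throughout these examples.

dat = Group()                                   # output group
pre_edge(energy, mu, group=dat, _larch=_larch)
print(dat.e0, dat.edge_step)
print(dat.pre_edge_details.pre1, dat.pre_edge_details.norm2)
flat = dat.flat                                 # flattened, normalized mu(E)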
Example #13
    def xas_process(self, gname, new_mu=False, **kws):
        """ process (pre-edge/normalize) XAS data from XAS form, overwriting
        larch group '_y1_' attribute to be plotted
        """

        out = self.xas_op.GetStringSelection().lower()  # raw, pre, norm, flat
        preopts = {'group': gname, 'e0': None}

        lgroup = getattr(self.larch.symtable, gname)
        dtcorr = self.dtcorr.IsChecked()
        if new_mu:
            try:
                del lgroup.e0, lgroup.edge_step
            except:
                pass

        if not self.xas_autoe0.IsChecked():
            e0 = self.xas_e0.GetValue()
            if e0 < max(lgroup._xdat_) and e0 > min(lgroup._xdat_):
                preopts['e0'] = e0

        if not self.xas_autostep.IsChecked():
            preopts['step'] = self.xas_step.GetValue()

        dt = debugtime()

        preopts['pre1'] = self.xas_pre1.GetValue()
        preopts['pre2'] = self.xas_pre2.GetValue()
        preopts['norm1'] = self.xas_nor1.GetValue()
        preopts['norm2'] = self.xas_nor2.GetValue()

        preopts['nvict'] = self.xas_vict.GetSelection()
        preopts['nnorm'] = self.xas_nnor.GetSelection()

        preopts['make_flat'] = 'False'
        preopts['group'] = gname
        preopts = ", ".join(["%s=%s" % (k, v) for k, v in preopts.items()])

        preedge_cmd = "pre_edge(%s._xdat_, %s._ydat_, %s)"
        self.larch(preedge_cmd % (gname, gname, preopts))
        if self.xas_autoe0.IsChecked():
            self.xas_e0.SetValue(lgroup.e0)
        if self.xas_autostep.IsChecked():
            self.xas_step.SetValue(lgroup.edge_step)

        details_group = lgroup
        try:
            details_group = lgroup.pre_edge_details
        except:
            pass

        self.xas_pre1.SetValue(details_group.pre1)
        self.xas_pre2.SetValue(details_group.pre2)
        self.xas_nor1.SetValue(details_group.norm1)
        self.xas_nor2.SetValue(details_group.norm2)

        popts1 = dict(style='solid', linewidth=3, marker='None', markersize=4)
        popts2 = dict(style='short dashed',
                      linewidth=2,
                      zorder=-5,
                      marker='None',
                      markersize=4)
        poptsd = dict(style='solid',
                      linewidth=2,
                      zorder=-5,
                      side='right',
                      y2label='derivative',
                      marker='None',
                      markersize=4)

        lgroup.plot_yarrays = [(lgroup._ydat_, popts1, lgroup.plot_ylabel)]
        y4e0 = lgroup._ydat_
        if out.startswith('raw data with'):
            lgroup.plot_yarrays = [(lgroup._ydat_, popts1, lgroup.plot_ylabel),
                                   (lgroup.pre_edge, popts2, 'pre edge'),
                                   (lgroup.post_edge, popts2, 'post edge')]
        elif out.startswith('pre'):
            self.larch('%s.pre_edge_sub = %s.norm * %s.edge_step' %
                       (gname, gname, gname))
            lgroup.plot_yarrays = [(lgroup.pre_edge_sub, popts1,
                                    'pre edge subtracted XAFS')]
            y4e0 = lgroup.pre_edge_sub
        elif 'norm' in out and 'deriv' in out:
            lgroup.plot_yarrays = [(lgroup.norm, popts1, 'normalized XAFS'),
                                   (lgroup.dmude, poptsd, 'derivative')]
            y4e0 = lgroup.norm

        elif out.startswith('norm'):
            lgroup.plot_yarrays = [(lgroup.norm, popts1, 'normalized XAFS')]
            y4e0 = lgroup.norm
        elif out.startswith('deriv'):
            lgroup.plot_yarrays = [(lgroup.dmude, popts1, 'derivative')]
            y4e0 = lgroup.dmude

        lgroup.plot_ymarkers = []
        if self.xas_showe0.IsChecked():
            ie0 = index_of(lgroup._xdat_, lgroup.e0)
            lgroup.plot_ymarkers = [(lgroup.e0, y4e0[ie0], {'label': 'e0'})]
        return
Example #14
    def xas_process(self, gname, new_mu=False, **kws):
        """ process (pre-edge/normalize) XAS data from XAS form, overwriting
        larch group '_y1_' attribute to be plotted
        """

        out = self.xas_op.GetStringSelection().lower() # raw, pre, norm, flat
        preopts = {'group': gname, 'e0': None}

        lgroup = getattr(self.larch.symtable, gname)
        dtcorr = self.dtcorr.IsChecked()
        if new_mu:
            try:
                del lgroup.e0, lgroup.edge_step
            except:
                pass

        if not self.xas_autoe0.IsChecked():
            e0 = self.xas_e0.GetValue()
            if e0 < max(lgroup._xdat_) and e0 > min(lgroup._xdat_):
                preopts['e0'] = e0

        if not self.xas_autostep.IsChecked():
            preopts['step'] = self.xas_step.GetValue()

        dt = debugtime()

        preopts['pre1']  = self.xas_pre1.GetValue()
        preopts['pre2']  = self.xas_pre2.GetValue()
        preopts['norm1'] = self.xas_nor1.GetValue()
        preopts['norm2'] = self.xas_nor2.GetValue()

        preopts['nvict'] = self.xas_vict.GetSelection()
        preopts['nnorm'] = self.xas_nnor.GetSelection()

        preopts['make_flat'] = 'False'
        preopts['group'] = gname
        preopts = ", ".join(["%s=%s" %(k, v) for k,v in preopts.items()])

        preedge_cmd = "pre_edge(%s._xdat_, %s._ydat_, %s)"
        self.larch(preedge_cmd % (gname, gname, preopts))
        if self.xas_autoe0.IsChecked():
            self.xas_e0.SetValue(lgroup.e0)
        if self.xas_autostep.IsChecked():
            self.xas_step.SetValue(lgroup.edge_step)

        self.xas_pre1.SetValue(lgroup.pre1)
        self.xas_pre2.SetValue(lgroup.pre2)
        self.xas_nor1.SetValue(lgroup.norm1)
        self.xas_nor2.SetValue(lgroup.norm2)

        popts1 = dict(style='solid', linewidth=3,
                      marker='None', markersize=4)
        popts2 = dict(style='short dashed', linewidth=2, zorder=-5,
                      marker='None', markersize=4)
        poptsd = dict(style='solid', linewidth=2, zorder=-5,
                      side='right',  y2label='derivative',
                      marker='None', markersize=4)

        lgroup.plot_yarrays = [(lgroup._ydat_, popts1, lgroup.plot_ylabel)]
        y4e0 = lgroup._ydat_
        if out.startswith('raw data with'):
            lgroup.plot_yarrays = [(lgroup._ydat_,    popts1, lgroup.plot_ylabel),
                                   (lgroup.pre_edge,  popts2, 'pre edge'),
                                   (lgroup.post_edge, popts2, 'post edge')]
        elif out.startswith('pre'):
            self.larch('%s.pre_edge_sub = %s.norm * %s.edge_step' %
                       (gname, gname, gname))
            lgroup.plot_yarrays = [(lgroup.pre_edge_sub, popts1,
                                    'pre edge subtracted XAFS')]
            y4e0 = lgroup.pre_edge_sub
        elif 'norm' in out and 'deriv' in out:
            lgroup.plot_yarrays = [(lgroup.norm, popts1, 'normalized XAFS'),
                                   (lgroup.dmude, poptsd, 'derivative')]
            y4e0 = lgroup.norm

        elif out.startswith('norm'):
            lgroup.plot_yarrays = [(lgroup.norm, popts1, 'normalized XAFS')]
            y4e0 = lgroup.norm
        elif out.startswith('deriv'):
            lgroup.plot_yarrays = [(lgroup.dmude, popts1, 'derivative')]
            y4e0 = lgroup.dmude

        lgroup.plot_ymarkers = []
        if self.xas_showe0.IsChecked():
            ie0 = index_of(lgroup._xdat_, lgroup.e0)
            lgroup.plot_ymarkers = [(lgroup.e0, y4e0[ie0], {'label': 'e0'})]
        return
Example #15
def mback(energy, mu, group=None, order=3, z=None, edge='K', e0=None, emin=None, emax=None,
          whiteline=None, leexiang=False, tables='chantler', fit_erfc=False, return_f1=False,
          _larch=None):
    """
    Match mu(E) data for tabulated f''(E) using the MBACK algorithm and,
    optionally, the Lee & Xiang extension

    Arguments:
      energy, mu:    arrays of energy and mu(E)
      order:         order of polynomial [3]
      group:         output group (and input group for e0)
      z:             Z number of absorber
      edge:          absorption edge (K, L3)
      e0:            edge energy
      emin:          beginning energy for fit
      emax:          ending energy for fit
      whiteline:     exclusion zone around white lines
      leexiang:      flag to use the Lee & Xiang extension
      tables:        'chantler' (default) or 'cl'
      fit_erfc:      True to float parameters of error function
      return_f1:     True to put the f1 array in the group

    Returns:
      group.f2:      tabulated f2(E)
      group.f1:      tabulated f1(E) (if return_f1 is True)
      group.fpp:     matched data
      group.mback_params:  Group of parameters for the minimization

    References:
      * MBACK (Weng, Waldo, Penner-Hahn): http://dx.doi.org/10.1086/303711
      * Lee and Xiang: http://dx.doi.org/10.1088/0004-637X/702/2/970
      * Cromer-Liberman: http://dx.doi.org/10.1063/1.1674266
      * Chantler: http://dx.doi.org/10.1063/1.555974
    """
    order=int(order)
    if order < 1: order = 1 # set order of polynomial
    if order > MAXORDER: order = MAXORDER

    ### implement the First Argument Group convention
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='mback')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    group = set_xafsGroup(group, _larch=_larch)

    if e0 is None:              # need to run find_e0:
        e0 = xray_edge(z, edge, _larch=_larch)[0]
    if e0 is None:
        e0 = group.e0
    if e0 is None:
        find_e0(energy, mu, group=group)


    ### theta is an array used to exclude the regions <emin, >emax, and
    ### around white lines, theta=0.0 in excluded regions, theta=1.0 elsewhere
    (i1, i2) = (0, len(energy)-1)
    if emin is not None: i1 = index_of(energy, emin)
    if emax is not None: i2 = index_of(energy, emax)
    theta = np.ones(len(energy)) # default: 1 throughout
    theta[0:i1]  = 0
    theta[i2:-1] = 0
    if whiteline:
        pre     = 1.0*(energy<e0)
        post    = 1.0*(energy>e0+float(whiteline))
        theta   = theta * (pre + post)
    if edge.lower().startswith('l'):
        l2      = xray_edge(z, 'L2', _larch=_larch)[0]
        l2_pre  = 1.0*(energy<l2)
        l2_post = 1.0*(energy>l2+float(whiteline))
        theta   = theta * (l2_pre + l2_post)


    ## this is used to weight the pre- and post-edge differently as
    ## defined in the MBACK paper
    weight1 = 1*(energy<e0)
    weight2 = 1*(energy>e0)
    weight  = np.sqrt(sum(weight1))*weight1 + np.sqrt(sum(weight2))*weight2


    ## get the f'' function from CL or Chantler
    if tables.lower() == 'chantler':
        f1 = f1_chantler(z, energy, _larch=_larch)
        f2 = f2_chantler(z, energy, _larch=_larch)
    else:
        (f1, f2) = f1f2(z, energy, edge=edge, _larch=_larch)
    group.f2=f2
    if return_f1: group.f1=f1

    n = edge
    if edge.lower().startswith('l'): n = 'L'
    params = Group(s      = Parameter(1, vary=True, _larch=_larch),     # scale of data
                   xi     = Parameter(50, vary=fit_erfc, min=0, _larch=_larch), # width of erfc
                   em     = Parameter(xray_line(z, n, _larch=_larch)[0], vary=False, _larch=_larch), # erfc centroid
                   e0     = Parameter(e0, vary=False, _larch=_larch),   # abs. edge energy
                   ## various arrays need by the objective function
                   en     = energy,
                   mu     = mu,
                   f2     = group.f2,
                   weight = weight,
                   theta  = theta,
                   leexiang = leexiang,
                   _larch = _larch)
    if fit_erfc:
        params.a = Parameter(1, vary=True,  _larch=_larch) # amplitude of erfc
    else:
        params.a = Parameter(0, vary=False, _larch=_larch) # amplitude of erfc

    for i in range(order): # polynomial coefficients
        setattr(params, 'c%d' % i, Parameter(0, vary=True, _larch=_larch))

    fit = Minimizer(match_f2, params, _larch=_larch, toler=1.e-5)
    fit.leastsq()

    eoff = energy - params.e0.value
    normalization_function = params.a.value*erfc((energy-params.em.value)/params.xi.value) + params.c0.value
    for i in range(MAXORDER):
        j = i+1
        attr = 'c%d' % j
        if hasattr(params, attr):
            normalization_function  = normalization_function + getattr(getattr(params, attr), 'value') * eoff**j

    group.fpp = params.s*mu - normalization_function
    group.mback_params = params
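
A hedged call sketch for mback(); the absorber (z=29, Cu K edge) is illustrative, and the f'/f'' lookup helpers (f1_chantler, f2_chantler, xray_line, ...) are assumed available as in larch.

grp = Group()
mback(energy, mu, group=grp, z=29, edge='K', order=3,
      fit_erfc=True, return_f1=True, _larch=_larch)
print(grp.fpp[:5])                   # matched data
print(grp.mback_params.s.value)      # fitted scale of the data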
Example #16
def autobk(energy, mu=None, group=None, rbkg=1, nknots=None, e0=None,
           edge_step=None, kmin=0, kmax=None, kweight=1, dk=0,
           win='hanning', k_std=None, chi_std=None, nfft=2048, kstep=0.05,
           pre_edge_kws=None, nclamp=4, clamp_lo=1, clamp_hi=1,
           calc_uncertainties=False, _larch=None, **kws):
    """Use Autobk algorithm to remove XAFS background

    Parameters:
    -----------
      energy:    1-d array of x-ray energies, in eV, or group
      mu:        1-d array of mu(E)
      group:     output group (and input group for e0 and edge_step).
      rbkg:      distance (in Ang) for chi(R) above
                 which the signal is ignored. Default = 1.
      e0:        edge energy, in eV.  If None, it will be determined.
      edge_step: edge step.  If None, it will be determined.
      pre_edge_kws:  keyword arguments to pass to pre_edge()
      nknots:    number of knots in spline.  If None, it will be determined.
      kmin:      minimum k value   [0]
      kmax:      maximum k value   [full data range].
      kweight:   k weight for FFT.  [1]
      dk:        FFT window window parameter.  [0]
      win:       FFT window function name.     ['hanning']
      nfft:      array size to use for FFT [2048]
      kstep:     k step size to use for FFT [0.05]
      k_std:     optional k array for standard chi(k).
      chi_std:   optional chi array for standard chi(k).
      nclamp:    number of energy end-points for clamp [4]
      clamp_lo:  weight of low-energy clamp [1]
      clamp_hi:  weight of high-energy clamp [1]
      calc_uncertainties:  Flag to calculate uncertainties in
                            mu_0(E) and chi(k) [False]

    Output arrays are written to the provided group.

    Follows the 'First Argument Group' convention.
    """
    msg = _larch.writer.write
    if 'kw' in kws:
        kweight = kws.pop('kw')
    if len(kws) > 0:
        msg('Unrecognized arguments for autobk():\n')
        msg('    %s\n' % (', '.join(kws.keys())))
        return

    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='autobk')

    energy = remove_dups(energy)
    # if e0 or edge_step are not specified, get them, either from the
    # passed-in group or from running pre_edge()
    group = set_xafsGroup(group, _larch=_larch)

    if edge_step is None and isgroup(group, 'edge_step'):
        edge_step = group.edge_step
    if e0 is None and isgroup(group, 'e0'):
        e0 = group.e0
    if e0 is None or edge_step is None:
        # need to run pre_edge:
        pre_kws = dict(nnorm=3, nvict=0, pre1=None,
                       pre2=-50., norm1=100., norm2=None)
        if pre_edge_kws is not None:
            pre_kws.update(pre_edge_kws)
        pre_edge(energy, mu, group=group, _larch=_larch, **pre_kws)
        if e0 is None:
            e0 = group.e0
        if edge_step is None:
            edge_step = group.edge_step
    if e0 is None or edge_step is None:
        msg('autobk() could not determine e0 or edge_step: try running pre_edge first\n')
        return

    # get array indices for rbkg and e0: irbkg, ie0
    ie0 = index_of(energy, e0)
    rgrid = np.pi/(kstep*nfft)
    if rbkg < 2*rgrid: rbkg = 2*rgrid
    irbkg = int(1.01 + rbkg/rgrid)

    # save ungridded k (kraw) and gridded k (kout)
    # and ftwin (*k-weighting) for FT in residual
    enpe = energy[ie0:] - e0
    kraw = np.sign(enpe)*np.sqrt(ETOK*abs(enpe))
    if kmax is None:
        kmax = max(kraw)
    else:
        kmax = max(0, min(max(kraw), kmax))
    kout  = kstep * np.arange(int(1.01+kmax/kstep), dtype='float64')
    iemax = min(len(energy), 2+index_of(energy, e0+kmax*kmax/ETOK)) - 1

    # interpolate provided chi(k) onto the kout grid
    if chi_std is not None and k_std is not None:
        chi_std = np.interp(kout, k_std, chi_std)
    # pre-load FT window
    ftwin = kout**kweight * ftwindow(kout, xmin=kmin, xmax=kmax,
                                     window=win, dx=dk)
    # calc k-value and initial guess for y-values of spline params
    nspl = max(4, min(128, 2*int(rbkg*(kmax-kmin)/np.pi) + 1))
    spl_y, spl_k, spl_e  = np.zeros(nspl), np.zeros(nspl), np.zeros(nspl)
    for i in range(nspl):
        q  = kmin + i*(kmax-kmin)/(nspl - 1)
        ik = index_nearest(kraw, q)
        i1 = min(len(kraw)-1, ik + 5)
        i2 = max(0, ik - 5)
        spl_k[i] = kraw[ik]
        spl_e[i] = energy[ik+ie0]
        spl_y[i] = (2*mu[ik+ie0] + mu[i1+ie0] + mu[i2+ie0] ) / 4.0

    # get spline representation: knots, coefs, order=3
    # coefs will be varied in fit.
    knots, coefs, order = splrep(spl_k, spl_y)

    # set fit parameters from initial coefficients
    params = Group()
    for i in range(len(coefs)):
        name = FMT_COEF % i
        p = Parameter(coefs[i], name=name, vary=i<len(spl_y))
        p._getval()
        setattr(params, name, p)

    initbkg, initchi = spline_eval(kraw[:iemax-ie0+1], mu[ie0:iemax+1],
                                   knots, coefs, order, kout)

    # do fit
    fit = Minimizer(__resid, params, _larch=_larch, toler=1.e-4,
                    fcn_kws = dict(ncoefs=len(coefs), chi_std=chi_std,
                                   knots=knots, order=order,
                                   kraw=kraw[:iemax-ie0+1],
                                   mu=mu[ie0:iemax+1], irbkg=irbkg, kout=kout,
                                   ftwin=ftwin, kweight=kweight,
                                   nfft=nfft, nclamp=nclamp,
                                   clamp_lo=clamp_lo, clamp_hi=clamp_hi))
    fit.leastsq()

    # write final results
    coefs = [getattr(params, FMT_COEF % i) for i in range(len(coefs))]
    bkg, chi = spline_eval(kraw[:iemax-ie0+1], mu[ie0:iemax+1],
                           knots, coefs, order, kout)
    obkg = np.copy(mu)
    obkg[ie0:ie0+len(bkg)] = bkg

    # outputs to group
    group = set_xafsGroup(group, _larch=_larch)
    group.bkg  = obkg
    group.chie = (mu-obkg)/edge_step
    group.k    = kout
    group.chi  = chi/edge_step

    # now fill in 'autobk_details' group
    params.init_bkg = np.copy(mu)
    params.init_bkg[ie0:ie0+len(bkg)] = initbkg
    params.init_chi = initchi/edge_step
    params.knots_e  = spl_e
    params.knots_y  = np.array([coefs[i] for i in range(nspl)])
    params.init_knots_y = spl_y
    params.nfev = params.fit_details.nfev
    params.kmin = kmin
    params.kmax = kmax  
    group.autobk_details = params

    # uncertainties in mu0 and chi:  fairly slow!!
    if HAS_UNCERTAIN and calc_uncertainties:
        vbest, vstd = [], []
        for n in fit.var_names:
            par = getattr(params, n)
            vbest.append(par.value)
            vstd.append(par.stderr)
        uvars = uncertainties.correlated_values(vbest, params.covar)
        # uncertainty in bkg (aka mu0)
        # note that much of this is working around
        # limitations in the uncertainty package that make it
        #  1. take an argument list (not array)
        #  2. work on returned scalars (but not arrays)
        #  3. not handle kw args and *args well (so use
        #     of global "index" is important here)
        nkx = iemax-ie0 + 1
        def my_dsplev(*args):
            coefs = np.array(args)
            return splev(kraw[:nkx], [knots, coefs, order])[index]
        fdbkg = uncertainties.wrap(my_dsplev)
        dmu0  = [fdbkg(*uvars).std_dev() for index in range(len(bkg))]
        group.delta_bkg = np.zeros(len(mu))
        group.delta_bkg[ie0:ie0+len(bkg)] = np.array(dmu0)

        # uncertainty in chi (see notes above)
        def my_dchi(*args):
            coefs = np.array(args)
            b,chi = spline_eval(kraw[:nkx], mu[ie0:iemax+1],
                                knots, coefs, order, kout)
            return chi[index]
        fdchi = uncertainties.wrap(my_dchi)
        dchi  = [fdchi(*uvars).std_dev() for index in range(len(kout))]
        group.delta_chi = np.array(dchi)/edge_step
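
A hedged sketch of a typical autobk() call after pre_edge(); the rbkg, kweight and clamp values are illustrative, not recommendations, and _larch is again the running larch session.

grp = Group()
pre_edge(energy, mu, group=grp, _larch=_larch)     # supplies e0 and edge_step
autobk(energy, mu, group=grp, rbkg=1.0, kweight=2,
       nclamp=4, clamp_hi=10, _larch=_larch)
print(grp.k[:5], grp.chi[:5])        # chi(k) on a uniform k grid
print(grp.bkg[:5])                   # extracted background mu0(E)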
Example #17
def estimate_noise(k, chi=None, group=None, rmin=15.0, rmax=30.0,
                   kweight=1, kmin=0, kmax=20, dk=4, dk2=None, kstep=0.05,
                   kwindow='kaiser', nfft=2048, _larch=None, **kws):
    """
    estimate noise levels in EXAFS spectrum and estimate highest k
    where data is above the noise level
    Parameters:
    -----------
      k:        1-d array of photo-electron wavenumber in Ang^-1 (or group)
      chi:      1-d array of chi
      group:    output Group  [see Note below]
      rmin:     minimum R value for high-R region of chi(R)
      rmax:     maximum R value for high-R region of chi(R)
      kweight:  exponent for weighting spectra by k**kweight [1]
      kmin:     starting k for FT Window [0]
      kmax:     ending k for FT Window  [20]
      dk:       tapering parameter for FT Window [4]
      dk2:      second tapering parameter for FT Window [None]
      kstep:    value to use for delta_k ( Ang^-1) [0.05]
      kwindow:  name of window type ['kaiser']
      nfft:     value to use for N_fft [2048].

    Returns:
    ---------
      None   -- outputs are written to supplied group.  Values (scalars) written
      to output group:
        epsilon_k     estimated noise in chi(k)
        epsilon_r     estimated noise in chi(R)
        kmax_suggest  highest estimated k value where |chi(k)| > epsilon_k

    Notes:
    -------

     1. This method uses the high-R portion of chi(R) as a measure of the noise
        level in the chi(R) data and uses Parseval's theorem to convert this noise
        level to that in chi(k).  This method implicitly assumes that there is no
        signal in the high-R portion of the spectrum, and that the noise in the
        spectrum is "white" (independent of R).  Each of these assumptions can be
        questioned.
     2. The estimate for 'kmax_suggest' has a tendency to be fair but pessimistic
        in how far out the chi(k) data goes before being dominated by noise.
     3. Follows the 'First Argument Group' convention, so that you can either
        specify all of (an array for 'k', an array for 'chi', optional output Group)
        OR pass a group with 'k' and 'chi' as the first argument
    """
    k, chi, group = parse_group_args(k, members=('k', 'chi'),
                                     defaults=(chi,), group=group,
                                     fcn_name='estimate_noise')



    # save _sys.xafsGroup -- we want to NOT write to it here!
    savgroup = set_xafsGroup(None, _larch=_larch)
    tmpgroup = Group()
    rmax_out = min(10*pi, rmax+2)

    xftf(k, chi, kmin=kmin, kmax=kmax, rmax_out=rmax_out,
         kweight=kweight, dk=dk, dk2=dk2, kwindow=kwindow,
         nfft=nfft, kstep=kstep, group=tmpgroup, _larch=_larch)

    chir  = tmpgroup.chir
    rstep = tmpgroup.r[1] - tmpgroup.r[0]

    irmin = int(0.01 + rmin/rstep)
    irmax = min(nfft/2,  int(1.01 + rmax/rstep))
    highr = realimag(chir[irmin:irmax])

    # get average of window function value, scale eps_r scale by this
    # this is imperfect, but improves the result.
    kwin_ave = tmpgroup.kwin.sum()*kstep/(kmax-kmin)
    eps_r = sqrt((highr*highr).sum() / len(highr)) / kwin_ave

    # use Parseval's theorem to convert epsilon_r to epsilon_k,
    # compensating for kweight
    w = 2 * kweight + 1
    scale = sqrt((2*pi*w)/(kstep*(kmax**w - kmin**w)))
    eps_k = scale*eps_r

    # do reverse FT to get chiq array
    xftr(tmpgroup.r, tmpgroup.chir, group=tmpgroup, rmin=0.5, rmax=9.5,
         dr=1.0, window='parzen', nfft=nfft, kstep=kstep, _larch=_larch)

    # sets kmax_suggest to the largest k value for which
    # | chi(q) / k**kweight| > epsilon_k
    iq0 = index_of(tmpgroup.q, (kmax+kmin)/2.0)
    tst = tmpgroup.chiq_mag[iq0:] / ( tmpgroup.q[iq0:])**kweight
    kmax_suggest = tmpgroup.q[iq0 + where(tst < eps_k)[0][0]]

    # restore original _sys.xafsGroup, set output variables
    _larch.symtable._sys.xafsGroup = savgroup
    group = set_xafsGroup(group, _larch=_larch)
    group.epsilon_k = eps_k
    group.epsilon_r = eps_r
    group.kmax_suggest = kmax_suggest
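
And a hedged sketch for estimate_noise(), run on the chi(k) arrays produced by the autobk() sketch above; the kmin/kmax window values are illustrative.

noise = Group()
estimate_noise(grp.k, grp.chi, group=noise, kmin=2, kmax=14,
               kweight=2, dk=4, kwindow='kaiser', _larch=_larch)
print(noise.epsilon_k, noise.epsilon_r)     # noise in chi(k) and chi(R)
print(noise.kmax_suggest)                   # k beyond which |chi| ~ noise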
Example #18
    def xas_process(self, gname, new_mu=False, **kws):
        """ process (pre-edge/normalize) XAS data from XAS form, overwriting
        larch group '_y1_' attribute to be plotted
        """
        dgroup = getattr(self.larch.symtable, gname)

        if not hasattr(dgroup, 'energy'):
            dgroup.energy = dgroup._xdat
        if not hasattr(dgroup, 'mu'):
            dgroup.mu = dgroup._ydat

        e0 = None
        if not self.xas_autoe0.IsChecked():
            _e0 = self.xas_e0.GetValue()
            if _e0 < max(dgroup.energy) and _e0 > min(dgroup.energy):
                e0 = float(_e0)

        preopts = {'e0': e0}
        if not self.xas_autostep.IsChecked():
            preopts['step'] = self.xas_step.GetValue()
        preopts['pre1'] = self.xas_pre1.GetValue()
        preopts['pre2'] = self.xas_pre2.GetValue()
        preopts['norm1'] = self.xas_nor1.GetValue()
        preopts['norm2'] = self.xas_nor2.GetValue()
        preopts['nvict'] = self.xas_vict.GetSelection()
        preopts['nnorm'] = self.xas_nnor.GetSelection()
        preopts['make_flat'] = False
        preopts['_larch'] = self.larch

        pre_edge(dgroup, **preopts)
        dgroup.pre_edge_details.e0 = dgroup.e0
        dgroup.pre_edge_details.edge_step = dgroup.edge_step
        dgroup.pre_edge_details.auto_e0 = self.xas_autoe0.IsChecked()
        dgroup.pre_edge_details.show_e0 = self.xas_showe0.IsChecked()
        dgroup.pre_edge_details.auto_step = self.xas_autostep.IsChecked()

        if self.xas_autoe0.IsChecked():
            self.xas_e0.SetValue(dgroup.e0)
        if self.xas_autostep.IsChecked():
            self.xas_step.SetValue(dgroup.edge_step)

        details_group = dgroup.pre_edge_details
        self.xas_pre1.SetValue(details_group.pre1)
        self.xas_pre2.SetValue(details_group.pre2)
        self.xas_nor1.SetValue(details_group.norm1)
        self.xas_nor2.SetValue(details_group.norm2)

        dgroup.orig_ylabel = dgroup.plot_ylabel
        dgroup.plot_ylabel = '$\mu$'
        dgroup.plot_y2label = None
        dgroup.plot_xlabel = '$E \,\mathrm{(eV)}$'
        dgroup.plot_yarrays = [(dgroup.mu, PLOTOPTS_1, dgroup.plot_ylabel)]
        y4e0 = dgroup.mu

        out = self.xas_op.GetStringSelection().lower()  # raw, pre, norm, flat
        if out.startswith('raw data + pre'):
            dgroup.plot_yarrays = [(dgroup.mu, PLOTOPTS_1, '$\mu$'),
                                   (dgroup.pre_edge, PLOTOPTS_2, 'pre edge'),
                                   (dgroup.post_edge, PLOTOPTS_2, 'post edge')]
        elif out.startswith('pre'):
            dgroup.pre_edge_sub = dgroup.norm * dgroup.edge_step
            dgroup.plot_yarrays = [(dgroup.pre_edge_sub, PLOTOPTS_1,
                                    'pre-edge subtracted $\mu$')]
            y4e0 = dgroup.pre_edge_sub
            dgroup.plot_ylabel = 'pre-edge subtracted $\mu$'
        elif 'norm' in out and 'deriv' in out:
            dgroup.plot_yarrays = [(dgroup.norm, PLOTOPTS_1,
                                    'normalized $\mu$'),
                                   (dgroup.dmude, PLOTOPTS_D, '$d\mu/dE$')]
            y4e0 = dgroup.norm
            dgroup.plot_ylabel = 'normalized $\mu$'
            dgroup.plot_y2label = '$d\mu/dE$'
        elif out.startswith('norm'):
            dgroup.plot_yarrays = [(dgroup.norm, PLOTOPTS_1,
                                    'normalized $\mu$')]
            y4e0 = dgroup.norm
            dgroup.plot_ylabel = 'normalized $\mu$'
        elif out.startswith('deriv'):
            dgroup.plot_yarrays = [(dgroup.dmude, PLOTOPTS_1, '$d\mu/dE$')]
            y4e0 = dgroup.dmude
            dgroup.plot_ylabel = '$d\mu/dE$'

        dgroup.plot_ymarkers = []
        if self.xas_showe0.IsChecked():
            ie0 = index_of(dgroup._xdat, dgroup.e0)
            dgroup.plot_ymarkers = [(dgroup.e0, y4e0[ie0], {'label': 'e0'})]
        return
Example #19
def pre_edge(energy,
             mu=None,
             group=None,
             e0=None,
             step=None,
             nnorm=3,
             nvict=0,
             pre1=None,
             pre2=-50,
             norm1=100,
             norm2=None,
             make_flat=True,
             _larch=None):
    """pre edge subtraction, normalization for XAFS

    This performs a number of steps:
       1. determine E0 (if not supplied) from max of deriv(mu)
       2. fit a line or polynomial to the region below the edge
       3. fit a polynomial to the region above the edge
       4. extrapolate the two curves to E0 to determine the edge jump

    Arguments
    ----------
    energy:  array of x-ray energies, in eV, or group (see note)
    mu:      array of mu(E)
    group:   output group
    e0:      edge energy, in eV.  If None, it will be determined here.
    step:    edge jump.  If None, it will be determined here.
    pre1:    low E range (relative to E0) for pre-edge fit
    pre2:    high E range (relative to E0) for pre-edge fit
    nvict:   energy exponent to use for pre-edge fit.  See Note
    norm1:   low E range (relative to E0) for post-edge fit
    norm2:   high E range (relative to E0) for post-edge fit
    nnorm:   degree of polynomial (ie, nnorm+1 coefficients will be found) for
             post-edge normalization curve. Default=3 (cubic), max=5
    make_flat: boolean (Default True) to calculate flattened output.


    Returns
    -------
      None

    The following attributes will be written to the output group:
        e0          energy origin
        edge_step   edge step
        norm        normalized mu(E)
        flat        flattened, normalized mu(E)
        pre_edge    determined pre-edge curve
        post_edge   determined post-edge, normalization curve
        dmude       derivative of mu(E)

    (if the output group is None, _sys.xafsGroup will be written to)

    Notes
    -----
     1 nvict gives an exponent to the energy term for the fits to the pre-edge
       and the post-edge region.  For the pre-edge, a line (m * energy + b) is
       fit to mu(energy)*energy**nvict over the pre-edge region,
       energy=[e0+pre1, e0+pre2].  For the post-edge, a polynomial of order
       nnorm will be fit to mu(energy)*energy**nvict of the post-edge region
       energy=[e0+norm1, e0+norm2].

     2 If the first argument is a Group, it must contain 'energy' and 'mu'.
       If it exists, group.e0 will be used as e0.
       See First Argument Group in Documentation
    """

    energy, mu, group = parse_group_args(energy,
                                         members=('energy', 'mu'),
                                         defaults=(mu, ),
                                         group=group,
                                         fcn_name='pre_edge')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    pre_dat = preedge(energy,
                      mu,
                      e0=e0,
                      step=step,
                      nnorm=nnorm,
                      nvict=nvict,
                      pre1=pre1,
                      pre2=pre2,
                      norm1=norm1,
                      norm2=norm2)

    group = set_xafsGroup(group, _larch=_larch)

    e0 = pre_dat['e0']
    norm = pre_dat['norm']
    norm1 = pre_dat['norm1']
    norm2 = pre_dat['norm2']
    # generate flattened spectra, by fitting a quadratic to .norm
    # and removing that.
    flat = norm
    ie0 = index_nearest(energy, e0)
    p1 = index_of(energy, norm1 + e0)
    p2 = index_nearest(energy, norm2 + e0)
    if p2 - p1 < 2:
        p2 = min(len(energy), p1 + 2)

    if make_flat and p2 - p1 > 4:
        enx, mux = remove_nans2(energy[p1:p2], norm[p1:p2])
        # enx, mux = (energy[p1:p2], norm[p1:p2])
        fpars = Group(c0=Parameter(0, vary=True),
                      c1=Parameter(0, vary=True),
                      c2=Parameter(0, vary=True),
                      en=enx,
                      mu=mux)
        fit = Minimizer(flat_resid, fpars, _larch=_larch, toler=1.e-5)
        try:
            fit.leastsq()
        except (TypeError, ValueError):
            pass
        fc0, fc1, fc2 = fpars.c0.value, fpars.c1.value, fpars.c2.value
        flat_diff = fc0 + energy * (fc1 + energy * fc2)
        flat = norm - flat_diff + flat_diff[ie0]
        flat[:ie0] = norm[:ie0]

    group.e0 = e0
    group.norm = norm
    group.flat = flat
    group.dmude = np.gradient(mu) / np.gradient(energy)
    group.edge_step = pre_dat['edge_step']
    group.pre_edge = pre_dat['pre_edge']
    group.post_edge = pre_dat['post_edge']

    group.pre_edge_details = Group()
    group.pre_edge_details.pre1 = pre_dat['pre1']
    group.pre_edge_details.pre2 = pre_dat['pre2']
    group.pre_edge_details.nnorm = pre_dat['nnorm']
    group.pre_edge_details.norm1 = pre_dat['norm1']
    group.pre_edge_details.norm2 = pre_dat['norm2']
    group.pre_edge_details.pre_slope = pre_dat['precoefs'][0]
    group.pre_edge_details.pre_offset = pre_dat['precoefs'][1]

    for i in range(MAX_NNORM):
        if hasattr(group, 'norm_c%i' % i):
            delattr(group, 'norm_c%i' % i)
    for i, c in enumerate(pre_dat['norm_coefs']):
        setattr(group.pre_edge_details, 'norm_c%i' % i, c)
    return
Example #20
def mback(energy,
          mu,
          group=None,
          order=3,
          z=None,
          edge='K',
          e0=None,
          emin=None,
          emax=None,
          whiteline=None,
          leexiang=False,
          tables='cl',
          fit_erfc=False,
          return_f1=False,
          _larch=None):
    """
    Match mu(E) data for tabulated f"(E) using the MBACK algorithm and,
    optionally, the Lee & Xiang extension

    Arguments:
      energy, mu:    arrays of energy and mu(E)
      order:         order of polynomial [3]
      group:         output group (and input group for e0)
      z:             Z number of absorber
      edge:          absorption edge (K, L3)
      e0:            edge energy
      emin:          beginning energy for fit
      emax:          ending energy for fit
      whiteline:     exclusion zone around white lines
      leexiang:      flag to use the Lee & Xiang extension
      tables:        'cl' or 'chantler'
      fit_erfc:      True to float parameters of error function
      return_f1:     True to put the f1 array in the group

    Returns:
      group.f2:      tabulated f2(E)
      group.f1:      tabulated f1(E) (if return_f1 is True)
      group.fpp:     matched data
      group.mback_params:  Group of parameters for the minimization

    References:
      * MBACK (Weng, Waldo, Penner-Hahn): http://dx.doi.org/10.1086/303711
      * Lee and Xiang: http://dx.doi.org/10.1088/0004-637X/702/2/970
      * Cromer-Liberman: http://dx.doi.org/10.1063/1.1674266
      * Chantler: http://dx.doi.org/10.1063/1.555974
    """
    order = int(order)
    if order < 1: order = 1  # set order of polynomial
    if order > MAXORDER: order = MAXORDER

    ### implement the First Argument Group convention
    energy, mu, group = parse_group_args(energy,
                                         members=('energy', 'mu'),
                                         defaults=(mu, ),
                                         group=group,
                                         fcn_name='mback')
    group = set_xafsGroup(group, _larch=_larch)

    if e0 is None:  # need to run find_e0:
        e0 = xray_edge(z, edge, _larch=_larch)[0]
    if e0 is None:
        e0 = getattr(group, 'e0', None)
    if e0 is None:
        e0 = find_e0(energy, mu, group=group)

    ### theta is an array used to exclude the regions <emin, >emax, and
    ### around white lines, theta=0.0 in excluded regions, theta=1.0 elsewhere
    (i1, i2) = (0, len(energy) - 1)
    if emin is not None: i1 = index_of(energy, emin)
    if emax is not None: i2 = index_of(energy, emax)
    theta = np.ones(len(energy))  # default: 1 throughout
    theta[0:i1] = 0
    theta[i2:-1] = 0
    if whiteline:
        pre = 1.0 * (energy < e0)
        post = 1.0 * (energy > e0 + float(whiteline))
        theta = theta * (pre + post)
        if edge.lower().startswith('l'):
            l2 = xray_edge(z, 'L2', _larch=_larch)[0]
            l2_pre = 1.0 * (energy < l2)
            l2_post = 1.0 * (energy > l2 + float(whiteline))
            theta = theta * (l2_pre + l2_post)

    ## this is used to weight the pre- and post-edge differently as
    ## defined in the MBACK paper
    weight1 = 1 * (energy < e0)
    weight2 = 1 * (energy > e0)
    weight = np.sqrt(sum(weight1)) * weight1 + np.sqrt(sum(weight2)) * weight2

    ## get the f'' function from CL or Chantler
    if tables.lower() == 'chantler':
        f1 = f1_chantler(z, energy, _larch=_larch)
        f2 = f2_chantler(z, energy, _larch=_larch)
    else:
        (f1, f2) = f1f2(z, energy, edge=edge, _larch=_larch)
    group.f2 = f2
    if return_f1: group.f1 = f1

    n = edge
    if edge.lower().startswith('l'): n = 'L'
    params = Group(
        s=Parameter(1, vary=True, _larch=_larch),  # scale of data
        xi=Parameter(50, vary=fit_erfc, min=0, _larch=_larch),  # width of erfc
        em=Parameter(xray_line(z, n, _larch=_larch)[0],
                     vary=False,
                     _larch=_larch),  # erfc centroid
        e0=Parameter(e0, vary=False, _larch=_larch),  # abs. edge energy
        ## various arrays need by the objective function
        en=energy,
        mu=mu,
        f2=group.f2,
        weight=weight,
        theta=theta,
        leexiang=leexiang,
        _larch=_larch)
    if fit_erfc:
        params.a = Parameter(1, vary=True, _larch=_larch)  # amplitude of erfc
    else:
        params.a = Parameter(0, vary=False, _larch=_larch)  # amplitude of erfc

    for i in range(order):  # polynomial coefficients
        setattr(params, 'c%d' % i, Parameter(0, vary=True, _larch=_larch))

    fit = Minimizer(match_f2, params, _larch=_larch, toler=1.e-5)
    fit.leastsq()

    eoff = energy - params.e0.value
    normalization_function = params.a.value * erfc(
        (energy - params.em.value) / params.xi.value) + params.c0.value
    for i in range(MAXORDER):
        j = i + 1
        attr = 'c%d' % j
        if hasattr(params, attr):
            normalization_function = normalization_function + getattr(
                getattr(params, attr), 'value') * eoff**j

    group.fpp = params.s * mu - normalization_function
    group.mback_params = params
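The theta exclusion mask and the square-root-of-counts weighting built above can be reproduced with plain numpy. The sketch below follows the same bookkeeping under simplified assumptions (no L2-edge handling; the function name is invented for illustration):

import numpy as np

def mback_mask_and_weight(energy, e0, emin=None, emax=None, whiteline=None):
    """Sketch: exclusion mask (theta) and pre/post-edge weights for MBACK."""
    npts = len(energy)
    i1 = 0 if emin is None else np.argmin(abs(energy - emin))
    i2 = npts - 1 if emax is None else np.argmin(abs(energy - emax))
    theta = np.ones(npts)
    theta[:i1] = 0.0
    theta[i2:] = 0.0
    if whiteline is not None:
        # exclude a window just above the edge (the white-line region)
        theta *= 1.0 * ((energy < e0) | (energy > e0 + float(whiteline)))
    # weight pre- and post-edge points by sqrt of the number of points in each
    pre = 1.0 * (energy < e0)
    post = 1.0 * (energy > e0)
    weight = np.sqrt(pre.sum()) * pre + np.sqrt(post.sum()) * post
    return theta, weight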
Exemple #21
def autobk(energy,
           mu=None,
           group=None,
           rbkg=1,
           nknots=None,
           e0=None,
           edge_step=None,
           kmin=0,
           kmax=None,
           kweight=1,
           dk=0,
           win='hanning',
           k_std=None,
           chi_std=None,
           nfft=2048,
           kstep=0.05,
           pre_edge_kws=None,
           nclamp=4,
           clamp_lo=1,
           clamp_hi=1,
           calc_uncertainties=True,
           err_sigma=1,
           _larch=None,
           **kws):
    """Use Autobk algorithm to remove XAFS background

    Parameters:
    -----------
      energy:    1-d array of x-ray energies, in eV, or group
      mu:        1-d array of mu(E)
      group:     output group (and input group for e0 and edge_step).
      rbkg:      distance (in Ang) for chi(R) above
                 which the signal is ignored. Default = 1.
      e0:        edge energy, in eV.  If None, it will be determined.
      edge_step: edge step.  If None, it will be determined.
      pre_edge_kws:  keyword arguments to pass to pre_edge()
      nknots:    number of knots in spline.  If None, it will be determined.
      kmin:      minimum k value   [0]
      kmax:      maximum k value   [full data range].
      kweight:   k weight for FFT.  [1]
      dk:        FFT window parameter.  [0]
      win:       FFT window function name.     ['hanning']
      nfft:      array size to use for FFT [2048]
      kstep:     k step size to use for FFT [0.05]
      k_std:     optional k array for standard chi(k).
      chi_std:   optional chi array for standard chi(k).
      nclamp:    number of energy end-points for clamp [4]
      clamp_lo:  weight of low-energy clamp [1]
      clamp_hi:  weight of high-energy clamp [1]
      calc_uncertainties:  Flag to calculate uncertainties in
                            mu_0(E) and chi(k) [True]
      err_sigma: sigma level for uncertainties in mu_0(E) and chi(k) [1]

    Output arrays are written to the provided group.

    Follows the 'First Argument Group' convention.
    """
    msg = _larch.writer.write
    if 'kw' in kws:
        kweight = kws.pop('kw')
    if len(kws) > 0:
        msg('Unrecognized arguments for autobk():\n')
        msg('    %s\n' % (', '.join(kws.keys())))
        return
    energy, mu, group = parse_group_args(energy,
                                         members=('energy', 'mu'),
                                         defaults=(mu, ),
                                         group=group,
                                         fcn_name='autobk')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    energy = remove_dups(energy)
    # if e0 or edge_step are not specified, get them, either from the
    # passed-in group or from running pre_edge()
    group = set_xafsGroup(group, _larch=_larch)

    if edge_step is None and isgroup(group, 'edge_step'):
        edge_step = group.edge_step
    if e0 is None and isgroup(group, 'e0'):
        e0 = group.e0
    if e0 is None or edge_step is None:
        # need to run pre_edge:
        pre_kws = dict(nnorm=3,
                       nvict=0,
                       pre1=None,
                       pre2=-50.,
                       norm1=100.,
                       norm2=None)
        if pre_edge_kws is not None:
            pre_kws.update(pre_edge_kws)
        pre_edge(energy, mu, group=group, _larch=_larch, **pre_kws)
        if e0 is None:
            e0 = group.e0
        if edge_step is None:
            edge_step = group.edge_step
    if e0 is None or edge_step is None:
        msg('autobk() could not determine e0 or edge_step: try running pre_edge() first\n')
        return

    # get array indices for rbkg and e0: irbkg, ie0
    ie0 = index_of(energy, e0)
    rgrid = np.pi / (kstep * nfft)
    if rbkg < 2 * rgrid: rbkg = 2 * rgrid
    irbkg = int(1.01 + rbkg / rgrid)

    # save ungridded k (kraw) and gridded k (kout)
    # and ftwin (*k-weighting) for FT in residual
    enpe = energy[ie0:] - e0
    kraw = np.sign(enpe) * np.sqrt(ETOK * abs(enpe))
    if kmax is None:
        kmax = max(kraw)
    else:
        kmax = max(0, min(max(kraw), kmax))
    kout = kstep * np.arange(int(1.01 + kmax / kstep), dtype='float64')
    iemax = min(len(energy), 2 + index_of(energy, e0 + kmax * kmax / ETOK)) - 1

    # interpolate provided chi(k) onto the kout grid
    if chi_std is not None and k_std is not None:
        chi_std = np.interp(kout, k_std, chi_std)
    # pre-load FT window
    ftwin = kout**kweight * ftwindow(
        kout, xmin=kmin, xmax=kmax, window=win, dx=dk)
    # calc k-value and initial guess for y-values of spline params
    nspl = max(4, min(128, 2 * int(rbkg * (kmax - kmin) / np.pi) + 1))
    spl_y, spl_k, spl_e = np.zeros(nspl), np.zeros(nspl), np.zeros(nspl)
    for i in range(nspl):
        q = kmin + i * (kmax - kmin) / (nspl - 1)
        ik = index_nearest(kraw, q)
        i1 = min(len(kraw) - 1, ik + 5)
        i2 = max(0, ik - 5)
        spl_k[i] = kraw[ik]
        spl_e[i] = energy[ik + ie0]
        spl_y[i] = (2 * mu[ik + ie0] + mu[i1 + ie0] + mu[i2 + ie0]) / 4.0

    # get spline representation: knots, coefs, order=3
    # coefs will be varied in fit.
    knots, coefs, order = splrep(spl_k, spl_y)

    # set fit parameters from initial coefficients
    params = Group()
    for i in range(len(coefs)):
        name = FMT_COEF % i
        p = Parameter(coefs[i], name=name, vary=i < len(spl_y))
        p._getval()
        setattr(params, name, p)

    initbkg, initchi = spline_eval(kraw[:iemax - ie0 + 1], mu[ie0:iemax + 1],
                                   knots, coefs, order, kout)

    # do fit
    fit = Minimizer(__resid,
                    params,
                    _larch=_larch,
                    toler=1.e-4,
                    fcn_kws=dict(ncoefs=len(coefs),
                                 chi_std=chi_std,
                                 knots=knots,
                                 order=order,
                                 kraw=kraw[:iemax - ie0 + 1],
                                 mu=mu[ie0:iemax + 1],
                                 irbkg=irbkg,
                                 kout=kout,
                                 ftwin=ftwin,
                                 kweight=kweight,
                                 nfft=nfft,
                                 nclamp=nclamp,
                                 clamp_lo=clamp_lo,
                                 clamp_hi=clamp_hi))
    fit.leastsq()

    # write final results
    coefs = [getattr(params, FMT_COEF % i) for i in range(len(coefs))]
    bkg, chi = spline_eval(kraw[:iemax - ie0 + 1], mu[ie0:iemax + 1], knots,
                           coefs, order, kout)
    obkg = np.copy(mu)
    obkg[ie0:ie0 + len(bkg)] = bkg

    # outputs to group
    group = set_xafsGroup(group, _larch=_larch)
    group.bkg = obkg
    group.chie = (mu - obkg) / edge_step
    group.k = kout
    group.chi = chi / edge_step

    # now fill in 'autobk_details' group
    params.init_bkg = np.copy(mu)
    params.init_bkg[ie0:ie0 + len(bkg)] = initbkg
    params.init_chi = initchi / edge_step
    params.knots_e = spl_e
    params.knots_y = np.array([coefs[i] for i in range(nspl)])
    params.init_knots_y = spl_y
    params.nfev = params.fit_details.nfev
    params.kmin = kmin
    params.kmax = kmax
    group.autobk_details = params

    # uncertainties in mu0 and chi: can be fairly slow.
    if calc_uncertainties:
        nchi = len(chi)
        nmue = iemax - ie0 + 1
        redchi = params.chi_reduced
        covar = params.covar / redchi
        jac_chi = np.zeros(nchi * nspl).reshape((nspl, nchi))
        jac_bkg = np.zeros(nmue * nspl).reshape((nspl, nmue))

        cvals, cerrs = [], []
        for i in range(len(coefs)):
            par = getattr(params, FMT_COEF % i)
            cvals.append(getattr(par, 'value', 0.0))
            cdel = getattr(par, 'stderr', 0.0)
            if cdel is None:
                cdel = 0.0
            cerrs.append(cdel / 2.0)
        cvals = np.array(cvals)
        cerrs = np.array(cerrs)

        # find derivatives by hand!
        _k = kraw[:nmue]
        _m = mu[ie0:iemax + 1]
        for i in range(nspl):
            cval0 = cvals[i]
            cvals[i] = cval0 + cerrs[i]
            bkg1, chi1 = spline_eval(_k, _m, knots, cvals, order, kout)

            cvals[i] = cval0 - cerrs[i]
            bkg2, chi2 = spline_eval(_k, _m, knots, cvals, order, kout)

            cvals[i] = cval0
            jac_chi[i] = (chi1 - chi2) / (2 * cerrs[i])
            jac_bkg[i] = (bkg1 - bkg2) / (2 * cerrs[i])

        dfchi = np.zeros(nchi)
        dfbkg = np.zeros(nmue)
        for i in range(nspl):
            for j in range(nspl):
                dfchi += jac_chi[i] * jac_chi[j] * covar[i, j]
                dfbkg += jac_bkg[i] * jac_bkg[j] * covar[i, j]

        prob = 0.5 * (1.0 + erf(err_sigma / np.sqrt(2.0)))
        dchi = t.ppf(prob, nchi - nspl) * np.sqrt(dfchi * redchi)
        dbkg = t.ppf(prob, nmue - nspl) * np.sqrt(dfbkg * redchi)

        group.delta_chi = dchi
        group.delta_bkg = 0.0 * mu
        group.delta_bkg[ie0:ie0 + len(dbkg)] = dbkg
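The calc_uncertainties branch above propagates the covariance of the spline coefficients to mu0(E) and chi(k) through a central-difference Jacobian. Stripped of the spline details, the linear-algebra core is roughly the following sketch (generic model function; all names are illustrative):

import numpy as np

def propagate_covariance(func, pars, steps, covar):
    """Sketch: sigma_f(x)**2 = sum_ij J_i(x) J_j(x) C_ij, with J from
    central differences of a model function over its parameters."""
    pars = np.array(pars, dtype=float)
    f0 = func(pars)
    jac = np.zeros((len(pars), len(f0)))
    for i, (p0, dp) in enumerate(zip(np.copy(pars), steps)):
        pars[i] = p0 + dp
        fplus = func(pars)
        pars[i] = p0 - dp
        fminus = func(pars)
        pars[i] = p0
        jac[i] = (fplus - fminus) / (2.0 * dp)
    # variance of the model at each point: sum_ij J_i J_j C_ij
    var = np.einsum('ik,jk,ij->k', jac, jac, covar)
    return np.sqrt(var)

# toy usage: two correlated parameters of a quadratic model
model = lambda p: p[0] + p[1] * np.linspace(0, 10, 11)**2
cov = np.array([[0.01, 0.002], [0.002, 0.0004]])
print(propagate_covariance(model, [1.0, 0.1], [0.05, 0.01], cov))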
Exemple #22
def estimate_noise(k,
                   chi=None,
                   group=None,
                   rmin=15.0,
                   rmax=30.0,
                   kweight=1,
                   kmin=0,
                   kmax=20,
                   dk=4,
                   dk2=None,
                   kstep=0.05,
                   kwindow='kaiser',
                   nfft=2048,
                   _larch=None,
                   **kws):
    """
    estimate noise levels in EXAFS spectrum and estimate highest k
    where data is above the noise level
    Parameters:
    -----------
      k:        1-d array of photo-electron wavenumber in Ang^-1 (or group)
      chi:      1-d array of chi
      group:    output Group  [see Note below]
      rmin:     minimum R value for high-R region of chi(R)
      rmax:     maximum R value for high-R region of chi(R)
      kweight:  exponent for weighting spectra by k**kweight [1]
      kmin:     starting k for FT Window [0]
      kmax:     ending k for FT Window  [20]
      dk:       tapering parameter for FT Window [4]
      dk2:      second tapering parameter for FT Window [None]
      kstep:    value to use for delta_k ( Ang^-1) [0.05]
      kwindow:  name of window type ['kaiser']
      nfft:     value to use for N_fft [2048].

    Returns:
    ---------
      None   -- outputs are written to supplied group.  Values (scalars) written
      to output group:
        epsilon_k     estimated noise in chi(k)
        epsilon_r     estimated noise in chi(R)
        kmax_suggest  highest estimated k value where |chi(k)| > epsilon_k

    Notes:
    -------

     1. This method uses the high-R portion of chi(R) as a measure of the noise
        level in the chi(R) data and uses Parseval's theorem to convert this noise
        level to that in chi(k).  This method implicitly assumes that there is no
        signal in the high-R portion of the spectrum, and that the noise in the
        spectrum is "white" (independent of R).  Each of these assumptions can be
        questioned.
     2. The estimate for 'kmax_suggest' tends to be fair but somewhat pessimistic
        about how far out the chi(k) data extends before being dominated by noise.
     3. Follows the 'First Argument Group' convention, so that you can either
        specify all of (an array for 'k', an array for 'chi', optional output Group)
        OR pass a group with 'k' and 'chi' as the first argument
    """
    k, chi, group = parse_group_args(k,
                                     members=('k', 'chi'),
                                     defaults=(chi, ),
                                     group=group,
                                     fcn_name='estimate_noise')

    # save _sys.xafsGroup -- we want to NOT write to it here!
    savgroup = set_xafsGroup(None, _larch=_larch)
    tmpgroup = Group()
    rmax_out = min(10 * pi, rmax + 2)

    xftf(k,
         chi,
         kmin=kmin,
         kmax=kmax,
         rmax_out=rmax_out,
         kweight=kweight,
         dk=dk,
         dk2=dk2,
         kwindow=kwindow,
         nfft=nfft,
         kstep=kstep,
         group=tmpgroup,
         _larch=_larch)

    chir = tmpgroup.chir
    rstep = tmpgroup.r[1] - tmpgroup.r[0]

    irmin = int(0.01 + rmin / rstep)
    irmax = min(nfft // 2, int(1.01 + rmax / rstep))
    highr = realimag(chir[irmin:irmax])

    # get average of window function value, scale eps_r scale by this
    # this is imperfect, but improves the result.
    kwin_ave = tmpgroup.kwin.sum() * kstep / (kmax - kmin)
    eps_r = sqrt((highr * highr).sum() / len(highr)) / kwin_ave

    # use Parseval's theorem to convert epsilon_r to epsilon_k,
    # compensating for kweight
    w = 2 * kweight + 1
    scale = sqrt((2 * pi * w) / (kstep * (kmax**w - kmin**w)))
    eps_k = scale * eps_r

    # do reverse FT to get chiq array
    xftr(tmpgroup.r,
         tmpgroup.chir,
         group=tmpgroup,
         rmin=0.5,
         rmax=9.5,
         dr=1.0,
         window='parzen',
         nfft=nfft,
         kstep=kstep,
         _larch=_larch)

    # sets kmax_suggest to the largest k value for which
    # | chi(q) / k**kweight| > epsilon_k
    iq0 = index_of(tmpgroup.q, (kmax + kmin) / 2.0)
    tst = tmpgroup.chiq_mag[iq0:] / (tmpgroup.q[iq0:])**kweight
    kmax_suggest = tmpgroup.q[iq0 + where(tst < eps_k)[0][0]]

    # restore original _sys.xafsGroup, set output variables
    _larch.symtable._sys.xafsGroup = savgroup
    group = set_xafsGroup(group, _larch=_larch)
    group.epsilon_k = eps_k
    group.epsilon_r = eps_r
    group.kmax_suggest = kmax_suggest
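The epsilon_r to epsilon_k conversion above follows Parseval's theorem, so the scale factor depends only on the k-weight, the k range of the window, and kstep. A small numeric sketch of that relation (the values in the example call are arbitrary):

import numpy as np

def eps_k_from_eps_r(eps_r, kweight=1, kmin=0.0, kmax=20.0, kstep=0.05):
    """Sketch: scale chi(R) noise to chi(k) noise via Parseval's theorem."""
    w = 2 * kweight + 1
    scale = np.sqrt((2 * np.pi * w) / (kstep * (kmax**w - kmin**w)))
    return scale * eps_r

# example: a high-R noise floor of 1.e-3 with k-weight 2 over k = 2-16 Ang^-1
print(eps_k_from_eps_r(1.e-3, kweight=2, kmin=2.0, kmax=16.0))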
Exemple #23
def autobk(energy,
           mu=None,
           group=None,
           rbkg=1,
           nknots=None,
           e0=None,
           edge_step=None,
           kmin=0,
           kmax=None,
           kweight=1,
           dk=0,
           win='hanning',
           k_std=None,
           chi_std=None,
           nfft=2048,
           kstep=0.05,
           pre_edge_kws=None,
           nclamp=4,
           clamp_lo=1,
           clamp_hi=1,
           calc_uncertainties=False,
           _larch=None,
           **kws):
    """Use Autobk algorithm to remove XAFS background

    Parameters:
    -----------
      energy:    1-d array of x-ray energies, in eV, or group
      mu:        1-d array of mu(E)
      group:     output group (and input group for e0 and edge_step).
      rbkg:      distance (in Ang) for chi(R) above
                 which the signal is ignored. Default = 1.
      e0:        edge energy, in eV.  If None, it will be determined.
      edge_step: edge step.  If None, it will be determined.
      pre_edge_kws:  keyword arguments to pass to pre_edge()
      nknots:    number of knots in spline.  If None, it will be determined.
      kmin:      minimum k value   [0]
      kmax:      maximum k value   [full data range].
      kweight:   k weight for FFT.  [1]
      dk:        FFT window parameter.  [0]
      win:       FFT window function name.     ['hanning']
      nfft:      array size to use for FFT [2048]
      kstep:     k step size to use for FFT [0.05]
      k_std:     optional k array for standard chi(k).
      chi_std:   optional chi array for standard chi(k).
      nclamp:    number of energy end-points for clamp [4]
      clamp_lo:  weight of low-energy clamp [1]
      clamp_hi:  weight of high-energy clamp [1]
      calc_uncertainties:  Flag to calculate uncertainties in
                            mu_0(E) and chi(k) [False]

    Output arrays are written to the provided group.

    Follows the 'First Argument Group' convention.
    """
    msg = _larch.writer.write
    if 'kw' in kws:
        kweight = kws.pop('kw')
    if len(kws) > 0:
        msg('Unrecognized arguments for autobk():\n')
        msg('    %s\n' % (', '.join(kws.keys())))
        return
    energy, mu, group = parse_group_args(energy,
                                         members=('energy', 'mu'),
                                         defaults=(mu, ),
                                         group=group,
                                         fcn_name='autobk')

    energy = remove_dups(energy)
    # if e0 or edge_step are not specified, get them, either from the
    # passed-in group or from running pre_edge()
    group = set_xafsGroup(group, _larch=_larch)

    if edge_step is None and isgroup(group, 'edge_step'):
        edge_step = group.edge_step
    if e0 is None and isgroup(group, 'e0'):
        e0 = group.e0
    if e0 is None or edge_step is None:
        # need to run pre_edge:
        pre_kws = dict(nnorm=3,
                       nvict=0,
                       pre1=None,
                       pre2=-50.,
                       norm1=100.,
                       norm2=None)
        if pre_edge_kws is not None:
            pre_kws.update(pre_edge_kws)
        pre_edge(energy, mu, group=group, _larch=_larch, **pre_kws)
        if e0 is None:
            e0 = group.e0
        if edge_step is None:
            edge_step = group.edge_step
    if e0 is None or edge_step is None:
        msg('autobk() could not determine e0 or edge_step: try running pre_edge() first\n')
        return

    # get array indices for rbkg and e0: irbkg, ie0
    ie0 = index_of(energy, e0)
    rgrid = np.pi / (kstep * nfft)
    if rbkg < 2 * rgrid: rbkg = 2 * rgrid
    irbkg = int(1.01 + rbkg / rgrid)

    # save ungridded k (kraw) and gridded k (kout)
    # and ftwin (*k-weighting) for FT in residual
    enpe = energy[ie0:] - e0
    kraw = np.sign(enpe) * np.sqrt(ETOK * abs(enpe))
    if kmax is None:
        kmax = max(kraw)
    else:
        kmax = max(0, min(max(kraw), kmax))
    kout = kstep * np.arange(int(1.01 + kmax / kstep), dtype='float64')
    iemax = min(len(energy), 2 + index_of(energy, e0 + kmax * kmax / ETOK)) - 1

    # interpolate provided chi(k) onto the kout grid
    if chi_std is not None and k_std is not None:
        chi_std = np.interp(kout, k_std, chi_std)
    # pre-load FT window
    ftwin = kout**kweight * ftwindow(
        kout, xmin=kmin, xmax=kmax, window=win, dx=dk)
    # calc k-value and initial guess for y-values of spline params
    nspl = max(4, min(128, 2 * int(rbkg * (kmax - kmin) / np.pi) + 1))
    spl_y, spl_k, spl_e = np.zeros(nspl), np.zeros(nspl), np.zeros(nspl)
    for i in range(nspl):
        q = kmin + i * (kmax - kmin) / (nspl - 1)
        ik = index_nearest(kraw, q)
        i1 = min(len(kraw) - 1, ik + 5)
        i2 = max(0, ik - 5)
        spl_k[i] = kraw[ik]
        spl_e[i] = energy[ik + ie0]
        spl_y[i] = (2 * mu[ik + ie0] + mu[i1 + ie0] + mu[i2 + ie0]) / 4.0

    # get spline representation: knots, coefs, order=3
    # coefs will be varied in fit.
    knots, coefs, order = splrep(spl_k, spl_y)

    # set fit parameters from initial coefficients
    params = Group()
    for i in range(len(coefs)):
        name = FMT_COEF % i
        p = Parameter(coefs[i], name=name, vary=i < len(spl_y))
        p._getval()
        setattr(params, name, p)

    initbkg, initchi = spline_eval(kraw[:iemax - ie0 + 1], mu[ie0:iemax + 1],
                                   knots, coefs, order, kout)

    # do fit
    fit = Minimizer(__resid,
                    params,
                    _larch=_larch,
                    toler=1.e-4,
                    fcn_kws=dict(ncoefs=len(coefs),
                                 chi_std=chi_std,
                                 knots=knots,
                                 order=order,
                                 kraw=kraw[:iemax - ie0 + 1],
                                 mu=mu[ie0:iemax + 1],
                                 irbkg=irbkg,
                                 kout=kout,
                                 ftwin=ftwin,
                                 kweight=kweight,
                                 nfft=nfft,
                                 nclamp=nclamp,
                                 clamp_lo=clamp_lo,
                                 clamp_hi=clamp_hi))
    fit.leastsq()

    # write final results
    coefs = [getattr(params, FMT_COEF % i) for i in range(len(coefs))]
    bkg, chi = spline_eval(kraw[:iemax - ie0 + 1], mu[ie0:iemax + 1], knots,
                           coefs, order, kout)
    obkg = np.copy(mu)
    obkg[ie0:ie0 + len(bkg)] = bkg

    # outputs to group
    group = set_xafsGroup(group, _larch=_larch)
    group.bkg = obkg
    group.chie = (mu - obkg) / edge_step
    group.k = kout
    group.chi = chi / edge_step

    # now fill in 'autobk_details' group
    params.init_bkg = np.copy(mu)
    params.init_bkg[ie0:ie0 + len(bkg)] = initbkg
    params.init_chi = initchi / edge_step
    params.knots_e = spl_e
    params.knots_y = np.array([coefs[i] for i in range(nspl)])
    params.init_knots_y = spl_y
    params.nfev = params.fit_details.nfev
    params.kmin = kmin
    params.kmax = kmax
    group.autobk_details = params

    # uncertainties in mu0 and chi:  fairly slow!!
    if HAS_UNCERTAIN and calc_uncertainties:
        vbest, vstd = [], []
        for n in fit.var_names:
            par = getattr(params, n)
            vbest.append(par.value)
            vstd.append(par.stderr)
        uvars = uncertainties.correlated_values(vbest, params.covar)
        # uncertainty in bkg (aka mu0)
        # note that much of this is working around
        # limitations in the uncertainty package that make it
        #  1. take an argument list (not array)
        #  2. work on returned scalars (but not arrays)
        #  3. not handle kw args and *args well (so the loop
        #     variable "index", read via closure, is used here)
        nkx = iemax - ie0 + 1

        def my_dsplev(*args):
            coefs = np.array(args)
            return splev(kraw[:nkx], [knots, coefs, order])[index]

        fdbkg = uncertainties.wrap(my_dsplev)
        # explicit loop keeps 'index' in the enclosing scope so that
        # my_dsplev can see it (a list comprehension would not, on Python 3)
        dmu0 = []
        for index in range(len(bkg)):
            dmu0.append(fdbkg(*uvars).std_dev())
        group.delta_bkg = np.zeros(len(mu))
        group.delta_bkg[ie0:ie0 + len(bkg)] = np.array(dmu0)

        # uncertainty in chi (see notes above)
        def my_dchi(*args):
            coefs = np.array(args)
            b, chi = spline_eval(kraw[:nkx], mu[ie0:iemax + 1], knots, coefs,
                                 order, kout)
            return chi[index]

        fdchi = uncertainties.wrap(my_dchi)
        dchi = []
        for index in range(len(kout)):   # same scoping note as above
            dchi.append(fdchi(*uvars).std_dev())
        group.delta_chi = np.array(dchi) / edge_step
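This variant leans on the third-party uncertainties package to carry the correlated coefficient errors through to chi(k). A minimal self-contained sketch of that pattern (toy values; note that recent releases expose std_dev as a property, whereas the code above calls it as a method, which matches older releases):

import numpy as np
import uncertainties

# best-fit values and their covariance (illustrative numbers)
best = [1.0, 0.5]
covar = np.array([[0.04, 0.01],
                  [0.01, 0.09]])

a, b = uncertainties.correlated_values(best, covar)

# arithmetic on these objects propagates the correlated errors automatically
y = a + 2.0 * b
print(y.nominal_value, y.std_dev)   # y.std_dev() on older uncertainties releases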
Exemple #24
def preedge(energy,
            mu,
            e0=None,
            step=None,
            nnorm=3,
            nvict=0,
            pre1=None,
            pre2=-50,
            norm1=100,
            norm2=None):
    """pre edge subtraction, normalization for XAFS (straight python)

    This performs a number of steps:
       1. determine E0 (if not supplied) from max of deriv(mu)
       2. fit a line (or low-order polynomial) to the region below the edge
       3. fit a polynomial to the region above the edge
       4. extrapolate the two curves to E0 to determine the edge jump

    Arguments
    ----------
    energy:  array of x-ray energies, in eV
    mu:      array of mu(E)
    e0:      edge energy, in eV.  If None, it will be determined here.
    step:    edge jump.  If None, it will be determined here.
    pre1:    low E range (relative to E0) for pre-edge fit
    pre2:    high E range (relative to E0) for pre-edge fit
    nvict:   energy exponent to use for the pre-edge fit.  See Notes
    norm1:   low E range (relative to E0) for post-edge fit
    norm2:   high E range (relative to E0) for post-edge fit
    nnorm:   degree of polynomial (ie, nnorm+1 coefficients will be found) for
             post-edge normalization curve. Default=3 (cubic), max=5
    Returns
    -------
      dictionary with elements (among others)
          e0          energy origin in eV
          edge_step   edge step
          norm        normalized mu(E)
          pre_edge    determined pre-edge curve
          post_edge   determined post-edge, normalization curve

    Notes
    -----
     1 nvict gives an exponent to the energy term for the fits to the pre-edge
       and the post-edge region.  For the pre-edge, a line (m * energy + b) is
       fit to mu(energy)*energy**nvict over the pre-edge region,
       energy=[e0+pre1, e0+pre2].  For the post-edge, a polynomial of order
       nnorm will be fit to mu(energy)*energy**nvict of the post-edge region
       energy=[e0+norm1, e0+norm2].

    """
    energy = remove_dups(energy)

    if e0 is None or e0 < energy[0] or e0 > energy[-1]:
        dmu = np.gradient(mu) / np.gradient(energy)
        # find points of high derivative
        high_deriv_pts = np.where(dmu > max(dmu) * 0.05)[0]
        idmu_max, dmu_max = 0, 0
        for i in high_deriv_pts:
            if (dmu[i] > dmu_max and (i + 1 in high_deriv_pts)
                    and (i - 1 in high_deriv_pts)):
                idmu_max, dmu_max = i, dmu[i]

        e0 = energy[idmu_max]
    nnorm = max(min(nnorm, MAX_NNORM), 0)
    ie0 = index_nearest(energy, e0)
    e0 = energy[ie0]

    if pre1 is None: pre1 = min(energy) - e0
    if norm2 is None: norm2 = max(energy) - e0
    if norm2 < 0: norm2 = max(energy) - e0 - norm2
    pre1 = max(pre1, (min(energy) - e0))
    norm2 = min(norm2, (max(energy) - e0))

    if pre1 > pre2:
        pre1, pre2 = pre2, pre1
    if norm1 > norm2:
        norm1, norm2 = norm2, norm1

    p1 = index_of(energy, pre1 + e0)
    p2 = index_nearest(energy, pre2 + e0)
    if p2 - p1 < 2:
        p2 = min(len(energy), p1 + 2)

    omu = mu * energy**nvict
    ex, mx = remove_nans2(energy[p1:p2], omu[p1:p2])
    precoefs = polyfit(ex, mx, 1)
    pre_edge = (precoefs[0] * energy + precoefs[1]) * energy**(-nvict)
    # normalization
    p1 = index_of(energy, norm1 + e0)
    p2 = index_nearest(energy, norm2 + e0)
    if p2 - p1 < 2:
        p2 = min(len(energy), p1 + 2)

    coefs = polyfit(energy[p1:p2], omu[p1:p2], nnorm)
    post_edge = 0
    norm_coefs = []
    for n, c in enumerate(reversed(list(coefs))):
        post_edge += c * energy**(n - nvict)
        norm_coefs.append(c)
    edge_step = step
    if edge_step is None:
        edge_step = post_edge[ie0] - pre_edge[ie0]

    norm = (mu - pre_edge) / edge_step
    out = {
        'e0': e0,
        'edge_step': edge_step,
        'norm': norm,
        'pre_edge': pre_edge,
        'post_edge': post_edge,
        'norm_coefs': norm_coefs,
        'nvict': nvict,
        'nnorm': nnorm,
        'norm1': norm1,
        'norm2': norm2,
        'pre1': pre1,
        'pre2': pre2,
        'precoefs': precoefs
    }

    return out
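The edge-step determination in preedge() reduces to two polynomial fits evaluated at e0: a line through the pre-edge region and a low-order polynomial through the post-edge region. A stand-alone numpy sketch of that core on a synthetic arctan-shaped edge (this does not call the function above; energies, fit ranges, and noise level are arbitrary):

import numpy as np

# synthetic mu(E): arctan step at 7112 eV plus a gentle slope and some noise
energy = np.linspace(6900, 7500, 601)
e0 = 7112.0
rng = np.random.default_rng(0)
mu = 0.6 * (0.5 + np.arctan((energy - e0) / 2.0) / np.pi)
mu += 1.e-5 * (energy - energy[0]) + rng.normal(0, 1.e-3, energy.size)

ie0 = np.argmin(abs(energy - e0))

# line through the pre-edge region [e0-200, e0-50]
ipre = (energy > e0 - 200) & (energy < e0 - 50)
pre_line = np.polyval(np.polyfit(energy[ipre], mu[ipre], 1), energy)

# quadratic through the post-edge region [e0+100, e0+380]
ipost = (energy > e0 + 100) & (energy < e0 + 380)
post_curve = np.polyval(np.polyfit(energy[ipost], mu[ipost], 2), energy)

edge_step = post_curve[ie0] - pre_line[ie0]
norm = (mu - pre_line) / edge_step
print("edge step ~", edge_step)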
Exemple #25
def autobk(energy, mu=None, group=None, rbkg=1, nknots=None, e0=None,
           edge_step=None, kmin=0, kmax=None, kweight=1, dk=0,
           win='hanning', k_std=None, chi_std=None, nfft=2048, kstep=0.05,
           pre_edge_kws=None, nclamp=4, clamp_lo=1, clamp_hi=1,
           calc_uncertainties=True, err_sigma=1, _larch=None, **kws):
    """Use Autobk algorithm to remove XAFS background

    Parameters:
    -----------
      energy:    1-d array of x-ray energies, in eV, or group
      mu:        1-d array of mu(E)
      group:     output group (and input group for e0 and edge_step).
      rbkg:      distance (in Ang) for chi(R) above
                 which the signal is ignored. Default = 1.
      e0:        edge energy, in eV.  If None, it will be determined.
      edge_step: edge step.  If None, it will be determined.
      pre_edge_kws:  keyword arguments to pass to pre_edge()
      nknots:    number of knots in spline.  If None, it will be determined.
      kmin:      minimum k value   [0]
      kmax:      maximum k value   [full data range].
      kweight:   k weight for FFT.  [1]
      dk:        FFT window parameter.  [0]
      win:       FFT window function name.     ['hanning']
      nfft:      array size to use for FFT [2048]
      kstep:     k step size to use for FFT [0.05]
      k_std:     optional k array for standard chi(k).
      chi_std:   optional chi array for standard chi(k).
      nclamp:    number of energy end-points for clamp [4]
      clamp_lo:  weight of low-energy clamp [1]
      clamp_hi:  weight of high-energy clamp [1]
      calc_uncertainties:  Flag to calculate uncertainties in
                            mu_0(E) and chi(k) [True]
      err_sigma: sigma level for uncertainties in mu_0(E) and chi(k) [1]

    Output arrays are written to the provided group.

    Follows the 'First Argument Group' convention.
    """
    msg = _larch.writer.write
    if 'kw' in kws:
        kweight = kws.pop('kw')
    if len(kws) > 0:
        msg('Unrecognized arguments for autobk():\n')
        msg('    %s\n' % (', '.join(kws.keys())))
        return
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='autobk')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    energy = remove_dups(energy)
    # if e0 or edge_step are not specified, get them, either from the
    # passed-in group or from running pre_edge()
    group = set_xafsGroup(group, _larch=_larch)

    if edge_step is None and isgroup(group, 'edge_step'):
        edge_step = group.edge_step
    if e0 is None and isgroup(group, 'e0'):
        e0 = group.e0
    if e0 is None or edge_step is None:
        # need to run pre_edge:
        pre_kws = dict(nnorm=3, nvict=0, pre1=None,
                       pre2=-50., norm1=100., norm2=None)
        if pre_edge_kws is not None:
            pre_kws.update(pre_edge_kws)
        pre_edge(energy, mu, group=group, _larch=_larch, **pre_kws)
        if e0 is None:
            e0 = group.e0
        if edge_step is None:
            edge_step = group.edge_step
    if e0 is None or edge_step is None:
        msg('autobk() could not determine e0 or edge_step: try running pre_edge() first\n')
        return

    # get array indices for rbkg and e0: irbkg, ie0
    ie0 = index_of(energy, e0)
    rgrid = np.pi/(kstep*nfft)
    if rbkg < 2*rgrid: rbkg = 2*rgrid
    irbkg = int(1.01 + rbkg/rgrid)

    # save ungridded k (kraw) and gridded k (kout)
    # and ftwin (*k-weighting) for FT in residual
    enpe = energy[ie0:] - e0
    kraw = np.sign(enpe)*np.sqrt(ETOK*abs(enpe))
    if kmax is None:
        kmax = max(kraw)
    else:
        kmax = max(0, min(max(kraw), kmax))
    kout  = kstep * np.arange(int(1.01+kmax/kstep), dtype='float64')
    iemax = min(len(energy), 2+index_of(energy, e0+kmax*kmax/ETOK)) - 1

    # interpolate provided chi(k) onto the kout grid
    if chi_std is not None and k_std is not None:
        chi_std = np.interp(kout, k_std, chi_std)
    # pre-load FT window
    ftwin = kout**kweight * ftwindow(kout, xmin=kmin, xmax=kmax,
                                     window=win, dx=dk)
    # calc k-value and initial guess for y-values of spline params
    nspl = max(4, min(128, 2*int(rbkg*(kmax-kmin)/np.pi) + 1))
    spl_y, spl_k, spl_e  = np.zeros(nspl), np.zeros(nspl), np.zeros(nspl)
    for i in range(nspl):
        q  = kmin + i*(kmax-kmin)/(nspl - 1)
        ik = index_nearest(kraw, q)
        i1 = min(len(kraw)-1, ik + 5)
        i2 = max(0, ik - 5)
        spl_k[i] = kraw[ik]
        spl_e[i] = energy[ik+ie0]
        spl_y[i] = (2*mu[ik+ie0] + mu[i1+ie0] + mu[i2+ie0] ) / 4.0

    # get spline representation: knots, coefs, order=3
    # coefs will be varied in fit.
    knots, coefs, order = splrep(spl_k, spl_y)

    # set fit parameters from initial coefficients
    params = Group()
    for i in range(len(coefs)):
        name = FMT_COEF % i
        p = Parameter(coefs[i], name=name, vary=i<len(spl_y))
        p._getval()
        setattr(params, name, p)

    initbkg, initchi = spline_eval(kraw[:iemax-ie0+1], mu[ie0:iemax+1],
                                   knots, coefs, order, kout)

    # do fit
    fit = Minimizer(__resid, params, _larch=_larch, toler=1.e-4,
                    fcn_kws = dict(ncoefs=len(coefs), chi_std=chi_std,
                                   knots=knots, order=order,
                                   kraw=kraw[:iemax-ie0+1],
                                   mu=mu[ie0:iemax+1], irbkg=irbkg, kout=kout,
                                   ftwin=ftwin, kweight=kweight,
                                   nfft=nfft, nclamp=nclamp,
                                   clamp_lo=clamp_lo, clamp_hi=clamp_hi))
    fit.leastsq()

    # write final results
    coefs = [getattr(params, FMT_COEF % i) for i in range(len(coefs))]
    bkg, chi = spline_eval(kraw[:iemax-ie0+1], mu[ie0:iemax+1],
                           knots, coefs, order, kout)
    obkg = np.copy(mu)
    obkg[ie0:ie0+len(bkg)] = bkg

    # outputs to group
    group = set_xafsGroup(group, _larch=_larch)
    group.bkg  = obkg
    group.chie = (mu-obkg)/edge_step
    group.k    = kout
    group.chi  = chi/edge_step

    # now fill in 'autobk_details' group
    params.init_bkg = np.copy(mu)
    params.init_bkg[ie0:ie0+len(bkg)] = initbkg
    params.init_chi = initchi/edge_step
    params.knots_e  = spl_e
    params.knots_y  = np.array([coefs[i] for i in range(nspl)])
    params.init_knots_y = spl_y
    params.nfev = params.fit_details.nfev
    params.kmin = kmin
    params.kmax = kmax
    group.autobk_details = params

    # uncertainties in mu0 and chi: can be fairly slow.
    if calc_uncertainties:
        nchi = len(chi)
        nmue = iemax-ie0 + 1
        redchi = params.chi_reduced
        covar = params.covar / redchi
        jac_chi = np.zeros(nchi*nspl).reshape((nspl, nchi))
        jac_bkg = np.zeros(nmue*nspl).reshape((nspl, nmue))

        cvals, cerrs = [], []
        for i in range(len(coefs)):
            par = getattr(params, FMT_COEF % i)
            cvals.append(getattr(par, 'value', 0.0))
            cdel = getattr(par, 'stderr', 0.0)
            if cdel is None:
                cdel = 0.0
            cerrs.append(cdel/2.0)
        cvals = np.array(cvals)
        cerrs = np.array(cerrs)

        # find derivatives by hand!
        _k = kraw[:nmue]
        _m = mu[ie0:iemax+1]
        for i in range(nspl):
            cval0 = cvals[i]
            cvals[i] = cval0 + cerrs[i]
            bkg1, chi1 = spline_eval(_k, _m, knots, cvals, order, kout)

            cvals[i] = cval0 - cerrs[i]
            bkg2, chi2 = spline_eval(_k, _m, knots, cvals, order, kout)

            cvals[i] = cval0
            jac_chi[i] = (chi1 - chi2) / (2*cerrs[i])
            jac_bkg[i] = (bkg1 - bkg2) / (2*cerrs[i])

        dfchi = np.zeros(nchi)
        dfbkg = np.zeros(nmue)
        for i in range(nspl):
            for j in range(nspl):
                dfchi += jac_chi[i]*jac_chi[j]*covar[i,j]
                dfbkg += jac_bkg[i]*jac_bkg[j]*covar[i,j]

        prob = 0.5*(1.0 + erf(err_sigma/np.sqrt(2.0)))
        dchi = t.ppf(prob, nchi-nspl) * np.sqrt(dfchi*redchi)
        dbkg = t.ppf(prob, nmue-nspl) * np.sqrt(dfbkg*redchi)

        group.delta_chi = dchi
        group.delta_bkg = 0.0*mu
        group.delta_bkg[ie0:ie0+len(dbkg)] = dbkg
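The spline setup above converts energies past e0 to photo-electron wavenumber and sizes the spline from the information content 2*rbkg*(kmax-kmin)/pi. A short numpy sketch of those two steps; the numeric value used for ETOK below is the conventional 2*m_e/hbar**2 in eV*Ang**2 units and is an assumption here, since the constant is defined elsewhere in the source:

import numpy as np

ETOK = 0.2624682917   # assumed value: 2*m_e/hbar**2 in 1/(eV*Ang**2)

def energy_to_k(energy, e0):
    """Sketch: convert energies (eV) at or above e0 to wavenumber k (1/Ang)."""
    de = np.asarray(energy, dtype=float) - e0
    return np.sign(de) * np.sqrt(ETOK * abs(de))

def n_spline_knots(rbkg, kmin, kmax):
    """Sketch: knot count from the information content 2*rbkg*(kmax-kmin)/pi."""
    return max(4, min(128, 2 * int(rbkg * (kmax - kmin) / np.pi) + 1))

k = energy_to_k(np.array([7112., 7150., 7300., 7600.]), 7112.)
print(k, n_spline_knots(1.0, 0.0, k.max()))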