Example #1
def fluo_corr(energy, mu, formula, elem, group=None, edge='K', anginp=45,
              angout=45,  _larch=None, **pre_kws):
    """correct over-absorption (self-absorption) for fluorescene XAFS
    using the FLUO alogrithm of D. Haskel.

    Arguments
    ---------
      energy    array of energies
      mu        uncorrected fluorescence mu
      formula   string for sample stoichiometry
      elem      atomic symbol or Z of absorbing element
      group     output group [default None]
      edge      name of edge ('K', 'L3', ...) [default 'K']
      anginp    input angle in degrees  [default 45]
      angout    output angle in degrees  [default 45]

    Additional keywords will be passed to pre_edge(), which will be used
    to ensure consistent normalization.

    Returns
    --------
       None, writes `mu_corr` and `norm_corr` (normalized `mu_corr`)
       to output group.

    Notes
    -----
       Supports the First Argument Group convention, requiring group
       members 'energy' and 'mu'
    """
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='fluo_corr')

    # generate normalized mu for correction
    preinp   = preedge(energy, mu, **pre_kws)
    mu_inp   = preinp['norm']

    anginp   = max(1.e-7, np.deg2rad(anginp))
    angout   = max(1.e-7, np.deg2rad(angout))

    # find edge energies and fluorescence line energy
    e_edge   = xray_edge(elem, edge, _larch=_larch)[0]
    e_fluor  = xray_line(elem, edge, _larch=_larch)[0]

    # calculate mu(E) for fluorescence energy, above, below edge
    energies = np.array([e_fluor, e_edge-10.0, e_edge+10.0])
    muvals   = material_mu(formula, energies, density=1, _larch=_larch)

    mu_fluor = muvals[0] * np.sin(anginp)/np.sin(angout)
    mu_below = muvals[1]
    mu_celem = muvals[2] - muvals[1]

    alpha    = (mu_fluor + mu_below)/mu_celem
    mu_corr  = mu_inp*alpha/(alpha + 1 - mu_inp)
    preout   = preedge(energy, mu_corr, **pre_kws)

    if group is not None:
        group = set_xafsGroup(group, _larch=_larch)
        group.mu_corr = mu_corr
        group.norm_corr = preout['norm']
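
A minimal usage sketch (not part of the example above): read a fluorescence scan and apply the correction with explicit arrays, matching the signature shown. The import paths assume a recent xraylarch install; the file name, column labels, and stoichiometry are hypothetical.

from larch.io import read_ascii
from larch.xafs import fluo_corr

dat = read_ascii('fe2o3_fluo.dat', labels='energy i0 ifluo')  # hypothetical data file
dat.mu = dat.ifluo / dat.i0

fluo_corr(dat.energy, dat.mu, 'Fe2O3', 'Fe', group=dat,
          edge='K', anginp=45, angout=45)
# corrected arrays are written back onto the group
print(dat.mu_corr[:3], dat.norm_corr[:3])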
Example #2
def find_e0(energy, mu=None, group=None, _larch=None):
    """calculate E0 given mu(energy)

    This finds the point with maximum derivative with some
    checks to avoid spurious glitches.

    Arguments
    ----------
    energy:  array of x-ray energies, in eV or group
    mu:      array of mu(E)
    group:   output group

    Returns
    -------
    Value of e0.  If provided, group.e0 will be set to this value.

    Notes
    -----
       Supports First Argument Group convention, requiring group
       members 'energy' and 'mu'
    """
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='find_e0')
    e0 = _finde0(energy, mu)
    if group is not None:
        group = set_xafsGroup(group, _larch=_larch)
        group.e0 = e0
    return e0
Example #3
def find_e0(energy, mu=None, group=None, _larch=None):
    """calculate :math:`E_0`, the energy threshold of absorption, or
    'edge energy', given :math:`\mu(E)`.

    :math:`E_0` is found as the point with maximum derivative with
    some checks to avoid spurious glitches.

    Arguments:
        energy (ndarray or group): array of x-ray energies, in eV, or group
        mu     (ndarray or None): array of mu(E) values
        group  (group or None):    output group
        _larch (larch instance or None):  current larch session.

    Returns:
        float: Value of e0. If a group is provided, group.e0 will also be set.

    Notes:
        1. Supports :ref:`First Argument Group` convention, requiring group members `energy` and `mu`
        2. Supports :ref:`Set XAFS Group` convention within Larch or if `_larch` is set.
    """
    energy, mu, group = parse_group_args(energy,
                                         members=('energy', 'mu'),
                                         defaults=(mu, ),
                                         group=group,
                                         fcn_name='find_e0')
    e0 = _finde0(energy, mu)
    if group is not None:
        group = set_xafsGroup(group, _larch=_larch)
        group.e0 = e0
    return e0
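
A short self-contained sketch of the two calling styles the docstring describes, using a synthetic absorption step (the Fe K edge is near 7112 eV). It assumes a reasonably recent xraylarch, where the _larch session argument can be omitted.

import numpy as np
from larch import Group

# synthetic step centered at 7112 eV, so find_e0 should report ~7112
energy = np.linspace(7050.0, 7250.0, 2001)
mu = 0.5 + np.arctan((energy - 7112.0)/2.0)/np.pi + 0.002*(energy - 7050.0)

e0 = find_e0(energy, mu)             # plain-array call
print("E0 = %.2f eV" % e0)

dat = Group(energy=energy, mu=mu)    # First Argument Group call
find_e0(dat)                         # also sets dat.e0
print(dat.e0)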
Example #4
def xas_deconvolve(energy, norm=None, group=None, form='gaussian',
                   esigma=1.0, eshift=0.0, _larch=None):
    """XAS spectral deconvolution

    This function de-convolves a normalized mu(E) spectrum with a
    peak shape, enhancing separation of XANES features.

    This can be unstable -- Use results with caution!

    Arguments
    ----------
    energy:   array of x-ray energies, in eV or group
    norm:     array of normalized mu(E)
    group:    output group
    form:     form of deconvolution function. One of
              'gaussian' (default) or 'lorentzian'
    esigma    energy sigma to pass to gaussian() or lorentzian()
              [in eV, default=1.0]
    eshift    energy shift to apply to result. [in eV, default=0]

    Returns
    -------
    None
       The array 'deconv' will be written to the output group.

    Notes
    -----
       Supports the First Argument Group convention, requiring group
       members 'energy' and 'norm'
    """
    if _larch is None:
        raise Warning("cannot deconvolve -- larch broken?")

    energy, mu, group = parse_group_args(energy, members=('energy', 'norm'),
                                         defaults=(norm,), group=group,
                                         fcn_name='xas_deconv')
    eshift = eshift + 0.5 * esigma

    en  = remove_dups(energy)
    en  = en - en[0]
    estep = max(0.001, 0.001*int(min(en[1:]-en[:-1])*1000.0))
    npts = 1  + int(max(en) / estep)

    x = np.arange(npts)*estep
    y = _interp(en, mu, x, kind='linear', _larch=_larch)

    kernel = gaussian
    if form.lower().startswith('lor'):
        kernel = lorentzian

    yext = np.concatenate((y, np.arange(len(y))*y[-1]))
    ret, err = deconvolve(yext, kernel(x, 0, esigma))
    nret = min(len(x), len(ret))

    ret = ret[:nret]*yext[nret-1]/ret[nret-1]
    out = _interp(x+eshift, ret, en, kind='linear', _larch=_larch)

    group = set_xafsGroup(group, _larch=_larch)
    group.deconv = out
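
The version above requires a live _larch session, so this hedged sketch creates one explicitly; dat is assumed to be a group that already holds 'energy' and 'mu' arrays (for example from read_ascii), and imports assume a recent xraylarch.

from larch import Interpreter
from larch.xafs import pre_edge, xas_deconvolve

session = Interpreter()                  # required by the version shown above
pre_edge(dat, _larch=session)            # writes dat.norm, which xas_deconvolve reads
xas_deconvolve(dat, form='gaussian', esigma=1.0, _larch=session)
# dat.deconv holds the sharpened spectrum; compare it against dat.norm,
# since deconvolution can be unstable and noisy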
Example #5
def xas_convolve(energy, norm=None, group=None, form='lorentzian',
                   esigma=1.0, eshift=0.0, _larch=None):
    """
    convolve a normalized mu(E) spectrum with a Lorentzian or Gaussian peak
    shape, degrading separation of XANES features.

    This is provided as a complement to xas_deconvolve, and to deliberately
    broaden spectra to compare with spectra measured at lower resolution.

    Arguments
    ----------
    energy:   array of x-ray energies (in eV) or XAFS data group
    norm:     array of normalized mu(E)
    group:    output group
    form:     form of convolution function. One of
              'lorentzian' or  'gaussian' ['lorentzian']
    esigma    energy sigma (in eV) to pass to gaussian() or lorentzian() [1.0]
    eshift    energy shift (in eV) to apply to result [0]

    Returns
    -------
    None
       The array 'conv' will be written to the output group.

    Notes
    -----
       Follows the First Argument Group convention, using group members named
       'energy' and 'norm'
    """

    energy, mu, group = parse_group_args(energy, members=('energy', 'norm'),
                                         defaults=(norm,), group=group,
                                         fcn_name='xas_convolve')
    eshift = eshift + 0.5 * esigma

    en  = remove_dups(energy)
    en  = en - en[0]
    estep = max(0.001, 0.001*int(min(en[1:]-en[:-1])*1000.0))

    npad = 1 + int(max(estep*2.01, 50*esigma)/estep)

    npts = npad  + int(max(en) / estep)

    x = np.arange(npts)*estep
    y = interp(en, mu, x, kind='cubic')

    kernel = lorentzian
    if form.lower().startswith('g'):
        kernel = gaussian

    k = kernel(x, center=0, sigma=esigma)
    ret = np.convolve(y, k, mode='full')

    out = interp(x-eshift, ret[:len(x)], en, kind='cubic')

    group = set_xafsGroup(group, _larch=_larch)
    group.conv = out / k.sum()
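
As a counterpart, a hedged sketch of broadening an already-normalized spectrum to mimic a lower-resolution measurement; dat is assumed to hold 'energy' and 'norm' (e.g. from a prior pre_edge() call), and the import assumes a recent xraylarch.

from larch.xafs import xas_convolve

# broaden by ~1.5 eV with the default Lorentzian kernel
xas_convolve(dat, form='lorentzian', esigma=1.5)
# dat.conv is the broadened spectrum on the same grid as dat.energy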
Example #6
def find_e0(energy, mu=None, group=None, _larch=None):
    """calculate E0 given mu(energy)

    This finds the point with maximum derivative with some
    checks to avoid spurious glitches.

    Arguments
    ----------
    energy:  array of x-ray energies, in eV or group
    mu:      array of mu(E)
    group:   output group

    Returns
    -------
    Value of e0.  If provided, group.e0 will be set to this value.

    Notes
    -----
       Supports First Argument Group convention, requiring group
       members 'energy' and 'mu'
    """
    energy, mu, group = parse_group_args(energy,
                                         members=('energy', 'mu'),
                                         defaults=(mu, ),
                                         group=group,
                                         fcn_name='find_e0')

    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    energy = remove_dups(energy)
    dmu = np.gradient(mu) / np.gradient(energy)
    # find points of high derivative
    high_deriv_pts = np.where(dmu > max(dmu) * 0.05)[0]
    idmu_max, dmu_max = 0, 0
    for i in high_deriv_pts:
        if (dmu[i] > dmu_max and (i + 1 in high_deriv_pts)
                and (i - 1 in high_deriv_pts)):
            idmu_max, dmu_max = i, dmu[i]

    e0 = energy[idmu_max]
    if group is not None:
        group = set_xafsGroup(group, _larch=_larch)
        group.e0 = e0
    return e0
Example #7
def find_e0(energy, mu=None, group=None, _larch=None):
    """calculate E0 given mu(energy)

    This finds the point with maximum derivative with some
    checks to avoid spurious glitches.

    Arguments
    ----------
    energy:  array of x-ray energies, in eV or group
    mu:      array of mu(E)
    group:   output group

    Returns
    -------
    Value of e0.  If provided, group.e0 will be set to this value.

    Notes
    -----
       Supports First Argument Group convention, requiring group
       members 'energy' and 'mu'
    """
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='find_e0')

    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    energy = remove_dups(energy)
    dmu = np.gradient(mu)/np.gradient(energy)
    # find points of high derivative
    high_deriv_pts = np.where(dmu >  max(dmu)*0.05)[0]
    idmu_max, dmu_max = 0, 0
    for i in high_deriv_pts:
        if (dmu[i] > dmu_max and
            (i+1 in high_deriv_pts) and
            (i-1 in high_deriv_pts)):
            idmu_max, dmu_max = i, dmu[i]

    e0 = energy[idmu_max]
    if group is not None:
        group = set_xafsGroup(group, _larch=_larch)
        group.e0 = e0
    return e0
Example #8
def sort_xafs(energy,
              mu=None,
              group=None,
              fix_repeats=True,
              overwrite=True,
              _larch=None):
    """sort energy, mu pair of XAFS data so that energy is monotonically increasing

    Arguments
    ---------
    energy       input energy array
    mu           input mu array
    group        output group
    fix_repeats  bool, whether to fix repeated energies
    overwrite    bool, whether to overwrite arrays [True]

    Returns
    -------
      None

    if overwrite is False, a group named 'sorted' will be created
    in the output group, with sorted energy and mu arrays

    (if the output group is None, _sys.xafsGroup will be written to)

    """
    energy, mu, group = parse_group_args(energy,
                                         members=('energy', 'mu'),
                                         defaults=(mu, ),
                                         group=group,
                                         fcn_name='sort_xafs')

    indices = np.argsort(energy)
    new_energy = energy[indices]
    new_mu = mu[indices]

    if fix_repeats:
        new_energy = remove_dups(new_energy)

    if not overwrite:
        group.sorted = Group(energy=new_energy, mu=new_mu)
    else:
        group.energy = new_energy
        group.mu = new_mu
    return
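
A small self-contained sketch of repairing a scan whose energy column arrived out of order; the arrays are made up for illustration, and the imports assume a recent xraylarch.

import numpy as np
from larch import Group
from larch.xafs import sort_xafs

# scan with one out-of-order point and one repeated energy
dat = Group(energy=np.array([7000., 7002., 7001., 7003., 7003., 7004.]),
            mu=np.array([0.10, 0.30, 0.20, 0.40, 0.40, 0.50]))

sort_xafs(dat, fix_repeats=True, overwrite=True)
print(dat.energy)   # monotonically increasing; the repeat is nudged by remove_dups()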
Example #9
def sort_xafs(energy, mu=None, group=None, fix_repeats=True, overwrite=True, _larch=None):
    """sort energy, mu pair of XAFS data so that energy is monotonically increasing

    Arguments
    ---------
    energy       input energy array
    mu           input mu array
    group        output group
    fix_repeats  bool, whether to fix repeated energies
    overwrite    bool, whether to overwrite arrays [True]

    Returns
    -------
      None

    if overwrite is False, a group named 'sorted' will be created
    in the output group, with sorted energy and mu arrays

    (if the output group is None, _sys.xafsGroup will be written to)

    """
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                        fcn_name='sort_xafs')

    indices = np.argsort(energy)
    new_energy  = energy[indices]
    new_mu  = mu[indices]

    if fix_repeats:
        new_energy = remove_dups(new_energy)

    if not overwrite:
        group.sorted = Group(energy=new_energy, mu=new_mu)
    else:
        group.energy = new_energy
        group.mu = new_mu
    return
Example #10
def autobk(energy, mu=None, group=None, rbkg=1, nknots=None, e0=None,
           edge_step=None, kmin=0, kmax=None, kweight=1, dk=0,
           win='hanning', k_std=None, chi_std=None, nfft=2048, kstep=0.05,
           pre_edge_kws=None, nclamp=4, clamp_lo=1, clamp_hi=1,
           calc_uncertainties=True, err_sigma=1, _larch=None, **kws):
    """Use Autobk algorithm to remove XAFS background

    Parameters:
    -----------
      energy:    1-d array of x-ray energies, in eV, or group
      mu:        1-d array of mu(E)
      group:     output group (and input group for e0 and edge_step).
      rbkg:      distance (in Ang) for chi(R) above
                 which the signal is ignored. Default = 1.
      e0:        edge energy, in eV.  If None, it will be determined.
      edge_step: edge step.  If None, it will be determined.
      pre_edge_kws:  keyword arguments to pass to pre_edge()
      nknots:    number of knots in spline.  If None, it will be determined.
      kmin:      minimum k value   [0]
      kmax:      maximum k value   [full data range].
      kweight:   k weight for FFT.  [1]
      dk:        FFT window parameter.  [0]
      win:       FFT window function name.     ['hanning']
      nfft:      array size to use for FFT [2048]
      kstep:     k step size to use for FFT [0.05]
      k_std:     optional k array for standard chi(k).
      chi_std:   optional chi array for standard chi(k).
      nclamp:    number of energy end-points for clamp [4]
      clamp_lo:  weight of low-energy clamp [1]
      clamp_hi:  weight of high-energy clamp [1]
      calc_uncertainties:  Flag to calculate uncertainties in
                            mu_0(E) and chi(k) [True]
      err_sigma: sigma level for uncertainties in mu_0(E) and chi(k) [1]

    Output arrays are written to the provided group.

    Follows the 'First Argument Group' convention.
    """
    msg = _larch.writer.write
    if 'kw' in kws:
        kweight = kws.pop('kw')
    if len(kws) > 0:
        msg('Unrecognized arguments for autobk():\n')
        msg('    %s\n' % (', '.join(kws.keys())))
        return
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='autobk')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    energy = remove_dups(energy)
    # if e0 or edge_step are not specified, get them, either from the
    # passed-in group or from running pre_edge()
    group = set_xafsGroup(group, _larch=_larch)

    if edge_step is None and isgroup(group, 'edge_step'):
        edge_step = group.edge_step
    if e0 is None and isgroup(group, 'e0'):
        e0 = group.e0
    if e0 is None or edge_step is None:
        # need to run pre_edge:
        pre_kws = dict(nnorm=3, nvict=0, pre1=None,
                       pre2=-50., norm1=100., norm2=None)
        if pre_edge_kws is not None:
            pre_kws.update(pre_edge_kws)
        pre_edge(energy, mu, group=group, _larch=_larch, **pre_kws)
        if e0 is None:
            e0 = group.e0
        if edge_step is None:
            edge_step = group.edge_step
    if e0 is None or edge_step is None:
        msg('autobk() could not determine e0 or edge_step!: try running pre_edge first\n')
        return

    # get array indices for rbkg and e0: irbkg, ie0
    ie0 = index_of(energy, e0)
    rgrid = np.pi/(kstep*nfft)
    if rbkg < 2*rgrid: rbkg = 2*rgrid
    irbkg = int(1.01 + rbkg/rgrid)

    # save ungridded k (kraw) and gridded k (kout)
    # and ftwin (*k-weighting) for FT in residual
    enpe = energy[ie0:] - e0
    kraw = np.sign(enpe)*np.sqrt(ETOK*abs(enpe))
    if kmax is None:
        kmax = max(kraw)
    else:
        kmax = max(0, min(max(kraw), kmax))
    kout  = kstep * np.arange(int(1.01+kmax/kstep), dtype='float64')
    iemax = min(len(energy), 2+index_of(energy, e0+kmax*kmax/ETOK)) - 1

    # interpolate provided chi(k) onto the kout grid
    if chi_std is not None and k_std is not None:
        chi_std = np.interp(kout, k_std, chi_std)
    # pre-load FT window
    ftwin = kout**kweight * ftwindow(kout, xmin=kmin, xmax=kmax,
                                     window=win, dx=dk)
    # calc k-value and initial guess for y-values of spline params
    nspl = max(4, min(128, 2*int(rbkg*(kmax-kmin)/np.pi) + 1))
    spl_y, spl_k, spl_e  = np.zeros(nspl), np.zeros(nspl), np.zeros(nspl)
    for i in range(nspl):
        q  = kmin + i*(kmax-kmin)/(nspl - 1)
        ik = index_nearest(kraw, q)
        i1 = min(len(kraw)-1, ik + 5)
        i2 = max(0, ik - 5)
        spl_k[i] = kraw[ik]
        spl_e[i] = energy[ik+ie0]
        spl_y[i] = (2*mu[ik+ie0] + mu[i1+ie0] + mu[i2+ie0] ) / 4.0

    # get spline representation: knots, coefs, order=3
    # coefs will be varied in fit.
    knots, coefs, order = splrep(spl_k, spl_y)

    # set fit parameters from initial coefficients
    params = Parameters()
    for i in range(len(coefs)):
        params.add(name = FMT_COEF % i, value=coefs[i], vary=i<len(spl_y))

    initbkg, initchi = spline_eval(kraw[:iemax-ie0+1], mu[ie0:iemax+1],
                                   knots, coefs, order, kout)

    # do fit
    result = minimize(__resid, params, method='leastsq',
                      gtol=1.e-5, ftol=1.e-5, xtol=1.e-5, epsfcn=1.e-5,
                      kws = dict(ncoefs=len(coefs), chi_std=chi_std,
                                 knots=knots, order=order,
                                 kraw=kraw[:iemax-ie0+1],
                                 mu=mu[ie0:iemax+1], irbkg=irbkg, kout=kout,
                                 ftwin=ftwin, kweight=kweight,
                                 nfft=nfft, nclamp=nclamp,
                                 clamp_lo=clamp_lo, clamp_hi=clamp_hi))

    # write final results
    coefs = [result.params[FMT_COEF % i].value for i in range(len(coefs))]
    bkg, chi = spline_eval(kraw[:iemax-ie0+1], mu[ie0:iemax+1],
                           knots, coefs, order, kout)
    obkg = np.copy(mu)
    obkg[ie0:ie0+len(bkg)] = bkg

    # outputs to group
    group = set_xafsGroup(group, _larch=_larch)
    group.bkg  = obkg
    group.chie = (mu-obkg)/edge_step
    group.k    = kout
    group.chi  = chi/edge_step

    # now fill in 'autobk_details' group
    details = Group(params=result.params)

    details.init_bkg = np.copy(mu)
    details.init_bkg[ie0:ie0+len(bkg)] = initbkg
    details.init_chi = initchi/edge_step
    details.knots_e  = spl_e
    details.knots_y  = np.array([coefs[i] for i in range(nspl)])
    details.init_knots_y = spl_y
    details.nfev = result.nfev
    details.kmin = kmin
    details.kmax = kmax
    group.autobk_details = details

    # uncertainties in mu0 and chi: can be fairly slow.
    if calc_uncertainties:
        nchi = len(chi)
        nmue = iemax-ie0 + 1
        redchi = result.redchi
        covar  = result.covar / redchi
        jac_chi = np.zeros(nchi*nspl).reshape((nspl, nchi))
        jac_bkg = np.zeros(nmue*nspl).reshape((nspl, nmue))

        cvals, cerrs = [], []
        for i in range(len(coefs)):
             par = result.params[FMT_COEF % i]
             cvals.append(getattr(par, 'value', 0.0))
             cdel = getattr(par, 'stderr', 0.0)
             if cdel is None:
                 cdel = 0.0
             cerrs.append(cdel/2.0)
        cvals = np.array(cvals)
        cerrs = np.array(cerrs)

        # find derivatives by hand!
        _k = kraw[:nmue]
        _m = mu[ie0:iemax+1]
        for i in range(nspl):
            cval0 = cvals[i]
            cvals[i] = cval0 + cerrs[i]
            bkg1, chi1 = spline_eval(_k, _m, knots, cvals, order, kout)

            cvals[i] = cval0 - cerrs[i]
            bkg2, chi2 = spline_eval(_k, _m, knots, cvals, order, kout)

            cvals[i] = cval0
            jac_chi[i] = (chi1 - chi2) / (2*cerrs[i])
            jac_bkg[i] = (bkg1 - bkg2) / (2*cerrs[i])

        dfchi = np.zeros(nchi)
        dfbkg = np.zeros(nmue)
        for i in range(nspl):
            for j in range(nspl):
                dfchi += jac_chi[i]*jac_chi[j]*covar[i,j]
                dfbkg += jac_bkg[i]*jac_bkg[j]*covar[i,j]

        prob = 0.5*(1.0 + erf(err_sigma/np.sqrt(2.0)))
        dchi = t.ppf(prob, nchi-nspl) * np.sqrt(dfchi*redchi)
        dbkg = t.ppf(prob, nmue-nspl) * np.sqrt(dfbkg*redchi)

        group.delta_chi = dchi
        group.delta_bkg = 0.0*mu
        group.delta_bkg[ie0:ie0+len(dbkg)] = dbkg
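
A hedged sketch of where autobk() sits in a typical reduction chain. The version above writes messages through _larch.writer, so a session object is passed explicitly; dat is assumed to hold 'energy' and 'mu', and the imports assume a recent xraylarch.

from larch import Interpreter
from larch.xafs import pre_edge, autobk, xftf

session = Interpreter()
pre_edge(dat, _larch=session)                      # supplies dat.e0 and dat.edge_step
autobk(dat, rbkg=1.0, kweight=2, _larch=session)   # writes dat.bkg, dat.k, dat.chi
# forward Fourier transform of the extracted chi(k), for inspection in R-space
xftf(dat, kmin=2, kmax=12, dk=4, kweight=2, window='hanning', _larch=session)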
Example #11
def pre_edge(energy, mu=None, group=None, e0=None, step=None,
             nnorm=3, nvict=0, pre1=None, pre2=-50,
             norm1=100, norm2=None, make_flat=True, _larch=None):
    """pre edge subtraction, normalization for XAFS

    This performs a number of steps:
       1. determine E0 (if not supplied) from max of deriv(mu)
       2. fit a line (or low-order polynomial) to the region below the edge
       3. fit a polynomial to the region above the edge
       4. extrapolate the two curves to E0 to determine the edge jump

    Arguments
    ----------
    energy:  array of x-ray energies, in eV, or group (see note)
    mu:      array of mu(E)
    group:   output group
    e0:      edge energy, in eV.  If None, it will be determined here.
    step:    edge jump.  If None, it will be determined here.
    pre1:    low E range (relative to E0) for pre-edge fit
    pre2:    high E range (relative to E0) for pre-edge fit
    nvict:   energy exponent to use for pre-edge fit.  See Note
    norm1:   low E range (relative to E0) for post-edge fit
    norm2:   high E range (relative to E0) for post-edge fit
    nnorm:   degree of polynomial (ie, nnorm+1 coefficients will be found) for
             post-edge normalization curve. Default=3 (cubic), max=5
    make_flat: boolean (Default True) to calculate flattened output.


    Returns
    -------
      None

    The following attributes will be written to the output group:
        e0          energy origin
        edge_step   edge step
        norm        normalized mu(E)
        flat        flattened, normalized mu(E)
        pre_edge    determined pre-edge curve
        post_edge   determined post-edge, normalization curve
        dmude       derivative of mu(E)

    (if the output group is None, _sys.xafsGroup will be written to)

    Notes
    -----
     1 nvict gives an exponent to the energy term for the fits to the pre-edge
       and the post-edge region.  For the pre-edge, a line (m * energy + b) is
       fit to mu(energy)*energy**nvict over the pre-edge region,
       energy=[e0+pre1, e0+pre2].  For the post-edge, a polynomial of order
       nnorm will be fit to mu(energy)*energy**nvict of the post-edge region
       energy=[e0+norm1, e0+norm2].

     2 If the first argument is a Group, it must contain 'energy' and 'mu'.
       If it exists, group.e0 will be used as e0.
       See First Argument Group in Documentation
    """



    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='pre_edge')
    pre_dat = preedge(energy, mu, e0=e0, step=step, nnorm=nnorm,
                      nvict=nvict, pre1=pre1, pre2=pre2, norm1=norm1,
                      norm2=norm2)


    group = set_xafsGroup(group, _larch=_larch)

    e0    = pre_dat['e0']
    norm  = pre_dat['norm']
    norm1 = pre_dat['norm1']
    norm2 = pre_dat['norm2']
    # generate flattened spectra, by fitting a quadratic to .norm
    # and removing that.
    flat = norm
    ie0 = index_nearest(energy, e0)
    p1 = index_of(energy, norm1+e0)
    p2 = index_nearest(energy, norm2+e0)
    if p2-p1 < 2:
        p2 = min(len(energy), p1 + 2)

    if make_flat and p2-p1 > 4:
        enx, mux = remove_nans2(energy[p1:p2], norm[p1:p2])
        # enx, mux = (energy[p1:p2], norm[p1:p2])
        fpars = Group(c0 = Parameter(0, vary=True),
                      c1 = Parameter(0, vary=True),
                      c2 = Parameter(0, vary=True),
                      en=enx, mu=mux)
        fit = Minimizer(flat_resid, fpars, _larch=_larch, toler=1.e-5)
        try:
            fit.leastsq()
        except (TypeError, ValueError):
            pass
        fc0, fc1, fc2  = fpars.c0.value, fpars.c1.value, fpars.c2.value
        flat_diff   = fc0 + energy * (fc1 + energy * fc2)
        flat        = norm - flat_diff  + flat_diff[ie0]
        flat[:ie0]  = norm[:ie0]


    group.e0 = e0
    group.norm = norm
    group.flat = flat
    group.dmude = np.gradient(mu)/np.gradient(energy)
    group.edge_step  = pre_dat['edge_step']
    group.pre_edge   = pre_dat['pre_edge']
    group.post_edge  = pre_dat['post_edge']

    group.pre_edge_details = Group()
    group.pre_edge_details.pre1   = pre_dat['pre1']
    group.pre_edge_details.pre2   = pre_dat['pre2']
    group.pre_edge_details.norm1  = pre_dat['norm1']
    group.pre_edge_details.norm2  = pre_dat['norm2']
    group.pre_edge_details.pre_slope  = pre_dat['precoefs'][0]
    group.pre_edge_details.pre_offset = pre_dat['precoefs'][1]

    for i in range(MAX_NNORM):
        if hasattr(group, 'norm_c%i' % i):
            delattr(group, 'norm_c%i' % i)
    for i, c in enumerate(pre_dat['norm_coefs']):
        setattr(group.pre_edge_details, 'norm_c%i' % i, c)
    return
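
A hedged sketch of a routine normalization call with explicit fit ranges (the values are only illustrative); dat is assumed to hold 'energy' and 'mu', and recent xraylarch lets the _larch argument be omitted.

from larch.xafs import pre_edge

pre_edge(dat, pre1=-150, pre2=-50, norm1=100, norm2=400, nnorm=2)
print("E0 = %.2f eV, edge step = %.4f" % (dat.e0, dat.edge_step))
# dat.norm is normalized mu(E); dat.flat is the flattened curve used for XANES
# comparison, and dat.pre_edge_details records the ranges and coefficients used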
Example #12
def mback_norm(energy, mu=None, group=None, z=None, edge='K', e0=None,
               pre1=None, pre2=-50, norm1=100, norm2=None, nnorm=1, nvict=1,
               _larch=None):
    """
    simplified version of MBACK to Match mu(E) data for tabulated f''(E)
    for normalization

    Arguments:
      energy, mu:  arrays of energy and mu(E)
      group:       output group (and input group for e0)
      z:           Z number of absorber
      e0:          edge energy
      pre1:        low E range (relative to E0) for pre-edge fit
      pre2:        high E range (relative to E0) for pre-edge fit
      norm1:       low E range (relative to E0) for post-edge fit
      norm2:       high E range (relative to E0) for post-edge fit
      nnorm:       degree of polynomial (ie, nnorm+1 coefficients will be
                   found) for post-edge normalization curve fit to the
                   scaled f2. Default=1 (linear)

    Returns:
      group.norm_poly:     normalized mu(E) from pre_edge()
      group.norm:          normalized mu(E) from this method
      group.mback_mu:      tabulated f2 scaled and pre_edge added to match mu(E)
      group.mback_params:  Group of parameters for the minimization

    References:
      * MBACK (Weng, Waldo, Penner-Hahn): http://dx.doi.org/10.1086/303711
      * Chantler: http://dx.doi.org/10.1063/1.555974
    """
    ### implement the First Argument Group convention
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='mback')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    group = set_xafsGroup(group, _larch=_larch)
    group.norm_poly = group.norm*1.0

    if z is not None:              # nominal edge energy from tables
        e0_nominal = xray_edge(z, edge)[0]
    if e0 is None:
        e0 = getattr(group, 'e0', None)
        if e0 is None:
            find_e0(energy, mu, group=group)
            e0 = group.e0

    atsym = None
    if z is None or z < 2:
        atsym, edge = guess_edge(group.e0, _larch=_larch)
        z = atomic_number(atsym)
    if atsym is None and z is not None:
        atsym = atomic_symbol(z)

    if getattr(group, 'pre_edge_details', None) is None:  # pre_edge never run
        preedge(energy, mu, pre1=pre1, pre2=pre2, nvict=nvict,
                norm1=norm1, norm2=norm2, e0=e0, nnorm=nnorm)

    mu_pre = mu - group.pre_edge
    f2 = f2_chantler(z, energy)

    weights = np.ones(len(energy))*1.0

    if norm2 is None:
        norm2 = max(energy) - e0

    if norm2 < 0:
        norm2 = max(energy) - e0  - norm2

    # avoid l2 and higher edges
    if edge.lower().startswith('l'):
        if edge.lower() == 'l3':
            e_l2 = xray_edge(z, 'L2').edge
            norm2 = min(norm2,  e_l2-e0)
        elif edge.lower() == 'l2':
            e_l1 = xray_edge(z, 'L1').edge
            norm2 = min(norm2,  e_l1-e0)

    ipre2 = index_of(energy, e0+pre2)
    inor1 = index_of(energy, e0+norm1)
    inor2 = index_of(energy, e0+norm2) + 1


    weights[ipre2:] = 0.0
    weights[inor1:inor2] = np.linspace(0.1, 1.0, inor2-inor1)

    params = Parameters()
    params.add(name='slope',   value=0.0,    vary=True)
    params.add(name='offset',  value=-f2[0], vary=True)
    params.add(name='scale',   value=f2[-1], vary=True)

    out = minimize(f2norm, params, method='leastsq',
                   gtol=1.e-5, ftol=1.e-5, xtol=1.e-5, epsfcn=1.e-5,
                   kws = dict(en=energy, mu=mu_pre, f2=f2, weights=weights))

    p = out.params.valuesdict()

    model = (p['offset'] + p['slope']*energy + f2) * p['scale']

    group.mback_mu = model + group.pre_edge

    pre_f2 = preedge(energy, model, nnorm=nnorm, nvict=nvict, e0=e0,
                     pre1=pre1, pre2=pre2, norm1=norm1, norm2=norm2)

    step_new = pre_f2['edge_step']

    group.edge_step_poly  = group.edge_step
    group.edge_step_mback = step_new
    group.norm_mback = mu_pre / step_new


    group.mback_params = Group(e0=e0, pre1=pre1, pre2=pre2, norm1=norm1,
                               norm2=norm2, nnorm=nnorm, fit_params=p,
                               fit_weights=weights, model=model, f2=f2,
                               pre_f2=pre_f2, atsym=atsym, edge=edge)

    if (abs(step_new - group.edge_step)/(1.e-13+group.edge_step)) > 0.75:
        print("Warning: mback edge step failed....")
    else:
        group.edge_step = step_new
        group.norm       = group.norm_mback
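
A hedged sketch of refining the pre_edge() edge step with mback_norm(). The code above reads group.norm, group.pre_edge, and group.edge_step, so pre_edge() must be run first; dat is assumed to hold 'energy' and 'mu', Z=26 is iron, and imports assume a recent xraylarch.

from larch.xafs import pre_edge, mback_norm

pre_edge(dat)                     # provides dat.norm, dat.pre_edge and dat.edge_step
mback_norm(dat, z=26, edge='K')
print(dat.edge_step_poly, dat.edge_step_mback)   # polynomial vs f2-matched edge step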
Example #13
def mback(energy, mu, group=None, order=3, z=None, edge='K', e0=None, emin=None, emax=None,
          whiteline=None, leexiang=False, tables='chantler', fit_erfc=False, return_f1=False,
          _larch=None):
    """
    Match mu(E) data for tabulated f''(E) using the MBACK algorithm and,
    optionally, the Lee & Xiang extension

    Arguments:
      energy, mu:    arrays of energy and mu(E)
      order:         order of polynomial [3]
      group:         output group (and input group for e0)
      z:             Z number of absorber
      edge:          absorption edge (K, L3)
      e0:            edge energy
      emin:          beginning energy for fit
      emax:          ending energy for fit
      whiteline:     exclusion zone around white lines
      leexiang:      flag to use the Lee & Xiang extension
      tables:        'chantler' (default) or 'cl'
      fit_erfc:      True to float parameters of error function
      return_f1:     True to put the f1 array in the group

    Returns:
      group.f2:      tabulated f2(E)
      group.f1:      tabulated f1(E) (if return_f1 is True)
      group.fpp:     matched data
      group.mback_params:  Group of parameters for the minimization

    References:
      * MBACK (Weng, Waldo, Penner-Hahn): http://dx.doi.org/10.1086/303711
      * Lee and Xiang: http://dx.doi.org/10.1088/0004-637X/702/2/970
      * Cromer-Liberman: http://dx.doi.org/10.1063/1.1674266
      * Chantler: http://dx.doi.org/10.1063/1.555974
    """
    order=int(order)
    if order < 1: order = 1 # set order of polynomial
    if order > MAXORDER: order = MAXORDER

    ### implement the First Argument Group convention
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='mback')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    group = set_xafsGroup(group, _larch=_larch)

    if e0 is None:              # use tabulated edge energy
        e0 = xray_edge(z, edge, _larch=_larch)[0]
    if e0 is None:
        e0 = group.e0
    if e0 is None:
        find_e0(energy, mu, group=group)


    ### theta is an array used to exclude the regions <emin, >emax, and
    ### around white lines, theta=0.0 in excluded regions, theta=1.0 elsewhere
    (i1, i2) = (0, len(energy)-1)
    if emin is not None: i1 = index_of(energy, emin)
    if emax is not None: i2 = index_of(energy, emax)
    theta = np.ones(len(energy)) # default: 1 throughout
    theta[0:i1]  = 0
    theta[i2:-1] = 0
    if whiteline:
        pre     = 1.0*(energy<e0)
        post    = 1.0*(energy>e0+float(whiteline))
        theta   = theta * (pre + post)
    if edge.lower().startswith('l'):
        l2      = xray_edge(z, 'L2', _larch=_larch)[0]
        l2_pre  = 1.0*(energy<l2)
        l2_post = 1.0*(energy>l2+float(whiteline))
        theta   = theta * (l2_pre + l2_post)


    ## this is used to weight the pre- and post-edge differently as
    ## defined in the MBACK paper
    weight1 = 1*(energy<e0)
    weight2 = 1*(energy>e0)
    weight  = np.sqrt(sum(weight1))*weight1 + np.sqrt(sum(weight2))*weight2


    ## get the f'' function from CL or Chantler
    if tables.lower() == 'chantler':
        f1 = f1_chantler(z, energy, _larch=_larch)
        f2 = f2_chantler(z, energy, _larch=_larch)
    else:
        (f1, f2) = f1f2(z, energy, edge=edge, _larch=_larch)
    group.f2=f2
    if return_f1: group.f1=f1

    n = edge
    if edge.lower().startswith('l'): n = 'L'
    params = Group(s      = Parameter(1, vary=True, _larch=_larch),     # scale of data
                   xi     = Parameter(50, vary=fit_erfc, min=0, _larch=_larch), # width of erfc
                   em     = Parameter(xray_line(z, n, _larch=_larch)[0], vary=False, _larch=_larch), # erfc centroid
                   e0     = Parameter(e0, vary=False, _larch=_larch),   # abs. edge energy
                   ## various arrays need by the objective function
                   en     = energy,
                   mu     = mu,
                   f2     = group.f2,
                   weight = weight,
                   theta  = theta,
                   leexiang = leexiang,
                   _larch = _larch)
    if fit_erfc:
        params.a = Parameter(1, vary=True,  _larch=_larch) # amplitude of erfc
    else:
        params.a = Parameter(0, vary=False, _larch=_larch) # amplitude of erfc

    for i in range(order): # polynomial coefficients
        setattr(params, 'c%d' % i, Parameter(0, vary=True, _larch=_larch))

    fit = Minimizer(match_f2, params, _larch=_larch, toler=1.e-5)
    fit.leastsq()

    eoff = energy - params.e0.value
    normalization_function = params.a.value*erfc((energy-params.em.value)/params.xi.value) + params.c0.value
    for i in range(MAXORDER):
        j = i+1
        attr = 'c%d' % j
        if hasattr(params, attr):
            normalization_function  = normalization_function + getattr(getattr(params, attr), 'value') * eoff**j

    group.fpp = params.s*mu - normalization_function
    group.mback_params = params
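
A hedged sketch of matching measured mu(E) to the tabulated f''(E) scale. Arrays are passed explicitly because the signature above takes mu positionally; dat is assumed to hold 'energy' and 'mu', Z=29 is copper, and the import assumes a recent xraylarch.

from larch.xafs import mback

mback(dat.energy, dat.mu, group=dat, z=29, edge='K',
      order=3, tables='chantler', fit_erfc=True)
# dat.fpp is mu(E) placed on the f''(E) scale; dat.f2 is the tabulated Chantler curve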
Example #14
def rebin_xafs(energy,
               mu=None,
               group=None,
               e0=None,
               pre1=None,
               pre2=-30,
               pre_step=2,
               xanes_step=None,
               exafs1=15,
               exafs2=None,
               exafs_kstep=0.05,
               method='centroid',
               _larch=None):
    """rebin XAFS energy and mu to a 'standard 3 region XAFS scan'

    Arguments
    ---------
    energy       input energy array
    mu           input mu array
    group        output group
    e0           energy reference -- all energy values are relative to this
    pre1         start of pre-edge region [1st energy point]
    pre2         end of pre-edge region, start of XANES region [-30]
    pre_step     energy step for pre-edge region [2]
    xanes_step   energy step for XANES region [see note]
    exafs1       end of XANES region, start of EXAFS region [15]
    exafs2       end of EXAFS region [last energy point]
    exafs_kstep  k-step for EXAFS region [0.05]
    method       one of 'boxcar', 'centroid' ['centroid']

    Returns
    -------
      None

    A group named 'rebinned' will be created in the output group, with the
    following  attributes:
        energy  new energy array
        mu      mu for energy array
        e0      e0 copied from current group

    (if the output group is None, _sys.xafsGroup will be written to)

    Notes
    ------
     1 If the first argument is a Group, it must contain 'energy' and 'mu'.
       See First Argument Group in Documentation

     2 If xanes_step is None, it will be found from the data.  If it is
       given, it may be increased to better fit the input energy array.

     3 The EXAFS region will be spaced in k-space

     4 The rebinned data is found by determining which segments of the
       input energy correspond to each bin in the new energy array. That
       is, each input energy is assigned to exactly one bin in the new
       array.  For each new energy bin, the new value is selected from the
       data in the segment as either
         a) linear interpolation if there are fewer than 3 points in the segment.
         b) mean value ('boxcar')
         c) centroid ('centroid')

    """
    energy, mu, group = parse_group_args(energy,
                                         members=('energy', 'mu'),
                                         defaults=(mu, ),
                                         group=group,
                                         fcn_name='rebin_xafs')

    if e0 is None:
        e0 = getattr(group, 'e0', None)

    if e0 is None:
        raise ValueError("need e0")

    if pre1 is None:
        pre1 = pre_step * int((min(energy) - e0) / pre_step)

    if exafs2 is None:
        exafs2 = max(energy) - e0

    # determine xanes step size:
    #  find mean of energy difference, ignoring first/last 1% of energies
    npts = len(energy)
    n1 = max(2, int(npts / 100.0))
    de_mean = np.diff(energy[n1:-n1]).mean()
    xanes_step_def = max(0.1, 0.05 * (1 + int(de_mean / 0.05)))
    if xanes_step is None:
        xanes_step = xanes_step_def
    else:
        xanes_step = max(xanes_step, xanes_step_def)

    # create new energy array from the 3 segments (pre, xanes, exafs)
    en = []
    for start, stop, step, isk in ((pre1, pre2, pre_step,
                                    False), (pre2, exafs1, xanes_step, False),
                                   (exafs1, exafs2, exafs_kstep, True)):
        if isk:
            start = etok(start)
            stop = etok(stop)
        reg = np.linspace(start + step, stop,
                          int(0.1 + abs(stop - start) / step))
        if isk:
            reg = ktoe(reg)
        en.extend(e0 + reg)

    # find the segment boundaries of the old energy array
    bounds = [index_of(energy, e) for e in en]
    mu_out = []
    err_out = []
    j0 = 0
    for i in range(len(en)):
        if i == len(en) - 1:
            j1 = len(energy) - 1
        else:
            j1 = int((bounds[i] + bounds[i + 1] + 1) / 2.0)
        # if not enough points in segment, do interpolation
        if (j1 - j0) < 3:
            jx = j1 + 1
            if (jx - j0) < 2:
                jx += 1
            val = interp1d(energy[j0:jx], mu[j0:jx], en[i])
            err = mu[j0:j1].std()
        else:
            if method.startswith('box'):
                val = mu[j0:j1].mean()
            else:
                val = (mu[j0:j1] * energy[j0:j1]).mean() / energy[j0:j1].mean()
        mu_out.append(val)
        err_out.append(mu[j0:j1].std())
        j0 = j1

    newname = group.__name__ + '_rebinned'
    group.rebinned = Group(energy=np.array(en),
                           mu=np.array(mu_out),
                           delta_mu=np.array(err_out),
                           e0=e0,
                           __name__=newname)
    return
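
A hedged sketch of rebinning a quick-scan onto the standard three-region grid. The code above builds the output name from group.__name__, so the sketch sets it first; dat is assumed to hold 'energy' and 'mu', and imports assume a recent xraylarch.

from larch.xafs import pre_edge, rebin_xafs

pre_edge(dat)                    # provides dat.e0, which rebin_xafs() requires
dat.__name__ = 'scan1'           # used above to name the 'rebinned' sub-group
rebin_xafs(dat, pre_step=2.0, exafs_kstep=0.05, method='centroid')
reb = dat.rebinned               # sub-group with rebinned energy, mu and delta_mu
print(len(dat.energy), '->', len(reb.energy))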
Example #15
def xas_deconvolve(energy,
                   norm=None,
                   group=None,
                   form='lorentzian',
                   esigma=1.0,
                   eshift=0.0,
                   smooth=True,
                   sgwindow=None,
                   sgorder=3,
                   _larch=None):
    """XAS spectral deconvolution

    de-convolve a normalized mu(E) spectrum with a peak shape, enhancing the
    intensity and separation of peaks of a XANES spectrum.

    The results can be unstable, and noisy, and should be used
    with caution!

    Arguments
    ----------
    energy:   array of x-ray energies (in eV) or XAFS data group
    norm:     array of normalized mu(E)
    group:    output group
    form:     functional form of deconvolution function. One of
              'gaussian' or 'lorentzian' [default]
    esigma    energy sigma to pass to gaussian() or lorentzian()
              [in eV, default=1.0]
    eshift    energy shift to apply to result. [in eV, default=0]
    smooth    whether to smooth result with savitzky_golay method [True]
    sgwindow  window size for savitzky_golay [found from data step and esigma]
    sgorder   order for savitzky_golay [3]

    Returns
    -------
    None
       The array 'deconv' will be written to the output group.

    Notes
    -----
       Supports the First Argument Group convention, requiring group
       members 'energy' and 'norm'

       Smoothing with savitzky_golay() requires a window and order.  By
       default, window = int(esigma / estep) where estep is step size for
       the gridded data, approximately the finest energy step in the data.
    """
    if _larch is None:
        raise Warning("cannot deconvolve -- larch broken?")

    energy, mu, group = parse_group_args(energy,
                                         members=('energy', 'norm'),
                                         defaults=(norm, ),
                                         group=group,
                                         fcn_name='xas_deconvolve')
    eshift = eshift + 0.5 * esigma

    en = remove_dups(energy)
    en = en - en[0]
    estep = max(0.001, 0.001 * int(min(en[1:] - en[:-1]) * 1000.0))
    npts = 1 + int(max(en) / estep)

    x = np.arange(npts) * estep
    y = interp(en, mu, x, kind='cubic', _larch=_larch)

    kernel = lorentzian
    if form.lower().startswith('g'):
        kernel = gaussian

    yext = np.concatenate((y, np.arange(len(y)) * y[-1]))
    ret, err = deconvolve(yext, kernel(x, center=0, sigma=esigma))
    nret = min(len(x), len(ret))

    ret = ret[:nret] * yext[nret - 1] / ret[nret - 1]
    if smooth:
        if sgwindow is None:
            sgwindow = int(1.0 * esigma / estep)

        sgwindow = int(sgwindow)
        if sgwindow < (sgorder + 1):
            sgwindow = sgorder + 2
        if sgwindow % 2 == 0:
            sgwindow += 1
        ret = savitzky_golay(ret, sgwindow, sgorder)

    out = interp(x + eshift, ret, en, kind='cubic', _larch=_larch)
    group = set_xafsGroup(group, _larch=_larch)
    group.deconv = out
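
A hedged sketch of the same deconvolution with Savitzky-Golay smoothing switched on. As with the earlier version, the code shown requires a live session; dat is assumed to already hold 'energy' and 'norm'.

from larch import Interpreter
from larch.xafs import xas_deconvolve

session = Interpreter()
xas_deconvolve(dat, form='lorentzian', esigma=1.0,
               smooth=True, sgorder=3, _larch=session)
# pass sgwindow explicitly (an odd integer larger than sgorder) to tune the smoothing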
Example #16
def mback(energy,
          mu=None,
          group=None,
          order=3,
          z=None,
          edge='K',
          e0=None,
          emin=None,
          emax=None,
          whiteline=None,
          leexiang=False,
          tables='chantler',
          fit_erfc=False,
          return_f1=False,
          _larch=None):
    """
    Match mu(E) data for tabulated f''(E) using the MBACK algorithm and,
    optionally, the Lee & Xiang extension

    Arguments:
      energy, mu:    arrays of energy and mu(E)
      order:         order of polynomial [3]
      group:         output group (and input group for e0)
      z:             Z number of absorber
      edge:          absorption edge (K, L3)
      e0:            edge energy
      emin:          beginning energy for fit
      emax:          ending energy for fit
      whiteline:     exclusion zone around white lines
      leexiang:      flag to use the Lee & Xiang extension
      tables:        'chantler' (default) or 'cl'
      fit_erfc:      True to float parameters of error function
      return_f1:     True to put the f1 array in the group

    Returns:
      group.f2:      tabulated f2(E)
      group.f1:      tabulated f1(E) (if return_f1 is True)
      group.fpp:     matched data
      group.mback_params:  Group of parameters for the minimization

    References:
      * MBACK (Weng, Waldo, Penner-Hahn): http://dx.doi.org/10.1086/303711
      * Lee and Xiang: http://dx.doi.org/10.1088/0004-637X/702/2/970
      * Cromer-Liberman: http://dx.doi.org/10.1063/1.1674266
      * Chantler: http://dx.doi.org/10.1063/1.555974
    """
    order = int(order)
    if order < 1: order = 1  # set order of polynomial
    if order > MAXORDER: order = MAXORDER

    ### implement the First Argument Group convention
    energy, mu, group = parse_group_args(energy,
                                         members=('energy', 'mu'),
                                         defaults=(mu, ),
                                         group=group,
                                         fcn_name='mback')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    group = set_xafsGroup(group, _larch=_larch)

    if e0 is None:  # use tabulated edge energy
        e0 = xray_edge(z, edge, _larch=_larch)[0]
    if e0 is None:
        e0 = group.e0
    if e0 is None:
        find_e0(energy, mu, group=group)

    ### theta is an array used to exclude the regions <emin, >emax, and
    ### around white lines, theta=0.0 in excluded regions, theta=1.0 elsewhere
    (i1, i2) = (0, len(energy) - 1)
    if emin is not None: i1 = index_of(energy, emin)
    if emax is not None: i2 = index_of(energy, emax)
    theta = np.ones(len(energy))  # default: 1 throughout
    theta[0:i1] = 0
    theta[i2:-1] = 0
    if whiteline:
        pre = 1.0 * (energy < e0)
        post = 1.0 * (energy > e0 + float(whiteline))
        theta = theta * (pre + post)
    if edge.lower().startswith('l'):
        l2 = xray_edge(z, 'L2', _larch=_larch)[0]
        l2_pre = 1.0 * (energy < l2)
        l2_post = 1.0 * (energy > l2 + float(whiteline))
        theta = theta * (l2_pre + l2_post)

    ## this is used to weight the pre- and post-edge differently as
    ## defined in the MBACK paper
    weight1 = 1 * (energy < e0)
    weight2 = 1 * (energy > e0)
    weight = np.sqrt(sum(weight1)) * weight1 + np.sqrt(sum(weight2)) * weight2
    ## get the f'' function from CL or Chantler
    if tables.lower() == 'chantler':
        f1 = f1_chantler(z, energy, _larch=_larch)
        f2 = f2_chantler(z, energy, _larch=_larch)
    else:
        (f1, f2) = f1f2(z, energy, edge=edge, _larch=_larch)
    group.f2 = f2
    if return_f1:
        group.f1 = f1

    em = xray_line(z, edge.upper(), _larch=_larch)[0]  # erfc centroid

    params = Parameters()
    params.add(name='s', value=1, vary=True)  # scale of data
    params.add(name='xi', value=50, vary=fit_erfc, min=0)  # width of erfc
    params.add(name='a', value=0, vary=False)  # amplitude of erfc
    if fit_erfc:
        params['a'].value = 1
        params['a'].vary = True

    for i in range(order):  # polynomial coefficients
        params.add(name='c%d' % i, value=0, vary=True)

    out = minimize(match_f2,
                   params,
                   method='leastsq',
                   gtol=1.e-5,
                   ftol=1.e-5,
                   xtol=1.e-5,
                   epsfcn=1.e-5,
                   kws=dict(en=energy,
                            mu=mu,
                            f2=f2,
                            e0=e0,
                            em=em,
                            order=order,
                            weight=weight,
                            theta=theta,
                            leexiang=leexiang))

    opars = out.params.valuesdict()
    eoff = energy - e0

    norm_function = opars['a'] * erfc(
        (energy - em) / opars['xi']) + opars['c0']
    for i in range(order):
        j = i + 1
        attr = 'c%d' % j
        if attr in opars:
            norm_function += opars[attr] * eoff**j

    group.e0 = e0
    group.fpp = opars['s'] * mu - norm_function
    group.mback_params = opars
    tmp = Group(energy=energy, mu=group.f2 - norm_function, e0=0)

    # calculate edge step from f2 + norm_function: should be very smooth
    pre_f2 = preedge(energy, group.f2 + norm_function, e0=e0, nnorm=2, nvict=0)
    group.edge_step = pre_f2['edge_step'] / opars['s']

    pre_fpp = preedge(energy, mu, e0=e0, nnorm=2, nvict=0)

    group.norm = (mu - pre_fpp['pre_edge']) / group.edge_step
Example #17
def autobk(energy, mu=None, group=None, rbkg=1, nknots=None, e0=None,
           edge_step=None, kmin=0, kmax=None, kweight=1, dk=0.1,
           win='hanning', k_std=None, chi_std=None, nfft=2048, kstep=0.05,
           pre_edge_kws=None, nclamp=4, clamp_lo=1, clamp_hi=1,
           calc_uncertainties=True, err_sigma=1, _larch=None, **kws):
    """Use Autobk algorithm to remove XAFS background

    Parameters:
    -----------
      energy:    1-d array of x-ray energies, in eV, or group
      mu:        1-d array of mu(E)
      group:     output group (and input group for e0 and edge_step).
      rbkg:      distance (in Ang) for chi(R) above
                 which the signal is ignored. Default = 1.
      e0:        edge energy, in eV.  If None, it will be determined.
      edge_step: edge step.  If None, it will be determined.
      pre_edge_kws:  keyword arguments to pass to pre_edge()
      nknots:    number of knots in spline.  If None, it will be determined.
      kmin:      minimum k value   [0]
      kmax:      maximum k value   [full data range].
      kweight:   k weight for FFT.  [1]
      dk:        FFT window parameter.  [0.1]
      win:       FFT window function name.     ['hanning']
      nfft:      array size to use for FFT [2048]
      kstep:     k step size to use for FFT [0.05]
      k_std:     optional k array for standard chi(k).
      chi_std:   optional chi array for standard chi(k).
      nclamp:    number of energy end-points for clamp [4]
      clamp_lo:  weight of low-energy clamp [1]
      clamp_hi:  weight of high-energy clamp [1]
      calc_uncertainties:  Flag to calculate uncertainties in
                            mu_0(E) and chi(k) [True]
      err_sigma: sigma level for uncertainties in mu_0(E) and chi(k) [1]

    Output arrays are written to the provided group.

    Follows the 'First Argument Group' convention.
    """
    msg = sys.stdout.write
    if _larch is not None:
        msg = _larch.writer.write
    if 'kw' in kws:
        kweight = kws.pop('kw')
    if len(kws) > 0:
        msg('Unrecognized arguments for autobk():\n')
        msg('    %s\n' % (', '.join(kws.keys())))
        return
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='autobk')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    energy = remove_dups(energy)
    # if e0 or edge_step are not specified, get them, either from the
    # passed-in group or from running pre_edge()
    group = set_xafsGroup(group, _larch=_larch)

    if edge_step is None and isgroup(group, 'edge_step'):
        edge_step = group.edge_step
    if e0 is None and isgroup(group, 'e0'):
        e0 = group.e0
    if e0 is None or edge_step is None:
        # need to run pre_edge:
        pre_kws = dict(nnorm=3, nvict=0, pre1=None,
                       pre2=-50., norm1=100., norm2=None)
        if pre_edge_kws is not None:
            pre_kws.update(pre_edge_kws)
        pre_edge(energy, mu, group=group, _larch=_larch, **pre_kws)
        if e0 is None:
            e0 = group.e0
        if edge_step is None:
            edge_step = group.edge_step
    if e0 is None or edge_step is None:
        msg('autobk() could not determine e0 or edge_step!: try running pre_edge first\n')
        return

    # get array indices for rbkg and e0: irbkg, ie0
    ie0 = index_of(energy, e0)
    rgrid = np.pi/(kstep*nfft)
    if rbkg < 2*rgrid: rbkg = 2*rgrid
    irbkg = int(1.01 + rbkg/rgrid)

    # save ungridded k (kraw) and gridded k (kout)
    # and ftwin (*k-weighting) for FT in residual
    enpe = energy[ie0:] - e0
    kraw = np.sign(enpe)*np.sqrt(ETOK*abs(enpe))
    if kmax is None:
        kmax = max(kraw)
    else:
        kmax = max(0, min(max(kraw), kmax))
    kout  = kstep * np.arange(int(1.01+kmax/kstep), dtype='float64')
    iemax = min(len(energy), 2+index_of(energy, e0+kmax*kmax/ETOK)) - 1

    # interpolate provided chi(k) onto the kout grid
    if chi_std is not None and k_std is not None:
        chi_std = np.interp(kout, k_std, chi_std)
    # pre-load FT window
    ftwin = kout**kweight * ftwindow(kout, xmin=kmin, xmax=kmax,
                                     window=win, dx=dk, dx2=dk)
    # calc k-value and initial guess for y-values of spline params
    nspl = max(5, min(64, int(2*rbkg*(kmax-kmin)/np.pi) + 2))
    spl_y, spl_k, spl_e  = np.zeros(nspl), np.zeros(nspl), np.zeros(nspl)
    for i in range(nspl):
        q  = kmin + i*(kmax-kmin)/(nspl - 1)
        ik = index_nearest(kraw, q)
        i1 = min(len(kraw)-1, ik + 5)
        i2 = max(0, ik - 5)
        spl_k[i] = kraw[ik]
        spl_e[i] = energy[ik+ie0]
        spl_y[i] = (2*mu[ik+ie0] + mu[i1+ie0] + mu[i2+ie0] ) / 4.0

    # get spline representation: knots, coefs, order=3
    # coefs will be varied in fit.
    knots, coefs, order = splrep(spl_k, spl_y)

    # set fit parameters from initial coefficients
    params = Parameters()
    for i in range(len(coefs)):
        params.add(name = FMT_COEF % i, value=coefs[i], vary=i<len(spl_y))

    initbkg, initchi = spline_eval(kraw[:iemax-ie0+1], mu[ie0:iemax+1],
                                   knots, coefs, order, kout)

    # do fit
    result = minimize(__resid, params, method='leastsq',
                      gtol=1.e-5, ftol=1.e-5, xtol=1.e-5, epsfcn=1.e-5,
                      kws = dict(ncoefs=len(coefs), chi_std=chi_std,
                                 knots=knots, order=order,
                                 kraw=kraw[:iemax-ie0+1],
                                 mu=mu[ie0:iemax+1], irbkg=irbkg, kout=kout,
                                 ftwin=ftwin, kweight=kweight,
                                 nfft=nfft, nclamp=nclamp,
                                 clamp_lo=clamp_lo, clamp_hi=clamp_hi))

    # write final results
    coefs = [result.params[FMT_COEF % i].value for i in range(len(coefs))]
    bkg, chi = spline_eval(kraw[:iemax-ie0+1], mu[ie0:iemax+1],
                           knots, coefs, order, kout)
    obkg = np.copy(mu)
    obkg[ie0:ie0+len(bkg)] = bkg

    # outputs to group
    group = set_xafsGroup(group, _larch=_larch)
    group.bkg  = obkg
    group.chie = (mu-obkg)/edge_step
    group.k    = kout
    group.chi  = chi/edge_step
    group.e0   = e0

    # now fill in 'autobk_details' group
    details = Group(params=result.params)

    details.init_bkg = np.copy(mu)
    details.init_bkg[ie0:ie0+len(bkg)] = initbkg
    details.init_chi = initchi/edge_step
    details.knots_e  = spl_e
    details.knots_y  = np.array([coefs[i] for i in range(nspl)])
    details.init_knots_y = spl_y
    details.nfev = result.nfev
    details.kmin = kmin
    details.kmax = kmax
    group.autobk_details = details

    # uncertainties in mu0 and chi: can be fairly slow.
    if calc_uncertainties:
        nchi = len(chi)
        nmue = iemax-ie0 + 1
        redchi = result.redchi
        covar  = result.covar / redchi
        jac_chi = np.zeros(nchi*nspl).reshape((nspl, nchi))
        jac_bkg = np.zeros(nmue*nspl).reshape((nspl, nmue))

        cvals, cerrs = [], []
        for i in range(len(coefs)):
             par = result.params[FMT_COEF % i]
             cvals.append(getattr(par, 'value', 0.0))
             cdel = getattr(par, 'stderr', 0.0)
             if cdel is None:
                 cdel = 0.0
             cerrs.append(cdel/2.0)
        cvals = np.array(cvals)
        cerrs = np.array(cerrs)

        # find derivatives by hand!
        _k = kraw[:nmue]
        _m = mu[ie0:iemax+1]
        for i in range(nspl):
            cval0 = cvals[i]
            cvals[i] = cval0 + cerrs[i]
            bkg1, chi1 = spline_eval(_k, _m, knots, cvals, order, kout)

            cvals[i] = cval0 - cerrs[i]
            bkg2, chi2 = spline_eval(_k, _m, knots, cvals, order, kout)

            cvals[i] = cval0
            jac_chi[i] = (chi1 - chi2) / (2*cerrs[i])
            jac_bkg[i] = (bkg1 - bkg2) / (2*cerrs[i])

        dfchi = np.zeros(nchi)
        dfbkg = np.zeros(nmue)
        for i in range(nspl):
            for j in range(nspl):
                dfchi += jac_chi[i]*jac_chi[j]*covar[i,j]
                dfbkg += jac_bkg[i]*jac_bkg[j]*covar[i,j]

        prob = 0.5*(1.0 + erf(err_sigma/np.sqrt(2.0)))
        dchi = t.ppf(prob, nchi-nspl) * np.sqrt(dfchi*redchi)
        dbkg = t.ppf(prob, nmue-nspl) * np.sqrt(dfbkg*redchi)

        group.delta_chi = dchi
        group.delta_bkg = 0.0*mu
        group.delta_bkg[ie0:ie0+len(dbkg)] = dbkg
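
A minimal usage sketch for autobk(), assuming it is importable as larch.xafs.autobk and that Group comes from larch (as in recent xraylarch releases); the energy and mu arrays are synthetic stand-ins for a measured spectrum.

import numpy as np
from larch import Group              # assumed import path (xraylarch)
from larch.xafs import autobk        # assumed import path (xraylarch)

# synthetic, step-like mu(E) standing in for a measured Cu K-edge scan
energy = np.linspace(8800.0, 9800.0, 2001)
mu = 0.2 + 0.8/(1.0 + np.exp(-(energy - 8979.0)/2.0)) + 0.01*np.sin(energy/30.0)

dat = Group(energy=energy, mu=mu)
autobk(dat, rbkg=1.0, kweight=1)     # First Argument Group convention
print(dat.k[:5], dat.chi[:5])        # k grid and chi(k) written to the group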
Example #19
def fluo_corr(energy,
              mu,
              formula,
              elem,
              group=None,
              edge='K',
              anginp=45,
              angout=45,
              _larch=None,
              **pre_kws):
    """correct over-absorption (self-absorption) for fluorescene XAFS
    using the FLUO alogrithm of D. Haskel.

    Arguments
    ---------
      energy    array of energies
      mu        uncorrected fluorescence mu
      formula   string for sample stoichiometry
      elem      atomic symbol or Z of absorbing element
      group     output group [default None]
      edge      name of edge ('K', 'L3', ...) [default 'K']
      anginp    input angle in degrees  [default 45]
      angout    output angle in degrees  [default 45]

    Additional keywords will be passed to pre_edge(), which will be used
    to ensure consistent normalization.

    Returns
    --------
       None, writes `mu_corr` and `norm_corr` (normalized `mu_corr`)
       to output group.

    Notes
    -----
       Supports First Argument Group convention, requiring group
       members 'energy' and 'mu'
    """
    energy, mu, group = parse_group_args(energy,
                                         members=('energy', 'mu'),
                                         defaults=(mu, ),
                                         group=group,
                                         fcn_name='fluo_corr')

    # generate normalized mu for correction
    preinp = preedge(energy, mu, **pre_kws)
    mu_inp = preinp['norm']

    anginp = max(1.e-7, np.deg2rad(anginp))
    angout = max(1.e-7, np.deg2rad(angout))

    # find edge energies and fluorescence line energy
    e_edge = xray_edge(elem, edge, _larch=_larch)[0]
    e_fluor = xray_line(elem, edge, _larch=_larch)[0]

    # calculate mu(E) for fluorescence energy, above, below edge
    energies = np.array([e_fluor, e_edge - 10.0, e_edge + 10.0])
    muvals = material_mu(formula, energies, density=1, _larch=_larch)

    mu_fluor = muvals[0] * np.sin(anginp) / np.sin(angout)
    mu_below = muvals[1]
    mu_celem = muvals[2] - muvals[1]

    alpha = (mu_fluor + mu_below) / mu_celem
    mu_corr = mu_inp * alpha / (alpha + 1 - mu_inp)
    preout = preedge(energy, mu_corr, **pre_kws)

    if group is not None:
        group = set_xafsGroup(group, _larch=_larch)
        group.mu_corr = mu_corr
        group.norm_corr = preout['norm']
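
A hypothetical call to fluo_corr(), assuming the larch.xafs import path; the stoichiometry, element, angles, and data are illustrative values only.

import numpy as np
from larch import Group              # assumed import path (xraylarch)
from larch.xafs import fluo_corr     # assumed import path (xraylarch)

energy = np.linspace(11700.0, 12400.0, 1401)                 # eV, around the As K edge
mufl = 0.1 + 0.9/(1.0 + np.exp(-(energy - 11867.0)/2.0))     # uncorrected fluorescence mu
dat = Group(energy=energy, mu=mufl)

fluo_corr(energy, mufl, 'FeAsS', 'As', group=dat, edge='K', anginp=45, angout=45)
# dat.mu_corr and dat.norm_corr now hold the over-absorption-corrected spectra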
Example #20
def cauchy_wavelet(k, chi=None, group=None, kweight=0, rmax_out=10,
                   nfft=2048, _larch=None):
    """
    Cauchy Wavelet Transform for XAFS, following work of Munoz, Argoul, and Farges

    Parameters:
    -----------
      k:        1-d array of photo-electron wavenumber in Ang^-1 or group
      chi:      1-d array of chi
      group:    output Group
      rmax_out: highest R for output data (10 Ang)
      kweight:  exponent for weighting spectra by k**kweight
      nfft:     value to use for N_fft (2048).

    Returns:
    ---------
      None   -- outputs are written to supplied group.

    Notes:
    -------
    Arrays written to output group:
    r                  uniform array of R, out to rmax_out.
    wcauchy            complex cauchy wavelet(k, R)
    wcauchy_mag        magnitude of wavelet(k, R)
    wcauchy_re         real part of wavelet(k, R)
    wcauchy_im         imaginary part of wavelet(k, R)

    Supports First Argument Group convention (with group
    member names 'k' and 'chi')

    """
    k, chi, group = parse_group_args(k, members=('k', 'chi'),
                                     defaults=(chi,), group=group,
                                     fcn_name='cauchy_wavelet')

    kstep = np.round(1000.*(k[1]-k[0]))/1000.0
    rstep = (np.pi/2048)/kstep
    rmin = 1.e-7
    rmax = rmax_out
    nrpts = int(np.round((rmax-rmin)/rstep))
    nkout = len(k)
    if kweight != 0:
        chi = chi * k**kweight

    # extend EXAFS to 1024 data points...
    NFT = int(nfft/2)
    if len(k) < NFT:
        knew = np.arange(NFT) * kstep
        xnew = np.zeros(NFT) * kstep
        xnew[:len(k)] = chi
    else:
        knew = k[:NFT]
        xnew = chi[:NFT]

    # FT parameters
    freq = (1.0/kstep)*np.arange(nfft)/(2*nfft)
    omega = 2*np.pi*freq

    # simple FT calculation
    tff = np.fft.fft(xnew, n= 2*nfft)

    # scale parameter
    r  = np.linspace(0, rmax, nrpts)
    r[0] = 1.e-19
    a  = nrpts/(2*r)

    # Characteristic values for Cauchy wavelet:
    cauchy_sum = np.log(2*np.pi) - np.log(1.0+np.arange(nrpts)).sum()

    # Main calculation:
    out = np.zeros(nkout*nrpts,
                   dtype='complex128').reshape(nrpts, nkout)
    for i in range(nrpts):
        aom = a[i]*omega
        aom[np.where(aom==0)] = 1.e-19
        filt = cauchy_sum + nrpts*np.log(aom) - aom
        tmp  = np.conj(np.exp(filt))*tff[:nfft]
        out[i, :] = np.fft.ifft(tmp, 2*nfft)[:nkout]

    group = set_xafsGroup(group, _larch=_larch)
    group.r  =  r
    group.wcauchy =  out
    group.wcauchy_mag =  np.sqrt(out.real**2 + out.imag**2)
    group.wcauchy_re =  out.real
    group.wcauchy_im =  out.imag
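
A short sketch of calling cauchy_wavelet() on a toy single-shell chi(k), assuming the larch.xafs import path; the oscillation parameters are arbitrary.

import numpy as np
from larch import Group                   # assumed import path (xraylarch)
from larch.xafs import cauchy_wavelet     # assumed import path (xraylarch)

k = np.arange(0.05, 14.0, 0.05)
chi = 0.4*np.sin(2*2.2*k + 1.0)*np.exp(-2*0.005*k*k)   # toy single-shell chi(k)
dat = Group(k=k, chi=chi)

cauchy_wavelet(dat, kweight=2, rmax_out=8)
print(dat.r.shape, dat.wcauchy_mag.shape)   # wavelet magnitude on the (R, k) plane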
Example #21
def rebin_xafs(energy, mu=None, group=None, e0=None, pre1=None, pre2=-30,
               pre_step=2, xanes_step=None, exafs1=15, exafs2=None,
               exafs_kstep=0.05, method='centroid', _larch=None):
    """rebin XAFS energy and mu to a 'standard 3 region XAFS scan'

    Arguments
    ---------
    energy       input energy array
    mu           input mu array
    group        output group
    e0           energy reference -- all energy values are relative to this
    pre1         start of pre-edge region [1st energy point]
    pre2         end of pre-edge region, start of XANES region [-30]
    pre_step     energy step for pre-edge region [2]
    xanes_step   energy step for XANES region [see note]
    exafs1       end of XANES region, start of EXAFS region [15]
    exafs2       end of EXAFS region [last energy point]
    exafs_kstep  k-step for EXAFS region [0.05]
    method       one of 'boxcar', 'centroid' ['centroid']

    Returns
    -------
      None

    A group named 'rebinned' will be created in the output group, with the
    following  attributes:
        energy  new energy array
        mu      mu for energy array
        e0      e0 copied from current group

    (if the output group is None, _sys.xafsGroup will be written to)

    Notes
    ------
     1 If the first argument is a Group, it must contain 'energy' and 'mu'.
       See First Argument Group in Documentation

     2 If xanes_step is None, it will be found from the data.  If it is
       given, it may be increased to better fit the input energy array.

     3 The EXAFS region will be spaced in k-space

     4 The rebinned data is found by determining which segments of the
       input energy correspond to each bin in the new energy array. That
       is, each input energy is assigned to exactly one bin in the new
       array.  For each new energy bin, the new value is selected from the
       data in the segment as either
         a) linear interpolation if there are fewer than 3 points in the segment.
         b) mean value ('boxcar')
         c) centroid ('centroid')

    """
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                        fcn_name='rebin_xafs')

    if e0 is None:
        e0 = getattr(group, 'e0', None)

    if e0 is None:
        raise ValueError("need e0")

    if pre1 is None:
        pre1 = pre_step*int((min(energy) - e0)/pre_step)

    if exafs2 is None:
        exafs2 = max(energy) - e0

    # determine xanes step size:
    #  find mean of energy difference, ignoring first/last 1% of energies
    npts = len(energy)
    n1 = max(2, int(npts/100.0))
    de_mean = np.diff(energy[n1:-n1]).mean()
    xanes_step_def = max(0.1, 0.05 * (1 + int(de_mean/0.05)))
    if xanes_step is None:
        xanes_step = xanes_step_def
    else:
        xanes_step = max(xanes_step, xanes_step_def)

    # create new energy array from the 3 segments (pre, xanes, exafs)
    en = []
    for start, stop, step, isk in ((pre1, pre2, pre_step, False),
                                   (pre2, exafs1, xanes_step, False),
                                   (exafs1, exafs2, exafs_kstep, True)):
        if isk:
            start = etok(start)
            stop = etok(stop)
        reg = np.linspace(start+step, stop, int(0.1 + abs(stop-start)/step))
        if isk:
            reg = ktoe(reg)
        en.extend(e0 + reg)

    # find the segment boundaries of the old energy array
    bounds = [index_of(energy, e) for e in en]
    mu_out = []
    err_out = []
    j0 = 0
    for i in range(len(en)):
        if i == len(en) - 1:
            j1 = len(energy) - 1
        else:
            j1 = int((bounds[i] + bounds[i+1] + 1)/2.0)
        # if not enough points in segment, do interpolation
        if (j1 - j0) < 3:
            jx = j1 + 1
            if (jx - j0) < 2:
                jx += 1
            val = interp1d(energy[j0:jx], mu[j0:jx], en[i])
            err = mu[j0:j1].std()
        else:
            if method.startswith('box'):
                val =  mu[j0:j1].mean()
            else:
                val = (mu[j0:j1]*energy[j0:j1]).mean()/energy[j0:j1].mean()
        mu_out.append(val)
        err_out.append(mu[j0:j1].std())
        j0 = j1

    newname = group.__name__ + '_rebinned'
    group.rebinned = Group(energy=np.array(en), mu=np.array(mu_out),
                           delta_mu=np.array(err_out), e0=e0,
                           __name__=newname)
    return
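
A minimal sketch for rebin_xafs(), assuming the larch.xafs import path and synthetic data; e0 is passed explicitly so pre_edge() is not needed first, xanes_step is set above the coarse synthetic grid spacing, and __name__ is set because this implementation uses it to name the rebinned sub-group.

import numpy as np
from larch import Group                 # assumed import path (xraylarch)
from larch.xafs import rebin_xafs       # assumed import path (xraylarch)

energy = np.linspace(6900.0, 7700.0, 3201)     # 0.25 eV steps
mu = 0.3 + 0.7/(1.0 + np.exp(-(energy - 7112.0)/1.5))
dat = Group(energy=energy, mu=mu)
dat.__name__ = 'scan1'                  # used to build the rebinned group's name

rebin_xafs(dat, e0=7112.0, xanes_step=0.75, method='centroid')
print(len(dat.rebinned.energy), len(dat.rebinned.mu))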
Example #22
def mback_norm(energy, mu=None, group=None, z=None, edge='K', e0=None,
               pre1=None, pre2=None, norm1=None, norm2=None, nnorm=None, nvict=1,
               _larch=None):
    """
    simplified version of MBACK to Match mu(E) data for tabulated f''(E)
    for normalization

    Arguments:
      energy, mu:  arrays of energy and mu(E)
      group:       output group (and input group for e0)
      z:           Z number of absorber
      e0:          edge energy
      pre1:        low E range (relative to E0) for pre-edge fit
      pre2:        high E range (relative to E0) for pre-edge fit
      norm1:       low E range (relative to E0) for post-edge fit
      norm2:       high E range (relative to E0) for post-edge fit
      nnorm:       degree of polynomial (ie, nnorm+1 coefficients will be
                   found) for post-edge normalization curve fit to the
                   scaled f2. Default=1 (linear)

    Returns:
      group.norm_poly:     normalized mu(E) from pre_edge()
      group.norm:          normalized mu(E) from this method
      group.mback_mu:      tabulated f2 scaled and pre_edge added to match mu(E)
      group.mback_params:  Group of parameters for the minimization

    References:
      * MBACK (Weng, Waldo, Penner-Hahn): http://dx.doi.org/10.1086/303711
      * Chantler: http://dx.doi.org/10.1063/1.555974
    """
    ### implement the First Argument Group convention
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='mback')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    if _larch is not None:
        group = set_xafsGroup(group, _larch=_larch)
    group.norm_poly = group.norm*1.0

    if z is not None:              # get nominal edge energy from tables
        e0_nominal = xray_edge(z, edge).energy
    if e0 is None:
        e0 = getattr(group, 'e0', None)
        if e0 is None:
            find_e0(energy, mu, group=group)
            e0 = group.e0

    atsym = None
    if z is None or z < 2:
        atsym, edge = guess_edge(group.e0)
        z = atomic_number(atsym)
    if atsym is None and z is not None:
        atsym = atomic_symbol(z)

    if getattr(group, 'pre_edge_details', None) is None:  # pre_edge never run
        preedge(energy, mu, pre1=pre1, pre2=pre2, nvict=nvict,
                norm1=norm1, norm2=norm2, e0=e0, nnorm=nnorm)

    mu_pre = mu - group.pre_edge
    f2 = f2_chantler(z, energy)

    weights = np.ones(len(energy))*1.0

    if norm2 is None:
        norm2 = max(energy) - e0

    if norm2 < 0:
        norm2 = max(energy) - e0  - norm2

    # avoid l2 and higher edges
    if edge.lower().startswith('l'):
        if edge.lower() == 'l3':
            e_l2 = xray_edge(z, 'L2').energy
            norm2 = min(norm2,  e_l2-e0)
        elif edge.lower() == 'l2':
            e_l1 = xray_edge(z, 'L1').energy
            norm2 = min(norm2,  e_l1-e0)

    ipre2 = index_of(energy, e0+pre2)
    inor1 = index_of(energy, e0+norm1)
    inor2 = index_of(energy, e0+norm2) + 1


    weights[ipre2:] = 0.0
    weights[inor1:inor2] = np.linspace(0.1, 1.0, inor2-inor1)

    params = Parameters()
    params.add(name='slope',   value=0.0,    vary=True)
    params.add(name='offset',  value=-f2[0], vary=True)
    params.add(name='scale',   value=f2[-1], vary=True)

    out = minimize(f2norm, params, method='leastsq',
                   gtol=1.e-5, ftol=1.e-5, xtol=1.e-5, epsfcn=1.e-5,
                   kws = dict(en=energy, mu=mu_pre, f2=f2, weights=weights))

    p = out.params.valuesdict()

    model = (p['offset'] + p['slope']*energy + f2) * p['scale']

    group.mback_mu = model + group.pre_edge

    pre_f2 = preedge(energy, model, nnorm=nnorm, nvict=nvict, e0=e0,
                     pre1=pre1, pre2=pre2, norm1=norm1, norm2=norm2)

    step_new = pre_f2['edge_step']

    group.edge_step_poly  = group.edge_step
    group.edge_step_mback = step_new
    group.norm_mback = mu_pre / step_new


    group.mback_params = Group(e0=e0, pre1=pre1, pre2=pre2, norm1=norm1,
                               norm2=norm2, nnorm=nnorm, fit_params=p,
                               fit_weights=weights, model=model, f2=f2,
                               pre_f2=pre_f2, atsym=atsym, edge=edge)

    if (abs(step_new - group.edge_step)/(1.e-13+group.edge_step)) > 0.75:
        print("Warning: mback edge step failed....")
    else:
        group.edge_step = step_new
        group.norm       = group.norm_mback
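
A hedged sketch for mback_norm(): it assumes the larch.xafs import path and that pre_edge() has already populated the group (norm, pre_edge, edge_step); the fit ranges are given explicitly rather than relying on defaults.

import numpy as np
from larch import Group                        # assumed import path (xraylarch)
from larch.xafs import pre_edge, mback_norm    # assumed import paths (xraylarch)

energy = np.linspace(11700.0, 12400.0, 1401)
mu = 0.15 + 0.85/(1.0 + np.exp(-(energy - 11867.0)/2.0))
dat = Group(energy=energy, mu=mu)

pre_edge(dat)                                  # provides dat.norm, dat.pre_edge, dat.edge_step
mback_norm(dat, z=33, edge='K', pre2=-50, norm1=100, norm2=400)
print(dat.edge_step_poly, dat.edge_step_mback)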
Example #23
def xftf(k,
         chi=None,
         group=None,
         kmin=0,
         kmax=20,
         kweight=0,
         dk=1,
         dk2=None,
         with_phase=False,
         window='kaiser',
         rmax_out=10,
         nfft=2048,
         kstep=0.05,
         _larch=None,
         **kws):
    """
    forward XAFS Fourier transform, from chi(k) to chi(R), using
    common XAFS conventions.

    Parameters:
    -----------
      k:        1-d array of photo-electron wavenumber in Ang^-1 or group
      chi:      1-d array of chi
      group:    output Group
      rmax_out: highest R for output data (10 Ang)
      kweight:  exponent for weighting spectra by k**kweight
      kmin:     starting k for FT Window
      kmax:     ending k for FT Window
      dk:       tapering parameter for FT Window
      dk2:      second tapering parameter for FT Window
      window:   name of window type
      nfft:     value to use for N_fft (2048).
      kstep:    value to use for delta_k (0.05 Ang^-1).
      with_phase: output the phase as well as magnitude, real, imag  [False]

    Returns:
    ---------
      None   -- outputs are written to supplied group.

    Notes:
    -------
    Arrays written to output group:
        kwin               window function Omega(k) (length of input chi(k)).
        r                  uniform array of R, out to rmax_out.
        chir               complex array of chi(R).
        chir_mag           magnitude of chi(R).
        chir_re            real part of chi(R).
        chir_im            imaginary part of chi(R).
        chir_pha           phase of chi(R) if with_phase=True
                           (a noticeable performance hit)

    Supports First Argument Group convention (with group member names 'k' and 'chi')
    """
    # allow kweight keyword == kw
    if 'kw' in kws:
        kweight = kws['kw']

    k, chi, group = parse_group_args(k,
                                     members=('k', 'chi'),
                                     defaults=(chi, ),
                                     group=group,
                                     fcn_name='xftf')

    cchi, win = xftf_prep(k,
                          chi,
                          kmin=kmin,
                          kmax=kmax,
                          kweight=kweight,
                          dk=dk,
                          dk2=dk2,
                          nfft=nfft,
                          kstep=kstep,
                          window=window,
                          _larch=_larch)

    out = xftf_fast(cchi * win, kstep=kstep, nfft=nfft)
    rstep = pi / (kstep * nfft)

    irmax = int(min(nfft / 2, 1.01 + rmax_out / rstep))

    group = set_xafsGroup(group, _larch=_larch)
    r = rstep * arange(irmax)
    mag = sqrt(out.real**2 + out.imag**2)
    group.kwin = win[:len(chi)]
    group.r = r[:irmax]
    group.chir = out[:irmax]
    group.chir_mag = mag[:irmax]
    group.chir_re = out.real[:irmax]
    group.chir_im = out.imag[:irmax]
    if with_phase:
        group.chir_pha = complex_phase(out[:irmax])
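
A minimal forward-transform sketch, assuming the larch.xafs import path; the single-frequency chi(k) below should give a |chi(R)| peak near R = 2.3 Ang.

import numpy as np
from larch import Group          # assumed import path (xraylarch)
from larch.xafs import xftf      # assumed import path (xraylarch)

k = np.arange(0, 15.0, 0.05)
chi = 0.5*np.sin(2*2.3*k + 1.2)*np.exp(-2*0.006*k*k)     # toy single-shell chi(k)
dat = Group(k=k, chi=chi)

xftf(dat, kmin=2, kmax=13, dk=3, window='kaiser', kweight=2)
print(dat.r[np.argmax(dat.chir_mag)])    # location of the |chi(R)| peak, near 2.3 Ang here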
Example #24
def mback(energy,
          mu,
          group=None,
          order=3,
          z=None,
          edge='K',
          e0=None,
          emin=None,
          emax=None,
          whiteline=None,
          leexiang=False,
          tables='chantler',
          fit_erfc=False,
          return_f1=False,
          _larch=None):
    """
    Match mu(E) data for tabulated f''(E) using the MBACK algorithm and,
    optionally, the Lee & Xiang extension

    Arguments:
      energy, mu:    arrays of energy and mu(E)
      order:         order of polynomial [3]
      group:         output group (and input group for e0)
      z:             Z number of absorber
      edge:          absorption edge (K, L3)
      e0:            edge energy
      emin:          beginning energy for fit
      emax:          ending energy for fit
      whiteline:     exclusion zone around white lines
      leexiang:      flag to use the Lee & Xiang extension
      tables:        'chantler' (default) or 'cl'
      fit_erfc:      True to float parameters of error function
      return_f1:     True to put the f1 array in the group

    Returns:
      group.f2:      tabulated f2(E)
      group.f1:      tabulated f1(E) (if return_f1 is True)
      group.fpp:     matched data
      group.mback_params:  Group of parameters for the minimization

    References:
      * MBACK (Weng, Waldo, Penner-Hahn): http://dx.doi.org/10.1086/303711
      * Lee and Xiang: http://dx.doi.org/10.1088/0004-637X/702/2/970
      * Cromer-Liberman: http://dx.doi.org/10.1063/1.1674266
      * Chantler: http://dx.doi.org/10.1063/1.555974
    """
    order = int(order)
    if order < 1: order = 1  # set order of polynomial
    if order > MAXORDER: order = MAXORDER

    ### implement the First Argument Group convention
    energy, mu, group = parse_group_args(energy,
                                         members=('energy', 'mu'),
                                         defaults=(mu, ),
                                         group=group,
                                         fcn_name='mback')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    group = set_xafsGroup(group, _larch=_larch)

    if e0 is None:  # need to run find_e0:
        e0 = xray_edge(z, edge, _larch=_larch)[0]
    if e0 is None:
        e0 = group.e0
    if e0 is None:
        find_e0(energy, mu, group=group)

    ### theta is an array used to exclude the regions <emin, >emax, and
    ### around white lines, theta=0.0 in excluded regions, theta=1.0 elsewhere
    (i1, i2) = (0, len(energy) - 1)
    if emin is not None: i1 = index_of(energy, emin)
    if emax is not None: i2 = index_of(energy, emax)
    theta = np.ones(len(energy))  # default: 1 throughout
    theta[0:i1] = 0
    theta[i2:-1] = 0
    if whiteline:
        pre = 1.0 * (energy < e0)
        post = 1.0 * (energy > e0 + float(whiteline))
        theta = theta * (pre + post)
    if edge.lower().startswith('l'):
        l2 = xray_edge(z, 'L2', _larch=_larch)[0]
        l2_pre = 1.0 * (energy < l2)
        l2_post = 1.0 * (energy > l2 + float(whiteline))
        theta = theta * (l2_pre + l2_post)

    ## this is used to weight the pre- and post-edge differently as
    ## defined in the MBACK paper
    weight1 = 1 * (energy < e0)
    weight2 = 1 * (energy > e0)
    weight = np.sqrt(sum(weight1)) * weight1 + np.sqrt(sum(weight2)) * weight2

    ## get the f'' function from CL or Chantler
    if tables.lower() == 'chantler':
        f1 = f1_chantler(z, energy, _larch=_larch)
        f2 = f2_chantler(z, energy, _larch=_larch)
    else:
        (f1, f2) = f1f2(z, energy, edge=edge, _larch=_larch)
    group.f2 = f2
    if return_f1: group.f1 = f1

    n = edge
    if edge.lower().startswith('l'): n = 'L'
    params = Group(
        s=Parameter(1, vary=True, _larch=_larch),  # scale of data
        xi=Parameter(50, vary=fit_erfc, min=0, _larch=_larch),  # width of erfc
        em=Parameter(xray_line(z, n, _larch=_larch)[0],
                     vary=False,
                     _larch=_larch),  # erfc centroid
        e0=Parameter(e0, vary=False, _larch=_larch),  # abs. edge energy
        ## various arrays need by the objective function
        en=energy,
        mu=mu,
        f2=group.f2,
        weight=weight,
        theta=theta,
        leexiang=leexiang,
        _larch=_larch)
    if fit_erfc:
        params.a = Parameter(1, vary=True, _larch=_larch)  # amplitude of erfc
    else:
        params.a = Parameter(0, vary=False, _larch=_larch)  # amplitude of erfc

    for i in range(order):  # polynomial coefficients
        setattr(params, 'c%d' % i, Parameter(0, vary=True, _larch=_larch))

    fit = Minimizer(match_f2, params, _larch=_larch, toler=1.e-5)
    fit.leastsq()

    eoff = energy - params.e0.value
    normalization_function = params.a.value * erfc(
        (energy - params.em.value) / params.xi.value) + params.c0.value
    for i in range(MAXORDER):
        j = i + 1
        attr = 'c%d' % j
        if hasattr(params, attr):
            normalization_function = normalization_function + getattr(
                getattr(params, attr), 'value') * eoff**j

    group.fpp = params.s * mu - normalization_function
    group.mback_params = params
Example #25
def mback(energy, mu=None, group=None, z=None, edge='K', e0=None, pre1=None, pre2=-50,
          norm1=100, norm2=None, order=3, leexiang=False, tables='chantler', fit_erfc=False,
          return_f1=False, _larch=None):
    """
    Match mu(E) data for tabulated f''(E) using the MBACK algorithm and,
    optionally, the Lee & Xiang extension

    Arguments
    ----------
      energy:     array of x-ray energies, in eV.
      mu:         array of mu(E).
      group:      output group.
      z:          atomic number of the absorber.
      edge:       x-ray absorption edge (default 'K')
      e0:         edge energy, in eV.  If None, it will be determined here.
      pre1:       low E range (relative to e0) for pre-edge region.
      pre2:       high E range (relative to e0) for pre-edge region.
      norm1:      low E range (relative to e0) for post-edge region.
      norm2:      high E range (relative to e0) for post-edge region.
      order:      order of the Legendre polynomial for normalization
                  (default=3, min=0, max=5).
      leexiang:   boolean (default False)  to use the Lee & Xiang extension.
      tables:     tabulated scattering factors; only 'chantler' is supported (argument deprecated).
      fit_erfc:   boolean (default False) to fit parameters of error function.
      return_f1:  boolean (default False) to include the f1 array in the group.


    Returns
    -------
      None

    The following attributes will be written to the output group:
      group.f2:            tabulated f2(E).
      group.f1:            tabulated f1(E) (if 'return_f1' is True).
      group.fpp:           mback-matched spectrum.
      group.edge_step:     edge step of spectrum.
      group.norm:          normalized spectrum.
      group.mback_params:  group of parameters for the minimization.

    Notes:
        Chantler tables are now used; Cromer-Liberman is no longer supported.
    References:
      * MBACK (Weng, Waldo, Penner-Hahn): http://dx.doi.org/10.1086/303711
      * Lee and Xiang: http://dx.doi.org/10.1088/0004-637X/702/2/970
      * Cromer-Liberman: http://dx.doi.org/10.1063/1.1674266
      * Chantler: http://dx.doi.org/10.1063/1.555974
    """
    order = max(min(order, MAXORDER), 0)

    ### implement the First Argument Group convention
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='mback')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    if _larch is not None:
        group = set_xafsGroup(group, _larch=_larch)

    energy = remove_dups(energy)
    if e0 is None or e0 < energy[1] or e0 > energy[-2]:
        e0 = find_e0(energy, mu, group=group)

    ie0 = index_nearest(energy, e0)
    e0 = energy[ie0]

    pre1_input = pre1
    norm2_input = norm2

    if pre1 is None:  pre1  = min(energy) - e0
    if norm2 is None: norm2 = max(energy) - e0
    if norm2 < 0:     norm2 = max(energy) - e0 - norm2
    pre1  = max(pre1,  (min(energy) - e0))
    norm2 = min(norm2, (max(energy) - e0))

    if pre1 > pre2:
        pre1, pre2 = pre2, pre1
    if norm1 > norm2:
        norm1, norm2 = norm2, norm1

    p1 = index_of(energy, pre1+e0)
    p2 = index_nearest(energy, pre2+e0)
    n1 = index_nearest(energy, norm1+e0)
    n2 = index_of(energy, norm2+e0)
    if p2 - p1 < 2:
        p2 = min(len(energy), p1 + 2)
    if n2 - n1 < 2:
        n2 = min(len(energy), n1 + 2)

    ## theta is a boolean array indicating the
    ## energy values considered for the fit.
    ## theta=1 for included values, theta=0 for excluded values.
    theta            = np.zeros_like(energy, dtype='int')
    theta[p1:(p2+1)] = 1
    theta[n1:(n2+1)] = 1

    ## weights for the pre- and post-edge regions, as defined in the MBACK paper (?)
    weight            = np.ones_like(energy, dtype=float)
    weight[p1:(p2+1)] = np.sqrt(np.sum(weight[p1:(p2+1)]))
    weight[n1:(n2+1)] = np.sqrt(np.sum(weight[n1:(n2+1)]))

    ## get the f'' function from CL or Chantler
    f1 = f1_chantler(z, energy)
    f2 = f2_chantler(z, energy)
    group.f2 = f2
    if return_f1:
        group.f1 = f1

    em = find_xray_line(z, edge).energy # erfc centroid

    params = Parameters()
    params.add(name='s',  value=1.0,  vary=True)  # scale of data
    params.add(name='xi', value=50.0, vary=False, min=0) # width of erfc
    params.add(name='a',  value=0.0, vary=False)  # amplitude of erfc
    if fit_erfc:
        params['a'].vary  = True
        params['a'].value = 0.5
        params['xi'].vary  = True

    for i in range(order+1): # polynomial coefficients
        params.add(name='c%d' % i, value=0, vary=True)

    out = minimize(match_f2, params, method='leastsq',
                   gtol=1.e-5, ftol=1.e-5, xtol=1.e-5, epsfcn=1.e-5,
                   kws = dict(en=energy, mu=mu, f2=f2, e0=e0, em=em,
                              order=order, weight=weight, theta=theta, leexiang=leexiang))

    opars = out.params.valuesdict()
    eoff = energy - e0

    norm_function = opars['a']*erfc((energy-em)/opars['xi']) + opars['c0']
    for i in range(order):
        attr = 'c%d' % (i + 1)
        if attr in opars:
            norm_function  += opars[attr]* eoff**(i + 1)

    group.e0 = e0
    group.fpp = opars['s']*mu - norm_function
    # calculate edge step and normalization from f2 + norm_function
    pre_f2 = preedge(energy, group.f2+norm_function, e0=e0, pre1=pre1,
                     pre2=pre2, norm1=norm1, norm2=norm2, nnorm=2, nvict=0)
    group.edge_step = pre_f2['edge_step'] / opars['s']
    group.norm = (opars['s']*mu -  pre_f2['pre_edge']) / pre_f2['edge_step']
    group.mback_details = Group(params=opars, pre_f2=pre_f2,
                                f2_scaled=opars['s']*f2,
                                norm_function=norm_function)
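
A brief sketch of calling mback() with this keyword layout, assuming the larch.xafs import path; z=33 (As) and the synthetic edge position are illustrative only.

import numpy as np
from larch import Group          # assumed import path (xraylarch)
from larch.xafs import mback     # assumed import path (xraylarch)

energy = np.linspace(11700.0, 12400.0, 1401)
mu = 0.2 + 0.8/(1.0 + np.exp(-(energy - 11867.0)/2.0)) - 2.0e-5*(energy - 11867.0)
dat = Group(energy=energy, mu=mu)

mback(dat, z=33, edge='K', order=3)
# dat.fpp holds mu(E) matched to tabulated f2; dat.norm and dat.edge_step are also set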
Example #26
def pre_edge_baseline(energy, norm=None, group=None, form='lorentzian',
                      emin=None, emax=None, elo=None, ehi=None,
                      with_line=True, _larch=None):
    """remove baseline from main edge over pre edge peak region

    This assumes that pre_edge() has been run successfully on the spectra
    and that the spectra has decent pre-edge subtraction and normalization.

    Arguments
    ----------
    energy:    array of x-ray energies, in eV, or group (see note 1)
    norm:      array of normalized mu(E)
    group:     output group
    elo:       low energy of pre-edge peak region to not fit baseline [e0-20]
    ehi:       high energy of pre-edge peak region to not fit baseline [e0-10]
    emax:      max energy (eV) to use for baseline fit [e0-5]
    emin:      min energy (eV) to use for baseline fit [e0-40]
    form:      form used for baseline (see note 2)  ['lorentzian']
    with_line: whether to include linear component in baseline ['True']


    Returns
    -------
      None

    A group named 'prepeaks' will be created in the output group, with the following
    attributes:
        energy        energy array for pre-edge peaks = energy[emin:emax]
        baseline      fitted baseline array over pre-edge peak energies
        norm          spectrum over pre-edge peak energies
        peaks         baseline-subtracted spectrum over pre-edge peak energies
        centroid      estimated centroid of pre-edge peaks (see note 3)
        peak_energies list of predicted peak energies (see note 4)
        fit_details   details of fit to extract pre-edge peaks.

    (if the output group is None, _sys.xafsGroup will be written to)

    Notes
    -----
     1 If the first argument is a Group, it must contain 'energy' and 'norm'.
       See First Argument Group in Documentation

     2 A function will be fit to the input mu(E) data over the range between
       [emin:elo] and [ehi:emax], ignoring the pre-edge peaks in the
       region [elo:ehi].  The baseline function is specified with the `form`
       keyword argument, which can be one of
           'lorentzian', 'gaussian', or 'voigt',
       with 'lorentzian' the default.  In addition, the `with_line` keyword
       argument can be used to add a line to this baseline function.

     3 The value calculated for `prepeaks.centroid`  will be found as
         (prepeaks.energy*prepeaks.peaks).sum() / prepeaks.peaks.sum()
     4 The values in the `peak_energies` list will be predicted energies
       of the peaks in `prepeaks.peaks` as found by peakutils.

    """
    energy, norm, group = parse_group_args(energy, members=('energy', 'norm'),
                                           defaults=(norm,), group=group,
                                           fcn_name='pre_edge_baseline')

    prepeaks_setup(energy, norm=norm, group=group, emin=emin, emax=emax,
                   elo=elo, ehi=ehi, _larch=_larch)

    emin = group.prepeaks.emin
    emax = group.prepeaks.emax
    elo = group.prepeaks.elo
    ehi = group.prepeaks.ehi

    dele = 1.e-13 + min(np.diff(energy))/5.0

    imin = index_of(energy, emin+dele)
    ilo  = index_of(energy, elo+dele)
    ihi  = index_of(energy, ehi+dele)
    imax = index_of(energy, emax+dele)

    # build xdat, ydat: dat to fit (skipping pre-edge peaks)
    xdat = np.concatenate((energy[imin:ilo+1], energy[ihi:imax+1]))
    ydat = np.concatenate((norm[imin:ilo+1], norm[ihi:imax+1]))


    # build fitting model: note that we always include
    # a LinearModel but may fix slope and intercept
    form = form.lower()
    if form.startswith('voig'):
        model = VoigtModel()
    elif form.startswith('gaus'):
        model = GaussianModel()
    else:
        model = LorentzianModel()

    model += LinearModel()
    params = model.make_params(amplitude=1.0, sigma=2.0,
                               center=emax,
                               intercept=0, slope=0)
    params['amplitude'].min =  0.0
    params['sigma'].min     =  0.25
    params['sigma'].max     = 50.0
    params['center'].max    = emax + 25.0
    params['center'].min    = emax - 25.0

    if not with_line:
        params['slope'].vary = False
        params['intercept'].vary = False

    result = model.fit(ydat, params, x=xdat)

    cen = dcen = 0.
    peak_energies = []

    # energy including pre-edge peaks, for output
    edat = energy[imin: imax+1]
    norm = norm[imin:imax+1]
    bline = peaks = dpeaks = norm*0.0

    # get baseline and resulting norm over edat range
    if result is not None:
        bline = result.eval(result.params, x=edat)
        peaks = norm-bline

        # estimate centroid
        cen = (edat*peaks).sum() / peaks.sum()

        # uncertainty in norm includes only uncertainties in baseline fit
        # and uncertainty in centroid:
        try:
            dpeaks = result.eval_uncertainty(result.params, x=edat)
        except:
            dpeaks = 0.0

        cen_plus = (edat*(peaks+dpeaks)).sum()/ (peaks+dpeaks).sum()
        cen_minus = (edat*(peaks-dpeaks)).sum()/ (peaks-dpeaks).sum()
        dcen = abs(cen_minus - cen_plus) / 2.0

        # locate peak positions
        if HAS_PEAKUTILS:
            peak_ids = peakutils.peak.indexes(peaks, thres=0.05, min_dist=2)
            peak_energies = [edat[pid] for pid in peak_ids]

    group = set_xafsGroup(group, _larch=_larch)
    group.prepeaks = Group(energy=edat, norm=norm, baseline=bline,
                           peaks=peaks, delta_peaks=dpeaks,
                           centroid=cen, delta_centroid=dcen,
                           peak_energies=peak_energies,
                           fit_details=result,
                           emin=emin, emax=emax, elo=elo, ehi=ehi,
                           form=form, with_line=with_line)
    return
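
A sketch of the intended workflow, assuming the larch.xafs import path: run pre_edge() first so the group has e0 and norm, then fit the baseline around a synthetic pre-edge peak.

import numpy as np
from larch import Group                               # assumed import path (xraylarch)
from larch.xafs import pre_edge, pre_edge_baseline    # assumed import paths (xraylarch)

energy = np.linspace(7050.0, 7350.0, 1201)
mu = 0.1 + 0.9/(1.0 + np.exp(-(energy - 7125.0)/1.5))      # main edge
mu = mu + 0.08*np.exp(-0.5*((energy - 7114.0)/1.2)**2)     # small pre-edge peak
dat = Group(energy=energy, mu=mu)

pre_edge(dat)                                  # provides dat.e0 and dat.norm
pre_edge_baseline(dat, form='lorentzian')
print(dat.prepeaks.centroid)                   # centroid of the baseline-subtracted peaks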
Example #27
def xftf(k, chi=None, group=None, kmin=0, kmax=20, kweight=0,
         dk=1, dk2=None, with_phase=False, window='kaiser', rmax_out=10,
         nfft=2048, kstep=0.05, _larch=None, **kws):
    """
    forward XAFS Fourier transform, from chi(k) to chi(R), using
    common XAFS conventions.

    Parameters:
    -----------
      k:        1-d array of photo-electron wavenumber in Ang^-1 or group
      chi:      1-d array of chi
      group:    output Group
      rmax_out: highest R for output data (10 Ang)
      kweight:  exponent for weighting spectra by k**kweight
      kmin:     starting k for FT Window
      kmax:     ending k for FT Window
      dk:       tapering parameter for FT Window
      dk2:      second tapering parameter for FT Window
      window:   name of window type
      nfft:     value to use for N_fft (2048).
      kstep:    value to use for delta_k (0.05 Ang^-1).
      with_phase: output the phase as well as magnitude, real, imag  [False]

    Returns:
    ---------
      None   -- outputs are written to supplied group.

    Notes:
    -------
    Arrays written to output group:
        kwin               window function Omega(k) (length of input chi(k)).
        r                  uniform array of R, out to rmax_out.
        chir               complex array of chi(R).
        chir_mag           magnitude of chi(R).
        chir_re            real part of chi(R).
        chir_im            imaginary part of chi(R).
        chir_pha           phase of chi(R) if with_phase=True
                           (a noticeable performance hit)

    Supports First Argument Group convention (with group member names 'k' and 'chi')
    """
    # allow kweight keyword == kw
    if 'kw' in kws:
        kweight = kws['kw']

    k, chi, group = parse_group_args(k, members=('k', 'chi'),
                                     defaults=(chi,), group=group,
                                     fcn_name='xftf')

    cchi, win  = xftf_prep(k, chi, kmin=kmin, kmax=kmax, kweight=kweight,
                               dk=dk, dk2=dk2, nfft=nfft, kstep=kstep,
                               window=window, _larch=_larch)

    out = xftf_fast(cchi*win, kstep=kstep, nfft=nfft)
    rstep = pi/(kstep*nfft)

    irmax = int(min(nfft/2, 1.01 + rmax_out/rstep))

    group = set_xafsGroup(group, _larch=_larch)
    r   = rstep * arange(irmax)
    mag = sqrt(out.real**2 + out.imag**2)
    group.kwin =  win[:len(chi)]
    group.r    =  r[:irmax]
    group.chir =  out[:irmax]
    group.chir_mag =  mag[:irmax]
    group.chir_re  =  out.real[:irmax]
    group.chir_im  =  out.imag[:irmax]
    if with_phase:
        group.chir_pha =  complex_phase(out[:irmax])
Example #28
def xftr(r,
         chir=None,
         group=None,
         rmin=0,
         rmax=20,
         with_phase=False,
         dr=1,
         dr2=None,
         rw=0,
         window='kaiser',
         qmax_out=None,
         nfft=2048,
         kstep=0.05,
         _larch=None,
         **kws):
    """
    reverse XAFS Fourier transform, from chi(R) to chi(q).

    calculate reverse XAFS Fourier transform
    This assumes that chir_re and (optionally) chir_im are
    on a uniform r-grid given by r.

    Parameters:
    ------------
      r:        1-d array of distance, or group.
      chir:     1-d array of chi(R)
      group:    output Group
      qmax_out: highest *k* for output data (30 Ang^-1)
      rweight:  exponent for weighting spectra by r^rweight (0)
      rmin:     starting *R* for FT Window
      rmax:     ending *R* for FT Window
      dr:       tapering parameter for FT Window
      dr2:      second tapering parameter for FT Window
      window:   name of window type
      nfft:     value to use for N_fft (2048).
      kstep:    value to use for delta_k (0.05).
      with_phase: output the phase as well as magnitude, real, imag  [False]

    Returns:
    ---------
      None -- outputs are written to supplied group.

    Notes:
    -------
    Arrays written to output group:
        rwin               window Omega(R) (length of input chi(R)).
        q                  uniform array of k, out to qmax_out.
        chiq               complex array of chi(k).
        chiq_mag           magnitude of chi(k).
        chiq_re            real part of chi(k).
        chiq_im            imaginary part of chi(k).
        chiq_pha           phase of chi(k) if with_phase=True
                           (a noticeable performance hit)

    Supports First Argument Group convention (with group member names 'r' and 'chir')
    """
    if 'rweight' in kws:
        rw = kws['rweight']

    r, chir, group = parse_group_args(r,
                                      members=('r', 'chir'),
                                      defaults=(chir, ),
                                      group=group,
                                      fcn_name='xftr')
    rstep = r[1] - r[0]
    kstep = pi / (rstep * nfft)
    scale = 1.0

    cchir = zeros(nfft, dtype='complex128')
    r_ = rstep * arange(nfft, dtype='float64')

    cchir[0:len(chir)] = chir
    if chir.dtype == np.dtype('complex128'):
        scale = 0.5

    win = ftwindow(r_, xmin=rmin, xmax=rmax, dx=dr, dx2=dr2, window=window)
    out = scale * xftr_fast(cchir * win * r_**rw, kstep=kstep, nfft=nfft)
    if qmax_out is None: qmax_out = 30.0
    q = linspace(0, qmax_out, int(1.05 + qmax_out / kstep))
    nkpts = len(q)

    group = set_xafsGroup(group, _larch=_larch)
    group.q = q
    mag = sqrt(out.real**2 + out.imag**2)
    group.rwin = win[:len(chir)]
    group.chiq = out[:nkpts]
    group.chiq_mag = mag[:nkpts]
    group.chiq_re = out.real[:nkpts]
    group.chiq_im = out.imag[:nkpts]
    if with_phase:
        group.chiq_pha = complex_phase(out[:nkpts])
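
A sketch pairing xftf() and xftr(), assuming the larch.xafs import path: forward-transform a toy chi(k), then back-transform over the first-shell R window.

import numpy as np
from larch import Group                # assumed import path (xraylarch)
from larch.xafs import xftf, xftr      # assumed import paths (xraylarch)

k = np.arange(0, 15.0, 0.05)
chi = 0.5*np.sin(2*2.3*k + 1.2)*np.exp(-2*0.006*k*k)
dat = Group(k=k, chi=chi)

xftf(dat, kmin=2, kmax=13, dk=3, kweight=2)               # chi(k) -> chi(R)
xftr(dat, rmin=1.2, rmax=3.2, dr=0.2, window='hanning')   # chi(R) -> filtered chi(q)
print(dat.q[:5], dat.chiq_re[:5])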
Example #29
def xas_deconvolve(energy, norm=None, group=None, form='lorentzian',
                   esigma=1.0, eshift=0.0, smooth=True,
                   sgwindow=None, sgorder=3, _larch=None):
    """XAS spectral deconvolution

    de-convolve a normalized mu(E) spectra with a peak shape, enhancing the
    intensity and separation of peaks of a XANES spectrum.

    The results can be unstable, and noisy, and should be used
    with caution!

    Arguments
    ----------
    energy:   array of x-ray energies (in eV) or XAFS data group
    norm:     array of normalized mu(E)
    group:    output group
    form:     functional form of deconvolution function. One of
              'gaussian' or 'lorentzian' [default]
    esigma    energy sigma to pass to gaussian() or lorentzian()
              [in eV, default=1.0]
    eshift    energy shift to apply to result. [in eV, default=0]
    smooth    whether to smooth result with savitzky_golay method [True]
    sgwindow  window size for savitzky_golay [found from data step and esigma]
    sgorder   order for savitzky_golay [3]

    Returns
    -------
    None
       The array 'deconv' will be written to the output group.

    Notes
    -----
       Supports First Argument Group convention, requiring group
       members 'energy' and 'norm'

       Smoothing with savitzky_golay() requires a window and order.  By
       default, window = int(esigma / estep) where estep is step size for
       the gridded data, approximately the finest energy step in the data.
    """

    energy, mu, group = parse_group_args(energy, members=('energy', 'norm'),
                                         defaults=(norm,), group=group,
                                         fcn_name='xas_deconvolve')
    eshift = eshift + 0.5 * esigma

    en  = remove_dups(energy)
    en  = en - en[0]
    estep = max(0.001, 0.001*int(min(en[1:]-en[:-1])*1000.0))
    npts = 1  + int(max(en) / estep)

    x = np.arange(npts)*estep
    y = interp(en, mu, x, kind='cubic')

    kernel = lorentzian
    if form.lower().startswith('g'):
        kernel = gaussian

    yext = np.concatenate((y, np.arange(len(y))*y[-1]))
    ret, err = deconvolve(yext, kernel(x, center=0, sigma=esigma))
    nret = min(len(x), len(ret))

    ret = ret[:nret]*yext[nret-1]/ret[nret-1]
    if smooth:
        if sgwindow is None:
            sgwindow = int(1.0*esigma/estep)

        sgwindow = int(sgwindow)
        if sgwindow < (sgorder+1):
            sgwindow = sgorder + 2
        if sgwindow % 2 == 0:
            sgwindow += 1
        ret = savitzky_golay(ret, sgwindow, sgorder)

    out = interp(x+eshift, ret, en, kind='cubic')
    group = set_xafsGroup(group, _larch=_larch)
    group.deconv = out
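
A cautious sketch for xas_deconvolve(), assuming the larch.xafs import path; as the docstring warns, results on real data can be noisy, so this only shows the calling pattern on a smooth synthetic spectrum.

import numpy as np
from larch import Group                             # assumed import path (xraylarch)
from larch.xafs import pre_edge, xas_deconvolve     # assumed import paths (xraylarch)

energy = np.linspace(7050.0, 7350.0, 1201)
mu = 0.1 + 0.9/(1.0 + np.exp(-(energy - 7125.0)/1.5))
mu = mu + 0.2*np.exp(-0.5*((energy - 7131.0)/2.0)**2)    # white-line-like feature
dat = Group(energy=energy, mu=mu)

pre_edge(dat)                                  # provides dat.norm
xas_deconvolve(dat, form='lorentzian', esigma=1.0)
print(dat.deconv[:5])                          # sharpened, deconvolved spectrum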
Example #30
def pre_edge(energy, mu=None, group=None, e0=None, step=None,
             nnorm=None, nvict=0, pre1=None, pre2=-50,
             norm1=100, norm2=None, make_flat=True, emin_area=None,
             _larch=None):
    """pre edge subtraction, normalization for XAFS

    This performs a number of steps:
       1. determine E0 (if not supplied) from max of deriv(mu)
       2. fit a line (or polynomial) to the region below the edge
       3. fit a polynomial to the region above the edge
       4. extrapolate the two curves to E0 to determine the edge jump
       5. estimate area from emin_area to norm2, to get norm_area

    Arguments
    ----------
    energy:  array of x-ray energies, in eV, or group (see note)
    mu:      array of mu(E)
    group:   output group
    e0:      edge energy, in eV. If None, it will be determined here.
    step:    edge jump.  If None, it will be determined here.
    pre1:    low E range (relative to E0) for pre-edge fit
    pre2:    high E range (relative to E0) for pre-edge fit
    nvict:   energy exponent to use for pre-edge fit.  See Note 2.
    norm1:   low E range (relative to E0) for post-edge fit
    norm2:   high E range (relative to E0) for post-edge fit
    nnorm:   degree of polynomial (ie, nnorm+1 coefficients will be found) for
             post-edge normalization curve. Default=None (see note)
    make_flat: boolean (Default True) to calculate flattened output.
    emin_area: energy threshold for area normalization (see note)


    Returns
    -------
      None

    The following attributes will be written to the output group:
        e0          energy origin
        edge_step   edge step
        norm        normalized mu(E), using polynomial
        norm_area   normalized mu(E), using integrated area
        flat        flattened, normalized mu(E)
        pre_edge    determined pre-edge curve
        post_edge   determined post-edge, normalization curve
        dmude       derivative of mu(E)

    (if the output group is None, _sys.xafsGroup will be written to)

    Notes
    -----
    1  If the first argument is a Group, it must contain 'energy' and 'mu'.
       If it exists, group.e0 will be used as e0.
       See First Argument Group in Documentation
    2  nvict gives an exponent to the energy term for the fits to the pre-edge
       and the post-edge region.  For the pre-edge, a line (m * energy + b) is
       fit to mu(energy)*energy**nvict over the pre-edge region,
       energy=[e0+pre1, e0+pre2].  For the post-edge, a polynomial of order
       nnorm will be fit to mu(energy)*energy**nvict of the post-edge region
       energy=[e0+norm1, e0+norm2].
    3  nnorm will default to 2 if norm2-norm1 > 400, to 1 if 300 > norm2-norm1 > 100,
       and to 0 if norm2-norm1 < 100.
    4  norm_area will be estimated so that the area between emin_area and norm2
       is equal to (norm2-emin_area).  By default emin_area will be set to the
       *nominal* edge energy for the element and edge - 3*core_level_width

    """


    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='pre_edge')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    pre_dat = preedge(energy, mu, e0=e0, step=step, nnorm=nnorm,
                      nvict=nvict, pre1=pre1, pre2=pre2, norm1=norm1,
                      norm2=norm2)


    group = set_xafsGroup(group, _larch=_larch)

    e0    = pre_dat['e0']
    norm  = pre_dat['norm']
    norm1 = pre_dat['norm1']
    norm2 = pre_dat['norm2']
    # generate flattened spectra, by fitting a quadratic to .norm
    # and removing that.
    flat = norm
    ie0 = index_nearest(energy, e0)
    p1 = index_of(energy, norm1+e0)
    p2 = index_nearest(energy, norm2+e0)
    if p2-p1 < 2:
        p2 = min(len(energy), p1 + 2)

    if make_flat and p2-p1 > 4:
        enx, mux = remove_nans2(energy[p1:p2], norm[p1:p2])
        # enx, mux = (energy[p1:p2], norm[p1:p2])
        fpars = Parameters()
        ncoefs = len(pre_dat['norm_coefs'])
        fpars.add('c0', value=0, vary=True)
        fpars.add('c1', value=0, vary=(ncoefs>1))
        fpars.add('c2', value=0, vary=(ncoefs>2))
        fit = Minimizer(flat_resid, fpars, fcn_args=(enx, mux))
        result = fit.leastsq(xtol=1.e-6, ftol=1.e-6)

        fc0 = result.params['c0'].value
        fc1 = result.params['c1'].value
        fc2 = result.params['c2'].value

        flat_diff   = fc0 + energy * (fc1 + energy * fc2)
        flat        = norm - (flat_diff  - flat_diff[ie0])
        flat[:ie0]  = norm[:ie0]


    group.e0 = e0
    group.norm = norm
    group.norm_poly = 1.0*norm
    group.flat = flat
    group.dmude = np.gradient(mu)/np.gradient(energy)
    group.edge_step  = pre_dat['edge_step']
    group.edge_step_poly = pre_dat['edge_step']
    group.pre_edge   = pre_dat['pre_edge']
    group.post_edge  = pre_dat['post_edge']

    group.pre_edge_details = Group()
    group.pre_edge_details.pre1   = pre_dat['pre1']
    group.pre_edge_details.pre2   = pre_dat['pre2']
    group.pre_edge_details.nnorm  = pre_dat['nnorm']
    group.pre_edge_details.norm1  = pre_dat['norm1']
    group.pre_edge_details.norm2  = pre_dat['norm2']
    group.pre_edge_details.nvict  = pre_dat['nvict']
    group.pre_edge_details.pre1_input  = pre_dat['pre1_input']
    group.pre_edge_details.norm2_input  = pre_dat['norm2_input']
    group.pre_edge_details.pre_slope  = pre_dat['precoefs'][0]
    group.pre_edge_details.pre_offset = pre_dat['precoefs'][1]

    for i in range(MAX_NNORM):
        if hasattr(group, 'norm_c%i' % i):
            delattr(group, 'norm_c%i' % i)
    for i, c in enumerate(pre_dat['norm_coefs']):
        setattr(group.pre_edge_details, 'norm_c%i' % i, c)

    # guess element and edge
    group.atsym = getattr(group, 'atsym', None)
    group.edge = getattr(group, 'edge', None)

    if group.atsym is None or group.edge is None:
        _atsym, _edge = guess_edge(group.e0, _larch=_larch)
        if group.atsym is None: group.atsym = _atsym
        if group.edge is None:  group.edge = _edge

    # calculate area-normalization
    if emin_area is None:
        emin_area = (xray_edge(group.atsym, group.edge).edge
                     - 2*core_width(group.atsym, group.edge))
    i1 = index_of(energy, emin_area)
    i2 = index_of(energy, e0+norm2)
    en = energy[i1:i2]
    area_step = max(1.e-15, simps(norm[i1:i2], en) / en.ptp())
    group.edge_step_area = group.edge_step_poly * area_step
    group.norm_area = norm/area_step
    group.pre_edge_details.emin_area = emin_area

    return
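
A minimal sketch of the pre_edge() call itself, assuming the larch.xafs import path and a synthetic step-like spectrum.

import numpy as np
from larch import Group            # assumed import path (xraylarch)
from larch.xafs import pre_edge    # assumed import path (xraylarch)

energy = np.linspace(8150.0, 9100.0, 1901)                # eV, around the Ni K edge
mu = 0.4 + 0.6/(1.0 + np.exp(-(energy - 8333.0)/2.0)) - 1.0e-5*(energy - 8333.0)
dat = Group(energy=energy, mu=mu)

pre_edge(dat)     # writes e0, edge_step, norm, flat, pre_edge, post_edge, dmude
print(dat.e0, dat.edge_step)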
Example #31
def estimate_noise(k,
                   chi=None,
                   group=None,
                   rmin=15.0,
                   rmax=30.0,
                   kweight=1,
                   kmin=0,
                   kmax=20,
                   dk=4,
                   dk2=None,
                   kstep=0.05,
                   kwindow='kaiser',
                   nfft=2048,
                   _larch=None,
                   **kws):
    """
    estimate noise levels in EXAFS spectrum and estimate highest k
    where data is above the noise level
    Parameters:
    -----------
      k:        1-d array of photo-electron wavenumber in Ang^-1 (or group)
      chi:      1-d array of chi
      group:    output Group  [see Note below]
      rmin:     minimum R value for high-R region of chi(R)
      rmax:     maximum R value for high-R region of chi(R)
      kweight:  exponent for weighting spectra by k**kweight [1]
      kmin:     starting k for FT Window [0]
      kmax:     ending k for FT Window  [20]
      dk:       tapering parameter for FT Window [4]
      dk2:      second tapering parameter for FT Window [None]
      kstep:    value to use for delta_k ( Ang^-1) [0.05]
      window:   name of window type ['kaiser']
      nfft:     value to use for N_fft [2048].

    Returns:
    ---------
      None   -- outputs are written to supplied group.  Values (scalars) written
      to output group:
        epsilon_k     estimated noise in chi(k)
        epsilon_r     estimated noise in chi(R)
        kmax_suggest  highest estimated k value where |chi(k)| > epsilon_k

    Notes:
    -------

     1. This method uses the high-R portion of chi(R) as a measure of the noise
        level in the chi(R) data and uses Parseval's theorem to convert this noise
        level to that in chi(k).  This method implicitly assumes that there is no
        signal in the high-R portion of the spectrum, and that the noise in the
        spectrum is "white" (independent of R).  Each of these assumptions can be
        questioned.
     2. The estimate for 'kmax_suggest' has a tendency to be fair but pessimistic
        in how far out the chi(k) data goes before being dominated by noise.
     3. Follows the 'First Argument Group' convention, so that you can either
        specify all of (an array for 'k', an array for 'chi', optional output Group)
        OR pass a group with 'k' and 'chi' as the first argument
    """
    k, chi, group = parse_group_args(k,
                                     members=('k', 'chi'),
                                     defaults=(chi, ),
                                     group=group,
                                     fcn_name='estimate_noise')

    # save _sys.xafsGroup -- we want to NOT write to it here!
    savgroup = set_xafsGroup(None, _larch=_larch)
    tmpgroup = Group()
    rmax_out = min(10 * pi, rmax + 2)

    xftf(k,
         chi,
         kmin=kmin,
         kmax=kmax,
         rmax_out=rmax_out,
         kweight=kweight,
         dk=dk,
         dk2=dk2,
         kwindow=kwindow,
         nfft=nfft,
         kstep=kstep,
         group=tmpgroup,
         _larch=_larch)

    chir = tmpgroup.chir
    rstep = tmpgroup.r[1] - tmpgroup.r[0]

    irmin = int(0.01 + rmin / rstep)
    irmax = min(nfft // 2, int(1.01 + rmax / rstep))
    highr = realimag(chir[irmin:irmax])

    # get average of window function value, scale eps_r scale by this
    # this is imperfect, but improves the result.
    kwin_ave = tmpgroup.kwin.sum() * kstep / (kmax - kmin)
    eps_r = sqrt((highr * highr).sum() / len(highr)) / kwin_ave

    # use Parseval's theorem to convert epsilon_r to epsilon_k,
    # compensating for kweight
    w = 2 * kweight + 1
    scale = sqrt((2 * pi * w) / (kstep * (kmax**w - kmin**w)))
    eps_k = scale * eps_r

    # do reverse FT to get chiq array
    xftr(tmpgroup.r,
         tmpgroup.chir,
         group=tmpgroup,
         rmin=0.5,
         rmax=9.5,
         dr=1.0,
         window='parzen',
         nfft=nfft,
         kstep=kstep,
         _larch=_larch)

    # sets kmax_suggest to the largest k value for which
    # | chi(q) / k**kweight| > epsilon_k
    iq0 = index_of(tmpgroup.q, (kmax + kmin) / 2.0)
    tst = tmpgroup.chiq_mag[iq0:] / (tmpgroup.q[iq0:])**kweight
    kmax_suggest = tmpgroup.q[iq0 + where(tst < eps_k)[0][0]]

    # restore original _sys.xafsGroup, set output variables
    _larch.symtable._sys.xafsGroup = savgroup
    group = set_xafsGroup(group, _larch=_larch)
    group.epsilon_k = eps_k
    group.epsilon_r = eps_r
    group.kmax_suggest = kmax_suggest
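A hedged usage sketch for estimate_noise() on synthetic chi(k) data. The larch.xafs import path, the Interpreter session, and the noisy damped-sine test signal are assumptions made only for illustration; real chi(k) would come from background-subtracted data.

import numpy as np
from larch import Group, Interpreter
from larch.xafs import estimate_noise   # import path assumed for a recent xraylarch

session = Interpreter()                  # Larch session passed as _larch

# damped sine plus white noise standing in for measured chi(k)
k = np.linspace(0.0, 18.0, 361)
chi = (0.6 * np.sin(2 * 4.0 * k) * np.exp(-0.02 * k**2)
       + 0.02 * np.random.default_rng(0).normal(size=k.size))

dat = Group(k=k, chi=chi)                # First Argument Group convention
estimate_noise(dat, kweight=2, kmin=2, kmax=16, _larch=session)
print(dat.epsilon_k, dat.epsilon_r, dat.kmax_suggest)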
Example #32
0
def prepeaks_setup(energy, norm=None, group=None, emin=None, emax=None,
                   elo=None, ehi=None, _larch=None):
    """set up pre edge peak group

    This assumes that pre_edge() has been run successfully on the spectra
    and that the spectra has decent pre-edge subtraction and normalization.

    Arguments
    ----------
    energy:    array of x-ray energies, in eV, or group (see note 1)
    norm:      array of normalized mu(E)
    group:     output group
    emax:      max energy (eV) to use for baseline fit [e0-5]
    emin:      min energy (eV) to use for baseline fit [e0-40]
    elo:       low energy of pre-edge peak region to not fit baseline [e0-20]
    ehi:       high energy of pre-edge peak region to not fit baseline [e0-10]


    Returns
    -------
      None

    A group named 'prepeaks' will be created in the output group, with the following
    attributes:
        energy        energy array for pre-edge peaks = energy[emin:emax]
        norm          spectrum over pre-edge peak energies

    Notes
    -----
     1 If the first argument is a Group, it must contain 'energy' and 'norm'.
       See First Argument Group in Documentation
    """
    energy, norm, group = parse_group_args(energy, members=('energy', 'norm'),
                                           defaults=(norm,), group=group,
                                           fcn_name='pre_edge_baseline')

    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(norm.shape) > 1:
        norm = norm.squeeze()

    dat_emin, dat_emax = min(energy), max(energy)
    dat_e0 = getattr(group, 'e0', -1)

    if dat_e0 > 0:
        if emin is None:
            emin = dat_e0 - 30.0
        if emax is None:
            emax = dat_e0 - 1.0
        if elo is None:
            elo = dat_e0 - 15.0
        if ehi is None:
            ehi = dat_e0 - 5.0
        if emin < 0:
            emin += dat_e0
        if elo < 0:
            elo += dat_e0
        if emax < dat_emin:
            emax += dat_e0
        if ehi < dat_emin:
            ehi += dat_e0

    if emax is None or emin is None or elo is None or ehi is None:
        raise ValueError("must provide emin and emax to prepeaks_setup")


    # get indices for input energies
    if emin > emax:
        emin, emax = emax, emin
    if emin > elo:
        elo, emin = emin, elo
    if ehi > emax:
        ehi, emax = emax, ehi

    dele = 1.e-13 + min(np.diff(energy))/5.0

    ilo  = index_of(energy, elo+dele)
    ihi  = index_of(energy, ehi+dele)
    imin = index_of(energy, emin+dele)
    imax = index_of(energy, emax+dele)

    edat = energy[imin: imax+1]
    norm = norm[imin:imax+1]

    if not hasattr(group, 'prepeaks'):
        group.prepeaks = Group(energy=edat, norm=norm,
                               emin=emin, emax=emax,
                               elo=elo, ehi=ehi)
    else:
        group.prepeaks.energy = edat
        group.prepeaks.norm = norm
        group.prepeaks.emin = emin
        group.prepeaks.emax = emax
        group.prepeaks.elo = elo
        group.prepeaks.ehi = ehi

    group.prepeaks.xdat = edat
    group.prepeaks.ydat = norm
    return
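A hedged usage sketch for prepeaks_setup(): pre_edge() is run first so that e0 and norm exist, then the pre-edge window is sliced out with the defaults listed above. The synthetic arctan edge with a small Gaussian pre-edge peak, the Interpreter session, and the larch.xafs import path are assumptions for illustration only.

import numpy as np
from larch import Group, Interpreter
from larch.xafs import pre_edge, prepeaks_setup   # import path assumed

session = Interpreter()

# synthetic K-edge near 7112 eV with a weak pre-edge peak near 7105 eV
energy = np.linspace(6900.0, 7400.0, 1001)
mu = (np.arctan((energy - 7112.0) / 2.0) / np.pi + 0.5
      + 0.05 * np.exp(-0.5 * ((energy - 7105.0) / 1.5) ** 2))
dat = Group(energy=energy, mu=mu)

pre_edge(dat, _larch=session)         # writes dat.e0 and dat.norm
prepeaks_setup(dat, _larch=session)   # defaults: emin=e0-30, emax=e0-1, elo=e0-15, ehi=e0-5
print(dat.prepeaks.emin, dat.prepeaks.emax, len(dat.prepeaks.energy))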
Example #33
0
def autobk(energy, mu=None, group=None, rbkg=1, nknots=None, e0=None,
           edge_step=None, kmin=0, kmax=None, kweight=1, dk=0,
           win='hanning', k_std=None, chi_std=None, nfft=2048, kstep=0.05,
           pre_edge_kws=None, nclamp=4, clamp_lo=1, clamp_hi=1,
           calc_uncertainties=False, _larch=None, **kws):
    """Use Autobk algorithm to remove XAFS background

    Parameters:
    -----------
      energy:    1-d array of x-ray energies, in eV, or group
      mu:        1-d array of mu(E)
      group:     output group (and input group for e0 and edge_step).
      rbkg:      distance (in Ang) for chi(R) above
                 which the signal is ignored. Default = 1.
      e0:        edge energy, in eV.  If None, it will be determined.
      edge_step: edge step.  If None, it will be determined.
      pre_edge_kws:  keyword arguments to pass to pre_edge()
      nknots:    number of knots in spline.  If None, it will be determined.
      kmin:      minimum k value   [0]
      kmax:      maximum k value   [full data range].
      kweight:   k weight for FFT.  [1]
      dk:        FFT window window parameter.  [0]
      win:       FFT window function name.     ['hanning']
      nfft:      array size to use for FFT [2048]
      kstep:     k step size to use for FFT [0.05]
      k_std:     optional k array for standard chi(k).
      chi_std:   optional chi array for standard chi(k).
      nclamp:    number of energy end-points for clamp [4]
      clamp_lo:  weight of low-energy clamp [1]
      clamp_hi:  weight of high-energy clamp [1]
      calc_uncertainties:  Flag to calculate uncertainties in
                           mu_0(E) and chi(k) [False]

    Output arrays are written to the provided group.

    Follows the 'First Argument Group' convention.
    """
    msg = _larch.writer.write
    if 'kw' in kws:
        kweight = kws.pop('kw')
    if len(kws) > 0:
        msg('Unrecognized arguments for autobk():\n')
        msg('    %s\n' % (', '.join(kws.keys())))
        return
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='autobk')

    energy = remove_dups(energy)
    # if e0 or edge_step are not specified, get them, either from the
    # passed-in group or from running pre_edge()
    group = set_xafsGroup(group, _larch=_larch)

    if edge_step is None and isgroup(group, 'edge_step'):
        edge_step = group.edge_step
    if e0 is None and isgroup(group, 'e0'):
        e0 = group.e0
    if e0 is None or edge_step is None:
        # need to run pre_edge:
        pre_kws = dict(nnorm=3, nvict=0, pre1=None,
                       pre2=-50., norm1=100., norm2=None)
        if pre_edge_kws is not None:
            pre_kws.update(pre_edge_kws)
        pre_edge(energy, mu, group=group, _larch=_larch, **pre_kws)
        if e0 is None:
            e0 = group.e0
        if edge_step is None:
            edge_step = group.edge_step
    if e0 is None or edge_step is None:
        msg('autobk() could not determine e0 or edge_step!: try running pre_edge first\n')
        return

    # get array indices for rbkg and e0: irbkg, ie0
    ie0 = index_of(energy, e0)
    rgrid = np.pi/(kstep*nfft)
    if rbkg < 2*rgrid: rbkg = 2*rgrid
    irbkg = int(1.01 + rbkg/rgrid)

    # save ungridded k (kraw) and gridded k (kout)
    # and ftwin (*k-weighting) for FT in residual
    enpe = energy[ie0:] - e0
    kraw = np.sign(enpe)*np.sqrt(ETOK*abs(enpe))
    if kmax is None:
        kmax = max(kraw)
    else:
        kmax = max(0, min(max(kraw), kmax))
    kout  = kstep * np.arange(int(1.01+kmax/kstep), dtype='float64')
    iemax = min(len(energy), 2+index_of(energy, e0+kmax*kmax/ETOK)) - 1

    # interpolate provided chi(k) onto the kout grid
    if chi_std is not None and k_std is not None:
        chi_std = np.interp(kout, k_std, chi_std)
    # pre-load FT window
    ftwin = kout**kweight * ftwindow(kout, xmin=kmin, xmax=kmax,
                                     window=win, dx=dk)
    # calc k-value and initial guess for y-values of spline params
    nspl = max(4, min(128, 2*int(rbkg*(kmax-kmin)/np.pi) + 1))
    spl_y, spl_k, spl_e  = np.zeros(nspl), np.zeros(nspl), np.zeros(nspl)
    for i in range(nspl):
        q  = kmin + i*(kmax-kmin)/(nspl - 1)
        ik = index_nearest(kraw, q)
        i1 = min(len(kraw)-1, ik + 5)
        i2 = max(0, ik - 5)
        spl_k[i] = kraw[ik]
        spl_e[i] = energy[ik+ie0]
        spl_y[i] = (2*mu[ik+ie0] + mu[i1+ie0] + mu[i2+ie0] ) / 4.0

    # get spline representation: knots, coefs, order=3
    # coefs will be varied in fit.
    knots, coefs, order = splrep(spl_k, spl_y)

    # set fit parameters from initial coefficients
    params = Group()
    for i in range(len(coefs)):
        name = FMT_COEF % i
        p = Parameter(coefs[i], name=name, vary=i<len(spl_y))
        p._getval()
        setattr(params, name, p)

    initbkg, initchi = spline_eval(kraw[:iemax-ie0+1], mu[ie0:iemax+1],
                                   knots, coefs, order, kout)

    # do fit
    fit = Minimizer(__resid, params, _larch=_larch, toler=1.e-4,
                    fcn_kws = dict(ncoefs=len(coefs), chi_std=chi_std,
                                   knots=knots, order=order,
                                   kraw=kraw[:iemax-ie0+1],
                                   mu=mu[ie0:iemax+1], irbkg=irbkg, kout=kout,
                                   ftwin=ftwin, kweight=kweight,
                                   nfft=nfft, nclamp=nclamp,
                                   clamp_lo=clamp_lo, clamp_hi=clamp_hi))
    fit.leastsq()

    # write final results
    coefs = [getattr(params, FMT_COEF % i) for i in range(len(coefs))]
    bkg, chi = spline_eval(kraw[:iemax-ie0+1], mu[ie0:iemax+1],
                           knots, coefs, order, kout)
    obkg = np.copy(mu)
    obkg[ie0:ie0+len(bkg)] = bkg

    # outputs to group
    group = set_xafsGroup(group, _larch=_larch)
    group.bkg  = obkg
    group.chie = (mu-obkg)/edge_step
    group.k    = kout
    group.chi  = chi/edge_step

    # now fill in 'autobk_details' group
    params.init_bkg = np.copy(mu)
    params.init_bkg[ie0:ie0+len(bkg)] = initbkg
    params.init_chi = initchi/edge_step
    params.knots_e  = spl_e
    params.knots_y  = np.array([coefs[i] for i in range(nspl)])
    params.init_knots_y = spl_y
    params.nfev = params.fit_details.nfev
    params.kmin = kmin
    params.kmax = kmax
    group.autobk_details = params

    # uncertainties in mu0 and chi:  fairly slow!!
    if HAS_UNCERTAIN and calc_uncertainties:
        vbest, vstd = [], []
        for n in fit.var_names:
            par = getattr(params, n)
            vbest.append(par.value)
            vstd.append(par.stderr)
        uvars = uncertainties.correlated_values(vbest, params.covar)
        # uncertainty in bkg (aka mu0)
        # note that much of this is working around
        # limitations in the uncertainty package that make it
        #  1. take an argument list (not array)
        #  2. work on returned scalars (but not arrays)
        #  3. not handle kw args and *args well (so use
        #     of global "index" is important here)
        nkx = iemax-ie0 + 1
        def my_dsplev(*args):
            coefs = np.array(args)
            return splev(kraw[:nkx], [knots, coefs, order])[index]
        fdbkg = uncertainties.wrap(my_dsplev)
        dmu0  = [fdbkg(*uvars).std_dev() for index in range(len(bkg))]
        group.delta_bkg = np.zeros(len(mu))
        group.delta_bkg[ie0:ie0+len(bkg)] = np.array(dmu0)

        # uncertainty in chi (see notes above)
        def my_dchi(*args):
            coefs = np.array(args)
            b,chi = spline_eval(kraw[:nkx], mu[ie0:iemax+1],
                                knots, coefs, order, kout)
            return chi[index]
        fdchi = uncertainties.wrap(my_dchi)
        dchi  = [fdchi(*uvars).std_dev() for index in range(len(kout))]
        group.delta_chi = np.array(dchi)/edge_step
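A hedged usage sketch for autobk(): pre_edge() supplies e0 and edge_step, then the spline background is removed and chi(k) is written back to the group. The featureless synthetic edge, the Interpreter session, and the larch.xafs import path are illustrative assumptions, not part of this snippet.

import numpy as np
from larch import Group, Interpreter
from larch.xafs import pre_edge, autobk   # import path assumed

session = Interpreter()

# featureless synthetic mu(E): enough for the spline machinery, not physically meaningful
energy = np.linspace(6900.0, 7700.0, 1601)
mu = np.arctan((energy - 7112.0) / 2.0) / np.pi + 0.5 + 2e-5 * (energy - 6900.0)
dat = Group(energy=energy, mu=mu)

pre_edge(dat, _larch=session)                       # determines e0 and edge_step
autobk(dat, rbkg=1.0, kweight=2, _larch=session)    # writes dat.bkg, dat.k, dat.chi
print(dat.k[-1], dat.chi.shape, dat.autobk_details.kmax)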
Example #34
0
def xftr(r, chir=None, group=None, rmin=0, rmax=20, with_phase=False,
            dr=1, dr2=None, rw=0, window='kaiser', qmax_out=None,
            nfft=2048, kstep=0.05, _larch=None, **kws):
    """
    reverse XAFS Fourier transform, from chi(R) to chi(q).

    calculate reverse XAFS Fourier transform
    This assumes that chir_re and (optionally) chir_im are
    on a uniform r-grid given by r.

    Parameters:
    ------------
      r:        1-d array of distance, or group.
      chir:     1-d array of chi(R)
      group:    output Group
      qmax_out: highest *k* for output data (30 Ang^-1)
      rw:       exponent for weighting spectra by r^rw (0); 'rweight' is accepted as an alias
      rmin:     starting *R* for FT Window
      rmax:     ending *R* for FT Window
      dr:       tapering parameter for FT Window
      dr2:      second tapering parameter for FT Window
      window:   name of window type
      nfft:     value to use for N_fft (2048).
      kstep:    value to use for delta_k (0.05).
      with_phase: output the phase as well as magnitude, real, imag  [False]

    Returns:
    ---------
      None -- outputs are written to supplied group.

    Notes:
    -------
    Arrays written to output group:
        rwin               window Omega(R) (length of input chi(R)).
        q                  uniform array of k, out to qmax_out.
        chiq               complex array of chi(k).
        chiq_mag           magnitude of chi(k).
        chiq_re            real part of chi(k).
        chiq_im            imaginary part of chi(k).
        chiq_pha           phase of chi(k) if with_phase=True
                           (a noticeable performance hit)

    Supports First Argument Group convention (with group member names 'r' and 'chir')
    """
    if 'rweight' in kws:
        rw = kws['rweight']

    r, chir, group = parse_group_args(r, members=('r', 'chir'),
                                     defaults=(chir,), group=group,
                                     fcn_name='xftr')
    rstep = r[1] - r[0]
    kstep = pi/(rstep*nfft)
    scale = 1.0

    cchir = zeros(nfft, dtype='complex128')
    r_    = rstep * arange(nfft, dtype='float64')

    cchir[0:len(chir)] = chir
    if chir.dtype == np.dtype('complex128'):
        scale = 0.5

    win = ftwindow(r_, xmin=rmin, xmax=rmax, dx=dr, dx2=dr2, window=window)
    out = scale * xftr_fast( cchir*win * r_**rw, kstep=kstep, nfft=nfft)
    if qmax_out is None: qmax_out = 30.0
    q = linspace(0, qmax_out, int(1.05 + qmax_out/kstep))
    nkpts = len(q)

    group = set_xafsGroup(group, _larch=_larch)
    group.q = q
    mag = sqrt(out.real**2 + out.imag**2)
    group.rwin =  win[:len(chir)]
    group.chiq     =  out[:nkpts]
    group.chiq_mag =  mag[:nkpts]
    group.chiq_re  =  out.real[:nkpts]
    group.chiq_im  =  out.imag[:nkpts]
    if with_phase:
        group.chiq_pha =  complex_phase(out[:nkpts])
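A hedged round-trip sketch: a synthetic chi(k) is forward-transformed with xftf(), then xftr() back-transforms the first-shell region of chi(R) to chi(q). The import paths, Interpreter session, and single-shell test signal are assumptions made for illustration.

import numpy as np
from larch import Group, Interpreter
from larch.xafs import xftf, xftr   # import path assumed

session = Interpreter()

k = np.linspace(0.0, 18.0, 361)
chi = 0.6 * np.sin(2 * 2.5 * k) * np.exp(-0.02 * k**2)   # single 'shell' near R ~ 2.5 Ang
dat = Group(k=k, chi=chi)

xftf(dat, kmin=2, kmax=16, dk=3, kweight=2, kwindow='kaiser', _larch=session)
xftr(dat, rmin=1.5, rmax=3.5, dr=0.5, window='hanning', _larch=session)
print(dat.r[np.argmax(dat.chir_mag)], dat.chiq_mag.max())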
Example #35
0
def autobk(energy,
           mu=None,
           group=None,
           rbkg=1,
           nknots=None,
           e0=None,
           edge_step=None,
           kmin=0,
           kmax=None,
           kweight=1,
           dk=0,
           win='hanning',
           k_std=None,
           chi_std=None,
           nfft=2048,
           kstep=0.05,
           pre_edge_kws=None,
           nclamp=4,
           clamp_lo=1,
           clamp_hi=1,
           calc_uncertainties=False,
           _larch=None,
           **kws):
    """Use Autobk algorithm to remove XAFS background

    Parameters:
    -----------
      energy:    1-d array of x-ray energies, in eV, or group
      mu:        1-d array of mu(E)
      group:     output group (and input group for e0 and edge_step).
      rbkg:      distance (in Ang) for chi(R) above
                 which the signal is ignored. Default = 1.
      e0:        edge energy, in eV.  If None, it will be determined.
      edge_step: edge step.  If None, it will be determined.
      pre_edge_kws:  keyword arguments to pass to pre_edge()
      nknots:    number of knots in spline.  If None, it will be determined.
      kmin:      minimum k value   [0]
      kmax:      maximum k value   [full data range].
      kweight:   k weight for FFT.  [1]
      dk:        FFT window window parameter.  [0]
      win:       FFT window function name.     ['hanning']
      nfft:      array size to use for FFT [2048]
      kstep:     k step size to use for FFT [0.05]
      k_std:     optional k array for standard chi(k).
      chi_std:   optional chi array for standard chi(k).
      nclamp:    number of energy end-points for clamp [4]
      clamp_lo:  weight of low-energy clamp [1]
      clamp_hi:  weight of high-energy clamp [1]
      calc_uncertainties:  Flag to calculate uncertainties in
                           mu_0(E) and chi(k) [False]

    Output arrays are written to the provided group.

    Follows the 'First Argument Group' convention.
    """
    msg = _larch.writer.write
    if 'kw' in kws:
        kweight = kws.pop('kw')
    if len(kws) > 0:
        msg('Unrecognized arguments for autobk():\n')
        msg('    %s\n' % (', '.join(kws.keys())))
        return
    energy, mu, group = parse_group_args(energy,
                                         members=('energy', 'mu'),
                                         defaults=(mu, ),
                                         group=group,
                                         fcn_name='autobk')

    energy = remove_dups(energy)
    # if e0 or edge_step are not specified, get them, either from the
    # passed-in group or from running pre_edge()
    group = set_xafsGroup(group, _larch=_larch)

    if edge_step is None and isgroup(group, 'edge_step'):
        edge_step = group.edge_step
    if e0 is None and isgroup(group, 'e0'):
        e0 = group.e0
    if e0 is None or edge_step is None:
        # need to run pre_edge:
        pre_kws = dict(nnorm=3,
                       nvict=0,
                       pre1=None,
                       pre2=-50.,
                       norm1=100.,
                       norm2=None)
        if pre_edge_kws is not None:
            pre_kws.update(pre_edge_kws)
        pre_edge(energy, mu, group=group, _larch=_larch, **pre_kws)
        if e0 is None:
            e0 = group.e0
        if edge_step is None:
            edge_step = group.edge_step
    if e0 is None or edge_step is None:
        msg('autobk() could not determine e0 or edge_step!: try running pre_edge first\n')
        return

    # get array indices for rbkg and e0: irbkg, ie0
    ie0 = index_of(energy, e0)
    rgrid = np.pi / (kstep * nfft)
    if rbkg < 2 * rgrid: rbkg = 2 * rgrid
    irbkg = int(1.01 + rbkg / rgrid)

    # save ungridded k (kraw) and gridded k (kout)
    # and ftwin (*k-weighting) for FT in residual
    enpe = energy[ie0:] - e0
    kraw = np.sign(enpe) * np.sqrt(ETOK * abs(enpe))
    if kmax is None:
        kmax = max(kraw)
    else:
        kmax = max(0, min(max(kraw), kmax))
    kout = kstep * np.arange(int(1.01 + kmax / kstep), dtype='float64')
    iemax = min(len(energy), 2 + index_of(energy, e0 + kmax * kmax / ETOK)) - 1

    # interpolate provided chi(k) onto the kout grid
    if chi_std is not None and k_std is not None:
        chi_std = np.interp(kout, k_std, chi_std)
    # pre-load FT window
    ftwin = kout**kweight * ftwindow(
        kout, xmin=kmin, xmax=kmax, window=win, dx=dk)
    # calc k-value and initial guess for y-values of spline params
    nspl = max(4, min(128, 2 * int(rbkg * (kmax - kmin) / np.pi) + 1))
    spl_y, spl_k, spl_e = np.zeros(nspl), np.zeros(nspl), np.zeros(nspl)
    for i in range(nspl):
        q = kmin + i * (kmax - kmin) / (nspl - 1)
        ik = index_nearest(kraw, q)
        i1 = min(len(kraw) - 1, ik + 5)
        i2 = max(0, ik - 5)
        spl_k[i] = kraw[ik]
        spl_e[i] = energy[ik + ie0]
        spl_y[i] = (2 * mu[ik + ie0] + mu[i1 + ie0] + mu[i2 + ie0]) / 4.0

    # get spline representation: knots, coefs, order=3
    # coefs will be varied in fit.
    knots, coefs, order = splrep(spl_k, spl_y)

    # set fit parameters from initial coefficients
    params = Group()
    for i in range(len(coefs)):
        name = FMT_COEF % i
        p = Parameter(coefs[i], name=name, vary=i < len(spl_y))
        p._getval()
        setattr(params, name, p)

    initbkg, initchi = spline_eval(kraw[:iemax - ie0 + 1], mu[ie0:iemax + 1],
                                   knots, coefs, order, kout)

    # do fit
    fit = Minimizer(__resid,
                    params,
                    _larch=_larch,
                    toler=1.e-4,
                    fcn_kws=dict(ncoefs=len(coefs),
                                 chi_std=chi_std,
                                 knots=knots,
                                 order=order,
                                 kraw=kraw[:iemax - ie0 + 1],
                                 mu=mu[ie0:iemax + 1],
                                 irbkg=irbkg,
                                 kout=kout,
                                 ftwin=ftwin,
                                 kweight=kweight,
                                 nfft=nfft,
                                 nclamp=nclamp,
                                 clamp_lo=clamp_lo,
                                 clamp_hi=clamp_hi))
    fit.leastsq()

    # write final results
    coefs = [getattr(params, FMT_COEF % i) for i in range(len(coefs))]
    bkg, chi = spline_eval(kraw[:iemax - ie0 + 1], mu[ie0:iemax + 1], knots,
                           coefs, order, kout)
    obkg = np.copy(mu)
    obkg[ie0:ie0 + len(bkg)] = bkg

    # outputs to group
    group = set_xafsGroup(group, _larch=_larch)
    group.bkg = obkg
    group.chie = (mu - obkg) / edge_step
    group.k = kout
    group.chi = chi / edge_step

    # now fill in 'autobk_details' group
    params.init_bkg = np.copy(mu)
    params.init_bkg[ie0:ie0 + len(bkg)] = initbkg
    params.init_chi = initchi / edge_step
    params.knots_e = spl_e
    params.knots_y = np.array([coefs[i] for i in range(nspl)])
    params.init_knots_y = spl_y
    params.nfev = params.fit_details.nfev
    params.kmin = kmin
    params.kmax = kmax
    group.autobk_details = params

    # uncertainties in mu0 and chi:  fairly slow!!
    if HAS_UNCERTAIN and calc_uncertainties:
        vbest, vstd = [], []
        for n in fit.var_names:
            par = getattr(params, n)
            vbest.append(par.value)
            vstd.append(par.stderr)
        uvars = uncertainties.correlated_values(vbest, params.covar)
        # uncertainty in bkg (aka mu0)
        # note that much of this is working around
        # limitations in the uncertainty package that make it
        #  1. take an argument list (not array)
        #  2. work on returned scalars (but not arrays)
        #  3. not handle kw args and *args well (so use
        #     of global "index" is important here)
        nkx = iemax - ie0 + 1

        def my_dsplev(*args):
            coefs = np.array(args)
            return splev(kraw[:nkx], [knots, coefs, order])[index]

        fdbkg = uncertainties.wrap(my_dsplev)
        dmu0 = [fdbkg(*uvars).std_dev() for index in range(len(bkg))]
        group.delta_bkg = np.zeros(len(mu))
        group.delta_bkg[ie0:ie0 + len(bkg)] = np.array(dmu0)

        # uncertainty in chi (see notes above)
        def my_dchi(*args):
            coefs = np.array(args)
            b, chi = spline_eval(kraw[:nkx], mu[ie0:iemax + 1], knots, coefs,
                                 order, kout)
            return chi[index]

        fdchi = uncertainties.wrap(my_dchi)
        dchi = [fdchi(*uvars).std_dev() for index in range(len(kout))]
        group.delta_chi = np.array(dchi) / edge_step
Example #36
0
def estimate_noise(k, chi=None, group=None, rmin=15.0, rmax=30.0,
                   kweight=1, kmin=0, kmax=20, dk=4, dk2=None, kstep=0.05,
                   kwindow='kaiser', nfft=2048, _larch=None, **kws):
    """
    estimate noise levels in EXAFS spectrum and estimate highest k
    where data is above the noise level
    Parameters:
    -----------
      k:        1-d array of photo-electron wavenumber in Ang^-1 (or group)
      chi:      1-d array of chi
      group:    output Group  [see Note below]
      rmin:     minimum R value for high-R region of chi(R)
      rmax:     maximum R value for high-R region of chi(R)
      kweight:  exponent for weighting spectra by k**kweight [1]
      kmin:     starting k for FT Window [0]
      kmax:     ending k for FT Window  [20]
      dk:       tapering parameter for FT Window [4]
      dk2:      second tapering parameter for FT Window [None]
      kstep:    value to use for delta_k ( Ang^-1) [0.05]
      kwindow:  name of window type ['kaiser']
      nfft:     value to use for N_fft [2048].

    Returns:
    ---------
      None   -- outputs are written to supplied group.  Values (scalars) written
      to output group:
        epsilon_k     estimated noise in chi(k)
        epsilon_r     estimated noise in chi(R)
        kmax_suggest  highest estimated k value where |chi(k)| > epsilon_k

    Notes:
    -------

     1. This method uses the high-R portion of chi(R) as a measure of the noise
        level in the chi(R) data and uses Parseval's theorem to convert this noise
        level to that in chi(k).  This method implicitly assumes that there is no
        signal in the high-R portion of the spectrum, and that the noise in the
        spectrum is "white" (independent of R).  Each of these assumptions can be
        questioned.
     2. The estimate for 'kmax_suggest' tends to be fair but somewhat pessimistic
        about how far out the chi(k) data goes before being dominated by noise.
     3. Follows the 'First Argument Group' convention, so that you can either
        specify all of (an array for 'k', an array for 'chi', an optional output Group)
        OR pass a group with 'k' and 'chi' as the first argument
    """
    k, chi, group = parse_group_args(k, members=('k', 'chi'),
                                     defaults=(chi,), group=group,
                                     fcn_name='estimate_noise')



    # save _sys.xafsGroup -- we want to NOT write to it here!
    savgroup = set_xafsGroup(None, _larch=_larch)
    tmpgroup = Group()
    rmax_out = min(10*pi, rmax+2)

    xftf(k, chi, kmin=kmin, kmax=kmax, rmax_out=rmax_out,
         kweight=kweight, dk=dk, dk2=dk2, kwindow=kwindow,
         nfft=nfft, kstep=kstep, group=tmpgroup, _larch=_larch)

    chir  = tmpgroup.chir
    rstep = tmpgroup.r[1] - tmpgroup.r[0]

    irmin = int(0.01 + rmin/rstep)
    irmax = min(nfft//2,  int(1.01 + rmax/rstep))
    highr = realimag(chir[irmin:irmax])

    # get average of window function value, scale eps_r scale by this
    # this is imperfect, but improves the result.
    kwin_ave = tmpgroup.kwin.sum()*kstep/(kmax-kmin)
    eps_r = sqrt((highr*highr).sum() / len(highr)) / kwin_ave

    # use Parseval's theorem to convert epsilon_r to epsilon_k,
    # compensating for kweight
    w = 2 * kweight + 1
    scale = sqrt((2*pi*w)/(kstep*(kmax**w - kmin**w)))
    eps_k = scale*eps_r

    # do reverse FT to get chiq array
    xftr(tmpgroup.r, tmpgroup.chir, group=tmpgroup, rmin=0.5, rmax=9.5,
         dr=1.0, window='parzen', nfft=nfft, kstep=kstep, _larch=_larch)

    # sets kmax_suggest to the largest k value for which
    # | chi(q) / k**kweight| > epsilon_k
    iq0 = index_of(tmpgroup.q, (kmax+kmin)/2.0)
    tst = tmpgroup.chiq_mag[iq0:] / ( tmpgroup.q[iq0:])**kweight
    kmax_suggest = tmpgroup.q[iq0 + where(tst < eps_k)[0][0]]

    # restore original _sys.xafsGroup, set output variables
    _larch.symtable._sys.xafsGroup = savgroup
    group = set_xafsGroup(group, _larch=_larch)
    group.epsilon_k = eps_k
    group.epsilon_r = eps_r
    group.kmax_suggest = kmax_suggest
Example #37
0
def pre_edge(energy,
             mu=None,
             group=None,
             e0=None,
             step=None,
             nnorm=None,
             nvict=0,
             pre1=None,
             pre2=None,
             norm1=None,
             norm2=None,
             make_flat=True,
             _larch=None):
    """pre edge subtraction, normalization for XAFS

    This performs a number of steps:
       1. determine E0 (if not supplied) from max of deriv(mu)
       2. fit a line or polynomial to the region below the edge
       3. fit a polynomial to the region above the edge
       4. extrapolate the two curves to E0 and take their difference
          to determine the edge jump

    Arguments
    ----------
    energy:  array of x-ray energies, in eV, or group (see note 1)
    mu:      array of mu(E)
    group:   output group
    e0:      edge energy, in eV. If None, it will be determined here.
    step:    edge jump.  If None, it will be determined here.
    pre1:    low E range (relative to E0) for pre-edge fit
    pre2:    high E range (relative to E0) for pre-edge fit
    nvict:   energy exponent to use for pre-edge fit.  See Notes.
    norm1:   low E range (relative to E0) for post-edge fit
    norm2:   high E range (relative to E0) for post-edge fit
    nnorm:   degree of polynomial (ie, nnorm+1 coefficients will be found) for
             post-edge normalization curve. See Notes.
    make_flat: boolean (Default True) to calculate flattened output.

    Returns
    -------
      None: The following attributes will be written to the output group:
        e0          energy origin
        edge_step   edge step
        norm        normalized mu(E), using polynomial
        norm_area   normalized mu(E), using integrated area
        flat        flattened, normalized mu(E)
        pre_edge    determined pre-edge curve
        post_edge   determined post-edge, normalization curve
        dmude       derivative of mu(E)

    (if the output group is None, _sys.xafsGroup will be written to)

    Notes
    -----
      1. Supports `First Argument Group` convention, requiring group members `energy` and `mu`.
      2. Support `Set XAFS Group` convention within Larch or if `_larch` is set.
      3. pre_edge: a line is fit to mu(energy)*energy**nvict over the region,
         energy=[e0+pre1, e0+pre2]. pre1 and pre2 default to None, which will set
             pre1 = e0 - 2nd energy point, rounded to 5 eV
             pre2 = roughly pre1/3.0, rounded to 5 eV
      4. post-edge: a polynomial of order nnorm is fit to mu(energy)*energy**nvict
         between energy=[e0+norm1, e0+norm2]. nnorm, norm1, norm2 default to None,
         which will set:
              norm2 = max energy - e0, rounded to 5 eV
              norm1 = roughly min(150, norm2/3.0), rounded to 5 eV
              nnorm = 2 if norm2-norm1>350, 1 if norm2-norm1>50, or 0 if less.
      5. flattening fits a quadratic curve (regardless of nnorm) to the post-edge
         normalized mu(E) and subtracts that curve from it.
    """

    energy, mu, group = parse_group_args(energy,
                                         members=('energy', 'mu'),
                                         defaults=(mu, ),
                                         group=group,
                                         fcn_name='pre_edge')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    pre_dat = preedge(energy,
                      mu,
                      e0=e0,
                      step=step,
                      nnorm=nnorm,
                      nvict=nvict,
                      pre1=pre1,
                      pre2=pre2,
                      norm1=norm1,
                      norm2=norm2)

    group = set_xafsGroup(group, _larch=_larch)

    e0 = pre_dat['e0']
    norm = pre_dat['norm']
    norm1 = pre_dat['norm1']
    norm2 = pre_dat['norm2']
    # generate flattened spectra, by fitting a quadratic to .norm
    # and removing that.
    flat = norm
    ie0 = index_nearest(energy, e0)
    p1 = index_of(energy, norm1 + e0)
    p2 = index_nearest(energy, norm2 + e0)
    if p2 - p1 < 2:
        p2 = min(len(energy), p1 + 2)

    if make_flat and p2 - p1 > 4:
        enx, mux = remove_nans2(energy[p1:p2], norm[p1:p2])
        # enx, mux = (energy[p1:p2], norm[p1:p2])
        fpars = Parameters()
        ncoefs = len(pre_dat['norm_coefs'])
        fpars.add('c0', value=0, vary=True)
        fpars.add('c1', value=0, vary=(ncoefs > 1))
        fpars.add('c2', value=0, vary=(ncoefs > 2))
        fit = Minimizer(flat_resid, fpars, fcn_args=(enx, mux))
        result = fit.leastsq(xtol=1.e-6, ftol=1.e-6)

        fc0 = result.params['c0'].value
        fc1 = result.params['c1'].value
        fc2 = result.params['c2'].value

        flat_diff = fc0 + energy * (fc1 + energy * fc2)
        flat = norm - (flat_diff - flat_diff[ie0])
        flat[:ie0] = norm[:ie0]

    group.e0 = e0
    group.norm = norm
    group.norm_poly = 1.0 * norm
    group.flat = flat
    group.dmude = np.gradient(mu) / np.gradient(energy)
    group.edge_step = pre_dat['edge_step']
    group.edge_step_poly = pre_dat['edge_step']
    group.pre_edge = pre_dat['pre_edge']
    group.post_edge = pre_dat['post_edge']

    group.pre_edge_details = Group()
    for attr in ('pre1', 'pre2', 'norm1', 'norm2', 'nnorm', 'nvict'):
        setattr(group.pre_edge_details, attr, pre_dat.get(attr, None))

    group.pre_edge_details.pre_slope = pre_dat['precoefs'][0]
    group.pre_edge_details.pre_offset = pre_dat['precoefs'][1]

    for i in range(MAX_NNORM):
        if hasattr(group, 'norm_c%i' % i):
            delattr(group, 'norm_c%i' % i)
    for i, c in enumerate(pre_dat['norm_coefs']):
        setattr(group.pre_edge_details, 'norm_c%i' % i, c)

    # guess element and edge
    group.atsym = getattr(group, 'atsym', None)
    group.edge = getattr(group, 'edge', None)

    if group.atsym is None or group.edge is None:
        _atsym, _edge = guess_edge(group.e0)
        if group.atsym is None: group.atsym = _atsym
        if group.edge is None: group.edge = _edge
    return
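A hedged usage sketch for pre_edge(), showing the main output attributes and the automatically chosen fit ranges reported in pre_edge_details. The synthetic edge, the Interpreter session, and the larch.xafs import path are assumptions for illustration only.

import numpy as np
from larch import Group, Interpreter
from larch.xafs import pre_edge   # import path assumed

session = Interpreter()

energy = np.linspace(11700.0, 12400.0, 1401)    # synthetic edge near 11919 eV
mu = np.arctan((energy - 11919.0) / 3.0) / np.pi + 0.5 + 3e-5 * (energy - 11700.0)
dat = Group(energy=energy, mu=mu)

pre_edge(dat, _larch=session)
d = dat.pre_edge_details
print(dat.e0, dat.edge_step)                     # edge position and jump
print(d.pre1, d.pre2, d.norm1, d.norm2, d.nnorm) # ranges and polynomial degree actually used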
Example #38
0
def mback(energy, mu=None, group=None, z=None, edge='K', e0=None, pre1=None, pre2=-50,
          norm1=100, norm2=None, order=3, leexiang=False, tables='chantler', fit_erfc=False,
          return_f1=False, _larch=None):
    """
    Match mu(E) data to tabulated f''(E) using the MBACK algorithm and,
    optionally, the Lee & Xiang extension

    Arguments
    ----------
      energy:     array of x-ray energies, in eV.
      mu:         array of mu(E).
      group:      output group.
      z:          atomic number of the absorber.
      edge:       x-ray absorption edge (default 'K')
      e0:         edge energy, in eV.  If None, it will be determined here.
      pre1:       low E range (relative to e0) for pre-edge region.
      pre2:       high E range (relative to e0) for pre-edge region.
      norm1:      low E range (relative to e0) for post-edge region.
      norm2:      high E range (relative to e0) for post-edge region.
      order:      order of the legendre polynomial for normalization.
                  (default=3, min=0, max=5).
      leexiang:   boolean (default False)  to use the Lee & Xiang extension.
      tables:     tabulated scattering factors: 'chantler' (default) or 'cl' (cromer-liberman)
      fit_erfc:   boolean (default False) to fit parameters of error function.
      return_f1:  boolean (default False) to include the f1 array in the group.


    Returns
    -------
      None

    The following attributes will be written to the output group:
      group.f2:            tabulated f2(E).
      group.f1:            tabulated f1(E) (if 'return_f1' is True).
      group.fpp:           mback-matched spectrum.
      group.edge_step:     edge step of spectrum.
      group.norm:          normalized spectrum.
      group.mback_params:  group of parameters for the minimization.

    References:
      * MBACK (Weng, Waldo, Penner-Hahn): http://dx.doi.org/10.1086/303711
      * Lee and Xiang: http://dx.doi.org/10.1088/0004-637X/702/2/970
      * Cromer-Liberman: http://dx.doi.org/10.1063/1.1674266
      * Chantler: http://dx.doi.org/10.1063/1.555974
    """
    order = max(min(order, MAXORDER), 0)

    ### implement the First Argument Group convention
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='mback')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    group = set_xafsGroup(group, _larch=_larch)

    energy = remove_dups(energy)
    if e0 is None or e0 < energy[1] or e0 > energy[-2]:
        e0 = find_e0(energy, mu, group=group)

    print(e0)
    ie0 = index_nearest(energy, e0)
    e0 = energy[ie0]

    pre1_input = pre1
    norm2_input = norm2

    if pre1 is None:  pre1  = min(energy) - e0
    if norm2 is None: norm2 = max(energy) - e0
    if norm2 < 0:     norm2 = max(energy) - e0 - norm2
    pre1  = max(pre1,  (min(energy) - e0))
    norm2 = min(norm2, (max(energy) - e0))

    if pre1 > pre2:
        pre1, pre2 = pre2, pre1
    if norm1 > norm2:
        norm1, norm2 = norm2, norm1

    p1 = index_of(energy, pre1+e0)
    p2 = index_nearest(energy, pre2+e0)
    n1 = index_nearest(energy, norm1+e0)
    n2 = index_of(energy, norm2+e0)
    if p2 - p1 < 2:
        p2 = min(len(energy), p1 + 2)
    if n2 - n1 < 2:
        n2 = min(len(energy), n1 + 2)

    ## theta is a boolean array indicating the
    ## energy values considered for the fit.
    ## theta=1 for included values, theta=0 for excluded values.
    theta            = np.zeros_like(energy, dtype='int')
    theta[p1:(p2+1)] = 1
    theta[n1:(n2+1)] = 1

    ## weights for the pre- and post-edge regions, as defined in the MBACK paper (?)
    weight            = np.ones_like(energy, dtype=float)
    weight[p1:(p2+1)] = np.sqrt(np.sum(weight[p1:(p2+1)]))
    weight[n1:(n2+1)] = np.sqrt(np.sum(weight[n1:(n2+1)]))

    ## get tabulated f1 and f2 from Chantler or Cromer-Liberman
    if tables.lower() == 'chantler':
        f1 = f1_chantler(z, energy, _larch=_larch)
        f2 = f2_chantler(z, energy, _larch=_larch)
    else:
        (f1, f2) = f1f2(z, energy, edge=edge, _larch=_larch)
    group.f2 = f2
    if return_f1:
        group.f1 = f1

    em = find_xray_line(z, edge)[0] # erfc centroid

    params = Parameters()
    params.add(name='s',  value=1.0,  vary=True)  # scale of data
    params.add(name='xi', value=50.0, vary=False, min=0) # width of erfc
    params.add(name='a',  value=0.0, vary=False)  # amplitude of erfc
    if fit_erfc:
        params['a'].vary  = True
        params['a'].value = 0.5
        params['xi'].vary  = True

    for i in range(order+1): # polynomial coefficients
        params.add(name='c%d' % i, value=0, vary=True)

    out = minimize(match_f2, params, method='leastsq',
                   gtol=1.e-5, ftol=1.e-5, xtol=1.e-5, epsfcn=1.e-5,
                   kws = dict(en=energy, mu=mu, f2=f2, e0=e0, em=em,
                              order=order, weight=weight, theta=theta, leexiang=leexiang))

    opars = out.params.valuesdict()
    eoff = energy - e0

    norm_function = opars['a']*erfc((energy-em)/opars['xi']) + opars['c0']
    for i in range(order):
        attr = 'c%d' % (i + 1)
        if attr in opars:
            norm_function  += opars[attr]* eoff**(i + 1)

    group.e0 = e0
    group.fpp = opars['s']*mu - norm_function
    # calculate edge step and normalization from f2 + norm_function
    pre_f2 = preedge(energy, group.f2+norm_function, e0=e0, pre1=pre1,
                     pre2=pre2, norm1=norm1, norm2=norm2, nnorm=2, nvict=0)
    group.edge_step = pre_f2['edge_step'] / opars['s']
    group.norm = (opars['s']*mu -  pre_f2['pre_edge']) / pre_f2['edge_step']
    group.mback_details = Group(params=opars, pre_f2=pre_f2,
                                f2_scaled=opars['s']*f2,
                                norm_function=norm_function)
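A hedged usage sketch for mback(): only the absorber Z and edge are needed beyond the data, and the f''-matched spectrum comes back as group.fpp. The synthetic Fe K-edge, the Interpreter session, and the larch.xafs import path are assumptions made for illustration.

import numpy as np
from larch import Group, Interpreter
from larch.xafs import mback   # import path assumed

session = Interpreter()

energy = np.linspace(6900.0, 7700.0, 1601)      # synthetic Fe K-edge (e0 ~ 7112 eV)
mu = np.arctan((energy - 7112.0) / 2.0) / np.pi + 0.5 + 2e-5 * (energy - 6900.0)
dat = Group(energy=energy, mu=mu)

mback(dat, z=26, edge='K', order=3, fit_erfc=False, _larch=session)
print(dat.edge_step)
print(dat.fpp[:3], dat.norm[:3])                # f''-matched and normalized spectra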
Example #39
0
def prepeaks_setup(energy,
                   norm=None,
                   group=None,
                   emin=None,
                   emax=None,
                   elo=None,
                   ehi=None,
                   _larch=None):
    """set up pre edge peak group.

    This assumes that pre_edge() has been run successfully on the spectra
    and that the spectra has decent pre-edge subtraction and normalization.

    Arguments:
       energy (ndarray or group): array of x-ray energies, in eV, or group (see note 1)
       norm (ndarray or None):    array of normalized mu(E)
       group (group or None):     output group
       emax (float or None):      max energy (eV) to use for baseline fit [e0-5]
       emin (float or None):      min energy (eV) to use for baseline fit [e0-40]
       elo (float or None):       low energy of pre-edge peak region to not fit baseline [e0-20]
       ehi (float or None):       high energy of pre-edge peak region to not fit baseline [e0-10]
       _larch (larch instance or None):  current larch session.

    A group named `prepeaks` will be created in the output group, containing:

        ==============   ===========================================================
         attribute        meaning
        ==============   ===========================================================
         energy           energy array for pre-edge peaks = energy[emin:emax]
         norm             spectrum over pre-edge peak energies
        ==============   ===========================================================

    Notes:
        1. Supports :ref:`First Argument Group` convention, requiring group members `energy` and `norm`
        2. Supports :ref:`Set XAFS Group` convention within Larch or if `_larch` is set.
    """
    energy, norm, group = parse_group_args(energy,
                                           members=('energy', 'norm'),
                                           defaults=(norm, ),
                                           group=group,
                                           fcn_name='pre_edge_baseline')

    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(norm.shape) > 1:
        norm = norm.squeeze()

    dat_emin, dat_emax = min(energy), max(energy)
    dat_e0 = getattr(group, 'e0', -1)

    if dat_e0 > 0:
        if emin is None:
            emin = dat_e0 - 30.0
        if emax is None:
            emax = dat_e0 - 1.0
        if elo is None:
            elo = dat_e0 - 15.0
        if ehi is None:
            ehi = dat_e0 - 5.0
        if emin < 0:
            emin += dat_e0
        if elo < 0:
            elo += dat_e0
        if emax < dat_emin:
            emax += dat_e0
        if ehi < dat_emin:
            ehi += dat_e0

    if emax is None or emin is None or elo is None or ehi is None:
        raise ValueError("must provide emin and emax to prepeaks_setup")

    # get indices for input energies
    if emin > emax:
        emin, emax = emax, emin
    if emin > elo:
        elo, emin = emin, elo
    if ehi > emax:
        ehi, emax = emax, ehi

    dele = 1.e-13 + min(np.diff(energy)) / 5.0

    ilo = index_of(energy, elo + dele)
    ihi = index_of(energy, ehi + dele)
    imin = index_of(energy, emin + dele)
    imax = index_of(energy, emax + dele)

    edat = energy[imin:imax + 1]
    norm = norm[imin:imax + 1]

    if not hasattr(group, 'prepeaks'):
        group.prepeaks = Group(energy=edat,
                               norm=norm,
                               emin=emin,
                               emax=emax,
                               elo=elo,
                               ehi=ehi)
    else:
        group.prepeaks.energy = edat
        group.prepeaks.norm = norm
        group.prepeaks.emin = emin
        group.prepeaks.emax = emax
        group.prepeaks.elo = elo
        group.prepeaks.ehi = ehi

    group.prepeaks.xdat = edat
    group.prepeaks.ydat = norm
    return
Example #40
0
def fluo_corr(energy,
              mu,
              formula,
              elem,
              group=None,
              edge='K',
              anginp=45,
              angout=45,
              _larch=None,
              **pre_kws):
    """correct over-absorption (self-absorption) for fluorescene XAFS
    using the FLUO alogrithm of D. Haskel.

    Arguments
    ---------
      energy    array of energies
      mu        uncorrected fluorescence mu
      formula   string for sample stoichiometry
      elem      atomic symbol or Z of absorbing element
      group     output group [default None]
      edge      name of edge ('K', 'L3', ...) [default 'K']
      anginp    input angle in degrees  [default 45]
      angout    output angle in degrees  [default 45]

    Additional keywords will be passed to pre_edge(), which will be used
    to ensure consistent normalization.

    Returns
    --------
       None, writes `mu_corr` and `norm_corr` (normalized `mu_corr`)
       to output group.

    Notes
    -----
       Supports First Argument Group convention, requiring group
       members 'energy' and 'mu'
    """
    energy, mu, group = parse_group_args(energy,
                                         members=('energy', 'mu'),
                                         defaults=(mu, ),
                                         group=group,
                                         fcn_name='fluo_corr')
    # gather pre-edge options
    pre_opts = {
        'e0': None,
        'nnorm': 1,
        'nvict': 0,
        'pre1': None,
        'pre2': -30,
        'norm1': 100,
        'norm2': None
    }
    if hasattr(group, 'pre_edge_details'):
        uopts = getattr(group.pre_edge_details, 'call_args', {})
        for attr in pre_opts:
            if attr in uopts:
                pre_opts[attr] = uopts[attr]
    pre_opts.update(pre_kws)
    pre_opts['step'] = None
    pre_opts['nvict'] = 0

    # generate normalized mu for correction
    preinp = preedge(energy, mu, **pre_opts)

    ang_corr = (np.sin(max(1.e-7, np.deg2rad(anginp))) /
                np.sin(max(1.e-7, np.deg2rad(angout))))

    # find edge energies and fluorescence line energy
    e_edge = xray_edge(elem, edge).energy
    e_fluor = xray_line(elem, edge).energy

    # calculate mu(E) for fluorescence energy, above, below edge

    muvals = material_mu(formula,
                         np.array([e_fluor, e_edge - 10.0, e_edge + 10.0]),
                         density=1)

    alpha = (muvals[0] * ang_corr + muvals[1]) / (muvals[2] - muvals[1])
    mu_corr = mu * alpha / (alpha + 1 - preinp['norm'])
    preout = preedge(energy, mu_corr, **pre_opts)
    if group is not None:
        if _larch is not None:
            group = set_xafsGroup(group, _larch=_larch)
        group.mu_corr = mu_corr
        group.norm_corr = preout['norm']
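A hedged usage sketch for fluo_corr() on synthetic fluorescence data from a hypothetical Fe2O3 sample measured in a 45/45 degree geometry. The arrays, the Interpreter session, and the larch.xafs import path are illustrative assumptions, not part of this snippet.

import numpy as np
from larch import Group, Interpreter
from larch.xafs import fluo_corr   # import path assumed

session = Interpreter()

energy = np.linspace(6900.0, 7700.0, 1601)
mu = np.arctan((energy - 7112.0) / 2.0) / np.pi + 0.5   # stand-in fluorescence mu(E)
dat = Group(energy=energy, mu=mu)

fluo_corr(dat.energy, dat.mu, 'Fe2O3', 'Fe', group=dat, edge='K',
          anginp=45, angout=45, _larch=session)
print(dat.mu_corr[:3], dat.norm_corr[:3])   # corrected and normalized-corrected mu(E)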
Example #41
0
def pre_edge_baseline(energy,
                      norm=None,
                      group=None,
                      form='lorentzian',
                      emin=None,
                      emax=None,
                      elo=None,
                      ehi=None,
                      with_line=True,
                      _larch=None):
    """remove baseline from main edge over pre edge peak region

    This assumes that pre_edge() has been run successfully on the spectra
    and that the spectra has decent pre-edge subtraction and normalization.

    Arguments:
       energy (ndarray or group): array of x-ray energies, in eV, or group (see note 1)
       norm (ndarray or group):   array of normalized mu(E)
       group (group or None):     output group
       elo (float or None):       low energy of pre-edge peak region to not fit baseline [e0-20]
       ehi (float or None):       high energy of pre-edge peak region to not fit baseline [e0-10]
       emax (float or None):      max energy (eV) to use for baseline fit [e0-5]
       emin (float or None):      min energy (eV) to use for baseline fit [e0-40]
       form (string):             form used for baseline (see note 2)  ['lorentzian']
       with_line (bool):          whether to include linear component in baseline ['True']
       _larch (larch instance or None):  current larch session.


    A function will be fit to the input mu(E) data over the range between
    [emin:elo] and [ehi:emax], ignoring the pre-edge peaks in the region
    [elo:ehi].  The baseline function is specified with the `form` keyword
    argument, which can be one of 'lorentzian', 'gaussian', or 'voigt',
    with 'lorentzian' the default.  In addition, the `with_line` keyword
    argument can be used to add a line to this baseline function.

    A group named 'prepeaks' will be used or created in the output group, containing

        ==============   ===========================================================
         attribute        meaning
        ==============   ===========================================================
         energy           energy array for pre-edge peaks = energy[emin:emax]
         baseline         fitted baseline array over pre-edge peak energies
         norm             spectrum over pre-edge peak energies
         peaks            baseline-subtracted spectrum over pre-edge peak energies
         centroid         estimated centroid of pre-edge peaks (see note 3)
         peak_energies    list of predicted peak energies (see note 4)
         fit_details      details of fit to extract pre-edge peaks.
        ==============   ===========================================================

    Notes:
       1. Supports :ref:`First Argument Group` convention, requiring group members `energy` and `norm`
       2. Supports :ref:`Set XAFS Group` convention within Larch or if `_larch` is set.
       3. The value calculated for `prepeaks.centroid`  will be found as
          (prepeaks.energy*prepeaks.peaks).sum() / prepeaks.peaks.sum()
       4. The values in the `peak_energies` list will be predicted energies
          of the peaks in `prepeaks.peaks` as found by peakutils.

    """
    energy, norm, group = parse_group_args(energy,
                                           members=('energy', 'norm'),
                                           defaults=(norm, ),
                                           group=group,
                                           fcn_name='pre_edge_baseline')

    prepeaks_setup(energy,
                   norm=norm,
                   group=group,
                   emin=emin,
                   emax=emax,
                   elo=elo,
                   ehi=ehi,
                   _larch=_larch)

    emin = group.prepeaks.emin
    emax = group.prepeaks.emax
    elo = group.prepeaks.elo
    ehi = group.prepeaks.ehi

    dele = 1.e-13 + min(np.diff(energy)) / 5.0

    imin = index_of(energy, emin + dele)
    ilo = index_of(energy, elo + dele)
    ihi = index_of(energy, ehi + dele)
    imax = index_of(energy, emax + dele)

    # build xdat, ydat: dat to fit (skipping pre-edge peaks)
    xdat = np.concatenate((energy[imin:ilo + 1], energy[ihi:imax + 1]))
    ydat = np.concatenate((norm[imin:ilo + 1], norm[ihi:imax + 1]))

    # build fitting model: note that we always include
    # a LinearModel but may fix slope and intercept
    form = form.lower()
    if form.startswith('voig'):
        model = VoigtModel()
    elif form.startswith('gaus'):
        model = GaussianModel()
    else:
        model = LorentzianModel()

    model += LinearModel()
    params = model.make_params(amplitude=1.0,
                               sigma=2.0,
                               center=emax,
                               intercept=0,
                               slope=0)
    params['amplitude'].min = 0.0
    params['sigma'].min = 0.25
    params['sigma'].max = 50.0
    params['center'].max = emax + 25.0
    params['center'].min = emax - 25.0

    if not with_line:
        params['slope'].vary = False
        params['intercept'].vary = False

    result = model.fit(ydat, params, x=xdat)

    cen = dcen = 0.
    peak_energies = []

    # energy including pre-edge peaks, for output
    edat = energy[imin:imax + 1]
    norm = norm[imin:imax + 1]
    bline = peaks = dpeaks = norm * 0.0

    # get baseline and resulting norm over edat range
    if result is not None:
        bline = result.eval(result.params, x=edat)
        peaks = norm - bline

        # estimate centroid
        cen = (edat * peaks).sum() / peaks.sum()

        # uncertainty in norm includes only uncertainties in baseline fit
        # and uncertainty in centroid:
        try:
            dpeaks = result.eval_uncertainty(result.params, x=edat)
        except:
            dpeaks = 0.0

        cen_plus = (edat * (peaks + dpeaks)).sum() / (peaks + dpeaks).sum()
        cen_minus = (edat * (peaks - dpeaks)).sum() / (peaks - dpeaks).sum()
        dcen = abs(cen_minus - cen_plus) / 2.0

        # locate peak positions
        if HAS_PEAKUTILS:
            peak_ids = peakutils.peak.indexes(peaks, thres=0.05, min_dist=2)
            peak_energies = [edat[pid] for pid in peak_ids]

    group = set_xafsGroup(group, _larch=_larch)
    group.prepeaks = Group(energy=edat,
                           norm=norm,
                           baseline=bline,
                           peaks=peaks,
                           delta_peaks=dpeaks,
                           centroid=cen,
                           delta_centroid=dcen,
                           peak_energies=peak_energies,
                           fit_details=result,
                           emin=emin,
                           emax=emax,
                           elo=elo,
                           ehi=ehi,
                           form=form,
                           with_line=with_line)
    return
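A hedged end-to-end sketch for pre_edge_baseline(): normalize with pre_edge(), fit a Lorentzian-plus-line baseline around a synthetic pre-edge peak, and read off the centroid. The test spectrum, the Interpreter session, and the larch.xafs import path are assumptions made only for illustration (peak_energies will be empty if peakutils is not installed).

import numpy as np
from larch import Group, Interpreter
from larch.xafs import pre_edge, pre_edge_baseline   # import path assumed

session = Interpreter()

energy = np.linspace(6980.0, 7250.0, 1081)
mu = (np.arctan((energy - 7112.0) / 1.5) / np.pi + 0.5
      + 0.08 * np.exp(-0.5 * ((energy - 7105.0) / 1.2) ** 2))   # pre-edge peak ~7 eV below e0
dat = Group(energy=energy, mu=mu)

pre_edge(dat, _larch=session)
pre_edge_baseline(dat, form='lorentzian', with_line=True, _larch=session)
pp = dat.prepeaks
print(pp.centroid, pp.delta_centroid, pp.peak_energies)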
Example #42
0
def pre_edge(energy,
             mu=None,
             group=None,
             e0=None,
             step=None,
             nnorm=3,
             nvict=0,
             pre1=None,
             pre2=-50,
             norm1=100,
             norm2=None,
             make_flat=True,
             _larch=None):
    """pre edge subtraction, normalization for XAFS

    This performs a number of steps:
       1. determine E0 (if not supplied) from max of deriv(mu)
       2. fit a line or polynomial to the region below the edge
       3. fit a polynomial to the region above the edge
       4. extrapolate the two curves to E0 to determine the edge jump

    Arguments
    ----------
    energy:  array of x-ray energies, in eV, or group (see note)
    mu:      array of mu(E)
    group:   output group
    e0:      edge energy, in eV.  If None, it will be determined here.
    step:    edge jump.  If None, it will be determined here.
    pre1:    low E range (relative to E0) for pre-edge fit
    pre2:    high E range (relative to E0) for pre-edge fit
    nvict:   energy exponent to use for pre-edge fit.  See Notes
    norm1:   low E range (relative to E0) for post-edge fit
    norm2:   high E range (relative to E0) for post-edge fit
    nnorm:   degree of polynomial (ie, nnorm+1 coefficients will be found) for
             post-edge normalization curve. Default=3 (cubic), max=5
    make_flat: boolean (Default True) to calculate flattened output.


    Returns
    -------
      None

    The following attributes will be written to the output group:
        e0          energy origin
        edge_step   edge step
        norm        normalized mu(E)
        flat        flattened, normalized mu(E)
        pre_edge    determined pre-edge curve
        post_edge   determined post-edge, normalization curve
        dmude       derivative of mu(E)

    (if the output group is None, _sys.xafsGroup will be written to)

    Notes
    -----
     1 nvict gives an exponent to the energy term for the fits to the pre-edge
       and the post-edge region.  For the pre-edge, a line (m * energy + b) is
       fit to mu(energy)*energy**nvict over the pre-edge region,
       energy=[e0+pre1, e0+pre2].  For the post-edge, a polynomial of order
       nnorm will be fit to mu(energy)*energy**nvict of the post-edge region
       energy=[e0+norm1, e0+norm2].

     2 If the first argument is a Group, it must contain 'energy' and 'mu'.
       If it exists, group.e0 will be used as e0.
       See First Argument Group in Documentation
    """

    energy, mu, group = parse_group_args(energy,
                                         members=('energy', 'mu'),
                                         defaults=(mu, ),
                                         group=group,
                                         fcn_name='pre_edge')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    pre_dat = preedge(energy,
                      mu,
                      e0=e0,
                      step=step,
                      nnorm=nnorm,
                      nvict=nvict,
                      pre1=pre1,
                      pre2=pre2,
                      norm1=norm1,
                      norm2=norm2)

    group = set_xafsGroup(group, _larch=_larch)

    e0 = pre_dat['e0']
    norm = pre_dat['norm']
    norm1 = pre_dat['norm1']
    norm2 = pre_dat['norm2']
    # generate the flattened spectrum by fitting a quadratic to .norm over
    # the post-edge region and subtracting it above E0 (anchored at E0)
    flat = norm
    ie0 = index_nearest(energy, e0)
    p1 = index_of(energy, norm1 + e0)
    p2 = index_nearest(energy, norm2 + e0)
    if p2 - p1 < 2:
        p2 = min(len(energy), p1 + 2)

    if make_flat and p2 - p1 > 4:
        enx, mux = remove_nans2(energy[p1:p2], norm[p1:p2])
        # enx, mux = (energy[p1:p2], norm[p1:p2])
        fpars = Parameters()
        fpars.add('c0', value=0, vary=True)
        fpars.add('c1', value=0, vary=True)
        fpars.add('c2', value=0, vary=True)
        fit = Minimizer(flat_resid, fpars, fcn_args=(enx, mux))
        result = fit.leastsq(xtol=1.e-6, ftol=1.e-6)

        fc0 = result.params['c0'].value
        fc1 = result.params['c1'].value
        fc2 = result.params['c2'].value

        flat_diff = fc0 + energy * (fc1 + energy * fc2)
        flat = norm - flat_diff + flat_diff[ie0]
        flat[:ie0] = norm[:ie0]

    group.e0 = e0
    group.norm = norm
    group.flat = flat
    group.dmude = np.gradient(mu) / np.gradient(energy)
    group.edge_step = pre_dat['edge_step']
    group.pre_edge = pre_dat['pre_edge']
    group.post_edge = pre_dat['post_edge']

    group.pre_edge_details = Group()
    group.pre_edge_details.pre1 = pre_dat['pre1']
    group.pre_edge_details.pre2 = pre_dat['pre2']
    group.pre_edge_details.nnorm = pre_dat['nnorm']
    group.pre_edge_details.norm1 = pre_dat['norm1']
    group.pre_edge_details.norm2 = pre_dat['norm2']
    group.pre_edge_details.pre1_input = pre_dat['pre1_input']
    group.pre_edge_details.norm2_input = pre_dat['norm2_input']
    group.pre_edge_details.pre_slope = pre_dat['precoefs'][0]
    group.pre_edge_details.pre_offset = pre_dat['precoefs'][1]

    for i in range(MAX_NNORM):
        if hasattr(group, 'norm_c%i' % i):
            delattr(group, 'norm_c%i' % i)
    for i, c in enumerate(pre_dat['norm_coefs']):
        setattr(group.pre_edge_details, 'norm_c%i' % i, c)
    return
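A minimal usage sketch for the pre_edge() defined above. The reader, file name, and column labels are assumptions; any group (or pair of arrays) providing 'energy' and 'mu' would work the same way:

from larch.io import read_ascii   # assumed reader

dat = read_ascii('cu_k_xanes.dat', labels='energy mu')   # hypothetical data file
pre_edge(dat.energy, dat.mu, group=dat, nnorm=2)

print('E0        = %.2f eV' % dat.e0)
print('edge step = %.4f' % dat.edge_step)
# dat.norm, dat.flat, dat.pre_edge and dat.post_edge are now set on the group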
Example #43
0
def cauchy_wavelet(k,
                   chi=None,
                   group=None,
                   kweight=0,
                   rmax_out=10,
                   nfft=2048,
                   _larch=None):
    """
    Cauchy Wavelet Transform for XAFS, following work of Munoz, Argoul, and Farges

    Parameters:
    -----------
      k:        1-d array of photo-electron wavenumber in Ang^-1 or group
      chi:      1-d array of chi
      group:    output Group
      rmax_out: highest R for output data (10 Ang)
      kweight:  exponent for weighting spectra by k**kweight
      nfft:     value to use for N_fft (2048).

    Returns:
    --------
      None   -- outputs are written to supplied group.

    Notes:
    -------
    Arrays written to output group:
    r                  uniform array of R, out to rmax_out.
    wcauchy            complex cauchy wavelet(k, R)
    wcauchy_mag        magnitude of wavelet(k, R)
    wcauchy_re         real part of wavelet(k, R)
    wcauchy_im         imaginary part of wavelet(k, R)

    Supports First Argument Group convention (with group
    member names 'k' and 'chi')

    """
    k, chi, group = parse_group_args(k,
                                     members=('k', 'chi'),
                                     defaults=(chi, ),
                                     group=group,
                                     fcn_name='cauchy_wavelet')

    kstep = np.round(1000. * (k[1] - k[0])) / 1000.0
    rstep = (np.pi / 2048) / kstep
    rmin = 1.e-7
    rmax = rmax_out
    nrpts = int(np.round((rmax - rmin) / rstep))
    nkout = len(k)
    if kweight != 0:
        chi = chi * k**kweight

    # extend (or truncate) chi to NFT = nfft/2 data points
    NFT = int(nfft / 2)
    if len(k) < NFT:
        knew = np.arange(NFT) * kstep
        xnew = np.zeros(NFT)
        xnew[:len(k)] = chi
    else:
        knew = k[:NFT]
        xnew = chi[:NFT]

    # FT parameters
    freq = (1.0 / kstep) * np.arange(nfft) / (2 * nfft)
    omega = 2 * np.pi * freq

    # simple FT calculation
    tff = np.fft.fft(xnew, n=2 * nfft)

    # scale parameter
    r = np.linspace(0, rmax, nrpts)
    r[0] = 1.e-19
    a = nrpts / (2 * r)

    # Characteristic values for Cauchy wavelet:
    cauchy_sum = np.log(2 * np.pi) - np.log(1.0 + np.arange(nrpts)).sum()

    # Main calculation:
    out = np.zeros(nkout * nrpts, dtype='complex128').reshape(nrpts, nkout)
    for i in range(nrpts):
        aom = a[i] * omega
        aom[np.where(aom == 0)] = 1.e-19
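        # exp(filt) is the Cauchy wavelet at scale a[i] in frequency space:
        # (2*pi / nrpts!) * (a*omega)**nrpts * exp(-a*omega); it is built in
        # log space (via cauchy_sum) to avoid overflow for large nrpts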
        filt = cauchy_sum + nrpts * np.log(aom) - aom
        tmp = np.conj(np.exp(filt)) * tff[:nfft]
        out[i, :] = np.fft.ifft(tmp, 2 * nfft)[:nkout]

    group = set_xafsGroup(group, _larch=_larch)
    group.r = r
    group.wcauchy = out
    group.wcauchy_mag = np.sqrt(out.real**2 + out.imag**2)
    group.wcauchy_re = out.real
    group.wcauchy_im = out.imag
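A minimal usage sketch for cauchy_wavelet(), assuming a hypothetical group `dat` that already holds background-subtracted EXAFS arrays dat.k and dat.chi, and that matplotlib is available for display:

import matplotlib.pyplot as plt

cauchy_wavelet(dat.k, dat.chi, group=dat, kweight=2, rmax_out=8)

# wcauchy_mag has shape (len(dat.r), len(dat.k)): rows are R, columns are k
plt.imshow(dat.wcauchy_mag, origin='lower', aspect='auto',
           extent=(dat.k[0], dat.k[-1], dat.r[0], dat.r[-1]))
plt.xlabel('k (1/Ang)')
plt.ylabel('R (Ang)')
plt.show()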
Example #44
0
def pre_edge_baseline(energy,
                      norm=None,
                      group=None,
                      form='lorentzian',
                      emin=None,
                      emax=None,
                      elo=None,
                      ehi=None,
                      with_line=True,
                      _larch=None):
    """remove baseline from main edge over pre edge peak region

    This assumes that pre_edge() has been run successfully on the spectra
    and that the spectra has decent pre-edge subtraction and normalization.

    Arguments
    ----------
    energy:    array of x-ray energies, in eV, or group (see note 1)
    norm:      array of normalized mu(E)
    group:     output group
    elo:       low energy of pre-edge peak region to not fit baseline [e0-15]
    ehi:       high energy of pre-edge peak region to not fit baseline [e0-5]
    emax:      max energy (eV) to use for baseline fit [e0-1]
    emin:      min energy (eV) to use for baseline fit [e0-30]
    form:      form used for baseline (see note 2)  ['lorentzian']
    with_line: whether to include linear component in baseline [True]


    Returns
    -------
      None

    A group named 'prepeaks' will be created in the output group, with the following
    attributes:
        energy        energy array for pre-edge peaks = energy[emin:emax]
        baseline      fitted baseline array over pre-edge peak energies
        mu            baseline-subtracted spectrum over pre-edge peak energies
        dmu           estimated uncertainty in mu from fit
        centroid      estimated centroid of pre-edge peaks (see note 3)
        peak_energies list of predicted peak energies (see note 4)
        fit_details   details of fit to extract pre-edge peaks.

    (if the output group is None, _sys.xafsGroup will be written to)

    Notes
    -----
     1 If the first argument is a Group, it must contain 'energy' and 'norm'.
       See First Argument Group in the Documentation

     2 A function will be fit to the input mu(E) data over the range between
       [emin:elo] and [ehi:emax], ignoring the pre-edge peaks in the
       region [elo:ehi].  The baseline function is specified with the `form`
       keyword argument, which can be one of
           'lorentzian', 'gaussian', or 'voigt',
       with 'lorentzian' the default.  In addition, the `with_line` keyword
       argument can be used to add a line to this baseline function.

     3 The value calculated for `prepeaks.centroid`  will be found as
         (prepeaks.energy*prepeaks.mu).sum() / prepeaks.mu.sum()
     4 The values in the `peak_energies` list will be predicted energies
       of the peaks in `prepeaks.mu` as found by peakutils.

    """
    energy, norm, group = parse_group_args(energy,
                                           members=('energy', 'norm'),
                                           defaults=(norm, ),
                                           group=group,
                                           fcn_name='pre_edge_baseline')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(norm.shape) > 1:
        norm = norm.squeeze()

    dat_emin, dat_emax = min(energy), max(energy)

    dat_e0 = getattr(group, 'e0', -1)

    if dat_e0 > 0:
        if emin is None:
            emin = dat_e0 - 30.0
        if emax is None:
            emax = dat_e0 - 1.0
        if elo is None:
            elo = dat_e0 - 15.0
        if ehi is None:
            ehi = dat_e0 - 5.0
        if emin < 0:
            emin += dat_e0
        if elo < 0:
            elo += dat_e0
        if emax < dat_emin:
            emax += dat_e0
        if ehi < dat_emin:
            ehi += dat_e0

    if emax is None or emin is None or elo is None or ehi is None:
        raise ValueError("must provide emin, emax, elo, and ehi (or a group with e0) to pre_edge_baseline")

    # get indices for input energies
    if emin > emax:
        emin, emax = emax, emin
    if emin > elo:
        elo, emin = emin, elo
    if ehi > emax:
        ehi, emax = emax, ehi

    imin = index_of(energy, emin)
    ilo = index_of(energy, elo)
    ihi = index_of(energy, ehi)
    imax = index_of(energy, emax)

    # build xdat, ydat: dat to fit (skipping pre-edge peaks)
    xdat = np.concatenate((energy[imin:ilo + 1], energy[ihi:imax + 1]))
    ydat = np.concatenate((norm[imin:ilo + 1], norm[ihi:imax + 1]))

    # build fitting model: note that we always include
    # a LinearModel but may fix slope and intercept
    form = form.lower()
    if form.startswith('voig'):
        model = VoigtModel()
    elif form.startswith('gaus'):
        model = GaussianModel()
    else:
        model = LorentzianModel()

    model += LinearModel()
    params = model.make_params(amplitude=1.0,
                               sigma=2.0,
                               center=emax,
                               intercept=0,
                               slope=0)
    params['amplitude'].min = 0.0
    params['sigma'].min = 0.25
    params['sigma'].max = 50.0
    params['center'].max = emax + 25.0
    params['center'].min = emax - 25.0

    if not with_line:
        params['slope'].vary = False
        params['intercept'].vary = False

    # run fit
    result = model.fit(ydat, params, x=xdat)

    # energy including pre-edge peaks, for output
    edat = energy[imin:imax + 1]

    # get baseline and resulting mu over edat range
    bline = result.eval(result.params, x=edat)
    mu = norm[imin:imax + 1] - bline

    # uncertainty in mu includes only uncertainties in baseline fit
    dmu = result.eval_uncertainty(result.params, x=edat)

    # estimate centroid and its uncertainty
    cen = (edat * mu).sum() / mu.sum()
    cen_plus = (edat * (mu + dmu)).sum() / (mu + dmu).sum()
    cen_minus = (edat * (mu - dmu)).sum() / (mu - dmu).sum()
    dcen = abs(cen_minus - cen_plus) / 2.0

    # locate peak positions
    peak_energies = []
    if HAS_PEAKUTILS:
        peak_ids = peakutils.peak.indexes(mu, thres=0.05, min_dist=2)
        peak_energies = [edat[pid] for pid in peak_ids]

    group = set_xafsGroup(group, _larch=_larch)
    group.prepeaks = Group(energy=edat,
                           mu=mu,
                           delta_mu=dmu,
                           baseline=bline,
                           centroid=cen,
                           delta_centroid=dcen,
                           peak_energies=peak_energies,
                           fit_details=result,
                           emin=emin,
                           emax=emax,
                           elo=elo,
                           ehi=ehi,
                           form=form,
                           with_line=with_line)
    return
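A minimal usage sketch for pre_edge_baseline(), assuming pre_edge() has already been run on a hypothetical group `dat` so that dat.e0 and dat.norm exist:

pre_edge_baseline(dat.energy, dat.norm, group=dat, form='gaussian', with_line=True)

pp = dat.prepeaks
print('baseline fit range : %.1f to %.1f eV' % (pp.emin, pp.emax))
print('pre-edge centroid  : %.2f +/- %.2f eV' % (pp.centroid, pp.delta_centroid))
print('peak energies found:', pp.peak_energies)
print(pp.fit_details.fit_report())   # lmfit report for the baseline model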
Example #45
0
def fluo_corr(energy, mu, formula, elem, group=None, edge='K', anginp=45,
              angout=45, _larch=None, **pre_kws):
    """correct over-absorption (self-absorption) for fluorescene XAFS
    using the FLUO alogrithm of D. Haskel.

    Arguments
    ---------
      energy    array of energies
      mu        uncorrected fluorescence mu
      formula   string for sample stoichiometry
      elem      atomic symbol or Z of absorbing element
      group     output group [default None]
      edge      name of edge ('K', 'L3', ...) [default 'K']
      anginp    input angle in degrees  [default 45]
      angout    output angle in degrees  [default 45]

    Additional keywords will be passed to pre_edge(), which will be used
    to ensure consistent normalization.

    Returns
    --------
       None, writes `mu_corr` and `norm_corr` (normalized `mu_corr`)
       to output group.

    Notes
    -----
       Supports First Argument Group convention, requiring group
       members 'energy' and 'mu'
    """
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='fluo_corr')
    # gather pre-edge options
    pre_opts = {'e0': None, 'nnorm': 1, 'nvict': 0,
                'pre1': None, 'pre2': -30,
                'norm1': 100, 'norm2': None}
    if hasattr(group, 'pre_edge_details'):
        uopts = getattr(group.pre_edge_details, 'call_args', {})
        for attr in pre_opts:
            if attr in uopts:
                pre_opts[attr] = uopts[attr]
    pre_opts.update(pre_kws)
    pre_opts['step'] = None
    pre_opts['nvict'] = 0

    # generate normalized mu for correction
    preinp   = preedge(energy, mu, **pre_opts)

    ang_corr = (np.sin(max(1.e-7, np.deg2rad(anginp))) /
                np.sin(max(1.e-7, np.deg2rad(angout))))

    # find edge energies and fluorescence line energy
    e_edge  = xray_edge(elem, edge)[0]
    e_fluor = xray_line(elem, edge)[0]

    # calculate mu(E) for fluorescence energy, above, below edge
    muvals  = material_mu(formula,
                         np.array([e_fluor, e_edge-10.0, e_edge+10.0]),
                         density=1)

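    # alpha: angle-corrected absorption at the fluorescence energy plus the
    # below-edge absorption, divided by the absorber's edge step in the matrix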
    alpha   = (muvals[0]*ang_corr + muvals[1])/(muvals[2] - muvals[1])
    mu_corr = mu*alpha/(alpha + 1 - preinp['norm'])
    preout  = preedge(energy, mu_corr, **pre_opts)
    if group is not None:
        if _larch is not None:
            group = set_xafsGroup(group, _larch=_larch)
        group.mu_corr = mu_corr
        group.norm_corr = preout['norm']
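A minimal usage sketch for fluo_corr(), with a hypothetical group `dat` holding fluorescence-mode data; the formula and element here are illustrative. The correction scales mu by alpha/(alpha + 1 - norm), with alpha computed from tabulated absorption at the fluorescence energy and just below/above the edge:

fluo_corr(dat.energy, dat.mu, 'Fe2O3', 'Fe', group=dat, edge='K',
          anginp=45, angout=45)

# dat.mu_corr and dat.norm_corr now hold the over-absorption-corrected spectra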