def get_xranges(self, x):
    """Return (i1, i2) slice indices into x covering the emin..emax
    range read from the current form, so that x[i1:i2] spans the range.

    A small epsilon (1/5 of the smallest energy step of the current
    group) is added before the index lookup to avoid landing one
    point short due to floating-point comparison.
    """
    form_opts = self.read_form()
    dgroup = self.controller.get_group()
    eps = min(np.diff(dgroup.energy)) / 5.0
    lo = index_of(x, form_opts['emin'] + eps)
    hi = 1 + index_of(x, form_opts['emax'] + eps)
    return lo, hi
def merge_groups(grouplist, master=None, xarray='energy', yarray='mu',
                 kind='cubic', trim=True, calc_yerr=True, _larch=None):
    """merge arrays from a list of groups.

    Arguments
    ---------
     grouplist   list of groups to merge
     master      group to use for common x arrary [None -> 1st group]
     xarray      name of x-array for merge ['energy']
     yarray      name of y-array for merge ['mu']
     kind        interpolation kind ['cubic']
     trim        whether to trim to the shortest energy range [True]
     calc_yerr   whether to use the variance in the input as yerr [True]

    Returns
    --------
     group with x-array and y-array containing merged data.

    Notes
    -----
    The output group carries `<yarray>_std`, the per-point standard
    deviation across the input groups.
    """
    # NOTE(review): calc_yerr is accepted but not consulted; the std
    # array is always attached.  Left as-is to preserve the interface.
    if master is None:
        master = grouplist[0]
    xout = getattr(master, xarray)
    xmins = [min(xout)]
    xmaxs = [max(xout)]
    yvals = []
    for g in grouplist:
        x = getattr(g, xarray)
        y = getattr(g, yarray)
        # interpolate each group's y onto the master x grid
        yvals.append(interp(x, y, xout, kind=kind))
        xmins.append(min(x))
        xmaxs.append(max(x))
    yvals = np.array(yvals)
    yave = yvals.mean(axis=0)
    ystd = yvals.std(axis=0)
    if trim:
        # FIX: trim to the *intersection* of all input ranges:
        # largest of the minima, smallest of the maxima.  The previous
        # `min(xmins)` kept the widest lower bound, so no low-energy
        # trimming ever happened.
        xmin = max(xmins)
        xmax = min(xmaxs)
        ixmin = index_of(xout, xmin)
        ixmax = index_of(xout, xmax)
        xout = xout[ixmin:ixmax]
        yave = yave[ixmin:ixmax]
        ystd = ystd[ixmin:ixmax]
    grp = Group()
    setattr(grp, xarray, xout)
    setattr(grp, yarray, yave)
    setattr(grp, yarray + '_std', ystd)
    return grp
def onPick2Timer(self, evt=None):
    """checks for 'Pick 2' events, and initiates 'Pick 2' guess
    for a model from the selected data range

    Polled by `pick2_timer`: waits for two cursor clicks on the plot,
    then uses that x-range to run the model's `guess()` and push the
    guessed parameter values into the parameter widgets.
    """
    try:
        plotframe = self.controller.get_display(win=1)
        curhist = plotframe.cursor_hist[:]
        plotframe.Raise()
    except Exception:
        # display not available yet (or already closed): try again later
        return

    if (time.time() - self.pick2_t0) > self.pick2_timeout:
        # timed out waiting for two clicks: clear message and give up
        # (dropped unused `msg =` binding of SetLabel()'s return value)
        self.pick2_group.pick2_msg.SetLabel(" ")
        plotframe.cursor_hist = []
        self.pick2_timer.Stop()
        return

    if len(curhist) < 2:
        # still waiting: show progress as "0/2" or "1/2"
        self.pick2_group.pick2_msg.SetLabel("%i/2" % (len(curhist)))
        return

    self.pick2_group.pick2_msg.SetLabel("done.")
    self.pick2_timer.Stop()

    # guess param values over the clicked x-range
    xcur = (curhist[0][0], curhist[1][0])
    xmin, xmax = min(xcur), max(xcur)

    dgroup = getattr(self.larch.symtable, self.controller.groupname)
    i0 = index_of(dgroup.xdat, xmin)
    i1 = index_of(dgroup.xdat, xmax)
    x, y = dgroup.xdat[i0:i1+1], dgroup.ydat[i0:i1+1]

    mod = self.pick2_group.mclass(prefix=self.pick2_group.prefix)
    parwids = self.pick2_group.parwids
    try:
        guesses = mod.guess(y, x=x)
    except Exception:
        # model cannot guess from this data range: leave widgets alone
        return

    for name, param in guesses.items():
        if name in parwids:
            parwids[name].value.SetValue(param.value)

    # overplot the guessed model over the full x-range
    dgroup._tmp = mod.eval(guesses, x=dgroup.xdat)
    plotframe = self.controller.get_display(win=1)
    plotframe.cursor_hist = []
    plotframe.oplot(dgroup.xdat, dgroup._tmp)
    self.pick2erase_panel = plotframe.panel
    self.pick2erase_timer.Start(5000)
def extend_plotrange(x, y, xmin=None, xmax=None, extend=0.10):
    """return plot limits to extend a plot range for x, y pairs

    Selects the x window [xmin, xmax] (clipped to the data, padded by
    5 in x before lookup), then widens both the x and y spans of the
    selected points by `extend` (fraction of span) on each side.
    Returns (xlo, xhi, ylo, yhi).
    """
    xeps = min(diff(x)) / 5.
    if xmin is None:
        xlo = min(x)
    else:
        xlo = max(min(x), xmin - 5)
    if xmax is None:
        xhi = max(x)
    else:
        xhi = min(max(x), xmax + 5)
    i0 = index_of(x, xlo + xeps)
    i1 = index_of(x, xhi + xeps) + 1
    xsel, ysel = x[i0:i1], y[i0:i1]
    xpad = extend * (max(xsel) - min(xsel))
    ypad = extend * (max(ysel) - min(ysel))
    return (min(xsel) - xpad, max(xsel) + xpad,
            min(ysel) - ypad, max(ysel) + ypad)
def plot_prepeaks_baseline(dgroup, subtract_baseline=False, show_fitrange=True,
                           show_peakrange=True, win=1, _larch=None, **kws):
    """Plot pre-edge peak baseline fit, as from `pre_edge_baseline` or XAS Viewer

    dgroup must have a 'prepeaks' attribute

    Arguments
    ---------
     dgroup            group with `prepeaks` sub-group (and xdat/ydat arrays)
     subtract_baseline if True, plot only the baseline-region arrays [False]
     show_fitrange     draw vertical lines at emin/emax and centroid [True]
     show_peakrange    draw markers at elo/ehi on the plotted curve [True]
     win               plot window index [1]
     kws               extra keyword args passed through to the plot call
    """
    if not hasattr(dgroup, 'prepeaks'):
        raise ValueError('Group needs prepeaks')
    #endif
    ppeak = dgroup.prepeaks
    # pad plot limits around the baseline-fit energy range
    px0, px1, py0, py1 = extend_plotrange(dgroup.xdat, dgroup.ydat,
                                          xmin=ppeak.emin, xmax=ppeak.emax)
    title = "pre_edge baseline\n %s" % dgroup.filename
    popts = dict(xmin=px0, xmax=px1, ymin=py0, ymax=py1, title=title,
                 xlabel='Energy (eV)', ylabel='mu (normalized)',
                 delay_draw=True, show_legend=True, style='solid',
                 linewidth=3, label='data', new=True, marker='None',
                 markersize=4, win=win, _larch=_larch)
    popts.update(kws)
    ydat = dgroup.ydat
    xdat = dgroup.xdat
    if subtract_baseline:
        # NOTE(review): this plots ppeak.baseline itself, but labels it
        # 'baseline subtracted peaks' -- possibly intended to be
        # ppeak.norm - ppeak.baseline.  Confirm before changing.
        xdat = ppeak.energy
        ydat = ppeak.baseline
        popts['label'] = 'baseline subtracted peaks'
        _plot(xdat, ydat, **popts)
    else:
        _plot(xdat, ydat, **popts)
        popts['new'] = False
        popts['label'] = 'baseline'
        _oplot(ppeak.energy, ppeak.baseline, **popts)
    # markers/lines below are excluded from the legend
    popts = dict(win=win, _larch=_larch, delay_draw=True, label='_nolegend_')
    if show_fitrange:
        for x in (ppeak.emin, ppeak.emax):
            _plot_axvline(x, color='#DDDDCC', **popts)
        _plot_axvline(ppeak.centroid, color='#EECCCC', **popts)
    if show_peakrange:
        for x in (ppeak.elo, ppeak.ehi):
            # mark elo/ehi on whichever curve is currently plotted
            y = ydat[index_of(xdat, x)]
            _plot_marker(x, y, color='#222255', marker='o', size=8, **popts)
    redraw(win=win, xmin=px0, xmax=px1, ymin=py0, ymax=py1,
           show_legend=True, _larch=_larch)
def extend_plotrange(x, y, xmin=None, xmax=None, extend=0.10):
    """return plot limits to extend a plot range for x, y pairs

    Clips the requested [xmin, xmax] to the data (with a 5-unit
    pre-lookup pad), then returns (xlo, xhi, ylo, yhi) with each span
    grown by `extend` of its width on both sides.
    """
    eps = min(diff(x)) / 5.
    xmin = min(x) if xmin is None else max(min(x), xmin - 5)
    xmax = max(x) if xmax is None else min(max(x), xmax + 5)
    j0 = index_of(x, xmin + eps)
    j1 = index_of(x, xmax + eps) + 1
    xspan = x[j0:j1]
    yspan = y[j0:j1]
    dx = extend * (max(xspan) - min(xspan))
    dy = extend * (max(yspan) - min(yspan))
    return (min(xspan) - dx, max(xspan) + dx,
            min(yspan) - dy, max(yspan) + dy)
def preedge(energy, mu, e0=None, step=None, nnorm=None, nvict=0, pre1=None,
            pre2=None, norm1=None, norm2=None):
    """pre edge subtraction, normalization for XAFS (straight python)

    This performs a number of steps:
       1. determine E0 (if not supplied) from max of deriv(mu)
       2. fit a line to the region below the edge
       3. fit a polymonial to the region above the edge
       4. extrapolate the two curves to E0 and take their difference
          to determine the edge jump

    Arguments
    ----------
    energy:  array of x-ray energies, in eV
    mu:      array of mu(E)
    e0:      edge energy, in eV.  If None, it will be determined here.
    step:    edge jump.  If None, it will be determined here.
    pre1:    low E range (relative to E0) for pre-edge fit
    pre2:    high E range (relative to E0) for pre-edge fit
    nvict:   energy exponent to use for pre-edge fit.  See Note
    norm1:   low E range (relative to E0) for post-edge fit
    norm2:   high E range (relative to E0) for post-edge fit
    nnorm:   degree of polynomial (ie, nnorm+1 coefficients will be
             found) for post-edge normalization curve.
             Default=None -- see note.

    Returns
    -------
    dictionary with elements (among others)
        e0          energy origin in eV
        edge_step   edge step
        norm        normalized mu(E)
        pre_edge    determined pre-edge curve
        post_edge   determined post-edge, normalization curve

    Notes
    -----
    1  pre_edge: a line is fit to mu(energy)*energy**nvict over the
       region, energy=[e0+pre1, e0+pre2].  pre1 and pre2 default to
       None, which will set
           pre1 = e0 - 2nd energy point, rounded to 5 eV
           pre2 = roughly pre1/3.0, rounded to 5 eV
    2  post-edge: a polynomial of order nnorm is fit to
       mu(energy)*energy**nvict between energy=[e0+norm1, e0+norm2].
       nnorm, norm1, norm2 default to None, which will set:
           nnorm = 2 if norm2-norm1>350, 1 if norm2-norm1>50, or 0 if less.
           norm2 = max energy - e0, rounded to 5 eV
           norm1 = roughly min(150, norm2/3.0), rounded to 5 eV
    """
    energy = remove_dups(energy)
    # find e0 when not given or clearly outside the usable energy range
    if e0 is None or e0 < energy[1] or e0 > energy[-2]:
        e0 = _finde0(energy, mu)
    ie0 = index_nearest(energy, e0)
    # snap e0 onto an actual energy grid point
    e0 = energy[ie0]

    if pre1 is None:
        # skip first energy point, often bad
        if ie0 > 20:
            pre1 = 5.0 * round((energy[1] - e0) / 5.0)
        else:
            pre1 = 2.0 * round((energy[1] - e0) / 2.0)
    # never extend below the first measured energy
    pre1 = max(pre1, (min(energy) - e0))
    if pre2 is None:
        pre2 = 5.0 * round(pre1 / 15.0)
    if pre1 > pre2:
        pre1, pre2 = pre2, pre1

    if norm2 is None:
        norm2 = 5.0 * round((max(energy) - e0) / 5.0)
    if norm2 < 0:
        # negative norm2 means "relative to the end of the data"
        norm2 = max(energy) - e0 - norm2
    norm2 = min(norm2, (max(energy) - e0))
    if norm1 is None:
        norm1 = min(150, 5.0 * round(norm2 / 15.0))
    if norm1 > norm2:
        norm1, norm2 = norm2, norm1

    if nnorm is None:
        # polynomial degree scales with the width of the post-edge range
        nnorm = 2
        if norm2 - norm1 < 350:
            nnorm = 1
        if norm2 - norm1 < 50:
            nnorm = 0
    nnorm = max(min(nnorm, MAX_NNORM), 0)

    # preedge: linear fit to mu*E**nvict over [e0+pre1, e0+pre2]
    p1 = index_of(energy, pre1 + e0)
    p2 = index_nearest(energy, pre2 + e0)
    if p2 - p1 < 2:
        # guarantee at least 2 points for the linear fit
        p2 = min(len(energy), p1 + 2)
    omu = mu * energy**nvict
    ex, mx = remove_nans2(energy[p1:p2], omu[p1:p2])
    precoefs = polyfit(ex, mx, 1)
    # undo the E**nvict weighting when evaluating the pre-edge line
    pre_edge = (precoefs[0] * energy + precoefs[1]) * energy**(-nvict)

    # normalization: polynomial fit to (mu - pre_edge) over [e0+norm1, e0+norm2]
    p1 = index_of(energy, norm1 + e0)
    p2 = index_nearest(energy, norm2 + e0)
    if p2 - p1 < 2:
        p2 = min(len(energy), p1 + 2)
    presub = (mu - pre_edge)[p1:p2]
    coefs = polyfit(energy[p1:p2], presub, nnorm)
    # post_edge starts as a copy of pre_edge, then polynomial is added
    post_edge = 1.0 * pre_edge
    norm_coefs = []
    for n, c in enumerate(reversed(list(coefs))):
        post_edge += c * energy**(n)
        norm_coefs.append(c)
    edge_step = step
    if edge_step is None:
        # edge jump = difference of the two extrapolated curves at e0
        edge_step = post_edge[ie0] - pre_edge[ie0]
    edge_step = abs(edge_step)
    norm = (mu - pre_edge) / edge_step
    return {
        'e0': e0, 'edge_step': edge_step, 'norm': norm,
        'pre_edge': pre_edge, 'post_edge': post_edge,
        'norm_coefs': norm_coefs, 'nvict': nvict, 'nnorm': nnorm,
        'norm1': norm1, 'norm2': norm2, 'pre1': pre1, 'pre2': pre2,
        'precoefs': precoefs
    }
def rebin_xafs(energy, mu=None, group=None, e0=None, pre1=None, pre2=-30,
               pre_step=2, xanes_step=None, exafs1=15, exafs2=None,
               exafs_kstep=0.05, method='centroid', _larch=None):
    """rebin XAFS energy and mu to a 'standard 3 region XAFS scan'

    Arguments
    ---------
    energy       input energy array
    mu           input mu array
    group        output group
    e0           energy reference -- all energy values are relative to this
    pre1         start of pre-edge region [1st energy point]
    pre2         end of pre-edge region, start of XANES region [-30]
    pre_step     energy step for pre-edge region [2]
    xanes_step   energy step for XANES region [see note]
    exafs1       end of XANES region, start of EXAFS region [15]
    exafs2       end of EXAFS region [last energy point]
    exafs_kstep  k-step for EXAFS region [0.05]
    method       one of 'boxcar', 'centroid' ['centroid']

    Returns
    -------
    None

    A group named 'rebinned' will be created in the output group, with the
    following attributes:
        energy  new energy array
        mu      mu for energy array
        e0      e0 copied from current group

    (if the output group is None, _sys.xafsGroup will be written to)

    Notes
    ------
    1 If the first argument is a Group, it must contain 'energy' and 'mu'.
      See First Argrument Group in Documentation

    2 If xanes_step is None, it will be found from the data.  If it is
      given, it may be increased to better fit the input energy array.

    3 The EXAFS region will be spaced in k-space

    4 The rebinned data is found by determining which segments of the
      input energy correspond to each bin in the new energy array. That
      is, each input energy is assigned to exactly one bin in the new
      array.  For each new energy bin, the new value is selected from the
      data in the segment as either
        a) linear interpolation if there are fewer than 3 points in the
           segment.
        b) mean value ('boxcar')
        c) centroid ('centroid')
    """
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu, ), group=group,
                                         fcn_name='rebin_xafs')
    if e0 is None:
        e0 = getattr(group, 'e0', None)
    if e0 is None:
        raise ValueError("need e0")

    if pre1 is None:
        # default pre1: first energy point, rounded down to a pre_step multiple
        pre1 = pre_step * int((min(energy) - e0) / pre_step)
    if exafs2 is None:
        exafs2 = max(energy) - e0

    # determine xanes step size:
    # find mean of energy difference, ignoring first/last 1% of energies
    npts = len(energy)
    n1 = max(2, int(npts / 100.0))
    de_mean = np.diff(energy[n1:-n1]).mean()
    # default step: mean spacing rounded up to a 0.05 eV multiple, min 0.1
    xanes_step_def = max(0.1, 0.05 * (1 + int(de_mean / 0.05)))
    if xanes_step is None:
        xanes_step = xanes_step_def
    else:
        # never use a step finer than the data supports
        xanes_step = max(xanes_step, xanes_step_def)

    # create new energy array from the 3 segments (pre, xanes, exafs)
    en = []
    for start, stop, step, isk in ((pre1, pre2, pre_step, False),
                                   (pre2, exafs1, xanes_step, False),
                                   (exafs1, exafs2, exafs_kstep, True)):
        if isk:
            # EXAFS region: build the grid uniformly in k, then map back to E
            start = etok(start)
            stop = etok(stop)
        reg = np.linspace(start + step, stop,
                          int(0.1 + abs(stop - start) / step))
        if isk:
            reg = ktoe(reg)
        en.extend(e0 + reg)

    # find the segment boundaries of the old energy array
    bounds = [index_of(energy, e) for e in en]
    mu_out = []
    err_out = []
    j0 = 0
    for i in range(len(en)):
        if i == len(en) - 1:
            j1 = len(energy) - 1
        else:
            # segment boundary: midpoint between successive bin indices
            j1 = int((bounds[i] + bounds[i + 1] + 1) / 2.0)
        # if not enough points in segment, do interpolation
        if (j1 - j0) < 3:
            jx = j1 + 1
            if (jx - j0) < 2:
                jx += 1
            val = interp1d(energy[j0:jx], mu[j0:jx], en[i])
            # NOTE(review): `err` is assigned here but err_out below always
            # uses mu[j0:j1].std() -- this local looks unused; confirm.
            err = mu[j0:j1].std()
        else:
            if method.startswith('box'):
                # boxcar: plain mean of the segment
                val = mu[j0:j1].mean()
            else:
                # centroid: energy-weighted mean of the segment
                val = (mu[j0:j1] * energy[j0:j1]).mean() / energy[j0:j1].mean()
        mu_out.append(val)
        err_out.append(mu[j0:j1].std())
        j0 = j1

    newname = group.__name__ + '_rebinned'
    group.rebinned = Group(energy=np.array(en), mu=np.array(mu_out),
                           delta_mu=np.array(err_out), e0=e0,
                           __name__=newname)
    return
def prepeaks_setup(energy, norm=None, group=None, emin=None, emax=None,
                   elo=None, ehi=None, _larch=None):
    """set up pre edge peak group.

    This assumes that pre_edge() has been run successfully on the spectra
    and that the spectra has decent pre-edge subtraction and normalization.

    Arguments:
       energy (ndarray or group): array of x-ray energies, in eV, or group
                                  (see note 1)
       norm (ndarray or None):    array of normalized mu(E)
       group (group or None):     output group
       emax (float or None):      max energy (eV) to use for baesline fit [e0-5]
       emin (float or None):      min energy (eV) to use for baesline fit [e0-40]
       elo  (float or None):      low energy of pre-edge peak region to not fit
                                  baseline [e0-20]
       ehi  (float or None):      high energy of pre-edge peak region ot not fit
                                  baseline [e0-10]
       _larch (larch instance or None):  current larch session.

    A group named `prepeaks` will be created in the output group, containing:

        ==============   ===========================================================
         attribute        meaning
        ==============   ===========================================================
         energy           energy array for pre-edge peaks = energy[emin:emax]
         norm             spectrum over pre-edge peak energies
        ==============   ===========================================================

    Notes:
        1. Supports :ref:`First Argument Group` convention, requiring group
           members `energy` and `norm`
        2. Supports :ref:`Set XAFS Group` convention within Larch or if
           `_larch` is set.
    """
    energy, norm, group = parse_group_args(energy, members=('energy', 'norm'),
                                           defaults=(norm, ), group=group,
                                           fcn_name='pre_edge_baseline')
    # flatten any (1, N)-shaped inputs
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(norm.shape) > 1:
        norm = norm.squeeze()

    dat_emin = min(energy)
    dat_e0 = getattr(group, 'e0', -1)
    if dat_e0 > 0:
        # fill in defaults relative to e0, then convert any values that
        # were given relative to e0 into absolute energies
        if emin is None:
            emin = dat_e0 - 30.0
        if emax is None:
            emax = dat_e0 - 1.0
        if elo is None:
            elo = dat_e0 - 15.0
        if ehi is None:
            ehi = dat_e0 - 5.0
        if emin < 0:
            emin += dat_e0
        if elo < 0:
            elo += dat_e0
        if emax < dat_emin:
            emax += dat_e0
        if ehi < dat_emin:
            ehi += dat_e0

    if emax is None or emin is None or elo is None or ehi is None:
        raise ValueError("must provide emin and emax to prepeaks_setup")

    # get indices for input energies: enforce emin <= elo and ehi <= emax
    if emin > emax:
        emin, emax = emax, emin
    if emin > elo:
        elo, emin = emin, elo
    if ehi > emax:
        ehi, emax = emax, ehi

    # epsilon avoids off-by-one from floating-point index lookup
    dele = 1.e-13 + min(np.diff(energy)) / 5.0
    imin = index_of(energy, emin + dele)
    imax = index_of(energy, emax + dele)
    # (removed unused ilo/ihi index lookups -- elo/ehi are stored as
    # energies, not indices)
    edat = energy[imin:imax + 1]
    norm = norm[imin:imax + 1]

    if not hasattr(group, 'prepeaks'):
        group.prepeaks = Group(energy=edat, norm=norm, emin=emin, emax=emax,
                               elo=elo, ehi=ehi)
    else:
        group.prepeaks.energy = edat
        group.prepeaks.norm = norm
        group.prepeaks.emin = emin
        group.prepeaks.emax = emax
        group.prepeaks.elo = elo
        group.prepeaks.ehi = ehi
    group.prepeaks.xdat = edat
    group.prepeaks.ydat = norm
    return
def pre_edge_baseline(energy, norm=None, group=None, form='lorentzian',
                      emin=None, emax=None, elo=None, ehi=None,
                      with_line=True, _larch=None):
    """remove baseline from main edge over pre edge peak region

    This assumes that pre_edge() has been run successfully on the spectra
    and that the spectra has decent pre-edge subtraction and normalization.

    Arguments:
       energy (ndarray or group): array of x-ray energies, in eV, or group
                                  (see note 1)
       norm (ndarray or group):   array of normalized mu(E)
       group (group or None):     output group
       elo  (float or None):      low energy of pre-edge peak region to not fit
                                  baseline [e0-20]
       ehi  (float or None):      high energy of pre-edge peak region ot not fit
                                  baseline [e0-10]
       emax (float or None):      max energy (eV) to use for baesline fit [e0-5]
       emin (float or None):      min energy (eV) to use for baesline fit [e0-40]
       form (string):             form used for baseline (see note 2)
                                  ['lorentzian']
       with_line (bool):          whether to include linear component in
                                  baseline ['True']
       _larch (larch instance or None):  current larch session.

    A function will be fit to the input mu(E) data over the range between
    [emin:elo] and [ehi:emax], ignorng the pre-edge peaks in the region
    [elo:ehi].  The baseline function is specified with the `form` keyword
    argument, which can be one of 'lorentzian', 'gaussian', or 'voigt',
    with 'lorentzian' the default.  In addition, the `with_line` keyword
    argument can be used to add a line to this baseline function.

    A group named 'prepeaks' will be used or created in the output group,
    containing:

        ==============   ===========================================================
         attribute        meaning
        ==============   ===========================================================
         energy           energy array for pre-edge peaks = energy[emin:emax]
         baseline         fitted baseline array over pre-edge peak energies
         norm             spectrum over pre-edge peak energies
         peaks            baseline-subtraced spectrum over pre-edge peak energies
         centroid         estimated centroid of pre-edge peaks (see note 3)
         peak_energies    list of predicted peak energies (see note 4)
         fit_details      details of fit to extract pre-edge peaks.
        ==============   ===========================================================

    Notes:
       1. Supports :ref:`First Argument Group` convention, requiring group
          members `energy` and `norm`
       2. Supports :ref:`Set XAFS Group` convention within Larch or if
          `_larch` is set.
       3. The value calculated for `prepeaks.centroid` will be found as
          (prepeaks.energy*prepeaks.peaks).sum() / prepeaks.peaks.sum()
       4. The values in the `peak_energies` list will be predicted energies
          of the peaks in `prepeaks.peaks` as found by peakutils.
    """
    energy, norm, group = parse_group_args(energy, members=('energy', 'norm'),
                                           defaults=(norm, ), group=group,
                                           fcn_name='pre_edge_baseline')
    prepeaks_setup(energy, norm=norm, group=group, emin=emin, emax=emax,
                   elo=elo, ehi=ehi, _larch=_larch)

    # use the validated/defaulted energies from prepeaks_setup
    emin = group.prepeaks.emin
    emax = group.prepeaks.emax
    elo = group.prepeaks.elo
    ehi = group.prepeaks.ehi

    # epsilon avoids off-by-one from floating-point index lookup
    dele = 1.e-13 + min(np.diff(energy)) / 5.0
    imin = index_of(energy, emin + dele)
    ilo = index_of(energy, elo + dele)
    ihi = index_of(energy, ehi + dele)
    imax = index_of(energy, emax + dele)

    # build xdat, ydat: dat to fit (skipping pre-edge peaks)
    xdat = np.concatenate((energy[imin:ilo + 1], energy[ihi:imax + 1]))
    ydat = np.concatenate((norm[imin:ilo + 1], norm[ihi:imax + 1]))

    # build fitting model: note that we always include
    # a LinearModel but may fix slope and intercept
    form = form.lower()
    if form.startswith('voig'):
        model = VoigtModel()
    elif form.startswith('gaus'):
        model = GaussianModel()
    else:
        model = LorentzianModel()
    model += LinearModel()
    params = model.make_params(amplitude=1.0, sigma=2.0, center=emax,
                               intercept=0, slope=0)
    params['amplitude'].min = 0.0
    params['sigma'].min = 0.25
    params['sigma'].max = 50.0
    params['center'].max = emax + 25.0
    params['center'].min = emax - 25.0
    if not with_line:
        params['slope'].vary = False
        params['intercept'].vary = False

    result = model.fit(ydat, params, x=xdat)
    cen = dcen = 0.
    peak_energies = []

    # energy including pre-edge peaks, for output
    edat = energy[imin:imax + 1]
    norm = norm[imin:imax + 1]
    bline = peaks = dpeaks = norm * 0.0

    # get baseline and resulting norm over edat range
    if result is not None:
        bline = result.eval(result.params, x=edat)
        peaks = norm - bline
        # estimate centroid
        cen = (edat * peaks).sum() / peaks.sum()
        # uncertainty in norm includes only uncertainties in baseline fit
        # and uncertainty in centroid:
        try:
            dpeaks = result.eval_uncertainty(result.params, x=edat)
        except Exception:
            # FIX: was `dbpeaks = 0.0` -- a typo that left dpeaks at its
            # previous value instead of zeroing the uncertainty
            dpeaks = 0.0
        cen_plus = (edat * (peaks + dpeaks)).sum() / (peaks + dpeaks).sum()
        cen_minus = (edat * (peaks - dpeaks)).sum() / (peaks - dpeaks).sum()
        dcen = abs(cen_minus - cen_plus) / 2.0

    # locate peak positions
    if HAS_PEAKUTILS:
        peak_ids = peakutils.peak.indexes(peaks, thres=0.05, min_dist=2)
        peak_energies = [edat[pid] for pid in peak_ids]

    group = set_xafsGroup(group, _larch=_larch)
    group.prepeaks = Group(energy=edat, norm=norm, baseline=bline,
                           peaks=peaks, delta_peaks=dpeaks,
                           centroid=cen, delta_centroid=dcen,
                           peak_energies=peak_energies,
                           fit_details=result,
                           emin=emin, emax=emax, elo=elo, ehi=ehi,
                           form=form, with_line=with_line)
    return
def get_xlims(x, xmin, xmax):
    """Return slice indices (i1, i2) so that x[i1:i2] covers xmin..xmax,
    nudging lookups by 1/5 of the smallest x-step to dodge float round-off."""
    eps = min(np.diff(x)) / 5.0
    start = index_of(x, xmin + eps)
    stop = index_of(x, xmax + eps) + 1
    return start, stop
def pre_edge(energy, mu=None, group=None, e0=None, step=None, nnorm=None,
             nvict=0, pre1=None, pre2=None, norm1=None, norm2=None,
             make_flat=True, _larch=None):
    """pre edge subtraction, normalization for XAFS

    This performs a number of steps:
       1. determine E0 (if not supplied) from max of deriv(mu)
       2. fit a line of polymonial to the region below the edge
       3. fit a polymonial to the region above the edge
       4. extrapolate the two curves to E0 and take their difference
          to determine the edge jump

    Arguments
    ----------
    energy:    array of x-ray energies, in eV, or group (see note 1)
    mu:        array of mu(E)
    group:     output group
    e0:        edge energy, in eV.  If None, it will be determined here.
    step:      edge jump.  If None, it will be determined here.
    pre1:      low E range (relative to E0) for pre-edge fit
    pre2:      high E range (relative to E0) for pre-edge fit
    nvict:     energy exponent to use for pre-edge fit.  See Notes.
    norm1:     low E range (relative to E0) for post-edge fit
    norm2:     high E range (relative to E0) for post-edge fit
    nnorm:     degree of polynomial (ie, nnorm+1 coefficients will be
               found) for post-edge normalization curve.  See Notes.
    make_flat: boolean (Default True) to calculate flattened output.

    Returns
    -------
    None

    The following attributes will be written to the output group:
        e0          energy origin
        edge_step   edge step
        norm        normalized mu(E), using polynomial
        norm_area   normalized mu(E), using integrated area
        flat        flattened, normalized mu(E)
        pre_edge    determined pre-edge curve
        post_edge   determined post-edge, normalization curve
        dmude       derivative of mu(E)

    (if the output group is None, _sys.xafsGroup will be written to)

    Notes
    -----
     1. Supports `First Argument Group` convention, requiring group members
        `energy` and `mu`.
     2. Support `Set XAFS Group` convention within Larch or if `_larch` is
        set.
     3. pre_edge: a line is fit to mu(energy)*energy**nvict over the region,
        energy=[e0+pre1, e0+pre2].  pre1 and pre2 default to None, which
        will set
            pre1 = e0 - 2nd energy point, rounded to 5 eV
            pre2 = roughly pre1/3.0, rounded to 5 eV
     4. post-edge: a polynomial of order nnorm is fit to
        mu(energy)*energy**nvict between energy=[e0+norm1, e0+norm2].
        nnorm, norm1, norm2 default to None, which will set:
            norm2 = max energy - e0, rounded to 5 eV
            norm1 = roughly min(150, norm2/3.0), rounded to 5 eV
            nnorm = 2 if norm2-norm1>350, 1 if norm2-norm1>50, or 0 if less.
     5. flattening fits a quadratic curve (no matter nnorm) to the
        post-edge normalized mu(E) and subtracts that curve from it.
    """
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu, ), group=group,
                                         fcn_name='pre_edge')
    # flatten any (1, N)-shaped inputs
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    # all of the numeric work is done by preedge(); this function
    # writes the results onto the output group
    pre_dat = preedge(energy, mu, e0=e0, step=step, nnorm=nnorm,
                      nvict=nvict, pre1=pre1, pre2=pre2, norm1=norm1,
                      norm2=norm2)

    group = set_xafsGroup(group, _larch=_larch)

    e0 = pre_dat['e0']
    norm = pre_dat['norm']
    norm1 = pre_dat['norm1']
    norm2 = pre_dat['norm2']
    # generate flattened spectra, by fitting a quadratic to .norm
    # and removing that.
    flat = norm
    ie0 = index_nearest(energy, e0)
    p1 = index_of(energy, norm1 + e0)
    p2 = index_nearest(energy, norm2 + e0)
    if p2 - p1 < 2:
        p2 = min(len(energy), p1 + 2)

    if make_flat and p2 - p1 > 4:
        enx, mux = remove_nans2(energy[p1:p2], norm[p1:p2])
        # enx, mux = (energy[p1:p2], norm[p1:p2])
        fpars = Parameters()
        # only vary the quadratic coefficients actually used by the
        # normalization polynomial
        ncoefs = len(pre_dat['norm_coefs'])
        fpars.add('c0', value=0, vary=True)
        fpars.add('c1', value=0, vary=(ncoefs > 1))
        fpars.add('c2', value=0, vary=(ncoefs > 2))
        fit = Minimizer(flat_resid, fpars, fcn_args=(enx, mux))
        result = fit.leastsq(xtol=1.e-6, ftol=1.e-6)

        fc0 = result.params['c0'].value
        fc1 = result.params['c1'].value
        fc2 = result.params['c2'].value

        flat_diff = fc0 + energy * (fc1 + energy * fc2)
        # subtract the fitted curve, anchored at e0; keep the pre-edge
        # portion of flat identical to norm
        flat = norm - (flat_diff - flat_diff[ie0])
        flat[:ie0] = norm[:ie0]

    group.e0 = e0
    group.norm = norm
    group.norm_poly = 1.0 * norm
    group.flat = flat
    group.dmude = np.gradient(mu) / np.gradient(energy)
    group.edge_step = pre_dat['edge_step']
    group.edge_step_poly = pre_dat['edge_step']
    group.pre_edge = pre_dat['pre_edge']
    group.post_edge = pre_dat['post_edge']

    group.pre_edge_details = Group()
    for attr in ('pre1', 'pre2', 'norm1', 'norm2', 'nnorm', 'nvict'):
        setattr(group.pre_edge_details, attr, pre_dat.get(attr, None))
    group.pre_edge_details.pre_slope = pre_dat['precoefs'][0]
    group.pre_edge_details.pre_offset = pre_dat['precoefs'][1]

    # clear any stale norm_c* attributes from a previous run
    for i in range(MAX_NNORM):
        if hasattr(group, 'norm_c%i' % i):
            delattr(group, 'norm_c%i' % i)
    for i, c in enumerate(pre_dat['norm_coefs']):
        setattr(group.pre_edge_details, 'norm_c%i' % i, c)

    # guess element and edge
    group.atsym = getattr(group, 'atsym', None)
    group.edge = getattr(group, 'edge', None)
    if group.atsym is None or group.edge is None:
        _atsym, _edge = guess_edge(group.e0)
        if group.atsym is None:
            group.atsym = _atsym
        if group.edge is None:
            group.edge = _edge
    return
def mback_norm(energy, mu=None, group=None, z=None, edge='K', e0=None,
               pre1=None, pre2=None, norm1=None, norm2=None, nnorm=None,
               nvict=1, _larch=None):
    """
    simplified version of MBACK to Match mu(E) data for tabulated f''(E)
    for normalization

    Arguments:
      energy, mu:  arrays of energy and mu(E)
      group:       output group (and input group for e0)
      z:           Z number of absorber
      e0:          edge energy
      pre1:        low E range (relative to E0) for pre-edge fit
      pre2:        high E range (relative to E0) for pre-edge fit
      norm1:       low E range (relative to E0) for post-edge fit
      norm2:       high E range (relative to E0) for post-edge fit
      nnorm:       degree of polynomial (ie, nnorm+1 coefficients will
                   be found) for post-edge normalization curve fit to the
                   scaled f2. Default=1 (linear)

    Returns:
      group.norm_poly:     normalized mu(E) from pre_edge()
      group.norm:          normalized mu(E) from this method
      group.mback_mu:      tabulated f2 scaled and pre_edge added to match mu(E)
      group.mback_params:  Group of parameters for the minimization

    References:
      * MBACK (Weng, Waldo, Penner-Hahn): http://dx.doi.org/10.1086/303711
      * Chantler: http://dx.doi.org/10.1063/1.555974
    """
    ### implement the First Argument Group convention
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='mback')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    if _larch is not None:
        group = set_xafsGroup(group, _larch=_larch)
    group.norm_poly = group.norm * 1.0

    if z is not None:
        # look up nominal edge energy; also validates z/edge early
        # (value itself is not used below)
        e0_nominal = xray_edge(z, edge).energy
    if e0 is None:
        e0 = getattr(group, 'e0', None)
        if e0 is None:
            find_e0(energy, mu, group=group)
            e0 = group.e0

    atsym = None
    if z is None or z < 2:
        atsym, edge = guess_edge(group.e0)
        z = atomic_number(atsym)
    if atsym is None and z is not None:
        atsym = atomic_symbol(z)

    if getattr(group, 'pre_edge_details', None) is None:
        # pre_edge never run
        # NOTE(review): preedge() returns a dict and does not set group
        # attributes; group.pre_edge below relies on pre_edge() having
        # been run -- confirm intent.
        preedge(energy, mu, pre1=pre1, pre2=pre2, nvict=nvict,
                norm1=norm1, norm2=norm2, e0=e0, nnorm=nnorm)

    mu_pre = mu - group.pre_edge
    f2 = f2_chantler(z, energy)

    weights = np.ones(len(energy)) * 1.0
    if norm2 is None:
        norm2 = max(energy) - e0
    if norm2 < 0:
        # negative norm2 means "relative to the end of the data"
        norm2 = max(energy) - e0 - norm2

    # avoid l2 and higher edges
    if edge.lower().startswith('l'):
        if edge.lower() == 'l3':
            e_l2 = xray_edge(z, 'L2').energy
            norm2 = min(norm2, e_l2 - e0)
        elif edge.lower() == 'l2':
            # FIX: was `e_l2 = ...` followed by use of undefined `e_l1`,
            # raising NameError for L2-edge data
            e_l1 = xray_edge(z, 'L1').energy
            norm2 = min(norm2, e_l1 - e0)

    # weights: zero above pre2, ramp from 0.1 to 1.0 over [norm1, norm2]
    # NOTE(review): pre2/norm1 may still be None here if pre_edge_details
    # already existed -- e0+None would raise; confirm expected call order.
    ipre2 = index_of(energy, e0 + pre2)
    inor1 = index_of(energy, e0 + norm1)
    inor2 = index_of(energy, e0 + norm2) + 1

    weights[ipre2:] = 0.0
    weights[inor1:inor2] = np.linspace(0.1, 1.0, inor2 - inor1)

    params = Parameters()
    params.add(name='slope', value=0.0, vary=True)
    params.add(name='offset', value=-f2[0], vary=True)
    params.add(name='scale', value=f2[-1], vary=True)

    out = minimize(f2norm, params, method='leastsq',
                   gtol=1.e-5, ftol=1.e-5, xtol=1.e-5, epsfcn=1.e-5,
                   kws=dict(en=energy, mu=mu_pre, f2=f2, weights=weights))

    p = out.params.valuesdict()
    model = (p['offset'] + p['slope'] * energy + f2) * p['scale']

    group.mback_mu = model + group.pre_edge

    pre_f2 = preedge(energy, model, nnorm=nnorm, nvict=nvict, e0=e0,
                     pre1=pre1, pre2=pre2, norm1=norm1, norm2=norm2)
    step_new = pre_f2['edge_step']

    group.edge_step_poly = group.edge_step
    group.edge_step_mback = step_new
    group.norm_mback = mu_pre / step_new

    group.mback_params = Group(e0=e0, pre1=pre1, pre2=pre2, norm1=norm1,
                               norm2=norm2, nnorm=nnorm, fit_params=p,
                               fit_weights=weights, model=model, f2=f2,
                               pre_f2=pre_f2, atsym=atsym, edge=edge)

    # only adopt the mback step if it is within 75% of the polynomial step
    if (abs(step_new - group.edge_step) / (1.e-13 + group.edge_step)) > 0.75:
        print("Warning: mback edge step failed....")
    else:
        group.edge_step = step_new
        group.norm = group.norm_mback
def get_plot_arrays(self, dgroup):
    """Configure which arrays of dgroup get plotted, based on the GUI's
    current "plot one" selection.

    Writes plot metadata onto dgroup (plot_xlabel, plot_ylabel,
    plot_y2label, plot_yarrays, plot_extras, ydat) rather than returning
    anything.  plot_yarrays entries are (attribute_name, options, label)
    tuples.
    """
    lab = plotlabels.norm
    if dgroup is None:
        return
    # defaults for XAS data: plot normalized mu vs energy
    dgroup.plot_y2label = None
    dgroup.plot_xlabel = plotlabels.energy
    dgroup.plot_yarrays = [('norm', PLOTOPTS_1, lab)]

    if dgroup.datatype != 'xas':
        # non-XAS data: generic x/y arrays with scale-based "norm"
        pchoice = PlotOne_Choices_nonxas[self.plotone_op.GetStringSelection()]
        dgroup.plot_xlabel = 'x'
        dgroup.plot_ylabel = 'y'
        dgroup.plot_yarrays = [('ydat', PLOTOPTS_1, 'ydat')]
        dgroup.dmude = np.gradient(dgroup.ydat) / np.gradient(dgroup.xdat)
        if not hasattr(dgroup, 'scale'):
            dgroup.scale = 1.0
        dgroup.norm = dgroup.ydat * dgroup.scale
        if pchoice == 'dmude':
            dgroup.plot_ylabel = 'dy/dx'
            dgroup.plot_yarrays = [('dmude', PLOTOPTS_1, 'dy/dx')]
        elif pchoice == 'norm':
            dgroup.plot_ylabel = 'scaled y'
            dgroup.plot_yarrays = [('norm', PLOTOPTS_1, 'y/scale')]
        elif pchoice == 'norm+dnormde':
            lab = plotlabels.norm
            dgroup.plot_y2label = 'dy/dx'
            dgroup.plot_yarrays = [('ydat', PLOTOPTS_1, 'y'),
                                   ('dnormde', PLOTOPTS_D, 'dy/dx')]
        return

    # XAS data: attributes required before plotting (processed below if absent)
    req_attrs = ['e0', 'norm', 'dmude', 'pre_edge']
    pchoice = PlotOne_Choices[self.plotone_op.GetStringSelection()]
    if pchoice in ('mu', 'norm', 'flat', 'dmude'):
        lab = getattr(plotlabels, pchoice)
        dgroup.plot_yarrays = [(pchoice, PLOTOPTS_1, lab)]
    elif pchoice == 'prelines':
        dgroup.plot_yarrays = [('mu', PLOTOPTS_1, plotlabels.mu),
                               ('pre_edge', PLOTOPTS_2, 'pre edge'),
                               ('post_edge', PLOTOPTS_2, 'post edge')]
    elif pchoice == 'preedge':
        lab = r'pre-edge subtracted $\mu$'
        # reconstruct pre-edge-subtracted mu from norm and edge_step
        dgroup.pre_edge_sub = dgroup.norm * dgroup.edge_step
        dgroup.plot_yarrays = [('pre_edge_sub', PLOTOPTS_1, lab)]
    elif pchoice == 'mu+dmude':
        lab = plotlabels.mu
        lab2 = plotlabels.dmude
        dgroup.plot_yarrays = [('mu', PLOTOPTS_1, lab),
                               ('dmude', PLOTOPTS_D, lab2)]
        dgroup.plot_y2label = lab2
    elif pchoice == 'norm+dnormde':
        lab = plotlabels.norm
        lab2 = plotlabels.dmude + ' (normalized)'
        dgroup.plot_yarrays = [('norm', PLOTOPTS_1, lab),
                               ('dnormde', PLOTOPTS_D, lab2)]
        dgroup.plot_y2label = lab2
    elif pchoice == 'mback_norm':
        req_attrs.append('mback_norm')
        lab = r'$\mu$'
        if not hasattr(dgroup, 'mback_mu'):
            # mback result missing: force a (re)processing pass
            self.process(dgroup=dgroup, force_mback=True)
        dgroup.plot_yarrays = [('mu', PLOTOPTS_1, lab),
                               ('mback_mu', PLOTOPTS_2, r'tabulated $\mu(E)$')]
    elif pchoice == 'mback_poly':
        req_attrs.append('mback_norm')
        lab = plotlabels.norm
        if not hasattr(dgroup, 'mback_mu'):
            self.process(dgroup=dgroup, force_mback=True)
        dgroup.plot_yarrays = [('norm_mback', PLOTOPTS_1, 'mback'),
                               ('norm_poly', PLOTOPTS_2, 'polynomial')]
    elif pchoice == 'area_norm':
        dgroup.plot_yarrays = [('norm_area', PLOTOPTS_1, 'area'),
                               ('norm_poly', PLOTOPTS_2, 'polynomial')]

    dgroup.plot_ylabel = lab
    # run processing if any required attribute is still missing
    needs_proc = False
    for attr in req_attrs:
        needs_proc = needs_proc or (not hasattr(dgroup, attr))
    if needs_proc:
        self.process(dgroup=dgroup, noskip=True)

    # primary y-array; also used to place the e0 marker
    y4e0 = dgroup.ydat = getattr(dgroup, dgroup.plot_yarrays[0][0], dgroup.mu)
    dgroup.plot_extras = []
    if self.wids['showe0'].IsChecked():
        ie0 = index_of(dgroup.energy, dgroup.e0)
        dgroup.plot_extras.append(('marker', dgroup.e0, y4e0[ie0], {}))
def prepeaks_setup(energy, norm=None, group=None, emin=None, emax=None,
                   elo=None, ehi=None, _larch=None):
    """set up pre edge peak group.

    This assumes that pre_edge() has been run successfully on the spectra
    and that the spectra has decent pre-edge subtraction and normalization.

    Arguments
    ----------
    energy: array of x-ray energies, in eV, or group (see note 1)
    norm:   array of normalized mu(E)
    group:  output group
    emax:   max energy (eV) to use for baseline fit [e0-5]
    emin:   min energy (eV) to use for baseline fit [e0-40]
    elo:    low energy of pre-edge peak region to not fit baseline [e0-20]
    ehi:    high energy of pre-edge peak region to not fit baseline [e0-10]

    Returns
    -------
    None

    A group named 'prepeaks' will be created in the output group, with the
    following attributes:
        energy        energy array for pre-edge peaks = energy[emin:emax]
        norm          spectrum over pre-edge peak energies

    Notes
    -----
    1  If the first argument is a Group, it must contain 'energy' and 'norm'.
       See First Argument Group in Documentation
    """
    # note: fcn_name corrected to this function's name so that error
    # messages from parse_group_args point at the right caller
    energy, norm, group = parse_group_args(energy, members=('energy', 'norm'),
                                           defaults=(norm,), group=group,
                                           fcn_name='prepeaks_setup')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(norm.shape) > 1:
        norm = norm.squeeze()

    dat_emin, dat_emax = min(energy), max(energy)
    dat_e0 = getattr(group, 'e0', -1)

    if dat_e0 > 0:
        # fill in defaults relative to the known edge energy, then
        # interpret small/negative user values as relative to e0
        if emin is None:
            emin = dat_e0 - 30.0
        if emax is None:
            emax = dat_e0 - 1.0
        if elo is None:
            elo = dat_e0 - 15.0
        if ehi is None:
            ehi = dat_e0 - 5.0
        if emin < 0:
            emin += dat_e0
        if elo < 0:
            elo += dat_e0
        if emax < dat_emin:
            emax += dat_e0
        if ehi < dat_emin:
            ehi += dat_e0

    if emax is None or emin is None or elo is None or ehi is None:
        raise ValueError("must provide emin and emax to prepeaks_setup")

    # enforce emin <= elo and ehi <= emax
    if emin > emax:
        emin, emax = emax, emin
    if emin > elo:
        elo, emin = emin, elo
    if ehi > emax:
        ehi, emax = emax, ehi

    # tiny energy offset so index_of() selects the intended sample
    dele = 1.e-13 + min(np.diff(energy))/5.0

    imin = index_of(energy, emin+dele)
    imax = index_of(energy, emax+dele)

    edat = energy[imin: imax+1]
    norm = norm[imin:imax+1]

    if not hasattr(group, 'prepeaks'):
        group.prepeaks = Group(energy=edat, norm=norm,
                               emin=emin, emax=emax, elo=elo, ehi=ehi)
    else:
        group.prepeaks.energy = edat
        group.prepeaks.norm = norm
        group.prepeaks.emin = emin
        group.prepeaks.emax = emax
        group.prepeaks.elo = elo
        group.prepeaks.ehi = ehi

    group.prepeaks.xdat = edat
    group.prepeaks.ydat = norm
    return
def preedge(energy, mu, e0=None, step=None, nnorm=None, nvict=0, pre1=None,
            pre2=-50, norm1=100, norm2=None):
    """pre edge subtraction, normalization for XAFS (straight python)

    This performs a number of steps:
       1. determine E0 (if not supplied) from max of deriv(mu)
       2. fit a line or polynomial to the region below the edge
       3. fit a polynomial to the region above the edge
       4. extrapolate the two curves to E0 to determine the edge jump

    Arguments
    ----------
    energy:  array of x-ray energies, in eV
    mu:      array of mu(E)
    e0:      edge energy, in eV.  If None, it will be determined here.
    step:    edge jump.  If None, it will be determined here.
    pre1:    low E range (relative to E0) for pre-edge fit
    pre2:    high E range (relative to E0) for pre-edge fit
    nvict:   energy exponent to use for pre-edge fit.  See Note
    norm1:   low E range (relative to E0) for post-edge fit
    norm2:   high E range (relative to E0) for post-edge fit
    nnorm:   degree of polynomial (ie, nnorm+1 coefficients will be found)
             for post-edge normalization curve. Default=None -- see note.

    Returns
    -------
    dictionary with elements (among others)
        e0          energy origin in eV
        edge_step   edge step
        norm        normalized mu(E)
        pre_edge    determined pre-edge curve
        post_edge   determined post-edge, normalization curve

    Notes
    -----
    1  nvict gives an exponent to the energy term for the fits to the
       pre-edge and the post-edge region.  For the pre-edge, a line
       (m * energy + b) is fit to mu(energy)*energy**nvict over the
       pre-edge region, energy=[e0+pre1, e0+pre2].  For the post-edge,
       a polynomial of order nnorm will be fit to
       mu(energy)*energy**nvict of the post-edge region
       energy=[e0+norm1, e0+norm2].
    2  when nnorm is None it defaults (matching the code below) to
       2 if norm2-norm1 > 400, 1 if 100 < norm2-norm1 <= 400, and
       0 if norm2-norm1 <= 100.
    """
    energy = remove_dups(energy)
    # determine e0 if not given, or if it falls outside the data range,
    # then snap it onto the nearest energy sample
    if e0 is None or e0 < energy[1] or e0 > energy[-2]:
        e0 = _finde0(energy, mu)
    ie0 = index_nearest(energy, e0)
    e0 = energy[ie0]
    # remember the caller-supplied values before they are adjusted
    pre1_input = pre1
    norm2_input = norm2
    if pre1 is None:
        pre1 = min(energy) - e0
    if norm2 is None:
        norm2 = max(energy) - e0
    if norm2 < 0:
        # NOTE(review): a negative norm2 is mapped to max(energy)-e0-norm2,
        # which is *beyond* the data and is then pinned to max(energy)-e0 by
        # the min() clamp below -- confirm whether '+ norm2' was intended
        norm2 = max(energy) - e0 - norm2
    # clamp the fit ranges to the actual data range
    pre1 = max(pre1, (min(energy) - e0))
    norm2 = min(norm2, (max(energy) - e0))
    if pre1 > pre2:
        pre1, pre2 = pre2, pre1
    if norm1 > norm2:
        norm1, norm2 = norm2, norm1
    # pre-edge fit: straight line to mu*energy**nvict
    p1 = index_of(energy, pre1+e0)
    p2 = index_nearest(energy, pre2+e0)
    if p2-p1 < 2:
        p2 = min(len(energy), p1 + 2)
    omu = mu*energy**nvict
    ex, mx = remove_nans2(energy[p1:p2], omu[p1:p2])
    precoefs = polyfit(ex, mx, 1)
    # precoefs[0] is the slope, precoefs[1] the intercept
    pre_edge = (precoefs[0] * energy + precoefs[1]) * energy**(-nvict)
    # normalization
    p1 = index_of(energy, norm1+e0)
    p2 = index_nearest(energy, norm2+e0)
    if p2-p1 < 2:
        p2 = min(len(energy), p1 + 2)
    if nnorm is None:
        # pick polynomial degree from the width of the post-edge range
        nnorm = 0
        if norm2-norm1 > 100:
            nnorm = 1
        if norm2-norm1 > 400:
            nnorm = 2
    nnorm = max(min(nnorm, MAX_NNORM), 0)
    # fit polynomial to pre-edge-subtracted mu over the post-edge range
    presub = (mu-pre_edge)[p1:p2]
    coefs = polyfit(energy[p1:p2], presub, nnorm)
    post_edge = 1.0*pre_edge
    norm_coefs = []
    # polyfit returns highest-degree first; reversed() gives ascending order
    # so each coefficient multiplies energy**n
    for n, c in enumerate(reversed(list(coefs))):
        post_edge += c * energy**(n)
        norm_coefs.append(c)
    edge_step = step
    if edge_step is None:
        # edge step = separation of the two extrapolated curves at e0
        edge_step = post_edge[ie0] - pre_edge[ie0]
    norm = (mu - pre_edge)/edge_step
    return {'e0': e0, 'edge_step': edge_step, 'norm': norm,
            'pre_edge': pre_edge, 'post_edge': post_edge,
            'norm_coefs': norm_coefs, 'nvict': nvict,
            'nnorm': nnorm, 'norm1': norm1, 'norm2': norm2,
            'pre1': pre1, 'pre2': pre2, 'precoefs': precoefs,
            'norm2_input': norm2_input, 'pre1_input': pre1_input}
def plot_mu(dgroup, show_norm=False, show_deriv=False,
            show_pre=False, show_post=False, show_e0=False, with_deriv=False,
            emin=None, emax=None, label='mu', new=True, delay_draw=False,
            offset=0, title=None, win=1, _larch=None):
    """
    plot_mu(dgroup, norm=False, deriv=False, show_pre=False, show_post=False,
            show_e0=False, show_deriv=False, emin=None, emax=None, label=None,
            new=True, win=1)

    Plot mu(E) for an XAFS data group in various forms

    Arguments
    ----------
     dgroup      group of XAFS data after pre_edge() results (see Note 1)
     show_norm   bool whether to show normalized data [False]
     show_deriv  bool whether to show derivative of XAFS data [False]
     show_pre    bool whether to show pre-edge curve [False]
     show_post   bool whether to show post-edge curve [False]
     show_e0     bool whether to show E0 [False]
     with_deriv  bool whether to show deriv together with mu [False]
     emin        min energy to show, absolute or relative to E0
                 [None, start of data]
     emax        max energy to show, absolute or relative to E0
                 [None, end of data]
     label       string for label [None:  'mu', 'dmu/dE', or 'mu norm']
     title       string for plot title [None, may use filename if available]
     new         bool whether to start a new plot [True]
     delay_draw  bool whether to delay draw until more traces are added [False]
     offset      vertical offset to use for y-array [0]
     win         integer plot window to use [1]

    Notes
    -----
     1. The input data group must have the following attributes:
        energy, mu, norm, e0, pre_edge, edge_step
    """
    # select the mu array: prefer 'mu', fall back to transmission/fluorescence
    if hasattr(dgroup, 'mu'):
        mu = dgroup.mu
    elif hasattr(dgroup, 'mutrans'):
        mu = dgroup.mutrans
    elif hasattr(dgroup, 'mufluor'):
        mu = dgroup.mufluor
    else:
        raise ValueError("XAFS data group has no array for mu")

    ylabel = plotlabels.mu
    if label is None:
        label = 'mu'

    if show_deriv:
        mu = gradient(mu)/gradient(dgroup.energy)
        ylabel = plotlabels.dmude
    elif show_norm:
        mu = dgroup.norm
        ylabel = "%s (norm)" % ylabel

    emin, emax = _get_erange(dgroup, emin, emax)
    title = _get_title(dgroup, title=title)

    opts = dict(win=win, show_legend=True, linewidth=3,
                title=title, xmin=emin, xmax=emax,
                delay_draw=True, _larch=_larch)

    _plot(dgroup.energy, mu+offset, xlabel=plotlabels.energy, ylabel=ylabel,
          label=label, zorder=20, new=new, **opts)
    if with_deriv:
        dmu = gradient(mu)/gradient(dgroup.energy)
        _plot(dgroup.energy, dmu+offset, ylabel=plotlabels.dmude,
              label='%s (deriv)' % label, zorder=18, side='right', **opts)

    # pre/post edge curves only make sense against raw (un-normalized) mu
    if (not show_norm and not show_deriv):
        if show_pre:
            _plot(dgroup.energy, dgroup.pre_edge+offset, label='pre_edge',
                  zorder=18, **opts)
        if show_post:
            _plot(dgroup.energy, dgroup.post_edge+offset, label='post_edge',
                  zorder=18, **opts)
            if show_pre:
                # vertical arrow at e0 marking the edge step
                i = index_of(dgroup.energy, dgroup.e0)
                ypre = dgroup.pre_edge[i]
                ypost = dgroup.post_edge[i]
                # bug fix: offset is a *vertical* shift, so it belongs on
                # the arrow's y coordinates, not on its second x coordinate
                _plot_arrow(dgroup.e0, ypre+offset, dgroup.e0, ypost+offset,
                            color=plotlabels.e0color, width=0.25,
                            head_width=0, zorder=3, win=win, _larch=_larch)

    if show_e0:
        _plot_axvline(dgroup.e0, zorder=2, size=3, label='E0',
                      color=plotlabels.e0color, win=win, _larch=_larch)
    disp = _getDisplay(win=win, _larch=_larch)
    if disp is not None:
        disp.panel.conf.draw_legend()
    # traces above were drawn with delay_draw=True; render them now
    redraw(win=win, xmin=emin, xmax=emax, _larch=_larch)
def estimate_noise(k, chi=None, group=None, rmin=15.0, rmax=30.0,
                   kweight=1, kmin=0, kmax=20, dk=4, dk2=None, kstep=0.05,
                   kwindow='kaiser', nfft=2048, _larch=None, **kws):
    """
    estimate noise levels in EXAFS spectrum and estimate highest k
    where data is above the noise level

    Parameters:
    -----------
      k:        1-d array of photo-electron wavenumber in Ang^-1 (or group)
      chi:      1-d array of chi
      group:    output Group  [see Note below]
      rmin:     minimum R value for high-R region of chi(R)
      rmax:     maximum R value for high-R region of chi(R)
      kweight:  exponent for weighting spectra by k**kweight [1]
      kmin:     starting k for FT Window [0]
      kmax:     ending k for FT Window [20]
      dk:       tapering parameter for FT Window [4]
      dk2:      second tapering parameter for FT Window [None]
      kstep:    value to use for delta_k ( Ang^-1) [0.05]
      kwindow:  name of window type ['kaiser']
      nfft:     value to use for N_fft [2048].

    Returns:
    ---------
      None -- outputs are written to supplied group.  Values (scalars)
      written to output group:
        epsilon_k     estimated noise in chi(k)
        epsilon_r     estimated noise in chi(R)
        kmax_suggest  highest estimated k value where |chi(k)| > epsilon_k

    Notes:
    -------
     1. This method uses the high-R portion of chi(R) as a measure of the
        noise level in the chi(R) data and uses Parseval's theorem to
        convert this noise level to that in chi(k).  This implicitly
        assumes the high-R portion contains no signal and that the noise
        is "white" (independent of R).  Each assumption can be questioned.
     2. The estimate for 'kmax_suggest' has a tendency to be fair but
        pessimistic in how far out the chi(k) data goes before being
        dominated by noise.
     3. Follows the 'First Argument Group' convention: pass arrays for
        'k' and 'chi' (and an optional output Group), OR a group with
        'k' and 'chi' as the first argument.
    """
    # fcn_name typo corrected ('esitmate_noise') so error messages from
    # parse_group_args attribute failures to the right function
    k, chi, group = parse_group_args(k, members=('k', 'chi'),
                                     defaults=(chi,), group=group,
                                     fcn_name='estimate_noise')

    # save _sys.xafsGroup -- we want to NOT write to it here!
    savgroup = set_xafsGroup(None, _larch=_larch)
    tmpgroup = Group()
    rmax_out = min(10*pi, rmax+2)

    xftf(k, chi, kmin=kmin, kmax=kmax, rmax_out=rmax_out,
         kweight=kweight, dk=dk, dk2=dk2, kwindow=kwindow,
         nfft=nfft, kstep=kstep, group=tmpgroup, _larch=_larch)

    chir = tmpgroup.chir
    rstep = tmpgroup.r[1] - tmpgroup.r[0]

    irmin = int(0.01 + rmin/rstep)
    # bug fix: use integer division -- nfft/2 is a float in Python 3 and
    # would raise TypeError when used as a slice bound below
    irmax = min(nfft//2, int(1.01 + rmax/rstep))
    highr = realimag(chir[irmin:irmax])

    # get average of window function value, scale eps_r scale by this
    # this is imperfect, but improves the result.
    kwin_ave = tmpgroup.kwin.sum()*kstep/(kmax-kmin)
    eps_r = sqrt((highr*highr).sum() / len(highr)) / kwin_ave

    # use Parseval's theorem to convert epsilon_r to epsilon_k,
    # compensating for kweight
    w = 2*kweight + 1
    scale = sqrt((2*pi*w)/(kstep*(kmax**w - kmin**w)))
    eps_k = scale*eps_r

    # do reverse FT to get chiq array
    xftr(tmpgroup.r, tmpgroup.chir, group=tmpgroup, rmin=0.5, rmax=9.5,
         dr=1.0, window='parzen', nfft=nfft, kstep=kstep, _larch=_larch)

    # sets kmax_suggest to the largest k value for which
    # | chi(q) / k**kweight| > epsilon_k
    iq0 = index_of(tmpgroup.q, (kmax+kmin)/2.0)
    tst = tmpgroup.chiq_mag[iq0:] / (tmpgroup.q[iq0:])**kweight
    kmax_suggest = tmpgroup.q[iq0 + where(tst < eps_k)[0][0]]

    # restore original _sys.xafsGroup, set output variables
    _larch.symtable._sys.xafsGroup = savgroup
    group = set_xafsGroup(group, _larch=_larch)
    group.epsilon_k = eps_k
    group.epsilon_r = eps_r
    group.kmax_suggest = kmax_suggest
def mback(energy, mu=None, group=None, z=None, edge='K', e0=None,
          pre1=None, pre2=-50, norm1=100, norm2=None, order=3,
          leexiang=False, tables='chantler', fit_erfc=False,
          return_f1=False, _larch=None):
    """
    Match mu(E) data for tabulated f''(E) using the MBACK algorithm and,
    optionally, the Lee & Xiang extension

    Arguments
    ----------
      energy:     array of x-ray energies, in eV.
      mu:         array of mu(E).
      group:      output group.
      z:          atomic number of the absorber.
      edge:       x-ray absorption edge (default 'K')
      e0:         edge energy, in eV.  If None, it will be determined here.
      pre1:       low E range (relative to e0) for pre-edge region.
      pre2:       high E range (relative to e0) for pre-edge region.
      norm1:      low E range (relative to e0) for post-edge region.
      norm2:      high E range (relative to e0) for post-edge region.
      order:      order of the legendre polynomial for normalization.
                  (default=3, min=0, max=5).
      leexiang:   boolean (default False) to use the Lee & Xiang extension.
      tables:     tabulated scattering factors: 'chantler' [deprecated]
      fit_erfc:   boolean (default False) to fit parameters of error function.
      return_f1:  boolean (default False) to include the f1 array in the group.

    Returns
    -------
      None

    The following attributes will be written to the output group:
      group.f2:            tabulated f2(E).
      group.f1:            tabulated f1(E) (if 'return_f1' is True).
      group.fpp:           mback-matched spectrum.
      group.edge_step:     edge step of spectrum.
      group.norm:          normalized spectrum.
      group.mback_params:  group of parameters for the minimization.

    Notes:
        Chantler tables is now used, with Cromer-Liberman no longer
        supported.

    References:
      * MBACK (Weng, Waldo, Penner-Hahn): http://dx.doi.org/10.1086/303711
      * Lee and Xiang: http://dx.doi.org/10.1088/0004-637X/702/2/970
      * Cromer-Liberman: http://dx.doi.org/10.1063/1.1674266
      * Chantler: http://dx.doi.org/10.1063/1.555974
    """
    order = max(min(order, MAXORDER), 0)

    ### implement the First Argument Group convention
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='mback')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()
    if _larch is not None:
        group = set_xafsGroup(group, _larch=_larch)

    energy = remove_dups(energy)
    # determine e0 if missing or outside the data range, then snap to grid
    # (leftover debug print of e0 removed)
    if e0 is None or e0 < energy[1] or e0 > energy[-2]:
        e0 = find_e0(energy, mu, group=group)
    ie0 = index_nearest(energy, e0)
    e0 = energy[ie0]

    # remember caller-supplied values before adjustment
    pre1_input = pre1
    norm2_input = norm2

    if pre1 is None:
        pre1 = min(energy) - e0
    if norm2 is None:
        norm2 = max(energy) - e0
    if norm2 < 0:
        # NOTE(review): maps a negative norm2 past the end of data; the
        # min() clamp below then pins it to max(energy)-e0 -- confirm sign
        norm2 = max(energy) - e0 - norm2
    pre1 = max(pre1, (min(energy) - e0))
    norm2 = min(norm2, (max(energy) - e0))

    if pre1 > pre2:
        pre1, pre2 = pre2, pre1
    if norm1 > norm2:
        norm1, norm2 = norm2, norm1

    p1 = index_of(energy, pre1+e0)
    p2 = index_nearest(energy, pre2+e0)
    n1 = index_nearest(energy, norm1+e0)
    n2 = index_of(energy, norm2+e0)
    if p2 - p1 < 2:
        p2 = min(len(energy), p1 + 2)
    if n2 - n1 < 2:
        # bug fix: this clamp previously reset p2 (copy-paste error),
        # leaving a too-narrow normalization range untouched
        n2 = min(len(energy), n1 + 2)

    ## theta is a boolean array indicating the
    ## energy values considered for the fit.
    ## theta=1 for included values, theta=0 for excluded values.
    theta = np.zeros_like(energy, dtype='int')
    theta[p1:(p2+1)] = 1
    theta[n1:(n2+1)] = 1

    ## weights for the pre- and post-edge regions, as defined in the MBACK paper (?)
    weight = np.ones_like(energy, dtype=float)
    weight[p1:(p2+1)] = np.sqrt(np.sum(weight[p1:(p2+1)]))
    weight[n1:(n2+1)] = np.sqrt(np.sum(weight[n1:(n2+1)]))

    ## get the f'' function from Chantler tables
    f1 = f1_chantler(z, energy)
    f2 = f2_chantler(z, energy)
    group.f2 = f2
    if return_f1:
        group.f1 = f1

    em = find_xray_line(z, edge).energy  # erfc centroid

    params = Parameters()
    params.add(name='s', value=1.0, vary=True)            # scale of data
    params.add(name='xi', value=50.0, vary=False, min=0)  # width of erfc
    params.add(name='a', value=0.0, vary=False)           # amplitude of erfc
    if fit_erfc:
        params['a'].vary = True
        params['a'].value = 0.5
        params['xi'].vary = True

    for i in range(order+1):  # polynomial coefficients
        params.add(name='c%d' % i, value=0, vary=True)

    out = minimize(match_f2, params, method='leastsq',
                   gtol=1.e-5, ftol=1.e-5, xtol=1.e-5, epsfcn=1.e-5,
                   kws=dict(en=energy, mu=mu, f2=f2, e0=e0, em=em,
                            order=order, weight=weight, theta=theta,
                            leexiang=leexiang))

    opars = out.params.valuesdict()
    eoff = energy - e0

    norm_function = opars['a']*erfc((energy-em)/opars['xi']) + opars['c0']
    for i in range(order):
        attr = 'c%d' % (i + 1)
        if attr in opars:
            norm_function += opars[attr]* eoff**(i + 1)

    group.e0 = e0
    group.fpp = opars['s']*mu - norm_function

    # calculate edge step and normalization from f2 + norm_function
    pre_f2 = preedge(energy, group.f2+norm_function, e0=e0, pre1=pre1,
                     pre2=pre2, norm1=norm1, norm2=norm2, nnorm=2, nvict=0)
    group.edge_step = pre_f2['edge_step'] / opars['s']
    group.norm = (opars['s']*mu - pre_f2['pre_edge']) / pre_f2['edge_step']
    group.mback_details = Group(params=opars, pre_f2=pre_f2,
                                f2_scaled=opars['s']*f2,
                                norm_function=norm_function)
def mback_norm(energy, mu=None, group=None, z=None, edge='K', e0=None,
               pre1=None, pre2=-50, norm1=100, norm2=None, nnorm=1, nvict=1,
               _larch=None):
    """
    simplified version of MBACK to Match mu(E) data for tabulated f''(E)
    for normalization

    Arguments:
      energy, mu:  arrays of energy and mu(E)
      group:       output group (and input group for e0)
      z:           Z number of absorber
      e0:          edge energy
      pre1:        low E range (relative to E0) for pre-edge fit
      pre2:        high E range (relative to E0) for pre-edge fit
      norm1:       low E range (relative to E0) for post-edge fit
      norm2:       high E range (relative to E0) for post-edge fit
      nnorm:       degree of polynomial (ie, nnorm+1 coefficients will
                   be found) for post-edge normalization curve fit to the
                   scaled f2. Default=1 (linear)

    Returns:
      group.norm_poly:     normalized mu(E) from pre_edge()
      group.norm:          normalized mu(E) from this method
      group.mback_mu:      tabulated f2 scaled and pre_edge added to
                           match mu(E)
      group.mback_params:  Group of parameters for the minimization

    References:
      * MBACK (Weng, Waldo, Penner-Hahn): http://dx.doi.org/10.1086/303711
      * Chantler: http://dx.doi.org/10.1063/1.555974
    """
    ### implement the First Argument Group convention
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='mback')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()
    if _larch is not None:
        group = set_xafsGroup(group, _larch=_larch)
    group.norm_poly = group.norm*1.0

    if e0 is None:
        e0 = getattr(group, 'e0', None)
        if e0 is None:
            find_e0(energy, mu, group=group)
            e0 = group.e0

    # identify the absorber if not given (or given implausibly)
    atsym = None
    if z is None or z < 2:
        atsym, edge = guess_edge(group.e0)
        z = atomic_number(atsym)
    if atsym is None and z is not None:
        atsym = atomic_symbol(z)

    if getattr(group, 'pre_edge_details', None) is None:  # pre_edge never run
        # bug fix: the preedge() result was previously discarded, leaving
        # group.pre_edge (used just below) undefined
        pdat = preedge(energy, mu, pre1=pre1, pre2=pre2, nvict=nvict,
                       norm1=norm1, norm2=norm2, e0=e0, nnorm=nnorm)
        group.pre_edge = pdat['pre_edge']
        group.edge_step = pdat['edge_step']

    mu_pre = mu - group.pre_edge
    f2 = f2_chantler(z, energy)

    weights = np.ones(len(energy))*1.0

    if norm2 is None:
        norm2 = max(energy) - e0
    if norm2 < 0:
        # NOTE(review): see preedge() -- confirm whether '+ norm2' was intended
        norm2 = max(energy) - e0 - norm2

    # avoid l2 and higher edges
    if edge.lower().startswith('l'):
        if edge.lower() == 'l3':
            e_l2 = xray_edge(z, 'L2').edge
            norm2 = min(norm2, e_l2-e0)
        elif edge.lower() == 'l2':
            # bug fix: this branch referenced the undefined name 'e_l1'
            # while storing the L1 edge energy under 'e_l2'
            e_l1 = xray_edge(z, 'L1').edge
            norm2 = min(norm2, e_l1-e0)

    ipre2 = index_of(energy, e0+pre2)
    inor1 = index_of(energy, e0+norm1)
    inor2 = index_of(energy, e0+norm2) + 1

    # ignore everything above the pre-edge cutoff, then ramp the
    # post-edge weights from 0.1 to 1.0
    weights[ipre2:] = 0.0
    weights[inor1:inor2] = np.linspace(0.1, 1.0, inor2-inor1)

    params = Parameters()
    params.add(name='slope', value=0.0, vary=True)
    params.add(name='offset', value=-f2[0], vary=True)
    params.add(name='scale', value=f2[-1], vary=True)

    out = minimize(f2norm, params, method='leastsq',
                   gtol=1.e-5, ftol=1.e-5, xtol=1.e-5, epsfcn=1.e-5,
                   kws=dict(en=energy, mu=mu_pre, f2=f2, weights=weights))

    p = out.params.valuesdict()

    model = (p['offset'] + p['slope']*energy + f2) * p['scale']

    group.mback_mu = model + group.pre_edge

    pre_f2 = preedge(energy, model, nnorm=nnorm, nvict=nvict, e0=e0,
                     pre1=pre1, pre2=pre2, norm1=norm1, norm2=norm2)

    step_new = pre_f2['edge_step']

    group.edge_step_poly = group.edge_step
    group.edge_step_mback = step_new
    group.norm_mback = mu_pre / step_new

    group.mback_params = Group(e0=e0, pre1=pre1, pre2=pre2, norm1=norm1,
                               norm2=norm2, nnorm=nnorm, fit_params=p,
                               fit_weights=weights, model=model, f2=f2,
                               pre_f2=pre_f2, atsym=atsym, edge=edge)

    # only adopt the mback edge step if it agrees reasonably with the
    # polynomial estimate
    if (abs(step_new - group.edge_step)/(1.e-13+group.edge_step)) > 0.75:
        print("Warning: mback edge step failed....")
    else:
        group.edge_step = step_new
        group.norm = group.norm_mback
def plot_mu(dgroup, show_norm=False, show_deriv=False,
            show_pre=False, show_post=False, show_e0=False, with_deriv=False,
            emin=None, emax=None, label='mu', new=True, delay_draw=False,
            offset=0, title=None, win=1, _larch=None):
    """
    plot_mu(dgroup, norm=False, deriv=False, show_pre=False, show_post=False,
            show_e0=False, show_deriv=False, emin=None, emax=None, label=None,
            new=True, win=1)

    Plot mu(E) for an XAFS data group in various forms

    Arguments
    ----------
     dgroup      group of XAFS data after pre_edge() results (see Note 1)
     show_norm   bool whether to show normalized data [False]
     show_deriv  bool whether to show derivative of XAFS data [False]
     show_pre    bool whether to show pre-edge curve [False]
     show_post   bool whether to show post-edge curve [False]
     show_e0     bool whether to show E0 [False]
     with_deriv  bool whether to show deriv together with mu [False]
     emin        min energy to show, absolute or relative to E0
                 [None, start of data]
     emax        max energy to show, absolute or relative to E0
                 [None, end of data]
     label       string for label [None:  'mu', 'dmu/dE', or 'mu norm']
     title       string for plot title [None, may use filename if available]
     new         bool whether to start a new plot [True]
     delay_draw  bool whether to delay draw until more traces are added [False]
     offset      vertical offset to use for y-array [0]
     win         integer plot window to use [1]

    Notes
    -----
     1. The input data group must have the following attributes:
        energy, mu, norm, e0, pre_edge, edge_step
    """
    # select the mu array: prefer 'mu', fall back to transmission/fluorescence
    if hasattr(dgroup, 'mu'):
        mu = dgroup.mu
    elif hasattr(dgroup, 'mutrans'):
        mu = dgroup.mutrans
    elif hasattr(dgroup, 'mufluor'):
        mu = dgroup.mufluor
    else:
        raise ValueError("XAFS data group has no array for mu")

    ylabel = plotlabels.mu
    if label is None:
        label = 'mu'

    if show_deriv:
        mu = gradient(mu) / gradient(dgroup.energy)
        ylabel = plotlabels.dmude
    elif show_norm:
        mu = dgroup.norm
        ylabel = "%s (norm)" % ylabel

    emin, emax = _get_erange(dgroup, emin, emax)
    title = _get_title(dgroup, title=title)

    opts = dict(win=win, show_legend=True, linewidth=3,
                title=title, xmin=emin, xmax=emax,
                delay_draw=True, _larch=_larch)

    _plot(dgroup.energy, mu + offset, xlabel=plotlabels.energy, ylabel=ylabel,
          label=label, zorder=20, new=new, **opts)
    if with_deriv:
        dmu = gradient(mu) / gradient(dgroup.energy)
        _plot(dgroup.energy, dmu + offset, ylabel=plotlabels.dmude,
              label='%s (deriv)' % label, zorder=18, side='right', **opts)

    # pre/post edge curves only make sense against raw (un-normalized) mu
    if (not show_norm and not show_deriv):
        if show_pre:
            _plot(dgroup.energy, dgroup.pre_edge + offset, label='pre_edge',
                  zorder=18, **opts)
        if show_post:
            _plot(dgroup.energy, dgroup.post_edge + offset, label='post_edge',
                  zorder=18, **opts)
            if show_pre:
                # vertical arrow at e0 marking the edge step
                i = index_of(dgroup.energy, dgroup.e0)
                ypre = dgroup.pre_edge[i]
                ypost = dgroup.post_edge[i]
                # bug fix: offset is a *vertical* shift, so it belongs on
                # the arrow's y coordinates, not on its second x coordinate
                _plot_arrow(dgroup.e0, ypre + offset, dgroup.e0, ypost + offset,
                            color=plotlabels.e0color, width=0.25,
                            head_width=0, zorder=3, win=win, _larch=_larch)

    if show_e0:
        _plot_axvline(dgroup.e0, zorder=2, size=3, label='E0',
                      color=plotlabels.e0color, win=win, _larch=_larch)
    disp = _getDisplay(win=win, _larch=_larch)
    if disp is not None:
        disp.panel.conf.draw_legend()
    # traces above were drawn with delay_draw=True; render them now
    redraw(win=win, xmin=emin, xmax=emax, _larch=_larch)
def mback(energy, mu=None, group=None, z=None, edge='K', e0=None,
          pre1=None, pre2=-50, norm1=100, norm2=None, order=3,
          leexiang=False, tables='chantler', fit_erfc=False,
          return_f1=False, _larch=None):
    """
    Match mu(E) data for tabulated f''(E) using the MBACK algorithm and,
    optionally, the Lee & Xiang extension

    Arguments
    ----------
      energy:     array of x-ray energies, in eV.
      mu:         array of mu(E).
      group:      output group.
      z:          atomic number of the absorber.
      edge:       x-ray absorption edge (default 'K')
      e0:         edge energy, in eV.  If None, it will be determined here.
      pre1:       low E range (relative to e0) for pre-edge region.
      pre2:       high E range (relative to e0) for pre-edge region.
      norm1:      low E range (relative to e0) for post-edge region.
      norm2:      high E range (relative to e0) for post-edge region.
      order:      order of the legendre polynomial for normalization.
                  (default=3, min=0, max=5).
      leexiang:   boolean (default False) to use the Lee & Xiang extension.
      tables:     tabulated scattering factors: 'chantler' (default) or
                  'cl' (cromer-liberman)
      fit_erfc:   boolean (default False) to fit parameters of error function.
      return_f1:  boolean (default False) to include the f1 array in the group.

    Returns
    -------
      None

    The following attributes will be written to the output group:
      group.f2:            tabulated f2(E).
      group.f1:            tabulated f1(E) (if 'return_f1' is True).
      group.fpp:           mback-matched spectrum.
      group.edge_step:     edge step of spectrum.
      group.norm:          normalized spectrum.
      group.mback_params:  group of parameters for the minimization.

    References:
      * MBACK (Weng, Waldo, Penner-Hahn): http://dx.doi.org/10.1086/303711
      * Lee and Xiang: http://dx.doi.org/10.1088/0004-637X/702/2/970
      * Cromer-Liberman: http://dx.doi.org/10.1063/1.1674266
      * Chantler: http://dx.doi.org/10.1063/1.555974
    """
    order = max(min(order, MAXORDER), 0)

    ### implement the First Argument Group convention
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='mback')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()
    if _larch is not None:
        group = set_xafsGroup(group, _larch=_larch)

    energy = remove_dups(energy)
    # determine e0 if missing or outside the data range, then snap to grid
    # (leftover debug print of e0 removed)
    if e0 is None or e0 < energy[1] or e0 > energy[-2]:
        e0 = find_e0(energy, mu, group=group)
    ie0 = index_nearest(energy, e0)
    e0 = energy[ie0]

    # remember caller-supplied values before adjustment
    pre1_input = pre1
    norm2_input = norm2

    if pre1 is None:
        pre1 = min(energy) - e0
    if norm2 is None:
        norm2 = max(energy) - e0
    if norm2 < 0:
        # NOTE(review): maps a negative norm2 past the end of data; the
        # min() clamp below then pins it to max(energy)-e0 -- confirm sign
        norm2 = max(energy) - e0 - norm2
    pre1 = max(pre1, (min(energy) - e0))
    norm2 = min(norm2, (max(energy) - e0))

    if pre1 > pre2:
        pre1, pre2 = pre2, pre1
    if norm1 > norm2:
        norm1, norm2 = norm2, norm1

    p1 = index_of(energy, pre1+e0)
    p2 = index_nearest(energy, pre2+e0)
    n1 = index_nearest(energy, norm1+e0)
    n2 = index_of(energy, norm2+e0)
    if p2 - p1 < 2:
        p2 = min(len(energy), p1 + 2)
    if n2 - n1 < 2:
        # bug fix: this clamp previously reset p2 (copy-paste error),
        # leaving a too-narrow normalization range untouched
        n2 = min(len(energy), n1 + 2)

    ## theta is a boolean array indicating the
    ## energy values considered for the fit.
    ## theta=1 for included values, theta=0 for excluded values.
    theta = np.zeros_like(energy, dtype='int')
    theta[p1:(p2+1)] = 1
    theta[n1:(n2+1)] = 1

    ## weights for the pre- and post-edge regions, as defined in the MBACK paper (?)
    weight = np.ones_like(energy, dtype=float)
    weight[p1:(p2+1)] = np.sqrt(np.sum(weight[p1:(p2+1)]))
    weight[n1:(n2+1)] = np.sqrt(np.sum(weight[n1:(n2+1)]))

    ## get the f'' function from CL or Chantler
    if tables.lower() == 'chantler':
        f1 = f1_chantler(z, energy)
        f2 = f2_chantler(z, energy)
    else:
        (f1, f2) = f1f2_cl(z, energy, edge=edge)
    group.f2 = f2
    if return_f1:
        group.f1 = f1

    em = find_xray_line(z, edge)[0]  # erfc centroid

    params = Parameters()
    params.add(name='s', value=1.0, vary=True)            # scale of data
    params.add(name='xi', value=50.0, vary=False, min=0)  # width of erfc
    params.add(name='a', value=0.0, vary=False)           # amplitude of erfc
    if fit_erfc:
        params['a'].vary = True
        params['a'].value = 0.5
        params['xi'].vary = True

    for i in range(order+1):  # polynomial coefficients
        params.add(name='c%d' % i, value=0, vary=True)

    out = minimize(match_f2, params, method='leastsq',
                   gtol=1.e-5, ftol=1.e-5, xtol=1.e-5, epsfcn=1.e-5,
                   kws=dict(en=energy, mu=mu, f2=f2, e0=e0, em=em,
                            order=order, weight=weight, theta=theta,
                            leexiang=leexiang))

    opars = out.params.valuesdict()
    eoff = energy - e0

    norm_function = opars['a']*erfc((energy-em)/opars['xi']) + opars['c0']
    for i in range(order):
        attr = 'c%d' % (i + 1)
        if attr in opars:
            norm_function += opars[attr]* eoff**(i + 1)

    group.e0 = e0
    group.fpp = opars['s']*mu - norm_function

    # calculate edge step and normalization from f2 + norm_function
    pre_f2 = preedge(energy, group.f2+norm_function, e0=e0, pre1=pre1,
                     pre2=pre2, norm1=norm1, norm2=norm2, nnorm=2, nvict=0)
    group.edge_step = pre_f2['edge_step'] / opars['s']
    group.norm = (opars['s']*mu - pre_f2['pre_edge']) / pre_f2['edge_step']
    group.mback_details = Group(params=opars, pre_f2=pre_f2,
                                f2_scaled=opars['s']*f2,
                                norm_function=norm_function)
def pre_edge(energy, mu=None, group=None, e0=None, step=None, nnorm=None,
             nvict=0, pre1=None, pre2=-50, norm1=100, norm2=None,
             make_flat=True, emin_area=None, _larch=None):
    """pre edge subtraction, normalization for XAFS

    This performs a number of steps:
       1. determine E0 (if not supplied) from max of deriv(mu)
       2. fit a line or polynomial to the region below the edge
       3. fit a polynomial to the region above the edge
       4. extrapolate the two curves to E0 to determine the edge jump
       5. estimate area from emin_area to norm2, to get norm_area

    Arguments
    ----------
    energy:    array of x-ray energies, in eV, or group (see note)
    mu:        array of mu(E)
    group:     output group
    e0:        edge energy, in eV.  If None, it will be determined here.
    step:      edge jump.  If None, it will be determined here.
    pre1:      low E range (relative to E0) for pre-edge fit
    pre2:      high E range (relative to E0) for pre-edge fit
    nvict:     energy exponent to use for pre-edge fit.  See Note
    norm1:     low E range (relative to E0) for post-edge fit
    norm2:     high E range (relative to E0) for post-edge fit
    nnorm:     degree of polynomial (ie, nnorm+1 coefficients will be found) for
               post-edge normalization curve. Default=None (see note)
    make_flat: boolean (Default True) to calculate flattened output.
    emin_area: energy threshold for area normalization (see note)

    Returns
    -------
      None

    The following attributes will be written to the output group:
        e0          energy origin
        edge_step   edge step
        norm        normalized mu(E), using polynomial
        norm_area   normalized mu(E), using integrated area
        flat        flattened, normalized mu(E)
        pre_edge    determined pre-edge curve
        post_edge   determined post-edge, normalization curve
        dmude       derivative of mu(E)

    (if the output group is None, _sys.xafsGroup will be written to)

    Notes
    -----
     1 If the first argument is a Group, it must contain 'energy' and 'mu'.
       If it exists, group.e0 will be used as e0.
       See First Argument Group in Documentation

     2 nvict gives an exponent to the energy term for the fits to the pre-edge
       and the post-edge region.  For the pre-edge, a line (m * energy + b) is
       fit to mu(energy)*energy**nvict over the pre-edge region,
       energy=[e0+pre1, e0+pre2].  For the post-edge, a polynomial of order
       nnorm will be fit to mu(energy)*energy**nvict of the post-edge region
       energy=[e0+norm1, e0+norm2].

     3 nnorm will default to 2 if norm2-norm1>400, to 1 if
       300>norm2-norm1>100, and to 0 if norm2-norm1<100.

     4 norm_area will be estimated so that the area between emin_area and
       norm2 is equal to (norm2-emin_area).  By default emin_area will be set
       to the *nominal* edge energy for the element and edge minus twice the
       core-level width (see the area-normalization code below).
    """
    # resolve (energy, mu, group) from either arrays or a First-Argument Group
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='pre_edge')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    # core pre-edge/post-edge fits are done by the preedge() helper
    pre_dat = preedge(energy, mu, e0=e0, step=step, nnorm=nnorm,
                      nvict=nvict, pre1=pre1, pre2=pre2,
                      norm1=norm1, norm2=norm2)

    group = set_xafsGroup(group, _larch=_larch)

    e0    = pre_dat['e0']
    norm  = pre_dat['norm']
    norm1 = pre_dat['norm1']
    norm2 = pre_dat['norm2']

    # generate flattened spectra, by fitting a quadratic to .norm
    # and removing that.
    flat = norm
    ie0 = index_nearest(energy, e0)
    p1 = index_of(energy, norm1+e0)
    p2 = index_nearest(energy, norm2+e0)
    if p2-p1 < 2:
        p2 = min(len(energy), p1 + 2)

    if make_flat and p2-p1 > 4:
        # fit c0 + c1*E + c2*E^2 to the normalized post-edge; the number of
        # varying coefficients follows the order of the normalization fit
        enx, mux = remove_nans2(energy[p1:p2], norm[p1:p2])
        # enx, mux = (energy[p1:p2], norm[p1:p2])
        fpars = Parameters()
        ncoefs = len(pre_dat['norm_coefs'])
        fpars.add('c0', value=0, vary=True)
        fpars.add('c1', value=0, vary=(ncoefs>1))
        fpars.add('c2', value=0, vary=(ncoefs>2))
        fit = Minimizer(flat_resid, fpars, fcn_args=(enx, mux))
        result = fit.leastsq(xtol=1.e-6, ftol=1.e-6)

        fc0 = result.params['c0'].value
        fc1 = result.params['c1'].value
        fc2 = result.params['c2'].value

        flat_diff = fc0 + energy * (fc1 + energy * fc2)
        # subtract the fitted curve, anchored at e0, but only above the edge
        flat = norm - (flat_diff - flat_diff[ie0])
        flat[:ie0] = norm[:ie0]

    # write primary results to the output group
    group.e0 = e0
    group.norm = norm
    group.norm_poly = 1.0*norm
    group.flat = flat
    group.dmude = np.gradient(mu)/np.gradient(energy)
    group.edge_step = pre_dat['edge_step']
    group.edge_step_poly = pre_dat['edge_step']
    group.pre_edge = pre_dat['pre_edge']
    group.post_edge = pre_dat['post_edge']

    # record the parameters actually used for the fits
    group.pre_edge_details = Group()
    group.pre_edge_details.pre1 = pre_dat['pre1']
    group.pre_edge_details.pre2 = pre_dat['pre2']
    group.pre_edge_details.nnorm = pre_dat['nnorm']
    group.pre_edge_details.norm1 = pre_dat['norm1']
    group.pre_edge_details.norm2 = pre_dat['norm2']
    group.pre_edge_details.nvict = pre_dat['nvict']
    group.pre_edge_details.pre1_input = pre_dat['pre1_input']
    group.pre_edge_details.norm2_input = pre_dat['norm2_input']
    group.pre_edge_details.pre_slope = pre_dat['precoefs'][0]
    group.pre_edge_details.pre_offset = pre_dat['precoefs'][1]

    # clear any stale normalization coefficients before writing new ones
    for i in range(MAX_NNORM):
        if hasattr(group, 'norm_c%i' % i):
            delattr(group, 'norm_c%i' % i)
    for i, c in enumerate(pre_dat['norm_coefs']):
        setattr(group.pre_edge_details, 'norm_c%i' % i, c)

    # guess element and edge
    group.atsym = getattr(group, 'atsym', None)
    group.edge = getattr(group, 'edge', None)
    if group.atsym is None or group.edge is None:
        _atsym, _edge = guess_edge(group.e0, _larch=_larch)
        if group.atsym is None:
            group.atsym = _atsym
        if group.edge is None:
            group.edge = _edge

    # calculate area-normalization
    if emin_area is None:
        emin_area = (xray_edge(group.atsym, group.edge).edge
                     - 2*core_width(group.atsym, group.edge))
    i1 = index_of(energy, emin_area)
    i2 = index_of(energy, e0+norm2)
    en = energy[i1:i2]
    # scale so the mean of norm over [emin_area, e0+norm2] becomes 1
    area_step = max(1.e-15, simps(norm[i1:i2], en) / en.ptp())
    group.edge_step_area = group.edge_step_poly * area_step
    group.norm_area = norm/area_step
    group.pre_edge_details.emin_area = emin_area
    return
def rebin_xafs(energy, mu=None, group=None, e0=None, pre1=None, pre2=-30,
               pre_step=2, xanes_step=None, exafs1=15, exafs2=None,
               exafs_kstep=0.05, method='centroid', _larch=None):
    """rebin XAFS energy and mu to a 'standard 3 region XAFS scan'

    Arguments
    ---------
    energy       input energy array
    mu           input mu array
    group        output group
    e0           energy reference -- all energy values are relative to this
    pre1         start of pre-edge region [1st energy point]
    pre2         end of pre-edge region, start of XANES region [-30]
    pre_step     energy step for pre-edge region [2]
    xanes_step   energy step for XANES region [see note]
    exafs1       end of XANES region, start of EXAFS region [15]
    exafs2       end of EXAFS region [last energy point]
    exafs_kstep  k-step for EXAFS region [0.05]
    method       one of 'boxcar', 'centroid' ['centroid']

    Returns
    -------
      None

    A group named 'rebinned' will be created in the output group, with the
    following attributes:
        energy  new energy array
        mu      mu for energy array
        e0      e0 copied from current group

    (if the output group is None, _sys.xafsGroup will be written to)

    Notes
    ------
     1 If the first argument is a Group, it must contain 'energy' and 'mu'.
       See First Argument Group in Documentation

     2 If xanes_step is None, it will be found from the data.  If it is
       given, it may be increased to better fit the input energy array.

     3 The EXAFS region will be spaced in k-space

     4 The rebinned data is found by determining which segments of the input
       energy correspond to each bin in the new energy array.  That is, each
       input energy is assigned to exactly one bin in the new array.  For
       each new energy bin, the new value is selected from the data in the
       segment as either
         a) linear interpolation if there are fewer than 3 points in the
            segment.
         b) mean value ('boxcar')
         c) centroid ('centroid')
    """
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='rebin_xafs')
    if e0 is None:
        e0 = getattr(group, 'e0', None)
    if e0 is None:
        raise ValueError("need e0")

    if pre1 is None:
        # round the start of the pre-edge region to a multiple of pre_step
        pre1 = pre_step*int((min(energy) - e0)/pre_step)

    if exafs2 is None:
        exafs2 = max(energy) - e0

    # determine xanes step size:
    # find mean of energy difference, ignoring first/last 1% of energies
    npts = len(energy)
    n1 = max(2, int(npts/100.0))
    de_mean = np.diff(energy[n1:-n1]).mean()
    xanes_step_def = max(0.1, 0.05 * (1 + int(de_mean/0.05)))
    if xanes_step is None:
        xanes_step = xanes_step_def
    else:
        # never use a step finer than the data itself supports
        xanes_step = max(xanes_step, xanes_step_def)

    # create new energy array from the 3 segments (pre, xanes, exafs);
    # the EXAFS segment is built evenly-spaced in k, then mapped back to E
    en = []
    for start, stop, step, isk in ((pre1, pre2, pre_step, False),
                                   (pre2, exafs1, xanes_step, False),
                                   (exafs1, exafs2, exafs_kstep, True)):
        if isk:
            start = etok(start)
            stop = etok(stop)

        reg = np.linspace(start+step, stop, int(0.1 + abs(stop-start)/step))
        if isk:
            reg = ktoe(reg)
        en.extend(e0 + reg)

    # find the segment boundaries of the old energy array
    bounds = [index_of(energy, e) for e in en]
    mu_out = []
    err_out = []

    j0 = 0
    for i in range(len(en)):
        if i == len(en) - 1:
            j1 = len(energy) - 1
        else:
            # midpoint between successive bin centers
            j1 = int((bounds[i] + bounds[i+1] + 1)/2.0)
        # if not enough points in segment, do interpolation
        if (j1 - j0) < 3:
            jx = j1 + 1
            if (jx - j0) < 2:
                jx += 1
            # NOTE(review): assumes interp1d is the project helper with
            # signature (x, y, xnew) -- not scipy.interpolate.interp1d
            val = interp1d(energy[j0:jx], mu[j0:jx], en[i])
        else:
            if method.startswith('box'):
                val = mu[j0:j1].mean()
            else:
                # energy-weighted centroid of the segment
                val = (mu[j0:j1]*energy[j0:j1]).mean()/energy[j0:j1].mean()
        mu_out.append(val)
        err_out.append(mu[j0:j1].std())
        j0 = j1

    newname = group.__name__ + '_rebinned'
    group.rebinned = Group(energy=np.array(en), mu=np.array(mu_out),
                           delta_mu=np.array(err_out), e0=e0,
                           __name__=newname)
    return
def pre_edge_baseline(energy, norm=None, group=None, form='lorentzian',
                      emin=None, emax=None, elo=None, ehi=None,
                      with_line=True, _larch=None):
    """remove baseline from main edge over pre edge peak region

    This assumes that pre_edge() has been run successfully on the spectra
    and that the spectra has decent pre-edge subtraction and normalization.

    Arguments
    ----------
    energy:    array of x-ray energies, in eV, or group (see note 1)
    norm:      array of normalized mu(E)
    group:     output group
    elo:       low energy of pre-edge peak region to not fit baseline [e0-20]
    ehi:       high energy of pre-edge peak region to not fit baseline [e0-10]
    emax:      max energy (eV) to use for baseline fit [e0-5]
    emin:      min energy (eV) to use for baseline fit [e0-40]
    form:      form used for baseline (see note 2)  ['lorentzian']
    with_line: whether to include linear component in baseline ['True']

    Returns
    -------
      None

    A group named 'prepeaks' will be created in the output group, with the
    following attributes:
        energy        energy array for pre-edge peaks = energy[emin:emax]
        baseline      fitted baseline array over pre-edge peak energies
        norm          spectrum over pre-edge peak energies
        peaks         baseline-subtracted spectrum over pre-edge peak energies
        centroid      estimated centroid of pre-edge peaks (see note 3)
        peak_energies list of predicted peak energies (see note 4)
        fit_details   details of fit to extract pre-edge peaks.

    (if the output group is None, _sys.xafsGroup will be written to)

    Notes
    -----
     1 If the first argument is a Group, it must contain 'energy' and 'norm'.
       See First Argument Group in Documentation

     2 A function will be fit to the input mu(E) data over the range between
       [emin:elo] and [ehi:emax], ignoring the pre-edge peaks in the region
       [elo:ehi].  The baseline function is specified with the `form` keyword
       argument, which can be one of 'lorentzian', 'gaussian', or 'voigt',
       with 'lorentzian' the default.  In addition, the `with_line` keyword
       argument can be used to add a line to this baseline function.

     3 The value calculated for `prepeaks.centroid` will be found as
       (prepeaks.energy*prepeaks.peaks).sum() / prepeaks.peaks.sum()

     4 The values in the `peak_energies` list will be predicted energies of
       the peaks in `prepeaks.peaks` as found by peakutils.
    """
    energy, norm, group = parse_group_args(energy, members=('energy', 'norm'),
                                           defaults=(norm,), group=group,
                                           fcn_name='pre_edge_baseline')

    prepeaks_setup(energy, norm=norm, group=group, emin=emin, emax=emax,
                   elo=elo, ehi=ehi, _larch=_larch)

    emin = group.prepeaks.emin
    emax = group.prepeaks.emax
    elo = group.prepeaks.elo
    ehi = group.prepeaks.ehi

    # small offset to make index_of pick the intended bin
    dele = 1.e-13 + min(np.diff(energy))/5.0

    imin = index_of(energy, emin+dele)
    ilo  = index_of(energy, elo+dele)
    ihi  = index_of(energy, ehi+dele)
    imax = index_of(energy, emax+dele)

    # build xdat, ydat: data to fit (skipping pre-edge peaks)
    xdat = np.concatenate((energy[imin:ilo+1], energy[ihi:imax+1]))
    ydat = np.concatenate((norm[imin:ilo+1], norm[ihi:imax+1]))

    # build fitting model: note that we always include
    # a LinearModel but may fix slope and intercept
    form = form.lower()
    if form.startswith('voig'):
        model = VoigtModel()
    elif form.startswith('gaus'):
        model = GaussianModel()
    else:
        model = LorentzianModel()
    model += LinearModel()

    params = model.make_params(amplitude=1.0, sigma=2.0,
                               center=emax, intercept=0, slope=0)
    params['amplitude'].min = 0.0
    params['sigma'].min = 0.25
    params['sigma'].max = 50.0
    params['center'].max = emax + 25.0
    params['center'].min = emax - 25.0
    if not with_line:
        params['slope'].vary = False
        params['intercept'].vary = False

    result = model.fit(ydat, params, x=xdat)

    cen = dcen = 0.
    peak_energies = []

    # energy including pre-edge peaks, for output
    edat = energy[imin: imax+1]
    norm = norm[imin:imax+1]

    # fallbacks for when the fit failed; build three distinct zero arrays
    # (the original chained assignment aliased one array to all three names)
    bline = norm*0.0
    peaks = norm*0.0
    dpeaks = norm*0.0

    # get baseline and resulting norm over edat range
    if result is not None:
        bline = result.eval(result.params, x=edat)
        peaks = norm-bline

        # estimate centroid
        cen = (edat*peaks).sum() / peaks.sum()

        # uncertainty in norm includes only uncertainties in baseline fit
        # and uncertainty in centroid:
        try:
            dpeaks = result.eval_uncertainty(result.params, x=edat)
        except Exception:
            # fix: original assigned misspelled 'dbpeaks', leaving dpeaks
            # untouched; reset it explicitly on failure
            dpeaks = 0.0

        cen_plus = (edat*(peaks+dpeaks)).sum() / (peaks+dpeaks).sum()
        cen_minus = (edat*(peaks-dpeaks)).sum() / (peaks-dpeaks).sum()
        dcen = abs(cen_minus - cen_plus) / 2.0

    # locate peak positions
    if HAS_PEAKUTILS:
        peak_ids = peakutils.peak.indexes(peaks, thres=0.05, min_dist=2)
        peak_energies = [edat[pid] for pid in peak_ids]

    group = set_xafsGroup(group, _larch=_larch)
    group.prepeaks = Group(energy=edat, norm=norm, baseline=bline,
                           peaks=peaks, delta_peaks=dpeaks,
                           centroid=cen, delta_centroid=dcen,
                           peak_energies=peak_energies,
                           fit_details=result,
                           emin=emin, emax=emax, elo=elo, ehi=ehi,
                           form=form, with_line=with_line)
    return
def autobk(energy, mu=None, group=None, rbkg=1, nknots=None, e0=None,
           edge_step=None, kmin=0, kmax=None, kweight=1, dk=0.1,
           win='hanning', k_std=None, chi_std=None, nfft=2048, kstep=0.05,
           pre_edge_kws=None, nclamp=4, clamp_lo=1, clamp_hi=1,
           calc_uncertainties=True, err_sigma=1, _larch=None, **kws):
    """Use Autobk algorithm to remove XAFS background

    Parameters:
    -----------
      energy:    1-d array of x-ray energies, in eV, or group
      mu:        1-d array of mu(E)
      group:     output group (and input group for e0 and edge_step).
      rbkg:      distance (in Ang) for chi(R) above which the signal is
                 ignored. Default = 1.
      e0:        edge energy, in eV.  If None, it will be determined.
      edge_step: edge step.  If None, it will be determined.
      pre_edge_kws:  keyword arguments to pass to pre_edge()
      nknots:    number of knots in spline.  If None, it will be determined.
                 NOTE(review): this argument is currently unused in the body;
                 the spline size is derived from rbkg and the k range.
      kmin:      minimum k value   [0]
      kmax:      maximum k value   [full data range].
      kweight:   k weight for FFT.  [1]
      dk:        FFT window window parameter.  [0.1]
      win:       FFT window function name.     ['hanning']
      nfft:      array size to use for FFT [2048]
      kstep:     k step size to use for FFT [0.05]
      k_std:     optional k array for standard chi(k).
      chi_std:   optional chi array for standard chi(k).
      nclamp:    number of energy end-points for clamp [4]
      clamp_lo:  weight of low-energy clamp [1]
      clamp_hi:  weight of high-energy clamp [1]
      calc_uncertainties:  Flag to calculate uncertainties in
                 mu_0(E) and chi(k) [True]
      err_sigma: sigma level for uncertainties in mu_0(E) and chi(k) [1]

    Output arrays are written to the provided group.

    Follows the 'First Argument Group' convention.
    """
    # fix: original set msg = sys.stdout, which is not callable and raised
    # TypeError whenever a message had to be printed without a _larch session
    msg = sys.stdout.write
    if _larch is not None:
        msg = _larch.writer.write
    if 'kw' in kws:
        kweight = kws.pop('kw')
    if len(kws) > 0:
        # fix: message text had a stray 'a:' typo
        msg('Unrecognized arguments for autobk():\n')
        msg(' %s\n' % (', '.join(kws.keys())))
        return
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='autobk')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    energy = remove_dups(energy)

    # if e0 or edge_step are not specified, get them, either from the
    # passed-in group or from running pre_edge()
    group = set_xafsGroup(group, _larch=_larch)

    if edge_step is None and isgroup(group, 'edge_step'):
        edge_step = group.edge_step
    if e0 is None and isgroup(group, 'e0'):
        e0 = group.e0
    if e0 is None or edge_step is None:
        # need to run pre_edge:
        pre_kws = dict(nnorm=3, nvict=0, pre1=None,
                       pre2=-50., norm1=100., norm2=None)
        if pre_edge_kws is not None:
            pre_kws.update(pre_edge_kws)
        pre_edge(energy, mu, group=group, _larch=_larch, **pre_kws)
        if e0 is None:
            e0 = group.e0
        if edge_step is None:
            edge_step = group.edge_step
    if e0 is None or edge_step is None:
        msg('autobk() could not determine e0 or edge_step!: try running pre_edge first\n')
        return

    # get array indices for rkbg and e0: irbkg, ie0
    ie0 = index_of(energy, e0)
    rgrid = np.pi/(kstep*nfft)
    if rbkg < 2*rgrid:
        rbkg = 2*rgrid
    irbkg = int(1.01 + rbkg/rgrid)

    # save ungridded k (kraw) and grided k (kout)
    # and ftwin (*k-weighting) for FT in residual
    enpe = energy[ie0:] - e0
    kraw = np.sign(enpe)*np.sqrt(ETOK*abs(enpe))
    if kmax is None:
        kmax = max(kraw)
    else:
        kmax = max(0, min(max(kraw), kmax))
    kout = kstep * np.arange(int(1.01+kmax/kstep), dtype='float64')
    iemax = min(len(energy), 2+index_of(energy, e0+kmax*kmax/ETOK)) - 1

    # interpolate provided chi(k) onto the kout grid
    if chi_std is not None and k_std is not None:
        chi_std = np.interp(kout, k_std, chi_std)

    # pre-load FT window
    ftwin = kout**kweight * ftwindow(kout, xmin=kmin, xmax=kmax,
                                     window=win, dx=dk, dx2=dk)

    # calc k-value and initial guess for y-values of spline params
    nspl = max(5, min(64, int(2*rbkg*(kmax-kmin)/np.pi) + 2))
    spl_y, spl_k, spl_e = np.zeros(nspl), np.zeros(nspl), np.zeros(nspl)
    for i in range(nspl):
        q = kmin + i*(kmax-kmin)/(nspl - 1)
        ik = index_nearest(kraw, q)
        i1 = min(len(kraw)-1, ik + 5)
        i2 = max(0, ik - 5)
        spl_k[i] = kraw[ik]
        spl_e[i] = energy[ik+ie0]
        # lightly-smoothed initial guess at each knot
        spl_y[i] = (2*mu[ik+ie0] + mu[i1+ie0] + mu[i2+ie0]) / 4.0

    # get spline represention: knots, coefs, order=3
    # coefs will be varied in fit.
    knots, coefs, order = splrep(spl_k, spl_y)

    # set fit parameters from initial coefficients
    params = Parameters()
    for i in range(len(coefs)):
        params.add(name=FMT_COEF % i, value=coefs[i], vary=i<len(spl_y))

    initbkg, initchi = spline_eval(kraw[:iemax-ie0+1], mu[ie0:iemax+1],
                                   knots, coefs, order, kout)

    # do fit
    result = minimize(__resid, params, method='leastsq',
                      gtol=1.e-5, ftol=1.e-5, xtol=1.e-5, epsfcn=1.e-5,
                      kws=dict(ncoefs=len(coefs), chi_std=chi_std,
                               knots=knots, order=order,
                               kraw=kraw[:iemax-ie0+1],
                               mu=mu[ie0:iemax+1], irbkg=irbkg, kout=kout,
                               ftwin=ftwin, kweight=kweight,
                               nfft=nfft, nclamp=nclamp,
                               clamp_lo=clamp_lo, clamp_hi=clamp_hi))

    # write final results
    coefs = [result.params[FMT_COEF % i].value for i in range(len(coefs))]
    bkg, chi = spline_eval(kraw[:iemax-ie0+1], mu[ie0:iemax+1],
                           knots, coefs, order, kout)
    obkg = np.copy(mu)
    obkg[ie0:ie0+len(bkg)] = bkg

    # outputs to group
    group = set_xafsGroup(group, _larch=_larch)
    group.bkg = obkg
    group.chie = (mu-obkg)/edge_step
    group.k = kout
    group.chi = chi/edge_step
    group.e0 = e0

    # now fill in 'autobk_details' group
    details = Group(params=result.params)
    details.init_bkg = np.copy(mu)
    details.init_bkg[ie0:ie0+len(bkg)] = initbkg
    details.init_chi = initchi/edge_step
    details.knots_e = spl_e
    details.knots_y = np.array([coefs[i] for i in range(nspl)])
    details.init_knots_y = spl_y
    details.nfev = result.nfev
    details.kmin = kmin
    details.kmax = kmax
    group.autobk_details = details

    # uncertainties in mu0 and chi: can be fairly slow.
    if calc_uncertainties:
        nchi = len(chi)
        nmue = iemax-ie0 + 1
        redchi = result.redchi
        covar = result.covar / redchi
        jac_chi = np.zeros(nchi*nspl).reshape((nspl, nchi))
        jac_bkg = np.zeros(nmue*nspl).reshape((nspl, nmue))

        cvals, cerrs = [], []
        for i in range(len(coefs)):
            par = result.params[FMT_COEF % i]
            cvals.append(getattr(par, 'value', 0.0))
            cdel = getattr(par, 'stderr', 0.0)
            if cdel is None:
                cdel = 0.0
            cerrs.append(cdel/2.0)
        cvals = np.array(cvals)
        cerrs = np.array(cerrs)

        # find derivatives by hand!
        # NOTE(review): a zero stderr would give a divide-by-zero below;
        # presumably leastsq always reports nonzero stderr here -- confirm
        _k = kraw[:nmue]
        _m = mu[ie0:iemax+1]
        for i in range(nspl):
            cval0 = cvals[i]
            cvals[i] = cval0 + cerrs[i]
            bkg1, chi1 = spline_eval(_k, _m, knots, cvals, order, kout)

            cvals[i] = cval0 - cerrs[i]
            bkg2, chi2 = spline_eval(_k, _m, knots, cvals, order, kout)

            cvals[i] = cval0
            jac_chi[i] = (chi1 - chi2) / (2*cerrs[i])
            jac_bkg[i] = (bkg1 - bkg2) / (2*cerrs[i])

        # uncertainties in mu0 and chi curves from
        # linear propagation through the covariance matrix
        dfchi = np.zeros(nchi)
        dfbkg = np.zeros(nmue)
        for i in range(nspl):
            for j in range(nspl):
                dfchi += jac_chi[i]*jac_chi[j]*covar[i,j]
                dfbkg += jac_bkg[i]*jac_bkg[j]*covar[i,j]

        prob = 0.5*(1.0 + erf(err_sigma/np.sqrt(2.0)))
        dchi = t.ppf(prob, nchi-nspl) * np.sqrt(dfchi*redchi)
        dbkg = t.ppf(prob, nmue-nspl) * np.sqrt(dfbkg*redchi)
        group.delta_chi = dchi
        group.delta_bkg = 0.0*mu
        group.delta_bkg[ie0:ie0+len(dbkg)] = dbkg
def estimate_noise(k, chi=None, group=None, rmin=15.0, rmax=30.0,
                   kweight=1, kmin=0, kmax=20, dk=4, dk2=None, kstep=0.05,
                   kwindow='kaiser', nfft=2048, _larch=None, **kws):
    """estimate noise levels in EXAFS spectrum and estimate highest k
    where data is above the noise level

    Parameters:
    -----------
      k:        1-d array of photo-electron wavenumber in Ang^-1 (or group)
      chi:      1-d array of chi
      group:    output Group  [see Note below]
      rmin:     minimum R value for high-R region of chi(R)
      rmax:     maximum R value for high-R region of chi(R)
      kweight:  exponent for weighting spectra by k**kweight [1]
      kmin:     starting k for FT Window [0]
      kmax:     ending k for FT Window [20]
      dk:       tapering parameter for FT Window [4]
      dk2:      second tapering parameter for FT Window [None]
      kstep:    value to use for delta_k ( Ang^-1) [0.05]
      kwindow:  name of window type ['kaiser']
      nfft:     value to use for N_fft [2048].

    Returns:
    ---------
      None   -- outputs are written to supplied group.

    Values (scalars) written to output group:
        epsilon_k     estimated noise in chi(k)
        epsilon_r     estimated noise in chi(R)
        kmax_suggest  highest estimated k value where |chi(k)| > epsilon_k

    Notes:
    -------
     1. This method uses the high-R portion of chi(R) as a measure of the
        noise level in the chi(R) data and uses Parseval's theorem to
        convert this noise level to that in chi(k).  This method implicitly
        assumes that there is no signal in the high-R portion of the
        spectrum, and that the noise in the spectrum is "white" (independent
        of R).  Each of these assumptions can be questioned.
     2. The estimate for 'kmax_suggest' has a tendency to be fair but
        pessimistic in how far out the chi(k) data goes before being
        dominated by noise.
     3. Follows the 'First Argument Group' convention, so that you can
        either specify all of (an array for 'k', an array for 'chi',
        optional output Group) OR pass a group with 'k' and 'chi' as the
        first argument
    """
    # fix: fcn_name was misspelled 'esitmate_noise'
    k, chi, group = parse_group_args(k, members=('k', 'chi'),
                                     defaults=(chi,), group=group,
                                     fcn_name='estimate_noise')

    # save _sys.xafsGroup -- we want to NOT write to it here!
    savgroup = set_xafsGroup(None, _larch=_larch)
    tmpgroup = Group()
    rmax_out = min(10*pi, rmax+2)

    xftf(k, chi, kmin=kmin, kmax=kmax, rmax_out=rmax_out,
         kweight=kweight, dk=dk, dk2=dk2, kwindow=kwindow,
         nfft=nfft, kstep=kstep, group=tmpgroup, _larch=_larch)

    chir = tmpgroup.chir
    rstep = tmpgroup.r[1] - tmpgroup.r[0]

    irmin = int(0.01 + rmin/rstep)
    # fix: use floor division so the slice bound is always an int;
    # nfft/2 is a float on Python 3 and a float slice index raises TypeError
    irmax = min(nfft//2, int(1.01 + rmax/rstep))
    highr = realimag(chir[irmin:irmax])

    # get average of window function value, scale eps_r scale by this
    # this is imperfect, but improves the result.
    kwin_ave = tmpgroup.kwin.sum()*kstep/(kmax-kmin)
    eps_r = sqrt((highr*highr).sum() / len(highr)) / kwin_ave

    # use Parseval's theorem to convert epsilon_r to epsilon_k,
    # compensating for kweight
    w = 2 * kweight + 1
    scale = sqrt((2*pi*w)/(kstep*(kmax**w - kmin**w)))
    eps_k = scale*eps_r

    # do reverse FT to get chiq array
    xftr(tmpgroup.r, tmpgroup.chir, group=tmpgroup, rmin=0.5, rmax=9.5,
         dr=1.0, window='parzen', nfft=nfft, kstep=kstep, _larch=_larch)

    # sets kmax_suggest to the largest k value for which
    # | chi(q) / k**kweight| > epsilon_k
    iq0 = index_of(tmpgroup.q, (kmax+kmin)/2.0)
    tst = tmpgroup.chiq_mag[iq0:] / (tmpgroup.q[iq0:])**kweight
    kmax_suggest = tmpgroup.q[iq0 + where(tst < eps_k)[0][0]]

    # restore original _sys.xafsGroup, set output variables
    # (guarded: the original dereferenced _larch unconditionally and would
    # crash when called with _larch=None)
    if _larch is not None:
        _larch.symtable._sys.xafsGroup = savgroup
    group = set_xafsGroup(group, _larch=_larch)
    group.epsilon_k = eps_k
    group.epsilon_r = eps_r
    group.kmax_suggest = kmax_suggest
def get_plot_arrays(self, dgroup):
    """set plotting metadata (plot_yarrays, axis labels, extras) on dgroup
    according to the current selection of the 'plot one' choice widget.

    Arguments
    ---------
    dgroup   data group to annotate; if None, nothing is done.

    Side effects: writes plot_xlabel, plot_ylabel, plot_y2label,
    plot_yarrays, plot_extras (and possibly derived data arrays such as
    dmude or pre_edge_sub) onto dgroup, and may trigger self.process() to
    compute missing attributes.
    """
    lab = plotlabels.norm
    if dgroup is None:
        return
    dgroup.plot_y2label = None
    dgroup.plot_xlabel = plotlabels.energy
    dgroup.plot_yarrays = [('norm', PLOTOPTS_1, lab)]

    # non-XAS data: only generic x/y (and derivative) plots are offered
    if dgroup.datatype != 'xas':
        pchoice = PlotOne_Choices_nonxas[self.plotone_op.GetStringSelection()]
        dgroup.plot_xlabel = 'x'
        dgroup.plot_ylabel = 'y'
        dgroup.plot_yarrays = [('ydat', PLOTOPTS_1, 'ydat')]
        dgroup.dmude = np.gradient(dgroup.ydat)/np.gradient(dgroup.xdat)
        if pchoice == 'dmude':
            dgroup.plot_ylabel = 'dy/dx'
            dgroup.plot_yarrays = [('dmude', PLOTOPTS_1, 'dy/dx')]
        elif pchoice == 'norm+dnormde':
            lab = plotlabels.norm
            dgroup.plot_y2label = 'dy/dx'
            dgroup.plot_yarrays = [('ydat', PLOTOPTS_1, 'y'),
                                   ('dnormde', PLOTOPTS_D, 'dy/dx')]
        return

    # XAS data: attributes that must exist before plotting
    req_attrs = ['e0', 'norm', 'dmude', 'pre_edge']

    pchoice = PlotOne_Choices[self.plotone_op.GetStringSelection()]
    if pchoice in ('mu', 'norm', 'flat', 'dmude'):
        lab = getattr(plotlabels, pchoice)
        dgroup.plot_yarrays = [(pchoice, PLOTOPTS_1, lab)]
    elif pchoice == 'prelines':
        dgroup.plot_yarrays = [('mu', PLOTOPTS_1, plotlabels.mu),
                               ('pre_edge', PLOTOPTS_2, 'pre edge'),
                               ('post_edge', PLOTOPTS_2, 'post edge')]
    elif pchoice == 'preedge':
        lab = r'pre-edge subtracted $\mu$'
        # reconstruct pre-edge-subtracted mu from norm and edge_step
        dgroup.pre_edge_sub = dgroup.norm * dgroup.edge_step
        dgroup.plot_yarrays = [('pre_edge_sub', PLOTOPTS_1, lab)]
    elif pchoice == 'mu+dmude':
        lab = plotlabels.mu
        lab2 = plotlabels.dmude
        dgroup.plot_yarrays = [('mu', PLOTOPTS_1, lab),
                               ('dmude', PLOTOPTS_D, lab2)]
        dgroup.plot_y2label = lab2
    elif pchoice == 'norm+dnormde':
        lab = plotlabels.norm
        lab2 = plotlabels.dmude + ' (normalized)'
        dgroup.plot_yarrays = [('norm', PLOTOPTS_1, lab),
                               ('dnormde', PLOTOPTS_D, lab2)]
        dgroup.plot_y2label = lab2
    elif pchoice == 'mback_norm':
        req_attrs.append('mback_norm')
        lab = r'$\mu$'
        # mback results are computed lazily, only when first requested
        if not hasattr(dgroup, 'mback_mu'):
            self.process(dgroup=dgroup, force_mback=True)
        dgroup.plot_yarrays = [('mu', PLOTOPTS_1, lab),
                               ('mback_mu', PLOTOPTS_2, r'tabulated $\mu(E)$')]
    elif pchoice == 'mback_poly':
        req_attrs.append('mback_norm')
        lab = plotlabels.norm
        if not hasattr(dgroup, 'mback_mu'):
            self.process(dgroup=dgroup, force_mback=True)
        dgroup.plot_yarrays = [('norm_mback', PLOTOPTS_1, 'mback'),
                               ('norm_poly', PLOTOPTS_2, 'polynomial')]
    elif pchoice == 'area_norm':
        dgroup.plot_yarrays = [('norm_area', PLOTOPTS_1, 'area'),
                               ('norm_poly', PLOTOPTS_2, 'polynomial')]

    dgroup.plot_ylabel = lab

    # run processing if any required attribute is still missing
    needs_proc = False
    for attr in req_attrs:
        needs_proc = needs_proc or (not hasattr(dgroup, attr))
    if needs_proc:
        self.process(dgroup=dgroup, noskip=True)

    # y-array used to place the e0 marker; falls back to mu
    y4e0 = dgroup.ydat = getattr(dgroup, dgroup.plot_yarrays[0][0], dgroup.mu)
    dgroup.plot_extras = []
    if self.wids['showe0'].IsChecked():
        ie0 = index_of(dgroup.energy, dgroup.e0)
        dgroup.plot_extras.append(('marker', dgroup.e0, y4e0[ie0], {}))