def fit(self, y, x=None, dy=None, _larch=None, **kws):
    fcn_kws = {'y': y, 'x': x, 'dy': dy}
    fcn_kws.update(kws)
    if _larch is not None:
        self._larch = _larch
    f = Minimizer(self.__objective, self.params, _larch=self._larch,
                  fcn_kws=fcn_kws, scale_covar=True)
    f.leastsq()
    return f
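# Usage sketch (hypothetical, not from the source): assuming `model` is an
# instance of the class defining fit() above, with `model.params` already
# holding Parameter objects and __objective returning a residual array,
# running and reporting a fit would look roughly like:
#
#   result = model.fit(ydata, x=xdata, dy=sigma, _larch=_larch)
#   print(fitting.fit_report(model.params, _larch=_larch))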
def pre_edge(energy, mu=None, group=None, e0=None, step=None,
             nnorm=3, nvict=0, pre1=None, pre2=-50,
             norm1=100, norm2=None, make_flat=True, _larch=None):
    """pre edge subtraction, normalization for XAFS

    This performs a number of steps:
       1. determine E0 (if not supplied) from max of deriv(mu)
       2. fit a line to the region below the edge
       3. fit a polynomial to the region above the edge
       4. extrapolate the two curves to E0 to determine the edge jump

    Arguments
    ---------
    energy:    array of x-ray energies, in eV, or group (see note)
    mu:        array of mu(E)
    group:     output group
    e0:        edge energy, in eV.  If None, it will be determined here.
    step:      edge jump.  If None, it will be determined here.
    pre1:      low E range (relative to E0) for pre-edge fit
    pre2:      high E range (relative to E0) for pre-edge fit
    nvict:     energy exponent to use for pre-edge fit.  See Note
    norm1:     low E range (relative to E0) for post-edge fit
    norm2:     high E range (relative to E0) for post-edge fit
    nnorm:     degree of polynomial (ie, nnorm+1 coefficients will be found)
               for post-edge normalization curve.  Default=3 (cubic), max=5
    make_flat: boolean (Default True) to calculate flattened output.

    Returns
    -------
    None

    The following attributes will be written to the output group:
        e0          energy origin
        edge_step   edge step
        norm        normalized mu(E)
        flat        flattened, normalized mu(E)
        pre_edge    determined pre-edge curve
        post_edge   determined post-edge, normalization curve
        dmude       derivative of mu(E)

    (if the output group is None, _sys.xafsGroup will be written to)

    Notes
    -----
    1  nvict gives an exponent to the energy term for the fits to the
       pre-edge and the post-edge region.  For the pre-edge, a line
       (m * energy + b) is fit to mu(energy)*energy**nvict over the
       pre-edge region, energy=[e0+pre1, e0+pre2].  For the post-edge,
       a polynomial of order nnorm will be fit to
       mu(energy)*energy**nvict of the post-edge region
       energy=[e0+norm1, e0+norm2].

    2  If the first argument is a Group, it must contain 'energy' and 'mu'.
       If it exists, group.e0 will be used as e0.
       See First Argument Group in Documentation.
    """
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='pre_edge')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    pre_dat = preedge(energy, mu, e0=e0, step=step, nnorm=nnorm,
                      nvict=nvict, pre1=pre1, pre2=pre2,
                      norm1=norm1, norm2=norm2)

    group = set_xafsGroup(group, _larch=_larch)

    e0 = pre_dat['e0']
    norm = pre_dat['norm']
    norm1 = pre_dat['norm1']
    norm2 = pre_dat['norm2']

    # generate flattened spectra, by fitting a quadratic to .norm
    # and removing that.
    flat = norm
    ie0 = index_nearest(energy, e0)
    p1 = index_of(energy, norm1 + e0)
    p2 = index_nearest(energy, norm2 + e0)
    if p2 - p1 < 2:
        p2 = min(len(energy), p1 + 2)

    if make_flat and p2 - p1 > 4:
        enx, mux = remove_nans2(energy[p1:p2], norm[p1:p2])
        # enx, mux = (energy[p1:p2], norm[p1:p2])
        fpars = Group(c0=Parameter(0, vary=True),
                      c1=Parameter(0, vary=True),
                      c2=Parameter(0, vary=True),
                      en=enx, mu=mux)
        fit = Minimizer(flat_resid, fpars, _larch=_larch, toler=1.e-5)
        try:
            fit.leastsq()
        except (TypeError, ValueError):
            pass
        fc0, fc1, fc2 = fpars.c0.value, fpars.c1.value, fpars.c2.value
        flat_diff = fc0 + energy * (fc1 + energy * fc2)
        flat = norm - flat_diff + flat_diff[ie0]
        flat[:ie0] = norm[:ie0]

    group.e0 = e0
    group.norm = norm
    group.flat = flat
    group.dmude = np.gradient(mu) / np.gradient(energy)
    group.edge_step = pre_dat['edge_step']
    group.pre_edge = pre_dat['pre_edge']
    group.post_edge = pre_dat['post_edge']

    group.pre_edge_details = Group()
    group.pre_edge_details.pre1 = pre_dat['pre1']
    group.pre_edge_details.pre2 = pre_dat['pre2']
    group.pre_edge_details.nnorm = pre_dat['nnorm']
    group.pre_edge_details.norm1 = pre_dat['norm1']
    group.pre_edge_details.norm2 = pre_dat['norm2']
    group.pre_edge_details.pre_slope = pre_dat['precoefs'][0]
    group.pre_edge_details.pre_offset = pre_dat['precoefs'][1]

    for i in range(MAX_NNORM):
        if hasattr(group, 'norm_c%i' % i):
            delattr(group, 'norm_c%i' % i)
    for i, c in enumerate(pre_dat['norm_coefs']):
        setattr(group.pre_edge_details, 'norm_c%i' % i, c)
    return
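# Usage sketch for pre_edge() (the data file and column layout below are
# hypothetical; only the call signature and output attributes come from the
# function above):
#
#   raw = np.loadtxt('xmu.dat')                 # hypothetical 2-column file
#   dat = Group(energy=raw[:, 0], mu=raw[:, 1])
#   pre_edge(dat, group=dat, _larch=_larch)     # First Argument Group style
#   print(dat.e0, dat.edge_step)
#   # dat.norm, dat.flat, dat.pre_edge, dat.post_edge are now available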
def feffit(params, datasets, _larch=None, rmax_out=10,
           path_outputs=True, **kws):
    """execute a Feffit fit: a fit of feff paths to a list of datasets

    Parameters:
    -----------
      params:       group containing parameters for fit
      datasets:     Feffit Dataset group or list of Feffit Dataset groups.
      rmax_out:     maximum R value to calculate output arrays.
      path_outputs: Flag to set whether all Path outputs should be written.

    Returns:
    --------
      a fit results group.  This will contain subgroups of:

        datasets: an array of FeffitDataSet groups used in the fit.
        params:   This will be identical to the input parameter group.
        fit:      an object which points to the low-level fit.

      Statistical parameters will be put into the params group.  Each
      dataset will have a 'data' and 'model' subgroup, each with arrays:
         k            wavenumber array of k
         chi          chi(k).
         kwin         window Omega(k) (length of input chi(k)).
         r            uniform array of R, out to rmax_out.
         chir         complex array of chi(R).
         chir_mag     magnitude of chi(R).
         chir_pha     phase of chi(R).
         chir_re      real part of chi(R).
         chir_im      imaginary part of chi(R).
    """
    def _resid(params, datasets=None, _larch=None, **kwargs):
        """ this is the residual function"""
        return concatenate([d._residual() for d in datasets])

    if isNamedClass(datasets, FeffitDataSet):
        datasets = [datasets]
    for ds in datasets:
        if not isNamedClass(ds, FeffitDataSet):
            print("feffit needs a list of FeffitDataSets")
            return
    fitkws = dict(datasets=datasets)
    fit = Minimizer(_resid, params, fcn_kws=fitkws,
                    scale_covar=True, _larch=_larch, **kws)
    fit.leastsq()
    dat = concatenate([d._residual(data_only=True) for d in datasets])
    params.rfactor = (params.fit_details.fvec**2).sum() / (dat**2).sum()

    # remove temporary parameters for _feffdat and reff
    # that had been placed by _pathparams()
    # for pname in ('_feffdat', 'reff'):
    #     if hasattr(params, pname):
    #         delattr(params, pname)

    n_idp = 0
    for ds in datasets:
        n_idp += ds.n_idp

    # here we rescale chi-square and reduced chi-square to n_idp
    npts = len(params.residual)
    params.chi_square *= n_idp*1.0 / npts
    params.chi_reduced = params.chi_square / (n_idp*1.0 - params.nvarys)

    # With scale_covar=True, Minimizer() scales the uncertainties by
    # reduced chi-square assuming params.nfree is the correct value for
    # degrees-of-freedom.  But n_idp - params.nvarys is a better measure,
    # so we rescale uncertainties here.
    covar = getattr(params, 'covar', None)
    if covar is not None:
        err_scale = (params.nfree / (n_idp - params.nvarys))
        for name in dir(params):
            p = getattr(params, name)
            if isParameter(p) and p.vary:
                p.stderr *= sqrt(err_scale)

        # next, propagate uncertainties to constraints and path parameters.
        params.covar *= err_scale
        vsave, vbest = {}, []

        # 1. save current params
        for vname in params.covar_vars:
            par = getattr(params, vname)
            vsave[vname] = par
            vbest.append(par.value)

        # 2. get correlated uncertainties, set params accordingly
        uvars = correlated_values(vbest, params.covar)

        # 3. evaluate constrained params, save stderr
        for nam in dir(params):
            obj = getattr(params, nam)
            eval_stderr(obj, uvars, params.covar_vars, vsave, _larch)

        # 4. evaluate path params, save stderr
        for ds in datasets:
            for p in ds.pathlist:
                _larch.symtable._sys.paramGroup._feffdat = copy(p._feffdat)
                _larch.symtable._sys.paramGroup.reff = p._feffdat.reff
                for param in ('degen', 's02', 'e0', 'ei', 'deltar',
                              'sigma2', 'third', 'fourth'):
                    obj = getattr(p, param)
                    eval_stderr(obj, uvars, params.covar_vars, vsave, _larch)

        # restore saved parameters again
        for vname in params.covar_vars:
            setattr(params, vname, vsave[vname])

        # clear any errors evaluating uncertainties
        if len(_larch.error) > 0:
            _larch.error = []

    # here we create output arrays for chi(k), chi(r):
    for ds in datasets:
        ds.save_ffts(rmax_out=rmax_out, path_outputs=path_outputs)

    return Group(name='feffit fit results', fit=fit,
                 params=params, datasets=datasets)
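# Usage sketch for feffit() (illustrative only; the construction of the
# parameter group and the FeffitDataSet arguments below are assumptions,
# not shown in this code):
#
#   params = Group(amp=Parameter(1.0, vary=True),
#                  enot=Parameter(0.0, vary=True))
#   dset = FeffitDataSet(...)   # built from measured chi(k) plus Feff paths
#   result = feffit(params, dset, _larch=_larch, rmax_out=10)
#   # result.params holds best-fit values, stderr, chi_square and rfactor;
#   # result.datasets[0].data / .model hold k, chi, r, chir_mag, etc.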
def onFitPeaks(self, event=None):
    opts = {}
    filters, peaks = [], []
    sig, det, bgr = {}, {}, {}

    opts['flyield'] = self.wids.flyield_use.IsChecked()
    opts['xray_en'] = self.wids.xray_en.GetValue()
    opts['emin'] = self.wids.fit_emin.GetValue()
    opts['emax'] = self.wids.fit_emax.GetValue()

    det['use'] = self.wids.det_use.IsChecked()
    det['thickness'] = self.wids.det_thk.GetValue()
    det['material'] = self.wids.det_mat.GetStringSelection()

    bgr['use'] = self.wids.bgr_use.IsChecked()
    bgr['width'] = self.wids.bgr_width.GetValue()
    bgr['compress'] = int(self.wids.bgr_compress.GetStringSelection())
    bgr['exponent'] = int(self.wids.bgr_exponent.GetStringSelection())

    sig['offset'] = self.wids.sig_offset.param
    sig['slope'] = self.wids.sig_slope.param
    sig['quad'] = self.wids.sig_quad.param

    for k in self.wids.filters:
        f = (k[0].GetStringSelection(), k[1].GetValue(), k[2].param)
        filters.append(f)

    for k in self.wids.peaks:
        use = k[0].IsChecked()
        p = (k[0].IsChecked(), k[1].param, k[2].param, k[3].param)
        peaks.append(p)
        if not use:
            k[1].param.vary = False
            k[2].param.vary = False
            k[3].param.vary = False

    opts['det'] = det
    opts['bgr'] = bgr
    opts['sig'] = sig
    opts['filters'] = filters
    opts['peaks'] = peaks

    mca = self.mca
    mca.data = mca.counts * 1.0
    energy = mca.energy
    _larch = self.parent.larch
    if bgr['use']:
        bgr.pop('use')
        xrf_background(energy=mca.energy, counts=mca.counts,
                       group=mca, _larch=_larch, **bgr)
        opts['use_bgr'] = True
    opts['mca'] = mca
    if det['use']:
        mu = material_mu(det['material'], energy*1000.0, _larch=_larch) / 10.0
        t = det['thickness']
        mca.det_atten = np.exp(-t*mu)

    fit = Minimizer(xrf_resid, self.paramgroup, toler=1.e-4,
                    _larch=_larch, fcn_kws=opts)
    fit.leastsq()
    parent = self.parent
    parent.oplot(mca.energy, mca.model,
                 label='fit', style='solid', color='#DD33DD')
    print(fitting.fit_report(self.paramgroup, _larch=_larch))
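# Note on the pattern above (an observation, not code from the source):
# Minimizer(xrf_resid, self.paramgroup, fcn_kws=opts) forwards each key of
# `opts` ('mca', 'det', 'bgr', 'sig', 'filters', 'peaks', 'emin', 'emax', ...)
# as a keyword argument to xrf_resid on every iteration, so the residual
# function is expected to accept a matching signature, roughly:
#
#   def xrf_resid(paramgroup, mca=None, det=None, bgr=None, sig=None,
#                 filters=None, peaks=None, emin=None, emax=None, **kws):
#       ...   # build mca.model from the parameters, return data - model
#
# (the exact signature and body of xrf_resid are assumptions here).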
def autobk(energy, mu=None, group=None, rbkg=1, nknots=None, e0=None,
           edge_step=None, kmin=0, kmax=None, kweight=1, dk=0,
           win='hanning', k_std=None, chi_std=None, nfft=2048, kstep=0.05,
           pre_edge_kws=None, nclamp=4, clamp_lo=1, clamp_hi=1,
           calc_uncertainties=False, _larch=None, **kws):
    """Use Autobk algorithm to remove XAFS background

    Parameters:
    -----------
      energy:    1-d array of x-ray energies, in eV, or group
      mu:        1-d array of mu(E)
      group:     output group (and input group for e0 and edge_step).
      rbkg:      distance (in Ang) for chi(R) above which the signal is
                 ignored.  Default = 1.
      e0:        edge energy, in eV.  If None, it will be determined.
      edge_step: edge step.  If None, it will be determined.
      pre_edge_kws:  keyword arguments to pass to pre_edge()
      nknots:    number of knots in spline.  If None, it will be determined.
      kmin:      minimum k value   [0]
      kmax:      maximum k value   [full data range]
      kweight:   k weight for FFT  [1]
      dk:        FFT window parameter  [0]
      win:       FFT window function name  ['hanning']
      nfft:      array size to use for FFT [2048]
      kstep:     k step size to use for FFT [0.05]
      k_std:     optional k array for standard chi(k)
      chi_std:   optional chi array for standard chi(k)
      nclamp:    number of energy end-points for clamp [4]
      clamp_lo:  weight of low-energy clamp [1]
      clamp_hi:  weight of high-energy clamp [1]
      calc_uncertainties:  Flag to calculate uncertainties in
                 mu_0(E) and chi(k) [False]

    Output arrays are written to the provided group.

    Follows the 'First Argument Group' convention.
    """
    msg = _larch.writer.write
    if 'kw' in kws:
        kweight = kws.pop('kw')
    if len(kws) > 0:
        msg('Unrecognized arguments for autobk():\n')
        msg('    %s\n' % (', '.join(kws.keys())))
        return
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='autobk')
    energy = remove_dups(energy)

    # if e0 or edge_step are not specified, get them, either from the
    # passed-in group or from running pre_edge()
    group = set_xafsGroup(group, _larch=_larch)
    if edge_step is None and isgroup(group, 'edge_step'):
        edge_step = group.edge_step
    if e0 is None and isgroup(group, 'e0'):
        e0 = group.e0
    if e0 is None or edge_step is None:
        # need to run pre_edge:
        pre_kws = dict(nnorm=3, nvict=0, pre1=None,
                       pre2=-50., norm1=100., norm2=None)
        if pre_edge_kws is not None:
            pre_kws.update(pre_edge_kws)
        pre_edge(energy, mu, group=group, _larch=_larch, **pre_kws)
        if e0 is None:
            e0 = group.e0
        if edge_step is None:
            edge_step = group.edge_step
    if e0 is None or edge_step is None:
        msg('autobk() could not determine e0 or edge_step!: try running pre_edge first\n')
        return

    # get array indices for rbkg and e0: irbkg, ie0
    ie0 = index_of(energy, e0)
    rgrid = np.pi / (kstep*nfft)
    if rbkg < 2*rgrid:
        rbkg = 2*rgrid
    irbkg = int(1.01 + rbkg/rgrid)

    # save ungridded k (kraw) and gridded k (kout)
    # and ftwin (*k-weighting) for FT in residual
    enpe = energy[ie0:] - e0
    kraw = np.sign(enpe) * np.sqrt(ETOK*abs(enpe))
    if kmax is None:
        kmax = max(kraw)
    else:
        kmax = max(0, min(max(kraw), kmax))
    kout = kstep * np.arange(int(1.01 + kmax/kstep), dtype='float64')
    iemax = min(len(energy), 2 + index_of(energy, e0 + kmax*kmax/ETOK)) - 1

    # interpolate provided chi(k) onto the kout grid
    if chi_std is not None and k_std is not None:
        chi_std = np.interp(kout, k_std, chi_std)

    # pre-load FT window
    ftwin = kout**kweight * ftwindow(kout, xmin=kmin, xmax=kmax,
                                     window=win, dx=dk)

    # calc k-value and initial guess for y-values of spline params
    nspl = max(4, min(128, 2*int(rbkg*(kmax-kmin)/np.pi) + 1))
    spl_y, spl_k, spl_e = np.zeros(nspl), np.zeros(nspl), np.zeros(nspl)
    for i in range(nspl):
        q = kmin + i*(kmax-kmin)/(nspl - 1)
        ik = index_nearest(kraw, q)
        i1 = min(len(kraw)-1, ik + 5)
        i2 = max(0, ik - 5)
        spl_k[i] = kraw[ik]
        spl_e[i] = energy[ik+ie0]
        spl_y[i] = (2*mu[ik+ie0] + mu[i1+ie0] + mu[i2+ie0]) / 4.0

    # get spline representation: knots, coefs, order=3
    # coefs will be varied in fit.
    knots, coefs, order = splrep(spl_k, spl_y)

    # set fit parameters from initial coefficients
    params = Group()
    for i in range(len(coefs)):
        name = FMT_COEF % i
        p = Parameter(coefs[i], name=name, vary=i < len(spl_y))
        p._getval()
        setattr(params, name, p)

    initbkg, initchi = spline_eval(kraw[:iemax-ie0+1], mu[ie0:iemax+1],
                                   knots, coefs, order, kout)

    # do fit
    fit = Minimizer(__resid, params, _larch=_larch, toler=1.e-4,
                    fcn_kws=dict(ncoefs=len(coefs), chi_std=chi_std,
                                 knots=knots, order=order,
                                 kraw=kraw[:iemax-ie0+1],
                                 mu=mu[ie0:iemax+1], irbkg=irbkg, kout=kout,
                                 ftwin=ftwin, kweight=kweight, nfft=nfft,
                                 nclamp=nclamp, clamp_lo=clamp_lo,
                                 clamp_hi=clamp_hi))
    fit.leastsq()

    # write final results
    coefs = [getattr(params, FMT_COEF % i) for i in range(len(coefs))]
    bkg, chi = spline_eval(kraw[:iemax-ie0+1], mu[ie0:iemax+1],
                           knots, coefs, order, kout)
    obkg = np.copy(mu)
    obkg[ie0:ie0+len(bkg)] = bkg

    # outputs to group
    group = set_xafsGroup(group, _larch=_larch)
    group.bkg = obkg
    group.chie = (mu - obkg) / edge_step
    group.k = kout
    group.chi = chi / edge_step

    # now fill in 'autobk_details' group
    params.init_bkg = np.copy(mu)
    params.init_bkg[ie0:ie0+len(bkg)] = initbkg
    params.init_chi = initchi / edge_step
    params.knots_e = spl_e
    params.knots_y = np.array([coefs[i] for i in range(nspl)])
    params.init_knots_y = spl_y
    params.nfev = params.fit_details.nfev
    params.kmin = kmin
    params.kmax = kmax
    group.autobk_details = params

    # uncertainties in mu0 and chi: fairly slow!!
    if HAS_UNCERTAIN and calc_uncertainties:
        vbest, vstd = [], []
        for n in fit.var_names:
            par = getattr(params, n)
            vbest.append(par.value)
            vstd.append(par.stderr)
        uvars = uncertainties.correlated_values(vbest, params.covar)
        # uncertainty in bkg (aka mu0)
        # note that much of this is working around limitations in the
        # uncertainties package that make it
        #  1. take an argument list (not array)
        #  2. work on returned scalars (but not arrays)
        #  3. not handle kw args and *args well (so the loop variable
        #     "index" below is read from inside the wrapped functions)
        nkx = iemax - ie0 + 1

        def my_dsplev(*args):
            coefs = np.array(args)
            return splev(kraw[:nkx], [knots, coefs, order])[index]
        fdbkg = uncertainties.wrap(my_dsplev)
        dmu0 = []
        for index in range(len(bkg)):
            dmu0.append(fdbkg(*uvars).std_dev())
        group.delta_bkg = np.zeros(len(mu))
        group.delta_bkg[ie0:ie0+len(bkg)] = np.array(dmu0)

        # uncertainty in chi (see notes above)
        def my_dchi(*args):
            coefs = np.array(args)
            b, chi = spline_eval(kraw[:nkx], mu[ie0:iemax+1],
                                 knots, coefs, order, kout)
            return chi[index]
        fdchi = uncertainties.wrap(my_dchi)
        dchi = []
        for index in range(len(kout)):
            dchi.append(fdchi(*uvars).std_dev())
        group.delta_chi = np.array(dchi) / edge_step
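# Usage sketch for autobk() (hypothetical group `dat`; only the keyword names
# come from the signature above).  pre_edge() is run automatically when e0 and
# edge_step are not already present on the group:
#
#   autobk(dat, group=dat, rbkg=1.0, kweight=1, _larch=_larch)
#   # dat.bkg            : fitted background mu0(E)
#   # dat.k, dat.chi     : chi(k) on a uniform k-grid of step kstep
#   # dat.autobk_details : spline knots, initial guesses, fit statistics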
def mback(energy, mu, group=None, order=3, z=None, edge='K', e0=None,
          emin=None, emax=None, whiteline=None, leexiang=False,
          tables='chantler', fit_erfc=False, return_f1=False, _larch=None):
    """
    Match mu(E) data for tabulated f''(E) using the MBACK algorithm
    and, optionally, the Lee & Xiang extension

    Arguments:
      energy, mu: arrays of energy and mu(E)
      order:      order of polynomial [3]
      group:      output group (and input group for e0)
      z:          Z number of absorber
      edge:       absorption edge (K, L3)
      e0:         edge energy
      emin:       beginning energy for fit
      emax:       ending energy for fit
      whiteline:  exclusion zone around white lines
      leexiang:   flag to use the Lee & Xiang extension
      tables:     'chantler' (default) or 'cl'
      fit_erfc:   True to float parameters of error function
      return_f1:  True to put the f1 array in the group

    Returns:
      group.f2:            tabulated f2(E)
      group.f1:            tabulated f1(E) (if return_f1 is True)
      group.fpp:           matched data
      group.mback_params:  Group of parameters for the minimization

    References:
      * MBACK (Weng, Waldo, Penner-Hahn): http://dx.doi.org/10.1086/303711
      * Lee and Xiang: http://dx.doi.org/10.1088/0004-637X/702/2/970
      * Cromer-Liberman: http://dx.doi.org/10.1063/1.1674266
      * Chantler: http://dx.doi.org/10.1063/1.555974
    """
    order = int(order)
    if order < 1:          # set order of polynomial
        order = 1
    if order > MAXORDER:
        order = MAXORDER

    ### implement the First Argument Group convention
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='mback')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    group = set_xafsGroup(group, _larch=_larch)

    if e0 is None:         # need to run find_e0:
        e0 = xray_edge(z, edge, _larch=_larch)[0]
    if e0 is None:
        e0 = group.e0
    if e0 is None:
        find_e0(energy, mu, group=group)

    ### theta is an array used to exclude the regions <emin, >emax, and
    ### around white lines, theta=0.0 in excluded regions, theta=1.0 elsewhere
    (i1, i2) = (0, len(energy) - 1)
    if emin is not None:
        i1 = index_of(energy, emin)
    if emax is not None:
        i2 = index_of(energy, emax)

    theta = np.ones(len(energy))   # default: 1 throughout
    theta[0:i1] = 0
    theta[i2:-1] = 0

    if whiteline:
        pre = 1.0*(energy < e0)
        post = 1.0*(energy > e0 + float(whiteline))
        theta = theta * (pre + post)
        if edge.lower().startswith('l'):
            l2 = xray_edge(z, 'L2', _larch=_larch)[0]
            l2_pre = 1.0*(energy < l2)
            l2_post = 1.0*(energy > l2 + float(whiteline))
            theta = theta * (l2_pre + l2_post)

    ## this is used to weight the pre- and post-edge differently as
    ## defined in the MBACK paper
    weight1 = 1*(energy < e0)
    weight2 = 1*(energy > e0)
    weight = np.sqrt(sum(weight1))*weight1 + np.sqrt(sum(weight2))*weight2

    ## get the f'' function from CL or Chantler
    if tables.lower() == 'chantler':
        f1 = f1_chantler(z, energy, _larch=_larch)
        f2 = f2_chantler(z, energy, _larch=_larch)
    else:
        (f1, f2) = f1f2(z, energy, edge=edge, _larch=_larch)
    group.f2 = f2
    if return_f1:
        group.f1 = f1

    n = edge
    if edge.lower().startswith('l'):
        n = 'L'

    params = Group(s=Parameter(1, vary=True, _larch=_larch),     # scale of data
                   xi=Parameter(50, vary=fit_erfc, min=0,
                                _larch=_larch),                   # width of erfc
                   em=Parameter(xray_line(z, n, _larch=_larch)[0],
                                vary=False, _larch=_larch),       # erfc centroid
                   e0=Parameter(e0, vary=False, _larch=_larch),   # abs. edge energy
                   ## various arrays needed by the objective function
                   en=energy, mu=mu, f2=group.f2, weight=weight,
                   theta=theta, leexiang=leexiang, _larch=_larch)

    if fit_erfc:
        params.a = Parameter(1, vary=True, _larch=_larch)    # amplitude of erfc
    else:
        params.a = Parameter(0, vary=False, _larch=_larch)   # amplitude of erfc

    for i in range(order):   # polynomial coefficients
        setattr(params, 'c%d' % i, Parameter(0, vary=True, _larch=_larch))

    fit = Minimizer(match_f2, params, _larch=_larch, toler=1.e-5)
    fit.leastsq()

    eoff = energy - params.e0.value
    normalization_function = (params.a.value *
                              erfc((energy - params.em.value)/params.xi.value) +
                              params.c0.value)
    for i in range(MAXORDER):
        j = i + 1
        attr = 'c%d' % j
        if hasattr(params, attr):
            normalization_function += getattr(getattr(params, attr), 'value') * eoff**j

    group.fpp = params.s*mu - normalization_function
    group.mback_params = params
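# Usage sketch for mback() (hypothetical group `dat` and values; z and edge
# must match the measured absorber):
#
#   mback(dat.energy, dat.mu, group=dat, z=29, edge='K',
#         order=3, fit_erfc=True, _larch=_larch)
#   # dat.fpp matches the scaled data to tabulated f''(E);
#   # dat.f2 holds tabulated f2(E), dat.mback_params the fitted parameters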
def autobk(energy, mu=None, group=None, rbkg=1, nknots=None, e0=None,
           edge_step=None, kmin=0, kmax=None, kweight=1, dk=0,
           win='hanning', k_std=None, chi_std=None, nfft=2048, kstep=0.05,
           pre_edge_kws=None, nclamp=4, clamp_lo=1, clamp_hi=1,
           calc_uncertainties=True, err_sigma=1, _larch=None, **kws):
    """Use Autobk algorithm to remove XAFS background

    Parameters:
    -----------
      energy:    1-d array of x-ray energies, in eV, or group
      mu:        1-d array of mu(E)
      group:     output group (and input group for e0 and edge_step).
      rbkg:      distance (in Ang) for chi(R) above which the signal is
                 ignored.  Default = 1.
      e0:        edge energy, in eV.  If None, it will be determined.
      edge_step: edge step.  If None, it will be determined.
      pre_edge_kws:  keyword arguments to pass to pre_edge()
      nknots:    number of knots in spline.  If None, it will be determined.
      kmin:      minimum k value   [0]
      kmax:      maximum k value   [full data range]
      kweight:   k weight for FFT  [1]
      dk:        FFT window parameter  [0]
      win:       FFT window function name  ['hanning']
      nfft:      array size to use for FFT [2048]
      kstep:     k step size to use for FFT [0.05]
      k_std:     optional k array for standard chi(k)
      chi_std:   optional chi array for standard chi(k)
      nclamp:    number of energy end-points for clamp [4]
      clamp_lo:  weight of low-energy clamp [1]
      clamp_hi:  weight of high-energy clamp [1]
      calc_uncertainties:  Flag to calculate uncertainties in
                 mu_0(E) and chi(k) [True]
      err_sigma: sigma level for uncertainties in mu_0(E) and chi(k) [1]

    Output arrays are written to the provided group.

    Follows the 'First Argument Group' convention.
    """
    msg = _larch.writer.write
    if 'kw' in kws:
        kweight = kws.pop('kw')
    if len(kws) > 0:
        msg('Unrecognized arguments for autobk():\n')
        msg('    %s\n' % (', '.join(kws.keys())))
        return
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='autobk')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()
    energy = remove_dups(energy)

    # if e0 or edge_step are not specified, get them, either from the
    # passed-in group or from running pre_edge()
    group = set_xafsGroup(group, _larch=_larch)
    if edge_step is None and isgroup(group, 'edge_step'):
        edge_step = group.edge_step
    if e0 is None and isgroup(group, 'e0'):
        e0 = group.e0
    if e0 is None or edge_step is None:
        # need to run pre_edge:
        pre_kws = dict(nnorm=3, nvict=0, pre1=None,
                       pre2=-50., norm1=100., norm2=None)
        if pre_edge_kws is not None:
            pre_kws.update(pre_edge_kws)
        pre_edge(energy, mu, group=group, _larch=_larch, **pre_kws)
        if e0 is None:
            e0 = group.e0
        if edge_step is None:
            edge_step = group.edge_step
    if e0 is None or edge_step is None:
        msg('autobk() could not determine e0 or edge_step!: try running pre_edge first\n')
        return

    # get array indices for rbkg and e0: irbkg, ie0
    ie0 = index_of(energy, e0)
    rgrid = np.pi / (kstep*nfft)
    if rbkg < 2*rgrid:
        rbkg = 2*rgrid
    irbkg = int(1.01 + rbkg/rgrid)

    # save ungridded k (kraw) and gridded k (kout)
    # and ftwin (*k-weighting) for FT in residual
    enpe = energy[ie0:] - e0
    kraw = np.sign(enpe) * np.sqrt(ETOK*abs(enpe))
    if kmax is None:
        kmax = max(kraw)
    else:
        kmax = max(0, min(max(kraw), kmax))
    kout = kstep * np.arange(int(1.01 + kmax/kstep), dtype='float64')
    iemax = min(len(energy), 2 + index_of(energy, e0 + kmax*kmax/ETOK)) - 1

    # interpolate provided chi(k) onto the kout grid
    if chi_std is not None and k_std is not None:
        chi_std = np.interp(kout, k_std, chi_std)

    # pre-load FT window
    ftwin = kout**kweight * ftwindow(kout, xmin=kmin, xmax=kmax,
                                     window=win, dx=dk)

    # calc k-value and initial guess for y-values of spline params
    nspl = max(4, min(128, 2*int(rbkg*(kmax-kmin)/np.pi) + 1))
    spl_y, spl_k, spl_e = np.zeros(nspl), np.zeros(nspl), np.zeros(nspl)
    for i in range(nspl):
        q = kmin + i*(kmax-kmin)/(nspl - 1)
        ik = index_nearest(kraw, q)
        i1 = min(len(kraw)-1, ik + 5)
        i2 = max(0, ik - 5)
        spl_k[i] = kraw[ik]
        spl_e[i] = energy[ik+ie0]
        spl_y[i] = (2*mu[ik+ie0] + mu[i1+ie0] + mu[i2+ie0]) / 4.0

    # get spline representation: knots, coefs, order=3
    # coefs will be varied in fit.
    knots, coefs, order = splrep(spl_k, spl_y)

    # set fit parameters from initial coefficients
    params = Group()
    for i in range(len(coefs)):
        name = FMT_COEF % i
        p = Parameter(coefs[i], name=name, vary=i < len(spl_y))
        p._getval()
        setattr(params, name, p)

    initbkg, initchi = spline_eval(kraw[:iemax-ie0+1], mu[ie0:iemax+1],
                                   knots, coefs, order, kout)

    # do fit
    fit = Minimizer(__resid, params, _larch=_larch, toler=1.e-4,
                    fcn_kws=dict(ncoefs=len(coefs), chi_std=chi_std,
                                 knots=knots, order=order,
                                 kraw=kraw[:iemax-ie0+1],
                                 mu=mu[ie0:iemax+1], irbkg=irbkg, kout=kout,
                                 ftwin=ftwin, kweight=kweight, nfft=nfft,
                                 nclamp=nclamp, clamp_lo=clamp_lo,
                                 clamp_hi=clamp_hi))
    fit.leastsq()

    # write final results
    coefs = [getattr(params, FMT_COEF % i) for i in range(len(coefs))]
    bkg, chi = spline_eval(kraw[:iemax-ie0+1], mu[ie0:iemax+1],
                           knots, coefs, order, kout)
    obkg = np.copy(mu)
    obkg[ie0:ie0+len(bkg)] = bkg

    # outputs to group
    group = set_xafsGroup(group, _larch=_larch)
    group.bkg = obkg
    group.chie = (mu - obkg) / edge_step
    group.k = kout
    group.chi = chi / edge_step

    # now fill in 'autobk_details' group
    params.init_bkg = np.copy(mu)
    params.init_bkg[ie0:ie0+len(bkg)] = initbkg
    params.init_chi = initchi / edge_step
    params.knots_e = spl_e
    params.knots_y = np.array([coefs[i] for i in range(nspl)])
    params.init_knots_y = spl_y
    params.nfev = params.fit_details.nfev
    params.kmin = kmin
    params.kmax = kmax
    group.autobk_details = params

    # uncertainties in mu0 and chi: can be fairly slow.
    if calc_uncertainties:
        nchi = len(chi)
        nmue = iemax - ie0 + 1
        redchi = params.chi_reduced
        covar = params.covar / redchi
        jac_chi = np.zeros(nchi*nspl).reshape((nspl, nchi))
        jac_bkg = np.zeros(nmue*nspl).reshape((nspl, nmue))

        cvals, cerrs = [], []
        for i in range(len(coefs)):
            par = getattr(params, FMT_COEF % i)
            cvals.append(getattr(par, 'value', 0.0))
            cdel = getattr(par, 'stderr', 0.0)
            if cdel is None:
                cdel = 0.0
            cerrs.append(cdel/2.0)
        cvals = np.array(cvals)
        cerrs = np.array(cerrs)

        # find derivatives by hand!
        _k = kraw[:nmue]
        _m = mu[ie0:iemax+1]
        for i in range(nspl):
            cval0 = cvals[i]
            cvals[i] = cval0 + cerrs[i]
            bkg1, chi1 = spline_eval(_k, _m, knots, cvals, order, kout)

            cvals[i] = cval0 - cerrs[i]
            bkg2, chi2 = spline_eval(_k, _m, knots, cvals, order, kout)

            cvals[i] = cval0
            jac_chi[i] = (chi1 - chi2) / (2*cerrs[i])
            jac_bkg[i] = (bkg1 - bkg2) / (2*cerrs[i])

        dfchi = np.zeros(nchi)
        dfbkg = np.zeros(nmue)
        for i in range(nspl):
            for j in range(nspl):
                dfchi += jac_chi[i]*jac_chi[j]*covar[i, j]
                dfbkg += jac_bkg[i]*jac_bkg[j]*covar[i, j]

        prob = 0.5*(1.0 + erf(err_sigma/np.sqrt(2.0)))
        dchi = t.ppf(prob, nchi-nspl) * np.sqrt(dfchi*redchi)
        dbkg = t.ppf(prob, nmue-nspl) * np.sqrt(dfbkg*redchi)

        group.delta_chi = dchi
        group.delta_bkg = 0.0*mu
        group.delta_bkg[ie0:ie0+len(dbkg)] = dbkg
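# Sketch of the error propagation done above (a restatement of the code, not
# new behavior): central finite differences give the Jacobian
# jac_chi[i] = d(chi)/d(c_i) for each spline coefficient c_i, and the variance
# of chi(k) follows from the coefficient covariance matrix:
#
#   var_chi(k) = sum_ij jac_chi[i](k) * jac_chi[j](k) * covar[i, j]
#
# which is dfchi above; jac_bkg does the same for the background mu0(E).
# The reported delta_chi / delta_bkg then scale sqrt(var * redchi) by a
# Student-t factor t.ppf(prob, ndata - nspl), so err_sigma=1 corresponds
# roughly to a one-sigma confidence band.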