def _define(self, name, shape='gaussian', sigma_params=None):
    """Configure this peak from an emission-line name such as 'Fe Ka1'.

    Arguments:
      name: string of the form '<element> <line>' (e.g. 'Cu Ka1');
            None leaves the peak undefined.
      shape: line-shape name stored on the peak ['gaussian']
      sigma_params: optional 2-sequence (offset, slope), given as names
            or Parameter objects, used to constrain sigma via the
            expression  sigma = offset + slope * center_energy
    """
    self.shape = shape
    if name is None:
        return
    try:
        elem, line = [w.title() for w in name.split()]
    except (ValueError, AttributeError):
        # name was not a string of exactly two words: leave peak undefined.
        # (narrowed from a bare 'except:', which also hid KeyboardInterrupt)
        return
    if line == 'Ka':
        # 'Ka' alone is ambiguous; use the strongest component
        line = 'Ka1'
    dat = xray_line(elem, line, _larch=self._larch)
    if dat is not None:
        ecenter = dat[0]
        if self.center is None:
            self.center = Parameter(name='center', value=ecenter,
                                    vary=False, _larch=self._larch)
        if sigma_params is not None:
            if len(sigma_params) == 2 and self.sigma is None:
                if isParameter(sigma_params[0]):
                    sigma_params = (sigma_params[0].name,
                                    sigma_params[1].name)
                expr = "%s + %s * %f" % (sigma_params[0],
                                         sigma_params[1], ecenter)
                self.sigma = Parameter(name='sigma', expr=expr,
                                       _larch=self._larch)
def _define(self, name, shape='gaussian', sigma_params=None):
    """Configure this peak from an emission-line name such as 'Fe Ka1'.

    Arguments:
      name: string of the form '<element> <line>' (e.g. 'Cu Ka1');
            None leaves the peak undefined.
      shape: line-shape name stored on the peak ['gaussian']
      sigma_params: optional 2-sequence (offset, slope), given as names
            or Parameter objects, used to constrain sigma via the
            expression  sigma = offset + slope * center_energy
    """
    self.shape = shape
    if name is None:
        return
    try:
        symbol, linename = [w.title() for w in name.split()]
    except (ValueError, AttributeError):
        # name was not a string of exactly two words: leave peak undefined.
        # (narrowed from a bare 'except:', which also hid KeyboardInterrupt)
        return
    if linename == 'Ka':
        # 'Ka' alone is ambiguous; use the strongest component
        linename = 'Ka1'
    dat = xray_line(symbol, linename, _larch=self._larch)
    if dat is not None:
        ecenter = dat[0]
        if self.center is None:
            self.center = Parameter(name='center', value=ecenter,
                                    vary=False, _larch=self._larch)
        if sigma_params is not None:
            if len(sigma_params) == 2 and self.sigma is None:
                if isParameter(sigma_params[0]):
                    sigma_params = (sigma_params[0].name,
                                    sigma_params[1].name)
                expr = "%s + %s * %f" % (sigma_params[0],
                                         sigma_params[1], ecenter)
                self.sigma = Parameter(name='sigma', expr=expr,
                                       _larch=self._larch)
def fluo_corr(energy, mu, formula, elem, group=None, edge='K', anginp=45,
              angout=45, _larch=None, **pre_kws):
    """correct over-absorption (self-absorption) for fluorescence XAFS
    using the FLUO algorithm of D. Haskel.

    Arguments
    ---------
      energy    array of energies
      mu        uncorrected fluorescence mu
      formula   string for sample stoichiometry
      elem      atomic symbol or Z of absorbing element
      group     output group [default None]
      edge      name of edge ('K', 'L3', ...) [default 'K']
      anginp    input angle in degrees  [default 45]
      angout    output angle in degrees  [default 45]

    Additional keywords will be passed to pre_edge(), which will be used
    to ensure consistent normalization.

    Returns
    -------
      None, writes `mu_corr` and `norm_corr` (normalized `mu_corr`)
      to output group.

    Notes
    -----
      Support First Argument Group convention, requiring group
      members 'energy' and 'mu'
    """
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='fluo_corr')
    # generate normalized mu for correction
    preinp = preedge(energy, mu, **pre_kws)
    mu_inp = preinp['norm']

    # keep angles strictly positive so sin() below never divides by zero
    anginp = max(1.e-7, np.deg2rad(anginp))
    angout = max(1.e-7, np.deg2rad(angout))

    # find edge energies and fluorescence line energy
    e_edge = xray_edge(elem, edge, _larch=_larch)[0]
    e_fluor = xray_line(elem, edge, _larch=_larch)[0]

    # calculate mu(E) for fluorescence energy, above, below edge
    energies = np.array([e_fluor, e_edge-10.0, e_edge+10.0])
    muvals = material_mu(formula, energies, density=1, _larch=_larch)

    mu_fluor = muvals[0] * np.sin(anginp)/np.sin(angout)
    mu_below = muvals[1]
    mu_celem = muvals[2] - muvals[1]

    alpha = (mu_fluor + mu_below)/mu_celem
    mu_corr = mu_inp*alpha/(alpha + 1 - mu_inp)
    preout = preedge(energy, mu_corr, **pre_kws)

    if group is not None:
        group = set_xafsGroup(group, _larch=_larch)
        group.mu_corr = mu_corr
        group.norm_corr = preout['norm']
def find_xray_line(z, edge):
    """Find the most intense X-ray emission line for an element and edge.

    Arguments:
      z:    element symbol or atomic number
      edge: edge name ('K', 'L3', ...), case-insensitive

    Returns:
      xray_line() data for the strongest matching emission line, or
      None when no emission line has the requested edge as its
      initial level.
    """
    intensity = 0
    line = ''
    for key, value in xray_lines(z).items():
        if value.initial_level == edge.upper():
            if value.intensity > intensity:
                intensity = value.intensity
                line = key
    if not line:
        # BUG FIX: previously fell through and called xray_line(z, ''),
        # which fails for edges with no matching emission line
        return None
    # strip the trailing component index from the key before lookup
    # (e.g. 'Ka1' -> 'Ka') -- assumes keys end in a single index character;
    # TODO(review): confirm against xray_lines() key format
    return xray_line(z, line[:-1])
def xrf_calib_init_roi(mca, roiname):
    """initial calibration step for MCA:
    find energy locations for one ROI

    Stores (eknown, ecen, fwhm, ccen, fit) in mca.init_calib[roiname],
    where energies are in keV and ccen is a channel number.
    """
    if not isLarchMCAGroup(mca):
        print( 'Not a valid MCA')
        return
    energy = 1.0*mca.energy
    chans = 1.0*np.arange(len(energy))
    counts = mca.counts
    bgr = getattr(mca, 'bgr', None)
    if bgr is not None:
        counts = counts - bgr
    if not hasattr(mca, 'init_calib'):
        mca.init_calib = OrderedDict()

    # locate the named ROI
    roi = None
    for xroi in mca.rois:
        if xroi.name == roiname:
            roi = xroi
            break
    if roi is None:
        return
    words = roiname.split()
    elem = words[0].title()
    family = 'Ka'
    if len(words) > 1:
        family = words[1].title()
    if family == 'Lb':
        family = 'Lb1'
    # tabulated line energy, converted eV -> keV
    eknown = xray_line(elem, family)[0]/1000.0
    llim = max(0, roi.left - roi.bgr_width)
    hlim = min(len(chans)-1, roi.right + roi.bgr_width)
    segcounts = counts[llim:hlim]
    maxcounts = max(segcounts)
    # BUG FIX: np.where() returns an index *array*; take the first match so
    # ccen/ecen are scalars (an array here breaks make_params(center=...)
    # and pollutes the init_calib record)
    ccen = llim + int(np.where(segcounts == maxcounts)[0][0])
    ecen = ccen * mca.slope + mca.offset
    bkgcounts = counts[llim] + counts[hlim]
    if maxcounts < 2*bkgcounts:
        # peak is too weak relative to background: record location only
        mca.init_calib[roiname] = (eknown, ecen, 0.0, ccen, None)
    else:
        # fit a Gaussian + constant background within the ROI
        model = GaussianModel() + ConstantModel()
        params = model.make_params(amplitude=maxcounts,
                                   sigma=(chans[hlim]-chans[llim])/2.0,
                                   center=ccen-llim, c=0.00)
        params['center'].min = -10
        params['center'].max = hlim - llim + 10
        params['c'].min = -10
        out = model.fit(counts[llim:hlim], params, x=chans[llim:hlim])
        ccen = llim + out.params['center'].value
        ecen = ccen * mca.slope + mca.offset
        fwhm = out.params['fwhm'].value * mca.slope
        mca.init_calib[roiname] = (eknown, ecen, fwhm, ccen, out)
def xrf_calib_fitrois(mca, _larch=None):
    """initial calibration step for MCA:
    find energy locations for all ROIs

    Stores (eknown, ecen, fwhm, ccen, fit) per ROI name in
    mca.init_calib; energies are in keV, ccen is a channel number.
    """
    if not isLarchMCAGroup(mca):
        print('Not a valid MCA')
        return

    energy = 1.0 * mca.energy
    chans = 1.0 * np.arange(len(energy))
    counts = mca.counts
    bgr = getattr(mca, 'bgr', None)
    if bgr is not None:
        counts = counts - bgr
    calib = OrderedDict()
    for roi in mca.rois:
        words = roi.name.split()
        elem = words[0].title()
        family = 'ka'
        if len(words) > 1:
            family = words[1]
        try:
            # tabulated line energy, converted eV -> keV
            eknown = xray_line(elem, family, _larch=_larch)[0] / 1000.0
        except Exception:
            # unknown element/line name: skip this ROI
            # (narrowed from a bare 'except:', which also hid Ctrl-C)
            continue
        llim = max(0, roi.left - roi.bgr_width)
        hlim = min(len(chans) - 1, roi.right + roi.bgr_width)
        fit = fit_peak(chans[llim:hlim], counts[llim:hlim],
                       'Gaussian', background='constant',
                       _larch=_larch)
        ccen = fit.params.center.value
        ecen = ccen * mca.slope + mca.offset
        # 2.354820 = 2*sqrt(2*ln 2): sigma -> FWHM
        fwhm = 2.354820 * fit.params.sigma.value * mca.slope
        calib[roi.name] = (eknown, ecen, fwhm, ccen, fit)
    mca.init_calib = calib
def xrf_calib_fitrois(mca, _larch=None):
    """initial calibration step for MCA:
    find energy locations for all ROIs

    Stores (eknown, ecen, fwhm, ccen, fit) per ROI name in
    mca.init_calib; energies are in keV, ccen is a channel number.
    """
    if not isLarchMCAGroup(mca):
        print( 'Not a valid MCA')
        return

    energy = 1.0*mca.energy
    chans = 1.0*np.arange(len(energy))
    counts = mca.counts
    bgr = getattr(mca, 'bgr', None)
    if bgr is not None:
        counts = counts - bgr
    calib = OrderedDict()
    for roi in mca.rois:
        words = roi.name.split()
        elem = words[0].title()
        family = 'ka'
        if len(words) > 1:
            family = words[1]
        try:
            # tabulated line energy, converted eV -> keV
            eknown = xray_line(elem, family, _larch=_larch)[0]/1000.0
        except Exception:
            # unknown element/line name: skip this ROI
            # (narrowed from a bare 'except:', which also hid Ctrl-C)
            continue
        llim = max(0, roi.left - roi.bgr_width)
        hlim = min(len(chans)-1, roi.right + roi.bgr_width)
        fit = fit_peak(chans[llim:hlim], counts[llim:hlim],
                       'Gaussian', background='constant',
                       _larch=_larch)
        ccen = fit.params['center'].value
        ecen = ccen * mca.slope + mca.offset
        # 2.354820 = 2*sqrt(2*ln 2): sigma -> FWHM
        fwhm = 2.354820 * fit.params['sigma'].value * mca.slope
        calib[roi.name] = (eknown, ecen, fwhm, ccen, fit)
    mca.init_calib = calib
def mback(energy, mu, group=None, order=3, z=None, edge='K', e0=None,
          emin=None, emax=None, whiteline=None, leexiang=False,
          tables='chantler', fit_erfc=False, return_f1=False, _larch=None):
    """
    Match mu(E) data for tabulated f''(E) using the MBACK algorithm and,
    optionally, the Lee & Xiang extension

    Arguments:
      energy, mu:  arrays of energy and mu(E)
      order:       order of polynomial [3]
      group:       output group (and input group for e0)
      z:           Z number of absorber
      edge:        absorption edge (K, L3)
      e0:          edge energy
      emin:        beginning energy for fit
      emax:        ending energy for fit
      whiteline:   exclusion zone around white lines
      leexiang:    flag to use the Lee & Xiang extension
      tables:      'chantler' (default) or 'cl'
      fit_erfc:    True to float parameters of error function
      return_f1:   True to put the f1 array in the group

    Returns:
      group.f2:            tabulated f2(E)
      group.f1:            tabulated f1(E) (if return_f1 is True)
      group.fpp:           matched data
      group.mback_params:  Group of parameters for the minimization

    References:
      * MBACK (Weng, Waldo, Penner-Hahn): http://dx.doi.org/10.1086/303711
      * Lee and Xiang: http://dx.doi.org/10.1088/0004-637X/702/2/970
      * Cromer-Liberman: http://dx.doi.org/10.1063/1.1674266
      * Chantler: http://dx.doi.org/10.1063/1.555974
    """
    order = int(order)
    if order < 1:
        order = 1  # set order of polynomial
    if order > MAXORDER:
        order = MAXORDER

    ### implement the First Argument Group convention
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='mback')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    group = set_xafsGroup(group, _larch=_larch)

    if e0 is None:  # need to run find_e0:
        e0 = xray_edge(z, edge, _larch=_larch)[0]
    if e0 is None:
        e0 = group.e0
    if e0 is None:
        # BUG FIX: the original called find_e0() but discarded its result,
        # leaving e0 as None and breaking the arithmetic below; find_e0
        # stores the edge energy on the group, so read it back
        find_e0(energy, mu, group=group)
        e0 = group.e0

    ### theta is an array used to exclude the regions <emin, >emax, and
    ### around white lines, theta=0.0 in excluded regions, theta=1.0 elsewhere
    (i1, i2) = (0, len(energy)-1)
    if emin is not None:
        i1 = index_of(energy, emin)
    if emax is not None:
        i2 = index_of(energy, emax)

    theta = np.ones(len(energy))  # default: 1 throughout
    theta[0:i1] = 0
    # NOTE(review): theta[i2:-1] leaves the very last point included;
    # confirm whether theta[i2:] was intended
    theta[i2:-1] = 0

    if whiteline:
        pre = 1.0*(energy < e0)
        post = 1.0*(energy > e0+float(whiteline))
        theta = theta * (pre + post)
        if edge.lower().startswith('l'):
            # for L edges also exclude around the L2 white line
            l2 = xray_edge(z, 'L2', _larch=_larch)[0]
            l2_pre = 1.0*(energy < l2)
            l2_post = 1.0*(energy > l2+float(whiteline))
            theta = theta * (l2_pre + l2_post)

    ## this is used to weight the pre- and post-edge differently as
    ## defined in the MBACK paper
    weight1 = 1*(energy < e0)
    weight2 = 1*(energy > e0)
    weight = np.sqrt(sum(weight1))*weight1 + np.sqrt(sum(weight2))*weight2

    ## get the f'' function from CL or Chantler
    if tables.lower() == 'chantler':
        f1 = f1_chantler(z, energy, _larch=_larch)
        f2 = f2_chantler(z, energy, _larch=_larch)
    else:
        (f1, f2) = f1f2(z, energy, edge=edge, _larch=_larch)
    group.f2 = f2
    if return_f1:
        group.f1 = f1

    n = edge
    if edge.lower().startswith('l'):
        n = 'L'
    params = Group(s=Parameter(1, vary=True, _larch=_larch),      # scale of data
                   xi=Parameter(50, vary=fit_erfc, min=0, _larch=_larch),  # width of erfc
                   em=Parameter(xray_line(z, n, _larch=_larch)[0],
                                vary=False, _larch=_larch),       # erfc centroid
                   e0=Parameter(e0, vary=False, _larch=_larch),   # abs. edge energy
                   ## various arrays need by the objective function
                   en=energy,
                   mu=mu,
                   f2=group.f2,
                   weight=weight,
                   theta=theta,
                   leexiang=leexiang,
                   _larch=_larch)
    if fit_erfc:
        params.a = Parameter(1, vary=True, _larch=_larch)   # amplitude of erfc
    else:
        params.a = Parameter(0, vary=False, _larch=_larch)  # amplitude of erfc

    for i in range(order):  # polynomial coefficients
        setattr(params, 'c%d' % i, Parameter(0, vary=True, _larch=_larch))

    fit = Minimizer(match_f2, params, _larch=_larch, toler=1.e-5)
    fit.leastsq()

    eoff = energy - params.e0.value
    normalization_function = params.a.value*erfc((energy-params.em.value)/params.xi.value) + params.c0.value
    for i in range(MAXORDER):
        j = i+1
        attr = 'c%d' % j
        if hasattr(params, attr):
            normalization_function = normalization_function + getattr(getattr(params, attr), 'value') * eoff**j

    group.fpp = params.s*mu - normalization_function
    group.mback_params = params
def mback(energy, mu, group=None, order=3, z=None, edge='K', e0=None,
          emin=None, emax=None, whiteline=None, leexiang=False,
          tables='chantler', fit_erfc=False, return_f1=False, _larch=None):
    """
    Match mu(E) data for tabulated f''(E) using the MBACK algorithm and,
    optionally, the Lee & Xiang extension

    Arguments:
      energy, mu:  arrays of energy and mu(E)
      order:       order of polynomial [3]
      group:       output group (and input group for e0)
      z:           Z number of absorber
      edge:        absorption edge (K, L3)
      e0:          edge energy
      emin:        beginning energy for fit
      emax:        ending energy for fit
      whiteline:   exclusion zone around white lines
      leexiang:    flag to use the Lee & Xiang extension
      tables:      'chantler' (default) or 'cl'
      fit_erfc:    True to float parameters of error function
      return_f1:   True to put the f1 array in the group

    Returns:
      group.f2:            tabulated f2(E)
      group.f1:            tabulated f1(E) (if return_f1 is True)
      group.fpp:           matched data
      group.mback_params:  Group of parameters for the minimization

    References:
      * MBACK (Weng, Waldo, Penner-Hahn): http://dx.doi.org/10.1086/303711
      * Lee and Xiang: http://dx.doi.org/10.1088/0004-637X/702/2/970
      * Cromer-Liberman: http://dx.doi.org/10.1063/1.1674266
      * Chantler: http://dx.doi.org/10.1063/1.555974
    """
    order = int(order)
    if order < 1:
        order = 1  # set order of polynomial
    if order > MAXORDER:
        order = MAXORDER

    ### implement the First Argument Group convention
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu, ), group=group,
                                         fcn_name='mback')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    group = set_xafsGroup(group, _larch=_larch)

    if e0 is None:  # need to run find_e0:
        e0 = xray_edge(z, edge, _larch=_larch)[0]
    if e0 is None:
        e0 = group.e0
    if e0 is None:
        # BUG FIX: the original called find_e0() but discarded its result,
        # leaving e0 as None and breaking the arithmetic below; find_e0
        # stores the edge energy on the group, so read it back
        find_e0(energy, mu, group=group)
        e0 = group.e0

    ### theta is an array used to exclude the regions <emin, >emax, and
    ### around white lines, theta=0.0 in excluded regions, theta=1.0 elsewhere
    (i1, i2) = (0, len(energy) - 1)
    if emin is not None:
        i1 = index_of(energy, emin)
    if emax is not None:
        i2 = index_of(energy, emax)

    theta = np.ones(len(energy))  # default: 1 throughout
    theta[0:i1] = 0
    # NOTE(review): theta[i2:-1] leaves the very last point included;
    # confirm whether theta[i2:] was intended
    theta[i2:-1] = 0

    if whiteline:
        pre = 1.0 * (energy < e0)
        post = 1.0 * (energy > e0 + float(whiteline))
        theta = theta * (pre + post)
        if edge.lower().startswith('l'):
            # for L edges also exclude around the L2 white line
            l2 = xray_edge(z, 'L2', _larch=_larch)[0]
            l2_pre = 1.0 * (energy < l2)
            l2_post = 1.0 * (energy > l2 + float(whiteline))
            theta = theta * (l2_pre + l2_post)

    ## this is used to weight the pre- and post-edge differently as
    ## defined in the MBACK paper
    weight1 = 1 * (energy < e0)
    weight2 = 1 * (energy > e0)
    weight = np.sqrt(sum(weight1)) * weight1 + np.sqrt(sum(weight2)) * weight2

    ## get the f'' function from CL or Chantler
    if tables.lower() == 'chantler':
        f1 = f1_chantler(z, energy, _larch=_larch)
        f2 = f2_chantler(z, energy, _larch=_larch)
    else:
        (f1, f2) = f1f2(z, energy, edge=edge, _larch=_larch)
    group.f2 = f2
    if return_f1:
        group.f1 = f1

    n = edge
    if edge.lower().startswith('l'):
        n = 'L'
    params = Group(
        s=Parameter(1, vary=True, _larch=_larch),                 # scale of data
        xi=Parameter(50, vary=fit_erfc, min=0, _larch=_larch),    # width of erfc
        em=Parameter(xray_line(z, n, _larch=_larch)[0],
                     vary=False, _larch=_larch),                  # erfc centroid
        e0=Parameter(e0, vary=False, _larch=_larch),              # abs. edge energy
        ## various arrays need by the objective function
        en=energy,
        mu=mu,
        f2=group.f2,
        weight=weight,
        theta=theta,
        leexiang=leexiang,
        _larch=_larch)
    if fit_erfc:
        params.a = Parameter(1, vary=True, _larch=_larch)   # amplitude of erfc
    else:
        params.a = Parameter(0, vary=False, _larch=_larch)  # amplitude of erfc

    for i in range(order):  # polynomial coefficients
        setattr(params, 'c%d' % i, Parameter(0, vary=True, _larch=_larch))

    fit = Minimizer(match_f2, params, _larch=_larch, toler=1.e-5)
    fit.leastsq()

    eoff = energy - params.e0.value
    normalization_function = params.a.value * erfc(
        (energy - params.em.value) / params.xi.value) + params.c0.value
    for i in range(MAXORDER):
        j = i + 1
        attr = 'c%d' % j
        if hasattr(params, attr):
            normalization_function = normalization_function + getattr(
                getattr(params, attr), 'value') * eoff**j

    group.fpp = params.s * mu - normalization_function
    group.mback_params = params
def mback(energy, mu=None, group=None, order=3, z=None, edge='K', e0=None,
          emin=None, emax=None, whiteline=None, leexiang=False,
          tables='chantler', fit_erfc=False, return_f1=False, _larch=None):
    """
    Match mu(E) data for tabulated f''(E) using the MBACK algorithm and,
    optionally, the Lee & Xiang extension

    Arguments:
      energy, mu:  arrays of energy and mu(E)
      order:       order of polynomial [3]
      group:       output group (and input group for e0)
      z:           Z number of absorber
      edge:        absorption edge (K, L3)
      e0:          edge energy
      emin:        beginning energy for fit
      emax:        ending energy for fit
      whiteline:   exclusion zone around white lines
      leexiang:    flag to use the Lee & Xiang extension
      tables:      'chantler' (default) or 'cl'
      fit_erfc:    True to float parameters of error function
      return_f1:   True to put the f1 array in the group

    Returns:
      group.f2:            tabulated f2(E)
      group.f1:            tabulated f1(E) (if return_f1 is True)
      group.fpp:           matched data
      group.mback_params:  dict of fitted parameter values

    References:
      * MBACK (Weng, Waldo, Penner-Hahn): http://dx.doi.org/10.1086/303711
      * Lee and Xiang: http://dx.doi.org/10.1088/0004-637X/702/2/970
      * Cromer-Liberman: http://dx.doi.org/10.1063/1.1674266
      * Chantler: http://dx.doi.org/10.1063/1.555974
    """
    order = int(order)
    if order < 1:
        order = 1  # set order of polynomial
    if order > MAXORDER:
        order = MAXORDER

    ### implement the First Argument Group convention
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu, ), group=group,
                                         fcn_name='mback')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    group = set_xafsGroup(group, _larch=_larch)

    if e0 is None:  # need to run find_e0:
        e0 = xray_edge(z, edge, _larch=_larch)[0]
    if e0 is None:
        e0 = group.e0
    if e0 is None:
        # BUG FIX: the original called find_e0() but discarded its result,
        # leaving e0 as None and breaking the arithmetic below; find_e0
        # stores the edge energy on the group, so read it back
        find_e0(energy, mu, group=group)
        e0 = group.e0

    ### theta is an array used to exclude the regions <emin, >emax, and
    ### around white lines, theta=0.0 in excluded regions, theta=1.0 elsewhere
    (i1, i2) = (0, len(energy) - 1)
    if emin is not None:
        i1 = index_of(energy, emin)
    if emax is not None:
        i2 = index_of(energy, emax)

    theta = np.ones(len(energy))  # default: 1 throughout
    theta[0:i1] = 0
    # NOTE(review): theta[i2:-1] leaves the very last point included;
    # confirm whether theta[i2:] was intended
    theta[i2:-1] = 0

    if whiteline:
        pre = 1.0 * (energy < e0)
        post = 1.0 * (energy > e0 + float(whiteline))
        theta = theta * (pre + post)
        if edge.lower().startswith('l'):
            # for L edges also exclude around the L2 white line
            l2 = xray_edge(z, 'L2', _larch=_larch)[0]
            l2_pre = 1.0 * (energy < l2)
            l2_post = 1.0 * (energy > l2 + float(whiteline))
            theta = theta * (l2_pre + l2_post)

    ## this is used to weight the pre- and post-edge differently as
    ## defined in the MBACK paper
    weight1 = 1 * (energy < e0)
    weight2 = 1 * (energy > e0)
    weight = np.sqrt(sum(weight1)) * weight1 + np.sqrt(sum(weight2)) * weight2

    ## get the f'' function from CL or Chantler
    if tables.lower() == 'chantler':
        f1 = f1_chantler(z, energy, _larch=_larch)
        f2 = f2_chantler(z, energy, _larch=_larch)
    else:
        (f1, f2) = f1f2(z, energy, edge=edge, _larch=_larch)
    group.f2 = f2
    if return_f1:
        group.f1 = f1

    em = xray_line(z, edge.upper(), _larch=_larch)[0]  # erfc centroid

    params = Parameters()
    params.add(name='s', value=1, vary=True)             # scale of data
    params.add(name='xi', value=50, vary=fit_erfc, min=0)  # width of erfc
    params.add(name='a', value=0, vary=False)            # amplitude of erfc
    if fit_erfc:
        params['a'].value = 1
        params['a'].vary = True

    for i in range(order):  # polynomial coefficients
        params.add(name='c%d' % i, value=0, vary=True)

    out = minimize(match_f2, params, method='leastsq',
                   gtol=1.e-5, ftol=1.e-5, xtol=1.e-5, epsfcn=1.e-5,
                   kws=dict(en=energy, mu=mu, f2=f2, e0=e0, em=em,
                            order=order, weight=weight, theta=theta,
                            leexiang=leexiang))

    opars = out.params.valuesdict()
    eoff = energy - e0

    norm_function = opars['a'] * erfc((energy - em) / opars['xi']) + opars['c0']
    for i in range(order):
        j = i + 1
        attr = 'c%d' % j
        if attr in opars:
            norm_function += opars[attr] * eoff**j
    # (the unused 'tmp = Group(...)' scratch object has been removed)

    group.e0 = e0
    group.fpp = opars['s'] * mu - norm_function
    group.mback_params = opars

    # calculate edge step from f2 + norm_function: should be very smooth
    pre_f2 = preedge(energy, group.f2 + norm_function, e0=e0, nnorm=2, nvict=0)
    group.edge_step = pre_f2['edge_step'] / opars['s']

    pre_fpp = preedge(energy, mu, e0=e0, nnorm=2, nvict=0)
    group.norm = (mu - pre_fpp['pre_edge']) / group.edge_step
def fluo_corr(energy, mu, formula, elem, group=None, edge='K', anginp=45,
              angout=45, _larch=None, **pre_kws):
    """Correct over-absorption (self-absorption) for fluorescence XAFS
    using the FLUO algorithm of D. Haskel.

    Arguments
    ---------
      energy    array of energies
      mu        uncorrected fluorescence mu
      formula   string for sample stoichiometry
      elem      atomic symbol or Z of absorbing element
      group     output group [default None]
      edge      name of edge ('K', 'L3', ...) [default 'K']
      anginp    input angle in degrees  [default 45]
      angout    output angle in degrees  [default 45]

    Extra keyword arguments are forwarded to pre_edge() so that
    normalization stays consistent.

    Returns
    -------
      None; writes `mu_corr` and `norm_corr` (normalized `mu_corr`)
      to the output group.

    Notes
    -----
      Supports the First Argument Group convention, requiring group
      members 'energy' and 'mu'.
    """
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu, ), group=group,
                                         fcn_name='fluo_corr')
    # normalized mu(E) used as the input to the correction
    norm_in = preedge(energy, mu, **pre_kws)['norm']

    # angles in radians, clipped away from zero for the sin() ratio below
    ang_in = max(1.e-7, np.deg2rad(anginp))
    ang_out = max(1.e-7, np.deg2rad(angout))

    # edge energy and the fluorescence-line energy for this edge
    e_edge = xray_edge(elem, edge, _larch=_larch)[0]
    e_fluor = xray_line(elem, edge, _larch=_larch)[0]

    # sample mu at the fluorescence energy and 10 eV below/above the edge
    sample_e = np.array([e_fluor, e_edge - 10.0, e_edge + 10.0])
    mu_sample = material_mu(formula, sample_e, density=1, _larch=_larch)

    mu_fluor = mu_sample[0] * np.sin(ang_in) / np.sin(ang_out)
    mu_below = mu_sample[1]
    mu_celem = mu_sample[2] - mu_sample[1]

    # over-absorption factor and corrected mu
    alpha = (mu_fluor + mu_below) / mu_celem
    mu_corr = norm_in * alpha / (alpha + 1 - norm_in)
    norm_out = preedge(energy, mu_corr, **pre_kws)

    if group is not None:
        group = set_xafsGroup(group, _larch=_larch)
        group.mu_corr = mu_corr
        group.norm_corr = norm_out['norm']