def fluo_corr(energy, mu, formula, elem, group=None, edge='K', anginp=45,
              angout=45, _larch=None, **pre_kws):
    """Correct over-absorption (self-absorption) in fluorescence XAFS
    using D. Haskel's FLUO algorithm.

    Arguments
    ---------
      energy    array of energies
      mu        uncorrected fluorescence mu
      formula   string for sample stoichiometry
      elem      atomic symbol or Z of absorbing element
      group     output group [default None]
      edge      name of edge ('K', 'L3', ...) [default 'K']
      anginp    input angle in degrees [default 45]
      angout    output angle in degrees [default 45]

    Any extra keyword arguments are forwarded to the pre-edge
    normalization so that input and corrected spectra are normalized
    consistently.

    Returns
    -------
      None -- writes `mu_corr` and `norm_corr` (normalized `mu_corr`)
      to the output group.

    Notes
    -----
      Supports the First Argument Group convention, requiring group
      members 'energy' and 'mu'.
    """
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='fluo_corr')

    # normalized input spectrum used by the correction
    mu_norm = preedge(energy, mu, **pre_kws)['norm']

    # angles in radians, clamped away from zero so the sines below
    # never vanish
    ang_in = max(1.e-7, np.deg2rad(anginp))
    ang_out = max(1.e-7, np.deg2rad(angout))

    # edge and fluorescence-line energies for the absorbing element
    e_edge = xray_edge(elem, edge, _larch=_larch)[0]
    e_fluor = xray_line(elem, edge, _larch=_larch)[0]

    # total mu of the matrix at the fluorescence line and at 10 eV
    # below/above the absorption edge
    sample_en = np.array([e_fluor, e_edge - 10.0, e_edge + 10.0])
    mu_line, mu_below, mu_above = material_mu(formula, sample_en,
                                              density=1, _larch=_larch)

    mu_fluor = mu_line * np.sin(ang_in) / np.sin(ang_out)
    mu_celem = mu_above - mu_below

    alpha = (mu_fluor + mu_below) / mu_celem
    mu_corr = mu_norm * alpha / (alpha + 1 - mu_norm)
    corr_norm = preedge(energy, mu_corr, **pre_kws)

    if group is not None:
        group = set_xafsGroup(group, _larch=_larch)
        group.mu_corr = mu_corr
        group.norm_corr = corr_norm['norm']
def mback_norm(energy, mu=None, group=None, z=None, edge='K', e0=None,
               pre1=None, pre2=-50, norm1=100, norm2=None, nnorm=1, nvict=1,
               _larch=None):
    """
    Simplified version of MBACK to Match mu(E) data for tabulated f''(E)
    for normalization.

    Arguments:
      energy, mu: arrays of energy and mu(E)
      group:      output group (and input group for e0)
      z:          Z number of absorber
      e0:         edge energy
      pre1:       low E range (relative to E0) for pre-edge fit
      pre2:       high E range (relative to E0) for pre-edge fit
      norm1:      low E range (relative to E0) for post-edge fit
      norm2:      high E range (relative to E0) for post-edge fit
      nnorm:      degree of polynomial (ie, nnorm+1 coefficients will be
                  found) for post-edge normalization curve fit to the
                  scaled f2. Default=1 (linear)

    Returns:
      group.norm_poly:    normalized mu(E) from pre_edge()
      group.norm:         normalized mu(E) from this method
      group.mback_mu:     tabulated f2 scaled and pre_edge added to match mu(E)
      group.mback_params: Group of parameters for the minimization

    References:
      * MBACK (Weng, Waldo, Penner-Hahn): http://dx.doi.org/10.1086/303711
      * Chantler: http://dx.doi.org/10.1063/1.555974
    """
    ### implement the First Argument Group convention
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='mback')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    group = set_xafsGroup(group, _larch=_larch)

    # resolve e0: explicit argument, value already on the group, or
    # run find_e0()
    if e0 is None:
        e0 = getattr(group, 'e0', None)
        if e0 is None:
            find_e0(energy, mu, group=group)
            e0 = group.e0

    atsym = None
    if z is None or z < 2:
        atsym, edge = guess_edge(group.e0, _larch=_larch)
        z = atomic_number(atsym)
    if atsym is None and z is not None:
        atsym = atomic_symbol(z)

    if getattr(group, 'pre_edge_details', None) is None:
        # pre_edge never run: do it now and back-fill the group
        # attributes (pre_edge, edge_step, norm) that are read below.
        # (Previously the preedge() result was discarded, so the
        # group attributes were never populated.)
        ppars = preedge(energy, mu, pre1=pre1, pre2=pre2, nvict=nvict,
                        norm1=norm1, norm2=norm2, e0=e0, nnorm=nnorm)
        group.pre_edge = ppars['pre_edge']
        group.edge_step = ppars['edge_step']
        group.norm = ppars['norm']

    group.norm_poly = group.norm*1.0
    mu_pre = mu - group.pre_edge
    f2 = f2_chantler(z, energy)
    weights = np.ones(len(energy))*1.0

    if norm2 is None:
        norm2 = max(energy) - e0
    if norm2 < 0:
        # NOTE(review): a negative norm2 maps to max(energy)-e0-norm2,
        # i.e. |norm2| eV *beyond* the data range -- confirm intended.
        norm2 = max(energy) - e0 - norm2

    # avoid l2 and higher edges: keep the post-edge fit range below
    # the next edge up
    if edge.lower().startswith('l'):
        if edge.lower() == 'l3':
            e_l2 = xray_edge(z, 'L2')[0]
            norm2 = min(norm2, e_l2-e0)
        elif edge.lower() == 'l2':
            e_l1 = xray_edge(z, 'L1')[0]
            norm2 = min(norm2, e_l1-e0)

    ipre2 = index_of(energy, e0+pre2)
    inor1 = index_of(energy, e0+norm1)
    inor2 = index_of(energy, e0+norm2) + 1

    # ignore data above the pre-edge cutoff, ramp weight up through
    # the normalization range
    weights[ipre2:] = 0.0
    weights[inor1:inor2] = np.linspace(0.1, 1.0, inor2-inor1)

    params = Parameters()
    params.add(name='slope',  value=0.0,    vary=True)
    params.add(name='offset', value=-f2[0], vary=True)
    params.add(name='scale',  value=f2[-1], vary=True)

    out = minimize(f2norm, params, method='leastsq',
                   gtol=1.e-5, ftol=1.e-5, xtol=1.e-5, epsfcn=1.e-5,
                   kws=dict(en=energy, mu=mu_pre, f2=f2, weights=weights))
    p = out.params.valuesdict()

    model = (p['offset'] + p['slope']*energy + f2) * p['scale']
    group.mback_mu = model + group.pre_edge

    pre_f2 = preedge(energy, model, nnorm=nnorm, nvict=nvict, e0=e0,
                     pre1=pre1, pre2=pre2, norm1=norm1, norm2=norm2)
    step_new = pre_f2['edge_step']

    group.edge_step_poly = group.edge_step
    group.edge_step_mback = step_new
    group.norm_mback = mu_pre / step_new

    group.mback_params = Group(e0=e0, pre1=pre1, pre2=pre2, norm1=norm1,
                               norm2=norm2, nnorm=nnorm, fit_params=p,
                               fit_weights=weights, model=model, f2=f2,
                               pre_f2=pre_f2, atsym=atsym, edge=edge)

    # adopt the mback edge step only when it is reasonably close to
    # the polynomial pre_edge() step
    if (abs(step_new - group.edge_step)/(1.e-13+group.edge_step)) > 0.75:
        print("Warning: mback edge step failed....")
    else:
        group.edge_step = step_new
        group.norm = group.norm_mback
def mback(energy, mu, group=None, order=3, z=None, edge='K', e0=None,
          emin=None, emax=None, whiteline=None, leexiang=False,
          tables='chantler', fit_erfc=False, return_f1=False, _larch=None):
    """
    Match mu(E) data for tabulated f''(E) using the MBACK algorithm and,
    optionally, the Lee & Xiang extension

    Arguments:
      energy, mu: arrays of energy and mu(E)
      order:      order of polynomial [3]
      group:      output group (and input group for e0)
      z:          Z number of absorber
      edge:       absorption edge (K, L3)
      e0:         edge energy
      emin:       beginning energy for fit
      emax:       ending energy for fit
      whiteline:  exclusion zone around white lines
      leexiang:   flag to use the Lee & Xiang extension
      tables:     'chantler' (default) or 'cl'
      fit_erfc:   True to float parameters of error function
      return_f1:  True to put the f1 array in the group

    Returns:
      group.f2:           tabulated f2(E)
      group.f1:           tabulated f1(E) (if return_f1 is True)
      group.fpp:          matched data
      group.mback_params: Group of parameters for the minimization

    References:
      * MBACK (Weng, Waldo, Penner-Hahn): http://dx.doi.org/10.1086/303711
      * Lee and Xiang: http://dx.doi.org/10.1088/0004-637X/702/2/970
      * Cromer-Liberman: http://dx.doi.org/10.1063/1.1674266
      * Chantler: http://dx.doi.org/10.1063/1.555974
    """
    # clamp polynomial order to [1, MAXORDER]
    order = int(order)
    if order < 1:
        order = 1
    if order > MAXORDER:
        order = MAXORDER

    ### implement the First Argument Group convention
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='mback')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    group = set_xafsGroup(group, _larch=_larch)

    # resolve e0: explicit argument, tabulated edge energy, value on
    # the group, or run find_e0().  (Previously the find_e0() result
    # was never copied back into e0, leaving e0 as None for the
    # comparisons below; group.e0 was also read without getattr and
    # could raise AttributeError.)
    if e0 is None:
        e0 = xray_edge(z, edge, _larch=_larch)[0]
    if e0 is None:
        e0 = getattr(group, 'e0', None)
    if e0 is None:
        find_e0(energy, mu, group=group)
        e0 = group.e0

    ### theta is an array used to exclude the regions <emin, >emax, and
    ### around white lines, theta=0.0 in excluded regions, theta=1.0 elsewhere
    (i1, i2) = (0, len(energy)-1)
    if emin is not None:
        i1 = index_of(energy, emin)
    if emax is not None:
        i2 = index_of(energy, emax)
    theta = np.ones(len(energy))  # default: 1 throughout
    theta[0:i1] = 0
    # NOTE(review): this leaves the very last point included even when
    # emax is given -- confirm theta[i2:] was not intended
    theta[i2:-1] = 0
    if whiteline:
        pre = 1.0*(energy < e0)
        post = 1.0*(energy > e0+float(whiteline))
        theta = theta * (pre + post)
        # for L edges, also exclude the L2 white line region
        if edge.lower().startswith('l'):
            l2 = xray_edge(z, 'L2', _larch=_larch)[0]
            l2_pre = 1.0*(energy < l2)
            l2_post = 1.0*(energy > l2+float(whiteline))
            theta = theta * (l2_pre + l2_post)

    ## this is used to weight the pre- and post-edge differently as
    ## defined in the MBACK paper
    weight1 = 1*(energy < e0)
    weight2 = 1*(energy > e0)
    weight = np.sqrt(sum(weight1))*weight1 + np.sqrt(sum(weight2))*weight2

    ## get the f'' function from CL or Chantler
    if tables.lower() == 'chantler':
        f1 = f1_chantler(z, energy, _larch=_larch)
        f2 = f2_chantler(z, energy, _larch=_larch)
    else:
        (f1, f2) = f1f2(z, energy, edge=edge, _larch=_larch)
    group.f2 = f2
    if return_f1:
        group.f1 = f1

    n = edge
    if edge.lower().startswith('l'):
        n = 'L'
    params = Group(s=Parameter(1, vary=True, _larch=_larch),  # scale of data
                   xi=Parameter(50, vary=fit_erfc, min=0, _larch=_larch),  # width of erfc
                   em=Parameter(xray_line(z, n, _larch=_larch)[0],
                                vary=False, _larch=_larch),  # erfc centroid
                   e0=Parameter(e0, vary=False, _larch=_larch),  # abs. edge energy
                   ## various arrays needed by the objective function
                   en=energy, mu=mu, f2=group.f2, weight=weight,
                   theta=theta, leexiang=leexiang, _larch=_larch)
    if fit_erfc:
        params.a = Parameter(1, vary=True, _larch=_larch)   # amplitude of erfc
    else:
        params.a = Parameter(0, vary=False, _larch=_larch)  # amplitude of erfc

    for i in range(order):  # polynomial coefficients
        setattr(params, 'c%d' % i, Parameter(0, vary=True, _larch=_larch))

    fit = Minimizer(match_f2, params, _larch=_larch, toler=1.e-5)
    fit.leastsq()

    # rebuild the fitted normalization function: erfc + polynomial in
    # (E - e0)
    eoff = energy - params.e0.value
    normalization_function = params.a.value*erfc((energy-params.em.value)/params.xi.value) + params.c0.value
    for i in range(MAXORDER):
        j = i+1
        attr = 'c%d' % j
        if hasattr(params, attr):
            normalization_function = normalization_function + getattr(getattr(params, attr), 'value') * eoff**j

    group.fpp = params.s*mu - normalization_function
    group.mback_params = params
def mback(energy, mu, group=None, order=3, z=None, edge='K', e0=None,
          emin=None, emax=None, whiteline=None, leexiang=False,
          tables='chantler', fit_erfc=False, return_f1=False, _larch=None):
    """
    Match mu(E) data for tabulated f''(E) using the MBACK algorithm and,
    optionally, the Lee & Xiang extension

    Arguments:
      energy, mu: arrays of energy and mu(E)
      order:      order of polynomial [3]
      group:      output group (and input group for e0)
      z:          Z number of absorber
      edge:       absorption edge (K, L3)
      e0:         edge energy
      emin:       beginning energy for fit
      emax:       ending energy for fit
      whiteline:  exclusion zone around white lines
      leexiang:   flag to use the Lee & Xiang extension
      tables:     'chantler' (default) or 'cl'
      fit_erfc:   True to float parameters of error function
      return_f1:  True to put the f1 array in the group

    Returns:
      group.f2:           tabulated f2(E)
      group.f1:           tabulated f1(E) (if return_f1 is True)
      group.fpp:          matched data
      group.mback_params: Group of parameters for the minimization

    References:
      * MBACK (Weng, Waldo, Penner-Hahn): http://dx.doi.org/10.1086/303711
      * Lee and Xiang: http://dx.doi.org/10.1088/0004-637X/702/2/970
      * Cromer-Liberman: http://dx.doi.org/10.1063/1.1674266
      * Chantler: http://dx.doi.org/10.1063/1.555974
    """
    # clamp polynomial order to [1, MAXORDER]
    order = int(order)
    if order < 1:
        order = 1
    if order > MAXORDER:
        order = MAXORDER

    ### implement the First Argument Group convention
    energy, mu, group = parse_group_args(energy,
                                         members=('energy', 'mu'),
                                         defaults=(mu, ),
                                         group=group,
                                         fcn_name='mback')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    group = set_xafsGroup(group, _larch=_larch)

    # resolve e0: explicit argument, tabulated edge energy, value on
    # the group, or run find_e0().  (Previously the find_e0() result
    # was never copied back into e0, leaving e0 as None for the
    # comparisons below; group.e0 was also read without getattr and
    # could raise AttributeError.)
    if e0 is None:
        e0 = xray_edge(z, edge, _larch=_larch)[0]
    if e0 is None:
        e0 = getattr(group, 'e0', None)
    if e0 is None:
        find_e0(energy, mu, group=group)
        e0 = group.e0

    ### theta is an array used to exclude the regions <emin, >emax, and
    ### around white lines, theta=0.0 in excluded regions, theta=1.0 elsewhere
    (i1, i2) = (0, len(energy) - 1)
    if emin is not None:
        i1 = index_of(energy, emin)
    if emax is not None:
        i2 = index_of(energy, emax)
    theta = np.ones(len(energy))  # default: 1 throughout
    theta[0:i1] = 0
    # NOTE(review): this leaves the very last point included even when
    # emax is given -- confirm theta[i2:] was not intended
    theta[i2:-1] = 0
    if whiteline:
        pre = 1.0 * (energy < e0)
        post = 1.0 * (energy > e0 + float(whiteline))
        theta = theta * (pre + post)
        # for L edges, also exclude the L2 white line region
        if edge.lower().startswith('l'):
            l2 = xray_edge(z, 'L2', _larch=_larch)[0]
            l2_pre = 1.0 * (energy < l2)
            l2_post = 1.0 * (energy > l2 + float(whiteline))
            theta = theta * (l2_pre + l2_post)

    ## this is used to weight the pre- and post-edge differently as
    ## defined in the MBACK paper
    weight1 = 1 * (energy < e0)
    weight2 = 1 * (energy > e0)
    weight = np.sqrt(sum(weight1)) * weight1 + np.sqrt(sum(weight2)) * weight2

    ## get the f'' function from CL or Chantler
    if tables.lower() == 'chantler':
        f1 = f1_chantler(z, energy, _larch=_larch)
        f2 = f2_chantler(z, energy, _larch=_larch)
    else:
        (f1, f2) = f1f2(z, energy, edge=edge, _larch=_larch)
    group.f2 = f2
    if return_f1:
        group.f1 = f1

    n = edge
    if edge.lower().startswith('l'):
        n = 'L'
    params = Group(
        s=Parameter(1, vary=True, _larch=_larch),  # scale of data
        xi=Parameter(50, vary=fit_erfc, min=0, _larch=_larch),  # width of erfc
        em=Parameter(xray_line(z, n, _larch=_larch)[0],
                     vary=False, _larch=_larch),  # erfc centroid
        e0=Parameter(e0, vary=False, _larch=_larch),  # abs. edge energy
        ## various arrays needed by the objective function
        en=energy,
        mu=mu,
        f2=group.f2,
        weight=weight,
        theta=theta,
        leexiang=leexiang,
        _larch=_larch)
    if fit_erfc:
        params.a = Parameter(1, vary=True, _larch=_larch)  # amplitude of erfc
    else:
        params.a = Parameter(0, vary=False, _larch=_larch)  # amplitude of erfc

    for i in range(order):  # polynomial coefficients
        setattr(params, 'c%d' % i, Parameter(0, vary=True, _larch=_larch))

    fit = Minimizer(match_f2, params, _larch=_larch, toler=1.e-5)
    fit.leastsq()

    # rebuild the fitted normalization function: erfc + polynomial in
    # (E - e0)
    eoff = energy - params.e0.value
    normalization_function = params.a.value * erfc(
        (energy - params.em.value) / params.xi.value) + params.c0.value
    for i in range(MAXORDER):
        j = i + 1
        attr = 'c%d' % j
        if hasattr(params, attr):
            normalization_function = normalization_function + getattr(
                getattr(params, attr), 'value') * eoff**j

    group.fpp = params.s * mu - normalization_function
    group.mback_params = params
def concentration_resid(pars, lineData=None, mca=None, phiPrime=np.pi / 2.,
                        phiDblPrime=np.pi / 2., xray_energy=30000.,
                        larch=None):
    """Residual for fitting elemental concentrations to measured
    fluorescence line intensities, including secondary (inter-element
    enhancement) terms.

    Arguments:
      pars:        parameter set with '<elem>_con' (concentration) and
                   '<elem>_cal' (calibration) entries for each element
                   key of lineData
      lineData:    dict of element symbol -> (line label, line energy,
                   measured intensity) -- presumably; inferred from the
                   indexing below, confirm against callers
      mca:         object providing the energy grid via get_energy()
                   (keV, converted to eV here)
      phiPrime:    incident-beam angle in radians [default pi/2]
      phiDblPrime: exit (detector) angle in radians [default pi/2]
      xray_energy: incident x-ray energy in eV [default 30000]
      larch:       larch interpreter instance

    Returns:
      array of residuals (measured - modeled intensity), one per element.
    """
    cscPhiPrime = 1. / np.sin(phiPrime)
    cscPhiDblPrime = 1. / np.sin(phiDblPrime)

    # energy grid (keV -> eV) and a delta-function probe spectrum at
    # the incident energy
    energy = mca.get_energy() * 1000.
    dE = energy[1] - energy[0]
    probeSpectrum = np.zeros(len(energy))
    ind = index_of(energy, xray_energy)
    probeSpectrum[ind] = 1.0

    C = np.array([pars[elem + '_con'].value for elem in lineData], ndmin=2).T
    Cal = np.array([pars[elem + '_cal'].value for elem in lineData])
    Intensity = np.array([lineData[elem][2] for elem in lineData])

    # mu[i] is the total attenuation of element i; mu[-1] accumulates
    # the concentration-weighted attenuation of the whole sample
    mu = np.zeros((len(lineData) + 1, len(energy)))
    for i, elem_i in enumerate(lineData):
        mu[i, :] = mu_elam(elem_i, energy, kind='total', _larch=larch)
        mu[-1, :] += C[i, 0] * mu[i, :]

    beta = np.zeros((len(lineData), len(lineData), len(energy)))
    delta = np.zeros((len(lineData), len(lineData), len(energy)))
    for i, elem_i in enumerate(lineData):
        line_en_i = lineData[elem_i][1]
        # lineData[elem][0] is e.g. 'CuKa': symbol, then edge char, then line
        edge_i = lineData[elem_i][0][len(elem_i)].title()
        edge_en_i, _, r_i = xray_edge(elem_i, edge_i, _larch=larch)
        for j, elem_j in enumerate(lineData):
            line_en_j = lineData[elem_j][1]
            edge_j = lineData[elem_j][0][len(elem_j)].title()
            line_j = lineData[elem_j][0][len(elem_j):].title()
            edge_en_j, _, r_j = xray_edge(elem_j, edge_j, _larch=larch)
            yield_j, _, prob_j = fluo_yield(elem_j, edge_j, line_j,
                                            np.max(energy), _larch=larch)
            # effective fluorescence production factor for element j
            k_j = prob_j * yield_j * (r_j - 1.) / r_j

            mu_s_prime = mu[-1, :] * cscPhiPrime
            mu_s_line_en_j = np.interp(line_en_j, energy, mu[-1, :])
            mu_s_dblPrime_line_en_i = np.interp(line_en_i, energy,
                                                mu[-1, :]) * cscPhiDblPrime
            P_ij = np.log(1. + mu_s_prime / mu_s_line_en_j) / mu_s_prime + \
                np.log(1. + mu_s_dblPrime_line_en_i / mu_s_line_en_j) / mu_s_dblPrime_line_en_i

            beta[i, j, :] = mu[j, :] * cscPhiPrime + \
                np.interp(line_en_i, energy, mu[j, :]) * cscPhiDblPrime
            beta[i, j, :] /= mu[i, :] * cscPhiPrime + \
                np.interp(line_en_i, energy, mu[i, :]) * cscPhiDblPrime
            beta[i, j, :] -= 1.

            # secondary enhancement: active only above element j's edge,
            # and only when j's line lies above element i's edge
            delta[i, j, :] = np.where(energy >= edge_en_j, 0.5, 0.0)
            if line_en_j <= edge_en_i:
                delta[i, j, :] *= 0.
            delta[i, j, :] *= k_j
            delta[i, j, :] *= mu[j, :] * cscPhiPrime
            delta[i, j, :] *= np.interp(line_en_j, energy, mu[i, :])
            delta[i, j, :] /= (mu[-1, :] * cscPhiPrime) + \
                (np.interp(line_en_i, energy, mu[-1, :]) * cscPhiDblPrime)
            delta[i, j, :] *= P_ij

    W = np.zeros((1, len(lineData), len(energy)))
    for i, elem_i in enumerate(lineData):
        # FIX: previously read lineData[elem_i] while the loop variable
        # was named 'elem', so elem_i was the stale value from the loop
        # above and every row used the same line energy
        line_en_i = lineData[elem_i][1]
        mu_i_star = mu[i, :] * cscPhiPrime + \
            np.interp(line_en_i, energy, mu[i, :]) * cscPhiDblPrime
        W[0, i, :] = mu[i, :] * probeSpectrum * dE
        W[0, i, :] /= mu_i_star * (1 + np.sum(C * beta[i, :, :], axis=0))

    alpha = np.sum(W * beta, axis=2) / np.sum(W, axis=2)
    epsilon = np.sum(W * delta, axis=2) / np.sum(W, axis=2)

    num = 1 + np.sum(C.T * epsilon, axis=1)
    den = 1 + np.sum(C.T * alpha, axis=1)

    C = np.reshape(C, (len(C)))
    # (removed debug print of hard-coded 'cu_con'/'zn_con'/'mn_con'
    # parameters, which raised KeyError for other element sets)
    return Intensity - Cal * C * num / den
def mback(energy, mu=None, group=None, order=3, z=None, edge='K', e0=None,
          emin=None, emax=None, whiteline=None, leexiang=False,
          tables='chantler', fit_erfc=False, return_f1=False, _larch=None):
    """
    Match mu(E) data for tabulated f''(E) using the MBACK algorithm and,
    optionally, the Lee & Xiang extension

    Arguments:
      energy, mu: arrays of energy and mu(E)
      order:      order of polynomial [3]
      group:      output group (and input group for e0)
      z:          Z number of absorber
      edge:       absorption edge (K, L3)
      e0:         edge energy
      emin:       beginning energy for fit
      emax:       ending energy for fit
      whiteline:  exclusion zone around white lines
      leexiang:   flag to use the Lee & Xiang extension
      tables:     'chantler' (default) or 'cl'
      fit_erfc:   True to float parameters of error function
      return_f1:  True to put the f1 array in the group

    Returns:
      group.f2:           tabulated f2(E)
      group.f1:           tabulated f1(E) (if return_f1 is True)
      group.fpp:          matched data
      group.mback_params: Group of parameters for the minimization

    References:
      * MBACK (Weng, Waldo, Penner-Hahn): http://dx.doi.org/10.1086/303711
      * Lee and Xiang: http://dx.doi.org/10.1088/0004-637X/702/2/970
      * Cromer-Liberman: http://dx.doi.org/10.1063/1.1674266
      * Chantler: http://dx.doi.org/10.1063/1.555974
    """
    # clamp polynomial order to [1, MAXORDER]
    order = int(order)
    if order < 1:
        order = 1
    if order > MAXORDER:
        order = MAXORDER

    ### implement the First Argument Group convention
    energy, mu, group = parse_group_args(energy,
                                         members=('energy', 'mu'),
                                         defaults=(mu, ),
                                         group=group,
                                         fcn_name='mback')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    group = set_xafsGroup(group, _larch=_larch)

    # resolve e0: explicit argument, tabulated edge energy, value on
    # the group, or run find_e0().  (Previously the find_e0() result
    # was never copied back into e0, leaving e0 as None for the
    # comparisons below; group.e0 was also read without getattr and
    # could raise AttributeError.)
    if e0 is None:
        e0 = xray_edge(z, edge, _larch=_larch)[0]
    if e0 is None:
        e0 = getattr(group, 'e0', None)
    if e0 is None:
        find_e0(energy, mu, group=group)
        e0 = group.e0

    ### theta is an array used to exclude the regions <emin, >emax, and
    ### around white lines, theta=0.0 in excluded regions, theta=1.0 elsewhere
    (i1, i2) = (0, len(energy) - 1)
    if emin is not None:
        i1 = index_of(energy, emin)
    if emax is not None:
        i2 = index_of(energy, emax)
    theta = np.ones(len(energy))  # default: 1 throughout
    theta[0:i1] = 0
    # NOTE(review): this leaves the very last point included even when
    # emax is given -- confirm theta[i2:] was not intended
    theta[i2:-1] = 0
    if whiteline:
        pre = 1.0 * (energy < e0)
        post = 1.0 * (energy > e0 + float(whiteline))
        theta = theta * (pre + post)
        # for L edges, also exclude the L2 white line region
        if edge.lower().startswith('l'):
            l2 = xray_edge(z, 'L2', _larch=_larch)[0]
            l2_pre = 1.0 * (energy < l2)
            l2_post = 1.0 * (energy > l2 + float(whiteline))
            theta = theta * (l2_pre + l2_post)

    ## this is used to weight the pre- and post-edge differently as
    ## defined in the MBACK paper
    weight1 = 1 * (energy < e0)
    weight2 = 1 * (energy > e0)
    weight = np.sqrt(sum(weight1)) * weight1 + np.sqrt(sum(weight2)) * weight2

    ## get the f'' function from CL or Chantler
    if tables.lower() == 'chantler':
        f1 = f1_chantler(z, energy, _larch=_larch)
        f2 = f2_chantler(z, energy, _larch=_larch)
    else:
        (f1, f2) = f1f2(z, energy, edge=edge, _larch=_larch)
    group.f2 = f2
    if return_f1:
        group.f1 = f1

    em = xray_line(z, edge.upper(), _larch=_larch)[0]  # erfc centroid

    params = Parameters()
    params.add(name='s', value=1, vary=True)  # scale of data
    params.add(name='xi', value=50, vary=fit_erfc, min=0)  # width of erfc
    params.add(name='a', value=0, vary=False)  # amplitude of erfc
    if fit_erfc:
        params['a'].value = 1
        params['a'].vary = True

    for i in range(order):  # polynomial coefficients
        params.add(name='c%d' % i, value=0, vary=True)

    out = minimize(match_f2, params, method='leastsq',
                   gtol=1.e-5, ftol=1.e-5, xtol=1.e-5, epsfcn=1.e-5,
                   kws=dict(en=energy, mu=mu, f2=f2, e0=e0, em=em,
                            order=order, weight=weight, theta=theta,
                            leexiang=leexiang))
    opars = out.params.valuesdict()

    # rebuild the fitted normalization function: erfc + polynomial in
    # (E - e0)
    eoff = energy - e0
    norm_function = opars['a'] * erfc(
        (energy - em) / opars['xi']) + opars['c0']
    for i in range(order):
        j = i + 1
        attr = 'c%d' % j
        if attr in opars:
            norm_function += opars[attr] * eoff**j

    group.e0 = e0
    group.fpp = opars['s'] * mu - norm_function
    group.mback_params = opars

    # calculate edge step from f2 + norm_function: should be very smooth
    # (removed an unused throwaway Group built from f2 - norm_function)
    pre_f2 = preedge(energy, group.f2 + norm_function, e0=e0, nnorm=2, nvict=0)
    group.edge_step = pre_f2['edge_step'] / opars['s']

    pre_fpp = preedge(energy, mu, e0=e0, nnorm=2, nvict=0)
    group.norm = (mu - pre_fpp['pre_edge']) / group.edge_step
def fluo_corr(energy, mu, formula, elem, group=None, edge='K', anginp=45,
              angout=45, _larch=None, **pre_kws):
    """Apply D. Haskel's FLUO over-absorption (self-absorption)
    correction to fluorescence XAFS data.

    Arguments
    ---------
      energy    array of energies
      mu        uncorrected fluorescence mu
      formula   string for sample stoichiometry
      elem      atomic symbol or Z of absorbing element
      group     output group [default None]
      edge      name of edge ('K', 'L3', ...) [default 'K']
      anginp    input angle in degrees [default 45]
      angout    output angle in degrees [default 45]

    Additional keywords are passed to the pre-edge normalization so
    the input and corrected spectra share consistent normalization.

    Returns
    -------
      None -- writes `mu_corr` and `norm_corr` (normalized `mu_corr`)
      to the output group.

    Notes
    -----
      Supports the First Argument Group convention, requiring group
      members 'energy' and 'mu'.
    """
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='fluo_corr')

    # angles in radians, kept strictly positive so the sine ratio
    # below is always well-defined
    theta_in, theta_out = (max(1.e-7, np.deg2rad(a))
                           for a in (anginp, angout))

    # normalized input spectrum used by the correction
    norm_in = preedge(energy, mu, **pre_kws)['norm']

    # edge and emission-line energies for the absorbing element
    e_edge = xray_edge(elem, edge, _larch=_larch)[0]
    e_line = xray_line(elem, edge, _larch=_larch)[0]

    # matrix attenuation at the emission line and 10 eV below/above
    # the absorption edge
    atten = material_mu(formula,
                        np.array([e_line, e_edge - 10.0, e_edge + 10.0]),
                        density=1, _larch=_larch)
    mu_emission = atten[0] * np.sin(theta_in) / np.sin(theta_out)
    mu_matrix = atten[1]
    mu_edge_jump = atten[2] - atten[1]

    alpha = (mu_emission + mu_matrix) / mu_edge_jump
    corrected = norm_in * alpha / (alpha + 1 - norm_in)
    corrected_norm = preedge(energy, corrected, **pre_kws)

    if group is not None:
        group = set_xafsGroup(group, _larch=_larch)
        group.mu_corr = corrected
        group.norm_corr = corrected_norm['norm']