def calculate_results(x, y, upperx, lowerx, power):
    """Fit a polynomial background between two x energies and subtract it.

    The window between the two limits (in either order) is used for the
    fit; the background is then evaluated over the whole x axis.
    """
    idx_lower = find_array_equivalent(x, lowerx)
    idx_upper = find_array_equivalent(x, upperx)
    # Order the two indices so the slice always runs low -> high.
    first, last = (idx_upper, idx_lower) if idx_upper <= idx_lower else (idx_lower, idx_upper)
    x_cut = x[first:last]
    y_cut = y[first:last]
    # Polynomial fitted on the window, evaluated on the full axis.
    polygen = np.poly1d(np.polyfit(x_cut, y_cut, power))
    background = np.array([polygen(e) for e in x])
    return x, y - background, background
def calculate_results1(x, y, lowerx, upperx, power, offset):
    """Fit a polynomial background to the data OUTSIDE [lowerx, upperx].

    Parameters
    ----------
    x, y : np.ndarray
        1-D energy axis and signal (2-D input is not handled here).
    lowerx, upperx : float
        Energies bounding the excluded (feature) region; either order works.
    power : int
        Polynomial order passed to np.polyfit.
    offset : str
        "off" fits the raw data; any other value shifts the post-feature
        segment by y[lower] - y[upper] before fitting.

    Returns
    -------
    tuple
        (x, y - background, background) for 1-D input; (x, None, None)
        for 2-D input (handled by the caller).
    """
    if y.ndim == 1:
        lower_idx = int(find_array_equivalent(x, lowerx))
        upper_idx = int(find_array_equivalent(x, upperx))
        # Slice limits in ascending order; the level shift is always
        # applied to the segment AFTER the excluded region.
        first, last = sorted((lower_idx, upper_idx))
        if offset == "off":
            y_tail = y[last:]
        else:
            # Level difference between the two limits, used to pull the
            # post-feature segment onto the pre-feature one for the fit.
            y_tail = y[last:] + (y[lower_idx] - y[upper_idx])
        x_cut = np.concatenate((x[:first], x[last:]), axis=0)
        y_cut = np.concatenate((y[:first], y_tail), axis=0)
        polygen = np.poly1d(np.polyfit(x_cut, y_cut, power))
        a_bg = np.array([polygen(e) for e in x])
        a_p_norm = y - a_bg
        return x, a_p_norm, a_bg
    elif y.ndim == 2:
        # BUG FIX: the original tested `absorption.ndim` (an undefined
        # name) and then returned unbound locals, raising NameError.
        # 2-D stacks are dealt with by the caller; return explicit Nones.
        print("Deal with at a higher level to avoid clutter")
        return x, None, None
def Normalise_spectra(x, y, p1, p2):
    """Normalises to 1 using the intensity difference between p2 and p1.

    If either reference energy is falsy the spectrum is returned unchanged.
    """
    if not (p1 and p2):
        return x, y
    denominator = y[find_array_equivalent(x, p2)] - y[find_array_equivalent(x, p1)]
    return x, y / denominator
def Transpose_spectra(energy, absorption, args):
    """Transpose_spectra(energy, absorption, args) requires args.x_value_for_transpose and args.action"""
    e = np.array(energy)
    a = np.array(absorption)
    # A 2-D array is a stack of spectra; otherwise treat as a single file.
    count = len(a) if a.ndim > 1 else 1
    out = []
    for idx in range(count):
        spectrum = a[idx]
        if args.action == "on":
            # Shift so the intensity at the chosen x value becomes zero.
            anchor = find_array_equivalent(e[idx], args.x_value_for_transpose)
            spectrum = spectrum - spectrum[anchor]
        out.append(spectrum)
    return out
def calculate_pre_edge_fit(x, y, pre_feature_min, pre_feature_max,
                           post_feature_min, post_feature_max):
    """Fit a Gaussian background around a pre-edge feature and subtract it.

    The two regions [pre_feature_min, pre_feature_max] and
    [post_feature_min, post_feature_max] (energies) are used for the fit;
    the feature between them is excluded.

    Returns
    -------
    tuple
        (x_out, y_out, fit): the energy axis cut to
        [pre_feature_min, post_feature_max], the background-subtracted
        signal over that window, and the fitted background.  For 2-D
        input nothing is computed (implicitly returns None).
    """
    if x.ndim == 1:
        # Convert the four energy limits to array indices.
        pre_min = int(find_array_equivalent(x, pre_feature_min))
        pre_max = int(find_array_equivalent(x, pre_feature_max))
        post_min = int(find_array_equivalent(x, post_feature_min))
        post_max = int(find_array_equivalent(x, post_feature_max))
        # Concatenate the two background regions for fitting.
        x_cut = np.concatenate((x[pre_min:pre_max], x[post_min:post_max]))
        y_cut = np.concatenate((y[pre_min:pre_max], y[post_min:post_max]))
        # Cut the data to the full fitted window.
        x_out = x[pre_min:post_max]
        y_out = y[pre_min:post_max]
        # NOTE(review): hard-coded initial guess (amplitude, centre ~6003,
        # width, offset) — assumes a specific edge; confirm for other data.
        initial_guess = [1.4, 6003, 2, 1]
        popt, _ = scipy.optimize.curve_fit(gaussian_func, x_cut, y_cut,
                                           p0=initial_guess)
        # PERF FIX: evaluate the fitted Gaussian once over the whole window.
        # The original re-evaluated the full array for every point (O(n^2)).
        fit = np.asarray(gaussian_func(x_out, *popt))
        y_out = y_out - fit
        return x_out, y_out, fit
    elif x.ndim == 2:
        print("2d")
def evaluate(self):
    """Look up the y value(s) at the x position given by self.lookup.default.

    Stores a one-element list (1-D data) or one value per spectrum (2-D
    data) in self.value; None when no lookup position is set.
    """
    x = np.array(self.inputarray.read()["data"][0])
    y = np.array(self.inputarray.read()["data"][1])
    target = self.lookup.default
    if not target:
        value = None
    # NOTE(review): `x.ndim and y.ndim == 1` parses as
    # `(x.ndim != 0) and (y.ndim == 1)` — kept as in the original.
    elif x.ndim and y.ndim == 1:
        value = [y[find_array_equivalent(x, target)]]
    elif x.ndim and y.ndim == 2:
        value = [
            y[i][find_array_equivalent(x[i], target)]
            for i in range(x.shape[0])
        ]
    self.value = value
    self.lines = [target]
def evaluate(self):
    """Compute the L3/(L2+L3) branching ratio of the input spectrum.

    Integrates y over [start, mid) (L3 edge) and [mid, end) (L2 edge)
    with the cumulative trapezium rule and stores
    a_l3 / (a_l2 + a_l3) in self.value_calc — a one-element list for 1-D
    input, one entry per spectrum for 2-D input.  self.lines records the
    three energies used.
    """
    x = np.array(self.input.read()["data"][0])
    y = np.array(self.input.read()["data"][1])
    # NOTE(review): `x.ndim and y.ndim == 1` parses as
    # `(x.ndim != 0) and (y.ndim == 1)`; probably both dims were meant
    # to be compared — confirm.
    if x.ndim and y.ndim == 1:
        # Convert the three energies to array indices.
        start = find_array_equivalent(x, self.start.default)
        mid = find_array_equivalent(x, self.mid.default)
        end = find_array_equivalent(x, self.end.default)
        # Cumulative area under the L3 edge.
        # NOTE(review): cumtrapz is deprecated in newer SciPy in favour
        # of cumulative_trapezoid.
        a_l3 = integrate.cumtrapz(
            y[start:mid],
            x[start:mid],
            initial=0,
        )
        # Cumulative area under the L2 edge.
        a_l2 = integrate.cumtrapz(
            y[mid:end],
            x[mid:end],
            initial=0,
        )
        # The [-1] entries are the total integrals.
        b_ratio = [a_l3[-1] / (a_l2[-1] + a_l3[-1])]
    elif x.ndim and y.ndim == 2:
        bratio_list = []
        for i in range(x.shape[0]):
            start = find_array_equivalent(x[i], self.start.default)
            mid = find_array_equivalent(x[i], self.mid.default)
            end = find_array_equivalent(x[i], self.end.default)
            a_l3 = integrate.cumtrapz(
                y[i][start:mid],
                x[i][start:mid],
                initial=0,
            )
            a_l2 = integrate.cumtrapz(
                y[i][mid:end],
                x[i][mid:end],
                initial=0,
            )
            b_ratio = a_l3[-1] / (a_l2[-1] + a_l3[-1])
            bratio_list.append(b_ratio)
        b_ratio = list(bratio_list)
    # NOTE(review): if neither branch runs, b_ratio is unbound and the
    # assignment below raises NameError — confirm intended guards.
    self.value_calc = b_ratio
    self.lines = [self.start.default, self.mid.default, self.end.default]
def background1(energy, absorption, args):
    """Fit and subtract a pre/post-edge background from one or more spectra.

    Parameters
    ----------
    energy, absorption : array-like
        A single spectrum (1-D) or a stack of spectra (2-D, one per row).
    args : namespace
        Needs ``p_start``/``p_end`` (energies bounding the edge region),
        ``apply_offset``, ``fit`` (one of the dropdown strings matched
        below — note the exact spacing inside those strings) and, for the
        polynomial fits, ``power``.

    Returns
    -------
    tuple
        (a_bg, a_p_norm): lists with one background array and one
        background-subtracted array per spectrum.

    NOTE(review): if ``args.p_end``/``args.p_start`` are falsy, ``ia_bg``
    is never assigned and ``np.array(ia_bg)`` below raises NameError (or
    reuses the previous iteration's value) — confirm intended guards.
    """
    loc_energy = np.array(energy)
    loc_absorption = np.array(absorption)
    # A 2-D array is a stack of spectra; otherwise a single file.
    if len(np.array(loc_absorption).shape) > 1:
        n_files = len(np.array(loc_absorption))
    else:
        n_files = 1
    a_bg = []
    a_p_norm = []
    for i in range(n_files):
        # if we have inital values
        if args.p_end and args.p_start:
            # Calc absorption level difference between end of pre-edge and start of post-edge
            if args.apply_offset == "off":
                a_offset = 0
                a_offset1 = 0
            else:
                # move post edge to preedge for fitting if called.
                post_edge_intensity = loc_absorption[i][find_array_equivalent(
                    loc_energy[i], args.p_end)]
                pre_edge_intensity = loc_absorption[i][find_array_equivalent(
                    loc_energy[i], args.p_start)]
                a_offset = post_edge_intensity - pre_edge_intensity
            # polynomial
            # Linear fit over everything OUTSIDE the [p_start, p_end] window.
            if (args.fit ==
                    "linear (fits for all e < 'Background_start' and e > ' Background_end') "
                ):
                # Polyfit and generate background series from it
                e_cut = np.concatenate((
                    loc_energy[i]
                    [:find_array_equivalent(loc_energy[i], args.p_start)],
                    loc_energy[i]
                    [find_array_equivalent(loc_energy[i], args.p_end):],
                ))
                a_cut = np.concatenate((
                    loc_absorption[i]
                    [:find_array_equivalent(loc_energy[i], args.p_start)],
                    (loc_absorption[i]
                     [find_array_equivalent(loc_energy[i], args.p_end):] -
                     a_offset),
                ))
                a_poly = np.polyfit(e_cut, a_cut, 1)
                a_polygen = np.poly1d(a_poly)
                ia_bg = [a_polygen(e) for e in loc_energy[i]]
            # Polynomial fit over the data INSIDE the window; either limit
            # order is accepted.
            if args.fit == "polynomial (fits for energies inside limits) ":
                # Polyfit and generate background series from it
                # e_cut = loc_energy[i] [ find_array_equivalent(loc_energy[i], args.p_start) : find_array_equivalent(loc_energy[i], args.p_end) ]
                # a_cut = loc_absorption[i] [ find_array_equivalent(loc_energy[i], args.p_start) : find_array_equivalent(loc_energy[i], args.p_end) ]
                if find_array_equivalent(loc_energy[i],
                                         args.p_end) <= find_array_equivalent(
                                             loc_energy[i], args.p_start):
                    e_cut = loc_energy[i][find_array_equivalent(
                        loc_energy[i], args.p_end
                    ):find_array_equivalent(loc_energy[i], args.p_start)]
                    a_cut = loc_absorption[i][find_array_equivalent(
                        loc_energy[i], args.p_end
                    ):find_array_equivalent(loc_energy[i], args.p_start)]
                elif find_array_equivalent(
                        loc_energy[i], args.p_start) < find_array_equivalent(
                            loc_energy[i], args.p_end):
                    e_cut = loc_energy[i][find_array_equivalent(
                        loc_energy[i], args.p_start
                    ):find_array_equivalent(loc_energy[i], args.p_end)]
                    a_cut = loc_absorption[i][find_array_equivalent(
                        loc_energy[i], args.p_start
                    ):find_array_equivalent(loc_energy[i], args.p_end)]
                a_poly = np.polyfit(e_cut, a_cut, args.power)
                a_polygen = np.poly1d(a_poly)
                ia_bg = [a_polygen(e) for e in loc_energy[i]]
            # NOTE(review): the dispatch below starts a NEW if/elif/else
            # chain; when args.fit matched one of the two branches ABOVE,
            # the final `else` still runs and overwrites ia_bg with zeros.
            # This looks like a bug — consider one single elif chain.
            # Polynomial fit over everything OUTSIDE the window.
            if (args.fit ==
                    "polynomial (fits for all e < 'Background_start' and e > ' Background_end')"
                ):
                # Polyfit and generate background series from it
                e_cut = np.concatenate((
                    loc_energy[i]
                    [:find_array_equivalent(loc_energy[i], args.p_start)],
                    loc_energy[i]
                    [find_array_equivalent(loc_energy[i], args.p_end):],
                ))
                a_cut = np.concatenate((
                    loc_absorption[i]
                    [:find_array_equivalent(loc_energy[i], args.p_start)],
                    (loc_absorption[i]
                     [find_array_equivalent(loc_energy[i], args.p_end):] -
                     a_offset),
                ))
                a_poly = np.polyfit(e_cut, a_cut, args.power)
                a_polygen = np.poly1d(a_poly)
                ia_bg = [a_polygen(e) for e in loc_energy[i]]
            # exponential decay
            elif (args.fit ==
                  "exp decay (fits for all e < 'Background_start' and e > ' Background_end')"
                  ):
                a_offset1 = 0
                # ppp1-ooo1
                # Energies are shifted by -750 (presumably to keep the
                # exponential well-conditioned — confirm).
                e_cut = (np.concatenate((
                    loc_energy[i]
                    [:find_array_equivalent(loc_energy[i], args.p_start)],
                    loc_energy[i]
                    [find_array_equivalent(loc_energy[i], args.p_end):],
                )) - 750)
                a_cut = np.concatenate((
                    loc_absorption[i]
                    [:find_array_equivalent(loc_energy[i], args.p_start)],
                    loc_absorption[i]
                    [find_array_equivalent(loc_energy[i], args.p_end):] -
                    a_offset1,
                ))
                transposed_energy = np.asarray(loc_energy[i]) - 750

                # fit values, and mean
                # Exponential decay model a*exp(-b*x) + c.
                def func(x, a, b, c):
                    return a * np.exp(-b * x) + c

                # NOTE(review): this priming evaluation is unused.
                y = func(transposed_energy, 2.3, 1.3, 1)
                popt, pcov = curve_fit(func, e_cut, a_cut)
                ia_bg = func(transposed_energy, *popt)
            # fit only considered 2 points at edge_start_index and edge_stop_index
            elif args.fit == "2 point linear (straight line between 2 points)":
                e_cut = [
                    loc_energy[i][find_array_equivalent(
                        loc_energy[i], args.p_start)],
                    loc_energy[i][find_array_equivalent(
                        loc_energy[i], args.p_end)],
                ]
                a_cut = [
                    loc_absorption[i][find_array_equivalent(
                        loc_energy[i], args.p_start)],
                    loc_absorption[i][find_array_equivalent(
                        loc_energy[i], args.p_end)],
                ]
                a_poly = np.polyfit(e_cut, a_cut, 1)
                a_polygen = np.poly1d(a_poly)
                ia_bg = [a_polygen(e) for e in loc_energy[i]]
            elif args.fit == "No fit":
                ia_bg = np.zeros(len(loc_absorption[i]))
            else:
                ia_bg = np.zeros(len(loc_absorption[i]))
        # Subtract the background from this spectrum and collect results.
        ia_bg = np.array(ia_bg)
        ia_p_norm = np.array(loc_absorption[i]) - np.array(ia_bg)
        a_p_norm.append(ia_p_norm)
        a_bg.append(ia_bg)
    return a_bg, a_p_norm
def step2(energy, absorption, other_spectra, args):
    """Build and subtract an L3/L2 two-step background for each spectrum.

    Parameters
    ----------
    energy, absorption : sequence of 1-D arrays (or 2-D array)
        Energy axes and absorption spectra, one per file.
    other_spectra : sequence
        Opposite-polarisation spectra; only the "Alpha" recipe uses them
        (averaged with ``absorption``).
    args : namespace
        Needs ``step_start``/``step_stop``/``step_intermediate`` energies,
        ``apply_step`` ("on"/"off"), ``fit_function`` ("Voight"/"Arctan")
        and ``fit_type`` ("Alpha"/"Beta"/other).

    Returns
    -------
    tuple
        (step, subtracted_step): lists of the constructed step function
        and absorption-minus-step, one entry per file.
    """
    loc_energy = energy
    loc_absorption = absorption
    loc_other_spectra = other_spectra
    # A 2-D array is a stack of spectra; otherwise a single file.
    if len(np.array(loc_absorption).shape) > 1:
        n_files = len(np.array(loc_absorption))
    else:
        n_files = 1
    step = []
    subtracted_step = []
    for i in range(n_files):
        # NOTE(review): if step_stop/step_start are falsy, `total` is
        # never assigned and the subtraction below raises NameError —
        # confirm intended guards.
        if args.step_stop and args.step_start:
            if args.apply_step == "on":
                # L3_peak, L2_peak = Identify_peak_position(args, loc_energy, loc_absorption)
                peaks = find_peaks_scipy1(loc_energy[i], loc_absorption[i])
                L3_peak_pos = peaks[0]
                L2_peak_pos = peaks[1]
                # peak_fwhm_calculation(args, L3_peak, L2_peak)
                # find array point
                step_stop_energy = find_array_equivalent(
                    loc_energy[i], args.step_stop)
                step_start_energy = find_array_equivalent(
                    loc_energy[i], args.step_start)
                # NOTE(review): computed but never used below.
                step_intermediate_energy = find_array_equivalent(
                    loc_energy[i], args.step_intermediate)
                # dummy variables
                fit_type = 1
                l2_peak = L2_peak_pos
                l3_peak = L3_peak_pos
                # el3_cut = len(energy[int(l3_peak-l3_fwhm):int(l3_peak+l3_fwhm)])
                # el2_cut = len(energy[int(l2_peak-l2_fwhm):int(l2_peak+l2_fwhm)])
                # Fixed +/-20-point windows around each peak set the widths.
                el3_cut = len(loc_energy[i][int(l3_peak - 20):int(l3_peak +
                                                                 20)])
                el2_cut = len(loc_energy[i][int(l2_peak - 20):int(l2_peak +
                                                                 20)])
                # create a linspace of equal length.
                xl3 = np.linspace(0, 1, el3_cut)
                xl2 = np.linspace(0, 1, el2_cut)
                xl3_arctan = np.linspace(-10, 10, el3_cut)
                xl2_arctan = np.linspace(-10, 10, el2_cut)
                # make a step function using the linspace created.
                # voight or a tangent function
                # NOTE(review): any other fit_function value leaves y/u
                # undefined (NameError).
                if args.fit_function == "Voight":
                    y = smoothstep(xl3, N=4)
                    u = smoothstep(xl2, N=4)
                elif args.fit_function == "Arctan":
                    y = np.arctan(xl3_arctan)
                    u = np.arctan(xl2_arctan)
                # Normalise both raw steps to the range [0, 1].
                y = y - min(y)
                u = u - min(u)
                y = y / max(y)
                u = u / max(u)
                if args.fit_type == "Alpha":
                    """
                    1-- Identifies the average intensity post step_stop and the intensity at step_start.
                    2-- Average the parallel and antiparallel spectra finding difference between average post step_stop and min post (L2 peak, pre step_stop)
                    -- Using this we take difference in intensitys of 1 and subtract 2 to get step height.
                    -- The step start is then shifted to intensity at step_start.
                    """
                    # absorption_difference = ((min(loc_absorption[i][l2_peak:find_array_equivalent(loc_energy[i], args.step_stop)])) - (loc_absorption[i][find_array_equivalent(loc_energy[i], args.step_start)])) / 3
                    # average both north and south spectra together.
                    average_spectra = (loc_absorption[i] +
                                       loc_other_spectra[i]) / 2
                    # find minima post L2_peak to step_stop
                    minima_post_l2_step_stop = min(
                        average_spectra[l2_peak:find_array_equivalent(
                            loc_energy[i], args.step_stop)])
                    # (mean intensity post step stop - step_start intensity) - (average spectra post step stop - minima post l2 peak)
                    absorption_difference = (
                        np.mean(loc_absorption[i][step_stop_energy:-1]) -
                        (loc_absorption[i][step_start_energy]) -
                        (np.mean(average_spectra[step_stop_energy:-1]) -
                         minima_post_l2_step_stop)) / 3
                    # transpose the step to intensity at step start energy.
                    l3_transpose = loc_absorption[i][find_array_equivalent(
                        loc_energy[i], args.step_start)]
                elif args.fit_type == "Beta":
                    """
                    --Simply identifies the intensity at step_stop and step_start and takes difference to find step height
                    --step_start is then shifted so that it starts at step_start.
                    --The step is always centered at the peak position with height 2/3 at L3 peak and 1/3 at L2 peak.
                    """
                    # calculate difference in step stop and step start intensity
                    absorption_difference = (
                        loc_absorption[i][step_stop_energy] -
                        loc_absorption[i][step_start_energy]) / 3
                    # transpose the step to intensity at step start energy.
                    l3_transpose = loc_absorption[i][find_array_equivalent(
                        loc_energy[i], args.step_start)]
                else:
                    # NOTE(review): `average`, `edge_stop_index`, `height`
                    # and `edge_start_index` are not defined anywhere in
                    # this function — this branch raises NameError if
                    # reached; confirm it is dead code or fix the names.
                    absorption_difference = (
                        (min(average[l2_peak:edge_stop_index]) +
                         (np.mean(loc_absorption[i][height:] -
                                  np.mean(average[height:])))) -
                        (((average[edge_start_index])))) / 3
                    l3_transpose = loc_absorption[i][edge_start_index]
                # L3 step carries 2/3 of the height, L2 1/3.
                y = y * absorption_difference * 2
                u = u * absorption_difference
                # find max gradient in steps
                l3_p = np.diff(y, n=1)
                oo = find_peaks_scipy(l3_p, l3_p)
                l2_p = np.diff(u, n=1)
                oooo = find_peaks_scipy(l2_p, l2_p)
                # create zeros up to L3 step
                pre = zerolistmaker(int(l3_peak - oo[0]) - 1)
                # concatenate
                pre_plus_l3 = np.concatenate([pre, y])
                # zeros of length than transpose to step.
                mid = (zerolistmaker(l2_peak - oooo[0] - len(pre_plus_l3)) +
                       pre_plus_l3[-1])
                # concatenate
                pre_l3_mid = np.concatenate([pre_plus_l3, mid, (u + mid[-1])])
                # post region
                post = (zerolistmaker(len(loc_energy[i]) - len(pre_l3_mid)) +
                        pre_l3_mid[-1])
                # concatenate
                total = np.concatenate([pre_l3_mid, post])
                total = total + l3_transpose
            elif args.apply_step == "off":
                # subtracted_step = loc_absorption[i]
                # stepfunction = np.zeros(len(loc_energy[i]))
                # Step disabled: flat zero step.
                total = np.zeros(len(loc_energy[i]))
                # loc_energy = energy.read()[0]
                # loc_absorption = absorption.read()[0]
            else:
                total = np.zeros(len(loc_energy[i]))
        post_step = loc_absorption[i] - total
        step.append(total)
        subtracted_step.append(post_step)
    return step, subtracted_step
def step3(x, y, start, mid, end, function, fittype):
    """Generate and subtract an L3/L2 two-step background from a spectrum.

    Parameters
    ----------
    x, y : 1-D array-like
        Energy axis and absorption intensities.
    start, mid, end : float
        Step start, intermediate and stop energies (``mid`` is converted
        to an index but not otherwise used by this implementation).
    function : str
        Step shape: "Voight" (smoothstep) or "Arctan".
    fittype : str
        Step-height recipe selector; see BUG FIX note below — the height
        calculation runs for every value.

    Returns
    -------
    tuple
        (x, y - step, step) for 1-D input; implicitly None otherwise.
    """
    if np.array(x).ndim == 1 and np.array(y).ndim == 1:
        # Locate the L3 and L2 peak indices.
        peaks = find_peaks_scipy1(x, y)
        l3_peak_pos = peaks[0]
        l2_peak_pos = peaks[1]
        # Convert the energy limits to array indices.
        step_stop_arr = find_array_equivalent(x, end)
        step_start_arr = find_array_equivalent(x, start)
        step_intermediate_arr = find_array_equivalent(x, mid)  # unused
        # Fixed +/-20-point windows around each peak set the step widths.
        el3_cut = len(x[int(l3_peak_pos - 20):int(l3_peak_pos + 20)])
        el2_cut = len(x[int(l2_peak_pos - 20):int(l2_peak_pos + 20)])
        # Linspaces over which the step templates are evaluated.
        xl3 = np.linspace(0, 1, el3_cut)
        xl2 = np.linspace(0, 1, el2_cut)
        xl3_arctan = np.linspace(-10, 10, el3_cut)
        xl2_arctan = np.linspace(-10, 10, el2_cut)
        # Build the raw step shapes (NOTE: any other `function` value
        # leaves yp/u undefined and raises NameError, as before).
        if function == "Voight":
            yp = smoothstep(xl3, N=4)
            u = smoothstep(xl2, N=4)
        elif function == "Arctan":
            yp = np.arctan(xl3_arctan)
            u = np.arctan(xl2_arctan)
        # Normalise both steps to the range [0, 1].
        yp = yp - min(yp)
        u = u - min(u)
        yp = yp / max(yp)
        u = u / max(u)
        # BUG FIX: the original guard `if fittype == "Beta" or "Alpha":`
        # was always true ("Alpha" is a truthy literal), so this height
        # calculation ran for EVERY fittype.  Made that unconditional
        # behaviour explicit instead of keeping a misleading dead test.
        # Step height = one third of the stop/start intensity difference;
        # the L3 step gets 2/3 of the total and L2 gets 1/3.
        absorption_difference = (y[step_stop_arr] - y[step_start_arr]) / 3
        # Shift the whole step up to the intensity at step start.
        l3_transpose = y[step_start_arr]
        yp = yp * absorption_difference * 2
        u = u * absorption_difference
        # Centre each step on its peak via the maximum-gradient point.
        l3_grad = np.diff(yp, n=1)
        l3_centre = find_peaks_scipy(l3_grad, l3_grad)
        l2_grad = np.diff(u, n=1)
        l2_centre = find_peaks_scipy(l2_grad, l2_grad)
        # Assemble: zeros up to L3, the L3 step, a flat mid section, the
        # L2 step, then a flat tail out to the end of the axis.
        pre = zerolistmaker(int(l3_peak_pos - l3_centre[0]) - 1)
        pre_plus_l3 = np.concatenate([pre, yp])
        flat_mid = (zerolistmaker(l2_peak_pos - l2_centre[0] -
                                  len(pre_plus_l3)) + pre_plus_l3[-1])
        pre_l3_mid = np.concatenate([pre_plus_l3, flat_mid,
                                     (u + flat_mid[-1])])
        post = (zerolistmaker(len(x) - len(pre_l3_mid)) + pre_l3_mid[-1])
        total = np.concatenate([pre_l3_mid, post]) + l3_transpose
        post_step = y - total
        return x, post_step, total
def single_step_xanes(energy: np.ndarray, absorption: np.ndarray, args):
    """Fit a Gaussian step background below the absorption edge.

    The fit uses the data before ``args.step_start`` and between
    ``args.step_stop`` and ``args.edge``; the step region in between is
    excluded.

    Parameters
    ----------
    energy, absorption : np.ndarray
        1-D (single spectrum) or 2-D (one spectrum per row) data.
    args : namespace
        Needs ``step_start``, ``step_stop``, ``edge`` and ``apply_step``.

    Returns
    -------
    For 1-D input: (x_cut, fit, absorption_cut - fit), all cut at ``edge``.
    For 2-D input: the same three quantities as lists, one per spectrum.
    """
    if energy.ndim == 1:
        # Convert the energy limits to array indices.
        step_stop_idx = find_array_equivalent(energy, args.step_stop)
        step_start_idx = find_array_equivalent(energy, args.step_start)
        edge = find_array_equivalent(energy, args.edge)
        # Concatenate the two background regions for fitting.
        x = np.concatenate([
            energy[:step_start_idx],
            energy[step_stop_idx:edge],
        ])
        # BUG FIX: the original concatenated `energy` slices here as well,
        # fitting the Gaussian to energy-vs-energy instead of absorption.
        y = np.concatenate([
            absorption[:step_start_idx],
            absorption[step_stop_idx:edge],
        ])
        # Cut the data to the edge.
        output_xdata = energy[:edge]
        output_ydata = absorption[:edge]
        # NOTE(review): hard-coded initial guess tuned to a ~6003 edge.
        initial_guess = [1.4, 6003, 2, 6000]
        popt, _ = scipy.optimize.curve_fit(gaussian_func, x, y,
                                           p0=initial_guess)
        # Evaluate the fitted Gaussian once over the cut axis (the
        # original re-evaluated the full array per point: O(n^2)).
        fit = np.asarray(gaussian_func(output_xdata, *popt))
        # BUG FIX: the original 1-D branch never returned and fell through
        # into the multi-file loop, which referenced undefined `loc_*`
        # names and raised NameError.
        return output_xdata, fit, output_ydata - fit

    if energy.ndim == 2:
        print("2d")
    # Multi-spectrum path: one Gaussian fit per row.
    loc_energy = np.array(energy)
    loc_absorption = np.array(absorption)
    n_files = len(loc_absorption) if loc_absorption.ndim > 1 else 1
    fit_calc = []
    subtracted_fit_calc = []
    xdata_calc = []
    for i in range(n_files):
        if (args.step_stop and args.step_start and args.edge
                and args.apply_step == "on"):
            # find array point
            step_stop_idx = find_array_equivalent(loc_energy[i],
                                                  args.step_stop)
            step_start_idx = find_array_equivalent(loc_energy[i],
                                                   args.step_start)
            edge = find_array_equivalent(loc_energy[i], args.edge)
            # concatenate data for fitting
            x = np.concatenate([
                loc_energy[i][:step_start_idx],
                loc_energy[i][step_stop_idx:edge],
            ])
            y = np.concatenate([
                loc_absorption[i][:step_start_idx],
                loc_absorption[i][step_stop_idx:edge],
            ])
            # cut data to edge
            output_ydata = loc_absorption[i][:edge]
            output_xdata = loc_energy[i][:edge]
            initial_guess = [1.4, 6003, 2, 6000]
            popt, _ = scipy.optimize.curve_fit(gaussian_func, x, y,
                                               p0=initial_guess)
            fit = np.asarray(gaussian_func(output_xdata, *popt))
        else:
            # Step disabled or limits missing: pass the data through with
            # a zero fit (the original raised NameError when the limits
            # were falsy).
            output_xdata = loc_energy[i]
            output_ydata = loc_absorption[i]
            fit = np.zeros(len(loc_energy[i]))
        subtracted_fit = output_ydata - fit
        fit_calc.append(fit)
        subtracted_fit_calc.append(subtracted_fit)
        xdata_calc.append(output_xdata)
    return xdata_calc, fit_calc, subtracted_fit_calc
def evaluate(self):
    """Compute the XAS signal (sum of both polarisation spectra), its
    background and its cumulative integral between the background limits.

    Reads the parallel (a_p_norm) and antiparallel (a_a_norm) spectra,
    adds them, removes a background via background_xmcd() and integrates
    the result with the trapezium rule.  Results are stored on
    self.xas_calc / self.xas_bg_calc / self.xas_integral_calc, and
    self.lines records the two limit energies.
    """
    local_arguments = args_xas(self)
    ax = np.array(self.a_p_norm.read()["data"][0])
    bx = np.array(self.a_a_norm.read()["data"][0])
    ay = np.array(self.a_p_norm.read()["data"][1])
    by = np.array(self.a_a_norm.read()["data"][1])
    # NOTE(review): `ax.ndim and bx.ndim == 1` parses as
    # `(ax.ndim != 0) and (bx.ndim == 1)` — probably both dims were
    # meant to be tested; confirm.
    if ax.ndim and bx.ndim == 1:
        # XAS = sum of the two polarisation spectra.
        add = by + ay
        integral_start = find_array_equivalent(
            ax, local_arguments.background_start)
        integral_end = find_array_equivalent(
            ax, local_arguments.background_stop)
        xas_bg_i, xas_i = background_xmcd(
            energy=np.array(ax),
            xmcd=add,
            args=local_arguments,
        )
        # Cumulative integral of the background-subtracted XAS between
        # the two limits.
        xas_integral_i = integrate.cumtrapz(
            xas_i[integral_start:integral_end],
            ax[integral_start:integral_end],
        )
        # self.xas_integral = np.append(xas_integral[0], xas_integral[0][-1])
        # Pad with zeros before the start and with the final value after
        # the end so the integral spans the full energy axis.
        # NOTE(review): in the first zerolistmaker the `+ 1` sits INSIDE
        # len() and is applied elementwise to the array (the length is
        # unchanged), while in the second it is added to the length —
        # the asymmetry looks like a bug; confirm intended padding.
        xas_integral1 = np.concatenate([
            zerolistmaker(len(ax[0:integral_start] + 1)),
            xas_integral_i,
            (zerolistmaker(len(ax[integral_end:]) + 1) +
             xas_integral_i[-1]),
        ])
        xas_integral = np.array(xas_integral1)
        xas_bg = np.array(xas_bg_i)
        xas = np.array(xas_i)
    elif ax.ndim and bx.ndim == 2:
        xas_bg = []
        xas = []
        xas_integral = []
        for i in range(ax.shape[0]):
            # Re-read and sum the i-th pair of spectra.
            add = (np.array(self.a_p_norm.read()["data"][1])[i] +
                   np.array(self.a_a_norm.read()["data"][1])[i])
            integral_start = find_array_equivalent(
                self.a_a_norm.read()["data"][0][i],
                local_arguments.background_start)
            integral_end = find_array_equivalent(
                self.a_a_norm.read()["data"][0][i],
                local_arguments.background_stop)
            xas_bg_i, xas_i = background_xmcd(
                energy=np.array(self.a_a_norm.read()["data"][0])[i],
                xmcd=add,
                args=local_arguments,
            )
            xas_integral_i = integrate.cumtrapz(
                xas_i[integral_start:integral_end],
                self.a_a_norm.read()["data"][0][i]
                [integral_start:integral_end],
            )
            # self.xas_integral = np.append(xas_integral[0], xas_integral[0][-1])
            # NOTE(review): the padding below indexes row [0] rather than
            # row [i] — presumably all rows share one axis; confirm.
            xas_integral1 = np.concatenate([
                zerolistmaker(
                    len(self.a_a_norm.read()["data"][0][0]
                        [0:integral_start] + 1)),
                xas_integral_i,
                (zerolistmaker(
                    len(self.a_a_norm.read()["data"][0][0][integral_end:])
                    + 1) + xas_integral_i[-1]),
            ])
            xas_integral_i = list(xas_integral1)
            xas_integral.append(xas_integral_i)
            xas.append(xas_i)
            xas_bg.append(xas_bg_i)
        xas_bg = np.array(xas_bg)
        xas = np.array(xas)
        xas_integral = np.array(xas_integral)
    self.xas_calc = xas
    self.xas_bg_calc = xas_bg
    self.xas_integral_calc = xas_integral
    self.lines = [
        self.background_start.default, self.background_stop.default
    ]
def background_xmcd(energy, xmcd, args):
    """Fit and subtract a background from an XMCD (or summed XAS) spectrum.

    Parameters
    ----------
    energy, xmcd : array-like
        1-D energy axis and signal.
    args : namespace
        Needs ``fit`` (one of "linear", "polynomial", "exp decay",
        "2 point linear", "Do Nothing!!"), ``background_start``,
        ``background_stop`` and, for "polynomial", ``power``.

    Returns
    -------
    tuple
        (background, subtracted_background) as np.ndarray.

    Raises
    ------
    ValueError
        If ``args.fit`` is not a recognised option (the original code
        raised an opaque NameError in that case).
    """
    loc_energy = np.array(energy)
    loc_absorption = np.array(xmcd)

    if args.fit == "Do Nothing!!":
        # No background: flat zeros.
        ia_bg = np.zeros(len(loc_energy))
    else:
        # All fitting modes exclude the region between the two limits.
        start_idx = find_array_equivalent(loc_energy, args.background_start)
        stop_idx = find_array_equivalent(loc_energy, args.background_stop)
        if args.fit in ("linear", "polynomial"):
            # Polyfit on everything outside [start, stop]; degree 1 for
            # "linear", args.power for "polynomial".
            e_cut = np.concatenate(
                (loc_energy[:start_idx], loc_energy[stop_idx:]))
            a_cut = np.concatenate(
                (loc_absorption[:start_idx], loc_absorption[stop_idx:]))
            degree = 1 if args.fit == "linear" else args.power
            polygen = np.poly1d(np.polyfit(e_cut, a_cut, degree))
            ia_bg = [polygen(e) for e in loc_energy]
        elif args.fit == "exp decay":
            # Energies are shifted by -750 (presumably to keep the
            # exponential well-conditioned — confirm for other edges).
            e_cut = np.concatenate(
                (loc_energy[:start_idx], loc_energy[stop_idx:])) - 750
            a_cut = np.concatenate(
                (loc_absorption[:start_idx], loc_absorption[stop_idx:]))
            transposed_energy = np.asarray(loc_energy) - 750

            # Exponential decay model a*exp(-b*x) + c.
            def func(x, a, b, c):
                return a * np.exp(-b * x) + c

            popt, _ = curve_fit(func, e_cut, a_cut)
            ia_bg = func(transposed_energy, *popt)
        elif args.fit == "2 point linear":
            # Straight line through the two limit points only.
            e_cut = [loc_energy[start_idx], loc_energy[stop_idx]]
            a_cut = [loc_absorption[start_idx], loc_absorption[stop_idx]]
            polygen = np.poly1d(np.polyfit(e_cut, a_cut, 1))
            ia_bg = [polygen(e) for e in loc_energy]
        else:
            # BUG FIX: unknown fit types previously left ia_bg unbound
            # and crashed below with NameError.
            raise ValueError(f"Unknown background fit type: {args.fit!r}")

    background = np.array(ia_bg)
    subtracted_background = np.array(loc_absorption) - np.array(ia_bg)
    return background, subtracted_background