def MTF(Y, X, obtain_mtf_at, path):
    """
    Fit a polynomial to the MTF curve
    Fails for scattered data - useless
    """
    poly_mod = PolynomialModel(6)
    pars = poly_mod.guess(Y, x=X)
    model = poly_mod
    result = model.fit(Y, pars, x=X)

    c0 = result.best_values['c0']
    c1 = result.best_values['c1']
    c2 = result.best_values['c2']
    c3 = result.best_values['c3']
    c4 = result.best_values['c4']
    c5 = result.best_values['c5']
    params = [c0, c1, c2, c3, c4, c5]

    # Produce a table with values of contrast vs resolution
    if path != False:
        f = open(path + 'contrast_vs_distance.txt', 'w')
        for contrast in range(0, 20):
            resolution = polynomial(contrast, params)
            value = [contrast, resolution]
            f.write(repr(value) + '\n')
        f.close()

    resolution = polynomial(obtain_mtf_at, params)
    return result.best_fit, resolution
def fit_polynomial(self, max_degree: int = 7) -> List[float]:
    """
    Fit the highest degree polynomial possible with the available energy data.
    (0 degree is actually 1st degree with 0 shift)

    Parameters:
        max_degree (int): Max degree of polynomial to try and fit. (Max possible: 7)

    Returns:
        (List[float]): The polynomial coefficients from highest degree to the constant term
    """
    energies = self.refined_energies if self.refined_energies else self.energies
    if not energies:
        raise ValueError("No known energies found for strip {}".format(self.number))
    degree = min(len(energies) - 1, max_degree) if max_degree is not None else len(energies) - 1
    model = PolynomialModel(max(degree, 1))
    x, y = [*zip(*energies)]
    pars = model.guess(y, x=x)
    if degree == 0:
        pars["c0"] = Parameter("c0", value=0, vary=False)
    out = model.fit(y, pars, x=x)
    self.polynomial_coefficients = list(
        reversed([p[1].value for p in out.params.items()]))
    return self.polynomial_coefficients
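The coefficient ordering noted in the docstring (highest degree first) is the ordering that `numpy.polyval` expects, so the returned list can be evaluated directly. A minimal usage sketch, assuming a hypothetical `strip` object whose `energies` are already populated:

import numpy as np

# hypothetical strip object with calibration energies already attached
coeffs = strip.fit_polynomial(max_degree=3)   # highest degree ... constant term
channel = 512.0                               # example channel to calibrate
energy = np.polyval(coeffs, channel)          # np.polyval expects this ordering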
def MTF(Y, X):
    """ Fit a polynomial to the MTF curve """
    poly_mod = PolynomialModel(6)
    pars = poly_mod.guess(Y, x=X)
    model = poly_mod
    result = model.fit(Y, pars, x=X)

    # write error report
    print(result.fit_report())

    c0 = result.best_values['c0']
    c1 = result.best_values['c1']
    c2 = result.best_values['c2']
    c3 = result.best_values['c3']
    c4 = result.best_values['c4']
    c5 = result.best_values['c5']
    params = [c0, c1, c2, c3, c4, c5]

    resolution = polynomial(10., params)
    return result.best_fit, resolution
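Both MTF snippets above rely on a `polynomial(x, params)` helper that is not shown. A minimal sketch of what such a helper could look like, assuming `params` holds the fitted coefficients in the order [c0, c1, ..., c5]:

def polynomial(x, params):
    """Evaluate c0 + c1*x + c2*x**2 + ... for coefficients ordered [c0, c1, ...]."""
    result = 0.0
    for power, coeff in enumerate(params):
        result += coeff * x**power
    return result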
def readFits(inFile):
    #import the relevant modules
    table = pyfits.open(inFile)
    data = table[1].data
    flux = data.field('flux')
    wavelength = 10**(data.field('loglam'))
    ivar = data.field('ivar')
    weights = ivar
    redshift_data = table[2].data.field('Z')

    #y_av = toolkit.movingaverage(flux, 50)
    #coefs = poly.polyfit(wavelength, flux, 8, w=ivar)
    #ffit = poly.polyval(wavelength, coefs)

    mod2 = PolynomialModel(6)
    pars = mod2.guess(flux, x=wavelength)
    out = mod2.fit(flux, pars, x=wavelength)
    #print(out.fit_report(min_correl=0.25))
    #plt.plot(wavelength, out.best_fit, 'r-', linewidth=2)

    #Choose to plot the results
    #plt.plot(wavelength, flux, label='flux')
    #plt.plot(wavelength, y_av, label='boxcar')
    #plt.plot(wavelength, ffit , label='fit')
    #legend()
    #plt.show()
    #plt.close('all')

    return {
        'flux': flux,
        'wavelength': wavelength,
        'z': redshift_data,
        'continuum': out.best_fit,
        'error': weights
    }
def FitBaseline(self, x, y, xregion, show=False, degree=1):
    """
    Fit the baseline using the `PolynomialModel()
    <https://lmfit.github.io/lmfit-py/builtin_models.html#lmfit.models.PolynomialModel>`_
    from lmfit.

    Parameters
    ----------
    x : numpy.array
        x-values of spectrum which should be background-corrected.
    y : numpy.array
        y-values of spectrum which should be background-corrected.
    xregion : numpy.array
        Region of the spectrum used to construct the mask for the baseline fit.
    show : boolean, default: False
        Decides whether a window with the fitted baseline is opened or not.
    degree : int, default: 1
        Degree of the polynomial that describes the background.

    Returns
    -------
    fitresult : lmfit.model.ModelResult
        Fit result of the baseline model; evaluate it to obtain the
        baseline of the input spectrum.
    """
    relevant = self.CreateMask(x, xregion)

    # polynomial to model the background
    background = PolynomialModel(degree=degree)
    pars = background.guess(y[relevant], x=x[relevant])
    fitresult = background.fit(y[relevant], pars, x=x[relevant])

    return fitresult
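A possible way to apply `FitBaseline` for an actual background correction: the returned `ModelResult` is evaluated on the full x axis and subtracted. The `spectrum` object and the `xregion` value below are only illustrative:

# hypothetical spectrum object exposing FitBaseline / CreateMask
fitresult = spectrum.FitBaseline(x, y, xregion=(100, 900), degree=3)
baseline = fitresult.eval(x=x)   # evaluate the fitted polynomial over the full x range
y_corrected = y - baseline       # background-corrected spectrum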
def fitPolynomial(x, y, order):
    model = PolynomialModel(order)
    pars = model.guess(y, x=x)
    out = model.fit(y, pars, x=x)
    # out.params.update({'order':order})
    print(out.fit_report())
    return out
def fit_background(): ''' Fit the background ''' bg_mod = PolynomialModel(vPolynomial.get(), prefix='bg_') # Background if vDelimiter.get() == 'space': delimiter = None elif vDelimiter.get() == 'comma': delimiter = ',' data = np.loadtxt(filename, skiprows=vSkipRows.get(), delimiter=delimiter) original_data = np.vstack((data[:, vColumnX.get()-1], data[:, vColumnY.get()-1])).T selectionRange = (vStartLeftX.get(), vStartRightX.get(), vEndLeftX.get(), vEndRightX.get()) x_bg, y_bg = select_bg_data(original_data, selectionRange) pars = bg_mod.guess(y_bg, x=x_bg) mod = bg_mod init = mod.eval(pars, x=x_bg) out = mod.fit(y_bg, pars, x=x_bg) axe.cla() axe.plot(data[:, vColumnX.get()-1], data[:, vColumnY.get()-1], 'b.') axe.plot(x_bg, out.eval(), 'r-') # Background plotting # axe.xlim([original_data[0, 0], original_data[1, -1]]) figWindowNew, canvasNew, toolbarNew = createFigWindow(fig) canvasNew.show() toolbarNew.update() # canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1) figWindowNew.wm_attributes('-topmost') # Activate the plotting window
def MTF(Y, X):
    """ Fit a polynomial to the MTF curve """
    pow_mod = PowerLawModel(prefix='pow_')
    lin_mod = LinearModel(prefix="lin_")
    const_mod = Model(sigmoid)
    poly_mod = PolynomialModel(3)
    #X = list(reversed(X))
    pars = poly_mod.guess(Y, x=X) + lin_mod.guess(Y, x=X)
    model = poly_mod + lin_mod
    result = model.fit(Y, pars, x=X)

    # write error report
    print(result.fit_report())

    c0 = result.best_values['c0']
    c1 = result.best_values['c1']
    c2 = result.best_values['c2']
    slop = result.best_values['lin_slope']
    inter = result.best_values['lin_intercept']
    # c3 = result.best_values['c3']
    # c4 = result.best_values['c4']
    # c5 = result.best_values['c5']
    # c6 = result.best_values['c6']
    # A = result.best_values["amplitude"]
    # k = result.best_values["exponent"]

    limit = polynomial(c0, c1, c2, inter, slop, 10.)
    # limit = A*9**k
    return result.best_fit, limit
def fit_data_bg(x, y, peak_pos, peak_type='LO', width=None, bg_ord=0):
    """
    Builds a lmfit model of peaks listed by index in `peak_pos`

    Parameters
    ----------
    peak_type : string (default='lorentzian')
        Peaks can be of the following types:
        - 'LO' : symmetric lorentzian
        - 'GA' : symmetric gaussian
        - 'VO' : symmetric pseudo voigt
    max_width : int (default = total points/10)
        max width (in data points) that peak fitted can be
    bg_ord : int
        order of the background polynomial
        0: constant, 1: linear, ...

    Returns
    -------
    out : fitted model
    """
    # need to define peak width finding
    if width is None:
        width = guess_peak_width(x, y)

    # start with polynomial background
    model = PolynomialModel(bg_ord, prefix='bg_')
    pars = model.make_params()

    if peak_type == 'LO':
        peak_function = lorentzian
    elif peak_type == 'GA':
        peak_function = gaussian
    elif peak_type == 'VO':
        peak_function = voigt

    # add peak type for all peaks
    for i, peak in enumerate(peak_pos):
        temp_model = Model(peak_function, prefix='p%s_' % i)
        pars.update(temp_model.make_params())
        model += temp_model

    # set initial background as flat line at zeros
    for i in range(bg_ord + 1):
        pars['bg_c%i' % i].set(0)

    # give values for other peaks, keeping width and height positive
    for i, peak in enumerate(peak_pos):
        pars['p%s_x0' % i].set(x[peak])
        pars['p%s_fwhm' % i].set(width, min=0)
        pars['p%s_amp' % i].set(y[peak], min=0)

    out = model.fit(y, pars, x=x)
    return out
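`fit_data_bg` builds its peaks from plain functions (`lorentzian`, `gaussian`, `voigt`) whose definitions are not included here; the parameter names set above imply a signature of the form `(x, x0, fwhm, amp)`. A minimal sketch of a compatible Lorentzian, stated as an assumption rather than the original definition:

import numpy as np

def lorentzian(x, x0=0.0, fwhm=1.0, amp=1.0):
    """Lorentzian peak with height `amp`, centre `x0` and full width at half maximum `fwhm`."""
    hwhm = fwhm / 2.0
    return amp * hwhm**2 / ((x - x0)**2 + hwhm**2)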
def int_data(): ''' Integrate data in given area ''' bg_mod = PolynomialModel(vPolynomial.get(), prefix='bg_') # Background if vDelimiter.get() == 'space': delimiter = None elif vDelimiter.get() == 'comma': delimiter = ',' data = np.loadtxt(filename, skiprows=vSkipRows.get(), delimiter=delimiter) original_data = np.vstack((data[:, vColumnX.get()-1], data[:, vColumnY.get()-1])).T selectionRange = (vStartLeftX.get(), vStartRightX.get(), vEndLeftX.get(), vEndRightX.get()) x_bg, y_bg = select_bg_data(original_data, selectionRange) pars = bg_mod.guess(y_bg, x=x_bg) mod = bg_mod init = mod.eval(pars, x=x_bg) out = mod.fit(y_bg, pars, x=x_bg) x, y = data[:, vColumnX.get()-1], data[:, vColumnY.get()-1] comp = out.eval_components(x=x) out_param = out.params y_bg_fit = bg_mod.eval(params=out_param, x=x) y_bg_remove = y - y_bg_fit startLine, endLine = None, None for i in xrange(np.size(x)): if x[i] >= vIntLeftX.get() and startLine is None: startLine = i if startLine != None and endLine is None and x[i] >= vIntRightX.get(): endLine = i x_int = x[startLine:endLine] y_int = y_bg_remove[startLine:endLine] y_bg_fit_ = y_bg_fit[startLine:endLine] y_orig = y[startLine:endLine] integration = np.trapz(y_int, x_int) # plot axe.cla() axe.plot(x, y, 'b.') axe.plot(x_bg, out.best_fit, 'r-') # Background plotting # axe.xlim([x[0], x[-1]]) axe.fill_between(x_int, y_orig, y_bg_fit_, facecolor='green') figWindowNew, canvasNew, toolbarNew = createFigWindow(fig) canvasNew.show() toolbarNew.update() # canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1) figWindowNew.wm_attributes('-topmost') # Activate the plotting window # report reportWindow = Tk.Tk() report = 'Integration area = ' + repr(integration) reportText = Tk.Text(reportWindow) reportText.insert(Tk.INSERT, report) reportText.pack() reportWindow.wm_attributes('-topmost')
def fit_experimental_data_gauss(exp_x, exp_y, expected_peak_pos, deg_of_bck_poly=5, maxfev=25000): num_of_peaks = len(expected_peak_pos) mod = PolynomialModel(deg_of_bck_poly, prefix='poly_') for c in range(num_of_peaks): mod = mod + PseudoVoigtModel(prefix='p{}_'.format(c)) params = mod.make_params() center = 0 sigma = 0 amplitude = 0 fraction = 0 for param in params: if 'center' in param: params[param].set(value=expected_peak_pos[center]) params[param].set(min=expected_peak_pos[center] - 0.5) params[param].set(max=expected_peak_pos[center] + 0.5) center += 1 if 'poly' in param: if param == 'poly_c0': params[param].set(value=50) params[param].set(min=-100) params[param].set(max=100) continue if param == 'poly_c1': params[param].set(value=-1) params[param].set(min=-100) params[param].set(max=100) continue params[param].set(value=0) ## params[param].set(min = 3e-1) ## params[param].set(max = 3e-1) if 'sigma' in param: params[param].set(value=0.5) params[param].set(min=0.0001) params[param].set(max=0.8) sigma += 1 if 'amplitude' in param: params[param].set(value=5.5) params[param].set(min=0.0001) amplitude += 1 if 'fraction' in param: params[param].set(value=0.0) params[param].set(min=0.000) params[param].set(max=0.000001) fraction += 1 result = mod.fit(np.asarray(exp_y), params, x=np.asarray(exp_x), fit_kws={'maxfev': maxfev}) print(result.fit_report()) return result
def fit_data_bg(x, y, peak_pos, peak_type="LO", width=None, bg_ord=0): """ Builds a lmfit model of peaks in listed by index in `peak_pos` Parameters ---------- peak_type : string (default='lorentizian') Peaks can be of the following types: - 'LO' : symmetric lorentzian - 'GA' : symmetric gaussain - 'VO' : symmetric pseudo voigt max_width : int (default = total points/10) max width (in data points) that peak fitted can be bg_ord: int order of the background polynomial 0: constant, 1: linear, ... Returns ------- out: fitted model """ # need to define peak width finding if width is None: width = guess_peak_width(x, y) # start with polynomial background model = PolynomialModel(bg_ord, prefix="bg_") pars = model.make_params() if peak_type == "LO": peak_function = lorentzian elif peak_type == "GA": peak_function = gaussian elif peak_type == "VO": peak_function = voigt # add peak type for all peaks for i, peak in enumerate(peak_pos): temp_model = Model(peak_function, prefix="p%s_" % i) pars.update(temp_model.make_params()) model += temp_model # set initial background as flat line at zeros for i in range(bg_ord + 1): pars["bg_c%i" % i].set(0) # give values for other peaks, keeping width and height positive for i, peak in enumerate(peak_pos): pars["p%s_x0" % i].set(x[peak]) pars["p%s_fwhm" % i].set(width, min=0) pars["p%s_amp" % i].set(y[peak], min=0) out = model.fit(y, pars, x=x) return out
def fit_single_line(xin, yin, line_c=8.04, use_weights=True, conf=True): ''' PURPOSE: Fit a model of Gaussian line on a polynomial(2) continuum INPUTS: xin - array, the energy channel (in keV) yin - array, the counts in channel line_c - float, is the initial energy of the line in same units as xin conf - bool, if confidence interval for the parameters is to be calculated OUTPUTS: a tuple of the full fit output class and confidence intervals (if asked) NOTES: * The Gaussian sigma of the line is only allowed within a certain range: 80 to 250 eV * The fit is performed with LMFIT ''' # poly_mod = PolynomialModel(2, prefix='poly_') pars = poly_mod.guess(yin, x=xin) # gauss1 = GaussianModel(prefix="g1_") pars.update(gauss1.make_params()) pars['g1_center'].set(line_c, min=line_c - 0.25, max=line_c + 0.25) pars['g1_sigma'].set(0.1, min=0.08, max=0.250) # mod = poly_mod + gauss1 # if (use_weights): yerr = np.sqrt(yin) w = np.divide(1.0, yerr, where=yerr != 0) try: out = mod.fit(yin, pars, x=xin, weights=w, nan_policy='omit') except: return None else: try: out = mod.fit(yin, pars, x=xin, nan_policy='omit') except: return None if (conf): # # confidence intervals on parameters, if needed # ci_out = out.conf_interval() else: ci_out = None # return (out, ci_out)
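A short usage sketch for `fit_single_line`, assuming `energy` and `counts` arrays for the spectrum; the fitted line centre and its confidence interval are read from the returned tuple:

res = fit_single_line(energy, counts, line_c=8.04, use_weights=True, conf=True)
if res is not None:
    out, ci_out = res
    print(out.params['g1_center'].value, out.params['g1_sigma'].value)
    if ci_out is not None:
        print(ci_out['g1_center'])   # confidence interval for the line centre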
def fitPoly(self, flux, wavelength):
    #Create the polynomial model from lmFit (from lmfit import PolynomialModel)
    mod = PolynomialModel(6)
    #Have an initial guess at the model parameters
    pars = mod.guess(flux, x=wavelength)
    #Use the parameters for the full model fit
    out = mod.fit(flux, pars, x=wavelength)
    #The output of the model is the fitted continuum
    continuum = out.best_fit
    #return this value
    return continuum
def bg_sub(raman_spectra, plot_each=False, reduce_region=[3090, 4000], cut_region=[3150, 3722], order=2, to_run='all'): bg = PolynomialModel(order) flag = False if type(raman_spectra) == AHR.RamanSpectrum: raman_spectra = {'a': raman_spectra} flag = True BGsub = {} for key, spec in raman_spectra.items(): if to_run != 'all': if key not in to_run: continue spec_red = spec.reduce_wn_region(reduce_region) spec_cut = spec_red.cut_wn_region(cut_region) spec_cut_x = spec_cut.wn spec_cut_y = np.squeeze(spec_cut.spec_data.T) bg_params = bg.make_params(c0=0, c1=0, c2=0, c3=0, c4=0, c5=5, c6=0, c7=0) bg_fit = bg.fit(spec_cut_y, x=spec_cut_x, params=bg_params) spec_red_bg_sub = AHR.RamanSpectrum( spec_red.wn, np.squeeze(spec_red.spec_data.T) - bg_fit.eval(x=spec_red.wn)) BGsub[key] = spec_red_bg_sub if plot_each: fig, ax = plt.subplots() ax.plot(spec_red.wn, spec_red.spec_data.T) ax.scatter(spec_cut.wn, spec_cut.spec_data.T, s=3, color='orange') ax.plot(spec_red.wn, bg_fit.eval(x=spec_red.wn)) ax.set_title(key) if flag: return BGsub['a'] else: return BGsub
def fit_cu_region(xin, yin, use_weights=True): ''' PURPOSE: Fit 4 lines in the Cu-Ka region, Gaussian models for Cu-Ka (8.04 keV), Ni-Ka (7.47 keV), Zn-Ka (8.63 keV), Cu-Kb (8.9 keV) and a polynomial(2) continuum INPUTS: xin is the energy channel (in keV) yin is the counts OUTPUTS: a tuple of the full fit output class and the results line in ascii. NOTES: the Gaussian sigma of the line is only allowed within a certain range: 80 to 250 eV ''' i1max = np.argmax(yin) y1max = yin[i1max] # poly_mod = PolynomialModel(1, prefix='poly_') pars = poly_mod.guess(yin, x=xin) # lines = [7.47, 8.04, 8.63, 8.90] # keV prfx = ['nika', 'cuka', 'znka', 'cukb'] mod = poly_mod for i, j in zip(lines, prfx): gauss = GaussianModel(prefix=f'{j}_') pars.update(gauss.make_params()) pars[f'{j}_center'].set(i, min=i - 0.25, max=i + 0.25) pars[f'{j}_sigma'].set(0.1, min=0.06, max=0.250) mod += gauss if (use_weights): yerr = np.sqrt(yin) w = np.divide(1.0, yerr, where=yerr != 0) try: out = mod.fit(yin, pars, x=xin, weights=w, nan_policy='omit') except: return None else: try: out = mod.fit(yin, pars, x=xin, nan_policy='omit') except: return None # return out
def fit_mn_line(xin, yin, line_c=5.8988, use_weights=True): ''' PURPOSE: Fit the Mn Ka line (5.8988 keV), the model is a polynomial(2) + a Gaussian line. INPUTS: xin is the energy channel (in keV) yin is the counts line_c is the initial energy of the line (in keV) OUTPUTS: a tuple of the full fit output class and the results line in ascii. NOTES: the Gaussian sigma of the line is only allowed within a certain range: 80 to 250 eV ''' i1max = np.argmax(yin) y1max = yin[i1max] # poly_mod = PolynomialModel(1, prefix='poly_') pars = poly_mod.guess(yin, x=xin) # pname = 'mnka' gauss1 = GaussianModel(prefix=f"{pname}_") pars.update(gauss1.make_params()) pars[f'{pname}_center'].set(line_c, min=line_c - 0.25, max=line_c + 0.25) pars[f'{pname}_sigma'].set(0.1, min=0.08, max=0.250) #pars[f'{pname}_amplitude'].set(y1max,min=1.0,max=y1max) # mod = poly_mod + gauss1 #init = mod.eval(pars, x=x) #out = mod.fit(yin, pars, x=xin, weights=1.0/np.sqrt(yin)) if (use_weights): yerr = np.sqrt(yin) w = np.divide(1.0, yerr, where=yerr != 0) try: out = mod.fit(yin, pars, x=xin, weights=w, nan_policy='omit') except: return None else: try: out = mod.fit(yin, pars, x=xin, nan_policy='omit') except: return None return out
def fit_qvalue(self, x_data, y_data, zoom_factor=1): """ Least square fit of Lorentzian and polynomial background to mode picture. :param x_data: Iterable containing x-data of mode picture in points. :param y_data: Iterable containing y-data of mode picture in a.u.. :param zoom_factor: Zoom factor (scaling factor of x-axis). :returns: (q_value, fit_result) where `fit_result` is a """ # get first guess parameters for Lorentzian fit peak_center, fwhm, peak_area = self._get_fit_starting_points( x_data, y_data) # set up fit models for polynomial background and Lorentzian dip pmod = PolynomialModel(degree=7) lmodel = Model(lorentz_peak) mode_picture_model = pmod - lmodel # isolate back ground area from resonance dip idx1 = sum((x_data < (peak_center - 3 * fwhm))) idx2 = sum((x_data > (peak_center + 3 * fwhm))) x_bg = np.concatenate((x_data[0:idx1], x_data[-idx2:-1])) y_bg = np.concatenate((y_data[0:idx1], y_data[-idx2:-1])) # get first guess parameters for background pars = pmod.guess(y_bg, x=x_bg) # add fit parameters for Lorentzian resonance dip pars.add_many( ("x0", peak_center, True, None, None, None, None), ("w", fwhm, True, None, None, None, None), ("a", peak_area, True, None, None, None, None), ) # perform full fit fit_result = mode_picture_model.fit(y_data, pars, x=x_data) # calculate Q-value from resonance width delta_freq = fit_result.best_values["w"] * 1e-3 / (2 * zoom_factor) q_value = round(self.freq0 / delta_freq, 1) return q_value, fit_result
def prepare_for_fitting(self, poly_order, maxwidth, centerrange): """ :param x_center: numpy array of initial x values at picked centers :param y_center: numpy array of initial y values at picked centers :param fwhm: single float number for initial fwhm value """ self.set_baseline(poly_order) baseline_mod = PolynomialModel(poly_order, prefix='b_') mod = baseline_mod pars = baseline_mod.make_params() peakinfo = {} for i in range(poly_order + 1): prefix = "b_c{0:d}".format(i) pars[prefix].set(value=self.baseline_in_queue[i]['value'], vary=self.baseline_in_queue[i]['vary']) i = 0 for peak in self.peaks_in_queue: prefix = "p{0:d}_".format(i) peak_mod = PseudoVoigtModel(prefix=prefix, ) pars.update(peak_mod.make_params()) pars[prefix + 'center'].set(value=peak['center'], min=peak['center'] - centerrange, max=peak['center'] + centerrange, vary=peak['center_vary']) pars[prefix + 'sigma'].set(value=peak['sigma'], min=0.0, vary=peak['sigma_vary'], max=maxwidth) pars[prefix + 'amplitude'].set(value=peak['amplitude'], min=0, vary=peak['amplitude_vary']) pars[prefix + 'fraction'].set(value=peak['fraction'], min=0., max=1., vary=peak['fraction_vary']) peakinfo[prefix + 'phasename'] = peak['phasename'] peakinfo[prefix + 'h'] = peak['h'] peakinfo[prefix + 'k'] = peak['k'] peakinfo[prefix + 'l'] = peak['l'] mod += peak_mod i += 1 self.parameters = pars self.peakinfo = peakinfo self.fit_model = mod
def fitTwoGaussians(x,y): background = PolynomialModel(2) pars = background.make_params() peak1 = GaussianModel(prefix='p1_') pars.update( peak1.make_params()) peak2 = GaussianModel(prefix='p2_') pars.update( peak2.make_params()) # Guess some parameters from data to help the fitting span = max(x)-min(x) c1Guess = (y[-1]-y[0])/(x[-1]-x[0]) c0Guess = y[0]-c1Guess*x[0] bgGuess = background.func(x=x,c0=c0Guess,c1=c1Guess,c2=0.) signalGuess=min(y-bgGuess) sigmaGuess = span/30. amplitudeGuess = signalGuess*(sigmaGuess*np.sqrt(2.0*np.pi)) # Fit variables initialization # pars.add('splitting',0.0001,max=span) pars['c2'].set(0.,min=-0.000001,max=0.001) pars['c1'].set(c1Guess) pars['c0'].set(c0Guess) pars['p1_center'].set(min(x)+span*0.35,min=min(x),max=max(x)) pars['p2_center'].set(min(x)+span*0.55,min=min(x),max=max(x)) # pars['p2_center'].set(min(x)+span*0.65,expr='p1_center+splitting') pars['p1_amplitude'].set(amplitudeGuess,max=amplitudeGuess/10000.) pars['p2_amplitude'].set(amplitudeGuess,max=amplitudeGuess/10000.) pars['p1_sigma'].set(sigmaGuess, min=sigmaGuess/100.,max=sigmaGuess*10000.) pars['p2_sigma'].set(sigmaGuess, min=sigmaGuess/100.,max=sigmaGuess*10000.) #Add some useful parameters to evaluate pars.add('p1_signal', expr='p1_amplitude/(p1_sigma*sqrt(2.0*pi))') pars.add('p2_signal', expr='p2_amplitude/(p2_sigma*sqrt(2.0*pi))') pars.add('p1_contrast', expr='-p1_amplitude/(p1_sigma*sqrt(2.0*pi)*(c0+c1*p1_center+c2*p1_center**2))') pars.add('p2_contrast', expr='-p2_amplitude/(p2_sigma*sqrt(2.0*pi)*(c0+c1*p2_center+c2*p2_center**2))') pars.add('splitting',pars['p2_center']-pars['p1_center'],expr='p2_center-p1_center',min=0.00001) model = peak1 + peak2 + background init = model.eval(pars, x=x) out = model.fit(y, pars, x=x) # print out.fit_report() return init,out
def build_model_dd(bg_ord=1, peak_num=1, peak_type="lorentzian"):
    model = PolynomialModel(bg_ord, prefix="bg_")
    peak_str = peak_type.lower()[:2]
    if peak_str == "lo":
        peak_function = lorentzian_dd
    elif peak_str == "ga":
        peak_function = gaussian_dd
    elif peak_str == "vo" or peak_str == "ps":
        peak_function = voigt
    else:
        print("'peak_type' should be one of 'lorentzian', 'gaussian' or 'pseudo-voigt'")
        return
    for i in range(peak_num):
        model += Model(peak_function, prefix="p%s_" % i)
    params = model.make_params()
    return model, params
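A sketch of how `build_model_dd` might be called, assuming the peak functions take `x0`, `fwhm` and `amp` parameters as in the `fit_data_bg` snippets above; the peak positions and the `x`, `y` arrays are illustrative:

model, params = build_model_dd(bg_ord=1, peak_num=2, peak_type="lorentzian")
for i, x0 in enumerate([1340.0, 1590.0]):        # illustrative peak positions
    params['p%s_x0' % i].set(value=x0)
    params['p%s_fwhm' % i].set(value=30.0, min=0)
    params['p%s_amp' % i].set(value=y.max(), min=0)
out = model.fit(y, params, x=x)
print(out.fit_report())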
def readFits(inFile): table = pyfits.open(inFile) data = table[1].data flux = data.field('flux') wavelength = 10**(data.field('loglam')) ivar = data.field('ivar') weights = ivar redshift_data = table[2].data.field('Z') mod = PolynomialModel(6) pars = mod.guess(flux, x=wavelength) out = mod.fit(flux, pars, x=wavelength) continuum = out.best_fit #At the moment get a crude estimate of the observed normalised SED for redshift computation normalised_observed_flux = flux - continuum #Call the normaliseTemplate method to find the normalised SED of a given template normalised_template_flux = normaliseTemplate('K20_late_composite_original.dat') plt.close('all') #Choose to plot the results #plt.plot(wavelength, flux, label='flux') #plt.plot(wavelength, y_av, label='boxcar') #plt.plot(wavelength, ffit , label='fit') #legend() #plt.show() #plt.close('all') return {'flux' : flux, 'wavelength' : wavelength, 'z' : redshift_data, 'weights': weights, 'norm_flux':normalised_observed_flux}
def fitLines(flux, wavelength, z, weights): #Convert all into numpy arrays flux = np.array(flux) wavelength = np.array(wavelength) z = np.array(z) weights = np.array(weights) error = np.sqrt(1 / weights) #Fit a polynomial to the continuum background emission of the galaxy #This is the crude way to do it mod = PolynomialModel(6) pars = mod.guess(flux, x=wavelength) out = mod.fit(flux, pars, x=wavelength) continuum_poly = out.best_fit #Can also compute the continuum in the more advanced way #masking the emission lines and using a moving average #Define the wavelength values of the relevant emission lines OII3727 = 3727.092 OII3729 = 3729.875 H_beta = 4862.721 OIII4959 = 4960.295 OIII5007 = 5008.239 H_alpha = 6564.614 NII6585 = 6585.27 SII6718 = 6718.29 SII6732 = 6732.68 #Now apply the redshift formula to find where this will be observed #Note that for these SDSS spectra the OII doublet is not in range OII3727_shifted = OII3727 * (1 + z) OII3729_shifted = OII3729 * (1 + z) H_beta_shifted = H_beta * (1 + z) OIII4959_shifted = OIII4959 * (1 + z) OIII5007_shifted = OIII5007 * (1 + z) H_alpha_shifted = H_alpha * (1 + z) NII6585_shifted = NII6585 * (1 + z) SII6718_shifted = SII6718 * (1 + z) SII6732_shifted = SII6732 * (1 + z) #hellofriend #Will choose to mask pm 15 for each of the lines H_beta_index = np.where(np.logical_and(wavelength>=(H_beta_shifted - 15), wavelength<=(H_beta_shifted + 15))) OIII_one_index = np.where(np.logical_and(wavelength>=(OIII4959_shifted - 15), wavelength<=(OIII4959_shifted + 15))) OIII_two_index = np.where(np.logical_and(wavelength>=(OIII5007_shifted - 15), wavelength<=(OIII5007_shifted + 15))) NII_one_index = np.where(np.logical_and(wavelength>=(NII6585_shifted - 15), wavelength<=(NII6585_shifted + 15))) H_alpha_index = np.where(np.logical_and(wavelength>=(H_alpha_shifted - 15), wavelength<=(H_alpha_shifted + 15))) SII_one_index = np.where(np.logical_and(wavelength>=(SII6718_shifted - 15), wavelength<=(SII6718_shifted + 15))) SII_two_index = np.where(np.logical_and(wavelength>=(SII6732_shifted - 15), wavelength<=(SII6732_shifted + 15))) #define the mask 1 values from the index values mask = np.zeros(len(flux)) mask[H_beta_index] = 1 mask[OIII_one_index] = 1 mask[OIII_two_index] = 1 mask[NII_one_index] = 1 mask[H_alpha_index] = 1 mask[SII_one_index] = 1 mask[SII_two_index] = 1 #Now apply these to the flux to mask masked_flux = ma.masked_array(flux, mask=mask) #Make my own with np.mean() continuum = np.empty(len(masked_flux)) for i in range(len(masked_flux)): if (i + 5) < len(masked_flux): continuum[i] = ma.mean(masked_flux[i:i+5]) if np.isnan(continuum[i]): continuum[i] = continuum[i - 1] else: continuum[i] = ma.mean(masked_flux[i-5:i]) if np.isnan(continuum[i]): continuum[i] = continuum[i - 1] #Subtract the continuum from the flux, just use polynomial fit right now counts = flux - continuum_poly #Construct a dictionary housing these shifted emission line values #Note that values for the OII doublet are not present line_dict = {'H_beta' : H_beta_shifted, 'OIII4959' : OIII4959_shifted, 'OIII5007' : OIII5007_shifted, 'H_alpha' : H_alpha_shifted, 'NII6585' : NII6585_shifted, 'SII6718' : SII6718_shifted, 'SII6732' : SII6732_shifted} #Plot the initial continuum subtracted spectrum plt.plot(wavelength, counts) #Initialise a dictionary for the results in the for loop results_dict = {} #Begin for loop to fit an arbitrary number of emission lines for key in line_dict: ######################################################################## #FITTING EACH OF THE EMISSION LINES IN 
TURN ######################################################################## #We don't want to include all the data in the gaussian fit #Look for the indices of the points closes to the wavelength value #The appropriate range is stored in fit_wavelength etc. #Use np.where to find the indices of data surrounding the gaussian new_index = np.where(np.logical_and(wavelength > (line_dict[key] - 10) , wavelength < (line_dict[key] + 10))) #Select only data for the fit with these indices fit_wavelength = wavelength[new_index] fit_counts = counts[new_index] fit_weights = weights[new_index] fit_continuum = continuum[new_index] fit_error = error[new_index] #Now use the lmfit package to perform gaussian fits to the data #Construct the gaussian model mod = GaussianModel() #Take an initial guess at what the model parameters are #In this case the gaussian model has three parameters, #Which are amplitude, center and sigma pars = mod.guess(fit_counts, x=fit_wavelength) #We know from the redshift what the center of the gaussian is, set this #And choose the option not to vary this parameter #Leave the guessed values of the other parameters pars['center'].set(value = line_dict[key]) pars['center'].set(vary = 'False') #Now perform the fit to the data using the set and guessed parameters #And the inverse variance weights form the fits file out = mod.fit(fit_counts, pars, weights = fit_weights, x=fit_wavelength) #print(out.fit_report(min_correl=0.25)) #Plot the results and the spectrum to check the fit plt.plot(fit_wavelength, out.best_fit, 'r-') #Return the error on the flux error_dict = fluxError(fit_counts, fit_wavelength, fit_error, continuum_poly) #Compute the equivalent width con_avg = np.mean(continuum_poly) E_w = out.best_values['amplitude'] / con_avg #The amplitude parameter is the area under the curve, equivalent to the flux results_dict[key] = [out.best_values['amplitude'], error_dict['flux_error'], out.best_values['sigma'], 2.3548200*out.best_values['sigma'], E_w, error_dict['E_W_error']] #The return dictionary for this method is a sequence of results vectors return results_dict
def polyModel(self, stateCode):
    from lmfit.models import PolynomialModel
    model = PolynomialModel(4)
    return self.guessAndFit(model, stateCode, lambda x: x.dropna())
def fit(spectra, sigma=4.0, ptreg=8., ord=4, iter=2): rej = [] for i, w in enumerate(spectra[1]): if w <= 0: rej.append(i) spectra[0] = np.delete(spectra[0], rej) spectra[1] = np.delete(spectra[1], rej) # prepare first kick poly = PolynomialModel(3) pars = poly.make_params() for p in range(4): label = 'c' + str(p) pars[label].set(value=1., vary=True) wkcopy = np.copy(spectra[1]) truesp = [i for i in wkcopy if i >= 0] truex = [ spectra[0][i] for i in range(len(spectra[1])) if spectra[1][i] >= 0 ] outcont = poly.fit(truesp, pars, x=truex) firstcont = outcont.eval(x=spectra[0]) xn = np.copy(spectra[0]) yn = np.copy(spectra[1]) / firstcont # start cont. cleaning iterations for i in range(iter): i_ = np.copy(i) niter = str(i_ + 1) sigma = sigma - i * 0.21 * sigma md = np.median(yn) n = len([i for i in yn if i > 0.1]) offset = (len(xn) - n) / 2 absor = md - min(yn[offset:n - offset]) freq, bin = np.histogram(yn, bins=50, range=(md - absor, md + absor)) rebin = [(bin[b + 1] + bin[b]) / 2 for b in range(len(bin) - 1)] gauss = SkewedGaussianModel() pars = gauss.make_params() pars['center'].set(vary=True) pars['amplitude'].set(vary=True) pars['sigma'].set(vary=True) pars['gamma'].set(vary=True) out = gauss.fit(freq, pars, x=rebin) var = sigma * out.best_values['sigma'] xrbn = np.linspace(rebin[0], rebin[-1], num=100) yrbn = list(out.eval(x=xrbn)) mode = xrbn[yrbn.index(max(yrbn))] # clean cont. ync = np.copy(spectra[1]) xnc = np.copy(spectra[0]) mask = [] for j in range(len(yn)): if (yn[j] > mode + var / 2) or (yn[j] < mode - var / 2): mask.append(False) else: mask.append(True) mask = np.array(mask) ync = ync[mask] xnc = xnc[mask] # re-fitting poly2 = PolynomialModel(ord) pars2 = poly2.make_params() for p in range(ord + 1): label = 'c' + str(p) pars2[label].set(value=1., vary=True) try: outcont2 = poly2.fit(ync, pars2, x=xnc) except: plt.plot(xn, yn, 'k-') plt.plot([xn[0], xn[-1]], [mode, mode], 'b-') plt.plot([xn[0], xn[-1]], [mode + var / 2, mode + var / 2], 'r-') plt.plot([xn[0], xn[-1]], [mode - var / 2, mode - var / 2], 'r-') plt.show() contf = outcont2.eval(x=xn) yn = spectra[1] / contf clspec = [xnc, ync] # start slicing firstv = clspec[0][0] wavrange = clspec[0][-1] - firstv sliceno = wavrange / ptreg slisize = wavrange / sliceno points = [[], []] # continuum point definition for s in range(int(sliceno)): i = bissec(clspec[0], firstv + s * slisize) f = bissec(clspec[0], firstv + (s + 1) * slisize) slc = [clspec[0][i:f], clspec[1][i:f]] if len(slc[1]) > 2.: md = np.median(slc[1]) absor = min(slc[1]) high = max(slc[1]) freq, bin = np.histogram(slc[1], bins=20, range=(absor, high)) rebin = [(bin[b + 1] + bin[b]) / 2 for b in range(len(bin) - 1)] fmode = rebin[list(freq).index(max(freq))] fsigma = rebin[-1] - rebin[0] gauss = GaussianModel() pars = gauss.make_params() pars['center'].set(value=fmode, vary=True) pars['amplitude'].set(value=max(freq), vary=True) pars['sigma'].set(value=fsigma, vary=True) out = gauss.fit(freq, pars, x=rebin) xrbn = np.linspace(rebin[0], rebin[-1], num=100) yrbn = list(out.eval(x=xrbn)) mode = xrbn[yrbn.index(max(yrbn))] xp = slc[0][len(slc[0]) / 2] points[0].append(xp) points[1].append(mode) spline = splrep(points[0], points[1], k=3) contx = splev(clspec[0], spline) continuum = splev(spectra[0], spline) return [spectra[0], spectra[1] / continuum]
def plot_data(data): signal_format = 'hist' # 'line' or 'hist' or None Total_SM_label = False # for Total SM black line in plot and legend plot_label = r'$Z \rightarrow ll$' signal_label = plot_label signal = None for s in ZBosonSamples.samples.keys(): if s not in stack_order and s != 'data': signal = s for x_variable, hist in ZBosonHistograms.hist_dict.items(): h_bin_width = hist['bin_width'] h_num_bins = hist['num_bins'] h_xrange_min = hist['xrange_min'] h_xlabel = hist['xlabel'] h_log_y = hist['log_y'] h_y_label_x_position = hist['y_label_x_position'] h_legend_loc = hist['legend_loc'] h_log_top_margin = hist[ 'log_top_margin'] # to decrease the separation between data and the top of the figure, remove a 0 h_linear_top_margin = hist[ 'linear_top_margin'] # to decrease the separation between data and the top of the figure, pick a number closer to 1 bins = [h_xrange_min + x * h_bin_width for x in range(h_num_bins + 1)] bin_centres = [ h_xrange_min + h_bin_width / 2 + x * h_bin_width for x in range(h_num_bins) ] if store_histograms: stored_histos = {} if load_histograms: # not doing line for now npzfile = np.load(f'histograms/{x_variable}_hist_{fraction}.npz') # load bins loaded_bins = npzfile['bins'] if not np.array_equal(bins, loaded_bins): print('Bins mismatch. That\'s a problem') raise Exception # load data data_x = npzfile['data'] data_x_errors = np.sqrt(data_x) # load weighted signal signal_x_reshaped = npzfile[signal] signal_color = ZBosonSamples.samples[signal]['color'] # load backgrounds mc_x_heights_list = [] # mc_weights = [] mc_colors = [] mc_labels = [] mc_x_tot = np.zeros(len(bin_centres)) for s in stack_order: if not s in npzfile: continue mc_labels.append(s) # mc_x.append(data[s][x_variable].values) mc_colors.append(ZBosonSamples.samples[s]['color']) # mc_weights.append(data[s].totalWeight.values) mc_x_heights = npzfile[s] mc_x_heights_list.append(mc_x_heights) mc_x_tot = np.add(mc_x_tot, mc_x_heights) mc_x_err = np.sqrt(mc_x_tot) else: # ======== This creates histograms for the raw data events ======== # # no weights necessary (it's data) data_x, _ = np.histogram(data['data'][x_variable].values, bins=bins) data_x_errors = np.sqrt(data_x) if store_histograms: stored_histos[ 'data'] = data_x # saving histograms for later loading # ======== This creates histograms for signal simulation (Z->ll) ======== # # need to consider the event weights here signal_x = None if signal_format == 'line': signal_x, _ = np.histogram( data[signal][x_variable].values, bins=bins, weights=data[signal].totalWeight.values) elif signal_format == 'hist': signal_x = data[signal][x_variable].values signal_weights = data[signal].totalWeight.values signal_color = ZBosonSamples.samples[signal]['color'] signal_x_reshaped, _ = np.histogram( data[signal][x_variable].values, bins=bins, weights=data[signal].totalWeight.values) if store_histograms: stored_histos[ signal] = signal_x_reshaped # saving histograms for later loading # ======== This creates histograms for all of the background simulation ======== # # weights are also necessary here, since we produce an arbitrary number of MC events mc_x_heights_list = [] mc_weights = [] mc_colors = [] mc_labels = [] mc_x_tot = np.zeros(len(bin_centres)) for s in stack_order: if not s in data: continue if data[s].empty: continue mc_labels.append(s) # mc_x.append(data[s][x_variable].values) mc_colors.append(ZBosonSamples.samples[s]['color']) mc_weights.append(data[s].totalWeight.values) mc_x_heights, _ = np.histogram( data[s][x_variable].values, bins=bins, 
weights=data[s].totalWeight.values) #mc_heights? mc_x_heights_list.append(mc_x_heights) mc_x_tot = np.add(mc_x_tot, mc_x_heights) if store_histograms: stored_histos[ s] = mc_x_heights #saving histograms for later loading mc_x_err = np.sqrt(mc_x_tot) data_x_without_bkg = data_x - mc_x_tot # data fit # get rid of zero errors (maybe messy) : TODO a better way to do this? for i, e in enumerate(data_x_errors): if e == 0: data_x_errors[i] = np.inf if 0 in data_x_errors: print('please don\'t divide by zero') raise Exception bin_centres_array = np.asarray(bin_centres) # ************* # Models # ************* doniach_mod = DoniachModel() pars_doniach = doniach_mod.guess(data_x_without_bkg, x=bin_centres_array, amplitude=2100000 * fraction, center=90.5, sigma=2.3, height=10000 * fraction / 0.01, gamma=0) doniach = doniach_mod.fit(data_x_without_bkg, pars_doniach, x=bin_centres_array, weights=1 / data_x_errors) params_dict_doniach = doniach.params.valuesdict() gaussian_mod = GaussianModel() pars_gaussian = gaussian_mod.guess(data_x_without_bkg, x=bin_centres_array, amplitude=6000000 * fraction, center=90.5, sigma=3) gaussian = gaussian_mod.fit(data_x_without_bkg, pars_gaussian, x=bin_centres_array, weights=1 / data_x_errors) params_dict_gaussian = gaussian.params.valuesdict() lorentzian_mod = LorentzianModel() pars = lorentzian_mod.guess(data_x_without_bkg, x=bin_centres_array, amplitude=6000000 * fraction, center=90.5, sigma=2.9, gamma=1) lorentzian = lorentzian_mod.fit(data_x_without_bkg, pars, x=bin_centres_array, weights=1 / data_x_errors) params_dict_lorentzian = lorentzian.params.valuesdict() voigt_mod = VoigtModel() pars = voigt_mod.guess(data_x_without_bkg, x=bin_centres_array, amplitude=6800000 * fraction, center=90.5, sigma=1.7) voigt = voigt_mod.fit(data_x_without_bkg, pars, x=bin_centres_array, weights=1 / data_x_errors) params_dict_voigt = voigt.params.valuesdict() voigt_mod_2 = VoigtModel() polynomial = PolynomialModel(2) pars = voigt_mod_2.guess(data_x_without_bkg, x=bin_centres_array, amplitude=6800000 * fraction, center=90.5, sigma=1.7) pars += polynomial.guess(data_x_without_bkg, x=bin_centres_array, c0=data_x_without_bkg.max(), c1=0, c2=0) voigt_poly_mod = voigt_mod_2 + polynomial voigt_poly = voigt_poly_mod.fit(data_x_without_bkg, pars, x=bin_centres_array, weights=1 / data_x_errors) params_dict_voigt_poly = voigt_poly.params.valuesdict() if store_histograms: # save all histograms in npz format. different file for each variable. 
bins are common os.makedirs('histograms', exist_ok=True) np.savez(f'histograms/{x_variable}_hist.npz', bins=bins, **stored_histos) # ======== Now we start doing the fit ======== # # ************* # Main plot # ************* plt.clf() plt.axes([0.1, 0.3, 0.85, 0.65]) # (left, bottom, width, height) main_axes = plt.gca() main_axes.errorbar(x=bin_centres, y=data_x, yerr=data_x_errors, fmt='ko', label='Data') # this effectively makes a stacked histogram bottoms = np.zeros_like(bin_centres) for mc_x_height, mc_color, mc_label in zip(mc_x_heights_list, mc_colors, mc_labels): main_axes.bar(bin_centres, mc_x_height, bottom=bottoms, color=mc_color, label=mc_label, width=h_bin_width * 1.01) bottoms = np.add(bottoms, mc_x_height) main_axes.plot(bin_centres, doniach.best_fit, '-r', label='Doniach') main_axes.plot(bin_centres, gaussian.best_fit, '-g', label='Gaussian') main_axes.plot(bin_centres, lorentzian.best_fit, '-y', label='Lorentzian') main_axes.plot(bin_centres, voigt.best_fit, '--', label='Voigt') main_axes.plot(bin_centres, voigt_poly.best_fit, '-v', label='Voigt and Polynomial') if Total_SM_label: totalSM_handle, = main_axes.step(bins, np.insert(mc_x_tot, 0, mc_x_tot[0]), color='black') if signal_format == 'line': main_axes.step(bins, np.insert(signal_x, 0, signal_x[0]), color=ZBosonSamples.samples[signal]['color'], linestyle='--', label=signal) elif signal_format == 'hist': main_axes.bar(bin_centres, signal_x_reshaped, bottom=bottoms, color=signal_color, label=signal, width=h_bin_width * 1.01) bottoms = np.add(bottoms, signal_x_reshaped) main_axes.bar(bin_centres, 2 * mc_x_err, bottom=bottoms - mc_x_err, alpha=0.5, color='none', hatch="////", width=h_bin_width * 1.01, label='Stat. Unc.') mc_x_tot = bottoms main_axes.set_xlim(left=h_xrange_min, right=bins[-1]) main_axes.xaxis.set_minor_locator( AutoMinorLocator()) # separation of x axis minor ticks main_axes.tick_params(which='both', direction='in', top=True, labeltop=False, labelbottom=False, right=True, labelright=False) if h_log_y: main_axes.set_yscale('log') smallest_contribution = mc_x_heights_list[ 0] # TODO: mc_heights or mc_x_heights smallest_contribution.sort() bottom = smallest_contribution[-2] if bottom == 0: bottom = 0.001 # log doesn't like zero top = np.amax(data_x) * h_log_top_margin main_axes.set_ylim(bottom=bottom, top=top) main_axes.yaxis.set_major_formatter(CustomTicker()) locmin = LogLocator(base=10.0, subs=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9), numticks=12) main_axes.yaxis.set_minor_locator(locmin) else: main_axes.set_ylim( bottom=0, top=(np.amax(data_x) + math.sqrt(np.amax(data_x))) * h_linear_top_margin) main_axes.yaxis.set_minor_locator(AutoMinorLocator()) main_axes.yaxis.get_major_ticks()[0].set_visible(False) plt.text(0.015, 0.97, 'ATLAS Open Data', ha="left", va="top", family='sans-serif', transform=main_axes.transAxes, fontsize=13) plt.text(0.015, 0.9, 'for education', ha="left", va="top", family='sans-serif', transform=main_axes.transAxes, style='italic', fontsize=8) plt.text(0.015, 0.86, r'$\sqrt{s}=13\,\mathrm{TeV},\;\int L\,dt=$' + str(lumi_used) + '$\,\mathrm{fb}^{-1}$', ha="left", va="top", family='sans-serif', transform=main_axes.transAxes) plt.text(0.015, 0.78, plot_label, ha="left", va="top", family='sans-serif', transform=main_axes.transAxes) plt.text(0.015, 0.72, r'$m_Z = $' + str(round(params_dict_doniach['center'], 4)) + ' GeV', ha="left", va="top", family='sans-serif', transform=main_axes.transAxes, fontsize=10) # Create new legend handles but use the colors from the existing ones handles, 
labels = main_axes.get_legend_handles_labels() if signal_format == 'line': handles[labels.index(signal)] = Line2D( [], [], c=ZBosonSamples.samples[signal]['color'], linestyle='dashed') uncertainty_handle = mpatches.Patch(facecolor='none', hatch='////') if Total_SM_label: handles.append((totalSM_handle, uncertainty_handle)) labels.append('Total SM') else: handles.append(uncertainty_handle) labels.append('Stat. Unc.') # specify order within legend new_handles = [ handles[labels.index('Data')], handles[labels.index('Doniach')], handles[labels.index('Gaussian')], handles[labels.index('Lorentzian')], handles[labels.index('Voigt')], handles[labels.index('Voigt and Polynomial')] ] new_labels = [ 'Data', 'Doniach', 'Gaussian', 'Lorentzian', 'Voigt', 'Voigt and Polynomial' ] for s in reversed(stack_order): if s not in labels: continue new_handles.append(handles[labels.index(s)]) new_labels.append(s) if signal is not None: new_handles.append(handles[labels.index(signal)]) new_labels.append(signal_label) if Total_SM_label: new_handles.append(handles[labels.index('Total SM')]) new_labels.append('Total SM') else: new_handles.append(handles[labels.index('Stat. Unc.')]) new_labels.append('Stat. Unc.') main_axes.legend(handles=new_handles, labels=new_labels, frameon=False, loc=h_legend_loc, fontsize='x-small') # ************* # Data / MC plot # ************* plt.axes([0.1, 0.1, 0.85, 0.2]) # (left, bottom, width, height) ratio_axes = plt.gca() ratio_axes.yaxis.set_major_locator( MaxNLocator(nbins='auto', symmetric=True)) ratio_axes.errorbar( x=bin_centres, y=data_x / signal_x_reshaped, fmt='ko' ) # TODO: yerr=data_x_errors produce error bars that are too big ratio_axes.set_xlim(left=h_xrange_min, right=bins[-1]) ratio_axes.plot(bins, np.ones(len(bins)), color='k') ratio_axes.xaxis.set_minor_locator( AutoMinorLocator()) # separation of x axis minor ticks ratio_axes.xaxis.set_label_coords( 0.9, -0.2) # (x,y) of x axis label # 0.2 down from x axis ratio_axes.set_xlabel(h_xlabel, fontname='sans-serif', fontsize=11) ratio_axes.set_ylim(bottom=0, top=2) ratio_axes.set_yticks([0, 1]) ratio_axes.tick_params(which='both', direction='in', top=True, labeltop=False, right=True, labelright=False) ratio_axes.yaxis.set_minor_locator(AutoMinorLocator()) ratio_axes.set_ylabel(r'Data / Pred', fontname='sans-serif', x=1, fontsize=11) # Generic features for both plots main_axes.yaxis.set_label_coords(h_y_label_x_position, 1) ratio_axes.yaxis.set_label_coords(h_y_label_x_position, 0.5) plt.savefig("ZBoson_" + x_variable + ".pdf", bbox_inches='tight') # ========== Statistics ========== # ========== Doniach ========== chisqr_doniach = mychisqr(doniach.residual, doniach.best_fit) redchisqr_doniach = chisqr_doniach / doniach.nfree center_doniach = params_dict_doniach['center'] sigma_doniach = params_dict_doniach['sigma'] rel_unc_center_doniach = doniach.params[ 'center'].stderr / doniach.params['center'].value rel_unc_sigma_doniach = doniach.params[ 'sigma'].stderr / doniach.params['sigma'].value # ========== Gaussian ========== chisqr_gaussian = mychisqr(gaussian.residual, gaussian.best_fit) redchisqr_gaussian = chisqr_gaussian / gaussian.nfree center_gaussian = params_dict_gaussian['center'] sigma_gaussian = params_dict_gaussian['sigma'] rel_unc_center_gaussian = gaussian.params[ 'center'].stderr / gaussian.params['center'].value rel_unc_sigma_gaussian = gaussian.params[ 'sigma'].stderr / gaussian.params['sigma'].value # ========== Lorentzian ========== chisqr_lorentzian = mychisqr(lorentzian.residual, lorentzian.best_fit) 
redchisqr_lorentzian = chisqr_lorentzian / lorentzian.nfree center_lorentzian = params_dict_lorentzian['center'] sigma_lorentzian = params_dict_lorentzian['sigma'] rel_unc_center_lorentzian = lorentzian.params[ 'center'].stderr / lorentzian.params['center'].value rel_unc_sigma_lorentzian = lorentzian.params[ 'sigma'].stderr / lorentzian.params['sigma'].value # ========== Voigt ========== chisqr_voigt = mychisqr(voigt.residual, voigt.best_fit) redchisqr_voigt = chisqr_voigt / voigt.nfree center_voigt = params_dict_voigt['center'] sigma_voigt = params_dict_voigt['sigma'] rel_unc_center_voigt = voigt.params['center'].stderr / voigt.params[ 'center'].value rel_unc_sigma_voigt = voigt.params['sigma'].stderr / voigt.params[ 'sigma'].value # ========== Voigt and Polynomial ========== chisqr_voigt_poly = mychisqr(voigt_poly.residual, voigt_poly.best_fit) redchisqr_voigt_poly = chisqr_voigt_poly / voigt_poly.nfree center_voigt_poly = params_dict_voigt_poly['center'] sigma_voigt_poly = params_dict_voigt_poly['sigma'] rel_unc_center_voigt_poly = voigt_poly.params[ 'center'].stderr / voigt_poly.params['center'].value rel_unc_sigma_voigt_poly = voigt_poly.params[ 'sigma'].stderr / voigt_poly.params['sigma'].value df_dict = { 'fraction': [fraction], 'luminosity': [lumi_used], 'doniach chisqr': [chisqr_doniach], 'doniach redchisqr': [redchisqr_doniach], 'doniach center': [rel_unc_center_doniach], 'doniach sigma': [rel_unc_sigma_doniach], 'gaussian chisqr': [chisqr_gaussian], 'gaussian redchisqr': [redchisqr_gaussian], 'gaussian center': [rel_unc_center_gaussian], 'gaussian sigma': [rel_unc_sigma_gaussian], 'lorentzian chisqr': [chisqr_lorentzian], 'lorentzian redchisqr': [redchisqr_lorentzian], 'lorentzian center': [rel_unc_center_lorentzian], 'lorentzian sigma': [rel_unc_sigma_lorentzian], 'voigt chisqr': [chisqr_voigt], 'voigt redchisqr': [redchisqr_voigt], 'voigt center': [rel_unc_center_voigt], 'voigt sigma': [rel_unc_sigma_voigt], 'voigt poly chisqr': [chisqr_voigt_poly], 'voigt poly redchisqr': [redchisqr_voigt_poly], 'voigt poly center': [rel_unc_center_voigt_poly], 'voigt poly sigma': [rel_unc_sigma_voigt_poly] } temp = pd.DataFrame(df_dict) fit_results = pd.read_csv('fit_results.csv') fit_results_concat = pd.concat([fit_results, temp]) fit_results_concat.to_csv('fit_results.csv', index=False) print("=====================================================") print("Statistics for the Doniach Model: ") print("\n") print("chi^2 = " + str(chisqr_doniach)) print("chi^2/dof = " + str(redchisqr_doniach)) print("center = " + str(center_doniach)) print("sigma = " + str(sigma_doniach)) print("Relative Uncertainty of Center = " + str(rel_unc_center_doniach)) print("Relative Uncertainty of Sigma = " + str(rel_unc_sigma_doniach)) print("\n") print("=====================================================") print("Statistics for the Gaussian Model: ") print("\n") print("chi^2 = " + str(chisqr_gaussian)) print("chi^2/dof = " + str(redchisqr_gaussian)) print("center = " + str(center_gaussian)) print("sigma = " + str(sigma_gaussian)) print("Relative Uncertainty of Center = " + str(rel_unc_center_gaussian)) print("Relative Uncertainty of Sigma = " + str(rel_unc_sigma_gaussian)) print("\n") print("=====================================================") print("Statistics for the Lorentzian Model: ") print("\n") print("chi^2 = " + str(chisqr_lorentzian)) print("chi^2/dof = " + str(redchisqr_lorentzian)) print("center = " + str(center_lorentzian)) print("sigma = " + str(sigma_lorentzian)) print("Relative Uncertainty of 
Center = " + str(rel_unc_center_lorentzian)) print("Relative Uncertainty of Sigma = " + str(rel_unc_sigma_lorentzian)) print("\n") print("=====================================================") print("Statistics for the Voigt Model: ") print("\n") print("chi^2 = " + str(chisqr_voigt)) print("chi^2/dof = " + str(redchisqr_voigt)) print("center = " + str(center_voigt)) print("sigma = " + str(sigma_voigt)) print("Relative Uncertainty of Center = " + str(rel_unc_center_voigt)) print("Relative Uncertainty of Sigma = " + str(rel_unc_sigma_voigt)) print("\n") print("=====================================================") print("Statistics for the Voigt and Polynomial Model: ") print("\n") print("chi^2 = " + str(chisqr_voigt_poly)) print("chi^2/dof = " + str(redchisqr_voigt_poly)) print("center = " + str(center_voigt_poly)) print("sigma = " + str(sigma_voigt_poly)) print("Relative Uncertainty of Center = " + str(rel_unc_center_voigt_poly)) print("Relative Uncertainty of Sigma = " + str(rel_unc_sigma_voigt_poly)) # ========= Plotting Residuals ========= # ========= Doniach Residuals ========= plt.clf() plt.axes([0.1, 0.3, 0.85, 0.65]) # (left, bottom, width, height) main_axes = plt.gca() main_axes.set_title("Doniach Model Residuals") main_axes.errorbar(x=bin_centres, y=doniach.residual, fmt='ko') main_axes.set_xlim(left=h_xrange_min, right=bins[-1]) main_axes.xaxis.set_minor_locator( AutoMinorLocator()) # separation of x axis minor ticks main_axes.tick_params(which='both', direction='in', top=True, labeltop=False, right=True, labelright=False) main_axes.set_xlabel(r'$M_Z$ GeV') main_axes.xaxis.get_major_ticks()[0].set_visible(False) main_axes.set_ylim(bottom=1.05 * doniach.residual.min(), top=1.05 * doniach.residual.max()) main_axes.yaxis.set_minor_locator(AutoMinorLocator()) main_axes.yaxis.get_major_ticks()[0].set_visible(False) main_axes.set_ylabel("Residual") plt.savefig("plots/doniach_residuals.pdf", bbox_inches='tight') # ========= Gaussian Residuals ========= plt.clf() plt.axes([0.1, 0.3, 0.85, 0.65]) # (left, bottom, width, height) main_axes = plt.gca() main_axes.set_title("Gaussian Model Residuals") main_axes.errorbar(x=bin_centres, y=gaussian.residual, fmt='ko') main_axes.set_xlim(left=h_xrange_min, right=bins[-1]) main_axes.xaxis.set_minor_locator( AutoMinorLocator()) # separation of x axis minor ticks main_axes.tick_params(which='both', direction='in', top=True, labeltop=False, right=True, labelright=False) main_axes.set_xlabel(r'$M_Z$ GeV') main_axes.xaxis.get_major_ticks()[0].set_visible(False) main_axes.set_ylim(bottom=1.05 * gaussian.residual.min(), top=1.05 * gaussian.residual.max()) main_axes.yaxis.set_minor_locator(AutoMinorLocator()) main_axes.yaxis.get_major_ticks()[0].set_visible(False) main_axes.set_ylabel("Residual") plt.savefig("plots/gaussian_residuals.pdf", bbox_inches='tight') # ========= Lorentzian Residuals ========= plt.clf() plt.axes([0.1, 0.3, 0.85, 0.65]) # (left, bottom, width, height) main_axes = plt.gca() main_axes.set_title("Lorentzian Model Residuals") main_axes.errorbar(x=bin_centres, y=lorentzian.residual, fmt='ko') main_axes.set_xlim(left=h_xrange_min, right=bins[-1]) main_axes.xaxis.set_minor_locator( AutoMinorLocator()) # separation of x axis minor ticks main_axes.tick_params(which='both', direction='in', top=True, labeltop=False, right=True, labelright=False) main_axes.set_xlabel(r'$M_Z$ GeV') main_axes.xaxis.get_major_ticks()[0].set_visible(False) main_axes.set_ylim(bottom=1.05 * lorentzian.residual.min(), top=1.05 * lorentzian.residual.max()) 
main_axes.yaxis.set_minor_locator(AutoMinorLocator()) main_axes.yaxis.get_major_ticks()[0].set_visible(False) main_axes.set_ylabel("Residual") plt.savefig("plots/lorentzian_residuals.pdf", bbox_inches='tight') # ========= Voigt Residuals ========= plt.clf() plt.axes([0.1, 0.3, 0.85, 0.65]) # (left, bottom, width, height) main_axes = plt.gca() main_axes.set_title("Voigt Model Residuals") main_axes.errorbar(x=bin_centres, y=voigt.residual, fmt='ko') main_axes.set_xlim(left=h_xrange_min, right=bins[-1]) main_axes.xaxis.set_minor_locator( AutoMinorLocator()) # separation of x axis minor ticks main_axes.tick_params(which='both', direction='in', top=True, labeltop=False, right=True, labelright=False) main_axes.set_xlabel(r'$M_Z$ GeV') main_axes.xaxis.get_major_ticks()[0].set_visible(False) main_axes.set_ylim(bottom=1.05 * voigt.residual.min(), top=1.05 * voigt.residual.max()) main_axes.yaxis.set_minor_locator(AutoMinorLocator()) main_axes.yaxis.get_major_ticks()[0].set_visible(False) main_axes.set_ylabel("Residual") plt.savefig("plots/voigt_residuals.pdf", bbox_inches='tight') # ========= Voigt and Polynomial Residuals ========= plt.clf() plt.axes([0.1, 0.3, 0.85, 0.65]) # (left, bottom, width, height) main_axes = plt.gca() main_axes.set_title("Voigt and Polynomial Model Residuals") main_axes.errorbar(x=bin_centres, y=voigt_poly.residual, fmt='ko') main_axes.set_xlim(left=h_xrange_min, right=bins[-1]) main_axes.xaxis.set_minor_locator( AutoMinorLocator()) # separation of x axis minor ticks main_axes.tick_params(which='both', direction='in', top=True, labeltop=False, right=True, labelright=False) main_axes.set_xlabel(r'$M_Z$ GeV') main_axes.xaxis.get_major_ticks()[0].set_visible(False) main_axes.set_ylim(bottom=1.05 * voigt_poly.residual.min(), top=1.05 * voigt_poly.residual.max()) main_axes.yaxis.set_minor_locator(AutoMinorLocator()) main_axes.yaxis.get_major_ticks()[0].set_visible(False) main_axes.set_ylabel("Residual") plt.savefig("plots/voigt_poly_residuals.pdf", bbox_inches='tight') if load_histograms: return None, None return signal_x, mc_x_tot
def fit(spectra, obj, sigma=2.0, ord=4, iter=4): poly = PolynomialModel(3) pars = poly.make_params() for p in range(4): label = 'c'+str(p) pars[label].set(value=1., vary=True) wkcopy = np.copy(spectra[1]) truesp = [i for i in wkcopy if i > 5] truex = [spectra[0][i] for i in range(len(spectra[1])) if spectra[1][i] > 5] outcont = poly.fit(truesp, pars, x=truex) firstcont = outcont.eval(x=spectra[0]) xn = np.copy(spectra[0]) yn = np.copy(spectra[1])/firstcont pl1=plt.subplot((iter+1)*100+11) pl1.plot(xn, spectra[1], 'k-', linewidth=0.3) pl1.plot(xn, firstcont, 'r-', linewidth=0.6) pl1.set_ylim([0, np.mean(firstcont)*1.5]) for i in range(iter): i_=np.copy(i) niter=str(i_+1) sigma = sigma-i*0.21*sigma md = np.median(yn) n = len([i for i in yn if i > 0.1]) offset = (len(xn)-n)/2 absor = md - min(yn[offset:n-offset]) freq, bin = np.histogram(yn, bins=50, range=(md-absor, md+absor)) rebin = [(bin[b+1]+bin[b])/2 for b in range(len(bin)-1)] gauss = SkewedGaussianModel() pars = gauss.make_params() pars['center'].set(value=md, vary=True) pars['amplitude'].set(vary=True) pars['sigma'].set(vary=True) pars['gamma'].set(vary=True) out = gauss.fit(freq, pars, x=rebin) var = sigma*out.best_values['sigma'] xrbn = np.linspace(rebin[0], rebin[-1], num=100) yrbn = list(out.eval(x=xrbn)) mode = xrbn[yrbn.index(max(yrbn))] ync = np.copy(spectra[1]) xnc = np.copy(spectra[0]) mask = [] for j in range(len(yn)): if (yn[j] > mode+var/2) or (yn[j] < mode-var/2): mask.append(False) else: mask.append(True) mask = np.array(mask) ync = ync[mask] xnc = xnc[mask] poly2 = PolynomialModel(ord) pars2 = poly2.make_params() for p in range(ord+1): label = 'c'+str(p) pars2[label].set(value=1., vary=True) outcont2 = poly2.fit(ync, pars2, x=xnc) contf = outcont2.eval(x=xn) yn = spectra[1]/contf err = spectra[2]/contf pln=plt.subplot(int((iter+1)*100+10+(i_+2))) pln.plot(xn, yn*(np.mean(contf)*0.8), 'k-', linewidth=0.3) pln.plot(xnc, ync, 'r-', linewidth=0.3) pln.plot(xn, contf, 'b-', linewidth=0.6) pln.set_ylim([0, np.mean(contf)*1.2]) plt.savefig(obj[0]+'_fit.png', dpi=300) plt.clf() return np.array([xn, yn, err])
def plot_data(data): signal_format = None # 'line' or 'hist' or None Total_SM_label = False # for Total SM black line in plot and legend plot_label = r'$H \rightarrow \gamma\gamma$' signal_label = '' # ******************* # general definitions (shouldn't need to change) lumi_used = str(lumi * fraction) signal = None for s in HyySamples.samples.keys(): if s not in stack_order and s != 'data': signal = s for x_variable, hist in HyyHistograms.hist_dict.items(): h_bin_width = hist['bin_width'] h_num_bins = hist['num_bins'] h_xrange_min = hist['xrange_min'] h_xlabel = hist['xlabel'] h_log_y = hist['log_y'] h_y_label_x_position = hist['y_label_x_position'] h_legend_loc = hist['legend_loc'] h_log_top_margin = hist[ 'log_top_margin'] # to decrease the separation between data and the top of the figure, remove a 0 h_linear_top_margin = hist[ 'linear_top_margin'] # to decrease the separation between data and the top of the figure, pick a number closer to 1 bins = [h_xrange_min + x * h_bin_width for x in range(h_num_bins + 1)] bin_centres = [ h_xrange_min + h_bin_width / 2 + x * h_bin_width for x in range(h_num_bins) ] data_x, _ = np.histogram(data['data'][x_variable].values, bins=bins) data_x_errors = np.sqrt(data_x) # data fit polynomial_mod = PolynomialModel(4) gaussian_mod = GaussianModel() bin_centres_array = np.asarray(bin_centres) pars = polynomial_mod.guess(data_x, x=bin_centres_array, c0=data_x.max(), c1=0, c2=0, c3=0, c4=0) pars += gaussian_mod.guess(data_x, x=bin_centres_array, amplitude=91.7, center=125., sigma=2.4) model = polynomial_mod + gaussian_mod out = model.fit(data_x, pars, x=bin_centres_array, weights=1 / data_x_errors) # background part of fit params_dict = out.params.valuesdict() c0 = params_dict['c0'] c1 = params_dict['c1'] c2 = params_dict['c2'] c3 = params_dict['c3'] c4 = params_dict['c4'] background = c0 + c1 * bin_centres_array + c2 * bin_centres_array**2 + c3 * bin_centres_array**3 + c4 * bin_centres_array**4 signal_x = None if signal_format == 'line': signal_x, _ = np.histogram(data[signal][x_variable].values, bins=bins, weights=data[signal].totalWeight.values) elif signal_format == 'hist': signal_x = data[signal][x_variable].values signal_weights = data[signal].totalWeight.values signal_color = HyySamples.samples[signal]['color'] signal_x = data_x - background mc_x = [] mc_weights = [] mc_colors = [] mc_labels = [] mc_x_tot = np.zeros(len(bin_centres)) for s in stack_order: mc_labels.append(s) mc_x.append(data[s][x_variable].values) mc_colors.append(HyySamples.samples[s]['color']) mc_weights.append(data[s].totalWeight.values) mc_x_heights, _ = np.histogram(data[s][x_variable].values, bins=bins, weights=data[s].totalWeight.values) mc_x_tot = np.add(mc_x_tot, mc_x_heights) mc_x_err = np.sqrt(mc_x_tot) # ************* # Main plot # ************* plt.clf() plt.axes([0.1, 0.3, 0.85, 0.65]) #(left, bottom, width, height) main_axes = plt.gca() main_axes.errorbar(x=bin_centres, y=data_x, yerr=data_x_errors, fmt='ko', label='Data') if Total_SM_label: totalSM_handle, = main_axes.step(bins, np.insert(mc_x_tot, 0, mc_x_tot[0]), color='black') if signal_format == 'line': main_axes.step(bins, np.insert(signal_x, 0, signal_x[0]), color=HyySamples.samples[signal]['color'], linestyle='--', label=signal) elif signal_format == 'hist': main_axes.hist(signal_x, bins=bins, bottom=mc_x_tot, weights=signal_weights, color=signal_color, label=signal) main_axes.bar(bin_centres, 2 * mc_x_err, bottom=mc_x_tot - mc_x_err, alpha=0.5, color='none', hatch="////", width=h_bin_width, label='Stat. 
Unc.') main_axes.plot(bin_centres, out.best_fit, '-r', label='Sig+Bkg Fit ($m_H=125$ GeV)') main_axes.plot(bin_centres, background, '--r', label='Bkg (4th order polynomial)') main_axes.set_xlim(left=h_xrange_min, right=bins[-1]) main_axes.xaxis.set_minor_locator( AutoMinorLocator()) # separation of x axis minor ticks main_axes.tick_params(which='both', direction='in', top=True, labeltop=False, labelbottom=False, right=True, labelright=False) if len(h_xlabel.split('[')) > 1: y_units = ' ' + h_xlabel[h_xlabel.find("[") + 1:h_xlabel.find("]")] else: y_units = '' main_axes.set_ylabel(r'Events / ' + str(h_bin_width) + y_units, fontname='sans-serif', horizontalalignment='right', y=1.0, fontsize=11) if h_log_y: main_axes.set_yscale('log') smallest_contribution = mc_heights[0][0] smallest_contribution.sort() bottom = smallest_contribution[-2] top = np.amax(data_x) * h_log_top_margin main_axes.set_ylim(bottom=bottom, top=top) main_axes.yaxis.set_major_formatter(CustomTicker()) locmin = LogLocator(base=10.0, subs=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9), numticks=12) main_axes.yaxis.set_minor_locator(locmin) else: main_axes.set_ylim( bottom=0, top=(np.amax(data_x) + math.sqrt(np.amax(data_x))) * h_linear_top_margin) main_axes.yaxis.set_minor_locator(AutoMinorLocator()) main_axes.yaxis.get_major_ticks()[0].set_visible(False) plt.text(0.2, 0.97, 'ATLAS Open Data', ha="left", va="top", family='sans-serif', transform=main_axes.transAxes, fontsize=13) plt.text(0.2, 0.9, 'for education', ha="left", va="top", family='sans-serif', transform=main_axes.transAxes, style='italic', fontsize=8) plt.text(0.2, 0.86, r'$\sqrt{s}=13\,\mathrm{TeV},\;\int L\,dt=$' + lumi_used + '$\,\mathrm{fb}^{-1}$', ha="left", va="top", family='sans-serif', transform=main_axes.transAxes) plt.text(0.2, 0.78, plot_label, ha="left", va="top", family='sans-serif', transform=main_axes.transAxes) # Create new legend handles but use the colors from the existing ones handles, labels = main_axes.get_legend_handles_labels() if signal_format == 'line': handles[labels.index(signal)] = Line2D( [], [], c=HyySamples.samples[signal]['color'], linestyle='dashed') if Total_SM_label: uncertainty_handle = mpatches.Patch(facecolor='none', hatch='////') handles.append((totalSM_handle, uncertainty_handle)) labels.append('Total SM') # specify order within legend new_handles = [handles[labels.index('Data')]] new_labels = ['Data'] for s in reversed(stack_order): new_handles.append(handles[labels.index(s)]) new_labels.append(s) if Total_SM_label: new_handles.append(handles[labels.index('Total SM')]) new_labels.append('Total SM') else: new_handles.append( handles[labels.index('Sig+Bkg Fit ($m_H=125$ GeV)')]) new_handles.append( handles[labels.index('Bkg (4th order polynomial)')]) new_labels.append('Sig+Bkg Fit ($m_H=125$ GeV)') new_labels.append('Bkg (4th order polynomial)') if signal is not None: new_handles.append(handles[labels.index(signal)]) new_labels.append(signal_label) main_axes.legend(handles=new_handles, labels=new_labels, frameon=False, loc=h_legend_loc) # ************* # Data-Bkg plot # ************* plt.axes([0.1, 0.1, 0.85, 0.2]) #(left, bottom, width, height) ratio_axes = plt.gca() ratio_axes.yaxis.set_major_locator( MaxNLocator(nbins='auto', symmetric=True)) ratio_axes.errorbar(x=bin_centres, y=signal_x, yerr=data_x_errors, fmt='ko') ratio_axes.plot(bin_centres, out.best_fit - background, '-r') ratio_axes.plot(bin_centres, background - background, '--r') ratio_axes.set_xlim(left=h_xrange_min, right=bins[-1]) 
ratio_axes.xaxis.set_minor_locator( AutoMinorLocator()) # separation of x axis minor ticks ratio_axes.xaxis.set_label_coords( 0.9, -0.2) # (x,y) of x axis label # 0.2 down from x axis ratio_axes.set_xlabel(h_xlabel, fontname='sans-serif', fontsize=11) ratio_axes.tick_params(which='both', direction='in', top=True, labeltop=False, right=True, labelright=False) ratio_axes.yaxis.set_minor_locator(AutoMinorLocator()) if signal_format == 'line' or signal_format == 'hist': ratio_axes.set_ylabel(r'Data/SM', fontname='sans-serif', x=1, fontsize=11) else: ratio_axes.set_ylabel(r'Events-Bkg', fontname='sans-serif', x=1, fontsize=11) # Generic features for both plots main_axes.yaxis.set_label_coords(h_y_label_x_position, 1) ratio_axes.yaxis.set_label_coords(h_y_label_x_position, 0.5) plt.savefig("Hyy_" + x_variable + ".pdf", bbox_inches='tight') print('chi^2 = ' + str(out.chisqr)) print('gaussian centre = ' + str(params_dict['center'])) print('gaussian sigma = ' + str(params_dict['sigma'])) print('gaussian fwhm = ' + str(params_dict['fwhm'])) return signal_x, mc_x_tot
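plot_data above finishes by printing the Gaussian centre, sigma and FWHM. A small sketch of reading those numbers, with their uncertainties where lmfit could estimate them, straight from the ModelResult `out`; the yield estimate is an assumption (Gaussian area divided by the bin width for unit-weighted counts), not something the original computes:

mass = out.params['center'].value
mass_err = out.params['center'].stderr          # None if no error estimate was possible
fwhm = out.params['fwhm'].value
approx_yield = out.params['amplitude'].value / h_bin_width  # illustrative signal-event estimate
print('m_H = {:.2f} GeV (+/- {}), FWHM = {:.2f} GeV, ~{:.0f} signal events'.format(
    mass, mass_err, fwhm, approx_yield))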
def build_model(self, peak_type="LO", max_width=None, bg_ord=2): """ Builds a lmfit model of peaks in listed by index in `peak_pos` Uses some basic algorithms to determine initial parameters for amplitude and fwhm (limit on fwhm to avoid fitting background as peaks) Parameters ---------- peak_type : string (default='lorentizian') Peaks can be of the following types: - 'LO' : symmetric lorentzian - 'GA' : symmetric gaussain max_width : int (default = total points/10) max width (in data points) that peak fitted can be bg_ord: int order of the background polynomial 0: constant, 1: linear, ... Returns ------- pars : model parameters model : model object """ x = self.x y = self.y pw = self.test_peak_width peak_guess = self.x[self.peak_pos] print("Building model ... ") # start with polynomial background # second order model = PolynomialModel(bg_ord, prefix="bg_") pars = model.make_params() if peak_type == "LO": peak_function = lorentzian self.afactor = pi self.wfactor = 2.0 elif peak_type == "GA": peak_function = gaussian self.afactor = sqrt(2 * pi) self.wfactor = 2.354820 elif peak_type == "VO": peak_function = voigt self.afactor = sqrt(2 * pi) self.wfactor = 3.60131 # add lorentizian peak for all peaks for i, peak in enumerate(peak_guess): temp_model = Model(peak_function, prefix="p%s_" % i) pars.update(temp_model.make_params()) model += temp_model # set inital background as flat line at zeros for i in range(bg_ord + 1): pars["bg_c%i" % i].set(0) # give values for other peaks for i, peak in enumerate(self.peak_pos): print("Peak %i: pos %s, height %s" % (i, x[peak], y[peak])) # could set bounds #, min=x[peak]-5, max=x[peak]+5) pars["p%s_center" % i].set(x[peak]) pars["p%s_sigma" % i].set(pw / 2, min=pw * 0.25, max=pw * 2) # here as well #, min=0, max=2*max(y)) pars["p%s_amplitude" % i].set(self.amplitude(y[peak], (pw / 2))) self.pars = pars self.model = model return self.pars, self.model
def fit_spectra(): ''' The fitting function ''' bg_mod = PolynomialModel(vPolynomial.get(), prefix='bg_') # Background if vDelimiter.get() == 'space': delimiter = None elif vDelimiter.get() == 'comma': delimiter = ',' data = np.loadtxt(filename, skiprows=vSkipRows.get(), delimiter=delimiter) original_data = np.vstack((data[:, vColumnX.get()-1], data[:, vColumnY.get()-1])).T selectionRange = (vStartLeftX.get(), vStartRightX.get(), vEndLeftX.get(), vEndRightX.get()) fittingRange = (vStartLeftX.get(), vEndRightX.get(), vEndRightX.get(), vEndRightX.get()) # x = data[:, vColumnX.get()-1] # y = data[:, vColumnY.get()-1] x_bg, y_bg = select_bg_data(original_data, selectionRange) x, y = select_bg_data(original_data, fittingRange) # Background fitting pars = bg_mod.guess(y_bg, x=x_bg) mod = bg_mod init = mod.eval(pars, x=x_bg) out = mod.fit(y_bg, pars, x=x_bg) pars = out.params # Peak fitting peaks = [] peakParameters = {} for i, peak in enumerate(PEAK_NAMES): peakParameters[peak] = {} peakParameters[peak]['center'] = peakCenter[i].get() peakParameters[peak]['amplitude'] = peakAmplitude[i].get() peakParameters[peak]['sigma'] = peakSigma[i].get() mod = bg_mod for i, peak in enumerate(PEAK_NAMES): peaks.append(VoigtModel(prefix=peak)) # Peak information pars.update(peaks[i].make_params()) pars[peak + 'center'].set( peakParameters[peak]['center'] # peakParameters[peak]['center'][0], # min=peakParameters[peak]['center'][1], # max=peakParameters[peak]['center'][2] ) pars[peak + 'amplitude'].set( peakParameters[peak]['amplitude'] # peakParameters[peak]['amplitude'][0], # min=peakParameters[peak]['amplitude'][1] ) pars[peak + 'sigma'].set( peakParameters[peak]['sigma'] # peakParameters[peak]['sigma'][0], # min=peakParameters[peak]['sigma'][1], # max=peakParameters[peak]['sigma'][2] ) mod += peaks[i] out = mod.fit(y, pars, x=x) # Fit comps = out.eval_components(x=x) pars = out.params # Update parameters for i, peak in enumerate(PEAK_NAMES): peakParameters[peak] = {} peakParameters[peak]['center'] = pars[peak + 'center'].value peakParameters[peak]['amplitude'] = pars[peak + 'amplitude'].value peakParameters[peak]['sigma'] = pars[peak + 'sigma'].value peakCenter[i].set(peakParameters[peak]['center']) peakAmplitude[i].set(peakParameters[peak]['amplitude']) peakSigma[i].set(peakParameters[peak]['sigma']) axe.cla() axe.plot(x, y, 'b.') axe.plot(x, out.best_fit, 'r-') # plot fitting result axe.plot(x, comps['bg_'], 'g-') # plot background and the peaks for i, peak in enumerate(PEAK_NAMES): axe.plot(x, comps[peak] + comps['bg_'], 'k-') figWindowNew, canvasNew, toolbarNew = createFigWindow(fig) canvasNew.show() toolbarNew.update() figWindowNew.wm_attributes('-topmost') # Activate the plotting window # Report result_txt = out.fit_report(min_correl=0.5) result_txt += '\n' result_txt += '===================\n' for i, peak in enumerate(PEAK_NAMES): area = simps(comps[peak], x) # Integration results result_txt += 'Area ' + repr(i) + ': ' + repr(area) + '\n' reportWindow = Tk.Tk() reportWindow.wm_title('Report') reportFrame = Tk.Frame(reportWindow) reportFrame.pack(side=Tk.TOP) reportText = Tk.Text(reportFrame, width=80, height=40, wrap=Tk.NONE) scrollbarX = Tk.Scrollbar(reportFrame, orient=Tk.HORIZONTAL) scrollbarY = Tk.Scrollbar(reportFrame, orient=Tk.VERTICAL) scrollbarX.pack(side=Tk.BOTTOM, fill=Tk.X) scrollbarY.pack(side=Tk.RIGHT, fill=Tk.Y) reportText.config(xscrollcommand=scrollbarX.set) reportText.config(yscrollcommand=scrollbarY.set) scrollbarX.config(command=reportText.xview) 
scrollbarY.config(command=reportText.yview) reportText.pack(fill=Tk.BOTH) reportText.insert(Tk.INSERT, result_txt) resultFittingData = np.vstack((x, out.data, out.best_fit, comps['bg_'])) # Fitting result headerStr = 'x OriginalData Fit Background' for i, peak in enumerate(PEAK_NAMES): resultFittingData = np.vstack((resultFittingData, comps[peak])) headerStr += ' peak' + repr(i) graphFit = np.transpose(resultFittingData) def save_report(): resultFile = open(filename + '_result.txt', 'w') resultFile.write(result_txt) resultFile.close() np.savetxt( filename + '_graph.txt', graphFit, newline='\n', header=headerStr ) if sys.version_info[0] < 3: tkMessageBox.showinfo('Message', 'The files are successfully saved as: ' + filename + '_result.txt' + ' and ' + filename + '_graph.txt') buttonSave = Tk.Button(reportWindow, text='Save report', command=save_report) buttonSave.pack(side=Tk.BOTTOM)
def fit_preview(): ''' The fitting-preview function ''' bg_mod = PolynomialModel(vPolynomial.get(), prefix='bg_') # Background if vDelimiter.get() == 'space': delimiter = None elif vDelimiter.get() == 'comma': delimiter = ',' data = np.loadtxt(filename, skiprows=vSkipRows.get(), delimiter=delimiter) original_data = np.vstack((data[:, vColumnX.get()-1], data[:, vColumnY.get()-1])).T selectionRange = (vStartLeftX.get(), vStartRightX.get(), vEndLeftX.get(), vEndRightX.get()) x = data[:, vColumnX.get()-1] y = data[:, vColumnY.get()-1] x_bg, y_bg = select_bg_data(original_data, selectionRange) # Background fitting pars = bg_mod.guess(y_bg, x=x_bg) mod = bg_mod init = mod.eval(pars, x=x_bg) out = mod.fit(y_bg, pars, x=x_bg) pars = out.params # Peak fitting peaks = [] peakParameters = {} for i, peak in enumerate(PEAK_NAMES): peakParameters[peak] = {} peakParameters[peak]['center'] = peakCenter[i].get() peakParameters[peak]['amplitude'] = peakAmplitude[i].get() peakParameters[peak]['sigma'] = peakSigma[i].get() mod = bg_mod for i, peak in enumerate(PEAK_NAMES): peaks.append(VoigtModel(prefix=peak)) # Peak information pars.update(peaks[i].make_params()) pars[peak + 'center'].set( peakParameters[peak]['center'] # peakParameters[peak]['center'][0], # min=peakParameters[peak]['center'][1], # max=peakParameters[peak]['center'][2] ) pars[peak + 'amplitude'].set( peakParameters[peak]['amplitude'] # peakParameters[peak]['amplitude'][0], # min=peakParameters[peak]['amplitude'][1] ) pars[peak + 'sigma'].set( peakParameters[peak]['sigma'] # peakParameters[peak]['sigma'][0], # min=peakParameters[peak]['sigma'][1], # max=peakParameters[peak]['sigma'][2] ) mod += peaks[i] init = mod.eval(pars, x=x) # Initial guess axe.cla() axe.plot(x, y, 'b.') axe.plot(x, init, 'k--') figWindowNew, canvasNew, toolbarNew = createFigWindow(fig) canvasNew.show() toolbarNew.update() figWindowNew.wm_attributes('-topmost') # Activate the plotting window
def _fit_polynomial(x, y, order, pars=None):
    """Internal function to fit a polynomial using `lmfit`."""
    model = PolynomialModel(order)
    if not pars:
        pars = model.guess(y, x=x)
    return model.fit(y, pars, x=x)
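A small usage sketch for `_fit_polynomial` above on synthetic data; the noise level and order are arbitrary, and lmfit's PolynomialModel is assumed to be imported in the same module:

import numpy as np

x = np.linspace(0.0, 10.0, 200)
y = 0.5 * x**2 - 3.0 * x + 1.0 + np.random.normal(scale=0.5, size=x.size)

result = _fit_polynomial(x, y, order=2)
print(result.fit_report())
print(result.best_values)   # {'c0': ..., 'c1': ..., 'c2': ...}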
endLine1, endLine2 = None, None
for i in range(np.size(x_original)):
    if x_original[i] >= head1 and startLine1 is None:
        startLine1 = i
    if startLine1 is not None and startLine2 is None and x_original[i] >= head2:
        startLine2 = i
    if x_original[i] >= end1 and endLine1 is None:
        endLine1 = i
    if endLine1 is not None and endLine2 is None and x_original[i] >= end2:
        endLine2 = i

x_bg = np.hstack((x_original[startLine1:startLine2], x_original[endLine1:endLine2]))
y_bg = np.hstack((y_original[startLine1:startLine2], y_original[endLine1:endLine2]))

bg_mod = PolynomialModel(1, prefix='bg_')  # linear background
pars = bg_mod.guess(y_bg, x=x_bg)
mod = bg_mod
init = mod.eval(pars, x=x_bg)
plt.plot(x, y, 'b.')
out = mod.fit(y_bg, pars, x=x_bg)
print(out.fit_report(min_correl=0.5))  # parameter report
plt.plot(x_bg, out.eval(), 'r-')  # fitted background
plt.xlim([x[0], x[-1]])
plt.show()
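The index search above can also be written with boolean masks; a sketch assuming the same `head1`/`head2` and `end1`/`end2` window edges and numpy arrays:

bg_mask = ((x_original >= head1) & (x_original < head2)) | \
          ((x_original >= end1) & (x_original < end2))
x_bg = x_original[bg_mask]
y_bg = y_original[bg_mask]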
from peakutils.plot import plot as pplot

data = pd.read_csv('data.csv')
x_raw = data.iloc[:, 0]
y_raw = data.iloc[:, 1]
x = np.hstack([x_raw[:370], x_raw[425:775], x_raw[-250:]])
y = np.hstack([y_raw[:370], y_raw[425:775], y_raw[-250:]])

raw = pd.read_csv('data.csv')
x1 = raw.iloc[:, 0]
y1 = raw.iloc[:, 1]

shift = PolynomialModel(2, prefix='Poly_')
pars = shift.guess(y, x=x)

voight1 = VoigtModel(prefix='v1_')
pars.update(voight1.make_params())
pars['v1_center'].set(0)
pars['v1_sigma'].set(0.005)
pars['v1_gamma'].set(0.005)
pars['v1_amplitude'].set(-0.3)

voight2 = VoigtModel(prefix='v2_')
pars.update(voight2.make_params())
pars['v2_center'].set(0.02)
pars['v2_sigma'].set(0.005)
pars['v2_gamma'].set(0.005)
pars['v2_amplitude'].set(-0.2)
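The snippet above stops after setting the initial parameter values. A minimal sketch of how the composite fit would typically be completed with lmfit, reusing the same names (the plotting part assumes matplotlib is available):

import matplotlib.pyplot as plt

mod = shift + voight1 + voight2
out = mod.fit(y, pars, x=x)
print(out.fit_report(min_correl=0.5))

comps = out.eval_components(x=x1)
plt.plot(x1, y1, 'b.', label='data')
plt.plot(x1, out.eval(x=x1), 'r-', label='fit')
plt.plot(x1, comps['Poly_'], 'g--', label='background')
plt.legend()
plt.show()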
def fit_data(x, y, peak_pos, peak_type='LO', max_width=None, bg_ord=2): """ Builds a lmfit model of peaks in listed by index in `peak_pos` Uses some basic algorithms to determine initial parameters for amplitude and fwhm (limit on fwhm to avoid fitting background as peaks) Parameters ---------- peak_type : string (default='lorentizian') Peaks can be of the following types: - 'LO' : symmetric lorentzian - 'GA' : symmetric gaussain - 'VO' : symmetric pseudo voigt max_width : int (default = total points/10) max width (in data points) that peak fitted can be bg_ord: int order of the background polynomial 0: constant, 1: linear, ... Returns ------- pars : model parameters model : model object """ # need to define peak width finding pw = guess_peak_width(x, y) peak_guess = x[peak_pos] # start with polynomial background model = PolynomialModel(bg_ord, prefix='bg_') pars = model.make_params() if peak_type == 'LO': peak_function = lorentzian elif peak_type == 'GA': peak_function = gaussian elif peak_type == 'VO': peak_function = voigt # add lorentizian peak for all peaks for i, peak in enumerate(peak_guess): temp_model = Model(peak_function, prefix='p%s_' % i) pars.update(temp_model.make_params()) model += temp_model # set inital background as flat line at zeros for i in range(bg_ord + 1): pars['bg_c%i' % i].set(0) # give values for other peaks for i, peak in enumerate(peak_pos): # could set bounds #, min=x[peak]-5, max=x[peak]+5) pars['p%s_x0' % i].set(x[peak]) pars['p%s_fwhm' % i].set(pw / 2, min=pw * 0.25, max=pw * 2) # here as well #, min=0, max=2*max(y)) pars['p%s_amp' % i].set(y[peak]) out = model.fit(y, pars, x=x) return out
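An alternative construction to `fit_data` above that uses lmfit's built-in peak models instead of wrapping bare functions in `Model()`, so the parameters follow the standard amplitude/center/sigma naming; `guess_peak_width` is assumed to be the same helper used above, and the amplitude guess is a rough area estimate:

from lmfit.models import PolynomialModel, LorentzianModel

def build_peak_model(x, y, peak_pos, bg_ord=2):
    pw = guess_peak_width(x, y)
    model = PolynomialModel(bg_ord, prefix='bg_')
    pars = model.make_params()
    for i in range(bg_ord + 1):
        pars['bg_c%i' % i].set(0)          # start with a flat background
    for i, peak in enumerate(peak_pos):
        peak_mod = LorentzianModel(prefix='p%s_' % i)
        pars.update(peak_mod.make_params())
        pars['p%s_center' % i].set(x[peak])
        pars['p%s_sigma' % i].set(pw / 2, min=pw * 0.25, max=pw * 2)
        pars['p%s_amplitude' % i].set(y[peak] * pw, min=0)   # rough area guess
        model += peak_mod
    return model, pars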
def fit_cu_line(xin, yin, line_c=8.04, use_weights=True): ''' PURPOSE: Fit the Cu Ka line (8.04 keV), the model is a polynomial(2) + a Gaussian line. INPUTS: xin is the energy channel (in keV) yin is the counts line_c is the initial energy of the line (in keV) OUTPUTS: a tuple of the full fit output class and the results line in ascii. NOTES: the Gaussian sigma of the line is only allowed within a certain range: 80 to 250 eV ''' i1max = np.argmax(yin) y1max = yin[i1max] # poly_mod = PolynomialModel(1, prefix='poly_') pars = poly_mod.guess(yin, x=xin) # pname = 'cuka' gauss1 = GaussianModel(prefix=f"{pname}_") pars.update(gauss1.make_params()) pars[f'{pname}_center'].set(line_c, min=line_c - 0.25, max=line_c + 0.25) pars[f'{pname}_sigma'].set(0.1, min=0.08, max=0.250) #pars[f'{pname}_amplitude'].set(y1max,min=1.0,max=y1max) # mod = poly_mod + gauss1 #init = mod.eval(pars, x=x) #out = mod.fit(yin, pars, x=xin, weights=1.0/np.sqrt(yin)) if (use_weights): yerr = np.sqrt(yin) w = np.divide(1.0, yerr, where=yerr != 0) try: out = mod.fit(yin, pars, x=xin, weights=w, nan_policy='omit') except: return None else: try: out = mod.fit(yin, pars, x=xin, nan_policy='omit') except: return None # # confidence intervals on parameters, if needed # #ci_out = out.conf_interval() #print (ci_out['cuka_center']) # #cen = out.params['g1_center'].value #cen_err = out.params['g1_center'].stderr #fwhm = out.params['g1_fwhm'].value #fwhm_err = out.params['g1_fwhm'].stderr #chi2 = out.chisqr #df = len(xin) #try: # results = f"{cen:.3f},{cen_err:.3f},{fwhm:.5f},{fwhm_err:.5f},{chi2:.3f},{df}" #except: # results = None # return out
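A sketch of how the result of `fit_cu_line` above might be consumed; `energy_kev` and `counts` are hypothetical input arrays, and the resolution figure is simply the fitted FWHM relative to the line energy:

out = fit_cu_line(energy_kev, counts)
if out is not None:
    cen = out.params['cuka_center']
    fwhm = out.params['cuka_fwhm']
    cen_err = cen.stderr if cen.stderr is not None else float('nan')
    print('Cu Ka at {:.3f} +/- {:.3f} keV, FWHM {:.1f} eV ({:.2f}% resolution)'.format(
        cen.value, cen_err, 1000 * fwhm.value, 100 * fwhm.value / cen.value))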
def fitLines(flux, wavelength, z, weights): #Convert all into numpy arrays flux = np.array(flux) wavelength = np.array(wavelength) z = np.array(z) weights = np.array(weights) error = np.sqrt(1 / weights) #Fit a polynomial to the continuum background emission of the galaxy #This is the crude way to do it mod = PolynomialModel(6) pars = mod.guess(flux, x=wavelength) out = mod.fit(flux, pars, x=wavelength) continuum_poly = out.best_fit #Can also compute the continuum in the more advanced way #masking the emission lines and using a moving average #Define the wavelength values of the relevant emission lines OII3727 = 3727.092 OII3729 = 3729.875 H_beta = 4862.721 OIII4959 = 4960.295 OIII5007 = 5008.239 H_alpha = 6564.614 NII6585 = 6585.27 SII6718 = 6718.29 SII6732 = 6732.68 #Now apply the redshift formula to find where this will be observed #Note that for these SDSS spectra the OII doublet is not in range OII3727_shifted = OII3727 * (1 + z) OII3729_shifted = OII3729 * (1 + z) H_beta_shifted = H_beta * (1 + z) OIII4959_shifted = OIII4959 * (1 + z) OIII5007_shifted = OIII5007 * (1 + z) H_alpha_shifted = H_alpha * (1 + z) NII6585_shifted = NII6585 * (1 + z) SII6718_shifted = SII6718 * (1 + z) SII6732_shifted = SII6732 * (1 + z) #hellofriend #Will choose to mask pm 15 for each of the lines H_beta_index = np.where( np.logical_and(wavelength >= (H_beta_shifted - 15), wavelength <= (H_beta_shifted + 15))) OIII_one_index = np.where( np.logical_and(wavelength >= (OIII4959_shifted - 15), wavelength <= (OIII4959_shifted + 15))) OIII_two_index = np.where( np.logical_and(wavelength >= (OIII5007_shifted - 15), wavelength <= (OIII5007_shifted + 15))) NII_one_index = np.where( np.logical_and(wavelength >= (NII6585_shifted - 15), wavelength <= (NII6585_shifted + 15))) H_alpha_index = np.where( np.logical_and(wavelength >= (H_alpha_shifted - 15), wavelength <= (H_alpha_shifted + 15))) SII_one_index = np.where( np.logical_and(wavelength >= (SII6718_shifted - 15), wavelength <= (SII6718_shifted + 15))) SII_two_index = np.where( np.logical_and(wavelength >= (SII6732_shifted - 15), wavelength <= (SII6732_shifted + 15))) #define the mask 1 values from the index values mask = np.zeros(len(flux)) mask[H_beta_index] = 1 mask[OIII_one_index] = 1 mask[OIII_two_index] = 1 mask[NII_one_index] = 1 mask[H_alpha_index] = 1 mask[SII_one_index] = 1 mask[SII_two_index] = 1 #Now apply these to the flux to mask masked_flux = ma.masked_array(flux, mask=mask) #Make my own with np.mean() continuum = np.empty(len(masked_flux)) for i in range(len(masked_flux)): if (i + 5) < len(masked_flux): continuum[i] = ma.mean(masked_flux[i:i + 5]) if np.isnan(continuum[i]): continuum[i] = continuum[i - 1] else: continuum[i] = ma.mean(masked_flux[i - 5:i]) if np.isnan(continuum[i]): continuum[i] = continuum[i - 1] #Subtract the continuum from the flux, just use polynomial fit right now counts = flux - continuum_poly #Construct a dictionary housing these shifted emission line values #Note that values for the OII doublet are not present line_dict = { 'H_beta': H_beta_shifted, 'OIII4959': OIII4959_shifted, 'OIII5007': OIII5007_shifted, 'H_alpha': H_alpha_shifted, 'NII6585': NII6585_shifted, 'SII6718': SII6718_shifted, 'SII6732': SII6732_shifted } #Plot the initial continuum subtracted spectrum plt.plot(wavelength, counts) #Initialise a dictionary for the results in the for loop results_dict = {} #Begin for loop to fit an arbitrary number of emission lines for key in line_dict: ######################################################################## 
#FITTING EACH OF THE EMISSION LINES IN TURN ######################################################################## #We don't want to include all the data in the gaussian fit #Look for the indices of the points closes to the wavelength value #The appropriate range is stored in fit_wavelength etc. #Use np.where to find the indices of data surrounding the gaussian new_index = np.where( np.logical_and(wavelength > (line_dict[key] - 10), wavelength < (line_dict[key] + 10))) #Select only data for the fit with these indices fit_wavelength = wavelength[new_index] fit_counts = counts[new_index] fit_weights = weights[new_index] fit_continuum = continuum[new_index] fit_error = error[new_index] #Now use the lmfit package to perform gaussian fits to the data #Construct the gaussian model mod = GaussianModel() #Take an initial guess at what the model parameters are #In this case the gaussian model has three parameters, #Which are amplitude, center and sigma pars = mod.guess(fit_counts, x=fit_wavelength) #We know from the redshift what the center of the gaussian is, set this #And choose the option not to vary this parameter #Leave the guessed values of the other parameters pars['center'].set(value=line_dict[key]) pars['center'].set(vary='False') #Now perform the fit to the data using the set and guessed parameters #And the inverse variance weights form the fits file out = mod.fit(fit_counts, pars, weights=fit_weights, x=fit_wavelength) #print(out.fit_report(min_correl=0.25)) #Plot the results and the spectrum to check the fit plt.plot(fit_wavelength, out.best_fit, 'r-') #Return the error on the flux error_dict = fluxError(fit_counts, fit_wavelength, fit_error, continuum_poly) #Compute the equivalent width con_avg = np.mean(continuum_poly) E_w = out.best_values['amplitude'] / con_avg #The amplitude parameter is the area under the curve, equivalent to the flux results_dict[key] = [ out.best_values['amplitude'], error_dict['flux_error'], out.best_values['sigma'], 2.3548200 * out.best_values['sigma'], E_w, error_dict['E_W_error'] ] #The return dictionary for this method is a sequence of results vectors return results_dict
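One detail worth flagging in `fitLines` above: `pars['center'].set(vary='False')` passes the string 'False', which is truthy, so the line centre is in fact still free during the fit. Fixing the centre at the redshifted position would look like this one-line sketch:

pars['center'].set(value=line_dict[key], vary=False)   # a real boolean actually pins the centre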
def build_model(self, peak_type='LO', max_width=None, bg_ord=2): """ Builds a lmfit model of peaks in listed by index in `peak_pos` Uses some basic algorithms to determine initial parameters for amplitude and fwhm (limit on fwhm to avoid fitting background as peaks) Parameters ---------- peak_type : string (default='lorentizian') Peaks can be of the following types: - 'LO' : symmetric lorentzian - 'GA' : symmetric gaussain max_width : int (default = total points/10) max width (in data points) that peak fitted can be bg_ord: int order of the background polynomial 0: constant, 1: linear, ... Returns ------- pars : model parameters model : model object """ x = self.x y = self.y pw = self.test_peak_width peak_guess = self.x[self.peak_pos] print("Building model ... ") # start with polynomial background # second order model = PolynomialModel(bg_ord, prefix='bg_') pars = model.make_params() if peak_type == 'LO': peak_function = lorentzian self.afactor = pi self.wfactor = 2.0 elif peak_type == 'GA': peak_function = gaussian self.afactor = sqrt(2 * pi) self.wfactor = 2.354820 elif peak_type == 'VO': peak_function = voigt self.afactor = sqrt(2 * pi) self.wfactor = 3.60131 # add lorentizian peak for all peaks for i, peak in enumerate(peak_guess): temp_model = Model(peak_function, prefix='p%s_' % i) pars.update(temp_model.make_params()) model += temp_model # set inital background as flat line at zeros for i in range(bg_ord + 1): pars['bg_c%i' % i].set(0) # give values for other peaks for i, peak in enumerate(self.peak_pos): print('Peak %i: pos %s, height %s' % (i, x[peak], y[peak])) # could set bounds #, min=x[peak]-5, max=x[peak]+5) pars['p%s_center' % i].set(x[peak]) pars['p%s_sigma' % i].set(pw / 2, min=pw * 0.25, max=pw * 2) # here as well #, min=0, max=2*max(y)) pars['p%s_amplitude' % i].set(self.amplitude(y[peak], (pw / 2))) self.pars = pars self.model = model return self.pars, self.model
    params1['sigma'].set(value=8, max=100)
    params1['slope'].set(value=slope1)
    params1['b'].set(value=np.min(Von_specm))

    params2 = gmod.make_params()
    params2['A'].set(value=np.max(Voff_specm), min=0)
    params2['mu'].set(value=x_lambda[pts[n, 0] - scan_l + int(*np.where(Voff_specm == Voff_specm.max()))],
                      min=400, max=800)
    params2['sigma'].set(value=8, max=100)
    params2['slope'].set(value=slope2)          # guesses for the Voff spectrum
    params2['b'].set(value=np.min(Voff_specm))

    result1 = gmod.fit(Von_specm, x=x, **params1)
    result2 = gmod.fit(Voff_specm, x=x, **params2)
    fitpeak1 = result1.best_values['mu']
    fitpeak2 = result2.best_values['mu']
    deltaL = fitpeak1 - fitpeak2
else:
    mod = PolynomialModel(7)
    pars1 = mod.guess(Von_specm, x=x)
    pars2 = mod.guess(Voff_specm, x=x)
    result1 = mod.fit(Von_specm, pars1, x=x)
    result2 = mod.fit(Voff_specm, pars2, x=x)
    fitpeak1 = x[np.where(result1.best_fit == np.max(result1.best_fit))]
    fitpeak2 = x[np.where(result2.best_fit == np.max(result2.best_fit))]
    deltaL = fitpeak1 - fitpeak2

dL['NR#{}'.format(n)] = deltaL
P = np.sum(spectra_bgcr[:, :, n] * x, axis=1) / np.sum(spectra_bgcr[:, :, n], axis=1)
Pon = np.array([P[i] for i in range(frame_start, frame) if i % 2 == 1 and tt[i, n] > threshold[n]])
Poff = np.array([P[i] for i in range(frame_start, frame) if i % 2 == 0 and tt[i, n] > threshold[n]])
Ton = [T[i] for i in range(frame_start, frame) if tt[i, n] > threshold[n] and i % 2 == 1]
Toff = [T[i] for i in range(frame_start, frame) if tt[i, n] > threshold[n] and i % 2 == 0]
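In the polynomial branch above, `np.argmax` gives the same peak position as the `np.where` comparison but returns a scalar index, so `fitpeak1`/`fitpeak2` stay scalars; a short sketch:

fitpeak1 = x[np.argmax(result1.best_fit)]
fitpeak2 = x[np.argmax(result2.best_fit)]
deltaL = fitpeak1 - fitpeak2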