def ChoosePeakType(self, peaktype, i):
    """Return an lmfit line-shape model for one peak of a composite fit.

    Helps build a `CompositeModel()
    <https://lmfit.github.io/lmfit-py/model.html#lmfit.model.CompositeModel>`_ .
    Implemented models are GaussianModel(), LorentzianModel(),
    VoigtModel(), and BreitWignerModel() from lmfit.models.

    Parameters
    ----------
    peaktype : str
        One of 'breit_wigner', 'lorentzian', 'gaussian', or 'voigt'.
    i : int
        Integer between 0 and (N-1) distinguishing N peaks of the same
        peaktype; the prefix is numbered from i + 1.

    Returns
    -------
    lmfit.model.Model
        The matching model created with prefix '<peaktype>_p<i+1>_' and
        nan_policy='omit'.

    Raises
    ------
    ValueError
        If peaktype is not supported.  (The original code silently
        returned None here, which only failed later when the caller
        tried to use the model.)
    """
    models = {
        'voigt': VoigtModel,
        'breit_wigner': BreitWignerModel,
        'lorentzian': LorentzianModel,
        'gaussian': GaussianModel,
    }
    try:
        model_cls = models[peaktype]
    except KeyError:
        raise ValueError(
            "Unknown peaktype '{}'; expected one of {}".format(
                peaktype, sorted(models))) from None
    prefix = peaktype + '_p' + str(i + 1) + '_'
    return model_cls(prefix=prefix, nan_policy='omit')
def test_bounds_expression():
    """Regression test for stderr/correlations with bounded parameters.

    Fits a Voigt profile to the example peak data with bounds on
    'amplitude' and 'center', then checks the reported standard errors
    and selected correlations against reference values from lmfit
    v0.9.10.
    """
    # load data to be fitted
    data = np.loadtxt(os.path.join(os.path.dirname(__file__), '..',
                                   'examples', 'test_peak.dat'))
    x = data[:, 0]
    y = data[:, 1]

    # define the model and initialize parameters
    mod = VoigtModel()
    params = mod.guess(y, x=x)
    params['amplitude'].set(min=0, max=100)
    params['center'].set(min=5, max=10)

    # do fit, here with leastsq model
    result = mod.fit(y, params, x=x)

    # assert that stderr and correlations are correct [cf. lmfit v0.9.10]
    assert_almost_equal(result.params['sigma'].stderr, 0.00368468, decimal=6)
    assert_almost_equal(result.params['center'].stderr, 0.00505496, decimal=6)
    assert_almost_equal(result.params['amplitude'].stderr, 0.13861506,
                        decimal=6)
    assert_almost_equal(result.params['gamma'].stderr, 0.00368468, decimal=6)
    assert_almost_equal(result.params['fwhm'].stderr, 0.00806917, decimal=6)
    assert_almost_equal(result.params['height'].stderr, 0.03009459, decimal=6)
    assert_almost_equal(result.params['sigma'].correl['center'],
                        -4.6623973788006615e-05, decimal=6)
    assert_almost_equal(result.params['sigma'].correl['amplitude'],
                        0.651304091954038, decimal=6)
    assert_almost_equal(result.params['center'].correl['amplitude'],
                        -4.390334984618851e-05, decimal=6)
def fit_voigt_over_linear(q, I, cen=1, sig=0.002, sigmin=1e-4, sigmax=0.01,
                          amplmin=0, amplmax=500, trim=0.06, plot=False):
    """Fit a Voigt peak on top of a linear background.

    The data are first restricted to the window cen +/- trim, a linear
    background alone is fitted, and then a Voigt component is added and
    the composite model is refitted starting from the background result.
    Returns the lmfit fit result of the composite fit.
    """
    # keep only the points inside the window around the expected center
    window = logical_and(q < cen + trim, q > cen - trim)
    q = q[window]
    I = I[window]

    # first pass: linear background only
    model = LinearModel()
    model.set_param_hint('slope', value=-20)
    model.set_param_hint('intercept', value=10)
    pars = model.fit(I, x=q).params

    # second pass: add the Voigt peak and refit everything together
    model += VoigtModel()
    pars.add('center', value=cen)
    pars.add('sigma', value=sig, max=sigmax, min=sigmin)
    pars.add('amplitude', value=amplmin / 2 + amplmax / 2,
             min=amplmin, max=amplmax)
    return model.fit(I, pars, x=q)
def __init__(self, type):
    """Create the per-peak lmfit models for the requested line shape.

    type: 0 -> PseudoVoigt, 1 -> Gaussian, 2 -> Lorentzian, 3 -> Voigt;
    any other value falls back to PseudoVoigt with a printed warning.
    """
    shapes = {
        0: (PseudoVoigtModel, "PseudoVoigt"),
        1: (GaussianModel, "Gauss"),
        2: (LorentzianModel, "Lorentz"),
        3: (VoigtModel, "Voigt"),
    }
    if type in shapes:
        model_cls, self.typec = shapes[type]
    else:
        print("Warning: type undefined. Using PseudoVoigt")
        # note: fallback label deliberately kept as the historical "PVoigt"
        model_cls, self.typec = PseudoVoigtModel, "PVoigt"
    # one model per peak, each with a unique "p<i>_" parameter prefix
    self.peak = [model_cls(prefix="p" + str(i) + "_")
                 for i in range(defPar.NumPeaks)]
def fit(self, xx, yy, fitType):
    """Fit a single peak plus a linear background to (xx, yy).

    Parameters
    ----------
    xx, yy : array_like
        Abscissa and ordinate data.
    fitType : str
        'Gaussian', 'Lorentzian', or anything else for a Voigt profile.

    Returns
    -------
    numpy.ndarray
        The best-fit curve evaluated at xx.
    """
    # (debug print statements from the original implementation removed)
    xx = np.asarray(xx)
    yy = np.asarray(yy)

    # straight line through the two endpoints seeds the linear background
    x1, x2 = xx[0], xx[-1]
    y1, y2 = yy[0], yy[-1]
    m = (y2 - y1) / (x2 - x1)
    b = y2 - m * x2

    if fitType == "Gaussian":
        mod = GaussianModel()
    elif fitType == "Lorentzian":
        mod = LorentzianModel()
    else:
        mod = VoigtModel()

    # extra keyword args to guess() are ignored by the peak models
    pars = mod.guess(yy, x=xx, slope=m)
    mod = mod + LinearModel()
    pars.add('intercept', value=b, vary=True)
    pars.add('slope', value=m, vary=True)
    out = mod.fit(yy, pars, x=xx)
    return out.best_fit
def test_numdifftools_calc_covar_false(): pytest.importorskip("numdifftools") # load data to be fitted data = np.loadtxt( os.path.join(os.path.dirname(__file__), '..', 'examples', 'test_peak.dat')) x = data[:, 0] y = data[:, 1] # define the model and initialize parameters mod = VoigtModel() params = mod.guess(y, x=x) params['sigma'].set(min=-np.inf) # do fit, with leastsq and nelder result = mod.fit(y, params, x=x, method='leastsq') result_ndt = mod.fit(y, params, x=x, method='nelder', calc_covar=False) # assert that fit converged to the same result vals = [result.params[p].value for p in result.params.valuesdict()] vals_ndt = [ result_ndt.params[p].value for p in result_ndt.params.valuesdict() ] assert_allclose(vals_ndt, vals, rtol=5e-3) assert_allclose(result_ndt.chisqr, result.chisqr) assert result_ndt.covar is None assert result_ndt.errorbars is False
def test_param_set():
    """Exercise Parameter.set() interactions between value, vary and expr,
    using the Voigt model's 'gamma' parameter (tied to 'sigma' by default).
    """
    np.random.seed(2015)
    x = np.arange(0, 20, 0.05)
    y = gaussian(x, amplitude=15.43, center=4.5, sigma=2.13)
    y = y + 0.05 - 0.01*x + np.random.normal(scale=0.03, size=len(x))

    model = VoigtModel()
    params = model.guess(y, x=x)

    # test #1: gamma is constrained to equal sigma
    sigval = params['gamma'].value
    assert(params['gamma'].expr == 'sigma')
    assert_allclose(params['gamma'].value, sigval, 1e-4, 1e-4, '', True)

    # test #2: explicitly setting a param value should work, even when
    # it had been an expression.  The value will be left as fixed
    gamval = 0.87543
    params['gamma'].set(value=gamval)
    assert(params['gamma'].expr is None)
    assert(not params['gamma'].vary)
    assert_allclose(params['gamma'].value, gamval, 1e-4, 1e-4, '', True)

    # test #3: explicitly setting an expression should work
    params['gamma'].set(expr='sigma/2.0')
    assert(params['gamma'].expr is not None)
    assert(not params['gamma'].vary)
    assert_allclose(params['gamma'].value, sigval/2.0, 1e-4, 1e-4, '', True)

    # test #4: explicitly setting a param value WITH vary=True
    # will set it to be variable
    gamval = 0.7777
    params['gamma'].set(value=gamval, vary=True)
    assert(params['gamma'].expr is None)
    assert(params['gamma'].vary)
    assert_allclose(params['gamma'].value, gamval, 1e-4, 1e-4, '', True)
def get_model(self):
    """Fit a composite exponential-background + Voigt-peak model.

    Builds parameter guesses for both components, fits the composite
    model to self.norm_vals over the fixed time points, and extracts a
    (truncated) chi-square from the printed fit report.

    Returns
    -------
    tuple
        (fit result, chisq, fit result, chisq, composite model,
        description string) -- redundant shape kept for compatibility
        with existing callers.
    """
    self.x = np.array([0, 1, 2, 6, 12, 24])
    self.y = np.array(self.norm_vals)
    # Compound model with Voigt curve.
    self.background = ExponentialModel(prefix='b_')
    self.pars = self.background.guess(self.y, x=self.x)
    self.peak = VoigtModel(prefix='p_')
    self.pars += self.peak.guess(self.y, x=self.x)
    self.comp_mod = self.peak + self.background
    self.init = self.comp_mod.eval(self.pars, x=self.x)
    # BUG FIX: the guessed parameters were built but never handed to
    # fit(), so the fit started from default parameter values.
    self.comp_out = self.comp_mod.fit(
        self.y, self.pars, x=self.x,
        fit_kws={'nan_policy': 'propagate'
                 })  # instead of 'omit', it keeps up the zero vals.
    self.comp_list = self.comp_out.fit_report().split('\n')
    # NOTE(review): chi-square is scraped from a fixed line/column of
    # the report text; comp_out.chisqr would be more robust -- kept
    # as-is so downstream consumers see the same truncated value.
    self.comp_chisq = float(self.comp_list[6][-5:])
    self.out = self.comp_out
    self.chisq = float(self.comp_list[6][-5:])
    self.usedmod = self.comp_mod
    self.model_flag = "composite (exponential+Voigt)"
    return self.comp_out, self.comp_chisq, self.out, self.chisq, self.usedmod, self.model_flag
def voigt_response(self, sigma=None, gamma=None, weights=True):
    '''
    Fit the background with a Voigt profile to determine the response
    of the spectrometer.

    If you have a good, clear signal, set sigma and gamma to None (done
    by default).  If your signal is poor, set sigma and gamma using a
    fit to a good signal, and then only the position of the central
    wavelength will be altered.

    Parameters
    ----------
    sigma, gamma : float or None
        Fixed Voigt widths; None lets the fit vary them.
    weights : bool or array_like
        True -> weight by self.bkgd / self.bkgd_err; False -> uniform
        weights; any other value is passed to lmfit unchanged.

    Side effects: stores the fit in self.vm_fit, the fitted center in
    self.l0 and the fitted sigma in self.sigma.
    '''
    vm = VoigtModel()
    par_v = vm.guess(self.bkgd, x=self.lamb)
    # start the center at 532 nm (wavelengths are in metres here)
    par_v['center'].set(value=532e-9, vary=True)
    if sigma is not None:  #if a width is provided, fix it.
        par_v['sigma'].set(value=sigma, vary=False)
    if gamma is not None:  #if a width is provided, fix it.
        par_v['gamma'].set(value=gamma, vary=False, expr='')
    elif gamma is None:  #vary gamma for better fit - this is not done by default
        # expr='' breaks the default gamma=sigma tie so gamma floats
        par_v['gamma'].set(value=par_v['sigma'].value, vary=True, expr='')

    ##Fit the Voigt Model to the data
    if weights is True:
        weights = self.bkgd / self.bkgd_err
    if weights is False:
        weights = np.ones_like(self.bkgd)
    self.vm_fit = vm.fit(self.bkgd, par_v, x=self.lamb, weights=weights)
    self.l0 = self.vm_fit.best_values['center']
    self.sigma = self.vm_fit.best_values['sigma']
def test_saveload_usersyms():
    """Test save/load of modelresult with non-trivial user symbols.

    This example uses a VoigtModel, where `wofz()` is used in a
    constraint expression.  The fit result is saved, reloaded, and the
    key parameters are checked to fall in the same ranges.
    """
    x = np.linspace(0, 20, 501)
    y = gaussian(x, 1.1, 8.5, 2) + lorentzian(x, 1.7, 8.5, 1.5)
    np.random.seed(20)
    y = y + np.random.normal(size=len(x), scale=0.025)

    model = VoigtModel()
    pars = model.guess(y, x=x)
    result = model.fit(y, pars, x=x)

    # NOTE(review): the save file is never deleted afterwards
    savefile = 'tmpvoigt_modelresult.sav'
    save_modelresult(result, savefile)

    assert_param_between(result.params['sigma'], 0.7, 2.1)
    assert_param_between(result.params['center'], 8.4, 8.6)
    assert_param_between(result.params['height'], 0.2, 1.0)

    # brief pause, presumably to let the file land on disk -- TODO confirm
    time.sleep(0.25)
    result2 = load_modelresult(savefile)
    assert_param_between(result2.params['sigma'], 0.7, 2.1)
    assert_param_between(result2.params['center'], 8.4, 8.6)
    assert_param_between(result2.params['height'], 0.2, 1.0)
def fitting(filename):
    """Fit four negative Voigt dips on a linear background and plot.

    Loads two-column (x, y) data from *filename*, builds a composite
    model of a linear shift plus four Voigt components with hand-tuned
    starting values, fits it, and shows the lmfit result plot.

    Returns
    -------
    None
    """
    data = np.genfromtxt(filename)
    x = data[:, 0]
    y = data[:, 1]

    lin_shift = LinearModel(prefix='lin_')
    pars = lin_shift.guess(y, x=x)

    # (center, sigma, gamma, amplitude) starting values for the four dips;
    # negative amplitudes because these are absorption-like features
    peak_guesses = [(-0.65, 0.1, 0.1, -0.4),
                    (0, 0.1, 0.1, -1.0),
                    (0.75, 0.5, 0.5, -1.4),
                    (1.1, 0.15, 0.15, -0.6)]

    mod = lin_shift
    for idx, (cen, sig, gam, amp) in enumerate(peak_guesses, start=1):
        prefix = 'v' + str(idx) + '_'
        voigt = VoigtModel(prefix=prefix)
        pars.update(voigt.make_params())
        pars[prefix + 'center'].set(cen)
        pars[prefix + 'sigma'].set(sig)
        pars[prefix + 'gamma'].set(gam)
        pars[prefix + 'amplitude'].set(amp)
        mod = mod + voigt

    # (removed unused locals `init` and `y_fit` from the original)
    out = mod.fit(y, pars, x=x)
    # print(out.fit_report())
    out.plot(datafmt='g-', fitfmt='r--')
    plt.show()
def methodfunciont(key):
    """Return a fresh lmfit model instance for the given model name.

    Parameters
    ----------
    key : str
        One of "VoigtModel", "PseudoVoigtModel", "GaussianModel".

    Returns
    -------
    lmfit.model.Model
        A newly constructed instance of the requested model.

    Raises
    ------
    KeyError
        For unknown keys (same as the original dict lookup).
    """
    # map to classes so only the requested model is instantiated;
    # the original built all three models on every call
    model_classes = {
        "VoigtModel": VoigtModel,
        "PseudoVoigtModel": PseudoVoigtModel,
        "GaussianModel": GaussianModel,
    }
    return model_classes[key]()
def call_voigt(x, y, cen, count, pars):
    """Add one negative Voigt component near `cen` to the parameter set.

    Creates a VoigtModel with prefix 'v<count>_', registers its default
    parameters into `pars`, constrains them for a downward peak, and
    returns the model so the caller can sum it into a composite.
    """
    prefix = 'v' + str(count) + '_'
    component = VoigtModel(prefix=prefix)
    pars.update(component.make_params())
    # pin the center to a +/- 0.01 window around the requested position
    pars[prefix + 'center'].set(cen, min=cen - 0.01, max=cen + 0.01)
    # negative amplitude: fitting absorption-like dips
    pars[prefix + 'amplitude'].set(0, min=-(max(y) - min(y)) * 1.5,
                                   max=0.0001)
    # fw_set is a module-level expected FWHM; 2.3548 ~ 2*sqrt(2*ln 2)
    pars[prefix + 'sigma'].set(fw_set / 4, min=0.005, max=fw_set / 2.3548)
    # expr='' breaks the default gamma=sigma tie so gamma varies freely
    pars[prefix + 'gamma'].set(value=fw_set / 4, vary=True, expr='')
    return component
def test_numdifftools_with_bounds(fit_method):
    """Compare leastsq uncertainties with numdifftools-based ones when
    parameter bounds are active, for the parametrized `fit_method`.
    """
    pytest.importorskip("numdifftools")
    if fit_method in ['shgo', 'dual_annealing']:
        pytest.importorskip("scipy", minversion="1.2")

    # load data to be fitted
    data = np.loadtxt(
        os.path.join(os.path.dirname(__file__), '..', 'examples',
                     'test_peak.dat'))
    x = data[:, 0]
    y = data[:, 1]

    # define the model and initialize parameters
    mod = VoigtModel()
    params = mod.guess(y, x=x)
    params['amplitude'].set(min=25, max=70)
    params['sigma'].set(max=1)
    params['center'].set(min=5, max=15)

    # do fit, here with leastsq model
    result = mod.fit(y, params, x=x, method='leastsq')
    result_ndt = mod.fit(y, params, x=x, method=fit_method)

    # assert that fit converged to the same result
    vals = [result.params[p].value for p in result.params.valuesdict()]
    vals_ndt = [
        result_ndt.params[p].value for p in result_ndt.params.valuesdict()
    ]
    assert_allclose(vals_ndt, vals, rtol=0.1)
    assert_allclose(result_ndt.chisqr, result.chisqr, rtol=1e-5)

    # assert that parameter uncertaintes from leastsq and calculated from
    # the covariance matrix using numdifftools are very similar
    stderr = [result.params[p].stderr for p in result.params.valuesdict()]
    stderr_ndt = [
        result_ndt.params[p].stderr for p in result_ndt.params.valuesdict()
    ]

    perr = np.array(stderr) / np.array(vals)
    perr_ndt = np.array(stderr_ndt) / np.array(vals_ndt)
    assert_almost_equal(perr_ndt, perr, decimal=3)

    # assert that parameter correlatations from leastsq and calculated from
    # the covariance matrix using numdifftools are very similar
    for par1 in result.var_names:
        cor = [
            result.params[par1].correl[par2]
            for par2 in result.params[par1].correl.keys()
        ]
        cor_ndt = [
            result_ndt.params[par1].correl[par2]
            for par2 in result_ndt.params[par1].correl.keys()
        ]
        assert_almost_equal(cor_ndt, cor, decimal=2)
def curve_fitting_voigt(dref, pars=None):
    """Fit a Voigt profile to a pandas Series (index = x, values = y).

    Parameters
    ----------
    dref : pandas.Series
        Data to fit; the index supplies the abscissa.
    pars : lmfit.Parameters, optional
        Starting parameters; guessed from the data when omitted.

    Returns
    -------
    lmfit.model.ModelResult
        The fit result.
    """
    x = dref.index.to_numpy()
    y = dref.to_numpy()
    model = VoigtModel()
    if pars is None:
        pars = model.guess(y, x=x)
    return model.fit(y, pars, x=x)
def VoigtCalc(x, y, x1, y1):
    """Fit Voigt profiles to a sample line and a reference line, then
    deconvolve the widths and evaluate the single-line equation.

    (x, y) is the sample spectrum and (x1, y1) the reference; both are
    background-corrected before fitting.
    """
    y = removerBackground(y)
    y1 = removerBackground(y1)

    def _voigt_fit(xv, yv):
        # helper: Voigt fit with gamma freed from the gamma=sigma tie
        model = VoigtModel()
        params = model.guess(yv, x=xv)
        params['gamma'].set(value=0.7, vary=True, expr='')
        return model.fit(yv, params, x=xv)

    out = _voigt_fit(x, y)
    out1 = _voigt_fit(x1, y1)

    center = out.best_values['center']
    # deconvolve Gaussian and Lorentzian widths against the reference
    sigma = Decon_Gau(out.best_values['sigma'], out1.best_values['sigma'])
    gamma = Decon_Lor(out.best_values['gamma'], out1.best_values['gamma'])
    return SingleLineEquation(sigma, gamma, center)
def test_least_squares_solver_options(peakdata, capsys):
    """Test least_squares algorithm, pass options to solver."""
    x = peakdata[0]
    y = peakdata[1]
    mod = VoigtModel()
    params = mod.guess(y, x=x)
    # verbose=2 makes scipy's least_squares print per-iteration progress
    solver_kws = {'verbose': 2}
    mod.fit(y, params, x=x, method='least_squares', fit_kws=solver_kws)
    captured = capsys.readouterr()
    # the solver's progress output proves the option reached scipy
    assert 'Iteration' in captured.out
    assert 'final cost' in captured.out
def singleline(x, y, tipo, arquivo):
    """Fit a single Voigt line to (x, y) and forward its parameters.

    The fitted gamma, sigma and center are handed to calcsingleline()
    together with the pass-through arguments `tipo` and `arquivo`.
    """
    model = VoigtModel()
    params = model.guess(y, x=x)
    # free gamma from the default gamma=sigma constraint
    params['gamma'].set(value=0.7, vary=True, expr='')
    result = model.fit(y, params, x=x)
    best = result.best_values
    calcsingleline(best['gamma'], best['sigma'], best['center'],
                   tipo, arquivo)
def __init__(self, ModelString):
    """Build a composite lmfit model from a spec string.

    Each 'L' in ModelString adds a LinearModel, each 'G' a
    GaussianModel, each 'V' a VoigtModel; components get prefixes like
    'L1_', 'G2_', 'V1_'.  A warning is printed when the string contains
    characters other than L/G/V.
    """
    self.NumModels = {
        'Total': len(ModelString),
        'Linear': len(re.findall('L', ModelString)),
        'Gaussian': len(re.findall('G', ModelString)),
        'Voigt': len(re.findall('V', ModelString)),
    }
    recognised = (self.NumModels['Linear'] + self.NumModels['Gaussian']
                  + self.NumModels['Voigt'])
    if self.NumModels['Total'] != recognised:
        print(
            'Warning: Number of total functions does not equal number of summed functions'
        )

    # accumulate the composite model, one component at a time
    ModelCounter = 0
    for key, letter, factory in (('Linear', 'L', LinearModel),
                                 ('Gaussian', 'G', GaussianModel),
                                 ('Voigt', 'V', VoigtModel)):
        for i in range(self.NumModels[key]):
            component = factory(prefix=letter + str(i + 1) + '_')
            if ModelCounter == 0:
                self.Model = component
            else:
                self.Model = self.Model + component
            ModelCounter = ModelCounter + 1
def fit_peak_1d(
        xdata: np.ndarray,
        ydata: np.ndarray,
        engine: str = 'lmfit',
) -> dict:
    """Perform 1D peak fitting using a Voigt function.

    Parameters
    ----------
    xdata : np.ndarray
        Independent variable array.
    ydata : np.ndarray
        Dependent variable array.
    engine : str
        Engine name: 'lmfit' or 'external' uses lmfit's VoigtModel;
        anything else falls back to scipy curve_fit with voigt1d.

    Returns
    -------
    dict
        Dictionary of peak parameters.  NOTE: the keys differ between
        engines (lmfit yields its own best_values keys; the fallback
        yields 'amplitude'/'center'/'fwhm'/'shape').  The original
        annotation claimed np.ndarray, but both branches return a dict.
    """
    if engine.lower() in ['lmfit', 'external']:
        mod = VoigtModel()
        pars = mod.guess(ydata, x=xdata)
        out = mod.fit(ydata, pars, x=xdata)
        return out.best_values
    # fallback engine: bounded scipy curve_fit on the voigt1d function
    popt, pcov = curve_fit(
        voigt1d, xdata, ydata,
        maxfev=int(1e6),
        p0=[ydata.max(), xdata.mean(), 1, 1],
        bounds=([0, xdata.min(), 0, 0],
                [ydata.max() * 10, xdata.max(),
                 xdata.max() - xdata.min(), np.inf]),
    )
    return {
        'amplitude': popt[0],
        'center': popt[1],
        'fwhm': popt[2],
        'shape': popt[3],
    }
def correlate_spectra(obs_flx, obs_wvl, ref_flx, ref_wvl):
    """Cross-correlate an observed spectrum against a reference spectrum
    and return the median radial-velocity shift in km/s (np.nan when the
    derived wavelength shift looks unphysical).

    Both spectra are resampled to a logarithmic wavelength grid, strong
    absorption features are isolated by zeroing everything above a flux
    threshold, and the correlation peak is located with a Voigt fit.
    """
    # convert spectra sampling to logspace
    obs_flux_res_log, _ = spectra_logspace(obs_flx, obs_wvl)
    ref_flux_sub_log, wvl_log = spectra_logspace(ref_flx, ref_wvl)
    wvl_step = ref_wvl[1] - ref_wvl[0]

    # correlate the two spectra
    # keep only deep features: flux above this threshold is zeroed out
    min_flux = 0.95
    ref_flux_sub_log[ref_flux_sub_log > min_flux] = 0.
    obs_flux_res_log[obs_flux_res_log > min_flux] = 0.
    corr_res = correlate(ref_flux_sub_log, obs_flux_res_log, mode='same',
                         method='fft')
    # plt.plot(corr_res)
    # plt.show()
    # plt.close()

    # create a correlation subset that will actually be analysed
    # window of +/- corr_w_size samples around the array midpoint
    corr_w_size = 100
    corr_c_off = np.int64(len(corr_res) / 2.)
    corr_pos_min = corr_c_off - corr_w_size
    corr_pos_max = corr_c_off + corr_w_size
    # print corr_pos_min, corr_pos_max
    corr_res_sub = corr_res[corr_pos_min:corr_pos_max]
    corr_res_sub -= np.median(corr_res_sub)
    corr_res_sub_x = np.arange(len(corr_res_sub))

    # analyze correlation function by fitting gaussian/voigt/lorentzian
    # distribution to it
    fit_model = VoigtModel()
    parameters = fit_model.guess(corr_res_sub, x=corr_res_sub_x)
    corr_fit_res = fit_model.fit(corr_res_sub, parameters, x=corr_res_sub_x)
    corr_center = corr_fit_res.params['center'].value

    # plt.plot(corr_res_sub)
    # plt.axvline(corr_center)
    # plt.show()
    # plt.close()

    # determine the actual shift
    idx_no_shift = np.int32(len(corr_res) / 2.)
    idx_center = corr_c_off - corr_w_size + corr_center
    log_shift_px = idx_no_shift - idx_center
    log_shift_wvl = log_shift_px * wvl_step
    wvl_log_new = wvl_log - log_shift_wvl
    # per-step fractional wavelength change * c * shift in pixels
    # NOTE(review): multiplying by log_shift_px here looks intentional
    # but is worth confirming against the RV derivation
    rv_shifts = (wvl_log_new[1:] -
                 wvl_log_new[:-1]) / wvl_log_new[:-1] * 299792.458 * log_shift_px
    if log_shift_wvl < 2:
        return np.nanmedian(rv_shifts)
    else:
        # something went wrong
        return np.nan
def test_numdifftools_no_bounds():
    """Compare leastsq uncertainties and correlations with those derived
    via numdifftools for several unbounded fit methods.
    """
    # (the original assigned importorskip's return to an unused variable)
    pytest.importorskip("numdifftools")

    # load data to be fitted
    data = np.loadtxt(
        os.path.join(os.path.dirname(__file__), '..', 'examples',
                     'test_peak.dat'))
    x = data[:, 0]
    y = data[:, 1]

    # define the model and initialize parameters
    mod = VoigtModel()
    params = mod.guess(y, x=x)
    params['sigma'].set(min=-np.inf)

    # do fit, here with leastsq model
    result = mod.fit(y, params, x=x, method='leastsq')

    for fit_method in ['nelder', 'basinhopping', 'ampgo']:
        result_ndt = mod.fit(y, params, x=x, method=fit_method)

        # assert that fit converged to the same result
        vals = [result.params[p].value for p in result.params.valuesdict()]
        vals_ndt = [
            result_ndt.params[p].value
            for p in result_ndt.params.valuesdict()
        ]
        assert_allclose(vals_ndt, vals, rtol=5e-3)
        assert_allclose(result_ndt.chisqr, result.chisqr)

        # assert that parameter uncertaintes from leastsq and calculated
        # from the covariance matrix using numdifftools are very similar
        stderr = [result.params[p].stderr
                  for p in result.params.valuesdict()]
        stderr_ndt = [
            result_ndt.params[p].stderr
            for p in result_ndt.params.valuesdict()
        ]

        perr = np.array(stderr) / np.array(vals)
        perr_ndt = np.array(stderr_ndt) / np.array(vals_ndt)
        assert_almost_equal(perr_ndt, perr, decimal=4)

        # assert that parameter correlatations from leastsq and calculated
        # from the covariance matrix using numdifftools are very similar
        for par1 in result.var_names:
            cor = [
                result.params[par1].correl[par2]
                for par2 in result.params[par1].correl.keys()
            ]
            cor_ndt = [
                result_ndt.params[par1].correl[par2]
                for par2 in result_ndt.params[par1].correl.keys()
            ]
            assert_almost_equal(cor_ndt, cor, decimal=2)
def onePeakVoigtFit(self):
    """Fit one Voigt peak plus a linear background to every column of
    the scan data and record the fitted parameters.

    Fills self.gausFit.OnePkFitData with (amplitude, 0, center, 0,
    sigma, 0) per column and accumulates each best-fit curve into
    self.gausFit.binFitData.  Optionally graphs each fit.

    Returns
    -------
    bool
        False on success, True when an exception was caught (a warning
        dialog is shown in that case).
    """
    try:
        nRow, nCol = self.dockedOpt.fileInfo()

        self.gausFit.binFitData = plab.zeros((nRow, 0))
        self.gausFit.OnePkFitData = plab.zeros(
            (nCol, 6))  # Creates the empty 2D List
        for j in range(nCol):
            yy = self.dockedOpt.TT[:, j]
            xx = plab.arange(0, len(yy))

            # endpoint line seeds the linear background
            x1, x2 = xx[0], xx[-1]
            y1, y2 = yy[0], yy[-1]
            m = (y2 - y1) / (x2 - x1)
            b = y2 - m * x2

            mod = VoigtModel()
            # (the original called mod.guess twice, discarding the
            # first result -- a single call is enough)
            pars = mod.guess(yy, x=xx)
            mod = mod + LinearModel()
            pars.add('intercept', value=b, vary=True)
            pars.add('slope', value=m, vary=True)
            out = mod.fit(yy, pars, x=xx)
            amplitude = out.best_values['amplitude']
            fitError = self.getFitError(out.fit_report(sort_pars=True),
                                        amplitude)

            self.gausFit.OnePkFitData[j, :] = (amplitude, 0,
                                               out.best_values['center'], 0,
                                               out.best_values['sigma'], 0)

            # Saves fitted data of each fit
            fitData = out.best_fit
            binFit = np.reshape(fitData, (len(fitData), 1))
            self.gausFit.binFitData = np.concatenate(
                (self.gausFit.binFitData, binFit), axis=1)

            if self.gausFit.continueGraphingEachFit == True:
                self.gausFit.graphEachFitRawData(xx, yy, out.best_fit, 'V')

        return False
    except Exception as e:
        qtWidgets.QMessageBox.warning(
            self.myMainWindow, "Error",
            "There was an error \n\n Exception: " + str(e))
        return True
def xrdCalculationProcessing(spectrumData, centerXValsList, heightList, axs, setupOptions):
    """Convert fitted XRD peak positions to Sn-content annotations.

    Determines the substrate two-theta (from the user's tallest selected
    peak if it agrees with a whole-spectrum Voigt fit, otherwise from
    the fit), offsets all peak centers accordingly, computes Sn content
    for each peak, and annotates the matplotlib axes.

    Parameters (as used here):
    - spectrumData: object with xVals, intensity, bgSubIntensity and
      their log variants
    - centerXValsList / heightList: fitted peak centers and heights
    - axs: one axes object, or a pair when background subtraction is on
    - setupOptions: flags isLogPlot and doBackgroundSubtraction
    """
    # the user's tallest peak is the candidate substrate reflection
    proposedUserSubstrateTwoTheta = centerXValsList[heightList.index(max(heightList))]
    substrateModel = VoigtModel()
    params = substrateModel.guess(spectrumData.bgSubIntensity, x=spectrumData.xVals, negative=False)
    out = substrateModel.fit(spectrumData.bgSubIntensity, params, x=spectrumData.xVals)
    fullModelSubstrateTwoTheta = out.best_values['center']
    if abs(fullModelSubstrateTwoTheta - proposedUserSubstrateTwoTheta) <= 0.1:
        # looks like the user selected the substrate as a peak, use their value
        substrateTwoTheta = proposedUserSubstrateTwoTheta
    else:
        # Looks like the user did not select the substrate as a peak, use a global value from fitting all data
        substrateTwoTheta = fullModelSubstrateTwoTheta

    literatureSubstrateTwoTheta = calculateTwoTheta(snContentPercent=0)  # Reusing Sn content to 2theta equation
    twoThetaOffset = substrateTwoTheta - literatureSubstrateTwoTheta
    offsetCorrectedCenterTwoThetaList = np.asarray(centerXValsList) - twoThetaOffset
    for centerTwoTheta in offsetCorrectedCenterTwoThetaList:
        michaelSnContent = round(calculateXRDSnContent(centerTwoTheta), 1)
        print("Michael Comp:", michaelSnContent)
        print("Zach Comp:", round(calculateXRDSnContent_Zach(centerTwoTheta), 1))
        if abs(centerTwoTheta - literatureSubstrateTwoTheta) > 0.05:  # Don't draw one for the substrate
            _, centerIndex = closestNumAndIndex(spectrumData.xVals, centerTwoTheta + twoThetaOffset)
            if setupOptions.isLogPlot:
                basePlot = spectrumData.lnIntensity
                subtractedPlot = spectrumData.lnBgSubIntensity
            else:
                basePlot = spectrumData.intensity
                subtractedPlot = spectrumData.bgSubIntensity
            if setupOptions.doBackgroundSubtraction:
                # annotate both the raw and background-subtracted axes
                an0 = axs[0].annotate(str(abs(michaelSnContent)),
                                      xy=(centerTwoTheta + twoThetaOffset, basePlot[centerIndex]),
                                      xycoords='data', xytext=(0, 72), textcoords='offset points',
                                      arrowprops=dict(arrowstyle="->", shrinkA=10, shrinkB=5,
                                                      patchA=None, patchB=None))
                an0.draggable()
                an1 = axs[1].annotate(str(abs(michaelSnContent)),
                                      xy=(centerTwoTheta + twoThetaOffset, subtractedPlot[centerIndex]),
                                      xycoords='data', xytext=(0, 72), textcoords='offset points',
                                      arrowprops=dict(arrowstyle="->", shrinkA=10, shrinkB=5,
                                                      patchA=None, patchB=None))
                an1.draggable()
            else:
                # single axes: annotate the (possibly log) subtracted plot
                an0 = axs.annotate(str(abs(michaelSnContent)),
                                   xy=(centerTwoTheta + twoThetaOffset, subtractedPlot[centerIndex]),
                                   xycoords='data', xytext=(0, 72), textcoords='offset points',
                                   arrowprops=dict(arrowstyle="->", shrinkA=10, shrinkB=5,
                                                   patchA=None, patchB=None))
                an0.draggable()
def voigtFit(filename, xloc=0, yloc=1, stats=False, plot=False):
    """Voigt-fit one spectral line from a two-column CSV file.

    Parameters
    ----------
    filename : str
        CSV path; substrings 'd4'/'d5'/'m1r'/'m2' select a narrowed
        wavelength window for later delays.
    xloc, yloc : int
        Column indices of wavelength and intensity.
    stats : bool
        Print the lmfit fit report.
    plot : bool
        Show the fit with a 5-sigma uncertainty band.

    Returns
    -------
    pandas.DataFrame or None
        One-row frame with the Lorentzian FWHM (2*gamma), its error,
        and reduced chi-square; None if no 'gamma' parameter exists.
    """
    # Read Data
    df = pd.read_csv(filename, header=None)
    # Remove bad pixel
    df.drop(df.index[446], inplace=True)
    df.fillna(method='bfill', inplace=True)
    # Narrow region for later delays
    if 'd5' in filename:
        df = df[(df.iloc[:, xloc] > 287.75) & (df.iloc[:, xloc] < 288.6)]
    # BUG FIX: the original condition was `('m1r' or 'm2' in filename)`,
    # which always evaluates truthy ('m1r' is a non-empty string); test
    # each substring explicitly.
    if 'd4' in filename and ('m1r' in filename or 'm2' in filename):
        df = df[(df.iloc[:, xloc] > 287.75) & (df.iloc[:, xloc] < 288.6)]
    x = np.array(df.iloc[:, xloc])
    y = np.array(df.iloc[:, yloc])
    # Set Voigt fit parameters; expr='' frees gamma from the sigma tie
    mod = VoigtModel()
    pars = mod.guess(y, x=x)
    pars['gamma'].set(value=0.7, vary=True, expr='')
    # Perform Voigt fit
    out = mod.fit(y, pars, x=x)
    # Print fit statistics
    if stats:
        print(out.fit_report(min_correl=0.25, show_correl=False))
    # Plot Voigt fit
    if plot:
        plt.plot(x, y, 'o', markersize=2.0, c='blue')
        plt.plot(x, out.best_fit, 'r-')
        dely = out.eval_uncertainty(sigma=5)
        plt.fill_between(x, out.best_fit - dely, out.best_fit + dely,
                         color="#bc8f8f")
        # BUG FIX: the original assigned strings to plt.xlabel/plt.ylabel,
        # overwriting the pyplot functions instead of labelling the axes.
        plt.xlabel('Wavelength (nm)')
        plt.ylabel('Intensity (a.u.)')
        plt.xlim((287, 289.5))
        plt.show()
    # Save fit statistics
    for par_name, param in out.params.items():
        if par_name == 'gamma':
            return pd.DataFrame({
                'fid': [filename],
                'fwhm_L': [2 * param.value],
                'error': [2 * param.stderr],
                'R^2': [out.redchi]
            })
def test_least_squares_cov_x(peakdata, bounds):
    """Test calculation of cov. matrix from Jacobian, with/without bounds."""
    x = peakdata[0]
    y = peakdata[1]

    # define the model and initialize parameters
    mod = VoigtModel()
    params = mod.guess(y, x=x)
    if bounds:
        params['amplitude'].set(min=25, max=70)
        params['sigma'].set(min=0, max=1)
        params['center'].set(min=5, max=15)
    else:
        params['sigma'].set(min=-np.inf)

    # do fit with least_squares and leastsq algorithm
    result = mod.fit(y, params, x=x, method='least_squares')
    result_lsq = mod.fit(y, params, x=x, method='leastsq')

    # assert that fit converged to the same result
    vals = [result.params[p].value for p in result.params.valuesdict()]
    vals_lsq = [
        result_lsq.params[p].value for p in result_lsq.params.valuesdict()
    ]
    assert_allclose(vals, vals_lsq, rtol=1e-5)
    assert_allclose(result.chisqr, result_lsq.chisqr)

    # assert that parameter uncertaintes obtained from the leastsq method and
    # those from the covariance matrix estimated from the Jacbian matrix in
    # least_squares are similar
    stderr = [result.params[p].stderr for p in result.params.valuesdict()]
    stderr_lsq = [
        result_lsq.params[p].stderr for p in result_lsq.params.valuesdict()
    ]
    assert_allclose(stderr, stderr_lsq, rtol=1e-4)

    # assert that parameter correlations obtained from the leastsq method and
    # those from the covariance matrix estimated from the Jacbian matrix in
    # least_squares are similar
    for par1 in result.var_names:
        cor = [
            result.params[par1].correl[par2]
            for par2 in result.params[par1].correl.keys()
        ]
        cor_lsq = [
            result_lsq.params[par1].correl[par2]
            for par2 in result_lsq.params[par1].correl.keys()
        ]
        assert_allclose(cor, cor_lsq, rtol=1e-2)
def test_cov_x_with_bounds():
    """Check least_squares vs leastsq agreement (values, stderr,
    correlations) when all three main parameters are bounded.
    """
    # load data to be fitted
    data = np.loadtxt(
        os.path.join(os.path.dirname(__file__), '..', 'examples',
                     'test_peak.dat'))
    x = data[:, 0]
    y = data[:, 1]

    # define the model and initialize parameters
    mod = VoigtModel()
    params = mod.guess(y, x=x)
    params['amplitude'].set(min=25, max=70)
    params['sigma'].set(min=0, max=1)
    params['center'].set(min=5, max=15)

    # do fit, here with leastsq model
    result = mod.fit(y, params, x=x, method='least_squares')
    result_lsq = mod.fit(y, params, x=x, method='leastsq')

    # assert that fit converged to the same result
    vals = [result.params[p].value for p in result.params.valuesdict()]
    vals_lsq = [
        result_lsq.params[p].value for p in result_lsq.params.valuesdict()
    ]
    assert_allclose(vals_lsq, vals, rtol=1e-5)
    assert_allclose(result_lsq.chisqr, result.chisqr)

    # assert that parameter uncertaintes obtained from the leastsq method and
    # those from the covariance matrix estimated from the Jacbian matrix in
    # least_squares are similar
    stderr = [result.params[p].stderr for p in result.params.valuesdict()]
    stderr_lsq = [
        result_lsq.params[p].stderr for p in result_lsq.params.valuesdict()
    ]
    assert_almost_equal(stderr_lsq, stderr, decimal=6)

    # assert that parameter correlations obtained from the leastsq method and
    # those from the covariance matrix estimated from the Jacbian matrix in
    # least_squares are similar
    for par1 in result.var_names:
        cor = [
            result.params[par1].correl[par2]
            for par2 in result.params[par1].correl.keys()
        ]
        cor_lsq = [
            result_lsq.params[par1].correl[par2]
            for par2 in result_lsq.params[par1].correl.keys()
        ]
        assert_almost_equal(cor_lsq, cor, decimal=6)
def BuiltInModels(self):
    """Assemble self.FitModel by summing lmfit built-in models.

    Reads self.FitInfo['Models'] (mapping name -> {'model': kind}) and
    sums one prefixed component per entry.  Supported kinds: Constant,
    Linear, Gaussian, SkewedGaussian, Voigt; unknown kinds are skipped,
    matching the original behavior.  Also builds
    self.ModelParameters via make_params().
    """
    FitInfo = self.FitInfo
    # dispatch table replaces the original try/except-based "is
    # FitModel defined yet?" check and repeated if-chains
    model_classes = {
        'Constant': ConstantModel,
        'Linear': LinearModel,
        'Gaussian': GaussianModel,
        'SkewedGaussian': SkewedGaussianModel,
        'Voigt': VoigtModel,
    }
    FitModel = None
    for key in FitInfo['Models']:
        kind = FitInfo['Models'][key]['model']
        factory = model_classes.get(kind)
        if factory is None:
            # unknown model names were silently ignored before; keep that
            continue
        component = factory(prefix=key + '_')
        FitModel = component if FitModel is None else FitModel + component
    self.FitModel = FitModel
    self.ModelParameters = FitModel.make_params()
def prepareFittingModels(roiCoordsList, modelType):
    """Build per-ROI composite lmfit models and starting parameters.

    Parameters
    ----------
    roiCoordsList : list
        Each element is either a dict with 'x' and 'y' arrays (a single
        peak region) or a list of such dicts (several peaks to be
        fitted jointly in one composite model).
    modelType : str (case-insensitive)
        'voigt', 'pseudovoigt' (the legacy misspelling 'psuedovoigt' is
        still accepted), 'lorentzian', 'gaussian', or 'pearsonvii'.

    Returns
    -------
    (list, list)
        Parallel lists of composite models and their Parameters.

    Raises
    ------
    ValueError
        For unsupported model types.  (The original used `assert`,
        which is stripped under ``python -O``.)
    """
    modelFactories = {
        'voigt': VoigtModel,
        'pseudovoigt': PseudoVoigtModel,
        'psuedovoigt': PseudoVoigtModel,  # legacy misspelling, kept for compatibility
        'lorentzian': LorentzianModel,
        'gaussian': GaussianModel,
        'pearsonvii': Pearson7Model,
    }
    modelKey = modelType.lower()
    if modelKey not in modelFactories:
        raise ValueError("Entered model type is not supported: " + str(modelType))

    modelList = []
    paramList = []
    index = 1
    for region in roiCoordsList:
        individualModelsList = []
        individualParamsList = []
        if isinstance(region, dict):
            # a single region: wrap it so the loop below always sees a
            # list of dicts
            region = [region]
        for entry in region:
            prefixName = 'v' + str(index) + '_'
            index += 1
            # pull info out of region dict
            selectedXVals = entry['x']
            selectedYVals = entry['y']
            mod = modelFactories[modelKey](prefix=prefixName)
            individualModelsList.append(mod)
            pars = mod.guess(selectedYVals, x=selectedXVals, negative=False)
            pars[prefixName + 'center'].set(min=min(selectedXVals),
                                            max=max(selectedXVals))
            pars[prefixName + 'amplitude'].set(min=0)
            pars[prefixName + 'sigma'].set(min=0)
            if modelKey == 'voigt':
                # expr='' frees gamma from the default gamma=sigma tie
                pars[prefixName + 'gamma'].set(value=0.3, vary=True,
                                               expr='', min=0)
            individualParamsList.append(pars)
        combinedModel = individualModelsList[0]
        combinedParams = individualParamsList[0]
        if len(individualModelsList) > 1:
            for model, params in zip(individualModelsList[1:],
                                     individualParamsList[1:]):
                combinedModel += model
                combinedParams += params
        modelList.append(combinedModel)
        paramList.append(combinedParams)
    return modelList, paramList
def peakfit(xvals, yvals, yerrors=None):
    """Fit a Voigt peak plus a linear background to scan data.

    Parameters
    ----------
    xvals, yvals : array_like
        Scan positions and measured values.
    yerrors : array_like, optional
        Passed straight to lmfit as `weights`.
        NOTE(review): lmfit multiplies residuals by weights, so this
        assumes callers pass 1/sigma-style values -- confirm upstream.

    Returns
    -------
    lmfit.model.ModelResult
    """
    peak = VoigtModel()
    # peak = GaussianModel()
    background = LinearModel()
    params = peak.guess(yvals, x=xvals)
    params += background.make_params(intercept=np.min(yvals), slope=0)
    # gamma is left tied to sigma (lmfit's default Voigt constraint)
    composite = peak + background
    return composite.fit(yvals, params, x=xvals, weights=yerrors)