def Fit_CosmicRay(t, Amp):
    """Fit a cosmic-ray pulse model to a time trace.

    t: time axis in microseconds; Amp: sampled pulse amplitudes.
    Returns best-fit values and relative 1-sigma errors of
    (a, A, t0, tau, taur), the lmfit report string and the residuals.
    """
    # t0 is seeded at the trace maximum and constrained to a window of
    # 150 samples before / 100 samples after the peak.
    peak = np.argmax(Amp)
    params = Parameters()
    params.add('a', value=Amp[0], min=0, max=10)
    params.add('A', value=1)
    params.add('t0', value=t[peak], min=t[peak - 150], max=t[peak + 100])
    params.add('tau', value=100, min=1, max=400)
    params.add('taur', value=1, min=0.2, max=20)

    # least-squares minimisation
    result = minimize(Fit_CosmicRay_Func, params, args=(t, Amp))
    residual = result.residual
    print(fit_report(result))

    # Collect best-fit value and relative 1-sigma error per parameter.
    vals, errs = {}, {}
    for name in ('a', 'A', 't0', 'tau', 'taur'):
        par = result.params[name]
        vals[name] = par.value
        errs[name] = np.abs(par.stderr / par.value)

    return (vals['a'], errs['a'], vals['A'], errs['A'],
            vals['t0'], errs['t0'], vals['tau'], errs['tau'],
            vals['taur'], errs['taur'], fit_report(result), residual)
def Fit_frvsT(temp, freq, fitpara):
    """Fit resonance frequency vs temperature.

    fitpara : sequence [CPWC, CPWG, thick, BCS, Tc, f0, sigman, A];
        only Tc and A are varied (A within +-10% of its guess).

    Returns (Tc, Tc_err, A, A_err, report) with relative 1-sigma errors.

    Fixes: Python 2 print statements converted to print() calls for
    consistency with the rest of this file; unused `residual` removed.
    """
    params = Parameters()
    print(fitpara)
    # tuple layout: (name, value, vary, min, max, expr)
    params.add_many(
        ('CPWC', fitpara[0], False, None, None, None),
        ('CPWG', fitpara[1], False, None, None, None),
        ('thick', fitpara[2], False, None, None, None),
        ('BCS', fitpara[3], False, None, None, None),
        ('Tc', fitpara[4], True, None, None, None),
        ('f0', fitpara[5], False, None, None, None),
        ('sigman', fitpara[6], False, None, None, None),
        ('A', fitpara[7], True, fitpara[7] * 0.9, fitpara[7] * 1.1, None))
    # do fit, here with leastsq model
    result = minimize(Fit_frvsT_Func, params, args=(temp, freq))
    Tc = result.params['Tc'].value
    Tc_err = np.abs(result.params['Tc'].stderr / Tc)
    A = result.params['A'].value
    A_err = np.abs(result.params['A'].stderr / A)
    print(fit_report(result))
    return Tc, Tc_err, A, A_err, fit_report(result)
def Fit_SkewedLorentizian(f, t):
    """Fit a skewed-Lorentzian model to transmission data t (dB) vs
    frequency f; returns value/stderr pairs for each parameter plus the
    lmfit report string."""
    params = Parameters()
    params.add('A1', value=np.power(10, t[0] / 10.), min=0, max=10)  # background
    params.add('A2', value=0)  # slope
    params.add('A3', value=np.power(10, np.amin(t) / 10.))  # lowest point
    params.add('A4', value=0)
    params.add('fr', value=np.median(f), min=f[0], max=f[len(f) - 1])
    params.add('Qr', value=10000, min=1e3, max=1e8)

    result = minimize(Fit_SkewedLorentizian_Func, params, args=(f, t))
    # print the fitting result
    print(fit_report(result))

    # best-fit value and absolute 1-sigma error for every parameter
    out = []
    for name in ('A1', 'A2', 'A3', 'A4', 'fr', 'Qr'):
        par = result.params[name]
        out.extend((par.value, par.stderr))
    return tuple(out) + (fit_report(result),)
def Fit_frvsT(temp, freq, fitpara):
    """Fit resonance frequency vs temperature.

    NOTE(review): this is a duplicate definition of Fit_frvsT (see the
    earlier one in this file); the second definition wins at import time.
    Consider removing one of them.

    fitpara : sequence [CPWC, CPWG, thick, BCS, Tc, f0, sigman, A];
        only Tc and A are varied (A within +-10% of its guess).

    Returns (Tc, Tc_err, A, A_err, report) with relative 1-sigma errors.

    Fixes: Python 2 print statements converted to print() calls; unused
    `residual` local removed.
    """
    params = Parameters()
    print(fitpara)
    # tuple layout: (name, value, vary, min, max, expr)
    params.add_many(
        ('CPWC', fitpara[0], False, None, None, None),
        ('CPWG', fitpara[1], False, None, None, None),
        ('thick', fitpara[2], False, None, None, None),
        ('BCS', fitpara[3], False, None, None, None),
        ('Tc', fitpara[4], True, None, None, None),
        ('f0', fitpara[5], False, None, None, None),
        ('sigman', fitpara[6], False, None, None, None),
        ('A', fitpara[7], True, fitpara[7] * 0.9, fitpara[7] * 1.1, None))
    # do fit, here with leastsq model
    result = minimize(Fit_frvsT_Func, params, args=(temp, freq))
    Tc = result.params['Tc'].value
    Tc_err = np.abs(result.params['Tc'].stderr / Tc)
    A = result.params['A'].value
    A_err = np.abs(result.params['A'].stderr / A)
    print(fit_report(result))
    return Tc, Tc_err, A, A_err, fit_report(result)
def display_mle_fit(self, scaled=False, **kwargs):
    """Give a readable overview of the result of the MLE fitting routine.

    Warning
    -------
    The uncertainty shown is the largest of the asymmetrical errors!
    Work is being done to incorporate asymmetrical errors in the
    report; for now, rely on the correlation plot."""
    if not hasattr(self, 'fit_mle'):
        print('Model has not yet been fitted with this method!')
        return
    kwargs.setdefault('show_correl', False)
    print('NDoF: {:d}, Chisquare: {:.8G}, Reduced Chisquare: {:.8G}'.format(
        self.ndof_mle, self.chisqr_mle, self.redchi_mle))
    if scaled:
        print('Errors scaled with reduced chisquare.')
        # scale standard errors by sqrt(reduced chi-square) on a copy
        scaled_pars = copy.deepcopy(self.fit_mle)
        for name in scaled_pars:
            if scaled_pars[name].stderr is not None:
                scaled_pars[name].stderr *= (self.redchi_mle ** 0.5)
        print(lm.fit_report(scaled_pars, **kwargs))
    else:
        print('Errors not scaled with reduced chisquare.')
        print(lm.fit_report(self.fit_mle, **kwargs))
def display_chisquare_fit(self, scaled=False, **kwargs):
    """Display all relevant info of the least-squares fitting routine,
    if this has been performed.

    Parameters
    ----------
    kwargs: misc
        Keywords passed on to :func:`fit_report` from the LMFit package."""
    if not hasattr(self, 'chisq_res_par'):
        print('Spectrum has not yet been fitted with this method!')
        return
    print('NDoF: {:d}, Chisquare: {:.8G}, Reduced Chisquare: {:.8G}'.format(
        self.ndof_chi, self.chisqr_chi, self.redchi_chi))
    print('Akaike Information Criterium: {:.8G}, Bayesian Information Criterium: {:.8G}'
          .format(self.aic_chi, self.bic_chi))
    if scaled:
        print('Errors scaled with reduced chisquare.')
        # scale standard errors by sqrt(reduced chi-square) on a copy
        scaled_pars = copy.deepcopy(self.chisq_res_par)
        for name in scaled_pars:
            if scaled_pars[name].stderr is not None:
                scaled_pars[name].stderr *= (self.redchi_chi ** 0.5)
        print(lm.fit_report(scaled_pars, **kwargs))
    else:
        print('Errors not scaled with reduced chisquare.')
        print(lm.fit_report(self.chisq_res_par, **kwargs))
def fit(self, mode='sig', nmodes=1, fitglobal=None, init=None, fix=None,
        lmfit_pars=None, fitqdep=None):
    '''Compute the fits using lmfit's Minimizer.

    All parameters keep their previous meaning.  The former mutable
    default arguments ([] / {}) are replaced by None sentinels: the old
    defaults were shared between calls, and `init` was mutated in place
    (adding '__lnsigma'), leaking state across invocations and into the
    caller's dict.

    Returns (self.pars, self.fit_result).
    '''
    fitglobal = [] if fitglobal is None else fitglobal
    fix = {} if fix is None else fix
    lmfit_pars = {} if lmfit_pars is None else lmfit_pars
    fitqdep = {} if fitqdep is None else fitqdep
    # copy so the caller's dict is never mutated below
    init = {} if init is None else dict(init)

    self.fitglobal = fitglobal
    self.nmodes = nmodes
    self.fitqdep = fitqdep
    self.fix = fix

    # unweighted fits estimate the noise level through '__lnsigma'
    if not lmfit_pars.get('is_weighted', True):
        init['__lnsigma'] = 1
    self._init_parameters(init, fix)

    # setting the weights of the data
    self._get_weights(mode)

    self._minimizer = lmfit.Minimizer(self._residuals, params=self._lmpars,
                                      reduce_fcn=self._reduce_func,
                                      iter_cb=self._iter_cb,
                                      nan_policy='omit')

    if bool(self.fitglobal):
        # one global fit over all selected q values
        self._fit_data = self.cf[self.nq]
        self._fit_weights = self._weights[self.nq]
        out = self._minimizer.minimize(**lmfit_pars)
        self._write_to_pars(out)
        self.fit_result.append((out, lmfit.fit_report(out)))
    else:
        # one independent fit per q value
        for line, qi in enumerate(self.nq):
            self._fit_data = self.cf[qi]
            self._fit_weights = self._weights[qi]
            out = self._minimizer.minimize(**lmfit_pars)
            self._write_to_pars(out, line=line)
            self.fit_result.append((out, lmfit.fit_report(out)))

    self.pars['q'] = self.qv[self.nq]
    self.pars = self.pars.apply(pd.to_numeric)
    return self.pars, self.fit_result
def print_pars(self, fitresult=False):
    """Print a report of the parameters.

    fitresult : bool
        If True, print the parameters of the last fit result
        (self.result.params); otherwise print the current self.params.
    """
    # Python 2 print statement converted to print(); `== True`
    # replaced with a plain truth test.
    if fitresult:
        params = self.result.params
    else:
        params = self.params
    print(lm.fit_report(params, show_correl=0))
def spectra_fit(fit_params, *args, **kwargs):
    """Fit one, two or three measured spectra simultaneously.

    Each positional argument is a (sim_data, meas_data_full, meas_data)
    triple.  Keyword flags: print_info, show_plots.
    Returns the fitted (shift, spread) values.

    Fixes applied: Python 2 print statements converted to print();
    `y_scale_3` now passes its .value (previously the Parameter object
    itself was passed, unlike every other entry); print_info/show_plots
    are read with kwargs.get() so they are optional instead of raising
    KeyError when absent.
    """
    print('\nperforming minimization')
    fit_kws = {'nan_policy': 'omit'}

    if len(args) in (1, 2, 3):
        sim_data_1, meas_data_full_1, meas_data_1 = args[0]
    if len(args) == 1:
        print(' single sprectrum fit')
        res = lmfit.minimize(minimize, fit_params, method='nelder',
                             args=((sim_data_1, meas_data_1),), **fit_kws)
    if len(args) in (2, 3):
        sim_data_2, meas_data_full_2, meas_data_2 = args[1]
    if len(args) == 2:
        print(' double spectrum fit')
        res = lmfit.minimize(minimize, fit_params,
                             args=((sim_data_1, meas_data_1),
                                   (sim_data_2, meas_data_2)), **fit_kws)
    if len(args) == 3:
        sim_data_3, meas_data_full_3, meas_data_3 = args[2]
        print(' triple spectrum fit')
        res = lmfit.minimize(minimize, fit_params,
                             args=((sim_data_1, meas_data_1),
                                   (sim_data_2, meas_data_2),
                                   (sim_data_3, meas_data_3)), **fit_kws)

    if kwargs.get('print_info'):
        print('\n', res.message)
        print(lmfit.fit_report(res))

    if kwargs.get('show_plots'):
        if len(args) == 1:
            plot_fitted_spectra(
                res.params['shift'].value, res.params['spread'].value,
                ((res.params['alpha_1'].value,),
                 (res.params['beta_1'].value,),
                 (res.params['gamma_1'].value,),
                 (res.params['c1_1'].value,),
                 (res.params['c2_1'].value,),
                 (res.params['y_scale_1'].value,),
                 (sim_data_1,),
                 (meas_data_full_1,),
                 (meas_data_1,)))
        if len(args) == 2:
            plot_fitted_spectra(
                res.params['shift'].value, res.params['spread'].value,
                ((res.params['alpha_1'].value, res.params['alpha_2'].value),
                 (res.params['beta_1'].value, res.params['beta_2'].value),
                 (res.params['gamma_1'].value, res.params['gamma_2'].value),
                 (res.params['c1_1'].value, res.params['c1_2'].value),
                 (res.params['c2_1'].value, res.params['c2_2'].value),
                 (res.params['y_scale_1'].value, res.params['y_scale_2'].value),
                 (sim_data_1, sim_data_2),
                 (meas_data_full_1, meas_data_full_2),
                 (meas_data_1, meas_data_2)))
        if len(args) == 3:
            plot_fitted_spectra(
                res.params['shift'].value, res.params['spread'].value,
                ((res.params['alpha_1'].value, res.params['alpha_2'].value,
                  res.params['alpha_3'].value),
                 (res.params['beta_1'].value, res.params['beta_2'].value,
                  res.params['beta_3'].value),
                 (res.params['gamma_1'].value, res.params['gamma_2'].value,
                  res.params['gamma_3'].value),
                 (res.params['c1_1'].value, res.params['c1_2'].value,
                  res.params['c1_3'].value),
                 (res.params['c2_1'].value, res.params['c2_2'].value,
                  res.params['c2_3'].value),
                 (res.params['y_scale_1'].value, res.params['y_scale_2'].value,
                  res.params['y_scale_3'].value),  # .value was missing here
                 (sim_data_1, sim_data_2, sim_data_3),
                 (meas_data_full_1, meas_data_full_2, meas_data_full_3),
                 (meas_data_1, meas_data_2, meas_data_3)))

    # get shift term
    shift_term = res.params['shift'].value
    spread_term = res.params['spread'].value
    return shift_term, spread_term
def determine_excess(self):
    """Fit straight lines M = m*B + n to the upper and lower field
    branches of the magnetisation curve, combine the two results by
    inverse-variance weighting and store the background-corrected
    magnetisation in self.M_corr."""

    def _branch_fit(mask):
        # Linear fit on one branch; returns the fitted lmfit parameters.
        B, M, sM = self.B[mask], self.M[mask], self.sM[mask]
        pars = lmfit.Parameters()
        pars.add("m", (M[-1] - M[0]) / (B[-1] - B[0]))
        pars.add("n", B[0])
        res = lmfit.minimize(self.residuum, pars, args=(B, M, sM),
                             method='leastsq')
        print(lmfit.fit_report(res))
        return res.params

    up_mask = np.logical_and(self.min_B < self.B, self.B < self.max_B)
    down_mask = np.logical_and(-self.max_B < self.B, self.B < -self.min_B)
    p_upper_fit = _branch_fit(up_mask)
    p_lower_fit = _branch_fit(down_mask)

    self.mup, self.nup = p_upper_fit["m"].value, p_upper_fit["n"].value
    self.mlow, self.nlow = p_lower_fit["m"].value, p_lower_fit["n"].value
    self.smup, self.snup = p_upper_fit["m"].stderr, p_upper_fit["n"].stderr
    self.smlow, self.snlow = p_lower_fit["m"].stderr, p_lower_fit["n"].stderr

    # Inverse-variance weighted combination of the two branches.
    self.sm = 1. / self.smup**2 + 1. / self.smlow**2
    self.m = (self.mup / self.smup**2 + self.mlow / self.smlow**2) / self.sm
    self.sm = np.sqrt(1. / self.sm)
    self.sn = 1. / self.snup**2 + 1. / self.snlow**2
    # NOTE(review): the minus sign treats the lower-branch intercept as
    # opposite in sign — confirm this matches the intended symmetrisation.
    self.n = (self.nup / self.snup**2 - self.nlow / self.snlow**2) / self.sn
    self.sn = np.sqrt(1. / self.sn)

    # Subtract the averaged linear background from the magnetisation.
    self.M_corr = self.M - self.m * self.B
    self.sM_corr = self.sM
def fit_report(fit_result, modelpars=None, show_correl=True, min_correl=0.1,
               sort_pars=True, _larch=None, **kws):
    """generate a report of fitting results

    wrapper around lmfit.fit_report.  The report contains the best-fit
    values for the parameters and their uncertainties and correlations.

    Parameters
    ----------
    fit_result : result from fit
        Fit Group output from fit, or lmfit.MinimizerResult returned from a fit.
    modelpars : Parameters, optional
        Known Model Parameters.
    show_correl : bool, optional
        Whether to show list of sorted correlations (default is True).
    min_correl : float, optional
        Smallest correlation in absolute value to show (default is 0.1).
    sort_pars : bool or callable, optional
        Whether to show parameter names sorted in alphanumerical order.
        If False, parameters are listed in insertion order.  If callable,
        it is used to extract a comparison key from each list element.

    Returns
    -------
    string
        Multi-line text of fit report.
    """
    # shared keyword set for all lmfit report calls below
    report_kws = dict(modelpars=modelpars, show_correl=show_correl,
                      min_correl=min_correl, sort_pars=sort_pars)

    result = getattr(fit_result, 'fit_details', fit_result)
    if isinstance(result, MinimizerResult):
        return lmfit.fit_report(result, **report_kws)
    if isinstance(result, ModelResult):
        return result.fit_report(**report_kws)

    result = getattr(fit_result, 'params', fit_result)
    if isinstance(result, Parameters):
        return lmfit.fit_report(result, **report_kws)

    # last resort: try converting a larch group to Parameters
    try:
        converted = group2params(fit_result, _larch=_larch)
        return lmfit.fit_report(converted, **report_kws)
    except (ValueError, AttributeError):
        pass
    return "Cannot make fit report with %s" % repr(fit_result)
def get_fit(x, y, xer, yer, nsfh='Del.'):
    """Fit a star-formation-history model to log10(SFR) data.

    x : time axis; y : log10(SFR); yer : error on y (xer is unused).
    nsfh : 'Del.' (delayed), 'Decl.' (declining) or 'Cons.' (constant).

    Returns ([t0, tau, A], reduced_chi_square).

    Fix: the reduced chi-square is now taken directly from the lmfit
    result (out.redchi) instead of being scraped out of the formatted
    fit_report() string, which silently broke whenever lmfit changed
    its report layout.
    """
    from lmfit import Parameters, minimize, fit_report

    fit_params = Parameters()
    fit_params.add('t0', value=.5, min=0, max=14)
    fit_params.add('tau', value=.1, min=0, max=100)
    fit_params.add('A', value=1, min=0, max=5000)

    def residual_tmp(pars):
        # residual in log10(SFR) space, restricted to positive model values
        vals = pars.valuesdict()
        t0_tmp, tau_tmp, A_tmp = vals['t0'], vals['tau'], vals['A']
        if nsfh == 'Del.':
            model = SFH_del(t0_tmp, tau_tmp, A_tmp, tt=x)
        elif nsfh == 'Decl.':
            model = SFH_dec(t0_tmp, tau_tmp, A_tmp, tt=x)
        elif nsfh == 'Cons.':
            model = SFH_cons(t0_tmp, tau_tmp, A_tmp, tt=x)
        con = (model > 0)
        return (np.log10(model[con]) - y[con]) / yer[con]

    out = minimize(residual_tmp, fit_params, method='powell')
    print(fit_report(out))

    param = [out.params['t0'].value, out.params['tau'].value,
             out.params['A'].value]
    rcsq = out.redchi
    return param, rcsq
def fit(self):
    """Run the least-squares fit and print an error report.

    Stores the lmfit result in self.result, reconstructs the fitted
    curve in self.Yfit (undoing the error weighting when self.EY is
    given), prints a report and refreshes the plot.

    Fix: Python 2 print statements converted to print() calls for
    consistency with the rest of this file.
    """
    self.result = lm.minimize(fitfunc, self.params,
                              args=(self.X, self.Y, self.EY), ftol=1e-10)
    # calculate final result: residual is (Yfit - Y) (optionally divided
    # by EY), so invert that to recover the fitted curve
    if self.EY is None:
        self.Yfit = self.Y + self.result.residual
    else:
        self.Yfit = self.Y + self.result.residual * self.EY
    # write error report
    print('=' * 80)
    print('success:', self.result.success)
    print(self.result.message)
    print(lm.fit_report(self.result, show_correl=0))
    self.plot(fitresult=True)
def residuum(self, p):
    """Weighted residual vector between every model set and its data set,
    optionally restricted to the (qmin, qmax) window in self.fit_range;
    periodically prints fit progress."""
    self.model.params = p
    self.ptrFit.iteration += 1
    self.model.updateModel()

    residuals = []
    for idx in range(self.model.nModelsets):
        dataset = self.data.getDataset(idx)
        weight = self.data.dataWeights[idx]
        modelset = self.model.getModelset(idx)
        q = dataset.getDomain()
        I_meas = dataset.getValues()
        I_err = dataset.getErrors()
        I_calc = modelset.getValues()
        if self.fit_range is not None:
            # keep only points inside the fit window
            window = np.logical_and(q > self.fit_range[0],
                                    q < self.fit_range[1])
            q = q[window]
            I_meas = I_meas[window]
            I_err = I_err[window]
            I_calc = I_calc[window]
        contribution = np.sqrt(weight) * self.residuumFormula(q, I_meas,
                                                              I_err, I_calc)
        residuals = np.concatenate([residuals, contribution])

    if self.ptrFit.printIteration is not None:
        if self.ptrFit.iteration % self.ptrFit.printIteration == 0:
            print(f'Iteration: {self.ptrFit.iteration}\tChi2:{np.sum(residuals**2)}')
            print(lmfit.fit_report(p))
    return residuals
def on_read_data_from_lmfit(self):
    """Copy the lmfit result stored in P4Rm.resultFit into the shared
    P4Rm dictionaries: the least-squares report plus the sp/dwp
    parameter arrays."""
    from lmfit import fit_report
    a = P4Rm()
    result = P4Rm.resultFit
    P4Rm.FitDict["Leastsq_report"] = [result.success, result.lmdif_message,
                                      result.ier, fit_report(result)]
    if a.AllDataDict["model"] == 2:
        # model 2 uses a fixed layout: indices 1..6 -> sp, 7..13 -> dwp
        for i, param in enumerate(result.params.values()):
            if 1 <= i < 7:
                P4Rm.ParamDict["sp"][i] = param.value
            if 7 <= i < 14:
                P4Rm.ParamDict["dwp"][i - 7] = param.value
    else:
        # generic layout: element counts are stored as parameters
        len_sp = int(result.params["nb_sp_val"])
        len_dwp = int(result.params["nb_dwp_val"])
        for k in range(len_dwp):
            P4Rm.ParamDict["dwp"][k] = result.params["dwp_" + str(k)].value
        for k in range(len_sp):
            P4Rm.ParamDict["sp"][k] = result.params["sp_" + str(k)].value
def Fit_Million(freq, t, para_guess):
    """Resonator fit after 'Planar superconducting resonators with
    internal quality factors above one million'.

    Returns value/stderr pairs for (a, phi0, Qi, Qc_scaled, fr, aphi0)
    followed by the lmfit result object.
    """
    # (name, initial value, min, max)
    table = (('a', para_guess[0], -100, 3000),
             ('phi0', para_guess[1], -np.pi, np.pi),
             ('Qi', para_guess[2], 1e3, 1e8),
             ('Qc_scaled', para_guess[3], 1e2, 1e8),
             ('fr', para_guess[4], freq[0], freq[len(freq) - 1]),
             ('aphi0', para_guess[5], -np.pi, np.pi))
    params = Parameters()
    for name, value, lo, hi in table:
        params.add(name, value=value, min=lo, max=hi)

    # do fit, here with leastsq model
    result = minimize(Fit_Million_Func, params, args=(freq, t))

    out = []
    for name, _, _, _ in table:
        par = result.params[name]
        out.extend((par.value, par.stderr))

    # print the fitting result
    print(fit_report(result))
    return tuple(out) + (result,)
def lmfit(self, star, logger=None):
    """Fit parameters of the given star using lmfit (Levenberg-Marquardt
    minimization algorithm).

    :param star:    A Star to fit.
    :param logger:  A logger object for logging debug info. [default: None]

    :returns: (flux, dx, dy, scale, g1, g2, flag)
    """
    import lmfit
    logger = galsim.config.LoggerWrapper(logger)
    fit_res = self._lmfit_minimize(self._lmfit_params(star), star,
                                   logger=logger)
    if logger:
        logger.debug(lmfit.fit_report(fit_res))
    flux, du, dv, scale, g1, g2 = fit_res.params.valuesdict().values()
    if not fit_res.success:
        raise RuntimeError("Error fitting with lmfit.")
    try:
        params_var = np.diag(fit_res.covar)
    except (ValueError, AttributeError) as exc:
        # fit_res.covar is either None or does not exist
        logger.warning("Failed to get params_var")
        logger.warning(" -- Caught exception: %s", exc)
        params_var = np.zeros(6)
    return flux, du, dv, scale, g1, g2, params_var
def fit_dunham(q, d):
    """Fit Dunham coefficients Y_ij (A-state) to the line data (q, d)."""
    print('Starting fit...')
    molids, mols = read_in_dunham_config()
    Ys = mols['AlCl62X_Bernath'].keys()
    # kept for reference: coefficient subset once considered for fitting
    allowed_Ys = [
        'y00', 'y01', 'y10', 'y11', 'y20', 'y21', 'y22', 'y12', 'y02'
    ]

    params = Parameters()
    for name in Ys:
        if name == 'matrix':
            continue
        if name == 'y10':
            # y10 gets an upper bound; all other coefficients are unbounded
            params.add(name + 'A', value=0.0, max=600.0, vary=True)
        else:
            params.add(name + 'A', value=0.0, vary=True)

    # do fit, here with leastsq model
    minner = Minimizer(fcn2min, params, fcn_args=(q, d))
    result = minner.minimize()

    # Store the Confidence data from the fit
    con_report = lmfit.fit_report(result.params)
    model = fcn2min(result.params, q, d, get_fit=True)
    return (result.params, params, con_report, model)
def mostRecentModelFitReport(self):
    """Return lmfit's textual report for the most recent model result,
    or a placeholder string when no fit has been performed yet."""
    result = self.mostRecentModelResult
    if result is None:
        return "No fit performed"
    return lmfit.fit_report(result) + "\n\n"
def do_fit(x, y):
    """Least-squares fit of two-state vibrational constants to (x, y).

    Returns the lmfit MinimizerResult.

    Fix: the old `con_report = lmfit.fit_report(result.params)` was dead
    code — never used or returned — and has been removed.
    """
    # (name, value, min, max, vary); Te_1 is the fixed reference level
    table = (('Te_1', 0.0, 0.0, 340.0, False),
             ('Te_2', 38237.0, 0.0, 40000.0, True),
             ('we_1', 480.0, 0.0, 500.0, True),
             ('we_2', 440.0, 0.0, 500.0, True),
             ('wexe_1', 2.037, 0.0, 5.0, True),
             ('wexe_2', 2.81, 0.0, 8.0, True),
             ('weye_1', 0.5, 0.0, 5.0, True),
             ('weye_2', 0.1, 0.0, 8.0, True),
             ('weze_1', 0.5, 0.0, 5.0, True),
             ('weze_2', 0.1, 0.0, 8.0, True))
    params = Parameters()
    for name, value, lo, hi, vary in table:
        params.add(name, value=value, min=lo, max=hi, vary=vary)

    # do fit, here with leastsq model
    minner = Minimizer(fcn2min, params, fcn_args=(x, y))
    result = minner.minimize()
    return result
def CalcGwAge(self, tracers, FieldData, DischData, use_disch=0, DischargeError=0.10):
    '''Estimate the best groundwater mean age to match the given stream
    concentrations of tracers with age information - has_age=1.'''
    # Mean-age bounds in days: 1 day to 1000 years.  Making the upper
    # limit longer exposes inherent problems with the convolution
    # technique; needs to be dealt with at some point.
    C_tau = self.C_tau[1]
    lb, ub = 1., 1000. * 365.
    m = CreateLmParams(C_tau, 'cTau', lb, ub)
    mini = lm.Minimizer(self.MisfitAge, m,
                        fcn_args=(tracers, FieldData, DischData),
                        fcn_kws={'use_disch': use_disch,
                                 'DischargeError': DischargeError})
    fit = mini.minimize(params=m)
    # Store the fitted mean age and rerun the transport calculation.
    C_tau = ExtractLmParamVec(fit.params, 'cTau')
    self.C_tau = (self.C_tau[0], C_tau)
    self.SimRes.fit_solution = fit
    self.CalcTran()
    print(lm.fit_report(fit))
def CalcGwInflow(self, tracers, FieldData, DischData, DischargeError=0.10,
                 use_disch=1, log_flag=1):
    '''Estimate the best groundwater inflow to match the given stream
    concentrations.

    Bug fix: in log space the upper bound was assigned to a stray
    variable `up` instead of `ub`, so the fit silently kept the
    linear-space upper bound of 1 instead of 0 (exp(0) = 1).
    '''
    qlin = self.q_lin
    lb = 0.
    ub = 1.
    if log_flag:
        # fit log(q) with bounds exp(-20) .. exp(0)
        qlin = np.log(qlin)
        lb = -20
        ub = 0.  # was `up = 0.` — typo that left ub at 1
    m = CreateLmParams(qlin, 'qlin', lb, ub)
    mini = lm.Minimizer(self.MisfitFlux, m,
                        fcn_args=(tracers, FieldData, DischData),
                        fcn_kws={'DischargeError': DischargeError,
                                 'use_disch': use_disch,
                                 'log_flag': log_flag})
    fit = mini.minimize(params=m, ftol=1.e-5, gtol=1.e-5, xtol=1e-5)
    print(lm.fit_report(fit))
    self.q_lin = ExtractLmParamVec(fit.params, 'qlin')
    if log_flag:
        self.q_lin = np.exp(self.q_lin)
    self.SimRes.fit_solution = fit
    # run a last best solution
    self.CalcTran()
def export_params(self, outfilename="params.txt"):
    """Write the fit chi-square, lattice parameters and the electron
    density profile parameter report to a text file."""
    lattice = self.latt_par
    with open(outfilename, 'w') as f:
        f.writelines([
            "chisqr={0: f}\n".format(self.edp.chisqr),
            "D={0: f}\n".format(lattice['D'].value),
            "lambda_r={0: f}\n".format(lattice['lambda_r'].value),
            "gamma={0: f}\n\n".format(lattice['gamma'].value),
            lmfit.fit_report(self.edp_par),
        ])
def Gaussian_beam_propagation(meas_points, widths, lambda_beam, plot=False):
    """Fit a free-space Gaussian beam caustic w(z) to measured beam
    widths and optionally plot the fitted curve against the data."""
    params = Parameters()
    params.add('omega_zero', value=widths[0])  # waist radius guess
    params.add('z0', value=0)                  # waist position guess
    fitter = Minimizer(__w_Gauss_freespace_residual, params,
                       fcn_args=(meas_points,),
                       fcn_kws={"meas_beamwidths": widths,
                                "lam": lambda_beam})
    fit_res = fitter.minimize(maxfev=10**8)
    print(fit_report(fit_res))

    if plot is True:
        print("Let's plot it")
        fitted = fit_res.params.valuesdict()
        fitted_w0 = fitted["omega_zero"]
        fitted_z0 = fitted["z0"]
        # Pick a plotting range that contains the fitted waist position.
        # NOTE(review): the first branch starts the range at fitted_w0
        # (the waist *radius*) — possibly fitted_z0 was intended; confirm.
        if fitted_z0 < meas_points[0]:
            plotpoints = np.linspace(fitted_w0, meas_points[-1], 100)
        elif meas_points[0] < fitted_z0 < meas_points[-1]:
            plotpoints = np.linspace(meas_points[0], meas_points[-1], 100)
        else:
            plotpoints = np.linspace(meas_points[0], fitted_z0, 100)
        model_beamwidth = __w_Gauss_freespace_residual(
            fit_res.params, plotpoints, meas_beamwidths=None, lam=lambda_beam)
        plt.plot(plotpoints, model_beamwidth, 'b')
        plt.scatter(meas_points, widths, c="r")
        plt.show()
def save_txt(self, full_path: str = None, mode: str = 'wt', cmd: str = '') -> None:
    '''Save the optimization settings to disk as a textfile'''
    logger = logging.getLogger(__name__)
    if full_path is None:  # pragma: no cover
        full_path = save_file_full_name(self.cte.lattice, 'optimization') + '.txt'
    logger.info('Saving solution as text to {}.'.format(full_path))

    # Assemble the sections first, then write everything in one pass.
    header = ['Settings:\n',
              self.cte['config_file'],
              '\n\nCommand used to generate data:\n',
              cmd,
              '\n\n\nOptimization progress:\n']
    progress = [update + '\r\n' for update in self.optim_progress]
    stats = ['\nOptimization statistics:\n',
             fit_report(self.result) + '\r\n',
             '\r\n',
             f'Total time: {self.formatted_time}.' + '\r\n',
             f'Optimized RMS error: {self.min_f:.3e}.' + '\r\n',
             'Parameters name and value:' + '\r\n']
    values = [f'{name}: {best_val:.3e}.' + '\r\n'
              for name, best_val in zip(self.result.params.keys(),
                                        self.best_params.T)]
    with open(full_path, mode) as csvfile:
        csvfile.writelines(header + progress + stats + values)
def fit_lmfit(self, report=True, printdot=True):
    """Calibrate the model parameters with lmfit's leastsq solver and
    store optimal values plus standard errors in self.parameters."""
    import lmfit

    # Build lmfit Parameters from the parameter table.
    self.lmfitparams = lmfit.Parameters()
    for name in self.parameters.index:
        row = self.parameters.loc[name]
        self.lmfitparams.add(name, value=row['initial'],
                             min=row['pmin'], max=row['pmax'])

    self.fitresult = lmfit.minimize(self.residuals_lmfit, self.lmfitparams,
                                    method="leastsq",
                                    kws={"printdot": printdot},
                                    epsfcn=1e-4)
    print('', flush=True)
    print(self.fitresult.message)

    if self.fitresult.success:
        for name in self.parameters.index:
            self.parameters.loc[name, 'optimal'] = \
                self.fitresult.params.valuesdict()[name]
        # standard errors from the covariance matrix, when available
        if hasattr(self.fitresult, 'covar'):
            self.parameters['std'] = np.sqrt(np.diag(self.fitresult.covar))
            self.parameters['perc_std'] = 100 * self.parameters['std'] / \
                np.abs(self.parameters['optimal'])
        else:
            self.parameters['std'] = np.nan
            self.parameters['perc_std'] = np.nan

    if report:
        print(lmfit.fit_report(self.fitresult))
def Fit_7para_tau(freq, t, estimateparas):
    """Seven-parameter resonator fit with the cable delay tau held fixed.

    Returns value/stderr pairs for (a, alpha, tau, phi0, fr, Qr, Qc)
    followed by the lmfit report string.
    """
    params = Parameters()
    params.add('a', value=estimateparas[0])  # bounds intentionally open
    params.add('alpha', value=estimateparas[1], min=-np.pi, max=np.pi)
    params.add('tau', value=estimateparas[2], vary=False)  # fixed cable delay
    params.add('phi0', value=estimateparas[3], min=-2 * np.pi, max=2 * np.pi)
    params.add('fr', value=estimateparas[4], min=freq[0],
               max=freq[len(freq) - 1])
    params.add('Qr', value=estimateparas[5], min=1e3, max=1e8)
    params.add('Qc', value=estimateparas[6], min=1e3, max=1e8)

    result = minimize(Fit_7para_Func, params, args=(freq, t))
    # print the fitting result
    print(fit_report(result))

    # get fitted value and 1 sigma error value for every parameter
    out = []
    for name in ('a', 'alpha', 'tau', 'phi0', 'fr', 'Qr', 'Qc'):
        par = result.params[name]
        out.extend((par.value, par.stderr))
    return tuple(out) + (fit_report(result),)
def gradient_descent(self, p, flat=True):
    """ Optimizes parameters following specified parameter dependencies
    on task conditions (i.e. depends_on={param: cond})

    ::Arguments::
        p (dict): parameter dictionary
        flat (bool): if flat, yhat have ndim=1, else ndim>1 and popt
            lmParams will have conditional param names
    ::Returns::
        yhat (array): model-predicted data array
        finfo (pd.Series): fit info (AIC, BIC, chi2, redchi, etc)
        popt (dict): optimized parameters dictionary
    """
    fp = self.fitparams
    solver_opts = {'xtol': fp['tol'], 'ftol': fp['tol'],
                   'maxfev': fp['maxfev']}
    # lmfit Parameters object keeps track of parameter names and
    # dependencies during the fit
    lmParams = theta.loadParameters(inits=p, pc_map=self.pc_map,
                                    is_flat=flat, kind=self.kind)
    self.lmMin = minimize(self.simulator.cost_fx, lmParams,
                          method=fp['method'], options=solver_opts)
    self.param_report = fit_report(self.lmMin.params)
    return self.assess_fit(flat=flat)
def lmfit(self, star, logger=None):
    """Fit parameters of the given star using lmfit (Levenberg-Marquardt
    minimization algorithm).

    :param star:    A Star to fit.
    :param logger:  A logger object for logging debug info. [default: None]

    :returns: (flux, dx, dy, scale, g1, g2, flag)
    """
    import lmfit
    logger = galsim.config.LoggerWrapper(logger)
    minimized = self._lmfit_minimize(self._lmfit_params(star), star,
                                     logger=logger)
    if logger:
        logger.debug(lmfit.fit_report(minimized))
    flux, du, dv, scale, g1, g2 = minimized.params.valuesdict().values()
    if not minimized.success:
        raise RuntimeError("Error fitting with lmfit.")
    try:
        params_var = np.diag(minimized.covar)
    except (ValueError, AttributeError) as exc:
        # minimized.covar is either None or does not exist
        logger.warning("Failed to get params_var")
        logger.warning(" -- Caught exception: %s", exc)
        params_var = np.zeros(6)
    return flux, du, dv, scale, g1, g2, params_var
def curveFitting_lmfit(y_data, struct, lowerbound, upperbound, method):
    """Fit layer thicknesses of `struct` so its reflectance R matches
    y_data, using the requested lmfit method; returns the fitted
    thickness list."""
    y_data = np.array(y_data).reshape(len(struct.waves), len(struct.angles))

    paras = Parameters()
    no = len([i for i in struct.layers if i.coherent])
    for i, l in enumerate(struct.layers):
        if l.thickness < 10000:
            # NOTE(review): `~l.coherent` is a bitwise NOT; on a plain
            # Python bool it is always truthy (-1/-2), so every thin
            # layer varies.  If logical negation is intended this should
            # be `not l.coherent` — confirm against the residual loop,
            # which updates the first `no` (coherent) layers.
            paras.add('layer{}'.format(i), value=l.thickness,
                      vary=~l.coherent, min=lowerbound[i],
                      max=upperbound[i], brute_step=2)
        else:
            # very thick layers are held fixed
            paras.add('layer{}'.format(i), value=l.thickness, vary=False)

    def residual(params, x, y):
        # Rebuild the structure with trial thicknesses and compare R.
        tempstruct = copy.deepcopy(struct)
        vals = params.valuesdict()
        for i in range(no):
            tempstruct.layers[i].thickness = vals['layer{}'.format(i)]
        tempstruct.initialize()
        print(np.sum(np.abs(y - tempstruct.R)))
        return np.sum(np.abs(y - tempstruct.R))

    out = minimize(residual, paras, args=(struct.waves, y_data),
                   method=method)
    print(fit_report(out))

    vals = out.params.valuesdict()
    print(vals)
    ret = [vals['layer{}'.format(i)] for i in range(len(vals))]
    print(ret)
    return ret
def doFitting(self):
    """Load the data file named in the UI, fit it along axis 0 and show
    the fit report and a 2-D beam image in the GUI.

    Fixes: the bare `except:` also swallowed SystemExit and
    KeyboardInterrupt — narrowed to `except Exception:`; dead
    commented-out plotting code removed.
    """
    filename = self.ui.fileOutput.toPlainText()
    self.ui.FitResultsSummary.setPlainText("These are fitting results")
    self.ui.ErrorMessages.setPlainText("")
    try:
        data = file1.make_numpyarray(filename)
    except Exception:
        self.ui.ErrorMessages.setPlainText("Something went wrong with fitting")
        return None
    fit1 = file1.fit_axis(data, 0)
    report = fit_report(fit1[2])
    self.ui.FitResultsSummary.append(report)
    # TODO: rotating the second-axis plot by 90 degrees doesn't work
    # well yet and is not implemented.
    self.ui.BeamDisplay.canvas.ax.pcolorfast(data)
    self.ui.BeamDisplay.canvas.draw()
def on_read_data_from_lmfit(self):
    """Copy the lmfit result stored in P4Rm.resultFit into the shared
    P4Rm dictionaries: the least-squares report plus the sp/dwp
    parameter arrays.  (Duplicate of the version above, kept in sync.)"""
    from lmfit import fit_report
    a = P4Rm()
    result = P4Rm.resultFit
    P4Rm.FitDict['Leastsq_report'] = [result.success, result.lmdif_message,
                                      result.ier, fit_report(result)]
    if a.AllDataDict['model'] == 2:
        # model 2 uses a fixed layout: indices 1..6 -> sp, 7..13 -> dwp
        for i, param in enumerate(result.params.values()):
            if 1 <= i < 7:
                P4Rm.ParamDict['sp'][i] = param.value
            if 7 <= i < 14:
                P4Rm.ParamDict['dwp'][i - 7] = param.value
    else:
        # generic layout: element counts are stored as parameters
        len_sp = int(result.params['nb_sp_val'])
        len_dwp = int(result.params['nb_dwp_val'])
        for k in range(len_dwp):
            P4Rm.ParamDict['dwp'][k] = result.params['dwp_' + str(k)].value
        for k in range(len_sp):
            P4Rm.ParamDict['sp'][k] = result.params['sp_' + str(k)].value
def fit_yb_T(x, y):
    """Fit the Yb isotope spectrum model (fcn2min) to (x, y).

    Returns (x_plot, model, result): the plotting axis, the evaluated
    best-fit model and the lmfit MinimizerResult.
    """
    params = Parameters()
    params.add('a', value=-5.0, min=-10.0, max=0.0, vary=True)
    params.add('x_offset', value=50, min=np.min(x), max=np.max(x), vary=True)
    params.add('y_offset', value=0.0, min=-2.0, max=2.0, vary=True)
    params.add('T', value=1.0, min=0.0, max=100.0, vary=True)  # temperature

    # one free amplitude per Yb isotope (natural abundances listed here
    # for reference; only the count is used)
    iso_abund = np.array([12.887, 31.896, 16.098, 16.098, 21.754,
                          14.216, 14.216, 3.023, 0.126])
    for k in range(len(iso_abund)):
        params.add('a' + str(k), value=1.0, min=0.0, max=10.0, vary=True)

    # do fit, here with leastsq model
    minner = Minimizer(fcn2min, params, fcn_args=(x, y))
    result = minner.minimize()

    # Store the Confidence data from the fit
    con_report = lmfit.fit_report(result.params)

    (x_plot, model) = fcn2min(result.params, x, y, plot_fit=True)
    return (x_plot, model, result)
def __init__(self, model, noise=True, weights=None, **kwargs): import lmfit # Import Lmfit here, so it is no dependency BaseSolver.__init__(self) # Deal with the parameters parameters = lmfit.Parameters() p = model.parameters[['initial', 'pmin', 'pmax', 'vary']] for k in p.index: pp = np.where(p.loc[k].isnull(), None, p.loc[k]) parameters.add(k, value=pp[0], min=pp[1], max=pp[2], vary=pp[3]) # set ftol and epsfcn if no options for lmfit are provided. Only # work with Lmfit's least squares solver method. if not kwargs: kwargs = {"ftol": 1e-3, "epsfcn": 1e-4} self.fit = lmfit.minimize(fcn=self.objfunction, params=parameters, args=(noise, model, weights), **kwargs) # Set all parameter attributes self.optimal_params = np.array([p.value for p in self.fit.params.values()]) self.stderr = np.array([p.stderr for p in self.fit.params.values()]) if self.fit.covar is not None: self.pcov = self.fit.covar # Set all optimization attributes self.nfev = self.fit.nfev self.report = lmfit.fit_report(self.fit)
def test_reports_created(fitresult):
    """Verify that the fit reports are created and all headers are present."""
    headers = [
        '[[Model]]',
        '[[Fit Statistics]]',
        '[[Variables]]',
        '[[Correlations]] (unreported correlations are < 0.100)',
    ]

    # The method-style text report carries every section header.
    text_report = fitresult.fit_report()
    assert len(text_report) > 500
    assert all(h in text_report for h in headers)

    # The function-style report omits only the [[Model]] section.
    func_report = fit_report(fitresult)
    assert all(h in func_report for h in headers[1:])

    # HTML rendering of the parameters table.
    html_params = fitresult.params._repr_html_()
    assert len(html_params) > 500
    for fragment in ('brute', 'standard error', 'relative error'):
        assert fragment in html_params

    # The full HTML report carries the de-bracketed section titles.
    html_report = fitresult._repr_html_()
    assert len(html_report) > 1000
    for h in headers:
        assert h.replace('[', '').replace(']', '').strip() in html_report
def fit(self) -> None:
    """Minimize ``self.objective`` over the SCF model parameters and, on
    success, log the best-fit values, write a fit-report log file, create
    the result QDP file and optionally plot."""
    self.debug('START', inspect.currentframe())
    self.set_parameter()
    self.result = lf.minimize(
        fcn=self.objective,
        params=self.scf_model_parameters,
        kws={'E': self.xd})
    if self.result.success:
        self.info(self.result.message)
        self.info('BEST FIT VALUES')
        # Report each varied parameter as value +- stderr.
        for name in self.result.var_names:
            self.info(f'parameter of {name}')
            self.info(
                f' {self.result.params[name].value} +- {self.result.params[name].stderr}')
    # NOTE(review): layout assumes the log file is written regardless of
    # fit success -- confirm against the original source.
    with open(self.log_file, 'w') as log:
        log.write(
            '--------------------------------------------------------------------\n')
        log.write(lf.fit_report(self.result))
        log.write('\n')
        log.write(
            '--------------------------------------------------------------------\n')
        log.write(
            f' Chi-squared value / d.o.f. = {self.result.chisqr} / {self.result.nfree}\n')
        log.write(f' Reduced Chi-squared value = {self.result.redchi}\n')
        log.write('\n')
    self.info(f'Fitting results were recorded to {self.log_file}')
    self.create_result_qdp()
    if self.plot_flag:
        self.plot()
    self.debug('END', inspect.currentframe())
def test_bounded_jacobian():
    """A user-supplied analytic Jacobian (Dfun) must be invoked by the
    bounded leastsq fit, and never invoked when Dfun is omitted."""
    pars = Parameters()
    pars.add('x0', value=2.0)
    pars.add('x1', value=2.0, min=1.5)

    global jac_count
    jac_count = 0

    def residual_fn(params):
        v0 = params['x0'].value
        v1 = params['x1'].value
        return np.array([10 * (v1 - v0 * v0), 1 - v0])

    def jacobian_fn(params):
        # Count invocations through the module-level global.
        global jac_count
        jac_count += 1
        v0 = params['x0'].value
        return np.array([[-20 * v0, 10], [-1, 0]])

    fit_plain = minimize(residual_fn, pars, Dfun=None)
    assert_paramval(fit_plain.params['x0'], 1.2243, tol=0.02)
    assert_paramval(fit_plain.params['x1'], 1.5000, tol=0.02)
    assert jac_count == 0

    fit_jac = minimize(residual_fn, pars, Dfun=jacobian_fn)
    assert_paramval(fit_jac.params['x0'], 1.2243, tol=0.02)
    assert_paramval(fit_jac.params['x1'], 1.5000, tol=0.02)
    assert jac_count > 5
    print(fit_report(fit_jac, show_correl=True))
def refine_angles(self, method='nelder', **opts):
    """Refine the goniometer angles (lattice parameters included) by
    minimizing ``self.angle_residuals``; on success, store the refined
    parameters back on the instance."""
    self.set_idx()
    from lmfit import minimize, fit_report
    params = self.define_parameters(lattice=True, **opts)
    self.result = minimize(self.angle_residuals, params, method=method)
    self.fit_report = fit_report(self.result)
    if self.result.success:
        self.get_parameters(self.result.params)
def refine_orientation_matrix(self, **opts):
    """Refine the orientation matrix by minimizing ``self.orient_residuals``;
    on success, store the refined matrix back on the instance."""
    self.set_idx()
    from lmfit import minimize, fit_report
    params = self.define_orientation_matrix()
    self.result = minimize(self.orient_residuals, params, **opts)
    self.fit_report = fit_report(self.result)
    if self.result.success:
        self.get_orientation_matrix(self.result.params)
def refine_hkls(self, method='leastsq', **opts):
    """Refine against the HKL residuals; requires an orientation matrix.

    Raises
    ------
    NeXusError
        If no orientation matrix has been defined yet.
    """
    self.set_idx()
    from lmfit import minimize, fit_report
    if self.Umat is None:
        raise NeXusError('No orientation matrix defined')
    params = self.define_parameters(**opts)
    self.result = minimize(self.hkl_residuals, params, method=method)
    self.fit_report = fit_report(self.result)
    if self.result.success:
        self.get_parameters(self.result.params)
def write_statistics(name, outf, pars, fit_result):
    """Append a fit summary (parameter report plus residual statistics)
    for *pars*/*fit_result* to the already-open file object *outf*."""
    resid = fit_result.residual
    norm_per_root_n = LA.norm(resid) / math.sqrt(len(resid))
    mean_abs_err = np.mean(np.abs(resid))

    outf.write(name)
    outf.write(fit_report(pars))
    outf.write('\n')
    outf.write('Norm residuals/root(N) is ' + repr(norm_per_root_n))
    outf.write('\n')
    outf.write('Mean absolute error ' + repr(mean_abs_err))
    outf.write('\n')
    outf.write('-------------\n')
def save_result(self, fname=None):
    """Write the sorted lmfit report for ``self.fit_result`` to a text
    file inside ``self.result_folder``.

    Parameters
    ----------
    fname : str, optional
        Output file name; defaults to ``<data_title>_out.txt``.
    """
    fname = fname or self.data_title + '_out.txt'
    filepath = os.path.join(self.result_folder, fname)
    report = fit_report(self.fit_result, sort_pars=True)
    with open(filepath, 'w') as myfile:
        myfile.write(report)
    logger.warning('Results are saved to {}'.format(filepath))
def Fit_Qr(x, y):
    """Fit the Qr(T) model ``Fit_Func`` to (x, y).

    Returns
    -------
    tuple
        ``(sdelta, sdelta_err, Tc, Tc_err, alpha, alpha_err, report,
        residual)`` where each ``*_err`` is the relative 1-sigma error
        (stderr divided by the fitted value).
    """
    # create a set of Parameters
    params = Parameters()
    params.add('sdelta', value=1e-4, min=1e-10, max=1e-2)
    params.add('Tc', value=1.2, min=0.1, max=1.8)
    params.add('alpha', value=1, min=1e-10, max=1)

    # do fit, here with leastsq model
    result = minimize(Fit_Func, params, args=(x, y))
    residual = result.residual

    sdelta = result.params['sdelta'].value
    sdelta_err = np.abs(result.params['sdelta'].stderr / sdelta)
    Tc = result.params['Tc'].value
    Tc_err = np.abs(result.params['Tc'].stderr / Tc)
    alpha = result.params['alpha'].value
    # FIX: alpha's relative error was previously divided by Tc, not alpha.
    alpha_err = np.abs(result.params['alpha'].stderr / alpha)

    print(fit_report(result))
    return sdelta, sdelta_err, Tc, Tc_err, alpha, alpha_err, fit_report(result), residual
def fit_gaussian(x_data, y_data):
    """ Fit a gaussian to data and plot data against the fitted curve. """
    # Start values need to be ballpark correct!
    params = Parameters()
    for pname, start in (('A', -2000), ('mu', 6562.801), ('sigma', 1)):
        params.add(pname, value=start)

    out = minimize(residual, params, args=(x_data, y_data))
    print(fit_report(out.params))

    # Overlay the fitted gaussian on the raw data.
    plt.plot(x_data, y_data)
    plt.plot(x_data, gaussian(out.params, x_data))
    plt.show()
def test_bounds():
    """Bounded sinusoid fit via least_squares: parameter bounds are
    honored and the true values are recovered within tolerance."""
    if not HAS_LEAST_SQUARES:
        raise nose.SkipTest
    p_true = Parameters()
    p_true.add('amp', value=14.0)
    p_true.add('period', value=5.4321)
    p_true.add('shift', value=0.12345)
    p_true.add('decay', value=0.01000)

    def residual(pars, x, data=None):
        amp = pars['amp']
        per = pars['period']
        shift = pars['shift']
        decay = pars['decay']
        # Wrap the phase back into (-pi/2, pi/2].
        if abs(shift) > pi/2:
            shift = shift - sign(shift)*pi
        model = amp*sin(shift + x/per) * exp(-x*x*decay*decay)
        if data is None:
            return model
        return (model - data)

    n = 1500
    xmin = 0.
    xmax = 250.0
    random.seed(0)
    noise = random.normal(scale=2.80, size=n)
    x = linspace(xmin, xmax, n)
    data = residual(p_true, x) + noise

    fit_params = Parameters()
    fit_params.add('amp', value=13.0, max=20, min=0.0)
    fit_params.add('period', value=2, max=10)
    fit_params.add('shift', value=0.0, max=pi/2., min=-pi/2.)
    fit_params.add('decay', value=0.02, max=0.10, min=0.00)

    # FIX: the Minimizer instance was bound to the name `min`, shadowing
    # the builtin; renamed to `mini`.
    mini = Minimizer(residual, fit_params, (x, data))
    out = mini.least_squares()

    assert out.nfev > 10
    assert out.nfree > 50
    assert out.chisqr > 1.0
    print(fit_report(out, show_correl=True, modelpars=p_true))
    assert_paramval(out.params['decay'], 0.01, tol=1.e-2)
    assert_paramval(out.params['shift'], 0.123, tol=1.e-2)
def solve(self, X0, IR='IRF', RM='linear', TR=None, Cf=[1.0], method='leastsq', period = ['01-01-1900', '01-01-2000', '01-01-2100'], solver=1):
    """Calibrate the TFN model over the calibration window and compute
    goodness-of-fit statistics for both calibration and validation.

    NOTE(review): ``Cf=[1.0]`` and ``period=[...]`` are mutable default
    arguments -- safe only if never mutated; confirm.  ``Cf`` and
    ``solver`` are not used in this method body.
    """
    # Define the TFN Model
    self.IR = getattr(self, IR)  # Save the impulse response function
    self.RM = getattr(self, RM)  # Save the recharge calculation function
    if TR != None:
        self.TR = getattr(self, TR)
    self._TFN = IR  # Save the name of the impulse response function
    self._RM = RM  # Save the name of the recharge model
    # Observation indices inside the calibration/validation windows,
    # excluding the -999 missing-value sentinel in Ho.
    self._index_calibration = self.data.t[(self.data.index > period[0]) & (self.data.index < period[1]) & (self.data.Ho > -999)].tolist()
    self._index_validation = self.data.t[(self.data.index > period[1]) & (self.data.index < period[2]) & (self.data.Ho > -999)].tolist()
    self.period = period
    if method == 'leastsq':
        # Drainage base 'd' starts at the mean observed head.
        X0.add('d', value=self.data.Ho.mean(), vary=True)
        self.result = minimize(self.objective_function, X0, method='leastsq', scale_covar=True)
        self.parameters_optimized = self.result.params.valuesdict()
        if self.result.success:
            print 'Optimization completed succesfully!'
            print(report_fit(self.result))
            # Persist the textual fit report next to the figures.
            np.savetxt('Figures/fit_report_%s_%s.txt' %(self.bore, self._TFN), (fit_report(self.result),), fmt='%str')
    else:
        X0.add('d', value=np.mean(self.head_observed))
        self.result = minimize(self.objective_function, X0, args=(InputData,), method=method)
        self.parameters_optimized = self.result.params.valuesdict()
    # Calculate statistics for both the calibration and validation period
    self.result.SWSI_Cal = np.sqrt(sum(self.swsi(self._index_calibration)))
    self.result.SWSI_Val = np.sqrt(sum(self.swsi(self._index_validation)))
    self.result.RMSE_Cal = np.sqrt(sum(self.rmse(self._index_calibration)))
    self.result.RMSE_Val = np.sqrt(sum(self.rmse(self._index_validation)))
    self.result.AVGDEV_Cal = self.avg_deviation(self._index_calibration)
    self.result.AVGDEV_Val = self.avg_deviation(self._index_validation)
    self.result.EXPVAR_Cal = self.explained_variance(self._index_calibration)
    self.result.EXPVAR_Val = self.explained_variance(self._index_validation)
def test_bounds():
    """Bounded sinusoid fit: bounds are honored and the true parameters
    are recovered to within tolerance."""
    p_true = Parameters()
    for pname, pval in (("amp", 14.0), ("period", 5.4321),
                        ("shift", 0.12345), ("decay", 0.01000)):
        p_true.add(pname, value=pval)

    def residual(pars, x, data=None):
        amp = pars["amp"].value
        per = pars["period"].value
        shift = pars["shift"].value
        decay = pars["decay"].value
        # Wrap the phase back into (-pi/2, pi/2].
        if abs(shift) > pi / 2:
            shift = shift - sign(shift) * pi
        model = amp * sin(shift + x / per) * exp(-x * x * decay * decay)
        return model if data is None else model - data

    n = 1500
    random.seed(0)
    noise = random.normal(scale=2.80, size=n)
    x = linspace(0.0, 250.0, n)
    data = residual(p_true, x) + noise

    fit_params = Parameters()
    fit_params.add("amp", value=13.0, max=20, min=0.0)
    fit_params.add("period", value=2, max=10)
    fit_params.add("shift", value=0.0, max=pi / 2.0, min=-pi / 2.0)
    fit_params.add("decay", value=0.02, max=0.10, min=0.00)

    out = minimize(residual, fit_params, args=(x,), kws={"data": data})
    fit = residual(out.params, x)

    assert out.nfev > 10
    assert out.nfree > 50
    assert out.chisqr > 1.0
    print(fit_report(out, show_correl=True, modelpars=p_true))
    assert_paramval(out.params["decay"], 0.01, tol=1.0e-2)
    assert_paramval(out.params["shift"], 0.123, tol=1.0e-2)
def produce_tauspectrum_noplot(freqMHz, taus, tauserr):
    """Fit a power law to the scattering time spectrum tau(nu).

    Points with a zero error estimate are discarded before fitting; the
    remaining points are weighted by 1/sigma^2.

    Returns
    -------
    tuple
        ``(freqMHz, alpha, alphaerr, fit)`` -- the filtered frequencies,
        the spectral index (negative of the fitted exponent), its stderr,
        and the best-fit curve.
    """
    # FIX: build the non-zero-error mask ONCE and apply it to every array.
    # Previously tauserr was filtered first, so the subsequent np.nonzero
    # calls were taken on the already-shortened array and indexed
    # taus/freqGHz/freqMHz with the wrong mask.
    nonzero = np.nonzero(tauserr)
    taus = taus[nonzero]
    freqMHz = freqMHz[nonzero]
    tauserr = tauserr[nonzero]
    freqGHz = freqMHz/1000.

    powmod = PowerLawModel()
    powparstau = powmod.guess(taus, x=freqGHz)
    powout = powmod.fit(taus, powparstau, x=freqGHz,
                        weights=1/(np.power(tauserr, 2)))
    print(fit_report(powout.params))

    fit = powout.best_fit
    alpha = -powout.best_values['exponent']
    alphaerr = powout.params['exponent'].stderr
    return freqMHz, alpha, alphaerr, fit
def export_ref_fit(self):
    """Persist the reference fits to ``data/fit/<label>``: one text
    report and one pickle per stimulus, the mean lmfit parameters, and
    the roll-aligned / instantaneous diagnostic plots."""
    pname = os.path.join('data', 'fit', self.label)
    os.mkdir(pname)

    for stim, result in zip(REF_STIM_LIST, self.ref_result_list):
        report_path = os.path.join(pname, 'ref_fit_%d.txt' % stim)
        pickle_path = os.path.join(pname, 'ref_fit_%d.pkl' % stim)
        with open(report_path, 'w') as f:
            f.write(fit_report(result))
        with open(pickle_path, 'wb') as f:
            pickle.dump(result, f)

    with open(os.path.join(pname, 'ref_mean_lmpars.pkl'), 'wb') as f:
        pickle.dump(self.ref_mean_lmpars, f)

    # Plot: save the roll-aligned view first, then the instantaneous one.
    for roll, suffix in ((True, 'roll'), (False, 'inst')):
        fig, axs = self.plot_ref_fit(roll=roll)
        fig.savefig(os.path.join(pname, 'ref_fit_%s.png' % suffix), dpi=300)
        plt.close(fig)
def fit_spectrum(self, energy, counts, energy_min=None, energy_max=None):
    """Fit the XRF model to a measured spectrum.

    Counts below a tiny floor are clamped, energies given in keV are
    converted to eV, the fit weights are zeroed outside the requested
    energy window, and the result, report, best-fit curve and transfer
    matrix are stored on the instance.
    """
    # Work on float copies so the caller's arrays are not modified.
    work_energy = 1.0*energy
    work_counts = 1.0*counts
    # Clamp near-zero counts to a small positive floor (1e-12 of the
    # 99th percentile) so weights stay finite.
    floor = 1.e-12*np.percentile(counts, [99])[0]
    work_counts[np.where(counts<floor)] = floor
    if max(energy) < 250.0:  # input energies are in keV
        work_energy = 1000.0 * energy
    self.set_fit_weight(work_energy, work_counts)

    # Window [imin, imax]: explicit arguments win, else instance defaults.
    imin, imax = 0, len(counts)
    if energy_min is None:
        energy_min = self.energy_min
    if energy_min is not None:
        imin = index_of(work_energy, energy_min)
    if energy_max is None:
        energy_max = self.energy_max
    if energy_max is not None:
        imax = index_of(work_energy, energy_max)
    # Zero the weights outside the fit window.
    self.fit_weight[:imin-1] = 0.0
    self.fit_weight[imax+1:] = 0.0

    self.fit_iter = 0
    userkws = dict(data=work_counts, index=np.arange(len(counts)))
    kws = dict(method='leastsq', maxfev=4000, gtol=self.fit_toler,
               ftol=self.fit_toler, xtol=self.fit_toler, epsfcn=1.e-5)
    self.result = minimize(self.__resid, self.params, kws=userkws, **kws)
    self.fit_report = fit_report(self.result, min_correl=0.5)
    self.best_fit = self.calc_spectrum(energy, params=self.result.params)

    # calculate transfer matrix for linear analysis using this model
    tmat= []
    for key, val in self.comps.items():
        tmat.append(val / self.eigenvalues[key])
    self.transfer_matrix = np.array(tmat)
def test_ci_report():
    """test confidence interval report"""
    def model_resid(pars, x, data=None):
        argu = (x * pars['decay']) ** 2
        shift = pars['shift']
        # Wrap the phase back into (-pi/2, pi/2].
        if abs(shift) > np.pi / 2:
            shift = shift - np.sign(shift) * np.pi
        model = pars['amp'] * np.sin(shift + x / pars['period']) * np.exp(-argu)
        if data is None:
            return model
        return model - data

    p_true = Parameters()
    for pname, pval in [('amp', 14.0), ('period', 5.33),
                        ('shift', 0.123), ('decay', 0.010)]:
        p_true.add(pname, value=pval)

    npts = 2500
    x = np.linspace(0., 250.0, npts)
    data = model_resid(p_true, x) + np.random.normal(scale=0.7215, size=npts)

    guess = Parameters()
    for pname, pval in [('amp', 13.0), ('period', 2),
                        ('shift', 0.0), ('decay', 0.02)]:
        guess.add(pname, value=pval)

    mini = Minimizer(model_resid, guess, fcn_args=(x,), fcn_kws={'data': data})
    out = mini.leastsq()
    assert len(fit_report(out)) > 500

    ci, tr = conf_interval(mini, out, trace=True)
    assert len(ci_report(ci)) > 250
def produce_tauspectrum_highHBA(freqMHz, taus, tauserr, freqMHzhigh, tauhigh, tauerrhigh):
    """Fit a power law to tau(nu), extrapolate it over the extra high-band
    (HBA) frequencies, and plot both data sets with the fit.

    Returns
    -------
    tuple
        ``(freqMHz, alpha, alphaerr, fit, fithigh)`` -- filtered
        frequencies, spectral index and stderr, best-fit curve, and the
        extrapolated power law over the combined frequency range.
    """
    # FIX: build the non-zero-error mask ONCE before filtering.  The old
    # code filtered tauserr first and then masked the other arrays with
    # np.nonzero of the already-shortened array, selecting wrong elements.
    nonzero = np.nonzero(tauserr)
    taus = taus[nonzero]
    freqMHz = freqMHz[nonzero]
    tauserr = tauserr[nonzero]
    freqGHz = freqMHz/1000.

    powmod = PowerLawModel()
    powparstau = powmod.guess(taus, x=freqGHz)
    powout = powmod.fit(taus, powparstau, x=freqGHz,
                        weights=1/(np.power(tauserr, 2)))
    print(fit_report(powout.params))

    fit = powout.best_fit
    alpha = -powout.best_values['exponent']
    alphaerr = powout.params['exponent'].stderr
    amp = powout.params['amplitude']
    # Extrapolate the fitted power law across the combined band.
    allfreq = np.append(freqMHz, freqMHzhigh)
    fithigh = amp*np.power(allfreq/1000, -alpha)

    fig = plt.figure(figsize=(12, 6))
    plt.errorbar(freqMHz, taus, yerr=tauserr, fmt='*-', markersize=10.0,
                 capthick=2, linewidth=1.5,
                 label=r'$\alpha = %.1f \pm %.1f$' % (alpha, alphaerr))
    plt.errorbar(freqMHzhigh, tauhigh, yerr=tauerrhigh, fmt='o')
    plt.plot(freqMHz, fit, 'k--', linewidth=1.5)
    plt.xscale('log')
    plt.yscale('log')
    plt.yticks(fontsize=12)
    # FIX: np.int was removed in numpy >= 1.24; plain int is equivalent.
    ticksMHz = (freqMHz).astype(int)[0:len(freqMHz):2]
    plt.xticks(ticksMHz, ticksMHz, fontsize=12)
    plt.legend(fontsize=12, numpoints=None)
    plt.xlabel(r'$\nu$ (MHz)', fontsize=14, labelpad=15.0)
    plt.ylabel(r'$\tau$ (sec)', fontsize=14)
    plt.xlim(xmin=0.95*freqMHz[0], xmax=1.05*freqMHzhigh[-1])
    plt.gcf().subplots_adjust(bottom=0.15)
    # FIX: was `return freqMHZ, ...` -- a NameError (wrong capitalization).
    return freqMHz, alpha, alphaerr, fit, fithigh
def fit2(self): xdata = np.linspace(-30000.0, 30000.0, num=8000) #ydata = UIUtils.pz_bxy_scan_data(B=xdata, B01=130, B02=-830, B03=-800, T_long=2.5*0.002, T_trans=0.002, Rp=1./(2*0.002), Rprb=1./(4*0.002), gamma=2.2, s=-1.0, xdata=xdata) #ydata = UIUtils.pz_bxy_scan(B=xdata, B01=100, B02=1000, B03=0, T_long=2.5*0.002, T_trans=0.002, Rp=1./(2*0.002), Rprb=1./(4*0.002), gamma=2.2, s=-1.0) ydata = UIUtils.func_scan(B=xdata, B01=0, B02=100, B03=-100, T1=2.5*0.002, T2=0.002, Rp=1./(2*0.002), Rpr=1./(4*0.002), gamma=2.2, s=-1) #self.gui.plot.plot(xdata, ydata, pen=(0,0,255)) # create a set of Parameters # 'value' is the initial condition # 'min' and 'max' define your boundaries params = Parameters() params.add('B01', value= 0.0, vary=True, min=-10., max=300.) params.add('B02', value= 0.0, vary=True, min=-10., max=300.) params.add('B03', value= 0.0, vary=True, min=-300., max=300.) params.add('T_long', value= 2.5*0.002, vary=False, min=0, max=1.0) params.add('T_trans', value= 0.002, vary=False, min=0, max=1.0) params.add('Rp', value= 250.0, vary=False, min=0.001, max=500) params.add('Rprb', value= 125.0, vary=False, min=0.001, max=500) params.add('off', value=0.0, min=0.0, max = 0.5) # do fit result = minimize(UIUtils.fit2, params, args=(xdata, ydata), method='nelder') #rp = result.values['B01'], result.values['B02'], result.values['B03'], result.values['T_long'], result.values['T_trans'], result.values['Rp'], result.values['Rprb'] # write error report print(fit_report(result)) #print(result.residual) xplot = np.linspace(min(xdata), max(xdata), 1000) final = ydata + result.residual try: self.gui.plot.plot(xdata, ydata, pen=(0,0,255), symbol='o') self.gui.plot.plot(xdata, final, pen=(0,255,0)) except: print('fit not successful') pass #print(*params) #print(result.values['B01']) print(final)
def inverse(self, y_obs):
    """Retrieve NH3/H2O abundances (and optionally a 'stretch' factor)
    that reproduce the observed vector *y_obs*, logging progress to
    ``self.report``.

    Returns
    -------
    tuple
        ``(type, parameter_to_list(p))`` -- the model type chosen by the
        initial guess and the retrieved parameter values.
    """
    # NOTE(review): `type` shadows the builtin; kept as-is here.
    type, guess = self.guess(y_obs)
    p = Parameters()
    with open(self.report, 'w') as file:
        file.write('** Juno Retrieval Begin **\n')
        file.write('type = %s, initial guess = ' % type)
        for i in range(len(guess)):
            file.write('%f ' % guess[i])
        file.write('\n')
    print type, guess
    p.add('NH3', value = guess[0], min = 0.1, max = 20.)
    p.add('H2O', value = guess[1], min = 0.1, max = 20.)
    if type == 'sat':
        # Saturated model: stretch is fixed at unity.
        p.add('stretch', value = 1., vary = False)
        result = minimize(self.loss_function, p, iter_cb = self.write_iter_report, col_deriv = True, Dfun = self.jacobian, args = (y_obs, 'f'), xtol = 1.E-4, ftol = 1.E-8, maxfev = 100)
    elif type == 'stretch':
        p.add('stretch', value = guess[2], min = 0.8, max = 5.)
        result = minimize(self.loss_function, p, iter_cb = self.write_iter_report, col_deriv = True, Dfun = self.jacobian, args = (y_obs, 'f'), xtol = 1.E-4, ftol = 1.E-8, maxfev = 100)
    else:
        raise ValueError('Model type not understood')
    # NOTE(review): `result` is never used; fit_report(p) relies on
    # lmfit < 0.9 updating `p` in place -- verify against the installed
    # lmfit version.
    with open(self.report, 'a') as file:
        file.write(fit_report(p))
        file.write('\n')
        file.write('** Juno Retrieval End **\n')
    return type, parameter_to_list(p)
def Minimize(self):
    """Least-squares fit of the objective over all variables except x,
    each starting at 0.1; returns a CFitReport with R2 and reduced
    chi-square quality factors."""
    result=None
    report=None
    # create a set of Parameters, except x
    # initial values are defined here
    params = lm.Parameters()
    for i in range(len(self._Variables)-1):
        params.add(self._Variables[i],value=0.1)
    # NOTE(review): this Minimizer is prepared but never run -- the actual
    # fit below is a separate lm.minimize call.  prepare_fit() may mutate
    # `params`, so removal should be verified before cleanup.
    myfit = lm.Minimizer(self._ObjFunc, params, \
                         args=(self._Xvalues, self._Yvalues), \
                         fcn_kws={})
    myfit.prepare_fit()
    result = lm.minimize(self._ObjFunc, params, \
                         args=(self._Xvalues,self._Yvalues), \
                         method='leastsq')
    # NOTE(review): reporting `params` (not `result`) relies on
    # lmfit < 0.9 updating the parameters in place -- confirm.
    report = lm.fit_report(params)
    # Calculate both quality factors - R2 and ReducedChi2.
    R2 = 1-result.chisqr/self._getSStotal(self._Yvalues)
    RChi2 = result.redchi
    return CFitReport(self._Function,R2,RChi2,result,report)
def polish(match, params0, psf, psf_err, angstrom_per_node=20):
    """Polish parameters

    Refine the spectroscopic parameters of *match* with a Powell
    minimization (PSF fixed at its prior), then re-fit with the PSF free
    but tied to the prior through a chi-square penalty.

    Args:
        match (smsyn.match.Match): Match object.
        params0 (lmfit.Parameters): parameters with initial guesses.
        psf (float): prior value for the PSF width.
        psf_err (float): 1-sigma uncertainty on the PSF prior.
        angstrom_per_node (float): approximate separation between
            continuum spline nodes. Number of nodes will be rounded to
            nearest integer.

    Returns:
        dict: with keys params_out, flux, wav, resid, wavmask.
    """
    # Deep-copy the starting parameters so params0 is left untouched.
    params = lmfit.Parameters()
    for name in params0.keys():
        params.add(name)
        params[name].value = params0[name].value
        params[name].vary = params0[name].vary
        params[name].min = params0[name].min
        params[name].max = params0[name].max

    # Physical bounds on the stellar parameters.
    params['vsini'].min = 0
    params['logg'].min = 1.0
    params['logg'].max = 5.0
    params['teff'].min = 4500
    params['teff'].max = 7000
    params['fe'].min = -2.0
    params['fe'].max = 0.5

    # PSF width is held fixed at its prior during the first fit.
    params.add('psf')
    params['psf'].vary = False
    params['psf'].min = 0
    params['psf'].value = psf

    # One free continuum-spline coefficient per node across the span.
    wavlim = match.spec['wav'][[0,-1]]
    node_wav = smsyn.match.spline_nodes(
        wavlim[0], wavlim[1], angstroms_per_node=angstrom_per_node,
    )
    for _node_wav in node_wav:
        key = 'sp%d' % _node_wav
        params.add(key)
        params[key].value = 1.0

    def chisq(params):
        # Sum of squared masked, normalized residuals.
        nresid = match.masked_nresid(params)
        _chisq = np.sum(nresid**2)
        return _chisq

    def rchisq(params):
        # num_points is bound later, before any call -- closure is valid.
        return chisq(params) / num_points

    def objective(params):
        # Chi-square plus a Gaussian prior penalty on the psf parameter.
        _chisq = chisq(params)
        _penalty = ((params['psf'].value - psf)/psf_err)**2.0
        return _chisq + _penalty

    def print_params(lmout):
        lmout.params.pretty_print(columns=['value', 'vary'])
        print "rchisq = {}".format(rchisq(lmout.params))

    num_points = len(match.masked_nresid(params))
    params_out = {}
    params_out['rchisq0'] = rchisq(params)

    # Fit a first time to get the chisq minimum
    lmout = lmfit.minimize(
        objective, params, method='powell', options=dict(disp=True)
    )
    print lmfit.fit_report(lmout)
    print_params(lmout)

    # Re-fit, but allow a prior on the psf parameter
    rchisq_min = objective(lmout.params) / num_points
    params = lmout.params
    params['psf'].vary =True

    def objective(params):
        # Same as above but the penalty is scaled by the first fit's
        # reduced chi-square so the prior weight matches the data scatter.
        _chisq = chisq(params)
        _penalty = rchisq_min * ((params['psf'].value - psf)/psf_err)**2.0
        return _chisq + _penalty

    lmout = lmfit.minimize(
        objective, params, method='powell', options=dict(disp=True)
    )
    print lmfit.fit_report(lmout)
    print_params(lmout)

    # Collect the polished values and final statistics.
    for k in 'teff logg fe vsini psf'.split():
        params_out[k] = lmout.params[k].value
    params_out['rchisq1'] = rchisq(lmout.params)
    resid = match.resid(lmout.params)
    d = dict(
        params_out=params_out,
        flux=match.spec.flux,
        wav=match.spec.wav,
        resid=resid,
        wavmask=match.wavmask,
    )
    return d
model_i = fitfuncs[i](x[i], original_params[i], *original_userargs[i], **original_kws[i]) model = np.append(model, model_i) return model if __name__ == '__main__': from lmfit import fit_report def gauss(x, params, *args): """Calculates a Gaussian model""" p = values(params) return p[0] + p[1] * np.exp(-((x - p[2]) / p[3])**2) xdata = np.linspace(-4, 4, 100) p0 = np.array([0., 1., 0., 1.]) bounds = [(-1., 1.), (0., 2.), (-3., 3.), (0.001, 2.)] temp_pars = to_parameters(p0, bounds=bounds) pars = to_parameters(p0 + 0.2, bounds=bounds) ydata = gauss(xdata, temp_pars) + 0.1 * np.random.random(xdata.size) f = CurveFitter(gauss, (xdata, ydata), pars) f.fit() print(fit_report(f.params))
# --- Fit the scaled correlation model to the experimental D(r) data ---
fit_params = Parameters()
fit_params.add('scalePara', value=3.77156,vary=False)
# scaleCorr is the only free parameter; random starting point in [5, 25).
fit_params.add('scaleCorr', value=np.random.uniform(5,25),min=0,max=30)
fit_params.add('width', value=0.2,vary=False)
fit_params.add('damp', value=100,vary=False)#1.5,min=1.0,max=100)
# Dummy abscissa; presumably the residual builds its own r grid -- TODO confirm.
x=np.array([0])
# Restrict data to the fit window (rmin, rmax], half-step shifted to bin edges.
data=expDr[np.logical_and(expr>rmin+0.5*rstep,expr<=rmax+0.5*rstep)]
dataErr=err[np.logical_and(expr>rmin+0.5*rstep,expr<=rmax+0.5*rstep)]
fit_kws={'sigma':err,'absolute_sigma':True}
out = minimize(residual, fit_params, args=(x,), kws={'data':data},**fit_kws)
# NOTE(review): evaluating/reporting with fit_params (not out.params)
# relies on lmfit < 0.9 updating the parameters in place -- verify.
fit = residual(fit_params, x)
print fit_report(fit_params)
rcomp = expr[np.logical_and(expr>rmin+0.5*rstep,expr<=rmax+0.5*rstep)]
diff = data-fit
# Mean squared deviation over the fit window.
chisq=np.sum((diff)**2/len(diff))
print chisq
# Full plotting range (rcalcmin, rcalcmax].
rfull=expr[np.logical_and(expr>rcalcmin+0.5*rstep,expr<=rcalcmax+0.5*rstep)]
datafull=expDr[np.logical_and(expr>rcalcmin+0.5*rstep,expr<=rcalcmax+0.5*rstep)]
offset = 1.25*np.abs(np.min(data))
fig=plt.figure()
ax=fig.add_subplot(111)
# Experimental data as open blue circles.
ax.plot(rfull,datafull,marker='o',mfc='none',mec='b',linestyle='none')
def fit_report(self):
    """Return the report created by lmfit as a string."""
    # The module-level lmfit fit_report is found via the global scope;
    # this method's name only shadows it as a class attribute.
    report = fit_report(self.parameters)
    return str(report)