class LmModel(object):
    """Base class for all models. Models take x and y and return a model for y."""

    def __init__(self, x, y):
        self.x, self.y = x, y
        self.parameters = Parameters()
        self.min = Minimizer(self.residual, self.parameters)

    def print_para(self):
        for p in self.parameters.values():
            print(p)

    def func(self, paras):
        raise NotImplementedError

    def est_startvals(self):
        raise NotImplementedError

    def residual(self, paras):
        return self.func(paras) - self.y

    def fit(self):
        self.min.leastsq()
        self.y_model = self.func(self.parameters)
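# --- Added illustration (not from the original source): a minimal sketch of
# how a concrete model might subclass LmModel, assuming only the lmfit
# Parameters API. The 'slope'/'intercept' parameter names are hypothetical.
class LineModel(LmModel):
    def est_startvals(self):
        # crude starting values for a straight line
        self.parameters.add('slope', value=1.0)
        self.parameters.add('intercept', value=0.0)

    def func(self, paras):
        return paras['slope'].value * self.x + paras['intercept'].value

# usage sketch: model = LineModel(x, y); model.est_startvals(); model.fit()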
def test_multidimensional_fit_GH205():
    # test that you don't need to flatten the output from the objective
    # function.  Tests regression for GH205.
    pos = np.linspace(0, 99, 100)
    xv, yv = np.meshgrid(pos, pos)
    f = lambda xv, yv, lambda1, lambda2: (np.sin(xv * lambda1)
                                          + np.cos(yv * lambda2))

    data = f(xv, yv, 0.3, 3)
    assert_(data.ndim == 2)

    def fcn2min(params, xv, yv, data):
        """model decaying sine wave, subtract data"""
        lambda1 = params['lambda1'].value
        lambda2 = params['lambda2'].value
        model = f(xv, yv, lambda1, lambda2)
        return model - data

    # create a set of Parameters
    params = Parameters()
    params.add('lambda1', value=0.4)
    params.add('lambda2', value=3.2)

    mini = Minimizer(fcn2min, params, fcn_args=(xv, yv, data))
    res = mini.minimize()
def NIST_Test(DataSet, start='start2', plot=True):
    NISTdata = ReadNistData(DataSet)
    resid, npar, dimx = Models[DataSet]
    y = NISTdata['y']
    x = NISTdata['x']

    params = Parameters()
    for i in range(npar):
        pname = 'b%i' % (i+1)
        cval = NISTdata['cert_values'][i]
        cerr = NISTdata['cert_stderr'][i]
        pval1 = NISTdata[start][i]
        params.add(pname, value=pval1)

    myfit = Minimizer(resid, params, fcn_args=(x,), fcn_kws={'y': y},
                      scale_covar=True)
    myfit.prepare_fit()
    myfit.leastsq()

    digs = Compare_NIST_Results(DataSet, myfit, params, NISTdata)

    if plot and HASPYLAB:
        fit = -resid(params, x)
        pylab.plot(x, y, 'r+')
        pylab.plot(x, fit, 'ko--')
        pylab.show()

    return digs > 2
def setup(self):
    self.x = np.linspace(1, 10, 250)
    np.random.seed(0)
    self.y = (3.0 * np.exp(-self.x / 2)
              - 5.0 * np.exp(-(self.x - 0.1) / 10.)
              + 0.1 * np.random.randn(len(self.x)))

    self.p = Parameters()
    self.p.add_many(('a1', 4., True, 0., 10.),
                    ('a2', 4., True, -10., 10.),
                    ('t1', 3., True, 0.01, 10.),
                    ('t2', 3., True, 0.01, 20.))

    self.p_emcee = deepcopy(self.p)
    self.p_emcee.add('noise', 0.2, True, 0.001, 1.)

    self.mini_de = Minimizer(Minimizer_Residual, self.p,
                             fcn_args=(self.x, self.y),
                             kws={'seed': 1, 'polish': False,
                                  'maxiter': 100})
    self.mini_emcee = Minimizer(Minimizer_lnprob, self.p_emcee,
                                fcn_args=(self.x, self.y))
def fit_single_line(self, x, y, zero_lev, err_continuum, fitting_parameters,
                    bootstrap_iterations=1000):
    # Simple fit
    if self.fit_dict['MC_iterations'] == 1:
        fit_output = lmfit_minimize(residual_gauss, fitting_parameters,
                                    args=(x, y, zero_lev, err_continuum))
        self.fit_dict['area_intg'] = simps(y, x) - simps(zero_lev, x)
        self.fit_dict['area_intg_err'] = 0.0

    # Bootstrap
    else:
        mini_posterior = Minimizer(lnprob_gaussCurve, fitting_parameters,
                                   fcn_args=([x, y, zero_lev, err_continuum]))
        fit_output = mini_posterior.emcee(steps=200, params=fitting_parameters)

        # Bootstrap for the area of the lines
        area_array = empty(bootstrap_iterations)
        len_x_array = len(x)
        for i in range(bootstrap_iterations):
            y_new = y + np_normal_dist(0.0, err_continuum, len_x_array)
            area_array[i] = simps(y_new, x) - simps(zero_lev, x)

        self.fit_dict['area_intg'] = mean(area_array)
        self.fit_dict['area_intg_err'] = std(area_array)

    # Store the fitting parameters
    output_params = fit_output.params
    for key in self.fit_dict['parameters_list']:
        self.fit_dict[key + '_norm'] = output_params[key].value
        self.fit_dict[key + '_norm_er'] = output_params[key].stderr

    return
def polyfit(x, y, u=None):
    '''Determine the weighted least-squares fit for a 2nd-order polynomial.'''
    x = np.asarray(x)
    y = np.asarray(y)
    if u is not None:
        u[u == 0] = 1
        weight = 1. / np.asarray(u)
    else:
        weight = np.ones_like(x)

    params = Parameters()
    params.add('a', value=0)
    params.add('b', value=(y.max() - y.min()) / (x.max() - x.min()))
    params.add('c', value=0.0)

    def residual(pars, x, data=None, w=None):
        model = pars['a'].value + pars['b'].value*x + pars['c'].value*x**2
        if data is None:
            return model
        return (model - data)  # * w

    myfit = Minimizer(residual, params, fcn_args=(x,),
                      fcn_kws={'data': y, 'w': weight})
    myfit.leastsq()
    return [params['c'].value, params['b'].value, params['a'].value]
def autobk(energy, mu, rbkg=1, nknots=None, group=None, e0=None,
           kmin=0, kmax=None, kw=1, dk=0, win=None, vary_e0=True,
           chi_std=None, nfft=2048, kstep=0.05, _larch=None):
    if _larch is None:
        raise Warning("cannot calculate autobk spline -- larch broken?")

    # get array indices for rbkg and e0: irbkg, ie0
    rgrid = np.pi / (kstep * nfft)
    if rbkg < 2*rgrid:
        rbkg = 2*rgrid
    irbkg = int(1.01 + rbkg/rgrid)
    if e0 is None:
        e0 = find_e0(energy, mu, group=group, _larch=_larch)
    ie0 = _index_nearest(energy, e0)

    # save ungridded k (kraw) and gridded k (kout)
    # and ftwin (*k-weighting) for FT in residual
    kraw = np.sqrt(ETOK * (energy[ie0:] - e0))
    if kmax is None:
        kmax = max(kraw)
    kout = kstep * np.arange(int(1.01 + kmax/kstep))
    ftwin = kout**kw * ftwindow(kout, xmin=kmin, xmax=kmax,
                                window=win, dx=dk)

    # calc k-value and initial guess for y-values of spline params
    nspline = max(4, min(60, 2*int(rbkg*(kmax-kmin)/np.pi) + 1))
    spl_y = np.zeros(nspline)
    spl_k = np.zeros(nspline)
    for i in range(nspline):
        q = kmin + i*(kmax-kmin)/(nspline - 1)
        ik = _index_nearest(kraw, q)
        i1 = min(len(kraw)-1, ik + 5)
        i2 = max(0, ik - 5)
        spl_k[i] = kraw[ik]
        spl_y[i] = (2*mu[ik] + mu[i1] + mu[i2]) / 4.0

    # get spline representation: knots, coefs, order=3
    # coefs will be varied in fit.
    knots, coefs, order = splrep(spl_k, spl_y)

    # set fit parameters from initial coefficients
    fparams = Parameters()
    for i, v in enumerate(coefs):
        fparams.add("c%i" % i, value=v, vary=i < len(spl_y))

    fitkws = dict(knots=knots, order=order, kraw=kraw, mu=mu[ie0:],
                  irbkg=irbkg, kout=kout, ftwin=ftwin, nfft=nfft)
    # do fit
    fit = Minimizer(__resid, fparams, fcn_kws=fitkws)
    fit.leastsq()

    # write final results
    coefs = [p.value for p in fparams.values()]
    bkg, chi = spline_eval(kraw, mu[ie0:], knots, coefs, order, kout)
    obkg = np.zeros(len(mu))
    obkg[:ie0] = mu[:ie0]
    obkg[ie0:] = bkg
    if _larch.symtable.isgroup(group):
        setattr(group, 'bkg', obkg)
        setattr(group, 'chie', mu - obkg)
        setattr(group, 'k', kout)
        setattr(group, 'chi', chi)
def test_constraints1():
    def residual(pars, x, sigma=None, data=None):
        yg = gaussian(x, pars['amp_g'], pars['cen_g'], pars['wid_g'])
        yl = lorentzian(x, pars['amp_l'], pars['cen_l'], pars['wid_l'])

        model = yg + yl + pars['line_off'] + x * pars['line_slope']
        if data is None:
            return model
        if sigma is None:
            return (model - data)
        return (model - data) / sigma

    n = 601
    xmin = 0.
    xmax = 20.0
    x = linspace(xmin, xmax, n)
    data = (gaussian(x, 21, 8.1, 1.2) +
            lorentzian(x, 10, 9.6, 2.4) +
            random.normal(scale=0.23, size=n) +
            x*0.5)

    pfit = Parameters()
    pfit.add(name='amp_g', value=10)
    pfit.add(name='cen_g', value=9)
    pfit.add(name='wid_g', value=1)
    pfit.add(name='amp_tot', value=20)
    pfit.add(name='amp_l', expr='amp_tot - amp_g')
    pfit.add(name='cen_l', expr='1.5+cen_g')
    pfit.add(name='wid_l', expr='2*wid_g')
    pfit.add(name='line_slope', value=0.0)
    pfit.add(name='line_off', value=0.0)

    sigma = 0.021  # estimate of data error (for all data points)

    myfit = Minimizer(residual, pfit,
                      fcn_args=(x,),
                      fcn_kws={'sigma': sigma, 'data': data},
                      scale_covar=True)

    myfit.prepare_fit()
    init = residual(myfit.params, x)

    result = myfit.leastsq()

    print(' Nfev = ', result.nfev)
    print(result.chisqr, result.redchi, result.nfree)

    report_fit(result.params)
    pfit = result.params
    fit = residual(result.params, x)
    assert(pfit['cen_l'].value == 1.5 + pfit['cen_g'].value)
    assert(pfit['amp_l'].value == pfit['amp_tot'].value - pfit['amp_g'].value)
    assert(pfit['wid_l'].value == 2 * pfit['wid_g'].value)
def test_minimizers():
    """test scalar minimizers, except newton-cg (needs jacobian) and
    anneal (doesn't work out of the box)."""
    methods = ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'L-BFGS-B',
               'TNC', 'COBYLA', 'SLSQP']

    p_true = Parameters()
    p_true.add('amp', value=14.0)
    p_true.add('period', value=5.33)
    p_true.add('shift', value=0.123)
    p_true.add('decay', value=0.010)

    def residual(pars, x, data=None):
        amp = pars['amp'].value
        per = pars['period'].value
        shift = pars['shift'].value
        decay = pars['decay'].value

        if abs(shift) > pi/2:
            shift = shift - np.sign(shift)*pi
        model = amp*np.sin(shift + x/per) * np.exp(-x*x*decay*decay)
        if data is None:
            return model
        return (model - data)

    n = 2500
    xmin = 0.
    xmax = 250.0
    noise = np.random.normal(scale=0.7215, size=n)
    x = np.linspace(xmin, xmax, n)
    data = residual(p_true, x) + noise

    fit_params = Parameters()
    fit_params.add('amp', value=11.0, min=5, max=20)
    fit_params.add('period', value=5., min=1., max=7)
    fit_params.add('shift', value=.10, min=0.0, max=0.2)
    fit_params.add('decay', value=6.e-3, min=0, max=0.1)

    init = residual(fit_params, x)

    mini = Minimizer(residual, fit_params, [x, data])

    for m in methods:
        print(m)
        mini.scalar_minimize(method=m)

    fit = residual(fit_params, x)

    for name, par in fit_params.items():
        nout = "%s:%s" % (name, ' '*(20-len(name)))
        print("%s: %s (%s) " % (nout, par.value, p_true[name].value))

    for para, true_para in zip(fit_params.values(), p_true.values()):
        check_wo_stderr(para, true_para.value)
def test_peakfit():
    from lmfit.utilfuncs import gaussian

    def residual(pars, x, data=None):
        g1 = gaussian(x, pars['a1'].value, pars['c1'].value, pars['w1'].value)
        g2 = gaussian(x, pars['a2'].value, pars['c2'].value, pars['w2'].value)
        model = g1 + g2
        if data is None:
            return model
        return (model - data)

    n = 601
    xmin = 0.
    xmax = 15.0
    noise = np.random.normal(scale=.65, size=n)
    x = np.linspace(xmin, xmax, n)

    org_params = Parameters()
    org_params.add_many(('a1', 12.0, True, None, None, None),
                        ('c1', 5.3, True, None, None, None),
                        ('w1', 1.0, True, None, None, None),
                        ('a2', 9.1, True, None, None, None),
                        ('c2', 8.1, True, None, None, None),
                        ('w2', 2.5, True, None, None, None))

    data = residual(org_params, x) + noise

    fit_params = Parameters()
    fit_params.add_many(('a1', 8.0, True, None, 14., None),
                        ('c1', 5.0, True, None, None, None),
                        ('w1', 0.7, True, None, None, None),
                        ('a2', 3.1, True, None, None, None),
                        ('c2', 8.8, True, None, None, None))
    fit_params.add('w2', expr='2.5*w1')

    myfit = Minimizer(residual, fit_params,
                      fcn_args=(x,), fcn_kws={'data': data})

    myfit.prepare_fit()
    init = residual(fit_params, x)

    myfit.leastsq()

    print(' N fev = ', myfit.nfev)
    print(myfit.chisqr, myfit.redchi, myfit.nfree)

    report_fit(fit_params)

    fit = residual(fit_params, x)
    check_paras(fit_params, org_params)
def fit_axis(image_nparray2D, axis, minim_method="nelder"):
    axis_data = (np.sum(image_nparray2D, axis=1) if (axis == 0)
                 else np.sum(image_nparray2D, axis=0))
    axis_points = np.linspace(1, len(axis_data), len(axis_data))
    param_estimates = startparams_estimate(axis_data)
    params_for_fit = Parameters()
    params_for_fit.add('I_zero', value=param_estimates[0], min=0,
                       max=np.amax(axis_data))
    params_for_fit.add('r_zero', value=param_estimates[1], min=1,
                       max=len(axis_data))
    params_for_fit.add('omega_zero', value=param_estimates[2], min=1,
                       max=len(axis_data))
    params_for_fit.add('backgr', value=param_estimates[3])
    fit = Minimizer(residual, params_for_fit, fcn_args=(axis_points,),
                    fcn_kws={"data": axis_data})
    fit_res = fit.minimize(minim_method)
    return (axis_points, axis_data, fit_res)
def fitevent(self, edat, initguess):
    try:
        dt = 1000. / self.Fs  # time-step in ms.

        # control numpy error reporting
        np.seterr(invalid='ignore', over='ignore', under='ignore')

        ts = np.array([t*dt for t in range(0, len(edat))], dtype='float64')

        self.nStates = len(initguess)

        initRCConst = dt * 5.

        # setup fit params
        params = Parameters()

        for i in range(0, len(initguess)):
            params.add('a' + str(i), value=initguess[i][0])
            params.add('mu' + str(i), value=initguess[i][1])
            if self.LinkRCConst:
                if i == 0:
                    params.add('tau' + str(i), value=initRCConst)
                else:
                    params.add('tau' + str(i), value=initRCConst,
                               expr='tau0')
            else:
                params.add('tau' + str(i), value=initRCConst)

        params.add('b', value=self.baseMean)

        igdict = params.valuesdict()

        optfit = Minimizer(self._objfunc, params, fcn_args=(ts, edat,))
        optfit.prepare_fit()

        result = optfit.leastsq(xtol=self.FitTol, ftol=self.FitTol,
                                maxfev=self.FitIters)

        if result.success:
            tt = [init[0] for init, final in
                  zip(igdict.items(),
                      (result.params.valuesdict()).items())
                  if init == final]
            if len(tt) > 0:
                self.flagEvent('wInitGuessUnchanged')

            self._recordevent(result)
        else:
            # print(optfit.message, optfit.lmdif_message)
            self.rejectEvent('eFitConvergence')

    except KeyboardInterrupt:
        self.rejectEvent('eFitUserStop')
        raise
    except InvalidEvent:
        self.rejectEvent('eInvalidEvent')
    except:
        self.rejectEvent('eFitFailure')
def __fit2D(self, minim_method="nelder", rotation=False):
    self.__fit_axis(0, minim_method)
    self.__fit_axis(1, minim_method)

    # we first take all the initial parameters from 1D fits
    bgr2D_est = self.axis0fitparams.valuesdict()["backgr"] / len(self.axis0pts)
    x2D_est = self.axis0fitparams.valuesdict()["r_zero"]
    omegaX2D_est = self.axis0fitparams.valuesdict()["omega_zero"]
    y2D_est = self.axis1fitparams.valuesdict()["r_zero"]
    omegaY2D_est = self.axis1fitparams.valuesdict()["omega_zero"]
    smoothened_image = gaussian_filter(self.image_array, 50)
    peakheight2D_est = np.amax(smoothened_image)

    # now we need to programmatically cut the region of interest out of the
    # whole picture so that fitting takes far less time.
    # NOTE! In this implementation, if the beam is small compared to picture
    # size and is very close to the edge, the fitting will fail, because the
    # x and y center position estimates will be off.
    self.__format_picture(x2D_est, omegaX2D_est, y2D_est, omegaY2D_est)
    cropped_data = self.formatted_array

    xvals = np.linspace(1, cropped_data.shape[0], cropped_data.shape[0])
    yvals = np.linspace(1, cropped_data.shape[1], cropped_data.shape[1])
    x, y = np.meshgrid(yvals, xvals)
    # NOTE! there's apparently some weird convention; this has to do with
    # Cartesian vs. matrix indexing, which is explained in the numpy.meshgrid
    # manual

    estimates_2D = Parameters()
    estimates_2D.add("I_zero", value=peakheight2D_est, min=bgr2D_est)
    estimates_2D.add("x_zero", value=0.5*len(yvals), min=0,
                     max=len(yvals))  # NOTE! weird indexing conventions
    estimates_2D.add("y_zero", value=0.5*len(xvals), min=0,
                     max=len(xvals))  # NOTE! weird indexing conventions
    estimates_2D.add("omegaX_zero", value=omegaX2D_est)
    estimates_2D.add("omegaY_zero", value=omegaY2D_est)
    estimates_2D.add("theta_rot", value=0*np.pi, min=0,
                     max=np.pi)  # just starting with 0
    estimates_2D.add("backgr", value=bgr2D_est)

    if rotation:
        fit2D = Minimizer(residual_G2D, estimates_2D, fcn_args=(x, y),
                          fcn_kws={"data": cropped_data})
        print("Including rotation")
    else:
        fit2D = Minimizer(residual_G2D_norotation, estimates_2D,
                          fcn_args=(x, y), fcn_kws={"data": cropped_data})
        print("Not including rotation")

    fit_res2D = fit2D.minimize(minim_method)
    self.x2Dgrid = x
    self.y2Dgrid = y
    self.fit2Dparams = fit_res2D.params
def fit(self, image):
    """Fit an image of a hologram with the current attribute parameters.

    Example:
        >>> p = {'x': 0, 'y': 0, 'z': 100, 'a_p': 0.5, 'n_p': 1.5,
        ...      'n_m': 1.337, 'mpp': 0.135, 'lamb': 0.447}
        >>> mie_fit = Mie_Fitter(p)
        >>> mie_fit.fit(image)
    """
    dim = image.shape
    minner = Minimizer(mie_loss, self.p, fcn_args=(image, dim))
    self.result = minner.minimize()
    return self.result
def __call__(self):
    # out = minimize(self.residual,
    #                self.params,
    #                scale_covar=False,
    #                # method='cg'
    #                )
    mini = Minimizer(self.residual, self.params)
    out = mini.emcee(burn=10000, steps=60000, thin=1, workers=1,
                     params=self.params)

    self.H0 = 10**(out.params['a_nu'].value + 5
                   + 0.2 * (out.params['m04258'].value
                            - 5*log10(out.params['mu_geometric'].value)
                            - 25))
    # print(5*log10(out.params['mu_geometric'].value) + 25)
    self.e_H0 = self.H0 * sqrt(
        (out.params['a_nu'].stderr * log(10))**2
        + (log(10)/5 * out.params['m04258'].stderr)**2
        + (out.params['mu_geometric'].stderr
           / out.params['mu_geometric'].value)**2)
    return out
def setUp(self):
    """test scalar minimizers, except newton-cg (needs jacobian) and
    anneal (doesn't work out of the box)."""
    p_true = Parameters()
    p_true.add('amp', value=14.0)
    p_true.add('period', value=5.33)
    p_true.add('shift', value=0.123)
    p_true.add('decay', value=0.010)
    self.p_true = p_true

    n = 2500
    xmin = 0.
    xmax = 250.0
    noise = np.random.normal(scale=0.7215, size=n)
    self.x = np.linspace(xmin, xmax, n)
    self.data = self.residual(p_true, self.x) + noise

    fit_params = Parameters()
    fit_params.add('amp', value=11.0, min=5, max=20)
    fit_params.add('period', value=5., min=1., max=7)
    fit_params.add('shift', value=.10, min=0.0, max=0.2)
    fit_params.add('decay', value=6.e-3, min=0, max=0.1)
    self.fit_params = fit_params

    self.mini = Minimizer(self.residual, fit_params,
                          [self.x, self.data])
def test_bounds():
    if not HAS_LEAST_SQUARES:
        raise nose.SkipTest

    p_true = Parameters()
    p_true.add('amp', value=14.0)
    p_true.add('period', value=5.4321)
    p_true.add('shift', value=0.12345)
    p_true.add('decay', value=0.01000)

    def residual(pars, x, data=None):
        amp = pars['amp']
        per = pars['period']
        shift = pars['shift']
        decay = pars['decay']

        if abs(shift) > pi/2:
            shift = shift - sign(shift)*pi
        model = amp*sin(shift + x/per) * exp(-x*x*decay*decay)
        if data is None:
            return model
        return (model - data)

    n = 1500
    xmin = 0.
    xmax = 250.0
    random.seed(0)
    noise = random.normal(scale=2.80, size=n)
    x = linspace(xmin, xmax, n)
    data = residual(p_true, x) + noise

    fit_params = Parameters()
    fit_params.add('amp', value=13.0, max=20, min=0.0)
    fit_params.add('period', value=2, max=10)
    fit_params.add('shift', value=0.0, max=pi/2., min=-pi/2.)
    fit_params.add('decay', value=0.02, max=0.10, min=0.00)

    mini = Minimizer(residual, fit_params, (x, data))
    out = mini.least_squares()

    assert(out.nfev > 10)
    assert(out.nfree > 50)
    assert(out.chisqr > 1.0)

    print(fit_report(out, show_correl=True, modelpars=p_true))
    assert_paramval(out.params['decay'], 0.01, tol=1.e-2)
    assert_paramval(out.params['shift'], 0.123, tol=1.e-2)
def test_scalar_minimize_has_no_uncertainties():
    # scalar_minimize doesn't calculate uncertainties.
    # when a scalar_minimize is run the stderr and correl for each parameter
    # should be None. (stderr and correl are set to None when a Parameter is
    # initialised).
    # This requires a reset after a leastsq fit has been done.
    # Only when scalar_minimize calculates stderr and correl can this test
    # be removed.
    np.random.seed(1)
    x = np.linspace(0, 15, 301)
    data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) +
            np.random.normal(size=len(x), scale=0.2))

    # define objective function: returns the array to be minimized
    def fcn2min(params, x, data):
        """model decaying sine wave, subtract data"""
        amp = params['amp'].value
        shift = params['shift'].value
        omega = params['omega'].value
        decay = params['decay'].value
        model = amp * np.sin(x * omega + shift) * np.exp(-x*x*decay)
        return model - data

    # create a set of Parameters
    params = Parameters()
    params.add('amp', value=10, min=0)
    params.add('decay', value=0.1)
    params.add('shift', value=0.0, min=-pi/2., max=pi/2)
    params.add('omega', value=3.0)

    mini = Minimizer(fcn2min, params, fcn_args=(x, data))
    out = mini.minimize()
    assert_(np.isfinite(out.params['amp'].stderr))
    print(out.errorbars)
    assert_(out.errorbars == True)

    out2 = mini.minimize(method='nelder-mead')
    assert_(out2.params['amp'].stderr is None)
    assert_(out2.params['decay'].stderr is None)
    assert_(out2.params['shift'].stderr is None)
    assert_(out2.params['omega'].stderr is None)
    assert_(out2.params['amp'].correl is None)
    assert_(out2.params['decay'].correl is None)
    assert_(out2.params['shift'].correl is None)
    assert_(out2.params['omega'].correl is None)
    assert_(out2.errorbars == False)
def fit(self, y, x=None, dy=None, **kws):
    fcn_kws = {'y': y, 'x': x, 'dy': dy}
    fcn_kws.update(kws)
    self.minimizer = Minimizer(self.__objective, self.params,
                               fcn_kws=fcn_kws, scale_covar=True)
    self.minimizer.prepare_fit()
    self.init = self.model(self.params, x=x, **kws)
    self.minimizer.leastsq()
def time_confinterval(self):
    np.random.seed(0)
    x = np.linspace(0.3, 10, 100)
    y = 1/(0.1*x) + 2 + 0.1*np.random.randn(x.size)

    p = Parameters()
    p.add_many(('a', 0.1), ('b', 1))

    def residual(p):
        a = p['a'].value
        b = p['b'].value
        return 1/(a*x) + b - y

    minimizer = Minimizer(residual, p)
    out = minimizer.leastsq()
    return conf_interval(minimizer, out)
def test_scalar_minimize_neg_value():
    x0 = 3.14
    fmin = -1.1
    xtol = 0.001
    ftol = 2.0 * xtol

    def objective(pars):
        return (pars['x'] - x0) ** 2.0 + fmin

    params = Parameters()
    params.add('x', value=2*x0)

    minr = Minimizer(objective, params)
    result = minr.scalar_minimize(method='Nelder-Mead',
                                  options={'xatol': xtol, 'fatol': ftol})
    assert abs(result.params['x'].value - x0) < xtol
    assert abs(result.fun - fmin) < ftol
def __FitEvent(self):
    try:
        dt = 1000. / self.Fs  # time-step in ms.
        # edat = np.asarray(np.abs(self.eventData), dtype='float64')
        edat = self.dataPolarity * np.asarray(self.eventData,
                                              dtype='float64')

        # control numpy error reporting
        np.seterr(invalid='ignore', over='ignore', under='ignore')

        ts = np.array([t*dt for t in range(0, len(edat))],
                      dtype='float64')

        # estimate initial guess for events
        initguess = self._characterizeevent(edat,
                                            np.abs(util.avg(edat[:10])),
                                            self.baseSD,
                                            self.InitThreshold, 6.)
        self.nStates = len(initguess) - 1

        # setup fit params
        params = Parameters()

        for i in range(1, len(initguess)):
            params.add('a' + str(i-1),
                       value=initguess[i][0] - initguess[i-1][0])
            params.add('mu' + str(i-1), value=initguess[i][1]*dt)
            params.add('tau' + str(i-1), value=dt*7.5)

        params.add('b', value=initguess[0][0])

        optfit = Minimizer(self.__objfunc, params, fcn_args=(ts, edat,))
        optfit.prepare_fit()

        optfit.leastsq(xtol=self.FitTol, ftol=self.FitTol,
                       maxfev=self.FitIters)

        if optfit.success:
            self.__recordevent(optfit)
        else:
            # print(optfit.message, optfit.lmdif_message)
            self.rejectEvent('eFitConvergence')

    except KeyboardInterrupt:
        self.rejectEvent('eFitUserStop')
        raise
    except InvalidEvent:
        self.rejectEvent('eInvalidEvent')
    except:
        self.rejectEvent('eFitFailure')
        raise
def fit(self, params0):
    r"""Perform a fit with the provided parameters.

    Parameters
    ----------
    params0 : list
        Initial fitting parameters

    """
    self.params0 = params0

    p = Parameters()
    if self.parinfo is None:
        self.parinfo = [None] * len(self.params0)
    else:
        assert (len(self.params0) == len(self.parinfo))

    for i, (p0, parin) in enumerate(zip(self.params0, self.parinfo)):
        p.add(name='p{0}'.format(i), value=p0)
        if parin is not None:
            if 'limits' in parin:
                p['p{0}'.format(i)].set(min=parin['limits'][0])
                p['p{0}'.format(i)].set(max=parin['limits'][1])
            if 'fixed' in parin:
                p['p{0}'.format(i)].set(vary=not parin['fixed'])

    if np.all([not value.vary for value in p.values()]):
        raise Exception('All parameters are fixed!')

    self.lmfit_minimizer = Minimizer(self.residuals, p,
                                     nan_policy=self.nan_policy,
                                     fcn_args=(self.data,))

    self.result.orignorm = np.sum(self.residuals(params0, self.data) ** 2)

    result = self.lmfit_minimizer.minimize(Dfun=self.deriv,
                                           method='leastsq',
                                           ftol=self.ftol, xtol=self.xtol,
                                           gtol=self.gtol,
                                           maxfev=self.maxfev,
                                           epsfcn=self.epsfcn,
                                           factor=self.stepfactor)

    self.result.bestnorm = result.chisqr
    self.result.redchi = result.redchi
    self._m = result.ndata
    self.result.nfree = result.nfree
    self.result.resid = result.residual
    self.result.status = result.ier
    self.result.covar = result.covar
    self.result.xerror = [result.params['p{0}'.format(i)].stderr
                          for i in range(len(result.params))]
    self.result.params = [result.params['p{0}'.format(i)].value
                          for i in range(len(result.params))]
    self.result.message = result.message

    self.lmfit_result = result

    if not result.errorbars or not result.success:
        warnings.warn(self.result.message)

    return result.success
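# --- Added illustration (not from the original source): a self-contained
# sketch of the parinfo -> lmfit.Parameters translation performed by the
# fit() method above, using only the lmfit API. The parinfo entries and
# initial values below are hypothetical.
from lmfit import Parameters

params0 = [1.0, 0.5, 2.0]
parinfo = [{'limits': (0.0, 10.0)},  # bound p0 between 0 and 10
           {'fixed': True},          # freeze p1 at its initial value
           None]                     # p2 unconstrained

p = Parameters()
for i, (p0, parin) in enumerate(zip(params0, parinfo)):
    p.add(name='p{0}'.format(i), value=p0)
    if parin is not None:
        if 'limits' in parin:
            p['p{0}'.format(i)].set(min=parin['limits'][0],
                                    max=parin['limits'][1])
        if 'fixed' in parin:
            p['p{0}'.format(i)].set(vary=not parin['fixed'])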
def fit(self, y, x=None, dy=None, **kws):
    fcn_kws = {'y': y, 'x': x, 'dy': dy}
    fcn_kws.update(kws)
    if not self.has_initial_guess:
        self.guess_starting_values(y, x=x, **kws)
    self.minimizer = Minimizer(self.__objective, self.params,
                               fcn_kws=fcn_kws, scale_covar=True)
    self.minimizer.prepare_fit()
    self.init = self.model(self.params, x=x, **kws)
    self.minimizer.leastsq()
def fit2D(image_nparray2D, fit_axis0=None, fit_axis1=None,
          minim_method="nelder"):
    if fit_axis0 is None:
        fit_axis0 = fit_axis(image_nparray2D, 0, minim_method)
    if fit_axis1 is None:
        fit_axis1 = fit_axis(image_nparray2D, 1, minim_method)

    # we first take all the initial parameters from 1D fits
    bgr2D_est = fit_axis0[2].params.valuesdict()["backgr"] / len(fit_axis1[0])
    x2D_est = fit_axis0[2].params.valuesdict()["r_zero"]
    omegaX2D_est = fit_axis0[2].params.valuesdict()["omega_zero"]
    y2D_est = fit_axis1[2].params.valuesdict()["r_zero"]
    omegaY2D_est = fit_axis1[2].params.valuesdict()["omega_zero"]
    smoothened_image = gaussian_filter(image_nparray2D, 50)
    peakheight2D_est = np.amax(smoothened_image)

    # now we need to programmatically cut the region of interest out of the
    # whole picture so that fitting takes far less time.
    # NOTE! In this implementation, if the beam is small compared to picture
    # size and is very close to the edge, the fitting will fail, because the
    # x and y center position estimates will be off.
    cropped_data = format_picture(image_nparray2D, x2D_est, omegaX2D_est,
                                  y2D_est, omegaY2D_est)

    xvals = np.linspace(1, cropped_data.shape[0], cropped_data.shape[0])
    yvals = np.linspace(1, cropped_data.shape[1], cropped_data.shape[1])
    x, y = np.meshgrid(yvals, xvals)
    # NOTE! there's apparently some weird convention; this has to do with
    # Cartesian vs. matrix indexing, which is explained in the numpy.meshgrid
    # manual

    estimates_2D = Parameters()
    estimates_2D.add("I_zero", value=peakheight2D_est, min=bgr2D_est)
    estimates_2D.add("x_zero", value=0.5*len(yvals), min=0,
                     max=len(yvals))  # NOTE! weird indexing conventions
    estimates_2D.add("y_zero", value=0.5*len(xvals), min=0,
                     max=len(xvals))  # NOTE! weird indexing conventions
    estimates_2D.add("omegaX_zero", value=omegaX2D_est)
    estimates_2D.add("omegaY_zero", value=omegaY2D_est)
    estimates_2D.add("backgr", value=bgr2D_est)

    fit2D = Minimizer(residual_2D, estimates_2D, fcn_args=(x, y),
                      fcn_kws={"data": cropped_data})
    fit_res2D = fit2D.minimize(minim_method)
    print(estimates_2D.valuesdict()["x_zero"])
    return (x, y, fit_res2D)
def fit_axis(image_nparray2D, axis, minim_method="nelder"):
    """
    This function fits one axis of a 2D array representing an image by
    doing a summation along the other axis.

    fit_axis(image_nparray2D, axis, minim_method="nelder")
    """
    axis_data = (np.sum(image_nparray2D, axis=1) if (axis == 0)
                 else np.sum(image_nparray2D, axis=0))
    axis_points = np.linspace(1, len(axis_data), len(axis_data))
    param_estimates = startparams_estimate(axis_data)
    params_for_fit = Parameters()
    params_for_fit.add('I_zero', value=param_estimates[0], min=0,
                       max=np.amax(axis_data))
    params_for_fit.add('r_zero', value=param_estimates[1], min=1,
                       max=len(axis_data))
    params_for_fit.add('omega_zero', value=param_estimates[2], min=1,
                       max=len(axis_data))
    params_for_fit.add('backgr', value=param_estimates[3])
    fit = Minimizer(residual_G1D, params_for_fit, fcn_args=(axis_points,),
                    fcn_kws={"data": axis_data})
    fit_res = fit.minimize(minim_method)
    return (axis_points, axis_data, fit_res)
def test_derive():
    def func(pars, x, data=None):
        model = pars['a'] * np.exp(-pars['b'] * x) + pars['c']
        if data is None:
            return model
        return model - data

    def dfunc(pars, x, data=None):
        v = np.exp(-pars['b']*x)
        return np.array([v, -pars['a']*x*v, np.ones(len(x))])

    def f(var, x):
        return var[0] * np.exp(-var[1] * x) + var[2]

    params1 = Parameters()
    params1.add('a', value=10)
    params1.add('b', value=10)
    params1.add('c', value=10)

    params2 = Parameters()
    params2.add('a', value=10)
    params2.add('b', value=10)
    params2.add('c', value=10)

    a, b, c = 2.5, 1.3, 0.8
    x = np.linspace(0, 4, 50)
    y = f([a, b, c], x)
    data = y + 0.15*np.random.normal(size=len(x))

    # fit without analytic derivative
    min1 = Minimizer(func, params1, fcn_args=(x,), fcn_kws={'data': data})
    out1 = min1.leastsq()

    # fit with analytic derivative
    min2 = Minimizer(func, params2, fcn_args=(x,), fcn_kws={'data': data})
    out2 = min2.leastsq(Dfun=dfunc, col_deriv=1)

    check_wo_stderr(out1.params['a'], out2.params['a'].value, 0.00005)
    check_wo_stderr(out1.params['b'], out2.params['b'].value, 0.00005)
    check_wo_stderr(out1.params['c'], out2.params['c'].value, 0.00005)
def test_peakfit():
    def residual(pars, x, data=None):
        g1 = gaussian(x, pars['a1'], pars['c1'], pars['w1'])
        g2 = gaussian(x, pars['a2'], pars['c2'], pars['w2'])
        model = g1 + g2
        if data is None:
            return model
        return (model - data)

    n = 601
    xmin = 0.
    xmax = 15.0
    noise = np.random.normal(scale=.65, size=n)
    x = np.linspace(xmin, xmax, n)

    org_params = Parameters()
    org_params.add_many(('a1', 12.0, True, None, None, None),
                        ('c1', 5.3, True, None, None, None),
                        ('w1', 1.0, True, None, None, None),
                        ('a2', 9.1, True, None, None, None),
                        ('c2', 8.1, True, None, None, None),
                        ('w2', 2.5, True, None, None, None))

    data = residual(org_params, x) + noise

    fit_params = Parameters()
    fit_params.add_many(('a1', 8.0, True, None, 14., None),
                        ('c1', 5.0, True, None, None, None),
                        ('w1', 0.7, True, None, None, None),
                        ('a2', 3.1, True, None, None, None),
                        ('c2', 8.8, True, None, None, None))
    fit_params.add('w2', expr='2.5*w1')

    myfit = Minimizer(residual, fit_params,
                      fcn_args=(x,), fcn_kws={'data': data})

    myfit.prepare_fit()
    out = myfit.leastsq()
    check_paras(out.params, org_params)
def test_ci_report():
    """test confidence interval report"""

    def residual(pars, x, data=None):
        argu = (x*pars['decay'])**2
        shift = pars['shift']
        if abs(shift) > np.pi/2:
            shift = shift - np.sign(shift)*np.pi
        model = pars['amp']*np.sin(shift + x/pars['period']) * np.exp(-argu)
        if data is None:
            return model
        return model - data

    p_true = Parameters()
    p_true.add('amp', value=14.0)
    p_true.add('period', value=5.33)
    p_true.add('shift', value=0.123)
    p_true.add('decay', value=0.010)

    n = 2500
    xmin = 0.
    xmax = 250.0
    x = np.linspace(xmin, xmax, n)
    data = residual(p_true, x) + np.random.normal(scale=0.7215, size=n)

    fit_params = Parameters()
    fit_params.add('amp', value=13.0)
    fit_params.add('period', value=2)
    fit_params.add('shift', value=0.0)
    fit_params.add('decay', value=0.02)

    mini = Minimizer(residual, fit_params, fcn_args=(x,),
                     fcn_kws={'data': data})
    out = mini.leastsq()
    report = fit_report(out)
    assert(len(report) > 500)

    ci, tr = conf_interval(mini, out, trace=True)
    report = ci_report(ci)
    assert(len(report) > 250)
def minimize(fcn, paramgroup, method='leastsq', args=None, kws=None,
             scale_covar=True, iter_cb=None, reduce_fcn=None,
             nan_policy='omit', _larch=None, **fit_kws):
    """wrapper around lmfit minimizer for Larch"""
    fiteval = _larch.symtable._sys.fiteval
    if isinstance(paramgroup, ParameterGroup):
        params = paramgroup.__params__
    elif isgroup(paramgroup):
        params = group2params(paramgroup, _larch=_larch)
    elif isinstance(paramgroup, Parameters):
        params = paramgroup
    else:
        raise ValueError('minimize takes ParameterGroup or Group as first argument')

    if args is None:
        args = ()
    if kws is None:
        kws = {}

    def _residual(params):
        params2group(params, paramgroup)
        return fcn(paramgroup, *args, **kws)

    fitter = Minimizer(_residual, params, iter_cb=iter_cb,
                       reduce_fcn=reduce_fcn, nan_policy='omit',
                       **fit_kws)

    result = fitter.minimize(method=method)
    params2group(result.params, paramgroup)

    out = Group(name='minimize results', fitter=fitter,
                fit_details=result, chi_square=result.chisqr,
                chi_reduced=result.redchi)

    for attr in ('aic', 'bic', 'covar', 'params', 'nvarys',
                 'nfree', 'ndata', 'var_names', 'nfev', 'success',
                 'errorbars', 'message', 'lmdif_message', 'residual'):
        setattr(out, attr, getattr(result, attr, None))
    return out
plt.grid()
plt.legend()
plt.tight_layout()
plt.show()

# %%
# Least-squares minimization with LMFIT
# -------------------------------------
# Use the :func:`~mrsimulator.utils.spectral_fitting.make_LMFIT_params` for a quick
# setup of the fitting parameters.
params = make_LMFIT_params(sim, processor)
print(params.pretty_print(columns=["value", "min", "max", "vary", "expr"]))

# %%
# **Solve the minimizer using LMFIT**
minner = Minimizer(LMFIT_min_function, params, fcn_args=(sim, processor))
result = minner.minimize()
report_fit(result)

# %%
# The best fit solution
# ---------------------
sim.run()
processed_data = processor.apply_operations(data=sim.methods[0].simulation).real

# Plot the spectrum
ax = plt.subplot(projection="csdm")
ax.plot(experiment, "k", alpha=0.5, linewidth=2, label="Experiment")
ax.plot(processed_data, "r--", label="Best Fit")
ax.set_xlim(50, -25)
pfit = [Parameter(name='amp_g', value=10),
        Parameter(name='cen_g', value=9),
        Parameter(name='wid_g', value=1),
        Parameter(name='amp_tot', value=20),
        Parameter(name='amp_l', expr='amp_tot - amp_g'),
        Parameter(name='cen_l', expr='1.5+cen_g'),
        Parameter(name='wid_l', expr='2*wid_g'),
        Parameter(name='line_slope', value=0.0),
        Parameter(name='line_off', value=0.0)]

sigma = 0.021  # estimate of data error (for all data points)

myfit = Minimizer(residual, pfit,
                  fcn_args=(x,), fcn_kws={'sigma': sigma, 'data': data},
                  scale_covar=True)

myfit.prepare_fit()
init = residual(myfit.params, x)

if HASPYLAB:
    pylab.plot(x, init, 'b--')

myfit.leastsq()

print(' Nfev = ', myfit.nfev)
print(myfit.chisqr, myfit.redchi, myfit.nfree)

report_fit(myfit.params)
# -------------------------------------
# Use the :func:`~mrsimulator.utils.spectral_fitting.make_LMFIT_params` for a quick
# setup of the fitting parameters. Note, the first two arguments of this function are
# the simulator object and a list of SignalProcessor objects, ``processors``. The
# fitting parameters corresponding to the signal processor objects are generated using
# ``SP_i_operation_j_FunctionName_FunctionArg``, where *i* is the *ith* signal
# processor within the list, *j* is the operation index of the *ith* processor, and
# *FunctionName* and *FunctionArg* are the operation function name and function
# argument, respectively.
params = sf.make_LMFIT_params(sim, processors, include={"rotor_frequency"})
print(params.pretty_print(columns=["value", "min", "max", "vary", "expr"]))

# %%
# **Solve the minimizer using LMFIT**
minner = Minimizer(sf.LMFIT_min_function, params,
                   fcn_args=(sim, processors, sigmas))
result = minner.minimize()
result

# %%
# The best fit solution
# ---------------------
all_best_fit = sf.bestfit(sim, processors)  # a list of best fit simulations
all_residuals = sf.residuals(sim, processors)  # a list of residuals

# Plot the spectrum
fig, ax = plt.subplots(1, 3, figsize=(12, 3),
                       subplot_kw={"projection": "csdm"})
def fit(x_tofit, y_tofit, func='Growth', daystofit=60, nfreq=7):
    y0 = y_tofit[0]
    y1 = y_tofit[-1]
    x1 = x_tofit[-1]

    pars = Parameters()
    pars.add('a', value=y1, min=y1)
    pars.add('b', value=0., min=0.)
    pars.add('c', value=y0, min=0.)

    if func == 'Growth':
        lfunc = lmfunc
        growf = growth
    elif func == 'Sigmoid':
        lfunc = lmsig
        growf = sigm

    mfit = Minimizer(lfunc, pars, fcn_args=(x_tofit,),
                     fcn_kws={'y': y_tofit}, reduce_fcn='neglogcauchy')
    nfit = mfit.minimize(method='nelder')
    # nfit = mfit.minimize(method='leastsq')
    # nfit = mfit.leastsq()
    dfit = mfit.minimize(method='leastsq', params=nfit.params)
    print(fit_report(dfit))

    a = dfit.params['a']
    b = dfit.params['b']
    c = dfit.params['c']

    # to look ahead
    nstd = 3
    a_err = nstd * a.stderr
    b_err = nstd * b.stderr
    c_err = nstd * c.stderr

    xfit = np.arange(0, daystofit, 1.)
    yfit = lfunc(dfit.params, xfit)

    fig, ax = plt.subplots()
    # yfit1 = growf(a+a_err, b-b_err, c+c_err, xfit)
    # yfit2 = growf(a-a_err, b+b_err, c-c_err, xfit)
    yfit1 = growf(a + a_err, b, c, xfit)
    yfit2 = growf(a - a_err, b, c, xfit)
    # plt.fill_between(xfit, yfit2, yfit1, color="#ABABAB")
    plt.fill_between(xfit, yfit2, yfit1, color="#ABABAB",
                     label='CI: 99.75%')
    ax.plot(xfit, yfit, 'r-', label="Best fit")
    ax.scatter(x_tofit, y_tofit, c='black', s=50,
               label="# Diagnosed in Mainland China")
    # xlabel = [date[s].date() for s in range(daystofit) if s % 7 == 0]
    # print(date.astype(str)[0][5:])
    nfreq = 3
    date = pd.date_range('2020-01-16', periods=daystofit)
    datestr = date.astype(str)

    ypred = lfunc(dfit.params, x1 + 1)
    yerr = a.stderr / a
    ypred_1 = ypred * (1 + 1.732 * yerr)
    ypred_2 = ypred * (1 - 1.732 * yerr)
    print("Predicted diagnosed number on %s: %d (total: %d-%d, 95%% CI)"
          % (datestr[x1 + 1], ypred - y1, ypred_2, ypred_1))

    xticks = [xfit[s] for s in range(daystofit) if s % nfreq == 0]
    xlabel = [datestr[s][5:] for s in range(daystofit) if s % nfreq == 0]
    plt.xticks(xticks, xlabel, rotation=60)
    plt.legend()
    fig.tight_layout()
    plt.show()
    fig.savefig('FitbyGompertzGrowth.pdf')
    plt.close()
def grid_fit(src_y, src_x, ncols, nrows, params, vary_theta=False,
             method='least_squares', bbox=None, normalized_shifts=None):
    """Optimize grid model parameters to match detected source centroids.

    Parameters
    ----------
    src_y : `numpy.ndarray`, (N,)
        An array of Y-axis centroid coordinates.
    src_x : `numpy.ndarray`, (N,)
        An array of X-axis centroid coordinates.
    ncols : `int`
        Number of grid columns.
    nrows : `int`
        Number of grid rows.
    params : `list` or `tuple`
        Sequence of initial guesses for the grid model parameters.
    vary_theta : `bool`, optional
        Allow the grid rotation angle parameter to vary during model fit
        if `True`.
    method : `str`, optional
        Name of the fitting method to use (the default is
        'least_squares').
    bbox : `lsst.geom.Box2I`, optional
        An integer coordinate rectangle corresponding to detector geometry
        (the default is `None`, which implies no detector geometry
        information will be used).
    normalized_shifts : `tuple` [`numpy.ndarray`], optional
        A sequence of arrays of normalized shifts in Y-axis and X-axis
        (the default is `None`, which implies no normalized shifts to be
        included).

    Returns
    -------
    grid : `mixcoatl.sourcegrid.DistortedGrid`
        Optimized grid model.
    result : `lmfit.minimizer.MinimizerResult`
        The results of the grid model optimization.
    """
    ystep, xstep, theta, y0, x0 = params

    ## Define fit parameters
    params = Parameters()
    params.add('ystep', value=ystep, vary=False)
    params.add('xstep', value=xstep, vary=False)
    params.add('y0', value=y0, min=y0 - 3., max=y0 + 3., vary=True)
    params.add('x0', value=x0, min=x0 - 3., max=x0 + 3., vary=True)
    params.add('theta', value=theta, min=theta - 0.5*np.pi/180.,
               max=theta + 0.5*np.pi/180., vary=False)

    minner = Minimizer(fit_error, params,
                       fcn_args=(src_y, src_x, ncols, nrows),
                       fcn_kws={'normalized_shifts': normalized_shifts,
                                'bbox': bbox},
                       nan_policy='omit')
    result = minner.minimize(params=params, method=method, max_nfev=None)

    if vary_theta:
        result_params = result.params
        result_values = result_params.valuesdict()
        params['y0'].set(value=result_values['y0'], vary=False)
        params['x0'].set(value=result_values['x0'], vary=False)
        params['theta'].set(vary=True)

        theta_minner = Minimizer(fit_error, params,
                                 fcn_args=(src_y, src_x, ncols, nrows),
                                 fcn_kws={'normalized_shifts': normalized_shifts,
                                          'bbox': bbox},
                                 nan_policy='omit')
        theta_result = theta_minner.minimize(params=params, method=method,
                                             max_nfev=None)
        result.params['theta'] = theta_result.params['theta']

    parvals = result.params.valuesdict()
    grid = DistortedGrid(parvals['ystep'], parvals['xstep'],
                         parvals['theta'], parvals['y0'], parvals['x0'],
                         ncols, nrows, normalized_shifts=normalized_shifts)

    return grid, result
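# --- Added illustration (not from the original source): a hypothetical call
# sketch for grid_fit above. The centroid arrays and initial-guess values
# are invented for illustration, so the call is left commented out.
# init = (65.0, 65.0, 0.0, 2000.0, 2000.0)  # (ystep, xstep, theta, y0, x0)
# grid, result = grid_fit(src_y, src_x, ncols=49, nrows=49, params=init,
#                         vary_theta=True)
# print(result.params.valuesdict())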
def BIC_minimisation_region_full(ind1, uc, peak_regions, grouped_peaks,
                                 total_spectral_ydata, corr_distance, std):
    ####################################################################
    # initialise process
    ####################################################################

    # print("minimising region " + str(ind1) + " of " + str(len(peak_regions)))

    BIC_param = 15

    region = np.array(peak_regions[ind1])
    region_y = total_spectral_ydata[region]
    fit_y = np.zeros(len(region_y))
    copy_peaks = np.array(grouped_peaks[ind1])
    params = Parameters()
    fitted_peaks = []
    ttotal = 0

    ####################################################################
    # build initial model
    ####################################################################

    # params.add('vregion' + str(ind1), value=2.5, max=5, min=1)
    params.add('vregion' + str(ind1), value=0.5, max=1, min=0)

    distance = uc(0, "hz") - uc(5, "hz")
    std_upper = uc(0, "hz") - uc(1, "hz")
    av_std = uc(0, "hz") - uc(0.2, "hz")
    std_lower = uc(0, "hz") - uc(0.1, "hz")

    # build model
    while len(copy_peaks) > 0:
        # pick peak that is furthest from fitted data:
        diff_array = region_y - fit_y
        ind2 = np.argmax(diff_array[copy_peaks - region[0]])
        maxpeak = copy_peaks[ind2]
        copy_peaks = np.delete(copy_peaks, ind2)

        # only allow params < distance away to vary at a time
        # add new params
        fitted_peaks.append(maxpeak)
        fitted_peaks = sorted(fitted_peaks)

        params.add('A' + str(maxpeak), value=total_spectral_ydata[maxpeak],
                   min=0, max=1, vary=True)
        # params.add('std' + str(maxpeak), value=av_std, vary=True,
        #            min=std_lower, max=std_upper)
        params.add('std' + str(maxpeak), value=av_std, vary=True)
        params.add('mu' + str(maxpeak), value=maxpeak, vary=True,
                   min=maxpeak - 4 * corr_distance,
                   max=maxpeak + 4 * corr_distance)

    # adjust amplitudes and widths of the current model
    initial_y = p7sim(params, region, fitted_peaks, ind1)

    inty = np.sum(region_y[region_y > 0])
    intmodel = np.sum(initial_y)

    # check the region can be optimised this way:
    # find peak with max amplitude
    maxamp = 0
    for peak in fitted_peaks:
        amp = params['A' + str(peak)]
        if amp > maxamp:
            maxamp = copy.copy(amp)

    maxintegral = maxamp * len(region)

    if maxintegral > inty:
        # set initial conditions
        while (intmodel / inty < 0.99) or (intmodel / inty > 1.01):
            for f in fitted_peaks:
                params['std' + str(f)].set(
                    value=params['std' + str(f)] * inty / intmodel)

            initial_y = p7sim(params, region, fitted_peaks, ind1)

            for f in fitted_peaks:
                params['A' + str(f)].set(
                    value=params['A' + str(f)]
                    * region_y[int(params['mu' + str(f)]) - region[0]]
                    / initial_y[f - region[0]])

            initial_y = p7sim(params, region, fitted_peaks, ind1)
            intmodel = np.sum(initial_y)

    # print('built model region ' + str(ind1))

    ####################################################################
    # now relax all params
    ####################################################################

    # allow all params to vary
    params['vregion' + str(ind1)].set(vary=True)

    for peak in fitted_peaks:
        params['A' + str(peak)].set(
            vary=False,
            min=max(0, params['A' + str(peak)] - 0.01),
            max=min(params['A' + str(peak)] + 0.01, 1))
        params['mu' + str(peak)].set(vary=False)
        params['std' + str(peak)].set(
            vary=False,
            min=min(std_lower, params['std' + str(peak)] - av_std),
            max=max(params['std' + str(peak)] + av_std, std_upper))

    out = Minimizer(p7residual, params,
                    fcn_args=(region, fitted_peaks, region_y, ind1, False))

    results = out.minimize()

    params = results.params

    # print('relaxed params region ' + str(ind1))

    ####################################################################
    # now remove peaks in turn
    ####################################################################

    trial_y = p7sim(params, region, fitted_peaks, ind1)
    trial_peaks = np.array(fitted_peaks)
    amps = []
    for peak in trial_peaks:
        amps.append(params['A' + str(peak)])

    r = trial_y - region_y
    chi2 = r ** 2
    N = len(chi2)
    BIC = N * np.log(np.sum(chi2) / N) + np.log(N) * (3 * len(fitted_peaks) + 2)

    while len(trial_peaks) > 0:
        new_params = copy.copy(params)

        # find peak with smallest amp
        minpeak = trial_peaks[np.argmin(amps)]

        # remove this peak from the set left to try
        trial_peaks = np.delete(trial_peaks, np.argmin(amps))
        amps = np.delete(amps, np.argmin(amps))

        # remove this peak from the trial peaks list and the trial params
        new_params.__delitem__('A' + str(minpeak))
        new_params.__delitem__('mu' + str(minpeak))
        new_params.__delitem__('std' + str(minpeak))

        new_fitted_peaks = np.delete(fitted_peaks,
                                     np.where(fitted_peaks == minpeak))

        # simulate data with one fewer peak
        new_trial_y = p7sim(new_params, region, new_fitted_peaks, ind1)

        r = new_trial_y - region_y
        chi2 = np.sum(r ** 2)
        N = len(new_trial_y)
        new_BIC = (N * np.log(chi2 / N)
                   + np.log(N) * (3 * len(new_fitted_peaks) + 2))

        # if the fit is significantly better, remove this peak
        if new_BIC < BIC - BIC_param:
            fitted_peaks = copy.copy(new_fitted_peaks)
            params = copy.copy(new_params)
            BIC = copy.copy(new_BIC)

    fitted_peaks = sorted(fitted_peaks)
    fit_y = p7sim(params, region, fitted_peaks, ind1)

    ####################################################################

    print(" done region " + str(ind1 + 1) + " of " + str(len(peak_regions)))

    return fitted_peaks, params, fit_y
def ACMEWLRhybrid(y, corr_distance):
    def residual_function(params, im, real):
        # phase the region
        data = ps(params, im, real, 0)

        # make new baseline for this region
        r = np.linspace(data[0], data[-1], len(real))

        # find negative area
        data -= r

        ds1 = np.abs((data[1:] - data[:-1]))
        p1 = ds1 / np.sum(ds1)

        # calculation of entropy
        p1[p1 == 0] = 1
        h1 = -p1 * np.log(p1)
        h1s = np.sum(h1)

        # calculation of penalty
        pfun = 0.0
        as_ = data - np.abs(data)
        sumas = np.sum(as_)
        if sumas < 0:
            pfun = (as_[1:] / 2) ** 2
        p = np.sum(pfun)
        return h1s + 1000 * p

    # find regions
    classification, sigma = baseline_find_signal(y, corr_distance, True, 1)
    c1 = np.roll(classification, 1)
    diff = classification - c1
    s_start = np.where(diff == 1)[0]
    s_end = np.where(diff == -1)[0] - 1

    peak_regions = []
    for r in range(len(s_start)):
        peak_regions.append(np.arange(s_start[r], s_end[r]))

    # for region in peak_regions:
    #     plt.plot(region, y[region], color='C1')

    # phase each region independently
    phase_angles = []
    weights = []
    centres = []

    for region in peak_regions:
        params = Parameters()
        params.add('p0', value=0, min=-np.pi, max=np.pi)
        out = Minimizer(residual_function, params,
                        fcn_args=(np.imag(y[region]), np.real(y[region])))
        results = out.minimize('brute')
        p = results.params
        phase_angles.append(p['p0'] * 1)

        # find weight
        data = ps(p, np.imag(y[region]), np.real(y[region]), 0)

        # make new baseline for this region
        r = np.linspace(data[0], data[-1], len(data))

        # find negative area
        res = data - r
        weights.append(abs(np.sum(res[res > 0] / np.sum(y[y > 0]))))
        centres.append(np.median(region) / len(y))

    sw = sum(weights)
    weights = [w / sw for w in weights]

    # do weighted linear regression on the regions
    # do outlier analysis
    switch = 0
    centres = np.array(centres)
    weights = np.array(weights)
    sweights = np.argsort(weights)[::-1]
    phase_angles = np.array(phase_angles)
    ind1 = 0

    while switch == 0:
        intercept, gradient = np.polynomial.polynomial.polyfit(
            centres, phase_angles, deg=1, w=weights)
        predicted_angles = gradient * centres + intercept
        weighted_res = np.abs(predicted_angles - phase_angles) * weights

        # find where the largest weighted residual is
        max_res = sweights[ind1]

        s = 0
        if phase_angles[max_res] > 0:
            s = -1
            phase_angles[max_res] -= 2 * np.pi
        else:
            s = +1
            phase_angles[max_res] += 2 * np.pi

        intercept1, gradient1 = np.polynomial.polynomial.polyfit(
            centres, phase_angles, deg=1, w=weights)
        new_predicted_angles = gradient1 * centres + intercept1
        new_weighted_res = np.abs(new_predicted_angles - phase_angles) * weights

        if np.sum(new_weighted_res) > np.sum(weighted_res):
            switch = 1
            phase_angles[max_res] += -2 * np.pi * s

        ind1 += 1

    # phase the data
    p_final = Parameters()
    p_final.add('p0', value=intercept)
    p_final.add('p1', value=gradient)
    # p_final.pretty_print()
    y = ps(p_final, np.imag(y), np.real(y), 1)

    classification, sigma = baseline_find_signal(y, corr_distance, True, 1)
    r = gen_baseline(np.real(y), classification, corr_distance)
    y -= r

    return np.real(y)
def acme(y, corr_distance):
    params = Parameters()
    phase_order = 3
    for p in range(phase_order + 1):
        params.add('p' + str(p), value=0, min=-np.pi, max=np.pi)

    def acmescore(params, im, real, phase_order):
        """
        Phase correction using the ACME algorithm by Chen Li et al.,
        Journal of Magnetic Resonance 158 (2002) 164-168.

        Parameters
        ----------
        params : Parameters
            Current phase-correction parameters.
        im, real : ndarray
            Imaginary and real parts of the NMR data.
        phase_order : int
            Order of the phase-correction polynomial.

        Returns
        -------
        score : ndarray
            Value of the objective function (phase score).
        """
        data = ps(params, im, real, phase_order)

        # calculate entropy of the non-corrected data and a penalty for the
        # baseline-corrected data; keep as a vector to use the default
        # lmfit method

        # calculation of first derivatives of signal regions
        ds1 = np.abs((data[1:] - data[:-1]))
        p1 = ds1 / np.sum(ds1)

        # calculation of entropy
        p1[p1 == 0] = 1
        h1 = -p1 * np.log(p1)
        # h1s = np.sum(h1)

        # calculation of penalty
        pfun = 0.0
        as_ = data - np.abs(data)
        # as_ = databl - np.abs(databl)
        sumas = np.sum(as_)
        if sumas < 0:
            # pfun = pfun + np.sum((as_ / 2) ** 2)
            pfun = (as_[1:] / 2) ** 2
        p = 1000 * pfun
        return h1 + p

    out = Minimizer(acmescore, params,
                    fcn_args=(np.imag(y), np.real(y), phase_order))
    results = out.minimize()
    p = results.params
    p.pretty_print()

    y = ps(p, np.imag(y), np.real(y), phase_order)
    classification, sigma = baseline_find_signal(y, corr_distance, True, 1)
    r = gen_baseline(np.real(y), classification, corr_distance)
    y -= r
    return y
class CommonMinimizerTest(unittest.TestCase):

    def setUp(self):
        """test scalar minimizers, except newton-cg (needs jacobian) and
        anneal (doesn't work out of the box)."""
        p_true = Parameters()
        p_true.add('amp', value=14.0)
        p_true.add('period', value=5.33)
        p_true.add('shift', value=0.123)
        p_true.add('decay', value=0.010)
        self.p_true = p_true

        n = 2500
        xmin = 0.
        xmax = 250.0
        noise = np.random.normal(scale=0.7215, size=n)
        self.x = np.linspace(xmin, xmax, n)
        data = self.residual(p_true, self.x) + noise

        fit_params = Parameters()
        fit_params.add('amp', value=11.0, min=5, max=20)
        fit_params.add('period', value=5., min=1., max=7)
        fit_params.add('shift', value=.10, min=0.0, max=0.2)
        fit_params.add('decay', value=6.e-3, min=0, max=0.1)
        self.fit_params = fit_params

        init = self.residual(fit_params, self.x)
        self.mini = Minimizer(self.residual, fit_params, [self.x, data])

    def residual(self, pars, x, data=None):
        amp = pars['amp'].value
        per = pars['period'].value
        shift = pars['shift'].value
        decay = pars['decay'].value

        if abs(shift) > pi / 2:
            shift = shift - np.sign(shift) * pi
        model = amp * np.sin(shift + x / per) * np.exp(-x * x * decay * decay)
        if data is None:
            return model
        return (model - data)

    def test_diffev_bounds_check(self):
        # You need finite (min, max) for each parameter if you're using
        # differential_evolution.
        self.fit_params['decay'].min = None
        self.minimizer = 'differential_evolution'
        np.testing.assert_raises(ValueError, self.scalar_minimizer)

    def test_scalar_minimizers(self):
        # test all the scalar minimizers
        for method in SCALAR_METHODS:
            if method in ['newton', 'dogleg', 'trust-ncg']:
                continue
            self.minimizer = SCALAR_METHODS[method]
            if method == 'Nelder-Mead':
                sig = 0.2
            else:
                sig = 0.15
            self.scalar_minimizer(sig=sig)

    def scalar_minimizer(self, sig=0.15):
        try:
            from scipy.optimize import minimize as scipy_minimize
        except ImportError:
            raise SkipTest

        print(self.minimizer)
        self.mini.scalar_minimize(method=self.minimizer)

        fit = self.residual(self.fit_params, self.x)

        for name, par in self.fit_params.items():
            nout = "%s:%s" % (name, ' ' * (20 - len(name)))
            print("%s: %s (%s) " % (nout, par.value,
                                    self.p_true[name].value))

        for para, true_para in zip(self.fit_params.values(),
                                   self.p_true.values()):
            check_wo_stderr(para, true_para.value, sig=sig)
n = 2500
xmin = 0.
xmax = 250.0
noise = random.normal(scale=0.7215, size=n)
x = linspace(xmin, xmax, n)
data = residual(p_true, x) + noise

fit_params = Parameters()
fit_params.add('amp', value=13.0)
fit_params.add('period', value=2)
fit_params.add('shift', value=0.0)
fit_params.add('decay', value=0.02)

mini = Minimizer(residual, fit_params, fcn_args=(x,),
                 fcn_kws={'data': data})
out = mini.leastsq()

fit = residual(out.params, x)

report_fit(out)

ci, tr = conf_interval(mini, out, trace=True)
report_ci(ci)

if HASPYLAB:
    names = out.params.keys()
    i = 0
    gs = plt.GridSpec(4, 4)
    sx = {}
    sy = {}
    for fixed in names:
def solve_general(self, nh, delfstar, overlayer, prop_guess={}):
    '''
    solve the property of a single test.
    nh: list of int
    delfstar: dict {harm(int): complex, ...}
    overlayer: dict e.g.: {'drho': 0, 'grho_rh': 0, 'phi': 0}
    return drho, grho_rh, phi, dlam_rh, err
    '''
    # input variables - this is helpful for the error analysis
    # define sensibly named partial derivatives for further use
    deriv = {}
    err = {}
    err_names = ['drho', 'grho_rh', 'phi']

    # first pass at solution comes from rh and rd
    rd_exp = self.rdexp(nh, delfstar)  # nh[2]
    rh_exp = self.rhexp(nh, delfstar)  # nh[0], nh[1]
    logger.info('rd_exp', rd_exp)
    logger.info('rh_exp', rh_exp)

    n1 = nh[0]
    n2 = nh[1]
    n3 = nh[2]

    # solve the problem
    if ~np.isnan(rd_exp) or ~np.isnan(rh_exp):
        logger.info('rd_exp, rh_exp is not nan')
        # TODO change here for the model selection
        if prop_guess:  # value {'drho', 'grho_rh', 'phi'}
            dlam_rh, phi = self.guess_from_props(**prop_guess)
        elif rd_exp > 0.5:
            dlam_rh, phi = self.bulk_guess(delfstar)
        else:
            dlam_rh, phi = self.thinfilm_guess(delfstar)
        logger.info('dlam_rh', dlam_rh)
        logger.info('phi', phi)

        if fit_method == 'lmfit':
            params1 = Parameters()
            params1.add('dlam_rh', value=dlam_rh,
                        min=dlam_rh_range[0], max=dlam_rh_range[1])
            params1.add('phi', value=phi,
                        min=phi_range[0], max=phi_range[1])

            def residual1(params, rh_exp, rd_exp):
                dlam_rh = params['dlam_rh'].value
                phi = params['phi'].value
                return [
                    self.rhcalc(nh, dlam_rh, phi) - rh_exp,
                    self.rdcalc(nh, dlam_rh, phi) - rd_exp
                ]

            mini = Minimizer(
                residual1,
                params1,
                fcn_args=(rh_exp, rd_exp),
                # nan_policy='omit',
            )
            soln1 = mini.leastsq(
                # xtol=1e-7,
                # ftol=1e-7,
            )

            print(fit_report(soln1))  # testprint
            logger.info('success', soln1.success)
            logger.info('message', soln1.message)
            logger.info('lmdif_message', soln1.lmdif_message)

            dlam_rh = soln1.params.get('dlam_rh').value
            phi = soln1.params.get('phi').value
            drho = self.drho(n1, delfstar, dlam_rh, phi)
            grho_rh = self.grho_from_dlam(self.rh, drho, dlam_rh, phi)
        else:  # scipy
            # lower bounds on dlam3 and phi
            lb = np.array([dlam_rh_range[0], phi_range[0]])
            # upper bounds on dlam3 and phi
            ub = np.array([dlam_rh_range[1], phi_range[1]])

            def ftosolve(x):
                return [
                    self.rhcalc(nh, x[0], x[1]) - rh_exp,
                    self.rdcalc(nh, x[0], x[1]) - rd_exp
                ]

            x0 = np.array([dlam_rh, phi])
            logger.info(x0)
            soln1 = least_squares(ftosolve, x0, bounds=(lb, ub))
            logger.info(soln1['x'])
            dlam_rh = soln1['x'][0]
            phi = soln1['x'][1]
            drho = self.drho(n1, delfstar, dlam_rh, phi)
            grho_rh = self.grho_from_dlam(self.rh, drho, dlam_rh, phi)

        logger.info('solution of 1st solving:')
        logger.info('dlam_rh', dlam_rh)
        logger.info('phi', phi)
        logger.info('drho', drho)
        logger.info('grho_rh', grho_rh)

        # we solve it again to get the Jacobian with respect to our actual
        # fit parameters
        if (drho_range[0] <= drho <= drho_range[1]
                and grho_rh_range[0] <= grho_rh <= grho_rh_range[1]
                and phi_range[0] <= phi <= phi_range[1]):
            logger.info('1st solution in range')
            if fit_method == 'lmfit':
                params2 = Parameters()
                params2.add('drho', value=drho,
                            min=drho_range[0], max=drho_range[1])
                params2.add('grho_rh', value=grho_rh,
                            min=grho_rh_range[0], max=grho_rh_range[1])
                params2.add('phi', value=phi,
                            min=phi_range[0], max=phi_range[1])

                def residual2(params, delfstar, overlayer, n1, n2, n3):
                    drho = params['drho'].value
                    grho_rh = params['grho_rh'].value
                    phi = params['phi'].value
                    return ([
                        np.real(delfstar[n1]) - np.real(
                            self.delfstarcalc(n1, drho, grho_rh, phi,
                                              overlayer)),
                        np.real(delfstar[n2]) - np.real(
                            self.delfstarcalc(n2, drho, grho_rh, phi,
                                              overlayer)),
                        np.imag(delfstar[n3]) - np.imag(
                            self.delfstarcalc(n3, drho, grho_rh, phi,
                                              overlayer))
                    ])

                mini = Minimizer(
                    residual2,
                    params2,
                    fcn_args=(delfstar, overlayer, n1, n2, n3),
                    # nan_policy='omit',
                )
                soln2 = mini.least_squares(
                    # xtol=1e-7,
                    # ftol=1e-7,
                )

                logger.info(soln2.params.keys())
                logger.info(soln2.params['drho'])
                print(fit_report(soln2))
                print('success', soln2.success)
                print('message', soln2.message)

                # put the input uncertainties into a 3 element vector
                delfstar_err = np.zeros(3)
                delfstar_err[0] = np.real(self.fstar_err_calc(delfstar[n1]))
                delfstar_err[1] = np.real(self.fstar_err_calc(delfstar[n2]))
                delfstar_err[2] = np.imag(self.fstar_err_calc(delfstar[n3]))

                # initialize the uncertainties

                # recalculate solution to give the uncertainty, if solution
                # is viable
                drho = soln2.params.get('drho').value
                grho_rh = soln2.params.get('grho_rh').value
                phi = soln2.params.get('phi').value
                dlam_rh = self.d_lamcalc(self.rh, drho, grho_rh, phi)

                jac = soln2.params.get('jac')  # TODO ???
                logger.info('jac', jac)
                jac_inv = np.linalg.inv(jac)

                for i, k in enumerate(err_names):
                    deriv[k] = {0: jac_inv[i, 0], 1: jac_inv[i, 1],
                                2: jac_inv[i, 2]}
                    err[k] = ((jac_inv[i, 0] * delfstar_err[0])**2
                              + (jac_inv[i, 1] * delfstar_err[1])**2
                              + (jac_inv[i, 2] * delfstar_err[2])**2)**0.5
            else:  # scipy
                x0 = np.array([drho, grho_rh, phi])
                # lower bounds drho, grho3, phi
                lb = np.array([drho_range[0], grho_rh_range[0],
                               phi_range[0]])
                # upper bounds drho, grho3, phi
                ub = np.array([drho_range[1], grho_rh_range[1],
                               phi_range[1]])

                def ftosolve2(x):
                    return ([
                        np.real(delfstar[n1]) - np.real(
                            self.delfstarcalc(n1, x[0], x[1], x[2],
                                              overlayer)),
                        np.real(delfstar[n2]) - np.real(
                            self.delfstarcalc(n2, x[0], x[1], x[2],
                                              overlayer)),
                        np.imag(delfstar[n3]) - np.imag(
                            self.delfstarcalc(n3, x[0], x[1], x[2],
                                              overlayer))
                    ])

                # put the input uncertainties into a 3 element vector
                delfstar_err = np.zeros(3)
                delfstar_err[0] = np.real(self.fstar_err_calc(delfstar[n1]))
                delfstar_err[1] = np.real(self.fstar_err_calc(delfstar[n2]))
                delfstar_err[2] = np.imag(self.fstar_err_calc(delfstar[n3]))

                # recalculate solution to give the uncertainty, if solution
                # is viable
                soln2 = least_squares(ftosolve2, x0, bounds=(lb, ub))
                drho = soln2['x'][0]
                grho_rh = soln2['x'][1]
                phi = soln2['x'][2]
                dlam_rh = self.d_lamcalc(self.rh, drho, grho_rh, phi)

                jac = soln2['jac']
                logger.info('jac', jac)
                jac_inv = np.linalg.inv(jac)
                logger.info('jac_inv', jac_inv)

                for i, k in enumerate(err_names):
                    deriv[k] = {0: jac_inv[i, 0], 1: jac_inv[i, 1],
                                2: jac_inv[i, 2]}
                    err[k] = ((jac_inv[i, 0] * delfstar_err[0])**2
                              + (jac_inv[i, 1] * delfstar_err[1])**2
                              + (jac_inv[i, 2] * delfstar_err[2])**2)**0.5

    if np.isnan(rd_exp) or np.isnan(rh_exp) or not deriv or not err:
        # failed to solve the problem
        print('2nd solving failed')
        # assign the default values first
        drho = np.nan
        grho_rh = np.nan
        phi = np.nan
        dlam_rh = np.nan
        for k in err_names:
            err[k] = np.nan

    logger.info('drho', drho)
    logger.info('grho_rh', grho_rh)
    logger.info('phi', phi)
    logger.info('dlam_rh', dlam_rh)
    logger.info('err', err)

    return drho, grho_rh, phi, dlam_rh, err
OutputFormatter.printConfig(configJson) # close config file configFile.close() config = Config(configJson) # retrieve params from config params = config.getParams() data = config.getData() soly = config.getSoly() OutputFormatter.printExperimentaldata(data, soly) minimizer = Minimizer(ExcessSaltSolubilityModel.residual, params, fcn_args=(data, soly)) out = minimizer.leastsq() # show output #lmfit.printfuncs.report_fit(out.params) print(lmfit.fit_report(out)) # confidence # ci = lmfit.conf_interval(minimizer, out) # show output # lmfit.printfuncs.report_ci(ci) # print results
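# To also report confidence intervals (the commented-out steps above), the
# Minimizer and its result can be passed to lmfit.conf_interval(); note that
# this can fail when a parameter sits at a bound, which is presumably why it
# is left optional here:
# ci = lmfit.conf_interval(minimizer, out, sigmas=[1, 2])
# lmfit.printfuncs.report_ci(ci)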
def f(params):
    return f1(params) + f2(params) + f3(params)


###############################################################################
# Just as in the documentation we will do a grid search between ``-4`` and
# ``4`` and use a stepsize of ``0.25``. The bounds can be set as usual with
# the ``min`` and ``max`` attributes, and the stepsize is set using
# ``brute_step``.
params['x'].set(min=-4, max=4, brute_step=0.25)
params['y'].set(min=-4, max=4, brute_step=0.25)

###############################################################################
# Performing the actual grid search is done with:
fitter = Minimizer(f, params)
result = fitter.minimize(method='brute')

###############################################################################
# This will increment ``x`` and ``y`` from ``-4`` to ``4`` (not inclusive) in
# steps of ``0.25``.
grid_x, grid_y = (np.unique(par.ravel()) for par in result.brute_grid)
print(grid_x)

###############################################################################
# The objective function is evaluated on this grid, and the raw output from
# ``scipy.optimize.brute`` is stored in the MinimizerResult as
# ``brute_<parname>`` attributes. These attributes are:
#
# ``result.brute_x0`` -- A 1-D array containing the coordinates of a point at
# which the objective function had its minimum value.
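###############################################################################
# For instance, the location of the grid minimum found above can be read back
# directly (``brute_fval`` is its companion attribute holding the objective
# function value at that point):

print(result.brute_x0)
print(result.brute_fval)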
"""Calculate cubic growth and subtract data""" #Get an ordered dictionary of parameter values v = params.valuesdict() #Cubic model model = v['a'] * t**3 + v['b'] * t**2 + v['c'] * t + v['d'] return model - data #Return residuals # In[7]: #Create a Minimizer object minner = Minimizer(residuals_linear, params_linear, fcn_args=(t, np.log(N_rand))) #Perform the minimization fit_linear_NLLS = minner.minimize() # The variable `fit_linear` belongs to a class called [`MinimizerResult`](https://lmfit.github.io/lmfit-py/fitting.html#lmfit.minimizer.MinimizerResult), which include data such as status and error messages, fit statistics, and the updated (i.e., best-fit) parameters themselves in the params attribute. # # Now get the summary of the fit: # In[8]: report_fit(fit_linear_NLLS) # ### Using OLS #
def fit_multigaussian(spec, vcent=None, err=None, max_comp=10, amp_const=None, cent_const=None, sigma_const=None, sigma_init=10., verbose=True, plot_fit=True, min_delta_BIC=5., min_sigma_intensity=5, return_model=False, discrete_fitter=False, discrete_oversamp=2): ''' Increase the number of fitted Gaussians to find a minimum in AIC or BIC. ''' spec = spec.with_spectral_unit(u.km / u.s) vels = spec.spectral_axis # Set parameter limits: if amp_const is None: amp_min = 0. amp_max = 1.1 * np.nanmax(spec.filled_data[:].value) else: amp_min = amp_const[0] amp_max = amp_const[1] if cent_const is None: cent_min = vels.value.min() - 0.1 * np.ptp(vels.value) cent_max = vels.value.max() + 0.1 * np.ptp(vels.value) else: cent_min = cent_const[0] cent_max = cent_const[1] if sigma_const is None: sig_min = np.abs(np.diff(vels.value)[0]) sig_max = 0.3 * np.ptp(vels.value) else: sig_min = sigma_const[0] sig_max = sigma_const[1] if vcent is None: vcent = np.mean(vels.value) else: vcent = vcent.to(u.km / u.s).value # Currently assuming all spectra have some signal in them. aics = [] bics = [] fit_outputs = [] pfit = Parameters() valid_data = np.isfinite(spec.filled_data[:]) yfit = spec.filled_data[:].value[valid_data] xfit = spec.spectral_axis.value[valid_data] # Upsample for computing over discrete bins chan_width = np.abs(np.diff(vels.value)[0]) order_sign = 1. if vels[-1] > vels[0] else -1. # You really need to rewrite this to be faster. assert discrete_oversamp > 1. xfit_upsamp = np.linspace(vels.value[0] - order_sign * 0.5 * chan_width, vels.value[-1] + order_sign * 0.5 * chan_width, vels.size * discrete_oversamp) for nc in range(1, max_comp + 1): if verbose: print(f"Now fitting with {nc} components.") # Place the centre at the largest positive residual within the bounds. if nc > 1: tpeak = 20 vel_peakresid = spec.spectral_axis.value[np.argmax(fit_residual)] if vel_peakresid >= cent_min and vel_peakresid <= cent_max: v_guess = vel_peakresid tpeak = fit_residual[np.argmax(fit_residual)] else: v_guess = vcent if tpeak < amp_min: tpeak = 20. pfit.add(name=f'amp{nc}', value=tpeak, min=amp_min, max=amp_max) pfit.add(name=f'cent{nc}', value=v_guess, min=cent_min, max=cent_max) else: tpeak = 20. pfit.add(name=f'amp{nc}', value=tpeak, min=amp_min, max=amp_max) pfit.add(name=f'cent{nc}', value=vcent, min=cent_min, max=cent_max) # Setup a minimum relation between the amp. and line width. 
# pfit.add(name=f'integral{nc}', # value=sigma_init * tpeak, # min=err.value * sig_min * min_sigma_intensity, # max=amp_max * sig_max) # # expr=f'amp{nc} * sigma{nc}') pfit.add(name=f'sigma{nc}', value=np.random.uniform(sigma_init - 2, sigma_init + 2), # expr=f'integral{nc} / amp{nc}', min=sig_min, max=sig_max,) mini = Minimizer(residual_multigauss, pfit, fcn_args=(xfit, xfit_upsamp, yfit, err if err is not None else 1., discrete_fitter), max_nfev=vels.size * 1000) out = mini.leastsq() # out = mini.minimize(method='differential_evolution') if not out.success: raise ValueError("Fit failed.") if verbose: report_fit(out) model = multigaussian(vels.value, out.params) if plot_fit: plt.plot(vels.value, spec.filled_data[:], drawstyle='steps-mid') plt.plot(vels.value, model) for n in range(1, nc + 1): plt.plot(vels.value, gaussian(vels.value, out.params[f"amp{n}"], out.params[f"cent{n}"], out.params[f"sigma{n}"])) plt.plot(vels.value, spec.filled_data[:].value - model, '--', zorder=-10) plt.draw() input(f"{nc}?") plt.clf() if nc > 1: if verbose: print(f"BIC1: {out.bic}; BIC0: {bics[-1]}") if bics[-1] - out.bic < min_delta_BIC: if verbose: print(f"Final model with {nc - 1} components.") break else: # n=1 cases tests against a noise model. err_norm = err.value if err is not None else 1. no_model_rss = np.nansum((yfit / err_norm)**2) no_model_bic = yfit.size * np.log(no_model_rss / yfit.size) no_fit_model = False if verbose: print(f"BIC1: {out.bic}; BIC0: {no_model_bic}") if no_model_bic - out.bic < min_delta_BIC: if verbose: print("No components preferred. Consistent with noise.") no_fit_model = True bics.append(no_model_bic) pfit = Parameters() pfit.add('amp1', value=0.) pfit.add('cent1', value=0.) # pfit.add('integral1', value=0.) pfit.add('sigma1', value=0.) fit_outputs.append(pfit) break # Smooth the residual to ensure the peak chosen for # the next component # is not a single large noise value # fit_residual = np.abs(convolve_fft(yfit - model, # Gaussian1DKernel(3))) fit_residual = yfit - model aics.append(out.aic) bics.append(out.bic) fit_outputs.append(out) # Exit if max residual is small # if fit_residual.max() < 3 * err.value: # if verbose: # print("Max residual below 3-sigma.") # break fit_residual = convolve_fft(fit_residual, Gaussian1DKernel(3)) # Update parameters for next fit # With too few components, we often get bright, extremely wide # components # To avoid their influence, we will update the component amp and # cent, only. for ncc in range(1, nc + 1): pfit[f'amp{ncc}'].value = out.params[f'amp{ncc}'].value pfit[f'cent{ncc}'].value = out.params[f'cent{ncc}'].value # if out.params[f'sigma{ncc}'].value > sigma_init: # pfit[f'integral{ncc}'].value = sigma_init * out.params[f'amp{ncc}'].value # else: # pfit[f'integral{ncc}'].value = out.params[f'integral{ncc}'].value # pfit[f'sigma{ncc}'].value = min(out.params[f'sigma{ncc}'].value, # sigma_init) # new_sigma = out.params[f'sigma{ncc}'].value # if new_sigma < sig_min: new_sigma = np.random.uniform(sigma_init - 2, sigma_init + 2) pfit[f'sigma{ncc}'].value = max(new_sigma, 2 * sig_min) # pfit = out.params.copy() if return_model: if no_fit_model: return fit_outputs[0], vels.value, np.zeros_like(vels.value) model = multigaussian(vels.value, fit_outputs[-1].params) return fit_outputs[-1], vels.value, model if no_fit_model: return fit_outputs[0] return fit_outputs[-1]
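# ``residual_multigauss`` and ``multigaussian`` are defined elsewhere in this
# module; a minimal sketch of what the fitters above assume. The discrete
# branch here averages the upsampled model back onto the original channels,
# which is an assumption about the implementation, not necessarily the exact
# one used (hence the ``_sketch`` suffixes):
import numpy as np

def multigaussian_sketch(vels, params):
    # sum of ampN * exp(-0.5*((v - centN)/sigmaN)^2) over all components
    ncomp = len(params) // 3
    model = np.zeros_like(vels)
    for i in range(1, ncomp + 1):
        amp = params[f'amp{i}'].value
        cent = params[f'cent{i}'].value
        sigma = params[f'sigma{i}'].value
        model += amp * np.exp(-0.5 * ((vels - cent) / sigma)**2)
    return model

def residual_multigauss_sketch(params, vels, vels_up, data, err,
                               discrete_fitter):
    if discrete_fitter:
        # evaluate on the oversampled axis, then bin back to the channels
        up = multigaussian_sketch(vels_up, params)
        nbin = vels_up.size // vels.size
        model = up[:nbin * vels.size].reshape(vels.size, nbin).mean(axis=1)
    else:
        model = multigaussian_sketch(vels, params)
    return (model - data) / err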
params.add('satco1', value=5e-06, max=1e-04, min=1e-07)  # , vary=False)
params.add('poros1', value=0.450, max=0.5, min=0.3)  # , vary=False)
params.add('bee26', value=7.12, max=15.0, min=2.0)
params.add('phsat26', value=-0.2, max=-0.01, min=-0.5)
params.add('satco26', value=5e-06, max=1e-04, min=1e-07)
params.add('poros2', value=0.450, max=0.5, min=0.3)
params.add('poros3', value=0.450, max=0.5, min=0.3)
params.add('poros4', value=0.450, max=0.5, min=0.3)
params.add('poros5', value=0.450, max=0.5, min=0.3)
params.add('poros6', value=0.450, max=0.5, min=0.3)

otimiza = Minimizer(residualSiB2, params, reduce_fcn=None, calc_covar=True,
                    fcn_args=(www1_o, swc_o, posval, nlinha))
# out_leastsq = otimiza.leastsq()
out_leastsq = otimiza.minimize(method='leastsq')  # Levenberg-Marquardt
# report_fit(out_leastsq.params)
# report_fit(out_leastsq)

print('###################################################')
print('Module: Soil moisture')
print('---Parameters---')
params.pretty_print()
print('---Optimization---')
report_fit(out_leastsq)
def fit_gaussian(spec, vels=None, vcent=None, err=None, amp_const=None, cent_const=None, sigma_const=None, verbose=True, plot_fit=True, use_emcee=False, emcee_kwargs={}): ''' ''' if vels is None: spec = spec.with_spectral_unit(u.km / u.s) vels = spec.spectral_axis # Set parameter limits: if amp_const is None: amp_min = 0. amp_max = 1.1 * np.nanmax(spec.value) else: amp_min = amp_const[0] amp_max = amp_const[1] if cent_const is None: cent_min = vels.value.min() - 0.1 * np.ptp(vels.value) cent_max = vels.value.max() + 0.1 * np.ptp(vels.value) else: cent_min = cent_const[0] cent_max = cent_const[1] if sigma_const is None: sig_min = np.abs(np.diff(vels.value)[0]) sig_max = 0.3 * np.ptp(vels.value) else: sig_min = sigma_const[0] sig_max = sigma_const[1] if vcent is None: vcent = np.mean(vels.value) else: vcent = vcent.to(u.km / u.s).value pfit = Parameters() pfit.add(name='amp', value=20., min=amp_min, max=amp_max) pfit.add(name='cent', value=vcent, min=cent_min, max=cent_max) pfit.add(name='sigma', value=10., min=sig_min, max=sig_max) # valid_data = np.isfinite(spec.filled_data[:]) # yfit = spec.filled_data[:].value[valid_data] # xfit = spec.spectral_axis.value[valid_data] yfit = spec.value xfit = vels.value mini = Minimizer(residual_single, pfit, fcn_args=(xfit, yfit, err if err is not None else 1.)) out = mini.leastsq() if use_emcee: mini = Minimizer(residual_single, out.params, fcn_args=(xfit, yfit, err if err is not None else 1.)) out = mini.emcee(**emcee_kwargs) if plot_fit: plt.plot(vels.value, spec.value, drawstyle='steps-mid') model = gaussian(vels.value, out.params["amp"], out.params["cent"], out.params["sigma"]) plt.plot(vels.value, model) plt.plot(vels.value, spec.value - model, '--', zorder=-10) plt.draw() return out
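# Example call (illustrative): refine the least-squares solution with emcee.
# The keywords are passed straight through to Minimizer.emcee(), so anything
# it accepts (steps, burn, thin, nwalkers, ...) can go in emcee_kwargs:
# out = fit_gaussian(spec, err=0.1, use_emcee=True,
#                    emcee_kwargs={'steps': 1000, 'burn': 300, 'thin': 10})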
def main(raft_id, directory): sensor_names = [ 'S00', 'S01', 'S02', 'S10', 'S11', 'S12', 'S20', 'S21', 'S22' ] for sensor_name in sensor_names: sensor_id = '{0}_{1}'.format(raft_id, sensor_name) print("Starting sensor {0}".format(sensor_id)) try: #### ## ## Fit Local Electronic Offset Effect ## #### ## Config variables start = 3 stop = 13 max_signal = 150000. error = 7.0 / np.sqrt(2000.) ## Get existing overscan analysis results hdulist = fits.open( join(directory, raft_id, sensor_name, '{0}_overscan_results.fits'.format(sensor_id))) cti_results = {i: 0.0 for i in range(1, 17)} drift_scales = {i: 0.0 for i in range(1, 17)} decay_times = {i: 0.0 for i in range(1, 17)} ## CCD geometry info ncols = ITL_AMP_GEOM.nx + ITL_AMP_GEOM.prescan_width for amp in range(1, 17): ## Signals all_signals = hdulist[amp].data['FLATFIELD_SIGNAL'] signals = all_signals[all_signals < max_signal] ## Data data = hdulist[amp].data['COLUMN_MEAN'][ all_signals < max_signal, start:stop + 1] params = Parameters() params.add('ctiexp', value=-6, min=-7, max=-5, vary=False) params.add('trapsize', value=0.0, min=0., max=10., vary=False) params.add('scaling', value=0.08, min=0, max=1.0, vary=False) params.add('emissiontime', value=0.4, min=0.1, max=1.0, vary=False) params.add('driftscale', value=0.00022, min=0., max=0.001) params.add('decaytime', value=2.4, min=0.1, max=4.0) model = SimpleModel() minner = Minimizer(model.difference, params, fcn_args=(signals, data, error, ncols), fcn_kws={ 'start': start, 'stop': stop }) result = minner.minimize() if result.success: cti = 10**result.params['ctiexp'] drift_scale = result.params['driftscale'] decay_time = result.params['decaytime'] cti_results[amp] = cti drift_scales[amp] = drift_scale.value decay_times[amp] = decay_time.value else: print("Electronics fitting failure: Amp{0}".format(amp)) cti = 10**result.params['ctiexp'] cti_results[amp] = cti drift_scales[amp] = 0.0 decay_times[amp] = 2.4 param_results = OverscanParameterResults(sensor_id, cti_results, drift_scales, decay_times) #### ## ## Fit Global CTI ## #### start = 1 stop = 2 max_signal = 10000. error = 7.0 / np.sqrt(2000.) 
num_transfers = ITL_AMP_GEOM.nx + ITL_AMP_GEOM.prescan_width cti_results = {amp: 0.0 for amp in range(1, 17)} drift_scales = param_results.drift_scales decay_times = param_results.decay_times ncols = ITL_AMP_GEOM.nx + ITL_AMP_GEOM.prescan_width for amp in range(1, 17): ## Signals all_signals = hdulist[amp].data['FLATFIELD_SIGNAL'] signals = all_signals[all_signals < max_signal] ## Data data = hdulist[amp].data['COLUMN_MEAN'][ all_signals < max_signal, start:stop + 1] ## CTI test lastpixel = signals overscan1 = data[:, 0] overscan2 = data[:, 1] test = (overscan1 + overscan2) / (ncols * lastpixel) if np.median(test) > 5.E-6: params = Parameters() params.add('ctiexp', value=-6, min=-7, max=-5, vary=True) params.add('trapsize', value=5.0, min=0., max=30., vary=True) params.add('scaling', value=0.08, min=0, max=1.0, vary=True) params.add('emissiontime', value=0.35, min=0.1, max=1.0, vary=True) params.add('driftscale', value=drift_scales[amp], min=0., max=0.001, vary=False) params.add('decaytime', value=decay_times[amp], min=0.1, max=4.0, vary=False) model = SimulatedModel() minner = Minimizer(model.difference, params, fcn_args=(signals, data, error, num_transfers, ITL_AMP_GEOM), fcn_kws={ 'start': start, 'stop': stop, 'trap_type': 'linear' }) result = minner.minimize() else: params = Parameters() params.add('ctiexp', value=-6, min=-7, max=-5, vary=True) params.add('trapsize', value=0.0, min=0., max=10., vary=False) params.add('scaling', value=0.08, min=0, max=1.0, vary=False) params.add('emissiontime', value=0.35, min=0.1, max=1.0, vary=False) params.add('driftscale', value=drift_scales[amp], min=0., max=0.001, vary=False) params.add('decaytime', value=decay_times[amp], min=0.1, max=4.0, vary=False) model = SimulatedModel() minner = Minimizer(model.difference, params, fcn_args=(signals, data, error, num_transfers, ITL_AMP_GEOM), fcn_kws={ 'start': start, 'stop': stop, 'trap_type': 'linear' }) result = minner.minimize() cti_results[amp] = 10**result.params['ctiexp'].value param_results.cti_results = cti_results outfile = join(directory, raft_id, sensor_name, '{0}_parameter_results.fits'.format(sensor_id)) param_results.write_fits(outfile, overwrite=True) #### ## ## Determine Localized Trapping ## #### start = 1 stop = 20 max_signal = 150000. 
for amp in range(1, 17): ## Signals all_signals = hdulist[amp].data['FLATFIELD_SIGNAL'] signals = all_signals[all_signals < max_signal] ## Data data = hdulist[amp].data['COLUMN_MEAN'][ all_signals < max_signal, start:stop + 1] ## Second model: model with electronics params = Parameters() params.add('ctiexp', value=np.log10(param_results.cti_results[amp]), min=-7, max=-4, vary=False) params.add('trapsize', value=0.0, min=0., max=10., vary=False) params.add('scaling', value=0.08, min=0, max=1.0, vary=False) params.add('emissiontime', value=0.35, min=0.1, max=1.0, vary=False) params.add('driftscale', value=param_results.drift_scales[amp], min=0., max=0.001, vary=False) params.add('decaytime', value=param_results.decay_times[amp], min=0.1, max=4.0, vary=False) model = SimpleModel.model_results(params, signals, num_transfers, start=start, stop=stop) res = np.sum((data - model)[:, :3], axis=1) new_signals = hdulist[amp].data['COLUMN_MEAN'][ all_signals < max_signal, 0] rescale = param_results.drift_scales[amp] * new_signals new_signals = np.asarray(new_signals - rescale, dtype=np.float64) x = new_signals y = np.maximum(0, res) # Pad left with ramp y = np.pad(y, (10, 0), 'linear_ramp', end_values=(0, 0)) x = np.pad(x, (10, 0), 'linear_ramp', end_values=(0, 0)) # Pad right with constant y = np.pad(y, (1, 1), 'constant', constant_values=(0, y[-1])) x = np.pad(x, (1, 1), 'constant', constant_values=(-1, 200000.)) f = interp.interp1d(x, y) spltrap = SplineTrap(f, 0.4, 1) pickle.dump( spltrap, open( join(directory, raft_id, sensor_name, '{0}_amp{1}_trap.pkl'.format(sensor_id, amp)), 'wb')) hdulist.close() except Exception as e: print("Error occurred for {0}!".format(sensor_id)) print(e) continue
def pre_edge(energy, mu=None, group=None, e0=None, step=None, nnorm=3,
             nvict=0, pre1=None, pre2=-50, norm1=100, norm2=None,
             make_flat=True, _larch=None):
    """pre edge subtraction, normalization for XAFS

    This performs a number of steps:
       1. determine E0 (if not supplied) from max of deriv(mu)
       2. fit a line or polynomial to the region below the edge
       3. fit a polynomial to the region above the edge
       4. extrapolate the two curves to E0 to determine the edge jump

    Arguments
    ----------
    energy:    array of x-ray energies, in eV, or group (see note)
    mu:        array of mu(E)
    group:     output group
    e0:        edge energy, in eV. If None, it will be determined here.
    step:      edge jump. If None, it will be determined here.
    pre1:      low E range (relative to E0) for pre-edge fit
    pre2:      high E range (relative to E0) for pre-edge fit
    nvict:     energy exponent to use for pre-edge fit. See Note
    norm1:     low E range (relative to E0) for post-edge fit
    norm2:     high E range (relative to E0) for post-edge fit
    nnorm:     degree of polynomial (ie, nnorm+1 coefficients will be
               found) for post-edge normalization curve.
               Default=3 (quadratic), max=5
    make_flat: boolean (Default True) to calculate flattened output.

    Returns
    -------
    None

    The following attributes will be written to the output group:
        e0          energy origin
        edge_step   edge step
        norm        normalized mu(E)
        flat        flattened, normalized mu(E)
        pre_edge    determined pre-edge curve
        post_edge   determined post-edge, normalization curve
        dmude       derivative of mu(E)

    (if the output group is None, _sys.xafsGroup will be written to)

    Notes
    -----
    1. nvict gives an exponent to the energy term for the fits to the
       pre-edge and the post-edge region. For the pre-edge, a line
       (m * energy + b) is fit to mu(energy)*energy**nvict over the pre-edge
       region, energy=[e0+pre1, e0+pre2]. For the post-edge, a polynomial of
       order nnorm will be fit to mu(energy)*energy**nvict of the post-edge
       region energy=[e0+norm1, e0+norm2].

    2. If the first argument is a Group, it must contain 'energy' and 'mu'.
       If it exists, group.e0 will be used as e0.
       See First Argument Group in Documentation
    """
    energy, mu, group = parse_group_args(energy, members=('energy', 'mu'),
                                         defaults=(mu,), group=group,
                                         fcn_name='pre_edge')
    if len(energy.shape) > 1:
        energy = energy.squeeze()
    if len(mu.shape) > 1:
        mu = mu.squeeze()

    pre_dat = preedge(energy, mu, e0=e0, step=step, nnorm=nnorm, nvict=nvict,
                      pre1=pre1, pre2=pre2, norm1=norm1, norm2=norm2)

    group = set_xafsGroup(group, _larch=_larch)
    e0 = pre_dat['e0']
    norm = pre_dat['norm']
    norm1 = pre_dat['norm1']
    norm2 = pre_dat['norm2']
    # generate flattened spectra, by fitting a quadratic to .norm
    # and removing that.
flat = norm ie0 = index_nearest(energy, e0) p1 = index_of(energy, norm1 + e0) p2 = index_nearest(energy, norm2 + e0) if p2 - p1 < 2: p2 = min(len(energy), p1 + 2) if make_flat and p2 - p1 > 4: enx, mux = remove_nans2(energy[p1:p2], norm[p1:p2]) # enx, mux = (energy[p1:p2], norm[p1:p2]) fpars = Parameters() fpars.add('c0', value=0, vary=True) fpars.add('c1', value=0, vary=True) fpars.add('c2', value=0, vary=True) fit = Minimizer(flat_resid, fpars, fcn_args=(enx, mux)) result = fit.leastsq(xtol=1.e-6, ftol=1.e-6) fc0 = result.params['c0'].value fc1 = result.params['c1'].value fc2 = result.params['c2'].value flat_diff = fc0 + energy * (fc1 + energy * fc2) flat = norm - flat_diff + flat_diff[ie0] flat[:ie0] = norm[:ie0] group.e0 = e0 group.norm = norm group.flat = flat group.dmude = np.gradient(mu) / np.gradient(energy) group.edge_step = pre_dat['edge_step'] group.pre_edge = pre_dat['pre_edge'] group.post_edge = pre_dat['post_edge'] group.pre_edge_details = Group() group.pre_edge_details.pre1 = pre_dat['pre1'] group.pre_edge_details.pre2 = pre_dat['pre2'] group.pre_edge_details.nnorm = pre_dat['nnorm'] group.pre_edge_details.norm1 = pre_dat['norm1'] group.pre_edge_details.norm2 = pre_dat['norm2'] group.pre_edge_details.pre_slope = pre_dat['precoefs'][0] group.pre_edge_details.pre_offset = pre_dat['precoefs'][1] for i in range(MAX_NNORM): if hasattr(group, 'norm_c%i' % i): delattr(group, 'norm_c%i' % i) for i, c in enumerate(pre_dat['norm_coefs']): setattr(group.pre_edge_details, 'norm_c%i' % i, c) return
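def flat_resid_sketch(pars, en, mu):
    """Sketch of the quadratic residual used above for flattening. The real
    flat_resid lives elsewhere in this package, but it must return the
    difference between a quadratic in energy (c0 + c1*E + c2*E**2) and the
    normalized mu(E); the overall sign is irrelevant for least squares."""
    quad = pars['c0'].value + en * (pars['c1'].value + en * pars['c2'].value)
    return quad - mu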
def test_constraints(with_plot=True): with_plot = with_plot and WITHPLOT def residual(pars, x, sigma=None, data=None): yg = gaussian(x, pars['amp_g'].value, pars['cen_g'].value, pars['wid_g'].value) yl = lorentzian(x, pars['amp_l'].value, pars['cen_l'].value, pars['wid_l'].value) slope = pars['line_slope'].value offset = pars['line_off'].value model = yg + yl + offset + x * slope if data is None: return model if sigma is None: return (model - data) return (model - data) / sigma n = 201 xmin = 0. xmax = 20.0 x = linspace(xmin, xmax, n) data = (gaussian(x, 21, 8.1, 1.2) + lorentzian(x, 10, 9.6, 2.4) + random.normal(scale=0.23, size=n) + x*0.5) if with_plot: pylab.plot(x, data, 'r+') pfit = Parameters() pfit.add(name='amp_g', value=10) pfit.add(name='cen_g', value=9) pfit.add(name='wid_g', value=1) pfit.add(name='amp_tot', value=20) pfit.add(name='amp_l', expr='amp_tot - amp_g') pfit.add(name='cen_l', expr='1.5+cen_g') pfit.add(name='wid_l', expr='2*wid_g') pfit.add(name='line_slope', value=0.0) pfit.add(name='line_off', value=0.0) sigma = 0.021 # estimate of data error (for all data points) myfit = Minimizer(residual, pfit, fcn_args=(x,), fcn_kws={'sigma':sigma, 'data':data}, scale_covar=True) myfit.prepare_fit() init = residual(myfit.params, x) result = myfit.leastsq() print(' Nfev = ', result.nfev) print( result.chisqr, result.redchi, result.nfree) report_fit(result.params, min_correl=0.3) fit = residual(result.params, x) if with_plot: pylab.plot(x, fit, 'b-') assert(result.params['cen_l'].value == 1.5 + result.params['cen_g'].value) assert(result.params['amp_l'].value == result.params['amp_tot'].value - result.params['amp_g'].value) assert(result.params['wid_l'].value == 2 * result.params['wid_g'].value) # now, change fit slightly and re-run myfit.params['wid_l'].expr = '1.25*wid_g' result = myfit.leastsq() report_fit(result.params, min_correl=0.4) fit2 = residual(result.params, x) if with_plot: pylab.plot(x, fit2, 'k') pylab.show() assert(result.params['cen_l'].value == 1.5 + result.params['cen_g'].value) assert(result.params['amp_l'].value == result.params['amp_tot'].value - result.params['amp_g'].value) assert(result.params['wid_l'].value == 1.25 * result.params['wid_g'].value)
    m2 = parvals['poly']
    b = parvals['intercept']
    a = parvals['amp']
    newe = m1 * channel + m2 * channel * channel + b
    newe[0] = 0.0001
    model = np.interp(energy, newe, counts)
    return model * a - simcount


params = Parameters()
params.add('slope', value=0)
params.add('intercept', value=0)
params.add('poly', value=0)
params.add('amp', value=100, min=0)

# Note: iter_cb expects a callable that is invoked after each iteration, so an
# integer there does nothing useful; if the intent was to cap the number of
# function evaluations, max_nfev is the right keyword.
minner = Minimizer(residual, params, fcn_args=(channel, simcount, counts),
                   max_nfev=1000, nan_policy='propagate')
result = minner.minimize()
final = simcount + result.residual
"""
report_fit(result)
plt.semilogy(energy, simcount, 'r')
plt.semilogy(channel, counts, 'b')
plt.show()
"""
print(len(simcount))
def refit_multigaussian(spec, init_params, vels=None, vcent=None, err=None,
                        amp_const=None, cent_const=None, sigma_const=None,
                        component_sigma=5., nchan_component_sigma=3.,
                        discrete_fitter=False):
    '''
    Given a full set of initial parameters, refit the spectrum.
    '''

    # if len(init_params) < 3:
    #     raise ValueError("Less than 3 initial parameters given.")

    if vels is None:
        spec = spec.with_spectral_unit(u.m / u.s)
        vels = spec.spectral_axis

    chan_width = np.abs(np.diff(vels)[0]).value

    if err is None:
        # Can't remove components if we don't know what the error is;
        # returning inf keeps every component "significant".
        def comp_sig(amp, sigma):
            return np.inf
    else:
        def comp_sig(amp, sigma):
            return (amp * sigma) / \
                (err * chan_width * nchan_component_sigma)

    # Set parameter limits:
    if amp_const is None:
        amp_min = 0.
        amp_max = 1.1 * np.nanmax(spec.value)
    else:
        amp_min = amp_const[0]
        amp_max = amp_const[1]

    if cent_const is None:
        cent_min = vels.value.min() - 0.1 * np.ptp(vels.value)
        cent_max = vels.value.max() + 0.1 * np.ptp(vels.value)
    else:
        cent_min = cent_const[0]
        cent_max = cent_const[1]

    if sigma_const is None:
        sig_min = np.abs(np.diff(vels.value)[0])
        sig_max = 0.5 * np.ptp(vels.value) / 2.35
    else:
        sig_min = sigma_const[0]
        sig_max = sigma_const[1]

    # Create the fit parameters
    pars = Parameters()
    for i in range(len(init_params) // 3):
        pars.add(name=f'amp{i + 1}', value=init_params[3 * i],
                 min=amp_min, max=amp_max)
        pars.add(name=f'cent{i + 1}', value=init_params[3 * i + 1],
                 min=cent_min, max=cent_max)
        pars.add(name=f'sigma{i + 1}', value=init_params[3 * i + 2],
                 min=sig_min, max=sig_max)

    valid_data = np.isfinite(spec.filled_data[:])
    yfit = spec.filled_data[:].value[valid_data]
    xfit = vels.value[valid_data]

    if discrete_fitter:
        # Upsample for computing over discrete bins. Work on the plain
        # ndarray xfit rather than reassigning the Quantity `vels`, which
        # would break the `.value` accesses below.
        chan_width = np.abs(np.diff(xfit)[0])
        order_sign = 1. if xfit[-1] > xfit[0] else -1.

        # You really need to rewrite this to be faster.
        discrete_oversamp = 4
        assert discrete_oversamp > 1.

        xfit_upsamp = np.linspace(xfit[0] - order_sign * 0.5 * chan_width,
                                  xfit[-1] + order_sign * 0.5 * chan_width,
                                  xfit.size * discrete_oversamp)
    else:
        xfit_upsamp = None

    comp_deletes = []

    while True:
        mini = Minimizer(residual_multigauss, pars,
                         fcn_args=(xfit, xfit_upsamp, yfit,
                                   err if err is not None else 1.,
                                   discrete_fitter),
                         max_nfev=vels.size * 1000)
        out = mini.leastsq()

        # Testing null model. Nothing to check
        if len(pars) == 0:
            break

        params_fit = [value.value for value in out.params.values()]
        params_fit = np.array(params_fit)

        # Make sure all components are significant
        component_signif = comp_sig(params_fit[::3], params_fit[2::3])

        if np.all(component_signif >= component_sigma):
            break

        comp_del = np.argmin(component_signif)
        comp_deletes.append(comp_del)

        remain_comps = np.arange(len(init_params) // 3)
        for dcomp in comp_deletes:
            remain_comps = np.delete(remain_comps, dcomp)

        pars = Parameters()
        for i, comp in enumerate(remain_comps):
            # Index init_params by the surviving component (`comp`), not by
            # the new position `i`, so each kept component gets its own
            # initial guesses back.
            pars.add(name=f'amp{i + 1}', value=init_params[3 * comp],
                     min=amp_min, max=amp_max)
            pars.add(name=f'cent{i + 1}', value=init_params[3 * comp + 1],
                     min=cent_min, max=cent_max)
            pars.add(name=f'sigma{i + 1}', value=init_params[3 * comp + 2],
                     min=sig_min, max=sig_max)

    return out
shift = params['shift'] omega = params['omega'] decay = params['decay'] model = amp * np.sin(x * omega + shift) * np.exp(-x * x * decay) return model - data # create a set of Parameters params = Parameters() params.add('amp', value=10, min=0) params.add('decay', value=0.1) params.add('shift', value=0.0, min=-np.pi / 2., max=np.pi / 2) params.add('omega', value=3.0) # do fit, here with leastsq model minner = Minimizer(fcn2min, params, fcn_args=(x, data)) result = minner.minimize() # calculate final result final = data + result.residual # write error report report_fit(result) # try to plot results try: import matplotlib.pyplot as plt plt.plot(x, data, 'k+') plt.plot(x, final, 'r') plt.show() except ImportError:
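    pass  # plotting is optional; carry on without matplotlib

# Note: because fcn2min returns model - data, the best-fit curve plotted above
# is reconstructed as data + result.residual; re-evaluating the model at
# result.params would give the same curve.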
data = residual(fit_params, x) + noise

if HASPYLAB:
    pylab.plot(x, data, 'r+')

fit_params = Parameters()
fit_params.add_many(('a1', 8.0, True, None, 14., None),
                    ('c1', 5.0, True, None, None, None),
                    ('w1', 0.7, True, None, None, None),
                    ('a2', 3.1, True, None, None, None),
                    ('c2', 8.8, True, None, None, None))
fit_params.add('w2', expr='2.5*w1')

myfit = Minimizer(residual, fit_params,
                  fcn_args=(x,), fcn_kws={'data': data})
myfit.prepare_fit()
init = residual(fit_params, x)

if HASPYLAB:
    pylab.plot(x, init, 'b--')

result = myfit.leastsq()
print(' N fev = ', result.nfev)
print(result.chisqr, result.redchi, result.nfree)
report_fit(result.params)
def fret_fit(self, ax, vary): dis_peak_n = len(self.x_ini) params = Parameters() if dis_peak_n == 1: params.add('h', value=self.y_ini[0]) # height params.add('c', value=self.x_ini[0]) # center params.add('w', value=.05) # width #params.add('o', value = 0) # offset minner = Minimizer(one_gau, params, fcn_args=(self.xhist, self.yhist)) if dis_peak_n == 2: params.add('h1', value=self.y_ini[0]) params.add('h2', value=self.y_ini[1]) params.add('c1', value=self.x_ini[0]) params.add('c2', value=self.x_ini[1], vary=vary) params.add('w1', value=.05) params.add('w2', value=.05) params.add('o', value=0) minner = Minimizer(two_gau, params, fcn_args=(self.xhist, self.yhist)) if dis_peak_n == 3: params.add('h1', value=self.y_ini[0]) params.add('h2', value=self.y_ini[1]) params.add('h3', value=self.y_ini[2]) params.add('c1', value=self.x_ini[0]) params.add('c2', value=self.x_ini[1], vary=vary) params.add('c3', value=self.x_ini[2]) params.add('w1', value=.05) params.add('w2', value=.05) params.add('w3', value=.05) params.add('o', value=0) minner = Minimizer(three_gau, params, fcn_args=(self.xhist, self.yhist)) self.fret_fit_result = minner.minimize() self.yhist_fit = self.yhist + self.fret_fit_result.residual ax.plot(self.xhist, self.yhist_fit, 'black') ax.set_ylabel('Frequency') if dis_peak_n == 1: params1 = Parameters() params1.add('h', value=self.fret_fit_result.params['h'].value) params1.add('c', value=self.fret_fit_result.params['c'].value) params1.add('w', value=self.fret_fit_result.params['w'].value) #params1.add('o', value = self.fret_fit_result.params['o'].value) params1.add('h_std', value=self.fret_fit_result.params['h'].stderr) params1.add('c_std', value=self.fret_fit_result.params['c'].stderr) params1.add('w_std', value=self.fret_fit_result.params['w'].stderr) #params1.add('o_std', value = self.fret_fit_result.params['o'].stderr) self.fit_notes = '\npeak 1:\n' \ +'center: '+str(round(params1['c'].value, 2))+'+/-'+str(round(params1['c_std'].value, 2))+' ('+str(round(params1['c_std'].value/params1['c'].value, 2)*100)+'%)\n' \ +'height: '+str(round(params1['h'].value, 2))+'+/-'+str(round(params1['h_std'].value, 2))+' ('+str(round(params1['h_std'].value/params1['h'].value, 2)*100)+'%)\n' \ +'sigma: '+str(round(params1['w'].value, 2))+'+/-'+str(round(params1['w_std'].value, 2))+' ('+str(round(params1['w_std'].value/params1['w'].value, 2)*100)+'%)\n' \ +'area: '+str(round(params1['h'].value*params1['w'].value/0.3989, 2))+'\n\n' \ +'reduced chisqr: '+str(self.fret_fit_result.redchi) #+'offset: '+str(round(params1['o'].value, 2))+'+/-'+str(round(params1['o_std'].value, 2))+' ('+str(round(params1['o_std'].value/params1['o'].value, 2)*100)+'%)\n' \ if dis_peak_n == 2: params1 = Parameters() params2 = Parameters() params1.add('h', value=self.fret_fit_result.params['h1'].value) params1.add('c', value=self.fret_fit_result.params['c1'].value) params1.add('w', value=self.fret_fit_result.params['w1'].value) params1.add('h_std', value=self.fret_fit_result.params['h1'].stderr) params1.add('c_std', value=self.fret_fit_result.params['c1'].stderr) params1.add('w_std', value=self.fret_fit_result.params['w1'].stderr) params2.add('h', value=self.fret_fit_result.params['h2'].value) params2.add('c', value=self.fret_fit_result.params['c2'].value) params2.add('w', value=self.fret_fit_result.params['w2'].value) params2.add('h_std', value=self.fret_fit_result.params['h2'].stderr) params2.add('c_std', value=self.fret_fit_result.params['c2'].stderr) params2.add('w_std', value=self.fret_fit_result.params['w2'].stderr) 
params1.add('o', value=self.fret_fit_result.params['o'].value) params2.add('o', value=self.fret_fit_result.params['o'].value) params1.add('o_std', value=self.fret_fit_result.params['o'].stderr) params2.add('o_std', value=self.fret_fit_result.params['o'].stderr) self.fit_notes = '\npeak 1:\n' \ +'center: '+str(round(params1['c'].value, 2))+'+/-'+str(round(params1['c_std'].value, 2))+' ('+str(round(params1['c_std'].value/params1['c'].value, 2)*100)+'%)\n' \ +'height: '+str(round(params1['h'].value, 2))+'+/-'+str(round(params1['h_std'].value, 2))+' ('+str(round(params1['h_std'].value/params1['h'].value, 2)*100)+'%)\n' \ +'sigma: '+str(round(params1['w'].value, 2))+'+/-'+str(round(params1['w_std'].value, 2))+' ('+str(round(params1['w_std'].value/params1['w'].value, 2)*100)+'%)\n' \ +'area: '+str(round(params1['h'].value*params1['w'].value/0.3989, 2)) \ +'\n\npeak 2:\n' \ +'center: '+str(round(params2['c'].value, 2))+'+/-'+str(round(params2['c_std'].value, 2))+' ('+str(round(params2['c_std'].value/params2['c'].value, 2)*100)+'%)\n' \ +'height: '+str(round(params2['h'].value, 2))+'+/-'+str(round(params2['h_std'].value, 2))+' ('+str(round(params2['h_std'].value/params2['h'].value, 2)*100)+'%)\n' \ +'sigma: '+str(round(params2['w'].value, 2))+'+/-'+str(round(params2['w_std'].value, 2))+' ('+str(round(params2['w_std'].value/params2['w'].value, 2)*100)+'%)\n' \ +'area: '+str(round(params2['h'].value*params2['w'].value/0.3989, 2))+'\n\n' \ +'reduced chisqr: '+str(self.fret_fit_result.redchi) +'\n\n' \ +'offset: '+str(round(params1['o'].value, 2))+'+/-'+str(round(params1['o_std'].value, 2))+' ('+str(round(params1['o_std'].value/params1['o'].value, 2)*100)+'%)\n' \ mask1 = (self.xhist > params1['c'] - params1['w'] * 3) & ( self.xhist < params1['c'] + params1['w'] * 3) mask2 = (self.xhist > params2['c'] - params2['w'] * 3) & ( self.xhist < params2['c'] + params2['w'] * 3) ax.plot(self.xhist[mask1], one_gau(params1, self.xhist[mask1], self.yhist[mask1]) + self.yhist[mask1], color='blue') ax.plot(self.xhist[mask2], one_gau(params2, self.xhist[mask2], self.yhist[mask2]) + self.yhist[mask2], color='blue') if dis_peak_n == 3: params1 = Parameters() params2 = Parameters() params3 = Parameters() params1.add('h', value=self.fret_fit_result.params['h1'].value) params1.add('c', value=self.fret_fit_result.params['c1'].value) params1.add('w', value=self.fret_fit_result.params['w1'].value) params1.add('h_std', value=self.fret_fit_result.params['h1'].stderr) params1.add('c_std', value=self.fret_fit_result.params['c1'].stderr) params1.add('w_std', value=self.fret_fit_result.params['w1'].stderr) params2.add('h', value=self.fret_fit_result.params['h2'].value) params2.add('c', value=self.fret_fit_result.params['c2'].value) params2.add('w', value=self.fret_fit_result.params['w2'].value) params2.add('h_std', value=self.fret_fit_result.params['h2'].stderr) params2.add('c_std', value=self.fret_fit_result.params['c2'].stderr) params2.add('w_std', value=self.fret_fit_result.params['w2'].stderr) params3.add('h', value=self.fret_fit_result.params['h3'].value) params3.add('c', value=self.fret_fit_result.params['c3'].value) params3.add('w', value=self.fret_fit_result.params['w3'].value) params3.add('h_std', value=self.fret_fit_result.params['h3'].stderr) params3.add('c_std', value=self.fret_fit_result.params['c3'].stderr) params3.add('w_std', value=self.fret_fit_result.params['w3'].stderr) params1.add('o', value=self.fret_fit_result.params['o'].value) params2.add('o', value=self.fret_fit_result.params['o'].value) params3.add('o', 
value=self.fret_fit_result.params['o'].value) params1.add('o_std', value=self.fret_fit_result.params['o'].stderr) params2.add('o_std', value=self.fret_fit_result.params['o'].stderr) params3.add('o_std', value=self.fret_fit_result.params['o'].stderr) self.fit_notes = '\npeak 1:\n' \ +'center: '+str(round(params1['c'].value, 2))+'+/-'+str(round(params1['c_std'].value, 2))+' ('+str(round(params1['c_std'].value/params1['c'].value, 2)*100)+'%)\n' \ +'height: '+str(round(params1['h'].value, 2))+'+/-'+str(round(params1['h_std'].value, 2))+' ('+str(round(params1['h_std'].value/params1['h'].value, 2)*100)+'%)\n' \ +'sigma: '+str(round(params1['w'].value, 2))+'+/-'+str(round(params1['w_std'].value, 2))+' ('+str(round(params1['w_std'].value/params1['w'].value, 2)*100)+'%)\n' \ +'area: '+str(round(params1['h'].value*params1['w'].value/0.3989, 2)) \ +'\n\npeak 2:\n' \ +'center: '+str(round(params2['c'].value, 2))+'+/-'+str(round(params2['c_std'].value, 2))+' ('+str(round(params2['c_std'].value/params2['c'].value, 2)*100)+'%)\n' \ +'height: '+str(round(params2['h'].value, 2))+'+/-'+str(round(params2['h_std'].value, 2))+' ('+str(round(params2['h_std'].value/params2['h'].value, 2)*100)+'%)\n' \ +'sigma: '+str(round(params2['w'].value, 2))+'+/-'+str(round(params2['w_std'].value, 2))+' ('+str(round(params2['w_std'].value/params2['w'].value, 2)*100)+'%)\n' \ +'area: '+str(round(params2['h'].value*params2['w'].value/0.3989, 2)) \ +'\n\npeak 3:\n' \ +'center: '+str(round(params3['c'].value, 2))+'+/-'+str(round(params3['c_std'].value, 2))+' ('+str(round(params3['c_std'].value/params3['c'].value, 2)*100)+'%)\n' \ +'height: '+str(round(params3['h'].value, 2))+'+/-'+str(round(params3['h_std'].value, 2))+' ('+str(round(params3['h_std'].value/params3['h'].value, 2)*100)+'%)\n' \ +'sigma: '+str(round(params3['w'].value, 2))+'+/-'+str(round(params3['w_std'].value, 2))+' ('+str(round(params3['w_std'].value/params3['w'].value, 2)*100)+'%)\n' \ +'area: '+str(round(params3['h'].value*params3['w'].value/0.3989, 2))+'\n\n' \ +'reduced chisqr: '+str(self.fret_fit_result.redchi)+'\n\n' \ +'offset: '+str(round(params1['o'].value, 2))+'+/-'+str(round(params1['o_std'].value, 2))+' ('+str(round(params1['o_std'].value/params1['o'].value, 2)*100)+'%)\n' \ mask1 = (self.xhist > params1['c'] - params1['w'] * 3) & ( self.xhist < params1['c'] + params1['w'] * 3) mask2 = (self.xhist > params2['c'] - params2['w'] * 3) & ( self.xhist < params2['c'] + params2['w'] * 3) mask3 = (self.xhist > params3['c'] - params3['w'] * 3) & ( self.xhist < params3['c'] + params3['w'] * 3) ax.plot(self.xhist[mask1], one_gau(params1, self.xhist[mask1], self.yhist[mask1]) + self.yhist[mask1], color='blue') ax.plot(self.xhist[mask2], one_gau(params2, self.xhist[mask2], self.yhist[mask2]) + self.yhist[mask2], color='blue') ax.plot(self.xhist[mask3], one_gau(params3, self.xhist[mask3], self.yhist[mask3]) + self.yhist[mask3], color='blue')
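# The three near-identical branches above could be collapsed; a sketch of
# building the per-peak Parameters in a loop instead (a hypothetical helper,
# reusing the naming conventions above; note that stderr may be None when the
# fit could not estimate it):
from lmfit import Parameters

def split_peak_params(fit_result, n_peaks):
    """Return one Parameters object per fitted peak, copying the per-peak
    h/c/w values and stderrs and, if present, the shared offset 'o'."""
    per_peak = []
    for i in range(1, n_peaks + 1):
        p = Parameters()
        suffix = '' if n_peaks == 1 else str(i)
        for short in ('h', 'c', 'w'):
            src = fit_result.params[short + suffix]
            p.add(short, value=src.value)
            p.add(short + '_std', value=src.stderr)
        if 'o' in fit_result.params:
            p.add('o', value=fit_result.params['o'].value)
            p.add('o_std', value=fit_result.params['o'].stderr)
        per_peak.append(p)
    return per_peak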
def optimize(function: Callable, cte: settings.Settings, average: bool = False, material_text: str = '', N_samples: int = None, full_path: str = None) -> OptimSolution: ''' Minimize the error between experimental data and simulation for the settings in cte average = True -> optimize average rate equations instead of microscopic ones. function returns the error vector and accepts: parameters, sim, and average. ''' logger = logging.getLogger(__name__) optim_progress = [] # type: List[str] def callback_fun(params: Parameters, iter_num: int, resid: np.array, sim: simulations.Simulations, average: bool = False, N_samples: int = None) -> None: ''' This function is called after every minimization step It prints the current parameters and error from the cache ''' optim_progbar.update(1) if not cte['no_console']: val_list = ', '.join('{:.3e}'.format(par.value) for par in params.values()) error = np.sqrt((resid * resid).sum()) msg = '{}, \t\t{}, \t{:.4e},\t[{}]'.format( iter_num, datetime.datetime.now().strftime('%H:%M:%S'), error, val_list) tqdm.tqdm.write(msg) logger.info(msg) optim_progress.append(msg) start_time = datetime.datetime.now() logger.info('Decay curves optimization of ' + material_text) cte['no_plot'] = True sim = simulations.Simulations(cte, full_path=full_path) method, parameters, options_dict = setup_optim(cte) optim_progbar = tqdm.tqdm(desc='Optimizing', unit='points', disable=cte['no_console']) param_names = ', '.join(name for name in parameters.keys()) header = 'Iter num\tTime\t\tRMSD\t\tParameters ({})'.format(param_names) optim_progress.append(header) tqdm.tqdm.write(header) minimizer = Minimizer(function, parameters, fcn_args=(sim, average, N_samples), iter_cb=callback_fun) # minimize logging only warnings or worse to console. with disable_loggers([ 'simetuc.simulations', 'simetuc.precalculate', 'simetuc.lattice', 'simetuc.simulations.conc_dep' ]): with disable_console_handler(__name__): result = minimizer.minimize(method=method, **options_dict) optim_progbar.update(1) optim_progbar.close() # fit results report_fit(result.params) logger.info(fit_report(result)) best_x = np.array([par.value for par in result.params.values()]) if 'brute' in method: min_f = np.sqrt(result.candidates[0].score) else: min_f = np.sqrt((result.residual**2).sum()) total_time = datetime.datetime.now() - start_time hours, remainder = divmod(total_time.total_seconds(), 3600) minutes, seconds = divmod(remainder, 60) tqdm.tqdm.write('') formatted_time = '{:.0f}h {:02.0f}m {:02.0f}s'.format( hours, minutes, seconds) logger.info('Minimum reached! Total time: %s.', formatted_time) logger.info('Optimized RMS error: %.3e.', min_f) logger.info('Parameters name and value:') for name, best_val in zip(parameters.keys(), best_x.T): logger.info('%s: %.3e.', name, best_val) optim_solution = OptimSolution(result, cte, optim_progress, total_time.total_seconds()) return optim_solution
def fit_temp_sta(temporal_sta, time, fit_time, tau1=None, tau2=None,
                 amp1=None, amp2=None, min_time=None, max_time=None,
                 min_amp=-1, max_amp=1, max_n=20):
    """Fit the temporal integration of the sta.

    Use the difference of two cascades of low-pass filters to fit the raw
    temporal integration of STA. It uses the time before the spike to
    compute the fitting.

    Parameters
    ----------
    temporal_sta: ndarray
        array with the raw temporal integration of the sta.
    time: ndarray
        array with the time of the raw temporal integration.
    fit_time: ndarray
        array with the time of the fitting curve.
    tau1: float, default: None
        estimated time for positive peak of temporal integration
    tau2: float, default: None
        estimated time for negative peak of temporal integration
    amp1: float, default: None
        estimated amplitude for positive peak of temporal integration
    amp2: float, default: None
        estimated amplitude for negative peak of temporal integration
    min_time: float, default: None
        minimum time to fit tau1 or tau2
    max_time: float, default: None
        maximum time to fit tau1 or tau2
    min_amp: float, default: -1
        minimum amplitude to fit amp1 or amp2
    max_amp: float, default: 1
        maximum amplitude to fit amp1 or amp2
    max_n: float, default: 20
        maximum order of the model to fit

    Returns
    -------
    fit_parameters: lmfit.Params.params
        parameters of the fitting for the two_cascades model
    fit_temp: ndarray
        array with the values of the fitting using fit_time
    """
    tau1 = tau1 if tau1 else time[temporal_sta.argmax()]
    tau2 = tau2 if tau2 else time[temporal_sta.argmin()]
    amp1 = amp1 if amp1 else np.abs(temporal_sta.max())
    amp2 = amp2 if amp2 else np.abs(temporal_sta.min())
    min_time = min_time if min_time else time[1]
    max_time = max_time if max_time else time[-2]

    params_fit = Parameters()
    params_fit.add('amp1', value=amp1, min=min_amp, max=max_amp)
    params_fit.add('amp2', value=amp2, min=min_amp, max=max_amp)
    params_fit.add('tau1', value=tau1, min=min_time, max=max_time)
    params_fit.add('tau2', value=tau2, min=min_time, max=max_time)
    params_fit.add('n', value=3, max=max_n)

    minner = Minimizer(two_cascades_min, params_fit,
                       fcn_args=(time, temporal_sta))
    try:
        result = minner.minimize(method='Nelder')
        fit_parameters = result.params
        fit_temp = two_cascades(fit_parameters, fit_time)
    except ValueError:
        try:
            result = minner.minimize()
            fit_parameters = result.params
            fit_temp = two_cascades(fit_parameters, fit_time)
        except ValueError:
            for key in params_fit:
                params_fit[key].set(value=0)
            fit_parameters = params_fit
            fit_temp = np.full_like(fit_time, np.nan)

    return fit_parameters, fit_temp
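# ``two_cascades`` and ``two_cascades_min`` are defined elsewhere in this
# module. A sketch of one common parameterization of the "difference of two
# low-pass-filter cascades" model (the exact functional form used here is an
# assumption, hence the ``_sketch`` suffixes):
import numpy as np

def two_cascades_sketch(params, t):
    v = params.valuesdict()
    n = v['n']
    peak1 = (t / v['tau1'])**n * np.exp(-n * (t / v['tau1'] - 1))
    peak2 = (t / v['tau2'])**n * np.exp(-n * (t / v['tau2'] - 1))
    return v['amp1'] * peak1 - v['amp2'] * peak2

def two_cascades_min_sketch(params, t, data):
    # residual for the Minimizer: model minus data
    return two_cascades_sketch(params, t) - data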
np.random.normal(scale=0.1, size=x.size)) ############################################################################### # Create the fitting parameters and set an inequality constraint for ``cen_l``. # First, we add a new fitting parameter ``peak_split``, which can take values # between 0 and 5. Afterwards, we constrain the value for ``cen_l`` using the # expression to be ``'peak_split+cen_g'``: pfit = Parameters() pfit.add(name='amp_g', value=10) pfit.add(name='amp_l', value=10) pfit.add(name='cen_g', value=5) pfit.add(name='peak_split', value=2.5, min=0, max=5, vary=True) pfit.add(name='cen_l', expr='peak_split+cen_g') pfit.add(name='wid_g', value=1) pfit.add(name='wid_l', expr='wid_g') mini = Minimizer(residual, pfit, fcn_args=(x, data)) out = mini.leastsq() best_fit = data + out.residual ############################################################################### # Performing a fit, here using the ``leastsq`` algorithm, gives the following # fitting results: report_fit(out.params) ############################################################################### # and figure: plt.plot(x, data, 'o') plt.plot(x, best_fit, '--', label='best fit') plt.legend()
def test_derive(): def func(pars, x, data=None): a = pars['a'].value b = pars['b'].value c = pars['c'].value model = a * np.exp(-b * x) + c if data is None: return model return (model - data) def dfunc(pars, x, data=None): a = pars['a'].value b = pars['b'].value c = pars['c'].value v = np.exp(-b * x) return [v, -a * x * v, np.ones(len(x))] def f(var, x): return var[0] * np.exp(-var[1] * x) + var[2] params1 = Parameters() params1.add('a', value=10) params1.add('b', value=10) params1.add('c', value=10) params2 = Parameters() params2.add('a', value=10) params2.add('b', value=10) params2.add('c', value=10) a, b, c = 2.5, 1.3, 0.8 x = np.linspace(0, 4, 50) y = f([a, b, c], x) data = y + 0.15 * np.random.normal(size=len(x)) # fit without analytic derivative min1 = Minimizer(func, params1, fcn_args=(x, ), fcn_kws={'data': data}) min1.leastsq() fit1 = func(params1, x) # fit with analytic derivative min2 = Minimizer(func, params2, fcn_args=(x, ), fcn_kws={'data': data}) min2.leastsq(Dfun=dfunc, col_deriv=1) fit2 = func(params2, x) print('''Comparison of fit to exponential decay with and without analytic derivatives, to model = a*exp(-b*x) + c for a = %.2f, b = %.2f, c = %.2f ============================================== Statistic/Parameter| Without | With | ---------------------------------------------- N Function Calls | %3i | %3i | Chi-square | %.4f | %.4f | a | %.4f | %.4f | b | %.4f | %.4f | c | %.4f | %.4f | ---------------------------------------------- ''' % (a, b, c, min1.nfev, min2.nfev, min1.chisqr, min2.chisqr, params1['a'].value, params2['a'].value, params1['b'].value, params2['b'].value, params1['c'].value, params2['c'].value)) check_wo_stderr(min1.params['a'], min2.params['a'].value, 0.00005) check_wo_stderr(min1.params['b'], min2.params['b'].value, 0.00005) check_wo_stderr(min1.params['c'], min2.params['c'].value, 0.00005)
def _fit(self): import numpy as np x0, x1 = self.lr.getRegion() start_idx = fi(self.spectrum.data[:, 0], x0) end_idx = fi(self.spectrum.data[:, 0], x1) + 1 x_data = self.spectrum.data[start_idx:end_idx, 0] y_data = self.spectrum.data[start_idx:end_idx, 1] tab_idx = self.tabWidget.currentIndex() if tab_idx == 0: self._setup_model() # fill the parameters from fields for i, p in enumerate((self.current_model.params if tab_idx == 0 else self.general_model_params).values()): p.value = float(self.value_list[i][tab_idx].text()) p.min = float(self.lower_bound_list[i][tab_idx].text()) p.max = float(self.upper_bound_list[i][tab_idx].text()) p.vary = not self.fixed_list[i][tab_idx].isChecked() def y_fit(params): if tab_idx == 0: y = self.current_model.wrapper_func(x_data, params) else: init, coefs, rates, y0 = self.get_values_from_params(params) sol = self._simul_custom_model(init, rates, x_data) y = (coefs * sol).sum(axis=1, keepdims=False) + y0 return y def residuals(params): y = y_fit(params) e = y - y_data if self.cbPropWeighting.isChecked(): e /= y * y return e minimizer = Minimizer( residuals, self.current_model.params if tab_idx == 0 else self.general_model_params) method = self.methods[self.cbMethod.currentIndex()]['abbr'] result = minimizer.minimize(method=method) # fit if tab_idx == 0: self.current_model.params = result.params else: self.general_model_params = result.params # fill fields values_errors = np.zeros((len(result.params), 2), dtype=np.float64) for i, p in enumerate(result.params.values()): values_errors[i, 0] = p.value values_errors[i, 1] = p.stderr if p.stderr is not None else 0 self.value_list[i][tab_idx].setText(f"{p.value:.4g}") self.error_list[i][tab_idx].setText( f"{p.stderr:.4g}" if p.stderr else '') y_fit_data = y_fit(result.params) y_residuals = y_data - y_fit_data # self.remove_last_fit() self.clear_plot() self.plot_fit = self.plot_widget.plotItem.plot( x_data, y_fit_data, pen=pg.mkPen(color=QColor(0, 0, 0, 200), width=2.5), name="Fit of {}".format(self.spectrum.name)) self.plot_residuals = self.plot_widget.plotItem.plot( x_data, y_residuals, pen=pg.mkPen(color=QColor(255, 0, 0, 150), width=1), name="Residuals of {}".format(self.spectrum.name)) self.fitted_spectrum = SpectrumItem.from_xy_values( x_data, y_fit_data, name="Fit of {}".format(self.spectrum.name), color='black', line_width=2.5, line_type=Qt.SolidLine) self.residual_spectrum = SpectrumItem.from_xy_values( x_data, y_residuals, name="Residuals of {}".format(self.spectrum.name), color='red', line_width=1, line_type=Qt.SolidLine) self.fit_result = FitResult(result, minimizer, values_errors, (self.current_model if tab_idx == 0 else self.current_general_model), data_item=self.spectrum, fit_item=self.fitted_spectrum, residuals_item=self.residual_spectrum)