def _minimize(self):
    """Run a two-stage MIGRAD minimization and return the final Minuit object.

    Stage 1 fixes every parameter whose name does not start with "bias" and
    fits the biases alone; stage 2 releases everything, seeded with the
    stage-1 best-fit values.  Uses the pre-2.0 iminuit keyword interface
    (forced_parameters, error_*/limit_*/fix_* keyword arguments).
    """
    t0 = time.time()
    # Flattened list of parameter names over all datasets in self.data.
    par_names = [name for d in self.data for name in d.pars_init]
    # Starting values, then the error_*/limit_*/fix_* keyword arguments.
    # NOTE(review): d.par_error/d.par_limit/d.par_fixed keys are assumed to
    # already carry the "error_"/"limit_"/"fix_" prefixes -- confirm upstream.
    kwargs = {name:val for d in self.data for name, val in d.pars_init.items()}
    kwargs.update({name:err for d in self.data for name, err in d.par_error.items()})
    kwargs.update({name:lim for d in self.data for name, lim in d.par_limit.items()})
    kwargs.update({name:fix for d in self.data for name, fix in d.par_fixed.items()})
    ## do an initial "fast" minimization fixing everything except the biases
    kwargs_init = {}
    for k,v in kwargs.items():
        kwargs_init[k] = v
    for name in par_names:
        # Only parameters named "bias*" float in the first pass.
        if name[:4] != "bias":
            kwargs_init["fix_"+name] = True
    mig_init = iminuit.Minuit(self,forced_parameters=self.par_names,errordef=1,print_level=1,**kwargs_init)
    mig_init.migrad()
    mig_init.print_param()
    ## now get the best fit values for the biases and start a full minimization
    for name, value in mig_init.values.items():
        kwargs[name] = value
    mig = iminuit.Minuit(self,forced_parameters=self.par_names,errordef=1,print_level=1,**kwargs)
    mig.migrad()
    mig.print_param()
    print("INFO: minimized in {}".format(time.time()-t0))
    sys.stdout.flush()
    return mig
def fit(ens, params):
    """Run the central-value fit for ensemble `ens`, optionally followed by
    bootstrap fits.

    Parameters
    ----------
    ens : hashable
        Ensemble key into `params`.
    params : dict
        Fit configuration: per-ensemble initial values (`fit_ini`), relative
        errors (`errors`), limits (`limits`), bootstrap count (`Nbs`) and the
        `bs` switch.

    Returns
    -------
    iminuit.Minuit
        Minimizer of the central-value fit.
    """
    y, y_bs = get_data(ens, params)
    # Covariance estimated from bootstrap samples (observations in rows).
    cov = np.cov(y_bs, rowvar=False)
    cov_inv = sp.linalg.inv(cov)
    chisq_fh = ChisqFH(y, cov_inv, ens, params)
    ini_vals = dict()
    for k in params[ens]['fit_ini']:
        ini_vals[k] = params[ens]['fit_ini'][k]
        if k in params['errors']:
            ini_vals['error_' + k] = abs(params['errors'][k] * params[ens]['fit_ini'][k])
        else:
            # Default step size: 2% of the starting value.
            ini_vals['error_' + k] = 0.02 * abs(params[ens]['fit_ini'][k])
    for k in params['limits']:
        ini_vals['limit_' + k] = params['limits'][k]
    min_fh = mn.Minuit(chisq_fh, pedantic=False, print_level=1, **ini_vals)
    min_fh.migrad()
    # Free parameters are the keys that are not error_/limit_ kwargs.
    lam = [k for k in ini_vals if 'error' not in k and 'limit' not in k]
    dof = len(y) - len(lam)
    print("chi^2 = %.4f, dof = %d, Q=%.4f" %
          (min_fh.fval, dof, fit_fh.p_val(min_fh.fval, dof)))
    if params['bs']:
        ini_vals_bs = dict(ini_vals)
        bs_lams = dict()
        for k in min_fh.values:
            ini_vals_bs[k] = min_fh.values[k]
            # BUGFIX: the bootstrap step sizes must go into ini_vals_bs (the
            # dict actually passed to the bootstrap fits); the original wrote
            # them into ini_vals, where they were never used.
            ini_vals_bs['error_' + k] = 0.2 * min_fh.errors[k]
            bs_lams[k] = np.zeros([params[ens]['Nbs']])
        bs_fits = []
        for bs in tqdm.tqdm(range(params[ens]['Nbs']), desc='Nbs'):
            chisq_fh = ChisqFH(y_bs[bs], cov_inv, ens, params)
            min_fh_bs = mn.Minuit(chisq_fh, pedantic=False, print_level=0,
                                  **ini_vals_bs)
            min_fh_bs.migrad()
            bs_fits.append(min_fh_bs)
            for k in min_fh_bs.values:
                bs_lams[k][bs] = min_fh_bs.values[k]
        print(bs_lams['gA_00'].mean(), bs_lams['gA_00'].std())
    return min_fh
def _runFourierFit(self):
    """Minimize the Fourier fit model and merge its best-fit values into
    self.fitDict."""
    # Keep iminuit's pedantic warnings only when running verbosely.
    extra = {} if self.verbose == True else {"pedantic": False}
    fitter = minuit.Minuit(self.FourFitClass, **extra)
    fitter.migrad()
    self.fitDict = _fb.mergeDicts(self.fitDict, fitter.values)
def _runHyperbolicFit(self):
    """Minimize the hyperbolic fit model and merge its best-fit values into
    self.fitDict.

    This MUST be called AFTER _runFourierFit() and AFTER _getHyperbolicData.
    """
    # Keep iminuit's pedantic warnings only when running verbosely.
    extra = {} if self.verbose == True else {"pedantic": False}
    fitter = minuit.Minuit(self.HypFitClass, **extra)
    fitter.migrad()
    self.fitDict = _fb.mergeDicts(self.fitDict, fitter.values)
def do_minuit(x, y, covarin, guess, functname=thepolynomial, fixpars=False, hesse=False):
    """Fit `functname` to (x, y) with iminuit.

    Parameters
    ----------
    x, y : array_like
        Data points.
    covarin : array_like
        Either a 1-d array of per-point errors or a full covariance matrix.
    guess : array_like
        Initial parameter values; parameters are named c0, c1, ...
    functname : callable
        Model function handed to MyChi2.
    fixpars : sequence of bool or False
        Per-parameter fix flags; False (default) fixes nothing.
    hesse : bool
        If True, run HESSE after MIGRAD.

    Returns
    -------
    (Minuit, parfit, errfit, covariance)
        The minimizer, best-fit values, errors (0 for fixed parameters) and
        the covariance matrix of the *free* parameters.
    """
    # Accept either error bars (1-d) or a covariance matrix (2-d).
    covar = covarin
    if np.size(np.shape(covarin)) == 1:
        err = covarin
        covar = np.zeros((np.size(err), np.size(err)))
        covar[np.arange(np.size(err)), np.arange(np.size(err))] = err ** 2
    # Instantiate minimizer objective.
    chi2 = MyChi2(x, y, covar, functname)
    ndim = np.size(guess)
    # BUGFIX: np.str was removed in NumPy 1.24; plain str() is equivalent.
    parnames = ['c' + str(i) for i in range(ndim)]
    theguess = dict(zip(parnames, guess))
    # Per-parameter fix flags (all False when fixpars is falsy).
    dfix = {}
    for i in range(len(parnames)):
        dfix['fix_' + parnames[i]] = fixpars[i] if fixpars else False
    print('Fitting with Minuit')
    theargs = dict(theguess)
    theargs.update(dfix)
    # theargs is always a dict here; the original dead `if theargs is None`
    # branch has been removed.
    m = iminuit.Minuit(chi2, forced_parameters=parnames, errordef=1., **theargs)
    m.migrad()
    if hesse:
        m.hesse()
    # Build np.array outputs.
    parfit = [m.values[i] for i in parnames]
    errfit = [m.errors[i] for i in parnames]
    # m.covariance only covers the free parameters.
    ndimfit = int(np.sqrt(len(m.covariance)))
    covariance = np.zeros((ndimfit, ndimfit))
    if fixpars:
        parnamesfit = []
        for i in range(len(parnames)):
            if fixpars[i] == False:
                parnamesfit.append(parnames[i])
            if fixpars[i] == True:
                errfit[i] = 0
    else:
        parnamesfit = parnames
    for i in range(ndimfit):
        for j in range(ndimfit):
            covariance[i, j] = m.covariance[(parnamesfit[i], parnamesfit[j])]
    print('Chi2=', chi2(*parfit))
    print('ndf=', np.size(x) - ndim)
    return (m, np.array(parfit), np.array(errfit), np.array(covariance))
def _minimize(self):
    """Two-stage MIGRAD minimization using the iminuit >= 2.0 interface.

    Stage 1 fixes every non-"bias" parameter and fits the biases alone;
    stage 2 refits everything, seeded with the stage-1 best-fit values.
    Returns the final Minuit object.
    """
    t0 = time.time()
    par_names = [name for d in self.data for name in d.pars_init]
    # Strip the legacy "error_"/"limit_"/"fix_" key prefixes so the values
    # can be applied through the iminuit 2.x attribute interface below.
    par_val_init = {
        name: val
        for d in self.data for name, val in d.pars_init.items()
    }
    par_err = {
        k.split('error_')[1]: err
        for d in self.data for k, err in d.par_error.items()
    }
    par_lim = {
        k.split('limit_')[1]: lim
        for d in self.data for k, lim in d.par_limit.items()
    }
    par_fix = {
        k.split('fix_')[1]: fix
        for d in self.data for k, fix in d.par_fixed.items()
    }
    ## do an initial "fast" minimization fixing everything except the biases
    mig_init = iminuit.Minuit(self, name=self.par_names, **par_val_init)
    for name in par_names:
        mig_init.errors[name] = par_err[name]
        mig_init.limits[name] = par_lim[name]
        mig_init.fixed[name] = par_fix[name]
        # Only the bias parameters float in the first pass.
        if name[:4] != "bias":
            mig_init.fixed[name] = True
    mig_init.errordef = 1
    mig_init.print_level = 1
    mig_init.migrad()
    print(mig_init.fmin)
    print(mig_init.params)
    ## now get the best fit values for the biases and start a full minimization
    par_val = {}
    for name, value in mig_init.values.to_dict().items():
        par_val[name] = value
    mig = iminuit.Minuit(self, name=self.par_names, **par_val)
    for name in par_names:
        mig.errors[name] = par_err[name]
        mig.limits[name] = par_lim[name]
        mig.fixed[name] = par_fix[name]
    mig.errordef = 1
    mig.print_level = 1
    mig.migrad()
    print(mig.fmin)
    print(mig.params)
    userprint("INFO: minimized in {}".format(time.time() - t0))
    sys.stdout.flush()
    return mig
def Minuit_chi2(self):
    """Fit the standardization parameters by chi2 minimization, iteratively
    adjusting the intrinsic dispersion until chi2/dof ~ 1.

    Side effects: sets self.Params, self.Params_Covariance,
    self.dispertion_intrinseque, self.y_corrected, self.y_error_corrected,
    self.WRMS and self.WRMS_err.
    """

    def _compute_chi2(alpha1, alpha2, alpha3, beta, mb):
        # Delegate to the class chi2 computation and return the cached value.
        self.comp_chi2(alpha1, alpha2, alpha3, beta, mb)
        return self.chi2

    Find_param = minuit.Minuit(_compute_chi2,
                               alpha1=0, alpha2=0, alpha3=0, beta=0, mb=0)
    Find_param.migrad()
    self.Params = Find_param.values
    self.Params_Covariance = Find_param.covariance
    self.comp_chi2(self.Params['alpha1'], self.Params['alpha2'],
                   self.Params['alpha3'], self.Params['beta'],
                   self.Params['mb'])
    calls = 0
    # Refit with increasing intrinsic dispersion until chi2/dof converges
    # to 1 (at most 100 attempts).
    if abs((self.chi2 / (self.dof)) - 1.) > 0.1:
        while abs((self.chi2 / (self.dof)) - 1.) > 0.001:
            if calls < 100:
                # BUGFIX: Python-2 `print` statements converted to print()
                # for consistency with the rest of the code base.
                print('je cherche de la dispersion pour la %i eme fois' %
                      (calls + 1))
                self._compute_dispertion()
                self.dispertion_intrinseque = copy.deepcopy(self.disp)
                Find_param = minuit.Minuit(_compute_chi2,
                                           alpha1=0, alpha2=0, alpha3=0,
                                           beta=0, mb=0)
                Find_param.migrad()
                self.Params = Find_param.values
                self.Params_Covariance = Find_param.covariance
                self.comp_chi2(self.Params['alpha1'], self.Params['alpha2'],
                               self.Params['alpha3'], self.Params['beta'],
                               self.Params['mb'])
                calls += 1
            else:
                print('error : calls limit are exceeded')
                break
    self.y_corrected = self.residu + self.Params['mb']
    # NOTE(review): this reads self.disp_intrinseque while the loop above
    # writes self.dispertion_intrinseque -- confirm the attribute set
    # elsewhere is the intended one.
    self.y_error_corrected = N.sqrt(self.VAR - self.disp_intrinseque**2)
    self.WRMS, self.WRMS_err = comp_rms(self.residu, self.dof, err=True,
                                        variance=self.VAR)
def Migrad(self, method='chi2', fitlow=None, fithigh=None, **kwargs):
    """Fit the current profile model with MIGRAD.

    Parameters
    ----------
    method : str
        'chi2' (Gaussian errors) or 'cstat' (Poisson likelihood).
    fitlow, fithigh : optional
        Fit range bounds forwarded to the objective.
    **kwargs
        Forwarded to iminuit.Minuit (starting values, limits, ...).

    Side effects: stores fitted values/errors in self.mod and keeps the
    minimizer and its outputs on self.params/self.errors/self.mlike/
    self.minuit/self.out.  Returns None (also on error).
    """
    prof = self.profile
    if prof.profile is None:
        print('Error: No valid profile exists in provided object')
        return
    model = self.mod.model
    # The objectives expect the transposed PSF mixing matrix.
    if prof.psfmat is not None:
        psfmat = np.transpose(prof.psfmat)
    else:
        psfmat = None
    if method == 'chi2':
        # Define the fitting algorithm
        chi2 = ChiSquared(model, prof.bins, prof.profile, prof.eprof,
                          psfmat=psfmat, fitlow=fitlow, fithigh=fithigh)
        # Construct iminuit object
        minuit = iminuit.Minuit(chi2, **kwargs)
    elif method == 'cstat':
        # Define the fitting algorithm
        cstat = Cstat(model, prof.bins, prof.counts, prof.area, prof.effexp,
                      prof.bkgcounts, psfmat=psfmat, fitlow=fitlow,
                      fithigh=fithigh)
        # Construct iminuit object
        minuit = iminuit.Minuit(cstat, **kwargs)
    else:
        print('Unknown method ', method)
        return
    fmin, param = minuit.migrad()
    # Copy fitted values and errors into plain arrays for the model object.
    npar = len(minuit.values)
    outval = np.empty(npar)
    outerr = np.empty(npar)
    for i in range(npar):
        outval[i] = minuit.values[i]
        outerr[i] = minuit.errors[i]
    self.mod.SetParameters(outval)
    self.mod.SetErrors(outerr)
    self.mod.parnames = minuit.parameters
    self.params = minuit.values
    self.errors = minuit.errors
    self.mlike = fmin
    self.minuit = minuit
    self.out = param
def test__run_minos(caplog):
    """_run_minos runs MINOS for known parameters, maps iminuit's default
    x0/x1 names to user labels, and logs-and-skips unknown parameters."""
    caplog.set_level(logging.DEBUG)

    def func_to_minimize(pars):
        # mock NLL: quadratic + quartic bowl with a linear tilt in pars[0]
        return (np.sum(
            np.power(pars - 2 * np.ones_like(pars), 2) +
            np.power(pars - 1 * np.ones_like(pars), 4)) + pars[0])

    # Parameters named explicitly: MINOS runs for "b" and logs its interval.
    m = iminuit.Minuit(
        func_to_minimize,
        [1.0, 1.0],
        name=["a", "b"],
    )
    m.errordef = 1
    m.migrad()
    fit._run_minos(m, ["b"], ["a", "b"])
    assert "running MINOS for b" in [rec.message for rec in caplog.records]
    assert "b = 1.5909 -0.7262 +0.4738" in [
        rec.message for rec in caplog.records
    ]
    caplog.clear()

    # proper labels not known to iminuit: "x0" resolves to label "a"
    m = iminuit.Minuit(
        func_to_minimize,
        [1.0, 1.0],
    )
    m.errordef = 1
    m.migrad()
    fit._run_minos(m, ["x0"], ["a", "b"])
    assert "running MINOS for a" in [rec.message for rec in caplog.records]
    assert "a = 1.3827 -0.8713 +0.5715" in [
        rec.message for rec in caplog.records
    ]
    caplog.clear()

    # unknown parameter, MINOS does not run
    m = iminuit.Minuit(
        func_to_minimize,
        [1.0, 1.0],
    )
    m.errordef = 1
    m.migrad()
    fit._run_minos(m, ["x2"], ["a", "b"])
    assert [rec.message for rec in caplog.records] == [
        "parameter x2 not found in model",
        "MINOS results:",
    ]
    caplog.clear()
def cont_fit(self):
    """Fit the quasar continuum as the mean continuum times a linear tilt.

    Minimizes a chi2 that includes a -sum(log w) term for the per-pixel
    weights, then stores the fitted continuum in self.co and the tilt
    parameters in self.p0 / self.p1.

    NOTE(review): sp.log10 only exists in scipy < 1.0 (the numpy aliases
    were removed later); np.log10 is the modern equivalent.
    """
    # Observed-frame log-wavelength limits of the rest-frame forest window.
    lmax = forest.lmax_rest + sp.log10(1 + self.zqso)
    lmin = forest.lmin_rest + sp.log10(1 + self.zqso)
    mc = forest.mean_cont(self.ll - sp.log10(1 + self.zqso))
    if not self.T_dla is None:
        # Apply the DLA transmission correction when present.
        mc *= self.T_dla
    var_lss = forest.var_lss(self.ll)
    eta = forest.eta(self.ll)

    def model(p0, p1):
        # Linear tilt: equals p0 at ll == lmin and p1 at ll == lmax.
        line = p1*(self.ll-lmin)/(lmax-lmin) + p0*(lmax-self.ll)/(lmax-lmin)
        return line*mc

    def chi2(p0, p1):
        m = model(p0, p1)
        iv = self.iv/eta
        # Weights combine pipeline variance and LSS variance.
        we = iv/(iv*var_lss*m**2 + 1)
        v = (self.fl-m)**2*we
        # chi2 plus -sum(log w) so the weight model itself is constrained.
        return v.sum() - sp.log(we).sum()

    # Start both tilt parameters at the inverse-variance-weighted mean flux.
    p0 = p1 = (self.fl*self.iv).sum()/self.iv.sum()

    mig = iminuit.Minuit(chi2, p0=p0, p1=p1, error_p0=p0/2., error_p1=p1/2.,
                         errordef=1., print_level=0)
    mig.migrad()
    self.co = model(mig.values["p0"], mig.values["p1"])
    self.p0 = mig.values["p0"]
    self.p1 = mig.values["p1"]
def LP_minuit(param_name, fixed_var):
    """Profile-likelihood helper: fit all trainable amplitudes with
    `param_name` clamped to `fixed_var` via a degenerate limit.

    Uses the pre-2.0 iminuit keyword interface (forced_parameters,
    error_*/limit_* kwargs, use_array_call) and runs MIGRAD on the GPU.
    Returns the Minuit object.
    """
    args = {}
    args_name = []
    x0 = []
    # The profiled parameter gets a degenerate (fixed) limit; the two Zc
    # parameters get physics-motivated bounds.
    bounds_dict = {
        param_name: (fixed_var, fixed_var),
        "Zc_4160_m0:0": (4.1, 4.22),
        "Zc_4160_g0:0": (0, 10),
    }
    for i in a.Amp.trainable_variables:
        args[i.name] = i.numpy()
        x0.append(i.numpy())
        args_name.append(i.name)
        # Uniform initial step size for every parameter.
        args["error_" + i.name] = 0.1
        if i.name not in bounds_dict:
            bounds_dict[i.name] = (0.0, None)
    for i in bounds_dict:
        if i in args_name:
            args["limit_{}".format(i)] = bounds_dict[i]
    m = iminuit.Minuit(
        fcn,
        forced_parameters=args_name,
        errordef=0.5,  # 0.5 for a negative log-likelihood
        grad=fcn.grad,
        print_level=2,
        use_array_call=True,
        **args,
    )
    now = time.time()
    with tf.device("/device:GPU:0"):
        print(m.migrad(ncall=10000))
    print(time.time() - now)
    print(m.get_param_states())
    return m
def _make_minuit(self, objective, data, pdf, init_pars, init_bounds,
                 constrained_mu=None):
    """Build an iminuit.Minuit minimizer over parameters p0..p{N-1}.

    When `constrained_mu` is given, the parameter of interest is started at
    that value and fixed.
    """

    def wrapped(pars):
        # The objective returns a tuple; the NLL value is its first entry.
        return objective(pars, data, pdf)[0]

    parnames = ['p{}'.format(i) for i in range(len(init_pars))]
    kwargs = {}
    for i, bound in enumerate(init_bounds):
        kwargs['limit_p{}'.format(i)] = bound
    if constrained_mu is not None:
        kwargs['fix_p{}'.format(pdf.config.poi_index)] = True
    for i, val in enumerate(init_pars):
        kwargs['p{}'.format(i)] = val
    if constrained_mu is not None:
        # Start the POI at the constrained value (overrides init_pars).
        kwargs['p{}'.format(pdf.config.poi_index)] = constrained_mu
    return iminuit.Minuit(wrapped,
                          print_level=1 if self.verbose else 0,
                          use_array_call=True,
                          forced_parameters=parnames,
                          **kwargs)
def minimize(likelihood, line, model, wave, flux, ivar, noise=None, x=None,
             **init_pars):
    """Minimize `likelihood` for a spectral line model with iminuit.

    Returns the Minuit object and the migrad() result.
    """
    # Bind the data so the objective only depends on the fit parameters.
    nll = partial(likelihood, line=line, wave=wave, flux=flux, ivar=ivar,
                  x=x, model=model, noise=noise)
    fitter = iminuit.Minuit(nll, forced_parameters=line.parnames,
                            errordef=1, pedantic=False, **init_pars)
    result = fitter.migrad()
    return fitter, result
def futur_get_minimizer(compute_nll, calibrated_param, calibrated_param_error):
    """Build an iminuit minimizer for the systematics NLL, starting at the
    calibrated parameter values, with uniform [0.01, 10] limits and step
    sizes taken from the calibration errors."""
    MIN_VALUE = 0.01
    MAX_VALUE = 10
    names = ["tes", "jes", "les", "nasty_bkg", "sigma_soft", "mu"]
    start = {n: getattr(calibrated_param, n) for n in names}
    minimizer = iminuit.Minuit(compute_nll, **start)
    minimizer.errordef = iminuit.Minuit.LIKELIHOOD
    minimizer.limits = [(MIN_VALUE, MAX_VALUE)] * len(names)
    minimizer.errors = [getattr(calibrated_param_error, n) for n in names]
    # Loosened from the 0.1 default to help convergence.
    minimizer.tol = 0.5
    return minimizer
def MB_fitter(T_fit, Qi_fit, f_fit):
    """Iteratively fit the Qi(T) and f(T) model to the data.

    Parameters
    ----------
    T_fit, Qi_fit, f_fit : array_like
        Temperature points with measured quality factors and frequencies.

    Returns
    -------
    (f0 [GHz], Delta0 [x1000], alpha, Qi0, chi^2/dof)
    """
    fit_result = []

    def chisq(f0, Delta0, alpha, Qi0):
        # Joint chi-square over the Qi(T) and f(T) curves, each normalized
        # by its sample variance.
        alpha_Q = alpha
        alpha_f = alpha
        var_Qi = np.var(Qi_fit)
        var_f = np.var(f_fit)
        return sum((Qi_T(T_fit, f0, Qi0, Delta0, alpha_Q) - Qi_fit)**2. / var_Qi
                   + (f_T(T_fit, f0, Delta0, alpha_f) - f_fit)**2. / var_f)

    def fit_chisq_test(T_fit, f_fit, Qi_fit, f0, Delta0, alpha, Qi0):
        # Reduced chi-square used to report fit quality (the /4.
        # normalization is inherited from the original implementation).
        var_Qi = np.var(Qi_fit)
        var_f = np.var(f_fit)
        return sum(
            (Qi_T(T_fit, f0, Qi0, Delta0, alpha) - Qi_fit)**2. / var_Qi
            + (f_T(T_fit, f0, Delta0, alpha) - f_fit)**2. / var_f) / 4.

    f0_in = f_fit[0]
    Delta0_in = 4.e-4
    alpha_in = 0.03801
    Qi0_in = Qi_fit[0]
    for j in range(500):
        minimizer = iminuit.Minuit(chisq,
                                   f0=f0_in,
                                   Delta0=Delta0_in,
                                   alpha=alpha_in,
                                   Qi0=Qi0_in,
                                   limit_f0=(f_fit[0] / 1.1, f_fit[0] * 1.1),
                                   limit_Delta0=(1.2e-4, 2.2e-4),
                                   limit_alpha=(0.002, 0.05),
                                   limit_Qi0=(1.e2, 1.e7),
                                   pedantic=False,
                                   print_level=-1)
        minimizer.migrad()
        # BUGFIX: carry the *fitted* values into the next iteration.  The
        # original read minimizer.values *before* migrad(), which returns
        # the unchanged starting values, so all 500 iterations restarted
        # from the same point.
        f0_in = minimizer.values["f0"]
        Delta0_in = minimizer.values["Delta0"]
        alpha_in = minimizer.values["alpha"]
        Qi0_in = minimizer.values["Qi0"]
    f0, Delta0, alpha, Qi0 = f0_in, Delta0_in, alpha_in, Qi0_in
    chi_sq_dof = fit_chisq_test(T_fit, f_fit, Qi_fit, f0, Delta0, alpha, Qi0)
    fit_result.append([f0 / 1.e9, Delta0 * 1000, alpha, Qi0, chi_sq_dof])
    return f0 / 1.e9, Delta0 * 1000., alpha, Qi0, chi_sq_dof
def run(self):
    """Fit the coil geometry to the measured field data.

    Returns a dict of fitted parameters (plus the fixed nLayers/nTurns),
    or None when no data is available.
    """
    # BUGFIX: identity test instead of `== None`; Python-2 print statements
    # converted to print() calls.
    if self.data_field is None:
        print('No data to fit too!')
        return
    print('Fitting to', len(self.data_field), 'data points')
    # Set sensible limits for the coil parameters, otherwise the fit can
    # wander off completely.
    rIlow = 0.256  # simply from the bore
    rIhi = self.rInner * 1.15  # 15% increase
    rOlow = self.rOuter * 0.85  # 15% decrease
    rOhi = self.rOuter * 1.15
    lenLow = self.length * 0.85
    lenHi = self.length * 1.15
    minimizer = minuit.Minuit(self._min_func,
                              rInner=self.rInner, limit_rInner=(rIlow, rIhi),
                              rOuter=self.rOuter, limit_rOuter=(rOlow, rOhi),
                              length=self.length, limit_length=(lenLow, lenHi),
                              centre=self.centre,
                              thetaX=self.thetaX, thetaY=self.thetaY,
                              px=self.px, py=self.py,
                              pedantic=False)
    minimizer.migrad()
    fitDict = minimizer.values
    # The discrete winding counts are not fitted; report them alongside.
    fitDict['nLayers'] = self.nLayers
    fitDict['nTurns'] = self.nTurns
    return fitDict
def run_grid(self, grid={}):
    """Evaluate the chi2 over a grid of parameter points and append the
    results to self.grid.

    NOTE(review): the mutable default `grid={}` is kept for interface
    compatibility; it is never mutated here.

    Parameters
    ----------
    grid : dict
        {'params': [names...], 'points': [value tuples...]}
    """
    params = grid.get('params', [])
    points = grid.get('points', [])
    if not len(points):
        return
    self.update_minuitargs()
    fitarg = copy.deepcopy(self.minuitargs)
    # Grid parameters are fixed at each point and left unbounded.
    for par in params:
        fitarg['fix_{}'.format(par)] = True
    for par in params:
        fitarg['limit_{}'.format(par)] = (-scipy.inf, scipy.inf)
    toret = []
    self.logger.info('Grid of {} and size {:d}.'.format(
        params, len(points)))
    for point in points:
        pfitarg = copy.deepcopy(fitarg)
        for par, val in zip(params, point):
            pfitarg[par] = val
        minuit = iminuit.Minuit(self.chi2args, **pfitarg)
        minuit.migrad(**self.params['migrad'])
        tmp = dict(minuit.values)
        tmp['chi2'] = self.chi2args(*[tmp[key] for key in self.parameters])
        toret.append(tmp)
    params = toret[0].keys()
    # BUGFIX: collect *every* column.  The original built a single-key dict
    # from whatever `par` was left over from the loops above, so self.grid
    # only ever accumulated one parameter.
    toret = {par: scipy.array([ret[par] for ret in toret]) for par in params}
    self.grid = {
        par: scipy.concatenate([self.grid.get(par, []), toret[par]], axis=0)
        for par in toret
    }
def release_parameter(self, parameter):
    '''
    Release parameter <`parameter`>.

    **parameter** : string or int
        Name (or index) of the parameter to release.
    '''
    # iminuit cannot release a parameter in place: rebuild the minimizer
    # with the fix-flag cleared in its fit arguments.
    if isinstance(parameter, int):
        par_id = parameter
        parameter = self.parameter_names[parameter]
    else:
        try:
            par_id = self.parameter_names.index(parameter)
        except ValueError:
            raise ValueError("No parameter named '%s'" % (parameter, ))

    logger.info("Releasing parameter %d in Minuit" % (par_id, ))

    fitparam = self.__iminuit.fitarg.copy()  # copy minimizer arguments
    fitparam['fix_%s' % parameter] = False  # clear fix-flag for parameter
    # Replace minimizer.  BUGFIX: pass errordef as well, for consistency
    # with fix_parameter(); omitting it silently dropped the configured
    # error definition on every release.
    self.__iminuit = iminuit.Minuit(self.function_to_minimize,
                                    print_level=self.print_level,
                                    forced_parameters=self.parameter_names,
                                    errordef=self.errordef,
                                    **fitparam)
def full_order_shift_scale(self, order=7, verbose=False, veryVerbose=False,
                           robustSearch=False):
    """Find the full-order shift/scale fit with Minuit.

    Fitted values and errors are stored in self.fitResults['order'][order].
    The polynomial order itself is held fixed during the fit.
    """
    try:
        m = mi.Minuit(self.order_shift_and_scale_Akima, order=order,
                      fix_order=True, **self.fit_starting['initial'])
        if veryVerbose == True:
            m.printMode = 1
        if robustSearch == True:
            # BUGFIX: Python-2 print statements converted to print() calls.
            print("Robust search. Beginning initial scan...")
            m.scan(("fshift", 20, -0.5, 0.5))
            print("done.")
        print("Finding initial full order shift/fit", '\n',
              datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        m.set_strategy(2)
        m.migrad()
        self.fitResults['order'][order]['values'] = m.values
        try:
            # 'order' is a fixed input, not a fit result; drop it if present.
            del self.fitResults['order'][order]['values']['order']
        except KeyError:
            pass
        self.fitResults['order'][order]['errors'] = m.errors
    except Exception:
        # BUGFIX: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit; keep the best-effort behavior but
        # narrow the catch.
        print("Serious problem with order:", order)
def _init_minuit(f, x = None, x_fix = None, x_err = None, x_lim = None,
                 errordef = 1, **kwargs):
    """Initialize minuit using no-nonsense interface.

    The N entries of `x` become parameters x0..x{N-1}; optional per-entry
    step sizes (`x_err`), limits (`x_lim`) and fix flags (`x_fix`) must
    match its length.  Returns the configured iminuit.Minuit object.
    """
    try:
        import iminuit
    except ImportError:
        raise ImportError(
            "This function requires that the module iminuit is installed.")

    n_par = len(x)
    # Optional per-parameter arrays must align with x.
    for optional in (x_err, x_lim, x_fix):
        if optional is not None:
            assert len(optional) == n_par

    varnames = ["x%d" % i for i in range(n_par)]

    def wrapped(*args):
        # Re-pack the scalar arguments into the vector f expects.
        return f(np.array(args))

    for i, var in enumerate(varnames):
        kwargs[var] = x[i]
        if x_lim is not None:
            kwargs["limit_" + var] = x_lim[i]
        if x_err is not None:
            kwargs["error_" + var] = x_err[i]
        if x_fix is not None:
            kwargs["fix_" + var] = x_fix[i]

    return iminuit.Minuit(wrapped, forced_parameters=varnames,
                          errordef=errordef, **kwargs)
def iterativeOptim(self, nCats, addCut, start):
    """Iteratively optimize `nCats` category cut boundaries one at a time.

    `start` (mutated in place!) holds the cut boundaries; at step k the cut
    at index k+1 is optimized with the lower boundary at index k as its
    limit.  Returns the final cut list and the overall significance from
    self.sigMultCat.
    """
    currMin = 0.
    ovSigSq = 0.  # accumulated squared per-category significance
    nCat = 0
    cats = []
    currCut = start
    while nCat < nCats:
        # Maximize the significance of category nCat+1 over one cut value
        # (iminuit minimizes, hence the sign flip).
        minuCategorization = iminuit.Minuit(
            lambda cut: -self.optimizeOneCut(
                currCut, cut, nCat + 1, addCut=addCut),
            cut=currCut[nCat + 1],
            error_cut=0.5 * currCut[nCat + 1],
            limit_cut=(currCut[nCat], None))
        minuCategorization.migrad()
        optimCut = minuCategorization.np_values()
        fval = np.array([minuCategorization.fval])
        ovSigSq += fval**2
        currMin = optimCut[0]
        # Freeze the optimized boundary before moving to the next category.
        currCut[nCat + 1] = currMin
        if self.debug:
            print('Significance: ', fval)
            print('Cut Value: ', optimCut[0])
        nCat += 1
    ovSig = self.sigMultCat(currCut, addCut)
    return currCut, ovSig
def iminuit_fit(self, x, y, p0, limits=None):
    """Fit the three-illumination SPE spectrum with a Poisson likelihood.

    `y` holds the three observed histograms; the objective is the summed
    -2*log(Poisson) over all bins of all three.  Returns the fitted values.
    """
    fit = sipm_spe_fit  # kept for parity with the original implementation
    if limits is None:
        limits = {}

    def neg_log_like(norm1, norm2, norm3, eped, eped_sigma, spe, spe_sigma,
                     lambda_1, lambda_2, lambda_3, opct, pap, dap):
        expected = self.fit_function(x, norm1, norm2, norm3, eped,
                                     eped_sigma, spe, spe_sigma, lambda_1,
                                     lambda_2, lambda_3, opct, pap, dap)
        terms = [-2 * poisson.logpmf(obs, exp)
                 for obs, exp in zip(y, expected)]
        # nansum: bins where the Poisson pmf is undefined are ignored.
        return np.nansum(np.hstack(terms))

    fitter = iminuit.Minuit(neg_log_like, **p0, **limits,
                            print_level=0, pedantic=False, throw_nan=False)
    fitter.migrad()
    return fitter.values
def fix_parameter(self, parameter):
    '''
    Fix parameter <`parameter`>.

    **parameter** : string or int
        Name (or index) of the parameter to fix.
    '''
    # iminuit cannot fix a parameter in place: a new minimizer must be
    # created with the fix-flag set in its fit arguments.
    if isinstance(parameter, int):
        par_id = parameter
        parameter = self.parameter_names[parameter]
    elif parameter in self.parameter_names:
        par_id = self.parameter_names.index(parameter)
    else:
        raise ValueError("No parameter named '%s'" % (parameter, ))

    logger.info("Fixing parameter %d in Minuit" % (par_id, ))

    # Copy the current fit arguments, flip the fix flag, and rebuild.
    fit_args = self.__iminuit.fitarg.copy()
    fit_args['fix_%s' % parameter] = True
    self.__iminuit = iminuit.Minuit(self.function_to_minimize,
                                    print_level=self.print_level,
                                    forced_parameters=self.parameter_names,
                                    errordef=self.errordef,
                                    **fit_args)
def main():
    """Load a reference SPE spectrum, fit it, and drop into IPython.

    NOTE(review): the input path is hard-coded to a developer machine and
    the function ends in embed() -- this is a debugging script, not
    library code.
    """
    f = np.loadtxt(
        "/Users/Jason/Software/CHECLabPy_sandbox/mc_config/spe_spectrum_fix/SPEspectrum_57.50_2.500_0.080.dat",
        unpack=True)
    x = f[0]
    spe = f[1]

    def mini(spe_sigma, lambda_, opct, pap, dap):
        # Objective forwarded to the module-level minimize().
        return minimize(spe, x, spe_sigma, lambda_, opct, pap, dap)

    # Starting values and bounds for the five fitted SPE parameters.
    p0 = dict(spe_sigma=1, lambda_=1, opct=0.1, pap=0.1, dap=0.1)
    limits = dict(
        limit_spe_sigma=(0.01, 2),
        limit_lambda_=(0.01, 2),
        limit_opct=(0.01, 1),
        limit_pap=(0.01, 0.2),
        limit_dap=(0.01, 0.2),
    )
    m0 = iminuit.Minuit(
        mini,
        **p0,
        **limits,
        print_level=0,
        pedantic=False,
        throw_nan=True,
    )
    m0.migrad()
    embed()
def _perform_fit(self):
    """Run iminuit on the fit function and store the best-fit coefficients
    and their HESSE errors."""
    self.coeff = {}
    # Work on copies so the stored initial/limit/fix dicts stay pristine.
    self.p0 = self.initial.copy()
    bounds = self.limits.copy()
    fixed = self.fix.copy()
    self._prepare_params(self.p0, bounds, fixed)

    fitter = iminuit.Minuit(self._minimize_function,
                            forced_parameters=self.coeff_names,
                            print_level=0, pedantic=False, throw_nan=True,
                            **self.p0, **bounds, **fixed)
    fitter.migrad()
    self.coeff = fitter.values

    # HESSE can legitimately fail here; suppress its warning.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', HesseFailedWarning)
        fitter.hesse()
    self.errors = fitter.errors
def scan(self, par_name='alpha', par_min=0.8, par_max=1.2, par_nsteps=400):
    """Profile-scan the chi2 along one parameter.

    The scanned parameter is fixed at each grid value while all other model
    parameters (minus any in self.fixes) float.

    Returns
    -------
    (par_grid, chi2_grid) : ndarray, ndarray
        The scanned values and the minimized chi2 at each.
    """
    init_pars = {}
    # BUGFIX: dict.iteritems() does not exist on Python 3; .items() iterates
    # the same (name, value) pairs on both Python 2 and 3.
    for name, value in self.model.pars.items():
        init_pars[name] = value
        # 10% step size, with a floor for parameters starting at zero.
        init_pars['error_' + name] = abs(value) / 10. if value != 0 else 0.1
    init_pars['fix_' + par_name] = True

    par_grid = N.linspace(par_min, par_max, par_nsteps)
    chi2_grid = N.zeros(par_nsteps)

    if self.fixes:
        for key in self.fixes:
            init_pars[key] = self.fixes[key]
            init_pars['fix_' + key] = True

    for i in range(par_nsteps):
        value = par_grid[i]
        init_pars[par_name] = value
        mig = iminuit.Minuit(self, forced_parameters=self.model.pars_names,
                             print_level=1, errordef=1,
                             frontend=iminuit.frontends.ConsoleFrontend(),
                             **init_pars)
        mig.migrad()
        print('scanning: %s = %.5f chi2 = %.4f' % (par_name, value, mig.fval))
        chi2_grid[i] = mig.fval
    return par_grid, chi2_grid
def _get_minimizer(self, objective_and_grad, init_pars, init_bounds,
                   fixed_vals=None, do_grad=False):
    """Build an iminuit 2.x minimizer for the objective.

    Step sizes are bound-width / self.steps; fixed parameters get their
    value pinned and a zero step size.
    """
    step_sizes = [(hi - lo) / float(self.steps) for lo, hi in init_bounds]
    # Minuit wants a True/False flag per parameter.
    fixed_bools = [False] * len(init_pars)
    for index, val in (fixed_vals or []):
        fixed_bools[index] = True
        init_pars[index] = val
        step_sizes[index] = 0.0

    # Minuit requires jac to be a callable, so split value/gradient.
    if do_grad:
        def wrapped_objective(pars):
            return objective_and_grad(pars)[0]

        def jac(pars):
            return objective_and_grad(pars)[1]
    else:
        wrapped_objective, jac = objective_and_grad, None

    minuit = iminuit.Minuit(wrapped_objective, init_pars, grad=jac)
    minuit.errors = step_sizes
    minuit.limits = init_bounds
    minuit.fixed = fixed_bools
    minuit.print_level = self.verbose
    minuit.errordef = self.errordef
    return minuit
def _set_minuit_func(self, p_init, bounds, p_error=None):
    """Build the iminuit minimizer for the free model parameters.

    Parameters are mapped to generic names par_0..par_{N-1}; the mapping
    from model parameter names to minuit names/bounds/objects is kept in
    self.minuit_par_name_dict / self.minuit_bounds_dict / self.par_dict.

    **p_init** : starting values, **bounds** : (low, high) pairs,
    **p_error** : step sizes (defaults to 0.1 each).
    """
    if p_error == None:
        # Uniform default step size for every parameter.
        p_error = [0.1] * len(p_init)
    p_names = ['par_{}'.format(_) for _ in range(len(p_init))]
    error_names = ['error_par_{}'.format(_) for _ in range(len(p_init))]
    p_bound_names = ['limit_par_{}'.format(_) for _ in range(len(p_init))]
    # This dict contains all the keyword arguments for iminuit:
    # starting value, bounds, and step size per parameter.
    kwdarg = {}
    for n, p, bn, b, en, e in zip(p_names, p_init, p_bound_names, bounds,
                                  error_names, p_error):
        kwdarg[n] = p
        kwdarg[bn] = b
        kwdarg[en] = e
    # Bookkeeping: model parameter name -> minuit name / bounds / object.
    self.minuit_par_name_dict = {}
    self.minuit_bounds_dict = {}
    self.par_dict = {}
    for ID, par in enumerate(self.model.fit_par_free):
        self.minuit_par_name_dict[par.name] = p_names[ID]
        self.minuit_bounds_dict[par.name] = bounds[ID]
        self.par_dict[par.name] = par
    self.minuit_fun = iminuit.Minuit(fcn=self.chisq_func,
                                     forced_parameters=p_names,
                                     pedantic=False,
                                     errordef=1,
                                     **kwdarg)
def migrad(use_simplex=True, strategy=2):
    """Run MIGRAD (optionally preceded by SIMPLEX) on the enclosing-scope
    objective `fcn` starting from `initial_guess`.

    Errors, bounds, fixed flags and print level are taken from the
    enclosing scope when provided.  If HESSE fails afterwards, all errors
    are set to NaN.  Returns the Minuit object.
    """
    fitter = iminuit.Minuit(fcn, initial_guess)
    if print_level is not None:
        assert 0 <= print_level <= 3
        fitter.print_level = print_level
    fitter.errordef = iminuit.Minuit.LEAST_SQUARES
    fitter.strategy = strategy

    if parameter_error is not None:
        errors = list(parameter_error)
        if None in errors:
            # Fall back to the initial guess wherever no error was given.
            errors = [guess if err is None else err
                      for err, guess in zip(errors, initial_guess)]
        fitter.errors = errors
    if bounds is not None:
        fitter.limits = bounds
    if fix_parameter is not None:
        fitter.fixed = fix_parameter

    if use_simplex:
        fitter.simplex().migrad()
    else:
        fitter.migrad()
    if fitter.fmin.hesse_failed:
        fitter.errors[:] = [np.nan] * len(fitter.values)
    return fitter
def minimize(self, params=None):
    """Minimize the chi2.

    Runs an optional "fast" first pass in which only bias parameters float,
    then a full minimization seeded with that result.  Stores the final
    minimizer on self._minuit and sets self._run_flag.

    Parameters
    ----------
    params : dict, optional
        Dictionary of sample parameters, used to change starting value
        and/or fix parameters, by default None
    """
    t0 = time.time()
    kwargs = self._config.copy()
    if params is not None:
        for param, val in params['values'].items():
            kwargs[param] = val
            kwargs['fix_' + param] = params['fix'][param]

    # Do an initial "fast" minimization over biases
    bias_flag = bool(len([par for par in self._names if 'bias' in par]))
    if bias_flag:
        kwargs_init = kwargs.copy()
        # Fix every non-bias parameter so only the biases float.
        for param in self._names:
            if 'bias' not in param:
                kwargs_init['fix_' + param] = True
        minuit_init = iminuit.Minuit(self.chi2,
                                     forced_parameters=self._names,
                                     errordef=1, print_level=1,
                                     **kwargs_init)
        minuit_init.migrad()
        minuit_init.print_param()
        # Seed the full fit with the bias-only best-fit values.
        for param, value in minuit_init.values.items():
            kwargs[param] = value

    # Do the actual minimization
    self._minuit = iminuit.Minuit(self.chi2,
                                  forced_parameters=self._names,
                                  errordef=1, print_level=1,
                                  **kwargs)
    self._minuit.migrad()
    self._minuit.print_param()

    print("INFO: minimized in {}".format(time.time() - t0))
    stdout.flush()
    self._run_flag = True