def group2params(paramgroup, _larch=None):
    """take a Group of Parameter objects (and maybe other things)
    and put them into Larch's current fiteval namespace

    returns a lmfit Parameters set, ready for use in fitting
    """
    if _larch is None:
        return None
    if isinstance(paramgroup, ParameterGroup):
        return paramgroup.__params__

    fiteval = _larch.symtable._sys.fiteval
    params = Parameters(asteval=fiteval)

    if paramgroup is not None:
        for name in dir(paramgroup):
            par = getattr(paramgroup, name)
            if isParameter(par):
                params.add(name, value=par.value, vary=par.vary,
                           min=par.min, max=par.max,
                           brute_step=par.brute_step)
            else:
                fiteval.symtable[name] = par

        # now set any expression (that is, after all symbols are defined)
        for name in dir(paramgroup):
            par = getattr(paramgroup, name)
            if isParameter(par) and par.expr is not None:
                params[name].expr = par.expr

    return params
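# group2params relies on the Parameters' asteval symbol table: anything in
# the group that is not a Parameter is injected as a plain symbol, so that
# constraint expressions can refer to it. A minimal sketch of the same idea
# using lmfit's public usersyms hook instead of a Larch fiteval (the names
# here are illustrative, not part of the function above):
from lmfit import Parameters

offset = 10.0
p = Parameters(usersyms={'offset': offset})
p.add('a', value=2.0)
p.add('b', expr='a + offset')  # the expr sees the injected symbol
print(p['b'].value)            # 12.0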
def NIST_Test(DataSet, method='leastsq', start='start2', plot=True):
    NISTdata = ReadNistData(DataSet)
    resid, npar, dimx = Models[DataSet]
    y = NISTdata['y']
    x = NISTdata['x']

    params = Parameters()
    for i in range(npar):
        pname = 'b%i' % (i+1)
        cval = NISTdata['cert_values'][i]
        cerr = NISTdata['cert_stderr'][i]
        pval1 = NISTdata[start][i]
        params.add(pname, value=pval1)

    myfit = minimize(resid, params, method=method, args=(x,), kws={'y': y})

    digs = Compare_NIST_Results(DataSet, myfit, params, NISTdata)

    if plot and HASPYLAB:
        fit = -resid(params, x)
        pylab.plot(x, y, 'ro')
        pylab.plot(x, fit, 'k+-')
        pylab.show()

    return digs > 2
def __extract_pars(self):
    """
    __extract_pars()

    Extracts the parameters from the function list and converts them to a
    single lmfit Parameters instance, which can then be manipulated by the
    residual minimization routines.

    Parameters
    ----------
    None

    Returns
    -------
    An lmfit `Parameters` instance containing the parameters of *all* the
    fittable functions in a single place.
    """
    oPars = Parameters()
    for indFunc, cFunc in enumerate(self.funclist):
        cParlist = cFunc['params']
        for cPar in cParlist.values():
            oPars.add(self.__func_ident(indFunc) + cPar.name,
                      value=cPar.value, vary=cPar.vary,
                      min=cPar.min, max=cPar.max, expr=cPar.expr)
    return oPars
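# A minimal, self-contained sketch of the prefix-merging idea used by
# __extract_pars above: parameters from several sub-functions are copied into
# one lmfit Parameters set under unique prefixes so that a single minimizer
# can vary them all at once. The 'f0_'/'f1_' prefixes are illustrative
# assumptions, not the actual output of __func_ident; expressions are omitted
# here because their parameter names would need the same prefix rewriting.
from lmfit import Parameters

def merge_with_prefixes(parlists):
    merged = Parameters()
    for i, plist in enumerate(parlists):
        for par in plist.values():
            merged.add('f%d_%s' % (i, par.name), value=par.value,
                       vary=par.vary, min=par.min, max=par.max)
    return merged

p1 = Parameters()
p1.add('amp', value=1.0, min=0)
p2 = Parameters()
p2.add('amp', value=2.0, min=0)
print(list(merge_with_prefixes([p1, p2]).keys()))  # ['f0_amp', 'f1_amp']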
def test_pickle_parameters(self):
    # test that we can pickle a Parameters object
    p = Parameters()
    p.add('a', 10, True, 0, 100)
    p.add('b', 10, True, 0, 100, 'a * sin(1)')
    p.update_constraints()
    p._asteval.symtable['abc'] = '2 * 3.142'
    pkl = pickle.dumps(p, -1)
    q = pickle.loads(pkl)
    q.update_constraints()
    assert_(p == q)
    assert_(not p is q)

    # now test if the asteval machinery survived
    assert_(q._asteval.symtable['abc'] == '2 * 3.142')

    # check that unpickling of Parameters is not affected by expr that
    # refer to Parameter that are added later on. In the following
    # example var_0.expr refers to var_1, which is a Parameter later
    # on in the Parameters OrderedDict.
    p = Parameters()
    p.add('var_0', value=1)
    p.add('var_1', value=2)
    p['var_0'].expr = 'var_1'
    pkl = pickle.dumps(p)
    q = pickle.loads(pkl)
def test_bounded_jacobian():
    pars = Parameters()
    pars.add('x0', value=2.0)
    pars.add('x1', value=2.0, min=1.5)

    global jac_count
    jac_count = 0

    def resid(params):
        x0 = params['x0']
        x1 = params['x1']
        return np.array([10 * (x1 - x0*x0), 1-x0])

    def jac(params):
        global jac_count
        jac_count += 1
        x0 = params['x0']
        return np.array([[-20*x0, 10], [-1, 0]])

    out0 = minimize(resid, pars, Dfun=None)
    assert_paramval(out0.params['x0'], 1.2243, tol=0.02)
    assert_paramval(out0.params['x1'], 1.5000, tol=0.02)
    assert jac_count == 0

    out1 = minimize(resid, pars, Dfun=jac)
    assert_paramval(out1.params['x0'], 1.2243, tol=0.02)
    assert_paramval(out1.params['x1'], 1.5000, tol=0.02)
    assert jac_count > 5
def lmfitter(x, y):
    params = Parameters()
    params.add('m', value=0.01, vary=True)
    out = minimize(residual, params, args=(x, y))
    # report the fitted parameters (minimize() does not modify params in place)
    report_fit(out.params)
    return out.params['m'].value, 0.0, out.params['m'].stderr, 0.0
def parameters(ini_values):
    from lmfit import Parameters
    (k1, k2, k3, k4, k5, k6, k7, k8, k9, k10, k11,
     UA, mu_0, E, q, prim_stab_0, LDH_0) = ini_values

    p = Parameters()
    #          (       Name,       Value,  Vary,    Min,    Max)
    p.add_many((        'k1',          k1,  True,    1.6,    2.1),
               (        'k2',          k2,  True,    8.0,   46.0),
               (        'k3',          k3,  True,    0.0,    6.0),
               (        'k4',          k4,  True,    0.0,    2.1),
               (        'k5',          k5,  True,    0.0,   0.03),
               (        'k6',          k6,  True,    0.0,   39.0),
               (        'k7',          k7,  True,    0.0,    2.7),
               (        'k8',          k8,  True,    0.0,    7.9),
               (        'k9',          k9,  True,    0.0,   13.1),
               (       'k10',         k10,  True,    0.7,   10.9),
               (       'k11',         k11,  True,    2.0,    3.6),
               (        'UA',          UA,  True,  275.0,  402.0),
               (      'mu_0',        mu_0, False,    0.0,    0.1),
               (         'E',           E, False, 5000.0,   None),
               (         'q',           q, False,    0.0,   17.0),
               ('prim_stab_0', prim_stab_0, False,    0.5,    1.3),
               (     'LDH_0',       LDH_0, False,   None,   None))
    return p
def setup_model_params(self):
    """ Setup parameters """
    params = Parameters()
    params.add('g0', value=0.0, vary=False)
    params.add('g1', value=2.0, min=0.0)
    return params
def amplitude_of_best_fit_greybody(Trf=None, b=2.0, Lrf=None, zin=None):
    ''' Same as single_simple_flux_from_greybody, but made to build an
    amplitude lookup table '''

    nsed = 1e4
    lambda_mod = loggen(1e3, 8.0, nsed)  # microns
    nu_mod = c * 1.e6/lambda_mod  # Hz

    #cosmo = Planck15  #(H0 = 70.5 * u.km / u.s / u.Mpc, Om0 = 0.273)

    # 4 * pi * D_L^2; units are L_sun/(Jy x Hz)
    conversion = 4.0 * np.pi * (1.0E-13 * cosmo.luminosity_distance(zin) *
                                3.08568025E22)**2.0 / L_sun
    Lir = Lrf / conversion  # Jy x Hz

    Ain = 1.0e-36  # good starting parameter
    betain = b
    alphain = 2.0

    fit_params = Parameters()
    fit_params.add('Ain', value=Ain)

    # THE LM FIT IS HERE
    Pfin = minimize(sedint, fit_params,
                    args=(nu_mod, Lir.value, Trf/(1.+zin), b, alphain))
    #pdb.set_trace()
    return Pfin.params['Ain'].value
def test_multidimensional_fit_GH205():
    # test that you don't need to flatten the output from the objective
    # function. Tests regression for GH205.
    pos = np.linspace(0, 99, 100)
    xv, yv = np.meshgrid(pos, pos)
    f = lambda xv, yv, lambda1, lambda2: (np.sin(xv * lambda1)
                                          + np.cos(yv * lambda2))

    data = f(xv, yv, 0.3, 3)
    assert_(data.ndim == 2)

    def fcn2min(params, xv, yv, data):
        """ 2-D sinusoidal model, subtract data"""
        lambda1 = params['lambda1'].value
        lambda2 = params['lambda2'].value
        model = f(xv, yv, lambda1, lambda2)
        return model - data

    # create a set of Parameters
    params = Parameters()
    params.add('lambda1', value=0.4)
    params.add('lambda2', value=3.2)

    mini = Minimizer(fcn2min, params, fcn_args=(xv, yv, data))
    res = mini.minimize()
def build_fitmodel(self):
    """ use fit components to build model"""
    dgroup = self.get_datagroup()
    model = None
    params = Parameters()
    self.summary = {"components": [], "options": {}}
    for comp in self.fit_components.values():
        if comp.usebox is not None and comp.usebox.IsChecked():
            for parwids in comp.parwids.values():
                params.add(parwids.param)
            self.summary["components"].append((comp.mclass.__name__,
                                               comp.mclass_kws))
            thismodel = comp.mclass(**comp.mclass_kws)
            if model is None:
                model = thismodel
            else:
                model += thismodel

    self.fit_model = model
    self.fit_params = params

    self.plot1 = self.larch.symtable._plotter.plot1
    if dgroup is not None:
        i1, i2, xv1, xv2 = self.get_xranges(dgroup.x)
        xsel = dgroup.x[slice(i1, i2)]
        dgroup.xfit = xsel
        dgroup.yfit = self.fit_model.eval(self.fit_params, x=xsel)
        dgroup.ycomps = self.fit_model.eval_components(params=self.fit_params,
                                                       x=xsel)
    return dgroup
def isotropic(filename, v0, x0, rho, g=9.81):
    try:
        results = read_results_file(filename)
        Volume_array_normalized = v0 - results['External_volume']
        spring_position_array_normalized = x0 - results['Ylow']
        params = Parameters()
        params.add('A', value=1)
        params.add('B', value=0)
        try:
            result = minimize(residual_isotropic, params,
                              args=(spring_position_array_normalized,
                                    Volume_array_normalized))
        except TypeError:
            result = minimize(residual_isotropic, params,
                              args=(spring_position_array_normalized,
                                    Volume_array_normalized),
                              method="nelder")
        v = result.params.valuesdict()
        x_th = np.arange(np.amin(Volume_array_normalized),
                         np.amax(Volume_array_normalized), 0.0000001)
        y_th = v['A'] * x_th + v['B']
        # report_fit(result.params, min_correl=0.5)
        filename_list = filename.split("\\")
        txt = filename_list[-1].split("_")
        txt = "_".join(txt[0:-1])
        k = -rho * g / v['A']
        print("Stiffness: " + str(k))
        list_results = [txt, k, v['B']]
        return list_results
    except Exception:
        return None
def Fit_frvsT(temp, freq, fitpara):
    # create a set of Parameters
    params = Parameters()
    print(fitpara)
    params.add_many(('CPWC',   fitpara[0], False, None, None, None),
                    ('CPWG',   fitpara[1], False, None, None, None),
                    ('thick',  fitpara[2], False, None, None, None),
                    ('BCS',    fitpara[3], False, None, None, None),
                    ('Tc',     fitpara[4], True,  None, None, None),
                    ('f0',     fitpara[5], False, None, None, None),
                    ('sigman', fitpara[6], False, None, None, None),
                    ('A',      fitpara[7], True,  fitpara[7]*0.9,
                     fitpara[7]*1.1, None))

    # do fit, here with leastsq model
    result = minimize(Fit_frvsT_Func, params, args=(temp, freq))

    # calculate final result
    residual = result.residual
    Tc = result.params['Tc'].value
    Tc_err = np.abs(result.params['Tc'].stderr/Tc)
    A = result.params['A'].value
    A_err = np.abs(result.params['A'].stderr/A)
    print(fit_report(result))
    return Tc, Tc_err, A, A_err, fit_report(result)
def optimize_density_and_scaling(self, density_min, density_max, bkg_min,
                                 bkg_max, iterations, callback_fcn=None,
                                 output_txt=None):
    params = Parameters()
    params.add("density", value=self.density, min=density_min,
               max=density_max)
    params.add("background_scaling", value=self.background_scaling,
               min=bkg_min, max=bkg_max)

    self.iteration = 0

    def optimization_fcn(params):
        density = params['density'].value
        background_scaling = params['background_scaling'].value

        self.background_spectrum.scaling = background_scaling
        self.calculate_spectra()
        self.optimize_sq(iterations, fcn_callback=callback_fcn)
        r, fr = self.limit_spectrum(self.fr_spectrum, 0, self.r_cutoff).data

        output = (-fr - 4 * np.pi * convert_density_to_atoms_per_cubic_angstrom(
            self.composition, density) * r) ** 2

        self.write_output(u'{} X: {:.3f} Den: {:.3f}'.format(
            self.iteration, np.sum(output) / (r[1] - r[0]), density))
        self.iteration += 1
        return output

    minimize(optimization_fcn, params)
    self.write_fit_results(params)
class rabifit:
    def __init__(self, nmax=10000):
        self.params = Parameters()
        self.params.add('nbar', value=.1, vary=False, min=0.)
        self.params.add('delta', value=0, min=-0.05, max=.1, vary=False)
        self.params.add('time_2pi', value=20, vary=True, min=0.1)
        self.params.add('coh_time', value=2000, vary=False, min=0)
        self.params.add('eta', value=0.06, vary=False, min=0)
        self.eta = 0.06
        self.sideband = 0
        self.result = None
        self.nmax = nmax

    def residual(self, params, x, data=None, eps=None):
        # unpack parameters: extract .value attribute for each parameter
        nbar = params['nbar'].value
        delta = params['delta'].value
        time_2pi = params['time_2pi'].value
        coh_time = params['coh_time'].value
        eta = params['eta'].value

        te = rabi_flop_time_evolution(self.sideband, eta, nmax=self.nmax)
        model = te.compute_evolution_decay_thermal(abs(coh_time), nbar=nbar,
                                                   delta=delta,
                                                   time_2pi=time_2pi, t=x)
        if data is None:
            return model
        if eps is None:
            return (model - data)
        return (model - data)/eps

    def minimize(self, data):
        self.result = minimize(self.residual, self.params,
                               args=(data[:, 0], data[:, 1], data[:, 2]+.01))
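# The residual method above follows a common lmfit convention: called with
# data=None it returns the bare model (useful for plotting a fitted curve),
# otherwise it returns the plain or eps-weighted difference that minimize()
# reduces. A minimal sketch of the same pattern with a toy exponential-decay
# model; the names here are illustrative, not part of the class above.
import numpy as np
from lmfit import Parameters, minimize

def residual(params, x, data=None, eps=None):
    model = params['amp'].value * np.exp(-x / params['tau'].value)
    if data is None:
        return model               # evaluate the model only
    if eps is None:
        return model - data        # unweighted residual
    return (model - data) / eps    # weighted residual

x = np.linspace(0, 5, 50)
truth = 3.0 * np.exp(-x / 1.5)
pars = Parameters()
pars.add('amp', value=1.0, min=0)
pars.add('tau', value=1.0, min=0.01)
out = minimize(residual, pars, args=(x,), kws={'data': truth})
best_curve = residual(out.params, x)  # best-fit curve via the same function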
def rescale_reframe(scisub, refsub, verbose=False):
    """Fit a single scaling factor between the science and reference frames.

    Args:
        scisub (ndarray): science sub-image.
        refsub (ndarray): reference sub-image.
        verbose (bool): if True, print the fitted scale factor.

    Returns:
        The rescaled residual image from `partial_res_sigma_image`.
    """
    params = Parameters()
    params.add('sigma', 1.0, True, 0.0, inf)
    image_sub = Model(res_sigma_image_flat,
                      independent_vars=['scisub', 'refsub'])
    image_sub_results = image_sub.fit(data=scisub.ravel(), params=params,
                                      scisub=scisub, refsub=refsub)

    if verbose:
        print('Sigma of Rescale: {}'.format(
            image_sub_results.params['sigma'].value))

    return partial_res_sigma_image(image_sub_results.params['sigma'].value)
def fit_for_b(bins, x, y, error=None):
    """ Fit a fractional energy loss histogram for the parameter b """
    magic_ice_const = 0.917

    outer_bounds = (min(bins), max(bins))  # Beginning and ending position
    # Returns the bin edges on either side of a point
    select_bounds = np.vectorize(lambda xx: mlc.get_bounding_elements(xx, bins))
    # Proportional to Delta E (comes from -dE/dx = a + b*E)
    E_diff = lambda xx, b: np.exp(-b*xx[1]) - np.exp(-b*xx[0])
    # We are fitting to the ratio of differences
    fit_func = lambda xx, b: (E_diff(select_bounds(xx), b*magic_ice_const) /
                              E_diff(outer_bounds, b*magic_ice_const))

    params = Parameters()
    params.add('b', value=0.4*10**(-3))  # Add b as a fit parameter

    if error is not None:
        l_fit_func = lambda params, x, data: np.sqrt(
            (fit_func(x, params['b']) - data)**2 / error**2)
    else:
        l_fit_func = lambda params, x, data: fit_func(x, params['b']) - data

    result = minimize(l_fit_func, params, args=(x, y))

    b = result.params['b'].value

    if b == 0.4*10**(-3):
        print(x)
        print(y)
        print(fit_func(x, 0.36*10**(-3)))
        raise ValueError("Fit doesn't make sense.")

    return b
def replot(self):
    params = Parameters()
    for key, value in self.paramDict.items():
        params.add(key, value=float(value.text()))
    for i in np.arange(self.fitNumber):
        sequence = 'g' + str(i+1) + '_'
        center_value = params[sequence+'center'].value
        params[sequence+'center'].set(center_value, min=center_value-0.05,
                                      max=center_value+0.05)
        sigma_value = params[sequence+'sigma'].value
        params[sequence+'sigma'].set(sigma_value, min=sigma_value-0.05,
                                     max=sigma_value+0.05)
        ampl_value = params[sequence+'amplitude'].value
        params[sequence+'amplitude'].set(ampl_value, min=ampl_value-0.5,
                                         max=ampl_value+0.5)

    result = minimize(lmLeast(self.fitNumber).residuals, params,
                      args=(self.fitResult.fitDf['field'],
                            self.fitResult.fitDf['IRM_norm']),
                      method='cg')
    self.params = result.params

    #FitMplCanvas.fitPlot(self)
    pdf_adjust = lmLeast(self.fitNumber).func(
        self.fitResult.fitDf['field'].values, self.params)
    pdf_adjust = pdf_adjust/np.max(np.sum(pdf_adjust, axis=0))

    ax = self.axes
    fit_plots(ax=ax, xfit=self.fitResult.fitDf['field'],
              xraw=self.fitResult.rawDf['field_log'],
              yfit=np.array(pdf_adjust).transpose(),
              yraw=self.fitResult.rawDf['rem_grad_norm'])
def test_args_kwds_are_used(self):
    # check that user defined args and kwds make their way into the user
    # function
    a = [1., 2.]
    x = np.linspace(0, 10, 11)
    y = a[0] + 1 + 2 * a[1] * x

    par = Parameters()
    par.add('p0', 1.5)
    par.add('p1', 2.5)

    def fun(x, p, *args, **kwds):
        assert_equal(args, a)
        return args[0] + p['p0'] + p['p1'] * a[1] * x

    g = CurveFitter(fun, (x, y), par, fcn_args=a)
    res = g.fit()
    assert_almost_equal(values(res.params), [1., 2.])

    d = {'a': 1, 'b': 2}

    def fun(x, p, *args, **kwds):
        return kwds['a'] + p['p0'] + p['p1'] * kwds['b'] * x

    g = CurveFitter(fun, (x, y), par, fcn_kws=d)
    res = g.fit()
    assert_almost_equal(values(res.params), [1., 2.])
def NIST_Test(DataSet, start='start2', plot=True):
    NISTdata = ReadNistData(DataSet)
    resid, npar, dimx = Models[DataSet]
    y = NISTdata['y']
    x = NISTdata['x']

    params = Parameters()
    for i in range(npar):
        pname = 'b%i' % (i+1)
        cval = NISTdata['cert_values'][i]
        cerr = NISTdata['cert_stderr'][i]
        pval1 = NISTdata[start][i]
        params.add(pname, value=pval1)

    myfit = Minimizer(resid, params, fcn_args=(x,), fcn_kws={'y': y},
                      scale_covar=True)
    myfit.prepare_fit()
    myfit.leastsq()

    digs = Compare_NIST_Results(DataSet, myfit, params, NISTdata)

    if plot and HASPYLAB:
        fit = -resid(params, x)
        pylab.plot(x, y, 'r+')
        pylab.plot(x, fit, 'ko--')
        pylab.show()

    return digs > 2
def as_parameter_dict(self) -> Parameters:
    """
    Creates a lmfit.Parameters dictionary from the group.

    Notes
    -----
    Only for internal use.
    """
    params = Parameters()
    for label, p in self.all(seperator="_"):
        p.name = "_" + label
        if p.non_neg:
            p = copy.deepcopy(p)
            if p.value == 1:
                p.value += 1e-10
            if p.min == 1:
                p.min += 1e-10
            if p.max == 1:
                p.max += 1e-10
            else:
                try:
                    p.value = log(p.value)
                    p.min = log(p.min) if np.isfinite(p.min) else p.min
                    p.max = log(p.max) if np.isfinite(p.max) else p.max
                except Exception:
                    raise Exception("Could not take log of parameter"
                                    f" '{label}' with value '{p.value}'")
        params.add(p)
    return params
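# A standalone sketch of the log re-parameterization applied above to
# non-negative parameters: the optimizer varies log(p), which keeps p > 0
# without hard bounds, and exp() recovers the physical value. Illustrative
# only; the class above stores the transformed values on the Parameter itself.
import numpy as np
from lmfit import Parameters, minimize

def resid(params, x, data):
    rate = np.exp(params['log_rate'].value)  # back-transform inside the model
    return data - np.exp(-rate * x)

x = np.linspace(0, 4, 40)
data = np.exp(-0.7 * x)
p = Parameters()
p.add('log_rate', value=np.log(0.2))         # unconstrained in log space
out = minimize(resid, p, args=(x, data))
print(np.exp(out.params['log_rate'].value))  # ~0.7, guaranteed positive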
def autobk(energy, mu, rbkg=1, nknots=None, group=None, e0=None,
           kmin=0, kmax=None, kw=1, dk=0, win=None, vary_e0=True,
           chi_std=None, nfft=2048, kstep=0.05, _larch=None):
    if _larch is None:
        raise Warning("cannot calculate autobk spline -- larch broken?")

    # get array indices for rbkg and e0: irbkg, ie0
    rgrid = np.pi/(kstep*nfft)
    if rbkg < 2*rgrid:
        rbkg = 2*rgrid
    irbkg = int(1.01 + rbkg/rgrid)
    if e0 is None:
        e0 = find_e0(energy, mu, group=group, _larch=_larch)
    ie0 = _index_nearest(energy, e0)

    # save ungridded k (kraw) and gridded k (kout)
    # and ftwin (*k-weighting) for FT in residual
    kraw = np.sqrt(ETOK*(energy[ie0:] - e0))
    if kmax is None:
        kmax = max(kraw)
    kout = kstep * np.arange(int(1.01+kmax/kstep))
    ftwin = kout**kw * ftwindow(kout, xmin=kmin, xmax=kmax,
                                window=win, dx=dk)

    # calc k-value and initial guess for y-values of spline params
    nspline = max(4, min(60, 2*int(rbkg*(kmax-kmin)/np.pi) + 1))
    spl_y = np.zeros(nspline)
    spl_k = np.zeros(nspline)
    for i in range(nspline):
        q = kmin + i*(kmax-kmin)/(nspline - 1)
        ik = _index_nearest(kraw, q)
        i1 = min(len(kraw)-1, ik + 5)
        i2 = max(0, ik - 5)
        spl_k[i] = kraw[ik]
        spl_y[i] = (2*mu[ik] + mu[i1] + mu[i2]) / 4.0

    # get spline representation: knots, coefs, order=3
    # coefs will be varied in fit.
    knots, coefs, order = splrep(spl_k, spl_y)

    # set fit parameters from initial coefficients
    fparams = Parameters()
    for i, v in enumerate(coefs):
        fparams.add("c%i" % i, value=v, vary=i < len(spl_y))

    fitkws = dict(knots=knots, order=order, kraw=kraw, mu=mu[ie0:],
                  irbkg=irbkg, kout=kout, ftwin=ftwin, nfft=nfft)
    # do fit
    fit = Minimizer(__resid, fparams, fcn_kws=fitkws)
    fit.leastsq()

    # write final results
    coefs = [p.value for p in fparams.values()]
    bkg, chi = spline_eval(kraw, mu[ie0:], knots, coefs, order, kout)
    obkg = np.zeros(len(mu))
    obkg[:ie0] = mu[:ie0]
    obkg[ie0:] = bkg
    if _larch.symtable.isgroup(group):
        setattr(group, 'bkg', obkg)
        setattr(group, 'chie', mu-obkg)
        setattr(group, 'k', kout)
        setattr(group, 'chi', chi)
def pca_fit(group, pca_model, ncomps=None, rescale=True, _larch=None):
    """
    fit a spectrum from a group to a PCA training model from pca_train()

    Arguments
    ---------
      group       group with data to fit
      pca_model   PCA model as found from pca_train()
      ncomps      number of components to include
      rescale     whether to allow data to be renormalized (True)

    Returns
    -------
    None, the group will have a subgroup named `pca_result` created
          with the following members:

          x           x or energy value from model
          ydat        input data interpolated onto `x`
          yfit        linear least-squares fit using model components
          weights     weights for PCA components
          chi_square  goodness-of-fit measure
          pca_model   the input PCA model
    """
    # generate arrays and interpolate components onto the unknown x array
    xdat, ydat = get_arrays(group, pca_model.arrayname)
    if xdat is None or ydat is None:
        raise ValueError("cannot get arrays for arrayname='%s'"
                         % pca_model.arrayname)

    ydat = interp(xdat, ydat, pca_model.x, kind='cubic')

    params = Parameters()
    params.add('scale', value=1.0, vary=True, min=0)

    if ncomps is None:
        ncomps = len(pca_model.components)
    comps = pca_model.components[:ncomps].transpose()

    if rescale:
        weights, chi2, rank, s = np.linalg.lstsq(comps, ydat-pca_model.mean)
        yfit = (weights * comps).sum(axis=1) + pca_model.mean

        result = minimize(_pca_scale_resid, params, method='leastsq',
                          gtol=1.e-5, ftol=1.e-5, xtol=1.e-5, epsfcn=1.e-5,
                          kws=dict(ydat=ydat, comps=comps,
                                   pca_model=pca_model))
        scale = result.params['scale'].value
        ydat *= scale

        weights, chi2, rank, s = np.linalg.lstsq(comps, ydat-pca_model.mean)
        yfit = (weights * comps).sum(axis=1) + pca_model.mean

    else:
        weights, chi2, rank, s = np.linalg.lstsq(comps, ydat-pca_model.mean)
        yfit = (weights * comps).sum(axis=1) + pca_model.mean
        scale = 1.0

    group.pca_result = Group(x=pca_model.x, ydat=ydat, yfit=yfit,
                             pca_model=pca_model, chi_square=chi2[0],
                             data_scale=scale, weights=weights)
    return
def define_orientation_matrix(self):
    from lmfit import Parameters
    p = Parameters()
    for i in range(3):
        for j in range(3):
            p.add('U%d%d' % (i, j), self.Umat[i, j])
    self.init_p = self.Umat
    return p
def parameters(ini_values):
    from lmfit import Parameters
    from Adjust_Kinetics import params
    p = Parameters()
    pi = params(ini_values)
    for i, tup in enumerate(pi):
        p.add_many(tup)
    return p
def simple_flux_from_greybody(lambdavector, Trf=None, b=None, Lrf=None,
                              zin=None, ngal=None):
    '''
    Return flux densities at any wavelength of interest (in the range
    1-10000 micron), assuming a galaxy (at given redshift) greybody
    spectral energy distribution (SED), with a power law replacing the
    Wien part of the spectrum to account for the variability of dust
    temperatures within the galaxy. The two different functional forms
    are stitched together by imposing that the two functions and their
    first derivatives coincide. The code contains the nitty-gritty
    details explicitly.

    Cosmology assumed: H0=70.5, Omega_M=0.274, Omega_L=0.726
    (Hinshaw et al. 2009)

    Inputs:
    alphain = spectral index of the power law replacing the Wien part of
              the spectrum, to account for the variability of dust
              temperatures within a galaxy [default = 2; see Blain 1999
              and Blain et al. 2003]
    betain = spectral index of the emissivity law for the greybody
             [default = 2; see Hildebrand 1985]
    Trf = rest-frame temperature [in K; default = 20K]
    Lrf = rest-frame FIR bolometric luminosity [in L_sun; default = 10^10]
    zin = galaxy redshift [default = 0.001]
    lambdavector = array of wavelengths of interest [in microns;
                   default = (24, 70, 160, 250, 350, 500)]

    AUTHOR:
    Lorenzo Moncelsi [[email protected]]

    HISTORY:
    20June2012: created in IDL
    November2015: converted to Python
    '''

    nwv = len(lambdavector)
    nuvector = c * 1.e6 / lambdavector  # Hz

    nsed = 1e4
    lambda_mod = loggen(1e3, 8.0, nsed)  # microns
    nu_mod = c * 1.e6/lambda_mod  # Hz

    # Lorenzo's version had: H0=70.5, Omega_M=0.274, Omega_L=0.726
    # (Hinshaw et al. 2009)
    cosmo = FlatLambdaCDM(H0=70.5 * u.km / u.s / u.Mpc, Om0=0.273)

    # 4 * pi * D_L^2; units are L_sun/(Jy x Hz)
    conversion = 4.0 * np.pi * (1.0E-13 * cosmo.luminosity_distance(zin) *
                                3.08568025E22)**2.0 / L_sun
    Lir = Lrf / conversion  # Jy x Hz

    Ain = np.zeros(ngal) + 1.0e-36  # good starting parameter
    betain = np.zeros(ngal) + b
    alphain = np.zeros(ngal) + 2.0

    fit_params = Parameters()
    fit_params.add('Ain', value=Ain)
    #fit_params.add('Tin', value=Trf/(1.+zin), vary=False)
    #fit_params.add('betain', value=b, vary=False)
    #fit_params.add('alphain', value=alphain, vary=False)

    #pdb.set_trace()
    # THE LM FIT IS HERE
    #Pfin = minimize(sedint, fit_params, args=(nu_mod, Lir.value, ngal))
    Pfin = minimize(sedint, fit_params,
                    args=(nu_mod, Lir.value, ngal, Trf/(1.+zin), b, alphain))
    #pdb.set_trace()
    flux_mJy = sed(Pfin.params, nuvector, ngal, Trf/(1.+zin), b, alphain)

    return flux_mJy
def test_add_many_params(self):
    # test that we can add many parameters, but only parameters are added.
    a = Parameter('a', 1)
    b = Parameter('b', 2)

    p = Parameters()
    p.add_many(a, b)

    assert_(list(p.keys()) == ['a', 'b'])
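# Besides Parameter instances, add_many also accepts plain tuples in
# (name, value, vary, min, max, expr, brute_step) order, which is the
# convention several snippets in this collection rely on. A small
# illustrative sketch; shorter tuples fall back to the defaults:
from lmfit import Parameters

p = Parameters()
p.add_many(('a', 1.0, True, 0.0, 10.0),
           ('b', 2.0, False))
print(list(p.keys()))  # ['a', 'b']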
def buildLmfitParameters(self, parameters):
    lp = Parameters()
    for p in parameters:
        lp.add(p.name, value=p.init, min=p.min, max=p.max)
        for k in p.kws:
            setattr(lp[p.name], k, p.kws[k])
    return lp
def FPolyp(x, data, Np):
    # generate parameters for a polynomial of order Np
    params = FPolypGuess(x, data, Np)
    p = Parameters()
    p.add('Np', value=Np, vary=False)
    for ii in range(Np + 1):
        p.add('c%s' % ii, value=params[ii+1], vary=True)
    return p
def test_eval(self):
    # check that eval() works with usersyms and parameter values
    def myfun(x):
        return 2.0 * x

    p = Parameters(usersyms={"myfun": myfun})
    p.add("a", value=4.0)
    p.add("b", value=3.0)

    assert_almost_equal(p.eval("myfun(2.0) * a"), 16)
    assert_almost_equal(p.eval("b / myfun(3.0)"), 0.5)
def fitcorrplot(quadScanPath, fitNoiseQ=False):
    # path to CSV with 2-quad scan data
    #quadScanPath = 'CorrelationPlot-QUAD_IN20_511_BCTRL-2016-10-20-053315.mat'
    nquads = 2  # maybe one day we'll try 3 quad scans

    # import data to be fitted
    [data, legend] = corrplot(quadScanPath)  # read in corrplot data

    # meshgrid for the fit
    #x = y = np.linspace(-3, 3, ngrid)
    x = np.array([a for a in data[0]])
    y = np.array([a for a in data[1]])
    z = np.array([a for a in data[2]])
    dz = np.array([a for a in data[3]])

    # grab peak value & locate peak
    zpeak = np.max(z)
    elpeak = (z == zpeak).nonzero()
    xpeak = np.mean(x[elpeak])
    ypeak = np.mean(y[elpeak])
    dzpeak = np.mean(dz[elpeak])

    # grab half-max region & find width of peak
    elhm = (z >= 0.5 * zpeak).nonzero()
    xfwhm = np.max(x[elhm]) - np.min(x[elhm])
    yfwhm = np.max(y[elhm]) - np.min(y[elhm])

    # in case we want to fit the map of fluctuations
    if (fitNoiseQ):
        z = dz
        dzpeak = 1.

    # define objective function: returns the array to be minimized
    def fcn2min(params, x, y, z, dz):
        """ 2-D correlated Gaussian model, subtract data"""
        amp = params['amp']
        xm = params['xm']
        sx = params['sx']
        ym = params['ym']
        sy = params['sy']
        rho = params['rho']
        if fitNoiseQ:
            bg = params['bg']
            model = bg + amp * np.exp(-0.5 * ((x - xm) * (x - xm) / sx / sx
                                              + (y - ym) * (y - ym) / sy / sy
                                              - 2. * rho * (x - xm) * (y - ym) / sx / sy)
                                      / (1. - rho * rho))
        else:
            model = amp * np.exp(-0.5 * ((x - xm) * (x - xm) / sx / sx
                                         + (y - ym) * (y - ym) / sy / sy
                                         - 2. * rho * (x - xm) * (y - ym) / sx / sy)
                                 / (1. - rho * rho))
        # replace dzpeak with dz to include some uncertainty in the fit
        resid = (model - z) / dzpeak
        #print(np.sum(resid**2))
        return resid.flatten()

    # create a set of Parameters
    params = Parameters()
    params.add('amp', value=zpeak, min=0., max=zpeak + 3. * dzpeak)
    params.add('xm', value=xpeak, min=xpeak - xfwhm, max=xpeak + xfwhm)
    params.add('sx', value=xfwhm / 2.35, min=0.5 * xfwhm / 2.35, max=2. * xfwhm)
    params.add('ym', value=ypeak, min=ypeak - yfwhm, max=ypeak + yfwhm)
    params.add('sy', value=yfwhm / 2.35, min=0.5 * yfwhm / 2.35, max=2. * yfwhm)
    params.add('rho', value=0., min=-1., max=1.)
    if fitNoiseQ:
        params.add('bg', value=0., min=-0.1, max=0.1)

    # do fit, here with leastsq model
    minner = Minimizer(fcn2min, params, fcn_args=(x, y, z, dz))
    kws = {'options': {'maxiter': 100}}
    result = minner.minimize()

    # calculate final result
    #final = data + result.residual

    # write error report
    report_fit(result)
    #print(result.params.items())
    #print([par for pname, par in result.params.items()])
    if fitNoiseQ:
        parnames = ['amp', 'xm', 'sx', 'ym', 'sy', 'rho', 'bg']
    else:
        parnames = ['amp', 'xm', 'sx', 'ym', 'sy', 'rho']
    parvals = [result.params.valuesdict()[p] for p in parnames]
    #print(parnames)
    #print(parvals)

    return parvals
    I = current[0]
    sol = [t*k*I/Cr + k*I*Jr/Cr**2*np.exp(-t*Cr/Jr) - k*I*Jr/Cr**2
           for t in time]
    print(k*I/Cr)
    return sol

def energy(params, data):
    E = 0.
    for run in data:
        y = predict(params, run[0], run[1])
        for i in range(len(run[2])):
            E = E + ((y[i]-run[2][i])/run[2][-1])**2
    print(params.valuesdict(), "\n", E)
    return E

params = Parameters()
#params.add('k', value=0.0369, min=.0001, max=.5)
params.add('Jr', value=0.001184, min=.00001, max=.5)
params.add('Cr', value=0.00001, min=.0, max=.5)

runs = []
runs = runs + ['vise-constant-0.25a.csv']
#runs = runs + ['vise-constant-0.375a.csv']
runs = runs + ['vise-constant-0.5a.csv']
#runs = runs + ['vise-constant-0.625a.csv']
runs = runs + ['vise-constant-0.75a.csv']
#runs = runs + ['vise-constant-0.875a.csv']
runs = runs + ['vise-constant-1a.csv']
#runs = runs + ['vise-constant--1a.csv']
print(runs)
from lmfit import Parameters, minimize, report_fit
from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign

try:
    import pylab
    HASPYLAB = True
except ImportError:
    HASPYLAB = False

p_true = Parameters()
p_true.add('amp', value=14.0)
p_true.add('period', value=5.33)
p_true.add('shift', value=0.123)
p_true.add('decay', value=0.010)

def residual(pars, x, data=None):
    argu = (x * pars['decay'])**2
    shift = pars['shift']
    if abs(shift) > pi / 2:
        shift = shift - sign(shift) * pi
    model = pars['amp'] * sin(shift + x / pars['period']) * exp(-argu)
    if data is None:
        return model
    return (model - data)

n = 2500
xmin = 0.
xmax = 250.0
def get_params(Ug, Ue, Dg, De, vary_groundstate):
    # make parameters
    params = Parameters()

    if vary_groundstate:
        raise NotImplementedError("Don't vary groundstate")
        # ground state
        for k in range(len(Ug)):
            for l in range(len(Ug[k])):
                val = Ug[k][l]
                params.add('Ug' + str(k) + str(l), value=val, vary=True)
                # adding Born-Oppenheimer correction
                params.add('Dg' + str(k) + str(l), value=0.0, vary=False)
    else:
        # use Bernath values and don't vary the values
        Ug, Dg = get_Ug_Bernath()
        for k in range(len(Ug)):
            for l in range(len(Ug[k])):
                val = Ug[k][l]
                params.add('Ug' + str(k) + str(l), value=val, vary=False)
                # adding Born-Oppenheimer correction
                val = Dg[k][l]
                params.add('Dg' + str(k) + str(l), value=val, vary=False)

    # excited state
    for k in range(len(Ue)):
        for l in range(len(Ue[k])):
            val = Ue[k][l]
            params.add('Ue' + str(k) + str(l), value=val, vary=True)

            # Born-Oppenheimer corrections for excited state
            # Should throw error if Ue and De don't have the same dimensions
            val = De[k][l]
            # adding Born-Oppenheimer correction
            if not val == 0.0:
                params.add('De' + str(k) + str(l), value=val, vary=True)
            else:
                params.add('De' + str(k) + str(l), value=0.0, vary=False)

    return params
def create_model(self):
    params = Parameters()
    params.add('ocv', value=self.voltage[-1], min=0, max=10)
    taus = [math.pow(10, i) for i in range(self.circuits)]
    weights = np.zeros(self.circuits)

    params.add('t0', value=taus[0], min=0.01)
    params.add('w0', value=weights[0])

    for i in range(1, self.circuits):
        params.add('delta' + str(i), value=taus[i] - taus[i - 1], min=0.0)
        params.add('t' + str(i), expr='delta' + str(i) + '+t' + str(i - 1))
        params.add('w' + str(i), value=weights[i])

    for i in range(self.circuits, 5):
        params.add('t' + str(i), value=1, vary=False)
        params.add('w' + str(i), value=0, vary=False)

    self.params = params
    self.model = Model(self._model)
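# The loop above enforces an ordering t0 <= t1 <= ... by varying non-negative
# increments ('delta' parameters) and defining each later time constant
# through an expr. A minimal standalone sketch of that constraint idiom:
from lmfit import Parameters

params = Parameters()
params.add('t0', value=1.0, min=0.01)
params.add('delta1', value=9.0, min=0.0)  # vary the gap, not t1 itself
params.add('t1', expr='delta1+t0')        # derived, always >= t0
params.pretty_print()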
def lcf(collection: Collection, fit_region: str = 'xanes',
        fit_range: list = [-inf, inf], scantag: str = 'scan',
        reftag: str = 'ref', kweight: int = 2, sum_one: bool = True,
        method: str = 'leastsq') -> Dataset:
    """Performs linear combination fitting on a XAFS spectrum.

    Parameters
    ----------
    collection
        Collection containing the group for LCF analysis and the groups
        with the reference scans.
    fit_region
        XAFS region to perform the LCF. Accepted values are 'dxanes',
        'xanes', or 'exafs'. The default is 'xanes'.
    fit_range
        Domain range in absolute values. Energy units are expected for
        'dxanes' or 'xanes', while wavenumber (k) units are expected for
        'exafs'. The default is [-:data:`~numpy.inf`, :data:`~numpy.inf`].
    scantag
        Key to filter the scan group in the collection based on the
        ``tags`` attribute. The default is 'scan'.
    reftag
        Key to filter the reference groups in the collection based on the
        ``tags`` attribute. The default is 'scan'.
    kweight
        Exponent for weighting chi(k) by k^kweight. Only valid for
        ``fit_region='exafs'``. The default is 2.
    sum_one
        Conditional to force sum of fractions to be one.
        The default is True.
    method
        Fitting method. Currently only local optimization methods are
        supported. See the :func:`~lmfit.minimizer.minimize` function of
        ``lmfit`` for a list of valid methods. The default is ``leastsq``.

    Returns
    -------
    :
        Fit group with the following arguments:

        - ``energy`` : array with energy values. Returned only if
          ``fit_region='xanes'`` or ``fit_region='dxanes'``.
        - ``k`` : array with wavenumber values. Returned only if
          ``fit_region='exafs'``.
        - ``scangroup``: name of the group containing the fitted spectrum.
        - ``refgroups``: list with names of groups containing
          reference spectra.
        - ``scan`` : array with values of the fitted spectrum.
        - ``ref`` : array with interpolated values for each
          reference spectrum.
        - ``fit`` : array with fit result.
        - ``min_pars`` : object with the optimized parameters and
          goodness-of-fit statistics.
        - ``lcf_pars`` : dictionary with lcf parameters.

    Raises
    ------
    TypeError
        If ``collection`` is not a valid Collection instance.
    AttributeError
        If ``collection`` has no ``tags`` attribute.
    AttributeError
        If groups have no ``energy`` or ``norm`` attribute. Only verified
        if ``fit_region='dxanes'`` or ``fit_region='xanes'``.
    AttributeError
        If groups have no ``k`` or ``chi`` attribute. Only verified if
        ``fit_region='exafs'``.
    KeyError
        If ``scantag`` or ``reftag`` are not keys of the ``tags`` attribute.
    ValueError
        If ``fit_region`` is not recognized.
    ValueError
        If ``fit_range`` is outside the domain of a reference group.

    Important
    ---------
    If more than one group in ``collection`` is tagged with ``scantag``,
    a warning will be raised and only the first group will be fitted.

    Notes
    -----
    The ``min_pars`` object is returned by the :func:`minimize` function
    of ``lmfit``, and contains the following attributes
    (non-exhaustive list):

    - ``params`` : dictionary with the optimized parameters.
    - ``var_names`` : ordered list of parameter names used in optimization.
    - ``covar`` : covariance matrix from minimization.
    - ``init_vals`` : list of initial values for variable parameters using
      ``var_names``.
    - ``success`` : True if the fit succeeded, otherwise False.
    - ``nvarys`` : number of variables.
    - ``ndata`` : number of data points.
    - ``chisqr`` : chi-square.
    - ``redchi`` : reduced chi-square.
    - ``residual`` : array with fit residuals.
    Example
    -------
    >>> from numpy.random import seed, normal
    >>> from numpy import arange, sin, pi
    >>> from araucaria import Group, Dataset, Collection
    >>> from araucaria.fit import lcf
    >>> from araucaria.utils import check_objattrs
    >>> seed(1234)  # seed of random values
    >>> k = arange(0, 12, 0.05)
    >>> eps = normal(0, 0.1, len(k))
    >>> f1 = 1.2   # freq 1
    >>> f2 = 2.6   # freq 2
    >>> amp1 = 0.4 # amp 1
    >>> amp2 = 0.6 # amp 2
    >>> group1 = Group(**{'name': 'group1', 'k': k, 'chi': sin(2*pi*f1*k)})
    >>> group2 = Group(**{'name': 'group2', 'k': k, 'chi': sin(2*pi*f2*k)})
    >>> group3 = Group(**{'name': 'group3', 'k': k,
    ...                   'chi': amp1 * group1.chi + amp2 * group2.chi + eps})
    >>> collection = Collection()
    >>> tags = ['ref', 'ref', 'scan']
    >>> for i, group in enumerate((group1, group2, group3)):
    ...     collection.add_group(group, tag=tags[i])
    >>> # performing lcf
    >>> out = lcf(collection, fit_region='exafs', fit_range=[3, 10],
    ...           kweight=0, sum_one=False)
    >>> check_objattrs(out, Dataset,
    ...                attrlist=['k', 'scangroup', 'refgroups',
    ...                          'scan', 'ref1', 'ref2', 'fit',
    ...                          'min_pars', 'lcf_pars'])
    [True, True, True, True, True, True, True, True, True]
    >>> for key, val in out.min_pars.params.items():
    ...     print('%1.4f +/- %1.4f' % (val.value, val.stderr))
    0.4003 +/- 0.0120
    0.5943 +/- 0.0120
    """
    # checking class and attributes
    check_objattrs(collection, Collection, attrlist=['tags'],
                   exceptions=True)

    # verifying fit type
    fit_valid = ['dxanes', 'xanes', 'exafs']
    if fit_region not in fit_valid:
        raise ValueError('fit_region %s not recognized.' % fit_region)

    # required groups
    # at least a spectrum and a single reference must be provided
    for tag in (scantag, reftag):
        if tag not in collection.tags:
            raise KeyError("'%s' is not a valid key for the collection."
                           % tag)

    # scan and ref tags
    scangroup = collection.tags[scantag]
    if len(scangroup) > 1:
        warn("More than one group is tagged as scan. "
             "Only the first group will be considered.")
    scangroup = scangroup[0]
    refgroups = collection.tags[reftag]
    refgroups.sort()

    # the first element is the scan group
    groups = [scangroup] + refgroups

    # storing report parameters
    lcf_pars = {'fit_region': fit_region, 'fit_range': fit_range,
                'sum_one': sum_one}

    # report parameters for exafs lcf
    if fit_region == 'exafs':
        lcf_pars['kweight'] = kweight
        # storing name of x-variable (exafs)
        xvar = 'k'
    # report parameters for xanes/dxanes lcf
    else:
        # storing name of x-variable (xanes/dxanes)
        xvar = 'energy'

    # content dictionary
    content = {'scangroup': scangroup, 'refgroups': refgroups}

    # reading and processing spectra
    for i, name in enumerate(groups):
        dname = 'scan' if i == 0 else 'ref' + str(i)
        group = collection.get_group(name).copy()
        if fit_region == 'exafs':
            check_objattrs(group, Group, attrlist=['k', 'chi'],
                           exceptions=True)
        else:
            # fit_region == 'xanes' or 'dxanes'
            check_objattrs(group, Group, attrlist=['energy', 'norm'],
                           exceptions=True)
        if i == 0:
            # first value is the spectrum, so we extract the
            # interpolation values from xvar
            xvals = getattr(group, xvar)
            index = index_xrange(fit_range, xvals)
            xvals = xvals[index]

            # storing the y-variable
            if fit_region == 'exafs':
                yvals = xvals**kweight * group.chi[index]
            elif fit_region == 'xanes':
                yvals = group.norm[index]
            else:
                # derivative lcf
                yvals = (gradient(group.norm[index]) /
                         gradient(group.energy[index]))
        else:
            # spline interpolation of references
            if fit_region == 'exafs':
                s = interp1d(group.k, group.k**kweight * group.chi,
                             kind='cubic')
            elif fit_region == 'xanes':
                s = interp1d(group.energy, group.norm, kind='cubic')
            else:
                s = interp1d(group.energy,
                             gradient(group.norm) / gradient(group.energy),
                             kind='cubic')

            # interpolating in the fit range
            try:
                yvals = s(xvals)
            except:
                raise ValueError('fit_range is outside the domain of '
                                 'group %s' % name)

        # saving yvals in the dictionary
        content[dname] = yvals

    # setting xvar as an attribute of datgroup
    content[xvar] = xvals

    # setting initial values and parameters for fit model
    initval = around(1 / (len(groups) - 1), decimals=1)
    params = Parameters()
    expr = str(1)

    for i in range(len(groups) - 1):
        parname = 'amp' + str(i + 1)
        if ((i == len(groups) - 2) and (sum_one == True)):
            params.add(parname, expr=expr)
        else:
            params.add(parname, value=initval, min=0, max=1, vary=True)
            expr += ' - amp' + str(i + 1)

    # perform fit
    min = minimize(residuals, params, method=method, args=(content,))

    # storing fit data, parameters, and results
    content['fit'] = sum_references(min.params, content)
    content['lcf_pars'] = lcf_pars
    content['min_pars'] = min

    out = Dataset(**content)
    return out
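# A standalone sketch of the sum-to-one trick used above for the LCF
# amplitudes: all but the last fraction vary freely on [0, 1], while the last
# is defined by an expr so the fractions always sum to exactly one.
from lmfit import Parameters

params = Parameters()
params.add('amp1', value=0.3, min=0, max=1)
params.add('amp2', value=0.3, min=0, max=1)
params.add('amp3', expr='1 - amp1 - amp2')  # constrained remainder
print(params['amp3'].value)                 # ~0.4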
def Lorentz_params(p0):
    pars = Parameters()
    f, A, f0, df, y0 = p0[0], p0[1], p0[2], p0[3], p0[4]
    pars.add('A', value=A)
    pars.add('f', value=f.tolist(), vary=False)
    pars.add('f0', value=f0)
    pars.add('df', value=df, min=0.)
    pars.add('y0', value=y0)
    return pars
def S21p0i2params(f, p0i):
    pars = Parameters()
    (A, f0, Q, Qe, dx, theta) = p0i
    pars.add('A', value=1.e-3)
    pars.add('f', value=f, vary=False)
    pars.add('f0', value=6.28, min=0.)
    pars.add('Q', value=Q, min=0.)
    pars.add('Qe', value=Qe, min=0.)
    pars.add('Qi', expr='1./(1./Q-1./Qe*cos(theta))', vary=False)
    pars.add('Qc', expr='Qe/cos(theta)', vary=False)
    pars.add('df', value=dx)
    pars.add('theta', value=theta, min=-np.pi / 2, max=np.pi / 2)
    return pars
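# The Qi/Qc entries above are derived parameters: their expr strings are
# evaluated by lmfit's built-in asteval interpreter, which already provides
# math functions such as cos(). A tiny sketch of the same mechanism:
from lmfit import Parameters

pars = Parameters()
pars.add('theta', value=0.5)
pars.add('c', expr='cos(theta)')  # recomputed whenever theta changes
print(pars['c'].value)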
def _Parameters(*arg, _larch=None, **kws):
    return Parameters(*arg, **kws)
def test_bounded_parameters():
    # create data to be fitted
    np.random.seed(1)
    x = np.linspace(0, 15, 301)
    data = (5. * np.sin(2 * x - 0.1) * np.exp(-x * x * 0.025) +
            np.random.normal(size=len(x), scale=0.2))

    # define objective function: returns the array to be minimized
    def fcn2min(params, x, data):
        """ model decaying sine wave, subtract data"""
        amp = params['amp']
        shift = params['shift']
        omega = params['omega']
        decay = params['decay']
        model = amp * np.sin(x * omega + shift) * np.exp(-x * x * decay)
        return model - data

    # create a set of Parameters
    params = Parameters()
    params.add('amp', value=10, min=0, max=50)
    params.add('decay', value=0.1, min=0, max=10)
    params.add('shift', value=0.0, min=-pi / 2., max=pi / 2.)
    params.add('omega', value=3.0, min=0, max=np.inf)

    # do fit, here with leastsq model
    result = minimize(fcn2min, params, args=(x, data))

    # assert that the real parameters are found
    for para, val in zip(result.params.values(), [5, 0.025, -.1, 2]):
        check(para, val)

    # assert that the covariance matrix is correct [cf. lmfit v0.9.10]
    cov_x = np.array(
        [[1.42428250e-03, 9.45395985e-06, -4.33997922e-05, 1.07362106e-05],
         [9.45395985e-06, 1.84110424e-07, -2.90588963e-07, 7.19107184e-08],
         [-4.33997922e-05, -2.90588963e-07, 9.53427031e-05, -2.37750362e-05],
         [1.07362106e-05, 7.19107184e-08, -2.37750362e-05, 9.60952336e-06]])
    assert_allclose(result.covar, cov_x, rtol=1e-6)

    # assert that stderr and correlations are correct [cf. lmfit v0.9.10]
    assert_almost_equal(result.params['amp'].stderr, 0.03773967, decimal=6)
    assert_almost_equal(result.params['decay'].stderr, 4.2908e-04, decimal=6)
    assert_almost_equal(result.params['shift'].stderr, 0.00976436, decimal=6)
    assert_almost_equal(result.params['omega'].stderr, 0.00309992, decimal=6)

    assert_almost_equal(result.params['amp'].correl['decay'],
                        0.5838166760743324, decimal=6)
    assert_almost_equal(result.params['amp'].correl['shift'],
                        -0.11777303073961824, decimal=6)
    assert_almost_equal(result.params['amp'].correl['omega'],
                        0.09177027400788784, decimal=6)
    assert_almost_equal(result.params['decay'].correl['shift'],
                        -0.0693579417651835, decimal=6)
    assert_almost_equal(result.params['decay'].correl['omega'],
                        0.05406342001021014, decimal=6)
    assert_almost_equal(result.params['shift'].correl['omega'],
                        -0.7854644476455469, decimal=6)
import sys
import shelve

import numpy as np
import matplotlib.pyplot as plt

# custom imports
from lmfit import Parameters  # not a standard library -- install using PIP
import pygisaxs.io.pilatus as pil  # from my (very small) library
import pygisaxs.utilities as util  # from my (very small) library

# geometry of interest in Pilatus Files
pilatus_rectangle = [361, 7]
valid_pixels = list(range(0, 166)) + list(range(195, 361))

# fitting function and parameter guesses
fnc2min = util.ode1sol
localguess = Parameters()
localguess.add('R', value=-5e-4)
localguess.add('beta', value=1e-8, min=0.0)
localguess.add('I0', value=1e-4, min=0.0)

# global parameters for hierarchical fitting
globalguess = None
# COMMENT OUT THESE TWO LINES TO FIT ALL PARAMETERS LOCALLY
# (useful to identify potential global parameters and guesses)
globalguess = Parameters()
globalguess.add('beta', value=1e-8, min=0.0)

# in principle this can be run over multiple directories in sequence
# (just to save typing and waiting)
directories = sys.argv[1:]
for directory in directories:
field_residuum_telluric[idx_correction_skip] = 0

# resample and shift residuals from observed to restframe
field_residuum_telluric = spectra_resample(
    field_residuum_telluric,
    wvl_read_finer / (1 + velocity_shift * 1000. / C_LIGHT),
    wvl_read)
field_residuum_telluric_binary = spectra_resample(
    np.logical_not(idx_correction_skip),
    wvl_read_finer / (1 + velocity_shift * 1000. / C_LIGHT),
    wvl_read) > 0.5

# set the residuum outside observed wavelengths to 0
object_spectra_corrected_s1 = object_spectra - field_residuum_telluric
template_res_before_s1 = template_spectra - object_spectra
template_res_after_s1 = template_spectra - object_spectra_corrected_s1

# the best residuum scaling factor based on reference spectra
fit_param = Parameters()
fit_param.add('scale', value=1, min=0., max=10., brute_step=0.1)
fit_res = minimize(minimize_scale, fit_param,
                   # method='brute',
                   args=(template_res_before_s1, field_residuum_telluric),
                   **{'nan_policy': 'omit'})
fit_res.params.pretty_print()
residuum_scale = fit_res.params['scale'].value

object_spectra_corrected_s1_scaled = (object_spectra -
                                      residuum_scale * field_residuum_telluric)
template_res_after_s1_scaled = (template_spectra -
                                object_spectra_corrected_s1_scaled)
print(np.nansum(template_res_after_s1**2),
      np.nansum(template_res_after_s1_scaled**2))

fig, axes = plt.subplots(2, 1)
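# Note: the brute_step set above only takes effect with the grid-search
# method. Under method='brute' (commented out above), lmfit evaluates the
# residual on a grid from min to max in steps of brute_step instead of
# running leastsq. A hedged, self-contained sketch with toy data and names:
import numpy as np
from lmfit import Parameters, minimize

def toy_resid(params, data, model):
    return data - params['scale'].value * model

toy_model = np.array([1.0, 2.0, 3.0])
toy_data = 2.5 * toy_model
toy_pars = Parameters()
toy_pars.add('scale', value=1.0, min=0.0, max=10.0, brute_step=0.1)
toy_out = minimize(toy_resid, toy_pars, method='brute',
                   args=(toy_data, toy_model))
print(toy_out.params['scale'].value)  # grid point nearest 2.5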
def fitz_galaxy_emcee(spec, zguess, fluxpoly=True, steps=5000, burn=2000,
                      progress=True, printReport=True, saveCorner='',
                      zMin=None, zMax=None, saveSpecPlot=''):
    flux_median = np.median(spec['flux'])

    parameters = Parameters()
    #                   (Name,       Value,           Vary,     Min,  Max,  Expr)
    parameters.add_many(('z',        zguess,          True,     zMin, zMax, None),
                        ('eigen1',   flux_median*0.4, True,     None, None, None),
                        ('eigen2',   flux_median*0.4, True,     None, None, None),
                        ('eigen3',   flux_median*0.1, True,     None, None, None),
                        ('eigen4',   flux_median*0.1, True,     None, None, None),
                        ('fluxcal0', 0.0,             fluxpoly, None, None, None),
                        ('fluxcal1', 0.0,             fluxpoly, None, None, None),
                        ('fluxcal2', 0.0,             fluxpoly, None, None, None))

    galaxy_model = Model(eigensum_galaxy, missing='drop')
    result = galaxy_model.fit(spec['flux'], wave=spec['wave'],
                              weights=1/spec['error'], params=parameters,
                              missing='drop')

    emcee_kws = dict(steps=steps, burn=burn, is_weighted=True,
                     progress=progress)
    emcee_params = result.params.copy()
    result_emcee = galaxy_model.fit(spec['flux'], wave=spec['wave'],
                                    weights=1/spec['error'],
                                    params=emcee_params, method='emcee',
                                    nan_policy='omit', missing='drop',
                                    fit_kws=emcee_kws, show_titles=True)
    result_emcee.conf_interval

    # find the maximum likelihood solution
    highest_prob = np.argmax(result_emcee.lnprob)
    hp_loc = np.unravel_index(highest_prob, result_emcee.lnprob.shape)
    mle_soln = result_emcee.chain[hp_loc]
    #result_emcee.conf_interval()

    if printReport:
        print('Initial results')
        print(result.fit_report())
        print('MCMC results')
        print(result_emcee.fit_report())
        #print(result_emcee.ci_report())

    # marginalized redshift and asymmetric errors (also used for plotting)
    z_marginalized = np.percentile(result_emcee.flatchain['z'], [50])[0]
    zErrUp = (np.percentile(result_emcee.flatchain['z'], [84.1])[0] -
              np.percentile(result_emcee.flatchain['z'], [50])[0])
    zErrDown = (np.percentile(result_emcee.flatchain['z'], [50])[0] -
                np.percentile(result_emcee.flatchain['z'], [15.9])[0])
    std_z = np.std(result_emcee.flatchain['z'])
    print('z = {:0.5f} +{:0.5f} -{:0.5f} sigma(v) & +/- = {:0.1f} & '
          '+{:0.1f} -{:0.1f} km/s'.format(
              z_marginalized, zErrUp, zErrDown,
              std_z/(1 + z_marginalized)*c_kms,
              zErrUp/(1 + z_marginalized)*c_kms,
              zErrDown/(1 + z_marginalized)*c_kms))
    interval68 = np.percentile(result_emcee.flatchain['z'], [15.9, 84.1])
    interval95 = np.percentile(result_emcee.flatchain['z'], [2.28, 97.7])
    print('68% C.I:')
    print(interval68)
    print('95% C.I:')
    print(interval95)

    if saveCorner != '':
        emcee_corner = corner.corner(result_emcee.flatchain,
                                     labels=['z', 'eigen1', 'eigen2',
                                             'eigen3', 'eigen4', 'fluxcal0',
                                             'fluxcal1', 'fluxcal2'],
                                     truths=mle_soln)
        emcee_corner.savefig(saveCorner)
        plt.close()

    if saveSpecPlot != '':
        fig, ax = plt.subplots(2, figsize=(7, 10))
        ax[0].plot(spec['wave'], spec['flux'], color='black',
                   drawstyle='steps-mid')
        ax[0].plot(spec['wave'], spec['error'], color='blue',
                   drawstyle='steps-mid')
        ax[0].plot(spec['wave'], result_emcee.best_fit, color='red')
        ax[0].minorticks_on()
        ax[0].set_xlabel(r'$\rm observed\ wavelength\ [\AA]$')
        ax[0].set_ylabel(r'$f_\lambda\ [{\rm arbitrary}]$')
        tempString = saveSpecPlot.replace('.pdf', '').replace('_', '\ ').replace('spec', '')
        titleString = (r'$\rm {}'.format(tempString) +
                       r'$z={:0.5f} +{:0.5f} -{:0.5f}$'.format(
                           z_marginalized, zErrUp, zErrDown))
        titleString = titleString.split('/')[1]
        ax[0].set_title(titleString)
        ax[0].minorticks_on()
        ax[1].hist(result_emcee.flatchain['z'], color='black',
                   histtype='step', density=True)
        ax[1].minorticks_on()
        ax[1].set_xlabel(r'$\rm marginalized\ redshift\ posterior$')
        ax[1].set_ylabel(r'$P$')
        fig.tight_layout()
        plt.savefig(saveSpecPlot)

    return result_emcee
def getParams(N, p, beta, gamma, sigma, tc, eps):
    fit_params = Parameters()
    fit_params.add('N', value=N, vary=False)
    fit_params.add('p', value=p, min=1, max=1e6)
    fit_params.add('beta', value=beta, min=0, max=10)      # 0.2
    fit_params.add('gamma', value=gamma, min=0, max=1.0)   # 0.02
    fit_params.add('sigma', value=sigma, min=0, max=1.0)   # 0.01
    fit_params.add('tc', value=tc, vary=False)             # 20
    fit_params.add('eps', value=eps, vary=False)           # 8
    return fit_params
def chemical_potential(n_e: u.m**-3, T: u.K):
    r"""
    Calculate the ideal chemical potential.

    Parameters
    ----------
    n_e : ~astropy.units.Quantity
        Electron number density.

    T : ~astropy.units.Quantity
        The temperature.

    Returns
    -------
    beta_mu : ~astropy.units.Quantity
        The dimensionless ideal chemical potential. That is the ratio of
        the ideal chemical potential to the thermal energy.

    Raises
    ------
    TypeError
        If argument is not a `~astropy.units.Quantity`.

    ~astropy.units.UnitConversionError
        If argument is in incorrect units.

    ValueError
        If argument contains invalid values.

    Warns
    -----
    ~astropy.units.UnitsWarning
        If units are not provided, SI units are assumed.

    Notes
    -----
    The ideal chemical potential is given by [1]_:

    .. math::
        \chi_a = I_{1/2}(\beta \mu_a^{ideal})

    where :math:`\chi` is the degeneracy parameter, :math:`I_{1/2}` is the
    Fermi integral with order 1/2, :math:`\beta` is the inverse thermal
    energy :math:`\beta = 1/(k_B T)`, and :math:`\mu_a^{ideal}` is the
    ideal chemical potential.

    The definition for the ideal chemical potential is implicit, so it must
    be obtained numerically by solving for the Fermi integral for values of
    chemical potential approaching the degeneracy parameter. Since values
    returned from the Fermi_integral are complex, a nonlinear
    Levenberg-Marquardt least squares method is used to iteratively approach
    a value of :math:`\mu` which minimizes
    :math:`I_{1/2}(\beta \mu_a^{ideal}) - \chi_a`

    This function returns :math:`\beta \mu^{ideal}`, the dimensionless
    ideal chemical potential.

    Warning: at present this function is limited to relatively small
    arguments due to limitations in the `~mpmath` package's implementation
    of `~mpmath.polylog`, which PlasmaPy uses in calculating the Fermi
    integral.

    References
    ----------
    .. [1] Bonitz, Michael. Quantum kinetic theory. Stuttgart: Teubner, 1998.

    Example
    -------
    >>> from astropy import units as u
    >>> chemical_potential(n_e=1e21*u.cm**-3, T=11000*u.K)
    <Quantity 2.00039985e-12>

    """
    from lmfit import minimize, Parameters

    # deBroglie wavelength
    lambdaDB = thermal_deBroglie_wavelength(T)

    # degeneracy parameter
    degen = (n_e * lambdaDB**3).to(u.dimensionless_unscaled)

    def residual(params, data, eps_data):
        """Residual function for fitting parameters to Fermi_integral."""
        alpha = params['alpha'].value
        # note that alpha = mu / (k_B * T)
        model = mathematics.Fermi_integral(alpha, 0.5)
        complexResidue = (data - model) / eps_data
        return complexResidue.view(np.float64)

    # setting parameters for fitting along with bounds
    alphaGuess = 1 * u.dimensionless_unscaled
    params = Parameters()
    params.add('alpha', value=alphaGuess, min=0.0)

    # calling minimize function from lmfit to fit by minimizing the residual
    data = np.array([degen])  # result of Fermi_integral - degen should be zero
    eps_data = np.array([1e-15])  # numerical error
    minFit = minimize(residual, params, args=(data, eps_data))
    beta_mu = minFit.params['alpha'].value * u.dimensionless_unscaled

    return beta_mu
n = 601
xmin = 0.
xmax = 20.0

random.seed(0)
x = linspace(xmin, xmax, n)
data = (gaussian(x, 21, 8.1, 1.2) +
        lorentzian(x, 10, 9.6, 2.4) +
        random.normal(scale=0.23, size=n) + x*0.5)

if HASPYLAB:
    pylab.plot(x, data, 'r+')

pfit = Parameters()
pfit.add(name='amp_g', value=10)
pfit.add(name='cen_g', value=9)
pfit.add(name='wid_g', value=1)
pfit.add(name='amp_tot', value=20)
pfit.add(name='amp_l', expr='amp_tot - amp_g')
pfit.add(name='cen_l', expr='1.5+cen_g')
pfit.add(name='wid_l', expr='2*wid_g')
pfit.add(name='line_slope', value=0.0)
pfit.add(name='line_off', value=0.0)

sigma = 0.021  # estimate of data error (for all data points)

myfit = Minimizer(residual, pfit,
                  fcn_args=(x,),
                  fcn_kws={'sigma': sigma, 'data': data},
                  scale_covar=True)
class FittingProfile(object):
    def __init__(self, wave, flux, restWave, lineName, zone, rp,
                 fluxError=None, xAxis='vel', initVals='vel'):
        """The input vel and flux must be limited to a single emission
        line profile"""
        self.flux = flux
        self.fluxError = fluxError
        self.restWave = restWave
        self.lineName = lineName
        self.zone = zone
        self.weights = self._weights()
        self.rp = rp
        self.xAxis = xAxis
        self.initVals = initVals

        if xAxis == 'vel':
            if fluxError is None:
                vel, self.flux = wave_to_vel(restWave, wave, flux)
            else:
                vel, self.flux, self.fluxError = wave_to_vel(restWave, wave,
                                                             flux, fluxError)
            self.x = vel
        else:
            self.x = wave

        self.linGaussParams = Parameters()

    def _weights(self):
        if self.fluxError is None:
            return None
        else:
            fluxErrorCR = self.fluxError  # - self.continuum
            return 1./fluxErrorCR

    def _get_amplitude(self, numOfComponents, modelFit):
        amplitudeTotal = 0.
        for i in range(numOfComponents):
            amplitudeTotal = amplitudeTotal + modelFit.best_values['g%d_amplitude' % (i+1)]
        print("Amplitude Total is %f" % amplitudeTotal)
        return amplitudeTotal

    def _gaussian_component(self, pars, prefix, c, s, a, limits):
        """Fits a gaussian with given parameters.
        pars is the lmfit Parameters for the fit, prefix is the label of
        the gaussian, c is the center, s is sigma, a is amplitude.
        Returns the Gaussian model"""
        varyCentre = True
        varySigma = True
        varyAmp = True

        if limits['c'] is False:
            varyCentre = False
            cMin, cMax = -np.inf, np.inf
        elif type(limits['c']) is tuple:
            cMin = limits['c'][0]
            cMax = limits['c'][1]
        else:
            cMin = c - c*limits['c']
            cMax = c + c*limits['c']

        if limits['s'] is False:
            varySigma = False
            sMin, sMax = -np.inf, np.inf
        elif type(limits['s']) is tuple:
            sMin = limits['s'][0]
            sMax = limits['s'][1]
        else:
            sMin = s - s * limits['s']
            sMax = s + s * limits['s']

        if limits['a'] is False:
            varyAmp = False
            aMin, aMax = -np.inf, np.inf
        elif type(limits['a']) is tuple:
            aMin = limits['a'][0]
            aMax = limits['a'][1]
        else:
            aMin = a - a * limits['a']
            aMax = a + a * limits['a']

        g = GaussianModel(prefix=prefix)
        pars.update(g.make_params())
        if isinstance(c, str):
            pars[prefix + 'center'].set(expr=c, min=cMin, max=cMax,
                                        vary=varyCentre)
        else:
            pars[prefix + 'center'].set(c, min=cMin, max=cMax,
                                        vary=varyCentre)
        if isinstance(s, str):
            pars[prefix + 'sigma'].set(expr=s, min=sMin, max=sMax,
                                       vary=varySigma)
        else:
            pars[prefix + 'sigma'].set(s, min=sMin, max=sMax,
                                       vary=varySigma)
        if isinstance(a, str):
            pars[prefix + 'amplitude'].set(expr=a, min=aMin, max=aMax,
                                           vary=varyAmp)
        else:
            pars[prefix + 'amplitude'].set(a, min=aMin, max=aMax,
                                           vary=varyAmp)

        return g

    def multiple_close_emission_lines(self, lineNames, cListInit, sListInit,
                                      lS, lI):
        """All lists should be the same length"""
        gList = []

        # Assume initial parameters are in velocity
        lin = LinearModel(prefix='lin_')
        self.linGaussParams = lin.guess(self.flux, x=self.x)
        self.linGaussParams.update(lin.make_params())
        self.linGaussParams['lin_slope'].set(lS, vary=True)
        self.linGaussParams['lin_intercept'].set(lI, vary=True)

        for j, lineName in enumerate(lineNames):
            numComps = self.rp.emProfiles[lineName]['numComps']
            restWave = self.rp.emProfiles[lineName]['restWavelength']
            copyFrom = self.rp.emProfiles[lineName]['copyFrom']
            if copyFrom is not None:
                copyFromRestWave = self.rp.emProfiles[copyFrom]['restWavelength']
                cList = ['g{0}{1}_center*{2}'.format(copyFrom.replace('-', ''), (i + 1), (restWave / copyFromRestWave)) for i in range(numComps)]
                sList = ['g{0}{1}_sigma'.format(copyFrom.replace('-', ''), i + 1) for i in range(numComps)]
                if type(self.rp.emProfiles[lineName]['ampList']) is list:
                    aList = self.rp.emProfiles[lineName]['ampList']
                    if self.xAxis == 'vel':
                        aList = vel_to_wave(restWave, vel=0, flux=np.array(aList))[1]
                else:
                    ampRatio = self.rp.emProfiles[lineName]['ampList']
                    aList = ['g{0}{1}_amplitude*{2}'.format(copyFrom.replace('-', ''), i + 1, ampRatio) for i in range(numComps)]
            else:
                cList = vel_to_wave(restWave, vel=np.array(cListInit), flux=0)[0]
                sList = vel_to_wave(restWave, vel=np.array(sListInit), flux=0, delta=True)[0]
                aListInit = self.rp.emProfiles[lineName]['ampList']
                aList = vel_to_wave(restWave, vel=0, flux=np.array(aListInit))[1]

            limits = self.rp.emProfiles[lineName]['compLimits']
            for i in range(numComps):
                if type(limits['c']) is list:
                    cLimit = limits['c'][i]
                else:
                    cLimit = limits['c']
                if type(limits['s']) is list:
                    sLimit = limits['s'][i]
                else:
                    sLimit = limits['s']
                if type(limits['a']) is list:
                    aLimit = limits['a'][i]
                else:
                    aLimit = limits['a']
                lims = {'c': cLimit, 's': sLimit, 'a': aLimit}
                if len(lineNames) == 1:
                    prefix = 'g{0}_'.format(i + 1)
                else:
                    prefix = 'g{0}{1}_'.format(lineName.replace('-', ''), i + 1)
                gList.append(self._gaussian_component(self.linGaussParams,
                                                      prefix, cList[i],
                                                      sList[i], aList[i],
                                                      lims))
        gList = np.array(gList)
        mod = lin + gList.sum()

        init = mod.eval(self.linGaussParams, x=self.x)
        out = mod.fit(self.flux, self.linGaussParams, x=self.x,
                      weights=self.weights)

        f = open(os.path.join(constants.OUTPUT_DIR, self.rp.regionName, "{0}_Log.txt".format(self.rp.regionName)), "a")
        print("######## %s %s Linear and Multi-gaussian Model ##########\n" % (self.rp.regionName, self.lineName))
        print(out.fit_report())
        f.write("######## %s %s Linear and Multi-gaussian Model ##########\n" % (self.rp.regionName, self.lineName))
        f.write(out.fit_report())
        f.close()
        components = out.eval_components()

        if not hasattr(self.rp, 'plotResiduals'):
            self.rp.plotResiduals = True
        numComps = self.rp.emProfiles[lineName]['numComps']
        self.plot_emission_line(numComps, components, out,
                                self.rp.plotResiduals, lineNames, init=init,
                                scaleFlux=self.rp.scaleFlux)

        return out, components

    def lin_and_multi_gaussian(self, numOfComponents, cList, sList, aList,
                               lS, lI, limits):
        """All lists should be the same length"""
        gList = []

        if self.xAxis == 'wave' and self.initVals == 'vel':
            cList = vel_to_wave(self.restWave, vel=np.array(cList), flux=0)[0]
            sList = vel_to_wave(self.restWave, vel=np.array(sList), flux=0, delta=True)[0]
            aList = vel_to_wave(self.restWave, vel=0, flux=np.array(aList))[1]
        elif self.xAxis == 'vel' and self.initVals == 'wave':
            cList = wave_to_vel(self.restWave, wave=np.array(cList), flux=0)[0]
            sList = wave_to_vel(self.restWave, wave=np.array(sList), flux=0, delta=True)[0]
            aList = wave_to_vel(self.restWave, wave=0, flux=np.array(aList))[1]

        lin = LinearModel(prefix='lin_')
        self.linGaussParams = lin.guess(self.flux, x=self.x)
        self.linGaussParams.update(lin.make_params())
        self.linGaussParams['lin_slope'].set(lS, vary=True)
        self.linGaussParams['lin_intercept'].set(lI, vary=True)

        for i in range(numOfComponents):
            if type(limits['c']) is list:
                cLimit = limits['c'][i]
            else:
                cLimit = limits['c']
            if type(limits['s']) is list:
                sLimit = limits['s'][i]
            else:
                sLimit = limits['s']
            if type(limits['a']) is list:
                aLimit = limits['a'][i]
            else:
                aLimit = limits['a']
            lims = {'c': cLimit, 's': sLimit, 'a': aLimit}
            prefix = 'g{0}_'.format(i+1)
            gList.append(self._gaussian_component(self.linGaussParams,
                                                  prefix, cList[i], sList[i],
                                                  aList[i], lims))
        gList = np.array(gList)
        mod = lin + gList.sum()

        init = mod.eval(self.linGaussParams, x=self.x)
weights=self.weights) f = open(os.path.join(constants.OUTPUT_DIR, self.rp.regionName, "{0}_Log.txt".format(self.rp.regionName)), "a") print("######## %s %s Linear and Multi-gaussian Model ##########\n" % (self.rp.regionName, self.lineName)) print(out.fit_report()) f.write("######## %s %s Linear and Multi-gaussian Model ##########\n" % (self.rp.regionName, self.lineName)) f.write(out.fit_report()) f.close() components = out.eval_components() if not hasattr(self.rp, 'plotResiduals'): self.rp.plotResiduals = True self.plot_emission_line(numOfComponents, components, out, self.rp.plotResiduals, init=init, scaleFlux=self.rp.scaleFlux) self._get_amplitude(numOfComponents, out) return out, components def plot_emission_line(self, numOfComponents, components, out, plotResiduals=True, lineNames=None, init=None, scaleFlux=1e14): ion, lambdaZero = line_label(self.lineName, self.restWave) fig = plt.figure("%s %s %s" % (self.rp.regionName, ion, lambdaZero)) if plotResiduals is True: frame1 = fig.add_axes((.1, .3, .8, .6)) plt.title("%s %s" % (ion, lambdaZero)) if self.xAxis == 'wave': x = self.x xLabel = constants.WAVE_AXIS_LABEL yLabel = constants.FluxUnitsLabels(scaleFlux).FLUX_WAVE_AXIS_LABEL elif self.xAxis == 'vel': if hasattr(self.rp, 'showSystemicVelocity') and self.rp.showSystemicVelocity is True: x = self.x - self.rp.systemicVelocity xLabel = constants.DELTA_VEL_AXIS_LABEL else: x = self.x xLabel = constants.VEL_AXIS_LABEL if hasattr(self.rp, 'rp.plottingXRange') and self.rp.plottingXRange is not None: plt.xlim(self.rp.plottingXRange) yLabel = constants.FluxUnitsLabels(scaleFlux).FLUX_VEL_AXIS_LABEL else: raise Exception("Invalid xAxis argument. Must be either 'wave' or 'vel'. ") plt.plot(x, self.flux, label='Data') for i in range(numOfComponents): labelComp = self.rp.componentLabels if lineNames is None: plt.plot(x, components['g%d_' % (i+1)]+components['lin_'], color=self.rp.componentColours[i], linestyle=':', label=labelComp[i]) else: for j, lineName in enumerate(lineNames): plt.plot(x, components['g{0}{1}_'.format(lineName.replace('-', ''), i + 1)] + components['lin_'], color=self.rp.componentColours[i], linestyle=':', label=labelComp[i]) # plt.plot(x, components['lin_'], label='lin_') plt.plot(x, out.best_fit, color='black', linestyle='--', label='Fit') # plt.plot(x, init, label='init') plt.legend(loc='upper left') plt.ylabel(yLabel) if plotResiduals is True: frame1 = plt.gca() frame1.axes.get_xaxis().set_visible(False) frame2 = fig.add_axes((.1, .1, .8, .2)) plt.plot(x, self.flux - out.best_fit) plt.axhline(y=0, linestyle='--', color='black') plt.ylabel('Residuals') # plt.locator_params(axis='y', nbins=3) # nbins = len(frame2.get_yticklabels()) frame2.yaxis.set_major_locator(MaxNLocator(nbins=3, prune='upper')) plt.xlabel(xLabel) plt.savefig(os.path.join(constants.OUTPUT_DIR, self.rp.regionName, self.lineName + " {0} Component Linear-Gaussian Model".format(numOfComponents)), bbox_inches='tight')
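# A minimal, self-contained sketch of the linear-plus-Gaussians composite-model
# pattern the class above builds. The data, component count, and initial guesses
# here are synthetic and purely illustrative; only the lmfit API usage
# (LinearModel.guess, make_params with hints, model addition, eval_components)
# mirrors the code above.
import numpy as np
from lmfit.models import GaussianModel, LinearModel

x = np.linspace(-500, 500, 400)  # e.g. a velocity grid
y = (0.002 * x + 1.0
     + 8.0 * np.exp(-0.5 * ((x + 60) / 40.0) ** 2)
     + 5.0 * np.exp(-0.5 * ((x - 90) / 70.0) ** 2)
     + np.random.normal(scale=0.1, size=x.size))

lin = LinearModel(prefix='lin_')
pars = lin.guess(y, x=x)
mod = lin
# note: GaussianModel 'amplitude' is the area under the peak, not the height
for i, (c, s, a) in enumerate([(-60, 40, 800), (90, 70, 900)], start=1):
    g = GaussianModel(prefix='g%d_' % i)
    pars.update(g.make_params(center=c, sigma=s, amplitude=a))
    mod = mod + g

out = mod.fit(y, pars, x=x)
print(out.fit_report())
components = out.eval_components(x=x)  # keys 'lin_', 'g1_', 'g2_'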
    return ((data - model) / data_err)**2  # tail of the (truncated) scipy chi-square helper

from lmfit import Parameters, Minimizer
from multiprocessing import cpu_count
from functools import partial

res = op.minimize(chisq_sp, params, args=args, method='powell')
nrg_ratio_fit, temp_night_fit, delta_T_fit = res.x
fit_model = generate_spiderman_model(times, planet_info, nrg_ratio_fit,
                                     temp_night_fit, delta_T_fit,
                                     T_star, spider_params)

print('Initializing Parameters')
initialParams = Parameters()
initialParams.add_many(
    ('nrg_ratio', np.max([nrg_ratio_fit, 0.0]), True, 0.0, 1.0),
    ('temp_night', temp_night_fit, True, 0.0, np.inf),
    ('delta_T', delta_T_fit, True, 0.0, np.inf))

partial_residuals = partial(chisq_lmfit, args=args)

mle0 = Minimizer(partial_residuals, initialParams, nan_policy='omit')
fitResult = mle0.leastsq(initialParams)

def logprior_func(p):
    for key, val in p.items():
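# `logprior_func` is truncated in the source. A common flat (bounds-only)
# log-prior has the shape sketched below; this is an assumption about the
# intent, not the original implementation.
import numpy as np

def logprior_flat(p):
    # return -inf outside the Parameter bounds, 0 inside (uniform prior)
    for key, val in p.items():
        if not (val.min <= val.value <= val.max):
            return -np.inf
    return 0.0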
def chemical_potential(n_e: u.m**-3, T: u.K) -> u.dimensionless_unscaled: r""" Calculate the ideal chemical potential. Parameters ---------- n_e : `~astropy.units.Quantity` Electron number density. T : ~astropy.units.Quantity The temperature. Returns ------- beta_mu : `~astropy.units.Quantity` The dimensionless ideal chemical potential. That is the ratio of the ideal chemical potential to the thermal energy. Raises ------ `TypeError` If argument is not a `~astropy.units.Quantity`. `~astropy.units.UnitConversionError` If argument is in incorrect units. `ValueError` If argument contains invalid values. Warns ----- : `~astropy.units.UnitsWarning` If units are not provided, SI units are assumed. Notes ----- The ideal chemical potential is given by [1]_: .. math:: χ_a = I_{1/2}(β μ_a^{ideal}) where :math:`χ` is the degeneracy parameter, :math:`I_{1/2}` is the Fermi integral with order 1/2, :math:`β` is the inverse thermal energy :math:`β = 1/(k_B T)`, and :math:`μ_a^{ideal}` is the ideal chemical potential. The definition for the ideal chemical potential is implicit, so it must be obtained numerically by solving for the Fermi integral for values of chemical potential approaching the degeneracy parameter. Since values returned from the Fermi_integral are complex, a nonlinear Levenberg-Marquardt least squares method is used to iteratively approach a value of :math:`μ` which minimizes :math:`I_{1/2}(β μ_a^{ideal}) - χ_a` This function returns :math:`β μ^{ideal}` the dimensionless ideal chemical potential. Warning: at present this function is limited to relatively small arguments due to limitations in the `~mpmath` package's implementation of `~mpmath.polylog`, which PlasmaPy uses in calculating the Fermi integral. References ---------- .. [1] Bonitz, Michael. Quantum kinetic theory. Stuttgart: Teubner, 1998. Example ------- >>> from astropy import units as u >>> chemical_potential(n_e=1e21*u.cm**-3,T=11000*u.K) # doctest: +SKIP <Quantity 2.00039985e-12> """ raise NotImplementedError( "This function has been temporarily disabled due to a bug.\n" "Please refer to https://github.com/PlasmaPy/PlasmaPy/issues/726 \n" "and https://github.com/astropy/astropy/issues/9721 " "for progress in fixing it.") # deBroglie wavelength lambdaDB = thermal_deBroglie_wavelength(T) # degeneracy parameter degen = (n_e * lambdaDB**3).to(u.dimensionless_unscaled) def residual(params, data, eps_data): """Residual function for fitting parameters to Fermi_integral.""" alpha = params["alpha"].value # note that alpha = mu / (k_B * T) model = mathematics.Fermi_integral(alpha, 0.5) complexResidue = (data - model) / eps_data return complexResidue.view(np.float) # setting parameters for fitting along with bounds alphaGuess = 1 * u.dimensionless_unscaled try: from lmfit import minimize, Parameters except (ImportError, ModuleNotFoundError) as e: from plasmapy.optional_deps import lmfit_import_error raise lmfit_import_error from e params = Parameters() params.add("alpha", value=alphaGuess, min=0.0) # calling minimize function from lmfit to fit by minimizing the residual data = np.array([degen]) # result of Fermi_integral - degen should be zero eps_data = np.array([1e-15]) # numerical error minFit = minimize(residual, params, args=(data, eps_data)) beta_mu = minFit.params["alpha"].value * u.dimensionless_unscaled return beta_mu
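# Why the `.view(...)` call in the residual above works: lmfit's least-squares
# drivers need a real-valued residual array, and viewing a complex128 array as
# floats exposes the real and imaginary parts as an interleaved real vector of
# twice the length. Note that the bare `np.float` alias used above was removed
# in NumPy 1.24; `np.float64` (or builtin `float`) is the safe spelling.
import numpy as np

complex_residual = np.array([1.0 + 2.0j, 3.0 - 4.0j])
real_view = complex_residual.view(np.float64)
print(real_view)  # [ 1.  2.  3. -4.]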
# Pre1 -------------------------------------------------------------------------
pre1_time = numpy.memmap("orion331-TP2B-48000.raw", dtype="float32", mode='r')
pre1_freq = numpy.fft.rfft(pre1_time)
pre1_reference = pre1_freq
samples = pre1_time.size
delta_time = numpy.zeros(samples)
delta_time[0] = 1.0
# integer bin count (samples // 2 rather than samples / 2, which is a float in Python 3)
x_freq = numpy.linspace(0.0, sample_rate / 2, (samples // 2) + 1)
pre1_filters = ('-eadb:-{adb} '
                '-el:RThighshelf,{RThighshelf_A},{RThighshelf_f0},{RThighshelf_Q} '
                '-el:RTlowshelf,{RTlowshelf_A},{RTlowshelf_f0},{RTlowshelf_Q}')
fmin = 20
fmax = 22000

pre1 = Parameters()
#             (Name,             Value,       Vary,  Min,  Max,  Expr)
pre1.add_many(('adb',            9.28136887,  True,  0,    None, None),
              ('RThighshelf_A',  4.38211718,  True,  0,    None, None),
              ('RThighshelf_f0', 134.645409,  True,  100,  200,  None),
              ('RThighshelf_Q',  0.48178881,  True,  0.1,  3,    None),
              ('RTlowshelf_A',   3.42318251,  True,  0,    None, None),
              ('RTlowshelf_f0',  1747.22195,  True,  1600, 1800, None),
              ('RTlowshelf_Q',   0.48331721,  True,  0.1,  3,    None),
              ('sample_rate',    sample_rate, False, None, None, None))
#out = minimize(residual, pre1, args=(pre1_filters, x_freq, fmin, fmax, delta_time), kws={'data':pre1_reference}, method='nelder')
#pre1 = out.params
#print(fit_report(out))
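# `add_many` takes (name, value, vary, min, max, expr) tuples, which is how the
# table above pins 'sample_rate' (vary=False) while leaving the shelf gains,
# frequencies, and Q factors free. A tiny isolated check of that mechanism:
from lmfit import Parameters

p = Parameters()
p.add_many(('adb', 9.28, True, 0, None, None),
           ('sample_rate', 48000, False, None, None, None))
print(p['adb'].vary, p['sample_rate'].vary)  # True False
print(p.valuesdict())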
data_file_name_3 = "70mM_cleaneddata_RAD_F.csv"

# create arrays from signal data
signal1 = create_numpy_array(data_file_path, data_file_name_1)  # columns Time / ns, D, C, F
signal2 = create_numpy_array(data_file_path, data_file_name_2)  # columns Time / ns, D, C, F
signal3 = create_numpy_array(data_file_path, data_file_name_3)  # columns Time / ns, D, F

# take time series from signal data (convert ns to s)
t1 = signal1[:, 0] * (1e-9)
t2 = signal2[:, 0] * (1e-9)
t3 = signal3[:, 0] * (1e-9)

# Initialise parameter class and add parameters to model
params = Parameters()

# rate coefficients
k = [1e9, 1.3e10, 1.5e9, 1e6, 3.03e11, 3.1e5]
params.add('k1', value=k[0])
params.add('k2', value=k[1], vary=False)
params.add('k3', value=k[2])
params.add('k4', value=k[3])
params.add('k5', value=k[4], vary=False)
params.add('k6', value=k[5], vary=False)

# static and diffusive radical concentrations
params.add('radical', value=3.8e-5, vary=False)       # radical conc in M
params.add('static_radical', value=1e-5, vary=False)  # static radical conc

# amplitudes to convert from concentration to integrated signal
params.add('amp_C', value=30)
params.add('amp_F', value=17)
params.add('amp_D', value=5700)
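# The kinetic model tied to k1..k6 is not shown in this excerpt. The usual
# pattern it feeds into is: integrate an assumed rate-equation system, scale the
# simulated concentration by an amplitude, and return the difference from the
# measured signal. The toy one-step scheme (A -> B) below is illustrative only;
# every name in it is an assumption, not the source's model.
import numpy as np
from scipy.integrate import solve_ivp
from lmfit import Parameters, minimize

def rhs(t, c, k1):
    return [-k1 * c[0], k1 * c[0]]  # d[A]/dt, d[B]/dt for A -> B

def residual(params, t, data):
    v = params.valuesdict()
    sol = solve_ivp(rhs, (t[0], t[-1]), [1.0, 0.0], t_eval=t, args=(v['k1'],))
    return v['amp'] * sol.y[1] - data  # signal ~ amplitude * [B]

t = np.linspace(0, 5e-6, 50)
data = 17 * (1 - np.exp(-1e6 * t)) + np.random.normal(scale=0.1, size=t.size)

pars = Parameters()
pars.add('k1', value=5e5, min=0)
pars.add('amp', value=10, min=0)
out = minimize(residual, pars, args=(t, data))
print(out.params['k1'].value, out.params['amp'].value)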
# Variable Definitions
Z = 18      # Atomic number of argon
X_0 = 14.1  # Radiation length

# Energy
Energy = np.arange(200, 3200, 200)
E_s = 0.511 * np.sqrt(137 * 4 * np.pi)  # Scale energy
E_c = 35.2                              # Critical energy

TMax_Truth = np.array([18, 26, 30, 34, 42, 42, 42, 42, 54, 42, 42, 54, 54, 54, 54])

params = Parameters()
params.add('X0', value=X_0, min=0, max=30, vary=True)
params.add('Ec', value=E_c, min=10, max=60, vary=True)

# do fit, here with the leastsq algorithm
minner = Minimizer(fTmax_Residual, params, fcn_args=(Energy, TMax_Truth))
result = minner.minimize()

# calculate final result
final = TMax_Truth + result.residual

result.params.pretty_print()
print(result.params['X0'].value)
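# `fTmax_Residual` is defined elsewhere in the source. A plausible sketch,
# assuming the textbook logarithmic shower-maximum relation
# t_max ~ ln(E/Ec) - 1/2 (in units of the radiation length X0); this is an
# assumption about the model form, not the source's actual residual:
import numpy as np

def fTmax_Residual_sketch(params, energy, tmax_truth):
    X0 = params['X0'].value
    Ec = params['Ec'].value
    model = X0 * (np.log(energy / Ec) - 0.5)
    # same orientation as `final = TMax_Truth + result.residual` above
    return model - tmax_truth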
# ============================================================================= path_data = "../data/" df_fahey = pd.read_csv(path_data + "fahey_data.csv") df_fahey.loc[df_fahey["cell"] == "NonTfh", "cell"] = "nonTfh" data_arm = df_fahey[df_fahey.name == "Arm"] data_cl13 = df_fahey[df_fahey.name == "Cl13"] # get model time = np.arange(0, 80, 0.1) sim = Sim(time=time, name=today, params=d, virus_model=vir_model_const) # ============================================================================= # set parameters # ============================================================================= params = Parameters() params.add('death_tr1', value=0.05, min=0, max=0.2) params.add('death_tfhc', value=0.01, min=0, max=0.2) params.add('prolif_tr1', value=2.8, min=2, max=4.0) params.add('prolif_tfhc', value=4.1, min=3, max=5.0) params.add("pth1", value=0.06, min=0, max=1.0) params.add("ptfh", value=0.04, min=0, max=1.0) params.add("ptr1", value=0.89, min=0, max=1.0) params.add("ptfhc", expr="1.0-pth1-ptfh-ptr1") params.add("r_mem", value=0.01, min=0, max=0.2) params.add("deg_myc", value=0.32, min=0.28, max=0.35) # ============================================================================= # run fitting procedure # ============================================================================= modelname = "fahey"
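# The "ptfhc" expression above keeps the four branching probabilities summing
# to one automatically: lmfit re-evaluates the constraint whenever the free
# parameters change. A quick check of that mechanism in isolation:
from lmfit import Parameters

p = Parameters()
p.add('pth1', value=0.06, min=0, max=1.0)
p.add('ptfh', value=0.04, min=0, max=1.0)
p.add('ptr1', value=0.89, min=0, max=1.0)
p.add('ptfhc', expr='1.0-pth1-ptfh-ptr1')
print(p['ptfhc'].value)  # ~0.01, recomputed whenever the others change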
def objective(params, x, data):
    """Calculate the total residual for fits of Gaussians to several data sets."""
    ndata, _ = data.shape
    resid = 0.0 * data[:]
    # make residual per data set
    for i in range(ndata):
        resid[i, :] = data[i, :] - gauss_dataset(params, i, x)
    # now flatten this to a 1D array, as minimize() needs
    return resid.flatten()

# Create five simulated Gaussian data sets
x = np.linspace(-1, 2, 151)
data = []
for i in np.arange(5):
    amp = 0.60 + 9.50 * np.random.rand()
    cen = -0.20 + 1.20 * np.random.rand()
    sig = 0.25 + 0.03 * np.random.rand()
    dat = gauss(x, amp, cen, sig) + np.random.normal(size=x.size, scale=0.1)
    data.append(dat)
data = np.array(data)

# Create five sets of fitting parameters, one per data set
fit_params = Parameters()
for iy, y in enumerate(data):
    fit_params.add('amp_%i' % (iy + 1), value=0.5, min=0.0, max=200)
    fit_params.add('cen_%i' % (iy + 1), value=0.4, min=-2.0, max=2.0)
    fit_params.add('sig_%i' % (iy + 1), value=0.3, min=0.01, max=3.0)

# Constrain the values of sigma to be the same for all peaks by assigning
# sig_2, ..., sig_5 to be equal to sig_1 (see the sketch below).
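# The final comment describes tying the widths together. In lmfit that is done
# by giving sig_2..sig_5 an `expr` pointing at sig_1, as in the lmfit
# documentation example this snippet appears to follow:
for iy in (2, 3, 4, 5):
    fit_params['sig_%i' % iy].expr = 'sig_1'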
def main(self, ID0, PA0, zgal, flag_m, zprev, Cz0, Cz1, mcmcplot=True, fzvis=1, specplot=1, fneld=0, ntemp=5, sigz=1.0, ezmin=0.01, f_move=False, f_disp=False ): # flag_m related to redshift error in redshift check func. # # sigz (float): confidence interval for redshift fit. # ezmin (float): minimum error in redshift. # print('########################') print('### Fitting Function ###') print('########################') start = timeit.default_timer() inputs = self.inputs DIR_TMP = inputs['DIR_TEMP'] if os.path.exists(DIR_TMP) == False: os.mkdir(DIR_TMP) # For error parameter ferr = 0 # # Age # age = inputs['AGE'] age = [float(x.strip()) for x in age.split(',')] nage = np.arange(0, len(age), 1) # # Metallicity # Zmax, Zmin = float(inputs['ZMAX']), float(inputs['ZMIN']) delZ = float(inputs['DELZ']) Zall = np.arange(Zmin, Zmax, delZ) # in logZsun # For minimizer. delZtmp = delZ #delZtmp = 0.4 # to increase speed. # For z prior. delzz = 0.001 zlimu = 6. snlim = 1 zliml = zgal - 0.5 agemax = cd.age(zgal, use_flat=True, **cosmo) / cc.Gyr_s # N of param: try: ndim = int(inputs['NDIM']) print('No of params are : %d' % (ndim)) except: if int(inputs['ZEVOL']) == 1: ndim = int(len(nage) * 2 + 1) print('Metallicity evolution is on.') if int(inputs['ZMC']) == 1: ndim += 1 print('No of params are : %d' % (ndim)) else: ndim = int(len(nage) + 1 + 1) print('Metallicity evolution is off.') if int(inputs['ZMC']) == 1: ndim += 1 print('No of params are : %d' % (ndim)) pass # # Line # LW0 = inputs['LINE'] LW0 = [float(x.strip()) for x in LW0.split(',')] # # Params for MCMC # nmc = int(inputs['NMC']) nwalk = int(inputs['NWALK']) nmc_cz = int(inputs['NMCZ']) nwalk_cz = int(inputs['NWALKZ']) f_Zevol = int(inputs['ZEVOL']) #f_zvis = int(inputs['ZVIS']) try: fzmc = int(inputs['ZMC']) except: fzmc = 0 # # If FIR data; # try: DT0 = float(inputs['TDUST_LOW']) DT1 = float(inputs['TDUST_HIG']) dDT = float(inputs['TDUST_DEL']) Temp = np.arange(DT0, DT1, dDT) f_dust = True print('FIR fit is on.') except: f_dust = False pass # # Tau for MCMC parameter; not as fitting parameters. # tau0 = inputs['TAU0'] tau0 = [float(x.strip()) for x in tau0.split(',')] # # Dust model specification; # try: dust_model = int(inputs['DUST_MODEL']) except: dust_model = 0 from .function_class import Func from .basic_func import Basic fnc = Func(Zall, nage, dust_model=dust_model, DIR_TMP=DIR_TMP) # Set up the number of Age/ZZ bfnc = Basic(Zall) # Open ascii file and stock to array. #lib = open_spec(ID0, PA0) lib = fnc.open_spec_fits(ID0, PA0, fall=0, tau0=tau0) lib_all = fnc.open_spec_fits(ID0, PA0, fall=1, tau0=tau0) if f_dust: lib_dust = fnc.open_spec_dust_fits(ID0, PA0, Temp, fall=0, tau0=tau0) lib_dust_all = fnc.open_spec_dust_fits(ID0, PA0, Temp, fall=1, tau0=tau0) ################# # Observed Data ################# ############## # Spectrum ############## dat = np.loadtxt(DIR_TMP + 'spec_obs_' + ID0 + '_PA' + PA0 + '.cat', comments='#') NR = dat[:, 0] x = dat[:, 1] fy00 = dat[:, 2] ey00 = dat[:, 3] con0 = (NR < 1000) fy0 = fy00[con0] * Cz0 ey0 = ey00[con0] * Cz0 con1 = (NR >= 1000) & (NR < 10000) fy1 = fy00[con1] * Cz1 ey1 = ey00[con1] * Cz1 # BB data in spec_obs are not in use. 
#con2 = (NR>=10000) # BB #fy2 = fy00[con2] #ey2 = ey00[con2] ############## # Broadband ############## dat = np.loadtxt(DIR_TMP + 'bb_obs_' + ID0 + '_PA' + PA0 + '.cat', comments='#') NRbb = dat[:, 0] xbb = dat[:, 1] fybb = dat[:, 2] eybb = dat[:, 3] exbb = dat[:, 4] fy2 = fybb ey2 = eybb fy01 = np.append(fy0, fy1) fy = np.append(fy01, fy2) ey01 = np.append(ey0, ey1) ey = np.append(ey01, ey2) wht = 1. / np.square(ey) wht2 = check_line_man(fy, x, wht, fy, zprev, LW0) sn = fy / ey ##################### # Function fo MCMC ##################### def residual( pars, fy, wht2, f_fir, out=False): # x, y, wht are taken from out of the definition. # # Returns: residual of model and data. # out: model as second output. For lnprob func. # f_fir: syntax. If dust component is on or off. vals = pars.valuesdict() model, x1 = fnc.tmp04(ID0, PA0, vals, zprev, lib, tau0=tau0) if f_fir: model_dust, x1_dust = fnc.tmp04_dust(ID0, PA0, vals, zprev, lib_dust, tau0=tau0) n_optir = len(model) # Add dust flux to opt/IR grid. model[:] += model_dust[:n_optir] #print(model_dust) # then append only FIR flux grid. model = np.append(model, model_dust[n_optir:]) x1 = np.append(x1, x1_dust[n_optir:]) #plt.plot(x1,model,'r.') #plt.show() if ferr == 1: f = vals['f'] else: f = 0 # temporary... (if f is param, then take from vals dictionary.) con_res = (model > 0) & (wht2 > 0) #& (fy>0) sig = np.sqrt(1. / wht2 + f**2 * model**2) ''' contmp = x1>1e6 & (wht2>0) try: print(x1[contmp],model[contmp],fy[contmp],np.log10(vals['MDUST'])) except: pass ''' if not out: if fy is None: print('Data is none') return model[con_res] else: return (model - fy)[con_res] / sig[ con_res] # i.e. residual/sigma. Because is_weighted = True. if out: if fy is None: print('Data is none') return model[con_res], model else: return (model - fy)[con_res] / sig[ con_res], model # i.e. residual/sigma. Because is_weighted = True. def lnprob(pars, fy, wht2, f_fir): # # Returns: posterior. # vals = pars.valuesdict() if ferr == 1: f = vals['f'] else: f = 0 # temporary... (if f is param, then take from vals dictionary.) resid, model = residual(pars, fy, wht2, f_fir, out=True) con_res = (model > 0) & (wht2 > 0) sig = np.sqrt(1. / wht2 + f**2 * model**2) lnlike = -0.5 * np.sum(resid**2 + np.log(2 * 3.14 * sig[con_res]**2)) #print(np.log(2 * 3.14 * 1) * len(sig[con_res]), np.sum(np.log(2 * 3.14 * sig[con_res]**2))) #Av = vals['Av'] #if Av<0: # return -np.inf #else: # respr = 0 #np.log(1) respr = 0 # Flat prior... 
return lnlike + respr ############################### # Add parameters ############################### fit_params = Parameters() for aa in range(len(age)): if age[aa] == 99 or age[aa] > agemax: fit_params.add('A' + str(aa), value=0, min=0, max=1e-10) else: fit_params.add('A' + str(aa), value=1, min=0, max=1e3) ##################### # Dust attenuation ##################### try: Avmin = float(inputs['AVMIN']) Avmax = float(inputs['AVMAX']) fit_params.add('Av', value=Avmin, min=Avmin, max=Avmax) except: fit_params.add('Av', value=0.2, min=0, max=4.0) ##################### # Metallicity ##################### if int(inputs['ZEVOL']) == 1: for aa in range(len(age)): if age[aa] == 99 or age[aa] > agemax: fit_params.add('Z' + str(aa), value=0, min=0, max=1e-10) else: fit_params.add('Z' + str(aa), value=0, min=np.min(Zall), max=np.max(Zall)) elif inputs['ZFIX']: #print('Z is fixed') ZFIX = float(inputs['ZFIX']) aa = 0 fit_params.add('Z' + str(aa), value=0, min=ZFIX, max=ZFIX + 0.01) else: aa = 0 fit_params.add('Z' + str(aa), value=0, min=np.min(Zall), max=np.max(Zall)) #################################### # Initial Metallicity Determination #################################### chidef = 1e5 Zbest = 0 fwz = open('Z_' + ID0 + '_PA' + PA0 + '.cat', 'w') fwz.write('# ID Zini chi/nu AA Av Zbest\n') fwz.write('# FNELD = %d\n' % fneld) nZtmp = int((Zmax - Zmin) / delZtmp) ZZtmp = np.arange(Zmin, Zmax, delZtmp) # How to get initial parameters? # Nelder; if fneld == 1: fit_name = 'nelder' for zz in range(len(ZZtmp)): ZZ = ZZtmp[zz] if int(inputs['ZEVOL']) == 1: for aa in range(len(age)): fit_params['Z' + str(aa)].value = ZZ else: aa = 0 fit_params['Z' + str(aa)].value = ZZ out_tmp = minimize( residual, fit_params, args=(fy, wht2, False), method=fit_name) # nelder is the most efficient. keys = fit_report(out_tmp).split('\n') csq = 99999 rcsq = 99999 for key in keys: if key[4:7] == 'chi': skey = key.split(' ') csq = float(skey[14]) if key[4:7] == 'red': skey = key.split(' ') rcsq = float(skey[7]) fitc = [csq, rcsq] # Chi2, Reduced-chi2 fwz.write('%s %.2f %.5f' % (ID0, ZZ, fitc[1])) AA_tmp = np.zeros(len(age), dtype='float32') ZZ_tmp = np.zeros(len(age), dtype='float32') for aa in range(len(age)): AA_tmp[aa] = out_tmp.params['A' + str(aa)].value fwz.write(' %.5f' % (AA_tmp[aa])) Av_tmp = out_tmp.params['Av'].value fwz.write(' %.5f' % (Av_tmp)) if int(inputs['ZEVOL']) == 1: for aa in range(len(age)): ZZ_tmp[aa] = out_tmp.params['Z' + str(aa)].value fwz.write(' %.5f' % (ZZ_tmp[aa])) else: aa = 0 ZZ_tmp[aa] = out_tmp.params['Z' + str(aa)].value fwz.write(' %.5f' % (ZZ_tmp[aa])) fwz.write('\n') if fitc[1] < chidef: chidef = fitc[1] out = out_tmp # Or # Powell; else: fit_name = 'powell' for zz in range(0, nZtmp, 2): ZZ = zz * delZtmp + np.min(Zall) if int(inputs['ZEVOL']) == 1: for aa in range(len(age)): fit_params['Z' + str(aa)].value = ZZ else: aa = 0 fit_params['Z' + str(aa)].value = ZZ out_tmp = minimize( residual, fit_params, args=(fy, wht2, False), method=fit_name) # powel is the more accurate. 
keys = fit_report(out_tmp).split('\n') csq = 99999 rcsq = 99999 for key in keys: if key[4:7] == 'chi': skey = key.split(' ') csq = float(skey[14]) if key[4:7] == 'red': skey = key.split(' ') rcsq = float(skey[7]) fitc = [csq, rcsq] # Chi2, Reduced-chi2 fwz.write('%s %.2f %.5f' % (ID0, ZZ, fitc[1])) AA_tmp = np.zeros(len(age), dtype='float32') ZZ_tmp = np.zeros(len(age), dtype='float32') for aa in range(len(age)): AA_tmp[aa] = out_tmp.params['A' + str(aa)].value fwz.write(' %.5f' % (AA_tmp[aa])) Av_tmp = out_tmp.params['Av'].value fwz.write(' %.5f' % (Av_tmp)) if int(inputs['ZEVOL']) == 1: for aa in range(len(age)): ZZ_tmp[aa] = out_tmp.params['Z' + str(aa)].value fwz.write(' %.5f' % (ZZ_tmp[aa])) else: aa = 0 fwz.write(' %.5f' % (ZZ_tmp[aa])) fwz.write('\n') if fitc[1] < chidef: chidef = fitc[1] out = out_tmp # # Best fit # keys = fit_report(out).split('\n') for key in keys: if key[4:7] == 'chi': skey = key.split(' ') csq = float(skey[14]) if key[4:7] == 'red': skey = key.split(' ') rcsq = float(skey[7]) fitc = [csq, rcsq] # Chi2, Reduced-chi2 #fitc = fit_report_chi(out) # Chi2, Reduced-chi2 ZZ = Zbest # This is really important/does affect lnprob/residual. print('\n\n') print('#####################################') print('Zbest, chi are;', Zbest, chidef) print('Params are;', fit_report(out)) print('#####################################') print('\n\n') fwz.close() Av_tmp = out.params['Av'].value AA_tmp = np.zeros(len(age), dtype='float32') ZZ_tmp = np.zeros(len(age), dtype='float32') fm_tmp, xm_tmp = fnc.tmp04_val(ID0, PA0, out, zprev, lib, tau0=tau0) ######################## # Check redshift ######################## zrecom = zprev # Observed data. con_cz = (NR < 10000) #& (sn>snlim) fy_cz = fy[con_cz] ey_cz = ey[con_cz] x_cz = x[con_cz] # Observed range NR_cz = NR[con_cz] xm_s = xm_tmp / (1 + zprev) * (1 + zrecom) fm_s = np.interp(x_cz, xm_s, fm_tmp) if fzvis == 1: plt.plot(x_cz / (1 + zprev) * (1. + zrecom), fm_s, 'gray', linestyle='--', linewidth=0.5, label='Default ($z=%.5f$)' % (zprev)) # Model based on input z. plt.plot(x_cz, fy_cz, 'b', linestyle='-', linewidth=0.5, label='Obs.') # Observation plt.errorbar(x_cz, fy_cz, yerr=ey_cz, color='b', capsize=0, linewidth=0.5) # Observation if flag_m == 0: dez = 0.5 else: dez = 0.2 # # For Eazy # ''' dprob = np.loadtxt(eaz_path + 'photz_' + str(int(ID0)) + '.pz', comments='#') zprob = dprob[:,0] cprob = dprob[:,1] zz_prob = np.arange(0,13,delzz) cprob_s = np.interp(zz_prob, zprob, cprob) prior_s = 1/cprob_s prior_s /= np.sum(prior_s) ''' zz_prob = np.arange(0, 13, delzz) prior_s = zz_prob * 0 + 1. 
prior_s /= np.sum(prior_s) try: print('############################') print('Start MCMC for redshift fit') print('############################') res_cz, fitc_cz = check_redshift(fy_cz, ey_cz, x_cz, fm_tmp, xm_tmp / (1 + zprev), zprev, dez, prior_s, NR_cz, zliml, zlimu, delzz, nmc_cz, nwalk_cz) z_cz = np.percentile(res_cz.flatchain['z'], [16, 50, 84]) scl_cz0 = np.percentile(res_cz.flatchain['Cz0'], [16, 50, 84]) scl_cz1 = np.percentile(res_cz.flatchain['Cz1'], [16, 50, 84]) zrecom = z_cz[1] Czrec0 = scl_cz0[1] Czrec1 = scl_cz1[1] # Switch to peak redshift: from scipy import stats from scipy.stats import norm # find minimum and maximum of xticks, so we know # where we should compute theoretical distribution ser = res_cz.flatchain['z'] xmin, xmax = zprev - 0.2, zprev + 0.2 lnspc = np.linspace(xmin, xmax, len(ser)) print('\n\n') print( 'Recommended redshift, Cz0 and Cz1, %.5f %.5f %.5f, with chi2/nu=%.3f' % (zrecom, Cz0 * Czrec0, Cz1 * Czrec1, fitc_cz[1])) print('\n\n') except: print('z fit failed. No spectral data set?') try: ezl = float(inputs['EZL']) ezu = float(inputs['EZU']) print('Redshift error is taken from input file.') if ezl < ezmin: ezl = ezmin #0.03 if ezu < ezmin: ezu = ezmin #0.03 except: ezl = ezmin ezu = ezmin print('Redshift error is assumed to %.1f.' % (ezl)) z_cz = [zprev - ezl, zprev, zprev + ezu] zrecom = z_cz[1] scl_cz0 = [1., 1., 1.] scl_cz1 = [1., 1., 1.] Czrec0 = scl_cz0[1] Czrec1 = scl_cz1[1] ''' try: # lets try the normal distribution first m, s = stats.norm.fit(ser) # get mean and standard deviation pdf_g = stats.norm.pdf(lnspc, m, s) # now get theoretical values in our interval z_cz[:] = [m-s, m, m+s] zrecom = z_cz[1] f_fitgauss = 1 except: print('Guassian fitting to z distribution failed.') f_fitgauss=0 ''' f_fitgauss = 0 xm_s = xm_tmp / (1 + zprev) * (1 + zrecom) fm_s = np.interp(x_cz, xm_s, fm_tmp) whtl = 1 / np.square(ey_cz) try: wht3, ypoly = check_line_cz_man(fy_cz, x_cz, whtl, fm_s, zrecom, LW0) except: wht3, ypoly = whtl, fy_cz con_line = (wht3 == 0) if fzvis == 1: plt.plot(x_cz, fm_s, 'r', linestyle='-', linewidth=0.5, label='Updated model ($z=%.5f$)' % (zrecom)) # Model based on recomended z. plt.plot(x_cz[con_line], fm_s[con_line], color='orange', marker='o', linestyle='', linewidth=3.) # Plot lines for reference for ll in range(len(LW)): try: conpoly = (x_cz / (1. + zrecom) > 3000) & (x_cz / (1. + zrecom) < 8000) yline = np.max(ypoly[conpoly]) yy = np.arange(yline / 1.02, yline * 1.1) xxpre = yy * 0 + LW[ll] * (1. + zprev) xx = yy * 0 + LW[ll] * (1. + zrecom) plt.plot(xxpre, yy / 1.02, linewidth=0.5, linestyle='--', color='gray') plt.text(LW[ll] * (1. + zprev), yline / 1.05, '%s' % (LN[ll]), fontsize=8, color='gray') plt.plot(xx, yy, linewidth=0.5, linestyle='-', color='orangered') plt.text(LW[ll] * (1. + zrecom), yline, '%s' % (LN[ll]), fontsize=8, color='orangered') except: pass plt.plot(xbb, fybb, '.r', linestyle='', linewidth=0, zorder=4, label='Obs.(BB)') plt.plot(xm_tmp, fm_tmp, color='gray', marker='.', ms=0.5, linestyle='', linewidth=0.5, zorder=4, label='Model') try: xmin, xmax = np.min(x_cz) / 1.1, np.max(x_cz) * 1.1 except: xmin, xmax = 2000, 10000 plt.xlim(xmin, xmax) try: plt.ylim(0, yline * 1.1) except: pass plt.xlabel('Wavelength ($\mathrm{\AA}$)') plt.ylabel('$F_\\nu$ (arb.)') plt.legend(loc=0) zzsigma = ((z_cz[2] - z_cz[0]) / 2.) / zprev zsigma = np.abs(zprev - zrecom) / (zprev) C0sigma = np.abs(Czrec0 - Cz0) / Cz0 eC0sigma = ((scl_cz0[2] - scl_cz0[0]) / 2.) 
/ Cz0 C1sigma = np.abs(Czrec1 - Cz1) / Cz1 eC1sigma = ((scl_cz1[2] - scl_cz1[0]) / 2.) / Cz1 print('Input redshift is %.3f per cent agreement.' % ((1. - zsigma) * 100)) print('Error is %.3f per cent.' % (zzsigma * 100)) print('Input Cz0 is %.3f per cent agreement.' % ((1. - C0sigma) * 100)) print('Error is %.3f per cent.' % (eC0sigma * 100)) print('Input Cz1 is %.3f per cent agreement.' % ((1. - C1sigma) * 100)) print('Error is %.3f per cent.' % (eC1sigma * 100)) plt.show() # # Ask interactively; # flag_z = raw_input( 'Do you want to continue with original redshift, Cz0 and Cz1, %.5f %.5f %.5f? ([y]/n/m) ' % (zprev, Cz0, Cz1)) else: flag_z = 'y' ################################################# # Gor for mcmc phase ################################################# if flag_z == 'y' or flag_z == '': zrecom = zprev Czrec0 = Cz0 Czrec1 = Cz1 ####################### # Added ####################### if fzmc == 1: out_keep = out #sigz = 1.0 #3.0 fit_params.add('zmc', value=zrecom, min=zrecom - (z_cz[1] - z_cz[0]) * sigz, max=zrecom + (z_cz[2] - z_cz[1]) * sigz) #print(zrecom,zrecom-(z_cz[1]-z_cz[0])*sigz,zrecom+(z_cz[2]-z_cz[1])*sigz) ##################### # Error parameter ##################### try: ferr = int(inputs['F_ERR']) if ferr == 1: fit_params.add('f', value=1e-2, min=0, max=1e2) ndim += 1 except: ferr = 0 pass ##################### # Dust; ##################### if f_dust: Tdust = np.arange(DT0, DT1, dDT) fit_params.add('TDUST', value=len(Tdust) / 2., min=0, max=len(Tdust) - 1) #fit_params.add('TDUST', value=1, min=0, max=len(Tdust)-1) fit_params.add('MDUST', value=1e6, min=0, max=1e10) ndim += 2 # Append data; dat_d = np.loadtxt(DIR_TMP + 'spec_dust_obs_' + ID0 + '_PA' + PA0 + '.cat', comments='#') x_d = dat_d[:, 1] fy_d = dat_d[:, 2] ey_d = dat_d[:, 3] fy = np.append(fy, fy_d) x = np.append(x, x_d) wht = np.append(wht, 1. / np.square(ey_d)) wht2 = check_line_man(fy, x, wht, fy, zprev, LW0) # Then, minimize again. out = minimize( residual, fit_params, args=(fy, wht2, f_dust), method=fit_name ) # It needs to define out with redshift constrain. print(fit_report(out)) # Fix params to what we had before. out.params['zmc'].value = zrecom out.params['Av'].value = out_keep.params['Av'].value for aa in range(len(age)): out.params['A' + str(aa)].value = out_keep.params['A' + str(aa)].value try: out.params['Z' + str(aa)].value = out_keep.params[ 'Z' + str(aa)].value except: out.params['Z0'].value = out_keep.params['Z0'].value ############################## # Save fig of z-distribution. 
############################## try: # if spectrum; fig = plt.figure(figsize=(6.5, 2.5)) fig.subplots_adjust(top=0.96, bottom=0.16, left=0.09, right=0.99, hspace=0.15, wspace=0.25) ax1 = fig.add_subplot(111) #n, nbins, patches = ax1.hist(res_cz.flatchain['z'], bins=200, normed=False, color='gray',label='') n, nbins, patches = ax1.hist(res_cz.flatchain['z'], bins=200, normed=True, color='gray', label='') if f_fitgauss == 1: ax1.plot(lnspc, pdf_g, label='Gaussian fit', color='g', linestyle='-') # plot it ax1.set_xlim(m - s * 3, m + s * 3) yy = np.arange(0, np.max(n), 1) xx = yy * 0 + z_cz[1] ax1.plot( xx, yy, linestyle='-', linewidth=1, color='orangered', label='$z=%.5f_{-%.5f}^{+%.5f}$\n$C_z0=%.3f$\n$C_z1=%.3f$' % (z_cz[1], z_cz[1] - z_cz[0], z_cz[2] - z_cz[1], Czrec0, Czrec1)) xx = yy * 0 + z_cz[0] ax1.plot(xx, yy, linestyle='--', linewidth=1, color='orangered') xx = yy * 0 + z_cz[2] ax1.plot(xx, yy, linestyle='--', linewidth=1, color='orangered') xx = yy * 0 + zprev ax1.plot(xx, yy, linestyle='-', linewidth=1, color='royalblue') ax1.set_xlabel('Redshift') ax1.set_ylabel('$dn/dz$') ax1.legend(loc=0) plt.savefig('zprob_' + ID0 + '_PA' + PA0 + '.pdf', dpi=300) plt.close() except: print('z-distribution figure is not generated.') pass ############################## print('\n\n') print('###############################') print('Input redshift is adopted.') print('Starting long journey in MCMC.') print('###############################') print('\n\n') ################# # Initialize mm. ################# mm = 0 # add a noise parameter # out.params.add('f', value=1, min=0.001, max=20) wht2 = wht ################################ print('\nMinimizer Defined\n') mini = Minimizer(lnprob, out.params, fcn_args=[fy, wht2, f_dust], f_disp=f_disp, f_move=f_move) print('######################') print('### Starting emcee ###') print('######################') import multiprocess ncpu0 = int(multiprocess.cpu_count() / 2) try: ncpu = int(inputs['NCPU']) if ncpu > ncpu0: print('!!! NCPU is larger than No. of CPU. !!!') #print('Now set to %d'%(ncpu0)) #ncpu = ncpu0 except: ncpu = ncpu0 pass print('No. 
of CPU is set to %d' % (ncpu)) start_mc = timeit.default_timer() res = mini.emcee(burn=int(nmc / 2), steps=nmc, thin=10, nwalkers=nwalk, params=out.params, is_weighted=True, ntemps=ntemp, workers=ncpu) stop_mc = timeit.default_timer() tcalc_mc = stop_mc - start_mc print('###############################') print('### MCMC part took %.1f sec ###' % (tcalc_mc)) print('###############################') #----------- Save pckl file #-------- store chain into a cpkl file start_mc = timeit.default_timer() import corner burnin = int(nmc / 2) savepath = './' cpklname = 'chain_' + ID0 + '_PA' + PA0 + '_corner.cpkl' savecpkl( { 'chain': res.flatchain, 'burnin': burnin, 'nwalkers': nwalk, 'niter': nmc, 'ndim': ndim }, savepath + cpklname) # Already burn in stop_mc = timeit.default_timer() tcalc_mc = stop_mc - start_mc print('#################################') print('### Saving chain took %.1f sec' % (tcalc_mc)) print('#################################') Avmc = np.percentile(res.flatchain['Av'], [16, 50, 84]) #Zmc = np.percentile(res.flatchain['Z'], [16,50,84]) Avpar = np.zeros((1, 3), dtype='float32') Avpar[0, :] = Avmc out = res #################### # Best parameters #################### Amc = np.zeros((len(age), 3), dtype='float32') Ab = np.zeros(len(age), dtype='float32') Zmc = np.zeros((len(age), 3), dtype='float32') Zb = np.zeros(len(age), dtype='float32') NZbest = np.zeros(len(age), dtype='int') f0 = fits.open(DIR_TMP + 'ms_' + ID0 + '_PA' + PA0 + '.fits') sedpar = f0[1] ms = np.zeros(len(age), dtype='float32') for aa in range(len(age)): Ab[aa] = out.params['A' + str(aa)].value Amc[aa, :] = np.percentile(res.flatchain['A' + str(aa)], [16, 50, 84]) try: Zb[aa] = out.params['Z' + str(aa)].value Zmc[aa, :] = np.percentile(res.flatchain['Z' + str(aa)], [16, 50, 84]) except: Zb[aa] = out.params['Z0'].value Zmc[aa, :] = np.percentile(res.flatchain['Z0'], [16, 50, 84]) NZbest[aa] = bfnc.Z2NZ(Zb[aa]) ms[aa] = sedpar.data['ML_' + str(NZbest[aa])][aa] Avb = out.params['Av'].value if f_dust: Mdust_mc = np.zeros(3, dtype='float32') Tdust_mc = np.zeros(3, dtype='float32') Mdust_mc[:] = np.percentile(res.flatchain['MDUST'], [16, 50, 84]) Tdust_mc[:] = np.percentile(res.flatchain['TDUST'], [16, 50, 84]) print(Mdust_mc) print(Tdust_mc) #################### # MCMC corner plot. #################### if mcmcplot: fig1 = corner.corner(res.flatchain, labels=res.var_names, \ label_kwargs={'fontsize':16}, quantiles=[0.16, 0.84], show_titles=False, \ title_kwargs={"fontsize": 14}, truths=list(res.params.valuesdict().values()), \ plot_datapoints=False, plot_contours=True, no_fill_contours=True, \ plot_density=False, levels=[0.68, 0.95, 0.997], truth_color='gray', color='#4682b4') fig1.savefig('SPEC_' + ID0 + '_PA' + PA0 + '_corner.pdf') plt.close() ######################### msmc0 = 0 for aa in range(len(age)): msmc0 += res.flatchain['A' + str(aa)] * ms[aa] msmc = np.percentile(msmc0, [16, 50, 84]) # Do analysis on MCMC results. # Write to file. 
            stop = timeit.default_timer()
            tcalc = stop - start

            # Load writing package;
            from .writing import Analyze
            wrt = Analyze(inputs)  # Set up for input

            start_mc = timeit.default_timer()
            wrt.get_param(res, lib_all, zrecom, Czrec0, Czrec1, z_cz[:], scl_cz0[:],
                          scl_cz1[:], fitc[:], tau0=tau0, tcalc=tcalc)
            stop_mc = timeit.default_timer()
            tcalc_mc = stop_mc - start_mc
            print('##############################################')
            print('### Writing params to file took %.1f sec ###' % (tcalc_mc))
            print('##############################################')

            return 0, zrecom, Czrec0, Czrec1

        ###################################################################
        elif flag_z == 'm':
            zrecom = float(input('What is your manual input for redshift? '))
            Czrec0 = float(input('What is your manual input for Cz0? '))
            Czrec1 = float(input('What is your manual input for Cz1? '))
            print('\n\n')
            print('Generate model templates with input redshift and Scale.')
            print('\n\n')
            return 1, zrecom, Czrec0, Czrec1

        else:
            print('\n\n')
            print('Terminated because of redshift estimate.')
            print('Generate model templates with recommended redshift.')
            print('\n\n')
            flag_gen = input('Do you want to make templates with recommended redshift, Cz0, and Cz1, %.5f %.5f %.5f? ([y]/n) '
                             % (zrecom, Czrec0, Czrec1))
            if flag_gen == 'y' or flag_gen == '':
                #return 1, zrecom, Cz0 * Czrec0, Cz1 * Czrec1
                return 1, zrecom, Czrec0, Czrec1
            else:
                print('\n\n')
                print('There is nothing to do.')
                print('\n\n')
                return 0, zprev, Czrec0, Czrec1
def main():
    # sets up command line argument parser
    args = init_argparse()

    # The file format is:
    # X values of size n
    # Number of replicates
    # A values
    # Y values with n values on each line
    if args.input:
        # input file given, read from it
        f = open(args.input, 'r')
        x = array([float(val) for val in f.readline().split()])
        num_rep = int(f.readline())
        a = array([float(val) for val in f.readline().split()])
        y = zeros((len(a), len(x)))
        stddev = zeros((len(a), len(x)))
        for i in range(len(a)):
            all_y = zeros((num_rep, len(x)))
            for j in range(num_rep):
                all_y[j] = array([float(val) for val in f.readline().split()])
            y[i] = average(all_y, axis=0)
            stddev[i] = std(all_y, axis=0)
        f.close()
    else:
        # read from stdin
        x = array([float(val) for val in input().split()])
        num_rep = int(input())
        a = array([float(val) for val in input().split()])
        y = zeros((len(a), len(x)))
        stddev = zeros((len(a), len(x)))
        for i in range(len(a)):
            all_y = zeros((num_rep, len(x)))
            for j in range(num_rep):
                all_y[j] = array([float(val) for val in input().split()])
            y[i] = average(all_y, axis=0)
            stddev[i] = std(all_y, axis=0)

    # adding parameters, initial guesses, and constraints
    params = Parameters()
    params.add('Kd', value=1, min=0)
    params.add('EmFRETMAX', value=1, min=0)

    return_data = {}
    ci = []
    emfretmax = []
    emfretmax_error = []

    # run fitting procedure and display results
    # this needs to be repeated for each A value. Note that A[i] corresponds to Y[i].
    # X is assumed to hold constant across all of these, so it remains unchanged across iterations.
    for i in range(len(a)):
        result = minimize(residuals, params, args=(x, y[i], a[i]))
        ci.append(conf_interval(result, maxiter=1000))
        # fitted values and uncertainties live on result.params, not the input params
        emfretmax.append(result.params['EmFRETMAX'].value)
        emfretmax_error.append(result.params['EmFRETMAX'].stderr)

        # generate table of results with tabulate
        return_data[a[i]] = [a[i],
                             round(result.params['Kd'].value, 4),
                             round(result.params['Kd'].stderr, 4),
                             round(result.params['EmFRETMAX'].value, 4),
                             round(result.params['EmFRETMAX'].stderr, 4),
                             round(1 - result.residual.var() / var(y), 4)]

        # plots data and curve on graph and displays if output file is given
        if args.scatter:
            create_scatter(args.scatter, result, x, y[i], a[i], stddev[i], i, args.unit)

    if args.bar:
        create_bar(args.bar, emfretmax, emfretmax_error, a, args.unit)

    print(json.dumps(return_data))
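# A note on `conf_interval`: in current lmfit it expects both the Minimizer and
# the fit result, and fitted values are read from result.params. A sketch of
# that call pattern, reusing the names from the function above (`residuals`,
# `x`, `y`, `a` are assumed to be defined as they are there):
import lmfit

mini = lmfit.Minimizer(residuals, params, fcn_args=(x, y[0], a[0]))
result = mini.minimize()
ci = lmfit.conf_interval(mini, result)
lmfit.report_ci(ci)
print(result.params['Kd'].value, result.params['Kd'].stderr)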
def analyticLinear(params, x):
    slope = params['slope'].value
    intercept = params['intercept'].value
    return slope * x + intercept

def residual(params, x, data, error):
    # unweighted; this is where I could weight by the error (/error)
    return data - analyticLinear(params, x)

def residualError(params, x, data, error):
    # this is where I weight by the error
    return (data - analyticLinear(params, x)) / error

params = Parameters()
# you can add attributes to the fitting dictionary in such a way
params.add('slope', value=1)
params.add('intercept', value=0.5)

### I make an nddata set of a T1 of power measurement
# The arrays for data, error, and the x-dim (power).
dataArray = array([0.48909572, 0.43322032, 0.3944982, 0.37726134, 0.36782925, 0.48865151])
dataError = array([0.00304622, 0.00074815, 0.00119058, 0.00384469, 0.00288963, 0.00680168])
dataPower = array([1.18040107e-01, 1.49209244e+00, 2.91886587e+00, 4.44269384e+00,
                   5.02700108e+00, 4.13047502e-10])
# Throw everything into an nddata set - Zach, this is a nice way to handle your
# data, with axes and associated error.
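# Running the weighted fit above end to end with the arrays already defined;
# minimize() returns a MinimizerResult whose .params hold the fitted values:
from lmfit import minimize, fit_report

out = minimize(residualError, params, args=(dataPower, dataArray, dataError))
print(fit_report(out))
print(out.params['slope'].value, out.params['intercept'].value)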
def __init__(self, model):
    self.model = model
    self.lmfitparams = Parameters()
    self.parameterdict = {}
    self.seriesdict = {}
p = np.random.uniform(0, 1, size=4)
p = enzyme_total * (p / (p[0] + p[1] + p[2] + p[3]))
# Add the random parameter set 'p' to a permanent list of all random parameter sets
Params.append(p)
f6p = simulate(p, enzyme_total)
# add the resulting F6P value to a permanent list of parameters
F6ps.append(f6p)

# Sort F6P values and rearrange parameters to match accordingly
F6ps = np.asarray(F6ps)
Params = np.asarray(Params)
indices = F6ps.argsort()
Params = Params[indices]
F6ps = F6ps[indices]

'''Levenberg-Marquardt Algorithm'''
# set initial parameters to be used for the LM simulation (the best random set)
pi = Parameters()
pi.add('protALDPase', value=Params[-1, 0], min=0.0, max=1.0)
pi.add('protGAPDH', value=Params[-1, 1], min=0.0, max=1.0)
pi.add('protPGK', value=Params[-1, 2], min=0.0, max=1.0)
pi.add('protTIM', value=Params[-1, 3], min=0.0, max=1.0)

# Run the LM algorithm using lmfit:
result = minimize(objFunction, pi)
print('\n', "The following is the set of enzyme concentrations which result in "
      "optimal production of F6P for the reconstituted system:", '\n', result.params)
print('\n', "The above parameters result in a theoretical F6P concentration of:",
      1 / (result.residual[0]), "ug/mL")
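# The sampler above draws four uniform weights and rescales them so they sum to
# `enzyme_total` (a fixed total-enzyme budget). That rescaling step in
# isolation; `enzyme_total = 1.0` here is an illustrative placeholder:
import numpy as np

enzyme_total = 1.0
p = np.random.uniform(0, 1, size=4)
p = enzyme_total * (p / p.sum())
print(p, p.sum())  # four non-negative weights summing to enzyme_total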
class ParameterGroup(Group): """ Group for Fitting Parameters """ def __init__(self, name=None, _larch=None, **kws): if name is not None: self.__name__ = name self._larch = _larch self.__params__ = None if _larch is not None: self.__params__ = Parameters(asteval=_larch.symtable._sys.fiteval) Group.__init__(self) self.__exprsave__ = {} for key, val in kws.items(): expr = getattr(val, 'expr', None) if expr is not None: self.__exprsave__[key] = expr val.expr = None setattr(self, key, val) for key, val in self.__exprsave__.items(): self.__params__[key].expr = val def __repr__(self): return '<Param Group {:s}>'.format(self.__name__) def __setattr__(self, name, val): if isinstance(val, Parameter): if val.name != name: # allow 'a=Parameter(2, ..)' to mean Parameter(name='a', value=2, ...) nval = None try: nval = float(val.name) except (ValueError, TypeError): pass if nval is not None: val.value = nval self.__params__.add(name, value=val.value, vary=val.vary, min=val.min, max=val.max, expr=val.expr, brute_step=val.brute_step) val = self.__params__[name] self.__dict__[name] = val def __add(self, name, value=None, vary=True, min=-np.inf, max=np.inf, expr=None, stderr=None, correl=None, brute_step=None): if expr is None and isinstance(value, str): expr = value value = None if self.__params__ is not None: self.__params__.add(name, value=value, vary=vary, min=min, max=max, expr=expr, brute_step=brute_step) self.__params__[name].stderr = stderr self.__params__[name].correl = correl self.__dict__[name] = self.__params__[name]