def rescale_reframe(scisub, refsub, verbose=False):
    """Fit a single scaling factor ('sigma') between a science cutout and a
    reference cutout.

    Args:
        scisub (numpy.ndarray): science image cutout.
        refsub (numpy.ndarray): reference image cutout.
        verbose (bool): if True, print the fitted sigma.

    Returns:
        The partial residual image evaluated at the best-fit sigma.
    """
    params = Parameters()
    params.add('sigma', 1.0, True, 0.0, inf)

    image_sub = Model(res_sigma_image_flat, independent_vars=['scisub', 'refsub'])
    image_sub_results = image_sub.fit(data=scisub.ravel(), params=params,
                                      scisub=scisub, refsub=refsub)

    if verbose:
        print('Sigma of Rescale: {}'.format(image_sub_results.params['sigma'].value))
    return partial_res_sigma_image(image_sub_results.params['sigma'].value)
def optimize_density_and_scaling(self, density_min, density_max, bkg_min, bkg_max,
                                 iterations, callback_fcn=None, output_txt=None):
    params = Parameters()
    params.add("density", value=self.density, min=density_min, max=density_max)
    params.add("background_scaling", value=self.background_scaling,
               min=bkg_min, max=bkg_max)

    self.iteration = 0

    def optimization_fcn(params):
        density = params['density'].value
        background_scaling = params['background_scaling'].value

        self.background_spectrum.scaling = background_scaling
        self.calculate_spectra()
        self.optimize_sq(iterations, fcn_callback=callback_fcn)
        r, fr = self.limit_spectrum(self.fr_spectrum, 0, self.r_cutoff).data

        output = (-fr - 4 * np.pi * convert_density_to_atoms_per_cubic_angstrom(
            self.composition, density) * r) ** 2

        self.write_output(u'{} X: {:.3f} Den: {:.3f}'.format(
            self.iteration, np.sum(output) / (r[1] - r[0]), density))
        self.iteration += 1
        return output

    minimize(optimization_fcn, params)
    self.write_fit_results(params)
def model3_fits(fitData, exc, outf, c_included, beta_included, beta2_included,
                end_diff=False):
    # Set up parameters
    bval = 0
    b2val = 0
    if beta_included:
        bval = -1.0
    if beta2_included:
        b2val = -1.0
    pars = Parameters()
    pars.add('alpha', value=5.0, vary=True)
    pars.add('const', value=0.0, vary=c_included)
    pars.add('beta', value=bval, vary=beta_included)
    pars.add('beta2', value=b2val, vary=beta2_included)
    pars.add('delta', value=0.0, vary=end_diff)

    fit_result = cross_validate(model3, fitData, pars, outf)
    parvals = pars.valuesdict()
    beta_mod3 = (parvals['const'], parvals['beta'], parvals['beta2'])

    name = 'Thiophene model3: delta varied=%r\n' % end_diff
    write_statistics(name, outf, pars, fit_result)
    # title = "Coupling = const + beta2 cos^2(theta) \n"
    plot_something(model3, pars, exc, title="",
                   filename='thio_mod3%r_%r_%r_%r.eps'
                            % (c_included, beta_included, beta2_included, end_diff))
    return beta_mod3
def autobk(energy, mu, rbkg=1, nknots=None, group=None, e0=None, kmin=0, kmax=None,
           kw=1, dk=0, win=None, vary_e0=True, chi_std=None, nfft=2048, kstep=0.05,
           _larch=None):
    if _larch is None:
        raise Warning("cannot calculate autobk spline -- larch broken?")

    # get array indices for rbkg and e0: irbkg, ie0
    rgrid = np.pi / (kstep * nfft)
    if rbkg < 2 * rgrid:
        rbkg = 2 * rgrid
    irbkg = int(1.01 + rbkg / rgrid)
    if e0 is None:
        e0 = find_e0(energy, mu, group=group, _larch=_larch)
    ie0 = _index_nearest(energy, e0)

    # save ungridded k (kraw) and gridded k (kout)
    # and ftwin (*k-weighting) for FT in residual
    kraw = np.sqrt(ETOK * (energy[ie0:] - e0))
    if kmax is None:
        kmax = max(kraw)
    kout = kstep * np.arange(int(1.01 + kmax / kstep))
    ftwin = kout**kw * ftwindow(kout, xmin=kmin, xmax=kmax, window=win, dx=dk)

    # calc k-value and initial guess for y-values of spline params
    nspline = max(4, min(60, 2 * int(rbkg * (kmax - kmin) / np.pi) + 1))
    spl_y = np.zeros(nspline)
    spl_k = np.zeros(nspline)
    for i in range(nspline):
        q = kmin + i * (kmax - kmin) / (nspline - 1)
        ik = _index_nearest(kraw, q)
        i1 = min(len(kraw) - 1, ik + 5)
        i2 = max(0, ik - 5)
        spl_k[i] = kraw[ik]
        spl_y[i] = (2 * mu[ik] + mu[i1] + mu[i2]) / 4.0

    # get spline representation: knots, coefs, order=3
    # coefs will be varied in fit.
    knots, coefs, order = splrep(spl_k, spl_y)

    # set fit parameters from initial coefficients
    fparams = Parameters()
    for i, v in enumerate(coefs):
        fparams.add("c%i" % i, value=v, vary=i < len(spl_y))

    fitkws = dict(knots=knots, order=order, kraw=kraw, mu=mu[ie0:],
                  irbkg=irbkg, kout=kout, ftwin=ftwin, nfft=nfft)
    # do fit
    fit = Minimizer(__resid, fparams, fcn_kws=fitkws)
    fit.leastsq()

    # write final results
    coefs = [p.value for p in fparams.values()]
    bkg, chi = spline_eval(kraw, mu[ie0:], knots, coefs, order, kout)
    obkg = np.zeros(len(mu))
    obkg[:ie0] = mu[:ie0]
    obkg[ie0:] = bkg
    if _larch.symtable.isgroup(group):
        setattr(group, 'bkg', obkg)
        setattr(group, 'chie', mu - obkg)
        setattr(group, 'k', kout)
        setattr(group, 'chi', chi)
def as_parameter_dict(self) -> Parameters:
    """
    Creates a lmfit.Parameters dictionary from the group.

    Notes
    -----
    Only for internal use.
    """
    params = Parameters()
    for label, p in self.all(seperator="_"):
        p.name = "_" + label
        if p.non_neg:
            p = copy.deepcopy(p)
            if p.value == 1:
                p.value += 1e-10
            if p.min == 1:
                p.min += 1e-10
            if p.max == 1:
                p.max += 1e-10
            else:
                try:
                    p.value = log(p.value)
                    p.min = log(p.min) if np.isfinite(p.min) else p.min
                    p.max = log(p.max) if np.isfinite(p.max) else p.max
                except Exception:
                    raise Exception("Could not take log of parameter"
                                    f" '{label}' with value '{p.value}'")
        params.add(p)
    return params
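# The `non_neg` branch above enforces positivity by fitting log(p); the
# exponentiation on the way back happens elsewhere in that codebase. A minimal
# self-contained sketch of the same log-space trick, with hypothetical names
# (`log_amp` is illustrative, not from the snippet):

import numpy as np
from lmfit import Parameters, minimize

def residual(params, x, data):
    amp = np.exp(params['log_amp'].value)  # back-transform: amp is always > 0
    return amp * np.exp(-x) - data

x = np.linspace(0, 5, 50)
data = 3.0 * np.exp(-x) + np.random.default_rng(0).normal(0, 0.01, x.size)

params = Parameters()
params.add('log_amp', value=np.log(1.0))  # start at amp = 1.0

out = minimize(residual, params, args=(x, data))
print('fitted amp:', np.exp(out.params['log_amp'].value))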
def isotropic(filename, v0, x0, rho, g=9.81):
    try:
        results = read_results_file(filename)
        Volume_array_normalized = v0 - results['External_volume']
        spring_position_array_normalized = x0 - results['Ylow']
        params = Parameters()
        params.add('A', value=1)
        params.add('B', value=0)
        try:
            result = minimize(residual_isotropic, params,
                              args=(spring_position_array_normalized,
                                    Volume_array_normalized))
        except TypeError:
            result = minimize(residual_isotropic, params,
                              args=(spring_position_array_normalized,
                                    Volume_array_normalized),
                              method="nelder")
        v = result.params.valuesdict()
        x_th = np.arange(np.amin(Volume_array_normalized),
                         np.amax(Volume_array_normalized), 0.0000001)
        y_th = v['A'] * x_th + v['B']
        # report_fit(result.params, min_correl=0.5)
        filename_list = filename.split("\\")
        txt = filename_list[-1].split("_")
        txt = "_".join(txt[0:-1])
        k = -rho * g / v['A']
        print("Stiffness: " + str(k))
        list_results = [txt, k, v['B']]
        return list_results
    except Exception:
        return None
class rabifit:
    def __init__(self, nmax=10000):
        self.params = Parameters()
        self.params.add('nbar', value=.1, vary=False, min=0.)
        self.params.add('delta', value=0, min=-0.05, max=.1, vary=False)
        self.params.add('time_2pi', value=20, vary=True, min=0.1)
        self.params.add('coh_time', value=2000, vary=False, min=0)
        self.params.add('eta', value=0.06, vary=False, min=0)
        self.eta = 0.06
        self.sideband = 0
        self.result = None
        self.nmax = nmax

    def residual(self, params, x, data=None, eps=None):
        # unpack parameters: extract .value attribute for each parameter
        nbar = params['nbar'].value
        delta = params['delta'].value
        time_2pi = params['time_2pi'].value
        coh_time = params['coh_time'].value
        eta = params['eta'].value
        te = rabi_flop_time_evolution(self.sideband, eta, nmax=self.nmax)
        model = te.compute_evolution_decay_thermal(abs(coh_time), nbar=nbar,
                                                   delta=delta,
                                                   time_2pi=time_2pi, t=x)
        if data is None:
            return model
        if eps is None:
            return model - data
        return (model - data) / eps

    def minimize(self, data):
        self.result = minimize(self.residual, self.params,
                               args=(data[:, 0], data[:, 1], data[:, 2] + .01))
def fit_for_b(bins, x, y, error=None):
    """
    Fit a fractional energy loss histogram for the parameter b
    """
    magic_ice_const = 0.917

    outer_bounds = (min(bins), max(bins))  # Beginning and ending position
    select_bounds = np.vectorize(lambda xx: mlc.get_bounding_elements(xx, bins))  # Returns the bin edges on either side of a point
    E_diff = lambda xx, b: np.exp(-b*xx[1]) - np.exp(-b*xx[0])  # Proportional to Delta E (comes from -dE/dx=a+b*E)
    fit_func = lambda xx, b: E_diff(select_bounds(xx), b*magic_ice_const) / E_diff(outer_bounds, b*magic_ice_const)  # We are fitting to the ratio of differences

    params = Parameters()
    params.add('b', value=0.4*10**(-3))  # Add b as a fit parameter
    if error is not None:
        l_fit_func = lambda params, x, data: np.sqrt((fit_func(x, params['b']) - data)**2 / error**2)
    else:
        l_fit_func = lambda params, x, data: fit_func(x, params['b']) - data

    result = minimize(l_fit_func, params, args=(x, y))

    b = result.params['b'].value
    if b == 0.4*10**(-3):
        # b never moved from its starting value; dump the inputs for debugging
        print(x)
        print(y)
        print(fit_func(x, 0.36*10**(-3)))
        raise ValueError("Fit doesn't make sense.")
    return b
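# The error-weighted branch above follows the usual lmfit convention of
# scaling residuals by the uncertainties, so leastsq minimizes chi-square.
# (Note the snippet returns sqrt((model-data)^2/error^2), i.e. an absolute
# value; the more common signed form is shown here.) A minimal sketch with
# synthetic data:

import numpy as np
from lmfit import Parameters, minimize

def residual(params, x, data, eps):
    model = params['a'].value * x + params['b'].value
    return (model - data) / eps  # signed, uncertainty-weighted residuals

x = np.linspace(0, 10, 25)
data = 2.0 * x + 1.0 + np.random.default_rng(1).normal(0, 0.2, x.size)
eps = np.full_like(x, 0.2)  # per-point 1-sigma uncertainties

params = Parameters()
params.add('a', value=1.0)
params.add('b', value=0.0)

out = minimize(residual, params, args=(x, data, eps))
print(out.params['a'].value, out.chisqr)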
def NIST_Test(DataSet, method='leastsq', start='start2', plot=True):
    NISTdata = ReadNistData(DataSet)
    resid, npar, dimx = Models[DataSet]
    y = NISTdata['y']
    x = NISTdata['x']

    params = Parameters()
    for i in range(npar):
        pname = 'b%i' % (i+1)
        cval = NISTdata['cert_values'][i]
        cerr = NISTdata['cert_stderr'][i]
        pval1 = NISTdata[start][i]
        params.add(pname, value=pval1)

    myfit = minimize(resid, params, method=method, args=(x,), kws={'y': y})
    digs = Compare_NIST_Results(DataSet, myfit, params, NISTdata)

    if plot and HASPYLAB:
        fit = -resid(params, x)
        pylab.plot(x, y, 'ro')
        pylab.plot(x, fit, 'k+-')
        pylab.show()
    return digs > 2
def group2params(paramgroup, _larch=None):
    """take a Group of Parameter objects (and maybe other things)
    and put them into Larch's current fiteval namespace

    returns a lmfit Parameters set, ready for use in fitting
    """
    if _larch is None:
        return None
    if isinstance(paramgroup, ParameterGroup):
        return paramgroup.__params__

    fiteval = _larch.symtable._sys.fiteval
    params = Parameters(asteval=fiteval)

    if paramgroup is not None:
        for name in dir(paramgroup):
            par = getattr(paramgroup, name)
            if isParameter(par):
                params.add(name, value=par.value, vary=par.vary,
                           min=par.min, max=par.max,
                           brute_step=par.brute_step)
            else:
                fiteval.symtable[name] = par

        # now set any expression (that is, after all symbols are defined)
        for name in dir(paramgroup):
            par = getattr(paramgroup, name)
            if isParameter(par) and par.expr is not None:
                params[name].expr = par.expr

    return params
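# The two-pass pattern above (create every parameter first, attach `expr`
# constraints second) avoids failures when an expression references a symbol
# that has not been defined yet. A minimal self-contained sketch of the idea:

from lmfit import Parameters

params = Parameters()
params.add('total', value=10.0)
params.add('part_a', value=4.0)
params.add('part_b', value=6.0)

# attach the constraint only after every symbol it references exists
params['part_b'].expr = 'total - part_a'
print(params['part_b'].value)  # evaluates to 6.0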
def setup_model_params(self):
    """ Setup parameters """
    params = Parameters()
    params.add('g0', value=0.0, vary=False)
    params.add('g1', value=2.0, min=0.0)
    return params
def amplitude_of_best_fit_greybody(Trf=None, b=2.0, Lrf=None, zin=None):
    '''
    Same as single_simple_flux_from_greybody, but to make an amplitude lookup table
    '''
    nsed = 1e4
    lambda_mod = loggen(1e3, 8.0, nsed)  # microns
    nu_mod = c * 1.e6 / lambda_mod  # Hz

    # cosmo = Planck15  # (H0 = 70.5 * u.km / u.s / u.Mpc, Om0 = 0.273)
    conversion = 4.0 * np.pi * (1.0E-13 * cosmo.luminosity_distance(zin) * 3.08568025E22)**2.0 / L_sun  # 4 * pi * D_L^2, units are L_sun/(Jy x Hz)

    Lir = Lrf / conversion  # Jy x Hz

    Ain = 1.0e-36  # good starting parameter
    betain = b
    alphain = 2.0

    fit_params = Parameters()
    fit_params.add('Ain', value=Ain)

    # THE LM FIT IS HERE
    Pfin = minimize(sedint, fit_params,
                    args=(nu_mod, Lir.value, Trf / (1. + zin), b, alphain))
    # pdb.set_trace()
    return Pfin.params['Ain'].value
def neon_init(x_list, y_list):
    """
    Initialize parameters for neon peaks
    x_list: list of x peaks
    y_list: list of y peaks
    returns: params
    """
    params = Parameters()
    BG = 100.
    params.add("BG", value=BG)
    n = len(x_list)
    A_variables = []
    X_variables = []
    W_variables = []
    MU_variables = []
    for i in range(n):
        A_variables.append("A%d" % i)
        X_variables.append("X%d" % i)
        W_variables.append("W%d" % i)
        MU_variables.append("MU%d" % i)
    W = np.ones(n)
    MU = W * 0.5
    for i in range(n):
        params.add(X_variables[i], value=x_list[i],
                   min=x_list[i] - 2., max=x_list[i] + 2.)
        params.add(A_variables[i], value=y_list[i])
        params.add(W_variables[i], value=W[i])
        params.add(MU_variables[i], value=MU[i])
    print("number of params: %d" % len(params.keys()))
    return params
def test_multidimensional_fit_GH205():
    # test that you don't need to flatten the output from the objective
    # function. Tests regression for GH205.
    pos = np.linspace(0, 99, 100)
    xv, yv = np.meshgrid(pos, pos)
    f = lambda xv, yv, lambda1, lambda2: (np.sin(xv * lambda1)
                                          + np.cos(yv * lambda2))

    data = f(xv, yv, 0.3, 3)
    assert_(data.ndim == 2)

    def fcn2min(params, xv, yv, data):
        """model 2-D sine/cosine surface, subtract data"""
        lambda1 = params['lambda1'].value
        lambda2 = params['lambda2'].value
        model = f(xv, yv, lambda1, lambda2)
        return model - data

    # create a set of Parameters
    params = Parameters()
    params.add('lambda1', value=0.4)
    params.add('lambda2', value=3.2)

    mini = Minimizer(fcn2min, params, fcn_args=(xv, yv, data))
    res = mini.minimize()
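# As the regression test above shows, the objective may return a
# multi-dimensional residual array; lmfit flattens it internally before
# handing it to the solver. A condensed, runnable sketch of the same idea:

import numpy as np
from lmfit import Parameters, minimize

xv, yv = np.meshgrid(np.linspace(0, 9, 10), np.linspace(0, 9, 10))
data = np.sin(0.3 * xv) + np.cos(3.0 * yv)

def fcn2min(params, xv, yv, data):
    model = np.sin(params['l1'].value * xv) + np.cos(params['l2'].value * yv)
    return model - data  # 2-D residual: no .ravel() needed

params = Parameters()
params.add('l1', value=0.4)
params.add('l2', value=3.2)

out = minimize(fcn2min, params, args=(xv, yv, data))
print(out.params['l1'].value, out.params['l2'].value)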
def __extract_pars(self):
    """
    __extract_pars()

    Extracts the parameters from the function list and converts them to a
    single lmfit Parameters instance, which can then be manipulated by the
    residual minimization routines.

    Parameters
    ----------
    None

    Returns
    -------
    An lmfit `Parameters` instance containing the parameters of *all* the
    fittable functions in a single place.
    """
    oPars = Parameters()
    for indFunc, cFunc in enumerate(self.funclist):
        cParlist = cFunc['params']
        for cPar in cParlist.values():
            oPars.add(self.__func_ident(indFunc) + cPar.name,
                      value=cPar.value, vary=cPar.vary,
                      min=cPar.min, max=cPar.max,
                      expr=cPar.expr)
    return oPars
def test_args_kwds_are_used(self):
    # check that user defined args and kwds make their way into the user
    # function
    a = [1., 2.]
    x = np.linspace(0, 10, 11)
    y = a[0] + 1 + 2 * a[1] * x

    par = Parameters()
    par.add('p0', 1.5)
    par.add('p1', 2.5)

    def fun(x, p, *args, **kwds):
        assert_equal(args, a)
        return args[0] + p['p0'] + p['p1'] * a[1] * x

    g = CurveFitter(fun, (x, y), par, fcn_args=a)
    res = g.fit()
    assert_almost_equal(values(res.params), [1., 2.])

    d = {'a': 1, 'b': 2}

    def fun(x, p, *args, **kwds):
        return kwds['a'] + p['p0'] + p['p1'] * kwds['b'] * x

    g = CurveFitter(fun, (x, y), par, fcn_kws=d)
    res = g.fit()
    assert_almost_equal(values(res.params), [1., 2.])
def test_bounded_jacobian():
    pars = Parameters()
    pars.add('x0', value=2.0)
    pars.add('x1', value=2.0, min=1.5)

    global jac_count
    jac_count = 0

    def resid(params):
        x0 = params['x0']
        x1 = params['x1']
        return np.array([10 * (x1 - x0*x0), 1-x0])

    def jac(params):
        global jac_count
        jac_count += 1
        x0 = params['x0']
        return np.array([[-20*x0, 10], [-1, 0]])

    out0 = minimize(resid, pars, Dfun=None)
    assert_paramval(out0.params['x0'], 1.2243, tol=0.02)
    assert_paramval(out0.params['x1'], 1.5000, tol=0.02)
    assert jac_count == 0

    out1 = minimize(resid, pars, Dfun=jac)
    assert_paramval(out1.params['x0'], 1.2243, tol=0.02)
    assert_paramval(out1.params['x1'], 1.5000, tol=0.02)
    assert jac_count > 5
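# Supplying an analytic Jacobian through `Dfun` (as the test above does) can
# cut the number of function evaluations. A sketch for a linear model,
# assuming the default scipy `leastsq` orientation (col_deriv=False: one row
# per residual, one column per parameter; the square Jacobian in the test
# above is consistent with that):

import numpy as np
from lmfit import Parameters, minimize

x = np.linspace(0, 10, 50)
data = 2.5 * x + 1.0

def resid(params, x, data):
    return params['m'].value * x + params['b'].value - data

def jac(params, x, data):
    # shape (npoints, nparams): column j holds d(residual)/d(param_j)
    return np.column_stack([x, np.ones_like(x)])

pars = Parameters()
pars.add('m', value=1.0)
pars.add('b', value=0.0)

out = minimize(resid, pars, Dfun=jac, args=(x, data))
print(out.params['m'].value, out.params['b'].value)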
def lmfitter(x, y):
    params = Parameters()
    params.add('m', value=0.01, vary=True)
    out = minimize(residual, params, args=(x, y))
    report_fit(out)  # report the fit result, not the (unmodified) input params
    return out.params['m'].value, 0.0, out.params['m'].stderr, 0.0
def gaussian_constant_delta_chi_squared(light_curve, num_attempts=1): """ Compute the difference in chi-squared between a Gaussian and a straight (constant) line. """ gaussian_chisqr = 1E6 for ii in range(num_attempts): gaussian_params = Parameters() t0 = np.random.normal(light_curve.mjd[np.argmin(light_curve.mag)], 1.) if t0 > light_curve.mjd.max() or t0 < light_curve.mjd.min(): t0 = light_curve.mjd[np.argmin(light_curve.mag)] gaussian_params.add('A', value=np.random.uniform(-1., -20.), min=-1E4, max=0.) gaussian_params.add('mu', value=t0, min=light_curve.mjd.min(), max=light_curve.mjd.max()) gaussian_params.add('sigma', value=abs(np.random.normal(10., 2.)), min=1.) gaussian_params.add('B', value=np.random.normal(np.median(light_curve.mag), 0.5)) gaussian_result = minimize(gaussian_error_func, gaussian_params, args=(light_curve.mjd, light_curve.mag, light_curve.error)) if gaussian_result.chisqr < gaussian_chisqr: gaussian_chisqr = gaussian_result.chisqr constant_chisqr = 1E6 for ii in range(num_attempts): constant_params = Parameters() constant_params.add('b', value=np.random.normal(np.median(light_curve.mag), 0.5)) constant_result = minimize(constant_error_func, constant_params, args=(light_curve.mjd, light_curve.mag, light_curve.error)) if constant_result.chisqr < constant_chisqr: constant_chisqr = constant_result.chisqr return constant_chisqr - gaussian_chisqr
def NIST_Test(DataSet, start='start2', plot=True):
    NISTdata = ReadNistData(DataSet)
    resid, npar, dimx = Models[DataSet]
    y = NISTdata['y']
    x = NISTdata['x']

    params = Parameters()
    for i in range(npar):
        pname = 'b%i' % (i+1)
        cval = NISTdata['cert_values'][i]
        cerr = NISTdata['cert_stderr'][i]
        pval1 = NISTdata[start][i]
        params.add(pname, value=pval1)

    myfit = Minimizer(resid, params, fcn_args=(x,), fcn_kws={'y': y},
                      scale_covar=True)
    myfit.prepare_fit()
    myfit.leastsq()

    digs = Compare_NIST_Results(DataSet, myfit, params, NISTdata)

    if plot and HASPYLAB:
        fit = -resid(params, x)
        pylab.plot(x, y, 'r+')
        pylab.plot(x, fit, 'ko--')
        pylab.show()
    return digs > 2
def build_fitmodel(self): """ use fit components to build model""" dgroup = self.get_datagroup() model = None params = Parameters() self.summary = {"components": [], "options": {}} for comp in self.fit_components.values(): if comp.usebox is not None and comp.usebox.IsChecked(): for parwids in comp.parwids.values(): params.add(parwids.param) self.summary["components"].append((comp.mclass.__name__, comp.mclass_kws)) thismodel = comp.mclass(**comp.mclass_kws) if model is None: model = thismodel else: model += thismodel self.fit_model = model self.fit_params = params self.plot1 = self.larch.symtable._plotter.plot1 if dgroup is not None: i1, i2, xv1, xv2 = self.get_xranges(dgroup.x) xsel = dgroup.x[slice(i1, i2)] dgroup.xfit = xsel dgroup.yfit = self.fit_model.eval(self.fit_params, x=xsel) dgroup.ycomps = self.fit_model.eval_components(params=self.fit_params, x=xsel) return dgroup
def replot(self): params = Parameters() for key,value in self.paramDict.items(): params.add(key, value=float(value.text())) for i in np.arange(self.fitNumber): sequence = 'g'+str(i+1)+'_' center_value = params[sequence+'center'].value params[sequence+'center'].set(center_value, min=center_value-0.05, max=center_value+0.05) sigma_value = params[sequence+'sigma'].value params[sequence+'sigma'].set(sigma_value, min=sigma_value-0.05, max=sigma_value+0.05) ampl_value = params[sequence+'amplitude'].value params[sequence+'amplitude'].set(ampl_value, min=ampl_value-0.5, max=ampl_value+0.5) result = minimize(lmLeast(self.fitNumber).residuals, params, args=(self.fitResult.fitDf['field'], self.fitResult.fitDf['IRM_norm']), method='cg') self.params = result.params #FitMplCanvas.fitPlot(self) pdf_adjust = lmLeast(self.fitNumber).func(self.fitResult.fitDf['field'].values,self.params) pdf_adjust = pdf_adjust/np.max(np.sum(pdf_adjust,axis=0)) ax=self.axes fit_plots(ax=ax, xfit=self.fitResult.fitDf['field'], xraw=self.fitResult.rawDf['field_log'], yfit=np.array(pdf_adjust).transpose(), yraw=self.fitResult.rawDf['rem_grad_norm'])
def pca_fit(group, pca_model, ncomps=None, rescale=True, _larch=None):
    """
    fit a spectrum from a group to a PCA training model from pca_train()

    Arguments
    ---------
      group       group with data to fit
      pca_model   PCA model as found from pca_train()
      ncomps      number of components to include
      rescale     whether to allow data to be renormalized (True)

    Returns
    -------
      None, the group will have a subgroup name `pca_result` created
            with the following members:

          x           x or energy value from model
          ydat        input data interpolated onto `x`
          yfit        linear least-squares fit using model components
          weights     weights for PCA components
          chi_square  goodness-of-fit measure
          pca_model   the input PCA model
    """
    # first get the data arrays, and interpolate the data onto the model's x array
    xdat, ydat = get_arrays(group, pca_model.arrayname)
    if xdat is None or ydat is None:
        raise ValueError("cannot get arrays for arrayname='%s'"
                         % pca_model.arrayname)

    ydat = interp(xdat, ydat, pca_model.x, kind='cubic')

    params = Parameters()
    params.add('scale', value=1.0, vary=True, min=0)

    if ncomps is None:
        ncomps = len(pca_model.components)
    comps = pca_model.components[:ncomps].transpose()

    if rescale:
        weights, chi2, rank, s = np.linalg.lstsq(comps, ydat - pca_model.mean)
        yfit = (weights * comps).sum(axis=1) + pca_model.mean

        result = minimize(_pca_scale_resid, params, method='leastsq',
                          gtol=1.e-5, ftol=1.e-5, xtol=1.e-5, epsfcn=1.e-5,
                          kws=dict(ydat=ydat, comps=comps, pca_model=pca_model))
        scale = result.params['scale'].value
        ydat *= scale

        weights, chi2, rank, s = np.linalg.lstsq(comps, ydat - pca_model.mean)
        yfit = (weights * comps).sum(axis=1) + pca_model.mean
    else:
        weights, chi2, rank, s = np.linalg.lstsq(comps, ydat - pca_model.mean)
        yfit = (weights * comps).sum(axis=1) + pca_model.mean
        scale = 1.0

    group.pca_result = Group(x=pca_model.x, ydat=ydat, yfit=yfit,
                             pca_model=pca_model, chi_square=chi2[0],
                             data_scale=scale, weights=weights)
    return
def define_orientation_matrix(self):
    from lmfit import Parameters
    p = Parameters()
    for i in range(3):
        for j in range(3):
            p.add('U%d%d' % (i, j), self.Umat[i, j])
    self.init_p = self.Umat
    return p
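# Parameters minted in a loop like 'U%d%d' above can be reassembled into an
# array after the fit. A small self-contained sketch using the same naming
# scheme (the identity matrix here is just a stand-in):

import numpy as np
from lmfit import Parameters

U = np.eye(3)
p = Parameters()
for i in range(3):
    for j in range(3):
        p.add('U%d%d' % (i, j), value=U[i, j])

# rebuild the matrix from the (possibly refined) parameter values
U_fit = np.array([[p['U%d%d' % (i, j)].value for j in range(3)]
                  for i in range(3)])
print(U_fit)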
def simple_flux_from_greybody(lambdavector, Trf = None, b = None, Lrf = None, zin = None, ngal = None): ''' Return flux densities at any wavelength of interest (in the range 1-10000 micron), assuming a galaxy (at given redshift) graybody spectral energy distribution (SED), with a power law replacing the Wien part of the spectrum to account for the variability of dust temperatures within the galaxy. The two different functional forms are stitched together by imposing that the two functions and their first derivatives coincide. The code contains the nitty-gritty details explicitly. Cosmology assumed: H0=70.5, Omega_M=0.274, Omega_L=0.726 (Hinshaw et al. 2009) Inputs: alphain = spectral index of the power law replacing the Wien part of the spectrum, to account for the variability of dust temperatures within a galaxy [default = 2; see Blain 1999 and Blain et al. 2003] betain = spectral index of the emissivity law for the graybody [default = 2; see Hildebrand 1985] Trf = rest-frame temperature [in K; default = 20K] Lrf = rest-frame FIR bolometric luminosity [in L_sun; default = 10^10] zin = galaxy redshift [default = 0.001] lambdavector = array of wavelengths of interest [in microns; default = (24, 70, 160, 250, 350, 500)]; AUTHOR: Lorenzo Moncelsi [[email protected]] HISTORY: 20June2012: created in IDL November2015: converted to Python ''' nwv = len(lambdavector) nuvector = c * 1.e6 / lambdavector # Hz nsed = 1e4 lambda_mod = loggen(1e3, 8.0, nsed) # microns nu_mod = c * 1.e6/lambda_mod # Hz #Lorenzo's version had: H0=70.5, Omega_M=0.274, Omega_L=0.726 (Hinshaw et al. 2009) cosmo = FlatLambdaCDM(H0 = 70.5 * u.km / u.s / u.Mpc, Om0 = 0.273) conversion = 4.0 * np.pi *(1.0E-13 * cosmo.luminosity_distance(zin) * 3.08568025E22)**2.0 / L_sun # 4 * pi * D_L^2 units are L_sun/(Jy x Hz) Lir = Lrf / conversion # Jy x Hz Ain = np.zeros(ngal) + 1.0e-36 #good starting parameter betain = np.zeros(ngal) + b alphain= np.zeros(ngal) + 2.0 fit_params = Parameters() fit_params.add('Ain', value= Ain) #fit_params.add('Tin', value= Trf/(1.+zin), vary = False) #fit_params.add('betain', value= b, vary = False) #fit_params.add('alphain', value= alphain, vary = False) #pdb.set_trace() #THE LM FIT IS HERE #Pfin = minimize(sedint, fit_params, args=(nu_mod,Lir.value,ngal)) Pfin = minimize(sedint, fit_params, args=(nu_mod,Lir.value,ngal,Trf/(1.+zin),b,alphain)) #pdb.set_trace() flux_mJy=sed(Pfin.params,nuvector,ngal,Trf/(1.+zin),b,alphain) return flux_mJy
def buildLmfitParameters(self, parameters):
    lp = Parameters()
    for p in parameters:
        lp.add(p.name, value=p.init, min=p.min, max=p.max)
        for k in p.kws:
            setattr(lp[p.name], k, p.kws[k])
    return lp
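# A possible usage of a builder like the one above, with a hypothetical
# `ParamSpec` container standing in for whatever parameter class the caller
# defines (`name`, `init`, `min`, `max`, `kws` are the assumed attributes):

from dataclasses import dataclass, field
from lmfit import Parameters

@dataclass
class ParamSpec:
    name: str
    init: float
    min: float = float('-inf')
    max: float = float('inf')
    kws: dict = field(default_factory=dict)

def build_lmfit_parameters(specs):
    lp = Parameters()
    for p in specs:
        lp.add(p.name, value=p.init, min=p.min, max=p.max)
        for k, v in p.kws.items():
            setattr(lp[p.name], k, v)  # e.g. vary=False or brute_step=0.1
    return lp

params = build_lmfit_parameters([ParamSpec('amp', 1.0, min=0.0),
                                 ParamSpec('cen', 5.0, kws={'vary': False})])
params.pretty_print()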
def test_eval(self):
    # check that eval() works with usersyms and parameter values
    def myfun(x):
        return 2.0 * x

    p = Parameters(usersyms={"myfun": myfun})
    p.add("a", value=4.0)
    p.add("b", value=3.0)
    assert_almost_equal(p.eval("myfun(2.0) * a"), 16)
    assert_almost_equal(p.eval("b / myfun(3.0)"), 0.5)
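# Functions registered via `usersyms` are available not only to eval() but
# also inside constraint expressions. A minimal sketch:

from lmfit import Parameters

def half(x):
    return x / 2.0

p = Parameters(usersyms={'half': half})
p.add('a', value=4.0)
p.add('b', expr='half(a)')  # b is tied to a through the user function
print(p['b'].value)  # 2.0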
def fit(self, params0): r"""Perform a fit with the provided parameters. Parameters ---------- params0 : list Initial fitting parameters """ self.params0 = params0 p = Parameters() if self.parinfo is None: self.parinfo = [None] * len(self.params0) else: assert (len(self.params0) == len(self.parinfo)) for i, (p0, parin) in enumerate(zip(self.params0, self.parinfo)): p.add(name='p{0}'.format(i), value=p0) if parin is not None: if 'limits' in parin: p['p{0}'.format(i)].set(min=parin['limits'][0]) p['p{0}'.format(i)].set(max=parin['limits'][1]) if 'fixed' in parin: p['p{0}'.format(i)].set(vary=not parin['fixed']) if np.all([not value.vary for value in p.values()]): raise Exception('All parameters are fixed!') self.lmfit_minimizer = Minimizer(self.residuals, p, nan_policy=self.nan_policy, fcn_args=(self.data,)) self.result.orignorm = np.sum(self.residuals(params0, self.data) ** 2) result = self.lmfit_minimizer.minimize(Dfun=self.deriv, method='leastsq', ftol=self.ftol, xtol=self.xtol, gtol=self.gtol, maxfev=self.maxfev, epsfcn=self.epsfcn, factor=self.stepfactor) self.result.bestnorm = result.chisqr self.result.redchi = result.redchi self._m = result.ndata self.result.nfree = result.nfree self.result.resid = result.residual self.result.status = result.ier self.result.covar = result.covar self.result.xerror = [result.params['p{0}'.format(i)].stderr for i in range(len(result.params))] self.result.params = [result.params['p{0}'.format(i)].value for i in range(len(result.params))] self.result.message = result.message self.lmfit_result = result if not result.errorbars or not result.success: warnings.warn(self.result.message) return result.success
def Lorentz_params(p0):
    pars = Parameters()
    f, A, f0, df, y0 = p0[0], p0[1], p0[2], p0[3], p0[4]
    pars.add('A', value=A)
    pars.add('f', value=f.tolist(), vary=False)
    pars.add('f0', value=f0)
    pars.add('df', value=df, min=0.)
    pars.add('y0', value=y0)
    return pars
def FPolyp(x, data, Np):
    # generate lmfit parameters for a polynomial fit of order Np
    params = FPolypGuess(x, data, Np)
    p = Parameters()
    p.add('Np', value=Np, vary=False)
    for ii in range(Np + 1):
        p.add('c%s' % ii, value=params[ii+1], vary=True)
    return p
def fit_jd_hist(
    hists: list,
    dt: float,
    D: list,
    fit_D: list,
    F: list,
    fit_F: list,
    sigma: list,
    fit_sigma: list,
    verbose=False,
):
    """
    Fits jd probability functions to jd histograms.

    Parameters:
        hists (list): histogram objects, each with a `lag` attribute
        dt (float): time step between frames
        D (list): init values for MSD
        fit_D (list): whether to vary each D
        F (list): fractions for D, sum = 1
        fit_F (list): whether to vary each F
        sigma (list): localization precision guesses
        fit_sigma (list): whether to vary each sigma

    Returns:
        minimizer_result (lmfit.MinimizerResult): optimized parameters
    """
    from lmfit import Parameters, Parameter, minimize

    def residual(fit_params, data):
        res = cumulative_error_jd_hist(fit_params, data, len(D))
        return res

    fit_params = Parameters()
    # fit_params.add('sigma', value=sigma, vary=fit_sigma, min=0.)
    fit_params.add("dt", value=dt, vary=False)
    try:
        fit_params.add("max_lag", value=max([h.lag for h in hists]), vary=False)
    except TypeError as e:
        logger.error(f"problem with `hists`: expected `list`, got `{type(hists)}`")
        raise e

    for i, (d, f_d, f, f_f) in enumerate(zip(D, fit_D, F, fit_F)):
        fit_params.add(f"D{i}", value=d, vary=f_d, min=0.0)
        fit_params.add(f"F{i}", value=f, min=0.0, max=1.0, vary=f_f)

    # constrain the fractions to sum to one: the last F is 1 minus the others
    f_expr = "1"
    for i, f in enumerate(F[:-1]):
        f_expr += f" - F{i}"
    fit_params[f"F{i+1}"] = Parameter(name=f"F{i+1}", min=0.0, max=1.0, expr=f_expr)

    for i, (s, f_s, min_s, max_s) in enumerate(
        zip(sigma, fit_sigma, (0, sigma[0]), (3 * sigma[0], D[-1]))
    ):
        fit_params.add(f"sigma{i}", value=s, min=min_s, max=max_s, vary=f_s)

    logger.debug("start minimize")
    minimizer_result = minimize(residual, fit_params, args=(hists,))
    if verbose:
        logger.info(f"completed in {minimizer_result.nfev} steps")
        minimizer_result.params.pretty_print()
    return minimizer_result
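# The fraction constraint assembled above ("1 - F0 - F1 - ...") is a general
# lmfit pattern: vary n-1 fractions freely and derive the last one with an
# `expr`, so the sum is exactly one by construction. A self-contained sketch:

from lmfit import Parameters

params = Parameters()
params.add('F0', value=0.5, min=0.0, max=1.0)
params.add('F1', value=0.3, min=0.0, max=1.0)
params.add('F2', expr='1 - F0 - F1', min=0.0, max=1.0)

print(params['F2'].value)  # 1 - 0.5 - 0.3 = 0.2, never varied directly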
model = yg + offset + x * slope if data is None: return model if sigma is None: return (model - data) return (model - data) / sigma n = 201 xmin = 0. xmax = 20.0 x = linspace(xmin, xmax, n) p_true = Parameters() p_true.add('amp_g', value=21.0) p_true.add('cen_g', value=8.1) p_true.add('wid_g', value=1.6) p_true.add('line_off', value=-1.023) p_true.add('line_slope', value=0.62) data = (gaussian(x, p_true['amp_g'].value, p_true['cen_g'].value, p_true['wid_g'].value) + random.normal(scale=0.23, size=n) + x * p_true['line_slope'].value + p_true['line_off'].value) if HASPYLAB: pylab.plot(x, data, 'r+') p_fit = Parameters() p_fit.add('amp_g', value=10.0) p_fit.add('cen_g', value=9)
class XRF_Model: """model for X-ray fluorescence data consists of parameterized components for incident beam (energy, angle_in, angle_out) matrix (list of material, thickness) filters (list of material, thickness) detector (material, thickness, step, tail, beta, gamma) """ def __init__(self, xray_energy=None, energy_min=1.5, energy_max=30., count_time=1, bgr=None, iter_callback=None, **kws): self.xray_energy = xray_energy self.energy_min = energy_min self.energy_max = energy_max self.count_time = count_time self.iter_callback = None self.params = Parameters() self.elements = [] self.scatter = [] self.comps = {} self.eigenvalues = {} self.transfer_matrix = None self.matrix_layers = [] self.matrix = None self.matrix_atten = 1.0 self.filters = [] self.fit_iter = 0 self.fit_toler = 1.e-5 self.fit_log = False self.bgr = None self.use_pileup = False self.use_escape = False self.escape_scale = None self.script = '' self.mca = None if bgr is not None: self.add_background(bgr) def set_detector(self, material='Si', thickness=0.40, noise=0.05, peak_step=1e-3, peak_tail=0.01, peak_gamma=0, peak_beta=0.5, cal_offset=0, cal_slope=10., cal_quad=0, vary_thickness=False, vary_noise=True, vary_peak_step=True, vary_peak_tail=True, vary_peak_gamma=False, vary_peak_beta=False, vary_cal_offset=True, vary_cal_slope=True, vary_cal_quad=False): """ set up detector material, calibration, and general settings for the hypermet functions for the fluorescence and scatter peaks """ self.detector = XRF_Material(material, thickness) matname = material.title() if matname not in FanoFactors: matname = 'Si' self.efano = FanoFactors[matname] self.params.add('det_thickness', value=thickness, vary=vary_thickness, min=0) self.params.add('det_noise', value=noise, vary=vary_noise, min=0) self.params.add('cal_offset', value=cal_offset, vary=vary_cal_offset, min=-500, max=500) self.params.add('cal_slope', value=cal_slope, vary=vary_cal_slope, min=0) self.params.add('cal_quad', value=cal_quad, vary=vary_cal_quad) self.params.add('peak_step', value=peak_step, vary=vary_peak_step, min=0, max=10) self.params.add('peak_tail', value=peak_tail, vary=vary_peak_tail, min=0, max=10) self.params.add('peak_beta', value=peak_beta, vary=vary_peak_beta, min=0) self.params.add('peak_gamma', value=peak_gamma, vary=vary_peak_gamma, min=0) def add_scatter_peak(self, name='elastic', amplitude=1000, center=None, step=0.010, tail=0.5, sigmax=1.0, beta=0.5, vary_center=True, vary_step=True, vary_tail=True, vary_sigmax=True, vary_beta=False): """add Rayleigh (elastic) or Compton (inelastic) scattering peak """ if name not in self.scatter: self.scatter.append( xrf_peak(name, amplitude, center, step, tail, sigmax, beta, 0.0, vary_center, vary_step, vary_tail, vary_sigmax, vary_beta, False)) if center is None: center = self.xray_energy self.params.add('%s_amp' % name, value=amplitude, vary=True, min=0) self.params.add('%s_center' % name, value=center, vary=vary_center, min=center * 0.5, max=center * 1.25) self.params.add('%s_step' % name, value=step, vary=vary_step, min=0, max=10) self.params.add('%s_tail' % name, value=tail, vary=vary_tail, min=0, max=20) self.params.add('%s_beta' % name, value=beta, vary=vary_beta, min=0, max=20) self.params.add('%s_sigmax' % name, value=sigmax, vary=vary_sigmax, min=0, max=100) def add_element(self, elem, amplitude=1.e6, vary_amplitude=True): """add Element to XRF model """ self.elements.append( XRF_Element(elem, xray_energy=self.xray_energy, energy_min=self.energy_min)) self.params.add('amp_%s' % elem.lower(), 
value=amplitude, vary=vary_amplitude, min=0) def add_filter(self, material, thickness, density=None, vary_thickness=False): self.filters.append( XRF_Material(material=material, density=density, thickness=thickness)) self.params.add('filterlen_%s' % material, value=thickness, min=0, vary=vary_thickness) def set_matrix(self, material, thickness, density=None): self.matrix = XRF_Material(material=material, density=density, thickness=thickness) self.matrix_atten = 1.0 def add_background(self, data, vary=True): self.bgr = data self.params.add('background_amp', value=1.0, min=0, vary=vary) def add_escape(self, scale=1.0, vary=True): self.use_escape = True self.params.add('escape_amp', value=scale, min=0, vary=vary) def add_pileup(self, scale=1.0, vary=True): self.use_pileup = True self.params.add('pileup_amp', value=scale, min=0, vary=vary) def clear_background(self): self.bgr = None self.params.pop('background_amp') def calc_matrix_attenuation(self, energy): """ calculate beam attenuation by a matrix built from layers note that matrix layers and composition cannot be variable so the calculation can be done once, ahead of time. """ atten = 1.0 if self.matrix is not None: ixray_en = index_of(energy, self.xray_energy) print("MATRIX ", ixray_en, self.matrix) # layer_trans = self.matrix.transmission(energy) # transmission through layer # incid_trans = layer_trans[ixray_en] # incident beam trans to lower layers # ncid_absor = 1.0 - incid_trans # incident beam absorption by layer # atten = layer_trans * incid_absor self.matrix_atten = atten def calc_escape_scale(self, energy, thickness=None): """ calculate energy dependence of escape effect X-rays penetrate a depth 1/mu(material, energy) and the detector fluorescence escapes from that depth as exp(-mu(material, KaEnergy)*thickness) with a fluorecence yield of the material """ det = self.detector # note material_mu, xray_edge, xray_line work in eV! escape_energy_ev = xray_line(det.material, 'Ka').energy mu_emit = material_mu(det.material, escape_energy_ev) self.escape_energy = 0.001 * escape_energy_ev mu_input = material_mu(det.material, 1000 * energy) edge = xray_edge(det.material, 'K') self.escape_scale = edge.fyield * np.exp(-mu_emit / (2 * mu_input)) self.escape_scale[np.where(energy < 0.001 * edge.energy)] = 0.0 def det_sigma(self, energy, noise=0): """ energy width of peak """ return np.sqrt(self.efano * energy + noise**2) def calc_spectrum(self, energy, params=None): if params is None: params = self.params pars = params.valuesdict() self.comps = {} self.eigenvalues = {} det_noise = pars['det_noise'] step = pars['peak_step'] tail = pars['peak_tail'] beta = pars['peak_beta'] gamma = pars['peak_gamma'] # detector attenuation atten = self.detector.absorbance(energy, thickness=pars['det_thickness']) # filters for f in self.filters: thickness = pars.get('filterlen_%s' % f.material, None) if thickness is not None and int(thickness * 1e6) > 1: atten *= f.transmission(energy, thickness=thickness) self.atten = atten # matrix # if self.matrix_atten is None: # self.calc_matrix_attenuation(energy) # atten *= self.matrix_atten if self.use_escape: if self.escape_scale is None: self.calc_escape_scale(energy, thickness=pars['det_thickness']) escape_amp = pars.get('escape_amp', 0.0) * self.escape_scale for elem in self.elements: comp = 0. 
* energy amp = pars.get('amp_%s' % elem.symbol.lower(), None) if amp is None: continue for key, line in elem.lines.items(): ecen = 0.001 * line.energy line_amp = line.intensity * elem.mu * elem.fyields[ line.initial_level] sigma = self.det_sigma(ecen, det_noise) comp += hypermet(energy, amplitude=line_amp, center=ecen, sigma=sigma, step=step, tail=tail, beta=beta, gamma=gamma) comp *= amp * atten * self.count_time if self.use_escape: comp += escape_amp * interp(energy - self.escape_energy, comp, energy) self.comps[elem.symbol] = comp self.eigenvalues[elem.symbol] = amp # scatter peaks for Rayleigh and Compton for peak in self.scatter: p = peak.name amp = pars.get('%s_amp' % p, None) if amp is None: continue ecen = pars['%s_center' % p] step = pars['%s_step' % p] tail = pars['%s_tail' % p] beta = pars['%s_beta' % p] sigma = pars['%s_sigmax' % p] sigma *= self.det_sigma(ecen, det_noise) comp = hypermet(energy, amplitude=1.0, center=ecen, sigma=sigma, step=step, tail=tail, beta=beta, gamma=gamma) comp *= amp * atten * self.count_time if self.use_escape: comp += escape_amp * interp(energy - self.escape_energy, comp, energy) self.comps[p] = comp self.eigenvalues[p] = amp if self.bgr is not None: bgr_amp = pars.get('background_amp', 0.0) self.comps['background'] = bgr_amp * self.bgr self.eigenvalues['background'] = bgr_amp # calculate total spectrum total = 0. * energy for comp in self.comps.values(): total += comp if self.use_pileup: pamp = pars.get('pileup_amp', 0.0) npts = len(energy) pileup = pamp * 1.e-9 * np.convolve(total, total * 1.0, 'full')[:npts] self.comps['pileup'] = pileup self.eigenvalues['pileup'] = pamp total += pileup # remove tiny values so that log plots are usable floor = 1.e-10 * max(total) total[np.where(total < floor)] = floor self.current_model = total return total def __resid(self, params, data, index): pars = params.valuesdict() self.best_en = (pars['cal_offset'] + pars['cal_slope'] * index + pars['cal_quad'] * index**2) self.fit_iter += 1 model = self.calc_spectrum(self.best_en, params=params) if callable(self.iter_callback): self.iter_callback(iter=self.fit_iter, pars=pars) return ((data - model) * self.fit_weight)[self.imin:self.imax] def set_fit_weight(self, energy, counts, emin, emax, ewid=0.050): """ set weighting factor to smoothed square-root of data """ ewin = ftwindow(energy, xmin=emin, xmax=emax, dx=ewid, window='hanning') self.fit_window = ewin fit_wt = 0.5 + savitzky_golay(np.sqrt(counts + 1.0), 25, 1) self.fit_weight = 1.0 / fit_wt def fit_spectrum(self, mca, energy_min=None, energy_max=None): self.mca = mca work_energy = 1.0 * mca.energy work_counts = 1.0 * mca.counts floor = 1.e-10 * np.percentile(work_counts, [99])[0] work_counts[np.where(work_counts < floor)] = floor if max(work_energy) > 250.0: # if input energies are in eV work_energy /= 1000.0 imin, imax = 0, len(work_counts) if energy_min is None: energy_min = self.energy_min if energy_min is not None: imin = index_of(work_energy, energy_min) if energy_max is None: energy_max = self.energy_max if energy_max is not None: imax = index_of(work_energy, energy_max) self.imin = max(0, imin - 5) self.imax = min(len(work_counts), imax + 5) self.npts = (self.imax - self.imin) self.set_fit_weight(work_energy, work_counts, energy_min, energy_max) self.fit_iter = 0 # reset attenuation calcs for matrix, detector, filters self.matrix_atten = 1.0 self.escape_scale = None self.detector.mu_total = None for f in self.filters: f.mu_total = None self.init_fit = self.calc_spectrum(work_energy, params=self.params) 
index = np.arange(len(work_counts)) userkws = dict(data=work_counts, index=index) tol = self.fit_toler self.result = minimize(self.__resid, self.params, kws=userkws, method='leastsq', maxfev=10000, scale_covar=True, gtol=tol, ftol=tol, epsfcn=1.e-5) self.fit_report = fit_report(self.result, min_correl=0.5) pars = self.result.params self.best_en = (pars['cal_offset'] + pars['cal_slope'] * index + pars['cal_quad'] * index**2) self.fit_iter += 1 self.best_fit = self.calc_spectrum(work_energy, params=self.result.params) # calculate transfer matrix for linear analysis using this model tmat = [] for key, val in self.comps.items(): arr = val / self.eigenvalues[key] floor = 1.e-12 * max(arr) arr[np.where(arr < floor)] = 0.0 tmat.append(arr) self.transfer_matrix = np.array(tmat).transpose() return self.get_fitresult() def get_fitresult(self, label='XRF fit result', script='# no script supplied'): """a simple compilation of fit settings results to be able to easily save and inspect""" out = XRFFitResult(label=label, script=script, mca=self.mca) for attr in ('filename', 'label'): setattr(out, 'mca' + attr, getattr(self.mca, attr, 'unknown')) for attr in ('params', 'var_names', 'chisqr', 'redchi', 'nvarys', 'nfev', 'ndata', 'aic', 'bic', 'aborted', 'covar', 'ier', 'message', 'method', 'nfree', 'init_values', 'success', 'residual', 'errorbars', 'lmdif_message', 'nfree'): setattr(out, attr, getattr(self.result, attr, None)) for attr in ('atten', 'best_en', 'best_fit', 'bgr', 'comps', 'count_time', 'eigenvalues', 'energy_max', 'energy_min', 'fit_iter', 'fit_log', 'fit_report', 'fit_toler', 'fit_weight', 'fit_window', 'init_fit', 'scatter', 'script', 'transfer_matrix', 'xray_energy'): setattr(out, attr, getattr(self, attr, None)) elem_attrs = ('all_lines', 'edges', 'fyields', 'lines', 'mu', 'symbol', 'xray_energy') out.elements = [] for el in self.elements: out.elements.append( {attr: getattr(el, attr) for attr in elem_attrs}) mater_attrs = ('material', 'mu_photo', 'mu_total', 'thickness') out.detector = { attr: getattr(self.detector, attr) for attr in mater_attrs } out.matrix = None if self.matrix is not None: out.matrix = { attr: getattr(self.matrix, attr) for attr in mater_attrs } out.filters = [] for ft in self.filters: out.filters.append( {attr: getattr(ft, attr) for attr in mater_attrs}) return out
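# calc_spectrum() above reads parameters through valuesdict(), which turns a
# Parameters object into a plain {name: float} dict -- handy when a model
# references many parameters and repeated ['name'].value lookups get noisy.
# A minimal sketch using the calibration names from the class above:

from lmfit import Parameters

params = Parameters()
params.add('cal_offset', value=0.01)
params.add('cal_slope', value=10.0)
params.add('cal_quad', value=0.0, vary=False)

pars = params.valuesdict()  # ordinary dict of current values
energy = pars['cal_offset'] + pars['cal_slope'] * 5 + pars['cal_quad'] * 25
print(energy)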
ADMRObject = ADMR([condObject]) ADMRObject.Btheta_array = Btheta_array ADMRObject.runADMR() print("ADMR time : %.6s seconds" % (time.time() - start_total_time)) diff_0 = rzz_0 - ADMRObject.rzz_array[0, :] diff_15 = rzz_15 - ADMRObject.rzz_array[1, :] diff_30 = rzz_30 - ADMRObject.rzz_array[2, :] diff_45 = rzz_45 - ADMRObject.rzz_array[3, :] return np.concatenate((diff_0, diff_15, diff_30, diff_45)) ## Initialize pars = Parameters() pars.add("gamma_0", value=gamma_0_ini, vary=gamma_0_vary, min=0) pars.add("gamma_dos", value=gamma_dos_ini, vary=gamma_dos_vary, min=0) pars.add("gamma_k", value=gamma_k_ini, vary=gamma_k_vary, min=0) pars.add("power", value=power_ini, vary=power_vary, min=2) pars.add("mu", value=mu_ini, vary=mu_vary) pars.add("M", value=M_ini, vary=M_vary, min=0.001) ## Run fit algorithm out = minimize(residualFunc, pars, args=(bandObject, rzz_0, rzz_15, rzz_30, rzz_45)) ## Display fit report print(fit_report(out.params)) ## Export final parameters from the fit
def fit_lm(self): # use Levenberg Mardquart method # define objective function: returns the array to be minimized def fcn2min(params, x, y, yerr): n = len(x) model = np.zeros(n,dtype=ctypes.c_double) model = np.require(model,dtype=ctypes.c_double,requirements='C') occultquadC( x,params['RpRs'].value,params['aRs'].value, params['period'].value, params['inc'].value, params['gamma1'].value, params['gamma2'].value, params['ecc'].value, params['omega'].value, params['tmid'].value, n, model ) model *= (params['a0'] + x*params['a1'] + x*x*params['a2']) return (model - y)/yerr #Rp,aR,P,i,u1,u2,e,omega,tmid,a0,a1,a2 = self.p_init v = [ (i[0] != i[1]) for i in self.bounds ] # boolean array to vary parameters pnames = ['RpRs','aRs','period','inc','gamma1','gamma2','ecc','omega','tmid','a0','a1','a2'] params = Parameters() for j in range(len(self.p_init)): # algorithm does not like distance between min and max to be zero if v[j] == True: params.add(pnames[j], value= self.p_init[j], vary=v[j], min=self.bounds[j][0], max=self.bounds[j][1] ) else: if (self.bounds[j][0] == None): if (self.bounds[j][1] == None): # no upper bound params.add(pnames[j], value= self.p_init[j], vary=True ) else: # upper bound params.add(pnames[j], value= self.p_init[j], vary=True,max = self.bounds[j][1] ) elif (self.bounds[j][1] == None): if (self.bounds[j][0] == None): # no lower bound params.add(pnames[j], value= self.p_init[j], vary=True ) else: # lower bound params.add(pnames[j], value= self.p_init[j], vary=True,min = self.bounds[j][0] ) else: params.add(pnames[j], value= self.p_init[j], vary=v[j] ) # do fit, here with leastsq model result = lminimize(fcn2min, params, args=(self.t,self.y,self.yerr)) params = result.params n = len(self.t) model = np.zeros(n,dtype=ctypes.c_double) model = np.require(model,dtype=ctypes.c_double,requirements='C') occultquadC( self.t,params['RpRs'].value,params['aRs'].value, params['period'].value, params['inc'].value, params['gamma1'].value, params['gamma2'].value, params['ecc'].value, params['omega'].value, params['tmid'].value, n, model ) self.final_model = model self.residuals = result.residual self.params = result.params self.result = result A0 = params['a0'].value A1 = params['a1'].value A2 = params['a2'].value self.amcurve = A0 + self.t*A1 + self.t*self.t*A2 self.final_curve = self.final_model/self.amcurve self.phase = (self.t-params['tmid'].value)/params['period']
https://lmfit.github.io/lmfit-py/bounds.html

The example below shows how to set boundaries using the ``min`` and ``max``
attributes to fitting parameters.
"""
import matplotlib.pyplot as plt
from numpy import exp, linspace, pi, random, sign, sin

from lmfit import Parameters, minimize
from lmfit.printfuncs import report_fit

###############################################################################
# Define the 'correct' Parameter values and residual function:
p_true = Parameters()
p_true.add('amp', value=14.0)
p_true.add('period', value=5.4321)
p_true.add('shift', value=0.12345)
p_true.add('decay', value=0.01000)


def residual(pars, x, data=None):
    argu = (x * pars['decay'])**2
    shift = pars['shift']
    if abs(shift) > pi/2:
        shift = shift - sign(shift)*pi
    model = pars['amp'] * sin(shift + x/pars['period']) * exp(-argu)
    if data is None:
        return model
    return model - data
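# The docs example continues by generating noisy data and fitting it with
# bounded parameters. A condensed sketch of that step, reusing `residual` and
# `p_true` from above (the starting values and bounds here are illustrative,
# not the docs' exact numbers):

x = linspace(0.0, 250.0, 1500)
data = residual(p_true, x) + random.normal(scale=0.8, size=x.size)

fit_params = Parameters()
fit_params.add('amp', value=13.0, min=0.0, max=20.0)
fit_params.add('period', value=2.0, max=10.0)
fit_params.add('shift', value=0.0, min=-pi/2., max=pi/2.)
fit_params.add('decay', value=0.02, min=0.0, max=0.1)

out = minimize(residual, fit_params, args=(x,), kws={'data': data})
report_fit(out)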
class SphereAtInterface: #Please put the class name same as the function name def __init__(self, x=0.1, lam=1.0, Rc=10, Rsig=0.0, rhoc=4.68, D=60.0, cov=100, Zo=20.0, decay=3.0, rho_up=0.333, rho_down=0.38, zmin=-50, zmax=100, dz=1, roughness=3.0, rrf=1, mpar={}, qoff=0): """ Calculates X-ray reflectivity from a system of nanoparticle at an interface between two media x : array of wave-vector transfer along z-direction lam : wavelength of x-rays in invers units of x Rc : Radius of nanoparticles in inverse units of x rhoc : Electron density of the nanoparticles cov : Coverate of the nanoparticles in % D : The lattice constant of the two dimensional hcp structure formed by the particles Zo : Average distance between the center of the nanoparticles and the interface decay : Assuming exponential decay of the distribution of nanoparticles away from the interface rho_up : Electron density of the upper medium rho_down : Electron density of the lower medium zmin : Minimum z value for the electron density profile zmax : Maximum z value for the electron density profile dz : minimum slab thickness roughness : Roughness of the interface rrf : 1 for Frensnel normalized refelctivity and 0 for just reflectivity qoff : offset in the value of qz due to alignment errors """ if type(x) == list: self.x = np.array(x) else: self.x = x self.Rc = Rc self.lam = lam self.rhoc = rhoc self.Zo = Zo self.cov = cov self.D = D self.decay = decay self.rho_up = rho_up self.rho_down = rho_down self.zmin = zmin self.zmax = zmax self.dz = dz self.roughness = roughness self.rrf = rrf self.qoff = qoff self.choices = {'rrf': [1, 0]} self.output_params = {} self.__mpar__ = mpar def init_params(self): """ Define all the fitting parameters like self.param.add('sig',value=0,vary=0) """ self.params = Parameters() self.params.add('Rc', value=self.Rc, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1) self.params.add('rhoc', value=self.rhoc, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1) self.params.add('Zo', value=self.Zo, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1) self.params.add('D', value=self.D, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1) self.params.add('cov', value=self.cov, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1) self.params.add('decay', value=self.decay, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1) self.params.add('roughness', value=self.roughness, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1) self.params.add('qoff', value=self.qoff, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1) def decayNp(self, z, Rc=10, D=30.0, z0=0.0, xi=1.0, cov=100.0, rhoc=4.65, rhos=[0.334, 0.38], sig=1.0): if sig < 1e-3: z2 = z else: zmin = z[0] - 5 * sig zmax = z[-1] + 5 * sig z2 = np.arange(zmin, zmax, self.dz) intf = np.where(z2 <= 0, rhos[0], rhos[1]) if z0 <= 0: z1 = np.linspace(-5 * xi + z0, z0, 101) dec = np.exp((z1 - z0) / xi) / xi else: z1 = np.linspace(z0, z0 + 5 * xi, 101) dec = np.exp((z0 - z1) / xi) / xi rhoz = np.zeros_like(z2) for i in range(len(z1)): rhoz = rhoz + self.rhoNPz( z2, z0=z1[i], rhoc=rhoc, Rc=Rc, D=D, rhos=rhos) * dec[i] / sum(dec) rhoz = cov * rhoz / 100.0 + (100 - cov) * intf / 100.0 x = np.arange(-5 * sig, 5 * sig, self.dz) if sig > 1e-3: rough = np.exp(-x**2 / 2.0 / sig**2) / np.sqrt(2 * np.pi) / sig res = np.convolve(rhoz, rough, mode='valid') * self.dz if len(res) > len(z): return res[0:len(z)] else: return res else: return rhoz def rhoNPz(self, z, z0=0, rhoc=4.65, Rc=10.0, D=28.0, rhos=[0.334, 0.38]): 
rhob = np.where(z > 0, rhos[1], rhos[0]) #D=D/2 return np.where( np.abs(z - z0) <= Rc, (2 * np.pi * (rhoc - rhob) * (Rc**2 - (z - z0)**2) + 1.732 * rhob * D**2) / (1.732 * D**2), rhob) def y(self): """ Define the function in terms of x to return some value """ Rc = self.params['Rc'].value D = self.params['D'].value Zo = self.params['Zo'].value cov = self.params['cov'].value sig = self.params['roughness'].value xi = self.params['decay'].value rhoc = self.params['rhoc'].value qoff = self.params['qoff'].value rhos = [self.rho_up, self.rho_down] lam = self.lam z = np.arange(self.zmin, self.zmax, self.dz) d = np.ones_like(z) * self.dz edp = self.decayNp(z, Rc=Rc, z0=Zo, xi=xi, cov=cov, rhos=rhos, rhoc=rhoc, sig=sig, D=D) self.output_params['EDP'] = {'x': z, 'y': edp} beta = np.zeros_like(z) rho = np.array(edp, dtype='float') refq, r2 = parratt(self.x + qoff, lam, d, rho, beta) if self.rrf > 0: ref, r2 = parratt(self.x + qoff, lam, [0.0, 1.0], rhos, [0.0, 0.0]) refq = refq / ref return refq
def diag_results(cube_id): def f_doublet(x, c, i1, i2, sigma_gal, z, sigma_inst): """ function for Gaussian doublet """ dblt_mu = [3727.092, 3729.875] # the actual non-redshifted wavelengths l1 = dblt_mu[0] * (1 + z) l2 = dblt_mu[1] * (1 + z) sigma = np.sqrt(sigma_gal**2 + sigma_inst**2) norm = (sigma * np.sqrt(2 * np.pi)) term1 = (i1 / norm) * np.exp(-(x - l1)**2 / (2 * sigma**2)) term2 = (i2 / norm) * np.exp(-(x - l2)**2 / (2 * sigma**2)) return (c * x + term1 + term2) with PdfPages('diagnostics/cube_' + str(cube_id) + '_diagnostic.pdf') as pdf: analysis = cube_reader.analysis( "/Volumes/Jacky_Cao/University/level4/" + "project/cubes_better/cube_" + str(cube_id) + ".fits", "data/skyvariance_csub.fits") # calling data into variables icd = analysis['image_data'] segd = analysis['spectra_data']['segmentation'] sr = analysis['sr'] df_data = analysis['df_data'] gs_data = analysis['gs_data'] snw_data = analysis['snw_data'] # images of the galaxy f, (ax1, ax2) = plt.subplots(1, 2) ax1.imshow(icd['median'], cmap='gray_r') ax1.set_title(r'\textbf{Galaxy Image: Median}', fontsize=13) ax1.set_xlabel(r'\textbf{Pixels}', fontsize=13) ax1.set_ylabel(r'\textbf{Pixels}', fontsize=13) ax2.imshow(icd['sum'], cmap='gray_r') ax2.set_title(r'\textbf{Galaxy Image: Sum}', fontsize=13) ax2.set_xlabel(r'\textbf{Pixels}', fontsize=13) ax2.set_ylabel(r'\textbf{Pixels}', fontsize=13) f.subplots_adjust(wspace=0.4) pdf.savefig() plt.close() # ---------------------------------------------------------------------- # # segmentation area used to extract the 1D spectra segd_mask = ((segd == cube_id)) plt.figure() plt.title(r'\textbf{Segmentation area used to extract 1D spectra}', fontsize=13) plt.imshow(np.rot90(segd_mask, 1), cmap='Paired') plt.xlabel(r'\textbf{Pixels}', fontsize=13) plt.ylabel(r'\textbf{Pixels}', fontsize=13) pdf.savefig() plt.close() # ---------------------------------------------------------------------- # # spectra plotting f, (ax1, ax2) = plt.subplots(2, 1) # --- redshifted data plotting cbd_x = np.linspace(sr['begin'], sr['end'], sr['steps']) ## plotting our cube data cbs_y = gs_data['gd_shifted'] ax1.plot(cbd_x, cbs_y, linewidth=0.5, color="#000000") ## plotting our sky noise data snd_y = snw_data['sky_regions'][:, 1] ax1.plot(cbd_x, snd_y, linewidth=0.5, color="#f44336", alpha=0.5) ## plotting our [OII] region ot_x = df_data['x_region'] ot_y = df_data['y_region'] ax1.plot(ot_x, ot_y, linewidth=0.5, color="#00c853") ## plotting the standard deviation region in the [OII] section std_x = df_data['std_x'] std_y = df_data['std_y'] ax1.plot(std_x, std_y, linewidth=0.5, color="#00acc1") pu_lines = gs_data['pu_peaks'] for i in range(len(pu_lines)): srb = sr['begin'] ax1.axvline(x=(pu_lines[i]), linewidth=0.5, color="#ec407a", alpha=0.2) ax1.set_title(r'\textbf{Spectra: cross-section redshifted}', fontsize=13) ax1.set_xlabel(r'\textbf{Wavelength (\AA)}', fontsize=13) ax1.set_ylabel(r'\textbf{Flux}', fontsize=13) ax1.set_ylim([-1000, 5000]) # setting manual limits for now # --- corrected redshift crs_x = np.linspace(sr['begin'], sr['end'], sr['steps']) rdst = gs_data['redshift'] sp_lines = gs_data['spectra'] ## corrected wavelengths corr_x = crs_x / (1 + rdst) ## plotting our cube data cps_y = gs_data['gd_shifted'] ax2.plot(corr_x, cps_y, linewidth=0.5, color="#000000") ## plotting our sky noise data sn_y = gs_data['sky_noise'] ax2.plot(corr_x, sn_y, linewidth=0.5, color="#e53935") ## plotting spectra lines for e_key, e_val in sp_lines['emis'].items(): spec_line = float(e_val) ax2.axvline(x=spec_line, 
linewidth=0.5, color="#00c853") ax2.text(spec_line - 10, 4800, e_key, rotation=-90) ax2.set_title(r'\textbf{Spectra: cross-section corrected}', fontsize=13) ax2.set_xlabel(r'\textbf{Wavelength (\AA)}', fontsize=13) ax2.set_ylabel(r'\textbf{Flux}', fontsize=13) ax2.set_ylim([-500, 5000]) # setting manual limits for now f.subplots_adjust(hspace=0.5) pdf.savefig() plt.close() # ---------------------------------------------------------------------- # # OII doublet region ot_fig = plt.figure() # plotting the data for the cutout [OII] region ot_x = df_data['x_region'] ot_y = df_data['y_region'] plt.plot(ot_x, ot_y, linewidth=0.5, color="#000000") ## plotting the standard deviation region in the [OII] section std_x = df_data['std_x'] std_y = df_data['std_y'] plt.plot(std_x, std_y, linewidth=0.5, color="#00acc1") dblt_rng = df_data['doublet_range'] ot_x_b, ot_x_e = dblt_rng[0], dblt_rng[-1] x_ax_vals = np.linspace(ot_x_b, ot_x_e, 1000) # lmfit lm_init = df_data['lm_init_fit'] lm_best = df_data['lm_best_fit'] plt.plot(ot_x, lm_best, linewidth=0.5, color="#1e88e5") plt.plot(ot_x, lm_init, linewidth=0.5, color="#43a047", alpha=0.5) lm_params = df_data['lm_best_param'] lm_params = [prm_value for prm_key, prm_value in lm_params.items()] c, i_val1, i_val2, sig_g, rdsh, sig_i = lm_params dblt_mu = [3727.092, 3729.875] # the actual non-redshifted wavelengths for OII l1 = dblt_mu[0] * (1 + rdsh) l2 = dblt_mu[1] * (1 + rdsh) sig = np.sqrt(sig_g**2 + sig_i**2) norm = (sig * np.sqrt(2 * np.pi)) lm_y1 = c + (i_val1 / norm) * np.exp(-(ot_x - l1)**2 / (2 * sig**2)) lm_y2 = c + (i_val2 / norm) * np.exp(-(ot_x - l2)**2 / (2 * sig**2)) plt.plot(ot_x, lm_y1, linewidth=0.5, color="#e64a19", alpha=0.7) plt.plot(ot_x, lm_y2, linewidth=0.5, color="#1a237e", alpha=0.7) # plotting signal-to-noise straight line and gaussian to verify it works sn_line = df_data['sn_line'] sn_gauss = df_data['sn_gauss'] plt.title(r'\textbf{OII doublet region}', fontsize=13) plt.xlabel(r'\textbf{Wavelength (\AA)}', fontsize=13) plt.ylabel(r'\textbf{Flux}', fontsize=13) plt.ylim([-500, 5000]) # setting manual limits for now pdf.savefig() plt.close() # ---------------------------------------------------------------------- # # plotting pPXF data # defining wavelength as the x-axis x_data = np.load("ppxf_results/cube_" + str(int(cube_id)) + "/cube_" + str(int(cube_id)) + "_lamgal.npy") # defining the flux from the data and model y_data = np.load("ppxf_results/cube_" + str(int(cube_id)) + "/cube_" + str(int(cube_id)) + "_flux.npy") y_model = np.load("ppxf_results/cube_" + str(int(cube_id)) + "/cube_" + str(int(cube_id)) + "_model.npy") # scaled down y data y_data_scaled = y_data / np.median(y_data) # opening cube to obtain the segmentation data cube_file = ( "/Volumes/Jacky_Cao/University/level4/project/cubes_better/cube_" + str(cube_id) + ".fits") hdu = fits.open(cube_file) segmentation_data = hdu[2].data seg_loc_rows, seg_loc_cols = np.where(segmentation_data == cube_id) signal_pixels = len(seg_loc_rows) # noise spectra will be used as in the chi-squared calculation noise = np.load("ppxf_results/cube_" + str(int(cube_id)) + "/cube_" + str(int(cube_id)) + "_noise.npy") noise_median = np.median(noise) noise_stddev = np.std(noise) residual = y_data_scaled - y_model res_median = np.median(residual) res_stddev = np.std(residual) noise = noise mask = ((residual < res_stddev) & (residual > -res_stddev)) chi_sq = (y_data_scaled[mask] - y_model[mask])**2 / noise[mask]**2 total_chi_sq = np.sum(chi_sq) total_points = len(chi_sq) reduced_chi_sq = 
total_chi_sq / total_points

# spectral lines
sl = spectra_data.spectral_lines()

# parameters from lmfit
lm_params = spectra_data.lmfit_data(cube_id)
c = lm_params['c']
i1 = lm_params['i1']
i2 = lm_params['i2']
sigma_gal = lm_params['sigma_gal']
z = lm_params['z']
sigma_inst = lm_params['sigma_inst']

plt.figure()
plt.plot(x_data, y_data_scaled, linewidth=1.1, color="#000000")
plt.plot(x_data, y_data_scaled + noise_stddev, linewidth=0.1, color="#616161", alpha=0.1)
plt.plot(x_data, y_data_scaled - noise_stddev, linewidth=0.1, color="#616161", alpha=0.1)

# plotting over the OII doublet
doublets = np.array([3727.092, 3729.875])  # rest-frame [OII] wavelengths, as in f_doublet
#dblt_av = np.average(doublets) * (1+z)
dblt_av = np.average(doublets)

dblt_x_mask = ((x_data > dblt_av - 20) & (x_data < dblt_av + 20))
doublet_x_data = x_data[dblt_x_mask]
doublet_data = f_doublet(doublet_x_data, c, i1, i2, sigma_gal, z, sigma_inst)
doublet_data = doublet_data / np.median(y_data)
plt.plot(doublet_x_data, doublet_data, linewidth=0.5, color="#9c27b0")

max_y = np.max(y_data_scaled)

# plotting spectral lines
for e_key, e_val in sl['emis'].items():
    spec_line = float(e_val)
    #spec_line = float(e_val) * (1+z)
    spec_label = e_key

    if (e_val in str(doublets)):
        alpha_line = 0.2
    else:
        alpha_line = 0.7

    alpha_text = 0.75

    plt.axvline(x=spec_line, linewidth=0.5, color="#1e88e5", alpha=alpha_line)
    plt.text(spec_line - 3, max_y, spec_label, rotation=-90, alpha=alpha_text, weight="bold", fontsize=15)

for e_key, e_val in sl['abs'].items():
    spec_line = float(e_val)
    #spec_line = float(e_val) * (1+z)
    spec_label = e_key

    plt.axvline(x=spec_line, linewidth=0.5, color="#ff8f00", alpha=0.7)
    plt.text(spec_line - 3, max_y, spec_label, rotation=-90, alpha=0.75, weight="bold", fontsize=15)

# iron spectral lines
for e_key, e_val in sl['iron'].items():
    spec_line = float(e_val)
    #spec_line = float(e_val) * (1+z)
    plt.axvline(x=spec_line, linewidth=0.5, color="#bdbdbd", alpha=0.3)

plt.plot(x_data, y_model, linewidth=1.5, color="#b71c1c")

residuals_mask = (residual > res_stddev)
rmask = residuals_mask
#plt.scatter(x_data[rmask], residual[rmask], s=3, color="#f44336", alpha=0.5)
plt.scatter(x_data[mask], residual[mask] - 1, s=3, color="#43a047")

plt.tick_params(labelsize=13)
plt.title(r'\textbf{Spectra with pPXF overlayed}', fontsize=13)
plt.xlabel(r'\textbf{Wavelength (\AA)}', fontsize=13)
plt.ylabel(r'\textbf{Relative Flux}', fontsize=13)
plt.tight_layout()
pdf.savefig()
plt.close()

# ---------------------------------------------------------------------- #
# Voigt fitted region

# Running pPXF fitting routine
best_fit = ppxf_fitter_kinematics_sdss.kinematics_sdss(cube_id, 0, "all")
best_fit_vars = best_fit['variables']

data_wl = np.load("cube_results/cube_" + str(int(cube_id)) + "/cube_" + str(int(cube_id)) + "_cbd_x.npy")  # 'x-data'
data_spec = np.load("cube_results/cube_" + str(int(cube_id)) + "/cube_" + str(int(cube_id)) + "_cbs_y.npy")  # 'y-data'

# y-data which has been reduced down by median during pPXF running
galaxy = best_fit['y_data']

model_wl = np.load("ppxf_results/cube_" + str(int(cube_id)) + "/cube_" + str(int(cube_id)) + "_lamgal.npy")
model_spec = np.load("ppxf_results/cube_" + str(int(cube_id)) + "/cube_" + str(int(cube_id)) + "_model.npy")

# parameters from lmfit
lm_params = spectra_data.lmfit_data(cube_id)
z = lm_params['z']
sigma_inst = lm_params['sigma_inst']

# masking out the region of CaH and CaK
calc_rgn = np.array([3900, 4000])

data_rgn = calc_rgn * (1 + z)
data_mask = ((data_wl > data_rgn[0]) & (data_wl < data_rgn[1]))
data_wl_masked = data_wl[data_mask]
data_spec_masked = data_spec[data_mask] data_spec_masked = data_spec_masked / np.median(data_spec_masked) model_rgn = calc_rgn model_mask = ((model_wl > calc_rgn[0]) & (model_wl < calc_rgn[1])) model_wl_masked = model_wl[model_mask] model_spec_masked = model_spec[model_mask] z_wl_masked = model_wl_masked * (1 + z) # redshifted wavelength range galaxy_masked = galaxy[model_mask] # Applying the lmfit routine to fit two Voigt profiles over our spectra data vgt_pars = Parameters() vgt_pars.add('sigma_inst', value=sigma_inst, vary=False) vgt_pars.add('sigma_gal', value=1.0, min=0.0) vgt_pars.add('z', value=z) vgt_pars.add('v1_amplitude', value=-0.1, max=0.0) vgt_pars.add('v1_center', expr='3934.777*(1+z)') vgt_pars.add('v1_sigma', expr='sqrt(sigma_inst**2 + sigma_gal**2)', min=0.0) #vgt_pars.add('v1_gamma', value=0.01) vgt_pars.add('v2_amplitude', value=-0.1, max=0.0) vgt_pars.add('v2_center', expr='3969.588*(1+z)') vgt_pars.add('v2_sigma', expr='v1_sigma') #vgt_pars.add('v2_gamma', value=0.01) vgt_pars.add('c', value=0) voigt = VoigtModel(prefix='v1_') + VoigtModel( prefix='v2_') + ConstantModel() vgt_result = voigt.fit(galaxy_masked, x=z_wl_masked, params=vgt_pars) opt_pars = vgt_result.best_values best_fit = vgt_result.best_fit # Plotting the spectra fig, ax = plt.subplots() ax.plot(z_wl_masked, galaxy_masked, lw=1.5, c="#000000", alpha=0.3) ax.plot(z_wl_masked, model_spec_masked, lw=1.5, c="#00c853") ax.plot(z_wl_masked, best_fit, lw=1.5, c="#e53935") ax.tick_params(labelsize=13) ax.set_ylabel(r'\textbf{Relative Flux}', fontsize=13) ax.set_xlabel(r'\textbf{Wavelength (\AA)}', fontsize=13) plt.title(r'\textbf{Voigt Fitted Region}', fontsize=15) fig.tight_layout() pdf.savefig() plt.close() # ---------------------------------------------------------------------- # # Values for diagnostics catalogue = np.load("data/matched_catalogue.npy") cat_loc = np.where(catalogue[:, 0] == cube_id)[0] cube_data = catalogue[cat_loc][0] vmag = cube_data[5] sigma_sn_data = np.load("data/ppxf_fitter_data.npy") sigma_sn_loc = np.where(sigma_sn_data[:][:, 0][:, 0] == cube_id)[0] ss_indiv_data = sigma_sn_data[sigma_sn_loc][0][0] ssid = ss_indiv_data plt.figure() plt.title('Variables and numbers for cube ' + str(cube_id), fontsize=15) plt.text(0.0, 0.9, "HST V-band magnitude: " + str(vmag)) plt.text(0.0, 0.85, "S/N from spectra: " + str(ssid[7])) plt.text(0.0, 0.75, "OII sigma lmfit: " + str(ssid[1])) plt.text(0.0, 0.7, "OII sigma pPXF: " + str(ssid[5])) plt.text(0.0, 0.6, "Voigt sigma lmfit: " + str(ssid[11])) plt.text(0.0, 0.55, "Voigt sigma pPXF: " + str(ssid[10])) plt.axis('off') pdf.savefig() plt.close() # We can also set the file's metadata via the PdfPages object: d = pdf.infodict() d['Title'] = 'cube_' + str(cube_id) + ' diagnostics' d['Author'] = u'Jacky Cao' #d['Subject'] = 'How to create a multipage pdf file and set its metadata' #d['Keywords'] = 'PdfPages multipage keywords author title subject' #d['CreationDate'] = datetime.datetime(2009, 11, 13) d['CreationDate'] = datetime.datetime.today()
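# --- Example: reproducing an [OII] doublet fit with lmfit's Model interface.
# A minimal, self-contained sketch of how a best fit like `lm_best_fit`
# above could be produced. The synthetic spectrum and every starting value
# here are illustrative assumptions, not values from the pipeline.
import numpy as np
from lmfit import Model

def oii_doublet(x, c, i1, i2, sigma_gal, z, sigma_inst):
    """Same functional form as f_doublet: linear continuum plus two
    Gaussians at the redshifted [OII] wavelengths, sharing one width."""
    l1, l2 = 3727.092 * (1 + z), 3729.875 * (1 + z)
    sigma = np.sqrt(sigma_gal**2 + sigma_inst**2)
    norm = sigma * np.sqrt(2 * np.pi)
    return (c * x + (i1 / norm) * np.exp(-(x - l1)**2 / (2 * sigma**2))
            + (i2 / norm) * np.exp(-(x - l2)**2 / (2 * sigma**2)))

rng = np.random.default_rng(0)
x = np.linspace(4800, 4900, 400)  # observed-frame window around z ~ 0.3
y = oii_doublet(x, c=0.05, i1=900, i2=1200, sigma_gal=1.1, z=0.30,
                sigma_inst=0.6) + rng.normal(scale=15, size=x.size)

doublet_model = Model(oii_doublet)
pars = doublet_model.make_params(c=0.0, i1=500, i2=500, sigma_gal=1.0,
                                 z=0.30, sigma_inst=0.6)
pars['sigma_inst'].set(vary=False)  # instrumental width held fixed
fit_res = doublet_model.fit(y, pars, x=x)
print(fit_res.fit_report())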
def fit_sine(t, data):
    # `rabi`, `rabi_2` and `residual` are defined in the enclosing script:
    # initial guesses for the two Rabi frequencies and the residual function
    # handed to lmfit.minimize.
    params = Parameters()
    params.add('frequency', value=rabi)
    params.add('phaseShift', value=0)
    params.add('amplitude', value=0.01)
    params.add('offset', value=0.005)
    params.add('frequency2', value=rabi_2)
    params.add('amp2', value=0.1)
    params.add('phase2', value=0)

    out = minimize(residual, params, args=(t, data))
    return out
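# A self-contained residual consistent with the parameter names used in
# fit_sine might look like the following; the two-tone sine model itself is
# an assumption about what the script's `residual` computes.
import numpy as np

def two_tone_residual(params, t, data):
    """Candidate for the `residual` passed to minimize above."""
    p = params.valuesdict()
    model = (p['offset']
             + p['amplitude'] * np.sin(2 * np.pi * p['frequency'] * t + p['phaseShift'])
             + p['amp2'] * np.sin(2 * np.pi * p['frequency2'] * t + p['phase2']))
    return model - data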
1.6762046817955716 * 100e-6, 1.538489775708743 * 100e-6, 1.2667996551684635 * 100e-6,
0.9227732749310865 * 100e-6, 0.8606704620816109 * 100e-6, 0.6315702583843055 * 100e-6,
1.0875502579337843 * 100e-6, 2.364140282852443 * 100e-6, 2.319778855771209 * 100e-6,
3.8146331190426035 * 100e-6, 3.458809626725272 * 100e-6
]

sigma_y_error = [
    0.004200939478237076, 0.002775160952841804, 0.005456730565488788,
    0.006134716355516601, 0.013508877486089588, 0.0019198433852926633,
    0.005618933751150967, 0.053682807653258724, 0.017732913060791056,
    0.007950141650115095, 0.004731836802477574
]

z = np.linspace(5, 16)

# params, cov = curve_fit(omega, height, sigma_y, p0=[1e-4])
# error = np.sqrt(np.diag(cov))

params = Parameters()
params.add('omega', value=1e-4)
out = minimize(omega, params, args=(height, sigma_y))

# plt.errorbar(height, sigma_y, xerr=0.05, yerr=sigma_y_error, fmt='x')
print('Parameter', out.params)  # fitted values live in out.params
# print('errors', error)  # `error` only exists if the curve_fit branch above is used

# assumes omega(params, x) returns the model curve when no data is passed
plt.plot(z, omega(out.params, z))
plt.show()
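# Unlike curve_fit's covariance matrix, the lmfit result object carries
# per-parameter uncertainties directly, so the commented-out
# np.sqrt(np.diag(cov)) step has a one-line equivalent:
from lmfit import report_fit

report_fit(out)  # fit statistics plus a parameter table with stderr
print(out.params['omega'].value, out.params['omega'].stderr)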
def lmfit2(record):
    import numpy as np
    from datetime import datetime
    from lmfit import Minimizer, Parameters

    now = datetime.now()

    # first get globally used values
    tfreq = record['tfreq'] * 1000.0
    mpinc = record['mpinc'] / 1000000.0
    smsep = record['smsep'] / 1000000.0
    lagfr = record['lagfr'] / 1000000.0
    nrang = record['nrang']
    ltab = record['ltab']
    mplgs = record['mplgs']
    nave = record['nave']
    acfd = record['acfd']
    lag0_power = np.array(record['pwr0'])

    # ignore the second lag 0 in the lag table
    ltab = ltab[0:-1]

    fit_record = build_fit_record(record)

    c = 299792458.0
    lamda = c / tfreq
    k = 2 * np.pi / lamda
    nyquist_velocity = lamda / (4. * mpinc)

    lags = []
    for pair in ltab:
        lags.append(pair[1] - pair[0])
    lags.sort()
    max_lag = lags[-1]

    t = np.array(lags) * mpinc
    ranges = np.arange(0, nrang)

    # Estimate the noise
    noise = estimate_noise(lag0_power)

    # Setup the fitted parameter lists
    fitted_power = list()
    fitted_width = list()
    fitted_vels = list()
    fitted_phis = list()
    fitted_power_e = list()
    fitted_width_e = list()
    fitted_vels_e = list()
    fitted_phis_e = list()
    slist = list()

    # next, iterate through all range gates and do fitting
    for gate in ranges:
        print(gate)
        re = [x[0] for x in acfd[gate]]
        im = [x[1] for x in acfd[gate]]
        time = list(t)

        # estimate the upper limit of the self clutter
        clutter = list(estimate_selfclutter(nrang, ltab, smsep, mpinc,
                                            lagfr / 2, gate, lag0_power)[lags])

        # find lags blanked due to Tx and identify "good" lags
        blanked = determine_tx_blanked(nrang, ltab, smsep, mpinc, lagfr / 2, gate)
        blank_lags = [0 if len(blanked[x]) == 0 else 1 for x in blanked]

        # don't include any lags that are blanked
        j = 0
        for i, bl in enumerate(blank_lags):
            if (bl == 1):
                re.pop(i - j)
                im.pop(i - j)
                time.pop(i - j)
                clutter.pop(i - j)
                j += 1

        re = np.array(re)
        im = np.array(im)
        time = np.array(time)
        clutter = np.array(clutter)

        # Now fit each acf using first order errors
        first_error = first_order_errors(lag0_power[gate], noise, clutter, nave)

        # set up the fitter and fit for the first time
        init_vels = np.linspace(-nyquist_velocity / 2., nyquist_velocity / 2., num=30)
        outs = list()
        for vel in init_vels:
            params = Parameters()
            params.add('power', value=lag0_power[gate])
            # Need minimum, to stop magnitude model from diverging to infinity
            params.add('width', value=200.0, min=-100)
            params.add('velocity', value=vel,
                       min=-nyquist_velocity / 2., max=nyquist_velocity / 2.)

            minner = Minimizer(acf_residual, params,
                               fcn_args=(time, re, im, first_error, first_error, lamda))
            outs.append(minner.minimize())

        chi2 = np.array([out.chisqr for out in outs])
        ind = np.where(chi2 == np.min(chi2))[0]
        if (ind.size != 1):
            print("SOMETHING WEIRD IS HAPPENING")
        else:
            ind = ind[0]

        pwr_fit = outs[ind].params['power'].value
        wid_fit = outs[ind].params['width'].value
        vel_fit = outs[ind].params['velocity'].value

        # Now get proper errorbars using fitted parameters and model
        acf_model = (pwr_fit * np.exp(-time * 2. * np.pi * wid_fit / lamda)
                     * np.exp(1j * 4. * np.pi * vel_fit * time / lamda))
        mag_model = np.abs(acf_model)
        rho_re = np.cos(4. * np.pi * vel_fit * time / lamda)
        rho_im = np.sin(4. * np.pi * vel_fit * time / lamda)

        rho = mag_model / mag_model[0]
        for i in range(len(rho)):
            if (rho[i] > 0.999):
                rho[i] = 0.999
            rho[i] = rho[i] * pwr_fit / (pwr_fit + noise + clutter[i])

        re_error = acf_error(pwr_fit, noise, clutter, nave, rho, rho_re)
        im_error = acf_error(pwr_fit, noise, clutter, nave, rho, rho_im)

        # Now second LMFIT
        outs2 = list()
        for vel in init_vels:
            params = Parameters()
            params.add('power', value=pwr_fit)
            params.add('width', value=wid_fit, min=-100)
            params.add('velocity', value=vel,
                       min=-nyquist_velocity / 2., max=nyquist_velocity / 2.)

            minner = Minimizer(acf_residual, params,
                               fcn_args=(time, re, im, re_error, im_error, lamda))
            outs2.append(minner.minimize())

        chi2 = np.array([out.chisqr for out in outs2])
        ind = np.where(chi2 == np.min(chi2))[0]
        if (ind.size != 1):
            print("SOMETHING WEIRD IS HAPPENING")
        else:
            ind = ind[0]

        pwr_fit = outs2[ind].params['power'].value
        wid_fit = outs2[ind].params['width'].value
        vel_fit = outs2[ind].params['velocity'].value

        # TO DO, implement errors that compare relative chi2 of the
        # multiple minima found. Just like in the C version.
        pwr_e = outs2[ind].params['power'].stderr
        wid_e = outs2[ind].params['width'].stderr
        vel_e = outs2[ind].params['velocity'].stderr

        # Now save fitted quantities into array
        slist.append(gate)
        fitted_power.append(pwr_fit)
        fitted_width.append(wid_fit)
        fitted_vels.append(vel_fit)
        fitted_power_e.append(pwr_e)
        fitted_width_e.append(wid_e)
        fitted_vels_e.append(vel_e)

    print("It took " + str((datetime.now() - now).total_seconds()) +
          " seconds to fit one beam.")

    # set ground scatter flags
    gflg = list()
    p_l = list()
    p_l_e = list()
    for i in range(len(slist)):
        if (np.abs(fitted_vels[i]) - (30. - 1. / 3. * np.abs(fitted_width[i])) < 0.):
            gflg.append(1)
        else:
            gflg.append(0)
        p_l.append(10.0 * np.log10(fitted_power[i] / noise))
        p_l_e.append(10.0 * np.log10((fitted_power_e[i] + fitted_power[i]) / noise)
                     - 10.0 * np.log10(fitted_power[i] / noise))

    # construct the fitted data dictionary that will be written to the fit file
    fit_record['slist'] = np.array(slist, dtype=np.int16)
    fit_record['nlag'] = mplgs * np.ones(len(slist), dtype=np.int16)
    fit_record['qflg'] = [1] * len(slist)
    fit_record['gflg'] = gflg
    fit_record['p_l'] = np.array(p_l, dtype=np.float32)
    fit_record['p_l_e'] = np.array(p_l_e, dtype=np.float32)
    fit_record['noise.sky'] = noise
    fit_record['noise.search'] = noise
    fit_record['noise.mean'] = noise
    # fit_record['p_s']
    # fit_record['p_s_e']
    fit_record['v'] = np.array(fitted_vels, dtype=np.float32)
    fit_record['v_e'] = np.array(fitted_vels_e, dtype=np.float32)
    fit_record['w_l'] = np.array(fitted_width, dtype=np.float32)
    fit_record['w_l_e'] = np.array(fitted_width_e, dtype=np.float32)
    # fit_record['w_s'] =
    # fit_record['w_s_e'] =
    # fit_record['sd_l'] =
    # fit_record['sd_s'] =
    # fit_record['sd_phi'] =
    # fit_record['x_qflg'] =
    # fit_record['x_gflg'] =
    # fit_record['x_p_l'] =
    # fit_record['x_p_l_e'] =
    # fit_record['x_p_s'] =
    # fit_record['x_p_s_e'] =
    # fit_record['x_v'] =
    # fit_record['x_v_e'] =
    # fit_record['x_w_l'] =
    # fit_record['x_w_l_e'] =
    # fit_record['x_w_s'] =
    # fit_record['x_w_s_e'] =
    # fit_record['phi0'] =
    # fit_record['phi0_e'] =
    # fit_record['elv'] =
    # fit_record['elv_low'] =
    # fit_record['elv_high'] =
    # fit_record['x_sd_l'] =
    # fit_record['x_sd_s'] =
    # fit_record['x_sd_phi'] =

    return fit_record
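# lmfit2 restarts leastsq from 30 initial velocities and keeps the lowest
# chi-square result, a cheap guard against the local minima of oscillatory
# ACF models. The same pattern in isolation (residual_fcn and the parameter
# set are placeholders):
import numpy as np
from lmfit import Minimizer, Parameters

def multistart_minimize(residual_fcn, fcn_args, init_vels, nyquist):
    """Run a fit from several starting velocities; return the best result."""
    results = []
    for vel in init_vels:
        params = Parameters()
        params.add('power', value=1.0)
        params.add('width', value=200.0, min=-100)
        params.add('velocity', value=vel, min=-nyquist / 2., max=nyquist / 2.)
        results.append(Minimizer(residual_fcn, params, fcn_args=fcn_args).minimize())
    return min(results, key=lambda out: out.chisqr)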
offset = pars['line_off'].value model = (1 - frac) * yg + frac * yl + offset + x * slope if data is None: return model if sigma is None: return (model - data) return (model - data) / sigma n = 601 xmin = 0. xmax = 20.0 x = linspace(xmin, xmax, n) p_true = Parameters() p_true.add('amp_g', value=21.0) p_true.add('cen_g', value=8.1) p_true.add('wid_g', value=1.6) p_true.add('frac', value=0.37) p_true.add('line_off', value=-1.023) p_true.add('line_slope', value=0.62) data = (pvoigt(x, p_true['amp_g'].value, p_true['cen_g'].value, p_true['wid_g'].value, p_true['frac'].value) + random.normal(scale=0.23, size=n) + x * p_true['line_slope'].value + p_true['line_off'].value) if HASPYLAB: pylab.plot(x, data, 'r+') pfit = [
path_data = "../data/" df_fahey = pd.read_csv(path_data + "fahey_data.csv") df_fahey.loc[df_fahey["cell"] == "NonTfh", "cell"] = "nonTfh" data_arm = df_fahey[df_fahey.name == "Arm"] data_cl13 = df_fahey[df_fahey.name == "Cl13"] # get model time = np.linspace(0,80,300) sim = Sim(time = time, name = today, params = d, virus_model=vir_model_const) # ============================================================================= # set parameters # ============================================================================= params = Parameters() params.add('death_tr1', value=0.05, min=0, max=0.2) params.add('death_tfhc', value=0.01, min=0, max=0.2) params.add('prolif_tr1', value=2.5, min=1, max=5.0) params.add('prolif_tfhc', value=2.5, min=1, max=5.0) params.add("pth1", value=0.3, min=0, max=1.0) params.add("ptfh", value=0.2, min=0, max=1.0) params.add("ptr1", value=0.3, min=0, max=1.0) params.add("ptfhc", expr="1.0-pth1-ptfh-ptr1") params.add("K_il2", value = 0.0001, min = 1e-7, max=1) # ============================================================================= # run fitting procedure # ============================================================================= out = minimize(fit_fun, params, args=(sim, data_arm, data_cl13)) out_values = out.params.valuesdict() print(out_values)
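# `fit_fun` is defined elsewhere. The usual lmfit pattern for fitting the
# Arm and Cl13 datasets simultaneously with one parameter set is to
# concatenate the per-dataset residuals; a hypothetical sketch (run_model
# and the "value" column are assumptions about Sim and the Fahey data):
import numpy as np

def fit_fun(params, sim, data_arm, data_cl13):
    res = []
    for data in (data_arm, data_cl13):
        pred = sim.run_model(params)             # assumed simulation call
        res.append(pred - data["value"].values)  # assumed column name
    return np.concatenate(res)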
def test_constraints(with_plot=True): with_plot = with_plot and WITHPLOT def residual(pars, x, sigma=None, data=None): yg = gaussian(x, pars['amp_g'], pars['cen_g'], pars['wid_g']) yl = lorentzian(x, pars['amp_l'], pars['cen_l'], pars['wid_l']) model = yg + yl + pars['line_off'] + x * pars['line_slope'] if data is None: return model if sigma is None: return (model - data) return (model - data) / sigma n = 201 xmin = 0. xmax = 20.0 x = linspace(xmin, xmax, n) data = (gaussian(x, 21, 8.1, 1.2) + lorentzian(x, 10, 9.6, 2.4) + random.normal(scale=0.23, size=n) + x*0.5) if with_plot: pylab.plot(x, data, 'r+') pfit = Parameters() pfit.add(name='amp_g', value=10) pfit.add(name='cen_g', value=9) pfit.add(name='wid_g', value=1) pfit.add(name='amp_tot', value=20) pfit.add(name='amp_l', expr='amp_tot - amp_g') pfit.add(name='cen_l', expr='1.5+cen_g') pfit.add(name='wid_l', expr='2*wid_g') pfit.add(name='line_slope', value=0.0) pfit.add(name='line_off', value=0.0) sigma = 0.021 # estimate of data error (for all data points) myfit = Minimizer(residual, pfit, fcn_args=(x,), fcn_kws={'sigma':sigma, 'data':data}, scale_covar=True) myfit.prepare_fit() init = residual(myfit.params, x) result = myfit.leastsq() print(' Nfev = ', result.nfev) print( result.chisqr, result.redchi, result.nfree) report_fit(result.params, min_correl=0.3) fit = residual(result.params, x) if with_plot: pylab.plot(x, fit, 'b-') assert(result.params['cen_l'].value == 1.5 + result.params['cen_g'].value) assert(result.params['amp_l'].value == result.params['amp_tot'].value - result.params['amp_g'].value) assert(result.params['wid_l'].value == 2 * result.params['wid_g'].value) # now, change fit slightly and re-run myfit.params['wid_l'].expr = '1.25*wid_g' result = myfit.leastsq() report_fit(result.params, min_correl=0.4) fit2 = residual(result.params, x) if with_plot: pylab.plot(x, fit2, 'k') pylab.show() assert(result.params['cen_l'].value == 1.5 + result.params['cen_g'].value) assert(result.params['amp_l'].value == result.params['amp_tot'].value - result.params['amp_g'].value) assert(result.params['wid_l'].value == 1.25 * result.params['wid_g'].value)
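# Beyond the stderr estimates in report_fit, lmfit can compute explicit
# confidence intervals from the Minimizer/result pair; a minimal follow-on
# sketch using the objects defined in test_constraints:
from lmfit import conf_interval, report_ci

ci = conf_interval(myfit, result, sigmas=[1, 2])
report_ci(ci)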
def fitcurve(val0):
    if elem.matchX is False:
        return

    ydat = measdata.D1complex[:, measdata.findex]
    data = np.empty(len(ydat) * 2, dtype='float64')
    data[0::2] = ydat.real
    data[1::2] = ydat.imag
    preFit(False)
    xaxis3 = np.linspace(squid.start, squid.stop, (squid.pt * 2))

    # Using standard curve_fit settings
    # Define fitting parameters
    params = Parameters()
    params.add('CapfF', value=squid.Cap * 1e15, vary=True, min=30, max=90)
    params.add('IcuA', value=squid.Ic * 1e6, vary=True, min=3.0, max=4.5)
    params.add('WbpH', value=squid.Wb * 1e12, vary=True, min=0, max=1500)
    params.add('LooppH', value=squid.LOOP * 1e12, vary=False, min=0.0, max=100)
    params.add('alpha', value=squid.ALP, vary=False, min=0.98, max=1.02)
    params.add('R', value=squid.R, vary=True, min=1, max=20e3)
    params.add('Z1', value=elem.Z1, vary=False, min=40, max=60)
    params.add('Z2', value=elem.Z2, vary=False, min=40, max=60)
    params.add('Z3', value=elem.Z3, vary=False, min=40, max=60)
    params.add('L2', value=elem.L2, vary=False, min=0.00, max=0.09)

    # Crop region to fit
    elem.midx = find_nearest(xaxis3, elem.xmin)
    elem.madx = find_nearest(xaxis3, elem.xmax)

    # Do Fit
    result = minimize(gta1, params, args=(xaxis3[elem.midx:elem.madx], data))

    # Present results of fitting (report_fit prints the report itself)
    report_fit(result)
    paramsToMem(result.params)
    update2(0)
    preFit(True)

    # Calculate and plot residual there
    S11 = getfit()
    residual = data - S11
    plt.figure(4)
    plt.clf()
    plt.plot(xaxis3[elem.midx:elem.madx], residual[elem.midx:elem.madx])
    plt.axis('tight')
    plt.draw()
    print('Avg-sqr Residuals', abs(np.mean(residual * residual)) * 1e8)
    return
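# fitcurve flattens the complex trace into alternating real/imaginary
# samples before fitting. The generic lmfit pattern for complex-valued data
# looks like this (model_func stands in for the resonator model that gta1
# wraps):
import numpy as np

def complex_residual(params, x, data_interleaved, model_func):
    """Residual for complex data stored as [re0, im0, re1, im1, ...]."""
    trace = model_func(params, x)        # complex-valued model
    model = np.empty(2 * trace.size, dtype='float64')
    model[0::2] = trace.real
    model[1::2] = trace.imag
    return model - data_interleaved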
class FieldFitter:
    """Input field measurements, perform parametric fit, return relevant quantities.

    The :class:`mu2e.fieldfitter.FieldFitter` takes a 3D set of field measurements and their
    associated position values, and performs a parametric fit. The parameters and fit model are
    handled by the :mod:`lmfit` package, which in turn wraps the :mod:`scipy.optimize` module,
    which actually performs the parameter optimization. The default optimizer is the
    Levenberg-Marquardt algorithm.

    The :func:`mu2e.fieldfitter.FieldFitter.fit` requires multiple cfg `namedtuples`, and performs
    the actual fitting (or recreates a fit for a given set of saved parameters). After fitting,
    the generated class members can be used for further analysis.

    Args:
        input_data (pandas.DataFrame): DF that contains the field component values to be fit.
        cfg_geom (namedtuple): namedtuple with the following members:
            'geom z_steps r_steps phi_steps x_steps y_steps bad_calibration'

    Attributes:
        input_data (pandas.DataFrame): The input DF, with possible modifications.
        phi_steps (List[float]): The azimuthal values of the field data (cylindrical coords)
        r_steps (List[float]): The radial values of the field data (cylindrical coords)
        x_steps (List[float]): The x values of the field data (cartesian coords)
        y_steps (List[float]): The y values of the field data (cartesian coords)
        pickle_path (str): Location to read/write the pickled fit parameter values
        params (lmfit.Parameters): Set of Parameters, inherited from `lmfit`
        result (lmfit.ModelResult): Container for resulting fit information, inherited from
            `lmfit`
    """

    def __init__(self, input_data, cfg_geom):
        self.input_data = input_data
        if cfg_geom.geom == 'cyl':
            self.phi_steps = cfg_geom.phi_steps
            self.r_steps = cfg_geom.r_steps
        elif cfg_geom.geom == 'cart':
            self.x_steps = cfg_geom.x_steps
            self.y_steps = cfg_geom.y_steps
        self.pickle_path = mu2e_ext_path + 'fit_params/'
        self.geom = cfg_geom.geom

    def fit(self, geom, cfg_params, cfg_pickle):
        """Helper function that chooses one of the subsequent fitting functions."""
        self.fit_solenoid(cfg_params, cfg_pickle)

    def fit_solenoid(self, cfg_params, cfg_pickle):
        """Main fitting function for the FieldFitter class.

        The typical magnetic field geometry for the Mu2E experiment is determined by one or more
        solenoids, with some contaminating external fields. The purpose of this function is to
        fit a set of sparse magnetic field data that would, in practice, be generated by a field
        measurement device.

        The following assumptions must hold for the input data:
            * The data is represented in a cylindrical coordinate system.
            * The data forms a series of planes, where all planes intersect at R=0.
            * All planes have the same R and Z values.
            * All positive Phi values have an associated negative phi value, which uniquely
              defines a single plane in R-Z space.

        Args:
            cfg_params (namedtuple): 'ns ms cns cms Reff func_version'
            cfg_pickle (namedtuple): 'use_pickle save_pickle load_name save_name recreate'

        Returns:
            Nothing. Generates class attributes after fitting, and saves parameter values, if
            saving is specified.
""" func_version = cfg_params.func_version Bz = [] Br = [] Bphi = [] RR = [] ZZ = [] PP = [] XX = [] YY = [] # Load pre-defined starting values for parameters, or start a new set if cfg_pickle.use_pickle or cfg_pickle.recreate: try: self.params = pkl.load( open( self.pickle_path + cfg_pickle.load_name + '_results.p', "rb")) except UnicodeDecodeError: self.params = pkl.load(open( self.pickle_path + cfg_pickle.load_name + '_results.p', "rb"), encoding='latin1') else: self.params = Parameters() self.add_params_default(cfg_params) ZZ = self.input_data.Z.values RR = self.input_data.R.values PP = self.input_data.Phi.values Bz = self.input_data.Bz.values Br = self.input_data.Br.values Bphi = self.input_data.Bphi.values if func_version in [ 6, 8, 105, 110, 115, 116, 117, 118, 119, 120, 121, 122, 1000 ]: XX = self.input_data.X.values YY = self.input_data.Y.values # Choose the type of fitting function we'll be using. pvd = self.params.valuesdict( ) # Quicker way to grab params and init the fit functions if func_version == 5: fit_func = ff.brzphi_3d_producer_modbessel_phase( ZZ, RR, PP, pvd['R'], pvd['ns'], pvd['ms']) elif func_version == 6: fit_func = ff.brzphi_3d_producer_modbessel_phase_ext( ZZ, RR, PP, pvd['R'], pvd['ns'], pvd['ms'], pvd['cns'], pvd['cms']) elif func_version == 7: fit_func = ff.brzphi_3d_producer_modbessel_phase_hybrid( ZZ, RR, PP, pvd['R'], pvd['ns'], pvd['ms'], pvd['cns'], pvd['cms']) elif func_version == 8: fit_func = ff.brzphi_3d_producer_modbessel_v8( ZZ, RR, PP, pvd['R'], pvd['ns'], pvd['ms'], pvd['cns'], pvd['cms']) elif func_version == 100: fit_func = ff.brzphi_3d_producer_hel_v0(ZZ, RR, PP, pvd['R'], pvd['ns'], pvd['ms']) elif func_version == 115: fit_func = ff.brzphi_3d_producer_hel_v15(ZZ, RR, PP, pvd['R'], pvd['ns'], pvd['ms'], pvd['n_scale']) elif func_version == 117: fit_func = ff.brzphi_3d_producer_hel_v17(ZZ, RR, PP, pvd['R'], pvd['ns'], pvd['ms'], pvd['n_scale']) elif func_version == 118: fit_func = ff.brzphi_3d_producer_hel_v18(ZZ, RR, PP, pvd['R'], pvd['ns'], pvd['ms'], pvd['cns'], pvd['cms'], pvd['n_scale']) elif func_version == 119: fit_func = ff.brzphi_3d_producer_hel_v19(ZZ, RR, PP, pvd['R'], pvd['ns'], pvd['ms'], pvd['cns'], pvd['cms'], pvd['n_scale']) elif func_version == 120: fit_func = ff.brzphi_3d_producer_hel_v20(ZZ, RR, PP, pvd['R'], pvd['ns'], pvd['ms'], pvd['cns'], pvd['cms'], pvd['n_scale'], pvd['m_scale']) elif func_version == 121: fit_func = ff.brzphi_3d_producer_hel_v21(ZZ, RR, PP, pvd['R'], pvd['ns'], pvd['ms'], pvd['cns'], pvd['cms'], pvd['n_scale'], pvd['m_scale']) elif func_version == 122: fit_func = ff.brzphi_3d_producer_hel_v22(ZZ, RR, PP, pvd['R'], pvd['ns'], pvd['ms'], pvd['cns'], pvd['cms'], pvd['n_scale'], pvd['m_scale']) else: raise NotImplementedError( f'Function version={func_version} not implemented.') # Generate an lmfit Model if func_version in [6, 8, 110, 115, 116, 117, 118, 119, 120, 121, 122]: self.mod = Model(fit_func, independent_vars=['r', 'z', 'phi', 'x', 'y']) else: self.mod = Model(fit_func, independent_vars=['r', 'z', 'phi']) # Start loading in additional parameters based on the function version. # Functions with version < 100 are cyclindrical expansions. # Functions with version > 100 are helical expansions. 
if func_version == 5: self.add_params_AB() self.add_params_phase_shift() elif func_version == 6: self.add_params_AB() self.add_params_phase_shift() self.add_params_cart_simple(on_list=['k3']) if func_version == 7: self.add_params_AB() self.add_params_phase_shift() elif func_version == 8: self.add_params_AB() self.add_params_phase_shift() self.add_params_cart_simple(on_list=['k3']) self.add_params_biot_savart(xyz_tuples=((1000, 0, -4600), (1000, 0, 4600))) elif func_version == 100: self.add_params_ABCD() elif func_version == 115: self.add_params_AB(skip_zero_n=True) self.add_params_cart_simple(all_on=True) elif func_version == 116: self.add_params_AB(skip_zero_n=True) self.add_params_finite_wire() elif func_version == 117: self.add_params_AB(skip_zero_n=True) self.add_params_cart_simple(all_on=True) self.add_params_phase_shift() elif func_version == 118: self.add_params_AB(skip_zero_n=True) self.add_params_CD(skip_zero_cn=True) self.add_params_cart_simple(all_on=True) elif func_version == 119: self.add_params_AB(skip_zero_n=True) self.add_params_CD(skip_zero_cn=True) self.add_params_cart_simple(on_list=['k3']) # self.add_params_cart_simple(all_on=False) # self.add_params_biot_savart(xyz_tuples=( # (1000, 0, -4600), # (1000, 0, 4600))) # self.add_params_biot_savart(xyz_tuples=( # (0.25, 0, -46), # (0.25, 0, 46)), # xy_bounds=0.05, z_bounds=0.05, v_bounds=100) # self.add_params_biot_savart( # xyz_tuples=( # # (0, 0, 3532.85), # # (0, 1000, 3963.66), # # (0, 0, 4393.47), # # (0, 0, 5034.67), # # (0, 0, 5691.88), # # (0, 0, 6382.01), # # (0, 0, 7208.56), # # (-100, -100, 7868.01), # # (-500, 1150, 7868.01), # # (-400, 1050, 7868.01), # # (100, 100, 9710.86), # # (-500, 1150, 9710.86), # # (-400, 1050, 9710.86), # # (-200, 1000, 10000), # # (-500, 1150, 11553.71), # # (-400, 1050, 11553.71), # # (-200, 1000, 13454.53), # # (200, -1000, 7868.01), # # (200, -1000, 9710.86), # # (200, -1000, 11553.71), # # (200, -1000, 13454.53), # ), # xy_bounds=500, z_bounds=20, v_bounds=100) elif func_version == 120: self.add_params_AB(skip_zero_n=False, skip_zero_m=False) self.add_params_CD(skip_zero_cn=True) self.add_params_cart_simple(on_list=['k3']) # self.add_params_cart_simple(all_on=True) self.add_params_biot_savart( xyz_tuples=((0.25, 0, -46), (0.25, 0, 46)), # (0.25, 0, -4.6), # (0.25, 0, 4.6)), xy_bounds=0.1, z_bounds=46, v_bounds=100) elif func_version == 121: self.add_params_AB(skip_zero_n=False, skip_zero_m=False) self.add_params_phase_shift() self.add_params_cart_simple(on_list=['k3']) # self.add_params_cart_simple(all_on=True) self.add_params_biot_savart(xyz_tuples=((0.25, 0, -46), (0.25, 0, 46)), xy_bounds=0.1, z_bounds=46, v_bounds=100) elif func_version == 122: self.add_params_AB(skip_zero_n=False, skip_zero_m=False) self.add_params_CD(skip_zero_cn=False) self.add_params_cart_simple(on_list=['k3']) # self.add_params_cart_simple(all_on=True) self.add_params_biot_savart(xyz_tuples=((0.25, 0, -46), (0.25, 0, 46)), xy_bounds=0.01, z_bounds=0.01, v_bounds=100) if not cfg_pickle.recreate: print( f'fitting with func_version={func_version},\n' f'n={cfg_params.ns}, m={cfg_params.ms}, cn={cfg_params.cns}, cm={cfg_params.cms}' ) else: print( f'recreating fit with func_version={func_version},\n' f'n={cfg_params.ns}, m={cfg_params.ms}, cn={cfg_params.cns}, cm={cfg_params.cms}' ) start_time = time() # Functions with r, z, phi dependence only if func_version in [5, 100]: if cfg_pickle.recreate: for param in self.params: self.params[param].vary = False self.result = self.mod.fit(np.concatenate([Br, Bz, 
Bphi]).ravel(), r=RR, z=ZZ, phi=PP, params=self.params, method='leastsq', fit_kws={'maxfev': 1}) elif cfg_pickle.use_pickle: # mag = 1/np.sqrt(Br**2+Bz**2+Bphi**2) self.result = self.mod.fit( np.concatenate([Br, Bz, Bphi]).ravel(), # weights=np.concatenate([mag, mag, mag]).ravel(), r=RR, z=ZZ, phi=PP, params=self.params, method='leastsq', fit_kws={'maxfev': 10000}) else: self.result = self.mod.fit( np.concatenate([Br, Bz, Bphi]).ravel(), # weights=np.concatenate( # [np.ones(Br.shape), np.ones(Bz.shape), # np.ones(Bphi.shape)*100000]).ravel(), r=np.abs(RR), z=ZZ, phi=PP, params=self.params, # method='leastsq', fit_kws={'maxfev': 10000}) method='least_squares', fit_kws={'max_nfev': 100}) # Functions with r, z, phi, x, y dependence elif func_version in [ 6, 8, 105, 115, 116, 117, 118, 119, 120, 121, 122 ]: if cfg_pickle.recreate: for param in self.params: self.params[param].vary = False self.result = self.mod.fit(np.concatenate([Br, Bz, Bphi]).ravel(), r=RR, z=ZZ, phi=PP, x=XX, y=YY, params=self.params, method='leastsq', fit_kws={'maxfev': 1}) elif cfg_pickle.use_pickle: # mag = 1/np.sqrt(Br**2+Bz**2+Bphi**2) self.result = self.mod.fit( np.concatenate([Br, Bz, Bphi]).ravel(), # weights=np.concatenate([mag, mag, mag]).ravel(), r=RR, z=ZZ, phi=PP, x=XX, y=YY, params=self.params, method='leastsq', fit_kws={'maxfev': 10000}) else: # mag = 1/np.sqrt(Br**2+Bz**2+Bphi**2) self.result = self.mod.fit( np.concatenate([Br, Bz, Bphi]).ravel(), # weights=np.concatenate([mag, mag, mag]).ravel(), r=RR, z=ZZ, phi=PP, x=XX, y=YY, params=self.params, # method='leastsq', fit_kws={'maxfev': 10000}) method='least_squares', fit_kws={ 'verbose': 1, 'gtol': 1e-15, 'ftol': 1e-15, 'xtol': 1e-15, # 'tr_solver': 'lsmr', # 'tr_options': # {'regularize': True} }) self.params = self.result.params end_time = time() print(("Elapsed time was %g seconds" % (end_time - start_time))) report_fit(self.result, show_correl=False) if cfg_pickle.save_pickle: # and not cfg_pickle.recreate: self.pickle_results(self.pickle_path + cfg_pickle.save_name) def fit_external(self, cfg_params, cfg_pickle, profile=False): raise NotImplementedError('Oh no! you got lazy during refactoring') def pickle_results(self, pickle_name='default'): """Pickle the resulting Parameters after a fit is performed.""" pkl.dump(self.result.params, open(pickle_name + '_results.p', "wb"), pkl.HIGHEST_PROTOCOL) def merge_data_fit_res(self): """Combine the fit results and the input data into one dataframe for easier comparison of results. Adds three columns to input_data: `Br_fit, Bphi_fit, Bz_fit` or `Bx_fit, By_fit, Bz_fit`, depending on the geometry. 
""" bf = self.result.best_fit self.input_data.loc[:, 'Br_fit'] = bf[0:len(bf) // 3] self.input_data.loc[:, 'Bz_fit'] = bf[len(bf) // 3:2 * len(bf) // 3] self.input_data.loc[:, 'Bphi_fit'] = bf[2 * len(bf) // 3:] def add_params_default(self, cfg_params): if 'R' not in self.params: self.params.add('R', value=cfg_params.Reff, vary=False) if 'ns' not in self.params: self.params.add('ns', value=cfg_params.ns, vary=False) if 'ms' not in self.params: self.params.add('ms', value=cfg_params.ms, vary=False) if 'n_scale' not in self.params: self.params.add('n_scale', value=cfg_params.n_scale, vary=False) if 'm_scale' not in self.params: self.params.add('m_scale', value=cfg_params.m_scale, vary=False) if 'cns' not in self.params: self.params.add('cns', value=cfg_params.cns, vary=False) if 'cms' not in self.params: self.params.add('cms', value=cfg_params.cms, vary=False) def add_params_AB(self, skip_zero_n=False, skip_zero_m=False): if skip_zero_n: ns_range = range(1, self.params['ns'].value) else: ns_range = range(self.params['ns'].value) if skip_zero_m: ms_range = range(1, self.params['ms'].value) else: ms_range = range(self.params['ms'].value) for n in ns_range: for m in ms_range: if n == m == 0: if f'A_{n}_{m}' not in self.params: self.params.add(f'A_{n}_{m}', value=0, vary=False) if f'B_{n}_{m}' not in self.params: self.params.add(f'B_{n}_{m}', value=0, vary=False) else: if f'A_{n}_{m}' not in self.params: self.params.add(f'A_{n}_{m}', value=0, vary=True) if f'B_{n}_{m}' not in self.params: self.params.add(f'B_{n}_{m}', value=0, vary=True) def add_params_CD(self, skip_zero_cn=False): if skip_zero_cn: cns_range = range(1, self.params['cns'].value) else: cns_range = range(self.params['cns'].value) cms_range = range(self.params['cms'].value) for cn in cns_range: for cm in cms_range: if f'C_{cn}_{cm}' not in self.params: self.params.add(f'C_{cn}_{cm}', value=0, vary=True) if f'D_{cn}_{cm}' not in self.params: self.params.add(f'D_{cn}_{cm}', value=0, vary=True) def add_params_phase_shift(self): # `D` parameter is a scaling parameters that is equivalent to a phase shift. # Instead of using a term like cos(phi+D), it is D*cos(phi)+(1-D)*sin(phi). # This allows the free paramns to remain linear, and greatly decreases run time. for n in range(self.params['ns'].value): if f'D_{n}' not in self.params: self.params.add(f'D_{n}', value=0.5, min=0, max=1, vary=True) def add_params_ABCD(self): # Add parameters A,B,C,D, and turn off the off-diagonals that are unphysical. 
ns_range = range(self.params['ns'].value) ms_range = range(self.params['ms'].value) n_scale = self.params['n_scale'].value m_scale = self.params['m_scale'].value for n in ns_range: for m in ms_range: if f'A_{n}_{m}' not in self.params: self.params.add(f'A_{n}_{m}', value=0, vary=True) if f'B_{n}_{m}' not in self.params: self.params.add(f'B_{n}_{m}', value=0, vary=True) if f'C_{n}_{m}' not in self.params: self.params.add(f'C_{n}_{m}', value=0, vary=True) if f'D_{n}_{m}' not in self.params: self.params.add(f'D_{n}_{m}', value=0, vary=True) if (n * n_scale > m * m_scale) or n * n_scale == 0: self.params[f'A_{n}_{m}'].vary = False self.params[f'A_{n}_{m}'].value = 0 self.params[f'B_{n}_{m}'].vary = False self.params[f'B_{n}_{m}'].value = 0 self.params[f'C_{n}_{m}'].vary = False self.params[f'C_{n}_{m}'].value = 0 self.params[f'D_{n}_{m}'].vary = False self.params[f'D_{n}_{m}'].value = 0 def add_params_cart_simple(self, all_on=False, on_list=None): cart_names = [f'k{i}' for i in range(1, 11)] if on_list is None: on_list = [] for k in cart_names: if all_on: if k not in self.params: self.params.add(k, value=0, vary=True) else: if k not in self.params: self.params.add(k, value=0, vary=(k in on_list)) def add_params_finite_wire(self): if 'k1' not in self.params: self.params.add('k1', value=0, vary=True) if 'k2' not in self.params: self.params.add('k2', value=0, vary=True) if 'xp1' not in self.params: self.params.add('xp1', value=1050, vary=False, min=900, max=1200) if 'xp2' not in self.params: self.params.add('xp2', value=1050, vary=False, min=900, max=1200) if 'yp1' not in self.params: self.params.add('yp1', value=0, vary=False, min=-100, max=100) if 'yp2' not in self.params: self.params.add('yp2', value=0, vary=False, min=-100, max=100) if 'zp1' not in self.params: self.params.add('zp1', value=4575, vary=False, min=4300, max=4700) if 'zp2' not in self.params: self.params.add('zp2', value=-4575, vary=False, min=-4700, max=-4300) def add_params_biot_savart(self, xyz_tuples=None, v_tuples=None, xy_bounds=100, z_bounds=100, v_bounds=100): if v_tuples and len(v_tuples) != len(xyz_tuples): raise AttributeError( 'If v_tuples is specified it must be same size as xyz_tuples') for i in range(1, len(xyz_tuples) + 1): x, y, z = xyz_tuples[i - 1] if f'x{i}' not in self.params: self.params.add(f'x{i}', value=x, vary=True, min=x - xy_bounds, max=x + xy_bounds) if f'y{i}' not in self.params: self.params.add(f'y{i}', value=y, vary=True, min=y - xy_bounds, max=y + xy_bounds) if f'z{i}' not in self.params: self.params.add(f'z{i}', value=z, vary=True, min=z - z_bounds, max=z + z_bounds) if v_tuples: vx, vy, vz = v_tuples[i - 1] else: vx = vy = vz = 0 if f'vx{i}' not in self.params: self.params.add(f'vx{i}', value=vx, vary=True, min=vx - v_bounds, max=vx + v_bounds) if f'vy{i}' not in self.params: self.params.add(f'vy{i}', value=vy, vary=True, min=vy - v_bounds, max=vy + v_bounds) if f'vz{i}' not in self.params: self.params.add(f'vz{i}', value=vz, vary=True, min=vz - v_bounds, max=vz + v_bounds)
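# A hedged sketch of driving FieldFitter end to end. The namedtuple fields
# follow the docstrings above (n_scale and m_scale are included because
# add_params_default reads them); every value, the step arrays, and the
# input DataFrame are illustrative assumptions.
from collections import namedtuple

CfgGeom = namedtuple('cfg_geom',
                     'geom z_steps r_steps phi_steps x_steps y_steps bad_calibration')
CfgParams = namedtuple('cfg_params', 'ns ms cns cms Reff func_version n_scale m_scale')
CfgPickle = namedtuple('cfg_pickle', 'use_pickle save_pickle load_name save_name recreate')

geom_cfg = CfgGeom('cyl', z_steps, r_steps, phi_steps, None, None, False)
params_cfg = CfgParams(ns=3, ms=40, cns=0, cms=0, Reff=7000,
                       func_version=5, n_scale=1, m_scale=1)
pickle_cfg = CfgPickle(use_pickle=False, save_pickle=True,
                       load_name='last', save_name='this', recreate=False)

fitter = FieldFitter(input_df, geom_cfg)  # input_df: measurement DataFrame
fitter.fit('cyl', params_cfg, pickle_cfg)
fitter.merge_data_fit_res()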
minner = Minimizer(fcn2min, params, fcn_args=(x, y, func)) result = minner.minimize() ## Store the Confidence data from the fit #con_report = lmfit.fit_report(result.params) (x_plot, model) = fcn2min(result.params, x, y, func=func, plot_fit=True) return (x_plot, model, result) if __name__ == '__main__': params = Parameters() params.add('p0', value=2.4, min=-2.0, max=4.0, vary=True) params.add('p1', value=100.0, min=0.0, max=2000.0, vary=True) params.add('p2', value=0.0, min=-2, max=2, vary=True) params.add('p3', value=2.3, min=-2.0, max=4.0, vary=True) x = np.linspace(-10, 10, 100) y = 2.4 + 0.2 * np.exp(-x**2 / (2 * 2.3**2)) (x_fit, y_fit, result) = fit_func(x, y, params, gauss) print(result.params) import matplotlib.pyplot as plt plt.plot(x, y, 'o') plt.plot(x_fit, y_fit)
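# gauss and fcn2min are not shown above; a hypothetical pair consistent with
# how they are called (treating p0 as offset, p1 as amplitude, p2 as center
# and p3 as width is an assumption):
import numpy as np

def gauss(x, p0, p1, p2, p3):
    """Gaussian on a flat offset (assumed parameter ordering)."""
    return p0 + p1 * np.exp(-(x - p2)**2 / (2 * p3**2))

def fcn2min(params, x, y, func, plot_fit=False):
    """Residual for lmfit; with plot_fit=True return a dense model curve."""
    p = [params['p%d' % i].value for i in range(4)]
    if plot_fit:
        x_plot = np.linspace(x.min(), x.max(), 500)
        return x_plot, func(x_plot, *p)
    return func(x, *p) - y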
def fit_isoturbHI_model_simple(vels, spec, vcent, delta_vcent=5 * u.km / u.s, err=None, verbose=True, plot_fit=True, return_model=False, use_emcee=False, emcee_kwargs={}): vels = vels.copy().to(u.km / u.s) vel_min = (vcent - delta_vcent).to(vels.unit).value vel_max = (vcent + delta_vcent).to(vels.unit).value # Create the parameter list. pfit = Parameters() # pfit.add(name='Ts', value=100., min=10**1.2, max=8000) pfit.add(name='Ts', value=np.nanmax(spec.value), min=10**1.2, max=8000) pfit.add(name='sigma', value=15., min=0.30, max=31.6) # min v at Ts=16 K # pfit.add(name='log_NH', value=21., min=20., max=23.5) pfit.add(name='Tpeak', value=np.nanmax(spec.value), min=0, max=5. * np.nanmax(spec.value)) pfit.add(name='vcent', value=vcent.to(vels.unit).value, min=vel_min, max=vel_max) try: finite_mask = np.isfinite(spec.filled_data[:]) except AttributeError: finite_mask = np.isfinite(spec) # Some cases are failing with a TypeError due to a lack # of data. This really shouldn't happen, but I'll throw in this # to catch those cases. if finite_mask.sum() <= 4: return None if err is not None: fcn_args = (vels[finite_mask].value, spec[finite_mask].value, err.value) else: fcn_args = (vels[finite_mask].value, spec[finite_mask].value, 1.) try: mini = Minimizer(residual, pfit, fcn_args=fcn_args, max_nfev=vels.size * 1000, nan_policy='omit') out = mini.leastsq() except TypeError: return None if use_emcee: mini = Minimizer(residual, out.params, fcn_args=fcn_args, max_nfev=vels.size * 1000) out = mini.emcee(**emcee_kwargs) if verbose: report_fit(out) pars = out.params model = isoturbHI_simple(vels.value, pars['Ts'].value, pars['sigma'].value, pars['Tpeak'].value, pars['vcent'].value) if plot_fit: plt.plot(vels.value, spec.value, drawstyle='steps-mid') plt.plot(vels.value, model) if return_model: return out, vels.value, model return out
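# When use_emcee is enabled, the second Minimizer.emcee call receives the
# sampler controls through emcee_kwargs; typical (illustrative) settings,
# with vels, spec and vcent standing in for real data:
emcee_kwargs = dict(burn=500, steps=2000, nwalkers=50, thin=10)
out = fit_isoturbHI_model_simple(vels, spec, vcent,
                                 use_emcee=True, emcee_kwargs=emcee_kwargs)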
def mback(energy, mu=None, group=None, order=3, z=None, edge='K', e0=None, emin=None, emax=None, whiteline=None, leexiang=False, tables='chantler', fit_erfc=False, return_f1=False, _larch=None): """ Match mu(E) data for tabulated f''(E) using the MBACK algorithm and, optionally, the Lee & Xiang extension Arguments: energy, mu: arrays of energy and mu(E) order: order of polynomial [3] group: output group (and input group for e0) z: Z number of absorber edge: absorption edge (K, L3) e0: edge energy emin: beginning energy for fit emax: ending energy for fit whiteline: exclusion zone around white lines leexiang: flag to use the Lee & Xiang extension tables: 'chantler' (default) or 'cl' fit_erfc: True to float parameters of error function return_f1: True to put the f1 array in the group Returns: group.f2: tabulated f2(E) group.f1: tabulated f1(E) (if return_f1 is True) group.fpp: matched data group.mback_params: Group of parameters for the minimization References: * MBACK (Weng, Waldo, Penner-Hahn): http://dx.doi.org/10.1086/303711 * Lee and Xiang: http://dx.doi.org/10.1088/0004-637X/702/2/970 * Cromer-Liberman: http://dx.doi.org/10.1063/1.1674266 * Chantler: http://dx.doi.org/10.1063/1.555974 """ order = int(order) if order < 1: order = 1 # set order of polynomial if order > MAXORDER: order = MAXORDER ### implement the First Argument Group convention energy, mu, group = parse_group_args(energy, members=('energy', 'mu'), defaults=(mu, ), group=group, fcn_name='mback') if len(energy.shape) > 1: energy = energy.squeeze() if len(mu.shape) > 1: mu = mu.squeeze() group = set_xafsGroup(group, _larch=_larch) if e0 is None: # need to run find_e0: e0 = xray_edge(z, edge, _larch=_larch)[0] if e0 is None: e0 = group.e0 if e0 is None: find_e0(energy, mu, group=group) ### theta is an array used to exclude the regions <emin, >emax, and ### around white lines, theta=0.0 in excluded regions, theta=1.0 elsewhere (i1, i2) = (0, len(energy) - 1) if emin is not None: i1 = index_of(energy, emin) if emax is not None: i2 = index_of(energy, emax) theta = np.ones(len(energy)) # default: 1 throughout theta[0:i1] = 0 theta[i2:-1] = 0 if whiteline: pre = 1.0 * (energy < e0) post = 1.0 * (energy > e0 + float(whiteline)) theta = theta * (pre + post) if edge.lower().startswith('l'): l2 = xray_edge(z, 'L2', _larch=_larch)[0] l2_pre = 1.0 * (energy < l2) l2_post = 1.0 * (energy > l2 + float(whiteline)) theta = theta * (l2_pre + l2_post) ## this is used to weight the pre- and post-edge differently as ## defined in the MBACK paper weight1 = 1 * (energy < e0) weight2 = 1 * (energy > e0) weight = np.sqrt(sum(weight1)) * weight1 + np.sqrt(sum(weight2)) * weight2 ## get the f'' function from CL or Chantler if tables.lower() == 'chantler': f1 = f1_chantler(z, energy, _larch=_larch) f2 = f2_chantler(z, energy, _larch=_larch) else: (f1, f2) = f1f2(z, energy, edge=edge, _larch=_larch) group.f2 = f2 if return_f1: group.f1 = f1 em = xray_line(z, edge.upper(), _larch=_larch)[0] # erfc centroid params = Parameters() params.add(name='s', value=1, vary=True) # scale of data params.add(name='xi', value=50, vary=fit_erfc, min=0) # width of erfc params.add(name='a', value=0, vary=False) # amplitude of erfc if fit_erfc: params['a'].value = 1 params['a'].vary = True for i in range(order): # polynomial coefficients params.add(name='c%d' % i, value=0, vary=True) out = minimize(match_f2, params, method='leastsq', gtol=1.e-5, ftol=1.e-5, xtol=1.e-5, epsfcn=1.e-5, kws=dict(en=energy, mu=mu, f2=f2, e0=e0, em=em, order=order, weight=weight, theta=theta, 
leexiang=leexiang)) opars = out.params.valuesdict() eoff = energy - e0 norm_function = opars['a'] * erfc( (energy - em) / opars['xi']) + opars['c0'] for i in range(order): j = i + 1 attr = 'c%d' % j if attr in opars: norm_function += opars[attr] * eoff**j group.e0 = e0 group.fpp = opars['s'] * mu - norm_function group.mback_params = opars tmp = Group(energy=energy, mu=group.f2 - norm_function, e0=0) # calculate edge step from f2 + norm_function: should be very smooth pre_f2 = preedge(energy, group.f2 + norm_function, e0=e0, nnorm=2, nvict=0) group.edge_step = pre_f2['edge_step'] / opars['s'] pre_fpp = preedge(energy, mu, e0=e0, nnorm=2, nvict=0) group.norm = (mu - pre_fpp['pre_edge']) / group.edge_step
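# In the terms fitted above, the normalization function subtracted from the
# scaled data is
#
#   norm(E) = a * erfc((E - E_m) / xi) + \sum_{j=0}^{order-1} c_j (E - E_0)^j
#
# so that the matched spectrum is  fpp = s * mu(E) - norm(E).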
def setup_model_params(self, jmax_guess=None, vcmax_guess=None, rd_guess=None, hd_guess=None, ea_guess=None, dels_guess=None): """ Setup lmfit Parameters object Parameters ---------- jmax_guess : value initial parameter guess, if nothing is passed, i.e. it is None, then parameter is not fitted vcmax_guess : value initial parameter guess, if nothing is passed, i.e. it is None, then parameter is not fitted rd_guess : value initial parameter guess, if nothing is passed, i.e. it is None, then parameter is not fitted hd_guess : value initial parameter guess, if nothing is passed, i.e. it is None, then parameter is not fitted ea_guess : value initial parameter guess, if nothing is passed, i.e. it is None, then parameter is not fitted dels_guess : value initial parameter guess, if nothing is passed, i.e. it is None, then parameter is not fitted Returns ------- params : object lmfit object containing parameters to fit """ params = Parameters() if jmax_guess is not None: params.add('Jmax', value=jmax_guess, min=0.0) if vcmax_guess is not None: params.add('Vcmax', value=vcmax_guess, min=0.0) if rd_guess is not None: params.add('Rd', value=rd_guess, min=0.0) if ea_guess is not None: params.add('Ea', value=ea_guess, min=0.0) if hd_guess is not None: params.add('Hd', value=hd_guess, vary=False) if dels_guess is not None: params.add('delS', value=dels_guess, min=0.0, max=700.0) return params
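# A short usage sketch: fit Jmax, Vcmax and Rd while holding Hd fixed.
# `fitter` is assumed to be an instance of the class defining
# setup_model_params; residual_fcn, tleaf, ci and anet are placeholders for
# the A-Ci residual function and the measured data.
from lmfit import minimize

params = fitter.setup_model_params(jmax_guess=150.0, vcmax_guess=80.0,
                                   rd_guess=1.5, hd_guess=200000.0)
out = minimize(residual_fcn, params, args=(tleaf, ci, anet))
print(out.params.valuesdict())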
def simple_flux_from_greybody(lambdavector, Trf=None, b=None, Lrf=None, zin=None, ngal=None): ''' Return flux densities at any wavelength of interest (in the range 1-10000 micron), assuming a galaxy (at given redshift) graybody spectral energy distribution (SED), with a power law replacing the Wien part of the spectrum to account for the variability of dust temperatures within the galaxy. The two different functional forms are stitched together by imposing that the two functions and their first derivatives coincide. The code contains the nitty-gritty details explicitly. Inputs: alphain = spectral index of the power law replacing the Wien part of the spectrum, to account for the variability of dust temperatures within a galaxy [default = 2; see Blain 1999 and Blain et al. 2003] betain = spectral index of the emissivity law for the graybody [default = 2; see Hildebrand 1985] Trf = rest-frame temperature [in K; default = 20K] Lrf = rest-frame FIR bolometric luminosity [in L_sun; default = 10^10] zin = galaxy redshift [default = 0.001] lambdavector = array of wavelengths of interest [in microns; default = (24, 70, 160, 250, 350, 500)]; AUTHOR: Lorenzo Moncelsi [[email protected]] HISTORY: 20June2012: created in IDL November2015: converted to Python ''' nwv = len(lambdavector) nuvector = c * 1.e6 / lambdavector # Hz nsed = 1e4 lambda_mod = loggen(1e3, 8.0, nsed) # microns nu_mod = c * 1.e6 / lambda_mod # Hz #Lorenzo's version had: H0=70.5, Omega_M=0.274, Omega_L=0.726 (Hinshaw et al. 2009) #cosmo = Planck15#(H0 = 70.5 * u.km / u.s / u.Mpc, Om0 = 0.273) conversion = 4.0 * np.pi * ( 1.0E-13 * cosmo.luminosity_distance(zin) * 3.08568025E22 )**2.0 / L_sun # 4 * pi * D_L^2 units are L_sun/(Jy x Hz) Lir = Lrf / conversion # Jy x Hz Ain = np.zeros(ngal) + 1.0e-36 #good starting parameter betain = np.zeros(ngal) + b alphain = np.zeros(ngal) + 2.0 fit_params = Parameters() fit_params.add('Ain', value=Ain) #fit_params.add('Tin', value= Trf/(1.+zin), vary = False) #fit_params.add('betain', value= b, vary = False) #fit_params.add('alphain', value= alphain, vary = False) #pdb.set_trace() #THE LM FIT IS HERE #Pfin = minimize(sedint, fit_params, args=(nu_mod,Lir.value,ngal)) Pfin = minimize(sedint, fit_params, args=(nu_mod, Lir.value, ngal, Trf / (1. + zin), b, alphain)) #pdb.set_trace() flux_mJy = sed(Pfin.params, nuvector, ngal, Trf / (1. + zin), b, alphain) return flux_mJy
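# Note that `Ain` above is passed as a length-ngal array, while lmfit
# Parameter values are nominally scalar; an equivalent per-galaxy
# formulation fits one scalar amplitude at a time (sedint_single is a
# hypothetical one-galaxy version of the sedint residual):
from lmfit import Parameters, minimize

amplitudes = []
for i in range(ngal):
    p = Parameters()
    p.add('Ain', value=1.0e-36, min=0)
    res = minimize(sedint_single, p,
                   args=(nu_mod, Lir.value[i], Trf / (1. + zin), b, alphain[i]))
    amplitudes.append(res.params['Ain'].value)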
def plot(): set_plot_properties() # change style cs = palettable.colorbrewer.qualitative.Set1_8.mpl_colors with open('/home/lc585/qsosed/input.yml', 'r') as f: parfile = yaml.load(f) fittingobj = load(parfile) wavlen = fittingobj.get_wavlen() lin = fittingobj.get_lin() galspc = fittingobj.get_galspc() ext = fittingobj.get_ext() galcnt = fittingobj.get_galcnt() ignmin = fittingobj.get_ignmin() ignmax = fittingobj.get_ignmax() ztran = fittingobj.get_ztran() lyatmp = fittingobj.get_lyatmp() lybtmp = fittingobj.get_lybtmp() lyctmp = fittingobj.get_lyctmp() whmin = fittingobj.get_whmin() whmax = fittingobj.get_whmax() qsomag = fittingobj.get_qsomag() flxcorr = fittingobj.get_flxcorr() cosmo = fittingobj.get_cosmo() params = Parameters() params.add('plslp1', value = -0.478) params.add('plslp2', value = -0.199) params.add('plbrk', value = 2.40250) params.add('bbt', value = 1.30626) params.add('bbflxnrm', value = 2.673) params.add('elscal', value = 1.240) params.add('scahal',value = 0.713) params.add('galfra',value = 0.0) params.add('bcnrm',value = 0.135) params.add('ebv',value = 0.0) params.add('imod',value = 18.0) # Load median magnitudes with open('/home/lc585/qsosed/sdss_ukidss_wise_medmag_ext.dat') as f: datz = np.loadtxt(f, usecols=(0,)) datz = datz[:-5] # Load filters ftrlst = fittingobj.get_ftrlst()[2:-2] lameff = fittingobj.get_lameff()[2:-2] bp = fittingobj.get_bp()[2:-2] # these are in ab and data is in vega dlam = fittingobj.get_bp()[2:-2] zromag = fittingobj.get_zromag()[2:-2] with open('ftgd_dr7.dat') as f: ftgd = np.loadtxt(f, skiprows=1, usecols=(1,2,3,4,5,6,7,8,9)) modarr = residual(params, parfile, wavlen, datz, lin, bp, dlam, zromag, galspc, ext, galcnt, ignmin, ignmax, ztran, lyatmp, lybtmp, lyctmp, ftrlst, whmin, whmax, cosmo, flxcorr, qsomag, ftgd) fname = '/home/lc585/qsosed/sdss_ukidss_wise_medmag_ext.dat' datarr = np.genfromtxt(fname, usecols=(5,7,9,11,13,15,17,19,21)) datarr[datarr < 0.0] = np.nan datarr = datarr[:-5, :] # remove less than lyman break col1 = np.arange(8) col2 = col1 + 1 col_label = ['$r$ - $i$', '$i$ - $z$', '$z$ - $Y$', '$Y$ - $J$', '$J$ - $H$', '$H$ - $K$', '$K$ - $W1$', '$W1$ - $W2$'] df = get_data() df = df[(df.z_HW > 1) & (df.z_HW < 3)] colstr1 = ['rVEGA', 'iVEGA', 'zVEGA', 'YVEGA', 'JVEGA', 'HVEGA', 'KVEGA', 'W1VEGA'] colstr2 = ['iVEGA', 'zVEGA', 'YVEGA', 'JVEGA', 'HVEGA', 'KVEGA', 'W1VEGA', 'W2VEGA'] ylims = [[0, 0.6], [-0.1, 0.5], [-0.1, 0.5], [-0.1, 0.5], [0.2, 0.9], [0.2, 0.9], [0.5, 1.6], [0.8, 1.5]] fig, axs = plt.subplots(4, 2, figsize=figsize(1, vscale=2), sharex=True) for i, ax in enumerate(axs.flatten()): #data definition ydat = datarr[:, col1[i]] - datarr[:, col2[i]] ax.scatter(datz, ydat, color='black', s=5, label='Data') ax.plot(datz, modarr[:,col1[i]] - modarr[:, col2[i]], color=cs[1], label='Model') # ax.scatter(df.z_HW, df[colstr1[i]] - df[colstr2[i]], s=1, alpha=0.1) ax.set_title(col_label[i], size=10) ax.set_ylim(ylims[i]) ax.set_xlim(0.75, 3.25) axs[0, 0].legend(bbox_to_anchor=(0.7, 0.99), bbox_transform=plt.gcf().transFigure, fancybox=True, shadow=True, scatterpoints=1, ncol=2) axs[3, 0].set_xlabel(r'Redshift $z$') axs[3, 1].set_xlabel(r'Redshift $z$') fig.tight_layout() fig.subplots_adjust(wspace=0.2, hspace=0.15, top=0.93) fig.savefig('/home/lc585/thesis/figures/chapter05/sed_color_plot.pdf') plt.show() return None
class FeffPathGroup(Group):
    def __init__(self, filename=None, _larch=None, label=None, s02=None,
                 degen=None, e0=None, ei=None, deltar=None, sigma2=None,
                 third=None, fourth=None, **kws):
        kwargs = dict(name='FeffPath: %s' % filename)
        kwargs.update(kws)
        Group.__init__(self, **kwargs)
        self._larch = _larch
        self.filename = filename
        self.params = None
        self.label = label
        self.spline_coefs = None
        def_degen = 1
        self._feffdat = None
        if filename is not None:
            self._feffdat = FeffDatFile(filename=filename, _larch=_larch)
            self.geom = self._feffdat.geom
            def_degen = self._feffdat.degen
            if self.label is None:
                self.label = self.__geom2label()
        self.degen = def_degen if degen is None else degen
        self.s02 = 1.0 if s02 is None else s02
        self.e0 = 0.0 if e0 is None else e0
        self.ei = 0.0 if ei is None else ei
        self.deltar = 0.0 if deltar is None else deltar
        self.sigma2 = 0.0 if sigma2 is None else sigma2
        self.third = 0.0 if third is None else third
        self.fourth = 0.0 if fourth is None else fourth
        self.k = None
        self.chi = None
        if self._feffdat is not None:
            self.create_spline_coefs()

    def __geom2label(self):
        """generate label by hashing path geometry"""
        rep = []
        if self.geom is not None:
            for atom in self.geom:
                rep.extend(atom)
        if self._feffdat is not None:
            rep.append(self._feffdat.degen)
            rep.append(self._feffdat.reff)
        for attr in ('s02', 'e0', 'ei', 'deltar', 'sigma2', 'third', 'fourth'):
            rep.append(getattr(self, attr, '_'))
        s = "|".join([str(i) for i in rep])
        return "p%s" % (b32hash(s)[:8].lower())

    def __copy__(self):
        return FeffPathGroup(filename=self.filename, _larch=self._larch,
                             s02=self.s02, degen=self.degen, e0=self.e0,
                             ei=self.ei, deltar=self.deltar,
                             sigma2=self.sigma2, third=self.third,
                             fourth=self.fourth)

    def __deepcopy__(self, memo):
        return FeffPathGroup(filename=self.filename, _larch=self._larch,
                             s02=self.s02, degen=self.degen, e0=self.e0,
                             ei=self.ei, deltar=self.deltar,
                             sigma2=self.sigma2, third=self.third,
                             fourth=self.fourth)

    @property
    def reff(self):
        return self._feffdat.reff

    @reff.setter
    def reff(self, val):
        pass

    @property
    def nleg(self):
        return self._feffdat.nleg

    @nleg.setter
    def nleg(self, val):
        pass

    @property
    def rmass(self):
        return self._feffdat.rmass

    @rmass.setter
    def rmass(self, val):
        pass

    def __repr__(self):
        if self.filename is not None:
            return '<FeffPath Group %s>' % self.filename
        return '<FeffPath Group (empty)>'

    def create_path_params(self):
        """create Path Parameters within the current fiteval"""
        self.params = Parameters(asteval=self._larch.symtable._sys.fiteval)
        if self.label is None:
            self.label = self.__geom2label()
        self.store_feffdat()
        for pname in PATH_PARS:
            val = getattr(self, pname)
            attr = 'value'
            if isinstance(val, six.string_types):
                attr = 'expr'
            kws = {'vary': False, attr: val}
            parname = fix_varname(PATHPAR_FMT % (pname, self.label))
            self.params.add(parname, **kws)

    def create_spline_coefs(self):
        """pre-calculate spline coefficients for feff data"""
        self.spline_coefs = {}
        fdat = self._feffdat
        self.spline_coefs['pha'] = UnivariateSpline(fdat.k, fdat.pha, s=0)
        self.spline_coefs['amp'] = UnivariateSpline(fdat.k, fdat.amp, s=0)
        self.spline_coefs['rep'] = UnivariateSpline(fdat.k, fdat.rep, s=0)
        self.spline_coefs['lam'] = UnivariateSpline(fdat.k, fdat.lam, s=0)

    def store_feffdat(self):
        """stores data about this Feff path in the fiteval symbol table
        for use as `reff` and in sigma2 calcs
        """
        fiteval = self._larch.symtable._sys.fiteval
        fdat = self._feffdat
        fiteval.symtable['feffpath'] = fdat
        fiteval.symtable['reff'] = fdat.reff
        return fiteval

    def __path_params(self, **kws):
        """evaluate path parameter values.

        Returns (degen, s02, e0, ei, deltar, sigma2, third, fourth)
        """
        # put 'reff' and '_feffdat' into the symbol table so that
        # they can be used in constraint expressions, and get the
        # fiteval evaluator
        self.store_feffdat()
        if self.params is None:
            self.create_path_params()
        out = []
        for pname in PATH_PARS:
            val = kws.get(pname, None)
            parname = fix_varname(PATHPAR_FMT % (pname, self.label))
            if val is None:
                val = self.params[parname]._getval()
            out.append(val)
        return out

    def path_paramvals(self, **kws):
        (deg, s02, e0, ei, delr, ss2, c3, c4) = self.__path_params(**kws)
        return dict(degen=deg, s02=s02, e0=e0, ei=ei, deltar=delr,
                    sigma2=ss2, third=c3, fourth=c4)

    def report(self):
        "return text report of parameters"
        (deg, s02, e0, ei, delr, ss2, c3, c4) = self.__path_params()
        geomlabel = '     atom      x        y        z       ipot'
        geomformat = '    %4s      % .4f, % .4f, % .4f  %i'
        out = ['   Path %s, Feff.dat file = %s' % (self.label, self.filename)]
        out.append(geomlabel)
        for atsym, iz, ipot, amass, x, y, z in self.geom:
            s = geomformat % (atsym, x, y, z, ipot)
            if ipot == 0:
                s = "%s (absorber)" % s
            out.append(s)
        stderrs = {}
        out.append('     {:7s}= {:.5f}'.format('reff', self._feffdat.reff))
        for pname in ('degen', 's02', 'e0', 'r', 'deltar', 'sigma2',
                      'third', 'fourth', 'ei'):
            val = strval = getattr(self, pname, 0)
            parname = fix_varname(PATHPAR_FMT % (pname, self.label))
            std = None
            if pname == 'r':
                parname = fix_varname(PATHPAR_FMT % ('deltar', self.label))
                par = self.params.get(parname, None)
                val = par.value + self._feffdat.reff
                strval = 'reff + %s' % getattr(self, 'deltar', 0)
                std = par.stderr
            else:
                par = self.params.get(parname, None)
                if par is not None:
                    val = par.value
                    std = par.stderr
            if std is None or std <= 0:
                svalue = "{: 5f}".format(val)
            else:
                svalue = "{: 5f} +/- {:5f}".format(val, std)
            if pname == 's02':
                pname = 'n*s02'
            svalue = "     {:7s}= {:s}".format(pname, svalue)
            if isinstance(strval, six.string_types):
                svalue = "{:s}  '{:s}'".format(svalue, strval)
            if val == 0 and pname in ('third', 'fourth', 'ei'):
                continue
            out.append(svalue)
        return '\n'.join(out)

    def _calc_chi(self, k=None, kmax=None, kstep=None, degen=None, s02=None,
                  e0=None, ei=None, deltar=None, sigma2=None, third=None,
                  fourth=None, debug=False, interp='cubic', **kws):
        """calculate chi(k) with the provided parameters"""
        fdat = self._feffdat
        if fdat.reff < 0.05:
            self._larch.writer.write('reff is too small to calculate chi(k)')
            return
        # make sure we have a k array
        if k is None:
            if kmax is None:
                kmax = 30.0
            kmax = min(max(fdat.k), kmax)
            if kstep is None:
                kstep = 0.05
            k = kstep * np.arange(int(1.01 + kmax / kstep), dtype='float64')

        reff = fdat.reff
        # get values for all the path parameters
        (degen, s02, e0, ei, deltar, sigma2, third, fourth) = \
            self.__path_params(degen=degen, s02=s02, e0=e0, ei=ei,
                               deltar=deltar, sigma2=sigma2,
                               third=third, fourth=fourth)

        # create e0-shifted energy and k, careful to look for |e0| ~= 0.
        en = k * k - e0 * ETOK
        if min(abs(en)) < SMALL:
            try:
                en[np.where(abs(en) < 2 * SMALL)] = SMALL
            except ValueError:
                pass
        # q is the e0-shifted wavenumber
        q = np.sign(en) * np.sqrt(abs(en))

        # lookup Feff.dat values (pha, amp, rep, lam)
        if interp.startswith('lin'):
            pha = np.interp(q, fdat.k, fdat.pha)
            amp = np.interp(q, fdat.k, fdat.amp)
            rep = np.interp(q, fdat.k, fdat.rep)
            lam = np.interp(q, fdat.k, fdat.lam)
        else:
            pha = self.spline_coefs['pha'](q)
            amp = self.spline_coefs['amp'](q)
            rep = self.spline_coefs['rep'](q)
            lam = self.spline_coefs['lam'](q)

        if debug:
            self.debug_k = q
            self.debug_pha = pha
            self.debug_amp = amp
            self.debug_rep = rep
            self.debug_lam = lam

        # p = complex wavenumber, and its square:
        pp = (rep + 1j / lam)**2 + 1j * ei * ETOK
        p = np.sqrt(pp)

        # the xafs equation:
        cchi = np.exp(-2 * reff * p.imag - 2 * pp * (sigma2 - pp * fourth / 3) +
                      1j * (2 * q * reff + pha +
                            2 * p * (deltar - 2 * sigma2 / reff -
                                     2 * pp * third / 3)))
        cchi = degen * s02 * amp * cchi / (q * (reff + deltar)**2)
        cchi[0] = 2 * cchi[1] - cchi[2]
        # outputs:
        self.k = k
        self.p = p
        self.chi = cchi.imag
        self.chi_imag = -cchi.real
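# Usage sketch for FeffPathGroup, assuming xraylarch is installed and a Feff
# output file is at hand ('feff0001.dat' is a placeholder name). The path
# parameters are plain numbers here, so no fit is involved; _calc_chi fills
# path.k and path.chi, and report() prints the parameter summary.
from larch import Interpreter

_larch = Interpreter()
path = FeffPathGroup(filename='feff0001.dat', _larch=_larch,
                     s02=0.9, sigma2=0.003, deltar=-0.01)
path._calc_chi(kmax=18.0, kstep=0.05)
print(path.report())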
# =============================================================================
# get data
# =============================================================================
path_data = "../../data/"
df_fahey = pd.read_csv(path_data + "fahey_data.csv")

data_arm = df_fahey[df_fahey.name == "Arm"]
data_cl13 = df_fahey[df_fahey.name == "Cl13"]

# get model
sim = Sim(d, virus_model=vir_model_const)

# =============================================================================
# set parameters
# =============================================================================
params = Parameters()
params.add('death_tr1', value=0.05, min=0, max=0.2)
params.add('death_tfhc', value=0.01, min=0, max=0.2)
params.add('prolif_tr1', value=2.8, min=2, max=4.0)
params.add('prolif_tfhc', value=4.1, min=3, max=5.0)
params.add("pth1", value=0.06, min=0, max=1.0)
params.add("ptfh", value=0.04, min=0, max=1.0)
params.add("ptr1", value=0.89, min=0, max=1.0)
params.add("ptfhc", expr="1.0-pth1-ptfh-ptr1")
params.add("r_mem", value=0.01, min=0, max=0.2)
params.add("deg_myc", value=0.32, min=0.28, max=0.35)

# =============================================================================
# run fitting procedure
# =============================================================================
out = minimize(fit_fun, params, args=(sim, data_arm, data_cl13))
out_values = out.params.valuesdict()
print(out_values)
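# 'fit_fun' is defined elsewhere; for the minimize(...) call above, a minimal
# residual function would look like the sketch below. The sim.simulate()
# method and the 'time'/'value' column names are assumptions for
# illustration, not the original implementation.
def fit_fun(params, sim, data_arm, data_cl13):
    sim.params.update(params.valuesdict())            # assumed: Sim exposes a dict of rates
    residuals = []
    for data in (data_arm, data_cl13):
        model = sim.simulate(time=data.time.values)   # assumed API
        residuals.append(model - data.value.values)   # assumed column names
    return np.concatenate(residuals)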
class LMFitModel():
    """
    Wrapper class for the lmfit package. Acts both as a module usable in
    scripts and as the core logic class for the lmfit part in kMap.py. For
    an example of how to use it, please see the 'test_PTCDA' test in the
    'kmap.tests.test_lmfit' file.

    ATTENTION: Please do not set any attributes manually. Use the
    appropriate 'set_xxx' method instead.
    """

    def __init__(self, sliced_data, orbitals):
        """
        Args:
            sliced_data (SlicedData): A single SlicedData object.
            orbitals (OrbitalData or list): A single OrbitalData object or a
                list of OrbitalData objects.
                ATTENTION: An Orbital object is NOT sufficient. Please use
                the OrbitalData wrapper class instead.
        """
        self.axis = None
        self.crosshair = None
        self.symmetrization = 'no'
        self.background_equation = ['0', []]
        self.Ak_type = 'no'
        self.polarization = 'p'
        self.slice_policy = [0, [0], False]
        self.method = ['leastsq', 1e-12]
        self.region = ['all', False]

        self._set_sliced_data(sliced_data)
        self._add_orbitals(orbitals)
        self._set_parameters()

    def set_crosshair(self, crosshair):
        """A setter method to set a custom crosshair. If none is set when a
        region restriction is applied, a CrosshairAnnulusModel will be
        created.

        Args:
            crosshair (CrosshairModel): A crosshair model for cutting the
                data for any region restriction.
                ATTENTION: The passed CrosshairModel has to support the
                region restriction you want to use.
        """
        if crosshair is None or isinstance(crosshair, CrosshairModel):
            self.crosshair = crosshair
        else:
            raise TypeError('crosshair has to be of type %s (is %s)' % (
                type(CrosshairModel), type(crosshair)))

    def set_axis(self, axis):
        """A setter method to set an axis for the interpolation onto a
        common grid. Default is the x-axis of the first slice in the list
        of slices chosen to be fitted.

        Args:
            axis (np.array): 1D array defining the common axis (and grid,
                as only square kmaps are supported) for the subtraction.
        """
        self.axis = axis

    def set_axis_by_step_size(self, range_, step_size):
        """A convenience setter method to set an axis by defining the range
        and the step size.

        Args:
            range_ (list): A list of min and max value.
            step_size (float): A number denoting the step size.
        """
        num = step_size_to_num(range_, step_size)
        self.set_axis(axis_from_range(range_, num))

    def set_axis_by_num(self, range_, num):
        """A convenience setter method to set an axis by defining the range
        and the number of grid points.

        Args:
            range_ (list): A list of min and max value.
            num (int): An integer denoting the number of grid points.
        """
        self.set_axis(axis_from_range(range_, num))

    def set_symmetrization(self, symmetrization):
        """A setter method to set the type of symmetrization for the
        orbital kmaps. Default is 'no'.

        Args:
            symmetrization (str): See 'get_kmap' from
                'kmap.library.orbital.py' for information.
        """
        self.symmetrization = symmetrization

    def set_region(self, region, inverted=False):
        """A setter method to set the region restriction for the lmfit
        process. Default is no region restriction ('all').

        Args:
            region (str): Supports all regions the crosshair model you
                supplied supports. See there for documentation (default is
                a CrosshairAnnulusModel).
            inverted (bool): See your CrosshairModel for documentation.
        """
        self.region = [region, inverted]

        if region != 'all' and self.crosshair is None:
            self.crosshair = CrosshairAnnulusModel()

    def set_polarization(self, Ak_type, polarization):
        """A setter method to set the type of polarization for the orbital
        kmaps. Default is 'no' and 'p'.

        Args:
            Ak_type (str): See 'get_kmap' from 'kmap.library.orbital.py'
                for information.
            polarization (str): See 'get_kmap' from
                'kmap.library.orbital.py' for information.
        """
        self.Ak_type = Ak_type
        self.polarization = polarization

    def set_slices(self, slice_indices, axis_index=0, combined=False):
        """A setter method to choose the slices to be fitted the next time
        'fit()' is called. Default is [0], 0 and False.

        Args:
            slice_indices (int or list or str): Either one or more indices
                for the slices to be fitted next. Pass 'all' to use all
                slices along this axis.
            axis_index (int): Which axis of the SlicedData is used as the
                slice axis.
            combined (bool): Whether to fit all slices individually or to
                add all the slices together for one fit instead.
        """
        if isinstance(slice_indices, str) and slice_indices == 'all':
            self.slice_policy = [axis_index,
                                 range(self.sliced_data.axes[axis_index].num),
                                 combined]
        elif isinstance(slice_indices, list):
            self.slice_policy = [axis_index, slice_indices, combined]
        elif isinstance(slice_indices, range):
            self.slice_policy = [axis_index, list(slice_indices), combined]
        else:
            self.slice_policy = [axis_index, [slice_indices], combined]

    def set_fit_method(self, method, xtol=1e-7):
        """A setter method to set the method and the tolerance for the
        fitting process. Default is 'leastsq' and 1e-7.

        Args:
            method (str): See the documentation for the lmfit module.
            xtol (float): See the documentation for the lmfit module.
        """
        self.method = [method, xtol]

    def set_background_equation(self, equation):
        """A setter method to set a custom background equation. Default
        is '0'.

        Args:
            equation (str): An equation used to calculate the background
                profile. Can use python builtins (e.g. abs()) and basic
                methods from the numpy module (prefixed by 'np.'; e.g.
                np.sqrt()). Can contain variables to be fitted. Variable
                names can only contain lower or upper case letters,
                underscores and numbers, and cannot start with a number.
                The variables 'x' and 'y' are special and denote the x and
                y axis, respectively. No variable already used outside the
                background equation (like phi) can be used. Some examples
                of valid variable names: x_s, x2, x_2, foo,
                this_is_a_valid_variable.
                Each variable starts with the following default values:
                value=0, min=-99999.9, max=99999.9, vary=False, expr=None.
                The equation will be parsed by eval. Please don't inject
                any code, as it would be really easy to do so. There are no
                safeguards in place whatsoever, so we (have to) trust you.
                Thanks, D.B.
        """
        try:
            compile(equation, '', 'exec')
        except SyntaxError:
            raise ValueError(
                'Equation is not parseable. Check for syntax errors.')

        # Pattern matches all numpy, math and builtin methods
        clean_pattern = 'np\\.[a-z1-9\\_]+|math\\.[a-z1-9\\_]+'
        for builtin in dir(builtins):
            clean_pattern += '|' + str(builtin)
        cleaned_equation = re.sub(clean_pattern, '', equation)

        # Pattern matches all identifiers, including trailing digits
        variable_pattern = '[a-zA-Z\\_]+[0-9]*'
        variables = list(set(re.findall(variable_pattern, cleaned_equation)))

        # x and y need special treatment
        if 'x' in variables:
            variables.remove('x')
        if 'y' in variables:
            variables.remove('y')

        new_variables = np.setdiff1d(variables, self.background_equation[1])
        self.background_equation = [equation, variables]

        for variable in new_variables:
            self.parameters.add(variable, value=0, min=-99999.9,
                                max=99999.9, vary=False, expr=None)

        return [self.parameters[variable] for variable in new_variables]

    def edit_parameter(self, parameter, *args, **kwargs):
        """A setter method to edit fitting settings for one parameter.
        Use this method to enable a parameter for fitting (vary=True).

        Args:
            parameter (str): Name of the parameter to be edited.
            *args & **kwargs (): Are passed to the 'parameter.set' method
                of the lmfit module. See there for more documentation.
        """
        self.parameters[parameter].set(*args, **kwargs)

    def fit(self):
        """Calling this method will trigger an lmfit with the current
        settings.

        Returns:
            (list): A list of MinimizerResults. One for each slice fitted.
        """
        lmfit_padding = float(config.get_key('lmfit', 'padding'))

        for parameter in self.parameters.values():
            if parameter.vary and parameter.value <= parameter.min:
                padded_value = parameter.min + lmfit_padding
                print("WARNING: Initial value for parameter '%s' had to be "
                      'corrected to %f (was %f)' % (
                          parameter.name, padded_value, parameter.value))
                parameter.value = padded_value

        results = []
        for index in self.slice_policy[1]:
            slice_ = self.get_sliced_kmap(index)
            result = minimize(self._chi2,
                              copy.deepcopy(self.parameters),
                              kws={'slice_': slice_},
                              nan_policy='omit',
                              method=self.method[0],
                              xtol=self.method[1])
            results.append([index, result])

        return results

    def transpose(self, constant_axis):
        axis_order = transpose_axis_order(constant_axis)
        self.sliced_data.transpose(axis_order)

    def get_settings(self):
        settings = {'crosshair': self.crosshair,
                    'background': self.background_equation,
                    'symmetrization': self.symmetrization,
                    'polarization': [self.Ak_type, self.polarization],
                    'slice_policy': self.slice_policy,
                    'method': self.method,
                    'region': self.region,
                    'axis': self.axis}
        return copy.deepcopy(settings)

    def set_settings(self, settings):
        self.set_crosshair(settings['crosshair'])
        self.set_background_equation(settings['background'][0])
        self.set_polarization(*settings['polarization'])
        slice_policy = settings['slice_policy']
        self.set_slices(slice_policy[1], slice_policy[0], slice_policy[2])
        self.set_region(*settings['region'])
        self.set_symmetrization(settings['symmetrization'])
        self.set_fit_method(*settings['method'])
        self.set_axis(settings['axis'])

    def get_sliced_kmap(self, slice_index):
        axis_index, slice_indices, is_combined = self.slice_policy

        if is_combined:
            kmaps = []
            for index in slice_indices:
                kmaps.append(
                    self.sliced_data.slice_from_index(index, axis_index))
            kmap = np.nansum(kmaps, axis=axis_index)
        else:
            kmap = self.sliced_data.slice_from_index(slice_index, axis_index)

        if self.axis is not None:
            kmap = kmap.interpolate(self.axis, self.axis)
        else:
            self.axis = kmap.x_axis

        return kmap

    def get_orbital_kmap(self, ID, param=None):
        if param is None:
            param = self.parameters

        orbital = self.ID_to_orbital(ID)
        kmap = orbital.get_kmap(E_kin=param['E_kin'].value,
                                dk=(self.axis, self.axis),
                                phi=param['phi_' + str(ID)].value,
                                theta=param['theta_' + str(ID)].value,
                                psi=param['psi_' + str(ID)].value,
                                alpha=param['alpha'].value,
                                beta=param['beta'].value,
                                Ak_type=self.Ak_type,
                                polarization=self.polarization,
                                symmetrization=self.symmetrization)
        return kmap

    def get_weighted_sum_kmap(self, param=None, with_background=True):
        if param is None:
            param = self.parameters

        orbital_kmaps = []
        for orbital in self.orbitals:
            ID = orbital.ID
            weight = param['w_' + str(ID)].value
            kmap = weight * self.get_orbital_kmap(ID, param)
            orbital_kmaps.append(kmap)

        orbital_kmap = np.nansum(orbital_kmaps)

        if with_background:
            variables = {}
            for variable in self.background_equation[1]:
                variables.update({variable: param[variable].value})
            background = self._get_background(variables)
            return orbital_kmap + background
        else:
            return orbital_kmap

    def get_residual(self, slice_, param=None, weight_sum_data=None):
        if param is None:
            param = self.parameters

        if weight_sum_data is None:
            orbital_kmap = self.get_weighted_sum_kmap(param)
        else:
            orbital_kmap = weight_sum_data

        if isinstance(slice_, int):
            sliced_kmap = self.get_sliced_kmap(slice_)
            residual = sliced_kmap - orbital_kmap
        else:
            residual = slice_ - orbital_kmap

        residual = self._cut_region(residual)
        return residual

    def get_reduced_chi2(self, slice_index, weight_sum_data=None):
        n = self._get_degrees_of_freedom()
        residual = self.get_residual(
            slice_index, weight_sum_data=weight_sum_data)
        reduced_chi2 = get_reduced_chi2(residual.data, n)
        return reduced_chi2

    def ID_to_orbital(self, ID):
        for orbital in self.orbitals:
            if orbital.ID == ID:
                return orbital
        return None

    def _chi2(self, param=None, slice_=0):
        if param is None:
            param = self.parameters

        residual = self.get_residual(slice_, param)
        return residual.data

    def _get_degrees_of_freedom(self):
        n = 0
        for parameter in self.parameters.values():
            if parameter.vary:
                n += 1
        return n

    def _get_background(self, variables=None):
        # avoid a mutable default argument; callers pass a dict of
        # background-variable values
        if variables is None:
            variables = {}
        variables.update({'x': self.axis, 'y': np.array([self.axis]).T})
        background = eval(self.background_equation[0], None, variables)
        return background

    def _cut_region(self, data):
        if self.crosshair is None or self.region[0] == 'all':
            return data
        else:
            return self.crosshair.cut_from_data(
                data, region=self.region[0], inverted=self.region[1])

    def _set_sliced_data(self, sliced_data):
        if isinstance(sliced_data, SlicedData):
            self.sliced_data = sliced_data
        else:
            raise TypeError('sliced_data has to be of type %s (is %s)' % (
                type(SlicedData), type(sliced_data)))

    def _add_orbitals(self, orbitals):
        if (isinstance(orbitals, list) and
                all(isinstance(element, OrbitalData)
                    for element in orbitals)):
            self.orbitals = orbitals
        elif isinstance(orbitals, OrbitalData):
            self.orbitals = [orbitals]
        else:
            raise TypeError(
                'orbital has to be of (or list of) type %s (is %s)' % (
                    type(OrbitalData), type(orbitals)))

    def _set_parameters(self):
        self.parameters = Parameters()

        for orbital in self.orbitals:
            ID = orbital.ID
            self.parameters.add('w_' + str(ID), value=1, min=0,
                                vary=False, expr=None)
            for angle in ['phi_', 'theta_', 'psi_']:
                self.parameters.add(angle + str(ID), value=0,
                                    min=-90, max=90, vary=False, expr=None)

        # LMFit doesn't work when the initial value is exactly the same
        # as the minimum value. For this reason the initial value will
        # be set ever so slightly above 0 to circumvent this problem.
        self.parameters.add('c', value=0, min=0, vary=False, expr=None)
        self.parameters.add('E_kin', value=30, min=5, max=150,
                            vary=False, expr=None)

        for angle in ['alpha', 'beta']:
            self.parameters.add(angle, value=0, min=-90, max=90,
                                vary=False, expr=None)
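# Usage sketch for LMFitModel following the setter-based workflow described
# in the docstrings above. 'sliced_data' and 'orbital_data' stand in for
# SlicedData and OrbitalData objects loaded elsewhere, and the orbital is
# assumed to have ID 0.
model = LMFitModel(sliced_data, orbital_data)
model.set_axis_by_step_size([-3.0, 3.0], step_size=0.04)
model.set_polarization('toroid', 'p')
model.set_slices('all', combined=False)
model.set_fit_method('leastsq', xtol=1e-12)
model.edit_parameter('E_kin', value=27.2)   # fix the kinetic energy
model.edit_parameter('w_0', vary=True)      # fit the weight of orbital 0
results = model.fit()                       # [[slice_index, MinimizerResult], ...]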
def _fit_punkt(n):
    """
    :type n: int
    :return: Fitted amplitude and fitted or smoothed phase in the region
        around the resonance frequency +/- offset
    :rtype: list
    """
    if not _weiter:
        return None

    # ----------------------------------------
    # ------------ fit AMPLITUDE -------------
    # ----------------------------------------
    amplitude = savgol_filter(_bereich(_amplitude_voll[n]),
                              _par.filter_breite, _par.filter_ordnung)
    index_max = numpy.argmax(amplitude)
    start_freq = _frequenz[index_max]
    start_amp = amplitude[index_max]
    # The first value in the window is already a good estimate of the background
    start_off = amplitude[0]

    # Fit parameters for the fit function
    par_amp = Parameters()
    par_amp.add('resfreq', value=start_freq, min=_par.fmin, max=_par.fmax)
    par_amp.add('amp', value=start_amp, min=_par.amp_min, max=_par.amp_max)
    par_amp.add('guete',
                value=0.5 * (_par.amp.guete_max + _par.amp.guete_min),
                min=_par.amp.guete_min, max=_par.amp.guete_max)
    par_amp.add('untergrund', value=start_off,
                min=_par.amp.off_min, max=_par.amp.off_max)
    amp = _mod_amp.fit(data=amplitude, freq=_frequenz, params=par_amp,
                       fit_kws=_fit_genauigkeit)
    _puls(n)

    # If no phase is to be fitted:
    if _mod_ph is KEIN_FIT:
        return Ergebnis(amp=amp.params['amp'].value,
                        amp_fhlr=amp.params['amp'].stderr,
                        resfreq=amp.params['resfreq'].value,
                        resfreq_fhlr=amp.params['resfreq'].stderr,
                        guete_amp=amp.params['guete'].value,
                        guete_amp_fhlr=amp.params['guete'].stderr,
                        untergrund=amp.best_values['untergrund'])

    # Resonance frequency
    resfreq = amp.best_values['resfreq']

    # ----------------------------------------
    # -------------- fit PHASE ---------------
    # ----------------------------------------
    # Half the frequency width of the phase offset;
    # +df so the fit also works for offset = 0
    halb = abs(_par.phase_versatz) + 10 * _par.df
    von = resfreq - halb  # lower offset bound
    bis = resfreq + halb  # upper offset bound
    if von < _par.fmin:
        # The resonance frequency lies too far to the left: shift the
        # selection window to the right, but not beyond the frequency range
        bis = min(bis - von + _par.fmin, _par.fmax)
        von = _par.fmin
    elif bis > _par.fmax:
        # The resonance lies too far to the right:
        # shift, but not past the left edge
        von = max(von - bis + _par.fmax, _par.fmin)
        bis = _par.fmax

    # Trim the phase
    index_von = index_freq(_par, von)
    index_bis = index_freq(_par, bis)
    wahl_phase = _bereich(_phase_voll[n])[index_von:index_bis]

    if _mod_ph is GLAETTEN:
        # Smoothing only:
        phase = savgol_filter(wahl_phase, _par.filter_breite,
                              _par.filter_ordnung)
        return Ergebnis(amp=amp.params['amp'].value,
                        amp_fhlr=amp.params['amp'].stderr,
                        resfreq=amp.params['resfreq'].value,
                        resfreq_fhlr=amp.params['resfreq'].stderr,
                        guete_amp=amp.params['guete'].value,
                        guete_amp_fhlr=amp.params['guete'].stderr,
                        untergrund=amp.best_values['untergrund'],
                        phase=randwert(phase, _par.phase_versatz))
    else:
        # Fit parameters for the fit function
        par_ph = Parameters()
        par_ph.add('resfreq', value=resfreq, min=von, max=bis)
        par_ph.add('guete', value=3,
                   min=_par.phase.guete_min, max=_par.phase.guete_max)
        par_ph.add('rel', value=200,
                   min=_par.phase.off_min, max=_par.phase.off_max)
        ph = _mod_ph.fit(
            data=wahl_phase,
            freq=_frequenz[index_von:index_bis],
            params=par_ph,
            method='cg'  # 'differential_evolution' also works well
            # TODO fit_kws=self.fit_genauigkeit
        )
        return Ergebnis(amp=amp.params['amp'].value,
                        amp_fhlr=amp.params['amp'].stderr,
                        resfreq=amp.params['resfreq'].value,
                        resfreq_fhlr=amp.params['resfreq'].stderr,
                        guete_amp=amp.params['guete'].value,
                        guete_amp_fhlr=amp.params['guete'].stderr,
                        untergrund=amp.best_values['untergrund'],
                        phase=randwert(ph.best_fit, _par.phase_versatz),
                        guete_ph=ph.best_values['guete'],
                        phase_rel=ph.best_values['rel'],
                        phase_fhlr=ph.params['resfreq'].stderr)
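# Standalone sketch of the amplitude fit above, stripped of the module-level
# state (_mod_amp, _par, ...). The driven-oscillator amplitude model here is
# an assumption about what _mod_amp wraps; only the parameter names and the
# bounded-Parameters setup mirror the code above.
import numpy
from lmfit import Model, Parameters

def resonance_amp(freq, resfreq, amp, guete, untergrund):
    # Lorentzian-like resonance amplitude with a constant background
    x = freq / resfreq
    return untergrund + amp / numpy.sqrt((1 - x**2)**2 + (x / guete)**2)

freq = numpy.linspace(30e3, 40e3, 500)
data = resonance_amp(freq, resfreq=34.2e3, amp=1.0, guete=25.0, untergrund=0.05)

par_amp = Parameters()
par_amp.add('resfreq', value=freq[numpy.argmax(data)], min=freq[0], max=freq[-1])
par_amp.add('amp', value=data.max(), min=0)
par_amp.add('guete', value=10.0, min=1.0, max=1000.0)
par_amp.add('untergrund', value=data[0], min=0)

amp_fit = Model(resonance_amp).fit(data=data, freq=freq, params=par_amp)
print(amp_fit.params['resfreq'].value, amp_fit.params['guete'].value)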
def formatParameters(rVec, tVec, linearCoeffs, distCoeffs):
    '''
    Puts all intrinsic and extrinsic parameters into Parameters() format.
    If there are several extrinsic parameters, they are indexed accordingly.

    Inputs
    rVec, tVec : arrays of shape (n,3,1) or (3,1)
    distCoeffs : must be reshapable to (5,)
    '''
    params = Parameters()

    if len(rVec.shape) == 3:
        # several extrinsic parameters, shape (n, 3, 1)
        for j in range(len(rVec)):
            for i in range(3):
                params.add('rvec%d%d' % (j, i), value=rVec[j, i, 0], vary=False)
                params.add('tvec%d%d' % (j, i), value=tVec[j, i, 0], vary=False)
    else:
        # single pose: convert a 3x3 rotation matrix to a Rodrigues vector
        if prod(rVec.shape) == 9:
            rVec = Rodrigues(rVec)[0]
        rVec = rVec.reshape(3)
        tVec = tVec.reshape(3)
        for i in range(3):
            params.add('rvec%d' % i, value=rVec[i], vary=False)
            params.add('tvec%d' % i, value=tVec[i], vary=False)

    params.add('fX', value=linearCoeffs[0, 0], vary=False)
    params.add('fY', value=linearCoeffs[1, 1], vary=False)
    params.add('cX', value=linearCoeffs[0, 2], vary=False)
    params.add('cY', value=linearCoeffs[1, 2], vary=False)

    # (k1,k2,p1,p2[,k3[,k4,k5,k6[,s1,s2,s3,s4[,τx,τy]]]])
    distCoeffs = distCoeffs.reshape(5)
    for i in range(5):
        params.add('distCoeffs%d' % i, value=distCoeffs[i], vary=False)

    return params
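# Usage sketch for formatParameters with a single pose. The values are
# arbitrary; 'Rodrigues' is assumed to be cv2.Rodrigues and 'prod' to be
# numpy.prod, matching the calls in the function above.
import numpy as np

rVec = np.array([[0.1], [0.0], [-0.2]])    # (3,1) rotation vector
tVec = np.array([[10.0], [5.0], [100.0]])  # (3,1) translation vector
K = np.array([[800.0,   0.0, 320.0],
              [  0.0, 800.0, 240.0],
              [  0.0,   0.0,   1.0]])      # camera matrix
dist = np.zeros(5)                         # (k1, k2, p1, p2, k3)

cam_params = formatParameters(rVec, tVec, K, dist)
print(cam_params['fX'].value, cam_params['rvec0'].value)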
def helium_abundance_elementalScheme(self, Te, ne, lineslog_frame, metal_ext=''):

    # Check the temperature and density are not nan before starting the treatment
    if (not isinstance(Te, float)) and (not isinstance(ne, float)):

        #HeI_indices = (lineslog_frame.Ion.str.contains('HeI_')) & (lineslog_frame.index != 'He1_8446A') & (lineslog_frame.index != 'He1_7818A') & (lineslog_frame.index != 'He1_5016A')
        HeI_indices = (lineslog_frame.Ion.str.contains('HeI_')) & \
                      (lineslog_frame.index.isin(['He1_4472A', 'He1_5876A', 'He1_6678A']))
        HeI_labels = lineslog_frame.loc[HeI_indices].index.values
        HeI_ions = lineslog_frame.loc[HeI_indices].Ion.values

        Emis_Hbeta = self.H1_atom.getEmissivity(tem=Te, den=ne, label='4_2',
                                                product=False)

        # Generate matrices with fluxes and emissivities
        for i in range(len(HeI_labels)):
            pyneb_code = float(HeI_ions[i][HeI_ions[i].find('_') + 1:len(HeI_ions[i])])
            line_relative_Flux = self.lines_dict[HeI_labels[i]] / self.Hbeta_flux
            line_relative_emissivity = self.He1_atom.getEmissivity(
                tem=Te, den=ne, wave=pyneb_code, product=False) / Emis_Hbeta
            line_relative_emissivity = self.check_nan_entries(line_relative_emissivity)

            if i == 0:
                matrix_HeI_fluxes = copy(line_relative_Flux)
                matrix_HeI_emis = copy(line_relative_emissivity)
            else:
                matrix_HeI_fluxes = vstack((matrix_HeI_fluxes, line_relative_Flux))
                matrix_HeI_emis = vstack((matrix_HeI_emis, line_relative_emissivity))

        matrix_HeI_fluxes = transpose(matrix_HeI_fluxes)
        matrix_HeI_emis = transpose(matrix_HeI_emis)

        # Perform the fit
        params = Parameters()
        params.add('Y', value=0.01)
        HeII_HII_array = zeros(len(matrix_HeI_fluxes))
        HeII_HII_error = zeros(len(matrix_HeI_fluxes))
        for i in range(len(matrix_HeI_fluxes)):
            fit_Output = lmfit_minimmize(residual_Y_v3, params,
                                         args=(matrix_HeI_emis[i],
                                               matrix_HeI_fluxes[i]))
            HeII_HII_array[i] = fit_Output.params['Y'].value
            HeII_HII_error[i] = fit_Output.params['Y'].stderr

        # Not adding up the correct errors?
        #self.abunData['HeII_HII_from_' + metal_ext] = random.normal(mean(HeII_HII_array), mean(HeII_HII_error), size=self.MC_array_len)
        ionic_abund = random.normal(mean(HeII_HII_array), mean(HeII_HII_error),
                                    size=self.MC_array_len)

        # Evaluate the nan array
        nan_count = np_sum(isnan(ionic_abund))
        if nan_count == 0:
            self.abunData['HeII_HII_from_' + metal_ext] = ionic_abund
        # Remove the nan entries by drawing from a normal distribution
        elif nan_count < 0.90 * self.MC_array_len:
            mag, error = nanmean(ionic_abund), nanstd(ionic_abund)
            self.abunData['HeII_HII_from_' + metal_ext] = random.normal(
                mag, error, size=self.MC_array_len)

        if nan_count > self.MC_warning_limit:
            print('-- {} calculated with {}'.format(
                'HeII_HII_from_' + metal_ext, nan_count))

        # Calculate the He+2 abundance
        if self.lines_dict.keys() >= {'He2_4686A'}:
            #self.abunData['HeIII_HII_from_' + metal_ext] = self.He2_atom.getIonAbundance(int_ratio=self.lines_dict['He2_4686A'], tem=Te, den=ne, wave=4685.6, Hbeta=self.Hbeta_flux)
            self.determine_ionic_abundance('HeIII_HII_from_' + metal_ext,
                                           self.He2_atom, 'L(4685)',
                                           self.lines_dict['He2_4686A'], Te, ne)

        # Calculate the elemental abundance
        Helium_element_keys = ['HeII_HII_from_' + metal_ext,
                               'HeIII_HII_from_' + metal_ext]
        if set(self.abunData.index) >= set(Helium_element_keys):
            self.abunData['HeI_HI_from_' + metal_ext] = (
                self.abunData[Helium_element_keys[0]] +
                self.abunData[Helium_element_keys[1]])
        else:
            self.abunData['HeI_HI_from_' + metal_ext] = \
                self.abunData[Helium_element_keys[0]]

        # Proceed to get the helium mass fraction Y
        Element_abund = metal_ext + 'I_HI'
        Y_fraction = 'Ymass_' + metal_ext
        Helium_abund = 'HeI_HI_from_' + metal_ext
        if set(self.abunData.index) >= {Helium_abund, Element_abund}:
            self.abunData[Y_fraction] = (
                4 * self.abunData[Helium_abund] *
                (1 - 20 * self.abunData[Element_abund])) / (
                    1 + 4 * self.abunData[Helium_abund])
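# 'residual_Y_v3' is defined elsewhere. Given the call
# lmfit_minimmize(residual_Y_v3, params, args=(emissivity_ratios, flux_ratios))
# and the relation F(HeI)/F(Hbeta) = y+ * E(HeI)/E(Hbeta), a minimal form
# consistent with that signature (an assumption, not the original) is:
def residual_Y_v3(params, emis_ratios, flux_ratios):
    y_plus = params['Y'].value
    return flux_ratios - y_plus * emis_ratios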
surface_strain = strain[np.nonzero(surface)]
print("Max surface strain = {:.5f}".format(surface_strain.max()))

hist, bin_edges = np.histogram(
    surface_strain,
    bins=int((surface_strain.max() - surface_strain.min()) / bin_step),
)
hist = hist.astype(float)
if normalize:
    hist = hist / nb_surface  # normalize the histogram to the number of points
x_axis = bin_edges[:-1] + (bin_edges[1] - bin_edges[0]) / 2

fit_params = Parameters()
if fit_pdf == "skewed_gaussian":
    fit_params.add("amp_0", value=0.0005, min=0.000001, max=10000)
    fit_params.add("loc_0", value=0, min=-0.1, max=0.1)
    fit_params.add("sig_0", value=0.0005, min=0.0000001, max=0.1)
    fit_params.add("alpha_0", value=0, min=-10, max=10)
else:  # 'pseudovoigt'
    fit_params.add("amp_0", value=0.0005, min=0.000001, max=10000)
    fit_params.add("cen_0", value=0, min=-0.1, max=0.1)
    fit_params.add("sig_0", value=0.0005, min=0.0000001, max=0.1)
    fit_params.add("ratio_0", value=0.5, min=0, max=1)

# run the global fit to all the data sets
result = minimize(util.objective_lmfit, fit_params, args=(x_axis, hist, fit_pdf))
report_fit(result.params)
strain_fit = util.function_lmfit(params=result.params,
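# 'util.objective_lmfit' and 'util.function_lmfit' come from the surrounding
# package and are not shown above. Given the minimize(...) call, a minimal
# stand-in for the objective would evaluate the chosen distribution on x_axis
# and return the residual against the histogram; treat this as a sketch of
# the expected contract (including the 'distribution' keyword), not the
# package's implementation.
def objective_lmfit(params, x_axis, data, distribution):
    model = util.function_lmfit(params=params, x_axis=x_axis,
                                distribution=distribution)
    return (model - data).ravel()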
def create_model(self):
    params = Parameters()
    params.add("ocv", value=self.voltage[-1], min=0, max=10)
    # initial time constants, one decade apart (1, 10, 100, ... seconds)
    taus = [math.pow(10, i) for i in range(self.circuits)]
    weights = np.zeros(self.circuits)
    params.add("t0", value=taus[0], min=0.01)
    params.add("w0", value=weights[0])
    for i in range(1, self.circuits):
        # express t_i = t_{i-1} + delta_i with delta_i >= 0 so the time
        # constants stay ordered during the fit
        params.add("delta" + str(i), value=taus[i] - taus[i - 1], min=0.0)
        params.add("t" + str(i), expr="delta" + str(i) + "+t" + str(i - 1))
        params.add("w" + str(i), value=weights[i])
    # pad unused circuits (up to five) with fixed, zero-weight terms
    for i in range(self.circuits, 5):
        params.add("t" + str(i), value=1, vary=False)
        params.add("w" + str(i), value=0, vary=False)
    self.params = params
    self.model = Model(self._model)
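# '_model' itself is not shown. Given parameters named ocv, t0..t4 and
# w0..w4, a plausible form (an assumption, not the original) is the
# open-circuit voltage plus five exponential relaxation terms, the usual
# shape for RC-circuit voltage-relaxation fits. With Model(self._model),
# lmfit infers the first argument ('time') as the independent variable.
def _model(time, ocv, t0, w0, t1, w1, t2, w2, t3, w3, t4, w4):
    taus = (t0, t1, t2, t3, t4)
    weights = (w0, w1, w2, w3, w4)
    return ocv + sum(w * np.exp(-time / tau)
                     for w, tau in zip(weights, taus))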