def fit_form_factor(self, nstates, chain=False, constrain=False, **fitter_kwargs):
    """Run the joint fit of 2- and 3-point functions for the form factor."""
    # Handle pedestal
    pedestal = fitter_kwargs.pop('pedestal', None)
    if pedestal is not None:
        self.pedestal = pedestal
    # Handle prior
    prior = fitter_kwargs.get('prior')
    if prior is not None:
        self.prior = prior
    else:
        prior = self.prior
    fitter_kwargs['prior'] = prior
    # Handle models
    models_list = []
    for tag in self.ds:
        model = get_model(self.ds, tag, nstates, self.pedestal, constrain)
        if model is not None:
            models_list.append(model)
    # Abort if too few models found
    if len(models_list) != len(set(self.ds.keys())):
        self.fitter = None
        fit = None
        LOGGER.warning('Insufficient models found. Skipping joint fit.')
        return
    # Run fit
    self.fitter = cf.CorrFitter(models=models_list)
    if chain:
        _lsqfit = self.fitter.chained_lsqfit
    else:
        _lsqfit = self.fitter.lsqfit
    fit = _lsqfit(data=self.ds, **fitter_kwargs)
    # fit = serialize.SerializableNonlinearFit(fit)
    fit = serialize.SerializableFormFactor(fit, tags=self.ds.tags)
    self.fits['full'] = fit
    if fit.failed:
        LOGGER.warning('Full joint fit failed.')
    else:
        # Update the mass estimates in the dataset, since the visualization of
        # the ratio R (or Rbar) is rather sensitive to the masses
        self.ds.set_masses(fit.p['light-light:dE'][0],
                           fit.p['heavy-light:dE'][0])
    # Tidy up final results
    if self.pedestal is not None:
        sign = np.sign(self.pedestal)
        vnn = self.pedestal + sign * _abs(fit.p['fluctuation'])
        fit.p['Vnn'][0, 0] = vnn
        fit.palt['Vnn'][0, 0] = vnn
    else:
        vnn = fit.p['Vnn'][0, 0]
    self.r = convert_vnn_to_ratio(self.m_src, vnn)

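# Hedged usage sketch (not from the original codebase): 'analysis' stands for an
# instance of the class that owns fit_form_factor above, and the keyword values
# are illustrative assumptions only.
def run_joint_fit_example(analysis, nstates):
    """Run the joint 2pt/3pt fit and report the extracted ratio."""
    analysis.fit_form_factor(nstates=nstates, chain=False, constrain=False)
    fit = analysis.fits.get('full')
    if fit is None:
        print('Joint fit was skipped (insufficient models).')
        return
    print(fit)                # serialized joint fit
    print('r =', analysis.r)  # ratio built from Vnn[0, 0] via convert_vnn_to_ratio
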
def main():
    data, basis = make_data('etab.h5')
    fitter = cf.CorrFitter(models=make_models())
    p0 = None
    for N in range(1, 8):
        print(30 * '=', 'nterm =', N)
        prior = make_prior(N, basis)
        fit = fitter.lsqfit(data=data, prior=prior, p0=p0, svdcut=SVDCUT)
        print(fit.format(pstyle=None if N < 7 else 'v'))
        p0 = fit.pmean
    print_results(fit, basis, prior, data)
    if SHOWPLOTS:
        fit.show_plots(save='etab.{}.png', view='ratio')
    # check fit quality by adding noise
    print('\n==================== add svd, prior noise')
    noisy_fit = fitter.lsqfit(
        data=data, prior=prior, p0=fit.pmean, svdcut=SVDCUT,
        noise=True,
        )
    print(noisy_fit.format(pstyle=None))
    dE = fit.p['etab.dE'][:3]
    noisy_dE = noisy_fit.p['etab.dE'][:3]
    print('      dE:', dE)
    print('noisy dE:', noisy_dE)
    print('          ', gv.fmt_chi2(gv.chi2(dE - noisy_dE)))
    if SHOWPLOTS:
        fit.qqplot_residuals().show()

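# Hedged sketch (not the original helper): if make_data above returns a
# corrfitter.EigenBasis alongside the averaged data, make_prior(N, basis) is
# typically a thin wrapper around basis.make_prior. The key format 'etab.{s1}'
# matches the 'etab.dE' keys read out in main() above, but it is an assumption.
def make_prior_sketch(N, basis):
    """Build an N-term prior from the eigenbasis of the etab correlators."""
    return basis.make_prior(nterm=N, keyfmt='etab.{s1}')
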
def main():
    data, basis = make_data('etab.h5')
    fitter = cf.CorrFitter(models=make_models())
    p0 = None
    for N in range(1, 8):
        print(30 * '=', 'nterm =', N)
        prior = make_prior(N, basis)
        fit = fitter.lsqfit(data=data, prior=prior, p0=p0, svdcut=SVDCUT)
        print(fit.format(pstyle=None if N < 7 else 'm'))
        p0 = fit.pmean
    print_results(fit, basis, prior, data)
    if DISPLAYPLOTS:
        fitter.display_plots()
    print('\n==================== add svd, prior noise')
    noisy_fit = fitter.lsqfit(
        data=data, prior=prior, p0=fit.pmean, svdcut=SVDCUT,
        noise=True,
        )
    print(noisy_fit.format(pstyle=None))
    dE = fit.p['etab.dE'][:3]
    noisy_dE = noisy_fit.p['etab.dE'][:3]
    print('      dE:', dE)
    print('noisy dE:', noisy_dE)
    print('          ', gv.fmt_chi2(gv.chi2(dE - noisy_dE)))

def run_fit(self, nstates=Nstates(1, 0), prior=None, **fitter_kwargs):
    """
    Run the fit.

    Args:
        nstates: tuple (n_decay, n_osc) specifying the number of decaying
            and oscillating states, respectively. Defaults to (1, 0).
        prior: BasicPrior object. Default is None, in which case the fitter
            tries to construct a prior itself.
    """
    self._nstates = nstates
    if prior is None:
        prior = bayes_prior.MesonPrior(
            nstates.n, nstates.no, amps=['a', 'ao'],
            tag=self.tag, ffit=self.c2.fastfit, extend=True)
    self.prior = prior
    # Model construction infers the fit times from c2
    model = get_two_point_model(self.c2, bool(nstates.no))
    self.fitter = cf.CorrFitter(models=model)
    data = {self.tag: self.c2}
    fit = self.fitter.lsqfit(data=data, prior=prior, p0=prior.p0,
                             **fitter_kwargs)
    fit = serialize.SerializableNonlinearFit(fit)
    self._fit = fit
    if fit.failed:
        fit = None
    return fit

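# Hedged sketch of the kind of model get_two_point_model presumably builds with
# corrfitter.Corr2. The attributes read off c2 (tag, tdata, tmin, tmax, tp) and
# the bare prior keys ('a', 'ao', 'dE', 'dEo') are assumptions here, not the
# codebase's actual conventions.
import corrfitter as cf  # mirrors the import already used by the module above

def sketch_two_point_model(c2, osc):
    """Return a Corr2 model for a single two-point correlator."""
    if osc:
        a, b, dE, s = ('a', 'ao'), ('a', 'ao'), ('dE', 'dEo'), (1.0, -1.0)
    else:
        a, b, dE, s = 'a', 'a', 'dE', 1.0
    return cf.Corr2(
        datatag=c2.tag, tdata=c2.tdata, tmin=c2.tmin, tmax=c2.tmax, tp=c2.tp,
        a=a, b=b, dE=dE, s=s,
        )
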
def main():
    data, basis = make_data(corrpath)
    fitter = cf.CorrFitter(models=make_models())
    p0 = None
    for N in NEXP:
        print(30 * '=', 'nterm =', N)
        prior = make_prior(N, basis)
        fit = fitter.lsqfit(data=data, prior=prior, p0=p0, svdcut=SVDCUT)
        # print(fit.format(pstyle=None if N < 12 else 'm'))
        p0 = fit.pmean
    print_results(fit, basis, prior, data, l)
    if SHOWPLOTS:
        fit.show_plots(view='ratio')

def main():
    data, basis = make_data(file)
    fitter = cf.CorrFitter(models=make_models())
    p0 = None
    for N in NEXP:
        print(30 * '=', 'nterm =', N)
        prior = make_prior(N, basis)
        fit = fitter.lsqfit(data=data, prior=prior, p0=p0, svdcut=SVDCUT)
        print(fit.format(pstyle=None if N < 12 else 'm'))
        p0 = fit.pmean
    print_results(fit, basis, prior, data)
    if WRITE_LOG:
        write_results(fit, basis, prior, data, NEXP)
    if SHOWPLOTS:
        fit.show_plots(save='etac.{}.png', view='ratio')

def main():
    data = make_data(filename='mydata')
    fitter = cf.CorrFitter(models=make_models())
    p0 = None
    for N in range(2, 6):
        print(30 * '=', 'nterm =', N)
        prior = make_prior(N)
        fit = fitter.lsqfit(data=data, prior=prior, p0=p0)
        p0 = fit.pmean
    print_results(fit)
    fastfit = cf.fastfit(G=data['cdata'], ampl='1(1)', dE='0.5(5)',
                         tmin=tm, tp=T)
    print(fastfit)

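# Hedged sketch (not part of the original script): cf.fastfit returns an object
# whose ground-state estimates are exposed as gvar.GVar attributes; this helper
# just unpacks the two used most often from the result printed above.
def summarize_fastfit(fastfit):
    """Print the ground-state energy and amplitude from a cf.fastfit result."""
    print('fastfit E    =', fastfit.E)
    print('fastfit ampl =', fastfit.ampl)
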
def main():
    data = make_data('etas-Ds.h5')
    models = make_models()                                          # 1a
    models = [
        models[0], models[1],                                       # 1b
        dict(nterm=(2, 1), svdcut=6.3e-5),                          # 1c
        (models[2], models[3]),                                     # 1d
        ]
    fitter = cf.CorrFitter(models=models)                           # 1e
    p0 = None
    for N in [1, 2, 3, 4]:
        print(30 * '=', 'nterm =', N)
        prior = make_prior(N)
        fit = fitter.chained_lsqfit(data=data, prior=prior, p0=p0)  # 2
        print(fit.format(pstyle=None if N < 4 else 'm'))
        p0 = fit.pmean
    print_results(fit, prior, data)
    if DISPLAYPLOTS:
        fit.show_plots()
    # check fit quality by adding noise
    print('\n==================== add svd, prior noise')
    noisy_fit = fitter.chained_lsqfit(
        data=data, prior=prior, p0=fit.pmean, svdcut=SVDCUT,
        noise=True,
        )
    print(noisy_fit.format(pstyle=None))
    p = key_parameters(fit.p)
    noisy_p = key_parameters(noisy_fit.p)
    print('      fit:', p)
    print('noisy fit:', noisy_p)
    print('          ', gv.fmt_chi2(gv.chi2(p - noisy_p)))
    # simulated fit
    for sim_pdata in fitter.simulated_pdata_iter(
        n=2, dataset=cf.read_dataset('etas-Ds.h5'), p_exact=fit.pmean
        ):
        print('\n==================== simulation')
        sim_fit = fitter.chained_lsqfit(
            pdata=sim_pdata, prior=prior, p0=fit.pmean, svdcut=SVDCUT,
            )
        print(sim_fit.format(pstyle=None))
        p = key_parameters(fit.pmean)
        sim_p = key_parameters(sim_fit.p)
        print('simulated - exact:', sim_p - p)
        print('          ', gv.fmt_chi2(gv.chi2(p - sim_p)))

def main():
    data = make_data('etas-Ds.h5')
    fitter = cf.CorrFitter(models=make_models())
    p0 = None
    prior = make_prior(8)                                           # 1
    for N in [1, 2]:                                                # 2
        print(30 * '=', 'nterm =', N)
        fit = fitter.lsqfit(
            data=data, prior=prior, p0=p0, nterm=(N, N), svdcut=SVDCUT  # 3
            )
        print(fit)                                                  # 4
        p0 = fit.pmean
    print_results(fit, prior, data)
    if DISPLAYPLOTS:
        fit.show_plots()
    # check fit quality by adding noise
    print('\n==================== add svd, prior noise')
    noisy_fit = fitter.lsqfit(
        data=data, prior=prior, p0=fit.pmean, svdcut=SVDCUT, nterm=(N, N),
        noise=True,
        )
    print(noisy_fit.format(pstyle=None))
    p = key_parameters(fit.p)
    noisy_p = key_parameters(noisy_fit.p)
    print('      fit:', p)
    print('noisy fit:', noisy_p)
    print('          ', gv.fmt_chi2(gv.chi2(p - noisy_p)))
    # simulated fit
    for sim_pdata in fitter.simulated_pdata_iter(
        n=2, dataset=cf.read_dataset('etas-Ds.h5'), p_exact=fit.pmean
        ):
        print('\n==================== simulation')
        sim_fit = fitter.lsqfit(
            pdata=sim_pdata, prior=prior, p0=fit.pmean, svdcut=SVDCUT,
            nterm=(N, N),
            )
        print(sim_fit.format(pstyle=None))
        p = key_parameters(fit.pmean)
        sim_p = key_parameters(sim_fit.p)
        print('simulated - exact:', sim_p - p)
        print('          ', gv.fmt_chi2(gv.chi2(p - sim_p)))

def main():
    data = make_data('Ds-Ds.h5')
    fitter = cf.CorrFitter(models=make_models())
    p0 = None
    for N in [1, 2, 3, 4]:
        print(30 * '=', 'nterm =', N)
        prior = make_prior(N)
        fit = fitter.lsqfit(data=data, prior=prior, p0=p0, svdcut=SVDCUT)
        print(fit.format(pstyle=None if N < 4 else 'v'))
        p0 = fit.pmean
    print_results(fit, prior, data)
    if SHOWPLOTS:
        fit.show_plots(save='Ds-Ds.{}.png', view='ratio')
    # check fit quality by adding noise
    print('\n==================== add svd, prior noise')
    noisy_fit = fitter.lsqfit(
        data=data, prior=prior, p0=fit.pmean, svdcut=SVDCUT,
        noise=True,
        )
    print(noisy_fit.format(pstyle=None))
    p = key_parameters(fit.p)
    noisy_p = key_parameters(noisy_fit.p)
    print('      fit:', p)
    print('noisy fit:', noisy_p)
    print('          ', gv.fmt_chi2(gv.chi2(p - noisy_p)))
    # simulated fit
    for sim_pdata in fitter.simulated_pdata_iter(
        n=2, dataset=h5py.File('Ds-Ds.h5', 'r'), p_exact=fit.pmean
        ):
        print('\n==================== simulation')
        sim_fit = fitter.lsqfit(
            pdata=sim_pdata, prior=prior, p0=fit.pmean, svdcut=SVDCUT,
            )
        print(sim_fit.format(pstyle=None))
        p = key_parameters(fit.pmean)
        sim_p = key_parameters(sim_fit.p)
        print('simulated - exact:', sim_p - p)
        print('          ', gv.fmt_chi2(gv.chi2(p - sim_p)))

def fit_data(filename_in, key, otherkey):
    dset = cf.read_dataset(filename_in)  # read data
    # If we don't have many samples, this next part suggests an svdcut
    # s = gv.dataset.svd_diagnosis(dset, models=make_models(key, otherkey, n))
    # print('svdcut =', s.svdcut)  # suggested svdcut
    # s.plot_ratio(show=True)
    ###########################################################################
    data = make_data(filename_in)
    fitter = cf.CorrFitter(models=make_models(key, otherkey))
    p0 = None
    for N in NEXP:
        print(30 * '=', 'nterm =', N)
        prior = make_prior(N)
        fit = fitter.lsqfit(data=data, prior=prior, p0=p0)
        # add_svdnoise=True, add_priornoise=True, svdcut=s.svdcut
        p0 = fit.pmean
        print(fit.format(pstyle=None if N < 10 else 'm'))
    print_results(fit)

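# Hedged sketch of the svd-diagnosis step that is commented out inside fit_data:
# gv.dataset.svd_diagnosis suggests an svdcut when the number of configurations
# is small relative to the size of the data covariance matrix. The helper name
# and show_plot flag are illustrative; make_models(key, otherkey) is the same
# factory used above.
import corrfitter as cf  # mirrors the imports already used above
import gvar as gv

def suggest_svdcut(filename_in, key, otherkey, show_plot=False):
    """Return the svdcut suggested by gvar's SVD diagnosis for this dataset."""
    dset = cf.read_dataset(filename_in)
    s = gv.dataset.svd_diagnosis(dset, models=make_models(key, otherkey))
    if show_plot:
        s.plot_ratio(show=True)  # eigenvalue ratios underlying the suggested cut
    return s.svdcut
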