def make_data(filename):
    data = gv.dataset.avg_data(read_dataset(filename))
    basis = EigenBasis(
        data,
        keyfmt='1s0.{s1}{s2}',
        srcs=['l', 'g', 'd', 'e'],
        t=(1, 2),
        tdata=range(1, 24),
        )
    return data, basis
def make_data(filename):
    data = gv.dataset.avg_data(cf.read_dataset(filename))
    basis = cf.EigenBasis(
        data,
        keyfmt='1s0.{s1}{s2}',
        srcs=['l', 'g', 'd', 'e'],
        t=(1, 2),
        tdata=range(1, 24),
        )
    return basis.apply(data, keyfmt='1s0.{s1}{s2}'), basis      # 3
def make_data(filename):
    data = gv.dataset.avg_data(cf.read_dataset(filename, grep='1s0'))
    basis = cf.EigenBasis(
        data,
        keyfmt=KEYFMT,
        srcs=SOURCES,
        t=(1, 2),
        tdata=TDATA,
        )
    return data, basis
def main():
    dset = cf.read_dataset('etas-Ds.h5')
    s = gv.dataset.svd_diagnosis(dset, models=make_models())
    print('svdcut =', s.svdcut)
    s.plot_ratio(show=True)

    # chained fit
    models = make_models()
    models = models[:2] + [tuple(models[2:])]
    for m in models:
        s = gv.dataset.svd_diagnosis(dset, models=[m])
        print('svdcut (chained) =', s.svdcut)
        s.plot_ratio(show=True)
def make_data(filename):
    dset = gv.dataset.Dataset(filename)
    data = c_hack * gv.dataset.avg_data(cf.read_dataset(filename, grep=ttag))
    # data = gv.regulate(data, svdcut=0.5)
    if OSC:
        basis = cf.EigenBasis(data, keyfmt=KEYFMT, srcs=SRCs, t=(t0, t0 + 2), tdata=TDATA)
    else:
        basis = cf.EigenBasis(data, keyfmt=KEYFMT, srcs=SRCs, t=(t0, t0 + 1), tdata=TDATA)
    return data, basis
def main():
    data = make_data('etas-Ds.h5')
    models = make_models()                              # 1a
    models = [
        models[0], models[1],                           # 1b
        dict(nterm=(2, 1), svdcut=6.3e-5),              # 1c
        (models[2], models[3])                          # 1d
        ]
    fitter = cf.CorrFitter(models=models)               # 1e
    p0 = None
    for N in [1, 2, 3, 4]:
        print(30 * '=', 'nterm =', N)
        prior = make_prior(N)
        fit = fitter.chained_lsqfit(data=data, prior=prior, p0=p0)    # 2
        print(fit.format(pstyle=None if N < 4 else 'm'))
        p0 = fit.pmean

    print_results(fit, prior, data)
    if DISPLAYPLOTS:
        fit.show_plots()

    # check fit quality by adding noise
    print('\n==================== add svd, prior noise')
    noisy_fit = fitter.chained_lsqfit(
        data=data, prior=prior, p0=fit.pmean, svdcut=SVDCUT,
        noise=True,
        )
    print(noisy_fit.format(pstyle=None))
    p = key_parameters(fit.p)
    noisy_p = key_parameters(noisy_fit.p)
    print('      fit:', p)
    print('noisy fit:', noisy_p)
    print('          ', gv.fmt_chi2(gv.chi2(p - noisy_p)))

    # simulated fit
    for sim_pdata in fitter.simulated_pdata_iter(
        n=2, dataset=cf.read_dataset('etas-Ds.h5'), p_exact=fit.pmean
        ):
        print('\n==================== simulation')
        sim_fit = fitter.chained_lsqfit(
            pdata=sim_pdata, prior=prior, p0=fit.pmean, svdcut=SVDCUT,
            )
        print(sim_fit.format(pstyle=None))
        p = key_parameters(fit.pmean)
        sim_p = key_parameters(sim_fit.p)
        print('simulated - exact:', sim_p - p)
        print('                  ', gv.fmt_chi2(gv.chi2(p - sim_p)))
def main():
    data = make_data('etas-Ds.h5')
    fitter = cf.CorrFitter(models=make_models())
    p0 = None
    prior = make_prior(8)                               # 1
    for N in [1, 2]:                                    # 2
        print(30 * '=', 'nterm =', N)
        fit = fitter.lsqfit(
            data=data, prior=prior, p0=p0, nterm=(N, N), svdcut=SVDCUT    # 3
            )
        print(fit)                                      # 4
        p0 = fit.pmean

    print_results(fit, prior, data)
    if DISPLAYPLOTS:
        fit.show_plots()

    # check fit quality by adding noise
    print('\n==================== add svd, prior noise')
    noisy_fit = fitter.lsqfit(
        data=data, prior=prior, p0=fit.pmean, svdcut=SVDCUT,
        nterm=(N, N), noise=True,
        )
    print(noisy_fit.format(pstyle=None))
    p = key_parameters(fit.p)
    noisy_p = key_parameters(noisy_fit.p)
    print('      fit:', p)
    print('noisy fit:', noisy_p)
    print('          ', gv.fmt_chi2(gv.chi2(p - noisy_p)))

    # simulated fit
    for sim_pdata in fitter.simulated_pdata_iter(
        n=2, dataset=cf.read_dataset('etas-Ds.h5'), p_exact=fit.pmean
        ):
        print('\n==================== simulation')
        sim_fit = fitter.lsqfit(
            pdata=sim_pdata, prior=prior, p0=fit.pmean, svdcut=SVDCUT,
            nterm=(N, N),
            )
        print(sim_fit.format(pstyle=None))
        p = key_parameters(fit.pmean)
        sim_p = key_parameters(sim_fit.p)
        print('simulated - exact:', sim_p - p)
        print('                  ', gv.fmt_chi2(gv.chi2(p - sim_p)))
def test_fit(fitter, datafile):
    """ Test the fit with simulated data """
    gv.ranseed((623738625, 435880512, 1745400596))
    print('\nRandom seed:', gv.ranseed.seed)
    dataset = read_dataset(datafile)
    pexact = fitter.fit.pmean
    prior = fitter.fit.prior
    for sdata in fitter.simulated_data_iter(n=2, dataset=dataset, pexact=pexact):
        print('\n============================== simulation')
        sfit = fitter.lsqfit(data=sdata, prior=prior, p0=pexact, nterm=(2, 2))
        diff = []       # check chi**2 for leading parameters
        for k in prior:
            diff.append(sfit.p[k].flat[0] - pexact[k].flat[0])
        chi2_diff = gv.chi2(diff)
        print(
            'Leading parameter chi2/dof [dof] = %.2f' % (chi2_diff / chi2_diff.dof),
            '[%d]' % chi2_diff.dof,
            ' Q = %.1f' % chi2_diff.Q
            )
def test_fit(fitter, datafile):
    """ Test the fit with simulated data """
    gv.ranseed((1487942813, 775399747, 906327435))
    print('\nRandom seed:', gv.ranseed.seed)
    dataset = cf.read_dataset(datafile)
    pexact = fitter.fit.pmean
    prior = fitter.fit.prior
    for sdata in fitter.simulated_data_iter(n=2, dataset=dataset, pexact=pexact):
        print('\n============================== simulation')
        sfit = fitter.lsqfit(data=sdata, prior=prior, p0=pexact)
        diff = []       # check chi**2 for leading parameters
        for k in prior:
            diff.append(sfit.p[k].flat[0] - pexact[k].flat[0])
        chi2diff = gv.chi2(diff)
        print(
            'Leading parameter chi2/dof [dof] = %.2f' % (chi2diff / chi2diff.dof),
            '[%d]' % chi2diff.dof,
            ' Q = %.1f' % chi2diff.Q
            )
def fit_data(filename_in, key, otherkey):
    dset = cf.read_dataset(filename_in)     # read data
    # If we don't have many samples, this next part suggests an svdcut:
    # s = gv.dataset.svd_diagnosis(dset, models=make_models(key, otherkey, n))
    # print('svdcut =', s.svdcut)           # suggested svdcut
    # s.plot_ratio(show=True)
    ############################################################################################
    data = make_data(filename_in)
    fitter = cf.CorrFitter(models=make_models(key, otherkey))
    p0 = None
    for N in NEXP:
        print(30 * '=', 'nterm =', N)
        prior = make_prior(N)
        fit = fitter.lsqfit(
            data=data, prior=prior, p0=p0,
            )   # add_svdnoise=True, add_priornoise=True, svdcut=s.svdcut
        p0 = fit.pmean
        print(fit.format(pstyle=None if N < 10 else 'm'))
        print_results(fit)
def make_data(filename):
    data = c_hack * gv.dataset.avg_data(cf.read_dataset(filename, grep=ttag))
    basis = cf.EigenBasis(
        data, keyfmt=KEYFMT, srcs=SRC, t=(t0, t0 + 2), tdata=TDATA,
        )
    return data, basis
def make_data(datafile):
    """ Read data from datafile and average it. """
    return gv.dataset.avg_data(cf.read_dataset(datafile))
def main():
    dset = cf.read_dataset('Ds-Ds.h5')
    s = gv.dataset.svd_diagnosis(dset, models=make_models())
    print('svdcut =', s.svdcut)
    s.plot_ratio(show=True)
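# Hedged sketch (not part of the original script): how the svdcut suggested by
# gv.dataset.svd_diagnosis would typically be fed back into a fit, following
# the pattern used by the other main() routines in this collection.  The name
# fit_with_suggested_svdcut and the choice make_prior(4) are illustrative
# assumptions only.
def fit_with_suggested_svdcut(dset, s):
    data = gv.dataset.avg_data(dset)                    # average the raw dataset
    fitter = cf.CorrFitter(models=make_models())
    fit = fitter.lsqfit(data=data, prior=make_prior(4), svdcut=s.svdcut)
    print(fit)
    return fit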
def make_data(file):
    # return the raw (unaveraged) dataset
    return cf.read_dataset(file)
def make_data(filename):
    """ Read data, compute averages/covariance matrix for G(t). """
    return gv.dataset.avg_data(cf.read_dataset(filename))
# l3248f211b580m002426m06730m8447
# l3296f211b630m0074m037m440-coul-v5
ensemble = 'l3296f211b630m0074m037m440-coul-v5'
corr = 't0_onemmHy_vec_m0.450.gpl'
basedir = '../data/hybrid'
corrpath = os.path.join(basedir, ensemble, corr)

SRCs = ['H', 'h']
KEYFMT = 'onemm.{s1}{s2}'
tag = KEYFMT[:6]            # 'onemm.'
ttag = tag[:-1]             # 'onemm'
otag = ttag + '_o.'         # 'onemm_o.'

# get temporal extent of lattice
dset = gv.dataset.Dataset(corrpath)
data = gv.dataset.avg_data(cf.read_dataset(corrpath, grep=ttag))
T = data[list(dset.keys())[0]].size     # list(...) needed for Python 3

## single channel parameters
TDATA = range(T)
TFIT = TDATA[1:20]
TP = T
NEXP = range(1, 10)
s_coeff = (1, -1)
key = tag + SRCs[1] + SRCs[1]
corrtag = corr[:-4]
otherkey = None

## flags for both single and matrix fits
OSC = True
CORRFIT = True
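# Hedged sketch (assumption, not taken from the source): one way the constants
# above could feed a single-channel make_models().  The prior labels
# ('a', 'ao', 'dE', 'dEo') are illustrative only; the oscillating partner
# follows from s_coeff = (1, -1) and OSC = True.
def make_models():
    return [cf.Corr2(
        datatag=key,
        tp=TP, tdata=TDATA, tfit=TFIT,
        a=('a', 'ao'), b=('a', 'ao'),       # same source and sink amplitude ('hh')
        dE=('dE', 'dEo'), s=s_coeff,        # include oscillating partner states
        )]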
def make_data(datafile):
    """ Read data from datafile and average it. """
    dset = cf.read_dataset(datafile)
    return gv.dataset.avg_data(dset)