def make_prior():
    prior = {}
    prior['a'] = gv.gvar(1.0, 1.0)
    prior['E'] = gv.gvar(4.5, 100.0)
    prior['b'] = gv.gvar(-10.0, 50.0)
    return prior
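# --- Illustrative usage (not from the original source): a minimal sketch of
# how a prior dict like make_prior()'s might feed lsqfit.nonlinear_fit. The
# model fcn and the synthetic data below are invented placeholders.
import numpy as np
import gvar as gv
import lsqfit

def fcn(x, p):
    # one-exponential model built from the prior's keys
    return p['a'] * np.exp(-p['E'] * x) + p['b']

x = np.arange(1., 6.)
y = gv.gvar(fcn(x, dict(a=1.2, E=0.9, b=-9.8)), np.full(5, 0.1))  # fake data
fit = lsqfit.nonlinear_fit(data=(x, y), prior=make_prior(), fcn=fcn)
print(fit.format())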
def test_ravgdict_unwgtd(self):
    " unweighted RAvgDict "
    # scalar
    mean_s = np.random.uniform(-10., 10.)
    sdev_s = 0.1
    x_s = gv.gvar(mean_s, sdev_s)
    # array
    mean_a = np.random.uniform(-10., 10., (2,))
    cov_a = np.array([[1., 0.5], [0.5, 2.]]) / 10.
    x_a = gv.gvar(mean_a, cov_a)
    N = 30
    r_a = gv.raniter(x_a, N)
    ravg = RAvgDict(dict(scalar=1.0, array=[[2., 3.]]), weighted=False)
    for ri in r_a:
        ravg.add(dict(
            scalar=gv.gvar(x_s(), sdev_s), array=[gv.gvar(ri, cov_a)]
            ))
    np_assert_allclose(ravg['scalar'].sdev, x_s.sdev / (N ** 0.5))
    self.assertLess(
        abs(ravg['scalar'].mean - mean_s), 5 * ravg['scalar'].sdev
        )
    np_assert_allclose(gv.evalcov(ravg['array'].flat), cov_a / N)
    for i in range(2):
        self.assertLess(
            abs(mean_a[i] - ravg['array'][0, i].mean),
            5 * ravg['array'][0, i].sdev
            )
    self.assertEqual(ravg.dof, 2 * N - 2 + N - 1)
    self.assertGreater(ravg.Q, 1e-3)
def test_ravgdict(self):
    " RAvgDict "
    a = RAvgDict(dict(s=1.0, a=[[2.0, 3.0]]))
    a.add(dict(s=gv.gvar(1, 1), a=[[gv.gvar(1, 1), gv.gvar(10, 10)]]))
    a.add(dict(s=gv.gvar(2, 2), a=[[gv.gvar(2, 2), gv.gvar(20, 20)]]))
    a.add(dict(s=gv.gvar(3, 3), a=[[gv.gvar(3, 3), gv.gvar(30, 30)]]))
    self.assertEqual(a['a'].shape, (1, 2))
    np_assert_allclose(a['a'][0, 0].mean, 1.346938775510204)
    np_assert_allclose(a['a'][0, 0].sdev, 0.8571428571428571)
    self.assertEqual(str(a['a'][0, 0]), '1.35(86)')
    self.assertEqual(str(a['a'][0, 1]), '13.5(8.6)')
    np_assert_allclose(a['s'].mean, 1.346938775510204)
    np_assert_allclose(a['s'].sdev, 0.8571428571428571)
    self.assertEqual(str(a['s']), '1.35(86)')
    self.assertEqual(a.dof, 6)
    np_assert_allclose(a.chi2, 3 * 0.5306122448979592)
    np_assert_allclose(a.Q, 0.953162484587)
    s = [
        "itn   integral        wgt average     chi2/dof        Q",
        "-------------------------------------------------------",
        "  1   1.0(1.0)        1.0(1.0)            0.00     1.00",
        "  2   2.0(2.0)        1.20(89)            0.20     0.90",
        "  3   3.0(3.0)        1.35(86)            0.27     0.95",
        ""
        ]
    self.assertEqual(a.summary(), '\n'.join(s))
def make_prior(nexp):               # make priors for fit parameters
    prior = gv.BufferDict()         # dictionary-like
    prior['a'] = [gv.gvar(0.5, 0.5) for i in range(nexp)]
    de = [gv.gvar(0.9, 0.01) for i in range(nexp)]
    de[0] = gv.gvar(1, 0.5)
    prior['E'] = [sum(de[:i + 1]) for i in range(nexp)]
    return prior
def readLocal():
    # [r1^3] <O> / M_D
    # pn
    #O1 = gvar.gvar(0.183374991995, 0.015273056)
    #O2 = gvar.gvar(-0.338257485792, 0.02443302)
    #O3 = gvar.gvar(0.117911210974, 0.009012166)
    #O4 = gvar.gvar(0.653382176507, 0.048047646)
    #O5 = gvar.gvar(0.196645914598, 0.015673252)
    # pr: Thesis numbers
    #O1 = gvar.gvar(0.16461, 0.01681)
    #O2 = gvar.gvar(-0.305794829558, 0.0164007415327)
    #O3 = gvar.gvar(0.130212854553, 0.00657230832578)
    #O4 = gvar.gvar(0.61220412711, 0.0376293022599)
    #O5 = gvar.gvar(0.23034, 0.0233)
    # npr
    #O1 = gvar.gvar(0.154205591354, 0.00909)
    #O2 = gvar.gvar(-0.300761949713, 0.01036)
    #O3 = gvar.gvar(0.116129493883, 0.00535)
    #O4 = gvar.gvar(0.6191, 0.03089)
    #O5 = gvar.gvar(0.22149, 0.0202541127012)
    O1 = gvar.gvar(0.0394, 0.0034)
    O2 = gvar.gvar(0.078, 0.004)
    O3 = gvar.gvar(0.0213, 0.002)
    O4 = gvar.gvar(0.1573, 0.0085)
    O5 = gvar.gvar(0.0564, 0.0052)
    O1, O2, O3, O4, O5 = convertGeV(O1, O2, O3, O4, O5)
    me = numpy.array([O1, O2, O3, O4, O5])
    wilson_coef(me)
    return 0
def make_prior(nexp):               # make priors for fit parameters
    prior = lsqfit.GPrior()         # Gaussian prior -- dictionary-like
    prior['a'] = [gd.gvar(0.5, 0.5) for i in range(nexp)]
    de = [gd.gvar(0.9, 0.01) for i in range(nexp)]
    de[0] = gd.gvar(1, 0.5)
    prior['E'] = [sum(de[:i + 1]) for i in range(nexp)]
    return prior
def read_data(filename, dpath_pion_mp, dpath_pion_pp, dpath_etas_mp,
              dpath_etas_pp, plot='off'):
    pion_mp = c51.fold(c51.open_data(filename, dpath_pion_mp))
    pion_pp = c51.fold(c51.open_data(filename, dpath_pion_pp))
    etas_mp = c51.fold(c51.open_data(filename, dpath_etas_mp))
    etas_pp = c51.fold(c51.open_data(filename, dpath_etas_pp))
    #pion_mp = c51.open_data(filename, dpath_pion_mp)
    #pion_pp = c51.open_data(filename, dpath_pion_pp)
    #etas_mp = c51.open_data(filename, dpath_etas_mp)
    #etas_pp = c51.open_data(filename, dpath_etas_pp)
    pion = pion_mp / pion_pp
    etas = etas_mp / etas_pp
    pion_gv = c51.make_gvars(pion)
    etas_gv = c51.make_gvars(etas)
    # strip correlations, keeping only the means and standard deviations
    pion_gv = gv.gvar(np.array([g.mean for g in pion_gv]),
                      np.array([g.sdev for g in pion_gv]))
    etas_gv = gv.gvar(np.array([g.mean for g in etas_gv]),
                      np.array([g.sdev for g in etas_gv]))
    if plot == 'on':
        x = np.arange(len(pion_gv))
        c51.scatter_plot(x, pion_gv, title='pion mres')
        c51.scatter_plot(x, etas_gv, title='etas mres')
    else:
        pass
    return pion_gv, etas_gv
def build_prior(self, params):
    """build prior from file"""
    prior = BufferDict()
    name = self.name
    nexp = self.nexp
    noxp = self.noxp
    prior_file = open(self.priorfile, 'r')
    pp = yaml.safe_load(prior_file)
    prior['log(' + name + ':dE)'] = [gvar(0, 0) for i in range(nexp)]
    prior['log(' + name + ':a)'] = [gvar(0, 0) for i in range(nexp)]
    # non-osc. state priors
    prior['log(' + name + ':dE)'][0] = log(gvar(pp['e0'][0], pp['e0'][1]))
    prior['log(' + name + ':a)'][0] = log(gvar(pp['a0'][0], pp['a0'][1]))
    for i in range(1, nexp):
        prior['log(' + name + ':dE)'][i] = log(gvar(pp['e1'][0], pp['e1'][1]))
        prior['log(' + name + ':a)'][i] = log(gvar(pp['a1'][0], pp['a1'][1]))
    # osc. state priors
    if self.name != 'pimom0':
        prior['log(' + name + ':dEo)'] = [0 for i in range(noxp)]
        prior['log(' + name + ':ao)'] = [0 for i in range(noxp)]
        prior['log(' + name + ':dEo)'][0] = log(gvar(pp['o0'][0], pp['o0'][1]))
        prior['log(' + name + ':ao)'][0] = log(gvar(pp['b0'][0], pp['b0'][1]))
        for i in range(1, noxp):
            prior['log(' + name + ':dEo)'][i] = log(gvar(pp['o1'][0], pp['o1'][1]))
            prior['log(' + name + ':ao)'][i] = log(gvar(pp['b1'][0], pp['b1'][1]))
    #print(prior)
    return prior
def make_prior(nexp):               # make priors for fit parameters
    prior = gv.BufferDict()         # Gaussian prior -- dictionary-like
    prior["a"] = [gv.gvar(0.5, 0.4) for i in range(nexp)]
    de = [gv.gvar(0.9, 0.01) for i in range(nexp)]
    de[0] = gv.gvar(1, 0.4)
    prior["E"] = [sum(de[: i + 1]) for i in range(nexp)]
    return prior
def make_data(N=100, eps=0.01):     # make x, y fit data
    x = np.array([1., 2., 3., 4., 5., 6., 7., 8., 9., 10.,
                  12., 14., 16., 18., 20.])[4:]
    cr = gv.gvar(0.0, eps)
    c = [gv.gvar(cr(), cr.sdev) for n in range(N)]
    x_xmax = x / max(x)
    noise = 1 + sum(c[n] * x_xmax ** n for n in range(N))
    y = f_exact(x) * noise          # noisy y[i]s
    return x, y
def make_data(nterm=100, eps=0.01):  # make x, y fit data
    x = np.array([1., 2., 3., 4., 5., 6., 7., 8., 9., 10.,
                  12., 14., 16., 18., 20.])
    cr = gd.gvar(0.0, eps)
    c = [gd.gvar(cr(), eps) for n in range(100)]
    x_xmax = x / max(x)
    noise = 1 + sum(c[n] * x_xmax ** n for n in range(100))
    y = f_exact(x, nterm) * noise   # noisy y[i]s
    return x, y
def make_data(nterm=100, eps=0.01):  # make x, y fit data
    x = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0,
                  12.0, 14.0, 16.0, 18.0, 20.0])[4:]
    cr = gv.gvar(0.0, eps)
    c = [gv.gvar(cr(), eps) for n in range(100)]
    x_xmax = x / max(x)
    noise = 1 + sum(c[n] * x_xmax ** n for n in range(100))
    y = f_exact(x, nterm) * noise   # noisy y[i]s
    return x, y
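# --- Illustrative note (not in the source): calling a GVar, as cr() does
# above, draws a random number from the GVar's distribution. Each c[n]
# therefore gets a randomly offset mean but the same fixed width eps.
import gvar as gv
cr = gv.gvar(0.0, 0.01)
print(cr(), cr())   # two independent draws from N(0, 0.01**2)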
def make_prior():
    prior = gv.BufferDict(c=gv.gvar(['0(5)', '0(5)']))
    if LSQFIT_ONLY:
        return prior
    if MULTI_W:
        prior['erfinv(2w-1)'] = gv.gvar(19 * ['0(1)']) / 2 ** 0.5
    else:
        prior['erfinv(2w-1)'] = gv.gvar('0(1)') / 2 ** 0.5
    return prior
def make_data():                    # make x, y fit data
    x = np.array([1., 2., 3., 4., 5., 6., 7., 8., 9., 10.,
                  12., 14., 16., 18., 20.])
    cr = gv.gvar(0.0, 0.01)
    c = [gv.gvar(cr(), 0.01) for n in range(100)]
    x_xmax = x / max(x)
    noise = 1 + sum(c[n] * x_xmax ** n for n in range(100))
    y = f_exact(x) * noise          # noisy y[i]s
    xfac = gv.gvar(1.0, 0.00001)    # gaussian distrib'n: 1 +- 0.001%
    x = np.array([xi * gv.gvar(xfac(), xfac.sdev) for xi in x])  # noisy x[i]s
    return x, y
def stack_prior_states(prior, lkey, mstk, estk):
    """ Fill a prior dictionary with states which are determined from a set
    of stacks.  Prior energies within stacks are correlated, stacks are
    uncorrelated.  Other priors are chosen from some generic defaults.
    """
    gstack = gv.BufferDict()
    lorder = []
    ## -- create independent gvars for each stack
    for i, ms, es in zip(range(len(mstk)), mstk, estk):
        mde = np.array(ms[1:]) - np.array(ms[:-1])
        mde = np.insert(mde, 0, ms[0])
        gstack[i] = gv.gvar(mde, es)
        for m in ms:
            lorder.append([m, i])
    ## -- sorting of states
    lsort = np.transpose(sorted(lorder, key=lambda x: x[0]))
    ## -- sorted list of masses vs stacks
    ncount = [len(x) for x in mstk]   ## -- number of states in stacks
    ## -- positions of elements in stacks
    npos = [[i for i, y in enumerate(lsort[1]) if x == y]
            for x in range(len(mstk))]
    niter = [0 for i in range(len(mstk))]
    lasti = -1
    ## -- construct the gvars for the spectrum
    gstate = np.array([])
    for i, x in zip(range(len(lsort[1])), lsort[1]):
        if lasti == int(x):
            ## -- correlated with most recent, just add next splitting
            gstate = np.append(gstate, gstack[int(x)][niter[int(x)]])
        else:
            ## -- find out if this stack has been used yet
            if niter[int(x)] == 0:
                ## -- if not, decorrelate with all previous states
                gstate = np.append(gstate, gstack[int(x)][0] - np.sum(gstate))
            else:
                ## -- else, decorrelate with all states between last occurrence
                lastpos = [j for j, y in enumerate(lsort[1][:i])
                           if x == int(y)][-1]
                gstate = np.append(
                    gstate,
                    gstack[int(x)][niter[int(x)]] - np.sum(gstate[lastpos + 1:]))
        lasti = int(x)
        niter[lasti] += 1
    ## -- defined again in DEFINES
    lAm = 1e2    # log amplitude mean
    lAcs = 1e3   # log amplitude sdev (source)
    lAks = 1e1   # log amplitude sdev (sink)
    Am = 0       # amplitude mean
    Acs = 1e3    # amplitude sdev (source)
    Aks = 1e1    # amplitude sdev (sink)
    ## -- append the prior states for this spectrum
    for p in gstate:
        pAmp = gv.gvar(
            [lAm] + [Am] * (len(lkey) - 2),
            [lAcs] + [Acs] * ((len(lkey) - 3) // 2)
            + [Aks] * ((len(lkey) - 1) // 2)
            )
        append_prior_state(prior, lkey, np.insert(pAmp, 0, p))
    pass
def readFile():
    with open('/home/cchang5/code/chipt/output.txt', 'r') as f:
        for line in f:
            r = numpy.array(line.split(','), float)
            O1 = gvar.gvar(r[0], r[1])
            O2 = gvar.gvar(r[2], r[3])
            O3 = gvar.gvar(r[4], r[5])
            O4 = gvar.gvar(r[6], r[7])
            O5 = gvar.gvar(r[8], r[9])
            convertGeV(O1, O2, O3, O4, O5)
    return 0
def print_model(self, par, outfile):
    """print the fit data and model and the difference"""
    fit = self.results
    t, g, dg, gth, dgth = self.fitter.collect_fitresults()['2pt' + self.name]
    ofile = open(outfile, 'a')
    ofile.write(
        "  t  | {:12s} value | sigma_m sigma_v \n".format('2pt' + self.name)
        )
    for it in range(0, self.tmax - self.tmin):
        data = gvar(g[it], dg[it])
        model = gvar(gth[it], dgth[it])
        diff1 = (data.mean - model.mean) / data.sdev
        diff2 = (data.mean - model.mean) / model.sdev
        ofile.write(
            " {:3d} | {:<20s} {:<20s} | {:+5.3f} {:+5.3f} \n".format(
                t[it], data.fmt(), model.fmt(), diff1, diff2)
            )
def test_ravg_unwgtd(self):
    " unweighted RAvg "
    # if not have_gvar:
    #     return
    mean = np.random.uniform(-10., 10.)
    x = gv.gvar(mean, 0.1)
    ravg = RAvg(weighted=False)
    N = 30
    for i in range(N):
        ravg.add(gv.gvar(x(), x.sdev))
    np_assert_allclose(ravg.sdev, x.sdev / (N ** 0.5))
    self.assertLess(abs(ravg.mean - mean), 5 * ravg.sdev)
    self.assertGreater(ravg.Q, 1e-3)
    self.assertEqual(ravg.dof, N - 1)
def add_noise(data, frac):
    """ add noise to correlators in list corrlist; frac = rel. size """
    global_noise = gv.gvar(1, frac)
    ans = gv.BufferDict()
    for k in data:
        ## add: a) uncorr. noise (smear for zeros); b) corr. noise
        corr = data[k]
        dcorr = np.abs(corr * frac)
        dcorr[1:-1] = (dcorr[1:-1] + dcorr[2:] + dcorr[:-2]) / 3.0
        dcorr = gv.gvar(np.zeros(dcorr.shape), dcorr)
        dcorr = next(gv.bootstrap_iter(dcorr))
        ans[k] = (corr + dcorr) * global_noise
    return ans
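# --- Illustrative usage (not in the source): applying add_noise to a
# synthetic correlator. The decay rate and noise fraction are invented.
import numpy as np
import gvar as gv

data = dict(corr=np.exp(-0.5 * np.arange(10.)))   # fake correlator
noisy = add_noise(data, frac=0.01)
print(noisy['corr'][:3])   # gvar-valued: ~1% uncorrelated + correlated noise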
def test_gvar_scalar(self):
    # exponential with errors
    gam = gv.gvar('1.0(1)')
    def f(x, y):
        return gam * y
    odeint = ode.Integrator(deriv=f, h=1, tol=1e-10)
    y0 = gv.gvar('1.0(1)')
    y1 = odeint(y0, (0, 2))
    exact = y0 * np.exp(gam * 2)
    self.assertAlmostEqual((y1 / exact).mean, 1.)
    self.assertGreater(1e-8, (y1 / exact).sdev)
    self.assertTrue(
        gv.equivalent(odeint(y1, (2, 0)), y0, rtol=1e-6, atol=1e-6)
        )
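# --- Illustrative usage (a sketch mirroring the test above): propagating
# GVar uncertainties through gvar.ode.Integrator for dy/dx = gam * y.
import numpy as np
import gvar as gv
from gvar import ode

gam = gv.gvar('0.50(5)')
y0 = gv.gvar('1.00(10)')
odeint = ode.Integrator(deriv=lambda x, y: gam * y, h=1, tol=1e-10)
y1 = odeint(y0, (0, 1))
print(y1, y0 * np.exp(gam))   # numerical result vs exact solution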
def read_fit_file(fit_file):
    """ Reads a fit file and returns the fit parameters as a dictionary """
    rdict = {}  ## -- return dictionary
    mdict = {}  ## -- temporary dictionary (mean)
    sdict = {}  ## -- temporary dictionary (sdev)
    fitin = open(fit_file, 'r')
    lread = fitin.read().split('\n')
    for line in lread:
        lspl = line.split(' ')
        if lspl[0] == 'chi2/dof':
            rdict['chi2'] = float(lspl[1])
        elif lspl[0] == 'dof':
            rdict['dof'] = int(lspl[1])
        elif lspl[0] == 'rdof':
            rdict['rdof'] = int(lspl[1])
        elif lspl[0] == 'Q':
            rdict['Q'] = float(lspl[1])
        elif lspl[0] == 'fit_mean':
            key = lspl[1]
            mdict[key] = []
            for x in lspl[2:]:
                mdict[key].append(x)
        elif lspl[0] == 'fit_sdev':
            key = lspl[1]
            sdict[key] = []
            for x in lspl[2:]:
                sdict[key].append(x)
    for key in mdict:
        rdict[key] = gv.gvar(mdict[key], sdict[key])
    return rdict
def test_simulation(self):
    """ CorrFitter.simulated_data_iter """
    models = [self.mkcorr(a="a", b="a", dE="dE", tp=None)]
    fitter = self.dofit(models)
    data = self.data
    diter = gv.BufferDict()
    k = list(data.keys())[0]
    # make n config dataset corresponding to data
    n = 100
    diter = gv.raniter(
        g=gv.gvar(gv.mean(self.data[k]), gv.evalcov(self.data[k]) * n),
        n=n
        )
    dataset = gv.dataset.Dataset()
    for d in diter:
        dataset.append(k, d)
    pexact = fitter.fit.pmean
    covexact = gv.evalcov(gv.dataset.avg_data(dataset)[k])
    for sdata in fitter.simulated_data_iter(n=2, dataset=dataset):
        sfit = fitter.lsqfit(
            data=sdata, prior=self.prior, p0=pexact, print_fit=False
            )
        diff = dict()
        for i in ['a', 'logdE']:
            diff[i] = sfit.p[i][0] - pexact[i][0]
        c2 = gv.chi2(diff)
        self.assertLess(c2 / c2.dof, 15.)
        self.assert_arraysclose(gv.evalcov(sdata[k]), covexact)
def test_nopdf(self):
    " integrator(f ... nopdf=True) and pdf(p) "
    xarray = gv.gvar([5., 3.], [[4., 1.9], [1.9, 1.]])
    xdict = gv.BufferDict([(0, 1), (1, 1)])
    xdict = gv.BufferDict(xdict, buf=xarray)
    pdf = PDFIntegrator(xarray).pdf
    def farray(x):
        if hasattr(x, 'keys'):
            x = x.buf
        prob = pdf(x)
        return [x[0] * prob, x[0] ** 2 * prob, prob]
    def fdict(x):
        if hasattr(x, 'keys'):
            x = x.buf
        prob = pdf(x)
        return gv.BufferDict([(0, x[0] * prob), (1, x[0] ** 2 * prob),
                              (3, prob)])
    for x in [xarray, xdict]:
        x[0] -= 0.1 * x[0].sdev
        x[1] += 0.1 * x[1].sdev
        for f in [farray, fdict]:
            integ = PDFIntegrator(x)
            integ(f, neval=1000, nitn=5)
            r = integ(f, neval=1000, nitn=5, nopdf=True, adapt=False)
            rmean = r[0]
            rsdev = np.sqrt(r[1] - rmean ** 2)
            self.assertTrue(abs(rmean.mean - 5.) < 5. * rmean.sdev)
            self.assertTrue(abs(rsdev.mean - 2.) < 5. * rsdev.sdev)
def main():
    # pendulum data exhibits experimental error in ability to measure theta
    t = gv.gvar([
        '0.10(1)', '0.20(1)', '0.30(1)', '0.40(1)', '0.50(1)',
        '0.60(1)', '0.70(1)', '0.80(1)', '0.90(1)', '1.00(1)'
        ])
    theta = gv.gvar([
        '1.477(79)', '0.791(79)', '-0.046(79)', '-0.852(79)',
        '-1.523(79)', '-1.647(79)', '-1.216(79)', '-0.810(79)',
        '0.185(79)', '0.832(79)'
        ])
    for t_n, theta_n in zip(t, theta):
        print("{} {:>10}".format(t_n.fmt(2), theta_n.fmt(3)))
    # prior: assume experimental error in ability to specify theta(0)
    prior = gv.BufferDict()
    prior['g/l'] = gv.gvar('40(20)')
    prior['theta(0)'] = gv.gvar('1.571(50)')
    prior['t'] = t
    # fit function: use class Pendulum object to integrate pendulum motion
    def fitfcn(p, t=None):
        if t is None:
            t = p['t']
        pendulum = Pendulum(p['g/l'])
        return pendulum(p['theta(0)'], t)
    # do the fit and print results
    fit = lsqfit.nonlinear_fit(data=theta, prior=prior, fcn=fitfcn)
    sys.stdout = tee.tee(STDOUT, open('case-pendulum.out', 'w'))
    print(fit.format(maxline=True))
    sys.stdout = STDOUT
    print('fit/exact for (g/l) =', fit.p['g/l'] / (2 * np.pi) ** 2)
    print('fit/exact for theta(0) =', fit.p['theta(0)'] / (np.pi / 2.))
    if MAKE_PLOT:
        # make figure (saved to file pendulum.pdf)
        plt.figure(figsize=(4, 3))
        # start plot with data
        plt.errorbar(
            x=gv.mean(t), xerr=gv.sdev(t), y=gv.mean(theta),
            yerr=gv.sdev(theta), fmt='k.',
            )
        # use best-fit function to add smooth curve for 100 points
        t = np.linspace(0., 1.1, 100)
        th = fitfcn(fit.p, t)
        show_plot(t, th)
def main():
    gv.ranseed([2009, 2010, 2011, 2012, 2013])  # initialize random numbers (opt.)
    x, y = make_data()              # make fit data
    p0 = None                       # make larger fits go faster (opt.)
    sys_stdout = sys.stdout
    for nexp in range(3, 6):
        prior = make_prior(nexp, x)
        fit = lsqfit.nonlinear_fit(data=y, fcn=f, prior=prior, p0=p0)  # ,svdcut=SVDCUT)
        if fit.chi2 / fit.dof < 1.:
            p0 = fit.pmean          # starting point for next fit (opt.)
        fit.check_roundoff()
        if nexp == 4:
            sys.stdout = tee.tee(sys.stdout, open("eg2.out", "w"))
        print('************************************* nexp =', nexp)
        print(fit)                  # print the fit results
        E = fit.p['E']              # best-fit parameters
        a = fit.p['a']
        print('E1/E0 =', E[1] / E[0], ' E2/E0 =', E[2] / E[0])
        print('a1/a0 =', a[1] / a[0], ' a2/a0 =', a[2] / a[0])
        sys.stdout = sys_stdout
        print()
    #
    if DO_BOOTSTRAP:
        Nbs = 10                    # number of bootstrap copies
        outputs = {'E1/E0': [], 'E2/E0': [], 'a1/a0': [], 'a2/a0': [],
                   'E1': [], 'a1': []}   # results
        for bsfit in fit.bootstrap_iter(n=Nbs):
            E = bsfit.pmean['E']    # best-fit parameters
            a = bsfit.pmean['a']
            outputs['E1/E0'].append(E[1] / E[0])   # accumulate results
            outputs['E2/E0'].append(E[2] / E[0])
            outputs['a1/a0'].append(a[1] / a[0])
            outputs['a2/a0'].append(a[2] / a[0])
            outputs['E1'].append(E[1])
            outputs['a1'].append(a[1])
            # print(E[:2])
            # print(a[:2])
            # print(bsfit.chi2 / bsfit.dof)
        # extract means and standard deviations from the bootstrap output
        for k in outputs:
            outputs[k] = gv.gvar(np.mean(outputs[k]), np.std(outputs[k]))
        print('Bootstrap results:')
        print('E1/E0 =', outputs['E1/E0'], ' E2/E1 =', outputs['E2/E0'])
        print('a1/a0 =', outputs['a1/a0'], ' a2/a0 =', outputs['a2/a0'])
        print('E1 =', outputs['E1'], ' a1 =', outputs['a1'])
    if DO_PLOT:
        print(fit.format(100))      # print the fit results
        import pylab as pp
        from gvar import mean, sdev
        fity = f(x, fit.pmean)
        ratio = y / fity
        pp.xlim(0, 21)
        pp.xlabel('x')
        pp.ylabel('y/f(x,p)')
        pp.errorbar(x=gv.mean(x), y=gv.mean(ratio), yerr=gv.sdev(ratio),
                    fmt='ob')
        pp.plot([0.0, 21.0], [1.0, 1.0])
        pp.show()
def test_ravgarray_unwgtd(self):
    " unweighted RAvgArray "
    # if not have_gvar:
    #     return
    mean = np.random.uniform(-10., 10., (2,))
    cov = np.array([[1., 0.5], [0.5, 2.]]) / 10.
    N = 30
    x = gv.gvar(mean, cov)
    r = gv.raniter(x, N)
    ravg = RAvgArray((1, 2), weighted=False)
    for ri in r:
        ravg.add([gv.gvar(ri, cov)])
    np_assert_allclose(gv.evalcov(ravg.flat), cov / N)
    for i in range(2):
        self.assertLess(abs(mean[i] - ravg[0, i].mean), 5 * ravg[0, i].sdev)
    self.assertEqual(ravg.dof, 2 * N - 2)
    self.assertGreater(ravg.Q, 1e-3)
def test_ravg_wgtd(self):
    " weighted RAvg "
    # if not have_gvar:
    #     return
    mean = np.random.uniform(-10., 10.)
    xbig = gv.gvar(mean, 1.)
    xsmall = gv.gvar(mean, 0.1)
    ravg = RAvg()
    N = 30
    for i in range(N):
        ravg.add(gv.gvar(xbig(), xbig.sdev))
        ravg.add(gv.gvar(xsmall(), xsmall.sdev))
    np_assert_allclose(
        ravg.sdev, 1 / (N * (1. / xbig.var + 1. / xsmall.var)) ** 0.5
        )
    self.assertLess(abs(ravg.mean - mean), 5 * ravg.sdev)
    self.assertGreater(ravg.Q, 1e-3)
    self.assertEqual(ravg.dof, 2 * N - 1)
def prior_analysis():
    x, y = make_data()
    # loose prior
    sys.stdout = tee.tee(STDOUT, open(OUTDIR + 'eg-appendix1d.out', 'w'))
    prior = gv.gvar(91 * ['0(3)'])      # prior for the fit
    fit = lsqfit.nonlinear_fit(data=(x, y), prior=prior, fcn=f)
    print(fit.format(maxline=True))
    # really loose prior
    sys.stdout = tee.tee(STDOUT, open(OUTDIR + 'eg-appendix1h.out', 'w'))
    prior = gv.gvar(91 * ['0(20)'])     # prior for the fit
    fit = lsqfit.nonlinear_fit(data=(x, y), prior=prior, fcn=f)
    print(fit.format(maxline=True))
    make_plot(x, y, fit, xmax=0.96)
    # tight prior
    sys.stdout = tee.tee(STDOUT, open(OUTDIR + 'eg-appendix1e.out', 'w'))
    prior = gv.gvar(91 * ['0.0(3)'])    # prior for the fit
    fit = lsqfit.nonlinear_fit(data=(x, y), prior=prior, fcn=f)
    print(fit.format(maxline=True))
def print_model(self, par, outfile):
    """print the fit data and model and the difference"""
    fit = self.results
    names = ["2pt" + self.child.name, "2pt" + self.parent.name]
    for T in self.Texts:
        names.append("3pt" + self.name + "T" + str(T))
    print(names)
    ofile = open(outfile, 'a')
    ofile.write(
        ("{:5s} models, {:d}+{:d} fit, {:d} to T-{:d} window "
         + strftime("%H:%M:%S") + "\n").format(
            self.name, self.nexp, self.nexp, self.child.tmin, self.child.tmax)
        )
    for name in names:
        t, g, dg, gth, dgth = self.fitter.collect_fitresults()[name]
        ofile.write(
            "  t  | {:<11s} theory | sigma_data sigma_th \n".format(name)
            )
        for it in range(0, len(t)):
            data = gvar(g[it], dg[it])
            model = gvar(gth[it], dgth[it])
            diff1 = (data.mean - model.mean) / data.sdev
            diff2 = (data.mean - model.mean) / model.sdev
            ofile.write(
                " {:3d} | {:<20s} {:<20s} | {:+6.3f} {:+6.3f} \n".format(
                    t[it], data.fmt(), model.fmt(), diff1, diff2)
                )
def main():
    l = gv.gvar(0.25, 0.0005)               # length of pendulum
    theta_max = gv.gvar(np.pi / 6, 0.025)   # max angle of swing
    y = make_pendulum(theta_max, l)         # y(t) = [theta(t), d/dt theta(t)]
    T = find_period(y, Tapprox=1.0)
    print('period T = {} sec'.format(T))
    fmt = 'uncertainty = {:.2f} min/day\n'
    print(fmt.format((T.sdev / T.mean) * 60. * 24.))
    # error budget for T
    inputs = dict(l=l, theta_max=theta_max)
    outputs = {'T': T}
    print(gv.fmt_errorbudget(outputs=outputs, inputs=inputs))
    # check errors in T using a simulation
    Tlist = []
    for i in range(200):
        y = make_pendulum(theta_max(), l())
        Tlist.append(find_period(y, Tapprox=1.0))
    print('period T = {:.4f} +- {:.4f}'.format(np.mean(Tlist), np.std(Tlist)))
matplotlib.use('Agg')
import os
import sys
import lsqfit
from corrfitter import Corr2, Corr3, CorrFitter
import numpy as np
from gvar import log, exp, evalcov
import gvar as gv
import math
from math import exp as num_exp
import matplotlib.pyplot as plt
import g2tools as g2

lsqfit.LSQFit.fmt_parameter = '%8.4f +- %8.4f'

w0 = gv.gvar('0.1715(9)')
#w0overa = gv.gvar('1.1367(5)')    # very coarse ensemble
#ZV = gv.gvar('0.9837(20)')        # vc
w0overa = gv.gvar('1.4149(6)')     # coarse ensemble
ZV = gv.gvar('0.99220(40)')        # coarse - at strange mass
ZVqed = gv.gvar('0.999544(14)') * ZV   # vc/c? - where are these listed
hbarc = 0.197326968
a = (w0 / w0overa) / hbarc         # in units of (GeV)^-1
print("lattice spacing: ", (w0 / w0overa))
    }, 'sumintegx', axes=2)

#### GENERATE FAKE DATA ####

prior = gp.predfromdata({
    'suminteg': 1,
    'sumintegx': 1,
}, ['data', 'xdata'])
priorsample = next(gvar.raniter(prior))
datamean = priorsample['data']
dataerr = np.full_like(datamean, 1)
datamean = datamean + dataerr * np.random.randn(*dataerr.shape)
data = gvar.gvar(datamean, dataerr)

# check the integral is one with trapezoid rule
x = xdata['x']
y = priorsample['xdata']
checksum = np.sum((y[:, 1:] + y[:, :-1]) / 2 * np.diff(x, axis=1))
print('sum_i int dx f_i(x) =', checksum)
checksum = np.sum(((y * x)[:, 1:] + (y * x)[:, :-1]) / 2 * np.diff(x, axis=1))
print('sum_i int dx x f_i(x) =', checksum)

#### FIT ####

pred = gp.predfromdata({
    'suminteg': 1,
    'sumintegx': 1,
    'data': data,
def test_pickle_gvar(self):
    b = BufferDict(dict(a=gv.gvar(1, 2), b=[gv.gvar(3, 4), gv.gvar(5, 6)]))
    sb = pckl.dumps(b)
    c = pckl.loads(sb)
    for k in b:
        self.assert_gvclose(b[k], c[k], rtol=1e-6)
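# --- Illustrative sketch (not in the source): pickling a BufferDict, as in
# the test above, also preserves correlations among the GVars it holds.
import pickle
import gvar as gv

b = gv.BufferDict()
b['a'] = gv.gvar(1, 2)
b['c'] = b['a'] + gv.gvar(0, 1)            # correlated with b['a']
c = pickle.loads(pickle.dumps(b))
print(gv.evalcorr([c['a'], c['c']]))       # correlation survives round trip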
def m_e_inv_NM(x):
    k1 = gv.gvar('0.89(19)')
    return 1 + x * k1
    array[t][62] = my_data[1][20]
    array[t][63] = my_data[3][20]
    #array[t][5] = 10.**(my_data[2][16])
    #array[t][7] = (10.**(my_data[2][17]) / (10**(my_data[2][18]) + 10**(my_data[2][17]))) * 100.
    #array[t][2] = np.log(10.) * 10.**(my_data[2][17]) * (my_data[3][17] - my_data[1][17]) / 2.   # Torus
    #array[t][4] = np.log(10.) * 10.**(my_data[2][18]) * (my_data[3][18] - my_data[1][18]) / 2.   # SF Region Dust
    #array[t][6] = np.log(10.) * 10.**(my_data[2][16]) * (my_data[3][16] - my_data[1][16]) / 2.   # Galaxy
    #array[t][8] = np.sqrt(array[t][2]**2. * (array[t][3] / (array[t][1] + array[t][3])**2.)**2.
    #              + array[t][4]**2. * (array[t][1] / (array[t][1] + array[t][3])**2.)**2.)
    #print array[t][8]
    #print
    #print array[t][1], array[t][2]
    x = gvar.gvar(my_data[2][17], (my_data[3][17] - my_data[1][17]) / 2.)
    y = gvar.gvar(my_data[2][18], (my_data[3][18] - my_data[1][18]) / 2.)
    z = 1. / (1. + 10.**(y - x))
    array[t][64] = z.mean * 100.
    array[t][65] = z.sdev * 100.
    print(z.sdev / z.mean)
    #array[t][7] = 10.**(my_data[2][13])   # L_IR
    #array[t][8] = np.log(10.) * 10.**(my_data[2][13]) * (my_data[3][13] - my_data[1][13]) / 2.   # STD L_IR
    #array[t][9] = my_data[2][19]
    #array[t][10] = (my_data[3][19] - my_data[1][19]) / 2.
def T_SM_eff(x):
    k1, k2 = gv.gvar(
        [6.247360336420315, -16.916984419115057],
        [[0.12573594, -0.49209029], [-0.49209029, 2.58439037]]
        )
    m_e_inv = 1 + x * k1 + x**2 * k2
    return T_SM(x) * m_e_inv
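# --- Illustrative check (not in the source): k1 and k2 above are built from
# a mean vector plus a full covariance matrix, so they come out correlated.
import gvar as gv

k1, k2 = gv.gvar([6.2474, -16.9170],
                 [[0.12573594, -0.49209029], [-0.49209029, 2.58439037]])
print(gv.evalcorr([k1, k2])[0, 1])   # roughly -0.86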
fpi_list = []
fpi_bs_list = []
pion_list = []
pion_bs_list = []
kaon_list = []
kaon_bs_list = []
for e in ens:
    print(e)
    #print '%s_mpi2' % e, ',', '+-', ',', '%s_mkmpi2' % e, ',', '+-', ',', '%s_L5' % e, ',', '+-'
    # read a_fm
    sql_cmd = "SELECT a_fm FROM callat_corr.hisq_ensembles WHERE tag='%s';" % (e)
    psql.cur.execute(sql_cmd)
    a_fm = psql.cur.fetchone()[0]
    a_gv = gv.gvar(
        float(a_fm[:-4]),
        10**(-1.0 * int(len(a_fm[:-4].split('.')[1]))) * float(a_fm[-3:-1])
        )
    # read mres
    # mres meta
    mmeta = cmeta[e]['mres']
    # pion meta
    pmeta = cmeta[e]['pion']
    # kaon meta
    kmeta = cmeta[e]['kaon']
    # write output
    prior['a_%s' % (e)] = a_gv
    for ml in dataset[e]['ml']:
        # get ml mres
        mmetal = mmeta[ml]
        sql_cmd = "SELECT id, result->>'mres' FROM callat_proj.jmu WHERE corr1_id=%s AND corr2_id=%s AND tmin=%s AND tmax=%s;" % (
            mmetal['meta_id']['mp'], mmetal['meta_id']['pp'],
covariance = np.cov(B)          # Covariance matrix of B
# print covariance
correlation = np.corrcoef(B)    # Correlation matrix
#print correlation
# print correlation.shape
#Ybar = [0 for x in range(1, 4)]
#Ybar = [6.97, 6.38, 4.68]
with open("meanA.dat") as f:
    mean = f.readlines()
mean = [float(x.strip('\n')) for x in mean]
#print mean
Y = gv.gvar(mean, covariance)
#print Y
#X = np.array([1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.])
#X = np.array([3., 4., 5., 6., 7., 8., 9., 10., 11., 12.])
X = np.array([3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16.])
#X = np.array([3., 4., 5., 6., 7., 8.])
#X = np.array([1., 2., 3., 4., 5., 6.])

''' Define the function required for fit '''

def f(x, p):
    a = p['a']   # ****** #
    b = p['b']
    E = p['E']
def T_SM_eff_1(x):
    k1 = gv.gvar('3.33(18)')
    m_e_inv = 1 + x * k1
    return T_SM(x) * m_e_inv
import lsqfitgp as lgp
from matplotlib import pyplot as plt
import numpy as np
import gvar

data_deriv = 1

time = np.linspace(-5, 5, 10)
x = np.empty(len(time), dtype=[('time', float), ('label', int)])
x['time'] = time
x['label'] = 1

data_error = 0.05
data_mean = np.cos(time)
data_mean += data_error * np.random.randn(*data_mean.shape)
data = gvar.gvar(data_mean, np.full_like(data_mean, data_error))

label_scale = 5
corr = lgp.ExpQuad(scale=label_scale)(0, 1)
print(f'corr = {corr:.3g}')

def makegp(params):
    kernel_time = lgp.ExpQuad(scale=params['time_scale'], dim='time')
    kernel_label = lgp.ExpQuad(scale=label_scale, dim='label')
    gp = lgp.GP(kernel_time * kernel_label)
    gp.addx(x, 'data', deriv=(data_deriv, 'time'))
    gp.addx(np.array([(0, 0)], dtype=x.dtype), 'fixed_point')
    return gp
def main():
    x, y = make_data()              # make fit data
    # y = gv.gvar(gv.mean(y), 0.75**2 * gv.evalcov(y))
    p0 = None                       # make larger fits go faster (opt.)
    sys_stdout = sys.stdout
    sys.stdout = tee.tee(sys.stdout, open("eg1.out", "w"))
    for nexp in range(1, 7):
        prior = make_prior(nexp)
        fit = lsqfit.nonlinear_fit(data=(x, y), fcn=fcn, prior=prior, p0=p0)
        if fit.chi2 / fit.dof < 1.:
            p0 = fit.pmean          # starting point for next fit (opt.)
        print('************************************* nexp =', nexp)
        print(fit.format())         # print the fit results
        E = fit.p['E']              # best-fit parameters
        a = fit.p['a']
        if nexp > 2:
            print('E1/E0 =', E[1] / E[0], ' E2/E0 =', E[2] / E[0])
            print('a1/a0 =', a[1] / a[0], ' a2/a0 =', a[2] / a[0])
            print()
    # error budget
    outputs = {
        'E1/E0': E[1] / E[0], 'E2/E0': E[2] / E[0],
        'a1/a0': a[1] / a[0], 'a2/a0': a[2] / a[0]
        }
    inputs = {'E': fit.prior['E'], 'a': fit.prior['a'], 'y': y}
    inputs = collections.OrderedDict()
    inputs['a'] = fit.prior['a']
    inputs['E'] = fit.prior['E']
    inputs['y'] = fit.data[1]
    print('================= Error Budget Analysis')
    print(fit.fmt_values(outputs))
    print(fit.fmt_errorbudget(outputs, inputs))
    sys.stdout = sys_stdout
    # print(gv.gvar(str(a[1])) / gv.gvar(str(a[0])))
    # print(gv.evalcorr([fit.p['a'][1], fit.p['E'][1]]))
    # print(fit.format(True))

    # redo fit with 4 parameters since that is enough
    prior = make_prior(4)
    fit = lsqfit.nonlinear_fit(data=(x, y), fcn=fcn, prior=prior,
                               p0=fit.pmean)
    sys.stdout = tee.tee(sys_stdout, open("eg1a.out", "w"))
    print('--------------------- original fit')
    print(fit.format())
    E = fit.p['E']                  # best-fit parameters
    a = fit.p['a']
    print('E1/E0 =', E[1] / E[0], ' E2/E0 =', E[2] / E[0])
    print('a1/a0 =', a[1] / a[0], ' a2/a0 =', a[2] / a[0])
    print()
    # extra data 1
    print('\n--------------------- new fit to extra information')
    def ratio(p):
        return p['a'][1] / p['a'][0]
    newfit = lsqfit.nonlinear_fit(data=gv.gvar(1, 1e-5), fcn=ratio,
                                  prior=fit.p)
    print(newfit.format())
    E = newfit.p['E']
    a = newfit.p['a']
    print('E1/E0 =', E[1] / E[0], ' E2/E0 =', E[2] / E[0])
    print('a1/a0 =', a[1] / a[0], ' a2/a0 =', a[2] / a[0])
    if DO_PLOT:
        import matplotlib.pyplot as plt
        ratio = y / fit.fcn(x, fit.pmean)
        plt.xlim(4, 15)
        plt.ylim(0.95, 1.05)
        plt.xlabel('x')
        plt.ylabel('y / f(x,p)')
        plt.yticks([0.96, 0.98, 1.00, 1.02, 1.04],
                   ['0.96', '0.98', '1.00', '1.02', '1.04'])
        plt.errorbar(x=x, y=gv.mean(ratio), yerr=gv.sdev(ratio), fmt='ob')
        plt.plot([4.0, 21.0], [1.0, 1.0], 'b:')
        plt.savefig('eg1.png', bbox_inches='tight')
        plt.show()

    # alternate method for extra data
    sys.stdout = tee.tee(sys_stdout, open("eg1b.out", "w"))
    fit.p['a1/a0'] = fit.p['a'][1] / fit.p['a'][0]
    new_data = {'a1/a0': gv.gvar(1, 1e-5)}
    new_p = lsqfit.wavg([fit.p, new_data])
    print('chi2/dof = %.2f\n' % (new_p.chi2 / new_p.dof))
    print('E:', new_p['E'][:4])
    print('a:', new_p['a'][:4])
    print('a1/a0:', new_p['a1/a0'])

    if DO_BAYES:
        # Bayesian Fit
        gv.ranseed([123])
        prior = make_prior(4)
        fit = lsqfit.nonlinear_fit(data=(x, y), fcn=f, prior=prior,
                                   p0=fit.pmean)
        sys.stdout = tee.tee(sys_stdout, open("eg1c.out", "w"))
        # print(fit)
        expval = lsqfit.BayesIntegrator(fit, limit=10.)
        # adapt integrator to PDF
        expval(neval=40000, nitn=10)
        # calculate expectation value of function g(p)
        fit_hist = gv.PDFHistogram(fit.p['E'][0])
        def g(p):
            parameters = [p['a'][0], p['E'][0]]
            return dict(
                mean=parameters,
                outer=np.outer(parameters, parameters),
                hist=fit_hist.count(p['E'][0]),
                )
        r = expval(g, neval=40000, nitn=10, adapt=False)
        # print results
        print(r.summary())
        means = r['mean']
        cov = r['outer'] - np.outer(r['mean'], r['mean'])
        print('Results from Bayesian Integration:')
        print('a0: mean =', means[0], ' sdev =', cov[0, 0]**0.5)
        print('E0: mean =', means[1], ' sdev =', cov[1, 1]**0.5)
        print('covariance from Bayesian integral =',
              np.array2string(cov, prefix=36 * ' '))
        print()
        print('Results from Least-Squares Fit:')
        print('a0: mean =', fit.p['a'][0].mean, ' sdev =', fit.p['a'][0].sdev)
        print('E0: mean =', fit.p['E'][0].mean, ' sdev =', fit.p['E'][0].sdev)
        print('covariance from least-squares fit =',
              np.array2string(gv.evalcov([fit.p['a'][0], fit.p['E'][0]]),
                              prefix=36 * ' ', precision=3))
        sys.stdout = sys_stdout
        # make histogram of E[0] probabilty
        plt = fit_hist.make_plot(r['hist'])
        plt.xlabel('$E_0$')
        plt.ylabel('probability')
        plt.savefig('eg1c.png', bbox_inches='tight')
        # plt.show()

    if DO_BOOTSTRAP:
        Nbs = 40                    # number of bootstrap copies
        outputs = {
            'E1/E0': [], 'E2/E0': [], 'a1/a0': [], 'a2/a0': [],
            'E1': [], 'a1': []
            }                       # results
        for bsfit in fit.bootstrap_iter(n=Nbs):
            E = bsfit.pmean['E']    # best-fit parameters
            a = bsfit.pmean['a']
            outputs['E1/E0'].append(E[1] / E[0])   # accumulate results
            outputs['E2/E0'].append(E[2] / E[0])
            outputs['a1/a0'].append(a[1] / a[0])
            outputs['a2/a0'].append(a[2] / a[0])
            outputs['E1'].append(E[1])
            outputs['a1'].append(a[1])
            # print(E[:2])
            # print(a[:2])
            # print(bsfit.chi2 / bsfit.dof)
        # extract means and standard deviations from the bootstrap output
        for k in outputs:
            outputs[k] = gv.gvar(np.mean(outputs[k]), np.std(outputs[k]))
        print('Bootstrap results:')
        print('E1/E0 =', outputs['E1/E0'], ' E2/E1 =', outputs['E2/E0'])
        print('a1/a0 =', outputs['a1/a0'], ' a2/a0 =', outputs['a2/a0'])
        print('E1 =', outputs['E1'], ' a1 =', outputs['a1'])
def make_fake_data(x, p, f):
    f = f(x, p)
    df = gv.gvar(len(f) * ['0.000(1)'])
    return np.array([fi + dfi() / 2. + dfi for fi, dfi in zip(f, df)])
def pri(self):
    priors = gv.BufferDict()
    priors['E0'] = gv.gvar(1, 10)
    priors['log(dE1)'] = gv.gvar(0, 10)
    priors['z0'] = gv.gvar(1, 10)
    priors['z1'] = gv.gvar(1, 10)
    priors['pdf_re'] = gv.gvar(1, 10)
    priors['pdf_im'] = gv.gvar(1, 10)
    priors['re_r1'] = gv.gvar(1, 10)
    priors['re_r2'] = gv.gvar(1, 10)
    priors['im_r1'] = gv.gvar(1, 10)
    priors['im_r2'] = gv.gvar(1, 10)
    priors['re_f1'] = gv.gvar(1, 10)
    priors['re_f2'] = gv.gvar(1, 10)
    priors['im_f1'] = gv.gvar(1, 10)
    priors['im_f2'] = gv.gvar(1, 10)
    return priors
        val['fh_re'].append(self.fh_re_fcn(t1, p))
        t1 = x['fh_im'][idx]
        val['fh_im'].append(self.fh_im_fcn(t1, p))
        return val

if __name__ == '__main__':
    fit = FIT(fit_2pt=True, fit_ratio=False, fit_fh=True)  #! 2pt+fh joint fit
    ## 2pt ##
    pt2_t = np.arange(1, 10)
    pt2 = [gv.gvar(1, 0.5) for i in range(1, 10)]   #! input data
    ## ratio ##
    ra_tseq = []
    ra_t = []
    ra_re = []
    ra_im = []
    for tseq in range(10):
        for t in range(1, tseq):   # current insertion runs from 1 to tseq-1
            ra_tseq.append(tseq)
            ra_t.append(t)
            val_re = gv.gvar(1, 0.5)   #! input data
            val_im = gv.gvar(1, 0.5)   #! input data
            ra_re.append(val_re)
            ra_im.append(val_im)
def build_prior(nexp):
    prior = gv.BufferDict()
    prior.add('log(a1:vec:s)', [log(gv.gvar(1, 1)) for i in range(nexp)])
    prior['log(a1:vec:s)'][0] = log(gv.gvar(0.5, 1))
    prior.add('log(ao:vec:s)', [log(gv.gvar(1, 1)) for i in range(nexp)])
    prior['log(ao:vec:s)'][0] = log(gv.gvar(0.5, 1))
    #prior.add('as1:etac', [gv.gvar(0.001, 0.01) for i in range(nexp)])
    prior.add('log(dE:vec:s)', [log(gv.gvar(2, 2)) for i in range(nexp)])
    prior['log(dE:vec:s)'][0] = log(gv.gvar(1, 1))
    prior.add('log(dEo:vec:s)', [log(gv.gvar(1, 1)) for i in range(nexp)])
    prior['log(dEo:vec:s)'][0] = log(gv.gvar(1, 1))
    #prior['logdE:etac'][0] = log(gv.gvar(0.1, 0.05))
    prior.add('log(a1:vec:qed:s)', [log(gv.gvar(1, 1)) for i in range(nexp)])
    prior['log(a1:vec:qed:s)'][0] = log(gv.gvar(0.5, 1))
    prior.add('log(ao:vec:qed:s)', [log(gv.gvar(1, 1)) for i in range(nexp)])
    prior['log(ao:vec:qed:s)'][0] = log(gv.gvar(0.5, 1))
    #prior.add('as1:etac', [gv.gvar(0.001, 0.01) for i in range(nexp)])
    prior.add('log(dE:vec:qed:s)', [log(gv.gvar(2, 2)) for i in range(nexp)])
    prior['log(dE:vec:qed:s)'][0] = log(gv.gvar(1, 1))
    prior.add('log(dEo:vec:qed:s)', [log(gv.gvar(1, 1)) for i in range(nexp)])
    prior['log(dEo:vec:qed:s)'][0] = log(gv.gvar(1, 1))
    ##
    return prior
def format(self, maxline=0, pstyle='v', nline=None, extend=True):
    """ Formats fit output details into a string for printing.

    The output tabulates the ``chi**2`` per degree of freedom of the fit
    (``chi2/dof``), the number of degrees of freedom, the ``Q`` value of
    the fit (ie, p-value), and the logarithm of the Gaussian Bayes Factor
    for the fit (``logGBF``). At the end it lists the SVD cut, the number
    of eigenmodes modified by the SVD cut, the tolerances used in the
    fit, and the time in seconds needed to do the fit. The tolerance used
    to terminate the fit is marked with an asterisk. It also lists
    information about the fitter used if it is other than the standard
    choice.

    Optionally, ``format`` will also list the best-fit values for the
    fit parameters together with the prior for each (in ``[]`` on each
    line). Lines for parameters that deviate from their prior by more
    than one (prior) standard deviation are marked with asterisks, with
    the number of asterisks equal to the number of standard deviations
    (up to five). Lines for parameters designated as linear (see
    ``linear`` keyword) are marked with a minus sign after their prior.

    ``format`` can also list all of the data and the corresponding values
    from the fit, again with asterisks on lines where there is a
    significant discrepancy.

    Args:
        maxline (int or bool): Maximum number of data points for which
            fit results and input data are tabulated. ``maxline<0``
            implies that only ``chi2``, ``Q``, ``logGBF``, and ``itns``
            are tabulated; no parameter values are included. Setting
            ``maxline=True`` prints all data points; setting it equal
            ``None`` or ``False`` is the same as setting it equal to
            ``-1``. Default is ``maxline=0``.
        pstyle (str or None): Style used for parameter list. Supported
            values are 'vv' for very verbose, 'v' for verbose, and 'm'
            for minimal. When 'm' is set, only parameters whose values
            differ from their prior values are listed. Setting
            ``pstyle=None`` implies no parameters are listed.
        extend (bool): If ``True``, extend the parameter list to include
            values derived from log-normal or other non-Gaussian
            parameters. So values for fit parameter ``p['log(a)']``, for
            example, are listed together with values ``p['a']`` for the
            exponential of the fit parameter. Setting ``extend=False``
            means that only the value for ``p['log(a)']`` is listed.
            Default is ``True``.

    Returns:
        String containing detailed information about fit.
    """
    # unpack arguments
    if nline is not None and maxline == 0:
        maxline = nline             # for legacy code (old name)
    if maxline is True:
        # print all data
        maxline = sys.maxsize
    if maxline is False or maxline is None:
        maxline = -1
    if pstyle is not None:
        if pstyle[:2] == 'vv':
            pstyle = 'vv'
        elif pstyle[:1] == 'v':
            pstyle = 'v'
        elif pstyle[:1] == 'm':
            pstyle = 'm'
        else:
            raise ValueError("Invalid pstyle: " + str(pstyle))

    def collect(v1, v2, style='v', stride=1, extend=False):
        """ Collect data from v1 and v2 into table.

        Returns list of [label,v1fmt,v2fmt]s for each entry in v1 and
        v2. Here v1fmt and v2fmt are strings representing entries in v1
        and v2, while label is assembled from the key/index of the
        entry.
        """
        def nstar(v1, v2):
            sdev = max(v1.sdev, v2.sdev)
            if sdev == 0:
                nstar = 5
            else:
                try:
                    nstar = int(abs(v1.mean - v2.mean) / sdev)
                except:
                    return ' ! (Error)'
            if nstar > 5:
                nstar = 5
            elif nstar < 1:
                nstar = 0
            return ' ' + nstar * '*'
        ct = 0
        ans = []
        width = [0, 0, 0]
        stars = []
        if v1.shape is None:
            # BufferDict
            keys = list(v1.keys())
            if extend:
                v1 = gv.BufferDict(v1)
                v2 = gv.BufferDict(v2)
                ekeys = v1.extension_keys()
                if len(ekeys) > 0:
                    first_ekey = ekeys[0]
                    keys += ekeys
                else:
                    extend = False
            for k in keys:
                if extend and k == first_ekey:
                    # marker indicating beginning of extra keys
                    stars.append(None)
                    ans.append(None)
                ktag = str(k)
                if np.shape(v1[k]) == ():
                    if ct % stride != 0:
                        ct += 1
                        continue
                    if style in ['v', 'm']:
                        v1fmt = v1[k].fmt(sep=' ')
                        v2fmt = v2[k].fmt(sep=' ')
                    else:
                        v1fmt = v1[k].fmt(-1)
                        v2fmt = v2[k].fmt(-1)
                    if style == 'm' and v1fmt == v2fmt:
                        ct += 1
                        continue
                    stars.append(nstar(v1[k], v2[k]))
                    ans.append([ktag, v1fmt, v2fmt])
                    w = [len(ai) for ai in ans[-1]]
                    for i, (wo, wn) in enumerate(zip(width, w)):
                        if wn > wo:
                            width[i] = wn
                    ct += 1
                else:
                    ktag = ktag + " "
                    for i in np.ndindex(v1[k].shape):
                        if ct % stride != 0:
                            ct += 1
                            continue
                        ifmt = (len(i) * "%d,")[:-1] % i
                        if style in ['v', 'm']:
                            v1fmt = v1[k][i].fmt(sep=' ')
                            v2fmt = v2[k][i].fmt(sep=' ')
                        else:
                            v1fmt = v1[k][i].fmt(-1)
                            v2fmt = v2[k][i].fmt(-1)
                        if style == 'm' and v1fmt == v2fmt:
                            ct += 1
                            continue
                        stars.append(nstar(v1[k][i], v2[k][i]))
                        ans.append([ktag + ifmt, v1fmt, v2fmt])
                        w = [len(ai) for ai in ans[-1]]
                        for i, (wo, wn) in enumerate(zip(width, w)):
                            if wn > wo:
                                width[i] = wn
                        ct += 1
                        ktag = ""
        else:
            # np array
            v2 = np.asarray(v2)
            for k in np.ndindex(v1.shape):
                # convert array(GVar) to GVar
                v1k = v1[k] if hasattr(v1[k], 'fmt') else v1[k].flat[0]
                v2k = v2[k] if hasattr(v2[k], 'fmt') else v2[k].flat[0]
                if ct % stride != 0:
                    ct += 1
                    continue
                kfmt = (len(k) * "%d,")[:-1] % k
                if style in ['v', 'm']:
                    v1fmt = v1k.fmt(sep=' ')
                    v2fmt = v2k.fmt(sep=' ')
                else:
                    v1fmt = v1k.fmt(-1)
                    v2fmt = v2k.fmt(-1)
                if style == 'm' and v1fmt == v2fmt:
                    ct += 1
                    continue
                stars.append(nstar(v1k, v2k))  ###
                ans.append([kfmt, v1fmt, v2fmt])
                w = [len(ai) for ai in ans[-1]]
                for i, (wo, wn) in enumerate(zip(width, w)):
                    if wn > wo:
                        width[i] = wn
                ct += 1
        collect.width = width
        collect.stars = stars
        return ans

    # build header
    # Bayesian statistics
    dof = self.dof
    if dof > 0:
        chi2_dof = self.chi2_aug / self.dof
    else:
        chi2_dof = self.chi2_aug
    try:
        Q = 'Q = %.2g' % self.Q
    except:
        Q = ''
    try:
        logGBF = 'logGBF = %.5g' % self.logGBF
    except:
        logGBF = ''
    # frequentist statistics
    dof_freq = self.ndata - self.nparams
    if dof_freq > 0:
        chi2_dof_freq = self.chi2 / dof_freq
    else:
        chi2_dof_freq = self.chi2
    if self.prior is None:
        descr = ' (no prior)'
    else:
        descr = ''
    # table = ('Least Square Fit%s:\n  chi2/dof [dof] = %.2g [%d]    %s'
    #          '    %s\n' % (descr, chi2_dof, dof, Q, logGBF))
    table = (
        f"{'#' * 80}\n"
        f"Least Square Fit{descr}:\n"
        "Bayesian summary:\n"
        "  Counting using dof = ndata\n"
        "  Counting using the augmented chi2 function\n"
        f"  chi2_aug/dof [dof] = {chi2_dof:.2f} [{dof}] {Q} {logGBF}\n"
        "Frequentist summary:\n"
        "  Counting dof = (ndata - nparams)\n"
        "  Counting using the correlated chi2 function only\n"
        f"  chi2/dof [dof] = {chi2_dof_freq:.2f} [{dof_freq}]"
        f" p={self.p_value:.2f}\n")
    if maxline < 0:
        return table

    # create parameter table
    if pstyle is not None:
        table = table + '\nParameters:\n'
        prior = self.prior
        if prior is None:
            if self.p0.shape is None:
                prior = gv.BufferDict(
                    self.p0,
                    buf=self.p0.flatten() + gv.gvar(0, float('inf')))
            else:
                prior = self.p0 + gv.gvar(0, float('inf'))
        data = collect(self.palt, prior, style=pstyle, stride=1,
                       extend=extend)
        w1, w2, w3 = collect.width
        fst = "%%%ds%s%%%ds%s[ %%%ds ]" % (
            max(w1, 15), 3 * ' ', max(w2, 10),
            int(max(w2, 10) / 2) * ' ', max(w3, 10)
            )
        if len(self.linear) > 0:
            spacer = [' ', '-']
        else:
            spacer = ['', '']
        for i, (di, stars) in enumerate(zip(data, collect.stars)):
            if di is None:
                # marker for boundary between true fit parameters and
                # derived parameters
                ndashes = (
                    max(w1, 15) + 3 + max(w2, 10)
                    + int(max(w2, 10) / 2) + 4 + max(w3, 10)
                    )
                table += ndashes * '-' + '\n'
                continue
            table += ((fst % tuple(di)) + spacer[i in self.linear]
                      + stars + '\n')

    # settings
    settings = "\nSettings:"
    # add_svdnoise named arg changed to noise in lsqfit 11.6.
    try:
        _noise = self.add_svdnoise
    except AttributeError:
        _noise = self.noise
    if not _noise or self.svdcut is None or self.svdcut < 0:
        settings += "\n  svdcut/n = {svdcut:.2g}/{svdn}".format(
            svdcut=self.svdcut if self.svdcut is not None else 0.0,
            svdn=self.svdn)
    else:
        settings += "\n  svdcut/n = {svdcut:.2g}/{svdn}*".format(
            svdcut=self.svdcut, svdn=self.svdn)
    criterion = self.stopping_criterion
    try:
        fmtstr = [
            "    tol = ({:.2g},{:.2g},{:.2g})",
            "    tol = ({:.2g}*,{:.2g},{:.2g})",
            "    tol = ({:.2g},{:.2g}*,{:.2g})",
            "    tol = ({:.2g},{:.2g},{:.2g}*)",
            ][criterion if criterion is not None else 0]
        settings += fmtstr.format(*self.tol)
    except:
        pass
    if criterion is not None and criterion == 0:
        settings += "    (itns/time = {itns}*/{time:.1f})".format(
            itns=self.nit, time=self.time)
    else:
        settings += "    (itns/time = {itns}/{time:.1f})".format(
            itns=self.nit, time=self.time)
    default_line = '\n  fitter = gsl_multifit    methods = lm/more/qr\n'
    newline = "\n  fitter = {}    {}\n".format(
        self.fitter, self.description)
    if newline != default_line:
        settings += newline
    else:
        settings += '\n'

    if maxline <= 0 or self.data is None:
        return table + settings

    # create table comparing fit results to data
    ny = self.y.size
    stride = 1 if maxline >= ny else (int(ny / maxline) + 1)
    if hasattr(self, 'fcn_p'):
        f = self.fcn_p
    elif self.x is False:
        f = self.fcn(self.p)
    else:
        f = self.fcn(self.x, self.p)
    if hasattr(f, 'keys'):
        f = gv.BufferDict(f)
    else:
        f = np.array(f)
    data = collect(self.y, f, style='v', stride=stride, extend=False)
    w1, w2, w3 = collect.width
    clabels = ("key", "y[key]", "f(p)[key]")
    if (self.y.shape is not None and self.x is not False
            and self.x is not None):
        # use x[k] to label lines in table?
        try:
            x = np.array(self.x)
            xlist = []
            ct = 0
            for k in np.ndindex(x.shape):
                if ct % stride != 0:
                    ct += 1
                    continue
                xlist.append("%g" % x[k])
            assert len(xlist) == len(data)
        except:
            xlist = None
        if xlist is not None:
            for i, (d1, d2, d3) in enumerate(data):
                data[i] = (xlist[i], d2, d3)
            clabels = ("x[k]", "y[k]", "f(x[k],p)")

    w1, w2, w3 = max(9, w1 + 4), max(9, w2 + 4), max(9, w3 + 4)
    table += "\nFit:\n"
    fst = "%%%ds%%%ds%%%ds\n" % (w1, w2, w3)
    table += fst % clabels
    table += (w1 + w2 + w3) * "-" + "\n"
    for di, stars in zip(data, collect.stars):
        table += fst[:-1] % tuple(di) + stars + '\n'

    return table + settings
    beta = 3
    return f_pot_SM(x, p) + lam * xt**5 * np.exp(-17 * (x / 0.16)**(beta / 3))

def f_SM(x, p):
    return T_SM(x) + f_pot_SM_c(x, p)

# prior_e_SM = {}   # Drischler prior
# prior_e_SM['n_sat'] = gv.gvar(0.171, 0.016)
# prior_e_SM['E_sat'] = gv.gvar(-15.16, 1.24)
# prior_e_SM['K_sat'] = gv.gvar(214, 22)
# prior_e_SM['Q_sat'] = gv.gvar(-139, 104)
# prior_e_SM['Z_sat'] = gv.gvar(1306, 214)

prior_e_SM = {}   # Jerome priors
prior_e_SM['n_sat'] = gv.gvar(0.16, 0.01)
prior_e_SM['E_sat'] = gv.gvar(-15.5, 1.0)
prior_e_SM['K_sat'] = gv.gvar(230, 20)
prior_e_SM['Q_sat'] = gv.gvar(-300, 400)
prior_e_SM['Z_sat'] = gv.gvar(1300, 500)

x = td
y = te_SM_av

fit = lsqfit.nonlinear_fit(data=(x, y), prior=prior_e_SM, fcn=f_SM,
                           debug=True, svdcut=ts_SM.svdcut)
SM3_par = fit.p
def make_prior():
    p = gv.gvar(['0(1)', '0(1)', '0(1)', '0(1)'])
    p[1] = 20 * p[0] + gv.gvar('0.0(1)')   # p[1] correlated with p[0]
    return p
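# --- Illustrative check (not in the source): the correlation built into
# make_prior() above can be inspected with gv.evalcorr.
import gvar as gv

p = make_prior()
print(gv.evalcorr([p[0], p[1]])[0, 1])   # nearly 1, since p[1] ~ 20 * p[0]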
def T_NM_eff(x):
    k1, k2 = gv.gvar(
        [2.630969540591772, -11.079806675726502],
        [[0.01866276, -0.23951577], [-0.23951577, 3.58448543]]
        )
    m_e_inv = 1 + x * k1 + x**2 * k2
    return T_NM(x) * m_e_inv
    return f_pot_SM(x, p) + lam * xt**5 * np.exp(-17 * (x / 0.16)**(beta / 3))

def f_SM(x, p):
    return T_SM(x) + f_pot_SM_c(x, p)

# prior_e_SM = {}   # Drischler priors
# prior_e_SM['n_sat'] = gv.gvar(0.171, 0.016)
# prior_e_SM['E_sat'] = gv.gvar(-15.16, 1.24)
# prior_e_SM['K_sat'] = gv.gvar(214, 22)
# prior_e_SM['Q_sat'] = gv.gvar(-139, 104)
# prior_e_SM['Z_sat'] = gv.gvar(1306, 214)
# prior_e_SM['b'] = gv.gvar(0, 50)

prior_e_SM = {}   # Jerome priors
prior_e_SM['n_sat'] = gv.gvar(0.16, 0.01)
prior_e_SM['E_sat'] = gv.gvar(-15.5, 1.0)
prior_e_SM['K_sat'] = gv.gvar(230, 20)
prior_e_SM['Q_sat'] = gv.gvar(-300, 400)
prior_e_SM['Z_sat'] = gv.gvar(1300, 500)

x = td
y = te_SM_av

fit = lsqfit.nonlinear_fit(data=(x, y), prior=prior_e_SM, fcn=f_SM,
                           debug=True, svdcut=ts_SM.svdcut)
SM3_par = fit.p

############################ Fit for NM ##################################
def make_data():
    y = gv.gvar([
        '0.5351(54)', '0.6762(67)', '0.9227(91)',
        '1.3803(131)', '4.0145(399)'
        ])
    x = np.array([0.1, 0.3, 0.5, 0.7, 0.95])
    return x, y
def fitargs(z):
    prior = dict(a=gv.gvar(1.0, z), b=gv.gvar(0.5, z))
    return dict(prior=prior, data=self.data, chained=False)
def fitargs(z):
    dp = z
    prior = gv.gvar([gv.gvar(0, dp) for i in range(4)])
    return dict(prior=prior, fcn=fcn, data=(x, y))
def build_prior(nexp, dErho=gv.gvar('0.476(100)')):
    """ build prior """
    prior = gv.BufferDict()
    prior.add(
        'a:rho',
        [gv.gvar(0, 0.1), gv.gvar(0, 0.1), gv.gvar(0, 0.1), gv.gvar(0, 0.1)])
    # fix it so that the rho terms are opposite sign to the omega
    prior.add('b:rho', [
        -prior['a:rho'][0], prior['a:rho'][1],
        prior['a:rho'][2], -prior['a:rho'][3]
        ])
    prior.add('log(dE:rho)', [log(gv.gvar(0.5, 0.4)) for i in range(nexp)])
    prior['log(dE:rho)'][0] = log(dErho)                 # rho
    prior['log(dE:rho)'][1] = log(gv.gvar('0.09(18)'))   # omega
    prior['log(dE:rho)'][2] = log(gv.gvar('0.90(32)'))   # excited omega
    prior['log(dE:rho)'][3] = log(gv.gvar('0.023(30)'))  # excited rho
    prior.add(
        'a:rhoo',
        [gv.gvar(0, 0.1), gv.gvar(0, 0.1), gv.gvar(0, 0.1), gv.gvar(0, 0.1)])
    prior.add('b:rhoo', [
        -prior['a:rhoo'][0], prior['a:rhoo'][1],
        prior['a:rhoo'][2], -prior['a:rhoo'][3]
        ])
    prior.add('log(dE:rhoo)', [log(gv.gvar(1, 1)) for i in range(nexp)])
    prior['log(dE:rhoo)'][0] = log(gv.gvar(2, 2))
    ##
    return prior
def make_prior(N):
    """ Create priors for fit parameters. """
    prior = gv.BufferDict()
    # etas
    metas = gv.gvar('0.4(2)')
    prior['log(etas:a)'] = gv.log(gv.gvar(N * ['0.3(3)']))
    prior['log(etas:dE)'] = gv.log(gv.gvar(N * ['0.5(5)']))
    prior['log(etas:dE)'][0] = gv.log(metas)
    # Ds
    mDs = gv.gvar('1.2(2)')
    prior['log(Ds:a)'] = gv.log(gv.gvar(N * ['0.3(3)']))
    prior['log(Ds:dE)'] = gv.log(gv.gvar(N * ['0.5(5)']))
    prior['log(Ds:dE)'][0] = gv.log(mDs)
    # Ds -- oscillating part
    prior['log(Dso:a)'] = gv.log(gv.gvar(N * ['0.1(1)']))
    prior['log(Dso:dE)'] = gv.log(gv.gvar(N * ['0.5(5)']))
    prior['log(Dso:dE)'][0] = gv.log(mDs + gv.gvar('0.3(3)'))
    # V
    prior['Vnn'] = gv.gvar(N * [N * ['0(1)']])
    prior['Vno'] = gv.gvar(N * [N * ['0(1)']])
    return prior
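# --- Illustrative note (a sketch; behavior assumed from gvar's BufferDict
# distribution support): with keys like 'log(etas:dE)', the exponentiated
# parameter should also be readable under the plain key.
import gvar as gv

prior = make_prior(3)
print(prior['etas:dE'][0])   # exp of prior['log(etas:dE)'][0], about 0.4(2)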
def m_e_inv_SM(x):
    k1 = gv.gvar('3.33(18)')
    return 1 + x * k1
def make_prior(nexp):               # make priors for fit parameters
    prior = gv.BufferDict()         # Gaussian prior -- dictionary-like
    prior['a'] = [gv.gvar(0.5, 0.4) for i in range(nexp)]
    prior['E'] = [gv.gvar(i + 1, 0.4) for i in range(nexp)]
    return prior
def _correlate(data, **kwargs):
    """Correlates the data, including correction of covariance matrix."""
    mean = gv.mean(gv.dataset.avg_data(data))
    cov = correct_covariance(data, **kwargs)
    return gv.gvar(mean, cov)
def main():
    gv.ranseed(4)
    x = np.array([1., 2., 3., 4., 5., 6., 7., 8., 9., 10.])
    y_samples = [
        [2.8409, 4.8393, 6.8403, 8.8377, 10.8356,
         12.8389, 14.8356, 16.8362, 18.8351, 20.8341],
        [2.8639, 4.8612, 6.8597, 8.8559, 10.8537,
         12.8525, 14.8498, 16.8487, 18.8460, 20.8447],
        [3.1048, 5.1072, 7.1071, 9.1076, 11.1090,
         13.1107, 15.1113, 17.1134, 19.1145, 21.1163],
        [3.0710, 5.0696, 7.0708, 9.0705, 11.0694,
         13.0681, 15.0693, 17.0695, 19.0667, 21.0678],
        [3.0241, 5.0223, 7.0198, 9.0204, 11.0191,
         13.0193, 15.0198, 17.0163, 19.0154, 21.0155],
        [2.9719, 4.9700, 6.9709, 8.9706, 10.9707,
         12.9705, 14.9699, 16.9686, 18.9676, 20.9686],
        [3.0688, 5.0709, 7.0724, 9.0730, 11.0749,
         13.0776, 15.0790, 17.0800, 19.0794, 21.0795],
        [3.1471, 5.1468, 7.1452, 9.1451, 11.1429,
         13.1445, 15.1450, 17.1435, 19.1425, 21.1432],
        [3.0233, 5.0233, 7.0225, 9.0224, 11.0225,
         13.0216, 15.0224, 17.0217, 19.0208, 21.0222],
        [2.8797, 4.8792, 6.8803, 8.8794, 10.8800,
         12.8797, 14.8801, 16.8797, 18.8803, 20.8812],
        [3.0388, 5.0407, 7.0409, 9.0439, 11.0443,
         13.0459, 15.0455, 17.0479, 19.0493, 21.0505],
        [3.1353, 5.1368, 7.1376, 9.1367, 11.1360,
         13.1377, 15.1369, 17.1400, 19.1384, 21.1396],
        [3.0051, 5.0063, 7.0022, 9.0052, 11.0040,
         13.0033, 15.0007, 16.9989, 18.9994, 20.9995],
        [3.0221, 5.0197, 7.0193, 9.0183, 11.0179,
         13.0184, 15.0164, 17.0177, 19.0159, 21.0155],
        [3.0188, 5.0200, 7.0184, 9.0183, 11.0189,
         13.0188, 15.0191, 17.0183, 19.0177, 21.0186],
        ]
    y = gv.dataset.avg_data(y_samples)
    svd = gv.dataset.svd_diagnosis(y_samples)
    y = gv.svd(y, svdcut=svd.svdcut)
    if SHOW_PLOTS:
        svd.plot_ratio(show=True)
    def fcn(p):
        return p['y0'] + p['s'] * x
    prior = gv.gvar(dict(y0='0(5)', s='0(5)'))
    fit = lsqfit.nonlinear_fit(data=y, fcn=fcn, prior=prior)
    print(fit)
def decay(pion_ss_ps_gv, kaon_ss_ps_gv):
    prior = dict()
    prior['Z0_s'] = gv.gvar(0.025, 0.01)
    prior['Z1_s'] = gv.gvar(0.025, 0.035)
    prior['Z2_s'] = gv.gvar(0.025, 0.035)
    prior['Z0_p'] = gv.gvar(0.27, 0.15)
    prior['Z1_p'] = gv.gvar(0.27, 0.35)
    prior['Z2_p'] = gv.gvar(0.27, 0.35)
    prior['E0'] = gv.gvar(0.23, 0.2)
    prior['E1'] = gv.gvar(0.0, 1.0)
    prior['E2'] = gv.gvar(0.0, 1.0)
    trange = dict()
    trange['tmin'] = [6, 6]
    trange['tmax'] = [20, 20]
    T = len(pion_ss_ps_gv)
    fitfcn = c51.fit_function(T=T, nstates=2)
    fit = c51.fitscript(trange, pion_ss_ps_gv, prior,
                        fitfcn.twopt_fitfcn_ss_ps, sets=2, result_flag='off')
    print("pion")
    c51.stability_plot(fit, 'Z0_p')
    c51.stability_plot(fit, 'E0')
    ml = 0.0158
    ms = 0.0902
    mres_pi = gv.gvar(0.0009633, 0.0000065)
    Z0_p = fit['post'][0]['Z0_p']
    E0 = fit['post'][0]['E0']
    fpi = Z0_p * np.sqrt(2.) * (2. * ml + 2. * mres_pi) / E0**(3. / 2.)
    print('fpi:', fpi)
    print("kaon")
    prior['Z0_s'] = gv.gvar(0.02, 0.01)
    prior['Z1_s'] = gv.gvar(0.02, 0.03)
    prior['Z2_s'] = gv.gvar(0.02, 0.03)
    prior['Z0_p'] = gv.gvar(0.2, 0.1)
    prior['Z1_p'] = gv.gvar(0.2, 0.3)
    prior['Z2_p'] = gv.gvar(0.2, 0.3)
    prior['E0'] = gv.gvar(0.404, 0.2)
    prior['E1'] = gv.gvar(0.0, 1.0)
    prior['E2'] = gv.gvar(0.0, 1.0)
    fit = c51.fitscript(trange, kaon_ss_ps_gv, prior,
                        fitfcn.twopt_fitfcn_ss_ps, sets=2, result_flag='off')
    c51.stability_plot(fit, 'Z0_p')
    c51.stability_plot(fit, 'E0')
    mres_kaon = gv.gvar(0.0006685, 0.0000044)
    Z0_p = fit['post'][0]['Z0_p']
    E0 = fit['post'][0]['E0']
    fk = Z0_p * np.sqrt(2.) * (ml + ms + mres_pi + mres_kaon) / E0**(3. / 2.)
    print('fk:', fk)
    fkfpi = fk / fpi
    print('fk/fpi:', fkfpi)