def solve(a, b):
    """ Find ``x`` such that ``a.dot(x) = b`` for matrix ``a``.

    Args:
        a: Two-dimensional, square matrix/array of numbers
            and/or :class:`gvar.GVar`\s.
        b: One-dimensional vector/array of numbers and/or
            :class:`gvar.GVar`\s, or an array of such vectors.
            Requires ``b.shape[0] == a.shape[1]``.

    Returns:
        The solution ``x`` of ``a.dot(x) = b``, which is equivalent
        to ``inv(a).dot(b)``.

    Raises:
        ValueError: If ``a`` is not square and two-dimensional.
        ValueError: If shape of ``b`` does not match that of ``a``
            (that is ``b.shape[0] != a.shape[1]``).
    """
    amean = gvar.mean(a)
    if amean.ndim != 2 or amean.shape[0] != amean.shape[1]:
        raise ValueError('Bad matrix shape: ' + str(a.shape))
    bmean = gvar.mean(b)
    if bmean.shape[0] != a.shape[1]:
        raise ValueError(
            'Mismatch between shapes of a and b: {} {}'.format(a.shape, b.shape)
            )
    xmean = numpy.linalg.solve(amean, bmean)
    ainv = inv(a)
    return xmean + ainv.dot(b - bmean - (a - amean).dot(xmean))
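# Hedged usage sketch for solve() above; not part of the original source.
# The matrix/vector values and the `_demo_` name are illustrative assumptions.
def _demo_solve():
    import gvar
    a = gvar.gvar([['2.0(1)', '0.3(1)'], ['0.3(1)', '1.5(1)']])
    b = gvar.gvar(['1.0(2)', '0.5(2)'])
    x = solve(a, b)                 # GVar-valued solution of a.dot(x) = b
    # to first order in the uncertainties, a.dot(x) reproduces b:
    print(gvar.mean(a.dot(x)), gvar.mean(b))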
def main():
    gv.ranseed([2009, 2010, 2011, 2012, 2013])  # initialize random numbers (opt.)
    x, y = make_data()                          # make fit data
    p0 = None                                   # make larger fits go faster (opt.)
    sys_stdout = sys.stdout
    for nexp in range(3, 6):
        prior = make_prior(nexp, x)
        fit = lsqfit.nonlinear_fit(data=y, fcn=f, prior=prior, p0=p0)  # ,svdcut=SVDCUT)
        if fit.chi2 / fit.dof < 1.:
            p0 = fit.pmean                      # starting point for next fit (opt.)
        fit.check_roundoff()
        if nexp == 4:
            sys.stdout = tee.tee(sys.stdout, open("eg2.out", "w"))
        print('************************************* nexp =', nexp)
        print(fit)                              # print the fit results
        E = fit.p['E']                          # best-fit parameters
        a = fit.p['a']
        print('E1/E0 =', E[1] / E[0], ' E2/E0 =', E[2] / E[0])
        print('a1/a0 =', a[1] / a[0], ' a2/a0 =', a[2] / a[0])
        sys.stdout = sys_stdout
        print()
    #
    if DO_BOOTSTRAP:
        Nbs = 10                                # number of bootstrap copies
        outputs = {
            'E1/E0': [], 'E2/E0': [], 'a1/a0': [], 'a2/a0': [],
            'E1': [], 'a1': [],
            }                                   # results
        for bsfit in fit.bootstrap_iter(n=Nbs):
            E = bsfit.pmean['E']                # best-fit parameters
            a = bsfit.pmean['a']
            outputs['E1/E0'].append(E[1] / E[0])    # accumulate results
            outputs['E2/E0'].append(E[2] / E[0])
            outputs['a1/a0'].append(a[1] / a[0])
            outputs['a2/a0'].append(a[2] / a[0])
            outputs['E1'].append(E[1])
            outputs['a1'].append(a[1])
            # print E[:2]
            # print a[:2]
            # print bsfit.chi2/bsfit.dof
        # extract means and standard deviations from the bootstrap output
        for k in outputs:
            outputs[k] = gv.gvar(np.mean(outputs[k]), np.std(outputs[k]))
        print('Bootstrap results:')
        print('E1/E0 =', outputs['E1/E0'], ' E2/E0 =', outputs['E2/E0'])
        print('a1/a0 =', outputs['a1/a0'], ' a2/a0 =', outputs['a2/a0'])
        print('E1 =', outputs['E1'], ' a1 =', outputs['a1'])

    if DO_PLOT:
        print(fit.format(100))                  # print the fit results
        import pylab as pp
        fity = f(x, fit.pmean)
        ratio = y / fity
        pp.xlim(0, 21)
        pp.xlabel('x')
        pp.ylabel('y/f(x,p)')
        pp.errorbar(x=gv.mean(x), y=gv.mean(ratio), yerr=gv.sdev(ratio), fmt='ob')
        pp.plot([0.0, 21.0], [1.0, 1.0])
        pp.show()
def test_svd(self):
    " EigenBasis.svd "
    tdata = [1, 2, 3, 4]
    G = self.make_G(tdata, keyfmt="{s1}{s2}", srcs="ab")
    basis = EigenBasis(data=G, keyfmt="{s1}{s2}", srcs="ab", t=2, tdata=tdata)
    Gsvd = basis.svd(G, svdcut=0.9)
    self.assertEqual(basis.svdn, 15)
    self.assertEqual(str(basis.svdcorrection), "0.000(30)")
    for k in G:
        np.testing.assert_allclose(gv.mean(G[k]), gv.mean(Gsvd[k]))
        self.assertTrue(np.all(gv.sdev(Gsvd[k]) > gv.sdev(G[k])))
def make_plot(x, y, fit, ylabel='y(x)', xmax=1.0):
    if not MAKE_PLOTS:
        return
    plt.errorbar(x, gv.mean(y), gv.sdev(y), fmt='bo')
    x = np.arange(0., xmax, 0.01)
    yfit = f(x, fit.p)
    plt.plot(x, gv.mean(yfit), 'k--')
    yplus = gv.mean(yfit) + gv.sdev(yfit)
    yminus = gv.mean(yfit) - gv.sdev(yfit)
    plt.fill_between(x, yminus, yplus, color='0.8')
    plt.xlim(0, 1)
    plt.ylim(0.3, 1.9)
    plt.xlabel('x')
    plt.ylabel(ylabel)
    plt.show()
def test_simulation(self):
    """ CorrFitter.simulated_data_iter """
    models = [self.mkcorr(a="a", b="a", dE="dE", tp=None)]
    fitter = self.dofit(models)
    data = self.data
    diter = gv.BufferDict()
    k = list(data.keys())[0]
    # make n config dataset corresponding to data
    n = 100
    diter = gv.raniter(
        g=gv.gvar(gv.mean(self.data[k]), gv.evalcov(self.data[k]) * n),
        n=n
        )
    dataset = gv.dataset.Dataset()
    for d in diter:
        dataset.append(k, d)
    pexact = fitter.fit.pmean
    covexact = gv.evalcov(gv.dataset.avg_data(dataset)[k])
    for sdata in fitter.simulated_data_iter(n=2, dataset=dataset):
        sfit = fitter.lsqfit(
            data=sdata, prior=self.prior, p0=pexact, print_fit=False
            )
        diff = dict()
        for i in ['a', 'logdE']:
            diff[i] = sfit.p[i][0] - pexact[i][0]
        c2 = gv.chi2(diff)
        self.assertLess(c2 / c2.dof, 15.)
        self.assert_arraysclose(gv.evalcov(sdata[k]), covexact)
def eigvalsh(a, eigvec=False):
    """ Eigenvalues of Hermitian matrix ``a``.

    Args:
        a: Two-dimensional, square matrix/array of numbers
            and/or :class:`gvar.GVar`\s.
        eigvec (bool): If ``True``, method returns a tuple of arrays
            ``(val, vec)`` where the ``val[i]`` are the eigenvalues.
            Arrays ``vec[:, i]`` are the corresponding eigenvectors of
            ``a`` when one ignores uncertainties (that is, they are
            eigenvectors of ``gvar.mean(a)``). Only ``val`` is returned
            if ``eigvec=False`` (default).

    Returns:
        Array ``val`` of eigenvalues of matrix ``a`` if parameter
        ``eigvec==False`` (default); otherwise a tuple of arrays
        ``(val, vec)`` where the ``val[i]`` are the eigenvalues and
        ``vec[:, i]`` are the corresponding eigenvectors of ``a``
        when one ignores uncertainties (that is, they are eigenvectors
        of ``gvar.mean(a)``).

    Raises:
        ValueError: If matrix is not square and two-dimensional.
    """
    amean = gvar.mean(a)
    if amean.ndim != 2 or amean.shape[0] != amean.shape[1]:
        raise ValueError('Bad matrix shape: ' + str(a.shape))
    da = a - amean
    val, vec = numpy.linalg.eigh(amean)
    val = val + [
        vec[:, i].dot(da.dot(vec[:, i])) for i in range(vec.shape[1])
        ]
    return (val, vec) if eigvec else val
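# Hedged usage sketch for eigvalsh() above; not from the original source.
# Matrix values are illustrative (compare test_eigvalsh below, which checks
# the eigenvalues of a rotated diagonal matrix).
def _demo_eigvalsh():
    import gvar
    m = gvar.gvar([['2.1(1)', '0.3(1)'], ['0.3(1)', '0.5(3)']])
    val = eigvalsh(m)                    # eigenvalues only, as GVars
    val, vec = eigvalsh(m, eigvec=True)  # plus eigenvectors of gvar.mean(m)
    print(val[0], val[1])                # uncertainties from 1st-order perturbation theory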
def main():
    # pendulum data exhibits experimental error in ability to measure theta
    t = gv.gvar([
        '0.10(1)', '0.20(1)', '0.30(1)', '0.40(1)', '0.50(1)',
        '0.60(1)', '0.70(1)', '0.80(1)', '0.90(1)', '1.00(1)'
        ])
    theta = gv.gvar([
        '1.477(79)', '0.791(79)', '-0.046(79)', '-0.852(79)',
        '-1.523(79)', '-1.647(79)', '-1.216(79)', '-0.810(79)',
        '0.185(79)', '0.832(79)'
        ])
    for t_n, theta_n in zip(t, theta):
        print("{} {:>10}".format(t_n.fmt(2), theta_n.fmt(3)))

    # prior: assume experimental error in ability to specify theta(0)
    prior = gv.BufferDict()
    prior['g/l'] = gv.gvar('40(20)')
    prior['theta(0)'] = gv.gvar('1.571(50)')
    prior['t'] = t

    # fit function: use class Pendulum object to integrate pendulum motion
    def fitfcn(p, t=None):
        if t is None:
            t = p['t']
        pendulum = Pendulum(p['g/l'])
        return pendulum(p['theta(0)'], t)

    # do the fit and print results
    fit = lsqfit.nonlinear_fit(data=theta, prior=prior, fcn=fitfcn)
    sys.stdout = tee.tee(STDOUT, open('case-pendulum.out', 'w'))
    print(fit.format(maxline=True))
    sys.stdout = STDOUT
    print('fit/exact for (g/l) =', fit.p['g/l'] / (2 * np.pi) ** 2)
    print('fit/exact for theta(0) =', fit.p['theta(0)'] / (np.pi / 2.))
    if MAKE_PLOT:
        # make figure (saved to file pendulum.pdf)
        plt.figure(figsize=(4, 3))
        # start plot with data
        plt.errorbar(
            x=gv.mean(t), xerr=gv.sdev(t), y=gv.mean(theta),
            yerr=gv.sdev(theta), fmt='k.',
            )
        # use best-fit function to add smooth curve for 100 points
        t = np.linspace(0., 1.1, 100)
        th = fitfcn(fit.p, t)
        show_plot(t, th)
def test_eigvalsh(self):
    m = gv.gvar([['2.1(1)', '0(0)'], ['0(0)', '0.5(3)']])
    th = 0.92
    cth = numpy.cos(th)
    sth = numpy.sin(th)
    u = numpy.array([[cth, sth], [-sth, cth]])
    mrot = u.T.dot(m.dot(u))
    val = linalg.eigvalsh(mrot)
    self.assertTrue(gv.equivalent(val[0], m[1, 1]))
    self.assertTrue(gv.equivalent(val[1], m[0, 0]))
    val, vec = linalg.eigvalsh(mrot, eigvec=True)
    np.testing.assert_allclose(
        gv.mean(mrot).dot(vec[:, 0]), val[0].mean * vec[:, 0]
        )
    np.testing.assert_allclose(
        gv.mean(mrot).dot(vec[:, 1]), val[1].mean * vec[:, 1]
        )
def test_apply(self):
    " EigenBasis EigenBasis.apply EigenBasis.unapply "
    for tdata in [[1.0, 2.0, 3.0, 4.0], [2.0, 4.0, 6.0, 8.0], [0, 1.0, 2.0]]:
        tdata = np.array(tdata)
        G = self.make_G(tdata, keyfmt="{s1}{s2}", srcs="ab")
        basis = EigenBasis(data=G, keyfmt="{s1}{s2}", srcs="ab", t=2, tdata=tdata)
        np.testing.assert_allclose(basis.E, self.E)
        newG = basis.apply(G, "{s1}{s2}")
        newG_mean = gv.mean(newG)
        np.testing.assert_allclose(newG_mean["00"], gv.exp(-self.E[0] * tdata))
        np.testing.assert_allclose(newG_mean["11"], gv.exp(-self.E[1] * tdata))
        np.testing.assert_allclose(newG_mean["01"], 0, atol=1e-10)
        np.testing.assert_allclose(newG_mean["10"], 0, atol=1e-10)
        oldG = basis.unapply(newG, "{s1}{s2}")
        for k in ["aa", "ab", "ba", "bb"]:
            np.testing.assert_allclose(gv.mean(oldG[k] - G[k]), 0, atol=1e-10)
            np.testing.assert_allclose(gv.sdev(oldG[k] - G[k]), 0, atol=1e-10)
def main():
    gv.ranseed([2009, 2010, 2011, 2012])    # initialize random numbers (opt.)
    x, y = make_data()                      # make fit data
    p0 = None                               # make larger fits go faster (opt.)
    for nexp in range(3, 8):
        print('************************************* nexp =', nexp)
        prior = make_prior(nexp)
        # eps = gv.gvar(1, 1e-300)          # use svdcut to make it independent
        # prior['a'] *= eps
        # y *= eps
        fit = lsqfit.nonlinear_fit(
            data=(x, y), fcn=f, prior=prior, p0=p0, svdcut=SVDCUT
            )
        print(fit)                          # print the fit results
        E = fit.p['E']                      # best-fit parameters
        a = fit.p['a']
        print('E1/E0 =', (E[1] / E[0]).fmt(), ' E2/E0 =', (E[2] / E[0]).fmt())
        print('a1/a0 =', (a[1] / a[0]).fmt(), ' a2/a0 =', (a[2] / a[0]).fmt())
        print()
        if fit.chi2 / fit.dof < 1.:
            p0 = fit.pmean                  # starting point for next fit (opt.)

    if DO_BOOTSTRAP:
        Nbs = 10                            # number of bootstrap copies
        outputs = {
            'E1/E0': [], 'E2/E0': [], 'a1/a0': [], 'a2/a0': [],
            'E1': [], 'a1': [],
            }                               # results
        for bsfit in fit.bootstrap_iter(n=Nbs):
            E = bsfit.pmean['E']            # best-fit parameters
            a = bsfit.pmean['a']
            outputs['E1/E0'].append(E[1] / E[0])    # accumulate results
            outputs['E2/E0'].append(E[2] / E[0])
            outputs['a1/a0'].append(a[1] / a[0])
            outputs['a2/a0'].append(a[2] / a[0])
            outputs['E1'].append(E[1])
            outputs['a1'].append(a[1])
            # print E[:2]
            # print a[:2]
            # print bsfit.chi2/bsfit.dof
        # extract means and standard deviations from the bootstrap output
        for k in outputs:
            outputs[k] = gv.dataset.avg_data(outputs[k], bstrap=True).fmt(3)
            # outputs[k] = gv.gvar(np.mean(outputs[k]), np.std(outputs[k])).fmt(3)
        print('Bootstrap results:')
        print('E1/E0 =', outputs['E1/E0'], ' E2/E0 =', outputs['E2/E0'])
        print('a1/a0 =', outputs['a1/a0'], ' a2/a0 =', outputs['a2/a0'])
        print('E1 =', outputs['E1'], ' a1 =', outputs['a1'])

    if DO_PLOT:
        print(fit.format(100))              # print the fit results
        import pylab as plt
        ratio = y / f(x, fit.pmean)
        plt.xlim(0, 21)
        plt.xlabel('x')
        plt.ylabel('y/f(x,p)')
        plt.errorbar(x=x, y=gv.mean(ratio), yerr=gv.sdev(ratio), fmt='ob')
        plt.plot([0.0, 21.0], [1.0, 1.0])
        plt.show()
def make_adv_init_from_fit_file_3pt(
        models, filename, nst=-1, ost=-1, n3st=-1, o3st=-1,
        fresh_overlap=False, fresh_amplitude=True
        ):
    init = {}
    infit = __import__(filename)
    nn = nst
    no = ost
    if nn < 0:
        nn = df.num_nst
    if no < 0:
        no = df.num_ost
    if n3st < 0:
        nn3 = df.num_nst_3pt
    else:
        nn3 = n3st
    if o3st < 0:
        no3 = df.num_ost_3pt
    else:
        no3 = o3st
    ## -- save a list of keys for quick reference
    klst = tuple()  ## all keys
    glst = tuple()  ## diagonal terms only
    olst = tuple()  ## overlaps only
    for model in models:
        try:
            for item in [model.dEa, model.dEb, model.a, model.b]:
                klst += tuple(item)
            klst += tuple(model.V[0]) + tuple(model.V[1])  ## -- need to split matrix up
            klst += tuple(model.g)  ## -- probably not necessary
        except AttributeError:
            ## -- 2 point function
            for item in [model.dE, model.a, model.b]:
                klst += tuple(item)
            olst += tuple(model.a) + tuple(model.b)
        try:
            glst += tuple(model.g)
        except AttributeError:
            pass
    klst = tuple(set(klst))  ## -- delete duplicates
    olst = tuple(set(olst))
    glst = tuple(set(glst))
    for key in infit.init_val_import:
        skey = key.split('_')
        if skey[0] in klst:
            ## -- add to initial value dictionary
            init[key] = gv.mean(infit.init_val_import[key])
            ## -- if requested, wipe values
            sk = skey[0][-2:]
            if fresh_amplitude and \
                    (sk == 'nn' or sk == 'no' or sk == 'on' or sk == 'oo'):
                init[key] = np.ones(np.shape(init[key]))
            if fresh_amplitude and skey[0] in glst:
                init[key] = np.ones(np.shape(init[key]))
            if fresh_overlap and (skey[0] in olst):
                init[key] = np.ones(np.shape(init[key]))
    ## -- finish up
    return mpa.truncate_prior_states(init, nn, no, nn3, no3)
def main():
    gv.ranseed([2009, 2010, 2011, 2012])    # initialize random numbers (opt.)
    x, y = make_data()                      # make fit data
    p0 = None                               # make larger fits go faster (opt.)
    for nexp in range(3, 8):
        print('************************************* nexp =', nexp)
        prior = make_prior(nexp)
        fit = lsqfit.nonlinear_fit(data=(x, y), fcn=f, prior=prior, p0=p0, svdcut=SVDCUT)
        print(fit)                          # print the fit results
        E = fit.p['E']                      # best-fit parameters
        a = fit.p['a']
        print('E1/E0 =', (E[1] / E[0]).fmt(), ' E2/E0 =', (E[2] / E[0]).fmt())
        print('a1/a0 =', (a[1] / a[0]).fmt(), ' a2/a0 =', (a[2] / a[0]).fmt())
        print()
        if fit.chi2 / fit.dof < 1.:
            p0 = fit.pmean                  # starting point for next fit (opt.)

    if DO_ERRORBUDGET:
        outputs = OrderedDict([
            ('E1/E0', E[1] / E[0]), ('E2/E0', E[2] / E[0]),
            ('a1/a0', a[1] / a[0]), ('a2/a0', a[2] / a[0]),
            ])
        inputs = OrderedDict([
            ('E', fit.prior['E']), ('a', fit.prior['a']),
            ('y', y), ('svd', fit.svdcorrection),
            ])
        print(fit.fmt_values(outputs))
        print(fit.fmt_errorbudget(outputs, inputs))

    if DO_EMPBAYES:
        def fitargs(z, nexp=nexp, prior=prior, f=f, data=(x, y), p0=p0):
            z = gv.exp(z)
            prior['a'] = [gv.gvar(0.5, 0.5 * z[0]) for i in range(nexp)]
            return dict(prior=prior, data=data, fcn=f, p0=p0)
        ##
        z0 = [0.0]
        fit, z = lsqfit.empbayes_fit(z0, fitargs, tol=1e-3)
        print(fit)                          # print the optimized fit results
        E = fit.p['E']                      # best-fit parameters
        a = fit.p['a']
        print('E1/E0 =', (E[1] / E[0]).fmt(), ' E2/E0 =', (E[2] / E[0]).fmt())
        print('a1/a0 =', (a[1] / a[0]).fmt(), ' a2/a0 =', (a[2] / a[0]).fmt())
        print("prior['a'] =", fit.prior['a'][0].fmt())
        print()

    if DO_PLOT:
        import pylab as pp
        from gvar import mean, sdev
        fity = f(x, fit.pmean)
        ratio = y / fity
        pp.xlim(0, 21)
        pp.xlabel('x')
        pp.ylabel('y/f(x,p)')
        pp.errorbar(x=x, y=mean(ratio), yerr=sdev(ratio), fmt='ob')
        pp.plot([0.0, 21.0], [1.0, 1.0])
        pp.show()
def dump_precompute(g, outputfile):
    if isinstance(outputfile, str):
        outputfile = open(outputfile, 'wb')
    mn = gv.mean(g)
    evb = gv.evalcov(g.buf)
    covm = {}
    for keyi in g:
        for keyj in g:
            covm[keyi, keyj] = evb[g.slice(keyi), g.slice(keyj)]
    pickle.dump((mn, covm), outputfile)
    outputfile.close()
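# Hypothetical counterpart to dump_precompute() above, added for illustration;
# the name load_precompute is an assumption, not from the original source. It
# simply inverts the (means, covariance-blocks) pickle layout written above.
def load_precompute(inputfile):
    if isinstance(inputfile, str):
        inputfile = open(inputfile, 'rb')
    mn, covm = pickle.load(inputfile)   # (means, {(keyi, keyj): cov block})
    inputfile.close()
    return mn, covm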
def show_plot(t_array, th_array):
    """ Display theta vs t plot. """
    th_mean = gv.mean(th_array)
    th_sdev = gv.sdev(th_array)
    thp = th_mean + th_sdev
    thm = th_mean - th_sdev
    plt.fill_between(t_array, thp, thm, color='0.8')
    plt.plot(t_array, th_mean, linewidth=0.5)
    plt.xlabel('$t$')
    plt.ylabel(r'$\theta(t)$')
    plt.savefig('pendulum.pdf', bbox_inches='tight')
    plt.show()
def tabulate_avg(avgout, format=(6, 3)):
    """ Tabulate averages and standard deviations.

    tabulate_avg(...) creates a nicely formatted table displaying the
    output from functions like ``dataset.Dataset.gdev``; ``avgout`` is
    that output. Parameter ``format`` specifies the output format:
    ``format=(N,D)`` implies that format ``'%N.Df(%Dd)'`` is used to
    print ``avg, int(10**D * std_dev)``. The table is returned as a
    single string, for printing.
    """
    table = []
    output = sorted(avgout.items())
    for tag, avsd in output:
        try:
            av = avsd.mean
            sd = avsd.sdev
        except AttributeError:
            av = gvar.mean(avsd)
            sd = gvar.sdev(avsd)
        lines = ''
        line = '%15s' % str(tag)
        try:
            sdfac = 10 ** format[1]
            fmt = (
                ' %' + str(format[0]) + '.' + str(format[1]) +
                'f(%' + str(format[1]) + 'd)'
                )
            def avgfmt(av, sd, fmt=fmt, sdfac=sdfac):
                try:
                    return fmt % (av, int(sdfac * sd + 0.5))
                except:
                    return ' %g (%.4g)' % (av, sd)
            ##
        except:
            def avgfmt(av, sd):
                return ' %g (%.4g)' % (av, sd)
            ##
        na = len(av)
        if len(sd) < na:
            na = len(sd)
        if na >= 1:
            for i in range(na):
                if len(sd.shape) == 2:
                    sdi = math.sqrt(sd[i][i])
                else:
                    sdi = sd[i]
                nextfield = avgfmt(av[i], sdi)
                if (len(nextfield) + len(line)) > 78:
                    lines = lines + line + '\n'
                    line = ''.ljust(15) + nextfield
                else:
                    line = line + nextfield
        table.append(lines + line + '\n')
    return '\n'.join(table)
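# Hedged usage sketch for tabulate_avg() above; not from the original source.
# Assumes avgout maps tags to arrays of gvar.GVar's; values are made up.
def _demo_tabulate_avg():
    avgout = {
        'corr1': gvar.gvar(['0.1234(56)', '0.2345(67)']),
        'corr2': gvar.gvar(['1.234(12)', '2.345(23)']),
        }
    print(tabulate_avg(avgout, format=(8, 4)))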
def test_expval(self):
    " integrator(f ...) "
    xarray = gv.gvar([5., 3.], [[4., 0.9], [0.9, 1.]])
    xdict = gv.BufferDict([(0, 1), (1, 1)])
    xdict = gv.BufferDict(xdict, buf=xarray)
    xscalar = xarray[0]
    def fscalar(x):
        if hasattr(x, 'keys'):
            x = x.buf
        return x.flat[0]
    def farray(x):
        if hasattr(x, 'keys'):
            x = x.buf
        return gv.PDFStatistics.moments(x.flat[0])
    def fdict(x):
        if hasattr(x, 'keys'):
            x = x.buf
        return gv.BufferDict([
            (0, x.flat[0]), (1, x.flat[0] ** 2),
            (2, x.flat[0] ** 3), (3, x.flat[0] ** 4)
            ])
    for x in [xscalar, xarray, xdict]:
        integ = PDFIntegrator(x)
        integ(neval=1000, nitn=5)
        for f in [fscalar, farray, fdict]:
            r = integ(f, neval=1000, nitn=5, adapt=False)
            if f is fscalar:
                self.assertTrue(abs(r.mean - 5) < 5. * r.sdev)
            else:
                if hasattr(r, 'keys'):
                    r = r.buf
                s = gv.PDFStatistics(r)
                self.assertTrue(abs(s.mean.mean - 5.) < 10. * s.mean.sdev)
                self.assertTrue(abs(s.sdev.mean - 2.) < 10. * s.sdev.sdev)
                self.assertTrue(abs(s.skew.mean) < 10. * s.skew.sdev)
                self.assertTrue(abs(s.ex_kurt.mean) < 10. * s.ex_kurt.sdev)
    # covariance test
    def fcov(x):
        return dict(x=x, xx=np.outer(x, x))
    integ = PDFIntegrator(xarray)
    r = integ(fcov, neval=1000, nitn=5)
    rmean = r['x']
    rcov = r['xx'] - np.outer(r['x'], r['x'])
    xmean = gv.mean(xarray)
    xcov = gv.evalcov(xarray)
    for i in [0, 1]:
        self.assertTrue(abs(rmean[i].mean - xmean[i]) < 5. * rmean[i].sdev)
        for j in [0, 1]:
            self.assertTrue(abs(rcov[i, j].mean - xcov[i, j]) < 5. * rcov[i, j].sdev)
def do_plot_corr_effective_mass_check(idx, fig=fig):
    fig.clear()
    ax = fig.add_subplot(111)
    key = models[idx[0]].datatag
    ax.set_ylim(utp.get_option("y_limit", [0.0, 1.2], **kwargs[key]))
    #
    ## -- plot fit
    ax.plot(_emTPosFit[idx[0]], _emFitCentral[idx[0]],
            color=utp.get_option("color3", 'b', **kwargs[key]))
    ax.plot(_emTPosFit[idx[0]], _emFitError[idx[0]][0],
            color=utp.get_option("color3", 'b', **kwargs[key]),
            ls=utp.get_option("linestyle2", '--', **kwargs[key]))
    ax.plot(_emTPosFit[idx[0]], _emFitError[idx[0]][1],
            color=utp.get_option("color3", 'b', **kwargs[key]),
            ls=utp.get_option("linestyle2", '--', **kwargs[key]))
    ## -- plot reference
    for val in df.ec_reference_lines:
        vt = [val for t in _emTPosFit[idx[0]]]
        ax.plot(_emTPosFit[idx[0]], vt,
                color=utp.get_option("color2", 'g', **kwargs[key]))
    ## -- plot correlator data
    ax.errorbar(_emTPosRatio[idx[0]], _emLogRatioCentral[idx[0]],
                yerr=_emLogRatioError[idx[0]],
                mfc=utp.get_option("markerfacecolor1", 'None', **kwargs[key]),
                mec=utp.get_option("markeredgecolor1", 'k', **kwargs[key]),
                color=utp.get_option("markeredgecolor1", 'k', **kwargs[key]),
                ls=utp.get_option("linestyle1", 'None', **kwargs[key]),
                marker=utp.get_option("marker1", 'o', **kwargs[key]),
                ms=utp.get_option("markersize", 6, **kwargs[key]))
    ax.scatter(_emTPosFit[idx[0]], gv.mean(_emLogRatioFit[idx[0]]),
               color=utp.get_option("color1", 'r', **kwargs[key]),
               marker=utp.get_option("marker", 'o', **kwargs[key]),
               s=utp.get_option("markersize", 36, **kwargs[key]))
    fig.suptitle(utp.get_option("plottitle",
                                str(idx[0]) + " default title " + str(key),
                                **kwargs[key]),
                 fontsize=utp.get_option("titlesize", 20, **kwargs[key]))
    ## -- modify some options
    ax.set_xlabel(r'$t$ slice')
    ax.set_ylabel(utp.get_option(
        "yaxistitle",
        r"$-\frac{1}{" + str(_emSep) + r"}\,log\frac{C(t+" + str(_emSep) + r")}{C(t)}$",
        **kwargs[key]))
    for item in [ax.xaxis.label, ax.yaxis.label]:
        # must be after setting label content (LaTeX ruins it)
        item.set_fontsize(fontsize=utp.get_option("fontsize", 20, **kwargs[key]))
    rect = fig.patch
    rect.set_facecolor('white')
    if utp.get_option("to_file", False, **kwargs[key]):
        save_dir = utp.get_option("ec_save_dir", "./plotdump", **kwargs[key])
        save_name = utp.get_option("ec_save_name", "ecplot-" + key + ".pdf", **kwargs[key])
        plt.savefig(save_dir + '/' + save_name)
    if utp.get_option("to_terminal", True, **kwargs[key]):
        plt.draw()
    pass
def test_inv(self):
    m = self.make_random([[1., 0.1], [0.1, 2.]])
    one = gv.gvar([['1(0)', '0(0)'], ['0(0)', '1(0)']])
    invm = linalg.inv(m)
    self.assertTrue(gv.equivalent(linalg.inv(invm), m))
    for mm in [invm.dot(m), m.dot(invm)]:
        np.testing.assert_allclose(
            gv.mean(mm), [[1, 0], [0, 1]], rtol=1e-10, atol=1e-10
            )
        np.testing.assert_allclose(
            gv.sdev(mm), [[0, 0], [0, 0]], rtol=1e-10, atol=1e-10
            )
    p = linalg.det(m) * linalg.det(invm)
    self.assertAlmostEqual(p.mean, 1.)
    self.assertGreater(1e-10, p.sdev)
def test_histogram(self):
    x = gv.gvar([5., 3.], [[4., 0.2], [0.2, 1.]])
    xsum = x[0] + x[1]
    integ = PDFIntegrator(x)
    hist = gv.PDFHistogram(xsum, nbin=40, binwidth=0.2)
    integ(neval=1000, nitn=5)
    def fhist(x):
        return hist.count(x[0] + x[1])
    r = integ(fhist, neval=1000, nitn=5, adapt=False)
    bins, prob, stat, norm = hist.analyze(r)
    self.assertTrue(abs(gv.mean(np.sum(prob)) - 1.) < 5. * gv.sdev(np.sum(prob)))
    self.assertTrue(abs(stat.mean.mean - xsum.mean) < 5. * stat.mean.sdev)
    self.assertTrue(abs(stat.sdev.mean - xsum.sdev) < 5. * stat.sdev.sdev)
    self.assertTrue(abs(stat.skew.mean) < 5. * stat.skew.sdev)
    self.assertTrue(abs(stat.ex_kurt.mean) < 5. * stat.ex_kurt.sdev)
def inv(a):
    """ Inverse of matrix ``a``.

    Args:
        a: Two-dimensional, square matrix/array of numbers
            and/or :class:`gvar.GVar`\s.

    Returns:
        The inverse of matrix ``a``.

    Raises:
        ValueError: If matrix is not square and two-dimensional.
    """
    amean = gvar.mean(a)
    if amean.ndim != 2 or amean.shape[0] != amean.shape[1]:
        raise ValueError('Bad matrix shape: ' + str(a.shape))
    da = a - amean
    ainv = numpy.linalg.inv(amean)
    return ainv - ainv.dot(da.dot(ainv))
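# Hedged usage sketch for inv() above; matrix values are illustrative
# (test_inv above exercises the same first-order identities).
def _demo_inv():
    import gvar
    a = gvar.gvar([['2.0(1)', '0.1(1)'], ['0.1(1)', '1.5(1)']])
    ainv = inv(a)
    # to first order in the fluctuations, ainv.dot(a) is the exact identity,
    # with zero uncertainty:
    print(gvar.mean(ainv.dot(a)))   # ~ [[1, 0], [0, 1]]
    print(gvar.sdev(ainv.dot(a)))   # ~ [[0, 0], [0, 0]]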
def det(a):
    """ Determinant of matrix ``a``.

    Args:
        a: Two-dimensional, square matrix/array of numbers
            and/or :class:`gvar.GVar`\s.

    Returns:
        Determinant of the matrix.

    Raises:
        ValueError: If matrix is not square and two-dimensional.
    """
    amean = gvar.mean(a)
    if amean.ndim != 2 or amean.shape[0] != amean.shape[1]:
        raise ValueError('Bad matrix shape: ' + str(a.shape))
    da = a - amean
    ainv = inv(amean)
    return numpy.linalg.det(amean) * (1 + numpy.matrix.trace(da.dot(ainv)))
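# Hedged consistency check for det() above; not from the original source.
# To first order, det(a) * det(inv(a)) is 1 with negligible uncertainty
# (the same identity asserted in test_inv above). Values are illustrative.
def _demo_det():
    import gvar
    a = gvar.gvar([['1.0(1)', '0.1(1)'], ['0.1(1)', '2.0(2)']])
    p = det(a) * det(inv(a))
    print(p.mean, p.sdev)   # ~ 1.0, ~ 0.0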
def correlator_pion_ratio_fn(cora, corb, t, T, expm, fac):
    """ Average corb*expm^t over a range of t, then divide all of cora
    by fac*sqrt(avg).

    Prototype function: a new function with t, T, expm, fac fixed must be
    defined in order to use this with fn_apply_tags.
    """
    def fexp(cor, tp):
        if tp < T / 2:
            return np.abs(cor[tp]) * np.power(expm, float(tp))
        else:
            return np.abs(cor[tp]) * np.power(expm, float(T - tp))
    cor2 = average_tag_fn(corb)
    cor1 = apply_t_fn(cor2, fexp, t)
    cor0 = plateau_tag_fn(cor1)
    cor0 = gv.mean(np.sqrt(cor0) * fac)
    cnew = list()
    for c in cora:
        cnew.append(c * cor0)
    return cnew
def slogdet(a):
    """ Sign and logarithm of determinant of matrix ``a``.

    Args:
        a: Two-dimensional, square matrix/array of numbers
            and/or :class:`gvar.GVar`\s.

    Returns:
        Tuple ``(s, logdet)`` where the determinant of matrix ``a``
        is ``s * exp(logdet)``.

    Raises:
        ValueError: If matrix is not square and two-dimensional.
    """
    amean = gvar.mean(a)
    if amean.ndim != 2 or amean.shape[0] != amean.shape[1]:
        raise ValueError('Bad matrix shape: ' + str(a.shape))
    da = a - amean
    ainv = inv(amean)
    s, ldet = numpy.linalg.slogdet(amean)
    ldet += numpy.matrix.trace(da.dot(ainv))
    return s, ldet
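# Hedged usage sketch for slogdet() above; values are illustrative. For a
# well-conditioned matrix, s * exp(logdet) agrees with det(a) to first order.
def _demo_slogdet():
    import gvar
    a = gvar.gvar([['2.0(1)', '0.3(1)'], ['0.3(1)', '1.5(1)']])
    s, logdet = slogdet(a)
    print(s * gvar.exp(logdet))   # ~ det(a)
    print(det(a))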
def main():
    # pendulum data exhibits experimental error in ability to measure theta
    t = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.]
    theta = gv.gvar([
        '1.477(79)', '0.791(79)', '-0.046(79)', '-0.852(79)',
        '-1.523(79)', '-1.647(79)', '-1.216(79)', '-0.810(79)',
        '0.185(79)', '0.832(79)'
        ])

    # prior: assume experimental error in ability to specify theta(0)
    prior = gv.BufferDict()
    prior['g/l'] = (2 * math.pi) ** 2 * gv.gvar(1, 0.1)
    prior['theta(0)'] = gv.gvar(math.pi / 2., 0.05)

    # fit function: use class Pendulum object to integrate pendulum motion
    def fitfcn(p, t=t):
        pendulum = Pendulum(p['g/l'])
        return pendulum(p['theta(0)'], t)

    # do the fit and print results
    fit = lsqfit.nonlinear_fit(data=theta, prior=prior, fcn=fitfcn)
    print(fit.format(maxline=True))
    print('fit/exact for (g/l) =', fit.p['g/l'] / (2 * math.pi) ** 2)
    print('fit/exact for theta(0) =', fit.p['theta(0)'] / (math.pi / 2.))
    if MAKE_PLOT:
        # make figure (saved to file pendulum.pdf)
        plt.figure(figsize=(4, 3))
        # start plot with data
        plt.errorbar(x=t, y=gv.mean(theta), yerr=gv.sdev(theta), fmt='k.')
        # use best-fit function to add smooth curve for 100 points
        t = np.linspace(0., 1.1, 100)
        th = fitfcn(fit.p, t)
        show_plot(t, th)
def main():
    gv.ranseed([2009, 2010, 2011, 2012])    # initialize random numbers (opt.)
    x, y = make_data()                      # make fit data
    p0 = None                               # make larger fits go faster (opt.)
    sys_stdout = sys.stdout
    for nexp in range(3, 8):
        prior = make_prior(nexp)
        fit = lsqfit.nonlinear_fit(
            data=(x, y), fcn=f, prior=prior, p0=p0, svdcut=1e-15
            )  # ,svdcut=SVDCUT)
        if fit.chi2 / fit.dof < 1.0:
            p0 = fit.pmean                  # starting point for next fit (opt.)
        if nexp == 5:
            sys.stdout = tee.tee(sys_stdout, open("eg3.out", "w"))
        print("************************************* nexp =", nexp)
        print(fit)                          # print the fit results
        E = fit.p["E"]                      # best-fit parameters
        a = fit.p["a"]
        print("E1/E0 =", E[1] / E[0], " E2/E0 =", E[2] / E[0])
        print("a1/a0 =", a[1] / a[0], " a2/a0 =", a[2] / a[0])
        # print E[1]-E[0], E[-1]-E[-2]
        # print (E[1]/E[0]).partialsdev(fit.prior['E'])
        # print (E[1]/E[0]).partialsdev(fit.prior['a'])
        # print (E[1]/E[0]).partialsdev(fit.y)
        sys.stdout = sys_stdout
        print()
    # sys.stdout = tee.tee(sys_stdout, open("eg3a.out", "w"))
    # for i in range(1):
    #     print '--------------------- fit with %d extra data sets' % (i+1)
    #     x, y = make_data(1)
    #     prior = fit.p
    #     fit = lsqfit.nonlinear_fit(data=(x,y), fcn=f1, prior=prior, svdcut=SVDCUT)
    #     print fit
    sys.stdout = sys_stdout
    if DO_BOOTSTRAP:
        Nbs = 10                            # number of bootstrap copies
        outputs = {
            "E1/E0": [], "E2/E0": [], "a1/a0": [], "a2/a0": [],
            "E1": [], "a1": [],
            }                               # results
        for bsfit in fit.bootstrap_iter(n=Nbs):
            E = bsfit.pmean["E"]            # best-fit parameters
            a = bsfit.pmean["a"]
            outputs["E1/E0"].append(E[1] / E[0])    # accumulate results
            outputs["E2/E0"].append(E[2] / E[0])
            outputs["a1/a0"].append(a[1] / a[0])
            outputs["a2/a0"].append(a[2] / a[0])
            outputs["E1"].append(E[1])
            outputs["a1"].append(a[1])
            # print E[:2]
            # print a[:2]
            # print bsfit.chi2/bsfit.dof
        # extract means and standard deviations from the bootstrap output
        for k in outputs:
            outputs[k] = gv.gvar(np.mean(outputs[k]), np.std(outputs[k]))
        print("Bootstrap results:")
        print("E1/E0 =", outputs["E1/E0"], " E2/E0 =", outputs["E2/E0"])
        print("a1/a0 =", outputs["a1/a0"], " a2/a0 =", outputs["a2/a0"])
        print("E1 =", outputs["E1"], " a1 =", outputs["a1"])

    if DO_PLOT:
        print(fit.format(100))              # print the fit results
        import pylab as pp
        from gvar import mean, sdev
        fity = f(x, fit.pmean)
        ratio = y / fity
        pp.xlim(0, 21)
        pp.xlabel("x")
        pp.ylabel("y/f(x,p)")
        pp.errorbar(x=x, y=mean(ratio), yerr=sdev(ratio), fmt="ob")
        pp.plot([0.0, 21.0], [1.0, 1.0])
        pp.show()
def wavg(dataseq, prior=None, fast=False, **kargs):
    """ Weighted average of |GVar|\s or arrays/dicts of |GVar|\s.

    The weighted average of several |GVar|\s is what one obtains from a
    least-squares fit of the collection of |GVar|\s to the one-parameter
    fit function ::

        def f(p):
            return N * [p[0]]

    where ``N`` is the number of |GVar|\s. The average is the best-fit
    value for ``p[0]``. |GVar|\s with smaller standard deviations carry
    more weight than those with larger standard deviations. The averages
    computed by ``wavg`` take account of correlations between the
    |GVar|\s.

    If ``prior`` is not ``None``, it is added to the list of data used in
    the average. Thus ``wavg([x2, x3], prior=x1)`` is the same as
    ``wavg([x1, x2, x3])``.

    Typical usage is ::

        x1 = gvar.gvar(...)
        x2 = gvar.gvar(...)
        x3 = gvar.gvar(...)
        xavg = wavg([x1, x2, x3])   # weighted average of x1, x2 and x3

    where the result ``xavg`` is a |GVar| containing the weighted average.

    The individual |GVar|\s in the last example can be replaced by
    multidimensional distributions, represented by arrays of |GVar|\s or
    dictionaries of |GVar|\s (or arrays of |GVar|\s). For example, ::

        x1 = [gvar.gvar(...), gvar.gvar(...)]
        x2 = [gvar.gvar(...), gvar.gvar(...)]
        x3 = [gvar.gvar(...), gvar.gvar(...)]
        xavg = wavg([x1, x2, x3])
        # xavg[i] is wgtd avg of x1[i], x2[i], x3[i]

    where each array ``x1``, ``x2`` ... must have the same shape. The
    result ``xavg`` in this case is an array of |GVar|\s, where the shape
    of the array is the same as that of ``x1``, etc.

    Another example is ::

        x1 = dict(a=[gvar.gvar(...), gvar.gvar(...)], b=gvar.gvar(...))
        x2 = dict(a=[gvar.gvar(...), gvar.gvar(...)], b=gvar.gvar(...))
        x3 = dict(a=[gvar.gvar(...), gvar.gvar(...)])
        xavg = wavg([x1, x2, x3])
        # xavg['a'][i] is wgtd avg of x1['a'][i], x2['a'][i], x3['a'][i]
        # xavg['b'] is wgtd avg of x1['b'], x2['b']

    where different dictionaries can have (some) different keys. Here the
    result ``xavg`` is a :class:`gvar.BufferDict` having the same keys as
    ``x1``, etc.

    Weighted averages can become costly when the number of random samples
    being averaged is large (100s or more). In such cases it might be
    useful to set parameter ``fast=True``. This causes ``wavg`` to
    estimate the weighted average by incorporating the random samples one
    at a time into a running average::

        result = prior
        for dataseq_i in dataseq:
            result = wavg([result, dataseq_i], ...)

    This method is much faster when ``len(dataseq)`` is large, and gives
    the exact result when there are no correlations between different
    elements of list ``dataseq``. The results are approximately correct
    when ``dataseq[i]`` and ``dataseq[j]`` are correlated for ``i!=j``.

    :param dataseq: The |GVar|\s to be averaged. ``dataseq`` is a
        one-dimensional sequence of |GVar|\s, or of arrays of |GVar|\s,
        or of dictionaries containing |GVar|\s or arrays of |GVar|\s.
        All ``dataseq[i]`` must have the same shape.
    :param prior: Prior values for the averages, to be included in the
        weighted average. Default value is ``None``, in which case
        ``prior`` is ignored.
    :type prior: |GVar| or array/dictionary of |GVar|\s
    :param fast: Setting ``fast=True`` causes ``wavg`` to compute an
        approximation to the weighted average that is much faster to
        calculate when averaging a large number of samples (100s or
        more). The default is ``fast=False``.
    :type fast: bool
    :param kargs: Additional arguments (e.g., ``svdcut``) for the fitter
        used to do the averaging.
    :type kargs: dict

    Results returned by :func:`gvar.wavg` have the following extra
    attributes describing the average:

    .. attribute:: chi2

        ``chi**2`` for the weighted average.

    .. attribute:: dof

        Effective number of degrees of freedom.

    .. attribute:: Q

        Quality factor ``Q`` (or *p-value*) for the average: the
        probability that the ``chi**2`` could have been larger, by
        chance, assuming that the data are all Gaussian and consistent
        with each other. Values smaller than 0.1 or so suggest that the
        data are not Gaussian or are inconsistent with each other.

    .. attribute:: time

        Time required to do the average.

    .. attribute:: svdcorrection

        The *svd* corrections made to the data when ``svdcut`` is not
        ``None``.

    .. attribute:: fit

        Fit output from the average.
    """
    if len(dataseq) <= 0:
        if prior is None:
            return None
        wavg.Q = 1
        wavg.chi2 = 0
        wavg.dof = 0
        wavg.time = 0
        wavg.fit = None
        wavg.svdcorrection = None
        if hasattr(prior, 'keys'):
            return BufferDictWAvg(dataseq[0], wavg)
        if numpy.shape(prior) == ():
            return GVarWAvg(prior, wavg)
        else:
            return ArrayWAvg(numpy.asarray(prior), wavg)
    elif len(dataseq) == 1 and prior is None:
        wavg.Q = 1
        wavg.chi2 = 0
        wavg.dof = 0
        wavg.time = 0
        wavg.fit = None
        wavg.svdcorrection = None
        if hasattr(dataseq[0], 'keys'):
            return BufferDictWAvg(dataseq[0], wavg)
        if numpy.shape(dataseq[0]) == ():
            return GVarWAvg(dataseq[0], wavg)
        else:
            return ArrayWAvg(numpy.asarray(dataseq[0]), wavg)
    if fast:
        chi2 = 0
        dof = 0
        time = 0
        ans = prior
        svdcorrection = gvar.BufferDict()
        for i, dataseq_i in enumerate(dataseq):
            if ans is None:
                ans = dataseq_i
            else:
                ans = wavg([ans, dataseq_i], fast=False, **kargs)
                chi2 += wavg.chi2
                dof += wavg.dof
                time += wavg.time
                if wavg.svdcorrection is not None:
                    for k in wavg.svdcorrection:
                        svdcorrection[str(i) + ':' + k] = wavg.svdcorrection[k]
        wavg.chi2 = chi2
        wavg.dof = dof
        wavg.time = time
        wavg.Q = gammaQ(dof / 2., chi2 / 2.)
        wavg.svdcorrection = svdcorrection
        wavg.fit = None
        ans.dof = wavg.dof
        ans.Q = wavg.Q
        ans.chi2 = wavg.chi2
        ans.time = wavg.time
        ans.svdcorrection = wavg.svdcorrection
        ans.fit = wavg.fit
        return ans
    if hasattr(dataseq[0], 'keys'):
        data = {}
        keys = []
        if prior is not None:
            dataseq = [prior] + list(dataseq)
        for dataseq_i in dataseq:
            for k in dataseq_i:
                if k in data:
                    data[k].append(dataseq_i[k])
                else:
                    data[k] = [dataseq_i[k]]
                    keys.append(k)
        data = gvar.BufferDict(data, keys=keys)
        p0 = gvar.BufferDict()
        for k in data:
            p0[k] = gvar.mean(data[k][0]) + gvar.sdev(data[k][0]) / 10.
        def fcn(p):
            ans = gvar.BufferDict()
            for k in data:
                ans[k] = len(data[k]) * [p[k]]
            return ans
    else:
        p = numpy.asarray(dataseq[0])
        data = [] if prior is None else [prior]
        data += [dataseqi for dataseqi in dataseq]
        p0 = numpy.asarray(gvar.mean(data[0]) + gvar.sdev(data[0]) / 10.)
        data = numpy.array(data)
        def fcn(p):
            return len(data) * [p]
    fit = lsqfit.nonlinear_fit(data=data, fcn=fcn, p0=p0, **kargs)
    # wavg.Q = fit.Q
    # wavg.chi2 = fit.chi2
    # wavg.dof = fit.dof
    # wavg.time = fit.time
    # wavg.svdcorrection = fit.svdcorrection
    # wavg.fit = fit
    if p0.shape is None:
        return BufferDictWAvg(gvar.BufferDict(p0, buf=fit.p.flat), fit)
    elif p0.shape == ():
        return GVarWAvg(fit.p.flat[0], fit)
    else:
        return ArrayWAvg(fit.p.reshape(p0.shape), fit)
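# Hedged usage sketch for wavg() above; values are illustrative. Exercises
# the exact average and the fast running-average approximation described in
# the docstring.
def _demo_wavg():
    import gvar
    x1 = gvar.gvar('1.00(10)')
    x2 = gvar.gvar('1.10(10)')
    x3 = gvar.gvar('0.95(20)')
    xavg = wavg([x1, x2, x3])               # exact weighted average
    print(xavg, xavg.chi2, xavg.dof, xavg.Q)
    xfast = wavg([x1, x2, x3], fast=True)   # approximate, faster for long lists
    print(xfast)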
def plot_corr_normalized(models, data, fit, **kwargs):
    """ Get all data ready so that it can be plotted on command.
    Allows for dynamic cycling through plots.
    """
    _fnNMod = len(models)
    _fnIdx = [0]  ## -- index of plotted function, in array so it can be modified in functions
    ## -- objects to hold all plot data
    ##  - Dat/Fit refers to the correlator data or the fit function
    ##  - Central/Error are the central value and errors
    _fnDatCentral = []
    _fnDatError = []
    _fnFitOnes = []
    _fnFitError = []
    #
    ## -- other objects
    _fnTDataNonZero = []
    _fnTFitNonZero = []
    _fnTData = []
    _fnTFit = []
    _fnTRem = []  # number of previous timeslices removed
    fig, ax = plt.subplots(1)
    #
    ## -- setup plot function
    def do_plot_normalized(idx, fig=fig):
        fig.clear()
        ax = fig.add_subplot(111)
        key = models[idx[0]].datatag
        ax.set_xlim([-1, len(_fnTData[idx[0]])])
        ax.set_ylim(utp.get_option("y_limit", [0.2, 1.8], **kwargs[key]))
        #
        ## -- plot fit
        ax.plot(_fnTDataNonZero[idx[0]], _fnFitOnes[idx[0]],
                color=utp.get_option("color2", 'b', **kwargs[key]))
        ax.plot(_fnTDataNonZero[idx[0]], _fnFitError[idx[0]][0],
                color=utp.get_option("color2", 'g', **kwargs[key]),
                ls=utp.get_option("linestyle2", '--', **kwargs[key]))
        ax.plot(_fnTDataNonZero[idx[0]], _fnFitError[idx[0]][1],
                color=utp.get_option("color2", 'g', **kwargs[key]),
                ls=utp.get_option("linestyle2", '--', **kwargs[key]))
        ## -- plot correlator data
        ax.errorbar(_fnTDataNonZero[idx[0]], _fnDatCentral[idx[0]],
                    yerr=_fnDatError[idx[0]],
                    mfc=utp.get_option("markerfacecolor1", 'None', **kwargs[key]),
                    mec=utp.get_option("markeredgecolor1", 'k', **kwargs[key]),
                    color=utp.get_option("markeredgecolor1", 'k', **kwargs[key]),
                    ls=utp.get_option("linestyle1", 'None', **kwargs[key]),
                    marker=utp.get_option("marker1", 'o', **kwargs[key]),
                    ms=utp.get_option("markersize", 6, **kwargs[key]))
        ax.scatter(_fnTFitNonZero[idx[0]],
                   [_fnDatCentral[idx[0]][t] for t in
                    list(np.array(_fnTFitNonZero[idx[0]]) - np.array(_fnTRem[idx[0]]))],
                   color=utp.get_option("color1", 'r', **kwargs[key]),
                   marker=utp.get_option("marker", 'o', **kwargs[key]),
                   s=utp.get_option("markersize", 36, **kwargs[key]))
        fig.suptitle(utp.get_option("plottitlefn",
                                    str(idx[0]) + " default title " + str(key),
                                    **kwargs[key]),
                     fontsize=utp.get_option("titlesize", 20, **kwargs[key]))
        ## -- modify some options
        ax.set_xlabel(r'$t$')
        ax.set_ylabel(utp.get_option("yaxistitle", r"$C(t)/C_{fit}(t)$", **kwargs[key]))
        for item in [ax.xaxis.label, ax.yaxis.label]:
            # must be after setting label content (LaTeX ruins it)
            item.set_fontsize(fontsize=utp.get_option("fontsize", 20, **kwargs[key]))
        rect = fig.patch
        rect.set_facecolor('white')
        if utp.get_option("to_file", False, **kwargs[key]):
            save_dir = utp.get_option("fn_save_dir", "./plotdump", **kwargs[key])
            save_name = utp.get_option("fn_save_name", "fnplot-" + key + ".pdf", **kwargs[key])
            plt.savefig(save_dir + '/' + save_name)
        if utp.get_option("to_terminal", True, **kwargs[key]):
            plt.draw()
        pass
    #
    ## -- setup button press action function
    def press_normalized(event, idx=_fnIdx):
        #print('press_normalized', event.key)
        try:
            ## -- manually indicate index
            idx[0] = int(event.key) + (idx[0]) * 10
        except ValueError:
            if event.key == ' ':  ## -- space
                ## -- allows for replotting when changing index by typing number keys
                idx[0] = idx[0] % _fnNMod
                do_plot_normalized(idx)
            elif event.key == 'left':
                idx[0] = (idx[0] - 1) % _fnNMod
                do_plot_normalized(idx)
            elif event.key == 'right':
                idx[0] = (idx[0] + 1) % _fnNMod
                do_plot_normalized(idx)
            elif event.key == 'backspace':
                ## -- reset index so can manually flip through using number keys
                idx[0] = 0
            elif event.key == 'd':
                ## -- dump plots into ./plotdump directory
                for ix, model in zip(range(len(models)), models):
                    key = model.datatag
                    save_dir = utp.get_option("fn_save_dir", "./plotdump", **kwargs[key])
                    save_name = utp.get_option("fn_save_name", "fnplot-" + key + ".png", **kwargs[key])
                    do_plot_normalized([ix])
                    plt.savefig(save_dir + '/' + save_name)
                do_plot_normalized(idx)
    #
    ## --
    fig.canvas.mpl_connect('key_press_event', press_normalized)

    ## -- save plot data
    for idx, model in zip(range(len(models)), models):
        key = model.datatag
        _fnTData.append(model.tdata)
        _fnTFit.append(model.tfit)
        _fnTFit[-1] = np.append(
            _fnTFit[-1],
            list(sorted([len(_fnTData[-1]) - t for t in _fnTFit[-1]]))
            )
        ## -- fit
        _fnFitFunc = utp.create_fit_func(model, fit)
        _fnFitMean = gv.mean(_fnFitFunc(_fnTData[-1]))
        _fnTDataNonZero.append(
            [t for t in _fnTData[-1] if np.abs(_fnFitMean[t]) > 1e-20])
        _fnTFitNonZero.append(
            [t for t in _fnTFit[-1] if np.abs(_fnFitMean[t]) > 1e-20])
        _fnTRem.append(
            [(0 if np.abs(_fnFitMean[t]) > 1e-20 else 1) for t in model.tdata])
        _fnTRem[-1] = [
            sum(_fnTRem[-1][:i + 1]) for i in range(len(_fnTRem[-1]))
            if i in _fnTFitNonZero[-1]
            ]
        _fnFitMean = gv.mean(_fnFitFunc(_fnTDataNonZero[-1]))
        _fnFitSdev = list(
            np.array(gv.sdev(_fnFitFunc(_fnTDataNonZero[-1]))) / np.array(_fnFitMean))
        _fnFitOnes.append(list(np.ones(len(_fnTDataNonZero[-1]))))
        _fnFitError.append([
            list(np.array(_fnFitOnes[-1]) - np.array(_fnFitSdev)),
            list(np.array(_fnFitOnes[-1]) + np.array(_fnFitSdev))
            ])
        ## -- data
        _fnDatCentral.append(list(
            np.array([gv.mean(data[key])[t] for t in _fnTDataNonZero[-1]]) /
            np.array(_fnFitMean)
            ))
        _fnDatSdev = (
            np.array([gv.sdev(data[key])[t] for t in _fnTDataNonZero[-1]]) /
            np.array(_fnFitMean)
            )
        _fnDatError.append([list(_fnDatSdev), list(_fnDatSdev)])
    ## -- done saving data

    if not (utp.get_option("to_terminal", True, **kwargs[key])) and \
       utp.get_option("to_file", False, **kwargs[key]):
        for ix in range(len(models)):
            ## -- loops and saves all without creating window
            do_plot_normalized([ix])
    else:
        do_plot_normalized(_fnIdx)
def main():
    gv.ranseed([2009, 2010, 2011, 2012])    # initialize random numbers (opt.)
    x, y = make_data()                      # make fit data
    p0 = None                               # make larger fits go faster (opt.)
    for nexp in range(3, 5):
        print('************************************* nexp =', nexp)
        prior = make_prior(nexp)
        fit = lsqfit.nonlinear_fit(data=(x, y), fcn=f, prior=prior, p0=p0)
        print(fit)                          # print the fit results
        E = fit.p['E']                      # best-fit parameters
        a = fit.p['a']
        print('E1/E0 =', E[1] / E[0], ' E2/E0 =', E[2] / E[0])
        print('a1/a0 =', a[1] / a[0], ' a2/a0 =', a[2] / a[0])
        print()
        if fit.chi2 / fit.dof < 1.:
            p0 = fit.pmean                  # starting point for next fit (opt.)

    sys_stdout = sys.stdout
    if DO_ERRORBUDGET:
        lines = [
            "E = fit.p['E']",
            "a = fit.p['a']",
            "print(E[1] / E[0])",
            "print((E[1] / E[0]).partialsdev(fit.prior['E']))",
            "print((E[1] / E[0]).partialsdev(fit.prior['a']))",
            "print((E[1] / E[0]).partialsdev(y))",
            ]
        sys.stdout = tee.tee(sys_stdout, open("eg4c.out", "w"))
        for line in lines:
            print(">>>", line)
            if line[:5] == "print":
                print(eval(line[5:]))
        # print E[1]/E[0]
        # print (E[1]/E[0]).partialsdev(fit.prior['E'])
        # print (E[1]/E[0]).partialsdev(fit.prior['a'])
        # print (E[1]/E[0]).partialsdev(y)
        outputs = {
            'E1/E0': E[1] / E[0], 'E2/E0': E[2] / E[0],
            'a1/a0': a[1] / a[0], 'a2/a0': a[2] / a[0],
            }
        inputs = {'E': fit.prior['E'], 'a': fit.prior['a'], 'y': y}
        sys.stdout = tee.tee(sys_stdout, open("eg4b.out", "w"))
        print(fit.fmt_values(outputs))
        print(fit.fmt_errorbudget(outputs, inputs))
        sys.stdout = sys_stdout

    if DO_SIMULATIONS:
        # fit simulations
        sys.stdout = tee.tee(sys_stdout, open("eg4d.out", "w"))
        for sfit in fit.simulated_fit_iter(3):
            print('************************************* simulation')
            print(sfit)
            sE = sfit.p['E']                # best-fit parameters
            sa = sfit.p['a']
            E = sfit.pexact['E']
            a = sfit.pexact['a']
            print('E1/E0 =', sE[1] / sE[0], ' E2/E0 =', sE[2] / sE[0])
            print('a1/a0 =', sa[1] / sa[0], ' a2/a0 =', sa[2] / sa[0])
            print('\nSimulated Fit Values - Exact Values:')
            print('E1/E0:', (sE[1] / sE[0]) - (E[1] / E[0]),
                  ' E2/E0:', (sE[2] / sE[0]) - (E[2] / E[0]))
            print('a1/a0:', (sa[1] / sa[0]) - (a[1] / a[0]),
                  ' a2/a0:', (sa[2] / sa[0]) - (a[2] / a[0]))
            # compute chi**2 comparing fit results to exact results
            sim_results = [sE[0], sE[1], sa[0], sa[1]]
            exact_results = [E[0], E[1], a[0], a[1]]
            chi2 = gv.chi2(sim_results, exact_results, svdcut=1e-8)
            print('\nParameter chi2/dof [dof] = %.2f' % (chi2 / chi2.dof),
                  '[%d]' % chi2.dof, ' Q = %.1f' % chi2.Q)
            print()
        sys.stdout = sys_stdout

    if DO_EMPBAYES:
        def fitargs(z, nexp=nexp, prior=prior, f=f, data=(x, y), p0=p0):
            z = gv.exp(z)
            prior['a'] = [gv.gvar(0.5, 0.5 * z[0]) for i in range(nexp)]
            return dict(prior=prior, data=data, fcn=f, p0=p0)
        ##
        z0 = [0.0]
        fit, z = lsqfit.empbayes_fit(z0, fitargs, tol=1e-3)
        sys.stdout = tee.tee(sys_stdout, open("eg4a.out", "w"))
        print(fit)                          # print the optimized fit results
        E = fit.p['E']                      # best-fit parameters
        a = fit.p['a']
        print('E1/E0 =', E[1] / E[0], ' E2/E0 =', E[2] / E[0])
        print('a1/a0 =', a[1] / a[0], ' a2/a0 =', a[2] / a[0])
        # print "prior['a'] =", fit.prior['a'][0]
        sys.stdout = sys_stdout
        print()

    if DO_PLOT:
        import pylab as pp
        from gvar import mean, sdev
        fity = f(x, fit.pmean)
        ratio = y / fity
        pp.xlim(0, 21)
        pp.xlabel('x')
        pp.ylabel('y/f(x,p)')
        pp.errorbar(x=x, y=mean(ratio), yerr=sdev(ratio), fmt='ob')
        pp.plot([0.0, 21.0], [1.0, 1.0])
        pp.show()
def compute_diagonal(args):
    dset, key = args    # unpack the (dset, key) pair (Python 3 drops tuple parameters)
    print("diagonal key ", key)
    tdat = compute_correlation_pair(dset, key, key)
    return (key, gv.mean(tdat[key]), gv.sdev(tdat[key]), gv.evalcorr(tdat)[key, key])