def main():
    """Fit a straight line y = y0 + s*x to correlated samples.

    The samples are averaged with gv.dataset.avg_data; the resulting
    covariance matrix is regulated using the svdcut recommended by
    gv.dataset.svd_diagnosis before fitting.
    """
    gv.ranseed(4)
    x = np.array([1., 2., 3., 4., 5., 6., 7., 8., 9., 10.])
    samples = [
        [2.8409, 4.8393, 6.8403, 8.8377, 10.8356, 12.8389, 14.8356, 16.8362, 18.8351, 20.8341],
        [2.8639, 4.8612, 6.8597, 8.8559, 10.8537, 12.8525, 14.8498, 16.8487, 18.8460, 20.8447],
        [3.1048, 5.1072, 7.1071, 9.1076, 11.1090, 13.1107, 15.1113, 17.1134, 19.1145, 21.1163],
        [3.0710, 5.0696, 7.0708, 9.0705, 11.0694, 13.0681, 15.0693, 17.0695, 19.0667, 21.0678],
        [3.0241, 5.0223, 7.0198, 9.0204, 11.0191, 13.0193, 15.0198, 17.0163, 19.0154, 21.0155],
        [2.9719, 4.9700, 6.9709, 8.9706, 10.9707, 12.9705, 14.9699, 16.9686, 18.9676, 20.9686],
        [3.0688, 5.0709, 7.0724, 9.0730, 11.0749, 13.0776, 15.0790, 17.0800, 19.0794, 21.0795],
        [3.1471, 5.1468, 7.1452, 9.1451, 11.1429, 13.1445, 15.1450, 17.1435, 19.1425, 21.1432],
        [3.0233, 5.0233, 7.0225, 9.0224, 11.0225, 13.0216, 15.0224, 17.0217, 19.0208, 21.0222],
        [2.8797, 4.8792, 6.8803, 8.8794, 10.8800, 12.8797, 14.8801, 16.8797, 18.8803, 20.8812],
        [3.0388, 5.0407, 7.0409, 9.0439, 11.0443, 13.0459, 15.0455, 17.0479, 19.0493, 21.0505],
        [3.1353, 5.1368, 7.1376, 9.1367, 11.1360, 13.1377, 15.1369, 17.1400, 19.1384, 21.1396],
        [3.0051, 5.0063, 7.0022, 9.0052, 11.0040, 13.0033, 15.0007, 16.9989, 18.9994, 20.9995],
    ]
    # average samples, then regulate the covariance matrix with the
    # svdcut suggested by the SVD diagnosis
    y = gv.dataset.avg_data(samples)
    diagnosis = gv.dataset.svd_diagnosis(samples)
    y = gv.svd(y, svdcut=diagnosis.svdcut)
    if SHOW_PLOTS:
        diagnosis.plot_ratio(show=True)

    def model(p):
        # straight line: intercept p['y0'], slope p['s']
        return p['y0'] + p['s'] * x

    prior = gv.gvar(dict(y0='0(5)', s='0(5)'))
    fit = lsqfit.nonlinear_fit(data=y, fcn=model, prior=prior)
    print(fit)
def main():
    """Run multi-term fits for nexp = 3..5, with optional bootstrap and plot.

    Fix: this block used Python-2-only ``print`` statements (a SyntaxError
    under Python 3); converted to ``print()`` calls.  All output strings are
    unchanged.  Each fit's best-fit means seed the next fit via ``p0``.
    """
    gv.ranseed([2009, 2010, 2011, 2012, 2013])  # initialize random numbers (opt.)
    x, y = make_data()                          # make fit data
    p0 = None                                   # make larger fits go faster (opt.)
    sys_stdout = sys.stdout
    for nexp in range(3, 6):
        prior = make_prior(nexp, x)
        fit = lsqfit.nonlinear_fit(data=y, fcn=f, prior=prior, p0=p0)  # ,svdcut=SVDCUT)
        if fit.chi2 / fit.dof < 1.:
            p0 = fit.pmean                      # starting point for next fit (opt.)
        fit.check_roundoff()
        if nexp == 4:
            # duplicate output into eg2.out for this fit only
            sys.stdout = tee.tee(sys.stdout, open("eg2.out", "w"))
        print('************************************* nexp =', nexp)
        print(fit)                              # print the fit results
        E = fit.p['E']                          # best-fit parameters
        a = fit.p['a']
        print('E1/E0 =', E[1] / E[0], ' E2/E0 =', E[2] / E[0])
        print('a1/a0 =', a[1] / a[0], ' a2/a0 =', a[2] / a[0])
        sys.stdout = sys_stdout
        print()
    #
    if DO_BOOTSTRAP:
        Nbs = 10                                # number of bootstrap copies
        outputs = {'E1/E0': [], 'E2/E0': [], 'a1/a0': [], 'a2/a0': [], 'E1': [], 'a1': []}  # results
        for bsfit in fit.bootstrap_iter(n=Nbs):
            E = bsfit.pmean['E']                # best-fit parameters
            a = bsfit.pmean['a']
            outputs['E1/E0'].append(E[1] / E[0])    # accumulate results
            outputs['E2/E0'].append(E[2] / E[0])
            outputs['a1/a0'].append(a[1] / a[0])
            outputs['a2/a0'].append(a[2] / a[0])
            outputs['E1'].append(E[1])
            outputs['a1'].append(a[1])
        # extract means and standard deviations from the bootstrap output
        for k in outputs:
            outputs[k] = gv.gvar(np.mean(outputs[k]), np.std(outputs[k]))
        print('Bootstrap results:')
        print('E1/E0 =', outputs['E1/E0'], ' E2/E1 =', outputs['E2/E0'])
        print('a1/a0 =', outputs['a1/a0'], ' a2/a0 =', outputs['a2/a0'])
        print('E1 =', outputs['E1'], ' a1 =', outputs['a1'])
    if DO_PLOT:
        print(fit.format(100))                  # print the fit results
        import pylab as pp
        from gvar import mean, sdev
        fity = f(x, fit.pmean)
        ratio = y / fity                        # data / best fit
        pp.xlim(0, 21)
        pp.xlabel('x')
        pp.ylabel('y/f(x,p)')
        pp.errorbar(x=gv.mean(x), y=gv.mean(ratio), yerr=gv.sdev(ratio), fmt='ob')
        pp.plot([0.0, 21.0], [1.0, 1.0])
        pp.show()
def main(): gv.ranseed([2009,2010,2011,2012]) # initialize random numbers (opt.) x,y = make_data() # make fit data p0 = None # make larger fits go faster (opt.) for nexp in range(3,8): print('************************************* nexp =',nexp) prior = make_prior(nexp) # eps = gv.gvar(1,1e-300) # use svdcut to make it independent # prior['a'] *= eps # y *= eps fit = lsqfit.nonlinear_fit(data=(x,y),fcn=f,prior=prior, p0=p0,svdcut=SVDCUT) print(fit) # print the fit results E = fit.p['E'] # best-fit parameters a = fit.p['a'] print('E1/E0 =',(E[1]/E[0]).fmt(),' E2/E0 =',(E[2]/E[0]).fmt()) print('a1/a0 =',(a[1]/a[0]).fmt(),' a2/a0 =',(a[2]/a[0]).fmt()) print() if fit.chi2/fit.dof<1.: p0 = fit.pmean # starting point for next fit (opt.) if DO_BOOTSTRAP: Nbs = 10 # number of bootstrap copies outputs = {'E1/E0':[], 'E2/E0':[], 'a1/a0':[],'a2/a0':[],'E1':[],'a1':[]} # results for bsfit in fit.bootstrap_iter(n=Nbs): E = bsfit.pmean['E'] # best-fit parameters a = bsfit.pmean['a'] outputs['E1/E0'].append(E[1]/E[0]) # accumulate results outputs['E2/E0'].append(E[2]/E[0]) outputs['a1/a0'].append(a[1]/a[0]) outputs['a2/a0'].append(a[2]/a[0]) outputs['E1'].append(E[1]) outputs['a1'].append(a[1]) # print E[:2] # print a[:2] # print bsfit.chi2/bsfit.dof # extract means and standard deviations from the bootstrap output for k in outputs: outputs[k] = gv.dataset.avg_data(outputs[k],bstrap=True).fmt(3) # gv.gvar(np.mean(outputs[k]), # np.std(outputs[k])).fmt(3) print('Bootstrap results:') print('E1/E0 =',outputs['E1/E0'],' E2/E0 =',outputs['E2/E0']) print('a1/a0 =',outputs['a1/a0'],' a2/a0 =',outputs['a2/a0']) print('E1 =',outputs['E1'],' a1 =',outputs['a1']) if DO_PLOT: print(fit.format(100)) # print the fit results import pylab as plt ratio = y/f(x,fit.pmean) plt.xlim(0,21) plt.xlabel('x') plt.ylabel('y/f(x,p)') plt.errorbar(x=x,y=gv.mean(ratio),yerr=gv.sdev(ratio),fmt='ob') plt.plot([0.0,21.0],[1.0,1.0]) plt.show()
def main(): gv.ranseed([2009,2010,2011,2012]) # initialize random numbers (opt.) x,y = make_data() # make fit data p0 = None # make larger fits go faster (opt.) for nexp in range(3,8): print('************************************* nexp =',nexp) prior = make_prior(nexp) fit = lsqfit.nonlinear_fit(data=(x,y),fcn=f,prior=prior,p0=p0,svdcut=SVDCUT) print(fit) # print the fit results E = fit.p['E'] # best-fit parameters a = fit.p['a'] print('E1/E0 =',(E[1]/E[0]).fmt(),' E2/E0 =',(E[2]/E[0]).fmt()) print('a1/a0 =',(a[1]/a[0]).fmt(),' a2/a0 =',(a[2]/a[0]).fmt()) print() if fit.chi2/fit.dof<1.: p0 = fit.pmean # starting point for next fit (opt.) if DO_ERRORBUDGET: outputs = OrderedDict([ ('E1/E0', E[1]/E[0]), ('E2/E0', E[2]/E[0]), ('a1/a0', a[1]/a[0]), ('a2/a0', a[2]/a[0]) ]) inputs = OrderedDict([ ('E', fit.prior['E']), ('a', fit.prior['a']), ('y', y), ('svd', fit.svdcorrection) ]) print(fit.fmt_values(outputs)) print(fit.fmt_errorbudget(outputs,inputs)) if DO_EMPBAYES: def fitargs(z,nexp=nexp,prior=prior,f=f,data=(x,y),p0=p0): z = gv.exp(z) prior['a'] = [gv.gvar(0.5,0.5*z[0]) for i in range(nexp)] return dict(prior=prior,data=data,fcn=f,p0=p0) ## z0 = [0.0] fit,z = lsqfit.empbayes_fit(z0,fitargs,tol=1e-3) print(fit) # print the optimized fit results E = fit.p['E'] # best-fit parameters a = fit.p['a'] print('E1/E0 =',(E[1]/E[0]).fmt(),' E2/E0 =',(E[2]/E[0]).fmt()) print('a1/a0 =',(a[1]/a[0]).fmt(),' a2/a0 =',(a[2]/a[0]).fmt()) print("prior['a'] =",fit.prior['a'][0].fmt()) print() if DO_PLOT: import pylab as pp from gvar import mean,sdev fity = f(x,fit.pmean) ratio = y/fity pp.xlim(0,21) pp.xlabel('x') pp.ylabel('y/f(x,p)') pp.errorbar(x=x,y=mean(ratio),yerr=sdev(ratio),fmt='ob') pp.plot([0.0,21.0],[1.0,1.0]) pp.show()
def fcn2(z):
    """Objective for optimizing the affine map (M, t).

    Splits z into an ndim x ndim matrix and an ndim-vector translation,
    integrates the squared affine-mapped fcn over region with vegas,
    and adds a penalty proportional to 1/|det(M)|.
    """
    mat = np.asarray(z[:-ndim]).reshape(ndim, ndim)
    shift = np.asarray(z[-ndim:])
    aff = AffineFunction(mat, shift, region, fcn)

    @vegas.batchintegrand
    def squared(x):
        return aff(x) ** 2

    gv.ranseed(1)
    result = vegas.Integrator(region)(squared, max_nhcube=1, nitn=1, neval=neval)
    # penalty keeps the map from collapsing (det -> 0)
    return result.mean + invdet_fac / abs(np.linalg.det(mat))
def main():
    """Bayesian fit via vegas integration over parameters p and weights w.

    Builds the data/fit objects, runs a warm-up integration followed by a
    results integration, and extracts parameter means, covariance, and the
    w-weight statistics from the integrator's accumulated moments.
    Depends on module-level Data, F, prep_plot, plot_fit, save_plot, ONE_W.
    """
    # data
    x = np.array([
        0.2, 0.4, 0.6, 0.8, 1., 1.2, 1.4, 1.6, 1.8, 2., 2.2, 2.4,
        2.6, 2.8, 3., 3.2, 3.4, 3.6, 3.8
        ])
    y = gv.gvar([
        '0.38(20)', '2.89(20)', '0.85(20)', '0.59(20)', '2.88(20)', '1.44(20)',
        '0.73(20)', '1.23(20)', '1.68(20)', '1.36(20)', '1.51(20)', '1.73(20)',
        '2.16(20)', '1.85(20)', '2.00(20)', '2.11(20)', '2.75(20)', '0.86(20)',
        '2.73(20)'
        ])
    data = Data(x, y)
    f = F(data)
    prep_plot(plt, data)
    # stratification / iteration counts differ for the one-w vs per-point-w case
    if ONE_W:
        nstrat = [20, 20, 2]
        nitn_w = 6
        nitn_r = 9
        ranseed = gv.ranseed(12345)
    else:
        nstrat = [100, 100] + len(x) * [1]
        nitn_w = 16
        nitn_r = 8
        ranseed = gv.ranseed(1)
    print('ranseed = %d' % ranseed)
    # 2 parameter directions plus one w-direction per data point (or just one)
    itg = vegas.Integrator(
        [(-5, 5), (-5, 5)] + (1 if ONE_W else len(x)) * [(0, 1)],
        )
    # warm-up integration (adapts the grid)
    w = itg(f, nstrat=nstrat, nitn=nitn_w)
    nsample = w.sum_neval
    print(w.summary())
    # results integration
    r = itg(f, nstrat=nstrat, nitn=nitn_r)
    nsample += r.sum_neval
    print(r.summary())
    print('neval_tot =', nsample, '  nstrat =', np.array(itg.nstrat))
    print(
        'last neval =', itg.last_neval, '  r.sum_neval =', r.sum_neval,
        '  range =', list(itg.neval_hcube_range))
    print('ninc =', itg.map.ninc, '\n')
    # normalized moments -> parameter mean and covariance
    p = r['p'] / r['norm']
    covp = r['p*p'] / r['norm'] - np.outer(p, p)
    w = r['w'] / r['norm']
    sigw = np.sqrt(r['w*w'] / r['norm'] - w ** 2)
    print('p =', p, '  w =', w)
    plot_fit(plt, gv.gvar(gv.mean(p), gv.mean(covp)), data, 'b:', color='b', alpha=0.5)
    print('sigp =', np.diagonal(covp) ** 0.5)
    print('corr(p0,p1) =', (covp / np.outer(np.diagonal(covp) ** 0.5, np.diagonal(covp)** 0.5))[0,1])
    print('cov(p,p):\n', covp)
    print('sigw =', sigw)
    print()
    save_plot(plt)
def fcn2(z):
    """Objective for the affine-map optimization (duplicate of the variant above).

    z packs a flattened ndim x ndim matrix followed by an ndim translation
    vector; returns the vegas estimate of the integral of the squared mapped
    function plus a 1/|det| penalty term.
    """
    flat, trans = z[:-ndim], z[-ndim:]
    M = np.asarray(flat)
    M.shape = ndim, ndim
    t = np.asarray(trans)
    mapped = AffineFunction(M, t, region, fcn)

    @vegas.batchintegrand
    def integrand(x):
        # square of the affine-mapped function
        return mapped(x) ** 2

    gv.ranseed(1)
    estimate = vegas.Integrator(region)(integrand, max_nhcube=1, nitn=1, neval=neval)
    penalty = invdet_fac / abs(np.linalg.det(M))   # discourages det(M) -> 0
    return estimate.mean + penalty
def main():
    """Multi-integrand vegas example: integrate [1, x0, x0**2]-type moments.

    Adapts the grid with a training run, then evaluates the integrals;
    prints the correlated moments, their ratio <x>, the variance, and
    compares correlated vs uncorrelated error propagation.
    Depends on module-level f, log_stdout, unlog_stdout.
    """
    print(
        gv.ranseed(
            (2050203335594632366, 8881439510219835677, 2605204918634240925)))
    log_stdout('eg3a.out')
    integ = vegas.Integrator(4 * [[0, 1]])
    # adapt grid
    training = integ(f(), nitn=10, neval=1000)
    # evaluate multi-integrands
    result = integ(f(), nitn=10, neval=5000)
    print('I[0] =', result[0], '  I[1] =', result[1], '  I[2] =', result[2])
    print('Q = %.2f\n' % result.Q)
    print('<x> =', result[1] / result[0])
    print('sigma_x**2 = <x**2> - <x>**2 =',
        result[2] / result[0] - (result[1] / result[0])**2)
    print('\ncorrelation matrix:\n', gv.evalcorr(result))
    unlog_stdout()
    # strip correlations: same means/sdevs but independent GVars
    r = gv.gvar(gv.mean(result), gv.sdev(result))
    print(r[1] / r[0])
    # ratio of uncorrelated to correlated error on <x>
    print((r[1] / r[0]).sdev / (result[1] / result[0]).sdev)
    print(r[2] / r[0] - (r[1] / r[0])**2)
    print(result.summary())
def test_ravgarray_unwgtd(self):
    " unweighted RAvgArray "
    # skip when gvar is unavailable
    if not have_gvar:
        return
    gv.ranseed((1, 2))
    mu = np.random.uniform(-10., 10., (2,))
    cv = np.array([[1., 0.5], [0.5, 2.]]) / 10.
    nsamples = 30
    correlated = gv.gvar(mu, cv)
    acc = RAvgArray(2, weighted=False)
    # accumulate nsamples random draws, each with covariance cv
    for draw in gv.raniter(correlated, nsamples):
        acc.add(gv.gvar(draw, cv))
    # unweighted average of N equal-covariance samples has covariance cv/N
    np_assert_allclose(gv.evalcov(acc), cv / nsamples)
    for i in range(2):
        self.assertLess(abs(mu[i] - acc[i].mean), 5 * acc[i].sdev)
    self.assertEqual(acc.dof, 2 * nsamples - 2)
    self.assertGreater(acc.Q, 1e-3)
def bs_fitting(fit_, nbs_, seed_=0):
    """Bootstrap a finished fit and report 68% intervals per parameter.

    Runs nbs_ bootstrap refits of fit_, collecting the best-fit means of
    every parameter; prints each parameter's p68 summary and the elapsed
    time.  Returns a dict of parameter-name -> array of bootstrap means.
    """
    print('\nBootstrap Analysis with n_boot %d ...' % nbs_)
    start = time.time()
    gv.ranseed(seed_)
    collected = {key: [] for key in fit_.p}
    for bs_fit in fit_.bootstrap_iter(nbs_):
        means = bs_fit.pmean
        print(means)
        for key in fit_.p:
            collected[key].append(means[key])
    for key in fit_.p:
        collected[key] = np.array(collected[key])
        print(key + ' =', gv.gvar(get_p68_mean_and_error(collected[key])))
    print('Bootstrap Analysis done, %4.2f s used.' % (time.time() - start))
    return collected
def main():
    """Multi-integrand vegas example, run twice: array-valued then dict-valued.

    First pass integrates the array-valued f() and analyzes the correlated
    moments; second pass reseeds identically and repeats using the
    dict-valued fdict().  Depends on module-level f, fdict, log_stdout,
    unlog_stdout.
    """
    print(gv.ranseed((1814855126, 100213625, 262796317)))
    log_stdout('eg3a.out')
    integ = vegas.Integrator(4 * [[0, 1]])
    # adapt grid
    training = integ(f(), nitn=10, neval=2000)
    # evaluate multi-integrands
    result = integ(f(), nitn=10, neval=10000)
    print('I[0] =', result[0], '  I[1] =', result[1], '  I[2] =', result[2])
    print('Q = %.2f\n' % result.Q)
    print('<x> =', result[1] / result[0])
    print('sigma_x**2 = <x**2> - <x>**2 =',
        result[2] / result[0] - (result[1] / result[0])**2)
    print('\ncorrelation matrix:\n', gv.evalcorr(result))
    unlog_stdout()
    # strip correlations to show their effect on derived quantities
    r = gv.gvar(gv.mean(result), gv.sdev(result))
    print(r[1] / r[0])
    print((r[1] / r[0]).sdev / (result[1] / result[0]).sdev)
    print(r[2] / r[0] - (r[1] / r[0])**2)
    print((r[2] / r[0] - (r[1] / r[0])**2).sdev /
        (result[2] / result[0] - (result[1] / result[0])**2).sdev)
    print(result.summary())

    # do it again for a dictionary
    print(gv.ranseed((1814855126, 100213625, 262796317)))
    integ = vegas.Integrator(4 * [[0, 1]])
    # adapt grid
    training = integ(f(), nitn=10, neval=2000)
    # evaluate the integrals
    result = integ(fdict(), nitn=10, neval=10000)
    log_stdout('eg3b.out')
    print(result)
    print('Q = %.2f\n' % result.Q)
    print('<x> =', result['x'] / result['1'])
    print('sigma_x**2 = <x**2> - <x>**2 =',
        result['x**2'] / result['1'] - (result['x'] / result['1'])**2)
    unlog_stdout()
def test_fit(fitter, datafile):
    """ Test the fit with simulated data """
    gv.ranseed((623738625, 435880512, 1745400596))
    print('\nRandom seed:', gv.ranseed.seed)
    dataset = read_dataset(datafile)
    pexact = fitter.fit.pmean
    prior = fitter.fit.prior
    # refit two simulated data sets generated around the best-fit means
    for sim_data in fitter.simulated_data_iter(n=2, dataset=dataset, pexact=pexact):
        print('\n============================== simulation')
        sfit = fitter.lsqfit(data=sim_data, prior=prior, p0=pexact, nterm=(2, 2))
        # chi**2 of leading-parameter deviations from the exact values
        devs = [sfit.p[k].flat[0] - pexact[k].flat[0] for k in prior]
        c2 = gv.chi2(devs)
        print(
            'Leading parameter chi2/dof [dof] = %.2f' % (c2 / c2.dof),
            '[%d]' % c2.dof,
            '  Q = %.1f' % c2.Q
            )
def test_fit(fitter, datafile):
    """ Test the fit with simulated data """
    gv.ranseed((1487942813, 775399747, 906327435))
    print('\nRandom seed:', gv.ranseed.seed)
    dataset = cf.read_dataset(datafile)
    best = fitter.fit.pmean
    prior = fitter.fit.prior
    # two simulated refits, each seeded at the best-fit means
    for sim in fitter.simulated_data_iter(n=2, dataset=dataset, pexact=best):
        print('\n============================== simulation')
        sfit = fitter.lsqfit(data=sim, prior=prior, p0=best)
        # chi**2 of the leading parameters against their exact values
        deviations = [sfit.p[k].flat[0] - best[k].flat[0] for k in prior]
        stat = gv.chi2(deviations)
        print(
            'Leading parameter chi2/dof [dof] = %.2f' % (stat / stat.dof),
            '[%d]' % stat.dof,
            '  Q = %.1f' % stat.Q
            )
def setUp(self):
    """Build fake data, a prior, and a reference fit for MultiFitter tests."""
    gv.ranseed(1)
    slope_prior = gv.gvar('0.500(1)')
    intercept_prior = gv.gvar('1.000(1)')
    self.x = np.array([0.1, 0.2, 0.3, 0.4])

    def model(p, x=self.x):
        # one linear piece plus two constant pieces, all sharing p['a']
        out = gv.BufferDict()
        out['l'] = p['a'] + p['b'] * x
        out['c1'] = 4 * [p['a']]
        out['c2'] = 4 * [p['a']]
        return out

    self.prior = gv.BufferDict([('a', intercept_prior), ('b', slope_prior)])
    self.data = gv.make_fake_data(model(self.prior))
    self.fcn = model
    # reference fit without using MultiFitter
    self.ref_fit = lsqfit.nonlinear_fit(
        prior=self.prior, fcn=self.fcn, data=self.data)
    # these data should be ignored
    self.data['dummy'] = gv.gvar(['1(1)', '2(2)'])
def main():
    """Fit, then optional bootstrap / Bayesian-integration / simulation stages.

    Fix: this function mixed Python-2 ``print`` statements with Python-3
    ``print()`` calls (the former are a SyntaxError under Python 3); all
    prints are now function calls.  Output strings are unchanged.  Each
    stage tees stdout into its own eg3*.out file.
    """
    sys_stdout = sys.stdout
    sys.stdout = tee.tee(sys.stdout, open("eg3a.out", "w"))
    x, y = make_data()
    prior = make_prior()
    fit = lsqfit.nonlinear_fit(prior=prior, data=(x, y), fcn=fcn)
    print(fit)
    print('p1/p0 =', fit.p[1] / fit.p[0], '  p3/p2 =', fit.p[3] / fit.p[2])
    print('corr(p0,p1) =', gv.evalcorr(fit.p[:2])[1, 0])
    if DO_PLOT:
        plt.semilogx()
        plt.errorbar(
            x=gv.mean(x), xerr=gv.sdev(x), y=gv.mean(y), yerr=gv.sdev(y),
            fmt='ob'
            )
        # plot fit line
        xx = np.linspace(0.99 * gv.mean(min(x)), 1.01 * gv.mean(max(x)), 100)
        yy = fcn(xx, fit.pmean)
        plt.xlabel('x')
        plt.ylabel('y')
        plt.plot(xx, yy, ':r')
        plt.savefig('eg3.png', bbox_inches='tight')
        plt.show()
    sys.stdout = sys_stdout
    if DO_BOOTSTRAP:
        gv.ranseed(123)
        sys.stdout = tee.tee(sys_stdout, open('eg3c.out', 'w'))
        print(fit)
        print('p1/p0 =', fit.p[1] / fit.p[0], '  p3/p2 =', fit.p[3] / fit.p[2])
        print('corr(p0,p1) =', gv.evalcorr(fit.p[:2])[1, 0])
        Nbs = 40
        outputs = {'p': [], 'p1/p0': [], 'p3/p2': []}
        for bsfit in fit.bootstrap_iter(n=Nbs):
            p = bsfit.pmean
            outputs['p'].append(p)
            outputs['p1/p0'].append(p[1] / p[0])
            outputs['p3/p2'].append(p[3] / p[2])
        print('\nBootstrap Averages:')
        outputs = gv.dataset.avg_data(outputs, bstrap=True)
        print(gv.tabulate(outputs))
        print('corr(p0,p1) =', gv.evalcorr(outputs['p'][:2])[1, 0])
        # make histograms of p1/p0 and p3/p2
        sys.stdout = sys_stdout
        print()
        sys.stdout = tee.tee(sys_stdout, open('eg3d.out', 'w'))
        print('Histogram Analysis:')
        count = {'p1/p0': [], 'p3/p2': []}
        hist = {
            'p1/p0': gv.PDFHistogram(fit.p[1] / fit.p[0]),
            'p3/p2': gv.PDFHistogram(fit.p[3] / fit.p[2]),
            }
        for bsfit in fit.bootstrap_iter(n=1000):
            p = bsfit.pmean
            count['p1/p0'].append(hist['p1/p0'].count(p[1] / p[0]))
            count['p3/p2'].append(hist['p3/p2'].count(p[3] / p[2]))
        count = gv.dataset.avg_data(count)
        plt.rcParams['figure.figsize'] = [6.4, 2.4]
        pltnum = 1
        for k in count:
            print(k + ':')
            print(hist[k].analyze(count[k]).stats)
            plt.subplot(1, 2, pltnum)
            plt.xlabel(k)
            hist[k].make_plot(count[k], plot=plt)
            if pltnum == 2:
                plt.ylabel('')
            pltnum += 1
        plt.rcParams['figure.figsize'] = [6.4, 4.8]
        plt.savefig('eg3d.png', bbox_inches='tight')
        plt.show()
    if DO_BAYESIAN:
        gv.ranseed(123)
        sys.stdout = tee.tee(sys_stdout, open('eg3e.out', 'w'))
        print(fit)
        expval = lsqfit.BayesIntegrator(fit)
        # adapt integrator to PDF from fit
        neval = 1000
        nitn = 10
        expval(neval=neval, nitn=nitn)
        # <g(p)> gives mean and covariance matrix, and histograms
        hist = [
            gv.PDFHistogram(fit.p[0]), gv.PDFHistogram(fit.p[1]),
            gv.PDFHistogram(fit.p[2]), gv.PDFHistogram(fit.p[3]),
            ]

        def g(p):
            return dict(
                mean=p,
                outer=np.outer(p, p),
                count=[
                    hist[0].count(p[0]), hist[1].count(p[1]),
                    hist[2].count(p[2]), hist[3].count(p[3]),
                    ],
                )

        # evaluate expectation value of g(p)
        results = expval(g, neval=neval, nitn=nitn, adapt=False)
        # analyze results
        print('\nIterations:')
        print(results.summary())
        print('Integration Results:')
        pmean = results['mean']
        pcov = results['outer'] - np.outer(pmean, pmean)
        print(' mean(p) =', pmean)
        print(' cov(p) =\n', pcov)
        # create GVars from results
        p = gv.gvar(gv.mean(pmean), gv.mean(pcov))
        print('\nBayesian Parameters:')
        print(gv.tabulate(p))
        # show histograms
        print('\nHistogram Statistics:')
        count = results['count']
        for i in range(4):
            print('p[{}] -'.format(i))
            print(hist[i].analyze(count[i]).stats)
            plt.subplot(2, 2, i + 1)
            plt.xlabel('p[{}]'.format(i))
            hist[i].make_plot(count[i], plot=plt)
            if i % 2 != 0:
                plt.ylabel('')
        plt.savefig('eg3e.png', bbox_inches='tight')
        plt.show()
    if DO_SIMULATION:
        gv.ranseed(1234)
        sys.stdout = tee.tee(sys_stdout, open('eg3f.out', 'w'))
        print(40 * '*' + ' real fit')
        print(fit.format(True))
        Q = []
        p = []
        for sfit in fit.simulated_fit_iter(n=3, add_priornoise=False):
            print(40 * '=' + ' simulation')
            print(sfit.format(True))
            diff = sfit.p - sfit.pexact
            print('\nsfit.p - pexact =', diff)
            print(gv.fmt_chi2(gv.chi2(diff)))
            print()
    # omit constraint
    sys.stdout = tee.tee(sys_stdout, open("eg3b.out", "w"))
    prior = gv.gvar(4 * ['0(1)'])
    prior[1] = gv.gvar('0(20)')
    fit = lsqfit.nonlinear_fit(prior=prior, data=(x, y), fcn=fcn)
    print(fit)
    print('p1/p0 =', fit.p[1] / fit.p[0], '  p3/p2 =', fit.p[3] / fit.p[2])
    print('corr(p0,p1) =', gv.evalcorr(fit.p[:2])[1, 0])
# Correlated data Y built from `mean` and `covariance`
# NOTE(review): `mean` and `covariance` are defined earlier in the file
# (not shown in this chunk) — confirm against the surrounding code.
Y = gv.gvar(mean, covariance)
X = np.array([
    1., 2., 3., 4., 5., 6., 7., 8., 9., 10.,
    11., 12., 13., 14., 15., 16., 17.
    ])

def f(x, p):
    """Fit function a * x**(-b) * exp(-c * x) with parameters from dict p."""
    a = p['a']
    b = p['b']
    c = p['c']
    return a * x**(-b) * np.exp(-c * x)

def make_prior():
    """Return Gaussian priors for the fit parameters a, b, c."""
    prior = {}
    prior['a'] = gv.gvar(5, 100)        # broad prior: a barely constrained
    prior['c'] = gv.gvar(0, 5.0)
    prior['b'] = gv.gvar(0.7, 0.3)
    return prior

if __name__ == '__main__':
    gv.ranseed([2009, 2010, 2011, 2012])    # reproducible random numbers
    p0 = None
    prior = make_prior()
    fit = lsqfit.nonlinear_fit(data=(X, Y), fcn=f, prior=prior, p0=p0)
    print(fit)
    print((fit.format(maxline=True)))
def main():
    """Multi-term fits for nexp = 2..7, with error-budget / empirical-Bayes /
    plot stages, teeing selected output into eg4GBF*.out files.

    Fix: converted Python-2-only ``print`` statements (SyntaxError under
    Python 3) to ``print()`` calls; output strings are unchanged.
    """
    gd.ranseed([2009, 2010, 2011, 2012])    # initialize random numbers (opt.)
    x, y = make_data()                      # make fit data
    p0 = None                               # make larger fits go faster (opt.)
    for nexp in range(2, 8):
        if nexp == 2:
            sys_stdout = sys.stdout
            sys.stdout = tee.tee(sys_stdout, open("eg4GBF.out", "w"))
        print('************************************* nexp =', nexp)
        prior = make_prior(nexp)
        fit = lsqfit.nonlinear_fit(data=(x, y), fcn=f, prior=prior, p0=p0)
        print(fit)                          # print the fit results
        # E = fit.p['E']                    # best-fit parameters
        # a = fit.p['a']
        # print 'E1/E0 =',E[1]/E[0],'  E2/E0 =',E[2]/E[0]
        # print 'a1/a0 =',a[1]/a[0],'  a2/a0 =',a[2]/a[0]
        print()
        if nexp == 3:
            sys.stdout = sys_stdout
        if fit.chi2 / fit.dof < 1.:
            p0 = fit.pmean                  # starting point for next fit (opt.)

    if DO_ERRORBUDGET:
        # NOTE(review): E and a are never assigned above (the extraction is
        # commented out), so this branch raises NameError if enabled — confirm.
        print(E[1] / E[0])
        print((E[1] / E[0]).partialsdev(fit.prior['E']))
        print((E[1] / E[0]).partialsdev(fit.prior['a']))
        print((E[1] / E[0]).partialsdev(y))
        outputs = {'E1/E0': E[1] / E[0], 'E2/E0': E[2] / E[0],
                   'a1/a0': a[1] / a[0], 'a2/a0': a[2] / a[0]}
        inputs = {'E': fit.prior['E'], 'a': fit.prior['a'], 'y': y}
        sys.stdout = tee.tee(sys_stdout, open("eg4GBFb.out", "w"))
        print(fit.fmt_values(outputs))
        print(fit.fmt_errorbudget(outputs, inputs))
        sys.stdout = sys_stdout

    if DO_EMPBAYES:
        # optimize the width of the 'a' prior by maximizing the Bayes factor
        def fitargs(z, nexp=nexp, prior=prior, f=f, data=(x, y), p0=p0):
            z = gd.exp(z)
            prior['a'] = [gd.gvar(0.5, 0.5 * z[0]) for i in range(nexp)]
            return dict(prior=prior, data=data, fcn=f, p0=p0)
        ##
        z0 = [0.0]
        fit, z = lsqfit.empbayes_fit(z0, fitargs, tol=1e-3)
        sys.stdout = tee.tee(sys_stdout, open("eg4GBFa.out", "w"))
        print(fit)                          # print the optimized fit results
        E = fit.p['E']                      # best-fit parameters
        a = fit.p['a']
        print('E1/E0 =', E[1] / E[0], '  E2/E0 =', E[2] / E[0])
        print('a1/a0 =', a[1] / a[0], '  a2/a0 =', a[2] / a[0])
        print("prior['a'] =", fit.prior['a'][0])
        sys.stdout = sys_stdout
        print()

    if DO_PLOT:
        import pylab as pp
        from gvar import mean, sdev
        fity = f(x, fit.pmean)
        ratio = y / fity                    # data divided by best fit
        pp.xlim(0, 21)
        pp.xlabel('x')
        pp.ylabel('y/f(x,p)')
        pp.errorbar(x=x, y=mean(ratio), yerr=sdev(ratio), fmt='ob')
        pp.plot([0.0, 21.0], [1.0, 1.0])
        pp.show()
def main():
    """Staged analysis: nexp scan, error budget, refit, constrained refits,
    optional Bayesian integration and bootstrap, teeing output to eg1*.out.

    Fix: converted Python-2-only ``print`` statements (SyntaxError under
    Python 3) to ``print()`` calls; output strings are unchanged.
    """
    x, y = make_data()
    # y = gv.gvar(gv.mean(y), 0.75**2 * gv.evalcov(y))
    p0 = None                           # make larger fits go faster (opt.)
    sys_stdout = sys.stdout
    sys.stdout = tee.tee(sys.stdout, open("eg1.out", "w"))
    for nexp in range(1, 7):
        prior = make_prior(nexp)
        fit = lsqfit.nonlinear_fit(data=(x, y), fcn=fcn, prior=prior, p0=p0)
        if fit.chi2 / fit.dof < 1.:
            p0 = fit.pmean              # starting point for next fit (opt.)
        print('************************************* nexp =', nexp)
        print(fit.format())             # print the fit results
        E = fit.p['E']                  # best-fit parameters
        a = fit.p['a']
        if nexp > 2:
            print('E1/E0 =', E[1] / E[0], '  E2/E0 =', E[2] / E[0])
            print('a1/a0 =', a[1] / a[0], '  a2/a0 =', a[2] / a[0])
        print()
    # error budget
    outputs = {
        'E1/E0': E[1] / E[0], 'E2/E0': E[2] / E[0],
        'a1/a0': a[1] / a[0], 'a2/a0': a[2] / a[0]
        }
    inputs = {'E': fit.prior['E'], 'a': fit.prior['a'], 'y': y}
    # ordered version so the budget table has a fixed row order
    inputs = collections.OrderedDict()
    inputs['a'] = fit.prior['a']
    inputs['E'] = fit.prior['E']
    inputs['y'] = fit.data[1]
    print('================= Error Budget Analysis')
    print(fit.fmt_values(outputs))
    print(fit.fmt_errorbudget(outputs, inputs))
    sys.stdout = sys_stdout
    # print(gv.gvar(str(a[1])) / gv.gvar(str(a[0])) )
    # print(gv.evalcorr([fit.p['a'][1], fit.p['E'][1]]))
    # print(fit.format(True))

    # redo fit with 4 parameters since that is enough
    prior = make_prior(4)
    fit = lsqfit.nonlinear_fit(data=(x, y), fcn=fcn, prior=prior, p0=fit.pmean)
    sys.stdout = tee.tee(sys_stdout, open("eg1a.out", "w"))
    print('--------------------- original fit')
    print(fit.format())
    E = fit.p['E']                      # best-fit parameters
    a = fit.p['a']
    print('E1/E0 =', E[1] / E[0], '  E2/E0 =', E[2] / E[0])
    print('a1/a0 =', a[1] / a[0], '  a2/a0 =', a[2] / a[0])
    print()
    # extra data 1
    print('\n--------------------- new fit to extra information')

    def ratio(p):
        # constraint function: fit a1/a0 against the extra datum
        return p['a'][1] / p['a'][0]

    newfit = lsqfit.nonlinear_fit(data=gv.gvar(1, 1e-5), fcn=ratio, prior=fit.p)
    print(newfit.format())
    E = newfit.p['E']
    a = newfit.p['a']
    print('E1/E0 =', E[1] / E[0], '  E2/E0 =', E[2] / E[0])
    print('a1/a0 =', a[1] / a[0], '  a2/a0 =', a[2] / a[0])
    if DO_PLOT:
        import matplotlib.pyplot as plt
        ratio = y / fit.fcn(x, fit.pmean)
        plt.xlim(4, 15)
        plt.ylim(0.95, 1.05)
        plt.xlabel('x')
        plt.ylabel('y / f(x,p)')
        plt.yticks(
            [0.96, 0.98, 1.00, 1.02, 1.04],
            ['0.96', '0.98', '1.00', '1.02', '1.04'])
        plt.errorbar(x=x, y=gv.mean(ratio), yerr=gv.sdev(ratio), fmt='ob')
        plt.plot([4.0, 21.0], [1.0, 1.0], 'b:')
        plt.savefig('eg1.png', bbox_inches='tight')
        plt.show()
    # alternate method for extra data
    sys.stdout = tee.tee(sys_stdout, open("eg1b.out", "w"))
    fit.p['a1/a0'] = fit.p['a'][1] / fit.p['a'][0]
    new_data = {'a1/a0': gv.gvar(1, 1e-5)}
    new_p = lsqfit.wavg([fit.p, new_data])
    print('chi2/dof = %.2f\n' % (new_p.chi2 / new_p.dof))
    print('E:', new_p['E'][:4])
    print('a:', new_p['a'][:4])
    print('a1/a0:', new_p['a1/a0'])

    if DO_BAYES:
        # Bayesian Fit
        gv.ranseed([123])
        prior = make_prior(4)
        fit = lsqfit.nonlinear_fit(data=(x, y), fcn=f, prior=prior, p0=fit.pmean)
        sys.stdout = tee.tee(sys_stdout, open("eg1c.out", "w"))
        # print fit
        expval = lsqfit.BayesIntegrator(fit, limit=10.)
        # adapt integrator to PDF
        expval(neval=40000, nitn=10)
        # calculate expectation value of function g(p)
        fit_hist = gv.PDFHistogram(fit.p['E'][0])

        def g(p):
            parameters = [p['a'][0], p['E'][0]]
            return dict(
                mean=parameters,
                outer=np.outer(parameters, parameters),
                hist=fit_hist.count(p['E'][0]),
                )

        r = expval(g, neval=40000, nitn=10, adapt=False)
        # print results
        print(r.summary())
        means = r['mean']
        cov = r['outer'] - np.outer(r['mean'], r['mean'])
        print('Results from Bayesian Integration:')
        print('a0: mean =', means[0], '  sdev =', cov[0, 0]**0.5)
        print('E0: mean =', means[1], '  sdev =', cov[1, 1]**0.5)
        print('covariance from Bayesian integral =', np.array2string(
            cov, prefix=36 * ' '))
        print()
        print('Results from Least-Squares Fit:')
        print('a0: mean =', fit.p['a'][0].mean, '  sdev =', fit.p['a'][0].sdev)
        print('E0: mean =', fit.p['E'][0].mean, '  sdev =', fit.p['E'][0].sdev)
        print('covariance from least-squares fit =', np.array2string(
            gv.evalcov([fit.p['a'][0], fit.p['E'][0]]), prefix=36 * ' ',
            precision=3))
        sys.stdout = sys_stdout
        # make histogram of E[0] probabilty
        plt = fit_hist.make_plot(r['hist'])
        plt.xlabel('$E_0$')
        plt.ylabel('probability')
        plt.savefig('eg1c.png', bbox_inches='tight')
        # plt.show()

    if DO_BOOTSTRAP:
        Nbs = 40                        # number of bootstrap copies
        outputs = {
            'E1/E0': [], 'E2/E0': [], 'a1/a0': [],
            'a2/a0': [], 'E1': [], 'a1': []
            }                           # results
        for bsfit in fit.bootstrap_iter(n=Nbs):
            E = bsfit.pmean['E']        # best-fit parameters
            a = bsfit.pmean['a']
            outputs['E1/E0'].append(E[1] / E[0])    # accumulate results
            outputs['E2/E0'].append(E[2] / E[0])
            outputs['a1/a0'].append(a[1] / a[0])
            outputs['a2/a0'].append(a[2] / a[0])
            outputs['E1'].append(E[1])
            outputs['a1'].append(a[1])
            # print E[:2]
            # print a[:2]
            # print bsfit.chi2/bsfit.dof
        # extract means and standard deviations from the bootstrap output
        for k in outputs:
            outputs[k] = gv.gvar(np.mean(outputs[k]), np.std(outputs[k]))
        print('Bootstrap results:')
        print('E1/E0 =', outputs['E1/E0'], '  E2/E1 =', outputs['E2/E0'])
        print('a1/a0 =', outputs['a1/a0'], '  a2/a0 =', outputs['a2/a0'])
        print('E1 =', outputs['E1'], '  a1 =', outputs['a1'])
def main():
    """Run an SVD diagnosis on the Ds-Ds dataset and plot the ratio."""
    dset = cf.read_dataset('Ds-Ds.h5')
    s = gv.dataset.svd_diagnosis(dset, models=make_models())
    print('svdcut =', s.svdcut)
    s.plot_ratio(show=True)

import importlib
import sys

# 'Ds-Ds' is not a valid Python identifier, so the module is loaded via
# importlib; the fallback branch targets Python 2.
if sys.version_info > (2, ):
    make_models = importlib.import_module('Ds-Ds').make_models
else:
    make_models = importlib.__import__('Ds-Ds').make_models

if __name__ == '__main__':
    gv.ranseed(123456)      # reproducible random numbers
    main()
    # (commented-out profiling driver retained below)
    # if True:
    #     main()
    # else:
    #     import cProfile, pstats, StringIO
    #     pr = cProfile.Profile()
    #     pr.enable()
    #     main()
    #     pr.disable()
    #     s = StringIO.StringIO()
    #     sortby = 'tottime'
    #     ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
    #     ps.print_stats()
    #     print (s.getvalue())
gvar.ode.Integrator to integrate the pendulum's equation of motion d/dt d/dt theta(t) = - (g/l) sin(theta(t)), and gvar.root.refine to find the period. The uncertainties in l and theta(0) are propagated through the integration and root-finding algorithms by gvar. The impact on the time-keeping abilities of the clock housing this pendulum are also examined. """ from __future__ import print_function # makes this work for python2 and 3 import gvar as gv import numpy as np gv.ranseed((1,2,4)) # gives reproducible random numbers def main(): l = gv.gvar(0.25, 0.0005) # length of pendulum theta_max = gv.gvar(np.pi / 6, 0.025) # max angle of swing y = make_pendulum(theta_max, l) # y(t) = [theta(t), d/dt theta(t)] T = find_period(y, Tapprox=1.0) print('period T = {} sec'.format(T)) fmt = 'uncertainty = {:.2f} min/day\n' print(fmt.format((T.sdev / T.mean) * 60. * 24.)) # error budget for T inputs = gv.BufferDict() # dict(l=l, theta_max=theta_max) inputs['l'] = l inputs['theta_max'] = theta_max outputs = {'T':T}
prior_pdf = self.gaussian_pdf(p.buf[:len(self.prior.buf)] - self.prior.buf) return np.prod((1. - w) * data_pdf1 + w * data_pdf2) * np.prod(prior_pdf) @staticmethod def gaussian_pdf(x, f=1.): xmean = gv.mean(x) xvar = gv.var(x) * f**2 return gv.exp(-xmean**2 / 2. / xvar) / gv.sqrt(2 * np.pi * xvar) def fitfcn(x, p): c = p['c'] return c[0] + c[1] * x #** c[2] def make_prior(): prior = gv.BufferDict(c=gv.gvar(['0(5)', '0(5)'])) if LSQFIT_ONLY: return prior if MULTI_W: prior['unif(w)'] = gv.BufferDict.uniform('unif', 0., 1., shape=19) else: prior['unif(w)'] = gv.BufferDict.uniform('unif', 0., 1.) return prior if __name__ == '__main__': gv.ranseed([12345]) main()
def main():
    """Scan nexp = 1..4, splitting a 20-term prior between the fit and the
    data model; then optional error-budget and bootstrap stages.

    Fix: converted Python-2-only ``print`` statements (SyntaxError under
    Python 3) to ``print()`` calls; output strings are unchanged.
    NOTE(review): the original indentation was lost; the loop/branch
    nesting below is reconstructed — confirm against the upstream eg5
    example script.
    """
    gd.ranseed([2009, 2010, 2011, 2012])    # initialize random numbers (opt.)
    max_prior = make_prior(20)              # maximum sized prior
    p0 = None                               # make larger fits go faster (opt.)
    sys_stdout = sys.stdout
    if USE_SVD:
        sys.stdout = tee.tee(sys_stdout, open("eg5a.out", "w"))
    for nexp in range(1, 5):
        print('************************************* nexp =', nexp)
        fit_prior = lsqfit.GPrior()         # prior used in fit
        ymod_prior = lsqfit.GPrior()        # part of max_prior absorbed in ymod
        for k in max_prior:
            fit_prior[k] = max_prior[k][:nexp]
            ymod_prior[k] = max_prior[k][nexp:]
        x, y = make_data(ymod_prior)        # make fit data
        fit = lsqfit.nonlinear_fit(
            data=(x, y), fcn=f, prior=fit_prior, p0=p0,
            svdcut=SVDCUT, maxit=10000)
        if nexp == 4 and not USE_SVD:
            sys.stdout = tee.tee(sys_stdout, open("eg5b.out", "w"))
        print(fit.format(100))              # print the fit results
        # if nexp>3:
        #     E = fit.p['E']                # best-fit parameters
        #     a = fit.p['a']
        #     print 'E1/E0 =',E[1]/E[0],'  E2/E0 =',E[2]/E[0]
        #     print 'a1/a0 =',a[1]/a[0],'  a2/a0 =',a[2]/a[0]
        #     E = fit.palt['E']             # best-fit parameters
        #     a = fit.palt['a']
        #     print 'E1/E0 =',E[1]/E[0],'  E2/E0 =',E[2]/E[0]
        #     print 'a1/a0 =',a[1]/a[0],'  a2/a0 =',a[2]/a[0]
        print()
        if fit.chi2 / fit.dof < 1.:
            p0 = fit.pmean                  # starting point for next fit (opt.)
    E = fit.p['E']                          # best-fit parameters
    a = fit.p['a']
    print('E1/E0 =', (E[1] / E[0]).fmt(), '  E2/E0 =', (E[2] / E[0]).fmt())
    print('a1/a0 =', (a[1] / a[0]).fmt(), '  a2/a0 =', (a[2] / a[0]).fmt())
    sys.stdout = sys_stdout

    if DO_ERRORBUDGET:
        if USE_SVD:
            sys.stdout = tee.tee(sys_stdout, open("eg5d.out", "w"))
        outputs = {'E1/E0': E[1] / E[0], 'E2/E0': E[2] / E[0],
                   'a1/a0': a[1] / a[0], 'a2/a0': a[2] / a[0]}
        inputs = {'E': max_prior['E'], 'a': max_prior['a'],
                  'svd': fit.svdcorrection}
        print(fit.fmt_values(outputs))
        print(fit.fmt_errorbudget(outputs, inputs))
        sys.stdout = sys_stdout
        outputs = {''}

    if DO_BOOTSTRAP:
        Nbs = 40                            # number of bootstrap copies
        outputs = {'E1/E0': [], 'E2/E0': [], 'a1/a0': [],
                   'a2/a0': [], 'E1': [], 'a1': []}     # results
        for bsfit in fit.bootstrap_iter(n=Nbs):
            E = bsfit.pmean['E']            # best-fit parameters
            a = bsfit.pmean['a']
            outputs['E1/E0'].append(E[1] / E[0])    # accumulate results
            outputs['E2/E0'].append(E[2] / E[0])
            outputs['a1/a0'].append(a[1] / a[0])
            outputs['a2/a0'].append(a[2] / a[0])
            outputs['E1'].append(E[1])
            outputs['a1'].append(a[1])
            # print E[:2]
            # print a[:2]
            # print bsfit
        # extract means and "standard deviations" from the bootstrap output
        outputs = gd.dataset.avg_data(outputs, bstrap=True)
        # for k in outputs:
        #     outputs[k] = gd.gvar(np.mean(outputs[k]),np.std(outputs[k]))
        if USE_SVD:
            sys.stdout = tee.tee(sys_stdout, open("eg5e.out", "w"))
        print('Bootstrap results:')
        print('E1/E0 =', outputs['E1/E0'].fmt(), '  E2/E1 =', outputs['E2/E0'].fmt())
        print('a1/a0 =', outputs['a1/a0'].fmt(), '  a2/a0 =', outputs['a2/a0'].fmt())
        print('E1 =', outputs['E1'].fmt(), '  a1 =', outputs['a1'].fmt())
def main():
    """Multi-exponential fits for nexp = 3..7; the nexp=5 results are tee'd to eg3.out.

    Optional bootstrap and plot analyses follow (module-level flags DO_BOOTSTRAP,
    DO_PLOT; make_data, make_prior, f, tee are also module-level).
    """
    gv.ranseed([2009, 2010, 2011, 2012])    # initialize random numbers (opt.)
    x, y = make_data()                      # make fit data
    p0 = None                               # make larger fits go faster (opt.)
    sys_stdout = sys.stdout
    for nexp in range(3, 8):
        prior = make_prior(nexp)
        fit = lsqfit.nonlinear_fit(
            data=(x, y), fcn=f, prior=prior, p0=p0, svdcut=1e-15,
            )
        if fit.chi2 / fit.dof < 1.0:
            p0 = fit.pmean                  # starting point for next fit (opt.)
        if nexp == 5:
            # capture the nexp=5 iteration in eg3.out
            sys.stdout = tee.tee(sys_stdout, open("eg3.out", "w"))
        print("************************************* nexp =", nexp)
        print(fit)                          # print the fit results
        E = fit.p["E"]                      # best-fit parameters
        a = fit.p["a"]
        print("E1/E0 =", E[1] / E[0], " E2/E0 =", E[2] / E[0])
        print("a1/a0 =", a[1] / a[0], " a2/a0 =", a[2] / a[0])
        sys.stdout = sys_stdout
        print()
    sys.stdout = sys_stdout
    if DO_BOOTSTRAP:
        Nbs = 10                            # number of bootstrap copies
        outputs = {                         # accumulated bootstrap results
            "E1/E0": [], "E2/E0": [], "a1/a0": [],
            "a2/a0": [], "E1": [], "a1": [],
            }
        for bsfit in fit.bootstrap_iter(n=Nbs):
            E = bsfit.pmean["E"]            # best-fit parameters
            a = bsfit.pmean["a"]
            outputs["E1/E0"].append(E[1] / E[0])    # accumulate results
            outputs["E2/E0"].append(E[2] / E[0])
            outputs["a1/a0"].append(a[1] / a[0])
            outputs["a2/a0"].append(a[2] / a[0])
            outputs["E1"].append(E[1])
            outputs["a1"].append(a[1])
        # extract means and standard deviations from the bootstrap output
        for k in outputs:
            outputs[k] = gv.gvar(np.mean(outputs[k]), np.std(outputs[k]))
        print("Bootstrap results:")
        print("E1/E0 =", outputs["E1/E0"], " E2/E1 =", outputs["E2/E0"])
        print("a1/a0 =", outputs["a1/a0"], " a2/a0 =", outputs["a2/a0"])
        print("E1 =", outputs["E1"], " a1 =", outputs["a1"])
    if DO_PLOT:
        print(fit.format(100))              # print the fit results
        import pylab as pp
        from gvar import mean, sdev
        fity = f(x, fit.pmean)
        ratio = y / fity                    # data over best fit; should be ~1
        pp.xlim(0, 21)
        pp.xlabel("x")
        pp.ylabel("y/f(x,p)")
        pp.errorbar(x=x, y=mean(ratio), yerr=sdev(ratio), fmt="ob")
        pp.plot([0.0, 21.0], [1.0, 1.0])
        pp.show()
else: sdi = sd[i] nextfield = avgfmt(av[i],sdi) if (len(nextfield)+len(line))>78: lines = lines + line + '\n' line = ''.ljust(15) + nextfield else: line = line + nextfield table.append(lines + line +'\n') return '\n'.join(table) ## if __name__ == '__main__': import gvar gvar.ranseed((1950,1)) r1 = gvar.gvar(8.,1.) r2 = gvar.gvar([-10.,-9.],[2.,3.]) r3 = gvar.gvar([[0.,1.],[2.,3.]],[[1.,2.],[3.,4.]]) r3_iter = gvar.raniter(r3) r2_iter = gvar.raniter(r2) N = 1001 d = Dataset(bstrap=False) for x in range(N): d.append('x',r3_iter.next()) for x in range(N): d.append('y',r2_iter.next()) d2 = Dataset(bstrap=False) for x in range(N): d2.append('z',r1()) d.copy(d2)
data_pdf2 = self.gaussian_pdf(y_fx, 10.) prior_pdf = self.gaussian_pdf( p.buf[:len(self.prior.buf)] - self.prior.buf ) return np.prod((1. - w) * data_pdf1 + w * data_pdf2) * np.prod(prior_pdf) @staticmethod def gaussian_pdf(x, f=1.): xmean = gv.mean(x) xvar = gv.var(x) * f ** 2 return gv.exp(-xmean ** 2 / 2. /xvar) / gv.sqrt(2 * np.pi * xvar) def fitfcn(x, p): c = p['c'] return c[0] + c[1] * x #** c[2] def make_prior(): prior = gv.BufferDict(c=gv.gvar(['0(5)', '0(5)'])) if LSQFIT_ONLY: return prior if MULTI_W: prior['erfinv(2w-1)'] = gv.gvar(19 * ['0(1)']) / 2 ** 0.5 else: prior['erfinv(2w-1)'] = gv.gvar('0(1)') / 2 ** 0.5 return prior if __name__ == '__main__': gv.ranseed([12345]) main()
gvar.ode.Integrator to integrate the pendulum's equation of motion d/dt d/dt theta(t) = - (g/l) sin(theta(t)), and gvar.root.refine to find the period. The uncertainties in l and theta(0) are propagated through the integration and root-finding algorithms by gvar. The impact on the time-keeping abilities of the clock housing this pendulum are also examined. """ from __future__ import print_function # makes this work for python2 and 3 import gvar as gv import numpy as np gv.ranseed((1, 2, 4)) # gives reproducible random numbers def main(): l = gv.gvar(0.25, 0.0005) # length of pendulum theta_max = gv.gvar(np.pi / 6, 0.025) # max angle of swing y = make_pendulum(theta_max, l) # y(t) = [theta(t), d/dt theta(t)] T = find_period(y, Tapprox=1.0) print('period T = {} sec'.format(T)) fmt = 'uncertainty = {:.2f} min/day\n' print(fmt.format((T.sdev / T.mean) * 60. * 24.)) # error budget for T inputs = gv.BufferDict() # dict(l=l, theta_max=theta_max) inputs['l'] = l inputs['theta_max'] = theta_max
b = p['b'] E = p['E'] return a * x**(-b) * np.exp(-E * x) def make_prior(): prior = {} prior['a'] = gv.gvar(1.0, 1.0) prior['E'] = gv.gvar(4.5, 100.0) prior['b'] = gv.gvar(-10.0, 50.0) return prior if __name__ == '__main__': gv.ranseed([2009, 2010, 2011, 2012]) p0 = None prior = make_prior() #p0 = {'a':0.1,'E':0.5} # ***** # #print f(X,p0) fit = lsqfit.nonlinear_fit(data = (X, Y), fcn=f, prior=prior, p0=p0) print fit print(fit.format(maxline=True))
def test_svd_diagnosis(self):
    """svd_diagnosis: svdcut vs sample size, dict input, and model-based use."""
    # random correlated data (10x10 correlation matrix)
    chebval = np.polynomial.chebyshev.chebval
    gv.ranseed(1)
    x = np.linspace(-.9, .9, 10)
    # iterator of random Chebyshev coefficient vectors, one per sample
    c = gv.raniter(gv.gvar(len(x) * ['0(1)']))
    # small dataset (big svdcut)
    dset = []
    for n in range(15):
        dset.append(chebval(x, next(c)))
    gv.ranseed(2)
    s = gv.dataset.svd_diagnosis(dset)
    # few samples => large svd cut expected
    self.assertGreater(s.svdcut, 0.01)
    # print(s.svdcut)
    # s.plot_ratio(show=True)
    # test with dictionary; same seed so result must match the array case
    gv.ranseed(2)
    sd = gv.dataset.svd_diagnosis(dict(a=dset))
    self.assertEqual(s.svdcut, sd.svdcut)
    # large dataset (small or no svdcut)
    dset = []
    for n in range(100):
        dset.append(chebval(x, next(c)))
    gv.ranseed(3)
    s = svd_diagnosis(dset)
    self.assertGreater(0.01, s.svdcut)
    # print(s.svdcut)
    # s.plot_ratio(show=True)
    # with models (only if lsqfit installed)
    if lsqfit is None:
        return
    class Linear(lsqfit.MultiFitterModel):
        """Straight-line model y = intercept + slope * x for MultiFitter."""
        def __init__(self, datatag, x, intercept, slope):
            super(Linear, self).__init__(datatag)
            self.x = np.array(x)
            # intercept and slope are the parameter keys used in the prior
            self.intercept = intercept
            self.slope = slope
        def fitfcn(self, p):
            " Model prediction from parameters p. "
            return p[self.intercept] + p[self.slope] * self.x
        def buildprior(self, prior, mopt=None):
            " Extract the model's parameters from prior. "
            newprior = {}
            newprior[self.intercept] = prior[self.intercept]
            newprior[self.slope] = prior[self.slope]
            return newprior
        def builddata(self, data):
            " Extract the model's fit data from data. "
            return data[self.datatag]
        def builddataset(self, dset):
            " Extract the model's fit data from a dataset. "
            return dset[self.datatag]
    # 15 highly correlated samples of a noisy straight line y ~ 3 + 2x
    x = np.array([1., 2., 3., 4., 5., 6., 7., 8., 9., 10.])
    y_samples = [
        [2.8409, 4.8393, 6.8403, 8.8377, 10.8356, 12.8389, 14.8356, 16.8362, 18.8351, 20.8341],
        [2.8639, 4.8612, 6.8597, 8.8559, 10.8537, 12.8525, 14.8498, 16.8487, 18.8460, 20.8447],
        [3.1048, 5.1072, 7.1071, 9.1076, 11.1090, 13.1107, 15.1113, 17.1134, 19.1145, 21.1163],
        [3.0710, 5.0696, 7.0708, 9.0705, 11.0694, 13.0681, 15.0693, 17.0695, 19.0667, 21.0678],
        [3.0241, 5.0223, 7.0198, 9.0204, 11.0191, 13.0193, 15.0198, 17.0163, 19.0154, 21.0155],
        [2.9719, 4.9700, 6.9709, 8.9706, 10.9707, 12.9705, 14.9699, 16.9686, 18.9676, 20.9686],
        [3.0688, 5.0709, 7.0724, 9.0730, 11.0749, 13.0776, 15.0790, 17.0800, 19.0794, 21.0795],
        [3.1471, 5.1468, 7.1452, 9.1451, 11.1429, 13.1445, 15.1450, 17.1435, 19.1425, 21.1432],
        [3.0233, 5.0233, 7.0225, 9.0224, 11.0225, 13.0216, 15.0224, 17.0217, 19.0208, 21.0222],
        [2.8797, 4.8792, 6.8803, 8.8794, 10.8800, 12.8797, 14.8801, 16.8797, 18.8803, 20.8812],
        [3.0388, 5.0407, 7.0409, 9.0439, 11.0443, 13.0459, 15.0455, 17.0479, 19.0493, 21.0505],
        [3.1353, 5.1368, 7.1376, 9.1367, 11.1360, 13.1377, 15.1369, 17.1400, 19.1384, 21.1396],
        [3.0051, 5.0063, 7.0022, 9.0052, 11.0040, 13.0033, 15.0007, 16.9989, 18.9994, 20.9995],
        [3.0221, 5.0197, 7.0193, 9.0183, 11.0179, 13.0184, 15.0164, 17.0177, 19.0159, 21.0155],
        [3.0188, 5.0200, 7.0184, 9.0183, 11.0189, 13.0188, 15.0191, 17.0183, 19.0177, 21.0186],
        ]
    dset = dict(y=y_samples)
    model = Linear('y', x, intercept='y0', slope='s')
    prior = gv.gvar(dict(y0='1(1)', s='2(2)'))
    gv.ranseed(4)
    s = svd_diagnosis(dset, models=[model])
    self.assertGreater(s.nmod, 0)
    # svdcut falls between the nmod-1 and nmod eigenvalue ratios
    self.assertGreater(s.svdcut, s.val[s.nmod - 1] / s.val[-1])
    self.assertGreater(s.val[s.nmod] / s.val[-1], s.svdcut)
    return # skip rest
    fitter = lsqfit.MultiFitter(models=[model])
    fit = fitter.lsqfit(prior=prior, svdcut=s.svdcut, data=s.avgdata)
    print(fit)
    # s.avgdata = gv.gvar(gv.mean(s.avgdata), gv.sdev(s.avgdata))
    fit = fitter.lsqfit(prior=prior, data=s.avgdata)
    print(fit)
    s.plot_ratio(show=True)
from __future__ import print_function import lsqfit import gvar as gv import numpy as np try: import vegas gv.ranseed(1) PLOT = False RESULTS = False # least-squares fit x = np.array([0.1, 1.2, 1.9, 3.5]) y = gv.gvar(["1.2(1.0)", "2.4(1)", "2.0(1.2)", "5.2(3.2)"]) prior = gv.BufferDict() prior["a"] = "0(5)" prior["s"] = "0(2)" prior["g"] = "2(2)" prior = gv.gvar(prior) def f(x, p): return p["a"] + p["s"] * x ** p["g"] fit = lsqfit.nonlinear_fit(data=(x, y), prior=prior, fcn=f, debug=True) print(fit) hist = gv.PDFHistogram(fit.p["s"] * fit.p["g"])
# GNU General Public License for more details. from __future__ import print_function import numpy as np import gvar as gv import lsqfit def make_fake_data(x, p, f): f = f(x, p) df = gv.gvar(len(f) * ['0.000(1)']) return np.array([fi + dfi() / 2. + dfi for fi, dfi in zip(f, df)]) gv.ranseed(12) def f(x, p): return p[0] + p[1] * np.exp(-p[2] * x) def main(): p0 = [0.5, 0.4, 0.7] N = 50000 # takes 2min to do 2000000; scales linearly x = np.linspace(0.2, 2., N) y = make_fake_data(x, p0, f) print('y = [{} {} ... {}]\n'.format(y[0], y[1], y[-1])) prior = gv.gvar(['0(1)', '0(1)', '0(1)']) fit = lsqfit.nonlinear_fit(udata=(x, y), prior=prior, fcn=f)
def main():
    """Fits for nexp = 3, 4 plus optional error-budget, simulation,
    empirical-Bayes and plot analyses.

    Module-level flags DO_ERRORBUDGET, DO_SIMULATIONS, DO_EMPBAYES, DO_PLOT
    select the extra analyses; selected output is tee'd to the eg4*.out files.
    """
    gv.ranseed([2009, 2010, 2011, 2012])    # initialize random numbers (opt.)
    x, y = make_data()                      # make fit data
    p0 = None                               # make larger fits go faster (opt.)
    for nexp in range(3, 5):
        print('************************************* nexp =', nexp)
        prior = make_prior(nexp)
        fit = lsqfit.nonlinear_fit(data=(x, y), fcn=f, prior=prior, p0=p0)
        print(fit)                          # print the fit results
        E = fit.p['E']                      # best-fit parameters
        a = fit.p['a']
        print('E1/E0 =', E[1] / E[0], ' E2/E0 =', E[2] / E[0])
        print('a1/a0 =', a[1] / a[0], ' a2/a0 =', a[2] / a[0])
        print()
        if fit.chi2 / fit.dof < 1.:
            p0 = fit.pmean                  # starting point for next fit (opt.)
    sys_stdout = sys.stdout
    if DO_ERRORBUDGET:
        # replay these statements, echoing them like an interactive session
        lines = [
            "E = fit.p['E']",
            "a = fit.p['a']",
            "print(E[1] / E[0])",
            "print((E[1] / E[0]).partialsdev(fit.prior['E']))",
            "print((E[1] / E[0]).partialsdev(fit.prior['a']))",
            "print((E[1] / E[0]).partialsdev(y))",
            ]
        sys.stdout = tee.tee(sys_stdout, open("eg4c.out", "w"))
        for line in lines:
            print(">>>", line)
            if line[:5] == "print":
                # eval of the literal strings defined just above (trusted)
                print(eval(line[5:]))
        outputs = {
            'E1/E0': E[1] / E[0], 'E2/E0': E[2] / E[0],
            'a1/a0': a[1] / a[0], 'a2/a0': a[2] / a[0],
            }
        inputs = {'E': fit.prior['E'], 'a': fit.prior['a'], 'y': y}
        sys.stdout = tee.tee(sys_stdout, open("eg4b.out", "w"))
        print(fit.fmt_values(outputs))
        print(fit.fmt_errorbudget(outputs, inputs))
        sys.stdout = sys_stdout
    if DO_SIMULATIONS:
        # fit simulations
        sys.stdout = tee.tee(sys_stdout, open("eg4d.out", "w"))
        for sfit in fit.simulated_fit_iter(3):
            print('************************************* simulation')
            print(sfit)
            sE = sfit.p['E']                # simulated best-fit parameters
            sa = sfit.p['a']
            E = sfit.pexact['E']            # exact values used in simulation
            a = sfit.pexact['a']
            print('E1/E0 =', sE[1] / sE[0], ' E2/E0 =', sE[2] / sE[0])
            print('a1/a0 =', sa[1] / sa[0], ' a2/a0 =', sa[2] / sa[0])
            print('\nSimulated Fit Values - Exact Values:')
            print('E1/E0:', (sE[1] / sE[0]) - (E[1] / E[0]),
                  ' E2/E0:', (sE[2] / sE[0]) - (E[2] / E[0]))
            print('a1/a0:', (sa[1] / sa[0]) - (a[1] / a[0]),
                  ' a2/a0:', (sa[2] / sa[0]) - (a[2] / a[0]))
            # compute chi**2 comparing fit results to exact results
            sim_results = [sE[0], sE[1], sa[0], sa[1]]
            exact_results = [E[0], E[1], a[0], a[1]]
            chi2 = gv.chi2(sim_results, exact_results, svdcut=1e-8)
            print('\nParameter chi2/dof [dof] = %.2f' % (chi2 / chi2.dof),
                  '[%d]' % chi2.dof, ' Q = %.1f' % chi2.Q)
            print()
        sys.stdout = sys_stdout
    if DO_EMPBAYES:
        def fitargs(z, nexp=nexp, prior=prior, f=f, data=(x, y), p0=p0):
            # rebuild the 'a' prior with widths scaled by exp(z[0])
            z = gv.exp(z)
            prior['a'] = [gv.gvar(0.5, 0.5 * z[0]) for i in range(nexp)]
            return dict(prior=prior, data=data, fcn=f, p0=p0)
        z0 = [0.0]
        fit, z = lsqfit.empbayes_fit(z0, fitargs, tol=1e-3)
        sys.stdout = tee.tee(sys_stdout, open("eg4a.out", "w"))
        print(fit)                          # print the optimized fit results
        E = fit.p['E']                      # best-fit parameters
        a = fit.p['a']
        print('E1/E0 =', E[1] / E[0], ' E2/E0 =', E[2] / E[0])
        print('a1/a0 =', a[1] / a[0], ' a2/a0 =', a[2] / a[0])
        sys.stdout = sys_stdout
        print()
    if DO_PLOT:
        import pylab as pp
        from gvar import mean, sdev
        fity = f(x, fit.pmean)
        ratio = y / fity                    # data over best fit; should be ~1
        pp.xlim(0, 21)
        pp.xlabel('x')
        pp.ylabel('y/f(x,p)')
        pp.errorbar(x=x, y=mean(ratio), yerr=sdev(ratio), fmt='ob')
        pp.plot([0.0, 21.0], [1.0, 1.0])
        pp.show()
def main():
    """Exponential fits with growing nexp (results tee'd to eg1.out), then a
    4-term refit, two ways of adding an extra 'a1/a0' datum (eg1a/eg1b.out),
    and optional Bayesian-integration, bootstrap and plot analyses
    (module-level flags DO_BAYES, DO_BOOTSTRAP, DO_PLOT)."""
    gv.ranseed([2009, 2010, 2011, 2012])    # initialize random numbers (opt.)
    x, y = make_data()                      # make fit data
    p0 = None                               # make larger fits go faster (opt.)
    sys_stdout = sys.stdout
    sys.stdout = tee.tee(sys.stdout, open("eg1.out", "w"))
    for nexp in range(1, 11):
        prior = make_prior(nexp)
        fit = lsqfit.nonlinear_fit(data=(x, y), fcn=f, prior=prior, p0=p0)
        if fit.chi2 / fit.dof < 1.:
            p0 = fit.pmean                  # starting point for next fit (opt.)
        if nexp > 5 and nexp < 10:
            # compress the middle of the scan to a row of dots
            print(".".center(73))
            continue
        elif nexp not in [1]:
            print("")
        print('************************************* nexp =', nexp)
        print(fit.format())                 # print the fit results
        E = fit.p['E']                      # best-fit parameters
        a = fit.p['a']
        if nexp > 2:
            print('E1/E0 =', E[1] / E[0], ' E2/E0 =', E[2] / E[0])
            print('a1/a0 =', a[1] / a[0], ' a2/a0 =', a[2] / a[0])
    # redo fit with 4 parameters since that is enough
    prior = make_prior(4)
    fit = lsqfit.nonlinear_fit(data=(x, y), fcn=f, prior=prior, p0=fit.pmean)
    sys.stdout = sys_stdout
    print(fit)
    # extra data 1
    print('\n--------------------- fit with extra information')
    sys.stdout = tee.tee(sys_stdout, open("eg1a.out", "w"))
    def ratio(p):
        # extra datum: the ratio a1/a0 is known to be 1(1e-5)
        return p['a'][1] / p['a'][0]
    newfit = lsqfit.nonlinear_fit(data=gv.gvar(1, 1e-5), fcn=ratio, prior=fit.p)
    print(newfit)
    E = newfit.p['E']
    a = newfit.p['a']
    print('E1/E0 =', E[1] / E[0], ' E2/E0 =', E[2] / E[0])
    print('a1/a0 =', a[1] / a[0], ' a2/a0 =', a[2] / a[0])
    # alternate method for extra data: weighted average with the ratio datum
    sys.stdout = tee.tee(sys_stdout, open("eg1b.out", "w"))
    fit.p['a1/a0'] = fit.p['a'][1] / fit.p['a'][0]
    new_data = {'a1/a0': gv.gvar(1, 1e-5)}
    new_p = lsqfit.wavg([fit.p, new_data])
    print('chi2/dof = %.2f\n' % (new_p.chi2 / new_p.dof))
    print('E:', new_p['E'][:4])
    print('a:', new_p['a'][:4])
    print('a1/a0:', new_p['a1/a0'])
    if DO_BAYES:
        # Bayesian Fit
        gv.ranseed([123])
        prior = make_prior(4)
        fit = lsqfit.nonlinear_fit(data=(x, y), fcn=f, prior=prior, p0=fit.pmean)
        sys.stdout = tee.tee(sys_stdout, open("eg1c.out", "w"))
        expval = lsqfit.BayesIntegrator(fit, limit=10.)
        # adapt integrator to PDF
        expval(neval=10000, nitn=10)
        # calculate expectation value of function g(p)
        fit_hist = gv.PDFHistogram(fit.p['E'][0])
        def g(p):
            parameters = [p['a'][0], p['E'][0]]
            return dict(
                mean=parameters,
                outer=np.outer(parameters, parameters),
                hist=fit_hist.count(p['E'][0]),
                )
        r = expval(g, neval=10000, nitn=10, adapt=False)
        # print results
        print(r.summary())
        means = r['mean']
        cov = r['outer'] - np.outer(r['mean'], r['mean'])
        print('Results from Bayesian Integration:')
        print('a0: mean =', means[0], ' sdev =', cov[0, 0] ** 0.5)
        print('E0: mean =', means[1], ' sdev =', cov[1, 1] ** 0.5)
        print('covariance from Bayesian integral =',
              np.array2string(cov, prefix=36 * ' '))
        print()
        print('Results from Least-Squares Fit:')
        print('a0: mean =', fit.p['a'][0].mean, ' sdev =', fit.p['a'][0].sdev)
        print('E0: mean =', fit.p['E'][0].mean, ' sdev =', fit.p['E'][0].sdev)
        print('covariance from least-squares fit =',
              np.array2string(gv.evalcov([fit.p['a'][0], fit.p['E'][0]]),
                              prefix=36 * ' ', precision=3))
        sys.stdout = sys_stdout
        # make histogram of E[0] probabilty
        plt = fit_hist.make_plot(r['hist'])
        plt.xlabel('$E_0$')
        plt.ylabel('probability')
        plt.savefig('eg1c.png', bbox_inches='tight')
        # plt.show()
    sys.stdout = sys_stdout
    if DO_BOOTSTRAP:
        Nbs = 40                            # number of bootstrap copies
        outputs = {                         # accumulated bootstrap results
            'E1/E0': [], 'E2/E0': [], 'a1/a0': [],
            'a2/a0': [], 'E1': [], 'a1': [],
            }
        for bsfit in fit.bootstrap_iter(n=Nbs):
            E = bsfit.pmean['E']            # best-fit parameters
            a = bsfit.pmean['a']
            outputs['E1/E0'].append(E[1] / E[0])    # accumulate results
            outputs['E2/E0'].append(E[2] / E[0])
            outputs['a1/a0'].append(a[1] / a[0])
            outputs['a2/a0'].append(a[2] / a[0])
            outputs['E1'].append(E[1])
            outputs['a1'].append(a[1])
        # extract means and standard deviations from the bootstrap output
        for k in outputs:
            outputs[k] = gv.gvar(np.mean(outputs[k]), np.std(outputs[k]))
        print('Bootstrap results:')
        print('E1/E0 =', outputs['E1/E0'], ' E2/E1 =', outputs['E2/E0'])
        print('a1/a0 =', outputs['a1/a0'], ' a2/a0 =', outputs['a2/a0'])
        print('E1 =', outputs['E1'], ' a1 =', outputs['a1'])
    if DO_PLOT:
        import matplotlib.pyplot as plt
        ratio = y / fit.fcn(x, fit.pmean)   # data over best fit; should be ~1
        plt.xlim(4, 21)
        plt.xlabel('x')
        plt.ylabel('y / f(x,p)')
        plt.errorbar(x=x, y=gv.mean(ratio), yerr=gv.sdev(ratio), fmt='ob')
        plt.plot([4.0, 21.0], [1.0, 1.0], 'b:')
        plt.savefig('eg1.png', bbox_inches='tight')
        plt.show()
# the Free Software Foundation, either version 3 of the License, or # any later version (see <http://www.gnu.org/licenses/>). # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. from __future__ import print_function import collections import numpy as np import lsqfit import gvar as gv import sys gv.ranseed(12) # remove randomness if sys.argv[1:]: SHOW_PLOTS = eval(sys.argv[1]) # display picture of grid ? else: SHOW_PLOTS = True if SHOW_PLOTS: try: import matplotlib except ImportError: SHOW_PLOTS = False def main(): # initial data
sdi = sd[i] nextfield = avgfmt(av[i], sdi) if (len(nextfield) + len(line)) > 78: lines = lines + line + '\n' line = ''.ljust(15) + nextfield else: line = line + nextfield table.append(lines + line + '\n') return '\n'.join(table) ## if __name__ == '__main__': import gvar gvar.ranseed((1950, 1)) r1 = gvar.gvar(8., 1.) r2 = gvar.gvar([-10., -9.], [2., 3.]) r3 = gvar.gvar([[0., 1.], [2., 3.]], [[1., 2.], [3., 4.]]) r3_iter = gvar.raniter(r3) r2_iter = gvar.raniter(r2) N = 1001 d = Dataset(bstrap=False) for x in range(N): d.append('x', r3_iter.next()) for x in range(N): d.append('y', r2_iter.next()) d2 = Dataset(bstrap=False) for x in range(N): d2.append('z', r1()) d.copy(d2)
def main():
    # Exponential fit illustrating SVD diagnosis and SVD/prior noise; output
    # is tee'd to the eg10*.out files.  SEED, NSAMPLE, exact, tee, plt are
    # module-level.
    gv.ranseed(SEED)
    y = exact(NSAMPLE)
    ysamples = [yi for yi in gv.raniter(y, n=NSAMPLE)]
    # above code (don't comment it out) generates the following
    ysamples = [
        [0.0092441016, 0.0068974057, 0.0051480509, 0.0038431422, 0.0028690492],
        [0.0092477405, 0.0069030565, 0.0051531383, 0.0038455855, 0.0028700587],
        [0.0092558569, 0.0069102437, 0.0051596569, 0.0038514537, 0.0028749153],
        [0.0092294581, 0.0068865156, 0.0051395262, 0.003835656, 0.0028630454],
        [0.009240534, 0.0068961523, 0.0051480046, 0.0038424661, 0.0028675632],
        ]
    # round-trip the samples through a formatted string (8 significant digits)
    dstr = '['
    for yi in ysamples:
        dstr += ('[' + len(yi) * '{:10.8g},' + '],').format(*yi)
    dstr += ']'
    ysamples = eval(dstr)   # eval of the locally built literal above (trusted)
    print(np.array(ysamples).tolist())
    s = gv.dataset.svd_diagnosis(ysamples)
    # s.plot_ratio(show=True)
    y = s.avgdata
    x = np.array([15., 16., 17., 18., 19.])
    def f(p):
        # single decaying exponential a * exp(-b x)
        return p['a'] * gv.exp(-p['b'] * x)
    prior = gv.gvar(dict(a='0.75(5)', b='0.30(3)'))
    sys_stdout = sys.stdout
    # eg10a: no SVD cut
    sys.stdout = tee.tee(sys_stdout, open('eg10a.out', 'w'))
    fit = lsqfit.nonlinear_fit(data=y, fcn=f, prior=prior, svdcut=0.0)
    print(fit)
    # eg10b: diagnosed SVD cut
    sys.stdout = tee.tee(sys_stdout, open('eg10b.out', 'w'))
    fit = lsqfit.nonlinear_fit(data=y, fcn=f, prior=prior, svdcut=s.svdcut)
    print(fit)
    # eg10c: diagnosed SVD cut plus SVD noise
    sys.stdout = tee.tee(sys_stdout, open('eg10c.out', 'w'))
    fit = lsqfit.nonlinear_fit(data=y, fcn=f, prior=prior, svdcut=s.svdcut, add_svdnoise=True)
    print(fit)
    # eg10d: data rebuilt with the exact covariance, no cut
    sys.stdout = tee.tee(sys_stdout, open('eg10d.out', 'w'))
    yex = gv.gvar(gv.mean(y), gv.evalcov(exact(1.)))
    fit = lsqfit.nonlinear_fit(data=yex, fcn=f, prior=prior, svdcut=0)
    print(fit)
    # fit.plot_residuals().show()
    # eg10e: diagnosed cut again, then with noise added to prior and SVD modes
    sys.stdout = tee.tee(sys_stdout, open('eg10e.out', 'w'))
    fit = lsqfit.nonlinear_fit(data=y, fcn=f, prior=prior, svdcut=s.svdcut)
    print(fit)
    print('\n================ Add noise to prior, SVD')
    noisyfit = lsqfit.nonlinear_fit(
        data=y, prior=prior, fcn=f, svdcut=s.svdcut, add_svdnoise=True,
        add_priornoise=True)
    print(noisyfit.format(True))
    # save figures
    fit.qqplot_residuals(plot=plt).savefig('eg10e1.png', bbox_inches='tight')
    plt.cla()
    noisyfit.qqplot_residuals(plot=plt).savefig('eg10e2.png', bbox_inches='tight')
def main():
    """Exponential fits for nexp = 3..19 (middle of the scan compressed to
    dots; tee'd to eg1.out), then two ways of adding an extra 'a1/a0' datum
    (eg1a/eg1b.out) plus optional bootstrap and plot analyses (module-level
    flags DO_BOOTSTRAP, DO_PLOT)."""
    gv.ranseed([2009, 2010, 2011, 2012])    # initialize random numbers (opt.)
    x, y = make_data()                      # make fit data
    p0 = None                               # make larger fits go faster (opt.)
    sys_stdout = sys.stdout
    sys.stdout = tee.tee(sys.stdout, open("eg1.out", "w"))
    for nexp in range(3, 20):
        prior = make_prior(nexp)
        fit = lsqfit.nonlinear_fit(data=(x, y), fcn=f, prior=prior, p0=p0)
        if fit.chi2 / fit.dof < 1.:
            p0 = fit.pmean                  # starting point for next fit (opt.)
        if nexp in [8, 9, 10]:
            # compress the middle of the scan to rows of dots
            print(".".center(73))
        if nexp > 7 and nexp < 19:
            continue
        elif nexp not in [3]:
            print("")
        print('************************************* nexp =', nexp)
        print(fit.format())                 # print the fit results
        E = fit.p['E']                      # best-fit parameters
        a = fit.p['a']
        print('E1/E0 =', E[1] / E[0], ' E2/E0 =', E[2] / E[0])
        print('a1/a0 =', a[1] / a[0], ' a2/a0 =', a[2] / a[0])
    # extra data 1
    print('\n--------------------- fit with extra information')
    sys.stdout = tee.tee(sys_stdout, open("eg1a.out", "w"))
    def ratio(p):
        # extra datum: the ratio a1/a0 is known to be 1(1e-5)
        return p['a'][1] / p['a'][0]
    newfit = lsqfit.nonlinear_fit(data=gv.gvar(1, 1e-5), fcn=ratio, prior=fit.p)
    print(newfit)
    # alternate method for extra data: weighted average with the ratio datum
    sys.stdout = tee.tee(sys_stdout, open("eg1b.out", "w"))
    fit.p['a1/a0'] = fit.p['a'][1] / fit.p['a'][0]
    new_data = {'a1/a0': gv.gvar(1, 1e-5)}
    new_p = lsqfit.wavg([fit.p, new_data])
    print('chi2/dof = %.2f\n' % (new_p.chi2 / new_p.dof))
    print('E:', new_p['E'][:4])
    print('a:', new_p['a'][:4])
    print('a1/a0:', new_p['a1/a0'])
    sys.stdout = sys_stdout
    if DO_BOOTSTRAP:
        Nbs = 40                            # number of bootstrap copies
        outputs = {                         # accumulated bootstrap results
            'E1/E0': [], 'E2/E0': [], 'a1/a0': [],
            'a2/a0': [], 'E1': [], 'a1': [],
            }
        for bsfit in fit.bootstrap_iter(n=Nbs):
            E = bsfit.pmean['E']            # best-fit parameters
            a = bsfit.pmean['a']
            outputs['E1/E0'].append(E[1] / E[0])    # accumulate results
            outputs['E2/E0'].append(E[2] / E[0])
            outputs['a1/a0'].append(a[1] / a[0])
            outputs['a2/a0'].append(a[2] / a[0])
            outputs['E1'].append(E[1])
            outputs['a1'].append(a[1])
        # extract means and standard deviations from the bootstrap output
        for k in outputs:
            outputs[k] = gv.gvar(np.mean(outputs[k]), np.std(outputs[k]))
        print('Bootstrap results:')
        print('E1/E0 =', outputs['E1/E0'], ' E2/E1 =', outputs['E2/E0'])
        print('a1/a0 =', outputs['a1/a0'], ' a2/a0 =', outputs['a2/a0'])
        print('E1 =', outputs['E1'], ' a1 =', outputs['a1'])
    if DO_PLOT:
        import pylab as plt
        ratio = y / fit.fcn(x, fit.pmean)   # data over best fit; should be ~1
        plt.xlim(0, 21)
        plt.xlabel('x')
        plt.ylabel('y/f(x,p)')
        plt.errorbar(x=x, y=gv.mean(ratio), yerr=gv.sdev(ratio), fmt='ob')
        plt.plot([0.0, 21.0], [1.0, 1.0])
        plt.show()
inputs = collections.OrderedDict() inputs['prior'] = prior inputs['data'] = data inputs['svdcut'] = fit.correction print(gv.fmt_values(outputs)) print(gv.fmt_errorbudget(outputs, inputs, colwidth=18)) print('Prior:\n') for k in ['etab.l', 'etab.g', 'etab.d', 'etab.e']: print('{:13}{}'.format(k, list(prior[k]))) print() prior_eig = basis.apply(prior, keyfmt='etab.{s1}') for k in ['etab.0', 'etab.1', 'etab.2', 'etab.3']: print('{:13}{}'.format(k, list(prior_eig[k]))) if __name__ == '__main__': gv.ranseed(1234) if True: main() else: import cProfile, pstats, StringIO pr = cProfile.Profile() pr.enable() main() pr.disable() s = StringIO.StringIO() sortby = 'tottime' ps = pstats.Stats(pr, stream=s).sort_stats(sortby) ps.print_stats() print(s.getvalue())
def main():
    # vegas examples: integrate f (and variants) over a 4-d box, showing
    # adaptation, nitn/neval trade-offs, grid re-use, and alpha/beta options.
    # Output is optionally logged to the eg1*.out files (SAVE_OUTPUT,
    # log_stdout/unlog_stdout, f, f_sphere, f2 are module-level).
    print(
        gv.ranseed(
            (5751754790502652836, 7676372623888309739, 7570829798026950508)))
    integ = vegas.Integrator(
        [[-1., 1.], [0., 1.], [0., 1.], [0., 1.]],
        # analyzer=vegas.reporter(),
        )
    if SAVE_OUTPUT:
        log_stdout('eg1a.out')
    result = integ(f, nitn=10, neval=1000)
    print(result.summary())
    print('result = %s    Q = %.2f' % (result, result.Q))
    integ.map.show_grid(
        30, axes=[(0, 1), (2, 3), (0, None), (None, 1), (2, 0), (3, 0)],
        shrink=False)
    if SAVE_OUTPUT:
        unlog_stdout()
        log_stdout('eg1b.out')
    # more iterations vs more evaluations per iteration
    result = integ(f, nitn=100, neval=1000)
    print('larger nitn  => %s    Q = %.2f' % (result, result.Q))
    result = integ(f, nitn=10, neval=1e4)
    print('larger neval => %s    Q = %.2f' % (result, result.Q))
    if SAVE_OUTPUT:
        unlog_stdout()
        log_stdout('eg1c.out')
    # integ.set(map=[[-2., .4, .6, 2.], [0, .4, .6, 2.], [0,.4, .6, 2.], [0.,.4, .6, 2.]])
    # integ.set(map=[[-2., 2.], [0, 2.], [0, 2.], [0., 2.]])
    # fresh integrator on a larger box
    integ = vegas.Integrator(
        [[-2., 2.], [0, 2.], [0, 2.], [0., 2.]],
        )
    result = integ(f, nitn=10, neval=1000)
    print(result.summary())
    print('result = %s    Q = %.2f' % (result, result.Q))
    if SAVE_OUTPUT:
        unlog_stdout()
        log_stdout('eg1c1.out')
    if SAVE_OUTPUT:
        unlog_stdout()
        log_stdout('eg1d.out')
    # adapt first (discarded), then measure
    integ(f, nitn=7, neval=1000)
    result = integ(f, nitn=10, neval=1000)
    print(result.summary())
    print('result = %s    Q = %.2f' % (result, result.Q))
    if SAVE_OUTPUT:
        unlog_stdout()
        log_stdout('eg1e.out')
    # re-use a grid adapted to f for a related integrand g
    integ = vegas.Integrator([[-1, 1]] + 3 * [[0, 1]])
    integ(f, nitn=10, neval=1000)
    def g(x):
        return x[0] * f(x)
    result = integ(g, nitn=10, neval=1000)
    print(result.summary())
    print('result = %s    Q = %.2f' % (result, result.Q))
    if SAVE_OUTPUT:
        unlog_stdout()
        log_stdout('eg1f.out')
    # integ(f_sphere, nitn=10, neval=400000, alpha=0.25)
    # result = integ(f_sphere, nitn=10, neval=400000, alpha=0.25, beta=0.75)#, analyzer=vegas.reporter(5))
    integ(f_sphere, nitn=10, neval=1000, alpha=0.5)
    result = integ(f_sphere, nitn=10, neval=1000, alpha=0.5) #, analyzer=vegas.reporter(5))
    # print(integ.settings())
    print(result.summary())
    print('result = %s    Q = %.2f' % (result, result.Q))
    if SAVE_OUTPUT:
        unlog_stdout()
        log_stdout('eg1g.out')
    # adapt, then freeze the grid for the final estimate
    integ(f_sphere, nitn=10, neval=1000, alpha=0.1)
    result = integ(f_sphere, nitn=10, neval=1000, adapt=False) # alpha=0.1)
    print(result.summary())
    print('result = %s    Q = %.2f' % (result, result.Q))
    if SAVE_OUTPUT:
        unlog_stdout()
        log_stdout('eg1h.out')
    # slow adaptation (small alpha) throughout
    integ(f_sphere, nitn=10, neval=1000, alpha=0.1)
    result = integ(f_sphere, nitn=10, neval=1000, alpha=0.1)
    print(result.summary())
    print('result = %s    Q = %.2f' % (result, result.Q))
    if SAVE_OUTPUT:
        unlog_stdout()
    # log_stdout('eg1h.out')
    # stratified-sampling (beta) demonstration with f2
    integ = vegas.Integrator(4 * [[0, 1]])
    integ(f2, nitn=10, neval=4e4)
    result = integ(f2, nitn=10, neval=4e4, beta=0.75)  # , analyzer=vegas.reporter())
    print(result.summary())
    print('result = %s    Q = %.2f' % (result, result.Q))
    integ.map.show_grid(70)
    print(integ(f2, nitn=10, neval=4e4, beta=0.).summary())
# any later version (see <http://www.gnu.org/licenses/>). # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. from __future__ import print_function # import matplotlib.pyplot as plt import numpy as np import gvar as gv import lsqfit import vegas gv.ranseed(1) def main(): if not hasattr(lsqfit, 'BayesIntegrator'): # fake the run so that `make run` still works outfile = open('bayes.out', 'r').read() print(outfile[:-1]) return x, y = make_data() prior = make_prior() fit = lsqfit.nonlinear_fit(prior=prior, data=(x, y), fcn=fcn) print(fit) # Bayesian integrator expval = vegas.PDFIntegrator(fit.p, sync_ran=False)
def main():
    # vegas examples (reformatted copy): integrate f (and variants) over a
    # 4-d box, showing adaptation, nitn/neval trade-offs, grid re-use, and
    # alpha/beta options.  Output is optionally logged to the eg1*.out files
    # (SAVE_OUTPUT, log_stdout/unlog_stdout, f, f_sphere, f2 are module-level).
    print(gv.ranseed(
        (5751754790502652836, 7676372623888309739, 7570829798026950508)
        ))
    integ = vegas.Integrator(
        [[-1., 1.], [0., 1.], [0., 1.], [0., 1.]],
        # analyzer=vegas.reporter(),
        )
    if SAVE_OUTPUT:
        log_stdout('eg1a.out')
    result = integ(f, nitn=10, neval=1000)
    print(result.summary())
    print('result = %s    Q = %.2f' % (result, result.Q))
    integ.map.show_grid(
        30,
        axes=[(0, 1), (2, 3), (0, None), (None, 1), (2, 0), (3, 0)],
        shrink=False
        )
    if SAVE_OUTPUT:
        unlog_stdout()
        log_stdout('eg1b.out')
    # more iterations vs more evaluations per iteration
    result = integ(f, nitn=100, neval=1000)
    print('larger nitn  => %s    Q = %.2f' % (result, result.Q))
    result = integ(f, nitn=10, neval=1e4)
    print('larger neval => %s    Q = %.2f' % (result, result.Q))
    if SAVE_OUTPUT:
        unlog_stdout()
        log_stdout('eg1c.out')
    # integ.set(map=[[-2., .4, .6, 2.], [0, .4, .6, 2.], [0,.4, .6, 2.], [0.,.4, .6, 2.]])
    # integ.set(map=[[-2., 2.], [0, 2.], [0, 2.], [0., 2.]])
    # fresh integrator on a larger box
    integ = vegas.Integrator(
        [[-2., 2.], [0, 2.], [0, 2.], [0., 2.]],
        )
    result = integ(f, nitn=10, neval=1000)
    print(result.summary())
    print('result = %s    Q = %.2f' % (result, result.Q))
    if SAVE_OUTPUT:
        unlog_stdout()
        log_stdout('eg1c1.out')
    if SAVE_OUTPUT:
        unlog_stdout()
        log_stdout('eg1d.out')
    # adapt first (discarded), then measure
    integ(f, nitn=7, neval=1000)
    result = integ(f, nitn=10, neval=1000)
    print(result.summary())
    print('result = %s    Q = %.2f' % (result, result.Q))
    if SAVE_OUTPUT:
        unlog_stdout()
        log_stdout('eg1e.out')
    # re-use a grid adapted to f for a related integrand g
    integ = vegas.Integrator([[-1,1]] + 3 * [[0, 1]])
    integ(f, nitn=10, neval=1000)
    def g(x):
        return x[0] * f(x)
    result = integ(g, nitn=10, neval=1000)
    print(result.summary())
    print('result = %s    Q = %.2f' % (result, result.Q))
    if SAVE_OUTPUT:
        unlog_stdout()
        log_stdout('eg1f.out')
    # integ(f_sphere, nitn=10, neval=400000, alpha=0.25)
    # result = integ(f_sphere, nitn=10, neval=400000, alpha=0.25, beta=0.75)#, analyzer=vegas.reporter(5))
    integ(f_sphere, nitn=10, neval=1000, alpha=0.5)
    result = integ(f_sphere, nitn=10, neval=1000, alpha=0.5)#, analyzer=vegas.reporter(5))
    # print(integ.settings())
    print(result.summary())
    print('result = %s    Q = %.2f' % (result, result.Q))
    if SAVE_OUTPUT:
        unlog_stdout()
        log_stdout('eg1g.out')
    # adapt, then freeze the grid for the final estimate
    integ(f_sphere, nitn=10, neval=1000, alpha=0.1)
    result = integ(f_sphere, nitn=10, neval=1000, adapt=False) # alpha=0.1)
    print(result.summary())
    print('result = %s    Q = %.2f' % (result, result.Q))
    if SAVE_OUTPUT:
        unlog_stdout()
        log_stdout('eg1h.out')
    # slow adaptation (small alpha) throughout
    integ(f_sphere, nitn=10, neval=1000, alpha=0.1)
    result = integ(f_sphere, nitn=10, neval=1000, alpha=0.1)
    print(result.summary())
    print('result = %s    Q = %.2f' % (result, result.Q))
    if SAVE_OUTPUT:
        unlog_stdout()
    # log_stdout('eg1h.out')
    # stratified-sampling (beta) demonstration with f2
    integ = vegas.Integrator(4 * [[0, 1]])
    integ(f2, nitn=10, neval=4e4)
    result = integ(f2, nitn=10, neval=4e4, beta=0.75)  # , analyzer=vegas.reporter())
    print(result.summary())
    print('result = %s    Q = %.2f' % (result, result.Q))
    integ.map.show_grid(70)
    print(integ(f2, nitn=10, neval=4e4, beta=0.).summary())