def write_results(fit, basis, prior, data, N):
    l.write('Parameters used: ' + '\n')
    l.write('file: ' + file + '\n')
    l.write('t0: ' + str(t0) + '\n')
    l.write('T: ' + str(T) + '\n')
    l.write('tmin: ' + str(tmin) + '\n')
    l.write('tmax: ' + str(tmax) + '\n\n')
    l.write('offtmin: ' + str(offtmin) + '\n')
    l.write('offtmax: ' + str(offtmax) + '\n\n')
    l.write(30 * '=' + '\n' + 'nterm = ' + str(N[-1]) + '\n')
    l.write(fit.format(pstyle=None if N[-1] < 7 else 'm'))
    l.write(30 * '=' + ' Results\n')
    l.write(basis.tabulate(fit.p, keyfmt=tag + '{s1}'))
    if OSC:
        l.write(basis.tabulate(fit.p, keyfmt=otag + '{s1}'))
    E = np.cumsum(fit.p[tag + 'dE'])
    outputs = collections.OrderedDict()
    outputs['a*E(2s-1s)'] = E[1] - E[0]
    outputs['a*E(3s-1s)'] = E[2] - E[0]
    outputs['E(3s-1s)/E(2s-1s)'] = (E[2] - E[0]) / (E[1] - E[0])
    inputs = collections.OrderedDict()
    inputs['prior'] = prior
    inputs['data'] = data
    inputs['svdcut'] = fit.svdcorrection
    l.write(gv.fmt_values(outputs))
    l.write(gv.fmt_errorbudget(outputs, inputs, colwidth=18))
    l.write('Prior:\n')
    for k in [tag + SRC[0], tag + SRC[1]]:
        l.write('{:8}{}\n'.format(k, list(prior[k])))
def main():
    x, y = make_data()              # collect fit data
    p0 = None                       # make larger fits go faster (opt.)
    for nexp in range(1, 7):
        print('************************************* nexp =', nexp)
        prior = make_prior(nexp)
        fit = lsqfit.nonlinear_fit(data=(x, y), fcn=fcn, prior=prior, p0=p0)
        print(fit)                  # print the fit results
        if nexp > 2:
            E = fit.p['E']          # best-fit parameters
            a = fit.p['a']
            print('E1/E0 =', E[1] / E[0], ' E2/E0 =', E[2] / E[0])
            print('a1/a0 =', a[1] / a[0], ' a2/a0 =', a[2] / a[0])
        if fit.chi2 / fit.dof < 1.:
            p0 = fit.pmean          # starting point for next fit (opt.)
        print()

    # error budget analysis
    # outputs = {
    #     'E1/E0':E[1]/E[0], 'E2/E0':E[2]/E[0],
    #     'a1/a0':a[1]/a[0], 'a2/a0':a[2]/a[0]
    #     }
    # inputs = {'E':fit.prior['E'], 'a':fit.prior['a'], 'y':y}
    outputs = gv.BufferDict()
    outputs['E2/E0'] = E[2] / E[0]
    outputs['E1/E0'] = E[1] / E[0]
    outputs['a2/a0'] = a[2] / a[0]
    outputs['a1/a0'] = a[1] / a[0]
    inputs = gv.BufferDict()
    inputs['a'] = fit.prior['a']
    inputs['y'] = y
    inputs['E'] = fit.prior['E']
    print('================= Error Budget Analysis')
    print(gv.fmt_values(outputs))
    print(gv.fmt_errorbudget(outputs, inputs))
def print_results(fit, prior, data):
    """ Print results of fit. """
    outputs = collections.OrderedDict()
    outputs['mDs'] = fit.p['dE'][0]
    outputs['Vnn'] = fit.p['Vnn'][0]
    inputs = collections.OrderedDict()
    inputs['statistics'] = data         # statistical errors in data
    inputs.update(prior)                # errors from priors
    inputs['svd'] = fit.svdcorrection   # errors from svd cut (if present)
    print('\n' + gv.fmt_values(outputs))
    print(gv.fmt_errorbudget(outputs, inputs))
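# --- minimal sketch (added for illustration; not part of the original file) ---
# gv.fmt_errorbudget decomposes the uncertainty of derived quantities
# ("outputs") into contributions from independent error sources ("inputs"),
# which is the pattern used by the print_results/write_results functions here.
# The dummy gvars below are assumptions used only to demonstrate the call.
import gvar as gv

x = gv.gvar('1.00(10)')             # independent input with 10% error
y = gv.gvar('2.00(30)')             # independent input with 15% error
outputs = {'x*y': x * y}            # derived quantity
inputs = {'x': x, 'y': y}           # error sources
print(gv.fmt_values(outputs))               # tabulate values
print(gv.fmt_errorbudget(outputs, inputs))  # % of error from each source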
def main():
    param, data = collect_data('spline.p')
    F, prior = make_fcn_prior(param)
    fit = lsqfit.nonlinear_fit(data=data, prior=prior, fcn=F)
    print(fit)

    # create f(m)
    f = gv.cspline.CSpline(fit.p['mknot'], fit.p['fknot'])

    # create error budget
    outputs = {'f(1)':f(1), 'f(5)':f(5), 'f(9)':f(9)}
    inputs = {'data':data}
    inputs.update(prior)
    print(gv.fmt_values(outputs))
    print(gv.fmt_errorbudget(outputs=outputs, inputs=inputs))

    make_plot(param, data, fit)
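# --- minimal sketch (added for illustration; not part of the original file) ---
# The error budget above works because gv.cspline.CSpline built from gvar-valued
# knots returns gvars when evaluated, so errors propagate automatically.
# The knot positions and values below are made-up numbers, not fit results.
import gvar as gv

mknot = [1., 3., 5., 7., 9.]                                    # knot positions
fknot = gv.gvar(['1.0(1)', '0.6(1)', '0.4(1)', '0.3(1)', '0.2(1)'])  # knot values
f = gv.cspline.CSpline(mknot, fknot)
print(f(5.0))   # a gvar whose uncertainty comes from the knot values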
def print_results(fit, basis, prior, data):
    print(30 * '=', 'Results\n')
    print(basis.tabulate(fit.p, keyfmt='etab.{s1}'))
    print(basis.tabulate(fit.p, keyfmt='etab.{s1}', eig_srcs=True))
    E = np.cumsum(fit.p['etab.dE'])
    outputs = collections.OrderedDict()
    outputs['a*E(2s-1s)'] = E[1] - E[0]
    outputs['a*E(3s-1s)'] = E[2] - E[0]
    outputs['E(3s-1s)/E(2s-1s)'] = (E[2] - E[0]) / (E[1] - E[0])
    inputs = collections.OrderedDict()
    inputs['prior'] = prior
    inputs['data'] = data
    inputs['svdcut'] = fit.svdcorrection
    print(gv.fmt_values(outputs))
    print(gv.fmt_errorbudget(outputs, inputs, colwidth=18))
def print_results(fit, basis, prior, data):
    print(30 * '=', 'Results\n')
    print(basis.tabulate(fit.p, keyfmt='etab.{s1}'))
    print(basis.tabulate(fit.p, keyfmt='etab.{s1}', eig_srcs=True))
    E = np.cumsum(fit.p['etab.dE'])
    outputs = collections.OrderedDict()
    outputs['a*E(2s-1s)'] = E[1] - E[0]
    outputs['a*E(3s-1s)'] = E[2] - E[0]
    outputs['E(3s-1s)/E(2s-1s)'] = (E[2] - E[0]) / (E[1] - E[0])
    inputs = collections.OrderedDict()
    inputs['prior'] = prior
    inputs['data'] = data
    inputs['svdcut'] = fit.correction
    print(gv.fmt_values(outputs))
    print(gv.fmt_errorbudget(outputs, inputs, colwidth=18))
def print_results(fit, prior, data):
    """ Print results of fit. """
    outputs = collections.OrderedDict()
    outputs['mDs'] = fit.p['dE'][0]
    outputs['Vnn'] = fit.p['Vnn'][0]
    inputs = collections.OrderedDict()
    inputs['statistics'] = data         # statistical errors in data
    inputs['Ds priors'] = {
        k:prior[k] for k in ['log(a)', 'log(dE)', 'log(ao)', 'log(dEo)']
        }
    inputs['V priors'] = {
        k:prior[k] for k in ['Vnn', 'Vno', 'Voo']
        }
    print('\n' + gv.fmt_values(outputs))
    print(gv.fmt_errorbudget(outputs, inputs))
def main():
    x, y = make_data()
    prior = make_prior(100)             # 100 exponential terms in all
    p0 = None
    for nexp in range(1, 6):
        # marginalize the last 100 - nexp terms (in ymod_prior)
        fit_prior = gv.BufferDict()     # part of prior used in fit
        ymod_prior = gv.BufferDict()    # part of prior absorbed in ymod
        for k in prior:
            fit_prior[k] = prior[k][:nexp]
            ymod_prior[k] = prior[k][nexp:]
        ymod = y - fcn(x, ymod_prior)   # remove terms in ymod_prior

        # fit modified data with just nexp terms (in fit_prior)
        fit = lsqfit.nonlinear_fit(
            data=(x, ymod), prior=fit_prior, fcn=fcn,
            p0=p0, tol=1e-15, svdcut=1e-12,
            )

        # print fit information
        print('************************************* nexp =', nexp)
        print(fit.format(True))
        p0 = fit.pmean

    # print summary information and error budget
    E = fit.p['E']                      # best-fit parameters
    a = fit.p['a']
    # outputs = {
    #     'E1/E0':E[1] / E[0], 'E2/E0':E[2] / E[0],
    #     'a1/a0':a[1] / a[0], 'a2/a0':a[2] / a[0]
    #     }
    # inputs = {
    #     'E prior':prior['E'], 'a prior':prior['a'],
    #     'svd cut':fit.correction,
    #     }
    outputs = gv.BufferDict()
    outputs['E2/E0'] = E[2] / E[0]
    outputs['E1/E0'] = E[1] / E[0]
    outputs['a2/a0'] = a[2] / a[0]
    outputs['a1/a0'] = a[1] / a[0]
    inputs = gv.BufferDict()
    inputs['E prior'] = prior['E']
    inputs['svd cut'] = fit.correction
    inputs['a prior'] = prior['a']
    print(gv.fmt_values(outputs))
    print(gv.fmt_errorbudget(outputs, inputs))
def print_results(fit, prior, data):
    """ Report best-fit results. """
    print('Fit results:')
    p = fit.p                           # best-fit parameters

    # etas
    E_etas = np.cumsum(p['etas:dE'])
    a_etas = p['etas:a']
    print('  Eetas:', E_etas[:3])
    print('  aetas:', a_etas[:3])

    # Ds
    E_Ds = np.cumsum(p['Ds:dE'])
    a_Ds = p['Ds:a']
    print('\n  EDs:', E_Ds[:3])
    print('  aDs:', a_Ds[:3])

    # Dso -- oscillating piece
    E_Dso = np.cumsum(p['Dso:dE'])
    a_Dso = p['Dso:a']
    print('\n  EDso:', E_Dso[:3])
    print('  aDso:', a_Dso[:3])

    # V
    Vnn = p['Vnn']
    Vno = p['Vno']
    print('\n  etas->V->Ds  =', Vnn[0, 0])
    print('  etas->V->Dso =', Vno[0, 0])

    # error budget
    outputs = collections.OrderedDict()
    outputs['metas'] = E_etas[0]
    outputs['mDs'] = E_Ds[0]
    outputs['mDso-mDs'] = E_Dso[0] - E_Ds[0]
    outputs['Vnn'] = Vnn[0, 0]
    outputs['Vno'] = Vno[0, 0]
    inputs = collections.OrderedDict()
    inputs['statistics'] = data         # statistical errors in data
    inputs.update(prior)                # all entries in prior
    inputs['svd'] = fit.svdcorrection   # svd cut (if present)
    print('\n' + gv.fmt_values(outputs))
    print(gv.fmt_errorbudget(outputs, inputs))
    print('\n')
def print_results(fit, prior, data):
    """ Report best-fit results. """
    print('Fit results:')
    p = fit.p                           # best-fit parameters

    # etas
    E_etas = np.cumsum(p['etas:dE'])
    a_etas = p['etas:a']
    print('  Eetas:', E_etas[:3])
    print('  aetas:', a_etas[:3])

    # Ds
    E_Ds = np.cumsum(p['Ds:dE'])
    a_Ds = p['Ds:a']
    print('\n  EDs:', E_Ds[:3])
    print('  aDs:', a_Ds[:3])

    # Dso -- oscillating piece
    E_Dso = np.cumsum(p['Dso:dE'])
    a_Dso = p['Dso:a']
    print('\n  EDso:', E_Dso[:3])
    print('  aDso:', a_Dso[:3])

    # V
    Vnn = p['Vnn']
    print('\n  etas->V->Ds =', Vnn[0, 0])

    # error budget
    outputs = collections.OrderedDict()
    outputs['metas'] = E_etas[0]
    outputs['mDs'] = E_Ds[0]
    outputs['Vnn'] = Vnn[0, 0]
    inputs = collections.OrderedDict()
    inputs['statistics'] = data         # statistical errors in data
    inputs['svd'] = fit.correction      # svd cut (if present)
    inputs.update(prior)                # all entries in prior
    print('\n' + gv.fmt_values(outputs))
    print(gv.fmt_errorbudget(outputs, inputs))
    print('\n')
def print_results(fit, basis, prior, data):
    print(30 * '=', 'Results\n')
    print(basis.tabulate(fit.p, keyfmt=tag + '{s1}'))
    if OSC:
        print(basis.tabulate(fit.p, keyfmt=otag + '{s1}'))
    # print(basis.tabulate(fit.p, keyfmt=tag + '{s1}', eig_srcs=True))
    E = np.cumsum(fit.p[tag + 'dE'])
    outputs = collections.OrderedDict()
    outputs['a*E(2s-1s)'] = E[1] - E[0]
    outputs['a*E(3s-1s)'] = E[2] - E[0]
    outputs['E(3s-1s)/E(2s-1s)'] = (E[2] - E[0]) / (E[1] - E[0])
    inputs = collections.OrderedDict()
    inputs['prior'] = prior
    inputs['data'] = data
    inputs['svdcut'] = fit.svdcorrection
    print(gv.fmt_values(outputs))
    print(gv.fmt_errorbudget(outputs, inputs, colwidth=18))
    print('Prior:\n')
    for k in [tag + SRC[0], tag + SRC[1]]:
        print('{:13}{}'.format(k, list(prior[k])))
    print()
def print_results(fit, basis, prior, data):
    print(30 * '=', 'Results\n')
    print(basis.tabulate(fit.p, keyfmt='etab.{s1}'))
    print(basis.tabulate(fit.p, keyfmt='etab.{s1}', eig_srcs=True))
    E = np.cumsum(fit.p['etab.dE'])
    outputs = collections.OrderedDict()
    outputs['a*E(2s-1s)'] = E[1] - E[0]
    outputs['a*E(3s-1s)'] = E[2] - E[0]
    outputs['E(3s-1s)/E(2s-1s)'] = (E[2] - E[0]) / (E[1] - E[0])
    inputs = collections.OrderedDict()
    inputs['prior'] = prior
    inputs['data'] = data
    inputs['svdcut'] = fit.correction
    print(gv.fmt_values(outputs))
    print(gv.fmt_errorbudget(outputs, inputs, colwidth=18))
    print('Prior:\n')
    for k in ['etab.l', 'etab.g', 'etab.d', 'etab.e']:
        print('{:13}{}'.format(k, list(prior[k])))
    print()
    prior_eig = basis.apply(prior, keyfmt='etab.{s1}')
    for k in ['etab.0', 'etab.1', 'etab.2', 'etab.3']:
        print('{:13}{}'.format(k, list(prior_eig[k])))
def do_fit(svdcut=None, do_plot=False):
    if svdcut is None:
        svdcut = lsqfit.nonlinear_fit.set()['svdcut']
        sys.stdout = tee.tee(sys_stdout, open('eg5a.out', 'w'))
        default_svd = True
    else:
        default_svd = False
    x, y = make_data()
    prior = make_prior(100)     # 100 exponential terms in all (10 gives same result)
    p0 = None
    for nexp in range(1, 6):
        # marginalize the last 100 - nexp terms
        fit_prior = gv.BufferDict()     # part of prior used in fit
        ymod_prior = gv.BufferDict()    # part of prior absorbed in ymod
        for k in prior:
            fit_prior[k] = prior[k][:nexp]
            ymod_prior[k] = prior[k][nexp:]
        ymod = y - fcn(x, ymod_prior)

        # fit modified data with just nexp terms
        fit = lsqfit.nonlinear_fit(
            data=(x, ymod), prior=fit_prior, fcn=fcn, p0=p0,
            tol=1e-10, svdcut=svdcut,
            )
        if not default_svd and nexp == 5:
            sys.stdout = tee.tee(sys_stdout, open('eg5b.out', 'w'))
        print('************************************* nexp =', nexp)
        print(fit.format(True))
        p0 = fit.pmean

        if do_plot:
            import matplotlib.pyplot as plt
            if nexp > 4:
                continue
            plt.subplot(2, 2, nexp)
            if nexp not in [1, 3]:
                plt.yticks([0.05, 0.10, 0.15, 0.20, 0.25], [])
            else:
                plt.ylabel('y')
            if nexp not in [3, 4]:
                plt.xticks([1.0, 1.5, 2.0, 2.5], [])
            else:
                plt.xlabel('x')
            plt.errorbar(x=x, y=gv.mean(ymod), yerr=gv.sdev(ymod), fmt='bo')
            plt.plot(x, y, '-r')
            plt.plot(x, fcn(x, fit.pmean), ':k')
            plt.text(1.75, 0.22, 'nexp = {}'.format(nexp))
            if nexp == 4:
                plt.savefig('eg5.png', bbox_inches='tight')
                plt.show()

    # print summary information and error budget
    E = fit.p['E']      # best-fit parameters
    a = fit.p['a']
    outputs = {
        'E1/E0': E[1] / E[0], 'E2/E0': E[2] / E[0],
        'a1/a0': a[1] / a[0], 'a2/a0': a[2] / a[0],
        }
    inputs = {
        'E prior': prior['E'], 'a prior': prior['a'],
        'svd cut': fit.svdcorrection,
        }
    print(gv.fmt_values(outputs))
    print(gv.fmt_errorbudget(outputs, inputs))
    sys.stdout = sys_stdout
    }

# a priori values for fit parameters
prior = dict(a=gv.gvar(0.5, 0.5), b=gv.gvar(0.5, 0.5))

# print(y["data1"][0].mean, "+-", y["data1"][0].sdev)
# print(gv.evalcov(y["data1"]))

def fcn(x, p):                  # fit function of x and parameters p
    ans = {}
    for k in ["data1", "data2"]:
        ans[k] = gv.exp(p['a'] + x[k] * p['b'])
    ans['b/a'] = p['b'] / p['a']
    return ans

# do the fit
fit = lsqfit.nonlinear_fit(data=(x, y), prior=prior, fcn=fcn)
sys.stdout = open("eg0.out", "w")
print(fit.format(maxline=True))     # print standard summary of fit

p = fit.p                           # best-fit values for parameters
outputs = dict(a=p['a'], b=p['b'])
outputs['b/a'] = p['b'] / p['a']
inputs = dict(y=y, prior=prior)
print(gv.fmt_values(outputs))               # tabulate outputs
print(gv.fmt_errorbudget(outputs, inputs))  # print error budget for outputs

# save best-fit values in file "outputfile.p" for later use
import pickle
pickle.dump(fit.p, open("outputfile.p", "wb"))
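# --- minimal sketch (added for illustration; not part of the original file) ---
# The pickled best-fit values saved above can be read back later with pickle
# (fit.p is a gvar.BufferDict, which supports pickling); the entries come back
# as gvars. Assumes "outputfile.p" was written by the pickle.dump call above.
import pickle
with open("outputfile.p", "rb") as ifile:
    saved_p = pickle.load(ifile)
print(saved_p['a'], saved_p['b'], saved_p['b'] / saved_p['a'])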
def main():
    x, y = make_data()      # make fit data
    # y = gv.gvar(gv.mean(y), 0.75**2 * gv.evalcov(y))
    p0 = None               # make larger fits go faster (opt.)
    sys_stdout = sys.stdout
    sys.stdout = tee.tee(sys.stdout, open("eg1.out", "w"))
    for nexp in range(1, 7):
        prior = make_prior(nexp)
        fit = lsqfit.nonlinear_fit(data=(x, y), fcn=fcn, prior=prior, p0=p0)
        if fit.chi2 / fit.dof < 1.:
            p0 = fit.pmean  # starting point for next fit (opt.)
        print('************************************* nexp =', nexp)
        print(fit.format())     # print the fit results
        E = fit.p['E']          # best-fit parameters
        a = fit.p['a']
        if nexp > 2:
            print('E1/E0 =', E[1] / E[0], ' E2/E0 =', E[2] / E[0])
            print('a1/a0 =', a[1] / a[0], ' a2/a0 =', a[2] / a[0])
        print()

    # error budget
    outputs = {
        'E1/E0': E[1] / E[0], 'E2/E0': E[2] / E[0],
        'a1/a0': a[1] / a[0], 'a2/a0': a[2] / a[0],
        }
    # inputs = {'E': fit.prior['E'], 'a': fit.prior['a'], 'y': y}
    inputs = collections.OrderedDict()
    inputs['a'] = fit.prior['a']
    inputs['E'] = fit.prior['E']
    inputs['y'] = fit.data[1]
    print('================= Error Budget Analysis')
    print(gv.fmt_values(outputs))
    print(gv.fmt_errorbudget(outputs, inputs))

    sys.stdout = sys_stdout
    # print(gv.gvar(str(a[1])) / gv.gvar(str(a[0])))
    # print(gv.evalcorr([fit.p['a'][1], fit.p['E'][1]]))
    # print(fit.format(True))

    # redo fit with 4 parameters since that is enough
    prior = make_prior(4)
    fit = lsqfit.nonlinear_fit(data=(x, y), fcn=fcn, prior=prior, p0=fit.pmean)
    sys.stdout = tee.tee(sys_stdout, open("eg1a.out", "w"))
    print('--------------------- original fit')
    print(fit.format())
    E = fit.p['E']      # best-fit parameters
    a = fit.p['a']
    print('E1/E0 =', E[1] / E[0], ' E2/E0 =', E[2] / E[0])
    print('a1/a0 =', a[1] / a[0], ' a2/a0 =', a[2] / a[0])
    print()

    # extra data 1
    print('\n--------------------- new fit to extra information')
    def ratio(p):
        return p['a'][1] / p['a'][0]
    newfit = lsqfit.nonlinear_fit(data=gv.gvar(1, 1e-5), fcn=ratio, prior=fit.p)
    print(newfit.format())
    E = newfit.p['E']
    a = newfit.p['a']
    print('E1/E0 =', E[1] / E[0], ' E2/E0 =', E[2] / E[0])
    print('a1/a0 =', a[1] / a[0], ' a2/a0 =', a[2] / a[0])

    if DO_PLOT:
        import matplotlib.pyplot as plt
        ratio = y / fit.fcn(x, fit.pmean)
        plt.xlim(4, 15)
        plt.ylim(0.95, 1.05)
        plt.xlabel('x')
        plt.ylabel('y / f(x,p)')
        plt.yticks(
            [0.96, 0.98, 1.00, 1.02, 1.04],
            ['0.96', '0.98', '1.00', '1.02', '1.04'],
            )
        plt.errorbar(x=x, y=gv.mean(ratio), yerr=gv.sdev(ratio), fmt='ob')
        plt.plot([4.0, 21.0], [1.0, 1.0], 'b:')
        plt.savefig('eg1.png', bbox_inches='tight')
        plt.show()

    # alternate method for extra data
    sys.stdout = tee.tee(sys_stdout, open("eg1b.out", "w"))
    fit.p['a1/a0'] = fit.p['a'][1] / fit.p['a'][0]
    new_data = {'a1/a0': gv.gvar(1, 1e-5)}
    new_p = lsqfit.wavg([fit.p, new_data])
    print('chi2/dof = %.2f\n' % (new_p.chi2 / new_p.dof))
    print('E:', new_p['E'][:4])
    print('a:', new_p['a'][:4])
    print('a1/a0:', new_p['a1/a0'])

    if DO_BAYES:
        # Bayesian fit
        gv.ranseed([123])
        prior = make_prior(4)
        fit = lsqfit.nonlinear_fit(data=(x, y), fcn=fcn, prior=prior, p0=fit.pmean)
        sys.stdout = tee.tee(sys_stdout, open("eg1c.out", "w"))
        # print(fit)
        expval = lsqfit.BayesIntegrator(fit, limit=10.)
        # adapt integrator to PDF
        expval(neval=40000, nitn=10)

        # calculate expectation value of function g(p)
        fit_hist = gv.PDFHistogram(fit.p['E'][0])
        def g(p):
            parameters = [p['a'][0], p['E'][0]]
            return dict(
                mean=parameters,
                outer=np.outer(parameters, parameters),
                hist=fit_hist.count(p['E'][0]),
                )
        r = expval(g, neval=40000, nitn=10, adapt=False)

        # print(results)
        print(r.summary())
        means = r['mean']
        cov = r['outer'] - np.outer(r['mean'], r['mean'])
        print('Results from Bayesian Integration:')
        print('a0: mean =', means[0], ' sdev =', cov[0, 0] ** 0.5)
        print('E0: mean =', means[1], ' sdev =', cov[1, 1] ** 0.5)
        print('covariance from Bayesian integral =',
              np.array2string(cov, prefix=36 * ' '))
        print()
        print('Results from Least-Squares Fit:')
        print('a0: mean =', fit.p['a'][0].mean, ' sdev =', fit.p['a'][0].sdev)
        print('E0: mean =', fit.p['E'][0].mean, ' sdev =', fit.p['E'][0].sdev)
        print('covariance from least-squares fit =',
              np.array2string(
                  gv.evalcov([fit.p['a'][0], fit.p['E'][0]]),
                  prefix=36 * ' ', precision=3,
                  ))
        sys.stdout = sys_stdout

        # make histogram of E[0] probability
        plt = fit_hist.make_plot(r['hist'])
        plt.xlabel('$E_0$')
        plt.ylabel('probability')
        plt.savefig('eg1c.png', bbox_inches='tight')
        # plt.show()

    if DO_BOOTSTRAP:
        Nbs = 40    # number of bootstrap copies
        outputs = {
            'E1/E0': [], 'E2/E0': [], 'a1/a0': [], 'a2/a0': [],
            'E1': [], 'a1': [],
            }   # results
        for bsfit in fit.bootstrap_iter(n=Nbs):
            E = bsfit.pmean['E']    # best-fit parameters
            a = bsfit.pmean['a']
            outputs['E1/E0'].append(E[1] / E[0])    # accumulate results
            outputs['E2/E0'].append(E[2] / E[0])
            outputs['a1/a0'].append(a[1] / a[0])
            outputs['a2/a0'].append(a[2] / a[0])
            outputs['E1'].append(E[1])
            outputs['a1'].append(a[1])
            # print(E[:2])
            # print(a[:2])
            # print(bsfit.chi2 / bsfit.dof)

        # extract means and standard deviations from the bootstrap output
        for k in outputs:
            outputs[k] = gv.gvar(np.mean(outputs[k]), np.std(outputs[k]))
        print('Bootstrap results:')
        print('E1/E0 =', outputs['E1/E0'], ' E2/E0 =', outputs['E2/E0'])
        print('a1/a0 =', outputs['a1/a0'], ' a2/a0 =', outputs['a2/a0'])
        print('E1 =', outputs['E1'], ' a1 =', outputs['a1'])