def test_bootstrap_lsqfit(self):
    """Bootstrap a MultiFitter.lsqfit result via all three input styles.

    Exercises ``fit.bootstrapped_fit_iter`` with (1) an explicit
    ``datalist`` of bootstrap copies, (2) a ``pdatalist`` of
    pre-processed copies, and (3) internally generated copies (``n=10``),
    checking each time that the bootstrap-averaged parameters agree with
    the known values a=1.0, b=0.5 within 5 sigma.
    """
    def _collect_and_check(bs_fit_iter):
        # Accumulate the mean parameters of each bootstrap fit, then
        # verify the bootstrap average against the expected values.
        # (This block was previously repeated verbatim three times.)
        ds = gv.dataset.Dataset()
        for bf in bs_fit_iter:
            ds.append(bf.pmean)
        p = gv.dataset.avg_data(ds, bstrap=True)
        self.assertTrue(abs(p['a'].mean - 1.) < 5 * p['a'].sdev)
        self.assertTrue(abs(p['b'].mean - 0.5) < 5 * p['b'].sdev)
        self.assertEqual(ds.samplesize, 10)

    fitter = MultiFitter(models=self.make_models(ncg=1))
    fit = fitter.lsqfit(data=self.data, prior=self.prior)

    # 1) explicit list/iterator of bootstrap copies of the raw data
    datalist = gv.bootstrap_iter(self.data, n=10)
    _collect_and_check(fit.bootstrapped_fit_iter(datalist=datalist))

    # 2) bootstrap copies pre-processed through fitter.process_data
    pdatalist = (
        fitter.process_data(d, fitter.models)
        for d in gv.bootstrap_iter(self.data, n=10)
    )
    _collect_and_check(fit.bootstrapped_fit_iter(pdatalist=pdatalist))

    # 3) let the fit generate its own bootstrap copies internally
    _collect_and_check(fit.bootstrapped_fit_iter(n=10))
def make_bs(m_N, e_NN, n=100):
    """Create bootstrap samples of the (correlated) energies.

    Re-samples the normally distributed energies ``m_N`` and ``e_NN``
    together, preserving their correlations.

    Args:
        m_N: nucleon-mass data (gvar-compatible).
        e_NN: two-nucleon-energy data (gvar-compatible).
        n: number of bootstrap samples to draw (default 100).

    Returns:
        list of ``n`` bootstrap copies, each a dict with keys
        ``'m_n'`` and ``'e_nn'``.
    """
    samples = {'m_n': m_N, 'e_nn': e_NN}
    return list(gv.bootstrap_iter(samples, n=n))
def test_bootstrap_chained_lsqfit(self):
    """Bootstrap a MultiFitter.chained_lsqfit result.

    Feeds an explicit ``datalist`` of 10 bootstrap copies to
    ``fit.bootstrapped_fit_iter`` and checks that the bootstrap-averaged
    parameters agree with a=1.0, b=0.5 within 5 sigma.
    """
    fitter = MultiFitter(models=self.make_models(ncg=1))
    fit = fitter.chained_lsqfit(data=self.data, prior=self.prior)

    boot_data = gv.bootstrap_iter(self.data, n=10)
    samples = gv.dataset.Dataset()
    for bfit in fit.bootstrapped_fit_iter(datalist=boot_data):
        samples.append(bfit.pmean)

    avg = gv.dataset.avg_data(samples, bstrap=True)
    self.assertTrue(abs(avg['a'].mean - 1.) < 5 * avg['a'].sdev)
    self.assertTrue(abs(avg['b'].mean - 0.5) < 5 * avg['b'].sdev)
def add_noise(data, frac):
    """ add noise to correlators in list corrlist; frac = rel. size """
    # one shared gvar => the multiplicative noise is fully correlated
    # across every correlator and timeslice
    overall = gv.gvar(1, frac)
    out = gv.BufferDict()
    for key in data:
        corr = data[key]
        # a) uncorrelated additive noise; widths are smeared over the
        #    two neighboring timeslices so zero entries still get a
        #    nonzero width
        width = np.abs(corr * frac)
        width[1:-1] = (width[1:-1] + width[2:] + width[:-2]) / 3.0
        zero_mean = gv.gvar(np.zeros(width.shape), width)
        # draw one bootstrap sample so the noise is a concrete offset
        jitter = next(gv.bootstrap_iter(zero_mean))
        # b) apply the correlated multiplicative noise
        out[key] = (corr + jitter) * overall
    return out
if vs_mpi: qsq_m = qsq / mpi**2 qcotd_m = all_qcotd[k] * mN / mpi else: qsq_m = qsq / mN**2 qcotd_m = all_qcotd[k] print('%d& %3s& %s& %s& %s& %s& %s& %s& %s& %s& %s& %s\\\\' \ %(Psq, irrep, n, s1, en1, s2, en2, de_nn, e_nn, np.sqrt(E_cmSq), qsq_m, qcotd_m)) # put parent x-variable into dictionary all_data[k] = e_nn #print(all_data) # Using correlated parent-x variables, create BS distribution of them for different data sets all_data_bs = list(gv.bootstrap_iter(all_data, n=n_bs)) # create container for x-variable for plotting qsqmn_range = np.arange(-0.04, 0.501, .001) qsqmn_plot = {k: k for k in qsqmn_range} # create dictionaries to hold mean and BS results qcotd_vals_0 = dict() qsq_result_0 = dict() qcotd_vals_bs = dict() qsq_result_bs = dict() for k in all_data_bs[0]: if k != 'm_n': x = [] y = [] for bs in range(n_bs):