# number of samples/burn-in per point
n_samples, n_burn_in = 100000, 500
mixing_angles = np.array([.5 * np.pi])
min_step = 0.2
max_step = 0.7
data_res = 500
plot_res = 10000
step_sizes = np.linspace(min_step, max_step, data_res)
separations = np.arange(500)

# theoretical functions
tintTh = None
paccTn = lambda dtau: acceptance(dtau=dtau, tau=dtau * n_steps, n=n, m=m)
opTh = lambda null: x2_1df(m, n, spacing, 0)

main(x0, pot, file_name, n_samples, n_burn_in, mixing_angles, step_sizes,
    separations=separations, opFn=x_sq, tintTh=tintTh, paccTh=paccTn,
    opTh=opTh, plot_res=plot_res, n_steps=n_steps)
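# The theoretical acceptance above comes from the repo's `acceptance` helper
# (not shown in this excerpt). As a hedged, standalone sketch of the relation
# it should approximate: for leapfrog HMC with a Gaussian-distributed energy
# violation dH, the standard asymptotic result is
#     <P_acc> = erfc(sqrt(<dH>) / 2).
# The function below only illustrates that relation; the repo's acceptance()
# may use a different parameterisation (e.g. dtau, tau, n, m directly).
from scipy.special import erfc
import numpy as np

def expected_acceptance(avg_dH):
    """Asymptotic leapfrog acceptance rate for a mean energy violation <dH>."""
    return erfc(0.5 * np.sqrt(avg_dH))

# e.g. a mean energy violation of 0.1 gives an expected acceptance of ~0.82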
n, dim = 20, 1
x0 = np.random.random((n,)*dim)
spacing = 1.
step_size = .1
n_steps = 1
points = 50
tau = n_steps*step_size

n_samples, n_burn_in = 100000, 20

h_res = np.linspace(-0.15, 0.15, 100, True)
l_res = np.linspace(0.151, 0.85, points, True)
angle_fracs = np.concatenate([h_res, l_res, h_res+1, l_res+1, h_res+2])

opFn = lambda samples: twoPoint(samples, separation=0)
op_name = r'$\hat{O}_{pq} = \phi_0^2$'

# theoretical calculations
op_theory = x2_1df(mu=pot.m, n=x0.size, a=spacing, sep=0)
pacc_theory = acceptance(dtau=step_size, tau=tau, n=x0.size, m=pot.m, t=tau*n_samples)
acth = AC_Theory(tau=n_steps*step_size, m=pot.m)
iTauTheory = acth.integrated

if __name__ == '__main__':
    intac.main(x0, pot, file_name,
        n_samples=n_samples, n_burn_in=n_burn_in,
        spacing=spacing, step_size=step_size, n_steps=n_steps,
        opFn=opFn, op_name=op_name, angle_fracs=angle_fracs,
        iTauTheory=iTauTheory, pacc_theory=pacc_theory, op_theory=op_theory,
        save=True)
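# `angle_fracs` above parameterises the partial momentum refreshment of
# generalised HMC: each fraction is (presumably) converted to a mixing angle
# theta = frac * pi inside intac.main. A minimal sketch of the update such an
# angle controls, under that assumption (the names here are illustrative, not
# the repo's API):
import numpy as np

def partial_refresh(p, theta, rng=np.random):
    """Mix the old momenta with fresh Gaussian noise.

    theta = pi/2 is a full refresh (standard HMC); theta = 0 keeps the
    previous momenta unchanged (pure molecular dynamics).
    """
    noise = rng.normal(size=p.shape)
    return p * np.cos(theta) + noise * np.sin(theta)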
def main(x0, pot, file_name, n_samples, n_burn_in, c_len=5, step_size=.5,
        n_steps=50, spacing=1., free=True, logscale=False, save=False):
    """A wrapper function

    Required Inputs
        x0          :: np.array :: initial position input to the HMC algorithm
        pot         :: potential class :: defined in hmc.potentials
        file_name   :: string :: the final plot is saved with a similar name if save=True
        n_samples   :: int :: number of HMC samples
        n_burn_in   :: int :: number of burn-in samples

    Optional Inputs
        c_len       :: int :: length of correlation
        step_size   :: float :: MDMC step size
        n_steps     :: int :: number of MDMC steps
        spacing     :: float :: lattice spacing
        save        :: bool :: True saves the plot, False prints to the screen
        free        :: bool :: assumes a free field
        logscale    :: bool :: add a logscale
    """
    rng = np.random.RandomState()
    model = Model(x0, pot=pot, spacing=spacing, rng=rng, step_size=step_size,
        n_steps=n_steps, rand_steps=True)
    c = corr.Correlations_1d(model)
    subtitle = r"Lattice: ${}$; $a={:.1f}$".format(x0.shape, spacing)
    length = model.x0.size

    if hasattr(pot, 'm'):
        m0 = 1  # pot.m0
        mu = 1  # pot.mu
        th_x_sq = np.asarray(
            # [qho_theory(spacing, mu, length, i) for i in range(c_len)])
            [x2_1df(mu, length, spacing, i) for i in range(c_len)])
        print 'theory: <x(0)x(0)> = {}'.format(th_x_sq[0])
        subtitle += r"; $m=1$; $M=10^{}$".format(int(np.log10(n_samples)))
    else:
        th_x_sq, m0, mu = None, None, None

    print 'Running Model: {}'.format(file_name)
    c.runModel(n_samples=n_samples, n_burn_in=n_burn_in, verbose=True)

    def core(i):
        """multiprocessing worker: measure the two-point function at
        separation i and estimate its error and autocorrelation time"""
        av = c.getTwoPoint(separation=i)
        ans = errors.uWerr(c.op_samples)                    # get errors
        f_aav, f_diff, _, itau, itau_diff, _, acns = ans    # extract data
        return f_aav, f_diff, itau

    # measure each separation in parallel
    ans = prll_map(core, xrange(c_len), verbose=True)
    c_fn, errs, itau = zip(*ans)
    c_fn = np.asarray(c_fn)
    errs = np.asarray(errs)

    print 'Finished Running Model: {}'.format(file_name)

    av_x0_sq = c_fn[0]
    print 'measured: <x(0)x(0)> = {}'.format(av_x0_sq)

    # make the theory curve periodic in the lattice length
    if free:
        th_x_sq = np.tile(th_x_sq[:x0.shape[0]], c_len // x0.shape[0] + 1)[:c_len]
    else:
        th_x_sq = None

    all_plot = {'c_fn': c_fn, 'errs': errs, 'th_x_sq': th_x_sq,
        'spacing': spacing, 'subtitle': subtitle, 'logscale': logscale}
    store.store(all_plot, file_name, '_allPlot')

    plot(c_fn, errs, th_x_sq, spacing, subtitle, logscale,
        save=saveOrDisplay(save, file_name))
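# `x2_1df` above is the repo's theory function for the lattice two-point
# correlator. As a hedged check of the value it should approach for a large
# lattice: for the discretised 1d oscillator / free field with mass mu and
# spacing a (kinetic mass set to 1, as in main() above), the infinite-lattice
# result is
#     <x_0 x_t> = r**t / (2*mu*sqrt(1 + (a*mu)**2/4)),
#     r = b - sqrt(b*b - 1),  b = 1 + (a*mu)**2/2.
# The repo's x2_1df presumably also includes the finite-size (periodic)
# corrections that the np.tile wrapping in main() relies on; the helper
# below is only an assumption-labelled sketch of the large-lattice limit.
import numpy as np

def x_sq_infinite_lattice(mu, a, sep=0):
    """Infinite-lattice <x(0)x(sep*a)> for the discretised 1d free field."""
    b = 1. + 0.5 * (a * mu)**2
    r = b - np.sqrt(b*b - 1.)
    return r**sep / (2. * mu * np.sqrt(1. + 0.25 * (a * mu)**2))

# e.g. mu = 1, a = 1 gives <x^2> ~ 0.447 at zero separation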