Exemplo n.º 1
0
def test_2():
    """The estimator should recover the known AR(1) autocorrelation time."""
    tau_est = integrated_autocorr1(SAMPLES)

    # Reference:
    # http://www.hep.fsu.edu/~berg/teach/mcmc08/material/lecture07mcmc3.pdf
    # For a large exponential autocorrelation time t_exp, the integrated
    # autocorrelation time is approximately 2 * t_exp; for an AR(1) model
    # t_exp = -1/log(phi).
    theoretical = -2 / np.log(PHI)

    # One chain in -> one estimate out.
    assert tau_est.shape == (1,)
    # Estimate must land within 30% of the theoretical value.
    assert np.abs(tau_est - theoretical) < 0.3 * theoretical
Exemplo n.º 2
0
def test_2():
    """Check integrated_autocorr1 against the analytic AR(1) result."""
    estimate = integrated_autocorr1(SAMPLES)

    # See http://www.hep.fsu.edu/~berg/teach/mcmc08/material/lecture07mcmc3.pdf:
    # when the exponential autocorrelation time t_exp is large, the
    # integrated time is ~ 2 * t_exp, and an AR(1) chain has
    # t_exp = -1/log(phi).
    target = -2 / np.log(PHI)

    assert estimate.shape == (1,)          # single-chain input -> shape (1,)
    assert np.abs(estimate - target) < 0.3 * target  # within 30% of theory
Exemplo n.º 3
0
        model.kern.rbf.lengthscale.constrain_positive()
        model.kern.rbf.variance.constrain_positive()
        model.kern.white.variance.constrain_positive()
        model.kern.rbf.lengthscale.set_prior(priors['rbf_lengthscale'])
        model.kern.rbf.variance.set_prior(priors['rbf_variance'])
        model.kern.white.variance.fix(1e-3)
    return model


# 32 x 32 calculations.
# Leapfrog steps per HMC trajectory (used for the timing bookkeeping below).
averageLeapFrogSteps = 10
acf_cutoff = 0.05  #Following estimation method of Hoffman and Gelman 2011 "The no U-turn sampler: Adaptively setting path lengths in Hamiltonian Monte Carlo"
# Number of leading samples dropped from every chain before analysis.
burn_in = 100

# Long exact-MCMC baseline run on the 32x32 grid: worst-case (max over all
# dimensions) integrated autocorrelation time after burn-in.
full_mcmc_samples_32 = np.loadtxt('long_run_exact_samples.np')
auto_correlation_time_mcmc_full_32 = pyhmc.integrated_autocorr1(
    full_mcmc_samples_32[burn_in:, :], acf_cutoff).max()

# Variational run (225 inducing points) on the 32x32 grid, same statistic.
variational_samples_32 = np.loadtxt(
    '225_inducing_point_samples_32_grid_comma.np', delimiter=',')
auto_correlation_time_variational_32 = pyhmc.integrated_autocorr1(
    variational_samples_32[burn_in:, :], acf_cutoff).max()

# Variational run (225 inducing points) on the 64x64 grid, same statistic.
variational_samples_64 = np.loadtxt(
    '225_inducing_point_samples_64_grid_comma.np', delimiter=',')
auto_correlation_time_variational_64 = pyhmc.integrated_autocorr1(
    variational_samples_64[burn_in:, :], acf_cutoff).max()

#Now do the timing bit.

# Build the models to be timed.  NOTE(review): the first argument presumably
# selects variational (True) vs exact (False) inference -- confirm against
# build_reference_experiment_model's definition earlier in the file.
vb_32_model = build_reference_experiment_model(True, 32)
vb_64_model = build_reference_experiment_model(True, 64)
Exemplo n.º 4
0
        model.optimize('bfgs',messages=True,max_iters = 100)
        model.kern.rbf.lengthscale.constrain_positive()
        model.kern.rbf.variance.constrain_positive()
        model.kern.white.variance.constrain_positive()
        model.kern.rbf.lengthscale.set_prior(priors['rbf_lengthscale'])
        model.kern.rbf.variance.set_prior(priors['rbf_variance'])
        model.kern.white.variance.fix(1e-3)
    return model

# 32 x 32 calculations.
averageLeapFrogSteps = 10
# Cutoff for the autocorrelation function, following the estimation method of
# Hoffman and Gelman 2011, "The no U-turn sampler: Adaptively setting path
# lengths in Hamiltonian Monte Carlo".
acf_cutoff = 0.05
# Discard this many leading samples from each chain.
burn_in = 100

# Exact (full) MCMC baseline on the 32x32 grid: worst-case integrated
# autocorrelation time over all dimensions, after burn-in.
full_mcmc_samples_32 = np.loadtxt('long_run_exact_samples.np')
auto_correlation_time_mcmc_full_32 = pyhmc.integrated_autocorr1(
    full_mcmc_samples_32[burn_in:, :], acf_cutoff).max()

# Variational runs (225 inducing points) on the 32x32 and 64x64 grids.
variational_samples_32 = np.loadtxt(
    '225_inducing_point_samples_32_grid_comma.np', delimiter=',')
auto_correlation_time_variational_32 = pyhmc.integrated_autocorr1(
    variational_samples_32[burn_in:, :], acf_cutoff).max()

variational_samples_64 = np.loadtxt(
    '225_inducing_point_samples_64_grid_comma.np', delimiter=',')
auto_correlation_time_variational_64 = pyhmc.integrated_autocorr1(
    variational_samples_64[burn_in:, :], acf_cutoff).max()

# Now do the timing bit.
vb_32_model = build_reference_experiment_model(True, 32)
vb_64_model = build_reference_experiment_model(True, 64)
mc_32_model = build_reference_experiment_model(False, 32)

number_of_trials = 10
Exemplo n.º 5
0
# Accumulators for the integrated-autocorrelation-time estimates: one list
# per estimator variant (tau1 is initialized earlier in the file).
tau2 = []
tau3 = []
tau4 = []
tau5 = []
tau6 = []

# Average the estimates over several independent AR(1) realizations.
n_trials = 10
for i in range(n_trials):
    # Fresh AR(1) chain each trial; random_state=None means an unseeded RNG,
    # so every trial is an independent realization.
    y = generate_AR1(phi=PHI,
                     sigma=1,
                     n_steps=n_steps,
                     c=0,
                     y0=0,
                     random_state=None)

    # For each estimator, compute tau on growing prefixes y[:n], n in `grid`.
    # NOTE(review): `grid` is defined earlier in the file, presumably via
    # np.logspace; float slice bounds raise on modern NumPy -- confirm it is
    # cast to int.
    tau1.append([integrated_autocorr1(y[:n]) for n in grid])
    tau2.append([integrated_autocorr2(y[:n]) for n in grid])
    tau3.append([integrated_autocorr3(y[:n]) for n in grid])
    tau4.append([integrated_autocorr4(y[:n]) for n in grid])
    tau5.append([integrated_autocorr5(y[:n]) for n in grid])
    tau6.append([integrated_autocorr6(y[:n]) for n in grid])

# Mean +/- std of estimator 1 across trials, versus chain length.
pp.errorbar(grid,
            y=np.mean(tau1, axis=0),
            yerr=np.std(tau1, axis=0),
            c='b',
            label='tau 1')
pp.errorbar(grid - 1,
            y=np.mean(tau2, axis=0),
            yerr=np.std(tau2, axis=0),
            c='r',
Exemplo n.º 6
0
def test_3():
    """Two stacked copies of the chain yield one estimate per column."""
    two_chains = np.hstack((SAMPLES, SAMPLES))
    tau = integrated_autocorr1(two_chains)
    # One autocorrelation-time estimate per chain (column).
    assert tau.shape == (2,)
Exemplo n.º 7
0
# Exact integrated autocorrelation time of an AR(1) process with lag-1
# coefficient PHI: tau = 2/(1 - phi) - 1.
TRUE = 2/(1-PHI) - 1
n_steps = 1000000
# Ten log-spaced chain lengths from 100 up to n_steps.  Cast to int so the
# values are valid slice bounds below: np.logspace returns floats, and float
# slice indices are rejected by modern NumPy/Python.
grid = np.logspace(2, np.log10(n_steps), 10).astype(int)

# One accumulator per estimator variant; each entry holds one trial's curve
# of estimates across the lengths in `grid`.
tau1 = []
tau2 = []
tau3 = []
tau4 = []
tau5 = []
tau6 = []

# Average each estimator over independent AR(1) realizations.
n_trials = 10
for i in range(n_trials):
    # Fresh chain each trial (random_state=None -> new seed every time).
    y = generate_AR1(phi=PHI, sigma=1, n_steps=n_steps, c=0, y0=0, random_state=None)

    # Estimate tau on growing prefixes y[:n] for each length n in grid.
    tau1.append([integrated_autocorr1(y[:n]) for n in grid])
    tau2.append([integrated_autocorr2(y[:n]) for n in grid])
    tau3.append([integrated_autocorr3(y[:n]) for n in grid])
    tau4.append([integrated_autocorr4(y[:n]) for n in grid])
    tau5.append([integrated_autocorr5(y[:n]) for n in grid])
    tau6.append([integrated_autocorr6(y[:n]) for n in grid])

# Mean +/- std of each estimator versus chain length; the small horizontal
# offsets (grid-1, grid-5, ...) keep the error bars from overlapping.
pp.errorbar(grid, y=np.mean(tau1, axis=0), yerr=np.std(tau1, axis=0), c='b',    label='tau 1')
pp.errorbar(grid-1, y=np.mean(tau2, axis=0), yerr=np.std(tau2, axis=0), c='r',    label='tau 2')
pp.errorbar(grid-5, y=np.mean(tau3, axis=0), yerr=np.std(tau3, axis=0), c='g',    label='tau 3')
pp.errorbar(grid-10, y=np.mean(tau4, axis=0), yerr=np.std(tau4, axis=0), c='gold', label='tau 4')
pp.errorbar(grid-20, y=np.mean(tau5, axis=0), yerr=np.std(tau5, axis=0), c='m',    label='tau 5')
pp.errorbar(grid-30, y=np.mean(tau6, axis=0), yerr=np.std(tau6, axis=0), c='cyan', label='tau 6')


# Horizontal reference line at the analytic value.
pp.plot(grid, [TRUE]*len(grid), 'k-')
Exemplo n.º 8
0
def test_3():
    """Stacking the chain next to itself doubles the number of estimates."""
    doubled = np.hstack((SAMPLES, SAMPLES))
    corr_times = integrated_autocorr1(doubled)
    assert corr_times.shape == (2,)  # one estimate per input column