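# The test method below is a fragment of a unit-test class; it assumes the
# enclosing module imports numpy as np, scipy.stats.norm and an
# assert_almost_equal helper (e.g. from numpy.testing), and provides the
# project helpers gen_X, _vector_of_log_likelihoods and austerity together
# with the constants SEED, SAMPLE_SIZE, SIGMA_1 and SIGMA_2.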
    def test_austerity(self):
        np.random.seed(SEED)
        X = gen_X(SAMPLE_SIZE)

        def vectorized_log_lik(X, theta):
            return _vector_of_log_likelihoods(theta[0], theta[1], X)

        def log_density_prior(theta):
            return np.log(norm.pdf(theta[0], 0, SIGMA_1)) + np.log(
                norm.pdf(theta[1], 0, SIGMA_2))

        sample, _ = austerity(vectorized_log_lik,
                              log_density_prior,
                              X,
                              0.01,
                              batch_size=50,
                              chain_size=10,
                              thinning=1,
                              theta_t=np.random.randn(2))
        assert_almost_equal(np.array([-0.2554517, 1.3805683]), sample[-1])
from sampplers.MetropolisHastings import metropolis_hastings
import numpy as np
from scipy.stats import norm
from statsmodels.tsa.stattools import acf

# gen_X, _vector_of_log_likelihoods, austerity and the constants SEED,
# SAMPLE_SIZE, SIGMA_1 and SIGMA_2 are provided by the surrounding project.

np.random.seed(SEED)
X = gen_X(SAMPLE_SIZE)


def vectorized_log_lik(X, theta):
    return _vector_of_log_likelihoods(theta[0], theta[1], X)


def log_density_prior(theta):
    # equivalently: norm.logpdf(theta[0], 0, SIGMA_1) + norm.logpdf(theta[1], 0, SIGMA_2)
    return np.log(norm.pdf(theta[0], 0, SIGMA_1)) + np.log(
        norm.pdf(theta[1], 0, SIGMA_2))


# austerity also returns an evaluation count (cf. Example #4 below); only the
# chain itself is needed here.
sample, _ = austerity(vectorized_log_lik,
                      log_density_prior,
                      X,
                      0.01,
                      batch_size=50,
                      chain_size=20 * 1000,
                      thinning=1,
                      theta_t=np.random.randn(2))

print(acf(sample[:, 1], nlags=50))

#
# import seaborn as sns
# sns.set(color_codes=True)
# with sns.axes_style("white"):
#     pr = sns.jointplot(x=sample[:,0], y=sample[:,1], kind="kde", color="k");
#
#     # pr.savefig('../../write_up/img/mcmc_sample.pdf')
#
#     sns.plt.show()
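
# A minimal sketch (not part of the project's helpers) of turning the ACF
# printed above into a thinning interval: take the smallest lag whose
# autocorrelation is closest to 0.5, as the get_thinning helper in
# Example #4 below does.
lags = acf(sample[:, 1], nlags=50, fft=True)
thinning_interval = int(np.argmin(np.abs(lags - 0.5)) + 1)
thinned_sample = sample[::thinning_interval]
print('suggested thinning interval:', thinning_interval)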

Example #4
def get_thinning(X, nlags=50):
    # thinning interval = smallest lag at which the chain's autocorrelation
    # is closest to 0.5
    autocorrelation = acf(X, nlags=nlags, fft=True)
    thinning = np.argmin(np.abs(autocorrelation - 0.5)) + 1
    return thinning, autocorrelation


def grad_log_lik(t):
    # per-observation log-likelihood gradients summed over the data
    # (manual_grad is a project helper), combined with the Gaussian prior terms
    a = np.sum(manual_grad(t[0], t[1], X), axis=0) - t[1] / SIGMA_2 - t[0] / SIGMA_1
    return a


pvals = []
no_evals = []
for epsilon in np.linspace(0.001, 0.2, 25):
    THINNING_ESTIMATE = 10 ** 4

    sample, evals = austerity(vectorized_log_lik,
                              log_density_prior,
                              X,
                              epsilon,
                              batch_size=50,
                              chain_size=THINNING_ESTIMATE,
                              thinning=1,
                              theta_t=np.random.randn(2))

    thinning, autocorr = get_thinning(sample[:, 0])

    print(' - thinning for epsilon:', thinning, epsilon)

    TEST_SIZE = 500

    e_pvals = []
    e_no_evals = []
    for mc_reps in range(50):
        print(mc_reps)
        sample, evals = austerity(vectorized_log_lik,
                                  log_density_prior,
                                  X,
                                  epsilon,
                                  batch_size=50,
                                  chain_size=TEST_SIZE + MAGIC_BURNIN_NUMBER,
                                  thinning=thinning,
                                  theta_t=np.random.randn(2))