    def test_austerity(self):
        np.random.seed(SEED)
        X = gen_X(SAMPLE_SIZE)

        def vectorized_log_lik(X, theta):
            return _vector_of_log_likelihoods(theta[0], theta[1], X)

        def log_density_prior(theta):
            return np.log(norm.pdf(theta[0], 0, SIGMA_1)) + np.log(
                norm.pdf(theta[1], 0, SIGMA_2))

        sample, _ = austerity(vectorized_log_lik,
                              log_density_prior,
                              X,
                              0.01,
                              batch_size=50,
                              chain_size=10,
                              thinning=1,
                              theta_t=np.random.randn(2))
        assert_almost_equal(np.array([-0.2554517, 1.3805683]), sample[-1])
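
# The `vectorized_log_lik` passed to `austerity` above returns one
# log-likelihood per observation, which is what lets the sampler accept or
# reject on mini-batches. A minimal sketch of such a function, assuming the
# two-component Gaussian mixture commonly used in SGLD experiments (the
# mixture weights and SIGMA_X below are assumptions, not the definitions
# from sgld_test.likelihoods):
import numpy as np
from scipy.stats import norm

SIGMA_X = np.sqrt(2.0)  # assumed observation noise scale


def vector_of_log_likelihoods_sketch(theta_1, theta_2, x):
    # x_i ~ 0.5 * N(theta_1, SIGMA_X^2) + 0.5 * N(theta_1 + theta_2, SIGMA_X^2)
    component_1 = norm.pdf(x, theta_1, SIGMA_X)
    component_2 = norm.pdf(x, theta_1 + theta_2, SIGMA_X)
    return np.log(0.5 * component_1 + 0.5 * component_2)
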
from time import time
from sgld_test.bimodal_SGLD import vSGLD, evSGLD
from sgld_test.gradients_of_likelihood import manual_grad, grad_log_prior
from sgld_test.mcmc_convergance.cosnt import NUMBER_OF_TESTS, NO_OF_SAMPELS_IN_TEST, CHAIN_SIZE, SEED, SGLD_CHAIN_SIZE, \
    SAMPLE_SIZE
from sgld_test.likelihoods import gen_X, log_probability

import numpy as np

np.random.seed(SEED)
X = gen_X(SAMPLE_SIZE)


def vectorized_log_density(theta):
    return log_probability(theta, X)


t1 = time()

sample = []
no_chains = NUMBER_OF_TESTS * NO_OF_SAMPELS_IN_TEST

for i in range(no_chains):
    if i % 100 == 0:
        # Report progress (percent of chains finished) and elapsed time.
        print(float(i) * 100.0 / no_chains)
        print(time() - t1)
    sample.append(
        evSGLD(manual_grad,
               grad_log_prior,
               X,
               n=1,
               chain_size=SGLD_CHAIN_SIZE,
               theta=np.random.randn(2)))
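
# For reference, a generic stochastic gradient Langevin dynamics update
# (Welling & Teh, 2011). This is only a sketch of the transition that evSGLD
# is assumed to perform; the actual step-size schedule, batching and argument
# handling inside evSGLD may differ.
import numpy as np


def sgld_step_sketch(theta, grad_log_lik, grad_log_prior, X, n, epsilon):
    N = len(X)
    batch = X[np.random.choice(N, n, replace=False)]
    # Rescale the mini-batch gradient so it is an unbiased estimate of the
    # full-data log-likelihood gradient, then add the prior gradient.
    grad_estimate = grad_log_prior(theta) + \
        (float(N) / n) * np.sum(grad_log_lik(theta[0], theta[1], batch), axis=0)
    # Gaussian noise with variance epsilon turns the gradient step into
    # Langevin dynamics.
    return theta + 0.5 * epsilon * grad_estimate + \
        np.sqrt(epsilon) * np.random.randn(*theta.shape)
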
from sgld_test.gradients_of_likelihood import manual_grad, grad_log_prior
from sgld_test.likelihoods import gen_X
import numpy as np
import matplotlib.pyplot as plt

theta1 = np.arange(-2, 2, 0.025)
theta2 = np.arange(-2, 2, 0.025)

grid_dimension_size = len(theta1)

theta1, theta2 = np.meshgrid(theta1, theta2)

# Grid-shaped arrays to hold the two components of the gradient field.
D_theta1 = np.copy(theta1)
D_theta2 = np.copy(theta2)

sample = gen_X(400)

for i in range(grid_dimension_size):
    for j in range(grid_dimension_size):
        th = np.array([theta1[i, j], theta2[i, j]])

        # subsample = np.random.choice(sample, 40)
        stoch_grad_log_lik = np.sum(manual_grad(th[0], th[1], sample),
                                    axis=0) + grad_log_prior(th)

        D_theta1[i, j] = stoch_grad_log_lik[0]
        D_theta2[i, j] = stoch_grad_log_lik[1]

plt.figure()
CS = plt.streamplot(theta1, theta2, D_theta1, D_theta2, density=[0.5, 1])
plt.show()
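
# The prior gradient added above has a simple closed form for the independent
# zero-mean Gaussian priors used in the austerity test; a sketch of what
# grad_log_prior is assumed to compute, with sigma_1 and sigma_2 as the prior
# standard deviations:
import numpy as np


def grad_log_prior_sketch(theta, sigma_1, sigma_2):
    # d/d theta_i of log N(theta_i | 0, sigma_i^2) is -theta_i / sigma_i^2
    return np.array([-theta[0] / sigma_1 ** 2, -theta[1] / sigma_2 ** 2])
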
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns

from sgld_test.likelihoods import gen_X, _log_probability

sns.set(color_codes=True)

np.random.seed(1307)

N = 400
X = gen_X(N)
theta1 = np.arange(-2, 2, 0.25)
grid_size = len(theta1)
theta2 = np.arange(-2, 2, 0.25)
theta1, theta2 = np.meshgrid(theta1, theta2)
Z = np.copy(theta1)


for i in range(grid_size):
    for j in range(grid_size):
        probability = _log_probability(theta1[i, j], theta2[i, j], X)
        Z[i, j] = probability

# Shift the log-probabilities by their maximum (plus a small offset) before
# exponentiating, so np.exp stays in a representable range.
z_max = np.max(Z) + 2

Z = np.exp(Z - z_max)

print(Z)

plt.figure()
# The original snippet breaks off here; a contour plot of the grid is an
# assumed completion.
plt.contour(theta1, theta2, Z)
plt.show()
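
# Subtracting the maximum log-probability before exponentiating, as done with
# z_max above, is the standard trick for avoiding underflow when turning
# log-probabilities into an (unnormalised) density. A standalone illustration:
import numpy as np

log_p = np.array([-1000.0, -1001.0, -1002.0])
naive = np.exp(log_p)                   # underflows to exactly 0.0
stable = np.exp(log_p - np.max(log_p))  # proportional to the true values
print(naive, stable)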