# NOTE(review): this physical line is a whitespace-mangled Python script -- the
# original newlines/indentation were lost in an extraction step, so it will not
# parse as-is. Restoring it requires re-inserting line breaks before each
# statement below; left byte-identical here because the final `for` loop's body
# is not visible in this chunk, so a full reconstruction would have to guess.
# What the tokens show, in order:
#   * imports: bayes_lib, autograd (+ autograd.numpy as agnp, autograd.scipy as
#     agsp), matplotlib.pyplot, and tensorflow.examples.tutorials.mnist
#     (deprecated loader, removed in TF 2.x -- TODO confirm TF 1.x is pinned);
#   * loads MNIST ("MNIST_data/", one-hot) and takes one batch of 100 training
#     images;
#   * inside `with bl.Model() as m:` builds a 784->50->20 sigmoid 'Encoder' and
#     a 20->50->784 sigmoid 'Decoder' (sigmoid on the last layer too), plus a
#     Bernoulli RV 'obs' whose mean is the decoder output and whose observed
#     value is the input placeholder X -- i.e. an autoencoder-style
#     reconstruction likelihood;
#   * loads fitted parameters from "pos.txt" (comma-delimited), applies them via
#     m.set_param, and evaluates the decoder on the 100-image batch;
#   * ends with `for i in range(out.shape[1]):` -- the loop body is NOT present
#     in this chunk, so the statement is truncated (presumably it plotted each
#     reconstructed image; verify against the full file).
import bayes_lib as bl import autograd import autograd.numpy as agnp import autograd.scipy as agsp import matplotlib.pyplot as plt from tensorflow.examples.tutorials.mnist import input_data #x_true = agnp.random.normal(3, 1, size = (100, 10)) mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) train_images = mnist.train.next_batch(100)[0] with bl.Model() as m: X = bl.Placeholder('X', dimensions=agnp.array([100, 784])) encoder = bl.ml.neural_network.DenseNeuralNetwork( 'Encoder', X, layer_dims=[784, 50, 20], nonlinearity=bl.math.utils.sigmoid) decoder = bl.ml.neural_network.DenseNeuralNetwork( 'Decoder', encoder, layer_dims=[20, 50, 784], nonlinearity=bl.math.utils.sigmoid, last_layer_nonlinearity=bl.math.utils.sigmoid) y = bl.rvs.Bernoulli('obs', decoder, observed=X) fit_params = agnp.loadtxt("pos.txt", delimiter=',') m.set_param(fit_params) out = m.evaluate(decoder, feed_dict={X: train_images}) for i in range(out.shape[1]):
# NOTE(review): like the line above, this physical line is whitespace-mangled
# Python (newlines lost). It is left byte-identical because (a) it begins
# mid-function -- the `def` header for the body that starts at `logpdfs = []`
# and ends at `return logpdfs, theta, agnp.array(data)` is not visible
# (presumably `def gen_data(n):`, given the uses of `n` and the later call
# `gen_data(100)` -- TODO confirm against the full file), and (b) the final
# training loop `for i in range(10): ...` is truncated mid-body.
# What the tokens show, in order:
#   * function interior: draws theta ~ Uniform(0, 5) of shape (n, 2); for each
#     row draws y ~ N(theta[i], I_2), records the corresponding
#     multivariate-normal log-pdf, and returns (logpdfs, theta, data-array);
#   * inside `with bl.Model() as m_surrogate_density:` declares placeholders X
#     (100x2) and theta (100x2) and a ConditionalGaussianMADE 'made' with
#     hidden sizes [20, 21], ReLU nonlinearity, modelling X conditioned on
#     theta (a surrogate density / likelihood-free-inference setup);
#   * initializes the surrogate's parameter vector from N(-3, 3) -- NOTE: in
#     agnp.random.normal the second argument is the *scale*, so this is mean -3,
#     std 3; confirm that spread is intentional;
#   * `iter_func(t, p, o)` is an optimization callback that prints the current
#     objective value;
#   * the trailing `for i in range(10):` loop regenerates 100 pseudo-samples per
#     iteration via gen_data(100); the rest of its body (presumably the fit
#     call) is NOT visible in this chunk.
logpdfs = [] #theta = agnp.random.multivariate_normal(agnp.array([3,3]), agnp.eye(2), size = n) theta = agnp.random.uniform(0, 5, size=(n, 2)) data = [] for i in range(n): y = agnp.random.multivariate_normal(theta[i, :], agnp.eye(2)) logpdfs.append( agsp.stats.multivariate_normal.logpdf(y, theta[i, :], agnp.eye(2))) data.append(y) return logpdfs, theta, agnp.array(data) #data = agnp.hstack([agnp.random.normal(3, 2, size = (1000,1)),agnp.random.normal(10, .1, size = (1000,1)), agnp.random.normal(-5, 3, size = (1000,1))]) with bl.Model() as m_surrogate_density: #X = bl.Placeholder('X', dimensions = agnp.array([1000,3])) X = bl.Placeholder('X', dimensions=agnp.array([100, 2])) theta = bl.Placeholder('theta', dimensions=agnp.array([100, 2])) made = bl.ml.made.ConditionalGaussianMADE('made', 2, 2, theta, [20, 21], X, nonlinearity=bl.math.utils.relu) init = agnp.random.normal(-3, 3, size=m_surrogate_density.n_params) def iter_func(t, p, o): print("Objective value: %f" % o) for i in range(10): _, pseudo_theta, pseudo_data = gen_data(100)