Example #1
def main(_):
    ed.set_seed(42)

    # DATA. MNIST batches are fed at training time.
    (x_train, _), (x_test, _) = mnist(FLAGS.data_dir)
    x_train_generator = generator(x_train, FLAGS.M)

    # MODEL
    # Define a subgraph of the full model, corresponding to a minibatch of
    # size M.
    z = Normal(loc=tf.zeros([FLAGS.M, FLAGS.d]),
               scale=tf.ones([FLAGS.M, FLAGS.d]))
    hidden = tf.layers.dense(z, 256, activation=tf.nn.relu)
    x = Bernoulli(logits=tf.layers.dense(hidden, 28 * 28))

    # INFERENCE
    # Define a subgraph of the variational model, corresponding to a
    # minibatch of size M.
    x_ph = tf.placeholder(tf.int32, [FLAGS.M, 28 * 28])
    hidden = tf.layers.dense(tf.cast(x_ph, tf.float32),
                             256,
                             activation=tf.nn.relu)
    qz = Normal(loc=tf.layers.dense(hidden, FLAGS.d),
                scale=tf.layers.dense(hidden,
                                      FLAGS.d,
                                      activation=tf.nn.softplus))

    # Bind p(x, z) and q(z | x) to the same TensorFlow placeholder for x.
    inference = ed.KLqp({z: qz}, data={x: x_ph})
    optimizer = tf.train.RMSPropOptimizer(0.01, epsilon=1.0)
    inference.initialize(optimizer=optimizer)

    tf.global_variables_initializer().run()

    n_iter_per_epoch = x_train.shape[0] // FLAGS.M
    for epoch in range(1, FLAGS.n_epoch + 1):
        print("Epoch: {0}".format(epoch))
        avg_loss = 0.0

        pbar = Progbar(n_iter_per_epoch)
        for t in range(1, n_iter_per_epoch + 1):
            pbar.update(t)
            x_batch = next(x_train_generator)
            info_dict = inference.update(feed_dict={x_ph: x_batch})
            avg_loss += info_dict['loss']

        # Print a lower bound to the average marginal likelihood for an
        # image.
        avg_loss /= n_iter_per_epoch
        avg_loss /= FLAGS.M
        print("-log p(x) <= {:0.3f}".format(avg_loss))

        # Prior predictive check.
        images = x.eval()
        for m in range(FLAGS.M):
            imsave(
                os.path.join(FLAGS.out_dir, '%d.png') % m,
                images[m].reshape(28, 28))
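
Example #1 is an excerpt: it assumes a `mnist` loader and a `generator` minibatch helper defined earlier in the script. The `mnist` call signature matches the loader in the `observations` package (an assumption), and the sketch of `generator` below is modeled on Edward's VAE example; the original file's version may differ in details.

import numpy as np
from observations import mnist  # assumed source of the `mnist` loader

def generator(array, batch_size):
    """Yield binarized minibatches over the rows of `array`, forever."""
    while True:
        idx = np.random.permutation(array.shape[0])
        for start in range(0, array.shape[0] - batch_size + 1, batch_size):
            batch = array[idx[start:start + batch_size]]
            batch = batch.astype(np.float32) / 255.0  # pixel intensities in [0, 1]
            yield np.random.binomial(1, batch).astype(np.int32)  # binarize for the Bernoulli likelihood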
Example #3
# Bind p(x, z) and q(z | x) to the same TensorFlow placeholder for x.
inference = ed.KLqp({z: qz}, data={x: x_ph})
optimizer = tf.train.RMSPropOptimizer(0.01, epsilon=1.0)
inference.initialize(optimizer=optimizer)

tf.global_variables_initializer().run()

n_epoch = 100
n_iter_per_epoch = x_train.shape[0] // M
for epoch in range(1, n_epoch + 1):
    print("Epoch: {0}".format(epoch))
    avg_loss = 0.0

    pbar = Progbar(n_iter_per_epoch)
    for t in range(1, n_iter_per_epoch + 1):
        pbar.update(t)
        x_batch = next(x_train_generator)
        info_dict = inference.update(feed_dict={x_ph: x_batch})
        avg_loss += info_dict['loss']

    # Print a lower bound to the average marginal likelihood for an
    # image.
    avg_loss = avg_loss / n_iter_per_epoch
    avg_loss = avg_loss / M
    print("-log p(x) <= {:0.3f}".format(avg_loss))

    # Prior predictive check.
    images = x.eval()
    for m in range(M):
        imsave(os.path.join(out_dir, '%d.png') % m, images[m].reshape(28, 28))
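
Example #3 is a fragment that begins at the inference step; `M`, `d`, `out_dir`, `x_ph`, `z`, `qz`, and `x_train_generator` are assumed from a preamble like Example #1's, written with plain names instead of FLAGS. A minimal sketch of those assumed definitions (values illustrative, not the original file's):

M = 100   # minibatch size
d = 2     # latent dimension
out_dir = '/tmp/out'

z = Normal(loc=tf.zeros([M, d]), scale=tf.ones([M, d]))
hidden = tf.layers.dense(z, 256, activation=tf.nn.relu)
x = Bernoulli(logits=tf.layers.dense(hidden, 28 * 28))

x_ph = tf.placeholder(tf.int32, [M, 28 * 28])
hidden = tf.layers.dense(tf.cast(x_ph, tf.float32), 256, activation=tf.nn.relu)
qz = Normal(loc=tf.layers.dense(hidden, d),
            scale=tf.layers.dense(hidden, d, activation=tf.nn.softplus))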
Example #4
init = tf.global_variables_initializer()
init.run()

n_iter_per_epoch = 100
n_epoch = T // n_iter_per_epoch
for epoch in range(n_epoch):
    avg_loss = 0.0

    widgets = ["epoch #%d|" % epoch, Percentage(), Bar(), ETA()]
    pbar = ProgressBar(n_iter_per_epoch, widgets=widgets)
    pbar.start()
    for t in range(n_iter_per_epoch):
        pbar.update(t)
        info_dict_e = inference_e.update()
        info_dict_m = inference_m.update()
        avg_loss += info_dict_m['loss']

    print("Acceptance Rate:")
    print(info_dict_e['accept_rate'])

    # Print a lower bound to the average marginal likelihood for an
    # image.
    avg_loss = avg_loss / n_iter_per_epoch
    avg_loss = avg_loss / N
    print("log p(x) >= {:0.3f}".format(avg_loss))

    # Prior predictive check.
    imgs = x.eval()
    for m in range(N):
        imsave(os.path.join(IMG_DIR, '%d.png') % m, imgs[m].reshape(28, 28))
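
This example uses the `progressbar` package rather than Edward's `Progbar`; the widgets it references come from an import like the one below (assumed, since the excerpt omits it). `T` (total iterations), `N` (number of images), `x`, and the two inference objects are likewise assumed from earlier in the script.

from progressbar import ProgressBar, Percentage, Bar, ETA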
Example #5
import tensorflow as tf
import edward as ed
from edward.models import Bernoulli, Beta
import matplotlib.pyplot as plt
import seaborn as sns

# Single coin-weight inference.

# Model: Beta(1, 1) prior on the coin weight, Bernoulli likelihood.
theta = Beta(1.0, 1.0)
x = Bernoulli(probs=theta)

# Sampling: draw from the prior predictive.
with tf.Session() as sess:
    for i in range(10):
        print(x.eval())

# Observations: a single flip that came up heads.
data = 1

# Inference: fit a variational Beta to the posterior. softplus keeps the
# variational parameters positive during optimization.
qtheta = Beta(tf.nn.softplus(tf.Variable(1.0)),
              tf.nn.softplus(tf.Variable(1.0)))
inference = ed.KLqp({theta: qtheta}, {x: data})
inference.run()

# Results:
qtheta_samples = qtheta.sample(1000).eval()
print(qtheta_samples.mean())
plt.hist(qtheta_samples)
plt.show()
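
Because the Beta prior is conjugate to the Bernoulli likelihood, this toy problem has a closed-form posterior to check the KLqp fit against:

# Conjugate update: a Beta(a, b) prior with k successes in n Bernoulli
# trials gives a Beta(a + k, b + n - k) posterior. Here a = b = 1 and
# n = k = 1, so the posterior is Beta(2, 1) with mean 2 / 3.
a, b, n, k = 1.0, 1.0, 1, 1
posterior_mean = (a + k) / (a + b + n)
print(posterior_mean)  # ~0.667; qtheta_samples.mean() should be close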
Example #6
    if log_enabled:
        pbar = Progbar(n_iter_per_epoch)
    for t in range(1, n_iter_per_epoch + 1):
        if log_enabled:
            pbar.update(t)
        x_batch = next(x_train_generator)
        info_dict = inference.update(feed_dict={x_ph: x_batch})
        avg_loss += info_dict['loss'] / d

    # Print a lower bound to the average marginal likelihood for an
    # image.
    avg_loss = avg_loss / n_iter_per_epoch
    avg_loss = avg_loss / M
    print("-log p(x) <= {:0.3f}".format(avg_loss))
    saver.save(sess, out_model + "/model.%05d.ckpt" % epoch)
    if np.isnan(avg_loss):
        print("[ERROR] average loss is NaN; stopping training.")
        break

    # Plot a random 4 x 4 grid of prior predictive samples.
    idx = np.random.randint(M, size=16)
    samples = x.eval()
    samples = samples[idx]

    fig = plot(samples)
    plt.savefig(os.path.join(out_dir, '{}.png').format(str(i).zfill(3)),
                bbox_inches='tight')
    plt.close(fig)
    i += 1
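
The loop above references a `plot` helper that the excerpt omits; a common implementation tiles the 16 sampled images into a 4 x 4 grayscale grid (a sketch, not necessarily the original file's version):

import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec

def plot(samples):
    """Tile flattened 28 x 28 samples into a 4 x 4 grayscale grid."""
    fig = plt.figure(figsize=(4, 4))
    gs = gridspec.GridSpec(4, 4)
    gs.update(wspace=0.05, hspace=0.05)
    for j, sample in enumerate(samples):
        ax = plt.subplot(gs[j])
        ax.set_xticks([])
        ax.set_yticks([])
        ax.imshow(sample.reshape(28, 28), cmap='Greys_r')
    return fig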