def generative_adversarial_network_example():
	ed.set_seed(42)

	data_dir = '/tmp/data'
	out_dir = '/tmp/out'
	if not os.path.exists(out_dir):
		os.makedirs(out_dir)
	M = 128  # Batch size during training.
	d = 100  # Latent dimension.

	(x_train, _), (x_test, _) = mnist(data_dir)
	x_train_generator = generator(x_train, M)
	x_ph = tf.placeholder(tf.float32, [M, 784])

	#--------------------
	# GANs posit generative models using an implicit mechanism.
	# Given some random noise, the data is assumed to be generated by a deterministic function of that noise.
	with tf.variable_scope('Gen'):
		eps = Uniform(tf.zeros([M, d]) - 1.0, tf.ones([M, d]))
		x = generative_network(eps)

	#--------------------
	# In Edward, the GAN algorithm (GANInference) takes the implicit density model on x as input, bound to its realizations x_ph.
	# In addition, a parameterized discriminator function is provided to distinguish its samples from real data.
	# (Sketches of the helper functions used here appear after this function.)
	inference = ed.GANInference(data={x: x_ph}, discriminator=discriminative_network)

	# We'll use ADAM as optimizers for both the generator and discriminator.
	# We'll run the algorithm for 15,000 iterations and print progress every 1,000 iterations.
	optimizer = tf.train.AdamOptimizer()
	optimizer_d = tf.train.AdamOptimizer()

	inference.initialize(optimizer=optimizer, optimizer_d=optimizer_d, n_iter=15000, n_print=1000)

	# We now form the main loop which trains the GAN.
	# At each iteration, it takes a minibatch and updates the parameters according to the algorithm.
	sess = ed.get_session()
	tf.global_variables_initializer().run()

	idx = np.random.randint(M, size=16)
	i = 0
	for t in range(inference.n_iter):
		if t % inference.n_print == 0:
			samples = sess.run(x)
			samples = samples[idx, ]

			fig = plot(samples)
			plt.savefig(os.path.join(out_dir, '{}.png'.format(str(i).zfill(3))), bbox_inches='tight')
			plt.close(fig)
			i += 1

		x_batch = next(x_train_generator)
		info_dict = inference.update(feed_dict={x_ph: x_batch})
		inference.print_progress(info_dict)
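
# `generative_network`, `discriminative_network`, and `plot` above are helper
# functions defined elsewhere in the original example. A minimal sketch of what
# they might look like, assuming plain fully connected layers (tf.layers,
# TF 1.x) and a 4x4 grid of sample images; the layer sizes and activations are
# illustrative assumptions, not the original architectures.
def generative_network(eps):
	# Map the d-dimensional noise to 784 pixel probabilities in [0, 1].
	net = tf.layers.dense(eps, 128, activation=tf.nn.relu)
	return tf.layers.dense(net, 784, activation=tf.sigmoid)


def discriminative_network(x):
	# Return a real-valued logit; ed.GANInference expects logits rather than
	# probabilities from the discriminator.
	net = tf.layers.dense(x, 128, activation=tf.nn.relu)
	return tf.layers.dense(net, 1, activation=None)


def plot(samples):
	# Arrange the 16 sampled digits on a 4x4 grid of grayscale images.
	fig = plt.figure(figsize=(4, 4))
	for j, sample in enumerate(samples):
		ax = fig.add_subplot(4, 4, j + 1)
		ax.axis('off')
		ax.imshow(sample.reshape(28, 28), cmap='Greys_r')
	return fig
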
    def test_gan_inference(self):
        N, D, W_1, W_2, W_3, b_1, b_2, x_ph, y, X_train, y_train = self._test()

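        # A toy "generator": a single trainable parameter theta rescales the
        # outputs under the "Gen" variable scope, and the discriminator below
        # assigns each value a linear real-valued score.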
        with tf.variable_scope("Gen"):
            theta = tf.get_variable("theta", [1])
            y = tf.cast(y, tf.float32) * theta

        def discriminator(x):
            w = tf.get_variable("w", [1])
            return w * tf.cast(x, tf.float32)

        inference = ed.GANInference(
            data={y: tf.cast(y_train, tf.float32), x_ph: X_train},
            discriminator=discriminator)
        inference.run(n_iter=1)
Example #3
def main(_):
    sns.set(color_codes=True)
    ed.set_seed(42)

    # DATA. We use a placeholder to represent a minibatch. During inference,
    # we generate data on the fly with `next_batch` and feed `x_ph` (a sketch
    # of `next_batch` appears after this function).
    x_ph = tf.placeholder(tf.float32, [FLAGS.M, 1])

    # MODEL
    with tf.variable_scope("Gen"):
        eps = tf.linspace(-8.0, 8.0, FLAGS.M)
        eps += 0.01 * tf.random_normal([FLAGS.M])
        eps = tf.reshape(eps, [-1, 1])
        x = generative_network(eps)

    # INFERENCE
    optimizer = tf.train.GradientDescentOptimizer(0.03)
    optimizer_d = tf.train.GradientDescentOptimizer(0.03)

    inference = ed.GANInference(data={x: x_ph},
                                discriminator=discriminative_network)
    inference.initialize(optimizer=optimizer, optimizer_d=optimizer_d)
    tf.global_variables_initializer().run()

    for _ in range(inference.n_iter):
        x_data = next_batch(FLAGS.M).reshape([FLAGS.M, 1])
        info_dict = inference.update(feed_dict={x_ph: x_data})
        inference.print_progress(info_dict)

    # CRITICISM
    db, pd, pg = get_samples(x_ph)
    db_x = np.linspace(-8, 8, len(db))
    p_x = np.linspace(-8, 8, len(pd))
    f, ax = plt.subplots(1)
    ax.plot(db_x, db, label="Decision boundary")
    ax.set_ylim(0, 1)
    ax.plot(p_x, pd, label="Real data")
    ax.plot(p_x, pg, label="Generated data")
    ax.set_title("1D Generative Adversarial Network")
    ax.set_xlabel("Data values")
    ax.set_ylabel("Probability density")
    ax.legend()
    plt.show()
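
# `next_batch` and `get_samples` are defined elsewhere in the original script.
# A minimal sketch of `next_batch`, assuming the "real" data is drawn from a
# one-dimensional Gaussian; the mean and scale are illustrative assumptions.
def next_batch(size):
    return np.random.normal(4.0, 0.5, size)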
Example #4
  def test_normal(self):
    with self.test_session() as sess:
      # DATA
      M = 12  # batch size during training
      x_ph = tf.placeholder(tf.float32, [M, 1])

      # MODEL
      with tf.variable_scope("Gen"):
        theta = tf.Variable(0.0)
        x = Normal(theta, 0.1, sample_shape=[M, 1])

      # INFERENCE
      inference = ed.GANInference(
          data={x: x_ph}, discriminator=discriminative_network)
      inference.initialize(n_iter=1000)
      tf.global_variables_initializer().run()

      for _ in range(inference.n_iter):
        x_data = next_batch(M).reshape([M, 1])
        inference.update(feed_dict={x_ph: x_data})

      # CRITICISM
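      # The fitted generator parameter should recover the location of the data
      # produced by `next_batch` (assumed to be centered near 4.0).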
      self.assertAllClose(theta.eval(), 4.0, rtol=1.0, atol=1.0)
Example #5
if not os.path.exists(IMG_DIR):
    os.makedirs(IMG_DIR)

# DATA. MNIST batches are fed at training time.
mnist = input_data.read_data_sets(DATA_DIR, one_hot=True)
x_ph = tf.placeholder(tf.float32, [M, 784])

# MODEL
with tf.variable_scope("Gen"):
    eps = Uniform(a=tf.zeros([M, d]) - 1.0, b=tf.ones([M, d]))
    x = generative_network(eps)

# INFERENCE
optimizer = tf.train.AdamOptimizer()
optimizer_d = tf.train.AdamOptimizer()

inference = ed.GANInference(data={x: x_ph},
                            discriminator=discriminative_network)
inference.initialize(optimizer=optimizer,
                     optimizer_d=optimizer_d,
                     n_iter=15000,
                     n_print=1000)

sess = ed.get_session()
tf.global_variables_initializer().run()

idx = np.random.randint(M, size=16)
i = 0
for t in range(inference.n_iter):
    if t % inference.n_print == 0:
        samples = sess.run(x)
        samples = samples[idx, ]