Example 1
def train_gan(args):
    # Import data
    mnist = input_data.read_data_sets(FLAGS.data_dir)

    x = tf.placeholder(tf.float32, [None, 784])
    y_labels = tf.placeholder(tf.float32, [None])
    y = discriminator(x)

    batch_size = tf.Variable(100)
    gen = Network()
    fake_x = gen.calculate(uniform_data_gen(batch_size))
    fake_y = discriminator(fake_x, True)

    # Collect generator and discriminator variables by scope
    gen_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="GAN/Generator")
    disc_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="GAN/Discriminator")

    disc_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(y), logits=y) +
        tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(fake_y), logits=fake_y))
    gen_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(fake_y), logits=fake_y))

    gen_train_step = tf.train.AdamOptimizer(0.0001).minimize(gen_loss, var_list=gen_vars)
    disc_train_step = tf.train.AdamOptimizer(0.0001).minimize(disc_loss, var_list=disc_vars)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()

    # Train: five discriminator updates per generator update
    for it in range(10000):
        print("Iteration", it)
        batch_xs, batch_ys = mnist.train.next_batch(100)
        for d_to_g in range(5):
            d_loss, _ = sess.run([disc_loss, disc_train_step],
                                 feed_dict={x: batch_xs, y_labels: batch_ys})
        g_loss, _ = sess.run([gen_loss, gen_train_step],
                             feed_dict={x: batch_xs, y_labels: batch_ys})
        print("Discriminator loss:", d_loss)
        print("Generator loss:", g_loss)

    if "--save" in args:
        saver.save(sess, './models/mnist_test_model',global_step=1000)

    print("\nTESTING DISCRIMINATOR ON MNIST DATA\n")
    batch_xs, batch_ys = mnist.train.next_batch(1)
    image_read.draw_ascii(np.asarray(batch_xs).reshape(-1), printOut=True)
    output = sess.run([y], feed_dict={x: batch_xs, y_labels: batch_ys})
    print("y guess:", output)
    print("y val:", batch_ys)

    print("\nTESTING GENERATORS OUTPUT\n")
    x_val, y_val = sess.run([fake_x, fake_y], feed_dict={batch_size: 1})
    image_read.draw_ascii(np.asarray(x_val).reshape(-1), printOut=True)
    print("y val:", y_val)
Example 2
def load_gan(_):
    mnist = input_data.read_data_sets(FLAGS.data_dir)

    sess = tf.Session()
    saver = tf.train.import_meta_graph('./models/mnist_test_model-1000.meta')
    saver.restore(sess, tf.train.latest_checkpoint("./models"))
    graph = tf.get_default_graph()

    x = tf.placeholder(tf.float32, [None, 784])
    y_labels = tf.placeholder(tf.int64, [None])

    discriminator = Network([{'w': graph.get_tensor_by_name("GAN/Discriminator/h1:0"),
                              'b': graph.get_tensor_by_name("GAN/Discriminator/h1_bias:0"),
                              'activation': tf.nn.softmax}])
    testout, _ = discriminator.calculate(x)

    batch_xs, batch_ys = mnist.train.next_batch(1)
    image_read.draw_ascii(np.asarray(batch_xs).reshape(-1), printOut=True)
    output = sess.run([testout], feed_dict={x: batch_xs, y_labels: batch_ys})
    print(output)
    print(type(batch_ys))
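Example 2 rebuilds the forward pass from the restored weight tensors plus a fresh placeholder. An alternative, assuming (hypothetically) that the training script had named its tensors, e.g. tf.placeholder(..., name="x") and tf.identity(y, name="disc_out"), is to fetch the original ops by name instead of reconstructing them:

import numpy as np
import tensorflow as tf

sess = tf.Session()
saver = tf.train.import_meta_graph('./models/mnist_test_model-1000.meta')
saver.restore(sess, tf.train.latest_checkpoint("./models"))
graph = tf.get_default_graph()

# These names only exist if the training graph tagged its tensors; they are
# illustrative, not the project's actual names.
x = graph.get_tensor_by_name("x:0")
y = graph.get_tensor_by_name("disc_out:0")

batch = np.random.rand(1, 784).astype(np.float32)  # stand-in for an MNIST batch
print(sess.run(y, feed_dict={x: batch}))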
Example 3
def train_gan(args):
    # Import data
    mnist = input_data.read_data_sets(FLAGS.data_dir)

    x = tf.placeholder(tf.float32, [None, 784])
    y_labels = tf.placeholder(tf.int32, [None])
    y = discriminator(x)
    noise = tf.placeholder(tf.float32, [None, 100])

    batch_size = tf.Variable(100)
    #gen=generator()
    fake_x = generator(noise)
    fake_y = discriminator(fake_x, True)

    # Collect generator and discriminator variables by scope
    gen_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                 scope="GAN/Generator")
    disc_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                  scope="GAN/Discriminator")

    # Real digits keep their own labels; fake samples get the extra class 10
    disc_loss = tf.losses.sparse_softmax_cross_entropy(labels=y_labels, logits=y) + \
        tf.losses.sparse_softmax_cross_entropy(labels=10 * tf.ones_like(y_labels),
                                               logits=fake_y)
    # The generator tries to get its samples classified as the digit 7
    gen_loss = tf.losses.sparse_softmax_cross_entropy(labels=7 * tf.ones_like(y_labels),
                                                      logits=fake_y)

    gen_train_step = tf.train.AdamOptimizer(0.0001).minimize(gen_loss,
                                                             var_list=gen_vars)
    disc_train_step = tf.train.AdamOptimizer(0.0001).minimize(
        disc_loss, var_list=disc_vars)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()

    # Train: five discriminator updates, then five generator updates per iteration
    for step in range(5000):
        print("Iteration", step)
        batch_xs, batch_ys = mnist.train.next_batch(100)
        for d_to_g in range(5):
            d_loss, _ = sess.run(
                [disc_loss, disc_train_step],
                feed_dict={
                    x: batch_xs,
                    y_labels: batch_ys,
                    noise: uniform_data_gen(100).eval(session=sess)
                })
        for i in range(5):
            g_loss, _ = sess.run(
                [gen_loss, gen_train_step],
                feed_dict={
                    x: batch_xs,
                    y_labels: batch_ys,
                    noise: uniform_data_gen(100).eval(session=sess)
                })
        print("Discriminator loss:", d_loss)
        print("Generator loss:", g_loss)

        if step % 50 == 0:
            print("\nTESTING DISCRIMINATOR ON MNIST DATA\n")
            for it in range(5):
                batch_xs, batch_ys = mnist.train.next_batch(1)
                image_read.draw_ascii(np.asarray(batch_xs).reshape(-1),
                                      printOut=True)
                output = sess.run(
                    [y],
                    feed_dict={
                        x: batch_xs,
                        y_labels: batch_ys,
                        noise: uniform_data_gen(100).eval(session=sess)
                    })
                print("y guess:", output)
                print("y val:", batch_ys)
            print("\nTESTING GENERATORS OUTPUT\n")
            for it in range(2):
                x_val, y_val = sess.run(
                    [fake_x, fake_y],
                    feed_dict={noise: uniform_data_gen(1).eval(session=sess)})
                image_read.draw_ascii(np.asarray(x_val).reshape(-1),
                                      printOut=True)
                print("y val:", y_val)

    if "--save" in args:
        saver.save(sess, './models/mnist_test_model', global_step=1000)

    print("\nTESTING DISCRIMINATOR ON MNIST DATA\n")
    for it in xrange(5):
        batch_xs, batch_ys = mnist.train.next_batch(1)
        image_read.draw_ascii(np.asarray(batch_xs).reshape(-1), printOut=True)
        output = sess.run(
            [y],
            feed_dict={
                x: batch_xs,
                y_labels: batch_ys,
                noise: uniform_data_gen(100).eval(session=sess)
            })
        print("y guess: ", output)
        print("y val: ", batch_ys)
        batch_xs, batch_ys = mnist.train.next_batch(1)

    print("\nTESTING GENERATORS OUTPUT\n")
    for it in xrange(2):
        x_val, y_val = sess.run(
            [fake_x, fake_y],
            feed_dict={noise: uniform_data_gen(1).eval(session=sess)})
        image_read.draw_ascii(np.asarray(x_val).reshape(-1), printOut=True)
        print("y val: ", y_val)
Example 4
def train_gan(args):
    # Import data
    mnist = input_data.read_data_sets(FLAGS.data_dir)

    gen = Generator()
    disc = Discriminator()

    keep_prob = tf.placeholder(tf.float32, None)
    is_training = tf.placeholder(tf.bool, None)

    x = tf.placeholder(tf.float32, [None, 784])
    y_labels = tf.placeholder(tf.int32, [None])
    y = disc.classify(tf.reshape(x, [-1, 28, 28, 1]), keep_prob, is_training)

    noise = tf.placeholder(tf.float32, [None, 100])

    fake_x = gen.generate(noise, keep_prob, is_training)
    fake_y = disc.classify(fake_x, keep_prob, is_training, reuse=True)

    # Collect generator and discriminator variables by scope
    gen_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                 scope="GAN/Generator")
    disc_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                  scope="GAN/Discriminator")

    disc_reg = tf.contrib.layers.apply_regularization(
        tf.contrib.layers.l2_regularizer(1e-8), disc_vars)
    gen_reg = tf.contrib.layers.apply_regularization(
        tf.contrib.layers.l2_regularizer(1e-8), gen_vars)

    disc_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(y), logits=y))+\
            tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(fake_y), logits=fake_y))

    gen_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(fake_y),
                                                logits=fake_y))

    opt = tf.train.RMSPropOptimizer(0.001)
    gen_train_step = opt.minimize(gen_loss, var_list=gen_vars)
    disc_train_step = opt.minimize(disc_loss + disc_reg, var_list=disc_vars)

    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    saver = tf.train.Saver()
    if "--load" in args:
        saver.restore(sess, "./models/mnist_test_model")

    show_z = normal_data_gen(16)
    g_loss = 0
    d_loss = 0
    past_glosses = []
    past_dlosses = []

    # Train
    for step in range(5000):
        print("Iteration", step)
        batch_xs, batch_ys = mnist.train.next_batch(128)
        noise_batch = normal_data_gen(128)
        d_loss, _ = sess.run([disc_loss, disc_train_step],
                             feed_dict={
                                 keep_prob: 0.7,
                                 is_training: True,
                                 x: batch_xs,
                                 y_labels: batch_ys,
                                 noise: noise_batch
                             })

        noise_batch = normal_data_gen(128)
        g_loss, _ = sess.run([gen_loss, gen_train_step],
                             feed_dict={
                                 keep_prob: 0.7,
                                 is_training: True,
                                 noise: noise_batch
                             })

        print("Discriminator loss:", d_loss)
        print("Generator loss:", g_loss)

        if step % 10 == 0:
            past_glosses.append(g_loss)
            past_dlosses.append(d_loss)

        if step % 50 == 0:
            print("\nTESTING DISCRIMINATOR ON MNIST DATA\n")
            for it in range(0):  # disabled; raise the count to re-enable this check
                batch_xs, batch_ys = mnist.train.next_batch(16)
                image_read.draw_ascii(np.asarray(batch_xs).reshape(-1),
                                      printOut=True)
                output = sess.run(
                    [y],
                    feed_dict={
                        keep_prob: 1.0,
                        is_training: False,
                        x: batch_xs,
                        y_labels: batch_ys,
                        noise: normal_data_gen(16)
                    })

                batch_xs = np.reshape(batch_xs, (-1, 28, 28, 1))
                imgs = [img[:, :, 0] for img in batch_xs]
                m = montage(imgs)
                gen_img = m
                plt.axis('off')
                plt.imshow(gen_img, cmap='gray')
                #plt.show()

                print("y guess: ", output)
                print("y val: ", batch_ys)
            print("\nTESTING GENERATORS OUTPUT\n")
            for it in range(1):
                x_val, y_val = sess.run([fake_x, fake_y],
                                        feed_dict={
                                            keep_prob: 1.0,
                                            is_training: False,
                                            noise: show_z
                                        })
                #image_read.draw_ascii(np.asarray(x_val).reshape(-1),printOut=True)
                print("y val:", y_val)
                imgs = [img[:, :, 0] for img in x_val]
                m = montage(imgs)
                gen_img = m
                plt.axis('off')
                plt.imshow(gen_img, cmap='gray')
                plt.savefig("./output/it%d.png" % _)
                plt.show()

                # Losses are recorded every 10 iterations, so scale the x axis
                plt.plot(np.arange(len(past_dlosses)) * 10,
                         past_dlosses,
                         label="dloss")
                plt.plot(np.arange(len(past_glosses)) * 10,
                         past_glosses,
                         label="gloss")
                plt.title('DCGAN Loss')
                plt.xlabel('Iteration')
                plt.ylabel('Loss')
                plt.legend()
                plt.savefig("./output/progress.png")
                plt.show()

            if "--save" in args:
                saver.save(sess, './models/mnist_test_model')
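Example 4 additionally calls normal_data_gen and a montage helper that are not shown. A minimal sketch of plausible stand-ins, assuming standard-normal noise and equally sized grayscale images tiled into a roughly square grid:

import numpy as np

def normal_data_gen(batch_size, dim=100):
    # Hypothetical noise source: standard-normal samples as a NumPy array,
    # which is why Example 4 feeds it directly without .eval(...).
    return np.random.normal(0.0, 1.0, size=(batch_size, dim)).astype(np.float32)

def montage(imgs):
    # Hypothetical stand-in for the montage helper: tiles a list of equally
    # sized 2-D grayscale images into a roughly square grid.
    n = len(imgs)
    side = int(np.ceil(np.sqrt(n)))
    h, w = imgs[0].shape
    grid = np.ones((side * h, side * w), dtype=imgs[0].dtype)
    for idx, img in enumerate(imgs):
        r, c = divmod(idx, side)
        grid[r * h:(r + 1) * h, c * w:(c + 1) * w] = img
    return grid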
Example 5
def train_gan(args):
    # Import data
    mnist = input_data.read_data_sets(FLAGS.data_dir)

    x = tf.placeholder(tf.float32, [None, 784])
    y_labels = tf.placeholder(tf.int64, [None])
    y = discriminator(x)

    generators = []
    gen_aux = []  # secondary outputs returned by Network.calculate
    fake_x_vals = []
    fake_y_vals = []

    batch_size = tf.Variable(100)

    # Build ten generators; the discriminator weights are shared via reuse
    for ind in range(10):
        generators.append(Network())
        a, b = generators[-1].calculate(uniform_data_gen(batch_size))
        fake_x_vals.append(a)
        gen_aux.append(b)
        fake_y_vals.append(discriminator(fake_x_vals[-1], True))

    # Each generator targets its own digit class
    generators_loss = []
    generators_optimizers = []
    gen_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                 scope="GAN/Generator")
    for ind in range(len(generators)):
        generators_loss.append(
            tf.losses.sparse_softmax_cross_entropy(
                labels=ind * tf.ones([100], dtype=tf.int64),
                logits=fake_y_vals[ind]))
        generators_optimizers.append(
            tf.train.GradientDescentOptimizer(0.1).minimize(
                generators_loss[ind], var_list=gen_vars))

    # Dummy variable fetched alongside the generator ops below
    counter = tf.Variable(0)

    #disc_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=tf.ones_like(y), logits=y)+\
    #    tf.nn.softmax_cross_entropy_with_logits(labels=tf.zeros_like(fake_y), logits=fake_y))

    disc_loss = tf.losses.sparse_softmax_cross_entropy(labels=y_labels,
                                                       logits=y)
    disc_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                  scope="GAN/Discriminator")
    disc_train_step = tf.train.GradientDescentOptimizer(0.1).minimize(
        disc_loss, var_list=disc_vars)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()

    # Train: ten discriminator updates per generator update
    for step in range(1000):
        print("Iteration", step)
        batch_xs, batch_ys = mnist.train.next_batch(100)
        for d_to_g in range(10):
            r, g, d_loss, _ = sess.run(
                [y, y_labels, disc_loss, disc_train_step],
                feed_dict={
                    x: batch_xs,
                    y_labels: batch_ys
                })
        _, aux_vals, g_loss, _ = sess.run(
            [counter, gen_aux, generators_loss, generators_optimizers],
            feed_dict={
                x: batch_xs,
                y_labels: batch_ys
            })
        print("Discriminator loss:", d_loss, r.shape, g.shape)
        print(aux_vals)
        #print("Generator loss:", g_loss)

    if "--save" in args:
        saver.save(sess, './models/mnist_test_model', global_step=1000)

    print("\nTESTING DISCRIMINATOR ON MNIST DATA\n")
    batch_xs, batch_ys = mnist.train.next_batch(1)
    image_read.draw_ascii(np.asarray(batch_xs).reshape(-1), printOut=True)
    output = sess.run([y], feed_dict={x: batch_xs, y_labels: batch_ys})
    print("y guess:", output)
    print("y val:", batch_ys)

    print("\nTESTING GENERATORS OUTPUT\n")
    _, output = sess.run([counter, fake_x_vals],
                         feed_dict={batch_size: 1})
    for gen_ind in range(len(output)):
        print("GENERATOR", gen_ind)
        image_read.draw_ascii(np.asarray(output[gen_ind]).reshape(-1),
                              printOut=True)
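Examples 1 and 5 both construct Network() generators, but Example 1 treats calculate as returning a single tensor while Example 5 unpacks a pair, so the class evidently changed between versions. A minimal sketch following Example 5's two-value form; the layer sizes are assumptions, and the per-instance sub-scope is a hypothetical way to give ten instances independent weights while still matching the GAN/Generator collection prefix:

import tensorflow as tf

class Network(object):
    # Hypothetical stand-in for the project's Network generator class.
    _count = 0

    def __init__(self):
        Network._count += 1
        self._scope = "GAN/Generator/net%d" % Network._count

    def calculate(self, z):
        # Returns (generated image, hidden activations), per Example 5's unpacking.
        with tf.variable_scope(self._scope):
            h = tf.layers.dense(z, 128, activation=tf.nn.relu, name="h1")
            out = tf.layers.dense(h, 784, activation=tf.nn.sigmoid, name="out")
        return out, h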