Example #1
import numpy as np
import tensorflow as tf
# GAN and plot_samples are imported from the project's own modules; a minimal
# sketch of plot_samples is given after this example.


def main():
    # set session and rebuild the GAN without re-initializing its variables
    sess = tf.Session()
    model = GAN(sess=sess, init=False, gf_dim=128)
    # restore a previously trained WGAN checkpoint
    model.restore(model_path='hw3_1/model_file/WGAN_v2')

    # 25 noise vectors of dimension 100, one per cell of a 5x5 sample grid
    z_plot = np.random.uniform(-1., 1., size=[25, 100])
    img = model.generate(z_plot)
    # save a 5x5 grid of the generated samples under samples/
    plot_samples(img,
                 save=True,
                 h=5,
                 w=5,
                 filename='gan',
                 folder_path='samples/')


if __name__ == '__main__':
    main()
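plot_samples is not shown in these snippets; below is a minimal sketch of a helper matching the call above, assuming the generated images are NumPy arrays with values in [0, 1] (an illustration, not the original project's code):

import os

import matplotlib.pyplot as plt


def plot_samples(samples, save=False, filename='samples', folder_path='./', h=5, w=5):
    # draw the first h*w images on an h-by-w grid without axes
    fig, axes = plt.subplots(h, w, figsize=(w, h))
    for ax, img in zip(axes.flatten(), samples[:h * w]):
        if img.ndim == 3 and img.shape[-1] == 1:
            ax.imshow(img[:, :, 0], cmap='gray')  # single-channel image
        else:
            ax.imshow(img)  # RGB image
        ax.axis('off')
    if save:
        os.makedirs(folder_path, exist_ok=True)
        fig.savefig(os.path.join(folder_path, filename + '.png'), bbox_inches='tight')
    plt.close(fig)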
Example #2
import os
from zipfile import ZipFile

import cv2
import numpy as np
from tqdm import tqdm

from model import GAN
# Feeder and the NOISE_DIM, IMAGE_DIM, BATCH_SIZE, EPOCHS, IMAGES_BASE_FOLDER,
# LABEL_BASE_FOLDER and OUTPUT_ZIP_NAME constants are defined elsewhere in the
# original project.

################################
# Main
################################

if __name__ == '__main__':

    # Create and train the GAN
    model = GAN(noise_dim=NOISE_DIM,
                image_dim=IMAGE_DIM,
                name='gan',
                debug=False)
    feed = Feeder(IMAGES_BASE_FOLDER, LABEL_BASE_FOLDER, batch_size=BATCH_SIZE)
    model.train(feed, epochs=EPOCHS)

    # Save submission: generate 10,000 images in batches, write each one to disk,
    # add it to the zip archive, then delete the temporary file
    with ZipFile(OUTPUT_ZIP_NAME, 'w') as zip_file:
        batch_size = 100
        num_batches = 10000 // batch_size
        for idx_batch in tqdm(range(num_batches), desc='Writing test images'):
            # generator output (assumed in [0, 1]) is scaled to uint8 for PNG encoding
            images_gen = (
                model.generate(num_images=batch_size, seed=idx_batch + 1) *
                255).astype(np.uint8)
            for idx_image, image_gen in enumerate(images_gen):
                image_name = '{}.png'.format((idx_batch + 1) * batch_size +
                                             idx_image)
                cv2.imwrite(image_name, image_gen)
                zip_file.write(image_name)
                os.remove(image_name)
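The write-then-delete round-trip above can be avoided by encoding each image in memory with cv2.imencode and streaming it into the archive via ZipFile.writestr. A minimal sketch (write_submission is a hypothetical helper name; the model.generate call and file-naming scheme are carried over from the example):

import cv2
import numpy as np
from zipfile import ZipFile


def write_submission(model, zip_path, total_images=10000, batch_size=100):
    # encode each generated image as PNG in memory and stream it into the archive,
    # avoiding the temporary files created by cv2.imwrite / os.remove above
    with ZipFile(zip_path, 'w') as zf:
        for idx_batch in range(total_images // batch_size):
            batch = (model.generate(num_images=batch_size, seed=idx_batch + 1) *
                     255).astype(np.uint8)
            for idx_image, image in enumerate(batch):
                ok, buffer = cv2.imencode('.png', image)
                name = '{}.png'.format((idx_batch + 1) * batch_size + idx_image)
                zf.writestr(name, buffer.tobytes())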
Example #3
import os

import tensorflow as tf
# GAN, dataset, sample_z and plot_samples are project-specific helpers; a minimal
# sketch of sample_z follows this example. bs is the training batch size; its
# value here is an assumption, not part of the original snippet.
bs = 64


def main():
    # restrict TensorFlow to the first GPU card
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    # load anime face
    data_dir = '../anime_face/data_64/images/'
    data_extra_dir = '../anime_face/extra_data/images/'
    ds = dataset()
    ds.load_data(data_dir, verbose=0)
    ds.load_data(data_extra_dir, verbose=0)
    ds.shuffle()

    # reset graph
    tf.reset_default_graph()

    # set session; let TensorFlow allocate GPU memory on demand
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    # build model
    model = GAN(sess, gf_dim=128)

    # fixed noise used to plot the same 6x6 sample grid throughout training
    z_plot = sample_z(36, 100)

    # initial noise batch (overwritten inside the training loop)
    z = sample_z(bs, 100)
    i = 1
    while True:
        # plot samples more often early in training, then every 200 iterations
        if (i == 1 or (i <= 100 and i % 20 == 0) or (i <= 200 and i % 50 == 0)
                or (i <= 1000 and i % 100 == 0) or i % 200 == 0):
            g_samples = model.generate(z_plot)
            plot_samples(g_samples,
                         save=True,
                         filename=str(i),
                         folder_path='out2/',
                         h=6,
                         w=6)

        # WGAN-style schedule: update the discriminator (critic) five times
        # for every generator update
        for _ in range(5):
            real_img = ds.next_batch(bs)
            z = sample_z(bs, 100)
            fake_img = model.generate(z)
            D_loss = model.train_D(real_img, fake_img)

        # one generator update per iteration
        G_loss = model.train_G(bs)

        # every 100 iterations: save a checkpoint and report the current losses
        if i % 100 == 0:
            model.save(model_name='WGAN_v2')
            z_eval = sample_z(64, 100)                  # noise fed to the z placeholder
            g_eval = model.generate(sample_z(32, 100))  # generated images fed to the D input
            g, d = model.sess.run([model.G_loss, model.D_loss],
                                  feed_dict={
                                      model.xs: ds.random_sample(32),
                                      model.gs: g_eval,
                                      model.zs: z_eval
                                  })
            print(str(i) + ' iteration:')
            print('D_loss:', d)
            print('G_loss:', g, '\n')

        i += 1


if __name__ == '__main__':
    main()
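sample_z is not defined in this snippet; a minimal sketch, assuming it draws uniform noise in [-1, 1] of shape [batch_size, dim] like the np.random.uniform call in Example #1:

import numpy as np


def sample_z(batch_size, dim):
    # batch of uniform noise vectors in [-1, 1], shape [batch_size, dim]
    return np.random.uniform(-1., 1., size=[batch_size, dim])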