Example #1
def run_gan():
    (train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data()
    print(train_images.shape)

    train_images = train_images.reshape(train_images.shape[0], 28, 28,
                                        1).astype('float32')
    train_images = (train_images - 127.5) / 127.5  # Normalize images to [-1,1]
    print(train_images.shape)

    # Batch and shuffle the data
    train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(
        BUFFER_SIZE).batch(BATCH_SIZE)

    gan = DCGAN(gen_lr, disc_lr, batch_size=BATCH_SIZE, noise_dim=NOISE_DIM)
    gan.create_generator()
    gan.create_discriminator()

    # Test generator
    random_noise = tf.random.normal([1, NOISE_DIM])
    generated_image = gan.generator(random_noise)
    #plt.imshow(generated_image[0,:,:,0],cmap='gray')
    #plt.show()
    # Test discriminator (it outputs a raw logit; squash with sigmoid to get a probability)
    prob = gan.discriminator(generated_image)
    print("Probability of image being real: {}".format(sigmoid(prob)))

    gan.set_noise_seed(num_examples_to_generate)
    gan.set_checkpoint(path=save_ckpt_path)
    gen_loss_array, disc_loss_array = gan.train(train_dataset, epochs=EPOCHS)

    # Plot generator and discriminator loss curves
    plt.plot(range(EPOCHS), gen_loss_array)
    plt.plot(range(EPOCHS), disc_loss_array)
    plt.show()
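The function above relies on module-level configuration that this excerpt does not show (BUFFER_SIZE, BATCH_SIZE, NOISE_DIM, EPOCHS, gen_lr, disc_lr, num_examples_to_generate, save_ckpt_path, sigmoid, and the DCGAN class itself). A minimal sketch of that setup follows; all values are illustrative placeholders rather than the original settings, and the sigmoid helper is a plausible stand-in.

# Hypothetical module-level setup assumed by run_gan(); values are illustrative only.
import tensorflow as tf
import matplotlib.pyplot as plt

BUFFER_SIZE = 60000              # shuffle buffer, sized to the MNIST training set
BATCH_SIZE = 256                 # images per training batch
NOISE_DIM = 100                  # dimensionality of the generator's input noise
EPOCHS = 50                      # number of training epochs
gen_lr = 1e-4                    # generator learning rate
disc_lr = 1e-4                   # discriminator learning rate
num_examples_to_generate = 16    # images produced from the fixed noise seed
save_ckpt_path = './training_checkpoints'  # placeholder checkpoint directory

def sigmoid(x):
    # Convert the discriminator's raw logit into a probability in (0, 1).
    return tf.math.sigmoid(x)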
Example #2
def main():
    args, save_dir, load_dir = check_args(parse_arguments())
    global BATCH_SIZE
    BATCH_SIZE = args.batch_size
    config_path = os.path.join(load_dir, 'params.pkl')
    if os.path.exists(config_path):
        config = pickle.load(open(config_path, 'rb'))
        output_width = config['output_width']
        output_height = config['output_height']
        resolution = output_height
        z_dim = config['z_dim']
    else:
        output_width = output_height = 64
        resolution = 64
        z_dim = 100

    ### open session
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True
    with tf.Session(config=run_config) as sess:
        dcgan = DCGAN(sess,
                      output_width=output_width,
                      output_height=output_height,
                      batch_size=BATCH_SIZE,
                      sample_num=BATCH_SIZE,
                      z_dim=z_dim)

        load_success, load_counter = dcgan.load(load_dir)
        if not load_success:
            raise Exception("Checkpoint not found in " + load_dir)

        ### initialization
        init_val_ph = None
        init_val = {'pos': None, 'neg': None}
        if args.initialize_type == 'zero':
            # Start every latent code in the batch at the origin.
            z = tf.Variable(tf.zeros([BATCH_SIZE, z_dim], tf.float32),
                            name='latent_z')

        elif args.initialize_type == 'random':
            # Draw one random latent code and repeat it for every image in the batch.
            np.random.seed(RANDOM_SEED)
            init_val_np = np.random.normal(size=(z_dim,))
            init = np.tile(init_val_np, (BATCH_SIZE, 1)).astype(np.float32)
            z = tf.Variable(init, name='latent_z')

        elif args.initialize_type == 'nn':
            # Initialize from latent codes precomputed offline (loaded from args.nn_dir)
            # and fed in through a placeholder.
            idx = 0
            init_val['pos'] = np.load(os.path.join(args.nn_dir,
                                                   'pos_z.npy'))[:, idx, :]
            init_val['neg'] = np.load(os.path.join(args.nn_dir,
                                                   'neg_z.npy'))[:, idx, :]
            init_val_ph = tf.placeholder(dtype=tf.float32,
                                         name='init_ph',
                                         shape=(BATCH_SIZE, z_dim))
            z = tf.Variable(init_val_ph, name='latent_z')

        else:
            raise NotImplementedError

        ### define variables: x holds the query images, x_hat is the generator's output for latent code z
        x = tf.placeholder(tf.float32,
                           shape=(BATCH_SIZE, resolution, resolution, 3))
        x_hat = dcgan.generator(z, is_training=False)

        ### loss
        if args.distance == 'l2':
            print('use distance: l2')
            loss_l2 = tf.reduce_mean(tf.square(x_hat - x), axis=[1, 2, 3])
            vec_loss = loss_l2
            vec_losses = {'l2': loss_l2}

        elif args.distance == 'l2-lpips':
            print('use distance: lpips + l2')
            loss_l2 = tf.reduce_mean(tf.square(x_hat - x), axis=[1, 2, 3])
            loss_lpips = lpips_tf.lpips(x_hat,
                                        x,
                                        normalize=False,
                                        model='net-lin',
                                        net='vgg',
                                        version='0.1')
            vec_losses = {'l2': loss_l2, 'lpips': loss_lpips}
            vec_loss = loss_l2 + LAMBDA2 * loss_lpips
        else:
            raise NotImplementedError

        ### regularizer: keep ||z||^2 close to its expected value (z_dim) under a standard normal prior
        norm = tf.reduce_sum(tf.square(z), axis=1)
        norm_penalty = (norm - z_dim)**2

        if args.if_norm_reg:
            loss = tf.reduce_mean(
                vec_loss) + LAMBDA3 * tf.reduce_mean(norm_penalty)
            vec_losses['norm'] = norm_penalty
        else:
            loss = tf.reduce_mean(vec_loss)

        ### set up optimizer: L-BFGS-B updates only the latent code z; the generator weights stay fixed
        opt = tf.contrib.opt.ScipyOptimizerInterface(
            loss,
            var_list=[z],
            method='L-BFGS-B',
            options={'maxfun': args.maxfunc})

        ### load query images
        pos_data_paths = get_filepaths_from_dir(args.pos_data_dir,
                                                ext='png')[:args.data_num]
        pos_query_imgs = np.array(
            [read_image(f, resolution) for f in pos_data_paths])

        neg_data_paths = get_filepaths_from_dir(args.neg_data_dir,
                                                ext='png')[:args.data_num]
        neg_query_imgs = np.array(
            [read_image(f, resolution) for f in neg_data_paths])

        ### run the optimization on query images
        query_loss, query_z, query_xhat = optimize_z(
            sess, z, x, x_hat, init_val_ph, init_val['pos'], pos_query_imgs,
            check_folder(os.path.join(save_dir, 'pos_results')), opt, vec_loss,
            vec_losses)
        save_files(save_dir, ['pos_loss'], [query_loss])

        query_loss, query_z, query_xhat = optimize_z(
            sess, z, x, x_hat, init_val_ph, init_val['neg'], neg_query_imgs,
            check_folder(os.path.join(save_dir, 'neg_results')), opt, vec_loss,
            vec_losses)
        save_files(save_dir, ['neg_loss'], [query_loss])
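As in the first example, main() depends on names defined elsewhere in its module (RANDOM_SEED, LAMBDA2, LAMBDA3, parse_arguments, check_args, get_filepaths_from_dir, read_image, check_folder, save_files, optimize_z, and the TF1-style DCGAN class). A minimal sketch of the imports and constants it assumes is given below; the numeric values are illustrative placeholders, not the original settings.

# Hypothetical imports and constants assumed by main(); values are illustrative only.
import os
import pickle

import numpy as np
import tensorflow as tf   # TF1.x-style API (tf.placeholder, tf.Session, tf.contrib)
import lpips_tf           # TensorFlow port of the LPIPS perceptual distance

RANDOM_SEED = 1000   # seed for the 'random' latent initialization
LAMBDA2 = 0.2        # weight of the LPIPS term in the 'l2-lpips' distance
LAMBDA3 = 0.001      # weight of the latent-norm regularizer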