Example #1
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('image_dir', type=str)
    parser.add_argument('--batch_size', '-bs', type=int, default=64)
    parser.add_argument('--nb_epoch', '-e', type=int, default=1000)
    parser.add_argument('--noise_dim', '-nd', type=int, default=100)
    parser.add_argument('--height', '-ht', type=int, default=128)
    parser.add_argument('--width', '-wd', type=int, default=128)
    parser.add_argument('--save_steps', '-ss', type=int, default=1)
    parser.add_argument('--visualize_steps', '-vs', type=int, default=1)
    parser.add_argument('--logdir', '-ld', type=str, default="../logs")
    parser.add_argument('--noise_mode', '-nm', type=str, default="uniform")
    parser.add_argument('--upsampling', '-up', type=str, default="deconv")
    parser.add_argument('--metrics', '-m', type=str, default="JSD")
    parser.add_argument('--lr_d', type=float, default=1e-4)
    parser.add_argument('--lr_g', type=float, default=1e-4)
    parser.add_argument('--norm_d', type=str, default=None)
    parser.add_argument('--norm_g', type=str, default=None)
    parser.add_argument('--model', type=str, default='residual')

    args = parser.parse_args()

    # output config to csv
    args_to_csv(os.path.join(args.logdir, 'config.csv'), args)

    input_shape = (args.height, args.width, 3)

    image_sampler = ImageSampler(args.image_dir,
                                 target_size=(args.width, args.height))
    noise_sampler = NoiseSampler(args.noise_mode)

    if args.model == 'residual':
        generator = ResidualGenerator(args.noise_dim,
                                      target_size=(args.width, args.height),
                                      upsampling=args.upsampling,
                                      normalization=args.norm_g)
        discriminator = ResidualDiscriminator(input_shape,
                                              normalization=args.norm_d)
    elif args.model == 'plane':
        generator = Generator(args.noise_dim,
                              upsampling=args.upsampling,
                              normalization=args.norm_g)
        discriminator = Discriminator(input_shape,
                                      normalization=args.norm_d)
    else:
        raise ValueError("unknown model: {}".format(args.model))

    gan = GAN(generator,
              discriminator,
              metrics=args.metrics,
              lr_d=args.lr_d,
              lr_g=args.lr_g)

    gan.fit(image_sampler.flow_from_directory(args.batch_size),
            noise_sampler,
            nb_epoch=args.nb_epoch,
            logdir=args.logdir,
            save_steps=args.save_steps,
            visualize_steps=args.visualize_steps)
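Example #1 relies on an args_to_csv helper that is not shown; a minimal sketch of such a helper (the function name comes from the snippet, the CSV layout is an assumption) could look like this:

import csv
import os

def args_to_csv(path, args):
    # Hypothetical helper: write each parsed argparse option and its value as one CSV row.
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, 'w', newline='') as f:
        writer = csv.writer(f)
        for key, value in sorted(vars(args).items()):
            writer.writerow([key, value])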
Example #2
def main(batch_size, file_dir):
    # Prepare the dataset from the given directory.
    x = get_data(file_dir)
    
    gan = GAN(discriminator=discriminator, generator=generator, latent_dim=latent_dim)
    gan.compile(
        d_optimizer=keras.optimizers.Adam(learning_rate=0.0003),
        g_optimizer=keras.optimizers.Adam(learning_rate=0.0003),
        loss_fn=keras.losses.BinaryCrossentropy(from_logits=True)
    )
    # Print the model summaries, then train. About 20 epochs are needed to get nice results.
    generator.summary()
    discriminator.summary()
    history = gan.fit(x, batch_size=batch_size, epochs=20)
    g_loss, d_loss = history.history['g_loss'], history.history['d_loss']
    plt.plot(g_loss)
    plt.plot(d_loss)
    plt.xticks(np.arange(0, 20, step=1))  # Set label locations.
    plt.xlabel('epochs')
    plt.ylabel('loss')
    plt.title('Protein Structure Generation With DCGAN')
    # print(xticks(np.arange(0, 20, step=1)))
    # pred = np.stack(history.history['pred'], axis=0)
    # labels = np.stack(history.history['label'], axis=0)
    # accuracies = get_accuracies(pred, labels)
    # plt.plot(accuracies)
    plt.legend(['Generator loss', 'Discriminator loss'], loc='upper right')
    plt.show()
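Example #2 assumes that discriminator, generator and latent_dim are defined at module level. The exact architectures are not part of the snippet; a minimal MNIST-sized sketch in the style of the Keras DCGAN tutorial would be:

from tensorflow import keras
from tensorflow.keras import layers

latent_dim = 128  # assumed value; the snippet does not define it

generator = keras.Sequential([
    keras.Input(shape=(latent_dim,)),
    layers.Dense(7 * 7 * 128),
    layers.Reshape((7, 7, 128)),
    layers.Conv2DTranspose(128, kernel_size=4, strides=2, padding="same", activation="relu"),
    layers.Conv2DTranspose(128, kernel_size=4, strides=2, padding="same", activation="relu"),
    layers.Conv2D(1, kernel_size=7, padding="same", activation="sigmoid"),
], name="generator")

discriminator = keras.Sequential([
    keras.Input(shape=(28, 28, 1)),
    layers.Conv2D(64, kernel_size=4, strides=2, padding="same"),
    layers.LeakyReLU(alpha=0.2),
    layers.Conv2D(128, kernel_size=4, strides=2, padding="same"),
    layers.LeakyReLU(alpha=0.2),
    layers.Flatten(),
    layers.Dropout(0.2),
    layers.Dense(1),  # logits, matching from_logits=True in the snippet
], name="discriminator")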
Example #3
def test_gan(
        n_iters=1000,
        learning_rate=1e-4,
        n_mc_samples=1,
        scale_init=0.01,
        dim_z=2,
    ):

    datasets = load_data('../20150717-/mnist.pkl.gz')

    train_set, validate_set = datasets
    train_x, train_y = train_set
    validate_x, validate_y = validate_set
    xs = np.r_[train_x, validate_x]
    optimize_params = {
        'learning_rate' : learning_rate,
        'n_iters'       : n_iters,
        'minibatch_size': 100,
        'calc_history'     : 'all',
        'calc_hist'     : 'all',
        'n_mod_history'    : 100,
        'n_mod_hist'    : 100,
        'patience'      : 5000,
        'patience_increase': 2,
        'improvement_threshold': 0.995,
    }

    all_params = {
        'hyper_params': {
            'rng_seed'          : 1234,
            'dim_z'             : dim_z,
            'n_hidden'          : [500, 500],
            'n_mc_sampling'     : n_mc_samples,
            'scale_init'        : scale_init,
            'nonlinear_q'       : 'relu',
            'nonlinear_p'       : 'relu',
            'type_px'           : 'bernoulli',
            'optimizer'         : 'adam',
            'learning_process'  : 'early_stopping'
        }
    }
    all_params.update({'optimize_params': optimize_params})

    model = GAN(**all_params)
    model.fit(xs)

    return datasets, model
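Example #3 unpacks two dataset splits from load_data; a minimal sketch of a loader for the classic Theano-style mnist.pkl.gz (the file pickles train/validation/test tuples of images and labels) might be:

import gzip
import pickle

def load_data(path):
    # Hypothetical loader: return only the train and validation splits,
    # matching the two-way unpacking in the snippet above.
    with gzip.open(path, 'rb') as f:
        train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
    return train_set, valid_set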
Example #4
def train(dataset,
          epochs=30,
          num_images=1,
          latent_dim=256,
          learning_rate_g=0.00005,
          learning_rate_d=0.00005):
    discriminator = Discriminator().discriminator
    generator = Generator(latent_dim).generator

    gan = GAN(discriminator, generator, latent_dim)
    gan.compile(
        d_optimizer=keras.optimizers.Adam(learning_rate_d),
        g_optimizer=keras.optimizers.Adam(learning_rate_g),
        loss_function=keras.losses.BinaryCrossentropy(from_logits=True),
    )

    gan.fit(dataset,
            epochs=epochs,
            callbacks=[GANMonitor(num_images, latent_dim)])

    # gan.save("gan_model.h5")
    generator.save("generator_model.h5")
    discriminator.save("discriminator_model.h5")
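The GANMonitor callback used in Example #4 is not shown; a minimal sketch in the spirit of the Keras DCGAN guide, assuming the GAN model exposes a .generator attribute and produces images in [-1, 1], could be:

import tensorflow as tf
from tensorflow import keras

class GANMonitor(keras.callbacks.Callback):
    # Hypothetical callback: sample a few latent vectors at the end of every
    # epoch and save the generated images to disk.
    def __init__(self, num_img=1, latent_dim=256):
        super().__init__()
        self.num_img = num_img
        self.latent_dim = latent_dim

    def on_epoch_end(self, epoch, logs=None):
        latent_vectors = tf.random.normal(shape=(self.num_img, self.latent_dim))
        generated = self.model.generator(latent_vectors, training=False)
        generated = generated * 127.5 + 127.5  # assumes generator outputs in [-1, 1]
        for i in range(self.num_img):
            keras.utils.array_to_img(generated[i]).save(
                "generated_img_{:03d}_{}.png".format(epoch, i))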
Example #5
    images_path = glob(sys.argv[1] + "/" + sys.argv[2] + "_resized/*")

    d_model = build_discriminator()
    g_model = build_generator(latent_dim)

    d_model.summary()
    g_model.summary()

    gan = GAN(d_model, g_model, latent_dim)

    bce_loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True,
                                                     label_smoothing=0.1)
    d_optimizer = tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5)
    g_optimizer = tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5)
    gan.compile(d_optimizer, g_optimizer, bce_loss_fn)

    images_dataset = tf_dataset(images_path, batch_size)

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs))
        gan.fit(images_dataset, epochs=1)
        g_model.save(sys.argv[1] + "/" + sys.argv[2] +
                     "_saved_models/g_model.h5")
        d_model.save(sys.argv[1] + "/" + sys.argv[2] +
                     "_saved_models/d_model.h5")

        noise = np.random.normal(size=(n_samples, latent_dim))
        examples = g_model.predict(noise)
        save_plot(sys.argv[1] + "/" + sys.argv[2] + "_samples", examples,
                  epoch, int(np.sqrt(n_samples)), IMG_CHANNEL)
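Example #5 depends on tf_dataset and save_plot helpers that are not shown (along with latent_dim, batch_size, num_epochs, n_samples and IMG_CHANNEL). Minimal sketches, assuming JPEG inputs resized to a fixed size and generator outputs in [-1, 1], could be:

import tensorflow as tf
import matplotlib.pyplot as plt

IMG_H, IMG_W = 64, 64  # assumed target size

def load_image(path):
    img = tf.io.read_file(path)
    img = tf.image.decode_jpeg(img, channels=3)
    img = tf.image.resize(img, (IMG_H, IMG_W))
    return (img - 127.5) / 127.5  # scale to [-1, 1]

def tf_dataset(image_paths, batch_size):
    # Hypothetical pipeline: shuffle paths, decode images, batch and prefetch.
    ds = tf.data.Dataset.from_tensor_slices(image_paths)
    ds = ds.shuffle(buffer_size=len(image_paths))
    ds = ds.map(load_image, num_parallel_calls=tf.data.AUTOTUNE)
    return ds.batch(batch_size, drop_remainder=True).prefetch(tf.data.AUTOTUNE)

def save_plot(out_dir, examples, epoch, n, channels):
    # Hypothetical plotting helper: tile an n x n grid of generated samples.
    examples = (examples + 1) / 2.0  # back to [0, 1] for display
    for i in range(n * n):
        plt.subplot(n, n, i + 1)
        plt.axis("off")
        if channels == 1:
            plt.imshow(examples[i, :, :, 0], cmap="gray")
        else:
            plt.imshow(examples[i])
    plt.savefig("{}/generated_plot_epoch-{}.png".format(out_dir, epoch))
    plt.close()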
Example #6
import torch

from gan import Data_Manager, GAN

# --------------------------------------------------------------------------------
if __name__ == '__main__':
    device_id = '3'

    device = torch.device('cpu')
    if torch.cuda.is_available():
        device = torch.device('cuda:{}'.format(device_id))

    batch_size = 64

    img_size = 32
    img_shape = [1, img_size, img_size]
    n_classes = 10

    lr = 2e-4
    gen_z_dim = 100

    # Data Manager
    data_manager = Data_Manager(batch_size, img_size)

    # GAN
    model = GAN(data_manager=data_manager,
                device=device,
                img_shape=img_shape,
                epochs=1,
                lr=lr,
                gen_z_dim=gen_z_dim)
    model.fit()
Example #7
    # training config
    epochs = 500
    batch_size = 16  # mini-batch size
    sample_size = 128  # number of samples to train d&g over one epoch
    learning_rate = {'pretrain': 1e-5, 'g': 1e-3, 'd': 1e-3}
    k_step = 1  # step of training discriminator over one epoch
    sample_rate = 50  # save results every sample_rate epochs
    dropout = 0
    pretrain_epochs = 30  # 0 is not to do pretrain
    teacher_forcing_rate = 0.7

    save_path = '/home/wu/projects/emo-gan/chkpt/rnngan'
    # load data
    train_data_path = "/home/wu/mounts/Emo-gesture/train_set.pkl"
    with open(train_data_path, 'rb') as f:
        data = pickle.load(f)
    dataset = MyDataset(data, max_len=max_len, num_joints=3, dim=dim)

    gan = GAN(latent_code_size,
              hidden_size,
              generator_output_size,
              discriminator_output_size,
              num_layers,
              bidirectional,
              relu_slope,
              dropout,
              max_len=300)
    gan.fit(dataset, epochs, batch_size, sample_size, learning_rate, k_step,
            sample_rate, pretrain_epochs, teacher_forcing_rate, save_path)
Example #8
# coding: utf-8
from gan import GAN

gan = GAN()
gan.fit()
gan.predict()
Example #9
    # delete old renders
    shutil.rmtree(config.render_dir)
    os.mkdir(config.render_dir)
else:
    start_iter = param.start_iter
    model.restore_model(config.save_dir + 'model.ckpt')
    print('Resuming training from iter %d' % start_iter)

# training
for iter_no in range(start_iter, config.N_iter):

    for dis_iter in range(config.dis_n_iter):
        noise = np.random.normal(size=[config.batch_size, config.z_size],
                                 scale=0.2)
        data = all_pc_data.next_batch(config.batch_size)[0]
        model.fit(data, noise, iter_no, dis_iter)

    noise = np.random.normal(size=[config.batch_size, config.z_size],
                             scale=0.2)
    data = all_pc_data.next_batch(config.batch_size)[0]
    pc_gen = model.fit(data, noise, iter_no, config.dis_n_iter)

    if iter_no % 1000 == 0:
        sio.savemat('%srender_%d.mat' % (config.render_dir, iter_no),
                    {'X_hat': pc_gen})

    # save image of point cloud
    if iter_no % config.renders_every_iter == 0:
        pc_gen = np.reshape(pc_gen[0, :], [2048, 3])
        im_array = point_cloud_three_views(pc_gen)
        img = Image.fromarray(np.uint8(im_array * 255.0))
Example #10
        model = GAN(gex_size=train.shape[1], num_cells_generate=test.shape[0])
        model.compile()
        model.build(input_shape=(model.hyperparams.batch_size, model.hyperparams.latent_dim))  # req. for subclassed models

    # process data for training
    train_tf = tf.data.Dataset.from_tensor_slices(train.X). \
        cache(). \
        shuffle(buffer_size=train.shape[0], seed=utils.RANDOM). \
        batch(batch_size=model.hyperparams.batch_size * strategy.num_replicas_in_sync, num_parallel_calls=tf.data.AUTOTUNE). \
        prefetch(buffer_size=tf.data.AUTOTUNE)
    train_tf_distributed = strategy.experimental_distribute_dataset(train_tf)

    test_tf = tf.data.Dataset.from_tensor_slices(test.X). \
        cache(). \
        shuffle(buffer_size=test.shape[0], seed=utils.RANDOM). \
        prefetch(buffer_size=tf.data.AUTOTUNE)

    tb_callback = tf.keras.callbacks.TensorBoard(log_dir=utils.LOG_DIR + datetime.datetime.now().strftime("%Y%m%d-%H%M%S"),
                                                 update_freq='epoch',
                                                 write_graph=False,
                                                 profile_batch=0)

    model.fit(x=train_tf_distributed,
              epochs=model.hyperparams.num_epochs,
              steps_per_epoch=int(train.shape[0] / model.hyperparams.batch_size),  # steps = number of batches per epoch
              callbacks=[tb_callback,
                         TrainingMetrics(tb_callback),
                         tf.keras.callbacks.ModelCheckpoint(period=100, filepath='training/model_{epoch}')])
    # Save model
    model.save("training/model_final")
Example #11
def main(num_faces, num_epochs, cuda, verbose=False):
    # set computation device (None/CPU if in development mode, CUDA otherwise)
    device = torch.device("cuda:0") if cuda else None

    # load faces
    masked_dir = "../data/masked"
    masked_suffix = "_Mask.jpg"
    unmasked_dir = "../data/unmasked"
    unmasked_suffix = ".png"
    masked_faces, unmasked_faces, idx_to_face_id = utils.load_faces(
        num_faces, masked_dir, masked_suffix, unmasked_dir, unmasked_suffix)
    if verbose:
        print("loaded {} faces...".format(num_faces))

    # split data into training and testing sets
    split = int(0.8 * num_faces)
    train_input, train_output = (
        masked_faces[:split],
        torch.Tensor(range(0, split)).long(),
    )
    test_input, test_output = (
        masked_faces[split:],
        torch.Tensor(range(split, num_faces)).long(),
    )
    static_faces = unmasked_faces[:split]

    # instantiate GAN
    generator = Generator(learning_rate=2e-3)
    projector = Projector(load_path="../models/projector.pt")
    discriminator = Discriminator()
    gan = GAN(generator, projector, discriminator, device=device)
    if verbose:
        print("instantiated GAN...")

    # compute and store unmasked discriminator embeddings
    gan.compute_unmasked_embeddings(unmasked_faces)

    # train
    if verbose:
        print("training initiated...")
    gan.fit(train_input,
            static_faces,
            train_output,
            num_epochs=num_epochs,
            verbose=verbose)
    if verbose:
        print("\ntraining complete...")

    # save models
    save_dir = "../models"
    suffix = time.strftime("%Y%m%d_%H%M%S")
    gan.save(save_dir, suffix)
    if verbose:
        print("models saved under '{}/<model>_{}'...".format(save_dir, suffix))

    # display sample masks and faces
    plt.figure()
    fig, axes = plt.subplots(2, 5)
    fig.set_figwidth(20)
    fig.set_figheight(7)
    for idx in range(5):
        # original image
        face_id = idx_to_face_id[idx]
        original_img = Image.open("../data/masked/{}_Mask.jpg".format(face_id))
        axes[0, idx].imshow(original_img)
        axes[0, idx].get_xaxis().set_ticks([])
        axes[0, idx].get_yaxis().set_ticks([])

        # generated mask image
        mask = (gan.generator(torch.rand(1, 100).to(device))
                if device else gan.generator())
        masked_tensor = masked_faces[idx].unsqueeze(0)
        if device:
            masked_tensor = masked_tensor.to(device)
        masked_image = gan.project_mask(mask, masked_tensor, process=True)[0]
        masked_image = torch.transpose(masked_image, 0, 1)
        masked_image = torch.transpose(masked_image, 1, 2)
        masked_image = masked_image.cpu().detach().numpy()
        axes[1, idx].imshow(masked_image)
        axes[1, idx].get_xaxis().set_ticks([])
        axes[1, idx].get_yaxis().set_ticks([])
    plt.savefig("../figures/sample_masks.png")

    # evaluate accuracy
    train_accuracy = gan.evaluate(train_input, train_output)
    test_accuracy = gan.evaluate(test_input, test_output)
    masked_accuracy = gan.discriminator_evaluate(masked_faces, unmasked_faces)
    unmasked_accuracy = gan.discriminator_evaluate(unmasked_faces,
                                                   unmasked_faces)
    print("\nfacial recognition accuracy for...")
    print("   random choice:\t\t{:.1f}%".format(100 / num_faces))
    print("   training images:\t\t{:.1f}%".format(100 * train_accuracy))
    print("   testing images:\t\t{:.1f}%".format(100 * test_accuracy))
    print("   original masked images:\t{:.1f}%".format(100 * masked_accuracy))
    print("   original unmasked images:\t{:.1f}%".format(100 *
                                                         unmasked_accuracy))

    # write results to file
    file_path = "../data/accuracy.txt"
    with open(file_path, "w") as file:
        file.write("facial recognition accuracy for...")
        file.write("\n   random choice:\t\t{:.1f}%".format(100 / num_faces))
        file.write("\n   training images:\t\t{:.1f}%".format(100 *
                                                             train_accuracy))
        file.write("\n   testing images:\t\t{:.1f}%".format(100 *
                                                            test_accuracy))
        file.write("\n   original masked images:\t{:.1f}%".format(
            100 * masked_accuracy))
        file.write("\n   original unmasked images:\t{:.1f}%".format(
            100 * unmasked_accuracy))
    if verbose:
        print("\nsaved results...")
        print("done:)")
Example #12
            create_results_dir()

            # load MNIST
            X_train = np.load('MNIST_data/mnist_train_x.npy')

            if args.model == 0:
                X_train = X_train.reshape(60000, 784)
            elif args.model == 1:
                X_train = X_train.reshape(60000, 28, 28, 1)
                X_train = tf.image.resize_images(X_train, [64, 64]).eval()

            # normalize -1 to 1
            X_train = (X_train.astype(np.float32) - 127.5) / 127.5

            #train
            model.fit(X_train)

        else:
            model.load(args.restoring_epoch)
            if args.model == 0:
                img_path = 'results/gan_genarated_image.png'
                img_size = (28, 28)
            elif args.model == 1:
                img_path = 'results/dcgan_genarated_image.png'
                img_size = (64, 64)
            save_results(model,
                         args.restoring_epoch,
                         img_path,
                         dim=(7, 7),
                         figsize=(7, 7),
                         img_size=img_size)