Example #1
def main():
    # Load a batch of images (to feed to the discriminator)
    dataset_iterator = load_image_batch(args.img_dir, batch_size=args.batch_size, n_threads=args.num_data_threads)

    # Initialize generator and discriminator models
    generator = Generator_Model()
    discriminator = Discriminator_Model()

    # For saving/loading models
    checkpoint_dir = './checkpoints'
    checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
    checkpoint = tf.train.Checkpoint(generator=generator, discriminator=discriminator)
    manager = tf.train.CheckpointManager(checkpoint, checkpoint_dir, max_to_keep=3)
    # Ensure the output directory exists
    if not os.path.exists(args.out_dir):
        os.makedirs(args.out_dir)

    if args.restore_checkpoint or args.mode == 'test':
        checkpoint.restore(manager.latest_checkpoint)
    try:
        # Run on the device given by args; an invalid device raises the RuntimeError caught below
        with tf.device('/device:' + args.device):
            if args.mode == 'train':
                for epoch in range(0, args.num_epochs):
                    print('========================== EPOCH %d  ==========================' % epoch)
                    avg_fid = train(generator, discriminator, dataset_iterator, manager)
                    print("Average FID for Epoch: " + str(avg_fid))
                    # Save at the end of the epoch, too
                    print("**** SAVING CHECKPOINT AT END OF EPOCH ****")
                    manager.save()
            if args.mode == 'test':
                test(generator)
    except RuntimeError as e:
        print(e)
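
The train helper that this epoch loop calls is not shown in the example. Below is a minimal sketch of what a per-epoch GAN update with that signature could look like; generator.loss_function, discriminator.loss_function, the model-attached optimizers, args.z_dim, args.save_every, and fid_function are all assumed names, not confirmed by the source.

def train(generator, discriminator, dataset_iterator, manager):
    # Sketch only: alternate discriminator/generator updates over each batch
    total_fid, num_batches = 0.0, 0
    for iteration, batch in enumerate(dataset_iterator):
        # Sample latent vectors for this batch (args.z_dim is an assumption)
        z = tf.random.uniform([batch.shape[0], args.z_dim], minval=-1, maxval=1)
        with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
            fake_images = generator(z)
            real_logits = discriminator(batch)
            fake_logits = discriminator(fake_images)
            g_loss = generator.loss_function(fake_logits)
            d_loss = discriminator.loss_function(real_logits, fake_logits)
        gen_grads = gen_tape.gradient(g_loss, generator.trainable_variables)
        disc_grads = disc_tape.gradient(d_loss, discriminator.trainable_variables)
        generator.optimizer.apply_gradients(zip(gen_grads, generator.trainable_variables))
        discriminator.optimizer.apply_gradients(zip(disc_grads, discriminator.trainable_variables))
        # Periodically checkpoint and accumulate FID (fid_function is an assumed helper)
        if iteration % args.save_every == 0:
            manager.save()
        total_fid += fid_function(batch, fake_images)
        num_batches += 1
    return total_fid / num_batches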
Example #2
        with tf.GradientTape() as tape:
            means, eps, outputs = wae.call(batch)
            loss = wae.loss(means, eps, batch, outputs)
        gradients = tape.gradient(loss, wae.trainable_variables)
        wae.optimizer.apply_gradients(zip(gradients, wae.trainable_variables))
        print(iteration, 'loss', loss)
        # periodically save the loss history and model weights (currently disabled)
        # if iteration % 1000 == 0:
        #     with open('loss_0505/loss_wae_'+str(wae.lamb)+'_'+str(epoch)+'_'+str(iteration), 'wb') as fp:
        #         pickle.dump(all_loss,fp)
        #     wae.save_weights(filepath = 'trained_models/wae_'+str(wae.lamb)+'_'+str(epoch)+'_'+str(iteration)+'.h5')
        #     print('**** LOSS: %g ****' % loss)


# load in data
dataset_iterator = load_image_batch('celebA', batch_size=100, n_threads=2)

# define the latent dimension and batch size
z_dim = 64
batch_size = 100

# prior mean (zeros) and covariance (identity) over the latent space
u = tf.convert_to_tensor(np.zeros(z_dim), dtype='float32')
v = tf.convert_to_tensor(np.eye(z_dim), dtype='float32')

# regularization weight for the WAE penalty term
lamb = 0.001
wae = WAE(z_dim, u, v, lamb)
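
The gradient-tape step at the top of this example references epoch and iteration counters, so it presumably runs inside an outer training loop. A minimal driver is sketched below; the number of epochs is an assumed value, not given in the source.

num_epochs = 10  # assumed; not specified in the example
for epoch in range(num_epochs):
    for iteration, batch in enumerate(dataset_iterator):
        with tf.GradientTape() as tape:
            means, eps, outputs = wae.call(batch)
            loss = wae.loss(means, eps, batch, outputs)
        gradients = tape.gradient(loss, wae.trainable_variables)
        wae.optimizer.apply_gradients(zip(gradients, wae.trainable_variables))
        print(iteration, 'loss', loss)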
Example #3
File: main.py Project: wkuenne/album-gan
def main():
    # Load images
    rock = load_image_batch(args.img_dir + '/rock')
    rap = load_image_batch(args.img_dir + '/rap')
    jazz = load_image_batch(args.img_dir + '/jazz')

    # generate labels and make full dataset
    genre_labels = np.zeros(((len(rock) + len(rap) + len(jazz)), ))
    dataset = np.zeros(((len(rock) + len(rap) + len(jazz)), rock[0].shape[0],
                        rock[0].shape[1], rock[0].shape[2]))

    # np.concatenate is slow here, so preallocate the arrays and fill them in place
    for i in range(len(rock)):
        dataset[i] = rock[i]

    offset = len(rock)
    for i in range(len(rap)):
        genre_labels[i + offset] = 1
        dataset[i + offset] = rap[i]

    offset = len(rock) + len(rap)
    for i in range(len(jazz)):
        genre_labels[i + offset] = 2
        dataset[i + offset] = jazz[i]

    dataset = tf.convert_to_tensor(dataset)
    genre_labels = tf.convert_to_tensor(genre_labels)

    # Initialize models
    generator = Generator_Model()
    discriminator = Discriminator_Model()
    mapping_net = Mapping_Model()
    noise_net = make_noise_scale_net()
    adain_net = ADAin_Model()

    # For saving/loading models
    checkpoint_dir = './checkpoints'
    checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
    checkpoint = tf.train.Checkpoint(generator=generator,
                                     discriminator=discriminator)
    manager = tf.train.CheckpointManager(checkpoint,
                                         checkpoint_dir,
                                         max_to_keep=3)
    # Ensure the output directory exists
    if not os.path.exists(args.out_dir):
        os.makedirs(args.out_dir)

    if args.restore_checkpoint or args.mode == 'test':
        # restore the latest checkpoint tracked by the manager
        checkpoint.restore(manager.latest_checkpoint)

    try:
        # Run on the device given by args; an invalid device raises the RuntimeError caught below
        with tf.device('/device:' + args.device):
            if args.mode == 'train':
                for epoch in range(0, args.num_epochs):
                    print(
                        '========================== EPOCH %d  =========================='
                        % epoch)
                    avg_fid = train(generator, discriminator, dataset,
                                    genre_labels, manager, mapping_net,
                                    noise_net, adain_net)
                    print("Average FID for Epoch: " + str(avg_fid))
                    # Save at the end of the epoch, too
                    print("**** SAVING CHECKPOINT AT END OF EPOCH ****")
                    manager.save()
            if args.mode == 'test':
                test(generator, args.batch_size, args.z_dim, args.out_dir)
    except RuntimeError as e:
        print(e)
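
The test helper is not included in this example either. Based only on its call signature test(generator, args.batch_size, args.z_dim, args.out_dir), a minimal sketch might sample latent vectors, run the generator, and write PNGs to the output directory; the [-1, 1] generator output range assumed for rescaling is a guess, not stated in the source.

def test(generator, batch_size, z_dim, out_dir):
    # Sketch only: generate one batch of images from uniform noise and save them
    z = tf.random.uniform([batch_size, z_dim], minval=-1, maxval=1)
    images = generator(z)
    # Map assumed [-1, 1] generator output to [0, 255] for PNG encoding
    images = tf.cast((images + 1) / 2 * 255, tf.uint8)
    for i in range(batch_size):
        png = tf.io.encode_png(images[i])
        tf.io.write_file(os.path.join(out_dir, 'img_%d.png' % i), png)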