Example 1
def main():
	parser = argparse.ArgumentParser()
	parser.add_argument('--z_dim', type=int, default=100,
					   help='Noise dimension')

	parser.add_argument('--t_dim', type=int, default=256,
					   help='Text feature dimension')

	parser.add_argument('--batch_size', type=int, default=64,
					   help='Batch Size')

	parser.add_argument('--image_size', type=int, default=64,
					   help='Image size a (images are a x a)')

	parser.add_argument('--gf_dim', type=int, default=64,
					   help='Number of conv in the first layer gen.')

	parser.add_argument('--df_dim', type=int, default=64,
					   help='Number of conv in the first layer discr.')

	parser.add_argument('--gfc_dim', type=int, default=1024,
					   help='Dimension of generator units for the fully connected layer (1024)')

	parser.add_argument('--caption_vector_length', type=int, default=2400,
					   help='Caption Vector Length')

	parser.add_argument('--data_dir', type=str, default="Data",
					   help='Data Directory')

	parser.add_argument('--learning_rate', type=float, default=0.0002,
					   help='Learning Rate')

	parser.add_argument('--beta1', type=float, default=0.5,
					   help='Momentum for Adam Update')

	parser.add_argument('--epochs', type=int, default=600,
					   help='Max number of epochs')

	parser.add_argument('--save_every', type=int, default=30,
					   help='Save Model/Samples every x iterations over batches')

	parser.add_argument('--resume_model', type=str, default=None,
                       help='Pre-Trained Model Path, to resume from')

	parser.add_argument('--data_set', type=str, default="flowers",
                       help='Data set: MS-COCO, flowers')

	args = parser.parse_args()
	model_options = {
		'z_dim' : args.z_dim,
		't_dim' : args.t_dim,
		'batch_size' : args.batch_size,
		'image_size' : args.image_size,
		'gf_dim' : args.gf_dim,
		'df_dim' : args.df_dim,
		'gfc_dim' : args.gfc_dim,
		'caption_vector_length' : args.caption_vector_length
	}
	
	
	gan = model.GAN(model_options)
	input_tensors, variables, loss, outputs, checks = gan.build_model()
	
	d_optim = tf.train.AdamOptimizer(args.learning_rate, beta1 = args.beta1).minimize(loss['d_loss'], var_list=variables['d_vars'])
	g_optim = tf.train.AdamOptimizer(args.learning_rate, beta1 = args.beta1).minimize(loss['g_loss'], var_list=variables['g_vars'])
	
	sess = tf.InteractiveSession()
	tf.initialize_all_variables().run()
	
        print "load?"
	saver = tf.train.Saver(max_to_keep=None)
	if args.resume_model:
		print "loading"
		saver.restore(sess, args.resume_model)
		print "done loading model"
	
	print "load training data"
	loaded_data, test_data = load_training_data(args.data_dir, args.data_set)
	print "done loading training data"
	print(args.data_dir)
	print(args.data_set)
	logger.info("Starting")

	for i in range(1, args.epochs + 1):
		print("epoch", i)
		batch_no = 0
		num_batches = int(len(loaded_data['image_list']) / args.batch_size)
		print(len(loaded_data['image_list']))
		total_d_loss = 0
		total_g_loss = 0
		while batch_no*args.batch_size < loaded_data['data_length']:
			real_images, wrong_images, caption_vectors, z_noise, image_files = get_training_batch(batch_no, args.batch_size, 
				args.image_size, args.z_dim, args.caption_vector_length, 'train', args.data_dir, args.data_set, loaded_data)
			
			# DISCR UPDATE
			check_ts = [ checks['d_loss1'] , checks['d_loss2'], checks['d_loss3']]
			_, d_loss, gen, d1, d2, d3 = sess.run([d_optim, loss['d_loss'], outputs['generator']] + check_ts,
				feed_dict = {
					input_tensors['t_real_image'] : real_images,
					input_tensors['t_wrong_image'] : wrong_images,
					input_tensors['t_real_caption'] : caption_vectors,
					input_tensors['t_z'] : z_noise,
				})
			
			#print "d1", d1
			#print "d2", d2
			#print "d3", d3
			#print "D", d_loss
			
			# GEN UPDATE
			_, g_loss, gen = sess.run([g_optim, loss['g_loss'], outputs['generator']],
				feed_dict = {
					input_tensors['t_real_image'] : real_images,
					input_tensors['t_wrong_image'] : wrong_images,
					input_tensors['t_real_caption'] : caption_vectors,
					input_tensors['t_z'] : z_noise,
				})

			# GEN UPDATE TWICE, to make sure d_loss does not go to 0
			_, g_loss, gen = sess.run([g_optim, loss['g_loss'], outputs['generator']],
				feed_dict = {
					input_tensors['t_real_image'] : real_images,
					input_tensors['t_wrong_image'] : wrong_images,
					input_tensors['t_real_caption'] : caption_vectors,
					input_tensors['t_z'] : z_noise,
				})
			
			#print "LOSSES", d_loss, g_loss, batch_no, i, len(loaded_data['image_list'])/ args.batch_size
                        total_d_loss += d_loss
                        total_g_loss += g_loss
                        print "epoch {}/{} batch {}/{} - d_loss {} g_loss: {}".format(i, args.epochs, batch_no+1, num_batches, d_loss, g_loss)
                        #print "d_loss: {}".format(d_loss)
                        #print "g_loss: {}".format(g_loss)
			batch_no += 1
			if (batch_no % args.save_every) == 0:
				print "Saving Images, Model"
				save_for_vis(args.data_dir, real_images, gen, image_files)
				save_path = saver.save(sess, "Data/Models/latest_model_{}_temp.ckpt".format(args.data_set))
		if i%10 == 0:
			save_path = saver.save(sess, "Data/Models/model_after_{}_epoch_{}.ckpt".format(args.data_set, i))
		total_d_loss /= num_batches
		total_g_loss /= num_batches
		logger.info('epoch {} d_loss {} g_loss {}'.format(i, total_d_loss, total_g_loss))

		if i % 10 == 0:
			print("testing")
			batch_no = 0
			num_batches = int(len(test_data['image_list']) / args.batch_size)
			tp = 0
			fp = 0
			# Build the metric ops once, before the batch loop, so the graph
			# does not grow with every test batch.
			true_pos = tf.reduce_mean(tf.round(checks['disc_real_image']))
			false_pos = tf.reduce_mean(tf.round(checks['disc_fake_image']))
			while batch_no*args.batch_size < test_data['data_length']:
				real_images, caption_vectors, z_noise = get_test_batch(batch_no, args.batch_size, args.image_size, args.z_dim,
					args.caption_vector_length, args.data_dir, args.data_set, test_data)

				batch_tp, batch_fp = sess.run([true_pos, false_pos],
					feed_dict = {
						input_tensors['t_real_image']: real_images,
						input_tensors['t_real_caption']: caption_vectors,
						input_tensors['t_z']: z_noise,
					})
				print("Test batch {}/{}".format(batch_no+1, num_batches), "tp:", batch_tp, "fp:", batch_fp)
				tp += batch_tp
				fp += batch_fp
				batch_no += 1

			tp /= num_batches
			fp /= num_batches
			print("Test Metrics:", tp, fp)
			logger.info('epoch {} true_positive {} false_positive {}'.format(i, tp, fp))
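
A note on Example 1's test metric: the discriminator's sigmoid outputs are rounded to {0, 1}, so the batch mean over real images behaves as a true-positive rate and the mean over generated images as a false-positive rate. A minimal NumPy sketch of that computation, with illustrative values:

import numpy as np

disc_real = np.array([0.91, 0.72, 0.48, 0.66])  # D(real image, caption)
disc_fake = np.array([0.12, 0.55, 0.31, 0.08])  # D(G(z), caption)

true_pos = np.round(disc_real).mean()   # fraction of reals scored above 0.5
false_pos = np.round(disc_fake).mean()  # fraction of fakes scored above 0.5
print("tp:", true_pos, "fp:", false_pos)  # tp: 0.75 fp: 0.25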
Example 2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--z_dim',
                        type=int,
                        default=100,
                        help='Noise dimension')

    parser.add_argument('--t_dim',
                        type=int,
                        default=256,
                        help='Text feature dimension')

    parser.add_argument('--batch_size',
                        type=int,
                        default=64,
                        help='Batch Size')

    parser.add_argument('--image_size',
                        type=int,
                        default=64,
                        help='Image size a (images are a x a)')

    parser.add_argument('--gf_dim',
                        type=int,
                        default=64,
                        help='Number of conv in the first layer gen.')

    parser.add_argument('--df_dim',
                        type=int,
                        default=64,
                        help='Number of conv in the first layer discr.')

    parser.add_argument(
        '--gfc_dim',
        type=int,
        default=1024,
        help='Dimension of generator units for the fully connected layer (1024)')

    parser.add_argument('--caption_vector_length',
                        type=int,
                        default=2400,
                        help='Caption Vector Length')

    parser.add_argument('--data_dir',
                        type=str,
                        default="Data",
                        help='Data Directory')

    parser.add_argument('--learning_rate',
                        type=float,
                        default=0.0002,
                        help='Learning Rate')

    parser.add_argument('--beta1',
                        type=float,
                        default=0.5,
                        help='Momentum for Adam Update')

    parser.add_argument('--epochs',
                        type=int,
                        default=1000,
                        help='Max number of epochs')

    parser.add_argument(
        '--save_every',
        type=int,
        default=60,
        help='Save Model/Samples every x iterations over batches')

    parser.add_argument('--resume_model',
                        type=str,
                        default="Data/Models/model_after_epoch_628.ckpt",
                        help='Pre-Trained Model Path, to resume from')

    parser.add_argument('--data_set',
                        type=str,
                        default="flowers",
                        help='Data set: MS-COCO, flowers')

    parser.add_argument('--data_set2',
                        type=str,
                        default="birds",
                        help='Data set: MS-COCO, flowers, birds')

    args = parser.parse_args()
    model_options = {
        'z_dim': args.z_dim,
        't_dim': args.t_dim,
        'batch_size': args.batch_size,
        'image_size': args.image_size,
        'gf_dim': args.gf_dim,
        'df_dim': args.df_dim,
        'gfc_dim': args.gfc_dim,
        'caption_vector_length': args.caption_vector_length
    }

    gan = model.GAN(model_options)
    input_tensors, variables, loss, outputs, checks = gan.build_model()
    with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):

        d_optim = tf.train.AdamOptimizer(args.learning_rate,
                                         beta1=args.beta1).minimize(
                                             loss['d_loss'],
                                             var_list=variables['d_vars'])
        g_optim = tf.train.AdamOptimizer(args.learning_rate,
                                         beta1=args.beta1).minimize(
                                             loss['g_loss'],
                                             var_list=variables['g_vars'])

    sess = tf.InteractiveSession()
    tf.initialize_all_variables().run()

    saver = tf.train.Saver()
    if args.resume_model:
        saver.restore(sess, args.resume_model)

    loaded_data = load_training_data(args.data_dir, args.data_set)
    loaded_data2 = load_training_data(args.data_dir, args.data_set2)

    # epoch numbering resumes from the checkpoint saved after epoch 628
    for i in range(629, args.epochs):
        batch_no_flowers = 0
        batch_no_birds = 0
        while batch_no_flowers * args.batch_size < loaded_data[
                'data_length'] and batch_no_birds * args.batch_size < loaded_data2[
                    'data_length']:
            coin = random.uniform(0, 1)
            if batch_no_flowers * args.batch_size > loaded_data['data_length']:
                coin = 0
            if batch_no_birds * args.batch_size > loaded_data2['data_length']:
                coin = 1
            if batch_no_flowers * args.batch_size < loaded_data[
                    'data_length'] and coin > 0.5:
                print("Flower batch {}".format(batch_no_flowers))
                real_images, wrong_images, caption_vectors, z_noise, image_files = get_training_batch(
                    batch_no_flowers, args.batch_size, args.image_size,
                    args.z_dim, args.caption_vector_length, 'train',
                    args.data_dir, args.data_set, loaded_data)

                # DISCR UPDATE
                check_ts = [
                    checks['d_loss1'], checks['d_loss2'], checks['d_loss3']
                ]
                _, d_loss, gen, d1, d2, d3 = sess.run(
                    [d_optim, loss['d_loss'], outputs['generator']] + check_ts,
                    feed_dict={
                        input_tensors['t_real_image']: real_images,
                        input_tensors['t_wrong_image']: wrong_images,
                        input_tensors['t_real_caption']: caption_vectors,
                        input_tensors['t_z']: z_noise,
                    })

                print("d1", d1)
                print("d2", d2)
                print("d3", d3)
                print("D", d_loss)

                # GEN UPDATE
                _, g_loss, gen = sess.run(
                    [g_optim, loss['g_loss'], outputs['generator']],
                    feed_dict={
                        input_tensors['t_real_image']: real_images,
                        input_tensors['t_wrong_image']: wrong_images,
                        input_tensors['t_real_caption']: caption_vectors,
                        input_tensors['t_z']: z_noise,
                    })

                # GEN UPDATE TWICE, to make sure d_loss does not go to 0
                _, g_loss, gen = sess.run(
                    [g_optim, loss['g_loss'], outputs['generator']],
                    feed_dict={
                        input_tensors['t_real_image']: real_images,
                        input_tensors['t_wrong_image']: wrong_images,
                        input_tensors['t_real_caption']: caption_vectors,
                        input_tensors['t_z']: z_noise,
                    })

                print("LOSSES", d_loss, g_loss, batch_no_flowers, i,
                      len(loaded_data['image_list']) / args.batch_size)
                batch_no_flowers += 1
                if (batch_no_flowers % args.save_every) == 0:
                    print("Saving Images, Model")
                    save_for_vis(args.data_dir, real_images, gen, image_files,
                                 args.data_set)
                #     save_path = saver.save(sess, "Data/Models/latest_model_{}_temp.ckpt".format(args.data_set))

            if batch_no_birds * args.batch_size < loaded_data2[
                    'data_length'] and coin <= 0.5:
                print("Birds batch {}".format(batch_no_birds))

                real_images, wrong_images, caption_vectors, z_noise, image_files = get_training_batch(
                    batch_no_birds, args.batch_size, args.image_size,
                    args.z_dim, args.caption_vector_length, 'train',
                    args.data_dir, args.data_set2, loaded_data2)

                # DISCR UPDATE
                check_ts = [
                    checks['d_loss1'], checks['d_loss2'], checks['d_loss3']
                ]
                _, d_loss, gen, d1, d2, d3 = sess.run(
                    [d_optim, loss['d_loss'], outputs['generator']] + check_ts,
                    feed_dict={
                        input_tensors['t_real_image']: real_images,
                        input_tensors['t_wrong_image']: wrong_images,
                        input_tensors['t_real_caption']: caption_vectors,
                        input_tensors['t_z']: z_noise,
                    })

                print("d1", d1)
                print("d2", d2)
                print("d3", d3)
                print("D", d_loss)

                # GEN UPDATE
                _, g_loss, gen = sess.run(
                    [g_optim, loss['g_loss'], outputs['generator']],
                    feed_dict={
                        input_tensors['t_real_image']: real_images,
                        input_tensors['t_wrong_image']: wrong_images,
                        input_tensors['t_real_caption']: caption_vectors,
                        input_tensors['t_z']: z_noise,
                    })

                # GEN UPDATE TWICE, to make sure d_loss does not go to 0
                _, g_loss, gen = sess.run(
                    [g_optim, loss['g_loss'], outputs['generator']],
                    feed_dict={
                        input_tensors['t_real_image']: real_images,
                        input_tensors['t_wrong_image']: wrong_images,
                        input_tensors['t_real_caption']: caption_vectors,
                        input_tensors['t_z']: z_noise,
                    })

                print("LOSSES", d_loss, g_loss, batch_no_birds, i,
                      len(loaded_data['image_list']) / args.batch_size)
                batch_no_birds += 1
                if (batch_no_birds % args.save_every) == 0:
                    print("Saving Images, Model")
                    save_for_vis(args.data_dir, real_images, gen, image_files,
                                 args.data_set2)
                    # save_path = saver.save(sess, "Data/Models/latest_model_{}_temp.ckpt".format(args.data_set2))

        if i % 2 == 0:
            save_path = saver.save(
                sess, "Data/Models/model_after_epoch_{}.ckpt".format(i))
Example 3
def main():
	parser = argparse.ArgumentParser()
	parser.add_argument('--z_dim', type=int, default=100,
					   help='Noise dimension')

	parser.add_argument('--t_dim', type=int, default=256,
					   help='Text feature dimension')

	parser.add_argument('--batch_size', type=int, default=64,
					   help='Batch Size')

	parser.add_argument('--image_size', type=int, default=64,
					   help='Image size a (images are a x a)')

	parser.add_argument('--gf_dim', type=int, default=64,
					   help='Number of conv in the first layer gen.')

	parser.add_argument('--df_dim', type=int, default=64,
					   help='Number of conv in the first layer discr.')

	parser.add_argument('--gfc_dim', type=int, default=1024,
					   help='Dimension of generator units for the fully connected layer (1024)')

	parser.add_argument('--caption_vector_length', type=int, default=2400,
					   help='Caption Vector Length')

	parser.add_argument('--data_dir', type=str, default="Data",
					   help='Data Directory')

	parser.add_argument('--learning_rate', type=float, default=0.0002,
					   help='Learning Rate')

	parser.add_argument('--beta1', type=float, default=0.5,
					   help='Momentum for Adam Update')

	parser.add_argument('--epochs', type=int, default=35,
					   help='Max number of epochs')

	parser.add_argument('--save_every', type=int, default=30,
					   help='Save Model/Samples every x iterations over batches')

	parser.add_argument('--resume_model', type=str, default="Data/Models/model_after_faces_epoch_95.ckpt",
                       help='Pre-Trained Model Path, to resume from')

	parser.add_argument('--data_set', type=str, default="faces",
                       help='Data set: MS-COCO, flowers')

	args = parser.parse_args()
	model_options = {
		'z_dim' : args.z_dim,
		't_dim' : args.t_dim,
		'batch_size' : args.batch_size,
		'image_size' : args.image_size,
		'gf_dim' : args.gf_dim,
		'df_dim' : args.df_dim,
		'gfc_dim' : args.gfc_dim,
		'caption_vector_length' : args.caption_vector_length
	}
	
	
	gan = model.GAN(model_options)
	input_tensors, variables, loss, outputs, checks = gan.build_model()
	with tf.variable_scope(tf.get_variable_scope(), reuse=False):
		d_optim = tf.train.AdamOptimizer(args.learning_rate, beta1 = args.beta1).minimize(loss['d_loss'], var_list=variables['d_vars'])
		g_optim = tf.train.AdamOptimizer(args.learning_rate, beta1 = args.beta1).minimize(loss['g_loss'], var_list=variables['g_vars'])
	
	sess = tf.InteractiveSession()
	tf.initialize_all_variables().run()
	
	saver = tf.train.Saver()
	if args.resume_model:
		saver.restore(sess, args.resume_model)
	
	loaded_data = load_training_data(args.data_dir, args.data_set)
	
	for i in range(args.epochs):
		batch_no = 0
		list_losses_d = []
		list_losses_g = []
		list_batches = []
		while batch_no*args.batch_size < loaded_data['data_length']:
			real_images, wrong_images, caption_vectors, z_noise, image_files = get_training_batch(batch_no, args.batch_size, 
				args.image_size, args.z_dim, args.caption_vector_length, 'train', args.data_dir, args.data_set, loaded_data)
			
			# DISCR UPDATE
			check_ts = [ checks['d_loss1'] , checks['d_loss2'], checks['d_loss3']]
			_, d_loss, gen, d1, d2, d3 = sess.run([d_optim, loss['d_loss'], outputs['generator']] + check_ts,
				feed_dict = {
					input_tensors['t_real_image'] : real_images,
					input_tensors['t_wrong_image'] : wrong_images,
					input_tensors['t_real_caption'] : caption_vectors,
					input_tensors['t_z'] : z_noise,
				})
			
			print "d1", d1
			print "d2", d2
			print "d3", d3
			print "D", d_loss
			
			# GEN UPDATE
			_, g_loss, gen = sess.run([g_optim, loss['g_loss'], outputs['generator']],
				feed_dict = {
					input_tensors['t_real_image'] : real_images,
					input_tensors['t_wrong_image'] : wrong_images,
					input_tensors['t_real_caption'] : caption_vectors,
					input_tensors['t_z'] : z_noise,
				})

			# GEN UPDATE TWICE, to make sure d_loss does not go to 0
			_, g_loss, gen = sess.run([g_optim, loss['g_loss'], outputs['generator']],
				feed_dict = {
					input_tensors['t_real_image'] : real_images,
					input_tensors['t_wrong_image'] : wrong_images,
					input_tensors['t_real_caption'] : caption_vectors,
					input_tensors['t_z'] : z_noise,
				})
			list_losses_d.append(d_loss)
			list_losses_g.append(g_loss)
			list_batches.append(batch_no)
			print "LOSSES", d_loss, g_loss, batch_no, i, len(loaded_data['image_list'])/ args.batch_size
			batch_no += 1
			if (batch_no % args.save_every) == 0:
				print "Saving Images, Model"
				save_for_vis(args.data_dir, real_images, gen, image_files)
				save_path = saver.save(sess, "Data/Models/latest_model_{}_temp.ckpt".format(args.data_set))
		if i%5 == 0:
			save_path = saver.save(sess, "Data/Models/model_after_{}_epoch_{}.ckpt".format(args.data_set, i))
		with open("Data/plots/losses_discriminator_epoch_{}.txt".format(i), 'w') as f:
			for s in list_losses_d:
				f.write(str(s) + '\n')
		with open("Data/plots/losses_generator_epoch_{}.txt".format(i), 'w') as f:
			for s in list_losses_g:
				f.write(str(s) + '\n')
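
Example 3 writes the per-batch losses to Data/plots/losses_*_epoch_{i}.txt, one float per line. A minimal sketch for plotting one epoch's curves from those files, assuming matplotlib is available (the epoch number is illustrative):

import matplotlib.pyplot as plt

epoch = 0
with open("Data/plots/losses_discriminator_epoch_{}.txt".format(epoch)) as f:
    d_losses = [float(line) for line in f]
with open("Data/plots/losses_generator_epoch_{}.txt".format(epoch)) as f:
    g_losses = [float(line) for line in f]

plt.plot(d_losses, label="d_loss")
plt.plot(g_losses, label="g_loss")
plt.xlabel("batch")
plt.legend()
plt.savefig("Data/plots/losses_epoch_{}.png".format(epoch))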
Example 4
datasets_root_dir = 'datasets'

loaded_data = load_training_data(datasets_root_dir, 'flowers', 512, 312)
model_options = {
    'z_dim': 100,
    't_dim': 256,
    'batch_size': 64,
    'image_size': 128,
    'gf_dim': 64,
    'df_dim': 64,
    'caption_vector_length': 512,
    'n_classes': 312
}

gan = model.GAN(model_options)
input_tensors, variables, loss, outputs, checks = gan.build_model()

sessdefault = tf.compat.v1.InteractiveSession()
sessflowers = tf.compat.v1.InteractiveSession()
sessbirds = tf.compat.v1.InteractiveSession()
sessanime = tf.compat.v1.InteractiveSession()
tf.compat.v1.initialize_all_variables().run()

saver = tf.compat.v1.train.Saver(max_to_keep=10000)
logging.info('Trying to resume model from ' + str(tf.train.latest_checkpoint('./checkpoints/default/')))

if tf.train.latest_checkpoint('./checkpoints/default/') is not None:
    saver.restore(sessdefault, tf.train.latest_checkpoint('./checkpoints/default/'))
    logging.info('Successfully loaded model from ./checkpoints/default/')
else:
    logging.info('No checkpoint found in ./checkpoints/default/; starting from a fresh initialization')
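
Example 4 breaks off inside its else branch. The restore-or-initialize pattern it is building, as a compact self-contained TF 1.x sketch (the stand-in variable and the checkpoint directory are assumptions taken from the snippet):

import tensorflow as tf
tf.compat.v1.disable_eager_execution()

w = tf.compat.v1.get_variable("w", shape=[2, 2])  # stand-in so the Saver has a variable to track
saver = tf.compat.v1.train.Saver()
sess = tf.compat.v1.Session()
ckpt = tf.train.latest_checkpoint('./checkpoints/default/')
if ckpt is not None:
    saver.restore(sess, ckpt)  # resume from the newest checkpoint
else:
    sess.run(tf.compat.v1.global_variables_initializer())  # fresh start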
Example 5
def main():
	parser = argparse.ArgumentParser()
	parser.add_argument('--z_dim', type=int, default=100,
					   help='Noise dimension')

	parser.add_argument('--t_dim', type=int, default=256,
					   help='Text feature dimension')

	parser.add_argument('--batch_size', type=int, default=64,
					   help='Batch Size')

	parser.add_argument('--image_size', type=int, default=64,
					   help='Image size a (images are a x a)')

	parser.add_argument('--gf_dim', type=int, default=64,
					   help='Number of conv in the first layer gen.')

	parser.add_argument('--df_dim', type=int, default=64,
					   help='Number of conv in the first layer discr.')

	parser.add_argument('--gfc_dim', type=int, default=1024,
					   help='Dimension of generator units for the fully connected layer (1024)')

	parser.add_argument('--caption_vector_length', type=int, default=4800,
					   help='Caption Vector Length')

	parser.add_argument('--data_dir', type=str, default="/media/ssd_working_space/osaid/Data",
					   help='Data Directory')

	parser.add_argument('--learning_rate', type=float, default=0.0002,
					   help='Learning Rate')

	parser.add_argument('--beta1', type=float, default=0.5,
					   help='Momentum for Adam Update')

	parser.add_argument('--epochs', type=int, default=200,
					   help='Max number of epochs')

	parser.add_argument('--save_every', type=int, default=30,
					   help='Save Model/Samples every x iterations over batches')

	parser.add_argument('--resume_model', type=str, default=None,
                       help='Pre-Trained Model Path, to resume from')

	parser.add_argument('--data_set', type=str, default="face",
                       help='Data set: MS-COCO, flowers')

	args = parser.parse_args()
	model_options = {
		'z_dim' : args.z_dim,
		't_dim' : args.t_dim,
		'batch_size' : args.batch_size,
		'image_size' : args.image_size,
		'gf_dim' : args.gf_dim,
		'df_dim' : args.df_dim,
		'gfc_dim' : args.gfc_dim,
		'caption_vector_length' : args.caption_vector_length
	}
	
	gan = model.GAN(model_options)
	input_tensors, variables, loss, outputs, checks = gan.build_model()
	
	d_optim = tf.train.AdamOptimizer(args.learning_rate, beta1 = args.beta1).minimize(loss['d_loss'], var_list=variables['d_vars'])
	g_optim = tf.train.AdamOptimizer(args.learning_rate, beta1 = args.beta1).minimize(loss['g_loss'], var_list=variables['g_vars'])
	
	sess = tf.InteractiveSession()
	tf.global_variables_initializer().run()
	
	saver = tf.train.Saver()
	if args.resume_model:
		saver.restore(sess, args.resume_model)
	
	loaded_data = load_training_data(args.data_dir, args.data_set)
	for i in range(args.epochs):
		batch_no = 0
		while batch_no*args.batch_size < loaded_data['data_length']:
			real_images, wrong_images, caption_vectors, z_noise, image_files = get_training_batch(batch_no, args.batch_size, 
					args.image_size, args.z_dim, args.caption_vector_length, 'train', args.data_dir, args.data_set, loaded_data)
		

			# GEN UPDATE TWICE, to make sure d_loss does not go to 0		

			_, g_loss, gen = sess.run([g_optim, loss['g_loss'], outputs['generator']],
				feed_dict = {
					input_tensors['t_real_image'] : real_images,
					input_tensors['t_wrong_image'] : wrong_images,
					input_tensors['t_real_caption'] : caption_vectors,
					input_tensors['t_z'] : z_noise,
				})

			_, g_loss, fake_img = sess.run([g_optim, loss['g_loss'], outputs['generator']],
				feed_dict = {
					input_tensors['t_real_image'] : real_images,
					input_tensors['t_wrong_image'] : wrong_images,
					input_tensors['t_real_caption'] : caption_vectors,
					input_tensors['t_z'] : z_noise,
				})


			# DISCR UPDATE (every 4th batch the generator's own output is fed
			# as the "real" image, presumably to keep d_loss from collapsing)
			if batch_no % 4 == 0:
				check_ts = [ checks['d_loss1'] , checks['d_loss2'], checks['d_loss3']]
				_, d_loss, gen, d1, d2, d3 = sess.run([d_optim, loss['d_loss'], outputs['generator']] + check_ts,
					feed_dict = {
						input_tensors['t_real_image'] : fake_img,
						input_tensors['t_wrong_image'] : wrong_images,
						input_tensors['t_real_caption'] : caption_vectors,
						input_tensors['t_z'] : z_noise,
					})
			else:
				check_ts = [ checks['d_loss1'] , checks['d_loss2'], checks['d_loss3']]
				_, d_loss, gen, d1, d2, d3 = sess.run([d_optim, loss['d_loss'], outputs['generator']] + check_ts,
					feed_dict = {
						input_tensors['t_real_image'] : real_images,
						input_tensors['t_wrong_image'] : wrong_images,
						input_tensors['t_real_caption'] : caption_vectors,
						input_tensors['t_z'] : z_noise,
					})
			
			# print('here')

			print("d1", d1)
			print("d2", d2)
			print("d3", d3)
			print("D", d_loss)		
			print("LOSSES", d_loss, g_loss, batch_no, i, len(loaded_data['image_list'])/ args.batch_size)

			batch_no += 1
			if (batch_no % args.save_every) == 0:
				print("Saving Images, Model")
				save_for_vis(i, batch_no, args.data_dir, real_images, gen, image_files)
				save_path = saver.save(sess, "/media/ssd_working_space/osaid/Data/Models/latest_model_{}_temp.ckpt".format(args.data_set))
		if i%5 == 0:
			save_path = saver.save(sess, "/media/ssd_working_space/osaid/Data/Models/model_after_{}_epoch_{}.ckpt".format(args.data_set, i))
Example 6
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--z_dim',
                        type=int,
                        default=100,
                        help='Noise dimension')

    parser.add_argument('--t_dim',
                        type=int,
                        default=256,
                        help='Text feature dimension')
    """
    =====================================================================================================
    yokuwakarann nannyanenn 256tte caption vector tono tigai #toha
    =====================================================================================================
    """

    parser.add_argument('--batch_size',
                        type=int,
                        default=64,
                        help='Batch Size')

    parser.add_argument('--image_size',
                        type=int,
                        default=64,
                        help='Image size a (images are a x a)')

    parser.add_argument('--gf_dim',
                        type=int,
                        default=64,
                        help='Number of conv in the first layer gen.')

    parser.add_argument('--df_dim',
                        type=int,
                        default=64,
                        help='Number of conv in the first layer discr.')

    parser.add_argument(
        '--gfc_dim',
        type=int,
        default=1024,
        help='Dimension of generator units for the fully connected layer (1024)')

    parser.add_argument('--caption_vector_length',
                        type=int,
                        default=2400,
                        help='Caption Vector Length')

    parser.add_argument('--data_dir',
                        type=str,
                        default="Data",
                        help='Data Directory')

    parser.add_argument('--learning_rate',
                        type=float,
                        default=0.0002,
                        help='Learning Rate')

    parser.add_argument('--beta1',
                        type=float,
                        default=0.5,
                        help='Momentum for Adam Update')

    parser.add_argument('--epochs',
                        type=int,
                        default=1,
                        help='Max number of epochs')

    parser.add_argument(
        '--save_every',
        type=int,
        default=100,
        help='Save Model/Samples every x iterations over batches')

    parser.add_argument('--resume_model',
                        type=str,
                        default=None,
                        help='Pre-Trained Model Path, to resume from')

    parser.add_argument('--data_set',
                        type=str,
                        default="flowers",
                        help='Data set: ImageNet, MS-COCO, flowers')

    args = parser.parse_args()
    # reference model.py self.options
    model_options = {
        'z_dim': args.z_dim,
        't_dim': args.t_dim,
        'batch_size': args.batch_size,
        'image_size': args.image_size,
        'gf_dim': args.gf_dim,
        'df_dim': args.df_dim,
        'gfc_dim': args.gfc_dim,
        'caption_vector_length': args.caption_vector_length
    }

    gan = model.GAN(model_options)
    _save_path = "Data/Models/" + str(args.epochs) + "_" + str(
        args.caption_vector_length) + "CapVecDims" + "_"

    print("== finish checking GAN_model_options")
    input_tensors, variables, loss, outputs, checks = gan.build_model()
    print("== finish building GAN_model")
    d_optim = tf.train.AdamOptimizer(args.learning_rate,
                                     beta1=args.beta1).minimize(
                                         loss['d_loss'],
                                         var_list=variables['d_vars'])
    g_optim = tf.train.AdamOptimizer(args.learning_rate,
                                     beta1=args.beta1).minimize(
                                         loss['g_loss'],
                                         var_list=variables['g_vars'])
    print("== finish initing the optimizer")
    sess = tf.InteractiveSession()
    print("== Now runing...")
    tf.initialize_all_variables().run()

    saver = tf.train.Saver()
    if args.resume_model:
        print("== Now loading the saved model...")
        saver.restore(sess, args.resume_model)
    loaded_data = load_training_data(args.data_dir, args.data_set)
    print(len(loaded_data))
    print("== Finish loading the training data")
    for i in range(args.epochs):
        batch_no = 0
        while batch_no * args.batch_size < loaded_data['data_length']:
            real_images, wrong_images, caption_vectors, z_noise, image_files = get_training_batch(
                batch_no, args.batch_size, args.image_size, args.z_dim,
                args.caption_vector_length, 'train', args.data_dir,
                args.data_set, loaded_data)

            # DISCR UPDATE
            check_ts = [
                checks['d_loss1'], checks['d_loss2'], checks['d_loss3']
            ]

            _, d_loss, gen, d1, d2, d3 = sess.run(
                [d_optim, loss['d_loss'], outputs['generator']] + check_ts,
                feed_dict={
                    input_tensors['t_real_image']: real_images,
                    input_tensors['t_wrong_image']: wrong_images,
                    input_tensors['t_real_caption']: caption_vectors,
                    input_tensors['t_z']: z_noise,
                })

            print("d1", d1)
            print("d2", d2)
            print("d3", d3)
            print("D", d_loss)

            # GEN UPDATE
            _, g_loss, gen = sess.run(
                [g_optim, loss['g_loss'], outputs['generator']],
                feed_dict={
                    input_tensors['t_real_image']: real_images,
                    input_tensors['t_wrong_image']: wrong_images,
                    input_tensors['t_real_caption']: caption_vectors,
                    input_tensors['t_z']: z_noise,
                })

            # GEN UPDATE TWICE, to make sure d_loss does not go to 0
            _, g_loss, gen = sess.run(
                [g_optim, loss['g_loss'], outputs['generator']],
                feed_dict={
                    input_tensors['t_real_image']: real_images,
                    input_tensors['t_wrong_image']: wrong_images,
                    input_tensors['t_real_caption']: caption_vectors,
                    input_tensors['t_z']: z_noise,
                })

            print("LOSSES (D/G)", d_loss, "/", g_loss, "batch_no:", batch_no,
                  "/",
                  len(loaded_data['image_list']) / args.batch_size, "Ittr:", i)
            batch_no += 1
            if (batch_no % args.save_every) == 0:
                print("Saving Model")
                save_for_vis(args.data_dir, real_images, gen, image_files)
                save_path = saver.save(
                    sess, "Data/Models/" + str(args.data_set) +
                    "/latest_model_{}_temp.ckpt".format(args.data_set))
        if i % 5 == 0:
            save_path = saver.save(
                sess, "Data/Models/" + str(args.data_set) +
                "/model_after_{}_epoch_{}.ckpt".format(args.data_set, i))

    if args.data_set == "flowers":
        _save_model_fullpath = _save_path + "model.ckpt"
    elif args.data_set == "ImageNet":
        _save_model_fullpath = _save_path + "ImageNet_model.ckpt"
    else:
        # fall back for any other data set to avoid a NameError
        _save_model_fullpath = _save_path + "{}_model.ckpt".format(args.data_set)
    saver.save(sess, _save_model_fullpath)
Example 7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--z_dim',
                        type=int,
                        default=100,
                        help='Noise dimension')

    parser.add_argument('--t_dim',
                        type=int,
                        default=256,
                        help='Text feature dimension')

    parser.add_argument('--batch_size',
                        type=int,
                        default=64,
                        help='Batch Size')

    parser.add_argument('--image_size',
                        type=int,
                        default=64,
                        help='Image size a (images are a x a)')

    parser.add_argument('--gf_dim',
                        type=int,
                        default=64,
                        help='Number of conv in the first layer gen.')

    parser.add_argument('--df_dim',
                        type=int,
                        default=64,
                        help='Number of conv in the first layer discr.')

    parser.add_argument(
        '--gfc_dim',
        type=int,
        default=1024,
        help='Dimension of generator units for the fully connected layer (1024)')

    parser.add_argument('--caption_vector_length',
                        type=int,
                        default=2400,
                        help='Caption Vector Length')

    parser.add_argument('--data_dir',
                        type=str,
                        default="Data",
                        help='Data Directory')

    parser.add_argument('--learning_rate',
                        type=float,
                        default=0.0002,
                        help='Learning Rate')

    parser.add_argument('--beta1',
                        type=float,
                        default=0.5,
                        help='Momentum for Adam Update')

    parser.add_argument('--epochs',
                        type=int,
                        default=600,
                        help='Max number of epochs')

    parser.add_argument(
        '--save_every',
        type=int,
        default=30,
        help='Save Model/Samples every x iterations over batches')

    parser.add_argument('--resume_model',
                        type=str,
                        default=None,
                        help='Pre-Trained Model Path, to resume from')

    parser.add_argument('--data_set',
                        type=str,
                        default="flowers",
                        help='Data set: MS-COCO, flowers')

    parser.add_argument('--model', type=str, default="gan", help='Type of GAN')

    parser.add_argument('--lam',
                        type=float,
                        default=0.25,
                        help='lambda of O-GAN')

    args = parser.parse_args()
    model_options = {
        'z_dim': args.z_dim,
        't_dim': args.t_dim,
        'batch_size': args.batch_size,
        'image_size': args.image_size,
        'gf_dim': args.gf_dim,
        'df_dim': args.df_dim,
        'gfc_dim': args.gfc_dim,
        'caption_vector_length': args.caption_vector_length,
        'lam': args.lam
    }

    if args.model == 'o-gan':
        gan = o_gan_model.O_GAN(model_options)
    else:
        gan = model.GAN(model_options)
    input_tensors, variables, loss, outputs, checks = gan.build_model()
    tf.summary.scalar('g_loss', loss['g_loss'])
    tf.summary.scalar('d_loss', loss['d_loss'])
    with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
        d_optim = tf.train.AdamOptimizer(args.learning_rate,
                                         beta1=args.beta1).minimize(
                                             loss['d_loss'],
                                             var_list=variables['d_vars'])
        g_optim = tf.train.AdamOptimizer(args.learning_rate,
                                         beta1=args.beta1).minimize(
                                             loss['g_loss'],
                                             var_list=variables['g_vars'])

    sess = tf.InteractiveSession()
    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter("path")
    writer.add_graph(sess.graph)
    tf.initialize_all_variables().run()

    saver = tf.train.Saver()
    if args.resume_model:
        saver.restore(sess, args.resume_model)

    loaded_data = load_training_data(args.data_dir, args.data_set)

    for i in range(args.epochs):
        batch_no = 0
        while batch_no * args.batch_size < loaded_data['data_length']:
            real_images, wrong_images, caption_vectors, z_noise, image_files = get_training_batch(
                batch_no, args.batch_size, args.image_size, args.z_dim,
                args.caption_vector_length, 'train', args.data_dir,
                args.data_set, loaded_data)

            # DISCR UPDATE
            if args.model == 'gan':
                check_ts = [
                    checks['d_loss1'], checks['d_loss2'], checks['d_loss3']
                ]
                _, d_loss, gen, d1, d2, d3 = sess.run(
                    [d_optim, loss['d_loss'], outputs['generator']] + check_ts,
                    feed_dict={
                        input_tensors['t_real_image']: real_images,
                        input_tensors['t_wrong_image']: wrong_images,
                        input_tensors['t_real_caption']: caption_vectors,
                        input_tensors['t_z']: z_noise,
                    })
                print("d1", d1)
                print("d2", d2)
                print("d3", d3)
                print("D", d_loss)
            else:
                check_ts = [
                    checks['mean_disc_real_image'],
                    checks['mean_wrong_real_image'],
                    checks['mean_disc_fake_image'], checks['z1_corr'],
                    checks['z2_corr']
                ]
                _, d_loss, gen, md_real_image, mw_real_image, md_fake_image, z1, z2 = sess.run(
                    [d_optim, loss['d_loss'], outputs['generator']] + check_ts,
                    feed_dict={
                        input_tensors['t_real_image']: real_images,
                        input_tensors['t_wrong_image']: wrong_images,
                        input_tensors['t_real_caption']: caption_vectors,
                        input_tensors['t_z']: z_noise,
                    })
                print("md_real_image", md_real_image[0])
                print("mw_real_image", mw_real_image[0])
                print("md_fake_image", md_fake_image[0])
                # print("z1", z1)
                # print("z2", z2)
                print("D", d_loss)

            # GEN UPDATE
            _, g_loss, gen = sess.run(
                [g_optim, loss['g_loss'], outputs['generator']],
                feed_dict={
                    input_tensors['t_real_image']: real_images,
                    input_tensors['t_wrong_image']: wrong_images,
                    input_tensors['t_real_caption']: caption_vectors,
                    input_tensors['t_z']: z_noise,
                })

            # GEN UPDATE TWICE, to make sure d_loss does not go to 0
            _, g_loss, gen, result = sess.run(
                [g_optim, loss['g_loss'], outputs['generator'], merged],
                feed_dict={
                    input_tensors['t_real_image']: real_images,
                    input_tensors['t_wrong_image']: wrong_images,
                    input_tensors['t_real_caption']: caption_vectors,
                    input_tensors['t_z']: z_noise,
                })
            writer.add_summary(result,
                               i * loaded_data['data_length'] + batch_no)
            print("LOSSES", d_loss, g_loss, batch_no, i,
                  len(loaded_data['image_list']) / args.batch_size)
            batch_no += 1
            if (batch_no % args.save_every) == 0:
                print("Saving Images, Model")
                save_for_vis(args.data_dir, args.model, real_images, gen,
                             image_files)
                save_path = saver.save(
                    sess, "Data/Models/{}/latest_model_{}_temp.ckpt".format(
                        args.model, args.data_set))
        if i % 5 == 0:
            save_path = saver.save(
                sess, "Data/Models/{}/model_after_{}_epoch_{}.ckpt".format(
                    args.model, args.data_set, i))
    writer.close()
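
Example 7 wires TensorBoard through declared summary ops: scalars are attached to the loss tensors, merged, evaluated alongside the training step, and written out. The same plumbing in a minimal self-contained TF 1.x sketch ("logs/" is an arbitrary directory here):

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[], name="loss_value")
tf.summary.scalar("g_loss", x)
merged = tf.summary.merge_all()

with tf.Session() as sess:
    writer = tf.summary.FileWriter("logs/", sess.graph)
    for step, value in enumerate([1.0, 0.8, 0.6]):
        result = sess.run(merged, feed_dict={x: value})  # serialized summary proto
        writer.add_summary(result, step)
    writer.close()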
Example 8
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--z_dim',
                        type=int,
                        default=100,
                        help='Noise dimension')

    parser.add_argument('--t_dim',
                        type=int,
                        default=256,
                        help='Text feature dimension')

    parser.add_argument('--batch_size',
                        type=int,
                        default=64,
                        help='Batch Size')

    parser.add_argument('--image_size',
                        type=int,
                        default=64,
                        help='Image size a (images are a x a)')

    parser.add_argument('--gf_dim',
                        type=int,
                        default=64,
                        help='Number of conv in the first layer gen.')

    parser.add_argument('--df_dim',
                        type=int,
                        default=64,
                        help='Number of conv in the first layer discr.')

    parser.add_argument(
        '--gfc_dim',
        type=int,
        default=1024,
        help='Dimension of generator units for the fully connected layer (1024)')

    parser.add_argument('--caption_vector_length',
                        type=int,
                        default=2400,
                        help='Caption Vector Length')

    parser.add_argument('--learning_rate',
                        type=float,
                        default=0.0002,
                        help='Learning Rate')

    parser.add_argument('--beta1',
                        type=float,
                        default=0.5,
                        help='Momentum for Adam Update')

    parser.add_argument('--gen_updates',
                        type=int,
                        default=10,
                        help='Generator updates per discriminator update')

    parser.add_argument('--epochs',
                        type=int,
                        default=200,
                        help='Max number of epochs')

    parser.add_argument('--resume_model',
                        type=str,
                        default=None,
                        help='Pre-Trained Model Path, to resume from')

    parser.add_argument('--resume_epoch',
                        type=int,
                        default=0,
                        help='Number of epochs already trained')

    parser.add_argument('--image_dir',
                        type=str,
                        default="Data/mscoco_raw/processed",
                        help='Directory of image')

    parser.add_argument('--experiment',
                        type=str,
                        default="default",
                        help='Experiment to save to and load captions for')

    # store_true instead of type=bool: argparse's bool('False') is truthy
    parser.add_argument('--style_gan',
                        action='store_true',
                        help='adds style loss to generator loss')

    parser.add_argument('--transfer',
                        action='store_true',
                        help='does transfer learning')

    parser.add_argument('--split',
                        type=str,
                        default="train",
                        help='use val for validation set, train for train\
                        mostly a debug flag')

    parser.add_argument('--extra_32',
                        action='store_true',
                        help='extra conv layer when the image is at size 32')
    parser.add_argument('--extra_64',
                        action='store_true',
                        help='extra conv layer when the image is at size 64')
    parser.add_argument('--vgg',
                        action='store_true',
                        help='use vgg like layout')

    args = parser.parse_args()
    if args.vgg and (args.extra_32 or args.extra_64):
        raise Exception(
            "Cannot perform both vgg and extra_x mods at the same time")

    model_options = {
        'z_dim': args.z_dim,
        't_dim': args.t_dim,
        'batch_size': args.batch_size,
        'image_size': args.image_size,
        'gf_dim': args.gf_dim,
        'df_dim': args.df_dim,
        'gfc_dim': args.gfc_dim,
        'caption_vector_length': args.caption_vector_length,
        'extra_32': args.extra_32,
        'extra_64': args.extra_64,
        'vgg': args.vgg,
        'style_gan': args.style_gan
    }

    tbdir = "Data/Experiments/{}/".format(args.experiment)
    tbpath = os.path.join(tbdir, "tensorboard")
    if not os.path.isdir(tbdir):
        os.makedirs(tbdir)
    if not os.path.isdir(tbpath):
        os.makedirs(tbpath)
    tbwriter = tf.summary.FileWriter(tbpath)

    gan = model.GAN(model_options)
    input_tensors, variables, loss, outputs, checks = gan.build_model()

    d_optim = tf.train.AdamOptimizer(args.learning_rate,
                                     beta1=args.beta1).minimize(
                                         loss['d_loss'],
                                         var_list=variables['d_vars'])
    g_optim = tf.train.AdamOptimizer(args.learning_rate,
                                     beta1=args.beta1).minimize(
                                         loss['g_loss'],
                                         var_list=variables['g_vars'])

    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()

    checkpointer = tf.train.Saver()
    perm_saver = tf.train.Saver(max_to_keep=None)

    if args.transfer:
        transfer_learning(sess)
    if args.resume_model:
        checkpointer.restore(sess, args.resume_model)

    loaded_data = load_training_data(args.split, args.experiment)

    for i in range(args.resume_epoch, args.epochs + 1):
        batch_no = 0
        gen_images = None
        random.shuffle(loaded_data['image_list'])
        print(loaded_data['data_length'])
        while batch_no * args.batch_size < loaded_data['data_length']:
            real_images, wrong_images, caption_vectors, z_noise, image_files = get_training_batch(
                batch_no, args.batch_size, args.image_size, args.z_dim,
                args.caption_vector_length, args.image_dir, loaded_data)

            # DISCR UPDATE
            check_ts = [
                checks['d_loss1'], checks['d_loss2'], checks['d_loss3']
            ]
            _, d_loss, gen, d1, d2, d3 = sess.run(
                [d_optim, loss['d_loss'], outputs['generator']] + check_ts,
                feed_dict={
                    input_tensors['t_real_image']: real_images,
                    input_tensors['t_wrong_image']: wrong_images,
                    input_tensors['t_real_caption']: caption_vectors,
                    input_tensors['t_z']: z_noise,
                })

            print("d1", d1)
            print("d2", d2)
            print("d3", d3)
            print("D", d_loss)

            g_loss = None
            for _ in range(args.gen_updates):
                # GEN UPDATE
                _, g_loss, gen_images = sess.run(
                    [g_optim, loss['g_loss'], outputs['generator']],
                    feed_dict={
                        input_tensors['t_real_image']: real_images,
                        input_tensors['t_wrong_image']: wrong_images,
                        input_tensors['t_real_caption']: caption_vectors,
                        input_tensors['t_z']: z_noise,
                    })

            summary = tf.Summary(value=[
                tf.Summary.Value(tag="d_loss", simple_value=d_loss),
                tf.Summary.Value(tag="d_loss1", simple_value=d1),
                tf.Summary.Value(tag="d_loss2", simple_value=d2),
                tf.Summary.Value(tag="d_loss3", simple_value=d3),
                tf.Summary.Value(tag="g_loss", simple_value=g_loss)
            ])
            global_step = i * loaded_data[
                'data_length'] / args.batch_size + batch_no
            tbwriter.add_summary(summary, global_step)
            print("Epoch", i, "LOSSES", d_loss, g_loss, batch_no, i,
                  loaded_data['data_length'] / args.batch_size)
            batch_no += 1
            checkpointer.save(
                sess,
                "Data/Experiments/{}/model/checkpoint.ckpt".format(
                    args.experiment),
                global_step=i)
        if i > 0 and (i % 100) == 0:
            print("Saving Images, Model")
            save_for_vis(args.experiment, gen_images)
            perm_saver.save(
                sess, "Data/Experiments/{}/model/after_{}_epochs.ckpt".format(
                    args.experiment, i))
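
Unlike Example 7, Example 8 writes tf.Summary protos by hand instead of declaring summary ops, so no extra fetches are needed in sess.run. That pattern in isolation (the values and the "logs/" directory are illustrative):

import tensorflow as tf

writer = tf.summary.FileWriter("logs/")
for step, g_loss in enumerate([0.9, 0.7, 0.5]):
    summary = tf.Summary(value=[
        tf.Summary.Value(tag="g_loss", simple_value=g_loss)
    ])
    writer.add_summary(summary, step)  # no session or graph ops involved
writer.close()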
Example 9
#!/usr/bin/env python3
Example 10
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--z_dim',
                        type=int,
                        default=100,
                        help='Noise dimension')

    parser.add_argument('--t_dim',
                        type=int,
                        default=256,
                        help='Text feature dimension')

    parser.add_argument('--batch_size',
                        type=int,
                        default=64,
                        help='Batch Size')

    parser.add_argument('--image_size',
                        type=int,
                        default=64,
                        help='Image size a (images are a x a)')

    parser.add_argument('--gf_dim',
                        type=int,
                        default=64,
                        help='Number of conv in the first layer gen.')

    parser.add_argument('--df_dim',
                        type=int,
                        default=64,
                        help='Number of conv in the first layer discr.')

    parser.add_argument(
        '--gfc_dim',
        type=int,
        default=1024,
        help='Dimension of generator units for the fully connected layer (1024)')

    parser.add_argument('--caption_vector_length',
                        type=int,
                        default=23,
                        help='Caption Vector Length')

    parser.add_argument('--data_dir',
                        type=str,
                        default="hw3_data",
                        help='Data Directory')

    parser.add_argument('--learning_rate',
                        type=float,
                        default=0.0002,
                        help='Learning Rate')

    parser.add_argument('--beta1',
                        type=float,
                        default=0.7,
                        help='Momentum for Adam Update')

    parser.add_argument('--epochs',
                        type=int,
                        default=800,
                        help='Max number of epochs')

    parser.add_argument(
        '--save_every',
        type=int,
        default=100,
        help='Save Model/Samples every x iterations over batches')

    parser.add_argument('--resume_model',
                        type=str,
                        default=None,
                        help='Pre-Trained Model Path, to resume from')

    parser.add_argument('--data_set',
                        type=str,
                        default="bonus",
                        help='Data set: faces, bonus')
    parser.add_argument(
        '--vector',
        type=int,
        default=3,
        help=
        'method to encode captions, options: 1. uni-skip, 2. bi-skip, 3. combine-skip, 4. one-hot, 5. glove_50 , 6. glove_100 , 7. glove_200 , 8. glove_300'
    )
    parser.add_argument(
        '--update_rate',
        type=str,
        default='1_2',
        help='update rate between discrimminator and generator')
    parser.add_argument('--gan_type',
                        type=int,
                        default=0,
                        help='GAN type: 0->DCGAN, 1->WGAN, 2->LSGAN, 3->BSGAN')

    args = parser.parse_args()
    # set the caption vector length to match the chosen encoding:
    if args.vector == 1:
        args.caption_vector_length = 2400
    if args.vector == 2:
        args.caption_vector_length = 2400
    if args.vector == 3:
        args.caption_vector_length = 4800
    if args.vector == 4:
        args.caption_vector_length = 23
    if args.vector == 5:
        args.caption_vector_length = 100
    if args.vector == 6:
        args.caption_vector_length = 200
    if args.vector == 7:
        args.caption_vector_length = 400
    if args.vector == 8:
        args.caption_vector_length = 600

    print(args.caption_vector_length)
    model_options = {
        'z_dim': args.z_dim,
        't_dim': args.t_dim,
        'batch_size': args.batch_size,
        'image_size': args.image_size,
        'gf_dim': args.gf_dim,
        'df_dim': args.df_dim,
        'gfc_dim': args.gfc_dim,
        'caption_vector_length': args.caption_vector_length,
        'gan_type': args.gan_type
    }

    #GAN model
    gan = model.GAN(model_options)
    input_tensors, variables, loss, outputs, checks = gan.build_model()
    with tf.variable_scope(tf.get_variable_scope(), reuse=False):
        if args.gan_type == 1:  # WGAN uses RMSProp (which takes no beta1)
            d_optim = tf.train.RMSPropOptimizer(args.learning_rate).minimize(
                loss['d_loss'], var_list=variables['d_vars'])
            g_optim = tf.train.RMSPropOptimizer(args.learning_rate).minimize(
                loss['g_loss'], var_list=variables['g_vars'])
        else:
            d_optim = tf.train.AdamOptimizer(args.learning_rate,
                                             beta1=args.beta1).minimize(
                                                 loss['d_loss'],
                                                 var_list=variables['d_vars'])
            g_optim = tf.train.AdamOptimizer(args.learning_rate,
                                             beta1=args.beta1).minimize(
                                                 loss['g_loss'],
                                                 var_list=variables['g_vars'])

    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 1.0
    sess = tf.InteractiveSession(config=config)
    sess.run(tf.global_variables_initializer())

    saver = tf.train.Saver(max_to_keep=None)
    if args.resume_model:
        saver.restore(sess, args.resume_model)

    loaded_data = load_training_data(args.data_dir, args.data_set, args.vector)

    for i in range(args.epochs):
        batch_no = 0
        while batch_no * args.batch_size < loaded_data['data_length']:
            real_images, wrong_images, caption_vectors, z_noise, image_files = get_training_batch(
                batch_no, args.batch_size, args.image_size, args.z_dim,
                args.caption_vector_length, 'train', args.data_dir,
                args.data_set, args.vector, loaded_data)

            # DISCR UPDATE ( 5 times for WGAN )
            check_ts = [
                checks['d_loss1'], checks['d_loss2'], checks['d_loss3']
            ]
            # WGAN trains the critic five times per generator update; the
            # clip_updates ops (defined elsewhere in the original script)
            # clip the critic's weights after every step to keep it Lipschitz.
            n_critic = 5 if args.gan_type == 1 else 1
            for _ in range(n_critic):
                if args.gan_type == 1:  # WGAN
                    _, d_loss, gen, _, _, _, d1, d2, d3 = sess.run(
                        [
                            d_optim, loss['d_loss'], outputs['generator'],
                            clip_updates['clip_updates1'],
                            clip_updates['clip_updates2'],
                            clip_updates['clip_updates3']
                        ] + check_ts,
                        feed_dict={
                            input_tensors['t_real_image']: real_images,
                            input_tensors['t_wrong_image']: wrong_images,
                            input_tensors['t_real_caption']: caption_vectors,
                            input_tensors['t_z']: z_noise,
                        })
                else:
                    _, d_loss, gen, d1, d2, d3 = sess.run(
                        [d_optim, loss['d_loss'], outputs['generator']] +
                        check_ts,
                        feed_dict={
                            input_tensors['t_real_image']: real_images,
                            input_tensors['t_wrong_image']: wrong_images,
                            input_tensors['t_real_caption']: caption_vectors,
                            input_tensors['t_z']: z_noise,
                        })

                print("d1", d1)
                print("d2", d2)
                print("d3", d3)
                print("D", d_loss)

            # GEN UPDATE
            _, g_loss, gen = sess.run(
                [g_optim, loss['g_loss'], outputs['generator']],
                feed_dict={
                    input_tensors['t_real_image']: real_images,
                    input_tensors['t_wrong_image']: wrong_images,
                    input_tensors['t_real_caption']: caption_vectors,
                    input_tensors['t_z']: z_noise,
                })
            if args.update_rate == '1_2':

                # GEN UPDATE TWICE, to make sure d_loss does not go to 0
                _, g_loss, gen = sess.run(
                    [g_optim, loss['g_loss'], outputs['generator']],
                    feed_dict={
                        input_tensors['t_real_image']: real_images,
                        input_tensors['t_wrong_image']: wrong_images,
                        input_tensors['t_real_caption']: caption_vectors,
                        input_tensors['t_z']: z_noise,
                    })

            print("LOSSES", d_loss, g_loss, batch_no, i,
                  len(loaded_data['image_list']) / args.batch_size)
            batch_no += 1
            if (batch_no % args.save_every) == 0:
                #print("Saving Images, Model")
                save_for_vis(args.data_dir, real_images, gen, image_files,
                             args.vector, args.update_rate)
                save_path = saver.save(
                    sess,
                    join(
                        args.data_dir, vector_name[args.vector - 1],
                        args.update_rate,
                        "Models/latest_model_{}_temp.ckpt".format(
                            args.data_set)))
        if i % 40 == 0:
            save_path = saver.save(
                sess,
                join(
                    args.data_dir, vector_name[args.vector - 1],
                    args.update_rate,
                    "Models/model_after_{}_epoch_{}.ckpt".format(
                        args.data_set, i)))
Example no. 11
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument('--z_dim',
                        type=int,
                        default=100,
                        help='Noise Dimension')

    parser.add_argument('--t_dim',
                        type=int,
                        default=256,
                        help='Text feature dimension')

    parser.add_argument('--image_size',
                        type=int,
                        default=64,
                        help='Image Size')

    parser.add_argument('--gf_dim',
                        type=int,
                        default=64,
                        help='Number of conv in the first layer gen.')

    parser.add_argument('--df_dim',
                        type=int,
                        default=64,
                        help='Number of conv in the first layer discr.')

    parser.add_argument(
        '--gfc_dim',
        type=int,
        default=1024,
        help='Dimension of generator units for the fully connected layer')

    parser.add_argument('--caption_vector_length',
                        type=int,
                        default=2400,
                        help='Caption Vector Length')

    parser.add_argument('--data_dir',
                        type=str,
                        default="./",
                        help='Data Directory')

    parser.add_argument('--model_path',
                        type=str,
                        default='Data/Models/latest_faces_model.ckpt',
                        help='Trained Model Path')

    parser.add_argument('--n_images',
                        type=int,
                        default=5,
                        help='Number of Images per Caption')

    parser.add_argument('--caption_thought_vectors',
                        type=str,
                        default='Data/sample_caption_vectors.hdf5',
                        help='Caption Thought Vector File')

    args = parser.parse_args()
    model_options = {
        'z_dim': args.z_dim,
        't_dim': args.t_dim,
        'batch_size': args.n_images,
        'image_size': args.image_size,
        'gf_dim': args.gf_dim,
        'df_dim': args.df_dim,
        'gfc_dim': args.gfc_dim,
        'caption_vector_length': args.caption_vector_length
    }

    gan = model.GAN(model_options)
    _, _, _, _, _ = gan.build_model()
    sess = tf.InteractiveSession()
    saver = tf.train.Saver()
    saver.restore(sess, args.model_path)

    input_tensors, outputs = gan.build_generator()

    h = h5py.File(args.caption_thought_vectors, 'r')
    caption_vectors = np.array(h['vectors'])
    caption_image_dic = {}
    for cn, caption_vector in enumerate(caption_vectors):

        caption_images = []
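        # One narrow-range noise vector plus several wider-range ones,
        # presumably to get one conservative sample and more varied extras.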
        z_n1 = np.random.uniform(-0.5, 0.5, [1, args.z_dim])
        z_n2 = np.random.uniform(-10, 10, [args.n_images - 1, args.z_dim])
        z_noise = np.append(z_n1, z_n2, axis=0)
        caption = [caption_vector[0:args.caption_vector_length]
                   ] * args.n_images
        [gen_image] = sess.run(
            [outputs['generator']],
            feed_dict={
                input_tensors['t_real_caption']: caption,
                input_tensors['t_z']: z_noise,
            })

        # print gen_image.shape
        caption_images = [
            gen_image[i, :, :, :] for i in range(0, args.n_images)
        ]
        caption_image_dic[cn] = caption_images
        print "Generated", cn, "captions."

    make_sure_path_exists(join(args.data_dir, 'samples'))

    for f in os.listdir(join(args.data_dir, 'samples')):
        if os.path.isfile(join(args.data_dir, 'samples', f)):
            os.unlink(join(args.data_dir, 'samples', f))

    for cn in range(0, len(caption_vectors)):
        caption_images = []
        for i, im in enumerate(caption_image_dic[cn]):
            im_name = "sample{}_{}.jpg".format(cn, i)
            scipy.misc.imsave(
                join(args.data_dir, 'samples/{}'.format(im_name)), im)
Example no. 12
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--z_dim',
                        type=int,
                        default=100,
                        help='Noise dimension')

    parser.add_argument(
        '--t_dim',
        type=int,
        default=1024,  #default=256,
        help='Text feature dimension')

    parser.add_argument(
        '--batch_size',
        type=int,
        default=64,  #LEECHANGE default=64,
        help='Batch Size')

    parser.add_argument('--image_size',
                        type=int,
                        default=64,
                        help='Image size a (image is a x a)')

    parser.add_argument('--gf_dim',
                        type=int,
                        default=4,
                        help='Number of conv in the first layer gen.')

    parser.add_argument('--df_dim',
                        type=int,
                        default=64,
                        help='Number of conv in the first layer discr.')

    parser.add_argument(
        '--gfc_dim',
        type=int,
        default=1024,
        help='Dimension of generator units for the fully connected layer')

    parser.add_argument('--caption_vector_length',
                        type=int,
                        default=4800,
                        help='Caption Vector Length')

    parser.add_argument('--data_dir',
                        type=str,
                        default="Data",
                        help='Data Directory')

    parser.add_argument(
        '--learning_rate',
        type=float,
        default=1e-5,  #LEECHANGE default=0.0002,
        help='Learning Rate')

    parser.add_argument(
        '--beta1',
        type=float,
        default=.5,  #LEECHANGE default=0.5,
        help='Momentum for Adam Update')

    parser.add_argument('--epochs',
                        type=int,
                        default=6000,
                        help='Max number of epochs')

    parser.add_argument(
        '--save_every',
        type=int,
        default=30,
        help='Save Model/Samples every x iterations over batches')

    parser.add_argument('--resume_model',
                        type=str,
                        default=None,
                        help='Pre-Trained Model Path, to resume from')

    parser.add_argument('--data_set',
                        type=str,
                        default="flowers",
                        help='Data set: MS-COCO, flowers')

    args = parser.parse_args()
    model_options = {
        'z_dim': args.z_dim,
        't_dim': args.t_dim,
        'batch_size': args.batch_size,
        'image_size': args.image_size,
        'gf_dim': args.gf_dim,
        'df_dim': args.df_dim,
        'gfc_dim': args.gfc_dim,
        'caption_vector_length': args.caption_vector_length
    }

    gan = model.GAN(model_options)
    input_tensors, variables, loss, outputs, checks = gan.build_model()

    def clipped_adam_minimize(loss_tensor, var_list, use_betas=True):
        # Adam with element-wise gradient clipping to [-1, 1]; variables
        # that receive no gradient for this loss are skipped.
        if use_betas:
            optimizer = tf.train.AdamOptimizer(
                learning_rate=args.learning_rate, beta1=args.beta1, beta2=.9)
        else:
            optimizer = tf.train.AdamOptimizer(
                learning_rate=args.learning_rate)
        gvs = optimizer.compute_gradients(loss_tensor, var_list=var_list)
        capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var)
                      for grad, var in gvs if grad is not None]
        return optimizer.apply_gradients(capped_gvs)

    # One clipped optimizer per loss term; only d_optim keeps Adam's
    # default betas.
    d_optim = clipped_adam_minimize(loss['d_loss'], variables['d_vars'],
                                    use_betas=False)
    d_optim_full = clipped_adam_minimize(loss['d_loss_full'],
                                         variables['d_vars'])
    d_optim_mid = clipped_adam_minimize(loss['d_loss_mid'],
                                        variables['d_vars'])
    d_optim_small = clipped_adam_minimize(loss['d_loss_small'],
                                          variables['d_vars'])
    d_optim_small_full = clipped_adam_minimize(loss['d_loss_small_full'],
                                               variables['d_vars'])
    g_optim = clipped_adam_minimize(loss['g_loss'], variables['g_vars'])
    g_optim_full = clipped_adam_minimize(loss['g_loss_full'],
                                         variables['g_vars'])
    g_optim_mid = clipped_adam_minimize(loss['g_loss_mid'],
                                        variables['g_vars'])
    g_optim_small = clipped_adam_minimize(loss['g_loss_small'],
                                          variables['g_vars'])
    g_optim_small_mid = clipped_adam_minimize(loss['g_loss_small_mid'],
                                              variables['g_vars'])

    sess = tf.InteractiveSession()
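    # 'prince' is a module-level flag in the original script; it selects the
    # newer variable-initializer API in one environment.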
    if prince:
        sess.run(tf.global_variables_initializer())
    else:
        tf.initialize_all_variables().run()

    saver = tf.train.Saver()
    if args.resume_model:
        saver.restore(sess, args.resume_model)

    loaded_data = load_training_data(args.data_dir, args.data_set)
    init = 1
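    # Per-scale running averages of (discriminator - generator) loss; when
    # their sum goes negative, the discriminator is paused for 'disc_break'
    # batches in the loop below.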
    d_avg_full, d_avg_mid, d_avg_sml = 0, 0, 0
    lb = 0  #-.3
    disc_break = -.3
    for i in range(args.epochs):
        img_idx = 0
        batch_no = 0
        while batch_no * args.batch_size < loaded_data['data_length']:
            real_images, wrong_images, caption_vectors, z_noise, image_files = get_training_batch(
                batch_no, args.batch_size, args.image_size, args.z_dim,
                args.caption_vector_length, 'train', args.data_dir,
                args.data_set, loaded_data)

            # DISCR UPDATE
            check_ts = [
                checks['d_loss_full'], checks['d_loss_mid'],
                checks['d_loss_small'], checks['g_loss_full'],
                checks['g_loss_mid'], checks['g_loss_small']
            ]
            '''
			sess.run([g_optim],
				feed_dict = {
					input_tensors['t_real_image'] : real_images,
					input_tensors['t_real_caption'] : caption_vectors,
					input_tensors['t_z'] : z_noise,
					input_tensors['l2reg']: 0
				})
			sess.run([g_optim_full],
				feed_dict = {
					input_tensors['t_real_image'] : real_images,
					input_tensors['t_real_caption'] : caption_vectors,
					input_tensors['t_z'] : z_noise,
					input_tensors['l2reg']: 0
				})
			running = []
			if d_avg_full < lb:
				running += [g_optim_full]
			if d_avg_mid < lb:
				running += [g_optim_mid]
			if d_avg_sml < lb:
				running += [g_optim_small]
				
			if d_avg_full + d_avg_mid + d_avg_sml > 12:
				running += [d_optim]
				sess.run(running,
					feed_dict = {
							input_tensors['t_real_image'] : real_images,
							input_tensors['t_wrong_image'] : wrong_images,
							input_tensors['t_real_caption'] : caption_vectors,
							input_tensors['t_z'] : z_noise,
							input_tensors['l2reg']: 0
							})
			elif len(running) > 0:
				sess.run(running,
					feed_dict = {
						input_tensors['t_real_image'] : real_images,
						input_tensors['t_real_caption'] : caption_vectors,
						input_tensors['t_z'] : z_noise,
						input_tensors['l2reg']: 0
						})
			'''
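            # Discriminator recently too strong: skip its update and train
            # only the generator this batch.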
            if disc_break > 0:
                disc_break -= 1
                sess.run(
                    [g_optim],
                    feed_dict={
                        input_tensors['t_real_image']: real_images,
                        input_tensors['t_real_caption']: caption_vectors,
                        input_tensors['t_z']: z_noise,
                        input_tensors['l2reg']: 0
                    })
            else:
                _, _, d_loss, gen, gen_small, gen_mid, real_small, real_mid, g_loss, df, dm, ds, gf, gm, gs = sess.run(
                    [
                        g_optim, d_optim, loss['d_loss'], outputs['generator'],
                        outputs['generator_small_image'],
                        outputs['generator_mid_image'],
                        outputs['real_small_image'], outputs['real_mid_image'],
                        loss['g_loss']
                    ] + check_ts,
                    feed_dict={
                        input_tensors['t_real_image']: real_images,
                        input_tensors['t_wrong_image']: wrong_images,
                        input_tensors['t_real_caption']: caption_vectors,
                        input_tensors['t_z']: z_noise,
                        input_tensors['l2reg']: 0
                    })
                df /= 10.
                dm /= 10.
                ds /= 10.
                gf *= 10.
                gm *= 10.
                gs *= 10.
                d_avg_full = d_avg_full * .8 + .2 * (df - gf)
                d_avg_mid = d_avg_mid * .8 + .2 * (dm - gm)
                d_avg_sml = d_avg_sml * .8 + .2 * (ds - gs)

                if d_avg_full + d_avg_mid + d_avg_sml < 0:
                    disc_break = np.abs(
                        int(d_avg_full + d_avg_mid + d_avg_sml)) * 3 + 1

                if 1:  # always dump samples (was gated on batch_no % 2 == 0)

                    idx = np.random.randint(1, 10)
                    img_full = gen[idx, :, :, :]
                    img_small = gen_small[idx, :, :, :]
                    r_small = real_small[idx, :, :, :]
                    img_mid = gen_mid[idx, :, :, :]
                    r_mid = real_mid[idx, :, :, :]
                    scipy.misc.imsave(
                        'output_images/' + str(i) + '_img_idx:' +
                        str(batch_no) + 'full.jpg', img_full)
                    scipy.misc.imsave(
                        'output_images/' + str(i) + '_img_idx:' +
                        str(batch_no) + 'tiny.jpg', img_small)
                    scipy.misc.imsave(
                        'output_images/' + str(i) + '_img_idx:' +
                        str(batch_no) + 'mid.jpg', img_mid)
                    scipy.misc.imsave(
                        'output_images/' + str(i) + '_img_idx:' +
                        str(batch_no) + 'a_r_small.jpg', r_small)
                    scipy.misc.imsave(
                        'output_images/' + str(i) + '_img_idx:' +
                        str(batch_no) + 'a_r_mid.jpg', r_mid)

                    img_idx += 1
            '''
			sess.run([g_optim_full],
				feed_dict = {
					input_tensors['t_real_image'] : real_images,
					input_tensors['t_real_caption'] : caption_vectors,
					input_tensors['t_z'] : z_noise,
					input_tensors['l2reg']: 0
				})
			'''
            print('d_loss_full', df)
            print('d_loss_mid', dm)
            print('d_loss_small', ds)
            print('g_loss_full', gf)
            print('g_loss_mid', gm)
            print('g_loss_small', gs)
            print("D", d_loss)

            print('d_avg_full', d_avg_full)
            print('d_avg_mid', d_avg_mid)
            print('d_avg_sml', d_avg_sml)

            print "LOSSES", d_loss, g_loss, batch_no, i, len(
                loaded_data['image_list']) / args.batch_size
            batch_no += 1
            if (batch_no % args.save_every) == 0:
                print "Saving Images, Model"
                #Lee commented the following line out because it crashed. No idea what it was trying to do.
                #save_for_vis(args.data_dir, real_images, gen, image_files)
                save_path = saver.save(
                    sess, "Data/Models/latest_model_1_{}_temp.ckpt".format(
                        args.data_set))
        if i % 5 == 0:
            save_path = saver.save(
                sess, "Data/Models/model_after_{}_epoch_{}.ckpt".format(
                    args.data_set, i))
Example no. 13
def main():
    z_dim = 100
    t_dim = 256
    image_size = 64
    model_type = 'gan'
    n_images = 5

    # model Parameters
    gf_dim = 64
    df_dim = 64

    # fully connected
    gfc_dim = 1024
    caption_vector_length = 600

    MODEL_DIR = 'gan-models'
    MODEL_NAME = 'final_model.ckpt'
    OUTPUT_DIR = 'samples'

    caption_vectors_name = 'test_caption_vectors.hdf5'

    model_options = {
        'z_dim': z_dim,
        't_dim': t_dim,
        'batch_size': n_images,
        'image_size': image_size,
        'gf_dim': gf_dim,
        'df_dim': df_dim,
        'gfc_dim': gfc_dim,
        'caption_vector_length': caption_vector_length
    }

    gan = model.GAN(model_options)
    _, _, _, _, _ = gan.build_model()
    sess = tf.Session()
    saver = tf.train.Saver()
    saver.restore(sess, join(MODEL_DIR, model_type, 'Models', MODEL_NAME))

    input_tensors, outputs = gan.build_generator()

    h = h5py.File(join(MODEL_DIR, model_type, caption_vectors_name), 'r')
    caption_image_dic = {}

    for key in h:
        np.random.seed(1005)
        caption_images = []
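        # np.random.uniform(0, 0, ...) yields an all-zero noise tensor, so
        # every caption is rendered deterministically.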
        z_noise = np.random.uniform(0, 0, [n_images, z_dim])
        caption = np.array(
            [h[key][0, :caption_vector_length] for i in range(n_images)])

        [gen_image] =\
          sess.run([outputs['generator']],
                   feed_dict = {input_tensors['t_real_caption'] : caption,
                                input_tensors['t_z'] : z_noise} )

        caption_image_dic[key] =\
          [gen_image[i, :, :, :] for i in range(0, n_images)]

    if not os.path.exists(OUTPUT_DIR):
        os.makedirs(OUTPUT_DIR)
    for key in h:
        for i, im in enumerate(caption_image_dic[key]):
            scipy.misc.imsave(
                join(OUTPUT_DIR, 'sample_' + key + '_' + str(i + 1) + '.jpg'),
                im)
Example no. 14
import tensorflow as tf

import datasetloader
import cifarloader
import model  # local module providing the GAN class used below
import options  # local module providing option_initialize()

run_config = tf.ConfigProto()
run_config.gpu_options.allow_growth = True
# run optimization
with tf.Session(config=run_config) as sess:
    # load options
    opt = options.option_initialize()
    # load dataset
    if opt.dataset == 'cifar':
        data_loader = cifarloader.cifarDataLoader(
            opt)  # for image generation using cifar
    else:
        data_loader = datasetloader.datasetloader(
            opt)  # for image generation using celebA or imagenet

    Gan = model.GAN(opt, sess)
    batch_iters = data_loader.batch_iters()
    print('batch_iters:', batch_iters)

    if not opt.train:
        Gan.sampling()
    else:
        epochs = (opt.iters - Gan.counter) // batch_iters
        for epoch in range(epochs):
            for idx in range(0, batch_iters):
                batch_images, batch_z = data_loader.Loaddata(idx)
                Gan.optimization(batch_images, batch_z)
        print(Gan.mean)
        print(max(Gan.mean))
Example no. 15
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument('--z_dim',
                        type=int,
                        default=100,
                        help='Noise Dimension')

    parser.add_argument('--t_dim',
                        type=int,
                        default=256,
                        help='Text feature dimension')

    parser.add_argument('--image_size',
                        type=int,
                        default=64,
                        help='Image Size')

    parser.add_argument('--gf_dim',
                        type=int,
                        default=64,
                        help='Number of conv in the first layer gen.')

    parser.add_argument('--df_dim',
                        type=int,
                        default=64,
                        help='Number of conv in the first layer discr.')

    parser.add_argument(
        '--gfc_dim',
        type=int,
        default=1024,
        help='Dimension of generator units for the fully connected layer')

    parser.add_argument('--caption_vector_length',
                        type=int,
                        default=2400,
                        help='Caption Vector Length')

    parser.add_argument('--n_images',
                        type=int,
                        default=4,
                        help='Number of Images per Caption')

    parser.add_argument('--split',
                        type=str,
                        default='gen',
                        help='train/val/test/gen')

    parser.add_argument('--experiment',
                        type=str,
                        default="default",
                        help='Experiment name (subdirectory under '
                        'Data/Experiments)')

    parser.add_argument(
        '--epoch',
        type=int,
        default=None,
        help='Epoch of the trained model to load. Defaults to latest checkpoint'
    )
    parser.add_argument('--transfer',
                        action='store_true',
                        help='does transfer learning')

    parser.add_argument('--extra_32',
                        action='store_true',
                        help='extra conv layer when the image is at size 32')
    parser.add_argument('--extra_64',
                        action='store_true',
                        help='extra conv layer when the image is at size 64')
    parser.add_argument('--vgg',
                        action='store_true',
                        help='use vgg like layout')

    args = parser.parse_args()
    model_options = {
        'z_dim': args.z_dim,
        't_dim': args.t_dim,
        'batch_size': args.n_images,
        'image_size': args.image_size,
        'gf_dim': args.gf_dim,
        'df_dim': args.df_dim,
        'gfc_dim': args.gfc_dim,
        'caption_vector_length': args.caption_vector_length,
        'extra_32': args.extra_32,
        'extra_64': args.extra_64,
        'vgg': args.vgg
    }

    data_dir = os.path.join("Data", "Experiments",
                            "{}".format(args.experiment))
    caption_thought_vectors = os.path.join(
        data_dir, '{}_captions.hdf5'.format(args.split))
    save_dir = os.path.join(data_dir, "{}_samples".format(args.split))

    model_path = os.path.join(data_dir, "model")
    checkpoint = tf.train.latest_checkpoint(model_path)
    if args.epoch is not None:
        checkpoint = os.path.join(model_path,
                                  "after_{}_epochs.ckpt".format(args.epoch))
        save_dir = os.path.join(save_dir, "epoch_{}".format(args.epoch))
    else:
        save_dir = os.path.join(save_dir, "latest".format(args.epoch))

    gan = model.GAN(model_options)
    _, _, _, _, _ = gan.build_model()
    sess = tf.InteractiveSession()
    saver = tf.train.Saver()
    saver.restore(sess, checkpoint)

    input_tensors, outputs = gan.build_generator()

    h = h5py.File(caption_thought_vectors, 'r')
    caption_vectors = {}
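    # For the 'gen' split the hdf5 file holds one flat array of caption
    # vectors; otherwise vectors are grouped per image file under a single
    # class name.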
    if args.split == 'gen':
        caption_vectors["generated"] = np.array(h['vectors'])
    else:
        class_name = list(h.keys())[0]
        for img_file, vector in h[class_name].items():
            img_id = os.path.splitext(img_file)[0]
            caption_vectors[img_id] = np.array(vector)

    generated_images = {}
    for img_id, vectors in caption_vectors.items():
        caption_image_dic = {}
        for cn, caption_vector in enumerate(vectors):
            caption_images = []
            z_noise = np.random.uniform(-1, 1, [args.n_images, args.z_dim])
            caption = [caption_vector[0:args.caption_vector_length]
                       ] * args.n_images

            [gen_image] = sess.run(
                [outputs['generator']],
                feed_dict={
                    input_tensors['t_real_caption']: caption,
                    input_tensors['t_z']: z_noise,
                })

            caption_images = [
                gen_image[i, :, :, :] for i in range(0, args.n_images)
            ]
            caption_image_dic[cn] = caption_images
            print("Generated {} images for {}".format(cn, img_id))
        generated_images[img_id] = caption_image_dic

    if os.path.isdir(save_dir):
        shutil.rmtree(save_dir)

    os.makedirs(save_dir, exist_ok=True)

    for img_id, caption_image_dic in generated_images.items():
        for cn, images in caption_image_dic.items():
            for i, im in enumerate(images):
                scipy.misc.imsave(
                    join(
                        save_dir,
                        '{}_{}_image_{}_{}.jpg'.format(img_id,
                                                       args.image_size, cn,
                                                       chr(ord('A') + i))), im)
Example no. 16
def main():

    # argument parser.
    parser = argparse.ArgumentParser()

    # add the following as possible arguments that can be passed while running the file.
    # [
    # 	z_dim (Noise dimension)
    # 	t_dim (Text feature dimension)
    # 	batch_size (No of images used in training during iterations)
    # 	gf_dim (neurons in the generator's first layer)
    # 	df_dim (neurons in the discriminator's first layer)
    # 	data_dir (Path to data directory)
    # 	learning_rate
    # 	beta1 (momentum value for Adam update)
    # 	epochs (Number of epochs) **10 epochs take around 6-7 hours**
    # 	save_every (number of iterations over which the model is saved)
    # 	resume_model (to resume the training of a model from file)
    # 	data_set (which data set to train on)
    # ]

    parser.add_argument('--z_dim',
                        type=int,
                        default=100,
                        help='Noise dimension')

    parser.add_argument('--t_dim',
                        type=int,
                        default=256,
                        help='Text feature dimension')

    parser.add_argument('--batch_size',
                        type=int,
                        default=64,
                        help='Batch Size')

    parser.add_argument('--image_size',
                        type=int,
                        default=64,
                        help='Image size a (image is a x a)')

    parser.add_argument('--gf_dim',
                        type=int,
                        default=64,
                        help='Number of conv in the first layer gen.')

    parser.add_argument('--df_dim',
                        type=int,
                        default=64,
                        help='Number of conv in the first layer discr.')

    parser.add_argument(
        '--gfc_dim',
        type=int,
        default=1024,
        help='Dimension of generator units for the fully connected layer')

    parser.add_argument('--caption_vector_length',
                        type=int,
                        default=2400,
                        help='Caption Vector Length')

    parser.add_argument('--data_dir',
                        type=str,
                        default="Data",
                        help='Data Directory')

    parser.add_argument('--learning_rate',
                        type=float,
                        default=0.0002,
                        help='Learning Rate')

    parser.add_argument('--beta1',
                        type=float,
                        default=0.5,
                        help='Momentum for Adam Update')

    parser.add_argument('--epochs',
                        type=int,
                        default=600,
                        help='Max number of epochs')

    parser.add_argument(
        '--save_every',
        type=int,
        default=30,
        help='Save Model/Samples every x iterations over batches')

    parser.add_argument('--resume_model',
                        type=str,
                        default=None,
                        help='Pre-Trained Model Path, to resume from')

    parser.add_argument('--data_set',
                        type=str,
                        default="flowers",
                        help='Which data set?')

    args = parser.parse_args()

    # Dict defining the model properties depending upon the command line arguments.
    model_options = {
        'z_dim': args.z_dim,
        't_dim': args.t_dim,
        'batch_size': args.batch_size,
        'image_size': args.image_size,
        'gf_dim': args.gf_dim,
        'df_dim': args.df_dim,
        'gfc_dim': args.gfc_dim,
        'caption_vector_length': args.caption_vector_length
    }

    # Instantiate the model.GAN class from the model file, passing the above
    # dictionary to create a model with those properties.
    gan = model.GAN(model_options)
    # "gan" is the handle to that model for the rest of the code

    # Unpacking the values sent by build_model() function
    input_tensors, variables, loss, outputs, checks = gan.build_model()

    # Based on the losses received from gan.build_model(), use the Adam optimizer to minimize them
    with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
        d_optim = tf.train.AdamOptimizer(args.learning_rate,
                                         beta1=args.beta1).minimize(
                                             loss['d_loss'],
                                             var_list=variables['d_vars'])

    with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
        g_optim = tf.train.AdamOptimizer(args.learning_rate,
                                         beta1=args.beta1).minimize(
                                             loss['g_loss'],
                                             var_list=variables['g_vars'])

    # Initialize all variables
    sess = tf.InteractiveSession()
    tf.initialize_all_variables().run()

    # If resuming a trained model for further training
    saver = tf.train.Saver()
    if args.resume_model:
        saver.restore(sess, args.resume_model)

    loaded_data = load_training_data(args.data_dir, args.data_set)

    j = 0  #To keep track of iterations
    # For "args.epochs" number of epochs---
    for i in range(args.epochs):
        batch_no = 0
        print("Batch size: ", args.batch_size)
        print("loaded_data['data_length']: ",
              loaded_data['data_length'])  #6000

        while batch_no * args.batch_size < loaded_data['data_length']:

            print("batch_no:", batch_no + 1, "iteration_no:", j + 1, "epoch:",
                  i + 1)

            # Create a training batch which is fed into the dicriminator in the current batch.
            real_images, wrong_images, caption_vectors, z_noise, image_files = get_training_batch(
                batch_no, args.batch_size, args.image_size, args.z_dim,
                args.caption_vector_length, 'train', args.data_dir,
                args.data_set, loaded_data)

            # DISCR UPDATE
            check_ts = [
                checks['d_loss1'], checks['d_loss2'], checks['d_loss3']
            ]

            # Feed in input from the training batch using feed_dict to the placeholders
            _, d_loss, gen, d1, d2, d3 = sess.run(
                [d_optim, loss['d_loss'], outputs['generator']] + check_ts,
                feed_dict={
                    input_tensors['t_real_image']: real_images,
                    input_tensors['t_wrong_image']: wrong_images,
                    input_tensors['t_real_caption']: caption_vectors,
                    input_tensors['t_z']: z_noise,
                })

            # Print the discriminator losses
            print("d1", d1)
            print("d2", d2)
            print("d3", d3)
            print("D", d_loss)

            # GEN UPDATE
            # Feed in input from the training batch using feed_dict to the placeholders
            _, g_loss, gen = sess.run(
                [g_optim, loss['g_loss'], outputs['generator']],
                feed_dict={
                    input_tensors['t_real_image']: real_images,
                    input_tensors['t_wrong_image']: wrong_images,
                    input_tensors['t_real_caption']: caption_vectors,
                    input_tensors['t_z']: z_noise,
                })

            # GEN UPDATE TWICE, to make sure d_loss does not go to 0
            _, g_loss, gen = sess.run(
                [g_optim, loss['g_loss'], outputs['generator']],
                feed_dict={
                    input_tensors['t_real_image']: real_images,
                    input_tensors['t_wrong_image']: wrong_images,
                    input_tensors['t_real_caption']: caption_vectors,
                    input_tensors['t_z']: z_noise,
                })

            # Print final loss of current batch
            print("LOSSES", d_loss, g_loss, batch_no, i,
                  len(loaded_data['image_list']) / args.batch_size, "\n")
            batch_no += 1
            j += 1

            # Regularly save the network
            if (batch_no % args.save_every) == 0:
                print("Saving Images, Model", "\n\n")
                save_for_vis(args.data_dir, real_images, gen, image_files)
                save_path = saver.save(
                    sess, "Data/Models/latest_model_{}_temp.ckpt".format(
                        args.data_set))

        if i % 5 == 0:
            save_path = saver.save(
                sess, "Data/Models/model_after_{}_epoch_{}.ckpt".format(
                    args.data_set, i))
Example no. 17
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument('--z_dim',
                        type=int,
                        default=100,
                        help='Noise Dimension')

    parser.add_argument('--t_dim',
                        type=int,
                        default=256,
                        help='Text feature dimension')

    parser.add_argument('--image_size',
                        type=int,
                        default=64,
                        help='Image Size')

    parser.add_argument('--gf_dim',
                        type=int,
                        default=64,
                        help='Number of conv in the first layer gen.')

    parser.add_argument('--df_dim',
                        type=int,
                        default=64,
                        help='Number of conv in the first layer discr.')

    parser.add_argument(
        '--gfc_dim',
        type=int,
        default=1024,
        help='Dimension of generator units for the fully connected layer')

    parser.add_argument('--caption_vector_length',
                        type=int,
                        default=768,
                        help='Caption Vector Length')

    parser.add_argument('--data_dir',
                        type=str,
                        default="Data",
                        help='Data Directory')

    parser.add_argument(
        '--model_path',
        type=str,
        default='Data/Models/latest_bert_model_flowers_temp.ckpt',
        help='Trained Model Path')

    parser.add_argument('--n_images',
                        type=int,
                        default=5,
                        help='Number of Images per Caption')

    parser.add_argument('--caption_thought_vectors',
                        type=str,
                        default='Data/submission_bert_caption_vectors.hdf5',
                        help='Caption Thought Vector File')

    # conda install -c conda-forge tensorflow==1.15

    args = parser.parse_args()
    model_options = {
        'z_dim': args.z_dim,
        't_dim': args.t_dim,
        'batch_size': args.n_images,
        'image_size': args.image_size,
        'gf_dim': args.gf_dim,
        'df_dim': args.df_dim,
        'gfc_dim': args.gfc_dim,
        'caption_vector_length': args.caption_vector_length
    }

    gan = model.GAN(model_options)
    _, _, _, _, _ = gan.build_model()
    sess = tf.InteractiveSession()
    saver = tf.train.Saver()
    saver.restore(sess, args.model_path)

    input_tensors, outputs = gan.build_generator()

    h = h5py.File(args.caption_thought_vectors, 'r')
    caption_vectors = np.array(h['vectors'])
    caption_image_dic = {}
    for cn, caption_vector in enumerate(caption_vectors):

        caption_images = []
        z_noise = np.random.uniform(-1, 1, [args.n_images, args.z_dim])
        caption = [caption_vector[0:args.caption_vector_length]
                   ] * args.n_images

        [gen_image] = sess.run(
            [outputs['generator']],
            feed_dict={
                input_tensors['t_real_caption']: caption,
                input_tensors['t_z']: z_noise,
            })

        caption_images = [
            gen_image[i, :, :, :] for i in range(0, args.n_images)
        ]
        caption_image_dic[cn] = caption_images
        print("Generated", cn)

    for f in os.listdir(join(args.data_dir, 'val_samples')):
        if os.path.isfile(join(args.data_dir, 'val_samples', f)):
            os.unlink(join(args.data_dir, 'val_samples', f))

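    # Stitch each caption's samples side by side with a 5-pixel black spacer
    # between them; the [0:-1] slice drops the trailing spacer.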
    for cn in range(0, len(caption_vectors)):
        caption_images = []
        for i, im in enumerate(caption_image_dic[cn]):
            # im_name = "caption_{}_{}.jpg".format(cn, i)
            # scipy.misc.imsave( join(args.data_dir, 'val_samples/{}'.format(im_name)) , im)
            caption_images.append(im)
            caption_images.append(np.zeros((64, 5, 3)))
        combined_image = np.concatenate(caption_images[0:-1], axis=1)
        imageio.imwrite(
            join(args.data_dir,
                 'val_samples/combined_image_{}.jpg'.format(cn)),
            combined_image)
Example no. 18
def map_fn(index=None, flags=None):
    torch.set_default_tensor_type('torch.FloatTensor')
    torch.manual_seed(1234)

    train_data = dataset.DATA(config.TRAIN_DIR)

    if config.MULTI_CORE:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_data,
            num_replicas=xm.xrt_world_size(),
            rank=xm.get_ordinal(),
            shuffle=True)
    else:
        train_sampler = torch.utils.data.RandomSampler(train_data)

    train_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=flags['batch_size']
        if config.MULTI_CORE else config.BATCH_SIZE,
        sampler=train_sampler,
        num_workers=flags['num_workers'] if config.MULTI_CORE else 4,
        drop_last=True,
        pin_memory=True)

    if config.MULTI_CORE:
        DEVICE = xm.xla_device()
    else:
        DEVICE = config.DEVICE

    netG = model.colorization_model().double()
    netD = model.discriminator_model().double()

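    # Frozen VGG16 feature extractor; presumably used inside engine.train
    # for a perceptual (feature-matching) loss.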
    VGG_modelF = torchvision.models.vgg16(pretrained=True).double()
    VGG_modelF.requires_grad_(False)

    netG = netG.to(DEVICE)
    netD = netD.to(DEVICE)

    VGG_modelF = VGG_modelF.to(DEVICE)

    optD = torch.optim.Adam(netD.parameters(), lr=2e-4, betas=(0.5, 0.999))
    optG = torch.optim.Adam(netG.parameters(), lr=2e-4, betas=(0.5, 0.999))

    ## Training
    train_start = time.time()
    losses = {
        'G_losses': [],
        'D_losses': [],
        'EPOCH_G_losses': [],
        'EPOCH_D_losses': [],
        'G_losses_eval': []
    }

    netG, optG, netD, optD, epoch_checkpoint = utils.load_checkpoint(
        config.CHECKPOINT_DIR, netG, optG, netD, optD, DEVICE)
    netGAN = model.GAN(netG, netD)
    end_epoch = (flags['num_epochs']
                 if config.MULTI_CORE else config.NUM_EPOCHS) + 1
    for epoch in range(epoch_checkpoint, end_epoch):
        print('\n')
        print('#' * 8, f'EPOCH-{epoch}', '#' * 8)
        losses['EPOCH_G_losses'] = []
        losses['EPOCH_D_losses'] = []
        if config.MULTI_CORE:
            para_train_loader = pl.ParallelLoader(
                train_loader, [DEVICE]).per_device_loader(DEVICE)
            engine.train(para_train_loader,
                         netGAN,
                         netD,
                         VGG_modelF,
                         optG,
                         optD,
                         device=DEVICE,
                         losses=losses)
            elapsed_train_time = time.time() - train_start
            print("Process", index, "finished training. Train time was:",
                  elapsed_train_time)
        else:
            engine.train(train_loader,
                         netGAN,
                         netD,
                         VGG_modelF,
                         optG,
                         optD,
                         device=DEVICE,
                         losses=losses)
        #########################CHECKPOINTING#################################
        utils.create_checkpoint(epoch,
                                netG,
                                optG,
                                netD,
                                optD,
                                max_checkpoint=config.KEEP_CKPT,
                                save_path=config.CHECKPOINT_DIR)
        ########################################################################
        utils.plot_some(train_data, netG, DEVICE, epoch)
        gc.collect()
Example no. 19
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--rnn_hidden',
                        type=int,
                        default=200,
                        help='Number of nodes in the rnn hidden layer')

    parser.add_argument('--z_dim',
                        type=int,
                        default=100,
                        help='Noise dimension')

    parser.add_argument('--word_dim',
                        type=int,
                        default=256,
                        help='Word embedding matrix dimension')

    parser.add_argument('--t_dim',
                        type=int,
                        default=256,
                        help='Text feature dimension')

    parser.add_argument('--batch_size', type=int, default=2, help='Batch Size')

    parser.add_argument('--image_size',
                        type=int,
                        default=32,
                        help='Image size a (image is a x a)')

    parser.add_argument('--gf_dim',
                        type=int,
                        default=64,
                        help='Number of conv in the first layer gen.')

    parser.add_argument('--df_dim',
                        type=int,
                        default=64,
                        help='Number of conv in the first layer discr.')

    parser.add_argument(
        '--gfc_dim',
        type=int,
        default=1024,
        help='Dimension of generator units for the fully connected layer')

    parser.add_argument('--caption_vector_length',
                        type=int,
                        default=20,
                        help='Caption Vector Length')

    parser.add_argument('--data_dir',
                        type=str,
                        default="../data",
                        help='Data Directory')

    parser.add_argument('--learning_rate',
                        type=float,
                        default=0.0002,
                        help='Learning Rate')

    parser.add_argument('--beta1',
                        type=float,
                        default=0.5,
                        help='Momentum for Adam Update')

    parser.add_argument('--epochs',
                        type=int,
                        default=600,
                        help='Max number of epochs')

    parser.add_argument(
        '--save_every',
        type=int,
        default=30,
        help='Save Model/Samples every x iterations over batches')

    parser.add_argument('--resume_model',
                        type=str,
                        default=None,
                        help='Pre-Trained Model Path, to resume from')

    parser.add_argument('--data_set',
                        type=str,
                        default="flowers",
                        help='Data set: MS-COCO, flowers')

    args = parser.parse_args()
    model_options = {
        'rnn_hidden': args.rnn_hidden,
        'word_dim': args.word_dim,
        'z_dim': args.z_dim,
        't_dim': args.t_dim,
        'batch_size': args.batch_size,
        'image_size': args.image_size,
        'gf_dim': args.gf_dim,
        'df_dim': args.df_dim,
        'gfc_dim': args.gfc_dim,
        'caption_vector_length': args.caption_vector_length
    }

    gan = model.GAN(model_options)
    with tf.variable_scope(tf.get_variable_scope()) as scope:
        input_tensors, variables, loss, outputs, checks = gan.build_model()

    # with tf.variable_scope("Adam", reuse=False):
    # d_optim = tf.train.AdamOptimizer().minimize(loss['d_loss'], var_list=variables['d_vars'], name='Adam_d')
    # g_optim = tf.train.AdamOptimizer().minimize(loss['g_loss'], var_list=variables['g_vars'], name='Adam_g')
    d_optim = tf.train.AdamOptimizer(args.learning_rate,
                                     beta1=args.beta1).minimize(
                                         loss['d_loss'],
                                         var_list=variables['d_vars'],
                                         name='Adam_d')
    g_optim = tf.train.AdamOptimizer(args.learning_rate,
                                     beta1=args.beta1).minimize(
                                         loss['g_loss'],
                                         var_list=variables['g_vars'],
                                         name='Adam_g')

    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()

    saver = tf.train.Saver()
    if args.resume_model:
        saver.restore(sess, args.resume_model)

    loaded_data = load_training_data(args.data_dir, args.data_set,
                                     args.caption_vector_length,
                                     args.image_size)

    for i in range(args.epochs):
        print("epoch" + str(i))
        batch_no = 0
        index4shuffle = list(range(len(loaded_data)))
        random.shuffle(index4shuffle)
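        # indices are reshuffled once per epoch; each batch below slices into
        # this permutation rather than the raw dataset order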

        while (batch_no + 1) * args.batch_size < len(loaded_data):
            real_images, wrong_images, caption_vectors, z_noise, image_files = get_training_batch(
                batch_no, args.batch_size, args.image_size, args.z_dim,
                args.caption_vector_length, 'train', args.data_dir,
                args.data_set,
                index4shuffle[batch_no * args.batch_size:(batch_no + 1) *
                              args.batch_size], loaded_data)
            print(caption_vectors)

            # DISCR UPDATE
            check_ts = [
                checks['d_loss1'], checks['d_loss2'], checks['d_loss3']
            ]
            _, d_loss, gen, d1, d2, d3 = sess.run(
                [d_optim, loss['d_loss'], outputs['generator']] + check_ts,
                feed_dict={
                    input_tensors['t_real_image']: real_images,
                    input_tensors['t_wrong_image']: wrong_images,
                    input_tensors['t_real_caption']: caption_vectors,
                    input_tensors['t_z']: z_noise,
                })

            print("d1", d1)
            print("d2", d2)
            print("d3", d3)
            print("D", d_loss)

            # GEN UPDATE
            _, g_loss, gen = sess.run(
                [g_optim, loss['g_loss'], outputs['generator']],
                feed_dict={
                    input_tensors['t_real_image']: real_images,
                    input_tensors['t_wrong_image']: wrong_images,
                    input_tensors['t_real_caption']: caption_vectors,
                    input_tensors['t_z']: z_noise,
                })

            # GEN UPDATE TWICE, to make sure d_loss does not go to 0
            _, g_loss, gen = sess.run(
                [g_optim, loss['g_loss'], outputs['generator']],
                feed_dict={
                    input_tensors['t_real_image']: real_images,
                    input_tensors['t_wrong_image']: wrong_images,
                    input_tensors['t_real_caption']: caption_vectors,
                    input_tensors['t_z']: z_noise,
                })
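            # Note: the generator op is run twice per discriminator step, a
            # common heuristic to keep the discriminator from overpowering the
            # generator; the ratio is tunable (Example no. 22 below exposes it
            # as --gen_updates).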

            # print("LOSSES")
            print("d_loss=" + str(d_loss) + ", g_loss=" + str(g_loss))
            batch_no += 1
            if (batch_no % args.save_every) == 0:
                print("Saving Images, Model")
                save_for_vis(args.data_dir, real_images, gen, image_files)
                save_path = saver.save(
                    sess, "../models/latest_model_{}_temp.ckpt".format(
                        args.data_set))
        if i % 5 == 0:
            save_path = saver.save(
                sess, "../models/model_after_{}_epoch_{}.ckpt".format(
                    args.data_set, i))
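
The three discriminator checks printed above (d1, d2, d3) presumably correspond to the matching-aware GAN-CLS terms of Reed et al. (2016): real image with matching caption, real image with mismatched caption, and generated image with matching caption. A minimal hedged sketch of those loss terms, assuming the model exposes one sigmoid logit per pair (the logit names here are hypothetical, not from this codebase):

import tensorflow as tf

def gan_cls_losses(d_real_logit, d_wrong_logit, d_fake_logit):
    # real image + matching caption -> label 1
    d_loss1 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=d_real_logit, labels=tf.ones_like(d_real_logit)))
    # real image + mismatched caption -> label 0
    d_loss2 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=d_wrong_logit, labels=tf.zeros_like(d_wrong_logit)))
    # generated image + matching caption -> label 0
    d_loss3 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=d_fake_logit, labels=tf.zeros_like(d_fake_logit)))
    # the generator wants the fake pair to be scored as real
    g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=d_fake_logit, labels=tf.ones_like(d_fake_logit)))
    return d_loss1 + d_loss2 + d_loss3, g_loss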
Example no. 20
0
def main():
	
	model_options = {
		'z_dim' : 100,
		't_dim' : 256,
		'batch_size' : 64,
		'image_size' : 64,
		'gf_dim' : 64,
		'df_dim' : 64,
		'gfc_dim' : 1024,
		'caption_vector_length' : 4800
	}
	beta1 = 0.5
	lr    = 2e-4
	z_dim = 100
	t_dim = 256
	batch_size = 64
	image_size = 64
	gfc_dim = 1024
	caption_vector_length = 4800
	epochs = 600
	
	
	gan = model.GAN(model_options)
	input_tensors, variables, loss, outputs, checks = gan.build_model()
	
	checkpoint_dir = "./DCModel"


	d_optim = tf.train.AdamOptimizer(lr*0.5, beta1 = beta1).minimize(loss['d_loss'], var_list=variables['d_vars'])
	g_optim = tf.train.AdamOptimizer(lr, beta1 = beta1).minimize(loss['g_loss'], var_list=variables['g_vars'])
	
	gpu_options = tf.GPUOptions(allow_growth=True)  # assumed: gpu_options was defined elsewhere in the original module
	with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
		tf.global_variables_initializer().run()
	
		saver = tf.train.Saver()
		ckpt = tf.train.get_checkpoint_state("Data/Models/")
		saver.restore(sess, ckpt.model_checkpoint_path)
		print("Model restored.")


		loaded_data = load_training_data()
		start_time = time.time()
		
		for i in range(epochs):
			#batch_no = 0
			for batch_no in range(loaded_data['data_length'] // batch_size):
			#while batch_no* < loaded_data['data_length']:
				real_images, wrong_images, caption_vectors, z_noise = get_training_batch(batch_no, batch_size, z_dim, loaded_data)
				
				# DISCR UPDATE
				check_ts = [ checks['d_loss1'] , checks['d_loss2'], checks['d_loss3']]
				_, d_loss, gen, d1, d2, d3 = sess.run([d_optim, loss['d_loss'], outputs['generator']] + check_ts,
					feed_dict = {
						input_tensors['t_real_image'] : real_images,
						input_tensors['t_wrong_image'] : wrong_images,
						input_tensors['t_real_caption'] : caption_vectors,
						input_tensors['t_z'] : z_noise
					})
				

				# GEN UPDATE, repeated several times per discriminator step to
				# make sure d_loss does not go to 0
				for _ in range(7):
					_, g_loss, gen = sess.run([g_optim, loss['g_loss'], outputs['generator']],
						feed_dict = {
							input_tensors['t_real_image'] : real_images,
							input_tensors['t_wrong_image'] : wrong_images,
							input_tensors['t_real_caption'] : caption_vectors,
							input_tensors['t_z'] : z_noise,
						})

				
				
				if batch_no == 100:
					print("Saving Images, Model")
					print("Epoch: [%2d], [%4d/%4d]  d_loss: %.8f, g_loss: %.8f, time: %4.4f"
						% (i, batch_no, len(loaded_data['image_list']) // batch_size,
							d_loss, g_loss, time.time() - start_time))
					save_for_vis(real_images, gen)
					saver.save(sess, checkpoint_dir)


			if i%3 == 0:
				save_path = saver.save(sess, "Data/Models/model_after_epoch_{}.ckpt".format(i))
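
The restore above raises an AttributeError when "Data/Models/" contains no checkpoint, because tf.train.get_checkpoint_state returns None. A hedged sketch of a guarded restore (the helper name is mine, not from the original):

def restore_or_init(sess, saver, ckpt_dir):
	# restore the latest checkpoint if one exists, else initialize from scratch
	ckpt = tf.train.get_checkpoint_state(ckpt_dir)
	if ckpt is not None and ckpt.model_checkpoint_path:
		saver.restore(sess, ckpt.model_checkpoint_path)
		print("Model restored from " + ckpt.model_checkpoint_path)
	else:
		sess.run(tf.global_variables_initializer())
		print("No checkpoint in " + ckpt_dir + "; initializing fresh variables")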
Example no. 21
0
def main():
	parser = argparse.ArgumentParser()
	parser.add_argument('--z_dim', type=int, default=100,
						help='Noise dimension')

	parser.add_argument('--t_dim', type=int, default=256,
						help='Text feature dimension')

	parser.add_argument('--batch_size', type=int, default=64,
						help='Batch Size')

	parser.add_argument('--image_size', type=int, default=128,
						help='Image Size a, a x a')

	parser.add_argument('--gf_dim', type=int, default=64,
						help='Number of conv in the first layer gen.')

	parser.add_argument('--df_dim', type=int, default=64,
						help='Number of conv in the first layer discr.')

	parser.add_argument('--caption_vector_length', type=int, default=4800,
						help='Caption Vector Length')

	parser.add_argument('--n_classes', type=int, default=102,
						help='Number of classes/class labels')

	parser.add_argument('--data_dir', type=str, default="Data",
						help='Data Directory')

	parser.add_argument('--learning_rate', type=float, default=0.0002,
						help='Learning Rate')

	parser.add_argument('--beta1', type=float, default=0.5,
						help='Momentum for Adam Update')

	parser.add_argument('--images_per_caption', type=int, default=30,
						help='The number of images that you want to generate '
	                         'per text description')

	parser.add_argument('--data_set', type=str, default="flowers",
						help='Data set: MS-COCO, flowers')

	parser.add_argument('--checkpoints_dir', type=str, default="/tmp",
						help='Path to the checkpoints directory')


	args = parser.parse_args()

	datasets_root_dir = join(args.data_dir, 'datasets')

	loaded_data = load_training_data(datasets_root_dir, args.data_set,
									 args.caption_vector_length,
									 args.n_classes)
	model_options = {
		'z_dim': args.z_dim,
		't_dim': args.t_dim,
		'batch_size': args.batch_size,
		'image_size': args.image_size,
		'gf_dim': args.gf_dim,
		'df_dim': args.df_dim,
		'caption_vector_length': args.caption_vector_length,
		'n_classes': loaded_data['n_classes']
	}

	gan = model.GAN(model_options)
	input_tensors, variables, loss, outputs, checks = gan.build_model()

	sess = tf.InteractiveSession()
	tf.global_variables_initializer().run()

	saver = tf.train.Saver(max_to_keep=1)
	print('Trying to resume model from ' +
		  str(tf.train.latest_checkpoint(args.checkpoints_dir)))
	if tf.train.latest_checkpoint(args.checkpoints_dir) is not None:
		saver.restore(sess, tf.train.latest_checkpoint(args.checkpoints_dir))
		print('Successfully loaded model from ' +
		      str(tf.train.latest_checkpoint(args.checkpoints_dir)))
	else:
		print('Could not load checkpoints. Please provide a valid path to'
		      ' your checkpoints directory')
		exit()

	print('Starting to generate images from text descriptions.')
	for sel_i, text_cap in enumerate(loaded_data['text_caps']['features']):

		print('Text idx: {}\nRaw Text: {}\n'.format(sel_i, text_cap))
		captions_1, image_files_1, image_caps_1, image_ids_1,\
		image_caps_ids_1 = get_caption_batch(loaded_data, datasets_root_dir,
                         dataset=args.data_set, batch_size=args.batch_size)

		captions_1[args.batch_size-1, :] = text_cap

		for z_i in range(args.images_per_caption):
			z_noise = np.random.uniform(-1, 1, [args.batch_size, args.z_dim])
			val_feed = {
				input_tensors['t_real_caption'].name: captions_1,
				input_tensors['t_z'].name: z_noise,
				input_tensors['t_training'].name: True
			}

			val_gen = sess.run(
				[outputs['generator']],
				feed_dict=val_feed)
			dump_dir = os.path.join(args.data_dir,
			                        'images_generated_from_text')
			save_distributed_image_batch(dump_dir, val_gen, sel_i, z_i,
			                             args.batch_size)
	print('Finished generating images from text description')
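
Note that the generation loop above injects each raw caption only into the last row of a batch drawn from the dataset (captions_1[args.batch_size - 1, :] = text_cap), so most of every generated batch corresponds to other captions. A hedged alternative sketch that tiles one caption across the whole batch, so all batch_size images vary only in z (the helper is mine, not from the original):

import numpy as np

def make_caption_batch(text_cap, batch_size, z_dim):
	# repeat the single caption vector batch_size times
	captions = np.tile(np.asarray(text_cap, dtype=np.float32), (batch_size, 1))
	z_noise = np.random.uniform(-1, 1, [batch_size, z_dim])
	return captions, z_noise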
Example no. 22
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--z_dim',
                        type=int,
                        default=100,
                        help='Noise dimension')
    parser.add_argument('--t_dim',
                        type=int,
                        default=256,
                        help='Text feature dimension')
    parser.add_argument('--batch_size',
                        type=int,
                        default=64,
                        help='Batch Size')
    parser.add_argument('--image_size',
                        type=int,
                        default=64,
                        help='Image Size a, a x a')
    parser.add_argument('--gf_dim',
                        type=int,
                        default=64,
                        help='Number of conv in the first layer gen.')
    parser.add_argument('--df_dim',
                        type=int,
                        default=64,
                        help='Number of conv in the first layer discr.')
    parser.add_argument(
        '--gfc_dim',
        type=int,
        default=1024,
        help='Dimension of gen units for fully connected layer')
    parser.add_argument('--caption_vector_length',
                        '-cvl',
                        type=int,
                        default=600,
                        help='Caption Vector Length')
    parser.add_argument('--method_dir',
                        '-md',
                        type=str,
                        default='',
                        help='method directory')
    parser.add_argument('--learning_rate',
                        type=float,
                        default=0.0002,
                        help='Learning Rate')
    parser.add_argument('--beta1',
                        type=float,
                        default=0.5,
                        help='Momentum for Adam Update')
    parser.add_argument('--epochs',
                        type=int,
                        default=600,
                        help='Max number of epochs')
    parser.add_argument(
        '--save_every',
        type=int,
        default=30,
        help='Save Model/Samples every x iterations over batches')
    parser.add_argument('--resume_model',
                        type=str,
                        default=None,
                        help='Pre-Trained Model Path, to resume from')
    parser.add_argument('--data_set',
                        type=str,
                        default='faces',
                        help='data set: faces')
    parser.add_argument('--imgs_dir',
                        type=str,
                        default='imgs',
                        help='images directory')
    parser.add_argument('--caption_vectors',
                        type=str,
                        default='caption_vectors.hdf5',
                        help='encoded training caption')
    parser.add_argument('--dis_updates',
                        '-du',
                        type=int,
                        default=1,
                        help='discriminator update per round')
    parser.add_argument('--gen_updates',
                        '-gu',
                        type=int,
                        default=2,
                        help='generator update per round')
    args = parser.parse_args()
    if args.method_dir == '':
        print('need to specify method_dir!')
        exit(1)
    model_options = {
        'z_dim': args.z_dim,
        't_dim': args.t_dim,
        'batch_size': args.batch_size,
        'image_size': args.image_size,
        'gf_dim': args.gf_dim,
        'df_dim': args.df_dim,
        'gfc_dim': args.gfc_dim,
        'caption_vector_length': args.caption_vector_length
    }

    gan = model.GAN(model_options)
    input_tensors, variables, loss, outputs, checks = gan.build_model()
    with tf.variable_scope(tf.get_variable_scope(), reuse=False):
        d_optim = tf.train.AdamOptimizer(
            args.learning_rate, beta1=args.beta1).minimize(
                loss['d_loss'], var_list=variables['d_vars'])
        g_optim = tf.train.AdamOptimizer(
            args.learning_rate, beta1=args.beta1).minimize(
                loss['g_loss'], var_list=variables['g_vars'])
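    # Presumably the explicit reuse=False scope keeps the Adam slot variables
    # freshly created even if the model-building code above switched the
    # current variable scope into reuse mode.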

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    init = tf.global_variables_initializer()
    sess.run(init)

    saver = tf.train.Saver(max_to_keep=None)
    if args.resume_model:
        saver.restore(sess, args.resume_model)

    loaded_data = load_training_data(args.data_set, args.method_dir,
                                     args.imgs_dir, args.caption_vectors)

    for i in range(1, args.epochs + 1):
        batch_no = 0
        while batch_no * args.batch_size < loaded_data['data_length']:
            real_images, wrong_images, caption_vectors, z_noise, image_files =\
              get_training_batch(batch_no, args.batch_size, args.image_size,
                                 args.z_dim, args.caption_vector_length, 'train',
                                 args.method_dir, args.imgs_dir, args.data_set,
                                 loaded_data)

            # DISCR UPDATE
            for j in range(args.dis_updates):
                check_ts = [
                    checks['d_loss1'], checks['d_loss2'], checks['d_loss3']
                ]
                _, d_loss, gen, d1, d2, d3 =\
                  sess.run([d_optim, loss['d_loss'], outputs['generator']] + check_ts,
                           feed_dict = {
                             input_tensors['t_real_image'] : real_images,
                             input_tensors['t_wrong_image'] : wrong_images,
                             input_tensors['t_real_caption'] : caption_vectors,
                             input_tensors['t_z'] : z_noise,
                           })

            print('d1 = {:5f} d2 = {:5f} d3 = {:5f} '
                  'D = {:5f}'.format(d1, d2, d3, d_loss))

            # GEN UPDATE
            for j in range(args.gen_updates):
                _, g_loss, gen =\
                  sess.run([g_optim, loss['g_loss'], outputs['generator']],
                           feed_dict = {
                             input_tensors['t_real_image'] : real_images,
                             input_tensors['t_wrong_image'] : wrong_images,
                             input_tensors['t_real_caption'] : caption_vectors,
                             input_tensors['t_z'] : z_noise,
                           })

            print('d_loss = {:5f} g_loss = {:5f} batch_no = {} '
                  'epochs = {}'.format(d_loss, g_loss, batch_no, i))
            print('-' * 60)
            batch_no += 1
            if (batch_no % args.save_every) == 0:
                save_for_vis(args.data_set, args.method_dir, real_images, gen,
                             image_files)
                save_path =\
                  saver.save(sess, join(args.data_set, args.method_dir, 'Models',
                                        'latest_model_'
                                        '{}_temp.ckpt'.format(args.data_set)))
        if i % 50 == 0:
            save_path =\
              saver.save(sess, join(args.data_set,
                                    args.method_dir, 'Models', 'model_after_'
                                    '{}_epoch_{}.ckpt'.format(args.data_set, i)))
Example no. 23
0
def train(args):
    height, width, channel = 28, 28, 1
    batch_size = args.batch_size
    z_size = args.nd  # noise dimension
    real_img = tf.placeholder(tf.float32, [batch_size, height, width, channel],
                              name='img')
    z = tf.placeholder(tf.float32, [batch_size, z_size], name='z')
    label = tf.placeholder(tf.float32, [batch_size, 10], name='label')  # 0~9

    gan = model.GAN(height, width, channel)
    gan.set_batch_size(batch_size)
    fake_img = gan.generator(z, label)
    real_result = gan.discriminator(real_img, label, reuse=False)
    fake_result = gan.discriminator(fake_img, label, reuse=True)
    real = tf.reduce_sum(label * real_result, 1)
    fake = tf.reduce_sum(label * fake_result, 1)
    d_loss = -tf.reduce_mean(tf.log(real) + tf.log(1. - fake))
    g_loss = -tf.reduce_mean(tf.log(fake))
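    # Note: tf.log(real) and tf.log(1. - fake) can produce NaNs once the
    # discriminator saturates; a small epsilon inside each log, e.g.
    # tf.log(real + 1e-8), is a common stabilizer.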

    t_vars = tf.trainable_variables()
    d_vars = [var for var in t_vars if 'dis' in var.name]
    g_vars = [var for var in t_vars if 'gen' in var.name]
    d_optimizer = tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5) \
              .minimize(d_loss, var_list=d_vars)
    g_optimizer = tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5) \
              .minimize(g_loss, var_list=g_vars)

    data = DataProvider()
    train_num = data.get_train_num()
    batch_num = int(train_num / args.batch_size)

    saver = tf.train.Saver(max_to_keep=1)
    model_dir = args.model_dir
    if (not os.path.exists(model_dir)):
        os.mkdir(model_dir)

    accuracy_real = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(label, 1), tf.argmax(real_result, 1)),
                'float'))
    accuracy_fake = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(label, 1), tf.argmax(fake_result, 1)),
                'float'))

    with tf.Session() as sess:
        counter = 0
        sess.run(tf.global_variables_initializer())
        for epoch in range(args.epoch):
            for batch in range(batch_num):
                counter += 1
                train_data, label_data = data.next_batch(batch_size)
                batch_z = np.random.normal(0, 1, [batch_size, z_size]).astype(
                    np.float32)

                sess.run(d_optimizer,
                         feed_dict={
                             real_img: train_data,
                             z: batch_z,
                             label: label_data
                         })
                sess.run(g_optimizer,
                         feed_dict={
                             z: batch_z,
                             label: label_data
                         })

                if (counter % 20 == 0):
                    dloss, gloss, ac_real, ac_fake = sess.run(
                        [d_loss, g_loss, accuracy_real, accuracy_fake],
                        feed_dict={
                            real_img: train_data,
                            z: batch_z,
                            label: label_data
                        })
                    print('iter:', counter, 'd_loss:', dloss, 'g_loss:', gloss,
                          'ac_real:', ac_real, 'ac_fake:', ac_fake)
                if (counter % 200 == 0):
                    saver.save(sess, os.path.join(model_dir, 'model'))
Example no. 24
0
def init_random(shape):
    return np.random.random_sample(shape)


def next_batch(x, y, batch_size=BATCH_SIZE):
    i = 0
    while (i < len(x)):
        yield x[i:i + batch_size], y[i:i + batch_size]
        i = i + batch_size


if __name__ == '__main__':

    tmp_buff = open('tmp.out', 'a')

    gan = model.GAN()
    images, labels = load_data.load_SVHN()

    label_ = tf.placeholder(tf.float32, [None, 2])
    # sess = tf.Session()

    train_step = tf.train.AdamOptimizer(1e-4).minimize(
        gan.gan.loss(logit=gan.gan.h_fc8, label=label_))
    correct_prediction = tf.equal(tf.argmax(label_, 1),
                                  tf.argmax(gan.gan.h_fc8, 1))

    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())

    for step in range(EPOCH_SIZE):
        if step % 10 == 0:
Example no. 25
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--z_dim', type=int, default=100,
                        help='Noise dimension')

    parser.add_argument('--t_dim', type=int, default=256,
                        help='Text feature dimension')

    parser.add_argument('--batch_size', type=int, default=64,
                        help='Batch Size')

    parser.add_argument('--image_size', type=int, default=128,
                        help='Image Size a, a x a')

    parser.add_argument('--gf_dim', type=int, default=64,
                        help='Number of conv in the first layer gen.')

    parser.add_argument('--df_dim', type=int, default=64,
                        help='Number of conv in the first layer discr.')

    parser.add_argument('--caption_vector_length', type=int, default=4800,
                        help='Caption Vector Length')

    parser.add_argument('--n_classes', type=int, default=102,
                        help='Number of classes/class labels')

    parser.add_argument('--data_dir', type=str, default="Data",
                        help='Data Directory')

    parser.add_argument('--learning_rate', type=float, default=0.0002,
                        help='Learning Rate')

    parser.add_argument('--beta1', type=float, default=0.5,
                        help='Momentum for Adam Update')

    parser.add_argument('--data_set', type=str, default="flowers",
                        help='Data set: flowers')

    parser.add_argument('--output_dir', type=str,
                        default="Data/synthetic_dataset",
                        help='The directory in which this dataset will be '
                             'created')

    parser.add_argument('--checkpoints_dir', type=str, default="/tmp",
                        help='Path to the checkpoints directory')

    parser.add_argument('--n_interp', type=int, default=100,
                        help='The difference between each interpolation. '
                             'Should ideally be a multiple of 10')

    parser.add_argument('--n_images', type=int, default=500,
                        help='Number of images to randomly sample for '
                             'generating interpolation results')

    args = parser.parse_args()
    datasets_root_dir = join(args.data_dir, 'datasets')

    loaded_data = load_training_data(datasets_root_dir, args.data_set,
                         args.caption_vector_length, args.n_classes)

    model_options = {
        'z_dim': args.z_dim,
        't_dim': args.t_dim,
        'batch_size': args.batch_size,
        'image_size': args.image_size,
        'gf_dim': args.gf_dim,
        'df_dim': args.df_dim,
        'caption_vector_length': args.caption_vector_length,
        'n_classes': loaded_data['n_classes']
    }

    gan = model.GAN(model_options)
    input_tensors, variables, loss, outputs, checks = gan.build_model()

    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()

    saver = tf.train.Saver(max_to_keep=10000)
    print('Resuming model from checkpoint ' +
          str(tf.train.latest_checkpoint(args.checkpoints_dir)))
    if tf.train.latest_checkpoint(args.checkpoints_dir) is not None:
        saver.restore(sess, tf.train.latest_checkpoint(args.checkpoints_dir))
        print('Successfully loaded model')
    else:
        print('Could not load checkpoints')
        exit()

    random.shuffle(loaded_data['image_list'])
    selected_images = loaded_data['image_list'][:args.n_images]
    # One caption index per selected image (the original commented-out variant
    # drew np.random.randint(0, 4) per image); a single-element list here
    # would silently cut the zip below to one iteration.
    cap_id = [0 for _ in range(len(selected_images))]

    print('Generating Images by interpolating z')
    bar = progressbar.ProgressBar(redirect_stdout=True,
                                  max_value=args.n_images)
    for sel_i, (sel_img, sel_cap) in enumerate(zip(selected_images, cap_id)):
        captions, image_files, image_caps, image_ids, image_caps_ids = \
            get_images_z_intr(sel_img, sel_cap, loaded_data,
                              datasets_root_dir, args.data_set, args.batch_size)

        z_noise_1 = np.full((args.batch_size, args.z_dim), -1.0)
        z_noise_2 = np.full((args.batch_size, args.z_dim), 1.0)
        intr_z_list = get_interp_vec(z_noise_1, z_noise_2, args.z_dim,
                                     args.n_interp, args.batch_size)

        for z_i, z_noise in enumerate(intr_z_list):
            val_feed = {
                input_tensors['t_real_caption'].name: captions,
                input_tensors['t_z'].name: z_noise,
                input_tensors['t_training'].name: True
            }

            val_gen = sess.run([outputs['generator']], feed_dict=val_feed)

            save_distributed_image_batch(args.output_dir, val_gen, sel_i, z_i,
                                         sel_img, sel_cap, args.batch_size)
        bar.update(sel_i)
    bar.finish()
    print('Finished generating interpolated images')
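
For reference, a hedged sketch of what get_interp_vec above presumably computes: evenly spaced linear interpolations between the two noise batches z_noise_1 (all -1.0) and z_noise_2 (all 1.0). This implementation is an assumption, not the original:

import numpy as np

def get_interp_vec(z1, z2, z_dim, n_interp, batch_size):
    # one interpolated batch per step, from z1 to z2 inclusive
    return [(1.0 - alpha) * z1 + alpha * z2
            for alpha in np.linspace(0.0, 1.0, n_interp)]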
Example no. 26
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--z_dim',
                        type=int,
                        default=100,
                        help='Noise dimension')

    parser.add_argument('--t_dim',
                        type=int,
                        default=256,
                        help='Text feature dimension')

    parser.add_argument('--batch_size',
                        type=int,
                        default=64,
                        help='Batch Size')

    parser.add_argument('--image_size',
                        type=int,
                        default=128,
                        help='Image Size a, a x a')

    parser.add_argument('--gf_dim',
                        type=int,
                        default=64,
                        help='Number of conv in the first layer gen.')

    parser.add_argument('--df_dim',
                        type=int,
                        default=64,
                        help='Number of conv in the first layer discr.')

    parser.add_argument('--caption_vector_length',
                        type=int,
                        default=4800,
                        help='Caption Vector Length')

    parser.add_argument('--n_classes',
                        type=int,
                        default=102,
                        help='Number of classes/class labels')

    parser.add_argument('--data_dir',
                        type=str,
                        default="Data",
                        help='Data Directory')

    parser.add_argument('--learning_rate',
                        type=float,
                        default=0.0002,
                        help='Learning Rate')

    parser.add_argument('--beta1',
                        type=float,
                        default=0.5,
                        help='Momentum for Adam Update')

    parser.add_argument('--epochs',
                        type=int,
                        default=200,
                        help='Max number of epochs')

    parser.add_argument('--data_set',
                        type=str,
                        default="flowers",
                        help='Data set: flowers')

    parser.add_argument('--output_dir',
                        type=str,
                        default="Data/ds",
                        help='The directory in which this dataset will be '
                        'created')

    parser.add_argument('--checkpoints_dir',
                        type=str,
                        default="/tmp",
                        help='Path to the checkpoints directory')

    args = parser.parse_args()

    model_stage_1_ds_tr, model_stage_1_ds_val, datasets_root_dir = \
                 prepare_dirs(args)

    loaded_data = load_training_data(datasets_root_dir, args.data_set,
                                     args.caption_vector_length,
                                     args.n_classes)

    model_options = {
        'z_dim': args.z_dim,
        't_dim': args.t_dim,
        'batch_size': args.batch_size,
        'image_size': args.image_size,
        'gf_dim': args.gf_dim,
        'df_dim': args.df_dim,
        'caption_vector_length': args.caption_vector_length,
        'n_classes': loaded_data['n_classes']
    }

    gan = model.GAN(model_options)
    input_tensors, variables, loss, outputs, checks = gan.build_model()

    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()

    saver = tf.train.Saver(max_to_keep=10000)
    print('Resuming model from checkpoint ' +
          str(tf.train.latest_checkpoint(args.checkpoints_dir)))
    if tf.train.latest_checkpoint(args.checkpoints_dir) is not None:
        saver.restore(sess, tf.train.latest_checkpoint(args.checkpoints_dir))
        print('Successfully loaded model')
    else:
        print('Could not load checkpoints')
        exit()

    print('Generating images for the captions in the training set at ' +
          model_stage_1_ds_tr)
    for i in range(args.epochs):
        batch_no = 0
        while batch_no * args.batch_size + args.batch_size < \
          loaded_data['data_length']:

            real_images, wrong_images, caption_vectors, z_noise, image_files, \
            real_classes, wrong_classes, image_caps, image_ids, \
            image_caps_ids = get_training_batch(batch_no, args.batch_size,
              args.image_size, args.z_dim, datasets_root_dir,
              args.data_set, loaded_data)

            feed = {
                input_tensors['t_real_image'].name: real_images,
                input_tensors['t_wrong_image'].name: wrong_images,
                input_tensors['t_real_caption'].name: caption_vectors,
                input_tensors['t_z'].name: z_noise,
                input_tensors['t_real_classes'].name: real_classes,
                input_tensors['t_wrong_classes'].name: wrong_classes,
                input_tensors['t_training'].name: True
            }

            g_loss, gen = sess.run([loss['g_loss'], outputs['generator']],
                                   feed_dict=feed)

            print("LOSSES", g_loss, batch_no, i,
                  len(loaded_data['image_list']) / args.batch_size)
            batch_no += 1
            save_distributed_image_batch(model_stage_1_ds_tr, gen, image_caps,
                                         image_ids, image_caps_ids)

    print('Finished generating images for the training set captions.\n\n')
    print('Generating images for the captions in the validation set at ' +
          model_stage_1_ds_val)
    for i in range(args.epochs):
        batch_no = 0
        while batch_no * args.batch_size + args.batch_size < \
          loaded_data['val_data_len']:

            val_captions, val_image_files, val_image_caps, val_image_ids, \
            val_image_caps_ids, val_z_noise = get_val_caps_batch(batch_no,
                    args.batch_size, args.z_dim, loaded_data, args.data_set,
                             datasets_root_dir)

            val_feed = {
                input_tensors['t_real_caption'].name: val_captions,
                input_tensors['t_z'].name: val_z_noise,
                input_tensors['t_training'].name: True
            }
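            # Note: t_training is fed as True even at generation time,
            # presumably so batch-norm uses live batch statistics rather
            # than moving averages.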

            val_gen, val_attn_spn = sess.run(
                [outputs['generator'], checks['attn_span']],
                feed_dict=val_feed)

            print("LOSSES", batch_no, i,
                  len(loaded_data['val_img_list']) / args.batch_size)
            batch_no += 1
            save_distributed_image_batch(model_stage_1_ds_val, val_gen,
                                         val_image_caps, val_image_ids,
                                         val_image_caps_ids, val_attn_spn)
    print('Finished generating images for the validation set captions.\n\n')
Example no. 27
0
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument('--z_dim',
                        type=int,
                        default=100,
                        help='Noise Dimension')

    parser.add_argument('--t_dim',
                        type=int,
                        default=256,
                        help='Text feature dimension')

    parser.add_argument('--image_size',
                        type=int,
                        default=64,
                        help='Image Size')

    parser.add_argument('--gf_dim',
                        type=int,
                        default=64,
                        help='Number of conv in the first layer gen.')

    parser.add_argument('--df_dim',
                        type=int,
                        default=64,
                        help='Number of conv in the first layer discr.')

    parser.add_argument(
        '--gfc_dim',
        type=int,
        default=1024,
        help='Dimension of gen units for fully connected layer')

    parser.add_argument('--caption_vector_length',
                        type=int,
                        default=4800,
                        help='Caption Vector Length')

    parser.add_argument('--data_dir',
                        type=str,
                        default="/media/ssd_working_space/osaid/Data",
                        help='Data Directory')

    parser.add_argument(
        '--model_path',
        type=str,
        default=
        '/media/ssd_working_space/osaid/Data/Models/latest_model_face_temp.ckpt',
        help='Trained Model Path')

    parser.add_argument('--n_images',
                        type=int,
                        default=1,
                        help='Number of Images per Caption')

    parser.add_argument(
        '--caption_thought_vectors',
        type=str,
        default=
        '/media/ssd_working_space/osaid/Data/sample_caption_vectors.hdf5',
        help='Caption Thought Vector File')

    args = parser.parse_args()
    model_options = {
        'z_dim': args.z_dim,
        't_dim': args.t_dim,
        'batch_size': args.n_images,
        'image_size': args.image_size,
        'gf_dim': args.gf_dim,
        'df_dim': args.df_dim,
        'gfc_dim': args.gfc_dim,
        'caption_vector_length': args.caption_vector_length
    }

    gan = model.GAN(model_options)
    _, _, _, _, _ = gan.build_model()
    sess = tf.InteractiveSession()
    saver = tf.train.Saver()
    saver.restore(sess, args.model_path)

    input_tensors, outputs = gan.build_generator()

    # h = h5py.File('/media/ssd_working_space/osaid/Data/flower_tv.hdf5')
    # image_name = [l for l in h]
    # caption_vectors = np.array([h[l][0] for l in h])
    test_encoding = pickle.load(
        open('/media/ssd_working_space/osaid/celebA/train/encoding', 'rb'))
    image_name = [key for key in test_encoding.keys()]
    caption_vectors = [
        test_encoding[key].reshape(-1) for key in test_encoding.keys()
    ]
    caption_image_dic = {}
    for cn, caption_vector in enumerate(caption_vectors):

        caption_images = []
        z_noise = np.random.uniform(-1, 1, [args.n_images, args.z_dim])
        caption = [caption_vector[0:args.caption_vector_length]
                   ] * args.n_images

        [gen_image] = sess.run(
            [outputs['generator']],
            feed_dict={
                input_tensors['t_real_caption']: caption,
                input_tensors['t_z']: z_noise,
            })

        caption_images = [
            gen_image[i, :, :, :] for i in range(0, args.n_images)
        ]
        caption_image_dic[cn] = caption_images
        print("Generated", cn)

    for cn in range(0, len(caption_vectors)):
        scipy.misc.imsave(
            '/media/ssd_working_space/osaid/Data/infered_train_images/' +
            image_name[cn], caption_image_dic[cn][0])
Example no. 28
0
def main():
	parser = argparse.ArgumentParser()
	parser.add_argument('--z_dim', type=int, default=100,
						help='Noise dimension')

	parser.add_argument('--t_dim', type=int, default=256,
						help='Text feature dimension')

	parser.add_argument('--batch_size', type=int, default=64,
						help='Batch Size')

	parser.add_argument('--image_size', type=int, default=128,
						help='Image Size a, a x a')

	parser.add_argument('--gf_dim', type=int, default=64,
						help='Number of conv in the first layer gen.')

	parser.add_argument('--df_dim', type=int, default=64,
						help='Number of conv in the first layer discr.')

	parser.add_argument('--caption_vector_length', type=int, default=4800,
						help='Caption Vector Length')

	parser.add_argument('--n_classes', type = int, default = 102,
	                    help = 'Number of classes/class labels')

	parser.add_argument('--data_dir', type=str, default="Data",
						help='Data Directory')

	parser.add_argument('--learning_rate', type=float, default=0.0002,
						help='Learning Rate')

	parser.add_argument('--beta1', type=float, default=0.5,
						help='Momentum for Adam Update')

	parser.add_argument('--epochs', type=int, default=200,
						help='Max number of epochs')

	parser.add_argument('--save_every', type=int, default=30,
						help='Save Model/Samples every x iterations over '
							 'batches')

	parser.add_argument('--resume_model', type=bool, default=False,
						help='Pre-Trained Model load or not')

	parser.add_argument('--data_set', type=str, default="flowers",
						help='Data set: MS-COCO, flowers')

	parser.add_argument('--model_name', type=str, default="TAC_GAN",
						help='model_1 or model_2')

	parser.add_argument('--train', type = bool, default = True,
	                    help = 'True while training and otherwise')

	args = parser.parse_args()

	model_dir, model_chkpnts_dir, model_samples_dir, model_val_samples_dir,\
							model_summaries_dir = initialize_directories(args)

	datasets_root_dir = join(args.data_dir, 'datasets')
	loaded_data = load_training_data(datasets_root_dir, args.data_set,
	                                 args.caption_vector_length,
	                                 args.n_classes)
	model_options = {
		'z_dim': args.z_dim,
		't_dim': args.t_dim,
		'batch_size': args.batch_size,
		'image_size': args.image_size,
		'gf_dim': args.gf_dim,
		'df_dim': args.df_dim,
		'caption_vector_length': args.caption_vector_length,
		'n_classes': loaded_data['n_classes']
	}

	# Initialize and build the GAN model
	gan = model.GAN(model_options)
	input_tensors, variables, loss, outputs, checks = gan.build_model()

	d_optim = tf.train.AdamOptimizer(args.learning_rate,
									 beta1=args.beta1).minimize(loss['d_loss'],
											var_list=variables['d_vars'])
	g_optim = tf.train.AdamOptimizer(args.learning_rate,
									 beta1=args.beta1).minimize(loss['g_loss'],
											var_list=variables['g_vars'])

	global_step_tensor = tf.Variable(1, trainable=False, name='global_step')
	merged = tf.summary.merge_all()
	sess = tf.InteractiveSession()

	summary_writer = tf.summary.FileWriter(model_summaries_dir, sess.graph)

	tf.global_variables_initializer().run()
	saver = tf.train.Saver(max_to_keep=10000)

	if args.resume_model:
		print('Trying to resume training from a previous checkpoint' +
		      str(tf.train.latest_checkpoint(model_chkpnts_dir)))
		if tf.train.latest_checkpoint(model_chkpnts_dir) is not None:
			saver.restore(sess, tf.train.latest_checkpoint(model_chkpnts_dir))
			print('Successfully loaded model. Resuming training.')
		else:
			print('Could not load checkpoints.  Training a new model')
	global_step = global_step_tensor.eval()
	# assign(global_step) would bake the initial Python value into the graph,
	# so the variable would never advance; assign_add(1) actually increments
	# it each time the op is run below
	gs_assign_op = global_step_tensor.assign_add(1)
	for i in range(args.epochs):
		batch_no = 0
		while batch_no * args.batch_size + args.batch_size < \
				loaded_data['data_length']:

			real_images, wrong_images, caption_vectors, z_noise, image_files, \
			real_classes, wrong_classes, image_caps, image_ids = \
							   get_training_batch(batch_no, args.batch_size,
	                                              args.image_size, args.z_dim,
	                                              'train', datasets_root_dir,
	                                              args.data_set, loaded_data)

			# DISCR UPDATE
			check_ts = [checks['d_loss1'], checks['d_loss2'],
			            checks['d_loss3'], checks['d_loss1_1'],
			            checks['d_loss2_1']]

			feed = {
				input_tensors['t_real_image'].name : real_images,
				input_tensors['t_wrong_image'].name : wrong_images,
				input_tensors['t_real_caption'].name : caption_vectors,
				input_tensors['t_z'].name : z_noise,
				input_tensors['t_real_classes'].name : real_classes,
				input_tensors['t_wrong_classes'].name : wrong_classes,
				input_tensors['t_training'].name : args.train
			}

			_, d_loss, gen, d1, d2, d3, d4, d5= sess.run([d_optim,
                        loss['d_loss'],outputs['generator']] + check_ts,
                        feed_dict=feed)

			print("D total loss: {}\n"
			      "D loss-1 [Real/Fake loss for real images] : {} \n"
			      "D loss-2 [Real/Fake loss for wrong images]: {} \n"
			      "D loss-3 [Real/Fake loss for fake images]: {} \n"
			      "D loss-4 [Aux Classifier loss for real images]: {} \n"
			      "D loss-5 [Aux Classifier loss for wrong images]: {}"
			      " ".format(d_loss, d1, d2, d3, d4, d5))

			# GEN UPDATE
			_, g_loss, gen = sess.run([g_optim, loss['g_loss'],
                                       outputs['generator']], feed_dict=feed)

			# GEN UPDATE TWICE
			_, summary, g_loss, gen, g1, g2 = sess.run([g_optim, merged,
                   loss['g_loss'], outputs['generator'], checks['g_loss_1'],
                   checks['g_loss_2']], feed_dict=feed)
			summary_writer.add_summary(summary, global_step)
			print("\n\nLOSSES\nDiscriminator Loss: {}\nGenerator Loss: {"
                  "}\nBatch Number: {}\nEpoch: {},\nTotal Batches per "
                  "epoch: {}\n".format( d_loss, g_loss, batch_no, i,
                    int(len(loaded_data['image_list']) / args.batch_size)))
			print("\nG loss-1 [Real/Fake loss for fake images] : {} \n"
			      "G loss-2 [Aux Classifier loss for fake images]: {} \n"
			      " ".format(g1, g2))
			global_step += 1
			sess.run(gs_assign_op)
			batch_no += 1
			if (i % args.save_every) == 0 and i != 0:
			# if (batch_no % args.save_every) == 0 and batch_no != 0:
				print("Saving Images and the Model\n\n")

				save_for_vis(model_samples_dir, real_images, gen, image_files,
				             image_caps, image_ids)
				save_path = saver.save(sess,
                                       join(model_chkpnts_dir,
				                            "latest_model_{}_temp.ckpt".format(
										        args.data_set)))

				# Getting a batch for validation
				val_captions, val_image_files, val_image_caps, val_image_ids = \
                          get_val_caps_batch(args.batch_size, loaded_data,
                                             args.data_set, datasets_root_dir)

				shutil.rmtree(model_val_samples_dir)
				os.makedirs(model_val_samples_dir)

				for val_viz_cnt in range(0, 4):
					val_z_noise = np.random.uniform(-1, 1, [args.batch_size,
					                                        args.z_dim])

					val_feed = {
						input_tensors['t_real_caption'].name : val_captions,
						input_tensors['t_z'].name : val_z_noise,
						input_tensors['t_training'].name : True
					}

					val_gen = sess.run([outputs['generator']],
					                   feed_dict=val_feed)
					save_for_viz_val(model_val_samples_dir, val_gen,
					                 val_image_files, val_image_caps,
									 val_image_ids, args.image_size,
									 val_viz_cnt)

		# Save the model after save_every epoch
		if i % args.save_every == 0 and i != 0:
			epoch_dir = os.path.join(model_chkpnts_dir, str(i))
			print("saving epoch %s" % epoch_dir)
			if not os.path.exists(epoch_dir):
				os.makedirs(epoch_dir)

			checkpoint_path = os.path.join(epoch_dir, "model_after_{}_epoch_{}.ckpt".format(args.data_set, i))
			print("save model to %s" % checkpoint_path)
			save_path = saver.save(sess, checkpoint_path)
			print("saved to %s" % save_path)

			val_captions, val_image_files, val_image_caps, val_image_ids = \
				  get_val_caps_batch(args.batch_size, loaded_data,
				                     args.data_set, datasets_root_dir)

			shutil.rmtree(model_val_samples_dir)
			os.makedirs(model_val_samples_dir)

			for val_viz_cnt in range(0, 10):
				val_z_noise = np.random.uniform(-1, 1, [args.batch_size,
				                                        args.z_dim])
				val_feed = {
					input_tensors['t_real_caption'].name : val_captions,
					input_tensors['t_z'].name : val_z_noise,
					input_tensors['t_training'].name : True
				}
				val_gen = sess.run([outputs['generator']], feed_dict=val_feed)
				save_for_viz_val(model_val_samples_dir, val_gen,
				                 val_image_files, val_image_caps,
								 val_image_ids, args.image_size,
								 val_viz_cnt)
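
One caveat in the argument parsing above: argparse's type=bool converts any non-empty string to True, so --resume_model False or --train False on the command line would still come out truthy. A hedged sketch of the usual str2bool converter (the helper is mine, not part of the original); passing type=str2bool for those two arguments would make them behave as expected:

def str2bool(v):
	# accept common command-line spellings of booleans
	if isinstance(v, bool):
		return v
	if v.lower() in ('yes', 'true', 't', '1'):
		return True
	if v.lower() in ('no', 'false', 'f', '0'):
		return False
	raise argparse.ArgumentTypeError('Boolean value expected.')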
Example no. 29
0
# create required directory
required_dirs = ["param", "result", "mnist"]
hoge.make_dir(required_dirs)

mnist_data = MNIST('./mnist/',
                   train=True,
                   download=True,
                   transform=transforms.ToTensor())
dataloader = DataLoader(mnist_data, batch_size=mini_batch_num, shuffle=True)

print("\n")
# train restart
if interrupt_flag:
    with open("./param/tmp.pickle", mode="rb") as f:
        init_epoch = pickle.load(f)
    model = model.GAN(dataloader, interrupting=True)
else:
    init_epoch = 1
    model = model.GAN(dataloader)
del dataloader
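# Note: rebinding the name `model` from the imported module to a GAN instance
# works, but it makes the module unreachable afterwards; a distinct name such
# as `gan` would be safer.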

for epoch in range(init_epoch, epochs + 1):
    print("Epoch[%d/%d]:" % (epoch, epochs))
    model.study(epoch)
    model.evaluate()
    model.save_tmp_weight(epoch)
    model.eval_pic(epoch)
    model.output(epoch)
#model.output()
Example no. 30
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--z_dim',
                        type=int,
                        default=100,
                        help='Noise Dimension')
    parser.add_argument('--t_dim',
                        type=int,
                        default=256,
                        help='Text feature dimension')
    parser.add_argument('--image_size',
                        '-is',
                        type=int,
                        default=64,
                        help='Image Size')
    parser.add_argument('--gf_dim',
                        type=int,
                        default=64,
                        help='Number of conv in the first layer gen.')
    parser.add_argument('--df_dim',
                        type=int,
                        default=64,
                        help='Number of conv in the first layer discr.')
    parser.add_argument(
        '--gfc_dim',
        type=int,
        default=1024,
        help='Dimension of gen units for fully connected layer')
    parser.add_argument('--caption_vector_length',
                        '-cvl',
                        type=int,
                        default=2400,
                        help='Caption Vector Length')
    parser.add_argument('--data_set',
                        '-ds',
                        type=str,
                        default='faces',
                        help='data directory')
    parser.add_argument('--method_dir',
                        '-md',
                        type=str,
                        default='',
                        help='method directory')
    parser.add_argument('--model_path',
                        '-mp',
                        type=str,
                        default='latest_model_faces_temp.ckpt',
                        help='Trained Model Path')
    parser.add_argument('--n_images',
                        '-ni',
                        type=int,
                        default=5,
                        help='Number of Images per Caption')
    parser.add_argument('--caption_vectors',
                        '-cv',
                        type=str,
                        default='test_caption_vectors.hdf5',
                        help='Caption Thought Vector File')
    parser.add_argument('--out_dir',
                        '-od',
                        type=str,
                        default='samples',
                        help='output directory')

    args = parser.parse_args()
    model_options = {
        'z_dim': args.z_dim,
        't_dim': args.t_dim,
        'batch_size': args.n_images,
        'image_size': args.image_size,
        'gf_dim': args.gf_dim,
        'df_dim': args.df_dim,
        'gfc_dim': args.gfc_dim,
        'caption_vector_length': args.caption_vector_length
    }

    gan = model.GAN(model_options)
    _, _, _, _, _ = gan.build_model()
    sess = tf.Session()
    saver = tf.train.Saver()
    saver.restore(
        sess, join(args.data_set, args.method_dir, 'Models', args.model_path))

    input_tensors, outputs = gan.build_generator()

    h = h5py.File(join(args.data_set, args.method_dir, args.caption_vectors),
                  'r')
    caption_image_dic = {}

    for i, key in enumerate(h):
        caption_images = []
        z_noise = np.random.uniform(-1, 1, [args.n_images, args.z_dim])
        caption = np.array([
            h[key][0, :args.caption_vector_length]
            for i in range(args.n_images)
        ])

        [gen_image] =\
          sess.run([outputs['generator']],
                   feed_dict = {input_tensors['t_real_caption'] : caption,
                                input_tensors['t_z'] : z_noise} )

        caption_image_dic[key] =\
          [gen_image[i, :, :, :] for i in range(0, args.n_images)]

    if not os.path.exists(args.out_dir):
        os.makedirs(args.out_dir)
    for key in h:
        for i, im in enumerate(caption_image_dic[key]):
            scipy.misc.imsave(
                join(args.out_dir, 'sample_' + key + '_' + str(i) + '.jpg'),
                im)