def train_cycle_gan(data_root, semi_supervised=False):
    opt = get_opts()

    ensure_dir(models_prefix)
    ensure_dir(images_prefix)

    cycle_gan = CycleGAN(device,
                         models_prefix,
                         opt["lr"],
                         opt["b1"],
                         train=True,
                         semi_supervised=semi_supervised)
    data = DataLoader(data_root=data_root,
                      image_size=(opt['img_height'], opt['img_width']),
                      batch_size=opt['batch_size'])

    total_images = len(data.names)
    print("Total Training Images", total_images)

    total_batches = int(ceil(total_images / opt['batch_size']))

    for epoch in range(cycle_gan.epoch_tracker.epoch, opt['n_epochs']):
        for iteration in range(total_batches):

            if (epoch == cycle_gan.epoch_tracker.epoch
                    and iteration < cycle_gan.epoch_tracker.iter):
                continue

            y, x = next(data.data_generator(iteration))

            real_A = Variable(x.type(Tensor))
            real_B = Variable(y.type(Tensor))

            cycle_gan.set_input(real_A, real_B)
            cycle_gan.train()

            message = (
                "\r[Epoch {}/{}] [Batch {}/{}] [DA:{}, DB:{}] [GA:{}, GB:{}, cycleA:{}, cycleB:{}, G:{}]"
                .format(epoch, opt["n_epochs"], iteration, total_batches,
                        cycle_gan.loss_disA.item(), cycle_gan.loss_disB.item(),
                        cycle_gan.loss_genA.item(), cycle_gan.loss_genB.item(),
                        cycle_gan.loss_cycle_A.item(),
                        cycle_gan.loss_cycle_B.item(), cycle_gan.loss_G))
            print(message)
            logger.info(message)

            if iteration % opt['sample_interval'] == 0:
                cycle_gan.save_progress(images_prefix, epoch, iteration)
        cycle_gan.save_progress(images_prefix,
                                epoch,
                                total_batches,
                                save_epoch=True)
def test_cycle_gan(semi_supervised=True):
    opt = get_opts()

    ensure_dir(models_prefix)
    ensure_dir(images_prefix)

    cycle_gan = CycleGAN(device,
                         models_prefix,
                         opt["lr"],
                         opt["b1"],
                         train=False,
                         semi_supervised=semi_supervised)
    data = DataLoader(data_root=data_root,
                      image_size=(opt['img_height'], opt['img_width']),
                      batch_size=1,
                      train=False)

    total_images = len(data.names)
    print("Total Testing Images", total_images)

    loss_A = 0.0
    loss_B = 0.0
    name_loss_A = []
    name_loss_B = []

    for i in range(total_images):
        print(i, "/", total_images)
        x, y = next(data.data_generator(i))
        name = data.names[i]

        real_A = Variable(x.type(Tensor))
        real_B = Variable(y.type(Tensor))

        cycle_gan.set_input(real_A, real_B)
        cycle_gan.test()
        cycle_gan.save_image(images_prefix, name)
        loss_A += cycle_gan.test_A
        loss_B += cycle_gan.test_B
        name_loss_A.append((cycle_gan.test_A, name))
        name_loss_B.append((cycle_gan.test_B, name))

    info = "Average Loss A:{} B :{}".format(loss_A / (1.0 * total_images),
                                            loss_B / (1.0 * total_images))
    print(info)
    logger.info(info)
    name_loss_A = sorted(name_loss_A)
    name_loss_B = sorted(name_loss_B)
    print("top 10 images")
    print(name_loss_A[:10])
    print(name_loss_B[:10])
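The two routines above are module-level functions that rely on globals such as models_prefix, images_prefix, device and Tensor. As a purely illustrative sketch, a command-line entry point could dispatch between them as follows; the flag names and the default dataset path are assumptions, not part of the original script.

if __name__ == "__main__":
    import argparse

    # Hypothetical CLI wrapper; flag names and defaults are assumptions.
    parser = argparse.ArgumentParser(description="Train or test the CycleGAN model")
    parser.add_argument("--mode", choices=["train", "test"], default="train")
    parser.add_argument("--data_root", default="datasets/my_dataset")
    parser.add_argument("--semi_supervised", action="store_true")
    args = parser.parse_args()

    if args.mode == "train":
        train_cycle_gan(args.data_root, semi_supervised=args.semi_supervised)
    else:
        # test_cycle_gan reads data_root from module scope in the original code
        test_cycle_gan(semi_supervised=args.semi_supervised)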
def gan_repository(sess, flags, dataset):
    if flags.gan_model == 'vanilla_gan':
        print('Initializing Vanilla GAN...')
        return GAN(sess, flags, dataset.image_size)
    elif flags.gan_model == 'dcgan':
        print('Initializing DCGAN...')
        return DCGAN(sess, flags, dataset.image_size)
    elif flags.gan_model == 'pix2pix':
        print('Initializing pix2pix...')
        return Pix2Pix(sess, flags, dataset.image_size)
    elif flags.gan_model == 'pix2pix-patch':
        print('Initializing pix2pix-patch...')
        return Pix2PixPatch(sess, flags, dataset.image_size)
    elif flags.gan_model == 'wgan':
        print('Initializing WGAN...')
        return WGAN(sess, flags, dataset)
    elif flags.gan_model == 'cyclegan':
        print('Initializing cyclegan...')
        return CycleGAN(sess, flags, dataset.image_size, dataset())
    elif flags.gan_model == 'mrigan':
        print('Initializing mrigan...')
        return MRIGAN(sess, flags, dataset.image_size, dataset())
    elif flags.gan_model == 'mrigan02':
        print('Initializing mrigan02...')
        return MRIGAN02(sess, flags, dataset.image_size, dataset())
    elif flags.gan_model == 'mrigan03':
        print('Initializing mrigan03...')
        return MRIGAN03(sess, flags, dataset.image_size, dataset())
    elif flags.gan_model == 'mrigan01_lsgan':
        print('Initializing mrigan01_lsgan...')
        return MRIGAN01_LSGAN(sess, flags, dataset.image_size, dataset())
    elif flags.gan_model == 'mrigan02_lsgan':
        print('Initializing mrigan02_lsgan...')
        return MRIGAN02_LSGAN(sess, flags, dataset.image_size, dataset())
    elif flags.gan_model == 'mrigan03_lsgan':
        print('Initializing mrigan03_lsgan...')
        return MRIGAN03_LSGAN(sess, flags, dataset.image_size, dataset())
    elif flags.gan_model == 'mrigan_01':
        print('Initializing mrigan_01...')
        return MRIGAN_01(sess, flags, dataset.image_size, dataset())
    elif flags.gan_model == 'mrigan_02':
        print('Initializing mrigan_02...')
        return MRIGAN_02(sess, flags, dataset.image_size, dataset())
    else:
        raise NotImplementedError
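The chain of elif branches above is equivalent to a table lookup keyed by flags.gan_model. The sketch below is an illustrative alternative only, assuming the same model classes and constructor signatures as in gan_repository; the MRIGAN variants would be added to the table in the same way.

def gan_repository_lookup(sess, flags, dataset):
    # Each entry is a zero-argument factory so that no model is built until it is selected.
    registry = {
        'vanilla_gan': lambda: GAN(sess, flags, dataset.image_size),
        'dcgan': lambda: DCGAN(sess, flags, dataset.image_size),
        'pix2pix': lambda: Pix2Pix(sess, flags, dataset.image_size),
        'pix2pix-patch': lambda: Pix2PixPatch(sess, flags, dataset.image_size),
        'wgan': lambda: WGAN(sess, flags, dataset),
        'cyclegan': lambda: CycleGAN(sess, flags, dataset.image_size, dataset()),
        # ... remaining MRIGAN variants follow the same pattern
    }
    if flags.gan_model not in registry:
        raise NotImplementedError(flags.gan_model)
    print('Initializing {}...'.format(flags.gan_model))
    return registry[flags.gan_model]()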
Example 4
def inference():
    configs = read_config(FLAGS.config_path)
    class_ids = read_config(FLAGS.class_config)

    img_data = read_data(FLAGS.data_dir, int(configs['width']),
                         int(configs['height']))

    cyc_GAN = CycleGAN(configs)
    cyc_GAN.build_model()

    # The saver must be defined after the TensorFlow graph has been built
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, FLAGS.model_path)

        meta_data = meta_initializer(
            int(class_ids[FLAGS.t]),
            [int(configs['width']),
             int(configs['height'])])

        idx = 0

        for img in img_data:

            img_vec = np.expand_dims(img, axis=0)

            if int(class_ids[FLAGS.f]) < int(class_ids[FLAGS.t]):
                mode = "AtoB"
            else:
                mode = "BtoA"

            output_img = sess.run(cyc_GAN.predict(mode),
                                  feed_dict={
                                      cyc_GAN.sample_vector: img_vec,
                                      cyc_GAN.sample_meta_data: meta_data
                                  })

            output_img = sess.run(tf.image.encode_jpeg(tf.squeeze(output_img)))

            with open(join(FLAGS.output_dir, str(idx) + '.jpg'), 'wb') as f:
                f.write(output_img)

            idx += 1
Example 5
def train_cycle_gan(data_root, semi_supervised=False):
    opt = get_opts()

    ensure_dir(models_prefix)
    ensure_dir(images_prefix)

    cycle_gan = CycleGAN(device, models_prefix, opt["lr"], opt["b1"],
                         train=True, semi_supervised=semi_supervised)
    # data = DataLoader(data_root=data_root,
    #                   image_size=(opt['img_height'], opt['img_width']),
    #                   batch_size=opt['batch_size'])
    dataset = TrainDataSet(data_root=data_root, image_size=(opt['img_height'], opt['img_width']))
    print("dataset : ", dataset)
    dataLoader = DataLoader(dataset, batch_size=1)

    total_images = len(dataset.names)
    print("Total Training Images", total_images)

    total_batches = int(ceil(total_images / opt['batch_size']))

    for epoch in range(5):
        for i, data in enumerate(dataLoader):
            real_A, real_B = data

            real_A = Variable(real_A.type(Tensor))
            real_B = Variable(real_B.type(Tensor))

            cycle_gan.set_input(real_A, real_B)
            cycle_gan.train()

            message = (
                "\r[Epoch {}/{}] [Batch {}/{}] [DA:{}, DB:{}] [GA:{}, GB:{}, cycleA:{}, cycleB:{}, G:{}]"
                .format(epoch, opt["n_epochs"], i, total_batches,
                        cycle_gan.loss_disA.item(),
                        cycle_gan.loss_disB.item(),
                        cycle_gan.loss_genA.item(),
                        cycle_gan.loss_genB.item(),
                        cycle_gan.loss_cycle_A.item(),
                        cycle_gan.loss_cycle_B.item(),
                        cycle_gan.loss_G))
            print(message)
            logger.info(message)
Example 6
def inference():
	configs = read_config(FLAGS.config_path)

	face_names, face_data = read_data_shuffle(FLAGS.data_dir, int(configs['width']), int(configs['height']))

	cyc_GAN = CycleGAN(configs)
	cyc_GAN.build_model()

	# The saver must be defined after the TensorFlow graph has been built
	saver = tf.train.Saver()
	with tf.Session() as sess:
		saver.restore(sess, FLAGS.model_path)
		
		age_batch, gender_batch = read_data_batch(face_names)
		
		fake_imgs = sess.run(cyc_GAN.predict(), feed_dict = {cyc_GAN.sample_batch: face_data, cyc_GAN.age_batch: age_batch, cyc_GAN.gender_batch: gender_batch})

		for i in range(len(face_names)):
			pred_img = sess.run(tf.image.encode_jpeg(fake_imgs[i]))
			
			with open(join(FLAGS.output_dir, basename(face_names[i])), "wb") as fw:
				fw.write(pred_img)
def test_model():

    model = CycleGAN(generator_cls=MiniGenerator, gen_kwargs={
                     "domain_a": {}, "domain_b": {}, "shared": {}},
                     discriminator_cls=MiniDiscriminator, discr_kwargs={
                         "domain_a": {}, "domain_b": {}, "shared": {}})

    # check single forward
    preds = model(torch.rand(10, 3, 64, 64), torch.rand(10, 3, 64, 64))
    assert len(preds) == 8
    assert all([isinstance(_pred, torch.Tensor) for _pred in preds])

    # check closure without optimizers and criterions
    model.closure(model, {"input_a": torch.rand(10, 3, 64, 64),
                          "input_b": torch.rand(10, 3, 64, 64),
                          "target_a": torch.rand(10, 3, 64, 64),
                          "target_b": torch.rand(10, 3, 64, 64)},
                  optimizers={},
                  losses={k: lambda *x: sum([_x.sum() for _x in x])
                          for k in ["cycle", "adv", "discr"]})

    # check forward with optimizers and criterions
    model.closure(
        model, {"input_a": torch.rand(10, 3, 64, 64),
                "input_b": torch.rand(10, 3, 64, 64),
                "target_a": torch.rand(10, 3, 64, 64),
                "target_b": torch.rand(10, 3, 64, 64)},
        optimizers={
            "gen": OptimWrapper(torch.optim.Adam(
                list(model.gen_a.parameters())
                + list(model.gen_b.parameters()))),
            "discr_a": OptimWrapper(torch.optim.Adam(
                model.discr_a.parameters())),
            "discr_b": OptimWrapper(torch.optim.Adam(
                model.discr_b.parameters()
            ))},
        losses={k: lambda *x: sum([_x.sum() for _x in x])
                for k in ["cycle", "adv", "discr"]})
def train():
	train_configs = read_config(FLAGS.config_path)
	class_index = dic_key_invert(read_config(FLAGS.class_config))

	# Read Data
	img_corpus = read_data_corpus(FLAGS.data_dir, int(train_configs['width']), int(train_configs['height']))
	class_len = get_class_len(img_corpus)

	# Init cycle GAN
	cyc_GAN = CycleGAN(train_configs)
	cyc_GAN.build_model()


	saver = tf.train.Saver()
	
	# run training
	with tf.Session() as sess:
		sess.run(tf.global_variables_initializer())
				
		# Pick two classes at random, then sample a batch of images from each

		for step in range(int(train_configs['train_steps_total'])):

			img_list = []
			
			class_range = get_keys(class_index)
			
			class_ids = np.random.choice(class_range, 2, replace = False)

			class_ids = np.sort(class_ids)

			meta_vector_A2B = meta_initializer(class_ids[1], [int(train_configs['width']), int(train_configs['height'])])
			
			meta_vector_B2A = meta_initializer(class_ids[0], [int(train_configs['width']), int(train_configs['height'])])

			for class_id in class_ids:

				random_ids = np.random.choice(class_len[class_index[class_id]], int(train_configs['batch_size']), replace = False)
				
				ims = []

				for r_id in random_ids:
					im = img_corpus[class_index[class_id]][r_id]
					ims.append(im)

				img_list.append(np.asarray(ims))

			sess.run(cyc_GAN.g_optim, 
				feed_dict = {cyc_GAN.img_A: img_list[0], cyc_GAN.img_B: img_list[1], cyc_GAN.meta_vector_A2B: meta_vector_A2B, cyc_GAN.meta_vector_B2A: meta_vector_B2A})
				
			sess.run(cyc_GAN.d_optim,
				feed_dict = {cyc_GAN.img_A: img_list[0], cyc_GAN.img_B: img_list[1], cyc_GAN.meta_vector_A2B: meta_vector_A2B, cyc_GAN.meta_vector_B2A: meta_vector_B2A})

			g_loss, d_loss = sess.run([cyc_GAN.g_loss, cyc_GAN.d_loss], 
				feed_dict = {cyc_GAN.img_A: img_list[0], cyc_GAN.img_B: img_list[1], cyc_GAN.meta_vector_A2B: meta_vector_A2B, cyc_GAN.meta_vector_B2A: meta_vector_B2A})

			print "Steps: ", step, "gloss", g_loss, "dloss", d_loss

			
			if step % int(train_configs['sample_step']) == 0 and step != 0:
				fake_A = sess.run(cyc_GAN.sample(), feed_dict = {cyc_GAN.sample_vector: img_list[0], cyc_GAN.sample_meta_data: meta_vector_A2B})
				
				image_A = sess.run(tf.image.encode_jpeg(tf.squeeze(fake_A)))

				with open(FLAGS.sample_dir + "/sample%d_A.jpeg" % step, "wb") as f:
					f.write(image_A)
				#with open(FLAGS.sample_dir + "/sample%d_B.jpeg" % step, "wb") as f:
					#f.write(image_B)
			if step % int(train_configs['save_step']) == 0 and step != 0:
					
				save_model(saver, sess, step, FLAGS.model_dir)
Example 9
from cycle_gan_preprocess import *
import numpy as np
import config
import wave
import os

cycleGAN_config = config.cycleGAN()

num_features = 24
sampling_rate = 16000
frame_period = 5.0

model_dir = os.path.dirname(config.cycleGAN_model_default)
model_name = os.path.basename(config.cycleGAN_model_default)

model = CycleGAN(num_features=num_features, mode='test')

model.load(filepath=config.cycleGAN_model_default)

mcep_normalization_params = np.load(
    os.path.join(model_dir, 'mcep_normalization.npz'))
mcep_mean_A = mcep_normalization_params['mean_A']
mcep_std_A = mcep_normalization_params['std_A']
mcep_mean_B = mcep_normalization_params['mean_B']
mcep_std_B = mcep_normalization_params['std_B']

logf0s_normalization_params = np.load(
    os.path.join(model_dir, 'logf0s_normalization.npz'))
logf0s_mean_A = logf0s_normalization_params['mean_A']
logf0s_std_A = logf0s_normalization_params['std_A']
logf0s_mean_B = logf0s_normalization_params['mean_B']
logf0s_std_B = logf0s_normalization_params['std_B']
def setUp(self) -> None:
    self.gan = CycleGAN(epochs=500, color_depth=1, progrssive=True)
Example 11
import numpy as np
from glob import glob
from image_helper_cycle_gan import ImageHelper
from cycle_gan import CycleGAN

image_helper = ImageHelper()

dataset_folder = "datasets/monet2photo"  #modify this folder

print("Ploting the images...")
filenames = np.array(glob(dataset_folder + '/testA/*.jpg'))
image_helper.plot20(filenames)

generative_adversarial_network = CycleGAN((128, 128, 3), 10.0, image_helper)
generative_adversarial_network.train(100, 1, dataset_folder)
Example 12
def train():
	train_configs = read_config(FLAGS.config_path)

	print "[*] Start reading data and shuffling"

	face_names, face_data = read_data_shuffle(FLAGS.data_dir, int(train_configs['width']), int(train_configs['height']))
	test_names, test_data = read_data_shuffle(FLAGS.sample_dir, int(train_configs['width']), int(train_configs['height']))

	print "[*] training set size: ", len(face_names)
	print "[*] validation set size: ", len(test_names)

	print "[*] Start building model"

	cyc_GAN = CycleGAN(train_configs)
	cyc_GAN.build_model()

	saver = tf.train.Saver()

	# run training
	with tf.Session() as sess:
		sess.run(tf.global_variables_initializer())

		batch_idx = 0

		for epoch in range(int(train_configs['epochs'])):
			
			batch_size = int(train_configs['batch_size'])

			num_batches = len(face_names) // batch_size  # integer division so range() receives an int

			for step in range(num_batches):
				# Perhaps random shuffle is more suitable, I'll test it later

				if (step + 1) * batch_size > len(face_names):
					img_batch_names = face_names[step * batch_size:]
					img_batch = np.asarray(face_data[step * batch_size:])
					batch_size = len(face_names) - step * batch_size

				else:
					img_batch_names = face_names[step * batch_size : (step + 1) * batch_size]
					img_batch = np.asarray(face_data[step * batch_size : (step + 1) * batch_size])

				age_batch, gender_batch = read_data_batch(img_batch_names)

				prior_batch = np.random.uniform(-1.0, 1.0, [batch_size, int(train_configs['z_dim'])])	

				sess.run(cyc_GAN.eg_optim, feed_dict = {cyc_GAN.img_batch: img_batch, cyc_GAN.age_batch: age_batch, cyc_GAN.gender_batch: gender_batch, 
						cyc_GAN.prior: prior_batch, cyc_GAN.batch_size: batch_size})

				# remove the effect of the discriminator_Z
				#sess.run(cyc_GAN.dz_optim, feed_dict = {cyc_GAN.img_batch: img_batch, cyc_GAN.age_batch: age_batch, cyc_GAN.gender_batch: gender_batch, 
						#cyc_GAN.prior: prior_batch, cyc_GAN.batch_size: batch_size})

				sess.run(cyc_GAN.d_optim, feed_dict = {cyc_GAN.img_batch: img_batch, cyc_GAN.age_batch: age_batch, cyc_GAN.gender_batch: gender_batch, 
						cyc_GAN.prior: prior_batch, cyc_GAN.batch_size: batch_size})

				eg_loss = sess.run(cyc_GAN.eg_loss, feed_dict = {cyc_GAN.img_batch: img_batch, cyc_GAN.age_batch: age_batch, cyc_GAN.gender_batch: gender_batch, 
						cyc_GAN.prior: prior_batch, cyc_GAN.batch_size: batch_size})

			
				progress_rate = float(step) / float(num_batches) * 100.0
	
				print "Epoch:", epoch, "EG Loss:", eg_loss, "Progress: {}%".format(progress_rate)

			if epoch % int(train_configs['save_epoch']) == 0:
				save_model(saver, sess, epoch, FLAGS.model_dir)

			if epoch % int(train_configs['sample_epoch']) == 0:

				age_batch, gender_batch = read_data_batch(test_names)

				fake_imgs = sess.run(cyc_GAN.predict(), feed_dict = {cyc_GAN.sample_batch: test_data, cyc_GAN.age_batch: age_batch, cyc_GAN.gender_batch: gender_batch})

				for i in range(len(test_names)):

					sample_img = sess.run(tf.image.encode_jpeg(tf.squeeze(fake_imgs[i])))

					with open(join(FLAGS.output_dir, "epoch_" + str(epoch) + "_" + basename(test_names[i])), "wb") as fw:
						fw.write(sample_img)

				print "Images Sampled"