def train(self, sess, celeba, show_every=250, print_every=50):
    """Run the adversarial training loop over CelebA mini-batches.

    Alternates one discriminator update and one generator update per
    mini-batch, periodically logging losses and saving sample images,
    and checkpoints the model after every epoch.  (Original marked WIP.)

    param, sess: an active tf.Session with the graph built
    param, celeba: dataset object exposing .batches(batch_size)
    param, show_every: iteration period for rendering sample images
    param, print_every: iteration period for printing losses
    return: list of [d_loss, g_loss] pairs, one per training step
    """
    saver = tf.train.Saver()
    # Fixed noise vector: reusing it makes successive sample grids comparable.
    fixed_noise = np.random.uniform(-1, 1, size=(self.config.sample_size, self.config.z_dim))
    loss_history = []
    step_count = 0
    # Resume from the latest checkpoint if one exists.
    first_epoch = load_checkpoint(sess, saver, self.config.checkpoint_dir)

    for epoch in range(first_epoch, self.config.num_epoch):
        for real_batch in celeba.batches(self.config.batch_size):
            step_count += 1

            # Fresh noise for this step's generator input.
            noise_batch = np.random.uniform(-1, 1, size=(self.config.batch_size, self.config.z_dim))

            # One discriminator update, then one generator update.
            _, d_loss_val = sess.run(
                [self.D_opt, self.D_loss],
                feed_dict={self.input_real: real_batch, self.input_z: noise_batch})
            _, g_loss_val = sess.run(
                [self.G_opt, self.G_loss],
                feed_dict={self.input_z: noise_batch, self.input_real: real_batch})
            loss_history.append([d_loss_val, g_loss_val])

            # Periodic loss report — watch that D's loss does not collapse to 0.
            if step_count % print_every == 0:
                print('... Epoch {}, Iter {}, D:{:.4}, G:{:.4}'.format(epoch, step_count, d_loss_val, g_loss_val))

            # Periodic sample grid from the fixed noise.
            if step_count % show_every == 1:
                generated = sess.run(
                    generator(self.input_z, reuse=True, training=False),
                    feed_dict={self.input_z: fixed_noise})
                _ = view_samples(generated, 4, 4)
                plt.savefig(os.path.join(self.config.sample_dir, 'train_{}.png'.format(step_count)))
                plt.show()

        # Checkpoint once per epoch.
        saver.save(sess, os.path.join(self.config.checkpoint_dir, 'GAN_epoch{}.ckpt'.format(epoch)))

    # Final sample grid and final checkpoint.
    print('Final images')
    generated = sess.run(
        generator(self.input_z, reuse=True, training=False),
        feed_dict={self.input_z: fixed_noise})
    _ = view_samples(generated, 4, 4)
    plt.savefig(os.path.join(self.config.sample_dir, 'final.png'))
    plt.show()
    saver.save(sess, os.path.join(self.config.checkpoint_dir, 'GAN_final.ckpt'))
    return loss_history
def main(FLAGS):
    """Download the data, build the GAN, train it, and plot the losses.

    param, FLAGS: parsed command-line flags; must expose data_dir,
        train_data, test_data, alpha, beta1, epochs, batch_size,
        print_every, show_every, h_figsize, v_figsize.
    """
    if not os.path.isdir(FLAGS.data_dir):
        os.makedirs(FLAGS.data_dir)
    download_data(FLAGS.data_dir, FLAGS.train_data, FLAGS.test_data)

    # NOTE(review): real_size, z_size and learning_rate are presumed
    # module-level constants imported elsewhere in this file — confirm.
    net = GAN(real_size, z_size, learning_rate, alpha=FLAGS.alpha, beta1=FLAGS.beta1)

    '''--------Load data--------'''
    train_data = loadmat(FLAGS.data_dir + '/' + FLAGS.train_data)
    test_data = loadmat(FLAGS.data_dir + '/' + FLAGS.test_data)
    dataset = preprocessor.Dataset(train_data, test_data)

    '''--------Build net--------'''
    saver = tf.train.Saver()
    sample_z = np.random.uniform(-1, 1, size=(72, z_size))  # fixed noise for comparable samples
    samples, losses = [], []
    steps = 0

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for e in range(FLAGS.epochs):
            for x, y in dataset.batches(FLAGS.batch_size):
                steps += 1
                # Sample random noise for G
                batch_z = np.random.uniform(-1, 1, size=(FLAGS.batch_size, z_size))

                # Run optimizers: one D step, then one G step
                _ = sess.run(net.d_opt, feed_dict={net.input_real: x, net.input_z: batch_z})
                _ = sess.run(net.g_opt, feed_dict={net.input_z: batch_z, net.input_real: x})

                if steps % FLAGS.print_every == 0:
                    # Evaluate and report current losses
                    train_loss_d = net.d_loss.eval({net.input_z: batch_z, net.input_real: x})
                    train_loss_g = net.g_loss.eval({net.input_z: batch_z})
                    print('Epoch {}/{}...'.format(e + 1, FLAGS.epochs),
                          'Discriminator Loss: {:.4f}'.format(train_loss_d),
                          'Generator loss: {:.4f}'.format(train_loss_g))
                    # Save losses to view after training
                    losses.append((train_loss_d, train_loss_g))

                if steps % FLAGS.show_every == 0:
                    gen_samples = sess.run(
                        generator(net.input_z, 3, reuse=True, training=False),
                        feed_dict={net.input_z: sample_z})
                    samples.append(gen_samples)
                    _ = utils.view_samples(-1, samples, 6, 12,
                                           figsize=(FLAGS.h_figsize, FLAGS.v_figsize))
                    plt.show()

        saver.save(sess, './checkpoints/generator.ckpt')

    # Persist the collected sample grids for later inspection.
    with open('samples.pkl', 'wb') as f:
        pkl.dump(samples, f)

    # BUG FIX: plt.subplot() (singular) returns a single Axes, which cannot
    # be unpacked into (fig, ax); plt.subplots() returns the (fig, ax) pair.
    fig, ax = plt.subplots()
    losses = np.array(losses)
    plt.plot(losses.T[0], label='Discriminator', alpha=0.5)
    plt.plot(losses.T[1], label='Generator', alpha=0.5)
    plt.title('Training Losses')
    plt.legend()
    plt.show()
import wget
from params import *
from model import Generator
from utils import view_samples, im_convert
import torch
import argparse
from torch.autograd import Variable
import numpy as np

# Grid layout: n images arranged as n_row x n_col (must satisfy n_row*n_col == n).
n = 20
n_col = 5
n_row = 4

# Fetch pretrained weights.
# NOTE(review): assumes trained_weights_url resolves to './dcgan.h5' — confirm.
wget.download(trained_weights_url)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Build the generator and load the pretrained weights.
gen = Generator().to(device)
gen.load_state_dict(torch.load('./dcgan.h5', map_location=device))
# FIX: switch to inference mode so batch-norm/dropout layers use their
# evaluation behavior instead of per-batch statistics.
gen.eval()

# Sample latent vectors directly on the target device.
# FIX: torch.autograd.Variable is deprecated (no-op since PyTorch 0.4);
# a plain tensor is sufficient.
z = torch.from_numpy(np.random.normal(0, 1, (n, 100, 1, 1))).float().to(device)

# FIX: no_grad() skips building the autograd graph — we never backpropagate here.
with torch.no_grad():
    gen_imgs = gen(z)

# Convert each generated tensor to a displayable image.
a = [im_convert(gen_imgs[i]) for i in range(n)]

view_samples(n_row, n_col, a)
from generator import Generator
from parameters import d_conv_dim, z_size, g_conv_dim, beta1, beta2, lr_d, lr_g
from train import train
from utils import display_images, weights_init_normal, gpu_check, load_samples, view_samples

# Peek at a batch of the training data.
display_images(loader)

# Build both networks and apply the normal weight initialization.
D = Discriminator(d_conv_dim)
G = Generator(z_size=z_size, conv_dim=g_conv_dim)
D.apply(weights_init_normal)
G.apply(weights_init_normal)

# Report whether CUDA is available for training.
train_on_gpu = gpu_check()
print('Training on GPU!' if train_on_gpu
      else 'No GPU found. Please use a GPU to train your neural network.')

# Adam optimizers with the configured learning rates and betas.
d_optimizer = optim.Adam(D.parameters(), lr_d, [beta1, beta2])
g_optimizer = optim.Adam(G.parameters(), lr_g, [beta1, beta2])

# Train, then reload and display the saved sample grids.
epochs_to_run = 30
losses = train(D, d_optimizer, G, g_optimizer, n_epochs=epochs_to_run)
samples = load_samples()
_ = view_samples(-1, samples)
def train(D, d_optimizer: optim.Adam, G, g_optimizer: optim.Adam, n_epochs, print_every=50):
    '''Trains adversarial networks for some number of epochs
       param, D: the discriminator network
       param, d_optimizer: optimizer stepping D's parameters
       param, G: the generator network
       param, g_optimizer: optimizer stepping G's parameters
       param, n_epochs: number of epochs to train for
       param, print_every: when to print and record the models' losses
       return: list of (d_loss, g_loss) pairs recorded every print_every batches'''
    if train_on_gpu:
        D.cuda()
        G.cuda()

    samples = []
    losses = []
    sample_size = 16
    # One fixed noise batch, reused each epoch so the sample grids are comparable.
    fixed_z = generate_fixed_z((sample_size, z_size))

    for epoch in range(n_epochs):
        for batch_i, (real_images, _) in enumerate(loader):
            batch_size = real_images.size(0)
            real_images = scale(real_images)
            if train_on_gpu:
                real_images = real_images.cuda()

            # ---- Discriminator step: real batch + fresh fake batch ----
            d_optimizer.zero_grad()
            d_real_loss = real_loss(D(real_images))
            fake_images = G(generate_fixed_z((batch_size, z_size)))
            # FIX: detach the fakes so D's backward pass does not propagate
            # gradients into G. The original worked only because
            # g_optimizer.zero_grad() discarded those gradients later; the
            # detach avoids the wasted backward pass through G entirely.
            d_fake_loss = fake_loss(D(fake_images.detach()))
            d_loss = d_real_loss + d_fake_loss
            d_loss.backward()
            d_optimizer.step()

            # ---- Generator step: new fakes, flipped labels (real_loss) ----
            g_optimizer.zero_grad()
            fake_images = G(generate_fixed_z((batch_size, z_size)))
            g_loss = real_loss(D(fake_images))
            g_loss.backward()
            g_optimizer.step()

            # Print some loss stats
            if batch_i % print_every == 0:
                # append discriminator loss and generator loss
                losses.append((d_loss.item(), g_loss.item()))
                # print discriminator and generator loss
                print(
                    'Epoch [{:5d}/{:5d}] | d_loss: {:6.4f} | g_loss: {:6.4f}'.
                    format(epoch + 1, n_epochs, d_loss.item(), g_loss.item()))

        # After each epoch, render samples from the fixed noise.
        G.eval()  # for generating samples
        samples_z = G(fixed_z)
        samples.append(samples_z)
        view_samples(epoch, samples)
        G.train()  # back to training mode

    dump_samples(samples)
    # finally return losses
    return losses