def save_weights():
    """Restore a trained LSGAN from its checkpoint and export the Generator's
    weights to ``generator_weights/generator``.

    Side effects: reads ``models/checkpoints``, prints a sanity-check
    prediction, and writes the weight files. Returns nothing.
    """
    # BUG FIX: force CPU *before* any model/optimizer is created.
    # TensorFlow claims devices at the first op, so setting this env var
    # after constructing the models (as the original did) has no effect.
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

    # Define Generator, Discriminator and their corresponding optimizers
    generator = Generator(img_size=32)
    discriminator = Discriminator(img_size=32)
    generator_optimizer, discriminator_optimizer = define_lsgan_optimizers()

    # Restore checkpoint (optimizers included so their slot variables match)
    checkpoint = tf.train.Checkpoint(
        generator=generator,
        discriminator=discriminator,
        generator_optimizer=generator_optimizer,
        discriminator_optimizer=discriminator_optimizer)
    restore_checkpoint(checkpoint, 'models/checkpoints')

    # Build the Generator and specify its input shape (latent dim 100)
    generator.build(input_shape=(None, 100))

    # Make a test prediction to see whether it is working
    test_prediction = generator.predict(generate_noise(1, 100))
    print(test_prediction)

    # Save weights
    generator.save_weights('generator_weights/generator')
def training_step_lsgan(generator: Generator, discriminator: Discriminator, generator_optimizer, discriminator_optimizer, images: np.ndarray, k: int = 1, batch_size=1):
    """Run ``k`` LSGAN update steps on one batch of real images.

    Each step performs a single forward pass of both networks under two
    gradient tapes, then updates the Discriminator first and the Generator
    second, both from that same forward pass.
    """
    for _ in range(k):
        with tf.GradientTape() as generator_tape, tf.GradientTape() as discriminator_tape:
            # Sample latent vectors and synthesize a fake batch.
            noise = generate_noise(batch_size, 100)
            fakes = generator(noise, training=True)

            # Discriminator verdicts on real and generated samples.
            real_out = discriminator(images, training=True)
            fake_out = discriminator(fakes, training=True)

            # Least-squares GAN losses for both players.
            g_loss = generator_lsgan_loss_function(fake_out)
            d_loss = discriminator_lsgan_loss_function(real_out, fake_out)

        # Discriminator update comes first.
        d_grads = discriminator_tape.gradient(d_loss, discriminator.trainable_variables)
        discriminator_optimizer.apply_gradients(zip(d_grads, discriminator.trainable_variables))

        # Then the Generator update.
        g_grads = generator_tape.gradient(g_loss, generator.trainable_variables)
        generator_optimizer.apply_gradients(zip(g_grads, generator.trainable_variables))

        print('Trained on another batch')
def generate_and_save_sample_image(generator: Generator, architecture_type, epoch):
    """Draw one sample from the Generator and save it as a PNG under
    ``generated_images/<architecture_type>/<epoch>.png``.
    """
    # Single inference-mode sample from a fresh latent vector.
    sample = generator(generate_noise(batch_size=1, random_noise_size=100), training=False)[0]

    # Render without axes and write to disk, trimming surrounding whitespace.
    plt.axis('off')
    plt.imshow(sample)
    out_path = '{}/{}/{}.png'.format('generated_images', architecture_type, epoch)
    plt.savefig(out_path, bbox_inches='tight')
def generate_and_save_sample_image(generator: ConditionalGenerator, labels_batch, batch_size, architecture_type, epoch):
    """Draw one label-conditioned sample from the Generator and save it as a
    PNG under ``generated_images/<architecture_type>/<epoch>.png``.

    Only the first image of the generated batch is saved.
    """
    noise = generate_noise(batch_size, 100)
    # NOTE(review): unlike the unconditional variant, this call does not pass
    # training=False — confirm whether inference mode is intended here.
    generated_image = generator(noise, labels_batch)[0]

    plt.imshow(generated_image)
    plt.axis('off')
    # BUG FIX: the directory argument was 'generated_images/' which, combined
    # with the '{}/{}/{}' template, produced a double slash
    # ('generated_images//<type>/<epoch>.png') and diverged from the
    # unconditional variant's path.
    plt.savefig('{}/{}/{}.png'.format('generated_images', architecture_type, epoch), bbox_inches='tight')
def training_step_clsgan(generator: ConditionalGenerator, discriminator: ConditionalDiscriminator, generator_optimizer, discriminator_optimizer, images_batch: np.ndarray, labels_batch: np.ndarray, batch_size, generator_training_rate, discriminator_training_rate):
    """Run one conditional-LSGAN training step on a single labeled batch.

    The Generator is updated ``generator_training_rate`` times, then the
    Discriminator ``discriminator_training_rate`` times, each phase with
    freshly sampled noise.
    """
    # --- Generator updates -------------------------------------------------
    for _ in range(generator_training_rate):
        with tf.GradientTape() as generator_tape:
            noise = generate_noise(batch_size, 100)
            generated_images = generator(noise, labels_batch, training=True)

            # Only the fake prediction matters for the generator loss.
            fake_prediction = discriminator(generated_images, labels_batch, training=True)
            generator_loss = generator_lsgan_loss_function(fake_prediction)

        gradients_of_generator = generator_tape.gradient(generator_loss, generator.trainable_variables)
        generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))

    # --- Discriminator updates ---------------------------------------------
    for _ in range(discriminator_training_rate):
        with tf.GradientTape() as discriminator_tape:
            noise = generate_noise(batch_size, 100)
            # Generator is only used for inference while training D.
            generated_images = generator(noise, labels_batch, training=False)

            # BUG FIX: the real pass used training=False while the fake pass
            # used training=True, so BN/dropout behaved differently between
            # the two halves of the same loss (and diverged from the
            # unconditional training step, which uses training=True for both).
            real_prediction = discriminator(images_batch, labels_batch, training=True)
            fake_prediction = discriminator(generated_images, labels_batch, training=True)
            discriminator_loss = discriminator_lsgan_loss_function(real_prediction, fake_prediction)

        gradients_of_discriminator = discriminator_tape.gradient(discriminator_loss, discriminator.trainable_variables)
        discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
def generate():
    """Generate one logo image for the username given in ``sys.argv[1]`` and
    save it to ``media/temporary_logos/<username>_output.png``.

    Loads Generator weights from the CalliopeAPI checkout; runs on CPU.
    """
    # BUG FIX: pin to CPU *before* constructing the model — TensorFlow binds
    # devices at the first op, so setting this afterwards has no effect.
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

    # SECURITY: argv comes from outside the process; strip any directory
    # components so a crafted username (e.g. '../../etc/x') cannot escape
    # the temporary_logos directory.
    username = os.path.basename(str(sys.argv[1]))

    generator = Generator(img_size=32)
    generator.load_weights('../CalliopeAPI/generator_weights/generator')

    # Single sample from a fresh latent vector.
    img = generator(generate_noise(1, 100))[0]

    plt.imshow(img)
    plt.axis('off')
    plt.savefig('media/temporary_logos/' + username + '_output.png', bbox_inches='tight')
def training_step_wgan(generator: Generator, critic: Critic, generator_optimizer, critic_optimizer, images: np.ndarray, k: int = 1, batch_size=1):
    """Run ``k`` WGAN training steps on one batch of real images.

    Per step: the Critic is updated 5 times (standard WGAN n_critic ratio),
    then the Generator once. Each network is in inference mode
    (training=False) while the *other* one is being optimized.
    """
    for i in range(k):
        # Critic Training — 5 critic updates per generator update
        for j in range(5):
            with tf.GradientTape() as critic_tape:
                noise = generate_noise(batch_size, 100)
                # Generator is frozen (inference mode) while training the Critic.
                generated_images = generator(noise, training=False)
                real_prediction = critic(images, training=True)
                fake_prediction = critic(generated_images, training=True)
                # critic_loss_function also receives the critic itself —
                # presumably for a weight/gradient penalty term; confirm in its definition.
                critic_loss = critic_loss_function(real_prediction, fake_prediction, critic)
            # Optimize the Critic
            gradients_of_critic = critic_tape.gradient(critic_loss, critic.trainable_variables)
            critic_optimizer.apply_gradients(zip(gradients_of_critic, critic.trainable_variables))
        # Generator Training — single update, Critic frozen (training=False)
        with tf.GradientTape() as generator_tape:
            noise = generate_noise(batch_size, 100)
            generated_images = generator(noise, training=True)
            fake_prediction = critic(generated_images, training=False)
            generator_loss = generator_wgan_loss_function(fake_prediction)
        # Optimize the Generator
        gradients_of_generator = generator_tape.gradient(generator_loss, generator.trainable_variables)
        generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))