def filter_loss(target, prediction, adv_):
    """Adversarial texture loss on filtered, grayscale patches.

    Both inputs are run through ``Getfilter(19, ...)`` (opaque helper;
    presumably a fixed filtering op — TODO confirm) and converted to
    grayscale.  Per sample, ``adv_`` routes either the prediction
    (``adv_ == 0``) or the target (``adv_ == 1``) to the CNN
    discriminator, which must identify the source.

    Returns:
        A ``(texture_loss, accuracy)`` pair: the negated discriminator
        cross-entropy (generator-side loss) and the discriminator's
        classification accuracy.
    """
    # Filter + grayscale both images before comparison.
    pred_gray = tf.image.rgb_to_grayscale(Getfilter(19, prediction))
    target_gray = tf.image.rgb_to_grayscale(Getfilter(19, target))

    # Flatten each patch to a single row vector.
    pred_flat = tf.reshape(pred_gray, [-1, PATCH_WIDTH * PATCH_HEIGHT])
    target_flat = tf.reshape(target_gray, [-1, PATCH_WIDTH * PATCH_HEIGHT])

    # Per-sample blend: adv_ selects which patch the discriminator sees.
    blended = tf.multiply(pred_flat, 1 - adv_) + tf.multiply(target_flat, adv_)
    blended_image = tf.reshape(blended, [-1, PATCH_HEIGHT, PATCH_WIDTH, 1])

    # Discriminator output vs. the one-hot routing labels.
    predictions = models.adversarial(blended_image)
    labels = tf.concat([adv_, 1 - adv_], 1)

    # Cross-entropy; clipping keeps log() away from zero.
    cross_entropy = -tf.reduce_sum(
        labels * tf.log(tf.clip_by_value(predictions, 1e-10, 1.0)))

    # Fraction of patches whose source the discriminator identified.
    hits = tf.equal(tf.argmax(predictions, 1), tf.argmax(labels, 1))
    accuracy = tf.reduce_mean(tf.cast(hits, tf.float32))

    # Generator is trained to fool the discriminator, hence the sign flip.
    return -cross_entropy, accuracy
# transform both dslr and enhanced images to grayscale enhanced_gray = tf.reshape(tf.image.rgb_to_grayscale(enhanced), [-1, PATCH_WIDTH * PATCH_HEIGHT]) dslr_gray = tf.reshape(tf.image.rgb_to_grayscale(dslr_image), [-1, PATCH_WIDTH * PATCH_HEIGHT]) # push randomly the enhanced or dslr image to an adversarial CNN-discriminator adversarial_ = tf.multiply(enhanced_gray, 1 - adv_) + tf.multiply( dslr_gray, adv_) adversarial_image = tf.reshape(adversarial_, [-1, PATCH_HEIGHT, PATCH_WIDTH, 1]) discrim_predictions = models.adversarial(adversarial_image) # losses # 1) texture (adversarial) loss discrim_target = tf.concat([adv_, 1 - adv_], 1) loss_discrim = -tf.reduce_sum(discrim_target * tf.log( tf.clip_by_value(discrim_predictions, 1e-10, 1.0))) loss_texture = -loss_discrim correct_predictions = tf.equal(tf.argmax(discrim_predictions, 1), tf.argmax(discrim_target, 1)) discim_accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32)) # 2) content loss
# --- GAN training loop ---
# NOTE(review): this chunk appears truncated — the generator/adversarial
# train_on_batch step expected after the final `noise = ...` line is not
# visible here.

training_steps = 350        # number of training iterations
generator_model = None      # built lazily on first iteration
adversarial_model = None    # stacked generator + discriminator, built lazily
discriminator_model = None  # built lazily on first iteration
batch_size = 128

if GAN:
    for i in range(training_steps):
        # next slice of real training images; presumably X is (N, H, W) —
        # TODO confirm against where X is loaded
        x = X[i * batch_size:(i + 1) * batch_size, :, :]
        # NOTE(review): `size` is never used in the visible code; also, a
        # final partial batch (size < batch_size) would mismatch the
        # batch_size fakes concatenated below — verify X's length
        size = x.shape[0]
        # instantiate the three models once, on demand
        if not discriminator_model:
            discriminator_model = discriminator()
        if not generator_model:
            generator_model = generator()
        if not adversarial_model:
            adversarial_model = adversarial(
                generator_model, discriminator_model)
        # produce a batch of fake images from 100-dim uniform noise vectors
        noise = np.random.uniform(0, 1.0, size=[batch_size, 100])
        fakes = generator_model.predict(noise)
        # stack real + fake images, add a trailing channel axis, and label
        # them 1 (real, first half) / 0 (fake, second half)
        x = np.concatenate((x, fakes))
        x = np.expand_dims(x, axis=3)
        y = np.zeros([2 * batch_size, 1])
        y[:batch_size, :] = 1
        d_loss = discriminator_model.train_on_batch(x, y)
        # fresh noise with all-ones ("real") labels for the generator step;
        # the corresponding train_on_batch call is outside this view
        y = np.ones([batch_size, 1])
        noise = np.random.uniform(0, 1.0, size=[batch_size, 100])
[-1, PATCH_WIDTH * PATCH_HEIGHT * 3])
# NOTE(review): the line above is the tail of a statement that starts
# before this chunk — presumably `enhanced_blur = tf.reshape(utils.blur(
# enhanced),` by symmetry with dslr_blur below; confirm upstream.
dslr_blur = tf.reshape(utils.blur(dslr_image),
                       [-1, PATCH_WIDTH * PATCH_HEIGHT * 3])

# push randomly the enhanced or dslr image to an adversarial CNN-discriminator;
# adv_ / adv_color_ are per-sample selectors: 1 -> dslr, 0 -> enhanced
adversarial_ = tf.multiply(enhanced_gray, 1 - adv_) + tf.multiply(
    dslr_gray, adv_)
adversarial_image = tf.reshape(adversarial_,
                               [-1, PATCH_HEIGHT, PATCH_WIDTH, 1])
# same routing trick for the blurred 3-channel (color) variant
adversarial_color_ = tf.multiply(
    enhanced_blur, 1 - adv_color_) + tf.multiply(dslr_blur, adv_color_)
adversarial_color_image = tf.reshape(adversarial_color_,
                                     [-1, PATCH_HEIGHT, PATCH_WIDTH, 3])
discrim_predictions = models.adversarial(adversarial_image)
discrim_predictions_color = models.adversarial(adversarial_color_image)

# losses
# 1) texture (adversarial) loss — cross-entropy of the discriminator's
#    output against the routing labels; clip_by_value avoids log(0)
discrim_target = tf.concat([adv_, 1 - adv_], 1)
loss_discrim = -tf.reduce_sum(discrim_target * tf.log(
    tf.clip_by_value(discrim_predictions, 1e-10, 1.0)))
# the generator is trained to *fool* the discriminator, hence the sign flip
loss_texture = -loss_discrim

# discriminator accuracy on the grayscale branch
correct_predictions = tf.equal(tf.argmax(discrim_predictions, 1),
                               tf.argmax(discrim_target, 1))
discim_accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))