Example #1
def tv_loss(prediction, batch_size):
    # Anisotropic total variation: squared differences between vertically
    # and horizontally adjacent pixels, each normalized by its element count.
    batch_shape = (batch_size, PATCH_WIDTH, PATCH_HEIGHT, 3)
    tv_y_size = utils._tensor_size(prediction[:, 1:, :, :])
    tv_x_size = utils._tensor_size(prediction[:, :, 1:, :])
    y_tv = tf.nn.l2_loss(prediction[:, 1:, :, :] -
                         prediction[:, :batch_shape[1] - 1, :, :])
    x_tv = tf.nn.l2_loss(prediction[:, :, 1:, :] -
                         prediction[:, :, :batch_shape[2] - 1, :])
    loss_tv = 2 * (x_tv / tv_x_size + y_tv / tv_y_size) / batch_size
    return loss_tv
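
utils._tensor_size is used throughout these examples but never defined in them. From the way it normalizes the losses, it presumably returns the number of elements per image, i.e. the product of all non-batch dimensions; a minimal sketch of that assumption (static shapes assumed):

def _tensor_size(tensor):
    # Product of all static dimensions except the batch dimension,
    # e.g. (H - 1) * W * 3 for the sliced tensors above.
    size = 1
    for d in tensor.get_shape().as_list()[1:]:
        size *= d
    return size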
Example #2
def content_loss(target, prediction, batch_size):
    CONTENT_LAYER = 'relu5_4'
    vgg_dir = '/gdata/huangjie/hdrnet/vgg_pretrained/imagenet-vgg-verydeep-19.mat'
    enhanced_vgg = vgg.net(vgg_dir, vgg.preprocess(prediction * 255))
    dslr_vgg = vgg.net(vgg_dir, vgg.preprocess(target * 255))

    content_size = utils._tensor_size(dslr_vgg[CONTENT_LAYER]) * batch_size
    loss_content = 2 * tf.nn.l2_loss(enhanced_vgg[CONTENT_LAYER] -
                                     dslr_vgg[CONTENT_LAYER]) / content_size
    return tf.reduce_mean(loss_content)
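
For readers without the .mat weights, the same relu5_4 term can be sketched in TF2 with tf.keras.applications.VGG19; the mapping of 'relu5_4' to the Keras layer 'block5_conv4' (whose activation is ReLU) and the preprocessing are assumptions, not part of the original code:

import tensorflow as tf

def content_loss_keras(target, prediction):
    # Assumed TF2 equivalent of the snippet above.
    base = tf.keras.applications.VGG19(include_top=False, weights='imagenet')
    extractor = tf.keras.Model(base.input, base.get_layer('block5_conv4').output)
    feats_true = extractor(tf.keras.applications.vgg19.preprocess_input(target * 255))
    feats_pred = extractor(tf.keras.applications.vgg19.preprocess_input(prediction * 255))
    # 2 * tf.nn.l2_loss(x) == sum(x**2), and tf.size(feats_true) plays the
    # role of content_size (elements per image times batch size).
    return tf.reduce_sum(tf.square(feats_pred - feats_true)) / \
        tf.cast(tf.size(feats_true), tf.float32)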
Example #3
def content_loss(target, prediction, batch_size):
    CONTENT_LAYER = 'relu5_4'
    CONTENT_LAYER1 = 'relu3_4'
    CONTENT_LAYER2 = 'relu1_2'
    vgg_dir = './vgg_pretrained/imagenet-vgg-verydeep-19.mat'
    enhanced_vgg = vgg.net(vgg_dir, vgg.preprocess(prediction * 255))
    dslr_vgg = vgg.net(vgg_dir, vgg.preprocess(target * 255))

    content_size = utils._tensor_size(dslr_vgg[CONTENT_LAYER]) * batch_size
    content_size1 = utils._tensor_size(dslr_vgg[CONTENT_LAYER1]) * batch_size
    content_size2 = utils._tensor_size(dslr_vgg[CONTENT_LAYER2]) * batch_size

    loss_content = 2 * tf.nn.l2_loss(enhanced_vgg[CONTENT_LAYER] -
                                     dslr_vgg[CONTENT_LAYER]) / content_size
    loss_content1 = 2 * tf.nn.l2_loss(enhanced_vgg[CONTENT_LAYER1] -
                                      dslr_vgg[CONTENT_LAYER1]) / content_size1
    loss_content2 = 2 * tf.nn.l2_loss(enhanced_vgg[CONTENT_LAYER2] -
                                      dslr_vgg[CONTENT_LAYER2]) / content_size2

    return (tf.reduce_mean(loss_content) + tf.reduce_mean(loss_content1) +
            tf.reduce_mean(loss_content2)) / 3
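
The three terms above differ only in the layer name, and tf.reduce_mean is a no-op on the scalars that tf.nn.l2_loss returns, so the same loss can be written as a loop. A compact sketch reusing the snippet's vgg and utils helpers:

def multi_layer_content_loss(target, prediction, batch_size,
                             layers=('relu5_4', 'relu3_4', 'relu1_2')):
    vgg_dir = './vgg_pretrained/imagenet-vgg-verydeep-19.mat'
    enhanced_vgg = vgg.net(vgg_dir, vgg.preprocess(prediction * 255))
    dslr_vgg = vgg.net(vgg_dir, vgg.preprocess(target * 255))

    losses = []
    for layer in layers:
        content_size = utils._tensor_size(dslr_vgg[layer]) * batch_size
        losses.append(2 * tf.nn.l2_loss(enhanced_vgg[layer] -
                                        dslr_vgg[layer]) / content_size)
    # Average over the layers, as in the original return statement.
    return tf.add_n(losses) / len(layers)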
Example #4
    loss_content = loss.content_loss(dslr_image, enhanced, batch_size)

    # 3) color loss
    enhanced_blur = lutils.blur(enhanced)
    dslr_blur = lutils.blur(dslr_image)

    #loss_color = tf.reduce_sum(tf.pow(dslr_blur - enhanced_blur, 2))/(2 * batch_size)
    loss_color = tf.reduce_sum(
        tf.abs(dslr_image - enhanced)) / (2 * batch_size)

    #loss_color = loss.color_loss(dslr_image, enhanced, batch_size)

    # 4) total variation loss

    batch_shape = (batch_size, PATCH_WIDTH, PATCH_HEIGHT, 3)
    tv_y_size = lutils._tensor_size(enhanced[:, 1:, :, :])
    tv_x_size = lutils._tensor_size(enhanced[:, :, 1:, :])
    y_tv = tf.nn.l2_loss(enhanced[:, 1:, :, :] -
                         enhanced[:, :batch_shape[1] - 1, :, :])
    x_tv = tf.nn.l2_loss(enhanced[:, :, 1:, :] -
                         enhanced[:, :, :batch_shape[2] - 1, :])
    loss_tv = 2 * (x_tv / tv_x_size + y_tv / tv_y_size) / batch_size

    # 5) ssim
    loss_ssim = loss.Mssim_loss(dslr_image, enhanced)

    # final loss

    loss_generator = (w_content * loss_content + w_texture * loss_texture +
                      w_color * loss_color + w_tv * loss_tv + w_ssim * loss_ssim)

Example #5
    # PSNR loss (loss_mse is assumed to be computed earlier in the full script)
    loss_psnr = 20 * utils.log10(1.0 / tf.sqrt(loss_mse))

    # SSIM loss
    loss_ssim = tf.reduce_mean(tf.image.ssim(enhanced, dslr_, 1.0))

    # MS-SSIM loss
    loss_ms_ssim = tf.reduce_mean(tf.image.ssim_multiscale(enhanced, dslr_, 1.0))
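    # NOTE: tf.image.ssim_multiscale repeatedly downsamples its inputs
    # (five scales by default), so the patches must remain larger than the
    # 11x11 SSIM window at the coarsest scale.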

    # Content loss
    CONTENT_LAYER = 'relu5_4'

    enhanced_vgg = vgg.net(vgg_dir, vgg.preprocess(enhanced * 255))
    dslr_vgg = vgg.net(vgg_dir, vgg.preprocess(dslr_ * 255))

    content_size = utils._tensor_size(dslr_vgg[CONTENT_LAYER]) * batch_size
    loss_content = 2 * tf.nn.l2_loss(enhanced_vgg[CONTENT_LAYER] - dslr_vgg[CONTENT_LAYER]) / content_size

    # Final loss function
    loss_generator = loss_mse * 20 + loss_content + (1 - loss_ssim) * 20

    # Optimize network parameters
    generator_vars = [v for v in tf.compat.v1.global_variables() if v.name.startswith("generator")]
    train_step_gen = tf.compat.v1.train.AdamOptimizer(learning_rate).minimize(loss_generator, var_list=generator_vars)

    # Initialize and restore the variables
    print("Initializing variables...")
    sess.run(tf.compat.v1.global_variables_initializer())

    saver = tf.compat.v1.train.Saver(var_list=generator_vars, max_to_keep=100)
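
A minimal sketch of how this graph is typically driven; the loop bounds, the phone_ placeholder, and the data loader below are assumptions, not part of the snippet:

    for step in range(num_train_steps):          # num_train_steps: assumed
        phone_batch, dslr_batch = load_batch()   # load_batch: hypothetical helper
        _, gen_loss = sess.run([train_step_gen, loss_generator],
                               feed_dict={phone_: phone_batch, dslr_: dslr_batch})
        if step % save_every == 0:               # save_every: assumed
            saver.save(sess, "models/generator_%d.ckpt" % step)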
Example #6
    discrim_target = tf.concat([adv_, 1 - adv_], 1)

    loss_discrim = -tf.reduce_sum(discrim_target * tf.log(tf.clip_by_value(discrim_predictions, 1e-10, 1.0)))
    loss_texture = -loss_discrim

    correct_predictions = tf.equal(tf.argmax(discrim_predictions, 1), tf.argmax(discrim_target, 1))
    discim_accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))
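    # The reduce_sum above is the categorical cross-entropy between the
    # one-hot target [adv_, 1 - adv_] and the clipped discriminator
    # probabilities; the generator minimizes its negation (loss_texture),
    # i.e. it tries to maximize the discriminator's error.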

    # 2) content loss

    CONTENT_LAYER = 'relu5_4'

    enhanced_vgg = vgg.net(vgg_dir, vgg.preprocess(enhanced * 255))
    dslr_vgg = vgg.net(vgg_dir, vgg.preprocess(dslr_image * 255))

    content_size = utils._tensor_size(dslr_vgg[CONTENT_LAYER]) * batch_size
    loss_content = 2 * tf.nn.l2_loss(enhanced_vgg[CONTENT_LAYER] - dslr_vgg[CONTENT_LAYER]) / content_size
    # content_size = batch_size * height * width * channels (of the feature map)
    # tf.nn.l2_loss(a) = sum(a**2) / 2

    # 3) color loss

    enhanced_blur = utils.blur(enhanced)   # Gaussian blur; see the sketch after this example
    dslr_blur = utils.blur(dslr_image)

    loss_color = tf.reduce_sum(tf.pow(dslr_blur - enhanced_blur, 2)) / (2 * batch_size)

    # 4) total variation loss

    batch_shape = (batch_size, PATCH_WIDTH, PATCH_HEIGHT, 3)
    tv_y_size = utils._tensor_size(enhanced[:, 1:, :, :])
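
utils.blur in the color loss above is not shown in these snippets; in DPED-style pipelines it is a Gaussian blur that discards texture so that only color differences are penalized. A minimal sketch under that assumption (kernel size and sigma are guesses):

import numpy as np
import tensorflow as tf

def gaussian_blur(images, size=21, sigma=3.0):
    # Depthwise Gaussian blur over an NHWC float tensor.
    ax = np.arange(size, dtype=np.float32) - size // 2
    g = np.exp(-(ax ** 2) / (2.0 * sigma ** 2))
    kernel_2d = np.outer(g, g)
    kernel_2d /= kernel_2d.sum()
    # The same kernel for each of the 3 channels: shape [H, W, in_channels, 1].
    kernel = np.tile(kernel_2d[:, :, None, None], (1, 1, 3, 1))
    return tf.nn.depthwise_conv2d(images, tf.constant(kernel),
                                  strides=[1, 1, 1, 1], padding='SAME')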
Example #7
def perceptual_loss(gen_hq_vgg, hq_vgg, VGG_LAYER, function, batch_size):
    # NOTE: the `function` argument is accepted but never used in this snippet.
    content_size = utils._tensor_size(hq_vgg[VGG_LAYER]) * batch_size
    loss_vgg = tf.reduce_sum(
        tf.pow(gen_hq_vgg[VGG_LAYER] - hq_vgg[VGG_LAYER], 2)) / content_size

    return loss_vgg
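
A hypothetical call site for the helper above (the generated/target tensors and vgg_dir are assumed to be defined elsewhere):

gen_hq_vgg = vgg.net(vgg_dir, vgg.preprocess(generated * 255))
hq_vgg = vgg.net(vgg_dir, vgg.preprocess(target * 255))
loss_vgg = perceptual_loss(gen_hq_vgg, hq_vgg, 'relu5_4', None, batch_size)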
Example #8
    # content loss
    loss_content = ssim_loss + cx_loss

    # 3) color loss

    enhanced_blur = utils.blur(enhanced)
    dslr_blur = utils.blur(dslr_image)

    loss_color = tf.reduce_sum(tf.pow(dslr_blur - enhanced_blur,
                                      2)) / (2 * batch_size)

    # 4) total variation loss

    batch_shape = (batch_size, PATCH_WIDTH, PATCH_HEIGHT, 3)
    tv_y_size = utils._tensor_size(enhanced[:, 1:, :, :])  # H
    tv_x_size = utils._tensor_size(enhanced[:, :, 1:, :])  # W
    y_tv = tf.nn.l2_loss(enhanced[:, 1:, :, :] -
                         enhanced[:, :batch_shape[1] - 1, :, :])
    x_tv = tf.nn.l2_loss(enhanced[:, :, 1:, :] -
                         enhanced[:, :, :batch_shape[2] - 1, :])
    loss_tv = 2 * (x_tv / tv_x_size + y_tv / tv_y_size) / batch_size

    # final loss

    loss_generator = (w_content * loss_content + w_texture * loss_texture +
                      w_color * loss_color + w_tv * loss_tv)

    # PSNR evaluation

    enhanced_flat = tf.reshape(enhanced, [-1, PATCH_SIZE])
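
The snippet stops at the flattening step; a typical continuation (an assumption here, mirroring the PSNR formula in Example #5) computes the MSE over the flat tensors and converts it to PSNR:

    # Assumed continuation: MSE over flattened patches, then PSNR.
    dslr_flat = tf.reshape(dslr_image, [-1, PATCH_SIZE])
    loss_mse = tf.reduce_sum(tf.pow(dslr_flat - enhanced_flat, 2)) / (PATCH_SIZE * batch_size)
    loss_psnr = 20 * utils.log10(1.0 / tf.sqrt(loss_mse))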
Example #9
    loss_ssim = tf.reduce_mean(tf.image.ssim(bokeh_img, target_, 1.0))

    # MS-SSIM loss
    loss_ms_ssim = tf.reduce_mean(
        tf.image.ssim_multiscale(bokeh_img, target_, 1.0))

    # L1 loss
    loss_l1 = tf.compat.v1.losses.absolute_difference(bokeh_img, target_)
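    # tf.compat.v1.losses.absolute_difference with default weights averages
    # |bokeh_img - target_| over all elements, i.e. a mean L1 loss.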

    # Content loss
    CONTENT_LAYER = 'relu5_4'

    bokeh_img_vgg = vgg.net(vgg_dir, vgg.preprocess(bokeh_img * 255))
    target_vgg = vgg.net(vgg_dir, vgg.preprocess(target_ * 255))

    content_size = utils._tensor_size(target_vgg[CONTENT_LAYER]) * batch_size
    loss_content = 2 * tf.nn.l2_loss(bokeh_img_vgg[CONTENT_LAYER] -
                                     target_vgg[CONTENT_LAYER]) / content_size

    # Final loss function

    if LEVEL > 1:
        loss_generator = loss_l1 * 100
    else:
        loss_generator = (loss_l1 * 10 + loss_content * 0.1 +
                          (1 - loss_ssim) * 10)

    # Optimize network parameters

    generator_vars = [
        v for v in tf.compat.v1.global_variables()