def bce_loss(preds, targets):
    """
    Computes the summed binary cross-entropy loss over all frames.

    @param preds: An Nx1 tensor of predicted classifications, one per frame.
                  (Docstring in the original said 1xN; the `transpose_a=True`
                  matmul pattern implies column vectors — TODO confirm.)
    @param targets: An Nx1 tensor of labels for each frame. Not called "truths"
                    because the generator deliberately passes in lies to measure
                    how well it fools the discriminator.

    @return: A scalar tensor: the summed binary cross-entropy loss.
    """
    # targets^T . log(preds): contribution of the positive-label term.
    pos_term = tf.matmul(targets, log10(preds), transpose_a=True)
    # (1 - targets)^T . log(1 - preds): contribution of the negative-label term.
    neg_term = tf.matmul(1 - targets, log10(1 - preds), transpose_a=True)
    # Negate (cross-entropy is a negative log-likelihood) and drop the 1x1 dims.
    return tf.squeeze(-1 * (pos_term + neg_term))
# Example #2 (score: 0)
def psnr_error(gen_frames, gt_frames):
    """
    Computes the Peak Signal to Noise Ratio error between generated frames and
    ground-truth frames.

    @param gen_frames: A [batch_size, height, width, 3] tensor of frames produced
                       by the generator.
    @param gt_frames: A [batch_size, height, width, 3] tensor of ground-truth
                      frames, aligned one-to-one with gen_frames.

    @return: A scalar tensor: the PSNR averaged over the batch.
    """
    frame_shape = tf.shape(gen_frames)
    # Pixels per frame = height * width * channels (batch dim excluded).
    pixels_per_frame = tf.to_float(frame_shape[1] * frame_shape[2] * frame_shape[3])

    # Per-pixel squared error, summed over the spatial and channel axes.
    sq_err = tf.square(gt_frames - gen_frames)
    per_frame_sum = tf.reduce_sum(sq_err, [1, 2, 3])

    # PSNR = 10 * log10(MAX^2 / MSE); pixel values are presumably normalized so
    # MAX = 1 — TODO confirm against the data pipeline.
    per_frame_psnr = 10 * log10(1 / ((1 / pixels_per_frame) * per_frame_sum))
    return tf.reduce_mean(per_frame_psnr)
# Example #3 (score: 0)
def sharp_diff_error(gen_frames, gt_frames):
    """
    Computes the Sharpness Difference error between the generated images and the ground truth
    images.

    @param gen_frames: A tensor of shape [batch_size, height, width, 3]. The frames generated by the
                       generator model.
    @param gt_frames: A tensor of shape [batch_size, height, width, 3]. The ground-truth frames for
                      each frame in gen_frames.

    @return: A scalar tensor. The Sharpness Difference error over each frame in the batch.
    """
    shape = tf.shape(gen_frames)
    # Pixels per frame = height * width * channels (batch dim excluded).
    num_pixels = tf.to_float(shape[1] * shape[2] * shape[3])

    with tf.name_scope('grad_diff'):
        # gradient difference
        # create filters [-1, 1] and [[1],[-1]] for diffing to the left and down respectively.
        with tf.name_scope('setup'):
            # 3x3 identity: each of the 3 channels is differenced independently
            # (no cross-channel mixing in the conv below).
            pos = tf.constant(np.identity(3), dtype=tf.float32)
            neg = -1 * pos
            # stack([neg, pos]) -> [2, 3, 3]; expand_dims(..., 0) -> [1, 2, 3, 3],
            # i.e. a conv2d filter of height 1, width 2, in/out channels 3.
            filter_x = tf.expand_dims(tf.stack([neg, pos]), 0)  # [-1, 1]
            # stack of two [1, 3, 3] tensors -> [2, 1, 3, 3],
            # i.e. a conv2d filter of height 2, width 1, in/out channels 3.
            filter_y = tf.stack([tf.expand_dims(pos, 0), tf.expand_dims(neg, 0)])  # [[1],[-1]]
            strides = [1, 1, 1, 1]  # stride of (1, 1)
            padding = 'SAME'  # keeps output the same spatial size as the input

        # Absolute horizontal/vertical image gradients for both sets of frames.
        with tf.name_scope('gen_dx'):
            gen_dx = tf.abs(tf.nn.conv2d(gen_frames, filter_x, strides, padding=padding))
        with tf.name_scope('gen_dy'):
            gen_dy = tf.abs(tf.nn.conv2d(gen_frames, filter_y, strides, padding=padding))
        with tf.name_scope('gt_dx'):
            gt_dx = tf.abs(tf.nn.conv2d(gt_frames, filter_x, strides, padding=padding))
        with tf.name_scope('gt_dy'):
            gt_dy = tf.abs(tf.nn.conv2d(gt_frames, filter_y, strides, padding=padding))

        # Total gradient magnitude (|dx| + |dy|) per pixel for each image.
        gen_grad_sum = gen_dx + gen_dy
        gt_grad_sum = gt_dx + gt_dy

        # Per-pixel difference in gradient magnitude between truth and generation.
        grad_diff = tf.abs(gt_grad_sum - gen_grad_sum)

    # PSNR-style score over the gradient difference; presumably assumes pixel
    # values normalized to [0, 1] (MAX = 1) — TODO confirm.
    batch_errors = 10 * log10(1 / ((1 / num_pixels) * tf.reduce_sum(grad_diff, [1, 2, 3])))
    return tf.reduce_mean(batch_errors)
# Example #4 (score: 0)
def bce_loss(preds, targets):
    """
    Computes the summed binary cross-entropy loss between predictions and labels.

    Duplicate of the bce_loss definition earlier in this file (scraped example);
    only the last definition wins at import time.

    @param preds: Tensor of predicted classifications, one per frame.
    @param targets: Tensor of labels for each frame.

    @return: A scalar tensor: the summed binary cross-entropy loss.
    """
    # -(targets^T.log(preds) + (1-targets)^T.log(1-preds)), squeezed to a scalar.
    return tf.squeeze(
        -1 * (tf.matmul(targets, log10(preds), transpose_a=True) +
              tf.matmul(1 - targets, log10(1 - preds), transpose_a=True)))