Example #1
    def metric_summaries(self, x, y, g, y_hat, args, name=None):
        # metrics from Eigen et al. 2014 ("Depth Map Prediction from a Single
        # Image using a Multi-Scale Deep Network")
        ns = 'metrics' if name is None else 'metrics_' + name
        with tf.variable_scope(ns):
            g = g / 10.0  # NOTE: g is rescaled but otherwise unused in this method
            y = y / 10.0
            y_hat = y_hat / 10.0

            # standard pixel-wise difference metrics; relative errors are
            # measured against the ground truth y, per Eigen et al.
            abs_rel_diff = tf.reduce_mean(tf.abs(y - y_hat) / y, name='abs_rel_diff')
            squared_rel_diff = tf.reduce_mean(tf.square(y - y_hat) / y, name='squared_rel_diff')
            linear_rmse = hem.rmse(y, y_hat, name='linear_rmse')
            log_rmse = hem.rmse(tf.log(y + 1e-8), tf.log(y_hat + 1e-8), name='log_rmse')
            tf.summary.scalar('abs_rel_diff', abs_rel_diff)
            tf.summary.scalar('squared_rel_diff', squared_rel_diff)
            tf.summary.scalar('linear_rmse', linear_rmse)
            tf.summary.scalar('log_rmse', log_rmse)

            # scale-invariant log-space error (Eigen et al., with lambda = 1):
            # mean(d^2) - (sum d)^2 / n^2; note no square root despite the name
            d = tf.log(y + 1e-8) - tf.log(y_hat + 1e-8)
            n = tf.cast(tf.size(d), tf.float32)  # tf.size(d) == 430592 for this batch shape
            scale_invariant_log_rmse = tf.reduce_mean(tf.square(d)) - (tf.reduce_sum(d) ** 2) / (n ** 2)
            tf.summary.scalar('scale_invariant_log_rmse', scale_invariant_log_rmse)

            # threshold metrics: fraction of pixels with max(y/y_hat, y_hat/y) below 1.25^k
            delta = tf.maximum(y / y_hat, y_hat / y)
            t1, t1_op = tf.metrics.percentage_below(delta, 1.25,    name='threshold1')
            t2, t2_op = tf.metrics.percentage_below(delta, 1.25**2, name='threshold2')
            t3, t3_op = tf.metrics.percentage_below(delta, 1.25**3, name='threshold3')
            # summarizing the update ops means each summary evaluation also
            # advances the running percentages
            tf.summary.scalar('threshold1', t1_op)
            tf.summary.scalar('threshold2', t2_op)
            tf.summary.scalar('threshold3', t3_op)
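
The examples on this page all lean on hem.rmse, which is not shown here. A minimal sketch of what it presumably computes (root-mean-square error over all elements, TF 1.x API), consistent with the values asserted in the test in Example #7:

import tensorflow as tf

def rmse(y, y_hat, name=None):
    # Presumed behavior of hem.rmse: square root of the mean squared
    # element-wise difference. Matches Example #7: rmse(1, 0) == 1,
    # rmse(-1, 1) == 2.
    return tf.sqrt(tf.reduce_mean(tf.square(y - y_hat)), name=name)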
Example #2
 def loss(self, x, y, y_hat, args, reuse=False):
     """RMSE loss between targets y and predictions y_hat (both scaled by 1/10)."""
     with tf.variable_scope('loss'):
         y_bar = tf.reduce_mean(y, axis=[2, 3], keep_dims=True)  # per-image mean; NOTE: unused below
         y = y / 10.0
         y_hat = y_hat / 10.0
         y_bar = y_bar / 10.0
         l = hem.rmse(y, y_hat)
         if not reuse:
             hem.add_to_collection('losses', [l])
     return l
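
hem.add_to_collection is another helper that is not shown. Judging from the explicit tf.add_to_collection loop in Example #3 below, it is presumably just a list-friendly wrapper; a sketch under that assumption:

def add_to_collection(name, ops):
    # Presumed: register each op under the named graph collection key,
    # mirroring the explicit loop in Example #3.
    for op in ops:
        tf.add_to_collection(name, op)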
Example #3
def losses(x, g, d_fake, d_real, args, reuse=False):
    """Add loss nodes to the graph depending on the model type.

    This implements an Improved Wasserstein loss with an additional
    reconstruction loss term in the generator.

    Args:
        x: Tensor, the real (input) images.
        g: Tensor, the fake images (i.e., the output from the generator).
        d_fake: Tensor, the discriminator's output on the fake images.
        d_real: Tensor, the discriminator's output on the real images.
        args: Argparse struct.
        reuse: Boolean, whether to reuse the variables in this scope.

    Returns:
        g_loss: Op, the loss op for the generator.
        d_loss: Op, the loss op for the discriminator.

    """
    # generator loss
    _, x_depth = _split(x)  # _split presumably separates the color and depth channels
    _, g_depth = _split(g)
    if not reuse:
        hem.histograms(('loss_x_depth', x_depth), ('loss_g_depth', g_depth))
    # rescale depth from [-1, 1] to [0, 1]
    x_depth = (x_depth + 1.0) / 2.0
    g_depth = (g_depth + 1.0) / 2.0

    if not reuse:
        hem.histograms(('loss_x_depth_rescaled', x_depth),
                       ('loss_g_depth_rescaled', g_depth))

    l_term = 1.0  # weight on the reconstruction term
    rmse_loss = hem.rmse(x_depth, g_depth, name='rmse')
    g_loss = tf.identity(-tf.reduce_mean(d_fake) + l_term * rmse_loss,
                         name='g_loss')

    # discriminator loss
    l_term = 10.0  # gradient-penalty weight (the standard WGAN-GP lambda)
    with tf.variable_scope('discriminator'):
        gp = tf.identity(_gradient_penalty(x, g, args), 'grad_penalty')
    d_gan_loss = tf.identity(tf.reduce_mean(d_fake) - tf.reduce_mean(d_real),
                             name='d_loss1')
    d_grad_penalty_loss = tf.identity(l_term * gp, name='d_loss2')
    d_loss = tf.identity(d_gan_loss + d_grad_penalty_loss, name='d_loss')

    # track losses in the 'losses' collection
    if not reuse:
        for l in [g_loss, d_loss, rmse_loss, d_gan_loss, d_grad_penalty_loss]:
            tf.add_to_collection('losses', l)

    return g_loss, d_loss
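
_split and _gradient_penalty are module-private helpers that are not shown. Given the docstring, _gradient_penalty presumably implements the standard improved-Wasserstein gradient penalty (Gulrajani et al. 2017); here is a sketch, where the discriminator(...) call is a hypothetical stand-in and 4-D image batches are assumed:

def _gradient_penalty(x, g, args):
    # WGAN-GP: penalize the discriminator's gradient norm on random
    # interpolates between real (x) and fake (g) batches, pushing it toward 1.
    batch_size = tf.shape(x)[0]
    eps = tf.random_uniform([batch_size, 1, 1, 1], minval=0.0, maxval=1.0)
    x_interp = eps * x + (1.0 - eps) * g
    d_interp = discriminator(x_interp, args, reuse=True)  # hypothetical helper
    grads = tf.gradients(d_interp, [x_interp])[0]
    norms = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]) + 1e-12)
    return tf.reduce_mean(tf.square(norms - 1.0))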
Example #4
    def loss(d_real,
             d_real_logits,
             d_fake,
             d_fake_logits,
             g,
             x_depth,
             args,
             reuse=False):
        """Adds loss nodes to the graph.

        Args:
            d_real: Tensor, the discriminator's output with a real batch.
            d_fake: Tensor, the discriminator's output with a fake batch.
            g: Tensor, the generator's output.
            x_depth: Tensor, the real depth maps.
            reuse: Bool, whether to add the loss nodes to the loss collection
                   for later summary collection. Should only be True once.

        Returns:
            Tensors, the losses for the generator and discriminator, respectively.
        """
        def xentropy(logits, labels):
            return tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,
                                                           labels=labels)

        with tf.variable_scope('loss'):
            g = hem.rescale(g, (-1, 1), (0, 1))
            x_depth = hem.rescale(x_depth, (-1, 1), (0, 1))
            # losses
            with tf.variable_scope('generator'):
                g_fake = tf.reduce_mean(xentropy(d_fake_logits,
                                                 tf.ones_like(d_fake)),
                                        name='g_fake')
            with tf.variable_scope('discriminator'):
                d_real = tf.reduce_mean(xentropy(d_real_logits,
                                                 tf.ones_like(d_real)),
                                        name='d_real')
                d_fake = tf.reduce_mean(xentropy(d_fake_logits,
                                                 tf.zeros_like(d_fake)),
                                        name='d_fake')
                d_total = tf.identity(d_real + d_fake, name='total')
            rmse_loss = hem.rmse(x_depth, g)
            l1_loss = tf.reduce_mean(tf.abs(x_depth - g), name='l1')

        # only add these to the collection once
        if not reuse:
            hem.add_to_collection(
                'losses',
                [g_fake, d_real, d_fake, d_total, rmse_loss, l1_loss])
        return g_fake, d_total
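
hem.rescale is also not shown; from its call sites it maps a tensor linearly from one value range to another. A minimal sketch, assuming that semantics:

def rescale(t, from_range, to_range):
    # Linear map from [a, b] to [c, d]: t -> c + (t - a) * (d - c) / (b - a).
    # rescale(g, (-1, 1), (0, 1)) thus shifts tanh-style outputs into [0, 1].
    a, b = from_range
    c, d = to_range
    return c + (t - a) * (d - c) / (b - a)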
Example #5
def metrics(x, y, g, y_hat):
    """Eigen et al. evaluation metrics between targets y and predictions y_hat."""
    g = g / 10.0  # NOTE: g (and x) are otherwise unused in this function
    y = y / 10.0
    y_hat = y_hat / 10.0
    linear_rmse = hem.rmse(y, y_hat)
    log_rmse = hem.rmse(tf.log(y + 1e-8), tf.log(y_hat + 1e-8))
    abs_rel_diff = tf.reduce_mean(tf.abs(y - y_hat) / y)        # relative to ground truth
    squared_rel_diff = tf.reduce_mean(tf.square(y - y_hat) / y)
    # scale-invariant log error (Eigen et al., lambda = 1); no square root despite the name
    d = tf.log(y + 1e-8) - tf.log(y_hat + 1e-8)
    n = tf.cast(tf.size(d), tf.float32)  # tf.size(d) == 430592 for this batch shape
    scale_invariant_log_rmse = tf.reduce_mean(tf.square(d)) - (tf.reduce_sum(d) ** 2) / (n ** 2)
    # threshold metrics: fraction of pixels with max(y/y_hat, y_hat/y) below 1.25^k
    delta = tf.maximum(y / y_hat, y_hat / y)
    t1, t1_op = tf.metrics.percentage_below(delta, 1.25, name='threshold1')
    t2, t2_op = tf.metrics.percentage_below(delta, 1.25 ** 2, name='threshold2')
    t3, t3_op = tf.metrics.percentage_below(delta, 1.25 ** 3, name='threshold3')
    all_metrics = {'linear_rmse': linear_rmse,
                   'log_rmse': log_rmse,
                   'abs_rel_diff': abs_rel_diff,
                   'squared_rel_diff': squared_rel_diff,
                   'scale_invariant_log_rmse': scale_invariant_log_rmse,
                   't1': t1_op,
                   't2': t2_op,
                   't3': t3_op}
    return all_metrics
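
The t1/t2/t3 entries are tf.metrics update ops, which keep running state in local variables, so evaluating the returned dict requires tf.local_variables_initializer(). A minimal usage sketch; the placeholder shapes are assumptions:

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, (1, 3, 64, 64))   # unused by metrics()
y = tf.placeholder(tf.float32, (1, 1, 64, 64))
y_hat = tf.placeholder(tf.float32, (1, 1, 64, 64))
m = metrics(x, y, y_hat, y_hat)  # g is unused past its rescale; reuse y_hat

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # tf.metrics state lives here
    vals = sess.run(m, feed_dict={y: np.full((1, 1, 64, 64), 5.0, np.float32),
                                  y_hat: np.full((1, 1, 64, 64), 4.0, np.float32)})
    print(vals['linear_rmse'], vals['t1'])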
Example #6
    def loss(d_real, d_real_logits, d_fake, d_fake_logits, x, g, x_depth, q, args, reuse=False):
        """Adds cross-entropy GAN loss nodes. The commented-out blocks below are
        disabled sparsity/RMSE variants controlled by args.g_sparsity/args.g_rmse."""
        def xentropy(logits, labels):
            return tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels)

        with tf.variable_scope('loss'):
            g = hem.rescale(g, (-1, 1), (0, 1))
            x_depth = hem.rescale(x_depth, (-1, 1), (0, 1))
            rmse_loss = hem.rmse(x_depth, g)
            l1_loss = tf.reduce_mean(tf.abs(x_depth - g), name='l1')

            # losses
            with tf.variable_scope('generator'):
                g_fake = tf.reduce_mean(xentropy(d_fake_logits, tf.ones_like(d_fake)), name='g_fake')
                # if args.g_sparsity:
                #     layers = tf.get_collection('conv_layers')
                #     for l in layers:
                #         if 'e5' in l.name:
                #             sparsity_term = tf.nn.zero_fraction(l, name='sparsity_term')
                #     lambda_term = 1.0
                #     if args.g_rmse:
                #         g_total = tf.identity(g_fake - lambda_term * sparsity_term + rmse_loss, name='g_total')
                #     else:
                #         g_total = tf.identity(g_fake - lambda_term * sparsity_term, name='g_total')
                # elif args.g_rmse:
                #     g_total = tf.identity(g_fake + rmse_loss, name='g_total')

            with tf.variable_scope('discriminator'):
                d_real = tf.reduce_mean(xentropy(d_real_logits, tf.ones_like(d_real)), name='d_real')
                d_fake = tf.reduce_mean(xentropy(d_fake_logits, tf.zeros_like(d_fake)), name='d_fake')
                d_total = tf.identity(d_real + d_fake, name='d_total')
            # only add these to the collection once
            if not reuse:
                hem.add_to_collection('losses', [g_fake, d_real, d_fake, d_total, rmse_loss, l1_loss])
                # if args.g_sparsity:
                #     hem.add_to_collection('losses', [g_total, sparsity_term])
                # elif args.g_rmse:
                #     hem.add_to_collection('losses', [g_total])
        # if args.g_sparsity or args.g_rmse:
        #     return g_total, d_total
        # else:
        return g_fake, d_total
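
A sketch of how such a (generator, discriminator) loss pair is typically wired to two optimizers, each restricted to its own variables. The model scope names 'generator'/'discriminator' and the Adam hyperparameters are assumptions, not taken from this codebase:

g_loss, d_loss = loss(d_real, d_real_logits, d_fake, d_fake_logits,
                      x, g, x_depth, q, args)
g_vars = [v for v in tf.trainable_variables() if v.name.startswith('generator')]
d_vars = [v for v in tf.trainable_variables() if v.name.startswith('discriminator')]
g_train = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(g_loss, var_list=g_vars)
d_train = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(d_loss, var_list=d_vars)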
Example #7
 def test_rmse(self):
     with self.test_session() as sess:
         x = tf.placeholder(tf.float32, (1, 64, 64, 3))
         x_hat = tf.placeholder(tf.float32, (1, 64, 64, 3))
         x_data = np.ones((1, 64, 64, 3))
         l = hem.rmse(x, x_hat)
         # 1 - 1 = 0
         x_hat_data_ones = np.ones((1, 64, 64, 3))
         results = sess.run(l,
                            feed_dict={
                                x: x_data,
                                x_hat: x_hat_data_ones
                            })
         self.assertAllClose(results, 0)
         # 1 - 0 = 1
         x_hat_data_zeros = np.zeros((1, 64, 64, 3))
         results = sess.run(l,
                            feed_dict={
                                x: x_data,
                                x_hat: x_hat_data_zeros
                            })
         self.assertAllClose(results, 1)
         # |-1 - 1| = 2
         x_data = np.ones((1, 64, 64, 3)) * -1
         results = sess.run(l,
                            feed_dict={
                                x: x_data,
                                x_hat: x_hat_data_ones
                            })
         self.assertAllClose(results, 2)
         # |1 - (-1)| = 2 (rmse is symmetric)
         results = sess.run(l,
                            feed_dict={
                                x: x_hat_data_ones,
                                x_hat: x_data
                            })
         self.assertAllClose(results, 2)
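
The method above assumes a tf.test.TestCase subclass (test_session() comes from there). A minimal standalone harness, assuming hem is importable:

import numpy as np
import tensorflow as tf
import hem

class RmseTest(tf.test.TestCase):
    def test_rmse_basic(self):
        with self.test_session() as sess:
            x = tf.placeholder(tf.float32, (1, 64, 64, 3))
            x_hat = tf.placeholder(tf.float32, (1, 64, 64, 3))
            l = hem.rmse(x, x_hat)
            result = sess.run(l, {x: np.ones((1, 64, 64, 3)),
                                  x_hat: np.zeros((1, 64, 64, 3))})
            self.assertAllClose(result, 1.0)  # rmse(1, 0) == 1

if __name__ == '__main__':
    tf.test.main()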