Example #1
  def discriminator_loss(self, D, y, fake_y, use_lsgan=True, dy=True):
    """ Note: default: D(y).shape == (batch_size,5,5,1),
                       fake_buffer_size=50, batch_size=1
    Args:
      D: discriminator object
      y: 4D tensor (batch_size, image_size, image_size, 3)
      fake_y: 4D tensor of generated samples
      dy: True for the Y-domain discriminator, False for the X-domain one
    Returns:
      loss: scalar
    """
    # Focus the loss on the region of interest (ROI) by masking both inputs (yw3025).
    # TODO: add transformation loss
    if dy:  # y discriminator
      y = tf.multiply(self.mask_y, y)
      fake_y = tf.multiply(self.mask_x, fake_y)
    else:  # x discriminator
      y = tf.multiply(self.mask_x, y)
      fake_y = tf.multiply(self.mask_y, fake_y)

    if use_lsgan:
      # use mean squared error
      error_real = tf.reduce_mean(tf.squared_difference(D(y), REAL_LABEL))
      error_fake = tf.reduce_mean(tf.square(D(fake_y)))
    else:
      # use cross entropy
      error_real = -tf.reduce_mean(ops.safe_log(D(y)))
      error_fake = -tf.reduce_mean(ops.safe_log(1 - D(fake_y)))
    loss = (error_real + error_fake) / 2
    return loss
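
Every example on this page references REAL_LABEL and ops.safe_log without defining them. The sketch below shows what these helpers typically look like in CycleGAN-style TensorFlow 1.x code; the 0.9 label-smoothing value and the epsilon are common choices, not values confirmed by these snippets.

import tensorflow as tf

# Target score for "real" samples; 0.9 (one-sided label smoothing) is a
# common choice, though some implementations use 1.0.
REAL_LABEL = 0.9

def safe_log(x, eps=1e-12):
    # Avoid log(0) when the discriminator saturates at exactly 0 or 1.
    return tf.log(x + eps)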
Example #2
 def discriminator_loss(self, D, y, fake_y, use_lsgan=True):
     if use_lsgan:
         # use mean squared error
         error_real = tf.reduce_mean(tf.squared_difference(
             D(y), REAL_LABEL))
         error_fake = tf.reduce_mean(tf.square(D(fake_y)))
     else:
         error_real = -tf.reduce_mean(ops.safe_log(D(y)))
         error_fake = -tf.reduce_mean(ops.safe_log(1 - D(fake_y)))
     loss = (error_real + error_fake) / 2
     return loss
Example #3
 def generator_loss(self, D, fake_y, use_lsgan=True):
     """Fool discriminator into believing that G(x) is real."""
     if use_lsgan:
         # Use mean squared error
         loss = tf.reduce_mean(tf.squared_difference(D(fake_y), REAL_LABEL))
     else:
         # heuristic, non-saturating loss
         loss = -tf.reduce_mean(ops.safe_log(D(fake_y))) / 2
     return loss
Example #4
 def generator_loss(self, D, fake_y, use_lsgan=True):
   """  fool discriminator into believing that G(x) is real
   """
   if use_lsgan:
     # use mean squared error
     loss = tf.reduce_mean(tf.squared_difference(D(fake_y), REAL_LABEL))
   else:
     # heuristic, non-saturating loss
     loss = -tf.reduce_mean(ops.safe_log(D(fake_y))) / 2
   return loss
Example #5
 def discriminator_loss(self, D, y, fake_y, use_lsgan=True):
   """ Note: default: D(y).shape == (batch_size,5,5,1),
                      fake_buffer_size=50, batch_size=1
    Args:
      D: discriminator object
      y: 4D tensor (batch_size, image_size, image_size, 3)
      fake_y: 4D tensor of generated samples
   Returns:
     loss: scalar
   """
   if use_lsgan:
     # use mean squared error
     error_real = tf.reduce_mean(tf.squared_difference(D(y), REAL_LABEL))
     error_fake = tf.reduce_mean(tf.square(D(fake_y)))
   else:
     # use cross entropy
     error_real = -tf.reduce_mean(ops.safe_log(D(y)))
     error_fake = -tf.reduce_mean(ops.safe_log(1-D(fake_y)))
   loss = (error_real + error_fake) / 2
   return loss
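
For context, here is a hedged sketch of how a generator/discriminator loss pair like the one above is typically wired into training ops in TensorFlow 1.x. The names model, G, D_Y, x, y and the Adam hyperparameters are illustrative assumptions, not taken from these snippets.

# Hypothetical wiring; assumes G and D_Y are callables exposing a
# `.variables` list, as in common CycleGAN-TensorFlow implementations.
fake_y = G(x)
g_loss = model.generator_loss(D_Y, fake_y, use_lsgan=True)
d_loss = model.discriminator_loss(D_Y, y, fake_y, use_lsgan=True)

g_optim = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(
    g_loss, var_list=G.variables)
d_optim = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(
    d_loss, var_list=D_Y.variables)

Alternating sess.run(g_optim) and sess.run(d_optim) steps then trains the two networks against each other.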
Example #6
def conditioned_generator_loss(D, condition, fake_y, use_lsgan=True):
    """  fool discriminator into believing that G(x) is real
    """
    if use_lsgan:
        # use mean squared error
        loss = tf.reduce_mean(
            tf.math.squared_difference(D(condition, fake_y), REAL_LABEL))
    else:
        # heuristic, non-saturating loss
        loss = -tf.reduce_mean(ops.safe_log(D(condition, fake_y))) / 2
    return loss
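
Unlike the unconditional versions, D here scores the pair (condition, fake_y), as in pix2pix-style conditional GANs. Below is a minimal sketch of what such a discriminator's entry point might look like, assuming the condition and image are concatenated along the channel axis; this stub is illustrative, not the source's actual architecture.

import tensorflow as tf

def conditional_D(condition, image):
    # Stack condition and candidate image channel-wise, then score patches.
    x = tf.concat([condition, image], axis=3)
    # A real discriminator stacks several conv layers; one suffices here.
    return tf.layers.conv2d(x, filters=1, kernel_size=4, strides=2,
                            padding='same')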
Example #7
 def discriminator_loss(self, D, y, fake_y, use_lsgan=True):
   """ Note: default: D(y).shape == (batch_size,5,5,1),
                      fake_buffer_size=50, batch_size=1
    Args:
      D: discriminator object
      y: 4D tensor (batch_size, image_size, image_size, 3)
      fake_y: 4D tensor of generated samples
   Returns:
     loss: scalar
   """
   if use_lsgan:
     # use mean squared error
     error_real = tf.reduce_mean(tf.squared_difference(D(y), REAL_LABEL))
     error_fake = tf.reduce_mean(tf.square(D(fake_y)))
   else:
     # use cross entropy
     error_real = -tf.reduce_mean(ops.safe_log(D(y)))
     error_fake = -tf.reduce_mean(ops.safe_log(1-D(fake_y)))
   loss = (error_real + error_fake) / 2
   return loss
Example #8
 def discriminator_loss(self, D, y, fake_y, use_lsgan=True):
      '''
      Note: default: D(y).shape == (batch_size,5,5,1),
                     fake_buffer_size=50, batch_size=1
      :param D: discriminator object
      :param y: 4D tensor (batch_size, image_size, image_size, 3)
      :param fake_y: 4D tensor of generated samples
      :param use_lsgan: if True, use least-squares loss; otherwise cross entropy
      :return: scalar loss
      '''
     if use_lsgan:
         # use mean squared error
         error_real = tf.reduce_mean(tf.squared_difference(
             D(y), REAL_LABEL))
         error_fake = tf.reduce_mean(tf.square(D(fake_y)))
     else:
         error_real = -tf.reduce_mean(ops.safe_log(D(y)))
         error_fake = -tf.reduce_mean(ops.safe_log(1 - D(fake_y)))
     loss = (error_real + error_fake) / 2
     return loss
Example #9
    def generator_loss(self, D_output_fake, gan_mode='lsgan', maxloss='mean'):
        """Fool discriminator into believing that G(x) is real."""
        if gan_mode == 'lsgan':
            if maxloss == 'mean':
                # use mean squared error
                loss = tf.reduce_mean(
                    tf.squared_difference(D_output_fake, REAL_LABEL))
            elif maxloss == 'max':
                # use max squared error
                loss = tf.reduce_max(
                    tf.squared_difference(D_output_fake, REAL_LABEL))
            elif maxloss == 'softmax':
                # use softmax-weighted squared error
                loss_map = (tf.squared_difference(D_output_fake, REAL_LABEL))
                batchsize = loss_map.get_shape()[0].value
                reshaped_loss_map = tf.reshape(loss_map, shape=[batchsize, -1])
                softmax_weight = tf.nn.softmax(reshaped_loss_map, axis=1)
                loss = tf.reduce_sum(softmax_weight * reshaped_loss_map)
            elif maxloss == 'focal':
                loss_map = (tf.squared_difference(D_output_fake, REAL_LABEL) +
                            tf.square(D_output_fake)) / 2
                loss_map_shape = loss_map.get_shape()
                D_output_fake_shape = D_output_fake.get_shape()
                # Down-weight confident outputs; the 1.5 coefficient is a
                # tunable choice.
                prob_weight = (1 - D_output_fake) * 1.5
                print('loss_map_shape:', loss_map_shape)
                print('D_output_fake_shape:', D_output_fake_shape)
                loss = tf.reduce_mean(prob_weight * loss_map)
            else:
                raise ValueError('unknown maxloss %s' % maxloss)

        elif gan_mode == 'lcgan':
            loss = tf.reduce_mean(
                tf.pow(tf.abs(tf.subtract(D_output_fake, REAL_LABEL)), 3))
        elif gan_mode == 'gan':
            # heuristic, non-saturating loss
            loss = -tf.reduce_mean(ops.safe_log(D_output_fake)) / 2
        elif gan_mode == 'gan_logits':
            if self.patchgan:
                constant05 = tf.constant(0.5, shape=(self.batch_size, 64))
                loss = tf.reduce_mean(
                    tf.losses.sigmoid_cross_entropy(constant05, D_output_fake))
            else:
                constant05 = tf.constant(0.5, shape=(self.batch_size, 1))
                loss = tf.reduce_mean(
                    tf.losses.sigmoid_cross_entropy(constant05, D_output_fake))
        elif gan_mode == 'wgangp':
            fake_result = D_output_fake
            g_loss = -tf.reduce_mean(
                fake_result)  # This optimizes the generator.
            return g_loss
        else:
            raise ValueError('unknown gan mode %s' % gan_mode)
        return loss
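
The 'softmax' branch above replaces the uniform average with a softmax weighting over the per-position squared errors, so positions where the discriminator is most confidently wrong dominate the loss. A self-contained toy check of that effect (TensorFlow 1.x, version 1.5 or later for the axis argument; the values are made up):

import tensorflow as tf

# Fake 1x2x2x1 discriminator output on generated samples.
d_out = tf.constant([[[[0.9], [0.1]], [[0.8], [0.2]]]], dtype=tf.float32)
loss_map = tf.squared_difference(d_out, 1.0)   # REAL_LABEL assumed to be 1.0
flat = tf.reshape(loss_map, [1, -1])
weights = tf.nn.softmax(flat, axis=1)          # larger errors get larger weights
mean_loss = tf.reduce_mean(flat)
softmax_loss = tf.reduce_sum(weights * flat)

with tf.Session() as sess:
    m, s = sess.run([mean_loss, softmax_loss])
    print(m, s)  # s > m: the weighting emphasizes the worst positions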
Example #10
 def generator_loss(self, D, fake_y, use_lsgan=True):
     """  fool discriminator into believing that G(x) is real
     """
     if use_lsgan:
         # use mean squared error
         # tf.squared_difference(x,y)
         # calculate the squared error between x and y
         loss = tf.reduce_mean(tf.squared_difference(D(fake_y), REAL_LABEL))
     else:
         # heuristic, non-saturating loss
         loss = -tf.reduce_mean(ops.safe_log(D(fake_y))) / 2
     return loss
Example #11
  def generator_loss(self, D, fake_y, use_lsgan=True):
    """Fool discriminator into believing that G(x) is real."""
    # Weight the generated image by its segmentation mask so the loss
    # focuses on the segmented region.
    fake_y = self.segmentation.get_one_result(fake_y) * fake_y

    if use_lsgan:
      # use mean squared error
      loss = tf.reduce_mean(tf.squared_difference(D(fake_y), REAL_LABEL))
    else:
      # heuristic, non-saturating loss
      loss = -tf.reduce_mean(ops.safe_log(D(fake_y))) / 2
    return loss
Example #12
 def generator_loss(self, D, fake_y, use_lsgan=True):
     if use_lsgan:
         loss = tf.reduce_mean(tf.squared_difference(D(fake_y), REAL_LABEL))
     else:
         loss = -tf.reduce_mean(ops.safe_log(D(fake_y))) / 2
     return loss
Example #13
    def discriminator_loss(self,
                           D,
                           D_output_fake,
                           D_output_real,
                           fake,
                           real,
                           gan_mode='lsgan',
                           maxloss='mean'):
        """ Note: default: D(y).shape == (batch_size,5,5,1),
                           fake_buffer_size=50, batch_size=1
        Args:
          G: generator object
          D: discriminator object
          y: 4D tensor (batch_size, image_size, image_size, 3)
        Returns:
          loss: scalar
        """
        if gan_mode == 'lsgan':
            if maxloss == 'mean':
                # use mean squared error
                error_real = tf.reduce_mean(
                    tf.squared_difference(D_output_real, REAL_LABEL))
                error_fake = tf.reduce_mean(tf.square(D_output_fake))
            elif maxloss == 'max':
                # use max squared error
                error_real = tf.reduce_max(
                    tf.squared_difference(D_output_real, REAL_LABEL))
                error_fake = tf.reduce_max(tf.square(D_output_fake))
            # elif maxloss == 'softmax':
            #     loss_map = (tf.squared_difference(D_output_real, REAL_LABEL) +
            #                 tf.square(D_output_fake)) / 2
            #     loss_map_shape = loss_map.get_shape()
            #     reshaped_loss_map = tf.reshape(loss_map, shape=[loss_map_shape[0], -1])
            #     softmax_weight = tf.nn.softmax(reshaped_loss_map, dim=1)
            #     error = tf.reduce_sum(softmax_weight * reshaped_loss_map)
            #     loss = error / 2
            #     return loss
            else:
                raise ValueError('unknown maxloss %s' % maxloss)

        elif gan_mode == 'lcgan':
            # use mean cubic error
            error_real = tf.reduce_mean(
                tf.pow(tf.abs(tf.subtract(D_output_real, REAL_LABEL)), 3))
            error_fake = tf.reduce_mean(tf.pow(tf.abs(D_output_fake), 3))
        elif gan_mode == 'gan':
            # use cross entropy
            error_real = -tf.reduce_mean(ops.safe_log(D_output_real))
            error_fake = -tf.reduce_mean(ops.safe_log(1 - D_output_fake))
        elif gan_mode == 'gan_logits':
            if self.patchgan:
                constant08 = tf.constant(0.8, shape=(self.batch_size, 64))
                constant02 = tf.constant(0.2, shape=(self.batch_size, 64))
                error_real = tf.reduce_mean(
                    tf.losses.sigmoid_cross_entropy(constant08, D_output_real))
                error_fake = tf.reduce_mean(
                    tf.losses.sigmoid_cross_entropy(constant02, D_output_fake))
            else:
                constant08 = tf.constant(0.8, shape=(self.batch_size, 1))
                constant02 = tf.constant(0.2, shape=(self.batch_size, 1))
                error_real = tf.reduce_mean(
                    tf.losses.sigmoid_cross_entropy(constant08, D_output_real))
                error_fake = tf.reduce_mean(
                    tf.losses.sigmoid_cross_entropy(constant02, D_output_fake))
        elif gan_mode == 'wgangp':
            # One interpolation coefficient per sample, shaped to broadcast
            # against 4D image tensors.
            alpha = tf.random_uniform(shape=[self.batch_size, 1, 1, 1],
                                      minval=0.,
                                      maxval=1.)
            real_result = D_output_real
            fake_result = D_output_fake
            d_loss = tf.reduce_mean(
                fake_result - real_result)  # This optimizes the discriminator.
            # Gradient penalty on random interpolates between real and fake.
            differences = fake - real
            interpolates = real + tf.multiply(alpha, differences)
            gradients = tf.gradients(D(interpolates), [interpolates])[0]
            slopes = tf.sqrt(
                tf.reduce_sum(tf.square(gradients), reduction_indices=[3]))
            gradient_penalty = tf.reduce_mean((slopes - 1.)**2)
            d_loss += LAMBDA * gradient_penalty
            return d_loss
        else:
            raise ValueError('unknown gan mode %s' % gan_mode)

        loss = (error_real + error_fake) / 2
        return loss
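
One caveat worth noting on the WGAN-GP branch: the snippet reduces the squared gradients only over axis 3, which penalizes a per-pixel gradient norm, whereas the original WGAN-GP formulation uses the per-sample norm, reducing over all non-batch axes. A minimal standalone sketch of the paper's version, with made-up shapes and a stand-in critic:

import tensorflow as tf

batch_size, h, w, c = 4, 32, 32, 3
real = tf.random_normal([batch_size, h, w, c])
fake = tf.random_normal([batch_size, h, w, c])

# One interpolation coefficient per sample, broadcast over H, W, C.
alpha = tf.random_uniform([batch_size, 1, 1, 1], minval=0., maxval=1.)
interpolates = real + alpha * (fake - real)

def toy_D(x):
    # Stand-in critic: any differentiable map to one score per sample.
    return tf.reduce_mean(x, axis=[1, 2, 3])

gradients = tf.gradients(toy_D(interpolates), [interpolates])[0]
# Per-sample gradient norm, as in the WGAN-GP paper.
slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1, 2, 3]))
gradient_penalty = tf.reduce_mean((slopes - 1.) ** 2)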