Example No. 1
 def create_loss(self, features, mode, logits, labels):
     """See `Head`."""
     del mode  # Unused for this head.
     logits = ops.convert_to_tensor(logits)
     processed_labels = self._process_labels(labels)
     processed_labels = head_lib._check_dense_labels_match_logits_and_reshape(  # pylint:disable=protected-access
         labels=processed_labels,
         logits=logits,
         expected_labels_dimension=self.logits_dimension)
     if self._loss_fn:
         unweighted_loss = head_lib._call_loss_fn(  # pylint:disable=protected-access
             loss_fn=self._loss_fn,
             labels=processed_labels,
             logits=logits,
             features=features,
             expected_loss_dim=1)
     else:
         unweighted_loss = losses.sigmoid_cross_entropy(
             multi_class_labels=processed_labels,
             logits=logits,
             reduction=losses.Reduction.NONE)
         # Averages loss over classes.
         unweighted_loss = math_ops.reduce_mean(unweighted_loss,
                                                axis=-1,
                                                keepdims=True)
     weights = head_lib._get_weights_and_check_match_logits(  # pylint:disable=protected-access
         features=features,
         weight_column=self._weight_column,
         logits=logits)
     training_loss = losses.compute_weighted_loss(
         unweighted_loss, weights=weights, reduction=self._loss_reduction)
     return head_lib.LossSpec(training_loss=training_loss,
                              unreduced_loss=unweighted_loss,
                              weights=weights,
                              processed_labels=processed_labels)
Example No. 2
 def create_loss(self, features, mode, logits, labels):
   """See `Head`."""
   del mode  # Unused for this head.
   logits = ops.convert_to_tensor(logits)
   processed_labels = self._process_labels(labels)
   processed_labels = head_lib._check_dense_labels_match_logits_and_reshape(  # pylint:disable=protected-access
       labels=processed_labels, logits=logits,
       expected_labels_dimension=self.logits_dimension)
   if self._loss_fn:
     unweighted_loss = head_lib._call_loss_fn(  # pylint:disable=protected-access
         loss_fn=self._loss_fn, labels=processed_labels, logits=logits,
         features=features, expected_loss_dim=1)
   else:
     unweighted_loss = losses.sigmoid_cross_entropy(
         multi_class_labels=processed_labels, logits=logits,
         reduction=losses.Reduction.NONE)
     # Averages loss over classes.
     unweighted_loss = math_ops.reduce_mean(
         unweighted_loss, axis=-1, keepdims=True)
   weights = head_lib._get_weights_and_check_match_logits(  # pylint:disable=protected-access
       features=features, weight_column=self._weight_column, logits=logits)
   training_loss = losses.compute_weighted_loss(
       unweighted_loss, weights=weights, reduction=self._loss_reduction)
   return head_lib.LossSpec(
       training_loss=training_loss,
       unreduced_loss=unweighted_loss,
       weights=weights,
       processed_labels=processed_labels)
Example No. 3
 def create_loss(self, features, mode, logits, labels):
     """See `Head`."""
     del mode  # Unused for this head.
     processed_labels = self._process_labels(labels)
     if self._loss_fn:
         unweighted_loss = _call_loss_fn(loss_fn=self._loss_fn,
                                         labels=processed_labels,
                                         logits=logits,
                                         features=features)
     else:
         unweighted_loss = losses.sigmoid_cross_entropy(
             multi_class_labels=processed_labels,
             logits=logits,
             reduction=losses.Reduction.NONE)
         # Averages loss over classes.
         unweighted_loss = math_ops.reduce_mean(unweighted_loss,
                                                axis=-1,
                                                keepdims=True)
     weights = head_lib._weights(features, self._weight_column)  # pylint:disable=protected-access
     weighted_sum_loss = losses.compute_weighted_loss(
         unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
     # _weights() can return 1.
     example_weight_sum = math_ops.reduce_sum(
         weights * array_ops.ones_like(unweighted_loss))
     return head_lib.LossSpec(weighted_sum_loss=weighted_sum_loss,
                              example_weight_sum=example_weight_sum,
                              processed_labels=processed_labels)
Example No. 4
 def create_loss(self, features, mode, logits, labels):
   """See `Head`."""
   del mode  # Unused for this head.
   processed_labels = self._process_labels(labels)
   if self._loss_fn:
     unweighted_loss = _call_loss_fn(
         loss_fn=self._loss_fn, labels=processed_labels, logits=logits,
         features=features)
   else:
     unweighted_loss = losses.sigmoid_cross_entropy(
         multi_class_labels=processed_labels, logits=logits,
         reduction=losses.Reduction.NONE)
     # Averages loss over classes.
     unweighted_loss = math_ops.reduce_mean(
         unweighted_loss, axis=-1, keepdims=True)
   weights = head_lib._weights(features, self._weight_column)  # pylint:disable=protected-access
   weighted_sum_loss = losses.compute_weighted_loss(
       unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
   # _weights() can return 1.
   example_weight_sum = math_ops.reduce_sum(
       weights * array_ops.ones_like(unweighted_loss))
   return head_lib.LossSpec(
       weighted_sum_loss=weighted_sum_loss,
       example_weight_sum=example_weight_sum,
       processed_labels=processed_labels)
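The LossSpec in Examples 3 and 4 pairs a SUM-reduced loss with the effective example-weight count, so a mean can be recovered downstream as weighted_sum_loss / example_weight_sum. A minimal standalone sketch of that bookkeeping, using made-up values and the public TF 1.x API:

import tensorflow as tf

unweighted_loss = tf.constant([[0.3], [0.7]])  # per-example loss, shape [2, 1]
weights = tf.constant([[2.0], [0.0]])          # a zero weight drops an example
weighted_sum_loss = tf.losses.compute_weighted_loss(
    unweighted_loss, weights=weights, reduction=tf.losses.Reduction.SUM)
# Broadcasting against ones_like handles the case where weights is a scalar.
example_weight_sum = tf.reduce_sum(weights * tf.ones_like(unweighted_loss))
mean_loss = weighted_sum_loss / example_weight_sum  # 0.6 / 2.0 = 0.3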
Example No. 5
 def create_loss(self, features, mode, logits, labels):
   """See `Head`."""
   del mode, features  # Unused for this head.
   processed_labels = self._process_labels(labels)
   unweighted_loss = losses.sigmoid_cross_entropy(
       multi_class_labels=processed_labels, logits=logits,
       reduction=losses.Reduction.NONE)
   return head_lib.LossAndLabels(
       unweighted_loss=unweighted_loss,
       processed_labels=processed_labels)
Example No. 6
 def create_loss(self, features, mode, logits, labels):
   """See `Head`."""
   del mode, features  # Unused for this head.
   processed_labels = self._process_labels(labels)
   unweighted_loss = losses.sigmoid_cross_entropy(
       multi_class_labels=processed_labels, logits=logits,
       reduction=losses.Reduction.NONE)
   # Averages loss over classes.
   unweighted_loss = math_ops.reduce_mean(
       unweighted_loss, axis=-1, keepdims=True)
   return head_lib.LossAndLabels(
       unweighted_loss=unweighted_loss,
       processed_labels=processed_labels)
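Every multi-label head above shares the same default: elementwise sigmoid cross entropy with Reduction.NONE, then a mean over the class axis so each example contributes a single loss value. A self-contained sketch with hypothetical shapes (TF 1.x API):

import tensorflow as tf

labels = tf.constant([[1.0, 0.0, 1.0]])   # shape [batch=1, n_classes=3]
logits = tf.constant([[2.0, -1.0, 0.5]])  # same shape as labels
per_class_loss = tf.losses.sigmoid_cross_entropy(
    multi_class_labels=labels, logits=logits,
    reduction=tf.losses.Reduction.NONE)   # shape [1, 3]
# Averages loss over classes, keeping a [batch, 1] shape for weighting.
per_example_loss = tf.reduce_mean(per_class_loss, axis=-1, keepdims=True)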
Example No. 7
def _logistic_regression_model_fn(features, labels, mode):
  _ = mode
  logits = layers.linear(
      features,
      1,
      weights_initializer=init_ops.zeros_initializer(),
      # Intentionally uses really awful initial values so that
      # AUC/precision/recall/etc will change meaningfully even on a toy dataset.
      biases_initializer=init_ops.constant_initializer(-10.0))
  predictions = math_ops.sigmoid(logits)
  loss = losses.sigmoid_cross_entropy(labels, logits)
  train_op = optimizers.optimize_loss(
      loss, training_util.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
  return predictions, loss, train_op
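Stripped of the Estimator plumbing, the loss in this model_fn is plain sigmoid cross entropy on [batch_size, 1] labels and logits. A hedged sketch with hypothetical values:

import tensorflow as tf

labels = tf.constant([[1.0], [0.0], [1.0]])      # binary targets, shape [3, 1]
logits = tf.constant([[-9.8], [-10.2], [-9.9]])  # near the -10.0 bias init above
loss = tf.losses.sigmoid_cross_entropy(labels, logits)  # scalar; a mean with unit weights
predictions = tf.sigmoid(logits)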
Example No. 8
def modified_generator_loss(
    discriminator_gen_outputs,
    label_smoothing=0.0,
    weights=1.0,
    scope=None,
    loss_collection=ops.GraphKeys.LOSSES,
    reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
    add_summaries=False):
  """Modified generator loss for GANs.

  L = -log(sigmoid(D(G(z))))

  This is the trick used in the original paper to avoid vanishing gradients
  early in training. See `Generative Adversarial Nets`
  (https://arxiv.org/abs/1406.2661) for more details.

  Args:
    discriminator_gen_outputs: Discriminator output on generated data. Expected
      to be in the range of (-inf, inf).
    label_smoothing: The amount of smoothing for positive labels. This technique
      is taken from `Improved Techniques for Training GANs`
      (https://arxiv.org/abs/1606.03498). `0.0` means no smoothing.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `discriminator_gen_outputs`, and must be broadcastable to
      `discriminator_gen_outputs` (i.e., all dimensions must be either `1`,
      or the same as the corresponding dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which this loss will be added.
    reduction: A `tf.losses.Reduction` to apply to loss.
    add_summaries: Whether or not to add summaries for the loss.

  Returns:
    A loss Tensor. The shape depends on `reduction`.
  """
  with ops.name_scope(scope, 'generator_modified_loss',
                      [discriminator_gen_outputs]) as scope:
    loss = losses.sigmoid_cross_entropy(
        array_ops.ones_like(discriminator_gen_outputs),
        discriminator_gen_outputs, weights, label_smoothing, scope,
        loss_collection, reduction)

    if add_summaries:
      summary.scalar('generator_modified_loss', loss)

  return loss
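Because the labels are all ones, the call above reduces to -log(sigmoid(D(G(z)))) per element, followed by the chosen reduction. A sketch of the direct equivalent with hypothetical discriminator outputs:

import tensorflow as tf

d_on_generated = tf.constant([[-3.0], [0.5]])  # D(G(z)), any real values
generator_loss = tf.losses.sigmoid_cross_entropy(
    tf.ones_like(d_on_generated), d_on_generated)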
Example No. 9
def acgan_generator_loss(
    discriminator_gen_classification_logits,
    fake_class_labels,
    weights=1.0,
    scope=None,
    loss_collection=ops.GraphKeys.LOSSES,
    reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
    add_summaries=False):
  """ACGAN loss for the generator.
  The ACGAN loss adds a classification loss to the conditional discriminator.
  Therefore, the discriminator must output a tuple consisting of
    (1) the real/fake prediction and
    (2) the logits for the classification (usually the last conv layer,
        flattened).
  For more details:
    ACGAN: https://arxiv.org/abs/1610.09585
  Args:
    discriminator_gen_classification_logits: Classification logits for generated
      data.
    fake_class_labels: One-hot `Tensor` of class labels for the generated data.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `discriminator_gen_classification_logits`, and must be broadcastable to
      `discriminator_gen_classification_logits` (i.e., all dimensions must be
      either `1`, or the same as the corresponding dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which this loss will be added.
    reduction: A `tf.losses.Reduction` to apply to loss.
    add_summaries: Whether or not to add summaries for the loss.
  Returns:
    A loss Tensor. Shape depends on `reduction`.
  Raises:
    TypeError: If the discriminator does not output a tuple.
  """
  loss = losses.sigmoid_cross_entropy(
      fake_class_labels, discriminator_gen_classification_logits, weights=weights,
      scope=scope, loss_collection=loss_collection, reduction=reduction)

  if add_summaries:
    summary.scalar('generator_ac_loss', loss)

  return loss
Example No. 10
def _sigmoid_cross_entropy_loss(
        labels,
        logits,
        weights=None,
        reduction=core_losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
        name=None):
    """Computes the sigmoid_cross_entropy loss for a list.

  Given graded-relevance labels l_i and logits s_i, we compute the sigmoid
  cross entropy at each position i and aggregate the per-position losses.

  Args:
    labels: A `Tensor` of the same shape as `logits` representing graded
      relevance.
    logits: A `Tensor` with shape [batch_size, list_size]. Each value is the
      ranking score of the corresponding item.
    weights: A scalar, a `Tensor` with shape [batch_size, 1] for list-wise
      weights, or a `Tensor` with shape [batch_size, list_size] for item-wise
      weights.
    reduction: One of `tf.losses.Reduction` except `NONE`. Describes how to
      reduce training loss over batch.
    name: A string used as the name for this loss.

  Returns:
    An op for the sigmoid cross entropy as a loss.
  """
    with ops.name_scope(name, 'sigmoid_cross_entropy_loss',
                        (labels, logits, weights)):
        is_label_valid = array_ops.reshape(utils.is_label_valid(labels), [-1])
        weights = 1.0 if weights is None else ops.convert_to_tensor(weights)
        weights = array_ops.ones_like(labels) * weights
        label_vector, logit_vector, weight_vector = [
            array_ops.boolean_mask(array_ops.reshape(x, [-1]), is_label_valid)
            for x in [labels, logits, weights]
        ]
        return core_losses.sigmoid_cross_entropy(label_vector,
                                                 logit_vector,
                                                 weights=weight_vector,
                                                 reduction=reduction)
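The masking step assumes padded entries carry an invalid label; in tensorflow_ranking, utils.is_label_valid treats labels >= 0 as valid, so -1 marks padding. A sketch of the same flatten-and-mask pattern with hypothetical data:

import tensorflow as tf

labels = tf.constant([[1.0, 0.0, -1.0]])  # -1. pads the list to a fixed size
logits = tf.constant([[0.3, -0.2, 5.0]])
is_valid = tf.reshape(tf.greater_equal(labels, 0.0), [-1])
flat_labels = tf.boolean_mask(tf.reshape(labels, [-1]), is_valid)
flat_logits = tf.boolean_mask(tf.reshape(logits, [-1]), is_valid)
loss = tf.losses.sigmoid_cross_entropy(flat_labels, flat_logits)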
Example No. 11
 def _unweighted_loss_and_weights(self, logits, processed_labels, features):
     """Computes loss spec."""
     if self._loss_fn:
         unweighted_loss = base_head.call_loss_fn(loss_fn=self._loss_fn,
                                                  labels=processed_labels,
                                                  logits=logits,
                                                  features=features,
                                                  expected_loss_dim=1)
     else:
         unweighted_loss = losses.sigmoid_cross_entropy(
             multi_class_labels=processed_labels,
             logits=logits,
             reduction=losses.Reduction.NONE)
         # Averages loss over classes.
         unweighted_loss = math_ops.reduce_mean(unweighted_loss,
                                                axis=-1,
                                                keepdims=True)
     weights = base_head.get_weights_and_check_match_logits(
         features=features,
         weight_column=self._weight_column,
         logits=logits)
     return unweighted_loss, weights
Example No. 12
def binary_cross_entropy(labels, logits, weights=1.0):
    """ Binary Cross Entropy

    Measures the probability error in discrete binary classification tasks in which each class is
    independent and not mutually exclusive. The cross entropy between two distributions p and q is
    defined as:

        H(p, q) = -sum_x p(x) * log(q(x))

    Warning:
        This is to be used on the logits of a model, not on the predicted labels.
        See ``tf.nn.sigmoid_cross_entropy_with_logits``.

    Args:
        labels: ground truth, correct values
        logits: a tensor with the unscaled log probabilities used to predict the labels with sigmoid(logits)
        weights: Optional `Tensor` whose rank is either 0, or the same rank as
            `labels`, and must be broadcastable to `labels` (i.e., all dimensions
            must be either `1`, or the same as the corresponding `labels` dimension).

    Returns:
        ``Tensor``: a float ``Tensor``.

    """
    return sigmoid_cross_entropy(labels, logits, weights)
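As the Warning notes, the helper expects logits; underneath it is tf.nn.sigmoid_cross_entropy_with_logits plus a reduction, and with unit weights the default SUM_BY_NONZERO_WEIGHTS reduction is effectively a mean over elements. A sketch of that relationship with hypothetical values:

import tensorflow as tf

labels = tf.constant([[1.0], [0.0]])
logits = tf.constant([[0.7], [-1.2]])
per_element = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)
mean_loss = tf.reduce_mean(per_element)  # matches sigmoid_cross_entropy with unit weights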
Example No. 13
def minimax_discriminator_loss(
    discriminator_real_outputs,
    discriminator_gen_outputs,
    label_smoothing=0.25,
    real_weights=1.0,
    generated_weights=1.0,
    scope=None,
    loss_collection=ops.GraphKeys.LOSSES,
    reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
    add_summaries=False):
  """Original minimax discriminator loss for GANs, with label smoothing.

  Note that the authors don't recommend using this loss. A more practically
  useful loss is `modified_discriminator_loss`.

  L = - real_weights * log(sigmoid(D(x)))
      - generated_weights * log(1 - sigmoid(D(G(z))))

  See `Generative Adversarial Nets` (https://arxiv.org/abs/1406.2661) for more
  details.

  Args:
    discriminator_real_outputs: Discriminator output on real data.
    discriminator_gen_outputs: Discriminator output on generated data. Expected
      to be in the range of (-inf, inf).
    label_smoothing: The amount of smoothing for positive labels. This technique
      is taken from `Improved Techniques for Training GANs`
      (https://arxiv.org/abs/1606.03498). `0.0` means no smoothing.
    real_weights: Optional `Tensor` whose rank is either 0, or the same rank
      as `discriminator_real_outputs`, and must be broadcastable to
      `discriminator_real_outputs` (i.e., all dimensions must be either `1`,
      or the same as the corresponding dimension).
    generated_weights: Same as `real_weights`, but for
      `discriminator_gen_outputs`.
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which this loss will be added.
    reduction: A `tf.losses.Reduction` to apply to loss.
    add_summaries: Whether or not to add summaries for the loss.

  Returns:
    A loss Tensor. The shape depends on `reduction`.
  """
  with ops.name_scope(scope, 'discriminator_minimax_loss', (
      discriminator_real_outputs, discriminator_gen_outputs, real_weights,
      generated_weights, label_smoothing)) as scope:

    # -log(sigmoid(D(x))), with the positive labels smoothed by label_smoothing
    loss_on_real = losses.sigmoid_cross_entropy(
        array_ops.ones_like(discriminator_real_outputs),
        discriminator_real_outputs, real_weights, label_smoothing, scope,
        loss_collection=None, reduction=reduction)
    # -log(1 - sigmoid(D(G(z))))
    loss_on_generated = losses.sigmoid_cross_entropy(
        array_ops.zeros_like(discriminator_gen_outputs),
        discriminator_gen_outputs, generated_weights, scope=scope,
        loss_collection=None, reduction=reduction)

    loss = loss_on_real + loss_on_generated
    util.add_loss(loss, loss_collection)

    if add_summaries:
      summary.scalar('discriminator_gen_minimax_loss', loss_on_generated)
      summary.scalar('discriminator_real_minimax_loss', loss_on_real)
      summary.scalar('discriminator_minimax_loss', loss)

  return loss
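A hedged usage sketch with hypothetical logits: smoothed ones label the real batch, zeros label the generated batch, and the two partial losses add exactly as in the function body:

import tensorflow as tf

d_real = tf.constant([[2.0], [1.5]])   # D(x)
d_fake = tf.constant([[-1.0], [0.2]])  # D(G(z))
loss_on_real = tf.losses.sigmoid_cross_entropy(
    tf.ones_like(d_real), d_real, label_smoothing=0.25)
loss_on_generated = tf.losses.sigmoid_cross_entropy(
    tf.zeros_like(d_fake), d_fake)
discriminator_loss = loss_on_real + loss_on_generated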
Example No. 14
def minimax_discriminator_loss(
        discriminator_real_outputs,
        discriminator_gen_outputs,
        label_smoothing=0.25,
        real_weights=1.0,
        generated_weights=1.0,
        scope=None,
        loss_collection=ops.GraphKeys.LOSSES,
        reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
        add_summaries=False):
    """Original minimax discriminator loss for GANs, with label smoothing.

  Note that the authors don't recommend using this loss. A more practically
  useful loss is `modified_discriminator_loss`.

  L = - real_weights * log(sigmoid(D(x)))
      - generated_weights * log(1 - sigmoid(D(G(z))))

  See `Generative Adversarial Nets` (https://arxiv.org/abs/1406.2661) for more
  details.

  Args:
    discriminator_real_outputs: Discriminator output on real data.
    discriminator_gen_outputs: Discriminator output on generated data. Expected
      to be in the range of (-inf, inf).
    label_smoothing: The amount of smoothing for positive labels. This technique
      is taken from `Improved Techniques for Training GANs`
      (https://arxiv.org/abs/1606.03498). `0.0` means no smoothing.
    real_weights: Optional `Tensor` whose rank is either 0, or the same rank
      as `discriminator_real_outputs`, and must be broadcastable to
      `discriminator_real_outputs` (i.e., all dimensions must be either `1`,
      or the same as the corresponding dimension).
    generated_weights: Same as `real_weights`, but for
      `discriminator_gen_outputs`.
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which this loss will be added.
    reduction: A `tf.compat.v1.losses.Reduction` to apply to loss.
    add_summaries: Whether or not to add summaries for the loss.

  Returns:
    A loss Tensor. The shape depends on `reduction`.
  """
    with ops.name_scope(
            scope, 'discriminator_minimax_loss',
        (discriminator_real_outputs, discriminator_gen_outputs, real_weights,
         generated_weights, label_smoothing)) as scope:

        # -log(sigmoid(D(x))), with the positive labels smoothed by label_smoothing
        loss_on_real = losses.sigmoid_cross_entropy(
            array_ops.ones_like(discriminator_real_outputs),
            discriminator_real_outputs,
            real_weights,
            label_smoothing,
            scope,
            loss_collection=None,
            reduction=reduction)
        # -log(1 - sigmoid(D(G(z))))
        loss_on_generated = losses.sigmoid_cross_entropy(
            array_ops.zeros_like(discriminator_gen_outputs),
            discriminator_gen_outputs,
            generated_weights,
            scope=scope,
            loss_collection=None,
            reduction=reduction)

        loss = loss_on_real + loss_on_generated
        util.add_loss(loss, loss_collection)

        if add_summaries:
            summary.scalar('discriminator_gen_minimax_loss', loss_on_generated)
            summary.scalar('discriminator_real_minimax_loss', loss_on_real)
            summary.scalar('discriminator_minimax_loss', loss)

    return loss
Example No. 15
def acgan_discriminator_loss(
    discriminator_real_classification_logits,
    discriminator_gen_classification_logits,
    real_class_labels,
    fake_class_labels,
    label_smoothing=0.0,
    real_weights=1.0,
    generated_weights=1.0,
    scope=None,
    loss_collection=ops.GraphKeys.LOSSES,
    reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
    add_summaries=False):
  """ACGAN loss for the discriminator.
  The ACGAN loss adds a classification loss to the conditional discriminator.
  Therefore, the discriminator must output a tuple consisting of
    (1) the real/fake prediction and
    (2) the logits for the classification (usually the last conv layer,
        flattened).
  For more details:
    ACGAN: https://arxiv.org/abs/1610.09585
  Args:
    discriminator_real_classification_logits: Classification logits for real
      data.
    discriminator_gen_classification_logits: Classification logits for generated
      data.
    real_class_labels: One-hot `Tensor` of class labels for the real data.
    fake_class_labels: One-hot `Tensor` of class labels for the generated data.
    label_smoothing: A float in [0, 1]. If greater than 0, smooth the labels for
      "discriminator on real data" as suggested in
      https://arxiv.org/pdf/1701.00160
    real_weights: Optional `Tensor` whose rank is either 0, or the same rank
      as `discriminator_real_classification_logits`, and must be broadcastable
      to `discriminator_real_classification_logits` (i.e., all dimensions must
      be either `1`, or the same as the corresponding dimension).
    generated_weights: Same as `real_weights`, but for
      `discriminator_gen_classification_logits`.
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which this loss will be added.
    reduction: A `tf.losses.Reduction` to apply to loss.
    add_summaries: Whether or not to add summaries for the loss.
  Returns:
    A loss Tensor. Shape depends on `reduction`.
  Raises:
    TypeError: If the discriminator does not output a tuple.
  """
  loss_on_generated = losses.sigmoid_cross_entropy(
      fake_class_labels, discriminator_gen_classification_logits,
      weights=generated_weights, scope=scope, loss_collection=None,
      reduction=reduction)
  loss_on_real = losses.sigmoid_cross_entropy(
      real_class_labels, discriminator_real_classification_logits,
      weights=real_weights, label_smoothing=label_smoothing, scope=scope,
      loss_collection=None, reduction=reduction)
  loss = loss_on_generated + loss_on_real
  util.add_loss(loss, loss_collection)

  if add_summaries:
    summary.scalar('discriminator_gen_ac_loss', loss_on_generated)
    summary.scalar('discriminator_real_ac_loss', loss_on_real)
    summary.scalar('discriminator_ac_loss', loss)

  return loss
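A closing sketch of the two calls inside acgan_discriminator_loss, with hypothetical one-hot labels and classification logits for a 3-class problem:

import tensorflow as tf

real_logits = tf.constant([[2.0, -1.0, 0.3]])
gen_logits = tf.constant([[0.1, 0.4, -0.2]])
real_class_labels = tf.constant([[1.0, 0.0, 0.0]])
fake_class_labels = tf.constant([[0.0, 1.0, 0.0]])
loss_on_real = tf.losses.sigmoid_cross_entropy(real_class_labels, real_logits)
loss_on_generated = tf.losses.sigmoid_cross_entropy(fake_class_labels, gen_logits)
acgan_loss = loss_on_real + loss_on_generated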