Example #1
  def testInvalidLabel(self):
    features = [
        [1., 1., 1., 1.],
        [1., 1., 1., 1.],
        [1., 2., 3., 4.],
        [1., 2., 3., 4.]]
    labels = [4, 3, 0, -1]

    if tf.test.is_built_with_cuda() and tf.test.is_gpu_available():
      with self.test_session(use_gpu=True) as sess:
        loss, backprop = (
            gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
                features, labels))
        tf_loss, tf_backprop = sess.run([loss, backprop])
        self.assertAllClose(
            [[np.nan] * 4,
             [0.25, 0.25, 0.25, -0.75],
             [-0.968, 0.087, 0.237, 0.6439],
             [np.nan] * 4],
            tf_backprop, rtol=1e-3, atol=1e-3)
        self.assertAllClose(
            [np.nan, 1.3862, 3.4420, np.nan], tf_loss, rtol=1e-3, atol=1e-3)

    with self.test_session(use_gpu=False) as sess:
      loss, backprop = (
          gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
              features, labels))
      with self.assertRaisesOpError("Received a label value of"):
        sess.run([loss, backprop])
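The expected constants in this test follow directly from the definition of softmax cross entropy. For a row of uniform logits `[1., 1., 1., 1.]` the softmax is uniform, so the loss is ln(4) ≈ 1.3862 and the backprop (softmax minus the one-hot label) is `[0.25, 0.25, 0.25, -0.75]`; rows whose labels fall outside `[0, num_classes)` (here 4 and -1) produce NaN on the GPU path. A quick standalone NumPy check of the valid rows (not part of the test file):

    import numpy as np

    def xent_row(logits, label):
        # Softmax cross entropy and its gradient for a single row.
        softmax = np.exp(logits - np.max(logits))
        softmax /= softmax.sum()
        loss = -np.log(softmax[label])
        backprop = softmax - np.eye(len(logits))[label]
        return loss, backprop

    print(xent_row(np.array([1., 1., 1., 1.]), 3))  # ~1.3862, [0.25, 0.25, 0.25, -0.75]
    print(xent_row(np.array([1., 2., 3., 4.]), 0))  # ~3.44, [-0.968, 0.087, 0.237, 0.644]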
Example #2
    def testInvalidLabel(self):
        features = [[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 2., 3., 4.],
                    [1., 2., 3., 4.]]
        labels = [4, 3, 0, -1]

        if test.is_built_with_cuda() and test.is_gpu_available():
            with self.test_session(use_gpu=True) as sess:
                loss, backprop = (
                    gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
                        features, labels))
                tf_loss, tf_backprop = sess.run([loss, backprop])
                self.assertAllClose(
                    [[np.nan] * 4, [0.25, 0.25, 0.25, -0.75],
                     [-0.968, 0.087, 0.237, 0.6439], [np.nan] * 4],
                    tf_backprop,
                    rtol=1e-3,
                    atol=1e-3)
                self.assertAllClose([np.nan, 1.3862, 3.4420, np.nan],
                                    tf_loss,
                                    rtol=1e-3,
                                    atol=1e-3)

        with self.test_session(use_gpu=False) as sess:
            loss, backprop = (
                gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
                    features, labels))
            with self.assertRaisesOpError("Received a label value of"):
                sess.run([loss, backprop])
Example #3
 def _testXent(self, np_features, np_labels):
     np_loss, np_backprop = self._npXent(np_features, np_labels)
     with self.test_session(use_gpu=True) as sess:
         loss, backprop = gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
             np_features, np_labels)
         tf_loss, tf_backprop = sess.run([loss, backprop])
     self.assertAllCloseAccordingToType(np_loss, tf_loss)
     self.assertAllCloseAccordingToType(np_backprop, tf_backprop)
Example #4
 def _testXent(self, np_features, np_labels):
     np_loss, np_backprop = self._npXent(np_features, np_labels)
     with self.test_session(use_gpu=True) as sess:
         loss, backprop = gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
             np_features, np_labels)
         tf_loss, tf_backprop = sess.run([loss, backprop])
     self.assertAllCloseAccordingToType(np_loss, tf_loss)
     self.assertAllCloseAccordingToType(np_backprop, tf_backprop)
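The `_npXent` helper that `_testXent` compares against is not shown on this page. In the TensorFlow test it is a NumPy reference implementation of the same op; a minimal sketch of such a reference (an assumed reconstruction, not the original helper) looks like this:

    import numpy as np

    def _npXent(features, labels):
        # NumPy reference for the op's two outputs: per-row loss and gradient.
        features = np.asarray(features, dtype=np.float64)
        labels = np.asarray(labels)
        batch_size, num_classes = features.shape

        # Numerically stable softmax over the class dimension.
        shifted = features - features.max(axis=1, keepdims=True)
        exp = np.exp(shifted)
        softmax = exp / exp.sum(axis=1, keepdims=True)

        one_hot = np.zeros_like(softmax)
        one_hot[np.arange(batch_size), labels] = 1.0

        loss = -np.sum(one_hot * np.log(softmax), axis=1)  # shape [batch_size]
        backprop = softmax - one_hot                       # shape [batch_size, num_classes]
        return loss, backprop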
Example #5
 def testSingleClass(self):
   for label_dtype in np.int32, np.int64:
     with self.test_session(use_gpu=True) as sess:
       loss, backprop = gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
           np.array([[1.], [-1.], [0.]]).astype(np.float32),
           np.array([0, 0, 0]).astype(label_dtype))
       tf_loss, tf_backprop = sess.run([loss, backprop])
     self.assertAllClose([0.0, 0.0, 0.0], tf_loss)
     self.assertAllClose([[0.0], [0.0], [0.0]], tf_backprop)
Example #6
 def testSingleClass(self):
     for label_dtype in np.int32, np.int64:
         with self.test_session(use_gpu=True) as sess:
             loss, backprop = gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
                 np.array([[1.], [-1.], [0.]]).astype(np.float32),
                 np.array([0, 0, 0]).astype(label_dtype))
             tf_loss, tf_backprop = sess.run([loss, backprop])
         self.assertAllClose([0.0, 0.0, 0.0], tf_loss)
         self.assertAllClose([[0.0], [0.0], [0.0]], tf_backprop)
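In these single-class tests the expected values are all zero because the softmax over a one-element logit row is always 1, so the loss is -ln(1) = 0 and the gradient (softmax minus the one-hot label) is also 0, regardless of the logit. A standalone NumPy check (not part of the test):

    import numpy as np

    logits = np.array([[1.], [-1.], [0.]], dtype=np.float32)
    softmax = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)  # every row is [1.]
    loss = -np.log(softmax[:, 0])   # [0., 0., 0.]
    backprop = softmax - 1.0        # [[0.], [0.], [0.]]
    print(loss, backprop)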
Example #7
 def testInvalidLabel(self):
     features = [[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 2., 3., 4.],
                 [1., 2., 3., 4.]]
     labels = [4, 3, 0, -1]
     with self.test_session(use_gpu=True) as sess:
         loss, backprop = gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
             features, labels)
         tf_loss, tf_backprop = sess.run([loss, backprop])
         self.assertAllClose([[np.nan] * 4, [0.25, 0.25, 0.25, -0.75],
                              [-0.968, 0.087, 0.237, 0.6439], [np.nan] * 4],
                             tf_backprop,
                             rtol=1e-3,
                             atol=1e-3)
         self.assertAllClose([np.nan, 1.3862, 3.4420, np.nan],
                             tf_loss,
                             rtol=1e-3,
                             atol=1e-3)
Example #8
def sparse_softmax_cross_entropy_with_logits(logits, labels, name=None):
  """Computes sparse softmax cross entropy between `logits` and `labels`.

  Measures the probability error in discrete classification tasks in which the
  classes are mutually exclusive (each entry is in exactly one class).  For
  example, each CIFAR-10 image is labeled with one and only one label: an image
  can be a dog or a truck, but not both.

  **NOTE:**  For this operation, the probability of a given label is considered
  exclusive.  That is, soft classes are not allowed, and the `labels` vector
  must provide a single specific index for the true class for each row of
  `logits` (each minibatch entry).  For soft softmax classification with
  a probability distribution for each entry, see
  `softmax_cross_entropy_with_logits`.

  **WARNING:** This op expects unscaled logits, since it performs a softmax
  on `logits` internally for efficiency.  Do not call this op with the
  output of `softmax`, as it will produce incorrect results.

  `logits` must have the shape `[batch_size, num_classes]`
  and dtype `float32` or `float64`.

  `labels` must have the shape `[batch_size]` and dtype `int32` or `int64`.

  Args:
    logits: Unscaled log probabilities.
    labels: Each entry `labels[i]` must be an index in `[0, num_classes)`. Other
      values will result in a loss of 0, but incorrect gradient computations.
    name: A name for the operation (optional).

  Returns:
    A 1-D `Tensor` of length `batch_size` of the same type as `logits` with the
    softmax cross entropy loss.
  """
  # TODO(pcmurray) Raise an error when the label is not an index in
  # [0, num_classes). Note: This could break users who call this with bad
  # labels, but disregard the bad results.

  # The second output tensor contains the gradients.  We use it in
  # _CrossEntropyGrad() in nn_grad but not here.
  cost, unused_backprop = gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
      logits, labels, name=name)
  return cost
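For context, a minimal usage sketch of the public wrapper defined above, written for graph-mode TensorFlow of the same era as this code (the tensor values are made up for illustration; keyword arguments are used so the call does not depend on parameter order):

    import numpy as np
    import tensorflow as tf

    # logits: [batch_size, num_classes], labels: [batch_size] of class indices.
    logits = tf.constant(np.array([[2.0, 1.0, 0.1],
                                   [0.5, 2.5, 0.3]], dtype=np.float32))
    labels = tf.constant(np.array([0, 1], dtype=np.int32))

    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)

    with tf.Session() as sess:
        print(sess.run(loss))  # one cross entropy value per batch entry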
Example #9
def sparse_softmax_cross_entropy_with_logits(logits, labels, name=None):
    """Computes sparse softmax cross entropy between `logits` and `labels`.

  Measures the probability error in discrete classification tasks in which the
  classes are mutually exclusive (each entry is in exactly one class).  For
  example, each CIFAR-10 image is labeled with one and only one label: an image
  can be a dog or a truck, but not both.

  **NOTE:**  For this operation, the probability of a given label is considered
  exclusive.  That is, soft classes are not allowed, and the `labels` vector
  must provide a single specific index for the true class for each row of
  `logits` (each minibatch entry).  For soft softmax classification with
  a probability distribution for each entry, see
  `softmax_cross_entropy_with_logits`.

  **WARNING:** This op expects unscaled logits, since it performs a softmax
  on `logits` internally for efficiency.  Do not call this op with the
  output of `softmax`, as it will produce incorrect results.

  `logits` must have the shape `[batch_size, num_classes]`
  and dtype `float32` or `float64`.

  `labels` must have the shape `[batch_size]` and dtype `int32` or `int64`.

  Args:
    logits: Unscaled log probabilities.
    labels: Each entry `labels[i]` must be an index in `[0, num_classes)`. Other
      values will result in a loss of 0, but incorrect gradient computations.
    name: A name for the operation (optional).

  Returns:
    A 1-D `Tensor` of length `batch_size` of the same type as `logits` with the
    softmax cross entropy loss.
  """
    # TODO(pcmurray) Raise an error when the label is not an index in
    # [0, num_classes). Note: This could break users who call this with bad
    # labels, but disregard the bad results.

    # The second output tensor contains the gradients.  We use it in
    # _CrossEntropyGrad() in nn_grad but not here.
    cost, unused_backprop = gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
        logits, labels, name=name)
    return cost
Example #10
 def _testInvalidLabel(self, use_gpu):
   features = [
       [1., 1., 1., 1.],
       [1., 1., 1., 1.],
       [1., 2., 3., 4.],
       [1., 2., 3., 4.]]
   labels = [4, 3, 0, -1]
   with self.test_session(use_gpu=use_gpu) as sess:
     loss, backprop = gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
         features, labels)
     tf_loss, tf_backprop = sess.run([loss, backprop])
     self.assertAllClose(
         [[np.nan] * 4,
          [0.25, 0.25, 0.25, -0.75],
          [-0.968, 0.087, 0.237, 0.6439],
          [np.nan] * 4],
         tf_backprop, rtol=1e-3, atol=1e-3)
     self.assertAllClose(
         [np.nan, 1.3862, 3.4420, np.nan], tf_loss, rtol=1e-3, atol=1e-3)