Example #1
 def _testXent(self, np_features, np_labels, use_gpu=False):
   np_loss, np_backprop = self._npXent(np_features, np_labels)
   with self.test_session(use_gpu=use_gpu) as sess:
     loss, backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
         np_features, np_labels)
     tf_loss, tf_backprop = sess.run([loss, backprop])
   self.assertAllCloseAccordingToType(np_loss, tf_loss)
   self.assertAllCloseAccordingToType(np_backprop, tf_backprop)
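The helper above checks the op against a NumPy reference, self._npXent, which is not shown in these snippets. As a rough guide, a minimal sketch of what such a reference typically computes is given below, assuming the standard definitions loss = -sum(labels * log(softmax(features))) per row and backprop = softmax(features) - labels; the actual helper in the test suite may differ in details.

import numpy as np

def np_xent(features, labels):
  # Hypothetical stand-in for the _npXent reference used by the tests above
  # (an assumption, not the verbatim helper from the test suite).
  # Numerically stable softmax along the class axis.
  shifted = features - np.amax(features, axis=-1, keepdims=True)
  exp = np.exp(shifted)
  probs = exp / np.sum(exp, axis=-1, keepdims=True)
  # Per-example cross-entropy loss and its gradient w.r.t. the logits.
  loss = -np.sum(labels * np.log(probs + 1e-20), axis=-1)
  backprop = probs - labels
  return loss, backprop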
Example #2
 def _testSingleClass(self, use_gpu=False):
   for dtype in np.float16, np.float32:
     with self.test_session(use_gpu=use_gpu) as sess:
       loss, backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
           np.array([[1.], [-1.], [0.]]).astype(dtype),
           np.array([[-1.], [0.], [1.]]).astype(dtype))
       tf_loss, tf_backprop = sess.run([loss, backprop])
     self.assertAllClose([0.0, 0.0, 0.0], tf_loss)
     self.assertAllClose([[2.0], [1.0], [0.0]], tf_backprop)
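A quick sanity check of the expected values above: with a single class the softmax output is always 1, so the loss is -label * log(1) = 0 for every row, and the backprop is softmax - label = 1 - label, i.e. 2, 1 and 0 for labels of -1, 0 and 1. The same arithmetic restated in NumPy (illustrative only):

import numpy as np

labels = np.array([[-1.], [0.], [1.]])
probs = np.ones_like(labels)                    # softmax over a single class is 1
loss = -np.sum(labels * np.log(probs), axis=1)  # [0., 0., 0.]
backprop = probs - labels                       # [[2.], [1.], [0.]]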
Example #3
 def testShapeBroadcast(self):
     np_f = np.array([[1., 2., 3., 4.], [1., 2., 3.,
                                         4.]]).astype(np.float32)
     np_l = np.array([[0., 0., 0., 1.], [0., .5, .5,
                                         0.]]).astype(np.float32)
     np_loss, np_backprop = self._npXent(np_f, np_l)
     tf_f = constant_op.constant(
         np.array([[1., 2., 3., 4.]]).astype(np.float32))
     tf_l = constant_op.constant(
         np.array([[0., 0., 0., 1.], [0., .5, .5, 0.]]).astype(np.float32))
     for use_gpu in [False, True]:
         with self.cached_session(use_gpu=use_gpu) as sess:
             loss, backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
                 tf_f, tf_l)
             tf_loss, tf_backprop = self.evaluate([loss, backprop])
         self.assertAllCloseAccordingToType(np_loss, tf_loss)
         self.assertAllCloseAccordingToType(np_backprop, tf_backprop)
Example #4
 def testShapeBroadcast(self):
   np_f = np.array([[1., 2., 3., 4.],
                    [1., 2., 3., 4.]]).astype(np.float32)
   np_l = np.array([[0., 0., 0., 1.],
                    [0., .5, .5, 0.]]).astype(np.float32)
   np_loss, np_backprop = self._npXent(np_f, np_l)
   tf_f = constant_op.constant(
       np.array([[1., 2., 3., 4.]]).astype(np.float32))
   tf_l = constant_op.constant(
       np.array([[0., 0., 0., 1.], [0., .5, .5, 0.]]).astype(np.float32))
   for use_gpu in [False, True]:
     with self.test_session(use_gpu=use_gpu) as sess:
       loss, backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
           tf_f, tf_l)
       tf_loss, tf_backprop = sess.run([loss, backprop])
     self.assertAllCloseAccordingToType(np_loss, tf_loss)
     self.assertAllCloseAccordingToType(np_backprop, tf_backprop)
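In both testShapeBroadcast variants, the logits constant tf_f has shape (1, 4) while the labels tf_l have shape (2, 4); the op is expected to broadcast the single logits row across the batch, which is why the NumPy reference is fed the explicitly repeated np_f. A small illustration of the shapes involved (not part of the tests):

import numpy as np

tf_f_rows = np.array([[1., 2., 3., 4.]], dtype=np.float32)               # (1, 4)
np_l = np.array([[0., 0., 0., 1.], [0., .5, .5, 0.]], dtype=np.float32)  # (2, 4)

# Broadcasting the single logits row over the batch reproduces np_f above.
np_f = np.broadcast_to(tf_f_rows, np_l.shape)                            # (2, 4)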
Example #5
 def testExceptionThrowing(self):
     with self.session(), test_util.force_gpu():
         for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
             features = constant_op.constant([[0.3, 0.5], [0.5, 0.6]],
                                             dtype=dtype)
             labels = constant_op.constant([[0.2, 0.4], [0.1, 0.2]],
                                           dtype=dtype)
             with self.assertRaisesRegex(
                     errors_impl.UnimplementedError,
                     "The GPU implementation of SoftmaxCrossEntropyWithLogits that "
                     "would have been executed is not deterministic. Note that the "
                     "Python API uses an alternative, deterministic, GPU-accelerated "
                     "path when determinism is enabled."):
                 result = gen_nn_ops.softmax_cross_entropy_with_logits(
                     features=features, labels=labels)
                 self.evaluate(result)
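For context on the error the test expects: the raw gen_nn_ops kernel is non-deterministic on GPU, and the message itself points at the public Python API as the alternative, deterministic path when op determinism is enabled. A minimal sketch of that usage, assuming a TF release that provides tf.config.experimental.enable_op_determinism (TF 2.8+); this is illustrative, not part of the test:

import tensorflow as tf

tf.config.experimental.enable_op_determinism()

logits = tf.constant([[0.3, 0.5], [0.5, 0.6]])
labels = tf.constant([[0.2, 0.8], [0.4, 0.6]])

# The public API takes the deterministic GPU path referenced in the
# UnimplementedError message; the raw gen_nn_ops op would raise on GPU instead.
loss = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)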
Example #6
    def test_softmax_cross_entropy_with_logits_1d2d(self):
        num_classes = 10
        batch_size = 100
        total_size = batch_size * num_classes

        # Labels must form a valid probability distribution over num_classes for
        # each training sample, so apply softmax along axis 1 (the class axis).
        labels = tf.nn.softmax(np.random.rand(batch_size, num_classes), axis=1)

        # Features/logits can be arbitrary real numbers.
        features = 200 * np.random.rand(1, num_classes)

        out = softmax_cross_entropy_with_logits(features, labels)
        sess_fn = lambda sess: sess.run(out)

        expected = self.without_ngraph(sess_fn)
        result = self.with_ngraph(sess_fn)

        assert np.allclose(result[0], expected[0], rtol=0, atol=1e-02)  # loss
        assert np.allclose(result[1], expected[1], rtol=0,
                           atol=1e-02)  # backprop
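The comments in that snippet rely on softmax producing a valid per-row probability distribution for the labels; a quick illustrative check of that property (not part of the test):

import numpy as np
import tensorflow as tf

labels = tf.nn.softmax(np.random.rand(4, 10), axis=1)
# Each row of the softmax output sums to 1, i.e. a valid distribution
# over the 10 classes.
print(np.allclose(tf.reduce_sum(labels, axis=1).numpy(), 1.0))  # True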
Example #7
 def testNotMatrix(self):
     with self.cached_session():
         with self.assertRaises(ValueError):
             gen_nn_ops.softmax_cross_entropy_with_logits([0., 1., 2., 3.],
                                                          [0., 1., 0., 1.])
Example #8
 def testShapeMismatch(self):
     with self.cached_session():
         with self.assertRaises(ValueError):
             gen_nn_ops.softmax_cross_entropy_with_logits(
                 [[0., 1.], [2., 3.]], [[0., 1., 0.], [1., 0., 0.]])
Example #9
 def testNotMatrix(self):
   with self.test_session():
     with self.assertRaises(ValueError):
       gen_nn_ops.softmax_cross_entropy_with_logits([0., 1., 2., 3.],
                                                    [0., 1., 0., 1.])
Example #10
 def testShapeMismatch(self):
   with self.test_session():
     with self.assertRaises(ValueError):
       gen_nn_ops.softmax_cross_entropy_with_logits(
           [[0., 1.], [2., 3.]], [[0., 1., 0.], [1., 0., 0.]])
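The four error-case tests above exercise the raw op's shape checking: both arguments must be rank-2 [batch, num_classes] tensors of identical shape. A hedged sketch of a call that satisfies those constraints, reshaping the 1-D inputs from testNotMatrix into a single-row batch (illustrative, not from the test suite):

import numpy as np
from tensorflow.python.ops import gen_nn_ops

features = np.array([[0., 1., 2., 3.]], dtype=np.float32)  # shape (1, 4)
labels = np.array([[0., 1., 0., 0.]], dtype=np.float32)    # shape (1, 4), a valid distribution

# With matching [batch, num_classes] matrices the op returns a per-example
# loss of shape (1,) and a backprop tensor of shape (1, 4).
loss, backprop = gen_nn_ops.softmax_cross_entropy_with_logits(features, labels)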