Example 1
0
 def test_softmax_loss(self):
     """Checks _softmax_loss values: unweighted, per-list weights, LambdaWeight."""
     with tf.Graph().as_default():
         # Three ranking lists of three items each.  In `labels`, a positive
         # value marks a relevant item (the 2. acts as a graded relevance);
         # the third list is all-zero and contributes nothing to the
         # expected values below.
         scores = [[1., 3., 2.], [1., 2., 3.], [1., 2., 3.]]
         labels = [[0., 0., 1.], [0., 0., 2.], [0., 0., 0.]]
         weights = [[2.], [1.], [1.]]
         with self.cached_session():
             # Unweighted case: -sum_i label_i * log(softmax(scores)_i) per
             # list, divided by 2. -- presumably the number of lists with
             # nonzero labels; TODO confirm against the implementation.
             self.assertAlmostEqual(
                 ranking_losses._softmax_loss(labels, scores).eval(),
                 -(math.log(_softmax(scores[0])[2]) +
                   math.log(_softmax(scores[1])[2]) * 2.) / 2.,
                 places=5)
             # Per-list weights scale each list's contribution (2. for the
             # first list, 1. for the second), mirrored in the expected value.
             self.assertAlmostEqual(
                 ranking_losses._softmax_loss(labels, scores,
                                              weights).eval(),
                 -(math.log(_softmax(scores[0])[2]) * 2. +
                   math.log(_softmax(scores[1])[2]) * 2. * 1.) / 2.,
                 places=5)
             # Test LambdaWeight.
             lambda_weight = ranking_losses.DCGLambdaWeight(
                 rank_discount_fn=lambda r: 1. / tf.math.log1p(r))
             # With the DCG lambda weight, each list's term picks up a
             # 1 / log(1 + rank) discount, as spelled out in the expected
             # value below (ranks 2 and 1 respectively -- TODO confirm the
             # rank assignment against DCGLambdaWeight's definition).
             self.assertAlmostEqual(
                 ranking_losses._softmax_loss(
                     labels, scores, lambda_weight=lambda_weight).eval(),
                 -(math.log(_softmax(scores[0])[2]) / math.log(1. + 2.) +
                   math.log(_softmax(scores[1])[2]) * 2. /
                   math.log(1. + 1.)) / 2.,
                 places=5)
Example 2
0
 def test_softmax_loss_with_invalid_labels(self):
   """Items carrying a negative label are excluded from the softmax loss."""
   logits = [[1., 3., 2.]]
   relevance = [[0., -1., 1.]]
   # The middle item has label -1. and is dropped, so the loss reduces to
   # the cross-entropy over the two remaining scores [1., 2.], where the
   # second one is the relevant item.
   expected = -math.log(_softmax([1, 2])[1])
   with self.cached_session():
     self.assertAlmostEqual(
         ranking_losses._softmax_loss(relevance, logits).eval(),
         expected,
         places=5)