Example #1
  def testSample(self):
    """Ensure samples can be drawn."""
    with tf.Graph().as_default(), self.test_session() as sess:
      logits = np.asarray([
          [0., 0., 0.],  #
          [1., -1., 0.]
      ]).astype(np.float32)
      loss = loss_functions.CategoricalLogitsNegativeLogProbLoss(
          tf.constant(logits))
      sample = loss.sample(42)
      sample = sess.run(sample)
      self.assertEqual(sample.shape, (2,))
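For intuition, here is a self-contained NumPy sketch of what `loss.sample(42)` conceptually returns for this loss: one class index per batch row, drawn from softmax(logits). This is a conceptual sketch, not kfac's actual implementation; the `rng` seed is illustrative only.

  import numpy as np

  # Conceptual sketch (not kfac's implementation): draw one class index per
  # batch row from the categorical distribution softmax(logits).
  rng = np.random.default_rng(42)  # illustrative seed
  logits = np.array([[0., 0., 0.], [1., -1., 0.]])
  probs = np.exp(logits - logits.max(axis=-1, keepdims=True))
  probs /= probs.sum(axis=-1, keepdims=True)
  samples = np.array([rng.choice(p.size, p=p) for p in probs])
  print(samples.shape)  # (2,) -- one sampled class per row, as the test asserts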
Example #2
  def testEvaluateOnSample(self):
    """Ensure log probability of a sample can be drawn."""
    with tf.Graph().as_default(), self.test_session() as sess:
      logits = np.asarray([
          [0., 0., 0.],  #
          [1., -1., 0.]
      ]).astype(np.float32)
      loss = loss_functions.CategoricalLogitsNegativeLogProbLoss(
          tf.constant(logits))
      neg_log_prob = loss.evaluate_on_sample(42)

      # Simply ensure this doesn't crash. As the output is random, it is
      # difficult to verify the value itself here.
      neg_log_prob = sess.run(neg_log_prob)
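The test only checks that evaluation runs, but the expected value of `evaluate_on_sample` is pinned down analytically: for a sample x drawn from a categorical distribution p,

  \mathbb{E}_{x \sim p}[-\log p(x)] = H(p) = -\sum_{c} p_c \log p_c .

A stronger (but slower) test could therefore compare the empirical mean of `evaluate_on_sample` over many seeds against this entropy, summed over the batch rows if the loss reduces by summation (the snippet does not show the reduction, so that detail is an assumption).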
Example #3
  def testMultiplyFisherBatch(self):
    """Ensure multiplication by the Fisher works on a batch of vectors."""
    with tf.Graph().as_default(), self.test_session() as sess:
      logits = np.array([[1., 2., 3.], [4., 6., 8.]])
      loss = loss_functions.CategoricalLogitsNegativeLogProbLoss(logits)

      vector = np.array([[1., 2., 3.], [5., 3., 1.]])

      na = np.newaxis
      probs = np.exp(logits - np.logaddexp.reduce(logits, axis=-1,
                                                  keepdims=True))
      fishers = probs[..., na] * np.eye(3) - probs[..., na] * probs[..., na, :]

      result = loss.multiply_fisher(vector)
      expected_result = np.matmul(vector[..., na, :], fishers)[..., 0, :]
      self.assertEqual(sess.run(result).shape, logits.shape)
      self.assertAllClose(expected_result, sess.run(result))
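The `fishers` line above is the standard Fisher information matrix of a categorical distribution parameterized by logits, built independently for each batch row. With p = softmax(\theta), the gradient of the negative log-likelihood is \nabla_\theta(-\log p(x \mid \theta)) = p - e_x (with e_x the one-hot target), so

  F(\theta) = \mathbb{E}_{x \sim p}\big[(p - e_x)(p - e_x)^\top\big] = \mathrm{diag}(p) - p\,p^\top ,

which is exactly what the broadcasted expression probs[..., na] * np.eye(3) - probs[..., na] * probs[..., na, :] computes per row, and what multiply_fisher is checked against via the batched matmul.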
Example #4
  def testMultiplyFisherSingleVector(self):
    """Ensure multiplication by the Fisher works for a single vector."""
    with tf.Graph().as_default(), self.test_session() as sess:
      logits = np.array([1., 2., 3.])
      loss = loss_functions.CategoricalLogitsNegativeLogProbLoss(logits)

      # The LossFunction.multiply_fisher docstring only guarantees support for
      # vectors with the same shape as the natural parameters (the logits
      # here), but we also exercise vectors with extra leading dimensions.
      vector = np.array([1., 2., 3.])
      vectors = [vector, vector.reshape(1, -1), np.stack([vector] * 4)]

      probs = np.exp(logits - np.logaddexp.reduce(logits))
      fisher = np.diag(probs) - np.outer(probs, probs)

      for vector in vectors:
        result = loss.multiply_fisher(vector)
        expected_result = np.dot(vector, fisher)
        self.assertAllClose(expected_result, sess.run(result))
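As a quick standalone check (plain NumPy, no kfac required), the broadcasted batch construction from testMultiplyFisherBatch agrees with the diag/outer form used here when applied to a single row:

  import numpy as np

  logits = np.array([1., 2., 3.])
  probs = np.exp(logits - np.logaddexp.reduce(logits))

  # Broadcasted form from testMultiplyFisherBatch, specialized to one row.
  na = np.newaxis
  batched = probs[..., na] * np.eye(3) - probs[..., na] * probs[..., na, :]

  # Direct form from this test.
  direct = np.diag(probs) - np.outer(probs, probs)

  assert np.allclose(batched, direct)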
Example #5
  def testEvaluateOnTargets(self):
    """Ensure log probability can be evaluated correctly."""
    with tf.Graph().as_default(), self.test_session() as sess:
      logits = np.asarray([
          [0., 0., 0.],  #
          [1., -1., 0.]
      ]).astype(np.float32)
      targets = np.asarray([2, 1]).astype(np.int32)
      loss = loss_functions.CategoricalLogitsNegativeLogProbLoss(
          tf.constant(logits), targets=tf.constant(targets))
      neg_log_prob = loss.evaluate()
      neg_log_prob = sess.run(neg_log_prob)

      # Calculate explicit log probability of targets.
      probs = np.exp(logits) / np.sum(np.exp(logits), axis=1, keepdims=True)
      log_probs = np.log([
          probs[0, targets[0]],  #
          probs[1, targets[1]]
      ])
      expected_log_prob = np.sum(log_probs)

      self.assertAllClose(neg_log_prob, -expected_log_prob)
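The quantity the test reconstructs by explicit normalization also has a closed form, which doubles as the numerically stable way to compute it: for each row i with target t_i,

  -\log p(t_i \mid \theta_i) = \operatorname{logsumexp}(\theta_i) - \theta_{i, t_i} ,

summed over rows. For the logits and targets above this gives (\log 3 - 0) + (\log(e^{1} + e^{-1} + e^{0}) - (-1)) \approx 1.0986 + 2.4076 \approx 3.506, which is the value `neg_log_prob` should take.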