def testTwoClassLogLikelihoodVersusOldImplementation(self):
    def alt_two_class_log_likelihood_impl(predictions, labels):
        float_labels = tf.cast(labels, dtype=tf.float64)
        float_predictions = tf.cast(tf.squeeze(predictions), dtype=tf.float64)
        # likelihood should be just p for class 1, and 1 - p for class 0.
        # signs is 1 for class 1, and -1 for class 0.
        signs = 2 * float_labels - tf.ones_like(float_labels)
        # constant_term is 1 for class 0, and 0 for class 1.
        constant_term = tf.ones_like(float_labels) - float_labels
        likelihoods = constant_term + signs * float_predictions
        log_likelihoods = tf.log(likelihoods)
        avg_log_likelihood = tf.reduce_mean(log_likelihoods)
        return avg_log_likelihood

    predictions = np.random.rand(1, 10, 1)
    targets = np.random.randint(2, size=10)
    with self.test_session() as session:
        new_log_likelihood, _ = metrics.two_class_log_likelihood(
            predictions, targets)
        alt_log_likelihood = alt_two_class_log_likelihood_impl(
            predictions, targets)
        new_impl, alt_impl = session.run(
            [new_log_likelihood, alt_log_likelihood])
    self.assertAlmostEqual(new_impl, alt_impl)
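The sign/constant-term algebra in the alternative implementation reduces to picking the probability of the observed label: p when the label is 1 and 1 - p when it is 0. As a reference point, here is a minimal pure-NumPy sketch of the same average log-likelihood. The function name numpy_two_class_log_likelihood is ours, not part of the library, and the test methods above assume the usual file-level imports (numpy as np, TensorFlow 1.x as tf, and the metrics module under test).

import numpy as np

def numpy_two_class_log_likelihood(predictions, labels):
    # Align shapes: the test feeds predictions of shape (1, 10, 1)
    # and labels of shape (10,).
    p = np.squeeze(np.asarray(predictions, dtype=np.float64))
    y = np.asarray(labels)
    # Probability assigned to the observed label: p for class 1, 1 - p for class 0.
    likelihoods = np.where(y == 1, p, 1.0 - p)
    return np.mean(np.log(likelihoods))

Run on the same random predictions and targets, this should agree with both TensorFlow implementations up to floating-point error.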
Example #2
def testTwoClassLogLikelihood(self):
    predictions = np.array([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
    targets = np.array([0, 0, 1, 0, 1, 1])
    # Per-example likelihoods of the true labels are
    # [1.0, 0.8, 0.4, 0.4, 0.8, 1.0]; the log(1.0) terms contribute 0.
    expected = (2.0 * np.log(0.8) + 2.0 * np.log(0.4)) / 6.0
    with self.test_session() as session:
        avg_log_likelihood, _ = metrics.two_class_log_likelihood(
            predictions, targets)
        actual = session.run(avg_log_likelihood)
    self.assertAlmostEqual(actual, expected)
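The hand-computed expected constant follows the same rule: each example contributes the log of the probability assigned to its true label. A small NumPy check of that arithmetic (not part of the test file):

import numpy as np

predictions = np.array([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
targets = np.array([0, 0, 1, 0, 1, 1])
# Probability assigned to each observed label: prediction for class 1,
# 1 - prediction for class 0.
likelihoods = np.where(targets == 1, predictions, 1.0 - predictions)
# -> [1.0, 0.8, 0.4, 0.4, 0.8, 1.0]; the two log(1.0) terms contribute 0.
expected = np.log(likelihoods).mean()
assert np.isclose(expected, (2.0 * np.log(0.8) + 2.0 * np.log(0.4)) / 6.0)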