def testChiChiKL(self):
  """Checks the analytic Chi-Chi KL divergence against a Monte Carlo estimate."""
  # Use disjoint df grids for the two distributions so the true KL is bounded
  # away from zero. If the grids overlapped, the diagonal true_kl entries
  # would be exactly zero while the sample estimate is tiny-but-nonzero, and
  # the relative-tolerance comparison would fail.
  df_a = np.arange(1, 6, step=2, dtype=np.float64)[..., np.newaxis]  # (3, 1)
  df_b = np.arange(2, 7, step=2, dtype=np.float64)[np.newaxis, ...]  # (1, 3)

  dist_a = tfd.Chi(df=df_a)
  dist_b = tfd.Chi(df=df_b)

  # Closed-form KL(Chi(df_a) || Chi(df_b)).
  analytic_kl = (0.5 * special.digamma(0.5 * df_a) * (df_a - df_b)
                 + special.gammaln(0.5 * df_b)
                 - special.gammaln(0.5 * df_a))

  kl = tfd.kl_divergence(dist_a, dist_b)

  # Monte Carlo estimate of the same KL from samples of dist_a.
  samples = dist_a.sample(int(8e5), seed=test_util.test_seed())
  kl_estimate = tf.reduce_mean(
      dist_a.log_prob(samples) - dist_b.log_prob(samples), axis=0)

  kl_, kl_estimate_ = self.evaluate([kl, kl_estimate])
  self.assertAllClose(analytic_kl, kl_, atol=0., rtol=1e-14)
  self.assertAllClose(analytic_kl, kl_estimate_, atol=0., rtol=5e-2)

  # KL of a distribution with itself is identically zero.
  self_kl = tfd.kl_divergence(dist_a, dist_a)
  expected_zero_, self_kl_ = self.evaluate([tf.zeros_like(self_kl), self_kl])
  self.assertAllEqual(expected_zero_, self_kl_)
def testAssertsPositiveDfAfterMutation(self):
  """Mutating `df` to a non-positive value must trip the validation assert."""
  df_var = tf.Variable([1., 2., 3.])
  dist = tfd.Chi(df_var, validate_args=True)
  self.evaluate([v.initializer for v in dist.variables])
  # The assign runs before the sample via the control dependency, so the
  # sample op sees the now-invalid df and must raise.
  with self.assertRaisesOpError('Argument `df` must be positive.'):
    with tf.control_dependencies([df_var.assign([1., 2., -3.])]):
      self.evaluate(dist.sample())
def testLogPdfAssertsOnNonPositiveSample(self):
  """log_prob rejects negative samples and, for this df, zero samples.

  Fixes two defects in the original:
  * It was named `testLogPdfAssertsOnInvalidSample`, the same as a later
    method in this class, so this definition was silently shadowed and never
    ran; renamed so both tests execute.
  * A leftover debug `print(...)` wrapped the zero-sample evaluation; removed.
  """
  d = tfd.Chi(df=13.37, validate_args=True)
  with self.assertRaisesOpError('All elements must be non-negative.'):
    self.evaluate(d.log_prob([14.2, -5.3]))
  with self.assertRaisesOpError('Sample must be positive.'):
    self.evaluate(d.log_prob([0.0, 0.0]))
def testGradientThroughParams(self):
  """The negative log-likelihood must be differentiable w.r.t. `df`."""
  df_var = tf.Variable(19.43, dtype=tf.float64)
  dist = tfd.Chi(df_var, validate_args=True)
  with tf.GradientTape() as tape:
    neg_log_lik = -dist.log_prob([1., 2., 3.])
  gradients = tape.gradient(neg_log_lik, dist.trainable_variables)
  # Exactly one trainable variable (df), and its gradient must exist.
  self.assertLen(gradients, 1)
  self.assertAllNotNone(gradients)
def testChiCDF(self):
  """Compares the CDF against scipy over a broadcasting df/x grid."""
  df = np.arange(1, 6, dtype=np.float64)[:, np.newaxis]  # column: (5, 1)
  x = np.arange(1, 6, dtype=np.float64)[np.newaxis, :]   # row:    (1, 5)
  chi = tfd.Chi(df=df)
  cdf = chi.cdf(x)
  # Result broadcasts df against x.
  self.assertEqual(cdf.shape, np.broadcast(df, x).shape)
  self.assertAllClose(self.evaluate(cdf), stats.chi.cdf(x, df))
def testChiLogPDF(self):
  """Checks log_prob and prob against scipy on a broadcasting df/x grid."""
  df = np.arange(1, 6, dtype=np.float64)[:, np.newaxis]  # column: (5, 1)
  x = np.arange(1, 6, dtype=np.float64)[np.newaxis, :]   # row:    (1, 5)
  chi = tfd.Chi(df=df)
  expected_log_pdf = stats.chi.logpdf(x, df)

  log_pdf = chi.log_prob(x)
  self.assertEqual(log_pdf.shape, np.broadcast(df, x).shape)
  self.assertAllClose(self.evaluate(log_pdf), expected_log_pdf)

  # prob must agree with exp(log_prob).
  pdf = chi.prob(x)
  self.assertEqual(pdf.shape, np.broadcast(df, x).shape)
  self.assertAllClose(self.evaluate(pdf), np.exp(expected_log_pdf))
def testChiEntropy(self):
  """Entropy should match scipy elementwise over a batch of dfs."""
  df = np.arange(1, 6, dtype=np.float64)
  chi = tfd.Chi(df=df)
  entropy = chi.entropy()
  self.assertEqual(entropy.shape, df.shape)
  self.assertAllClose(self.evaluate(entropy), stats.chi.entropy(df))
def testChiVariance(self):
  """Variance should match scipy elementwise over a batch of dfs."""
  df = np.arange(1, 6, dtype=np.float64)
  chi = tfd.Chi(df=df)
  variance = chi.variance()
  self.assertEqual(variance.shape, df.shape)
  self.assertAllClose(self.evaluate(variance), stats.chi.var(df))
def testChiMean(self):
  """Mean should match scipy elementwise over a batch of dfs."""
  df = np.arange(1, 6, dtype=np.float64)
  chi = tfd.Chi(df=df)
  mean = chi.mean()
  self.assertEqual(mean.shape, df.shape)
  self.assertAllClose(self.evaluate(mean), stats.chi.mean(df))
def testAssertsPositiveDf(self):
  """Using a distribution built with a negative df must raise when validated."""
  df_var = tf.Variable([1., 2., -3.])
  with self.assertRaisesOpError('Argument `df` must be positive.'):
    dist = tfd.Chi(df_var, validate_args=True)
    self.evaluate([v.initializer for v in dist.variables])
    self.evaluate(dist.entropy())
def testPdfOnBoundary(self):
  """Checks log_prob at the support boundary x = 0.

  The original computed the boundary log-prob for both df values but only
  asserted on element [1] (df == 3, where the density vanishes so its log is
  -inf); the df == 1 value was computed and never checked. Adds the missing
  finiteness assertion on element [0].
  """
  d = tfd.Chi(df=[1., 3.], validate_args=True)
  log_prob_boundary = self.evaluate(d.log_prob(0.))
  # NOTE(review): assumes the df == 1 boundary density is finite here —
  # confirm against the Chi implementation's boundary handling.
  self.assertAllFinite(log_prob_boundary[0])
  self.assertAllNegativeInf(log_prob_boundary[1])
def testLogPdfAssertsOnInvalidSample(self):
  """Negative samples must be rejected by log_prob when validating args."""
  dist = tfd.Chi(df=13.37, validate_args=True)
  with self.assertRaisesOpError('Condition x >= 0'):
    self.evaluate(dist.log_prob([14.2, -5.3]))