def testDocstringExample(self):
    """A log-Gamma built via Invert(Exp) yields scalar (shape-[]) samples."""
    with self.test_session():
        log_gamma = transformed_distribution_lib.TransformedDistribution(
            distribution=gamma_lib.Gamma(concentration=1., rate=2.),
            bijector=bijectors.Invert(bijectors.Exp()))
        # A single draw from a scalar distribution has an empty shape.
        self.assertAllEqual(
            [], array_ops.shape(log_gamma.sample()).eval())
def testGammaModeAllowNanStatsIsFalseWorksWhenAllBatchMembersAreDefined(
    self):
    """Mode equals (concentration - 1) / rate when every concentration > 1."""
    concentration = np.array([5.5, 3.0, 2.5])
    rate = np.array([1.0, 4.0, 5.0])
    dist = gamma_lib.Gamma(concentration=concentration, rate=rate)
    self.assertEqual(dist.mode().get_shape(), (3,))
    self.assertAllClose(
        self.evaluate(dist.mode()), (concentration - 1) / rate)
def testGammaShape(self):
    """Broadcasting a (5,) concentration with a scalar rate gives batch (5,)."""
    concentration = constant_op.constant([3.0] * 5)
    rate = constant_op.constant(11.0)
    dist = gamma_lib.Gamma(concentration=concentration, rate=rate)
    # Dynamic and static batch shapes agree.
    self.assertEqual(self.evaluate(dist.batch_shape_tensor()), (5,))
    self.assertEqual(dist.batch_shape, tensor_shape.TensorShape([5]))
    # Gamma is a scalar-event distribution.
    self.assertAllEqual(self.evaluate(dist.event_shape_tensor()), [])
    self.assertEqual(dist.event_shape, tensor_shape.TensorShape([]))
def testGammaEntropy(self):
    """Entropy matches scipy.stats.gamma.entropy (scipy scale = 1/rate)."""
    concentration = np.array([1.0, 3.0, 2.5])
    rate = np.array([1.0, 4.0, 5.0])
    dist = gamma_lib.Gamma(concentration=concentration, rate=rate)
    self.assertEqual(dist.entropy().get_shape(), (3,))
    if not stats:
        # scipy unavailable; the shape check above still ran.
        return
    self.assertAllClose(
        self.evaluate(dist.entropy()),
        stats.gamma.entropy(concentration, scale=1 / rate))
def testGammaModeAllowNanStatsFalseRaisesForUndefinedBatchMembers(self):
    """With allow_nan_stats=False, mode() raises if any concentration <= 1."""
    # Mode will not be defined for the first entry (concentration < 1).
    concentration = np.array([0.5, 3.0, 2.5])
    rate = np.array([1.0, 4.0, 5.0])
    dist = gamma_lib.Gamma(
        concentration=concentration, rate=rate, allow_nan_stats=False)
    with self.assertRaisesOpError("x < y"):
        self.evaluate(dist.mode())
def testGammaStd(self):
    """stddev matches scipy.stats.gamma.std (scipy scale = 1/rate)."""
    with self.test_session():
        concentration = np.array([1.0, 3.0, 2.5])
        rate = np.array([1.0, 4.0, 5.0])
        dist = gamma_lib.Gamma(concentration=concentration, rate=rate)
        self.assertEqual(dist.stddev().get_shape(), (3,))
        if not stats:
            # scipy unavailable; the shape check above still ran.
            return
        self.assertAllClose(
            self.evaluate(dist.stddev()),
            stats.gamma.std(concentration, scale=1. / rate))
def test_docstring_example_gamma(self):
    """Monte-Carlo KL(p||q) via `expectation` matches the analytic Gamma KL.

    Gamma is not FULLY_REPARAMETERIZED here, so `expectation` falls back to
    the score-function (likelihood-ratio) estimator; gradients w.r.t. both
    concentrations are also compared against the exact KL's gradients.
    """
    with self.test_session() as sess:
        num_draws = int(1e5)
        concentration_p = constant_op.constant(1.)
        concentration_q = constant_op.constant(2.)
        p = gamma_lib.Gamma(concentration=concentration_p, rate=1.)
        q = gamma_lib.Gamma(concentration=concentration_q, rate=3.)
        approx_kl_gamma_gamma = monte_carlo_lib.expectation(
            f=lambda x: p.log_prob(x) - q.log_prob(x),
            samples=p.sample(num_draws, seed=42),
            log_prob=p.log_prob,
            use_reparametrization=(
                p.reparameterization_type ==
                distribution_lib.FULLY_REPARAMETERIZED))
        exact_kl_gamma_gamma = kullback_leibler.kl_divergence(p, q)
        [exact_kl_gamma_gamma_, approx_kl_gamma_gamma_] = sess.run(
            [exact_kl_gamma_gamma, approx_kl_gamma_gamma])
        # Confirms the score-trick (non-reparameterized) path was exercised.
        self.assertEqual(
            False,
            p.reparameterization_type ==
            distribution_lib.FULLY_REPARAMETERIZED)
        self.assertAllClose(
            exact_kl_gamma_gamma_, approx_kl_gamma_gamma_,
            rtol=0.01, atol=0.)
        # Compare gradients. (Not present in `docstring`.)
        gradp = lambda fp: gradients_impl.gradients(fp, concentration_p)[0]
        gradq = lambda fq: gradients_impl.gradients(fq, concentration_q)[0]
        [
            gradp_exact_kl_gamma_gamma_,
            gradq_exact_kl_gamma_gamma_,
            gradp_approx_kl_gamma_gamma_,
            gradq_approx_kl_gamma_gamma_,
        ] = sess.run([
            gradp(exact_kl_gamma_gamma),
            gradq(exact_kl_gamma_gamma),
            gradp(approx_kl_gamma_gamma),
            gradq(approx_kl_gamma_gamma),
        ])
        # Notice that variance (i.e., `rtol`) is higher when using score-trick.
        self.assertAllClose(
            gradp_exact_kl_gamma_gamma_, gradp_approx_kl_gamma_gamma_,
            rtol=0.05, atol=0.)
        self.assertAllClose(
            gradq_exact_kl_gamma_gamma_, gradq_approx_kl_gamma_gamma_,
            rtol=0.03, atol=0.)
def testGammaVariance(self):
    """Variance matches scipy.stats.gamma.var (scipy scale = 1/rate)."""
    with self.test_session():
        alpha_v = np.array([1.0, 3.0, 2.5])
        beta_v = np.array([1.0, 4.0, 5.0])
        gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
        self.assertEqual(gamma.variance().get_shape(), (3,))
        if not stats:
            # scipy unavailable; the shape check above still ran.
            return
        expected_variances = stats.gamma.var(alpha_v, scale=1 / beta_v)
        # Use self.evaluate(...) for consistency with the sibling tests;
        # the original Tensor.eval() only works in graph mode.
        self.assertAllClose(self.evaluate(gamma.variance()),
                            expected_variances)
def testGammaFullyReparameterized(self):
    """Sample gradients w.r.t. both parameters exist under GradientTape."""
    concentration = constant_op.constant(4.0)
    rate = constant_op.constant(3.0)
    with backprop.GradientTape() as tape:
        # Constants are not watched by default; watch both explicitly.
        tape.watch(concentration)
        tape.watch(rate)
        dist = gamma_lib.Gamma(concentration=concentration, rate=rate)
        samples = dist.sample(100)
    grad_concentration, grad_rate = tape.gradient(
        samples, [concentration, rate])
    self.assertIsNotNone(grad_concentration)
    self.assertIsNotNone(grad_rate)
def testGammaModeAllowNanStatsIsTrueReturnsNaNforUndefinedBatchMembers(
    self):
    """With allow_nan_stats=True, undefined modes come back as NaN."""
    # Mode will not be defined for the first entry (concentration < 1).
    concentration = np.array([0.5, 3.0, 2.5])
    rate = np.array([1.0, 4.0, 5.0])
    dist = gamma_lib.Gamma(
        concentration=concentration, rate=rate, allow_nan_stats=True)
    expected_modes = (concentration - 1) / rate
    expected_modes[0] = np.nan
    self.assertEqual(dist.mode().get_shape(), (3,))
    self.assertAllClose(self.evaluate(dist.mode()), expected_modes)
def testGammaCDF(self):
    """CDF matches scipy.stats.gamma.cdf (scipy scale = 1/rate)."""
    batch_size = 6
    alpha_v = 2.0
    beta_v = 3.0
    concentration = constant_op.constant([alpha_v] * batch_size)
    rate = constant_op.constant([beta_v] * batch_size)
    x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
    cdf = gamma_lib.Gamma(concentration=concentration, rate=rate).cdf(x)
    self.assertEqual(cdf.get_shape(), (6,))
    if not stats:
        # scipy unavailable; the shape check above still ran.
        return
    self.assertAllClose(
        self.evaluate(cdf), stats.gamma.cdf(x, alpha_v, scale=1 / beta_v))
def testGammaLogPDF(self):
    """log_prob / prob match scipy.stats.gamma.logpdf and its exp."""
    with self.test_session():
        batch_size = 6
        alpha_v = 2.0
        beta_v = 3.0
        concentration = constant_op.constant([alpha_v] * batch_size)
        rate = constant_op.constant([beta_v] * batch_size)
        x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
        dist = gamma_lib.Gamma(concentration=concentration, rate=rate)
        log_pdf = dist.log_prob(x)
        self.assertEqual(log_pdf.get_shape(), (6,))
        pdf = dist.prob(x)
        self.assertEqual(pdf.get_shape(), (6,))
        if not stats:
            # scipy unavailable; the shape checks above still ran.
            return
        expected_log_pdf = stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)
        self.assertAllClose(self.evaluate(log_pdf), expected_log_pdf)
        self.assertAllClose(self.evaluate(pdf), np.exp(expected_log_pdf))
def testGammaLogPDFMultidimensional(self):
    """log_prob broadcasts a (6, 1) x against a (6, 2) batch correctly."""
    batch_size = 6
    alpha_v = np.array([2.0, 4.0])
    beta_v = np.array([3.0, 4.0])
    concentration = constant_op.constant([[2.0, 4.0]] * batch_size)
    rate = constant_op.constant([[3.0, 4.0]] * batch_size)
    # Column vector of evaluation points; broadcasts across the two columns.
    x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
    dist = gamma_lib.Gamma(concentration=concentration, rate=rate)
    log_pdf = dist.log_prob(x)
    log_pdf_values = self.evaluate(log_pdf)
    self.assertEqual(log_pdf.get_shape(), (6, 2))
    pdf = dist.prob(x)
    pdf_values = self.evaluate(pdf)
    self.assertEqual(pdf.get_shape(), (6, 2))
    if not stats:
        # scipy unavailable; the shape checks above still ran.
        return
    expected_log_pdf = stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)
    self.assertAllClose(log_pdf_values, expected_log_pdf)
    self.assertAllClose(pdf_values, np.exp(expected_log_pdf))
def testStateParts(self):
    """HMC over a two-part state (Normal, Independent-Gamma) recovers moments."""
    with self.test_session(graph=ops.Graph()) as sess:
        dist_x = normal_lib.Normal(loc=self.dtype(0), scale=self.dtype(1))
        dist_y = independent_lib.Independent(
            gamma_lib.Gamma(concentration=self.dtype([1, 2]),
                            rate=self.dtype([0.5, 0.75])),
            reinterpreted_batch_ndims=1)

        def target_log_prob(x, y):
            # Joint log-density: the parts are independent.
            return dist_x.log_prob(x) + dist_y.log_prob(y)

        initial_state = [dist_x.sample(seed=1), dist_y.sample(seed=2)]
        samples, _ = hmc.sample_chain(
            num_results=int(2e3),
            target_log_prob_fn=target_log_prob,
            current_state=initial_state,
            step_size=0.85,
            num_leapfrog_steps=3,
            num_burnin_steps=int(250),
            seed=49)
        actual_means = [math_ops.reduce_mean(s, axis=0) for s in samples]
        actual_vars = [_reduce_variance(s, axis=0) for s in samples]
        expected_means = [dist_x.mean(), dist_y.mean()]
        expected_vars = [dist_x.variance(), dist_y.variance()]
        [
            actual_means_,
            actual_vars_,
            expected_means_,
            expected_vars_,
        ] = sess.run([
            actual_means,
            actual_vars,
            expected_means,
            expected_vars,
        ])
        # Loose tolerances: a 2k-sample MCMC estimate is noisy.
        self.assertAllClose(expected_means_, actual_means_,
                            atol=0.05, rtol=0.16)
        self.assertAllClose(expected_vars_, actual_vars_,
                            atol=0., rtol=0.25)
def testGammaSample(self):
    """Sampled moments match scipy, and samples pass a KS goodness-of-fit."""
    alpha_v = 4.0
    beta_v = 3.0
    concentration = constant_op.constant(alpha_v)
    rate = constant_op.constant(beta_v)
    n = 100000
    dist = gamma_lib.Gamma(concentration=concentration, rate=rate)
    samples = dist.sample(n, seed=137)
    sample_values = self.evaluate(samples)
    # Static and realized shapes agree.
    self.assertEqual(samples.get_shape(), (n,))
    self.assertEqual(sample_values.shape, (n,))
    self.assertTrue(self._kstest(alpha_v, beta_v, sample_values))
    if not stats:
        # scipy unavailable; shape and KS checks above still ran.
        return
    self.assertAllClose(
        sample_values.mean(),
        stats.gamma.mean(alpha_v, scale=1 / beta_v),
        atol=.01)
    self.assertAllClose(
        sample_values.var(),
        stats.gamma.var(alpha_v, scale=1 / beta_v),
        atol=.15)
def testGammaPdfOfSampleMultiDims(self):
    """prob(sample) integrates to ~1 per batch cell; moments match scipy.

    The (2,)-concentration broadcasts against the (2, 1)-rate to give a
    (2, 2) batch.
    """
    dist = gamma_lib.Gamma(concentration=[7., 11.], rate=[[5.], [6.]])
    num = 50000
    samples = dist.sample(num, seed=137)
    pdfs = dist.prob(samples)
    sample_vals, pdf_vals = self.evaluate([samples, pdfs])
    self.assertEqual(samples.get_shape(), (num, 2, 2))
    self.assertEqual(pdfs.get_shape(), (num, 2, 2))
    # Empirical integral of the pdf over the samples should be close to 1
    # in every batch cell.
    for i in (0, 1):
        for j in (0, 1):
            self._assertIntegral(
                sample_vals[:, i, j], pdf_vals[:, i, j], err=0.02)
    if not stats:
        # scipy unavailable; shape and integral checks above still ran.
        return
    self.assertAllClose(
        stats.gamma.mean([[7., 11.], [7., 11.]],
                         scale=1 / np.array([[5., 5.], [6., 6.]])),
        sample_vals.mean(axis=0),
        atol=.1)
    self.assertAllClose(
        stats.gamma.var([[7., 11.], [7., 11.]],
                        scale=1 / np.array([[5., 5.], [6., 6.]])),
        sample_vals.var(axis=0),
        atol=.1)