# Imports reconstructed (assumed) from the TF1-era Edward2/TFP APIs used below.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import inspect

from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

from tensorflow_probability import edward2 as ed

tfd = tfp.distributions
tfe = tf.contrib.eager  # provides run_test_in_graph_and_eager_modes
def mixture_of_real_and_int():
  """Model whose output distribution depends on a Bernoulli coin flip.

  Note: the Python-level `if` on `tf.equal(flip, 1)` requires eager
  execution; in graph mode the truth value of a Tensor is undefined.
  """
  loc = ed.Normal(loc=0., scale=1., name="loc")
  flip = ed.Bernoulli(probs=0.5, name="flip")
  if tf.equal(flip, 1):
    x = ed.Normal(loc=loc, scale=0.5, sample_shape=5, name="x")
  else:
    x = ed.Poisson(rate=tf.nn.softplus(loc), sample_shape=3, name="x")
  return x
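# Hedged usage sketch (not in the original file): under eager execution the
# Python-level `if tf.equal(flip, 1)` above is well defined, so the model can
# be called directly; ed.tape() records every named RandomVariable it samples.
def _trace_mixture_example():
  with ed.tape() as model_tape:  # OrderedDict mapping name -> RandomVariable
    x = mixture_of_real_and_int()
  # model_tape holds "loc", "flip", and "x"; x is a Normal draw of shape [5]
  # or a Poisson draw of shape [3], depending on the sampled value of "flip".
  return x, model_tape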
class GeneratedRandomVariablesTest(parameterized.TestCase, tf.test.TestCase):

  @tfe.run_test_in_graph_and_eager_modes
  def testBernoulliDoc(self):
    self.assertGreater(len(ed.Bernoulli.__doc__), 0)
    self.assertIn(inspect.cleandoc(tfd.Bernoulli.__init__.__doc__),
                  ed.Bernoulli.__doc__)
    self.assertEqual(ed.Bernoulli.__name__, "Bernoulli")

  @parameterized.named_parameters(
      {"testcase_name": "1d_rv_1d_event", "logits": np.zeros(1), "n": [1]},
      {"testcase_name": "1d_rv_5d_event", "logits": np.zeros(1), "n": [5]},
      {"testcase_name": "5d_rv_1d_event", "logits": np.zeros(5), "n": [1]},
      {"testcase_name": "5d_rv_5d_event", "logits": np.zeros(5), "n": [5]},
  )
  @tfe.run_test_in_graph_and_eager_modes
  def testBernoulliLogProb(self, logits, n):
    rv = ed.Bernoulli(logits)
    dist = tfd.Bernoulli(logits)
    x = rv.distribution.sample(n)
    rv_log_prob, dist_log_prob = self.evaluate(
        [rv.distribution.log_prob(x), dist.log_prob(x)])
    self.assertAllEqual(rv_log_prob, dist_log_prob)

  @parameterized.named_parameters(
      {"testcase_name": "0d_rv_0d_sample", "logits": 0., "n": 1},
      {"testcase_name": "0d_rv_1d_sample", "logits": 0., "n": [1]},
      {"testcase_name": "1d_rv_1d_sample", "logits": np.array([0.]), "n": [1]},
      {"testcase_name": "1d_rv_5d_sample", "logits": np.array([0.]), "n": [5]},
      {"testcase_name": "2d_rv_1d_sample",
       "logits": np.array([-0.2, 0.8]), "n": [1]},
      {"testcase_name": "2d_rv_5d_sample",
       "logits": np.array([-0.2, 0.8]), "n": [5]},
  )
  @tfe.run_test_in_graph_and_eager_modes
  def testBernoulliSample(self, logits, n):
    rv = ed.Bernoulli(logits)
    dist = tfd.Bernoulli(logits)
    self.assertEqual(rv.distribution.sample(n).shape, dist.sample(n).shape)

  @parameterized.named_parameters(
      {"testcase_name": "0d_bernoulli",
       "rv": ed.Bernoulli(probs=0.5),
       "sample_shape": [], "batch_shape": [], "event_shape": []},
      {"testcase_name": "2d_bernoulli",
       "rv": ed.Bernoulli(tf.zeros([2, 3])),
       "sample_shape": [], "batch_shape": [2, 3], "event_shape": []},
      {"testcase_name": "2x0d_bernoulli",
       "rv": ed.Bernoulli(probs=0.5, sample_shape=2),
       "sample_shape": [2], "batch_shape": [], "event_shape": []},
      {"testcase_name": "2x1d_bernoulli",
       "rv": ed.Bernoulli(probs=0.5, sample_shape=[2, 1]),
       "sample_shape": [2, 1], "batch_shape": [], "event_shape": []},
      {"testcase_name": "3d_dirichlet",
       "rv": ed.Dirichlet(tf.zeros(3)),
       "sample_shape": [], "batch_shape": [], "event_shape": [3]},
      {"testcase_name": "2x3d_dirichlet",
       "rv": ed.Dirichlet(tf.zeros([2, 3])),
       "sample_shape": [], "batch_shape": [2], "event_shape": [3]},
      {"testcase_name": "1x3d_dirichlet",
       "rv": ed.Dirichlet(tf.zeros(3), sample_shape=1),
       "sample_shape": [1], "batch_shape": [], "event_shape": [3]},
      {"testcase_name": "2x1x3d_dirichlet",
       "rv": ed.Dirichlet(tf.zeros(3), sample_shape=[2, 1]),
       "sample_shape": [2, 1], "batch_shape": [], "event_shape": [3]},
  )
  @tfe.run_test_in_graph_and_eager_modes
  def testShape(self, rv, sample_shape, batch_shape, event_shape):
    self.assertEqual(rv.shape, sample_shape + batch_shape + event_shape)
    self.assertEqual(rv.sample_shape, sample_shape)
    self.assertEqual(rv.distribution.batch_shape, batch_shape)
    self.assertEqual(rv.distribution.event_shape, event_shape)

  def _testValueShapeAndDtype(self, cls, value, **kwargs):
    rv = cls(value=value, **kwargs)
    value_shape = rv.value.shape
    expected_shape = rv.sample_shape.concatenate(
        rv.distribution.batch_shape).concatenate(
            rv.distribution.event_shape)
    self.assertEqual(value_shape, expected_shape)
    self.assertEqual(rv.distribution.dtype, rv.value.dtype)

  @parameterized.parameters(
      {"cls": ed.Normal, "value": 2,
       "kwargs": {"loc": 0.5, "scale": 1.0}},
      {"cls": ed.Normal, "value": [2],
       "kwargs": {"loc": [0.5], "scale": [1.0]}},
      {"cls": ed.Poisson, "value": 2,
       "kwargs": {"rate": 0.5}},
  )
  @tfe.run_test_in_graph_and_eager_modes
  def testValueShapeAndDtype(self, cls, value, kwargs):
    self._testValueShapeAndDtype(cls, value, **kwargs)

  @tfe.run_test_in_graph_and_eager_modes
  def testValueMismatchRaises(self):
    with self.assertRaises(ValueError):
      self._testValueShapeAndDtype(ed.Normal, 2, loc=[0.5, 0.5], scale=1.0)
    with self.assertRaises(ValueError):
      self._testValueShapeAndDtype(ed.Normal, 2, loc=[0.5], scale=[1.0])
    with self.assertRaises(ValueError):
      self._testValueShapeAndDtype(
          ed.Normal, np.zeros([10, 3]), loc=[0.5, 0.5], scale=[1.0, 1.0])

  def testValueUnknownShape(self):
    # should not raise error
    ed.Bernoulli(probs=0.5, value=tf.placeholder(tf.int32))

  @tfe.run_test_in_graph_and_eager_modes
  def testAsRandomVariable(self):
    # A wrapped Normal distribution should behave identically to
    # the builtin Normal RV.
    def model_builtin():
      return ed.Normal(1., 0.1, name="x")

    def model_wrapped():
      return ed.as_random_variable(tfd.Normal(1., 0.1, name="x"))

    # Check that both models are interceptable and yield
    # identical log probs.
    log_joint_builtin = ed.make_log_joint_fn(model_builtin)
    log_joint_wrapped = ed.make_log_joint_fn(model_wrapped)
    self.assertEqual(self.evaluate(log_joint_builtin(x=7.)),
                     self.evaluate(log_joint_wrapped(x=7.)))

    # Check that our attempt to back out the variable name from the
    # Distribution name is robust to name scoping.
    with tf.name_scope("nested_scope"):
      dist = tfd.Normal(1., 0.1, name="x")

    def model_scoped():
      return ed.as_random_variable(dist)

    log_joint_scoped = ed.make_log_joint_fn(model_scoped)
    self.assertEqual(self.evaluate(log_joint_builtin(x=7.)),
                     self.evaluate(log_joint_scoped(x=7.)))
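# Standard test entry point (assumed; not present in the excerpt above).
if __name__ == "__main__":
  tf.test.main()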