def testNonInjectiveTransformedDistribution(self):
  mu = 1.
  sigma = 2.0
  abs_normal = self._cls()(
      distribution=tfd.Normal(loc=mu, scale=sigma),
      bijector=tfb.AbsoluteValue(),
      validate_args=True)
  sp_normal = stats.norm(mu, sigma)

  # Sample.
  sample = abs_normal.sample(100000, seed=test_util.test_seed())
  self.assertAllEqual([], abs_normal.event_shape)
  sample_ = self.evaluate(sample)
  self.assertAllEqual([], self.evaluate(abs_normal.event_shape_tensor()))

  # Abs > 0, duh!
  np.testing.assert_array_less(0, sample_)

  # Let X ~ Normal(mu, sigma), Y := |X|, then
  # P[Y < 0.77] = P[-0.77 < X < 0.77].
  self.assertAllClose(sp_normal.cdf(0.77) - sp_normal.cdf(-0.77),
                      (sample_ < 0.77).mean(), rtol=0.01)

  # p_Y(y) = p_X(-y) + p_X(y).
  self.assertAllClose(
      sp_normal.pdf(1.13) + sp_normal.pdf(-1.13),
      self.evaluate(abs_normal.prob(1.13)))

  # Log[p_Y(y)] = Log[p_X(-y) + p_X(y)].
  self.assertAllClose(np.log(sp_normal.pdf(2.13) + sp_normal.pdf(-2.13)),
                      self.evaluate(abs_normal.log_prob(2.13)))

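# A standalone cross-check of the same folded-density math using only scipy:
# |Normal(mu, sigma)| is the folded normal with shape c = mu / sigma. This is
# a hedged sketch, not part of the original suite; the test name is
# illustrative and the `stats.foldnorm` parameterization (shape `c`, `scale`)
# follows scipy's documented convention.
def testAbsNormalMatchesScipyFoldedNormalSketch(self):
  mu = 1.
  sigma = 2.0
  sp_normal = stats.norm(mu, sigma)
  sp_folded = stats.foldnorm(c=mu / sigma, scale=sigma)
  # foldnorm's density is exactly the two-branch sum p_X(y) + p_X(-y).
  self.assertAllClose(sp_normal.pdf(1.13) + sp_normal.pdf(-1.13),
                      sp_folded.pdf(1.13))
  # And its CDF is the folded probability P[-t < X < t].
  self.assertAllClose(sp_normal.cdf(0.77) - sp_normal.cdf(-0.77),
                      sp_folded.cdf(0.77))
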
def testCovarianceNotImplemented(self):
  mvn = tfd.MultivariateNormalDiag(loc=[0., 0.], scale_diag=[1., 2.])

  # Non-affine bijector.
  with self.assertRaisesRegex(
      NotImplementedError, '`covariance` is not implemented'):
    tfd.TransformedDistribution(
        distribution=mvn, bijector=tfb.Exp()).covariance()

  # Non-injective bijector.
  with self.assertRaisesRegex(
      NotImplementedError, '`covariance` is not implemented'):
    tfd.TransformedDistribution(
        distribution=mvn, bijector=tfb.AbsoluteValue()).covariance()

  # Non-vector event shape.
  with self.assertRaisesRegex(
      NotImplementedError, '`covariance` is only implemented'):
    tfd.TransformedDistribution(
        distribution=mvn,
        bijector=tfb.Reshape(event_shape_out=[2, 1],
                             event_shape_in=[2])).covariance()

  # Multipart bijector.
  with self.assertRaisesRegex(
      NotImplementedError, '`covariance` is only implemented'):
    tfd.TransformedDistribution(
        distribution=mvn, bijector=tfb.Split(2)).covariance()

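# Why only the affine, injective, vector-event case is tractable: under an
# affine map Y = A X + b, Cov[Y] = A Cov[X] A^T with no integration required,
# which is the identity a `covariance` implementation can exploit. A numpy-only
# sketch (the test name, matrix `a`, and Monte Carlo check are illustrative,
# not TFP API):
def testAffineCovarianceIdentitySketch(self):
  cov_x = np.diag([1., 4.])  # Covariance of MVN(scale_diag=[1., 2.]).
  a = np.array([[2., 0.], [1., 3.]])
  x = np.random.RandomState(0).multivariate_normal(
      [0., 0.], cov_x, size=100000)
  y = x.dot(a.T)
  self.assertAllClose(a.dot(cov_x).dot(a.T),
                      np.cov(y, rowvar=False),
                      rtol=0.05, atol=0.2)
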
def testCompositeTensor(self):
  # Round-trip the bijector through `tf.nest` as a CompositeTensor and check
  # that the reconstructed object computes the same forward transform,
  # including across a `tf.function` boundary.
  bijector = tfb.AbsoluteValue(validate_args=True)
  flat = tf.nest.flatten(bijector, expand_composites=True)
  unflat = tf.nest.pack_sequence_as(bijector, flat, expand_composites=True)
  x = tf.convert_to_tensor([0., 1., -1.])
  self.assertAllClose(
      bijector.forward(x),
      tf.function(lambda b_: b_.forward(x))(unflat))

def testBijectorVersusNumpyRewriteOfBasicFunctionsEventNdims0(self):
  bijector = tfb.AbsoluteValue(validate_args=True)
  self.assertStartsWith(bijector.name, "absolute_value")
  x = tf.constant([[0., 1., -1.], [0., -5., 3.]])  # Shape [2, 3].
  y = tf.math.abs(x)
  y_ = self.evaluate(y)
  self.assertAllClose(y_, self.evaluate(bijector.forward(x)))
  # The inverse is tuple-valued: each nonnegative y has the preimages -y, y.
  self.assertAllClose((-y_, y_), self.evaluate(bijector.inverse(y)))
  # Both branches have |d(+/-y)/dy| = 1, so the ILDJ is zero on each.
  self.assertAllClose(
      (0., 0.),
      self.evaluate(bijector.inverse_log_det_jacobian(y, event_ndims=0)))
  # Run things twice to make sure there are no issues in caching the tuples
  # returned by .inverse*.
  self.assertAllClose(y_, self.evaluate(bijector.forward(x)))
  self.assertAllClose((-y_, y_), self.evaluate(bijector.inverse(y)))
  self.assertAllClose(
      (0., 0.),
      self.evaluate(bijector.inverse_log_det_jacobian(y, event_ndims=0)))

def testNegativeYRaisesForILDJIfValidateArgs(self):
  with self.test_session() as sess:
    bijector = tfb.AbsoluteValue(validate_args=True)
    with self.assertRaisesOpError("y was negative"):
      sess.run(bijector.inverse_log_det_jacobian(-1., event_ndims=0))

def testNegativeYRaisesForInverseIfValidateArgs(self):
  with self.test_session() as sess:
    bijector = tfb.AbsoluteValue(validate_args=True)
    with self.assertRaisesOpError("y was negative"):
      sess.run(bijector.inverse(-1.))

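# The two session-based checks above can also be written without an explicit
# session; a sketch of the eager-compatible form (assumes `self.evaluate` from
# `tf.test.TestCase`; the test name is illustrative):
def testNegativeYRaisesForInverseIfValidateArgsEager(self):
  bijector = tfb.AbsoluteValue(validate_args=True)
  with self.assertRaisesOpError("y was negative"):
    self.evaluate(bijector.inverse(-1.))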