def testBijector(self):
  with self.cached_session():
    for fwd in [
        tfb.Identity(),
        tfb.Exp(),
        tfb.Affine(shift=[0., 1.], scale_diag=[2., 3.]),
        tfb.Softplus(),
        tfb.SoftmaxCentered(),
    ]:
      rev = tfb.Invert(fwd)
      self.assertEqual("_".join(["invert", fwd.name]), rev.name)
      x = [[[1., 2.], [2., 3.]]]
      self.assertAllClose(self.evaluate(fwd.inverse(x)),
                          self.evaluate(rev.forward(x)))
      self.assertAllClose(self.evaluate(fwd.forward(x)),
                          self.evaluate(rev.inverse(x)))
      self.assertAllClose(
          self.evaluate(fwd.forward_log_det_jacobian(x, event_ndims=1)),
          self.evaluate(rev.inverse_log_det_jacobian(x, event_ndims=1)))
      self.assertAllClose(
          self.evaluate(fwd.inverse_log_det_jacobian(x, event_ndims=1)),
          self.evaluate(rev.forward_log_det_jacobian(x, event_ndims=1)))
def testTransformedDistribution(self):
  mu = 3.0
  sigma = 2.0
  log_normal = tfd.TransformedDistribution(
      distribution=tfd.Normal(loc=mu, scale=sigma),
      bijector=tfb.Exp(),
      validate_args=True)
  sp_dist = stats.lognorm(s=sigma, scale=np.exp(mu))

  # sample
  sample = log_normal.sample(100000, seed=test_util.test_seed())
  self.assertAllEqual([], log_normal.event_shape)
  self.assertAllEqual([], self.evaluate(log_normal.event_shape_tensor()))
  self.assertAllClose(
      sp_dist.mean(), np.mean(self.evaluate(sample)), atol=0.0, rtol=0.05)

  # pdf, log_pdf, cdf, etc...
  # The mean of the lognormal is around 148.
  test_vals = np.linspace(0.1, 1000., num=20).astype(np.float32)
  for func in [[log_normal.log_prob, sp_dist.logpdf],
               [log_normal.prob, sp_dist.pdf],
               [log_normal.log_cdf, sp_dist.logcdf],
               [log_normal.cdf, sp_dist.cdf],
               [log_normal.survival_function, sp_dist.sf],
               [log_normal.log_survival_function, sp_dist.logsf]]:
    actual = func[0](test_vals)
    expected = func[1](test_vals)
    self.assertAllClose(expected, self.evaluate(actual), atol=0, rtol=0.01)
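# Added for illustration (not part of the original suite): a minimal,
# self-contained sketch of the scipy correspondence the test above relies on.
# `stats.lognorm(s=sigma, scale=np.exp(mu))` parameterizes the same
# distribution as `Exp`-transforming a `Normal(mu, sigma)`. Assumes
# eager-mode TF2 and the aliases used above (`np`, `stats`, `tfb`, `tfd`).
def _lognormal_scipy_correspondence_sketch(mu=3.0, sigma=2.0):
  log_normal = tfd.TransformedDistribution(
      distribution=tfd.Normal(loc=mu, scale=sigma), bijector=tfb.Exp())
  sp_dist = stats.lognorm(s=sigma, scale=np.exp(mu))
  pts = np.linspace(0.1, 1000., num=20).astype(np.float32)
  # The two densities agree pointwise, up to numerical tolerance.
  np.testing.assert_allclose(
      sp_dist.pdf(pts), log_normal.prob(pts).numpy(), rtol=1e-4)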
def testScalarCongruency(self):
  with self.test_session():
    bijector = tfb.Exp()
    assert_scalar_congruency(bijector, lower_x=-2., upper_x=1.5, rtol=0.05)
def testCovarianceNotImplemented(self):
  mvn = tfd.MultivariateNormalDiag(loc=[0., 0.], scale_diag=[1., 2.])

  # Non-affine bijector.
  with self.assertRaisesRegex(
      NotImplementedError, '`covariance` is not implemented'):
    tfd.TransformedDistribution(
        distribution=mvn, bijector=tfb.Exp()).covariance()

  # Non-injective bijector.
  with self.assertRaisesRegex(
      NotImplementedError, '`covariance` is not implemented'):
    tfd.TransformedDistribution(
        distribution=mvn, bijector=tfb.AbsoluteValue()).covariance()

  # Non-vector event shape.
  with self.assertRaisesRegex(
      NotImplementedError, '`covariance` is only implemented'):
    tfd.TransformedDistribution(
        distribution=mvn,
        bijector=tfb.Reshape(event_shape_out=[2, 1],
                             event_shape_in=[2])).covariance()

  # Multipart bijector.
  with self.assertRaisesRegex(
      NotImplementedError, '`covariance` is only implemented'):
    tfd.TransformedDistribution(
        distribution=mvn, bijector=tfb.Split(2)).covariance()
def testBijector(self):
  x = np.float32(np.random.randn(3, 4, 4))

  y = x.copy()
  for i in range(x.shape[0]):
    np.fill_diagonal(y[i, :, :], np.exp(np.diag(x[i, :, :])))

  exp = tfb.Exp()
  b = tfb.TransformDiagonal(diag_bijector=exp)

  y_ = self.evaluate(b.forward(x))
  self.assertAllClose(y, y_)

  x_ = self.evaluate(b.inverse(y))
  self.assertAllClose(x, x_)

  fldj = self.evaluate(b.forward_log_det_jacobian(x, event_ndims=2))
  ildj = self.evaluate(b.inverse_log_det_jacobian(y, event_ndims=2))
  self.assertAllEqual(
      fldj,
      self.evaluate(exp.forward_log_det_jacobian(
          np.array([np.diag(x_mat) for x_mat in x]), event_ndims=1)))
  self.assertAllEqual(
      ildj,
      self.evaluate(exp.inverse_log_det_jacobian(
          np.array([np.diag(y_mat) for y_mat in y]), event_ndims=1)))
def testDocstringExample(self):
  exp_gamma_distribution = tfd.TransformedDistribution(
      distribution=tfd.Gamma(concentration=1., rate=2.),
      bijector=tfb.Invert(tfb.Exp()))
  self.assertAllEqual(
      [], self.evaluate(tf.shape(exp_gamma_distribution.sample())))
def testScalarCongruency(self):
  chain = tfb.Chain((tfb.Exp(), tfb.Softplus()))
  bijector_test_util.assert_scalar_congruency(
      chain, lower_x=1e-3, upper_x=1.5, rtol=0.05, eval_func=self.evaluate)
def testCompositeTensor(self):
  exp = tfb.Exp()
  sp = tfb.Softplus()
  aff = tfb.Scale(scale=2.)
  chain = tfb.Chain(bijectors=[exp, sp, aff])
  self.assertIsInstance(chain, tf.__internal__.CompositeTensor)

  # Bijector may be flattened into `Tensor` components and rebuilt.
  flat = tf.nest.flatten(chain, expand_composites=True)
  unflat = tf.nest.pack_sequence_as(chain, flat, expand_composites=True)
  self.assertIsInstance(unflat, tfb.Chain)

  # Bijector may be input to a `tf.function`-decorated callable.
  @tf.function
  def call_forward(bij, x):
    return bij.forward(x)

  x = tf.ones([2, 3], dtype=tf.float32)
  self.assertAllClose(call_forward(unflat, x), chain.forward(x))

  # TypeSpec can be encoded/decoded.
  struct_coder = tf.__internal__.saved_model.StructureCoder()
  enc = struct_coder.encode_structure(chain._type_spec)
  dec = struct_coder.decode_proto(enc)
  self.assertEqual(chain._type_spec, dec)
def test_single_part_str_repr_match_expected(self):
  bij = tfb.Exp()
  self.assertContainsInOrder(
      ['tfp.bijectors.Exp("exp", batch_shape=[], min_event_ndims=0)'],
      str(bij))
  self.assertContainsInOrder([
      "<tfp.bijectors.Exp 'exp' batch_shape=[] forward_min_event_ndims=0 "
      "inverse_min_event_ndims=0 dtype_x=? dtype_y=?>"
  ], repr(bij))

  bij = tfb.Scale([1., 1.], name='myscale')
  self.assertContainsInOrder([
      'tfp.bijectors.Scale("myscale", batch_shape=[2], min_event_ndims=0, '
      'dtype=float32)'
  ], str(bij))
  self.assertContainsInOrder([
      "<tfp.bijectors.Scale 'myscale' batch_shape=[2] "
      "forward_min_event_ndims=0 inverse_min_event_ndims=0 dtype_x=float32 "
      "dtype_y=float32>"
  ], repr(bij))

  bij = tfb.Split([3, 4, 2], name='s_p_l_i_t')
  self.assertContainsInOrder([
      'tfp.bijectors.Split("s_p_l_i_t", batch_shape=[], '
      'forward_min_event_ndims=1, inverse_min_event_ndims=[1, 1, 1])'
  ], str(bij))
  self.assertContainsInOrder([
      "<tfp.bijectors.Split 's_p_l_i_t' batch_shape=[] "
      "forward_min_event_ndims=1 inverse_min_event_ndims=[1, 1, 1] "
      "dtype_x=? dtype_y=[?, ?, ?]>"
  ], repr(bij))
def testScalarCongruency(self):
  bijector = tfb.Exp()
  bijector_test_util.assert_scalar_congruency(
      bijector, lower_x=-2., upper_x=1.5, eval_func=self.evaluate, rtol=0.05)
def testRaisesBadBlocks(self):
  with self.assertRaisesRegex(
      ValueError,
      r'`block_sizes` must be `None`, or a vector of the same length as '
      r'`bijectors`. Got a `Tensor` with shape \(2L?,\) and `bijectors` of '
      r'length 1'):
    tfb.Blockwise(bijectors=[tfb.Exp()], block_sizes=[1, 2])
def testRaisesBadBlocksDynamic(self):
  if tfe.executing_eagerly():
    return

  with self.assertRaises(tf.errors.InvalidArgumentError):
    block_sizes = tf.placeholder_with_default([1, 2], shape=None)
    blockwise = tfb.Blockwise(
        bijectors=[tfb.Exp()], block_sizes=block_sizes, validate_args=True)
    self.evaluate(blockwise.block_sizes)

  with self.assertRaises(tf.errors.InvalidArgumentError):
    block_sizes = tf.placeholder_with_default([[1]], shape=None)
    blockwise = tfb.Blockwise(
        bijectors=[tfb.Exp()], block_sizes=block_sizes, validate_args=True)
    self.evaluate(blockwise.block_sizes)
def testDofChangeError(self):
  exp = tfb.Exp()
  smc = tfb.SoftmaxCentered()

  # Increase in event-size is the last step. No problems here.
  safe_bij = tfb.Chain([smc, exp],
                       validate_args=True,
                       validate_event_size=True)
  self.evaluate(safe_bij.forward_log_det_jacobian([1., 2., 3.], 1))

  # Increase in event-size before Exp.
  raise_bij = tfb.Chain([exp, smc],
                        validate_args=True,
                        validate_event_size=True)
  with self.assertRaisesRegex(
      (ValueError, tf.errors.InvalidArgumentError),
      r".+degrees of freedom.+"):
    self.evaluate(raise_bij.forward_log_det_jacobian([1., 2., 3.], 1))

  # When validate_args is False, warns instead of raising.
  warn_bij = tfb.Chain([exp, smc],
                       validate_args=False,
                       validate_event_size=True)
  with mock.patch.object(tf, "print", return_value=tf.no_op()) as mock_print:
    self.evaluate(warn_bij.forward_log_det_jacobian([1., 2., 3.], 1))
    print_args, _ = mock_print.call_args
    self.assertRegex(print_args[0], r"WARNING:.+degrees of freedom")

  # When validate_event_size is False, neither warns nor raises.
  ignore_bij = tfb.Chain([exp, smc], validate_event_size=False)
  self.evaluate(ignore_bij.forward_log_det_jacobian([1., 2., 3.], 1))
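# Added sketch (not from the original file) of the ordering that drives the
# test above: `tfb.Chain` applies its list right to left, so
# `Chain([exp, smc])` runs `SoftmaxCentered` first, growing a length-3 event
# to length 4 before `Exp` sees it; that size change is what
# `validate_event_size` polices. Assumes eager-mode TF2 and the aliases above.
def _chain_ordering_sketch():
  exp, smc = tfb.Exp(), tfb.SoftmaxCentered()
  x = tf.constant([1., 2., 3.])
  # Chain([exp, smc]).forward(x) == exp.forward(smc.forward(x)).
  np.testing.assert_allclose(
      tfb.Chain([exp, smc]).forward(x).numpy(),
      exp.forward(smc.forward(x)).numpy(),
      rtol=1e-6)
  # SoftmaxCentered adds one degree of freedom: 3 -> 4 elements.
  assert smc.forward(x).shape == (4,)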
def test_caches(self):
  if mock is None:
    return

  x_ = np.array([[-0.1, 0.2], [0.3, -0.4]], np.float32)
  y_ = np.exp(x_)
  b = tfb.Exp()

  # We will intercept calls to TF to ensure np.array objects don't get
  # converted to tf.Tensor objects.
  with mock.patch.object(tf, 'convert_to_tensor', return_value=x_):
    with mock.patch.object(tf, 'exp', return_value=y_):
      y = b.forward(x_)
      self.assertIsInstance(y, np.ndarray)
      self.assertAllEqual([x_], [k() for k in b._from_x.keys()])

  with mock.patch.object(tf, 'convert_to_tensor', return_value=y_):
    with mock.patch.object(tf.math, 'log', return_value=x_):
      x = b.inverse(y_)
      self.assertIsInstance(x, np.ndarray)
      self.assertIs(x, b.inverse(y))
      self.assertAllEqual([y_], [k() for k in b._from_y.keys()])

  yt_ = y_.T
  xt_ = x_.T
  with mock.patch.object(tf, 'convert_to_tensor', return_value=yt_):
    with mock.patch.object(tf.math, 'log', return_value=xt_):
      xt = b.inverse(yt_)
      self.assertIsNot(x, xt)
      self.assertIs(xt_, xt)
def __init__(self,
             loc=None,
             scale=None,
             validate_args=False,
             allow_nan_stats=True,
             name="LogNormal"):
  """Construct a log-normal distribution.

  The LogNormal distribution models positive-valued random variables whose
  logarithm is normally distributed with mean `loc` and standard deviation
  `scale`. It is constructed as the exponential transformation of a Normal
  distribution.

  Args:
    loc: Floating-point `Tensor`; the means of the underlying Normal
      distribution(s).
    scale: Floating-point `Tensor`; the stddevs of the underlying Normal
      distribution(s).
    validate_args: Python `bool`, default `False`. Whether to validate input
      with asserts. If `validate_args` is `False`, and the inputs are
      invalid, correct behavior is not guaranteed.
    allow_nan_stats: Python `bool`, default `True`. If `False`, raise an
      exception if a statistic (e.g. mean/mode/etc...) is undefined for any
      batch member. If `True`, batch members with valid parameters leading
      to undefined statistics will return NaN for this statistic.
    name: The name to give Ops created by the initializer.
  """
  with tf.name_scope(name, values=[loc, scale]) as name:
    dtype = dtype_util.common_dtype([loc, scale], tf.float32)
    super(LogNormal, self).__init__(
        distribution=normal.Normal(
            loc=tf.convert_to_tensor(loc, name="loc", dtype=dtype),
            scale=tf.convert_to_tensor(scale, name="scale", dtype=dtype)),
        bijector=bijectors.Exp(),
        validate_args=validate_args,
        name=name)
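# Added usage sketch (not part of the original source): since `LogNormal`
# above is just `TransformedDistribution(Normal(loc, scale), Exp())`, its
# log-density can be cross-checked against building the transformed
# distribution by hand. Assumes eager-mode TF2 with `tfd`/`tfb` aliasing
# `tfp.distributions`/`tfp.bijectors`.
def _lognormal_equivalence_sketch():
  direct = tfd.LogNormal(loc=0.5, scale=2.)
  via_bijector = tfd.TransformedDistribution(
      distribution=tfd.Normal(loc=0.5, scale=2.), bijector=tfb.Exp())
  x = tf.constant([0.1, 1., 5.])
  np.testing.assert_allclose(
      direct.log_prob(x).numpy(), via_bijector.log_prob(x).numpy(),
      rtol=1e-6)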
def test_nested_transform(self):
  target_dist = tfd.Normal(loc=0., scale=1.)
  b1 = tfb.Scale(0.5)
  b2 = tfb.Exp()
  chain = tfb.Chain([b2, b1])  # Applies bijectors right to left (b1, then b2).
  inner_kernel = tfp.mcmc.TransformedTransitionKernel(
      inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
          target_log_prob_fn=target_dist.log_prob,
          num_leapfrog_steps=27,
          step_size=10),
      bijector=b1)
  outer_kernel = tfp.mcmc.TransformedTransitionKernel(
      inner_kernel=inner_kernel, bijector=b2)
  chain_kernel = tfp.mcmc.TransformedTransitionKernel(
      inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
          target_log_prob_fn=target_dist.log_prob,
          num_leapfrog_steps=27,
          step_size=10),
      bijector=chain)

  outer_pkr_one, outer_pkr_two = self.evaluate([
      outer_kernel.bootstrap_results(2.),
      outer_kernel.bootstrap_results(9.),
  ])

  # The outermost kernel applies only the outermost bijector.
  self.assertNear(np.log(2.), outer_pkr_one.transformed_state, err=1e-6)
  self.assertNear(np.log(9.), outer_pkr_two.transformed_state, err=1e-6)

  chain_pkr_one, chain_pkr_two = self.evaluate([
      chain_kernel.bootstrap_results(2.),
      chain_kernel.bootstrap_results(9.),
  ])

  # All bijectors are applied to the inner kernel, from innermost to
  # outermost; this behavior is completely analogous to a bijector Chain.
  self.assertNear(chain_pkr_one.transformed_state,
                  outer_pkr_one.inner_results.transformed_state,
                  err=1e-6)
  self.assertEqual(chain_pkr_one.inner_results.accepted_results,
                   outer_pkr_one.inner_results.inner_results.accepted_results)
  self.assertNear(chain_pkr_two.transformed_state,
                  outer_pkr_two.inner_results.transformed_state,
                  err=1e-6)
  self.assertEqual(chain_pkr_two.inner_results.accepted_results,
                   outer_pkr_two.inner_results.inner_results.accepted_results)

  seed = test_util.test_seed(sampler_type='stateless')
  outer_results_one, outer_results_two = self.evaluate([
      outer_kernel.one_step(2., outer_pkr_one, seed=seed),
      outer_kernel.one_step(9., outer_pkr_two, seed=seed)
  ])
  chain_results_one, chain_results_two = self.evaluate([
      chain_kernel.one_step(2., chain_pkr_one, seed=seed),
      chain_kernel.one_step(9., chain_pkr_two, seed=seed)
  ])
  self.assertNear(chain_results_one[0], outer_results_one[0], err=1e-6)
  self.assertNear(chain_results_two[0], outer_results_two[0], err=1e-6)
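# Added sketch (not in the original test) of the state transformation the
# assertions above depend on: `TransformedTransitionKernel` stores
# `bijector.inverse(state)` as its transformed state, and
# `Chain([b2, b1]).inverse` composes the inverses outermost-first, matching
# the stacked kernels. Assumes eager-mode TF2 and the aliases used above.
def _nested_vs_chain_inverse_sketch(state=2.):
  b1, b2 = tfb.Scale(0.5), tfb.Exp()
  chain = tfb.Chain([b2, b1])  # forward: b1 then b2; inverse: b2 then b1.
  np.testing.assert_allclose(
      chain.inverse(state).numpy(),
      b1.inverse(b2.inverse(state)).numpy(),
      rtol=1e-6)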
def testBijectorWithDeepStructure(self):
  bij = tfb.JointMap({
      'a': tfb.Exp(),
      'bc': tfb.JointMap([tfb.Scale(2.), tfb.Shift(3.)])
  })

  a = np.asarray([[[1, 2], [2, 3]]], dtype=np.float32)  # shape=[1, 2, 2]
  b = np.asarray([[0, 4]], dtype=np.float32)            # shape=[1, 2]
  c = np.asarray([[5, 6]], dtype=np.float32)            # shape=[1, 2]

  inputs = {'a': a, 'bc': [b, c]}  # Could be inputs to forward or inverse.
  event_ndims = {'a': 1, 'bc': [0, 0]}

  self.assertStartsWith(bij.name, 'jointmap_of_exp_and_jointmap_of_')
  self.assertAllCloseNested({'a': np.exp(a), 'bc': [b * 2., c + 3]},
                            self.evaluate(bij.forward(inputs)))
  self.assertAllCloseNested({'a': np.log(a), 'bc': [b / 2., c - 3]},
                            self.evaluate(bij.inverse(inputs)))

  fldj = self.evaluate(bij.forward_log_det_jacobian(inputs, event_ndims))
  self.assertEqual((1, 2), fldj.shape)
  self.assertAllClose(np.sum(a, axis=-1) + np.log(2), fldj)

  ildj = self.evaluate(bij.inverse_log_det_jacobian(inputs, event_ndims))
  self.assertEqual((1, 2), ildj.shape)
  self.assertAllClose(-np.log(a).sum(axis=-1) - np.log(2), ildj)
def testBatchShapeBroadcasts(self):
  bij = tfb.JointMap({'a': tfb.Exp(), 'b': tfb.Scale(10.)},
                     validate_args=True)
  self.assertStartsWith(bij.name, 'jointmap_of_exp_and_scale')

  a = np.asarray([[[1, 2]], [[2, 3]]], dtype=np.float32)  # shape=[2, 1, 2]
  b = np.asarray([[0, 1, 2]], dtype=np.float32)           # shape=[1, 3]

  inputs = {'a': a, 'b': b}  # Could be inputs to forward or inverse.

  self.assertAllClose(
      a.sum(axis=-1) + np.log(10.),
      self.evaluate(bij.forward_log_det_jacobian(inputs, {'a': 1, 'b': 0})))

  self.assertAllClose(
      a.sum(axis=-1) + 3 * np.log(10.),
      self.evaluate(bij.forward_log_det_jacobian(inputs, {'a': 1, 'b': 1})))
def testNonCompositeTensor(self):
  # TODO(b/182603117): Move NonComposite* into test_util.
  class NonCompositeScale(tfb.Bijector):
    """Bijector that is not a `CompositeTensor`."""

    def __init__(self, scale):
      parameters = dict(locals())
      self.scale = scale
      super(NonCompositeScale, self).__init__(
          validate_args=True,
          forward_min_event_ndims=0,
          parameters=parameters,
          name='non_composite_scale')

    def _forward(self, x):
      return x * self.scale

  exp = tfb.Exp()
  scale = NonCompositeScale(scale=tf.constant(3.))
  bij = tfb.JointMap(bijectors=[exp, scale])
  self.assertNotIsInstance(bij, tf.__internal__.CompositeTensor)
  self.assertAllCloseNested(
      bij.forward([1., 1.]),
      [exp.forward(1.), scale.forward(1.)])
def testScalarCongruency(self):
  with self.test_session():
    chain = tfb.Chain((tfb.Exp(), tfb.Softplus()))
    assert_scalar_congruency(chain, lower_x=1e-3, upper_x=1.5, rtol=0.05)
def test_composition_str_and_repr_match_expected_dynamic_shape(self):
  bij = tfb.Chain([
      tfb.Exp(),
      tfb.Shift(self._tensor([1., 2.])),
      tfb.SoftmaxCentered()
  ])
  self.assertContainsInOrder([
      'tfp.bijectors.Chain(',
      ('min_event_ndims=1, bijectors=[Exp, Shift, SoftmaxCentered])')
  ], str(bij))
  self.assertContainsInOrder([
      '<tfp.bijectors.Chain ',
      ('batch_shape=? forward_min_event_ndims=1 inverse_min_event_ndims=1 '
       'dtype_x=float32 dtype_y=float32 bijectors=[<tfp.bijectors.Exp'),
      '>, <tfp.bijectors.Shift', '>, <tfp.bijectors.SoftmaxCentered', '>]>'
  ], repr(bij))

  bij = tfb.Chain([
      tfb.JointMap({
          'a': tfb.Exp(),
          'b': tfb.ScaleMatvecDiag(self._tensor([2., 2.]))
      }),
      tfb.Restructure({'a': 0, 'b': 1}, [0, 1]),
      tfb.Split(2),
      tfb.Invert(tfb.SoftmaxCentered()),
  ])
  self.assertContainsInOrder([
      'tfp.bijectors.Chain(',
      ('forward_min_event_ndims=1, '
       'inverse_min_event_ndims={a: 1, b: 1}, '
       'bijectors=[JointMap({a: Exp, b: ScaleMatvecDiag}), '
       'Restructure, Split, Invert(SoftmaxCentered)])')
  ], str(bij))
  self.assertContainsInOrder([
      '<tfp.bijectors.Chain ',
      ('batch_shape=? forward_min_event_ndims=1 '
       "inverse_min_event_ndims={'a': 1, 'b': 1} dtype_x=float32 "
       "dtype_y={'a': ?, 'b': float32} "
       'bijectors=[<tfp.bijectors.JointMap '),
      '>, <tfp.bijectors.Restructure', '>, <tfp.bijectors.Split',
      '>, <tfp.bijectors.Invert', '>]>'
  ], repr(bij))
def testJacobian(self):
  bijector = tfb.Exp()
  x = np.expand_dims(np.linspace(-1, 1, num=10), -1)
  fldj = bijector.forward_log_det_jacobian(x, event_ndims=1)
  fldj_theoretical = bijector_test_util.get_fldj_theoretical(
      bijector, x, event_ndims=1)
  fldj_, fldj_theoretical_ = self.evaluate([fldj, fldj_theoretical])
  self.assertAllClose(fldj_, fldj_theoretical_)
def test_bijector_valid_adapt_then_transform(self):
  new_kernel = make_adapt_then_transform_kernel(tfb.Exp())
  pkr_one, pkr_two = self.evaluate([
      new_kernel.bootstrap_results(2.),
      new_kernel.bootstrap_results(9.),
  ])
  self.assertNear(np.log(2.), pkr_one.transformed_state, err=1e-6)
  self.assertNear(np.log(9.), pkr_two.transformed_state, err=1e-6)
def testName(self):
  exp = tfb.Exp()
  sp = tfb.Softplus()
  aff = tfb.Affine(scale_diag=[2., 3., 4.])
  blockwise = tfb.Blockwise(bijectors=[exp, sp, aff], block_sizes=[2, 1, 3])
  self.assertStartsWith(blockwise.name,
                        'blockwise_of_exp_and_softplus_and_affine')
def testHandlesKwargs(self):
  x = tfb.Exp()(tfd.Normal(0, 1), event_shape=[4])
  y = tfd.Independent(tfd.LogNormal(tf.zeros(4), 1), 1)
  z = tf.constant([[1., 2, 3, 4], [0.5, 1.5, 2., 2.5]])
  self.assertAllClose(
      *self.evaluate([y.log_prob(z), x.log_prob(z)]), atol=0, rtol=1e-3)
def testComposeFromNonTransformedDistribution(self):
  actual_log_normal = tfb.Exp()(tfd.Normal(0.5, 2.))
  expected_log_normal = tfd.LogNormal(0.5, 2.)
  x = tf.constant([0.1, 1., 5.])
  self.assertAllClose(
      *self.evaluate([actual_log_normal.log_prob(x),
                      expected_log_normal.log_prob(x)]),
      atol=0, rtol=1e-3)
def testLDJRatio(self):
  q = tfb.JointMap({
      'a': tfb.Exp(),
      'b': tfb.Scale(2.),
      'c': tfb.Shift(3.)
  })
  p = tfb.JointMap({
      'a': tfb.Exp(),
      'b': tfb.Scale(3.),
      'c': tfb.Shift(4.)
  })

  a = np.asarray([[[1, 2], [2, 3]]], dtype=np.float32)  # shape=[1, 2, 2]
  b = np.asarray([[0, 4]], dtype=np.float32)            # shape=[1, 2]
  c = np.asarray([[5, 6]], dtype=np.float32)            # shape=[1, 2]

  x = {'a': a, 'b': b, 'c': c}
  y = {'a': a + 1, 'b': b + 1, 'c': c + 1}
  event_ndims = {'a': 1, 'b': 0, 'c': 0}

  fldj_ratio_true = p.forward_log_det_jacobian(
      x, event_ndims) - q.forward_log_det_jacobian(y, event_ndims)
  fldj_ratio = ldj_ratio.forward_log_det_jacobian_ratio(
      p, x, q, y, event_ndims)
  self.assertAllClose(fldj_ratio_true, fldj_ratio)

  ildj_ratio_true = p.inverse_log_det_jacobian(
      x, event_ndims) - q.inverse_log_det_jacobian(y, event_ndims)
  ildj_ratio = ldj_ratio.inverse_log_det_jacobian_ratio(
      p, x, q, y, event_ndims)
  self.assertAllClose(ildj_ratio_true, ildj_ratio)

  event_ndims = {'a': 1, 'b': 2, 'c': 0}

  fldj_ratio_true = p.forward_log_det_jacobian(
      x, event_ndims) - q.forward_log_det_jacobian(y, event_ndims)
  fldj_ratio = ldj_ratio.forward_log_det_jacobian_ratio(
      p, x, q, y, event_ndims)
  self.assertAllClose(fldj_ratio_true, fldj_ratio)

  ildj_ratio_true = p.inverse_log_det_jacobian(
      x, event_ndims) - q.inverse_log_det_jacobian(y, event_ndims)
  ildj_ratio = ldj_ratio.inverse_log_det_jacobian_ratio(
      p, x, q, y, event_ndims)
  self.assertAllClose(ildj_ratio_true, ildj_ratio)
def testBijectiveAndFinite(self):
  bijector = tfb.Exp()
  x = np.linspace(-10, 10, num=10).astype(np.float32)
  y = np.logspace(-10, 10, num=10).astype(np.float32)
  bijector_test_util.assert_bijective_and_finite(
      bijector, x, y, eval_func=self.evaluate, event_ndims=0)
def testSupportBijectorOutsideRange(self):
  log_normal = tfd.TransformedDistribution(
      distribution=tfd.Normal(loc=1., scale=2.),
      bijector=tfb.Exp(),
      validate_args=True)
  x = np.array([-4.2, -1e-6, -1.3])
  bijector_inverse_x = (
      log_normal._experimental_default_event_space_bijector().inverse(x))
  self.assertAllNan(self.evaluate(bijector_inverse_x))
def testNonCompositeTensor(self):
  exp = tfb.Exp()
  scale = test_util.NonCompositeTensorScale(scale=tf.constant(3.))
  blockwise = tfb.Blockwise(bijectors=[exp, scale])
  self.assertNotIsInstance(blockwise, tf.__internal__.CompositeTensor)
  self.assertAllClose(
      blockwise.forward([1., 1.]),
      tf.convert_to_tensor([exp.forward(1.), scale.forward(1.)]))