Example #1
 def testJacobian(self):
   bijector = tfb.MatrixInverseTriL()
   batch_size = 5
   for ndims in range(2, 5):
     x_ = np.tril(
         np.random.uniform(
             -1., 1., size=[batch_size, ndims, ndims]).astype(np.float64))
     fldj = bijector.forward_log_det_jacobian(x_, event_ndims=2)
     fldj_theoretical = bijector_test_util.get_fldj_theoretical(
         bijector, x_, event_ndims=2,
         input_to_unconstrained=tfb.Invert(tfb.FillTriangular()),
         output_to_unconstrained=tfb.Invert(tfb.FillTriangular()))
     fldj_, fldj_theoretical_ = self.evaluate([fldj, fldj_theoretical])
     self.assertAllClose(fldj_, fldj_theoretical_)
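Each of the FLDJ checks in these examples leans on the defining identity of `tfb.Invert`: the wrapper swaps its inner bijector's forward and inverse (and the corresponding log-det Jacobians). A minimal sketch of that identity outside the test harness (plain TF2 eager; nothing here comes from the snippet above):

import tensorflow as tf
import tensorflow_probability as tfp

tfb = tfp.bijectors

b = tfb.Exp()
inv = tfb.Invert(b)
x = tf.constant([0.5, 2.0])
# Invert(b).forward_log_det_jacobian(x) == b.inverse_log_det_jacobian(x).
tf.debugging.assert_near(
    inv.forward_log_det_jacobian(x, event_ndims=0),
    b.inverse_log_det_jacobian(x, event_ndims=0))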
Example #2
  def testMinEventNdimsWithPartiallyDependentJointMap(self):

    dependent = tfb.Chain([tfb.Split(2), tfb.Invert(tfb.Split(2))])
    wrap_in_list = tfb.Restructure(input_structure=[0, 1],
                                   output_structure=[[0, 1]])
    dependent_as_chain = tfb.Chain([
        tfb.Invert(wrap_in_list),
        tfb.JointMap([dependent]),
        wrap_in_list])
    self.assertAllEqualNested(dependent.forward_min_event_ndims,
                              dependent_as_chain.forward_min_event_ndims)
    self.assertAllEqualNested(dependent.inverse_min_event_ndims,
                              dependent_as_chain.inverse_min_event_ndims)
    self.assertAllEqualNested(dependent._parts_interact,
                              dependent_as_chain._parts_interact)
Example #3
  def test_unknown_event_rank(self):
    if tf.executing_eagerly():
      self.skipTest('Eager execution.')
    unknown_rank_dist = tfd.Independent(
        tfd.Normal(loc=tf.ones([2, 1, 3]), scale=2.),
        reinterpreted_batch_ndims=tf1.placeholder_with_default(1, shape=[]))
    td = tfd.TransformedDistribution(
        distribution=unknown_rank_dist,
        bijector=tfb.Scale(1.),
        validate_args=True)
    self.assertEqual(td.batch_shape, tf.TensorShape(None))
    self.assertEqual(td.event_shape, tf.TensorShape(None))
    self.assertAllEqual(td.batch_shape_tensor(), [2, 1])
    self.assertAllEqual(td.event_shape_tensor(), [3])

    joint_td = tfd.TransformedDistribution(
        distribution=tfd.JointDistributionSequentialAutoBatched(
            [unknown_rank_dist, unknown_rank_dist]),
        bijector=tfb.Invert(tfb.Split(2)),
        validate_args=True)
    # Note that the current behavior is conservative; we could also correctly
    # return a batch shape of `[]` in this case.
    self.assertEqual(joint_td.batch_shape, tf.TensorShape(None))
    self.assertEqual(joint_td.event_shape, tf.TensorShape(None))
    self.assertAllEqual(joint_td.batch_shape_tensor(), [])
    self.assertAllEqual(joint_td.event_shape_tensor(), [2, 1, 6])
Example #4
 def testScalarCongruency(self):
     bijector = tfb.Invert(tfb.Exp())
     bijector_test_util.assert_scalar_congruency(bijector,
                                                 lower_x=1e-3,
                                                 upper_x=1.5,
                                                 eval_func=self.evaluate,
                                                 rtol=0.05)
Example #5
 def testDocstringExample(self):
   with self.test_session():
     exp_gamma_distribution = (
         transformed_distribution_lib.TransformedDistribution(
             distribution=gamma_lib.Gamma(concentration=1., rate=2.),
             bijector=tfb.Invert(tfb.Exp())))
     self.assertAllEqual([], tf.shape(exp_gamma_distribution.sample()).eval())
Example #6
    def testWithLKJSamples(self, dimension, concentration):
        bijector = tfb.CorrelationCholesky()
        lkj_dist = lkj.LKJ(dimension=dimension,
                           concentration=np.float64(concentration),
                           input_output_cholesky=True)
        batch_size = 10
        y = self.evaluate(lkj_dist.sample([batch_size]))
        x = self.evaluate(bijector.inverse(y))

        bijector_test_util.assert_bijective_and_finite(bijector,
                                                       x,
                                                       y,
                                                       eval_func=self.evaluate,
                                                       event_ndims=1,
                                                       inverse_event_ndims=2,
                                                       rtol=1e-5)

        fldj = bijector.forward_log_det_jacobian(x, event_ndims=1)
        fldj_theoretical = bijector_test_util.get_fldj_theoretical(
            bijector,
            x,
            event_ndims=1,
            inverse_event_ndims=2,
            output_to_unconstrained=tfb.Invert(tfb.FillTriangular()))
        self.assertAllClose(self.evaluate(fldj_theoretical),
                            self.evaluate(fldj),
                            atol=1e-5,
                            rtol=1e-5)
Example #7
    def testCachedSamplesInvert(self):
        class ExpInverseOnly(tfb.Bijector):
            def __init__(self):
                super(ExpInverseOnly, self).__init__(inverse_min_event_ndims=0)

            def _inverse(self, y):
                return tf.math.log(y)

            def _inverse_log_det_jacobian(self, y):
                return -tf.math.log(y)

        exp_inverse_only = ExpInverseOnly()

        log_forward_only = tfb.Invert(exp_inverse_only)

        # The log bijector isn't defined over the whole real line, so we make
        # sigma small enough that draws from the base Normal are positive.
        mu = 2.
        sigma = 1e-2
        exp_normal = self._cls()(distribution=tfd.Normal(loc=mu, scale=sigma),
                                 bijector=log_forward_only,
                                 validate_args=True)

        sample = exp_normal.sample([2, 3],
                                   seed=test_util.test_seed(hardcoded_seed=42))
        sample_val, log_pdf_val = self.evaluate(
            [sample, exp_normal.log_prob(sample)])
        expected_log_pdf = sample_val + stats.norm.logpdf(
            np.exp(sample_val), loc=mu, scale=sigma)
        self.assertAllClose(expected_log_pdf, log_pdf_val, atol=0., rtol=1e-5)
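One asymmetry worth noting in this pattern: `Invert(b).inverse` delegates to the wrapped `_forward`, which `ExpInverseOnly` never implements, so only the forward direction is usable. A self-contained hedged sketch (the class is re-declared here just to keep the sketch runnable):

import tensorflow as tf
import tensorflow_probability as tfp

tfb = tfp.bijectors

class ExpInverseOnly(tfb.Bijector):
    def __init__(self):
        super().__init__(inverse_min_event_ndims=0)

    def _inverse(self, y):
        return tf.math.log(y)

log_forward_only = tfb.Invert(ExpInverseOnly())
print(log_forward_only.forward(tf.math.exp(2.)))  # ~2.0, via _inverse.
try:
    log_forward_only.inverse(2.)  # Delegates to the missing _forward.
except NotImplementedError:
    print('inverse is not defined for this wrapper')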
Example #8
 def testLogitBetaTargetConservation(self):
     logit_beta_dist = tfb.Invert(tfb.Sigmoid())(tfd.Beta(
         concentration0=1., concentration1=2.))
     self.evaluate(
         assert_univariate_target_conservation(self,
                                               logit_beta_dist,
                                               step_size=0.2))
Example #9
 def testBijector(self):
     with self.cached_session():
         for fwd in [
                 tfb.Identity(),
                 tfb.Exp(),
                 tfb.Affine(shift=[0., 1.], scale_diag=[2., 3.]),
                 tfb.Softplus(),
                 tfb.SoftmaxCentered(),
         ]:
             rev = tfb.Invert(fwd)
             self.assertEqual("_".join(["invert", fwd.name]), rev.name)
             x = [[[1., 2.], [2., 3.]]]
             self.assertAllClose(self.evaluate(fwd.inverse(x)),
                                 self.evaluate(rev.forward(x)))
             self.assertAllClose(self.evaluate(fwd.forward(x)),
                                 self.evaluate(rev.inverse(x)))
             self.assertAllClose(
                 self.evaluate(
                     fwd.forward_log_det_jacobian(x, event_ndims=1)),
                 self.evaluate(
                     rev.inverse_log_det_jacobian(x, event_ndims=1)))
             self.assertAllClose(
                 self.evaluate(
                     fwd.inverse_log_det_jacobian(x, event_ndims=1)),
                 self.evaluate(
                     rev.forward_log_det_jacobian(x, event_ndims=1)))
Example #10
 def testDocstringExample(self):
   exp_gamma_distribution = (
       tfd.TransformedDistribution(
           distribution=tfd.Gamma(concentration=1., rate=2.),
           bijector=tfb.Invert(tfb.Exp())))
   self.assertAllEqual(
       [], self.evaluate(tf.shape(exp_gamma_distribution.sample())))
Example #11
 def testScalarCongruency(self):
     with self.test_session():
         bijector = tfb.Invert(tfb.Exp())
         assert_scalar_congruency(bijector,
                                  lower_x=1e-3,
                                  upper_x=1.5,
                                  rtol=0.05)
Example #12
    def __init__(self,
                 base_kernel,
                 fixed_inputs,
                 diag_shift=None,
                 validate_args=False,
                 name='SchurComplement'):
        """Construct a SchurComplement kernel instance.

    Args:
      base_kernel: A `PositiveSemidefiniteKernel` instance, the kernel used to
        build the block matrices of which this kernel computes the Schur
        complement.
      fixed_inputs: A Tensor, representing a collection of inputs. The Schur
        complement that this kernel computes comes from a block matrix, whose
        bottom-right corner is derived from `base_kernel.matrix(fixed_inputs,
        fixed_inputs)`, and whose top-right and bottom-left pieces are
        constructed by computing the base_kernel at pairs of input locations
        together with these `fixed_inputs`. `fixed_inputs` is allowed to be an
        empty collection (either `None` or having a zero shape entry), in which
        case the kernel falls back to the trivial application of `base_kernel`
        to inputs. See class-level docstring for more details on the exact
        computation this does; `fixed_inputs` correspond to the `Z` structure
        discussed there. `fixed_inputs` is assumed to have shape `[b1, ..., bB,
        N, f1, ..., fF]` where the `b`'s are batch shape entries, the `f`'s are
        feature_shape entries, and `N` is the number of fixed inputs. Use of
        this kernel entails a 1-time O(N^3) cost of computing the Cholesky
        decomposition of the k(Z, Z) matrix. The batch shape elements of
        `fixed_inputs` must be broadcast compatible with
        `base_kernel.batch_shape`.
      diag_shift: A floating point scalar to be added to the diagonal of the
        `divisor_matrix` before computing its Cholesky decomposition.
      validate_args: If `True`, parameters are checked for validity despite
        possibly degrading runtime performance.
        Default value: `False`
      name: Python `str` name prefixed to Ops created by this class.
        Default value: `"SchurComplement"`
    """
        with tf.compat.v1.name_scope(name, values=[base_kernel,
                                                   fixed_inputs]) as name:
            dtype = dtype_util.common_dtype([base_kernel, fixed_inputs],
                                            tf.float32)
            self._base_kernel = base_kernel
            self._fixed_inputs = (None if fixed_inputs is None else
                                  tf.convert_to_tensor(value=fixed_inputs,
                                                       dtype=dtype))
            if not self._is_empty_fixed_inputs():
                # We create and store this matrix here so that we get the
                # caching benefit when we later access its Cholesky. If we
                # computed the matrix every time we needed the Cholesky, the
                # bijector cache wouldn't be hit.
                self._divisor_matrix = base_kernel.matrix(
                    fixed_inputs, fixed_inputs)
                if diag_shift is not None:
                    self._divisor_matrix = _add_diagonal_shift(
                        self._divisor_matrix, diag_shift)

            self._cholesky_bijector = tfb.Invert(tfb.CholeskyOuterProduct())
        super(SchurComplement, self).__init__(base_kernel.feature_ndims,
                                              dtype=dtype,
                                              name=name)
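For orientation, a hedged usage sketch of the finished kernel through the public API in recent TFP (`tfp.math.psd_kernels.SchurComplement`); the base kernel, shapes, and `diag_shift` value below are illustrative assumptions:

import numpy as np
import tensorflow_probability as tfp

tfk = tfp.math.psd_kernels

base = tfk.ExponentiatedQuadratic(amplitude=1., length_scale=0.5)
fixed = np.random.uniform(-1., 1., size=[10, 1]).astype(np.float32)
kernel = tfk.SchurComplement(
    base_kernel=base, fixed_inputs=fixed, diag_shift=np.float32(1e-5))

x = np.linspace(-1., 1., 5).astype(np.float32)[..., np.newaxis]
print(kernel.matrix(x, x).shape)  # (5, 5)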
Example #13
 def testTransformedDist(self):
     d = tfd.Independent(tfd.Normal(tf.zeros([4, 3, 2]), 1), 3)
     dt = tfb.Transpose([1, 0])(d)
     self.assertEqual((4, 3, 2), d.event_shape)
     self.assertEqual((4, 2, 3), dt.event_shape)
     dt = tfb.Invert(tfb.Transpose([1, 0, 2]))(d)
     self.assertEqual((4, 3, 2), d.event_shape)
     self.assertEqual((3, 4, 2), dt.event_shape)
Example #14
 def testNoReductionWhenEventNdimsIsOmitted(self):
   x = np.array([0.5, 2.]).astype(np.float32)
   bij = tfb.Invert(tfb.Exp())
   self.assertAllClose(
       -np.log(x),
       self.evaluate(bij.forward_log_det_jacobian(x)))
   self.assertAllClose(
       x,
       self.evaluate(bij.inverse_log_det_jacobian(x)))
Example #15
  def test_transform_parts_to_vector(self, known_split_sizes):
    batch_shape = [4, 2]
    true_split_sizes = [1, 3, 2]

    # Create a joint distribution with parts of the specified sizes.
    seed = test_util.test_seed_stream()
    component_dists = tf.nest.map_structure(
        lambda size: tfd.MultivariateNormalDiag(  # pylint: disable=g-long-lambda
            loc=tf.random.normal(batch_shape + [size], seed=seed()),
            scale_diag=tf.exp(
                tf.random.normal(batch_shape + [size], seed=seed()))),
        true_split_sizes)
    base_dist = tfd.JointDistributionSequential(component_dists)

    # Transform to a vector-valued distribution by concatenating the parts.
    bijector = tfb.Invert(tfb.Split(known_split_sizes, axis=-1))

    with self.assertRaisesRegexp(ValueError, 'Overriding the batch shape'):
      tfd.TransformedDistribution(base_dist, bijector, batch_shape=[3])

    with self.assertRaisesRegexp(ValueError, 'Overriding the event shape'):
      tfd.TransformedDistribution(base_dist, bijector, event_shape=[3])

    concat_dist = tfd.TransformedDistribution(base_dist, bijector)
    self.assertAllEqual(concat_dist.event_shape, [sum(true_split_sizes)])
    self.assertAllEqual(self.evaluate(concat_dist.event_shape_tensor()),
                        [sum(true_split_sizes)])
    self.assertAllEqual(concat_dist.batch_shape, batch_shape)
    self.assertAllEqual(self.evaluate(concat_dist.batch_shape_tensor()),
                        batch_shape)

    # Since the Split bijector has (constant) unit Jacobian, the transformed
    # entropy and mean/mode should match the base entropy and (split) base
    # mean/mode.
    self.assertAllEqual(*self.evaluate(
        (base_dist.entropy(), concat_dist.entropy())))

    self.assertAllEqual(*self.evaluate(
        (concat_dist.mean(), bijector.forward(base_dist.mean()))))
    self.assertAllEqual(*self.evaluate(
        (concat_dist.mode(), bijector.forward(base_dist.mode()))))

    # Since the Split bijector has zero forward log det Jacobian, the
    # transformed `log_prob` and `prob` should match the base distribution's.
    sample_shape = [3]
    x = base_dist.sample(sample_shape, seed=seed())
    y = bijector.forward(x)
    for attr in ('log_prob', 'prob'):
      base_attr = getattr(base_dist, attr)(x)
      concat_attr = getattr(concat_dist, attr)(y)
      self.assertAllClose(*self.evaluate((base_attr, concat_attr)))

    # Test that `.sample()` works and returns a result of the expected structure
    # and shape.
    y_sampled = concat_dist.sample(sample_shape, seed=seed())
    self.assertAllEqual(y.shape, y_sampled.shape)
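The bijector at the heart of this test, isolated: `tfb.Invert(tfb.Split(...))` maps a list of Tensors to their concatenation along the split axis (with zero log-det Jacobian, which is why entropy and log_prob carry over unchanged). A minimal sketch under those assumptions:

import tensorflow as tf
import tensorflow_probability as tfp

tfb = tfp.bijectors

concat = tfb.Invert(tfb.Split([1, 3, 2], axis=-1))
parts = [tf.ones([4, 2, s]) for s in [1, 3, 2]]
print(concat.forward(parts).shape)  # (4, 2, 6)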
Example #16
 def testJacobian(self):
   cholesky_to_vector = tfb.Chain([
       tfb.Invert(tfb.FillTriangular()),
       tfb.TransformDiagonal(tfb.Invert(tfb.Exp()))
   ])
   bijector = tfb.CholeskyToInvCholesky()
   for x in [np.array([[2.]], dtype=np.float64),
             np.array([[2., 0.], [3., 4.]], dtype=np.float64),
             np.array([[2., 0., 0.], [3., 4., 0.], [5., 6., 7.]],
                      dtype=np.float64)]:
     fldj = bijector.forward_log_det_jacobian(x, event_ndims=2)
     fldj_numerical = self._get_fldj_numerical(
         bijector, x, event_ndims=2, eps=1.e-6,
         input_to_vector=cholesky_to_vector,
         output_to_vector=cholesky_to_vector)
     fldj_, fldj_numerical_ = self.evaluate([fldj, fldj_numerical])
     self.assertAllClose(fldj_, fldj_numerical_)
Example #17
    def testSharedCaching(self):
        for fwd in [
                tfb.Exp(),
                tfb.Shift(2.),
        ]:
            x = tf.constant([0.5, -1.], dtype=tf.float32)
            inv = tfb.Invert(fwd)
            y = fwd.forward(x)

            self.assertIs(inv.forward(y), x)
            self.assertIs(inv.inverse(x), y)
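The `assertIs` calls check object identity, not just value equality: `Invert` shares the inner bijector's cache, so a round trip hands back the original Tensor rather than recomputing it. A minimal sketch of the same property (assuming eager mode, where the identity check reads most naturally):

import tensorflow as tf
import tensorflow_probability as tfp

tfb = tfp.bijectors

exp = tfb.Exp()
x = tf.constant([0.5, -1.])
y = exp.forward(x)
assert tfb.Invert(exp).forward(y) is x  # Cache hit: the same object back.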
Example #18
    def __init__(self,
                 loc,
                 scale,
                 validate_args=False,
                 allow_nan_stats=True,
                 name="Gumbel"):
        """Construct Gumbel distributions with location and scale `loc` and `scale`.

    The parameters `loc` and `scale` must be shaped in a way that supports
    broadcasting (e.g. `loc + scale` is a valid operation).

    Args:
      loc: Floating point tensor, the means of the distribution(s).
      scale: Floating point tensor, the scales of the distribution(s).
        `scale` must contain only positive values.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
        Default value: `False`.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value "`NaN`" to
        indicate the result is undefined. When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
        Default value: `True`.
      name: Python `str` name prefixed to Ops created by this class.
        Default value: `'Gumbel'`.

    Raises:
      TypeError: if loc and scale are different dtypes.
    """
        with tf.name_scope(name, values=[loc, scale]) as name:
            dtype = dtype_util.common_dtype([loc, scale],
                                            preferred_dtype=tf.float32)
            loc = tf.convert_to_tensor(loc, name="loc", dtype=dtype)
            scale = tf.convert_to_tensor(scale, name="scale", dtype=dtype)
            with tf.control_dependencies(
                [tf.assert_positive(scale)] if validate_args else []):
                loc = tf.identity(loc, name="loc")
                scale = tf.identity(scale, name="scale")
                tf.assert_same_float_dtype([loc, scale])
                self._gumbel_bijector = bijectors.Gumbel(
                    loc=loc, scale=scale, validate_args=validate_args)

            super(Gumbel, self).__init__(
                distribution=uniform.Uniform(low=tf.zeros([], dtype=loc.dtype),
                                             high=tf.ones([], dtype=loc.dtype),
                                             allow_nan_stats=allow_nan_stats),
                # The Gumbel bijector encodes the quantile
                # function as the forward, and hence needs to
                # be inverted.
                bijector=bijectors.Invert(self._gumbel_bijector),
                batch_shape=distribution_util.get_broadcast_shape(loc, scale),
                name=name)
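The same construction sketched against recent TFP, where the CDF bijector is named `GumbelCDF` (a version assumption; the snippet above uses the older `bijectors.Gumbel` name): pushing `Uniform(0, 1)` through the inverted CDF applies the quantile function, which is exactly how the class above builds the distribution.

import tensorflow_probability as tfp

tfd, tfb = tfp.distributions, tfp.bijectors

gumbel = tfd.TransformedDistribution(
    distribution=tfd.Uniform(low=0., high=1.),
    bijector=tfb.Invert(tfb.GumbelCDF(loc=0., scale=1.)))
print(gumbel.sample(3))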
Example #19
 def test_slice_single_param_bijector_composition(self):
   sliced = slicing._slice_single_param(
       tfb.JointMap({'a': tfb.Chain([
           tfb.Invert(tfb.Scale(tf.ones([4, 3, 1])))
       ])}),
       param_event_ndims={'a': 1},
       slices=make_slices[..., tf.newaxis, 2:, tf.newaxis],
       batch_shape=tf.constant([7, 4, 3]))
   self.assertAllEqual(
       list(tf.zeros([1, 4, 3])[..., tf.newaxis, 2:, tf.newaxis].shape),
       sliced.experimental_batch_shape_tensor(x_event_ndims={'a': 1}))
Example #20
 def __init__(self, loc, chol_precision_tril, name=None):
   super(MVNCholPrecisionTriL, self).__init__(
       distribution=tfd.Independent(tfd.Normal(tf.zeros_like(loc),
                                               scale=tf.ones_like(loc)),
                                    reinterpreted_batch_ndims=1),
       bijector=tfb.Chain([
           tfb.Shift(shift=loc),
           tfb.Invert(tfb.ScaleMatvecTriL(scale_tril=chol_precision_tril,
                                          adjoint=True)),
       ]),
       name=name)
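A hedged usage sketch for the class above, assuming (as the `super().__init__` arguments suggest) that it extends `tfd.TransformedDistribution`: samples have mean `loc` and covariance `inv(chol_precision_tril @ chol_precision_tril^T)`, since the chain applies `inv(L^T)` and then shifts by `loc`.

import tensorflow as tf

loc = tf.constant([1., -1.])
chol_precision = tf.constant([[1., 0.],
                              [2., 8.]])
d = MVNCholPrecisionTriL(loc=loc, chol_precision_tril=chol_precision)
print(d.sample(4))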
Example #21
    def testNonCompositeTensorBijectorTfFunction(self):
        scale = tf.Variable(5.)
        b = NonCompositeScale(scale)
        inv_b = tfb.Invert(b)
        x = tf.constant([3.])

        @tf.function
        def f(bij, x):
            return bij.forward(x)

        self.evaluate(scale.initializer)
        self.assertAllClose(self.evaluate(f(inv_b, x)), [0.6])
Example #22
 def testShapeGetters(self):
     bijector = tfb.Invert(tfb.SoftmaxCentered(validate_args=True))
     x = tf.TensorShape([2])
     y = tf.TensorShape([1])
     self.assertAllEqual(y, bijector.forward_event_shape(x))
     self.assertAllEqual(
         y.as_list(),
         self.evaluate(bijector.forward_event_shape_tensor(x.as_list())))
     self.assertAllEqual(x, bijector.inverse_event_shape(y))
     self.assertAllEqual(
         x.as_list(),
         self.evaluate(bijector.inverse_event_shape_tensor(y.as_list())))
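In other words, `Invert` swaps the shape getters wholesale: `Invert(b).forward_event_shape` is `b.inverse_event_shape` and vice versa. A one-assertion hedged sketch:

import tensorflow_probability as tfp

tfb = tfp.bijectors

b = tfb.SoftmaxCentered()
inv = tfb.Invert(b)
# SoftmaxCentered expands [n] -> [n + 1] going forward, so the inverted
# getters shrink the event shape instead.
assert inv.forward_event_shape([2]) == b.inverse_event_shape([2])  # [1]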
Example #23
    def _validateChainMinEventNdims(self, bijectors, forward_min_event_ndims,
                                    inverse_min_event_ndims):
        chain = tfb.Chain(bijectors)
        self.assertAllEqual(forward_min_event_ndims,
                            chain.forward_min_event_ndims)
        self.assertAllEqual(inverse_min_event_ndims,
                            chain.inverse_min_event_ndims)

        chain_inverse = tfb.Chain([tfb.Invert(b) for b in reversed(bijectors)])
        self.assertAllEqual(forward_min_event_ndims,
                            chain_inverse.inverse_min_event_ndims)
        self.assertAllEqual(inverse_min_event_ndims,
                            chain_inverse.forward_min_event_ndims)
Example #24
 def test_invert_str_and_repr_match_expected(self):
     bij = tfb.Invert(tfb.Split([3, 4, 2]))
     self.assertContainsInOrder([
         'tfp.bijectors.Invert("invert_split", batch_shape=[], '
         'forward_min_event_ndims=[1, 1, 1], inverse_min_event_ndims=1, '
         'bijector=Split)'
     ], str(bij))
     self.assertContainsInOrder([
         "<tfp.bijectors.Invert 'invert_split' batch_shape=[] "
         "forward_min_event_ndims=[1, 1, 1] inverse_min_event_ndims=1 "
         "dtype_x=[?, ?, ?] dtype_y=? "
         "bijector=<tfp.bijectors.Split 'split' batch_shape=[] "
         "forward_min_event_ndims=1 inverse_min_event_ndims=[1, 1, 1] "
         "dtype_x=? dtype_y=[?, ?, ?]>>"
     ], repr(bij))
Example #25
 def testBijectorVector(self):
     ordered = tfb.Invert(tfb.Ascending())
     x = np.asarray([[2., 3, 4], [4., 8, 13]])
     y = [[2., 0, 0], [4., np.log(4.), np.log(5.)]]
     self.assertAllClose(y, self.evaluate(ordered.forward(x)))
     self.assertAllClose(x, self.evaluate(ordered.inverse(y)))
     self.assertAllClose(
         np.sum(np.asarray(y)[..., 1:], axis=-1),
         self.evaluate(ordered.inverse_log_det_jacobian(y, event_ndims=1)),
         atol=0.,
         rtol=1e-7)
     self.assertAllClose(
         self.evaluate(-ordered.inverse_log_det_jacobian(y, event_ndims=1)),
         self.evaluate(ordered.forward_log_det_jacobian(x, event_ndims=1)),
         atol=0.,
         rtol=1e-7)
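`tfb.Invert(tfb.Ascending())` is the usual stand-in for the deprecated `Ordered` bijector: forward maps an ascending vector to unconstrained reals as the first element followed by log-differences. A minimal sketch of the mapping checked above:

import tensorflow_probability as tfp

tfb = tfp.bijectors

ordered = tfb.Invert(tfb.Ascending())
# [2., 3., 4.] -> [2., log(3 - 2), log(4 - 3)] = [2., 0., 0.]
print(ordered.forward([2., 3., 4.]))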
Example #26
    def testNonCompositeTensorBijectorRetainsVariable(self):
        class BijectorContainer(tf.Module):
            def __init__(self, bijector):
                self.bijector = bijector

        b = NonCompositeScale(tf.Variable(3.))
        inv_b = tfb.Invert(b)
        bc = BijectorContainer(inv_b)

        # If `Invert` subclasses `CompositeTensor` but its inner bijector does
        # not, this test fails because `tf.Module.trainable_variables` calls
        # `nest.flatten(..., expand_composites=True)` on the `tf.Module`'s
        # attributes. `Invert._type_spec` would treat the inner bijector as a
        # callable (see the `AutoCompositeTensor` docs) and not decompose it
        # correctly into its `Tensor` components.
        self.assertLen(bc.trainable_variables, 1)
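`NonCompositeScale` is defined elsewhere in the test module. A hypothetical minimal stand-in (a reconstruction for readability, not the actual definition) is any `tfb.Bijector` subclass that stores its scale as an attribute, so `tf.Module` tracks the variable, without opting into `AutoCompositeTensor`:

import tensorflow_probability as tfp

tfb = tfp.bijectors

class NonCompositeScale(tfb.Bijector):
    """A Scale-like bijector that is deliberately not a `CompositeTensor`."""

    def __init__(self, scale):
        super().__init__(forward_min_event_ndims=0)
        self.scale = scale  # Tracked by tf.Module -> trainable_variables.

    def _forward(self, x):
        return x * self.scale

    def _inverse(self, y):
        return y / self.scale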
Example #27
 def testBijectorIsInvertExp(self):
     x = np.linspace(1., 10., num=200)
     log = tfb.Log()
     invert_exp = tfb.Invert(tfb.Exp())
     self.assertAllClose(self.evaluate(log.forward(x)),
                         self.evaluate(invert_exp.forward(x)))
     self.assertAllClose(self.evaluate(log.inverse(x)),
                         self.evaluate(invert_exp.inverse(x)))
     self.assertAllClose(
         self.evaluate(log.forward_log_det_jacobian(x, event_ndims=1)),
         self.evaluate(invert_exp.forward_log_det_jacobian(x,
                                                           event_ndims=1)))
     self.assertAllClose(
         self.evaluate(log.inverse_log_det_jacobian(x, event_ndims=1)),
         self.evaluate(invert_exp.inverse_log_det_jacobian(x,
                                                           event_ndims=1)))
Example #28
 def testInvertMutuallyConsistent(self):
     dims = 4
     ma = tfb.Invert(
         tfb.MaskedAutoregressiveFlow(validate_args=True,
                                      **self._autoregressive_flow_kwargs))
     dist = tfd.TransformedDistribution(distribution=tfd.Normal(loc=0.,
                                                                scale=1.),
                                        bijector=ma,
                                        event_shape=[dims],
                                        validate_args=True)
     self.run_test_sample_consistent_log_prob(sess_run_fn=self.evaluate,
                                              dist=dist,
                                              num_samples=int(1e5),
                                              radius=1.,
                                              center=0.,
                                              rtol=0.02)
Example #29
    def test_composition_str_and_repr_match_expected_dynamic_shape(self):
        bij = tfb.Chain([
            tfb.Exp(),
            tfb.Shift(self._tensor([1., 2.])),
            tfb.SoftmaxCentered()
        ])
        self.assertContainsInOrder([
            'tfp.bijectors.Chain(',
            ('min_event_ndims=1, bijectors=[Exp, Shift, SoftmaxCentered])')
        ], str(bij))
        self.assertContainsInOrder([
            '<tfp.bijectors.Chain ',
            ('batch_shape=? forward_min_event_ndims=1 inverse_min_event_ndims=1 '
             'dtype_x=float32 dtype_y=float32 bijectors=[<tfp.bijectors.Exp'),
            '>, <tfp.bijectors.Shift', '>, <tfp.bijectors.SoftmaxCentered',
            '>]>'
        ], repr(bij))

        bij = tfb.Chain([
            tfb.JointMap({
                'a': tfb.Exp(),
                'b': tfb.ScaleMatvecDiag(self._tensor([2., 2.]))
            }),
            tfb.Restructure({
                'a': 0,
                'b': 1
            }, [0, 1]),
            tfb.Split(2),
            tfb.Invert(tfb.SoftmaxCentered()),
        ])
        self.assertContainsInOrder([
            'tfp.bijectors.Chain(',
            ('forward_min_event_ndims=1, '
             'inverse_min_event_ndims={a: 1, b: 1}, '
             'bijectors=[JointMap({a: Exp, b: ScaleMatvecDiag}), '
             'Restructure, Split, Invert(SoftmaxCentered)])')
        ], str(bij))
        self.assertContainsInOrder([
            '<tfp.bijectors.Chain ',
            ('batch_shape=? forward_min_event_ndims=1 '
             "inverse_min_event_ndims={'a': 1, 'b': 1} dtype_x=float32 "
             "dtype_y={'a': ?, 'b': float32} "
             "bijectors=[<tfp.bijectors.JointMap "),
            '>, <tfp.bijectors.Restructure', '>, <tfp.bijectors.Split',
            '>, <tfp.bijectors.Invert', '>]>'
        ], repr(bij))
Example #30
 def testInvertMutuallyConsistent(self):
   dims = 4
   nvp = tfb.Invert(
       tfb.RealNVP(
           num_masked=3, validate_args=True, **self._real_nvp_kwargs))
   dist = tfd.TransformedDistribution(
       distribution=tfd.Normal(loc=0., scale=1.),
       bijector=nvp,
       event_shape=[dims],
       validate_args=True)
   self.run_test_sample_consistent_log_prob(
       sess_run_fn=self.evaluate,
       dist=dist,
       num_samples=int(1e5),
       radius=1.,
       center=0.,
       rtol=0.02)