Example #1
    def testDiagWithVDVTUpdate(self):
        def static_run(fun, x, **kwargs):
            return self.evaluate(fun(x, **kwargs))

        def dynamic_run(fun, x_value, **kwargs):
            x_value = np.array(x_value, dtype=np.float32)
            placeholder = tf.placeholder_with_default(x_value, shape=None)
            return self.evaluate(fun(placeholder, **kwargs))

        for run in (static_run, dynamic_run):
            mu = -1.
            # Corresponds to scale = [[10, 0, 0], [0, 3, 0], [0, 0, 5]]
            bijector = tfb.Affine(shift=mu,
                                  scale_diag=[2., 3, 4],
                                  scale_perturb_diag=[2., 1],
                                  scale_perturb_factor=[[2., 0], [0., 0],
                                                        [0, 1]])
            bijector_ref = tfb.Affine(shift=mu, scale_diag=[10., 3, 5])

            x = [1., 2, 3]  # Vector.
            self.assertAllClose([9., 5, 14], run(bijector.forward, x))
            self.assertAllClose(run(bijector_ref.forward, x),
                                run(bijector.forward, x))
            self.assertAllClose([0.2, 1., 0.8], run(bijector.inverse, x))
            self.assertAllClose(run(bijector_ref.inverse, x),
                                run(bijector.inverse, x))
            self.assertAllClose(
                -np.log(150.),
                run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
            self.assertAllClose(
                run(bijector.inverse_log_det_jacobian, x, event_ndims=1),
                run(bijector_ref.inverse_log_det_jacobian, x, event_ndims=1))
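The "Corresponds to scale = ..." comment can be verified by hand: per the Affine docstring, the scale is assembled as diag(scale_diag) plus scale_perturb_factor @ diag(scale_perturb_diag) @ scale_perturb_factor^T. A standalone NumPy sketch of that arithmetic (not part of the test above):

import numpy as np

scale_diag = np.array([2., 3., 4.])
V = np.array([[2., 0.], [0., 0.], [0., 1.]])   # scale_perturb_factor
D = np.diag([2., 1.])                          # scale_perturb_diag

scale = np.diag(scale_diag) + V @ D @ V.T
print(scale)                                   # [[10, 0, 0], [0, 3, 0], [0, 0, 5]]

x = np.array([1., 2., 3.])
print(scale @ x - 1.)                          # forward: [9., 5., 14.]
print(-np.log(np.abs(np.linalg.det(scale))))   # ILDJ: -log(150)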
Example #2
    def testNoBatchMultivariateDiag(self):
        def static_run(fun, x, **kwargs):
            return self.evaluate(fun(x, **kwargs))

        def dynamic_run(fun, x_value, **kwargs):
            x_value = np.array(x_value, dtype=np.float32)
            placeholder = tf.placeholder_with_default(x_value, shape=None)
            return self.evaluate(fun(placeholder, **kwargs))

        for run in (static_run, dynamic_run):
            mu = [1., -1]
            # Multivariate
            # Corresponds to scale = [[2., 0], [0, 1.]]
            bijector = tfb.Affine(shift=mu, scale_diag=[2., 1])
            x = [1., 1]
            # matmul(scale, x) + shift
            # = [2., 1] + [1, -1]
            self.assertAllClose([3., 0], run(bijector.forward, x))
            self.assertAllClose([0., 2], run(bijector.inverse, x))
            self.assertAllClose(
                -np.log(2.),
                run(bijector.inverse_log_det_jacobian, x, event_ndims=1))

            # Reset bijector.
            bijector = tfb.Affine(shift=mu, scale_diag=[2., 1])
            # x is a 2-batch of 2-vectors.
            # The first vector is [1, 1], the second is [-1, -1].
            # Each undergoes matmul(scale, x) + shift.
            x = [[1., 1], [-1., -1]]
            self.assertAllClose([[3., 0], [-1., -2]], run(bijector.forward, x))
            self.assertAllClose([[0., 2], [-1., 0]], run(bijector.inverse, x))
            self.assertAllClose(
                -np.log(2.),
                run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
Example #3
  def testTriLWithVDVTUpdateNoDiagonal(self):
    def static_run(fun, x, **kwargs):
      return self.evaluate(fun(x, **kwargs))

    def dynamic_run(fun, x_value, **kwargs):
      x_value = np.array(x_value, dtype=np.float32)
      placeholder = tf1.placeholder_with_default(x_value, shape=None)
      return self.evaluate(fun(placeholder, **kwargs))

    for run in (static_run, dynamic_run):
      mu = -1.
      # Corresponds to scale = [[6, 0, 0], [1, 3, 0], [2, 3, 5]]
      bijector = tfb.Affine(
          shift=mu,
          scale_tril=[[2., 0, 0], [1, 3, 0], [2, 3, 4]],
          scale_perturb_diag=None,
          scale_perturb_factor=[[2., 0], [0., 0], [0, 1]])
      bijector_ref = tfb.Affine(
          shift=mu, scale_tril=[[6., 0, 0], [1, 3, 0], [2, 3, 5]])

      x = [1., 2, 3]  # Vector.
      self.assertAllClose([5., 6, 22], run(bijector.forward, x))
      self.assertAllClose(
          run(bijector_ref.forward, x), run(bijector.forward, x))
      self.assertAllClose([1 / 3., 8 / 9., 4 / 30.], run(bijector.inverse, x))
      self.assertAllClose(
          run(bijector_ref.inverse, x), run(bijector.inverse, x))
      self.assertAllClose(
          -np.log(90.),
          run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
      self.assertAllClose(
          run(bijector.inverse_log_det_jacobian, x, event_ndims=1),
          run(bijector_ref.inverse_log_det_jacobian, x, event_ndims=1))
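When scale_perturb_diag is None, the perturbation defaults to V @ V.T (an identity inner diagonal). A standalone NumPy check of the expected values (same numbers as the test):

import numpy as np

L = np.array([[2., 0., 0.], [1., 3., 0.], [2., 3., 4.]])  # scale_tril
V = np.array([[2., 0.], [0., 0.], [0., 1.]])              # scale_perturb_factor
scale = L + V @ V.T                           # [[6, 0, 0], [1, 3, 0], [2, 3, 5]]

x = np.array([1., 2., 3.])
print(scale @ x - 1.)                         # forward: [5., 6., 22.]
print(np.linalg.solve(scale, x + 1.))         # inverse: [1/3, 8/9, 4/30]
print(-np.log(np.abs(np.linalg.det(scale))))  # ILDJ: -log(90)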
Example #4
    def _testLegalInputs(self, shift=None, scale_params=None, x=None):
        def _powerset(x):
            s = list(x)
            return itertools.chain.from_iterable(
                itertools.combinations(s, r) for r in range(len(s) + 1))

        for args in _powerset(scale_params.items()):
            with self.cached_session():
                args = dict(args)

                scale_args = dict({"x": x}, **args)
                scale = self._makeScale(**scale_args)

                # We haven't specified enough information for the scale.
                if scale is None:
                    with self.assertRaisesRegexp(ValueError,
                                                 "must be specified."):
                        bijector = tfb.Affine(shift=shift, **args)
                else:
                    bijector = tfb.Affine(shift=shift, **args)
                    np_x = x
                    # When a vector is passed in, expand its shape to match
                    # the matrix so that matmul works.
                    if x.ndim == scale.ndim - 1:
                        np_x = np.expand_dims(x, axis=-1)

                    forward = np.matmul(scale, np_x) + shift
                    if x.ndim == scale.ndim - 1:
                        forward = np.squeeze(forward, axis=-1)
                    self.assertAllClose(forward,
                                        self.evaluate(bijector.forward(x)))

                    backward = np.linalg.solve(scale, np_x - shift)
                    if x.ndim == scale.ndim - 1:
                        backward = np.squeeze(backward, axis=-1)
                    self.assertAllClose(backward,
                                        self.evaluate(bijector.inverse(x)))

                    scale *= np.ones(shape=x.shape[:-1], dtype=scale.dtype)
                    ildj = -np.log(np.abs(np.linalg.det(scale)))
                    # TODO(jvdillon): We need to make it so the scale_identity_multiplier
                    # case does not deviate in expected shape. Fixing this will get rid of
                    # these special cases.
                    if (ildj.ndim > 0 and
                        (len(scale_args) == 1 or
                         (len(scale_args) == 2 and scale_args.get(
                             "scale_identity_multiplier", None) is not None))):
                        ildj = np.squeeze(ildj[0])
                    elif ildj.ndim < scale.ndim - 2:
                        ildj = np.reshape(ildj, scale.shape[0:-2])
                    self.assertAllClose(
                        ildj,
                        self.evaluate(
                            bijector.inverse_log_det_jacobian(x,
                                                              event_ndims=1)))
Example #5
 def __init__(self, loc, chol_precision_tril, name=None):
   super(MVNCholPrecisionTriL, self).__init__(
       distribution=tfd.Independent(tfd.Normal(tf.zeros_like(loc),
                                               scale=tf.ones_like(loc)),
                                    reinterpreted_batch_ndims=1),
       bijector=tfb.Chain([
           tfb.Affine(shift=loc),
           tfb.Invert(tfb.Affine(scale_tril=chol_precision_tril,
                                 adjoint=True)),
       ]),
       name=name)
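Reading the chain right to left: Invert(Affine(scale_tril=L, adjoint=True)) maps x to solve(L^T, x), and Affine(shift=loc) then adds the mean, so a standard-normal x becomes loc + inv(L^T) @ x, whose covariance is inv(L @ L^T), i.e. the inverse of the precision whose Cholesky factor is chol_precision_tril. A NumPy sketch of that identity, with a hypothetical factor L:

import numpy as np

L = np.array([[2., 0.], [1., 3.]])  # hypothetical chol_precision_tril
A = np.linalg.inv(L.T)              # what the inverted adjoint Affine applies
print(np.allclose(A @ A.T, np.linalg.inv(L @ L.T)))  # True: cov == inv(precision)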
Example #6
 def testBijector(self):
     with self.cached_session():
         for fwd in [
                 tfb.Identity(),
                 tfb.Exp(),
                 tfb.Affine(shift=[0., 1.], scale_diag=[2., 3.]),
                 tfb.Softplus(),
                 tfb.SoftmaxCentered(),
         ]:
             rev = tfb.Invert(fwd)
             self.assertEqual("_".join(["invert", fwd.name]), rev.name)
             x = [[[1., 2.], [2., 3.]]]
             self.assertAllClose(self.evaluate(fwd.inverse(x)),
                                 self.evaluate(rev.forward(x)))
             self.assertAllClose(self.evaluate(fwd.forward(x)),
                                 self.evaluate(rev.inverse(x)))
             self.assertAllClose(
                 self.evaluate(
                     fwd.forward_log_det_jacobian(x, event_ndims=1)),
                 self.evaluate(
                     rev.inverse_log_det_jacobian(x, event_ndims=1)))
             self.assertAllClose(
                 self.evaluate(
                     fwd.inverse_log_det_jacobian(x, event_ndims=1)),
                 self.evaluate(
                     rev.forward_log_det_jacobian(x, event_ndims=1)))
Example #7
    def test_transformed_affine(self):
        sample_shape = 3
        mvn = tfd.Independent(tfd.Normal(loc=[0., 0], scale=1), 1)
        aff = tfb.Affine(scale_tril=[[0.75, 0.], [0.05, 0.5]])

        def expected_lp(y):
            x = aff.inverse(y)  # I.e., x is shaped like tf.random.normal([4, 3, 2]).
            fldj = aff.forward_log_det_jacobian(x, event_ndims=1)
            return tf.reduce_sum(mvn.log_prob(x) - fldj, axis=1)

        # Transform a Sample.
        d = tfd.TransformedDistribution(tfd.Sample(mvn,
                                                   sample_shape,
                                                   validate_args=True),
                                        bijector=aff)
        y = d.sample(4, seed=test_util.test_seed())
        actual_lp = d.log_prob(y)
        self.assertAllEqual((4, ) + (sample_shape, ) + (2, ), y.shape)
        self.assertAllEqual((4, ), actual_lp.shape)
        self.assertAllClose(*self.evaluate([expected_lp(y), actual_lp]),
                            atol=0.,
                            rtol=1e-3)

        # Sample a Transform.
        d = tfd.Sample(tfd.TransformedDistribution(mvn, bijector=aff),
                       sample_shape,
                       validate_args=True)
        y = d.sample(4, seed=test_util.test_seed())
        actual_lp = d.log_prob(y)
        self.assertAllEqual((4, ) + (sample_shape, ) + (2, ), y.shape)
        self.assertAllEqual((4, ), actual_lp.shape)
        self.assertAllClose(*self.evaluate([expected_lp(y), actual_lp]),
                            atol=0.,
                            rtol=1e-3)
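Both layouts agree with expected_lp because of the change-of-variables formula: log p_Y(y) = log p_X(aff.inverse(y)) - FLDJ(aff.inverse(y)), and the three independent draws along the Sample axis simply add, hence the reduce_sum over axis 1.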
Example #8
    def testMinEventNdimsShapeChangingRemoveDims(self):
        chain = tfb.Chain([ShapeChanging(3, 0)])
        self.assertEqual(3, chain.forward_min_event_ndims)
        self.assertEqual(0, chain.inverse_min_event_ndims)

        chain = tfb.Chain([ShapeChanging(3, 0), tfb.Affine()])
        self.assertEqual(3, chain.forward_min_event_ndims)
        self.assertEqual(0, chain.inverse_min_event_ndims)

        chain = tfb.Chain([tfb.Affine(), ShapeChanging(3, 0)])
        self.assertEqual(4, chain.forward_min_event_ndims)
        self.assertEqual(1, chain.inverse_min_event_ndims)

        chain = tfb.Chain([ShapeChanging(3, 0), ShapeChanging(3, 0)])
        self.assertEqual(6, chain.forward_min_event_ndims)
        self.assertEqual(0, chain.inverse_min_event_ndims)
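A note on the arithmetic: Chain applies its rightmost bijector first on forward. In Chain([tfb.Affine(), ShapeChanging(3, 0)]), ShapeChanging consumes 3 event dims and emits 0, but the downstream Affine needs at least 1 event dim on its input, so one extra dim must flow through the whole chain: forward_min_event_ndims is 3 + 1 = 4 and inverse_min_event_ndims is 0 + 1 = 1. Stacking two ShapeChanging(3, 0) bijectors likewise compounds to 3 + 3 = 6 on forward.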
Example #9
    def testTriLWithVDVTUpdateAdjoint(self):
        def static_run(fun, x, **kwargs):
            return self.evaluate(fun(x, **kwargs))

        def dynamic_run(fun, x_value, **kwargs):
            x_value = np.array(x_value, dtype=np.float32)
            placeholder = tf.placeholder_with_default(x_value, shape=None)
            return self.evaluate(fun(placeholder, **kwargs))

        for run in (static_run, dynamic_run):
            mu = -1.
            # Corresponds to scale = [[10, 0, 0], [1, 3, 0], [2, 3, 5]]
            bijector = tfb.Affine(shift=mu,
                                  scale_tril=[[2., 0, 0], [1, 3, 0], [2, 3,
                                                                      4]],
                                  scale_perturb_diag=[2., 1],
                                  scale_perturb_factor=[[2., 0], [0., 0],
                                                        [0, 1]],
                                  adjoint=True,
                                  validate_args=True)
            scale_ref = np.array([[10., 0, 0], [1, 3, 0], [2, 3, 5]],
                                 dtype=np.float32)
            x = np.array([1., 2, 3], dtype=np.float32)
            expected_forward = np.matmul(scale_ref.T, x) + mu
            self.assertAllClose(expected_forward, run(bijector.forward, x))
            expected_inverse = np.linalg.solve(scale_ref.T, x - mu)
            self.assertAllClose(expected_inverse, run(bijector.inverse, x))
            self.assertAllClose(x, run(bijector.inverse, expected_forward))
            expected_fldj = np.log(np.prod(np.diagonal(scale_ref)))
            self.assertAllClose(
                -expected_fldj,
                run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
            self.assertAllClose(
                expected_fldj,
                run(bijector.forward_log_det_jacobian, x, event_ndims=1))
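scale_ref here is the same matrix the tril-plus-low-rank composition produces; a standalone NumPy check (same numbers as the test):

import numpy as np

tril = np.array([[2., 0., 0.], [1., 3., 0.], [2., 3., 4.]])
V = np.array([[2., 0.], [0., 0.], [0., 1.]])  # scale_perturb_factor
D = np.diag([2., 1.])                         # scale_perturb_diag
scale = tril + V @ D @ V.T
print(scale)                                  # [[10, 0, 0], [1, 3, 0], [2, 3, 5]]

# adjoint=True applies scale.T, which leaves |det| (and the FLDJ) unchanged.
print(np.log(np.abs(np.linalg.det(scale.T))))  # log(150)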
Example #10
    def testValuesAreCorrectVectorTransform(self, feature_ndims, dims):
        amplitude = self.dtype(5.)
        length_scale = self.dtype(0.2)
        kernel = tfpk.ExponentiatedQuadratic(amplitude, length_scale,
                                             feature_ndims)
        input_shape = [dims] * feature_ndims

        scale_diag = np.random.uniform(-1, 1, size=(dims, )).astype(self.dtype)
        bij = bijectors.Affine(scale_diag=scale_diag)

        # Scaling the last dimension.
        def vector_transform(x, feature_ndims, param_expansion_ndims):
            del feature_ndims, param_expansion_ndims
            return bij.forward(x)

        vector_transformed_kernel = tfpk.FeatureTransformed(
            kernel, transformation_fn=vector_transform)

        x = np.random.uniform(-1, 1, size=input_shape).astype(self.dtype)
        y = np.random.uniform(-1, 1, size=input_shape).astype(self.dtype)
        self.assertAllClose(
            _numpy_exp_quad(amplitude,
                            length_scale,
                            scale_diag * x,
                            scale_diag * y,
                            feature_ndims=feature_ndims),
            self.evaluate(vector_transformed_kernel.apply(x, y)))
Example #11
    def testNoBatchMultivariateIdentity(self):
        with self.test_session() as sess:
            placeholder = tf.placeholder(tf.float32, name="x")

            def static_run(fun, x, **kwargs):
                return fun(x, **kwargs).eval()

            def dynamic_run(fun, x_value, **kwargs):
                x_value = np.array(x_value)
                return sess.run(fun(placeholder, **kwargs),
                                feed_dict={placeholder: x_value})

            for run in (static_run, dynamic_run):
                mu = [1., -1]
                # Multivariate
                # Corresponds to scale = [[1., 0], [0, 1.]]
                bijector = tfb.Affine(shift=mu)
                x = [1., 1]
                # matmul(scale, x) + shift
                # = [1., 1] + [1, -1]
                self.assertAllClose([2., 0], run(bijector.forward, x))
                self.assertAllClose([0., 2], run(bijector.inverse, x))

                # x is a 2-batch of 2-vectors.
                # The first vector is [1, 1], the second is [-1, -1].
                # Each undergoes matmul(scale, x) + shift.
                x = [[1., 1], [-1., -1]]
                self.assertAllClose([[2., 0], [0., -2]],
                                    run(bijector.forward, x))
                self.assertAllClose([[0., 2], [-2., 0]],
                                    run(bijector.inverse, x))
                self.assertAllClose(
                    0., run(bijector.inverse_log_det_jacobian,
                            x,
                            event_ndims=1))
Example #12
    def testIdentityAndDiagWithTriL(self):
        with self.test_session() as sess:
            placeholder = tf.placeholder(tf.float32, name="x")

            def static_run(fun, x, **kwargs):
                return fun(x, **kwargs).eval()

            def dynamic_run(fun, x_value, **kwargs):
                x_value = np.array(x_value)
                return sess.run(fun(placeholder, **kwargs),
                                feed_dict={placeholder: x_value})

            for run in (static_run, dynamic_run):
                mu = -1.
                # scale = [[3., 0], [2, 4]]
                bijector = tfb.Affine(shift=mu,
                                      scale_identity_multiplier=1.0,
                                      scale_diag=[1., 2.],
                                      scale_tril=[[1., 0], [2., 1]])
                x = [[1., 2]]  # One multivariate sample.
                self.assertAllClose([[2., 9]], run(bijector.forward, x))
                self.assertAllClose([[2 / 3., 5 / 12.]],
                                    run(bijector.inverse, x))
                self.assertAllClose(
                    -np.log(12.),
                    run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
Example #13
    def testBatchMultivariateFullDynamic(self):
        with self.test_session() as sess:
            x = tf.placeholder(tf.float32, name="x")
            mu = tf.placeholder(tf.float32, name="mu")
            scale_diag = tf.placeholder(tf.float32, name="scale_diag")

            x_value = np.array([[[1., 1]]], dtype=np.float32)
            mu_value = np.array([[1., -1]], dtype=np.float32)
            scale_diag_value = np.array([[2., 2]], dtype=np.float32)

            feed_dict = {
                x: x_value,
                mu: mu_value,
                scale_diag: scale_diag_value,
            }

            bijector = tfb.Affine(shift=mu, scale_diag=scale_diag)
            self.assertAllClose([[[3., 1]]],
                                sess.run(bijector.forward(x), feed_dict))
            self.assertAllClose([[[0., 1]]],
                                sess.run(bijector.inverse(x), feed_dict))
            self.assertAllClose(
                [-np.log(4)],
                sess.run(bijector.inverse_log_det_jacobian(x, event_ndims=1),
                         feed_dict))
Example #14
 def _testScaledIdentityComplexAdjoint(self, is_dynamic):
   shift_ = np.array(-0.5, dtype=np.complex128)
   scale_ = np.array(4 + 2j, dtype=np.complex128)
   shift = tf1.placeholder_with_default(
       shift_, shape=None if is_dynamic else [])
   scale = tf1.placeholder_with_default(
       scale_, shape=None if is_dynamic else [])
   bijector = tfb.Affine(
       shift=shift,
       scale_identity_multiplier=scale,
       adjoint=True,
       validate_args=True)
   z = np.array([1., 2, 3], dtype=np.complex128)
   y = bijector.forward(z)
   x = bijector.inverse(z)
   inv_fwd_z = bijector.inverse(tf.identity(y))
   ildj = bijector.inverse_log_det_jacobian(z, event_ndims=1)
   fldj = bijector.forward_log_det_jacobian(z, event_ndims=1)
   [x_, y_, inv_fwd_z_, ildj_, fldj_] = self.evaluate([
       x, y, inv_fwd_z, ildj, fldj])
   self.assertAllClose(np.conj(scale_) * z + shift_, y_)
   self.assertAllClose((z - shift_) / np.conj(scale_), x_)
   self.assertAllClose(z, inv_fwd_z_)
   self.assertAllClose(z.shape[-1] * np.log(np.abs(scale_)), fldj_)
   self.assertAllClose(-z.shape[-1] * np.log(np.abs(scale_)), ildj_)
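For a complex scalar multiplier with adjoint=True, the adjoint is the conjugate, which is what the assertions above encode. A standalone NumPy sketch of the same arithmetic:

import numpy as np

scale = 4 + 2j
shift = -0.5
z = np.array([1., 2., 3.], dtype=np.complex128)
y = np.conj(scale) * z + shift                       # adjoint forward
print(np.allclose((y - shift) / np.conj(scale), z))  # inverse round-trips: True
print(z.shape[-1] * np.log(np.abs(scale)))           # FLDJ for event_ndims=1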
Example #15
 def testCompareToBijector(self):
     """Demonstrates equivalence between TD, Bijector approach and AR dist."""
     sample_shape = np.int32([4, 5])
     batch_shape = np.int32([])
     event_size = np.int32(2)
     batch_event_shape = np.concatenate([batch_shape, [event_size]], axis=0)
     sample0 = tf.zeros(batch_event_shape)
     affine = tfb.Affine(scale_tril=self._random_scale_tril(event_size))
     ar = tfd.Autoregressive(self._normal_fn(affine),
                             sample0,
                             validate_args=True)
     ar_flow = tfb.MaskedAutoregressiveFlow(
         is_constant_jacobian=True,
         shift_and_log_scale_fn=lambda x: [None, affine.forward(x)],
         validate_args=True)
     td = tfd.TransformedDistribution(distribution=tfd.Normal(loc=0.,
                                                              scale=1.),
                                      bijector=ar_flow,
                                      event_shape=[event_size],
                                      batch_shape=batch_shape,
                                      validate_args=True)
     x_shape = np.concatenate([sample_shape, batch_shape, [event_size]],
                              axis=0)
     x = 2. * self._rng.random_sample(x_shape).astype(np.float32) - 1.
     td_log_prob_, ar_log_prob_ = self.evaluate(
         [td.log_prob(x), ar.log_prob(x)])
     self.assertAllClose(td_log_prob_, ar_log_prob_, atol=0., rtol=1e-6)
Example #16
 def testName(self):
     exp = tfb.Exp()
     sp = tfb.Softplus()
     aff = tfb.Affine(scale_diag=[2., 3., 4.])
     blockwise = tfb.Blockwise(bijectors=[exp, sp, aff],
                               block_sizes=[2, 1, 3])
     self.assertStartsWith(blockwise.name,
                           'blockwise_of_exp_and_softplus_and_affine')
Example #17
 def testNoBatchMultivariateRaisesWhenSingular(self):
     mu = [1., -1]
     with self.assertRaisesOpError("diagonal part must be non-zero"):
         bijector = tfb.Affine(
             shift=mu,
             # Has zero on the diagonal.
             scale_diag=[0., 1],
             validate_args=True)
         self.evaluate(bijector.forward([1., 1.]))
Example #18
 def testNoBatchMultivariateRaisesWhenSingular(self):
   mu = [1., -1]
   with self.assertRaisesRegexp(
       Exception,
       ".*Singular operator:  Diagonal contained zero values.*"):
     bijector = tfb.Affine(
         shift=mu,
         # Has zero on the diagonal.
         scale_diag=[0., 1],
         validate_args=True)
     self.evaluate(bijector.forward([1., 1.]))
Example #19
 def testNonScalarBatchNonScalarEvent(self):
   # Can't override event_shape and/or batch_shape for non_scalar batch,
   # non-scalar event.
   with self.assertRaisesRegexp(ValueError, 'Base distribution is not scalar'):
     self._cls()(
         distribution=tfd.MultivariateNormalDiag(
             loc=[[0.]], scale_diag=[[1.]]),
         bijector=tfb.Affine(shift=self._shift, scale_tril=self._tril),
         batch_shape=[2],
         event_shape=[3],
         validate_args=True)
Example #20
    def testTriLWithVDVTUpdateNoDiagonal(self):
        with self.test_session() as sess:
            placeholder = tf.placeholder(tf.float32, name="x")

            def static_run(fun, x, **kwargs):
                return fun(x, **kwargs).eval()

            def dynamic_run(fun, x_value, **kwargs):
                x_value = np.array(x_value)
                return sess.run(fun(placeholder, **kwargs),
                                feed_dict={placeholder: x_value})

            for run in (static_run, dynamic_run):
                mu = -1.
                # Corresponds to scale = [[6, 0, 0], [1, 3, 0], [2, 3, 5]]
                bijector = tfb.Affine(shift=mu,
                                      scale_tril=[[2., 0, 0], [1, 3, 0],
                                                  [2, 3, 4]],
                                      scale_perturb_diag=None,
                                      scale_perturb_factor=[[2., 0], [0., 0],
                                                            [0, 1]])
                bijector_ref = tfb.Affine(shift=mu,
                                          scale_tril=[[6., 0, 0], [1, 3, 0],
                                                      [2, 3, 5]])

                x = [1., 2, 3]  # Vector.
                self.assertAllClose([5., 6, 22], run(bijector.forward, x))
                self.assertAllClose(run(bijector_ref.forward, x),
                                    run(bijector.forward, x))
                self.assertAllClose([1 / 3., 8 / 9., 4 / 30.],
                                    run(bijector.inverse, x))
                self.assertAllClose(run(bijector_ref.inverse, x),
                                    run(bijector.inverse, x))
                self.assertAllClose(
                    -np.log(90.),
                    run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
                self.assertAllClose(
                    run(bijector.inverse_log_det_jacobian, x, event_ndims=1),
                    run(bijector_ref.inverse_log_det_jacobian,
                        x,
                        event_ndims=1))
Example #21
    def testIdentityWithVDVTUpdate(self):
        with self.test_session() as sess:
            placeholder = tf.placeholder(tf.float32, name="x")

            def static_run(fun, x, **kwargs):
                return fun(x, **kwargs).eval()

            def dynamic_run(fun, x_value, **kwargs):
                x_value = np.array(x_value)
                return sess.run(fun(placeholder, **kwargs),
                                feed_dict={placeholder: x_value})

            for run in (static_run, dynamic_run):
                mu = -1.
                # Corresponds to scale = [[10, 0, 0], [0, 2, 0], [0, 0, 3]]
                bijector = tfb.Affine(shift=mu,
                                      scale_identity_multiplier=2.,
                                      scale_perturb_diag=[2., 1],
                                      scale_perturb_factor=[[2., 0], [0., 0],
                                                            [0, 1]])
                bijector_ref = tfb.Affine(shift=mu, scale_diag=[10., 2, 3])

                x = [1., 2, 3]  # Vector.
                self.assertAllClose([9., 3, 8], run(bijector.forward, x))
                self.assertAllClose(run(bijector_ref.forward, x),
                                    run(bijector.forward, x))

                self.assertAllClose([0.2, 1.5, 4 / 3.],
                                    run(bijector.inverse, x))
                self.assertAllClose(run(bijector_ref.inverse, x),
                                    run(bijector.inverse, x))
                self.assertAllClose(
                    -np.log(60.),
                    run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
                self.assertAllClose(
                    run(bijector.inverse_log_det_jacobian, x, event_ndims=1),
                    run(bijector_ref.inverse_log_det_jacobian,
                        x,
                        event_ndims=1))
Example #22
    def testChainAffineExp(self):
        scale_diag = np.array([1., 2., 3.], dtype=np.float32)
        chain = tfb.Chain([tfb.Affine(scale_diag=scale_diag), tfb.Exp()])
        x = [0., np.log(2., dtype=np.float32), np.log(3., dtype=np.float32)]
        y = [1., 4., 9.]
        self.assertAllClose(y, self.evaluate(chain.forward(x)))
        self.assertAllClose(x, self.evaluate(chain.inverse(y)))
        self.assertAllClose(
            np.log(6, dtype=np.float32) + np.sum(x),
            self.evaluate(chain.forward_log_det_jacobian(x, event_ndims=1)))

        self.assertAllClose(
            -np.log(6, dtype=np.float32) - np.sum(x),
            self.evaluate(chain.inverse_log_det_jacobian(y, event_ndims=1)))
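The chain's log-det-Jacobian is the sum of its parts, evaluated at the right intermediate values: Exp contributes sum(x) and the diagonal Affine contributes log|det diag(scale_diag)| = log 6. A standalone NumPy check:

import numpy as np

scale_diag = np.array([1., 2., 3.])
x = np.array([0., np.log(2.), np.log(3.)])
print(scale_diag * np.exp(x))                    # forward: [1., 4., 9.]
fldj = np.log(np.prod(scale_diag)) + np.sum(x)   # Affine term + Exp term
print(fldj, -fldj)                               # FLDJ and ILDJ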
Example #23
    def testBatchMultivariateFullDynamic(self):
        x_value = np.array([[[1., 1]]], dtype=np.float32)
        mu_value = np.array([[1., -1]], dtype=np.float32)
        scale_diag_value = np.array([[2., 2]], dtype=np.float32)

        x = tf.placeholder_with_default(x_value, shape=None)
        mu = tf.placeholder_with_default(mu_value, shape=None)
        scale_diag = tf.placeholder_with_default(scale_diag_value, shape=None)

        bijector = tfb.Affine(shift=mu, scale_diag=scale_diag)
        self.assertAllClose([[[3., 1]]], self.evaluate(bijector.forward(x)))
        self.assertAllClose([[[0., 1]]], self.evaluate(bijector.inverse(x)))
        self.assertAllClose(
            [-np.log(4)],
            self.evaluate(bijector.inverse_log_det_jacobian(x, event_ndims=1)))
Example #24
 def testSampleAndLogProbConsistency(self):
   batch_shape = []
   event_size = 2
   batch_event_shape = np.concatenate([batch_shape, [event_size]], axis=0)
   sample0 = tf.zeros(batch_event_shape)
   affine = tfb.Affine(scale_tril=self._random_scale_tril(event_size))
   ar = tfd.Autoregressive(
       self._normal_fn(affine), sample0, validate_args=True)
   if tf.executing_eagerly():
     return
   self.run_test_sample_consistent_log_prob(
       self.evaluate,
       ar,
       num_samples=int(1e6),
       radius=1.,
       center=0.,
       rtol=0.01,
       seed=tfp_test_util.test_seed())
Example #25
    def testBijectiveAndFinite(self):
        exp = tfb.Exp()
        sp = tfb.Softplus()
        aff = tfb.Affine(scale_diag=[2., 3., 4.])
        blockwise = tfb.Blockwise(bijectors=[exp, sp, aff],
                                  block_sizes=[2, 1, 3])

        x = tf.cast([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], dtype=tf.float32)
        x = tf1.placeholder_with_default(x, shape=x.shape)
        # Identity to break the caching.
        blockwise_y = tf.identity(blockwise.forward(x))

        bijector_test_util.assert_bijective_and_finite(
            blockwise,
            x=self.evaluate(x),
            y=self.evaluate(blockwise_y),
            eval_func=self.evaluate,
            event_ndims=1)
Example #26
  def testNonScalarBatchScalarEvent(self):
    self._testMVN(
        base_distribution_class=tfd.Normal,
        base_distribution_kwargs={
            'loc': [0., 0],
            'scale': [1., 1]
        },
        event_shape=[3],
        not_implemented_message='not implemented when overriding event_shape')

    # Can't override batch_shape for non-scalar batch, scalar event.
    with self.assertRaisesRegexp(ValueError, 'base distribution not scalar'):
      self._cls()(
          distribution=tfd.Normal(loc=[0.], scale=[1.]),
          bijector=tfb.Affine(shift=self._shift, scale_tril=self._tril),
          batch_shape=[2],
          event_shape=[3],
          validate_args=True)
Example #27
    def testBatchMultivariateIdentity(self):
        def static_run(fun, x, **kwargs):
            return self.evaluate(fun(x, **kwargs))

        def dynamic_run(fun, x_value, **kwargs):
            x_value = np.array(x_value, dtype=np.float32)
            placeholder = tf.placeholder_with_default(x_value, shape=None)
            return self.evaluate(fun(placeholder, **kwargs))

        for run in (static_run, dynamic_run):
            mu = [[1., -1]]
            # Corresponds to a single 2x2 matrix with twos on the diagonal.
            scale = 2.
            bijector = tfb.Affine(shift=mu, scale_identity_multiplier=scale)
            x = [[[1., 1]]]
            self.assertAllClose([[[3., 1]]], run(bijector.forward, x))
            self.assertAllClose([[[0., 1]]], run(bijector.inverse, x))
            self.assertAllClose(
                -np.log(4),
                run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
Example #28
    def testScalarBatchNonScalarEvent(self):
        self._testMVN(base_distribution_class=tfd.MultivariateNormalDiag,
                      base_distribution_kwargs={
                          'loc': [0., 0., 0.],
                          'scale_diag': [1., 1, 1]
                      },
                      batch_shape=[2],
                      not_implemented_message='not implemented')

        # Can't override event_shape for scalar batch, non-scalar event.
        with self.assertRaisesWithPredicateMatch(
                Exception, 'Base distribution is not scalar.'):
            self._cls()(
                distribution=tfd.MultivariateNormalDiag(loc=[0.],
                                                        scale_diag=[1.]),
                bijector=tfb.Affine(shift=self._shift,
                                    scale_tril=self._tril),
                batch_shape=[2],
                event_shape=[3],
                validate_args=True)
Example #29
  def testDiagWithTriL(self):
    def static_run(fun, x, **kwargs):
      return self.evaluate(fun(x, **kwargs))

    def dynamic_run(fun, x_value, **kwargs):
      x_value = np.array(x_value, dtype=np.float32)
      placeholder = tf1.placeholder_with_default(x_value, shape=None)
      return self.evaluate(fun(placeholder, **kwargs))

    for run in (static_run, dynamic_run):
      mu = -1.
      # scale = [[2., 0], [2, 3]]
      bijector = tfb.Affine(
          shift=mu, scale_diag=[1., 2.], scale_tril=[[1., 0], [2., 1]])
      x = [[1., 2]]  # One multivariate sample.
      self.assertAllClose([[1., 7]], run(bijector.forward, x))
      self.assertAllClose([[1., 1 / 3.]], run(bijector.inverse, x))
      self.assertAllClose(
          -np.log(6.),
          run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
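When several scale terms are supplied, Affine sums them; here diag([1., 2.]) plus the tril gives the commented matrix. A standalone NumPy check:

import numpy as np

scale = np.diag([1., 2.]) + np.array([[1., 0.], [2., 1.]])
print(scale)                            # [[2, 0], [2, 3]]
x = np.array([1., 2.])
print(scale @ x - 1.)                   # forward: [1., 7.]
print(np.linalg.solve(scale, x + 1.))   # inverse: [1., 1/3]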
Example #30
  def testIdentityWithDiagUpdate(self):
    def static_run(fun, x, **kwargs):
      return self.evaluate(fun(x, **kwargs))

    def dynamic_run(fun, x_value, **kwargs):
      x_value = np.array(x_value, dtype=np.float32)
      placeholder = tf1.placeholder_with_default(x_value, shape=None)
      return self.evaluate(fun(placeholder, **kwargs))

    for run in (static_run, dynamic_run):
      mu = -1.
      # Corresponds to scale = 2
      bijector = tfb.Affine(
          shift=mu, scale_identity_multiplier=1., scale_diag=[1., 1., 1.])
      x = [1., 2, 3]  # Three scalar samples (no batches).
      self.assertAllClose([1., 3, 5], run(bijector.forward, x))
      self.assertAllClose([1., 1.5, 2.], run(bijector.inverse, x))
      self.assertAllClose(
          -np.log(2.**3),
          run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
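The same summing rule covers scale_identity_multiplier: 1. * I plus diag([1., 1., 1.]) is 2 * I, hence the "Corresponds to scale = 2" comment. A standalone NumPy check:

import numpy as np

scale = 1. * np.eye(3) + np.diag([1., 1., 1.])
x = np.array([1., 2., 3.])
print(scale @ x - 1.)      # forward: [1., 3., 5.]
print(-np.log(2.**3))      # ILDJ: -log(8)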