Example #1
    def testJacobian(self):
        bijector = tfb.Exp()
        x = tf.constant([22.])
        fldj = bijector.forward_log_det_jacobian(x, event_ndims=0)
        fldj_theoretical = bijector_test_util.get_fldj_theoretical(
            bijector, x, event_ndims=0)
        fldj_, fldj_theoretical_ = self.evaluate([fldj, fldj_theoretical])
        self.assertAllClose(fldj_, fldj_theoretical_)

        x = np.expand_dims(np.linspace(-1, 1, num=10), -1)
        fldj = bijector.forward_log_det_jacobian(x, event_ndims=1)
        fldj_theoretical = bijector_test_util.get_fldj_theoretical(
            bijector, x, event_ndims=1)
        fldj_, fldj_theoretical_ = self.evaluate([fldj, fldj_theoretical])
        self.assertAllClose(fldj_, fldj_theoretical_)
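Both assertions compare the bijector's analytic forward_log_det_jacobian against a numerical value that get_fldj_theoretical obtains by autodifferentiating bijector.forward. A minimal standalone sketch of the same comparison for tfb.Exp (assuming only the standard tensorflow / tensorflow_probability imports; not part of the test suite):

    import tensorflow as tf
    import tensorflow_probability as tfp

    tfb = tfp.bijectors
    x = tf.constant([22.])
    with tf.GradientTape() as tape:
        tape.watch(x)
        y = tfb.Exp().forward(x)
    # Exp acts elementwise, so its Jacobian is diagonal and the gradient
    # dy/dx recovers that diagonal: exp(x).
    jac_diag = tape.gradient(y, x)
    fldj_numerical = tf.math.log(tf.abs(jac_diag))
    # Analytic value: for Exp, log|d exp(x)/dx| = x, so both equal 22.
    fldj_analytic = tfb.Exp().forward_log_det_jacobian(x, event_ndims=0)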
Example #2
  def testTheoreticalFldj(self):
    raw_mat = tf.constant([[1., 2, 3],
                           [4, 5, 6],
                           [0.5, 0., 0.25]])
    nbatch = 5
    batch_mats = raw_mat * tf.range(1., nbatch + 1.)[:, tf.newaxis, tf.newaxis]
    lower_upper, permutation = tf.linalg.lu(tf.cast(batch_mats, tf.float64))

    bijector = tfb.ScaleMatvecLU(
        lower_upper=lower_upper, permutation=permutation, validate_args=True)
    self.assertEqual(tf.float64, bijector.dtype)

    channels = tf.compat.dimension_value(lower_upper.shape[-1])
    x = np.random.uniform(size=[2, 7, nbatch, channels]).astype(np.float64)
    y = self.evaluate(bijector.forward(x))
    bijector_test_util.assert_bijective_and_finite(
        bijector,
        x,
        y,
        eval_func=self.evaluate,
        event_ndims=1,
        inverse_event_ndims=1,
        rtol=1e-5)
    fldj = bijector.forward_log_det_jacobian(x, event_ndims=1)
    # The jacobian is not yet broadcast, since it is constant.
    fldj = fldj + tf.zeros(tf.shape(x)[:-1], dtype=x.dtype)
    fldj_theoretical = bijector_test_util.get_fldj_theoretical(
        bijector, x, event_ndims=1)
    self.assertAllClose(
        self.evaluate(fldj_theoretical),
        self.evaluate(fldj),
        atol=1e-5,
        rtol=1e-5)
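The fldj + tf.zeros(...) line deals with ScaleMatvecLU having a constant Jacobian: its log-determinant depends only on lower_upper (the sum of log|diag(U)| from the LU factors), not on x, so forward_log_det_jacobian returns a tensor with only the bijector's batch shape. Adding zeros of shape tf.shape(x)[:-1] broadcasts that constant up to the full sample shape that get_fldj_theoretical produces, so the two tensors can be compared elementwise.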
Example #3
 def testTheoreticalFldjSimple(self):
   bijector = tfb.RationalQuadraticSpline(
       bin_widths=[1., 1],
       bin_heights=[np.sqrt(.5), 2 - np.sqrt(.5)],
       knot_slopes=1)
   self.assertEqual(tf.float64, bijector.dtype)
   dim = 5
   x = np.linspace(-1.05, 1.05, num=2 * dim, dtype=np.float64).reshape(2, dim)
   y = self.evaluate(bijector.forward(x))
   bijector_test_util.assert_bijective_and_finite(
       bijector,
       x,
       y,
       eval_func=self.evaluate,
       event_ndims=0,
       inverse_event_ndims=0,
       rtol=1e-5)
   fldj = bijector.forward_log_det_jacobian(x, event_ndims=0)
   fldj_theoretical = bijector_test_util.get_fldj_theoretical(
       bijector, x, event_ndims=0)
   self.assertAllClose(
       self.evaluate(fldj_theoretical),
       self.evaluate(fldj),
       atol=1e-5,
       rtol=1e-5)
Example #4
 def testTheoreticalFldj(self, data):
     # get_fldj_theoretical test rig requires 1-d batches.
     batch_shape = data.draw(tfp_hps.shapes(min_ndims=1, max_ndims=1))
     bijector = data.draw(
         rq_splines(batch_shape=batch_shape, dtype=tf.float64))
     self.assertEqual(tf.float64, bijector.dtype)
     bw, bh, kd = self.evaluate(
         [bijector.bin_widths, bijector.bin_heights, bijector.knot_slopes])
     logging.info('bw: %s\nbh: %s\nkd: %s', bw, bh, kd)
     x_shp = ((bw + bh)[..., :-1] + kd).shape[:-1]
     if x_shp[-1] == 1:  # Possibly broadcast the x dim.
         dim = data.draw(hps.integers(min_value=1, max_value=7))
          x_shp = x_shp[:-1] + (dim,)
     x = np.linspace(-5, 5, np.prod(x_shp),
                     dtype=np.float64).reshape(*x_shp)
     y = self.evaluate(bijector.forward(x))
     bijector_test_util.assert_bijective_and_finite(bijector,
                                                    x,
                                                    y,
                                                    eval_func=self.evaluate,
                                                    event_ndims=0,
                                                    inverse_event_ndims=0,
                                                    rtol=1e-5)
     fldj = bijector.forward_log_det_jacobian(x, event_ndims=0)
     fldj_theoretical = bijector_test_util.get_fldj_theoretical(
         bijector, x, event_ndims=0)
     self.assertAllClose(self.evaluate(fldj_theoretical),
                         self.evaluate(fldj),
                         atol=1e-5,
                         rtol=1e-5)
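This test reappears in Example #13 below in an extended form that additionally skips slow graph-mode execution and works around a JAX batching limitation; the core comparison is the same.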
Example #5
    def testWithLKJSamples(self, dimension, concentration):
        bijector = tfb.CorrelationCholesky()
        lkj_dist = lkj.LKJ(dimension=dimension,
                           concentration=np.float64(concentration),
                           input_output_cholesky=True)
        batch_size = 10
        y = self.evaluate(lkj_dist.sample([batch_size]))
        x = self.evaluate(bijector.inverse(y))

        bijector_test_util.assert_bijective_and_finite(bijector,
                                                       x,
                                                       y,
                                                       eval_func=self.evaluate,
                                                       event_ndims=1,
                                                       inverse_event_ndims=2,
                                                       rtol=1e-5)

        fldj = bijector.forward_log_det_jacobian(x, event_ndims=1)
        fldj_theoretical = bijector_test_util.get_fldj_theoretical(
            bijector,
            x,
            event_ndims=1,
            inverse_event_ndims=2,
            output_to_unconstrained=tfb.Invert(tfb.FillTriangular()))
        self.assertAllClose(self.evaluate(fldj_theoretical),
                            self.evaluate(fldj),
                            atol=1e-5,
                            rtol=1e-5)
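CorrelationCholesky maps unconstrained vectors of length d(d-1)/2 to d x d Cholesky factors, so the inverse direction has matrix-valued events (inverse_event_ndims=2). Because the test rig differentiates on flat vectors, output_to_unconstrained=tfb.Invert(tfb.FillTriangular()) is passed to flatten the matrix output. A small sketch of what FillTriangular does, with shapes shown for the 3 x 3 case (assuming the standard tensorflow_probability import):

    import tensorflow_probability as tfp

    tfb = tfp.bijectors
    vec = [1., 2., 3., 4., 5., 6.]
    # FillTriangular packs a length-n(n+1)/2 vector into an n x n
    # lower-triangular matrix; wrapping it in tfb.Invert flattens
    # such a matrix back into a vector.
    mat = tfb.FillTriangular().forward(vec)               # shape [3, 3]
    flat = tfb.Invert(tfb.FillTriangular()).forward(mat)  # shape [6]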
Example #6
    def testTheoreticalFldj(self, data):
        dim = data.draw(hps.integers(min_value=0, max_value=10))
        diag_bijector = data.draw(
            bijector_hps.unconstrained_bijectors(
                max_forward_event_ndims=1,
                must_preserve_event_ndims=True).filter(
                    _preserves_vector_dim(dim)))
        logging.info('Using diagonal bijector %s %s', diag_bijector.name,
                     diag_bijector)

        bijector = tfb.TransformDiagonal(diag_bijector=diag_bijector)
        ensure_nonzero_batch = lambda shape: [d if d > 0 else 1 for d in shape]
        shape = data.draw(
            tfp_hps.shapes().map(ensure_nonzero_batch)) + [dim, dim]
        x = np.random.randn(*shape).astype(np.float64)
        y = self.evaluate(bijector.forward(x))
        bijector_test_util.assert_bijective_and_finite(bijector,
                                                       x,
                                                       y,
                                                       eval_func=self.evaluate,
                                                       event_ndims=2,
                                                       inverse_event_ndims=2,
                                                       rtol=1e-5)
        fldj = bijector.forward_log_det_jacobian(x, event_ndims=2)
        # For constant-jacobian bijectors, the zero fldj may not be broadcast.
        fldj = fldj + tf.zeros(tf.shape(x)[:-2], dtype=x.dtype)
        fldj_theoretical = bijector_test_util.get_fldj_theoretical(
            bijector, x, event_ndims=2, inverse_event_ndims=2)
        self.assertAllClose(self.evaluate(fldj_theoretical),
                            self.evaluate(fldj),
                            atol=1e-5,
                            rtol=1e-5)
Example #7
    def testTheoreticalFldj(self):
        width = 4
        bijector = tfp.experimental.bijectors.build_trainable_highway_flow(
            width,
            activation_fn=tf.nn.softplus,
            gate_first_n=2,
            seed=test_util.test_seed())
        self.evaluate([v.initializer for v in bijector.trainable_variables])

        x = self.evaluate(
            samplers.uniform(
                [width],
                minval=-1.,
                maxval=1.,
                seed=test_util.test_seed(sampler_type='stateless')))
        y = self.evaluate(bijector.forward(x))
        bijector_test_util.assert_bijective_and_finite(bijector,
                                                       x,
                                                       y,
                                                       eval_func=self.evaluate,
                                                       event_ndims=1,
                                                       inverse_event_ndims=1,
                                                       rtol=1e-5)
        fldj = bijector.forward_log_det_jacobian(x, event_ndims=1)
        fldj_theoretical = bijector_test_util.get_fldj_theoretical(
            bijector, x, event_ndims=1)
        self.assertAllClose(self.evaluate(fldj_theoretical),
                            self.evaluate(fldj),
                            atol=1e-5,
                            rtol=1e-5)
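build_trainable_highway_flow creates trainable variables, which is why the test runs self.evaluate([v.initializer for v in bijector.trainable_variables]) before using the bijector; in graph mode the variables would otherwise be uninitialized.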
Example #8
    def testTheoreticalFldj(self):
        nbatch = 5
        channels = 10
        x = np.random.uniform(size=[nbatch, channels]).astype(np.float32)

        bijector = tfb.BatchNormalization(training=False)
        bijector.batchnorm.build(x.shape)
        self.evaluate([v.initializer for v in bijector.variables])

        y = self.evaluate(bijector.forward(x))
        bijector_test_util.assert_bijective_and_finite(bijector,
                                                       x,
                                                       y,
                                                       eval_func=self.evaluate,
                                                       event_ndims=1,
                                                       inverse_event_ndims=1,
                                                       rtol=1e-5)
        fldj = bijector.forward_log_det_jacobian(x, event_ndims=1)
        # The jacobian is not yet broadcast, since it is constant.
        fldj = fldj + tf.zeros(tf.shape(x)[:-1], dtype=x.dtype)
        fldj_theoretical = bijector_test_util.get_fldj_theoretical(
            bijector, x, event_ndims=1)
        self.assertAllClose(self.evaluate(fldj_theoretical),
                            self.evaluate(fldj),
                            atol=1e-5,
                            rtol=1e-5)
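With training=False, the batch-normalization layer applies a fixed per-channel affine transform based on its moving statistics, so, as in Example #2, the fldj is constant in x and is broadcast up to the sample shape before the comparison.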
Example #9
    def testTheoreticalFldj(self):
        softmax = tfb.SoftmaxCentered()
        x = np.linspace(-15, 15, num=10).reshape(5, 2).astype(np.float64)

        fldj = softmax.forward_log_det_jacobian(x, event_ndims=1)
        fldj_theoretical = bijector_test_util.get_fldj_theoretical(
            softmax, x, event_ndims=1)
        self.assertAllClose(self.evaluate(fldj_theoretical),
                            self.evaluate(fldj),
                            atol=1e-5,
                            rtol=1e-5)
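SoftmaxCentered appends an implicit zero logit and applies softmax, mapping a length-n event to a length-(n+1) probability vector on the simplex; here only the analytic and numerical fldj are compared.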
Example #10
 def testTheoreticalFldjNormalCDF(self):
     # b/137367959 test failure trigger case (resolved by using
     # experimental_use_pfor=False as fallback instead of primary in
     # bijector_test_util.get_fldj_theoretical)
     bijector = tfb.TransformDiagonal(diag_bijector=tfb.NormalCDF())
     x = np.zeros([0, 0])
     fldj = bijector.forward_log_det_jacobian(x, event_ndims=2)
     fldj_theoretical = bijector_test_util.get_fldj_theoretical(
         bijector, x, event_ndims=2, inverse_event_ndims=2)
     self.assertAllClose(self.evaluate(fldj_theoretical),
                         self.evaluate(fldj),
                         atol=1e-5,
                         rtol=1e-5)
Example #11
  def _checkEqualTheoreticalFldj(self, x):
    """Helper for `testJacobian`."""
    event_ndims = int(self.evaluate(tf.rank(x)) - 1)
    self.assertGreaterEqual(event_ndims, 1)

    bijector = tfb.Cumsum(axis=-event_ndims)
    fldj = bijector.forward_log_det_jacobian(
        self._build_tensor(x), event_ndims=event_ndims)
    fldj_theoretical = bijector_test_util.get_fldj_theoretical(
        bijector, x, event_ndims=event_ndims)
    fldj_, fldj_theoretical_ = self.evaluate([fldj, fldj_theoretical])
    self.assertAllEqual(np.zeros_like(fldj_), fldj_)
    self.assertAllClose(np.zeros_like(fldj_theoretical_), fldj_theoretical_)
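Cumsum is volume-preserving: y_i = x_1 + ... + x_i is a linear map whose Jacobian is triangular with unit diagonal, so log|det J| is identically zero. The helper therefore asserts both the analytic and the numerical fldj against zeros rather than only against each other.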
Example #12
 def testJacobian(self):
   bijector = tfb.MatrixInverseTriL()
   batch_size = 5
   for ndims in range(2, 5):
     x_ = np.tril(
         np.random.uniform(
             -1., 1., size=[batch_size, ndims, ndims]).astype(np.float64))
     fldj = bijector.forward_log_det_jacobian(x_, event_ndims=2)
     fldj_theoretical = bijector_test_util.get_fldj_theoretical(
         bijector, x_, event_ndims=2,
         input_to_unconstrained=tfb.Invert(tfb.FillTriangular()),
         output_to_unconstrained=tfb.Invert(tfb.FillTriangular()))
     fldj_, fldj_theoretical_ = self.evaluate([fldj, fldj_theoretical])
     self.assertAllClose(fldj_, fldj_theoretical_)
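MatrixInverseTriL takes lower-triangular matrices to lower-triangular matrices, so both input_to_unconstrained and output_to_unconstrained use tfb.Invert(tfb.FillTriangular()) to flatten the triangular ndims x ndims events into vectors before the numerical Jacobian is taken.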
Example #13
 def testTheoreticalFldj(self, data):
   if not tf.executing_eagerly():
     msg = ('Testing eager mode only because graph is very slow, '
            'presumably due to costly graph construction.')
     self.skipTest(msg)
   if JAX_MODE:  # TODO(b/160167257): Eliminate this workaround.
     # get_fldj_theoretical uses tfp.math.batch_jacobian and assumes the
     # behavior of the bijector does not vary by position. In this case, it
     # can, so we must vmap the result.
     batch_shape = [1]
   else:
     # get_fldj_theoretical test rig requires 1-d batches.
     batch_shape = data.draw(tfp_hps.shapes(min_ndims=1, max_ndims=1))
   hp.note('batch shape: {}'.format(batch_shape))
   bijector = data.draw(rq_splines(batch_shape=batch_shape, dtype=tf.float64))
   self.assertEqual(tf.float64, bijector.dtype)
   bw, bh, kd = self.evaluate(
       [bijector.bin_widths, bijector.bin_heights, bijector.knot_slopes])
   hp.note('bw: {!r}\nbh: {!r}\nkd: {!r}'.format(bw, bh, kd))
   x_shp = ((bw + bh)[..., :-1] + kd).shape[:-1]
   if x_shp[-1] == 1:  # Possibly broadcast the x dim.
     dim = data.draw(hps.integers(min_value=1, max_value=7))
     x_shp = x_shp[:-1] + (dim,)
   x = np.linspace(-4.9, 4.9, np.prod(x_shp), dtype=np.float64).reshape(*x_shp)
   hp.note('x: {!r}'.format(x))
   y = self.evaluate(bijector.forward(x))
   hp.note('y: {!r}'.format(y))
   bijector_test_util.assert_bijective_and_finite(
       bijector,
       x,
       y,
       eval_func=self.evaluate,
       event_ndims=0,
       inverse_event_ndims=0,
       rtol=1e-5)
   fldj = bijector.forward_log_det_jacobian(x, event_ndims=0)
   fldj_theoretical = bijector_test_util.get_fldj_theoretical(
       bijector, x, event_ndims=0)
   self.assertAllClose(
       self.evaluate(fldj_theoretical),
       self.evaluate(fldj),
       atol=1e-5,
       rtol=1e-5)
Example #14
    def testJacobianWithLKJSamples(self, dimension, concentration):
        bijector = tfb.CorrelationCholesky()
        lkj_dist = lkj.LKJ(dimension=dimension,
                           concentration=np.float64(concentration),
                           input_output_cholesky=True)
        batch_size = 10
        y = self.evaluate(
            lkj_dist.sample([batch_size], seed=test_util.test_seed()))
        x = self.evaluate(bijector.inverse(y))

        fldj = bijector.forward_log_det_jacobian(x, event_ndims=1)
        fldj_theoretical = bijector_test_util.get_fldj_theoretical(
            bijector,
            x,
            event_ndims=1,
            inverse_event_ndims=2,
            output_to_unconstrained=OutputToUnconstrained())
        self.assertAllClose(self.evaluate(fldj_theoretical),
                            self.evaluate(fldj),
                            atol=1e-5,
                            rtol=1e-5)
Example #15
 def testTheoreticalFldj(self):
     bijector = tfb.CorrelationCholesky()
     x = np.linspace(-50, 50, num=30).reshape(5, 6).astype(np.float64)
     y = self.evaluate(bijector.forward(x))
     bijector_test_util.assert_bijective_and_finite(bijector,
                                                    x,
                                                    y,
                                                    eval_func=self.evaluate,
                                                    event_ndims=1,
                                                    inverse_event_ndims=2,
                                                    rtol=1e-5)
     fldj = bijector.forward_log_det_jacobian(x, event_ndims=1)
     fldj_theoretical = bijector_test_util.get_fldj_theoretical(
         bijector,
         x,
         event_ndims=1,
         inverse_event_ndims=2,
         output_to_unconstrained=OutputToUnconstrained())
     self.assertAllClose(self.evaluate(fldj_theoretical),
                         self.evaluate(fldj),
                         atol=1e-5,
                         rtol=1e-5)
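Note: OutputToUnconstrained() in the last two examples is a helper defined elsewhere in the test module and not shown in these snippets; it presumably plays the same role as tfb.Invert(tfb.FillTriangular()) in Example #5, flattening the Cholesky-factor output into an unconstrained vector for the numerical Jacobian.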