Example #1
 def testShapeGetters(self):
     chain = tfb.Chain([
         tfb.SoftmaxCentered(validate_args=True),
         tfb.SoftmaxCentered(validate_args=True),
     ])
     x = tf.TensorShape([1])
     y = tf.TensorShape([2 + 1])
     self.assertAllEqual(y, chain.forward_event_shape(x))
     self.assertAllEqual(
         y.as_list(),
         self.evaluate(chain.forward_event_shape_tensor(x.as_list())))
     self.assertAllEqual(x, chain.inverse_event_shape(y))
     self.assertAllEqual(
         x.as_list(),
         self.evaluate(chain.inverse_event_shape_tensor(y.as_list())))
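SoftmaxCentered maps an n-vector of logits to a point on the (n + 1)-component simplex by padding a zero logit and applying softmax, so each application grows the event size by one; chaining two of them therefore takes shape [1] to [2] to [3], as asserted above. A minimal NumPy sketch of the forward map (the same formula, not TFP's implementation):

    import numpy as np

    def softmax_centered_forward(x):
        # Pad a zero logit, then apply a numerically stable softmax.
        z = np.concatenate([x, np.zeros(x.shape[:-1] + (1,))], axis=-1)
        e = np.exp(z - z.max(axis=-1, keepdims=True))
        return e / e.sum(axis=-1, keepdims=True)

    y = softmax_centered_forward(np.array([0.5]))  # shape [1] -> [2]
    print(softmax_centered_forward(y).shape)       # (3,): shape [2] -> [3]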
Example #2
 def testBijectorUnknownShape(self):
     with self.test_session():
         softmax = tfb.SoftmaxCentered()
         self.assertEqual("softmax_centered", softmax.name)
         x = tf.placeholder(shape=[2, None], dtype=tf.float32)
         real_x = np.log([[2., 3, 4], [4., 8, 12]])
         y = tf.placeholder(shape=[2, None], dtype=tf.float32)
         real_y = [[0.2, 0.3, 0.4, 0.1], [0.16, 0.32, 0.48, 0.04]]
         self.assertAllClose(real_y,
                             softmax.forward(x).eval(feed_dict={x: real_x}))
         self.assertAllClose(real_x,
                             softmax.inverse(y).eval(feed_dict={y: real_y}))
         self.assertAllClose(
             -np.sum(np.log(real_y), axis=1),
             softmax.inverse_log_det_jacobian(
                 y, event_ndims=1).eval(feed_dict={y: real_y}),
             atol=0.,
             rtol=1e-7)
         self.assertAllClose(
             -softmax.inverse_log_det_jacobian(
                 y, event_ndims=1).eval(feed_dict={y: real_y}),
             softmax.forward_log_det_jacobian(
                 x, event_ndims=1).eval(feed_dict={x: real_x}),
             atol=0.,
             rtol=1e-7)
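This is TF1-style code (placeholders plus feed_dict); Example #18 below is the same test written with placeholder_with_default. The constants can be checked by hand: the forward pass pads a zero logit, so exp(log([2, 3, 4])) padded with exp(0) = 1 gives [2, 3, 4, 1], which normalizes to [0.2, 0.3, 0.4, 0.1] (and [4, 8, 12, 1] / 25 gives the second row):

    import numpy as np

    x = np.log([[2., 3., 4.], [4., 8., 12.]])
    z = np.exp(np.concatenate([x, np.zeros((2, 1))], axis=-1))
    print(z / z.sum(axis=-1, keepdims=True))
    # [[0.2, 0.3, 0.4, 0.1], [0.16, 0.32, 0.48, 0.04]]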
Example #3
 def testBijector(self):
     with self.cached_session():
         for fwd in [
                 tfb.Identity(),
                 tfb.Exp(),
                 tfb.Affine(shift=[0., 1.], scale_diag=[2., 3.]),
                 tfb.Softplus(),
                 tfb.SoftmaxCentered(),
         ]:
             rev = tfb.Invert(fwd)
             self.assertEqual("_".join(["invert", fwd.name]), rev.name)
             x = [[[1., 2.], [2., 3.]]]
             self.assertAllClose(self.evaluate(fwd.inverse(x)),
                                 self.evaluate(rev.forward(x)))
             self.assertAllClose(self.evaluate(fwd.forward(x)),
                                 self.evaluate(rev.inverse(x)))
             self.assertAllClose(
                 self.evaluate(
                     fwd.forward_log_det_jacobian(x, event_ndims=1)),
                 self.evaluate(
                     rev.inverse_log_det_jacobian(x, event_ndims=1)))
             self.assertAllClose(
                 self.evaluate(
                     fwd.inverse_log_det_jacobian(x, event_ndims=1)),
                 self.evaluate(
                     rev.forward_log_det_jacobian(x, event_ndims=1)))
Example #4
 def testShapeChangingBijector(self):
     softmax = tfb.SoftmaxCentered()
     standard_normal = tfd.Normal(loc=0., scale=1.)
     multi_logit_normal = self._cls()(distribution=standard_normal,
                                      bijector=softmax,
                                      event_shape=[1],
                                      validate_args=True)
     x = [[[-np.log(3.)], [0.]], [[np.log(3)], [np.log(5)]]]
     x = np.float32(x)
     y = self.evaluate(softmax.forward(x))
     expected_log_pdf = -0.5 * np.log(2) + (
         np.squeeze(stats.norm(loc=0., scale=1.).logpdf(x)) -
         np.sum(np.log(y), axis=-1))
     self.assertAllClose(expected_log_pdf,
                         self.evaluate(multi_logit_normal.log_prob(y)))
     self.assertAllClose(
         [1, 2, 3, 2],
         self.evaluate(
             tf.shape(
                 multi_logit_normal.sample([1, 2, 3],
                                           seed=test_util.test_seed()))))
     self.assertAllEqual([2], multi_logit_normal.event_shape)
     self.assertAllEqual([2],
                         self.evaluate(
                             multi_logit_normal.event_shape_tensor()))
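By the change-of-variables formula, log p_Y(y) = log p_X(g^{-1}(y)) + ILDJ(y). The -0.5 * np.log(2) constant in expected_log_pdf is the part of SoftmaxCentered's ILDJ beyond -sum(log y); it appears to be a 0.5 * log(K) correction for embedding the (K - 1)-dimensional simplex in R^K (K = 2 here; compare the -0.5 * np.log(4.) term in Example #16). A standalone check of one entry, using only NumPy/SciPy:

    import numpy as np
    from scipy import stats

    x = -np.log(3.)
    logits = np.array([x, 0.])
    y = np.exp(logits) / np.exp(logits).sum()      # forward: [0.25, 0.75]
    ildj = -np.sum(np.log(y)) - 0.5 * np.log(2.)   # embedding-corrected ILDJ
    print(stats.norm(0., 1.).logpdf(x) + ildj)     # one entry of expected_log_pdf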
Example #5
  def testDofChangeError(self):
    exp = tfb.Exp()
    smc = tfb.SoftmaxCentered()

    # Increase in event-size is the last step. No problems here.
    safe_bij = tfb.Chain([smc, exp],
                         validate_args=True,
                         validate_event_size=True)
    self.evaluate(safe_bij.forward_log_det_jacobian([1., 2., 3.], 1))

    # Increase in event-size before Exp.
    raise_bij = tfb.Chain([exp, smc],
                          validate_args=True,
                          validate_event_size=True)
    with self.assertRaisesRegex((ValueError, tf.errors.InvalidArgumentError),
                                r".+degrees of freedom.+"):
      self.evaluate(raise_bij.forward_log_det_jacobian([1., 2., 3.], 1))

    # When validate_args is False, warns instead of raising.
    warn_bij = tfb.Chain([exp, smc],
                         validate_args=False,
                         validate_event_size=True)
    with mock.patch.object(tf, "print", return_value=tf.no_op()) as mock_print:
      self.evaluate(warn_bij.forward_log_det_jacobian([1., 2., 3.], 1))
      print_args, _ = mock_print.call_args
      self.assertRegex(print_args[0], r"WARNING:.+degrees of freedom")

    # When validate_event_size is False, neither warns nor raises.
    ignore_bij = tfb.Chain([exp, smc], validate_event_size=False)
    self.evaluate(ignore_bij.forward_log_det_jacobian([1., 2., 3.], 1))
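Chain applies its bijectors right-to-left, so Chain([smc, exp]) exponentiates first and only grows the event size (3 -> 4) as the final step. In Chain([exp, smc]) the size increase happens first: smc's output has just 3 degrees of freedom embedded in 4 coordinates, so (roughly speaking) a log-det-Jacobian accumulated over all 4 coordinates of the following bijector would over-count, which is the inconsistency that validate_event_size guards against.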
Example #6
 def testShapeGettersWithDynamicShape(self):
   x = tf.compat.v1.placeholder_with_default(input=[2, 4], shape=None)
   y = tf.compat.v1.placeholder_with_default(input=[2, 5], shape=None)
   bijector = tfb.SoftmaxCentered(validate_args=True)
   self.assertAllEqual(
       [2, 5], self.evaluate(bijector.forward_event_shape_tensor(x)))
   self.assertAllEqual(
       [2, 4], self.evaluate(bijector.inverse_event_shape_tensor(y)))
Example #7
    def test_composition_str_and_repr_match_expected_dynamic_shape(self):
        bij = tfb.Chain([
            tfb.Exp(),
            tfb.Shift(self._tensor([1., 2.])),
            tfb.SoftmaxCentered()
        ])
        self.assertContainsInOrder([
            'tfp.bijectors.Chain(',
            ('min_event_ndims=1, bijectors=[Exp, Shift, SoftmaxCentered])')
        ], str(bij))
        self.assertContainsInOrder([
            '<tfp.bijectors.Chain ',
            ('batch_shape=? forward_min_event_ndims=1 inverse_min_event_ndims=1 '
             'dtype_x=float32 dtype_y=float32 bijectors=[<tfp.bijectors.Exp'),
            '>, <tfp.bijectors.Shift', '>, <tfp.bijectors.SoftmaxCentered',
            '>]>'
        ], repr(bij))

        bij = tfb.Chain([
            tfb.JointMap({
                'a': tfb.Exp(),
                'b': tfb.ScaleMatvecDiag(self._tensor([2., 2.]))
            }),
            tfb.Restructure({
                'a': 0,
                'b': 1
            }, [0, 1]),
            tfb.Split(2),
            tfb.Invert(tfb.SoftmaxCentered()),
        ])
        self.assertContainsInOrder([
            'tfp.bijectors.Chain(',
            ('forward_min_event_ndims=1, '
             'inverse_min_event_ndims={a: 1, b: 1}, '
             'bijectors=[JointMap({a: Exp, b: ScaleMatvecDiag}), '
             'Restructure, Split, Invert(SoftmaxCentered)])')
        ], str(bij))
        self.assertContainsInOrder([
            '<tfp.bijectors.Chain ',
            ('batch_shape=? forward_min_event_ndims=1 '
             "inverse_min_event_ndims={'a': 1, 'b': 1} dtype_x=float32 "
             "dtype_y={'a': ?, 'b': float32} "
             "bijectors=[<tfp.bijectors.JointMap "),
            '>, <tfp.bijectors.Restructure', '>, <tfp.bijectors.Split',
            '>, <tfp.bijectors.Invert', '>]>'
        ], repr(bij))
Example #8
    def testTheoreticalFldj(self):
        softmax = tfb.SoftmaxCentered()
        x = np.linspace(-15, 15, num=10).reshape(5, 2).astype(np.float64)

        fldj = softmax.forward_log_det_jacobian(x, event_ndims=1)
        fldj_theoretical = bijector_test_util.get_fldj_theoretical(
            softmax, x, event_ndims=1)
        self.assertAllClose(self.evaluate(fldj_theoretical),
                            self.evaluate(fldj),
                            atol=1e-5,
                            rtol=1e-5)
Example #9
    def testAssertsValidArgToInverse(self):
        softmax = tfb.SoftmaxCentered(validate_args=True)
        with self.assertRaisesOpError('must sum to `1`'):
            self.evaluate(softmax.inverse([0.03, 0.7, 0.4]))

        with self.assertRaisesOpError(
                'must be less than or equal to `1`|must sum to `1`'):
            self.evaluate(softmax.inverse([0.06, 0.4, 1.02]))

        with self.assertRaisesOpError('must be non-negative'):
            self.evaluate(softmax.inverse([0.4, 0.5, 0.3, -0.2]))
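The inverse map is simply x_i = log(y_i) - log(y_K) for the first K - 1 components, which only exists for strictly positive vectors that sum to 1; the three assertions above exercise each validation. A one-line NumPy version:

    import numpy as np

    y = np.array([0.2, 0.3, 0.4, 0.1])
    print(np.log(y[:-1]) - np.log(y[-1]))  # recovers log([2., 3., 4.])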
Example #10
 def testShapeGetters(self):
     x = tf.TensorShape([4])
     y = tf.TensorShape([5])
     bijector = tfb.SoftmaxCentered(validate_args=True)
     self.assertAllEqual(y, bijector.forward_event_shape(x))
     self.assertAllEqual(
         y.as_list(),
         self.evaluate(bijector.forward_event_shape_tensor(x.as_list())))
     self.assertAllEqual(x, bijector.inverse_event_shape(y))
     self.assertAllEqual(
         x.as_list(),
         self.evaluate(bijector.inverse_event_shape_tensor(y.as_list())))
Example #11
 def testBijectiveAndFinite(self):
     with self.test_session():
         softmax = tfb.SoftmaxCentered()
         x = np.linspace(-50, 50, num=10).reshape(5, 2).astype(np.float32)
         # Make y values on the simplex with a wide range.
         y_0 = np.ones(5).astype(np.float32)
         y_1 = (1e-5 * rng.rand(5)).astype(np.float32)
         y_2 = (1e1 * rng.rand(5)).astype(np.float32)
         y = np.array([y_0, y_1, y_2])
         y /= y.sum(axis=0)
         y = y.T  # y.shape = [5, 3]
         assert_bijective_and_finite(softmax, x, y, event_ndims=1)
Example #12
 def testShapeGetters(self):
   with self.test_session():
     bijector = tfb.Invert(
         tfb.SoftmaxCentered(validate_args=True))
     x = tf.TensorShape([2])
     y = tf.TensorShape([1])
     self.assertAllEqual(y, bijector.forward_event_shape(x))
     self.assertAllEqual(
         y.as_list(),
         bijector.forward_event_shape_tensor(x.as_list()).eval())
     self.assertAllEqual(x, bijector.inverse_event_shape(y))
     self.assertAllEqual(
         x.as_list(),
         bijector.inverse_event_shape_tensor(y.as_list()).eval())
Example #13
 def testShapeGettersWithBatchShape(self):
   x = tf.TensorShape([2, 4])
   y = tf.TensorShape([2, 5])
   bijector = tfb.SoftmaxCentered(validate_args=True)
   self.assertAllEqual(y, bijector.forward_event_shape(x))
   self.assertAllEqual(
       tensorshape_util.as_list(y),
       self.evaluate(
           bijector.forward_event_shape_tensor(tensorshape_util.as_list(x))))
   self.assertAllEqual(x, bijector.inverse_event_shape(y))
   self.assertAllEqual(
       tensorshape_util.as_list(x),
       self.evaluate(
           bijector.inverse_event_shape_tensor(tensorshape_util.as_list(y))))
Example #14
 def testShapeGetters(self):
   bijector = tfb.Invert(
       tfb.SoftmaxCentered(validate_args=True))
   x = tf.TensorShape([2])
   y = tf.TensorShape([1])
   self.assertAllEqual(y, bijector.forward_event_shape(x))
   self.assertAllEqual(
       tensorshape_util.as_list(y),
       self.evaluate(
           bijector.forward_event_shape_tensor(tensorshape_util.as_list(x))))
   self.assertAllEqual(x, bijector.inverse_event_shape(y))
   self.assertAllEqual(
       tensorshape_util.as_list(x),
       self.evaluate(
           bijector.inverse_event_shape_tensor(tensorshape_util.as_list(y))))
Example #15
 def _compute_quantiles():
     """Helper to build quantiles."""
     # Omit {0, 1} since they might lead to Inf/NaN.
     zero = tf.zeros([], dtype=dist.dtype)
     edges = tf.linspace(zero, 1., quadrature_size + 3)[1:-1]
     # Expand edges so it broadcasts across batch dims.
     edges = tf.reshape(
         edges,
         shape=tf.concat(
             [[-1], tf.ones([batch_ndims], dtype=tf.int32)], axis=0))
     quantiles = dist.quantile(edges)
     quantiles = bijectors.SoftmaxCentered().forward(quantiles)
     # Cyclically permute left by one.
     perm = tf.concat([tf.range(1, 1 + batch_ndims), [0]], axis=0)
     quantiles = tf.transpose(quantiles, perm)
     quantiles.set_shape(_get_final_shape(quadrature_size + 1))
     return quantiles
Example #16
 def testBijectorVector(self):
     softmax = tfb.SoftmaxCentered()
     self.assertStartsWith(softmax.name, "softmax_centered")
     x = np.log([[2., 3, 4], [4., 8, 12]])
     y = [[0.2, 0.3, 0.4, 0.1], [0.16, 0.32, 0.48, 0.04]]
     self.assertAllClose(y, self.evaluate(softmax.forward(x)))
     self.assertAllClose(x, self.evaluate(softmax.inverse(y)))
     self.assertAllClose(
         -np.sum(np.log(y), axis=1) - 0.5 * np.log(4.)[np.newaxis, ...],
         self.evaluate(softmax.inverse_log_det_jacobian(y, event_ndims=1)),
         atol=0.,
         rtol=1e-7)
     self.assertAllClose(
         self.evaluate(-softmax.inverse_log_det_jacobian(y, event_ndims=1)),
         self.evaluate(softmax.forward_log_det_jacobian(x, event_ndims=1)),
         atol=0.,
         rtol=1e-7)
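Note the extra -0.5 * np.log(4.) term relative to the otherwise near-identical Example #17 below: newer TFP releases appear to include a 0.5 * log(K) simplex-embedding correction (K = 4 output components here) in SoftmaxCentered's log-det-Jacobian, while the older test expected only -sum(log y).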
Example #17
 def testBijectorVector(self):
     with self.test_session():
         softmax = tfb.SoftmaxCentered()
         self.assertEqual("softmax_centered", softmax.name)
         x = np.log([[2., 3, 4], [4., 8, 12]])
         y = [[0.2, 0.3, 0.4, 0.1], [0.16, 0.32, 0.48, 0.04]]
         self.assertAllClose(y, softmax.forward(x).eval())
         self.assertAllClose(x, softmax.inverse(y).eval())
         self.assertAllClose(-np.sum(np.log(y), axis=1),
                             softmax.inverse_log_det_jacobian(
                                 y, event_ndims=1).eval(),
                             atol=0.,
                             rtol=1e-7)
         self.assertAllClose(
             -softmax.inverse_log_det_jacobian(y, event_ndims=1).eval(),
             softmax.forward_log_det_jacobian(x, event_ndims=1).eval(),
             atol=0.,
             rtol=1e-7)
Example #18
 def testBijectorUnknownShape(self):
     softmax = tfb.SoftmaxCentered()
     self.assertStartsWith(softmax.name, "softmax_centered")
     x_ = np.log([[2., 3, 4], [4., 8, 12]]).astype(np.float32)
     y_ = np.array([[0.2, 0.3, 0.4, 0.1], [0.16, 0.32, 0.48, 0.04]],
                   dtype=np.float32)
     x = tf1.placeholder_with_default(x_, shape=[2, None])
     y = tf1.placeholder_with_default(y_, shape=[2, None])
     self.assertAllClose(y_, self.evaluate(softmax.forward(x)))
     self.assertAllClose(x_, self.evaluate(softmax.inverse(y)))
     self.assertAllClose(
         -np.sum(np.log(y_), axis=1) - 0.5 * np.log(4.)[np.newaxis, ...],
         self.evaluate(softmax.inverse_log_det_jacobian(y, event_ndims=1)),
         atol=0.,
         rtol=1e-7)
     self.assertAllClose(
         -self.evaluate(softmax.inverse_log_det_jacobian(y, event_ndims=1)),
         self.evaluate(softmax.forward_log_det_jacobian(x, event_ndims=1)),
         atol=0.,
         rtol=1e-7)
Example #19
 def testBijector(self):
     for fwd in [
             tfb.Identity(),
             tfb.Exp(),
             tfb.ScaleMatvecDiag(scale_diag=[2., 3.]),
             tfb.Softplus(),
             tfb.SoftmaxCentered(),
     ]:
         rev = tfb.Invert(fwd)
         self.assertStartsWith(rev.name, '_'.join(['invert', fwd.name]))
         x = [[[1., 2.], [2., 3.]]]
         self.assertAllClose(self.evaluate(fwd.inverse(x)),
                             self.evaluate(rev.forward(x)))
         self.assertAllClose(self.evaluate(fwd.forward(x)),
                             self.evaluate(rev.inverse(x)))
         self.assertAllClose(
             self.evaluate(fwd.forward_log_det_jacobian(x, event_ndims=1)),
             self.evaluate(rev.inverse_log_det_jacobian(x, event_ndims=1)))
         self.assertAllClose(
             self.evaluate(fwd.inverse_log_det_jacobian(x, event_ndims=1)),
             self.evaluate(rev.forward_log_det_jacobian(x, event_ndims=1)))
Example #20
    def testSizeChangingExplicitBlocks(self, dynamic_shape, batch_shape):
        block_sizes = tf.convert_to_tensor(value=[2, 1, 3])
        if dynamic_shape:
            block_sizes = tf1.placeholder_with_default(block_sizes,
                                                       shape=block_sizes.shape)
        exp = tfb.Exp()
        sc = tfb.SoftmaxCentered()
        aff = tfb.Affine(scale_diag=[2., 3., 4.])
        blockwise = tfb.Blockwise(bijectors=[exp, sc, aff],
                                  block_sizes=block_sizes,
                                  maybe_changes_size=True)

        x = tf.cast([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], dtype=tf.float32)
        for s in batch_shape:
            x = tf.expand_dims(x, 0)
            x = tf.tile(x, [s] + [1] * (tensorshape_util.rank(x.shape) - 1))
        x = tf1.placeholder_with_default(
            x, shape=None if dynamic_shape else x.shape)

        # Identity to break the caching.
        blockwise_y = tf.identity(blockwise.forward(x))
        blockwise_fldj = blockwise.forward_log_det_jacobian(x, event_ndims=1)
        blockwise_y_shape_tensor = blockwise.forward_event_shape_tensor(
            tf.shape(x))
        blockwise_y_shape = blockwise.forward_event_shape(x.shape)

        blockwise_x = blockwise.inverse(blockwise_y)
        blockwise_x_shape_tensor = blockwise.inverse_event_shape_tensor(
            tf.shape(blockwise_y))
        blockwise_x_shape = blockwise.inverse_event_shape(blockwise_y.shape)
        blockwise_ildj = blockwise.inverse_log_det_jacobian(blockwise_y,
                                                            event_ndims=1)

        if not dynamic_shape:
            self.assertEqual(blockwise_y.shape, batch_shape + [7])
            self.assertEqual(blockwise_y_shape, batch_shape + [7])
            self.assertEqual(blockwise_fldj.shape, batch_shape + [])
            self.assertEqual(blockwise_x.shape, batch_shape + [6])
            self.assertEqual(blockwise_x_shape, batch_shape + [6])
            self.assertEqual(blockwise_ildj.shape, batch_shape + [])
        self.assertAllEqual(self.evaluate(tf.shape(blockwise_y)),
                            batch_shape + [7])
        self.assertAllEqual(self.evaluate(blockwise_y_shape_tensor),
                            batch_shape + [7])
        self.assertAllEqual(self.evaluate(tf.shape(blockwise_fldj)),
                            batch_shape + [])
        self.assertAllEqual(self.evaluate(tf.shape(blockwise_x)),
                            batch_shape + [6])
        self.assertAllEqual(self.evaluate(blockwise_x_shape_tensor),
                            batch_shape + [6])
        self.assertAllEqual(self.evaluate(tf.shape(blockwise_ildj)),
                            batch_shape + [])

        expl_y = tf.concat([
            exp.forward(x[..., :2]),
            sc.forward(x[..., 2:3]),
            aff.forward(x[..., 3:]),
        ], axis=-1)
        expl_fldj = sum([
            exp.forward_log_det_jacobian(x[..., :2], event_ndims=1),
            sc.forward_log_det_jacobian(x[..., 2:3], event_ndims=1),
            aff.forward_log_det_jacobian(x[..., 3:], event_ndims=1)
        ])
        expl_x = tf.concat([
            exp.inverse(expl_y[..., :2]),
            sc.inverse(expl_y[..., 2:4]),
            aff.inverse(expl_y[..., 4:]),
        ], axis=-1)
        expl_ildj = sum([
            exp.inverse_log_det_jacobian(expl_y[..., :2], event_ndims=1),
            sc.inverse_log_det_jacobian(expl_y[..., 2:4], event_ndims=1),
            aff.inverse_log_det_jacobian(expl_y[..., 4:], event_ndims=1)
        ])

        self.assertAllClose(self.evaluate(expl_y), self.evaluate(blockwise_y))
        self.assertAllClose(self.evaluate(expl_fldj),
                            self.evaluate(blockwise_fldj))
        self.assertAllClose(self.evaluate(expl_x), self.evaluate(blockwise_x))
        self.assertAllClose(self.evaluate(expl_ildj),
                            self.evaluate(blockwise_ildj))
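The shape arithmetic behind the assertions: block_sizes=[2, 1, 3] consumes the 6 input coordinates as [exp: 2, sc: 1, aff: 3]; SoftmaxCentered grows its block from 1 to 2 components, so the forward event size is 2 + 2 + 3 = 7, and the inverse slices the output as [..., :2], [..., 2:4], [..., 4:], exactly mirroring the explicit expl_* computation.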