Example No. 1
 def testChainIldjWithPlaceholder(self):
     chain = Chain((Exp(), Exp()))
     samples = array_ops.placeholder(dtype=np.float32,
                                     shape=[None, 10],
                                     name="samples")
     ildj = chain.inverse_log_det_jacobian(samples, event_ndims=0)
     self.assertTrue(ildj is not None)
     with self.cached_session():
         ildj.eval({samples: np.zeros([2, 10], np.float32)})
Example No. 2
 def testScalarCongruency(self):
     with self.cached_session():
         chain = Chain((Exp(), Softplus()))
         assert_scalar_congruency(chain,
                                  lower_x=1e-3,
                                  upper_x=1.5,
                                  rtol=0.05)
Example No. 3
 def testScalarCongruency(self):
     with self.test_session():
         bijector = Chain((Exp(), Softplus()))
         assert_scalar_congruency(bijector,
                                  lower_x=1e-3,
                                  upper_x=1.5,
                                  rtol=0.05)
Example No. 4
 def testScalarCongruency(self):
     with self.test_session():
         bijector = Exp()
         assert_scalar_congruency(bijector,
                                  lower_x=-2.,
                                  upper_x=1.5,
                                  rtol=0.05)
Example No. 5
 def testBijector(self):
     with self.test_session():
         chain = Chain((Exp(event_ndims=1), Softplus(event_ndims=1)))
         self.assertEqual("chain_of_exp_of_softplus", chain.name)
         x = np.asarray([[[1., 2.], [2., 3.]]])
         self.assertAllClose(1. + np.exp(x), chain.forward(x).eval())
         self.assertAllClose(np.log(x - 1.), chain.inverse(x).eval())
         self.assertAllClose(-np.sum(np.log(x - 1.), axis=2),
                             chain.inverse_log_det_jacobian(x).eval())
         self.assertAllClose(np.sum(x, axis=2),
                             chain.forward_log_det_jacobian(x).eval())
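To see where the asserted numbers come from: Chain((Exp(), Softplus())) applies Softplus first and Exp second, so the forward map is exp(softplus(x)) = 1 + exp(x) and its inverse is log(y - 1); the test then evaluates the inverse and its log det Jacobian at the raw array x. A short NumPy sketch of the same arithmetic (an illustration added here, not part of the original test):

    import numpy as np

    x = np.asarray([[[1., 2.], [2., 3.]]])
    y = 1. + np.exp(x)                      # forward: exp(softplus(x)) = 1 + exp(x)
    x_back = np.log(y - 1.)                 # inverse: log(y - 1) recovers x
    ildj = -np.sum(np.log(y - 1.), axis=2)  # inverse log det Jacobian over the event axis
    fldj = np.sum(x, axis=2)                # softplus and exp terms telescope to sum(x)
    assert np.allclose(x, x_back)
    assert np.allclose(ildj, -fldj)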
Example No. 6
    def testChainAffineExp(self):
        scale_diag = np.array([1., 2., 3.], dtype=np.float32)
        chain = Chain([Affine(scale_diag=scale_diag), Exp()])
        x = [0., np.log(2., dtype=np.float32), np.log(3., dtype=np.float32)]
        y = [1., 4., 9.]
        self.assertAllClose(y, self.evaluate(chain.forward(x)))
        self.assertAllClose(x, self.evaluate(chain.inverse(y)))
        self.assertAllClose(
            np.log(6, dtype=np.float32) + np.sum(x),
            self.evaluate(chain.forward_log_det_jacobian(x, event_ndims=1)))

        self.assertAllClose(
            -np.log(6, dtype=np.float32) - np.sum(x),
            self.evaluate(chain.inverse_log_det_jacobian(y, event_ndims=1)))
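The expected values here follow directly from the composition: Chain([Affine(scale_diag=scale_diag), Exp()]) applies Exp first and the diagonal Affine scale second, so forward(x) = scale_diag * exp(x) and the forward log det Jacobian (with event_ndims=1) is sum(x) + log(1 * 2 * 3) = sum(x) + log(6). A small NumPy check of that arithmetic (illustrative only, not part of the original test):

    import numpy as np

    scale_diag = np.array([1., 2., 3.], dtype=np.float32)
    x = np.array([0., np.log(2.), np.log(3.)], dtype=np.float32)
    y = scale_diag * np.exp(x)                       # -> [1., 4., 9.]
    fldj = np.sum(x) + np.sum(np.log(scale_diag))    # sum(x) + log(6)
    ildj = -fldj                                     # inverse log det Jacobian at y
    assert np.allclose(y, [1., 4., 9.])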
Example No. 7
 def testBijector(self):
   with self.test_session():
     bijector = Exp(event_ndims=1)
     self.assertEqual("exp", bijector.name)
     x = [[[1.], [2.]]]
     y = np.exp(x)
     self.assertAllClose(y, bijector.forward(x).eval())
     self.assertAllClose(x, bijector.inverse(y).eval())
     self.assertAllClose(
         -np.sum(np.log(y), axis=-1),
         bijector.inverse_log_det_jacobian(y).eval())
     self.assertAllClose(-bijector.inverse_log_det_jacobian(np.exp(x)).eval(),
                         bijector.forward_log_det_jacobian(x).eval())
Example No. 8
    def testBijector(self):
        with self.test_session():
            exp = Exp()
            inline = Inline(
                forward_fn=math_ops.exp,
                inverse_fn=math_ops.log,
                inverse_log_det_jacobian_fn=lambda y: -math_ops.log(y),
                forward_log_det_jacobian_fn=lambda x: x,
                forward_min_event_ndims=0,
                name="exp")

            self.assertEqual(exp.name, inline.name)
            x = [[[1., 2.], [3., 4.], [5., 6.]]]
            y = np.exp(x)
            self.assertAllClose(y, inline.forward(x).eval())
            self.assertAllClose(x, inline.inverse(y).eval())
            self.assertAllClose(
                -np.sum(np.log(y), axis=-1),
                inline.inverse_log_det_jacobian(y, event_ndims=1).eval())
            self.assertAllClose(
                -inline.inverse_log_det_jacobian(y, event_ndims=1).eval(),
                inline.forward_log_det_jacobian(x, event_ndims=1).eval())
Example No. 9
    def testBijector(self):
        with self.test_session():
            exp = Exp(event_ndims=1)
            inline = Inline(
                forward_fn=math_ops.exp,
                inverse_fn=math_ops.log,
                inverse_log_det_jacobian_fn=(
                    lambda y: -math_ops.reduce_sum(  # pylint: disable=g-long-lambda
                        math_ops.log(y),
                        reduction_indices=-1)),
                forward_log_det_jacobian_fn=(
                    lambda x: math_ops.reduce_sum(x, reduction_indices=-1)),
                name="exp")

            self.assertEqual(exp.name, inline.name)
            x = [[[1., 2.], [3., 4.], [5., 6.]]]
            y = np.exp(x)
            self.assertAllClose(y, inline.forward(x).eval())
            self.assertAllClose(x, inline.inverse(y).eval())
            self.assertAllClose(-np.sum(np.log(y), axis=-1),
                                inline.inverse_log_det_jacobian(y).eval())
            self.assertAllClose(-inline.inverse_log_det_jacobian(y).eval(),
                                inline.forward_log_det_jacobian(x).eval())
Example No. 10
    def testMinEventNdimsChain(self):
        chain = Chain([Exp(), Exp(), Exp()])
        self.assertEqual(0, chain.forward_min_event_ndims)
        self.assertEqual(0, chain.inverse_min_event_ndims)

        chain = Chain([Affine(), Affine(), Affine()])
        self.assertEqual(1, chain.forward_min_event_ndims)
        self.assertEqual(1, chain.inverse_min_event_ndims)

        chain = Chain([Exp(), Affine()])
        self.assertEqual(1, chain.forward_min_event_ndims)
        self.assertEqual(1, chain.inverse_min_event_ndims)

        chain = Chain([Affine(), Exp()])
        self.assertEqual(1, chain.forward_min_event_ndims)
        self.assertEqual(1, chain.inverse_min_event_ndims)

        chain = Chain([Affine(), Exp(), Softplus(), Affine()])
        self.assertEqual(1, chain.forward_min_event_ndims)
        self.assertEqual(1, chain.inverse_min_event_ndims)
Example No. 11
def quadrature_scheme_lognormal_quantiles(loc,
                                          scale,
                                          quadrature_size,
                                          validate_args=False,
                                          name=None):
    """Use LogNormal quantiles to form quadrature on positive-reals.

    Args:
      loc: `float`-like (batch of) scalar `Tensor`; the location parameter of
        the LogNormal prior.
      scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of
        the LogNormal prior.
      quadrature_size: Python `int` scalar representing the number of quadrature
        points.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      name: Python `str` name prefixed to Ops created by this function.

    Returns:
      grid: (Batch of) length-`quadrature_size` vectors representing the
        `log_rate` parameters of a `Poisson`.
      probs: (Batch of) length-`quadrature_size` vectors representing the
        weight associated with each `grid` value.
    """
    with ops.name_scope(name, "quadrature_scheme_lognormal_quantiles",
                        [loc, scale]):
        # Create a LogNormal distribution.
        dist = transformed_lib.TransformedDistribution(
            distribution=normal_lib.Normal(loc=loc, scale=scale),
            bijector=Exp(),
            validate_args=validate_args)
        batch_ndims = dist.batch_shape.ndims
        if batch_ndims is None:
            batch_ndims = array_ops.shape(dist.batch_shape_tensor())[0]

        def _compute_quantiles():
            """Helper to build quantiles."""
            # Omit {0, 1} since they might lead to Inf/NaN.
            zero = array_ops.zeros([], dtype=dist.dtype)
            edges = math_ops.linspace(zero, 1., quadrature_size + 3)[1:-1]
            # Expand edges so they broadcast across batch dims.
            edges = array_ops.reshape(
                edges,
                shape=array_ops.concat(
                    [[-1],
                     array_ops.ones([batch_ndims], dtype=dtypes.int32)],
                    axis=0))
            quantiles = dist.quantile(edges)
            # Cyclically permute left by one.
            perm = array_ops.concat([math_ops.range(1, 1 + batch_ndims), [0]],
                                    axis=0)
            quantiles = array_ops.transpose(quantiles, perm)
            return quantiles

        quantiles = _compute_quantiles()

        # Compute grid as quantile midpoints.
        grid = (quantiles[..., :-1] + quantiles[..., 1:]) / 2.
        # Set shape hints.
        grid.set_shape(dist.batch_shape.concatenate([quadrature_size]))

        # By construction probs is constant, i.e., `1 / quadrature_size`. This is
        # important, because non-constant probs leads to non-reparameterizable
        # samples.
        probs = array_ops.fill(dims=[quadrature_size],
                               value=1. /
                               math_ops.cast(quadrature_size, dist.dtype))

        return grid, probs
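The function above returns the quadrature grid as LogNormal quantile midpoints together with uniform weights equal to 1 / quadrature_size. A minimal usage sketch using the TF1-style session API seen throughout these examples; it assumes quadrature_scheme_lognormal_quantiles is in scope (defined as above or imported from the module that provides it):

    import numpy as np
    import tensorflow as tf

    # loc/scale/quadrature_size are the parameters documented in the docstring above.
    grid, probs = quadrature_scheme_lognormal_quantiles(
        loc=0., scale=1., quadrature_size=10)

    with tf.Session() as sess:
        grid_val, probs_val = sess.run([grid, probs])
        # By construction the weights are constant: 1 / quadrature_size.
        np.testing.assert_allclose(probs_val, np.full([10], 0.1), rtol=1e-6)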
Example No. 12
 def testBijectiveAndFinite(self):
   with self.test_session():
     bijector = Exp(event_ndims=0)
     x = np.linspace(-10, 10, num=10).astype(np.float32)
     y = np.logspace(-10, 10, num=10).astype(np.float32)
     assert_bijective_and_finite(bijector, x, y)