Example #1
    def testDistributionShapeGetShapeDynamic(self):
        # Works for static ndims despite unknown static shape.
        shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
        y = tf.placeholder_with_default(self._random_sample((3, 4, 2),
                                                            dtype=np.float32),
                                        shape=None)
        self._assertNdArrayEqual(([3], [4], [2]),
                                 self.evaluate(shaper.get_shape(y)))

        shaper = _DistributionShape(batch_ndims=0, event_ndims=1)
        y = tf.placeholder_with_default(np.ones((3, 2), dtype=np.int32),
                                        shape=(None, None))
        self._assertNdArrayEqual(([3], _empty_shape, [2]),
                                 self.evaluate(shaper.get_shape(y)))

        # Works for deferred {batch,event}_ndims.
        batch_ndims = tf.placeholder_with_default(1, shape=None)
        event_ndims = tf.placeholder_with_default(1, shape=None)
        shaper = _DistributionShape(batch_ndims=batch_ndims,
                                    event_ndims=event_ndims)
        y = tf.placeholder_with_default(np.ones((3, 4, 2), dtype=np.int32),
                                        shape=[None, None, 2])
        self._assertNdArrayEqual(([3], [4], [2]),
                                 self.evaluate(shaper.get_shape(y)))

        y = tf.placeholder_with_default(np.ones((3, 2), dtype=np.int32),
                                        shape=None)
        batch_ndims = tf.placeholder_with_default(0, shape=None)
        event_ndims = tf.placeholder_with_default(1, shape=None)
        shaper = _DistributionShape(batch_ndims=batch_ndims,
                                    event_ndims=event_ndims)
        self._assertNdArrayEqual(([3], _empty_shape, [2]),
                                 self.evaluate(shaper.get_shape(y)))
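These tests exercise `_DistributionShape.get_shape`, which partitions a tensor's shape into `(sample_shape, batch_shape, event_shape)`: the trailing `event_ndims` axes are the event shape, the `batch_ndims` axes before them are the batch shape, and whatever remains in front is the sample shape. A minimal NumPy sketch of that partitioning for statically known ranks; `split_shape` is an illustrative helper, not part of the TensorFlow Probability API:

import numpy as np

def split_shape(x, batch_ndims, event_ndims):
    # Trailing `event_ndims` axes are the event shape, the `batch_ndims` axes
    # before them are the batch shape, and the leading axes are the sample shape.
    shape = np.shape(x)
    if len(shape) < batch_ndims + event_ndims:
        raise ValueError("expected batch_ndims + event_ndims <= ndims")
    sample_ndims = len(shape) - batch_ndims - event_ndims
    return (list(shape[:sample_ndims]),
            list(shape[sample_ndims:sample_ndims + batch_ndims]),
            list(shape[sample_ndims + batch_ndims:]))

# Mirrors the first assertion above: batch_ndims=1, event_ndims=1.
print(split_shape(np.ones((3, 4, 2)), batch_ndims=1, event_ndims=1))
# -> ([3], [4], [2])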
Example #2
    def testDistributionShapeGetShapeDynamic(self):
        with self.test_session() as sess:
            # Works for static ndims despite unknown static shape.
            shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
            y = tf.placeholder(tf.int32, shape=(None, None, 2))
            y_value = np.ones((3, 4, 2), dtype=y.dtype.as_numpy_dtype())
            self._assertNdArrayEqual(([3], [4], [2]),
                                     sess.run(shaper.get_shape(y),
                                              feed_dict={y: y_value}))

            shaper = _DistributionShape(batch_ndims=0, event_ndims=1)
            y = tf.placeholder(tf.int32, shape=(None, None))
            y_value = np.ones((3, 2), dtype=y.dtype.as_numpy_dtype())
            self._assertNdArrayEqual(([3], _empty_shape, [2]),
                                     sess.run(shaper.get_shape(y),
                                              feed_dict={y: y_value}))

            # Works for deferred {batch,event}_ndims.
            batch_ndims = tf.placeholder(tf.int32)
            event_ndims = tf.placeholder(tf.int32)
            shaper = _DistributionShape(batch_ndims=batch_ndims,
                                        event_ndims=event_ndims)
            y = tf.placeholder(tf.float32)
            y_value = self._random_sample((3, 4, 2), dtype=y.dtype)
            feed_dict = {y: y_value, batch_ndims: 1, event_ndims: 1}
            self._assertNdArrayEqual(([3], [4], [2]),
                                     sess.run(shaper.get_shape(y),
                                              feed_dict=feed_dict))

            y_value = self._random_sample((3, 2), dtype=y.dtype)
            feed_dict = {y: y_value, batch_ndims: 0, event_ndims: 1}
            self._assertNdArrayEqual(([3], _empty_shape, [2]),
                                     sess.run(shaper.get_shape(y),
                                              feed_dict=feed_dict))
Example #3
  def testDistributionShapeGetNdimsStatic(self):
    with self.test_session():
      shaper = _DistributionShape(batch_ndims=0, event_ndims=0)
      x = 1
      self.assertEqual(0, shaper.get_sample_ndims(x).eval())
      self.assertEqual(0, shaper.batch_ndims.eval())
      self.assertEqual(0, shaper.event_ndims.eval())

      shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
      x = self._random_sample((1, 2, 3))
      self.assertAllEqual(3, shaper.get_ndims(x).eval())
      self.assertEqual(1, shaper.get_sample_ndims(x).eval())
      self.assertEqual(1, shaper.batch_ndims.eval())
      self.assertEqual(1, shaper.event_ndims.eval())

      x += self._random_sample((1, 2, 3))
      self.assertAllEqual(3, shaper.get_ndims(x).eval())
      self.assertEqual(1, shaper.get_sample_ndims(x).eval())
      self.assertEqual(1, shaper.batch_ndims.eval())
      self.assertEqual(1, shaper.event_ndims.eval())

      # Test ndims functions work, even despite unfed Tensors.
      y = tf.placeholder(tf.float32, shape=(1024, None, 1024))
      self.assertEqual(3, shaper.get_ndims(y).eval())
      self.assertEqual(1, shaper.get_sample_ndims(y).eval())
      self.assertEqual(1, shaper.batch_ndims.eval())
      self.assertEqual(1, shaper.event_ndims.eval())
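The rank bookkeeping asserted here is plain arithmetic: `sample_ndims = ndims - batch_ndims - event_ndims`. For the `(1, 2, 3)`-shaped sample above with `batch_ndims=1` and `event_ndims=1` (numbers reproduced for illustration only):

ndims = 3                      # rank of the (1, 2, 3)-shaped sample
batch_ndims, event_ndims = 1, 1
sample_ndims = ndims - batch_ndims - event_ndims
assert sample_ndims == 1       # matches get_sample_ndims in the test above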
Example #4
  def testDistributionShapeGetShapeDynamic(self):
    with self.test_session() as sess:
      # Works for static ndims despite unknown static shape.
      shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
      y = tf.placeholder(tf.int32, shape=(None, None, 2))
      y_value = np.ones((3, 4, 2), dtype=y.dtype.as_numpy_dtype())
      self._assertNdArrayEqual(
          ([3], [4], [2]),
          sess.run(shaper.get_shape(y), feed_dict={y: y_value}))

      shaper = _DistributionShape(batch_ndims=0, event_ndims=1)
      y = tf.placeholder(tf.int32, shape=(None, None))
      y_value = np.ones((3, 2), dtype=y.dtype.as_numpy_dtype())
      self._assertNdArrayEqual(
          ([3], _empty_shape, [2]),
          sess.run(shaper.get_shape(y), feed_dict={y: y_value}))

      # Works for deferred {batch,event}_ndims.
      batch_ndims = tf.placeholder(tf.int32)
      event_ndims = tf.placeholder(tf.int32)
      shaper = _DistributionShape(
          batch_ndims=batch_ndims, event_ndims=event_ndims)
      y = tf.placeholder(tf.float32)
      y_value = self._random_sample((3, 4, 2), dtype=y.dtype)
      feed_dict = {y: y_value, batch_ndims: 1, event_ndims: 1}
      self._assertNdArrayEqual(
          ([3], [4], [2]), sess.run(shaper.get_shape(y), feed_dict=feed_dict))

      y_value = self._random_sample((3, 2), dtype=y.dtype)
      feed_dict = {y: y_value, batch_ndims: 0, event_ndims: 1}
      self._assertNdArrayEqual(
          ([3], _empty_shape, [2]),
          sess.run(shaper.get_shape(y), feed_dict=feed_dict))
Example #5
    def testDistributionShapeGetNdimsStatic(self):
        shaper = _DistributionShape(batch_ndims=0, event_ndims=0)
        x = 1
        self.assertEqual(0, self.evaluate(shaper.get_sample_ndims(x)))
        self.assertEqual(0, self.evaluate(shaper.batch_ndims))
        self.assertEqual(0, self.evaluate(shaper.event_ndims))

        shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
        x = self._random_sample((1, 2, 3))
        self.assertAllEqual(3, self.evaluate(shaper.get_ndims(x)))
        self.assertEqual(1, self.evaluate(shaper.get_sample_ndims(x)))
        self.assertEqual(1, self.evaluate(shaper.batch_ndims))
        self.assertEqual(1, self.evaluate(shaper.event_ndims))

        x += self._random_sample((1, 2, 3))
        self.assertAllEqual(3, self.evaluate(shaper.get_ndims(x)))
        self.assertEqual(1, self.evaluate(shaper.get_sample_ndims(x)))
        self.assertEqual(1, self.evaluate(shaper.batch_ndims))
        self.assertEqual(1, self.evaluate(shaper.event_ndims))

        # There is no such thing as unfed Tensor in Eager, so exit early.
        if tf.executing_eagerly():
            return

        # Test ndims functions work, even despite unfed Tensors.
        y = tf.placeholder(tf.float32, shape=(1024, None, 1024))
        self.assertEqual(3, self.evaluate(shaper.get_ndims(y)))
        self.assertEqual(1, self.evaluate(shaper.get_sample_ndims(y)))
        self.assertEqual(1, self.evaluate(shaper.batch_ndims))
        self.assertEqual(1, self.evaluate(shaper.event_ndims))
Example #6
    def testDistributionShapeGetNdimsStatic(self):
        with self.test_session():
            shaper = _DistributionShape(batch_ndims=0, event_ndims=0)
            x = 1
            self.assertEqual(0, shaper.get_sample_ndims(x).eval())
            self.assertEqual(0, shaper.batch_ndims.eval())
            self.assertEqual(0, shaper.event_ndims.eval())

            shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
            x = self._random_sample((1, 2, 3))
            self.assertAllEqual(3, shaper.get_ndims(x).eval())
            self.assertEqual(1, shaper.get_sample_ndims(x).eval())
            self.assertEqual(1, shaper.batch_ndims.eval())
            self.assertEqual(1, shaper.event_ndims.eval())

            x += self._random_sample((1, 2, 3))
            self.assertAllEqual(3, shaper.get_ndims(x).eval())
            self.assertEqual(1, shaper.get_sample_ndims(x).eval())
            self.assertEqual(1, shaper.batch_ndims.eval())
            self.assertEqual(1, shaper.event_ndims.eval())

            # Test ndims functions work, even despite unfed Tensors.
            y = tf.placeholder(tf.float32, shape=(1024, None, 1024))
            self.assertEqual(3, shaper.get_ndims(y).eval())
            self.assertEqual(1, shaper.get_sample_ndims(y).eval())
            self.assertEqual(1, shaper.batch_ndims.eval())
            self.assertEqual(1, shaper.event_ndims.eval())
Example #7
 def testDistributionShapeGetDimsStatic(self):
     shaper = _DistributionShape(batch_ndims=0, event_ndims=0)
     x = 1
     self.assertAllEqual((_empty_shape, _empty_shape, _empty_shape),
                         _constant(shaper.get_dims(x)))
     shaper = _DistributionShape(batch_ndims=1, event_ndims=2)
     x += self._random_sample((1, 1, 2, 2))
     self._assertNdArrayEqual(([0], [1], [2, 3]),
                              _constant(shaper.get_dims(x)))
     x += x
     self._assertNdArrayEqual(([0], [1], [2, 3]),
                              _constant(shaper.get_dims(x)))
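Where `get_shape` returns sizes, `get_dims` returns axis indices. A hedged NumPy sketch of the same bookkeeping used in the assertions above; `split_dims` is an illustrative name, not a TFP symbol:

import numpy as np

def split_dims(x, batch_ndims, event_ndims):
    # Axis indices (not sizes) of the sample, batch and event dimensions.
    ndims = np.ndim(x)
    sample_ndims = ndims - batch_ndims - event_ndims
    dims = list(range(ndims))
    return (dims[:sample_ndims],
            dims[sample_ndims:sample_ndims + batch_ndims],
            dims[sample_ndims + batch_ndims:])

# Mirrors the rank-4 case above: batch_ndims=1, event_ndims=2.
print(split_dims(np.ones((1, 1, 2, 2)), batch_ndims=1, event_ndims=2))
# -> ([0], [1], [2, 3])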
Example #8
 def testDistributionShapeGetDimsStatic(self):
   with self.test_session():
     shaper = _DistributionShape(batch_ndims=0, event_ndims=0)
     x = 1
     self.assertAllEqual((_empty_shape, _empty_shape, _empty_shape),
                         _constant(shaper.get_dims(x)))
     shaper = _DistributionShape(batch_ndims=1, event_ndims=2)
     x += self._random_sample((1, 1, 2, 2))
     self._assertNdArrayEqual(([0], [1], [2, 3]),
                              _constant(shaper.get_dims(x)))
     x += x
     self._assertNdArrayEqual(([0], [1], [2, 3]),
                              _constant(shaper.get_dims(x)))
Example #9
    def testDistributionShapeGetShapeStatic(self):
        with self.test_session():
            shaper = _DistributionShape(batch_ndims=0, event_ndims=0)
            self.assertAllEqual((_empty_shape, _empty_shape, _empty_shape),
                                _constant(shaper.get_shape(1.)))
            self._assertNdArrayEqual(([1], _empty_shape, _empty_shape),
                                     _constant(shaper.get_shape(np.ones(1))))
            self._assertNdArrayEqual(
                ([2, 2], _empty_shape, _empty_shape),
                _constant(shaper.get_shape(np.ones((2, 2)))))
            self._assertNdArrayEqual(
                ([3, 2, 1], _empty_shape, _empty_shape),
                _constant(shaper.get_shape(np.ones((3, 2, 1)))))

            shaper = _DistributionShape(batch_ndims=0, event_ndims=1)
            with self.assertRaisesRegexp(ValueError, "expected .* <= ndims"):
                shaper.get_shape(1.)
            self._assertNdArrayEqual((_empty_shape, _empty_shape, [1]),
                                     _constant(shaper.get_shape(np.ones(1))))
            self._assertNdArrayEqual(
                ([2], _empty_shape, [2]),
                _constant(shaper.get_shape(np.ones((2, 2)))))
            self._assertNdArrayEqual(
                ([3, 2], _empty_shape, [1]),
                _constant(shaper.get_shape(np.ones((3, 2, 1)))))

            shaper = _DistributionShape(batch_ndims=1, event_ndims=0)
            with self.assertRaisesRegexp(ValueError, "expected .* <= ndims"):
                shaper.get_shape(1.)
            self._assertNdArrayEqual((_empty_shape, [1], _empty_shape),
                                     _constant(shaper.get_shape(np.ones(1))))
            self._assertNdArrayEqual(
                ([2], [2], _empty_shape),
                _constant(shaper.get_shape(np.ones((2, 2)))))
            self._assertNdArrayEqual(
                ([3, 2], [1], _empty_shape),
                _constant(shaper.get_shape(np.ones((3, 2, 1)))))

            shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
            with self.assertRaisesRegexp(ValueError, "expected .* <= ndims"):
                shaper.get_shape(1.)
            with self.assertRaisesRegexp(ValueError, "expected .* <= ndims"):
                shaper.get_shape(np.ones(1))
            self._assertNdArrayEqual(
                (_empty_shape, [2], [2]),
                _constant(shaper.get_shape(np.ones((2, 2)))))
            self._assertNdArrayEqual(
                ([3], [2], [1]), _constant(shaper.get_shape(np.ones(
                    (3, 2, 1)))))
Example #10
 def _build_graph(self, x, batch_ndims, event_ndims, expand_batch_dim):
     shaper = _DistributionShape(batch_ndims=batch_ndims,
                                 event_ndims=event_ndims)
     y, sample_shape = shaper.make_batch_of_event_sample_matrices(
         x, expand_batch_dim=expand_batch_dim)
     should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
         y, sample_shape, expand_batch_dim=expand_batch_dim)
     return y, sample_shape, should_be_x_value
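This helper round-trips `make_batch_of_event_sample_matrices` through `undo_make_batch_of_event_sample_matrices`, so `should_be_x_value` should reproduce the input. A sketch of how a test might call it, assuming it lives on a TensorFlow test case that provides `self.evaluate` and `assertAllClose`, with an `import numpy as np` at module scope as in these test files (shapes are illustrative):

x = np.ones((2, 3, 4), dtype=np.float32)  # e.g. sample=2, batch=3, event=4
y, sample_shape, x_back = self._build_graph(
    x, batch_ndims=1, event_ndims=1, expand_batch_dim=False)
self.assertAllClose(x, self.evaluate(x_back))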
Example #11
 def testDistributionShapeGetNdimsDynamic(self):
     batch_ndims = tf.placeholder_with_default(1, shape=None)
     event_ndims = tf.placeholder_with_default(1, shape=None)
     shaper = _DistributionShape(batch_ndims=batch_ndims,
                                 event_ndims=event_ndims)
     y = tf.placeholder_with_default(np.ones((4, 2), dtype=np.float32),
                                     shape=None)
     self.assertEqual(2, self.evaluate(shaper.get_ndims(y)))
Example #12
 def _build_graph(self, x, batch_ndims, event_ndims, expand_batch_dim):
   shaper = _DistributionShape(batch_ndims=batch_ndims,
                               event_ndims=event_ndims)
   y, sample_shape = shaper.make_batch_of_event_sample_matrices(
       x, expand_batch_dim=expand_batch_dim)
   should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
       y, sample_shape, expand_batch_dim=expand_batch_dim)
   return y, sample_shape, should_be_x_value
Example #13
  def testDistributionShapeGetDimsDynamic(self):
    with self.test_session() as sess:
      # Works for static {batch,event}_ndims despite unfed input.
      shaper = _DistributionShape(batch_ndims=1, event_ndims=2)
      y = tf.placeholder(tf.float32, shape=(10, None, 5, 5))
      self._assertNdArrayEqual([[0], [1], [2, 3]], _eval(shaper.get_dims(y)))

      # Works for deferred {batch,event}_ndims.
      batch_ndims = tf.placeholder(tf.int32)
      event_ndims = tf.placeholder(tf.int32)
      shaper = _DistributionShape(
          batch_ndims=batch_ndims, event_ndims=event_ndims)
      y = tf.placeholder(tf.float32)
      y_value = self._random_sample((10, 3, 5, 5), dtype=y.dtype)
      feed_dict = {y: y_value, batch_ndims: 1, event_ndims: 2}
      self._assertNdArrayEqual(
          ([0], [1], [2, 3]), sess.run(shaper.get_dims(y), feed_dict=feed_dict))
Example #14
    def testDistributionShapeGetDimsDynamic(self):
        with self.test_session() as sess:
            # Works for static {batch,event}_ndims despite unfed input.
            shaper = _DistributionShape(batch_ndims=1, event_ndims=2)
            y = tf.placeholder(tf.float32, shape=(10, None, 5, 5))
            self._assertNdArrayEqual([[0], [1], [2, 3]],
                                     _eval(shaper.get_dims(y)))

            # Works for deferred {batch,event}_ndims.
            batch_ndims = tf.placeholder(tf.int32)
            event_ndims = tf.placeholder(tf.int32)
            shaper = _DistributionShape(batch_ndims=batch_ndims,
                                        event_ndims=event_ndims)
            y = tf.placeholder(tf.float32)
            y_value = self._random_sample((10, 3, 5, 5), dtype=y.dtype)
            feed_dict = {y: y_value, batch_ndims: 1, event_ndims: 2}
            self._assertNdArrayEqual(([0], [1], [2, 3]),
                                     sess.run(shaper.get_dims(y),
                                              feed_dict=feed_dict))
Example #15
 def testDistributionShapeGetNdimsDynamic(self):
   with self.test_session() as sess:
     batch_ndims = tf.placeholder(tf.int32)
     event_ndims = tf.placeholder(tf.int32)
     shaper = _DistributionShape(
         batch_ndims=batch_ndims, event_ndims=event_ndims)
     y = tf.placeholder(tf.float32)
     y_value = np.ones((4, 2), dtype=y.dtype.as_numpy_dtype())
     feed_dict = {y: y_value, batch_ndims: 1, event_ndims: 1}
     self.assertEqual(2, sess.run(shaper.get_ndims(y), feed_dict=feed_dict))
Example #16
 def testDistributionShapeGetNdimsDynamic(self):
     with self.test_session() as sess:
         batch_ndims = tf.placeholder(tf.int32)
         event_ndims = tf.placeholder(tf.int32)
         shaper = _DistributionShape(batch_ndims=batch_ndims,
                                     event_ndims=event_ndims)
         y = tf.placeholder(tf.float32)
         y_value = np.ones((4, 2), dtype=y.dtype.as_numpy_dtype())
         feed_dict = {y: y_value, batch_ndims: 1, event_ndims: 1}
         self.assertEqual(
             2, sess.run(shaper.get_ndims(y), feed_dict=feed_dict))
Example #17
  def testDistributionShapeGetShapeStatic(self):
    with self.test_session():
      shaper = _DistributionShape(batch_ndims=0, event_ndims=0)
      self.assertAllEqual((_empty_shape, _empty_shape, _empty_shape),
                          _constant(shaper.get_shape(1.)))
      self._assertNdArrayEqual(([1], _empty_shape, _empty_shape),
                               _constant(shaper.get_shape(np.ones(1))))
      self._assertNdArrayEqual(([2, 2], _empty_shape, _empty_shape),
                               _constant(shaper.get_shape(np.ones((2, 2)))))
      self._assertNdArrayEqual(([3, 2, 1], _empty_shape, _empty_shape),
                               _constant(shaper.get_shape(np.ones((3, 2, 1)))))

      shaper = _DistributionShape(batch_ndims=0, event_ndims=1)
      with self.assertRaisesRegexp(ValueError, "expected .* <= ndims"):
        shaper.get_shape(1.)
      self._assertNdArrayEqual((_empty_shape, _empty_shape, [1]),
                               _constant(shaper.get_shape(np.ones(1))))
      self._assertNdArrayEqual(([2], _empty_shape, [2]),
                               _constant(shaper.get_shape(np.ones((2, 2)))))
      self._assertNdArrayEqual(([3, 2], _empty_shape, [1]),
                               _constant(shaper.get_shape(np.ones((3, 2, 1)))))

      shaper = _DistributionShape(batch_ndims=1, event_ndims=0)
      with self.assertRaisesRegexp(ValueError, "expected .* <= ndims"):
        shaper.get_shape(1.)
      self._assertNdArrayEqual((_empty_shape, [1], _empty_shape),
                               _constant(shaper.get_shape(np.ones(1))))
      self._assertNdArrayEqual(([2], [2], _empty_shape),
                               _constant(shaper.get_shape(np.ones((2, 2)))))
      self._assertNdArrayEqual(([3, 2], [1], _empty_shape),
                               _constant(shaper.get_shape(np.ones((3, 2, 1)))))

      shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
      with self.assertRaisesRegexp(ValueError, "expected .* <= ndims"):
        shaper.get_shape(1.)
      with self.assertRaisesRegexp(ValueError, "expected .* <= ndims"):
        shaper.get_shape(np.ones(1))
      self._assertNdArrayEqual((_empty_shape, [2], [2]),
                               _constant(shaper.get_shape(np.ones((2, 2)))))
      self._assertNdArrayEqual(([3], [2], [1]),
                               _constant(shaper.get_shape(np.ones((3, 2, 1)))))
Example #18
    def testDistributionShapeGetDimsDynamic(self):
        # Works for deferred {batch,event}_ndims.
        batch_ndims = tf.placeholder_with_default(1, shape=None)
        event_ndims = tf.placeholder_with_default(2, shape=None)
        shaper = _DistributionShape(batch_ndims=batch_ndims,
                                    event_ndims=event_ndims)
        y = tf.placeholder_with_default(self._random_sample((10, 3, 5, 5),
                                                            dtype=np.float32),
                                        shape=None)
        self._assertNdArrayEqual(([0], [1], [2, 3]),
                                 self.evaluate(shaper.get_dims(y)))

        # In eager mode, there is no such thing as unfed input, so exit early.
        if tf.executing_eagerly():
            return

        # Works for static {batch,event}_ndims despite unfed input.
        shaper = _DistributionShape(batch_ndims=1, event_ndims=2)
        y = tf.placeholder(tf.float32, shape=(10, None, 5, 5))
        self._assertNdArrayEqual([[0], [1], [2, 3]],
                                 self.evaluate(shaper.get_dims(y)))
Example #19
  def __init__(self,
               shift=None,
               scale_identity_multiplier=None,
               scale_diag=None,
               scale_tril=None,
               scale_perturb_factor=None,
               scale_perturb_diag=None,
               adjoint=False,
               validate_args=False,
               name="affine"):
    """Instantiates the `Affine` bijector.

    This `Bijector` is initialized with `shift` `Tensor` and `scale` arguments,
    giving the forward operation:

    ```none
    Y = g(X) = scale @ X + shift
    ```

    where the `scale` term is logically equivalent to:

    ```python
    scale = (
      scale_identity_multiplier * tf.diag(tf.ones(d)) +
      tf.diag(scale_diag) +
      scale_tril +
      scale_perturb_factor @ diag(scale_perturb_diag) @
        tf.transpose([scale_perturb_factor])
    )
    ```

    If none of `scale_identity_multiplier`, `scale_diag`, or `scale_tril` are
    specified then `scale += IdentityMatrix`. Otherwise specifying a
    `scale` argument has the semantics of `scale += Expand(arg)`, i.e.,
    `scale_diag != None` means `scale += tf.diag(scale_diag)`.

    Args:
      shift: Floating-point `Tensor`. If this is set to `None`, no shift is
        applied.
      scale_identity_multiplier: Floating-point rank-0 `Tensor` representing a
        scaling done to the identity matrix.
        When `scale_identity_multiplier = scale_diag = scale_tril = None` then
        `scale += IdentityMatrix`. Otherwise no scaled-identity-matrix is added
        to `scale`.
      scale_diag: Floating-point `Tensor` representing the diagonal matrix.
        `scale_diag` has shape [N1, N2, ...  k], which represents a k x k
        diagonal matrix.
        When `None` no diagonal term is added to `scale`.
      scale_tril: Floating-point `Tensor` representing the lower triangular
        matrix. `scale_tril` has shape [N1, N2, ...  k, k], which represents a
        k x k lower triangular matrix.
        When `None`, no `scale_tril` term is added to `scale`.
        The upper triangular elements above the diagonal are ignored.
      scale_perturb_factor: Floating-point `Tensor` representing factor matrix
        with last two dimensions of shape `(k, r)`. When `None`, no rank-r
        update is added to `scale`.
      scale_perturb_diag: Floating-point `Tensor` representing the diagonal
        matrix. `scale_perturb_diag` has shape [N1, N2, ...  r], which
        represents an `r x r` diagonal matrix. When `None`, low rank updates
        take the form `scale_perturb_factor * scale_perturb_factor.T`.
      adjoint: Python `bool` indicating whether to use the `scale` matrix as
        specified or its adjoint.
        Default value: `False`.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str` name given to ops managed by this object.

    Raises:
      ValueError: if `perturb_diag` is specified but not `perturb_factor`.
      TypeError: if `shift` has different `dtype` from `scale` arguments.
    """
    self._graph_parents = []
    self._name = name
    self._validate_args = validate_args

    # Ambiguous definition of low rank update.
    if scale_perturb_diag is not None and scale_perturb_factor is None:
      raise ValueError("When scale_perturb_diag is specified, "
                       "scale_perturb_factor must be specified.")

    # Special case, only handling a scaled identity matrix. We don't know its
    # dimensions, so this is special cased.
    # We don't check identity_multiplier, since below we set it to 1. if all
    # other scale args are None.
    self._is_only_identity_multiplier = (scale_tril is None and
                                         scale_diag is None and
                                         scale_perturb_factor is None)

    with self._name_scope("init", values=[
        shift, scale_identity_multiplier, scale_diag, scale_tril,
        scale_perturb_diag, scale_perturb_factor]):

      # In the absence of `loc` and `scale`, we'll assume `dtype` is `float32`.
      dtype = tf.float32

      if shift is not None:
        shift = tf.convert_to_tensor(shift, name="shift")
        dtype = shift.dtype.base_dtype
      self._shift = shift

      # When no args are specified, pretend the scale matrix is the identity
      # matrix.
      if (self._is_only_identity_multiplier and
          scale_identity_multiplier is None):
        scale_identity_multiplier = tf.convert_to_tensor(1., dtype=dtype)

      # self._create_scale_operator returns a LinearOperator in all cases
      # except if self._is_only_identity_multiplier; in which case it
      # returns a scalar Tensor.
      scale = self._create_scale_operator(
          identity_multiplier=scale_identity_multiplier,
          diag=scale_diag,
          tril=scale_tril,
          perturb_diag=scale_perturb_diag,
          perturb_factor=scale_perturb_factor,
          shift=shift,
          validate_args=validate_args)

      if scale.dtype is not None:
        dtype = scale.dtype.base_dtype

      if scale is not None and not self._is_only_identity_multiplier:
        if (shift is not None and
            shift.dtype.base_dtype != scale.dtype.base_dtype):
          raise TypeError(
              "shift.dtype({}) is incompatible with scale.dtype({}).".format(
                  shift.dtype, scale.dtype))

        if scale.tensor_rank is not None:
          batch_ndims = scale.tensor_rank - 2
        else:
          batch_ndims = scale.tensor_rank_tensor() - 2
      else:
        # We won't need shape inference when scale is None or when scale is a
        # scalar.
        batch_ndims = 0
      self._scale = scale
      self._shaper = _DistributionShape(
          batch_ndims=batch_ndims,
          event_ndims=1,
          validate_args=validate_args)
      self._adjoint = adjoint
      super(Affine, self).__init__(
          forward_min_event_ndims=1,
          graph_parents=(
              [self._scale] if tensor_util.is_tensor(self._scale)
              else self._scale.graph_parents +
              [self._shift] if self._shift is not None else []),
          is_constant_jacobian=True,
          dtype=dtype,
          validate_args=validate_args,
          name=name)
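A small NumPy rendering of the `scale` composition described in the docstring above, with every term supplied for a 2 x 2 case and the final low-rank term read as `perturb_factor @ diag(perturb_diag) @ perturb_factor.T`; the numbers are illustrative only:

import numpy as np

d = 2
scale_identity_multiplier = 2.0
scale_diag = np.array([1.0, 3.0])                 # k = 2
scale_tril = np.array([[0.0, 0.0],
                       [4.0, 0.0]])
scale_perturb_factor = np.array([[1.0],
                                 [1.0]])          # shape (k, r) = (2, 1)
scale_perturb_diag = np.array([0.5])              # shape (r,) = (1,)

scale = (scale_identity_multiplier * np.eye(d)
         + np.diag(scale_diag)
         + scale_tril
         + scale_perturb_factor @ np.diag(scale_perturb_diag)
           @ scale_perturb_factor.T)
# scale == [[3.5, 0.5],
#           [4.5, 5.5]]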
Example #20
    def __init__(self,
                 shift=None,
                 scale_identity_multiplier=None,
                 scale_diag=None,
                 scale_tril=None,
                 scale_perturb_factor=None,
                 scale_perturb_diag=None,
                 validate_args=False,
                 name="affine"):
        """Instantiates the `Affine` bijector.

    This `Bijector` is initialized with `shift` `Tensor` and `scale` arguments,
    giving the forward operation:

    ```none
    Y = g(X) = scale @ X + shift
    ```

    where the `scale` term is logically equivalent to:

    ```python
    scale = (
      scale_identity_multiplier * tf.diag(tf.ones(d)) +
      tf.diag(scale_diag) +
      scale_tril +
      scale_perturb_factor @ diag(scale_perturb_diag) @
        tf.transpose([scale_perturb_factor])
    )
    ```

    If none of `scale_identity_multiplier`, `scale_diag`, or `scale_tril` are
    specified then `scale += IdentityMatrix`. Otherwise specifying a
    `scale` argument has the semantics of `scale += Expand(arg)`, i.e.,
    `scale_diag != None` means `scale += tf.diag(scale_diag)`.

    Args:
      shift: Floating-point `Tensor`. If this is set to `None`, no shift is
        applied.
      scale_identity_multiplier: Floating-point rank-0 `Tensor` representing a
        scaling done to the identity matrix.
        When `scale_identity_multiplier = scale_diag = scale_tril = None` then
        `scale += IdentityMatrix`. Otherwise no scaled-identity-matrix is added
        to `scale`.
      scale_diag: Floating-point `Tensor` representing the diagonal matrix.
        `scale_diag` has shape [N1, N2, ...  k], which represents a k x k
        diagonal matrix.
        When `None` no diagonal term is added to `scale`.
      scale_tril: Floating-point `Tensor` representing the lower triangular
        matrix. `scale_tril` has shape [N1, N2, ...  k, k], which represents a
        k x k lower triangular matrix.
        When `None`, no `scale_tril` term is added to `scale`.
        The upper triangular elements above the diagonal are ignored.
      scale_perturb_factor: Floating-point `Tensor` representing factor matrix
        with last two dimensions of shape `(k, r)`. When `None`, no rank-r
        update is added to `scale`.
      scale_perturb_diag: Floating-point `Tensor` representing the diagonal
        matrix. `scale_perturb_diag` has shape [N1, N2, ...  r], which
        represents an `r x r` diagonal matrix. When `None`, low rank updates
        take the form `scale_perturb_factor * scale_perturb_factor.T`.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str` name given to ops managed by this object.

    Raises:
      ValueError: if `perturb_diag` is specified but not `perturb_factor`.
      TypeError: if `shift` has different `dtype` from `scale` arguments.
    """
        self._graph_parents = []
        self._name = name
        self._validate_args = validate_args

        # Ambiguous definition of low rank update.
        if scale_perturb_diag is not None and scale_perturb_factor is None:
            raise ValueError("When scale_perturb_diag is specified, "
                             "scale_perturb_factor must be specified.")

        # Special case, only handling a scaled identity matrix. We don't know its
        # dimensions, so this is special cased.
        # We don't check identity_multiplier, since below we set it to 1. if all
        # other scale args are None.
        self._is_only_identity_multiplier = (scale_tril is None
                                             and scale_diag is None
                                             and scale_perturb_factor is None)

        with self._name_scope("init",
                              values=[
                                  shift, scale_identity_multiplier, scale_diag,
                                  scale_tril, scale_perturb_diag,
                                  scale_perturb_factor
                              ]):

            # In the absence of `loc` and `scale`, we'll assume `dtype` is `float32`.
            dtype = tf.float32

            if shift is not None:
                shift = tf.convert_to_tensor(shift, name="shift")
                dtype = shift.dtype.base_dtype
            self._shift = shift

            # When no args are specified, pretend the scale matrix is the identity
            # matrix.
            if (self._is_only_identity_multiplier
                    and scale_identity_multiplier is None):
                scale_identity_multiplier = tf.convert_to_tensor(1.,
                                                                 dtype=dtype)

            # self._create_scale_operator returns a LinearOperator in all cases
            # except if self._is_only_identity_multiplier; in which case it
            # returns a scalar Tensor.
            scale = self._create_scale_operator(
                identity_multiplier=scale_identity_multiplier,
                diag=scale_diag,
                tril=scale_tril,
                perturb_diag=scale_perturb_diag,
                perturb_factor=scale_perturb_factor,
                shift=shift,
                validate_args=validate_args)

            if scale.dtype is not None:
                dtype = scale.dtype.base_dtype

            if scale is not None and not self._is_only_identity_multiplier:
                if (shift is not None
                        and shift.dtype.base_dtype != scale.dtype.base_dtype):
                    raise TypeError(
                        "shift.dtype({}) is incompatible with scale.dtype({})."
                        .format(shift.dtype, scale.dtype))

                if scale.tensor_rank is not None:
                    batch_ndims = scale.tensor_rank - 2
                else:
                    batch_ndims = scale.tensor_rank_tensor() - 2
            else:
                # We won't need shape inference when scale is None or when scale is a
                # scalar.
                batch_ndims = 0
            self._scale = scale
            self._shaper = _DistributionShape(batch_ndims=batch_ndims,
                                              event_ndims=1,
                                              validate_args=validate_args)
            super(Affine, self).__init__(
                forward_min_event_ndims=1,
                graph_parents=(
                    [self._scale] if tensor_util.is_tensor(
                        self._scale) else self._scale.graph_parents +
                    [self._shift] if self._shift is not None else []),
                is_constant_jacobian=True,
                dtype=dtype,
                validate_args=validate_args,
                name=name)

    def __init__(self,
                 shift=None,
                 scale=None,
                 validate_args=False,
                 name="affine_linear_operator"):
        """Instantiates the `AffineLinearOperator` bijector.

    Args:
      shift: Floating-point `Tensor`.
      scale:  Subclass of `LinearOperator`. Represents the (batch) positive
        definite matrix `M` in `R^{k x k}`.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str` name given to ops managed by this object.

    Raises:
      TypeError: if `scale` is not a `LinearOperator`.
      TypeError: if `shift.dtype` does not match `scale.dtype`.
      ValueError: if not `scale.is_non_singular`.
    """
        self._graph_parents = []
        self._name = name
        self._validate_args = validate_args
        graph_parents = []
        with self._name_scope("init", values=[shift]):
            # In the absence of `loc` and `scale`, we'll assume `dtype` is `float32`.
            dtype = tf.float32

            if shift is not None:
                shift = tf.convert_to_tensor(shift, name="shift")
                graph_parents += [shift]
                dtype = shift.dtype.base_dtype
            self._shift = shift

            if scale is not None:
                if (shift is not None
                        and shift.dtype.base_dtype != scale.dtype.base_dtype):
                    raise TypeError(
                        "shift.dtype({}) is incompatible with scale.dtype({})."
                        .format(shift.dtype, scale.dtype))
                if not isinstance(scale, linear_operator.LinearOperator):
                    raise TypeError(
                        "scale is not an instance of tf.LinearOperator")
                if validate_args and not scale.is_non_singular:
                    raise ValueError("Scale matrix must be non-singular.")
                graph_parents += scale.graph_parents
                if scale.tensor_rank is not None:
                    batch_ndims = scale.tensor_rank - 2
                else:
                    batch_ndims = scale.tensor_rank_tensor() - 2
                    graph_parents += [batch_ndims]
                if scale.dtype is not None:
                    dtype = scale.dtype.base_dtype
            else:
                batch_ndims = 0  # We won't need shape inference when scale is None.
            self._scale = scale
            self._shaper = _DistributionShape(batch_ndims=batch_ndims,
                                              event_ndims=1,
                                              validate_args=validate_args)
            super(AffineLinearOperator,
                  self).__init__(forward_min_event_ndims=1,
                                 graph_parents=graph_parents,
                                 is_constant_jacobian=True,
                                 dtype=dtype,
                                 validate_args=validate_args,
                                 name=name)
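For completeness, a hedged usage sketch of the `AffineLinearOperator` constructor documented above, assuming a TFP release that still exports it as `tfp.bijectors.AffineLinearOperator` (older code imported it from `tf.contrib.distributions.bijectors`); the shift and scale values are illustrative:

import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

shift = np.array([1.0, -1.0], dtype=np.float32)
scale = tf.linalg.LinearOperatorLowerTriangular(
    [[2.0, 0.0],
     [0.5, 1.0]])
bijector = tfp.bijectors.AffineLinearOperator(shift=shift, scale=scale)

x = np.array([1.0, 1.0], dtype=np.float32)
y = bijector.forward(x)  # scale @ x + shift -> [3.0, 0.5]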