Example #1
    def testDistributionShapeGetShapeDynamic(self):
        with self.test_session() as sess:
            # Works for static ndims despite unknown static shape.
            shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
            y = tf.placeholder(tf.int32, shape=(None, None, 2))
            y_value = np.ones((3, 4, 2), dtype=y.dtype.as_numpy_dtype())
            self._assertNdArrayEqual(([3], [4], [2]),
                                     sess.run(shaper.get_shape(y),
                                              feed_dict={y: y_value}))

            shaper = _DistributionShape(batch_ndims=0, event_ndims=1)
            y = tf.placeholder(tf.int32, shape=(None, None))
            y_value = np.ones((3, 2), dtype=y.dtype.as_numpy_dtype())
            self._assertNdArrayEqual(([3], _empty_shape, [2]),
                                     sess.run(shaper.get_shape(y),
                                              feed_dict={y: y_value}))

            # Works for deferred {batch,event}_ndims.
            batch_ndims = tf.placeholder(tf.int32)
            event_ndims = tf.placeholder(tf.int32)
            shaper = _DistributionShape(batch_ndims=batch_ndims,
                                        event_ndims=event_ndims)
            y = tf.placeholder(tf.float32)
            y_value = self._random_sample((3, 4, 2), dtype=y.dtype)
            feed_dict = {y: y_value, batch_ndims: 1, event_ndims: 1}
            self._assertNdArrayEqual(([3], [4], [2]),
                                     sess.run(shaper.get_shape(y),
                                              feed_dict=feed_dict))

            y_value = self._random_sample((3, 2), dtype=y.dtype)
            feed_dict = {y: y_value, batch_ndims: 0, event_ndims: 1}
            self._assertNdArrayEqual(([3], _empty_shape, [2]),
                                     sess.run(shaper.get_shape(y),
                                              feed_dict=feed_dict))
Example #2
  def testDistributionShapeGetShapeDynamic(self):
    with self.test_session() as sess:
      # Works for static ndims despite unknown static shape.
      shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
      y = array_ops.placeholder(dtypes.int32, shape=(None, None, 2))
      y_value = np.ones((3, 4, 2), dtype=y.dtype.as_numpy_dtype())
      self._assertNdArrayEqual(
          ([3], [4], [2]),
          sess.run(shaper.get_shape(y), feed_dict={y: y_value}))

      shaper = _DistributionShape(batch_ndims=0, event_ndims=1)
      y = array_ops.placeholder(dtypes.int32, shape=(None, None))
      y_value = np.ones((3, 2), dtype=y.dtype.as_numpy_dtype())
      self._assertNdArrayEqual(
          ([3], _empty_shape, [2]),
          sess.run(shaper.get_shape(y), feed_dict={y: y_value}))

      # Works for deferred {batch,event}_ndims.
      batch_ndims = array_ops.placeholder(dtypes.int32)
      event_ndims = array_ops.placeholder(dtypes.int32)
      shaper = _DistributionShape(
          batch_ndims=batch_ndims, event_ndims=event_ndims)
      y = array_ops.placeholder(dtypes.float32)
      y_value = self._random_sample((3, 4, 2), dtype=y.dtype)
      feed_dict = {y: y_value, batch_ndims: 1, event_ndims: 1}
      self._assertNdArrayEqual(
          ([3], [4], [2]), sess.run(shaper.get_shape(y), feed_dict=feed_dict))

      y_value = self._random_sample((3, 2), dtype=y.dtype)
      feed_dict = {y: y_value, batch_ndims: 0, event_ndims: 1}
      self._assertNdArrayEqual(
          ([3], _empty_shape, [2]),
          sess.run(shaper.get_shape(y), feed_dict=feed_dict))
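
Both variants above assert the same partitioning rule: `get_shape` splits a tensor's shape left-to-right into sample, batch, and event parts, where the sample part is whatever rank is left over after `batch_ndims` and `event_ndims` are peeled off the right. A minimal sketch of that split (an illustration of the assumed semantics, not the library implementation):

```python
def split_shape(shape, batch_ndims, event_ndims):
  """Illustrative-only partition of `shape` into (sample, batch, event)."""
  shape = list(shape)
  sample_ndims = len(shape) - batch_ndims - event_ndims
  sample_shape = shape[:sample_ndims]
  batch_shape = shape[sample_ndims:sample_ndims + batch_ndims]
  event_shape = shape[sample_ndims + batch_ndims:]
  return sample_shape, batch_shape, event_shape

# Mirrors the assertions above.
assert split_shape((3, 4, 2), batch_ndims=1, event_ndims=1) == ([3], [4], [2])
assert split_shape((3, 2), batch_ndims=0, event_ndims=1) == ([3], [], [2])
```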
Example #3
    def testDistributionShapeGetNdimsStatic(self):
        with self.test_session():
            shaper = _DistributionShape(batch_ndims=0, event_ndims=0)
            x = 1
            self.assertEqual(0, shaper.get_sample_ndims(x).eval())
            self.assertEqual(0, shaper.batch_ndims.eval())
            self.assertEqual(0, shaper.event_ndims.eval())

            shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
            x = self._random_sample((1, 2, 3))
            self.assertAllEqual(3, shaper.get_ndims(x).eval())
            self.assertEqual(1, shaper.get_sample_ndims(x).eval())
            self.assertEqual(1, shaper.batch_ndims.eval())
            self.assertEqual(1, shaper.event_ndims.eval())

            x += self._random_sample((1, 2, 3))
            self.assertAllEqual(3, shaper.get_ndims(x).eval())
            self.assertEqual(1, shaper.get_sample_ndims(x).eval())
            self.assertEqual(1, shaper.batch_ndims.eval())
            self.assertEqual(1, shaper.event_ndims.eval())

            # Test that the ndims functions work despite unfed Tensors.
            y = tf.placeholder(tf.float32, shape=(1024, None, 1024))
            self.assertEqual(3, shaper.get_ndims(y).eval())
            self.assertEqual(1, shaper.get_sample_ndims(y).eval())
            self.assertEqual(1, shaper.batch_ndims.eval())
            self.assertEqual(1, shaper.event_ndims.eval())
Example #4
  def testDistributionShapeGetNdimsStatic(self):
    with self.test_session():
      shaper = _DistributionShape(batch_ndims=0, event_ndims=0)
      x = 1
      self.assertEqual(0, shaper.get_sample_ndims(x).eval())
      self.assertEqual(0, shaper.batch_ndims.eval())
      self.assertEqual(0, shaper.event_ndims.eval())

      shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
      x = self._random_sample((1, 2, 3))
      self.assertAllEqual(3, shaper.get_ndims(x).eval())
      self.assertEqual(1, shaper.get_sample_ndims(x).eval())
      self.assertEqual(1, shaper.batch_ndims.eval())
      self.assertEqual(1, shaper.event_ndims.eval())

      x += self._random_sample((1, 2, 3))
      self.assertAllEqual(3, shaper.get_ndims(x).eval())
      self.assertEqual(1, shaper.get_sample_ndims(x).eval())
      self.assertEqual(1, shaper.batch_ndims.eval())
      self.assertEqual(1, shaper.event_ndims.eval())

      # Test that the ndims functions work despite unfed Tensors.
      y = array_ops.placeholder(dtypes.float32, shape=(1024, None, 1024))
      self.assertEqual(3, shaper.get_ndims(y).eval())
      self.assertEqual(1, shaper.get_sample_ndims(y).eval())
      self.assertEqual(1, shaper.batch_ndims.eval())
      self.assertEqual(1, shaper.event_ndims.eval())
Example #5
 def testDistributionShapeGetDimsStatic(self):
     with self.cached_session():
         shaper = _DistributionShape(batch_ndims=0, event_ndims=0)
         x = 1
         self.assertAllEqual((_empty_shape, _empty_shape, _empty_shape),
                             _constant(shaper.get_dims(x)))
         shaper = _DistributionShape(batch_ndims=1, event_ndims=2)
         x += self._random_sample((1, 1, 2, 2))
         self._assertNdArrayEqual(([0], [1], [2, 3]),
                                  _constant(shaper.get_dims(x)))
         x += x
         self._assertNdArrayEqual(([0], [1], [2, 3]),
                                  _constant(shaper.get_dims(x)))
Example #6
 def testDistributionShapeGetDimsStatic(self):
   with self.test_session():
     shaper = _DistributionShape(batch_ndims=0, event_ndims=0)
     x = 1
     self.assertAllEqual((_empty_shape, _empty_shape, _empty_shape),
                         _constant(shaper.get_dims(x)))
     shaper = _DistributionShape(batch_ndims=1, event_ndims=2)
     x += self._random_sample((1, 1, 2, 2))
     self._assertNdArrayEqual(([0], [1], [2, 3]),
                              _constant(shaper.get_dims(x)))
     x += x
     self._assertNdArrayEqual(([0], [1], [2, 3]),
                              _constant(shaper.get_dims(x)))
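
`get_dims` is the index-space counterpart of `get_shape`: it reports which axis indices belong to the sample, batch, and event parts. A sketch of that split under the same assumed convention (illustration only, not the library code):

```python
def split_dims(ndims, batch_ndims, event_ndims):
  """Illustrative-only split of axis indices into (sample, batch, event) dims."""
  sample_ndims = ndims - batch_ndims - event_ndims
  sample_dims = list(range(sample_ndims))
  batch_dims = list(range(sample_ndims, sample_ndims + batch_ndims))
  event_dims = list(range(sample_ndims + batch_ndims, ndims))
  return sample_dims, batch_dims, event_dims

# Mirrors the assertion above: a rank-4 input with batch_ndims=1, event_ndims=2.
assert split_dims(4, batch_ndims=1, event_ndims=2) == ([0], [1], [2, 3])
```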
Example #7
  def testDistributionShapeMakeBatchReadyStaticNoExpand(self):
    with self.test_session() as sess:
      x = self._random_sample((1, 2, 3))
      shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
      y, sample_shape = shaper.make_batch_of_event_sample_matrices(
          x, expand_batch_dim=False)
      self.assertAllEqual(np.transpose(x, axes=(1, 2, 0)), y.eval())
      self.assertAllEqual((1,), sample_shape.eval())
      should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
          y, sample_shape, expand_batch_dim=False)
      self.assertAllEqual(x, should_be_x_value.eval())

      shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
      x = tf.placeholder(tf.float32)
      x_value = self._random_sample((3, 4, 2), dtype=x.dtype)
      feed_dict = {x: x_value}
      y, sample_shape = shaper.make_batch_of_event_sample_matrices(
          x, expand_batch_dim=False)
      self.assertAllEqual(
          (3,),
          sess.run(sample_shape, feed_dict=feed_dict))
      self.assertAllClose(
          np.transpose(np.reshape(x_value, (-1, 4, 2)), (1, 2, 0)),
          sess.run(y, feed_dict=feed_dict),
          rtol=1e-3)
      should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
          y, sample_shape, expand_batch_dim=False)
      self.assertAllEqual(x_value, sess.run(should_be_x_value,
                                            feed_dict=feed_dict))

      shaper = _DistributionShape(batch_ndims=0, event_ndims=0)
      x = tf.placeholder(tf.float32)
      x_value = np.ones([3], dtype=x.dtype.as_numpy_dtype())
      feed_dict = {x: x_value}
      y, sample_shape = shaper.make_batch_of_event_sample_matrices(
          x, expand_batch_dim=False)
      self.assertAllEqual(
          [3],
          sess.run(sample_shape, feed_dict=feed_dict))
      # The following check shows we don't need to manually set_shape in the
      # ShapeUtil.
      self.assertAllEqual([1, None],
                          y.get_shape().ndims and y.get_shape().as_list())
      self.assertAllEqual(
          np.ones([1, 3], dtype=x.dtype.as_numpy_dtype()),
          sess.run(y, feed_dict=feed_dict))
      should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
          y, sample_shape, expand_batch_dim=False)
      self.assertAllEqual(x_value, sess.run(should_be_x_value,
                                            feed_dict=feed_dict))
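
The expected values in this test all follow one reshape-and-transpose pattern: collapse every sample dimension into a single axis and rotate it to the back, so the result is a batch of event-by-sample matrices. A NumPy sketch of that pattern for the `batch_ndims=1, event_ndims=1` case (assumed semantics, for illustration rather than the library implementation):

```python
import numpy as np

def batch_of_event_sample_matrices(x, batch_ndims=1, event_ndims=1):
  """Illustrative-only: fold the sample dims into one trailing axis."""
  sample_ndims = x.ndim - batch_ndims - event_ndims
  sample_shape = x.shape[:sample_ndims]
  # Collapse the sample dims into one leading axis, then move it to the back.
  y = x.reshape((-1,) + x.shape[sample_ndims:])
  y = np.moveaxis(y, 0, -1)
  return y, list(sample_shape)

x = np.random.rand(3, 4, 2)  # sample=3, batch=4, event=2
y, sample_shape = batch_of_event_sample_matrices(x)
assert sample_shape == [3]
assert np.array_equal(y, np.transpose(np.reshape(x, (-1, 4, 2)), (1, 2, 0)))
```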
Example #8
    def testDistributionShapeGetShapeStatic(self):
        with self.test_session():
            shaper = _DistributionShape(batch_ndims=0, event_ndims=0)
            self.assertAllEqual((_empty_shape, _empty_shape, _empty_shape),
                                _constant(shaper.get_shape(1.)))
            self._assertNdArrayEqual(([1], _empty_shape, _empty_shape),
                                     _constant(shaper.get_shape(np.ones(1))))
            self._assertNdArrayEqual(
                ([2, 2], _empty_shape, _empty_shape),
                _constant(shaper.get_shape(np.ones((2, 2)))))
            self._assertNdArrayEqual(
                ([3, 2, 1], _empty_shape, _empty_shape),
                _constant(shaper.get_shape(np.ones((3, 2, 1)))))

            shaper = _DistributionShape(batch_ndims=0, event_ndims=1)
            with self.assertRaisesRegexp(ValueError, "expected .* <= ndims"):
                shaper.get_shape(1.)
            self._assertNdArrayEqual((_empty_shape, _empty_shape, [1]),
                                     _constant(shaper.get_shape(np.ones(1))))
            self._assertNdArrayEqual(
                ([2], _empty_shape, [2]),
                _constant(shaper.get_shape(np.ones((2, 2)))))
            self._assertNdArrayEqual(
                ([3, 2], _empty_shape, [1]),
                _constant(shaper.get_shape(np.ones((3, 2, 1)))))

            shaper = _DistributionShape(batch_ndims=1, event_ndims=0)
            with self.assertRaisesRegexp(ValueError, "expected .* <= ndims"):
                shaper.get_shape(1.)
            self._assertNdArrayEqual((_empty_shape, [1], _empty_shape),
                                     _constant(shaper.get_shape(np.ones(1))))
            self._assertNdArrayEqual(
                ([2], [2], _empty_shape),
                _constant(shaper.get_shape(np.ones((2, 2)))))
            self._assertNdArrayEqual(
                ([3, 2], [1], _empty_shape),
                _constant(shaper.get_shape(np.ones((3, 2, 1)))))

            shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
            with self.assertRaisesRegexp(ValueError, "expected .* <= ndims"):
                shaper.get_shape(1.)
            with self.assertRaisesRegexp(ValueError, "expected .* <= ndims"):
                shaper.get_shape(np.ones(1))
            self._assertNdArrayEqual(
                (_empty_shape, [2], [2]),
                _constant(shaper.get_shape(np.ones((2, 2)))))
            self._assertNdArrayEqual(
                ([3], [2], [1]), _constant(shaper.get_shape(np.ones(
                    (3, 2, 1)))))
Example #9
    def testDistributionShapeMakeBatchReadyStaticNoExpand(self):
        with self.test_session() as sess:
            x = self._random_sample((1, 2, 3))
            shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
            y, sample_shape = shaper.make_batch_of_event_sample_matrices(
                x, expand_batch_dim=False)
            self.assertAllEqual(np.transpose(x, axes=(1, 2, 0)), y.eval())
            self.assertAllEqual((1, ), sample_shape.eval())
            should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
                y, sample_shape, expand_batch_dim=False)
            self.assertAllEqual(x, should_be_x_value.eval())

            shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
            x = tf.placeholder(tf.float32)
            x_value = self._random_sample((3, 4, 2), dtype=x.dtype)
            feed_dict = {x: x_value}
            y, sample_shape = shaper.make_batch_of_event_sample_matrices(
                x, expand_batch_dim=False)
            self.assertAllEqual((3, ),
                                sess.run(sample_shape, feed_dict=feed_dict))
            self.assertAllClose(np.transpose(np.reshape(x_value, (-1, 4, 2)),
                                             (1, 2, 0)),
                                sess.run(y, feed_dict=feed_dict),
                                rtol=1e-3)
            should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
                y, sample_shape, expand_batch_dim=False)
            self.assertAllEqual(
                x_value, sess.run(should_be_x_value, feed_dict=feed_dict))

            shaper = _DistributionShape(batch_ndims=0, event_ndims=0)
            x = tf.placeholder(tf.float32)
            x_value = np.ones([3], dtype=x.dtype.as_numpy_dtype())
            feed_dict = {x: x_value}
            y, sample_shape = shaper.make_batch_of_event_sample_matrices(
                x, expand_batch_dim=False)
            self.assertAllEqual([3], sess.run(sample_shape,
                                              feed_dict=feed_dict))
            # The following check shows we don't need to manually set_shape in the
            # ShapeUtil.
            self.assertAllEqual([1, None],
                                y.get_shape().ndims
                                and y.get_shape().as_list())
            self.assertAllEqual(
                np.ones([1, 3], dtype=x.dtype.as_numpy_dtype()),
                sess.run(y, feed_dict=feed_dict))
            should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
                y, sample_shape, expand_batch_dim=False)
            self.assertAllEqual(
                x_value, sess.run(should_be_x_value, feed_dict=feed_dict))
Example #10
  def testDistributionShapeGetDimsDynamic(self):
    with self.test_session() as sess:
      # Works for static {batch,event}_ndims despite unfed input.
      shaper = _DistributionShape(batch_ndims=1, event_ndims=2)
      y = array_ops.placeholder(dtypes.float32, shape=(10, None, 5, 5))
      self._assertNdArrayEqual([[0], [1], [2, 3]], _eval(shaper.get_dims(y)))

      # Works for deferred {batch,event}_ndims.
      batch_ndims = array_ops.placeholder(dtypes.int32)
      event_ndims = array_ops.placeholder(dtypes.int32)
      shaper = _DistributionShape(
          batch_ndims=batch_ndims, event_ndims=event_ndims)
      y = array_ops.placeholder(dtypes.float32)
      y_value = self._random_sample((10, 3, 5, 5), dtype=y.dtype)
      feed_dict = {y: y_value, batch_ndims: 1, event_ndims: 2}
      self._assertNdArrayEqual(
          ([0], [1], [2, 3]), sess.run(shaper.get_dims(y), feed_dict=feed_dict))
Example #11
 def _build_graph(self, x, batch_ndims, event_ndims, expand_batch_dim):
   shaper = _DistributionShape(batch_ndims=batch_ndims,
                               event_ndims=event_ndims)
   y, sample_shape = shaper.make_batch_of_event_sample_matrices(
       x, expand_batch_dim=expand_batch_dim)
   should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
       y, sample_shape, expand_batch_dim=expand_batch_dim)
   return y, sample_shape, should_be_x_value
Example #12
  def testDistributionShapeGetDimsDynamic(self):
    with self.test_session() as sess:
      # Works for static {batch,event}_ndims despite unfed input.
      shaper = _DistributionShape(batch_ndims=1, event_ndims=2)
      y = array_ops.placeholder(dtypes.float32, shape=(10, None, 5, 5))
      self._assertNdArrayEqual([[0], [1], [2, 3]], _eval(shaper.get_dims(y)))

      # Works for deferred {batch,event}_ndims.
      batch_ndims = array_ops.placeholder(dtypes.int32)
      event_ndims = array_ops.placeholder(dtypes.int32)
      shaper = _DistributionShape(
          batch_ndims=batch_ndims, event_ndims=event_ndims)
      y = array_ops.placeholder(dtypes.float32)
      y_value = self._random_sample((10, 3, 5, 5), dtype=y.dtype)
      feed_dict = {y: y_value, batch_ndims: 1, event_ndims: 2}
      self._assertNdArrayEqual(
          ([0], [1], [2, 3]), sess.run(shaper.get_dims(y), feed_dict=feed_dict))
Example #13
 def _build_graph(self, x, batch_ndims, event_ndims, expand_batch_dim):
     shaper = _DistributionShape(batch_ndims=batch_ndims,
                                 event_ndims=event_ndims)
     y, sample_shape = shaper.make_batch_of_event_sample_matrices(
         x, expand_batch_dim=expand_batch_dim)
     should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
         y, sample_shape, expand_batch_dim=expand_batch_dim)
     return y, sample_shape, should_be_x_value
Example #14
    def __init__(self,
                 batch_ndims=None,
                 event_ndims=None,
                 parameters=None,
                 is_constant_jacobian=False,
                 validate_args=False,
                 dtype=None,
                 name=None):
        """Constructs Bijector.

    A `Bijector` transforms random variables into new random variables.

    Examples:

    ```python
    # Create the Y = g(X) = X transform which operates on 4-Tensors of vectors.
    identity = Identity(batch_ndims=4, event_ndims=1)

    # Create the Y = g(X) = exp(X) transform which operates on matrices.
    exp = Exp(batch_ndims=0, event_ndims=2)
    ```

    See `Bijector` subclass docstring for more details and specific examples.

    Args:
      batch_ndims: number of dimensions associated with batch coordinates.
      event_ndims: number of dimensions associated with event coordinates.
      parameters: Dictionary of parameters used by this `Bijector`.
      is_constant_jacobian: `Boolean` indicating that the Jacobian is not a
        function of the input.
      validate_args: `Boolean`, default `False`.  Whether to validate input with
        asserts. If `validate_args` is `False`, and the inputs are invalid,
        correct behavior is not guaranteed.
      dtype: `tf.dtype` supported by this `Bijector`. `None` means dtype is not
        enforced.
      name: The name to give Ops created by the initializer.
    """
        if batch_ndims is None or event_ndims is None:
            self._shaper = None  # Apparently subclass will create.
        else:
            self._shaper = _DistributionShape(batch_ndims=batch_ndims,
                                              event_ndims=event_ndims,
                                              validate_args=validate_args)
        self._parameters = parameters or {}
        self._is_constant_jacobian = is_constant_jacobian
        self._validate_args = validate_args
        self._dtype = dtype
        if name:
            self._name = name
        else:
            # We want the default convention to be snake_case rather than CamelCase
            # since `Chain` uses bijector.name as the condition_kwargs dictionary key.
            def camel_to_snake(name):
                s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
                return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()

            self._name = camel_to_snake(type(self).__name__)
Example #15
  def __init__(self,
               batch_ndims=None,
               event_ndims=None,
               parameters=None,
               is_constant_jacobian=False,
               validate_args=False,
               dtype=None,
               name=None):
    """Constructs Bijector.

    A `Bijector` transforms random variables into new random variables.

    Examples:

    ```python
    # Create the Y = g(X) = X transform which operates on 4-Tensors of vectors.
    identity = Identity(batch_ndims=4, event_ndims=1)

    # Create the Y = g(X) = exp(X) transform which operates on matrices.
    exp = Exp(batch_ndims=0, event_ndims=2)
    ```

    See `Bijector` subclass docstring for more details and specific examples.

    Args:
      batch_ndims: number of dimensions associated with batch coordinates.
      event_ndims: number of dimensions associated with event coordinates.
      parameters: Dictionary of parameters used by this `Bijector`.
      is_constant_jacobian: `Boolean` indicating that the Jacobian is not a
        function of the input.
      validate_args: `Boolean`, default `False`.  Whether to validate input with
        asserts. If `validate_args` is `False`, and the inputs are invalid,
        correct behavior is not guaranteed.
      dtype: `tf.dtype` supported by this `Bijector`. `None` means dtype is not
        enforced.
      name: The name to give Ops created by the initializer.
    """
    if batch_ndims is None or event_ndims is None:
      self._shaper = None  # Apparently subclass will create.
    else:
      self._shaper = _DistributionShape(
          batch_ndims=batch_ndims,
          event_ndims=event_ndims,
          validate_args=validate_args)
    self._parameters = parameters or {}
    self._is_constant_jacobian = is_constant_jacobian
    self._validate_args = validate_args
    self._dtype = dtype
    if name:
      self._name = name
    else:
      # We want the default convention to be snake_case rather than CamelCase
      # since `Chain` uses bijector.name as the condition_kwargs dictionary key.
      def camel_to_snake(name):
        s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
        return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
      self._name = camel_to_snake(type(self).__name__)
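
The `camel_to_snake` fallback above can be exercised on its own; for instance, a subclass whose class name is `SoftmaxCentered` (name chosen purely for illustration) would pick up the default bijector name `softmax_centered`:

```python
import re

def camel_to_snake(name):
  s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
  return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()

assert camel_to_snake("SoftmaxCentered") == "softmax_centered"
assert camel_to_snake("Exp") == "exp"
```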
Example #16
 def testDistributionShapeGetNdimsDynamic(self):
   with self.test_session() as sess:
     batch_ndims = array_ops.placeholder(dtypes.int32)
     event_ndims = array_ops.placeholder(dtypes.int32)
     shaper = _DistributionShape(
         batch_ndims=batch_ndims, event_ndims=event_ndims)
     y = array_ops.placeholder(dtypes.float32)
     y_value = np.ones((4, 2), dtype=y.dtype.as_numpy_dtype())
     feed_dict = {y: y_value, batch_ndims: 1, event_ndims: 1}
     self.assertEqual(2, sess.run(shaper.get_ndims(y), feed_dict=feed_dict))
Example #17
 def testDistributionShapeGetNdimsDynamic(self):
   with self.test_session() as sess:
     batch_ndims = array_ops.placeholder(dtypes.int32)
     event_ndims = array_ops.placeholder(dtypes.int32)
     shaper = _DistributionShape(
         batch_ndims=batch_ndims, event_ndims=event_ndims)
     y = array_ops.placeholder(dtypes.float32)
     y_value = np.ones((4, 2), dtype=y.dtype.as_numpy_dtype())
     feed_dict = {y: y_value, batch_ndims: 1, event_ndims: 1}
     self.assertEqual(2, sess.run(shaper.get_ndims(y), feed_dict=feed_dict))
Example #18
  def testDistributionShapeGetShapeStatic(self):
    with self.test_session():
      shaper = _DistributionShape(batch_ndims=0, event_ndims=0)
      self.assertAllEqual((_empty_shape, _empty_shape, _empty_shape),
                          _constant(shaper.get_shape(1.)))
      self._assertNdArrayEqual(([1], _empty_shape, _empty_shape),
                               _constant(shaper.get_shape(np.ones(1))))
      self._assertNdArrayEqual(([2, 2], _empty_shape, _empty_shape),
                               _constant(shaper.get_shape(np.ones((2, 2)))))
      self._assertNdArrayEqual(([3, 2, 1], _empty_shape, _empty_shape),
                               _constant(shaper.get_shape(np.ones((3, 2, 1)))))

      shaper = _DistributionShape(batch_ndims=0, event_ndims=1)
      with self.assertRaisesRegexp(ValueError, "expected .* <= ndims"):
        shaper.get_shape(1.)
      self._assertNdArrayEqual((_empty_shape, _empty_shape, [1]),
                               _constant(shaper.get_shape(np.ones(1))))
      self._assertNdArrayEqual(([2], _empty_shape, [2]),
                               _constant(shaper.get_shape(np.ones((2, 2)))))
      self._assertNdArrayEqual(([3, 2], _empty_shape, [1]),
                               _constant(shaper.get_shape(np.ones((3, 2, 1)))))

      shaper = _DistributionShape(batch_ndims=1, event_ndims=0)
      with self.assertRaisesRegexp(ValueError, "expected .* <= ndims"):
        shaper.get_shape(1.)
      self._assertNdArrayEqual((_empty_shape, [1], _empty_shape),
                               _constant(shaper.get_shape(np.ones(1))))
      self._assertNdArrayEqual(([2], [2], _empty_shape),
                               _constant(shaper.get_shape(np.ones((2, 2)))))
      self._assertNdArrayEqual(([3, 2], [1], _empty_shape),
                               _constant(shaper.get_shape(np.ones((3, 2, 1)))))

      shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
      with self.assertRaisesRegexp(ValueError, "expected .* <= ndims"):
        shaper.get_shape(1.)
      with self.assertRaisesRegexp(ValueError, "expected .* <= ndims"):
        shaper.get_shape(np.ones(1))
      self._assertNdArrayEqual((_empty_shape, [2], [2]),
                               _constant(shaper.get_shape(np.ones((2, 2)))))
      self._assertNdArrayEqual(([3], [2], [1]),
                               _constant(shaper.get_shape(np.ones((3, 2, 1)))))
Example #19
  def __init__(self,
               batch_ndims=None,
               event_ndims=None,
               parameters=None,
               is_constant_jacobian=False,
               validate_args=True,
               dtype=None,
               name=None):
    """Constructs Bijector.

    A `Bijector` transforms random variables into new random variables.

    Examples:

    ```python
    # Create the Y = g(X) = X transform which operates on 4-Tensors of vectors.
    identity = Identity(batch_ndims=4, event_ndims=1)

    # Create the Y = g(X) = exp(X) transform which operates on matrices.
    exp = Exp(batch_ndims=0, event_ndims=2)
    ```

    See `Bijector` subclass docstring for more details and specific examples.

    Args:
      batch_ndims: number of dimensions associated with batch coordinates.
      event_ndims: number of dimensions associated with event coordinates.
      parameters: Dictionary of parameters used by this `Bijector`.
      is_constant_jacobian: `Boolean` indicating that the Jacobian is not a
        function of the input.
      validate_args: `Boolean`. If true, Tensor arguments are
        checked for correctness. (Non-tensor arguments are always checked.)
      dtype: `tf.dtype` supported by this `Bijector`. `None` means dtype is not
        enforced.
      name: The name to give Ops created by the initializer.
    """
    if batch_ndims is None or event_ndims is None:
      self._shaper = None  # Apparently subclass will create.
    else:
      self._shaper = _DistributionShape(
          batch_ndims=batch_ndims,
          event_ndims=event_ndims,
          validate_args=validate_args)
    self._parameters = parameters or {}
    self._is_constant_jacobian = is_constant_jacobian
    self._validate_args = validate_args
    self._dtype = dtype
    self._name = name or type(self).__name__
Example #20
    def __init__(self,
                 batch_ndims=None,
                 event_ndims=None,
                 parameters=None,
                 is_constant_jacobian=False,
                 validate_args=True,
                 dtype=None,
                 name=None):
        """Constructs Bijector.

    A `Bijector` transforms random variables into new random variables.

    Examples:

    ```python
    # Create the Y = g(X) = X transform which operates on 4-Tensors of vectors.
    identity = Identity(batch_ndims=4, event_ndims=1)

    # Create the Y = g(X) = exp(X) transform which operates on matrices.
    exp = Exp(batch_ndims=0, event_ndims=2)
    ```

    See `Bijector` subclass docstring for more details and specific examples.

    Args:
      batch_ndims: number of dimensions associated with batch coordinates.
      event_ndims: number of dimensions associated with event coordinates.
      parameters: Dictionary of parameters used by this `Bijector`.
      is_constant_jacobian: `Boolean` indicating that the Jacobian is not a
        function of the input.
      validate_args: `Boolean`. If true, Tensor arguments are
        checked for correctness. (Non-tensor arguments are always checked.)
      dtype: `tf.dtype` supported by this `Bijector`. `None` means dtype is not
        enforced.
      name: The name to give Ops created by the initializer.
    """
        if batch_ndims is None or event_ndims is None:
            self._shaper = None  # Apparently subclass will create.
        else:
            self._shaper = _DistributionShape(batch_ndims=batch_ndims,
                                              event_ndims=event_ndims,
                                              validate_args=validate_args)
        self._parameters = parameters or {}
        self._is_constant_jacobian = is_constant_jacobian
        self._validate_args = validate_args
        self._dtype = dtype
        self._name = name or type(self).__name__
Example #21
    def __init__(self,
                 shift=None,
                 scale_identity_multiplier=None,
                 scale_diag=None,
                 scale_tril=None,
                 scale_perturb_factor=None,
                 scale_perturb_diag=None,
                 validate_args=False,
                 name="affine"):
        """Instantiates the `Affine` bijector.

    This `Bijector` is initialized with `shift` `Tensor` and `scale` arguments,
    giving the forward operation:

    ```none
    Y = g(X) = scale @ X + shift
    ```

    where the `scale` term is logically equivalent to:

    ```python
    scale = (
      scale_identity_multiplier * tf.linalg.tensor_diag(tf.ones(d)) +
      tf.linalg.tensor_diag(scale_diag) +
      scale_tril +
      scale_perturb_factor @ diag(scale_perturb_diag) @
        tf.transpose([scale_perturb_factor])
    )
    ```

    If none of `scale_identity_multiplier`, `scale_diag`, or `scale_tril` are
    specified then `scale += IdentityMatrix`. Otherwise specifying a
    `scale` argument has the semantics of `scale += Expand(arg)`, i.e.,
    `scale_diag != None` means `scale += tf.linalg.tensor_diag(scale_diag)`.

    Args:
      shift: Floating-point `Tensor`. If this is set to `None`, no shift is
        applied.
      scale_identity_multiplier: floating point rank 0 `Tensor` representing a
        scaling done to the identity matrix.
        When `scale_identity_multiplier = scale_diag = scale_tril = None` then
        `scale += IdentityMatrix`. Otherwise no scaled-identity-matrix is added
        to `scale`.
      scale_diag: Floating-point `Tensor` representing the diagonal matrix.
        `scale_diag` has shape [N1, N2, ...  k], which represents a k x k
        diagonal matrix.
        When `None` no diagonal term is added to `scale`.
      scale_tril: Floating-point `Tensor` representing the lower triangular
        matrix. `scale_tril` has shape [N1, N2, ...  k, k], which represents a
        k x k lower triangular matrix.
        When `None` no `scale_tril` term is added to `scale`.
        The upper triangular elements above the diagonal are ignored.
      scale_perturb_factor: Floating-point `Tensor` representing factor matrix
        with last two dimensions of shape `(k, r)`. When `None`, no rank-r
        update is added to `scale`.
      scale_perturb_diag: Floating-point `Tensor` representing the diagonal
        matrix. `scale_perturb_diag` has shape [N1, N2, ...  r], which
        represents an `r x r` diagonal matrix. When `None`, low rank updates
        will take the form `scale_perturb_factor * scale_perturb_factor.T`.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str` name given to ops managed by this object.

    Raises:
      ValueError: if `perturb_diag` is specified but not `perturb_factor`.
      TypeError: if `shift` has different `dtype` from `scale` arguments.
    """
        self._graph_parents = []
        self._name = name
        self._validate_args = validate_args

        # Ambiguous definition of low rank update.
        if scale_perturb_diag is not None and scale_perturb_factor is None:
            raise ValueError("When scale_perturb_diag is specified, "
                             "scale_perturb_factor must be specified.")

        # Special case, only handling a scaled identity matrix. We don't know its
        # dimensions, so this is special cased.
        # We don't check identity_multiplier, since below we set it to 1. if all
        # other scale args are None.
        self._is_only_identity_multiplier = (scale_tril is None
                                             and scale_diag is None
                                             and scale_perturb_factor is None)

        with self._name_scope("init",
                              values=[
                                  shift, scale_identity_multiplier, scale_diag,
                                  scale_tril, scale_perturb_diag,
                                  scale_perturb_factor
                              ]):

            # In the absence of `loc` and `scale`, we'll assume `dtype` is `float32`.
            dtype = dtypes.float32

            if shift is not None:
                shift = ops.convert_to_tensor(shift, name="shift")
                dtype = shift.dtype.base_dtype
            self._shift = shift

            # When no args are specified, pretend the scale matrix is the identity
            # matrix.
            if (self._is_only_identity_multiplier
                    and scale_identity_multiplier is None):
                scale_identity_multiplier = ops.convert_to_tensor(1.,
                                                                  dtype=dtype)

            # self._create_scale_operator returns a LinearOperator in all cases
            # except if self._is_only_identity_multiplier; in which case it
            # returns a scalar Tensor.
            scale = self._create_scale_operator(
                identity_multiplier=scale_identity_multiplier,
                diag=scale_diag,
                tril=scale_tril,
                perturb_diag=scale_perturb_diag,
                perturb_factor=scale_perturb_factor,
                shift=shift,
                validate_args=validate_args)

            if scale.dtype is not None:
                dtype = scale.dtype.base_dtype

            if scale is not None and not self._is_only_identity_multiplier:
                if (shift is not None
                        and shift.dtype.base_dtype != scale.dtype.base_dtype):
                    raise TypeError(
                        "shift.dtype({}) is incompatible with scale.dtype({})."
                        .format(shift.dtype, scale.dtype))

                if scale.tensor_rank is not None:
                    batch_ndims = scale.tensor_rank - 2
                else:
                    batch_ndims = scale.tensor_rank_tensor() - 2
            else:
                # We won't need shape inference when scale is None or when scale is a
                # scalar.
                batch_ndims = 0
            self._scale = scale
            self._shaper = _DistributionShape(batch_ndims=batch_ndims,
                                              event_ndims=1,
                                              validate_args=validate_args)
            super(Affine, self).__init__(
                forward_min_event_ndims=1,
                graph_parents=(
                    [self._scale] if tensor_util.is_tensor(
                        self._scale) else self._scale.graph_parents +
                    [self._shift] if self._shift is not None else []),
                is_constant_jacobian=True,
                dtype=dtype,
                validate_args=validate_args,
                name=name)
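
Read literally, the `scale` composition in the docstring can be spelled out with NumPy. The sketch below uses made-up sizes and values purely to illustrate how the four terms combine into a dense matrix; per the comment above, the bijector itself builds `scale` as a `LinearOperator` (or a scalar Tensor) rather than materializing it this way:

```python
import numpy as np

d, r = 3, 2  # event size and perturbation rank, chosen for illustration
scale_identity_multiplier = 2.
scale_diag = np.array([1., 2., 3.])
scale_tril = np.tril(np.ones((d, d)), k=-1)
scale_perturb_factor = np.ones((d, r))
scale_perturb_diag = np.array([.5, .5])

scale = (scale_identity_multiplier * np.diag(np.ones(d))
         + np.diag(scale_diag)
         + scale_tril
         + scale_perturb_factor
         @ np.diag(scale_perturb_diag)
         @ scale_perturb_factor.T)

shift = np.zeros(d)
x = np.ones(d)
y = scale @ x + shift  # forward operation: Y = g(X) = scale @ X + shift
```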
Example #22
    def __init__(self,
                 shift=None,
                 scale_identity_multiplier=None,
                 scale_diag=None,
                 scale_tril=None,
                 scale_perturb_factor=None,
                 scale_perturb_diag=None,
                 event_ndims=1,
                 validate_args=False,
                 name="affine"):
        """Instantiates the `Affine` bijector.

    This `Bijector` is initialized with `shift` `Tensor` and `scale` arguments,
    giving the forward operation:

    ```none
    Y = g(X) = scale @ X + shift
    ```

    where the `scale` term is logically equivalent to:

    ```python
    scale = (
      scale_identity_multiplier * tf.diag(tf.ones(d)) +
      tf.diag(scale_diag) +
      scale_tril +
      scale_perturb_factor @ diag(scale_perturb_diag) @
        tf.transpose([scale_perturb_factor])
    )
    ```

    If none of `scale_identity_multiplier`, `scale_diag`, or `scale_tril` are
    specified then `scale += IdentityMatrix`. Otherwise specifying a
    `scale` argument has the semantics of `scale += Expand(arg)`, i.e.,
    `scale_diag != None` means `scale += tf.diag(scale_diag)`.

    Args:
      shift: Floating-point `Tensor`. If this is set to `None`, no shift is
        applied.
      scale_identity_multiplier: floating point rank 0 `Tensor` representing a
        scaling done to the identity matrix.
        When `scale_identity_multiplier = scale_diag = scale_tril = None` then
        `scale += IdentityMatrix`. Otherwise no scaled-identity-matrix is added
        to `scale`.
      scale_diag: Floating-point `Tensor` representing the diagonal matrix.
        `scale_diag` has shape [N1, N2, ...  k], which represents a k x k
        diagonal matrix.
        When `None` no diagonal term is added to `scale`.
      scale_tril: Floating-point `Tensor` representing the lower triangular
        matrix. `scale_tril` has shape [N1, N2, ...  k, k], which represents a
        k x k lower triangular matrix.
        When `None` no `scale_tril` term is added to `scale`.
        The upper triangular elements above the diagonal are ignored.
      scale_perturb_factor: Floating-point `Tensor` representing factor matrix
        with last two dimensions of shape `(k, r)`. When `None`, no rank-r
        update is added to `scale`.
      scale_perturb_diag: Floating-point `Tensor` representing the diagonal
        matrix. `scale_perturb_diag` has shape [N1, N2, ...  r], which
        represents an `r x r` diagonal matrix. When `None`, low rank updates
        will take the form `scale_perturb_factor * scale_perturb_factor.T`.
      event_ndims: Scalar `int32` `Tensor` indicating the number of dimensions
        associated with a particular draw from the distribution. Must be 0 or 1.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str` name given to ops managed by this object.

    Raises:
      ValueError: if `perturb_diag` is specified but not `perturb_factor`.
      TypeError: if `shift` has different `dtype` from `scale` arguments.
    """
        self._graph_parents = []
        self._name = name
        self._validate_args = validate_args
        # Ambiguous definition of low rank update.
        if scale_perturb_diag is not None and scale_perturb_factor is None:
            raise ValueError("When scale_perturb_diag is specified, "
                             "scale_perturb_factor must be specified.")
        # Special case, only handling a scaled identity matrix. We don't know its
        # dimensions, so this is special cased.
        # We don't check identity_multiplier, since below we set it to 1. if all
        # other scale args are None.
        self._is_only_identity_multiplier = (scale_tril is None
                                             and scale_diag is None
                                             and scale_perturb_factor is None)
        # When no args are specified, pretend the scale matrix is the identity
        # matrix.
        if self._is_only_identity_multiplier and scale_identity_multiplier is None:
            scale_identity_multiplier = 1.
        with self._name_scope("init",
                              values=[
                                  shift, scale_identity_multiplier, scale_diag,
                                  scale_tril, scale_perturb_diag,
                                  scale_perturb_factor, event_ndims
                              ]):
            event_ndims = ops.convert_to_tensor(event_ndims,
                                                name="event_ndims")
            if validate_args:
                is_less_than_two = check_ops.assert_less(
                    event_ndims, 2, message="event_ndims must be 0 or 1")
                event_ndims = control_flow_ops.with_dependencies(
                    [is_less_than_two], event_ndims)
            self._shift = _as_tensor(shift, "shift")
            # self._create_scale_operator returns an OperatorPD in all cases except if
            # self._is_only_identity_multiplier; in which case it returns a scalar
            # Tensor.
            self._scale = self._create_scale_operator(
                identity_multiplier=scale_identity_multiplier,
                diag=scale_diag,
                tril=scale_tril,
                perturb_diag=scale_perturb_diag,
                perturb_factor=scale_perturb_factor,
                event_ndims=event_ndims,
                validate_args=validate_args)
            if (self._shift is not None and self._shift.dtype.base_dtype !=
                    self._scale.dtype.base_dtype):
                raise TypeError(
                    "shift.dtype({}) does not match scale.dtype({})".format(
                        self._shift.dtype, self._scale.dtype))
            self._shaper = _DistributionShape(
                batch_ndims=self._infer_batch_ndims(),
                event_ndims=event_ndims,
                validate_args=validate_args)
            super(Affine, self).__init__(
                event_ndims=event_ndims,
                graph_parents=(
                    [event_ndims] + [self._scale] if tensor_util.is_tensor(
                        self._scale) else self._scale.inputs +
                    [self._shift] if self._shift is not None else []),
                is_constant_jacobian=True,
                dtype=self._scale.dtype,
                validate_args=validate_args,
                name=name)
Example #23
  def testDistributionShapeMakeBatchReadyDynamicNoExpand(self):
    with self.test_session() as sess:
      shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
      x = tf.placeholder(tf.float32, shape=(1, 2, 3))
      x_value = self._random_sample(x.get_shape().as_list(), dtype=x.dtype)
      y, sample_shape = sess.run(
          shaper.make_batch_of_event_sample_matrices(
              x, expand_batch_dim=False),
          feed_dict={x: x_value})
      self.assertAllEqual(np.transpose(x_value, (1, 2, 0)), y)
      self.assertAllEqual((1,), sample_shape)

      feed_dict = {x: x_value}
      y, sample_shape = shaper.make_batch_of_event_sample_matrices(
          x, expand_batch_dim=False)
      self.assertAllEqual(
          (1,),
          sess.run(sample_shape, feed_dict=feed_dict))
      self.assertAllEqual(
          np.transpose(x_value, (1, 2, 0)),
          sess.run(y, feed_dict=feed_dict))
      should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
          y, sample_shape, expand_batch_dim=False)
      self.assertAllEqual(x_value, sess.run(should_be_x_value,
                                            feed_dict=feed_dict))

      batch_ndims = tf.placeholder(tf.int32)
      event_ndims = tf.placeholder(tf.int32)
      shaper = _DistributionShape(batch_ndims=batch_ndims,
                                  event_ndims=event_ndims)

      # batch_ndims = 1, event_ndims = 1.
      x = tf.placeholder(tf.float32)
      x_value = np.ones((3, 4, 2), dtype=x.dtype.as_numpy_dtype())
      feed_dict = {x: x_value, batch_ndims: 1, event_ndims: 1}
      y, sample_shape = shaper.make_batch_of_event_sample_matrices(
          x, expand_batch_dim=False)
      self.assertAllEqual(
          [3],
          sess.run(sample_shape, feed_dict=feed_dict))
      self.assertAllEqual(
          np.ones([4, 2, 3], dtype=x.dtype.as_numpy_dtype()),
          sess.run(y, feed_dict=feed_dict))
      should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
          y, sample_shape, expand_batch_dim=False)
      self.assertAllEqual(x_value, sess.run(should_be_x_value,
                                            feed_dict=feed_dict))

      # batch_ndims = 0, event_ndims = 0.
      x_value = np.ones((3,), dtype=x.dtype.as_numpy_dtype())
      feed_dict = {x: x_value, batch_ndims: 0, event_ndims: 0}
      y, sample_shape = shaper.make_batch_of_event_sample_matrices(
          x, expand_batch_dim=False)
      self.assertAllEqual(
          [3],
          sess.run(sample_shape, feed_dict=feed_dict))
      self.assertAllEqual(
          np.ones([1, 3], dtype=x.dtype.as_numpy_dtype()),
          sess.run(y, feed_dict=feed_dict))
      should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
          y, sample_shape, expand_batch_dim=False)
      self.assertAllEqual(x_value, sess.run(should_be_x_value,
                                            feed_dict=feed_dict))

      # batch_ndims = 0, event_ndims = 1.
      x_value = np.ones([2], dtype=x.dtype.as_numpy_dtype())
      feed_dict = {x: x_value, batch_ndims: 0, event_ndims: 1}
      y, sample_shape = shaper.make_batch_of_event_sample_matrices(
          x, expand_batch_dim=False)
      self.assertAllEqual(
          [],
          sess.run(sample_shape, feed_dict=feed_dict))
      self.assertAllEqual(
          np.ones([2, 1], dtype=x.dtype.as_numpy_dtype()),
          sess.run(y, feed_dict=feed_dict))
      should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
          y, sample_shape, expand_batch_dim=False)
      self.assertAllEqual(x_value, sess.run(should_be_x_value,
                                            feed_dict=feed_dict))

      # batch_ndims = 1, event_ndims = 0.
      x_value = np.ones((1, 2), dtype=x.dtype.as_numpy_dtype())
      feed_dict = {x: x_value, batch_ndims: 1, event_ndims: 0}
      y, sample_shape = shaper.make_batch_of_event_sample_matrices(
          x, expand_batch_dim=False)
      self.assertAllEqual(
          (1,),
          sess.run(sample_shape, feed_dict=feed_dict))
      self.assertAllEqual(
          np.ones((2, 1, 1), dtype=x.dtype.as_numpy_dtype()),
          sess.run(y, feed_dict=feed_dict))
      should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
          y, sample_shape, expand_batch_dim=False)
      self.assertAllEqual(x_value, sess.run(should_be_x_value,
                                            feed_dict=feed_dict))
Example #24
    def testDistributionShapeMakeBatchReadyDynamic(self):
        with self.test_session() as sess:
            shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
            x = tf.placeholder(tf.float32, shape=(1, 2, 3))
            x_value = self._random_sample(x.get_shape().as_list(),
                                          dtype=x.dtype)
            y, sample_shape = sess.run(
                shaper.make_batch_of_event_sample_matrices(x),
                feed_dict={x: x_value})
            self.assertAllEqual(np.transpose(x_value, (1, 2, 0)), y)
            self.assertAllEqual((1, ), sample_shape)

            feed_dict = {x: x_value}
            y, sample_shape = shaper.make_batch_of_event_sample_matrices(x)
            self.assertAllEqual((1, ),
                                sess.run(sample_shape, feed_dict=feed_dict))
            self.assertAllEqual(np.transpose(x_value, (1, 2, 0)),
                                sess.run(y, feed_dict=feed_dict))
            should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
                y, sample_shape)
            self.assertAllEqual(
                x_value, sess.run(should_be_x_value, feed_dict=feed_dict))

            batch_ndims = tf.placeholder(tf.int32)
            event_ndims = tf.placeholder(tf.int32)
            shaper = _DistributionShape(batch_ndims=batch_ndims,
                                        event_ndims=event_ndims)

            # batch_ndims = 1, event_ndims = 1.
            x = tf.placeholder(tf.float32)
            x_value = np.ones((3, 4, 2), dtype=x.dtype.as_numpy_dtype())
            feed_dict = {x: x_value, batch_ndims: 1, event_ndims: 1}
            y, sample_shape = shaper.make_batch_of_event_sample_matrices(x)
            self.assertAllEqual((3, ),
                                sess.run(sample_shape, feed_dict=feed_dict))
            self.assertAllEqual(
                np.ones((4, 2, 3), dtype=x.dtype.as_numpy_dtype()),
                sess.run(y, feed_dict=feed_dict))
            should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
                y, sample_shape)
            self.assertAllEqual(
                x_value, sess.run(should_be_x_value, feed_dict=feed_dict))

            # batch_ndims = 0, event_ndims = 0.
            x_value = np.ones((3, ), dtype=x.dtype.as_numpy_dtype())
            feed_dict = {x: x_value, batch_ndims: 0, event_ndims: 0}
            y, sample_shape = shaper.make_batch_of_event_sample_matrices(x)
            self.assertAllEqual((3, ),
                                sess.run(sample_shape, feed_dict=feed_dict))
            self.assertAllEqual(
                np.ones((1, 1, 3), dtype=x.dtype.as_numpy_dtype()),
                sess.run(y, feed_dict=feed_dict))
            should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
                y, sample_shape)
            self.assertAllEqual(
                x_value, sess.run(should_be_x_value, feed_dict=feed_dict))

            # batch_ndims = 0, event_ndims = 1.
            x_value = np.ones((
                1,
                2,
            ), dtype=x.dtype.as_numpy_dtype())
            feed_dict = {x: x_value, batch_ndims: 0, event_ndims: 1}
            y, sample_shape = shaper.make_batch_of_event_sample_matrices(x)
            self.assertAllEqual((1, ),
                                sess.run(sample_shape, feed_dict=feed_dict))
            self.assertAllEqual(
                np.ones((1, 2, 1), dtype=x.dtype.as_numpy_dtype()),
                sess.run(y, feed_dict=feed_dict))
            should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
                y, sample_shape)
            self.assertAllEqual(
                x_value, sess.run(should_be_x_value, feed_dict=feed_dict))

            # batch_ndims = 1, event_ndims = 0.
            x_value = np.ones((1, 2), dtype=x.dtype.as_numpy_dtype())
            feed_dict = {x: x_value, batch_ndims: 1, event_ndims: 0}
            y, sample_shape = shaper.make_batch_of_event_sample_matrices(x)
            self.assertAllEqual((1, ),
                                sess.run(sample_shape, feed_dict=feed_dict))
            self.assertAllEqual(
                np.ones((2, 1, 1), dtype=x.dtype.as_numpy_dtype()),
                sess.run(y, feed_dict=feed_dict))
            should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
                y, sample_shape)
            self.assertAllEqual(
                x_value, sess.run(should_be_x_value, feed_dict=feed_dict))
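
Comparing this test with the `...NoExpand` variants above isolates what `expand_batch_dim` does: when `batch_ndims=0`, the expanded form pads a leading size-1 batch axis that the no-expand form omits. A small NumPy reading of the shapes both tests assert for the `batch_ndims=0, event_ndims=0` draw of shape `(3,)` (interpretation of the asserted layouts, for illustration only):

```python
import numpy as np

x = np.ones((3,), dtype=np.float32)  # 3 scalar draws: batch_ndims=0, event_ndims=0

y_no_expand = x.reshape(1, 3)     # shape asserted in the NoExpand test
y_expanded = x.reshape(1, 1, 3)   # shape asserted in this test: extra batch axis of 1

assert y_no_expand.shape == (1, 3)
assert y_expanded.shape == (1, 1, 3)
```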
Example #25
  def __init__(self,
               shift=None,
               scale_identity_multiplier=None,
               scale_diag=None,
               scale_tril=None,
               scale_perturb_factor=None,
               scale_perturb_diag=None,
               validate_args=False,
               name="affine"):
    """Instantiates the `Affine` bijector.

    This `Bijector` is initialized with `shift` `Tensor` and `scale` arguments,
    giving the forward operation:

    ```none
    Y = g(X) = scale @ X + shift
    ```

    where the `scale` term is logically equivalent to:

    ```python
    scale = (
      scale_identity_multiplier * tf.diag(tf.ones(d)) +
      tf.diag(scale_diag) +
      scale_tril +
      scale_perturb_factor @ diag(scale_perturb_diag) @
        tf.transpose([scale_perturb_factor])
    )
    ```

    If none of `scale_identity_multiplier`, `scale_diag`, or `scale_tril` are
    specified then `scale += IdentityMatrix`. Otherwise specifying a
    `scale` argument has the semantics of `scale += Expand(arg)`, i.e.,
    `scale_diag != None` means `scale += tf.diag(scale_diag)`.

    Args:
      shift: Floating-point `Tensor`. If this is set to `None`, no shift is
        applied.
      scale_identity_multiplier: floating point rank 0 `Tensor` representing a
        scaling done to the identity matrix.
        When `scale_identity_multiplier = scale_diag = scale_tril = None` then
        `scale += IdentityMatrix`. Otherwise no scaled-identity-matrix is added
        to `scale`.
      scale_diag: Floating-point `Tensor` representing the diagonal matrix.
        `scale_diag` has shape [N1, N2, ...  k], which represents a k x k
        diagonal matrix.
        When `None` no diagonal term is added to `scale`.
      scale_tril: Floating-point `Tensor` representing the lower triangular
        matrix. `scale_tril` has shape [N1, N2, ...  k, k], which represents a
        k x k lower triangular matrix.
        When `None` no `scale_tril` term is added to `scale`.
        The upper triangular elements above the diagonal are ignored.
      scale_perturb_factor: Floating-point `Tensor` representing factor matrix
        with last two dimensions of shape `(k, r)`. When `None`, no rank-r
        update is added to `scale`.
      scale_perturb_diag: Floating-point `Tensor` representing the diagonal
        matrix. `scale_perturb_diag` has shape [N1, N2, ...  r], which
        represents an `r x r` diagonal matrix. When `None`, low rank updates
        will take the form `scale_perturb_factor * scale_perturb_factor.T`.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str` name given to ops managed by this object.

    Raises:
      ValueError: if `perturb_diag` is specified but not `perturb_factor`.
      TypeError: if `shift` has different `dtype` from `scale` arguments.
    """
    self._graph_parents = []
    self._name = name
    self._validate_args = validate_args

    # Ambiguous definition of low rank update.
    if scale_perturb_diag is not None and scale_perturb_factor is None:
      raise ValueError("When scale_perturb_diag is specified, "
                       "scale_perturb_factor must be specified.")

    # Special case, only handling a scaled identity matrix. We don't know its
    # dimensions, so this is special cased.
    # We don't check identity_multiplier, since below we set it to 1. if all
    # other scale args are None.
    self._is_only_identity_multiplier = (scale_tril is None and
                                         scale_diag is None and
                                         scale_perturb_factor is None)

    with self._name_scope("init", values=[
        shift, scale_identity_multiplier, scale_diag, scale_tril,
        scale_perturb_diag, scale_perturb_factor]):

      # In the absence of `loc` and `scale`, we'll assume `dtype` is `float32`.
      dtype = dtypes.float32

      if shift is not None:
        shift = ops.convert_to_tensor(shift, name="shift")
        dtype = shift.dtype.base_dtype
      self._shift = shift

      # When no args are specified, pretend the scale matrix is the identity
      # matrix.
      if (self._is_only_identity_multiplier and
          scale_identity_multiplier is None):
        scale_identity_multiplier = ops.convert_to_tensor(1., dtype=dtype)

      # self._create_scale_operator returns a LinearOperator in all cases
      # except when self._is_only_identity_multiplier is true, in which case
      # it returns a scalar Tensor.
      scale = self._create_scale_operator(
          identity_multiplier=scale_identity_multiplier,
          diag=scale_diag,
          tril=scale_tril,
          perturb_diag=scale_perturb_diag,
          perturb_factor=scale_perturb_factor,
          shift=shift,
          validate_args=validate_args)

      if scale.dtype is not None:
        dtype = scale.dtype.base_dtype

      if scale is not None and not self._is_only_identity_multiplier:
        if (shift is not None and
            shift.dtype.base_dtype != scale.dtype.base_dtype):
          raise TypeError(
              "shift.dtype({}) is incompatible with scale.dtype({}).".format(
                  shift.dtype, scale.dtype))

        if scale.tensor_rank is not None:
          batch_ndims = scale.tensor_rank - 2
        else:
          batch_ndims = scale.tensor_rank_tensor() - 2
      else:
        # We won't need shape inference when scale is None or when scale is a
        # scalar.
        batch_ndims = 0
      self._scale = scale
      self._shaper = _DistributionShape(
          batch_ndims=batch_ndims,
          event_ndims=1,
          validate_args=validate_args)
      super(Affine, self).__init__(
          forward_min_event_ndims=1,
          graph_parents=(
              [self._scale] if tensor_util.is_tensor(self._scale)
              else self._scale.graph_parents +
              [self._shift] if self._shift is not None else []),
          is_constant_jacobian=True,
          dtype=dtype,
          validate_args=validate_args,
          name=name)
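A minimal usage sketch of the `scale` composition described in the docstring above. The `tf.contrib.distributions.bijectors.Affine` import path, session setup, and numeric values are assumptions for illustration (under TensorFlow Probability the analogous class was `tfp.bijectors.Affine`):

```python
import numpy as np
import tensorflow as tf

# scale = 2 * I + diag([1., 2., 3.]) = diag([3., 4., 5.])  (assumed TF 1.x path)
affine = tf.contrib.distributions.bijectors.Affine(
    shift=[1., -1., 0.],
    scale_identity_multiplier=2.,
    scale_diag=[1., 2., 3.])

x = np.array([[1., 0., -1.]], dtype=np.float32)
y = affine.forward(x)  # y = scale @ x + shift, applied along the event axis

with tf.Session() as sess:
  print(sess.run(y))  # expected: [[ 4. -1. -5.]]
```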
Example #26
    def __init__(self,
                 shift=None,
                 scale=None,
                 validate_args=False,
                 name="affine_linear_operator"):
        """Instantiates the `AffineLinearOperator` bijector.

    Args:
      shift: Floating-point `Tensor`.
      scale:  Subclass of `LinearOperator`. Represents the (batch) positive
        definite matrix `M` in `R^{k x k}`.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str` name given to ops managed by this object.

    Raises:
      TypeError: if `scale` is not a `LinearOperator`.
      TypeError: if `shift.dtype` does not match `scale.dtype`.
      ValueError: if not `scale.is_non_singular`.
    """
        self._graph_parents = []
        self._name = name
        self._validate_args = validate_args
        graph_parents = []
        with self._name_scope("init", values=[shift]):
            # In the absence of `shift` and `scale`, we'll assume `dtype` is `float32`.
            dtype = dtypes.float32

            if shift is not None:
                shift = ops.convert_to_tensor(shift, name="shift")
                graph_parents += [shift]
                dtype = shift.dtype.base_dtype
            self._shift = shift

            if scale is not None:
                if (shift is not None
                        and shift.dtype.base_dtype != scale.dtype.base_dtype):
                    raise TypeError(
                        "shift.dtype({}) is incompatible with scale.dtype({})."
                        .format(shift.dtype, scale.dtype))
                if not isinstance(scale, linear_operator.LinearOperator):
                    raise TypeError(
                        "scale is not an instance of tf.LinearOperator")
                if validate_args and not scale.is_non_singular:
                    raise ValueError("Scale matrix must be non-singular.")
                graph_parents += scale.graph_parents
                if scale.tensor_rank is not None:
                    batch_ndims = scale.tensor_rank - 2
                else:
                    batch_ndims = scale.tensor_rank_tensor() - 2
                    graph_parents += [batch_ndims]
                if scale.dtype is not None:
                    dtype = scale.dtype.base_dtype
            else:
                batch_ndims = 0  # We won't need shape inference when scale is None.
            self._scale = scale
            self._shaper = _DistributionShape(batch_ndims=batch_ndims,
                                              event_ndims=1,
                                              validate_args=validate_args)
            super(AffineLinearOperator,
                  self).__init__(forward_min_event_ndims=1,
                                 graph_parents=graph_parents,
                                 is_constant_jacobian=True,
                                 dtype=dtype,
                                 validate_args=validate_args,
                                 name=name)
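A hedged sketch of how this variant is typically constructed: the `scale` argument is an explicit `LinearOperator` rather than a set of component arguments. The `tf.linalg.LinearOperatorLowerTriangular` class and the `tf.contrib.distributions.bijectors` path are assumptions tied to TF 1.x; values are illustrative only:

```python
import tensorflow as tf

# Lower-triangular scale supplied directly as a LinearOperator.
scale = tf.linalg.LinearOperatorLowerTriangular(
    [[1., 0.],
     [2., 3.]], is_non_singular=True)
bijector = tf.contrib.distributions.bijectors.AffineLinearOperator(
    shift=[0.5, -0.5], scale=scale)

y = bijector.forward([[1., 1.]])  # scale @ x + shift = [[1.5, 4.5]]
```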
Example #27
  def __init__(self,
               shift=None,
               scale=None,
               event_ndims=1,
               validate_args=False,
               name="affine_linear_operator"):
    """Instantiates the `AffineLinearOperator` bijector.

    Args:
      shift: Floating-point `Tensor`.
      scale:  Subclass of `LinearOperator`. Represents the (batch) positive
        definite matrix `M` in `R^{k x k}`.
      event_ndims: Scalar `integer` `Tensor` indicating the number of dimensions
        associated with a particular draw from the distribution. Must be 0 or 1.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str` name given to ops managed by this object.

    Raises:
      ValueError: if `event_ndims` is not 0 or 1.
      TypeError: if `scale` is not a `LinearOperator`.
      TypeError: if `shift.dtype` does not match `scale.dtype`.
      ValueError: if not `scale.is_non_singular`.
    """
    self._graph_parents = []
    self._name = name
    self._validate_args = validate_args
    graph_parents = []
    with self._name_scope("init", values=[shift]):
      event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
      if tensor_util.constant_value(event_ndims) is not None:
        event_ndims = tensor_util.constant_value(event_ndims)
        if event_ndims not in (0, 1):
          raise ValueError("event_ndims({}) was not 0 or 1".format(event_ndims))
      else:
        if validate_args:
          # Shape tool will catch if event_ndims is negative.
          event_ndims = control_flow_ops.with_dependencies(
              [check_ops.assert_less(
                  event_ndims, 2, message="event_ndims must be 0 or 1")],
              event_ndims)
        graph_parents += [event_ndims]

      # In the absence of `shift` and `scale`, we'll assume `dtype` is `float32`.
      dtype = dtypes.float32

      if shift is not None:
        shift = ops.convert_to_tensor(shift, name="shift")
        graph_parents += [shift]
        dtype = shift.dtype.base_dtype
      self._shift = shift

      if scale is not None:
        if (shift is not None and
            shift.dtype.base_dtype != scale.dtype.base_dtype):
          raise TypeError(
              "shift.dtype({}) is incompatible with scale.dtype({}).".format(
                  shift.dtype, scale.dtype))
        if not isinstance(scale, linear_operator.LinearOperator):
          raise TypeError("scale is not an instance of tf.LinearOperator")
        if validate_args and not scale.is_non_singular:
          raise ValueError("Scale matrix must be non-singular.")
        graph_parents += scale.graph_parents
        if scale.tensor_rank is not None:
          batch_ndims = scale.tensor_rank - 2
        else:
          batch_ndims = scale.tensor_rank_tensor() - 2
          graph_parents += [batch_ndims]
        if scale.dtype is not None:
          dtype = scale.dtype.base_dtype
      else:
        batch_ndims = 0  # We won't need shape inference when scale is None.
      self._scale = scale
      self._shaper = _DistributionShape(
          batch_ndims=batch_ndims,
          event_ndims=event_ndims,
          validate_args=validate_args)
      super(AffineLinearOperator, self).__init__(
          event_ndims=event_ndims,
          graph_parents=graph_parents,
          is_constant_jacobian=True,
          dtype=dtype,
          validate_args=validate_args,
          name=name)
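The statically-known `event_ndims` branch above rejects anything other than 0 or 1 at construction time. A sketch of that behavior, under the assumption that the installed release still exposes the `event_ndims` constructor argument shown in this variant (the `tf.linalg` and `tf.contrib` paths are likewise assumed):

```python
import tensorflow as tf

scale = tf.linalg.LinearOperatorDiag([2., 2.])
try:
  tf.contrib.distributions.bijectors.AffineLinearOperator(
      shift=[0., 0.], scale=scale, event_ndims=2)
except ValueError as e:
  print(e)  # event_ndims(2) was not 0 or 1
```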
Example #28
  def __init__(self,
               shift=None,
               scale_identity_multiplier=None,
               scale_diag=None,
               scale_tril=None,
               scale_perturb_factor=None,
               scale_perturb_diag=None,
               event_ndims=1,
               validate_args=False,
               name="affine"):
    """Instantiates the `Affine` bijector.

    This `Bijector` is initialized with `shift` `Tensor` and `scale` arguments,
    giving the forward operation:

    ```none
    Y = g(X) = scale @ X + shift
    ```

    where the `scale` term is logically equivalent to:

    ```python
    scale = (
      scale_identity_multiplier * tf.diag(tf.ones(d)) +
      tf.diag(scale_diag) +
      scale_tril +
      scale_perturb_factor @ diag(scale_perturb_diag) @
        tf.transpose([scale_perturb_factor])
    )
    ```

    If none of `scale_identity_multiplier`, `scale_diag`, or `scale_tril` are
    specified then `scale += IdentityMatrix`. Otherwise specifying a
    `scale` argument has the semantics of `scale += Expand(arg)`, i.e.,
    `scale_diag != None` means `scale += tf.diag(scale_diag)`.

    Args:
      shift: Floating-point `Tensor`. If this is set to `None`, no shift is
        applied.
      scale_identity_multiplier: Floating-point rank-0 `Tensor` representing a
        scaling applied to the identity matrix.
        When `scale_identity_multiplier = scale_diag = scale_tril = None` then
        `scale += IdentityMatrix`; otherwise, when this argument is `None`, no
        scaled-identity-matrix term is added to `scale`.
      scale_diag: Floating-point `Tensor` representing the diagonal matrix.
        `scale_diag` has shape [N1, N2, ...  k], which represents a k x k
        diagonal matrix.
        When `None` no diagonal term is added to `scale`.
      scale_tril: Floating-point `Tensor` representing the lower triangular
        matrix. `scale_tril` has shape [N1, N2, ...  k, k], which represents a
        k x k lower triangular matrix.
        When `None` no `scale_tril` term is added to `scale`.
        The upper triangular elements above the diagonal are ignored.
      scale_perturb_factor: Floating-point `Tensor` representing factor matrix
        with last two dimensions of shape `(k, r)`. When `None`, no rank-r
        update is added to `scale`.
      scale_perturb_diag: Floating-point `Tensor` representing the diagonal
        matrix. `scale_perturb_diag` has shape [N1, N2, ...  r], which
        represents an `r x r` diagonal matrix. When `None`, low rank updates
        will take the form `scale_perturb_factor * scale_perturb_factor.T`.
      event_ndims: Scalar `int32` `Tensor` indicating the number of dimensions
        associated with a particular draw from the distribution. Must be 0 or 1.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str` name given to ops managed by this object.

    Raises:
      ValueError: if `scale_perturb_diag` is specified but
        `scale_perturb_factor` is not.
      TypeError: if `shift` has different `dtype` from `scale` arguments.
    """
    self._graph_parents = []
    self._name = name
    self._validate_args = validate_args
    # Ambiguous definition of low rank update.
    if scale_perturb_diag is not None and scale_perturb_factor is None:
      raise ValueError("When scale_perturb_diag is specified, "
                       "scale_perturb_factor must be specified.")
    # Special case, only handling a scaled identity matrix. We don't know its
    # dimensions, so this is special cased.
    # We don't check identity_multiplier, since below we set it to 1. if all
    # other scale args are None.
    self._is_only_identity_multiplier = (scale_tril is None and
                                         scale_diag is None and
                                         scale_perturb_factor is None)
    # When no args are specified, pretend the scale matrix is the identity
    # matrix.
    if self._is_only_identity_multiplier and scale_identity_multiplier is None:
      scale_identity_multiplier = 1.
    with self._name_scope("init", values=[
        shift, scale_identity_multiplier, scale_diag, scale_tril,
        scale_perturb_diag, scale_perturb_factor, event_ndims]):
      event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
      if validate_args:
        is_less_than_two = check_ops.assert_less(
            event_ndims, 2,
            message="event_ndims must be 0 or 1")
        event_ndims = control_flow_ops.with_dependencies(
            [is_less_than_two], event_ndims)
      self._shift = _as_tensor(shift, "shift")
      # self._create_scale_operator returns an OperatorPD in all cases except
      # when self._is_only_identity_multiplier is true, in which case it
      # returns a scalar Tensor.
      self._scale = self._create_scale_operator(
          identity_multiplier=scale_identity_multiplier,
          diag=scale_diag,
          tril=scale_tril,
          perturb_diag=scale_perturb_diag,
          perturb_factor=scale_perturb_factor,
          event_ndims=event_ndims,
          validate_args=validate_args)
      if (self._shift is not None and
          self._shift.dtype.base_dtype != self._scale.dtype.base_dtype):
        raise TypeError("shift.dtype({}) does not match scale.dtype({})".format(
            self._shift.dtype, self._scale.dtype))
      self._shaper = _DistributionShape(
          batch_ndims=self._infer_batch_ndims(),
          event_ndims=event_ndims,
          validate_args=validate_args)
      super(Affine, self).__init__(
          event_ndims=event_ndims,
          graph_parents=(
              [event_ndims] +
              [self._scale] if tensor_util.is_tensor(self._scale)
              else self._scale.inputs +
              [self._shift] if self._shift is not None else []),
          is_constant_jacobian=True,
          dtype=self._scale.dtype,
          validate_args=validate_args,
          name=name)
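The `scale_perturb_factor`/`scale_perturb_diag` arguments documented above build a low-rank update on top of the diagonal term. A small sketch, again assuming the TF 1.x contrib path and purely illustrative values:

```python
import numpy as np
import tensorflow as tf

# scale = diag([1., 1.]) + u @ diag([2.]) @ u.T, with u = [[1.], [1.]]
#       = [[3., 2.],
#          [2., 3.]]
affine = tf.contrib.distributions.bijectors.Affine(
    scale_diag=[1., 1.],
    scale_perturb_factor=[[1.], [1.]],
    scale_perturb_diag=[2.])

x = np.array([[1., 0.]], dtype=np.float32)
y = affine.forward(x)  # expected: [[3., 2.]]
```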
Example #29
  def __init__(self,
               shift=None,
               scale=None,
               validate_args=False,
               name="affine_linear_operator"):
    """Instantiates the `AffineLinearOperator` bijector.

    Args:
      shift: Floating-point `Tensor`.
      scale:  Subclass of `LinearOperator`. Represents the (batch) positive
        definite matrix `M` in `R^{k x k}`.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str` name given to ops managed by this object.

    Raises:
      TypeError: if `scale` is not a `LinearOperator`.
      TypeError: if `shift.dtype` does not match `scale.dtype`.
      ValueError: if not `scale.is_non_singular`.
    """
    self._graph_parents = []
    self._name = name
    self._validate_args = validate_args
    graph_parents = []
    with self._name_scope("init", values=[shift]):
      # In the absence of `shift` and `scale`, we'll assume `dtype` is `float32`.
      dtype = dtypes.float32

      if shift is not None:
        shift = ops.convert_to_tensor(shift, name="shift")
        graph_parents += [shift]
        dtype = shift.dtype.base_dtype
      self._shift = shift

      if scale is not None:
        if (shift is not None and
            shift.dtype.base_dtype != scale.dtype.base_dtype):
          raise TypeError(
              "shift.dtype({}) is incompatible with scale.dtype({}).".format(
                  shift.dtype, scale.dtype))
        if not isinstance(scale, linear_operator.LinearOperator):
          raise TypeError("scale is not an instance of tf.LinearOperator")
        if validate_args and not scale.is_non_singular:
          raise ValueError("Scale matrix must be non-singular.")
        graph_parents += scale.graph_parents
        if scale.tensor_rank is not None:
          batch_ndims = scale.tensor_rank - 2
        else:
          batch_ndims = scale.tensor_rank_tensor() - 2
          graph_parents += [batch_ndims]
        if scale.dtype is not None:
          dtype = scale.dtype.base_dtype
      else:
        batch_ndims = 0  # We won't need shape inference when scale is None.
      self._scale = scale
      self._shaper = _DistributionShape(
          batch_ndims=batch_ndims,
          event_ndims=1,
          validate_args=validate_args)
      super(AffineLinearOperator, self).__init__(
          forward_min_event_ndims=1,
          graph_parents=graph_parents,
          is_constant_jacobian=True,
          dtype=dtype,
          validate_args=validate_args,
          name=name)
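In practice these bijectors are usually paired with a base distribution through `TransformedDistribution`. A hedged sketch assuming the TF 1.x `tf.contrib.distributions` namespace and its argument names; the shapes and values are illustrative:

```python
import tensorflow as tf

ds = tf.contrib.distributions  # assumed TF 1.x contrib namespace

scale = tf.linalg.LinearOperatorDiag([1., 2.])
affine = ds.bijectors.AffineLinearOperator(shift=[1., -1.], scale=scale)

# Affine-transform a standard normal; event_shape lifts the scalar base
# distribution to a length-2 event.
mvn = ds.TransformedDistribution(
    distribution=ds.Normal(loc=0., scale=1.),
    bijector=affine,
    event_shape=[2])

samples = mvn.sample(3)
log_prob = mvn.log_prob(samples)
```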
Example #30
    def __init__(self,
                 shift=None,
                 scale=None,
                 event_ndims=1,
                 validate_args=False,
                 name="affine_linear_operator"):
        """Instantiates the `AffineLinearOperator` bijector.

    Args:
      shift: Floating-point `Tensor`.
      scale:  Subclass of `LinearOperator`. Represents the (batch) positive
        definite matrix `M` in `R^{k x k}`.
      event_ndims: Scalar `integer` `Tensor` indicating the number of dimensions
        associated with a particular draw from the distribution. Must be 0 or 1.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str` name given to ops managed by this object.

    Raises:
      ValueError: if `event_ndims` is not 0 or 1.
      TypeError: if `scale` is not a `LinearOperator`.
      TypeError: if `shift.dtype` does not match `scale.dtype`.
      ValueError: if not `scale.is_non_singular`.
    """
        self._graph_parents = []
        self._name = name
        self._validate_args = validate_args
        graph_parents = []
        with self._name_scope("init", values=[shift]):
            event_ndims = ops.convert_to_tensor(event_ndims,
                                                name="event_ndims")
            if tensor_util.constant_value(event_ndims) is not None:
                event_ndims = tensor_util.constant_value(event_ndims)
                if event_ndims not in (0, 1):
                    raise ValueError(
                        "event_ndims({}) was not 0 or 1".format(event_ndims))
            else:
                if validate_args:
                    # Shape tool will catch if event_ndims is negative.
                    event_ndims = control_flow_ops.with_dependencies([
                        check_ops.assert_less(
                            event_ndims,
                            2,
                            message="event_ndims must be 0 or 1")
                    ], event_ndims)
                graph_parents += [event_ndims]

            # In the absence of `shift` and `scale`, we'll assume `dtype` is `float32`.
            dtype = dtypes.float32

            if shift is not None:
                shift = ops.convert_to_tensor(shift, name="shift")
                graph_parents += [shift]
                dtype = shift.dtype.base_dtype
            self._shift = shift

            if scale is not None:
                if (shift is not None
                        and shift.dtype.base_dtype != scale.dtype.base_dtype):
                    raise TypeError(
                        "shift.dtype({}) is incompatible with scale.dtype({})."
                        .format(shift.dtype, scale.dtype))
                if not isinstance(scale, linear_operator.LinearOperator):
                    raise TypeError(
                        "scale is not an instance of tf.LinearOperator")
                if validate_args and not scale.is_non_singular:
                    raise ValueError("Scale matrix must be non-singular.")
                graph_parents += scale.graph_parents
                if scale.tensor_rank is not None:
                    batch_ndims = scale.tensor_rank - 2
                else:
                    batch_ndims = scale.tensor_rank_tensor() - 2
                    graph_parents += [batch_ndims]
                if scale.dtype is not None:
                    dtype = scale.dtype.base_dtype
            else:
                batch_ndims = 0  # We won't need shape inference when scale is None.
            self._scale = scale
            self._shaper = _DistributionShape(batch_ndims=batch_ndims,
                                              event_ndims=event_ndims,
                                              validate_args=validate_args)
            super(AffineLinearOperator,
                  self).__init__(event_ndims=event_ndims,
                                 graph_parents=graph_parents,
                                 is_constant_jacobian=True,
                                 dtype=dtype,
                                 validate_args=validate_args,
                                 name=name)
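Finally, since the bijector is invertible by construction, `inverse` undoes `forward`. A short round-trip sketch with illustrative values (import paths as in the sketches above are assumptions):

```python
import tensorflow as tf

scale = tf.linalg.LinearOperatorDiag([2., 4.])
b = tf.contrib.distributions.bijectors.AffineLinearOperator(
    shift=[1., 1.], scale=scale)

y = b.forward([[1., 1.]])  # scale @ x + shift = [[3., 5.]]
x = b.inverse(y)           # recovers [[1., 1.]]
```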