Example #1
    def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder):
        shape = list(shape)
        assert shape[-1] == shape[-2]

        batch_shape = shape[:-2]
        num_rows = shape[-1]

        # Uniform values with magnitude at least 1 (bounded away from the origin),
        # which keeps the operator well conditioned.
        # Shape: batch_shape
        multiplier = linear_operator_test_util.random_sign_uniform(
            shape=batch_shape, minval=1., maxval=2., dtype=dtype)

        operator = linalg_lib.LinearOperatorScaledIdentity(
            num_rows, multiplier)

        # The multiplier is the only Tensor arg; when using a placeholder, feed it
        # so its shape is unknown statically.
        if use_placeholder:
            multiplier_ph = array_ops.placeholder(dtype=dtype)
            multiplier = multiplier.eval()
            operator = linalg_lib.LinearOperatorScaledIdentity(
                num_rows, multiplier_ph)
            feed_dict = {multiplier_ph: multiplier}
        else:
            feed_dict = None

        multiplier_matrix = array_ops.expand_dims(
            array_ops.expand_dims(multiplier, -1), -1)
        mat = multiplier_matrix * linalg_ops.eye(
            num_rows, batch_shape=batch_shape, dtype=dtype)

        return operator, mat, feed_dict
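The harness above pairs a LinearOperatorScaledIdentity with its explicit dense matrix so the shared base-class tests can compare the two representations. Below is a minimal standalone sketch of that equivalence; it assumes the TF 2.x public tf.linalg API (which mirrors the linalg_lib/array_ops/linalg_ops aliases used in the tests):

import tensorflow as tf

multiplier = tf.constant([1.5, -1.2])                    # batch_shape = [2]
operator = tf.linalg.LinearOperatorScaledIdentity(num_rows=3, multiplier=multiplier)

# Dense equivalent: the multiplier broadcast against a batch of identity matrices.
dense = multiplier[..., tf.newaxis, tf.newaxis] * tf.eye(3, batch_shape=[2])

tf.debugging.assert_near(operator.to_dense(), dense)     # both have shape [2, 3, 3]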
Example #2
  def test_diag_matmul(self):
    operator1 = linalg_lib.LinearOperatorDiag([2., 3.])
    operator2 = linalg_lib.LinearOperatorDiag([1., 2.])
    operator3 = linalg_lib.LinearOperatorScaledIdentity(
        num_rows=2, multiplier=3.)
    operator_matmul = operator1.matmul(operator2)
    self.assertTrue(isinstance(
        operator_matmul,
        linalg_lib.LinearOperatorDiag))
    self.assertAllClose([2., 6.], self.evaluate(operator_matmul.diag))

    operator_matmul = operator2.matmul(operator1)
    self.assertTrue(isinstance(
        operator_matmul,
        linalg_lib.LinearOperatorDiag))
    self.assertAllClose([2., 6.], self.evaluate(operator_matmul.diag))

    operator_matmul = operator1.matmul(operator3)
    self.assertTrue(isinstance(
        operator_matmul,
        linalg_lib.LinearOperatorDiag))
    self.assertAllClose([6., 9.], self.evaluate(operator_matmul.diag))

    operator_matmul = operator3.matmul(operator1)
    self.assertTrue(isinstance(
        operator_matmul,
        linalg_lib.LinearOperatorDiag))
    self.assertAllClose([6., 9.], self.evaluate(operator_matmul.diag))
Example #3
 def test_is_x_flags(self):
   operator = linalg_lib.LinearOperatorScaledIdentity(
       num_rows=2, multiplier=1.,
       is_positive_definite=False, is_non_singular=True)
   self.assertFalse(operator.is_positive_definite)
   self.assertTrue(operator.is_non_singular)
   self.assertTrue(operator.is_self_adjoint is None)
Example #4
    def test_diag_solve(self):
        operator1 = linalg_lib.LinearOperatorDiag([2., 3.],
                                                  is_non_singular=True)
        operator2 = linalg_lib.LinearOperatorDiag([1., 2.],
                                                  is_non_singular=True)
        operator3 = linalg_lib.LinearOperatorScaledIdentity(
            num_rows=2, multiplier=3., is_non_singular=True)
        operator_solve = operator1.solve(operator2)
        self.assertTrue(
            isinstance(operator_solve, linalg_lib.LinearOperatorDiag))
        self.assertAllClose([0.5, 2 / 3.], self.evaluate(operator_solve.diag))

        operator_solve = operator2.solve(operator1)
        self.assertTrue(
            isinstance(operator_solve, linalg_lib.LinearOperatorDiag))
        self.assertAllClose([2., 3 / 2.], self.evaluate(operator_solve.diag))

        operator_solve = operator1.solve(operator3)
        self.assertTrue(
            isinstance(operator_solve, linalg_lib.LinearOperatorDiag))
        self.assertAllClose([3 / 2., 1.], self.evaluate(operator_solve.diag))

        operator_solve = operator3.solve(operator1)
        self.assertTrue(
            isinstance(operator_solve, linalg_lib.LinearOperatorDiag))
        self.assertAllClose([2 / 3., 1.], self.evaluate(operator_solve.diag))
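The two tests above exercise the registered composition rules: matmul and solve between LinearOperatorDiag and LinearOperatorScaledIdentity return another LinearOperatorDiag, so the result can be read directly off .diag. A short eager-mode sketch of the same behavior, assuming the public tf.linalg API:

import tensorflow as tf

diag_op = tf.linalg.LinearOperatorDiag([2., 3.], is_non_singular=True)
scaled_op = tf.linalg.LinearOperatorScaledIdentity(
    num_rows=2, multiplier=3., is_non_singular=True)

product = diag_op.matmul(scaled_op)    # diag([2, 3]) @ (3*I)     -> diag([6, 9])
quotient = diag_op.solve(scaled_op)    # diag([2, 3])^-1 @ (3*I)  -> diag([1.5, 1])

print(type(product).__name__, product.diag.numpy())     # LinearOperatorDiag [6. 9.]
print(type(quotient).__name__, quotient.diag.numpy())   # LinearOperatorDiag [1.5 1.]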
Example #5
    def test_broadcast_matmul_and_solve(self):
        # These cannot be done in the automated (base test class) tests since they
        # test shapes that tf.batch_matmul cannot handle.
        # In particular, tf.batch_matmul does not broadcast.
        with self.cached_session() as sess:
            # Given this x and LinearOperatorScaledIdentity shape of (2, 1, 3, 3), the
            # broadcast shape of operator and 'x' is (2, 2, 3, 4)
            x = random_ops.random_normal(shape=(1, 2, 3, 4))

            # operator is 2.2 * identity (with a batch shape).
            operator = linalg_lib.LinearOperatorScaledIdentity(
                num_rows=3, multiplier=2.2 * array_ops.ones((2, 1)))

            # Batch matrix of zeros with the broadcast shape of x and operator.
            zeros = array_ops.zeros(shape=(2, 2, 3, 4), dtype=x.dtype)

            # Test matmul
            expected = x * 2.2 + zeros
            operator_matmul = operator.matmul(x)
            self.assertAllEqual(operator_matmul.get_shape(),
                                expected.get_shape())
            self.assertAllClose(*self.evaluate([operator_matmul, expected]))

            # Test solve
            expected = x / 2.2 + zeros
            operator_solve = operator.solve(x)
            self.assertAllEqual(operator_solve.get_shape(),
                                expected.get_shape())
            self.assertAllClose(*self.evaluate([operator_solve, expected]))
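Since the batch shapes follow NumPy-style broadcasting, the test above can be reproduced in a few eager-mode lines. A sketch under the same shapes, using the public API as an assumed stand-in for the internal aliases:

import tensorflow as tf

x = tf.random.normal(shape=(1, 2, 3, 4))                          # batch shape (1, 2)
operator = tf.linalg.LinearOperatorScaledIdentity(
    num_rows=3, multiplier=2.2 * tf.ones((2, 1)))                  # batch shape (2, 1)

y = operator.matmul(x)        # batch shapes broadcast: (2, 1) and (1, 2) -> (2, 2)
print(y.shape)                # (2, 2, 3, 4)
tf.debugging.assert_near(y, 2.2 * tf.broadcast_to(x, (2, 2, 3, 4)))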
Example #6
    def test_broadcast_matmul_and_solve_scalar_scale_multiplier(self):
        # These cannot be done in the automated (base test class) tests since they
        # test shapes that tf.batch_matmul cannot handle.
        # In particular, tf.batch_matmul does not broadcast.
        with self.cached_session() as sess:
            # Given this x and LinearOperatorScaledIdentity shape of (3, 3), the
            # broadcast shape of operator and 'x' is (1, 2, 3, 4), which is the same
            # shape as x.
            x = random_ops.random_normal(shape=(1, 2, 3, 4))

            # operator is 2.2 * identity (no batch shape).
            operator = linalg_lib.LinearOperatorScaledIdentity(num_rows=3,
                                                               multiplier=2.2)

            # Test matmul
            expected = x * 2.2
            operator_matmul = operator.matmul(x)
            self.assertAllEqual(operator_matmul.get_shape(),
                                expected.get_shape())
            self.assertAllClose(*self.evaluate([operator_matmul, expected]))

            # Test solve
            expected = x / 2.2
            operator_solve = operator.solve(x)
            self.assertAllEqual(operator_solve.get_shape(),
                                expected.get_shape())
            self.assertAllClose(*self.evaluate([operator_solve, expected]))
Example #7
  def _operator_and_matrix(self, build_info, dtype, use_placeholder):
    shape = list(build_info.shape)
    assert shape[-1] == shape[-2]

    batch_shape = shape[:-2]
    num_rows = shape[-1]

    # Uniform values with magnitude at least 1 (bounded away from the origin),
    # which keeps the operator well conditioned.
    # Shape: batch_shape
    multiplier = linear_operator_test_util.random_sign_uniform(
        shape=batch_shape, minval=1., maxval=2., dtype=dtype)


    # Nothing to feed since LinearOperatorScaledIdentity takes no Tensor args.
    lin_op_multiplier = multiplier

    if use_placeholder:
      lin_op_multiplier = array_ops.placeholder_with_default(
          multiplier, shape=None)

    operator = linalg_lib.LinearOperatorScaledIdentity(
        num_rows, lin_op_multiplier)

    multiplier_matrix = array_ops.expand_dims(
        array_ops.expand_dims(multiplier, -1), -1)
    matrix = multiplier_matrix * linalg_ops.eye(
        num_rows, batch_shape=batch_shape, dtype=dtype)

    return operator, matrix
Example #8
 def test_convert_variables_to_tensors(self):
     multiplier = variables_module.Variable(1.23)
     operator = linalg_lib.LinearOperatorScaledIdentity(
         num_rows=2, multiplier=multiplier)
     with self.cached_session() as sess:
         sess.run([multiplier.initializer])
         self.check_convert_variables_to_tensors(operator)
Example #9
 def test_scaled_identity_inverse_type(self):
     operator = linalg_lib.LinearOperatorScaledIdentity(
         num_rows=2,
         multiplier=3.,
         is_non_singular=True,
     )
     self.assertIsInstance(operator.inverse(),
                           linalg_lib.LinearOperatorScaledIdentity)
Example #10
 def test_scaled_identity_cholesky_type(self):
     operator = linalg_lib.LinearOperatorScaledIdentity(
         num_rows=2,
         multiplier=3.,
         is_positive_definite=True,
         is_self_adjoint=True,
     )
     self.assertIsInstance(operator.cholesky(),
                           linalg_lib.LinearOperatorScaledIdentity)
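Examples #9 and #10 only check the returned type. Because inverse and cholesky of a scaled identity stay in scaled-identity form, the resulting multiplier can also be inspected directly. A sketch of the expected values, assuming the public API and that the specialized results expose .multiplier as shown:

import tensorflow as tf

operator = tf.linalg.LinearOperatorScaledIdentity(
    num_rows=2, multiplier=3.,
    is_non_singular=True, is_self_adjoint=True, is_positive_definite=True)

inv = operator.inverse()        # (3*I)^-1   = (1/3)*I
chol = operator.cholesky()      # chol(3*I)  = sqrt(3)*I

print(inv.multiplier.numpy())   # ~0.3333
print(chol.multiplier.numpy())  # ~1.7321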
Example #11
    def test_wrong_matrix_dimensions_raises_dynamic(self):
        num_rows = array_ops.placeholder(dtypes.int32)
        x = array_ops.placeholder(dtypes.float32)

        with self.cached_session():
            operator = linalg_lib.LinearOperatorScaledIdentity(
                num_rows, multiplier=[1., 2], assert_proper_shapes=True)
            y = operator.matmul(x)
            with self.assertRaisesOpError("Incompatible.*dimensions"):
                y.eval(feed_dict={num_rows: 2, x: rng.rand(3, 3)})
Example #12
 def test_float16_matmul(self):
   # float16 cannot be tested by base test class because tf.matrix_solve does
   # not work with float16.
   with self.cached_session():
     multiplier = rng.rand(3).astype(np.float16)
     operator = linalg_lib.LinearOperatorScaledIdentity(
         num_rows=2, multiplier=multiplier)
     x = rng.randn(2, 3).astype(np.float16)
     y = operator.matmul(x)
     self.assertAllClose(multiplier[..., None, None] * x, y.eval())
Example #13
    def test_wrong_matrix_dimensions_raises_dynamic(self):
        num_rows = array_ops.placeholder_with_default(2, shape=None)
        x = array_ops.placeholder_with_default(
            rng.rand(3, 3).astype(np.float32), shape=None)

        with self.cached_session():
            with self.assertRaisesError("Dimensions.*not.compatible"):
                operator = linalg_lib.LinearOperatorScaledIdentity(
                    num_rows, multiplier=[1., 2], assert_proper_shapes=True)
                self.evaluate(operator.matmul(x))
Example #14
  def test_identity_solve(self):
    operator1 = linalg_lib.LinearOperatorIdentity(num_rows=2)
    operator2 = linalg_lib.LinearOperatorScaledIdentity(
        num_rows=2, multiplier=3.)
    self.assertTrue(isinstance(
        operator1.solve(operator1),
        linalg_lib.LinearOperatorIdentity))

    operator_solve = operator1.solve(operator2)
    self.assertTrue(isinstance(
        operator_solve,
        linalg_lib.LinearOperatorScaledIdentity))
    self.assertAllClose(3., self.evaluate(operator_solve.multiplier))
Example #15
    def _operator_and_matrix(self,
                             build_info,
                             dtype,
                             use_placeholder,
                             ensure_self_adjoint_and_pd=False):

        shape = list(build_info.shape)
        assert shape[-1] == shape[-2]

        batch_shape = shape[:-2]
        num_rows = shape[-1]

        # Uniform values with magnitude at least 1 (bounded away from the origin),
        # which keeps the operator well conditioned.
        # Shape: batch_shape
        multiplier = linear_operator_test_util.random_sign_uniform(
            shape=batch_shape, minval=1., maxval=2., dtype=dtype)

        if ensure_self_adjoint_and_pd:
            # Abs on complex64 will result in a float32, so we cast back up.
            multiplier = math_ops.cast(math_ops.abs(multiplier), dtype=dtype)

        # Nothing to feed since LinearOperatorScaledIdentity takes no Tensor args.
        lin_op_multiplier = multiplier

        if use_placeholder:
            lin_op_multiplier = array_ops.placeholder_with_default(multiplier,
                                                                   shape=None)

        operator = linalg_lib.LinearOperatorScaledIdentity(
            num_rows,
            lin_op_multiplier,
            is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
            is_positive_definite=True if ensure_self_adjoint_and_pd else None)

        multiplier_matrix = array_ops.expand_dims(
            array_ops.expand_dims(multiplier, -1), -1)
        matrix = multiplier_matrix * linalg_ops.eye(
            num_rows, batch_shape=batch_shape, dtype=dtype)

        return operator, matrix
Example #16
    def test_identity_matmul(self):
        operator1 = linalg_lib.LinearOperatorIdentity(num_rows=2)
        operator2 = linalg_lib.LinearOperatorScaledIdentity(num_rows=2,
                                                            multiplier=3.)
        self.assertIsInstance(operator1.matmul(operator1),
                              linalg_lib.LinearOperatorIdentity)

        self.assertIsInstance(operator2.matmul(operator2),
                              linalg_lib.LinearOperatorScaledIdentity)

        operator_matmul = operator1.matmul(operator2)
        self.assertIsInstance(operator_matmul,
                              linalg_lib.LinearOperatorScaledIdentity)
        self.assertAllClose(3., self.evaluate(operator_matmul.multiplier))

        operator_matmul = operator2.matmul(operator1)
        self.assertIsInstance(operator_matmul,
                              linalg_lib.LinearOperatorScaledIdentity)
        self.assertAllClose(3., self.evaluate(operator_matmul.multiplier))
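The same registered rules make multipliers compose multiplicatively: the product of two scaled identities is a scaled identity whose multiplier is the product of the factors. A brief sketch, again with the public API assumed:

import tensorflow as tf

ident = tf.linalg.LinearOperatorIdentity(num_rows=2)
scaled = tf.linalg.LinearOperatorScaledIdentity(num_rows=2, multiplier=3.)

print(scaled.matmul(scaled).multiplier.numpy())   # 9.0   (3*I @ 3*I = 9*I)
print(ident.matmul(scaled).multiplier.numpy())    # 3.0   (I @ 3*I = 3*I)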
Example #17
 def test_assert_self_adjoint_does_not_raise_when_self_adjoint(self):
     with self.cached_session():
         operator = linalg_lib.LinearOperatorScaledIdentity(
             num_rows=2, multiplier=[1. + 0J])
         operator.assert_self_adjoint().run()  # Should not fail
Example #18
def make_diag_scale(loc=None,
                    scale_diag=None,
                    scale_identity_multiplier=None,
                    shape_hint=None,
                    validate_args=False,
                    assert_positive=False,
                    name=None):
    """Creates a LinOp representing a diagonal matrix.

  Args:
    loc: Floating-point `Tensor`. This is used for inferring shape in the case
      where only `scale_identity_multiplier` is set.
    scale_diag: Floating-point `Tensor` representing the diagonal matrix.
      `scale_diag` has shape [N1, N2, ..., k], which represents a k x k
      diagonal matrix. When `None`, no diagonal term is added to the LinOp.
    scale_identity_multiplier: Floating-point rank-0 `Tensor` representing a
      scaling applied to the identity matrix. When
      `scale_identity_multiplier = scale_diag = None`, then
      `scale += IdentityMatrix`; otherwise no scaled identity matrix is added
      to `scale`.
    shape_hint: scalar integer `Tensor` representing a hint at the dimension of
      the identity matrix when only `scale_identity_multiplier` is set.
    validate_args: Python `bool` indicating whether arguments should be
      checked for correctness.
    assert_positive: Python `bool` indicating whether LinOp should be checked
      for being positive definite.
    name: Python `str` name given to ops managed by this object.

  Returns:
    `LinearOperator` representing a diagonal matrix.

  Raises:
    ValueError:  If only `scale_identity_multiplier` is set and `loc` and
      `shape_hint` are both None.
  """
    def _maybe_attach_assertion(x):
        if not validate_args:
            return x
        if assert_positive:
            return control_flow_ops.with_dependencies([
                check_ops.assert_positive(
                    x, message="diagonal part must be positive"),
            ], x)
        return control_flow_ops.with_dependencies([
            check_ops.assert_none_equal(
                x,
                array_ops.zeros([], x.dtype),
                message="diagonal part must be non-zero")
        ], x)

    with ops.name_scope(name,
                        "make_diag_scale",
                        values=[loc, scale_diag, scale_identity_multiplier]):
        loc = _convert_to_tensor(loc, name="loc")
        scale_diag = _convert_to_tensor(scale_diag, name="scale_diag")
        scale_identity_multiplier = _convert_to_tensor(
            scale_identity_multiplier, name="scale_identity_multiplier")

        if scale_diag is not None:
            if scale_identity_multiplier is not None:
                scale_diag += scale_identity_multiplier[..., array_ops.newaxis]
            return linalg.LinearOperatorDiag(
                diag=_maybe_attach_assertion(scale_diag),
                is_non_singular=True,
                is_self_adjoint=True,
                is_positive_definite=assert_positive)

        if loc is None and shape_hint is None:
            raise ValueError("Cannot infer `event_shape` unless `loc` or "
                             "`shape_hint` is specified.")

        if shape_hint is None:
            shape_hint = loc.shape[-1]

        if scale_identity_multiplier is None:
            return linalg.LinearOperatorIdentity(
                num_rows=shape_hint,
                dtype=loc.dtype.base_dtype,
                is_self_adjoint=True,
                is_positive_definite=True,
                assert_proper_shapes=validate_args)

        return linalg.LinearOperatorScaledIdentity(
            num_rows=shape_hint,
            multiplier=_maybe_attach_assertion(scale_identity_multiplier),
            is_non_singular=True,
            is_self_adjoint=True,
            is_positive_definite=assert_positive,
            assert_proper_shapes=validate_args)
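A hedged usage sketch of the helper defined above (assuming eager execution and that make_diag_scale is in scope): when both scale_diag and scale_identity_multiplier are given, the multiplier is folded into the diagonal and a single LinearOperatorDiag is returned; with only the multiplier and a shape hint, a scaled identity comes back instead. The argument values below are illustrative only.

scale = make_diag_scale(scale_diag=[1., 2.], scale_identity_multiplier=0.5)

print(type(scale).__name__)     # LinearOperatorDiag
print(scale.to_dense())         # [[1.5, 0.0],
                                #  [0.0, 2.5]]

scale = make_diag_scale(scale_identity_multiplier=0.5, shape_hint=3)
print(type(scale).__name__)     # LinearOperatorScaledIdentity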
Example #19
 def test_ref_type_shape_args_raises(self):
     with self.assertRaisesRegexp(TypeError, "num_rows.*reference"):
         linalg_lib.LinearOperatorScaledIdentity(
             num_rows=variables_module.Variable(2), multiplier=1.23)
Example #20
 def test_assert_positive_definite_does_not_raise_when_positive(self):
     with self.cached_session():
         operator = linalg_lib.LinearOperatorScaledIdentity(num_rows=2,
                                                            multiplier=1.)
         operator.assert_positive_definite().run()  # Should not fail
Example #21
 def test_non_scalar_num_rows_raises_static(self):
     # Many "test_...num_rows" tests are performed in LinearOperatorIdentity.
     with self.assertRaisesRegexp(ValueError, "must be a 0-D Tensor"):
         linalg_lib.LinearOperatorScaledIdentity(num_rows=[2],
                                                 multiplier=123.)
Example #22
 def test_wrong_matrix_dimensions_raises_static(self):
     operator = linalg_lib.LinearOperatorScaledIdentity(num_rows=2,
                                                        multiplier=2.2)
     x = rng.randn(3, 3).astype(np.float32)
     with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
         operator.matmul(x)
Example #23
 def test_assert_self_adjoint_raises_when_not_self_adjoint(self):
     with self.cached_session():
         operator = linalg_lib.LinearOperatorScaledIdentity(
             num_rows=2, multiplier=[1. + 1J])
         with self.assertRaisesOpError("not self-adjoint"):
             operator.assert_self_adjoint().run()
Example #24
 def test_tape_safe(self):
     multiplier = variables_module.Variable(1.23)
     operator = linalg_lib.LinearOperatorScaledIdentity(
         num_rows=2, multiplier=multiplier)
     self.check_tape_safe(operator)
Example #25
 def test_assert_non_singular_raises_when_singular(self):
     with self.cached_session():
         operator = linalg_lib.LinearOperatorScaledIdentity(
             num_rows=2, multiplier=[1., 2., 0.])
         with self.assertRaisesOpError("was singular"):
             operator.assert_non_singular().run()
Example #26
 def test_assert_non_singular_does_not_raise_when_non_singular(self):
     with self.cached_session():
         operator = linalg_lib.LinearOperatorScaledIdentity(
             num_rows=2, multiplier=[1., 2., 3.])
         operator.assert_non_singular().run()  # Should not fail
Example #27
 def test_assert_positive_definite_raises_when_negative(self):
     with self.cached_session():
         operator = linalg_lib.LinearOperatorScaledIdentity(num_rows=2,
                                                            multiplier=-1.)
         with self.assertRaisesOpError("not positive definite"):
             operator.assert_positive_definite().run()
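The assertion tests above run the returned ops inside a session. In TF 2.x eager execution the same assert_* methods raise tf.errors.InvalidArgumentError directly (an assumption based on the public API behavior), which makes the checks easy to try interactively:

import tensorflow as tf

ok = tf.linalg.LinearOperatorScaledIdentity(num_rows=2, multiplier=2.)
ok.assert_non_singular()            # passes silently
ok.assert_positive_definite()       # passes silently

bad = tf.linalg.LinearOperatorScaledIdentity(num_rows=2, multiplier=-1.)
try:
    bad.assert_positive_definite()
except tf.errors.InvalidArgumentError as e:
    print("raised:", e.message)     # message mentions "not positive definite"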