def _log_abs_determinant(self):
    logging.warn(
        "Using (possibly slow) default implementation of determinant."
        "  Requires conversion to a dense matrix and O(N^3) operations.")
    if self._can_use_cholesky():
        diag = _linalg.diag_part(linalg_ops.cholesky(self.to_dense()))
        return 2 * math_ops.reduce_sum(math_ops.log(diag), axis=[-1])
    _, log_abs_det = linalg.slogdet(self.to_dense())
    return log_abs_det
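The Cholesky branch uses the identity det A = det(L) det(L^H) = (prod_i L_ii)^2 for A = L L^H, hence log|det A| = 2 * sum_i log L_ii. A minimal NumPy sketch of the two code paths (names here are illustrative, not part of the library):

import numpy as np

rng = np.random.default_rng(0)
b = rng.standard_normal((4, 4))
a = b @ b.T + np.eye(4)  # SPD by construction, so the Cholesky path applies

# Cholesky path: 2 * sum(log(diag(chol(A)))), as in _log_abs_determinant above.
chol_diag = np.diag(np.linalg.cholesky(a))
log_abs_det_chol = 2.0 * np.sum(np.log(chol_diag))

# Fallback path: slogdet on the dense matrix.
_, log_abs_det_ref = np.linalg.slogdet(a)
assert np.isclose(log_abs_det_chol, log_abs_det_ref)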
Example 2
def _dense_solve(self, rhs, adjoint=False, adjoint_arg=False):
    """Solve by conversion to a dense matrix."""
    if self.is_square is False:  # pylint: disable=g-bool-id-comparison
        raise NotImplementedError(
            "Solve is not yet implemented for non-square operators.")
    rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
    if self._can_use_cholesky():
        return linalg_ops.cholesky_solve(
            linalg_ops.cholesky(self.to_dense()), rhs)
    return linear_operator_util.matrix_solve_with_broadcast(
        self.to_dense(), rhs, adjoint=adjoint)
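On this path the `adjoint` flag can be dropped because `_can_use_cholesky` implies a self-adjoint operator, so A^H = A. Conceptually, a Cholesky solve replaces one general solve with two triangular solves; a minimal NumPy sketch of that equivalence (illustrative names, plain `np.linalg.solve` standing in for dedicated triangular solvers):

import numpy as np

rng = np.random.default_rng(1)
b = rng.standard_normal((3, 3))
a = b @ b.T + np.eye(3)         # SPD, so A = chol @ chol^H
rhs = rng.standard_normal((3, 2))

chol = np.linalg.cholesky(a)
y = np.linalg.solve(chol, rhs)  # solve chol y = rhs (lower-triangular system)
x = np.linalg.solve(chol.T, y)  # solve chol^H x = y (upper-triangular system)
assert np.allclose(x, np.linalg.solve(a, rhs))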
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
    """Default implementation of _solve."""
    if self.is_square is False:
        raise NotImplementedError(
            "Solve is not yet implemented for non-square operators.")
    logging.warn(
        "Using (possibly slow) default implementation of solve."
        "  Requires conversion to a dense matrix and O(N^3) operations.")
    rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
    if self._can_use_cholesky():
        return linear_operator_util.cholesky_solve_with_broadcast(
            linalg_ops.cholesky(self.to_dense()), rhs)
    return linear_operator_util.matrix_solve_with_broadcast(
        self.to_dense(), rhs, adjoint=adjoint)
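The `adjoint` and `adjoint_arg` flags used throughout these defaults mean: solve against A^H rather than A, and treat rhs^H (rather than rhs) as the right-hand side, respectively. A NumPy illustration of the two flags (hypothetical example values):

import numpy as np

rng = np.random.default_rng(2)
a = rng.standard_normal((3, 3)) + 3.0 * np.eye(3)  # comfortably non-singular
rhs = rng.standard_normal((2, 3))

# adjoint_arg=True: the system is A x = rhs^H.
x = np.linalg.solve(a, rhs.conj().T)
assert np.allclose(a @ x, rhs.conj().T)

# adjoint=True as well: the system becomes A^H x = rhs^H.
x_adj = np.linalg.solve(a.conj().T, rhs.conj().T)
assert np.allclose(a.conj().T @ x_adj, rhs.conj().T)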
Example 4
def _assert_positive_definite(self):
    """Default implementation of _assert_positive_definite."""
    logging.warn(
        "Using (possibly slow) default implementation of "
        "assert_positive_definite."
        "  Requires conversion to a dense matrix and O(N^3) operations.")
    # If the operator is self-adjoint, then checking that the Cholesky
    # decomposition succeeds and yields a positive diagonal is necessary
    # and sufficient for positive definiteness.
    if self.is_self_adjoint:
        return check_ops.assert_positive(
            _linalg.diag_part(linalg_ops.cholesky(self.to_dense())),
            message="Matrix was not positive definite.")
    # We have no generic check for positive definite.
    raise NotImplementedError("assert_positive_definite is not implemented.")
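For a self-adjoint matrix, a successful Cholesky factorization with a strictly positive diagonal is exactly the positive-definiteness test this assertion relies on; a non-positive-definite input makes the factorization fail. A small NumPy demonstration (values chosen for illustration):

import numpy as np

spd = np.array([[2.0, 1.0],
                [1.0, 2.0]])     # eigenvalues 1 and 3: positive definite
not_pd = np.array([[1.0, 2.0],
                   [2.0, 1.0]])  # eigenvalues 3 and -1: indefinite

assert np.all(np.diag(np.linalg.cholesky(spd)) > 0)
try:
    np.linalg.cholesky(not_pd)
except np.linalg.LinAlgError:
    print("Cholesky failed: matrix is not positive definite.")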
Example 5
    def _solve(self, rhs, adjoint=False, adjoint_arg=False):
        if self.base_operator.is_non_singular is False:
            raise ValueError(
                "Solve not implemented unless this is a perturbation of a "
                "non-singular LinearOperator.")
        # The Woodbury formula gives:
        # https://en.wikipedia.org/wiki/Woodbury_matrix_identity
        #   (L + UDV^H)^{-1}
        #   = L^{-1} - L^{-1} U (D^{-1} + V^H L^{-1} U)^{-1} V^H L^{-1}
        #   = L^{-1} - L^{-1} U C^{-1} V^H L^{-1}
        # where C is the capacitance matrix, C := D^{-1} + V^H L^{-1} U
        # Note also that, with ^{-H} being the inverse of the adjoint,
        #   (L + UDV^H)^{-H}
        #   = L^{-H} - L^{-H} V C^{-H} U^H L^{-H}
        l = self.base_operator
        if adjoint:
            # If adjoint, U and V have flipped roles in the operator.
            v, u = self._get_uv_as_tensors()
            # The capacitance should still be computed from the original
            # u = self.u and v = self.v.  After the "flip" above, the local
            # names v and u hold exactly those tensors, so passing u=v, v=u
            # here builds the original C; the adjoint of C is then handled
            # by the `adjoint` argument of matrix_solve_with_broadcast below.
            capacitance = self._make_capacitance(u=v, v=u)
        else:
            u, v = self._get_uv_as_tensors()
            capacitance = self._make_capacitance(u=u, v=v)

        # L^{-1} rhs
        linv_rhs = l.solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
        # V^H L^{-1} rhs
        vh_linv_rhs = _linalg.matmul(v, linv_rhs, adjoint_a=True)
        # C^{-1} V^H L^{-1} rhs
        if self._use_cholesky:
            capinv_vh_linv_rhs = linalg_ops.cholesky_solve(
                linalg_ops.cholesky(capacitance), vh_linv_rhs)
        else:
            capinv_vh_linv_rhs = linear_operator_util.matrix_solve_with_broadcast(
                capacitance, vh_linv_rhs, adjoint=adjoint)
        # U C^{-1} V^H L^{-1} rhs
        u_capinv_vh_linv_rhs = _linalg.matmul(u, capinv_vh_linv_rhs)
        # L^{-1} U C^{-1} V^H L^{-1} rhs
        linv_u_capinv_vh_linv_rhs = l.solve(u_capinv_vh_linv_rhs,
                                            adjoint=adjoint)

        # (L^{-1} - L^{-1} U C^{-1} V^H L^{-1}) rhs
        return linv_rhs - linv_u_capinv_vh_linv_rhs
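A numeric check of the Woodbury identity the method implements, in NumPy (all names below are illustrative):

import numpy as np

rng = np.random.default_rng(3)
n, k = 5, 2
l = np.diag(rng.uniform(1.0, 2.0, size=n))  # non-singular base operator L
u = rng.standard_normal((n, k))
v = rng.standard_normal((n, k))
d = np.diag(rng.uniform(1.0, 2.0, size=k))
rhs = rng.standard_normal((n, 1))

# Direct solve against the dense A = L + U D V^H.
a = l + u @ d @ v.conj().T
x_direct = np.linalg.solve(a, rhs)

# Woodbury: A^{-1} rhs = L^{-1} rhs - L^{-1} U C^{-1} V^H L^{-1} rhs,
# with capacitance C = D^{-1} + V^H L^{-1} U.
linv_rhs = np.linalg.solve(l, rhs)
cap = np.linalg.inv(d) + v.conj().T @ np.linalg.solve(l, u)
x_woodbury = linv_rhs - np.linalg.solve(
    l, u @ np.linalg.solve(cap, v.conj().T @ linv_rhs))
assert np.allclose(x_direct, x_woodbury)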
Example 6
    def _log_abs_determinant(self):
        # Recall
        #   det(L + UDV^H) = det(D^{-1} + V^H L^{-1} U) det(D) det(L)
        #                  = det(C) det(D) det(L)
        log_abs_det_d = self.diag_operator.log_abs_determinant()
        log_abs_det_l = self.base_operator.log_abs_determinant()

        if self._use_cholesky:
            chol_cap_diag = _linalg.diag_part(
                linalg_ops.cholesky(self._make_capacitance()))
            log_abs_det_c = 2 * math_ops.reduce_sum(
                math_ops.log(chol_cap_diag), axis=[-1])
        else:
            det_c = _linalg.det(self._make_capacitance())
            log_abs_det_c = math_ops.log(math_ops.abs(det_c))
            if np.issubdtype(self.dtype, np.complexfloating):
                log_abs_det_c = _ops.cast(log_abs_det_c, dtype=self.dtype)

        return log_abs_det_c + log_abs_det_d + log_abs_det_l
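The identity in the comment is the matrix determinant lemma; a NumPy spot-check in log space (illustrative values, with U = V for the symmetric case):

import numpy as np

rng = np.random.default_rng(4)
n, k = 4, 2
l = np.diag(rng.uniform(1.0, 3.0, size=n))
u = rng.standard_normal((n, k))
d = np.diag(rng.uniform(0.5, 1.5, size=k))

# det(L + U D U^H) = det(D^{-1} + U^H L^{-1} U) * det(D) * det(L)
a = l + u @ d @ u.T
cap = np.linalg.inv(d) + u.T @ np.linalg.solve(l, u)
lhs = np.log(abs(np.linalg.det(a)))
rhs = (np.log(abs(np.linalg.det(cap)))
       + np.log(abs(np.linalg.det(d)))
       + np.log(abs(np.linalg.det(l))))
assert np.isclose(lhs, rhs)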
Example 7
    def _solve(self, rhs, adjoint=False, adjoint_arg=False):
        if self.base_operator.is_non_singular is False:
            raise ValueError(
                "Solve not implemented unless this is a perturbation of a "
                "non-singular LinearOperator.")
        # The Woodbury formula gives:
        # https://en.wikipedia.org/wiki/Woodbury_matrix_identity
        #   (L + UDV^H)^{-1}
        #   = L^{-1} - L^{-1} U (D^{-1} + V^H L^{-1} U)^{-1} V^H L^{-1}
        #   = L^{-1} - L^{-1} U C^{-1} V^H L^{-1}
        # where C is the capacitance matrix, C := D^{-1} + V^H L^{-1} U
        # Note also that, with ^{-H} being the inverse of the adjoint,
        #   (L + UDV^H)^{-H}
        #   = L^{-H} - L^{-H} V C^{-H} U^H L^{-H}
        l = self.base_operator
        if adjoint:
            v = self.u
            u = self.v
        else:
            v = self.v
            u = self.u

        # L^{-1} rhs
        linv_rhs = l.solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
        # V^H L^{-1} rhs
        vh_linv_rhs = _linalg.matmul(v, linv_rhs, adjoint_a=True)
        # C^{-1} V^H L^{-1} rhs
        if self._use_cholesky:
            capinv_vh_linv_rhs = linalg_ops.cholesky_solve(
                linalg_ops.cholesky(self._make_capacitance()), vh_linv_rhs)
        else:
            capinv_vh_linv_rhs = linear_operator_util.matrix_solve_with_broadcast(
                self._make_capacitance(), vh_linv_rhs, adjoint=adjoint)
        # U C^{-1} V^H L^{-1} rhs
        u_capinv_vh_linv_rhs = _linalg.matmul(u, capinv_vh_linv_rhs)
        # L^{-1} U C^{-1} V^H L^{-1} rhs
        linv_u_capinv_vh_linv_rhs = l.solve(u_capinv_vh_linv_rhs,
                                            adjoint=adjoint)

        # (L^{-1} - L^{-1} U C^{-1} V^H L^{-1}) rhs
        return linv_rhs - linv_u_capinv_vh_linv_rhs
def _cholesky_linear_operator(linop):
    return linear_operator_lower_triangular.LinearOperatorLowerTriangular(
        linalg_ops.cholesky(linop.to_dense()),
        is_non_singular=True,
        is_self_adjoint=False,
        is_square=True)
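The hints passed above reflect the structure of a Cholesky factor: it is square and non-singular (positive diagonal) but lower triangular, hence generally not self-adjoint. A NumPy sketch of those invariants (illustrative only):

import numpy as np

rng = np.random.default_rng(5)
b = rng.standard_normal((3, 3))
a = b @ b.T + np.eye(3)  # an SPD operator to factor
chol = np.linalg.cholesky(a)

assert np.allclose(np.triu(chol, k=1), 0.0)  # lower triangular
assert np.all(np.diag(chol) > 0)             # hence non-singular
assert np.allclose(chol @ chol.T, a)         # L L^H recovers the operator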
Example 9
    def __init__(self,
                 base_operator,
                 u,
                 diag_update=None,
                 v=None,
                 is_diag_update_positive=None,
                 is_non_singular=None,
                 is_self_adjoint=None,
                 is_positive_definite=None,
                 is_square=None,
                 name="LinearOperatorLowRankUpdate"):
        """Initialize a `LinearOperatorLowRankUpdate`.

    This creates a `LinearOperator` of the form `A = L + U D V^H`, with
    `L` a `LinearOperator`, `U, V` both [batch] matrices, and `D` a [batch]
    diagonal matrix.

    If `L` is non-singular, solves and determinants are available.
    Solves/determinants both involve a solve/determinant of a `K x K` system.
    In the event that L and D are self-adjoint positive-definite, and U = V,
    this can be done using a Cholesky factorization.  The user should set the
    `is_X` matrix property hints, which will trigger the appropriate code path.

    Args:
      base_operator:  Shape `[B1,...,Bb, M, N]`.
      u:  Shape `[B1,...,Bb, M, K]` `Tensor` of same `dtype` as `base_operator`.
        This is `U` above.
      diag_update:  Optional shape `[B1,...,Bb, K]` `Tensor` with same `dtype`
        as `base_operator`.  This is the diagonal of `D` above.
         Defaults to `D` being the identity operator.
      v:  Optional `Tensor` of same `dtype` as `u` and shape `[B1,...,Bb, N, K]`.
         Defaults to `v = u`, in which case the perturbation is symmetric.
         If `M != N`, then `v` must be set since the perturbation is not square.
      is_diag_update_positive:  Python `bool`.
        If `True`, expect `diag_update > 0`.
      is_non_singular:  Expect that this operator is non-singular.
        Default is `None`, unless `is_positive_definite` is auto-set to be
        `True` (see below).
      is_self_adjoint:  Expect that this operator is equal to its hermitian
        transpose.  Default is `None`, unless `base_operator` is self-adjoint
        and `v = None` (meaning `u=v`), in which case this defaults to `True`.
      is_positive_definite:  Expect that this operator is positive definite.
        Default is `None`, unless `base_operator` is positive definite,
        `v = None` (meaning `u = v`), and `is_diag_update_positive` is `True`,
        in which case this defaults to `True`.
        Note that we say an operator is positive definite when the quadratic
        form `x^H A x` has positive real part for all nonzero `x`.
      is_square:  Expect that this operator acts like square [batch] matrices.
      name: A name for this `LinearOperator`.

    Raises:
      ValueError:  If `is_X` flags are set in an inconsistent way.
    """
        dtype = base_operator.dtype

        if diag_update is not None:
            if is_diag_update_positive and np.issubdtype(
                    dtype, np.complexfloating):
                logging.warn(
                    "Note: setting is_diag_update_positive with a complex "
                    "dtype means that the diagonal is real and positive.")

        if diag_update is None:
            if is_diag_update_positive is False:
                raise ValueError(
                    "Default diagonal is the identity, which is positive.  "
                    "However, the user set 'is_diag_update_positive' to False.")
            is_diag_update_positive = True

        # In this case, we can use a Cholesky decomposition to help us solve/det.
        self._use_cholesky = (base_operator.is_positive_definite
                              and base_operator.is_self_adjoint
                              and is_diag_update_positive and v is None)

        # Possibly auto-set some characteristic flags from None to True.
        # If the flags were set (by the user) incorrectly to False, then raise.
        if base_operator.is_self_adjoint and v is None and not np.issubdtype(
                dtype, np.complexfloating):
            if is_self_adjoint is False:
                raise ValueError(
                    "A = L + UDU^H, with L self-adjoint and D real diagonal.  Since"
                    " UDU^H is self-adjoint, this must be a self-adjoint operator."
                )
            is_self_adjoint = True

        # The conditions for using a Cholesky factorization are sufficient
        # for the operator to be self-adjoint positive definite (SPD), and
        # no weaker choice of these hints implies SPD.  Therefore, the
        # following line reads "if hints indicate SPD..."
        if self._use_cholesky:
            if (is_positive_definite is False or is_self_adjoint is False
                    or is_non_singular is False):
                raise ValueError(
                    "Arguments imply this is self-adjoint positive-definite operator."
                )
            is_positive_definite = True
            is_self_adjoint = True

        values = base_operator.graph_parents + [u, diag_update, v]
        with ops.name_scope(name, values=values):

            # Create U and V.
            self._u = ops.convert_to_tensor(u, name="u")
            if v is None:
                self._v = self._u
            else:
                self._v = ops.convert_to_tensor(v, name="v")

            if diag_update is None:
                self._diag_update = None
            else:
                self._diag_update = ops.convert_to_tensor(diag_update,
                                                          name="diag_update")

            # Create base_operator L.
            self._base_operator = base_operator
            graph_parents = base_operator.graph_parents + [
                self.u, self._diag_update, self.v
            ]
            graph_parents = [p for p in graph_parents if p is not None]

            super(LinearOperatorLowRankUpdate,
                  self).__init__(dtype=self._base_operator.dtype,
                                 graph_parents=graph_parents,
                                 is_non_singular=is_non_singular,
                                 is_self_adjoint=is_self_adjoint,
                                 is_positive_definite=is_positive_definite,
                                 is_square=is_square,
                                 name=name)

            # Create the diagonal operator D.
            self._set_diag_operators(diag_update, is_diag_update_positive)
            self._is_diag_update_positive = is_diag_update_positive

            self._check_shapes()

            # Pre-compute the so-called "capacitance" matrix
            #   C := D^{-1} + V^H L^{-1} U
            self._capacitance = self._make_capacitance()
            if self._use_cholesky:
                self._chol_capacitance = linalg_ops.cholesky(self._capacitance)
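For context, a usage sketch against the public `tf.linalg` API; with these hints `_use_cholesky` is True, so `solve` and `log_abs_determinant` take the Cholesky paths shown in the earlier examples (the specific numbers are illustrative):

import tensorflow as tf

# A = L + U D U^H with L diagonal SPD, D positive diagonal, and v=None (u=v).
base = tf.linalg.LinearOperatorDiag(
    [2.0, 2.0, 2.0], is_self_adjoint=True, is_positive_definite=True)
u = tf.constant([[1.0, 0.0],
                 [0.0, 1.0],
                 [1.0, 1.0]])
operator = tf.linalg.LinearOperatorLowRankUpdate(
    base, u, diag_update=[0.5, 0.5], is_diag_update_positive=True)

rhs = tf.constant([[1.0], [2.0], [3.0]])
x = operator.solve(rhs)                   # Woodbury-based solve
logdet = operator.log_abs_determinant()   # determinant-lemma path
# Sanity check against the dense matrix.
residual = tf.linalg.matmul(operator.to_dense(), x) - rhs
print(float(tf.reduce_max(tf.abs(residual))))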