Example #1
import torch

def basis(A):
    """Return an orthonormal basis of the columns of A."""
    if A.is_cuda:
        # torch.orgqr is not available for CUDA tensors
        Q = torch.linalg.qr(A).Q
    else:
        Q = torch.orgqr(*torch.geqrf(A))
    return Q
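
A quick usage check (my addition, not from the source project), assuming the basis function above is in scope:

A = torch.randn(5, 3)                      # tall matrix whose column space we want
Q = basis(A)                               # orthonormal basis of the columns, shape (5, 3)
# The columns should be orthonormal: Q^T Q == I up to floating-point error
print(torch.allclose(Q.T @ Q, torch.eye(3), atol=1e-6))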
Example #2
import torch

def _make_orthogonal(A):
    """Assume that A is a tall matrix.

    Compute the Q factor such that A = QR (A may be complex) and diag(R) is
    real and non-negative.
    """
    X, tau = torch.geqrf(A)
    Q = torch.linalg.householder_product(X, tau)
    # The diagonal of X is the diagonal of R (which is always real), so we normalise by its signs
    Q *= X.diagonal(dim1=-2, dim2=-1).sgn().unsqueeze(-2)
    return Q
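
A small check of the sign normalisation (my sketch; it assumes a tall complex input, which torch.geqrf and torch.linalg.householder_product both support):

A = torch.randn(6, 4, dtype=torch.complex64)
Q = _make_orthogonal(A)
# Q has orthonormal columns: Q^H Q == I up to floating-point error
print(torch.allclose(Q.mH @ Q, torch.eye(4, dtype=Q.dtype), atol=1e-4))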
Example #3
import torch

def basis(A):
    """Return an orthonormal basis of the columns of A."""
    if A.is_cuda:
        # torch.orgqr is not available for CUDA tensors
        Q, _ = torch.qr(A, some=True)
    else:
        Q = torch.orgqr(*torch.geqrf(A))
    return Q
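
On recent PyTorch releases torch.qr is deprecated in favour of torch.linalg.qr, which also runs on CUDA, so the device branch can collapse; a sketch under that assumption (my addition, not from the source project):

import torch

def basis(A):
    """Return an orthonormal basis of the columns of A (torch.linalg API)."""
    # torch.linalg.qr handles both CPU and CUDA tensors, so no branch is needed
    return torch.linalg.qr(A, mode='reduced').Q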
Example #4
def blas_lapack_ops(self):
    m = torch.randn(3, 3)
    a = torch.randn(10, 3, 4)
    b = torch.randn(10, 4, 3)
    v = torch.randn(3)
    return (
        torch.addbmm(m, a, b),
        torch.addmm(torch.randn(2, 3), torch.randn(2, 3),
                    torch.randn(3, 3)),
        torch.addmv(torch.randn(2), torch.randn(2, 3), torch.randn(3)),
        torch.addr(torch.zeros(3, 3), v, v),
        torch.baddbmm(m, a, b),
        torch.bmm(a, b),
        torch.chain_matmul(torch.randn(3, 3), torch.randn(3, 3),
                           torch.randn(3, 3)),
        # torch.cholesky(a), # deprecated
        torch.cholesky_inverse(torch.randn(3, 3)),
        torch.cholesky_solve(torch.randn(3, 3), torch.randn(3, 3)),
        torch.dot(v, v),
        torch.eig(m),
        torch.geqrf(a),
        torch.ger(v, v),
        torch.inner(m, m),
        torch.inverse(m),
        torch.det(m),
        torch.logdet(m),
        torch.slogdet(m),
        torch.lstsq(m, m),
        torch.lu(m),
        torch.lu_solve(m, *torch.lu(m)),
        torch.lu_unpack(*torch.lu(m)),
        torch.matmul(m, m),
        torch.matrix_power(m, 2),
        # torch.matrix_rank(m),
        torch.matrix_exp(m),
        torch.mm(m, m),
        torch.mv(m, v),
        # torch.orgqr(a, m),
        # torch.ormqr(a, m, v),
        torch.outer(v, v),
        torch.pinverse(m),
        # torch.qr(a),
        torch.solve(m, m),
        torch.svd(a),
        # torch.svd_lowrank(a),
        # torch.pca_lowrank(a),
        # torch.symeig(a), # deprecated
        # torch.lobpcg(a, b), # not supported
        torch.trapz(m, m),
        torch.trapezoid(m, m),
        torch.cumulative_trapezoid(m, m),
        # torch.triangular_solve(m, m),
        torch.vdot(v, v),
    )
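
Note that the torch.geqrf entries above return the factorization in LAPACK's compact (a, tau) form rather than explicit matrices; a short sketch (my addition) of how to expand that form:

import torch

A = torch.randn(5, 3)
a, tau = torch.geqrf(A)                       # compact representation of the reflectors
Q = torch.linalg.householder_product(a, tau)  # explicit Q, shape (5, 3)
R = a[:3].triu()                              # R sits in the upper triangle of `a`
print(torch.allclose(Q @ R, A, atol=1e-6))    # Q @ R reconstructs A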
Example #5
    def right_inverse(self, Q: torch.Tensor) -> torch.Tensor:
        if Q.shape != self.shape:
            raise ValueError(
                f"Expected a matrix or batch of matrices of shape {self.shape}. "
                f"Got a tensor of shape {Q.shape}.")

        Q_init = Q
        n, k = Q.size(-2), Q.size(-1)
        transpose = n < k
        if transpose:
            Q = Q.mT
            n, k = k, n

        # We make sure to always copy Q in every path
        if not hasattr(self, "base"):
            # Note [right_inverse expm cayley]
            # If we do not have use_trivialization=True, we just implement the inverse of the
            # forward map for the Householder parametrization. To see why, note that for the Cayley map,
            # we would need to find the matrix X \in R^{n x k} such that:
            # Y = torch.cat([X.tril(), X.new_zeros(n, n - k).expand(*X.shape[:-2], -1, -1)], dim=-1)
            # A = Y - Y.mH
            # cayley(A)[:, :k]
            # gives the original tensor. It is not clear how to do this.
            # Perhaps via some algebraic manipulation involving the QR like that of
            # Corollary 2.2 in Edelman, Arias and Smith?
            if self.orthogonal_map == _OrthMaps.cayley or self.orthogonal_map == _OrthMaps.matrix_exp:
                raise NotImplementedError(
                    "It is not possible to assign to the matrix exponential "
                    "or the Cayley parametrizations when use_trivialization=False."
                )

            # If parametrization == _OrthMaps.householder, make Q orthogonal via the QR decomposition.
            # Here Q is always real because we do not support the Householder map for complex matrices.
            # See note [Householder complex]
            A, tau = torch.geqrf(Q)
            # We want to have a decomposition X = QR with diag(R) > 0, as otherwise we could
            # decompose an orthogonal matrix Q as Q = (-Q)@(-Id), which is a valid QR decomposition.
            # The diagonal of A is the diagonal of R from the QR decomposition.
            A.diagonal(dim1=-2, dim2=-1).sign_()
            # Equality with zero is ok because LAPACK returns exactly zero when it does not want
            # to use a particular reflection
            A.diagonal(dim1=-2, dim2=-1)[tau == 0.] *= -1
            return A.mT if transpose else A
        else:
            if n == k:
                # We check whether Q is orthogonal
                if not _is_orthogonal(Q):
                    Q = _make_orthogonal(Q)
                else:  # Is orthogonal
                    Q = Q.clone()
            else:
                # Complete Q into a full n x n orthogonal matrix
                N = torch.randn(*(Q.size()[:-2] + (n, n - k)),
                                dtype=Q.dtype,
                                device=Q.device)
                Q = torch.cat([Q, N], dim=-1)
                Q = _make_orthogonal(Q)
            self.base = Q

            # It is necessary to return the -Id, as we use the diagonal for the
            # Householder parametrization. Using -Id makes:
            # householder(torch.zeros(m,n)) == torch.eye(m,n)
            # Poor man's version of eye_like
            neg_Id = torch.zeros_like(Q_init)
            neg_Id.diagonal(dim1=-2, dim2=-1).fill_(-1.)
            return neg_Id
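
The right_inverse above is what PyTorch invokes when you assign to an orthogonally parametrized weight; a usage sketch (my addition, assuming a recent PyTorch that exposes torch.nn.utils.parametrizations.orthogonal):

import torch
import torch.nn as nn
from torch.nn.utils.parametrizations import orthogonal

layer = orthogonal(nn.Linear(4, 4))          # registers the parametrization
Q, _ = torch.linalg.qr(torch.randn(4, 4))    # an orthogonal matrix to assign
layer.weight = Q                             # assignment is routed through right_inverse
print(torch.allclose(layer.weight, Q, atol=1e-5))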
Example #6
print('Error:', torch.linalg.norm(M - Q @ R).item())
print('Orthogonality:', torch.linalg.norm(torch.eye(n, n, device=device) - Q @ Q.t()).item())


print('--- torch.qr ---')
begin = time.time()
Q, R = torch.qr(M)
end = time.time()

print('Time:', end - begin)
print('Error:', torch.linalg.norm(M - Q @ R).item())
print('Orthogonality:', torch.linalg.norm(torch.eye(n, n, device=device) - Q @ Q.t()).item())



if device == 'cpu':

    print('--- LAPACK geqrf + orgqr ---')
    begin = time.time()
    a, tau = torch.geqrf(M)
    Q = torch.orgqr(a, tau)
    end = time.time()
    print('Time:', end - begin)

    #print('--- NUMPY linalg.qr ---')
    #M = M.numpy()
    #begin = time.time()
    #Q, R = np.linalg.qr(M)
    #end = time.time()
    #print('Time:', end - begin)
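
On newer PyTorch releases the same timing can be done with the torch.linalg API; a sketch in the style of the script above (my addition; assumes M, n, and device are defined as before):

print('--- torch.linalg.qr ---')
begin = time.time()
Q, R = torch.linalg.qr(M)
end = time.time()

print('Time:', end - begin)
print('Error:', torch.linalg.norm(M - Q @ R).item())
print('Orthogonality:', torch.linalg.norm(torch.eye(n, n, device=device) - Q @ Q.t()).item())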