Code Example #1
    def __rmul__(self, other):
        # Evaluate other * self. Dense, sparse and LinearOperator operands
        # are handled via the identity other * self == (self.T * other.T).T,
        # so the existing left-multiplication kernels can be reused.
        if other.__class__ in [_np.ndarray, _np.matrix]:
            dense = True
        elif _sp.issparse(other):
            dense = False
        elif ishamiltonian(other):
            return self._rmul_hamiltonian(other)
        elif isinstance(other, LinearOperator):
            return LinearOperator.dot(other.transpose(),
                                      self.transpose()).transpose()
        elif _np.isscalar(other):
            return self._mul_scalar(other)
        else:
            dense = True
            other = _np.asanyarray(other)

        if dense:
            if other.ndim == 1:
                return self.T._matvec(other)
            elif other.ndim == 2:
                if self._shape[0] != other.shape[1]:
                    raise ValueError(
                        "dimension mismatch with shapes {0} and {1}".format(
                            self._shape, other.shape))
                return (self.T._matmat(other.T)).T
            else:
                raise ValueError(
                    "expected a 1d or 2d array, got {}d".format(other.ndim))
        else:
            if self._shape[0] != other.shape[1]:
                raise ValueError(
                    "dimension mismatch with shapes {0} and {1}".format(
                        self._shape, other.shape))
            return (self.T._mul_sparse(other.T)).T
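
Every non-scalar branch above leans on the identity (B·A)^T = A^T·B^T to express right multiplication through left multiplication. A minimal sketch verifying that identity with plain NumPy; the arrays are made up and merely play the roles of `other` and the operator:

import numpy as np

other = np.random.rand(3, 4)  # stands in for the left factor
H = np.random.rand(4, 4)      # stands in for the operator itself

# other @ H computed directly and via the transpose identity used above
direct = other @ H
via_transpose = (H.T @ other.T).T
assert np.allclose(direct, via_transpose)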
Code Example #2
import numpy as np
import scipy.sparse
from scipy.sparse.linalg import spilu, LinearOperator

# Set up the block solver (BlockSolve and A are defined elsewhere in the project)

solver = BlockSolve(A, 50)

solver.find()

solver.form_preconditioner()

P_sparse = scipy.sparse.csr_matrix(solver.P)

# Testing against the ILU

iLU = spilu(scipy.sparse.csc_matrix(A), fill_factor=1, drop_tol=0)
iLUx = lambda x: iLU.solve(x)
iLU_P = LinearOperator(A.shape, iLUx)

ILU_product = iLU_P.dot(A.todense())

# Form a product

product = A.dot(solver.P)

# Compare the condition numbers of the two preconditioned systems

R_cond = np.linalg.cond(product)

ILU_cond = np.linalg.cond(ILU_product)

print(
    "R_product condition number was {} and ILU product condition number was {}, "
    "so R_product is {} times better than ILU_product".format(
        R_cond, ILU_cond, ILU_cond / R_cond))
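
The `iLU_P` wrapper above is the standard pattern for turning a factorization's solve method into a preconditioner. A minimal sketch of feeding such an operator to an iterative solver, using only SciPy; the test matrix here is made up for illustration:

import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import spilu, LinearOperator, gmres

A = (sp.random(100, 100, density=0.05) + 10 * sp.eye(100)).tocsc()
b = np.ones(100)

ilu = spilu(A)
M = LinearOperator(A.shape, ilu.solve)  # approximates x -> A^-1 x

x, info = gmres(A, b, M=M)
assert info == 0  # 0 means the solver converged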
Code Example #3
import numpy as np
import scipy.sparse as sparse
import scipy.sparse.linalg
from scipy.sparse.linalg import LinearOperator


class DualLinearSys:
    def __init__(self, A, B, nc, sigma=0.0, precond=None, projection=None):
        ''' Creates a linear operator that applies A^-1 B for the
        saddle-point system

        A = [K C^H]
            [C  0 ]

        B = [M  0]
            [0  0]

        so that solve(b) returns [u, lambda]^T = A^-1 B b,

        where the Lagrange multiplier lambda has size nc.
        '''
        self.A = A
        self.B = B
        self.nc = nc
        self.precond = precond
        self.num_iters = 0
        self.ndofs = A.shape[0]
        self.u_dofs = self.ndofs - nc
        self.sigma = sigma

        if projection is None:
            self.P = sparse.eye(self.u_dofs)

        else:
            self.P = projection

        self.M = self.P.conj().T.dot(B[:self.u_dofs, :self.u_dofs]).dot(self.P)
        self.K = A[:self.u_dofs, :self.u_dofs]
        # Factorize the (shifted) stiffness block once and expose the solve
        # as a LinearOperator so it composes with the other operators
        self.lu = sparse.linalg.splu(self.K - self.sigma * self.M)
        self.K_inv = LinearOperator((self.u_dofs, self.u_dofs),
                                    matvec=self.lu.solve)
        self.C = A[self.u_dofs:, :self.u_dofs]
        # Schur complement F = C K^-1 C^H, applied matrix-free
        self.F = LinearOperator((nc, nc), matvec=self.F_operator)

    def F_operator(self, b):

        return self.C.dot(self.K_inv(self.C.conj().T.dot(b)))

    def solve(self, b):
        M = self.M
        u = self.P.dot(b[:self.u_dofs])
        u_prime = self.K_inv.dot((M.dot(u)))
        lambda_n1 = sparse.linalg.cg(self.F,
                                     self.C.dot(u_prime),
                                     M=self.precond,
                                     callback=self.counter,
                                     tol=1e-16)[0]

        u_n1 = self.P.dot(u_prime - self.K_inv(self.C.conj().T.dot(lambda_n1)))
        return np.concatenate((u_n1, lambda_n1))

    def counter(self, xk):
        ''' count number of iterations
        '''
        self.num_iters += 1
        #print(self.num_iters)

    def normM(self, b):
        B = self.B
        b_prime = np.array(B.dot(b)).flatten()
        return b.dot(b_prime)

    def getLinearOperator(self):
        ndof = self.A.shape[0]
        return LinearOperator((ndof, ndof), matvec=self.solve)
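
A sketch of how such an operator is typically consumed: the A^-1 B operator from getLinearOperator is handed to ARPACK's eigs, whose largest eigenvalues correspond to the lowest constrained vibration modes. The small saddle-point system below (diagonal stiffness, unit mass, constraints on the first nc dofs) is made up purely for illustration, and it assumes a SciPy version where cg still accepts the tol keyword used in solve above:

import numpy as np
import scipy.sparse as sparse
from scipy.sparse.linalg import eigs

n_u, nc = 20, 2
K = sparse.diags(np.linspace(1.0, 10.0, n_u)).tocsc()  # stiffness block
M = sparse.eye(n_u, format='csc')                      # mass block
C = sparse.eye(nc, n_u, format='csc')                  # constrain first nc dofs

A = sparse.bmat([[K, C.conj().T], [C, None]]).tocsc()
B = sparse.bmat([[M, None], [None, sparse.csc_matrix((nc, nc))]]).tocsc()

op = DualLinearSys(A, B, nc).getLinearOperator()
vals, vecs = eigs(op, k=3, which='LM')  # largest 1/omega^2 -> lowest modes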
Code Example #4
import numpy as np
from scipy.sparse.linalg import LinearOperator, lsqr, eigs


class ConvolutionMatrix(object):
    """ Convolution matrix container.
    """
    def __init__(self, A=None, mv=None, rmv=None, shape=None):
        self.A = A
        if A is not None:
            self.shape = A.shape

            def mv(v):
                return A @ v

            def rmv(v):
                return A.conjugate().T @ v
        elif any([mv is not None, rmv is not None]):
            if shape:
                self.shape = shape
            else:
                raise RuntimeError(
                    'If A is not given, its shape must be provided.')
            if not callable(mv):
                raise RuntimeError(
                    'Input mv was not a function. Both mv and rmv should be '
                    'functions, or both empty.')
            elif not callable(rmv):
                raise RuntimeError(
                    'Input rmv was not a function. Both mv and rmv should be '
                    'functions, or both empty.')
        else:
            # Either A or both multiplication functions are needed
            raise RuntimeError(
                'A was not an ndarray, and both multiplication functions '
                'A(x) and At(x) were not provided.')
        self.m = self.shape[0]
        self.n = self.shape[1]
        self.matrix = LinearOperator(self.shape, matvec=mv, rmatvec=rmv)
        self.check_adjoint()

    def validate_input(self, b0, opts):
        assert (np.abs(b0) == b0).all(), 'b must be real-valued and non-negative'

        if opts.customx0:
            assert np.shape(opts.customx0) == (
                self.n, 1), 'customx0 must be a column vector of length n'

    def check_adjoint(self):
        """ Check that A and At are indeed adjoints of one another
        """
        y = np.random.randn(self.m)
        Aty = self.matrix.rmatvec(y)
        x = np.random.randn(self.n)
        Ax = self.matrix.matvec(x)
        inner_product1 = Ax.conjugate().T @ y
        inner_product2 = x.conjugate().T @ Aty
        error = np.abs(inner_product1 -
                       inner_product2) / np.abs(inner_product1)
        assert error < 1e-3, 'Invalid measurement operator: At is not the adjoint of A. Error = %.2e' % error
        print('Adjoint check passed, relative error:', error)

    def hermitic(self):
        return

    def lsqr(self, b, tol, maxit, x0):
        """ Solution of the least squares problem for ConvolutionMatrix
        Gkp, opts.tol/100, opts.max_inner_iters, gk
        """
        # lsqr expects a 1-d right-hand side and starting vector
        if b.ndim > 1:
            b = b.reshape(-1)
        if x0.ndim > 1:
            x0 = x0.reshape(-1)
        ret = lsqr(self.matrix,
                   b,
                   damp=0.01,
                   atol=tol / 100,
                   btol=tol / 100,
                   iter_lim=maxit,
                   x0=x0)
        x = ret[0]
        return x

    def hmul(self, x):
        """ Hermitic mutliplication
        returns At*x
        """
        return self.matrix.rmatvec(x)

    def __mul__(self, x):
        return self.matrix.matvec(x)

    def __matmul__(self, x):
        """Implementation of left ConvolutionMatrix multiplication, i.e. A@x"""
        return self.matrix.dot(x)

    def __rmatmul__(self, x):
        """Right ConvolutionMatrix multiplication, i.e. x@A (not implemented)"""
        return NotImplemented

    def __rmul__(self, x):
        """Scalar multiplication x * A; requires an explicit A"""
        return x * self.A  # This is not optimal

    def calc_yeigs(self, m, b0, idx, squared=True):
        if squared:
            v = (idx * b0**2).reshape(-1)
        else:
            v = (idx * b0).reshape(-1)

        def ymatvec(x):
            return 1 / m * self.matrix.rmatvec(v * self.matrix.matvec(x))

        yfun = LinearOperator((self.n, self.n), matvec=ymatvec)
        eigval, x0 = eigs(yfun, k=1, which='LR', tol=1E-5)
        return x0
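
Because the constructor accepts pure matvec/rmatvec callables, a natural use is a matrix-free circular convolution, where the operator and its adjoint each cost one FFT. A minimal sketch under that assumption; the kernel and size are made up, ConvolutionMatrix is the class above, and check_adjoint verifies the pair at construction:

import numpy as np

n = 64
kernel = np.random.randn(n)
k_hat = np.fft.fft(kernel)

def conv_mv(v):
    # y = kernel (*) v, circular convolution via the DFT
    return np.fft.ifft(k_hat * np.fft.fft(v))

def conv_rmv(v):
    # adjoint of circular convolution: conjugate the spectrum
    return np.fft.ifft(np.conj(k_hat) * np.fft.fft(v))

C_op = ConvolutionMatrix(mv=conv_mv, rmv=conv_rmv, shape=(n, n))
y = C_op * np.random.randn(n)  # dispatches to conv_mv via the LinearOperator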
Code Example #5
import numpy as np
import scipy as sp
import scipy.linalg
from scipy.sparse.linalg import LinearOperator


def vibration_modes_lanczos(K,
                            M,
                            n=10,
                            shift=0.0,
                            Kinv_operator=None,
                            niter_max=40,
                            rtol=1E-14):
    r"""
    Perform a modal analysis using a naive Lanczos iteration.

    Parameters
    ----------
    K : array_like
        stiffness matrix
    M : array_like
        mass matrix
    n : int
        number of modes to be computed
    shift : float
        shift the eigenvalue problem such that the eigenfrequencies around the shift (omega) are found
    Kinv_operator : LinearOperator
        LinearOperator solving Kx=b (i.e. multiplication :math:`K^{-1} b`)
        if None, the scipy eigsh solver will be used instead
    niter_max : int, optional
        Maximum number of Lanczos iterations
    rtol : float, optional
        relative tolerance

    Returns
    -------
    om : ndarray, shape(n)
        eigenfrequencies of the system
    Phi : ndarray, shape(ndim, n)
        Vibration modes of the system

    Note
    ----
    In contrast to the vibration_modes method, this method can use different
    linear solvers, supplied via the Kinv_operator argument, together with a
    naive Lanczos iteration. The modal_analysis method uses the ARPACK solver,
    which is more accurate but also much slower for large systems, since the
    factorization via SuperLU is very inefficient.
    """
    if Kinv_operator is None:
        return vibration_modes(K, M, n, shift)

    if shift != 0.0:
        raise NotImplementedError(
            'The shift has not been implemented yet in the Lanczos solver')

    # trace of K, used to normalize the modal residuals below
    k_diag = K.diagonal().sum()

    # build up Krylov sequence
    n_rand = n
    n_dim = M.shape[0]

    residual = np.zeros(n)
    b = np.random.rand(n_dim, n_rand)
    b, _ = sp.linalg.qr(b, mode='economic')
    krylov_subspace = b

    def matvec(v):
        return Kinv_operator.dot(M.dot(v))

    A = LinearOperator(shape=(n_dim, n_dim), matvec=matvec)

    for n_iter in range(niter_max):
        print('Lanczos iteration # {}. '.format(n_iter), end='')
        new_directions = A.dot(b)
        krylov_subspace = np.concatenate((krylov_subspace, new_directions),
                                         axis=1)
        krylov_subspace, _ = sp.linalg.qr(krylov_subspace, mode='economic')
        b = krylov_subspace[:, -n_rand:]

        # solve the reduced (projected) eigenvalue problem
        K_red = krylov_subspace.T @ K @ krylov_subspace
        M_red = krylov_subspace.T @ M @ krylov_subspace
        lambda_r, Phi_r = sp.linalg.eigh(K_red,
                                         M_red,
                                         overwrite_a=True,
                                         overwrite_b=True)
        Phi = krylov_subspace @ Phi_r[:, :n]

        # check whether the relative residual of each mode is below rtol
        for i in range(n):
            residual[i] = np.sum(abs(
                (-lambda_r[i] * M + K) @ Phi[:, i])) / k_diag

        print('Res max: {:.2e}'.format(np.max(residual)))
        if np.max(residual) < rtol:
            break

    else:
        # only reached when the loop exhausted niter_max iterations
        # without the residual dropping below rtol
        print('No convergence gained in the given iteration steps.')

    print('The Lanczos solver took ' +
          '{} iterations to solve for {} eigenvectors.'.format(n_iter + 1, n))
    omega = np.sqrt(abs(lambda_r[:n]))
    V = Phi[:, :n]
    # Slight hack: the sign of any negative eigenvalue is transferred
    # to the corresponding eigenfrequency
    omega[lambda_r[:n] < 0] *= -1

    return omega, V
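
A sketch of driving the routine with a custom factorization wrapped as a LinearOperator, which is the scenario the Kinv_operator argument exists for. The small finite-difference test system below is made up for illustration:

import numpy as np
import scipy.sparse as sparse
from scipy.sparse.linalg import splu, LinearOperator

n_dim = 100
K = sparse.diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(n_dim, n_dim)).tocsc()
M = sparse.eye(n_dim, format='csc')

lu = splu(K)  # any factorization or iterative solver could stand in here
Kinv = LinearOperator(K.shape, matvec=lu.solve)

omega, V = vibration_modes_lanczos(K, M, n=5, Kinv_operator=Kinv)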