Code example #1
def rho_block_D_inv_A(A, Dinv):
    """
    Return the (approximate) spectral radius of block D^-1 * A.
    
    Parameters
    ----------
    A : {sparse-matrix}
        size NxN
    Dinv : {array}
        Inverse of diagonal blocks of A
        size (N/blocksize, blocksize, blocksize)

    Returns
    -------
    approximate spectral radius of (Dinv A)

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.relaxation.smoothing import rho_block_D_inv_A
    >>> from pyamg.util.utils import get_block_diag
    >>> import numpy
    >>> A = poisson((10,10), format='csr')
    >>> Dinv = get_block_diag(A, blocksize=4, inv_flag=True)
    >>> rho = rho_block_D_inv_A(A, Dinv)

    """

    if not hasattr(A, 'rho_block_D_inv'):
        import numpy as np
        import scipy.sparse
        from scipy.sparse.linalg import LinearOperator
        from pyamg.util.linalg import approximate_spectral_radius

        blocksize = Dinv.shape[1]
        if Dinv.shape[1] != Dinv.shape[2]:
            raise ValueError('Dinv has incorrect dimensions')
        elif Dinv.shape[0] != A.shape[0] // blocksize:
            raise ValueError('Dinv and A have incompatible dimensions')

        Dinv = scipy.sparse.bsr_matrix(
            (Dinv, np.arange(Dinv.shape[0]), np.arange(Dinv.shape[0] + 1)),
            shape=A.shape)

        # Don't explicitly form Dinv*A
        def matvec(x):
            return Dinv * (A * x)

        D_inv_A = LinearOperator(A.shape, matvec, dtype=A.dtype)

        A.rho_block_D_inv = approximate_spectral_radius(D_inv_A)

    return A.rho_block_D_inv
Code example #2
def _iterative_precondition(A, n, ss_args):
    """
    Internal function for preconditioning the steadystate problem for use
    with iterative solvers.
    """
    if settings.debug:
        logger.debug('Starting preconditioner.')
    _precond_start = time.time()
    try:
        P = spilu(A,
                  permc_spec=ss_args['permc_spec'],
                  drop_tol=ss_args['drop_tol'],
                  diag_pivot_thresh=ss_args['diag_pivot_thresh'],
                  fill_factor=ss_args['fill_factor'],
                  options=dict(ILU_MILU=ss_args['ILU_MILU']))

        M = LinearOperator((n**2, n**2), matvec=P.solve)
        _precond_end = time.time()
        ss_args['info']['permc_spec'] = ss_args['permc_spec']
        ss_args['info']['drop_tol'] = ss_args['drop_tol']
        ss_args['info']['diag_pivot_thresh'] = ss_args['diag_pivot_thresh']
        ss_args['info']['fill_factor'] = ss_args['fill_factor']
        ss_args['info']['ILU_MILU'] = ss_args['ILU_MILU']
        ss_args['info']['precond_time'] = _precond_end - _precond_start

        if settings.debug or ss_args['return_info']:
            if settings.debug:
                logger.debug('Preconditioning succeeded.')
                logger.debug('Precond. time: %f' %
                             (_precond_end - _precond_start))
            L_nnz = P.L.nnz
            U_nnz = P.U.nnz
            ss_args['info']['l_nnz'] = L_nnz
            ss_args['info']['u_nnz'] = U_nnz
            ss_args['info']['ilu_fill_factor'] = (L_nnz + U_nnz) / A.nnz
            e = np.ones(n**2, dtype=int)
            condest = la.norm(M * e, np.inf)
            ss_args['info']['ilu_condest'] = condest
            if settings.debug:
                logger.debug('L NNZ: %i ; U NNZ: %i' % (L_nnz, U_nnz))
                logger.debug('Fill factor: %f' % ((L_nnz + U_nnz) / A.nnz))
                logger.debug('iLU condest: %f' % condest)

    except Exception:
        raise Exception("Failed to build preconditioner. Try increasing " +
                        "fill_factor and/or drop_tol.")

    return M, ss_args
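The `M` returned here is meant to be handed to SciPy's iterative solvers. A minimal self-contained sketch of the same pattern (the random test matrix and ILU parameters below are illustrative, not QuTiP's):

import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import spilu, LinearOperator, gmres

n = 100
A = sp.rand(n, n, density=0.05, format='csc') + sp.identity(n, format='csc')
b = np.ones(n)

ilu = spilu(A, drop_tol=1e-4, fill_factor=10)   # incomplete LU factors of A
M = LinearOperator((n, n), matvec=ilu.solve)    # M applies an approximate A^-1

x, info = gmres(A, b, M=M)
print('converged' if info == 0 else 'info = %d' % info)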
Code example #3
File: test_iterative.py Project: wanglun1996/scipy
def _check_reentrancy(solver, is_reentrant):
    def matvec(x):
        A = np.array([[1.0, 0, 0], [0, 2.0, 0], [0, 0, 3.0]])
        y, info = solver(A, x)
        assert_equal(info, 0)
        return y
    b = np.array([1, 1./2, 1./3])
    op = LinearOperator((3, 3), matvec=matvec, rmatvec=matvec,
                        dtype=b.dtype)

    if not is_reentrant:
        assert_raises(RuntimeError, solver, op, b)
    else:
        y, info = solver(op, b)
        assert_equal(info, 0)
        assert_allclose(y, [1, 1, 1])
Code example #4
File: cg_optimizer.py Project: ahefnycmu/rpsp
def _arrlist_lsq_solve(fA, b, maxiter=10):
    '''
    Minimum-residual solve on lists of numpy arrays.
    fA is a function that takes an array list and outputs an array list.
    b is an array list. Returns x ~= fA^-1 b.
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.minres.html#scipy.sparse.linalg.minres
    '''
    # SciPy's solvers act on flat vectors, so flatten the array list and
    # restore the original shapes inside the matvec.
    shapes = [bb.shape for bb in b]
    sizes = [bb.size for bb in b]
    splits = list(np.cumsum(sizes)[:-1])

    def unflatten(v):
        return [p.reshape(s) for p, s in zip(np.split(v, splits), shapes)]

    def matvec(v):
        return np.concatenate([a.ravel() for a in fA(unflatten(v))])

    n = sum(sizes)
    fAOp = LinearOperator((n, n), matvec=matvec)
    b_flat = np.concatenate([bb.ravel() for bb in b])
    x, info = minres(fAOp, b_flat, shift=1e-8, tol=1e-05, maxiter=maxiter)
    return unflatten(x), np.abs(info)
Code example #5
    def _addSparseArpack(U, s, V, X, k=10):
        # based on scipy.sparse.linalg.svds source code (abcebd5913c323b796379ea7815edc6a1f004d6a)
        m, n = X.shape
        Us = U * s

        def matvec_AH_A(x):
            Ax = Us.dot(V.T.dot(x)) + X.dot(x)
            return V.dot(Us.T.dot(Ax)) + X.T.dot(Ax)

        AH_A = LinearOperator(matvec=matvec_AH_A, shape=(n, n), dtype=X.dtype)

        eigvals, eigvec = scipy.sparse.linalg.eigsh(AH_A, k=k)
        s2 = np.sqrt(eigvals)  # scipy.sqrt was removed; use numpy's sqrt
        V2 = eigvec
        U2 = (Us.dot(V.T.dot(V2)) + X.dot(V2)) / s2
        return U2, s2, V2
Code example #6
        def prec(psi):
            p = prec_inv(psi)

            def _apply(phi):
                # ml = pyamg.smoothed_aggregation_solver(p, phi)
                # out = ml.solve(b=phi, tol=1e-12)
                out = spsolve(p, phi)
                return out

            num_unknowns = len(self.mesh.points)
            return LinearOperator(
                (num_unknowns, num_unknowns),
                dtype=complex,
                matvec=_apply,
                rmatvec=_apply,
            )
Code example #7
def SmoothnessFisher(loss, output, model):
    from scipy.sparse.linalg import eigsh
    from scipy.sparse.linalg import LinearOperator

    ndim = len(torch.nn.utils.parameters_to_vector(model.parameters()))

    def NumpyWrapper(vp):
        vp = vector_to_parameter_list(
            torch.tensor(np.squeeze(vp), device='cuda').float(),
            model.parameters())
        JHJv = FisherVectorProduct(loss, output, model, vp)
        return JHJv.cpu().numpy()

    A = LinearOperator((ndim, ndim), matvec=NumpyWrapper)
    lambda_max = eigsh(A, 1, which='LM', return_eigenvectors=False, tol=1e-2)
    return lambda_max
Code example #8
    def aslinearoperator(self):
        def mv(v):
            if v.ndim == 2 and v.shape[1] == 1:
                v = v[:, 0]
            in_state = _StateRepDense(_np.ascontiguousarray(v, 'd'),
                                      self.state_space)
            return self.acton(in_state).to_dense(on_space='HilbertSchmidt')

        def rmv(v):
            if v.ndim == 2 and v.shape[1] == 1:
                v = v[:, 0]
            in_state = _StateRepDense(_np.ascontiguousarray(v, 'd'),
                                      self.state_space)
            return self.adjoint_acton(in_state).to_dense(
                on_space='HilbertSchmidt')

        return LinearOperator((self.dim, self.dim), matvec=mv,
                              rmatvec=rmv)  # transpose, adjoint, dot, matmat?
Code example #9
def _complex2real(op):
    """For a given complex-valued operator C^n -> C^n, returns the
    corresponding real-valued operator R^{2n} -> R^{2n}."""
    def _jacobian_wrap_apply(x):
        # Build complex-valued representation.
        z = x[0::2] + 1j * x[1::2]
        z_out = op * z
        # Build real-valued representation.
        x_out = np.empty(x.shape)
        x_out[0::2] = z_out.real
        x_out[1::2] = z_out.imag
        return x_out

    return LinearOperator((2 * op.shape[0], 2 * op.shape[1]),
                          _jacobian_wrap_apply,
                          dtype=float)
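A quick sanity check of this real/complex equivalence, as a sketch (the random matrix is illustrative; `_complex2real` is the helper above):

import numpy as np
from scipy.sparse.linalg import aslinearoperator

rng = np.random.default_rng(0)
C = rng.normal(size=(4, 4)) + 1j * rng.normal(size=(4, 4))
z = rng.normal(size=4) + 1j * rng.normal(size=4)

R = _complex2real(aslinearoperator(C))

# interleave real and imaginary parts, apply, and compare
x = np.empty(8)
x[0::2], x[1::2] = z.real, z.imag
x_out = R @ x
z_out = C @ z
assert np.allclose(x_out[0::2], z_out.real)
assert np.allclose(x_out[1::2], z_out.imag)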
Code example #10
def sparse_cg(data):
    A, E, M, _, y0 = data

    def matvec(x):
        Ax = A.dot(x)
        return A.T.dot(sparse.linalg.spsolve(M, Ax)) + E.T.dot(E.dot(x))

    lop = LinearOperator((E.shape[1], E.shape[1]), dtype=float, matvec=matvec)

    ET_b = E.T.dot(y0)
    try:
        out = krylov.cg(lop, ET_b, tol=1.0e-10, maxiter=10000)
        x = out.xk
    except krypy.utils.ConvergenceError:
        x = np.nan
    return x
Code example #11
File: common.py Project: rfeinman/pytorch-minimize
def right_multiplied_operator(J, d):
    """Return J diag(d) as LinearOperator."""
    if isinstance(J, LinearOperator):
        if torch.is_tensor(d):
            d = d.data.cpu().numpy()
        return LinearOperator(J.shape,
                              matvec=lambda x: J.matvec(np.ravel(x) * d),
                              matmat=lambda X: J.matmat(X * d[:, np.newaxis]),
                              rmatvec=lambda x: d * J.rmatvec(x))
    elif isinstance(J, TorchLinearOperator):
        return TorchLinearOperator(J.shape,
                                   matvec=lambda x: J.matvec(x.view(-1) * d),
                                   rmatvec=lambda x: d * J.rmatvec(x))
    else:
        raise ValueError('Expected J to be a LinearOperator or '
                         'TorchLinearOperator but found {}'.format(type(J)))
Code example #12
    def gmres(self, mat, rhs, lu):
        if 1:  # debug toggle: set to 0 to fall back to the direct LU solve
            size = len(rhs)
            A = aslinearoperator(mat)
            M = LinearOperator((size, size), dtype=float, matvec=lu.solve)
            self.counter = 0
            sol, info = gmres(
                A,
                rhs,
                M=M,
                maxiter=10,
                #callback=self.callback,
                tol=1e-12)
            return sol
        else:
            return lu.solve(rhs)
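The `lu` argument is assumed to expose a `.solve` method. SciPy's sparse LU factorization provides exactly that interface; a sketch:

import scipy.sparse as sp
from scipy.sparse.linalg import splu

# diagonally dominant test matrix (illustrative)
mat = sp.rand(50, 50, density=0.1, format='csc') + 10 * sp.identity(50, format='csc')
lu = splu(mat)   # lu.solve(rhs) applies mat^{-1} to rhs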
Code example #13
    def _nurbs_matrix_free(self):
        J = self.J.view().reshape(-1)
        R = self.R.view().reshape(-1)
        W = self.domain.ctrlpts[-1].view()
        W = W.reshape(self.domain.ctrlpts[-1].size,-1)

        # precompute basis matrices
        Bi = self.B[0].multiply(self.quadrature.weights[0])
        for k in range(1, self.domain.dim):
            Bi = kron(Bi, self.B[k].multiply(self.quadrature.weights[k]))
        Bi = Bi.tocsr().multiply(R).multiply(J)
        Bi = Bi.tocsc().multiply(W)

        Bj = self.B[0]
        for k in range(1, self.domain.dim):
            Bj = kron(Bj, self.B[k])
        Bj = Bj.tocsr().multiply(R)
        Bj = Bj.tocsc().multiply(W)

        # form A in matrix-free manner
        def mv(v):
            # Work around the extra probe call SciPy makes to infer the
            # operator dtype when LinearOperator is constructed without
            # dtype= (passing dtype to LinearOperator below would avoid this).
            global _LinOpInit_wthf
            if not _LinOpInit_wthf:
                _LinOpInit_wthf = True
                return v

            global n_it_count
            n_it_count += 1
            print('iter:', n_it_count)

            y = Bi.transpose().tocsr() @ v
            z = self.kernel(
                  _kern_pts_to_mulidx(self.xip),
                  _kern_pts_to_mulidx(self.xip),
                  y,
                  self.data)
            return Bi @ z


        Ashape = (np.prod(self.domain.nbfuns),)*2
        A = LinearOperator(Ashape, matvec=mv)

        # assemble B
        B = Bi.tocsr() @ Bj.transpose().tocsc()

        return A, B
Code example #14
def get_Top_spec(n, coord, direction, state, env, verbosity=0):
    chi = env.chi
    ad = state.get_aux_bond_dims()[0]

    # depending on the direction, get unit-cell length
    if direction == (1, 0) or direction == (-1, 0):
        N = state.lX
    elif direction == (0, 1) or direction == (0, -1):
        N = state.lY
    else:
        raise ValueError("Invalid direction: " + str(direction))

    # multiply vector by transfer-op within torch and pass the result back in numpy
    #  --0 (chi)
    # v--1 (D^2)
    #  --2 (chi)

    # if state and env are on gpu, the matrix-vector product can be performed
    # there as well. Price to pay is the communication overhead of resulting vector
    def _mv(v):
        c0 = coord
        V = torch.as_tensor(v, device=state.device)
        V = V.view(chi, ad * ad, chi)
        for i in range(N):
            V = corrf.apply_TM_1sO(c0,
                                   direction,
                                   state,
                                   env,
                                   V,
                                   verbosity=verbosity)
            c0 = (c0[0] + direction[0], c0[1] + direction[1])
        V = V.view(chi * ad * ad * chi)
        v = V.cpu().numpy()
        return v

    T = LinearOperator((chi * ad * ad * chi, chi * ad * ad * chi), matvec=_mv)
    vals = eigs(T, k=n, v0=None, return_eigenvectors=False)

    # post-process and return as torch tensor with first and second column
    # containing real and imaginary parts respectively
    vals = np.copy(vals[::-1])  # descending order
    vals = (1.0 / np.abs(vals[0])) * vals
    L = torch.zeros((n, 2), dtype=state.dtype, device=state.device)
    L[:, 0] = torch.as_tensor(np.real(vals))
    L[:, 1] = torch.as_tensor(np.imag(vals))

    return L
Code example #15
def LGMRES_solver(mps,
                  direction,
                  left_dominant,
                  right_dominant,
                  inhom,
                  x0,
                  precision=1e-10,
                  nmax=2000,
                  **kwargs):
    """
    See the appendix of arXiv:1801.02219 for details.
    This routine uses scipy's sparse.lgmres module. tf.Tensors are mapped to numpy
    and back to tf.Tensor for each application of the sparse matrix-vector product.
    This is not optimal and will be improved in a future version.
    Args:
        mps (InfiniteMPSCentralGauge):   an infinite mps
        direction (int or str):          if (1,'l','left'): do left multiplication
                                         if (-1,'r','right'): do right multiplication
        left_dominant (tf.Tensor):       tensor of shape (mps.D[0],mps.D[0])
                                         left dominant eigenvector of the unit-cell transfer operator of mps
        right_dominant (tf.Tensor):      tensor of shape (mps.D[-1],mps.D[-1])
                                         right dominant eigenvector of the unit-cell transfer operator of mps
        inhom (tf.Tensor):               vector of shape (mps.D[0]*mps.D[0]) or (mps.D[-1]*mps.D[-1])
    Returns:
        tf.Tensor
    """
    # mps.D[0] has to equal mps.D[-1], so no distinction between direction='l'
    # and direction='r' has to be made here
    if not tf.equal(mps.D[0], mps.D[-1]):
        raise ValueError(
            'in LGMRES_solver: mps.D[0] != mps.D[-1], can only handle infinite MPS!'
        )
    inhom_numpy = tf.reshape(inhom, [mps.D[0] * mps.D[0]]).numpy()
    x0_numpy = tf.reshape(x0, [mps.D[0] * mps.D[0]]).numpy()
    mv = fct.partial(one_minus_pseudo_unitcell_transfer_op, direction, mps,
                     left_dominant, right_dominant)

    LOP = LinearOperator((int(mps.D[0])**2, int(mps.D[-1])**2),
                         matvec=mv,
                         dtype=mps.dtype.as_numpy_dtype)
    out, info = lgmres(A=LOP,
                       b=inhom_numpy,
                       x0=x0_numpy,
                       tol=precision,
                       maxiter=nmax,
                       **kwargs)

    return tf.reshape(tf.convert_to_tensor(out), [mps.D[0], mps.D[0]]), info
Code example #16
def _solve_sparse_cg(A, M, E, y0):
    def matvec(x):
        Ax = A.dot(x)
        return A.T.dot(sparse.linalg.spsolve(M, Ax)) + E.T.dot(E.dot(x))

    lop = LinearOperator(shape=(E.shape[1], E.shape[1]),
                         dtype=float,
                         matvec=matvec)

    ET_b = E.T.dot(y0)
    x, _ = krylov.cg(lop, ET_b, tol=1.0e-10, maxiter=1000)

    # import matplotlib.pyplot as plt
    # plt.semilogy(out.resnorms)
    # plt.grid()
    # plt.show()
    return x
Code example #17
def prox_F1_NC(u, c, p, y, fourier_op):
    n = u.shape[0]

    def mv(x):
        z = np.reshape(x[:n**2] + 1j * x[n**2:], (n, n))
        fx = np.reshape(
            fourier_op.adj_op(c * p**2 * fourier_op.op(z)) + z, (n**2, ))
        return np.concatenate([np.real(fx), np.imag(fx)])

    B = np.reshape(fourier_op.adj_op(c * p**2 * y) + u, (n**2, ))
    BR = np.concatenate([np.real(B), np.imag(B)])

    lin = LinearOperator((2 * n**2, 2 * n**2), matvec=mv)
    xf, _ = cg(lin, BR, tol=1e-6, maxiter=1000)
    xf = np.reshape(xf[:n**2] + 1j * xf[n**2:], (n, n))

    return xf
Code example #18
  def seff(self, sext, comega=1j*0.0):
    """ This computes an effective two point field (scalar non-local potential) given an external two point field.
        L = L0 (1 - K L0)^-1
        We want therefore an effective X_eff for a given X_ext
        X_eff = (1 - K L0)^-1 X_ext   or   we need to solve linear equation
        (1 - K L0) X_eff = X_ext  

        The operator (1 - K L0) is named self.sext2seff_matvec """
    
    from scipy.sparse.linalg import gmres, lgmres as gmres_alias, LinearOperator
    assert sext.size==(self.norbs2), "%r,%r"%(sext.size,self.norbs2)

    self.comega_current = comega
    op = LinearOperator((self.norbs2,self.norbs2), matvec=self.sext2seff_matvec, dtype=self.dtypeComplex)
    sext_shape = np.require(sext.reshape(self.norbs2), dtype=self.dtypeComplex, requirements='C')
    resgm,info = gmres_alias(op, sext_shape, tol=self.tddft_iter_tol)
    return (resgm.reshape([self.norbs,self.norbs]),info)
Code example #19
    def normalized_laplacian_norm(self, tol=None):
        if tol is None:
            tol = self.setup.eigs_tol

        n = self.core.n
        d_invsqrt = 1 / np.sqrt(self.core.apply(np.ones(n)))

        def matvec(v):
            return v - d_invsqrt * self.core.apply(d_invsqrt * v)

        nrm = eigsh(LinearOperator((n, n), dtype=np.float64, matvec=matvec),
                    k=1,
                    which='LM',
                    tol=tol,
                    return_eigenvectors=False)[0]

        return nrm
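The same pattern, standalone and checked against a dense computation; a sketch in which a random 0/1 adjacency matrix plays the role of `self.core.apply`:

import numpy as np
from scipy.sparse.linalg import LinearOperator, eigsh

rng = np.random.default_rng(0)
n = 20
A = np.triu(rng.integers(0, 2, size=(n, n)), 1)
A = A + A.T                          # symmetric 0/1 adjacency matrix
deg = A.sum(axis=1).astype(float)
deg[deg == 0] = 1.0                  # guard against isolated vertices
d_invsqrt = 1.0 / np.sqrt(deg)

def matvec(v):
    # apply I - D^{-1/2} A D^{-1/2}
    return v - d_invsqrt * (A @ (d_invsqrt * v))

nrm = eigsh(LinearOperator((n, n), dtype=np.float64, matvec=matvec),
            k=1, which='LM', return_eigenvectors=False)[0]

L = np.eye(n) - d_invsqrt[:, None] * A * d_invsqrt[None, :]
assert np.isclose(nrm, np.abs(np.linalg.eigvalsh(L)).max())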
Code example #20
    def jacobian(self, psi, mu):
        keo = pyfvm.get_fvm_matrix(self.mesh, edge_kernels=[Energy(mu)])
        cv = self.mesh.control_volumes

        def _apply_jacobian(phi):
            return (keo * phi) / cv + alpha * phi + beta * phi.conj()

        alpha = self.V + self.g * 2.0 * (psi.real**2 + psi.imag**2)
        beta = self.g * psi**2

        num_unknowns = len(self.mesh.points)
        return LinearOperator(
            (num_unknowns, num_unknowns),
            dtype=complex,
            matvec=_apply_jacobian,
            rmatvec=_apply_jacobian,
        )
Code example #21
def MXfunc(A, At, d1, d2, p1, p2, p3):
    """
    Compute P^{-1}X (PCG)

    y = P^{-1}*x
    """

    def matvec(vec):
        n = vec.shape[0] // 2
        x1 = vec[:n]
        x2 = vec[n:]

        return np.hstack([p1 * x1 - p2 * x2,
                          -p2 * x1 + p3 * x2])

    N = 2 * p1.shape[0]
    return LinearOperator((N, N), matvec=matvec)
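A sketch checking the returned operator against its dense block form (values are illustrative; the unused arguments are passed as None):

import numpy as np

rng = np.random.default_rng(0)
n = 5
p1, p2, p3 = rng.normal(size=(3, n)) ** 2 + 1.0   # positive diagonals

P = MXfunc(None, None, None, None, p1, p2, p3)
vec = rng.normal(size=2 * n)
dense = np.block([[np.diag(p1), -np.diag(p2)],
                  [-np.diag(p2), np.diag(p3)]])
assert np.allclose(P @ vec, dense @ vec)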
Code example #22
def truncated_svd(U, S, V=None):
  if V is None:  # `not V` is ambiguous for numpy arrays
    V = U
  n = U.shape[0]
  m = V.shape[0]

  def mat_vec(x):
    output_vec = np.dot(V.T, x)
    output_vec = S * output_vec
    return np.dot(U, output_vec)

  def rmat_vec(x):
    output_vec = np.dot(U.T, x)
    output_vec = S * output_vec
    return np.dot(V, output_vec)

  return LinearOperator((n, m), mat_vec, rmatvec=rmat_vec)
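Usage sketch: rebuild the operator from NumPy SVD factors and compare against the dense product U @ diag(S) @ V.T:

import numpy as np

rng = np.random.default_rng(0)
M = rng.normal(size=(6, 4))
U, S, Vt = np.linalg.svd(M, full_matrices=False)

op = truncated_svd(U, S, Vt.T)
x = rng.normal(size=4)
assert np.allclose(op @ x, M @ x)                     # forward matvec
assert np.allclose(op.rmatvec(M @ x), M.T @ (M @ x))  # adjoint matvec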
Code example #23
File: 1_virtual_boson.py Project: baaelbre/gcTNS
    def FindSSR(Q, R):

        chi = Q.shape[0]

        # construct transfer matrix handle and cast to LinearOperator
        def transferRightHandle(rho):
            rho = rho.reshape(chi, chi)
            return np.reshape(
                rho @ np.conj(Q).T + Q @ rho + R @ rho @ np.conj(R).T, chi**2)

        transferRight = LinearOperator((chi**2, chi**2),
                                       matvec=transferRightHandle)

        # calculate fixed point
        lam, r = eigs(transferRight, k=1, which='LM')

        return r.reshape(chi, chi)
Code example #24
def right_multiplied_operator(J, d):
    """Return J diag(d) as LinearOperator."""
    J = aslinearoperator(J)

    def matvec(x):
        return J.matvec(np.ravel(x) * d)

    def matmat(X):
        return J.matmat(X * d[:, np.newaxis])

    def rmatvec(x):
        return d * J.rmatvec(x)

    return LinearOperator(J.shape,
                          matvec=matvec,
                          matmat=matmat,
                          rmatvec=rmatvec)
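A sketch verifying the wrapper against the dense product J @ diag(d) (random values are illustrative; `aslinearoperator` is assumed imported as in the module above):

import numpy as np

rng = np.random.default_rng(0)
J = rng.normal(size=(5, 3))
d = rng.normal(size=3)

op = right_multiplied_operator(J, d)
x = rng.normal(size=3)
y = rng.normal(size=5)
assert np.allclose(op.matvec(x), J @ np.diag(d) @ x)
assert np.allclose(op.rmatvec(y), np.diag(d) @ J.T @ y)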
Code example #25
def multigrid_stat(nu, f, u_guess, m, nu1, nu2, level, maxIter=50, tol=1e-12):
    """
    Use multigrid as a solver with the stationary method as a smoother.
    In this case we try to solve the initial system.

    Parameters
    ----------
    nu : positive real number
        regularization parameter
    f : ndarray
        right-hand side of the system
    u_guess : ndarray
        initial guess for the current run
    m : integer
        size of the current matrix
    nu1 : integer
        number of pre-smoothing steps used
    nu2 : integer
        number of post-smoothing steps used
    level : int
        maximum number of levels the algorithm should use in the recursion
    maxIter : int, optional
        maximum number of V-cycle iterations
    tol : float, optional
        tolerance on the relative residual

    Returns
    -------
    u_sol : ndarray
        solution after the last post-smoothing step
    res_his : list
        norms of the (relative) residuals
    k : int
        number of iterations needed

    """
    norm = create_norm(m)
    u_sol = u_guess
    op = get_system(m, nu)
    C = LinearOperator((m**2, m**2), op)
    res = norm(f - C(u_sol))
    res0 = res
    k = 1
    res_his = []
    res_his.append(res)
    while res / res0 >= tol and k < maxIter and res <= 10e10:
        u_sol, res = vcycle_stat(nu, nu1, nu2, m, u_sol, f, level)
        k += 1
        res_his.append(res / res0)
    return (u_sol, res_his, k)
Code example #26
def left_multiplied_operator(J, d):
    """Return diag(d) J as LinearOperator."""
    J = aslinearoperator(J)

    def matvec(x):
        return d * J.matvec(x)

    def matmat(X):
        return d[:, np.newaxis] * J.matmat(X)

    def rmatvec(x):
        return J.rmatvec(x.ravel() * d)

    return LinearOperator(J.shape,
                          matvec=matvec,
                          matmat=matmat,
                          rmatvec=rmatvec)
Code example #27
File: mylinearsolve.py Project: agoel00/MBAweb
def mylinearsolve(X, b, n):
    def myresidual(x):
        xtop = x[:n]
        xbottom = x[n:]
        Axtop = xtop + (X @ xbottom)
        Axbottom = (X.conj().T @ xtop) + xbottom
        Ax = np.concatenate((Axtop, Axbottom))
        return Ax

    myresid = LinearOperator((2 * n, 2 * n), matvec=myresidual)

    zeta, _ = cg(myresid, b, tol=1e-8, maxiter=100)
    alpha = zeta[:n]
    beta = zeta[n:]
    alpha, beta = alpha.reshape((n, -1)), beta.reshape((n, -1))

    return alpha, beta
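The matvec above applies the Hermitian block matrix [[I, X], [X^H, I]]. A sketch checking the CG solve against a dense solve for a small, well-conditioned X (illustrative values):

import numpy as np

rng = np.random.default_rng(0)
n = 4
X = 0.1 * rng.normal(size=(n, n))   # keep ||X|| < 1 so the block matrix is positive definite
b = rng.normal(size=2 * n)

alpha, beta = mylinearsolve(X, b, n)
dense = np.block([[np.eye(n), X], [X.conj().T, np.eye(n)]])
zeta = np.linalg.solve(dense, b)
assert np.allclose(np.concatenate([alpha.ravel(), beta.ravel()]), zeta, atol=1e-6)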
Code example #28
def solve_eqn(B, grid_shape, d, dd, P, q, v, i):
    """Construct and solve the linear system of equations corresponding to a single PDE."""
    N = np.prod(grid_shape)
    m, n = 24, 25  # integer axis labels for np.einsum's interleaved operand/sublist form

    def a(u):
        u = u.reshape(grid_shape)
        Au = (u +
              0.5 * np.sum(q[j] * np.einsum(d[1:, 1:], (j, m), u, B[:s] +
                                            (m, ) + B[s + 1:]) -
                           P[j, j] * np.einsum(dd[1:, 1:], (j, m), u, B[:s] +
                                               (m, ) + B[s + 1:])
                           for s, j in enumerate(B)) -
              np.sum(P[j, k] * np.einsum(
                  d[1:, 1:], (j, m),
                  np.einsum(d[1:, 1:], (k, n), u, B[:t] +
                            (n, ) + B[t + 1:]), B[:s] + (m, ) + B[s + 1:])
                     for (s, j), (t, k) in combinations(enumerate(B), 2)))
        return Au.reshape(N)

    A = LinearOperator((N, N), matvec=a)
    b = (np.ones(grid_shape, dtype=float) - 0.5 * np.sum(
        q[j] * np.einsum(d[1:, 0],
                         (j, ), v[B[:s] + B[s + 1:]], B[:s] + B[s + 1:]) -
        P[j, j] * np.einsum(dd[1:, 0],
                            (j, ), v[B[:s] + B[s + 1:]], B[:s] + B[s + 1:])
        for s, j in enumerate(B) if i != j) + np.sum(P[j, k] * (
            (0. if i in (j, k) else np.einsum(
                d[1:, 0], (j, ),
                np.einsum(d[1:, 0],
                          (k, ), v[B[:s] + B[s + 1:t] + B[t + 1:]], B[:s] +
                          B[s + 1:t] + B[t + 1:]), B[:s] + B[s + 1:])) +
            (0. if i == j else np.einsum(
                d[1:, 0], (j, ),
                np.einsum(d[1:, 1:],
                          (k, n), v[B[:s] + B[s + 1:]], B[:s] + B[s + 1:t] +
                          (n, ) + B[t + 1:]), B[:s] + B[s + 1:])) +
            (0. if i == k else np.einsum(
                d[1:, 0], (k, ),
                np.einsum(d[1:, 1:], (j, m), v[B[:t] + B[t + 1:]], B[:s] +
                          (m, ) + B[s + 1:t] + B[t + 1:]), B[:t] + B[t + 1:]))
        ) for (s, j), (t, k) in combinations(enumerate(B), 2)))
    b = b.reshape(N)
    u, err = bicgstab(A, b)
    assert err == 0
    return u.reshape(grid_shape)
Code example #29
File: 1_virtual_boson.py Project: baaelbre/gcTNS
    def FindSSL(Q, R):

        chi = Q.shape[0]

        # construct transfer matrix handle and cast to LinearOperator
        def transferLeftHandle(rho):
            rho = rho.reshape(chi, chi)
            return np.reshape(
                rho @ Q + np.conj(Q).T @ rho + np.conj(R).T @ rho @ R, chi**2)

        transferLeft = LinearOperator((chi**2, chi**2),
                                      matvec=transferLeftHandle)

        # calculate fixed point
        lam, l = eigs(transferLeft, k=1, which='LM')

        return l.reshape(chi, chi)
Code example #30
    def expand_t(self, v):
        """
        Expand array in dual basis

        This is similar to `evaluate`, but more accurate: the coefficients are
        obtained by solving the linear system Ax = b with the conjugate
        gradient (CG) method.

        If `v` is a matrix of size `self.count`-by-..., `B` is the change-of-basis
        matrix of this basis, and `x` is a matrix of size `self.sz`-by-...,
        the function calculates x = (B * B')^(-1) * B * v, where the rows of `B`
        and columns of `x` are read as vectorized arrays.

        :param v: An array whose first dimension is to be expanded in this
            basis's dual. This dimension must be equal to `self.count`.
        :return: The coefficients of `v` expanded in the dual of `basis`. If more
            than one vector is supplied in `v`, the higher dimensions of the return
            value correspond to second and higher dimensions of `v`.

        .. seealso:: expand
        """
        ensure(v.shape[0] == self.count,
               f'First dimension of v must be {self.count}')

        v, sz_roll = unroll_dim(v, 2)
        b = vol_to_vec(self.evaluate(v))

        operator = LinearOperator(
            shape=(self.nres ** 3, self.nres ** 3),
            matvec=lambda x: vol_to_vec(self.evaluate(self.evaluate_t(vec_to_vol(x))))
        )

        # TODO: (from MATLAB implementation) - Check that this tolerance make sense for multiple columns in v
        tol = 10 * np.finfo(v.dtype).eps
        logger.info('Expanding array in dual basis')
        v, info = cg(operator, b, tol=tol)

        v = v[..., np.newaxis]

        if info != 0:
            raise RuntimeError('Unable to converge!')

        v = roll_dim(v, sz_roll)
        x = vec_to_vol(v)

        return x