Example #1
 def check_lortho(self, rtol=1e-5, atol=1e-8):
     """
     check L-orthogonal
     """
     tensm = asxp(
         self.array.reshape([np.prod(self.shape[:-1]), self.shape[-1]]))
     s = tensm.T.conj() @ tensm
     return xp.allclose(s, xp.eye(s.shape[0]), rtol=rtol, atol=atol)
Example #2
 def check_rortho(self, rtol=1e-5, atol=1e-8):
     """
     check R-orthogonal
     """
     tensm = asxp(
         self.array.reshape([self.shape[0],
                             np.prod(self.shape[1:])]))
     s = tensm @ tensm.T.conj()
     return xp.allclose(s, xp.eye(s.shape[0]), rtol=rtol, atol=atol)
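Both checks test the same property on different unfoldings: a tensor is left- (right-) orthogonal when its matricization has orthonormal columns (rows). A minimal self-contained sketch in plain NumPy (standing in for the `xp` backend; the shapes are arbitrary illustrations, not from the source):

import numpy as np

# a random 3-index tensor, e.g. an MPS site tensor (left, physical, right)
t = np.random.rand(4, 3, 5)

# left-orthogonalize via QR on the (left*physical, right) unfolding
q, _ = np.linalg.qr(t.reshape(4 * 3, 5))
s = q.T.conj() @ q                      # what check_lortho computes
assert np.allclose(s, np.eye(s.shape[0]))

# right-orthogonalize via QR of the conjugate-transposed (left, physical*right) unfolding
q2, _ = np.linalg.qr(t.reshape(4, 3 * 5).T.conj())
m = q2.T.conj()
s2 = m @ m.T.conj()                     # what check_rortho computes
assert np.allclose(s2, np.eye(s2.shape[0]))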
Example #3
def expm_krylov(Afunc, dt, vstart: xp.ndarray, block_size=50):
    """
    Compute Krylov subspace approximation of the matrix exponential
    applied to input vector: `expm(dt*A)*v`.
    A is assumed to be a Hermitian matrix.
    Reference:
        M. Hochbruck and C. Lubich
        On Krylov subspace approximations to the matrix exponential operator
        SIAM J. Numer. Anal. 34, 1911 (1997)
    """

    # normalize starting vector
    vstart = xp.asarray(vstart)
    nrmv = float(xp.linalg.norm(vstart))
    assert nrmv > 0
    vstart = vstart / nrmv

    alpha = np.zeros(block_size)
    beta = np.zeros(block_size - 1)

    V = xp.empty((block_size, len(vstart)), dtype=vstart.dtype)
    V[0] = vstart
    res = None

    for j in range(len(vstart)):

        w = Afunc(V[j])
        alpha[j] = xp.vdot(w, V[j]).real

        if j == len(vstart) - 1:
            #logger.debug("the krylov subspace is equal to the full space")
            return _expm_krylov(alpha[:j + 1], beta[:j], V[:j + 1, :].T, nrmv,
                                dt), j + 1

        # Krylov basis buffer exhausted: grow V, alpha and beta by another block
        if len(V) == j + 1:
            V, old_V = xp.empty((len(V) + block_size, len(vstart)),
                                dtype=vstart.dtype), V
            V[:len(old_V)] = old_V
            del old_V
            alpha = np.concatenate([alpha, np.zeros(block_size)])
            beta = np.concatenate([beta, np.zeros(block_size)])

        # Lanczos three-term recurrence: orthogonalize against the two previous vectors
        w -= alpha[j] * V[j] + (beta[j - 1] * V[j - 1] if j > 0 else 0)
        beta[j] = xp.linalg.norm(w)
        if beta[j] < 100 * len(vstart) * np.finfo(float).eps:
            # logger.warning(f'beta[{j}] ~= 0 encountered during Lanczos iteration.')
            return _expm_krylov(alpha[:j + 1], beta[:j], V[:j + 1, :].T, nrmv,
                                dt), j + 1

        # every other iteration, check whether the approximation has converged
        if 3 < j and j % 2 == 0:
            new_res = _expm_krylov(alpha[:j + 1], beta[:j], V[:j + 1].T, nrmv,
                                   dt)
            if res is not None and xp.allclose(res, new_res):
                return new_res, j + 1
            else:
                res = new_res
        V[j + 1] = w / beta[j]
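The helper `_expm_krylov` is not among these examples. Judging from the call sites, it receives the Lanczos coefficients (`alpha` on the diagonal, `beta` on the off-diagonal), the orthonormal Krylov basis `V` as columns, the norm of the starting vector, and the step `dt`. A hedged sketch of what such a helper plausibly computes (the name `_expm_krylov_sketch` and the use of SciPy's tridiagonal eigensolver are assumptions, not the source implementation):

import numpy as np
from scipy.linalg import eigh_tridiagonal

def _expm_krylov_sketch(alpha, beta, V, nrmv, dt):
    # hypothetical stand-in for _expm_krylov:
    # diagonalize the m x m Lanczos tridiagonal matrix T = V^dagger A V
    w, u = eigh_tridiagonal(alpha, beta)
    # expm(dt*A) @ vstart ~= nrmv * V @ expm(dt*T) @ e1,
    # where expm(dt*T) @ e1 = u @ (exp(dt*w) * u[0].conj())
    return nrmv * (V @ (u @ (np.exp(dt * w) * u[0].conj())))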
Example #4
def test_expm(N, imag, block_size):
    # random test matrix and vector (complex-valued when imag is True)
    a1 = np.random.rand(N, N) / N
    if imag:
        a1 = a1 + np.random.rand(N, N) / N / 1j
    a2 = xp.array(a1)
    v = np.random.rand(N)
    if imag:
        v = v + v / 1j
    # dense reference result vs. Krylov-subspace approximation
    res1 = expm(a1) @ v
    res2, _ = expm_krylov(lambda x: a2.dot(x), 1, xp.array(v), block_size)
    assert xp.allclose(res1, res2)
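The bare `N`, `imag`, `block_size` arguments suggest the test is driven by a parametrization layer. A hedged sketch of how it could be wired up with pytest (the parameter values are illustrative, not from the source):

import pytest

@pytest.mark.parametrize("N", [10, 100])
@pytest.mark.parametrize("imag", [False, True])
@pytest.mark.parametrize("block_size", [2, 10])
def test_expm(N, imag, block_size):
    ...  # body as in the example above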
Example #5
def expm_krylov(Afunc, dt, vstart):
    """
    Compute Krylov subspace approximation of the matrix exponential
    applied to input vector: `expm(dt*A)*v`.
    Reference:
        M. Hochbruck and C. Lubich
        On Krylov subspace approximations to the matrix exponential operator
        SIAM J. Numer. Anal. 34, 1911 (1997)
    """

    # normalize starting vector
    vstart = xp.asarray(vstart)
    nrmv = xp.linalg.norm(vstart)
    assert nrmv > 0
    vstart = vstart / nrmv
    # max iteration
    MAX_ITER = 50

    alpha = np.zeros(MAX_ITER)
    beta  = np.zeros(MAX_ITER-1)

    V = xp.zeros((MAX_ITER, len(vstart)), dtype=vstart.dtype)
    V[0] = vstart
    res = None

    for j in range(len(vstart) - 1):
        if j == MAX_ITER - 1:
            raise RuntimeError("krylov not converged")
        w = Afunc(V[j])
        alpha[j] = xp.vdot(w, V[j]).real
        w -= alpha[j]*V[j] + (beta[j-1]*V[j-1] if j > 0 else 0)
        beta[j] = xp.linalg.norm(w)
        if beta[j] < 100*len(vstart)*np.finfo(float).eps:
            logger.warning(f'beta[{j}] ~= 0 encountered during Lanczos iteration.')
            return _expm_krylov(alpha[:j+1], beta[:j], V[:j+1, :].T, nrmv, dt)

        if 3 < j and j % 2 == 0:
            new_res = _expm_krylov(alpha[:j+1], beta[:j], V[:j+1].T, nrmv, dt)
            if res is not None and xp.allclose(res, new_res):
                return new_res
            else:
                res = new_res
        V[j + 1] = w / beta[j]
    # the Krylov space spans the full space; finish the last diagonal element
    # and pass only the computed coefficients, not the zero-padded arrays
    j = len(vstart) - 1
    w = Afunc(V[j])
    alpha[j] = xp.vdot(w, V[j]).real
    return _expm_krylov(alpha[:j + 1], beta[:j], V[:j + 1].T, nrmv, dt)
Example #6
 def is_hermitian(self):
     full = self.full_operator()
     # Hermitian iff the full operator equals its own conjugate transpose
     return xp.allclose(full.array.conj().T, full.array, atol=1e-7)
Example #7
 def nearly_zero(self):
     # looser tolerance when the backend works in 32-bit precision
     if backend.is_32bits:
         atol = 1e-10
     else:
         atol = 1e-20
     return xp.allclose(self.array, xp.zeros_like(self.array), atol=atol)
Example #8
    def kernel(self, restart=False, include_psi0=False):
        r"""calculate the roots

        Parameters
        ----------
        restart: bool, optional
            whether to restart from the previously converged roots. Default is
            ``False``. If ``restart = True``, ``include_psi0`` must be the same
            as in the former calculation.
        include_psi0: bool, optional
            whether the basis of the Hamiltonian includes the ground state
            :math:`\Psi_0`. Default is ``False``.

        Returns
        -------
        e: np.ndarray
            the energies of the states. If ``include_psi0 = True``, the first
            element is the ground-state energy; otherwise it is the energy of
            the first excited state.

        """
        # right canonical mps
        mpo = self.hmpo
        nroots = self.nroots
        algo = self.algo
        site_num = mpo.site_num

        if not restart:
            # make sure that M is not redundant near the edge
            mps = self.mps.ensure_right_canon().canonicalise().normalize().canonicalise()
            logger.debug(f"reference mps shape, {mps}")
            mps_r_cano = mps.copy()
            assert mps.to_right 
            
            tangent_u = []
    
            for ims, ms in enumerate(mps):
                
                shape = list(ms.shape)
                # full SVD of the left-unfolded site tensor; the columns of u
                # beyond the rank span the orthogonal complement (tangent space)
                u, s, vt = scipy.linalg.svd(ms.l_combine(), full_matrices=True)
                rank = len(s)
                if include_psi0 and ims == site_num-1:
                    tangent_u.append(u.reshape(shape[:-1]+[-1]))
                else:
                    if rank < u.shape[1]:
                        tangent_u.append(u[:,rank:].reshape(shape[:-1]+[-1]))
                    else:
                        tangent_u.append(None)  # the tangent space is empty

                mps[ims] = u[:,:rank].reshape(shape[:-1]+[-1])

                # absorb the singular values into vt and push them to the next site
                vt = xp.einsum("i, ij -> ij", asxp(s), asxp(vt))
                if ims == site_num-1:
                    assert vt.size == 1 and xp.allclose(vt, 1)
                else:
                    mps[ims+1] = asnumpy(tensordot(vt, mps[ims+1], ([-1],[0])))
                
            mps_l_cano = mps.copy() 
            mps_l_cano.to_right = False
            mps_l_cano.qnidx = site_num-1

        else:
            mps_l_cano, mps_r_cano, tangent_u, tda_coeff_list = self.wfn
            # rebuild the initial guess vectors from the previous coefficients
            cguess = []
            for iroot in range(len(tda_coeff_list)):
                tda_coeff = tda_coeff_list[iroot]
                x = [c.flatten() for c in tda_coeff if c is not None]
                x = np.concatenate(x, axis=None)
                cguess.append(x)
            cguess = np.stack(cguess, axis=1)

        # shapes and total size of the flattened coefficient vector
        xshape = []
        xsize = 0
        for ims in range(site_num):
            if tangent_u[ims] is None:
                xshape.append((0,0))
            else:
                if ims == site_num-1:
                    xshape.append((tangent_u[ims].shape[-1], 1))
                else:    
                    xshape.append((tangent_u[ims].shape[-1], mps_r_cano[ims+1].shape[0]))
                xsize += np.prod(xshape[-1])
        
        logger.debug(f"DMRG-TDA H dimension: {xsize}")
        
        if USE_GPU:
            oe_backend = "cupy"
        else:
            oe_backend = "numpy"
        
        mps_tangent = mps_r_cano.copy()
        environ = Environ(mps_tangent, mpo, "R")
        # diagonal elements of H in the tangent-space basis (for the preconditioner)
        hdiag = []
        for ims in range(site_num):
            ltensor = environ.GetLR(
                "L", ims-1, mps_tangent, mpo, itensor=None,
                method="System"
            )
            rtensor = environ.GetLR(
                "R", ims+1, mps_tangent, mpo, itensor=None,
                method="Enviro"
            )
            if tangent_u[ims] is not None:
                u = asxp(tangent_u[ims])
                tmp = oe.contract("abc, ded, bghe, agl, chl -> ld", ltensor, rtensor,
                        asxp(mpo[ims]), u, u, backend=oe_backend)   
                hdiag.append(asnumpy(tmp))
            mps_tangent[ims] = mps_l_cano[ims]
        hdiag = np.concatenate(hdiag, axis=None)
    
        count = 0
        
        # recover the vector-like x back to the ndarray tda_coeff
        def reshape_x(x):
            tda_coeff = []
            offset = 0
            for shape in xshape:
                if shape == (0,0):
                    tda_coeff.append(None)
                else:
                    size = np.prod(shape)
                    tda_coeff.append(x[offset:size+offset].reshape(shape))
                    offset += size
            
            assert offset == xsize
            return tda_coeff
            
        def hop(x):
            # H*X
            nonlocal count
            count += 1
            
            assert len(x) == xsize
            tda_coeff = reshape_x(x)
    
            res = [np.zeros_like(coeff) if coeff is not None else None for coeff in tda_coeff]
            
            # fix ket and sweep bra and accumulate into res
            for ims in range(site_num):
                if tda_coeff[ims] is None:
                    assert tangent_u[ims] is None
                    continue
                
                # mix-canonical mps
                mps_tangent = merge(mps_l_cano, mps_r_cano, ims+1)
                mps_tangent[ims] = tensordot(tangent_u[ims], tda_coeff[ims], (-1, 0))
                
                mps_tangent_conj = mps_r_cano.copy()
                environ = Environ(mps_tangent, mpo, "R", mps_conj=mps_tangent_conj)
                
                for ims_conj in range(site_num):
                    ltensor = environ.GetLR(
                        "L", ims_conj-1, mps_tangent, mpo, itensor=None,
                        mps_conj=mps_tangent_conj,
                        method="System"
                    )
                    rtensor = environ.GetLR(
                        "R", ims_conj+1, mps_tangent, mpo, itensor=None,
                        mps_conj=mps_tangent_conj,
                        method="Enviro"
                    )
                    if tda_coeff[ims_conj] is not None:
                        # S-a   l-S
                        #     d
                        # O-b-O-f-O
                        #     e
                        # S-c   k-S
    
                        path = [
                            ([0, 1], "abc, cek -> abek"),
                            ([2, 0], "abek, bdef -> akdf"),
                            ([1, 0], "akdf, lfk -> adl"),
                        ]
                        out = multi_tensor_contract(
                            path, ltensor, asxp(mps_tangent[ims_conj]),
                            asxp(mpo[ims_conj]), rtensor
                        )
                        res[ims_conj] += asnumpy(tensordot(tangent_u[ims_conj], out,
                            ([0,1], [0,1])))
                    
                    # mps_conj combine 
                    mps_tangent_conj[ims_conj] = mps_l_cano[ims_conj]    
            
            res = [mat for mat in res if mat is not None]
    
            return np.concatenate(res, axis=None)
        
        if algo == "davidson":
            if restart:
                cguess = [cguess[:,i] for i in range(cguess.shape[1])]
            else:
                cguess = [np.random.random(xsize) - 0.5]
            precond = lambda x, e, *args: x / (hdiag - e + 1e-4)
            
            e, c = davidson(
                hop, cguess, precond, max_cycle=100,
                nroots=nroots, max_memory=64000
            )
            if nroots == 1:
                c = [c]
            c = np.stack(c, axis=1)

        elif algo == "primme":
            if not restart:
                cguess = None

            def multi_hop(x):
                if x.ndim == 1:
                    return hop(x)
                elif x.ndim == 2:
                    return np.stack([hop(x[:,i]) for i in range(x.shape[1])],axis=1)
                else:
                    assert False
    
            def precond(x):
                if x.ndim == 1:
                    return np.einsum("i, i -> i", 1/(hdiag+1e-4), x)
                elif x.ndim == 2:
                    return np.einsum("i, ij -> ij", 1/(hdiag+1e-4), x)
                else:
                    assert False
            A = scipy.sparse.linalg.LinearOperator((xsize,xsize),
                    matvec=multi_hop, matmat=multi_hop)
            M = scipy.sparse.linalg.LinearOperator((xsize,xsize),
                    matvec=precond, matmat=precond)
            e, c = primme.eigsh(A, k=min(nroots,xsize), which="SA", 
                    v0=cguess,
                    OPinv=M,
                    method="PRIMME_DYNAMIC", 
                    tol=1e-6)
        else:
            assert False

        logger.debug(f"H*C times: {count}")
        
        tda_coeff_list = []
        for iroot in range(nroots):
            tda_coeff_list.append(reshape_x(c[:,iroot])) 
        
        self.e = np.array(e)
        self.wfn = [mps_l_cano, mps_r_cano, tangent_u, tda_coeff_list]
        
        return self.e
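The restart branch flattens each root's per-site TDA coefficients into one vector, and `reshape_x` inverts that mapping. A minimal self-contained sketch of the round trip (the block shapes are hypothetical, chosen only for illustration):

import numpy as np

# hypothetical per-site block shapes; (0, 0) marks an empty tangent space
xshape = [(3, 2), (0, 0), (4, 1)]

def flatten_coeff(tda_coeff):
    # mirrors the restart branch above: concatenate the non-None blocks
    return np.concatenate([c.flatten() for c in tda_coeff if c is not None])

def reshape_x(x):
    # mirrors the helper above: cut the flat vector back into per-site blocks
    tda_coeff, offset = [], 0
    for shape in xshape:
        if shape == (0, 0):
            tda_coeff.append(None)
        else:
            size = int(np.prod(shape))
            tda_coeff.append(x[offset:offset + size].reshape(shape))
            offset += size
    return tda_coeff

blocks = [np.random.rand(3, 2), None, np.random.rand(4, 1)]
roundtrip = reshape_x(flatten_coeff(blocks))
assert roundtrip[1] is None
assert np.allclose(blocks[0], roundtrip[0]) and np.allclose(blocks[2], roundtrip[2])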