Code example #1
File: mps.py Project: jiangtong1000/Renormalizer
def projector(ms: xp.ndarray) -> xp.ndarray:
    # P = |ms><ms|, contracted over the last (bond) axis
    proj = xp.tensordot(ms, ms.conj(), axes=(-1, -1))
    sz = int(np.prod(ms.shape[:-1]))
    # identity reshaped to match the grouped indices of proj
    Iden = xp.array(xp.diag(xp.ones(sz)), dtype=backend.real_dtype).reshape(proj.shape)
    # 1 - P: projector onto the orthogonal complement of the space spanned by ms
    proj = Iden - proj
    return proj
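A quick way to see what projector() computes: for an isometric ms (as produced by canonicalisation), 1 - |ms><ms| annihilates anything in the space spanned by ms. A minimal NumPy-only sketch, with shapes made up for illustration:

import numpy as np

ms = np.linalg.qr(np.random.rand(6, 3))[0].reshape(2, 3, 3)  # isometric: ms^dagger @ ms = identity
proj = np.eye(6).reshape(2, 3, 2, 3) - np.tensordot(ms, ms.conj(), axes=(-1, -1))
vec = np.tensordot(ms, np.random.rand(3), axes=(-1, 0))      # a vector inside the kept space
print(np.allclose(np.tensordot(proj, vec, axes=([2, 3], [0, 1])), 0))  # True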
Code example #2
File: lib.py Project: sailfish009/Renormalizer
    def __init__(self, mps, mpo, domain=None, mps_conj=None):
        # todo: real disk and other backend
        # todo: contract_one_site_multi_mpo could generalize contract_one_site;
        # we could unify them in the future.

        # idx indicates the exact position of L or R, like
        # L(idx-1) - mpo(idx) - R(idx+1)
        self._virtual_disk = {}
        if type(mpo) is list:
            ndim = len(mpo) + 2
        else:
            ndim = 3
        self.sentinel = xp.ones([1] * ndim, dtype=backend.real_dtype)
        self._construct(mps, mpo, domain, mps_conj)
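The sentinel is the rank-ndim tensor of ones that seeds the iterative environment contraction sketched in the comment (L(idx-1) - mpo(idx) - R(idx+1)). A self-contained NumPy sketch of the ndim == 3 case, with made-up shapes and an assumed index convention (mps tensors as (left, physical, right), mpo tensors as (left, out, in, right)):

import numpy as np

rng = np.random.default_rng(0)
d, m, w, nsite = 2, 3, 4, 3
mps = [rng.random((1 if i == 0 else m, d, 1 if i == nsite - 1 else m)) for i in range(nsite)]
mpo = [rng.random((1 if i == 0 else w, d, d, 1 if i == nsite - 1 else w)) for i in range(nsite)]

L = np.ones((1, 1, 1))  # the rank-3 sentinel
for ms, mo in zip(mps, mpo):
    L = np.tensordot(L, ms, axes=(0, 0))                    # -> (wl, ml', s, mr)
    L = np.tensordot(L, mo, axes=([0, 2], [0, 1]))          # -> (ml', mr, s', wr)
    L = np.tensordot(L, ms.conj(), axes=([0, 2], [0, 1]))   # -> (mr, wr, mr')
print(L.item())  # the full <mps|mpo|mps> overlap once all sites are absorbed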
Code example #3
    def approx_propagator(cls, mpo, dt, thresh=0):
        """
        e^-iHdt : approximate propagator MPO from Runge-Kutta methods
        """

        mps = Mps()
        mps.mol_list = mpo.mol_list
        mps.dim = [1] * (mpo.site_num + 1)
        mps.qn = [[0]] * (mpo.site_num + 1)
        mps.qnidx = mpo.site_num - 1
        mps.qntot = 0
        mps.threshold = thresh

        # seed the t=0 propagator with a trivial product state of ones
        for impo in range(mpo.site_num):
            ms = xp.ones((1, mpo[impo].shape[1], 1),
                         dtype=backend.complex_dtype)
            mps.append(ms)
        approx_mpo_t0 = cls.from_mps(mps)

        approx_mpo = approx_mpo_t0.evolve(mpo, dt)

        return approx_mpo
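For intuition, the Runge-Kutta propagator referenced in the docstring reduces, for dense matrices, to a truncated Taylor series of e^{-iHdt}. A minimal dense-matrix sketch (the function name and order are illustrative, not the project's API):

import numpy as np

def taylor_propagator(h: np.ndarray, dt: float, order: int = 4) -> np.ndarray:
    # accumulate sum_{n=0}^{order} (-i H dt)^n / n!
    prop = np.eye(h.shape[0], dtype=complex)
    term = np.eye(h.shape[0], dtype=complex)
    for n in range(1, order + 1):
        term = term @ (-1j * dt * h) / n
        prop = prop + term
    return prop

h = np.diag([0.0, 1.0, 2.0])
print(np.allclose(taylor_propagator(h, 0.01),
                  np.diag(np.exp(-1j * 0.01 * np.diag(h))), atol=1e-10))  # True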
Code example #4
File: matrix.py Project: jiangtong1000/Renormalizer
def ones(shape, dtype=None):
    if dtype is None:
        dtype = backend.real_dtype
    return Matrix(xp.ones(shape), dtype=dtype)
Code example #5
    def optimize_cv(self, lr_group, direction, isite, num, percent=0.0):
        # restrict the exciton quantum number depending on the spectratype
        first_LR = lr_group[0]
        second_LR = lr_group[1]
        if self.spectratype == "abs":
            constrain_qn = 1
        else:
            constrain_qn = 0
        # this function solves the working equation of ZT CV-DMRG:
        # L = <CV|op_a|CV> + 2\eta <op_b|CV>; take the derivative with respect to the local CV
        # S-a-S-e-S                          S-a-S-d-S
        # |   d   |                          |   |   |
        # O-b-O-g-O  * CV[isite-1]  = -\eta  |   c   |
        # |   f   |                          |   |   |
        # S-c- -h-S                          S-b- -e-S

        # the resulting linear problem is a_mat * x = vec_b
        # the environment matrix

        if self.method == "1site":
            addlist = [isite - 1]
            first_L = asxp(first_LR[isite - 1])
            first_R = asxp(first_LR[isite])
            second_L = asxp(second_LR[isite - 1])
            second_R = asxp(second_LR[isite])
        else:
            addlist = [isite - 2, isite - 1]
            first_L = asxp(first_LR[isite - 2])
            first_R = asxp(first_LR[isite])
            second_L = asxp(second_LR[isite - 2])
            second_R = asxp(second_LR[isite])

        if direction == 'left':
            system = 'R'
        else:
            system = 'L'

        # this part is similar to the ground state calculation
        qnmat, qnbigl, qnbigr = svd_qn.construct_qnmat(
            self.cv_mps, self.mpo.pbond_list,
            addlist, self.method, system)
        xshape = qnmat.shape
        nonzeros = np.sum(qnmat == constrain_qn)

        if self.method == '1site':
            guess = self.cv_mps[isite - 1][qnmat == constrain_qn].reshape(nonzeros, 1)
            path_b = [([0, 1], "ab, acd->bcd"),
                      ([1, 0], "bcd, de->bce")]
            vec_b = multi_tensor_contract(
                path_b, second_L, self.b_oper[isite - 1], second_R
            )[qnmat == constrain_qn].reshape(nonzeros, 1)
        else:
            guess = tensordot(
                self.cv_mps[isite - 2], self.cv_mps[isite - 1], axes=(-1, 0)
            )
            guess = guess[qnmat == constrain_qn].reshape(nonzeros, 1)
            path_b = [([0, 1], "ab, acd->bcd"),
                      ([2, 0], "bcd, def->bcef"),
                      ([1, 0], "bcef, fg->bceg")]
            vec_b = multi_tensor_contract(
                path_b, second_L, self.b_oper[isite - 2],
                self.b_oper[isite - 1], second_R
            )[qnmat == constrain_qn].reshape(nonzeros, 1)

        if self.method == "2site":
            a_oper_isite2 = asxp(self.a_oper[isite - 2])
        else:
            a_oper_isite2 = None
        a_oper_isite1 = asxp(self.a_oper[isite - 1])

        # use the diagonal part of mat_a to construct the preconditioner for the linear solver
        if self.method == "1site":
            part_l = xp.einsum('abca->abc', first_L)
            part_r = xp.einsum('hfgh->hfg', first_R)
            path_pre = [([0, 1], "abc, bdef->acdef"),
                        ([1, 0], "acdef, hfg->acdehg")]
            pre_a_mat1 = multi_tensor_contract(path_pre, part_l, a_oper_isite1,
                                               part_r)
            path_pre2 = [([0, 1], "acdehg, ceig->adhi")]
            pre_a_mat1 = multi_tensor_contract(path_pre2, pre_a_mat1, a_oper_isite1)
            pre_a_mat1 = xp.einsum('adhd->adh', pre_a_mat1)[qnmat == constrain_qn]
            # pre_a_mat1 = xp.einsum('abca, bdef, cedg, hfgh->adh', first_L, a_oper_isite1,
            #                        a_oper_isite1, first_R)[qnmat == constrain_qn]
            cv_shape = self.cv_mps[isite - 1].shape
            pre_a_mat2 = xp.ones(cv_shape)[qnmat == constrain_qn]
            pre_a_mat = pre_a_mat1 + pre_a_mat2 * self.eta**2
        else:
            pre_a_mat1 = xp.einsum(
                'abca, bdef, cedg, fhij, gihk, ljkl->adhl', first_L, a_oper_isite2, a_oper_isite2,
                a_oper_isite1, a_oper_isite1, first_R)[qnmat == constrain_qn]
            cv_shape1 = self.cv_mps[isite - 2].shape
            cv_shape2 = self.cv_mps[isite - 1].shape
            new_shape = [cv_shape1[0], cv_shape1[1], cv_shape2[1], cv_shape2[2]]
            pre_a_mat2 = xp.ones(new_shape)[qnmat == constrain_qn]
            pre_a_mat = pre_a_mat1 + pre_a_mat2 * self.eta**2

        pre_a_mat = np.diag(1./asnumpy(pre_a_mat))

        count = 0

        def hop(c):
            nonlocal count
            count += 1
            xstruct = asxp(svd_qn.cvec2cmat(xshape, c, qnmat, constrain_qn))
            if self.method == "1site":
                path_a = [([0, 1], "abcd, aef->bcdef"),
                          ([3, 0], "bcdef, begh->cdfgh"),
                          ([2, 0], "cdfgh, cgij->dfhij"),
                          ([1, 0], "dfhij, fhjk->dik")]
                ax1 = multi_tensor_contract(path_a, first_L, xstruct,
                                            a_oper_isite1, a_oper_isite1, first_R)
                ax2 = xstruct
                ax = ax1 + ax2 * self.eta**2
            else:
                path_a = [([0, 1], "abcd, aefg->bcdefg"),
                          ([5, 0], "bcdefg, behi->cdfghi"),
                          ([4, 0], "cdfghi, ifjk->cdghjk"),
                          ([3, 0], "cdghjk, chlm->dgjklm"),
                          ([2, 0], "dgjklm, mjno->dgklno"),
                          ([1, 0], "dgklno, gkop->dlnp")]
                ax1 = multi_tensor_contract(path_a, first_L, xstruct,
                                            a_oper_isite2, a_oper_isite1,
                                            a_oper_isite2, a_oper_isite1,
                                            first_R)
                ax2 = xstruct
                ax = ax1 + ax2 * self.eta**2
            cout = ax[qnmat == constrain_qn].reshape(nonzeros, 1)
            return asnumpy(cout)

        mat_a = scipy.sparse.linalg.LinearOperator((nonzeros, nonzeros), matvec=hop)
        # for the first two sweeps, do not use the previous matrix as the initial guess:
        # at the initial stage it is still far from the optimized one
        if num in [1, 2]:
            x, info = scipy.sparse.linalg.cg(mat_a, asnumpy(vec_b), atol=0)
        else:
            x, info = scipy.sparse.linalg.cg(mat_a, asnumpy(vec_b), tol=1.e-5,
                                             x0=guess, M=pre_a_mat, atol=0)
        # logger.info(f'hop times:{count}')
        self.hop_time.append(count)
        if info != 0:
            logger.info(f"iteration solver not converged")

        # the value of the functional L
        l_value = (np.inner(hop(x).reshape(1, nonzeros), x.reshape(1, nonzeros))
                   - 2 * np.inner(asnumpy(vec_b).reshape(1, nonzeros),
                                  x.reshape(1, nonzeros)))
        xstruct = svd_qn.cvec2cmat(xshape, x, qnmat, constrain_qn)
        x, xdim, xqn, compx = \
            solver.renormalization_svd(xstruct, qnbigl, qnbigr, system,
                                       constrain_qn, self.m_max, percent)
        if self.method == "1site":
            self.cv_mps[isite - 1] = x
            if direction == "left":
                if isite != 1:
                    self.cv_mps[isite - 2] = tensordot(
                        self.cv_mps[isite - 2], compx, axes=(-1, 0))
                    self.cv_mps.qn[isite - 1] = xqn
                else:
                    self.cv_mps[isite - 1] = tensordot(
                        compx, self.cv_mps[isite - 1], axes=(-1, 0))
                    self.cv_mps.qn[isite - 1] = [0]
            elif direction == "right":
                if isite != len(self.cv_mps):
                    self.cv_mps[isite] = tensordot(
                        compx, self.cv_mps[isite], axes=(-1, 0))
                    self.cv_mps.qn[isite] = xqn
                else:
                    self.cv_mps[isite - 1] = tensordot(
                        self.cv_mps[isite - 1], compx, axes=(-1, 0))
                    self.cv_mps.qn[isite] = [0]
        else:
            if direction == "left":
                self.cv_mps[isite - 1] = x
                self.cv_mps[isite - 2] = compx
            else:
                self.cv_mps[isite - 2] = x
                self.cv_mps[isite - 1] = compx
            self.cv_mps.qn[isite - 1] = xqn
        return l_value[0][0]
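The solver pattern above (a matrix-free hop wrapped in a LinearOperator, conjugate gradient, and a diagonal preconditioner) can be reproduced on a small dense problem. A self-contained sketch with made-up data:

import numpy as np
import scipy.sparse.linalg

rng = np.random.default_rng(1)
n = 50
a_dense = rng.random((n, n))
a_dense = a_dense @ a_dense.T + n * np.eye(n)        # symmetric positive definite
b = rng.random(n)

hop = lambda c: a_dense @ c                          # stands in for the tensor-contraction hop()
mat_a = scipy.sparse.linalg.LinearOperator((n, n), matvec=hop)
pre_m = np.diag(1.0 / np.diag(a_dense))              # Jacobi preconditioner, like pre_a_mat
x, info = scipy.sparse.linalg.cg(mat_a, b, M=pre_m, atol=0)
print(info, np.linalg.norm(a_dense @ x - b))         # 0 and a small residual on success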
Code example #6
File: zerot.py Project: sailfish009/Renormalizer
    def optimize_cv(self, lr_group, isite, percent=0.0):
        # restrict the exciton quantum number depending on the spectratype
        first_LR = lr_group[0]
        second_LR = lr_group[1]
        constrain_qn = self.cv_mps.qntot
        # this function solves the working equation of ZT CV-DMRG:
        # L = <CV|op_a|CV> + 2\eta <op_b|CV>; take the derivative with respect to the local CV
        # S-a-S-e-S                          S-a-S-d-S
        # |   d   |                          |   |   |
        # O-b-O-g-O  * CV[isite-1]  = -\eta  |   c   |
        # |   f   |                          |   |   |
        # S-c- -h-S                          S-b- -e-S

        # the resulting linear problem is a_mat * x = vec_b
        # the environment matrix

        if self.method == "1site":
            cidx = [isite - 1]
            first_L = asxp(first_LR[isite - 1])
            first_R = asxp(first_LR[isite])
            second_L = asxp(second_LR[isite - 1])
            second_R = asxp(second_LR[isite])
        else:
            cidx = [isite - 2, isite - 1]
            first_L = asxp(first_LR[isite - 2])
            first_R = asxp(first_LR[isite])
            second_L = asxp(second_LR[isite - 2])
            second_R = asxp(second_LR[isite])

        # this part is similar to the ground state calculation
        qnbigl, qnbigr, qnmat = self.cv_mps._get_big_qn(cidx)
        xshape = qnmat.shape
        nonzeros = int(np.sum(qnmat == constrain_qn))
        if self.method == '1site':
            guess = self.cv_mps[isite - 1][qnmat == constrain_qn]
            path_b = [([0, 1], "ab, acd->bcd"),
                      ([1, 0], "bcd, de->bce")]
            vec_b = multi_tensor_contract(
                path_b, second_L, self.b_mps[isite - 1], second_R
            )[qnmat == constrain_qn]
        else:
            guess = tensordot(
                self.cv_mps[isite - 2], self.cv_mps[isite - 1], axes=(-1, 0)
            )[qnmat == constrain_qn]
            path_b = [([0, 1], "ab, acd->bcd"),
                      ([2, 0], "bcd, def->bcef"),
                      ([1, 0], "bcef, fg->bceg")]
            vec_b = multi_tensor_contract(
                path_b, second_L, self.b_mps[isite - 2],
                self.b_mps[isite - 1], second_R
            )[qnmat == constrain_qn]

        if self.method == "2site":
            a_oper_isite2 = asxp(self.a_oper[isite - 2])
        else:
            a_oper_isite2 = None
        a_oper_isite1 = asxp(self.a_oper[isite - 1])

        # use the diagonal part of mat_a to construct the preconditioner
        # for the linear solver
        part_l = xp.einsum('abca->abc', first_L)
        part_r = xp.einsum('hfgh->hfg', first_R)
        if self.method == "1site":
            #  S-a   d    h-S
            #  O-b  -O-   f-O
            #  |     e      |
            #  O-c  -O-   g-O
            #  S-a   i    h-S
            path_pre = [([0, 1], "abc, bdef -> acdef"),
                        ([1, 0], "acdef, ceig -> adfig")]
            a_diag = multi_tensor_contract(path_pre, part_l, a_oper_isite1,
                                           a_oper_isite1)
            a_diag = xp.einsum("adfdg -> adfg", a_diag)
            a_diag = xp.tensordot(a_diag, part_r,
                                  axes=([2, 3], [1, 2]))[qnmat == constrain_qn]
        else:
            #  S-a   d     k   h-S
            #  O-b  -O- j -O-  f-O
            #  |     e     l   |
            #  O-c  -O- m -O-  g-O
            #  S-a   i     n   h-S
            # first left half, second right half, last contraction

            path_pre = [([0, 1], "abc, bdej -> acdej"),
                        ([1, 0], "acdej, ceim -> adjim")]
            a_diagl = multi_tensor_contract(path_pre, part_l, a_oper_isite2,
                                            a_oper_isite2)
            a_diagl = xp.einsum("adjdm -> adjm", a_diagl)

            path_pre = [([0, 1], "hfg, jklf -> hgjkl"),
                        ([1, 0], "hgjkl, mlng -> hjkmn")]
            a_diagr = multi_tensor_contract(path_pre, part_r, a_oper_isite1,
                                            a_oper_isite1)
            a_diagr = xp.einsum("hjkmk -> khjm", a_diagr)

            a_diag = xp.tensordot(
                a_diagl, a_diagr, axes=([2, 3], [2, 3]))[qnmat == constrain_qn]

        a_diag = asnumpy(a_diag + xp.ones(nonzeros) * self.eta**2)
        M_x = lambda x: x / a_diag
        pre_M = scipy.sparse.linalg.LinearOperator((nonzeros, nonzeros), M_x)

        count = 0

        # cache oe path
        if self.method == "2site":
            expr = oe.contract_expression(
                "abcd, befh, cfgi, hjkn, iklo, mnop, dglp -> aejm",
                first_L, a_oper_isite2, a_oper_isite2, a_oper_isite1,
                a_oper_isite1, first_R, xshape,
                constants=[0, 1, 2, 3, 4, 5])

        def hop(c):
            nonlocal count
            count += 1
            xstruct = asxp(cvec2cmat(xshape, c, qnmat, constrain_qn))
            if self.method == "1site":
                path_a = [([0, 1], "abcd, aef->bcdef"),
                          ([3, 0], "bcdef, begh->cdfgh"),
                          ([2, 0], "cdfgh, cgij->dfhij"),
                          ([1, 0], "dfhij, fhjk->dik")]
                ax1 = multi_tensor_contract(path_a, first_L, xstruct,
                                            a_oper_isite1, a_oper_isite1, first_R)
            else:
                # opt_einsum v3.2.1 is not bad: ~10% faster than the hand-designed
                # contraction path for this complicated case, and it consumes a little less memory.
                # this is the only place in renormalizer where we use opt_einsum now;
                # we keep it here just as a demo.
                # ax1 = oe.contract("abcd, befh, cfgi, hjkn, iklo, mnop, dglp -> aejm",
                #        first_L, a_oper_isite2, a_oper_isite2, a_oper_isite1,
                #        a_oper_isite1, first_R, xstruct)
                if USE_GPU:
                    oe_backend = "cupy"
                else:
                    oe_backend = "numpy"
                ax1 = expr(xstruct, backend=oe_backend)
                #print(oe.contract_path("abcd, befh, cfgi, hjkn, iklo, mnop, dglp -> aejm",
                #        first_L, a_oper_isite2, a_oper_isite2, a_oper_isite1,
                #        a_oper_isite1, first_R, xstruct))

                #path_a = [([0, 1], "abcd, aefg->bcdefg"),
                #          ([5, 0], "bcdefg, behi->cdfghi"),
                #          ([4, 0], "cdfghi, ifjk->cdghjk"),
                #          ([3, 0], "cdghjk, chlm->dgjklm"),
                #          ([2, 0], "dgjklm, mjno->dgklno"),
                #          ([1, 0], "dgklno, gkop->dlnp")]
                #ax1 = multi_tensor_contract(path_a, first_L, xstruct,
                #                           a_oper_isite2, a_oper_isite1,
                #                           a_oper_isite2, a_oper_isite1,
                #                           first_R)
            ax = ax1 + xstruct * self.eta**2
            cout = ax[qnmat == constrain_qn]
            return asnumpy(cout)

        mat_a = scipy.sparse.linalg.LinearOperator((nonzeros, nonzeros),
                                                   matvec=hop)

        x, info = scipy.sparse.linalg.cg(mat_a, asnumpy(vec_b), tol=1.e-5,
                                         x0=asnumpy(guess),
                                         M=pre_M, atol=0)

        self.hop_time.append(count)
        if info != 0:
            logger.info(f"iteration solver not converged")
        # the value of the functional L
        l_value = xp.dot(asxp(hop(x)), asxp(x)) - 2 * xp.dot(vec_b, asxp(x))
        xstruct = cvec2cmat(xshape, x, qnmat, constrain_qn)
        self.cv_mps._update_mps(xstruct, cidx, qnbigl, qnbigr, self.m_max, percent)
        
        return float(l_value)
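The cached opt_einsum expression above is built once per sweep position with the environment tensors passed as constants, then evaluated repeatedly inside hop with only xstruct varying. A tiny standalone sketch of that pattern (shapes are made up):

import numpy as np
import opt_einsum as oe

first = np.random.rand(3, 4)                            # plays the role of a constant environment
expr = oe.contract_expression("ab, bc -> ac", first, (4, 5), constants=[0])
for _ in range(3):
    out = expr(np.random.rand(4, 5))                    # pass only the non-constant operand
print(out.shape)                                        # (3, 5)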
Code example #7
    def analysis_dominant_config(self, thresh=0.8, alias=None, tda_m_trunc=20,
            return_compressed_mps=False):
        r""" analyze the dominant configuration of each tda root.
            The algorithm is to compress the tda wavefunction to a rank-1 Hartree
            state and get the ci coefficient of the largest configuration.
            Then, the configuration is subtracted from the tda wavefunction and
            redo the first step to get the second largest configuration. The
            two steps continue until the thresh is achieved.
        
        Parameters
        ----------
        thresh: float, optional
            the threshold at which the analysis of each root stops:
            :math:`\sum_i |c_i|^2 > thresh`. Default is 0.8.
        alias: dict, optional
            The alias of each site. For example, ``alias={0:"v_0", 1:"v_2",
            2:"v_1"}``. Default is `None`. 
        tda_m_trunc: int, optional
            the ``m`` to compress a tda wavefunction. Default is 20.
        return_compressed_mps: bool, optional
            If ``True``, return the tda excited state as a single compressed
            mps. Default is `False`.
        
        Returns
        -------
        configs: dict
            The dominant configuration of each root.
            ``configs = {0:[(config0, config_name0, ci_coeff0),(config1,
            config_name1, ci_coeff1),...], 1:...}``
        compressed_mps: List[renormalizer.mps.Mps]
            see the description in ``return_compressed_mps``.
        
        Note
        ----
        The compressed_mps is an approximation of the tda wavefunction with
        ``m=tda_m_trunc``.
        """

        mps_l_cano, mps_r_cano, tangent_u, tda_coeff_list = self.wfn
            
        if alias is not None:
            assert len(alias) == mps_l_cano.site_num
        
        compressed_mps = []
        for iroot in range(self.nroots):
            logger.info(f"iroot: {iroot}")
            tda_coeff = tda_coeff_list[iroot]
            mps_tangent_list = []
            weight = []
            for ims in range(mps_l_cano.site_num):
                if tangent_u[ims] is None:
                    assert tda_coeff[ims] is None
                    continue
                weight.append(np.sum(tda_coeff[ims]**2))
                mps_tangent = merge(mps_l_cano, mps_r_cano, ims+1) 
                mps_tangent[ims] = asnumpy(tensordot(tangent_u[ims],
                    tda_coeff[ims],[-1,0]))
                mps_tangent_list.append(mps_tangent)
            
            assert np.allclose(np.sum(weight), 1)
            # sort the mps_tangent from large weight to small weight
            mps_tangent_list = [mps_tangent_list[i] for i in np.argsort(weight,axis=None)[::-1]]

            coeff_square_sum = 0
            mps_delete = None
            
            config_visited = []
            while coeff_square_sum < thresh:
                if mps_delete is None:
                    # first compress it to M=tda_m_trunc
                    mps_rank1 = compressed_sum(mps_tangent_list, batchsize=5,
                            temp_m_trunc=tda_m_trunc)
                else:
                    mps_rank1 = compressed_sum([mps_delete] + mps_tangent_list,
                            batchsize=5, temp_m_trunc=tda_m_trunc)
                if coeff_square_sum == 0 and return_compressed_mps:
                    compressed_mps.append(mps_rank1.copy())       
                mps_rank1 = mps_rank1.canonicalise().compress(temp_m_trunc=1)
                
                # get config with the largest coeff
                config = []
                for ims, ms in enumerate(mps_rank1):
                    ms = ms.array.flatten()**2
                    quanta = int(np.argmax(ms))
                    config.append(quanta)
               
                # check if the config has been visited
                if config in config_visited:
                    break
                
                config_visited.append(config)

                ci_coeff_list = []
                for mps_tangent in mps_tangent_list:
                    sentinel = xp.ones((1,1))
                    for ims, ms in enumerate(mps_tangent):
                        sentinel = sentinel.dot(asxp(ms[:,config[ims],:]))
                    ci_coeff_list.append(float(sentinel[0,0]))
                ci_coeff = np.sum(ci_coeff_list)
                coeff_square_sum += ci_coeff**2
                
                if alias is not None:
                    config_name = [f"{quanta}"+f"{alias[isite]}" for isite, quanta
                            in enumerate(config) if quanta != 0]
                    config_name = " ".join(config_name)
                    self.configs[iroot].append((config, config_name, ci_coeff))
                    logger.info(f"config: {config}, {config_name}")
                else:
                    self.configs[iroot].append((config, ci_coeff))
                    logger.info(f"config: {config}")

                logger.info(f"ci_coeff: {ci_coeff}, weight:{ci_coeff**2}")

                condition = {dof:config[idof] for idof, dof in
                        enumerate(self.model.dofs)}
                mps_delete_increment = Mps.hartree_product_state(self.model, condition).scale(-ci_coeff)
                if mps_delete is None:
                    mps_delete = mps_delete_increment
                else:
                    mps_delete = mps_delete + mps_delete_increment

            logger.info(f"coeff_square_sum: {coeff_square_sum}")
        
        return self.configs, compressed_mps
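The greedy loop above can be demonstrated on a plain coefficient vector, where the rank-1 compression and subtraction reduce to argmax and in-place subtraction. A toy sketch (numbers are arbitrary):

import numpy as np

c = np.array([0.8, -0.5, 0.3, 0.1])
c /= np.linalg.norm(c)
thresh, coeff_square_sum, configs = 0.8, 0.0, []
residual = c.copy()
while coeff_square_sum < thresh:
    idx = int(np.argmax(residual**2))   # dominant configuration, like the rank-1 compression
    ci = residual[idx]
    configs.append((idx, ci))
    coeff_square_sum += ci**2
    residual[idx] -= ci                 # subtract it, like mps_delete
print(configs, coeff_square_sum)        # stops once sum_i |c_i|^2 > thresh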
Code example #8
File: finitet.py Project: sailfish009/Renormalizer
    def optimize_cv(self, lr_group, isite, percent=0):
        if self.spectratype == "abs":
            # quantum number restriction, |1><0|
            up_exciton, down_exciton = 1, 0
        elif self.spectratype == "emi":
            # quantum number restriction, |0><1|
            up_exciton, down_exciton = 0, 1
        nexciton = 1
        first_LR, second_LR, third_LR, forth_LR = lr_group

        if self.method == "1site":
            add_list = [isite - 1]
            first_L = asxp(first_LR[isite - 1])
            first_R = asxp(first_LR[isite])
            second_L = asxp(second_LR[isite - 1])
            second_R = asxp(second_LR[isite])
            third_L = asxp(third_LR[isite - 1])
            third_R = asxp(third_LR[isite])
            forth_L = asxp(forth_LR[isite - 1])
            forth_R = asxp(forth_LR[isite])
        else:
            add_list = [isite - 2, isite - 1]
            first_L = asxp(first_LR[isite - 2])
            first_R = asxp(first_LR[isite])
            second_L = asxp(second_LR[isite - 2])
            second_R = asxp(second_LR[isite])
            third_L = asxp(third_LR[isite - 2])
            third_R = asxp(third_LR[isite])
            forth_L = asxp(forth_LR[isite - 2])
            forth_R = asxp(forth_LR[isite])

        xqnmat, xqnbigl, xqnbigr, xshape = \
            self.construct_X_qnmat(add_list)
        dag_qnmat, dag_qnbigl, dag_qnbigr = self.swap(xqnmat, xqnbigl, xqnbigr)
        nonzeros = int(
            np.sum(self.condition(dag_qnmat, [down_exciton, up_exciton])))

        if self.method == "1site":
            guess = moveaxis(self.cv_mpo[isite - 1], (1, 2), (2, 1))
        else:
            guess = tensordot(moveaxis(self.cv_mpo[isite - 2], (1, 2), (2, 1)),
                              moveaxis(self.cv_mpo[isite - 1], (1, 2), (2, 1)),
                              axes=(-1, 0))
        guess = guess[self.condition(dag_qnmat, [down_exciton, up_exciton])]

        if self.method == "1site":
            # define dot path
            path_1 = [([0, 1], "abcd, aefg -> bcdefg"),
                      ([3, 0], "bcdefg, bfhi -> cdeghi"),
                      ([2, 0], "cdeghi, chjk -> degijk"),
                      ([1, 0], "degijk, gikl -> dejl")]
            path_2 = [([0, 1], "abcd, aefg -> bcdefg"),
                      ([3, 0], "bcdefg, bfhi -> cdeghi"),
                      ([2, 0], "cdeghi, djek -> cghijk"),
                      ([1, 0], "cghijk, gilk -> chjl")]
            path_3 = [([0, 1], "ab, acde -> bcde"),
                      ([1, 0], "bcde, ef -> bcdf")]

            vecb = multi_tensor_contract(
                path_3, forth_L, moveaxis(self.b_mpo[isite - 1], (1, 2),
                                          (2, 1)),
                forth_R)[self.condition(dag_qnmat, [down_exciton, up_exciton])]

        a_oper_isite = asxp(self.a_oper[isite - 1])
        h_mpo_isite = asxp(self.h_mpo[isite - 1])
        # construct preconditioner
        Idt = xp.identity(h_mpo_isite.shape[1])
        M1_1 = xp.einsum('abca->abc', first_L)
        path_m1 = [([0, 1], "abc, bdef->acdef"), ([1,
                                                   0], "acdef, cegh->adfgh")]
        M1_2 = multi_tensor_contract(path_m1, M1_1, a_oper_isite, a_oper_isite)
        M1_2 = xp.einsum("abcbd->abcd", M1_2)
        M1_3 = xp.einsum('ecde->ecd', first_R)
        M1_4 = xp.einsum('ff->f', Idt)
        path_m1 = [([0, 1], "abcd,ecd->abe"), ([1, 0], "abe,f->abef")]
        pre_M1 = multi_tensor_contract(path_m1, M1_2, M1_3, M1_4)
        pre_M1 = xp.moveaxis(pre_M1, [-2, -1], [-1, -2])[self.condition(
            dag_qnmat, [down_exciton, up_exciton])]

        M2_1 = xp.einsum('aeag->aeg', second_L)
        M2_2 = xp.einsum('eccf->ecf', a_oper_isite)
        M2_3 = xp.einsum('gbbh->gbh', h_mpo_isite)
        M2_4 = xp.einsum('dfdh->dfh', second_R)
        path_m2 = [([0, 1], "aeg,gbh->aebh"), ([2, 0], "aebh,ecf->abchf"),
                   ([1, 0], "abhcf,dfh->abcd")]
        pre_M2 = multi_tensor_contract(path_m2, M2_1, M2_3, M2_2, M2_4)
        pre_M2 = pre_M2[self.condition(dag_qnmat, [down_exciton, up_exciton])]

        M4_1 = xp.einsum('faah->fah', third_L)
        M4_4 = xp.einsum('gddi->gdi', third_R)
        M4_5 = xp.einsum('cc->c', Idt)
        M4_path = [([0, 1], "fah,febg->ahebg"), ([2, 0], "ahebg,hjei->abgji"),
                   ([1, 0], "abgji,gdi->abjd")]
        pre_M4 = multi_tensor_contract(M4_path, M4_1, h_mpo_isite, h_mpo_isite,
                                       M4_4)
        pre_M4 = xp.einsum('abbd->abd', pre_M4)
        pre_M4 = xp.tensordot(pre_M4, M4_5, axes=0)
        pre_M4 = xp.moveaxis(pre_M4, [2, 3], [3, 2])[self.condition(
            dag_qnmat, [down_exciton, up_exciton])]

        M_x = lambda x: asnumpy(
            asxp(x) /
            (pre_M1 + 2 * pre_M2 + pre_M4 + xp.ones(nonzeros) * self.eta**2))
        pre_M = scipy.sparse.linalg.LinearOperator((nonzeros, nonzeros), M_x)

        count = 0

        def hop(x):
            nonlocal count
            count += 1
            dag_struct = asxp(self.dag2mat(xshape, x, dag_qnmat))
            if self.method == "1site":

                M1 = multi_tensor_contract(path_1, first_L, dag_struct,
                                           a_oper_isite, a_oper_isite, first_R)
                M2 = multi_tensor_contract(path_2, second_L, dag_struct,
                                           a_oper_isite, h_mpo_isite, second_R)
                M2 = xp.moveaxis(M2, (1, 2), (2, 1))
                M3 = multi_tensor_contract(path_2, third_L, h_mpo_isite,
                                           dag_struct, h_mpo_isite, third_R)
                M3 = xp.moveaxis(M3, (1, 2), (2, 1))
                cout = M1 + 2 * M2 + M3 + dag_struct * self.eta**2
            cout = cout[self.condition(dag_qnmat, [down_exciton, up_exciton])]
            return asnumpy(cout)

        # Matrix A
        mat_a = scipy.sparse.linalg.LinearOperator((nonzeros, nonzeros),
                                                   matvec=hop)

        x, info = scipy.sparse.linalg.cg(mat_a,
                                         asnumpy(vecb),
                                         tol=1.e-5,
                                         x0=asnumpy(guess),
                                         maxiter=500,
                                         M=pre_M,
                                         atol=0)
        # logger.info(f"linear eq dim: {nonzeros}")
        # logger.info(f'times for hop:{count}')
        self.hop_time.append(count)
        if info != 0:
            logger.warning(
                f"cg not converged, vecb.norm:{xp.linalg.norm(vecb)}")
        l_value = xp.dot(asxp(hop(x)), asxp(x)) - 2 * xp.dot(vecb, asxp(x))

        x = self.dag2mat(xshape, x, dag_qnmat)
        if self.method == "1site":
            x = np.moveaxis(x, [1, 2], [2, 1])
        x, xdim, xqn, compx = self.x_svd(x,
                                         xqnbigl,
                                         xqnbigr,
                                         nexciton,
                                         percent=percent)

        if self.method == "1site":
            self.cv_mpo[isite - 1] = x
            if not self.cv_mpo.to_right:
                if isite != 1:
                    self.cv_mpo[isite - 2] = \
                        tensordot(self.cv_mpo[isite - 2], compx, axes=(-1, 0))
                    self.cv_mpo.qn[isite - 1] = xqn
                    self.cv_mpo.qnidx = isite - 2
                else:
                    self.cv_mpo[isite - 1] = \
                        tensordot(compx, self.cv_mpo[isite - 1], axes=(-1, 0))
                    self.cv_mpo.qnidx = 0
            else:
                if isite != len(self.cv_mpo):
                    self.cv_mpo[isite] = \
                        tensordot(compx, self.cv_mpo[isite], axes=(-1, 0))
                    self.cv_mpo.qn[isite] = xqn
                    self.cv_mpo.qnidx = isite
                else:
                    self.cv_mpo[isite - 1] = \
                        tensordot(self.cv_mpo[isite - 1], compx, axes=(-1, 0))
                    self.cv_mpo.qnidx = self.cv_mpo.site_num - 1

        else:
            if not self.cv_mpo.to_right:
                self.cv_mpo[isite - 2] = compx
                self.cv_mpo[isite - 1] = x
                self.cv_mpo.qnidx = isite - 2
            else:
                self.cv_mpo[isite - 2] = x
                self.cv_mpo[isite - 1] = compx
                self.cv_mpo.qnidx = isite - 1
            self.cv_mpo.qn[isite - 1] = xqn

        return float(l_value)
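For reference, the quantity returned as l_value here (and in the two optimize_cv variants above) is the minimized value of the quadratic functional. A short derivation, assuming a Hermitian positive definite hop operator A:

\[
  \mathcal{L}(x) = x^{\dagger} A x - 2\, b^{\dagger} x,
  \qquad
  \frac{\partial \mathcal{L}}{\partial x^{\dagger}} = 0
  \;\Longrightarrow\; A x = b,
  \qquad
  \mathcal{L}_{\min} = -\, b^{\dagger} A^{-1} b = -\, b^{\dagger} x .
\]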