Example #1
def updatemps(vset, sset, qnset, compset, nexciton, Mmax, percent=0):
    """
    select basis to construct new mps, and complementary mps
    vset, compset is the column vector
    """
    sidx = select_basis(qnset,
                        sset,
                        range(nexciton + 1),
                        Mmax,
                        percent=percent)
    mpsdim = len(sidx)
    # the values need to be set column by column, which is faster on the CPU
    ms = np.zeros((vset.shape[0], mpsdim), dtype=vset.dtype)

    if compset is not None:
        compmps = np.zeros((compset.shape[0], mpsdim), dtype=compset.dtype)
    else:
        compmps = None

    mpsqn = []
    stot = 0.0
    for idim in range(mpsdim):
        ms[:, idim] = vset[:, sidx[idim]].copy()
        if (compset is not None) and sidx[idim] < compset.shape[1]:
            compmps[:, idim] = compset[:, sidx[idim]].copy() * sset[sidx[idim]]
        mpsqn.append(qnset[sidx[idim]])
        stot += sset[sidx[idim]]**2

    # print("discard:", 1.0 - stot)
    if compmps is not None:
        compmps = asxp(compmps)

    return asxp(ms), mpsdim, mpsqn, compmps
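For orientation, the selection step above boils down to keeping the columns with the largest singular values (the per-quantum-number-block bookkeeping is handled by select_basis). A minimal plain-numpy sketch of that truncation, leaving out the quantum numbers and the asxp GPU transfer:

import numpy as np

def truncate_basis(vset, sset, m_max):
    # keep the m_max columns with the largest singular values
    sidx = np.argsort(sset)[::-1][:m_max]
    ms = vset[:, sidx]
    # discarded weight, assuming sum(sset**2) == 1 (normalized state)
    discard = 1.0 - np.sum(sset[sidx] ** 2)
    return ms, discard

a = np.random.rand(8, 8)
u, s, vt = np.linalg.svd(a / np.linalg.norm(a))   # Frobenius-normalized
ms, discard = truncate_basis(u, s, 4)
assert ms.shape == (8, 4) and 0 <= discard < 1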
Example #2
     def hop(x):
         # H*X
         nonlocal count
         count += 1
         
         assert len(x) == xsize
         tda_coeff = reshape_x(x)
 
         res = [np.zeros_like(coeff) if coeff is not None else None for coeff in tda_coeff]
         
         # fix ket and sweep bra and accumulate into res
         for ims in range(site_num):
             if tda_coeff[ims] is None:
                 assert tangent_u[ims] is None
                 continue
             
             # mixed-canonical mps
             mps_tangent = merge(mps_l_cano, mps_r_cano, ims+1)
             mps_tangent[ims] = tensordot(tangent_u[ims], tda_coeff[ims], (-1, 0))
             
             mps_tangent_conj = mps_r_cano.copy()
             environ = Environ(mps_tangent, mpo, "R", mps_conj=mps_tangent_conj)
             
             for ims_conj in range(site_num):
                 ltensor = environ.GetLR(
                     "L", ims_conj-1, mps_tangent, mpo, itensor=None,
                     mps_conj=mps_tangent_conj,
                     method="System"
                 )
                 rtensor = environ.GetLR(
                     "R", ims_conj+1, mps_tangent, mpo, itensor=None,
                     mps_conj=mps_tangent_conj,
                     method="Enviro"
                 )
                 if tda_coeff[ims_conj] is not None:
                     # S-a   l-S
                     #     d
                     # O-b-O-f-O
                     #     e
                     # S-c   k-S
 
                     path = [
                         ([0, 1], "abc, cek -> abek"),
                         ([2, 0], "abek, bdef -> akdf"),
                         ([1, 0], "akdf, lfk -> adl"),
                     ]
                     out = multi_tensor_contract(
                         path, ltensor, asxp(mps_tangent[ims_conj]),
                         asxp(mpo[ims_conj]), rtensor
                     )
                     res[ims_conj] += asnumpy(tensordot(tangent_u[ims_conj], out,
                         ([0,1], [0,1])))
                 
                 # move the mixed-canonical point of mps_conj one site to the right
                 mps_tangent_conj[ims_conj] = mps_l_cano[ims_conj]    
         
         res = [mat for mat in res if mat is not None]
 
         return np.concatenate(res, axis=None)
Example #3
def renormalization_ddm(cstruct, qnbigl, qnbigr, domain, nexciton, Mmax, percent=0):
    """
        get the new mps, mpsdim, mpdqn, complementary mps to get the next guess
        with diagonalize reduced density matrix method (> 1 root)
    """
    nroots = len(cstruct)
    ddm = 0.0
    for iroot in range(nroots):
        if domain == "R":
            ddm += np.tensordot(
                cstruct[iroot],
                cstruct[iroot],
                axes=(range(qnbigl.ndim), range(qnbigl.ndim)),
            )
        else:
            ddm += np.tensordot(
                cstruct[iroot],
                cstruct[iroot],
                axes=(
                    range(qnbigl.ndim, cstruct[0].ndim),
                    range(qnbigl.ndim, cstruct[0].ndim),
                ),
            )
    ddm /= float(nroots)
    if domain == "L":
        Uset, Sset, qnnew = svd_qn.Csvd(ddm, qnbigl, qnbigl, nexciton, ddm=True)
    else:
        Uset, Sset, qnnew = svd_qn.Csvd(ddm, qnbigr, qnbigr, nexciton, ddm=True)
    mps, mpsdim, mpsqn, compmps = updatemps(
        Uset, Sset, qnnew, None, nexciton, Mmax, percent=percent
    )

    if domain == "R":
        return (
            xp.moveaxis(mps.reshape(list(qnbigr.shape) + [mpsdim]), -1, 0),
            mpsdim,
            mpsqn,
            tensordot(
                asxp(cstruct[0]),
                mps.reshape(list(qnbigr.shape) + [mpsdim]),
                axes=(range(qnbigl.ndim, cstruct[0].ndim), range(qnbigr.ndim)),
            ),
        )
    else:
        return (
            mps.reshape(list(qnbigl.shape) + [mpsdim]),
            mpsdim,
            mpsqn,
            tensordot(
                mps.reshape(list(qnbigl.shape) + [mpsdim]),
                asxp(cstruct[0]),
                axes=(range(qnbigl.ndim), range(qnbigl.ndim)),
            ),
        )
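The branchy code above implements state averaging: the reduced density matrices of all roots are averaged, diagonalized, and the most-populated eigenvectors are kept. A minimal sketch of the same idea on flattened coefficient vectors, without the quantum-number-blocked Csvd:

import numpy as np

def renormalize_ddm_plain(cstruct_list, m_max):
    # state-averaged reduced density matrix of normalized coefficient vectors
    ddm = sum(np.outer(c, c) for c in cstruct_list) / len(cstruct_list)
    w, u = np.linalg.eigh(ddm)                # ascending eigenvalues
    keep = np.argsort(w)[::-1][:m_max]        # most-populated states first
    return u[:, keep], 1.0 - np.sum(w[keep])  # kept basis, discarded weight

roots = [np.random.rand(16) for _ in range(3)]
roots = [c / np.linalg.norm(c) for c in roots]
basis, discard = renormalize_ddm_plain(roots, 4)
assert basis.shape == (16, 4)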
Example #4
 def hop(c):
     nonlocal count
     count += 1
     xstruct = asxp(svd_qn.cvec2cmat(xshape, c, qnmat, constrain_qn))
     if self.method == "1site":
         path_a = [([0, 1], "abcd, aef->bcdef"),
                   ([3, 0], "bcdef, begh->cdfgh"),
                   ([2, 0], "cdfgh, cgij->dfhij"),
                   ([1, 0], "dfhij, fhjk->dik")]
         ax1 = multi_tensor_contract(path_a, first_L, xstruct,
                                    a_oper_isite1, a_oper_isite1, first_R)
         ax2 = xstruct
         ax = ax1 + ax2 * self.eta**2
     else:
         path_a = [([0, 1], "abcd, aefg->bcdefg"),
                   ([5, 0], "bcdefg, behi->cdfghi"),
                   ([4, 0], "cdfghi, ifjk->cdghjk"),
                   ([3, 0], "cdghjk, chlm->dgjklm"),
                   ([2, 0], "dgjklm, mjno->dgklno"),
                   ([1, 0], "dgklno, gkop->dlnp")]
         ax1 = multi_tensor_contract(path_a, first_L, xstruct,
                                    a_oper_isite2, a_oper_isite1,
                                    a_oper_isite2, a_oper_isite1,
                                    first_R)
         ax2 = xstruct
         ax = ax1 + ax2 * self.eta**2
     cout = ax[qnmat == constrain_qn].reshape(nonzeros, 1)
     return asnumpy(cout)
Example #5
                def hop(x):
                    nonlocal count
                    count += 1
                    clist = []
                    if x.ndim == 1:
                        clist.append(x)
                    else:
                        for icol in range(x.shape[1]):
                            clist.append(x[:, icol])
                    res = []
                    for c in clist:
                        # convert c to initial structure according to qn pattern
                        cstruct = asxp(cvec2cmat(cshape, c, qnmat, mps.qntot))

                        if omega is None:
                            if method == "1site":
                                # S-a   l-S
                                #     d
                                # O-b-O-f-O
                                #     e
                                # S-c   k-S

                                path = [
                                    ([0, 1], "abc, adl -> bcdl"),
                                    ([2, 0], "bcdl, bdef -> clef"),
                                    ([1, 0], "clef, lfk -> cek"),
                                ]
                                cout = multi_tensor_contract(
                                    path, ltensor, cstruct, cmo[0], rtensor)
                            else:
                                # S-a       l-S
                                #     d   g
                                # O-b-O-f-O-j-O
                                #     e   h
                                # S-c       k-S
                                path = [
                                    ([0, 1], "abc, adgl -> bcdgl"),
                                    ([3, 0], "bcdgl, bdef -> cglef"),
                                    ([2, 0], "cglef, fghj -> clehj"),
                                    ([1, 0], "clehj, ljk -> cehk"),
                                ]
                                cout = multi_tensor_contract(
                                    path,
                                    ltensor,
                                    cstruct,
                                    cmo[0],
                                    cmo[1],
                                    rtensor,
                                )
                        else:
                            cout = expr(cstruct, backend=oe_backend)

                        # convert structure c to 1d according to qn
                        res.append(asnumpy(cout)[qnmat == mps.qntot])

                    if len(res) == 1:
                        return inverse * res[0]
                    else:
                        return inverse * np.stack(res, axis=1)
Example #6
            def hop(c):
                # convert c to initial structure according to qn pattern
                cstruct = asxp(svd_qn.cvec2cmat(cshape, c, qnmat, nexciton))

                if method == "1site":
                    # S-a   l-S
                    #    d
                    # O-b-O-f-O
                    #    e
                    # S-c   k-S

                    path = [
                        ([0, 1], "abc, adl -> bcdl"),
                        ([2, 0], "bcdl, bdef -> clef"),
                        ([1, 0], "clef, lfk -> cek"),
                    ]
                    cout = multi_tensor_contract(
                        path, ltensor, cstruct, mo2, rtensor
                    )
                    # for small matrices, check hermite:
                    # a=tensordot(ltensor, mpo[imps], ((1), (0)))
                    # b=tensordot(a, rtensor, ((4), (1)))
                    # c=b.transpose((0, 2, 4, 1, 3, 5))
                    # d=c.reshape(16, 16)
                else:
                    # S-a       l-S
                    #    d   g
                    # O-b-O-f-O-j-O
                    #    e   h
                    # S-c       k-S
                    path = [
                        ([0, 1], "abc, adgl -> bcdgl"),
                        ([3, 0], "bcdgl, bdef -> cglef"),
                        ([2, 0], "cglef, fghj -> clehj"),
                        ([1, 0], "clehj, ljk -> cehk"),
                    ]
                    cout = multi_tensor_contract(
                        path,
                        ltensor,
                        cstruct,
                        mo1,
                        mo2,
                        rtensor,
                    )
                # convert structure c to 1d according to qn
                return inverse * asnumpy(cout)[qnmat == nexciton]
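The path argument used throughout these examples is a list of ([operand positions], "pairwise einsum step") tuples that multi_tensor_contract executes in order. As a sanity check, the stepwise 1site contraction above reproduces a single einsum over all four tensors (plain numpy, toy bond dimensions):

import numpy as np

ltensor = np.random.rand(2, 3, 2)     # abc
cstruct = np.random.rand(2, 4, 5)     # adl
mo      = np.random.rand(3, 4, 4, 3)  # bdef
rtensor = np.random.rand(5, 3, 5)     # lfk

# the same three steps as the 1site path above, executed by hand
t = np.einsum("abc, adl -> bcdl", ltensor, cstruct)
t = np.einsum("bcdl, bdef -> clef", t, mo)
cout = np.einsum("clef, lfk -> cek", t, rtensor)

# single-shot reference contraction
ref = np.einsum("abc, adl, bdef, lfk -> cek",
                ltensor, cstruct, mo, rtensor)
assert np.allclose(cout, ref)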
Example #7
        def hop(x):
            nonlocal count
            count += 1
            dag_struct = asxp(self.dag2mat(xshape, x, dag_qnmat))
            if self.method == "1site":

                M1 = multi_tensor_contract(path_1, first_L, dag_struct,
                                           a_oper_isite, a_oper_isite, first_R)
                M2 = multi_tensor_contract(path_2, second_L, dag_struct,
                                           a_oper_isite, h_mpo_isite, second_R)
                M2 = xp.moveaxis(M2, (1, 2), (2, 1))
                M3 = multi_tensor_contract(path_2, third_L, h_mpo_isite,
                                           dag_struct, h_mpo_isite, third_R)
                M3 = xp.moveaxis(M3, (1, 2), (2, 1))
                cout = M1 + 2 * M2 + M3 + dag_struct * self.eta**2
            cout = cout[self.condition(dag_qnmat, [down_exciton, up_exciton])]
            return asnumpy(cout)
Example #8
 def initialize_LR(self):
     # initialize the Lpart and Rpart
     first_LR = []
     first_LR.append(np.ones((1, 1, 1, 1)))
     second_LR = []
     second_LR.append(np.ones((1, 1)))
     for isite in range(1, len(self.cv_mps)):
         first_LR.append(None)
         second_LR.append(None)
     first_LR.append(np.ones((1, 1, 1, 1)))
     second_LR.append(np.ones((1, 1)))
     if self.cv_mps.to_right:
         path1 = [([0, 1], "abcd, efa->bcdef"),
                  ([3, 0], "bcdef, gfhb->cdegh"),
                  ([2, 0], "cdegh, ihjc->degij"),
                  ([1, 0], "degij, kjd->egik")]
         path2 = [([0, 1], "ab, cda->bcd"),
                  ([1, 0], "bcd, edb->ce")]
         for isite in range(len(self.cv_mps), 1, -1):
             first_LR[isite - 1] = asnumpy(multi_tensor_contract(
                 path1, first_LR[isite], self.cv_mps[isite - 1],
                 self.a_oper[isite - 1], self.a_oper[isite - 1],
                 self.cv_mps[isite - 1]))
             second_LR[isite - 1] = asnumpy(multi_tensor_contract(
                 path2, second_LR[isite], self.b_mps[isite - 1],
                 self.cv_mps[isite - 1]))
     else:
         path1 = [([0, 1], "abcd, aef->bcdef"),
                  ([3, 0], "bcdef, begh->cdfgh"),
                  ([2, 0], "cdfgh, cgij->dfhij"),
                  ([1, 0], "dfhij, dik->fhjk")]
         path2 = [([0, 1], "ab, acd->bcd"),
                  ([1, 0], "bcd, bce->de")]
         for isite in range(1, len(self.cv_mps)):
             mps_isite = asxp(self.cv_mps[isite - 1])
             first_LR[isite] = asnumpy(multi_tensor_contract(
                 path1, first_LR[isite - 1], mps_isite,
                 self.a_oper[isite - 1], self.a_oper[isite - 1],
                 self.cv_mps[isite - 1]))
             second_LR[isite] = asnumpy(multi_tensor_contract(
                 path2, second_LR[isite - 1], self.b_mps[isite - 1],
                 mps_isite))
     return [first_LR, second_LR]
Example #9
        def hop(c):
            nonlocal count
            count += 1
            xstruct = asxp(cvec2cmat(xshape, c, qnmat, constrain_qn))
            if self.method == "1site":
                path_a = [([0, 1], "abcd, aef->bcdef"),
                          ([3, 0], "bcdef, begh->cdfgh"),
                          ([2, 0], "cdfgh, cgij->dfhij"),
                          ([1, 0], "dfhij, fhjk->dik")]
                ax1 = multi_tensor_contract(path_a, first_L, xstruct,
                                            a_oper_isite1, a_oper_isite1, first_R)
            else:
                # opt_einsum v3.2.1 is not bad: ~10% faster than the hand-designed
                # contraction path for this complicated case, and it consumes a bit less memory.
                # this is the only place in renormalizer where we use opt_einsum now;
                # we keep it here just as a demo.
                # ax1 = oe.contract("abcd, befh, cfgi, hjkn, iklo, mnop, dglp -> aejm",
                #        first_L, a_oper_isite2, a_oper_isite2, a_oper_isite1,
                #        a_oper_isite1, first_R, xstruct)
                if USE_GPU:
                    oe_backend = "cupy"
                else:
                    oe_backend = "numpy"
                ax1 = expr(xstruct, backend=oe_backend)   
                #print(oe.contract_path("abcd, befh, cfgi, hjkn, iklo, mnop, dglp -> aejm",
                #        first_L, a_oper_isite2, a_oper_isite2, a_oper_isite1,
                #        a_oper_isite1, first_R, xstruct))

                #path_a = [([0, 1], "abcd, aefg->bcdefg"),
                #          ([5, 0], "bcdefg, behi->cdfghi"),
                #          ([4, 0], "cdfghi, ifjk->cdghjk"),
                #          ([3, 0], "cdghjk, chlm->dgjklm"),
                #          ([2, 0], "dgjklm, mjno->dgklno"),
                #          ([1, 0], "dgklno, gkop->dlnp")]
                #ax1 = multi_tensor_contract(path_a, first_L, xstruct,
                #                           a_oper_isite2, a_oper_isite1,
                #                           a_oper_isite2, a_oper_isite1,
                #                           first_R)
            ax = ax1 + xstruct * self.eta**2
            cout = ax[qnmat == constrain_qn]
            return asnumpy(cout)
Example #10
    def analysis_dominant_config(self, thresh=0.8, alias=None, tda_m_trunc=20,
            return_compressed_mps=False):
        r""" analyze the dominant configuration of each tda root.
            The algorithm is to compress the tda wavefunction to a rank-1 Hartree
            state and get the ci coefficient of the largest configuration.
            Then, the configuration is subtracted from the tda wavefunction and
            redo the first step to get the second largest configuration. The
            two steps continue until the thresh is achieved.
        
        Parameters
        ----------
        thresh: float, optional
            the threshold to stop the analysis procedure of each root. 
            :math:`\sum_i |c_i|^2 > thresh`. Default is 0.8.
        alias: dict, optional
            The alias of each site. For example, ``alias={0:"v_0", 1:"v_2",
            2:"v_1"}``. Default is `None`. 
        tda_m_trunc: int, optional
            the ``m`` to compress a tda wavefunction. Default is 20.
        return_compressed_mps: bool, optional
            If ``True``, return the tda excited state as a single compressed
            mps. Default is `False`.
        
        Returns
        -------
        configs: dict
            The dominant configuration of each root.
            ``configs = {0:[(config0, config_name0, ci_coeff0),(config1,
            config_name1, ci_coeff1),...], 1:...}``
        compressed_mps: List[renormalizer.mps.Mps]
            see the description in ``return_compressed_mps``.
        
        Note
        ----
        The compressed_mps is an approximation of the tda wavefunction with
        ``m=tda_m_trunc``.
        """

        mps_l_cano, mps_r_cano, tangent_u, tda_coeff_list = self.wfn
            
        if alias is not None:
            assert len(alias) == mps_l_cano.site_num
        
        compressed_mps = []
        for iroot in range(self.nroots):
            logger.info(f"iroot: {iroot}")
            tda_coeff = tda_coeff_list[iroot]
            mps_tangent_list = []
            weight = []
            for ims in range(mps_l_cano.site_num):
                if tangent_u[ims] is None:
                    assert tda_coeff[ims] is None
                    continue
                weight.append(np.sum(tda_coeff[ims]**2))
                mps_tangent = merge(mps_l_cano, mps_r_cano, ims+1) 
                mps_tangent[ims] = asnumpy(tensordot(tangent_u[ims],
                    tda_coeff[ims],[-1,0]))
                mps_tangent_list.append(mps_tangent)
            
            assert np.allclose(np.sum(weight), 1)
            # sort the mps_tangent from large weight to small weight
            mps_tangent_list = [mps_tangent_list[i] for i in np.argsort(weight,axis=None)[::-1]]

            coeff_square_sum = 0
            mps_delete = None
            
            config_visited = []
            while coeff_square_sum < thresh:
                if mps_delete is None:
                    # first compress it to M=tda_m_trunc
                    mps_rank1 = compressed_sum(mps_tangent_list, batchsize=5,
                            temp_m_trunc=tda_m_trunc)
                else:
                    mps_rank1 = compressed_sum([mps_delete] + mps_tangent_list,
                            batchsize=5, temp_m_trunc=tda_m_trunc)
                if coeff_square_sum == 0 and return_compressed_mps:
                    compressed_mps.append(mps_rank1.copy())       
                mps_rank1 = mps_rank1.canonicalise().compress(temp_m_trunc=1)
                
                # get config with the largest coeff
                config = []
                for ims, ms in enumerate(mps_rank1):
                    ms = ms.array.flatten()**2
                    quanta = int(np.argmax(ms))
                    config.append(quanta)
               
                # check if the config has been visited
                if config in config_visited:
                    break
                
                config_visited.append(config)

                ci_coeff_list = []
                for mps_tangent in mps_tangent_list:
                    sentinel = xp.ones((1,1))
                    for ims, ms in enumerate(mps_tangent):
                        sentinel = sentinel.dot(asxp(ms[:,config[ims],:]))
                    ci_coeff_list.append(float(sentinel[0,0]))
                ci_coeff = np.sum(ci_coeff_list)
                coeff_square_sum += ci_coeff**2
                
                if alias is not None:
                    config_name = [f"{quanta}"+f"{alias[isite]}" for isite, quanta
                            in enumerate(config) if quanta != 0]
                    config_name = " ".join(config_name)
                    self.configs[iroot].append((config, config_name, ci_coeff))
                    logger.info(f"config: {config}, {config_name}")
                else:
                    self.configs[iroot].append((config, ci_coeff))
                    logger.info(f"config: {config}")

                logger.info(f"ci_coeff: {ci_coeff}, weight:{ci_coeff**2}")

                condition = {dof:config[idof] for idof, dof in
                        enumerate(self.model.dofs)}
                mps_delete_increment = Mps.hartree_product_state(self.model, condition).scale(-ci_coeff)
                if mps_delete is None:
                    mps_delete = mps_delete_increment
                else:
                    mps_delete = mps_delete + mps_delete_increment

            logger.info(f"coeff_square_sum: {coeff_square_sum}")
        
        return self.configs, compressed_mps
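Stripped of the MPS machinery, the while loop above is a greedy peel-off: find the dominant configuration, record its ci coefficient, subtract that configuration, and repeat until the captured weight exceeds thresh. The same logic on a dense state vector (the rank-1 MPS compression in the real code replaces the dense argmax):

import numpy as np

def dominant_configs(psi, thresh=0.8):
    # psi: normalized 1-D wavefunction over product configurations
    psi = psi.copy()
    configs, weight = [], 0.0
    while weight < thresh:
        idx = int(np.argmax(psi ** 2))   # dominant configuration
        ci = psi[idx]
        configs.append((idx, ci))
        weight += ci ** 2
        psi[idx] -= ci                   # subtract it, like mps_delete above
    return configs, weight

psi = np.random.rand(16)
configs, weight = dominant_configs(psi / np.linalg.norm(psi))
assert weight > 0.8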
Example #11
    def optimize_cv(self, lr_group, direction, isite, num, percent=0.0):
        # restrict the exciton quantum number according to the spectratype
        first_LR = lr_group[0]
        second_LR = lr_group[1]
        if self.spectratype == "abs":
            constrain_qn = 1
        else:
            constrain_qn = 0
        # this function aims at solving the working equation of ZT CV-DMRG
        # L = <CV|op_a|CV> + 2\eta<op_b|CV>; take the derivative with respect to the local CV
        # S-a-S-e-S                          S-a-S-d-S
        # |   d   |                          |   |   |
        # O-b-O-g-O  * CV[isite-1]  = -\eta  |   c   |
        # |   f   |                          |   |   |
        # S-c- -h-S                          S-b- -e-S

        # note: the linear equation to solve is a_mat * x = vec_b
        # the environment matrix

        if self.method == "1site":
            addlist = [isite - 1]
            first_L = asxp(first_LR[isite - 1])
            first_R = asxp(first_LR[isite])
            second_L = asxp(second_LR[isite - 1])
            second_R = asxp(second_LR[isite])
        else:
            addlist = [isite - 2, isite - 1]
            first_L = asxp(first_LR[isite - 2])
            first_R = asxp(first_LR[isite])
            second_L = asxp(second_LR[isite - 2])
            second_R = asxp(second_LR[isite])

        if direction == 'left':
            system = 'R'
        else:
            system = 'L'

        # this part is similar to the ground state calculation
        qnmat, qnbigl, qnbigr = svd_qn.construct_qnmat(
            self.cv_mps, self.mpo.pbond_list,
            addlist, self.method, system)
        xshape = qnmat.shape
        nonzeros = np.sum(qnmat == constrain_qn)

        if self.method == '1site':
            guess = self.cv_mps[isite - 1][qnmat == constrain_qn].reshape(nonzeros, 1)
            path_b = [([0, 1], "ab, acd->bcd"),
                      ([1, 0], "bcd, de->bce")]
            vec_b = multi_tensor_contract(
                path_b, second_L, self.b_oper[isite - 1], second_R
            )[qnmat == constrain_qn].reshape(nonzeros, 1)
        else:
            guess = tensordot(
                self.cv_mps[isite - 2], self.cv_mps[isite - 1], axes=(-1, 0)
            )
            guess = guess[qnmat == constrain_qn].reshape(nonzeros, 1)
            path_b = [([0, 1], "ab, acd->bcd"),
                      ([2, 0], "bcd, def->bcef"),
                      ([1, 0], "bcef, fg->bceg")]
            vec_b = multi_tensor_contract(
                path_b, second_L, self.b_oper[isite - 2],
                self.b_oper[isite - 1], second_R
            )[qnmat == constrain_qn].reshape(nonzeros, 1)

        if self.method == "2site":
            a_oper_isite2 = asxp(self.a_oper[isite - 2])
        else:
            a_oper_isite2 = None
        a_oper_isite1 = asxp(self.a_oper[isite - 1])

        # use the diagonal part of mat_a to construct the preconditioner for the linear solver
        if self.method == "1site":
            part_l = xp.einsum('abca->abc', first_L)
            part_r = xp.einsum('hfgh->hfg', first_R)
            path_pre = [([0, 1], "abc, bdef->acdef"),
                        ([1, 0], "acdef, hfg->acdehg")]
            pre_a_mat1 = multi_tensor_contract(path_pre, part_l, a_oper_isite1,
                                               part_r)
            path_pre2 = [([0, 1], "acdehg, ceig->adhi")]
            pre_a_mat1 = multi_tensor_contract(path_pre2, pre_a_mat1, a_oper_isite1)
            pre_a_mat1 = xp.einsum('adhd->adh', pre_a_mat1)[qnmat == constrain_qn]
            # pre_a_mat1 = xp.einsum('abca, bdef, cedg, hfgh->adh', first_L, a_oper_isite1,
            #                        a_oper_isite1, first_R)[qnmat == constrain_qn]
            cv_shape = self.cv_mps[isite - 1].shape
            pre_a_mat2 = xp.ones(cv_shape)[qnmat == constrain_qn]
            pre_a_mat = pre_a_mat1 + pre_a_mat2 * self.eta**2
        else:
            pre_a_mat1 = xp.einsum(
                'abca, bdef, cedg, fhij, gihk, ljkl->adhl', first_L, a_oper_isite2, a_oper_isite2,
                a_oper_isite1, a_oper_isite1, first_R)[qnmat == constrain_qn]
            cv_shape1 = self.cv_mps[isite - 2].shape
            cv_shape2 = self.cv_mps[isite - 1].shape
            new_shape = [cv_shape1[0], cv_shape1[1], cv_shape2[1], cv_shape2[2]]
            pre_a_mat2 = xp.ones(new_shape)[qnmat == constrain_qn]
            pre_a_mat = pre_a_mat1 + pre_a_mat2 * self.eta**2

        pre_a_mat = np.diag(1./asnumpy(pre_a_mat))

        count = 0

        def hop(c):
            nonlocal count
            count += 1
            xstruct = asxp(svd_qn.cvec2cmat(xshape, c, qnmat, constrain_qn))
            if self.method == "1site":
                path_a = [([0, 1], "abcd, aef->bcdef"),
                          ([3, 0], "bcdef, begh->cdfgh"),
                          ([2, 0], "cdfgh, cgij->dfhij"),
                          ([1, 0], "dfhij, fhjk->dik")]
                ax1 = multi_tensor_contract(path_a, first_L, xstruct,
                                           a_oper_isite1, a_oper_isite1, first_R)
                ax2 = xstruct
                ax = ax1 + ax2 * self.eta**2
            else:
                path_a = [([0, 1], "abcd, aefg->bcdefg"),
                          ([5, 0], "bcdefg, behi->cdfghi"),
                          ([4, 0], "cdfghi, ifjk->cdghjk"),
                          ([3, 0], "cdghjk, chlm->dgjklm"),
                          ([2, 0], "dgjklm, mjno->dgklno"),
                          ([1, 0], "dgklno, gkop->dlnp")]
                ax1 = multi_tensor_contract(path_a, first_L, xstruct,
                                           a_oper_isite2, a_oper_isite1,
                                           a_oper_isite2, a_oper_isite1,
                                           first_R)
                ax2 = xstruct
                ax = ax1 + ax2 * self.eta**2
            cout = ax[qnmat == constrain_qn].reshape(nonzeros, 1)
            return asnumpy(cout)

        mat_a = scipy.sparse.linalg.LinearOperator((nonzeros, nonzeros), matvec=hop)
        # for the first two sweeps, do not use the previous matrix as the initial
        # guess; at the initial stage it is far from the optimized one
        if num in [1, 2]:
            x, info = scipy.sparse.linalg.cg(mat_a, asnumpy(vec_b), atol=0)
        else:
            x, info = scipy.sparse.linalg.cg(mat_a, asnumpy(vec_b), tol=1.e-5,
                                             x0=guess, M=pre_a_mat, atol=0)
        # logger.info(f'hop times:{count}')
        self.hop_time.append(count)
        if info != 0:
            logger.info(f"iteration solver not converged")

        # the value of the functional L
        l_value = np.inner(hop(x).reshape(1, nonzeros), x.reshape(1, nonzeros)
                     ) - 2 * np.inner(
                         asnumpy(vec_b).reshape(1, nonzeros), x.reshape(1, nonzeros))
        xstruct = svd_qn.cvec2cmat(xshape, x, qnmat, constrain_qn)
        x, xdim, xqn, compx = \
            solver.renormalization_svd(xstruct, qnbigl, qnbigr, system,
                                       constrain_qn, self.m_max, percent)
        if self.method == "1site":
            self.cv_mps[isite - 1] = x
            if direction == "left":
                if isite != 1:
                    self.cv_mps[isite - 2] = tensordot(
                        self.cv_mps[isite - 2], compx, axes=(-1, 0))
                    self.cv_mps.qn[isite - 1] = xqn
                else:
                    self.cv_mps[isite - 1] = tensordot(
                        compx, self.cv_mps[isite - 1], axes=(-1, 0))
                    self.cv_mps.qn[isite - 1] = [0]
            elif direction == "right":
                if isite != len(self.cv_mps):
                    self.cv_mps[isite] = tensordot(
                        compx, self.cv_mps[isite], axes=(-1, 0))
                    self.cv_mps.qn[isite] = xqn
                else:
                    self.cv_mps[isite - 1] = tensordot(
                        self.cv_mps[isite - 1], compx, axes=(-1, 0))
                    self.cv_mps.qn[isite] = [0]
        else:
            if direction == "left":
                self.cv_mps[isite - 1] = x
                self.cv_mps[isite - 2] = compx
            else:
                self.cv_mps[isite - 2] = x
                self.cv_mps[isite - 1] = compx
            self.cv_mps.qn[isite - 1] = xqn
        return l_value[0][0]
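Examples #4, #9, #11 and #13 share one solver pattern: the tensor contraction is wrapped in a matvec (hop), the diagonal of mat_a provides a Jacobi preconditioner, and both are handed to scipy's conjugate-gradient solver. A self-contained sketch of that pattern with a stand-in diagonal operator:

import numpy as np
import scipy.sparse.linalg

n = 50
diag = np.linspace(1.0, 10.0, n)
eta = 0.5

def hop(x):
    # stand-in for the tensor contraction: (A + eta**2 * I) @ x
    return diag * x + eta ** 2 * x

mat_a = scipy.sparse.linalg.LinearOperator((n, n), matvec=hop)
# Jacobi preconditioner: divide by the diagonal, as pre_a_mat does above
pre_m = scipy.sparse.linalg.LinearOperator(
    (n, n), matvec=lambda x: x / (diag + eta ** 2))

vec_b = np.random.rand(n)
x, info = scipy.sparse.linalg.cg(mat_a, vec_b, M=pre_m, atol=0)
assert info == 0 and np.allclose(hop(x), vec_b, atol=1e-4)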
Example #12
    def optimize_cv(self, lr_group, direction, isite, num, percent=0):
        if self.spectratype == "abs":
            # quantum number restriction, |1><0|
            up_exciton, down_exciton = 1, 0
        elif self.spectratype == "emi":
            # quantum number restriction, |0><1|
            up_exciton, down_exciton = 0, 1
        nexciton = 1
        first_LR, second_LR, third_LR, forth_LR = lr_group

        if self.method == "1site":
            add_list = [isite - 1]
            first_L = asxp(first_LR[isite - 1])
            first_R = asxp(first_LR[isite])
            second_L = asxp(second_LR[isite - 1])
            second_R = asxp(second_LR[isite])
            third_L = asxp(third_LR[isite - 1])
            third_R = asxp(third_LR[isite])
            forth_L = asxp(forth_LR[isite - 1])
            forth_R = asxp(forth_LR[isite])
        else:
            add_list = [isite - 2, isite - 1]
            first_L = asxp(first_LR[isite - 2])
            first_R = asxp(first_LR[isite])
            second_L = asxp(second_LR[isite - 2])
            second_R = asxp(second_LR[isite])
            third_L = asxp(third_LR[isite - 2])
            third_R = asxp(third_LR[isite])
            forth_L = asxp(forth_LR[isite - 2])
            forth_R = asxp(forth_LR[isite])

        xqnmat, xqnbigl, xqnbigr, xshape = \
            self.construct_X_qnmat(add_list, direction)
        dag_qnmat, dag_qnbigl, dag_qnbigr = self.swap(xqnmat, xqnbigl, xqnbigr,
                                                      direction)

        nonzeros = np.sum(self.condition(dag_qnmat,
                                         [down_exciton, up_exciton]))

        if self.method == "1site":
            guess = moveaxis(self.cv_mpo[isite - 1], (1, 2), (2, 1))
        else:
            guess = tensordot(moveaxis(self.cv_mpo[isite - 2], (1, 2), (2, 1)),
                              moveaxis(self.cv_mpo[isite - 1], (1, 2), (2, 1)),
                              axes=(-1, 0))
        guess = guess[self.condition(dag_qnmat,
                                     [down_exciton, up_exciton])].reshape(
                                         nonzeros, 1)

        if self.method == "1site":
            # define dot path
            path_1 = [([0, 1], "abc, adef -> bcdef"),
                      ([2, 0], "bcdef, begh -> cdfgh"),
                      ([1, 0], "cdfgh, fhi -> cdgi")]
            path_2 = [([0, 1], "abcd, aefg -> bcdefg"),
                      ([3, 0], "bcdefg, bfhi -> cdeghi"),
                      ([2, 0], "cdeghi, djek -> cghijk"),
                      ([1, 0], "cghijk, gilk -> chjl")]
            path_4 = [([0, 1], "ab, acde -> bcde"),
                      ([1, 0], "bcde, ef -> bcdf")]

            vecb = multi_tensor_contract(
                path_4, forth_L,
                moveaxis(self.a_ket_mpo[isite - 1], (1, 2), (2, 1)), forth_R)
            vecb = -self.eta * vecb

        a_oper_isite = asxp(self.a_oper[isite - 1])
        b_oper_isite = asxp(self.b_oper[isite - 1])
        h_mpo_isite = asxp(self.h_mpo[isite - 1])
        # construct preconditioner
        Idt = xp.identity(h_mpo_isite.shape[1])
        M1_1 = xp.einsum('aea->ae', first_L)
        M1_2 = xp.einsum('eccf->ecf', a_oper_isite)
        M1_3 = xp.einsum('dfd->df', first_R)
        M1_4 = xp.einsum('bb->b', Idt)
        path_m1 = [([0, 1], "ae,b->aeb"), ([2, 0], "aeb,ecf->abcf"),
                   ([1, 0], "abcf, df->abcd")]
        pre_M1 = multi_tensor_contract(path_m1, M1_1, M1_4, M1_2, M1_3)
        pre_M1 = pre_M1[self.condition(dag_qnmat, [down_exciton, up_exciton])]

        M2_1 = xp.einsum('aeag->aeg', second_L)
        M2_2 = xp.einsum('eccf->ecf', b_oper_isite)
        M2_3 = xp.einsum('gbbh->gbh', h_mpo_isite)
        M2_4 = xp.einsum('dfdh->dfh', second_R)
        path_m2 = [([0, 1], "aeg,gbh->aebh"), ([2, 0], "aebh,ecf->abchf"),
                   ([1, 0], "abhcf,dfh->abcd")]
        pre_M2 = multi_tensor_contract(path_m2, M2_1, M2_3, M2_2, M2_4)
        pre_M2 = pre_M2[self.condition(dag_qnmat, [down_exciton, up_exciton])]

        M4_1 = xp.einsum('faah->fah', third_L)
        M4_4 = xp.einsum('gddi->gdi', third_R)
        M4_5 = xp.einsum('cc->c', Idt)
        M4_path = [([0, 1], "fah,febg->ahebg"), ([2, 0], "ahebg,hjei->abgji"),
                   ([1, 0], "abgji,gdi->abjd")]
        pre_M4 = multi_tensor_contract(M4_path, M4_1, h_mpo_isite, h_mpo_isite,
                                       M4_4)
        pre_M4 = xp.einsum('abbd->abd', pre_M4)
        pre_M4 = xp.tensordot(pre_M4, M4_5, axes=0)
        pre_M4 = xp.moveaxis(pre_M4, [2, 3], [3, 2])[self.condition(
            dag_qnmat, [down_exciton, up_exciton])]

        pre_M = (pre_M1 + 2 * pre_M2 + pre_M4)

        indices = np.array(range(nonzeros))
        indptr = np.array(range(nonzeros + 1))
        pre_M = scipy.sparse.csc_matrix((asnumpy(pre_M), indices, indptr),
                                        shape=(nonzeros, nonzeros))

        M_x = lambda x: scipy.sparse.linalg.spsolve(pre_M, x)
        M = scipy.sparse.linalg.LinearOperator((nonzeros, nonzeros), M_x)

        count = 0

        def hop(x):
            nonlocal count
            count += 1
            dag_struct = asxp(self.dag2mat(xshape, x, dag_qnmat, direction))
            if self.method == "1site":

                M1 = multi_tensor_contract(path_1, first_L, dag_struct,
                                           a_oper_isite, first_R)
                M2 = multi_tensor_contract(path_2, second_L, dag_struct,
                                           b_oper_isite, h_mpo_isite, second_R)
                M2 = xp.moveaxis(M2, (1, 2), (2, 1))
                M3 = multi_tensor_contract(path_2, third_L, h_mpo_isite,
                                           dag_struct, h_mpo_isite, third_R)
                M3 = xp.moveaxis(M3, (1, 2), (2, 1))
                cout = M1 + 2 * M2 + M3
            cout = cout[self.condition(dag_qnmat,
                                       [down_exciton, up_exciton])].reshape(
                                           nonzeros, 1)
            return asnumpy(cout)

        # Matrix A and Vector b
        vecb = asnumpy(vecb)[self.condition(
            dag_qnmat, [down_exciton, up_exciton])].reshape(nonzeros, 1)
        mata = scipy.sparse.linalg.LinearOperator((nonzeros, nonzeros),
                                                  matvec=hop)

        # conjugate gradient method
        # x, info = scipy.sparse.linalg.cg(MatA, VecB, atol=0)
        if num == 1:
            x, info = scipy.sparse.linalg.cg(mata,
                                             vecb,
                                             tol=1.e-5,
                                             maxiter=500,
                                             M=M,
                                             atol=0)
        else:
            x, info = scipy.sparse.linalg.cg(mata,
                                             vecb,
                                             tol=1.e-5,
                                             x0=guess,
                                             maxiter=500,
                                             M=M,
                                             atol=0)
        # logger.info(f"linear eq dim: {nonzeros}")
        # logger.info(f'times for hop:{count}')
        self.hop_time.append(count)
        if info != 0:
            logger.warning(
                f"cg not converged, vecb.norm:{np.linalg.norm(vecb)}")
        l_value = np.inner(
            hop(x).reshape(1, nonzeros), x.reshape(1, nonzeros)) - \
            2 * np.inner(vecb.reshape(1, nonzeros), x.reshape(1, nonzeros))

        x = self.dag2mat(xshape, x, dag_qnmat, direction)
        if self.method == "1site":
            x = np.moveaxis(x, [1, 2], [2, 1])
        x, xdim, xqn, compx = self.x_svd(x,
                                         xqnbigl,
                                         xqnbigr,
                                         nexciton,
                                         direction,
                                         percent=percent)

        if self.method == "1site":
            self.cv_mpo[isite - 1] = x
            if direction == "left":
                if isite != 1:
                    self.cv_mpo[isite - 2] = \
                        tensordot(self.cv_mpo[isite - 2], compx, axes=(-1, 0))
                    self.cv_mpo.qn[isite - 1] = xqn
                else:
                    self.cv_mpo[isite - 1] = \
                        tensordot(compx, self.cv_mpo[isite - 1], axes=(-1, 0))
            elif direction == "right":
                if isite != len(self.cv_mpo):
                    self.cv_mpo[isite] = \
                        tensordot(compx, self.cv_mpo[isite], axes=(-1, 0))
                    self.cv_mpo.qn[isite] = xqn
                else:
                    self.cv_mpo[isite - 1] = \
                        tensordot(self.cv_mpo[isite - 1], compx, axes=(-1, 0))

        else:
            if direction == "left":
                self.cv_mpo[isite - 2] = compx
                self.cv_mpo[isite - 1] = x
            else:
                self.cv_mpo[isite - 2] = x
                self.cv_mpo[isite - 1] = compx
            self.cv_mpo.qn[isite - 1] = xqn

        return l_value[0][0]
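One detail worth flagging in the example above: the preconditioner pre_M is stored as a CSC matrix whose indices and indptr arrays encode a purely diagonal matrix, one entry per column. scipy.sparse.diags builds the same object directly; a quick equivalence check:

import numpy as np
import scipy.sparse

d = np.random.rand(6)
n = len(d)
# one entry per column at row == column, i.e. a diagonal matrix
m1 = scipy.sparse.csc_matrix((d, np.arange(n), np.arange(n + 1)),
                             shape=(n, n))
m2 = scipy.sparse.diags(d, format="csc")
assert (m1 != m2).nnz == 0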
Example #13
    def optimize_cv(self, lr_group, isite, percent=0.0):
        # restrict the exciton quantum number according to the spectratype
        first_LR = lr_group[0]
        second_LR = lr_group[1]
        constrain_qn = self.cv_mps.qntot
        # this function aims at solving the working equation of ZT CV-DMRG
        # L = <CV|op_a|CV> + 2\eta<op_b|CV>; take the derivative with respect to the local CV
        # S-a-S-e-S                          S-a-S-d-S
        # |   d   |                          |   |   |
        # O-b-O-g-O  * CV[isite-1]  = -\eta  |   c   |
        # |   f   |                          |   |   |
        # S-c- -h-S                          S-b- -e-S

        # note: the linear equation to solve is a_mat * x = vec_b
        # the environment matrix

        if self.method == "1site":
            cidx = [isite - 1]
            first_L = asxp(first_LR[isite - 1])
            first_R = asxp(first_LR[isite])
            second_L = asxp(second_LR[isite - 1])
            second_R = asxp(second_LR[isite])
        else:
            cidx = [isite - 2, isite - 1]
            first_L = asxp(first_LR[isite - 2])
            first_R = asxp(first_LR[isite])
            second_L = asxp(second_LR[isite - 2])
            second_R = asxp(second_LR[isite])

        # this part is similar to the ground state calculation
        qnbigl, qnbigr, qnmat = self.cv_mps._get_big_qn(cidx)
        xshape = qnmat.shape
        nonzeros = int(np.sum(qnmat == constrain_qn))
        if self.method == '1site':
            guess = self.cv_mps[isite - 1][qnmat == constrain_qn]
            path_b = [([0, 1], "ab, acd->bcd"),
                      ([1, 0], "bcd, de->bce")]
            vec_b = multi_tensor_contract(
                path_b, second_L, self.b_mps[isite - 1], second_R
            )[qnmat == constrain_qn]
        else:
            guess = tensordot(
                self.cv_mps[isite - 2], self.cv_mps[isite - 1], axes=(-1, 0)
            )[qnmat == constrain_qn]
            path_b = [([0, 1], "ab, acd->bcd"),
                      ([2, 0], "bcd, def->bcef"),
                      ([1, 0], "bcef, fg->bceg")]
            vec_b = multi_tensor_contract(
                path_b, second_L, self.b_mps[isite - 2],
                self.b_mps[isite - 1], second_R
            )[qnmat == constrain_qn]

        if self.method == "2site":
            a_oper_isite2 = asxp(self.a_oper[isite - 2])
        else:
            a_oper_isite2 = None
        a_oper_isite1 = asxp(self.a_oper[isite - 1])

        # use the diagonal part of mat_a to construct the preconditioner
        # for the linear solver
        part_l = xp.einsum('abca->abc', first_L)
        part_r = xp.einsum('hfgh->hfg', first_R)
        if self.method == "1site":
            #  S-a   d    h-S
            #  O-b  -O-   f-O
            #  |     e      |
            #  O-c  -O-   g-O
            #  S-a   i    h-S
            path_pre = [([0, 1], "abc, bdef -> acdef"),
                        ([1, 0], "acdef, ceig -> adfig")]
            a_diag = multi_tensor_contract(path_pre, part_l, a_oper_isite1,
                                           a_oper_isite1)
            a_diag = xp.einsum("adfdg -> adfg", a_diag)
            a_diag = xp.tensordot(a_diag, part_r,
                                  axes=([2, 3], [1, 2]))[qnmat == constrain_qn]
        else:
            #  S-a   d     k   h-S
            #  O-b  -O- j -O-  f-O
            #  |     e     l   |
            #  O-c  -O- m -O-  g-O
            #  S-a   i     n   h-S
            # first left half, second right half, last contraction

            path_pre = [([0, 1], "abc, bdej -> acdej"),
                        ([1, 0], "acdej, ceim -> adjim")]
            a_diagl = multi_tensor_contract(path_pre, part_l, a_oper_isite2,
                                            a_oper_isite2)
            a_diagl = xp.einsum("adjdm -> adjm", a_diagl)

            path_pre = [([0, 1], "hfg, jklf -> hgjkl"),
                        ([1, 0], "hgjkl, mlng -> hjkmn")]
            a_diagr = multi_tensor_contract(path_pre, part_r, a_oper_isite1,
                                            a_oper_isite1)
            a_diagr = xp.einsum("hjkmk -> khjm", a_diagr)

            a_diag = xp.tensordot(
                a_diagl, a_diagr, axes=([2, 3], [2, 3]))[qnmat == constrain_qn]

        a_diag = asnumpy(a_diag + xp.ones(nonzeros) * self.eta**2)
        M_x = lambda x: x / a_diag
        pre_M = scipy.sparse.linalg.LinearOperator((nonzeros, nonzeros), M_x)

        count = 0

        # cache oe path
        if self.method == "2site":
            expr = oe.contract_expression(
                "abcd, befh, cfgi, hjkn, iklo, mnop, dglp -> aejm",
                first_L, a_oper_isite2, a_oper_isite2, a_oper_isite1,
                a_oper_isite1, first_R, xshape,
                constants=[0, 1, 2, 3, 4, 5])

        def hop(c):
            nonlocal count
            count += 1
            xstruct = asxp(cvec2cmat(xshape, c, qnmat, constrain_qn))
            if self.method == "1site":
                path_a = [([0, 1], "abcd, aef->bcdef"),
                          ([3, 0], "bcdef, begh->cdfgh"),
                          ([2, 0], "cdfgh, cgij->dfhij"),
                          ([1, 0], "dfhij, fhjk->dik")]
                ax1 = multi_tensor_contract(path_a, first_L, xstruct,
                                            a_oper_isite1, a_oper_isite1, first_R)
            else:
                # opt_einsum v3.2.1 is not bad: ~10% faster than the hand-designed
                # contraction path for this complicated case, and it consumes a bit less memory.
                # this is the only place in renormalizer where we use opt_einsum now;
                # we keep it here just as a demo.
                # ax1 = oe.contract("abcd, befh, cfgi, hjkn, iklo, mnop, dglp -> aejm",
                #        first_L, a_oper_isite2, a_oper_isite2, a_oper_isite1,
                #        a_oper_isite1, first_R, xstruct)
                if USE_GPU:
                    oe_backend = "cupy"
                else:
                    oe_backend = "numpy"
                ax1 = expr(xstruct, backend=oe_backend)   
                #print(oe.contract_path("abcd, befh, cfgi, hjkn, iklo, mnop, dglp -> aejm",
                #        first_L, a_oper_isite2, a_oper_isite2, a_oper_isite1,
                #        a_oper_isite1, first_R, xstruct))

                #path_a = [([0, 1], "abcd, aefg->bcdefg"),
                #          ([5, 0], "bcdefg, behi->cdfghi"),
                #          ([4, 0], "cdfghi, ifjk->cdghjk"),
                #          ([3, 0], "cdghjk, chlm->dgjklm"),
                #          ([2, 0], "dgjklm, mjno->dgklno"),
                #          ([1, 0], "dgklno, gkop->dlnp")]
                #ax1 = multi_tensor_contract(path_a, first_L, xstruct,
                #                           a_oper_isite2, a_oper_isite1,
                #                           a_oper_isite2, a_oper_isite1,
                #                           first_R)
            ax = ax1 + xstruct * self.eta**2
            cout = ax[qnmat == constrain_qn]
            return asnumpy(cout)

        mat_a = scipy.sparse.linalg.LinearOperator((nonzeros, nonzeros),
                                                   matvec=hop)

        x, info = scipy.sparse.linalg.cg(mat_a, asnumpy(vec_b), tol=1.e-5,
                                         x0=asnumpy(guess),
                                         M=pre_M, atol=0)

        self.hop_time.append(count)
        if info != 0:
            logger.info(f"iteration solver not converged")
        # the value of the functional L
        l_value = xp.dot(asxp(hop(x)), asxp(x)) - 2 * xp.dot(vec_b, asxp(x))
        xstruct = cvec2cmat(xshape, x, qnmat, constrain_qn)
        self.cv_mps._update_mps(xstruct, cidx, qnbigl, qnbigr, self.m_max, percent)
        
        return float(l_value)
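The cached oe.contract_expression above is worth noting: the six environment/operator tensors are declared constants (the arrays themselves are baked into the expression), while the varying operand is given only as a shape, so the contraction path is computed once and hop merely re-evaluates it. A minimal demo of that caching pattern (toy shapes, assuming opt_einsum is installed):

import numpy as np
import opt_einsum as oe

a = np.random.rand(4, 5)
b = np.random.rand(5, 6)

# positions 0 and 1 are constants (arrays); position 2 is variable,
# so only its shape is supplied; the path is found once and reused
expr = oe.contract_expression("ij, jk, kl -> il", a, b, (6, 3),
                              constants=[0, 1])

for _ in range(3):          # e.g. successive hop calls
    x = np.random.rand(6, 3)
    out = expr(x)           # backend="cupy" would run it on GPU
    assert np.allclose(out, a @ b @ x)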
Example #14
    def kernel(self, restart=False, include_psi0=False):
        r"""calculate the roots

        Parameters
        ----------
        restart: bool, optional
            whether to restart from the formerly converged roots. Default is
            ``False``. If ``restart = True``, ``include_psi0`` must be the same
            as in the former calculation.
        include_psi0: bool, optional
            whether the basis of the Hamiltonian includes the ground state
            :math:`\Psi_0`. Default is ``False``.

        Returns
        -------
        e: np.ndarray
            the energies of the states. If ``include_psi0 = True``, the first
            element is the ground state energy; otherwise it is the energy of
            the first excited state.

        """
        # right canonical mps
        mpo = self.hmpo
        nroots = self.nroots
        algo = self.algo
        site_num = mpo.site_num

        if not restart:
            # make sure that M is not redundant near the edge
            mps = self.mps.ensure_right_canon().canonicalise().normalize().canonicalise()
            logger.debug(f"reference mps shape, {mps}")
            mps_r_cano = mps.copy()
            assert mps.to_right 
            
            tangent_u = []
    
            for ims, ms in enumerate(mps):
                
                shape = list(ms.shape)
                u, s, vt = scipy.linalg.svd(ms.l_combine(), full_matrices=True)
                rank = len(s)
                if include_psi0 and ims == site_num-1: 
                    tangent_u.append(u.reshape(shape[:-1]+[-1]))
                else:
                    if rank < u.shape[1]:
                        tangent_u.append(u[:,rank:].reshape(shape[:-1]+[-1]))
                    else:
                        tangent_u.append(None)  # the tangent space is None

                mps[ims] = u[:,:rank].reshape(shape[:-1]+[-1])
                
                vt = xp.einsum("i, ij -> ij", asxp(s), asxp(vt))
                if ims == site_num-1:
                    assert vt.size == 1 and xp.allclose(vt, 1)
                else:
                    mps[ims+1] = asnumpy(tensordot(vt, mps[ims+1], ([-1],[0])))
                
            mps_l_cano = mps.copy() 
            mps_l_cano.to_right = False
            mps_l_cano.qnidx = site_num-1

        else:
            mps_l_cano, mps_r_cano, tangent_u, tda_coeff_list = self.wfn
            cguess = []
            for iroot in range(len(tda_coeff_list)):
                tda_coeff = tda_coeff_list[iroot]
                x = [c.flatten() for c in tda_coeff if c is not None]
                x = np.concatenate(x,axis=None)
                cguess.append(x)
            cguess = np.stack(cguess, axis=1)

        xshape = [] 
        xsize = 0
        for ims in range(site_num):
            if tangent_u[ims] is None:
                xshape.append((0,0))
            else:
                if ims == site_num-1:
                    xshape.append((tangent_u[ims].shape[-1], 1))
                else:    
                    xshape.append((tangent_u[ims].shape[-1], mps_r_cano[ims+1].shape[0]))
                xsize += np.prod(xshape[-1])
        
        logger.debug(f"DMRG-TDA H dimension: {xsize}")
        
        if USE_GPU:
            oe_backend = "cupy"
        else:
            oe_backend = "numpy"
        
        mps_tangent = mps_r_cano.copy()
        environ = Environ(mps_tangent, mpo, "R")
        hdiag = []
        for ims in range(site_num):
            ltensor = environ.GetLR(
                "L", ims-1, mps_tangent, mpo, itensor=None,
                method="System"
            )
            rtensor = environ.GetLR(
                "R", ims+1, mps_tangent, mpo, itensor=None,
                method="Enviro"
            )
            if tangent_u[ims] is not None:
                u = asxp(tangent_u[ims])
                tmp = oe.contract("abc, ded, bghe, agl, chl -> ld", ltensor, rtensor,
                        asxp(mpo[ims]), u, u, backend=oe_backend)   
                hdiag.append(asnumpy(tmp))
            mps_tangent[ims] = mps_l_cano[ims]
        hdiag = np.concatenate(hdiag, axis=None)
    
        count = 0
        
        # recover the flattened vector x back to the list of ndarrays tda_coeff
        def reshape_x(x):
            tda_coeff = []
            offset = 0
            for shape in xshape:
                if shape == (0,0):
                    tda_coeff.append(None)
                else:
                    size = np.prod(shape)
                    tda_coeff.append(x[offset:size+offset].reshape(shape))
                    offset += size
            
            assert offset == xsize
            return tda_coeff
            
        def hop(x):
            # H*X
            nonlocal count
            count += 1
            
            assert len(x) == xsize
            tda_coeff = reshape_x(x)
    
            res = [np.zeros_like(coeff) if coeff is not None else None for coeff in tda_coeff]
            
            # fix ket and sweep bra and accumulate into res
            for ims in range(site_num):
                if tda_coeff[ims] is None:
                    assert tangent_u[ims] is None
                    continue
                
                # mixed-canonical mps
                mps_tangent = merge(mps_l_cano, mps_r_cano, ims+1)
                mps_tangent[ims] = tensordot(tangent_u[ims], tda_coeff[ims], (-1, 0))
                
                mps_tangent_conj = mps_r_cano.copy()
                environ = Environ(mps_tangent, mpo, "R", mps_conj=mps_tangent_conj)
                
                for ims_conj in range(site_num):
                    ltensor = environ.GetLR(
                        "L", ims_conj-1, mps_tangent, mpo, itensor=None,
                        mps_conj=mps_tangent_conj,
                        method="System"
                    )
                    rtensor = environ.GetLR(
                        "R", ims_conj+1, mps_tangent, mpo, itensor=None,
                        mps_conj=mps_tangent_conj,
                        method="Enviro"
                    )
                    if tda_coeff[ims_conj] is not None:
                        # S-a   l-S
                        #     d
                        # O-b-O-f-O
                        #     e
                        # S-c   k-S
    
                        path = [
                            ([0, 1], "abc, cek -> abek"),
                            ([2, 0], "abek, bdef -> akdf"),
                            ([1, 0], "akdf, lfk -> adl"),
                        ]
                        out = multi_tensor_contract(
                            path, ltensor, asxp(mps_tangent[ims_conj]),
                            asxp(mpo[ims_conj]), rtensor
                        )
                        res[ims_conj] += asnumpy(tensordot(tangent_u[ims_conj], out,
                            ([0,1], [0,1])))
                    
                    # move the mixed-canonical point of mps_conj one site to the right
                    mps_tangent_conj[ims_conj] = mps_l_cano[ims_conj]    
            
            res = [mat for mat in res if mat is not None]
    
            return np.concatenate(res, axis=None)
        
        if algo == "davidson":
            if restart:
                cguess = [cguess[:,i] for i in range(cguess.shape[1])]
            else:
                cguess = [np.random.random(xsize) - 0.5]
            precond = lambda x, e, *args: x / (hdiag - e + 1e-4)
            
            e, c = davidson(
                hop, cguess, precond, max_cycle=100,
                nroots=nroots, max_memory=64000
            )
            if nroots == 1:
                c = [c]
            c = np.stack(c, axis=1)

        elif algo == "primme":
            if not restart:
                cguess = None

            def multi_hop(x):
                if x.ndim == 1:
                    return hop(x)
                elif x.ndim == 2:
                    return np.stack([hop(x[:,i]) for i in range(x.shape[1])],axis=1)
                else:
                    assert False
    
            def precond(x): 
                if x.ndim == 1:
                    return np.einsum("i, i -> i", 1/(hdiag+1e-4), x)
                elif x.ndim ==2:
                    return np.einsum("i, ij -> ij", 1/(hdiag+1e-4), x)
                else:
                    assert False
            A = scipy.sparse.linalg.LinearOperator((xsize,xsize),
                    matvec=multi_hop, matmat=multi_hop)
            M = scipy.sparse.linalg.LinearOperator((xsize,xsize),
                    matvec=precond, matmat=precond)
            e, c = primme.eigsh(A, k=min(nroots,xsize), which="SA", 
                    v0=cguess,
                    OPinv=M,
                    method="PRIMME_DYNAMIC", 
                    tol=1e-6)
        else:
            assert False

        logger.debug(f"H*C times: {count}")
        
        tda_coeff_list = []
        for iroot in range(nroots):
            tda_coeff_list.append(reshape_x(c[:,iroot])) 
        
        self.e = np.array(e)
        self.wfn = [mps_l_cano, mps_r_cano, tangent_u, tda_coeff_list]
        
        return self.e
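Both branches above diagonalize H matrix-free: the eigensolver only ever sees hop (H·x) and the diagonal preconditioner built from hdiag. For reference, scipy's Lanczos solver accepts the same ingredients through a LinearOperator (no preconditioner hook, but the calling pattern is the same); a small sketch with a stand-in diagonal H:

import numpy as np
import scipy.sparse.linalg

n, nroots = 100, 3
hdiag = np.sort(np.random.rand(n)) * 10 + 1.0

def hop(x):
    # stand-in for the H*x tensor contraction in the TDA kernel
    return hdiag * x

A = scipy.sparse.linalg.LinearOperator((n, n), matvec=hop)
e, c = scipy.sparse.linalg.eigsh(A, k=nroots, which="SA")
assert np.allclose(e, hdiag[:nroots])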
Example #15
 def read(self, domain: str, siteidx: int):
     return asxp(self._virtual_disk[(domain, siteidx)])
Example #16
    def variational_compress(self, mpo=None, guess=None):
        r"""Variational compress an mps/mpdm/mpo
        
        Parameters
        ----------
        mpo : renormalizer.mps.Mpo, optional 
            Default is ``None``. If ``mpo`` is not ``None``, the returned mps is
            an approximation of ``mpo @ self``.
        guess : renormalizer.mps.MatrixProduct, optional
            Initial guess of compressed mps/mpdm/mpo. Default is ``None``. 
        
        Note
        ----
        The variational-compress-related configurations are defined in
        ``self`` if ``guess is None``; otherwise they are defined in ``guess``.

        Returns
        -------
        mp : renormalizer.mps.MatrixProduct
            a new compressed mps/mpdm/mpo. ``self`` is not overwritten, but
            ``guess`` is.
        
        """

        if mpo is None:
            logger.info(
                "Recommend using SVD to compress a single mps/mpo/mpdm.")
            raise NotImplementedError

        if guess is None:
            # a minimal representation of self and mpo
            compressed_mpo = mpo.copy().canonicalise().compress(
                temp_m_trunc=self.compress_config.vguess_m[0])
            compressed_mps = self.copy().canonicalise().compress(
                temp_m_trunc=self.compress_config.vguess_m[1])
            # the attributes of guess would be the same as self
            guess = compressed_mpo.apply(compressed_mps)
        mps = guess
        mps.ensure_left_canon()
        logger.info(f"initial guess bond dims: {mps.bond_dims}")

        procedure = mps.compress_config.vprocedure
        method = mps.compress_config.vmethod

        environ = Environ(self, mpo, "L", mps_conj=mps.conj())

        converged = False
        for isweep, (mmax, percent) in enumerate(procedure):
            logger.debug(f"isweep: {isweep}")
            logger.debug(f"mmax, percent: {mmax}, {percent}")
            logger.debug(f"mps bond dims: {mps.bond_dims}")

            for imps in mps.iter_idx_list(full=True):
                if method == "2site" and \
                    ((mps.to_right and imps == mps.site_num-1)
                    or ((not mps.to_right) and imps == 0)):
                    break

                if mps.to_right:
                    lmethod, rmethod = "System", "Enviro"
                else:
                    lmethod, rmethod = "Enviro", "System"

                if method == "1site":
                    lidx = imps - 1
                    cidx = [imps]
                    ridx = imps + 1
                elif method == "2site":
                    if mps.to_right:
                        lidx = imps - 1
                        cidx = [imps, imps + 1]
                        ridx = imps + 2
                    else:
                        lidx = imps - 2
                        cidx = [imps - 1, imps]  # center site
                        ridx = imps + 1
                else:
                    assert False
                logger.debug(f"optimize site: {cidx}")

                ltensor = environ.GetLR("L",
                                        lidx,
                                        self,
                                        mpo,
                                        itensor=None,
                                        method=lmethod,
                                        mps_conj=mps.conj())
                rtensor = environ.GetLR("R",
                                        ridx,
                                        self,
                                        mpo,
                                        itensor=None,
                                        method=rmethod,
                                        mps_conj=mps.conj())

                # get the quantum number pattern
                qnbigl, qnbigr, qnmat = mps._get_big_qn(cidx)

                # center mo
                cmo = [asxp(mpo[idx]) for idx in cidx]
                cms = [asxp(self[idx]) for idx in cidx]
                if method == "1site":
                    if cms[0].ndim == 3:
                        # S-a   l-S
                        #     d
                        # O-b-O-f-O
                        #     e
                        # S-c   k-S

                        path = [
                            ([0, 1], "abc, cek -> abek"),
                            ([2, 0], "abek, bdef -> akdf"),
                            ([1, 0], "akdf, lfk -> adl"),
                        ]
                    elif cms[0].ndim == 4:
                        # S-a   l-S
                        #     d
                        # O-b-O-f-O
                        #     e
                        # S-c   k-S
                        #     g
                        path = [
                            ([0, 2], "abc, bdef -> acdef"),
                            ([2, 0], "acdef, cegk -> adfgk"),
                            ([1, 0], "adfgk, lfk -> adgl"),
                        ]
                    cout = multi_tensor_contract(path, ltensor, cms[0], cmo[0],
                                                 rtensor)
                else:
                    if USE_GPU:
                        oe_backend = "cupy"
                    else:
                        oe_backend = "numpy"
                    if cms[0].ndim == 3:
                        # S-a       l-S
                        #     d   g
                        # O-b-O-f-O-j-O
                        #     e   h
                        # S-c   m   k-S

                        cout = oe.contract(
                            "abc, bdef, fghj, ljk, cem, mhk -> adgl",
                            ltensor,
                            cmo[0],
                            cmo[1],
                            rtensor,
                            cms[0],
                            cms[1],
                            backend=oe_backend)
                    elif cms[0].ndim == 4:
                        # S-a       l-S
                        #     d   g
                        # O-b-O-f-O-j-O
                        #     e   h
                        # S-c   m   k-S
                        #     n   p
                        cout = oe.contract(
                            "abc, bdef, fghj, ljk, cenm, mhpk -> adngpl",
                            ltensor,
                            cmo[0],
                            cmo[1],
                            rtensor,
                            cms[0],
                            cms[1],
                            backend=oe_backend)
                # clean up the elements which do not meet the qn requirements
                cout[qnmat != mps.qntot] = 0
                mps._update_mps(cout, cidx, qnbigl, qnbigr, mmax, percent)

            mps._switch_direction()

            # check for convergence
            if isweep > 0 and percent == 0 and \
                    mps.distance(mps_old) / np.sqrt(mps.dot(mps.conj()).real) < mps.compress_config.vrtol:
                converged = True
                break

            mps_old = mps.copy()

        if converged:
            logger.info("Variational compress is converged!")
        else:
            logger.warning(
                "Variational compress is not converged! Please increase the procedure!"
            )

        # remove the redundant bond dimension near the boundary of the MPS
        mps.canonicalise()
        logger.info(f"{mps}")

        return mps
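
The sweep loop above stops once the relative distance between successive sweeps drops below vrtol. A minimal numpy illustration of that criterion, with illustrative names:

import numpy as np

def sweep_converged(new, old, vrtol=1e-5):
    # mirrors mps.distance(mps_old) / sqrt(<mps|mps>) < vrtol in the code above
    return np.linalg.norm(new - old) / np.linalg.norm(new) < vrtol

state_old = np.array([1.0, 2.0, 3.0])
state_new = state_old + 1e-7
print(sweep_converged(state_new, state_old))   # True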
Example #17
    def optimize_cv(self, lr_group, isite, percent=0):
        if self.spectratype == "abs":
            # quantum number restriction, |1><0|
            up_exciton, down_exciton = 1, 0
        elif self.spectratype == "emi":
            # quantum number restriction, |0><1|
            up_exciton, down_exciton = 0, 1
        nexciton = 1
        first_LR, second_LR, third_LR, forth_LR = lr_group

        if self.method == "1site":
            add_list = [isite - 1]
            first_L = asxp(first_LR[isite - 1])
            first_R = asxp(first_LR[isite])
            second_L = asxp(second_LR[isite - 1])
            second_R = asxp(second_LR[isite])
            third_L = asxp(third_LR[isite - 1])
            third_R = asxp(third_LR[isite])
            forth_L = asxp(forth_LR[isite - 1])
            forth_R = asxp(forth_LR[isite])
        else:
            add_list = [isite - 2, isite - 1]
            first_L = asxp(first_LR[isite - 2])
            first_R = asxp(first_LR[isite])
            second_L = asxp(second_LR[isite - 2])
            second_R = asxp(second_LR[isite])
            third_L = asxp(third_LR[isite - 2])
            third_R = asxp(third_LR[isite])
            forth_L = asxp(forth_LR[isite - 2])
            forth_R = asxp(forth_LR[isite])

        xqnmat, xqnbigl, xqnbigr, xshape = \
            self.construct_X_qnmat(add_list)
        dag_qnmat, dag_qnbigl, dag_qnbigr = self.swap(xqnmat, xqnbigl, xqnbigr)
        nonzeros = int(
            np.sum(self.condition(dag_qnmat, [down_exciton, up_exciton])))

        if self.method == "1site":
            guess = moveaxis(self.cv_mpo[isite - 1], (1, 2), (2, 1))
        else:
            guess = tensordot(moveaxis(self.cv_mpo[isite - 2], (1, 2), (2, 1)),
                              moveaxis(self.cv_mpo[isite - 1], (1, 2), (2, 1)),
                              axes=(-1, 0))
        guess = guess[self.condition(dag_qnmat, [down_exciton, up_exciton])]

        if self.method == "1site":
            # define dot path
            path_1 = [([0, 1], "abcd, aefg -> bcdefg"),
                      ([3, 0], "bcdefg, bfhi -> cdeghi"),
                      ([2, 0], "cdeghi, chjk -> degijk"),
                      ([1, 0], "degijk, gikl -> dejl")]
            path_2 = [([0, 1], "abcd, aefg -> bcdefg"),
                      ([3, 0], "bcdefg, bfhi -> cdeghi"),
                      ([2, 0], "cdeghi, djek -> cghijk"),
                      ([1, 0], "cghijk, gilk -> chjl")]
            path_3 = [([0, 1], "ab, acde -> bcde"),
                      ([1, 0], "bcde, ef -> bcdf")]

            vecb = multi_tensor_contract(
                path_3, forth_L, moveaxis(self.b_mpo[isite - 1], (1, 2),
                                          (2, 1)),
                forth_R)[self.condition(dag_qnmat, [down_exciton, up_exciton])]

        a_oper_isite = asxp(self.a_oper[isite - 1])
        h_mpo_isite = asxp(self.h_mpo[isite - 1])
        # construct preconditioner
        Idt = xp.identity(h_mpo_isite.shape[1])
        M1_1 = xp.einsum('abca->abc', first_L)
        path_m1 = [([0, 1], "abc, bdef -> acdef"),
                   ([1, 0], "acdef, cegh -> adfgh")]
        M1_2 = multi_tensor_contract(path_m1, M1_1, a_oper_isite, a_oper_isite)
        M1_2 = xp.einsum("abcbd->abcd", M1_2)
        M1_3 = xp.einsum('ecde->ecd', first_R)
        M1_4 = xp.einsum('ff->f', Idt)
        path_m1 = [([0, 1], "abcd,ecd->abe"), ([1, 0], "abe,f->abef")]
        pre_M1 = multi_tensor_contract(path_m1, M1_2, M1_3, M1_4)
        pre_M1 = xp.moveaxis(pre_M1, [-2, -1], [-1, -2])[self.condition(
            dag_qnmat, [down_exciton, up_exciton])]

        M2_1 = xp.einsum('aeag->aeg', second_L)
        M2_2 = xp.einsum('eccf->ecf', a_oper_isite)
        M2_3 = xp.einsum('gbbh->gbh', h_mpo_isite)
        M2_4 = xp.einsum('dfdh->dfh', second_R)
        path_m2 = [([0, 1], "aeg, gbh -> aebh"),
                   ([2, 0], "aebh, ecf -> abchf"),
                   ([1, 0], "abchf, dfh -> abcd")]
        pre_M2 = multi_tensor_contract(path_m2, M2_1, M2_3, M2_2, M2_4)
        pre_M2 = pre_M2[self.condition(dag_qnmat, [down_exciton, up_exciton])]

        M4_1 = xp.einsum('faah->fah', third_L)
        M4_4 = xp.einsum('gddi->gdi', third_R)
        M4_5 = xp.einsum('cc->c', Idt)
        M4_path = [([0, 1], "fah,febg->ahebg"), ([2, 0], "ahebg,hjei->abgji"),
                   ([1, 0], "abgji,gdi->abjd")]
        pre_M4 = multi_tensor_contract(M4_path, M4_1, h_mpo_isite, h_mpo_isite,
                                       M4_4)
        pre_M4 = xp.einsum('abbd->abd', pre_M4)
        pre_M4 = xp.tensordot(pre_M4, M4_5, axes=0)
        pre_M4 = xp.moveaxis(pre_M4, [2, 3], [3, 2])[self.condition(
            dag_qnmat, [down_exciton, up_exciton])]

        M_x = lambda x: asnumpy(
            asxp(x) /
            (pre_M1 + 2 * pre_M2 + pre_M4 + xp.ones(nonzeros) * self.eta**2))
        pre_M = scipy.sparse.linalg.LinearOperator((nonzeros, nonzeros), M_x)

        count = 0

        def hop(x):
            nonlocal count
            count += 1
            dag_struct = asxp(self.dag2mat(xshape, x, dag_qnmat))
            if self.method == "1site":

                M1 = multi_tensor_contract(path_1, first_L, dag_struct,
                                           a_oper_isite, a_oper_isite, first_R)
                M2 = multi_tensor_contract(path_2, second_L, dag_struct,
                                           a_oper_isite, h_mpo_isite, second_R)
                M2 = xp.moveaxis(M2, (1, 2), (2, 1))
                M3 = multi_tensor_contract(path_2, third_L, h_mpo_isite,
                                           dag_struct, h_mpo_isite, third_R)
                M3 = xp.moveaxis(M3, (1, 2), (2, 1))
                cout = M1 + 2 * M2 + M3 + dag_struct * self.eta**2
            cout = cout[self.condition(dag_qnmat, [down_exciton, up_exciton])]
            return asnumpy(cout)

        # Matrix A
        mat_a = scipy.sparse.linalg.LinearOperator((nonzeros, nonzeros),
                                                   matvec=hop)

        x, info = scipy.sparse.linalg.cg(mat_a,
                                         asnumpy(vecb),
                                         tol=1.e-5,
                                         x0=asnumpy(guess),
                                         maxiter=500,
                                         M=pre_M,
                                         atol=0)
        # logger.info(f"linear eq dim: {nonzeros}")
        # logger.info(f'times for hop:{count}')
        self.hop_time.append(count)
        if info != 0:
            logger.warning(
                f"cg not converged, vecb.norm:{xp.linalg.norm(vecb)}")
        l_value = xp.dot(asxp(hop(x)), asxp(x)) - 2 * xp.dot(vecb, asxp(x))

        x = self.dag2mat(xshape, x, dag_qnmat)
        if self.method == "1site":
            x = np.moveaxis(x, [1, 2], [2, 1])
        x, xdim, xqn, compx = self.x_svd(x,
                                         xqnbigl,
                                         xqnbigr,
                                         nexciton,
                                         percent=percent)

        if self.method == "1site":
            self.cv_mpo[isite - 1] = x
            if not self.cv_mpo.to_right:
                if isite != 1:
                    self.cv_mpo[isite - 2] = \
                        tensordot(self.cv_mpo[isite - 2], compx, axes=(-1, 0))
                    self.cv_mpo.qn[isite - 1] = xqn
                    self.cv_mpo.qnidx = isite - 2
                else:
                    self.cv_mpo[isite - 1] = \
                        tensordot(compx, self.cv_mpo[isite - 1], axes=(-1, 0))
                    self.cv_mpo.qnidx = 0
            else:
                if isite != len(self.cv_mpo):
                    self.cv_mpo[isite] = \
                        tensordot(compx, self.cv_mpo[isite], axes=(-1, 0))
                    self.cv_mpo.qn[isite] = xqn
                    self.cv_mpo.qnidx = isite
                else:
                    self.cv_mpo[isite - 1] = \
                        tensordot(self.cv_mpo[isite - 1], compx, axes=(-1, 0))
                    self.cv_mpo.qnidx = self.cv_mpo.site_num - 1

        else:
            if not self.cv_mpo.to_right:
                self.cv_mpo[isite - 2] = compx
                self.cv_mpo[isite - 1] = x
                self.cv_mpo.qnidx = isite - 2
            else:
                self.cv_mpo[isite - 2] = x
                self.cv_mpo[isite - 1] = compx
                self.cv_mpo.qnidx = isite - 1
            self.cv_mpo.qn[isite - 1] = xqn

        return float(l_value)
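
A self-contained sketch of the preconditioned conjugate-gradient pattern used above: the matrix is available only as a matvec, and M applies an approximate inverse built from the diagonal. The toy diagonal system stands in for the MPO contractions.

import numpy as np
import scipy.sparse.linalg

n = 100
diag = np.linspace(1.0, 50.0, n)

def hop(x):
    # stands in for the tensor-contraction matvec in optimize_cv
    return diag * x

mat_a = scipy.sparse.linalg.LinearOperator((n, n), matvec=hop)
pre_m = scipy.sparse.linalg.LinearOperator((n, n), matvec=lambda x: x / diag)
vecb = np.ones(n)
x, info = scipy.sparse.linalg.cg(mat_a, vecb, tol=1e-5, maxiter=500, M=pre_m, atol=0)
assert info == 0 and np.allclose(diag * x, vecb, atol=1e-3)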
Example #18
def optimize_mps(mps: Mps, mpo: Mpo, omega: float = None) -> Tuple[List, Mps]:
    r""" DMRG ground state algorithm and state-averaged excited states algorithm
    
    Parameters
    ----------
    mps : renormalizer.mps.Mps
        initial guess of mps
    mpo : renormalizer.mps.Mpo 
        mpo of Hamiltonian
    omega: float, optional
        target the eigenpair near omega with the variational functional
        :math:`(\hat{H}-\omega)^2`. Default is ``None``.

    Returns
    -------
    energy : list
        list of the energy of each macro sweep.
        :math:`[e_0, e_0, \cdots, e_0]` if ``nroots=1``.
        :math:`[[e_0, \cdots, e_n], \cdots, [e_0, \cdots, e_n]]` if ``nroots=n``.
    mps : renormalizer.mps.Mps
        optimized ground state mps. The input mps is overwritten and can not
        be used anymore.
    
    See Also
    --------
    renormalizer.utils.configs.OptimizeConfig : The optimization configuration.
    
    """
    algo = mps.optimize_config.algo
    method = mps.optimize_config.method
    procedure = mps.optimize_config.procedure
    inverse = mps.optimize_config.inverse
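    # inverse scales H inside hop/hdiag; presumably +1 targets the lowest
    # eigenvalue, while -1 flips the spectrum so the "SA" solvers return the
    # highest state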
    nroots = mps.optimize_config.nroots

    assert method in ["2site", "1site"]
    logger.info(f"optimization method: {method}")
    logger.info(f"e_rtol: {mps.optimize_config.e_rtol}")
    logger.info(f"e_atol: {mps.optimize_config.e_atol}")

    if USE_GPU:
        oe_backend = "cupy"
    else:
        oe_backend = "numpy"

    # ensure that mps is left or right-canonical
    # TODO: start from a mix-canonical MPS
    if mps.is_left_canon:
        mps.ensure_right_canon()
        env = "R"
    else:
        mps.ensure_left_canon()
        env = "L"

    # in a state-averaged calculation, contains the C of each state for a
    # better initial guess
    averaged_ms = None

    # the index of the active site of the returned mps
    res_mps_idx = None

    # target eigenstate close to omega with (H-omega)^2
    # construct the environment matrix
    if omega is not None:
        identity = Mpo.identity(mpo.model)
        mpo = mpo.add(identity.scale(-omega))
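        # folded spectrum: the smallest eigenvalues of (H - omega)^2 belong to
        # the eigenstates of H whose energies are closest to omega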
        environ = Environ(mps, [mpo, mpo], env)
    else:
        environ = Environ(mps, mpo, env)

    macro_iteration_result = []
    converged = False
    for isweep, (mmax, percent) in enumerate(procedure):
        logger.debug(f"isweep: {isweep}")
        logger.debug(f"mmax, percent: {mmax}, {percent}")
        logger.debug(f"{mps}")

        micro_iteration_result = []
        for imps in mps.iter_idx_list(full=True):
            if method == "2site" and \
                ((mps.to_right and imps == mps.site_num-1)
                or ((not mps.to_right) and imps == 0)):
                break

            if mps.to_right:
                lmethod, rmethod = "System", "Enviro"
            else:
                lmethod, rmethod = "Enviro", "System"

            if method == "1site":
                lidx = imps - 1
                cidx = [imps]
                ridx = imps + 1
            elif method == "2site":
                if mps.to_right:
                    lidx = imps - 1
                    cidx = [imps, imps + 1]
                    ridx = imps + 2
                else:
                    lidx = imps - 2
                    cidx = [imps - 1, imps]  # center site
                    ridx = imps + 1
            else:
                assert False
            logger.debug(f"optimize site: {cidx}")

            if omega is None:
                operator = mpo
            else:
                operator = [mpo, mpo]

            ltensor = environ.GetLR("L",
                                    lidx,
                                    mps,
                                    operator,
                                    itensor=None,
                                    method=lmethod)
            rtensor = environ.GetLR("R",
                                    ridx,
                                    mps,
                                    operator,
                                    itensor=None,
                                    method=rmethod)

            # get the quantum number pattern
            qnbigl, qnbigr, qnmat = mps._get_big_qn(cidx)
            cshape = qnmat.shape
            nonzeros = np.sum(qnmat == mps.qntot)
            logger.debug(f"Hmat dim: {nonzeros}")

            # center mo
            cmo = [asxp(mpo[idx]) for idx in cidx]

            if qnmat.size > 1000 and algo != "direct":
                # iterative algorithm

                # diagonal elements of H
                if omega is None:
                    tmp_ltensor = xp.einsum("aba -> ba", ltensor)
                    tmp_cmo0 = xp.einsum("abbc -> abc", cmo[0])
                    tmp_rtensor = xp.einsum("aba -> ba", rtensor)
                    if method == "1site":
                        #   S-a c f-S
                        #   O-b-O-g-O
                        #   S-a c f-S
                        path = [([0, 1], "ba, bcg -> acg"),
                                ([1, 0], "acg, gf -> acf")]
                        hdiag = multi_tensor_contract(
                            path, tmp_ltensor, tmp_cmo0,
                            tmp_rtensor)[(qnmat == mps.qntot)]
                    else:
                        #   S-a c   d f-S
                        #   O-b-O-e-O-g-O
                        #   S-a c   d f-S
                        tmp_cmo1 = xp.einsum("abbc -> abc", cmo[1])
                        path = [
                            ([0, 1], "ba, bce -> ace"),
                            ([0, 1], "edg, gf -> edf"),
                            ([0, 1], "ace, edf -> acdf"),
                        ]
                        hdiag = multi_tensor_contract(
                            path, tmp_ltensor, tmp_cmo0, tmp_cmo1,
                            tmp_rtensor)[(qnmat == mps.qntot)]
                else:
                    if method == "1site":
                        #   S-a d h-S
                        #   O-b-O-f-O
                        #   |   e   |
                        #   O-c-O-g-O
                        #   S-a d h-S
                        hdiag = oe.contract(
                            "abca, bdef, cedg, hfgh -> adh",
                            ltensor,
                            cmo[0],
                            cmo[0],
                            rtensor,
                            backend=oe_backend)[(qnmat == mps.qntot)]
                    else:
                        #   S-a d   h l-S
                        #   O-b-O-f-O-j-O
                        #   |   e   i   |
                        #   O-c-O-g-O-k-O
                        #   S-a d   h l-S
                        hdiag = oe.contract(
                            "abca, bdef, cedg, fhij, gihk, ljkl -> adhl",
                            ltensor,
                            cmo[0],
                            cmo[0],
                            cmo[1],
                            cmo[1],
                            rtensor,
                            backend=oe_backend)[(qnmat == mps.qntot)]

                hdiag = asnumpy(hdiag * inverse)

                # initial guess
                if method == "1site":
                    # initial guess   b-S-c
                    #                   a
                    if nroots == 1:
                        cguess = [asnumpy(mps[cidx[0]])[qnmat == mps.qntot]]
                    else:
                        cguess = []
                        if averaged_ms is not None:
                            for ms in averaged_ms:
                                cguess.append(asnumpy(ms)[qnmat == mps.qntot])
                else:
                    # initial guess b-S-c-S-e
                    #                 a   d
                    if nroots == 1:
                        cguess = [
                            asnumpy(
                                tensordot(mps[cidx[0]], mps[cidx[1]],
                                          axes=1)[qnmat == mps.qntot])
                        ]
                    else:
                        cguess = []
                        if averaged_ms is not None:
                            for ms in averaged_ms:
                                if mps.to_right:
                                    cguess.append(
                                        asnumpy(
                                            tensordot(
                                                ms, mps[cidx[1]],
                                                axes=1)[qnmat == mps.qntot]))
                                else:
                                    cguess.append(
                                        asnumpy(
                                            tensordot(
                                                mps[cidx[0]], ms,
                                                axes=1)[qnmat == mps.qntot]))
                if omega is not None:
                    if method == "1site":
                        #   S-a e j-S
                        #   O-b-O-g-O
                        #   |   f   |
                        #   O-c-O-i-O
                        #   S-d h k-S
                        expr = oe.contract_expression(
                            "abcd, befg, cfhi, jgik, aej -> dhk",
                            ltensor,
                            cmo[0],
                            cmo[0],
                            rtensor,
                            cshape,
                            constants=[0, 1, 2, 3])
                    else:
                        #   S-a e   j o-S
                        #   O-b-O-g-O-l-O
                        #   |   f   k   |
                        #   O-c-O-i-O-n-O
                        #   S-d h   m p-S
                        expr = oe.contract_expression(
                            "abcd, befg, cfhi, gjkl, ikmn, olnp, aejo -> dhmp",
                            ltensor,
                            cmo[0],
                            cmo[0],
                            cmo[1],
                            cmo[1],
                            rtensor,
                            cshape,
                            constants=[0, 1, 2, 3, 4, 5])

                count = 0

                def hop(x):
                    nonlocal count
                    count += 1
                    clist = []
                    if x.ndim == 1:
                        clist.append(x)
                    else:
                        for icol in range(x.shape[1]):
                            clist.append(x[:, icol])
                    res = []
                    for c in clist:
                        # convert c to initial structure according to qn pattern
                        cstruct = asxp(cvec2cmat(cshape, c, qnmat, mps.qntot))

                        if omega is None:
                            if method == "1site":
                                # S-a   l-S
                                #     d
                                # O-b-O-f-O
                                #     e
                                # S-c   k-S

                                path = [
                                    ([0, 1], "abc, adl -> bcdl"),
                                    ([2, 0], "bcdl, bdef -> clef"),
                                    ([1, 0], "clef, lfk -> cek"),
                                ]
                                cout = multi_tensor_contract(
                                    path, ltensor, cstruct, cmo[0], rtensor)
                            else:
                                # S-a       l-S
                                #     d   g
                                # O-b-O-f-O-j-O
                                #     e   h
                                # S-c       k-S
                                path = [
                                    ([0, 1], "abc, adgl -> bcdgl"),
                                    ([3, 0], "bcdgl, bdef -> cglef"),
                                    ([2, 0], "cglef, fghj -> clehj"),
                                    ([1, 0], "clehj, ljk -> cehk"),
                                ]
                                cout = multi_tensor_contract(
                                    path,
                                    ltensor,
                                    cstruct,
                                    cmo[0],
                                    cmo[1],
                                    rtensor,
                                )
                        else:
                            cout = expr(cstruct, backend=oe_backend)

                        # convert structure c to 1d according to qn
                        res.append(asnumpy(cout)[qnmat == mps.qntot])

                    if len(res) == 1:
                        return inverse * res[0]
                    else:
                        return inverse * np.stack(res, axis=1)

                if len(cguess) < nroots:
                    cguess.extend([
                        np.random.random([nonzeros]) - 0.5
                        for i in range(len(cguess), nroots)
                    ])

                if algo == "davidson":
                    precond = lambda x, e, *args: x / (hdiag - e + 1e-4)

                    e, c = davidson(hop,
                                    cguess,
                                    precond,
                                    max_cycle=100,
                                    nroots=nroots,
                                    max_memory=64000)
                    # if one root, davidson returns e as np.float

                #elif algo == "arpack":
                #    # scipy arpack solver : much slower than pyscf/davidson
                #    A = scipy.sparse.linalg.LinearOperator((nonzeros,nonzeros), matvec=hop)
                #    e, c = scipy.sparse.linalg.eigsh(A, k=nroots, which="SA", v0=cguess)
                #    # scipy return numpy.array
                #    if nroots == 1:
                #        e = e[0]
                #elif algo == "lobpcg":
                #    precond = lambda x: scipy.sparse.diags(1/(hdiag+1e-4)) @ x
                #    A = scipy.sparse.linalg.LinearOperator((nonzeros,nonzeros),
                #            matvec=hop, matmat=hop)
                #    M = scipy.sparse.linalg.LinearOperator((nonzeros,nonzeros),
                #            matvec=precond, matmat=precond)
                #    e, c = scipy.sparse.linalg.lobpcg(A, np.array(cguess).T,
                #            M=M, largest=False)
                elif algo == "primme":
                    precond = lambda x: scipy.sparse.diags(1 /
                                                           (hdiag + 1e-4)) @ x
                    A = scipy.sparse.linalg.LinearOperator(
                        (nonzeros, nonzeros), matvec=hop, matmat=hop)
                    M = scipy.sparse.linalg.LinearOperator(
                        (nonzeros, nonzeros), matvec=precond, matmat=precond)
                    e, c = primme.eigsh(A,
                                        k=min(nroots, nonzeros),
                                        which="SA",
                                        v0=np.array(cguess).T,
                                        OPinv=M,
                                        method="PRIMME_DYNAMIC",
                                        tol=1e-6)
                else:
                    assert False
                logger.debug(f"use {algo}, HC hops: {count}")
            else:
                logger.debug(f"use direct eigensolver")

                # direct algorithm
                if omega is None:
                    if method == "1site":
                        # S-a   l-S
                        #     d
                        # O-b-O-f-O
                        #     e
                        # S-c   k-S
                        ham = oe.contract("abc,bdef,lfk->adlcek",
                                          ltensor,
                                          cmo[0],
                                          rtensor,
                                          backend=oe_backend)
                        ham = ham[:, :, :, qnmat == mps.qntot][
                            qnmat == mps.qntot, :] * inverse
                    else:
                        # S-a       l-S
                        #     d   g
                        # O-b-O-f-O-j-O
                        #     e   h
                        # S-c       k-S
                        ham = oe.contract("abc,bdef,fghj,ljk->adglcehk",
                                          ltensor, cmo[0], cmo[1], rtensor)
                        ham = ham[:, :, :, :, qnmat == mps.qntot][
                            qnmat == mps.qntot, :] * inverse
                else:
                    if method == "1site":
                        #   S-a e j-S
                        #   O-b-O-g-O
                        #   |   f   |
                        #   O-c-O-i-O
                        #   S-d h k-S
                        ham = oe.contract("abcd, befg, cfhi, jgik -> aejdhk",
                                          ltensor, cmo[0], cmo[0], rtensor)
                        ham = ham[:, :, :, qnmat == mps.qntot][
                            qnmat == mps.qntot, :] * inverse
                    else:
                        #   S-a e   j o-S
                        #   O-b-O-g-O-l-O
                        #   |   f   k   |
                        #   O-c-O-i-O-n-O
                        #   S-d h   m p-S
                        ham = oe.contract(
                            "abcd, befg, cfhi, gjkl, ikmn, olnp -> aejodhmp",
                            ltensor, cmo[0], cmo[0], cmo[1], cmo[1], rtensor)
                        ham = ham[:, :, :, :, qnmat == mps.qntot][
                            qnmat == mps.qntot, :] * inverse

                w, v = scipy.linalg.eigh(asnumpy(ham))
                if nroots == 1:
                    e = w[0]
                    c = v[:, 0]
                else:
                    e = w[:nroots]
                    c = [
                        v[:, iroot] for iroot in range(min(nroots, v.shape[1]))
                    ]
            # if multi roots, both davidson and primme return np.ndarray
            if nroots > 1:
                e = e.tolist()
            logger.debug(f"energy: {e}")
            micro_iteration_result.append(e)

            cstruct = cvec2cmat(cshape, c, qnmat, mps.qntot, nroots=nroots)
            # store the "optimal" mps (usually in the middle of each sweep)
            if res_mps_idx is not None and res_mps_idx == imps:
                if nroots == 1:
                    res_mps = mps.copy()
                    res_mps._update_mps(cstruct, cidx, qnbigl, qnbigr, mmax,
                                        percent)
                else:
                    res_mps = [mps.copy() for i in range(len(cstruct))]
                    for iroot in range(len(cstruct)):
                        res_mps[iroot]._update_mps(cstruct[iroot], cidx,
                                                   qnbigl, qnbigr, mmax,
                                                   percent)

            averaged_ms = mps._update_mps(cstruct, cidx, qnbigl, qnbigr, mmax,
                                          percent)

        mps._switch_direction()

        res_mps_idx = micro_iteration_result.index(min(micro_iteration_result))
        macro_iteration_result.append(micro_iteration_result[res_mps_idx])
        # check for convergence
        if isweep > 0 and percent == 0:
            v1, v2 = sorted(macro_iteration_result)[:2]
            if np.allclose(v1,
                           v2,
                           rtol=mps.optimize_config.e_rtol,
                           atol=mps.optimize_config.e_atol):
                converged = True
                break

    logger.debug(
        f"{isweep+1} sweeps are finished, lowest energy = {sorted(macro_iteration_result)[0]}"
    )
    if converged:
        logger.info("DMRG is converged!")
    else:
        logger.warning("DMRG is not converged! Please increase the procedure!")
        logger.info(
            f"The lowest two energies: {sorted(macro_iteration_result)[:2]}.")

    # remove the redundant basis near the edge
    if nroots == 1:
        res_mps = res_mps.normalize().ensure_left_canon().canonicalise()
        logger.info(f"{res_mps}")
    else:
        res_mps = [
            mp.normalize().ensure_left_canon().canonicalise() for mp in res_mps
        ]
        logger.info(f"{res_mps[0]}")
    return macro_iteration_result, res_mps
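
Throughout the routine above, tensors are flattened to their symmetry-allowed entries and solver vectors are scattered back (cvec2cmat); a minimal numpy sketch of that round trip:

import numpy as np

qnmat = np.array([[0, 1], [1, 2]])            # quantum number of every entry
qntot = 1                                     # target total quantum number
cstruct = np.array([[0.0, 0.3], [0.7, 0.0]])

c = cstruct[qnmat == qntot]                   # keep only the allowed entries
out = np.zeros(qnmat.shape)
out[qnmat == qntot] = c                       # scatter back; forbidden entries stay 0
assert np.allclose(out, cstruct)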
Example #19
def select_basis(vset, sset, qnset, compset, Mmax, percent=0):
    """
    Select the basis to construct the new mps and the complementary mps.
    The columns of vset and compset are the candidate vectors.
    """
    # allowed qn subsection
    qnlist = set(qnset)
    # convert to dict
    basdic = dict()
    for i in range(len(qnset)):
        # keep only quantum numbers inside the allowed qnlist
        if qnset[i] in qnlist:
            basdic[i] = [qnset[i], sset[i]]

    # each good quantum number block equally gets percent/nblocks of the basis
    def block_select(basdic, qn, n):
        block_basdic = {i: basdic[i] for i in basdic if basdic[i][0] == qn}
        sort_block_basdic = sorted(block_basdic.items(),
                                   key=lambda x: x[1][1],
                                   reverse=True)
        nget = min(n, len(sort_block_basdic))
        # print(qn, "block # of retained basis", nget)
        sidx = [i[0] for i in sort_block_basdic[0:nget]]
        for idx in sidx:
            del basdic[idx]

        return sidx

    nbasis = min(len(basdic), Mmax)
    # print("# of selected basis", nbasis)
    sidx = []

    # equally select from each quantum number block
    if percent != 0:
        nbas_block = int(nbasis * percent / len(qnlist))
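        # e.g. with nbasis = 40, percent = 0.2 and 3 qn blocks, every block is
        # first guaranteed int(40 * 0.2 / 3) = 2 of the retained states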
        for iqn in qnlist:
            sidx += block_select(basdic, iqn, nbas_block)

    # the rest are selected by singular value alone, regardless of block
    nbasis = nbasis - len(sidx)

    sortbasdic = sorted(basdic.items(), key=lambda x: x[1][1], reverse=True)
    sidx += [i[0] for i in sortbasdic[0:nbasis]]

    assert len(sidx) == len(set(sidx))  # there must be no duplicates

    mpsdim = len(sidx)
    # need to set values column by column; better done on the CPU
    ms = np.zeros((vset.shape[0], mpsdim), dtype=vset.dtype)

    if compset is not None:
        compmps = np.zeros((compset.shape[0], mpsdim), dtype=compset.dtype)
    else:
        compmps = None

    mpsqn = []
    stot = 0.0
    for idim in range(mpsdim):
        ms[:, idim] = vset[:, sidx[idim]].copy()
        if (compset is not None) and sidx[idim] < compset.shape[1]:
            compmps[:, idim] = compset[:, sidx[idim]].copy() * sset[sidx[idim]]
        mpsqn.append(qnset[sidx[idim]])
        stot += sset[sidx[idim]]**2

    # print("discard:", 1.0 - stot)
    if compmps is not None:
        compmps = asxp(compmps)

    return asxp(ms), mpsdim, mpsqn, compmps
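
A hypothetical call of select_basis above (assuming this module's np/asxp imports), showing that with percent=0 it simply keeps the Mmax largest singular values:

vset = np.eye(6)                                     # 6 candidate basis vectors
sset = np.array([0.9, 0.5, 0.1, 0.8, 0.05, 0.02])    # their singular values
qnset = [0, 0, 0, 1, 1, 2]                           # quantum number per vector
ms, mpsdim, mpsqn, compmps = select_basis(vset, sset, qnset, None, Mmax=4)
print(mpsdim, mpsqn)   # 4 [0, 1, 0, 0]: indices 0, 3, 1, 2, ordered by magnitude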
Example #20
def optimize_mps_dmrg(mps, mpo):
    """
    1 or 2 site optimization procedure
    """

    method = mps.optimize_config.method
    procedure = mps.optimize_config.procedure
    inverse = mps.optimize_config.inverse
    nroots = mps.optimize_config.nroots

    assert method in ["2site", "1site"]
    # print("optimization method", method)

    nexciton = mps.nexciton

    # construct the environment matrix
    environ = Environ(mps, mpo, "L")

    nMPS = len(mps)
    # construct each sweep cycle scheme
    if method == "1site":
        loop = [["R", i] for i in range(nMPS - 1, -1, -1)] + [
            ["L", i] for i in range(0, nMPS)
        ]
    else:
        loop = [["R", i] for i in range(nMPS - 1, 0, -1)] + [
            ["L", i] for i in range(1, nMPS)
        ]

    # initial matrix
    ltensor = ones((1, 1, 1))
    rtensor = ones((1, 1, 1))

    energies = []
    for isweep, (mmax, percent) in enumerate(procedure):
        logger.debug(f"mmax, percent: {mmax}, {percent}")
        logger.debug(f"energy: {mps.expectation(mpo)}")
        logger.debug(f"{mps}")

        for system, imps in loop:
            if system == "R":
                lmethod, rmethod = "Enviro", "System"
            else:
                lmethod, rmethod = "System", "Enviro"

            if method == "1site":
                lsite = imps - 1
                addlist = [imps]
            else:
                lsite = imps - 2
                addlist = [imps - 1, imps]

            ltensor = environ.GetLR(
                "L", lsite, mps, mpo, itensor=ltensor, method=lmethod
            )
            rtensor = environ.GetLR(
                "R", imps + 1, mps, mpo, itensor=rtensor, method=rmethod
            )

            # get the quantum number pattern
            qnmat, qnbigl, qnbigr = svd_qn.construct_qnmat(
                mps, mpo.pbond_list, addlist, method, system
            )
            cshape = qnmat.shape

            # hdiag
            tmp_ltensor = xp.einsum("aba -> ba", asxp(ltensor))
            tmp_MPOimps = xp.einsum("abbc -> abc", asxp(mpo[imps]))
            tmp_rtensor = xp.einsum("aba -> ba", asxp(rtensor))
            if method == "1site":
                #   S-a c f-S
                #   O-b-O-g-O
                #   S-a c f-S
                path = [([0, 1], "ba, bcg -> acg"), ([1, 0], "acg, gf -> acf")]
                hdiag = multi_tensor_contract(
                    path, tmp_ltensor, tmp_MPOimps, tmp_rtensor
                )[(qnmat == nexciton)]
                # initial guess   b-S-c
                #                   a
                cguess = mps[imps][qnmat == nexciton].array
            else:
                #   S-a c   d f-S
                #   O-b-O-e-O-g-O
                #   S-a c   d f-S
                tmp_MPOimpsm1 = xp.einsum("abbc -> abc", asxp(mpo[imps - 1]))
                path = [
                    ([0, 1], "ba, bce -> ace"),
                    ([0, 1], "edg, gf -> edf"),
                    ([0, 1], "ace, edf -> acdf"),
                ]
                hdiag = multi_tensor_contract(
                    path, tmp_ltensor, tmp_MPOimpsm1, tmp_MPOimps, tmp_rtensor
                )[(qnmat == nexciton)]
                # initial guess b-S-c-S-e
                #                 a   d
                cguess = asnumpy(tensordot(mps[imps - 1], mps[imps], axes=1)[qnmat == nexciton])
            hdiag *= inverse
            nonzeros = np.sum(qnmat == nexciton)
            # print("Hmat dim", nonzeros)

            mo1 = asxp(mpo[imps-1])
            mo2 = asxp(mpo[imps])
            def hop(c):
                # convert c to initial structure according to qn pattern
                cstruct = asxp(svd_qn.cvec2cmat(cshape, c, qnmat, nexciton))

                if method == "1site":
                    # S-a   l-S
                    #    d
                    # O-b-O-f-O
                    #    e
                    # S-c   k-S

                    path = [
                        ([0, 1], "abc, adl -> bcdl"),
                        ([2, 0], "bcdl, bdef -> clef"),
                        ([1, 0], "clef, lfk -> cek"),
                    ]
                    cout = multi_tensor_contract(
                        path, ltensor, cstruct, mo2, rtensor
                    )
                    # for small matrices, check hermite:
                    # a=tensordot(ltensor, mpo[imps], ((1), (0)))
                    # b=tensordot(a, rtensor, ((4), (1)))
                    # c=b.transpose((0, 2, 4, 1, 3, 5))
                    # d=c.reshape(16, 16)
                else:
                    # S-a       l-S
                    #    d   g
                    # O-b-O-f-O-j-O
                    #    e   h
                    # S-c       k-S
                    path = [
                        ([0, 1], "abc, adgl -> bcdgl"),
                        ([3, 0], "bcdgl, bdef -> cglef"),
                        ([2, 0], "cglef, fghj -> clehj"),
                        ([1, 0], "clehj, ljk -> cehk"),
                    ]
                    cout = multi_tensor_contract(
                        path,
                        ltensor,
                        cstruct,
                        mo1,
                        mo2,
                        rtensor,
                    )
                # convert structure c to 1d according to qn
                return inverse * asnumpy(cout)[qnmat == nexciton]

            if nroots != 1:
                cguess = [cguess]
                for iroot in range(nroots - 1):
                    cguess.append(np.random.random([nonzeros]) - 0.5)

            precond = lambda x, e, *args: x / (asnumpy(hdiag) - e + 1e-4)

            e, c = davidson(
                hop, cguess, precond, max_cycle=100, nroots=nroots, max_memory=64000
            )
            # scipy arpack solver : much slower than davidson
            # A = spslinalg.LinearOperator((nonzeros,nonzeros), matvec=hop)
            # e, c = spslinalg.eigsh(A,k=1, which="SA",v0=cguess)
            # print("HC loops:", count[0])
            # logger.debug(f"isweep: {isweep}, e: {e}")

            energies.append(e)

            cstruct = svd_qn.cvec2cmat(cshape, c, qnmat, nexciton, nroots=nroots)

            if nroots == 1:
                # direct svd the coefficient matrix
                mt, mpsdim, mpsqn, compmps = renormalization_svd(
                    cstruct,
                    qnbigl,
                    qnbigr,
                    system,
                    nexciton,
                    Mmax=mmax,
                    percent=percent,
                )
            else:
                # diagonalize the reduced density matrix
                mt, mpsdim, mpsqn, compmps = renormalization_ddm(
                    cstruct,
                    qnbigl,
                    qnbigr,
                    system,
                    nexciton,
                    Mmax=mmax,
                    percent=percent,
                )

            if method == "1site":
                mps[imps] = mt
                if system == "L":
                    if imps != len(mps) - 1:
                        mps[imps + 1] = tensordot(compmps, mps[imps + 1].array, axes=1)
                        mps.qn[imps + 1] = mpsqn
                    else:
                        mps[imps] = tensordot(mps[imps].array, compmps, axes=1)
                        mps.qn[imps + 1] = [0]

                else:
                    if imps != 0:
                        mps[imps - 1] = tensordot(mps[imps - 1].array, compmps, axes=1)
                        mps.qn[imps] = mpsqn
                    else:
                        mps[imps] = tensordot(compmps, mps[imps].array, axes=1)
                        mps.qn[imps] = [0]
            else:
                if system == "L":
                    mps[imps - 1] = mt
                    mps[imps] = compmps
                else:
                    mps[imps] = mt
                    mps[imps - 1] = compmps

                # mps.dim_list[imps] = mpsdim
                mps.qn[imps] = mpsqn

    energies = np.array(energies)
    if nroots == 1:
        logger.debug("Optimization complete, lowest energy = %g", energies.min())

    return energies
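
Finally, a self-contained sketch of the Davidson call pattern shared by the optimizers above. It assumes a pyscf-style davidson(aop, x0, precond, ...); pyscf's lib.davidson is used here for concreteness, while the module above ships its own equivalent.

import numpy as np
from pyscf import lib

n = 50
diag = np.arange(1.0, n + 1)

def hop(c):
    # toy diagonal "Hamiltonian" matvec
    return diag * c

precond = lambda x, e, *args: x / (diag - e + 1e-4)
cguess = np.random.random(n) - 0.5
e, c = lib.davidson(hop, cguess, precond, max_cycle=100, max_memory=64000)
print(e)   # close to 1.0, the smallest eigenvalue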