Example #1
    def expectations(self, mpos) -> np.ndarray:
        if len(mpos) < 3:
            return np.array([self.expectation(mpo) for mpo in mpos])
        assert 2 < len(mpos)
        # id can be used as an efficient hash because of the `Matrix` implementation
        mpo_ids = np.array([[id(m) for m in mpo] for mpo in mpos])
        common_mpo_ids = mpo_ids[0].copy()
        mpo0_unique_idx = np.where(np.sum(mpo_ids == common_mpo_ids, axis=0) == 1)[0][0]
        common_mpo_ids[mpo0_unique_idx] = mpo_ids[1][mpo0_unique_idx]
        x, unique_idx = np.where(mpo_ids != common_mpo_ids)
        # exactly one differing site should be found in each row
        assert np.allclose(x, np.arange(len(mpos)))
        common_mpo = list(mpos[0])
        common_mpo[mpo0_unique_idx] = mpos[1][mpo0_unique_idx]
        self_conj = self._expectation_conj()
        environ = Environ()
        environ.construct(self, self_conj, common_mpo, "l")
        environ.construct(self, self_conj, common_mpo, "r")
        res_list = []
        for idx, mpo in zip(unique_idx, mpos):
            l = environ.read("l", idx - 1)
            r = environ.read("r", idx + 1)
            path = self._expectation_path()
            res = multi_tensor_contract(path, l, self[idx], mpo[idx], self_conj[idx], r)
            res_list.append(float(res.real))
        return np.array(res_list)
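
Note: the routine assumes every MPO in `mpos` shares all site matrices except one, and it locates that one site per MPO purely by comparing `id()`s. A standalone numpy sketch of that index bookkeeping, using plain `object()` placeholders instead of Renormalizer `Matrix` instances:

import numpy as np

# Three hypothetical MPOs that share every site object except one.
shared = [object() for _ in range(4)]
mpos = []
for k in range(3):
    mpo = list(shared)
    mpo[k] = object()                  # MPO k differs from the rest only at site k
    mpos.append(mpo)

mpo_ids = np.array([[id(m) for m in mpo] for mpo in mpos])
common_mpo_ids = mpo_ids[0].copy()
# the one site where only MPO #0 agrees with the reference row is MPO #0's unique site
mpo0_unique_idx = np.where(np.sum(mpo_ids == common_mpo_ids, axis=0) == 1)[0][0]
common_mpo_ids[mpo0_unique_idx] = mpo_ids[1][mpo0_unique_idx]
rows, unique_idx = np.where(mpo_ids != common_mpo_ids)
print(unique_idx)                      # -> [0 1 2]: one unique site per MPO

With the unique site of each MPO known, the environments built once from `common_mpo` can be reused for every expectation value.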
Example #2
    def expectation(self, mpo, self_conj=None) -> float:
        if self_conj is None:
            self_conj = self._expectation_conj()
        environ = Environ()
        environ.construct(self, self_conj, mpo, "r")
        l = ones((1, 1, 1))
        r = environ.read("r", 1)
        path = self._expectation_path()
        return float(multi_tensor_contract(path, l, self[0], mpo[0], self_conj[0], r).real)
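
Note: the quantity evaluated here is the three-layer contraction <mps|mpo|mps>; `Environ` only caches the partial contractions so repeated calls stay cheap. A brute-force numpy check of the same quantity on small random tensors (the index ordering below is an illustrative convention, not necessarily Renormalizer's exact layout):

import numpy as np

# MPS sites: (left bond, physical, right bond); MPO sites: (left, phys_out, phys_in, right)
L, d, D, Dw = 4, 2, 3, 2
rng = np.random.default_rng(0)
mps = [rng.normal(size=(1 if i == 0 else D, d, 1 if i == L - 1 else D)) for i in range(L)]
mpo = [rng.normal(size=(1 if i == 0 else Dw, d, d, 1 if i == L - 1 else Dw)) for i in range(L)]

env = np.ones((1, 1, 1))  # (bra bond, mpo bond, ket bond), cf. ones((1, 1, 1)) above
for ms, mo in zip(mps, mpo):
    # absorb one site into the left environment: bra, operator and ket layer at once
    env = np.einsum("abc,asx,bsty,ctz->xyz", env, ms.conj(), mo, ms)
print(float(env[0, 0, 0]))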
Example #3
def optimize_mps_dmrg(mps, mpo):
    """
    1 or 2 site optimization procedure
    """

    method = mps.optimize_config.method
    procedure = mps.optimize_config.procedure
    inverse = mps.optimize_config.inverse
    nroots = mps.optimize_config.nroots

    assert method in ["2site", "1site"]
    # print("optimization method", method)

    nexciton = mps.nexciton

    # construct the environment matrix
    environ = Environ()
    environ.construct(mps, mps, mpo, "L")

    nMPS = len(mps)
    # construct each sweep cycle scheme
    if method == "1site":
        loop = [["R", i]
                for i in range(nMPS - 1, -1, -1)] + [["L", i]
                                                     for i in range(0, nMPS)]
    else:
        loop = [["R", i]
                for i in range(nMPS - 1, 0, -1)] + [["L", i]
                                                    for i in range(1, nMPS)]

    # initial matrix
    ltensor = ones((1, 1, 1))
    rtensor = ones((1, 1, 1))

    energies = []
    for isweep, (mmax, percent) in enumerate(procedure):
        logger.debug(f"mmax, percent: {mmax}, {percent}")
        logger.debug(f"energy: {mps.expectation(mpo)}")
        logger.debug(f"{mps}")

        for system, imps in loop:
            if system == "R":
                lmethod, rmethod = "Enviro", "System"
            else:
                lmethod, rmethod = "System", "Enviro"

            if method == "1site":
                lsite = imps - 1
                addlist = [imps]
            else:
                lsite = imps - 2
                addlist = [imps - 1, imps]

            ltensor = environ.GetLR("L",
                                    lsite,
                                    mps,
                                    mps,
                                    mpo,
                                    itensor=ltensor,
                                    method=lmethod)
            rtensor = environ.GetLR("R",
                                    imps + 1,
                                    mps,
                                    mps,
                                    mpo,
                                    itensor=rtensor,
                                    method=rmethod)

            # get the quantum number pattern
            qnmat, qnbigl, qnbigr = construct_qnmat(mps, mpo.ephtable,
                                                    mpo.pbond_list, addlist,
                                                    method, system)
            cshape = qnmat.shape

            # hdiag
            tmp_ltensor = einsum("aba -> ba", ltensor)
            tmp_MPOimps = einsum("abbc -> abc", mpo[imps])
            tmp_rtensor = einsum("aba -> ba", rtensor)
            if method == "1site":
                #   S-a c f-S
                #   O-b-O-g-O
                #   S-a c f-S
                path = [([0, 1], "ba, bcg -> acg"), ([1, 0], "acg, gf -> acf")]
                hdiag = multi_tensor_contract(path, tmp_ltensor, tmp_MPOimps,
                                              tmp_rtensor)[(qnmat == nexciton)]
                # initial guess   b-S-c
                #                   a
                cguess = mps[imps][qnmat == nexciton]
            else:
                #   S-a c   d f-S
                #   O-b-O-e-O-g-O
                #   S-a c   d f-S
                tmp_MPOimpsm1 = einsum("abbc -> abc", mpo[imps - 1])
                path = [
                    ([0, 1], "ba, bce -> ace"),
                    ([0, 1], "edg, gf -> edf"),
                    ([0, 1], "ace, edf -> acdf"),
                ]
                hdiag = multi_tensor_contract(path, tmp_ltensor, tmp_MPOimpsm1,
                                              tmp_MPOimps,
                                              tmp_rtensor)[(qnmat == nexciton)]
                # initial guess b-S-c-S-e
                #                 a   d
                cguess = tensordot(mps[imps - 1], mps[imps],
                                   axes=1)[qnmat == nexciton]
            cguess = cguess.asnumpy()
            hdiag *= inverse
            nonzeros = np.sum(qnmat == nexciton)

            # print("Hmat dim", nonzeros)

            def hop(c):
                # convert c to initial structure according to qn pattern
                cstruct = cvec2cmat(cshape, c, qnmat, nexciton)

                if method == "1site":
                    # S-a   l-S
                    #    d
                    # O-b-O-f-O
                    #    e
                    # S-c   k-S

                    path = [
                        ([0, 1], "abc, adl -> bcdl"),
                        ([2, 0], "bcdl, bdef -> clef"),
                        ([1, 0], "clef, lfk -> cek"),
                    ]
                    cout = multi_tensor_contract(path, ltensor,
                                                 Matrix(cstruct), mpo[imps],
                                                 rtensor)
                    # for small matrices, check hermiticity:
                    # a = tensordot(ltensor, mpo[imps], ((1), (0)))
                    # b = tensordot(a, rtensor, ((4), (1)))
                    # c = b.transpose((0, 2, 4, 1, 3, 5))
                    # d = c.reshape(16, 16)
                else:
                    # S-a       l-S
                    #    d   g
                    # O-b-O-f-O-j-O
                    #    e   h
                    # S-c       k-S
                    path = [
                        ([0, 1], "abc, adgl -> bcdgl"),
                        ([3, 0], "bcdgl, bdef -> cglef"),
                        ([2, 0], "cglef, fghj -> clehj"),
                        ([1, 0], "clehj, ljk -> cehk"),
                    ]
                    cout = multi_tensor_contract(
                        path,
                        ltensor,
                        Matrix(cstruct),
                        mpo[imps - 1],
                        mpo[imps],
                        rtensor,
                    )
                # convert structure c to 1d according to qn
                return inverse * cout.asnumpy()[qnmat == nexciton]

            if nroots != 1:
                cguess = [cguess]
                for iroot in range(nroots - 1):
                    cguess.append(np.random.random([nonzeros]) - 0.5)

            precond = lambda x, e, *args: x / (hdiag.asnumpy() - e + 1e-4)

            e, c = davidson(hop,
                            cguess,
                            precond,
                            max_cycle=100,
                            nroots=nroots,
                            max_memory=64000)
            # scipy arpack solver : much slower than davidson
            # A = spslinalg.LinearOperator((nonzeros,nonzeros), matvec=hop)
            # e, c = spslinalg.eigsh(A,k=1, which="SA",v0=cguess)
            # print("HC loops:", count[0])
            # logger.debug(f"isweep: {isweep}, e: {e}")

            energies.append(e)

            cstruct = cvec2cmat(cshape, c, qnmat, nexciton, nroots=nroots)

            if nroots == 1:
                # direct svd the coefficient matrix
                mt, mpsdim, mpsqn, compmps = renormalization_svd(
                    cstruct,
                    qnbigl,
                    qnbigr,
                    system,
                    nexciton,
                    Mmax=mmax,
                    percent=percent,
                )
            else:
                # diagonalize the reduced density matrix
                mt, mpsdim, mpsqn, compmps = renormalization_ddm(
                    cstruct,
                    qnbigl,
                    qnbigr,
                    system,
                    nexciton,
                    Mmax=mmax,
                    percent=percent,
                )

            if method == "1site":
                mps[imps] = mt
                if system == "L":
                    if imps != len(mps) - 1:
                        mps[imps + 1] = tensordot(compmps,
                                                  mps[imps + 1],
                                                  axes=1)
                        mps.qn[imps + 1] = mpsqn
                    else:
                        mps[imps] = tensordot(mps[imps], compmps, axes=1)
                        mps.qn[imps + 1] = [0]

                else:
                    if imps != 0:
                        mps[imps - 1] = tensordot(mps[imps - 1],
                                                  compmps,
                                                  axes=1)
                        mps.qn[imps] = mpsqn
                    else:
                        mps[imps] = tensordot(compmps, mps[imps], axes=1)
                        mps.qn[imps] = [0]
            else:
                if system == "L":
                    mps[imps - 1] = mt
                    mps[imps] = compmps
                else:
                    mps[imps] = mt
                    mps[imps - 1] = compmps

                # mps.dim_list[imps] = mpsdim
                mps.qn[imps] = mpsqn

    energies = np.array(energies)
    if nroots == 1:
        logger.debug("Optimization complete, lowest energy = %g",
                     energies.min())

    return energies
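
Note: the inner problem at each sweep step is a lowest-eigenpair computation for the matrix-free effective Hamiltonian applied by `hop`; the commented-out ARPACK branch follows the pattern sketched below, here with a random symmetric matrix standing in for the effective Hamiltonian (illustrative only; the production path uses `davidson` with `hdiag` as preconditioner):

import numpy as np
import scipy.sparse.linalg as spslinalg

rng = np.random.default_rng(1)
n = 200
H = rng.normal(size=(n, n))
H = (H + H.T) / 2                      # symmetric stand-in for the effective Hamiltonian

def hop(c):
    # matrix-free matvec, analogous to the `hop` closure inside the sweep
    return H @ c

A = spslinalg.LinearOperator((n, n), matvec=hop)
cguess = rng.normal(size=n)
e, c = spslinalg.eigsh(A, k=1, which="SA", v0=cguess)
print("lowest eigenvalue:", e[0], "reference:", np.linalg.eigvalsh(H)[0])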
Example #4
    def _evolve_dmrg_tdvp_ps(self, mpo, evolve_dt) -> "Mps":
        # PhysRevB.94.165116
        # TDVP projector splitting
        imag_time = np.iscomplex(evolve_dt)
        if imag_time:
            mps = self.copy()
            mps_conj = mps
        else:
            mps = self.to_complex()
            mps_conj = mps.conj()  # another copy, so 3x memory is used.

        # construct the environment matrix
        environ = Environ()
        # almost half is not used. Not a big deal.
        environ.construct(mps, mps_conj, mpo, "L")
        environ.construct(mps, mps_conj, mpo, "R")

        # a workaround for https://github.com/scipy/scipy/issues/10164
        if imag_time:
            evolve_dt = -evolve_dt.imag
            # used in calculating derivatives
            coef = -1
        else:
            coef = 1j

        # statistics for debug output
        cmf_rk_steps = []
        USE_RK = self.evolve_config.tdvp_ps_rk4
        # sweep for 2 rounds
        for i in range(2):
            for imps in mps.iter_idx_list(full=True):
                system = "L" if mps.left else "R"
                ltensor = environ.read("L", imps - 1)
                rtensor = environ.read("R", imps + 1)

                shape = list(mps[imps].shape)
                l_array = ltensor.array
                r_array = rtensor.array

                hop = hop_factory(l_array, r_array, mpo[imps].array, len(shape))

                def hop_svt(ms):
                    # S-a   l-S
                    #
                    # O-b - b-O
                    #
                    # S-c   k-S

                    path = [([0, 1], "abc, ck -> abk"), ([1, 0], "abk, lbk -> al")]
                    HC = multi_tensor_contract(path, l_array, ms, r_array)
                    return HC

                if USE_RK:
                    def func(t, y):
                        return hop(y.reshape(shape)).ravel() / coef
                    sol = solve_ivp(
                        func, (0, evolve_dt / 2.0), mps[imps].ravel().array, method="RK45"
                    )
                    cmf_rk_steps.append(len(sol.t))
                    mps_t = sol.y[:, -1]
                else:
                    # Can't use the same func because here H should be Hermitian
                    def func(y):
                        return hop(y.reshape(shape)).ravel()
                    mps_t = expm_krylov(func, (evolve_dt / 2) / coef, mps[imps].ravel().array)
                mps_t = mps_t.reshape(shape)

                qnbigl, qnbigr = mps._get_big_qn(imps)
                u, qnlset, v, qnrset = svd_qn.Csvd(
                    asnumpy(mps_t),
                    qnbigl,
                    qnbigr,
                    mps.qntot,
                    QR=True,
                    system=system,
                    full_matrices=False,
                )
                vt = v.T

                if mps.is_left_canon and imps != 0:
                    mps[imps] = vt.reshape([-1] + shape[1:])
                    mps_conj[imps] = mps[imps].conj()
                    mps.qn[imps] = qnrset

                    rtensor = environ.GetLR(
                        "R", imps, mps, mps_conj, mpo, itensor=rtensor, method="System"
                    )
                    r_array = rtensor.array

                    # reverse update u site
                    shape_u = u.shape

                    if USE_RK:
                        def func_u(t, y):
                            return hop_svt(y.reshape(shape_u)).ravel() / coef
                        sol_u = solve_ivp(
                            func_u, (0, -evolve_dt / 2), u.ravel(), method="RK45"
                        )
                        cmf_rk_steps.append(len(sol_u.t))
                        mps_t = sol_u.y[:, -1]
                    else:
                        def func_u(y):
                            return hop_svt(y.reshape(shape_u)).ravel()
                        mps_t = expm_krylov(func_u, (-evolve_dt / 2) / coef, u.ravel())
                    mps_t = mps_t.reshape(shape_u)
                    mps[imps - 1] = tensordot(
                        mps[imps - 1].array,
                        mps_t,
                        axes=(-1, 0),
                    )
                    mps_conj[imps - 1] = mps[imps - 1].conj()

                elif mps.is_right_canon and imps != len(mps) - 1:
                    mps[imps] = u.reshape(shape[:-1] + [-1])
                    mps_conj[imps] = mps[imps].conj()
                    mps.qn[imps + 1] = qnlset

                    ltensor = environ.GetLR(
                        "L", imps, mps, mps_conj, mpo, itensor=ltensor, method="System"
                    )
                    l_array = ltensor.array

                    # reverse update svt site
                    shape_svt = vt.shape

                    if USE_RK:
                        def func_svt(t, y):
                            return hop_svt(y.reshape(shape_svt)).ravel() / coef
                        sol_svt = solve_ivp(
                            func_svt, (0, -evolve_dt / 2), vt.ravel(), method="RK45"
                        )
                        cmf_rk_steps.append(len(sol_svt.t))
                        mps_t = sol_svt.y[:, -1]
                    else:
                        def func_svt(y):
                            return hop_svt(y.reshape(shape_svt)).ravel()
                        mps_t = expm_krylov(func_svt, (-evolve_dt / 2) / coef, vt.ravel())
                    mps_t = mps_t.reshape(shape_svt)
                    mps[imps + 1] = tensordot(
                        mps_t,
                        mps[imps + 1].array,
                        axes=(1, 0),
                    )
                    mps_conj[imps + 1] = mps[imps + 1].conj()

                else:
                    mps[imps] = mps_t
                    mps_conj[imps] = mps[imps].conj()
            mps._switch_direction()

        if USE_RK:
            steps_stat = stats.describe(cmf_rk_steps)
            logger.debug(f"TDVP-PS CMF steps: {steps_stat}")
            mps.evolve_config.stat = steps_stat

        return mps
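
Note: each local update of the projector-splitting scheme propagates a site (or bond) tensor by exp(±(dt/2)/coef · H_eff); `expm_krylov` does this matrix-free through the `hop`/`hop_svt` closures. A dense-matrix sketch of the same half-step, with a random Hermitian matrix standing in for the effective Hamiltonian:

import numpy as np
from scipy.linalg import expm

rng = np.random.default_rng(2)
n = 64
H = rng.normal(size=(n, n)) + 1j * rng.normal(size=(n, n))
H = (H + H.conj().T) / 2               # Hermitian effective-Hamiltonian stand-in

coef = 1j                              # real-time evolution, as in the code above
evolve_dt = 0.1
psi = rng.normal(size=n) + 1j * rng.normal(size=n)
psi /= np.linalg.norm(psi)

psi_half = expm((evolve_dt / 2) / coef * H) @ psi
print(np.linalg.norm(psi_half))        # ~1.0: the half-step is unitary in real time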
Example #5
    def _evolve_dmrg_tdvp_mctdhnew(self, mpo, evolve_dt) -> "Mps":
        # new regularization scheme
        # JCP 148, 124105 (2018)
        # JCP 149, 044119 (2018)

        # a workaround for https://github.com/scipy/scipy/issues/10164
        imag_time = np.iscomplex(evolve_dt)
        if imag_time:
            evolve_dt = -evolve_dt.imag
            # used in calculating derivatives
            coef = -1
        else:
            coef = 1j

        if self.is_left_canon:
            assert self.check_left_canonical()
            self.canonicalise()

        mps = self.to_complex(inplace=True)

        # construct the environment matrix
        environ = Environ()
        environ.construct(mps, mps.conj(), mpo, "R")

        # initial matrix
        ltensor = ones((1, 1, 1))
        rtensor = ones((1, 1, 1))

        new_mps = mps.metacopy()

        # statistics for debug output
        cmf_rk_steps = []

        for imps in range(len(mps)):
            shape = list(mps[imps].shape)

            system = "L" if mps.left else "R"
            qnbigl, qnbigr = mps._get_big_qn(imps)
            # Csvd returns the singular values twice (once for the U side, once for
            # the V side); both unpackings bind the same array to `s`.
            u, s, qnlset, v, s, qnrset = svd_qn.Csvd(
                mps[imps].asnumpy(),
                qnbigl,
                qnbigr,
                mps.qntot,
                system=system,
                full_matrices=False,
            )
            vt = v.T

            mps[imps] = u.reshape(shape[:-1] + [-1])

            ltensor = environ.GetLR(
                "L", imps - 1, mps, mps.conj(), mpo, itensor=ltensor, method="System"
            )
            rtensor = environ.GetLR(
                "R", imps + 1, mps, mps.conj(), mpo, itensor=rtensor, method="Enviro"
            )

            epsilon = 1e-10
            epsilon = np.sqrt(epsilon)
            s = s + epsilon * np.exp(-s / epsilon)

            svt = Matrix(np.diag(s).dot(vt))

            rtensor = tensordot(rtensor, svt, axes=(2, 1))
            rtensor = tensordot(Matrix(vt).conj(), rtensor, axes=(1, 0))

            if imps != len(mps) - 1:
                mps[imps + 1] = tensordot(svt, mps[imps + 1], axes=(-1, 0))
                mps.qn[imps + 1] = qnlset
                new_mps.qn[imps + 1] = qnlset.copy()

            S_inv = xp.diag(1.0 / s)

            hop = hop_factory(ltensor, rtensor, mpo[imps], len(shape))

            func = integrand_func_factory(shape, hop, imps == len(mps) - 1, S_inv, coef)

            sol = solve_ivp(
                func, (0, evolve_dt), mps[imps].ravel().array, method="RK45"
            )
            cmf_rk_steps.append(len(sol.t))
            ms = sol.y[:, -1].reshape(shape)

            if imps == len(mps) - 1:
                new_mps[imps] = ms * s[0]
            else:
                new_mps[imps] = ms
        mps._switch_direction()
        new_mps._switch_direction()
        new_mps.canonicalise()

        steps_stat = stats.describe(cmf_rk_steps)
        logger.debug(f"TDVP-MCTDH CMF steps: {steps_stat}")
        # new_mps.evolve_config.stat = steps_stat

        return new_mps
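
Note: the `epsilon` lines implement the regularization s -> s + eps * exp(-s / eps), which leaves sizeable singular values untouched but lifts near-zero ones to about eps, so the entries of `S_inv` stay bounded. A tiny numpy illustration:

import numpy as np

eps = np.sqrt(1e-10)                   # i.e. 1e-5, as in the code above
s = np.array([1.0, 1e-3, 1e-9, 0.0])
s_reg = s + eps * np.exp(-s / eps)
print(s_reg)                           # ~[1.0, 1e-3, 1e-5, 1e-5]
print(1.0 / s_reg)                     # bounded by ~1/eps, no division-by-zero blow-up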
Example #6
    def _evolve_dmrg_tdvp_mctdh(self, mpo, evolve_dt) -> "Mps":
        # TDVP for original MCTDH
        if self.is_right_canon:
            assert self.check_right_canonical()
            self.canonicalise()

        # a workaround for https://github.com/scipy/scipy/issues/10164
        imag_time = np.iscomplex(evolve_dt)
        if imag_time:
            evolve_dt = -evolve_dt.imag
            # used in calculating derivatives
            coef = -1
        else:
            coef = 1j

        # qn for this method has not been implemented
        self.use_dummy_qn = True
        self.clear_qn()
        mps = self.to_complex(inplace=True)
        mps_conj = mps.conj()
        environ = Environ()
        environ.construct(mps, mps_conj, mpo, "R")

        # initial matrix
        ltensor = np.ones((1, 1, 1))
        rtensor = np.ones((1, 1, 1))

        new_mps = self.metacopy()

        cmf_rk_steps = []

        for imps in range(len(mps)):
            ltensor = environ.GetLR(
                "L", imps - 1, mps, mps_conj, mpo, itensor=ltensor, method="System"
            )
            rtensor = environ.GetLR(
                "R", imps + 1, mps, mps_conj, mpo, itensor=rtensor, method="Enviro"
            )
            # density matrix
            S = transferMat(mps, mps_conj, "R", imps + 1).asnumpy()

            epsilon = 1e-8
            w, u = scipy.linalg.eigh(S)
            try:
                w = w + epsilon * np.exp(-w / epsilon)
            except FloatingPointError:
                logger.warning("eigenvalues of the density matrix contain negative values")
                w -= 2 * w.min()
                w = w + epsilon * np.exp(-w / epsilon)
            # print("sum w =", np.sum(w))
            # S = u.dot(np.diag(w)).dot(np.conj(u.T))
            S_inv = xp.asarray(u.dot(np.diag(1.0 / w)).dot(np.conj(u.T)))

            # pseudo inverse
            # S_inv = scipy.linalg.pinvh(S,rcond=1e-2)

            shape = mps[imps].shape

            hop = hop_factory(ltensor, rtensor, mpo[imps], len(shape))

            func = integrand_func_factory(shape, hop, imps == len(mps) - 1, S_inv, coef)

            sol = solve_ivp(
                func, (0, evolve_dt), mps[imps].ravel().array, method="RK45"
            )
            # print("CMF steps:", len(sol.t))
            cmf_rk_steps.append(len(sol.t))
            new_mps[imps] = sol.y[:, -1].reshape(shape)
            new_mps[imps].check_lortho()
            # print("orthogonal1",
            #       np.allclose(np.tensordot(new_mps[imps], np.conj(new_mps[imps]), axes=([0, 1], [0, 1])),
            #                   np.diag(np.ones(new_mps[imps].shape[2]))))
        steps_stat = stats.describe(cmf_rk_steps)
        logger.debug(f"TDVP-MCTDH CMF steps: {steps_stat}")

        return new_mps
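
Note: the density-matrix treatment above is a regularized inverse: eigendecompose S, lift near-zero (or slightly negative) eigenvalues with w -> w + eps * exp(-w / eps), and rebuild S_inv = u.dot(diag(1/w)).dot(u^dagger). A self-contained numpy/scipy sketch on a rank-deficient Hermitian matrix:

import numpy as np
import scipy.linalg

eps = 1e-8
rng = np.random.default_rng(3)
A = rng.normal(size=(4, 2))
S = A @ A.T                            # rank 2: two eigenvalues are numerically zero

w, u = scipy.linalg.eigh(S)
w_reg = w + eps * np.exp(-w / eps)     # zero modes lifted to ~eps
S_inv = u @ np.diag(1.0 / w_reg) @ u.conj().T
print(np.sort(w))                      # two entries near 0, two of order 1
print(np.sort(np.linalg.eigvalsh(S_inv)))  # inverse spectrum capped at ~1/eps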