Example #1
0
def test_environ():
    # Evolve a random MPS, then check that contracting any left/right
    # environment pair reproduces the full expectation value of the MPO.
    state = Mps.random(holstein_model, 1, 10)
    hamiltonian = Mpo(holstein_model)
    state = state.evolve(hamiltonian, 10)
    env = Environ(state, hamiltonian)
    for site in range(len(state) - 1):
        left = env.read("L", site)
        right = env.read("R", site + 1)
        value = complex(tensordot(left, right, axes=((0, 1, 2), (0, 1, 2)))).real
        assert pytest.approx(value) == state.expectation(hamiltonian)
Example #2
0
def test_environ_multi_mpo(mpdm):
    # An Environ built from a bare MPO and one built from the same MPO
    # wrapped in a one-element list must yield identical environments.
    state = Mps.random(holstein_model, 1, 10)
    if mpdm:
        state = MpDm.from_mps(state)
    hamiltonian = Mpo(holstein_model)
    state = state.evolve(hamiltonian, 10)
    env_single = Environ(state, hamiltonian)
    env_list = Environ(state, [hamiltonian])
    for site in range(len(state) - 1):
        for direction, idx in (("L", site), ("R", site + 1)):
            expected = env_single.read(direction, idx)
            actual = env_list.read(direction, idx)
            assert np.allclose(asnumpy(expected), asnumpy(actual))
Example #3
0
 def expectations(self, mpos) -> np.ndarray:
     """Compute the expectation value of each MPO in ``mpos`` efficiently.

     Assumes the MPOs share a common backbone: each MPO differs from the
     others at exactly one site (verified by the assert below).  A shared
     left/right environment is built once for the common backbone, so each
     expectation costs only a single local contraction.

     Returns a 1-D ``np.ndarray`` of the real parts of the expectation
     values, in the order of ``mpos``.
     """
     if len(mpos) < 3:
         # Too few MPOs to amortize building the shared environment.
         return np.array([self.expectation(mpo) for mpo in mpos])
     assert 2 < len(mpos)
     # id can be used as efficient hash because of `Matrix` implementation
     mpo_ids = np.array([[id(m) for m in mpo] for mpo in mpos])
     # Build the common backbone ids: start from mpo[0] and patch in
     # mpo[1]'s matrix at the one site where mpo[0] is unique.
     common_mpo_ids = mpo_ids[0].copy()
     # The site where mpo[0]'s matrix matches no other MPO (count == 1).
     mpo0_unique_idx = np.where(np.sum(mpo_ids == common_mpo_ids, axis=0) == 1)[0][0]
     common_mpo_ids[mpo0_unique_idx] = mpo_ids[1][mpo0_unique_idx]
     # For every MPO, locate the single site where it deviates from the backbone.
     x, unique_idx = np.where(mpo_ids != common_mpo_ids)
     # should find one at each line
     assert np.allclose(x, np.arange(len(mpos)))
     common_mpo = list(mpos[0])
     common_mpo[mpo0_unique_idx] = mpos[1][mpo0_unique_idx]
     self_conj = self._expectation_conj()
     environ = Environ()
     # Environments of the common backbone, shared by every MPO below.
     environ.construct(self, self_conj, common_mpo, "l")
     environ.construct(self, self_conj, common_mpo, "r")
     res_list = []
     for idx, mpo in zip(unique_idx, mpos):
         # Contract left env (< idx), the unique-site matrices, right env (> idx).
         l = environ.read("l", idx - 1)
         r = environ.read("r", idx + 1)
         path = self._expectation_path()
         res = multi_tensor_contract(path, l, self[idx], mpo[idx], self_conj[idx], r)
         res_list.append(float(res.real))
     return np.array(res_list)
Example #4
0
 def expectation(self, mpo, self_conj=None) -> float:
     """Return the real part of the expectation value of ``mpo``.

     If ``self_conj`` is not supplied, the conjugate state is derived
     from ``self`` via ``_expectation_conj``.
     """
     if self_conj is None:
         self_conj = self._expectation_conj()
     # Only right environments are needed: the left boundary of site 0
     # is the trivial (1, 1, 1) tensor.
     env = Environ()
     env.construct(self, self_conj, mpo, "r")
     left_boundary = ones((1, 1, 1))
     right_env = env.read("r", 1)
     contract_path = self._expectation_path()
     result = multi_tensor_contract(
         contract_path, left_boundary, self[0], mpo[0], self_conj[0], right_env
     )
     return float(result.real)
Example #5
0
    def _evolve_dmrg_tdvp_ps(self, mpo, evolve_dt) -> "Mps":
        """Evolve the state by ``evolve_dt`` with the TDVP projector-splitting
        integrator (PhysRevB.94.165116).

        Performs two full sweeps of half-step local evolutions, updating one
        site tensor at a time and back-propagating the bond tensor between
        neighboring sites.  Returns a new evolved ``Mps``; ``self`` is not
        modified (a copy is taken below).
        """
        # PhysRevB.94.165116
        # TDVP projector splitting
        # A dt with a nonzero imaginary part signals imaginary-time propagation.
        imag_time = np.iscomplex(evolve_dt)
        if imag_time:
            mps = self.copy()
            mps_conj = mps
        else:
            mps = self.to_complex()
            mps_conj = mps.conj()  # another copy, so 3x memory is used.

        # construct the environment matrix
        environ = Environ()
        # almost half is not used. Not a big deal.
        environ.construct(mps, mps_conj, mpo, "L")
        environ.construct(mps, mps_conj, mpo, "R")

        # a workaround for https://github.com/scipy/scipy/issues/10164
        if imag_time:
            evolve_dt = -evolve_dt.imag
            # used in calculating derivatives
            coef = -1
        else:
            coef = 1j

        # statistics for debug output
        cmf_rk_steps = []
        USE_RK = self.evolve_config.tdvp_ps_rk4
        # sweep for 2 rounds
        for i in range(2):
            for imps in mps.iter_idx_list(full=True):
                system = "L" if mps.left else "R"
                # environments on either side of the current site
                ltensor = environ.read("L", imps - 1)
                rtensor = environ.read("R", imps + 1)

                shape = list(mps[imps].shape)
                l_array = ltensor.array
                r_array = rtensor.array

                # effective single-site Hamiltonian action built from the
                # left env, right env and the local MPO matrix
                hop = hop_factory(l_array, r_array, mpo[imps].array, len(shape))

                def hop_svt(ms):
                    # Effective action on the bond (zero-site) tensor:
                    # contracts only the left and right environments.
                    # S-a   l-S
                    #
                    # O-b - b-O
                    #
                    # S-c   k-S

                    path = [([0, 1], "abc, ck -> abk"), ([1, 0], "abk, lbk -> al")]
                    HC = multi_tensor_contract(path, l_array, ms, r_array)
                    return HC

                # forward-evolve the current site tensor by dt/2
                if USE_RK:
                    def func(t, y):
                        return hop(y.reshape(shape)).ravel() / coef
                    sol = solve_ivp(
                        func, (0, evolve_dt / 2.0), mps[imps].ravel().array, method="RK45"
                    )
                    cmf_rk_steps.append(len(sol.t))
                    mps_t = sol.y[:, -1]
                else:
                    # Can't use the same func because here H should be Hermitian
                    def func(y):
                        return hop(y.reshape(shape)).ravel()
                    mps_t = expm_krylov(func, (evolve_dt / 2) / coef, mps[imps].ravel().array)
                mps_t = mps_t.reshape(shape)

                # split the evolved tensor, keeping quantum numbers consistent
                qnbigl, qnbigr = mps._get_big_qn(imps)
                u, qnlset, v, qnrset = svd_qn.Csvd(
                    asnumpy(mps_t),
                    qnbigl,
                    qnbigr,
                    mps.qntot,
                    QR=True,
                    system=system,
                    full_matrices=False,
                )
                vt = v.T

                if mps.is_left_canon and imps != 0:
                    # right-to-left sweep step: keep vt at this site, then
                    # back-evolve the bond tensor u by -dt/2 and absorb it
                    # into the site to the left
                    mps[imps] = vt.reshape([-1] + shape[1:])
                    mps_conj[imps] = mps[imps].conj()
                    mps.qn[imps] = qnrset

                    rtensor = environ.GetLR(
                        "R", imps, mps, mps_conj, mpo, itensor=rtensor, method="System"
                    )
                    r_array = rtensor.array

                    # reverse update u site
                    shape_u = u.shape

                    if USE_RK:
                        def func_u(t, y):
                            return hop_svt(y.reshape(shape_u)).ravel() / coef
                        sol_u = solve_ivp(
                            func_u, (0, -evolve_dt / 2), u.ravel(), method="RK45"
                        )
                        cmf_rk_steps.append(len(sol_u.t))
                        mps_t = sol_u.y[:, -1]
                    else:
                        def func_u(y):
                            return hop_svt(y.reshape(shape_u)).ravel()
                        mps_t = expm_krylov(func_u, (-evolve_dt / 2) / coef, u.ravel())
                    mps_t = mps_t.reshape(shape_u)
                    mps[imps - 1] = tensordot(
                        mps[imps - 1].array,
                        mps_t,
                        axes=(-1, 0),
                    )
                    mps_conj[imps - 1] = mps[imps - 1].conj()

                elif mps.is_right_canon and imps != len(mps) - 1:
                    # left-to-right sweep step: keep u at this site, then
                    # back-evolve the bond tensor vt by -dt/2 and absorb it
                    # into the site to the right
                    mps[imps] = u.reshape(shape[:-1] + [-1])
                    mps_conj[imps] = mps[imps].conj()
                    mps.qn[imps + 1] = qnlset

                    ltensor = environ.GetLR(
                        "L", imps, mps, mps_conj, mpo, itensor=ltensor, method="System"
                    )
                    l_array = ltensor.array

                    # reverse update svt site
                    shape_svt = vt.shape

                    if USE_RK:
                        def func_svt(t, y):
                            return hop_svt(y.reshape(shape_svt)).ravel() / coef
                        sol_svt = solve_ivp(
                            func_svt, (0, -evolve_dt / 2), vt.ravel(), method="RK45"
                        )
                        cmf_rk_steps.append(len(sol_svt.t))
                        mps_t = sol_svt.y[:, -1]
                    else:
                        def func_svt(y):
                            return hop_svt(y.reshape(shape_svt)).ravel()
                        mps_t = expm_krylov(func_svt, (-evolve_dt / 2) / coef, vt.ravel())
                    mps_t = mps_t.reshape(shape_svt)
                    mps[imps + 1] = tensordot(
                        mps_t,
                        mps[imps + 1].array,
                        axes=(1, 0),
                    )
                    mps_conj[imps + 1] = mps[imps + 1].conj()

                else:
                    # boundary site: no bond tensor to back-evolve
                    mps[imps] = mps_t
                    mps_conj[imps] = mps[imps].conj()
            mps._switch_direction()

        if USE_RK:
            steps_stat = stats.describe(cmf_rk_steps)
            logger.debug(f"TDVP-PS CMF steps: {steps_stat}")
            mps.evolve_config.stat = steps_stat

        return mps