Example #1
    def energy_1x1_nn(self, state, env_c4v):
        r"""
        :param state: wavefunction
        :param env_c4v: CTM c4v symmetric environment
        :type state: IPEPS
        :type env_c4v: ENV_C4V
        :return: energy per site
        :rtype: float

        For a 1-site invariant c4v iPEPS with no 4-site term present in the Hamiltonian,
        it is enough to construct a single reduced density matrix of a nearest-neighbour
        2x1 pair of sites. Afterwards, the energy per site `e` is computed by evaluating
        the individual terms in the Hamiltonian through :math:`\langle \mathcal{O} \rangle = Tr(\rho_{2x1} \mathcal{O})`
        
        .. math:: 

            e = -2\langle h2_{<\bf{0},\bf{x}>} \rangle - h_x \langle h1_{\bf{0}} \rangle

        where the factor of 2 accounts for the horizontal and vertical nearest-neighbour
        bonds, which contribute equally by the c4v symmetry (see `-2 * eSzSz` below).
        """
        assert self.q == 0, "energy_1x1_nn requires no 4-site term (q == 0)"

        # reduced density matrix of a nearest-neighbour (2x1) pair of sites
        rdm2x1 = rdm_c4v.rdm2x1_sl(state, env_c4v)
        eSx = torch.einsum('ijaj,ia', rdm2x1, self.sx)        # on-site <S^x>
        eSzSz = torch.einsum('ijab,ijab', rdm2x1, self.szsz)  # bond <S^z S^z>
        energy_per_site = -2 * eSzSz - self.hx * eSx          # 2 bonds per site
        return energy_per_site
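
A standalone way to see what these einsum contractions compute (a sketch, not part of the source): once the rank-4 rdm and operator are flattened to (d^2)x(d^2) matrices, the pattern 'ijab,ijab' is just Tr(rho @ O) for a symmetric O such as S^z S^z.

import torch

# Standalone sketch (not from the source): 'ijab,ijab' equals Tr(rho @ O)
# once rho and O are flattened to (d^2)x(d^2) matrices, provided O is
# symmetric, which holds for S^z (x) S^z.
d = 2
rho = torch.rand(d, d, d, d, dtype=torch.float64)
rho = 0.5 * (rho + rho.permute(2, 3, 0, 1))   # Hermitian (real symmetric) rdm
rho = rho / torch.einsum('ijij', rho)         # normalize to unit trace

sz = 0.5 * torch.tensor([[1., 0.], [0., -1.]], dtype=torch.float64)
szsz = torch.einsum('ia,jb->ijab', sz, sz)    # two-site operator S^z (x) S^z

e1 = torch.einsum('ijab,ijab', rho, szsz)
e2 = torch.trace(rho.reshape(d * d, d * d) @ szsz.reshape(d * d, d * d))
assert torch.allclose(e1, e2)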
Example #2
    def ctmrg_conv_rdm2x1(state, env, history, ctm_args=cfg.ctm_args):
        if not history:
            history = dict({"log": []})
        rdm = rdm2x1_sl(state, env, force_cpu=ctm_args.conv_check_cpu)
        # rdm= rdm2x1(state, env, force_cpu=ctm_args.conv_check_cpu,
        #     verbosity=ctm_args.verbosity_rdm)
        dist = float('inf')
        if len(history["log"]) > 1:
            dist = torch.dist(rdm, history["rdm"], p=2).item()
        # log dist and observables
        if args.obs_freq > 0 and \
            (len(history["log"]) % args.obs_freq == 0 or
            (len(history["log"]) - 1) % args.obs_freq == 0):
            e_curr = energy_f(state, env, force_cpu=ctm_args.conv_check_cpu)
            obs_values, obs_labels = model.eval_obs(
                state, env, force_cpu=ctm_args.conv_check_cpu)
            print(
                ", ".join([f"{len(history['log'])}", f"{dist}", f"{e_curr}"] +
                          [f"{v}" for v in obs_values]))
        else:
            print(f"{len(history['log'])}, {dist}")
        # update history
        history["rdm"] = rdm
        history["log"].append(dist)

        converged = dist < ctm_args.ctm_conv_tol
        if converged or len(history['log']) >= ctm_args.ctm_max_iter:
            log.info({
                "history_length": len(history['log']),
                "history": history['log'],
                "final_multiplets": compute_multiplets(env)
            })
            return converged, history
        return False, history
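
All of these convergence checks follow the same contract: the callback receives (state, env, history) and returns (converged, history), with the history dict threaded between CTM sweeps. A standalone sketch of that contract with toy stand-ins (nothing below comes from peps-torch; a damped "environment" tensor replaces a real CTMRG move):

import torch

# Toy convergence check with the same (state, env, history) -> (bool, history)
# shape as the examples above; dist is the 2-norm change of the stored tensor.
def toy_conv_check(state, env, history, tol=1e-8, max_iter=100):
    if not history:
        history = {"log": []}
    dist = float('inf')
    if "rdm" in history:
        dist = torch.dist(env, history["rdm"], p=2).item()
    history["rdm"] = env
    history["log"].append(dist)
    return dist < tol or len(history["log"]) >= max_iter, history

env, target, history = torch.rand(4, 4), torch.eye(4), None
while True:
    env = 0.5 * (env + target)              # stand-in for one CTMRG sweep
    done, history = toy_conv_check(None, env, history)
    if done:
        break
print(f"stopped after {len(history['log'])} sweeps, dist={history['log'][-1]:.2e}")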
Example #3
    def eval_obs(self, state, env_c4v, force_cpu=False):
        r"""
        :param state: wavefunction
        :param env_c4v: CTM c4v symmetric environment
        :type state: IPEPS
        :type env_c4v: ENV_C4V
        :return:  expectation values of observables, labels of observables
        :rtype: list[float], list[str]

        Computes the following observables in order

            1. magnetization
            2. :math:`\langle S^z \rangle,\ \langle S^+ \rangle,\ \langle S^- \rangle`
    
        where the on-site magnetization is defined as
        
        .. math::
            
            \begin{align*}
            m &= \sqrt{\langle S^z \rangle^2 + \langle S^x \rangle^2 + \langle S^y \rangle^2}
               = \sqrt{\langle S^z \rangle^2 + 1/4(\langle S^+ \rangle + \langle S^-
              \rangle)^2 - 1/4(\langle S^+ \rangle - \langle S^- \rangle)^2} \\
              &= \sqrt{\langle S^z \rangle^2 + \langle S^+ \rangle \langle S^- \rangle}
            \end{align*}

        Usual spin components can be obtained through the following relations
        
        .. math::
            
            \begin{align*}
            S^+ &=S^x+iS^y               & S^x &= 1/2(S^+ + S^-)\\
            S^- &=S^x-iS^y\ \Rightarrow\ & S^y &=-i/2(S^+ - S^-)
            \end{align*}
        """
        # TODO optimize/unify ?
        # expect "list" of (observable label, value) pairs ?
        obs = dict()
        with torch.no_grad():
            rdm2x1 = rdm_c4v.rdm2x1_sl(state, env_c4v, force_cpu=force_cpu,
                verbosity=cfg.ctm_args.verbosity_rdm)
            obs[f"SS2x1"] = torch.einsum('ijab,ijab', rdm2x1, self.SS_rot)

            # reduce rdm2x1 to 1x1
            rdm1x1 = torch.einsum('ijaj->ia', rdm2x1)
            rdm1x1 = rdm1x1 / torch.trace(rdm1x1)
            for label, op in self.obs_ops.items():
                obs[f"{label}"] = torch.trace(rdm1x1 @ op)
            obs[f"m"] = sqrt(abs(obs[f"sz"]**2 + obs[f"sp"] * obs[f"sm"]))

        # prepare list with labels and values
        obs_labels = [f"m"] + [f"{lc}"
                               for lc in self.obs_ops.keys()] + [f"SS2x1"]
        obs_values = [obs[label] for label in obs_labels]
        return obs_values, obs_labels
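
The closed form used in the line computing obs["m"] can be checked numerically on a known state (standalone sketch, not from the source): for spin-1/2 fully polarized along x, the magnetization must come out 1/2.

import torch
from math import sqrt

# Check m = sqrt(<Sz>^2 + <S+><S->) on |+x> = (|up> + |down>)/sqrt(2).
sz = 0.5 * torch.tensor([[1., 0.], [0., -1.]], dtype=torch.complex128)
sp = torch.tensor([[0., 1.], [0., 0.]], dtype=torch.complex128)   # S^+
sm = sp.conj().t()                                                # S^-

psi = torch.tensor([1., 1.], dtype=torch.complex128) / sqrt(2.)   # |+x>
rho = torch.outer(psi, psi.conj())                                # pure-state rdm

ev = lambda op: torch.trace(rho @ op).real.item()
m = sqrt(abs(ev(sz)**2 + ev(sp) * ev(sm)))
assert abs(m - 0.5) < 1e-12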
Example #4
def ctmrg_conv_f(state, env, history, ctm_args=cfg.ctm_args):
    if not history:
        history = dict({"log": []})
    rdm2x1 = rdm2x1_sl(state, env, force_cpu=ctm_args.conv_check_cpu)
    dist = float('inf')
    if len(history["log"]) > 0:
        dist = torch.dist(rdm2x1, history["rdm"], p=2).item()
    history["rdm"] = rdm2x1
    history["log"].append(dist)
    if dist < ctm_args.ctm_conv_tol or len(history["log"]) >= ctm_args.ctm_max_iter:
        log.info({"history_length": len(history['log']), "history": history['log']})
        return True, history
    return False, history
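
A side note on the metric used throughout these checks (standalone verification, not from the source): torch.dist(a, b, p=2) is the 2-norm of the flattened difference, i.e. the Frobenius norm for matrices, which makes it a natural stopping criterion on the reduced density matrix.

import torch

# torch.dist(a, b, p=2) equals the Frobenius norm of (a - b)
a = torch.rand(16, 16, dtype=torch.float64)
b = torch.rand(16, 16, dtype=torch.float64)
assert torch.allclose(torch.dist(a, b, p=2), torch.linalg.norm(a - b))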
Example #5
def ctmrg_conv_rdm2x1(state_ini, env, history, ctm_args=cfg.ctm_args):
    with torch.no_grad():
        if not history:
            history = dict({"log": []})
        rdm2x1 = rdm2x1_sl(state_ini, env, force_cpu=ctm_args.conv_check_cpu)
        dist = float('inf')
        if len(history["log"]) > 1:
            dist = torch.dist(rdm2x1, history["rdm"], p=2).item()
        # update history
        history["rdm"] = rdm2x1
        history["log"].append(dist)
    # always report "not converged": the CTMRG driver then keeps sweeping until
    # ctm_args.ctm_max_iter while the distance history is accumulated
    return False, history
Example #6
def compute_w0(B_tensor, env):
    """
    Let |phi'> be the purified PEPS associated with the A' tensor
    (passed here as B_tensor). Compute the overlap <phi'|phi'>.

    Parameters
    ----------
    B_tensor : torch.tensor(4,4,4,4,4)

    Returns
    -------
    w0 : float
    """
    rdm = rdm2x1_sl((B_tensor, B_tensor), env)
    rdm = rdm.view(*[2] * 8).contiguous()  # split each dim-4 index into 2x2
    # contract ancilla degrees of freedom
    rdm = torch.einsum('abcdafch', rdm)
    # trace the remaining physical degrees of freedom
    w0 = torch.einsum('abab', rdm)
    return w0
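
A standalone check (nothing here comes from the source) that the view/einsum pair above implements a partial trace over the leading dim-2 factor of each index, leaving a rank-4 tensor over the trailing factors:

import torch

# 'abcdafch' on the 8-index view sums the diagonal of index pairs (0,4) and
# (2,6); the free indices b,d,f,h survive in that order.
rdm = torch.rand(4, 4, 4, 4, dtype=torch.float64)
red = torch.einsum('abcdafch', rdm.view(*[2] * 8))

# reference: split the indices explicitly and sum the traced pairs in a loop
ref = torch.zeros(2, 2, 2, 2, dtype=torch.float64)
r8 = rdm.view(2, 2, 2, 2, 2, 2, 2, 2)
for a in range(2):
    for c in range(2):
        ref += r8[a, :, c, :, a, :, c, :]
assert torch.allclose(red, ref)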
Example #7
def compute_w1(A_tensor, B_tensor, gate, env):
    """
    Let |phi> and |phi'> be the purified PEPS associated with the A and B
    tensors, respectively. Compute the overlap <phi|G|phi'>.

    Parameters
    ----------
    A_tensor, B_tensor : torch.tensor(4,4,4,4,4)

    gate : torch.tensor

    Returns
    -------
    w1 : float
    """
    rdm = rdm2x1_sl((A_tensor, B_tensor), env)
    rdm = rdm.view(*[2] * 8).contiguous()  # split each dim-4 index into 2x2
    # contract ancilla degrees of freedom
    rdm = torch.einsum('abcdafch', rdm)
    # sandwich the two-site gate between the physical indices
    w1 = torch.einsum('abcd,cdab', rdm, gate)
    return w1
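
The gate argument is a rank-4 two-site tensor. A hedged sketch of how such a gate can be built as an imaginary-time step exp(-dt h) of a nearest-neighbour term (the choice of h = S^z S^z and of dt below are illustrative assumptions, not taken from the source):

import torch

# Hedged sketch: two-site gate G = exp(-dt * h) for an assumed term h.
sz = 0.5 * torch.tensor([[1., 0.], [0., -1.]], dtype=torch.float64)
h2 = torch.kron(sz, sz)                          # 4x4 two-site term S^z (x) S^z
gate = torch.linalg.matrix_exp(-0.01 * h2)       # imaginary-time step, dt = 0.01
gate = gate.view(2, 2, 2, 2)                     # rank-4: (s0, s1, s0', s1')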
Example #8
    def ctmrg_conv_f(state, env, history, ctm_args=cfg.ctm_args):
        with torch.no_grad():
            if not history:
                history = dict({"log": []})
            rdm2x1 = rdm2x1_sl(state, env, force_cpu=ctm_args.conv_check_cpu)
            dist = float('inf')

            # compute observables at every CTM sweep
            e_curr = model.energy_1x1(state, env)
            obs_values, obs_labels = model.eval_obs(state, env)
            print(", ".join([f"{len(history['log'])}", f"{e_curr}"] +
                            [f"{v}" for v in obs_values]))

            if len(history["log"]) > 1:
                dist = torch.dist(rdm2x1, history["rdm"], p=2).item()
            history["rdm"] = rdm2x1
            history["log"].append(dist)
            if dist < ctm_args.ctm_conv_tol:
                log.info({"history_length": len(history['log']), "history": history['log']})
                return True, history
        return False, history
Example #9
def compute_w2(A_tensor, gate, env):
    """
    Let |phi> be the purified PEPS associated with the A tensor.
    Compute the overlap <phi|G*G|phi>.

    Parameters
    ----------
    A_tensor : torch.tensor(4,4,4,4,4)
    
    gate : torch.tensor

    Returns
    -------
    w2 : float
    """
    rdm = rdm2x1_sl((A_tensor, A_tensor), env)
    rdm = rdm.view(*[2] * 8).contiguous()  # split each dim-4 index into 2x2
    # contract ancilla degrees of freedom
    rdm = torch.einsum('abcdafch', rdm)
    # square the gate: (G @ G) as a rank-4 tensor
    w2 = torch.einsum('cdij,ijkl->cdkl', gate, gate)
    w2 = torch.einsum('abcd,cdab', rdm, w2)
    return w2
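
A hedged note on how these three overlaps typically fit together (this step is not shown in the source): for a Hermitian gate G, w0, w1 and w2 assemble into the squared distance between G|phi> and |phi'>, the quantity a full-update style optimization of the B tensor would minimize. The helper name below is hypothetical:

def gate_application_cost(A_tensor, B_tensor, gate, env):
    # Hypothetical helper (not in the source): for a Hermitian gate G,
    #   || G|phi> - |phi'> ||^2 = <phi|G*G|phi> - 2*Re<phi|G|phi'> + <phi'|phi'>
    #                           = w2 - 2*Re(w1) + w0
    w0 = compute_w0(B_tensor, env)
    w1 = compute_w1(A_tensor, B_tensor, gate, env)
    w2 = compute_w2(A_tensor, gate, env)
    return w2 - 2 * w1 + w0   # real tensors assumed here, so Re(w1) = w1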