Example #1
def _dense_ham_term(H):
    h1, (h2L, h2R) = H
    D = h1.shape[0]
    dtype = h1.dtype

    E = tf.eye(D, dtype=dtype)

    h = tensornetwork.ncon([h1, E], [(-1, -3), (-2, -4)])
    for (hl, hr) in zip(h2L, h2R):
        h += tensornetwork.ncon([hl, hr], [(-1, -3), (-2, -4)])

    return h
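A minimal usage sketch (an illustration, not part of the original snippet): `H` is assumed to be a tuple `(h1, (h2L, h2R))`, where `h1` is the one-site term and the two-site term is the sum over `zip(h2L, h2R)` of left/right operator factors. For a transverse-field Ising chain, assuming eager TensorFlow:

import tensorflow as tf

# H = sum_i (-X_i) + sum_i (-Z_i Z_{i+1}), encoded as (h1, (h2L, h2R))
X = tf.constant([[0., 1.], [1., 0.]], dtype=tf.float64)
Z = tf.constant([[1., 0.], [0., -1.]], dtype=tf.float64)
H = (-X, ([-Z], [Z]))

h = _dense_ham_term(H)  # dense two-site term of shape (2, 2, 2, 2)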
Example #2
def _full_ham_top(H):
    h1, (h2L, h2R) = H
    D = h1.shape[0]
    dtype = h1.dtype

    E = tf.eye(D, dtype=dtype)

    fullH = tensornetwork.ncon([h1, E], [(-1, -3), (-2, -4)])
    fullH += tensornetwork.ncon([E, h1], [(-1, -3), (-2, -4)])
    for (hl, hr) in zip(h2L, h2R):
        fullH += tensornetwork.ncon([hl, hr], [(-1, -3), (-2, -4)])
    for (hl, hr) in zip(h2R, h2L):
        fullH += tensornetwork.ncon([hl, hr], [(-1, -3), (-2, -4)])

    return tf.reshape(fullH, (D**2, D**2))
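Usage sketch (an assumption consistent with the surrounding snippets): the returned (D**2, D**2) matrix can be diagonalized directly to obtain the top-level ground state.

import tensorflow as tf

# H in the (h1, (h2L, h2R)) format shown after Example #1
fullH = _full_ham_top(H)
eigvals, eigvecs = tf.linalg.eigh(fullH)  # eigenvalues in ascending order
ground_state = eigvecs[:, 0]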
Example #3
def opt_energy_env_1site(iso_012, h_op_1site, h_mpo_2site, state_1site):
    iso_021 = tf.transpose(iso_012, (0, 2, 1))
    terms_012, terms_021 = _ascend_op_to_1site_partial(h_op_1site, h_mpo_2site,
                                                       iso_012, iso_021)
    terms = terms_012 + tf.transpose(terms_021, (0, 2, 1))
    env = tensornetwork.ncon([state_1site, terms], [(1, -1), (1, -2, -3)])
    return env
Example #4
def ascend_uniform_MPO_to_top(mpo_tensor_dense, isos_012):
    """MPO ordering:
          3
          |
       0--m--1
          |
          2
    """
    L = len(isos_012)
    for l in range(L):
        # NOTE: There is no attempt to be economical with transpose here!
        mpo_tensor_dense = tensornetwork.ncon([
            isos_012[l],
            tf.conj(isos_012[l]), mpo_tensor_dense, mpo_tensor_dense
        ], [(-4, 3, 1), (-3, 4, 5), (2, -2, 5, 1), (-1, 2, 4, 3)])
    op = tensornetwork.ncon([mpo_tensor_dense], [(1, 1, -1, -2)])
    return op
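As a concrete illustration of the index ordering in the docstring (a sketch, not from the original source), a trivial bond-dimension-1 MPO tensor built from the identity looks like this:

import numpy as np

D = 2
# legs: 0 and 1 are the left/right MPO bonds; 2 points down, 3 points up
mpo_identity = np.eye(D).reshape(1, 1, D, D)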
Example #5
def descend_state_1site_R(state_1site, iso_012):  #χ^4
    """Descends a state from the top to the rightmost index of the isometry `iso`.
    Physically, if `iso` has 012 ordering, this is a descent to the right and
    if `iso` has 021 ordering, this is a descent to the left.
    """
    return tensornetwork.ncon(
        [iso_012, state_1site, tf.conj(iso_012)], [(1, 2, -1), (1, 3),
                                                   (3, 2, -2)])
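Per the docstring, the left descent is the same contraction fed with the 021-transposed isometry. A hedged sketch of the companion helper (the name is illustrative):

import tensorflow as tf

def descend_state_1site_L(state_1site, iso_021):  # χ^4
    # iso_021 = tf.transpose(iso_012, (0, 2, 1))
    return descend_state_1site_R(state_1site, iso_021)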
Example #6
def _mpo_with_state(iso_012, iso_021, h_mpo_2site, state_1site):
    # contract ascended hamiltonian at level `lup` with nearest 1-site descended state
    h2L, h2R = h_mpo_2site

    envL = [
        tensornetwork.ncon(
            [state_1site, iso_021, h, tf.conj(iso_012)],
            [(4, 2), (4, -1, 1), (3, 1), (2, 3, -2)])  # one transpose required
        for h in h2L
    ]

    envR = [
        tensornetwork.ncon(
            [state_1site, iso_012, h, tf.conj(iso_021)],
            [(4, 2), (4, -1, 1), (3, 1), (2, 3, -2)])  # one transpose required
        for h in h2R
    ]

    return envL, envR
Example #7
def _ascend_op_2site_to_1site_partial(mpo_2site, iso_012, iso_021):
    op2L, op2R = mpo_2site

    M = len(op2L)  # MPO bond dimension

    terms = []
    for m in range(M):
        # permute result to 012 order: M mild transposes
        iso_op_mpo_L_012 = tensornetwork.ncon([iso_021, op2L[m]], [(-1, -3, 1), (-2, 1)])

        terms.append(_ascend_partial(op2R[m], iso_op_mpo_L_012))
    iso_op_2site_012 = sum(terms)

    return iso_op_2site_012
Example #8
def expand_bonds(isos, new_Ds, new_top_rank=None):
    old_Ds = [iso.shape[1] for iso in isos] + [isos[-1].shape[0]]

    if new_top_rank is None:
        new_top_rank = old_Ds[-1]
    new_Ds = new_Ds + [new_top_rank]

    if new_Ds[0] != old_Ds[0]:
        raise ValueError("Bottom dimension expansion not supported!")

    isos_new = [iso for iso in isos]
    for i in range(len(isos)):
        # Absorb dimension-expanding isometries on indices as needed
        if old_Ds[i + 1] != new_Ds[i + 1]:
            v = random_isometry(
                old_Ds[i + 1], new_Ds[i + 1], dtype=isos_new[i].dtype)
            isos_new[i] = tensornetwork.ncon([v, isos_new[i]], [(1, -1), (1, -2, -3)])
            if i + 1 < len(isos):
                isos_new[i + 1] = tensornetwork.ncon(
                    [tf.conj(v), tf.conj(v), isos_new[i + 1]], [(1, -2),
                                                                (2, -3),
                                                                (-1, 1, 2)])
    return isos_new
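The snippet relies on a `random_isometry(D1, D2, dtype)` helper that is not shown. A minimal sketch of what it is assumed to return, a (D1, D2) matrix with orthonormal rows built from a QR decomposition:

import numpy as np
import tensorflow as tf

def random_isometry(D1, D2, dtype=tf.float64):
    """Random (D1, D2) isometry with v @ v^H = eye(D1); requires D1 <= D2."""
    q, _ = np.linalg.qr(np.random.randn(D2, D1))  # orthonormal columns
    return tf.constant(q.T, dtype=dtype)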
Example #9
    def _compute_env(lvl, reflect=False):
        # TODO: Could shorten this a bit by doing only left or right at one time
        h2 = h2s_above[lvl]
        if reflect:
            h2 = reflect_mpo_2site(h2)

        envL, envR = _mpo_with_state(*isos_wt_above[lvl], h2,
                                     states_1site_above[lvl])

        # descend envs back down to the level of the gap
        for lvl2 in reversed(range(lvl)):
            iso_012_l2, iso_021_l2 = isos_wt_above[lvl2]
            if reflect:
                envR = _descend_energy_env_L(envR, iso_021_l2)
                envL = _descend_energy_env_R(envL, iso_012_l2)
            else:
                envL = _descend_energy_env_L(envL, iso_021_l2)
                envR = _descend_energy_env_R(envR, iso_012_l2)

        if reflect:
            iso_h2_L, iso_h2_R = iso_h2R_012, iso_h2L_012
        else:
            iso_h2_L, iso_h2_R = iso_h2L_012, iso_h2R_012

        # contract with the hamiltonian + isometry opposite the gap
        envL = sum(
            tensornetwork.ncon([eL, ihR], [(1, -1), (1, -2, -3)])
            for eL, ihR in zip(envL, iso_h2_R))

        envR = sum(
            tensornetwork.ncon([eR, ihL], [(1, -1), (1, -2, -3)])
            for eR, ihL in zip(envR, iso_h2_L))

        # weight each term according to the number of occurrences
        # in the translation-invariant tree
        weight = 1 / 2.0**(lvl + 1)
        return (envL + envR) * weight, weight
Example #10
def _energy_expval_env(isos_012, h_op_1site, h_mpo_2site, states_1site_above):
    if len(isos_012) == 1:  # top of tree
        h_mpo_2site = add_mpos_2site(h_mpo_2site,
                                     reflect_mpo_2site(h_mpo_2site))
        env = opt_energy_env_1site(isos_012[0], h_op_1site, h_mpo_2site,
                                   states_1site_above[0])
    else:
        env1 = opt_energy_env_1site(isos_012[0], h_op_1site, h_mpo_2site,
                                    states_1site_above[0])
        env2 = opt_energy_env_2site(isos_012, h_mpo_2site,
                                    states_1site_above[1:])
        env = env1 + env2 / 2
        # NOTE: There are *two* environments for each Ham. term spanning two
        #       isometries. To get the correct energy we must divide env2 by 2.
    nsites = 2**(len(isos_012) - 1)
    return tensornetwork.ncon([tf.conj(isos_012[0]), env], [(1, 2, 3),
                                                            (1, 2, 3)]) * nsites
Example #11
def shift_ham(H, shift="auto"):
    h1, (h2L, h2R) = H
    D = h1.shape[0]
    dtype = h1.dtype

    if shift == "auto":
        e1 = tf.reduce_max(tf.cast(tf.linalg.eigvalsh(h1), dtype.real_dtype))

        h2 = sum([
            tensornetwork.ncon([hl, hr], [(-1, -3), (-2, -4)])
            for (hl, hr) in zip(h2L, h2R)
        ])
        h2 = tf.reshape(h2, (D**2, D**2))
        e2 = tf.reduce_max(tf.cast(tf.linalg.eigvalsh(h2), dtype.real_dtype))

        shift = tf.cast(e1 + e2, dtype)

    if shift != 0.0:
        H = (h1 - shift * tf.eye(D, dtype=dtype), (h2L, h2R))

    return H, shift
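Usage sketch, reusing the (h1, (h2L, h2R)) format from the sketch after Example #1 (an assumption, not part of the original snippet): with shift="auto" the shifted Hamiltonian has non-positive one- and two-site spectra, and `shift` records the offset so true energies can be recovered later.

import tensorflow as tf

X = tf.constant([[0., 1.], [1., 0.]], dtype=tf.float64)
Z = tf.constant([[1., 0.], [0., -1.]], dtype=tf.float64)
H = (-X, ([-Z], [Z]))

H_shifted, shift = shift_ham(H)  # add `shift` back to recover true energies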
Example #12
def opt_energy_env(isos_012,
                   h_op_1site,
                   h_mpo_2site,
                   states_1site_above,
                   envsq_dtype=None):
    if len(isos_012) == 1:  # top of tree
        h_mpo_2site = add_mpos_2site(h_mpo_2site,
                                     reflect_mpo_2site(h_mpo_2site))
        env = opt_energy_env_1site(isos_012[0], h_op_1site, h_mpo_2site,
                                   states_1site_above[0])
    else:
        env1 = opt_energy_env_1site(isos_012[0], h_op_1site, h_mpo_2site,
                                    states_1site_above[0])
        env2 = opt_energy_env_2site(isos_012, h_mpo_2site,
                                    states_1site_above[1:])
        env = env1 + env2

    if envsq_dtype is not None:
        env = tf.cast(env, envsq_dtype)

    env_sq = tensornetwork.ncon([env, tf.conj(env)], [(-1, 1, 2), (-2, 1, 2)])
    return env, env_sq
Example #13
def _ascend_partial(op, iso):
    """Ascend an operator through the right index of an isometry.
    For 012 (021) index ordering, this is equivalent to ascending from the
    physical right (left).
    Cost: D^4."""
    return tensornetwork.ncon([iso, op], [(-1, -2, 1), (-3, 1)])
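A sketch (with a hypothetical helper name) of how this composes with `_complete_partial_ascend`, shown later in this collection, to ascend a one-site operator through the right leg of an isometry in two D^4 steps:

def _ascend_op_1site_right(op, iso):
    # illustrative composition; `_complete_partial_ascend` is defined in a
    # later example
    return _complete_partial_ascend(_ascend_partial(op, iso), iso)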
Example #14
def _iso_from_svd(u, vh):
    return tensornetwork.ncon([u, vh], [(-1, 1), (1, -2, -3)])
Example #15
 def RY(self, target, theta):
     l1 = self.select_op_1(target)
     l2 = self.select_target_1(target)
     self.psi = tn.ncon([ry(theta), self.psi], [l1, l2])
Example #16
    def CRY(self, control, target, theta):
        l1 = self.select_op_2(control, target)
        l2 = self.select_target_2(control, target)

        self.psi = tn.ncon([cry(theta), self.psi], [l1, l2])
Example #17
expectX = np.trace(rho[0].reshape(chi_b**3, chi_b**3) @
                   np.kron(np.eye(32), sX))
# energy_exact = (-2 / np.sin(np.pi / (2 * n_sites))) / n_sites  # PBC (Ising)
energy_exact = (-4 / np.sin(np.pi / n_sites)) / n_sites  # PBC (XX model)
energy_error = energy_per_site - energy_exact
bias0 = 2 * energy_per_site
ham[0] = ham[0] - (bias0 * np.eye(chi_b**3)).reshape([chi_b] * 6)

print('Iteration: %d of %d, Energy: %f, Energy Error: %e, XMag: %e\n'
      % (0, n_iterations, energy_per_site, energy_error, expectX))

# do optimization iterations
en_keep = [0] * n_iterations
for p in range(n_iterations):
  for z in range(n_levels):
    bias = tn.ncon([ham[z], rho[z]], [[1, 2, 3, 4, 5, 6], [1, 2, 3, 4, 5, 6]])
    # print('bias: %f \n' % (bias))
    chi_temp = ham[z].shape[0]
    ham_temp = ham[z] - (((bias + bias_shift) *
                          np.eye(chi_temp**3)).reshape([chi_temp] * 6))

    if right_on:
      # RightLink
      tensors = [w[z], rho[z + 1], w[z].conj()]
      connects = [[4, -3, 1], [3, 2, -2, 3, 1, -4], [4, -1, 2]]
      con_order = [3, 2, 4, 1]
      rhotemp = tn.ncon(tensors, connects, con_order)
      _, proj = trunct_eigh(rhotemp, chi_m)
      gam1, gam2 = RightLink(ham_temp, u[z], w[z], rho[z + 1], proj, chi_m)

      if (u[z].shape[3] < u[z].shape[1]):
Example #18
 def update_parameters(self, new_parameters):
     self.params = tn.Node(new_parameters)
     self.params_x = tn.ncon([self.params, self.Domain_tensor],
                             [[-1, 1], [-2, 1, -3]])
Example #19
    def CU3(self, control, target, theta3):
        l1 = self.select_op_2(control, target)
        l2 = self.select_target_2(control, target)

        self.state = tn.ncon([cu3(theta3), self.state], [l1, l2])
Example #20
    def CRZ(self, control, target, theta):
        l1 = self.select_op_2(control, target)
        l2 = self.select_target_2(control, target)

        self.state = tn.ncon([crz(theta), self.state], [l1, l2])
Example #21
 def CZ(self, control, target):
     l1 = self.select_op_2(control, target)
     l2 = self.select_target_2(control, target)
     self.state = tn.ncon([cz, self.state], [l1, l2])
Example #22
 def U3(self, target, theta3):
     l1 = self.select_op_1(target)
     l2 = self.select_target_1(target)
     self.state = tn.ncon([u3(theta3), self.state], [l1, l2])
Example #23
 def RZ(self, target, theta):
     l1 = self.select_op_1(target)
     l2 = self.select_target_1(target)
     self.state = tn.ncon([rz(theta), self.state], [l1, l2])
Example #24
 def Z(self, target):
     l1 = self.select_op_1(target)
     l2 = self.select_target_1(target)
     self.state = tn.ncon([z, self.state], [l1, l2])
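The gate methods above all defer the ncon index bookkeeping to `select_op_1`/`select_target_1` (and the two-qubit variants), which are not shown. A self-contained sketch of the same idea, assuming the state is stored as a rank-n tensor of shape (2,)*n:

import numpy as np
import tensornetwork as tn

def apply_1q_gate(gate, psi, k):
    """Apply a 2x2 gate with (out, in) index order to qubit k."""
    n = len(psi.shape)
    op_idx = [-(k + 1), 1]                 # gate output becomes leg k
    st_idx = [-(i + 1) for i in range(n)]  # all state legs stay free...
    st_idx[k] = 1                          # ...except leg k, contracted
    return tn.ncon([gate, psi], [op_idx, st_idx])

z = np.diag([1., -1.])
psi = np.zeros((2, 2))
psi[1, 1] = 1.0
print(apply_1q_gate(z, psi, 0)[1, 1])  # -1.0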
Example #25
E, V = eigh(ham_final, link_charges=Z2Charge([0, 1]), which='SA')
en_even1 = E.todense()[0].item()
en_odd1 = E.todense()[1].item()

# compare with analytic energies
assert np.allclose(en_even0, en_even1)
assert np.allclose(en_odd0, en_odd1)
"""
Example 2: compute truncated eigendecomposition of a reduced density matrix,
keeping only the eigenvalues above some cut-off threshold
"""

rho_temp = BT.fromdense([ind_chib1] + [ind_chib0],
                        np.array([[1, 0], [0, 0]], dtype=float))
V = V.reshape([2**(n_sites // 2), 2**(n_sites // 2), 2])
rho_half = tn.ncon([V, rho_temp, V.conj()], [[-1, 1, 2], [2, 3], [-2, 1, 3]])

# decomp with evalues sorted by magnitude
E2, V2 = eigh(rho_half,
              which='LM',
              full_sort=False,
              threshold=1e-10,
              max_kept=15)
rho_recover = V2 @ BLA.diag(E2) @ V2.T.conj()
assert np.allclose(rho_half.todense(), rho_recover.todense())

# decomp with evalues sorted by magnitude within each charge block
E2, V2 = eigh(rho_half, which='LM', threshold=1e-10, full_sort=False)
rho_recover = V2 @ BLA.diag(E2) @ V2.T.conj()
assert np.allclose(rho_half.todense(), rho_recover.todense())
Example #26
 def CY(self, control, target):
     l1 = self.select_op_2(control, target)
     l2 = self.select_target_2(control, target)
     self.psi = tn.ncon([cy, self.psi], [l1, l2])
Example #27
    def _simulate(self, initialstate, reortho=False, verbose=False):
        """
        do a lanczos simulation

        Parameters:
        -------------------------
        initialstate: tf.Tensor,
                      the initial state
        reortho:      bool
                      if True, krylov vectors are reorthogonalized at each step (costly)
                      the current implementation is not optimal: there are better ways to do this
        verbose:      bool
        verbosity flag
        """
        self.delta = tf.cast(self.delta, initialstate.dtype)
        self.deltaEta = tf.cast(self.deltaEta, initialstate.dtype)

        dtype = self.matvec(initialstate).dtype
        #initialization:
        xn = copy.deepcopy(initialstate)
        xn /= tf.sqrt(
            tn.ncon([tf.conj(xn), xn],
                    [list(range(1, len(xn.shape) + 1)),
                     list(range(1, len(xn.shape) + 1))]))

        xn_minus_1 = tf.zeros(initialstate.shape, dtype=dtype)
        converged = False
        it = 0
        kn = []
        epsn = []
        self.vecs = []
        first = True
        while not converged:
            knval = tf.sqrt(
                tn.ncon([tf.conj(xn), xn],
                        [list(range(1, len(xn.shape) + 1)),
                         list(range(1, len(xn.shape) + 1))]))
            if tf.abs(knval) < tf.abs(self.delta):
                break
            kn.append(knval)
            xn = xn / kn[-1]
            #store the Lanczos vector for later

            if reortho:
                for v in self.vecs:
                    xn -= tn.ncon([tf.conj(v), xn],
                                  [list(range(1, len(v.shape) + 1)),
                                   list(range(1, len(xn.shape) + 1))]) * v
            self.vecs.append(xn)
            Hxn = self.matvec(xn)
            epsn.append(
                tn.ncon([tf.conj(xn), Hxn],
                        [list(range(1, len(xn.shape) + 1)),
                         list(range(1, len(Hxn.shape) + 1))]))
            if (it % self.Ndiag) == 0 and len(epsn) >= 1:
                #diagonalize the effective Hamiltonian

                Heff = tf.convert_to_tensor(np.diag(epsn) +
                                            np.diag(kn[1:], 1) +
                                            np.diag(np.conj(kn[1:]), -1),
                                            dtype=dtype)
                eta, u = tf.linalg.eigh(Heff)  #could use a tridiag solver
                if not first:
                    if tf.abs(tf.linalg.norm(eta - etaold)) < tf.abs(
                            self.deltaEta):
                        converged = True
                first = False
                etaold = eta[0]
            if it > 0:
                Hxn -= (self.vecs[-1] * epsn[-1])
                Hxn -= (self.vecs[-2] * kn[-1])
            else:
                Hxn -= (self.vecs[-1] * epsn[-1])
            xn = Hxn
            it = it + 1
            if it > self.ncv:
                break
        self.Heff = tf.convert_to_tensor(np.diag(epsn) + np.diag(kn[1:], 1) +
                                         np.diag(np.conj(kn[1:]), -1),
                                         dtype=dtype)
        eta, u = tf.linalg.eigh(self.Heff)  #could use tridiag
        states = []
        for n2 in range(min(1, eta.shape[0])):
            state = tf.zeros(initialstate.shape, dtype=initialstate.dtype)
            for n1 in range(len(self.vecs)):
                state += self.vecs[n1] * u[n1, n2]
            states.append(state / tf.sqrt(
                tn.ncon([tf.conj(state), state],
                        [list(range(1, len(state.shape) + 1)),
                         list(range(1, len(state.shape) + 1))])))
        return eta[0], states[0], converged
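The recurring pattern in this snippet, contracting a tensor with its conjugate over identical label lists, computes the scalar product <x, x>. A minimal sketch:

import tensorflow as tf
import tensornetwork as tn

x = tf.constant([[1., 2.], [3., 4.]])
labels = list(range(1, len(x.shape) + 1))
norm_sq = tn.ncon([tf.conj(x), x], [labels, labels])  # == 30.0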
Example #28
def opt_energy_env_2site(isos_012, h_mpo_2site, states_1site_above):
    isos_wt = isos_with_transposes(isos_012)
    iso_012, iso_021 = isos_wt[0]
    isos_wt_above = isos_wt[1:]
    levels_above = len(isos_wt_above)

    # Ascend two-site Hamiltonian terms through to the bottom of the final isometry
    h2s_above = _ascend_op_2site_to_2site_many(h_mpo_2site, isos_wt)

    # hamiltonian with isometry opposite the gap
    h2L, h2R = h_mpo_2site
    iso_h2R_012 = [
        tensornetwork.ncon([iso_021, h], [(-1, -3, 1), (-2, 1)]) for h in h2R
    ]  # transpose to 012
    iso_h2L_012 = [
        tensornetwork.ncon([iso_012, h], [(-1, -2, 1), (-3, 1)]) for h in h2L
    ]

    def _compute_env(lvl, reflect=False):
        # TODO: Could shorten this a bit by doing only left or right at one time
        h2 = h2s_above[lvl]
        if reflect:
            h2 = reflect_mpo_2site(h2)

        envL, envR = _mpo_with_state(*isos_wt_above[lvl], h2,
                                     states_1site_above[lvl])

        # descend envs back down to the level of the gap
        for lvl2 in reversed(range(lvl)):
            iso_012_l2, iso_021_l2 = isos_wt_above[lvl2]
            if reflect:
                envR = _descend_energy_env_L(envR, iso_021_l2)
                envL = _descend_energy_env_R(envL, iso_012_l2)
            else:
                envL = _descend_energy_env_L(envL, iso_021_l2)
                envR = _descend_energy_env_R(envR, iso_012_l2)

        if reflect:
            iso_h2_L, iso_h2_R = iso_h2R_012, iso_h2L_012
        else:
            iso_h2_L, iso_h2_R = iso_h2L_012, iso_h2R_012

        # contract with the hamiltonian + isometry opposite the gap
        envL = sum(
            tensornetwork.ncon([eL, ihR], [(1, -1), (1, -2, -3)])
            for eL, ihR in zip(envL, iso_h2_R))

        envR = sum(
            tensornetwork.ncon([eR, ihL], [(1, -1), (1, -2, -3)])
            for eR, ihL in zip(envR, iso_h2_L))

        # weight each term according to the number of occurrences
        # in the translation-invariant tree
        weight = 1 / 2.0**(lvl + 1)
        return (envL + envR) * weight, weight

    weightsum = 0.0
    env_total = []
    for lvl in range(levels_above):
        env, weight = _compute_env(lvl)
        weightsum += weight
        env_total.append(env)

    # Now compute the boundary term
    env, weight = _compute_env(levels_above - 1, reflect=True)
    weightsum += weight
    env_total.append(env)

    env_total = sum(env_total)

    assert weightsum == 1.0

    return env_total
Example #29
def optimize_mod_binary_mera(hamAB_0,
                             hamBA_0,
                             rhoAB_0,
                             rhoBA_0,
                             wC,
                             vC,
                             uC,
                             numiter=1000,
                             refsym=True,
                             nsteps_steady_state=8,
                             verbose=0,
                             opt_u=True,
                             opt_vw=True,
                             numpy_update=True,
                             opt_all_layers=False,
                             opt_u_after=9):
    """
    optimization of a scale invariant modified binary MERA tensor network
    Args:
        hamAB_0 (tf.Tensor): bottom-layer Hamiltonian on the AB lattice
        hamBA_0 (tf.Tensor): bottom-layer Hamiltonian on the BA lattice
        rhoAB_0 (tf.Tensor): reduced density matrix on the A-B lattice
        rhoBA_0 (tf.Tensor): reduced density matrix on the B-A lattice
        wC (list of tf.Tensor): isometries of the MERA, with bottom layers first
        vC (list of tf.Tensor): isometries of the MERA, with bottom layers first
        uC (list of tf.Tensor): disentanglers of the MERA
        numiter (int):  number of iteration steps 
        refsym (bool):  if `True`, impose reflection symmetry 
        nsteps_steady_state (int): number of power-method iteration steps for calculating the 
                                   steady state density matrices 
        verbose (int): verbosity flag
        opt_u (bool):  if `False`, skip disentangler optimization 
        opt_vw (bool):  if `False`, skip isometry optimization 
        numpy_update (bool): if `True`, use numpy svd to calculate updates
        opt_all_layers (bool): if `True`, optimize all layers
                               if `False`, optimize only truncating layers
        opt_u_after (int): start optimizing disentangler only after `opt_u_after` initial optimization steps

    Returns: 
        wC (list of tf.Tensor): optimized isometries
        vC (list of tf.Tensor): optimized isometries
        uC (list of tf.Tensor): optimized disentanglers
        rhoAB (tf.Tensor):      steady-state density matrix on the A-B lattice at the top layer
        rhoBA (tf.Tensor):      steady-state density matrix on the B-A lattice at the top layer
        run_times (list of float): run times per iteration step 
        Energies (list of float): energies per iteration step 
    """
    dtype = rhoAB_0.dtype

    hamAB = [0 for x in range(len(vC) + 1)]
    hamBA = [0 for x in range(len(vC) + 1)]
    rhoAB = [0 for x in range(len(vC) + 1)]
    rhoBA = [0 for x in range(len(vC) + 1)]

    hamAB[0] = hamAB_0
    hamBA[0] = hamBA_0

    chi1 = hamAB[0].shape[0]

    bias = tf.math.reduce_max(
        tf.linalg.eigvalsh(tf.reshape(hamAB[0], (chi1 * chi1, chi1 * chi1))))
    hamAB[0] = hamAB[0] - bias * tf.reshape(tf.eye(chi1 * chi1, dtype=dtype),
                                            (chi1, chi1, chi1, chi1))
    hamBA[0] = hamBA[0] - bias * tf.reshape(tf.eye(chi1 * chi1, dtype=dtype),
                                            (chi1, chi1, chi1, chi1))

    skip_layer = [misc_mera.skip_layer(w) for w in wC]
    for p in range(len(wC)):
        if skip_layer[p]:
            hamAB[p + 1], hamBA[p + 1] = ascending_super_operator(
                hamAB[p], hamBA[p], wC[p], vC[p], uC[p], refsym)

    Energies = []
    run_times = []
    for k in range(numiter):
        t1 = time.time()
        rhoAB_0, rhoBA_0 = steady_state_density_matrices(
            nsteps_steady_state, rhoAB_0, rhoBA_0, wC[-1], vC[-1], uC[-1],
            refsym)
        rhoAB[-1] = rhoAB_0
        rhoBA[-1] = rhoBA_0
        for p in range(len(rhoAB) - 2, -1, -1):
            rhoAB[p], rhoBA[p] = descending_super_operator(
                rhoAB[p + 1], rhoBA[p + 1], wC[p], vC[p], uC[p], refsym)

        if verbose > 0:
            if np.mod(k, 10) == 1:
                Energies.append(
                    (tn.ncon([rhoAB[0], hamAB[0]], [[1, 2, 3, 4], [1, 2, 3, 4]]
                             ) + tn.ncon([rhoBA[0], hamBA[0]],
                                         [[1, 2, 3, 4], [1, 2, 3, 4]])) / 4 +
                    bias / 2)
                stdout.write(
                    '\rIteration: %i of %i: E = %.8f, err = %.16f at D = %i with %i layers'
                    % (int(k), int(numiter), float(Energies[-1]),
                       float(Energies[-1] + 4 / np.pi),
                       int(wC[-1].shape[2]), len(wC)))
                stdout.flush()

        for p in range(len(wC)):
            if (not opt_all_layers) and skip_layer[p]:
                continue

            if k >= opt_u_after:
                uEnv = get_env_disentangler(hamAB[p], hamBA[p], rhoBA[p + 1],
                                            wC[p], vC[p], uC[p], refsym)
                if opt_u:
                    if refsym:
                        uEnv = uEnv + tf.transpose(uEnv, (1, 0, 3, 2))
                    if numpy_update:
                        uC[p] = misc_mera.u_update_svd_numpy(uEnv)
                    else:
                        uC[p] = misc_mera.u_update_svd(uEnv)

            wEnv = get_env_w_isometry(hamAB[p], hamBA[p], rhoBA[p + 1],
                                      rhoAB[p + 1], wC[p], vC[p], uC[p])
            if opt_vw:
                if numpy_update:
                    wC[p] = misc_mera.w_update_svd_numpy(wEnv)
                else:
                    wC[p] = misc_mera.w_update_svd(wEnv)
                if refsym:
                    vC[p] = wC[p]
                else:
                    vEnv = get_env_v_isometry(hamAB[p], hamBA[p], rhoBA[p + 1],
                                              rhoAB[p + 1], wC[p], vC[p],
                                              uC[p])
                    vC[p] = misc_mera.w_update_svd(vEnv)

            hamAB[p + 1], hamBA[p + 1] = ascending_super_operator(
                hamAB[p], hamBA[p], wC[p], vC[p], uC[p], refsym)

        run_times.append(time.time() - t1)
        if verbose > 2:
            print('time per iteration: ', run_times[-1])

    return wC, vC, uC, rhoAB[-1], rhoBA[-1], run_times, Energies
Example #30
 def Y(self, target):
     l1 = self.select_op_1(target)
     l2 = self.select_target_1(target)
     self.psi = tn.ncon([y, self.psi], [l1, l2])
Example #31
# np.save('XXData_temp12.npy', (u, w, v, rho, ham))

# warm-up sweep
ham = [0] * (n_levels + 1)
ham[0] = ham_init.copy()
for z in range(n_levels):
    ham[z + 1] = 2 * LiftHam(ham[z], u[z], w[z])

# diagonalize top level Hamiltonian, find GS within the charge=0 sector
ham_top = (ham[n_levels] + ham[n_levels].transpose([1, 2, 0, 4, 5, 3]) +
           ham[n_levels].transpose([2, 0, 1, 5, 3, 4]))
_, v = eigh(ham_top, link_charges=U1Charge([0]))

# lower the density matrix, compute spectrum of 1-site density
rho = [0] * (n_levels + 1)
rho[n_levels] = tn.ncon([v.conj(), v], [[-1, -2, -3, 1], [-4, -5, -6, 1]])
spect_chi = [0] * (n_levels + 1)
sp_temp, _ = eigh(tn.ncon([rho[n_levels]], [[-1, 1, 2, -2, 1, 2]]),
                  full_sort=True)
spect_chi[n_levels] = sp_temp.todense()
for z in reversed(range(n_levels)):
    rho[z] = LowerDensity(u[z], w[z], rho[z + 1])
    sp_temp, _ = eigh(tn.ncon([rho[z]], [[-1, 1, 2, -2, 1, 2]]),
                      full_sort=True)
    spect_chi[z] = sp_temp.todense()

# en_bias = tn.ncon([ham[0], rho[0]], [[1, 2, 3, 4, 5, 6], [1, 2, 3, 4, 5, 6]]).item()
# chi_temp = ham[0].shape[0]
# eye_temp = ((en_bias) * np.eye(chi_temp**3).reshape([chi_temp] * 6))
# ham[0] = ham[0] - BT.fromdense(ham[0].sparse_shape, eye_temp)
# for z in range(n_levels):
Example #32
def wegner_ncon(f, G, holes, particles, occA, occB, occC, occD):

    # - Decouple off-diagonal 1B and 2B pieces
    fod = np.zeros(f.shape)
    fod[np.ix_(particles, holes)] += f[np.ix_(particles, holes)]
    fod[np.ix_(holes, particles)] += f[np.ix_(holes, particles)]
    fd = f - fod

    God = np.zeros(G.shape)
    God[np.ix_(particles, particles, holes,
               holes)] += G[np.ix_(particles, particles, holes, holes)]
    God[np.ix_(holes, holes, particles,
               particles)] += G[np.ix_(holes, holes, particles, particles)]
    Gd = G - God

    # - Calculate 1B generator
    # first term
    sum1_1b_1 = ncon([fd, fod], [(-1, 1), (1, -2)]).numpy()
    sum1_1b_2 = np.transpose(sum1_1b_1)
    sum1_1b = sum1_1b_1 - sum1_1b_2

    # second term
    sum2_1b_1 = ncon([fd, God], [(1, 2), (2, -1, 1, -2)]).numpy()
    sum2_1b_2 = ncon([fod, Gd], [(1, 2), (2, -1, 1, -2)]).numpy()
    sum2_1b_3 = sum2_1b_1 - sum2_1b_2
    sum2_1b = ncon([occA, sum2_1b_3], [(-1, -2, 1, 2), (1, 2)]).numpy()

    # third term
    sum3_1b_1 = ncon([occC, God], [(-1, -2, -3, 1, 2, 3),
                                   (1, 2, 3, -4)]).numpy()
    sum3_1b_2 = ncon([Gd, sum3_1b_1], [(3, -1, 1, 2), (1, 2, 3, -2)]).numpy()
    sum3_1b_3 = np.transpose(sum3_1b_2)
    sum3_1b = sum3_1b_2 - sum3_1b_3

    eta1B = sum1_1b + sum2_1b + 0.5 * sum3_1b

    # - Calculate 2B generator
    # first term (P_ij piece)
    sum1_2b_1 = ncon([fd, God], [(-1, 1), (1, -2, -3, -4)]).numpy()
    sum1_2b_2 = ncon([fod, Gd], [(-1, 1), (1, -2, -3, -4)]).numpy()
    sum1_2b_3 = sum1_2b_1 - sum1_2b_2
    sum1_2b_4 = np.transpose(sum1_2b_3, [1, 0, 2, 3])
    sum1_2b_5 = sum1_2b_3 - sum1_2b_4

    # first term (P_kl piece)
    sum1_2b_6 = ncon([fd, God], [(1, -3), (-1, -2, 1, -4)]).numpy()
    sum1_2b_7 = ncon([fod, Gd], [(1, -3), (-1, -2, 1, -4)]).numpy()
    sum1_2b_8 = sum1_2b_6 - sum1_2b_7
    sum1_2b_9 = np.transpose(sum1_2b_8, [0, 1, 3, 2])
    sum1_2b_10 = sum1_2b_8 - sum1_2b_9

    sum1_2b = sum1_2b_5 - sum1_2b_10

    # second term
    sum2_2b_1 = ncon([occB, God], [(-1, -2, 1, 2), (1, 2, -3, -4)]).numpy()
    sum2_2b_2 = ncon([occB, Gd], [(-1, -2, 1, 2), (1, 2, -3, -4)]).numpy()
    sum2_2b_3 = ncon([Gd, sum2_2b_1], [(-1, -2, 1, 2), (1, 2, -3, -4)]).numpy()
    sum2_2b_4 = ncon([God, sum2_2b_2], [(-1, -2, 1, 2),
                                        (1, 2, -3, -4)]).numpy()
    sum2_2b = sum2_2b_3 - sum2_2b_4

    # third term
    sum3_2b_1 = ncon([Gd, God], [(1, -1, 2, -3), (2, -2, 1, -4)]).numpy()
    sum3_2b_2 = np.transpose(sum3_2b_1, [1, 0, 2, 3])
    sum3_2b_3 = np.transpose(sum3_2b_1, [0, 1, 3, 2])
    sum3_2b_4 = np.transpose(sum3_2b_1, [1, 0, 3, 2])
    sum3_2b_5 = sum3_2b_1 - sum3_2b_2 - sum3_2b_3 + sum3_2b_4
    sum3_2b = ncon([occA, sum3_2b_5], [(1, 2, -1, -2), (1, 2, -3, -4)]).numpy()

    eta2B = sum1_2b + 0.5 * sum2_2b + sum3_2b

    return (eta1B, eta2B)
Example #33
 def _ascend(op, iso, iso_conj):
     return tensornetwork.ncon([iso_conj, op, iso], [(-1, 3, 1), (1, 2),
                                                     (-2, 3, 2)])
Example #34
def _iso_from_uinv(env, env_uinv):
    return tensornetwork.ncon([env_uinv, env], [(-1, 1), (1, -2, -3)])
Example #35
def _iso_from_svd(u, vh):
    return tensornetwork.ncon([u, vh], [(-1, 1), (1, -2, -3)])
Example #36
def _complete_partial_ascend(iso_op, iso):
    """Complete a partial operator ascension performed by `_ascend_partial()`.
    This contracts with the conjugated isometry.
    Cost: D^4."""
    return tensornetwork.ncon([tf.conj(iso), iso_op], [(-1, 1, 2), (-2, 1, 2)])
Example #37
def _ascend_partial(op, iso):
    """Ascend an operator through the right index of an isometry.
    For 012 (021) index ordering, this is equivalent to ascending from the
    physical right (left).
    Cost: D^4."""
    return tensornetwork.ncon([iso, op], [(-1, -2, 1), (-3, 1)])
Example #38
def check_iso(iso):
    sq = tensornetwork.ncon([iso, tf.conj(iso)], [(-1, 1, 2), (-2, 1, 2)])
    return tf.norm(sq - tf.eye(sq.shape[0], dtype=sq.dtype))
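Usage sketch (an illustration, not from the original source): build an isometry from a QR decomposition and verify that `check_iso` returns a norm close to zero.

import numpy as np
import tensorflow as tf

D, d = 4, 2
q, _ = np.linalg.qr(np.random.randn(D * d, D))  # orthonormal columns
iso = tf.constant(q.T.reshape(D, D, d))         # top leg first
print(check_iso(iso).numpy())                   # ~1e-15 for a true isometry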
Example #39
def outermat(A, B):
    chi = A.shape[0]
    contract = [A, B]
    idxs = [[-2, -1], [-3, -4]]
    return tn.ncon(contract, idxs).reshape((chi**2, chi**2))
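With these index labels the reshaped result is the outer product of vec(A^T) and vec(B); a quick sanity sketch:

import numpy as np

A = np.random.randn(3, 3)
B = np.random.randn(3, 3)
assert np.allclose(outermat(A, B), np.outer(A.T.ravel(), B.ravel()))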
Example #40
def single_mpo_heff_np(mpo, L, R, A):
    # Apply the effective one-site Hamiltonian (the L--mpo--R sandwich) to
    # the MPS tensor A.
    tensors = [L, A, mpo, R]
    idxs = [[2, -1, 1], [1, 3, 4], [2, 5, -2, 3], [5, -3, 4]]
    newA = tensornetwork.ncon(tensors, idxs)
    return newA
Example #41
def flow(f, G, eta1B, eta2B, holes, particles, occA, occB, occC, occD):

    # - Calculate dE/ds
    # first term
    sum1_0b_1 = ncon([occA, eta1B], [(1, 2, -1, -2), (1, 2)]).numpy()
    sum1_0b = ncon([sum1_0b_1, f], [(1, 2), (2, 1)]).numpy()

    # second term
    sum2_0b_1 = np.matmul(eta2B, occD)
    sum2_0b = ncon([sum2_0b_1, G], [(1, 2, 3, 4), (3, 4, 1, 2)]).numpy()

    dE = sum1_0b + 0.5 * sum2_0b

    # - Calculate df/ds
    # first term
    sum1_1b_1 = ncon([eta1B, f], [(-1, 1), (1, -2)]).numpy()
    sum1_1b_2 = np.transpose(sum1_1b_1)
    sum1_1b = sum1_1b_1 + sum1_1b_2

    # second term (might need to fix)
    sum2_1b_1 = ncon([eta1B, G], [(1, 2), (2, -1, 1, -2)]).numpy()
    sum2_1b_2 = ncon([f, eta2B], [(1, 2), (2, -1, 1, -2)]).numpy()
    sum2_1b_3 = sum2_1b_1 - sum2_1b_2
    sum2_1b = ncon([occA, sum2_1b_3], [(-1, -2, 1, 2), (1, 2)]).numpy()

    # third term
    sum3_1b_1 = ncon([occC, G], [(-1, -2, -3, 1, 2, 3), (1, 2, 3, -4)]).numpy()
    sum3_1b_2 = ncon([eta2B, sum3_1b_1], [(3, -1, 1, 2),
                                          (1, 2, 3, -2)]).numpy()
    sum3_1b_3 = np.transpose(sum3_1b_2)
    sum3_1b = sum3_1b_2 + sum3_1b_3

    df = sum1_1b + sum2_1b + 0.5 * sum3_1b

    # - Calculate dG/ds
    # first term (P_ij piece)
    sum1_2b_1 = ncon([eta1B, G], [(-1, 1), (1, -2, -3, -4)]).numpy()
    sum1_2b_2 = ncon([f, eta2B], [(-1, 1), (1, -2, -3, -4)]).numpy()
    sum1_2b_3 = sum1_2b_1 - sum1_2b_2
    sum1_2b_4 = np.transpose(sum1_2b_3, [1, 0, 2, 3])
    sum1_2b_5 = sum1_2b_3 - sum1_2b_4

    # first term (P_kl piece)
    sum1_2b_6 = ncon([eta1B, G], [(1, -3), (-1, -2, 1, -4)]).numpy()
    sum1_2b_7 = ncon([f, eta2B], [(1, -3), (-1, -2, 1, -4)]).numpy()
    sum1_2b_8 = sum1_2b_6 - sum1_2b_7
    sum1_2b_9 = np.transpose(sum1_2b_8, [0, 1, 3, 2])
    sum1_2b_10 = sum1_2b_8 - sum1_2b_9

    sum1_2b = sum1_2b_5 - sum1_2b_10

    # second term
    sum2_2b_1 = ncon([occB, G], [(-1, -2, 1, 2), (1, 2, -3, -4)]).numpy()
    sum2_2b_2 = ncon([occB, eta2B], [(-1, -2, 1, 2), (1, 2, -3, -4)]).numpy()
    sum2_2b_3 = ncon([eta2B, sum2_2b_1], [(-1, -2, 1, 2),
                                          (1, 2, -3, -4)]).numpy()
    sum2_2b_4 = ncon([G, sum2_2b_2], [(-1, -2, 1, 2), (1, 2, -3, -4)]).numpy()
    sum2_2b = sum2_2b_3 - sum2_2b_4

    # third term
    sum3_2b_1 = ncon([eta2B, G], [(1, -1, 2, -3), (2, -2, 1, -4)]).numpy()
    sum3_2b_2 = np.transpose(sum3_2b_1, [1, 0, 2, 3])
    sum3_2b_3 = np.transpose(sum3_2b_1, [0, 1, 3, 2])
    sum3_2b_4 = np.transpose(sum3_2b_1, [1, 0, 3, 2])
    sum3_2b_5 = sum3_2b_1 - sum3_2b_2 - sum3_2b_3 + sum3_2b_4
    sum3_2b = ncon([occA, sum3_2b_5], [(1, 2, -1, -2), (1, 2, -3, -4)]).numpy()

    dG = sum1_2b + 0.5 * sum2_2b + sum3_2b

    return (dE, df, dG)
Example #42
def single_mpo_heff2(mpo, L, R, A):
    # Same effective-Hamiltonian contraction as `single_mpo_heff_np`,
    # dispatched to the JAX backend.
    tensors = [L, A, mpo, R]
    idxs = [[3, -1, 1], [1, 5, 2], [3, 4, -2, 5], [4, -3, 2]]
    newA = tensornetwork.ncon(tensors, idxs, backend="jax")
    return newA
Example #43
def CenterLink(ham, u, w, u1, w1, rho2, chi, thres_in):

    # find projection onto reduced subspace
    tensors = [w1, w1, rho2, w1.conj(), w1.conj()]
    connects = [[-4, 6, 2], [7, -3, 1], [3, 4, 5, 1, 2, 5], [-2, 6, 4],
                [7, -1, 3]]
    cont_order = [5, 7, 6, 2, 4, 3, 1]
    rhotemp = tn.ncon(tensors, connects, cont_order)

    # _, proj = eigh(rhotemp.conj(), which='LM', max_kept=chi_m,
    #                 full_sort=False)
    dmid, projtemp = eigh(rhotemp.conj(),
                          which='LM',
                          threshold=0.01 * thres_in,
                          full_sort=True)
    proj = projtemp.conj()
    # print(thres_z1)
    # print(proj.shape[2])
    # print(dmid.todense())

    # evaluate centered environments and update tensors
    # (chi^4)*(chi_p^5)
    tensors = [ham, u, u, u.conj(), u.conj(), w.conj()]
    connects = [[5, 6, 7, 2, 3, 4], [1, 2, -4, -5], [3, 4, -6, -7],
                [1, 5, -1, 8], [6, 7, 9, -3], [8, 9, -2]]
    con_order = [8, 3, 4, 6, 7, 5, 9, 2, 1]
    temp1 = tn.ncon(tensors, connects, con_order)

    # (chi^4)*(chi_p^5)
    tensors = [ham, u, u, u.conj(), u.conj(), w.conj()]
    connects = [[4, 5, 6, 1, 2, 3], [1, 2, -4, -5], [3, 9, -6, -7],
                [4, 5, -1, 7], [6, 9, 8, -3], [7, 8, -2]]
    con_order = [4, 5, 1, 2, 8, 6, 7, 3, 9]
    ham7 = temp1 + tn.ncon(tensors, connects, con_order)

    # # (chi^4)*(chi_p^2)
    # tensors = [w1,w1,rho2,w1.conj(),w1.conj()]
    # connects = [[-4,6,2],[7,-3,1],[3,4,5,1,2,5],[-2,6,4],[7,-1,3]]
    # cont_order = [5,7,6,2,4,3,1]
    # rhotemp = tn.ncon(tensors,connects,cont_order)
    # _, proj = trunct_eigh(rhotemp, chi_m)

    # Network 1 - leading cost: (chi^2)*(chi_p^6)*(chi_c^1)
    tensors = [
        w,
        w.conj(),
        w.conj(), u1,
        u1.conj(),
        u1.conj(), w1, w1, w1,
        w1.conj(),
        w1.conj(),
        w1.conj(), rho2, ham7, proj
    ]
    connects = [[18, 1, 2], [-1, 19, 4], [20, 1, 3], [2, 25, 5, 6],
                [4, 21, 7, 8], [3, 25, 9, 22], [24, 5, 13], [11, 23, 12],
                [6, 10, 14], [8, 9, 16], [11, 7, 15], [22, 10, 17],
                [15, 16, 17, 12, 13, 14], [19, 21, 20, -2, -3, -4, 18],
                [23, 24, -5]]
    cont_order = [
        9, 11, 1, 12, 15, 24, 10, 17, 14, 13, 23, 5, 6, 25, 16, 22, 2, 3, 7, 8,
        18, 20, 21, 4, 19
    ]
    temp1 = tn.ncon(tensors, connects, cont_order)

    # Network 2 - leading cost: (chi^3)*(chi_p^6)
    tensors = [
        w, w,
        w.conj(),
        w.conj(), u1,
        u1.conj(),
        u1.conj(), w1, w1, w1,
        w1.conj(),
        w1.conj(),
        w1.conj(), rho2, ham7,
        w.conj(), proj
    ]
    connects = [[20, 21, 3], [1, 19, 2], [1, 22, 5], [23, -2, 4], [2, 3, 6, 7],
                [5, 24, 8, 9], [4, 25, 10, 26], [7, 27, 14], [12, 6, 13],
                [28, 11, 15], [9, 10, 17], [12, 8, 16], [26, 11, 18],
                [16, 17, 18, 13, 14, 15], [22, 24, 23, 19, 20, 21, -1],
                [-3, -4, 25], [27, 28, -5]]
    cont_order = [
        5, 4, 1, 12, 20, 21, 25, 27, 11, 15, 18, 13, 16, 14, 28, 17, 22, 24,
        19, 3, 2, 8, 9, 6, 7, 23, 26, 10
    ]
    temp2 = tn.ncon(tensors, connects, cont_order)

    # Network 3 - leading cost: (chi^3)*(chi_p^6)
    tensors = [
        w,
        w.conj(), u1,
        u1.conj(),
        u1.conj(), w1, w1, w1,
        w1.conj(),
        w1.conj(),
        w1.conj(), rho2, w,
        w.conj(),
        w.conj(), ham7, proj
    ]
    connects = [[21, 22, 1], [-1, -2, 2], [1, 16, 3, 4], [2, 20, 5, 6],
                [26, 17, 7, 19], [28, 3, 11], [9, 27, 10], [4, 8, 12],
                [6, 7, 14], [9, 5, 13], [19, 8, 15], [13, 14, 15, 10, 11, 12],
                [23, 18, 16], [24, 18, 17], [-3, 25, 20],
                [25, 26, 24, -4, 21, 22, 23], [27, 28, -5]]
    cont_order = [
        9, 20, 16, 8, 21, 22, 2, 28, 18, 12, 15, 10, 13, 11, 27, 1, 24, 23, 14,
        26, 17, 3, 4, 7, 19, 25, 5, 6
    ]
    temp3 = tn.ncon(tensors, connects, cont_order)

    # Network 4 - leading cost: (chi^2)*(chi_p^6)*(chi_c^1)
    tensors = [
        w, u1,
        u1.conj(),
        u1.conj(), w1, w1, w1,
        w1.conj(),
        w1.conj(),
        w1.conj(), rho2,
        w.conj(),
        w.conj(), ham7, proj
    ]
    connects = [[18, 19, 1], [25, 1, 2, 3], [25, 17, 4, 5], [22, 15, 6, 16],
                [3, 23, 10], [8, 2, 9], [24, 7, 11], [5, 6, 13], [8, 4, 12],
                [16, 7, 14], [12, 13, 14, 9, 10, 11], [20, -4, 15],
                [18, 21, 17], [21, 22, 20, 19, -1, -2, -3], [23, 24, -5]]
    cont_order = [
        8, 18, 7, 11, 14, 5, 23, 9, 12, 10, 24, 2, 3, 25, 4, 13, 1, 17, 16, 6,
        19, 21, 22, 15, 20
    ]
    temp4 = tn.ncon(tensors, connects, cont_order)

    q = -orthog_sym(tn.ncon([temp1 + temp2 + temp3 + temp4,
                             proj.conj()], [[-1, -2, -3, -4, 1], [-5, -6, 1]]),
                    pivot=4).conj()
    # q = orthog(tn.ncon([temp1+temp2+temp3+temp4,proj.conj()],[[-1,-2,-3,-4,1],[-5,-6,1]]), pivot=4).conj()

    # Network 2 - leading cost: (chi_p^8)
    tensors = [w1, w1, rho2, w1.conj(), w1.conj(), q, q.conj()]
    connects = [[11, 6, 2], [7, 10, 1], [3, 4, 5, 1, 2, 5], [9, 6, 4],
                [7, 8, 3], [-3, -4, 13, 12, 10, 11], [-1, -2, 13, 12, 8, 9]]
    cont_order = [5, 6, 2, 4, 7, 3, 1, 9, 8, 11, 10, 13, 12]
    temp1 = tn.ncon(tensors, connects, cont_order)

    # Network 3 - leading cost: (chi_p^8)
    tensors = [w1, w1, rho2, w1.conj(), w1.conj(), q, q.conj()]
    connects = [[11, 6, 2], [7, 10, 1], [3, 4, 5, 1, 2, 5], [9, 6, 4],
                [7, 8, 3], [13, 12, -3, -4, 10, 11], [13, 12, -1, -2, 8, 9]]
    cont_order = [5, 7, 1, 3, 6, 2, 4, 11, 10, 9, 8, 13, 12]
    temp2 = tn.ncon(tensors, connects, cont_order)

    rhotemp = 0.5 * (temp1 + temp2)
    qenv = q

    _, w_new = eigh(rhotemp.conj(), which='LM', max_kept=chi, full_sort=False)
    u_new = orthog_sym(
        tn.ncon([qenv, w_new.conj(), w_new.conj()],
                [[1, 2, 3, 4, -3, -4], [1, 2, -1], [3, 4, -2]]),
        pivot=2)
    return w_new, u_new
Example #44
def opt_energy_env_2site(isos_012, h_mpo_2site, states_1site_above):
    isos_wt = isos_with_transposes(isos_012)
    iso_012, iso_021 = isos_wt[0]
    isos_wt_above = isos_wt[1:]
    levels_above = len(isos_wt_above)

    # Ascend two-site Hamiltonian terms through to the bottom of the final isometry
    h2s_above = _ascend_op_2site_to_2site_many(h_mpo_2site, isos_wt)

    # hamiltonian with isometry opposite the gap
    h2L, h2R = h_mpo_2site
    iso_h2R_012 = [
        tensornetwork.ncon([iso_021, h], [(-1, -3, 1), (-2, 1)]) for h in h2R
    ]  # transpose to 012
    iso_h2L_012 = [
        tensornetwork.ncon([iso_012, h], [(-1, -2, 1), (-3, 1)]) for h in h2L
    ]

    def _compute_env(lvl, reflect=False):
        # TODO: Could shorten this a bit by doing only left or right at one time
        h2 = h2s_above[lvl]
        if reflect:
            h2 = reflect_mpo_2site(h2)

        envL, envR = _mpo_with_state(*isos_wt_above[lvl], h2,
                                     states_1site_above[lvl])

        # descend envs back down to the level of the gap
        for lvl2 in reversed(range(lvl)):
            iso_012_l2, iso_021_l2 = isos_wt_above[lvl2]
            if reflect:
                envR = _descend_energy_env_L(envR, iso_021_l2)
                envL = _descend_energy_env_R(envL, iso_012_l2)
            else:
                envL = _descend_energy_env_L(envL, iso_021_l2)
                envR = _descend_energy_env_R(envR, iso_012_l2)

        if reflect:
            iso_h2_L, iso_h2_R = iso_h2R_012, iso_h2L_012
        else:
            iso_h2_L, iso_h2_R = iso_h2L_012, iso_h2R_012

        # contract with the hamiltonian + isometry opposite the gap
        envL = sum(
            tensornetwork.ncon([eL, ihR], [(1, -1), (1, -2, -3)])
            for eL, ihR in zip(envL, iso_h2_R))

        envR = sum(
            tensornetwork.ncon([eR, ihL], [(1, -1), (1, -2, -3)])
            for eR, ihL in zip(envR, iso_h2_L))

        # weight each term according to the number of occurrences
        # in the translation-invariant tree
        weight = 1 / 2.0**(lvl + 1)
        return (envL + envR) * weight, weight

    weightsum = 0.0
    env_total = []
    for lvl in range(levels_above):
        env, weight = _compute_env(lvl)
        weightsum += weight
        env_total.append(env)

    # Now compute the boundary term
    env, weight = _compute_env(levels_above - 1, reflect=True)
    weightsum += weight
    env_total.append(env)

    env_total = sum(env_total)

    assert weightsum == 1.0

    return env_total
Example #45
def RightLink(ham, u, w, rho1, thres_in):

    tensors = [w, rho1, w.conj()]
    connects = [[4, -3, 1], [3, 2, -2, 3, 1, -4], [4, -1, 2]]
    con_order = [3, 2, 4, 1]
    rhotemp = tn.ncon(tensors, connects, con_order)
    _, proj = eigh(rhotemp.conj(),
                   which='LM',
                   threshold=0.1 * thres_in,
                   full_sort=False)

    tensors = [
        ham, u,
        u.conj(),
        u.conj(), w, w,
        w.conj(),
        w.conj(),
        w.conj(), rho1,
        proj.conj()
    ]

    # Network 1 - leading cost: (chi^5)*(chi_p^2)*(chi_c^1)
    connects = [[5, 6, 7, 4, -1, -2], [3, 4, 12, 13], [3, 5, 14, 15],
                [6, 7, 16, 17], [13, 19, 2], [11, 12, 1], [15, 16, 9],
                [11, 14, 8], [17, -3, 10], [8, 9, 10, 1, 2, 18], [19, 18, -4]]
    cont_order = [
        11, 1, 8, 19, 2, 18, 15, 12, 13, 3, 14, 9, 6, 7, 17, 5, 4, 16, 10
    ]
    temp1 = tn.ncon(tensors, connects, cont_order)

    # Network 2 - leading cost: (chi^3)*(chi_p^5)*(chi_c^1)
    connects = [[5, 6, 7, 3, 4, -1], [3, 4, 12, 13], [5, 6, 14, 15],
                [7, -2, 16, 17], [13, 18, 2], [11, 12, 1], [15, 16, 9],
                [11, 14, 8], [17, -3, 10], [8, 9, 10, 1, 2, 19], [18, 19, -4]]
    cont_order = [
        11, 1, 8, 18, 2, 19, 5, 6, 3, 4, 9, 14, 15, 12, 13, 17, 7, 10, 16
    ]
    temp2 = tn.ncon(tensors, connects, cont_order)

    # Network 3 - leading cost: (chi^5)*(chi_p^2)*(chi_c^1)
    connects = [[5, 6, 7, -2, 3, 4], [3, 4, -3, 12], [-1, 5, 13, 14],
                [6, 7, 15, 16], [11, 18, 1], [12, 17, 2], [14, 15, 9],
                [11, 13, 8], [16, 17, 10], [8, 9, 10, 1, 19, 2], [18, 19, -4]]
    cont_order = [
        11, 1, 8, 18, 19, 17, 10, 2, 9, 13, 14, 16, 15, 3, 4, 5, 6, 7, 12
    ]
    temp3 = tn.ncon(tensors, connects, cont_order)

    # Network 4 - leading cost: (chi^6)*(chi_p^1)*(chi_c^1)
    connects = [[4, 5, 6, -1, -2, 3], [3, 17, -3, 11], [4, 5, 12, 13],
                [6, 17, 14, 15], [10, 18, 1], [11, 16, 2], [13, 14, 8],
                [10, 12, 7], [15, 16, 9], [7, 8, 9, 1, 19, 2], [18, 19, -4]]
    cont_order = [
        10, 1, 7, 18, 19, 16, 9, 2, 8, 14, 15, 17, 11, 12, 13, 4, 5, 6, 3
    ]
    temp4 = tn.ncon(tensors, connects, cont_order)

    gam1 = tn.ncon([temp1 + temp2 + temp3 + temp4, proj],
                   [[-1, -2, -3, 1], [-4, -5, 1]])

    tensors = [
        ham, u, u,
        u.conj(),
        u.conj(), w, w,
        w.conj(),
        w.conj(),
        w.conj(), rho1
    ]

    # Network 2 - leading cost: (chi^4)*(chi_p^5)
    connects = [[7, 8, 9, 4, 5, 6], [3, 4, -2, 13], [5, 6, 14, 15],
                [3, 7, 16, 17], [8, 9, 18, 19], [13, 14, 1], [15, 20, 2],
                [17, 18, 11], [-1, 16, 10], [19, 20, 12],
                [10, 11, 12, -3, 1, 2]]
    cont_order = [
        20, 8, 9, 13, 5, 6, 4, 14, 7, 3, 18, 17, 19, 15, 1, 11, 2, 12, 16, 10
    ]
    temp1 = tn.ncon(tensors, connects, cont_order)

    # Network 3 - leading cost: (chi^4)*(chi_p^5)
    connects = [[6, 7, 8, 3, 4, 5], [3, 4, -2, 12], [5, 20, 13, 14],
                [6, 7, 15, 16], [8, 20, 17, 18], [12, 13, 1], [14, 19, 2],
                [16, 17, 10], [-1, 15, 9], [18, 19, 11], [9, 10, 11, -3, 1, 2]]
    cont_order = [
        6, 7, 3, 4, 17, 8, 16, 5, 20, 19, 12, 13, 18, 14, 10, 1, 2, 11, 15, 9
    ]
    temp2 = tn.ncon(tensors, connects, cont_order)

    gam2 = temp1 + temp2

    if (u.shape[3] < u.shape[1]):
        y = eye_sym([u.charges[1][0], u.charges[3][0]],
                    [u._flows[1], u._flows[3]])
        upr = orthog_sym(tn.ncon([u, y.conj()], [[-1, -2, -3, 1], [-4, 1]]),
                         pivot=2)
        wpr = orthog_sym(tn.ncon([w, y], [[1, -2, -3], [-1, 1]]), pivot=2)
        gam2pr = tn.ncon([gam2, y.conj()], [[1, -2, -3], [-1, 1]])
    else:
        upr = u
        wpr = w
        gam2pr = gam2

    for g in range(10):
        wpr = orthog_sym(
            tn.ncon([gam1, upr], [[1, 2, -2, 3, -3], [1, 2, 3, -1]]) + gam2pr,
            pivot=2).conj()
        upr = orthog_sym(tn.ncon([gam1, wpr],
                                 [[-1, -2, 1, -3, 2], [-4, 1, 2]]),
                         pivot=2).conj()

    if (u.shape[3] < u.shape[1]):
        rhotemp = tn.ncon([wpr, wpr.conj(), rho1],
                          [[-2, 5, 4], [-1, 5, 3], [3, 1, 2, 4, 1, 2]])
        dtemp, y = eigh(rhotemp.conj() / BLA.trace(rhotemp),
                        which='LM',
                        threshold=0.5 * thres_in,
                        full_sort=False)
        chi_temp = sum(dtemp.todense() > thres_in) + 1
        dtemp, y = eigh(rhotemp.conj() / BLA.trace(rhotemp),
                        which='LM',
                        max_kept=chi_temp,
                        full_sort=False)
        u = tn.ncon([upr, y], [[-1, -2, -3, 1], [1, -4]])
        w = tn.ncon([wpr, y.conj()], [[1, -2, -3], [1, -1]])
    else:
        u = upr
        w = wpr

    return u, w
Example #46
def _iso_from_uinv(env, env_uinv):
    return tensornetwork.ncon([env_uinv, env], [(-1, 1), (1, -2, -3)])
Example #47
 def _ascend(op, iso, iso_conj):
     return tensornetwork.ncon([iso_conj, op, iso], [(-1, 3, 1), (1, 2), (-2, 3, 2)])
Example #48
def _complete_partial_ascend(iso_op, iso):
    """Complete a partial operator ascension performed by `_ascend_partial()`.
    This contracts with the conjugated isometry.
    Cost: D^4."""
    return tensornetwork.ncon([tf.conj(iso), iso_op], [(-1, 1, 2), (-2, 1, 2)])
Example #49
def optimize_mod_binary_mera(hamAB_0,
                             hamBA_0,
                             rhoAB_0,
                             rhoBA_0,
                             wC,
                             vC,
                             uC,
                             numiter=1000,
                             refsym=True,
                             nsteps_steady_state=8,
                             verbose=0,
                             opt_u=True,
                             opt_vw=True,
                             numpy_update=True,
                             opt_all_layers=False,
                             opt_u_after=9):
    """
    ------------------------
    adapted from Glen Evenbly (c) for www.tensors.net, (v1.1) - last modified 24/1/2019
    ------------------------
    optimization of a scale invariant modified binary MERA tensor network
    Parameters:
    ----------------------------
    hamAB_0, hamBA_0:      tf.Tensor
                           bottom-layer Hamiltonians in AB and BA sublattices
    rhoAB_0, rhoBA_0:      tf.Tensor 
                           initial values for steady-state density matrices
    wC, vC, uC:            list of tf.Tensor 
                           isometries (wC, vC) and disentanglers (uC) of the MERA, with 
                           bottom layers first 
    numiter:               int 
                           number of iteration steps 
    refsym:                bool 
                           impose reflection symmetry 
    nsteps_steady_state:   int 
                           number of power-method iteration steps for calculating the
                           steady state density matrices 
    verbose:               int 
                           verbosity flag 
    opt_u, opt_vw:         bool
                           if False, skip disentangler or isometry optimization
    numpy_update:          bool
                           if True, use numpy svd to calculate update of disentanglers
    opt_all_layers:        bool
                           if True, optimize all layers
                           if False, optimize only truncating layers
    opt_u_after:           int 
                           start optimizing disentangler only after `opt_u_after` initial optimization steps
    Returns: 
    -------------------------------
    (wC, vC, uC, rhoAB, rhoBA, run_times, Energies)
    wC, vC, uC:             list of tf.Tensor 
                            optimized MERA tensors
    rhoAB, rhoBA:           tf.Tensor 
                            steady state density matrices at the top layer 
    run_times:              list 
                            run times per iteration step 
    Energies:               list 
                            energies at each iteration step
    """
    dtype = rhoAB_0.dtype

    hamAB = [0 for x in range(len(vC) + 1)]
    hamBA = [0 for x in range(len(vC) + 1)]
    rhoAB = [0 for x in range(len(vC) + 1)]
    rhoBA = [0 for x in range(len(vC) + 1)]

    hamAB[0] = hamAB_0
    hamBA[0] = hamBA_0

    chi1 = hamAB[0].shape[0]

    bias = tf.math.reduce_max(
        tf.linalg.eigvalsh(tf.reshape(hamAB[0], (chi1 * chi1, chi1 * chi1))))
    hamAB[0] = hamAB[0] - bias * tf.reshape(
        tf.eye(chi1 * chi1, dtype=dtype), (chi1, chi1, chi1, chi1))
    hamBA[0] = hamBA[0] - bias * tf.reshape(
        tf.eye(chi1 * chi1, dtype=dtype), (chi1, chi1, chi1, chi1))

    skip_layer = [misc_mera.skip_layer(w) for w in wC]
    for p in range(len(wC)):
        if skip_layer[p]:
            hamAB[p + 1], hamBA[p + 1] = ascending_super_operator(
                hamAB[p], hamBA[p], wC[p], vC[p], uC[p], refsym)

    Energies = []
    run_times = []
    for k in range(numiter):
        t1 = time.time()
        rhoAB_0, rhoBA_0 = steady_state_density_matrices(
            nsteps_steady_state, rhoAB_0, rhoBA_0, wC[-1], vC[-1], uC[-1],
            refsym)
        rhoAB[-1] = rhoAB_0
        rhoBA[-1] = rhoBA_0
        for p in range(len(rhoAB) - 2, -1, -1):
            rhoAB[p], rhoBA[p] = descending_super_operator(
                rhoAB[p + 1], rhoBA[p + 1], wC[p], vC[p], uC[p], refsym)

        if verbose > 0:
            if np.mod(k, 10) == 1:
                Energies.append(
                    (tn.ncon([rhoAB[0], hamAB[0]], [[1, 2, 3, 4], [1, 2, 3, 4]]) +
                     tn.ncon([rhoBA[0], hamBA[0]], [[1, 2, 3, 4], [1, 2, 3, 4]])) /
                    4 + bias / 2)
                stdout.write(
                    '\rIteration: %i of %i: E = %.8f, err = %.16f at D = %i with %i layers'
                    % (int(k), int(numiter), float(Energies[-1]),
                       float(Energies[-1] + 4 / np.pi), int(wC[-1].shape[2]),
                       len(wC)))
                stdout.flush()

        for p in range(len(wC)):
            if (not opt_all_layers) and skip_layer[p]:
                continue

            if k >= opt_u_after:
                uEnv = get_env_disentangler(hamAB[p], hamBA[p], rhoBA[p + 1],
                                            wC[p], vC[p], uC[p], refsym)
                if opt_u:
                    if refsym:
                        uEnv = uEnv + tf.transpose(uEnv, (1, 0, 3, 2))
                    if numpy_update:
                        uC[p] = misc_mera.u_update_svd_numpy(uEnv)
                    else:
                        uC[p] = misc_mera.u_update_svd(uEnv)

            wEnv = get_env_w_isometry(hamAB[p], hamBA[p], rhoBA[p + 1],
                                      rhoAB[p + 1], wC[p], vC[p], uC[p])
            if opt_vw:
                if numpy_update:
                    wC[p] = misc_mera.w_update_svd_numpy(wEnv)
                else:
                    wC[p] = misc_mera.w_update_svd(wEnv)
                if refsym:
                    vC[p] = wC[p]
                else:
                    vEnv = get_env_v_isometry(hamAB[p], hamBA[p], rhoBA[p + 1],
                                              rhoAB[p + 1], wC[p], vC[p], uC[p])
                    vC[p] = misc_mera.w_update_svd(vEnv)

            hamAB[p + 1], hamBA[p + 1] = ascending_super_operator(
                hamAB[p], hamBA[p], wC[p], vC[p], uC[p], refsym)

        run_times.append(time.time() - t1)
        if verbose > 2:
            print('time per iteration: ', run_times[-1])

    return wC, vC, uC, rhoAB[-1], rhoBA[-1], run_times, Energies