# NOTE: import targets for the MERA helper modules (bml, misc_mera) are an
# assumption; the exact paths depend on the repository layout.
import time

import numpy as np
import tensorflow as tf
import tensornetwork as tn

import binary_mera_lib as bml
import misc_mera


def test_ascending_descending(chi, dtype):
    """
    test if ascending and descending operations are doing the right thing
    """
    wC, uC, rho_0 = bml.initialize_binary_MERA_random(phys_dim=2,
                                                      chi=chi,
                                                      dtype=dtype)
    wC, uC = bml.unlock_layer(wC, uC)  # add a transitional layer
    wC, uC = bml.unlock_layer(wC, uC)  # add a transitional layer
    ham_0 = bml.initialize_TFI_hams(dtype)
    rho = [0 for n in range(len(wC) + 1)]
    ham = [0 for n in range(len(wC) + 1)]
    rho[-1] = bml.steady_state_density_matrix(10, rho_0, wC[-1], uC[-1])
    ham[0] = ham_0
    for p in range(len(rho) - 2, -1, -1):
        rho[p] = bml.descending_super_operator(rho[p + 1], wC[p], uC[p])
    for p in range(len(wC)):
        ham[p + 1] = bml.ascending_super_operator(ham[p], wC[p], uC[p])
    energies = [
        tn.ncon([rho[p], ham[p]], [[1, 2, 3, 4, 5, 6], [1, 2, 3, 4, 5, 6]])
        for p in range(len(rho))
    ]
    np.testing.assert_allclose(
        np.array(
            [energies[p] / energies[p + 1] for p in range(len(energies) - 1)]),
        0.5)
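

# Hedged sketch (illustrative, not from the source): the tests in this file take a
# bond dimension `chi` and a TensorFlow dtype as arguments; in a pytest run these
# would be supplied by fixtures or a parametrization such as the one below.  The
# concrete values are assumptions.
import pytest


@pytest.mark.parametrize("chi", [4, 6])
@pytest.mark.parametrize("dtype", [tf.float64])
def test_ascending_descending_parametrized(chi, dtype):
    # delegate to the test defined above
    test_ascending_descending(chi, dtype)
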

def test_steady_state(chi, dtype):
    """
    Check that the steady-state density matrix returned by
    bml.steady_state_density_matrix is (approximately) a fixed point of the
    descending super-operator.
    """
    isometry = misc_mera.w_update_svd_numpy(
        np.random.rand(chi, chi, chi).astype(dtype.as_numpy_dtype))
    unitary = misc_mera.u_update_svd_numpy(
        np.random.rand(chi, chi, chi, chi).astype(dtype.as_numpy_dtype))
    rho = tf.reshape(
        tf.eye(chi * chi * chi, dtype=dtype), (chi, chi, chi, chi, chi, chi))
    rho_ss = bml.steady_state_density_matrix(
        nsteps=60, rho=rho, isometry=isometry, unitary=unitary)
    rho_test = bml.descending_super_operator(rho_ss, isometry, unitary)
    np.testing.assert_array_less(rho_ss - rho_test, 1E-6)
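

# Hedged sketch (illustrative, not from the source): the assertion above encodes the
# fixed-point property rho_ss ~ D(rho_ss), with D the descending super-operator.  The
# same property can be checked by iterating D by hand; bml.steady_state_density_matrix
# presumably performs a similar iteration internally (normalization details may
# differ, so this is only a sketch).
def manual_fixed_point_check(rho, isometry, unitary, nsteps=60, tol=1E-6):
    # repeatedly descend the density matrix ...
    for _ in range(nsteps):
        rho = bml.descending_super_operator(rho, isometry, unitary)
    # ... after which one more application should leave it essentially unchanged
    rho_next = bml.descending_super_operator(rho, isometry, unitary)
    np.testing.assert_array_less(rho - rho_next, tol)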


def get_energies(wC, uC, rho_0, ham_0):
    """
    Helper: descend `rho_0` and ascend `ham_0` through all layers and return
    the energy obtained by contracting ham[p] with rho[p] at every layer.
    """
    rho = [0 for n in range(len(wC) + 1)]
    ham = [0 for n in range(len(wC) + 1)]

    rho[-1] = bml.steady_state_density_matrix(10, rho_0, wC[-1], uC[-1])
    ham[0] = ham_0
    for p in range(len(rho) - 2, -1, -1):
        rho[p] = bml.descending_super_operator(rho[p + 1], wC[p], uC[p])
    for p in range(len(wC)):
        ham[p + 1] = bml.ascending_super_operator(ham[p], wC[p], uC[p])
    energies = [
        tn.ncon([rho[p], ham[p]], [[1, 2, 3, 4, 5, 6], [1, 2, 3, 4, 5, 6]])
        for p in range(len(rho))
    ]
    return energies
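

# Hedged usage sketch (illustrative, not from the source): get_energies mirrors the
# body of test_ascending_descending above, so it is fed the output of the same
# initialization routines.  The chi/dtype values below are assumptions.
def example_energy_scaling(chi=4, dtype=tf.float64):
    wC, uC, rho_0 = bml.initialize_binary_MERA_random(phys_dim=2, chi=chi,
                                                      dtype=dtype)
    ham_0 = bml.initialize_TFI_hams(dtype)
    energies = get_energies(wC, uC, rho_0, ham_0)
    # test_ascending_descending asserts that each of these ratios equals 0.5,
    # i.e. the energy doubles with every layer
    return [energies[p] / energies[p + 1] for p in range(len(energies) - 1)]
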
def benchmark_descending_operator(rho, w, u, num_layers):
    """
    run benchmark for descending super operator
    Args: 
        rhoab (tf.Tensor):  reduced densit matrix on a-b lattice
        rhoba (tf.Tensor):  reduced densit matrix on b-a lattice
        w   (tf.Tensor):  isometry
        v   (tf.Tensor):  isometry
        u   (tf.Tensor):  disentangler
        num_layers(int):  number of layers over which to descend the hamiltonian
    Returns:
        runtime (float):  the runtime
    """

    t1 = time.time()
    for p in range(num_layers):
        rho = bml.descending_super_operator(rho, w, u)
    return time.time() - t1
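

# Hedged usage sketch (illustrative, not from the source): the benchmark needs a
# density matrix, an isometry and a disentangler of matching bond dimension.  Random
# tensors built the same way as in test_steady_state above are enough to time the
# contraction; chi, num_layers and dtype below are assumed values.
def run_descending_benchmark(chi=8, num_layers=10, dtype=tf.float64):
    isometry = misc_mera.w_update_svd_numpy(
        np.random.rand(chi, chi, chi).astype(dtype.as_numpy_dtype))
    unitary = misc_mera.u_update_svd_numpy(
        np.random.rand(chi, chi, chi, chi).astype(dtype.as_numpy_dtype))
    rho = tf.reshape(
        tf.eye(chi * chi * chi, dtype=dtype), (chi, chi, chi, chi, chi, chi))
    return benchmark_descending_operator(rho, isometry, unitary, num_layers)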