Example #1
def initialize_mod_binary_MERA(phys_dim, chi, dtype=tf.float64):
    """
    Build the initial tensors of a modified binary MERA: isometries,
    disentanglers, and the top-layer reduced density matrices.

    Parameters:
    -------------------
    phys_dim:         int 
                      Hilbert space dimension of the bottom layer
    chi:              int 
                      maximum bond dimension
    dtype:            tensorflow dtype
                      dtype of the MERA tensors
    Returns:
    -------------------
    (wC, vC, uC, rhoAB, rhoBA)
    wC, vC, uC:      list of tf.Tensor
    rhoAB, rhoBA:    tf.Tensor
    """

    def bond_dim(layer):
        # bond dimension feeding into `layer`, capped at chi
        return min(phys_dim**(2**layer), chi)

    wC, vC, uC = [], [], []
    layer = 0
    while True:
        d_in = bond_dim(layer)
        d_out = bond_dim(layer + 1)
        # row dimension of the identity before reshaping; capped at chi**2
        d_sq = min(phys_dim**(2**(layer + 1)), chi**2)
        # isometries: (d_in * d_in, d_out) identity reshaped to rank 3
        wC.append(
            tf.reshape(
                tf.eye(d_sq, d_out, dtype=dtype), (d_in, d_in, d_out)))
        vC.append(
            tf.reshape(
                tf.eye(d_sq, d_out, dtype=dtype), (d_in, d_in, d_out)))
        # disentanglers: identity on the two-site space reshaped to rank 4
        uC.append(
            tf.reshape(
                tf.eye(d_sq, dtype=dtype), (d_in, d_in, d_in, d_in)))
        layer += 1
        if misc_mera.all_same_chi(wC[-1]):
            break

    chi_top = wC[-1].shape[2]
    rho_shape = (chi_top, chi_top, chi_top, chi_top)
    rhoAB = tf.reshape(tf.eye(chi_top * chi_top, dtype=dtype), rho_shape)
    rhoBA = tf.reshape(tf.eye(chi_top * chi_top, dtype=dtype), rho_shape)

    return wC, vC, uC, rhoAB, rhoBA
Example #2
def increase_bond_dimension_by_adding_layers(chi_new, wC, vC, uC):
    """
    deprecated
    increase the bond dimension of the MERA to `chi_new`
    by padding tensors in the last layer with zeros. If the desired `chi_new` cannot
    be obtained from padding, adds layers of Tensors
    the last layer is guaranteed to have uniform bond dimension

    Parameters:
    --------------------------------
    chi_new:         int 
                     new bond dimenion
    wC, vC, uC:      list of tf.Tensor 
                     MERA isometries and disentanglers


    Returns:         
    --------------------------------
    (wC, vC, uC):    list of tf.Tensors
    """
    # guard: the top layer must already have uniform bond dimension
    if not misc_mera.all_same_chi(wC[-1], vC[-1], uC[-1]):
        raise ValueError('chis of last layer have to be all the same!')

    if wC[-1].shape[2] >= chi_new:
        # network is already wide enough; nothing to do here
        return wC, vC, uC

    # widen the top leg of the last layer as far as padding allows
    chi = min(chi_new, wC[-1].shape[0] * wC[-1].shape[1])
    wC[-1] = misc_mera.pad_tensor(wC[-1],
                                  [wC[-1].shape[0], wC[-1].shape[1], chi])
    vC[-1] = misc_mera.pad_tensor(vC[-1],
                                  [vC[-1].shape[0], vC[-1].shape[1], chi])
    # stack one more layer built from padded copies of the (new) top layer,
    # then recurse until chi_new is reached
    new_w = misc_mera.pad_tensor(copy.deepcopy(wC[-1]), [chi, chi, chi])
    new_v = misc_mera.pad_tensor(copy.deepcopy(vC[-1]), [chi, chi, chi])
    new_u = misc_mera.pad_tensor(copy.deepcopy(uC[-1]), [chi, chi, chi, chi])
    wC.append(new_w)
    vC.append(new_v)
    uC.append(new_u)
    return increase_bond_dimension_by_adding_layers(chi_new, wC, vC, uC)
def initialize_mod_binary_MERA(phys_dim, chi, dtype=tf.float64):
    """
    initialize a modified binary MERA network
    Args:
        phys_dim (int): Hilbert space dimension of the bottom layer
        chi (int): maximum bond dimension
        dtype (tensorflow dtype): dtype of the MERA tensors
    Returns:
        wC (list of tf.Tensor):   padded MERA isometries and disentanglers
        vC (list of tf.Tensor):   MERA isometries and disentanglers
        uC (list of tf.Tensor):   padded MERA isometries and disentanglers
        rhoAB (tf.Tensor):        reduced density on the AB lattice
        rhoBA (tf.Tensor):        reduced density on the BA lattice
    """
    wC = []
    vC = []
    uC = []
    level = 0
    while True:
        # bond dimensions at this level, capped at chi (chi**2 for the
        # square identity used to seed the disentangler)
        dim_in = min(phys_dim**(2**level), chi)
        dim_out = min(phys_dim**(2**(level + 1)), chi)
        dim_sq = min(phys_dim**(2**(level + 1)), chi**2)
        # isometries start out as reshaped rectangular identities
        wC.append(
            tf.reshape(tf.eye(dim_sq, dim_out, dtype=dtype),
                       (dim_in, dim_in, dim_out)))
        vC.append(
            tf.reshape(tf.eye(dim_sq, dim_out, dtype=dtype),
                       (dim_in, dim_in, dim_out)))
        # disentanglers start out as reshaped square identities
        uC.append(
            tf.reshape(tf.eye(dim_sq, dtype=dtype),
                       (dim_in, dim_in, dim_in, dim_in)))
        level += 1
        # stop once the bond dimension has saturated at chi
        if misc_mera.all_same_chi(wC[-1]):
            break

    chi_top = wC[-1].shape[2]
    top_shape = (chi_top, chi_top, chi_top, chi_top)
    # top-layer reduced density matrices, initialized to identities
    rhoAB = tf.reshape(tf.eye(chi_top * chi_top, dtype=dtype), top_shape)
    rhoBA = tf.reshape(tf.eye(chi_top * chi_top, dtype=dtype), top_shape)

    return wC, vC, uC, rhoAB, rhoBA
Example #4
def get_scaling_dims(loadname, savename, use_gpu=False, k=11):
    """
    Compute scaling dimensions for the top layers of a binary MERA loaded
    from disk and pickle the results to `savename`.

    Args:
        loadname (str):  path to a pickle file containing the tuple (wC, uC)
        savename (str):  path the scaling-dimension dict is dumped to
                         (relative to the 'binary_mera_optimization' directory)
        use_gpu (bool):  if True, run on GPU:0; otherwise on CPU:0
        k (int):         number of scaling dimensions to compute
                         (forwarded to bml.get_scaling_dimensions)
    Returns:
        None
    """
    with open(loadname, 'rb') as f:
        wC, uC = pickle.load(f)

    fname = 'binary_mera_optimization'
    # NOTE(review): rootdir is saved but never restored, so the caller's
    # working directory is permanently changed by this function
    rootdir = os.getcwd()
    if not os.path.exists(fname):
        os.mkdir(fname)
    os.chdir(fname)

    DEVICES = tf.contrib.eager.list_devices()
    print("Available devices:")
    for i, device in enumerate(DEVICES):
        print("%d) %s" % (i, device))
    CPU = '/device:CPU:0'
    GPU = '/job:localhost/replica:0/task:0/device:GPU:0'
    if use_gpu:
        specified_device_type = GPU
        name = 'GPU'
    else:
        specified_device_type = CPU
        name = 'CPU'

    filename = savename
    scaling_dims = {}
    # with open(filename, 'rb') as f:
    #     scaling_dims = pickle.load(f)

    # bug fix: the original wrote `tf.device(device)`, referencing the
    # leftover loop variable from the device-listing loop above -- a
    # NameError when DEVICES is empty, and silently the wrong device
    # otherwise.  Use the device string selected from `use_gpu`.
    with tf.device(specified_device_type):
        # only the two topmost layers are analyzed
        for n in reversed(range(len(wC) - 2, len(wC))):
            print(np.array(wC[n].shape))
            # scaling dimensions are only defined for layers with
            # uniform bond dimension
            if not misc_mera.all_same_chi(wC[n]):
                continue
            scaling_dims[n] = bml.get_scaling_dimensions(wC[n], uC[n], k=k)
            print(scaling_dims[n])
            # checkpoint after every layer so partial results survive a crash
            with open(filename, 'wb') as f:
                pickle.dump(scaling_dims, f)
def pad_mera_tensors(chi_new, wC, vC, uC, noise=0.0):
    """
    increase the bond dimension of the MERA to `chi_new`
    by padding tensors in all layers with zeros. If the desired `chi_new` cannot
    be obtained from padding, adds layers of Tensors
    the last layer is guaranteed to have uniform bond dimension
    Args:
        chi_new (int):            new bond dimension
        wC (list of tf.Tensor):   MERA isometries and disentanglers
        vC (list of tf.Tensor):   MERA isometries and disentanglers
        uC (list of tf.Tensor):   MERA isometries and disentanglers
        noise (float):            amplitude of uniform noise added to the padded tensors
    Returns: 
        wC (list of tf.Tensor):   padded MERA isometries and disentanglers
        vC (list of tf.Tensor):   MERA isometries and disentanglers
        uC (list of tf.Tensor):   padded MERA isometries and disentanglers
    """

    # if any existing bond dimension already exceeds `chi_new`, padding
    # cannot shrink tensors -- nothing to increase
    all_chis = [t.shape[n] for t in wC for n in range(len(t.shape))]
    if not np.all([c <= chi_new for c in all_chis]):
        return wC, vC, uC

    # bottom layer: the physical legs keep their dimension, only the
    # coarse-grained (top) leg is widened
    chi_0 = wC[0].shape[0]
    wC[0] = misc_mera.pad_tensor(wC[0], [chi_0, chi_0, min(chi_new, chi_0**2)])
    vC[0] = misc_mera.pad_tensor(vC[0], [chi_0, chi_0, min(chi_new, chi_0**2)])

    for n in range(1, len(wC)):
        wC[n] = misc_mera.pad_tensor(wC[n], [
            min(chi_new, chi_0**(2**n)),
            min(chi_new, chi_0**(2**n)),
            min(chi_new, chi_0**(2**(n + 1)))
        ])
        vC[n] = misc_mera.pad_tensor(vC[n], [
            min(chi_new, chi_0**(2**n)),
            min(chi_new, chi_0**(2**n)),
            min(chi_new, chi_0**(2**(n + 1)))
        ])
        uC[n] = misc_mera.pad_tensor(uC[n], [
            min(chi_new, chi_0**(2**n)),
            min(chi_new, chi_0**(2**n)),
            min(chi_new, chi_0**(2**n)),
            min(chi_new, chi_0**(2**n))
        ])
        # bug fix: this loop previously drew noise from the default [0, 1)
        # while the layer-adding loop below uses the symmetric [-1, 1);
        # use the symmetric distribution consistently
        wC[n] += (tf.random_uniform(
            shape=wC[n].shape, minval=-1, maxval=1, dtype=wC[n].dtype) *
                  noise)
        vC[n] += (tf.random_uniform(
            shape=vC[n].shape, minval=-1, maxval=1, dtype=vC[n].dtype) *
                  noise)
        uC[n] += (tf.random_uniform(
            shape=uC[n].shape, minval=-1, maxval=1, dtype=uC[n].dtype) *
                  noise)

    # keep stacking padded copies of the top layer until its bond
    # dimension is uniform
    n = len(wC)
    while not misc_mera.all_same_chi(wC[-1]):
        wC.append(
            misc_mera.pad_tensor(wC[-1], [
                min(chi_new, chi_0**(2**n)),
                min(chi_new, chi_0**(2**n)),
                min(chi_new, chi_0**(2**(n + 1)))
            ]))
        vC.append(
            misc_mera.pad_tensor(vC[-1], [
                min(chi_new, chi_0**(2**n)),
                min(chi_new, chi_0**(2**n)),
                min(chi_new, chi_0**(2**(n + 1)))
            ]))
        uC.append(
            misc_mera.pad_tensor(uC[-1], [
                min(chi_new, chi_0**(2**n)),
                min(chi_new, chi_0**(2**n)),
                min(chi_new, chi_0**(2**n)),
                min(chi_new, chi_0**(2**n))
            ]))
        wC[-1] += (tf.random_uniform(
            shape=wC[-1].shape, minval=-1, maxval=1, dtype=wC[-1].dtype) *
                   noise)
        vC[-1] += (tf.random_uniform(
            shape=vC[-1].shape, minval=-1, maxval=1, dtype=vC[-1].dtype) *
                   noise)
        uC[-1] += (tf.random_uniform(
            shape=uC[-1].shape, minval=-1, maxval=1, dtype=uC[-1].dtype) *
                   noise)

        n += 1

    return wC, vC, uC