示例#1
0
def test_padding(chi, dtype):
    """Padding a MERA to a larger bond dimension must leave all per-layer
    energies unchanged; compare energies before and after padding."""
    wC, uC, rho_0 = bml.initialize_binary_MERA_random(phys_dim=2,
                                                      chi=chi,
                                                      dtype=dtype)
    # add two transitional layers
    for _ in range(2):
        wC, uC = bml.unlock_layer(wC, uC)
    ham_0 = bml.initialize_TFI_hams(dtype)

    def get_energies(wC, uC, rho_0, ham_0):
        """Return the energy `<rho[p], ham[p]>` at every layer of the MERA."""
        num_layers = len(wC)
        rho = [None] * (num_layers + 1)
        ham = [None] * (num_layers + 1)

        # top density matrix from power iteration, then descend it;
        # bare Hamiltonian at the bottom, then ascend it
        rho[-1] = bml.steady_state_density_matrix(10, rho_0, wC[-1], uC[-1])
        ham[0] = ham_0
        for p in reversed(range(num_layers)):
            rho[p] = bml.descending_super_operator(rho[p + 1], wC[p], uC[p])
        for p in range(num_layers):
            ham[p + 1] = bml.ascending_super_operator(ham[p], wC[p], uC[p])
        # full contraction of density matrix with Hamiltonian at each layer
        return [
            tn.ncon([r, h], [[1, 2, 3, 4, 5, 6], [1, 2, 3, 4, 5, 6]])
            for r, h in zip(rho, ham)
        ]

    energies_1 = get_energies(wC, uC, rho_0, ham_0)

    chi_new = chi + 1
    wC, uC = bml.pad_mera_tensors(chi_new, wC, uC)
    rho_0 = misc_mera.pad_tensor(rho_0, [chi_new] * 6)
    energies_2 = get_energies(wC, uC, rho_0, ham_0)
    np.testing.assert_allclose(energies_1, energies_2)
示例#2
0
def increase_bond_dimension_by_adding_layers(chi_new, wC, vC, uC):
    """
    deprecated
    increase the bond dimension of the MERA to `chi_new`
    by padding tensors in the last layer with zeros. If the desired `chi_new` cannot
    be obtained from padding, adds layers of Tensors
    the last layer is guaranteed to have uniform bond dimension

    Parameters:
    --------------------------------
    chi_new:         int
                     new bond dimenion
    wC, vC, uC:      list of tf.Tensor
                     MERA isometries and disentanglers


    Returns:
    --------------------------------
    (wC, vC, uC):    list of tf.Tensors
    """
    # guard: the top layer must already have a uniform bond dimension
    if not misc_mera.all_same_chi(wC[-1], vC[-1], uC[-1]):
        raise ValueError('chis of last layer have to be all the same!')

    if wC[-1].shape[2] >= chi_new:
        # top bond dimension already large enough; nothing to do
        return wC, vC, uC

    # grow the top bond as far as padding allows (capped by the product of
    # the two input bonds), then duplicate the padded top layer and recurse
    chi = min(chi_new, wC[-1].shape[0] * wC[-1].shape[1])
    wC[-1] = misc_mera.pad_tensor(wC[-1],
                                  [wC[-1].shape[0], wC[-1].shape[1], chi])
    vC[-1] = misc_mera.pad_tensor(vC[-1],
                                  [vC[-1].shape[0], vC[-1].shape[1], chi])
    new_w = misc_mera.pad_tensor(copy.deepcopy(wC[-1]), [chi, chi, chi])
    new_v = misc_mera.pad_tensor(copy.deepcopy(vC[-1]), [chi, chi, chi])
    new_u = misc_mera.pad_tensor(copy.deepcopy(uC[-1]), [chi, chi, chi, chi])
    wC.append(new_w)
    vC.append(new_v)
    uC.append(new_u)
    return increase_bond_dimension_by_adding_layers(chi_new, wC, vC, uC)
def pad_mera_tensors(chi_new, wC, vC, uC, noise=0.0):
    """
    increase the bond dimension of the MERA to `chi_new`
    by padding tensors in all layers with zeros. If the desired `chi_new` cannot
    be obtained from padding, adds layers of Tensors
    the last layer is guaranteed to have uniform bond dimension
    Args:
        chi_new (int):            new bond dimension
        wC (list of tf.Tensor):   MERA isometries
        vC (list of tf.Tensor):   MERA isometries
        uC (list of tf.Tensor):   MERA disentanglers
        noise (float):            amplitude of uniform noise added to the padded tensors
    Returns:
        wC (list of tf.Tensor):   padded MERA isometries
        vC (list of tf.Tensor):   padded MERA isometries
        uC (list of tf.Tensor):   padded MERA disentanglers
    """

    # collect every bond dimension appearing in the isometries
    all_chis = [t.shape[n] for t in wC for n in range(len(t.shape))]
    if not np.all([c <= chi_new for c in all_chis]):
        # some bond already exceeds `chi_new`; padding cannot shrink tensors,
        # so return unchanged
        return wC, vC, uC

    # bottom layer: input bonds keep the physical dimension `chi_0`,
    # output bond grows to at most chi_0**2 (isometry rank limit)
    chi_0 = wC[0].shape[0]
    wC[0] = misc_mera.pad_tensor(wC[0], [chi_0, chi_0, min(chi_new, chi_0**2)])
    vC[0] = misc_mera.pad_tensor(vC[0], [chi_0, chi_0, min(chi_new, chi_0**2)])

    # higher layers: the maximal achievable bond dimension squares with each
    # layer (chi_0**(2**n)), capped at `chi_new`
    for n in range(1, len(wC)):
        wC[n] = misc_mera.pad_tensor(wC[n], [
            min(chi_new, chi_0**(2**n)),
            min(chi_new, chi_0**(2**n)),
            min(chi_new, chi_0**(2**(n + 1)))
        ])
        vC[n] = misc_mera.pad_tensor(vC[n], [
            min(chi_new, chi_0**(2**n)),
            min(chi_new, chi_0**(2**n)),
            min(chi_new, chi_0**(2**(n + 1)))
        ])
        uC[n] = misc_mera.pad_tensor(uC[n], [
            min(chi_new, chi_0**(2**n)),
            min(chi_new, chi_0**(2**n)),
            min(chi_new, chi_0**(2**n)),
            min(chi_new, chi_0**(2**n))
        ])
        # NOTE(review): no minval/maxval here, so the TF1 default range [0, 1)
        # is used, while the while-loop below draws from [-1, 1] — looks
        # inconsistent; confirm whether non-negative noise is intended here
        wC[n] += (tf.random_uniform(shape=wC[n].shape, dtype=wC[n].dtype) *
                  noise)
        vC[n] += (tf.random_uniform(shape=vC[n].shape, dtype=vC[n].dtype) *
                  noise)
        uC[n] += (tf.random_uniform(shape=uC[n].shape, dtype=uC[n].dtype) *
                  noise)

    # if `chi_new` was not reached by padding alone, append copies of the top
    # layer (padded further) until the top layer has uniform bond dimension
    n = len(wC)
    while not misc_mera.all_same_chi(wC[-1]):
        wC.append(
            misc_mera.pad_tensor(wC[-1], [
                min(chi_new, chi_0**(2**n)),
                min(chi_new, chi_0**(2**n)),
                min(chi_new, chi_0**(2**(n + 1)))
            ]))
        vC.append(
            misc_mera.pad_tensor(vC[-1], [
                min(chi_new, chi_0**(2**n)),
                min(chi_new, chi_0**(2**n)),
                min(chi_new, chi_0**(2**(n + 1)))
            ]))
        uC.append(
            misc_mera.pad_tensor(uC[-1], [
                min(chi_new, chi_0**(2**n)),
                min(chi_new, chi_0**(2**n)),
                min(chi_new, chi_0**(2**n)),
                min(chi_new, chi_0**(2**n))
            ]))
        # symmetric noise in [-1, 1] * noise for the newly created layers
        wC[-1] += (tf.random_uniform(
            shape=wC[-1].shape, minval=-1, maxval=1, dtype=wC[-1].dtype) *
                   noise)
        vC[-1] += (tf.random_uniform(
            shape=vC[-1].shape, minval=-1, maxval=1, dtype=vC[-1].dtype) *
                   noise)
        uC[-1] += (tf.random_uniform(
            shape=uC[-1].shape, minval=-1, maxval=1, dtype=uC[-1].dtype) *
                   noise)

        n += 1

    return wC, vC, uC
示例#4
0
def run_binary_mera_optimization_TFI(chis=[4, 6, 8],
                                     niters=[200, 300, 1000],
                                     embeddings=None,
                                     dtype=tf.float64,
                                     verbose=1,
                                     nsteps_steady_state=4,
                                     numpy_update=True,
                                     opt_u_after=40,
                                     opt_all_layers=None,
                                     wC=0,
                                     uC=0,
                                     rho_0=0,
                                     noises=None,
                                     filename=None):
    """
    optimize a binary mera to approximate the ground-state of the infinite transverse field Ising model
    Args:
        chis (list of int):   bond dimension of successive MERA simulations 
        niters (list of int): number of optimization steps of successive MERA optimizations 
        embeddings (list of str or None): type of embedding scheme used to embed mera into the next larger bond dimension 
                                          entries can be: 'p' or 'pad' for padding with zeros without, if possible, adding new layers 
                                                          'a' or 'add' for adding new layer with increased  bond dimension
                                                          'n'          for keeping the MERA as it is (e.g. for resuming optimization)
                                          the first entry will be ignored for the case where no `wC` and `uC` tensors are passed
        dtype (tensorflow dtype):      dtype
        verbose (int):                 verbosity flag, if `verbose>0`, print out info  during optimization
        nsteps_steady_state (int):     number of power-method iteration steps for calculating the 
                                       steady state density matrices 
        numpy_update (bool):           if True, use numpy svd to calculate update of disentanglers and isometries
        opt_u_after (int):             start optimizing disentangler only after `opt_u_after` initial optimization steps
        opt_all_layers (list of bool or None): per-simulation flag;
                                       if `True`, optimize all layers
                                       if `False`, only optimize truncating layers
        wC (list of tf.Tensor or 0.0): initial values for isometries; if `0.0`, initialize with  identities
        uC (list of tf.Tensor or 0.0): initial values for disentanglers; if `0.0`, initialize with  identities
        rho_0 (tf.Tensor or 0.0):      initial value for reduced density matrix; if `0.0`, initialize with  identities
        noises (list of float):        noise values for initializing new layers
        filename (str or None):        if given, pickle the tensors and the collected
                                       energies/walltimes after each simulation

    Returns: 
        wC (list of tf.Tensor): optimized isometries of the MERA
        uC (list of tf.Tensor): optimized disentanglers of the MERA
        walltimes (list of float):    walltimes per iteration step of the last simulation
        energies (list of tf.Tensor): energies per iteration step of the last simulation
    Raises:
        ValueError: if `chis`, `niters`, `embeddings`, `noises` and
                    `opt_all_layers` are of different lengths
    """

    if not embeddings:
        embeddings = ['p'] * len(chis)
    if not noises:
        noises = [0.0] * len(chis)
    if not opt_all_layers:
        opt_all_layers = [True] * len(chis)

    # `zip` below would silently drop trailing entries of mismatched lists;
    # fail loudly instead (mirrors `run_mod_binary_mera_optimization_TFI`)
    if not (len(chis) == len(niters) == len(embeddings) == len(noises) ==
            len(opt_all_layers)):
        raise ValueError(
            '`chis`, `niters`, `embeddings`, `noises` and `opt_all_layers` '
            'need to be of same lengths')

    # `init` marks a freshly initialized MERA: the first embedding step is
    # skipped because there is no converged MERA to embed yet
    init = False
    if wC == 0:
        init = True
        wC, _, _ = bml.initialize_binary_MERA_identities(phys_dim=2,
                                                         chi=chis[0],
                                                         dtype=dtype)
    if uC == 0:
        init = True
        _, uC, _ = bml.initialize_binary_MERA_identities(phys_dim=2,
                                                         chi=chis[0],
                                                         dtype=dtype)
    if rho_0 == 0:
        _, _, rho_0 = bml.initialize_binary_MERA_identities(phys_dim=2,
                                                            chi=chis[0],
                                                            dtype=dtype)

    ham_0 = bml.initialize_TFI_hams(dtype=dtype)

    data = {'profile': {}, 'energies': {}}

    for chi, niter, which, noise, opt_all in zip(chis, niters, embeddings,
                                                 noises, opt_all_layers):
        energies = []
        walltimes = []
        if not init:
            # embed the converged MERA into the next larger bond dimension;
            # any `which` other than 'a'/'add'/'p'/'pad' keeps the MERA as is
            if which in ('a', 'add'):
                wC, uC = bml.unlock_layer(wC, uC, noise=noise)
                wC, uC = bml.pad_mera_tensors(chi, wC, uC, noise=noise)
            elif which in ('p', 'pad'):
                wC, uC = bml.pad_mera_tensors(chi, wC, uC, noise=noise)

        rho_0 = misc_mera.pad_tensor(rho_0, [chi] * 6)

        wC, uC, rho_0, times, es = bml.optimize_binary_mera(
            ham_0=ham_0,
            #rho_0=rho_0,
            wC=wC,
            uC=uC,
            numiter=niter,
            nsteps_steady_state=nsteps_steady_state,
            verbose=verbose,
            opt_u=True,
            opt_w=True,
            numpy_update=numpy_update,
            opt_u_after=opt_u_after,
            opt_all_layers=opt_all)
        energies.extend(es)
        walltimes.extend(times)
        data['profile'][chi] = walltimes
        data['energies'][chi] = energies
        init = False
        if filename:
            # checkpoint tensors and collected statistics after each chi
            with open(filename + '_tensors.pickle', 'wb') as f:
                pickle.dump([wC, uC], f)
            with open('energies_walltimes_' + filename + '.pickle', 'wb') as f:
                pickle.dump(data, f)

    return wC, uC, walltimes, energies
def run_mod_binary_mera_optimization_TFI(chis=[8, 12, 16],
                                         niters=[200, 300, 1000],
                                         embeddings=None,
                                         dtype=tf.float64,
                                         verbose=1,
                                         refsym=True,
                                         nsteps_steady_state=4,
                                         opt_u_after=9,
                                         noise=0.0):
    """
    run a modified binary mera optimization
    Args:
        chis (list):          bond dimension of successive MERA simulations 
        niters (list):        number of optimization steps of successive MERA optimizations 
        embeddings (list):    type of embeddings scheme used to embed mera into the next larger bond dimension 
                              elements can be: 'p' or 'pad' for padding with zeros without, if possible, adding new layers 
                                               'a' or 'add' for adding new layer with increased  bond dimension
        dtype (tensorflow dtype):  tensorflow dtype 
        verbose (int):             verbosity flag 
        refsym (bool):             if `True`, impose reflection symmetry 
        nsteps_steady_state (int): number power iteration of steps used to obtain the steady state reduced 
                                   density matrix
        opt_u_after (int):         start optimizing disentanglers only after this many steps
        noise (float):             noise amplitude for initializing new layers and/or padding existing ones

    Returns: 
        energies (list):   list of tf.Tensor of shape () 
                           energies at iterations steps
        walltimes (list):  walltimes per iteration step 
        wC (list):         isometries wC
        vC (list):         isometries vC
        uC (list):         disentanglers uC
    Raises:
        ValueError if `chis`,`niters` and `embeddings` are of different lengths
    """

    if not embeddings:
        # fix: was `len(chi)` -- `chi` is undefined at this point (NameError);
        # default to one 'pad' embedding per simulation
        embeddings = ['p'] * len(chis)
    # validate before any expensive initialization; `zip` below would
    # otherwise silently truncate mismatched lists
    if not ([len(chis), len(niters), len(embeddings)] == [len(chis)] * 3):
        raise ValueError(
            '`chis`, `niter` and `embeddings` need to be of same lengths')
    wC, vC, uC, rhoAB_0, rhoBA_0 = mbml.initialize_mod_binary_MERA(
        phys_dim=4, chi=chis[0], dtype=dtype)
    hamAB_0, hamBA_0 = mbml.initialize_TFI_hams(dtype=dtype)
    energies = []
    walltimes = []
    # `init` marks the freshly initialized MERA: no embedding on the first pass
    init = True
    for chi, niter, which in zip(chis, niters, embeddings):
        if not init:
            # embed the converged MERA into the next larger bond dimension
            if which in ('a', 'add'):
                wC, vC, uC = mbml.unlock_layer(wC, vC, uC, noise=noise)
                wC, vC, uC = mbml.pad_mera_tensors(chi, wC, vC, uC, noise=noise)
            elif which in ('p', 'pad'):
                wC, vC, uC = mbml.pad_mera_tensors(chi, wC, vC, uC, noise=noise)

        rhoAB_0 = misc_mera.pad_tensor(rhoAB_0, [chi] * 4)
        rhoBA_0 = misc_mera.pad_tensor(rhoBA_0, [chi] * 4)
        wC, vC, uC, rhoAB_0, rhoBA_0, times, es = mbml.optimize_mod_binary_mera(
            hamAB_0=hamAB_0,
            hamBA_0=hamBA_0,
            rhoAB_0=rhoAB_0,
            rhoBA_0=rhoBA_0,
            wC=wC,
            vC=vC,
            uC=uC,
            verbose=verbose,
            numiter=niter,
            opt_u=True,
            opt_vw=True,
            refsym=refsym,
            nsteps_steady_state=nsteps_steady_state,
            opt_u_after=opt_u_after)
        energies.extend(es)
        walltimes.extend(times)
        init = False
    return energies, walltimes, wC, vC, uC