Example #1
  def shift_unitcell(self, sites):
    """
        
        """
    self.position(sites)
    new_lb = self.left_envs[sites]
    new_rb = self.right_envs[sites - 1]
    centermatrix = self.mps.mat
    self.mps.position(len(self.mps))  #move centermatrix to the right
    new_center_matrix = misc_mps.ncon([self.mps.mat, self.mps.connector],
                                      [[-1, 1], [1, -2]])

    self.mps.pos = sites
    self.mps.mat = centermatrix
    self.mps.position(0)
    new_center_matrix = misc_mps.ncon([new_center_matrix, self.mps.mat],
                                      [[-1, 1], [1, -2]])
    # cyclically permute the MPS and MPO tensors so that site `sites`
    # becomes site 0 of the new unit cell
    tensors = [self.mps[n] for n in range(sites, len(self.mps))
              ] + [self.mps[n] for n in range(sites)]
    self.mps._tensors = tensors
    self.mpo._tensors = [self.mpo[n] for n in range(sites, len(self.mps))
                        ] + [self.mpo[n] for n in range(sites)]
    self.mps.connector = tf.linalg.inv(centermatrix)
    self.mps.mat = new_center_matrix
    self.mps.pos = len(self.mps) - sites
    self.lb = new_lb
    self.rb = new_rb
    self.update()
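
A hedged usage sketch for the method above; `engine` is a hypothetical InfiniteDMRGEngine-like instance (see Example #2):

# Illustrative only: `engine` is assumed to carry an infinite MPS/MPO pair.
engine.shift_unitcell(sites=2)
# Afterwards the old site 2 is site 0 of the unit cell, the boundaries
# engine.lb/engine.rb have been moved accordingly, and update() has
# presumably rebuilt the engine's internal environments.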
Example #2
  def __init__(self,
               mps,
               mpo,
               name='InfiniteDMRG',
               precision=1E-12,
               precision_canonize=1E-12,
               nmax=1000,
               nmax_canonize=1000,
               ncv=40,
               numeig=1,
               pinv=1E-20,
               power_method=False):

    # if not isinstance(mps, InfiniteMPSCentralGauge):
    #     raise TypeError(
    #         'in InfiniteDMRGEngine.__init__(...): mps of type InfiniteMPSCentralGauge expected, got {0}'
    #         .format(type(mps)))

    mps.restore_form(
        precision=precision_canonize,
        ncv=ncv,
        nmax=nmax_canonize,
        numeig=numeig,
        power_method=power_method,
        pinv=pinv)  #this leaves state in left-orthogonal form

    lb, hl = misc_mps.compute_steady_state_Hamiltonian_GMRES(
        'l',
        mps,
        mpo,
        left_dominant=tf.diag(tf.ones(mps.D[-1], dtype=mps.dtype)),
        right_dominant=misc_mps.ncon([mps.mat, tf.conj(mps.mat)],
                                     [[-1, 1], [-2, 1]]),
        precision=precision,
        nmax=nmax)

    rmps = mps.get_right_orthogonal_imps(
        precision=precision_canonize,
        ncv=ncv,
        nmax=nmax_canonize,
        numeig=numeig,
        pinv=pinv,
        restore_form=False)

    rb, hr = misc_mps.compute_steady_state_Hamiltonian_GMRES(
        'r',
        rmps,
        mpo,
        right_dominant=tf.diag(tf.ones(mps.D[0], dtype=mps.dtype)),
        left_dominant=misc_mps.ncon([mps.mat, tf.conj(mps.mat)],
                                    [[1, -1], [1, -2]]),
        precision=precision,
        nmax=nmax)

    left_dominant = misc_mps.ncon([mps.mat, tf.conj(mps.mat)],
                                  [[1, -1], [1, -2]])
    out = mps.unitcell_transfer_op('l', left_dominant)  # result unused; likely a fixed-point consistency check

    super().__init__(mps=mps, mpo=mpo, lb=lb, rb=rb, name=name)
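
A hedged construction sketch; the class name InfiniteDMRGEngine is taken from the commented type check above, and `mps` and `mpo` are assumed to be compatible repo objects (e.g. an InfiniteMPSCentralGauge and a matching infinite MPO):

# Illustrative only: mps and mpo are assumed to be repo objects.
engine = InfiniteDMRGEngine(
    mps, mpo,
    name='InfiniteDMRG',
    precision=1E-12,           # GMRES precision for the steady-state environments
    precision_canonize=1E-12,  # precision used by restore_form
    ncv=40, numeig=1,          # Krylov parameters for canonization
    pinv=1E-20)                # pseudo-inverse cutoff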
Example #3
def tridiag_tensorflow(vecs, alpha, beta):
    """
    Diagonalize the tridiagonal Lanczos matrix built from `alpha` (diagonal)
    and `beta` (off-diagonal), and recombine the stacked Krylov vectors
    `vecs` into the normalized lowest Ritz vector.
    """
    Heff = tf.contrib.distributions.tridiag(beta, alpha, tf.conj(beta))
    eta, u = tf.linalg.eigh(Heff)  # could use a dedicated tridiagonal solver
    out = misc_mps.ncon([vecs, u], [[1, -1, -2, -3], [1, -4]])
    out = out[:, :, :, 0]  # keep the eigenvector of the lowest eigenvalue
    out = tf.math.divide(out, tf.linalg.norm(out))
    return eta[0], out
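
For reference, a self-contained NumPy stand-in for the same post-processing step (not this repo's API): diagonalize the tridiagonal Lanczos matrix and recombine the Krylov vectors into the lowest Ritz vector.

import numpy as np

def tridiag_numpy(vecs, alpha, beta):
    # vecs: (ncv, Dl, d, Dr) stacked Krylov vectors; alpha: (ncv,) diagonal;
    # beta: (ncv-1,) sub-diagonal of the tridiagonal Lanczos matrix.
    Heff = np.diag(alpha) + np.diag(beta, -1) + np.diag(np.conj(beta), 1)
    eta, u = np.linalg.eigh(Heff)
    # recombine the Krylov vectors with the lowest eigenvector and normalize
    out = np.tensordot(u[:, 0], vecs, axes=(0, 0))
    return eta[0], out / np.linalg.norm(out)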
Example #4
 def predictor(mps, sample, pos):
     """Contract `sample` through the MPS classifier and return the
     normalized label vector predicted at `mps.label_pos`."""
     mps.position(pos)
     mps.compute_data_environments(sample)
     # accumulate the left environment up to the label site
     left = tf.ones(shape=[1], dtype=mps.dtype)
     for site in range(mps.label_pos):
         left = misc_mps.ncon([left, mps.get_tensor(site), sample[0, :, site]],
                              [[1], [1, 2, -1], [2]])
         Z = tf.linalg.norm(left)
         left /= Z  # normalize to avoid over/underflow
     # accumulate the right environment down to the label site; the sample
     # index is shifted by one because the label site carries no data leg
     right = tf.ones(shape=[1], dtype=mps.dtype)
     for site in reversed(range(mps.label_pos + 1, len(mps))):
         right = misc_mps.ncon([mps.get_tensor(site), sample[0, :, site - 1], right],
                               [[-1, 1, 2], [1], [2]])
         Z = tf.linalg.norm(right)
         right /= Z
     pred = misc_mps.ncon([left, mps.get_tensor(mps.label_pos), right],
                          [[1], [1, -1, 2], [2]])
     return pred / tf.linalg.norm(pred)
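
The same left-to-right / right-to-left environment accumulation, written as a self-contained NumPy sketch for a single sample; shapes and the label position are illustrative:

import numpy as np

# Illustrative shapes: 4 feature sites around a label site at position 2.
D, d, n_labels = 3, 2, 5
tensors = [np.random.rand(1, d, D), np.random.rand(D, d, D),
           np.random.rand(D, n_labels, D),              # label tensor
           np.random.rand(D, d, D), np.random.rand(D, d, 1)]
x = np.random.rand(len(tensors) - 1, d)                 # one encoded sample

left = np.ones(1)
for site in range(2):                                   # sites left of the label
    left = np.einsum('i,ijk,j->k', left, tensors[site], x[site])
    left /= np.linalg.norm(left)                        # keep numbers O(1)
right = np.ones(1)
for site in reversed(range(3, len(tensors))):           # sites right of the label
    right = np.einsum('ijk,j,k->i', tensors[site], x[site - 1], right)
    right /= np.linalg.norm(right)
pred = np.einsum('i,ilk,k->l', left, tensors[2], right)  # unnormalized label scores
pred /= np.linalg.norm(pred)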
Example #5
def do_lanczos_simple_tensorarray(L, mpo, R, initial_state, ncv, delta):
    """
    do a lanczos simulation (using a tf.while_loop)

    Parameters:
    -------------------------

    ...... fill in ....

    Returns:
    ----------------------------
    (vecs,alpha,beta)
    """

    dtype = initial_state.dtype
    vecs = tf.TensorArray(
        dtype,
        element_shape=initial_state.shape,
        size=ncv + 1,
        clear_after_read=False,
    )
    vecs = vecs.write(0, tf.zeros(shape=initial_state.shape, dtype=dtype))
    Hxn = initial_state
    alphas = tf.TensorArray(dtype, ncv)
    betas = tf.TensorArray(dtype, ncv)

    for n in range(ncv):
        xn = Hxn  # current (unnormalized) Krylov vector
        beta = tf.linalg.norm(xn)
        betas = betas.write(n, beta)
        xn = tf.math.divide(xn, beta)
        vecs = vecs.write(n + 1, xn)
        # apply the effective Hamiltonian: contract environments and MPO with xn
        Hxn = misc_mps.ncon([L, xn, mpo, R],
                            [[1, -1, 2], [1, 3, 4], [2, 5, -2, 3], [4, -3, 5]])
        alpha = misc_mps.ncon([
            tf.reshape(tf.conj(xn), [xn.shape[0] * xn.shape[1] * xn.shape[2]]),
            tf.reshape(Hxn, [Hxn.shape[0] * Hxn.shape[1] * Hxn.shape[2]])
        ], [[1], [1]])
        alphas = alphas.write(n, alpha)
        # three-term recurrence: orthogonalize against the two previous vectors
        Hxn = Hxn - tf.multiply(vecs.read(n), beta) - tf.multiply(xn, alpha)
    # drop the auxiliary zero vector at slot 0 and the initial-norm beta
    return ncv, vecs.stack()[1:], alphas.stack(), betas.stack()[1:]
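
To see what the three-term recurrence above computes, here is a self-contained NumPy Lanczos on a random symmetric matrix, checked against np.linalg.eigh; it is an illustration only, independent of this repo:

import numpy as np

def lanczos_dense(H, x0, ncv):
    # same bookkeeping as above: slot 0 holds a zero vector for the recurrence
    vecs, alphas, betas = [np.zeros_like(x0)], [], []
    Hxn = x0
    for n in range(ncv):
        beta = np.linalg.norm(Hxn)
        xn = Hxn / beta
        vecs.append(xn)
        betas.append(beta)
        Hxn = H @ xn                      # dense stand-in for the L/mpo/R contraction
        alpha = np.vdot(xn, Hxn)          # <xn|H|xn>
        alphas.append(alpha)
        Hxn = Hxn - vecs[-2] * beta - xn * alpha
    return np.array(vecs[1:]), np.array(alphas), np.array(betas[1:])

rng = np.random.default_rng(0)
A = rng.normal(size=(50, 50))
H = (A + A.T) / 2
vecs, alpha, beta = lanczos_dense(H, rng.normal(size=50), ncv=30)
T = np.diag(alpha) + np.diag(beta, 1) + np.diag(beta, -1)
print(np.linalg.eigvalsh(T)[0], np.linalg.eigvalsh(H)[0])  # lowest Ritz value vs exact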
Example #6
 def gram_schmidt_step(j, basis, v):
     """Makes v orthogonal to the j'th vector in basis."""
     basis_vec = basis.read(j)
     # subtract the component of v along basis_vec: v -= <basis_vec, v> basis_vec
     v -= misc_mps.ncon([
         tf.reshape(tf.conj(basis_vec), [
             basis_vec.shape[0] * basis_vec.shape[1] * basis_vec.shape[2]
         ]),
         tf.reshape(v, [v.shape[0] * v.shape[1] * v.shape[2]])
     ], [[1], [1]]) * basis_vec
     return j + 1, basis, v
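
A minimal eager-TensorFlow sketch of the same projection step, with tf.tensordot standing in for the repo's misc_mps.ncon; names and shapes are illustrative:

import tensorflow as tf

basis = tf.TensorArray(tf.float64, size=1, clear_after_read=False)
b = tf.constant([1.0, 0.0, 0.0], dtype=tf.float64)
basis = basis.write(0, b)
v = tf.constant([0.5, 2.0, 0.0], dtype=tf.float64)
# project out the component of v along basis vector 0: v -= <b, v> b
overlap = tf.tensordot(tf.math.conj(basis.read(0)), v, axes=1)
v = v - overlap * basis.read(0)
print(v.numpy())  # -> [0., 2., 0.], orthogonal to b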
Example #7
    def do_lanczos_step(n, lanstate):
        xn = lanstate.UN_krylov_vectors.read(n)
        beta = tf.linalg.norm(xn)
        xn = tf.math.divide(xn, beta)
        if reortho:
            # NOTE: tf.Tensors are immutable, so the orthogonalized vector
            # has to be captured from the return value (assuming
            # `orthogonalize` follows the (j, basis, v) convention of
            # gram_schmidt_step above)
            _, _, xn = orthogonalize(n - 1, lanstate.krylov_vectors, xn)

        # apply the effective Hamiltonian: contract environments and MPO with xn
        Hxn = misc_mps.ncon([L, xn, mpo, R],
                            [[1, -1, 2], [1, 3, 4], [2, 5, -2, 3], [4, -3, 5]])
        alpha = misc_mps.ncon([tf.conj(xn), Hxn], [[1, 2, 3], [1, 2, 3]])
        # three-term recurrence
        Hxn = Hxn - tf.multiply(lanstate.krylov_vectors.read(n),
                                beta) - tf.multiply(xn, alpha)
        return n + 1, update_state(old=lanstate,
                                   n=n,
                                   Hxn=Hxn,
                                   xn=xn,
                                   alpha=alpha,
                                   beta=beta)
Example #8
def do_lanczos_simple(L, mpo, R, initial_state, ncv, delta):
    """
    do a lanczos simulation (using a tf.while_loop)

    Parameters:
    -------------------------

    ...... fill in ....

    Returns:
    ----------------------------
    (vecs,alpha,beta)
    """

    vecs = [tf.math.multiply(initial_state, 0.0)]  # zero vector for the recurrence
    vecs.append(initial_state)
    alphas, betas = [], []
    for n in range(ncv):
        xn = vecs[n + 1]
        beta = tf.linalg.norm(xn)
        betas.append(beta)
        xn = tf.math.divide(xn, beta)
        vecs[n + 1] = xn
        # apply the effective Hamiltonian: contract environments and MPO with xn
        Hxn = misc_mps.ncon([L, xn, mpo, R],
                            [[1, -1, 2], [1, 3, 4], [2, 5, -2, 3], [4, -3, 5]])
        alpha = misc_mps.ncon([
            tf.reshape(tf.conj(xn), [xn.shape[0] * xn.shape[1] * xn.shape[2]]),
            tf.reshape(Hxn, [Hxn.shape[0] * Hxn.shape[1] * Hxn.shape[2]])
        ], [[1], [1]])
        alphas.append(alpha)
        # three-term recurrence: orthogonalize against the two previous vectors
        Hxn = Hxn - tf.multiply(vecs[n], beta) - tf.multiply(xn, alpha)
        vecs.append(Hxn)
    # drop the auxiliary zero vector, the trailing unnormalized vector,
    # and the initial-norm beta
    return ncv, vecs[1:-1], alphas, betas[1:]
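
A hedged usage sketch for the Lanczos variants above; L, mpo, R, and initial_state are assumed to be existing tf.Tensors whose shapes follow from the ncon bracket [[1,-1,2],[1,3,4],[2,5,-2,3],[4,-3,5]]. Note that tridiag_tensorflow (Example #3) expects the stacked vecs of the TensorArray variant (Example #5); this list variant returns an unstacked python list.

# Assumed shapes, read off the contraction above (illustrative):
#   L:  (D, D, M)     left environment (two MPS bonds, one MPO bond)
#   mpo: (M, M, d, d) site MPO (left/right MPO bonds, phys-out, phys-in)
#   R:  (D, D, M)     right environment
#   initial_state: (D, d, D) one-site wavefunction
ncv, vecs, alphas, betas = do_lanczos_simple_tensorarray(
    L, mpo, R, initial_state, ncv=20, delta=1E-8)
eta, opt = tridiag_tensorflow(vecs, alphas, betas)  # lowest Ritz pair, cf. Example #3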
Example #9
  def _optimize_1s_local(self,
                         site,
                         sweep_dir,
                         ncv=40,
                         Ndiag=10,
                         landelta=1E-5,
                         landeltaEta=1E-5,
                         verbose=0):

    if sweep_dir in (-1, 'r', 'right'):
      if self.mps.pos != site:
        raise ValueError(
            '_optimize_1s_local for sweep_dir={2}: site={0} != mps.pos={1}'.
            format(site, self.mps.pos, sweep_dir))
    if sweep_dir in (1, 'l', 'left'):
      if self.mps.pos != (site + 1):
        raise ValueError(
            '_optimize_1s_local for sweep_dir={2}: site={0}, mps.pos={1}'.
            format(site, self.mps.pos, sweep_dir))

    if sweep_dir in (-1, 'r', 'right'):
      #NOTE (martin) don't use get_tensor here
      initial = misc_mps.ncon([self.mps.mat, self.mps[site]],
                              [[-1, 1], [1, -2, -3]])
    elif sweep_dir in (1, 'l', 'left'):
      #NOTE (martin) don't use get_tensor here
      initial = misc_mps.ncon([self.mps[site], self.mps.mat],
                              [[-1, -2, 1], [1, -3]])

    if self.walltime_log:
      t1 = time.time()
    nit, vecs, alpha, beta = LZ.do_lanczos(
        L=self.left_envs[site],
        mpo=self.mpo[site],
        R=self.right_envs[site],
        initial_state=initial,
        ncv=np.min([
            ncv,
            int(initial.shape[0]) * int(initial.shape[1]) * int(
                initial.shape[2])
        ]),
        delta=landelta)

    if self.walltime_log:
      self.walltime_log(
          lan=[(time.time() - t1) / float(nit)] * int(nit),
          QR=[],
          add_layer=[],
          num_lan=[int(nit)])

    e, opt = LZ.tridiag(vecs, alpha, beta)  # lowest Ritz value and Ritz vector
    Dnew = opt.shape[2]

    if verbose > 0:
      stdout.write(
          "\rSS-DMRG it=%i/%i, site=%i/%i: optimized E=%.16f+%.16f at D=%i" %
          (self._it, self.Nsweeps, site, len(self.mps), np.real(e), np.imag(e),
           Dnew))
      stdout.flush()

    if verbose > 1:
      print("")

    if self.walltime_log:
      t1 = time.time()
    if sweep_dir in (-1, 'r', 'right'):
      # split the optimized tensor into a left-isometric A and the new center matrix
      A, mat, Z = misc_mps.prepare_tensor_QR(opt, direction='l')
      A /= Z
    elif sweep_dir in (1, 'l', 'left'):
      # split into the new center matrix and a right-isometric B
      mat, B, Z = misc_mps.prepare_tensor_QR(opt, direction='r')
      B /= Z
    if self.walltime_log:
      self.walltime_log(lan=[], QR=[time.time() - t1], add_layer=[], num_lan=[])

    self.mps.mat = mat
    if sweep_dir in (-1, 'r', 'right'):
      self.mps._tensors[site] = A
      self.mps.pos += 1
      self.left_envs[site + 1] = self.add_layer(
          B=self.left_envs[site],
          mps_tensor=self.mps[site],
          mpo_tensor=self.mpo[site],
          conj_mps_tensor=self.mps[site],
          direction=1,
          walltime_log=self.walltime_log)

    elif sweep_dir in (1, 'l', 'left'):
      self.mps._tensors[site] = B
      self.mps.pos = site
      self.right_envs[site - 1] = self.add_layer(
          B=self.right_envs[site],
          mps_tensor=self.mps[site],
          mpo_tensor=self.mpo[site],
          conj_mps_tensor=self.mps[site],
          direction=-1,
          walltime_log=self.walltime_log)
    return e
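
A hypothetical sweep driver, to show how the per-site update above is typically chained; everything but _optimize_1s_local is illustrative, and engine.mps.pos == 0 is assumed at the start. The pos bookkeeping inside _optimize_1s_local keeps the invariants mps.pos == site (right sweep) and mps.pos == site + 1 (left sweep).

for site in range(len(engine.mps) - 1):            # sweep right over sites 0 .. N-2
    e = engine._optimize_1s_local(site, sweep_dir='r')
for site in reversed(range(len(engine.mps) - 1)):  # sweep back over sites N-2 .. 0
    e = engine._optimize_1s_local(site, sweep_dir='l')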
Example #10
  def _optimize_2s_local(self,
                         thresh=1E-10,
                         D=None,
                         ncv=40,
                         Ndiag=10,
                         landelta=1E-5,
                         landeltaEta=1E-5,
                         verbose=0):
    raise NotImplementedError()
    mpol = self.mpo[self.mpo.pos - 1]
    mpor = self.mpo[self.mpo.pos]
    Ml, Mc, dl, dlp = mpol.shape
    Mc, Mr, dr, drp = mpor.shape
    mpo = tf.reshape(
        misc_mps.ncon([mpol, mpor], [[-1, 1, -3, -5], [1, -2, -4, -6]]),
        [Ml, Mr, dl * dr, dlp * drp])  # merge the two site MPOs into one
    initial = misc_mps.ncon(
        [self.mps[self.mps.pos - 1], self.mps.mat, self.mps[self.mps.pos]],
        [[-1, -2, 1], [1, 2], [2, -3, -4]])
    Dl, dl, dr, Dr = initial.shape
    initial = tf.reshape(initial, [Dl, dl * dr, Dr])  # merge the two physical legs
    if self.walltime_log:
      t1 = time.time()

    nit, vecs, alpha, beta = LZ.do_lanczos(
        L=self.left_envs[self.mps.pos - 1],
        mpo=mpo,
        R=self.right_envs[self.mps.pos],
        initial_state=initial,
        ncv=ncv,
        delta=landelta)
    if self.walltime_log:
      self.walltime_log(
          lan=[(time.time() - t1) / float(nit)] * int(nit),
          QR=[],
          add_layer=[],
          num_lan=[int(nit)])

    e, opt = LZ.tridiag(vecs, alpha, beta)  # lowest Ritz pair, cf. _optimize_1s_local
    temp = tf.reshape(opt, [
        self.mps.D[self.mps.pos - 1], dlp, drp, self.mps.D[self.mps.pos + 1]
    ])
    # NOTE: the splitting below still uses an older tensor API (.svd, .split,
    # .diag, .transpose, .merge and the undefined mps_merge_data/merge_data);
    # this is why the method raises NotImplementedError at the top
    U, S, V = temp.svd(truncation_threshold=thresh, D=D)
    Dnew = S.shape[0]
    if verbose > 0:
      stdout.write(
          "\rTS-DMRG it=%i/%i, sites=(%i,%i)/%i: optimized E=%.16f+%.16f at D=%i"
          % (self._it, self.Nsweeps, self.mps.pos - 1, self.mps.pos,
             len(self.mps), np.real(e), np.imag(e), Dnew))
      stdout.flush()
    if verbose > 1:
      print("")

    Z = np.sqrt(misc_mps.ncon([S, S], [[1], [1]]))
    self.mps.mat = S.diag() / Z

    self.mps[self.mps.pos - 1] = U.split([merge_data[0],
                                          [U.shape[1]]]).transpose(0, 2, 1)
    self.mps[self.mps.pos] = V.split([[V.shape[0]], merge_data[1]]).transpose(
        0, 2, 1)
    self.left_envs[self.mps.pos] = self.add_layer(
        B=self.left_envs[self.mps.pos - 1],
        mps_tensor=self.mps[self.mps.pos - 1],
        mpo_tensor=self.mpo[self.mps.pos - 1],
        conj_mps_tensor=self.mps[self.mps.pos - 1],
        direction=1)

    self.right_envs[self.mps.pos - 1] = self.add_layer(
        B=self.right_envs[self.mps.pos],
        mps_tensor=self.mps[self.mps.pos],
        mpo_tensor=self.mpo[self.mps.pos],
        conj_mps_tensor=self.mps[self.mps.pos],
        direction=-1)
    return e
Example #11
    def _simulate(self, initialstate, reortho=False, verbose=False):
        """
        do a lanczos simulation

        Parameters:
        -------------------------
        initialstate: tf.Tensor,
                      the initial state
        reortho:      bool
                      if True, krylov vectors are reorthogonalized at each step (costly)
                      the current implementation is not optimal: there are better ways to do this
        verbose:      bool
        verbosity flag
        """
        self.delta = tf.cast(self.delta, initialstate.dtype)
        self.deltaEta = tf.cast(self.deltaEta, initialstate.dtype)

        dtype = self.matvec(initialstate).dtype
        #initialization:
        xn = copy.deepcopy(initialstate)
        xn /= tf.sqrt(
            misc_mps.ncon([tf.conj(xn), xn],
                          [range(len(xn.shape)),
                           range(len(xn.shape))]))

        xn_minus_1 = tf.zeros(initialstate.shape, dtype=dtype)
        converged = False
        it = 0
        kn = []
        epsn = []
        self.vecs = []
        first = True
        while not converged:
            knval = tf.sqrt(
                misc_mps.ncon([tf.conj(xn), xn],
                              [range(len(xn.shape)),
                               range(len(xn.shape))]))
            if tf.abs(knval) < tf.abs(self.delta):
                break
            kn.append(knval)
            xn = xn / kn[-1]
            #store the Lanczos vector for later

            if reortho:
                for v in self.vecs:
                    xn -= misc_mps.ncon(
                        [tf.conj(v), xn],
                        [range(len(v.shape)),
                         range(len(xn.shape))]) * v
            self.vecs.append(xn)
            Hxn = self.matvec(xn)
            epsn.append(
                misc_mps.ncon([tf.conj(xn), Hxn],
                              [range(len(xn.shape)),
                               range(len(Hxn.shape))]))
            if (it % self.Ndiag) == 0 and len(epsn) >= 1:
                #diagonalize the effective Hamiltonian

                Heff = tf.convert_to_tensor(np.diag(epsn) +
                                            np.diag(kn[1:], 1) +
                                            np.diag(np.conj(kn[1:]), -1),
                                            dtype=dtype)
                eta, u = tf.linalg.eigh(Heff)  # could use a tridiagonal solver
                if not first:
                    # converged once the lowest Ritz value stops moving
                    if tf.abs(eta[0] - etaold) < tf.abs(self.deltaEta):
                        converged = True
                first = False
                etaold = eta[0]
            if it > 0:
                # three-term recurrence: project out the two latest Krylov vectors
                Hxn -= (self.vecs[-1] * epsn[-1])
                Hxn -= (self.vecs[-2] * kn[-1])
            else:
                Hxn -= (self.vecs[-1] * epsn[-1])
            xn = Hxn
            it = it + 1
            if it > self.ncv:
                break
        self.Heff = tf.convert_to_tensor(np.diag(epsn) + np.diag(kn[1:], 1) +
                                         np.diag(np.conj(kn[1:]), -1),
                                         dtype=dtype)
        eta, u = tf.linalg.eigh(self.Heff)  #could use tridiag
        states = []
        for n2 in range(min(1, eta.shape[0])):
            state = tf.zeros(initialstate.shape, dtype=initialstate.dtype)
            for n1 in range(len(self.vecs)):
                state += self.vecs[n1] * u[n1, n2]
            states.append(state / tf.sqrt(
                misc_mps.ncon(
                    [tf.conj(state), state],
                    [range(len(state.shape)),
                     range(len(state.shape))])))
        return eta[0], states[0], converged
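
For orientation, a dense stand-in for the self.matvec this loop assumes; the DMRG engines above use the L/mpo/R contraction instead, so this is an illustration only:

import numpy as np
import tensorflow as tf

A = np.random.rand(20, 20)
H = tf.constant((A + A.T) / 2, dtype=tf.float64)  # any Hermitian operator works
matvec = lambda x: tf.linalg.matvec(H, x)
# With self.matvec = matvec and ncv/Ndiag/delta/deltaEta set, _simulate returns
# the lowest Ritz value, its normalized Ritz vector, and a convergence flag:
#   eta0, state, converged = engine._simulate(tf.ones([20], tf.float64))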