Example #1
def fm_state(N, anti=False):
    r"""Get a 1-D n-site antiferromagnetic state |+-+- ... +->
    or ferromagnetic state |++ ... +>

    Parameters
    ----------
    N : int
        (Even) number of sites.
    anti : bool
        If True, return the antiferromagnetic (Néel) state; otherwise the
        ferromagnetic state.  Default is `False`.

    Returns
    -------
    mps : [(2, 1, 1) ndarray]
        A list of MPS matrices.
    """
    # local bond dimension, 0=up, 1=down
    up = np.zeros((2, 1, 1))
    up[0, 0, 0] = 1
    down = np.zeros((2, 1, 1))
    down[1, 0, 0] = 1
    if anti:
        mps = [up, down] * (N // 2)
    else:
        mps = [up] * N
    return mps
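
A minimal usage sketch (assuming `numpy` is imported as `np` and `fm_state` is defined as above): build a 4-site Néel state and check the tensor shapes.

mps = fm_state(4, anti=True)          # |+-+-> as a product-state MPS
assert len(mps) == 4
assert all(m.shape == (2, 1, 1) for m in mps)
# Site 0 is spin-up, site 1 is spin-down, and so on.
assert mps[0][0, 0, 0] == 1 and mps[1][1, 0, 0] == 1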
Example #2
def tensor_train_template(init_rho, pb_index, rank=1):
    """Get rho_n from rho in a Tensor Train representation.

    Parameters
    ----------
    init_rho : np.ndarray
        Initial density matrix.
    pb_index : [int]
        Dimension of the primary basis for each term.
    rank : int
        Bond dimension of the train.  Default is 1.
    """
    n_vec = np.zeros((rank, ), dtype=DTYPE)
    n_vec[0] = 1.0
    root_array = np.tensordot(init_rho, n_vec, axes=0)

    root = Tensor(name='root', array=root_array, axis=None)
    max_terms = len(pb_index)

    # Leaves max_terms and max_terms + 1 carry the two physical indices i and j.
    root[0] = (Leaf(name=max_terms), 0)
    root[1] = (Leaf(name=max_terms + 1), 0)

    for i in pb_index:
        assert rank <= i

    train = [root]
    for k in range(max_terms):
        if k < max_terms - 1:
            array = np.eye(rank, pb_index[k] * rank)
            array = np.reshape(array, (rank, -1, rank))
        else:
            array = np.eye(rank, pb_index[k])
        spf = Tensor(name=k, array=array, axis=0)
        leaf = Leaf(name=k)
        spf[0] = (train[-1], 2)
        spf[1] = (leaf, 0)
        train.append(spf)

    return root
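
The interior tensors above are built from rectangular identity matrices. A minimal, self-contained numpy sketch of that step with hypothetical dimensions (independent of the `Tensor`/`Leaf` classes used above):

import numpy as np

rank, pb = 2, 4                       # hypothetical bond and physical dimensions
core = np.eye(rank, pb * rank).reshape(rank, pb, rank)
# core[a, n, b] is 1 only when the flattened (n, b) column index equals a,
# i.e. n == 0 and b == a, so only the n = 0 block carries the identity.
assert np.allclose(core[:, 0, :], np.eye(rank))
assert np.allclose(core[:, 1:, :], 0)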
Example #3
    def init_state(self):
        r"""Form the initial vector according to shape list::

            n_0|   |   |n_p-1
              C_0 ... C_p-1
                 \ | /
              m_0 \|/ m_p-1
                   A

        Returns
        -------
        init : (self.size,) ndarray
            Formally, init = np.concatenate([C_0, ..., C_p-1, A], axis=None),
            where C_i is a (n_i * m_i,) ndarray, i in {0, ..., p-1},
            A is a (M,) ndarray, and M = m_0 * ... * m_(p-1), m_i < n_i.
        """
        dvr_list = self.dvr_list
        c_list = []
        for i, (_, m_i) in enumerate(self.shape_list[:-1]):
            _, v_i = dvr_list[i].solve(n_state=m_i)
            v_i = np.transpose(v_i)
            c_list.append(np.reshape(v_i, -1))
        vec_a = np.zeros(self.size_list[-1])
        vec_a[0] = 1.0
        vec_list = c_list + [vec_a]
        init = np.concatenate(vec_list, axis=None)
        self.vec = init
        self.update_mod_terms()
        return init
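
A small numpy sketch of the concatenated layout described in the docstring, with hypothetical sizes (n_i, m_i):

import numpy as np

shape_list = [(8, 2), (6, 3)]                        # two modes, (n_i, m_i)
c_blocks = [np.zeros(n * m) for n, m in shape_list]  # stand-ins for the C_i blocks
vec_a = np.zeros(2 * 3)                              # A block, size M = m_0 * m_1
vec_a[0] = 1.0
init = np.concatenate(c_blocks + [vec_a], axis=None)
assert init.size == 8 * 2 + 6 * 3 + 2 * 3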
Example #4
    def _sp_op(self, i, mat, h_list, mod_term, err=1.e-6):
        if not h_list:
            return np.zeros(mat.shape)

        logging.debug(__('> OP on mat {}...', i))

        n, m = mat.shape
        partial_transform = self._partial_transform
        a = self.get_sub_vec(-1)
        a_h = np.conj(a)
        # Reduced density matrix of mode i, regularized before inversion.
        density = self._partial_product(i, a, a_h)
        inv_density = linalg.inv(density + np.identity(m) * err)
        sp = self.get_sub_vec(i)
        sp_h = np.conj(np.transpose(sp))
        # Projector onto the complement of the space spanned by the SPFs.
        projection = np.identity(n) - np.dot(sp, sp_h)

        # Apply `mat` and the mode-i operators in `h_list` to the coefficient
        # tensor, apply the other-mode factors in `mod_term`, then contract
        # with the conjugate coefficients to form the mean field.
        tmp = partial_transform(i, a, mat)
        for mat_j in h_list:
            tmp = partial_transform(i, tmp, mat_j)
        for j, mat_j in mod_term:
            if j != i:
                tmp = partial_transform(j, tmp, mat_j)
        tmp = self._partial_product(i, tmp, a_h)
        ans = np.dot(projection, np.dot(tmp, inv_density))

        return ans
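
The projection and the regularized inverse of the density matrix used above can be illustrated in isolation. A minimal numpy sketch with a random orthonormal SPF matrix (a stand-in, not the class method itself; the density matrix here is only a placeholder for the one contracted from the coefficient tensor):

import numpy as np
from scipy import linalg

n, m, err = 6, 3, 1.e-6
rng = np.random.default_rng(0)
sp, _ = np.linalg.qr(rng.standard_normal((n, m)))   # orthonormal SPF columns
projection = np.identity(n) - sp @ sp.conj().T      # 1 - P, the complement projector
assert np.allclose(projection @ sp, 0)

density = sp.conj().T @ sp                          # placeholder reduced density matrix
inv_density = linalg.inv(density + np.identity(m) * err)   # regularized inverse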
Example #5
def tensor_tree_template(init_rho, pb_index, rank=1, nbranch=2):
    """Get rho_n from rho in a Tensor Tree representation.

    Parameters
    ----------
    init_rho : np.ndarray
        Initial density matrix.
    pb_index : [int]
        Dimension of the primary basis for each term.
    rank : int
        Bond dimension.  Default is 1.
    nbranch : int
        Branching factor of the tree.  Default is 2.
    """
    n_state = get_n_state(init_rho)
    n_vec = np.zeros((rank, ), dtype=DTYPE)
    n_vec[0] = 1.0
    root_array = np.tensordot(init_rho, n_vec, axes=0)
    max_terms = len(pb_index)

    for i in pb_index:
        assert rank <= i

    # generate leaves
    leaves = list(range(max_terms))

    class new_spf(object):
        counter = 0
        prefix = 'SPF'

        def __new__(cls):
            name = cls.prefix + str(cls.counter)
            cls.counter += 1
            return name

    importance = list(reversed(range(len(pb_index))))
    graph, spf_root = huffman_tree(
        leaves,
        importances=importance,
        obj_new=new_spf,
        n_branch=nbranch,
    )

    root = 'root'
    graph[root] = [str(max_terms), str(max_terms + 1), spf_root]

    print(graph, root)

    root = Tensor.generate(graph, root)
    root.set_array(root_array)
    bond_dict = {}
    # Leaves
    l_range = list(pb_index) + [n_state] * 2
    for s, i, t, j in root.linkage_visitor():
        if isinstance(t, Leaf):
            bond_dict[(s, i, t, j)] = l_range[int(t.name)]
        else:
            bond_dict[(s, i, t, j)] = rank
    autocomplete(root, bond_dict)

    return root
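
The new_spf helper above uses __new__ as a name factory: each "instantiation" returns the next node name as a plain string, which is what huffman_tree receives for the internal nodes. A standalone copy to show the behaviour:

class new_spf(object):
    counter = 0
    prefix = 'SPF'

    def __new__(cls):
        # Returning a non-instance from __new__ skips __init__ entirely,
        # so each call simply yields the next name: 'SPF0', 'SPF1', ...
        name = cls.prefix + str(cls.counter)
        cls.counter += 1
        return name

assert [new_spf() for _ in range(3)] == ['SPF0', 'SPF1', 'SPF2']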
Example #6
    def davidson_precondition(cls, dim, matvec, noise=None):
        """Stadard precondition in Davidson algorithm

        Parameters
        ----------
        dim : int
        matvec : (dim,) ndarray -> (dim,) ndarray
        noise : float, optional
        """
        if noise is None:
            noise = math.sqrt(cls.tol)
        diag = np.zeros(dim)
        for i in range(dim):
            _v = np.zeros(dim)
            _v[i] = 1.
            diag[i] = matvec(_v)[i]

        def _precondition(residual, ritz_val, ritz_vec, _diag=diag):
            return residual / (ritz_val - _diag + noise)
        return _precondition
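
A standalone sketch of the same idea against a plain diagonal operator (hypothetical values, not the class method): the diagonal is probed with unit vectors exactly as above, and the returned closure divides the residual by (ritz_val - diag + noise).

import numpy as np

dim = 5
diag_values = np.arange(1., dim + 1.)        # hypothetical diagonal of the operator
matvec = lambda v: diag_values * v           # matrix-vector product of a diagonal matrix

diag = np.array([matvec(np.eye(dim)[i])[i] for i in range(dim)])
assert np.allclose(diag, diag_values)

noise = 1e-6
residual = np.ones(dim)
ritz_val = 2.5
update = residual / (ritz_val - diag + noise)   # Davidson correction direction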
Example #7
def matrix_repr(op, basis, cut_off=None, num_prec=None):
    """Matrix representation of the operator `op` (a function acting on the
    basis functions) in the given `basis`.  Only the lower triangle is
    computed and then mirrored, so `op` is assumed symmetric in this basis.
    """
    n_dims = len(basis)
    A = np.zeros((n_dims, n_dims))
    x = sym.symbols('x')
    for i in range(n_dims):
        for j in range(i + 1):
            mel = matrix_element(basis[i],
                                 op,
                                 basis[j],
                                 cut_off=cut_off,
                                 num_prec=num_prec)
            A[i, j] = mel
            A[j, i] = mel
    return A
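
A minimal sketch of the symmetric fill pattern with a hypothetical matrix_element stand-in (a plain inner product on discretized basis vectors), independent of the sympy machinery used above:

import numpy as np

def matrix_element(f_i, op, f_j):
    # Hypothetical stand-in for <f_i| op |f_j> on discretized basis vectors.
    return np.dot(f_i, op(f_j))

basis = [np.eye(3)[k] for k in range(3)]      # trivial discrete basis
op = lambda v: 2.0 * v                        # hypothetical Hermitian operator

n_dims = len(basis)
A = np.zeros((n_dims, n_dims))
for i in range(n_dims):
    for j in range(i + 1):                    # lower triangle only, then mirror
        mel = matrix_element(basis[i], op, basis[j])
        A[i, j] = mel
        A[j, i] = mel
assert np.allclose(A, 2.0 * np.eye(3))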
Example #8
    def gen_extended_rho(self, rho):
        """Get rho_n from rho with the conversion:
            rho[n_0, ..., n_(k-1), i, j]

        Parameters
        ----------
        rho : np.ndarray
        """
        shape = list(rho.shape)
        assert len(shape) == 2 and shape[0] == shape[1]
        # Let: rho_n[0, i, j] = rho and rho_n[n, i, j] = 0
        ext = np.zeros((np.prod(self.n_dims), ))
        ext[0] = 1
        rho_n = np.reshape(np.tensordot(ext, rho, axes=0),
                           list(self.n_dims) + shape)
        return np.array(rho_n, dtype=DTYPE)
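
The extension step itself is pure numpy. A self-contained sketch with a hypothetical 2x2 density matrix and hierarchy dimensions:

import numpy as np

rho = np.array([[0.7, 0.1], [0.1, 0.3]])      # hypothetical density matrix
n_dims = [3, 2]                               # hypothetical hierarchy dimensions

ext = np.zeros(np.prod(n_dims))
ext[0] = 1
rho_n = np.reshape(np.tensordot(ext, rho, axes=0), list(n_dims) + [2, 2])
# Only the (n_0, n_1) = (0, 0) slice carries rho; every other slice is zero.
assert np.allclose(rho_n[0, 0], rho)
assert np.allclose(rho_n[0, 1], 0) and np.allclose(rho_n[1, 0], 0)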
Example #9
def heisenberg(N, J=1.0, Jz=1.0, h=0):
    r"""Generate a MPO for Heisenberg Model.

    .. math::

        H = \sum^{N-2}_{i=0} \frac{J}{2} (S^+_i S^-_{i+1} + S^-_i S^+_{i+1})
            + J_z S^z_i S^z_{i+1}
            - \sum^{N-1}_{i=0} h S^z_i

    For the 1-D antiferromagnet, :math:`J = J_z = 1`.

    Parameters
    ----------
    N : int
        number of sites.
    J : float
        coupling constant.
    Jz : float
        coupling constant in z-direction.
    h : float
        external magnetic field.

    Returns
    -------
    mpo : [ndarray]
        A list of MPO tensors: the first has shape (1, 5, 2, 2), the bulk
        tensors (5, 5, 2, 2), and the last (5, 1, 2, 2).
    """
    # Local operators
    I = np.eye(2)
    Z = np.zeros((2, 2))
    Sz = np.array([[0.5, 0.0], [0.0, -0.5]])
    Sp = np.array([[0., 0.], [1., 0.]])
    Sm = np.array([[0., 1.], [0., 0.]])
    # left-hand edge: 1*5
    Wfirst = np.array(
        [[-h * Sz, (J / 2.) * Sm, (J / 2.) * Sp, (Jz / 2.) * Sz, I]])
    # mid: 5*5
    W = np.array([[I, Z, Z, Z, Z], [Sp, Z, Z, Z, Z], [Sm, Z, Z, Z, Z],
                  [Sz, Z, Z, Z, Z],
                  [-h * Sz, (J / 2.) * Sm, (J / 2.) * Sp, (Jz / 2.) * Sz, I]])
    # right-hand edge: 5*1
    Wlast = np.array([[I], [Sp], [Sm], [Sz], [-h * Sz]])
    mpo = [Wfirst] + ([W] * (N - 2)) + [Wlast]
    return mpo
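
A small check of the MPO above (assuming `heisenberg` is defined as above and `numpy` is imported as `np`): the edge tensors have shapes (1, 5, 2, 2) and (5, 1, 2, 2), and for N = 2 contracting the single virtual bond recovers the dense two-site operator the MPO encodes.

mpo = heisenberg(4)
assert mpo[0].shape == (1, 5, 2, 2)
assert all(W.shape == (5, 5, 2, 2) for W in mpo[1:-1])
assert mpo[-1].shape == (5, 1, 2, 2)

# N = 2: contract the virtual bond; rows are (i, k), columns are (j, l).
mpo2 = heisenberg(2)
H2 = np.einsum('abij,bckl->ikjl', mpo2[0], mpo2[1]).reshape(4, 4)
assert np.allclose(H2, H2.T)      # the encoded two-site operator is real symmetric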
Example #10
def autocomplete(root, n_bond_dict):
    """Autocomplete the tensors linked to `self.root` with suitable initial
    value.

    Parameters
    ----------
    root : Tensor
    n_bond_dict : {(Tensor, int, Tensor, int): int}
        A dictionary specifying the dimension of each bond, keyed by the
        linkage (tensor, axis, tensor, axis); the bonds to the leaves carry
        the primary basis dimensions.
    """
    for t in root.visitor(leaf=False):
        if t.array is None:
            axis = t.axis
            n_children = []
            for i, child, j in t.children():
                n_children.append(n_bond_dict[(t, i, child, j)])
            if axis is not None:
                p, p_i = t[axis]
                n_parent = n_bond_dict[(p, p_i, t, axis)]
                shape = [n_parent] + n_children
            else:
                n_parent = 1
                shape = n_children
            array = np.zeros((n_parent, np.prod(n_children)))
            for n, v_i in zip(triangular(n_children), array):
                v_i[n] = 1.
            array = np.reshape(array, shape)
            if axis is not None:
                array = np.moveaxis(array, 0, axis)
            t.set_array(array)
            t.normalize(forced=True)
            assert (
                t.axis is None or
                np.linalg.matrix_rank(t.local_norm()) == t.shape[t.axis]
            )
    if __debug__:
        for t in root.visitor():
            t.check_completness(strict=True)
    return
Example #11
def simple_heom(init_rho, n_indices):
    """Get rho_n from rho with the conversion:
        rho[i, j, n_0, ..., n_(k-1)]

    Parameters
    ----------
    init_rho : np.ndarray
        Initial density matrix.
    n_indices : [int]
        Dimensions of the auxiliary (hierarchy) indices n_0, ..., n_(k-1).
    """
    n_state = get_n_state(init_rho)
    # Let: rho_n[0, :, :] = rho and rho_n[n, :, :] = 0
    ext = np.zeros((np.prod(n_indices), ))
    ext[0] = 1.0
    new_shape = [n_state, n_state] + list(n_indices)
    rho_n = np.reshape(np.tensordot(init_rho, ext, axes=0), new_shape)

    root = Tensor(name='root', array=rho_n, axis=None)
    d = len(n_indices)
    root[0] = (Leaf(name=d), 0)
    root[1] = (Leaf(name=d + 1), 0)
    for k in range(d):  # root axes 0 and 1 already hold the physical indices i and j
        root[k + 2] = (Leaf(name=k), 0)

    return root
Example #12
    def __init__(self, matvec, init_vecs, n_vals=1, precondition=None):
        self._matvec = matvec
        self._n_vals = n_vals
        self._precondition = precondition
        self._diag = None
        self._trial_vecs = list(init_vecs)
        self._search_space = []
        self._column_space = []
        self._max_space = self.max_space + 3 * n_vals
        self._submatrix = np.zeros([self._max_space] * 2, dtype='d')

        self._last_ritz_vals = None
        self._last_convergence = None

        self._ritz_vals = None
        self._get_ritz_vecs = None    # function returns an iterator
        self._get_col_ritz_vecs = None    # function returns an iterator
        self._residuals = None    # iterator
        self._residual_norms = None
        self._convergence = None

        self.eigvals = None
        self.eigvecs = None
Example #13
    def autocomplete(self, n_bond_dict, max_entangled=False):
        """Autocomplete the tensors linked to `self.root` with suitable initial
        value.

        Parameters
        ----------
        n_bond_dict : {(Tensor, int, Tensor, int): int}
            A dictionary specifying the dimension of each bond, keyed by the
            linkage (tensor, axis, tensor, axis); the bonds to the leaves
            carry the primary basis dimensions.
        max_entangled : bool
            Whether to use the maximally entangled state as the initial value
            (for finite temperature and imaginary-time propagation).  Default
            is `False`.
        """
        for t in self.root.visitor(leaf=False):
            if t.array is None:
                axis = t.axis
                if max_entangled and not any(t.children(leaf=False)):
                    if len(list(t.children(leaf=True))) != 2 or axis is None:
                        raise RuntimeError('Not correct tensor graph for FT.')
                    for i, leaf, j in t.children():
                        if not leaf.name.endswith("'"):
                            n_leaf = n_bond_dict[(t, i, leaf, j)]
                            break
                    p, p_i = t[axis]
                    n_parent = n_bond_dict[(p, p_i, t, axis)]
                    vec_i = np.diag(np.ones((n_leaf, )) / np.sqrt(n_leaf))
                    vec_i = np.reshape(vec_i, -1)
                    init_vecs = [vec_i]
                    print(np.shape(init_vecs),
                          np.shape(self._local_matvec(leaf)))
                    da = DavidsonAlgorithm(self._local_matvec(leaf),
                                           init_vecs=init_vecs,
                                           n_vals=n_parent)
                    array = da.kernel(search_mode=True)
                    if len(array) >= n_parent:
                        array = array[:n_parent]
                    else:
                        for j in range(n_parent - len(array)):
                            v = np.zeros((n_leaf**2, ))
                            v[j] = 1.0
                            array.append(v)
                    assert len(array) == n_parent
                    assert np.allclose(array[0], vec_i)
                    array = np.reshape(array, (n_parent, n_leaf, n_leaf))
                else:
                    n_children = []
                    for i, child, j in t.children():
                        n_children.append(n_bond_dict[(t, i, child, j)])
                    if axis is not None:
                        p, p_i = t[axis]
                        n_parent = n_bond_dict[(p, p_i, t, axis)]
                        shape = [n_parent] + n_children
                    else:
                        n_parent = 1
                        shape = n_children
                    array = np.zeros((n_parent, np.prod(n_children)))
                    for n, v_i in zip(self.triangular(n_children), array):
                        v_i[n] = 1.
                    array = np.reshape(array, shape)
                    if axis is not None:
                        array = np.moveaxis(array, 0, axis)
                t.set_array(array)
                t.normalize(forced=True)
                assert (t.axis is None or np.linalg.matrix_rank(t.local_norm())
                        == t.shape[t.axis])
        if __debug__:
            for t in self.root.visitor():
                t.check_completness(strict=True)
        return
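
For the max_entangled branch, the first trial vector is the vectorized identity scaled by 1/sqrt(n_leaf). A small standalone numpy sketch with a hypothetical primary-basis dimension, showing that it is normalized:

import numpy as np

n_leaf = 4                                            # hypothetical primary-basis dimension
vec_i = np.diag(np.ones(n_leaf) / np.sqrt(n_leaf))    # I / sqrt(n), the maximally entangled pairing
vec_i = np.reshape(vec_i, -1)                         # flatten to a length n_leaf**2 vector
assert np.isclose(np.linalg.norm(vec_i), 1.0)
assert np.isclose(np.reshape(vec_i, (n_leaf, n_leaf)).trace(), np.sqrt(n_leaf))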