Example #1
def tensor_train_template(init_rho, pb_index, rank=1):
    """Get rho_n from rho in a Tensor Train representation.

    Parameters
    ----------
    rho : np.ndarray
    """
    n_vec = np.zeros((rank, ), dtype=DTYPE)
    n_vec[0] = 1.0
    root_array = np.tensordot(init_rho, n_vec, axes=0)

    root = Tensor(name='root', array=root_array, axis=None)
    max_terms = len(pb_index)

    # +2: i and j
    root[0] = (Leaf(name=max_terms), 0)
    root[1] = (Leaf(name=max_terms + 1), 0)

    for i in pb_index:
        assert rank <= i

    train = [root]
    for k in range(max_terms):
        if k < max_terms - 1:
            array = np.eye(rank, pb_index[k] * rank)
            array = np.reshape(array, (rank, -1, rank))
        else:
            array = np.eye(rank, pb_index[k])
        spf = Tensor(name=k, array=array, axis=0)
        l = Leaf(name=k)
        spf[0] = (train[-1], 2)
        spf[1] = (l, 0)
        train.append(spf)

    return root
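
The shapes involved are easier to see in isolation. Below is a minimal NumPy-only sketch of the array construction used above, with made-up placeholder sizes for rank, pb_index and the system dimension; the Tensor/Leaf wiring is omitted.

import numpy as np

# Placeholder sizes, only to illustrate the shapes built above.
rank, pb_index, n_state = 3, [10, 10, 10], 2

init_rho = np.zeros((n_state, n_state))
n_vec = np.zeros((rank,))
n_vec[0] = 1.0

# Root: the physical (i, j) indices plus one bond of size `rank`.
root_array = np.tensordot(init_rho, n_vec, axes=0)
print(root_array.shape)  # (2, 2, 3)

# Interior cores: identity reshaped to (rank, pb_index[k], rank).
core = np.eye(rank, pb_index[0] * rank).reshape(rank, -1, rank)
print(core.shape)  # (3, 10, 3)

# Last core: (rank, pb_index[-1]).
tail = np.eye(rank, pb_index[-1])
print(tail.shape)  # (3, 10)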
Example #2
def init_state(self):
    # Ground state of each DVR mode, combined into a product state by
    # successive outer products, then flattened to a vector.
    v = 1.0
    for i in range(self.rank):
        _, v_i = self.dvr_list[i].solve(n_state=1)
        v = np.tensordot(v, v_i[0], axes=0)
    v = np.reshape(v, -1)
    return v
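
init_state builds a product state by chaining outer products. The standalone check below, with made-up single-mode vectors standing in for the DVR ground states, shows that the tensordot/reshape chain is the same thing as a repeated Kronecker product.

import numpy as np

# Stand-ins for the single-mode ground states returned by dvr_list[i].solve().
v_list = [np.array([1.0, 0.0]), np.array([0.6, 0.8]), np.array([0.0, 1.0])]

v = 1.0
for v_i in v_list:
    v = np.tensordot(v, v_i, axes=0)  # outer product, one mode at a time
v = np.reshape(v, -1)

# The same product state via repeated Kronecker products.
w = v_list[0]
for v_i in v_list[1:]:
    w = np.kron(w, v_i)

print(np.allclose(v, w))  # True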
Example #3
def tensor_tree_template(init_rho, pb_index, rank=1, nbranch=2):
    """Get rho_n from rho in a Tensor Tree representation.

    Parameters
    ----------
    rho : np.ndarray
    """
    n_state = get_n_state(init_rho)
    n_vec = np.zeros((rank, ), dtype=DTYPE)
    n_vec[0] = 1.0
    root_array = np.tensordot(init_rho, n_vec, axes=0)
    max_terms = len(pb_index)

    for i in pb_index:
        assert rank <= i

    # generate leaves
    leaves = list(range(max_terms))

    class new_spf(object):
        counter = 0
        prefix = 'SPF'

        def __new__(cls):
            name = cls.prefix + str(cls.counter)
            cls.counter += 1
            return name

    importance = list(reversed(range(len(pb_index))))
    graph, spf_root = huffman_tree(
        leaves,
        importances=importance,
        obj_new=new_spf,
        n_branch=nbranch,
    )

    root = 'root'
    graph[root] = [str(max_terms), str(max_terms + 1), spf_root]

    print(graph, root)

    root = Tensor.generate(graph, root)
    root.set_array(root_array)
    bond_dict = {}
    # Leaves
    l_range = list(pb_index) + [n_state] * 2
    for s, i, t, j in root.linkage_visitor():
        if isinstance(t, Leaf):
            bond_dict[(s, i, t, j)] = l_range[int(t.name)]
        else:
            bond_dict[(s, i, t, j)] = rank
    autocomplete(root, bond_dict)

    return root
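
A side note on the new_spf helper used above: because __new__ returns a plain string rather than an instance, calling the class simply generates consecutive labels for the internal nodes that huffman_tree creates. A minimal standalone demonstration:

class new_spf(object):
    counter = 0
    prefix = 'SPF'

    def __new__(cls):
        # Returning a non-instance from __new__ skips __init__ entirely,
        # so new_spf() just hands back the next label.
        name = cls.prefix + str(cls.counter)
        cls.counter += 1
        return name

print(new_spf(), new_spf(), new_spf())  # SPF0 SPF1 SPF2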
Example #4
    def gen_extended_rho(self, rho):
        """Get rho_n from rho with the conversion:
            rho[n_0, ..., n_(k-1), i, j]

        Parameters
        ----------
        rho : np.ndarray
        """
        shape = list(rho.shape)
        assert len(shape) == 2 and shape[0] == shape[1]
        # Let: rho_n[0, i, j] = rho and rho_n[n, i, j] = 0
        ext = np.zeros((np.prod(self.n_dims), ))
        ext[0] = 1
        rho_n = np.reshape(np.tensordot(ext, rho, axes=0),
                           list(self.n_dims) + shape)
        return np.array(rho_n, dtype=DTYPE)
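
The effect of gen_extended_rho can be checked with plain NumPy (placeholder n_dims and rho below): only the n = (0, ..., 0) slice of the extended tensor carries rho, and every other slice is zero.

import numpy as np

n_dims = (4, 4)  # two hierarchy indices of dimension 4 (made-up sizes)
rho = np.array([[0.5, 0.2], [0.2, 0.5]])

ext = np.zeros((np.prod(n_dims),))
ext[0] = 1.0
rho_n = np.reshape(np.tensordot(ext, rho, axes=0), list(n_dims) + list(rho.shape))

print(rho_n.shape)                    # (4, 4, 2, 2)
print(np.allclose(rho_n[0, 0], rho))  # True: the (0, 0) slice is rho
print(np.count_nonzero(rho_n) == np.count_nonzero(rho))  # True: everything else is zero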
Example #5
def simple_heom(init_rho, n_indices):
    """Get rho_n from rho with the conversion:
        rho[i, j, n_0, ..., n_(k-1)]

    Parameters
    ----------
    rho : np.ndarray
    """
    n_state = get_n_state(init_rho)
    # Let: rho_n[:, :, 0, ..., 0] = rho and all other slices be 0
    ext = np.zeros((np.prod(n_indices), ))
    ext[0] = 1.0
    new_shape = [n_state, n_state] + list(n_indices)
    rho_n = np.reshape(np.tensordot(init_rho, ext, axes=0), new_shape)

    root = Tensor(name='root', array=rho_n, axis=None)
    d = len(n_indices)
    # The two extra leaves (i and j) are the system indices, named d and d + 1;
    # the hierarchy leaves follow at an offset of +2.
    root[0] = (Leaf(name=d), 0)
    root[1] = (Leaf(name=d + 1), 0)
    for k in range(d):
        root[k + 2] = (Leaf(name=k), 0)

    return root
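
simple_heom uses the opposite index order to gen_extended_rho in Example #4. With axes=0, np.tensordot is just an outer product, so the argument order alone decides whether the hierarchy indices lead or trail, as the toy sizes below show.

import numpy as np

rho = np.eye(2) / 2
ext = np.zeros(9)
ext[0] = 1.0

# Example #4 ordering: hierarchy indices first, rho_n[n_0, ..., i, j]
print(np.tensordot(ext, rho, axes=0).shape)  # (9, 2, 2)

# Example #5 ordering: system indices first, rho_n[i, j, n_0, ...]
print(np.tensordot(rho, ext, axes=0).shape)  # (2, 2, 9)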
Example #6
    beta=beta,
)

model = SBM(
    sys_ham=np.array([[-0.5 * e, v], [v, 0.5 * e]], dtype=DTYPE),
    sys_op=np.array([[-0.5, 0.0], [0.0, 0.5]], dtype=DTYPE),
    ph_parameters=ph_parameters,
    ph_dims=(dof * [max_tier]),
    bath_corr=drude,
    bath_dims=[max_tier],
)

# init state
A, B = 1.0, 1.0
wfn_0 = np.array([A, B]) / np.sqrt(A**2 + B**2)
rho_0 = np.tensordot(wfn_0, wfn_0, axes=0)

# Propagation
dt_unit = Quantity(0.001, 'fs').value_in_au
callback_interval = 100
count = 10_000


def test_heom(fname=None):
    ph_dims = list(np.repeat(model.ph_dims, 2))
    n_dims = ph_dims if model.bath_dims is None else ph_dims + model.bath_dims
    print(n_dims)

    root = tensor_tree_template(rho_0, n_dims, rank=rank_heom)
    leaves = root.leaves()
    h_list = model.heom_h_list(leaves[0], leaves[1], leaves[2:], beta=beta)
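
The initial state defined above is a pure-state density matrix, obtained as the outer product of the normalized wavefunction with itself. A quick standalone check of the expected properties:

import numpy as np

A, B = 1.0, 1.0
wfn_0 = np.array([A, B]) / np.sqrt(A**2 + B**2)
rho_0 = np.tensordot(wfn_0, wfn_0, axes=0)

print(rho_0)                              # [[0.5, 0.5], [0.5, 0.5]]
print(np.isclose(np.trace(rho_0), 1.0))   # True: unit trace
print(np.allclose(rho_0 @ rho_0, rho_0))  # True: pure state (rho^2 = rho)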
Example #7
    from minitn.heom.noise import Drude
    from minitn.lib.units import Quantity

    # System
    e = Quantity(6500, 'cm-1').value_in_au
    v = Quantity(500, 'cm-1').value_in_au
    # Bath
    lambda_0 = Quantity(2000, 'cm-1').value_in_au  # reorganization energy
    omega_0 = Quantity(2000, 'cm-1').value_in_au  # vibrational frequency
    beta = Quantity(300, 'K').value_in_au  # temperature
    # Superparameters
    max_terms = 5  # (terms used in the expansion of the correlation function)
    max_tier = 10  # (number of possible values for each n_k in the extended rho)

    h = np.array([[0, v], [v, e]])

    op = np.array([[0, 0], [0, 1]])

    corr = Drude(lambda_0, omega_0, max_terms, beta)
    heom = Hierachy([max_tier] * max_terms, h, op, corr)
    phi = [1 / np.sqrt(2), 1 / np.sqrt(2)]
    phi /= np.linalg.norm(phi)
    rho_0 = np.tensordot(phi, phi, axes=0)

    init_rho = heom.gen_extended_rho(rho_0)
    print(init_rho.shape)
    for n, term in enumerate(heom.diff()):
        print('- Term {}:'.format(n))
        for label, array in term:
            print('Label: {}, shape: {}'.format(label, array.shape))
Example #8
def _partial_product(array1, i, array2, j):
    # Contract axis i of array1 with axis j of array2, then move array2's
    # free axes (which tensordot appends at the end) back to position i.
    l1, l2 = array1.ndim, array2.ndim
    ans = np.tensordot(array1, array2, axes=([i], [j]))
    ans = np.moveaxis(ans, list(range(l1 - 1, l1 + l2 - 2)),
                      list(range(i, i + l2 - 1)))
    return ans
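
_partial_product contracts one axis of each array and then restores the axis ordering, since np.tensordot always appends the second array's free axes at the end. A standalone check of both effects, using random arrays of illustrative shapes:

import numpy as np

# For matrices, contracting axis 1 of A with axis 0 of B is the ordinary product;
# no axes need moving in this case.
A, B = np.random.rand(3, 4), np.random.rand(4, 5)
print(np.allclose(np.tensordot(A, B, axes=([1], [0])), A @ B))  # True

# In general, the free axes of the second array land at the end, which is why
# _partial_product moves them back to where the contracted axis i was.
T, M = np.random.rand(2, 4, 6), np.random.rand(5, 4, 7)
raw = np.tensordot(T, M, axes=([1], [1]))
print(raw.shape)                               # (2, 6, 5, 7)
print(np.moveaxis(raw, [2, 3], [1, 2]).shape)  # (2, 5, 7, 6)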