def __init__(self, conf_list, shape_list, fast=False, hbar=1., m_e=1.):
    r"""N-dimensional DVR using sine-DVR for each 1-D degree of freedom.

    Parameters
    ----------
    conf_list : [(float, float, int)]
        ``(lower_bound, upper_bound, n_dvr)`` for each degree of freedom.
    shape_list : [(int, int), ..., (int, int)]
        ``shape_list == [(n_1, m_1), ..., (n_p, m_p)]``, which corresponds
        to the structure of the state tree.
    fast : bool, optional
        Whether to use the fast sine-DVR implementation.
    hbar : float, optional
    m_e : float, optional
    """
    super(MCTDH, self).__init__(conf_list, fast=fast, hbar=hbar, m_e=m_e)
    # shape[1] is m and shape[0] is n
    shape_a = [shape[1] for shape in shape_list]
    # add shape of A tensor to the end
    self.shape_list = shape_list + [shape_a]
    self.size_list = [np.prod(shape) for shape in self.shape_list]
    self.size = sum(self.size_list)
    self.h_terms = None
    self.mod_terms = None
    self.vec = None
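# Usage sketch (illustrative only; the grid bounds, point counts, and SPF
# numbers below are made-up values, and the helper name is not part of the
# original source):
def _example_mctdh_setup():
    conf_list = [(-5.0, 5.0, 32), (-5.0, 5.0, 32)]   # (lower, upper, n_dvr) per mode
    shape_list = [(32, 8), (32, 8)]                  # (n_p, m_p) per mode
    return MCTDH(conf_list, shape_list, fast=True)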
def __init__(self, L, W, R):
    self.L = L
    self.W = W
    self.R = R
    self.io_shape = [W.shape[3], L.shape[2], R.shape[2]]
    self.size = np.prod(self.io_shape)
    super(EnvTensor, self).__init__('d', (self.size, self.size))
def tensorize(self, vec, use_aux=False, shape_dict=None):
    """Read a vector and set the arrays of the tensors in the network of self.

    Parameters
    ----------
    vec : (n,) ndarray
        A vector with the same shape as the one returned by
        ``self.vectorize()``.
    use_aux : bool, optional
        If `True`, write the arrays into ``.aux``; otherwise write them into
        ``.array``.
    shape_dict : dict, optional
        Mapping from each tensor to the shape to be loaded; the current shape
        of each tensor is used when omitted.
    """
    start = 0
    for t in self.visitor(leaf=False):
        shape = t.shape if shape_dict is None else shape_dict[t]
        end = start + np.prod(shape)
        array = np.reshape(vec[start:end], shape)
        if use_aux:
            t.aux = array
        else:
            t.set_array(array)
        start = end
    return
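# Round-trip sketch (hypothetical helper, not in the original source): `net` is
# assumed to be an object of this class whose arrays are already set, so
# vectorize followed by tensorize should leave it unchanged.
def _example_tensorize_roundtrip(net):
    vec = net.vectorize()   # flatten every non-leaf array into one vector
    net.tensorize(vec)      # write the same data back into the tensors
    return net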
def gen_extended_rho(self, rho):
    """Get rho_n from rho with the convention: rho_n[n_0, ..., n_(k-1), i, j].

    Parameters
    ----------
    rho : np.ndarray
        Density matrix of the system; must be a square matrix.
    """
    shape = list(rho.shape)
    assert len(shape) == 2 and shape[0] == shape[1]
    # Let rho_n[0, ..., 0, i, j] = rho[i, j] and zero elsewhere.
    ext = np.zeros((np.prod(self.n_dims), ))
    ext[0] = 1
    rho_n = np.reshape(np.tensordot(ext, rho, axes=0),
                       list(self.n_dims) + shape)
    return np.array(rho_n, dtype=DTYPE)
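# Usage sketch (hypothetical helper; `hierarchy` stands for an instance of this
# class with `n_dims` already set, e.g. n_dims == (5, 5) -- illustrative values):
def _example_extended_rho(hierarchy):
    rho = np.array([[1.0, 0.0], [0.0, 0.0]])   # system initially in state |0>
    rho_n = hierarchy.gen_extended_rho(rho)
    # Only the (0, ..., 0) block is populated; all other blocks are zero.
    return rho_n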
def __init__(self, conf_list, hbar=1., m_e=1., fast=False):
    self.rank = len(conf_list)
    self.n_list = []
    self.bound_list = []
    self.dvr_list = []
    DVR_1d = FastSineDVR if fast else SineDVR
    for i in range(self.rank):
        lower_bound, upper_bound, n_dvr = conf_list[i]
        self.n_list.append(n_dvr)
        self.bound_list.append((lower_bound, upper_bound))
        sp_dvr = DVR_1d(lower_bound, upper_bound, n_dvr, hbar=hbar, m_e=m_e)
        self.dvr_list.append(sp_dvr)
    self.dim = np.prod(self.n_list)
    self.grid_points_list = [dvr_i.grid_points for dvr_i in self.dvr_list]
    self.hbar = hbar
    self.h_list = None
    self.v_rst = None
    self._diag_v_rst = None
    self.energy = None
    self.eigenstates = None
def autocomplete(root, n_bond_dict):
    """Autocomplete the tensors linked to `root` with suitable initial values.

    Parameters
    ----------
    root : Tensor
    n_bond_dict : dict
        A dictionary mapping each linkage ``(tensor, i, child, j)`` to the
        dimension of that bond/primary basis.
    """
    for t in root.visitor(leaf=False):
        if t.array is None:
            axis = t.axis
            n_children = []
            for i, child, j in t.children():
                n_children.append(n_bond_dict[(t, i, child, j)])
            if axis is not None:
                p, p_i = t[axis]
                n_parent = n_bond_dict[(p, p_i, t, axis)]
                shape = [n_parent] + n_children
            else:
                n_parent = 1
                shape = n_children
            array = np.zeros((n_parent, np.prod(n_children)))
            for n, v_i in zip(triangular(n_children), array):
                v_i[n] = 1.
            array = np.reshape(array, shape)
            if axis is not None:
                array = np.moveaxis(array, 0, axis)
            t.set_array(array)
            t.normalize(forced=True)
            assert (t.axis is None or
                    np.linalg.matrix_rank(t.local_norm()) == t.shape[t.axis])
    if __debug__:
        for t in root.visitor():
            t.check_completness(strict=True)
    return
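# Usage sketch (hypothetical helper, not in the original source): fill every
# unset tensor under `root` assuming one uniform dimension for all linkages
# (an arbitrary illustrative value; real trees would use per-linkage sizes).
def _example_autocomplete(root, bond_dim=5):
    n_bond_dict = {}
    for t in root.visitor(leaf=False):
        for i, child, j in t.children():
            n_bond_dict[(t, i, child, j)] = bond_dim
    autocomplete(root, n_bond_dict)   # fills arrays in place
    return root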
def simple_heom(init_rho, n_indices):
    """Get rho_n from init_rho with the convention:
    rho_n[i, j, n_0, ..., n_(k-1)].

    Parameters
    ----------
    init_rho : np.ndarray
        Initial density matrix of the system.
    n_indices : [int]
        Dimension of each hierarchy index.
    """
    n_state = get_n_state(init_rho)
    # Let rho_n[:, :, 0, ..., 0] = init_rho and zero elsewhere.
    ext = np.zeros((np.prod(n_indices), ))
    ext[0] = 1.0
    new_shape = [n_state, n_state] + list(n_indices)
    rho_n = np.reshape(np.tensordot(init_rho, ext, axes=0), new_shape)

    root = Tensor(name='root', array=rho_n, axis=None)
    d = len(n_indices)
    root[0] = (Leaf(name=d), 0)
    root[1] = (Leaf(name=d + 1), 0)
    # +2: axes 0 and 1 are occupied by i and j
    for k in range(d):
        root[k + 2] = (Leaf(name=k), 0)
    return root
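# Minimal usage sketch (the matrix and index depths are illustrative values,
# and the helper name is not part of the original source):
def _example_simple_heom():
    init_rho = np.array([[1.0, 0.0], [0.0, 0.0]])   # two-level system in |0>
    # Two hierarchy indices, each truncated at depth 5.
    return simple_heom(init_rho, [5, 5])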
def projector(self, comp=False):
    """[Deprecated] Return the projector corresponding to self.

    Parameters
    ----------
    comp : bool, optional
        If `True`, return the complementary projector ``1 - P`` instead.

    Returns
    -------
    ans : ndarray
    """
    axis = self.axis
    if axis is not None:
        array = self.array
        shape = self.shape
        dim = shape.pop(self.axis)
        comp_dim = np.prod(shape)
        array = np.moveaxis(array, axis, -1)
        array = np.reshape(array, (-1, dim))
        array_h = np.conj(np.transpose(array))
        ans = np.dot(array, array_h)
        if comp:
            identity = np.identity(comp_dim)
            ans = identity - ans
        ans = np.reshape(ans, shape * 2)
        return ans
    else:
        raise RuntimeError('Need to specify the normalization axis!')
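# Usage sketch (hypothetical helper): `tensor` is assumed to be a non-root
# Tensor, i.e. its normalization axis `tensor.axis` is set.
def _example_projector(tensor):
    p = tensor.projector()           # projector built from the tensor's array
    q = tensor.projector(comp=True)  # complementary projector 1 - P
    return p, q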
def autocomplete(self, n_bond_dict, max_entangled=False):
    """Autocomplete the tensors linked to `self.root` with suitable initial
    values.

    Parameters
    ----------
    n_bond_dict : dict
        A dictionary mapping each linkage ``(tensor, i, child, j)`` to the
        dimension of that bond/primary basis.
    max_entangled : bool
        Whether to use the maximally entangled state as the initial value
        (for finite temperature and imaginary-time propagation). Default is
        `False`.
    """
    for t in self.root.visitor(leaf=False):
        if t.array is None:
            axis = t.axis
            if max_entangled and not any(t.children(leaf=False)):
                if len(list(t.children(leaf=True))) != 2 or axis is None:
                    raise RuntimeError('Not correct tensor graph for FT.')
                for i, leaf, j in t.children():
                    if not leaf.name.endswith("'"):
                        n_leaf = n_bond_dict[(t, i, leaf, j)]
                        break
                p, p_i = t[axis]
                n_parent = n_bond_dict[(p, p_i, t, axis)]
                vec_i = np.diag(np.ones((n_leaf, )) / np.sqrt(n_leaf))
                vec_i = np.reshape(vec_i, -1)
                init_vecs = [vec_i]
                print(np.shape(init_vecs), np.shape(self._local_matvec(leaf)))
                da = DavidsonAlgorithm(self._local_matvec(leaf),
                                       init_vecs=init_vecs,
                                       n_vals=n_parent)
                array = da.kernel(search_mode=True)
                if len(array) >= n_parent:
                    array = array[:n_parent]
                else:
                    for j in range(n_parent - len(array)):
                        v = np.zeros((n_leaf**2, ))
                        v[j] = 1.0
                        array.append(v)
                assert len(array) == n_parent
                assert np.allclose(array[0], vec_i)
                array = np.reshape(array, (n_parent, n_leaf, n_leaf))
            else:
                n_children = []
                for i, child, j in t.children():
                    n_children.append(n_bond_dict[(t, i, child, j)])
                if axis is not None:
                    p, p_i = t[axis]
                    n_parent = n_bond_dict[(p, p_i, t, axis)]
                    shape = [n_parent] + n_children
                else:
                    n_parent = 1
                    shape = n_children
                array = np.zeros((n_parent, np.prod(n_children)))
                for n, v_i in zip(self.triangular(n_children), array):
                    v_i[n] = 1.
                array = np.reshape(array, shape)
                if axis is not None:
                    array = np.moveaxis(array, 0, axis)
            t.set_array(array)
            t.normalize(forced=True)
            assert (t.axis is None or
                    np.linalg.matrix_rank(t.local_norm()) == t.shape[t.axis])
    if __debug__:
        for t in self.root.visitor():
            t.check_completness(strict=True)
    return
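# Usage sketch (hypothetical helper; `net` stands for an instance of this class
# with its root tree already linked, and `n_bond_dict` follows the linkage
# convention described in the docstring above):
def _example_ft_autocomplete(net, n_bond_dict):
    # Max-entangled initial state for finite-temperature propagation.
    net.autocomplete(n_bond_dict, max_entangled=True)
    return net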
def split(self, axis, indice=None, root=None, child=None, rank=None,
          err=None, normalized=False):
    """Split the root Tensor along a certain axis/certain axes.

    Parameters
    ----------
    axis : {int, [int]}
        Axis or axes of self that go to the new root tensor.
    indice : (int, int)
        Linkage between root and child.
    root : Tensor
        Tensor to be the new root node. `None` to create a new Tensor.
    child : Tensor
        Tensor to be the new child node. `None` to create a new Tensor.
    rank : int
        Max rank in SVD.
    err : float
        Max error in SVD.
    normalized : bool
        Passed to the constructors of the new tensors.

    Returns
    -------
    root : Tensor
        New root node in the same environment as self.
    child : Tensor
        New child node in the same environment as self.

    Notes
    -----
    When splitting a Tensor, this method should make ``root.unite(i)``
    (i.e. uniting with child) an inverse operation in terms of the tensor
    network.
    """
    if self.axis is not None:
        raise RuntimeError('Can only split the root Tensor!')
    try:
        axes1 = list(sorted(axis))
    except TypeError:
        axes1 = [axis]
        default_indice = (0, axis)
    else:
        default_indice = (0, 0)
    axes2 = [i for i in range(self.order) if i not in axes1]
    index1, index2 = indice if indice is not None else default_indice

    # Save all data needed in `self`
    a = self.array
    children = list(self.children(axis=None))
    name = self.name
    shape = self.shape
    shape1, shape2 = [shape[i] for i in axes1], [shape[i] for i in axes2]
    # Name settings only for clarity..
    if '+' in name:
        name1, name2 = name.split('+')
    else:
        name1, name2 = name + '\'', name

    # Calculate arrays for new tensors
    for n, i in enumerate(axes1):
        a = np.moveaxis(a, i, n)
    a = np.reshape(a, (np.prod([1] + shape1), np.prod([1] + shape2)))
    u, s, vh = compressed_svd(a, rank=rank, err=err)
    root_array = np.reshape(np.dot(u, s), shape1 + [-1])
    root_array = np.moveaxis(root_array, -1, index1)
    child_array = np.reshape(vh, [-1] + shape2)
    child_array = np.moveaxis(child_array, 0, index2)

    # Create/write new tensors.
    cls = type(self)
    if root is None:
        root = cls(name=name1, array=root_array, axis=None,
                   normalized=normalized)
    else:
        root.axis = None
        root.set_array(root_array)
    if child is None:
        child = cls(name=name2, array=child_array, axis=index2,
                    normalized=normalized)
    else:
        child.axis = index2
        child.set_array(child_array)

    # Fix linkage info
    axes1.insert(index1, None)
    axes2.insert(index2, None)
    unlink = self.unlink
    link = self.link
    link_info = [(root, index1, child, index2)]
    for i, t, j in children:
        is_1 = i in axes1
        axes = axes1 if is_1 else axes2
        tensor = root if is_1 else child
        unlink(self, i, t, j)
        link_info.append((tensor, axes.index(i), t, j))
    for linkage in link_info:
        link(*linkage)
    return root, child
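# Usage sketch (hypothetical helper; the axes and rank are illustrative): split
# axes 0 and 1 of the root tensor into a new root, truncating the SVD at rank 10.
def _example_split(root_tensor):
    new_root, child = root_tensor.split(axis=[0, 1], rank=10)
    return new_root, child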
def __init__(self, h_list, v_rst):
    self.h_list = h_list
    self.v_rst = v_rst
    self.io_sizes = [h_i.shape[0] for h_i in h_list]
    shape = [np.prod(self.io_sizes)] * 2
    super(_Hamiltonian, self).__init__('d', shape)