def diff(t, y):
    """Right-hand side of the ODE: d(vec)/dt for the flattened root array.

    This function will not change the arrays in tensor network.

    Parameters
    ----------
    t : float
        Current time (unused here, but required by ODE-solver signatures).
    y : 1-D ndarray
        Flattened array for ``tensor``.

    Returns
    -------
    1-D ndarray
        Sum of all single-term derivatives, flattened like ``y``.
    """
    # Save the current array so the tensor network is restored afterwards,
    # honoring the docstring's no-mutation promise (same pattern as the
    # cached-EOM variant of this function elsewhere in this file).
    origin = tensor.array
    tensor.set_array(np.reshape(y, tensor.shape))
    ans = np.zeros_like(y)
    for n in self.term_visitor():
        ans += np.reshape(self._single_diff(tensor, n), -1)
    tensor.set_array(origin)
    return ans
def diff(t, y):
    """Right-hand side of the equation of motion for the flattened root array.

    This function will not change the arrays in tensor network.

    Parameters
    ----------
    t : float
        Current time; recorded on ``self`` unless propagating in
        imaginary time.
    y : 1-D ndarray
        Flattened array for ``tensor``.

    Returns
    -------
    1-D ndarray
        The (possibly energy-shifted and rescaled) derivative of ``y``.
    """
    # Keep a copy of the current array so it can be put back at the end.
    saved = tensor.array
    tensor.set_array(np.reshape(y, tensor.shape))
    derivative = np.zeros_like(y)
    # Real-time propagation tracks the clock; imaginary-time does not.
    self.time = None if imaginary else t
    for n in self.term_visitor(use_cache=True):
        derivative += np.reshape(self._single_eom(tensor, n, cache=cache), -1)
    # Subtract the reference energy, if one was recorded at initialization.
    if self.init_energy is not None:
        derivative -= self.init_energy * y
    derivative /= self.coefficient()
    tensor.set_array(saved)
    return derivative
def _matvec(self, vec): v = np.reshape(vec, self.io_sizes) ans = np.zeros_like(v) for i, h_i in enumerate(self.h_list): v_i = np.swapaxes(v, -1, i) size_i = (self.io_sizes[:i] + self.io_sizes[i + 1:] + [self.io_sizes[i]]) v_i = np.reshape(v_i, (-1, self.io_sizes[i])) tmp = np.array(list(map(h_i.dot, v_i))) tmp = np.reshape(tmp, size_i) ans += np.swapaxes(tmp, -1, i) ans = np.reshape(ans, -1) if self.v_rst is not None: ans = ans + self.v_rst * vec return ans
def lowering(self, vec):
    """Apply the lowering (annihilation) operator in the number basis.

    Implements ``a |n> = sqrt(n) |n-1>``, i.e.
    ``ans[k] = sqrt(k + 1) * vec[k + 1]`` with ``ans[-1] = 0``.

    Parameters
    ----------
    vec : 1-D ndarray of length self.dim
        State vector; validated by ``self.check_vec``.

    Returns
    -------
    ndarray
        Lowered vector of the same shape and dtype as ``vec``.
    """
    self.check_vec(vec)
    ans = np.zeros_like(vec)
    # Vectorized sqrt(n) table instead of a per-element Python loop.
    weights = np.sqrt(np.arange(self.dim))
    # Shift the weighted amplitudes down one level.
    ans[:-1] = (weights * vec)[1:]
    return ans
def raising(self, vec):
    """Apply the raising (creation) operator in the number basis.

    Implements ``a^dagger |n> = sqrt(n + 1) |n + 1>``, i.e.
    ``out[k] = sqrt(k) * vec[k - 1]`` with ``out[0] = 0``.

    Parameters
    ----------
    vec : 1-D ndarray of length self.dim
        State vector; validated by ``self.check_vec``.

    Returns
    -------
    ndarray
        Raised vector of the same shape and dtype as ``vec``.
    """
    self.check_vec(vec)
    out = np.zeros_like(vec)
    # Shift amplitudes up one level, then weight level k by sqrt(k)
    # (the k = 0 entry is already zero, so the full-table multiply is safe).
    out[1:] = vec[:-1]
    out *= np.sqrt(np.arange(self.dim))
    return out
def _matvec(self, vec): ans = np.zeros_like(vec, dtype=complex) for i in range(self.n_terms): ans += self.term_func(i, vec) return ans