def setUp(self):
    """Generate random parameters for a tensor train"""

    # set tolerance for relative errors
    self.tol = 1e-9

    # generate random order in [3,5]
    self.order = np.random.randint(3, 6)

    # generate random ranks in [3,5]
    self.ranks = [1] + list(np.random.randint(3, high=6, size=self.order - 1)) + [1]

    # generate random row and column dimensions in [3,5]
    self.row_dims = list(np.random.randint(3, high=6, size=self.order))
    self.col_dims = list(np.random.randint(3, high=6, size=self.order))

    # define cores
    self.cores = [2 * np.random.rand(self.ranks[i], self.row_dims[i], self.col_dims[i], self.ranks[i + 1]) - 1
                  for i in range(self.order)]

    # construct tensor train
    self.t = TT(self.cores)
def setUp(self):
    """Construct a Toeplitz matrix for testing the routines in sle.py"""

    # set tolerance for the error
    self.tol = 1e-7

    # set order of the resulting TT operator
    self.order = 10

    # generate Toeplitz matrix
    self.operator_mat = sp.linalg.toeplitz(np.arange(1, 2 ** self.order + 1), np.arange(1, 2 ** self.order + 1))

    # decompose Toeplitz matrix into TT format
    self.operator_tt = TT(self.operator_mat.reshape([2] * 2 * self.order))

    # define right-hand side as vector of all ones (matrix case)
    self.rhs_mat = np.ones(self.operator_mat.shape[0])

    # define right-hand side as tensor train of all ones (tensor case)
    self.rhs_tt = tt.ones(self.operator_tt.row_dims, [1] * self.operator_tt.order)

    # define initial tensor train for solving the system of linear equations
    self.initial_tt = tt.ones(self.operator_tt.row_dims, [1] * self.operator_tt.order, ranks=5).ortho_right()
def test_pinv(self):
    """test pinv"""

    # construct non-operator tensor train
    cores = [self.cores[i][:, :, 0:1, :] for i in range(self.order)]
    t = TT(cores)

    # compute pseudoinverse
    t_pinv = TT.pinv(t, self.order - 1)

    # matricize tensor trains
    t = t.full().reshape([np.prod(self.row_dims[:-1]), self.row_dims[-1]])
    t_pinv = t_pinv.full().reshape([np.prod(self.row_dims[:-1]), self.row_dims[-1]]).transpose()

    # compute relative errors
    rel_err_1 = np.linalg.norm(t @ t_pinv @ t - t) / np.linalg.norm(t)
    rel_err_2 = np.linalg.norm(t_pinv @ t @ t_pinv - t_pinv) / np.linalg.norm(t_pinv)
    rel_err_3 = np.linalg.norm((t @ t_pinv).transpose() - t @ t_pinv) / np.linalg.norm(t @ t_pinv)
    rel_err_4 = np.linalg.norm((t_pinv @ t).transpose() - t_pinv @ t) / np.linalg.norm(t_pinv @ t)

    # check if relative errors are smaller than tolerance
    self.assertLess(rel_err_1, self.tol)
    self.assertLess(rel_err_2, self.tol)
    self.assertLess(rel_err_3, self.tol)
    self.assertLess(rel_err_4, self.tol)
def setUp(self):
    """Generate random parameters for a tensor train"""

    # set tolerance for relative errors
    self.tol = 1e-7

    # set threshold and maximum rank for orthonormalization
    self.threshold = 1e-14
    self.max_rank = 50

    # generate random order in [3,5]
    self.order = np.random.randint(3, 6)

    # generate random ranks in [3,5]
    self.ranks = [1] + list(np.random.randint(3, high=6, size=self.order - 1)) + [1]

    # generate random row and column dimensions in [3,5]
    self.row_dims = list(np.random.randint(3, high=6, size=self.order))
    self.col_dims = list(np.random.randint(3, high=6, size=self.order))

    # define cores
    self.cores = [2 * np.random.rand(self.ranks[i], self.row_dims[i], self.col_dims[i], self.ranks[i + 1]) - 1
                  for i in range(self.order)]

    # construct tensor train
    self.t = TT(self.cores, threshold=self.threshold, max_rank=self.max_rank)
def test_right_orthonormalization(self):
    """test right-orthonormalization"""

    # construct non-operator tensor train
    cores = [self.cores[i][:, :, 0:1, :] for i in range(self.order)]
    t_col = TT(cores)

    # right-orthonormalize t
    t_right = t_col.ortho_right()

    # test if cores are right-orthonormal
    err = 0
    for i in range(1, self.order):
        c = np.tensordot(t_right.cores[i], t_right.cores[i], axes=([1, 3], [1, 3])).squeeze()
        if np.linalg.norm(c - np.eye(t_right.ranks[i])) > self.tol:
            err += 1

    # convert t_col to full format and flatten
    t_full = t_col.full().flatten()

    # compute relative error
    rel_err = np.linalg.norm(t_right.full().flatten() - t_full) / np.linalg.norm(t_full)

    # check if t_right is right-orthonormal and equal to t_col
    self.assertEqual(err, 0)
    self.assertLess(rel_err, self.tol)
def test_construction_from_array(self):
    """test tensor train class for arrays"""

    # convert t to full format
    t_full = self.t.full()

    # construct tensor train from the resulting array
    t_tmp = TT(t_full, threshold=self.threshold, max_rank=self.max_rank)

    # compute difference, convert to full format, and flatten
    t_diff = (self.t - t_tmp).full().flatten()

    # compute relative error
    rel_err = np.linalg.norm(t_diff) / np.linalg.norm(t_full.flatten())

    # check if relative error is smaller than tolerance
    self.assertLess(rel_err, self.tol)

    # check if construction fails if number of dimensions is not a multiple of 2
    with self.assertRaises(ValueError):
        TT(np.random.rand(1, 2, 3))

    # check if construction fails if input is neither a list of cores nor an ndarray
    with self.assertRaises(TypeError):
        TT(None)
def multisponge(dimension, level):
    """
    Construction of a multisponge.

    Generate a binary tensor representing a multisponge fractal (e.g., Sierpinski carpet, Menger sponge, etc.),
    see [1]_, by exploiting the tensor-train format and Kronecker products.

    Parameters
    ----------
    dimension : int
        dimension (>1) of the multisponge
    level : int
        level of the fractal construction to generate

    Returns
    -------
    np.ndarray
        tensor representing the multisponge fractal

    References
    ----------
    .. [1] P. Gelß, C. Schütte, "Tensor-generated fractals: Using tensor decompositions for creating self-similar
           patterns", arXiv:1812.00814, 2018
    """

    if dimension > 1:

        # construct generating tensor
        cores = [np.zeros([1, 3, 1, 2])]
        cores[0][0, :, 0, 0] = [1, 1, 1]
        cores[0][0, :, 0, 1] = [1, 0, 1]
        for _ in range(1, dimension - 1):
            cores.append(np.zeros([2, 3, 1, 2]))
            cores[-1][0, :, 0, 0] = [1, 0, 1]
            cores[-1][1, :, 0, 0] = [0, 1, 0]
            cores[-1][1, :, 0, 1] = [1, 0, 1]
        cores.append(np.zeros([2, 3, 1, 1]))
        cores[-1][0, :, 0, 0] = [1, 0, 1]
        cores[-1][1, :, 0, 0] = [0, 1, 0]
        generator = TT(cores)
        generator = generator.full().reshape(generator.row_dims)

        # construct fractal in the form of a binary tensor
        fractal = generator
        for _ in range(2, level + 1):
            fractal = np.kron(fractal, generator)
        fractal = fractal.astype(int)

    else:
        raise ValueError('dimension must be larger than 1')

    return fractal
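# A minimal usage sketch (not part of the original module), assuming numpy is
# imported as np: in two dimensions the generator is the 3x3 Sierpinski carpet
# pattern with 8 ones, so level 2 yields a 9x9 binary tensor with 8**2 = 64 ones.
carpet = multisponge(2, 2)
print(carpet.shape)  # (9, 9)
print(carpet.sum())  # 64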
def approximate(x, psi_x, thresholds: list, max_ranks: list):
    """Approximate psi_x using HOSVD and HOCUR, respectively.

    Parameters
    ----------
    x: array
        snapshot matrix
    psi_x: array
        transformed data matrix
    thresholds: list of floats
        thresholds for HOSVD
    max_ranks: list of ints
        maximum ranks for HOCUR

    Returns
    -------
    ranks_hosvd: list of lists of ints
        ranks of the approximations computed with HOSVD
    errors_hosvd: list of floats
        relative errors of the approximations computed with HOSVD
    ranks_hocur: list of lists of ints
        ranks of the approximations computed with HOCUR
    errors_hocur: list of floats
        relative errors of the approximations computed with HOCUR
    """

    # define returns
    ranks_hosvd = []
    errors_hosvd = []
    ranks_hocur = []
    errors_hocur = []

    # total number of approximations (for progress reports)
    number_of_runs = len(thresholds) + len(max_ranks)

    start_time = utl.progress('Approximation in TT format', 0)

    # reshape psi_x into tensor
    psi_x_full = psi_x.reshape([number_of_boxes, number_of_boxes, 1, 1, 1, psi_x.shape[1]])

    # approximate psi_x using HOSVD
    for i in range(len(thresholds)):
        psi_approx = TT(psi_x_full, threshold=thresholds[i])
        ranks_hosvd.append(psi_approx.ranks)
        errors_hosvd.append(np.linalg.norm(psi_x_full - psi_approx.full()) / np.linalg.norm(psi_x_full))
        utl.progress('Approximation in TT format', 100 * (i + 1) / number_of_runs,
                     cpu_time=_time.time() - start_time)

    # approximate psi_x using HOCUR
    for i in range(len(max_ranks)):
        psi_approx = tdt.hocur(x, basis_list, max_ranks[i], repeats=3, multiplier=100, progress=False)
        ranks_hocur.append(psi_approx.ranks)
        errors_hocur.append(np.linalg.norm(psi_x - psi_approx.transpose(cores=2).matricize()) / np.linalg.norm(psi_x))
        utl.progress('Approximation in TT format', 100 * (len(thresholds) + i + 1) / number_of_runs,
                     cpu_time=_time.time() - start_time)

    return ranks_hosvd, errors_hosvd, ranks_hocur, errors_hocur
def approximated_dynamics(_, theta):
    """Construction of the right-hand side of the system from the coefficient tensor"""

    # build rank-one decomposition of the transformed data vector Psi(theta);
    # p (number of modes), psi (list of basis functions), and xi (coefficient
    # matrix) are defined in the surrounding script
    cores = [np.zeros([1, theta.shape[0] + 1, 1, 1]) for _ in range(p)]
    for q in range(p):
        cores[q][0, :, 0, 0] = [1] + [psi[q](theta[r]) for r in range(theta.shape[0])]
    psi_x = TT(cores)
    psi_x = psi_x.full().reshape(np.prod(psi_x.row_dims), 1)

    # contract transformed data vector with the coefficient tensor
    rhs = psi_x.transpose() @ xi
    rhs = rhs.reshape(rhs.size)

    return rhs
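# A hypothetical integration sketch: like approximated_dynamics itself, it
# relies on the surrounding script to define p, psi, and xi; theta_0 is a
# placeholder initial condition whose length must match the system dimension.
from scipy.integrate import solve_ivp

theta_0 = np.zeros(p)
sol = solve_ivp(approximated_dynamics, [0, 10], theta_0, t_eval=np.linspace(0, 10, 100))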
def vicsek_fractal(dimension, level):
    """Construction of a Vicsek fractal

    Generate a binary tensor representing a Vicsek fractal, see [1]_, by exploiting the tensor-train format and
    Kronecker products.

    Parameters
    ----------
    dimension: int
        dimension (>1) of the Vicsek fractal
    level: int
        level of the fractal construction to generate

    Returns
    -------
    fractal: ndarray
        tensor representing the Vicsek fractal

    References
    ----------
    .. [1] P. Gelß, C. Schütte, "Tensor-generated fractals: Using tensor decompositions for creating self-similar
           patterns", arXiv:1812.00814, 2018
    """

    if dimension > 1:

        # construct generating tensor
        cores = [None] * dimension
        cores[0] = np.zeros([1, 3, 1, 2])
        cores[0][0, :, 0, 0] = [1, 1, 1]
        cores[0][0, :, 0, 1] = [0, 1, 0]
        cores[dimension - 1] = np.zeros([2, 3, 1, 1])
        cores[dimension - 1][0, :, 0, 0] = [0, 1, 0]
        cores[dimension - 1][1, :, 0, 0] = [1, 0, 1]
        for i in range(1, dimension - 1):
            cores[i] = np.zeros([2, 3, 1, 2])
            cores[i][0, :, 0, 0] = [0, 1, 0]
            cores[i][1, :, 0, 0] = [1, 0, 1]
            cores[i][1, :, 0, 1] = [0, 1, 0]
        generator = TT(cores)
        generator = generator.full().reshape(generator.row_dims)

        # construct fractal in the form of a binary tensor
        fractal = generator
        for _ in range(2, level + 1):
            fractal = np.kron(fractal, generator)
        fractal = fractal.astype(int)

    else:
        raise ValueError('dimension must be larger than 1')

    return fractal
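# A minimal usage sketch (not part of the original module): in two dimensions
# the generator is the 3x3 plus-sign pattern with 5 ones, so level 2 gives a
# 9x9 binary tensor containing 5**2 = 25 ones.
cross = vicsek_fractal(2, 2)
print(cross.shape)  # (9, 9)
print(cross.sum())  # 25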
def test_orthonormalization(self):
    """test orthonormalization"""

    # construct non-operator tensor train
    cores = [self.cores[i][:, :, 0:1, :] for i in range(self.order)]
    t_col = TT(cores)

    # orthonormalize t
    t_right = t_col.ortho(threshold=1e-14)

    # test if cores are right-orthonormal
    err = 0
    for i in range(1, self.order):
        c = np.tensordot(t_right.cores[i], t_right.cores[i], axes=([1, 3], [1, 3])).squeeze()
        if np.linalg.norm(c - np.eye(t_right.ranks[i])) > self.tol:
            err += 1

    # convert t_col to full format and flatten
    t_full = t_col.full().flatten()

    # compute relative error
    rel_err = np.linalg.norm(t_right.full().flatten() - t_full) / np.linalg.norm(t_full)

    # check if t_right is right-orthonormal and equal to t_col
    self.assertEqual(err, 0)
    self.assertLess(rel_err, self.tol)

    # check if orthonormalization fails if maximum rank is not positive
    with self.assertRaises(ValueError):
        t_col.ortho(max_rank=0)

    # check if orthonormalization fails if threshold is negative
    with self.assertRaises(ValueError):
        t_col.ortho(threshold=-1)
def _tt_decomposition_one_snapshot(x_k, basis_list, b_k, sigma_k):
    """
    Calculate the exact TT decomposition of dPsi(x_k).

    Parameters
    ----------
    x_k : np.ndarray
        k-th snapshot, shape (d,)
    basis_list : list[list[Function]]
        list of basis functions in every mode
    b_k : np.ndarray
        drift at the snapshot x_k, shape (d,)
    sigma_k : np.ndarray
        diffusion at the snapshot x_k, shape (d, d2)

    Returns
    -------
    TT
        TT decomposition of dPsi(x_k)
    """

    # number of modes
    p = len(basis_list)

    # cores of dPsi(x_k)
    cores = [_dPsix(basis_list[0], x_k, b_k, sigma_k, position='first')]
    for i in range(1, p - 1):
        cores.append(_dPsix(basis_list[i], x_k, b_k, sigma_k, position='middle'))
    cores.append(_dPsix(basis_list[p - 1], x_k, b_k, sigma_k, position='last'))

    return TT(cores)
def _tt_decomposition_one_snapshot_reversible(x_k, basis_list, sigma_k):
    """
    Calculate the exact TT decomposition of dPsi(x_k).

    Parameters
    ----------
    x_k : np.ndarray
        snapshot, shape (d,)
    basis_list : list[list[Function]]
        list of basis functions in every mode
    sigma_k : np.ndarray
        diffusion at snapshot x_k, shape (d, d2)

    Returns
    -------
    TT
        TT decomposition of dPsi(x_k)
    """

    # number of modes
    p = len(basis_list)

    # insert elements of core 1
    cores = [_dPsix_reversible(basis_list[0], x_k, position='first')]

    # insert elements of cores 2,...,p-1
    for i in range(1, p - 1):
        cores.append(_dPsix_reversible(basis_list[i], x_k, position='middle'))

    # insert elements of core p
    cores.append(_dPsix_reversible(basis_list[p - 1], x_k, position='last'))

    # insert elements of core p + 1
    cores.append(sigma_k[:, :, None, None])

    return TT(cores)
def test_addition(self):
    """test addition/subtraction of tensor trains"""

    # compute difference of t and itself
    t_diff = self.t - self.t

    # convert to full array and reshape to vector
    t_diff = t_diff.full().flatten()

    # convert t to full array and reshape to vector
    t_tmp = self.t.full().flatten()

    # compute relative error
    rel_err = np.linalg.norm(t_diff) / np.linalg.norm(t_tmp)

    # check if relative error is smaller than tolerance
    self.assertLess(rel_err, self.tol)

    # check if addition fails when inputs do not have the same dimensions
    # (copy the core list so that self.t is not modified)
    with self.assertRaises(ValueError):
        cores = list(self.t.cores)
        cores[0] = np.random.rand(self.ranks[0], self.row_dims[0] + 1, self.col_dims[0], self.ranks[1])
        self.t + TT(cores)

    # check if addition fails when input is not a tensor train
    with self.assertRaises(TypeError):
        self.t + 0
def test_construction_from_cores(self):
    """test tensor train class for list of cores"""

    # check if all parameters are correct
    self.assertEqual(self.t.order, self.order)
    self.assertEqual(self.t.ranks, self.ranks)
    self.assertEqual(self.t.row_dims, self.row_dims)
    self.assertEqual(self.t.col_dims, self.col_dims)
    self.assertEqual(self.t.cores, self.cores)

    # check if construction fails if ranks are inconsistent
    with self.assertRaises(ValueError):
        TT([np.random.rand(1, 2, 3, 3), np.random.rand(4, 3, 2, 1)])

    # check if construction fails if cores are not 4-dimensional
    with self.assertRaises(ValueError):
        TT([np.random.rand(1, 2, 3), np.random.rand(3, 3, 2, 1)])
def signaling_cascade(d):
    """Signaling cascade

    Model for a cascading process on a genetic network consisting of genes of species S_1, ..., S_d. For a detailed
    description of the process and the construction of the corresponding TT operator, we refer to [1]_.

    Arguments
    ---------
    d: int
        number of species (= order of the operator)

    Returns
    -------
    operator: instance of TT class
        TT operator of the model

    References
    ----------
    .. [1] P. Gelß, "The Tensor-Train Format and Its Applications", dissertation, FU Berlin, 2017
    """

    # define core elements
    s_mat_0 = 0.7 * (np.eye(64, k=-1) - np.eye(64)) + 0.07 * (np.eye(64, k=1) - np.eye(64)).dot(np.diag(np.arange(64)))
    s_mat = 0.07 * (np.eye(64, k=1) - np.eye(64)).dot(np.diag(np.arange(64)))
    l_mat = np.diag(np.arange(64)).dot(np.diag(np.reciprocal(np.arange(5.0, 69.0))))
    i_mat = np.eye(64)
    m_mat = np.eye(64, k=-1) - np.eye(64)

    # make operator stochastic
    s_mat_0[-1, -1] = -0.07 * 63
    m_mat[-1, -1] = 0

    # define TT cores
    cores = [np.zeros([1, 64, 64, 3])]
    cores[0][0, :, :, 0] = s_mat_0
    cores[0][0, :, :, 1] = l_mat
    cores[0][0, :, :, 2] = i_mat
    for k in range(1, d - 1):
        cores.append(np.zeros([3, 64, 64, 3]))
        cores[k][0, :, :, 0] = i_mat
        cores[k][1, :, :, 0] = m_mat
        cores[k][2, :, :, 0] = s_mat
        cores[k][2, :, :, 1] = l_mat
        cores[k][2, :, :, 2] = i_mat
    cores.append(np.zeros([3, 64, 64, 1]))
    cores[d - 1][0, :, :, 0] = i_mat
    cores[d - 1][1, :, :, 0] = m_mat
    cores[d - 1][2, :, :, 0] = s_mat

    # define TT operator
    operator = TT(cores)

    return operator
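# A minimal usage sketch (not part of the original module): for d species the
# operator acts on d modes of dimension 64 and has TT ranks [1, 3, ..., 3, 1].
operator = signaling_cascade(3)
print(operator.row_dims)  # [64, 64, 64]
print(operator.ranks)     # [1, 3, 3, 1]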
def two_step_destruction(k_1, k_2, m):
    """Two-step destruction

    Model for a two-step mechanism for the destruction of molecules. For a detailed description of the process and
    the construction of the corresponding TT operator, we refer to [1]_.

    Arguments
    ---------
    k_1: float
        rate constant for the first reaction
    k_2: float
        rate constant for the second reaction
    m: int
        exponent determining the maximum number of molecules

    Returns
    -------
    operator: instance of TT class
        TT operator of the process

    References
    ----------
    .. [1] P. Gelß, "The Tensor-Train Format and Its Applications", dissertation, FU Berlin, 2017
    """

    # define dimensions and ranks
    n = [2 ** m, 2 ** (m + 1), 2 ** m, 2 ** m]
    r = [1, 3, 5, 3, 1]

    # define TT cores
    cores = [np.zeros([r[i], n[i], n[i], r[i + 1]]) for i in range(4)]
    cores[0][0, :, :, 0] = np.eye(n[0])
    cores[0][0, :, :, 1] = k_1 * np.eye(n[0], k=1) @ np.diag(np.arange(n[0]))
    cores[0][0, :, :, 2] = -k_1 * np.diag(np.arange(n[0]))
    cores[1][0, :, :, 0] = np.eye(n[1])
    cores[1][0, :, :, 1] = k_2 * np.eye(n[1], k=1) @ np.diag(np.arange(n[1]))
    cores[1][0, :, :, 2] = -k_2 * np.diag(np.arange(n[1]))
    cores[1][1, :, :, 3] = np.eye(n[1], k=1) @ np.diag(np.arange(n[1]))
    cores[1][2, :, :, 4] = np.diag(np.arange(n[1]))
    cores[2][0, :, :, 0] = np.eye(n[2])
    cores[2][1, :, :, 1] = np.eye(n[2], k=1) @ np.diag(np.arange(n[2]))
    cores[2][2, :, :, 2] = np.diag(np.arange(n[2]))
    cores[2][3, :, :, 2] = np.eye(n[2], k=-1)
    cores[2][4, :, :, 2] = np.eye(n[2])
    cores[3][0, :, :, 0] = (np.eye(n[3], k=1) - np.eye(n[3])) @ np.diag(np.arange(n[3]))
    cores[3][1, :, :, 0] = np.eye(n[3], k=-1)
    cores[3][2, :, :, 0] = np.eye(n[3])

    # define operator
    operator = TT(cores)

    return operator
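# A minimal usage sketch (not part of the original module) with hypothetical
# rate constants: for m = 2 the mode dimensions are [4, 8, 4, 4] and the TT
# ranks are fixed at [1, 3, 5, 3, 1].
operator = two_step_destruction(1.0, 2.0, 2)
print(operator.row_dims)  # [4, 8, 4, 4]
print(operator.ranks)     # [1, 3, 5, 3, 1]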
def test_1_norm(self):
    """test 1-norm"""

    # construct non-operator tensor train
    cores = [np.abs(self.cores[i][:, :, 0:1, :]) for i in range(self.order)]
    t_col = TT(cores)

    # convert to full array and flatten
    t_full = t_col.full().flatten()

    # compute norms
    norm_tt = t_col.norm(p=1)
    norm_full = np.linalg.norm(t_full, 1)

    # compute relative error
    rel_err = (norm_tt - norm_full) / norm_full

    # check if relative error is smaller than tolerance
    self.assertLess(rel_err, self.tol)
def cantor_dust(dimension, level):
    """
    Construction of a (multidimensional) Cantor dust.

    Generate a binary tensor representing a Cantor dust, see [1]_, by exploiting the tensor-train format and
    Kronecker products.

    Parameters
    ----------
    dimension : int
        dimension of the Cantor dust
    level : int
        level of the fractal construction to generate

    Returns
    -------
    np.ndarray
        tensor representing the Cantor dust

    References
    ----------
    .. [1] P. Gelß, C. Schütte, "Tensor-generated fractals: Using tensor decompositions for creating self-similar
           patterns", arXiv:1812.00814, 2018
    """

    # construct generating tensor
    cores = []
    for _ in range(dimension):
        cores.append(np.zeros([1, 3, 1, 1]))
        cores[-1][0, :, 0, 0] = [1, 0, 1]
    generator = TT(cores)
    generator = generator.full().reshape(generator.row_dims)

    # construct fractal in the form of a binary tensor
    fractal = generator
    for _ in range(2, level + 1):
        fractal = np.kron(fractal, generator)
    fractal = fractal.astype(int)

    return fractal
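# A minimal usage sketch (not part of the original module): the generator in
# dimension 2 has 4 ones, so level 3 yields a 27x27 binary tensor with
# 4**3 = 64 ones.
dust = cantor_dust(2, 3)
print(dust.shape)  # (27, 27)
print(dust.sum())  # 64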
def test_2_norm(self):
    """test 2-norm"""

    # construct tensor train
    cores = [self.cores[i][:, :, 0:1, :] for i in range(self.order)]
    tt_col = TT(cores)

    # transpose
    tt_row = tt_col.transpose()

    # convert to full matrix
    tt_mat = tt_col.matricize()

    # compute norms
    norm_tt_row = tt_row.norm(p=2)
    norm_tt_col = tt_col.norm(p=2)
    norm_full = np.linalg.norm(tt_mat, 2)

    # compute relative errors
    rel_err_row = (norm_tt_row - norm_full) / norm_full
    rel_err_col = (norm_tt_col - norm_full) / norm_full

    # define tensor-train operator
    tt_op = self.t

    # convert to full matrix
    tt_mat = tt_op.matricize()

    # compute norms
    norm_tt = tt_op.norm(p=2)
    norm_full = np.linalg.norm(tt_mat, 'fro')

    # compute relative error
    rel_err = (norm_tt - norm_full) / norm_full

    # check if relative errors are smaller than tolerance
    self.assertLess(rel_err_row, self.tol)
    self.assertLess(rel_err_col, self.tol)
    self.assertLess(rel_err, self.tol)
def rgb_fractal(matrix_r, matrix_g, matrix_b, level):
    """Construction of an RGB fractal

    Generate a 3-dimensional tensor representing an RGB fractal, see [1]_, by exploiting the tensor-train format.

    Parameters
    ----------
    matrix_r: ndarray
        matrix representing red primaries
    matrix_g: ndarray
        matrix representing green primaries
    matrix_b: ndarray
        matrix representing blue primaries
    level: int
        level of the fractal construction to generate

    Returns
    -------
    fractal: ndarray
        tensor representing the RGB fractal

    References
    ----------
    .. [1] P. Gelß, C. Schütte, "Tensor-generated fractals: Using tensor decompositions for creating self-similar
           patterns", arXiv:1812.00814, 2018
    """

    # dimension of RGB matrices
    n = matrix_r.shape[0]

    # construct RGB fractal
    cores = [None] * level
    cores[0] = np.zeros([1, n, n, 3])
    cores[0][0, :, :, 0] = matrix_r
    cores[0][0, :, :, 1] = matrix_g
    cores[0][0, :, :, 2] = matrix_b
    for i in range(1, level):
        cores[i] = np.zeros([3, n, n, 3])
        cores[i][0, :, :, 0] = matrix_r
        cores[i][1, :, :, 1] = matrix_g
        cores[i][2, :, :, 2] = matrix_b
    cores.append(np.zeros([3, 3, 1, 1]))
    cores[level][0, :, 0, 0] = [1, 0, 0]
    cores[level][1, :, 0, 0] = [0, 1, 0]
    cores[level][2, :, 0, 0] = [0, 0, 1]
    fractal = TT(cores).full().reshape([n ** level, 3, n ** level]).transpose([0, 2, 1])

    return fractal
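# A minimal usage sketch (not part of the original module) with hypothetical
# 2x2 primaries: the result has shape (n**level, n**level, 3) and can be
# passed to matplotlib's imshow as an RGB image.
matrix_r = np.array([[1, 0], [0, 1]])
matrix_g = np.array([[1, 1], [0, 0]])
matrix_b = np.array([[0, 0], [1, 1]])
fractal = rgb_fractal(matrix_r, matrix_g, matrix_b, 2)
print(fractal.shape)  # (4, 4, 3)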
def test_1_norm(self):
    """test 1-norm"""

    # construct tensor train without negative entries
    cores = [np.abs(self.cores[i][:, :, 0:1, :]) for i in range(self.order)]
    tt_col = TT(cores)

    # transpose
    tt_row = tt_col.transpose()

    # convert to full matrix
    tt_mat = tt_col.matricize()

    # compute norms
    norm_tt_row = tt_row.norm(p=1)
    norm_tt_col = tt_col.norm(p=1)
    norm_full = np.linalg.norm(tt_mat, 1)

    # compute relative errors
    rel_err_row = (norm_tt_row - norm_full) / norm_full
    rel_err_col = (norm_tt_col - norm_full) / norm_full

    # construct tensor-train operator without negative entries
    cores = [np.abs(self.cores[i][:, :, :, :]) for i in range(self.order)]
    tt_op = TT(cores)

    # convert to full matrix
    tt_mat = tt_op.matricize()

    # compute norms
    norm_tt = tt_op.norm(p=1)
    norm_full = np.linalg.norm(tt_mat, 1)

    # compute relative error
    rel_err = (norm_tt - norm_full) / norm_full

    # check if relative errors are smaller than tolerance
    self.assertLess(rel_err_row, self.tol)
    self.assertLess(rel_err_col, self.tol)
    self.assertLess(rel_err, self.tol)
def ulam_2d(transitions, states, simulations):
    """
    TT approximation of the Perron-Frobenius operator in 2D.

    Given transitions of particles in a 2-dimensional potential, compute the approximation of the corresponding
    Perron-Frobenius operator in TT format. See [1]_ for details.

    Parameters
    ----------
    transitions : np.ndarray
        matrix containing the transitions; each column is of the form [x_1, x_2, y_1, y_2]^T, representing a
        transition from state (x_1, x_2) to (y_1, y_2)
    states : list[int]
        number of states in x- and y-direction
    simulations : int
        number of simulations per state

    Returns
    -------
    TT
        TT approximation of the Perron-Frobenius operator

    References
    ----------
    .. [1] P. Gelß, "The Tensor-Train Format and Its Applications: Modeling and Analysis of Chemical Reaction
           Networks, Catalytic Processes, Fluid Flows, and Brownian Dynamics", Freie Universität Berlin, 2017
    """

    # find unique indices for transitions in the first dimension
    [ind_unique, ind_inv] = np.unique(transitions[[0, 2], :], axis=1, return_inverse=True)
    rank = ind_unique.shape[1]

    # construct core for the first dimension
    cores = [np.zeros([1, states[0], states[0], rank])]
    for i in range(rank):
        cores[0][0, ind_unique[0, i] - 1, ind_unique[1, i] - 1, i] = 1

    # construct core for the second dimension
    cores.append(np.zeros([rank, states[1], states[1], 1]))
    for i in range(transitions.shape[1]):
        cores[1][ind_inv[i], transitions[1, i] - 1, transitions[3, i] - 1, 0] += 1

    # transpose and normalize operator
    operator = (1 / simulations) * TT(cores).transpose()

    return operator
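# A minimal usage sketch (not part of the original module) with two
# hypothetical transitions on a 2x2 state space; each column of the array is
# one transition [x_1, x_2, y_1, y_2]^T with 1-based state indices.
transitions = np.array([[1, 1],
                        [1, 2],
                        [2, 2],
                        [1, 2]])
operator = ulam_2d(transitions, [2, 2], 1)
print(operator.row_dims)  # [2, 2]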
def simple_test_case():

    # initialize random tensors
    T = np.random.random((2, 3, 4, 5))
    U = np.random.random((4, 5, 6))

    # calculate tensordot using numpy
    TU = np.tensordot(T, U, axes=([2, 3], [0, 1]))

    # convert to TT format
    T_tt = TT(np.reshape(T, (2, 3, 4, 5, 1, 1, 1, 1)))
    print('T:')
    print(T_tt)
    U_tt = TT(np.reshape(U, (4, 5, 6, 1, 1, 1)))
    print('\n\nU:')
    print(U_tt)

    # calculate tensordot in TT format
    TU_back = T_tt.tensordot(U_tt, 2)
    print('\n\ntensordot <T,U>:')
    print(TU_back)

    # convert back to full tensor format and compare the results
    TU_back = np.squeeze(TU_back.full())
    print('\n\nerror: {}'.format(np.linalg.norm(TU_back - TU)))
def test_construction_from_array(self):
    """test tensor train class for arrays"""

    # convert t to full format
    t_full = self.t.full()

    # construct tensor train from the resulting array
    t_tmp = TT(t_full)

    # compute difference, convert to full format, and flatten
    t_diff = (self.t - t_tmp).full().flatten()

    # compute relative error
    rel_err = np.linalg.norm(t_diff) / np.linalg.norm(t_full.flatten())

    # check if relative error is smaller than tolerance
    self.assertLess(rel_err, self.tol)
def test_scalar_multiplication(self):
    """test scalar multiplication"""

    # random constant in [0,10]
    c = 10 * np.random.rand(1)[0]

    # multiply tensor train with scalar value, convert to full array, and reshape to vector
    t_tmp = self.t * c
    t_tmp = t_tmp.full().flatten()

    # convert t to full array and reshape to vector
    t_full = self.t.full().flatten()

    # compute error between the norm ratio and the scalar
    err = np.abs(np.linalg.norm(t_tmp) / np.linalg.norm(t_full) - c)

    # check if error is smaller than tolerance
    self.assertLess(err, self.tol)
def kuramoto_coefficients(d, w):
    """
    Construction of the exact coefficient tensor for the application of MANDy to the Kuramoto model using the basis
    set {1, x, x^2, x^3}. See [1]_ for details.

    Parameters
    ----------
    d : int
        number of oscillators
    w : np.ndarray
        natural frequencies

    Returns
    -------
    TT
        exact coefficient tensor

    References
    ----------
    .. [1] P. Gelß, S. Klus, J. Eisert, C. Schütte, "Multidimensional Approximation of Nonlinear Dynamical Systems",
           arXiv:1809.02448, 2018
    """

    cores = [np.zeros([1, d + 1, 1, 2 * d + 1]),
             np.zeros([2 * d + 1, d + 1, 1, 2 * d + 1]),
             np.zeros([2 * d + 1, d, 1, 1])]
    cores[0][0, 0, 0, 0] = 1
    cores[1][0, 0, 0, 0] = 1
    cores[2][0, :, 0, 0] = w
    for q in range(d):
        cores[0][0, 1:, 0, 2 * q + 1] = (2 / d) * np.ones([d])
        cores[0][0, q + 1, 0, 2 * q + 1] = 0
        cores[1][2 * q + 1, q + 1, 0, 2 * q + 1] = 1
        cores[2][2 * q + 1, q, 0, 0] = 1
        cores[0][0, q + 1, 0, 2 * q + 2] = 1
        cores[1][2 * q + 2, 0, 0, 2 * q + 2] = 0.2
        cores[1][2 * q + 2, 1:, 0, 2 * q + 2] = -(2 / d) * np.ones([d])
        cores[1][2 * q + 2, q + 1, 0, 2 * q + 2] = 0
        cores[2][2 * q + 2, q, 0, 0] = 1
    coefficient_tensor = TT(cores)

    return coefficient_tensor
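# A minimal usage sketch (not part of the original module) with hypothetical
# natural frequencies for d = 3 oscillators: the coefficient tensor has mode
# dimensions [d + 1, d + 1, d] and TT ranks [1, 2d + 1, 2d + 1, 1].
xi_exact = kuramoto_coefficients(3, np.array([0.2, 0.5, 1.0]))
print(xi_exact.row_dims)  # [4, 4, 3]
print(xi_exact.ranks)     # [1, 7, 7, 1]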
def average_numbers_of_cars(series):
    """Compute the average number of cars for each species of a given time series in TT format."""

    # define array
    average_noc = np.zeros([len(series), series[0].order])

    # loop over time steps
    for i in range(len(series)):

        # loop over species
        for j in range(series[0].order):

            # define tensor train to compute average number of cars
            cores = [np.ones([1, series[0].row_dims[k], 1, 1]) for k in range(series[0].order)]
            cores[j] = np.zeros([1, series[0].row_dims[j], 1, 1])
            cores[j][0, :, 0, 0] = np.arange(series[0].row_dims[j])
            tensor_mean = TT(cores)

            # define entry of average_noc
            average_noc[i, j] = series[i].transpose() @ tensor_mean

    return average_noc
def mean_concentrations(series):
    """Mean concentrations of TT series

    Compute mean concentrations of a given time series in TT format representing probability distributions of, e.g.,
    a chemical reaction network.

    Parameters
    ----------
    series: list of instances of TT class
        time series of probability tensors in TT format

    Returns
    -------
    mean: ndarray(#time_steps, #species)
        mean concentrations of the species over time
    """

    # define array
    mean = np.zeros([len(series), series[0].order])

    # loop over time steps
    for i in range(len(series)):

        # loop over species
        for j in range(series[0].order):

            # define tensor train to compute mean concentration of jth species
            cores = [np.ones([1, series[0].row_dims[k], 1, 1]) for k in range(series[0].order)]
            cores[j] = np.zeros([1, series[0].row_dims[j], 1, 1])
            cores[j][0, :, 0, 0] = np.arange(series[0].row_dims[j])
            tensor_mean = TT(cores)

            # define entry of mean
            mean[i, j] = (series[i].transpose() @ tensor_mean).element([0] * 2 * series[0].order)

    return mean
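# A minimal usage sketch (not part of the original module): a single Dirac
# distribution on two species with four states each, concentrated at the state
# (2, 3), has mean concentrations [2, 3].
cores = [np.zeros([1, 4, 1, 1]), np.zeros([1, 4, 1, 1])]
cores[0][0, 2, 0, 0] = 1
cores[1][0, 3, 0, 0] = 1
print(mean_concentrations([TT(cores)]))  # [[2. 3.]]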
def test_operator(self):
    """test operator check"""

    # check t
    t_check = self.t.isoperator()

    # construct non-operator tensor trains
    cores = [self.cores[i][:, :, 0:1, :] for i in range(self.order)]
    u = TT(cores)
    cores = [self.cores[i][:, 0:1, :, :] for i in range(self.order)]
    v = TT(cores)

    # check u and v
    u_check = u.isoperator()
    v_check = v.isoperator()

    # check if operator checks are correct
    self.assertTrue(t_check)
    self.assertFalse(u_check)
    self.assertFalse(v_check)