def approximate(x, psi_x, thresholds: list, max_ranks: list):
    """Approximate psi_x using HOSVD and HOCUR, respectively.

    Parameters
    ----------
    x: array
        snapshot matrix
    psi_x: array
        transformed data matrix
    thresholds: list of floats
        thresholds for HOSVD
    max_ranks: list of ints
        maximum ranks for HOCUR

    Returns
    -------
    ranks_hosvd: list of lists of ints
        ranks of the approximations computed with HOSVD
    errors_hosvd: list of floats
        relative errors of the approximations computed with HOSVD
    ranks_hocur: list of lists of ints
        ranks of the approximations computed with HOCUR
    errors_hocur: list of floats
        relative errors of the approximations computed with HOCUR
    """

    # define returns
    ranks_hosvd = []
    errors_hosvd = []
    ranks_hocur = []
    errors_hocur = []

    start_time = utl.progress('Approximation in TT format', 0)

    # total number of progress steps; previously hard-coded to 6, which was
    # only correct for exactly 3 thresholds and 3 maximum ranks
    number_of_steps = len(thresholds) + len(max_ranks)

    # reshape psi_x into tensor
    # NOTE(review): number_of_boxes is a module-level global -- presumably
    # psi_x.shape[0] == number_of_boxes ** 2; confirm against caller
    psi_x_full = psi_x.reshape([number_of_boxes, number_of_boxes, 1, 1, 1, psi_x.shape[1]])

    # approximate psi_x using HOSVD
    for i in range(len(thresholds)):
        psi_approx = TT(psi_x_full, threshold=thresholds[i])
        ranks_hosvd.append(psi_approx.ranks)
        # relative error in the Frobenius norm of the full tensor
        errors_hosvd.append(np.linalg.norm(psi_x_full - psi_approx.full()) / np.linalg.norm(psi_x_full))
        utl.progress('Approximation in TT format', 100 * (i + 1) / number_of_steps,
                     cpu_time=_time.time() - start_time)

    # approximate psi_x using HOCUR
    for i in range(len(max_ranks)):
        psi_approx = tdt.hocur(x, basis_list, max_ranks[i], repeats=3, multiplier=100, progress=False)
        ranks_hocur.append(psi_approx.ranks)
        # relative error against the original matricization of psi_x
        errors_hocur.append(np.linalg.norm(psi_x - psi_approx.transpose(cores=2).matricize()) / np.linalg.norm(psi_x))
        utl.progress('Approximation in TT format', 100 * (len(thresholds) + i + 1) / number_of_steps,
                     cpu_time=_time.time() - start_time)

    return ranks_hosvd, errors_hosvd, ranks_hocur, errors_hocur
def approximated_dynamics(_, theta):
    """Construction of the right-hand side of the system from the coefficient tensor.

    Parameters
    ----------
    _: unused
        placeholder (e.g. the time argument of an ODE solver interface)
    theta: array
        current state vector

    Returns
    -------
    rhs: array
        right-hand side evaluated at theta
    """

    # one rank-one core per dimension; each row holds the constant 1 followed
    # by the basis functions psi[q] evaluated at all entries of theta
    # (the original built this as [zeros] + [zeros for _ in range(1, p)],
    # which is just a p-element list)
    cores = [np.zeros([1, theta.shape[0] + 1, 1, 1]) for _ in range(p)]
    for q in range(p):
        cores[q][0, :, 0, 0] = [1] + [psi[q](theta[r]) for r in range(theta.shape[0])]

    # contract the rank-one TT to a full column vector of basis evaluations
    psi_x = TT(cores)
    psi_x = psi_x.full().reshape(np.prod(psi_x.row_dims), 1)

    # multiply with the coefficient tensor xi and flatten to a 1-d array
    rhs = psi_x.transpose() @ xi
    rhs = rhs.reshape(rhs.size)
    return rhs
def test_1_norm(self):
    """test 1-norm"""

    # build a tensor train with non-negative entries only (column vector)
    nonneg_cores = [np.abs(self.cores[i][:, :, 0:1, :]) for i in range(self.order)]
    col_vector = TT(nonneg_cores)
    row_vector = col_vector.transpose()

    # reference 1-norm from the matricized representation
    full_norm = np.linalg.norm(col_vector.matricize(), 1)

    # relative deviations of the TT norms from the reference
    dev_row = (row_vector.norm(p=1) - full_norm) / full_norm
    dev_col = (col_vector.norm(p=1) - full_norm) / full_norm

    # same check for a non-negative tensor-train operator
    op_cores = [np.abs(self.cores[i][:, :, :, :]) for i in range(self.order)]
    operator = TT(op_cores)
    op_full_norm = np.linalg.norm(operator.matricize(), 1)
    dev_op = (operator.norm(p=1) - op_full_norm) / op_full_norm

    # all deviations must stay below the tolerance
    self.assertLess(dev_row, self.tol)
    self.assertLess(dev_col, self.tol)
    self.assertLess(dev_op, self.tol)
def test_2_norm(self):
    """test 2-norm"""

    # build a tensor train (column vector) from the first column slices
    col_vector = TT([self.cores[i][:, :, 0:1, :] for i in range(self.order)])
    row_vector = col_vector.transpose()

    # reference spectral norm of the matricization
    full_norm = np.linalg.norm(col_vector.matricize(), 2)

    # relative deviations of the TT norms from the reference
    dev_row = (row_vector.norm(p=2) - full_norm) / full_norm
    dev_col = (col_vector.norm(p=2) - full_norm) / full_norm

    # for the tensor-train operator, compare against the Frobenius norm
    operator = self.t
    op_full_norm = np.linalg.norm(operator.matricize(), 'fro')
    dev_op = (operator.norm(p=2) - op_full_norm) / op_full_norm

    # all deviations must stay below the tolerance
    self.assertLess(dev_row, self.tol)
    self.assertLess(dev_col, self.tol)
    self.assertLess(dev_op, self.tol)
class TestTT(TestCase):
    """Unit tests for the tensor-train (TT) class and module-level constructors."""

    def setUp(self):
        """Generate random parameters for a tensor train"""

        # set tolerance for relative errors
        self.tol = 1e-7

        # set threshold and maximum rank for orthonormalization
        self.threshold = 1e-14
        self.max_rank = 50

        # generate random order in [3,5]
        self.order = np.random.randint(3, 6)

        # generate random ranks in [3,5]
        self.ranks = [1] + list(np.random.randint(3, high=6, size=self.order - 1)) + [1]

        # generate random row and column dimensions in [3,5]
        self.row_dims = list(np.random.randint(3, high=6, size=self.order))
        self.col_dims = list(np.random.randint(3, high=6, size=self.order))

        # define cores with entries in [-1,1]
        self.cores = [2 * np.random.rand(self.ranks[i], self.row_dims[i], self.col_dims[i], self.ranks[i + 1]) - 1
                      for i in range(self.order)]

        # construct tensor train
        self.t = TT(self.cores, threshold=self.threshold, max_rank=self.max_rank)

    def test_construction_from_cores(self):
        """test tensor train class for list of cores"""

        # check if all parameters are correct
        self.assertEqual(self.t.order, self.order)
        self.assertEqual(self.t.ranks, self.ranks)
        self.assertEqual(self.t.row_dims, self.row_dims)
        self.assertEqual(self.t.col_dims, self.col_dims)
        self.assertEqual(self.t.cores, self.cores)

        # check if construction fails if ranks are inconsistent
        with self.assertRaises(ValueError):
            TT([np.random.rand(1, 2, 3, 3), np.random.rand(4, 3, 2, 1)])

        # check if construction fails if cores are not 4-dimensional
        with self.assertRaises(ValueError):
            TT([np.random.rand(1, 2, 3), np.random.rand(3, 3, 2, 1)])

    def test_representation(self):
        """test string representation of tensor trains"""

        # get string representation
        string = self.t.__repr__()

        # check if string is not empty
        self.assertIsNotNone(string)

    def test_element(self):
        """test element extraction"""

        # indices of last entry
        indices = self.row_dims + self.col_dims

        # check if element extraction fails if indices are out of range
        with self.assertRaises(IndexError):
            indices[0] += 1
            self.t.element(indices)

        # check if element extraction fails if number of indices is not correct
        with self.assertRaises(ValueError):
            self.t.element(indices[1:])

        # check if element extraction fails if an index is not an integer
        with self.assertRaises(TypeError):
            # noinspection PyTypeChecker
            indices[0] = None
            self.t.element(indices)

        # check if element extraction fails if input is not a list of integers
        with self.assertRaises(TypeError):
            # noinspection PyTypeChecker
            self.t.element("a")

    def test_conversion(self):
        """test conversion to full format and element extraction"""

        # convert to full format
        t_full = self.t.full()

        # number of wrong entries
        err = 0

        # loop through all elements of the tensor
        # (np.int was removed in NumPy 1.20+; use the builtin int)
        for i in range(int(np.prod(self.row_dims + self.col_dims))):

            # convert flat index
            j = np.unravel_index(i, self.row_dims + self.col_dims)

            # extract elements of both representations
            v = self.t.element(list(j))
            w = t_full[j]

            # count wrong entries; use the magnitude of the relative error so
            # that negative deviations are detected as well
            if np.abs(v - w) / np.abs(v) > self.tol:
                err += 1

        # check if no wrong entry exists
        self.assertEqual(err, 0)

    def test_matricize(self):
        """test matricization of tensor trains"""

        # matricize t
        t_mat = self.t.matricize()

        # convert t to full array and reshape
        t_full = self.t.full().reshape([np.prod(self.row_dims), np.prod(self.col_dims)])

        # compute relative error
        rel_err = np.linalg.norm(t_mat - t_full) / np.linalg.norm(t_full)

        # check if relative error is smaller than tolerance
        self.assertLess(rel_err, self.tol)

    def test_addition(self):
        """test addition/subtraction of tensor trains"""

        # compute difference of t and itself
        t_diff = (self.t - self.t)

        # convert to full array and reshape to vector
        t_diff = t_diff.full().flatten()

        # convert t to full array and reshape to vector
        t_tmp = self.t.full().flatten()

        # compute relative error
        rel_err = np.linalg.norm(t_diff) / np.linalg.norm(t_tmp)

        # check if relative error is smaller than tolerance
        self.assertLess(rel_err, self.tol)

        # check if addition fails when inputs do not have the same dimensions
        with self.assertRaises(ValueError):
            cores = self.t.cores
            cores[0] = np.random.rand(self.ranks[0], self.row_dims[0] + 1, self.col_dims[0], self.ranks[1])
            self.t + TT(cores)

        # check if addition fails when input is not a tensor train
        with self.assertRaises(TypeError):
            self.t + 0

    def test_scalar_multiplication(self):
        """test scalar multiplication"""

        # random constant in [0,10]
        c = 10 * np.random.rand(1)[0]

        # multiply tensor train with scalar value, convert to full array, and reshape to vector
        t_tmp = c * self.t
        t_tmp = t_tmp.full().flatten()

        # convert t to full array and reshape to vector
        t_full = self.t.full().flatten()

        # compute error
        err = np.linalg.norm(t_tmp) / np.linalg.norm(t_full) - c

        # check if error is smaller than tolerance
        self.assertLess(err, self.tol)

        # check if multiplication fails when input is neither integer, float, nor complex
        with self.assertRaises(TypeError):
            self.t * "a"

    def test_transpose(self):
        """test transpose of tensor trains"""

        # transpose in TT format, convert to full format, and reshape to vector
        t_trans = self.t.transpose().full().flatten()

        # convert to full format, transpose, and reshape to vector
        t_full = self.t.full().transpose(
            list(np.arange(self.order) + self.order) + list(np.arange(self.order))).flatten()

        # compute relative error
        rel_err = np.linalg.norm(t_trans - t_full) / np.linalg.norm(t_full)

        # check if relative error is smaller than tolerance
        self.assertLess(rel_err, self.tol)

    def test_multiplication(self):
        """test multiplication of tensor trains"""

        # multiply t with its transpose
        t_tmp = self.t.transpose().dot(self.t)

        # convert to full format and reshape to vector
        t_tmp = t_tmp.full().flatten()

        # convert t to full format and matricize
        t_full = self.t.full().reshape([np.prod(self.row_dims), np.prod(self.col_dims)])

        # multiply with its transpose and flatten
        t_full = (t_full.transpose().dot(t_full)).flatten()

        # compute relative error
        rel_err = np.linalg.norm(t_tmp - t_full) / np.linalg.norm(t_full)

        # check if relative error is smaller than tolerance
        self.assertLess(rel_err, self.tol)

        # check if multiplication fails when dimensions do not match
        with self.assertRaises(ValueError):
            t_tmp = self.t.copy()
            t_tmp.cores[0] = np.random.rand(self.ranks[0], self.row_dims[0] + 1, self.col_dims[0], self.ranks[1])
            self.t.transpose().dot(t_tmp)

        # check if multiplication fails when input is not a tensor train
        with self.assertRaises(TypeError):
            self.t.dot(0)

    def test_construction_from_array(self):
        """test tensor train class for arrays"""

        # convert t to full format and construct tensor train from array
        t_full = self.t.full()

        # construct tensor train
        t_tmp = TT(t_full, threshold=self.threshold, max_rank=self.max_rank)

        # compute difference, convert to full format, and flatten
        t_diff = (self.t - t_tmp).full().flatten()

        # compute relative error
        rel_err = np.linalg.norm(t_diff) / np.linalg.norm(t_full.flatten())

        # check if relative error is smaller than tolerance
        self.assertLess(rel_err, self.tol)

        # check if construction fails if number of dimensions is not a multiple of 2
        with self.assertRaises(ValueError):
            TT(np.random.rand(1, 2, 3))

        # check if construction fails if input is neither a list of cores nor an ndarray
        with self.assertRaises(TypeError):
            TT(None)

    def test_operator(self):
        """test operator check"""

        # check t
        t_check = self.t.isoperator()

        # construct non-operator tensor trains
        cores = [self.cores[i][:, :, 0:1, :] for i in range(self.order)]
        u = TT(cores)
        cores = [self.cores[i][:, 0:1, :, :] for i in range(self.order)]
        v = TT(cores)

        # check u and v
        u_check = u.isoperator()
        v_check = v.isoperator()

        # check if operator checks are correct
        self.assertTrue(t_check)
        self.assertFalse(u_check)
        self.assertFalse(v_check)

    def test_left_orthonormalization(self):
        """test left-orthonormalization"""

        # construct non-operator tensor train
        cores = [self.cores[i][:, :, 0:1, :] for i in range(self.order)]
        t_col = TT(cores)

        # left-orthonormalize t
        t_left = t_col.ortho_left(threshold=1e-14)

        # test if cores are left-orthonormal
        err = 0
        for i in range(self.order - 1):
            c = np.tensordot(t_left.cores[i], t_left.cores[i], axes=([0, 1], [0, 1])).squeeze()
            if np.linalg.norm(c - np.eye(t_left.ranks[i + 1])) > self.tol:
                err += 1

        # convert t_col to full format and flatten
        t_full = t_col.full().flatten()

        # compute relative error
        rel_err = np.linalg.norm(t_left.full().flatten() - t_full) / np.linalg.norm(t_full)

        # check if t_left is left-orthonormal and equal to t_col
        self.assertEqual(err, 0)
        self.assertLess(rel_err, self.tol)

        # check if orthonormalization fails if maximum rank is not positive
        with self.assertRaises(ValueError):
            t_col.ortho_left(max_rank=0)

        # check if orthonormalization fails if threshold is negative
        with self.assertRaises(ValueError):
            t_col.ortho_left(threshold=-1)

        # check if orthonormalization fails if start and end indices are not integers
        with self.assertRaises(TypeError):
            t_col.ortho_left(start_index="a")
            t_col.ortho_left(end_index="b")

    def test_right_orthonormalization(self):
        """test right-orthonormalization"""

        # construct non-operator tensor train
        cores = [self.cores[i][:, :, 0:1, :] for i in range(self.order)]
        t_col = TT(cores)

        # right-orthonormalize t
        t_right = t_col.ortho_right(threshold=1e-14)

        # test if cores are right-orthonormal
        err = 0
        for i in range(1, self.order):
            c = np.tensordot(t_right.cores[i], t_right.cores[i], axes=([1, 3], [1, 3])).squeeze()
            if np.linalg.norm(c - np.eye(t_right.ranks[i])) > self.tol:
                err += 1

        # convert t_col to full format and flatten
        t_full = t_col.full().flatten()

        # compute relative error
        rel_err = np.linalg.norm(t_right.full().flatten() - t_full) / np.linalg.norm(t_full)

        # check if t_right is right-orthonormal and equal to t_col
        self.assertEqual(err, 0)
        self.assertLess(rel_err, self.tol)

        # check if orthonormalization fails if maximum rank is not positive
        with self.assertRaises(ValueError):
            t_col.ortho_right(max_rank=0)

        # check if orthonormalization fails if threshold is negative
        with self.assertRaises(ValueError):
            t_col.ortho_right(threshold=-1)

        # check if orthonormalization fails if start and end indices are not integers
        with self.assertRaises(TypeError):
            t_col.ortho_right(start_index="a")
            t_col.ortho_right(end_index="b")

    def test_orthonormalization(self):
        """test orthonormalization"""

        # construct non-operator tensor train
        cores = [self.cores[i][:, :, 0:1, :] for i in range(self.order)]
        t_col = TT(cores)

        # orthonormalize t
        t_right = t_col.ortho(threshold=1e-14)

        # test if cores are right-orthonormal
        err = 0
        for i in range(1, self.order):
            c = np.tensordot(t_right.cores[i], t_right.cores[i], axes=([1, 3], [1, 3])).squeeze()
            if np.linalg.norm(c - np.eye(t_right.ranks[i])) > self.tol:
                err += 1

        # convert t_col to full format and flatten
        t_full = t_col.full().flatten()

        # compute relative error
        rel_err = np.linalg.norm(t_right.full().flatten() - t_full) / np.linalg.norm(t_full)

        # check if t_right is right-orthonormal and equal to t_col
        self.assertEqual(err, 0)
        self.assertLess(rel_err, self.tol)

        # check if orthonormalization fails if maximum rank is not positive
        with self.assertRaises(ValueError):
            t_col.ortho(max_rank=0)

        # check if orthonormalization fails if threshold is negative
        with self.assertRaises(ValueError):
            t_col.ortho(threshold=-1)

    def test_1_norm(self):
        """test 1-norm"""

        # construct tensor train without negative entries
        cores = [np.abs(self.cores[i][:, :, 0:1, :]) for i in range(self.order)]
        tt_col = TT(cores)

        # transpose
        tt_row = tt_col.transpose()

        # convert to full matrix
        tt_mat = tt_col.matricize()

        # compute norms
        norm_tt_row = tt_row.norm(p=1)
        norm_tt_col = tt_col.norm(p=1)
        norm_full = np.linalg.norm(tt_mat, 1)

        # compute relative errors
        rel_err_row = (norm_tt_row - norm_full) / norm_full
        rel_err_col = (norm_tt_col - norm_full) / norm_full

        # construct tensor-train operator without negative entries
        cores = [np.abs(self.cores[i][:, :, :, :]) for i in range(self.order)]
        tt_op = TT(cores)

        # convert to full matrix
        tt_mat = tt_op.matricize()

        # compute norms
        norm_tt = tt_op.norm(p=1)
        norm_full = np.linalg.norm(tt_mat, 1)

        # compute relative error
        rel_err = (norm_tt - norm_full) / norm_full

        # check if relative errors are smaller than tolerance
        self.assertLess(rel_err_row, self.tol)
        self.assertLess(rel_err_col, self.tol)
        self.assertLess(rel_err, self.tol)

    def test_2_norm(self):
        """test 2-norm"""

        # construct tensor train
        cores = [self.cores[i][:, :, 0:1, :] for i in range(self.order)]
        tt_col = TT(cores)

        # transpose
        tt_row = tt_col.transpose()

        # convert to full matrix
        tt_mat = tt_col.matricize()

        # compute norms
        norm_tt_row = tt_row.norm(p=2)
        norm_tt_col = tt_col.norm(p=2)
        norm_full = np.linalg.norm(tt_mat, 2)

        # compute relative errors
        rel_err_row = (norm_tt_row - norm_full) / norm_full
        rel_err_col = (norm_tt_col - norm_full) / norm_full

        # define tensor-train operator
        tt_op = self.t

        # convert to full matrix
        tt_mat = tt_op.matricize()

        # compute norms (Frobenius norm for the operator case)
        norm_tt = tt_op.norm(p=2)
        norm_full = np.linalg.norm(tt_mat, 'fro')

        # compute relative error
        rel_err = (norm_tt - norm_full) / norm_full

        # check if relative errors are smaller than tolerance
        self.assertLess(rel_err_row, self.tol)
        self.assertLess(rel_err_col, self.tol)
        self.assertLess(rel_err, self.tol)

    def test_p_norm(self):
        """test for p-norm, p>2"""

        # p-norms with p>2 are not supported
        with self.assertRaises(ValueError):
            self.t.norm(p=3)

    def test_qtt2tt_tt2qtt(self):
        """test qtt2tt and tt2qtt"""

        # suppose t to be in QTT format and contract first two cores of t
        t_tt = self.t.qtt2tt([2, self.order - 2])

        # convert t_tt to full array, and flatten
        t_tt_full = t_tt.full().flatten()

        # split the cores of t_tt, convert to full array, and flatten
        t_qtt = t_tt.tt2qtt([self.row_dims[0:2], self.row_dims[2:]],
                            [self.col_dims[0:2], self.col_dims[2:]],
                            threshold=1e-14)

        # convert t_qtt to full array, and flatten
        t_qtt_full = t_qtt.full().flatten()

        # convert t to full format and flatten
        t_full = self.t.full().flatten()

        # compute relative errors
        rel_err_tt = np.linalg.norm(t_tt_full - t_full) / np.linalg.norm(t_full)
        rel_err_qtt = np.linalg.norm(t_qtt_full - t_full) / np.linalg.norm(t_full)

        # check if relative errors are smaller than tolerance
        self.assertLess(rel_err_tt, self.tol)
        self.assertLess(rel_err_qtt, self.tol)

    def test_pinv(self):
        """test pinv"""

        # construct non-operator tensor train
        cores = [self.cores[i][:, :, 0:1, :] for i in range(self.order)]
        t = TT(cores)

        # compute pseudoinverse
        t_pinv = TT.pinv(t, self.order - 1)

        # matricize tensor trains
        t = t.full().reshape([np.prod(self.row_dims[:-1]), self.row_dims[-1]])
        t_pinv = t_pinv.full().reshape([np.prod(self.row_dims[:-1]), self.row_dims[-1]]).transpose()

        # compute relative errors for the four Moore-Penrose conditions
        rel_err_1 = np.linalg.norm(t.dot(t_pinv).dot(t) - t) / np.linalg.norm(t)
        rel_err_2 = np.linalg.norm(t_pinv.dot(t).dot(t_pinv) - t_pinv) / np.linalg.norm(t_pinv)
        rel_err_3 = np.linalg.norm((t.dot(t_pinv)).transpose() - t.dot(t_pinv)) / np.linalg.norm(t.dot(t_pinv))
        rel_err_4 = np.linalg.norm((t_pinv.dot(t)).transpose() - t_pinv.dot(t)) / np.linalg.norm(t_pinv.dot(t))

        # check if relative errors are smaller than tolerance
        self.assertLess(rel_err_1, self.tol)
        self.assertLess(rel_err_2, self.tol)
        self.assertLess(rel_err_3, self.tol)
        self.assertLess(rel_err_4, self.tol)

    def test_zeros(self):
        """test tensor train of all zeros"""

        # construct tensor train of all zeros
        t_zeros = tt.zeros(self.t.row_dims, self.t.col_dims)

        # compute norm
        t_norm = np.linalg.norm(t_zeros.full().flatten())

        # check if norm is 0
        self.assertEqual(t_norm, 0)

    def test_ones(self):
        """test tensor train of all ones"""

        # construct tensor train of all ones, convert to full format, and flatten
        t_ones = tt.ones(self.row_dims, self.col_dims).full().flatten()

        # construct full array of all ones
        # (np.int was removed in NumPy 1.20+; use the builtin int)
        t_full = np.ones(int(np.prod(self.row_dims)) * int(np.prod(self.col_dims)))

        # compute relative error
        rel_err = np.linalg.norm(t_ones - t_full) / np.linalg.norm(t_full)

        # check if relative error is smaller than tolerance
        self.assertLess(rel_err, self.tol)

    def test_eye(self):
        """test identity tensor train"""

        # construct identity tensor train, convert to full format, and flatten
        t_eye = tt.eye(self.row_dims).full().flatten()

        # construct identity matrix and flatten
        # (np.int was removed in NumPy 1.20+; use the builtin int)
        t_full = np.eye(int(np.prod(self.row_dims))).flatten()

        # compute relative error
        rel_err = np.linalg.norm(t_eye - t_full) / np.linalg.norm(t_full)

        # check if relative error is smaller than tolerance
        self.assertLess(rel_err, self.tol)

    def test_unit(self):
        """test unit tensor train"""

        # construct unit tensor train, convert to full format, and flatten
        t_unit = tt.unit(self.row_dims, [0] * self.order).full().flatten()

        # construct unit vector
        # (np.int was removed in NumPy 1.20+; use the builtin int)
        t_full = np.eye(int(np.prod(self.row_dims)), 1).T

        # compute relative error
        rel_err = np.linalg.norm(t_unit - t_full) / np.linalg.norm(t_full)

        # check if relative error is smaller than tolerance
        self.assertLess(rel_err, self.tol)

    def test_random(self):
        """test random tensor train"""

        # construct random tensor train
        t_rand = tt.rand(self.row_dims, self.col_dims)

        # check if attributes are correct
        self.assertEqual(t_rand.order, self.order)
        self.assertEqual(t_rand.row_dims, self.row_dims)
        self.assertEqual(t_rand.col_dims, self.col_dims)
        self.assertEqual(t_rand.ranks, [1] * (self.order + 1))

    def test_uniform(self):
        """test uniform tensor train"""

        # construct uniform tensor train with prescribed norm
        norm = 10 * np.random.rand()
        t_uni = tt.uniform(self.row_dims, ranks=self.ranks[1], norm=norm)

        # compute norms
        norm_tt = t_uni.norm()

        # compute relative error
        rel_err = (norm_tt - norm) / norm

        # check if relative error is smaller than tolerance
        self.assertLess(rel_err, self.tol)
class TestTT(TestCase):
    """Unit tests for the tensor-train (TT) class (reduced variant without error-path checks)."""

    def setUp(self):
        """Generate random parameters for a tensor train"""

        # set tolerance for relative errors
        self.tol = 10e-10

        # generate random order in [3,5]
        self.order = np.random.randint(3, 6)

        # generate random ranks in [3,5]
        self.ranks = [1] + list(np.random.randint(3, high=6, size=self.order - 1)) + [1]

        # generate random row and column dimensions in [3,5]
        self.row_dims = list(np.random.randint(3, high=6, size=self.order))
        self.col_dims = list(np.random.randint(3, high=6, size=self.order))

        # define cores with entries in [-1,1]
        self.cores = [2 * np.random.rand(self.ranks[i], self.row_dims[i], self.col_dims[i], self.ranks[i + 1]) - 1
                      for i in range(self.order)]

        # construct tensor train
        self.t = TT(self.cores)

    def test_construction_from_cores(self):
        """test tensor train class for list of cores"""

        # check if all parameters are correct
        self.assertEqual(self.t.order, self.order)
        self.assertEqual(self.t.ranks, self.ranks)
        self.assertEqual(self.t.row_dims, self.row_dims)
        self.assertEqual(self.t.col_dims, self.col_dims)
        self.assertEqual(self.t.cores, self.cores)

    def test_conversion(self):
        """test conversion to full format and element extraction"""

        # convert to full format
        t_full = self.t.full()

        # number of wrong entries
        err = 0

        # loop through all elements of the tensor
        # (np.int was removed in NumPy 1.20+; use the builtin int)
        for i in range(int(np.prod(self.row_dims + self.col_dims))):

            # convert flat index
            j = np.unravel_index(i, self.row_dims + self.col_dims)

            # extract elements of both representations
            v = self.t.element(list(j))
            w = t_full[j]

            # count wrong entries; use the magnitude of the relative error so
            # that negative deviations are detected as well
            if np.abs(v - w) / np.abs(v) > self.tol:
                err += 1

        # check if no wrong entry exists
        self.assertEqual(err, 0)

    def test_matricize(self):
        """test matricization of tensor trains"""

        # matricize t
        t_mat = self.t.matricize()

        # convert t to full array and reshape
        t_full = self.t.full().reshape([np.prod(self.row_dims), np.prod(self.col_dims)])

        # compute relative error
        rel_err = np.linalg.norm(t_mat - t_full) / np.linalg.norm(t_full)

        # check if relative error is smaller than tolerance
        self.assertLess(rel_err, self.tol)

    def test_addition(self):
        """test addition/subtraction of tensor trains"""

        # compute difference of t and itself
        t_diff = (self.t - self.t)

        # convert to full array and reshape to vector
        t_diff = t_diff.full().flatten()

        # convert t to full array and reshape to vector
        t_tmp = self.t.full().flatten()

        # compute relative error
        rel_err = np.linalg.norm(t_diff) / np.linalg.norm(t_tmp)

        # check if relative error is smaller than tolerance
        self.assertLess(rel_err, self.tol)

    def test_scalar_multiplication(self):
        """test scalar multiplication"""

        # random constant in [0,10]
        c = 10 * np.random.rand(1)[0]

        # multiply tensor train with scalar value, convert to full array, and reshape to vector
        # (use the operator instead of calling TT.__mul__ directly)
        t_tmp = self.t * c
        t_tmp = t_tmp.full().flatten()

        # convert t to full array and reshape to vector
        t_full = self.t.full().flatten()

        # compute error
        err = np.linalg.norm(t_tmp) / np.linalg.norm(t_full) - c

        # check if error is smaller than tolerance
        self.assertLess(err, self.tol)

    def test_transpose(self):
        """test transpose of tensor trains"""

        # transpose in TT format, convert to full format, and reshape to vector
        t_trans = self.t.transpose().full().flatten()

        # convert to full format, transpose, and reshape to vector
        t_full = self.t.full().transpose(
            list(np.arange(self.order) + self.order) + list(np.arange(self.order))).flatten()

        # compute relative error
        rel_err = np.linalg.norm(t_trans - t_full) / np.linalg.norm(t_full)

        # check if relative error is smaller than tolerance
        self.assertLess(rel_err, self.tol)

    def test_multiplication(self):
        """test multiplication of tensor trains"""

        # multiply t with its transpose
        t_tmp = self.t.transpose() @ self.t

        # convert to full format and reshape to vector
        t_tmp = t_tmp.full().flatten()

        # convert t to full format and matricize
        t_full = self.t.full().reshape([np.prod(self.row_dims), np.prod(self.col_dims)])

        # multiply with its transpose and flatten
        t_full = (t_full.transpose() @ t_full).flatten()

        # compute relative error
        rel_err = np.linalg.norm(t_tmp - t_full) / np.linalg.norm(t_full)

        # check if relative error is smaller than tolerance
        self.assertLess(rel_err, self.tol)

    def test_construction_from_array(self):
        """test tensor train class for arrays"""

        # convert t to full format and construct tensor train from array
        t_full = self.t.full()

        # construct tensor train
        t_tmp = TT(t_full)

        # compute difference, convert to full format, and flatten
        t_diff = (self.t - t_tmp).full().flatten()

        # compute relative error
        rel_err = np.linalg.norm(t_diff) / np.linalg.norm(t_full.flatten())

        # check if relative error is smaller than tolerance
        self.assertLess(rel_err, self.tol)

    def test_operator(self):
        """test operator check"""

        # check t
        t_check = self.t.isoperator()

        # construct non-operator tensor trains
        cores = [self.cores[i][:, :, 0:1, :] for i in range(self.order)]
        u = TT(cores)
        cores = [self.cores[i][:, 0:1, :, :] for i in range(self.order)]
        v = TT(cores)

        # check u and v
        u_check = u.isoperator()
        v_check = v.isoperator()

        # check if operator checks are correct
        self.assertTrue(t_check)
        self.assertFalse(u_check)
        self.assertFalse(v_check)

    def test_left_orthonormalization(self):
        """test left-orthonormalization"""

        # construct non-operator tensor train
        cores = [self.cores[i][:, :, 0:1, :] for i in range(self.order)]
        t_col = TT(cores)

        # left-orthonormalize t
        t_left = t_col.ortho_left()

        # test if cores are left-orthonormal
        err = 0
        for i in range(self.order - 1):
            c = np.tensordot(t_left.cores[i], t_left.cores[i], axes=([0, 1], [0, 1])).squeeze()
            if np.linalg.norm(c - np.eye(t_left.ranks[i + 1])) > self.tol:
                err += 1

        # convert t_col to full format and flatten
        t_full = t_col.full().flatten()

        # compute relative error
        rel_err = np.linalg.norm(t_left.full().flatten() - t_full) / np.linalg.norm(t_full)

        # check if t_left is left-orthonormal and equal to t_col
        self.assertEqual(err, 0)
        self.assertLess(rel_err, self.tol)

    def test_right_orthonormalization(self):
        """test right-orthonormalization"""

        # construct non-operator tensor train
        cores = [self.cores[i][:, :, 0:1, :] for i in range(self.order)]
        t_col = TT(cores)

        # right-orthonormalize t
        t_right = t_col.ortho_right()

        # test if cores are right-orthonormal
        err = 0
        for i in range(1, self.order):
            c = np.tensordot(t_right.cores[i], t_right.cores[i], axes=([1, 3], [1, 3])).squeeze()
            if np.linalg.norm(c - np.eye(t_right.ranks[i])) > self.tol:
                err += 1

        # convert t_col to full format and flatten
        t_full = t_col.full().flatten()

        # compute relative error
        rel_err = np.linalg.norm(t_right.full().flatten() - t_full) / np.linalg.norm(t_full)

        # check if t_right is right-orthonormal and equal to t_col
        self.assertEqual(err, 0)
        self.assertLess(rel_err, self.tol)

    def test_1_norm(self):
        """test 1-norm"""

        # construct non-operator tensor train without negative entries
        cores = [np.abs(self.cores[i][:, :, 0:1, :]) for i in range(self.order)]
        t_col = TT(cores)

        # convert to full array and flatten
        t_full = t_col.full().flatten()

        # compute norms
        norm_tt = t_col.norm(p=1)
        norm_full = np.linalg.norm(t_full, 1)

        # compute relative error
        rel_err = (norm_tt - norm_full) / norm_full

        # check if relative error is smaller than tolerance
        self.assertLess(rel_err, self.tol)

    def test_2_norm(self):
        """test 2-norm"""

        # convert to full array and flatten
        t_full = self.t.full().flatten()

        # compute norms
        norm_tt = self.t.norm()
        norm_full = np.linalg.norm(t_full)

        # compute relative error
        rel_err = (norm_tt - norm_full) / norm_full

        # check if relative error is smaller than tolerance
        self.assertLess(rel_err, self.tol)

    def test_qtt2tt_tt2qtt(self):
        """test qtt2tt and tt2qtt"""

        # suppose t to be in QTT format and contract first two cores of t
        t_tt = self.t.qtt2tt([2, self.order - 2])

        # convert t_tt to full array, and flatten
        t_tt_full = t_tt.full().flatten()

        # split the cores of t_tt, convert to full array, and flatten
        t_qtt = t_tt.tt2qtt([self.row_dims[0:2], self.row_dims[2:]],
                            [self.col_dims[0:2], self.col_dims[2:]])

        # convert t_qtt to full array, and flatten
        t_qtt_full = t_qtt.full().flatten()

        # convert t to full format and flatten
        t_full = self.t.full().flatten()

        # compute relative errors
        rel_err_tt = np.linalg.norm(t_tt_full - t_full) / np.linalg.norm(t_full)
        rel_err_qtt = np.linalg.norm(t_qtt_full - t_full) / np.linalg.norm(t_full)

        # check if relative errors are smaller than tolerance
        self.assertLess(rel_err_tt, self.tol)
        self.assertLess(rel_err_qtt, self.tol)

    def test_pinv(self):
        """test pinv"""

        # construct non-operator tensor train
        cores = [self.cores[i][:, :, 0:1, :] for i in range(self.order)]
        t = TT(cores)

        # compute pseudoinverse
        t_pinv = TT.pinv(t, self.order - 1)

        # matricize tensor trains
        t = t.full().reshape([np.prod(self.row_dims[:-1]), self.row_dims[-1]])
        t_pinv = t_pinv.full().reshape([np.prod(self.row_dims[:-1]), self.row_dims[-1]]).transpose()

        # compute relative errors for the four Moore-Penrose conditions
        rel_err_1 = np.linalg.norm(t @ t_pinv @ t - t) / np.linalg.norm(t)
        rel_err_2 = np.linalg.norm(t_pinv @ t @ t_pinv - t_pinv) / np.linalg.norm(t_pinv)
        rel_err_3 = np.linalg.norm((t @ t_pinv).transpose() - t @ t_pinv) / np.linalg.norm(t @ t_pinv)
        rel_err_4 = np.linalg.norm((t_pinv @ t).transpose() - t_pinv @ t) / np.linalg.norm(t_pinv @ t)

        # check if relative errors are smaller than tolerance
        self.assertLess(rel_err_1, self.tol)
        self.assertLess(rel_err_2, self.tol)
        self.assertLess(rel_err_3, self.tol)
        self.assertLess(rel_err_4, self.tol)

    def test_zeros(self):
        """test tensor train of all zeros"""

        # construct tensor train of all zeros
        t_zeros = tt.zeros(self.t.row_dims, self.t.col_dims)

        # compute norm
        t_norm = np.linalg.norm(t_zeros.full().flatten())

        # check if norm is 0
        self.assertEqual(t_norm, 0)

    def test_ones(self):
        """test tensor train of all ones"""

        # construct tensor train of all ones, convert to full format, and flatten
        t_ones = tt.ones(self.row_dims, self.col_dims).full().flatten()

        # construct full array of all ones
        # (np.int was removed in NumPy 1.20+; use the builtin int)
        t_full = np.ones(int(np.prod(self.row_dims)) * int(np.prod(self.col_dims)))

        # compute relative error
        rel_err = np.linalg.norm(t_ones - t_full) / np.linalg.norm(t_full)

        # check if relative error is smaller than tolerance
        self.assertLess(rel_err, self.tol)

    def test_identity(self):
        """test identity tensor train"""

        # construct identity tensor train, convert to full format, and flatten
        t_eye = tt.eye(self.row_dims).full().flatten()

        # construct identity matrix and flatten
        # (np.int was removed in NumPy 1.20+; use the builtin int)
        t_full = np.eye(int(np.prod(self.row_dims))).flatten()

        # compute relative error
        rel_err = np.linalg.norm(t_eye - t_full) / np.linalg.norm(t_full)

        # check if relative error is smaller than tolerance
        self.assertLess(rel_err, self.tol)

    def test_uniform(self):
        """test uniform tensor train"""

        # construct uniform tensor train with prescribed norm
        norm = 10 * np.random.rand()
        t_uni = tt.uniform(self.row_dims, ranks=self.ranks, norm=norm)

        # compute norms
        norm_tt = t_uni.norm()

        # compute relative error
        rel_err = (norm_tt - norm) / norm

        # check if relative error is smaller than tolerance
        self.assertLess(rel_err, self.tol)