Exemplo n.º 1
0
 def testAdd(self):
     # Addition of two TT-tensors must match addition of their dense forms,
     # both via ops.add and via the overloaded "+" operator.
     tt_a = initializers.random_tensor((2, 1, 3, 4), tt_rank=2)
     tt_b = initializers.random_tensor((2, 1, 3, 4),
                                       tt_rank=[1, 2, 4, 3, 1])
     with self.test_session() as sess:
         sum_fn = ops.full(ops.add(tt_a, tt_b))
         sum_op = ops.full(tt_a + tt_b)
         sum_dense = ops.full(tt_a) + ops.full(tt_b)
         fn_val, op_val, dense_val = sess.run([sum_fn, sum_op, sum_dense])
         self.assertAllClose(fn_val, dense_val)
         self.assertAllClose(op_val, dense_val)
Exemplo n.º 2
0
 def testMultiply(self):
     # Elementwise product of two TT-tensors must match the product of
     # their dense forms, via ops.multiply and the overloaded "*".
     first = initializers.random_tensor((1, 2, 3, 4), tt_rank=2)
     second = initializers.random_tensor((1, 2, 3, 4),
                                         tt_rank=[1, 1, 4, 3, 1])
     with self.test_session() as sess:
         prod_fn = ops.full(ops.multiply(first, second))
         prod_op = ops.full(first * second)
         prod_dense = ops.full(first) * ops.full(second)
         fn_val, op_val, dense_val = sess.run(
             [prod_fn, prod_op, prod_dense])
         self.assertAllClose(fn_val, dense_val)
         self.assertAllClose(op_val, dense_val)
Exemplo n.º 3
0
    def testRandomTensor(self):
        """Invalid shapes/ranks raise ValueError; mean kwarg is unsupported."""
        shapes = [[3, 4], [3, 4], [3, 4], [3, 4], [1, -2], [1.1, 2], [[3, 4]]]
        tt_ranks = [-2, 1.5, [2, 3, 4, 5], [1.5], 2, 2, 2]
        # Materialize the pairs: zip() returns a one-shot iterator in
        # Python 3, so the second loop below would silently iterate zero
        # times (testing nothing) without the list().
        bad_cases = list(zip(shapes, tt_ranks))
        for case in bad_cases:
            with self.assertRaises(ValueError):
                initializers.random_tensor(case[0], tt_rank=case[1])

        for case in bad_cases:
            with self.assertRaises(ValueError):
                initializers.tensor_with_random_cores(case[0], tt_rank=case[1])

        # The mean parameter of random_tensor is not implemented.
        with self.assertRaises(NotImplementedError):
            initializers.random_tensor([1, 2], mean=1.0)
Exemplo n.º 4
0
 def testOrthogonalizeRightToLeft(self):
     # Right-to-left orthogonalization must preserve the represented tensor,
     # truncate unattainable TT-ranks, and leave every non-first core with
     # orthonormal rows.
     shape = (2, 4, 3, 3)
     tt_ranks = (1, 5, 2, 17, 1)
     # Rank 17 before the last mode is not attainable for these mode sizes,
     # so orthogonalization is expected to reduce it to 3.
     updated_tt_ranks = (1, 5, 2, 3, 1)
     tens = initializers.random_tensor(shape,
                                       tt_rank=tt_ranks,
                                       dtype=self.dtype)
     orthogonal = decompositions.orthogonalize_tt_cores(tens,
                                                        left_to_right=False)
     with self.test_session() as sess:
         tens_val, orthogonal_val = sess.run(
             [ops.full(tens), ops.full(orthogonal)])
         # The dense tensor itself must be unchanged (up to numerics).
         self.assertAllClose(tens_val, orthogonal_val, atol=1e-5, rtol=1e-5)
         dynamic_tt_ranks = shapes.tt_ranks(orthogonal).eval()
         self.assertAllEqual(updated_tt_ranks, dynamic_tt_ranks)
         # Check that the TT-cores are orthogonal.
         for core_idx in range(1, 4):
             core = orthogonal.tt_cores[core_idx]
             # Flatten the core to (left_rank, mode * right_rank) so row
             # orthonormality can be checked with a single matmul.
             core = tf.reshape(
                 core, (updated_tt_ranks[core_idx],
                        shape[core_idx] * updated_tt_ranks[core_idx + 1]))
             should_be_eye = tf.matmul(core, tf.transpose(core))
             should_be_eye_val = sess.run(should_be_eye)
             self.assertAllClose(np.eye(updated_tt_ranks[core_idx]),
                                 should_be_eye_val)
Exemplo n.º 5
0
 def testMultiply(self):
     # Elementwise TT multiplication must agree with dense multiplication,
     # both via ops.multiply and via the overloaded "*" operator.
     lhs = initializers.random_tensor((1, 2, 3, 4),
                                      tt_rank=2,
                                      dtype=self.dtype)
     rhs = initializers.random_tensor((1, 2, 3, 4),
                                      tt_rank=[1, 1, 4, 3, 1],
                                      dtype=self.dtype)
     via_fn = ops.full(ops.multiply(lhs, rhs))
     via_op = ops.full(lhs * rhs)
     expected = ops.full(lhs) * ops.full(rhs)
     fn_val, op_val, expected_val = self.evaluate(
         [via_fn, via_op, expected])
     self.assertAllClose(fn_val, expected_val)
     self.assertAllClose(op_val, expected_val)
Exemplo n.º 6
0
    def testTensorIndexing(self):
        # Slicing a TT-tensor must match slicing its dense form for a mix
        # of integer indices and slices; wrong index arity must raise.
        tens = initializers.random_tensor((3, 3, 4), dtype=self.dtype)
        with self.test_session() as sess:
            # All-slice indexing: keeps every dimension.
            desired = ops.full(tens)[:, :, :]
            actual = ops.full(tens[:, :, :])
            desired, actual = sess.run([desired, actual])
            self.assertAllClose(desired, actual)
            # Integer index in the first dimension drops it.
            desired = ops.full(tens)[1, :, :]
            actual = ops.full(tens[1, :, :])
            desired, actual = sess.run([desired, actual])
            self.assertAllClose(desired, actual)
            # Length-1 slice keeps the dimension; integer index drops it.
            desired = ops.full(tens)[1:2, 1, :]
            actual = ops.full(tens[1:2, 1, :])
            desired, actual = sess.run([desired, actual])
            self.assertAllClose(desired, actual)
            # Explicit full-range slice plus a trailing integer index.
            desired = ops.full(tens)[0:3, :, 3]
            actual = ops.full(tens[0:3, :, 3])
            desired, actual = sess.run([desired, actual])
            self.assertAllClose(desired, actual)
            # Two integer indices leave a one-dimensional result.
            desired = ops.full(tens)[1, :, 3]
            actual = ops.full(tens[1, :, 3])
            desired, actual = sess.run([desired, actual])
            self.assertAllClose(desired, actual)

            # Wrong number of dims.
            with self.assertRaises(ValueError):
                tens[1, :, 3, :]
            with self.assertRaises(ValueError):
                tens[1, 1]
Exemplo n.º 7
0
 def testAdd(self):
     # TT addition and the overloaded "+" must both match dense addition.
     first = initializers.random_tensor((2, 1, 3, 4),
                                        tt_rank=2,
                                        dtype=self.dtype)
     second = initializers.random_tensor((2, 1, 3, 4),
                                         tt_rank=[1, 2, 4, 3, 1],
                                         dtype=self.dtype)
     via_fn = ops.full(ops.add(first, second))
     via_op = ops.full(first + second)
     expected = ops.full(first) + ops.full(second)
     fn_val, op_val, expected_val = self.evaluate(
         [via_fn, via_op, expected])
     self.assertAllClose(fn_val, expected_val)
     self.assertAllClose(op_val, expected_val)
Exemplo n.º 8
0
 def testMultiplyUnknownBatchSizeBroadcasting(self):
     # Multiplication must broadcast a TT-batch with an unknown (None)
     # batch dimension against batch-size-1 and non-batch operands, in
     # either argument order.
     c1 = tf.placeholder(tf.float32, [None, 1, 3, 2])
     c2 = tf.placeholder(tf.float32, [None, 2, 3, 1])
     tt_a = TensorTrainBatch([c1, c2])
     tt_b = initializers.random_tensor_batch((3, 3),
                                             tt_rank=3,
                                             batch_size=1)
     tt_c = initializers.random_tensor((3, 3), tt_rank=3)
     res_ab = ops.full(ops.multiply(tt_a, tt_b))
     res_ba = ops.full(ops.multiply(tt_b, tt_a))
     res_ac = ops.full(ops.multiply(tt_a, tt_c))
     res_ca = ops.full(ops.multiply(tt_c, tt_a))
     res_desired_ab = ops.full(tt_a) * ops.full(tt_b)
     res_desired_ac = ops.full(tt_a) * ops.full(tt_c)
     to_run = [
         res_ab, res_ba, res_ac, res_ca, res_desired_ab, res_desired_ac
     ]
     # Feed an actual batch of 7 to exercise the dynamic batch dimension.
     feed_dict = {
         c1: np.random.rand(7, 1, 3, 2),
         c2: np.random.rand(7, 2, 3, 1)
     }
     with self.test_session() as sess:
         ab, ba, ac, ca, des_ab, des_ac = sess.run(to_run,
                                                   feed_dict=feed_dict)
         # Multiplication must be symmetric in its arguments.
         self.assertAllClose(ab, des_ab)
         self.assertAllClose(ba, des_ab)
         self.assertAllClose(ac, des_ac)
         self.assertAllClose(ca, des_ac)
Exemplo n.º 9
0
 def testFlatInnerTTTensbySparseTens(self):
     # Inner product between a TT-tensor and a sparse tensor.
     # flat_inner(tt, sparse) must equal the dot product of the dense
     # tensor's entries at the sparse positions with the sparse values.
     shape_list = ((2, 2), (2, 3, 4), (4, 2, 5, 2))
     rank_list = (1, 2)
     np.random.seed(1)  # deterministic sparse patterns across runs
     with self.test_session() as sess:
         for shape in shape_list:
             for rank in rank_list:
                 for num_elements in [1, 10]:
                     tt_1 = initializers.random_tensor(shape, tt_rank=rank)
                     # Sample flat positions, then convert them to
                     # multi-dimensional indices for tf.SparseTensor.
                     sparse_flat_indices = np.random.choice(
                         np.prod(shape), num_elements).astype(int)
                     sparse_indices = np.unravel_index(
                         sparse_flat_indices, shape)
                     sparse_indices = np.vstack(sparse_indices).transpose()
                     values = np.random.randn(num_elements).astype(
                         np.float32)
                     sparse_2 = tf.SparseTensor(indices=sparse_indices,
                                                values=values,
                                                dense_shape=shape)
                     res_actual = ops.flat_inner(tt_1, sparse_2)
                     res_actual_val, tt_1_val = sess.run(
                         [res_actual, ops.full(tt_1)])
                     # Reference: gather the same entries from the dense
                     # tensor and take the ordinary dot product.
                     res_desired_val = tt_1_val.flatten(
                     )[sparse_flat_indices].dot(values)
                     self.assertAllClose(res_actual_val, res_desired_val)
Exemplo n.º 10
0
 def testAssign(self):
   # variables.assign must overwrite the TT-variable in place: after the
   # assign op runs, reading the variable yields the assigned value, which
   # differs substantially from the initial one.
   old_init = initializers.random_tensor([2, 3, 2], tt_rank=2)
   tt = variables.get_variable('tt', initializer=old_init)
   new_init = initializers.random_tensor([2, 3, 2], tt_rank=2)
   assigner = variables.assign(tt, new_init)
   with self.test_session():
     tf.global_variables_initializer().run()
     init_value = ops.full(tt).eval()
     # Evaluating the assign op also returns the newly assigned value.
     assigner_value = ops.full(assigner).eval()
     after_value = ops.full(tt)
     after_value = after_value.eval()
     self.assertAllClose(assigner_value, after_value)
     # Assert that the value actually changed:
     abs_diff = np.linalg.norm((init_value - after_value).flatten())
     rel_diff = abs_diff / np.linalg.norm((init_value).flatten())
     self.assertGreater(rel_diff, 0.2)
Exemplo n.º 11
0
 def testProjectOnItself(self):
   # Projection of X into the tangent space of itself is X: P_x(x) = x.
   x = initializers.random_tensor((2, 3, 4), dtype=self.dtype)
   projection = riemannian.project_sum(x, x)
   with self.test_session() as sess:
     projected_val, original_val = sess.run(
         (ops.full(projection), ops.full(x)))
     self.assertAllClose(original_val, projected_val)
Exemplo n.º 12
0
    def testCastFloat(self):
        """ops.cast must change both the TT object's dtype and the dtype of
        the dense tensor it evaluates to."""
        tt_x = initializers.random_tensor((2, 3, 2), tt_rank=2)

        casted = ops.cast(tt_x, self.dtype)
        casted_val = self.evaluate(ops.full(casted))
        self.assertEqual(self.dtype, casted.dtype)
        # Was assertTrue(self.dtype, casted_val.dtype): assertTrue treats the
        # second argument as a failure message, so the dtypes were never
        # actually compared. Use a real equality assertion.
        self.assertEqual(self.dtype, casted_val.dtype)
Exemplo n.º 13
0
 def testGatherND(self):
     # Gathering entries of a TT-tensor must match tf.gather_nd applied to
     # its dense form.
     indices = [[0, 0, 0], [0, 1, 2], [0, 1, 0]]
     tt = initializers.random_tensor((3, 4, 5), tt_rank=2, dtype=self.dtype)
     gathered = ops.gather_nd(tt, indices)
     reference = tf.gather_nd(ops.full(tt), indices)
     gathered_val, reference_val = self.evaluate([gathered, reference])
     self.assertAllClose(gathered_val, reference_val)
Exemplo n.º 14
0
 def testPlaceholderTensorIndexing(self):
     # Slicing a TT-tensor with placeholder (dynamic) indices must match
     # slicing the dense tensor with the corresponding static indices.
     tens = initializers.random_tensor((3, 3, 4), dtype=self.dtype)
     with self.test_session() as sess:
         start = tf.placeholder(tf.int32)
         end = tf.placeholder(tf.int32)
         # Static reference slice with start=1, end=3.
         desired = ops.full(tens)[1:3, 1, :3]
         actual = ops.full(tens[start:end, start, :end])
         desired, actual = sess.run([desired, actual], {start: 1, end: 3})
         self.assertAllClose(desired, actual)
Exemplo n.º 15
0
 def testFlatInnerTTTensbyTTTens(self):
     # Inner product between two TT-tensors.
     # flat_inner must equal <vec(a), vec(b)> computed on the dense forms.
     shape_list = ((2, 2), (2, 3, 4), (4, 2, 5, 2))
     rank_list = (1, 2)
     with self.test_session() as sess:
         for shape in shape_list:
             for rank in rank_list:
                 tt_1 = initializers.random_tensor(shape, tt_rank=rank)
                 tt_2 = initializers.random_tensor(shape, tt_rank=rank)
                 res_actual = ops.flat_inner(tt_1, tt_2)
                 # Reference: flatten both tensors and take a 1x1 matmul,
                 # i.e. the ordinary vector dot product.
                 tt_1_full = tf.reshape(ops.full(tt_1), (1, -1))
                 tt_2_full = tf.reshape(ops.full(tt_2), (-1, 1))
                 res_desired = tf.matmul(tt_1_full, tt_2_full)
                 res_actual_val, res_desired_val = sess.run(
                     [res_actual, res_desired])
                 # squeeze drops the (1, 1) matmul shape down to a scalar.
                 self.assertAllClose(res_actual_val,
                                     np.squeeze(res_desired_val),
                                     rtol=1e-5)
Exemplo n.º 16
0
  def testToAndFromDeltas(self):
    # Test converting to and from deltas representation of the tangent space
    # element: the round trip must reconstruct the projected tensor, and the
    # squared Frobenius norm must equal the sum of squared delta norms.
    what = initializers.random_tensor((2, 3, 4), 4, dtype=self.dtype)
    where = initializers.random_tensor((2, 3, 4), 3, dtype=self.dtype)
    projected = riemannian.project(what, where)

    deltas = riemannian.tangent_space_to_deltas(projected)
    reconstructed_projected = riemannian.deltas_to_tangent_space(deltas, where)
    # Tangent space element norm can be computed from deltas norm.
    projected_normsq_desired = ops.frobenius_norm_squared(projected)
    projected_normsq_actual = tf.add_n([tf.reduce_sum(c * c) for c in deltas])
    desired_val, actual_val = self.evaluate((ops.full(projected),
                                        ops.full(reconstructed_projected)))
    self.assertAllClose(desired_val, actual_val)
    desired_val, actual_val = self.evaluate((projected_normsq_desired,
                                        projected_normsq_actual))
    self.assertAllClose(desired_val, actual_val)
Exemplo n.º 17
0
 def testCompareProjectSumAndProject(self):
   # project_sum weighted by the identity matrix must reduce to project.
   batch = initializers.random_tensor_batch((2, 3, 4), 3, batch_size=4)
   tangent_tens = initializers.random_tensor((2, 3, 4), 4)
   summed = riemannian.project_sum(batch, tangent_tens, tf.eye(4))
   projected = riemannian.project(batch, tangent_tens)
   with self.test_session() as sess:
     summed_val, projected_val = sess.run(
         (ops.full(summed), ops.full(projected)))
     self.assertAllClose(summed_val, projected_val)
Exemplo n.º 18
0
 def testAssign(self):
     # variables.assign must overwrite the TT-variable: after evaluating
     # the assign op, the variable holds the assigned value, which differs
     # substantially from the initial one.
     old_init = initializers.random_tensor([2, 3, 2],
                                           tt_rank=2,
                                           dtype=self.dtype)
     tt = variables.get_variable('tt', initializer=old_init)
     new_init = initializers.random_tensor([2, 3, 2],
                                           tt_rank=2,
                                           dtype=self.dtype)
     self.evaluate(tf.compat.v1.global_variables_initializer())
     init_value = self.evaluate(ops.full(tt))
     assigner = variables.assign(tt, new_init)
     # Evaluating the assign op also returns the newly assigned value.
     assigner_value = self.evaluate(ops.full(assigner))
     after_value = ops.full(tt)
     after_value = self.evaluate(after_value)
     self.assertAllClose(assigner_value, after_value)
     # Assert that the value actually changed:
     abs_diff = np.linalg.norm((init_value - after_value).flatten())
     rel_diff = abs_diff / np.linalg.norm((init_value).flatten())
     self.assertGreater(rel_diff, 0.2)
Exemplo n.º 19
0
    def testCastFloat(self):
        """ops.cast to each float dtype must change both the TT object's
        dtype and the dtype of the dense tensor it evaluates to."""
        tt_x = initializers.random_tensor((2, 3, 2), tt_rank=2)

        with self.test_session() as sess:
            for dtype in [tf.float16, tf.float32, tf.float64]:
                casted = ops.cast(tt_x, dtype)
                casted_val = sess.run(ops.full(casted))
                self.assertEqual(dtype, casted.dtype)
                # Was assertTrue(dtype, casted_val.dtype): assertTrue treats
                # the second argument as a failure message, so the dtypes
                # were never actually compared. Use a real equality check.
                self.assertEqual(dtype, casted_val.dtype)
Exemplo n.º 20
0
 def testCompareProjectSumAndProject(self):
   # project_sum weighted by the identity matrix must reduce to project.
   batch = initializers.random_tensor_batch((2, 3, 4), 3, batch_size=4,
                                            dtype=self.dtype)
   tangent_tens = initializers.random_tensor((2, 3, 4), 4,
                                             dtype=self.dtype)
   summed = riemannian.project_sum(batch, tangent_tens, np.eye(4))
   projected = riemannian.project(batch, tangent_tens)
   summed_val, projected_val = self.evaluate(
       (ops.full(summed), ops.full(projected)))
   self.assertAllClose(summed_val, projected_val)
Exemplo n.º 21
0
 def testMultiplyByNumber(self):
     # Scaling a TT-tensor by a scalar must match scaling its dense form,
     # via ops.multiply and via the overloaded "*" operator.
     tt = initializers.random_tensor((1, 2, 3), tt_rank=(1, 2, 3, 1))
     with self.test_session() as sess:
         scaled_fn = ops.full(ops.multiply(tt, 4))
         scaled_op = ops.full(4.0 * tt)
         expected = 4.0 * ops.full(tt)
         fn_val, op_val, expected_val = sess.run(
             [scaled_fn, scaled_op, expected])
         self.assertAllClose(fn_val, expected_val)
         self.assertAllClose(op_val, expected_val)
Exemplo n.º 22
0
 def testProjectSum(self):
   # Projecting an explicit sum of batch elements must equal project_sum
   # applied to the whole batch.
   batch = initializers.random_tensor_batch((2, 3, 4), batch_size=3)
   tangent_tens = initializers.random_tensor((2, 3, 4), 3)
   explicit_sum = batch[0] + batch[1] + batch[2]
   direct_proj = riemannian.project_sum(explicit_sum, tangent_tens)
   batch_proj = riemannian.project_sum(batch, tangent_tens)
   with self.test_session() as sess:
     desired_val, actual_val = sess.run(
         (ops.full(direct_proj), ops.full(batch_proj)))
     self.assertAllClose(desired_val, actual_val)
Exemplo n.º 23
0
  def testPairwiseFlatInnerTensor(self):
    # Compare pairwise_flat_inner_projected against naive implementation.
    # It must also reject non-projected arguments and projections coming
    # from different tangent spaces.
    what1 = initializers.random_tensor_batch((2, 3, 4), 4, batch_size=3)
    what2 = initializers.random_tensor_batch((2, 3, 4), 4, batch_size=4)
    where = initializers.random_tensor((2, 3, 4), 3)
    projected1 = riemannian.project(what1, where)
    projected2 = riemannian.project(what2, where)
    # Naive reference: generic pairwise inner products of the two batches.
    desired = batch_ops.pairwise_flat_inner(projected1, projected2)
    actual = riemannian.pairwise_flat_inner_projected(projected1, projected2)
    with self.test_session() as sess:
      desired_val, actual_val = sess.run((desired, actual))
      self.assertAllClose(desired_val, actual_val, atol=1e-5, rtol=1e-5)

    with self.assertRaises(ValueError):
      # Second argument is not a projection on the tangent space.
      riemannian.pairwise_flat_inner_projected(projected1, what2)
    where2 = initializers.random_tensor((2, 3, 4), 3)
    another_projected2 = riemannian.project(what2, where2)
    with self.assertRaises(ValueError):
      # The arguments are projections on different tangent spaces.
      riemannian.pairwise_flat_inner_projected(projected1, another_projected2)
Exemplo n.º 24
0
 def testCoreRenorm(self):
     # renormalize_tt_cores must keep the represented tensor intact while
     # equalizing the Frobenius norms of all cores.
     a = initializers.random_tensor(3 * (10, ), tt_rank=7, dtype=self.dtype)
     b = ops.renormalize_tt_cores(a)
     dense_a, dense_b = self.evaluate([ops.full(a), ops.full(b)])
     core_norms = [np.linalg.norm(core)
                   for core in self.evaluate(b.tt_cores)]
     self.assertAllClose(dense_a, dense_b, atol=1e-5, rtol=1e-5)
     # Every core norm must equal the first one.
     self.assertAllClose(core_norms,
                         core_norms[0] * np.ones(len(core_norms)))
Exemplo n.º 25
0
  def testAddNProjected(self):
    # Add several TT-objects from the same tangent space: add_n_projected
    # must match the ordinary "+" on the projections, and reject arguments
    # that are not projections or come from different tangent spaces.
    what1 = initializers.random_tensor_batch((2, 3, 4), 4, batch_size=3)
    what2 = initializers.random_tensor_batch((2, 3, 4), 3, batch_size=3)
    where = initializers.random_tensor((2, 3, 4), 3)
    projected1 = riemannian.project(what1, where)
    projected2 = riemannian.project(what2, where)
    desired = ops.full(projected1 + projected2)
    actual = ops.full(riemannian.add_n_projected((projected1, projected2)))
    with self.test_session() as sess:
      desired_val, actual_val = sess.run((desired, actual))
      self.assertAllClose(desired_val, actual_val, atol=1e-5, rtol=1e-5)

    with self.assertRaises(ValueError):
      # Second argument is not a projection on the tangent space.
      riemannian.add_n_projected((projected1, what2))
    where2 = initializers.random_tensor((2, 3, 4), 3)
    another_projected2 = riemannian.project(what2, where2)
    with self.assertRaises(ValueError):
      # The arguments are projections on different tangent spaces.
      riemannian.add_n_projected((projected1, another_projected2))
Exemplo n.º 26
0
 def testRoundTensor(self):
   # Rounding with max_tt_rank=9 must approximately preserve the tensor and
   # truncate the dynamic TT-ranks to the expected values.
   shape = (2, 1, 4, 3, 3)
   np.random.seed(1)
   tens = initializers.random_tensor(shape, tt_rank=15,
                                     dtype=self.dtype)
   rounded_tens = decompositions.round(tens, max_tt_rank=9)
   # Renamed from `vars`: never shadow the builtin vars().
   full_tensors = [ops.full(tens), ops.full(rounded_tens)]
   tens_value, rounded_tens_value = self.evaluate(full_tensors)
   # TODO: why so bad accuracy?
   self.assertAllClose(tens_value, rounded_tens_value, atol=1e-4, rtol=1e-4)
   dynamic_tt_ranks = self.evaluate(shapes.tt_ranks(rounded_tens))
   self.assertAllEqual([1, 2, 2, 8, 3, 1], dynamic_tt_ranks)
Exemplo n.º 27
0
 def testMultiplyBatchByTensor(self):
   # Broadcasting a single TT-tensor over a TT-batch, in either argument
   # order, must match dense elementwise multiplication.
   single = initializers.random_tensor((3, 3, 3), tt_rank=2, dtype=self.dtype)
   batch = initializers.random_tensor_batch((3, 3, 3), tt_rank=2, batch_size=5,
                                            dtype=self.dtype)
   with self.test_session() as sess:
     left = ops.full(ops.multiply(single, batch))
     right = ops.full(ops.multiply(batch, single))
     expected = ops.full(single) * ops.full(batch)
     left_val, right_val, expected_val = sess.run([left, right, expected])
     self.assertAllClose(left_val, expected_val)
     self.assertAllClose(right_val, expected_val)
Exemplo n.º 28
0
 def testGatherND(self):
   # gather_nd must accept both static (Python list) and placeholder-fed
   # indices and match tf.gather_nd on the dense tensor.
   idx = [[0, 0, 0], [0, 1, 2], [0, 1, 0]]
   pl_idx = tf.placeholder(tf.int32, [None, 3])
   tt = initializers.random_tensor((3, 4, 5), tt_rank=2, dtype=self.dtype)
   res_np = ops.gather_nd(tt, idx)
   res_pl = ops.gather_nd(tt, pl_idx)
   res_desired = tf.gather_nd(ops.full(tt), idx)
   to_run = [res_np, res_pl, res_desired]
   with self.test_session() as sess:
     res_np_v, res_pl_v, des_v = sess.run(to_run, feed_dict={pl_idx: idx})
     self.assertAllClose(res_np_v, des_v)
     # Bug fix: previously compared res_pl_v with itself, which is always
     # true, so the placeholder path was effectively untested.
     self.assertAllClose(res_pl_v, des_v)
Exemplo n.º 29
0
 def testOnesLikeAndZerosLike(self):
     # ones_like / zeros_like must evaluate to dense all-ones / all-zeros
     # of the source shape, and reject non-TT inputs with ValueError.
     source = initializers.random_tensor([2, 3, 4])
     ones = initializers.ones_like(source)
     zeros = initializers.zeros_like(source)
     with self.test_session() as sess:
         ones_val, zeros_val = sess.run([ops.full(ones), ops.full(zeros)])
         self.assertAllClose(ones_val, np.ones((2, 3, 4)))
         self.assertAllClose(zeros_val, np.zeros((2, 3, 4)))
     with self.assertRaises(ValueError):
         initializers.ones_like(1)
     with self.assertRaises(ValueError):
         initializers.zeros_like(1)
Exemplo n.º 30
0
 def testAttributes(self):
   # Converting an initializer into a TT-variable must preserve all of its
   # shape, rank, and matrix/tensor attributes.
   tens = initializers.random_tensor([2, 3, 2], tt_rank=2)
   tens_v = variables.get_variable('tt_tens', initializer=tens)
   mat = initializers.random_matrix([[3, 2, 2], [3, 3, 3]], tt_rank=3)
   mat_v = variables.get_variable('tt_mat', initializer=mat)
   for init, var in ((tens, tens_v), (mat, mat_v)):
     self.assertEqual(init.get_shape(), var.get_shape())
     self.assertEqual(init.get_raw_shape(), var.get_raw_shape())
     self.assertEqual(init.ndims(), var.ndims())
     self.assertEqual(init.get_tt_ranks(), var.get_tt_ranks())
     self.assertEqual(init.is_tt_matrix(), var.is_tt_matrix())