Example #1
  def testPairwiseFlatInnerMatrix(self):
    # Compare pairwise_flat_inner_projected against a naive implementation.
    what1 = initializers.random_matrix_batch(((2, 3, 4), None), 4, batch_size=3,
                                             dtype=self.dtype)
    what2 = initializers.random_matrix_batch(((2, 3, 4), None), 4, batch_size=4,
                                             dtype=self.dtype)
    where = initializers.random_matrix(((2, 3, 4), None), 3,
                                       dtype=self.dtype)
    projected1 = riemannian.project(what1, where)
    projected2 = riemannian.project(what2, where)
    desired = batch_ops.pairwise_flat_inner(projected1, projected2)
    actual = riemannian.pairwise_flat_inner_projected(projected1, projected2)
    with self.test_session() as sess:
      desired_val, actual_val = sess.run((desired, actual))
      self.assertAllClose(desired_val, actual_val, atol=1e-5, rtol=1e-5)

    with self.assertRaises(ValueError):
      # Second argument is not a projection on the tangent space.
      riemannian.pairwise_flat_inner_projected(projected1, what2)
    where2 = initializers.random_matrix(((2, 3, 4), None), 3,
                                        dtype=self.dtype)
    another_projected2 = riemannian.project(what2, where2)
    with self.assertRaises(ValueError):
      # The arguments are projections on different tangent spaces.
      riemannian.pairwise_flat_inner_projected(projected1, another_projected2)
Example #2
 def testBilinearFormBatch(self):
     # Test bilinear form for batch of tensors.
     shape_list = (((2, 2), (3, 4)), ((2, 3, 4), (2, 2, 2)))
     rank_list = (1, 2)
     for tensor_shape in shape_list:
         for rank in rank_list:
             A = initializers.random_matrix(tensor_shape,
                                            tt_rank=rank,
                                            dtype=self.dtype)
             b = initializers.random_matrix_batch((tensor_shape[0], None),
                                                  tt_rank=rank,
                                                  batch_size=5,
                                                  dtype=self.dtype)
             c = initializers.random_matrix_batch((tensor_shape[1], None),
                                                  tt_rank=rank,
                                                  batch_size=5,
                                                  dtype=self.dtype)
             res_actual = ops.bilinear_form(A, b, c)
             vars = [res_actual, ops.full(A), ops.full(b), ops.full(c)]
             res_actual_val, A_val, b_val, c_val = self.evaluate(vars)
             res_desired = np.diag(
                 b_val[:, :, 0].dot(A_val).dot(c_val[:, :, 0].T))
             self.assertAllClose(res_actual_val,
                                 np.squeeze(res_desired),
                                 atol=1e-5,
                                 rtol=1e-5)
Example #3
 def testTTMatTimesTTMatBroadcasting(self):
   # Multiply a batch of TT-matrices by another batch of TT-matrices with
   # broadcasting.
   left_shape = (2, 3)
   sum_shape = (4, 3)
   right_shape = (4, 4)
   with self.test_session() as sess:
     tt_mat_1 = initializers.random_matrix_batch((left_shape, sum_shape),
                                                 tt_rank=3, batch_size=3,
                                                 dtype=self.dtype)
     tt_mat_2 = initializers.random_matrix_batch((sum_shape, right_shape),
                                                 dtype=self.dtype)
     # TT-batch times a one-element TT-batch.
     res_actual = ops.matmul(tt_mat_1, tt_mat_2)
     res_actual = ops.full(res_actual)
     # TT by TT-batch.
     res_actual2 = ops.matmul(ops.transpose(tt_mat_2[0]), ops.transpose(tt_mat_1))
     res_actual2 = ops.full(ops.transpose(res_actual2))
     res_desired = tf.einsum('oij,jk->oik', ops.full(tt_mat_1),
                             ops.full(tt_mat_2[0]))
     to_run = [res_actual, res_actual2, res_desired]
     res_actual_val, res_actual2_val, res_desired_val = sess.run(to_run)
     self.assertAllClose(res_actual_val, res_desired_val, atol=1e-5, rtol=1e-5)
     self.assertAllClose(res_actual2_val, res_desired_val, atol=1e-5,
                         rtol=1e-5)
Example #4
 def testPairwiseFlatInnerVectorsWithMatrix(self):
     # Test pairwise_flat_inner of a batch of TT vectors with a given matrix,
     # in which case we should compute
     # res[i, j] = tt_vectors[i] ^ T * matrix * tt_vectors[j]
     tt_vectors_1 = initializers.random_matrix_batch(((2, 3), None),
                                                     batch_size=2,
                                                     dtype=self.dtype)
     tt_vectors_2 = initializers.random_matrix_batch(((2, 3), None),
                                                     batch_size=3,
                                                     dtype=self.dtype)
     matrix = initializers.random_matrix(((2, 3), (2, 3)), dtype=self.dtype)
     res_actual = batch_ops.pairwise_flat_inner(tt_vectors_1, tt_vectors_2,
                                                matrix)
     full_vectors_1 = tf.reshape(ops.full(tt_vectors_1), (2, 6))
     full_vectors_2 = tf.reshape(ops.full(tt_vectors_2), (3, 6))
     with self.test_session() as sess:
         res = sess.run(
             (res_actual, full_vectors_1, full_vectors_2, ops.full(matrix)))
         res_actual_val, vectors_1_val, vectors_2_val, matrix_val = res
         res_desired_val = np.zeros((2, 3))
         for i in range(2):
             for j in range(3):
                 curr_val = np.dot(vectors_1_val[i], matrix_val)
                 curr_val = np.dot(curr_val, vectors_2_val[j])
                 res_desired_val[i, j] = curr_val
         self.assertAllClose(res_desired_val, res_actual_val)
Example #5
 def testAddSameBatchSize(self):
   # Sum two TT-matrices with the same batch size.
   tt_a = initializers.random_matrix_batch(((2, 1, 4), None), tt_rank=2,
                                           batch_size=3, dtype=self.dtype)
   tt_b = initializers.random_matrix_batch(((2, 1, 4), None),
                                           tt_rank=[1, 2, 4, 1], batch_size=3,
                                           dtype=self.dtype)
   with self.test_session() as sess:
     res_actual = ops.full(ops.add(tt_a, tt_b))
     res_actual2 = ops.full(tt_a + tt_b)
     res_desired = ops.full(tt_a) + ops.full(tt_b)
     to_run = [res_actual, res_actual2, res_desired]
     res_actual_val, res_actual2_val, res_desired_val = sess.run(to_run)
     self.assertAllClose(res_actual_val, res_desired_val)
     self.assertAllClose(res_actual2_val, res_desired_val)
Example #6
 def testAddBroadcasting(self):
   # Sum two TT-matrices with broadcasting.
   tt_a = initializers.random_matrix_batch(((2, 1, 4), (2, 2, 2)), tt_rank=2,
                                           batch_size=3, dtype=self.dtype)
   tt_b = initializers.random_matrix_batch(((2, 1, 4), (2, 2, 2)),
                                           tt_rank=[1, 2, 4, 1], batch_size=1,
                                           dtype=self.dtype)
   with self.test_session() as sess:
     res_actual = ops.full(ops.add(tt_a, tt_b))
     res_actual2 = ops.full(tt_b + tt_a)
     res_desired = ops.full(tt_a) + ops.full(tt_b)
     to_run = [res_actual, res_actual2, res_desired]
     res_actual_val, res_actual2_val, res_desired_val = sess.run(to_run)
     self.assertAllClose(res_actual_val, res_desired_val)
     self.assertAllClose(res_actual2_val, res_desired_val)
Example #7
 def testShapeOverflow(self):
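     # Check that get_shape handles huge shapes (10**20) without overflow.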
     large_shape = [10] * 20
     tensor = initializers.random_matrix_batch([large_shape, large_shape],
                                               batch_size=5,
                                               dtype=self.dtype)
     shape = tensor.get_shape()
     self.assertEqual([5, 10**20, 10**20], shape)
Example #8
 def testTranspose(self):
     # Transpose a batch of TT-matrices.
     with self.test_session() as sess:
         tt = initializers.random_matrix_batch(((2, 3, 4), (2, 2, 2)),
                                               batch_size=2)
         res_actual = ops.full(ops.transpose(tt))
         res_actual_val, tt_val = sess.run([res_actual, ops.full(tt)])
         self.assertAllClose(tt_val.transpose((0, 2, 1)), res_actual_val)
Example #9
 def testTranspose(self):
     # Transpose a batch of TT-matrices.
     tt = initializers.random_matrix_batch(((2, 3, 4), (2, 2, 2)),
                                           batch_size=2,
                                           dtype=self.dtype)
     res_actual = ops.full(ops.transpose(tt))
     res_actual_val, tt_val = self.evaluate([res_actual, ops.full(tt)])
     self.assertAllClose(tt_val.transpose((0, 2, 1)), res_actual_val)
Example #10
 def testIsKronKron(self):
     # Tests _is_kron on a Kronecker matrix batch
     initializer = initializers.random_matrix_batch(((2, 3), (3, 2)),
                                                    tt_rank=1,
                                                    batch_size=3)
     kron_mat_batch = variables.get_variable('kron_mat_batch',
                                             initializer=initializer)
     self.assertTrue(kr._is_kron(kron_mat_batch))
Example #11
 def testIsKronNonKron(self):
     # Tests _is_kron on a non-Kronecker matrix batch
     initializer = initializers.random_matrix_batch(((2, 3), (3, 2)),
                                                    tt_rank=2,
                                                    batch_size=3)
     tt_mat_batch = variables.get_variable('tt_mat_batch',
                                           initializer=initializer)
     self.assertFalse(kr._is_kron(tt_mat_batch))
Example #12
 def testPairwiseFlatInnerMatrix(self):
     # Test pairwise_flat_inner of a batch of TT matrices.
     tt_vectors_1 = initializers.random_matrix_batch(((2, 3), (2, 3)),
                                                     batch_size=5)
     tt_vectors_2 = initializers.random_matrix_batch(((2, 3), (2, 3)),
                                                     batch_size=5)
     res_actual = batch_ops.pairwise_flat_inner(tt_vectors_1, tt_vectors_2)
     full_vectors_1 = tf.reshape(ops.full(tt_vectors_1), (5, 36))
     full_vectors_2 = tf.reshape(ops.full(tt_vectors_2), (5, 36))
     res_desired = tf.matmul(full_vectors_1, tf.transpose(full_vectors_2))
     res_desired = tf.squeeze(res_desired)
     with self.test_session() as sess:
         res_actual_val, res_desired_val = sess.run(
             (res_actual, res_desired))
         self.assertAllClose(res_desired_val,
                             res_actual_val,
                             atol=1e-5,
                             rtol=1e-5)
Example #13
 def testTTMatTimesTTMatSameBatchSize(self):
   # Multiply a batch of TT-matrices by another batch of TT-matrices with the
   # same batch sizes.
   left_shape = (2, 3)
   sum_shape = (4, 3)
   right_shape = (4, 4)
   with self.test_session() as sess:
     tt_mat_1 = initializers.random_matrix_batch((left_shape, sum_shape),
                                                 tt_rank=3, batch_size=3,
                                                 dtype=self.dtype)
     tt_mat_2 = initializers.random_matrix_batch((sum_shape, right_shape),
                                                 batch_size=3,
                                                 dtype=self.dtype)
     res_actual = ops.matmul(tt_mat_1, tt_mat_2)
     res_actual = ops.full(res_actual)
     res_desired = tf.matmul(ops.full(tt_mat_1), ops.full(tt_mat_2))
     res_actual_val, res_desired_val = sess.run([res_actual, res_desired])
      # TODO: why is the accuracy so bad?
     self.assertAllClose(res_actual_val, res_desired_val, atol=1e-5, rtol=1e-5)
Example #14
  def testCastFloat(self):
    # Test cast function for float tt-matrices and vectors.
    tt_mat = initializers.random_matrix_batch(((2, 3), (3, 2)), tt_rank=2,
                                              batch_size=3)

    with self.test_session() as sess:
      casted = ops.cast(tt_mat, self.dtype)
      casted_val = sess.run(ops.full(casted))
      self.assertEqual(self.dtype, casted.dtype)
      self.assertEqual(self.dtype, casted_val.dtype)
Example #15
 def testBatchMultiply(self):
     # Test multiplying batch of TTMatrices by individual numbers.
     tt = initializers.random_matrix_batch(((2, 3), (3, 3)), batch_size=3)
     weights = [0.1, 0, -10]
     actual = batch_ops.multiply_along_batch_dim(tt, weights)
     individual_desired = [weights[i] * tt[i:i + 1] for i in range(3)]
     desired = batch_ops.concat_along_batch_dim(individual_desired)
     with self.test_session() as sess:
         desired_val, actual_val = sess.run(
             (ops.full(desired), ops.full(actual)))
         self.assertAllClose(desired_val, actual_val)
Example #16
 def testConcatMatrixPlaceholders(self):
     # Test concatenating TTMatrices of unknown batch sizes along the batch dimension.
     number_of_objects = tf.placeholder(tf.int32)
     all = initializers.random_matrix_batch(((2, 3), (2, 3)), batch_size=5)
     actual = batch_ops.concat_along_batch_dim(
         (all[:number_of_objects], all[number_of_objects:]))
     with self.test_session() as sess:
         desired_val, actual_val = sess.run(
             (ops.full(all), ops.full(actual)),
             feed_dict={number_of_objects: 2})
         self.assertAllClose(desired_val, actual_val)
Example #17
 def testProjectMatmul(self):
   # Project a TT-matrix times TT-vector on a TT-vector.
   tt_mat = initializers.random_matrix(((2, 3, 4), (2, 3, 4)))
   tt_vec_what = initializers.random_matrix_batch(((2, 3, 4), None),
                                                  batch_size=3)
   tt_vec_where = initializers.random_matrix(((2, 3, 4), None))
   proj = riemannian.project_matmul(tt_vec_what, tt_vec_where, tt_mat)
   matvec = ops.matmul(tt_mat, tt_vec_what)
   proj_desired = riemannian.project(matvec, tt_vec_where)
   with self.test_session() as sess:
     actual_val, desired_val = sess.run((ops.full(proj), ops.full(proj_desired)))
     self.assertAllClose(desired_val, actual_val, atol=1e-5, rtol=1e-5)
Example #18
 def testRandomMatrixBatch(self):
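     # Check that invalid shapes, TT-ranks and batch sizes raise errors.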
     shapes = [[1, 2, 3], [[1, 2], [1, 2, 3]], [[-1, 2, 3], [1, 2, 3]],
               [[0.5, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]],
               [[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]],
               [[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]],
               [[1, 2, 3], [1, 2, 3]]]
     tt_ranks = [2, 2, 2, 2, -1, [[[1]]], [2.5, 3], 2, 2]
     bs = 7 * [1] + [-1] + [0.5]
     bad_cases = zip(shapes, tt_ranks, bs)
     for case in bad_cases:
         with self.assertRaises(ValueError):
             initializers.random_matrix_batch(case[0],
                                              tt_rank=case[1],
                                              batch_size=case[2])
     for case in bad_cases:
         with self.assertRaises(ValueError):
             initializers.matrix_batch_with_random_cores(case[0],
                                                         tt_rank=case[1],
                                                         batch_size=case[2])
     with self.assertRaises(NotImplementedError):
         initializers.random_matrix_batch([[1, 2, 3], [1, 2, 3]], mean=1.0)
Example #19
 def testGramMatrix(self):
     # Test Gram Matrix of a batch of TT vectors.
     tt_vectors = initializers.random_matrix_batch(((2, 3), None),
                                                   batch_size=5)
     res_actual = batch_ops.gram_matrix(tt_vectors)
     full_vectors = tf.reshape(ops.full(tt_vectors), (5, 6))
     res_desired = tf.matmul(full_vectors, tf.transpose(full_vectors))
     res_desired = tf.squeeze(res_desired)
     with self.test_session() as sess:
         res_actual_val, res_desired_val = sess.run(
             (res_actual, res_desired))
         self.assertAllClose(res_desired_val, res_actual_val)
Example #20
    def testConcatMatrix(self):
        # Test concatenating TTMatrix batches along the batch dimension.
        first = initializers.random_matrix_batch(((2, 3), (3, 3)),
                                                 batch_size=1,
                                                 dtype=self.dtype)
        second = initializers.random_matrix_batch(((2, 3), (3, 3)),
                                                  batch_size=4,
                                                  dtype=self.dtype)
        third = initializers.random_matrix_batch(((2, 3), (3, 3)),
                                                 batch_size=3,
                                                 dtype=self.dtype)
        first_res = batch_ops.concat_along_batch_dim((first))
        first_res = ops.full(first_res)
        first_second_res = batch_ops.concat_along_batch_dim((first, second))
        first_second_res = ops.full(first_second_res)
        first_second_third_res = batch_ops.concat_along_batch_dim(
            (first, second, third))
        first_second_third_res = ops.full(first_second_third_res)

        first_full = ops.full(first)
        second_full = ops.full(second)
        third_full = ops.full(third)
        first_desired = first_full
        first_second_desired = tf.concat((first_full, second_full), axis=0)
        first_second_third_desired = tf.concat(
            (first_full, second_full, third_full), axis=0)
        with self.test_session() as sess:
            res = sess.run((first_res, first_second_res,
                            first_second_third_res, first_desired,
                            first_second_desired, first_second_third_desired))
            first_res_val = res[0]
            first_second_res_val = res[1]
            first_second_third_res_val = res[2]
            first_desired_val = res[3]
            first_second_desired_val = res[4]
            first_second_third_desired_val = res[5]
            self.assertAllClose(first_res_val, first_desired_val)
            self.assertAllClose(first_second_res_val, first_second_desired_val)
            self.assertAllClose(first_second_third_res_val,
                                first_second_third_desired_val)
Example #21
 def testInv(self):
     # Tests the inv function
     initializer = initializers.random_matrix_batch(((2, 3, 2), (2, 3, 2)),
                                                    tt_rank=1,
                                                    batch_size=3,
                                                    dtype=self.dtype)
     kron_mat_batch = variables.get_variable('kron_mat_batch',
                                             initializer=initializer)
     init_op = tf.compat.v1.global_variables_initializer()
     self.evaluate(init_op)
     desired = np.linalg.inv(self.evaluate(ops.full(kron_mat_batch)))
     actual = self.evaluate(ops.full(kr.inv(kron_mat_batch)))
     self.assertAllClose(desired, actual, atol=1e-4)
Example #22
 def testQuadraticFormBatch(self):
     # Test quadratic form for batch of tensors.
     shape_list = (((2, 2), (3, 4)), ((2, 3, 4), (2, 2, 2)))
     rank_list = (1, 2)
     with self.test_session() as sess:
         for tensor_shape in shape_list:
             for rank in rank_list:
                 A = initializers.random_matrix(tensor_shape, tt_rank=rank)
                 b = initializers.random_matrix_batch(
                     (tensor_shape[0], None), tt_rank=rank, batch_size=5)
                 c = initializers.random_matrix_batch(
                     (tensor_shape[1], None), tt_rank=rank, batch_size=5)
                 res_actual = ops.quadratic_form(A, b, c)
                 vars = [res_actual, ops.full(A), ops.full(b), ops.full(c)]
                 res_actual_val, A_val, b_val, c_val = sess.run(vars)
                 res_desired = np.diag(
                     b_val[:, :, 0].dot(A_val).dot(c_val[:, :, 0].T))
                 self.assertAllClose(res_actual_val,
                                     np.squeeze(res_desired),
                                     atol=1e-5,
                                     rtol=1e-5)
Example #23
 def testDet(self):
     # Tests the determinant function
     initializer = initializers.random_matrix_batch(((2, 3, 2), (2, 3, 2)),
                                                    tt_rank=1,
                                                    batch_size=3)
     kron_mat_batch = variables.get_variable('kron_mat_batch',
                                             initializer=initializer)
     init_op = tf.global_variables_initializer()
     with self.test_session() as sess:
         sess.run(init_op)
         desired = tf.matrix_determinant(ops.full(kron_mat_batch)).eval()
         actual = kr.determinant(kron_mat_batch).eval()
         self.assertAllClose(desired, actual)
Example #24
 def testInv(self):
     # Tests the inv function
     initializer = initializers.random_matrix_batch(((2, 3, 2), (2, 3, 2)),
                                                    tt_rank=1,
                                                    batch_size=3)
     kron_mat_batch = variables.get_variable('kron_mat_batch',
                                             initializer=initializer)
     init_op = tf.global_variables_initializer()
     with self.test_session() as sess:
         sess.run(init_op)
         desired = np.linalg.inv(ops.full(kron_mat_batch).eval())
         actual = ops.full(kr.inv(kron_mat_batch)).eval()
         self.assertAllClose(desired, actual, atol=1e-4)
Example #25
 def testSlogDet(self):
   # Tests the slog_determinant function
   
   tf.set_random_seed(1) # negative and positive determinants
   initializer = initializers.random_matrix_batch(((2, 3), (2, 3)), tt_rank=1, 
                                                  batch_size=3,
                                                  dtype=self.dtype)
   kron_mat_batch = variables.get_variable('kron_mat_batch', 
                                           initializer=initializer)
 
   init_op = tf.global_variables_initializer()
   with self.test_session() as sess:
       # negative determinant
     sess.run(init_op)
     desired_sign, desired_det = np.linalg.slogdet(
                                               ops.full(kron_mat_batch).eval())
     actual_sign, actual_det = sess.run(kr.slog_determinant(kron_mat_batch))
     self.assertAllEqual(desired_sign, actual_sign)
     self.assertAllClose(desired_det, actual_det)
Example #26
 def testGramMatrixWithMatrix(self):
     # Test Gram Matrix of a batch of TT vectors with a given matrix, in which
     # case we should compute
     # res[i, j] = tt_vectors[i] ^ T * matrix * tt_vectors[j]
     tt_vectors = initializers.random_matrix_batch(((2, 3), None),
                                                   batch_size=4)
     matrix = initializers.random_matrix(((2, 3), (2, 3)))
     res_actual = batch_ops.gram_matrix(tt_vectors, matrix)
     full_vectors = tf.reshape(ops.full(tt_vectors), (4, 6))
     with self.test_session() as sess:
         res = sess.run((res_actual, full_vectors, ops.full(matrix)))
         res_actual_val, vectors_val, matrix_val = res
         res_desired_val = np.zeros((4, 4))
         for i in range(4):
             for j in range(4):
                 curr_val = np.dot(vectors_val[i], matrix_val)
                 curr_val = np.dot(curr_val, vectors_val[j])
                 res_desired_val[i, j] = curr_val
         self.assertAllClose(res_desired_val,
                             res_actual_val,
                             atol=1e-5,
                             rtol=1e-5)
Example #27
  def testToAndFromDeltasBatch(self):
    # Test converting to and from deltas representation of the tangent space
    # element in the batch case.
    what = initializers.random_matrix_batch(((2, 3, 4), (3, 3, 3)), 4,
                                            batch_size=3, dtype=self.dtype)
    where = initializers.random_matrix(((2, 3, 4), (3, 3, 3)), 3,
                                       dtype=self.dtype)
    projected = riemannian.project(what, where)

    deltas = riemannian.tangent_space_to_deltas(projected)
    reconstructed_projected = riemannian.deltas_to_tangent_space(deltas, where)
    # Tangent space element norm can be computed from deltas norm.
    projected_normsq_desired = ops.frobenius_norm_squared(projected)
    d_normssq = [tf.reduce_sum(tf.reshape(c, (3, -1)) ** 2, 1) for c in deltas]
    projected_normsq_actual = tf.add_n(d_normssq)

    desired_val, actual_val = self.evaluate((ops.full(projected),
                                        ops.full(reconstructed_projected)))
    self.assertAllClose(desired_val, actual_val)
    desired_val, actual_val = self.evaluate((projected_normsq_desired,
                                        projected_normsq_actual))
    self.assertAllClose(desired_val, actual_val)
Example #28
 def testLazyShapeOverflow(self):
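     # Check that lazy_shape handles huge shapes (10**20) without overflow.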
     large_shape = [10] * 20
     tensor = initializers.random_matrix_batch([large_shape, large_shape],
                                               batch_size=5,
                                               dtype=self.dtype)
     self.assertAllEqual([5, 10**20, 10**20], shapes.lazy_shape(tensor))
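
All of the examples above are methods of test classes, so they rely on helpers such as self.dtype, self.evaluate and self.test_session(). As a minimal standalone sketch (assuming the t3f TensorFlow package, whose initializers and ops modules match the names used above, and a TensorFlow 2 runtime), the basic call looks like this:

import tensorflow as tf
from t3f import initializers, ops

# A batch of 3 random TT-matrices of shape (2*3) x (4*5) with TT-rank 2.
tt_batch = initializers.random_matrix_batch(((2, 3), (4, 5)),
                                            tt_rank=2, batch_size=3,
                                            dtype=tf.float32)

# Convert the whole batch to a dense tensor of shape (3, 6, 20).
dense = ops.full(tt_batch)

Passing None as the second part of the shape, as several of the tests above do, produces a batch of TT-vectors rather than TT-matrices.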