Example #1
    def test_conversion(self):
        """test conversion to full format and element extraction"""

        # convert to full format
        t_full = self.t.full()

        # number of wrong entries
        err = 0

        # loop through all elements of the tensor
        for i in range(int(np.prod(self.row_dims + self.col_dims))):

            # convert flat index
            j = np.unravel_index(i, self.row_dims + self.col_dims)

            # extract elements of both representations
            v = self.t.element(list(j))
            w = t_full[j]

            # count wrong entries
            if abs(v - w) / abs(v) > self.tol:
                err += 1

        # check if no wrong entry exists
        self.assertEqual(err, 0)

        with self.assertRaises(ValueError):
            rand([1, 2], [2, 3], [2, 2, 2]).full()
Example #2
    def test_full_both(self):
        # contract over all axes of both tensors (scalar result)
        T = np.random.random((3, 4, 5))
        U = np.random.random((3, 4, 5))
        TU = np.tensordot(T, U, axes=([0, 1, 2], [0, 1, 2]))

        T_tt = TT(np.reshape(T, (3, 4, 5, 1, 1, 1)))
        U_tt = TT(np.reshape(U, (3, 4, 5, 1, 1, 1)))
        T_tt.tensordot(U_tt, 3, overwrite=True)

        TU_back = np.squeeze(T_tt.full())
        error = np.linalg.norm(TU_back - TU)
        self.assertLess(error, self.tol)

        # same full contraction for TT operators
        T_tt = rand([2, 3, 4], [5, 6, 7], ranks=2)
        U_tt = rand([2, 3, 4], [5, 6, 7], ranks=3)
        T = T_tt.full()
        U = U_tt.full()

        TU = np.tensordot(T, U, axes=([0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]))
        T_tt.tensordot(U_tt, 3, overwrite=True)
        TU_back = T_tt.full()
        error = np.linalg.norm(TU_back - TU)
        self.assertLess(error, self.tol)
Example #3
    def test_concatenate(self):
        # test concatenate with other TT
        p = self.t.order
        t_other = rand(row_dims=[2, 3], col_dims=[3, 2], ranks=[1, 3, 1])
        concat = self.t.concatenate(t_other)
        for i in range(concat.order):
            if i < p:
                err = concat.cores[i] - self.t.cores[i]
            else:
                err = concat.cores[i] - t_other.cores[i - p]
            self.assertLess(np.linalg.norm(err), self.tol)

        t_other = rand(row_dims=[2, 3], col_dims=[3, 2], ranks=[3, 3, 1])
        with self.assertRaises(ValueError):
            concat = self.t.concatenate(t_other)

        # test concatenate with list of cores
        t_other = []
        ranks = [1, 2, 3, 1]
        for i in range(len(ranks) - 1):
            t_other.append(np.random.random((ranks[i], np.random.randint(1, 4), np.random.randint(1, 4), ranks[i + 1])))
        concat = self.t.concatenate(t_other)
        for i in range(concat.order):
            if i < p:
                err = concat.cores[i] - self.t.cores[i]
            else:
                err = concat.cores[i] - t_other[i - p]
            self.assertLess(np.linalg.norm(err), self.tol)

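        # appending an array that is not a valid TT core or whose rank does not match must raise a ValueError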
        t_other.append(np.zeros((2, 3)))
        with self.assertRaises(ValueError):
            concat = self.t.concatenate(t_other)
        t_other = [np.random.random((3, 2, 2, 2))]
        with self.assertRaises(ValueError):
            concat = self.t.concatenate(t_other)
Example #4
    def test_random(self):
        """test random tensor train"""

        # construct random tensor train
        t_rand = tt.rand(self.row_dims, self.col_dims)

        # check if attributes are correct
        self.assertEqual(t_rand.order, self.order)
        self.assertEqual(t_rand.row_dims, self.row_dims)
        self.assertEqual(t_rand.col_dims, self.col_dims)
        self.assertEqual(t_rand.ranks, [1] * (self.order + 1))
Example #5
    def test_rank_tensordot(self):
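        # contract matrices into the last and first ranks of the TT via rank_tensordot
        # and compare with contracting the corresponding boundary cores directly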
        t = rand([2, 3, 4], [4, 2, 1], [2, 3, 4, 2])
        mat_front = np.random.random((1, 2))
        mat_back = np.random.random((2, 1))
        t2 = t.rank_tensordot(mat_back)
        t2 = t2.rank_tensordot(mat_front, mode='first')

        t.cores[-1] = np.tensordot(t.cores[-1], mat_back, axes=([3], [0]))
        t.cores[0] = np.tensordot(mat_front, t.cores[0], axes=([1], [0]))
        t.ranks = [t.cores[i].shape[0] for i in range(t.order)] + [t.cores[-1].shape[3]]
        err = t.full() - t2.full()
        self.assertLess(np.linalg.norm(err), self.tol)

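        # arrays that are not matrices or whose dimensions do not match the boundary ranks must raise a ValueError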
        with self.assertRaises(ValueError):
            t.rank_tensordot(np.zeros((2, 3, 2)))
        with self.assertRaises(ValueError):
            t.rank_tensordot(np.zeros((3, 3)))
        with self.assertRaises(ValueError):
            t.rank_tensordot(np.zeros((3, 3)), mode='first')
Example #6
    def test_1_axis(self):
        # test contraction over 1 axis
        # last-first contraction
        T = np.random.random((2, 3, 4, 5))
        U = np.random.random((5, 6, 7))
        TU = np.tensordot(T, U, axes=([3], [0]))

        T_tt = TT(np.reshape(T, (2, 3, 4, 5, 1, 1, 1, 1)))
        U_tt = TT(np.reshape(U, (5, 6, 7, 1, 1, 1)))
        TU_back = T_tt.tensordot(U_tt, 1)

        TU_back = np.squeeze(TU_back.full())
        error = np.linalg.norm(TU_back - TU)
        self.assertLess(error, self.tol)

        # last-last contraction
        T = np.random.random((2, 3, 4, 5))
        U = np.random.random((6, 7, 5))
        TU = np.tensordot(T, U, axes=([3], [2]))
        TU = np.transpose(TU, [0, 1, 2, 4, 3])

        T_tt = TT(np.reshape(T, (2, 3, 4, 5, 1, 1, 1, 1)))
        U_tt = TT(np.reshape(U, (6, 7, 5, 1, 1, 1)))
        T_tt.tensordot(U_tt, 1, mode='last-last', overwrite=True)

        TU_back = np.squeeze(T_tt.full())
        error = np.linalg.norm(TU_back - TU)
        self.assertLess(error, self.tol)

        # first-last contraction
        T = np.random.random((2, 3, 4, 5))
        U = np.random.random((6, 7, 2))
        TU = np.tensordot(T, U, axes=([0], [2]))
        TU = np.transpose(TU, [3, 4, 0, 1, 2])

        T_tt = TT(np.reshape(T, (2, 3, 4, 5, 1, 1, 1, 1)))
        U_tt = TT(np.reshape(U, (6, 7, 2, 1, 1, 1)))
        T_tt.tensordot(U_tt, 1, mode='first-last', overwrite=True)

        TU_back = np.squeeze(T_tt.full())
        error = np.linalg.norm(TU_back - TU)
        self.assertLess(error, self.tol)

        # first-first contraction
        T = np.random.random((2, 3, 4, 5))
        U = np.random.random((2, 6, 7))
        TU = np.tensordot(T, U, axes=([0], [0]))
        TU = np.transpose(TU, [4, 3, 0, 1, 2])

        T_tt = TT(np.reshape(T, (2, 3, 4, 5, 1, 1, 1, 1)))
        U_tt = TT(np.reshape(U, (2, 6, 7, 1, 1, 1)))
        T_tt.tensordot(U_tt, 1, mode='first-first', overwrite=True)

        TU_back = np.squeeze(T_tt.full())
        error = np.linalg.norm(TU_back - TU)
        self.assertLess(error, self.tol)

        # last-first contraction for operator
        T_tt = rand([2, 3, 4], [5, 6, 7], ranks=3)
        U_tt = rand([4, 8, 9], [7, 8, 9], ranks=2)
        T = T_tt.full()
        U = U_tt.full()

        TU = np.tensordot(T, U, axes=([2, 5], [0, 3]))
        TU = np.transpose(TU, [0, 1, 4, 5, 2, 3, 6, 7])
        T_tt.tensordot(U_tt, 1, overwrite=True)
        TU_back = T_tt.full()
        error = np.linalg.norm(TU_back - TU)
        self.assertLess(error, self.tol)
Example #7
    def test_full_other(self):
        # contract over all axes of U
        # last-first contraction
        T = np.random.random((2, 3, 4, 5))
        U = np.random.random((3, 4, 5))
        TU = np.tensordot(T, U, axes=([1, 2, 3], [0, 1, 2]))

        T_tt = TT(np.reshape(T, (2, 3, 4, 5, 1, 1, 1, 1)))
        U_tt = TT(np.reshape(U, (3, 4, 5, 1, 1, 1)))
        T_tt.tensordot(U_tt, 3, overwrite=True)

        TU_back = np.squeeze(T_tt.full())
        error = np.linalg.norm(TU_back - TU)
        self.assertLess(error, self.tol)

        # last-last contraction
        T = np.random.random((2, 3, 4, 5, 6))
        U = np.random.random((4, 5, 6))
        TU = np.tensordot(T, U, axes=([2, 3, 4], [0, 1, 2]))

        T_tt = TT(np.reshape(T, (2, 3, 4, 5, 6, 1, 1, 1, 1, 1)))
        U_tt = TT(np.reshape(U, (4, 5, 6, 1, 1, 1)))
        T_tt.tensordot(U_tt, 3, mode='last-last', overwrite=True)

        TU_back = np.squeeze(T_tt.full())
        error = np.linalg.norm(TU_back - TU)
        self.assertLess(error, self.tol)

        # first-last contraction
        T = np.random.random((2, 3, 4, 5, 6))
        U = np.random.random((2, 3, 4))
        TU = np.tensordot(T, U, axes=([0, 1, 2], [0, 1, 2]))

        T_tt = TT(np.reshape(T, (2, 3, 4, 5, 6, 1, 1, 1, 1, 1)))
        U_tt = TT(np.reshape(U, (2, 3, 4, 1, 1, 1)))
        T_tt.tensordot(U_tt, 3, mode='first-last', overwrite=True)

        TU_back = np.squeeze(T_tt.full())
        error = np.linalg.norm(TU_back - TU)
        self.assertLess(error, self.tol)

        # first-first contraction
        T = np.random.random((2, 3, 4, 5, 6))
        U = np.random.random((2, 3, 4))
        TU = np.tensordot(T, U, axes=([0, 1, 2], [0, 1, 2]))

        T_tt = TT(np.reshape(T, (2, 3, 4, 5, 6, 1, 1, 1, 1, 1)))
        U_tt = TT(np.reshape(U, (2, 3, 4, 1, 1, 1)))
        T_tt.tensordot(U_tt, 3, mode='first-first', overwrite=True)

        TU_back = np.squeeze(T_tt.full())
        error = np.linalg.norm(TU_back - TU)
        self.assertLess(error, self.tol)

        # last-first contraction for operator
        T_tt = rand([2, 3, 4, 5], [5, 6, 7, 8], ranks=2)
        U_tt = rand([4, 5], [7, 8], ranks=3)
        T = T_tt.full()
        U = U_tt.full()

        TU = np.tensordot(T, U, axes=([2, 3, 6, 7], [0, 1, 2, 3]))
        T_tt.tensordot(U_tt, 2, overwrite=True)
        TU_back = T_tt.full()
        error = np.linalg.norm(TU_back - TU)
        self.assertLess(error, self.tol)
Example #8
    def test_multiple_axes(self):
        # contraction over multiple axes
        # last-first contraction
        T = np.random.random((2, 3, 4, 5))
        U = np.random.random((3, 4, 5, 6, 7))
        TU = np.tensordot(T, U, axes=([1, 2, 3], [0, 1, 2]))

        T_tt = TT(np.reshape(T, (2, 3, 4, 5, 1, 1, 1, 1)))
        U_tt = TT(np.reshape(U, (3, 4, 5, 6, 7, 1, 1, 1, 1, 1)))
        T_tt.tensordot(U_tt, 3, overwrite=True)

        TU_back = np.squeeze(T_tt.full())
        error = np.linalg.norm(TU_back - TU)
        self.assertLess(error, self.tol)

        # last-last contraction
        T = np.random.random((2, 3, 4, 5))
        U = np.random.random((6, 7, 4, 5))
        TU = np.tensordot(T, U, axes=([2, 3], [2, 3]))
        TU = np.transpose(TU, [0, 1, 3, 2])

        T_tt = TT(np.reshape(T, (2, 3, 4, 5, 1, 1, 1, 1)))
        U_tt = TT(np.reshape(U, (6, 7, 4, 5, 1, 1, 1, 1)))
        TU_back = T_tt.tensordot(U_tt, 2, mode='last-last')

        TU_back = np.squeeze(TU_back.full())
        error = np.linalg.norm(TU_back - TU)
        self.assertLess(error, self.tol)

        # first-last contraction
        T = np.random.random((2, 3, 4, 5))
        U = np.random.random((6, 7, 2, 3))
        TU = np.tensordot(T, U, axes=([0, 1], [2, 3]))
        TU = np.transpose(TU, [2, 3, 0, 1])

        T_tt = TT(np.reshape(T, (2, 3, 4, 5, 1, 1, 1, 1)))
        U_tt = TT(np.reshape(U, (6, 7, 2, 3, 1, 1, 1, 1)))
        TU_back = T_tt.tensordot(U_tt, 2, mode='first-last')

        TU_back = np.squeeze(TU_back.full())
        error = np.linalg.norm(TU_back - TU)
        self.assertLess(error, self.tol)

        # first-first contraction
        T = np.random.random((2, 3, 4, 5))
        U = np.random.random((2, 3, 6, 7))
        TU = np.tensordot(T, U, axes=([0, 1], [0, 1]))
        TU = np.transpose(TU, [3, 2, 0, 1])

        T_tt = TT(np.reshape(T, (2, 3, 4, 5, 1, 1, 1, 1)))
        U_tt = TT(np.reshape(U, (2, 3, 6, 7, 1, 1, 1, 1)))
        TU_back = T_tt.tensordot(U_tt, 2, mode='first-first')

        TU_back = np.squeeze(TU_back.full())
        error = np.linalg.norm(TU_back - TU)
        self.assertLess(error, self.tol)

        # last-last contraction for operator
        T_tt = rand([3, 4, 5], [6, 7, 8], ranks=3)
        U_tt = rand([8, 9, 4, 5], [2, 3, 7, 8], ranks=2)
        T = T_tt.full()
        U = U_tt.full()

        TU = np.tensordot(T, U, axes=([1, 2, 4, 5], [2, 3, 6, 7]))
        TU = np.transpose(TU, [0, 3, 2, 1, 5, 4])
        T_tt.tensordot(U_tt, 2, mode='last-last', overwrite=True)
        TU_back = T_tt.full()
        error = np.linalg.norm(TU_back - TU)
        self.assertLess(error, self.tol)