예제 #1
0
    def test_axpby(self):
        # Linearly combine two random TT operators and verify that applying
        # the combined operator matches the same linear combination of the
        # individually applied operators.
        opA = pitts_py.TensorTrainOperator_double([2, 3, 4], [3, 2, 2])
        opA.setTTranks(2)
        opB = pitts_py.TensorTrainOperator_double([2, 3, 4], [3, 2, 2])

        pitts_py.randomize(opA)
        pitts_py.randomize(opB)

        # opAB <- 0.33*opA - 0.97*opB
        opAB = pitts_py.TensorTrainOperator_double([2, 3, 4], [3, 2, 2])
        pitts_py.copy(opB, opAB)
        pitts_py.axpby(0.33, opA, -0.97, opAB)

        x = pitts_py.TensorTrain_double([3, 2, 2])
        x.setTTranks(2)
        pitts_py.randomize(x)

        # y = opAB * x
        y = pitts_py.TensorTrain_double([2, 3, 4])
        pitts_py.apply(opAB, x, y)

        # yA = opA * x, yB = opB * x
        yA = pitts_py.TensorTrain_double([2, 3, 4])
        pitts_py.apply(opA, x, yA)

        yB = pitts_py.TensorTrain_double([2, 3, 4])
        pitts_py.apply(opB, x, yB)

        # yAB = 0.33*yA - 0.97*yB, scale is its norm
        yAB = pitts_py.TensorTrain_double([2, 3, 4])
        pitts_py.copy(yB, yAB)
        scale = pitts_py.axpby(0.33, yA, -0.97, yAB)

        # difference between the two ways of computing the result
        diff = pitts_py.axpby(-scale, yAB, 1., y)
        self.assertLess(diff, 1.e-8)
예제 #2
0
    def test_apply_zero(self):
        # Applying the zero operator to any tensor must yield the zero tensor.
        op = pitts_py.TensorTrainOperator_double([2, 3, 2], [3, 4, 1])
        x = pitts_py.TensorTrain_double([3, 4, 1])
        y = pitts_py.TensorTrain_double([2, 3, 2])

        op.setZero()
        pitts_py.randomize(x)
        pitts_py.apply(op, x, y)
        self.assertEqual(0, pitts_py.norm2(y))
예제 #3
0
    def test_dot(self):
        # Dot products of unit tensors: 1 with themselves, 0 between
        # tensors with different unit indices.
        ttA = pitts_py.TensorTrain_double([2, 5, 3])
        ttB = pitts_py.TensorTrain_double([2, 5, 3])

        ttA.setUnit([0, 1, 2])
        ttB.setUnit([0, 2, 2])

        np.testing.assert_almost_equal(1., pitts_py.dot(ttA, ttA))
        np.testing.assert_almost_equal(0., pitts_py.dot(ttA, ttB))
        np.testing.assert_almost_equal(1., pitts_py.dot(ttB, ttB))
예제 #4
0
 def test_copy(self):
     # A copy must be deep: modifying the source afterwards does not
     # affect the destination.
     src = pitts_py.TensorTrain_double([2, 4, 3])
     dst = pitts_py.TensorTrain_double([2, 4, 3])
     src.setUnit([0, 1, 2])
     pitts_py.copy(src, dst)
     src.setUnit([1, 0, 0])
     dense = pitts_py.toDense(dst)
     expected = np.zeros([2, 4, 3])
     expected[0, 1, 2] = 1
     np.testing.assert_array_almost_equal(expected, dense)
예제 #5
0
    def test_apply_identity(self):
        # The identity operator must reproduce its input (up to roundoff).
        op = pitts_py.TensorTrainOperator_double([5, 3, 3], [5, 3, 3])
        x = pitts_py.TensorTrain_double([5, 3, 3])
        y = pitts_py.TensorTrain_double([5, 3, 3])

        op.setEye()
        pitts_py.randomize(x)
        pitts_py.apply(op, x, y)
        # norm of y - x should vanish
        diff = pitts_py.axpby(-1, x, 1, y)
        self.assertLess(diff, 1.e-8)
예제 #6
0
 def test_setOnes(self):
     # setOnes resets all TT ranks to 1 and fills the tensor with ones.
     tt = pitts_py.TensorTrain_double([2, 3, 4])
     tt.setTTranks([2, 4])
     tt.setOnes()
     self.assertEqual([1, 1], tt.getTTranks())
     dense = pitts_py.toDense(tt)
     np.testing.assert_array_almost_equal(np.ones([2, 3, 4]), dense)
예제 #7
0
    def test_norm(self):
        # norm2 must agree with sqrt(<tt, tt>).
        tt = pitts_py.TensorTrain_double([2, 5, 3])
        tt.setTTranks([2, 2])
        pitts_py.randomize(tt)

        nrm_from_dot = np.sqrt(pitts_py.dot(tt, tt))
        np.testing.assert_almost_equal(nrm_from_dot, pitts_py.norm2(tt))
예제 #8
0
def tt_gmres(AOp, b, nrm_b, eps=1.e-6, maxIter=20, verbose=True):
    """ Tensor-train GMRES algorithm without restart

    Parameters
    ----------
    AOp : callable(x, y, eps) -> float
        applies the linear operator to x, stores the normalized result in y
        and returns its norm
    b : pitts_py.TensorTrain_double
        normalized right-hand side
    nrm_b : float
        norm of the right-hand side
    eps : float
        desired relative residual tolerance
    maxIter : int
        maximal number of iterations (no restarts)
    verbose : bool
        print convergence information

    Returns
    -------
    (x, nrm_x)
        normalized solution tensor-train and its norm
    """

    # assumes b is normalized and nrm_b is the desired rhs norm
    # define initial subspace
    beta = nrm_b
    curr_beta = beta
    V = [b]
    m = maxIter
    # upper Hessenberg matrix of the Arnoldi process
    # (plain ndarray: np.mat is deprecated and removed in NumPy >= 2.0)
    H = np.zeros((m + 1, m), order='F')

    if verbose:
        print("TT-GMRES: initial residual norm: %g, max. rank: %d" %
              (beta, np.max(b.getTTranks())))

    for j in range(m):
        # loosen/tighten the TT truncation tolerance with the current residual
        delta = eps / (curr_beta / beta)
        w = pitts_py.TensorTrain_double(b.dimensions())
        w_nrm = AOp(V[j], w, delta / m)
        # modified Gram-Schmidt orthogonalization wrt. previous Krylov vectors
        for i in range(j + 1):
            H[i, j] = w_nrm * pitts_py.dot(w, V[i])
            w_nrm = pitts_py.axpby(-H[i, j], V[i], w_nrm, w, delta / m)
        if verbose:
            print("TT-GMRES: iteration %d, new Krylov vector max. rank: %d" %
                  (j, np.max(w.getTTranks())))
        H[j + 1, j] = w_nrm
        V = V + [w]
        Hj = H[:j + 2, :j + 1]
        betae = np.zeros(j + 2)
        betae[0] = beta
        # solving Hj * y = beta e_1 (small dense least-squares problem)
        y, curr_beta, rank, s = np.linalg.lstsq(Hj, betae, rcond=None)
        # lstsq returns the squared residual norm (empty array if rank-deficient)
        curr_beta = np.sqrt(curr_beta[0]) if curr_beta.size > 0 else 0
        if verbose:
            print("TT-GMRES:               LSTSQ residual norm: %g " %
                  (curr_beta / beta))
        if curr_beta / beta <= eps:
            break

    # assemble the solution x = sum_i y_i V_i from the Krylov basis
    x = pitts_py.TensorTrain_double(b.dimensions())
    x.setZero()
    nrm_x = 0
    for i in range(len(y)):
        nrm_x = pitts_py.axpby(y[i], V[i], nrm_x, x, eps / m)
    if verbose:
        print("TT-GMRES: solution max rank %d" % np.max(x.getTTranks()))
    return x, nrm_x
예제 #9
0
    def test_example(self):
        # Minimal usage example: initialize the library, create and
        # randomize a tensor train, then finalize.
        pitts_py.initialize()

        tt = pitts_py.TensorTrain_double([5, 5, 5])
        pitts_py.randomize(tt)

        pitts_py.finalize()
예제 #10
0
 def test_setSubTensor_invalidShape(self):
     # setSubTensor must validate both the sub-tensor index and the
     # shape of the supplied data.
     tt = pitts_py.TensorTrain_double([3, 2, 5])
     core_1x3x1 = [[[1], [2], [3]]]
     # index out of range
     with self.assertRaises(IndexError):
         tt.setSubTensor(10, core_1x3x1)
     # wrong number of dimensions
     with self.assertRaises(ValueError):
         tt.setSubTensor(0, [1, 2, 3])
     # this shape fits the first core ...
     tt.setSubTensor(0, core_1x3x1)
     # ... but not the second one
     with self.assertRaises(ValueError):
         tt.setSubTensor(1, core_1x3x1)
예제 #11
0
 def test_setUnit(self):
     # setUnit resets the ranks to 1 and produces the canonical unit
     # tensor with a single 1 at the given multi-index.
     tt = pitts_py.TensorTrain_double([2, 3, 4])
     tt.setTTranks([2, 4])
     tt.setUnit([1, 0, 3])
     self.assertEqual([1, 1], tt.getTTranks())
     dense = pitts_py.toDense(tt)
     expected = np.zeros([2, 3, 4])
     expected[1, 0, 3] = 1
     np.testing.assert_array_almost_equal(expected, dense)
예제 #12
0
 def test_getSubTensor_unit(self):
     # Each core of a unit tensor is a 1 x n x 1 unit vector selecting
     # the corresponding index.
     tt = pitts_py.TensorTrain_double([3, 2, 5])
     tt.setUnit([1, 0, 2])
     cores = [tt.getSubTensor(i) for i in range(3)]
     np.testing.assert_array_almost_equal([[[0], [1], [0]]], cores[0])
     np.testing.assert_array_almost_equal([[[1], [0]]], cores[1])
     np.testing.assert_array_almost_equal([[[0], [0], [1], [0], [0]]], cores[2])
예제 #13
0
 def test_getSubTensor_zeros(self):
     # All cores of the zero tensor are zero-filled 1 x n x 1 arrays.
     tt = pitts_py.TensorTrain_double([3, 2, 5])
     tt.setZero()
     for i, n in enumerate([3, 2, 5]):
         core = tt.getSubTensor(i)
         np.testing.assert_array_almost_equal(np.zeros([1, n, 1]), core)
예제 #14
0
    def test_normalize(self):
        # normalize returns the original norm and leaves a unit-norm tensor.
        tt = pitts_py.TensorTrain_double([2, 5, 3])
        tt.setTTranks([2, 2])
        pitts_py.randomize(tt)

        expected_norm = pitts_py.norm2(tt)
        returned_norm = pitts_py.normalize(tt)
        np.testing.assert_almost_equal(expected_norm, returned_norm)
        np.testing.assert_almost_equal(1., pitts_py.norm2(tt))
예제 #15
0
 def test_setGetSubTensor_large(self):
     # Round-trip of large random sub-tensors through set/getSubTensor.
     tt = pitts_py.TensorTrain_double([50, 100, 20])
     tt.setTTranks([2, 3])
     pitts_py.randomize(tt)
     cores_ref = [np.random.rand(1, 50, 2),
                  np.random.rand(2, 100, 3),
                  np.random.rand(3, 20, 1)]
     for i, core in enumerate(cores_ref):
         tt.setSubTensor(i, core)
     for i, core in enumerate(cores_ref):
         np.testing.assert_array_almost_equal(core, tt.getSubTensor(i))
예제 #16
0
def cdist2_TTapprox(X, dims_samples, dims_features, rank_tolerance, max_rank, debug=False):
    """approximate the pair-wise squared distances of all rows using a TT decomposition of the data

    Parameters
    ----------
    X : 2d array of shape (n_samples, n_features)
        data matrix, one sample per row
    dims_samples : list of int
        factorization of the number of rows (prod == n_samples)
    dims_features : list of int
        factorization of the number of columns (prod == n_features)
    rank_tolerance : float
        truncation tolerance for the TT approximation
    max_rank : int
        maximal TT rank of the approximation
    debug : bool
        if True, save scatter plots comparing X with its TT approximation

    Returns
    -------
    result of cdist2 applied to the low-rank (US) factor of the data
    """
    # convert to TT format
    dims = dims_samples + dims_features
    n_samples = np.prod(dims_samples)
    n_features = np.prod(dims_features)
    Xm = pitts_py.MultiVector_double(n_samples*n_features//dims[-1], dims[-1])
    work = pitts_py.MultiVector_double()
    Xm_view = np.array(Xm, copy=False)

    # scale each row by its norm -> all rows have length 1
    Xnorms = np.linalg.norm(X, axis=1)
    Xm_view[...] = (np.diag(1 / Xnorms) @ X).reshape(Xm_view.shape, order='F')

    # bugfix: forward rank_tolerance and max_rank to the TT compression
    # (they were previously ignored: hard-coded rankTolerance=0.01, maxRank=200)
    Xtt = pitts_py.fromDense(Xm, work, dims,
                             rankTolerance=rank_tolerance, maxRank=max_rank)
    if debug:
        X_approx = pitts_py.toDense(Xtt).reshape((n_samples, n_features), order='F') * Xnorms[:, np.newaxis]
        plt.scatter(X[:, 0], X[:, 1])
        plt.scatter(X_approx[:, 0], X_approx[:, 1])
        plt.savefig('scatter_01_approx.png')
        plt.close()

        plt.scatter(X[:, 1], X[:, 2])
        plt.scatter(X_approx[:, 1], X_approx[:, 2])
        plt.savefig('scatter_12_approx.png')
        plt.close()

        plt.scatter(X[:, 3], X[:, 4])
        plt.scatter(X_approx[:, 3], X_approx[:, 4])
        plt.savefig('scatter_34_approx.png')
        plt.close()

    # setup approximated (US) from SVD (U S V^T)
    d_samples = len(dims_samples)
    Xtt_ranks = Xtt.getTTranks()
    k = Xtt_ranks[d_samples-1]
    US_tt = pitts_py.TensorTrain_double(dims_samples + [k,])
    US_tt.setTTranks(Xtt_ranks[:d_samples-1] + [k,])
    for i in range(d_samples):
        US_tt.setSubTensor(i, Xtt.getSubTensor(i))
    # last sub-tensor is just the identity to get an additional direction
    US_tt.setSubTensor(d_samples, np.eye(k, k).reshape((k, k, 1)))
    US_approx = pitts_py.toDense(US_tt).reshape((n_samples, k), order='F')
    if debug:
        plt.matshow(US_approx)
        plt.savefig('US_approx.png')
        plt.close()
    # undo the row scaling before computing the distances
    US_approx = np.diag(Xnorms) @ US_approx
    return cdist2(US_approx)
예제 #17
0
 def test_setGetSubTensor(self):
     # Values written with setSubTensor are read back unchanged.
     tt = pitts_py.TensorTrain_double([3, 2, 5])
     tt.setTTranks([2, 3])
     pitts_py.randomize(tt)
     # reference cores with distinct, easily recognizable entries
     core0 = np.arange(1, 7).reshape([1, 3, 2])
     core1 = np.arange(10, 22).reshape([2, 2, 3])
     core2 = np.arange(101, 116).reshape([3, 5, 1])
     for i, core in enumerate((core0, core1, core2)):
         tt.setSubTensor(i, core)
     np.testing.assert_array_almost_equal(core0, tt.getSubTensor(0))
     np.testing.assert_array_almost_equal(core1, tt.getSubTensor(1))
     np.testing.assert_array_almost_equal(core2, tt.getSubTensor(2))
예제 #18
0
    def test_randomize(self):
        # randomize keeps the current TT ranks and produces different
        # values on every call.
        tt = pitts_py.TensorTrain_double([2, 5, 3])
        self.assertEqual([1, 1], tt.getTTranks())

        dense1 = pitts_py.toDense(tt)

        pitts_py.randomize(tt)
        self.assertEqual([1, 1], tt.getTTranks())
        dense2 = pitts_py.toDense(tt)

        pitts_py.randomize(tt)
        dense3 = pitts_py.toDense(tt)

        # randomize must also respect newly set higher ranks
        tt.setTTranks([2, 3])
        pitts_py.randomize(tt)
        self.assertEqual([2, 3], tt.getTTranks())
        dense4 = pitts_py.toDense(tt)

        # consecutive results should differ significantly
        self.assertGreater(np.linalg.norm(dense1 - dense2), 1.e-4)
        self.assertGreater(np.linalg.norm(dense2 - dense3), 1.e-4)
        self.assertGreater(np.linalg.norm(dense3 - dense4), 1.e-4)
예제 #19
0
    def test_rightNormalize(self):
        # rightNormalize returns the original norm, leaves a unit-norm
        # tensor and right-orthogonal sub-tensors.
        tt = pitts_py.TensorTrain_double([2, 5, 3])
        tt.setTTranks([2, 2])
        pitts_py.randomize(tt)

        norm_ref = pitts_py.norm2(tt)
        norm = pitts_py.rightNormalize(tt)
        np.testing.assert_almost_equal(norm_ref, norm)
        np.testing.assert_almost_equal(1., pitts_py.norm2(tt))

        # each core reshaped to r x (n*r') must have orthonormal rows
        t1 = tt.getSubTensor(0)
        t2 = tt.getSubTensor(1)
        t3 = tt.getSubTensor(2)
        r1 = tt.getTTranks()[0]
        r2 = tt.getTTranks()[1]
        t1_mat = t1.reshape([1, t1.size // 1])
        t2_mat = t2.reshape([r1, t2.size // r1])
        t3_mat = t3.reshape([r2, t3.size // r2])
        np.testing.assert_array_almost_equal(
            np.eye(1, 1), np.dot(t1_mat, t1_mat.transpose()))
        np.testing.assert_array_almost_equal(
            np.eye(r1, r1), np.dot(t2_mat, t2_mat.transpose()))
        # bugfix: use the actual rank r2 instead of a hard-coded 2, so the
        # check stays correct if normalization truncates the rank
        np.testing.assert_array_almost_equal(
            np.eye(r2, r2), np.dot(t3_mat, t3_mat.transpose()))
예제 #20
0
            eye_i = TTOp_dummy.getSubTensor(iDim)
            tridi_i = np.zeros((n_i,n_i))
            for i in range(n_i):
                for j in range(n_i):
                    if i == j:
                        tridi_i[i,j] = 2. / (n_i+1)
                    elif i+1 == j or i-1 == j:
                        tridi_i[i,j] = -1. / (n_i+1)
                    else:
                        tridi_i[i,j] = 0
            TTOp_dummy.setSubTensor(iDim, tridi_i.reshape(1,n_i,n_i,1))
            pitts_py.axpby(1, TTOp_dummy, 1, TTOp)
            TTOp_dummy.setSubTensor(iDim, eye_i)
        return TTOp

    # 5-dimensional Laplace-type operator with 10 grid points per dimension
    TTOp = LaplaceOperator([10,]*5)

    # initial guess: all-ones tensor train
    x0 = pitts_py.TensorTrain_double(TTOp.row_dimensions())
    x0.setOnes()

    # approximate an eigenvalue/eigenvector pair of the symmetric operator
    sigma, q = tt_jacobi_davidson(TTOp, x0, symmetric=True, eps=1.e-8)

    # check the result: residual r = TTOp*q - sigma*q and Rayleigh quotient q^T TTOp q
    r = pitts_py.TensorTrain_double(x0.dimensions())
    pitts_py.apply(TTOp, q, r)
    sigma_ref = pitts_py.dot(q, r)
    r_nrm = pitts_py.axpby(-sigma, q, 1, r)
    print("Residual norm: %g" % r_nrm)
    print("Est. eigenvalue: %g, real Ritz value: %g, error: %g" % (sigma, sigma_ref, np.abs(sigma-sigma_ref)))

    pitts_py.finalize()
예제 #21
0
    return x, nrm_x


if __name__ == '__main__':
    pitts_py.initialize()

    # build a well-conditioned operator: identity plus a scaled random
    # rank-1 perturbation (TTOp <- 1*Eye + 0.1*TTOp)
    TTOp = pitts_py.TensorTrainOperator_double([2, 3, 3, 2, 4, 10, 7],
                                               [2, 3, 3, 2, 4, 10, 7])
    TTOp.setTTranks(1)
    pitts_py.randomize(TTOp)
    TTOpEye = pitts_py.TensorTrainOperator_double([2, 3, 3, 2, 4, 10, 7],
                                                  [2, 3, 3, 2, 4, 10, 7])
    TTOpEye.setEye()
    pitts_py.axpby(1, TTOpEye, 0.1, TTOp)

    # construct right-hand side b = TTOp * xref with known solution xref = ones
    xref = pitts_py.TensorTrain_double(TTOp.row_dimensions())
    xref.setOnes()
    b = pitts_py.TensorTrain_double(TTOp.row_dimensions())
    pitts_py.apply(TTOp, xref, b)
    nrm_b = pitts_py.normalize(b)

    def AOp(x, y, eps):
        # operator callback for tt_gmres: y <- normalized TTOp*x, returns its norm
        pitts_py.apply(TTOp, x, y)
        y_nrm = pitts_py.normalize(y, eps)
        return y_nrm

    # solve TTOp * x = b with TT-GMRES
    x, nrm_x = tt_gmres(AOp, b, nrm_b, maxIter=30, eps=1.e-4)
    print("nrm_x %g" % nrm_x)

    # compute r = TTOp * x to check the solution
    r = pitts_py.TensorTrain_double(b.dimensions())
    pitts_py.apply(TTOp, x, r)
예제 #22
0
def tt_jacobi_davidson(A, x0, symmetric, eps=1.e-6, maxIter=20, arnoldiIter=5, gmresTol=0.01, gmresIter=10, verbose=True):
    """ Simplistic tensor-train Jacobi-Davidson algorithm

    Parameters
    ----------
    A : tensor-train operator
        operator whose eigenpair is sought; x0 must match A.col_dimensions()
    x0 : pitts_py.TensorTrain_double
        initial guess for the eigenvector
    symmetric : bool
        if True, exploit symmetry (eigh instead of eig, mirrored H entries)
    eps : float
        residual norm tolerance for convergence
    maxIter : int
        maximal number of outer iterations
    arnoldiIter : int
        number of startup iterations that use the residual directly as the
        new search direction instead of solving the correction equation
    gmresTol : float
        relative tolerance of the inner GMRES correction solve
    gmresIter : int
        iteration limit of the inner GMRES correction solve
    verbose : bool
        print per-iteration convergence information

    Returns
    -------
    (sigma, q)
        approximated eigenvalue and corresponding eigenvector approximation
    """

    assert(x0.dimensions() == A.col_dimensions())

    # create empty search space
    W = list()   # orthonormalized search directions
    AW = list()  # A applied to each search direction
    H = np.zeros((0,0))  # projected operator H = W^T A W, grown each iteration

    for j in range(maxIter):
        if j == 0:
            # initial search direction
            v = pitts_py.TensorTrain_double(A.col_dimensions())
            pitts_py.copy(x0, v)
            pitts_py.normalize(v, 0.01*eps)
        elif j < arnoldiIter:
            # start with some Arnoldi iterations
            # (r is the residual from the previous iteration)
            v = r
        else:
            # calculate new search direction
            def JDop(x, y, eps):
                # Jacobi-Davidson operator with projections
                # y = (I - q q^T) (A - sigma I) (I - q q^T) x
                # we only do
                # y = (I - q q^T) (A - sigma I) x
                # because we assume that x is already orthogonal to q
                pitts_py.apply(A, x, y)
                y_nrm = pitts_py.normalize(y, 0.01*eps)
                y_nrm = pitts_py.axpby(-sigma, x, 1, y, 0.01*eps)
                qTy = y_nrm * pitts_py.dot(q, y)
                y_nrm = pitts_py.axpby(-qTy, q, y_nrm, y, 0.01*eps)
                return y_nrm
            v, _ = tt_gmres(JDop, r, r_nrm, eps=gmresTol, maxIter=gmresIter, verbose=False)

        # orthogonalize new search direction wrt. previous vectors
        # (repeated up to 5 times because TT truncation perturbs orthogonality)
        for iOrtho in range(5):
            max_wTv = 0
            for i in range(j):
                wTv = pitts_py.dot(W[i], v)
                max_wTv = max(max_wTv, abs(wTv))
                if abs(wTv) > 0.01*eps:
                    pitts_py.axpby(-wTv, W[i], 1., v, 0.01*eps)
            if max_wTv < 0.01*eps:
                break
        Av = pitts_py.TensorTrain_double(A.row_dimensions())
        pitts_py.apply(A, v, Av)

        W = W + [v]
        AW = AW + [Av]

        # # calculate orthogonality error
        # WtW = np.zeros((j+1,j+1))
        # for i in range(j+1):
        #     for k in range(j+1):
        #         WtW[i,k] = pitts_py.dot(W[i], W[k])
        # WtW_err = np.linalg.norm(WtW - np.eye(j+1,j+1))
        # print('WtW_err', WtW_err)

        # update H = W^T AW
        # (only the new last row/column; the symmetric case mirrors the entry)
        H = np.pad(H, ((0,1),(0,1)))
        for i in range(j):
            H[i,-1] = pitts_py.dot(W[i], Av)
            H[-1,i] = H[i,-1] if symmetric else pitts_py.dot(v,AW[i])
        H[-1,-1] = pitts_py.dot(v, Av)

        # compute Schur decomposition H QH = QH RH
        if symmetric:
            # eigh returns eigenvalues in ascending order -> index 0 is smallest
            (RH,QH) = np.linalg.eigh(H)
            eigIdx = 0
        else:
            (RH,QH) = np.linalg.eig(H)
            eigIdx = np.argmin(RH)

        # compute Ritz value and vector
        sigma = RH[eigIdx]
        q = pitts_py.TensorTrain_double(A.col_dimensions())
        q_nrm = 0
        for i in range(j+1):
            q_nrm = pitts_py.axpby(QH[i,eigIdx], W[i], q_nrm, q, 0.01*eps)

        # calculate residual r = A*q-sigma*q
        r = pitts_py.TensorTrain_double(A.col_dimensions())
        pitts_py.apply(A, q, r)
        r_nrm = pitts_py.axpby(-sigma, q, 1, r, 0.01*eps)
        # explicitly orthogonalize r wrt. q (we have approximation errors)
        qTr = pitts_py.dot(q,r)
        pitts_py.axpby(-qTr, q, 1, r, 0.01*eps)
        if verbose:
            print("TT-JacobiDavidson: Iteration %d, approximated eigenvalue: %g, residual norm: %g (orthog. error %g)" %(j, sigma, r_nrm, qTr))

        # abort if accurate enough
        if r_nrm < eps:
            break

    # return resulting eigenvalue and eigenvector approximation
    return sigma, q
예제 #23
0
 def test_copy_dimensionMismatch(self):
     # copying between tensors with different dimensions must fail
     src = pitts_py.TensorTrain_double([2, 4, 3])
     dst = pitts_py.TensorTrain_double([2, 4, 2])
     with self.assertRaises(ValueError):
         pitts_py.copy(src, dst)
예제 #24
0
 def test_createTensorTrain_double(self):
     # a freshly created tensor train keeps its dimensions and has all ranks 1
     tt = pitts_py.TensorTrain_double([3, 4, 5])
     self.assertEqual([3, 4, 5], tt.dimensions())
     self.assertEqual([1, 1], tt.getTTranks())
예제 #25
0
 def test_axpby_dimensionMismatch(self):
     # axpby of tensors with different dimensions must fail
     ttA = pitts_py.TensorTrain_double([2, 4, 3])
     ttB = pitts_py.TensorTrain_double([2, 4, 2])
     with self.assertRaises(ValueError):
         pitts_py.axpby(1, ttA, 2, ttB)
예제 #26
0
 def test_setGetTTranks(self):
     # TT ranks default to all ones and can be changed via setTTranks
     tt = pitts_py.TensorTrain_double([2, 2, 2, 2, 2])
     self.assertEqual([1, 1, 1, 1], tt.getTTranks())
     tt.setTTranks([1, 3, 5, 2])
     self.assertEqual([1, 3, 5, 2], tt.getTTranks())