Example #1
    def test_axpby(self):
        ttOp1 = pitts_py.TensorTrainOperator_double([2, 3, 4], [3, 2, 2])
        ttOp1.setTTranks(2)
        ttOp2 = pitts_py.TensorTrainOperator_double([2, 3, 4], [3, 2, 2])

        pitts_py.randomize(ttOp1)
        pitts_py.randomize(ttOp2)

        ttOp12 = pitts_py.TensorTrainOperator_double([2, 3, 4], [3, 2, 2])
        pitts_py.copy(ttOp2, ttOp12)
        pitts_py.axpby(0.33, ttOp1, -0.97, ttOp12)

        ttX = pitts_py.TensorTrain_double([3, 2, 2])
        ttX.setTTranks(2)
        pitts_py.randomize(ttX)

        ttY = pitts_py.TensorTrain_double([2, 3, 4])
        pitts_py.apply(ttOp12, ttX, ttY)

        ttY1 = pitts_py.TensorTrain_double([2, 3, 4])
        pitts_py.apply(ttOp1, ttX, ttY1)

        ttY2 = pitts_py.TensorTrain_double([2, 3, 4])
        pitts_py.apply(ttOp2, ttX, ttY2)

        ttY12 = pitts_py.TensorTrain_double([2, 3, 4])
        pitts_py.copy(ttY2, ttY12)
        nrm = pitts_py.axpby(0.33, ttY1, -0.97, ttY12)

        err = pitts_py.axpby(-nrm, ttY12, 1., ttY)
        self.assertLess(err, 1.e-8)
Example #2
def JDop(x, y, eps):
    # Jacobi-Davidson operator with projections:
    # y = (I - q q^T) (A - sigma I) (I - q q^T) x
    # we only compute
    # y = (I - q q^T) (A - sigma I) x
    # because we assume that x is already orthogonal to q
    pitts_py.apply(A, x, y)
    y_nrm = pitts_py.normalize(y, 0.01*eps)
    # y is stored normalized, so its norm y_nrm enters as the "beta" factor
    y_nrm = pitts_py.axpby(-sigma, x, y_nrm, y, 0.01*eps)
    qTy = y_nrm * pitts_py.dot(q, y)
    y_nrm = pitts_py.axpby(-qTy, q, y_nrm, y, 0.01*eps)
    return y_nrm
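
# Illustrative dense NumPy analogue of JDop (not part of pitts_py; the dense
# A, q, sigma here are stand-ins for the TT objects) -- a sketch that can be
# used to check the TT version on small problems:
import numpy as np

def jd_op_dense(A, q, sigma, x):
    # y = (I - q q^T) (A - sigma I) x, assuming x is already orthogonal to q
    y = A @ x - sigma * x
    y = y - q * np.dot(q, y)
    return y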
Example #3
def tt_gmres(AOp, b, nrm_b, eps=1.e-6, maxIter=20, verbose=True):
    """ Tensor-train GMRES algorithm without restart """

    # assumes b is normalized and nrm_b is the desired rhs norm
    # define initial subspace
    beta = nrm_b
    curr_beta = beta
    V = [b]
    m = maxIter
    H = np.zeros((m + 1, m), order='F')

    if verbose:
        print("TT-GMRES: initial residual norm: %g, max. rank: %d" %
              (beta, np.max(b.getTTranks())))

    for j in range(m):
        delta = eps / (curr_beta / beta)
        w = pitts_py.TensorTrain_double(b.dimensions())
        w_nrm = AOp(V[j], w, delta / m)
        for i in range(j + 1):
            H[i, j] = w_nrm * pitts_py.dot(w, V[i])
            w_nrm = pitts_py.axpby(-H[i, j], V[i], w_nrm, w, delta / m)
        if verbose:
            print("TT-GMRES: iteration %d, new Krylov vector max. rank: %d" %
                  (j, np.max(w.getTTranks())))
        H[j + 1, j] = w_nrm
        V = V + [w]
        Hj = H[:j + 2, :j + 1]
        betae = np.zeros(j + 2)
        betae[0] = beta
        # solving Hj * y = beta e_1
        y, curr_beta, rank, s = np.linalg.lstsq(Hj, betae, rcond=None)
        curr_beta = np.sqrt(curr_beta[0]) if curr_beta.size > 0 else 0
        if verbose:
            print("TT-GMRES:               LSTSQ resirual norm: %g " %
                  (curr_beta / beta))
        if curr_beta / beta <= eps:
            break

    x = pitts_py.TensorTrain_double(b.dimensions())
    x.setZero()
    nrm_x = 0
    for i in range(len(y)):
        nrm_x = pitts_py.axpby(y[i], V[i], nrm_x, x, eps / m)
    if verbose:
        print("TT-GMRES: solution max rank %d" % np.max(x.getTTranks()))
    return x, nrm_x
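
# Illustrative dense NumPy mirror of tt_gmres above (a sketch, not pitts_py
# code): the same Arnoldi recurrence with classical Gram-Schmidt and the same
# least-squares solve of H y = beta*e_1 instead of Givens rotations.
import numpy as np

def dense_gmres(A, b, eps=1.e-6, maxIter=20):
    beta = np.linalg.norm(b)
    V = [b / beta]
    H = np.zeros((maxIter + 1, maxIter))
    for j in range(maxIter):
        w = A @ V[j]
        for i in range(j + 1):
            H[i, j] = np.dot(w, V[i])
            w = w - H[i, j] * V[i]
        H[j + 1, j] = np.linalg.norm(w)  # (no breakdown handling, for brevity)
        V.append(w / H[j + 1, j])
        betae = np.zeros(j + 2)
        betae[0] = beta
        y, res, rank, s = np.linalg.lstsq(H[:j + 2, :j + 1], betae, rcond=None)
        if res.size > 0 and np.sqrt(res[0]) / beta <= eps:
            break
    # assemble the solution from the Krylov basis
    return sum(y[i] * V[i] for i in range(len(y)))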
Example #4
    def test_apply_identity(self):
        ttOp = pitts_py.TensorTrainOperator_double([5, 3, 3], [5, 3, 3])
        ttX = pitts_py.TensorTrain_double([5, 3, 3])
        ttY = pitts_py.TensorTrain_double([5, 3, 3])

        ttOp.setEye()
        pitts_py.randomize(ttX)
        pitts_py.apply(ttOp, ttX, ttY)
        err = pitts_py.axpby(-1, ttX, 1, ttY)
        self.assertLess(err, 1.e-8)
Example #5
def LaplaceOperator(dims):
    TTOp = pitts_py.TensorTrainOperator_double(dims, dims)
    TTOp.setZero()
    TTOp_dummy = pitts_py.TensorTrainOperator_double(dims, dims)
    TTOp_dummy.setEye()
    for iDim in range(len(dims)):
        n_i = dims[iDim]
        eye_i = TTOp_dummy.getSubTensor(iDim)
        # 1D tridiagonal stencil for dimension iDim
        tridi_i = np.zeros((n_i, n_i))
        for i in range(n_i):
            for j in range(n_i):
                if i == j:
                    tridi_i[i, j] = 2. / (n_i + 1)
                elif i + 1 == j or i - 1 == j:
                    tridi_i[i, j] = -1. / (n_i + 1)
                else:
                    tridi_i[i, j] = 0
        # temporarily swap the identity core for the stencil and add the
        # resulting term I x ... x T_i x ... x I to TTOp
        TTOp_dummy.setSubTensor(iDim, tridi_i.reshape(1, n_i, n_i, 1))
        pitts_py.axpby(1, TTOp_dummy, 1, TTOp)
        TTOp_dummy.setSubTensor(iDim, eye_i)
    return TTOp
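
# The loop above assembles the Kronecker sum L = sum_i I x ... x T_i x ... x I,
# where T_i is the 1D tridiagonal stencil. An illustrative dense NumPy analogue
# (a sketch, only feasible for small dims; index ordering conventions may
# differ from the TT operator):
import numpy as np

def laplace_dense(dims):
    N = int(np.prod(dims))
    L = np.zeros((N, N))
    for iDim, n_i in enumerate(dims):
        # same stencil as above: 2/(n+1) on the diagonal, -1/(n+1) next to it
        T_i = (2. * np.eye(n_i) - np.eye(n_i, k=1) - np.eye(n_i, k=-1)) / (n_i + 1)
        term = np.eye(1)
        for jDim, n_j in enumerate(dims):
            term = np.kron(term, T_i if jDim == iDim else np.eye(n_j))
        L += term
    return L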
Example #6
    def test_axpby(self):
        fullTensor1 = np.random.rand(2, 4, 3)
        fullTensor2 = np.random.rand(2, 4, 3)

        tt1 = pitts_py.fromDense_classical(fullTensor1)
        tt2 = pitts_py.fromDense_classical(fullTensor2)

        nrm2 = pitts_py.axpby(1.5, tt1, -0.75, tt2)

        result = nrm2 * pitts_py.toDense(tt2)

        np.testing.assert_almost_equal(1.5 * fullTensor1 - 0.75 * fullTensor2,
                                       result)
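
        # (illustrative extra check, not in the original test: the factor
        # returned by axpby is the Frobenius norm of the result, up to
        # rank-truncation error)
        np.testing.assert_almost_equal(
            nrm2, np.linalg.norm(1.5 * fullTensor1 - 0.75 * fullTensor2))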
Example #7
    def test_axpby_dimensionMismatch(self):
        ttOp = pitts_py.TensorTrainOperator_double([2, 4, 3], [2, 2, 2])
        ttOp2 = pitts_py.TensorTrainOperator_double([2, 4, 2], [2, 2, 2])
        with self.assertRaises(ValueError):
            pitts_py.axpby(1., ttOp, 2., ttOp2)
Example #8
    def test_axpby_dimensionMismatch(self):
        tt = pitts_py.TensorTrain_double([2, 4, 3])
        tt2 = pitts_py.TensorTrain_double([2, 4, 2])
        with self.assertRaises(ValueError):
            nrm = pitts_py.axpby(1, tt, 2, tt2)
Example #9
def tt_jacobi_davidson(A, x0, symmetric, eps=1.e-6, maxIter=20, arnoldiIter=5, gmresTol=0.01, gmresIter=10, verbose=True):
    """ Simplistic tensor-train Jacobi-Davidson algorithm """

    assert(x0.dimensions() == A.col_dimensions())

    # create empty search space
    W = list()
    AW = list()
    H = np.zeros((0,0))

    for j in range(maxIter):
        if j == 0:
            # initial search direction
            v = pitts_py.TensorTrain_double(A.col_dimensions())
            pitts_py.copy(x0, v)
            pitts_py.normalize(v, 0.01*eps)
        elif j < arnoldiIter:
            # start with some Arnoldi iterations
            v = r
        else:
            # calculate new search direction
            def JDop(x, y, eps):
                # Jacobi-Davidson operator with projections
                # y = (I - q q^T) (A - sigma I) (I - q q^T) x
                # we only do
                # y = (I - q q^T) (A - sigma I) x
                # because we assume that x is already orthogonal to q
                pitts_py.apply(A, x, y)
                y_nrm = pitts_py.normalize(y, 0.01*eps)
                # y is stored normalized, so pass its norm y_nrm as the beta factor
                y_nrm = pitts_py.axpby(-sigma, x, y_nrm, y, 0.01*eps)
                qTy = y_nrm * pitts_py.dot(q, y)
                y_nrm = pitts_py.axpby(-qTy, q, y_nrm, y, 0.01*eps)
                return y_nrm
            v, _ = tt_gmres(JDop, r, r_nrm, eps=gmresTol, maxIter=gmresIter, verbose=False)

        # orthogonalize new search direction wrt. previous vectors
        for iOrtho in range(5):
            max_wTv = 0
            for i in range(j):
                wTv = pitts_py.dot(W[i], v)
                max_wTv = max(max_wTv, abs(wTv))
                if abs(wTv) > 0.01*eps:
                    pitts_py.axpby(-wTv, W[i], 1., v, 0.01*eps)
            if max_wTv < 0.01*eps:
                break
        Av = pitts_py.TensorTrain_double(A.row_dimensions())
        pitts_py.apply(A, v, Av)

        W = W + [v]
        AW = AW + [Av]

        # # calculate orthogonality error
        # WtW = np.zeros((j+1,j+1))
        # for i in range(j+1):
        #     for k in range(j+1):
        #         WtW[i,k] = pitts_py.dot(W[i], W[k])
        # WtW_err = np.linalg.norm(WtW - np.eye(j+1,j+1))
        # print('WtW_err', WtW_err)

        # update H = W^T AW
        H = np.pad(H, ((0,1),(0,1)))
        for i in range(j):
            H[i,-1] = pitts_py.dot(W[i], Av)
            H[-1,i] = H[i,-1] if symmetric else pitts_py.dot(v,AW[i])
        H[-1,-1] = pitts_py.dot(v, Av)

        # compute eigendecomposition H QH = QH diag(RH)
        if symmetric:
            (RH,QH) = np.linalg.eigh(H)
            eigIdx = 0
        else:
            (RH,QH) = np.linalg.eig(H)
            eigIdx = np.argmin(RH)

        # compute Ritz value and vector
        sigma = RH[eigIdx]
        q = pitts_py.TensorTrain_double(A.col_dimensions())
        q_nrm = 0
        for i in range(j+1):
            q_nrm = pitts_py.axpby(QH[i,eigIdx], W[i], q_nrm, q, 0.01*eps)

        # calculate residual r = A*q-sigma*q
        r = pitts_py.TensorTrain_double(A.col_dimensions())
        pitts_py.apply(A, q, r)
        r_nrm = pitts_py.axpby(-sigma, q, 1, r, 0.01*eps)
        # explicitly orthogonalize r wrt. q (we have approximation errors)
        qTr = pitts_py.dot(q,r)
        pitts_py.axpby(-qTr, q, 1, r, 0.01*eps)
        if verbose:
            print("TT-JacobiDavidson: Iteration %d, approximated eigenvalue: %g, residual norm: %g (orthog. error %g)" %(j, sigma, r_nrm, qTr))

        # abort if accurate enough
        if r_nrm < eps:
            break

    # return resulting eigenvalue and eigenvector approximation
    return sigma, q
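
# Illustrative dense analogue of the Rayleigh-Ritz step above for the symmetric
# case (a sketch, not pitts_py code; the dense A and a column-orthonormal W are
# stand-ins for the TT objects W, AW):
import numpy as np

def ritz_step_dense(A, W):
    H = W.T @ (A @ W)            # projected operator H = W^T A W
    RH, QH = np.linalg.eigh(H)   # eigenvalues in ascending order
    sigma = RH[0]                # smallest Ritz value (eigIdx = 0 above)
    q = W @ QH[:, 0]             # corresponding Ritz vector
    r = A @ q - sigma * q        # residual r = A q - sigma q
    return sigma, q, np.linalg.norm(r)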
Example #10
import numpy as np
import pitts_py

if __name__ == '__main__':
    pitts_py.initialize()

    def LaplaceOperator(dims):
        TTOp = pitts_py.TensorTrainOperator_double(dims, dims)
        TTOp.setZero()
        TTOp_dummy = pitts_py.TensorTrainOperator_double(dims, dims)
        TTOp_dummy.setEye()
        for iDim in range(len(dims)):
            n_i = dims[iDim]
            eye_i = TTOp_dummy.getSubTensor(iDim)
            tridi_i = np.zeros((n_i,n_i))
            for i in range(n_i):
                for j in range(n_i):
                    if i == j:
                        tridi_i[i,j] = 2. / (n_i+1)
                    elif i+1 == j or i-1 == j:
                        tridi_i[i,j] = -1. / (n_i+1)
                    else:
                        tridi_i[i,j] = 0
            TTOp_dummy.setSubTensor(iDim, tridi_i.reshape(1,n_i,n_i,1))
            pitts_py.axpby(1, TTOp_dummy, 1, TTOp)
            TTOp_dummy.setSubTensor(iDim, eye_i)
        return TTOp

    TTOp = LaplaceOperator([10,]*5)

    x0 = pitts_py.TensorTrain_double(TTOp.row_dimensions())
    x0.setOnes()

    sigma, q = tt_jacobi_davidson(TTOp, x0, symmetric=True, eps=1.e-8)

    r = pitts_py.TensorTrain_double(x0.dimensions())
    pitts_py.apply(TTOp, q, r)
    sigma_ref = pitts_py.dot(q, r)
    r_nrm = pitts_py.axpby(-sigma, q, 1, r)
    print("Residual norm: %g" % r_nrm)
    print("Est. eigenvalue: %g, real Ritz value: %g, error: %g" % (sigma, sigma_ref, np.abs(sigma-sigma_ref)))

    pitts_py.finalize()
Example #11
    if verbose:
        print("TT-GMRES: solution max rank %d" % np.max(x.getTTranks()))
    return x, nrm_x


if __name__ == '__main__':
    pitts_py.initialize()

    TTOp = pitts_py.TensorTrainOperator_double([2, 3, 3, 2, 4, 10, 7],
                                               [2, 3, 3, 2, 4, 10, 7])
    TTOp.setTTranks(1)
    pitts_py.randomize(TTOp)
    TTOpEye = pitts_py.TensorTrainOperator_double([2, 3, 3, 2, 4, 10, 7],
                                                  [2, 3, 3, 2, 4, 10, 7])
    TTOpEye.setEye()
    pitts_py.axpby(1, TTOpEye, 0.1, TTOp)

    xref = pitts_py.TensorTrain_double(TTOp.row_dimensions())
    xref.setOnes()
    b = pitts_py.TensorTrain_double(TTOp.row_dimensions())
    pitts_py.apply(TTOp, xref, b)
    nrm_b = pitts_py.normalize(b)

    def AOp(x, y, eps):
        pitts_py.apply(TTOp, x, y)
        y_nrm = pitts_py.normalize(y, eps)
        return y_nrm

    x, nrm_x = tt_gmres(AOp, b, nrm_b, maxIter=30, eps=1.e-4)
    print("nrm_x %g" % nrm_x)