import unittest

import numpy as np
import pitts_py


class PittsTestCase(unittest.TestCase):
    # (class name and imports assumed; the original excerpt starts inside a
    # unittest.TestCase)

    def test_apply_zero(self):
        # applying a zero operator must yield an exactly zero result
        ttOp = pitts_py.TensorTrainOperator_double([2, 3, 2], [3, 4, 1])
        ttX = pitts_py.TensorTrain_double([3, 4, 1])
        ttY = pitts_py.TensorTrain_double([2, 3, 2])
        ttOp.setZero()
        pitts_py.randomize(ttX)
        pitts_py.apply(ttOp, ttX, ttY)
        self.assertEqual(0, pitts_py.norm2(ttY))

    def test_apply_identity(self):
        ttOp = pitts_py.TensorTrainOperator_double([5, 3, 3], [5, 3, 3])
        ttX = pitts_py.TensorTrain_double([5, 3, 3])
        ttY = pitts_py.TensorTrain_double([5, 3, 3])
        ttOp.setEye()
        pitts_py.randomize(ttX)
        pitts_py.apply(ttOp, ttX, ttY)
        err = pitts_py.axpby(-1, ttX, 1, ttY)
        self.assertLess(err, 1.e-8)

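    # Extra check (not in the original suite) documenting the convention the
    # surrounding tests rely on: for tensor trains (not operators),
    # pitts_py.axpby(alpha, x, beta, y) overwrites y with the *normalized*
    # result of alpha*x + beta*y and returns the norm of that result.
    def test_axpby_normalizes_and_returns_norm(self):
        ttX = pitts_py.TensorTrain_double([4, 4])
        ttY = pitts_py.TensorTrain_double([4, 4])
        ttX.setOnes()
        pitts_py.copy(ttX, ttY)
        nrm = pitts_py.axpby(1., ttX, 1., ttY)
        # ||x + x|| = 2*||x||; the all-ones 4x4 tensor train has norm sqrt(16) = 4
        self.assertAlmostEqual(2. * pitts_py.norm2(ttX), nrm)
        # the stored result itself is left with unit norm
        self.assertAlmostEqual(1., pitts_py.norm2(ttY))
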
    def test_axpby(self):
        # axpby on operators must be consistent with axpby on their results:
        # (0.33*Op1 - 0.97*Op2) x == 0.33*(Op1 x) - 0.97*(Op2 x)
        ttOp1 = pitts_py.TensorTrainOperator_double([2, 3, 4], [3, 2, 2])
        ttOp1.setTTranks(2)
        ttOp2 = pitts_py.TensorTrainOperator_double([2, 3, 4], [3, 2, 2])
        pitts_py.randomize(ttOp1)
        pitts_py.randomize(ttOp2)
        ttOp12 = pitts_py.TensorTrainOperator_double([2, 3, 4], [3, 2, 2])
        pitts_py.copy(ttOp2, ttOp12)
        pitts_py.axpby(0.33, ttOp1, -0.97, ttOp12)
        ttX = pitts_py.TensorTrain_double([3, 2, 2])
        ttX.setTTranks(2)
        pitts_py.randomize(ttX)
        ttY = pitts_py.TensorTrain_double([2, 3, 4])
        pitts_py.apply(ttOp12, ttX, ttY)
        ttY1 = pitts_py.TensorTrain_double([2, 3, 4])
        pitts_py.apply(ttOp1, ttX, ttY1)
        ttY2 = pitts_py.TensorTrain_double([2, 3, 4])
        pitts_py.apply(ttOp2, ttX, ttY2)
        ttY12 = pitts_py.TensorTrain_double([2, 3, 4])
        pitts_py.copy(ttY2, ttY12)
        nrm = pitts_py.axpby(0.33, ttY1, -0.97, ttY12)
        err = pitts_py.axpby(-nrm, ttY12, 1., ttY)
        self.assertLess(err, 1.e-8)

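# tt_jacobi_davidson below and the driver at the end call a helper
# tt_gmres(AOp, b, nrm_b, ...) that is not part of this excerpt. The following
# is a minimal TT-GMRES sketch under the conventions used here (vectors are
# stored as a normalized tensor train plus a separate norm; AOp(x, y, eps)
# writes the normalized result of A*x into y and returns its norm). It is an
# illustration of the expected interface, not the original helper.
def tt_gmres(AOp, b, nrm_b, eps=1.e-6, maxIter=20, verbose=True):
    # truncation tolerance for the TT arithmetic inside the iteration
    delta = eps / maxIter
    # Krylov basis (normalized TTs) and Hessenberg matrix of the Arnoldi process
    V = [b]
    H = np.zeros((maxIter+1, maxIter))
    for j in range(maxIter):
        # Arnoldi step: w = A v_j, kept as (normalized TT, norm)
        w = pitts_py.TensorTrain_double(b.dimensions())
        w_nrm = AOp(V[j], w, delta)
        # modified Gram-Schmidt orthogonalization against the previous basis
        for i in range(j+1):
            H[i, j] = w_nrm * pitts_py.dot(V[i], w)
            w_nrm = pitts_py.axpby(-H[i, j], V[i], w_nrm, w, delta)
        H[j+1, j] = w_nrm
        V = V + [w]
        # solve the small least-squares problem min ||nrm_b*e_1 - H y||
        e1 = np.zeros(j+2)
        e1[0] = nrm_b
        y = np.linalg.lstsq(H[:j+2, :j+1], e1, rcond=None)[0]
        rel_res = np.linalg.norm(H[:j+2, :j+1] @ y - e1) / nrm_b
        if verbose:
            print("TT-GMRES: iteration %d, relative residual: %g" % (j, rel_res))
        if rel_res < eps:
            break
    # assemble x = sum_i y_i v_i as a normalized TT plus its norm
    x = pitts_py.TensorTrain_double(b.dimensions())
    x_nrm = 0.
    for i in range(len(y)):
        x_nrm = pitts_py.axpby(y[i], V[i], x_nrm, x, delta)
    return x, x_nrm
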
def tt_jacobi_davidson(A, x0, symmetric, eps=1.e-6, maxIter=20, arnoldiIter=5, gmresTol=0.01, gmresIter=10, verbose=True):
    """ Simplistic tensor-train Jacobi-Davidson algorithm """
    assert x0.dimensions() == A.col_dimensions()

    # create empty search space
    W = list()
    AW = list()
    H = np.zeros((0, 0))

    for j in range(maxIter):
        if j == 0:
            # initial search direction
            v = pitts_py.TensorTrain_double(A.col_dimensions())
            pitts_py.copy(x0, v)
            pitts_py.normalize(v, 0.01*eps)
        elif j < arnoldiIter:
            # start with some Arnoldi iterations
            v = r
        else:
            # calculate a new search direction from the correction equation
            def JDop(x, y, eps):
                # Jacobi-Davidson operator with projections
                #   y = (I - q q^T) (A - sigma I) (I - q q^T) x
                # we only apply
                #   y = (I - q q^T) (A - sigma I) x
                # because we assume that x is already orthogonal to q
                pitts_py.apply(A, x, y)
                y_nrm = pitts_py.normalize(y, 0.01*eps)
                y_nrm = pitts_py.axpby(-sigma, x, y_nrm, y, 0.01*eps)
                qTy = y_nrm * pitts_py.dot(q, y)
                y_nrm = pitts_py.axpby(-qTy, q, y_nrm, y, 0.01*eps)
                return y_nrm
            v, _ = tt_gmres(JDop, r, r_nrm, eps=gmresTol, maxIter=gmresIter, verbose=False)

        # orthogonalize the new search direction wrt. previous directions
        # (iterated Gram-Schmidt)
        for iOrtho in range(5):
            max_wTv = 0
            for i in range(j):
                wTv = pitts_py.dot(W[i], v)
                max_wTv = max(max_wTv, abs(wTv))
                if abs(wTv) > 0.01*eps:
                    pitts_py.axpby(-wTv, W[i], 1., v, 0.01*eps)
            if max_wTv < 0.01*eps:
                break

        Av = pitts_py.TensorTrain_double(A.row_dimensions())
        pitts_py.apply(A, v, Av)
        W = W + [v]
        AW = AW + [Av]

        # # calculate orthogonality error
        # WtW = np.zeros((j+1, j+1))
        # for i in range(j+1):
        #     for k in range(j+1):
        #         WtW[i, k] = pitts_py.dot(W[i], W[k])
        # WtW_err = np.linalg.norm(WtW - np.eye(j+1, j+1))
        # print('WtW_err', WtW_err)

        # update the projected matrix H = W^T A W
        H = np.pad(H, ((0, 1), (0, 1)))
        for i in range(j):
            H[i, -1] = pitts_py.dot(W[i], Av)
            H[-1, i] = H[i, -1] if symmetric else pitts_py.dot(v, AW[i])
        H[-1, -1] = pitts_py.dot(v, Av)

        # compute an eigendecomposition of H: H QH = QH diag(RH)
        if symmetric:
            (RH, QH) = np.linalg.eigh(H)
            eigIdx = 0  # eigh returns eigenvalues in ascending order
        else:
            (RH, QH) = np.linalg.eig(H)
            eigIdx = np.argmin(RH)  # smallest eigenvalue (assumes a real spectrum)

        # compute the Ritz value and Ritz vector
        sigma = RH[eigIdx]
        q = pitts_py.TensorTrain_double(A.col_dimensions())
        q_nrm = 0
        for i in range(j+1):
            q_nrm = pitts_py.axpby(QH[i, eigIdx], W[i], q_nrm, q, 0.01*eps)

        # calculate the residual r = A*q - sigma*q
        r = pitts_py.TensorTrain_double(A.col_dimensions())
        pitts_py.apply(A, q, r)
        r_nrm = pitts_py.axpby(-sigma, q, 1, r, 0.01*eps)
        # explicitly orthogonalize r wrt. q (we have approximation errors)
        qTr = pitts_py.dot(q, r)
        pitts_py.axpby(-qTr, q, 1, r, 0.01*eps)

        if verbose:
            print("TT-JacobiDavidson: iteration %d, approximated eigenvalue: %g, residual norm: %g (orthog. error %g)" % (j, sigma, r_nrm, qTr))

        # abort if the approximation is accurate enough
        if r_nrm < eps:
            break

    # return the resulting eigenvalue and eigenvector approximation
    return sigma, q

def LaplaceOperator(dims):
    # (function head reconstructed; the original excerpt started inside the
    # loop below: build a Kronecker-sum Laplacian by replacing the identity
    # sub-tensor of one dimension at a time and summing the contributions)
    TTOp = pitts_py.TensorTrainOperator_double(dims, dims)
    TTOp.setZero()
    TTOp_dummy = pitts_py.TensorTrainOperator_double(dims, dims)
    TTOp_dummy.setEye()
    for iDim in range(len(dims)):
        n_i = dims[iDim]
        # temporarily replace the identity sub-tensor of dimension iDim
        # by the 1D Laplace stencil tridiag(-1, 2, -1) / (n_i+1)
        eye_i = TTOp_dummy.getSubTensor(iDim)
        tridi_i = np.zeros((n_i, n_i))
        for i in range(n_i):
            for j in range(n_i):
                if i == j:
                    tridi_i[i, j] = 2. / (n_i+1)
                elif i+1 == j or i-1 == j:
                    tridi_i[i, j] = -1. / (n_i+1)
        TTOp_dummy.setSubTensor(iDim, tridi_i.reshape(1, n_i, n_i, 1))
        # TTOp += TTOp_dummy
        pitts_py.axpby(1, TTOp_dummy, 1, TTOp)
        # restore the identity sub-tensor for the next dimension
        TTOp_dummy.setSubTensor(iDim, eye_i)
    return TTOp


if __name__ == '__main__':
    # (main guard and initialize() added to match the GMRES example below)
    pitts_py.initialize()

    TTOp = LaplaceOperator([10,]*5)
    x0 = pitts_py.TensorTrain_double(TTOp.row_dimensions())
    x0.setOnes()

    sigma, q = tt_jacobi_davidson(TTOp, x0, symmetric=True, eps=1.e-8)

    # verify the result: recompute the Ritz value and the residual norm
    r = pitts_py.TensorTrain_double(x0.dimensions())
    pitts_py.apply(TTOp, q, r)
    sigma_ref = pitts_py.dot(q, r)
    r_nrm = pitts_py.axpby(-sigma, q, 1, r)
    print("Residual norm: %g" % r_nrm)
    print("Est. eigenvalue: %g, real Ritz value: %g, error: %g" % (sigma, sigma_ref, np.abs(sigma - sigma_ref)))

    pitts_py.finalize()

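# Optional cross-check (not part of the original example): the smallest
# eigenvalue of this Kronecker-sum Laplacian is known in closed form. Each 1D
# factor tridiag(-1, 2, -1)/(n+1) has eigenvalues (2 - 2*cos(k*pi/(n+1)))/(n+1),
# and the Kronecker sum adds one eigenvalue per dimension, so the minimum is
# d times the smallest 1D eigenvalue (helper name below is hypothetical):
def laplace_min_eigenvalue(n, d):
    # smallest eigenvalue of the d-dimensional operator built above
    # with n grid points per dimension
    return d * (2. - 2.*np.cos(np.pi/(n+1))) / (n+1)

# e.g. for the setup above, sigma should approach laplace_min_eigenvalue(10, 5)
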
if __name__ == '__main__':
    pitts_py.initialize()

    # set up a well-conditioned operator A = I + 0.1*R
    # with a random TT-rank-1 perturbation R
    TTOp = pitts_py.TensorTrainOperator_double([2, 3, 3, 2, 4, 10, 7], [2, 3, 3, 2, 4, 10, 7])
    TTOp.setTTranks(1)
    pitts_py.randomize(TTOp)
    TTOpEye = pitts_py.TensorTrainOperator_double([2, 3, 3, 2, 4, 10, 7], [2, 3, 3, 2, 4, 10, 7])
    TTOpEye.setEye()
    pitts_py.axpby(1, TTOpEye, 0.1, TTOp)

    # construct the right-hand side b = A*xref from a known solution xref
    xref = pitts_py.TensorTrain_double(TTOp.row_dimensions())
    xref.setOnes()
    b = pitts_py.TensorTrain_double(TTOp.row_dimensions())
    pitts_py.apply(TTOp, xref, b)
    nrm_b = pitts_py.normalize(b)

    def AOp(x, y, eps):
        # operator callback for tt_gmres: y <- normalized(A*x), returns the norm
        pitts_py.apply(TTOp, x, y)
        y_nrm = pitts_py.normalize(y, eps)
        return y_nrm

    x, nrm_x = tt_gmres(AOp, b, nrm_b, maxIter=30, eps=1.e-4)
    print("nrm_x %g" % nrm_x)

    # residual norm ||b - A*x|| (both b and x are stored normalized with separate norms)
    r = pitts_py.TensorTrain_double(b.dimensions())
    pitts_py.apply(TTOp, x, r)
    r_nrm = pitts_py.axpby(nrm_b, b, -nrm_x, r)
    print("Residual norm: %g" % (r_nrm / nrm_b))

    # error wrt. the known solution xref
    err = pitts_py.axpby(-1, xref, nrm_x, x)
    print("Error norm: %g" % err)  # (added; the excerpt ended after computing err)

    pitts_py.finalize()  # (added to pair with initialize above)

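# Why tt_gmres converges quickly here (illustration only, not part of the
# original example): A = I + 0.1*R has all eigenvalues within 0.1*||R||_2 of 1,
# so the spectrum is clustered around 1 when the random perturbation is
# moderately scaled. A dense numpy analogue (all names below are hypothetical):
def dense_analogue(n=100, seed=42):
    rng = np.random.default_rng(seed)
    R = rng.standard_normal((n, n))
    R /= np.linalg.norm(R, 2)      # scale the perturbation to spectral norm 1
    A = np.eye(n) + 0.1 * R        # singular values lie in [0.9, 1.1]
    return np.linalg.cond(A)       # modest condition number -> fast convergence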