def build(self,
          eps=1e-10,
          method='svd',
          rs=None,
          fix_rank=False,
          Jinit=None,
          delta=1e-4,
          maxit=100,
          mv_eps=1e-6,
          mv_maxit=100,
          max_ranks=None,
          kickrank=None):
    """ Common interface for the construction of the approximation.

    :param float eps: [default == 1e-10] For method=='svd': precision with which to approximate the input tensor. For method=='ttcross': TT-rounding tolerance for the rank check.
    :param string method: 'svd' uses the singular value decomposition to construct the TT representation :cite:`Oseledets2011`, 'ttcross' uses the low-rank skeleton approximation to construct the TT representation :cite:`Oseledets2010`, 'ttdmrg' uses the Tensor Train Renormalization Cross to construct the TT representation :cite:`Savostyanov2011,Savostyanov2013`, 'ttdmrgcross' uses 'ttdmrg' with 'ttcross' approximation of the supercores.
    :param list rs: list of integer ranks of the different cores. If ``None`` then the incremental TTcross approach is used. (method=='ttcross')
    :param bool fix_rank: determines whether the rank is allowed to be increased. (method=='ttcross')
    :param list Jinit: list of lists of integers containing the r starting columns in the lowrankapprox routine for each core. If ``None`` then they are picked randomly. (method=='ttcross')
    :param float delta: accuracy parameter in the TT-cross routine (method=='ttcross'). It is the relative error in Frobenius norm between two successive iterations.
    :param int maxit: maximum number of iterations in the lowrankapprox routine. (method=='ttcross')
    :param float mv_eps: accuracy parameter for each usage of the maxvol algorithm. (method=='ttcross')
    :param int mv_maxit: maximum number of iterations in the maxvol routine. (method=='ttcross')
    :param list max_ranks: maximum ranks used to limit the truncation rank due to ``eps``. The first and last elements of the list must be ``1``, e.g. ``[1,...,1]``. Default: ``None``.
    :param int kickrank: rank overshooting for 'ttdmrg'.

    .. note:: Weights are not removed after computation, because they cannot be trivially removed from the folded quantics approximation! The weights need to be removed manually. For example:

       >>> wqtt.build()
       >>> wtt = wqtt.to_TTvec()
       >>> wtt.remove_weights()
    """
    WTTvec._build_preprocess(self)
    TTvec.build(self,
                eps=eps,
                method=method,
                rs=rs,
                fix_rank=fix_rank,
                Jinit=Jinit,
                delta=delta,
                maxit=maxit,
                mv_eps=mv_eps,
                mv_maxit=mv_maxit,
                max_ranks=max_ranks,
                kickrank=kickrank)
    QTTvec._build_postprocess(self)
    WTTvec._build_postprocess(self)
    return self
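# A minimal usage sketch (hypothetical names: `X` is a full np.ndarray and
# `w` a matching list of one-dimensional weight arrays; neither is defined
# in this module):
#
#   >>> wqtt = WQTTvec(X, w, base=2)
#   >>> wqtt.build(eps=1e-8, method='ttdmrg')
#   >>> wtt = wqtt.to_TTvec()
#   >>> wtt.remove_weights()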
def __imul__(A, B):
    if isinstance(A, TTmat) and isinstance(B, TTmat):
        # Check dimension consistency
        if A.nrows != B.nrows or A.ncols != B.ncols:
            raise NameError(
                "tensor.TTmat.mul: Matrices of inconsistent dimensions")
    return TTvec.__imul__(A, B)
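# In-place multiplication sketch (hypothetical setup: `A` and `B` are built
# TTmats with identical nrows/ncols). The dimension check above guards the
# entrywise product delegated to TTvec.__imul__:
#
#   >>> A *= B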
def __init__(self,
             A,
             W,
             base=2,
             store_location="",
             store_object=None,
             store_freq=1,
             store_overwrite=False,
             multidim_point=None):
    TTvec.__init__(self,
                   A,
                   store_location=store_location,
                   store_object=store_object,
                   store_freq=store_freq,
                   store_overwrite=store_overwrite,
                   multidim_point=multidim_point)
    WTTvec._init(self, W)
    QTTvec._init(self, base)
def __getitem__(self, idxs): """ Return the item at a certain index. The index is formed as follows: idxs = (rowidxs,colidxs) = ((i_1,...,i_d),(j_1,...,j_d)) """ if not self.init: raise NameError( "tensor.TTmat.__getitem__: TTmat not initialized correctly") return TTvec.__getitem__( self, mat_to_tt_idxs(idxs[0], idxs[1], self.nrows, self.ncols))
def __getitem__(self, idxs): """ Get item function: indexes are entered in with respect to the unfolded mode sizes. """ if not self.init: raise NameError( "TensorToolbox.QTTvec.__getitem__: QTT not initialized correctly" ) # Check whether index out of bounds if any(map(operator.ge, idxs, self.get_global_shape())): raise NameError( "TensorToolbox.QTTvec.__getitem__: Index out of bounds") # Compute the index of the folding representation from the unfolded representation return TTvec.__getitem__( self, idxfold(self.shape(), idxunfold(self.get_global_shape(), idxs)))
def to_TTvec(self):
    icore = 0
    TTs = []
    for subshape, gs in zip(self.folded_shape, self.get_global_shape()):
        tmpcore = self.TT[icore]
        icore += 1
        for i in range(1, len(subshape)):
            tmpcore = np.tensordot(tmpcore, self.TT[icore],
                                   ((tmpcore.ndim - 1, ), (0, )))
            icore += 1
        tmpcore = np.reshape(tmpcore,
                             (tmpcore.shape[0],
                              np.prod(tmpcore.shape[1:-1]),
                              tmpcore.shape[-1]))
        # Truncate the core mode to the global_shape
        tmpcore = tmpcore[:, :gs, :]
        TTs.append(tmpcore)
    return TTvec(TTs).build()
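# Unfolding sketch: each group of quantics cores encoding one original mode
# is contracted into a single TT core, then truncated to the global mode
# size. E.g. a mode of global size 10 folded into four base-2 cores yields a
# contracted core of mode size 16, cut back to its first 10 slices
# (hypothetical sizes, for illustration):
#
#   >>> tt = qtt.to_TTvec()
#   >>> tt.shape()   # matches qtt.get_global_shape()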
def __setstate__(self, state):
    TTvec.__setstate__(self, state)
def __getstate__(self):
    return TTvec.__getstate__(self)
def dot(self, B):
    if isinstance(B, TTvec) and not isinstance(B, TTmat):
        if not self.init or not B.init:
            raise NameError(
                "TensorToolbox.TTmat.dot: TT not initialized correctly")
        # TT matrix-vector dot product
        # Check consistency
        if (self.sparse_only and len(self.sparse_TT) != len(B.TT)) or (
                not self.sparse_only and len(self.TT) != len(B.TT)):
            raise NameError(
                "TensorToolbox.TTmat.dot: A and B must have the same number of cores")
        for bsize, Acols in zip(B.shape(), self.ncols):
            if bsize != Acols:
                raise NameError(
                    "TensorToolbox.TTmat.dot: Matrix and vector mode dimensions are not consistent")
        Y = []
        for i, (Ai, Bi, is_sparse, sp_A) in enumerate(
                zip(self.TT, B.TT, self.is_sparse, self.sparse_TT)):
            Bi_rsh = np.transpose(Bi, axes=(1, 0, 2))
            Bi_rsh = np.reshape(Bi_rsh,
                                (Bi.shape[1], Bi.shape[0] * Bi.shape[2]))
            if is_sparse:
                Yi_rsh = sp_A.dot(Bi_rsh)
            else:
                Ai_rsh = np.reshape(Ai, (Ai.shape[0], self.nrows[i],
                                         self.ncols[i], Ai.shape[2]))
                Ai_rsh = np.transpose(Ai_rsh, axes=(0, 3, 1, 2))
                Ai_rsh = np.reshape(Ai_rsh,
                                    (Ai.shape[0] * Ai.shape[2] * self.nrows[i],
                                     self.ncols[i]))
                Yi_rsh = np.dot(Ai_rsh, Bi_rsh)
            Ai0 = self.ranks()[i]
            Ai2 = self.ranks()[i + 1]
            Yi_rsh = np.reshape(Yi_rsh,
                                (Ai0, Ai2, self.nrows[i], Bi.shape[0],
                                 Bi.shape[2]))
            Yi_rsh = np.transpose(Yi_rsh, axes=(0, 3, 2, 1, 4))
            Yi = np.reshape(Yi_rsh,
                            (Ai0 * Bi.shape[0], self.nrows[i],
                             Ai2 * Bi.shape[2]))
            Y.append(Yi)
        if isinstance(B, WTTvec):
            return WTTvec(Y, B.sqrtW).build()
        else:
            return TTvec(Y).build()
    elif isinstance(B, TTmat):
        if not self.init or not B.init:
            raise NameError(
                "TensorToolbox.TTmat.dot: TT not initialized correctly")
        # TT matrix-matrix dot product
        # Check consistency
        if len(self.TT) != len(B.TT):
            raise NameError(
                "TensorToolbox.TTmat.dot: A and B must have the same number of cores")
        for Brows, Acols in zip(B.nrows, self.ncols):
            if Brows != Acols:
                raise NameError(
                    "TensorToolbox.TTmat.dot: Matrix mode dimensions are not consistent")
        Y = []
        for i, (Ai, Bi) in enumerate(zip(self.TT, B.TT)):
            Ai_rsh = np.reshape(Ai, (Ai.shape[0], self.nrows[i],
                                     self.ncols[i], Ai.shape[2]))
            Ai_rsh = np.transpose(Ai_rsh, axes=(0, 3, 1, 2))
            Ai_rsh = np.reshape(Ai_rsh,
                                (Ai.shape[0] * Ai.shape[2] * self.nrows[i],
                                 self.ncols[i]))
            Bi_rsh = np.reshape(Bi, (Bi.shape[0], B.nrows[i], B.ncols[i],
                                     Bi.shape[2]))
            Bi_rsh = np.transpose(Bi_rsh, axes=(1, 0, 3, 2))
            Bi_rsh = np.reshape(Bi_rsh,
                                (B.nrows[i],
                                 Bi.shape[0] * Bi.shape[2] * B.ncols[i]))
            Yi_rsh = np.dot(Ai_rsh, Bi_rsh)
            Yi_rsh = np.reshape(Yi_rsh,
                                (Ai.shape[0], Ai.shape[2], self.nrows[i],
                                 Bi.shape[0], Bi.shape[2], B.ncols[i]))
            Yi_rsh = np.transpose(Yi_rsh, axes=(0, 3, 2, 5, 1, 4))
            Yi = np.reshape(Yi_rsh,
                            (Ai.shape[0] * Bi.shape[0],
                             self.nrows[i] * B.ncols[i],
                             Ai.shape[2] * Bi.shape[2]))
            Y.append(Yi)
        return TTmat(Y, self.nrows, B.ncols).build()
    elif isinstance(B, np.ndarray):
        if not self.init:
            raise NameError(
                "TensorToolbox.multilinalg.dot: TT not initialized correctly")
        # Matrix-vector dot product with TTmat and full vector
        # Check consistency
        if len(self.shape()) != B.ndim:
            raise NameError(
                "TensorToolbox.multilinalg.dot: A and B must have the same number of dimensions")
        for bsize, Acols in zip(B.shape, self.ncols):
            if bsize != Acols:
                raise NameError(
                    "TensorToolbox.multilinalg.dot: Matrix and vector mode dimensions are not consistent")
        Bshape = B.shape
        Y = np.reshape(B, ((1, ) + Bshape))
        Yshape = Y.shape
        for k, Ak in enumerate(self.TT):
            # Note: Ak(alpha_{k-1},(i_k,j_k),alpha_{k})
            # Reshape it to Ak((alpha_{k},i_k),(alpha_{k-1},j_k))
            alpha0 = Ak.shape[0]
            alpha1 = Ak.shape[2]
            Ak_rsh = np.reshape(Ak, (alpha0, self.nrows[k], self.ncols[k],
                                     alpha1))
            Ak_rsh = np.transpose(
                Ak_rsh, axes=(3, 1, 0, 2))  # Ak(alpha_{k},i_k,alpha_{k-1},j_k)
            Ak_rsh = np.reshape(Ak_rsh,
                                (alpha1 * self.nrows[k],
                                 alpha0 * self.ncols[k]))
            # Reshape Y to Y((alpha_{k-1},j_k),(i_1,..,i_{k-1},j_{k+1},..,j_d))
            Y = np.transpose(Y,
                             axes=(0, k + 1) + tuple(range(1, k + 1)) +
                             tuple(range(k + 2, len(Bshape) + 1)))
            Y = np.reshape(Y,
                           (alpha0 * Yshape[k + 1],
                            int(round(np.prod(Yshape[1:k + 1]) *
                                      np.prod(Yshape[k + 2:])))))
            # Dot product
            Y = np.dot(Ak_rsh, Y)
            # Reshape Y
            Y = np.reshape(Y, (alpha1, self.nrows[k]) + Yshape[1:k + 1] +
                           Yshape[k + 2:])
            Y = np.transpose(Y,
                             axes=(0, ) + tuple(range(2, k + 2)) + (1, ) +
                             tuple(range(k + 2, len(Bshape) + 1)))
            Yshape = Y.shape
        if Y.shape[0] != 1:
            raise NameError(
                "TensorToolbox.multilinalg.dot: Last core dimension error")
        Y = np.reshape(Y, Y.shape[1:])
        return Y
    else:
        raise AttributeError("TensorToolbox.TTmat.dot: wrong input type")
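# Usage sketch (hypothetical setup: `A`, `B` are built TTmats, `x_tt` a built
# TTvec and `x_np` a full np.ndarray, all with consistent mode sizes). Note
# that the TT ranks multiply core by core, rank(A.dot(x_tt)) = rank(A) *
# rank(x_tt), so a rounding step may be needed afterwards:
#
#   >>> y_tt = A.dot(x_tt)    # TTmat-TTvec product, returns a TTvec
#   >>> C = A.dot(B)          # TTmat-TTmat product, returns a TTmat
#   >>> y_np = A.dot(x_np)    # TTmat-ndarray product, returns an np.ndarray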