Example #1
def QTTzerosvec(d, N, base):
    """ Returns the rank-1 multidimensional vector of zeros in Quantics Tensor Train format
    
    Args:
       d (int): number of dimensions
       N (int or list): if int, the same size is used for every dimension; if list of int, then len(N) == d and each dimension uses its own size
       base (int): QTT base
    
    Returns:
       QTTvec: the rank-1 multidimensional vector of zeros in Quantics Tensor Train format
    """
    from TensorToolbox.core import Candecomp
    from TensorToolbox.core import zerosvec

    if isint(N):
        N = [N for i in range(d)]

    for sizedim in N:
        if np.remainder(math.log(sizedim) / math.log(base),
                        1.0) > np.spacing(1):
            raise NameError(
                "TensorToolbox.QTTvec.QTTzerosvec: base is not a valid base of N"
            )

    L = int(np.around(math.log(np.prod(N)) / math.log(base)))

    tt = zerosvec(L, [base for i in range(L)])

    return QTTvec(tt.TT, global_shape=N).build()
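
A minimal standalone sketch (not part of TensorToolbox) of the shape bookkeeping QTTzerosvec performs: the mode sizes are made uniform and the number of quantics cores L is the base-`base` logarithm of the total number of entries. Illustrative values only:

import math
import numpy as np

d, N, base = 3, 8, 2
N = [N] * d                                    # uniform sizes -> [8, 8, 8]
L = int(np.around(math.log(np.prod(N)) / math.log(base)))
print(L)                                       # 9 cores of mode size 2 (2**9 = 512 = 8*8*8 entries)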
Example #2
def eye(d, N):
    """ Returns the multidimensional identity operator in Tensor Train format
    
    Args:
       d (int): number of dimensions
       N (int or list): if int, the same size is used for every dimension; if list of int, then len(N) == d and each dimension uses its own size
    
    Returns:
       TTmat: the multidimensional identity matrix in Tensor Train format

    Note:
       TODO: improve construction avoiding passage through Candecomp
    """
    from TensorToolbox.core import Candecomp
    if isint(N):
        If = np.eye(N).flatten().reshape((1, N**2))
        CPtmp = [If for i in range(d)]
    elif isinstance(N, list):
        CPtmp = [
            np.eye(N[i]).flatten().reshape((1, N[i]**2)) for i in range(d)
        ]

    CP_id = Candecomp(CPtmp)
    TT_id = TTmat(CP_id, nrows=N, ncols=N, is_sparse=[True] * d).build()
    return TT_id
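
The identity built above is rank-1: each core stores a flattened N x N identity, and the full operator is the Kronecker product of the per-dimension identities. A small numpy check, independent of TensorToolbox:

import numpy as np

d, N = 3, 4
cores = [np.eye(N).flatten().reshape((1, N**2)) for i in range(d)]  # same cores as in eye()

full = np.eye(1)
for c in cores:
    full = np.kron(full, c.reshape(N, N))      # unfold each core back to N x N

assert np.array_equal(full, np.eye(N**d))      # identity on the full 4**3 = 64 space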
Example #3
def randmat(d, nrows, ncols):
    """ Returns the rank-1 multidimensional random matrix in Tensor Train format
    
    Args:
       d (int): number of dimensions
       nrows (int or list): if int, the same size is used for every dimension; if list of int, then len(nrows) == d and each dimension uses its own size
       ncols (int or list): if int, the same size is used for every dimension; if list of int, then len(ncols) == d and each dimension uses its own size
    
    Returns:
       TTmat: the rank-1 multidimensional random matrix in Tensor Train format
    """
    import numpy.random as npr
    from TensorToolbox.core import Candecomp

    if isint(nrows): nrows = [nrows for i in range(d)]
    if isint(ncols): ncols = [ncols for i in range(d)]
    CPtmp = [
        npr.random(nrows[i] * ncols[i]).reshape((1, nrows[i] * ncols[i])) + 0.5
        for i in range(d)
    ]
    CP_rand = Candecomp(CPtmp)
    TT_rand = TTmat(CP_rand, nrows, ncols).build()
    return TT_rand
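
Since the construction is rank-1, the full operator encoded by randmat is (up to the toolbox's index-ordering convention) the Kronecker product of the d per-dimension random factors. A standalone numpy sketch of that structure, with illustrative sizes:

import numpy as np
import numpy.random as npr

d, nrows, ncols = 2, 3, 2
factors = [npr.random((nrows, ncols)) + 0.5 for i in range(d)]   # entries in [0.5, 1.5)

full = np.ones((1, 1))
for F in factors:
    full = np.kron(full, F)

print(full.shape)                              # (9, 4): a (3*3) x (2*2) matrix

Example #4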
    def build(self,
              eps=1e-10,
              method='svd',
              rs=None,
              fix_rank=False,
              Jinit=None,
              delta=1e-4,
              maxit=100,
              mv_eps=1e-6,
              mv_maxit=100,
              max_ranks=None,
              kickrank=2):
        """ Common interface for the construction of the approximation.

        :param float eps: [default == 1e-10] For method=='svd': precision with which to approximate the input tensor. For method=='ttcross': TT-rounding tolerance for rank-check.
        :param string method: 'svd' uses singular value decomposition to construct the TT representation :cite:`Oseledets2011`; 'ttcross' uses a low-rank skeleton approximation :cite:`Oseledets2010`; 'ttdmrg' uses Tensor Train Renormalization Cross :cite:`Savostyanov2011,Savostyanov2013`; 'ttdmrgcross' uses 'ttdmrg' with a 'ttcross' approximation of the supercores
        :param list rs: list of integer ranks of different cores. If ``None`` then the incremental TTcross approach will be used. (method=='ttcross')
        :param bool fix_rank: determines whether the rank is allowed to be increased (method=='ttcross')
        :param list Jinit: list of list of integers containing the r starting columns in the lowrankapprox routine for each core. If ``None`` then pick them randomly. (method=='ttcross')
        :param float delta: accuracy parameter in the TT-cross routine (method=='ttcross'). It is the relative error in Frobenius norm between two successive iterations.
        :param int maxit: maximum number of iterations in the lowrankapprox routine (method=='ttcross')
        :param float mv_eps: accuracy parameter for each usage of the maxvol algorithm (method=='ttcross')
        :param int mv_maxit: maximum number of iterations in the maxvol routine (method=='ttcross')
        :param list max_ranks: Maximum ranks used to limit the truncation rank implied by ``eps``. The first and last elements of the list must be ``1``, e.g. ``[1,...,1]``. Default: ``None``.
        :param int kickrank: rank overshooting for 'ttdmrg'
        """

        nrows = self.full_nrows
        ncols = self.full_ncols

        if isint(nrows) and isint(ncols):
            nrows = [nrows]
            ncols = [ncols]

        if len(nrows) != len(ncols):
            raise NameError(
                "TensorToolbox.QTTmat.__init__: len(nrows)!=len(ncols)")

        self.full_nrows = nrows
        self.full_ncols = ncols

        if isinstance(self.A, np.ndarray):

            for i, sizedim in enumerate(self.A.shape):
                if sizedim != self.full_nrows[i] * self.full_ncols[i]:
                    raise NameError(
                        "TensorToolbox.QTTmat.__init__: Array dimension not consistent with nrows and ncols"
                    )
                if np.remainder(np.log(sizedim) / np.log(self.basemat),
                                1.0) > np.spacing(1):
                    raise NameError(
                        "TensorToolbox.QTTmat.__init__: base is not a valid base of A.size"
                    )

            self.L = int(np.log(self.A.size) / np.log(self.basemat))

            # Prepare interleaved indices: pair each row digit with the matching column digit of its dimension
            Ls = [
                int(
                    np.log(self.full_nrows[i] * self.full_ncols[i]) /
                    np.log(self.basemat)) for i in range(self.ndims())
            ]
            idxs = []
            for j in range(self.ndims()):
                offset = np.sum(2 * Ls[:j], dtype=int)
                for i in range(Ls[j]):
                    idxs.append(offset + i)
                    idxs.append(offset + Ls[j] + i)

            # Fold, re-order and reshape
            self.A = np.reshape(self.A, [self.base for i in range(2 * self.L)])
            self.A = np.transpose(self.A, axes=idxs)
            self.A = np.reshape(self.A, [self.basemat for i in range(self.L)])

            super(QTTmat, self).build(eps=eps,
                                      method=method,
                                      rs=rs,
                                      fix_rank=fix_rank,
                                      Jinit=Jinit,
                                      delta=delta,
                                      maxit=maxit,
                                      mv_eps=mv_eps,
                                      mv_maxit=mv_maxit,
                                      max_ranks=max_ranks,
                                      kickrank=kickrank)

        elif isinstance(self.A, list):

            super(QTTmat, self).build(eps=eps,
                                      method=method,
                                      rs=rs,
                                      fix_rank=fix_rank,
                                      Jinit=Jinit,
                                      delta=delta,
                                      maxit=maxit,
                                      mv_eps=mv_eps,
                                      mv_maxit=mv_maxit,
                                      max_ranks=max_ranks,
                                      kickrank=kickrank)

            # Check that unfolded nrows,ncols are consistent with the dimension of A
            if np.prod(self.shape()) != np.prod(self.full_nrows) * np.prod(
                    self.full_ncols):
                self.init = False
                raise NameError(
                    "TensorToolbox.QTTmat.__init__: unfolded nrows,ncols not consistent with shape of A"
                )
            for nrow, ncol in zip(self.full_nrows, self.full_ncols):
                if np.remainder(
                        np.log(nrow * ncol) / np.log(self.basemat),
                        1.0) > np.spacing(1):
                    self.init = False
                    raise NameError(
                        "TensorToolbox.QTTmat.__init__: base is not a valid base for the selected nrows,ncols"
                    )

            self.L = len(self.shape())

        return self
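
The interleaving above can be seen on a single dimension with plain numpy: a (base**L) x (base**L) matrix is split into 2L base-sized digits, the row and column digits are interleaved, and each (row, column) digit pair is merged into one mode of size base**2. A standalone sketch with illustrative values:

import numpy as np

base, L = 2, 3
A = np.arange((base**L)**2, dtype=float).reshape(base**L, base**L)

A = A.reshape([base] * (2 * L))                # digits (i1,...,iL, j1,...,jL)
idxs = [k for pair in zip(range(L), range(L, 2 * L)) for k in pair]
A = np.transpose(A, axes=idxs)                 # interleaved (i1, j1, i2, j2, ...)
A = A.reshape([base**2] * L)                   # L QTT modes of size base**2

print(A.shape)                                 # (4, 4, 4)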
Example #5
    def build(self,
              eps=1e-10,
              method='svd',
              rs=None,
              fix_rank=False,
              Jinit=None,
              delta=1e-4,
              maxit=100,
              mv_eps=1e-6,
              mv_maxit=100,
              max_ranks=None,
              kickrank=2):
        """ Common interface for the construction of the approximation.

        :param float eps: [default == 1e-10] For method=='svd': precision with which to approximate the input tensor. For method=='ttcross': TT-rounding tolerance for rank-check.
        :param string method: 'svd' uses singular value decomposition to construct the TT representation :cite:`Oseledets2011`; 'ttcross' uses a low-rank skeleton approximation :cite:`Oseledets2010`; 'ttdmrg' uses Tensor Train Renormalization Cross :cite:`Savostyanov2011,Savostyanov2013`; 'ttdmrgcross' uses 'ttdmrg' with a 'ttcross' approximation of the supercores
        :param list rs: list of integer ranks of different cores. If ``None`` then the incremental TTcross approach will be used. (method=='ttcross')
        :param bool fix_rank: determines whether the rank is allowed to be increased (method=='ttcross')
        :param list Jinit: list of list of integers containing the r starting columns in the lowrankapprox routine for each core. If ``None`` then pick them randomly. (method=='ttcross')
        :param float delta: accuracy parameter in the TT-cross routine (method=='ttcross'). It is the relative error in Frobenius norm between two successive iterations.
        :param int maxit: maximum number of iterations in the lowrankapprox routine (method=='ttcross')
        :param float mv_eps: accuracy parameter for each usage of the maxvol algorithm (method=='ttcross')
        :param int mv_maxit: maximum number of iterations in the maxvol routine (method=='ttcross')
        :param list max_ranks: Maximum ranks used to limit the truncation rank implied by ``eps``. The first and last elements of the list must be ``1``, e.g. ``[1,...,1]``. Default: ``None``.
        :param int kickrank: rank overshooting for 'ttdmrg'
        """

        nrows = self.nrows
        ncols = self.ncols
        is_sparse = self.is_sparse

        self.nrows = []
        self.ncols = []
        self.is_sparse = []
        self.sparse_TT = []

        if isinstance(self.A, list) and np.all(
            [(isinstance(self.A[i], scsp.csr_matrix)
              or isinstance(self.A[i], scsp.csc_matrix)
              or isinstance(self.A[i], scsp.dia_matrix))
             for i in range(len(self.A))]):
            if self.sparse_ranks is None:
                raise AttributeError(
                    "The parameter sparse_ranks must be defined for only-sparse initialization"
                )

            if len(self.sparse_ranks) - 1 != len(self.A):
                raise AttributeError(
                    "The condition len(sparse_ranks)-1 == len(A) must hold.")

            self.sparse_only = True
            self.sparse_TT = self.A

            if isint(nrows) and isint(ncols):
                d = len(self.sparse_TT)
                self.nrows = [nrows for i in range(d)]
                self.ncols = [ncols for i in range(d)]
            elif isinstance(nrows, list) and isinstance(ncols, list):
                self.nrows = nrows
                self.ncols = ncols
            else:
                self.init = False
                raise TypeError(
                    "tensor.TTmat.__init__: types of nrows and ncols are inconsistent."
                )

            self.is_sparse = [True] * len(self.sparse_TT)
            self.TT = [None] * len(self.sparse_TT)

            self.init = True

        elif isinstance(self.A, list) and np.any(
            [(isinstance(self.A[i], scsp.csr_matrix)
              or isinstance(self.A[i], scsp.csc_matrix)
              or isinstance(self.A[i], scsp.dia_matrix))
             for i in range(len(self.A))]):
            raise TypeError(
                "Mixed sparse/full initialization not implemented yet")
        else:
            self.sparse_only = False
            super(TTmat, self).build(eps=eps,
                                     method=method,
                                     rs=rs,
                                     fix_rank=fix_rank,
                                     Jinit=Jinit,
                                     delta=delta,
                                     maxit=maxit,
                                     mv_eps=mv_eps,
                                     mv_maxit=mv_maxit,
                                     max_ranks=max_ranks,
                                     kickrank=kickrank)
            if isint(nrows) and isint(ncols):
                d = len(self.TT)
                self.nrows = [nrows for i in range(d)]
                self.ncols = [ncols for i in range(d)]
            elif isinstance(nrows, list) and isinstance(ncols, list):
                self.nrows = nrows
                self.ncols = ncols
            else:
                self.init = False
                raise TypeError(
                    "tensor.TTmat.__init__: types of nrows and ncols are inconsistent."
                )

            if is_sparse is None:
                self.is_sparse = [False] * len(self.TT)
            elif len(is_sparse) != len(self.TT):
                raise TypeError(
                    "tensor.TTmat.__init__: parameter is_sparse must be of length d=A.ndims."
                )
            else:
                self.is_sparse = is_sparse

            for i, (is_sp, Ai) in enumerate(zip(self.is_sparse, self.TT)):
                if is_sp:
                    Ai_rsh = np.reshape(Ai, (Ai.shape[0], self.nrows[i],
                                             self.ncols[i], Ai.shape[2]))
                    Ai_rsh = np.transpose(Ai_rsh, axes=(0, 3, 1, 2))
                    Ai_rsh = np.reshape(Ai_rsh, (Ai.shape[0] * Ai.shape[2] *
                                                 self.nrows[i], self.ncols[i]))
                    self.sparse_TT.append(scsp.csr_matrix(Ai_rsh))
                else:
                    self.sparse_TT.append(None)

            # Check that all the mode sizes are equal to rows*cols
            for i, msize in enumerate(self.shape()):
                if msize != self.nrows[i] * self.ncols[i]:
                    self.init = False
                    raise NameError(
                        "tensor.TTmat.__init__: the %d-th TT mode size must be equal to nrows[%d]*ncols[%d]"
                        % (i, i, i))

        return self
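
The sparse branch above stores each TT core as a 2-D CSR matrix: a core of shape (r_prev, nrows*ncols, r_next) is transposed and reshaped into (r_prev * r_next * nrows, ncols). A standalone numpy/scipy sketch of that reshaping, with illustrative sizes only:

import numpy as np
import scipy.sparse as scsp

r_prev, nrows, ncols, r_next = 2, 3, 4, 5
core = np.arange(r_prev * nrows * ncols * r_next, dtype=float)
core = core.reshape(r_prev, nrows * ncols, r_next)

core = core.reshape(r_prev, nrows, ncols, r_next)
core = np.transpose(core, axes=(0, 3, 1, 2))   # (r_prev, r_next, nrows, ncols)
core = core.reshape(r_prev * r_next * nrows, ncols)

sparse_core = scsp.csr_matrix(core)
print(sparse_core.shape)                       # (30, 4)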