Example 1
def trl(x, weight, bias=None, **kwargs):
    """Tensor Regression Layer

    Parameters
    ----------
    x : torch.Tensor
        batch of inputs
    weight : FactorizedTensor
        factorized weights of the TRL
    bias : torch.Tensor, optional
        1D tensor, by default None

    Returns
    -------
    result
        input x contracted with regression weights
    """
    if isinstance(weight, TuckerTensor):
        return tucker_trl(x, weight, bias=bias, **kwargs)
    else:
        if bias is None:
            return tenalg.inner(x, weight.to_tensor(), n_modes=tl.ndim(x) - 1)
        else:
            return tenalg.inner(x, weight.to_tensor(),
                                n_modes=tl.ndim(x) - 1) + bias
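
For reference, a minimal sketch (not part of the original source) of the dense contraction that trl performs, assuming TensorLy is imported as tl, tensorly.tenalg as tenalg, and a NumPy backend:

import numpy as np
import tensorly as tl
from tensorly import tenalg

# Hypothetical shapes: a batch of 8 inputs of shape (4, 5) regressed onto 3 outputs.
x = tl.tensor(np.random.random((8, 4, 5)))        # batch of inputs
w = tl.tensor(np.random.random((4, 5, 3)))        # dense regression weights
out = tenalg.inner(x, w, n_modes=tl.ndim(x) - 1)  # contract all non-batch modes -> shape (8, 3)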
Example 2
def monotonicity_prox(tensor, decreasing=False):
    """
    This function projects each column of the input array on the set of arrays so that
          x[1] <= x[2] <= ... <= x[n] (decreasing=False)
                        or
          x[1] >= x[2] >= ... >= x[n] (decreasing=True)
    is satisfied columnwise.

    Parameters
    ----------
    tensor : ndarray
    decreasing : bool, optional
        If True, the columns of the returned tensor are monotonically decreasing;
        otherwise they are monotonically increasing.
        Default: False

    Returns
    -------
    ndarray
          A tensor whose columns are monotonic.

    References
    ----------
    .. [1]: G. Chierchia, E. Chouzenoux, P. L. Combettes, and J.-C. Pesquet
            "The Proximity Operator Repository. User's guide"
    """
    if tl.ndim(tensor) == 1:
        tensor = tl.reshape(tensor, [tl.shape(tensor)[0], 1])
    elif tl.ndim(tensor) > 2:
        raise ValueError(
            "Monotonicity prox doesn't support an input which has more than 2 dimensions."
        )
    tensor_mon = tl.copy(tensor)
    if decreasing:
        tensor_mon = tl.flip(tensor_mon, axis=0)
    row, column = tl.shape(tensor_mon)
    cum_sum = tl.cumsum(tensor_mon, axis=0)
    for j in range(column):
        assisted_tensor = tl.zeros([row, row])
        for i in range(row):
            if i == 0:
                assisted_tensor = tl.index_update(
                    assisted_tensor, tl.index[i, i:], cum_sum[i:, j] /
                    tl.tensor(tl.arange(row - i) + 1, **tl.context(tensor)))
            else:
                assisted_tensor = tl.index_update(
                    assisted_tensor, tl.index[i, i:],
                    (cum_sum[i:, j] - cum_sum[i - 1, j]) /
                    tl.tensor(tl.arange(row - i) + 1, **tl.context(tensor)))
        tensor_mon = tl.index_update(tensor_mon, tl.index[:, j],
                                     tl.max(assisted_tensor, axis=0))
        for i in reversed(range(row - 1)):
            if tensor_mon[i, j] > tensor_mon[i + 1, j]:
                tensor_mon = tl.index_update(tensor_mon, tl.index[i, j],
                                             tensor_mon[i + 1, j])
    if decreasing:
        tensor_mon = tl.flip(tensor_mon, axis=0)
    return tensor_mon
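
A quick sanity check (a sketch assuming the NumPy backend): project a non-monotone column onto the set of non-decreasing sequences using the function above.

import numpy as np
import tensorly as tl

column = tl.tensor(np.array([3.0, 1.0, 2.0, 5.0]))
projected = monotonicity_prox(column)  # 1D input is reshaped to a (4, 1) column
# Every column of `projected` is non-decreasing from top to bottom.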
Example 3
def z_score_values(A, cell_dim):
    """ Function that takes in the values tensor and z-scores it. """
    assert cell_dim < tl.ndim(A)
    convAxes = tuple([i for i in range(tl.ndim(A)) if i != cell_dim])
    convIDX = [None] * tl.ndim(A)
    convIDX[cell_dim] = slice(None)

    sigma = np.std(A, axis=convAxes)
    return A / sigma[tuple(convIDX)]
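
A short usage sketch (assumes the NumPy backend, since np.std is applied directly to the tensor):

import numpy as np
import tensorly as tl

A = tl.tensor(np.random.random((5, 6, 7)))
scaled = z_score_values(A, cell_dim=0)
# One standard deviation is computed per index of mode 0 (over all remaining modes),
# and each slice A[i] is divided by its own sigma.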
Example 4
def contract(tensor1, modes1, tensor2, modes2):
    """Tensor contraction between two tensors on specified modes
    
    Parameters
    ----------
    tensor1 : tl.tensor
    modes1 : int list or int
        modes on which to contract tensor1
    tensor2 : tl.tensor
    modes2 : int list or int
        modes on which to contract tensor2

    Returns
    -------
    contraction : tensor1 contracted with tensor2 on the specified modes
    """
    if isinstance(modes1, int):
        modes1 = [modes1]
    if isinstance(modes2, int):
        modes2 = [modes2]
    modes1 = list(modes1)
    modes2 = list(modes2)

    if len(modes1) != len(modes2):
        raise ValueError(
            'Can only contract two tensors along the same number of modes '
            '(len(modes1) == len(modes2)). '
            'However, got {} modes for tensor 1 and {} modes for tensor 2 '
            '(modes1={}, and modes2={}).'.format(len(modes1), len(modes2),
                                                 modes1, modes2))

    contraction_dims = [tl.shape(tensor1)[i] for i in modes1]
    if contraction_dims != [tl.shape(tensor2)[i] for i in modes2]:
        raise ValueError(
            'Trying to contract tensors over modes of different sizes '
            '(contracting modes of sizes {} and {}).'.format(
                contraction_dims, [tl.shape(tensor2)[i] for i in modes2]))
    shared_dim = int(np.prod(contraction_dims))

    modes1_free = [i for i in range(tl.ndim(tensor1)) if i not in modes1]
    free_shape1 = [tl.shape(tensor1)[i] for i in modes1_free]

    tensor1 = tl.reshape(tl.transpose(tensor1, modes1_free + modes1),
                         (int(np.prod(free_shape1)), shared_dim))

    modes2_free = [i for i in range(tl.ndim(tensor2)) if i not in modes2]
    free_shape2 = [tl.shape(tensor2)[i] for i in modes2_free]

    tensor2 = tl.reshape(tl.transpose(tensor2, modes2 + modes2_free),
                         (shared_dim, int(np.prod(free_shape2))))

    res = tl.dot(tensor1, tensor2)
    return tl.reshape(res, tuple(free_shape1 + free_shape2))
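
A small check (a sketch, NumPy backend assumed) comparing contract against numpy.tensordot for a single contracted mode:

import numpy as np
import tensorly as tl

a = tl.tensor(np.random.random((2, 3, 4)))
b = tl.tensor(np.random.random((4, 5)))
res = contract(a, 2, b, 0)  # contract mode 2 of a with mode 0 of b -> shape (2, 3, 5)
np.testing.assert_allclose(res, np.tensordot(a, b, axes=([2], [0])))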
Example 5
def _validate_tt_matrix(tt_tensor):
    factors = tt_tensor
    n_factors = len(factors)

    if n_factors < 1:
        raise ValueError(
            'A Tensor-Train (MPS) tensor should be composed of at least one factor. '
            'However, {} factors were given.'.format(n_factors))

    rank = []
    left_shape = []
    right_shape = []
    for index, factor in enumerate(factors):
        current_rank, current_left_shape, current_right_shape, next_rank = tl.shape(
            factor)

        # Check that factors are fourth order tensors
        if not tl.ndim(factor) == 4:
            raise ValueError(
                'A TTMatrix expresses a tensor as fourth order factors (tt-cores).\n'
                'However, tl.ndim(factors[{}]) = {}'.format(
                    index, tl.ndim(factor)))
        # Consecutive factors should have matching ranks
        if index and tl.shape(factors[index - 1])[-1] != current_rank:
            raise ValueError(
                'Consecutive factors should have matching ranks\n'
                ' -- e.g. tl.shape(factors[0])[-1]) == tl.shape(factors[1])[0])\n'
                'However, tl.shape(factor[{}])[-1] == {} but'
                ' tl.shape(factor[{}])[0] == {} '.format(
                    index - 1,
                    tl.shape(factors[index - 1])[-1], index, current_rank))
        # Check for boundary conditions
        if (index == 0) and current_rank != 1:
            raise ValueError(
                'Boundary conditions dictate factor[0].shape[0] == 1. '
                'However, got factor[0].shape[0] = {}.'.format(current_rank))
        if (index == n_factors - 1) and next_rank != 1:
            raise ValueError(
                'Boundary conditions dictate factor[-1].shape[-1] == 1. '
                'However, got factor[{}].shape[-1] = {}.'.format(
                    n_factors - 1, next_rank))

        left_shape.append(current_left_shape)
        right_shape.append(current_right_shape)

        rank.append(current_rank)

    # Add last rank (boundary condition)
    rank.append(next_rank)

    return tuple(left_shape) + tuple(right_shape), tuple(rank)
Example 6
def _validate_tt_tensor(tt_tensor):
    if isinstance(tt_tensor, TTTensor):
        # it's already been validated at creation
        return tt_tensor.shape, tt_tensor.rank
    elif isinstance(tt_tensor, (float, int)):  # 0-order tensor
        return 0, 0

    factors = tt_tensor
    n_factors = len(factors)

    rank = []
    shape = []
    for index, factor in enumerate(factors):
        current_rank, current_shape, next_rank = tl.shape(factor)

        # Check that factors are third order tensors
        if not tl.ndim(factor) == 3:
            raise ValueError(
                'TT expresses a tensor as third order factors (tt-cores).\n'
                'However, tl.ndim(factors[{}]) = {}'.format(
                    index, tl.ndim(factor)))
        # Consecutive factors should have matching ranks
        if index and tl.shape(factors[index - 1])[2] != current_rank:
            raise ValueError(
                'Consecutive factors should have matching ranks\n'
                ' -- e.g. tl.shape(factors[0])[2]) == tl.shape(factors[1])[0])\n'
                'However, tl.shape(factor[{}])[2] == {} but'
                ' tl.shape(factor[{}])[0] == {} '.format(
                    index - 1,
                    tl.shape(factors[index - 1])[2], index, current_rank))
        # Check for boundary conditions
        if (index == 0) and current_rank != 1:
            raise ValueError(
                'Boundary conditions dictate factor[0].shape[0] == 1. '
                'However, got factor[0].shape[0] = {}.'.format(current_rank))
        if (index == n_factors - 1) and next_rank != 1:
            raise ValueError(
                'Boundary conditions dictate factor[-1].shape[2] == 1. '
                'However, got factor[{}].shape[2] = {}.'.format(
                    n_factors - 1, next_rank))

        shape.append(current_shape)
        rank.append(current_rank)

    # Add last rank (boundary condition)
    rank.append(next_rank)

    return tuple(shape), tuple(rank)
Example 7
    def __getitem__(self, indices):
        if not isinstance(indices, Iterable):
            indices = [indices]

        output_shape = []
        indexed_factors = []
        factors = self.factors
        weights = self.weights

        for (index, shape) in zip(indices, self.tensorized_shape):
            if isinstance(shape, int):
                # We are indexing a "regular" mode
                factor, *factors = factors

                if isinstance(index, (np.integer, int)):
                    weights = weights * factor[index, :]
                else:
                    factor = factor[index, :]
                    indexed_factors.append(factor)
                    output_shape.append(factor.shape[0])

            else:
                # We are indexing a tensorized mode

                if index == slice(None) or index == ():
                    # Keeping all indices (:)
                    indexed_factors.extend(factors[:len(shape)])
                    output_shape.append(shape)

                else:
                    if isinstance(index, slice):
                        # Since we've already filtered out :, this is a partial slice
                        # Convert into list
                        max_index = math.prod(shape)
                        index = list(range(*index.indices(max_index)))

                    if isinstance(index, Iterable):
                        output_shape.append(len(index))

                    index = np.unravel_index(index, shape)
                    # Index the whole tensorized shape, resulting in a single factor
                    factor = 1
                    for idx, ff in zip(index, factors[:len(shape)]):
                        factor *= ff[idx, :]

                    if tl.ndim(factor) == 2:
                        indexed_factors.append(factor)
                    else:
                        weights = weights * factor

                factors = factors[len(shape):]

        indexed_factors.extend(factors)
        output_shape.extend(self.tensorized_shape[len(indices):])

        if indexed_factors:
            return self.__class__(weights,
                                  indexed_factors,
                                  tensorized_shape=output_shape)
        return tl.sum(weights)
Example 8
def tucker_conv(x, tucker_tensor, bias=None, stride=1, padding=0, dilation=1):
    # Extract the rank from the actual decomposition in case it was changed by, e.g. dropout
    rank = tucker_tensor.rank

    batch_size = x.shape[0]
    n_dim = tl.ndim(x)

    # Change the number of channels to the rank
    x_shape = list(x.shape)
    x = x.reshape((batch_size, x_shape[1], -1)).contiguous()

    # This can be done with a tensor contraction
    # First conv == tensor contraction
    # from (in_channels, rank) to (rank == out_channels, in_channels, 1)
    x = F.conv1d(x, tl.transpose(tucker_tensor.factors[1]).unsqueeze(2))

    x_shape[1] = rank[1]
    x = x.reshape(x_shape)

    modes = list(range(2, n_dim+1))
    weight = tl.tenalg.multi_mode_dot(tucker_tensor.core, tucker_tensor.factors[2:], modes=modes)
    x = convolve(x, weight, bias=None, stride=stride, padding=padding)

    # Revert back number of channels from rank to output_channels
    x_shape = list(x.shape)
    x = x.reshape((batch_size, x_shape[1], -1))
    # Last conv == tensor contraction
    # From (out_channels, rank) to (out_channels, in_channels == rank, 1)
    x = F.conv1d(x, tucker_tensor.factors[0].unsqueeze(2), bias=bias)

    x_shape[1] = x.shape[1]
    x = x.reshape(x_shape)

    return x
Example 9
def initialize_factors(tensor, rank, random_state=None, non_negative=False):
    """Initialize factors used in `parafac`.

    Factor matrices are initialized using `random_state`.

    Parameters
    ----------
    tensor : ndarray
    rank : int
    random_state: int
        set to ensure reproducibility
    non_negative : bool, default is False
        if True, non-negative factors are returned

    Returns
    -------
    factors : ndarray list
        List of initialized factors of the CP decomposition where element `i`
        is of shape (tensor.shape[i], rank)

    """
    rng = check_random_state(random_state)

    factors = [
        tl.tensor(rng.random_sample((tensor.shape[i], rank)),
                  **tl.context(tensor)) for i in range(tl.ndim(tensor))
    ]
    if non_negative:
        return [tl.abs(f) for f in factors]
    else:
        return factors

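Expected output shapes for this simplified initializer (a sketch; assumes check_random_state is in scope, e.g. tensorly.check_random_state, and the NumPy backend):

import numpy as np
import tensorly as tl

tensor = tl.tensor(np.random.random((4, 5, 6)))
factors = initialize_factors(tensor, rank=3, random_state=0)
# [tl.shape(f) for f in factors] == [(4, 3), (5, 3), (6, 3)]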
Example 10
def initialize_factors(tensor, rank, init='svd', svd='numpy_svd', random_state=None, non_negative=False):
    r"""Initialize factors used in `parafac`.

    The type of initialization is set using `init`. If `init == 'random'` then
    initialize factor matrices using `random_state`. If `init == 'svd'` then
    initialize the `m`th factor matrix using the `rank` left singular vectors
    of the `m`th unfolding of the input tensor.

    Parameters
    ----------
    tensor : ndarray
    rank : int
    init : {'svd', 'random'}, optional
    svd : str, default is 'numpy_svd'
        function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
    random_state : {None, int, np.random.RandomState}, optional
        set to ensure reproducibility
    non_negative : bool, default is False
        if True, non-negative factors are returned

    Returns
    -------
    factors : ndarray list
        List of initialized factors of the CP decomposition where element `i`
        is of shape (tensor.shape[i], rank)

    """
    rng = check_random_state(random_state)

    if init == 'random':
        factors = [tl.tensor(rng.random_sample((tensor.shape[i], rank)), **tl.context(tensor)) for i in range(tl.ndim(tensor))]
        if non_negative:
            return [tl.abs(f) for f in factors]
        else:
            return factors

    elif init == 'svd':
        try:
            svd_fun = tl.SVD_FUNS[svd]
        except KeyError:
            message = 'Got svd={}. However, for the current backend ({}), the possible choices are {}'.format(
                    svd, tl.get_backend(), tl.SVD_FUNS)
            raise ValueError(message)

        factors = []
        for mode in range(tl.ndim(tensor)):
            U, _, _ = svd_fun(unfold(tensor, mode), n_eigenvecs=rank)

            if tensor.shape[mode] < rank:
                # TODO: this is a hack but it seems to do the job for now
                # factor = tl.tensor(np.zeros((U.shape[0], rank)), **tl.context(tensor))
                # factor[:, tensor.shape[mode]:] = tl.tensor(rng.random_sample((U.shape[0], rank - tl.shape(tensor)[mode])), **tl.context(tensor))
                # factor[:, :tensor.shape[mode]] = U
                random_part = tl.tensor(rng.random_sample((U.shape[0], rank - tl.shape(tensor)[mode])), **tl.context(tensor))
                U = tl.concatenate([U, random_part], axis=1)
            if non_negative:
                factors.append(tl.abs(U[:, :rank]))
            else:
                factors.append(U[:, :rank])
        return factors

    raise ValueError('Initialization method "{}" not recognized'.format(init))
Example 11
def tucker(tensor,
           rank=None,
           ranks=None,
           n_iter_max=100,
           init='svd',
           svd='numpy_svd',
           tol=10e-5,
           random_state=None,
           mask=None,
           verbose=False):
    """Tucker decomposition via Higher Order Orthogonal Iteration (HOI)

        Decomposes `tensor` into a Tucker decomposition:
        ``tensor = [| core; factors[0], ...factors[-1] |]`` [1]_

    Parameters
    ----------
    tensor : ndarray
    ranks : None or int list
            size of the core tensor, ``(len(ranks) == tensor.ndim)``
    rank : None or int
            number of components
    n_iter_max : int
                 maximum number of iterations
    init : {'svd', 'random'}, optional
    svd : str, default is 'numpy_svd'
        function to use to compute the SVD,
        acceptable values in tensorly.SVD_FUNS
    tol : float, optional
          tolerance: the algorithm stops when the variation in
          the reconstruction error is less than the tolerance
    random_state : {None, int, np.random.RandomState}
    mask : ndarray, optional
        array of booleans with the same shape as ``tensor``; 0 where the values
        are missing and 1 everywhere else
    verbose : int, optional
        level of verbosity

    Returns
    -------
    core : ndarray of size `ranks`
            core tensor of the Tucker decomposition
    factors : ndarray list
            list of factors of the Tucker decomposition.
            Its ``i``-th element is of shape ``(tensor.shape[i], ranks[i])``

    References
    ----------
    .. [1] T.G. Kolda and B.W. Bader, "Tensor Decompositions and Applications",
       SIAM REVIEW, vol. 51, no. 3, pp. 455-500, 2009.
    """
    modes = list(range(tl.ndim(tensor)))
    return partial_tucker(tensor,
                          modes,
                          rank=rank,
                          ranks=ranks,
                          n_iter_max=n_iter_max,
                          init=init,
                          svd=svd,
                          tol=tol,
                          random_state=random_state,
                          mask=mask,
                          verbose=verbose)
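
A usage sketch (assumes the rest of the decomposition module, in particular partial_tucker, is available, and the older API that returns a (core, factors) pair as described in the docstring):

import numpy as np
import tensorly as tl

tensor = tl.tensor(np.random.random((6, 7, 8)))
core, factors = tucker(tensor, ranks=[3, 4, 5])
# core has shape (3, 4, 5); factors[i] has shape (tensor.shape[i], ranks[i])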
Example 12
def svd_init_fac(tensor, rank):
    """
    svd initialization of factor matrices for a given tensor and rank
    
    Parameters
    ----------
    tensor : tensor
    rank : int

    Returns
    -------
    factors : list of matrices

  """
    factors = []
    for mode in range(tl.ndim(tensor)):
        # unfolding of a given mode
        unfolded = tl.unfold(tensor, mode)
        if rank <= tl.shape(tensor)[mode]:
            u, s, v = tl.partial_svd(
                unfolded,
                n_eigenvecs=rank)  # leading `rank` singular vectors/values
        else:
            u, s, v = tl.partial_svd(unfolded,
                                     n_eigenvecs=tl.shape(tensor)[mode])
            # completed by random columns
            u = np.append(u,
                          np.random.random(
                              (np.shape(u)[0], rank - tl.shape(tensor)[mode])),
                          axis=1)
            # note: ALS sometimes raises a singular-matrix error in this case
        factors += [u]
    return factors
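
A quick shape check (a sketch; requires a TensorLy version that still exposes tl.partial_svd, NumPy backend assumed):

import numpy as np
import tensorly as tl

tensor = tl.tensor(np.random.random((4, 5, 6)))
factors = svd_init_fac(tensor, rank=3)
# Each factors[mode] has shape (tensor.shape[mode], 3); when rank exceeds
# tensor.shape[mode], the extra columns are filled with random values.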
Example 13
def _validate_mps_tensor(mps_tensor):
    factors = mps_tensor
    n_factors = len(factors)

    if n_factors < 2:
        raise ValueError(
            'A Matrix-Product-State (tensor-train) tensor should be composed of at least two factors. '
            'However, {} factors were given.'.format(n_factors))

    rank = []
    shape = []
    for index, factor in enumerate(factors):
        current_rank, current_shape, next_rank = tl.shape(factor)

        # Check that factors are third order tensors
        if not tl.ndim(factor) == 3:
            raise ValueError(
                'MPS expresses a tensor as third order factors (tt-cores).\n'
                'However, tl.ndim(factors[{}]) = {}'.format(
                    index, tl.ndim(factor)))
        # Consecutive factors should have matching ranks
        if index and tl.shape(factors[index - 1])[2] != current_rank:
            raise ValueError(
                'Consecutive factors should have matching ranks\n'
                ' -- e.g. tl.shape(factors[0])[2]) == tl.shape(factors[1])[0])\n'
                'However, tl.shape(factor[{}])[2] == {} but'
                ' tl.shape(factor[{}])[0] == {} '.format(
                    index - 1,
                    tl.shape(factors[index - 1])[2], index, current_rank))
        # Check for boundary conditions
        if (index == 0) and current_rank != 1:
            raise ValueError(
                'Boundary conditions dictate factor[0].shape[0] == 1. '
                'However, got factor[0].shape[0] = {}.'.format(current_rank))
        if (index == n_factors - 1) and next_rank != 1:
            raise ValueError(
                'Boundary conditions dictate factor[-1].shape[2] == 1. '
                'However, got factor[{}].shape[2] = {}.'.format(
                    n_factors - 1, next_rank))

        shape.append(current_shape)
        rank.append(current_rank)

    # Add last rank (boundary condition)
    rank.append(next_rank)

    return tuple(shape), tuple(rank)
Example 14
def tensor_train_matrix(tensor, rank):
    """Decompose a tensor into a matrix in tt-format
    
    Parameters
    ----------
    tensor : tensorized matrix 
        if your input matrix is of size (4, 9) and your tensorized_shape (2, 2, 3, 3)
        then tensor should be tl.reshape(matrix, (2, 2, 3, 3))
    rank : 'same', float or int tuple
        - if 'same' creates a decomposition with the same number of parameters as `tensor`
        - if float, creates a decomposition with `rank` x the number of parameters of `tensor`
        - otherwise, the actual rank to be used, e.g. (1, rank_2, ..., 1) of size tensor.ndim//2. Note that boundary conditions dictate that the first rank = last rank = 1.
    
    Returns
    -------
    tt_matrix
    """
    order = tl.ndim(tensor)
    n_input = order // 2  # (n_output = n_input)

    if tl.ndim(tensor) != n_input * 2:
        msg = 'The tensor should have as many dimensions for inputs and outputs, i.e. order should be even '
        msg += f'but got a tensor of order tl.ndim(tensor)={order} which is odd.'
        raise ValueError(msg)

    in_shape = tl.shape(tensor)[:n_input]
    out_shape = tl.shape(tensor)[n_input:]

    if n_input == 1:
        # A TTM with a single factor is just a matrix...
        return TTMatrix([tensor.reshape(1, in_shape[0], out_shape[0], 1)])

    new_idx = list([
        idx for tuple_ in zip(range(n_input), range(n_input, 2 * n_input))
        for idx in tuple_
    ])
    new_shape = list([a * b for (a, b) in zip(in_shape, out_shape)])
    tensor = tl.reshape(tl.transpose(tensor, new_idx), new_shape)

    factors = tensor_train(tensor, rank).factors
    for i in range(len(factors)):
        factors[i] = tl.reshape(
            factors[i], (factors[i].shape[0], in_shape[i], out_shape[i], -1))

    return TTMatrix(factors)
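
A usage sketch following the docstring's example, assuming tensor_train and TTMatrix are TensorLy's implementations:

import numpy as np
import tensorly as tl

matrix = tl.tensor(np.random.random((4, 9)))
tensorized = tl.reshape(matrix, (2, 2, 3, 3))  # in_shape=(2, 2), out_shape=(3, 3)
tt_m = tensor_train_matrix(tensorized, rank=[1, 2, 1])
# The two fourth-order cores have shapes (1, 2, 3, 2) and (2, 2, 3, 1).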
Example 15
def als(tensor,rank,factors=None,it_max=100,tol=1e-7,list_factors=False,error_fast=True,time_rec=False):
  """
    ALS methode of CP decomposition

    Parameters
    ----------
    tensor : tensor
    rank : int
    factors : list of matrices, optional
        an initial factor matrices. The default is None.
    it_max : int, optional
        maximal number of iteration. The default is 100.
    tol : float, optional
        error tolerance. The default is 1e-7.
    list_factors : boolean, optional
        If true, then return factor matrices of each iteration. The default is False.
    error_fast : boolean, optional
        If true, use err_fast to compute data fitting error, otherwise, use err. The default is True.
    time_rec : boolean, optional
        If true, return computation time of each iteration. The default is False.

    Returns
    -------
    the CP decomposition, number of iteration and termination criterion. 
    list_fac and list_time are optional.
  """
  N=tl.ndim(tensor) # order of tensor
  norm_tensor=tl.norm(tensor) # norm of tensor
  if time_rec == True : list_time=[]
  if list_factors==True : list_fac=[] # list of factor matrices

  if factors is None: factors = svd_init_fac(tensor, rank)

  weights=None
  it=0
  if list_factors==True : list_fac.append(copy.deepcopy(factors))
  error=[err(tensor,weights,factors)/norm_tensor]
  while (error[len(error)-1]>tol and it<it_max):
    if time_rec == True : tic=time.time() 
    for n in range(N):
      V=np.ones((rank,rank))
      for i in range(len(factors)):
        if i != n : V=V*tl.dot(tl.transpose(factors[i]),factors[i])
      W=tl.cp_tensor.unfolding_dot_khatri_rao(tensor, (None,factors), n) 
      factors[n]= tl.transpose(tl.solve(tl.transpose(V),tl.transpose(W)))
    if list_factors==True : list_fac.append(copy.deepcopy(factors))
    it=it+1
    if (error_fast==False) : error.append(err(tensor,weights,factors)/norm_tensor)
    else : error.append(err_fast(norm_tensor,factors[N-1],V,W)/norm_tensor)
    if time_rec == True : 
      toc=time.time() 
      list_time.append(toc-tic)
  # weights,factors=tl.cp_tensor.cp_normalize((None,factors))
  if list_factors==True and time_rec==True: return(weights,factors,it,error,list_fac,list_time)
  if time_rec==True : return(weights,factors,it,error,list_time)
  if list_factors==True : return(weights,factors,it,error,list_fac)
  return(weights,factors,it,error)
Example 16
def tucker_trl(x, weight, project_input=False, bias=None):
    n_input = tl.ndim(x) - 1
    if project_input:
        x = tenalg.multi_mode_dot(x,
                                  weight.factors[:n_input],
                                  modes=range(1, n_input + 1),
                                  transpose=True)
        regression_weights = tenalg.multi_mode_dot(weight.core,
                                                   weight.factors[n_input:],
                                                   modes=range(
                                                       n_input, weight.order))
    else:
        regression_weights = weight.to_tensor()

    if bias is None:
        return tenalg.inner(x, regression_weights, n_modes=tl.ndim(x) - 1)
    else:
        return tenalg.inner(x, regression_weights,
                            n_modes=tl.ndim(x) - 1) + bias
Example 17
def dtd(factors_old, X_old, X_new, rank, n_iter=1, mu=1, verbose=False):

    weights = tl.ones(rank)
    if verbose:
        X = tl.tensor(np.concatenate((X_old, X_new)))
    n_dim = tl.ndim(X_old)
    U = factors_old.copy()

    for i in range(n_iter):
        # temporal mode for A1
        V = tl.tensor(np.ones((rank, rank)))
        for j, factor in enumerate(U):
            if j != 0:
                V = V * tl.dot(tl.transpose(factor), factor)
        mttkrp = unfolding_dot_khatri_rao(X_new, (None, U), 0)
        A1 = tl.transpose(tl.solve(tl.transpose(V), tl.transpose(mttkrp)))

        # non-temporal mode
        for mode in range(1, n_dim):
            U1 = U.copy()
            U1[0] = A1
            V = tl.tensor(np.ones((rank, rank)))
            W = tl.tensor(np.ones((rank, rank)))
            for j, factor in enumerate(U):
                factor_old = factors_old[j]
                if j != mode:
                    W = W * tl.dot(tl.transpose(factor_old), factor)
                    if j == 0:
                        V = V * (mu * tl.dot(tl.transpose(factor), factor) +
                                 tl.dot(tl.transpose(A1), A1))
                    else:
                        V = V * tl.dot(tl.transpose(factor), factor)
            mttkrp0 = mu * tl.dot(factors_old[mode], W)
            mttkrp1 = unfolding_dot_khatri_rao(X_new, (None, U1), mode)
            U[mode] = tl.transpose(
                tl.solve(tl.transpose(V), tl.transpose(mttkrp0 + mttkrp1)))

        # temporal mode for A0
        V = tl.tensor(np.ones((rank, rank)))
        W = tl.tensor(np.ones((rank, rank)))
        for j, factor in enumerate(U):
            factor_old = factors_old[j]
            if j != 0:
                V = V * tl.dot(tl.transpose(factor), factor)
                W = W * tl.dot(tl.transpose(factor_old), factor)
        mttkrp = tl.dot(factors_old[0], W)
        U[0] = tl.transpose(tl.solve(tl.transpose(V), tl.transpose(mttkrp)))
        if verbose:
            U1 = U.copy()
            U1[0] = np.concatenate((U[0], A1))
            X_est = construct_tensor(U1)
            compare_tensors(X, X_est)

    U[0] = np.concatenate((U[0].copy(), A1))
    return KruskalTensor((weights, U))
Example 18
def symmetric_parafac_power_iteration(tensor,
                                      rank,
                                      n_repeat=10,
                                      n_iteration=10,
                                      verbose=False):
    """Symmetric CP Decomposition via Robust Symmetric Tensor Power Iteration

    Parameters
    ----------
    tensor : tl.tensor
        input tensor to decompose, must be symmetric of shape (size, )*order
    rank : int
        rank of the decomposition (number of rank-1 components)
    n_repeat : int, default is 10
        number of initializations to be tried
    n_iteration : int, default is 10
        number of power iterations
    verbose : bool
        level of verbosity

    Returns
    -------
    (weights, factor)

    weights : 1-D tl.tensor of length `rank`
        contains the eigenvalue of each eigenvector
    factor : 2-D tl.tensor of shape (size, rank)
        each column corresponds to one eigenvector
    """
    rank = validate_cp_rank(tl.shape(tensor), rank=rank)

    order = tl.ndim(tensor)
    size = tl.shape(tensor)[0]

    if not tl.shape(tensor) == (size, ) * order:
        raise ValueError(
            'The input tensor does not have the same size along each mode.')

    factor = []
    weights = []

    for _ in range(rank):
        eigenval, eigenvec, deflated = symmetric_power_iteration(
            tensor,
            n_repeat=n_repeat,
            n_iteration=n_iteration,
            verbose=verbose)
        factor.append(eigenvec)
        weights.append(eigenval)
        tensor = deflated

    factor = tl.stack(factor, axis=1)
    weights = tl.stack(weights)

    return weights, factor
Example 19
def _validate_tr_tensor(tr_tensor):
    factors = tr_tensor
    n_factors = len(factors)

    if n_factors < 2:
        raise ValueError(
            'A Tensor Ring tensor should be composed of at least two factors. '
            'However, {} factors were given.'.format(n_factors))

    rank = []
    shape = []
    for index, factor in enumerate(factors):
        current_rank, current_shape, next_rank = tl.shape(factor)

        # Check that factors are third order tensors
        if not tl.ndim(factor) == 3:
            raise ValueError(
                'TR expresses a tensor as third order factors (tr-cores).\n'
                'However, tl.ndim(factors[{}]) = {}'.format(
                    index, tl.ndim(factor)))

        # Consecutive factors should have matching ranks
        if tl.shape(factors[index - 1])[2] != current_rank:
            raise ValueError(
                'Consecutive factors should have matching ranks\n'
                ' -- e.g. tl.shape(factors[0])[2]) == tl.shape(factors[1])[0])\n'
                'However, tl.shape(factor[{}])[2] == {} but'
                ' tl.shape(factor[{}])[0] == {}'.format(
                    index - 1,
                    tl.shape(factors[index - 1])[2], index, current_rank))

        shape.append(current_shape)
        rank.append(current_rank)

    # Add last rank (boundary condition)
    rank.append(next_rank)

    return tuple(shape), tuple(rank)
Example 20
def tensordot(tensor1, tensor2, modes, batched_modes=()):
    """Batched tensor contraction between two tensors on specified modes
    
    Parameters
    ----------
    tensor1 : tl.tensor
    tensor2 : tl.tensor
    modes : int list or int
        modes on which to contract tensor1 and tensor2
    batched_modes : int or tuple[int]

    Returns
    -------
    contraction : tensor1 contracted with tensor2 on the specified modes
    """
    modes1, modes2 = _validate_contraction_modes(tensor1.shape, tensor2.shape,
                                                 modes)
    batch_modes1, batch_modes2 = _validate_contraction_modes(
        tensor1.shape, tensor2.shape, batched_modes, batched_modes=True)

    start = ord('a')
    order_t1 = tl.ndim(tensor1)
    all_modes1 = [chr(start + i) for i in range(order_t1)]
    all_modes2 = [chr(start + i + order_t1) for i in range(tl.ndim(tensor2))]

    for m1, m2 in zip(modes1 + batch_modes1, modes2 + batch_modes2):
        all_modes2[m2] = all_modes1[m1]

    remaining_modes1 = [j for i, j in enumerate(all_modes1) if i not in modes1]
    remaining_modes2 = [
        j for i, j in enumerate(all_modes2) if i not in modes2 + batch_modes2
    ]
    remaining_modes = remaining_modes1 + remaining_modes2
    to_str = lambda x: ''.join(x)
    equation = f'{to_str(all_modes1)},{to_str(all_modes2)}->{to_str(remaining_modes)}'

    return tl.einsum(equation, tensor1, tensor2)
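
For intuition, when contracting mode 2 of a third-order tensor1 with mode 0 of a matrix tensor2 (no batched modes), the equation built above is 'abc,ce->abe': mode 2 of tensor1 shares the letter 'c' with mode 0 of tensor2. A minimal sketch of that contraction (NumPy backend assumed):

import numpy as np
import tensorly as tl

t1 = tl.tensor(np.random.random((2, 3, 4)))
t2 = tl.tensor(np.random.random((4, 5)))
res = tl.einsum('abc,ce->abe', t1, t2)  # shape (2, 3, 5)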
Example 21
def test_non_negative_tucker(monkeypatch):
    """Test for non-negative Tucker"""
    rng = tl.check_random_state(1234)

    tol_norm_2 = 10e-1
    tol_max_abs = 10e-1
    tensor = tl.tensor(rng.random_sample((3, 4, 3)) + 1)
    core, factors = tucker(tensor, rank=[3, 4, 3], n_iter_max=200, verbose=1)
    nn_core, nn_factors = non_negative_tucker(tensor, rank=[3, 4, 3], n_iter_max=100)

    # Make sure all components are positive
    for factor in nn_factors:
        assert_(tl.all(factor >= 0))
    assert_(tl.all(nn_core >= 0))

    reconstructed_tensor = tucker_to_tensor((core, factors))
    nn_reconstructed_tensor = tucker_to_tensor((nn_core, nn_factors))
    error = tl.norm(reconstructed_tensor - nn_reconstructed_tensor, 2)
    error /= tl.norm(reconstructed_tensor, 2)
    assert_(error < tol_norm_2,
            'norm 2 of reconstruction error higher than tol')

    # Test the max abs difference between the reconstruction and the tensor
    assert_(tl.norm(reconstructed_tensor - nn_reconstructed_tensor, 'inf') < tol_max_abs,
              'abs norm of reconstruction error higher than tol')

    core_svd, factors_svd = non_negative_tucker(tensor, rank=[3, 4, 3], n_iter_max=500, init='svd', verbose=1)
    core_random, factors_random = non_negative_tucker(tensor, rank=[3, 4, 3], n_iter_max=200, init='random', random_state=1234)
    rec_svd = tucker_to_tensor((core_svd, factors_svd))
    rec_random = tucker_to_tensor((core_random, factors_random))
    error = tl.norm(rec_svd - rec_random, 2)
    error /= tl.norm(rec_svd, 2)
    assert_(error < tol_norm_2,
            'norm 2 of difference between svd and random init too high')
    assert_(tl.norm(rec_svd - rec_random, 'inf') < tol_max_abs,
            'abs norm of difference between svd and random init too high')

    # Test for a single rank passed
    # (should be used for all modes)
    rank = 3
    target_shape = (rank, )*tl.ndim(tensor)
    core, factors = non_negative_tucker(tensor, rank=rank)
    assert_(tl.shape(core) == target_shape, 'core has the wrong shape, got {}, but expected {}.'.format(tl.shape(core), target_shape))
    for i, f in enumerate(factors):
        expected_shape = (tl.shape(tensor)[i], rank)
        assert_(tl.shape(f) == expected_shape, '{}-th factor has the wrong shape, got {}, but expected {}.'.format(
                i, tl.shape(f), expected_shape))

    assert_class_wrapper_correctly_passes_arguments(monkeypatch, non_negative_tucker, Tucker_NN, ignore_args={'return_errors'}, rank=3)
Example 22
def parafac_power_iteration(tensor,
                            rank,
                            n_repeat=10,
                            n_iteration=10,
                            verbose=0):
    """CP Decomposition via Robust Tensor Power Iteration

    Parameters
    ----------
    tensor : tl.tensor
        input tensor to decompose
    rank : int
        rank of the decomposition (number of rank-1 components)
    n_repeat : int, default is 10
        number of initializations to be tried
    n_iteration : int, default is 10
        number of power iterations
    verbose : bool
        level of verbosity

    Returns
    -------
    (weights, factors)

    weights : 1-D tl.tensor of length `rank`
        contains the eigenvalue of each eigenvector
    factors : list of 2-D tl.tensor of shape (size, rank)
        Each column of each factor corresponds to one eigenvector
    """
    rank = validate_cp_rank(tl.shape(tensor), rank=rank)

    order = tl.ndim(tensor)
    factors = []
    weights = []

    for _ in range(rank):
        eigenval, eigenvec, deflated = power_iteration(tensor,
                                                       n_repeat=n_repeat,
                                                       n_iteration=n_iteration,
                                                       verbose=verbose)
        factors.append(eigenvec)
        weights.append(eigenval)
        tensor = deflated

    factors = [tl.stack([f[i] for f in factors], axis=1) for i in range(order)]
    weights = tl.stack(weights)

    return weights, factors
Example 23
def tl_sample_uniform(tensor, nsamp):
    """Uniformly sample 'nsamp' indices from a tensor 'tensor' along with
    corresponding values and the weight of the sample.

    Parameters
    ----------
    tensor : ndarray
      Dense tensor
    nsamp : integer
      number of samples

    Returns
    -------
    subs : ndarray
        Subscripts (indices)
    vals : ndarray
        Values
    wgts : ndarray
        Weights
    """
    d = tl.ndim(tensor)
    shp = tl.shape(tensor)
    tsz = 1
    for i in shp:
        tsz *= i

    # generate subscripts
    subSamp = lambda x, y: np.ceil(x * y)
    subs = subSamp(np.random.rand(nsamp, d), shp)
    subs = subs.astype(int) - 1  # adjust for zero-indexing

    # quick check that indices are in bounds
    if tl.min(subs) < 0:
        raise ValueError("Bad subscripts generated for sampling.")

    # capture corresponding values for subscripts
    vals = []
    for i in subs:
        index = tuple(i.tolist())
        vals.append(tensor[index])
    vals = tl.reshape(tl.tensor(vals), (len(vals), 1))

    # calculate weights for sample
    wgts = tsz / nsamp * tl.ones((nsamp, 1))

    return subs, vals, wgts
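
A quick usage sketch (NumPy backend assumed):

import numpy as np
import tensorly as tl

tensor = tl.tensor(np.random.random((3, 4, 5)))
subs, vals, wgts = tl_sample_uniform(tensor, nsamp=10)
# subs: (10, 3) integer indices; vals: (10, 1) sampled entries;
# wgts: (10, 1) constant weights, each equal to 60 / 10 (number of entries / nsamp)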
Example 24
def err_rand(tensor, weights, factors, nb_samples, indices_list=None):
    """
    Error estimation proposed in CPRAND

    Parameters
    ----------
    tensor : tensor
    weights : vector
        the weights of CP decomposition
    factors : list of matrices
        factor matrices of CP decomposition
    nb_samples : int
        number of samples
    indices_list : tuple, optional
        index lists of the sample. The default is None.

    Returns
    -------
    (float, tuple)
        the error estimate and the indices_list that was used

    """
    # if indices_list is not given
    if indices_list is None:
        indices_list = [
            np.random.choice(tl.shape(m)[0], nb_samples) for m in factors
        ]
        # works if nb_samples <= tl.shape(m)[0] for m in factors
        indices_list = [i.tolist() for i in indices_list]
        indices_list = tuple(indices_list)
    est_values = []
    # nb of terms in tensor
    P = 1
    for i in tl.shape(tensor):
        P = P * i
    for i in range(nb_samples):
        if weights is None: value = 1
        else: value = weights
        for mode in range(tl.ndim(tensor)):
            value = value * factors[mode][indices_list[mode][i], :]
        est_values += [sum(value)]
    list_e = (tensor[indices_list] - est_values)**2
    # assume max(list_e) = 1 if terms are in [0,1]
    return (np.sqrt(sum(list_e) * P / nb_samples), indices_list)
Example 25
def initialize_cp(tensor: np.ndarray, matrix: np.ndarray, rank: int):
    r"""Initialize factors used in `parafac`.
    Parameters
    ----------
    tensor : ndarray
    rank : int
    Returns
    -------
    factors : CPTensor
        An initial cp tensor.
    """
    factors = []
    for mode in range(tl.ndim(tensor)):
        unfold = tl.unfold(tensor, mode)

        if mode == 0 and (matrix is not None):
            unfold = np.hstack((unfold, matrix))

        # Keep only columns with more than two observed (finite) entries
        unfold = unfold[:, np.sum(np.isfinite(unfold), axis=0) > 2]

        # Impute by PCA
        outt = PCA(unfold,
                   ncomp=1,
                   method="nipals",
                   missing="fill-em",
                   standardize=False,
                   demean=False,
                   normalize=False,
                   max_em_iter=1000)
        recon_pca = outt.scores @ outt.loadings.T
        unfold[np.isnan(unfold)] = recon_pca[np.isnan(unfold)]

        U = np.linalg.svd(unfold)[0]

        if U.shape[1] < rank:
            # This is a hack but it seems to do the job for now
            pad_part = np.random.rand(U.shape[0], rank - U.shape[1])
            U = tl.concatenate([U, pad_part], axis=1)

        factors.append(U[:, :rank])

    return tl.cp_tensor.CPTensor((None, factors))
Example 26
    def parafac(self, tensor, rank, n_iter_max=100, tol=1e-8):
        factors = initialize_factors(tensor, rank)
        rec_errors = []
        norm_tensor = tl.norm(tensor, 2)

        for iteration in range(n_iter_max):
            for mode in range(tl.ndim(tensor)):
                # No reverse of factors, because tensorly's unfold works differently
                # First frontal slice:
                # array([[ 1,  2,  3,  4],
                #        [ 5,  6,  7,  8],
                #        [ 9, 10, 11, 12]])
                #
                # Second frontal slice:
                # array([[13, 14, 15, 16],
                #        [17, 18, 19, 20],
                #        [21, 22, 23, 24]])
                #
                # 0th unfolding:
                # array([[ 1, 13,  2, 14,  3, 15,  4, 16],
                #        [ 5, 17,  6, 18,  7, 19,  8, 20],
                #        [ 9, 21, 10, 22, 11, 23, 12, 24]])
                mode_factors = [f for i, f in enumerate(factors) if i != mode]
                mode_sq_factors = [f.T @ f for f in mode_factors]
                unfold = tl.unfold(tensor, mode)
                m1 = khatri_rao(mode_factors)
                # Use the pseudo-inverse to guard against singular Gram matrices
                m2 = np.linalg.pinv(reduce(lambda x, y: x * y, mode_sq_factors))
                factor = unfold @ m1 @ m2
                factors[mode] = factor

            rec_error = tl.norm(tensor - tl.kruskal_to_tensor((None, factors)), order=2)
            rec_error = rec_error / norm_tensor
            rec_errors.append(rec_error)

            if iteration >= 1:
                rec_error_decrease = abs(rec_errors[-2] - rec_errors[-1])
                stop_flag = rec_error_decrease < tol
                
                if stop_flag:
                    break

        return kruskal_normalise(KruskalTensor((None, factors)))
Example 27
def random_init_fac(tensor, rank, nn=False):
    """
    Random initialization of factor matrices for a given tensor and rank
    random numbers on [0,1)
                       
    Parameters
    ----------
    tensor : tensor
    rank : int

    Returns
    -------
    factors : list of matrices
  """
    factors = []
    for mode in range(tl.ndim(tensor)):
        factors += [np.random.random(
            (tl.shape(tensor)[mode], rank))]  # random on [0,1)
    return factors
Example 28
def test_unfolding_dot_khatri_rao():
    """Test for unfolding_dot_khatri_rao
    
    Check against other version check sparse safe
    """
    shape = (10, 10, 10, 4)
    rank = 5
    tensor = tl.tensor(np.random.random(shape))
    weights, factors = random_cp(shape=shape, rank=rank, 
                                      full=False, normalise_factors=True)
    
    for mode in range(tl.ndim(tensor)):
        # Version forming the khatri-rao product explicitly
        unfolded = unfold(tensor, mode)
        kr_factors = khatri_rao(factors, weights=weights, skip_matrix=mode)
        true_res = tl.dot(unfolded, kr_factors)

        # Efficient sparse-safe version
        res = unfolding_dot_khatri_rao(tensor, (weights, factors), mode)
        assert_array_almost_equal(true_res, res, decimal=3)
Example 29
def one_ntd_step_mu(tensor, ranks, in_core, in_factors, beta, norm_tensor,
                   fixed_modes, normalize, mode_core_norm):
    """
    One step of Multiplicative Update applied to every mode of the tensor
    and on the core.
    """
    # Copy
    core = in_core.copy()
    factors = in_factors.copy()

    # Generating the mode update sequence
    modes_list = [mode for mode in range(tl.ndim(tensor)) if mode not in fixed_modes]
    
    for mode in modes_list:
        factors[mode] = mu.mu_betadivmin(factors[mode], tl.unfold(tl.tenalg.multi_mode_dot(core, factors, skip = mode), mode), tl.unfold(tensor,mode), beta)

    core = mu.mu_tensorial(core, factors, tensor, beta)

    if normalize[-1]:
        unfolded_core = tl.unfold(core, mode_core_norm)
        for idx_mat in range(unfolded_core.shape[0]):
            if tl.norm(unfolded_core[idx_mat]) != 0:
                unfolded_core[idx_mat] = unfolded_core[idx_mat] / tl.norm(unfolded_core[idx_mat], 2)
        core = tl.fold(unfolded_core, mode_core_norm, core.shape)

    # # Adding the l1 norm value to the reconstruction error
    # sparsity_error = 0
    # for index, sparse in enumerate(sparsity_coefficients):
    #     if sparse:
    #         if index < len(factors):
    #             sparsity_error += 2 * (sparse * np.linalg.norm(factors[index], ord=1))
    #         elif index == len(factors):
    #             sparsity_error += 2 * (sparse * tl.norm(core, 1))
    #         else:
    #             raise NotImplementedError("TODEBUG: Too many sparsity coefficients, should have been raised before.")

    reconstructed_tensor = tl.tenalg.multi_mode_dot(core, factors)

    cost_fct_val = beta_div.beta_divergence(tensor, reconstructed_tensor, beta)
    
    return core, factors, cost_fct_val
Example 30
def general_conv1d_(x, kernel, mode, bias=None, stride=1, padding=0, groups=1, dilation=1, verbose=False):
    """General 1D convolution along the mode-th dimension

    Parameters
    ----------
    x : tensor of shape (batch_size, in_channels, K1, ..., KN)
    kernel : tensor of shape (out_channels, in_channels/groups, K{mode})
    mode : int
        dimension along which to perform the convolution
    stride : int
    padding : int
    groups : int
        typically equal to the number of input channels,
        at least for CP convolutions

    Returns
    -------
    x convolved with the given kernel, along dimension `mode`
    """
    if verbose:
        print(f'Convolving {x.shape} with {kernel.shape} along mode {mode}, '
              f'stride={stride}, padding={padding}, groups={groups}')

    in_channels = tl.shape(x)[1]
    n_dim = tl.ndim(x)
    permutation = list(range(n_dim))
    spatial_dim = permutation.pop(mode)
    channels_dim = permutation.pop(1)
    permutation += [channels_dim, spatial_dim]
    x = tl.transpose(x, permutation)
    x_shape = list(x.shape)
    x = tl.reshape(x, (-1, in_channels, x_shape[-1]))
    x = F.conv1d(x.contiguous(), kernel, bias=bias, stride=stride, dilation=dilation, padding=padding, groups=groups)
    x_shape[-2:] = x.shape[-2:]
    x = tl.reshape(x, x_shape)
    permutation = list(range(n_dim))[:-2]
    permutation.insert(1, n_dim - 2)
    permutation.insert(mode, n_dim - 1)
    x = tl.transpose(x, permutation)
    
    return x