Example #1
# Imports assumed, following TensorLy's layout (this snippet uses its backend API):
from tensorly import backend as T
from tensorly.base import fold, unfold, vec_to_tensor


def mode_dot(tensor, matrix_or_vector, mode):
    """n-mode product of a tensor and a matrix or vector at the specified mode

        Mathematically: :math:`\\text{tensor} \\times_{\\text{mode}} \\text{matrix or vector}`


        Parameters
        ----------
        tensor : ndarray
            tensor of shape ``(i_1, ..., i_k, ..., i_N)``
        matrix_or_vector : ndarray
            1D or 2D array of shape ``(J, i_k)`` or ``(i_k, )``
            matrix or vectors to which to n-mode multiply the tensor
        mode : int

        Returns
        -------
        ndarray
            `mode`-mode product of `tensor` by `matrix_or_vector`
            * of shape :math:`(i_1, ..., i_{k-1}, J, i_{k+1}, ..., i_N)` if matrix_or_vector is a matrix
            * of shape :math:`(i_1, ..., i_{k-1}, i_{k+1}, ..., i_N)` if matrix_or_vector is a vector

        See also
        --------
        multi_mode_dot : chaining several mode_dot in one call
        """
    # mode along which to refold the result (contracting with a vector instead
    # removes this mode from the output shape)
    fold_mode = mode
    new_shape = list(tensor.shape)

    if T.ndim(matrix_or_vector) == 2:  # Tensor times matrix
        # Test for the validity of the operation
        if matrix_or_vector.shape[1] != tensor.shape[mode]:
            raise ValueError(
                'shapes {0} and {1} not aligned in mode-{2} multiplication: {3} (mode {2}) != {4} (dim 1 of matrix)'
                .format(tensor.shape, matrix_or_vector.shape, mode,
                        tensor.shape[mode], matrix_or_vector.shape[1]))
        new_shape[mode] = matrix_or_vector.shape[0]
        vec = False

    elif T.ndim(matrix_or_vector) == 1:  # Tensor times vector
        if matrix_or_vector.shape[0] != tensor.shape[mode]:
            raise ValueError(
                'shapes {0} and {1} not aligned for mode-{2} multiplication: {3} (mode {2}) != {4} (vector size)'
                .format(tensor.shape, matrix_or_vector.shape, mode,
                        tensor.shape[mode], matrix_or_vector.shape[0]))
        if len(new_shape) > 1:
            new_shape.pop(mode)
        else:
            new_shape = [1]
        vec = True

    else:
        raise ValueError(
            'Can only take n_mode_product with a vector or a matrix. '
            'Provided array of dimension {} not in [1, 2].'.format(
                T.ndim(matrix_or_vector)))

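    # multiply the mode-`mode` unfolding of the tensor from the left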
    res = T.dot(matrix_or_vector, unfold(tensor, mode))

    if vec:  # tensor times vector: res is 1D, reshape it to the remaining modes
        return vec_to_tensor(res, shape=new_shape)
    else:  # tensor times matrix: refold the unfolding
        return fold(res, fold_mode, new_shape)
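
A minimal usage sketch (assuming TensorLy and NumPy are installed and that
`mode_dot` is the function above, importable from `tensorly.tenalg`; the
shapes are arbitrary):

import numpy as np
import tensorly as tl
from tensorly.tenalg import mode_dot

X = tl.tensor(np.arange(24, dtype=float).reshape((3, 4, 2)))
M = tl.tensor(np.ones((5, 4)))  # matrix: its dim 1 must match X.shape[1]
v = tl.tensor(np.ones(4))       # vector: its length must match X.shape[1]

print(tl.shape(mode_dot(X, M, mode=1)))  # (3, 5, 2): mode-1 size 4 replaced by J=5
print(tl.shape(mode_dot(X, v, mode=1)))  # (3, 2): mode 1 is contracted away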
Example #2
# Imports assumed, following TensorLy's layout (monotonicity_prox lives in the
# same proximal-operator module as this function):
import tensorly as tl
from tensorly.tenalg.proximal import monotonicity_prox


def unimodality_prox(tensor):
    """
    This function projects each column of the input array on the set of arrays so that
          x[1] <= x[2] <= x[j] >= x[j+1]... >= x[n]
    is satisfied columnwise.

    Parameters
    ----------
    tensor : ndarray

    Returns
    -------
    ndarray
        A tensor whose columns' distributions are unimodal.

    References
    ----------
    .. [1] Bro, R., & Sidiropoulos, N. D. (1998). Least squares algorithms under
           unimodality and non-negativity constraints. Journal of Chemometrics:
           A Journal of the Chemometrics Society, 12(4), 223-247.
    """
    if tl.ndim(tensor) == 1:
        tensor = tl.vec_to_tensor(tensor, [tl.shape(tensor)[0], 1])
    elif tl.ndim(tensor) > 2:
        raise ValueError(
            "Unimodality prox doesn't support inputs with more than 2 dimensions."
        )

    tensor_unimodal = tl.copy(tensor)
    monotone_increasing = tl.tensor(monotonicity_prox(tensor),
                                    **tl.context(tensor))
    monotone_decreasing = tl.tensor(monotonicity_prox(tensor, decreasing=True),
                                    **tl.context(tensor))
    # Find candidate peak points: entries that lie on or above both monotone fits
    values = tl.tensor(
        tl.to_numpy((tensor - monotone_decreasing >= 0)) * tl.to_numpy(
            (tensor - monotone_increasing >= 0)), **tl.context(tensor))

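    # Cost of making everything before each candidate peak increasing: running
    # sum of |tensor - increasing fit|, excluding the candidate entry itself.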
    sum_inc = tl.where(values == 1,
                       tl.cumsum(tl.abs(tensor - monotone_increasing), axis=0),
                       tl.tensor(0, **tl.context(tensor)))
    sum_inc = tl.where(values == 1,
                       sum_inc - tl.abs(tensor - monotone_increasing),
                       tl.tensor(0, **tl.context(tensor)))
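    # Mirror cost for the part after each candidate peak, computed on the
    # flipped tensor against the decreasing fit.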
    sum_dec = tl.where(
        tl.flip(values, axis=0) == 1,
        tl.cumsum(tl.abs(
            tl.flip(tensor, axis=0) - tl.flip(monotone_decreasing, axis=0)),
                  axis=0), tl.tensor(0, **tl.context(tensor)))
    sum_dec = tl.where(
        tl.flip(values, axis=0) == 1, sum_dec -
        tl.abs(tl.flip(tensor, axis=0) - tl.flip(monotone_decreasing, axis=0)),
        tl.tensor(0, **tl.context(tensor)))

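    # Total cost at each candidate peak; non-peak entries get the global max
    # so that the argmin below picks the cheapest peak in each column.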
    difference = tl.where(values == 1, sum_inc + tl.flip(sum_dec, axis=0),
                          tl.max(sum_inc + tl.flip(sum_dec, axis=0)))
    min_indice = tl.argmin(difference, axis=0)

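    # For each column, keep the increasing fit before the chosen peak and the
    # decreasing fit after it; the peak entry itself stays as in the input.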
    for i in range(len(min_indice)):
        tensor_unimodal = tl.index_update(
            tensor_unimodal, tl.index[:int(min_indice[i]), i],
            monotone_increasing[:int(min_indice[i]), i])
        tensor_unimodal = tl.index_update(
            tensor_unimodal, tl.index[int(min_indice[i] + 1):, i],
            monotone_decreasing[int(min_indice[i] + 1):, i])
    return tensor_unimodal
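
A quick sketch of the projection in action, calling the function defined above
(the input values are arbitrary):

import numpy as np
import tensorly as tl

Y = tl.tensor(np.array([[0.0, 3.0],
                        [2.0, 1.0],
                        [1.0, 2.0],
                        [3.0, 0.0]]))
# Each column of the result rises to (at most) one peak and then falls.
print(unimodality_prox(Y))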