Example #1
    def decompose(self, tensor, rank, keep_meta=0):
        """ Performs tucker decomposition via Higher Order Singular Value Decomposition (HOSVD)

        Parameters
        ----------
        tensor : Tensor
            Multidimensional data to be decomposed
        rank : tuple
            Desired multilinear rank for the given `tensor`
        keep_meta : int
            Keep meta information about modes of the given `tensor`.
            0 - the output will have default values for the meta data
            1 - keep only mode names
            2 - keep mode names and indices

        Returns
        -------
        tensor_tkd : TensorTKD
            Tucker representation of the `tensor`
        """
        if not isinstance(tensor, Tensor):
            raise TypeError(
                "Parameter `tensor` should be an object of `Tensor` class!")
        if not isinstance(rank, tuple):
            raise TypeError("Parameter `rank` should be passed as a tuple!")
        if tensor.order != len(rank):
            raise ValueError(
                "Parameter `rank` should be tuple of the same length as the order of a tensor:\n"
                "{} != {} (tensor.order != len(rank))".format(
                    tensor.order, len(rank)))
        fmat = [np.array([])] * tensor.order
        core = tensor.copy()
        # TODO: can add check for self.process here
        if not self.process:
            self.process = tuple(range(tensor.order))
        for mode in range(tensor.order):
            if mode not in self.process:
                fmat[mode] = np.eye(tensor.shape[mode])
                continue
            tensor_unfolded = unfold(tensor.data, mode)
            U, _, _ = svd(tensor_unfolded, rank[mode])
            fmat[mode] = U
            core.mode_n_product(U.T, mode=mode)
        tensor_tkd = TensorTKD(fmat=fmat, core_values=core.data)
        if self.verbose:
            residual = residual_tensor(tensor, tensor_tkd)
            print('Relative error of approximation = {}'.format(
                abs(residual.frob_norm / tensor.frob_norm)))

        if keep_meta == 1:
            mode_names = {i: mode.name for i, mode in enumerate(tensor.modes)}
            tensor_tkd.set_mode_names(mode_names=mode_names)
        elif keep_meta == 2:
            tensor_tkd.copy_modes(tensor)
        else:
            pass

        return tensor_tkd
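
A minimal usage sketch for this routine, assuming it is the decompose method of the HOSVD class from the hottbox library (whose Tensor and TensorTKD classes appear above); the import paths and the verbose constructor flag are assumptions:

import numpy as np
from hottbox.core import Tensor
from hottbox.algorithms.decomposition import HOSVD

# Wrap a random order-3 array and compute a multilinear rank-(2, 3, 4) approximation
tensor = Tensor(np.random.rand(5, 6, 7))
hosvd = HOSVD(verbose=True)  # `verbose` assumed to back self.verbose in the method above
tensor_tkd = hosvd.decompose(tensor, rank=(2, 3, 4), keep_meta=1)
print(tensor_tkd)  # TensorTKD with three factor matrices and a (2, 3, 4) core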
Example #2
def mse(tensor_true, tensor_pred):
    """ Mean squared error

    Parameters
    ----------
    tensor_true : Tensor
    tensor_pred : {Tensor, TensorCPD, TensorTKD, TensorTT}

    Returns
    -------
    float
    """
    tensor_res = residual_tensor(tensor_true, tensor_pred)
    return np.mean(tensor_res.data ** 2)
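
For intuition, the same quantity can be reproduced with plain NumPy when both arguments are dense arrays of the same shape; this sketch assumes residual_tensor simply forms the element-wise difference:

import numpy as np

def mse_dense(true_data, pred_data):
    """Mean squared error between two equally shaped arrays."""
    residual = true_data - pred_data
    return np.mean(residual ** 2)

a = np.arange(24, dtype=float).reshape(2, 3, 4)
print(mse_dense(a, a + 0.1))  # 0.01 up to floating-point rounding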
Example #3
def residual_rel_error(tensor_true, tensor_pred):
    """ Relative error of approximation

    Parameters
    ----------
    tensor_true : Tensor
    tensor_pred : {Tensor, TensorCPD, TensorTKD, TensorTT}

    Returns
    -------
    float
    """
    tensor_res = residual_tensor(tensor_true, tensor_pred)
    return tensor_res.frob_norm / tensor_true.frob_norm
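
The same relative error in plain NumPy: the Frobenius norm of an n-dimensional array is the 2-norm of its flattened data, which np.linalg.norm computes by default (again assuming residual_tensor is the element-wise difference):

import numpy as np

def rel_error_dense(true_data, pred_data):
    # ||true - pred||_F / ||true||_F
    return np.linalg.norm(true_data - pred_data) / np.linalg.norm(true_data)

a = np.ones((3, 4, 5))
print(rel_error_dense(a, 0.9 * a))  # 0.1 up to rounding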
Example #4
def mape(tensor_true, tensor_pred):
    """ Mean absolute percentage error

    Parameters
    ----------
    tensor_true : Tensor
    tensor_pred : {Tensor, TensorCPD, TensorTKD, TensorTT}

    Returns
    -------
    float
    """
    # TODO: Fix cases when tensor_true.data contains zeros, since it is the denominator (division by zero -> inf)
    tensor_res = residual_tensor(tensor_true, tensor_pred)
    return np.mean(np.abs(np.divide(tensor_res.data, tensor_true.data)))
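
One way to sidestep the division by zero flagged in the TODO is to restrict the average to entries where the reference tensor is non-zero; a plain-NumPy sketch, not part of the library:

import numpy as np

def mape_safe(true_data, pred_data):
    """MAPE over the entries where the reference value is non-zero."""
    mask = true_data != 0
    residual = true_data[mask] - pred_data[mask]
    return np.mean(np.abs(residual / true_data[mask]))

a = np.array([[0.0, 2.0], [4.0, 8.0]])
b = np.array([[1.0, 2.2], [3.6, 8.0]])
print(mape_safe(a, b))  # averages |0.2/2|, |0.4/4| and |0/8|; the zero entry is skipped -> ~0.0667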
Example #5
    def decompose(self, tensor, rank, keep_meta=0):
        """ Performs TT-SVD on the `tensor` with respect to the specified `rank`

        Parameters
        ----------
        tensor : Tensor
            Multidimensional data to be decomposed
        rank : tuple
            Desired tt-rank for the given `tensor`
        keep_meta : int
            Keep meta information about modes of the given `tensor`.
            0 - the output will have default values for the meta data
            1 - keep only mode names
            2 - keep mode names and indices

        Returns
        -------
        tensor_tt : TensorTT
            Tensor train representation of the `tensor`

        Notes
        -----
        Reshaping of the data is performed with respect to Fortran (column-major) ordering, which makes it
        easy to compare results with the MATLAB implementation by Oseledets. The choice of ordering does not
        affect correctness (only computation time), as long as the reconstruction applies exactly the opposite ordering
        """
        # TODO: implement using C ordering for the reshape
        if not isinstance(tensor, Tensor):
            raise TypeError("Parameter `tensor` should be an object of `Tensor` class!")
        if not isinstance(rank, tuple):
            raise TypeError("Parameter `rank` should be passed as a tuple!")

        # since we consider core tensors to be only of order 3
        if (tensor.order - 1) != len(rank):
            raise ValueError("Incorrect number of values in `rank`:\n"
                             "{} != {} (tensor.order-1 != len(rank))".format(tensor.order, len(rank)))
        # since TT decomposition should compress data
        if any(rank[i] > tensor.shape[i] for i in range(len(rank))):
            raise ValueError("Some values in `rank` are greater then the corresponding mode sizes of a `tensor`:\n"
                             "{} > {} (rank > tensor.shape)".format(rank, tensor.shape[:-1]))
        if rank[-1] > tensor.shape[-1]:
            raise ValueError("The last value in `rank` is greater then the last mode size of a `tensor`:\n"
                             "{} > {} (rank[-1] > tensor.shape[-1])".format(rank[-1], tensor.shape[-1]))

        cores = []
        sizes = tensor.shape
        rank = (1,) + rank + (1,)
        C = tensor.data
        for k in range(tensor.order-1):
            rows = rank[k] * sizes[k]
            C = np.reshape(C, [rows, -1], order='F')
            U, S, V = _svd_tt(C, rank[k + 1])
            # Shouldn't slow down much since order of tensors is not big in general
            if k == 0:
                new_core = np.reshape(U, [sizes[k], rank[k+1]], order='F')
            else:
                new_core = np.reshape(U, [rank[k], sizes[k], rank[k+1]], order='F')
            cores.append(new_core)
            C = np.dot(V, np.diag(S)).T
        new_core = C
        cores.append(new_core)
        tensor_tt = TensorTT(core_values=cores)
        if self.verbose:
            residual = residual_tensor(tensor, tensor_tt)
            print('Relative error of approximation = {}'.format(abs(residual.frob_norm / tensor.frob_norm)))
        if keep_meta == 1:
            mode_names = {i: mode.name for i, mode in enumerate(tensor.modes)}
            tensor_tt.set_mode_names(mode_names=mode_names)
        elif keep_meta == 2:
            tensor_tt.copy_modes(tensor)
        else:
            pass
        return tensor_tt
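
A minimal usage sketch, assuming this is the decompose method of the TTSVD class from hottbox; for an order-3 tensor the checks above require a tt-rank with exactly two entries:

import numpy as np
from hottbox.core import Tensor
from hottbox.algorithms.decomposition import TTSVD

tensor = Tensor(np.random.rand(4, 5, 6))
ttsvd = TTSVD()
tensor_tt = ttsvd.decompose(tensor, rank=(2, 3))
# Expected core shapes, following the loop above: (4, 2), (2, 5, 3) and (3, 6)
print(tensor_tt)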
Example #6
    def decompose(self, tensor, rank, keep_meta=0):
        """ Performs tucker decomposition via Higher Order Orthogonal Iteration (HOOI)

        Parameters
        ----------
        tensor : Tensor
            Multidimensional data to be decomposed
        rank : tuple
            Desired multilinear rank for the given `tensor`
        keep_meta : int
            Keep meta information about modes of the given `tensor`.
            0 - the output will have default values for the meta data
            1 - keep only mode names
            2 - keep mode names and indices

        Returns
        -------
        tensor_tkd : TensorTKD
            Tucker representation of the `tensor`
        """
        if not isinstance(tensor, Tensor):
            raise TypeError(
                "Parameter `tensor` should be an object of `Tensor` class!")
        if not isinstance(rank, tuple):
            raise TypeError("Parameter `rank` should be passed as a tuple!")
        if tensor.order != len(rank):
            raise ValueError(
                "Parameter `rank` should be tuple of the same length as the order of a tensor:\n"
                "{} != {} (tensor.order != len(rank))".format(
                    tensor.order, len(rank)))
        tensor_tkd = None
        fmat_hooi = self._init_fmat(tensor, rank)
        norm = tensor.frob_norm
        if not self.process:
            self.process = tuple(range(tensor.order))
        for n_iter in range(self.max_iter):

            # Update factor matrices
            for i in self.process:
                tensor_approx = tensor.copy()
                for mode, fmat in enumerate(fmat_hooi):
                    if mode == i:
                        continue
                    tensor_approx.mode_n_product(fmat.T, mode=mode)
                fmat_hooi[i], _, _ = svd(tensor_approx.unfold(i).data,
                                         rank=rank[i])

            # Update core
            core = tensor.copy()
            for mode, fmat in enumerate(fmat_hooi):
                core.mode_n_product(fmat.T, mode=mode)

            # Update cost
            tensor_tkd = TensorTKD(fmat=fmat_hooi, core_values=core.data)
            residual = residual_tensor(tensor, tensor_tkd)
            self.cost.append(abs(residual.frob_norm / norm))
            if self.verbose:
                print('Iter {}: relative error of approximation = {}'.format(
                    n_iter, self.cost[-1]))

            # Check termination conditions
            if self.cost[-1] <= self.epsilon:
                if self.verbose:
                    print(
                        'Relative error of approximation has reached the acceptable level: {}'
                        .format(self.cost[-1]))
                break
            if self.converged:
                if self.verbose:
                    print('Converged in {} iteration(s)'.format(len(
                        self.cost)))
                break
        if self.verbose and not self.converged and self.cost[-1] > self.epsilon:
            print('Maximum number of iterations ({}) has been reached. '
                  'Variation = {}'.format(self.max_iter,
                                          abs(self.cost[-2] - self.cost[-1])))

        if keep_meta == 1:
            mode_names = {i: mode.name for i, mode in enumerate(tensor.modes)}
            tensor_tkd.set_mode_names(mode_names=mode_names)
        elif keep_meta == 2:
            tensor_tkd.copy_modes(tensor)
        else:
            pass

        return tensor_tkd
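
A usage sketch mirroring the HOSVD example, assuming this decompose belongs to hottbox's HOOI class; max_iter, epsilon and verbose are assumed constructor parameters backing the attributes used above:

import numpy as np
from hottbox.core import Tensor
from hottbox.algorithms.decomposition import HOOI

tensor = Tensor(np.random.rand(5, 6, 7))
hooi = HOOI(max_iter=50, epsilon=1e-2, verbose=True)  # constructor parameters assumed
tensor_tkd = hooi.decompose(tensor, rank=(2, 3, 4))
print(hooi.cost[-1])  # relative error reached at the last iteration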
Example #7
    def decompose(self, tensor, rank, keep_meta=0, kr_reverse=False):
        """ Performs CPD-ALS on the ``tensor`` with respect to the specified ``rank``

        Parameters
        ----------
        tensor : Tensor
            Multi-dimensional data to be decomposed
        rank : tuple
            Desired Kruskal rank for the given ``tensor``. Should contain only one value.
            If it is greater than any of the dimensions, then random initialisation is used
        keep_meta : int
            Keep meta information about modes of the given ``tensor``.
            0 - the output will have default values for the meta data
            1 - keep only mode names
            2 - keep mode names and indices
        kr_reverse : bool
            Whether to reverse the order of matrices in the Khatri-Rao product (see Notes)

        Returns
        -------
        tensor_cpd : TensorCPD
            CP representation of the ``tensor``

        Notes
        -----
        The Khatri-Rao product should be taken over the factor matrices in reversed order, but doing so
        would duplicate the original data (e.g. images). This probably relates to how data ordering in
        Python interacts with the Khatri-Rao product
        """
        if not isinstance(tensor, Tensor):
            raise TypeError(
                "Parameter `tensor` should be an object of `Tensor` class!")
        if not isinstance(rank, tuple):
            raise TypeError("Parameter `rank` should be passed as a tuple!")
        if len(rank) != 1:
            raise ValueError(
                "Parameter `rank` should be tuple with only one value!")

        self.cost = []  # Reset cost every time the decompose method is called
        tensor_cpd = None
        fmat = self._init_fmat(tensor, rank)
        core_values = np.repeat(np.array([1]), rank)
        norm = tensor.frob_norm
        lm = np.arange(tensor.order).tolist()
        for n_iter in range(self.max_iter):

            # Update factor matrices
            for mode in lm:
                kr_result, idxlist = sampled_khatri_rao(
                    fmat, sample_size=self.sample_size, skip_matrix=mode)
                lmodes = lm[:mode] + lm[mode + 1:]
                xs = np.array([
                    tensor.access(m, lmodes)
                    for m in np.array(idxlist).T.tolist()
                ])

                # Least-squares solve via the normal equations: (kr_result^T kr_result) x = kr_result^T xs
                pos_def = np.dot(kr_result.T, kr_result)
                corr_term = np.dot(kr_result.T, xs)
                min_result = np.linalg.solve(pos_def, corr_term)
                fmat[mode] = min_result.T

            # Update cost
            tensor_cpd = TensorCPD(fmat=fmat, core_values=core_values)
            residual = residual_tensor(tensor, tensor_cpd)
            self.cost.append(abs(residual.frob_norm / norm))
            if self.verbose:
                print('Iter {}: relative error of approximation = {}'.format(
                    n_iter, self.cost[-1]))

            # Check termination conditions
            if self.cost[-1] <= self.epsilon:
                if self.verbose:
                    print(
                        'Relative error of approximation has reached the acceptable level: {}'
                        .format(self.cost[-1]))
                break
            if self.converged:
                if self.verbose:
                    print('Converged in {} iteration(s)'.format(len(
                        self.cost)))
                break
        if self.verbose and not self.converged and self.cost[-1] > self.epsilon:
            print('Maximum number of iterations ({}) has been reached. '
                  'Variation = {}'.format(self.max_iter,
                                          abs(self.cost[-2] - self.cost[-1])))

        if keep_meta == 1:
            mode_names = {i: mode.name for i, mode in enumerate(tensor.modes)}
            tensor_cpd.set_mode_names(mode_names=mode_names)
        elif keep_meta == 2:
            tensor_cpd.copy_modes(tensor)
        else:
            pass
        return tensor_cpd
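
A usage sketch for this sampled variant; the sample_size attribute suggests hottbox's randomised CPD, so the RandomisedCPD class name and its constructor parameters are assumptions:

import numpy as np
from hottbox.core import Tensor
from hottbox.algorithms.decomposition import RandomisedCPD

tensor = Tensor(np.random.rand(6, 7, 8))
cpd = RandomisedCPD(sample_size=25, verbose=True)  # class and parameter names assumed
tensor_cpd = cpd.decompose(tensor, rank=(3,))  # Kruskal rank is passed as a 1-tuple
print(cpd.cost[-1])  # per-iteration relative errors accumulate in self.cost above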
Example #8
    def decompose(self,
                  tensor,
                  rank,
                  keep_meta=0,
                  kr_reverse=False,
                  factor_mat=None):
        """ Performs CPD-ALS on the ``tensor`` with respect to the specified ``rank``

        Parameters
        ----------
        tensor : Tensor
            Multi-dimensional data to be decomposed
        rank : tuple
            Desired Kruskal rank for the given ``tensor``. Should contain only one value.
            If it is greater than any of the dimensions, then random initialisation is used
        keep_meta : int
            Keep meta information about modes of the given ``tensor``.
            0 - the output will have default values for the meta data
            1 - keep only mode names
            2 - keep mode names and indices
        kr_reverse : bool
            If True, the Khatri-Rao product is taken over the factor matrices in reversed order (see Notes)
        factor_mat : list(np.ndarray)
            Initial list of factor matrices.
            Specifying this option will ignore ``init``.

        Returns
        -------
        tensor_cpd : TensorCPD
            CP representation of the ``tensor``

        Notes
        -----
        The Khatri-Rao product should be taken over the factor matrices in reversed order, but doing so
        would duplicate the original data (e.g. images). This probably relates to how data ordering in
        Python interacts with the Khatri-Rao product
        """
        if not isinstance(tensor, Tensor):
            raise TypeError(
                "Parameter `tensor` should be an object of `Tensor` class!")
        if not isinstance(rank, tuple):
            raise TypeError("Parameter `rank` should be passed as a tuple!")
        if len(rank) != 1:
            raise ValueError(
                "Parameter `rank` should be tuple with only one value!")
        if factor_mat is None:
            fmat = self._init_fmat(tensor, rank)
        else:
            if not isinstance(factor_mat, list):
                raise TypeError(
                    "Parameter `factor_mat` should be a list object")
            if not all(isinstance(m, np.ndarray) for m in factor_mat):
                raise TypeError(
                    "Parameter `factor_mat` should be a list object of np.ndarray objects"
                )
            # Dimensionality checks
            if len(factor_mat) != tensor.order:
                raise ValueError(
                    "Parameter `factor_mat` should be of the same length as the tensor order"
                )
            if not all(m.shape == (mode, rank[0])
                       for m, mode in zip(factor_mat, tensor.shape)):
                raise ValueError(
                    "Parameter `factor_mat` should have the shape [mode_n x r]. Incorrect shapes!"
                )
            fmat = factor_mat.copy()

        self.cost = []  # Reset cost every time the decompose method is called
        tensor_cpd = None
        core_values = np.repeat(np.array([1]), rank)
        norm = tensor.frob_norm
        for n_iter in range(self.max_iter):

            # Update factor matrices
            for mode in range(tensor.order):
                kr_result = khatri_rao(fmat,
                                       skip_matrix=mode,
                                       reverse=kr_reverse)
                hadamard_result = hadamard([
                    np.dot(mat.T, mat) for i, mat in enumerate(fmat)
                    if i != mode
                ])
                # Chain the matrix products: unfolding * Khatri-Rao * pinv(Hadamard)
                update = functools.reduce(np.dot, [
                    tensor.unfold(mode, inplace=False).data, kr_result,
                    np.linalg.pinv(hadamard_result)
                ])
                fmat[mode] = update

            # Update cost
            tensor_cpd = TensorCPD(fmat=fmat, core_values=core_values)
            residual = residual_tensor(tensor, tensor_cpd)
            self.cost.append(abs(residual.frob_norm / norm))
            if self.verbose:
                print('Iter {}: relative error of approximation = {}'.format(
                    n_iter, self.cost[-1]))

            # Check termination conditions
            if self.cost[-1] <= self.epsilon:
                if self.verbose:
                    print(
                        'Relative error of approximation has reached the acceptable level: {}'
                        .format(self.cost[-1]))
                break
            if self.converged:
                if self.verbose:
                    print('Converged in {} iteration(s)'.format(len(
                        self.cost)))
                break
        if self.verbose and not self.converged and self.cost[-1] > self.epsilon:
            print('Maximum number of iterations ({}) has been reached. '
                  'Variation = {}'.format(self.max_iter,
                                          abs(self.cost[-2] - self.cost[-1])))

        if keep_meta == 1:
            mode_names = {i: mode.name for i, mode in enumerate(tensor.modes)}
            tensor_cpd.set_mode_names(mode_names=mode_names)
        elif keep_meta == 2:
            tensor_cpd.copy_modes(tensor)
        else:
            pass
        return tensor_cpd
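
A usage sketch, assuming this is the decompose method of hottbox's CPD class; rank as a 1-tuple and the optional factor_mat warm start are taken directly from the signature above, while the constructor parameters are assumptions:

import numpy as np
from hottbox.core import Tensor
from hottbox.algorithms.decomposition import CPD

tensor = Tensor(np.random.rand(6, 7, 8))
cpd = CPD(max_iter=100, epsilon=1e-2, verbose=False)  # constructor parameters assumed
tensor_cpd = cpd.decompose(tensor, rank=(3,), keep_meta=2)
# Warm start a second run from the previous factors via the optional factor_mat argument;
# `fmat` is assumed to expose the list of factor matrices of a TensorCPD
tensor_cpd = cpd.decompose(tensor, rank=(3,), factor_mat=[np.asarray(m) for m in tensor_cpd.fmat])
print(cpd.cost[-1])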