Example #1
def test_set_backend_local_threadsafe():
    pytest.importorskip('torch')

    global_default = tl.get_backend()

    with ThreadPoolExecutor(max_workers=1) as executor:

        with tl.backend_context('numpy', local_threadsafe=True):
            assert tl.get_backend() == 'numpy'
            # Changes only happen locally in this thread
            assert executor.submit(tl.get_backend).result() == global_default

        # Set the global default backend
        try:
            tl.set_backend('pytorch', local_threadsafe=False)

            # Changed toplevel default in all threads
            assert executor.submit(tl.get_backend).result() == 'pytorch'

            with tl.backend_context('numpy', local_threadsafe=True):
                assert tl.get_backend() == 'numpy'

                def check():
                    assert tl.get_backend() == 'pytorch'
                    with tl.backend_context('numpy', local_threadsafe=True):
                        assert tl.get_backend() == 'numpy'
                    assert tl.get_backend() == 'pytorch'

                executor.submit(check).result()
        finally:
            tl.set_backend(global_default, local_threadsafe=False)
            executor.submit(tl.set_backend, global_default).result()

        assert tl.get_backend() == global_default
        assert executor.submit(tl.get_backend).result() == global_default
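
As a quick illustration of the API exercised above: a minimal sketch, assuming tensorly is installed with both the numpy and pytorch backends available.

import tensorly as tl

tl.set_backend('numpy')              # set the process-wide default backend
with tl.backend_context('pytorch'):
    print(tl.get_backend())          # 'pytorch' inside the context
print(tl.get_backend())              # 'numpy' again once the context exits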
Example #2
def test_svd():
    """Test for the SVD functions"""
    tol = 0.1
    tol_orthogonality = 0.01

    for name, svd_fun in T.SVD_FUNS.items():
        sizes = [(100, 100), (100, 5), (10, 10), (10, 4), (5, 100)]
        n_eigenvecs = [90, 4, 5, 4, 5]

        for s, n in zip(sizes, n_eigenvecs):
            matrix = np.random.random(s)
            matrix_backend = T.tensor(matrix)
            fU, fS, fV = svd_fun(matrix_backend, n_eigenvecs=n)
            U, S, V = svd(matrix)
            U, S, V = U[:, :n], S[:n], V[:n, :]

            assert_array_almost_equal(
                np.abs(S),
                T.abs(fS),
                decimal=3,
                err_msg=
                'eigenvals not correct for "{}" svd fun VS svd and backend="{}", for {} eigenvecs, and size {}.'
                .format(name, tl.get_backend(), n, s))

            # True reconstruction error (based on numpy SVD)
            true_rec_error = np.sum((matrix - np.dot(U,
                                                     S.reshape(
                                                         (-1, 1)) * V))**2)
            # Reconstruction error with the backend's SVD
            rec_error = T.sum(
                (matrix_backend - T.dot(fU,
                                        T.reshape(fS, (-1, 1)) * fV))**2)
            # Check that the two are similar
            assert_(
                true_rec_error - rec_error <= tol,
                msg=
                'Reconstruction not correct for "{}" svd fun VS svd and backend="{}", for {} eigenvecs, and size {}.'
                .format(name, tl.get_backend(), n, s))

            # Check for orthogonality when relevant
            if name != 'symeig_svd':
                left_orthogonality_error = T.norm(
                    T.dot(T.transpose(fU), fU) - T.eye(n))
                assert_(
                    left_orthogonality_error <= tol_orthogonality,
                    msg=
                    'Left eigenvecs not orthogonal for "{}" svd fun VS svd and backend="{}", for {} eigenvecs, and size {}.'
                    .format(name, tl.get_backend(), n, s))
                right_orthogonality_error = T.norm(
                    T.dot(fV, T.transpose(fV)) - T.eye(n))
                assert_(
                    right_orthogonality_error <= tol_orthogonality,
                    msg=
                    'Right eigenvecs not orthogonal for "{}" svd fun VS svd and backend="{}", for {} eigenvecs, and size {}.'
                    .format(name, tl.get_backend(), n, s))

        # Should fail on non-matrices
        with assert_raises(ValueError):
            tensor = T.tensor(np.random.random((3, 3, 3)))
            svd_fun(tensor)
Example #3
def initialize_factors(tensor, rank, init='svd', svd='numpy_svd', random_state=None, non_negative=False):
    r"""Initialize factors used in `parafac`.

    The type of initialization is set using `init`. If `init == 'random'` then
    initialize factor matrices using `random_state`. If `init == 'svd'` then
    initialize the `m`th factor matrix using the `rank` left singular vectors
    of the `m`th unfolding of the input tensor.

    Parameters
    ----------
    tensor : ndarray
    rank : int
    init : {'svd', 'random'}, optional
    svd : str, default is 'numpy_svd'
        function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
    random_state : {None, int, np.random.RandomState}, optional
    non_negative : bool, default is False
        if True, non-negative factors are returned

    Returns
    -------
    factors : ndarray list
        List of initialized factors of the CP decomposition where element `i`
        is of shape (tensor.shape[i], rank)

    """
    rng = check_random_state(random_state)

    if init == 'random':
        factors = [tl.tensor(rng.random_sample((tensor.shape[i], rank)), **tl.context(tensor)) for i in range(tl.ndim(tensor))]
        if non_negative:
            return [tl.abs(f) for f in factors]
        else:
            return factors

    elif init == 'svd':
        try:
            svd_fun = tl.SVD_FUNS[svd]
        except KeyError:
            message = 'Got svd={}. However, for the current backend ({}), the possible choices are {}'.format(
                    svd, tl.get_backend(), tl.SVD_FUNS)
            raise ValueError(message)

        factors = []
        for mode in range(tl.ndim(tensor)):
            U, _, _ = svd_fun(unfold(tensor, mode), n_eigenvecs=rank)

            if tensor.shape[mode] < rank:
                # TODO: this is a hack but it seems to do the job for now
                # factor = tl.tensor(np.zeros((U.shape[0], rank)), **tl.context(tensor))
                # factor[:, tensor.shape[mode]:] = tl.tensor(rng.random_sample((U.shape[0], rank - tl.shape(tensor)[mode])), **tl.context(tensor))
                # factor[:, :tensor.shape[mode]] = U
                random_part = tl.tensor(rng.random_sample((U.shape[0], rank - tl.shape(tensor)[mode])), **tl.context(tensor))
                U = tl.concatenate([U, random_part], axis=1)
            if non_negative:
                factors.append(tl.abs(U[:, :rank]))
            else:
                factors.append(U[:, :rank])
        return factors

    raise ValueError('Initialization method "{}" not recognized'.format(init))
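
A minimal usage sketch for initialize_factors above, assuming the module-level imports it relies on (tensorly as tl, numpy as np, check_random_state, unfold) are in scope; the shapes are hypothetical.

import numpy as np
import tensorly as tl

tensor = tl.tensor(np.random.random((10, 11, 12)))
factors = initialize_factors(tensor, rank=3, init='random')
print([tuple(tl.shape(f)) for f in factors])  # [(10, 3), (11, 3), (12, 3)]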
Example #4
def _get_svd(svd):
    if svd in tl.SVD_FUNS:
        return tl.SVD_FUNS[svd]
    else:
        message = 'Got svd={}. However, for the current backend ({}), the possible choices are {}'.format(
            svd, tl.get_backend(), tl.SVD_FUNS)
        raise ValueError(message)
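
A short sketch of how _get_svd resolves a name to a callable, assuming tensorly as tl and numpy as np are imported; 'numpy_svd' is one of the keys of tl.SVD_FUNS.

import numpy as np
import tensorly as tl

svd_fun = _get_svd('numpy_svd')      # returns tl.SVD_FUNS['numpy_svd']
U, S, V = svd_fun(tl.tensor(np.random.random((8, 5))), n_eigenvecs=3)
_get_svd('no_such_svd')              # raises ValueError listing the valid choices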
Example #5
def initialize_tucker(tensor, rank, modes, random_state, init='svd', svd='numpy_svd', non_negative=False):
    """
    Initialize core and factors used in `tucker`.
    The type of initialization is set using `init`. If `init == 'random'` then
    initialize factor matrices using `random_state`. If `init == 'svd'` then
    initialize the `m`th factor matrix using the `rank` left singular vectors
    of the `m`th unfolding of the input tensor.
    
    Parameters
    ----------
    tensor : ndarray
    rank : int
           number of components
    modes : int list
    random_state : {None, int, np.random.RandomState}
    init : {'svd', 'random', cptensor}, optional
    svd : str, default is 'numpy_svd'
          function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
    non_negative : bool, default is False
        if True, non-negative factors are returned
    
    Returns
    -------
    core    : ndarray
              initialized core tensor 
    factors : list of factors
    """
    try:
        svd_fun = tl.SVD_FUNS[svd]
    except KeyError:
        message = 'Got svd={}. However, for the current backend ({}), the possible choices are {}'.format(
                svd, tl.get_backend(), tl.SVD_FUNS)
        raise ValueError(message)
    # Initialisation
    if init == 'svd':
        factors = []
        for index, mode in enumerate(modes):
            U, S, V = svd_fun(unfold(tensor, mode), n_eigenvecs=rank[index], random_state=random_state)   
            
            if non_negative is True: 
                U = make_svd_non_negative(tensor, U, S, V, nntype="nndsvd")
            
            factors.append(U[:, :rank[index]])        
        # The initial core approximation is needed here for the masking step
        core = multi_mode_dot(tensor, factors, modes=modes, transpose=True)        
        if non_negative is True:
            core = tl.abs(core) 
            
    elif init == 'random':
        rng = tl.check_random_state(random_state)
        core = tl.tensor(rng.random_sample(rank) + 0.01, **tl.context(tensor))  # Check this
        factors = [tl.tensor(rng.random_sample(s), **tl.context(tensor)) for s in zip(tl.shape(tensor), rank)]
        if non_negative is True:
            factors = [tl.abs(f) for f in factors]
            core = tl.abs(core) 
    else:
        (core, factors) = init
 
    return core, factors
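
A minimal usage sketch for initialize_tucker above (hypothetical shapes, assuming the module imports such as multi_mode_dot and unfold are in scope); note that rank is indexed per entry of modes.

import numpy as np
import tensorly as tl

tensor = tl.tensor(np.random.random((5, 6, 7)))
core, factors = initialize_tucker(tensor, rank=[2, 3], modes=[1, 2],
                                  random_state=0, init='svd')
print(tl.shape(core))  # (5, 2, 3): mode 0 is left untouched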
Example #6
def initialize_decomposition(tensor_slices,
                             rank,
                             init='random',
                             svd='numpy_svd',
                             random_state=None):
    r"""Initiate a random PARAFAC2 decomposition given rank and tensor slices

    Parameters
    ----------
    tensor_slices : Iterable of ndarray
    rank : int
    init : {'random', 'svd', CPTensor, Parafac2Tensor}, optional
    random_state : `np.random.RandomState`

    Returns
    -------
    parafac2_tensor : Parafac2Tensor
        List of initialized factors of the CP decomposition where element `i`
        is of shape (tensor.shape[i], rank)

    """
    context = tl.context(tensor_slices[0])
    shapes = [m.shape for m in tensor_slices]

    if init == 'random':
        return random_parafac2(shapes,
                               rank,
                               full=False,
                               random_state=random_state,
                               **context)
    elif init == 'svd':
        try:
            svd_fun = tl.SVD_FUNS[svd]
        except KeyError:
            message = 'Got svd={}. However, for the current backend ({}), the possible choices are {}'.format(
                svd, tl.get_backend(), tl.SVD_FUNS)
            raise ValueError(message)

        padded_tensor = _pad_by_zeros(tensor_slices)
        A = svd_fun(unfold(padded_tensor, 0), n_eigenvecs=rank)[0]
        C = svd_fun(unfold(padded_tensor, 2), n_eigenvecs=rank)[0]
        B = T.eye(rank, **context)
        projections = _compute_projections(tensor_slices, (A, B, C), svd_fun)
        return Parafac2Tensor((None, (A, B, C), projections))

    elif isinstance(init, (tuple, list, Parafac2Tensor, CPTensor)):
        try:
            decomposition = Parafac2Tensor.from_CPTensor(
                init, parafac2_tensor_ok=True)
        except ValueError:
            raise ValueError(
                'If initialization method is a mapping, then it must '
                'be possible to convert it to a Parafac2Tensor instance')
        if decomposition.rank != rank:
            raise ValueError(
                'Cannot init with a decomposition of different rank')
        return decomposition
    raise ValueError('Initialization method "{}" not recognized'.format(init))
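
A minimal usage sketch for initialize_decomposition above, assuming the module imports (random_parafac2, Parafac2Tensor, etc.) are in scope and using hypothetical slice shapes; PARAFAC2 allows each slice to have its own number of rows.

import numpy as np
import tensorly as tl

slices = [tl.tensor(np.random.random((n_rows, 10))) for n_rows in (12, 15, 9)]
parafac2_init = initialize_decomposition(slices, rank=3, init='random')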
Example #7
def test_cpd_als_tensorly(benchmark):
    for datatype in BACKEND_TYPES:
        tl.set_backend(datatype)
        assert tl.get_backend() == datatype

        _, input_tensor_val = init_rand_cp(dim, size, rank)
        input_tensor = tl.tensor(input_tensor_val, dtype='float64')
        factors = benchmark(parafac,
                            input_tensor,
                            rank=rank,
                            init='random',
                            tol=0,
                            n_iter_max=1,
                            verbose=0)
Example #8
def test_partial_tucker():
    """Test for the Partial Tucker decomposition"""
    rng = check_random_state(1234)
    tol_norm_2 = 10e-3
    tol_max_abs = 10e-1
    tensor = tl.tensor(rng.random_sample((3, 4, 3)))
    modes = [1, 2]
    for svd_func in tl.SVD_FUNS:
        if tl.get_backend() == 'tensorflow_graph' and svd_func == 'numpy_svd':
            continue  # TODO(craymichael)

        core, factors = partial_tucker(tensor,
                                       modes,
                                       rank=None,
                                       n_iter_max=200,
                                       svd=svd_func,
                                       verbose=True)
        reconstructed_tensor = multi_mode_dot(core, factors, modes=modes)
        norm_rec = tl.to_numpy(tl.norm(reconstructed_tensor, 2))
        norm_tensor = tl.to_numpy(tl.norm(tensor, 2))
        assert_((norm_rec - norm_tensor) / norm_rec < tol_norm_2)

        # Test the max abs difference between the reconstruction norm and the tensor norm
        assert_(
            tl.to_numpy(tl.max(tl.abs(norm_rec - norm_tensor))) < tol_max_abs)

        # Test the shape of the core and factors
        ranks = [3, 1]
        core, factors = partial_tucker(tensor,
                                       modes=modes,
                                       rank=ranks,
                                       n_iter_max=100,
                                       svd=svd_func,
                                       verbose=1)
        for i, rank in enumerate(ranks):
            assert_equal(
                factors[i].shape, (tensor.shape[i + 1], ranks[i]),
                err_msg="factors[{}].shape={}, expected {} (svd=\"{}\")".
                format(i, factors[i].shape, (tensor.shape[i + 1], ranks[i]),
                       svd_func))
        assert_equal(core.shape, [tensor.shape[0]] + ranks,
                     err_msg="Core.shape={}, "
                     "expected {} (svd=\"{}\")".format(
                         core.shape, [tensor.shape[0]] + ranks, svd_func))
Example #9
def test_set_backend():
    torch = pytest.importorskip('torch')

    toplevel_backend = tl.get_backend()

    # Set in context manager
    with tl.backend_context('numpy'):
        assert tl.get_backend() == 'numpy'
        assert isinstance(tl.tensor([1, 2, 3]), np.ndarray)
        assert isinstance(T.tensor([1, 2, 3]), np.ndarray)
        assert tl.float32 is T.float32 is np.float32

        with tl.backend_context('pytorch'):
            assert tl.get_backend() == 'pytorch'
            assert torch.is_tensor(tl.tensor([1, 2, 3]))
            assert torch.is_tensor(T.tensor([1, 2, 3]))
            assert tl.float32 is T.float32 is torch.float32

        # Sets back to numpy
        assert tl.get_backend() == 'numpy'
        assert isinstance(tl.tensor([1, 2, 3]), np.ndarray)
        assert isinstance(T.tensor([1, 2, 3]), np.ndarray)
        assert tl.float32 is T.float32 is np.float32

    # Reset back to initial backend
    assert tl.get_backend() == toplevel_backend

    # Set not in context manager
    tl.set_backend('pytorch')
    assert tl.get_backend() == 'pytorch'
    tl.set_backend(toplevel_backend)

    assert tl.get_backend() == toplevel_backend

    # Improper name doesn't reset backend
    with assert_raises(ValueError):
        tl.set_backend('not-a-real-backend')
    assert tl.get_backend() == toplevel_backend
Example #10
    def __init__(self, tensor, order_names, order_labels=None, mask=None, device=None):
        # Init BaseTensor
        BaseTensor.__init__(self)

        if device is None:
            self.tensor = tl.tensor(tensor)
            self.mask = mask
        else:
            if tl.get_backend() == 'pytorch':
                self.tensor = tl.tensor(tensor, device=device)
                if mask is not None:
                    self.mask = tl.tensor(mask, device=device)
                else:
                    self.mask = mask
            else:
                self.tensor = tl.tensor(tensor)
                self.mask = mask
        self.order_names = order_names
        if order_labels is None:
            self.order_labels = ['Dimension-{}'.format(i+1) for i in range(len(self.tensor.shape))]
        else:
            self.order_labels = order_labels
        assert len(self.tensor.shape) == len(self.order_labels), "The length of order_labels must match the number of orders/dimensions in the tensor"
Example #11
import pytest
import tensorly as tl
from tensorly import random
from ..tt_matrix import tt_matrix_to_matrix, tt_matrix_to_tensor, tt_matrix_to_vec

skip_mxnet = pytest.mark.skipif(tl.get_backend() == "mxnet",
                                reason="MXNet currently does not support transpose for tensors of order > 6.")

# TODO: Remove once MXNet supports transpose for > 6th order tensors
@skip_mxnet
def test_tt_matrix_manipulation():
    """Test for tt_matrix manipulation"""
    shape = (2, 2, 2, 3, 3, 3)
    n_rows, n_cols = 8, 27
    tt_matrix = random.random_tt_matrix(shape, rank=2, full=False)
    rec = tt_matrix_to_tensor(tt_matrix)
    assert(tl.shape(rec) == shape)

    mat = tt_matrix_to_matrix(tt_matrix)
    assert(tl.shape(mat) == (n_rows, n_cols))

    vec = tt_matrix_to_vec(tt_matrix)
    assert(tl.shape(vec) == (n_rows*n_cols,))
Example #12
def test_svd():
    """Test for the SVD functions"""
    tol = 0.1
    tol_orthogonality = 0.01

    for name, svd_fun in T.SVD_FUNS.items():
        sizes = [(100, 100), (100, 5), (10, 10), (10, 4), (5, 100)]
        n_eigenvecs = [90, 4, 5, 4, 5]

        for s, n in zip(sizes, n_eigenvecs):
            matrix = np.random.random(s)
            matrix_backend = T.tensor(matrix)
            fU, fS, fV = svd_fun(matrix_backend, n_eigenvecs=n)
            U, S, V = svd(matrix)
            U, S, V = U[:, :n], S[:n], V[:n, :]

            assert_array_almost_equal(
                np.abs(S),
                T.abs(fS),
                decimal=3,
                err_msg=
                'eigenvals not correct for "{}" svd fun VS svd and backend="{}", for {} eigenvecs, and size {}.'
                .format(name, tl.get_backend(), n, s))

            # True reconstruction error (based on numpy SVD)
            true_rec_error = np.sum((matrix - np.dot(U,
                                                     S.reshape(
                                                         (-1, 1)) * V))**2)
            # Reconstruction error with the backend's SVD
            rec_error = T.sum(
                (matrix_backend - T.dot(fU,
                                        T.reshape(fS, (-1, 1)) * fV))**2)
            # Check that the two are similar
            assert_(
                true_rec_error - rec_error <= tol,
                msg=
                'Reconstruction not correct for "{}" svd fun VS svd and backend="{}", for {} eigenvecs, and size {}.'
                .format(name, tl.get_backend(), n, s))

            # Check for orthogonality
            left_orthogonality_error = T.norm(
                T.dot(T.transpose(fU), fU) - T.eye(n))
            assert_(
                left_orthogonality_error <= tol_orthogonality,
                msg=
                'Left eigenvecs not orthogonal for "{}" svd fun VS svd and backend="{}", for {} eigenvecs, and size {}.'
                .format(name, tl.get_backend(), n, s))
            right_orthogonality_error = T.norm(
                T.dot(fV, T.transpose(fV)) - T.eye(n))
            assert_(
                right_orthogonality_error <= tol_orthogonality,
                msg=
                'Right eigenvecs not orthogonal for "{}" svd fun VS svd and backend="{}", for {} eigenvecs, and size {}.'
                .format(name, tl.get_backend(), n, s))

        # Should fail on non-matrices
        with assert_raises(ValueError):
            tensor = T.tensor(np.random.random((3, 3, 3)))
            svd_fun(tensor)

        # Test for singular matrices (some eigenvals will be zero)
        # Rank at most 5
        matrix = tl.dot(tl.randn((20, 5), seed=12), tl.randn((5, 20), seed=23))
        U, S, V = tl.partial_svd(matrix, n_eigenvecs=6, random_state=0)
        true_rec_error = tl.sum((matrix - tl.dot(U,
                                                 tl.reshape(S,
                                                            (-1, 1)) * V))**2)
        assert_(true_rec_error <= tol)
        assert_(np.isfinite(T.to_numpy(U)).all(),
                msg="Left singular vectors are not finite")
        assert_(np.isfinite(T.to_numpy(V)).all(),
                msg="Right singular vectors are not finite")

        # Test orthonormality when max_dim > n_eigenvecs > matrix_rank
        matrix = tl.dot(tl.randn((4, 2), seed=1), tl.randn((2, 4), seed=12))
        U, S, V = tl.partial_svd(matrix, n_eigenvecs=3, random_state=0)
        left_orthogonality_error = T.norm(T.dot(T.transpose(U), U) - T.eye(3))
        assert_(left_orthogonality_error <= tol_orthogonality)
        right_orthogonality_error = T.norm(T.dot(V, T.transpose(V)) - T.eye(3))
        assert_(right_orthogonality_error <= tol_orthogonality)

        # Test if partial_svd returns the same result for the same setting
        matrix = T.tensor(np.random.random((20, 5)))
        random_state = np.random.RandomState(0)
        U1, S1, V1 = tl.partial_svd(matrix,
                                    n_eigenvecs=2,
                                    random_state=random_state)
        U2, S2, V2 = tl.partial_svd(matrix, n_eigenvecs=2, random_state=0)
        assert_array_equal(U1, U2)
        assert_array_equal(S1, S2)
        assert_array_equal(V1, V2)
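
For reference, the partial SVD exercised at the end of this test can also be called directly; a minimal sketch, assuming tensorly as tl and numpy as np.

import numpy as np
import tensorly as tl

matrix = tl.tensor(np.random.random((20, 5)))
U, S, V = tl.partial_svd(matrix, n_eigenvecs=2)
print(tl.shape(U), tl.shape(S), tl.shape(V))  # (20, 2) (2,) (2, 5)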
Example #13
    res_svd = non_negative_parafac(tensor, rank=3, n_iter_max=100,
                                   tol=10e-4, init='svd')
    res_random = non_negative_parafac(tensor, rank=3, n_iter_max=100, tol=10e-4,
                                      init='random', random_state=1234, verbose=0)
    rec_svd = kruskal_to_tensor(res_svd)
    rec_random = kruskal_to_tensor(res_random)
    error = T.norm(rec_svd - rec_random, 2)
    error /= T.norm(rec_svd, 2)
    assert_(error < tol_norm_2,
            'norm 2 of difference between svd and random init too high')
    assert_(T.max(T.abs(rec_svd - rec_random)) < tol_max_abs,
            'abs norm of difference between svd and random init too high')


@pytest.mark.xfail(tl.get_backend() == 'tensorflow', reason='Fails on tensorflow')
def test_sample_khatri_rao():
    """ Test for sample_khatri_rao
    """

    rng = check_random_state(1234)
    t_shape = (8, 9, 10)
    rank = 3
    tensor = T.tensor(rng.random_sample(t_shape)+1)
    weights, factors = parafac(tensor, rank=rank, n_iter_max=120)
    num_samples = 4
    skip_matrix = 1
    sampled_kr, sampled_indices, sampled_rows = sample_khatri_rao(factors, num_samples, skip_matrix=skip_matrix,
                                                                  return_sampled_rows=True)
    assert_(T.shape(sampled_kr) == (num_samples, rank),
              'Sampled shape of khatri-rao product is inconsistent')
Example #14
def partial_tucker(tensor,
                   modes,
                   rank=None,
                   n_iter_max=100,
                   init='svd',
                   tol=10e-5,
                   svd='numpy_svd',
                   random_state=None,
                   verbose=False,
                   mask=None):
    """Partial tucker decomposition via Higher Order Orthogonal Iteration (HOI)

        Decomposes `tensor` into a Tucker decomposition exclusively along the provided modes.

    Parameters
    ----------
    tensor : ndarray
    modes : int list
            list of the modes on which to perform the decomposition
    rank : None, int or int list
        size of the core tensor, ``(len(ranks) == tensor.ndim)``
        if int, the same rank is used for all modes
    n_iter_max : int
                 maximum number of iteration
    init : {'svd', 'random'}, or TuckerTensor optional
        if a TuckerTensor is provided, this is used for initialization
    svd : str, default is 'numpy_svd'
        function to use to compute the SVD,
        acceptable values in tensorly.SVD_FUNS
    tol : float, optional
          tolerance: the algorithm stops when the variation in
          the reconstruction error is less than the tolerance
    random_state : {None, int, np.random.RandomState}
    verbose : int, optional
        level of verbosity
    mask : ndarray
        array of booleans with the same shape as ``tensor`` should be 0 where
        the values are missing and 1 everywhere else. Note:  if tensor is
        sparse, then mask should also be sparse with a fill value of 1 (or
        True).

    Returns
    -------
    core : ndarray
            core tensor of the Tucker decomposition
    factors : ndarray list
            list of factors of the Tucker decomposition.
            with ``core.shape[i] == (tensor.shape[i], ranks[i]) for i in modes``

    """
    if rank is None:
        message = "No value given for 'rank'. The decomposition will preserve the original size."
        warnings.warn(message, Warning)
        rank = [tl.shape(tensor)[mode] for mode in modes]
    elif isinstance(rank, int):
        message = "Given only one int for 'rank' instead of a list of {} modes. Using this rank for all modes.".format(
            len(modes))
        warnings.warn(message, Warning)
        rank = tuple(rank for _ in modes)
    else:
        rank = tuple(rank)

    if mask is not None and init == "svd":
        message = "Masking occurs after initialization. Therefore, random initialization is recommended."
        warnings.warn(message, Warning)

    try:
        svd_fun = tl.SVD_FUNS[svd]
    except KeyError:
        message = 'Got svd={}. However, for the current backend ({}), the possible choices are {}'.format(
            svd, tl.get_backend(), tl.SVD_FUNS)
        raise ValueError(message)

    # SVD init
    if init == 'svd':
        factors = []
        for index, mode in enumerate(modes):
            eigenvecs, _, _ = svd_fun(unfold(tensor, mode),
                                      n_eigenvecs=rank[index],
                                      random_state=random_state)
            factors.append(eigenvecs)

        # The initial core approximation is needed here for the masking step
        core = multi_mode_dot(tensor, factors, modes=modes, transpose=True)
    elif init == 'random':
        rng = tl.check_random_state(random_state)
        # len(rank) == len(modes) but we still want a core dimension for the modes not optimized
        core_shape = list(tl.shape(tensor))
        for (i, e) in enumerate(modes):
            core_shape[e] = rank[i]
        core = tl.tensor(rng.random_sample(core_shape), **tl.context(tensor))
        factors = [
            tl.tensor(rng.random_sample((tl.shape(tensor)[mode], rank[index])),
                      **tl.context(tensor))
            for (index, mode) in enumerate(modes)
        ]
    else:
        (core, factors) = init

    rec_errors = []
    norm_tensor = tl.norm(tensor, 2)

    for iteration in range(n_iter_max):
        if mask is not None:
            tensor = tensor * mask + multi_mode_dot(
                core, factors, modes=modes, transpose=False) * (1 - mask)

        for index, mode in enumerate(modes):
            core_approximation = multi_mode_dot(tensor,
                                                factors,
                                                modes=modes,
                                                skip=index,
                                                transpose=True)
            eigenvecs, _, _ = svd_fun(unfold(core_approximation, mode),
                                      n_eigenvecs=rank[index],
                                      random_state=random_state)
            factors[index] = eigenvecs

        core = multi_mode_dot(tensor, factors, modes=modes, transpose=True)

        # The factors are orthonormal and therefore do not affect the reconstructed tensor's norm
        rec_error = sqrt(
            abs(norm_tensor**2 - tl.norm(core, 2)**2)) / norm_tensor
        rec_errors.append(rec_error)

        if iteration > 1:
            if verbose:
                print('reconstruction error={}, variation={}.'.format(
                    rec_errors[-1], rec_errors[-2] - rec_errors[-1]))

            if tol and abs(rec_errors[-2] - rec_errors[-1]) < tol:
                if verbose:
                    print('converged in {} iterations.'.format(iteration))
                break

    return (core, factors)
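
A minimal usage sketch for the partial_tucker variant above (hypothetical shapes, assuming the module imports such as multi_mode_dot and unfold are in scope): only the listed modes are compressed.

import numpy as np
import tensorly as tl

tensor = tl.tensor(np.random.random((8, 9, 10)))
core, factors = partial_tucker(tensor, modes=[1, 2], rank=[4, 5])
print(tl.shape(core))  # (8, 4, 5): mode 0 keeps its original size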
Example #15
    def __initialize_factors(self,
                             tensor,
                             svd='numpy_svd',
                             non_negative=False,
                             custom=None):
        """Initialize random or SVD-guided factors for TCA depending on TCA type

		Parameters
		----------
		tensor : torch.Tensor
			The tensor of activity of N neurons, T timepoints and K trials of shape N, T, K
		svd : str, optional
			Type of SVD algorithm to use (default is numpy_svd)
		non_negative : bool, optional
			A flag used to specify if factors generated must be strictyl positive (default is False)
		custom : int, optional 
			A flag used to specify which factor should be strictly positive for 'custom parafac' (default is None)

		Raises
		------
		ValueError
			If svd does not contain a valid SVD algorithm reference
			If self.init variable does not contain a valid intialization method

		Returns
		-------
		list
			List of initialized tensors
		"""
        rng = tensorly.random.check_random_state(self.random_state)
        if self.init == 'random':
            if custom:
                factors = [
                    tl.tensor(
                        rng.random_sample(
                            (tensor.shape[i], self.rank)) * 2 - 1,
                        **tl.context(tensor)) for i in range(self.dimension)
                ]
                factors = [
                    f if int(i) == int(custom) else tl.abs(f)
                    for i, f in enumerate(factors)
                ]

            elif non_negative:
                factors = [
                    tl.tensor(rng.random_sample((tensor.shape[i], self.rank)),
                              **tl.context(tensor))
                    for i in range(self.dimension)
                ]
                factors = [
                    tl.abs(f) for f in factors
                ]  # See if this line is useful depending on random function used

            else:
                factors = [
                    tl.tensor(
                        rng.random_sample(
                            (tensor.shape[i], self.rank)) * 2 - 1,
                        **tl.context(tensor)) for i in range(self.dimension)
                ]

            return factors

        elif self.init == 'svd':
            try:
                svd_fun = tl.SVD_FUNS[svd]
            except KeyError:
                message = 'Got svd={}. However, for the current backend ({}), the possible choices are {}'.format(
                    svd, tl.get_backend(), tl.SVD_FUNS)
                raise ValueError(message)

            factors = []
            for mode in range(tl.ndim(tensor)):
                U, *_ = svd_fun(unfold(tensor, mode), n_eigenvecs=self.rank)

                if tensor.shape[mode] < self.rank:
                    random_part = tl.tensor(
                        rng.random_sample(
                            (U.shape[0], self.rank - tl.shape(tensor)[mode])),
                        **tl.context(tensor))

                    U = tl.concatenate([U, random_part], axis=1)

                if non_negative or custom == mode:
                    factors.append(tl.abs(U[:, :self.rank]))
                else:
                    factors.append(U[:, :self.rank])

            return factors
        else:
            raise ValueError(
                'Initialization method "{}" not recognized'.format(self.init))
Example #16
import tensorly as tl

import pytest
import numpy as np
import itertools
import numpy.random as npr

from ..mps_decomposition_cross import matrix_product_state_cross
from ....mps_tensor import mps_to_tensor
from ....random import check_random_state
from tensorly.testing import assert_

skip_if_tensorflow = pytest.mark.skipif(
    tl.get_backend() == "tensorflow",
    reason="Operation not supported in TensorFlow")


@skip_if_tensorflow
def test_matrix_product_state_cross_1():
    """ Test for matrix_product_state """
    rng = check_random_state(1234)

    ## Test 1

    # Create tensor with random elements
    d = 3
    n = 4
    tensor = (np.arange(n**d).reshape((n, ) * d))
    tensor = tl.tensor(tensor)

    tensor_shape = tensor.shape
Example #17
def initialize_constrained_parafac(tensor, rank, init='svd', svd='numpy_svd',
                                   random_state=None, non_negative=None, l1_reg=None,
                                   l2_reg=None, l2_square_reg=None, unimodality=None, normalize=None,
                                   simplex=None, normalized_sparsity=None,
                                   soft_sparsity=None, smoothness=None, monotonicity=None,
                                   hard_sparsity=None):
    r"""Initialize factors used in `constrained_parafac`.

    The type of initialization is set using `init`. If `init == 'random'` then
    initialize factor matrices with uniform distribution using `random_state`. If `init == 'svd'` then
    initialize the `m`th factor matrix using the `rank` left singular vectors
    of the `m`th unfolding of the input tensor. If init is a previously initialized `cp tensor`, all
    the weights are pulled in the last factor and then the weights are set to "1" for the output tensor.
    Lastly, factors are updated with proximal operator according to the selected constraint(s), so that they satisfy the
    imposed constraints (does not apply to cptensor initialization).

    Parameters
    ----------
    tensor : ndarray
    rank : int
    random_state : {None, int, np.random.RandomState}
    init : {'svd', 'random', cptensor}, optional
    svd : str, default is 'numpy_svd'
        function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
    non_negative : bool or dictionary
        This constraint is clipping negative values to '0'. If it is True non-negative constraint is applied to all modes.
    l1_reg : float or list or dictionary, optional
    l2_reg : float or list or dictionary, optional
    l2_square_reg : float or list or dictionary, optional
    unimodality : bool or dictionary, optional
        If it is True unimodality constraint is applied to all modes.
    normalize : bool or dictionary, optional
        This constraint divides all the values by maximum value of the input array. If it is True normalize constraint
        is applied to all modes.
    simplex : float or list or dictionary, optional
    normalized_sparsity : float or list or dictionary, optional
    soft_sparsity : float or list or dictionary, optional
    smoothness : float or list or dictionary, optional
    monotonicity : bool or dictionary, optional
    hard_sparsity : float or list or dictionary, optional

    Returns
    -------
    factors : CPTensor
        An initial cp tensor.
    """
    n_modes = tl.ndim(tensor)
    rng = tl.check_random_state(random_state)

    if init == 'random':
        weights, factors = random_cp(tl.shape(tensor), rank, normalise_factors=False, **tl.context(tensor))

    elif init == 'svd':
        try:
            svd_fun = tl.SVD_FUNS[svd]
        except KeyError:
            message = 'Got svd={}. However, for the current backend ({}), the possible choices are {}'.format(
                svd, tl.get_backend(), tl.SVD_FUNS)
            raise ValueError(message)

        factors = []
        for mode in range(tl.ndim(tensor)):
            U, S, _ = svd_fun(unfold(tensor, mode), n_eigenvecs=rank)

            # Put SVD initialization on the same scaling as the tensor in case normalize_factors=False
            if mode == 0:
                idx = min(rank, tl.shape(S)[0])
                U = tl.index_update(U, tl.index[:, :idx], U[:, :idx] * S[:idx])

            if tensor.shape[mode] < rank:
                random_part = tl.tensor(rng.random_sample((U.shape[0], rank - tl.shape(tensor)[mode])),
                                        **tl.context(tensor))
                U = tl.concatenate([U, random_part], axis=1)

            factors.append(U[:, :rank])

    elif isinstance(init, (tuple, list, CPTensor)):
        try:
            weights, factors = CPTensor(init)

            if tl.all(weights == 1):
                weights, factors = CPTensor((None, factors))
            else:
                weights_avg = tl.prod(weights) ** (1.0 / tl.shape(weights)[0])
                for i in range(len(factors)):
                    factors[i] = factors[i] * weights_avg
            kt = CPTensor((None, factors))
            return kt
        except ValueError:
            raise ValueError(
                'If initialization method is a mapping, then it must '
                'be possible to convert it to a CPTensor instance'
            )
    else:
        raise ValueError('Initialization method "{}" not recognized'.format(init))

    for i in range(n_modes):
        factors[i] = proximal_operator(factors[i], non_negative=non_negative, l1_reg=l1_reg,
                                       l2_reg=l2_reg, l2_square_reg=l2_square_reg, unimodality=unimodality,
                                       normalize=normalize, simplex=simplex, normalized_sparsity=normalized_sparsity,
                                       soft_sparsity=soft_sparsity, smoothness=smoothness,
                                       monotonicity=monotonicity, hard_sparsity=hard_sparsity, n_const=n_modes, order=i)
    kt = CPTensor((None, factors))
    return kt
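
A minimal usage sketch for initialize_constrained_parafac above (hypothetical shapes, assuming the module imports such as proximal_operator, random_cp and CPTensor are in scope): the chosen constraint is applied to the initial factors via the proximal operator.

import numpy as np
import tensorly as tl

tensor = tl.tensor(np.random.random((6, 7, 8)))
kt = initialize_constrained_parafac(tensor, rank=3, init='svd', non_negative=True)
weights, factors = kt  # weights are all 1, factors satisfy the constraint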
Example #18
def partial_tucker(tensor,
                   modes,
                   rank=None,
                   n_iter_max=100,
                   init='svd',
                   tol=10e-5,
                   svd='numpy_svd',
                   random_state=None,
                   verbose=False,
                   ranks=None):
    """Partial tucker decomposition via Higher Order Orthogonal Iteration (HOI)

        Decomposes `tensor` into a Tucker decomposition exclusively along the provided modes.

    Parameters
    ----------
    tensor : ndarray
    modes : int list
            list of the modes on which to perform the decomposition
    rank : None or int list
            size of the core tensor, ``(len(ranks) == len(modes))``
    n_iter_max : int
                 maximum number of iteration
    init : {'svd', 'random'}, optional
    svd : str, default is 'numpy_svd'
        function to use to compute the SVD,
        acceptable values in tensorly.SVD_FUNS
    tol : float, optional
          tolerance: the algorithm stops when the variation in
          the reconstruction error is less than the tolerance
    random_state : {None, int, np.random.RandomState}
    verbose : int, optional
        level of verbosity

    Returns
    -------
    core : ndarray 
            core tensor of the Tucker decomposition
    factors : ndarray list
            list of factors of the Tucker decomposition.
            with ``core.shape[i] == (tensor.shape[i], ranks[i]) for i in modes``

    """
    if ranks is not None:
        if rank is not None:
            raise ValueError(
                "Cannot specify both 'rank' and deprecated 'ranks' args")
        message = "'ranks' is deprecated, please use 'rank' instead"
        warnings.warn(message, DeprecationWarning)
        rank = ranks

    if rank is None:
        message = "No value given for 'rank'. The decomposition will preserve the original size."
        warnings.warn(message, Warning)
        rank = [tl.shape(tensor)[mode] for mode in modes]
    elif isinstance(rank, int):
        message = "Given only one int for 'rank' instead of a list of {} modes. " \
                  "Using this rank for all modes.".format(len(modes))
        warnings.warn(message, Warning)
        rank = [rank for _ in modes]

    try:
        svd_fun = tl.SVD_FUNS[svd]
    except KeyError:
        message = 'Got svd={}. However, for the current backend ({}), the possible choices are {}'.format(
            svd, tl.get_backend(), tl.SVD_FUNS)
        raise ValueError(message)

    # SVD init
    if init == 'svd':
        factors = []
        for index, mode in enumerate(modes):
            eigenvecs, _, _ = svd_fun(unfold(tensor, mode),
                                      n_eigenvecs=rank[index])
            factors.append(eigenvecs)
    else:
        rng = check_random_state(random_state)
        core = tl.tensor(rng.random_sample(rank), **tl.context(tensor))
        factors = [
            tl.tensor(rng.random_sample((tl.shape(tensor)[mode], rank[index])),
                      **tl.context(tensor))
            for (index, mode) in enumerate(modes)
        ]

    norm_tensor = tl.norm(tensor, 2)

    if tl.get_backend() == 'tensorflow_graph':
        import tensorflow as tf

        def cond(dparams, core, *factors):  # The condition to exit while loop
            condition = tf.less(dparams.index, n_iter_max)  # Max iters reached

            if verbose:
                print_op = tf.cond(
                    tf.greater(dparams.index, 1),
                    true_fn=lambda: tf.print('reconstruction error=',
                                             dparams.error,
                                             ', variation=',
                                             dparams.variation,
                                             output_stream=sys.stdout,
                                             sep=''),
                    false_fn=lambda: tf.constant(True))
                with tf.control_dependencies([print_op]):
                    dparams = dparams._replace(
                        variation=tf.identity(dparams.variation))
            if tol:

                def tol_check():
                    above_tol = tf.greater(tl.abs(dparams.variation), tol)
                    # Print convergence iterations
                    if verbose:
                        print_op = tf.cond(above_tol,
                                           true_fn=lambda: tf.constant(True),
                                           false_fn=lambda: tf.print(
                                               'converged in',
                                               dparams.index,
                                               'iterations.',
                                               output_stream=sys.stdout))
                        with tf.control_dependencies([print_op]):
                            above_tol = tf.identity(above_tol)
                    return tf.logical_and(condition, above_tol)

                condition = tf.cond(tf.greater(dparams.index, 1),
                                    true_fn=tol_check,
                                    false_fn=lambda: condition)
            return condition

        def body(dparams, core, *factors):  # While loop body
            factors = list(factors)
            core = _common_snippet_1(tensor, modes, factors, rank, svd_fun)
            rec_error_curr = _common_snippet_2(core, norm_tensor)

            dparams = dparams._replace(
                variation=dparams.error - rec_error_curr,
                error=rec_error_curr,
                index=dparams.index + 1,
            )
            return [dparams, core, *factors]

        dtype = tl.get_dtype(tensor)

        dummy_core = tl.reshape(tl.tensor([], dtype=dtype),
                                (0, ) * tl.ndim(tensor))

        dparams = DecompParams(
            index=tl.tensor(0),
            error=tl.tensor(float('+inf'), dtype=dtype),
            variation=tl.tensor(float('+inf'), dtype=dtype),
        )

        _, core, *factors = tf.while_loop(
            cond,
            body,
            back_prop=False,
            parallel_iterations=1,
            loop_vars=(dparams, dummy_core, *factors),
            shape_invariants=(DecompParams(*(tf.TensorShape(tuple()), ) *
                                           len(dparams)),
                              tf.TensorShape((None, ) * tl.ndim(tensor)),
                              *map(tf.TensorShape, map(tl.shape, factors))))

        # Update shape information of core
        core_shape = list(tl.shape(tensor))
        for mode, rank_i in zip(modes, rank):
            core_shape[mode] = rank_i
        core.set_shape(core_shape)

    else:
        rec_error_curr = float('+inf')

        for iteration in range(n_iter_max):
            core = _common_snippet_1(tensor, modes, factors, rank, svd_fun)
            rec_error_prev = rec_error_curr
            rec_error_curr = _common_snippet_2(core, norm_tensor)

            if iteration > 1:
                if verbose:
                    print('reconstruction error={}, variation={}.'.format(
                        rec_error_curr, rec_error_prev - rec_error_curr))

                if tol and abs(rec_error_prev - rec_error_curr) < tol:
                    if verbose:
                        print('converged in {} iterations.'.format(iteration))
                    break

    return core, factors
Example #19
import tensorly as tl

import pytest
import numpy as np
import itertools

from .._gcp import (gcp, tl_gcp_fg, vec2factors, factors2vec, validate_opt,
                    validate_type, generate_test_tensor, tl_sample_uniform, gcp_fg_est_helper)
from ....cp_tensor import (cp_to_tensor, CPTensor, cp_norm)

from tensorly.testing import assert_

skip_if_backend = pytest.mark.skipif(tl.get_backend() in ("tensorflow", "jax", "cupy", "mxnet"),
                                     reason=f"Operation not supported in {tl.get_backend()}")

@skip_if_backend
def test_gcp_1():
    """ Test for generalized CP"""

    ## Test 1 - shapes and dimensions

    # Create tensor with random elements
    rng = tl.check_random_state(1234)
    d = 3
    n = 4
    shape = (40, 50, 60)
    tensor = tl.tensor(rng.random(shape), dtype=tl.float32)
    # tensor = (np.arange(n**d, dtype=float).reshape((n,)*d))
    # tensor = tl.tensor(tensor)  # a 4 x 4 x 4 tensor

    tensor_shape = tensor.shape
Example #20
                                      tol=10e-4,
                                      init='random',
                                      random_state=1234,
                                      verbose=0)
    rec_svd = kruskal_to_tensor(res_svd)
    rec_random = kruskal_to_tensor(res_random)
    error = T.norm(rec_svd - rec_random, 2)
    error /= T.norm(rec_svd, 2)
    assert_(error < tol_norm_2,
            'norm 2 of difference between svd and random init too high')
    assert_(
        T.max(T.abs(rec_svd - rec_random)) < tol_max_abs,
        'abs norm of difference between svd and random init too high')


@pytest.mark.xfail(tl.get_backend() == 'tensorflow',
                   reason='Fails on tensorflow')
def test_sample_khatri_rao():
    """ Test for sample_khatri_rao
    """

    rng = check_random_state(1234)
    t_shape = (8, 9, 10)
    rank = 3
    tensor = T.tensor(rng.random_sample(t_shape) + 1)
    weights, factors = parafac(tensor, rank=rank, n_iter_max=120)
    num_samples = 4
    skip_matrix = 1
    sampled_kr, sampled_indices, sampled_rows = sample_khatri_rao(
        factors,
        num_samples,
Example #21
def partial_tucker(tensor, modes, rank=None, n_iter_max=100, init='svd', tol=10e-5,
                   svd='numpy_svd', random_state=None, verbose=False, ranks=None):
    """Partial tucker decomposition via Higher Order Orthogonal Iteration (HOI)

        Decomposes `tensor` into a Tucker decomposition exclusively along the provided modes.

    Parameters
    ----------
    tensor : ndarray
    modes : int list
            list of the modes on which to perform the decomposition
    ranks : None or int list
            size of the core tensor, ``(len(ranks) == len(modes))``
    n_iter_max : int
                 maximum number of iteration
    init : {'svd', 'random'}, optional
    svd : str, default is 'numpy_svd'
        function to use to compute the SVD,
        acceptable values in tensorly.SVD_FUNS
    tol : float, optional
          tolerance: the algorithm stops when the variation in
          the reconstruction error is less than the tolerance
    random_state : {None, int, np.random.RandomState}
    verbose : int, optional
        level of verbosity

    Returns
    -------
    core : ndarray 
            core tensor of the Tucker decomposition
    factors : ndarray list
            list of factors of the Tucker decomposition.
            with ``core.shape[i] == (tensor.shape[i], ranks[i]) for i in modes``

    """
    if ranks is not None:
        message = "'ranks' is depreciated, please use 'rank' instead"
        warnings.warn(message, DeprecationWarning)
        rank = ranks

    if rank is None:
        message = "No value given for 'rank'. The decomposition will preserve the original size."
        warnings.warn(message, Warning)
        rank = [tl.shape(tensor)[mode] for mode in modes]
    elif isinstance(rank, int):
        message = "Given only one int for 'rank' intead of a list of {} modes. Using this rank for all modes.".format(len(modes))
        warnings.warn(message, Warning)
        rank = [rank for _ in modes]

    try:
        svd_fun = tl.SVD_FUNS[svd]
    except KeyError:
        message = 'Got svd={}. However, for the current backend ({}), the possible choices are {}'.format(
                svd, tl.get_backend(), tl.SVD_FUNS)
        raise ValueError(message)

    # SVD init
    if init == 'svd':
        factors = []
        for index, mode in enumerate(modes):
            eigenvecs, _, _ = svd_fun(unfold(tensor, mode), n_eigenvecs=rank[index])
            factors.append(eigenvecs)
    else:
        rng = check_random_state(random_state)
        core = tl.tensor(rng.random_sample(rank), **tl.context(tensor))
        factors = [tl.tensor(rng.random_sample((tl.shape(tensor)[mode], rank[index])), **tl.context(tensor)) for (index, mode) in enumerate(modes)]

    rec_errors = []
    norm_tensor = tl.norm(tensor, 2)

    for iteration in range(n_iter_max):
        for index, mode in enumerate(modes):
            core_approximation = multi_mode_dot(tensor, factors, modes=modes, skip=index, transpose=True)
            eigenvecs, _, _ = svd_fun(unfold(core_approximation, mode), n_eigenvecs=rank[index])
            factors[index] = eigenvecs

        core = multi_mode_dot(tensor, factors, modes=modes, transpose=True)

        # The factors are orthonormal and therefore do not affect the reconstructed tensor's norm
        rec_error = sqrt(abs(norm_tensor**2 - tl.norm(core, 2)**2)) / norm_tensor
        rec_errors.append(rec_error)

        if iteration > 1:
            if verbose:
                print('reconstruction error={}, variation={}.'.format(
                    rec_errors[-1], rec_errors[-2] - rec_errors[-1]))

            if tol and abs(rec_errors[-2] - rec_errors[-1]) < tol:
                if verbose:
                    print('converged in {} iterations.'.format(iteration))
                break

    return core, factors
Example #22
    def __init__(self, rnaseq_matrices, ppi_data, order_labels=None, context_names=None, how='inner',
                 communication_score='expression_mean', complex_sep=None, upper_letter_comparison=True,
                 interaction_columns=('A', 'B'), group_ppi_by=None, group_ppi_method='gmean', device=None,
                 verbose=True):
        # Asserts
        if group_ppi_by is not None:
            assert group_ppi_by in ppi_data.columns, "Using {} for grouping PPIs is not possible. Not present among columns in ppi_data".format(group_ppi_by)

        # Init BaseTensor
        BaseTensor.__init__(self)

        # Generate expression values for protein complexes in PPI data
        if complex_sep is not None:
            if verbose:
                print('Getting expression values for protein complexes')
            col_a_genes, complex_a, col_b_genes, complex_b, complexes = get_genes_from_complexes(ppi_data=ppi_data,
                                                                                                 complex_sep=complex_sep,
                                                                                                 interaction_columns=interaction_columns
                                                                                                 )
            mod_rnaseq_matrices = [add_complexes_to_expression(rnaseq, complexes) for rnaseq in rnaseq_matrices]
        else:
            mod_rnaseq_matrices = [df.copy() for df in rnaseq_matrices]

        # Uppercase for Gene names
        if upper_letter_comparison:
            for df in mod_rnaseq_matrices:
                df.index = [idx.upper() for idx in df.index]

        # Deduplicate gene names
        mod_rnaseq_matrices = [df[~df.index.duplicated(keep='first')] for df in mod_rnaseq_matrices]

        # Get context CCC tensor
        tensor, genes, cells, ppi_names, mask = build_context_ccc_tensor(rnaseq_matrices=mod_rnaseq_matrices,
                                                                         ppi_data=ppi_data,
                                                                         how=how,
                                                                         communication_score=communication_score,
                                                                         complex_sep=complex_sep,
                                                                         upper_letter_comparison=upper_letter_comparison,
                                                                         interaction_columns=interaction_columns,
                                                                         group_ppi_by=group_ppi_by,
                                                                         group_ppi_method=group_ppi_method,
                                                                         verbose=verbose)

        # Generate names for the elements in each dimension (order) in the tensor
        if context_names is None:
            context_names = ['C-' + str(i) for i in range(1, len(mod_rnaseq_matrices)+1)]
            # For PPIs use ppi_names; for sender & receiver cells use self.cells

        # Save variables for this class
        self.communication_score = communication_score
        self.how = how
        if device is None:
            self.tensor = tl.tensor(tensor)
            self.mask = mask
        else:
            if tl.get_backend() == 'pytorch':
                self.tensor = tl.tensor(tensor, device=device)
                if mask is not None:
                    self.mask = tl.tensor(mask, device=device)
                else:
                    self.mask = mask
            else:
                self.tensor = tl.tensor(tensor)
                self.mask = mask
        self.genes = genes
        self.cells = cells
        self.order_labels = order_labels
        self.order_names = [context_names, ppi_names, self.cells, self.cells]
Example #23
def check():
    assert tl.get_backend() == 'pytorch'
    with tl.backend_context('numpy', local_threadsafe=True):
        assert tl.get_backend() == 'numpy'
    assert tl.get_backend() == 'pytorch'
Example #24
def initialize_nn_cp(tensor,
                     rank,
                     init='svd',
                     svd='numpy_svd',
                     random_state=None,
                     normalize_factors=False,
                     nntype='nndsvda'):
    r"""Initialize factors used in `parafac`.

    The type of initialization is set using `init`. If `init == 'random'` then
    initialize factor matrices using `random_state`. If `init == 'svd'` then
    initialize the `m`th factor matrix using the `rank` left singular vectors
    of the `m`th unfolding of the input tensor.

    Parameters
    ----------
    tensor : ndarray
    rank : int
    init : {'svd', 'random'}, optional
    svd : str, default is 'numpy_svd'
        function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
    nntype : {'nndsvd', 'nndsvda'}
        Whether to fill small values with 0.0 (nndsvd), or the tensor mean (nndsvda, default).

    Returns
    -------
    factors : CPTensor
        An initial cp tensor.

    """
    rng = tl.check_random_state(random_state)

    if init == 'random':
        kt = random_cp(tl.shape(tensor),
                       rank,
                       normalise_factors=False,
                       random_state=rng,
                       **tl.context(tensor))

    elif init == 'svd':
        try:
            svd_fun = tl.SVD_FUNS[svd]
        except KeyError:
            message = 'Got svd={}. However, for the current backend ({}), the possible choices are {}'.format(
                svd, tl.get_backend(), tl.SVD_FUNS)
            raise ValueError(message)

        factors = []
        for mode in range(tl.ndim(tensor)):
            U, S, V = svd_fun(unfold(tensor, mode), n_eigenvecs=rank)

            # Apply nnsvd to make non-negative
            U = make_svd_non_negative(tensor, U, S, V, nntype)

            if tensor.shape[mode] < rank:
                # TODO: this is a hack but it seems to do the job for now
                random_part = tl.tensor(
                    rng.random_sample(
                        (U.shape[0], rank - tl.shape(tensor)[mode])),
                    **tl.context(tensor))
                U = tl.concatenate([U, random_part], axis=1)

            factors.append(U[:, :rank])

        kt = CPTensor((None, factors))

    # If the initialisation is a precomputed decomposition, we double check its validity and return it
    elif isinstance(init, (tuple, list, CPTensor)):
        # TODO: Test this
        try:
            kt = CPTensor(init)
        except ValueError:
            raise ValueError(
                'If the initialization is a tuple, list, or CPTensor, it must '
                'be possible to convert it to a CPTensor instance')
        return kt
    else:
        raise ValueError(
            'Initialization method "{}" not recognized'.format(init))

    # Make decomposition feasible by taking the absolute value of all factor matrices
    kt.factors = [tl.abs(f) for f in kt[1]]

    if normalize_factors:
        kt = cp_normalize(kt)

    return kt
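A minimal usage sketch for `initialize_nn_cp`, assuming it is importable (in the tensorly source it sits next to the non-negative CP routines; the exact path varies by version):

import numpy as np
import tensorly as tl
# Assumed, version-dependent path:
# from tensorly.decomposition._nn_cp import initialize_nn_cp

tensor = tl.tensor(np.random.rand(4, 5, 6))
kt = initialize_nn_cp(tensor, rank=3, init='svd', nntype='nndsvda')
weights, factors = kt
for f, expected in zip(factors, [(4, 3), (5, 3), (6, 3)]):
    assert tuple(tl.shape(f)) == expected
    assert tl.min(f) >= 0  # non-negative by construction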
Example #25
0
def initialize_cp(tensor,
                  rank,
                  init='svd',
                  svd='numpy_svd',
                  random_state=None,
                  normalize_factors=False):
    r"""Initialize factors used in `parafac`.

    The type of initialization is set using `init`. If `init == 'random'` then
    initialize factor matrices with uniform distribution using `random_state`. If `init == 'svd'` then
    initialize the `m`th factor matrix using the `rank` left singular vectors
    of the `m`th unfolding of the input tensor. If `init` is a previously computed `CPTensor`, its
    weights are absorbed into the factor matrices (each factor is scaled by the geometric mean of
    the weights) and the weights of the output tensor are set to 1.

    Parameters
    ----------
    tensor : ndarray
    rank : int
    init : {'svd', 'random', cptensor}, optional
    svd : str, default is 'numpy_svd'
        function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
    random_state : {None, int, np.random.RandomState}, optional
    normalize_factors : bool, default is False
        if True, the returned CP tensor is normalized with `cp_normalize`

    Returns
    -------
    factors : CPTensor
        An initial cp tensor.

    """
    rng = tl.check_random_state(random_state)

    if init == 'random':
        kt = random_cp(tl.shape(tensor),
                       rank,
                       normalise_factors=False,
                       random_state=rng,
                       **tl.context(tensor))

    elif init == 'svd':
        try:
            svd_fun = tl.SVD_FUNS[svd]
        except KeyError:
            message = 'Got svd={}. However, for the current backend ({}), the possible choices are {}'.format(
                svd, tl.get_backend(), tl.SVD_FUNS)
            raise ValueError(message)

        factors = []
        for mode in range(tl.ndim(tensor)):
            U, S, _ = svd_fun(unfold(tensor, mode), n_eigenvecs=rank)

            # Put SVD initialization on the same scaling as the tensor in case normalize_factors=False
            if mode == 0:
                idx = min(rank, tl.shape(S)[0])
                U = tl.index_update(U, tl.index[:, :idx], U[:, :idx] * S[:idx])

            if tensor.shape[mode] < rank:
                # TODO: this is a hack but it seems to do the job for now
                random_part = tl.tensor(
                    rng.random_sample(
                        (U.shape[0], rank - tl.shape(tensor)[mode])),
                    **tl.context(tensor))
                U = tl.concatenate([U, random_part], axis=1)

            factors.append(U[:, :rank])

        kt = CPTensor((None, factors))

    elif isinstance(init, (tuple, list, CPTensor)):
        # TODO: Test this
        try:
            if normalize_factors is True:
                warnings.warn(
                    'It is not recommended to normalize factors when initializing '
                    'from a precomputed CPTensor; consider normalizing the tensor '
                    'before calling this function.'
                )

            kt = CPTensor(init)
            weights, factors = kt

            if tl.all(weights == 1):
                kt = CPTensor((None, factors))
            else:
                # Absorb the weights into the factors: scale every factor by the
                # geometric mean of the weights, then reset the weights to one.
                weights_avg = tl.prod(weights)**(1.0 / tl.shape(weights)[0])
                for i in range(len(factors)):
                    factors[i] = factors[i] * weights_avg
                kt = CPTensor((None, factors))
        except ValueError:
            raise ValueError(
                'If the initialization is a tuple, list, or CPTensor, it must '
                'be possible to convert it to a CPTensor instance')
    else:
        raise ValueError(
            'Initialization method "{}" not recognized'.format(init))

    if normalize_factors:
        kt = cp_normalize(kt)

    return kt
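A short sketch of the precomputed-initialization branch above: a previously computed `CPTensor` can be passed as `init`, and its weights are folded back into the factor matrices (assumes `initialize_cp` is importable; the path is version-dependent):

import numpy as np
import tensorly as tl
from tensorly.random import random_cp
# Assumed, version-dependent path:
# from tensorly.decomposition._cp import initialize_cp

tensor = tl.tensor(np.random.rand(4, 5, 6))
previous = random_cp((4, 5, 6), rank=3, **tl.context(tensor))  # stand-in for an earlier decomposition
kt = initialize_cp(tensor, rank=3, init=previous)
weights, factors = kt
assert tl.all(weights == 1)  # weights were absorbed into the factors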
Example #26
0
import numpy as np

import tensorly as tl
from ...random import random_kruskal, check_random_state
from ..robust_decomposition import robust_pca
from ...testing import assert_array_equal, assert_, assert_array_almost_equal

# TODO(craymichael)
import pytest
@pytest.mark.xfail(tl.get_backend() == 'tensorflow_graph', reason='Fails on tensorflow graph')
def test_RPCA():
    """Test for RPCA"""
    tol = 1e-5

    sample = np.array([[1., 2, 3, 4],
                       [2, 4, 6, 8]])
    clean = np.vstack([sample[None, ...]]*100)
    noise_probability = 0.05
    rng = check_random_state(12345)
    noise = rng.choice([0., 100., -100.], size=clean.shape, replace=True,
                       p=[1 - noise_probability, noise_probability/2, noise_probability/2])
    tensor = tl.tensor(clean + noise)
    corrupted_clean = np.copy(clean)
    corrupted_noise = np.copy(noise)
    clean = tl.tensor(clean)
    noise = tl.tensor(noise)
    clean_pred, noise_pred = robust_pca(tensor, mask=None, reg_E=0.4, mu_max=10e12,
                                        learning_rate=1.2,
                                        n_iter_max=200, tol=tol, verbose=True)
    # check recovery
    assert_array_almost_equal(tensor, clean_pred+noise_pred, decimal=5)  # `decimal` counts digits: tol = 1e-5 corresponds to 5 decimal places
Example #27
0
def initialize_cp(tensor,
                  rank,
                  init='svd',
                  svd='numpy_svd',
                  random_state=None,
                  non_negative=False,
                  normalize_factors=False):
    r"""Initialize factors used in `parafac`.
    The type of initialization is set using `init`. If `init == 'random'` then
    initialize factor matrices using `random_state`. If `init == 'svd'` then
    initialize the `m`th factor matrix using the `rank` left singular vectors
    of the `m`th unfolding of the input tensor.
    Parameters
    ----------
    tensor : ndarray
    rank : int
    init : {'svd', 'random'}, optional
    svd : str, default is 'numpy_svd'
        function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
    non_negative : bool, default is False
        if True, non-negative factors are returned
    Returns
    -------
    factors : CPTensor
        An initial cp tensor.
    """
    rng = check_random_state(random_state)

    if init == 'random':
        # Fall through (rather than returning early) so the non_negative and
        # normalize_factors options below also apply to random initialization.
        kt = random_cp(tl.shape(tensor),
                       rank,
                       normalise_factors=False,
                       random_state=rng,
                       **tl.context(tensor))

    elif init == 'svd':
        try:
            svd_fun = tl.SVD_FUNS[svd]
        except KeyError:
            message = 'Got svd={}. However, for the current backend ({}), the possible choices are {}'.format(
                svd, tl.get_backend(), tl.SVD_FUNS)
            raise ValueError(message)

        factors = []
        for mode in range(tl.ndim(tensor)):
            U, S, _ = svd_fun(unfold(tensor, mode), n_eigenvecs=rank)

            # Put SVD initialization on the same scaling as the tensor in case normalize_factors=False
            if mode == 0:
                idx = min(rank, tl.shape(S)[0])
                U = tl.index_update(U, tl.index[:, :idx], U[:, :idx] * S[:idx])

            if tensor.shape[mode] < rank:
                # TODO: this is a hack but it seems to do the job for now
                random_part = tl.tensor(
                    rng.random_sample(
                        (U.shape[0], rank - tl.shape(tensor)[mode])),
                    **tl.context(tensor))
                U = tl.concatenate([U, random_part], axis=1)

            factors.append(U[:, :rank])

        kt = CPTensor((None, factors))

    elif isinstance(init, (tuple, list, CPTensor)):
        # TODO: Test this
        try:
            kt = CPTensor(init)
        except ValueError:
            raise ValueError(
                'If the initialization is a tuple, list, or CPTensor, it must '
                'be possible to convert it to a CPTensor instance')
    else:
        raise ValueError(
            'Initialization method "{}" not recognized'.format(init))

    if non_negative:
        kt.factors = [tl.abs(f) for f in kt[1]]

    if normalize_factors:
        kt = cp_normalize(kt)

    return kt
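Under the same hedged import assumptions as the earlier sketches, a quick check of the `non_negative` flag in this variant, which simply takes the absolute value of every initial factor:

kt = initialize_cp(tl.tensor(np.random.rand(4, 5, 6)), rank=3,
                   init='random', non_negative=True)
assert all(tl.min(f) >= 0 for f in kt.factors)  # tl.abs applied to each factor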
Example #28
0
import tensorly as tl

import pytest
import numpy as np
import itertools
import numpy.random as npr

from ..mps_decomposition_cross import matrix_product_state_cross
from ....mps_tensor import mps_to_tensor
from ....random import check_random_state
from tensorly.testing import assert_

skip_if_tensorflow = pytest.mark.skipif(tl.get_backend() == "tensorflow",
                                        reason="Operation not supported in TensorFlow")

@skip_if_tensorflow
def test_matrix_product_state_cross_1():
    """ Test for matrix_product_state """
    rng = check_random_state(1234)

    ## Test 1

    # Create a tensor with linearly increasing entries
    d = 3
    n = 4
    tensor = (np.arange(n**d).reshape((n,)*d))
    tensor = tl.tensor(tensor)


    tensor_shape = tensor.shape
Example #29
0
import tensorly as tl

import pytest
import numpy as np
import itertools
import numpy.random as npr

from ..mps_decomposition_cross import matrix_product_state_cross
from ....mps_tensor import mps_to_tensor
from ....random import check_random_state
from tensorly.testing import assert_

skip_if_tensorflow = pytest.mark.skipif(
    tl.get_backend() == "tensorflow",
    reason="Operation not supported in TensorFlow")
skip_if_jax = pytest.mark.skipif(tl.get_backend() == "jax",
                                 reason="Operation not supported in JAX")


@skip_if_jax
@skip_if_tensorflow
def test_matrix_product_state_cross_1():
    """ Test for matrix_product_state """
    rng = check_random_state(1234)

    ## Test 1

    # Create a tensor with linearly increasing entries
    d = 3
    n = 4
    tensor = (np.arange(n**d).reshape((n, ) * d))
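The test above is cut off after building the tensor; the indented sketch below is a hypothetical continuation showing how `matrix_product_state_cross` is typically called (ranks and tolerances are illustrative, not taken from the original test):

    # Hypothetical continuation, not the original test body.
    tensor = tl.tensor(tensor)
    rank = [1, 2, 2, 1]  # TT ranks; the boundary ranks must be 1
    factors = matrix_product_state_cross(tensor, rank, tol=1e-5)
    approx = mps_to_tensor(factors)
    rel_error = tl.norm(approx - tensor, 2) / tl.norm(tensor, 2)
    assert_(rel_error < 1e-4)  # illustrative tolerance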
Example #30
0
import numpy as np

from ..tucker_regression import TuckerRegressor
from ...base import tensor_to_vec, partial_tensor_to_vec
from ...metrics.regression import RMSE
from ... import backend as T
from ...testing import assert_

# TODO(craymichael)
import pytest
import tensorly as tl


@pytest.mark.xfail(tl.get_backend() == 'tensorflow_graph',
                   reason='Fails on tensorflow graph')
def test_TuckerRegressor():
    """Test for TuckerRegressor"""

    # Parameter of the experiment
    image_height = 10
    image_width = 10
    n_channels = 3
    ranks = [5, 5, 2]
    tol = 0.05

    # Generate random samples
    X = T.tensor(
        np.random.normal(size=(1200, image_height, image_width, n_channels),
                         loc=0,
                         scale=1))
    regression_weights = np.zeros((image_height, image_width, n_channels))