Code example #1
File: outer_product.py Project: sz144/tensorly
def outer(tensors):
    """Returns a generalized outer product of the two tensors
    
    Parameters 
    ----------
    tensor1 : tensor
        of shape (J1, ..., JN)
    tensor2 : tensor
        of shape (K1, ..., KM)
        
    Returns
    -------
    outer product of tensor1 and tensor2 
        of shape (J1, ..., JN, K1, ..., KM)
    """
    for i, tensor in enumerate(tensors):
        if i:
            shape = tl.shape(tensor)
            s1 = len(shape)

            shape_1 = shape_res + (1, ) * s1
            shape_2 = (1, ) * sres + shape

            res = tl.reshape(res, shape_1) * tl.reshape(tensor, shape_2)
        else:
            res = tensor

        shape_res = tl.shape(res)
        sres = len(shape_res)

    return res
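
A minimal usage sketch, assuming numpy and tensorly are imported as np and tl, as in the source file:

import numpy as np
import tensorly as tl

t1 = tl.tensor(np.random.rand(2, 3))  # shape (J1, J2)
t2 = tl.tensor(np.random.rand(4))     # shape (K1,)
res = outer([t1, t2])
assert tl.shape(res) == (2, 3, 4)     # (J1, J2, K1)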
Code example #2
def batched_tensor_dot(tensor1, tensor2):
    """Returns a generalized outer product of the two tensors
    
    Parameters 
    ----------
    tensor1 : tensor
        of shape (n_samples, J1, ..., JN)
    tensor2 : tensor
        of shape (n_samples, K1, ..., KM)
        
    Returns
    -------
    outer product of tensor1 and tensor2 
        of shape (n_samples, J1, ..., JN, K1, ..., KM)
    """
    shape_1 = tl.shape(tensor1)
    s1 = len(shape_1) - 1
    shape_2 = tl.shape(tensor2)
    s2 = len(shape_2) - 1

    n_samples = shape_2[0]

    if n_samples != shape_1[0]:
        raise ValueError(
            f'tensor1 has a batch-size of {shape_1[0]} but tensor2 has a batch-size of {n_samples}; '
            'tensor1 and tensor2 should have the same batch-size.')

    shape_1 = shape_1 + (1, ) * s2
    shape_2 = (n_samples, ) + (1, ) * s1 + shape_2[1:]

    return tl.reshape(tensor1, shape_1) * tl.reshape(tensor2, shape_2)
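
A quick sanity check of the batched version, a sketch under the same import assumptions (np, tl):

t1 = tl.tensor(np.random.rand(5, 2, 3))  # (n_samples, J1, J2)
t2 = tl.tensor(np.random.rand(5, 4))     # (n_samples, K1)
res = batched_tensor_dot(t1, t2)
assert tl.shape(res) == (5, 2, 3, 4)     # (n_samples, J1, J2, K1)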
Code example #3
def svd_init_fac(tensor, rank):
    """
    svd initialization of factor matrices for a given tensor and rank
    
    Parameters
    ----------
    tensor : tensor
    rank : int

    Returns
    -------
    factors : list of matrices

  """
    factors = []
    for mode in range(tl.ndim(tensor)):
        # unfolding of a given mode
        unfolded = tl.unfold(tensor, mode)
        if rank <= tl.shape(tensor)[mode]:
            u, s, v = tl.partial_svd(
                unfolded,
                n_eigenvecs=rank)  # first rank singular vectors/values (in decreasing order)
        else:
            u, s, v = tl.partial_svd(unfolded,
                                     n_eigenvecs=tl.shape(tensor)[mode])
            # completed by random columns
            u = np.append(u,
                          np.random.random(
                              (np.shape(u)[0], rank - tl.shape(tensor)[mode])),
                          axis=1)
            # the random padding helps avoid singular-matrix errors in ALS
        factors += [u]
    return factors
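
A short usage sketch, assuming the source file's imports (numpy as np, tensorly as tl). When rank exceeds a mode's size, the factor is padded with random columns:

tensor = tl.tensor(np.random.rand(4, 5, 2))
factors = svd_init_fac(tensor, rank=3)
for mode, f in enumerate(factors):
    # every factor is (mode size, rank), even for mode 2 where rank 3 > size 2
    assert np.shape(f) == (tl.shape(tensor)[mode], 3)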
Code example #4
def monotonicity_prox(tensor, decreasing=False):
    """
    This function projects each column of the input array on the set of arrays so that
          x[1] <= x[2] <= ... <= x[n] (decreasing=False)
                        or
          x[1] >= x[2] >= ... >= x[n] (decreasing=True)
    is satisfied columnwise.

    Parameters
    ----------
    tensor : ndarray
    decreasing : bool, optional
        If True, the returned tensor is columnwise monotone
        decreasing. Otherwise, the returned tensor is columnwise
        monotone increasing.
        Default: False

    Returns
    -------
    ndarray
          A tensor whose columns are monotonic.

    References
    ----------
    .. [1]: G. Chierchia, E. Chouzenoux, P. L. Combettes, and J.-C. Pesquet
            "The Proximity Operator Repository. User's guide"
    """
    if tl.ndim(tensor) == 1:
        tensor = tl.reshape(tensor, [tl.shape(tensor)[0], 1])
    elif tl.ndim(tensor) > 2:
        raise ValueError(
            "Monotonicity prox doesn't support an input which has more than 2 dimensions."
        )
    tensor_mon = tl.copy(tensor)
    if decreasing:
        tensor_mon = tl.flip(tensor_mon, axis=0)
    row, column = tl.shape(tensor_mon)
    cum_sum = tl.cumsum(tensor_mon, axis=0)
    for j in range(column):
        assisted_tensor = tl.zeros([row, row])
        for i in range(row):
            if i == 0:
                assisted_tensor = tl.index_update(
                    assisted_tensor, tl.index[i, i:], cum_sum[i:, j] /
                    tl.tensor(tl.arange(row - i) + 1, **tl.context(tensor)))
            else:
                assisted_tensor = tl.index_update(
                    assisted_tensor, tl.index[i, i:],
                    (cum_sum[i:, j] - cum_sum[i - 1, j]) /
                    tl.tensor(tl.arange(row - i) + 1, **tl.context(tensor)))
        tensor_mon = tl.index_update(tensor_mon, tl.index[:, j],
                                     tl.max(assisted_tensor, axis=0))
        for i in reversed(range(row - 1)):
            if tensor_mon[i, j] > tensor_mon[i + 1, j]:
                tensor_mon = tl.index_update(tensor_mon, tl.index[i, j],
                                             tensor_mon[i + 1, j])
    if decreasing:
        tensor_mon = tl.flip(tensor_mon, axis=0)
    return tensor_mon
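
A small numerical sketch (numpy backend and np/tl imports assumed): each column is replaced by its closest non-decreasing sequence:

x = tl.tensor(np.array([[3.0, 1.0],
                        [1.0, 2.0],
                        [2.0, 0.0]]))
y = monotonicity_prox(x)
assert tl.all(y[1:, :] >= y[:-1, :])  # columnwise non-decreasing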
Code example #5
def symmetric_parafac_power_iteration(tensor,
                                      rank,
                                      n_repeat=10,
                                      n_iteration=10,
                                      verbose=False):
    """Symmetric CP Decomposition via Robust Symmetric Tensor Power Iteration

    Parameters
    ----------
    tensor : tl.tensor
        input tensor to decompose, must be symmetric of shape (size, )*order
    rank : int
        rank of the decomposition (number of rank-1 components)
    n_repeat : int, default is 10
        number of initializations to be tried
    n_iteration : int, default is 10
        number of power iterations
    verbose : bool
        level of verbosity

    Returns
    -------
    (weights, factor)

    weights : 1-D tl.tensor of length `rank`
        contains the eigenvalue of each eigenvector
    factor : 2-D tl.tensor of shape (size, rank)
        each column corresponds to one eigenvector
    """
    rank = validate_cp_rank(tl.shape(tensor), rank=rank)

    order = tl.ndim(tensor)
    size = tl.shape(tensor)[0]

    if not tl.shape(tensor) == (size, ) * order:
        raise ValueError(
            'The input tensor does not have the same size along each mode.')

    factor = []
    weights = []

    for _ in range(rank):
        eigenval, eigenvec, deflated = symmetric_power_iteration(
            tensor,
            n_repeat=n_repeat,
            n_iteration=n_iteration,
            verbose=verbose)
        factor.append(eigenvec)
        weights.append(eigenval)
        tensor = deflated

    factor = tl.stack(factor, axis=1)
    weights = tl.stack(weights)

    return weights, factor
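
A hedged usage sketch (np/tl imports and the module's symmetric_power_iteration assumed): build a rank-1 symmetric tensor and recover its single component:

u = np.random.rand(5)
sym = tl.tensor(np.einsum('i,j,k->ijk', u, u, u))  # symmetric rank-1 tensor
weights, factor = symmetric_parafac_power_iteration(sym, rank=1)
assert tl.shape(factor) == (5, 1)
assert tl.shape(weights) == (1, )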
Code example #6
def test_tt_matrix_manipulation():
    """Test for tt_matrix manipulation"""
    shape = (2, 2, 2, 3, 3, 3)
    tt_matrix = random.random_tt_matrix(shape, rank=2, full=False)
    rec = tt_matrix_to_tensor(tt_matrix)
    assert (tl.shape(rec) == shape)

    mat = tt_matrix_to_matrix(tt_matrix)
    assert (tl.shape(mat) == (8, 27))

    vec = tt_matrix_to_vec(tt_matrix)
    assert (tl.shape(vec) == (8 * 27, ))
Code example #7
File: contraction.py Project: wagner-s/tensorly
def contract(tensor1, modes1, tensor2, modes2):
    """Tensor contraction between two tensors on specified modes
    
    Parameters
    ----------
    tensor1 : tl.tensor
    modes1 : int list or int
        modes on which to contract tensor1
    tensor2 : tl.tensor
    modes2 : int list or int
        modes on which to contract tensor2

    Returns
    -------
    contraction : tensor1 contracted with tensor2 on the specified modes
    """
    if isinstance(modes1, int):
        modes1 = [modes1]
    if isinstance(modes2, int):
        modes2 = [modes2]
    modes1 = list(modes1)
    modes2 = list(modes2)

    if len(modes1) != len(modes2):
        raise ValueError(
            'Can only contract two tensors along the same number of modes '
            '(len(modes1) == len(modes2)). '
            'However, got {} modes for tensor 1 and {} modes for tensor 2 '
            '(modes1={}, and modes2={}).'.format(len(modes1), len(modes2),
                                                 modes1, modes2))

    contraction_dims = [tl.shape(tensor1)[i] for i in modes1]
    if contraction_dims != [tl.shape(tensor2)[i] for i in modes2]:
        raise ValueError(
            'Trying to contract tensors over modes of different sizes '
            '(contracting modes of sizes {} and {}).'.format(
                contraction_dims, [tl.shape(tensor2)[i] for i in modes2]))
    shared_dim = int(np.prod(contraction_dims))

    modes1_free = [i for i in range(tl.ndim(tensor1)) if i not in modes1]
    free_shape1 = [tl.shape(tensor1)[i] for i in modes1_free]

    tensor1 = tl.reshape(tl.transpose(tensor1, modes1_free + modes1),
                         (int(np.prod(free_shape1)), shared_dim))

    modes2_free = [i for i in range(tl.ndim(tensor2)) if i not in modes2]
    free_shape2 = [tl.shape(tensor2)[i] for i in modes2_free]

    tensor2 = tl.reshape(tl.transpose(tensor2, modes2 + modes2_free),
                         (shared_dim, int(np.prod(free_shape2))))

    res = tl.dot(tensor1, tensor2)
    return tl.reshape(res, tuple(free_shape1 + free_shape2))
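
A usage sketch (np/tl imports assumed): contracting mode 1 of a third-order tensor with mode 0 of a matrix:

t1 = tl.tensor(np.random.rand(3, 4, 5))
t2 = tl.tensor(np.random.rand(4, 6))
res = contract(t1, 1, t2, 0)
assert tl.shape(res) == (3, 5, 6)  # free modes of t1, then free modes of t2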
Code example #8
File: tt_matrix.py Project: sz144/tensorly
def _validate_tt_matrix(tt_tensor):
    factors = tt_tensor
    n_factors = len(factors)

    if n_factors < 1:
        raise ValueError(
            'A Tensor-Train (MPS) tensor should be composed of at least one factor. '
            'However, {} factors were given.'.format(n_factors))

    rank = []
    left_shape = []
    right_shape = []
    for index, factor in enumerate(factors):
        current_rank, current_left_shape, current_right_shape, next_rank = tl.shape(
            factor)

        # Check that factors are fourth order tensors
        if not tl.ndim(factor) == 4:
            raise ValueError(
                'A TTMatrix expresses a tensor as fourth order factors (tt-cores).\n'
                'However, tl.ndim(factors[{}]) = {}'.format(
                    index, tl.ndim(factor)))
        # Consecutive factors should have matching ranks
        if index and tl.shape(factors[index - 1])[-1] != current_rank:
            raise ValueError(
                'Consecutive factors should have matching ranks\n'
                ' -- e.g. tl.shape(factors[0])[-1]) == tl.shape(factors[1])[0])\n'
                'However, tl.shape(factor[{}])[-1] == {} but'
                ' tl.shape(factor[{}])[0] == {} '.format(
                    index - 1,
                    tl.shape(factors[index - 1])[-1], index, current_rank))
        # Check for boundary conditions
        if (index == 0) and current_rank != 1:
            raise ValueError(
                'Boundary conditions dictate factor[0].shape[0] == 1. '
                'However, got factor[0].shape[0] = {}.'.format(current_rank))
        if (index == n_factors - 1) and next_rank != 1:
            raise ValueError(
                'Boundary conditions dictate factor[-1].shape[-1] == 1. '
                'However, got factor[{}].shape[-1] = {}.'.format(
                    n_factors - 1, next_rank))

        left_shape.append(current_left_shape)
        right_shape.append(current_right_shape)

        rank.append(current_rank)

    # Add last rank (boundary condition)
    rank.append(next_rank)

    return tuple(left_shape) + tuple(right_shape), tuple(rank)
Code example #9
File: test_tucker.py Project: sz144/tensorly
def test_non_negative_tucker(monkeypatch):
    """Test for non-negative Tucker"""
    rng = tl.check_random_state(1234)

    tol_norm_2 = 10e-1
    tol_max_abs = 10e-1
    tensor = tl.tensor(rng.random_sample((3, 4, 3)) + 1)
    core, factors = tucker(tensor, rank=[3, 4, 3], n_iter_max=200, verbose=1)
    nn_core, nn_factors = non_negative_tucker(tensor, rank=[3, 4, 3], n_iter_max=100)

    # Make sure all components are positive
    for factor in nn_factors:
        assert_(tl.all(factor >= 0))
    assert_(tl.all(nn_core >= 0))

    reconstructed_tensor = tucker_to_tensor((core, factors))
    nn_reconstructed_tensor = tucker_to_tensor((nn_core, nn_factors))
    error = tl.norm(reconstructed_tensor - nn_reconstructed_tensor, 2)
    error /= tl.norm(reconstructed_tensor, 2)
    assert_(error < tol_norm_2,
            'norm 2 of reconstruction error higher than tol')

    # Test the max abs difference between the reconstruction and the tensor
    assert_(tl.norm(reconstructed_tensor - nn_reconstructed_tensor, 'inf') < tol_max_abs,
              'abs norm of reconstruction error higher than tol')

    core_svd, factors_svd = non_negative_tucker(tensor, rank=[3, 4, 3], n_iter_max=500, init='svd', verbose=1)
    core_random, factors_random = non_negative_tucker(tensor, rank=[3, 4, 3], n_iter_max=200, init='random', random_state=1234)
    rec_svd = tucker_to_tensor((core_svd, factors_svd))
    rec_random = tucker_to_tensor((core_random, factors_random))
    error = tl.norm(rec_svd - rec_random, 2)
    error /= tl.norm(rec_svd, 2)
    assert_(error < tol_norm_2,
            'norm 2 of difference between svd and random init too high')
    assert_(tl.norm(rec_svd - rec_random, 'inf') < tol_max_abs,
            'abs norm of difference between svd and random init too high')

    # Test for a single rank passed
    # (should be used for all modes)
    rank = 3
    target_shape = (rank, )*tl.ndim(tensor)
    core, factors = non_negative_tucker(tensor, rank=rank)
    assert_(tl.shape(core) == target_shape, 'core has the wrong shape, got {}, but expected {}.'.format(tl.shape(core), target_shape))
    for i, f in enumerate(factors):
        expected_shape = (tl.shape(tensor)[i], rank)
        assert_(tl.shape(f) == expected_shape, '{}-th factor has the wrong shape, got {}, but expected {}.'.format(
                i, tl.shape(f), expected_shape))

    assert_class_wrapper_correctly_passes_arguments(monkeypatch, non_negative_tucker, Tucker_NN, ignore_args={'return_errors'}, rank=3)
Code example #10
def test_tucker_dropout():
    """Test for Tucker Dropout"""
    shape = (10, 11, 12)
    rank = (7, 8, 9)
    tensor = FactorizedTensor.new(shape, rank=rank, factorization='Tucker')
    tensor = tensor_dropout(tensor, 1)
    core = tensor().core
    assert (tl.shape(core) == (1, 1, 1))

    remove_tensor_dropout(tensor)
    assert (not tensor._forward_hooks)

    tensor = tensor_dropout(tensor, 0)
    core = tensor().core
    assert (tl.shape(core) == rank)
Code example #11
File: tt_tensor.py Project: ninasiam/tensorly
def _validate_tt_tensor(tt_tensor):
    # check scalars / already-validated instances before calling len()
    if isinstance(tt_tensor, TTTensor):
        # it's already been validated at creation
        return tt_tensor.shape, tt_tensor.rank
    elif isinstance(tt_tensor, (float, int)):  # 0-order tensor
        return 0, 0

    factors = tt_tensor
    n_factors = len(factors)

    rank = []
    shape = []
    for index, factor in enumerate(factors):
        current_rank, current_shape, next_rank = tl.shape(factor)

        # Check that factors are third order tensors
        if not tl.ndim(factor) == 3:
            raise ValueError(
                'TT expresses a tensor as third order factors (tt-cores).\n'
                'However, tl.ndim(factors[{}]) = {}'.format(
                    index, tl.ndim(factor)))
        # Consecutive factors should have matching ranks
        if index and tl.shape(factors[index - 1])[2] != current_rank:
            raise ValueError(
                'Consecutive factors should have matching ranks\n'
                ' -- e.g. tl.shape(factors[0])[2]) == tl.shape(factors[1])[0])\n'
                'However, tl.shape(factor[{}])[2] == {} but'
                ' tl.shape(factor[{}])[0] == {} '.format(
                    index - 1,
                    tl.shape(factors[index - 1])[2], index, current_rank))
        # Check for boundary conditions
        if (index == 0) and current_rank != 1:
            raise ValueError(
                'Boundary conditions dictate factor[0].shape[0] == 1. '
                'However, got factor[0].shape[0] = {}.'.format(current_rank))
        if (index == n_factors - 1) and next_rank != 1:
            raise ValueError(
                'Boundary conditions dictate factor[-1].shape[2] == 1. '
                'However, got factor[{}].shape[2] = {}.'.format(
                    n_factors - 1, next_rank))

        shape.append(current_shape)
        rank.append(current_rank)

    # Add last rank (boundary condition)
    rank.append(next_rank)

    return tuple(shape), tuple(rank)
Code example #12
def simplex_prox(tensor, parameter):
    """
    Projects the input tensor on the simplex of radius parameter.

    Parameters
    ----------
    tensor : ndarray
    parameter : float

    Returns
    -------
    ndarray

    References
    ----------
    .. [1]: Held, Michael, Philip Wolfe, and Harlan P. Crowder.
            "Validation of subgradient optimization."
            Mathematical programming 6.1 (1974): 62-88.
    """
    _, col = tl.shape(tensor)
    tensor = tl.clip(tensor, 0, tl.max(tensor))
    tensor_sort = tl.sort(tensor, axis=0, descending=True)

    to_change = tl.sum(tl.where(
        tensor_sort > (tl.cumsum(tensor_sort, axis=0) - parameter), 1.0, 0.0),
                       axis=0)
    difference = tl.zeros(col)
    for i in range(col):
        if to_change[i] > 0:
            difference = tl.index_update(
                difference, tl.index[i],
                tl.cumsum(tensor_sort, axis=0)[int(to_change[i] - 1), i])
    difference = (difference - parameter) / to_change
    return tl.clip(tensor - difference, a_min=0)
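
A numerical sketch (numpy backend assumed): after the projection, every column is non-negative and sums to the radius:

x = tl.tensor(np.array([[0.3, 2.0],
                        [0.4, 1.0],
                        [0.5, 0.5]]))
y = simplex_prox(x, 1.0)
assert np.allclose(tl.to_numpy(tl.sum(y, axis=0)), 1.0)
assert tl.all(y >= 0)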
Code example #13
def initialize_factors(tensor, rank, init='svd', svd='numpy_svd', random_state=None, non_negative=False):
    r"""Initialize factors used in `parafac`.

    The type of initialization is set using `init`. If `init == 'random'` then
    initialize factor matrices using `random_state`. If `init == 'svd'` then
    initialize the `m`th factor matrix using the `rank` left singular vectors
    of the `m`th unfolding of the input tensor.

    Parameters
    ----------
    tensor : ndarray
    rank : int
    init : {'svd', 'random'}, optional
    svd : str, default is 'numpy_svd'
        function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
    random_state : {None, int, numpy.random.RandomState}, optional
        seed used when init == 'random'
    non_negative : bool, default is False
        if True, non-negative factors are returned

    Returns
    -------
    factors : ndarray list
        List of initialized factors of the CP decomposition where element `i`
        is of shape (tensor.shape[i], rank)

    """
    rng = check_random_state(random_state)

    if init == 'random':
        factors = [tl.tensor(rng.random_sample((tensor.shape[i], rank)), **tl.context(tensor)) for i in range(tl.ndim(tensor))]
        if non_negative:
            return [tl.abs(f) for f in factors]
        else:
            return factors

    elif init == 'svd':
        try:
            svd_fun = tl.SVD_FUNS[svd]
        except KeyError:
            message = 'Got svd={}. However, for the current backend ({}), the possible choices are {}'.format(
                    svd, tl.get_backend(), tl.SVD_FUNS)
            raise ValueError(message)

        factors = []
        for mode in range(tl.ndim(tensor)):
            U, _, _ = svd_fun(unfold(tensor, mode), n_eigenvecs=rank)

            if tensor.shape[mode] < rank:
                # TODO: this is a hack but it seems to do the job for now
                # factor = tl.tensor(np.zeros((U.shape[0], rank)), **tl.context(tensor))
                # factor[:, tensor.shape[mode]:] = tl.tensor(rng.random_sample((U.shape[0], rank - tl.shape(tensor)[mode])), **tl.context(tensor))
                # factor[:, :tensor.shape[mode]] = U
                random_part = tl.tensor(rng.random_sample((U.shape[0], rank - tl.shape(tensor)[mode])), **tl.context(tensor))
                U = tl.concatenate([U, random_part], axis=1)
            if non_negative:
                factors.append(tl.abs(U[:, :rank]))
            else:
                factors.append(U[:, :rank])
        return factors

    raise ValueError('Initialization method "{}" not recognized'.format(init))
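
A brief usage sketch, assuming the source file's imports (tensorly as tl, numpy as np, check_random_state):

tensor = tl.tensor(np.random.rand(4, 5, 6))
factors = initialize_factors(tensor, rank=3, init='random', random_state=1234)
for i, f in enumerate(factors):
    assert tl.shape(f) == (tl.shape(tensor)[i], 3)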
Code example #14
def linear_blocktt(tensor, tt_matrix, transpose=True):
    if transpose:
        contraction_axis = 1
    else:
        contraction_axis = 0
    ndim = len(tt_matrix.tensorized_shape[contraction_axis])
    tensor = tensor.reshape(-1, *tt_matrix.tensorized_shape[contraction_axis])

    bs = 'a'
    start = ord(bs) + 1
    in_idx = bs + ''.join(chr(i) for i in [start + i for i in range(ndim)])
    factors_idx = []
    for i in range(ndim):
        if transpose:
            idx = [
                start + ndim * 2 + i, start + ndim + i, start + i,
                start + ndim * 2 + i + 1
            ]
        else:
            idx = [
                start + ndim * 2 + i, start + i, start + ndim + i,
                start + ndim * 2 + i + 1
            ]
        factors_idx.append(''.join(chr(j) for j in idx))
    out_idx = bs + ''.join(
        chr(i) for i in [start + ndim + i for i in range(ndim)])
    eq = in_idx + ',' + ','.join(i for i in factors_idx) + '->' + out_idx
    res = tl.einsum(eq, tensor, *tt_matrix.factors)
    return tl.reshape(res, (tl.shape(res)[0], -1))
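
For intuition, tracing the string construction by hand for ndim = 2 and transpose=True gives the einsum equation

    eq == 'abc,fdbg,gech->ade'

where 'a' is the kept batch index, 'b' and 'c' are the input indices contracted against each TT-matrix core, 'd' and 'e' are the output indices that survive into the result, and 'f', 'g', 'h' are the TT-ranks threaded between consecutive cores.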
Code example #15
File: _cprand.py Project: cohenjer/PIRS8
def err_rand_fast(tensor, A, V, W, indices_list, nb_samples=None):
    """
    Randomized version of err_fast, as used for ALS in CPRAND

    Parameters
    ----------
    tensor : tensor
    A : matrix
        factor matrix
    V : matrix
        random matrix V defined as in CPRAND
    W : matrix
        random matrix W defined as in CPRAND
    indices_list : tuple
        indices list used for V and W
    nb_samples : int, optional
        sample size. The default is None.

    Returns
    -------
    error estimation, used indices list

  """
    # randomised tensor norm
    norm_tensor = tl.norm(tensor[indices_list])
    res = sum(sum(V * (np.transpose(A).dot(A))))
    res = norm_tensor**2 + res - 2 * sum(sum(W * A))
    if nb_samples is None:
        nb_samples = np.shape(indices_list[0])[0]
    res = res / nb_samples
    P = 1
    for i in tl.shape(tensor):
        P = P * i
    return (np.sqrt(res * P), indices_list)
Code example #16
File: test_tt_tensor.py Project: TripleEss/tensorly
def test_tt_to_tensor_random():
    """ Test for tt_to_tensor

        Uses random tensor as input
    """

    # Create tensor with random elements
    tensor = tl.tensor(np.random.rand(3, 4, 5, 6, 2, 10))
    tensor_shape = tensor.shape

    # Find TT decomposition of the tensor
    rank = 10
    factors = tensor_train(tensor, rank)

    # Reconstruct the original tensor
    reconstructed_tensor = tl.tt_to_tensor(factors)
    assert_(tl.shape(reconstructed_tensor) == tensor_shape)

    # Check that the rank is 10
    D = len(factors)
    for k in range(D):
        (r_prev, _, r_k) = factors[k].shape
        assert (r_prev <= rank), "TT rank with index " + str(k) + " exceeds rank"
        assert (r_k <= rank), "TT rank with index " + str(k + 1) + " exceeds rank"
Code example #17
def test_gcp_sgd():
    """ Test sgd optimization functionality """
    # Create a random tensor
    np.random.seed(1234)
    d = 3
    shp = (100, 20, 30)
    size = 1
    for i in shp:
        size *= i
    data = np.random.rand(size)
    tensor = tl.tensor(data.reshape(shp, order='F'), dtype=tl.float64)
    rank = 10
    mTen = gcp(tensor, rank, type='normal', opt='sgd', maxiters=100, epciters=10)

    assert (mTen is not None), "gcp returned null"
    assert (len(mTen[1]) == d), "Number of factors should be 3, currently has " + str(len(mTen[1]))

    # Check each factor matrices has the correct number of columns
    for k in range(d):
        rows, columns = tl.shape(mTen[1][k])
        assert (columns == rank), "Factor matrix {} needs {} columns, but only has {}".format(k + 1, rank, columns)

    # Check CPTensor has same number of elements as tensor
    mTen = tl.cp_to_tensor(mTen)
    assert (tensor.size == mTen.size), "Unequal number of tensor elements. Tensor: {} CPTensor: {}".format(
        tensor.size, mTen.size)
    score = 1 - (tl.norm(tensor - mTen) / tl.norm(tensor))
    print("Score: {0:0.4f}".format(score))
Code example #18
def test_tl_sample_uniform():
    """
    Test uniform random tensor sampler
    """
    tensor = tl.tensor(np.arange(24).reshape(3, 4, 2))
    nsamp = 10
    subs, vals, wgts = tl_sample_uniform(tensor, nsamp)

    # check shape in first dimension match nsamp
    assert(tl.shape(subs)[0] == nsamp)
    assert(tl.shape(vals)[0] == nsamp)
    assert(tl.shape(wgts)[0] == nsamp)

    # confirm subs/vals correspond to tensor values
    for i in range(nsamp):
        assert(tensor[tuple(subs[i, :])] == vals[i])
Code example #19
File: _tt_matrix.py Project: sz144/tensorly
def tt_matrix_to_tensor(tt_matrix):
    """Returns the full tensor whose TT-Matrix decomposition is given by 'factors'

        Re-assembles 'factors', which represent a tensor in TT-Matrix format
        into the corresponding full tensor

    Parameters
    ----------
    factors : list of 4D-arrays
        TT-Matrix factors (also known as tt-cores) of shape (rank_k, left_dim_k, right_dim_k, rank_{k+1})

    Returns
    -------
    output_tensor: ndarray
                   tensor whose TT-Matrix decomposition was given by 'factors'
    """
    # Each core is of shape (rank_left, size_in, size_out, rank_right)
    rank, in_shape, out_shape, rank_right = zip(*(tl.shape(f) for f in tt_matrix))
    rank += (rank_right[-1], )                           
    ndim = len(in_shape)
    
    # Intertwine the dims 
    # full_shape = in_shape[0], out_shape[0], in_shape[1], ...
    full_shape = sum(zip(*(in_shape, out_shape)), ())
    order = list(range(0, ndim*2, 2)) + list(range(1, ndim*2, 2))

    for i, factor in enumerate(tt_matrix):
        if not i:
            # factor = factor.squeeze(0)
            res = tl.reshape(factor, (factor.shape[1], -1))
        else:
            res = tl.dot(tl.reshape(res, (-1, rank[i])), tl.reshape(factor, (rank[i], -1)))
    res = tl.reshape(res, full_shape)
    
    return tl.transpose(res, order)
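
A round-trip sketch, assuming tensorly's random module is imported (from tensorly import random), as in the tt_matrix test above:

tt = random.random_tt_matrix((2, 3, 4, 5), rank=2, full=False)  # left dims (2, 3), right dims (4, 5)
full = tt_matrix_to_tensor(tt)
assert tl.shape(full) == (2, 3, 4, 5)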
Code example #20
    def from_matrix(cls, matrix, tensorized_row_shape, tensorized_column_shape, rank, **kwargs):
        if matrix.ndim > 2:
            n_matrices = _ensure_tuple(tl.shape(matrix)[:-2])
        else:
            n_matrices = ()
        tensor = matrix.reshape((*n_matrices, *tensorized_row_shape, *tensorized_column_shape))
        return cls.from_tensor(tensor, tensorized_row_shape, tensorized_column_shape, rank, n_matrices=n_matrices, **kwargs)
Code example #21
def test_gcp_1():
    """ Test for generalized CP"""

    ## Test 1 - shapes and dimensions

    # Create tensor with random elements
    rng = tl.check_random_state(1234)
    d = 3
    n = 4
    shape = (40, 50, 60)
    tensor = tl.tensor(rng.random(shape), dtype=tl.float32)
    # tensor = (np.arange(n**d, dtype=float).reshape((n,)*d))
    # tensor = tl.tensor(tensor)  # a 4 x 4 x 4 tensor

    tensor_shape = tensor.shape

    # Find gcp decomposition of the tensor
    rank = 20
    mTen = gcp(tensor, rank, type='normal', state=rng, maxiters=1e5)
    print(mTen)
    assert(mTen is not None), "gcp returned null"
    assert(len(mTen[1]) == d), "Number of factors should be 3, currently has " + str(len(mTen[1]))

    # Check each factor matrices has the correct number of columns
    for k in range(d):
        rows, columns = tl.shape(mTen[1][k])
        assert(columns == rank), "Factor matrix {} needs {} columns, but only has {}".format(k + 1, rank, columns)

    # Check CPTensor has same number of elements as tensor
    mTen = tl.cp_to_tensor(mTen)
    assert(tensor.size == mTen.size), "Unequal number of tensor elements. Tensor: {} CPTensor: {}".format(tensor.size, mTen.size)
    score = 1 - (tl.norm(tensor - mTen)/tl.norm(tensor))
    print("Score: {}".format(score))
Code example #22
    def from_matrix(cls, matrix, tensorized_row_shape, tensorized_column_shape, rank, factorization='CP', **kwargs):
        """Create a Tensorized Matrix by tensorizing and decomposing an existing matrix


        Parameters
        ----------
        matrix : torch.tensor of order 2
            matrix to decompose
        tensorized_row_shape : tuple[int]
            The first dimension (rows) of the matrix will be tensorized to that shape
        tensorized_column_shape : tuple[int]
            The second dimension (columns) of the matrix will be tensorized to that shape
        rank : int, 'same' or float
            rank of the decomposition
        n_matrices : tuple or int, default is ()
            if not (), indicates how many matrices have to be jointly factorized
        factorization : {'CP', 'TT', 'Tucker'}, optional
            Tensor factorization to use to decompose the tensor, by default 'CP'

        Returns
        -------
        TensorizedMatrix
            Matrix in Tensorized and Factorized form.

        Raises
        ------
        ValueError
            If the factorization given does not exist. 
        """
        if matrix.ndim > 2:
            batch_dims = _ensure_tuple(tl.shape(matrix)[:-2])
        else:
            batch_dims = ()
        tensor = matrix.reshape((*batch_dims, *tensorized_row_shape, *tensorized_column_shape))
        return cls.from_tensor(tensor, batch_dims + (tensorized_row_shape, tensorized_column_shape), rank, factorization=factorization, **kwargs)
Code example #23
def _validate_mps_tensor(mps_tensor):
    factors = mps_tensor
    n_factors = len(factors)

    if n_factors < 2:
        raise ValueError(
            'A Matrix-Product-State (ttrain) tensor should be composed of at least two factors. '
            'However, {} factors were given.'.format(n_factors))

    rank = []
    shape = []
    for index, factor in enumerate(factors):
        current_rank, current_shape, next_rank = tl.shape(factor)

        # Check that factors are third order tensors
        if not tl.ndim(factor) == 3:
            raise ValueError(
                'MPS expresses a tensor as third order factors (tt-cores).\n'
                'However, tl.ndim(factors[{}]) = {}'.format(
                    index, tl.ndim(factor)))
        # Consecutive factors should have matching ranks
        if index and tl.shape(factors[index - 1])[2] != current_rank:
            raise ValueError(
                'Consecutive factors should have matching ranks\n'
                ' -- e.g. tl.shape(factors[0])[2]) == tl.shape(factors[1])[0])\n'
                'However, tl.shape(factor[{}])[2] == {} but'
                ' tl.shape(factor[{}])[0] == {} '.format(
                    index - 1,
                    tl.shape(factors[index - 1])[2], index, current_rank))
        # Check for boundary conditions
        if (index == 0) and current_rank != 1:
            raise ValueError(
                'Boundary conditions dictate factor[0].shape[0] == 1. '
                'However, got factor[0].shape[0] = {}.'.format(current_rank))
        if (index == n_factors - 1) and next_rank != 1:
            raise ValueError(
                'Boundary conditions dictate factor[-1].shape[2] == 1. '
                'However, got factor[{}].shape[2] = {}.'.format(
                    n_factors - 1, next_rank))

        shape.append(current_shape)
        rank.append(current_rank)

    # Add last rank (boundary condition)
    rank.append(next_rank)

    return tuple(shape), tuple(rank)
Code example #24
def smoothness_prox(tensor, regularizer):
    """Proximal operator for smoothness

    Parameters
    ----------
    tensor : ndarray
    regularizer : float

    Returns
    -------
    ndarray

    """
    diag_matrix = tl.diag(2 * regularizer * tl.ones(tl.shape(tensor)[0]) + 1) + \
                  tl.diag(-regularizer * tl.ones(tl.shape(tensor)[0] - 1), k=-1) + \
                  tl.diag(-regularizer * tl.ones(tl.shape(tensor)[0] - 1), k=1)
    return tl.solve(diag_matrix, tensor)
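
The system matrix is tridiagonal: 1 + 2*regularizer on the diagonal and -regularizer on both off-diagonals, so each column of the result solves a small smoothing linear system. A minimal sketch (np/tl imports assumed):

x = tl.tensor(np.random.rand(10, 3))
y = smoothness_prox(x, regularizer=1.0)
assert tl.shape(y) == (10, 3)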
Code example #25
def test_tt_n_param():
    """Test for _tt_n_param"""
    tensor_shape = (2, 3, 4, 1, 5)
    rank = (1, 3, 2, 2, 4, 1)
    factors = random_tt(shape=tensor_shape, rank=rank)
    true_n_param = np.sum([np.prod(tl.shape(f)) for f in factors])
    n_param = _tt_n_param(tensor_shape, rank)
    assert_equal(n_param, true_n_param)
Code example #26
File: _tt.py Project: sz144/tensorly
def tensor_train_matrix(tensor, rank):
    """Decompose a tensor into a matrix in tt-format
    
    Parameters
    ----------
    tensor : tensorized matrix 
        if your input matrix is of size (4, 9) and your tensorized_shape (2, 2, 3, 3)
        then tensor should be tl.reshape(matrix, (2, 2, 3, 3))
    rank : 'same', float or int tuple
        - if 'same' creates a decomposition with the same number of parameters as `tensor`
        - if float, creates a decomposition with `rank` x the number of parameters of `tensor`
        - otherwise, the actual rank to be used, e.g. (1, rank_2, ..., 1) of size tensor.ndim//2. Note that boundary conditions dictate that the first rank = last rank = 1.
    
    Returns
    -------
    tt_matrix
    """
    order = tl.ndim(tensor)
    n_input = order // 2  # (n_output = n_input)

    if tl.ndim(tensor) != n_input * 2:
        msg = 'The tensor should have as many dimensions for inputs as for outputs, i.e. its order should be even, '
        msg += f'but got a tensor of order tl.ndim(tensor)={order}, which is odd.'
        raise ValueError(msg)

    in_shape = tl.shape(tensor)[:n_input]
    out_shape = tl.shape(tensor)[n_input:]

    if n_input == 1:
        # A TTM with a single factor is just a matrix...
        return TTMatrix([tensor.reshape(1, in_shape[0], out_shape[0], 1)])

    new_idx = list([
        idx for tuple_ in zip(range(n_input), range(n_input, 2 * n_input))
        for idx in tuple_
    ])
    new_shape = list([a * b for (a, b) in zip(in_shape, out_shape)])
    tensor = tl.reshape(tl.transpose(tensor, new_idx), new_shape)

    factors = tensor_train(tensor, rank).factors
    for i in range(len(factors)):
        factors[i] = tl.reshape(
            factors[i], (factors[i].shape[0], in_shape[i], out_shape[i], -1))

    return TTMatrix(factors)
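
A usage sketch following the docstring's example sizes (np/tl imports assumed):

matrix = tl.tensor(np.random.rand(4, 9))
tensor = tl.reshape(matrix, (2, 2, 3, 3))
tt = tensor_train_matrix(tensor, rank=[1, 2, 1])
for core in tt:
    print(tl.shape(core))  # (rank_k, in_shape[k], out_shape[k], rank_{k+1})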
Code example #27
def err_rand(tensor, weights, factors, nb_samples, indices_list=None):
    """
    Error estimation proposed in CPRAND

    Parameters
    ----------
    tensor : tensor
    weights : vector
        the weights of CP decomposition
    factors : list of matrices
        factor matrices of CP decomposition
    nb_samples : int
        number of samples
    indices_list : tuple, optional
        indices list of the samples. The default is None.

    Returns
    -------
    (float, tuple)
        the error estimation and the used indices_list

    """
    # if indices_list is not given
    if indices_list is None:
        indices_list = [
            np.random.choice(tl.shape(m)[0], nb_samples) for m in factors
        ]
        # works if nb_samples <= tl.shape(m)[0] for m in factors
        indices_list = [i.tolist() for i in indices_list]
        indices_list = tuple(indices_list)
    est_values = []
    # nb of terms in tensor
    P = 1
    for i in tl.shape(tensor):
        P = P * i
    for i in range(nb_samples):
        if weights is None:
            value = 1
        else:
            value = weights
        for mode in range(tl.ndim(tensor)):
            value = value * factors[mode][indices_list[mode][i], :]
        est_values += [sum(value)]
    list_e = (tensor[indices_list] - est_values)**2
    # assume max(list_e) = 1 if terms are in [0,1]
    return (np.sqrt(sum(list_e) * P / nb_samples), indices_list)
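
A sketch of calling the estimator on a random CP model (np/tl imports assumed; shapes are illustrative):

tensor = tl.tensor(np.random.rand(4, 5, 6))
weights = np.ones(3)
factors = [np.random.rand(s, 3) for s in (4, 5, 6)]
err, used_indices = err_rand(tensor, weights, factors, nb_samples=20)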
Code example #28
File: tatd.py Project: dawonahn/TATD
def krprod(factors, indices_list):
    ''' Implement the Khatri-Rao product for given nonzeros' indices '''

    rank = tl.shape(factors[0])[1]
    nnz = len(indices_list[0])
    nonzeros = tl.ones((nnz, rank), **tl.context(factors[0]))

    for indices, factor in zip(indices_list, factors):
        nonzeros = nonzeros * factor[indices, :]

    return torch.sum(nonzeros, dim=1)
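
A minimal sketch, assuming the PyTorch backend (TATD runs tensorly on top of torch):

import torch
import tensorly as tl
tl.set_backend('pytorch')

factors = [torch.rand(4, 3), torch.rand(5, 3)]
indices = (torch.tensor([0, 2]), torch.tensor([1, 4]))
vals = krprod(factors, indices)  # model values at entries (0, 1) and (2, 4)
assert vals.shape == (2, )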
Code example #29
File: outer_product.py Project: sz144/tensorly
def batched_outer(tensors):
    """Returns a generalized outer product of the two tensors
    
    Parameters 
    ----------
    tensor1 : tensor
        of shape (n_samples, J1, ..., JN)
    tensor2 : tensor
        of shape (n_samples, K1, ..., KM)
        
    Returns
    -------
    outer product of tensor1 and tensor2 
        of shape (n_samples, J1, ..., JN, K1, ..., KM)
    """
    for i, tensor in enumerate(tensors):
        if i:
            shape = tl.shape(tensor)
            size = len(shape) - 1

            n_samples = shape[0]

            if n_samples != shape_res[0]:
                raise ValueError(
                    f'Tensor {i} has a batch-size of {n_samples} but those before had a batch-size of {shape_res[0]}, '
                    'all tensors should have the same batch-size.')

            shape_1 = shape_res + (1, ) * size
            shape_2 = (n_samples, ) + (1, ) * size_res + shape[1:]

            res = tl.reshape(res, shape_1) * tl.reshape(tensor, shape_2)
        else:
            res = tensor

        shape_res = tl.shape(res)
        size_res = len(shape_res) - 1

    return res
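
A sketch with three batched tensors (np/tl imports assumed); note the batch dimension is shared, not multiplied out:

t1 = tl.tensor(np.random.rand(5, 2))
t2 = tl.tensor(np.random.rand(5, 3))
t3 = tl.tensor(np.random.rand(5, 4))
res = batched_outer([t1, t2, t3])
assert tl.shape(res) == (5, 2, 3, 4)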
Code example #30
def test_tucker():
    """Test for the Tucker decomposition"""
    rng = check_random_state(1234)

    tol_norm_2 = 10e-3
    tol_max_abs = 10e-1
    tensor = tl.tensor(rng.random_sample((3, 4, 3)))
    core, factors = tucker(tensor, rank=None, n_iter_max=200, verbose=True)
    reconstructed_tensor = tucker_to_tensor((core, factors))
    norm_rec = tl.norm(reconstructed_tensor, 2)
    norm_tensor = tl.norm(tensor, 2)
    assert ((norm_rec - norm_tensor) / norm_rec < tol_norm_2)

    # Test the max abs difference between the reconstruction and the tensor
    assert (tl.max(tl.abs(reconstructed_tensor - tensor)) < tol_max_abs)

    # Test the shape of the core and factors
    ranks = [2, 3, 1]
    core, factors = tucker(tensor, rank=ranks, n_iter_max=100, verbose=1)
    for i, rank in enumerate(ranks):
        assert_equal(factors[i].shape, (tensor.shape[i], ranks[i]),
                     err_msg="factors[{}].shape={}, expected {}".format(
                         i, factors[i].shape, (tensor.shape[i], ranks[i])))
        assert_equal(tl.shape(core)[i],
                     rank,
                     err_msg="Core.shape[{}]={}, "
                     "expected {}".format(i, core.shape[i], rank))

    # Random and SVD init should converge to a similar solution
    tol_norm_2 = 10e-1
    tol_max_abs = 10e-1

    core_svd, factors_svd = tucker(tensor,
                                   rank=[3, 4, 3],
                                   n_iter_max=200,
                                   init='svd',
                                   verbose=1)
    core_random, factors_random = tucker(tensor,
                                         rank=[3, 4, 3],
                                         n_iter_max=200,
                                         init='random',
                                         random_state=1234)
    rec_svd = tucker_to_tensor((core_svd, factors_svd))
    rec_random = tucker_to_tensor((core_random, factors_random))
    error = tl.norm(rec_svd - rec_random, 2)
    error /= tl.norm(rec_svd, 2)
    assert_(error < tol_norm_2,
            'norm 2 of difference between svd and random init too high')
    assert_(
        tl.max(tl.abs(rec_svd - rec_random)) < tol_max_abs,
        'abs norm of difference between svd and random init too high')