Example 1
def test_svd():
    """Test for the SVD functions"""
    tol = 0.1
    tol_orthogonality = 0.01

    for name, svd_fun in T.SVD_FUNS.items():
        sizes = [(100, 100), (100, 5), (10, 10), (10, 4), (5, 100)]
        n_eigenvecs = [90, 4, 5, 4, 5]

        for s, n in zip(sizes, n_eigenvecs):
            matrix = np.random.random(s)
            matrix_backend = T.tensor(matrix)
            fU, fS, fV = svd_fun(matrix_backend, n_eigenvecs=n)
            U, S, V = svd(matrix)
            U, S, V = U[:, :n], S[:n], V[:n, :]

            assert_array_almost_equal(np.abs(S), T.abs(fS), decimal=3,
                err_msg='eigenvals not correct for "{}" svd fun VS svd and backend="{}", for {} eigenvecs, and size {}.'.format(
                        name, tl.get_backend(), n, s))

            # True reconstruction error (based on numpy SVD)
            true_rec_error = np.sum((matrix - np.dot(U, S.reshape((-1, 1))*V))**2)
            # Reconstruction error with the backend's SVD
            rec_error = T.sum((matrix_backend - T.dot(fU, T.reshape(fS, (-1, 1))*fV))**2)
            # Check that the two are similar
            assert_(true_rec_error - rec_error <= tol,
                msg='Reconstruction not correct for "{}" svd fun VS svd and backend="{}", for {} eigenvecs, and size {}.'.format(
                        name, tl.get_backend(), n, s))

            # Check for orthogonality when relevant
            if name != 'symeig_svd':
                left_orthogonality_error = T.norm(T.dot(T.transpose(fU), fU) - T.eye(n))
                assert_(left_orthogonality_error <= tol_orthogonality,
                    msg='Left eigenvecs not orthogonal for "{}" svd fun VS svd and backend="{}", for {} eigenvecs, and size {}.'.format(
                            name, tl.get_backend(), n, s))
                right_orthogonality_error = T.norm(T.dot(fV, T.transpose(fV)) - T.eye(n))
                assert_(right_orthogonality_error <= tol_orthogonality,
                    msg='Right eigenvecs not orthogonal for "{}" svd fun VS svd and backend="{}", for {} eigenvecs, and size {}.'.format(
                        name, tl.get_backend(), n, s))

        # Should fail on non-matrices
        with assert_raises(ValueError):
            tensor = T.tensor(np.random.random((3, 3, 3)))
            svd_fun(tensor)

        # Test for singular matrices (some eigenvals will be zero)
        # Rank at most 5
        matrix = T.tensor(np.dot(np.random.random((20, 5)), np.random.random((5, 20))))
        U, S, V = tl.partial_svd(matrix, n_eigenvecs=5)
        true_rec_error = tl.sum((matrix - tl.dot(U, tl.reshape(S, (-1, 1))*V))**2)
        assert_(true_rec_error <= tol)

        # Test if partial_svd returns the same result for the same setting
        matrix = T.tensor(np.random.random((20, 5)))
        random_state = np.random.RandomState(0)
        U1, S1, V1 = tl.partial_svd(matrix, n_eigenvecs=2, random_state=random_state)
        U2, S2, V2 = tl.partial_svd(matrix, n_eigenvecs=2, random_state=0)
        assert_array_equal(U1, U2)
        assert_array_equal(S1, S2)
        assert_array_equal(V1, V2)
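A minimal sketch of the convention the test above relies on: tl.partial_svd returns V already transposed, so the reconstruction is U @ diag(S) @ V (shapes here are illustrative):

import numpy as np
import tensorly as tl

M = tl.tensor(np.random.random_sample((6, 4)))
U, S, V = tl.partial_svd(M, n_eigenvecs=4)  # U: (6, 4), S: (4,), V: (4, 4)
reconstruction = tl.dot(U, tl.reshape(S, (-1, 1)) * V)
assert tl.norm(M - reconstruction) < 1e-8  # exact up to floating point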
Example 2
def svd_init_fac(tensor, rank):
    """
    svd initialization of factor matrices for a given tensor and rank
    
    Parameters
    ----------
    tensor : tensor
    rank : int

    Returns
    -------
    factors : list of matrices

  """
    factors = []
    for mode in range(tl.ndim(tensor)):
        # unfolding of a given mode
        unfolded = tl.unfold(tensor, mode)
        if rank <= tl.shape(tensor)[mode]:
            u, s, v = tl.partial_svd(
                unfolded,
                n_eigenvecs=rank)  # leading `rank` singular vectors/values
        else:
            u, s, v = tl.partial_svd(unfolded,
                                     n_eigenvecs=tl.shape(tensor)[mode])
            # completed by random columns
            u = np.append(u,
                          np.random.random(
                              (np.shape(u)[0], rank - tl.shape(tensor)[mode])),
                          axis=1)
            # sometimes we have singular matrix error for als
        factors += [u]
    return factors
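A hypothetical call on a small random tensor, to illustrate the shapes svd_init_fac returns:

import numpy as np
import tensorly as tl

tensor = tl.tensor(np.random.random((4, 5, 6)))
factors = svd_init_fac(tensor, rank=3)
# one factor per mode, each with `rank` columns
print([np.shape(f) for f in factors])  # [(4, 3), (5, 3), (6, 3)]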
Example 3
def mps_entanglement_entropy(tensor, boundary):
    """Returns the entanglement entropy of an MPS paritioned at boundary in TT tensor form. Assumes
    a traditional and single MPS, that is, a linear pure state in single-mode form.

    Parameters
    ----------
    tensor : (TT tensor)
        Data structure
    boundary : (int)
        Qubit at which to partition system.

    Returns
    -------
    tt_mps_entanglement_entropy : order-0 tensor
    """
    partial_mps = tensor[boundary]
    dims = partial_mps.shape
    partial_mps = tl.reshape(partial_mps, (1, dims[0] * dims[1], dims[2]))
    partial_mps = tt_to_tensor([partial_mps] + tensor[boundary + 1:])
    partial_mps = tl.reshape(partial_mps, (dims[0] * dims[1], -1))
    _, eig_vals, _ = tl.partial_svd(partial_mps, min(partial_mps.shape))
    eig_vals = eig_vals**2
    eps = tl.eps(eig_vals.dtype)
    eig_vals = eig_vals[eig_vals > eps]

    return -T.sum(T.log2(eig_vals) * eig_vals)
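A quick sanity check, assuming the function accepts a plain list of TT cores (as its indexing suggests): a two-qubit Bell state should carry exactly one bit of entanglement across the middle bond.

import numpy as np
import tensorly as tl

# TT cores of (|00> + |11>)/sqrt(2): shapes (1, 2, 2) and (2, 2, 1)
core0 = tl.tensor(np.array([[[1.0, 0.0], [0.0, 1.0]]]) / np.sqrt(2))
core1 = tl.tensor(np.array([[[1.0], [0.0]], [[0.0], [1.0]]]))
print(mps_entanglement_entropy([core0, core1], boundary=0))  # ~1.0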
Example 4
def out_bottle_layer_transformation(new_layer, old_layer, out_bottle_ratio):
    new_layer.main_stream_out = int(new_layer.main_stream_out * out_bottle_ratio)
    r = new_layer.main_stream_out
    new_layer.out_bottle_conv = CB(new_layer.main_stream_out, new_layer.channel_out, 1)
    new_layer.out_bottle = new_layer.channel_out
    assert new_layer.main_stream.channel_split == 1, "the channel split should be done after in bottle"
    assert not new_layer.main_stream.spatial_split, "the spatial split should be done after in bottle"
    new_main_stream = main_stream(new_layer.main_stream_in, new_layer.main_stream_out, new_layer.kernel,
                                  new_layer.stride,
                                  channel_split=new_layer.main_stream.channel_split,
                                  spatial_split=new_layer.main_stream.spatial_split,
                                  dilated=new_layer.main_stream.dilated)
    old_weight = old_layer.main_stream.conv1.conv.weight.data
    n, c, h, w = old_weight.size()
    temp_weight_tensor = old_weight.reshape(n, c * h * w)  # n x (c*h*w)
    U, S, V = tensorly.partial_svd(temp_weight_tensor, new_layer.main_stream_out)
    S = torch.diag(S)
    S = torch.sqrt(S)
    U = torch.mm(U, S)  # n x r
    V = torch.mm(S, V)  # r x (c*h*w)
    U = U.reshape(n, r, 1, 1)  # 1x1 conv weight: n x r x 1 x 1
    V = V.reshape(r, c, h, w)  # main-stream conv weight: r x c x h x w
    new_layer.out_bottle_conv.conv.weight.data = U
    new_main_stream.conv1.conv.weight.data = V
    new_layer.main_stream = new_main_stream
    return new_layer
Example 5
def in_bottle_layer_transformation(new_layer, old_layer, in_bottle_ratio):
    new_layer.in_bottle = int(new_layer.channel_in * in_bottle_ratio)
    new_layer.in_bottle_conv = CB(new_layer.channel_in, int(new_layer.in_bottle), 1)
    new_layer.main_stream_in = new_layer.in_bottle
    assert new_layer.main_stream.channel_split == 1, "the channel split should be done after in bottle"
    assert not new_layer.main_stream.spatial_split, "the spatial split should be done after in bottle"
    new_main_stream = main_stream(new_layer.main_stream_in, new_layer.main_stream_out, new_layer.kernel,
                                  new_layer.stride,
                                  channel_split=new_layer.main_stream.channel_split,
                                  spatial_split=new_layer.main_stream.spatial_split,
                                  dilated=new_layer.main_stream.dilated)
    old_weight = old_layer.main_stream.conv1.conv.weight.data
    n, c, h, w = old_weight.size()
    temp_weight_tensor = torch.transpose(old_weight, 0, 1)  # CxNxHxW
    temp_weight_tensor = temp_weight_tensor.reshape(c, n * h * w)
    U, S, V = tensorly.partial_svd(temp_weight_tensor, new_layer.in_bottle)
    S = torch.diag(S)
    S = torch.sqrt(S)
    U = torch.mm(U, S)  # C x R
    V = torch.mm(S, V)  # R x (N x H x W)
    U = U.t().reshape(new_layer.in_bottle, c, 1, 1)  # transpose and reshape R x C x 1 x 1
    V = V.reshape(new_layer.in_bottle, n, h, w)  # R x N x H x W
    V = torch.transpose(V, 0, 1)
    new_layer.in_bottle_conv.conv.weight.data = U
    new_main_stream.conv1.conv.weight.data = V
    new_layer.main_stream = new_main_stream
    return new_layer
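A minimal numeric sketch of the split both transformations rely on: the flattened conv weight W is factored as (U·sqrt(S))(sqrt(S)·V), so one convolution can be replaced by two thinner ones. Shapes are illustrative; it assumes tensorly's PyTorch backend is active.

import torch
import tensorly
tensorly.set_backend('pytorch')

W = torch.randn(16, 27)  # e.g. an (n, c*h*w) flattened conv weight
U, S, V = tensorly.partial_svd(W, 8)
S_sqrt = torch.sqrt(torch.diag(S))
W_approx = (U @ S_sqrt) @ (S_sqrt @ V)  # rank-8 approximation of W
print(torch.norm(W - W_approx))  # small when the spectrum decays fast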
Example 6
def test_svd_time():
    """Test SVD time
    
    SVD shouldn't be slow for tall and skinny matrices
    if n_eigenvec == min(matrix.shape)
    """
    M = tl.tensor(np.random.random_sample((4, 10000)))
    t = time()
    _ = tl.partial_svd(M, 4)
    t = time() - t
    assert_(t <= 0.1,
            'partial_svd took too long; full_matrices may be set incorrectly')

    M = tl.tensor(np.random.random_sample((10000, 4)))
    t = time()
    _ = tl.partial_svd(M, 4)
    t = time() - t
    assert_(t <= 0.1,
            'partial_svd took too long; full_matrices may be set incorrectly')
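For context on the bound above: with full_matrices=True, NumPy materializes a square U for a tall matrix, which is what makes a naive call slow. A small illustration:

import numpy as np

M = np.random.random_sample((2000, 4))
U_full, _, _ = np.linalg.svd(M, full_matrices=True)   # U is (2000, 2000)
U_thin, _, _ = np.linalg.svd(M, full_matrices=False)  # U is (2000, 4)
print(U_full.shape, U_thin.shape)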
Example 7
def svd_decomposition_linear_layer(layer, rank):
    [U, S, V] = tl.partial_svd(layer.weight.data, rank)

    first_layer = torch.nn.Linear(in_features=V.shape[1],
                                  out_features=V.shape[0],
                                  bias=False)
    second_layer = torch.nn.Linear(in_features=U.shape[1],
                                   out_features=U.shape[0],
                                   bias=True)

    if layer.bias is not None:
        second_layer.bias.data = layer.bias.data

    first_layer.weight.data = (V.t() * S).t()
    second_layer.weight.data = U

    new_layers = [first_layer, second_layer]
    return nn.Sequential(*new_layers)
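A hypothetical use, assuming tensorly's PyTorch backend is active: at full rank the two-layer factorization reproduces the original layer exactly.

import torch
import tensorly as tl
tl.set_backend('pytorch')  # assumed: partial_svd must see torch tensors

layer = torch.nn.Linear(64, 32)
factored = svd_decomposition_linear_layer(layer, rank=32)  # full rank: lossless
x = torch.randn(8, 64)
print((layer(x) - factored(x)).abs().max())  # ~0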
Example 8
def svd_thresholding(matrix, threshold):
    """Singular value thresholding operator

    Parameters
    ----------
    matrix : ndarray
    threshold : float

    Returns
    -------
    ndarray
        matrix on which the operator has been applied

    See also
    --------
    procrustes : procrustes operator
    """
    U, s, V = tl.partial_svd(matrix, n_eigenvecs=min(matrix.shape))
    return tl.dot(U, tl.reshape(soft_thresholding(s, threshold), (-1, 1)) * V)
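A sketch of the effect, assuming soft_thresholding is the usual elementwise shrinkage: each singular value s of the input becomes max(s - threshold, 0).

import numpy as np
import tensorly as tl

M = tl.tensor(np.random.random((5, 5)))
M_shrunk = svd_thresholding(M, threshold=0.5)
print(tl.partial_svd(M, n_eigenvecs=5)[1])         # original spectrum
print(tl.partial_svd(M_shrunk, n_eigenvecs=5)[1])  # shrunk by 0.5, floored at 0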
Example 9
def procrustes(matrix):
    """Procrustes operator

    Parameters
    ----------
    matrix : ndarray

    Returns
    -------
    ndarray
        matrix on which the Procrustes operator has been applied
        has the same shape as the original tensor


    See also
    --------
    svd_thresholding : SVD-thresholding operator
    """
    U, _, V = tl.partial_svd(matrix, n_eigenvecs=min(matrix.shape))
    return tl.dot(U, V)
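A sketch: for a square input, the Procrustes operator returns the nearest orthogonal matrix, so the result times its transpose should be close to the identity.

import numpy as np
import tensorly as tl

M = tl.tensor(np.random.random((4, 4)))
Q = procrustes(M)
print(tl.dot(Q, tl.transpose(Q)))  # ~ identity matrix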
Example 10
def svd_decompose_linear(layer,
                         rank=None,
                         criterion=EnergyThreshold,
                         threshold=0.85):
    if rank is None or rank == -1:
        rank = svd_rank_linear(layer, criterion(threshold))

    [U, S, V] = tl.partial_svd(layer.weight.data, rank)

    first_layer = torch.nn.Linear(in_features=V.shape[1],
                                  out_features=V.shape[0],
                                  bias=False)
    second_layer = torch.nn.Linear(in_features=U.shape[1],
                                   out_features=U.shape[0],
                                   bias=True)

    if layer.bias is not None:
        second_layer.bias.data = layer.bias.data

    first_layer.weight.data = (V.t() * S).t()
    second_layer.weight.data = U

    new_layers = [first_layer, second_layer]
    return nn.Sequential(*new_layers)
Example 11
def matrix_product_state(input_tensor, rank, verbose=False):
    """MPS decomposition via recursive SVD

        Decomposes `input_tensor` into a sequence of order-3 tensors (factors)
        -- also known as Tensor-Train decomposition [1]_.

    Parameters
    ----------
    input_tensor : tensorly.tensor
    rank : {int, int list}
            maximum allowable MPS rank of the factors
            if int, then this is the same for all the factors
            if int list, then rank[k] is the rank of the kth factor
    verbose : boolean, optional
            level of verbosity

    Returns
    -------
    factors : MPS factors
              order-3 tensors of the MPS decomposition

    References
    ----------
    .. [1] Ivan V. Oseledets. "Tensor-train decomposition", SIAM J. Scientific Computing, 33(5):2295–2317, 2011.
    """

    # Check user input for errors
    tensor_size = input_tensor.shape
    n_dim = len(tensor_size)

    if isinstance(rank, int):
        rank = [rank] * (n_dim + 1)
    elif n_dim + 1 != len(rank):
        message = 'Provided incorrect number of ranks. Should verify len(rank) == tl.ndim(tensor)+1, but len(rank) = {} while tl.ndim(tensor) + 1 = {}'.format(
            len(rank), n_dim + 1)
        raise ValueError(message)

    # Make sure it's not a tuple but a list
    rank = list(rank)

    context = tl.context(input_tensor)

    # Initialization
    if rank[0] != 1:
        print(
            'Provided rank[0] == {} but boundary conditions dictate rank[0] == rank[-1] == 1: setting rank[0] to 1.'
            .format(rank[0]))
        rank[0] = 1
    if rank[-1] != 1:
        print(
            'Provided rank[-1] == {} but boundary conditions dictate rank[0] == rank[-1] == 1: setting rank[-1] to 1.'
            .format(rank[-1]))
        rank[-1] = 1

    unfolding = input_tensor
    factors = [None] * n_dim

    # Getting the MPS factors up to n_dim - 1
    for k in range(n_dim - 1):

        # Reshape the unfolding matrix of the remaining factors
        n_row = int(rank[k] * tensor_size[k])
        unfolding = tl.reshape(unfolding, (n_row, -1))

        # SVD of unfolding matrix
        (n_row, n_column) = unfolding.shape
        current_rank = min(n_row, n_column, rank[k + 1])
        U, S, V = tl.partial_svd(unfolding, current_rank)
        rank[k + 1] = current_rank

        # Get kth MPS factor
        factors[k] = tl.reshape(U, (rank[k], tensor_size[k], rank[k + 1]))

        if verbose:
            print("MPS factor " + str(k) + " computed with shape " +
                  str(factors[k].shape))

        # Get new unfolding matrix for the remaining factors
        unfolding = tl.reshape(S, (-1, 1)) * V

    # Getting the last factor
    (prev_rank, last_dim) = unfolding.shape
    factors[-1] = tl.reshape(unfolding, (prev_rank, last_dim, 1))

    if verbose:
        print("MPS factor " + str(n_dim - 1) + " computed with shape " +
              str(factors[n_dim - 1].shape))

    return factors
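A hypothetical round-trip, assuming tt_to_tensor is available to rebuild the full tensor from the factors; with non-truncating ranks the reconstruction is exact.

import numpy as np
import tensorly as tl
from tensorly import tt_to_tensor

X = tl.tensor(np.random.random((3, 4, 5)))
factors = matrix_product_state(X, rank=[1, 3, 5, 1])  # maximal TT ranks here
print(tl.norm(X - tt_to_tensor(factors)))  # ~0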
Example 12
def non_negative_tucker_hals(tensor,
                             rank,
                             n_iter_max=100,
                             init="svd",
                             svd='numpy_svd',
                             tol=1e-8,
                             sparsity_coefficients=None,
                             core_sparsity_coefficient=None,
                             fixed_modes=None,
                             random_state=None,
                             verbose=False,
                             normalize_factors=False,
                             return_errors=False,
                             exact=False,
                             algorithm='fista'):
    """
    Non-negative Tucker decomposition

    Uses HALS to update each factor columnwise and uses
    fista or active set algorithm to update the core, see [1]_ 
    
    Parameters
    ----------
    tensor : ndarray
    rank : None, int or int list
        size of the core tensor, ``(len(ranks) == tensor.ndim)``
        if int, the same rank is used for all modes
    n_iter_max : int
            maximum number of iteration
    init : {'svd', 'random'}, optional
    svd : str, default is 'numpy_svd'
        function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
    tol : float, optional
          tolerance: the algorithm stops when the variation in
          the reconstruction error is less than the tolerance
        Default: 1e-8
    sparsity_coefficients : array of float (one per mode)
        The sparsity coefficients to use for each factor
        If set to None, the algorithm is computed without sparsity
        Default: None
    core_sparsity_coefficient : array of float or float
        This coefficient imposes sparsity on the core
        when it is updated with fista.
        Default: None
    fixed_modes : array of integers (between 0 and the number of modes)
        Indices of the modes whose factors should not be updated
        Default: None
    verbose : boolean
        Indicates whether the algorithm prints the successive
        reconstruction errors or not
        Default: False
    normalize_factors : bool
        If True, the norms of the factors are absorbed into the core.
    return_errors : boolean
        Indicates whether the algorithm should return all reconstruction errors
        and computation time of each iteration or not
        Default: False
    exact : bool
        If True, the HALS NNLS subroutines give high-precision results at a higher computational cost.
        If False, the algorithm gives an approximate solution.
        Default: False
    algorithm : {'fista', 'active_set'}
        Non negative least square solution to update the core. 
        Default: 'fista'
    Returns
    -------
    tensor : TuckerTensor
        Non-negative core and list of non-negative factors of the Tucker decomposition
    errors : list
        A list of reconstruction errors at each iteration of the algorithm
        (returned only if `return_errors` is True).

    Notes
    -----
    Tucker decomposes a tensor into a core tensor and a list of factors:

    .. math::
        \\begin{equation}
            tensor = [| core; factors[0], \\ldots, factors[-1] |]
        \\end{equation}

    We solve the following problem for each factor:

    .. math::
        \\begin{equation}
            \\min_{factors[i] \\geq 0} ||tensor_{[i]} - factors[i] \\times core_{[i]} \\times (\\prod_{j \\neq i} factors[j])^T||^2
        \\end{equation}

    If we define two variables such as:

    .. math::
        \\begin{equation}
            U = core_{[i]} \\times (\\prod_{j \\neq i} factors[j] \\times factors[j]^T), \\quad
            M = tensor_{[i]},
        \\end{equation}

    the gradient of the problem becomes:

    .. math::
        \\begin{equation}
            \\delta = -U^T M + factors[i] \\times U^T U
        \\end{equation}

    In order to calculate :math:`U^T U` and :math:`U^T M`, we define two variables:

    .. math::
        \\begin{equation}
            core\\_cross_{[j]} = core_{[j]} \\times (\\prod_{j \\neq i} factors[j] \\times factors[j]^T), \\quad
            tensor\\_cross_{[j]} = tensor_{[j]} \\times factors[j]
        \\end{equation}

    Then :math:`U^T U` and :math:`U^T M` become:

    .. math::
        \\begin{equation}
            U^T U = core\\_cross_{[j]} \\times core_{[j]}^T, \\quad
            U^T M = (tensor\\_cross_{[j]} \\times core_{[j]}^T)^T
        \\end{equation}
    References
    ----------
    .. [1] T.G. Kolda and B.W. Bader, "Tensor Decompositions and Applications",
       SIAM REVIEW, vol. 51, no. 3, pp. 455-500, 2009.
    """
    rank = validate_tucker_rank(tl.shape(tensor), rank=rank)
    n_modes = tl.ndim(tensor)
    if sparsity_coefficients is None or not isinstance(sparsity_coefficients,
                                                       Iterable):
        sparsity_coefficients = [sparsity_coefficients] * n_modes

    if fixed_modes is None:
        fixed_modes = []

    # Avoiding errors
    for fixed_value in fixed_modes:
        sparsity_coefficients[fixed_value] = None
    # Generating the mode update sequence
    modes = [
        mode for mode in range(tl.ndim(tensor)) if mode not in fixed_modes
    ]

    nn_core, nn_factors = initialize_tucker(tensor,
                                            rank,
                                            modes,
                                            init=init,
                                            svd=svd,
                                            random_state=random_state,
                                            non_negative=True)
    # initialisation - declare local variables
    norm_tensor = tl.norm(tensor, 2)
    rec_errors = []

    # Iterate over one step of NTD
    for iteration in range(n_iter_max):
        # One pass of least squares on each updated mode
        for mode in modes:

            # Computing Hadamard of cross-products
            pseudo_inverse = nn_factors.copy()
            for i, factor in enumerate(nn_factors):
                if i != mode:
                    pseudo_inverse[i] = tl.dot(tl.conj(tl.transpose(factor)),
                                               factor)
            # UtU
            core_cross = multi_mode_dot(nn_core, pseudo_inverse, skip=mode)
            UtU = tl.dot(unfold(core_cross, mode),
                         tl.transpose(unfold(nn_core, mode)))

            # UtM
            tensor_cross = multi_mode_dot(tensor,
                                          nn_factors,
                                          skip=mode,
                                          transpose=True)
            MtU = tl.dot(unfold(tensor_cross, mode),
                         tl.transpose(unfold(nn_core, mode)))
            UtM = tl.transpose(MtU)

            # Call the hals resolution with nnls, optimizing the current mode
            nn_factor, _, _, _ = hals_nnls(
                UtM,
                UtU,
                tl.transpose(nn_factors[mode]),
                n_iter_max=100,
                sparsity_coefficient=sparsity_coefficients[mode],
                exact=exact)
            nn_factors[mode] = tl.transpose(nn_factor)
        # updating core
        if algorithm == 'fista':
            pseudo_inverse[-1] = tl.dot(tl.transpose(nn_factors[-1]),
                                        nn_factors[-1])
            core_estimation = multi_mode_dot(tensor,
                                             nn_factors,
                                             transpose=True)
            learning_rate = 1

            for MtM in pseudo_inverse:
                learning_rate *= 1 / (tl.partial_svd(MtM)[1][0])
            nn_core = fista(
                core_estimation,
                pseudo_inverse,
                x=nn_core,
                n_iter_max=n_iter_max,
                sparsity_coef=core_sparsity_coefficient,
                lr=learning_rate,
            )
        if algorithm == 'active_set':
            pseudo_inverse[-1] = tl.dot(tl.transpose(nn_factors[-1]),
                                        nn_factors[-1])
            core_estimation_vec = tl.base.tensor_to_vec(
                tl.tenalg.mode_dot(tensor_cross,
                                   tl.transpose(nn_factors[modes[-1]]),
                                   modes[-1]))
            pseudo_inverse_kr = tl.tenalg.kronecker(pseudo_inverse)
            vectorcore = active_set_nnls(core_estimation_vec,
                                         pseudo_inverse_kr,
                                         x=nn_core,
                                         n_iter_max=n_iter_max)
            nn_core = tl.reshape(vectorcore, tl.shape(nn_core))

        # Adding the l1 norm value to the reconstruction error
        sparsity_error = 0
        for index, sparse in enumerate(sparsity_coefficients):
            if sparse:
                sparsity_error += 2 * (sparse *
                                       tl.norm(nn_factors[index], order=1))
        # error computation
        rec_error = tl.norm(tensor - tucker_to_tensor(
            (nn_core, nn_factors)), 2) / norm_tensor
        rec_errors.append(rec_error)

        if iteration > 1:
            if verbose:
                print('reconstruction error={}, variation={}.'.format(
                    rec_errors[-1], rec_errors[-2] - rec_errors[-1]))

            if tol and abs(rec_errors[-2] - rec_errors[-1]) < tol:
                if verbose:
                    print('converged in {} iterations.'.format(iteration))
                break
        if normalize_factors:
            nn_core, nn_factors = tucker_normalize((nn_core, nn_factors))
    tensor = TuckerTensor((nn_core, nn_factors))
    if return_errors:
        return tensor, rec_errors
    else:
        return tensor
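A hypothetical call on a small non-negative tensor, just to show the entry point and the returned errors:

import numpy as np
import tensorly as tl

X = tl.tensor(np.random.random((5, 6, 7)))
tucker, errors = non_negative_tucker_hals(X, rank=[3, 3, 3], return_errors=True)
print(errors[-1])  # final relative reconstruction error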
Example 13
def tensor_ring(input_tensor, rank, mode=0, verbose=False):
    """Tensor Ring decomposition via recursive SVD

        Decomposes `input_tensor` into a sequence of order-3 tensors (factors) [1]_.

    Parameters
    ----------
    input_tensor : tensorly.tensor
    rank : Union[int, List[int]]
            maximum allowable TR rank of the factors
            if int, then this is the same for all the factors
            if int list, then rank[k] is the rank of the kth factor
    mode : int, default is 0
            index of the first factor to compute
    verbose : boolean, optional
            level of verbosity

    Returns
    -------
    factors : TR factors
              order-3 tensors of the TR decomposition

    References
    ----------
    .. [1] Qibin Zhao et al. "Tensor Ring Decomposition" arXiv preprint arXiv:1606.05535, (2016).
    """
    rank = validate_tr_rank(tl.shape(input_tensor), rank=rank)
    n_dim = len(input_tensor.shape)

    # Change order
    if mode:
        order = tuple(range(mode, n_dim)) + tuple(range(mode))
        input_tensor = tl.transpose(input_tensor, order)
        rank = rank[mode:] + rank[:mode]

    tensor_size = input_tensor.shape

    factors = [None] * n_dim

    # Getting the first factor
    unfolding = tl.reshape(input_tensor, (tensor_size[0], -1))

    n_row, n_column = unfolding.shape
    if rank[0] * rank[1] > min(n_row, n_column):
        raise ValueError(f'rank[{mode}] * rank[{mode + 1}] = {rank[0] * rank[1]} is larger than '
                         f'first matricization dimension {n_row}×{n_column}.\n'
                         'Failed to compute first factor with specified rank. '
                         'Reduce specified ranks or change first matricization `mode`.')

    # SVD of unfolding matrix
    U, S, V = tl.partial_svd(unfolding, rank[0] * rank[1])

    # Get first TR factor
    factor = tl.reshape(U, (tensor_size[0], rank[0], rank[1]))
    factors[0] = tl.transpose(factor, (1, 0, 2))
    if verbose is True:
        print("TR factor " + str(mode) + " computed with shape " + str(factor.shape))

    # Get new unfolding matrix for the remaining factors
    unfolding = tl.reshape(S, (-1, 1)) * V
    unfolding = tl.reshape(unfolding, (rank[0], rank[1], -1))
    unfolding = tl.transpose(unfolding, (1, 2, 0))

    # Getting the TR factors up to n_dim - 1
    for k in range(1, n_dim - 1):

        # Reshape the unfolding matrix of the remaining factors
        n_row = int(rank[k] * tensor_size[k])
        unfolding = tl.reshape(unfolding, (n_row, -1))

        # SVD of unfolding matrix
        n_row, n_column = unfolding.shape
        current_rank = min(n_row, n_column, rank[k + 1])
        U, S, V = tl.partial_svd(unfolding, current_rank)
        rank[k + 1] = current_rank

        # Get kth TR factor
        factors[k] = tl.reshape(U, (rank[k], tensor_size[k], rank[k + 1]))

        if verbose is True:
            print("TR factor " + str((mode + k) % n_dim) + " computed with shape " + str(factors[k].shape))

        # Get new unfolding matrix for the remaining factors
        unfolding = tl.reshape(S, (-1, 1)) * V

    # Getting the last factor
    prev_rank = unfolding.shape[0]
    factors[-1] = tl.reshape(unfolding, (prev_rank, -1, rank[0]))

    if verbose is True:
        print("TR factor " + str((mode - 1) % n_dim) + " computed with shape " + str(factors[-1].shape))

    # Reorder factors to match input
    if mode:
        factors = factors[-mode:] + factors[:-mode]

    return TRTensor(factors)
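A hypothetical call on a cube-shaped tensor; the factors come back as order-3 cores whose boundary ranks wrap around, so rank[0] must equal rank[-1].

import numpy as np
import tensorly as tl

X = tl.tensor(np.random.random((4, 4, 4)))
tr = tensor_ring(X, rank=[2, 2, 2, 2])
print([tl.shape(f) for f in tr])  # [(2, 4, 2), (2, 4, 2), (2, 4, 2)]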
Example 14
#!/usr/bin/env python
# coding: utf-8
import os
import csv
import numpy as np
import random
import scipy.sparse
import sys
import tensorly as tl
import warnings
warnings.filterwarnings('ignore')

# Load the graph adjacency matrix, e.g. coords.csv_sparse_graph.npz
mat = scipy.sparse.load_npz(str(sys.argv[1]))
# Truncated SVD; the 101 truncation is hard-coded
u, s, vt = tl.partial_svd(mat.todense(), n_eigenvecs=101)
np.savez(str(sys.argv[1]) + '_tsvd', u=u, s=s, vt=vt)
Example 15
config_sample = random.sample(
    files, k=samples)  # sample k times without replacement from configurations

T = np.zeros(shape=(samples, 3043, 3043), dtype=np.float32)
count = 0
for config in config_sample:
    dirname = os.fsdecode(config)
    filename = os.path.join(path, dirname + '/coords.csv_sparse_graph.npz')
    if not os.path.isfile(filename):
        continue
    mat = scipy.sparse.load_npz(filename).astype(np.float32).todense()
    # sub_threshold_indices = mat < 0.5
    # mat[sub_threshold_indices] = 0
    u, s, vh = tl.partial_svd(
        mat, n_eigenvecs=3
    )  # 3 should be enough because the data is x,y,z times beads
    T[count, :, :] = np.dot(u, np.dot(np.diag(s), vh))
    del mat
    count += 1  # advance the slot only when a file was actually processed

factors = non_negative_parafac(T,
                               rank=rank,
                               n_iter_max=10000,
                               verbose=1,
                               init='svd',
                               tol=1e-10)
# print(factors[0])
Example 16
import csv
import numpy as np
import umap
import random
import scipy.sparse
import sys
import tensorly as tl
import warnings
warnings.filterwarnings('ignore')

with open(sys.argv[1], 'r') as f:  # the input is the coord.csv file
    XYZ = np.array(list(csv.reader(f, delimiter=',')))[:, :3].astype(
        float)  # load coordinates
    mat = umap.umap_.fuzzy_simplicial_set(
        XYZ,
        n_neighbors=100,  #hard-coded
        random_state=np.random.RandomState(seed=42),
        metric='l2',
        metric_kwds={},
        knn_indices=None,
        knn_dists=None,
        angular=False,
        set_op_mix_ratio=1.0,
        local_connectivity=2.0,
        verbose=False)

    # Truncated SVD
    u, s, vt = tl.partial_svd(
        mat.todense(), n_eigenvecs=101)  # the 101 truncation is hard-coded

    scipy.sparse.save_npz(str(sys.argv[1]) + '_sparse_graph.npz', mat)
    np.savez(str(sys.argv[1]) + '_tsvd', u=u, s=s, vt=vt)
Example 17
def fista(UtM,
          UtU,
          x=None,
          n_iter_max=100,
          non_negative=True,
          sparsity_coef=0,
          lr=None,
          tol=10e-8):
    """
    Fast Iterative Shrinkage Thresholding Algorithm (FISTA)

    Computes an approximate (nonnegative) solution for Ux=M linear system.

    Parameters
    ----------
    UtM : ndarray
        Pre-computed product of the transposed of U and M
    UtU : ndarray
        Pre-computed product of the transposed of U and U
    x : ndarray, optional
        Initial value of the solution
        Default: None
    n_iter_max : int
        Maximum number of iterations
        Default: 100
    non_negative : bool, default is True
                   if True, result will be non-negative
    lr : float
        learning rate
        Default : None
    sparsity_coef : float or None
    tol : float
        stopping criterion

    Returns
    -------
    x : approximate solution such that Ux = M

    Notes
    -----
    We solve the following problem :math:`\\frac{1}{2} \\|m - Ux\\|_2^2 + \\lambda \\|x\\|_1`

    References
    ----------
    [1] : Beck, A., & Teboulle, M. (2009). A fast iterative
          shrinkage-thresholding algorithm for linear inverse problems.
          SIAM journal on imaging sciences, 2(1), 183-202.
    """
    if sparsity_coef is None:
        sparsity_coef = 0

    if x is None:
        x = tl.zeros(tl.shape(UtM), **tl.context(UtM))
    if lr is None:
        lr = 1 / (tl.partial_svd(UtU)[1][0])
    # Parameters
    momentum_old = tl.tensor(1.0)
    norm_0 = 0.0
    x_update = tl.copy(x)

    for iteration in range(n_iter_max):
        if isinstance(UtU, list):
            x_gradient = -UtM + tl.tenalg.multi_mode_dot(
                x_update, UtU, transpose=False) + sparsity_coef
        else:
            x_gradient = -UtM + tl.dot(UtU, x_update) + sparsity_coef

        if non_negative is True:
            x_gradient = tl.where(lr * x_gradient < x_update, x_gradient,
                                  x_update / lr)

        x_new = x_update - lr * x_gradient
        momentum = (1 + tl.sqrt(1 + 4 * momentum_old**2)) / 2
        x_update = x_new + ((momentum_old - 1) / momentum) * (x_new - x)
        momentum_old = momentum
        x = tl.copy(x_new)
        norm = tl.norm(lr * x_gradient)
        if iteration == 1:
            norm_0 = norm
        if norm < tol * norm_0:
            break
    return x
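A hypothetical use on a small non-negative least-squares problem, passing the precomputed Gram products as the signature expects:

import numpy as np
import tensorly as tl

U = tl.tensor(np.random.random((10, 4)))
m = tl.dot(U, tl.tensor(np.random.random((4, 1))))  # consistent system
x = fista(tl.dot(tl.transpose(U), m), tl.dot(tl.transpose(U), U))
print(tl.norm(m - tl.dot(U, x)))  # should approach 0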
Example 18
def tensor_train(input_tensor, rank, verbose=False):
    """TT decomposition via recursive SVD

        Decomposes `input_tensor` into a sequence of order-3 tensors (factors)
        -- also known as Tensor-Train decomposition [1]_.

    Parameters
    ----------
    input_tensor : tensorly.tensor
    rank : {int, int list}
            maximum allowable TT rank of the factors
            if int, then this is the same for all the factors
            if int list, then rank[k] is the rank of the kth factor
    verbose : boolean, optional
            level of verbosity

    Returns
    -------
    factors : TT factors
              order-3 tensors of the TT decomposition

    References
    ----------
    .. [1] Ivan V. Oseledets. "Tensor-train decomposition", SIAM J. Scientific Computing, 33(5):2295–2317, 2011.
    """
    rank = validate_tt_rank(tl.shape(input_tensor), rank=rank)
    tensor_size = input_tensor.shape
    n_dim = len(tensor_size)

    unfolding = input_tensor
    factors = [None] * n_dim

    # Getting the TT factors up to n_dim - 1
    for k in range(n_dim - 1):

        # Reshape the unfolding matrix of the remaining factors
        n_row = int(rank[k] * tensor_size[k])
        unfolding = tl.reshape(unfolding, (n_row, -1))

        # SVD of unfolding matrix
        (n_row, n_column) = unfolding.shape
        current_rank = min(n_row, n_column, rank[k + 1])
        U, S, V = tl.partial_svd(unfolding, current_rank)
        rank[k + 1] = current_rank

        # Get kth TT factor
        factors[k] = tl.reshape(U, (rank[k], tensor_size[k], rank[k + 1]))

        if verbose:
            print("TT factor " + str(k) + " computed with shape " +
                  str(factors[k].shape))

        # Get new unfolding matrix for the remaining factors
        unfolding = tl.reshape(S, (-1, 1)) * V

    # Getting the last factor
    (prev_rank, last_dim) = unfolding.shape
    factors[-1] = tl.reshape(unfolding, (prev_rank, last_dim, 1))

    if verbose:
        print("TT factor " + str(n_dim - 1) + " computed with shape " +
              str(factors[n_dim - 1].shape))

    return TTTensor(factors)
Example 19
def tensor_ring(input_tensor, rank):
    '''Tensor ring (TR) decomposition via recursive SVD

        Decomposes input_tensor into a sequence of order-3 tensors (factors),
        with the input rank of the first factor equal to the output rank of the
        last factor. This code is adapted from the MPS decomposition in
        tensorly's source code.

    Parameters
    ----------
    input_tensor : tensorly.tensor
    rank : {int, int list}
            maximum allowable rank of the factors
            if int, then this is the same for all the factors
            if int list, then rank[k] is the rank of the kth factor

    Returns
    -------
    factors : Tensor ring factors
              order-3 tensors of the tensor ring decomposition
    '''

    # Check user input for errors
    tensor_size = input_tensor.shape
    n_dim = len(tensor_size)

    if isinstance(rank, int):
        rank = [rank] * n_dim
    elif n_dim != len(rank):
        message = 'Provided incorrect number of ranks. '
        raise ValueError(message)
    rank = list(rank)

    # Initialization
    unfolding = tl.unfold(input_tensor, 0)
    factors = [None] * n_dim
    U, S, V = tl.partial_svd(unfolding, rank[0])
    r0 = int(np.sqrt(rank[0]))
    while rank[0] % r0:
        r0 -= 1
    T0 = tl.reshape(U, (tensor_size[0], r0, rank[0] // r0))
    factors[0] = torch.transpose(torch.tensor(T0), 0, 1)
    unfolding = tl.reshape(S, (-1, 1)) * V
    rank[1] = rank[0] // r0
    rank.append(r0)

    # Getting the MPS factors up to n_dim
    for k in range(1, n_dim):

        # Reshape the unfolding matrix of the remaining factors
        n_row = int(rank[k] * tensor_size[k])
        unfolding = tl.reshape(unfolding, (n_row, -1))

        # SVD of unfolding matrix
        (n_row, n_column) = unfolding.shape
        rank[k + 1] = min(n_row, n_column, rank[k + 1])
        U, S, V = tl.partial_svd(unfolding, rank[k + 1])

        # Get kth MPS factor
        factors[k] = tl.reshape(U, (rank[k], tensor_size[k], rank[k + 1]))

        # Get new unfolding matrix for the remaining factors
        unfolding = tl.reshape(S, (-1, 1)) * V

    return factors