Example 1
def gramians(factors, Gr, P1, P2):
    """ 
    Computes all Gramian matrices of the factor matrices. Also it computes all Hadamard products between the 
    different Gramians.
    """

    L = len(factors)
    R = factors[0].shape[1]

    for l in range(L):
        Gr[l] = dot(factors[l].T, factors[l], out=Gr[l])

    for l in range(1, L):
        for ll in range(l):
            P2[l][ll] = ones((R, R), dtype=float64)
            itr = list(range(L))
            itr.remove(l)
            itr.remove(ll)
            for lll in itr:
                P2[l][ll] = mlinalg.hadamard(P2[l][ll], Gr[lll], P2[l][ll])
            P2[ll][l] = P2[l][ll]
        if l < L-1:
            P1[l] = mlinalg.hadamard(P2[l][ll], Gr[ll], P1[l])
        else:
            P1[l] = mlinalg.hadamard(P2[l][0], Gr[0], P1[l])

    P1[0] = mlinalg.hadamard(P2[0][1], Gr[1], P1[0])

    return Gr, P1, P2
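
For reference, the quantities this routine fills in (through preallocated buffers and mlinalg.hadamard, which is assumed to be an elementwise product with an output argument) can be rebuilt from scratch in plain NumPy. A minimal sketch with hypothetical random factors:

import numpy as np

L, R = 3, 4
factors = [np.random.randn(5, R) for _ in range(L)]

# Gramians: Gr[l] = W^(l)^T W^(l), an R x R matrix for each mode.
Gr = [W.T @ W for W in factors]

# P1[l]: Hadamard product of all Gramians except the l-th one.
P1 = [np.prod([Gr[i] for i in range(L) if i != l], axis=0) for l in range(L)]

# P2[l][ll]: Hadamard product of all Gramians except the l-th and ll-th ones.
P2 = [[np.prod([Gr[i] for i in range(L) if i not in (l, ll)], axis=0)
       for ll in range(L)] for l in range(L)]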
Example 2
def hessian(factors, P1, P2, sum_dims):
    """
    Computes the approximate Hessian matrix of the error function.
    """

    L = len(factors)
    R = factors[0].shape[1]
    dims = [factors[l].shape[0] for l in range(L)]
    H = zeros((R * sum(dims), R * sum(dims)))
    vec_factors = [zeros(R*dims[l]) for l in range(L)]
    fortran_factors = [np.array(factors[l], order='F') for l in range(L)]
    for l in range(L):
        vec_factors[l] = cnv.vec(factors[l], vec_factors[l], dims[l], R) 
        vec_factors[l] = vec_factors[l].reshape(R*dims[l], 1).T
                
    for l in range(L):
        I = identity(dims[l])
        # Block H_{ll}.
        H[sum_dims[l]:sum_dims[l+1], sum_dims[l]:sum_dims[l+1]] = mlinalg.kronecker(P1[l, :, :], I)
        for ll in range(l):
            # Note: here the name I is reused for an all-ones matrix, not an identity.
            I = ones((dims[l], dims[ll]))
            tmp1 = mlinalg.kronecker(P2[l, ll, :, :], I)
            tmp2 = zeros((R*dims[l], R*dims[ll]))
            tmp2 = compute_blocks(tmp2, fortran_factors[l], vec_factors[ll], tuple(dims), R, l, ll)  
            # Blocks H_{l, ll} and H_{ll, l}.
            H[sum_dims[l]:sum_dims[l+1], sum_dims[ll]:sum_dims[ll+1]] = \
                mlinalg.hadamard(tmp1, tmp2, H[sum_dims[l]:sum_dims[l+1], sum_dims[ll]:sum_dims[ll+1]])  
            H[sum_dims[ll]:sum_dims[ll+1], sum_dims[l]:sum_dims[l+1]] = H[sum_dims[l]:sum_dims[l+1], sum_dims[ll]:sum_dims[ll+1]].T               
              
    return H
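
The offsets in sum_dims are assumed to be the cumulative sums of the block sizes R*dims[l], so that block (l, ll) of H occupies H[sum_dims[l]:sum_dims[l+1], sum_dims[ll]:sum_dims[ll+1]]. A small sketch with hypothetical dimensions:

import numpy as np

R, dims = 4, [5, 6, 7]
sum_dims = np.concatenate(([0], np.cumsum([R * d for d in dims])))
# sum_dims == [0, 20, 44, 72]: H is 72 x 72, partitioned into 3 x 3 blocks,
# where block (l, ll) has shape (R*dims[l], R*dims[ll]).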
Example 3
def matvec_inner(A, P2, P_VT_W, result_tmp, L):
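    """
    Inner loop of a Hessian-vector product: for each pair of distinct modes
    (l, ll), the Hadamard product between P2[l][ll] and X = A[ll] is computed
    and accumulated into result_tmp[l].
    """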
    for ll in range(L):
        X = A[ll]
        for l in range(ll):
            P_VT_W = mlinalg.hadamard(P2[l][ll], X, P_VT_W)
            result_tmp[l] += P_VT_W
        for l in range(ll+1, L):
            P_VT_W = mlinalg.hadamard(P2[l][ll], X, P_VT_W)
            result_tmp[l] += P_VT_W
                
    return result_tmp
Example 4
def rank_progress(X, Y, Z, m, n, R, k=0, greys=True, rgb=False):
    """
    Plots the partial sums of rank one terms corresponding to the k-th slice of the CPD. The last image should match
    the k-th slice of the tensor given by the CPD. Use rgb=True only for tensors of the form (m, n, 3) encoding the
    RGB format. The program displays the red rank one terms, then adds the green rank one terms, and finally the blue
    rank one terms. This ordering may cause some distortions in the final image.

    Inputs
    ------
    X, Y, Z: 2-D arrays
        They are the factor matrices of the CPD of some third order tensor.
    m, n, R: int
    k: int
        Slice we want to visualize.
    greys: bool
        If True (default), it will show all slices in gray scale. Otherwise it will show the RGB evolution of the
        slices. In this case the parameter 'rgb' should be set to True.
    rgb: bool
        If True, it will show all the RGB evolution of the slices. False is default.    
    """

    if greys:
        temp = zeros((m, n))
        sections = mlinalg.rank1(X, Y, Z, m, n, R, k)
        for r in range(R):
            temp = temp + sections[:, :, r]
            plt.imshow(temp, cmap='gray')
            name = 'factor_' + str(r + 1) + '.png'
            plt.savefig(name)
            plt.show()

    elif rgb:
        count = 0
        temp = zeros((m, n, 3))
        for color_choice in [0, 1, 2]:
            sections = mlinalg.rank1(X, Y, Z, m, n, R, color_choice)
            for r in range(R):
                temp[:, :, color_choice] = temp[:, :, color_choice] + sections[:, :, r]
                plt.imshow(array(temp, dtype=uint8))
                name = 'factor_' + str(count) + '.png'
                plt.savefig(name)
                plt.show()
                count += 1

    else:
        return

    return
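
Each frame in the greys branch is a partial sum of rank one slices. Assuming mlinalg.rank1 returns the individual rank one terms of the k-th frontal slice (which matches how they are summed above), they can be formed in plain NumPy as outer products; hypothetical factors below:

import numpy as np

m, n, p, R, k = 6, 5, 3, 4, 0
X, Y, Z = np.random.randn(m, R), np.random.randn(n, R), np.random.randn(p, R)

partial = np.zeros((m, n))
for r in range(R):
    # r-th rank one term of frontal slice k: Z[k, r] * outer(X[:, r], Y[:, r]).
    partial += Z[k, r] * np.outer(X[:, r], Y[:, r])
# After the last term, `partial` equals slice k of the tensor given by the CPD.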
Example 5
def compute_grad(Tl, factors, P1, g, N, gg, dims, sum_dims):
    """
    This function computes the gradient of the error function.
    """

    # Initialize first variables.
    L = len(factors)
    R = factors[0].shape[1]

    # Main computations.
    for l in range(L):
        itr = list(reversed(range(L)))
        itr.remove(l)
        M = factors[itr[0]]

        # Compute Khatri-Rao products W^(L) ⊙ ... ⊙ W^(l+1) ⊙ W^(l-1) ⊙ ... ⊙ W^(1).
        for ll in range(L-2):
            tmp = M
            dim1, dim2 = tmp.shape[0], dims[itr[ll+1]]
            M = empty((dim1 * dim2, R), dtype=float64)
            M = mlinalg.khatri_rao(tmp, factors[itr[ll+1]], M)

        dot(Tl[l], M, out=N[l])
        dot(factors[l], P1[l], out=gg[l])
        g[sum_dims[l]: sum_dims[l+1]] = (gg[l] - N[l]).T.ravel()

    return g
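
The Khatri-Rao product built in the loop above is the columnwise Kronecker product. A minimal self-contained sketch of what mlinalg.khatri_rao is assumed to compute (without its output-buffer argument):

import numpy as np

def khatri_rao(A, B):
    # Column r of the result is kron(A[:, r], B[:, r]).
    R = A.shape[1]
    return np.einsum('ir,jr->ijr', A, B).reshape(-1, R)

A, B = np.random.randn(3, 4), np.random.randn(5, 4)
M = khatri_rao(A, B)          # shape (15, 4)
assert np.allclose(M[:, 0], np.kron(A[:, 0], B[:, 0]))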
Example 6
def cpd2unfold1(T1_approx, factors):
    """
    Converts the factor matrices to the first unfolding of the corresponding tensor.

    Inputs
    ------
    T1_approx: float 2-D array
        Preallocated array which receives the result.
    factors: list of 2-D arrays
        The factor matrices.

    Outputs
    -------
    T1_approx: float 2-D array
        First unfolding of T_approx, where T_approx is (factors[0],...,factors[L-1])*I in coordinate format.
    """

    L = len(factors)
    M = factors[1]

    for l in range(2, L):
        N = empty((M.shape[0] * factors[l].shape[0], M.shape[1]))
        M = mlinalg.khatri_rao(factors[l], M, N)

    dot(factors[0], M.T, out=T1_approx)

    return T1_approx
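
Since T1_approx is written in place via out=, it must be preallocated with the shape of the first unfolding. A hypothetical usage sketch, assuming the package's modules are importable:

import numpy as np

# Hypothetical factor matrices of a (4, 5, 6) tensor with rank 3.
factors = [np.random.randn(d, 3) for d in (4, 5, 6)]
T1_approx = np.empty((4, 5 * 6))
T1_approx = cpd2unfold1(T1_approx, factors)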
Example 7
def cpd2tens(factors):
    """
    Converts the factor matrices to tensor in coordinate format using a Khatri-Rao product formula.

    Inputs
    ------
    factors: list of 2-D arrays
        The factor matrices.

    Outputs
    ------
    T_approx: float L-D array
        Tensor (factors[0],...,factors[L-1])*I in coordinate format. 
    """

    L = len(factors)
    dims = [factors[l].shape[0] for l in range(L)]
    T_approx = empty(dims)
    M = factors[1]

    for l in range(2, L):
        N = empty((M.shape[0] * factors[l].shape[0], M.shape[1]))
        M = mlinalg.khatri_rao(factors[l], M, N)

    T1_approx = dot(factors[0], M.T)
    T_approx = foldback(T_approx, T1_approx, 1)

    return T_approx
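
For a third order tensor, the reconstruction can be cross-checked directly with einsum (hypothetical small factors):

import numpy as np

X, Y, Z = np.random.randn(3, 2), np.random.randn(4, 2), np.random.randn(5, 2)
# (X, Y, Z)*I in coordinates: T[i, j, k] = sum_r X[i, r] * Y[j, r] * Z[k, r].
T = np.einsum('ir,jr,kr->ijk', X, Y, Z)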
Example 8
def als_iteration(Tl, factors, fix_mode):
    """
    This function performs the ALS iterations, that is, it updates each factor by solving the corresponding least
    squares problem via the pseudoinverse. This implementation is simple and not intended to be optimal.
    """

    # Initialize first variables.
    L = len(factors)
    R = factors[0].shape[1]
    dims = [factors[l].shape[0] for l in range(L)]

    # Main computations for the general case.
    if fix_mode == -1:
        for l in range(L):
            itr = list(reversed(range(L)))
            itr.remove(l)
            M = factors[itr[0]]

            # Compute Khatri-Rao products W^(L) ⊙ ... ⊙ W^(l+1) ⊙ W^(l-1) ⊙ ... ⊙ W^(1).
            for ll in range(L - 2):
                tmp = M
                dim1, dim2 = tmp.shape[0], dims[itr[ll + 1]]
                M = empty((dim1 * dim2, R), dtype=float64)
                M = mlinalg.khatri_rao(tmp, factors[itr[ll + 1]], M)

            factors[l] = dot(Tl[l], pinv(M.T))

        return factors

    # If fix_mode != -1, it is assumed that the program is using the bicpd function.
    # This part is only used for third order tensors.
    X, Y, Z = factors
    T1, T2, T3 = Tl

    if fix_mode == 0:
        M = empty((Z.shape[0] * X.shape[0], R))
        M = mlinalg.khatri_rao(Z, X, M)
        Y = dot(T2, pinv(M.T))
        M = empty((Y.shape[0] * X.shape[0], R))
        M = mlinalg.khatri_rao(Y, X, M)
        Z = dot(T3, pinv(M.T))

    elif fix_mode == 1:
        M = empty((Z.shape[0] * Y.shape[0], R))
        M = mlinalg.khatri_rao(Z, Y, M)
        X = dot(T1, pinv(M.T))
        M = empty((Y.shape[0] * X.shape[0], R))
        M = mlinalg.khatri_rao(Y, X, M)
        Z = dot(T3, pinv(M.T))

    elif fix_mode == 2:
        M = empty((Z.shape[0] * Y.shape[0], R))
        M = mlinalg.khatri_rao(Z, Y, M)
        X = dot(T1, pinv(M.T))
        M = empty((Z.shape[0] * X.shape[0], R))
        M = mlinalg.khatri_rao(Z, X, M)
        Y = dot(T2, pinv(M.T))

    return [X, Y, Z]
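
Each update factors[l] = dot(Tl[l], pinv(M.T)) is a least squares solve; an equivalent formulation uses lstsq. A sketch with hypothetical shapes:

import numpy as np

Tl_l = np.random.randn(5, 24)    # hypothetical mode-l unfolding
M = np.random.randn(24, 4)       # hypothetical Khatri-Rao product

W_pinv = Tl_l @ np.linalg.pinv(M.T)
W_lstsq = np.linalg.lstsq(M, Tl_l.T, rcond=None)[0].T
assert np.allclose(W_pinv, W_lstsq)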
Example 9
def rank1_plot(X,
               Y,
               Z,
               m,
               n,
               R,
               k=0,
               num_rows=5,
               num_cols=5,
               greys=True,
               rgb=False,
               save=False):
    """
    This function generates an image with the frontal sections of all rank one terms (in coordinates) of some CPD. If
    save=True, it also saves the image to a file.
    Warning: this function uses a lot of memory.

    Inputs
    ------
    X, Y, Z: 2-D arrays
        They are the factor matrices of the CPD of some third order tensor.
    m, n, R: int
    k: int
        Slice we want to visualize.
    num_rows, num_cols: int
        The dimensions of the grid of subplots. We recommend using square grids in order to maximize the size of each
        subplot. Some blank squares may be left at the end.
    greys: bool
        If True (default), it will show all slices in gray scale. Otherwise it will show the RGB evolution of the
        slices. In this case the parameter 'rgb' should be set to True.
    rgb: bool
        If True, it will show all the RGB evolution of the slices. Default is rgb=False.
    """

    sections = mlinalg.rank1(X, Y, Z, m, n, R, k)
    r = 0
    count = 0

    while r < R:
        fig, ax = plt.subplots(num_rows,
                               num_cols,
                               figsize=(30, 30),
                               sharex='col',
                               sharey='row')
        for i in range(num_rows):
            for j in range(num_cols):
                ax[i, j].xaxis.set_major_locator(plt.NullLocator())
                ax[i, j].yaxis.set_major_locator(plt.NullLocator())
                if r < R:
                    if greys:
                        temp = sections[:, :, r]
                        ax[i, j].imshow(temp, cmap='gray')
                    elif rgb:
                        temp = zeros((m, n, 3))
                        temp[:, :, k] = sections[:, :, r]
                        ax[i, j].imshow(array(temp, dtype=uint8))
                    else:
                        return
                r += 1

        if save:
            name = 'fig_' + str(count) + '.png'
            plt.savefig(name)
        count += 1

    return
Example 10
def starting_point(T, Tsize, S, U, R, ordering, options):
    """
    This function generates a starting point to begin the iterations. There are four options:
        - list: the user may give a list with the factor matrices to be used as starting point.
        - 'random': the entries of the factor matrices are generated by the normal distribution with mean 0 and
           variance 1.
        - 'smart_random': generates a random starting point with a method based on the MLSVD which always guarantees a
           small relative error. Check the function 'smart_random' for more details about this method.
        - 'smart': works similarly to the smart_random method, but this one is deterministic and generates the best
           rank-R approximation based on the MLSVD.

    Inputs
    ------
    T: float array
    Tsize: float
    S: float array
        The core tensor of the MLSVD of T.
    U: list of float 2-D arrays
        Each element of U is an orthogonal matrix of the MLSVD of T.
    R: int
        The desired rank.
    ordering: list of ints
       Since the CPD may change the index ordering, this list may be necessary if the user gives an initialization,
       which will be based on the original ordering.
    options: class
    
    Outputs
    -------
    init_factors: list of float 2D arrays
    """

    # Extract all variables from the options class.
    initialization = options.initialization
    c = options.factors_norm
    symm = options.symm
    display = options.display
    dims = S.shape
    L = len(dims)

    if type(initialization) == list:
        init_factors = [
            dot(U[l].T, initialization[ordering[l]]) for l in range(L)
        ]

    elif initialization == 'random':
        init_factors = [randn(dims[l], R) for l in range(L)]

    elif initialization == 'smart_random':
        init_factors = smart_random(S, dims, R)

    elif initialization == 'smart':
        init_factors = smart(S, dims, R)

    else:
        sys.exit('Error with init parameter.')

    if type(initialization) != list:

        # Depending on the tensor, the factors may have null entries. We want to avoid that. The solution is to
        # introduce a very small random noise.
        init_factors = clean_zeros(init_factors, dims, R)

        # Make all factors balanced.
        init_factors = cnv.equalize(init_factors, R)

        # Apply additional transformations if requested.
        init_factors = cnv.transform(init_factors, symm, c)

    if display > 2 or display < -1:
        S_init = cnv.cpd2tens(init_factors)
        if type(T) == list:
            rel_error = mlinalg.compute_error(T, Tsize, S_init, U, dims)
        else:
            S1_init = cnv.unfold(S_init, 1)
            rel_error = mlinalg.compute_error(T, Tsize, S1_init, U, dims)
        return init_factors, rel_error

    return init_factors
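
For illustration only, the attributes read from options in this function suggest a container like the hypothetical sketch below; the package's real options class has more fields:

class MinimalOptions:
    initialization = 'random'   # or 'smart_random', 'smart', or a list of factor matrices
    factors_norm = 0            # hypothetical placeholder for the norm parameter c
    symm = False
    display = 0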
Example 11
def bicpd(T, R, fixed_factor, options):
    """
    Practically the same as tricpd, but this function keeps one factor fixed during all the computations. It is meant
    to be used as part of the tensor train CPD.
    """

    # INITIALIZE RELEVANT VARIABLES

    # Extract all variables from the options class.
    initialization = options.initialization
    refine = options.refine
    symm = options.symm
    display = options.display
    tol_mlsvd = options.tol_mlsvd
    bi_method = options.bi_method_parameters[0]
    if type(tol_mlsvd) == list:
        tol_mlsvd = tol_mlsvd[1]

    # Set the other variables.
    m, n, p = T.shape
    Tsize = norm(T)
    ordering = [0, 1, 2]

    # Test consistency of dimensions and rank.
    aux.consistency(R, (m, n, p), options)

    # COMPRESSION STAGE

    if display > 0:
        print(
            '-----------------------------------------------------------------------------------------------'
        )
        print('Computing MLSVD of T')

    # Compute compressed version of T with the MLSVD. We have that T = (U1, U2, U3)*S.
    if display > 2 or display < -1:
        S, U, T1, sigmas, best_error = cmpr.mlsvd(T, Tsize, R, options)
    else:
        S, U, T1, sigmas = cmpr.mlsvd(T, Tsize, R, options)
    R1, R2, R3 = S.shape
    U1, U2, U3 = U

    # When the tensor is symmetric we want S to have equal dimensions.
    if symm:
        R_min = min(R1, R2, R3)
        R1, R2, R3 = R_min, R_min, R_min
        S = S[:R_min, :R_min, :R_min]
        U1, U2, U3 = U1[:, :R_min], U2[:, :R_min], U3[:, :R_min]

    if display > 0:
        if (R1, R2, R3) == (m, n, p):
            if tol_mlsvd == -1:
                print('    No compression and no truncation requested by user')
                print('    Working with dimensions', T.shape)
            else:
                print('    No compression detected')
                print('    Working with dimensions', T.shape)
        else:
            print('    Compression detected')
            print('    Compressing from', T.shape, 'to', S.shape)
        if display > 2:
            print('    Compression relative error = {:7e}'.format(best_error))

    # GENERATION OF STARTING POINT STAGE

    # Generate an initial point to start the dGN iterations.
    if display > 2 or display < -1:
        [X, Y, Z], init_error = init.starting_point(T, Tsize, S, U, R,
                                                    ordering, options)
    else:
        [X, Y, Z] = init.starting_point(T, Tsize, S, U, R, ordering, options)

    # Discard the factor computed in starting_point and use the previous one. Then project it onto the compressed space.
    if fixed_factor[1] == 0:
        X = dot(U1.T, fixed_factor[0])
        X = [X, 0]
    elif fixed_factor[1] == 1:
        Y = dot(U2.T, fixed_factor[0])
        Y = [Y, 1]
    elif fixed_factor[1] == 2:
        Z = dot(U3.T, fixed_factor[0])
        Z = [Z, 2]

    if display > 0:
        print(
            '-----------------------------------------------------------------------------------------------'
        )
        if type(initialization) == list:
            print('Type of initialization: fixed + user')
        else:
            print('Type of initialization: fixed +', initialization)
        if display > 2:
            if fixed_factor[1] == 0:
                S_init = cnv.cpd2tens([X[0], Y, Z])
            elif fixed_factor[1] == 1:
                S_init = cnv.cpd2tens([X, Y[0], Z])
            elif fixed_factor[1] == 2:
                S_init = cnv.cpd2tens([X, Y, Z[0]])
            S1_init = cnv.unfold(S_init, 1)
            init_error = mlinalg.compute_error(T, Tsize, S1_init, [U1, U2, U3],
                                               (R1, R2, R3))
            print(
                '    Initial guess relative error = {:5e}'.format(init_error))

    # DAMPED GAUSS-NEWTON STAGE

    if display > 0:
        print(
            '-----------------------------------------------------------------------------------------------'
        )
        print('Computing CPD of T')

    # Compute the approximated tensor in coordinates with dGN or ALS.
    if bi_method == 'als':
        factors, step_sizes_main, errors_main, improv_main, gradients_main, stop_main = \
            als.als(S, [X, Y, Z], R, options)
    else:
        factors, step_sizes_main, errors_main, improv_main, gradients_main, stop_main = \
            gn.dGN(S, [X, Y, Z], R, options)
    X, Y, Z = factors

    # FINAL WORKS

    # Use the orthogonal transformations to obtain the CPD of T.
    if fixed_factor[1] == 0:
        Y = dot(U2, Y)
        Z = dot(U3, Z)
    elif fixed_factor[1] == 1:
        X = dot(U1, X)
        Z = dot(U3, Z)
    elif fixed_factor[1] == 2:
        X = dot(U1, X)
        Y = dot(U2, Y)

    # Compute error.
    T1_approx = empty(T1.shape)
    if fixed_factor[1] == 0:
        T1_approx = cnv.cpd2unfold1(T1_approx, [fixed_factor[0], Y, Z])
    elif fixed_factor[1] == 1:
        T1_approx = cnv.cpd2unfold1(T1_approx, [X, fixed_factor[0], Z])
    elif fixed_factor[1] == 2:
        T1_approx = cnv.cpd2unfold1(T1_approx, [X, Y, fixed_factor[0]])

    # Save and display final information.
    step_sizes_refine = array([0])
    errors_refine = array([0])
    improv_refine = array([0])
    gradients_refine = array([0])
    stop_refine = 5
    output = aux.output_info(T1, Tsize, T1_approx, step_sizes_main,
                             step_sizes_refine, errors_main, errors_refine,
                             improv_main, improv_refine, gradients_main,
                             gradients_refine, stop_main, stop_refine, options)

    if display > 0:
        print(
            '==============================================================================================='
        )
        print('Final results of bicpd')
        print('    Number of steps =', output.num_steps)
        print('    Relative error =', output.rel_error)
        acc = float('%.6e' % Decimal(output.accuracy))
        print('    Accuracy = ', acc, '%')

    return X, Y, Z, output
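
A hypothetical call, keeping the mode-0 factor frozen; from the code above, fixed_factor is read as a pair [matrix, mode]:

# X, Y, Z, output = bicpd(T, R, [X_fixed, 0], options)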
Example 12
def mlsvd(T, Tsize, R, options):
    """
    This function computes a truncated MLSVD of tensors of any order. The output is such that T = (U_1,...,U_L)*S.
    The parameter n_iter of the randomized SVD is set to 2. Increasing this value only pays off when the tensor is
    very noisy; since that issue is already addressed by the low rank CPD approximation, n_iter=2 is enough.

    Inputs
    ------
    T: float array
        Objective tensor in coordinates.
    Tsize: float
        Frobenius norm of T.
    R: int
        An upper bound for the multilinear rank of T. Normally one will use the rank of T.
    options: class with the parameters previously defined.

    Outputs
    -------
    S: float array
        Core tensor of the MLSVD.
    U: list of float 2-D arrays
        List with truncated matrices of the original U.
    T1: float 2-D array
        First unfolding of T.
    sigmas: list of float 1-D arrays
        List with truncated arrays of the original sigmas.
    """

    # INITIALIZE RELEVANT VARIABLES.

    sigmas = []
    U = []

    # Verify if T is sparse, in which case it will be given as a list with the data.
    if type(T) == list:
        data, idxs, dims = T
    else:
        dims = T.shape
    L = len(dims)

    # Set options.
    options = aux.make_options(options, L)
    trunc_dims = options.trunc_dims
    display = options.display
    mlsvd_method = options.mlsvd_method
    tol_mlsvd = options.tol_mlsvd
    if type(tol_mlsvd) == list:
        if L > 3:
            tol_mlsvd = tol_mlsvd[0]
        else:
            tol_mlsvd = tol_mlsvd[1]
    gpu = options.gpu
    if gpu:
        import pycuda.gpuarray as gpuarray
        import pycuda.autoinit
        from skcuda import linalg, rlinalg

    # tol_mlsvd = -1 means no truncation and no compression, that is, the original tensor.
    if tol_mlsvd == -1:
        T1 = cnv.unfold(T, 1)
        U = [identity(dims[l]) for l in range(L)]
        sigmas = [ones(dims[l]) for l in range(L)]
        if display > 2 or display < -1:
            return T, U, T1, sigmas, 0.0
        else:
            return T, U, T1, sigmas

    # T is sparse.
    elif type(T) == list:
        for l in range(L):
            Tl = cnv.sparse_unfold(data, idxs, dims, l + 1)
            if l == 0:
                T1 = cnv.sparse_unfold(data, idxs, dims, l + 1)
            mlsvd_method = 'sparse'
            U, sigmas, Vlt, dim = compute_svd(Tl, U, sigmas, dims, R,
                                              mlsvd_method, tol_mlsvd, gpu, L,
                                              l)

        # Compute (U_1^T,...,U_L^T)*T = S.
        new_dims = [U[l].shape[1] for l in range(L)]
        UT = [U[l].T for l in range(L)]
        S = mlinalg.sparse_multilin_mult(UT, data, idxs, new_dims)

    # Compute the MLSVD based on the sequentially truncated method.
    elif mlsvd_method == 'seq':
        S_dims = copy(dims)
        S = T
        for l in range(L):
            Sl = cnv.unfold(S, l + 1)
            if l == 0:
                T1 = cnv.unfold_C(S, l + 1)
            U, sigmas, Vlt, dim = compute_svd(Sl, U, sigmas, dims, R,
                                              mlsvd_method, tol_mlsvd, gpu, L,
                                              l)

            # Compute l-th unfolding of S truncated at the l-th mode.
            Sl = (Vlt.T * sigmas[-1]).T
            S_dims[l] = dim
            S = empty(S_dims, dtype=float64)
            S = cnv.foldback(S, Sl, l + 1)

    # Compute the MLSVD based on the classic method.
    elif mlsvd_method == 'classic':
        for l in range(L):
            Tl = cnv.unfold(T, l + 1)
            if l == 0:
                T1 = cnv.unfold_C(T, l + 1)
            U, sigmas, Vlt, dim = compute_svd(Tl, U, sigmas, dims, R,
                                              mlsvd_method, tol_mlsvd, gpu, L,
                                              l)

        # Compute (U_1^T,...,U_L^T)*T = S.
        UT = [U[l].T for l in range(L)]
        S = mlinalg.multilin_mult(UT, T1, dims)

    # Specific truncation is given by the user.
    if type(trunc_dims) == list:
        slices = []
        for l in range(L):
            slices.append(slice(0, trunc_dims[l]))
            if trunc_dims[l] > U[l].shape[1]:
                print('trunc_dims[', l, '] =', trunc_dims[l], 'and U[', l,
                      '].shape =', U[l].shape)
                sys.exit(
                    'Must have trunc_dims[l] <= min(dims[l], R) for all modes l=1...'
                    + str(L))
            U[l] = U[l][:, :trunc_dims[l]]
        S = S[tuple(slices)]

    # Compute error of compressed tensor.
    if display > 2 or display < -1:
        if type(T) == list:
            best_error = mlinalg.compute_error(T, Tsize, S, U, dims)
        else:
            S1 = cnv.unfold(S, 1)
            best_error = mlinalg.compute_error(T, Tsize, S1, U, S.shape)
        return S, U, T1, sigmas, best_error

    return S, U, T1, sigmas
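
The identity T = (U_1,...,U_L)*S from the docstring, written out with einsum for a hypothetical third order example:

import numpy as np

S = np.random.randn(2, 3, 4)                   # core tensor
U1 = np.linalg.qr(np.random.randn(5, 2))[0]    # factors with orthonormal columns
U2 = np.linalg.qr(np.random.randn(6, 3))[0]
U3 = np.linalg.qr(np.random.randn(7, 4))[0]
# T[i, j, k] = sum_{a, b, c} U1[i, a] * U2[j, b] * U3[k, c] * S[a, b, c].
T = np.einsum('ia,jb,kc,abc->ijk', U1, U2, U3, S)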
Example 13
def test_truncation(T,
                    trunc_list,
                    display=True,
                    n_iter=2,
                    power_iteration_normalizer='none'):
    """
    This function tests one or several possible truncations for the MLSVD of T, showing the error of each truncation.
    It is possible to accomplish the same results by calling the function mlsvd with display=3, but this is not
    advisable since each call recomputes the same unfolding SVDs.
    The variable trunc_list must be a list of truncations; even a single truncation must be given as a one-element
    list.
    """

    # Set the main variables about T.
    dims = T.shape
    L = len(dims)
    Tsize = norm(T)

    # Transform list into array and get the maximum for each dimension.
    max_trunc_dims = np.max(array(trunc_list), axis=0)

    # Compute truncated SVD of all unfoldings of T.
    sigmas = []
    U = []
    T1 = empty((dims[0], prod(dims) // dims[0]), dtype=float64)
    for l in range(L):
        Tl = cnv.unfold(T, l + 1)
        if l == 0:
            T1 = cnv.unfold_C(T, l + 1)
        low_rank = min(dims[l], max_trunc_dims[l])
        Ul, sigma_l, Vlt = rand_svd(
            Tl,
            low_rank,
            n_iter=n_iter,
            power_iteration_normalizer=power_iteration_normalizer)
        sigmas.append(sigma_l)
        U.append(Ul)

    # Save errors in a list.
    trunc_error = []

    # Truncated MLSVD.
    for trunc in trunc_list:
        # S, U and UT truncated.
        current_dims = trunc
        current_U = []
        current_sigmas = []
        for l in range(L):
            current_U.append(U[l][:, :current_dims[l]])
            current_sigmas.append(sigmas[l][:current_dims[l]])
        current_UT = [current_U[l].T for l in range(L)]
        S = mlinalg.multilin_mult(current_UT, T1, dims)

        # Error of truncation.
        S1 = cnv.unfold(S, 1)
        current_error = mlinalg.compute_error(T, Tsize, S1, current_U,
                                              current_dims)
        trunc_error.append(current_error)

        # Display results.
        if display:
            print('Truncation:', current_dims)
            print('Error:', current_error)
            print()

    return trunc_error
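
A hypothetical usage sketch, assuming the package's modules (cnv, mlinalg, rand_svd) are importable:

import numpy as np

T = np.random.randn(20, 20, 20)
errors = test_truncation(T, [[5, 5, 5], [10, 10, 10]], display=False)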