Example #1
def compute_svd(Tl, U, sigmas, dims, R, mlsvd_method, tol_mlsvd, gpu, L, l):
    """
    Compute the truncated SVD of the unfolding Tl (on GPU or CPU), truncate it further
    based on tol_mlsvd, and append the resulting factors to U and sigmas.
    Whenever the Gram matrix Tl * Tl.T is factorized instead of Tl itself, its singular
    values are the squares of those of Tl, hence the sqrt taken afterwards.
    """

    low_rank = min(R, dims[l])

    if gpu:
        if mlsvd_method == 'gpu' or mlsvd_method == 'sparse':
            tmp = array(dot(Tl, Tl.T), dtype=float32, order='F')
            Tl_gpu = gpuarray.to_gpu(tmp)
            Ul, sigma_l, Vlt = rlinalg.rsvd(Tl_gpu, k=low_rank, p=10, q=2, method='standard')
            sigma_l = sqrt(sigma_l)
        else:
            tmp = array(Tl.T, dtype=float32)
            Tl_gpu = gpuarray.to_gpu(tmp)
            Ul, sigma_l, Vlt = rlinalg.rsvd(Tl_gpu, k=low_rank, p=10, q=2, method='standard')

    else:
        if mlsvd_method == 'sparse':
            Tl = Tl.dot(Tl.T)
            Ul, sigma_l, Vlt = rand_svd(Tl, low_rank, n_oversamples=10, n_iter=2, power_iteration_normalizer='none')
            sigma_l = sqrt(sigma_l)
        else:
            Ul, sigma_l, Vlt = rand_svd(Tl, low_rank, n_oversamples=10, n_iter=2, power_iteration_normalizer='none')

    # Truncate more based on energy.
    Ul, sigma_l, Vlt, dim = clean_compression(Ul, sigma_l, Vlt, tol_mlsvd, L)
    sigmas.append(sigma_l)
    U.append(Ul)

    return U, sigmas, Vlt, dim
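
The 'sparse' branch factorizes the Gram matrix Tl @ Tl.T instead of Tl, which is why the square roots of its singular values are taken. Below is a minimal self-contained sketch of that equivalence, assuming rand_svd refers to scikit-learn's randomized_svd; the data and names are purely illustrative.

import numpy as np
from sklearn.utils.extmath import randomized_svd  # assumed to be what rand_svd refers to

# Unfolding-like matrix with a few dominant directions plus noise.
rng = np.random.default_rng(0)
Tl = rng.standard_normal((20, 5)) @ rng.standard_normal((5, 500)) + 0.01 * rng.standard_normal((20, 500))
low_rank = 5

# Direct truncated SVD of the unfolding.
U_direct, s_direct, _ = randomized_svd(Tl, n_components=low_rank, n_oversamples=10, n_iter=2)

# Gram-matrix route of the 'sparse' branch: Tl @ Tl.T shares its left singular
# vectors with Tl, and its singular values are the squares of those of Tl.
U_gram, s_gram, _ = randomized_svd(Tl.dot(Tl.T), n_components=low_rank, n_oversamples=10, n_iter=2)
s_gram = np.sqrt(s_gram)

print(np.max(np.abs(s_direct - s_gram) / s_direct))  # leading singular values agree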
Example #2
def test_truncation(T, trunc_list, display=True, n_iter=2, power_iteration_normalizer='none'):
    """
    This function test one or several possible truncations for the MLSVD of T, showing the  error of the truncations. It
    is possible to accomplish the same results calling the function mlsvd with display=3 but this is not advisable since
    each call recomputes the same unfolding SVD's.
    The variable trunc_list must be a list of truncations. Even if it is only one truncation, it must be a list with one
    truncation only.
    """

    # Set the main variables about T.
    dims = T.shape
    L = len(dims)
    Tsize = norm(T)

    # Transform list into array and get the maximum for each dimension.
    max_trunc_dims = np.max(array(trunc_list), axis=0)

    # Compute truncated SVD of all unfoldings of T.
    sigmas = []
    U = []
    T1 = empty((dims[0], prod(dims) // dims[0]), dtype=float64)
    for l in range(L):
        Tl = cnv.unfold(T, l+1)
        if l == 0:
            T1 = cnv.unfold_C(T, l+1)
        low_rank = min(dims[l], max_trunc_dims[l])
        Ul, sigma_l, Vlt = rand_svd(Tl, low_rank, n_iter=n_iter, power_iteration_normalizer=power_iteration_normalizer)
        sigmas.append(sigma_l)
        U.append(Ul)

    # Save errors in a list.
    trunc_error = []

    # Truncated MLSVD.
    for trunc in trunc_list:
        # S, U and UT truncated.
        current_dims = trunc
        current_U = []
        current_sigmas = []
        for l in range(L):
            current_U.append(U[l][:, :current_dims[l]])
            current_sigmas.append(sigmas[l][:current_dims[l]])
        current_UT = [current_U[l].T for l in range(L)]
        S = mlinalg.multilin_mult(current_UT, T1, dims)

        # Error of truncation.
        S1 = cnv.unfold(S, 1)
        current_error = mlinalg.compute_error(T, Tsize, S1, current_U, current_dims)
        trunc_error.append(current_error)

        # Display results.
        if display:
            print('Truncation:', current_dims)
            print('Error:', current_error)
            print()

    return trunc_error
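
For reference, the number reported for each entry of trunc_list is the relative error of the truncated MLSVD. The following is a minimal NumPy sketch of the same measurement for a single truncation, using exact SVDs in place of rand_svd and 0-indexed unfoldings; all names here are illustrative, not TensorFox's own.

import numpy as np

def unfold(T, mode):
    """Mode-`mode` unfolding (0-indexed) of a dense tensor."""
    return np.moveaxis(T, mode, 0).reshape(T.shape[mode], -1)

def mode_mult(T, M, mode):
    """Multiply tensor T by matrix M along the given mode."""
    return np.moveaxis(np.tensordot(M, np.moveaxis(T, mode, 0), axes=1), 0, mode)

rng = np.random.default_rng(0)
T = rng.standard_normal((12, 10, 8))
trunc = (5, 5, 5)  # one candidate truncation, mirroring a single entry of trunc_list

# Left singular vectors of every unfolding, truncated to the candidate ranks.
U = [np.linalg.svd(unfold(T, l), full_matrices=False)[0][:, :trunc[l]] for l in range(T.ndim)]

# Core tensor S = T x_0 U0^T x_1 U1^T x_2 U2^T, then the reconstruction and its error.
S = T
for l, Ul in enumerate(U):
    S = mode_mult(S, Ul.T, l)
T_hat = S
for l, Ul in enumerate(U):
    T_hat = mode_mult(T_hat, Ul, l)

print(np.linalg.norm(T - T_hat) / np.linalg.norm(T))  # relative truncation error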
Example #3
def tt_core(V, dims, r1, r2, l):
    """
    Computation of one core of the CPD Tensor Train function (cpdtt).
    """

    V = V.reshape(r1 * dims[l], prod(dims[l + 1:]), order='F')
    low_rank = min(V.shape[0], V.shape[1])
    # Randomized SVD of the reshaped factor; the name V is reused for the right factor.
    U, S, V = rand_svd(V, low_rank, n_iter=0)
    U = U[:, :r2]
    # Keep the first r2 rows of S @ Vt; they are passed on to compute the next core.
    S = diag(S)
    V = dot(S, V)
    V = V[:r2, :]
    if r1 == 1:
        g = U.reshape(dims[l], r2, order='F')
    else:
        g = U.reshape(r1, dims[l], r2, order='F')
    return V, g
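
Below is a minimal NumPy sketch of how cores produced in this fashion chain into a tensor train, using an exact SVD in place of rand_svd; tt_step and the other names are illustrative, not the library's API. With full TT-ranks the reconstruction is exact up to rounding.

import numpy as np

def tt_step(V, n_l, r1, r2):
    """One step in the style of tt_core: reshape, truncated SVD, keep an
    (r1, n_l, r2) core, and pass S @ Vt on to the next step."""
    V = V.reshape(r1 * n_l, -1, order='F')
    U, S, Vt = np.linalg.svd(V, full_matrices=False)
    core = U[:, :r2].reshape(r1, n_l, r2, order='F')
    V_next = (np.diag(S) @ Vt)[:r2, :]
    return V_next, core

rng = np.random.default_rng(0)
dims, ranks = (4, 5, 6), (1, 4, 6, 1)  # full TT-ranks, so the decomposition is exact
T = rng.standard_normal(dims)

V = T.reshape(1, -1, order='F')
cores = []
for l in range(len(dims) - 1):
    V, g = tt_step(V, dims[l], ranks[l], ranks[l + 1])
    cores.append(g)
cores.append(V.reshape(ranks[-2], dims[-1], ranks[-1], order='F'))

# Contract the cores back into a full tensor and check the reconstruction error.
T_hat = cores[0]
for g in cores[1:]:
    T_hat = np.tensordot(T_hat, g, axes=([-1], [0]))
T_hat = T_hat[0, ..., 0]
print(np.linalg.norm(T - T_hat) / np.linalg.norm(T))  # ~1e-15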