Example No. 1
def test_coupled_matrix_tensor_3d_factorization():
    I = 21
    J = 12
    K = 8
    M = 7
    R = 3

    tensor_cp_true = random_cp((I, J, K), rank=R, normalise_factors=False)
    matrix_cp_true = random_cp((I, M), rank=R, normalise_factors=False)
    matrix_cp_true.factors[0] = tensor_cp_true.factors[0]

    tensor_true = cp_to_tensor(tensor_cp_true)
    matrix_true = cp_to_tensor(matrix_cp_true)

    X_pred, Y_pred, errors = coupled_matrix_tensor_3d_factorization(
        tensor_true, matrix_true, R)

    # Check that the error monotonically decreases
    assert_(np.all(np.diff(errors) <= 0.0))

    # Check the reconstruction error
    tol_norm_2 = 10e-2
    tol_max_abs = 10e-2
    tensor_pred = cp_to_tensor(X_pred)
    matrix_pred = cp_to_tensor(Y_pred)
    error = tl.norm(tensor_true - tensor_pred)**2 + tl.norm(matrix_true -
                                                            matrix_pred)**2
    assert_(error < tol_norm_2,
            'squared norm of reconstruction error higher than tol')
    assert_(tl.max(tl.abs(tensor_true - tensor_pred)) < tol_max_abs,
            'abs norm of reconstruction error higher than tol')
Example No. 2
def depthwise_factorization(K, dm=1):
    # Alternating least squares for a depthwise-separable approximation of a
    # k1 x k2 x c_in x c_out convolution kernel K, with depth multiplier dm:
    # K is approximated by a depthwise filter D and a pointwise mixing matrix W.
    shapeK = K.shape
    dimK = len(shapeK)
    Kbar = np.concatenate([K for i in range(dm)], axis=-2)

    k1 = shapeK[0]
    k2 = shapeK[1]
    c_in = shapeK[-2] * dm
    c_out = shapeK[-1]

    # Depthwise filter D, pointwise mixing matrix W, and an order-3 identity
    # tensor I with ones on the superdiagonal
    D = tt.tensor(np.random.randn(k1, k2, c_in))
    W = np.random.randn(c_out, c_in)
    I = np.zeros((c_in, c_in, c_in))
    for i in range(c_in):
        I[i, i, i] = 1.0
    I = tt.tensor(I)

    Khat = tt.tenalg.mode_dot(tt.tenalg.inner(D, I, 1), W, -1)
    Knorm = tt.norm(K)
    err = tt.norm(Khat - Kbar) / Knorm  # relative error, consistent with the loop update

    K3 = my_unfold(Kbar, -2)
    K4 = my_unfold(Kbar, -1)
    K3t_vec = (K3.T).flatten(order='F')

    n_iter = 0
    n_max = 50
    err_vec = np.zeros((n_max, 1))
    still_improving = True
    #print(n_iter, err, np.linalg.cond(W))

    while err > 1.e-8 and still_improving and n_iter < n_max:
        err_vec[n_iter] = err

        # fix W, update D
        D3t_vec = tt.tenalg.kronecker([
            np.diag(np.reciprocal(np.diag(W.T @ W))) @ tt.tenalg.khatri_rao(
                [np.identity(c_in), W]).T,
            np.identity(k1 * k2)
        ]) @ K3t_vec
        D = tt.tensor(np.reshape(D3t_vec, D.shape, order='F'))

        # fix D, update W
        D3 = my_unfold(D, -1)
        W = K4 @ tt.tenalg.khatri_rao([np.identity(c_in), D3.T]) @ np.diag(
            np.reciprocal(np.diag(D3 @ D3.T)))
        # rescale W,D so that W has abs-row-sum = 1
        Rvec = np.max(np.abs(W), axis=0)
        W = W @ np.diag(np.reciprocal(Rvec))
        D = tt.tenalg.mode_dot(D, np.diag(Rvec), -1)

        # Update error
        Khat = tt.tenalg.mode_dot(tt.tenalg.inner(D, I, 1), W, -1)
        err = tt.norm(Khat - Kbar) / Knorm

        still_improving = err < err_vec[n_iter]
        n_iter += 1
        #print(n_iter, err)

    return D, W, Khat, err_vec, n_iter
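The function above relies on a my_unfold helper that is not shown in this excerpt. A minimal sketch of what it presumably does, assuming it matches tensorly's unfold with support for negative mode indices (this definition is an assumption, not the original helper):

# Assumed stand-in for the my_unfold helper used above (hypothetical):
# tensorly's unfold with negative mode indices resolved against the tensor order.
import tensorly as tt

def my_unfold(tensor, mode):
    # e.g. mode=-2 on a 4-way tensor becomes mode=2
    return tt.unfold(tensor, mode % tt.ndim(tensor))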
Example No. 3
def Activationcoeffsingle(args):
    # The parameters are tensors. Inferred args layout:
    # args[0]: list of data slices, args[1]: list of activation cores,
    # args[2], args[3]: factor matrices, args[4]: gradient step size,
    # args[5]: max iterations, args[6]: stopping tolerance,
    # args[7], args[8]: penalty parameters (passed to Derivativefeatureproblem),
    # args[9]: nonnegativity flag, args[10]: index of the slice being processed

    Gnew = tl.tensor(args[1][args[10]])
    Gold = tl.tensor(np.zeros(args[1][args[10]].shape))
    Gresult = tl.tensor(np.zeros(args[1][args[10]].shape))
    Matrix = np.dot(
        np.dot(mxnet_backend.to_numpy(args[2]), mxnet_backend.to_numpy(Gnew)),
        mxnet_backend.to_numpy(args[3]).T)
    error = tl.norm(args[0][args[10]] - tl.tensor(Matrix), 2) / tl.norm(
        args[0][args[10]], 2)
    nbiter = 0

    while (nbiter < args[5]):
        nbiter = nbiter + 1
        Gold = Gnew
        derivative = Derivativefeatureproblem(args[0][args[10]], Gold, args[2],
                                              args[3], args[7], args[8],
                                              args[9])

        Gnew = Gold - args[4] * derivative
        if args[9]:  # nonnegativity: project onto the nonnegative orthant
            Gnew = tl.tensor(np.maximum(mxnet_backend.to_numpy(Gnew), 0))
        Gresult = Gnew

        Matrix = np.dot(
            np.dot(mxnet_backend.to_numpy(args[2]),
                   mxnet_backend.to_numpy(Gnew)),
            mxnet_backend.to_numpy(args[3]).T)

        error = tl.norm(args[0][args[10]] - tl.tensor(Matrix), 2) / tl.norm(
            args[0][args[10]], 2)
        if (error < args[6]):
            break
    return Gresult
Example No. 4
def test_cp_vonneumann_entropy_mixed_state():
    """Test for cp_vonneumann_entropy on CP tensors. 
    This test checks that the VNE of mixed states is calculated correctly.
    """
    state1 = tl.tensor([[
        0.03004805, 0.42426117, 0.5483771, 0.4784077, 0.25792725, 0.34388784,
        0.99927586, 0.96605812
    ]])
    state1 = state1 / tl.norm(state1)
    state2 = tl.tensor([[
        0.84250089, 0.43429687, 0.26551928, 0.18262211, 0.55584835, 0.2565509,
        0.33197401, 0.97741178
    ]])
    state2 = state2 / tl.norm(state2)
    mat_mixed = tl.tensor((tl.dot(tl.transpose(state1), state1) +
                           tl.dot(tl.transpose(state2), state2)) / 2.)
    actual_vne = 0.5546
    mat = parafac(tl.tensor(mat_mixed), rank=2, normalize_factors=True)
    mat_unnorm = parafac(tl.tensor(mat_mixed), rank=2, normalize_factors=False)
    tl_vne = cp_vonneumann_entropy(mat)
    tl_vne_unnorm = cp_vonneumann_entropy(mat_unnorm)
    assert_array_almost_equal(tl_vne, actual_vne, decimal=3)
    assert_array_almost_equal(tl_vne_unnorm, actual_vne, decimal=3)
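For reference, the expected entropy can be computed directly from the eigenvalues of the density matrix. A minimal numpy sketch (the helper name is ours, not part of tensorly):

# S(rho) = -sum_i lambda_i * ln(lambda_i) over the nonzero eigenvalues of rho
import numpy as np

def vonneumann_entropy_dense(rho):
    evals = np.linalg.eigvalsh(np.asarray(rho))
    evals = evals[evals > 1e-12]
    return float(-np.sum(evals * np.log(evals)))

# vonneumann_entropy_dense(tl.to_numpy(mat_mixed)) should be close to 0.5546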
Example No. 5
def test_parafac_power_iteration():
    """Test for symmetric Parafac optimized with robust tensor power iterations"""
    rng = check_random_state(1234)
    tol_norm_2 = 10e-1
    tol_max_abs = 10e-1

    shape = (5, 3, 4)
    rank = 4
    tensor = random_cp(shape, rank=rank, full=True, random_state=rng)
    ktensor = parafac_power_iteration(tensor,
                                      rank=10,
                                      n_repeat=10,
                                      n_iteration=10)

    rec = tl.cp_to_tensor(ktensor)
    error = tl.norm(rec - tensor, 2) / tl.norm(tensor, 2)
    assert_(
        error < tol_norm_2,
        f'Norm 2 of reconstruction error={error} higher than tol={tol_norm_2}.'
    )
    error = tl.max(tl.abs(rec - tensor))
    assert_(
        error < tol_max_abs,
        f'Absolute norm of reconstruction error={error} higher than tol={tol_max_abs}.'
    )
Example No. 6
def test_partial_tucker():
    """Test for the Partial Tucker decomposition"""
    rng = tl.check_random_state(1234)
    tol_norm_2 = 10e-3
    tol_max_abs = 10e-1
    tensor = tl.tensor(rng.random_sample((3, 4, 3)))
    modes = [1, 2]
    core, factors = partial_tucker(tensor, modes, rank=None, n_iter_max=200, verbose=True)
    reconstructed_tensor = multi_mode_dot(core, factors, modes=modes)
    norm_rec = tl.norm(reconstructed_tensor, 2)
    norm_tensor = tl.norm(tensor, 2)
    assert_((norm_rec - norm_tensor)/norm_rec < tol_norm_2)

    # Test the max abs difference between the reconstruction and the tensor
    assert_(tl.max(tl.abs(norm_rec - norm_tensor)) < tol_max_abs)

    # Test the shape of the core and factors
    ranks = [3, 1]
    core, factors = partial_tucker(tensor, modes=modes, rank=ranks, n_iter_max=100, verbose=1)
    for i, rank in enumerate(ranks):
        assert_equal(factors[i].shape, (tensor.shape[i+1], ranks[i]),
                     err_msg="factors[{}].shape={}, expected {}".format(
                         i, factors[i].shape, (tensor.shape[i+1], ranks[i])))
    assert_equal(core.shape, [tensor.shape[0]]+ranks, err_msg="Core.shape={}, "
                     "expected {}".format(core.shape, [tensor.shape[0]]+ranks))

    # Test random_state fixes the core and the factor matrices
    core1, factors1 = partial_tucker(tensor, modes=modes, rank=ranks, random_state=0)
    core2, factors2 = partial_tucker(tensor, modes=modes, rank=ranks, random_state=0)
    assert_array_equal(core1, core2)
    for factor1, factor2 in zip(factors1, factors2):
        assert_array_equal(factor1, factor2)
Example No. 7
def test_gcp_sgd():
    """ Test sgd optimization functionality """
    # Create a random tensor
    np.random.seed(1234)
    d = 3
    shp = (100, 20, 30)
    size = 1
    for i in shp:
        size *= i
    data = np.random.rand(size)
    tensor = tl.tensor(data.reshape(shp, order='F'), dtype=tl.float64)
    rank = 10
    mTen = gcp(tensor, rank, type='normal', opt='sgd', maxiters=100, epciters=10)

    assert (mTen is not None), "gcp returned null"
    assert (len(mTen[1]) == d), "Number of factors should be 3, currently has " + str(len(mTen[1]))

    # Check each factor matrix has the correct number of columns
    for k in range(d):
        rows, columns = tl.shape(mTen[1][k])
        assert (columns == rank), "Factor matrix {} needs {} columns, but only has {}".format(k + 1, rank, columns)

    # Check CPTensor has same number of elements as tensor
    mTen = tl.cp_to_tensor(mTen)
    assert (tensor.size == mTen.size), \
        "Unequal number of tensor elements. Tensor: {} CPTensor: {}".format(tensor.size, mTen.size)
    score = 1 - (tl.norm(tensor - mTen) / tl.norm(tensor))
    print("Score: {0:0.4f}".format(score))
Example No. 8
def Operations_listmatrices(listofmatrices, operationnature):
    # The parameters are tensors
    Res = []
    if operationnature == "Arrayconversion":
        for matrix in listofmatrices:
            element = tl.tensor(matrix)
            Res.append(mxnet_backend.to_numpy(element))
        return Res

    if operationnature == "Transpose":
        for matrix in listofmatrices:
            element = tl.tensor(matrix)
            Res.append(element.T)  # computes A.T
        return Res

    if operationnature == "Transposetimes":
        for matrix in listofmatrices:
            element = tl.tensor(matrix)
            Matrix = tl.backend.dot(element.T, element)
            Res.append(Matrix)  # computes A.T @ A
        return Res

    if operationnature == "NormI":
        for matrix in listofmatrices:
            Res.append(tl.norm(matrix, 1))
        return Res
    if operationnature == "NormII":
        for matrix in listofmatrices:
            Res.append(np.power(tl.norm(matrix, 2), 2))
        return Res
Example No. 9
def Sparse_code(X, G_init, listoffactors, Nonnegative, step, max_iter, alpha,
                theta, epsilon):
    # The parameters are tensors.
    # This function performs the sparse coding step via proximal gradient descent.
    G_new = tl.tensor(G_init)
    G_old = tl.tensor(np.zeros(G_new.shape))
    G_result = tl.tensor(np.zeros(G_new.shape))
    error = np.power(tl.norm(X - Tensor_matrixproduct(G_new, listoffactors),
                             2), 2)  #+Lambda*T.norm(G_new,1))

    nb_iter = 0
    error_list = [error]
    while (nb_iter <= max_iter):
        nb_iter = nb_iter + 1
        G_old = G_new

        G_new = G_old - step * derivativeCore(X, G_old, listoffactors)
        if Nonnegative:
            G_new = tl.backend.maximum(
                G_old - step * derivativeCore(X, G_old, listoffactors) +
                tl.tensor(alpha * theta * np.ones(G_old.shape)), 0)
        else:
            G_new = Proximal_operator(G_new, step)
        error = np.power(
            tl.norm(X - Tensor_matrixproduct(G_new, listoffactors), 2),
            2)  #+Lambda*T.norm(G_new,1)
        G_result = G_new
        error_list.append(error)
        #if(np.abs(previous_error-error)/error<epsilon):
        if (np.sqrt(error) / tl.norm(X, 2) < epsilon):
            G_result = G_old
            error_list = error_list[0:len(error_list) - 1]
            break
    return G_result, error_list, nb_iter
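Proximal_operator is not defined in this excerpt; for an l1 penalty the usual choice is soft-thresholding. A hedged sketch under that assumption:

# Assumed form of the Proximal_operator helper: soft-thresholding, the proximal
# map of the l1 norm (an assumption; the original helper is not shown here).
import numpy as np
import tensorly as tl

def soft_threshold(G, threshold):
    Gn = tl.to_numpy(G)
    return tl.tensor(np.sign(Gn) * np.maximum(np.abs(Gn) - threshold, 0.0))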
Example No. 10
def Factorupdateproblem(X, G, Ainit, listoffactorsmatrices, alpha, theta, n,
                        maxiter, epsilon):
    Anew = tl.tensor(Ainit)
    Aold = tl.tensor(np.zeros(Anew.shape))
    Aresult = tl.tensor(np.zeros(Anew.shape))
    error = np.power(tl.norm(X - Tensor_matrixproduct(G, listoffactorsmatrices), 2), 2) \
        + alpha * (1 - theta) * np.power(tl.norm(Anew, 2), 2)

    nbiter = 0
    while nbiter < maxiter:
        nbiter = nbiter + 1
        Aold = Anew

        Anew = derivativeDict(X, G, Aold, listoffactorsmatrices, alpha, theta, n)
        Anew = Anew / tl.norm(Anew, 2)
        error = np.power(tl.norm(X - Tensor_matrixproduct(G, listoffactorsmatrices), 2), 2)  # regularization term omitted, as in the original
        Aresult = Anew

        if np.sqrt(error) / tl.norm(X, 2) < epsilon:
            Aresult = Aold
            break
    return Aresult
Example No. 11
def test_masked_tucker():
    """Test for the masked Tucker decomposition.
    This checks that a mask of 1's is identical to the unmasked case.
    """
    rng = check_random_state(1234)
    tensor = tl.tensor(rng.random_sample((3, 3, 3)))
    mask = tl.tensor(np.ones((3, 3, 3)))

    mask_fact = tucker(tensor, rank=(2, 2, 2), mask=mask)
    fact = tucker(tensor, rank=(2, 2, 2))
    diff = tucker_to_tensor(mask_fact) - tucker_to_tensor(fact)
    assert_(
        tl.norm(diff) < 0.001, 'norm 2 of reconstruction higher than 0.001')

    # Mask an outlier value, and check that the decomposition ignores it
    tensor = random_tucker((5, 5, 5), (1, 1, 1), full=True, random_state=1234)
    mask = tl.tensor(np.ones((5, 5, 5)))

    mask_tensor = tl.tensor(tensor)
    mask_tensor = tl.index_update(mask_tensor, tl.index[0, 0, 0], 1.0)
    mask = tl.index_update(mask, tl.index[0, 0, 0], 0)

    # We won't use the SVD decomposition, but check that it at least runs successfully
    mask_fact = tucker(mask_tensor, rank=(1, 1, 1), mask=mask, init="svd")
    mask_fact = tucker(mask_tensor,
                       rank=(1, 1, 1),
                       mask=mask,
                       init="random",
                       random_state=1234)
    mask_err = tl.norm(tucker_to_tensor(mask_fact) - tensor)

    assert_(mask_err < 0.001, 'norm 2 of reconstruction higher than 0.001')
Example No. 12
def Mean_relative_errorsingle(args):
    # Inferred args layout: args[0]: list of data tensors, args[1]: list of core
    # tensors, args[2]: factor matrices, args[3]: index of the current slice
    error = np.power(
        tl.norm(
            args[0][args[3]] - Tensor_matrixproduct(args[1][args[3]], args[2]),
            2), 2)
    error = error / np.power(tl.norm(args[0][args[3]], 2), 2)
    return error
Example No. 13
def Operations_listmatrices(listofmatrices, operationnature):
    # This function takes a list of matrices and performs some classical
    # operations on its elements; operationnature specifies the operation.
    # The matrices are of tensor type.
    Res = []
    if operationnature == "Transpose":
        for matrix in listofmatrices:
            element = np.copy(tl.backend.to_numpy(matrix))
            Res.append(tl.tensor(element.T))  # computes A.T
        return Res

    if operationnature == "Transposetimes":
        for matrix in listofmatrices:
            element = np.copy(tl.backend.to_numpy(matrix))
            Res.append(tl.tensor(np.dot(element.T, element)))  # computes A.T @ A
        return Res

    if operationnature == "NormI":
        for matrix in listofmatrices:
            Res.append(tl.norm(tl.tensor(matrix), 1))
        return Res
    if operationnature == "NormII":
        for matrix in listofmatrices:
            Res.append(np.power(tl.norm(tl.tensor(matrix), 2), 2))
        return Res

    if operationnature == "Tensorize":
        for matrix in listofmatrices:
            Res.append(tl.tensor(matrix))
        return Res
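A quick usage check for the snippet above (assuming numpy as np and tensorly as tl with the numpy backend, as elsewhere in these examples):

mats = [np.eye(2), 2 * np.eye(2)]
print(Operations_listmatrices(mats, "NormI"))   # l1 norms: [2.0, 4.0]
print(Operations_listmatrices(mats, "NormII"))  # squared Frobenius norms: [2.0, 8.0]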
Example No. 14
def normalized_error(reference_tensor, reconstructed_tensor):
    '''Computes a normalized error between two tensors

    Parameters
    ----------
    reference_tensor : ndarray list
        A tensor that could be a list of lists, a multidimensional numpy array or
        a tensorly.tensor. This tensor is the input of a tensor decomposition and
        used as reference in the normalized error for a new tensor reconstructed
        from the factors of the tensor decomposition.

    reconstructed_tensor : ndarray list
        A tensor that could be a list of lists, a multidimensional numpy array or
        a tensorly.tensor. This tensor is an approximation of the reference_tensor
        by using the resulting factors of a tensor decomposition to compute it.

    Returns
    -------
    norm_error : float
        The normalized error between a reference tensor and a reconstructed tensor.
        The error is normalized by dividing by the Frobenius norm of the reference
        tensor.
    '''
    norm_error = tl.norm(reference_tensor -
                         reconstructed_tensor) / tl.norm(reference_tensor)
    return tl.to_numpy(norm_error)
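A small usage sketch (assuming tensorly as tl and numpy as np, as in the function above):

rng = np.random.default_rng(0)
ref = tl.tensor(rng.random((4, 5, 6)))
approx = ref + 0.01 * tl.tensor(rng.random((4, 5, 6)))  # small perturbation
print(normalized_error(ref, approx))  # relative error on the order of 1e-2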
Example No. 15
def score(fac, fac_est):
    """
    standard score metric

    Parameters
    ----------
    fac : list of matrices
        true factors
    fac_est : list of matrices
        factors returned by CP decomposition

    Returns
    -------
    double
        Score.

  """
    weights, fac = tl.cp_normalize((None, fac))
    weights_est, fac_est = tl.cp_normalize((None, fac_est))
    score = 0
    # find the corresponding columns of fac and fac_est
    row_ind, col_ind = linear_sum_assignment(
        -np.abs(np.dot(np.transpose(fac[0]), fac_est[0])))
    for k in range(len(fac)):
        fac_est[k] = fac_est[k][:, col_ind]
    for i in range(fac[0].shape[1]):  # loop over the R components
        temp = 1
        for j in range(len(fac)):  # loop over the N modes
            temp = temp * np.dot(fac[j][:, i], fac_est[j][:, i]) / (
                tl.norm(fac[j][:, i]) * tl.norm(fac_est[j][:, i]))
        score = score + temp
    return (score / fac[0].shape[1])
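A usage sketch: identical factors with permuted columns should score 1, since the assignment step matches the columns up (assuming numpy as np, tensorly as tl, and scipy.optimize.linear_sum_assignment in scope, as the function requires):

rng = np.random.default_rng(1)
fac = [rng.standard_normal((dim, 3)) for dim in (4, 5, 6)]
fac_est = [f[:, [2, 0, 1]] for f in fac]  # same factors, columns permuted
print(score(fac, fac_est))  # ~1.0: the assignment undoes the permutation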
Example No. 16
def test_gcp_continuous_loss_functions():
    cont_losses = ['normal', 'gaussian']
    opts = ['lbfgsb', 'sgd']
    rng = tl.check_random_state(1234)
    shp = (4, 5, 6)
    rank = 4
    #tensor = generate_test_tensor('normal', shp)
    size = 1
    for i in shp:
        size *= i
    data1 = rng.random(size)
    tensor = tl.tensor(data1.reshape(shp, order='F'), dtype=tl.float64)
    ## CHECK CONTINUOUS DATA-CENTRIC LOSS
    print("\n***************************************************")
    print("\t Testing continuous data")
    for loss in cont_losses:
        print("***************************************************\n")
        print("Loss function type: {}".format(loss))
        for opt in opts:
            mTen = gcp(tensor, rank, type=loss, opt=opt, maxiters=1000, epciters=100)
            assert (mTen is not None), "gcp({}) returned null".format(opt)
            mTen = tl.cp_to_tensor(mTen)
            assert (tensor.size == mTen.size), \
                "Unequal number of tensor elements. Tensor: {} CPTensor: {}".format(tensor.size, mTen.size)
            score = 1 - (tl.norm(tensor - mTen) / tl.norm(tensor))
            print("Score: {0:0.4f}\n".format(score))
Example No. 17
def test_symmetric_parafac_power_iteration(monkeypatch):
    """Test for symmetric Parafac optimized with robust tensor power iterations"""
    rng = tl.check_random_state(1234)
    tol_norm_2 = 10e-1
    tol_max_abs = 10e-1

    size = 5
    rank = 4
    true_factor = tl.tensor(rng.random_sample((size, rank)))
    true_weights = tl.ones(rank)
    tensor = tl.cp_to_tensor((true_weights, [true_factor] * 3))
    weights, factor = symmetric_parafac_power_iteration(tensor,
                                                        rank=10,
                                                        n_repeat=10,
                                                        n_iteration=10)

    rec = tl.cp_to_tensor((weights, [factor] * 3))
    error = tl.norm(rec - tensor, 2)
    error /= tl.norm(tensor, 2)
    assert_(error < tol_norm_2, 'norm 2 of reconstruction higher than tol')
    # Test the max abs difference between the reconstruction and the tensor
    assert_(
        tl.max(tl.abs(rec - tensor)) < tol_max_abs,
        'abs norm of reconstruction error higher than tol')
    assert_class_wrapper_correctly_passes_arguments(
        monkeypatch,
        symmetric_parafac_power_iteration,
        SymmetricCP,
        ignore_args={},
        rank=3)
Example No. 18
    def explained_variance(self):
        '''Computes the explained variance score for a tensor decomposition. Inspired by the
        function in sklearn.metrics.explained_variance_score.

        Returns
        -------
        explained_variance : float
            Explained variance score for a tensor factorization.
        '''
        assert self.tl_object is not None, "Must run compute_tensor_factorization before using this method."
        tensor = self.tensor
        rec_tensor = self.tl_object.to_tensor()
        mask = self.mask

        if mask is not None:
            tensor = tensor * mask
            rec_tensor = rec_tensor * mask

        y_diff_avg = tl.mean(tensor - rec_tensor)
        numerator = tl.norm(tensor - rec_tensor - y_diff_avg)

        tensor_avg = tl.mean(tensor)
        denominator = tl.norm(tensor - tensor_avg)

        if denominator == 0.:
            explained_variance = 0.0
        else:
            explained_variance = 1. - (numerator / denominator)
            explained_variance = explained_variance.item()
        return explained_variance
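The same score as a standalone function on plain tensors, mirroring the method above (the helper name is ours):

import tensorly as tl

def explained_variance_of(tensor, rec_tensor):
    # 1 - ||(y - y_rec) - mean(y - y_rec)|| / ||y - mean(y)||, as in the method above
    y_diff_avg = tl.mean(tensor - rec_tensor)
    numerator = tl.norm(tensor - rec_tensor - y_diff_avg)
    denominator = tl.norm(tensor - tl.mean(tensor))
    return 0.0 if denominator == 0. else float(1. - numerator / denominator)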
Example No. 19
def test_gcp_1():
    """ Test for generalized CP """

    ## Test 1 - shapes and dimensions

    # Create tensor with random elements
    rng = tl.check_random_state(1234)
    d = 3
    n = 4
    shape = (40, 50, 60)
    tensor = tl.tensor(rng.random(shape), dtype=tl.float32)
    # tensor = (np.arange(n**d, dtype=float).reshape((n,)*d))
    # tensor = tl.tensor(tensor)  # a 4 x 4 x 4 tensor

    tensor_shape = tensor.shape

    # Find gcp decomposition of the tensor
    rank = 20
    mTen = gcp(tensor, rank, type='normal', state=rng, maxiters=1e5)
    print(mTen)
    assert(mTen is not None), "gcp returned null"
    assert(len(mTen[1]) == d), "Number of factors should be 3, currently has " + str(len(mTen[1]))

    # Check each factor matrix has the correct number of columns
    for k in range(d):
        rows, columns = tl.shape(mTen[1][k])
        assert(columns == rank), "Factor matrix {} needs {} columns, but only has {}".format(k + 1, rank, columns)

    # Check CPTensor has same number of elements as tensor
    mTen = tl.cp_to_tensor(mTen)
    assert(tensor.size == mTen.size), "Unequal number of tensor elements. Tensor: {} CPTensor: {}".format(tensor.size, mTen.size)
    score = 1 - (tl.norm(tensor - mTen)/tl.norm(tensor))
    print("Score: {}".format(score))
Example No. 20
def Operations_listmatrices(listofmatrices, operationnature):
    # The parameters are tensors
    Res = []
    if operationnature == "Turnintoarray":
        for matrix in listofmatrices:
            element = np.copy(mxnet_backend.to_numpy(matrix))
            Res.append(element)
        return Res

    if operationnature == "Transpose":
        for matrix in listofmatrices:
            element = np.copy(mxnet_backend.to_numpy(matrix))
            Res.append(tl.tensor(element.T))  # computes A.T
        return Res

    if operationnature == "Transposetimes":
        for matrix in listofmatrices:
            element = np.copy(mxnet_backend.to_numpy(matrix))
            Res.append(tl.tensor(np.dot(element.T, element)))  # computes A.T @ A
        return Res

    if operationnature == "NormI":
        for matrix in listofmatrices:
            Res.append(tl.norm(matrix, 1))
        return Res
    if operationnature == "NormII":
        for matrix in listofmatrices:
            Res.append(np.power(tl.norm(matrix, 2), 2))
        return Res

    if operationnature == "Tensorize":
        for matrix in listofmatrices:
            Res.append(tl.tensor(matrix))
        return Res
Example No. 21
def err_fac(fac, fac_est):
    """
    factor error computation
    Parameters
    ----------
    fac : list of matrices
        true factor matrices
    fac_est : list of matrices
        factor matrices estimation

    Returns
    -------
    float
        factor error
  """
    # normalize factor matrices
    weights, fac = tl.cp_normalize((None, fac))
    weights_est, fac_est = tl.cp_normalize((None, fac_est))
    err = 0
    for i in range(len(fac)):
        # find the corresponding columns of fac and fac_est
        if i == 0:
            row_ind, col_ind = linear_sum_assignment(
                -np.abs(np.dot(np.transpose(fac[i]), fac_est[i])))
        err = err + (tl.norm(fac[i] - fac_est[i][:, col_ind]) /
                     tl.norm(fac[i]))
    return (err / len(fac))
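A usage sketch: rescaled copies of the true factors should give an error near 0, since cp_normalize removes the scale difference (same import assumptions as the score function above):

rng = np.random.default_rng(2)
fac = [rng.standard_normal((dim, 3)) for dim in (4, 5, 6)]
fac_est = [2.0 * f for f in fac]  # same directions, different scale
print(err_fac(fac, fac_est))  # ~0.0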
Example No. 22
def test_matrix_product_state():
    """ Test for matrix_product_state """
    rng = check_random_state(1234)

    ## Test 1

    # Create tensor with random elements
    tensor = tl.tensor(rng.random_sample([3, 4, 5, 6, 2, 10]))
    tensor_shape = tensor.shape

    # Find MPS decomposition of the tensor
    rank = [1, 3, 3, 4, 2, 2, 1]
    factors = matrix_product_state(tensor, rank)

    assert (
        len(factors) == 6
    ), "Number of factors should be 6, currently has " + str(len(factors))

    # Check that the ranks are correct and that the second mode of each factor
    # has the correct number of elements
    r_prev_iteration = 1
    for k in range(6):
        (r_prev_k, n_k, r_k) = factors[k].shape
        assert (tensor_shape[k] == n_k
                ), "Mode 1 of factor " + str(k) + " needs " + str(
                    tensor_shape[k]) + " dimensions, currently has " + str(n_k)
        assert (r_prev_k == r_prev_iteration), " Incorrect ranks of factors "
        r_prev_iteration = r_k

    ## Test 2
    # Create tensor with random elements
    tensor = tl.tensor(rng.random_sample([3, 4, 5, 6, 2, 10]))
    tensor_shape = tensor.shape

    # Find MPS decomposition of the tensor
    rank = [1, 5, 4, 3, 8, 10, 1]
    factors = matrix_product_state(tensor, rank)

    for k in range(6):
        (r_prev, n_k, r_k) = factors[k].shape

        first_error_message = "MPS rank " + str(
            k) + " is greater than the maximum allowed "
        first_error_message += str(r_prev) + " > " + str(rank[k])
        assert (r_prev <= rank[k]), first_error_message

        first_error_message = "MPS rank " + str(
            k + 1) + " is greater than the maximum allowed "
        first_error_message += str(r_k) + " > " + str(rank[k + 1])
        assert (r_k <= rank[k + 1]), first_error_message

    ## Test 3
    tol = 10e-5
    tensor = tl.tensor(rng.random_sample([3, 3, 3]))
    factors = matrix_product_state(tensor, (1, 3, 3, 1))
    reconstructed_tensor = tl.mps_to_tensor(factors)
    error = tl.norm(reconstructed_tensor - tensor, 2)
    error /= tl.norm(tensor, 2)
    assert_(error < tol, 'norm 2 of reconstruction higher than tol')
Example No. 23
def TuckerBatchfull(X, Coretensorsize, max_iter, listoffactorsinit, Ginit,
                    Nonnegative, Reprojectornot, alpha, theta, step, epsilon):
    N = len(list(X.shape))
    listoffactorsnew = list(listoffactorsinit)
    listoffactorsnew = Operations_listmatrices(listoffactorsnew, "Tensorize")
    listoffactorsold = []
    listoffactorsresult = []
    Gnew = tl.tensor(np.copy(Ginit))
    Gold = tl.tensor(np.zeros(Ginit.shape))
    Gresult = tl.tensor(np.zeros(Ginit.shape))
    error = np.power(
        tl.norm(
            tl.tensor(X) - Tensor_matrixproduct(Gnew, listoffactorsnew), 2), 2
    )  #+alpha*theta*T.norm(Gnew,1)+alpha*(1-theta)*np.sum(Operations_listmatrices(listoffactorsnew[1:N],"NormII"))
    print("Point I")

    nbiter = 0
    errorlist = [error]
    while (nbiter < max_iter):
        print("We are in batch")
        nbiter = nbiter + 1
        listoffactorsold = listoffactorsnew
        Gold = Gnew

        Gnew = Sparse_code(tl.tensor(X), Gold, listoffactorsold, Nonnegative,
                           step, max_iter, alpha, theta, epsilon)[0]

        for n in range(N):

            Aold = listoffactorsnew[n]

            Anew = Factorupdateproblem(tl.tensor(X), Gnew, Aold,
                                       listoffactorsnew, Nonnegative, alpha,
                                       theta, n, max_iter, step, epsilon)

            listoffactorsnew[n] = Anew

        error = np.power(
            tl.norm(
                tl.tensor(X) - Tensor_matrixproduct(Gnew, listoffactorsnew),
                2), 2
        )  #+alpha*theta*T.norm(Gnew,1)+alpha*(1-theta)*np.sum(Operations_listmatrices(listoffactorsnew[1:N],"NormII"))

        errorlist.append(error)
        listoffactorsresult = listoffactorsold
        Gresult = Gnew

        if (np.sqrt(error) / tl.norm(tl.tensor(X), 2) < epsilon):
            listoffactorsresult = listoffactorsold
            Gresult = Gold
            errorlist = errorlist[0:len(errorlist) - 1]
            break
    #print(errorlist)

    if Reprojectornot:
        return Gresult, listoffactorsresult, errorlist, nbiter
    else:
        return listoffactorsresult, errorlist, nbiter
Example No. 24
def test_matrix_product_state_cross_4():
    """ Test for matrix_product_state_cross """

    # TEST 4
    # A random tensor is not very compressible, so test on a tensor of function values instead

    def getEquispaceGrid(n_dim, rng, subdivisions):
        '''
        Returns a grid of equally-spaced points in the specified number of dimensions

        n_dim       : The number of dimensions to construct the tensor grid in
        rng         : The maximum dimension coordinate (grid starts at 0)
        subdivisions: Number of subdivisions of the grid to construct
        '''

        return np.array([
            np.array(range(subdivisions + 1)) * rng * 1.0 / subdivisions
            for i in range(n_dim)
        ])

    def evaluateGrid(grid, fcn):
        '''
        Loops over a grid in specified order and computes the specified function at each
        point in the grid, returning a list of computed values.
        '''
        d, n = grid.shape
        values = np.zeros(len(grid[0])**len(grid))
        idx = 0
        for permutation in itertools.product(range(len(grid[0])),
                                             repeat=len(grid)):
            pt = np.array(
                [grid[i][permutation[i]] for i in range(len(permutation))])
            values[idx] = fcn(pt)
            idx += 1

        return values.reshape((n, ) * d)

    def func(X):
        return sum(X)**3

    maxvoleps = 1e-4
    tol = 1e-3
    n = 10
    d = 4
    rng = 1
    grid = getEquispaceGrid(d, rng, n)
    value = evaluateGrid(grid, func)
    value = tl.tensor(value)

    # Find MPS decomposition of the tensor
    rank = [1, 4, 4, 4, 1]
    factors = matrix_product_state_cross(value, rank, tol=tol)

    approx = mps_to_tensor(factors)
    error = tl.norm(approx - value, 2)
    error /= tl.norm(value, 2)

    print(error)
    assert_(error < 1e-5, 'norm 2 of reconstruction higher than tol')
Example No. 25
    def penalty(self, order=1):
        """Add l2 regularization on the core and the factors"""
        penalty = tl.norm(self.core, order)

        for f in self.factors:
            penalty = penalty + tl.norm(f, order)

        return penalty
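A standalone equivalent for a plain (core, factors) pair (a sketch; assuming numpy as np and tensorly as tl):

core = tl.tensor(np.ones((2, 2, 2)))
factors = [tl.tensor(np.ones((4, 2))) for _ in range(3)]
reg = tl.norm(core, 1) + sum(tl.norm(f, 1) for f in factors)
print(reg)  # 8 + 3*8 = 32 for order=1 on these all-ones inputs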
Example No. 26
def test_randomized_svd():
    """ Imports the tensor of union of all genes among 6 cell lines and performs parafac. """
    tensor, _, _ = form_tensor()
    tInit = initialize_cp(tensor, 7)
    tfac = parafac(tensor, rank=7, init=tInit, linesearch=True, n_iter_max=2)
    r2x = 1 - tl.norm(
        (tl.cp_to_tensor(tfac) - tensor))**2 / (tl.norm(tensor))**2
    assert r2x > 0
Example No. 27
def Mean_relative_error(X, G, listoffactors, setting, pool):
    if setting == "Single":
        return np.power(tl.norm(X - Tensor_matrixproduct(G, listoffactors), 2),
                        2) / np.power(tl.norm(X, 2), 2)
    if setting == "MiniBatch":
        Mean_errorslist = pool.map(Mean_relative_errorsingle,
                                   [[X, G, listoffactors, l]
                                    for l in range(len(X))])
        return np.mean(np.array(Mean_errorslist))
Example No. 28
def init_factors(I,
                 J,
                 K,
                 r,
                 noise_level=0.1,
                 scale=False,
                 nn=False,
                 snr=False):
    """
    Initialize a three way tensor's factor matrices
    
    Parameters
    ----------
    I : int
        dimension of mode 1.
    J : int
        dimension of mode 2.
    K : int
        dimension of mode 3.
    r : int
        rank.
    noise_level : float, optional
        noise level. The default is 0.1.
    scale : boolean, optional
        whether to scale the singular values or not. The default is False.
    nn : boolean, optional
        whether to make the factors nonnegative. The default is False.
    snr : boolean, optional
        whether to return the SNR or not. The default is False.

    Returns
    -------
    factors : list of matrices
        factors
    noise : tensor
        noise tensor.

  """
    A = np.random.normal(0, 1, size=(I, r))
    B = np.random.normal(0, 1, size=(J, r))
    C = np.random.normal(0, 1, size=(K, r))
    if nn:
        A = np.abs(A)
        B = np.abs(B)
        C = np.abs(C)
    if scale:
        A = sv_scale_to_100(A)
        B = sv_scale_to_100(B)
        C = sv_scale_to_100(C)
    factors = [A, B, C]
    x = tl.cp_to_tensor((None, factors))
    N = np.random.normal(0, 1, size=(I, J, K))
    noise = noise_level * tl.norm(x) / tl.norm(N) * N
    SNR = 10 * np.log10(tl.norm(x)**2 / tl.norm(noise)**2)
    if snr:
        return (factors, noise, SNR)
    else:
        return (factors, noise)
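A quick check of the SNR bookkeeping (assuming numpy as np and tensorly as tl): by construction ||noise|| = noise_level * ||x||, so the returned SNR is -20*log10(noise_level):

factors, noise, SNR = init_factors(10, 11, 12, 3, noise_level=0.1, snr=True)
noisy_tensor = tl.cp_to_tensor((None, factors)) + noise
print(SNR)  # 20.0 dB, since -20*log10(0.1) = 20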
Example No. 29
def test_RPCA():
    """Test for RPCA"""
    tol = 1e-5

    sample = np.array([[1., 2, 3, 4],
                       [2, 4, 6, 8]])
    clean = np.vstack([sample[None, ...]]*100)
    noise_probability = 0.05
    rng = check_random_state(12345)
    noise = rng.choice([0., 100., -100.], size=clean.shape, replace=True,
                      p=[1 - noise_probability, noise_probability/2, noise_probability/2])
    tensor = tl.tensor(clean + noise)
    corrupted_clean = np.copy(clean)
    corrupted_noise = np.copy(noise)
    clean = tl.tensor(clean)
    noise = tl.tensor(noise)
    clean_pred, noise_pred = robust_pca(tensor, mask=None, reg_E=0.4, mu_max=10e12,
                                        learning_rate=1.2,
                                        n_iter_max=200, tol=tol, verbose=True)
    # check recovery
    assert_array_almost_equal(tensor, clean_pred+noise_pred, decimal=tol)
    # check low rank recovery
    assert_array_almost_equal(clean, clean_pred, decimal=1)
    # Check for sparsity of the gross error
    # assert tl.sum(noise_pred > 0.01) == tl.sum(noise > 0.01)
    assert_array_equal((noise_pred > 0.01), (noise > 0.01))
    # check sparse gross error recovery
    assert_array_almost_equal(noise, noise_pred, decimal=1)

    ############################
    # Test with missing values #
    ############################
    # Add some corruption (missing values, replaced by ones)
    mask = rng.choice([0, 1], clean.shape, replace=True, p=[0.05, 0.95])
    corrupted_clean[mask == 0] = 1
    tensor = tl.tensor(corrupted_clean + corrupted_noise)
    corrupted_noise = tl.tensor(corrupted_noise)
    corrupted_clean = tl.tensor(corrupted_clean)
    mask = tl.tensor(mask)
    # Decompose the tensor
    clean_pred, noise_pred = robust_pca(tensor, mask=mask, reg_E=0.4, mu_max=10e12,
                                        learning_rate=1.2,
                                        n_iter_max=200, tol=tol, verbose=True)
    # check recovery
    assert_array_almost_equal(tensor, clean_pred+noise_pred, decimal=tol)
    # check low rank recovery
    assert_array_almost_equal(corrupted_clean*mask, clean_pred*mask, decimal=1)
    # check sparse gross error recovery
    assert_array_almost_equal(noise*mask, noise_pred*mask, decimal=1)

    # Check for recovery of the corrupted/missing part
    mask = 1 - mask
    error = tl.norm((clean*mask - clean_pred*mask), 2)/tl.norm(clean*mask, 2)
    assert_(error <= 10e-3)
Example No. 30
def test_matrix_product_state_cross_3():
    """ Test for matrix_product_state_cross """
    rng = check_random_state(1234)

    ## Test 3
    tol = 10e-5
    tensor = tl.tensor(rng.random_sample([3, 3, 3]))
    factors = matrix_product_state_cross(tensor, (1, 3, 3, 1))
    reconstructed_tensor = mps_to_tensor(factors)
    error = tl.norm(reconstructed_tensor - tensor, 2)
    error /= tl.norm(tensor, 2)
    assert_(error < tol, 'norm 2 of reconstruction higher than tol')