Example #1
    def get_exact_moments(self):
        d = self.d
        sigma2 = self.sigmas[0,0,0]
        M1 = sum(w * mu for w, mu in zip(self.weights, self.means.T)).reshape(d,1)
        M2 = sum(w * (sc.outer(mu,mu) + S) for w, mu, S in zip(self.weights, self.means.T, self.sigmas))
        M3 = sum(w * tensorify(mu,mu,mu) for w, mu, S in zip(self.weights, self.means.T, self.sigmas))

        M1_ = np.hstack([M1 for _ in range(d)])  # hstack needs a sequence, not a generator
        M3 += sigma2 * ktensor([M1_, np.eye(d), np.eye(d)]).totensor()
        M3 += sigma2 * ktensor([np.eye(d), M1_, np.eye(d)]).totensor()
        M3 += sigma2 * ktensor([np.eye(d), np.eye(d), M1_]).totensor()

        return M1, M2, M3
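Example #1 assembles the sigma2 correction of the third moment out of rank-1 terms via ktensor(...).totensor(). For reference, here is a minimal numpy-only sketch of what that call computes for three factor matrices; kruskal_to_full is a hypothetical helper, not part of scikit-tensor:

import numpy as np

def kruskal_to_full(A, B, C, weights=None):
    # sum_r w_r * outer(A[:, r], B[:, r], C[:, r]) -- the dense tensor that
    # ktensor([A, B, C], w).totensor() returns for 3-way factors
    r = A.shape[1]
    w = np.ones(r) if weights is None else weights
    return np.einsum('r,ir,jr,kr->ijk', w, A, B, C)

# e.g. the first correction term above: kruskal_to_full(M1_, np.eye(d), np.eye(d))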
Example #2
    def factorize(self, x, iterations=100, showProgress=False, default=True):
        if not default:
            x = dtensor(x)
            num_ways = len(self.factor[0])
            X_itr = []
            R = len(self.factor)
            for way_index in range(num_ways):
                X_cur = []
                for r in range(R):
                    X_cur.append(self.factor[r][way_index].tolist())
                X_itr.append(np.array(X_cur).T)

        import math  # hoisted: no need to re-import on every iteration

        for i1 in range(1, iterations + 1):
            if showProgress:
                progress = "*" if i1 % 20 else "[%d/%d]\n" % (i1, iterations)
                print(progress)
            if default:
                self.updateAllFactors(x, self.factor)
            else:
                X_itr = self.updateAllFactorsGradient(x, X_itr, num_ways, R)
                ktensor_X = ktensor(X_itr)
                # relative reconstruction error of the current approximation
                error_X = math.sqrt(getError(x, ktensor_X, x.norm())) / x.norm()
        if not default:
            result_factor = []
            for r in range(R):
                each_factor = []
                for way_index in range(num_ways):
                    each_factor.append(X_itr[way_index].T[r])
                result_factor.append(each_factor)
            self.factor = result_factor
Example #3
def RobustTPM(T, k, L=25, N=20):
    """
    Algorithm 1 from "Tensor Decompositions for Learning Latent Variable Models"
    (the robust tensor power method)
    @param T: symmetric tensor
    @param k: the number of latent states
    @param L: number of random restarts
    @param N: number of power iterations per restart
    """
    Thetas = []

    for tau in range(L):
        Theta = np.random.randn(1, k)
        Theta = Theta / np.linalg.norm(Theta)

        for t in range(N):
            Theta = T.ttm([Theta, Theta], [1, 2]).reshape(1, k)
            Theta = Theta / np.linalg.norm(Theta)

        Thetas.append(Theta)

    ThetaFinal_idx = np.argmax(
        [T.ttm([theta, theta, theta], [0, 1, 2]) for theta in Thetas])

    Theta = Thetas[ThetaFinal_idx]

    for t in range(N):
        Theta = T.ttm([Theta, Theta], [1, 2]).reshape(1, k)
        Theta = Theta / np.linalg.norm(Theta)

    Lambda = T.ttm([Theta, Theta, Theta], [0, 1, 2]).squeeze()

    return Theta, Lambda, T - skt.ktensor([Theta.T, Theta.T, Theta.T
                                           ]).totensor() * Lambda
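RobustTPM returns one (eigenvector, eigenvalue) pair together with the deflated tensor, so a full decomposition comes from calling it k times. A sketch under that assumption (decompose_symmetric is an illustrative name, not from the paper):

def decompose_symmetric(T, k, L=25, N=20):
    # peel off one component per call; the third return value of RobustTPM
    # is already the deflated tensor
    thetas, lambdas = [], []
    for _ in range(k):
        Theta, Lambda, T = RobustTPM(T, k, L=L, N=N)
        thetas.append(np.asarray(Theta).flatten())
        lambdas.append(float(Lambda))
    return np.array(lambdas), np.array(thetas).T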
Example #4
def test_spttv(subs, vals, shape):
    S = sptensor(subs, vals, shape=shape)
    K = ktensor([
        np.random.randn(shape[0], 2),
        np.random.randn(shape[1], 2),
        np.random.randn(shape[2], 2)
    ])
    K.innerprod(S)
Example #5
def test_candecomp():
    """
    Test if it works
    """
    d = 3
    pi = rand(d)
    A = orthogonal(3)
    T = ten.ktensor( [A, A, A], pi ).totensor()
    pi_, A_, B_, C_ = candecomp( T, d )
    T_ = ten.ktensor( [A_, B_, C_], pi_ ).totensor()

    pi_ = match_columns_sign(np.atleast_2d(pi_), np.atleast_2d(pi)).flatten()
    A_ = match_columns_sign(A_, A)

    assert np.allclose(pi, pi_)
    assert np.allclose(A, A_)

    assert np.allclose( T, T_ )
Example #6
def test_candecomp():
    """
    Test if it works
    """
    d = 3
    pi = rand(d)
    A = orthogonal(3)
    T = ten.ktensor([A, A, A], pi).totensor()
    pi_, A_, B_, C_ = candecomp(T, d)
    T_ = ten.ktensor([A_, B_, C_], pi_).totensor()

    pi_ = match_columns_sign(np.atleast_2d(pi_), np.atleast_2d(pi)).flatten()
    A_ = match_columns_sign(A_, A)

    assert np.allclose(pi, pi_)
    assert np.allclose(A, A_)

    assert np.allclose(T, T_)
Example #7
def test_spttv():
    subs = (
        array([0, 1, 0, 5, 7, 8]),
        array([2, 0, 4, 5, 3, 9]),
        array([0, 1, 2, 2, 1, 0])
    )
    vals = array([1, 1, 1, 1, 1, 1])
    S = sptensor(subs, vals, shape=[10, 10, 3])
    K = ktensor([randn(10, 2), randn(10, 2), randn(3, 2)])
    K.innerprod(S)
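What K.innerprod(S) evaluates here can also be written directly over the nonzeros of the sparse tensor. The following numpy sketch (sparse_ktensor_inner is a hypothetical reference implementation, assuming unit ktensor weights) mirrors it:

def sparse_ktensor_inner(subs, vals, factors):
    # sum over nonzeros (i, j, k) of val * sum_r A[i, r] * B[j, r] * C[k, r]
    A, B, C = factors
    i, j, k = subs
    return float(np.sum(vals * np.sum(A[i] * B[j] * C[k], axis=1)))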
Example #8
def test_vectorization():
    rank = 5
    shape = (5, 27, 3, 13)
    U = [randn(s, rank) for s in shape]
    K = ktensor(U)
    v = K.tovec()
    K2 = v.toktensor()

    assert_equal(sum([s * rank for s in shape]), len(v.v))
    assert_equal(K, K2)
Example #9
def test_spttv():
    subs = (
        array([0, 1, 0, 5, 7, 8]),
        array([2, 0, 4, 5, 3, 9]),
        array([0, 1, 2, 2, 1, 0])
    )
    vals = array([1, 1, 1, 1, 1, 1])
    shape = [10, 10, 3]  # matches the same test in Example #7
    S = sptensor(subs, vals, shape=shape)
    K = ktensor([randn(shape[0], 2), randn(shape[1], 2), randn(shape[2], 2)])
    K.innerprod(S)
Example #10
def test_vectorization():
    rank = 5
    shape = (5, 27, 3, 13)
    U = [np.random.randn(s, rank) for s in shape]
    K = ktensor(U)
    v = K.tovec()
    K2 = v.toktensor()

    assert sum([s * rank for s in shape]) == len(v.v)
    assert K == K2
Example #11
def test_vectorization():
    rank = 5
    shape = (5, 27, 3, 13)
    U = [randn(s, rank) for s in shape]
    K = ktensor(U)
    v = K.tovec()
    K2 = v.toktensor()

    assert sum([s * rank for s in shape]) == len(v.v)
    assert K == K2
Example #12
def test_spttv():
    subs = (
        array([0, 1, 0, 5, 7, 8]),
        array([2, 0, 4, 5, 3, 9]),
        array([0, 1, 2, 2, 1, 0])
    )
    vals = array([1, 1, 1, 1, 1, 1])
    shape = [10, 10, 3]  # matches the same test in Example #7
    S = sptensor(subs, vals, shape=shape)
    K = ktensor([randn(shape[0], 2), randn(shape[1], 2), randn(shape[2], 2)])
    K.innerprod(S)
Example #13
def get_moments(xs, k):
    n, d = xs.shape
    assert d >= k

    m1 = (xs.sum(0) / n).reshape(d, 1)
    m2 = xs.T.dot(xs) / n
    U, S, _ = np.linalg.svd(m2 - m1.dot(m1.T))

    sigma2, v = S[-1], U[:, -1]

    m1 = (np.atleast_2d((xs - m1.T).dot(v)**2).T * xs).sum(0) / n
    M1 = np.hstack([np.atleast_2d(m1).T for _ in range(d)])  # list, not generator

    M2 = m2 - sigma2 * np.eye(d)
    M3 = Triples(xs, xs, xs)
    M3 -= ktensor([M1, np.eye(d), np.eye(d)]).totensor()
    M3 -= ktensor([np.eye(d), M1, np.eye(d)]).totensor()
    M3 -= ktensor([np.eye(d), np.eye(d), M1]).totensor()
    return m1, M2, M3
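A hypothetical usage sketch for get_moments: draw samples from a spherical Gaussian mixture (all names and constants below are illustrative) and recover the corrected moments.

n, d, k = 10000, 5, 3
means = np.random.randn(d, k)                            # one column per component
labels = np.random.randint(k, size=n)
xs = means[:, labels].T + 0.3 * np.random.randn(n, d)    # sigma = 0.3
m1, M2, M3 = get_moments(xs, k)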
Example #15
def RecoverMoments(omega, M):
    """
    Recovers the theoretical moment matrix M2 and the moment tensor M3 from the
    conditional expectations and the mixing weights.
    @param omega: the mixing weights
    @param M: the conditional expectations matrix
    """
    M2 = M.dot(np.diag(omega)).dot(M.T)
    M3 = skt.ktensor([M, M, M], omega).totensor()

    return M2, M3
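A quick numeric consistency check (a sketch, not part of the original): M3 from RecoverMoments should equal the explicit weighted sum of symmetric rank-1 terms.

d, k = 4, 2
omega = np.array([0.3, 0.7])
M = np.random.randn(d, k)
M2, M3 = RecoverMoments(omega, M)
M3_ref = sum(omega[i] * np.einsum('a,b,c->abc', M[:, i], M[:, i], M[:, i])
             for i in range(k))
assert np.allclose(np.asarray(M3), M3_ref)
assert np.allclose(M2, M.dot(np.diag(omega)).dot(M.T))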
Example #16
def decompose_tensor(filters):
    """ filters is indexed as (filters, channels, filter width, filter height).
        Output is a list of structures P, each containing lmbda and U[0], U[1], U[2].
    """
    # Set logging to DEBUG to see CP-ALS information
    logging.basicConfig(level=logging.DEBUG)
    filters = np.array(filters)
    print(filters.shape, filters.dtype)
    nbr_filters = filters.shape[0]
    fwidth = filters.shape[2]
    fheight = filters.shape[3]
    Pstruct = []
    for channel in range(filters.shape[1]):
        filter_for_channel = filters[:, channel, :, :]
        filter_for_channel = filter_for_channel.reshape(nbr_filters, fwidth, fheight)
        filter_for_channel = np.swapaxes(filter_for_channel, 0, 2)
        print('Number of filters ', nbr_filters)
        print('filter_for_channel shape ', filter_for_channel.shape)
        fig, axes = plt.subplots(nrows=5, ncols=4)
        fig.tight_layout()

        for f in range(nbr_filters):
            img = filter_for_channel[:, :, f]
            plt.subplot(5, 4, f + 1)  # subplot indices are 1-based
            plt.imshow(img)
        plt.show(block=False)
        T = dtensor(filter_for_channel)
        rank = int(np.floor(nbr_filters * 0.6))
        print('rank is ', rank)
        # the original shelled out to MATLAB via pymatlab here and never
        # defined P or fit; cp_als (used below) computes the CP model directly
        P, fit, itr, _ = cp_als(T, rank)
        print('P U0,U1,U2, lambda sizes: ', P.U[0].size, P.U[1].size, P.U[2].size, P.lmbda)
        print('fit was ', fit)
        Pstruct.append(P)
        print(np.allclose(T, P.totensor()))

    U = [np.random.rand(i, 3) for i in (20, 10, 14)]

    Tn = dtensor(ktensor(U).toarray())
    P, fit, itr, _ = cp_als(Tn, 10)
    print('P U0,U1,U2, lambda sizes: ', P.U[0].size, P.U[1].size, P.U[2].size, P.lmbda)
    print('fit was ', fit)
    print(np.allclose(Tn, P.totensor()))

    return Pstruct
Example #17
def jtnorm_fro_err_nways(self, X, Y, X_all, Y_all, norm_X, norm_Y):
    """ Compute the approximation error in Frobeinus norm

    norm(X - W.dot(H.T)) is efficiently computed based on trace() expansion 
    when W and H are thin.

    Parameters
    ----------
    X : numpy.array or scikit tensor, shape (m,n,o)
    X_U : numpy.array, shape (m,R1)
    norm_X : precomputed norm of X

    Returns
    -------
    float
    """
    F_ktensor_X = ktensor(X_all)
    F_ktensor_Y = ktensor(Y_all)
    error_X = getError(X, F_ktensor_X, norm_X)
    error_Y = getError(Y, F_ktensor_Y, norm_Y)
    cost = ((math.sqrt(np.maximum(error_X, 0))) / norm_X +
            (math.sqrt(np.maximum(error_Y, 0))) / norm_Y) / 2.

    return cost
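The trace expansion mentioned in the docstring can be checked directly in numpy: for thin W and H, the squared Frobenius error decomposes into Gram-matrix terms without ever forming W.dot(H.T). A self-contained sketch:

import numpy as np

m, n, R = 200, 150, 5
X = np.random.randn(m, n)
W = np.random.randn(m, R)
H = np.random.randn(n, R)

direct = np.linalg.norm(X - W.dot(H.T), 'fro')**2
expanded = (np.linalg.norm(X, 'fro')**2
            - 2 * np.trace(H.T.dot(X.T).dot(W))
            + np.trace(W.T.dot(W).dot(H.T.dot(H))))
assert np.isclose(direct, expanded)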
Example #18
    def dot(self, vectors, modes):
        """
        Contract the stored ktensor with one vector per selected mode.
        Args:
            vectors: list of vectors, one for each entry of `modes`
            modes: indices of the modes to contract
        Returns:
            convolution: the contracted dense array
        """
        factors = deepcopy(self.ktensor.U)
        for i, ind in enumerate(modes):
            # a (dim, rank) factor contracted with a (dim,) vector leaves a
            # (1, rank) row; the original reshape((1, (self.rank,) * 3)) passed
            # a nested tuple and would raise TypeError
            factors[ind] = (vectors[i].T).dot(factors[ind]).reshape(1, self.rank)
        convolution = ktensor(factors, self.ktensor.lmbda).toarray().squeeze()
        return convolution
Example #19
    def computeError(self, x, X_itr, reference_matrix, L_matrix):
        ktensor_X = ktensor(X_itr)

        error = math.sqrt(getError(x, ktensor_X, x.norm())) / x.norm()
        reference_error = 0
        similarity_error = 0

        for i in range(len(L_matrix)):
            # per-mode regularization term trace(X^T L X)
            similarity_error += np.trace(X_itr[i].T.dot(L_matrix[i]).dot(X_itr[i]))
            reference_error += np.linalg.norm(X_itr[i] - reference_matrix[i])

        reference_error /= len(L_matrix)
        similarity_error /= len(L_matrix)
        return error, similarity_error, reference_error
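The trace(X^T L X) term reads as a graph-smoothness penalty if each L_matrix[i] is a graph Laplacian, which is an assumption about the caller. Under that assumption the identity below holds and can be verified numerically:

import numpy as np

n, r = 6, 3
W = np.random.rand(n, n)
W = (W + W.T) / 2
np.fill_diagonal(W, 0)
Lap = np.diag(W.sum(axis=1)) - W              # Laplacian of a similarity graph
Xm = np.random.randn(n, r)

lhs = np.trace(Xm.T.dot(Lap).dot(Xm))
rhs = 0.5 * sum(W[i, j] * np.sum((Xm[i] - Xm[j])**2)
                for i in range(n) for j in range(n))
assert np.isclose(lhs, rhs)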
Example #20
    def covariance_tensor(self, Views):

        number_of_samples = Views[0].shape[0]

        for n in range(number_of_samples):
            u = []
            for v in range(self.number_of_views):
                u.append(np.array(Views[v][n]).reshape(-1, 1))

            cov_x = ktensor(u).toarray()

            if n == 0:
                cov_ten = cov_x
            else:
                cov_ten = cov_ten + cov_x

        cov_ten = cov_ten / (number_of_samples - 1)

        return cov_ten
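For the common three-view case the per-sample loop above collapses into a single einsum. An equivalent sketch (covariance_tensor_3view is an illustrative name, assuming each view is a numpy array of shape (n_samples, d_v)):

def covariance_tensor_3view(V0, V1, V2):
    # sum of per-sample outer products, normalized by (n - 1) as above
    n = V0.shape[0]
    return np.einsum('na,nb,nc->abc', V0, V1, V2) / (n - 1)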
Example #21
def main():

    from numpy.random import rand

    # -----------------------------------------------
    # Creating a synthetic 4th-order tensor
    # -----------------------------------------------
    N1 = 20
    N2 = 25
    N3 = 30
    N4 = 30

    R = 10

    # Random initialization
    np.random.seed(42)

    A_org = np.random.rand(N1, R)
    A_org[A_org < 0.4] = 0

    B_org = rand(N2, R)
    B_org[B_org < 0.4] = 0

    C_org = rand(N3, R)
    C_org[C_org < 0.4] = 0

    D_org = rand(N4, R)
    D_org[D_org < 0.4] = 0

    X_ks = ktensor([A_org, B_org, C_org, D_org])
    X = X_ks.totensor()

    # -----------------------------------------------
    # Tentative initial values
    # -----------------------------------------------
    A0 = np.random.rand(N1, R)
    B0 = np.random.rand(N2, R)
    C0 = np.random.rand(N3, R)
    D0 = np.random.rand(N4, R)

    Finit = [A0, B0, C0, D0]

    # -----------------------------------------------
    # Uncomment only one of the following
    # -----------------------------------------------
    X_approx_ks = nonnegative_tensor_factorization(X, R)

#     X_approx_ks = nonnegative_tensor_factorization(X, R,
#                                                    min_iter=5, max_iter=20)
#
#     X_approx_ks = nonnegative_tensor_factorization(X, R,
#                                                    method='anls_asgroup')
#
#     X_approx_ks = nonnegative_tensor_factorization(X, R,
#                                                    tol=1e-7, max_iter=300)
#
#     X_approx_ks = nonnegative_tensor_factorization(X, R,
#                                                    init=Finit)

    # -----------------------------------------------
    # Approximation Error
    # -----------------------------------------------
    X_approx = X_approx_ks.totensor()
    X_err = (X - X_approx).norm() / X.norm()
    print "Error:", X_err
Example #22
def main():

    from numpy.random import rand

    # -----------------------------------------------
    # Creating a synthetic 4th-order tensor
    # -----------------------------------------------
    N1 = 20
    N2 = 25
    N3 = 30
    N4 = 30

    R = 10

    # Random initialization
    np.random.seed(42)

    A_org = np.random.rand(N1, R)
    A_org[A_org < 0.4] = 0

    B_org = rand(N2, R)
    B_org[B_org < 0.4] = 0

    C_org = rand(N3, R)
    C_org[C_org < 0.4] = 0

    D_org = rand(N4, R)
    D_org[D_org < 0.4] = 0

    X_ks = ktensor([A_org, B_org, C_org, D_org])
    X = X_ks.totensor()

    # -----------------------------------------------
    # Tentative initial values
    # -----------------------------------------------
    A0 = np.random.rand(N1, R)
    B0 = np.random.rand(N2, R)
    C0 = np.random.rand(N3, R)
    D0 = np.random.rand(N4, R)

    Finit = [A0, B0, C0, D0]

    # -----------------------------------------------
    # Uncomment only one of the following
    # -----------------------------------------------
    X_approx_ks = nonnegative_tensor_factorization(X, R)

    #     X_approx_ks = nonnegative_tensor_factorization(X, R,
    #                                                    min_iter=5, max_iter=20)
    #
    #     X_approx_ks = nonnegative_tensor_factorization(X, R,
    #                                                    method='anls_asgroup')
    #
    #     X_approx_ks = nonnegative_tensor_factorization(X, R,
    #                                                    tol=1e-7, max_iter=300)
    #
    #     X_approx_ks = nonnegative_tensor_factorization(X, R,
    #                                                    init=Finit)

    # -----------------------------------------------
    # Approximation Error
    # -----------------------------------------------
    X_approx = X_approx_ks.totensor()
    X_err = (X - X_approx).norm() / X.norm()
    print("Error:", X_err)
Example #23
def nonnegative_tensor_factorization(X,
                                     r,
                                     method='anls_bpp',
                                     tol=1e-4,
                                     stop_criterion=1,
                                     min_iter=20,
                                     max_iter=200,
                                     max_time=1e6,
                                     init=None,
                                     orderWays=None):
    """
    Nonnegative Tensor Factorization (Canonical Decomposition / PARAFAC)

    Based on the Matlab version written by Jingu Kim ([email protected])
               School of Computational Science and Engineering,
               Georgia Institute of Technology

    This software implements nonnegativity-constrained low-rank approximation
    of tensors in PARAFAC model. Assuming that a k-way tensor X and target rank
    r are given, this software seeks F1, ... , Fk by solving the following
    problem:

    minimize
        || X- sum_(j=1)^r (F1_j o F2_j o ... o Fk_j) ||_F^2 +
              G(F1, ... , Fk) + H(F1, ..., Fk)
    where
        G(F1, ... , Fk) = sum_(i=1)^k ( alpha_i * ||Fi||_F^2 ),
        H(F1, ... , Fk) = sum_(i=1)^k ( beta_i sum_(j=1)^n || Fi_j ||_1^2 ).
    such that
        Fi >= 0 for all i.

    To use this software, it is necessary to first install scikit_tensor.

    Reference:
         Fast Nonnegative Tensor Factorization with an Active-set-like Method.
         Jingu Kim and Haesun Park.
         In High-Performance Scientific Computing: Algorithms and Applications,
         Springer, 2012, pp. 311-326.

    Parameters
    ----------
    X : 'tensor' object of scikit_tensor
        Input data tensor.

    r : int
        Target low-rank.

    method : string, optional
        Algorithm for solving the NTF problem. One of the following values:
         'anls_bpp' 'anls_asgroup' 'hals' 'mu'
         See above paper (and references therein) for the details
         of these algorithms.
         Default is 'anls_bpp'.

    tol : float, optional
        Stopping tolerance. Default is 1e-4.
        If you want to obtain a more accurate solution,
        decrease TOL and increase MAX_ITER at the same time.

    min_iter : int, optional
        Minimum number of iterations. Default is 20.

    max_iter : int, optional
        Maximum number of iterations. Default is 200.

    init : list, optional
        A list of initial values for the factors Fi.
        See the examples to learn how to set it.

    Returns
    -------
        F : a 'ktensor' object that represents a factorized form of the tensor.

    Examples
    --------
        F = nonnegative_tensor_factorization(X, 5)
        F = nonnegative_tensor_factorization(X, 10, tol=1e-3)
        F = nonnegative_tensor_factorization(X, 7, init=Finit, tol=1e-5)
    """

    nWay = len(X.shape)

    if orderWays is None:
        orderWays = np.arange(nWay)

    # set initial values
    if init is not None:
        F_cell = init
    else:
        Finit = [np.random.rand(X.shape[i], r) for i in range(nWay)]
        F_cell = Finit

    grad = getGradient(X, F_cell, nWay, r)

    nr_X = X.norm()
    nr_grad_all = np.sqrt(
        sum(np.linalg.norm(grad[i], 'fro')**2 for i in range(nWay)))

    if method == "anls_bpp":
        method = anls_bpp()
    elif method == "anls_asgroup":
        method = anls_asgroup()
    elif method == "mu":
        method = mu()
    elif method == "hals":
        method = hals()
    else:
        raise Exception("Unknown method")

    # Execute initializer
    F_cell, FF_init = method.initializer(X, F_cell, nWay, orderWays, r)

    tStart = time.time()

    if stop_criterion == 2:
        F_kten = ktensor(F_cell)
        rel_Error = getRelError(X, ktensor(F_cell), nWay, nr_X)

    if stop_criterion == 1:
        pGrad = getProjGradient(X, F_cell, nWay, r)
        SC_PGRAD = getStopCriterion(pGrad, nWay, nr_grad_all)

    # main iterations
    for iteration in range(max_iter):
        cntu = True

        F_cell, FF_init = method.iterSolver(X, F_cell, FF_init, nWay, r,
                                            orderWays)
        F_kten = ktensor(F_cell)

        if iteration >= min_iter:

            if time.time() - tStart > max_time:
                cntu = False

            else:

                if stop_criterion == 1:
                    pGrad = getProjGradient(X, F_cell, nWay, r)
                    SC_PGRAD = getStopCriterion(pGrad, nWay, nr_grad_all)
                    if SC_PGRAD < tol:
                        cntu = False

                elif stop_criterion == 2:
                    prev_rel_Error = rel_Error
                    rel_Error = getRelError(X, F_kten, nWay, nr_X)
                    SC_DIFF = np.abs(prev_rel_Error - rel_Error)
                    if SC_DIFF < tol:
                        cntu = False
                else:
                    rel_Error = getRelError(X, F_kten, nWay, nr_X)
                    if rel_Error < 1:
                        cntu = False

        if not cntu:
            break

    return F_kten
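A note on the stopping rules implemented above: stop_criterion=1 monitors the normalized projected gradient, stop_criterion=2 the change in relative error, and any other value stops as soon as the relative error drops below 1. A usage sketch (X as in the docstring):

F = nonnegative_tensor_factorization(X, 5, stop_criterion=2, tol=1e-6, max_iter=500)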
Example #24
def tensorify(a, b, c):
    a = np.atleast_2d(a).T
    b = np.atleast_2d(b).T
    c = np.atleast_2d(c).T

    return ktensor([a, b, c]).totensor()
Example #25
    def iter_solver(self, sc, blocks, X, Y, Z1, Z2, S,
                    X_factors, Y_factors, R1, R2, k, norm_X, norm_Y,
                    Lambda, location_reg, D_matrix, W_matrix, Teneye_Z_X, Teneye_Z_Y,
                    E_X, E_Y, Z_X, Z_Y, alpha_k, cost_current, Z_weights_X, Z_weights_Y,
                    num_workers, distance=1, current_iter=1, tree_group=None,
                    P_new=None, P_Z=None, Teneye_S_X=None, Teneye_S_Y=None,
                    level=None):

        alpha, beta, gamma, delta, train_proportion = Lambda
        by_norm = '2'
        X_itr = X_factors
        Y_itr = Y_factors
        num_ways = len(X_itr)
        n1 = norm_X
        n2 = norm_Y

        k = distance  # note: this shadows the parameter k

        X_d = [np.sum(each_factor[:, k:], axis=1) for each_factor in X_itr]
        Y_d = [np.sum(each_factor[:, k:], axis=1) for each_factor in Y_itr]

        X_new = ktensor(X_itr).totensor()
        Y_new = ktensor(Y_itr).totensor()
        
        X_FF_iter = []
        Y_FF_iter = []
        XtW_iter = []
        YtW_iter = []
        for way_index in range(num_ways):
            ways = list(range(num_ways))
            ways.remove(way_index)
            X_FF = np.ones((R1,R1))
            Y_FF = np.ones((R2,R2))
            for w in ways:
                X_FF = X_FF * X_itr[w].T.dot(X_itr[w])
                Y_FF = Y_FF * Y_itr[w].T.dot(Y_itr[w])
            X_FF_iter.append(X_FF)
            Y_FF_iter.append(Y_FF)
            XtW_iter.append(X.uttkrp(X_itr, way_index))
            YtW_iter.append(Y.uttkrp(Y_itr, way_index))

        for l in range(R1):
            for way_index in range(num_ways):
                if l < k:
                    X_itr[way_index][:, l] = (X_factors[way_index][:, l] * X_FF_iter[way_index][l, l] / (X_FF_iter[way_index][l, l] + n1 * alpha)
                        + (XtW_iter[way_index][:, l] - X_factors[way_index].dot(X_FF_iter[way_index])[:, l] + (n1 * alpha) * Y_factors[way_index][:, l]) / (X_FF_iter[way_index][l, l] + n1 * alpha + self.eps))
                    Y_itr[way_index][:, l] = (Y_factors[way_index][:, l] * Y_FF_iter[way_index][l, l] / (Y_FF_iter[way_index][l, l] + n2 * alpha)
                        + (YtW_iter[way_index][:, l] - Y_factors[way_index].dot(Y_FF_iter[way_index])[:, l] + (n2 * alpha) * X_factors[way_index][:, l]) / (Y_FF_iter[way_index][l, l] + n2 * alpha + self.eps))
                else:
                    X_itr[way_index][:, l] = (X_factors[way_index][:, l]
                        + (XtW_iter[way_index][:, l] - X_factors[way_index].dot(X_FF_iter[way_index])[:, l] - (n1 * beta / 2) * Y_d[way_index]) / (X_FF_iter[way_index][l, l] + self.eps))
                    Y_itr[way_index][:, l] = (Y_factors[way_index][:, l]
                        + (YtW_iter[way_index][:, l] - Y_factors[way_index].dot(Y_FF_iter[way_index])[:, l] + (n2 * beta / 2) * X_d[way_index]) / (Y_FF_iter[way_index][l, l] + self.eps))

                X_itr[way_index][:,l][X_itr[way_index][:,l] < self.eps] = self.eps
                Y_itr[way_index][:,l][Y_itr[way_index][:,l] < self.eps] = self.eps


        X_itr = [fn.normalize_column(each_factor, by_norm='2')[0]
                 if way_index < (num_ways - 1) else each_factor
                 for way_index, each_factor in enumerate(X_itr)]
        Y_itr = [fn.normalize_column(each_factor, by_norm='2')[0]
                 if way_index < (num_ways - 1) else each_factor
                 for way_index, each_factor in enumerate(Y_itr)]

        alpha_k_new = 0
        return (X_itr, Y_itr, Teneye_S_X, Teneye_S_Y,
                E_X, E_Y, Z_X, Z_Y,
                alpha_k_new, Z_weights_X, Z_weights_Y, P_new, P_Z)
Example #26
def deflate(T, lbda, v):
    v = np.atleast_2d(v).T
    return T - lbda * ten.ktensor([v, v, v]).totensor()
Example #27
def tensorify(a, b, c):
    a = np.atleast_2d(a).T
    b = np.atleast_2d(b).T
    c = np.atleast_2d(c).T

    return ktensor([a, b, c]).totensor()
Example #28
def nonnegative_tensor_factorization(X, r, method='anls_bpp',
                                     tol=1e-4, stop_criterion=1,
                                     min_iter=20, max_iter=200, max_time=1e6,
                                     init=None, orderWays=None):
    """
    Nonnegative Tensor Factorization (Canonical Decomposition / PARAFAC)

    Based on the Matlab version written by Jingu Kim ([email protected])
               School of Computational Science and Engineering,
               Georgia Institute of Technology

    This software implements nonnegativity-constrained low-rank approximation
    of tensors in PARAFAC model. Assuming that a k-way tensor X and target rank
    r are given, this software seeks F1, ... , Fk by solving the following
    problem:

    minimize
        || X- sum_(j=1)^r (F1_j o F2_j o ... o Fk_j) ||_F^2 +
              G(F1, ... , Fk) + H(F1, ..., Fk)
    where
        G(F1, ... , Fk) = sum_(i=1)^k ( alpha_i * ||Fi||_F^2 ),
        H(F1, ... , Fk) = sum_(i=1)^k ( beta_i sum_(j=1)^n || Fi_j ||_1^2 ).
    such that
        Fi >= 0 for all i.

    To use this software, it is necessary to first install scikit_tensor.

    Reference:
         Fast Nonnegative Tensor Factorization with an Active-set-like Method.
         Jingu Kim and Haesun Park.
         In High-Performance Scientific Computing: Algorithms and Applications,
         Springer, 2012, pp. 311-326.

    Parameters
    ----------
    X : 'tensor' object of scikit_tensor
        Input data tensor.

    r : int
        Target low-rank.

    method : string, optional
        Algorithm for solving the NTF problem. One of the following values:
         'anls_bpp' 'anls_asgroup' 'hals' 'mu'
         See above paper (and references therein) for the details
         of these algorithms.
         Default is 'anls_bpp'.

    tol : float, optional
        Stopping tolerance. Default is 1e-4.
        If you want to obtain a more accurate solution,
        decrease TOL and increase MAX_ITER at the same time.

    min_iter : int, optional
        Minimum number of iterations. Default is 20.

    max_iter : int, optional
        Maximum number of iterations. Default is 200.

    init : list, optional
        A list of initial values for the factors Fi.
        See the examples to learn how to set it.

    Returns
    -------
        F : a 'ktensor' object that represents a factorized form of the tensor.

    Examples
    --------
        F = nonnegative_tensor_factorization(X, 5)
        F = nonnegative_tensor_factorization(X, 10, tol=1e-3)
        F = nonnegative_tensor_factorization(X, 7, init=Finit, tol=1e-5)
    """

    nWay = len(X.shape)

    if orderWays is None:
        orderWays = np.arange(nWay)

    # set initial values
    if init is not None:
        F_cell = init
    else:
        Finit = [np.random.rand(X.shape[i], r) for i in range(nWay)]
        F_cell = Finit

    grad = getGradient(X, F_cell, nWay, r)

    nr_X = X.norm()
    nr_grad_all = np.sqrt(sum(np.linalg.norm(grad[i], 'fro') ** 2
                              for i in range(nWay)))

    if method == "anls_bpp":
        method = anls_bpp()
    elif method == "anls_asgroup":
        method = anls_asgroup()
    elif method == "mu":
        method = mu()
    elif method == "hals":
        method = hals()
    else:
        raise Exception("Unknown method")

    # Execute initializer
    F_cell, FF_init = method.initializer(X, F_cell, nWay, orderWays, r)

    tStart = time.time()

    if stop_criterion == 2:
        F_kten = ktensor(F_cell)
        rel_Error = getRelError(X, ktensor(F_cell), nWay, nr_X)

    if stop_criterion == 1:
        pGrad = getProjGradient(X, F_cell, nWay, r)
        SC_PGRAD = getStopCriterion(pGrad, nWay, nr_grad_all)

    # main iterations
    for iteration in range(max_iter):
        cntu = True

        F_cell, FF_init = method.iterSolver(X, F_cell,
                                            FF_init, nWay, r, orderWays)
        F_kten = ktensor(F_cell)

        if iteration >= min_iter:

            if time.time() - tStart > max_time:
                cntu = False

            else:

                if stop_criterion == 1:
                    pGrad = getProjGradient(X, F_cell, nWay, r)
                    SC_PGRAD = getStopCriterion(pGrad, nWay, nr_grad_all)
                    if SC_PGRAD < tol:
                        cntu = False

                elif stop_criterion == 2:
                    prev_rel_Error = rel_Error
                    rel_Error = getRelError(X, F_kten, nWay, nr_X)
                    SC_DIFF = np.abs(prev_rel_Error - rel_Error)
                    if SC_DIFF < tol:
                        cntu = False
                else:
                    rel_Error = getRelError(X, F_kten, nWay, nr_X)
                    if rel_Error < 1:
                        cntu = False

        if not cntu:
            break

    return F_kten
Example #29
def deflate(T, lbda, v):
    v = np.atleast_2d(v).T
    return T - lbda * ten.ktensor([v, v, v]).totensor()