Example #1
    def ttt(self, tB, adims, bdims):
        amatrix = tenmat.tenmat(self, adims, option='t')
        bmatrix = tenmat.tenmat(tB, bdims)
        cmatrix = amatrix.mtimes(bmatrix)

        c = cmatrix.totensor()
        return c
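The `ttt` method above contracts the two tensors by matricizing each operand with `tenmat.tenmat`, multiplying the resulting matrices, and folding the product back into a tensor. For reference, the same kind of contraction can be written directly with `numpy.tensordot`; the snippet below is a minimal numpy-only sketch (the shapes and modes are made up for illustration) and does not use the library's own classes.

import numpy as np

# Contract a 2x3x4 array with a 3x4x5 array over A's modes (1, 2)
# and B's modes (0, 1) -- the same kind of contraction ttt performs.
A = np.arange(24).reshape(2, 3, 4)
B = np.arange(60).reshape(3, 4, 5)
C = np.tensordot(A, B, axes=([1, 2], [0, 1]))
print(C.shape)  # (2, 5)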
Example #2
def ctor(verbose):

    dat = numpy.arange(24).reshape([2, 3, 4])

    t = tensor.tensor(dat)
    print(t)
    if verbose:
        obj = tenmat.tenmat(t, [1, 0])
        print(obj)
        print(obj.copy())

    dat = dat.reshape([4, 6])
    t = tensor.tensor(dat)
    if verbose:
        obj = tenmat.tenmat(t, [0], [1], [4, 6])
        print(obj)
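The constructor test above matricizes a 2x3x4 tensor with row modes [1, 0]. Conceptually, matricization permutes the requested row modes to the front and collapses them into the row index; the numpy sketch below only illustrates that idea. Note it is an assumption-laden illustration: `tenmat.tenmat` follows the MATLAB Tensor Toolbox ordering convention, so the exact element layout may differ from a plain C-ordered reshape.

import numpy as np

dat = np.arange(24).reshape(2, 3, 4)
rdims, cdims = [1, 0], [2]

# Move the row modes to the front, then fold them into a single row axis.
unfolded = np.transpose(dat, rdims + cdims).reshape(3 * 2, 4)
print(unfolded.shape)  # (6, 4)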
Example #4
def totensorTests(verbose):

    dat = numpy.arange(24).reshape([2, 3, 4])
    t = tensor.tensor(dat)
    obj = tenmat.tenmat(t, [2, 1])
    if verbose:
        print(obj)
        print(obj.totensor())
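`totensor` undoes the matricization: the rows and columns are unfolded back into the original modes and the permutation is inverted. The round trip below is a numpy-only sketch of that idea; as above, the library's own element ordering may differ from this C-ordered reshape.

import numpy as np

dat = np.arange(24).reshape(2, 3, 4)
perm = [2, 1, 0]  # row modes [2, 1] first, the remaining mode last

mat = np.transpose(dat, perm).reshape(4 * 3, 2)           # matricize
back = mat.reshape(4, 3, 2).transpose(np.argsort(perm))   # fold and undo the permutation
assert np.array_equal(back, dat)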
Example #6
def calculatePhi(X, B, Pi, n, epsilon=1e-4, C=None):
    """
    Calculate the matrix for the multiplicative update

    Parameters
    ----------
    X       : the observed tensor
    B       : the factor matrix associated with mode n
    Pi      : the product of all factor matrices but the n-th
    n       : the mode whose subproblem is being solved
    epsilon : lower bound applied to the denominator to avoid division by zero
    C       : the augmented / non-augmented tensor (\alpha u \Psi or B \Phi) in sparse form
    """
    Phi = None
    if X.__class__ == sptensor.sptensor:
        Phi = -np.ones((X.shape[n], B.shape[1]))
        xsubs = X.subs[:, n]
        if C is not None:
            v = np.sum(np.multiply(B[xsubs, :], Pi) + C, axis=1)
        else:
            v = np.sum(np.array(np.multiply(B[xsubs, :], Pi)), axis=1)

        # Guard against zero denominators
        for i in range(len(v)):
            if v[i] == 0:
                v[i] = 1.0

        wvals = X.vals.flatten() / v
        Pi = np.array(Pi)
        for r in range(B.shape[1]):
            # Accumulate the weighted Pi column into the rows indexed by xsubs
            Phi[:, r] = accumarray.accum_np(np.array(xsubs),
                                            np.array(np.multiply(wvals, Pi[:, r])),
                                            size=X.shape[n])
    else:
        # Dense case: unfold X along mode n and work with ordinary matrices
        Xn = tenmat.tenmat(X, [n])
        V = np.inner(B, Pi)
        W = Xn.data / np.maximum(V, epsilon)
        Phi = np.inner(W, Pi.transpose())
    return Phi
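In the sparse branch above, `accumarray.accum_np` sums the weighted values that share the same row subscript, giving one entry of Phi per slice index. A rough stand-in with plain numpy (an assumption, not the library's implementation) is `np.bincount` with weights:

import numpy as np

def accum_np_sketch(subs, vals, size):
    # Sum vals[k] into bucket subs[k]; the result has length `size`.
    return np.bincount(subs, weights=vals, minlength=size)

subs = np.array([0, 2, 2, 1])
vals = np.array([1.0, 0.5, 0.25, 2.0])
print(accum_np_sketch(subs, vals, size=4))  # approximately [1. 2. 0.75 0.]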
Example #7
def __calculatePhi(X, M, R, n, Pi, epsilon):
    """
    Calculate the matrix for multiplicative update
    """
    Phi = None
    if X.__class__ == sptensor.sptensor:
        Phi = -np.ones((X.shape[n], R))
        xsubs = X.subs[:, n]
        v = np.sum(np.multiply(M.U[n][xsubs, :], Pi), axis=1)
        wvals = X.vals.flatten() / np.maximum(v, epsilon)
        for r in range(R):
            Phi[:, r] = accumarray.accum_np(xsubs, np.multiply(wvals, Pi[:, r]), size=X.shape[n])
    else:
        Xn = tenmat.tenmat(X, [n])
        V = np.inner(M.U[n], Pi)
        W = Xn.data / np.maximum(V, epsilon)
        Phi = np.inner(W, Pi.transpose())

    return Phi
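The dense branch of both functions computes Phi as the ratio of the observed unfolding to the model's unfolding, multiplied back into Pi. The sketch below replays those three lines with random stand-ins for `Xn.data`, the factor matrix, and `Pi`; the shapes are assumptions chosen only for illustration.

import numpy as np

rng = np.random.default_rng(0)
Xn = rng.random((5, 12))   # mode-n unfolding: 5 rows, 12 = product of the other dims
Bn = rng.random((5, 3))    # factor matrix for mode n, rank 3
Pi = rng.random((12, 3))   # product of the remaining factor matrices
epsilon = 1e-4

V = np.inner(Bn, Pi)                 # model's mode-n unfolding, shape (5, 12)
W = Xn / np.maximum(V, epsilon)      # elementwise ratio, denominator floored at epsilon
Phi = np.inner(W, Pi.transpose())    # multiplicative-update matrix, shape (5, 3)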
Example #8
def DTA(Xnew, R, C=None, alpha=None):
    """DTA analysis"""

    # number of dimensions of the input tensor.
    N = Xnew.ndims()

    # If the covariance matrices are not given,
    # initialize all of them to be 0
    if C is None:
        C = []
        dv = Xnew.shape
        for i in range(0, N):
            C.extend([sparse.coo_matrix(([], ([], [])), shape=(dv[i], dv[i]))])

    # If the forgetting factor is not given, it defaults to 1.
    if alpha is None:
        alpha = 1

    U = []
    Cnew = []
    for i in range(0, N):
        if (Xnew.__class__ == tensor.tensor):
            XM = tenmat.tenmat(Xnew, [i]).tondarray()
        elif (Xnew.__class__ == sptensor.sptensor):
            XM = sptenmat.sptenmat(Xnew, [i]).tosparsemat()
        elif (Xnew.__class__ == ttensor.ttensor):
            raise TypeError("It is not supported yet.")
        else:
            raise TypeError(
                "1st argument must be tensor, sptensor, or ttensor")

        Cnew.extend(
            [numpy.array(alpha * C[i] + numpy.dot(XM, XM.transpose()))])

        (w, v) = eigwrapper(Cnew[i], R[i])

        U.extend([numpy.array(v)])

    core = Xnew.ttm(U, None, 't')
    T = ttensor.ttensor(core, U)
    return (T, Cnew)
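For each mode, DTA folds the new data's unfolding into the running covariance and keeps its leading eigenvectors. `eigwrapper` is not shown in this listing, so the sketch below uses `numpy.linalg.eigh` as a stand-in for one mode's update; the sizes and the forgetting factor are made up for illustration.

import numpy as np

alpha, R_i = 0.9, 2
XM = np.random.default_rng(1).random((4, 30))   # mode-i unfolding of Xnew
C_i = np.zeros((4, 4))                          # previous covariance for mode i

C_new = alpha * C_i + XM @ XM.T                 # updated covariance
w, v = np.linalg.eigh(C_new)                    # eigenvalues in ascending order
U_i = v[:, np.argsort(w)[::-1][:R_i]]           # keep the top R_i eigenvectors
print(U_i.shape)                                # (4, 2)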
Example #10
    def ttm(self, mat, dims=None, option=None):
        """ Computes the sptensor times the given matrix.
        mat is a single 2-D matrix/array or a list of such matrices/arrays."""

        if dims is None:
            dims = range(0, self.ndims())

        # Handle the case where mat is a list of arrays
        if mat.__class__ == list:
            if len(mat) == 0:
                raise ValueError("the given list of arrays is empty!")

            (dims, vidx) = tools.tt_dimscehck(dims, self.ndims(), len(mat))

            Y = self.ttm(mat[vidx[0]], dims[0], option)
            for i in range(1, len(dims)):
                Y = Y.ttm(mat[vidx[i]], dims[i], option)

            return Y

        if mat.ndim != 2:
            raise ValueError("matrix in 2nd argument must be a matrix!")

        if option is not None:
            if option == 't':
                mat = mat.transpose()
            else:
                raise ValueError("unknown option.")

        if dims.__class__ == list:
            if len(dims) != 1:
                raise ValueError("Error in number of elements in dims")
            else:
                dims = dims[0]

        if dims < 0 or dims > self.ndims():
            raise ValueError("Dimension N must be between 1 and the number of dimensions")

        # Check that sizes match
        if self.shape[dims] != mat.shape[1]:
            raise ValueError("size mismatch on V")

        # Compute the new size
        newsiz = list(self.shape)
        newsiz[dims] = mat.shape[0]

        # Compute Xn: matricize the sptensor along the given mode (transposed)
        Xnt = sptenmat.sptenmat(self, None, [dims], None, 't')
        rdims = Xnt.rdims
        cdims = Xnt.cdims

        I = []
        J = []
        for i in range(0, len(Xnt.subs)):
            I.extend([Xnt.subs[i][0]])
            J.extend([Xnt.subs[i][1]])

        # Multiply the sparse matricized tensor by the (transposed) matrix
        Z = (sparse.coo_matrix((Xnt.vals.flatten(), (I, J)),
             shape=(tools.getelts(Xnt.tsize, Xnt.rdims).prod(),
                    tools.getelts(Xnt.tsize, Xnt.cdims).prod()))
             * mat.transpose())

        Z = tensor.tensor(Z, newsiz).tosptensor()

        # Return a sparse result if it is sparse enough, otherwise a dense one
        if Z.nnz() <= 0.5 * numpy.array(newsiz).prod():
            Ynt = sptenmat.sptenmat(Z, rdims, cdims)
            return Ynt.tosptensor()
        else:
            Ynt = tenmat.tenmat(Z.totensor(), rdims, cdims)
            return Ynt.totensor()
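`ttm` multiplies a matrix into a single mode of the tensor, leaving the other modes untouched. The dense numpy sketch below shows the same mode-n product with `np.tensordot`; the shapes are illustrative assumptions, and the method above additionally decides between a sparse and a dense result.

import numpy as np

X = np.arange(24).reshape(2, 3, 4)
M = np.ones((5, 3))                       # maps mode 1 from size 3 to size 5

Y = np.tensordot(M, X, axes=([1], [1]))   # shape (5, 2, 4)
Y = np.moveaxis(Y, 0, 1)                  # put the updated mode back in place: (2, 5, 4)
print(Y.shape)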