Example #1
    def getTensor(mat):
        def mkfactor(size, rank, L, alpha):
            #D = cholesky(eye(size)+alpha*L)
            [u, s, w] = svd(L)
            D = dot(u, diag(s))  # color the Gaussian factors by L's singular spectrum
            A = random.randn(size, rank)
            return alpha * dot(D, A)

        alpha = 1e-3
        As = [
            mkfactor(size[i], rank, createChainLaplacian(size[i]), alpha)
            for i in xrange(dim)
        ]
        import algorithm as alg
        I = alg.createUnitTensor(dim, rank)
        X = alg.expand(I, As)

        avg = sum(abs(X.flatten())) / prod(size)  # mean absolute entry
        print avg
        noiseLevel = 0.1
        X += avg * noiseLevel * random.randn(*X.shape)

        X = X / norm(X)

        #X = arange(1000).reshape(10,10,10)
        return X
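
Note: createChainLaplacian is called above but never defined in this listing. The calls are consistent with the graph Laplacian of a path (chain) graph; the following numpy sketch is an assumption, not the repository's actual helper.

import numpy as np

def createChainLaplacian(size):
    # Assumed helper: L = D - A for the path graph 0-1-...-(size-1).
    # Endpoints have degree 1, interior nodes degree 2, -1 between neighbours.
    L = 2.0 * np.eye(size)
    L[0, 0] = L[-1, -1] = 1.0
    idx = np.arange(size - 1)
    L[idx, idx + 1] = -1.0
    L[idx + 1, idx] = -1.0
    return L
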
Example #2
def testTucker():
    s = 8
    r = 5 
    re = 3
    size = [s,s,s]
    rank = [r,r,r]
    rank_estimate = [re,re,re]
   
    L = createTestLaplacian(s)
    alpha = 0.01

    while True:
        #X = alg.randomTensorOfNorm(size,rank,0)
        dat = benchmark.ThreeDNoseData()
        X = dat["X"]
        L = dat["L"]
        X = X / norm(X)

        print "start estimation for tensor size of", X.shape 

        (G,As) = alg.HOOI(X,rank_estimate,L,alpha)


        print "finished"

        Result = alg.expand(G,As)

        #print "original \n", X
        #print "estimated \n",Result
        print "error \n", norm(X - Result)
        raw_input()
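
Note: the algorithm module (imported as alg) is not part of this listing. For 3-way tensors, alg.expand(G, As) evidently computes the Tucker reconstruction of core G with factor matrices As; a minimal sketch under that assumption:

import numpy as np

def expand(G, As):
    # Assumed behaviour for the 3-way case:
    # X[i,j,k] = sum_{a,b,c} G[a,b,c] * A0[i,a] * A1[j,b] * A2[k,c]
    A0, A1, A2 = As
    return np.einsum('abc,ia,jb,kc->ijk', G, A0, A1, A2)
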
Example #3
 def approximateCP(Xin):
     print re
     print alpha
     print Ls
     print beta
     As = alg.RRMFCP(Xin,re,beta,Ls,alpha)
     Xs = alg.expand(I,As)
     return Xs
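
Note: here I comes from the enclosing scope; in the other examples it is built by alg.createUnitTensor, which makes expand(I, As) a CP reconstruction. A hedged sketch of that helper, assuming a superdiagonal of ones:

import numpy as np

def createUnitTensor(dim, rank):
    # Assumed helper: dim-way tensor with ones on the superdiagonal, so
    # expand(I, As) sums the rank-1 outer products of the factor columns.
    I = np.zeros((rank,) * dim)
    idx = np.arange(rank)
    I[(idx,) * dim] = 1.0
    return I
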
Example #4
def randomTensorOfNorm(sizelist, ranks):
    # Draw a random Tucker core and factors, then expand to a dense tensor.
    N = len(sizelist)
    As = [random.rand(sizelist[i], ranks[i]) for i in xrange(N)]

    G = random.rand(*ranks)
    return alg.expand(G, As)
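
Note: despite its name, randomTensorOfNorm does not normalize its output; the callers divide by norm(X) themselves. A hypothetical call:

X = randomTensorOfNorm([8, 8, 8], [3, 3, 3])  # dense 8x8x8 tensor of Tucker rank (3, 3, 3)
X = X / norm(X)                               # normalization is left to the caller
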
Example #5
def testL():
    """
    過去の遺産
    """
    s = 100
    size = [s, s, s]
    rank = 4
    rank_estimate = 15

    alpha = 0.1
    beta = 0.00

    while True:
        #X = alg.randomTensorOfNorm(size,rank,0.05)
        dat = benchmark.Bonnie()
        X = dat["X"]
        L = dat["L"]

        #X = X - mean(X)
        #X = X + min(X.reshape(prod(X.shape)))
        print "norm:", norm(X)

        originalNorm = norm(X)
        X = X / norm(X)
        #X = X+0.1

        print [det(l) for l in L]
        return  # NOTE: early return; everything below is unreachable legacy code
        print X.shape

        #As = alg.RRMFCP(X,rank_estimate,alpha,[L,None,None],beta)
        As = alg.RRMFCP(X, rank_estimate, beta, L, alpha)
        #As = cpold.RRMFCP(X,rank_estimate,alpha)

        print "finished"

        I = alg.createUnitTensor(len(size), rank_estimate)
        Result = alg.expand(I, As)

        #vint = vectorize(int)
        #Result = vint(Result*originalNorm+0.5) / originalNorm
        #Result = sign(vint(Result))

        #benchmark.SaveImage(X.reshape(151*2,151*19),"Sugar")
        #benchmark.SaveImage(Result.reshape(151*2,151*19),"Est_Sugar")
        #benchmark.SaveImage(abs(X-Result).reshape(151*2,151*19),"Diff_Sugar")
        #Result = Result / norm(Result)

        #print "original \n", X
        print "estimated \n", abs(X - Result)
        print "error \n", norm(X - Result)
        #print As[0]
        raw_input()
Example #6
def testTucker():
    size = [3, 3, 3]
    rank = [3, 3, 3]
    rank_estimate = [1, 1, 1]

    X = randomTensorOfNorm(size, rank)

    (G, As) = alg.HOOI(X, rank_estimate)

    print "finished"

    Result = alg.expand(G, As)

    print "original \n", X
    print "estimated \n", Result
    print "error \n", norm(X - Result)
Example #7
def testCP():
    s = 100 
    size = [s,s,s]
    rank = 4
    rank_estimate = 5
   
    alpha = 0.1
    beta = 0.0001

    while True:
        #X = alg.randomTensorOfNorm(size,rank,0.05)
        #dat = benchmark.ThreeDNoseData()
        #dat = benchmark.RandomSmallTensor()
        dat = benchmark.Flow_Injection()
        #dat = benchmark.Bonnie()
        #dat = benchmark.Artificial()
        X = dat["X"]
        L = dat["L"]

        #X = X - mean(X)
        #X = X + min(X.reshape(prod(X.shape)))
        print "norm:",norm(X)
        X = X / norm(X)
        #X = X+0.1
        
        L = [None,None,None]  # discard the data Laplacians (unregularized run)
        print X.shape

        #As = alg.RRMFCP(X,rank_estimate,alpha,[L,None,None],beta)
        As = alg.RRMFCP(X,rank_estimate,beta,L,alpha)
        #As = cpold.RRMFCP(X,rank_estimate,alpha)

        print "finished"
        
        I = alg.createUnitTensor(len(size),rank_estimate)
        Result = alg.expand(I,As)

        benchmark.SaveImage(X,"Flow")
        benchmark.SaveImage(Result,"Est_Flow")
        #Result = Result / norm(Result)
        
        #print "original \n", X
        #print "estimated \n",Result
        print "error \n", norm(X - Result)
        #print As[0]
        raw_input()
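
Note: RRMFCP is not shown in this listing. With L = [None, None, None] its Laplacian regularizers drop out and the call reduces to an ordinary CP fit; the following unregularized CP-ALS sketch illustrates that baseline (an assumption, not the repository's routine):

import numpy as np

def khatri_rao(B, C):
    # Column-wise Kronecker product; row (j, k) maps to index j*K + k.
    R = B.shape[1]
    return (B[:, None, :] * C[None, :, :]).reshape(-1, R)

def cp_als(X, R, iters=50):
    # Plain alternating least squares for a 3-way CP model.
    dims = X.shape
    As = [np.random.rand(d, R) for d in dims]
    for _ in range(iters):
        for n in range(3):
            others = [i for i in range(3) if i != n]
            Xn = np.moveaxis(X, n, 0).reshape(dims[n], -1)  # mode-n unfolding
            KR = khatri_rao(As[others[0]], As[others[1]])
            G = np.ones((R, R))
            for i in others:
                G *= np.dot(As[i].T, As[i])
            As[n] = np.dot(np.dot(Xn, KR), np.linalg.pinv(G))
    return As
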
Example #8
    def getTensor(mat):
        def mkfactor(size, rank):
            th = random.randn()
            off = random.randn()
            d1 = arange(size)
            print th, off
            # every column is the same affine ramp d1*th + off
            A = array([d1 * th + off for i in xrange(rank)]).T
            return A

        As = [mkfactor(size[i], rank[i]) for i in xrange(dim)]
        import algorithm as alg

        G = random.randn(*rank)
        X = alg.expand(G, As)

        X = X / norm(X)
        return X
Example #9
    def getTensor(mat):
        def mkfactor(size, rank):
            th = random.randn()
            off = random.randn()
            d1 = arange(size)
            print th, off
            # every column is the same affine ramp d1*th + off
            A = array([d1 * th + off for i in xrange(rank)]).T
            return A

        As = [mkfactor(size[i], rank) for i in xrange(dim)]
        # overwrite the first two factors with small uniform noise
        As[0] = (random.rand(size[0], rank) - 0.5) * 0.5
        As[1] = (random.rand(size[1], rank) - 0.5) * 0.5
        import algorithm as alg
        I = random.randn(rank, rank, rank)  # random Tucker core (not the CP unit tensor)
        X = alg.expand(I, As)

        X = X / norm(X)
        return X
Example #10
def testTucker():
    """
    Tucker分解テスト
    """
    s = 8
    r = 5
    re = 10
    size = [s, s, s]
    rank = [r, r, r]
    rank_estimate = [re, re, re]

    L = createTestLaplacian(s)
    alpha = 0.01

    while True:
        #X = alg.randomTensorOfNorm(size,rank,0)
        #dat = benchmark.ThreeDNoseData()
        dat = benchmark.Bonnie()
        #dat = benchmark.Enron()
        #dat = benchmark.Flow_Injection()
        X = dat["X"]
        L = dat["L"]
        X = X / norm(X)
        L = [None, None, None]  # discard the data Laplacians (unregularized run)

        print "start estimation for tensor size of", X.shape

        (G, As) = alg.HOOI_obsolete(X, rank_estimate, alpha, L)

        print "finished"

        Result = alg.expand(G, As)

        #print "original \n", X
        #print "estimated \n",Result
        print "error \n", norm(X - Result)
        raw_input()
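
Note: alg.HOOI and alg.HOOI_obsolete are not included in the listing; with the Laplacians set to None the call amounts to standard higher-order orthogonal iteration. A minimal unregularized HOOI sketch, offered as an assumption about that baseline:

import numpy as np

def unfold(T, n):
    # Mode-n unfolding: mode n becomes the rows.
    return np.moveaxis(T, n, 0).reshape(T.shape[n], -1)

def hooi(X, ranks, iters=20):
    # HOSVD initialization: leading left singular vectors of each unfolding.
    As = [np.linalg.svd(unfold(X, n), full_matrices=False)[0][:, :ranks[n]]
          for n in range(3)]
    for _ in range(iters):
        for n in range(3):
            # Project X along the other two modes, then refresh factor n.
            Y = X
            for m in range(3):
                if m != n:
                    Y = np.moveaxis(np.tensordot(As[m].T, Y, axes=(1, m)), 0, m)
            As[n] = np.linalg.svd(unfold(Y, n), full_matrices=False)[0][:, :ranks[n]]
    G = X
    for m in range(3):  # core tensor: X projected by all three factors
        G = np.moveaxis(np.tensordot(As[m].T, G, axes=(1, m)), 0, m)
    return G, As
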
Example #11
def CompletionGradient(X,shape,ObservedList,R,Ls,alpha=0,XoriginalTensor=None,isProd=False):
    # X is a dense vector holding only the observed elements
    # ObservedList must be given as a list of coordinate tuples
    # XoriginalTensor is the full 3-dimensional array (used to track the true reconstruction error)


    N = 3
    Xns = [critical.unfold(X,n,shape,ObservedList) for n in xrange(N)]
    print "unfolded"
    As = [zeros((shape[n],R)) for n in xrange(N)]
    #As = [SVD.getLeadingSingularVects(Xns[n],R) for n in xrange(N)]
    Rev = 100
    for i in xrange(Rev):
        Asadd = [SVD.getLeadingSingularVects(Xns[n],R) for n in xrange(N)]
        for k in xrange(N):
            As[k] += Asadd[k]
    for k in xrange(N):
        As[k] /= Rev 
        #As[k] += random.randn(shape[k],R)*0.01



    n,m,l=shape
    def lossfunc(U,V,W):
        #print "loss start"
        XO = critical.HadamardProdOfSparseTensor(U,V,W,ObservedList)
        loss = norm(XO - X)
        #print "loss end"
        return loss

    #Xinit = flattenAs(As)
    #print "start bfgs"


    J = alg.createUnitTensor(3,R)
    #Mask = getMaskTensor(ObservedList,shape)

    U,V,W = As

    Ls[:] = [L*alpha if L is not None else 0 for L in Ls]

    Lu,Lv,Lw=Ls
    beta = 1e-8
    isProd = False  # NOTE: forces isProd off, overriding the argument

    if not isProd:
        # NOTE: Ls were already scaled by alpha above, so alpha is applied twice here
        LuUse = alpha * Lu + beta * eye(n)
        LvUse = alpha * Lv + beta * eye(m)
        LwUse = alpha * Lw + beta * eye(l)
    #print U.shape, V.shape, W.shape

    Xest = XoriginalTensor #memory consuming


    threshold = 0.5*const.ConvergenceThreshold_NewCompletion
    maxiter=700
    bfgs_maxiter=3
    errorold = inf
    errorTest = inf
    expandedX = 0

    Dw,Kw = separateLaplacian(Lw,l)
    Dv,Kv = separateLaplacian(Lv,m)
    Du,Ku = separateLaplacian(Lu,n)

    import itertools
    for steps in itertools.count():
        if isProd:
            DSu = alpha * trace(dot(W.T*Dw,W)) * trace(dot(V.T*Dv,V))
            KSu = alpha * trace(dot(W.T,dot(Kw,W))) * trace(dot(V.T,dot(Kv,V)))
            LuUse = DSu * Du - KSu * Ku + eye(n)

        #print "optimization of U"
        #print [U.shape,V.shape,W.shape]
        grad = lambda U:critical.Gradient(X,ObservedList,(U,V,W),LuUse,shape,R,0)
        loss = lambda U:lossfunc(U,V,W)
        U = LBFGS_matrix(loss,U,grad,maxiter=bfgs_maxiter)
        #print [U.shape,V.shape,W.shape]

        if isProd:
            DSv = alpha * trace(dot(U.T*Du,U)) * trace(dot(W.T*Dw,W))
            KSv = alpha * trace(dot(U.T,dot(Ku,U))) * trace(dot(W.T,dot(Kw,W)))
            LvUse = DSv * Dv - KSv * Kv + eye(m)
        #print "optimization of V"
        grad = lambda V:critical.Gradient(X,ObservedList,(U,V,W),LvUse,shape,R,1)
        loss = lambda V:lossfunc(U,V,W)
        V = LBFGS_matrix(loss,V,grad,maxiter=bfgs_maxiter)

        if isProd:
            DSw = alpha * trace(dot(U.T*Du,U)) * trace(dot(V.T*Dv,V))
            KSw = alpha * trace(dot(U.T,dot(Ku,U))) * trace(dot(V.T,dot(Kv,V)))
            LwUse = DSw * Dw - KSw * Kw + eye(l)
        #print "optimization of W"
        grad = lambda W:critical.Gradient(X,ObservedList,(U,V,W),LwUse,shape,R,2)
        loss = lambda W:lossfunc(U,V,W)
        W = LBFGS_matrix(loss,W,grad,maxiter=bfgs_maxiter)

        #grad = lambda (U,V,W)
        
        if False:  # disabled: rescale U, V, W so the model norm matches the data norm
            XO = critical.HadamardProdOfSparseTensor(U,V,W,ObservedList)
            normrate = norm(XO) / norm(X)
            qubicnormrate = normrate ** (1.0 / 3)
            U /= qubicnormrate
            V /= qubicnormrate
            W /= qubicnormrate

        if steps % 20 == 1:
            Xest = alg.expand(J,[U,V,W])
            errorTest = norm(Xest - XoriginalTensor)
            #print U

        errorObserved = lossfunc(U,V,W)


        if steps % 20 == 1:
            print "iter:",steps," err:",errorTest ," oberr:",errorObserved, " diff:", errorObserved-errorold, "norm;", norm(Xest)

        faultThreshold = 1e4
        if errorObserved > faultThreshold or errorObserved != errorObserved:  # second test catches NaN
            # restart from scratch
            print "-----Try Again-----"
            print steps," steps, observation error=",errorObserved
            return CompletionGradient(X,shape,ObservedList,R,Ls,alpha,XoriginalTensor)

        if abs(errorObserved - errorold) < threshold or steps >= maxiter:
            expandedX = alg.expand(J,[U,V,W])
            print "estimation finished in ",(steps+1),"steps."
            break

        errorold = errorObserved

    return expandedX
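
Note: the critical module is also absent from this listing. Judging from lossfunc above, HadamardProdOfSparseTensor evaluates the rank-R CP model only at the observed coordinates; a sketch under that assumption:

import numpy as np

def HadamardProdOfSparseTensor(U, V, W, ObservedList):
    # Model value at each observed (i, j, k): sum_r U[i,r] * V[j,r] * W[k,r].
    I, J, K = (np.asarray(ix) for ix in zip(*ObservedList))
    return np.sum(U[I] * V[J] * W[K], axis=1)
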
Example #12
 def approximate(Xin):
     (G,As) = alg.HOOI(Xin,rank_estimate,alpha,L)
     Xs = alg.expand(G,As)
     return Xs
Example #13
 def approximate(Xin):
     As = alg.RRMFCP(Xin,rank_estimate,beta,Ls,alpha,Ps,Ds)
     Xs = alg.expand(I,As)
     return Xs
Example #14
 def approximate(Xin):
     As = alg.RRMFCP(Xin,rank_estimate,alpha)
     Xs = alg.expand(I,As)
     return Xs
Example #15
 def approximate(Xin):
     As = alg.RRMFCP(Xin, rank_estimate, alpha)
     Xs = alg.expand(I, As)
     return Xs
Example #16
def testCP():
    """
    CP分解テスト
    """
    s = 100
    size = [s, s, s]
    rank = 10
    rank_estimate = 15

    alpha = 0.1
    beta = 0.00

    #X = alg.randomTensorOfNorm(size,rank,0.05)
    #dat = benchmark.ThreeDNoseData()
    dat = benchmark.RandomSmallTensor()
    #dat = benchmark.Flow_Injection()
    #dat = benchmark.Bonnie()
    #dat = benchmark.Enron()
    #dat = benchmark.Sugar()
    #dat = benchmark.Artificial()
    while True:
        X = dat["X"]
        L = dat["L"]

        #X = X - mean(X)
        #X = X + min(X.reshape(prod(X.shape)))
        print "norm:", norm(X)

        originalNorm = norm(X)
        X = X / norm(X)
        #X = X+0.1

        L = [None, None, None]  # discard the data Laplacians (unregularized run)
        print X.shape

        #As = alg.RRMFCP(X,rank_estimate,alpha,[L,None,None],beta)
        start = datetime.datetime.today()
        As = alg.RRMFCP(X, rank_estimate, beta, L, alpha)
        end = datetime.datetime.today()
        #As = cpold.RRMFCP(X,rank_estimate,alpha)

        print "finished"
        print end - start

        I = alg.createUnitTensor(len(size), rank_estimate)
        Result = alg.expand(I, As)

        #vint = vectorize(int)
        #Result = vint(Result*originalNorm+0.5) / originalNorm
        #Result = sign(vint(Result))

        #benchmark.SaveImage(X.reshape(151*2,151*19),"Sugar")
        #benchmark.SaveImage(Result.reshape(151*2,151*19),"Est_Sugar")
        #benchmark.SaveImage(abs(X-Result).reshape(151*2,151*19),"Diff_Sugar")
        #Result = Result / norm(Result)

        #print "original \n", X
        #print "estimated \n",abs(X-Result)
        print "error \n", norm(X - Result)
        #print As[0]
        raw_input()
Example #17
        originalNorm = norm(X)
        X = X / norm(X)
        #X = X+0.1
        
        print [det(l) for l in L]
        return  # NOTE: early return; the remainder of this fragment is unreachable
        print X.shape

        #As = alg.RRMFCP(X,rank_estimate,alpha,[L,None,None],beta)
        As = alg.RRMFCP(X,rank_estimate,beta,L,alpha)
        #As = cpold.RRMFCP(X,rank_estimate,alpha)

        print "finished"
        
        I = alg.createUnitTensor(len(size),rank_estimate)
        Result = alg.expand(I,As)

        #vint = vectorize(int)
        #Result = vint(Result*originalNorm+0.5) / originalNorm
        #Result = sign(vint(Result))

        #benchmark.SaveImage(X.reshape(151*2,151*19),"Sugar")
        #benchmark.SaveImage(Result.reshape(151*2,151*19),"Est_Sugar")
        #benchmark.SaveImage(abs(X-Result).reshape(151*2,151*19),"Diff_Sugar")
        #Result = Result / norm(Result)
        
        #print "original \n", X
        print "estimated \n",abs(X-Result)
        print "error \n", norm(X - Result)
        #print As[0]
        raw_input()