def getTensor(mat):
    # size, rank, and dim are taken from the enclosing module scope.
    def mkfactor(size, rank, L, alpha):
        #D = cholesky(eye(size)+alpha*L)
        [u, s, w] = svd(L)
        D = dot(u, diag(s))
        A = random.randn(size, rank)
        return alpha * dot(D, A)

    alpha = 1e-3
    As = [mkfactor(size[i], rank, createChainLaplacian(size[i]), alpha)
          for i in xrange(dim)]
    import algorithm as alg
    I = alg.createUnitTensor(dim, rank)
    X = alg.expand(I, As)
    abg = sum(abs(X.flatten())) / prod(size)
    print abg
    noiseLevel = 0.1
    X += abg * noiseLevel * random.randn(*X.shape)
    X = X / norm(X)
    #X = arange(1000).reshape(10,10,10)
    return X
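# `createChainLaplacian` is referenced above but not defined in this file.
# A minimal sketch of what it presumably returns -- the graph Laplacian
# L = D - A of the path graph 0-1-...-(n-1) -- written as a hypothetical
# stand-in (an assumption, not the original implementation):
def createChainLaplacian_sketch(n):
    import numpy
    A = numpy.zeros((n, n))
    idx = numpy.arange(n - 1)
    A[idx, idx + 1] = 1.0  # edges between consecutive nodes
    A[idx + 1, idx] = 1.0
    D = numpy.diag(A.sum(axis=1))  # degree matrix
    return D - A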
def Evaluation_CP(): """ CP分解のクロスバリデーションによる評価 """ logger = Logger("CPEvaluation") s = 20 r = 3 re = 2 size = [s,s,s] rank = r rank_estimate = re alpha = 0.001 logger.WriteLine("TensorSize="+str(size)) logger.WriteLine("TensorRank="+str(rank)) logger.WriteLine("EstimationRank"+str(rank_estimate)) Y = alg.randomTensorOfNorm(size,rank,0.05) * 100 I = alg.createUnitTensor(len(size),rank) while True: testrates = [0.1 * (i+1) for i in xrange(9)] for zerorate in testrates: W = createMask(size,zerorate) def approximate(Xin): As = alg.RRMFCP(Xin,rank_estimate,alpha) Xs = alg.expand(I,As) return Xs X = comp.Completion(Y,W,approximate) diff = norm(Y-X) logger.WriteLine(str(zerorate)+ " " + str(diff)) print "rate:", zerorate, "error:", norm(Y-X)
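# `createMask` is used throughout but defined elsewhere.  A minimal sketch,
# assuming it returns a 0/1 observation mask of the given shape in which
# roughly `zerorate` of the entries are hidden (0 = missing):
def createMask_sketch(size, zerorate):
    import numpy
    return (numpy.random.rand(*size) >= zerorate).astype(float)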
def CompletionDistance_CP_Everystep(Y, Observed, rank_estimate, Ls, alpha):
    """ EM-ALS CP decomposition / Laplacian regularization in the loss function """
    N = Y.ndim
    R = rank_estimate
    I = alg.createUnitTensor(N, R)
    updater = lambda X, G, As: (I, alg.CPDistanceStep(As, X, R=rank_estimate, Ls=Ls, alpha=alpha))
    return newalg.CompletionStep(Y, Observed, Y, updater)
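# `newalg.CompletionStep` is defined elsewhere; all the *_EveryStep
# completion functions in this file feed it an `updater`.  A minimal sketch
# of the assumed EM-ALS loop (the iteration count, stopping rule, and
# initialization here are guesses):
# (E-step) impute the missing entries from the current model,
# (M-step) run one ALS update of the factors via `updater`.
def CompletionStep_sketch(Y, Observed, Xinit, updater, iters=100):
    X = Xinit
    G, As = None, None  # updater is assumed to initialize As on first call
    for _ in xrange(iters):
        G, As = updater(X, G, As)           # one ALS step on the imputed tensor
        Xmodel = alg.expand(G, As)          # current low-rank reconstruction
        X = Observed * Y + (1 - Observed) * Xmodel  # keep observed entries fixed
    return X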
def testL():
    """ Legacy code """
    s = 100
    size = [s, s, s]
    rank = 4
    rank_estimate = 15
    alpha = 0.1
    beta = 0.00
    while True:
        #X = alg.randomTensorOfNorm(size,rank,0.05)
        dat = benchmark.Bonnie()
        X = dat["X"]
        L = dat["L"]
        #X = X - mean(X)
        #X = X + min(X.reshape(prod(X.shape)))
        print "norm:", norm(X)
        originalNorm = norm(X)
        X = X / norm(X)
        #X = X+0.1
        print [det(l) for l in L]
        return
        # NOTE: everything below is unreachable because of the return above.
        print X.shape
        #As = alg.RRMFCP(X,rank_estimate,alpha,[L,None,None],beta)
        As = alg.RRMFCP(X, rank_estimate, beta, L, alpha)
        #As = cpold.RRMFCP(X,rank_estimate,alpha)
        print "finished"
        I = alg.createUnitTensor(len(size), rank_estimate)
        Result = alg.expand(I, As)
        #vint = vectorize(int)
        #Result = vint(Result*originalNorm+0.5) / originalNorm
        #Result = sign(vint(Result))
        #benchmark.SaveImage(X.reshape(151*2,151*19),"Sugar")
        #benchmark.SaveImage(Result.reshape(151*2,151*19),"Est_Sugar")
        #benchmark.SaveImage(abs(X-Result).reshape(151*2,151*19),"Diff_Sugar")
        #Result = Result / norm(Result)
        #print "original \n", X
        print "estimated \n", abs(X - Result)
        print "error \n", norm(X - Result)
        #print As[0]
        raw_input()
def CompletionKP_CP_EveryStep(Y, Observed, rank_estimate, Ls, alpha):
    """ EM-ALS CP decomposition / regularization on the whole tensor / Kronecker-product version """
    assert isinstance(Ls, list)
    N = Y.ndim
    R = rank_estimate
    Ws, Ds = InitializeDiagAndSimilarity(Y, Ls)
    PWs, DWs = DecomposeLaplacians(Ws)
    I = alg.createUnitTensor(N, R)
    updater = lambda X, G, As: (I, alg.CPKprodStep(As=As, X=X, R=rank_estimate, Ds=Ds, Ws=Ws, PWs=PWs, DWs=DWs, alpha=alpha))
    return newalg.CompletionStep(Y, Observed, Y, updater)
def CompletionKS_CP_EveryStep(Y, Observed, rank_estimate, Ls, alpha):
    """ EM-ALS CP decomposition / regularization on the whole tensor """
    N = Y.ndim
    R = rank_estimate
    # updater: X, As -> Xnew
    if Ls is None:
        Ls = [None for i in xrange(N)]
    Ps, Ds = DecomposeLaplacians(Ls)
    I = alg.createUnitTensor(N, R)
    updater = lambda X, G, As: (I, alg.CPKsumStep(As=As, X=X, R=rank_estimate, Ls=Ls, Ps=Ps, Ds=Ds, alpha=alpha))
    return newalg.CompletionStep(Y, Observed, Y, updater)
def CompletionCPProd_EveryStep(Y, Observed, rank_estimate, Ls, alpha, beta):
    """ EM-ALS CP decomposition / Kronecker-product version """
    assert isinstance(Ls, list)
    assert not isinstance(rank_estimate, list)
    N = Y.ndim
    R = rank_estimate
    # updater: X, As -> Xnew
    if Ls is None:
        Ls = [None for i in xrange(N)]
    I = alg.createUnitTensor(N, R)
    updater = lambda X, G, As: (I, alg.RRMFCPProdstep(As=As, X=X, R=rank_estimate, beta=beta, Ls=Ls, alpha=alpha))
    return newalg.CompletionStep(Y, Observed, Y, updater)
def testCP():
    s = 100
    size = [s, s, s]
    rank = 4
    rank_estimate = 5
    alpha = 0.1
    beta = 0.0001
    while True:
        #X = alg.randomTensorOfNorm(size,rank,0.05)
        #dat = benchmark.ThreeDNoseData()
        #dat = benchmark.RandomSmallTensor()
        dat = benchmark.Flow_Injection()
        #dat = benchmark.Bonnie()
        #dat = benchmark.Artificial()
        X = dat["X"]
        L = dat["L"]
        #X = X - mean(X)
        #X = X + min(X.reshape(prod(X.shape)))
        print "norm:", norm(X)
        X = X / norm(X)
        #X = X+0.1
        L = [None, None, None]
        print X.shape
        #As = alg.RRMFCP(X,rank_estimate,alpha,[L,None,None],beta)
        As = alg.RRMFCP(X, rank_estimate, beta, L, alpha)
        #As = cpold.RRMFCP(X,rank_estimate,alpha)
        print "finished"
        I = alg.createUnitTensor(len(size), rank_estimate)
        Result = alg.expand(I, As)
        benchmark.SaveImage(X, "Flow")
        benchmark.SaveImage(Result, "Est_Flow")
        #Result = Result / norm(Result)
        #print "original \n", X
        #print "estimated \n", Result
        print "error \n", norm(X - Result)
        #print As[0]
        raw_input()
def CompletionCP_EveryStep(Y, Observed, rank_estimate, Ls, alpha, beta):
    """ EM-ALS CP decomposition """
    assert isinstance(Ls, list)
    assert not isinstance(rank_estimate, list)
    N = Y.ndim
    R = rank_estimate
    # updater: X, As -> Xnew
    if Ls is None:
        Ls = [None for i in xrange(N)]
    Ps, Ds = DecomposeLaplacians(Ls)
    I = alg.createUnitTensor(N, R)
    updater = lambda X, G, As: (I, alg.RRMFCPstep(As=As, X=X, R=rank_estimate, beta=beta, Ls=Ls, Ps=Ps, Ds=Ds, alpha=alpha))
    return newalg.CompletionStep(Y, Observed, Y, updater)
def CompletionCP(Y, observed, rank_estimate, Ls, alpha, beta):
    """ Completion by CP decomposition """
    assert isinstance(Ls, list)
    assert not isinstance(rank_estimate, list)
    if Ls is None:
        Ls = [None for i in xrange(Y.ndim)]
    (Ps, Ds) = DecomposeLaplacians(Ls)
    #print "Singular Decomposed L"
    I = alg.createUnitTensor(Y.ndim, rank_estimate)

    def approximate(Xin):
        As = alg.RRMFCP(Xin, rank_estimate, beta, Ls, alpha, Ps, Ds)
        Xs = alg.expand(I, As)
        return Xs

    return Completion(Y, observed, approximate)
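# `DecomposeLaplacians` is defined elsewhere.  Judging by its use (computed
# once, then its Ps/Ds passed into every ALS step) and the "Singular
# Decomposed L" comment above, it plausibly factorizes each Laplacian up
# front so the per-iteration solves can reuse the decomposition.
# Hypothetical sketch; None entries pass through for unregularized modes:
def DecomposeLaplacians_sketch(Ls):
    import numpy.linalg
    Ps, Ds = [], []
    for L in Ls:
        if L is None:
            Ps.append(None)
            Ds.append(None)
        else:
            P, d, _ = numpy.linalg.svd(L)  # L symmetric PSD: SVD == eigendecomposition
            Ps.append(P)
            Ds.append(d)
    return Ps, Ds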
def getTensor(mat):
    def mkfactor(size, rank):
        th = random.randn()
        off = random.randn()
        d2 = arange(size) * arange(size)
        d1 = arange(size)
        print th, off
        A = array([d1 * th + off for i in xrange(rank)]).T
        return A

    As = [mkfactor(size[i], rank) for i in xrange(dim)]
    As[0] = (random.rand(size[0], rank) - 0.5) * 0.5
    As[1] = (random.rand(size[1], rank) - 0.5) * 0.5
    import algorithm as alg
    I = alg.createUnitTensor(dim, rank)
    I = random.randn(rank, rank, rank)
    X = alg.expand(I, As)
    X = X / norm(X)
    return X
def getTensor(mat):
    def mkfactor(size, rank):
        th = random.randn()
        off = random.randn()
        d2 = arange(size) * arange(size)
        d1 = arange(size)
        print th, off
        A = array([d1 * th + off for i in xrange(rank)]).T
        return A

    As = [mkfactor(size[i], rank) for i in xrange(dim)]
    import algorithm as alg

    def p(obj):
        print obj

    I = alg.createUnitTensor(dim, rank)
    X = alg.expand(I, As)
    X = X / norm(X)
    return X
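# For reference, `alg.expand(I, As)` computes the multilinear expansion of a
# core tensor by the factor matrices.  For the 3-way case used in these
# generators it is equivalent to the following numpy.einsum one-liner
# (a sketch, assuming exactly three modes); with a superdiagonal core it
# reduces to the usual CP model sum_r a_r (outer) b_r (outer) c_r:
def expand3_sketch(G, As):
    import numpy
    A, B, C = As
    return numpy.einsum('pqr,ip,jq,kr->ijk', G, A, B, C)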
def testCompletion():
    s = 10
    r = 3
    re = 7
    size = [s, s, s]
    rank = [r, r, r]
    rank_estimate = [re, re, re]
    zerorate = 0.9
    import benchmark
    #data = benchmark.Wine_v6()
    #data = benchmark.Bonnie()
    data = benchmark.Flow_Injection()
    #data = benchmark.ThreeDNoseData()
    #data = benchmark.RandomSmallTensor()
    #data = benchmark.Artificial()
    Ls = data["L"]
    X = data["X"]
    import numpy.linalg
    #X = X - mean(X)
    normX = numpy.linalg.norm(X)
    print normX
    X = X / normX
    #Y = alg.randomTensorOfNorm(size,rank,0.01) * 10
    Y = X
    print Y.shape
    #print map(lambda x:str(x.shape),Ls)
    alpha = 0.1
    #L = createTestLaplacian(s)
    Ls = [None, None, None]
    W = createMask(list(Y.shape), zerorate)
    beta = 0
    I = alg.createUnitTensor(len(size), re)

    def approximateCP(Xin):
        print re
        print alpha
        print Ls
        print beta
        As = alg.RRMFCP(Xin, re, beta, Ls, alpha)
        Xs = alg.expand(I, As)
        return Xs

    def approximateTucker(Xin):
        (G, As) = alg.HOOI(Xin, rank_estimate, alpha, Ls)
        Xs = alg.expand(G, As)
        return Xs

    print "test for Completion starts, size:", Y.shape
    if False:
        if False:
            X = comp.CompletionCP(Y, W, re, Ls, alpha, beta)
        else:
            X = comp.CompletionTucker(Y, W, rank_estimate, Ls, alpha)
    else:
        import newalg
        X = newalg.HOOI(Y, W, Y, rank_estimate, alpha, Ls)
    #print "final est. error rate", abs(1- X / Y)
    print "final estimation error:", norm(Y - X)
def testCompletion(): """ テンソル補完のテスト用コード """ re = 4 rank_estimate = [re,re,re] zerorate = 0.99 import benchmark #data = benchmark.Artificial() data = benchmark.Flow_Injection() Ls = data["L"] X = data["X"]# #X = X - mean(X) alpha = 1e-2 method = "CP" method = "Tucker" #method = "KSCP" #method = "KSTucker" #method = "DistanceTucker" #method = "DistanceCP" #method = "TuckerProd" #method = "CPProd" #method="CP" #method = "KPCP" #method = "KPTucker" #Ls[0] = None #Ls[1] = None #Ls = [None,None,None] normX = numpy.linalg.norm(X) print "norm",normX #print max(X) #print sum(sign(X)) X = X / normX #Y = alg.randomTensorOfNorm(size,rank,0.01) * 10 Y = X print Y.shape #Ls = [None,None,None] #Ls[0]=None #Ls[1]=None W = createMask(list(Y.shape),zerorate) print sum(W)," / ",prod(Y.shape) beta = 0 I = alg.createUnitTensor(X.ndim,re) print "test fo Completion starts, size:", Y.shape print method if method == "CP": X = comp.CompletionCP_EveryStep(Y,W,re,Ls,alpha,beta) elif method == "Tucker": X = comp.CompletionTucker_EveryStep(Y,W,rank_estimate,Ls,alpha) elif method == "KSCP": X = comp.CompletionKS_CP_EveryStep(Y,W,re,Ls,alpha) elif method == "KSTucker": X = comp.CompletionKS_Tucker_EveryStep(Y,W,rank_estimate,Ls,alpha) elif method == "KPCP": X = comp.CompletionKP_CP_EveryStep(Y,W,re,Ls,alpha) elif method == "KPTucker": X = comp.CompletionKP_Tucker_EveryStep(Y,W,rank_estimate,Ls,alpha) elif method == "DistanceTucker": X = comp.CompletionDistance_Tucker_EveryStep(Y,W,rank_estimate,Ls,alpha) elif method == "DistanceCP": X = comp.CompletionDistance_CP_Everystep(Y,W,re,Ls,alpha) elif method == "CPProd": X = comp.CompletionCPProd_EveryStep(Y,W,re,Ls,alpha,beta) elif method == "TuckerProd": X = comp.CompletionTuckerProd_EveryStep(Y,W,rank_estimate,Ls,alpha) print "final estimation error:", norm(Y-X) #print "original \n",abs(Y-X) #print "estimated \n",X vint = vectorize(int)
def testCP(): """ CP分解テスト """ s = 100 size = [s,s,s] rank = 10 rank_estimate = 15 alpha = 0.1 beta = 0.00 #X = alg.randomTensorOfNorm(size,rank,0.05) #dat = benchmark.ThreeDNoseData() dat = benchmark.RandomSmallTensor() #dat = benchmark.Flow_Injection() #dat = benchmark.Bonnie() #dat = benchmark.Enron() #dat = benchmark.Sugar() #dat = benchmark.Artificial() while True: X = dat["X"] L = dat["L"] #X = X - mean(X) #X = X + min(X.reshape(prod(X.shape))) print "norm:",norm(X) originalNorm = norm(X) X = X / norm(X) #X = X+0.1 L = [None,None,None] print X.shape #As = alg.RRMFCP(X,rank_estimate,alpha,[L,None,None],beta) start = datetime.datetime.today() As = alg.RRMFCP(X,rank_estimate,beta,L,alpha) end = datetime.datetime.today() #As = cpold.RRMFCP(X,rank_estimate,alpha) print "finished" print end-start I = alg.createUnitTensor(len(size),rank_estimate) Result = alg.expand(I,As) #vint = vectorize(int) #Result = vint(Result*originalNorm+0.5) / originalNorm #Result = sign(vint(Result)) #benchmark.SaveImage(X.reshape(151*2,151*19),"Sugar") #benchmark.SaveImage(Result.reshape(151*2,151*19),"Est_Sugar") #benchmark.SaveImage(abs(X-Result).reshape(151*2,151*19),"Diff_Sugar") #Result = Result / norm(Result) #print "original \n", X #print "estimated \n",abs(X-Result) print "error \n", norm(X - Result) #print As[0] raw_input()
def testCompletion(): """ テンソル補完のテスト用コード """ re = 4 rank_estimate = [re, re, re] zerorate = 0.99 import benchmark #data = benchmark.Artificial() data = benchmark.Flow_Injection() Ls = data["L"] X = data["X"] # #X = X - mean(X) alpha = 1e-2 method = "CP" method = "Tucker" #method = "KSCP" #method = "KSTucker" #method = "DistanceTucker" #method = "DistanceCP" #method = "TuckerProd" #method = "CPProd" #method="CP" #method = "KPCP" #method = "KPTucker" #Ls[0] = None #Ls[1] = None #Ls = [None,None,None] normX = numpy.linalg.norm(X) print "norm", normX #print max(X) #print sum(sign(X)) X = X / normX #Y = alg.randomTensorOfNorm(size,rank,0.01) * 10 Y = X print Y.shape #Ls = [None,None,None] #Ls[0]=None #Ls[1]=None W = createMask(list(Y.shape), zerorate) print sum(W), " / ", prod(Y.shape) beta = 0 I = alg.createUnitTensor(X.ndim, re) print "test fo Completion starts, size:", Y.shape print method if method == "CP": X = comp.CompletionCP_EveryStep(Y, W, re, Ls, alpha, beta) elif method == "Tucker": X = comp.CompletionTucker_EveryStep(Y, W, rank_estimate, Ls, alpha) elif method == "KSCP": X = comp.CompletionKS_CP_EveryStep(Y, W, re, Ls, alpha) elif method == "KSTucker": X = comp.CompletionKS_Tucker_EveryStep(Y, W, rank_estimate, Ls, alpha) elif method == "KPCP": X = comp.CompletionKP_CP_EveryStep(Y, W, re, Ls, alpha) elif method == "KPTucker": X = comp.CompletionKP_Tucker_EveryStep(Y, W, rank_estimate, Ls, alpha) elif method == "DistanceTucker": X = comp.CompletionDistance_Tucker_EveryStep(Y, W, rank_estimate, Ls, alpha) elif method == "DistanceCP": X = comp.CompletionDistance_CP_Everystep(Y, W, re, Ls, alpha) elif method == "CPProd": X = comp.CompletionCPProd_EveryStep(Y, W, re, Ls, alpha, beta) elif method == "TuckerProd": X = comp.CompletionTuckerProd_EveryStep(Y, W, rank_estimate, Ls, alpha) print "final estimation error:", norm(Y - X) #print "original \n",abs(Y-X) #print "estimated \n",X vint = vectorize(int)
def testCP(): """ CP分解テスト """ s = 100 size = [s, s, s] rank = 10 rank_estimate = 15 alpha = 0.1 beta = 0.00 #X = alg.randomTensorOfNorm(size,rank,0.05) #dat = benchmark.ThreeDNoseData() dat = benchmark.RandomSmallTensor() #dat = benchmark.Flow_Injection() #dat = benchmark.Bonnie() #dat = benchmark.Enron() #dat = benchmark.Sugar() #dat = benchmark.Artificial() while True: X = dat["X"] L = dat["L"] #X = X - mean(X) #X = X + min(X.reshape(prod(X.shape))) print "norm:", norm(X) originalNorm = norm(X) X = X / norm(X) #X = X+0.1 L = [None, None, None] print X.shape #As = alg.RRMFCP(X,rank_estimate,alpha,[L,None,None],beta) start = datetime.datetime.today() As = alg.RRMFCP(X, rank_estimate, beta, L, alpha) end = datetime.datetime.today() #As = cpold.RRMFCP(X,rank_estimate,alpha) print "finished" print end - start I = alg.createUnitTensor(len(size), rank_estimate) Result = alg.expand(I, As) #vint = vectorize(int) #Result = vint(Result*originalNorm+0.5) / originalNorm #Result = sign(vint(Result)) #benchmark.SaveImage(X.reshape(151*2,151*19),"Sugar") #benchmark.SaveImage(Result.reshape(151*2,151*19),"Est_Sugar") #benchmark.SaveImage(abs(X-Result).reshape(151*2,151*19),"Diff_Sugar") #Result = Result / norm(Result) #print "original \n", X #print "estimated \n",abs(X-Result) print "error \n", norm(X - Result) #print As[0] raw_input()
def CompletionGradient(X, shape, ObservedList, R, Ls, alpha=0, XoriginalTensor=None, isProd=False):
    # X is a dense vector holding only the observed elements.
    # Observed must be given as a list of coordinate tuples.
    # TrueX is a 3-dimensional array.
    N = 3
    Xns = [critical.unfold(X, n, shape, ObservedList) for n in xrange(N)]
    print "unfolded"
    As = [zeros((shape[n], R)) for n in xrange(N)]
    #As = [SVD.getLeadingSingularVects(Xns[n],R) for n in xrange(N)]
    Rev = 100
    for i in xrange(Rev):
        Asadd = [SVD.getLeadingSingularVects(Xns[n], R) for n in xrange(N)]
        for k in xrange(N):
            As[k] += Asadd[k]
    for k in xrange(N):
        As[k] /= Rev
        #As[k] += random.randn(shape[k],R)*0.01
    n, m, l = shape

    def lossfunc(U, V, W):
        #print "loss start"
        XO = critical.HadamardProdOfSparseTensor(U, V, W, ObservedList)
        loss = norm(XO - X)
        #print "loss end"
        return loss

    #Xinit = flattenAs(As)
    #print "start bfgs"
    J = alg.createUnitTensor(3, R)
    #Mask = getMaskTensor(ObservedList,shape)
    U, V, W = As
    Ls[:] = [L * alpha if L is not None else 0 for L in Ls]
    Lu, Lv, Lw = Ls
    beta = 1e-8
    isProd = False
    if not isProd:
        LuUse = alpha * Lu + beta * eye(n)
        LvUse = alpha * Lv + beta * eye(m)
        LwUse = alpha * Lw + beta * eye(l)
    #print U.shape, V.shape, W.shape
    Xest = XoriginalTensor  # memory consuming
    threshold = 0.5 * const.ConvergenceThreshold_NewCompletion
    maxiter = 700
    bfgs_maxiter = 3
    errorold = inf
    errorTest = inf
    expandedX = 0
    Dw, Kw = separateLaplacian(Lw, l)
    Dv, Kv = separateLaplacian(Lv, m)
    Du, Ku = separateLaplacian(Lu, n)
    import itertools
    for steps in itertools.count():
        if isProd:
            DSu = alpha * trace(dot(W.T * Dw, W)) * trace(dot(V.T * Dv, V))
            KSu = alpha * trace(dot(W.T, dot(Kw, W))) * trace(dot(V.T, dot(Kv, V)))
            LuUse = DSu * Du - KSu * Ku + eye(n)
        #print "optimization of U"
        #print [U.shape,V.shape,W.shape]
        grad = lambda U: critical.Gradient(X, ObservedList, (U, V, W), LuUse, shape, R, 0)
        loss = lambda U: lossfunc(U, V, W)
        U = LBFGS_matrix(loss, U, grad, maxiter=bfgs_maxiter)
        #print [U.shape,V.shape,W.shape]
        if isProd:
            DSv = alpha * trace(dot(U.T * Du, U)) * trace(dot(W.T * Dw, W))
            KSv = alpha * trace(dot(U.T, dot(Ku, U))) * trace(dot(W.T, dot(Kw, W)))
            LvUse = DSv * Dv - KSv * Kv + eye(m)
        #print "optimization of V"
        grad = lambda V: critical.Gradient(X, ObservedList, (U, V, W), LvUse, shape, R, 1)
        loss = lambda V: lossfunc(U, V, W)
        V = LBFGS_matrix(loss, V, grad, maxiter=bfgs_maxiter)
        if isProd:
            DSw = alpha * trace(dot(U.T * Du, U)) * trace(dot(V.T * Dv, V))
            KSw = alpha * trace(dot(U.T, dot(Ku, U))) * trace(dot(V.T, dot(Kv, V)))
            LwUse = DSw * Dw - KSw * Kw + eye(l)
        #print "optimization of W"
        grad = lambda W: critical.Gradient(X, ObservedList, (U, V, W), LwUse, shape, R, 2)
        loss = lambda W: lossfunc(U, V, W)
        W = LBFGS_matrix(loss, W, grad, maxiter=bfgs_maxiter)
        #grad = lambda (U,V,W)
        if False:
            XO = critical.HadamardProdOfSparseTensor(U, V, W, ObservedList)
            normrate = norm(XO) / norm(X)
            qubicnormrate = normrate ** (1.0 / 3)
            U /= qubicnormrate
            V /= qubicnormrate
            W /= qubicnormrate
        if steps % 20 == 1:
            Xest = alg.expand(J, [U, V, W])
            errorTest = norm(Xest - XoriginalTensor)
            #print U
        errorObserved = lossfunc(U, V, W)
        if steps % 20 == 1:
            print "iter:", steps, " err:", errorTest, " oberr:", errorObserved, " diff:", errorObserved - errorold, "norm:", norm(Xest)
        faultThreshold = 1e4
        if errorObserved > faultThreshold or errorObserved != errorObserved:
            # start over (errorObserved != errorObserved catches NaN)
            print "-----Try Again-----"
            print steps, " steps, observation error=", errorObserved
            return CompletionGradient(X, shape, ObservedList, R, Ls, alpha, XoriginalTensor)
        if abs(errorObserved - errorold) < threshold or steps >= maxiter:
            expandedX = alg.expand(J, [U, V, W])
            print "estimation finished in ", (steps + 1), "steps."
            break
        errorold = errorObserved
    return expandedX
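# Two helpers used by CompletionGradient are defined elsewhere.  Below are
# hypothetical sketches of both, inferred from how they are called above.
#
# `separateLaplacian(L, n)` appears to split L = D - K into its degree part
# (returned as a 1-D vector, matching the column-wise broadcasting in
# `W.T * Dw`) and its adjacency part K; a scalar-zero Laplacian maps to zeros:
def separateLaplacian_sketch(L, n):
    import numpy
    if L is None or numpy.isscalar(L):
        return numpy.zeros(n), numpy.zeros((n, n))
    D = numpy.diag(L).copy()   # degrees sit on the diagonal of L
    K = numpy.diag(D) - L      # adjacency (similarity) part
    return D, K

# `LBFGS_matrix(loss, X0, grad, maxiter)` looks like a thin wrapper letting
# scipy's L-BFGS-B routine optimize a matrix-valued variable by flattening
# and reshaping around the caller's loss and gradient:
def LBFGS_matrix_sketch(loss, X0, grad, maxiter=3):
    import scipy.optimize
    shape = X0.shape
    f = lambda x: loss(x.reshape(shape))
    g = lambda x: grad(x.reshape(shape)).ravel()
    x, fval, info = scipy.optimize.fmin_l_bfgs_b(f, X0.ravel(), fprime=g,
                                                 maxiter=maxiter)
    return x.reshape(shape)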