# NOTE: the original listing omits its imports. The examples below appear to
# assume something like the following (an older TensorLy version with the
# MXNet backend exposing to_numpy); treat these imports as an assumption.
# Helper functions referenced below (Error, Norm, derivativeCore, TuckerBatch,
# Definemasksingleslice, Repetition, ...) come from the same project and are
# not shown here.
import time
import numpy as np
import tensorly as tl
from tensorly import tenalg
from tensorly.base import unfold
from tensorly.decomposition import tucker
from tensorly.backend import mxnet_backend
from sklearn import linear_model

def Operations_listmatrices(listofmatrices, operationnature):  # The parameters are tensors
    # Applies the operation named by operationnature to every matrix in the list
    Res = []
    if operationnature == "Turnintoarray":
        for matrix in listofmatrices:
            element = np.copy(mxnet_backend.to_numpy(matrix))
            Res.append(element)  # converts the tensor into a numpy array
        return Res

    if operationnature == "Transpose":
        for matrix in listofmatrices:
            element = np.copy(mxnet_backend.to_numpy(matrix))
            Res.append(tl.tensor(element.T))  # computes A.T
        return Res

    if operationnature == "Transposetimes":
        for matrix in listofmatrices:
            element = np.copy(mxnet_backend.to_numpy(matrix))
            Res.append(tl.tensor(np.dot(element.T, element)))  # computes A.T*A
        return Res

    if operationnature == "NormI":
        for matrix in listofmatrices:
            Res.append(tl.norm(matrix, 1))
        return Res

    if operationnature == "NormII":
        for matrix in listofmatrices:
            Res.append(np.power(tl.norm(matrix, 2), 2))
        return Res

    if operationnature == "Tensorize":
        for matrix in listofmatrices:
            Res.append(tl.tensor(matrix))
        return Res
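
# A minimal usage sketch (hypothetical data, assuming the numpy-compatible
# backend used throughout this listing): "Transposetimes" maps each factor A
# to its Gram matrix A.T @ A.
def _demo_operations_listmatrices():
    A = tl.tensor(np.random.rand(5, 3))
    B = tl.tensor(np.random.rand(4, 3))
    grams = Operations_listmatrices([A, B], "Transposetimes")
    print([g.shape for g in grams])  # expected: [(3, 3), (3, 3)]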
# Example #2
def Operations_listmatrices(listofmatrices, operationnature):
    # This function takes a list of matrices and performs some classical
    # operations on its elements; operationnature specifies the operation.
    # The matrices are of tensor type
    Res = []
    if operationnature == "Transpose":
        for matrix in listofmatrices:
            element = np.copy(mxnet_backend.to_numpy(matrix))
            Res.append(tl.tensor(element.T))  # computes A.T
        return Res

    if operationnature == "Transposetimes":
        for matrix in listofmatrices:
            element = np.copy(mxnet_backend.to_numpy(matrix))
            Res.append(tl.tensor(np.dot(element.T, element)))  # computes A.T*A
        return Res

    if operationnature == "NormI":
        for matrix in listofmatrices:
            Res.append(tl.norm(tl.tensor(matrix), 1))
        return Res

    if operationnature == "NormII":
        for matrix in listofmatrices:
            Res.append(np.power(tl.norm(tl.tensor(matrix), 2), 2))
        return Res

    if operationnature == "Tensorize":
        for matrix in listofmatrices:
            Res.append(tl.tensor(matrix))
        return Res
# Example #3
def Derivativefeatureproblem(Spectrogram, G, A, B):  # The parameters are tensors
    # Gradient of 0.5*||Spectrogram - A G B.T||_F^2 with respect to G:
    # -A.T (Spectrogram - A G B.T) B
    Sp = mxnet_backend.to_numpy(Spectrogram)
    An = mxnet_backend.to_numpy(A)
    Gn = mxnet_backend.to_numpy(G)
    Bn = mxnet_backend.to_numpy(B)
    derivative = -np.dot(An.T, Sp - np.dot(np.dot(An, Gn), Bn.T))
    derivative = np.dot(derivative, Bn)
    return derivative
# Example #4
def TestPositivity_single(X):
    # Tests whether the data is inherently positive for a single tensor
    # The parameter is a tensor
    Size = np.size(mxnet_backend.to_numpy(X))
    Bool = np.sum(np.array(mxnet_backend.to_numpy(X) >= 0)) / Size
    return Bool == 1  # False as soon as one element is negative
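
# A minimal usage sketch (hypothetical data):
def _demo_testpositivity():
    print(TestPositivity_single(tl.tensor(np.random.rand(2, 3))))     # expected: True
    print(TestPositivity_single(tl.tensor(np.array([[1.0, -1.0]]))))  # expected: False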
# Example #5
def Lineserchstep(X,G,listoffactors,Nonnegative,setting,A,n,Grad,a,b,alpha,theta,pool):
    #This function computes the step size by golden-section line search
    #All the parameters are of tensor type
    Matrix_list=list(listoffactors)
    r=(np.sqrt(5)-1)/2  # golden ratio conjugate
    As=a
    Bs=b
    L=Bs-As
    lambda1=As+r*r*L
    lambda2=As+r*L
    nb_iter=0
    F1=1
    F2=0
    Lambda=0
    while(nb_iter<5):
        nb_iter=nb_iter+1
        if(Nonnegative==False):
            Matrix_list[n]=tl.tensor(A-lambda1*Grad)
            F1=Error(X,G,Matrix_list,setting,pool)+(alpha*(1-theta)/2)*np.power(tl.norm(A,2),2)
            Matrix_list[n]=tl.tensor(A-lambda2*Grad)
            F2=Error(X,G,Matrix_list,setting,pool)+(alpha*(1-theta)/2)*np.power(tl.norm(A,2),2)

        if(Nonnegative==True):
            Matrix_list[n]=tl.tensor(np.maximum(mxnet_backend.to_numpy(A-lambda1*Grad),0))
            F1=Error(X,G,Matrix_list,setting,pool)+(alpha*(1-theta)/2)*np.power(tl.norm(A,2),2)
            Matrix_list[n]=tl.tensor(np.maximum(mxnet_backend.to_numpy(A-lambda2*Grad),0))
            F2=Error(X,G,Matrix_list,setting,pool)+(alpha*(1-theta)/2)*np.power(tl.norm(A,2),2)

        if(F1>F2):  # the minimizer lies in [lambda1,Bs]
            As=lambda1
            lambda1=lambda2
            L=Bs-As
            lambda2=As+r*L
        else:       # the minimizer lies in [As,lambda2]
            Bs=lambda2
            lambda2=lambda1
            L=Bs-As
            lambda1=As+r*r*L

        if((L<0.001) or nb_iter>=5):
            Lambda=(Bs+As)/2
    return Lambda
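
# A self-contained sketch of the same golden-section scheme on a scalar
# function (a hypothetical helper, independent of the tensor code above):
def _golden_section_demo(f, a, b, tol=1e-3, max_iter=50):
    r = (np.sqrt(5) - 1) / 2  # golden ratio conjugate, ~0.618
    l1, l2 = a + r * r * (b - a), a + r * (b - a)
    for _ in range(max_iter):
        if f(l1) > f(l2):   # minimum lies in [l1, b]
            a, l1 = l1, l2
            l2 = a + r * (b - a)
        else:               # minimum lies in [a, l2]
            b, l2 = l2, l1
            l1 = a + r * r * (b - a)
        if b - a < tol:
            break
    return (a + b) / 2

# _golden_section_demo(lambda x: (x - 1.0) ** 2, 0.0, 3.0) returns ~1.0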
def Gramschmidt(A):
    # Classical Gram-Schmidt orthonormalization; returns the orthonormal factor Q of A
    [m,n]=np.array(A.shape,dtype=int)
    Q=tl.tensor(np.zeros((m,n)))
    R=tl.tensor(np.zeros((n,n)))
    for j in range(n):
        v=A[:,j]
        for i in range(j):
            R[i,j]=np.dot(mxnet_backend.to_numpy(Q[:,i]).T,mxnet_backend.to_numpy(A[:,j]))
            v=v-R[i,j]*Q[:,i]
        R[j,j]=tl.norm(v,2)
        Q[:,j]=v/R[j,j]
    return Q
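
# A minimal check (hypothetical data, assuming a backend whose tensors support
# in-place column assignment, as numpy's do): Q should have orthonormal columns.
def _demo_gramschmidt():
    A = tl.tensor(np.random.rand(4, 3))
    Q = mxnet_backend.to_numpy(Gramschmidt(A))
    print(np.allclose(np.dot(Q.T, Q), np.eye(3), atol=1e-6))  # expected: True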
def HOSVD(Tensor, Coretensorsize):
    # Higher-Order SVD: the n-th factor collects the leading left singular
    # vectors of the mode-n unfolding; the core is the tensor projected onto
    # the transposed factors.
    N = len(Tensor.shape)
    listofmatrices = []
    for n in range(N):
        U, s, V = np.linalg.svd(mxnet_backend.to_numpy(unfold(Tensor, n)),
                                full_matrices=True)
        A = U[:, 0:Coretensorsize[n]]
        listofmatrices.append(A)
    Coretensor = Tensor_matrixproduct(
        tl.tensor(Tensor),
        Operations_listmatrices(
            Operations_listmatrices(listofmatrices, "Transpose"), "Tensorize"))
    Coretensor = mxnet_backend.to_numpy(Coretensor)
    return Coretensor, listofmatrices
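
# Usage sketch (hypothetical sizes): truncated HOSVD of a random tensor and a
# reconstruction with the same shape as the input.
def _demo_hosvd():
    X = tl.tensor(np.random.rand(6, 7, 8))
    core, factors = HOSVD(X, [3, 3, 3])
    Xhat = Tensor_matrixproduct(tl.tensor(core), [tl.tensor(U) for U in factors])
    print(Xhat.shape)  # expected: (6, 7, 8)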
def Transform_tensor_into_featuresmatrix(tensor):
    # Flattens each sample tensor[i,:,:] into one row of a features matrix
    size=np.array(tensor.shape,dtype=int)
    number_of_samples=size[0]
    result=np.zeros((number_of_samples,size[1]*size[2]))
    for i in range(number_of_samples):
        result[i,:]=np.resize(mxnet_backend.to_numpy(tensor[i,:,:]),np.size(tensor[i,:,:]))
    return result
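
# Usage sketch (hypothetical data): each of the 4 samples becomes a row of 2*3=6 features.
def _demo_features_matrix():
    T = tl.tensor(np.arange(24.0).reshape(4, 2, 3))
    M = Transform_tensor_into_featuresmatrix(T)
    print(M.shape)  # expected: (4, 6)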
def Proximal_operator(X, step):
    # This function computes the proximal operator of the l1 norm
    # (entrywise soft-thresholding); X is of tensor type
    Res = np.copy(mxnet_backend.to_numpy(X))
    Res = np.sign(Res) * np.maximum(np.abs(Res) - step, 0)
    return tl.tensor(Res)
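
# Usage sketch (hypothetical data): soft-thresholding with step=1 shrinks each
# entry toward zero by 1, zeroing entries whose magnitude is below the step.
def _demo_proximal_operator():
    demo = tl.tensor(np.array([[-2.0, 0.5], [1.5, -0.2]]))
    print(mxnet_backend.to_numpy(Proximal_operator(demo, 1.0)))
    # expected entries: [[-1, 0], [0.5, 0]]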
# Example #10
def Factorupdateproblem(X, G, Ainit, listoffactorsmatrices, Nonnegative, alpha,
                        theta, n, maxiter, step, epsilon):
    # Projected gradient update of the n-th factor matrix, with optional
    # nonnegativity and renormalization to unit norm at every iteration
    Anew = tl.tensor(Ainit)
    Aold = tl.tensor(np.zeros(Anew.shape))
    Aresult = tl.tensor(np.zeros(Anew.shape))
    error = np.power(
        tl.norm(X - Tensor_matrixproduct(G, listoffactorsmatrices), 2),
        2) + alpha * (1 - theta) * np.power(tl.norm(Anew, 2), 2)

    nbiter = 0
    while (nbiter < maxiter):
        nbiter = nbiter + 1
        Aold = Anew

        Anew = Aold - step * derivativeDict(X, G, Aold, listoffactorsmatrices,
                                            alpha, theta, n)
        if (Nonnegative == True):
            Anew = np.maximum(mxnet_backend.to_numpy(Anew), 0)
            Anew = tl.tensor(Anew)

        Anew = Anew / tl.norm(Anew, 2)
        error = np.power(
            tl.norm(X - Tensor_matrixproduct(G, listoffactorsmatrices), 2),
            2)  #+alpha*(1-theta)*np.power(T.norm(Anew,2),2)
        Aresult = Anew

        if (np.sqrt(error) / tl.norm(X, 2) < epsilon):
            Aresult = Aold
            break
    return Aresult
# Example #11
def Sparse_code(X,G_init,listoffactors,Nonnegative,step,max_iter,alpha,theta,epsilon):#The parameters are tensors
    #This function performs the sparse coding step by projected/proximal
    #gradient descent; all the parameters are of tensor type
    G_new=tl.tensor(G_init)
    G_old=tl.tensor(np.zeros(G_new.shape))
    G_result=tl.tensor(np.zeros(G_new.shape))
    error=np.power(tl.norm(X-Tensor_matrixproduct(G_new,listoffactors),2),2)

    nb_iter=0
    error_list=[error]
    while(nb_iter<=max_iter):
        nb_iter=nb_iter+1
        G_old=G_new
        G_new=G_old-step*derivativeCore(X,G_old,listoffactors)
        if(Nonnegative==True):
            G_new=np.maximum(mxnet_backend.to_numpy(G_old-step*(derivativeCore(X,G_old,listoffactors)))+alpha*theta*np.ones(G_old.shape),0)
            G_new=tl.tensor(G_new)
        if(Nonnegative==False):
            G_new=Proximal_operator(G_new,step)  # soft-thresholding enforces sparsity
        error=np.power(tl.norm(X-Tensor_matrixproduct(G_new,listoffactors),2),2)
        G_result=G_new
        error_list.append(error)
        if(np.sqrt(error)/tl.norm(X,2)<epsilon):  # relative-error stopping criterion
            G_result=G_old
            error_list=error_list[0:len(error_list)-1]
            break
    return G_result,error_list,nb_iter
def Retraction(Point,Parameter,backendchoice):
    # QR retraction: maps Point+Parameter back to a matrix with orthonormal columns
    M=Point+Parameter
    if(backendchoice=='numpy'):
        Q,R=np.linalg.qr(M)
    if(backendchoice=='mxnet'):
        Q,R=np.linalg.qr(mxnet_backend.to_numpy(M))
    return tl.tensor(Q)
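
# A minimal sketch of the retraction (hypothetical data): after a small update,
# the result keeps orthonormal columns.
def _demo_retraction():
    Point = tl.tensor(np.eye(4)[:, :2])
    Parameter = tl.tensor(0.1 * np.random.rand(4, 2))
    Q = mxnet_backend.to_numpy(Retraction(Point, Parameter, 'numpy'))
    print(np.allclose(np.dot(Q.T, Q), np.eye(2)))  # expected: True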
def Operations_listmatrices(listofmatrices, operationnature):  # The parameters are tensors
    Res = []
    if operationnature == "Arrayconversion":
        for matrix in listofmatrices:
            element = tl.tensor(matrix)
            Res.append(mxnet_backend.to_numpy(element))
        return Res

    if operationnature == "Transpose":
        for matrix in listofmatrices:
            element = tl.tensor(matrix)
            Res.append(element.T)  # computes A.T
        return Res

    if operationnature == "Transposetimes":
        for matrix in listofmatrices:
            element = tl.tensor(matrix)
            Matrix = tl.backend.dot(element.T, element)
            Res.append(Matrix)  # computes A.T*A
        return Res

    if operationnature == "NormI":
        for matrix in listofmatrices:
            Res.append(tl.norm(matrix, 1))
        return Res

    if operationnature == "NormII":
        for matrix in listofmatrices:
            Res.append(np.power(tl.norm(matrix, 2), 2))
        return Res
def derivativeDict(X,G,A,listofmatrices,alpha,theta,n):#the parameters are tensors
    # Derivative of the dictionary-update objective with respect to the n-th
    # factor matrix A (B is the mode-n unfolding of G multiplied by the other factors)
    listoffactors=list(listofmatrices)
    listoffactors[n]=tl.tensor(np.identity(X.shape[n]))

    WidehatX=Tensor_matrixproduct(X,Operations_listmatrices(listoffactors,"Transpose"))

    listoffactors[n]=tl.tensor(np.identity(G.shape[n]))

    B=unfold(Tensor_matrixproduct(G,listoffactors),n)

    Result=(tl.tensor(np.dot(mxnet_backend.to_numpy(unfold(WidehatX,n)),
                             mxnet_backend.to_numpy(unfold(G,n)).T))
            -tl.tensor(np.dot(mxnet_backend.to_numpy(A),
                              np.dot(mxnet_backend.to_numpy(B),mxnet_backend.to_numpy(B).T)))
            +alpha*(1-theta)*A)
    return Result
# Example #15
def Activationcoeffsingle(args):  # The parameters are tensors
    # args: (spectrograms, initial activations, A, B, step, max iterations,
    #        tolerance, alpha, theta, Nonnegative flag, sample index)
    Gnew = tl.tensor(args[1][args[10]])
    Gold = tl.tensor(np.zeros(args[1][args[10]].shape))
    Gresult = tl.tensor(np.zeros(args[1][args[10]].shape))
    Matrix = np.dot(
        np.dot(mxnet_backend.to_numpy(args[2]), mxnet_backend.to_numpy(Gnew)),
        mxnet_backend.to_numpy(args[3]).T)
    error = tl.norm(args[0][args[10]] - tl.tensor(Matrix), 2) / tl.norm(
        args[0][args[10]], 2)
    nbiter = 0

    while (nbiter < args[5]):
        nbiter = nbiter + 1
        Gold = Gnew
        derivative = Derivativefeatureproblem(args[0][args[10]], Gold, args[2],
                                              args[3], args[7], args[8],
                                              args[9])

        Gnew = Gold - args[4] * derivative
        if (args[9] == True):
            Gnew = tl.tensor(np.maximum(mxnet_backend.to_numpy(Gnew), 0))
        Gresult = Gnew

        Matrix = np.dot(
            np.dot(mxnet_backend.to_numpy(args[2]),
                   mxnet_backend.to_numpy(Gnew)),
            mxnet_backend.to_numpy(args[3]).T)

        error = tl.norm(args[0][args[10]] - tl.tensor(Matrix), 2) / tl.norm(
            args[0][args[10]], 2)
        if (error < args[6]):
            break
    return Gresult
def ALTO_single(X, Coretensorsize, K, Pre_existingfactors,
                sigma):  #All the parameters are tensors
    ListoffactorsU = list(Pre_existingfactors)
    ListoffactorsV = Augmentlist(ListoffactorsU, K, sigma)
    Stilde = Tensor_matrixproduct(
        X, Operations_listmatrices(ListoffactorsV, "Transpose"))
    core, factors = tucker(Stilde,
                           Coretensorsize,
                           init='random',
                           random_state=1)
    Listoffactorsresult = []
    for i in range(len(factors)):
        Listoffactorsresult.append(
            np.dot(mxnet_backend.to_numpy(ListoffactorsV[i]),
                   mxnet_backend.to_numpy(factors[i])))
    Listoffactorsresult = Operations_listmatrices(Listoffactorsresult,
                                                  "Tensorize")
    return core, Listoffactorsresult
def Tensor_matrixproduct(X,listoffactors):#The parameters are tensors(tensor and matrices)
    # Multiplies X by each factor matrix along successive modes (Tucker mode-n products)
    Res=tl.tensor(np.copy(mxnet_backend.to_numpy(X)))
    for mode,matrix in enumerate(listoffactors):
        Res=tenalg.mode_dot(Res,matrix,mode)
    return Res
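
# Usage sketch (hypothetical sizes): expanding a (3,3,3) core with three factors.
def _demo_tensor_matrixproduct():
    core = tl.tensor(np.random.rand(3, 3, 3))
    factors = [tl.tensor(np.random.rand(6, 3)),
               tl.tensor(np.random.rand(7, 3)),
               tl.tensor(np.random.rand(8, 3))]
    print(Tensor_matrixproduct(core, factors).shape)  # expected: (6, 7, 8)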
def OnlineTensorlearningallblocks(Similaritymatrix, listresponses,
                                  listpredictors, Rank, P, Q, M, K, mu, alpha,
                                  Methodname):
    #listresponses contain the data for two consecutive time samples
    Choleskysimilaritymatrix = np.copy(Similaritymatrix)

    Oldparamtensor = np.zeros((P, Q, M))
    Coretensorsize = [Rank, Rank, Rank]
    rmselist = []
    for m in range(M):
        Oldparamtensor[:, :, m] = np.dot(listresponses[0][:, :, m],
                                         np.linalg.pinv(listpredictors[0][:, :, m]))

    Core, Newloadingmatrices = HOSVD(tl.tensor(Oldparamtensor), Coretensorsize)

    Newparametertensor = Tensor_matrixproduct(
        tl.tensor(Core),
        Operations_listmatrices(Newloadingmatrices, "Tensorize"))
    Oldloadingmatrices = []

    for l in range(len(listresponses) - 1):
        ResponsetensorX = listresponses[l + 1]
        PredictortensorZ = listpredictors[l + 1]
        Oldparamtensor = Newparametertensor
        Oldloadingmatrices = Newloadingmatrices
        print("The block number is")
        print(l)
        Newparametertensor, Newloadingmatrices = OnlineTensorlearningsingleblock(
            Choleskysimilaritymatrix, mxnet_backend.to_numpy(Oldparamtensor),
            Oldloadingmatrices, ResponsetensorX, PredictortensorZ, alpha, M, K,
            Coretensorsize, Methodname)
        rmselist.append(
            RMSE(ResponsetensorX, mxnet_backend.to_numpy(Newparametertensor),
                 PredictortensorZ))

    return mxnet_backend.to_numpy(Newparametertensor), rmselist
# Example #19
def Dictionary_update(X,G,listofmatrices,Nonnegative,Pold,Qold,setting,alpha,theta,step,max_iter,epsilon,n,t,period,pool):
    #This function performs gradient descent with a periodic line search on the
    #n-th dictionary matrix, enforcing nonnegativity if this option is chosen
    #All the parameters are tensors
    Anew=tl.tensor(listofmatrices[n])
    Aold=tl.tensor(np.zeros(Anew.shape))
    Aresult=tl.tensor(np.zeros(Anew.shape))
    listoffactors=list(listofmatrices)
    error=Error(X,G,listoffactors,setting,pool)

    error_list=[error]
    nb_iter=0
    while(nb_iter<=max_iter):
        nb_iter=nb_iter+1
        Aold=Anew
        derivative=derivativeDict(X,G,Aold,listofmatrices,Pold,Qold,setting,alpha,theta,n,t)
        if((nb_iter%period)==0 or (nb_iter==1)):  # refresh the step size by line search
            a=step/10
            b=step
            step=Lineserchstep(X,G,listoffactors,Nonnegative,setting,Aold,n,derivative,a,b,alpha,theta,pool)
        if(Nonnegative==True):
            Anew=tl.tensor(np.maximum(mxnet_backend.to_numpy(Aold-step*derivative),0))
        if(Nonnegative==False):
            Anew=Aold-step*derivative
            Anew=Anew/tl.norm(Anew,2)

        error=Error(X,G,listoffactors,setting,pool)
        error_list.append(error)
        Aresult=Anew
        if(error/Norm(X,setting,pool)<epsilon):
            Aresult=Aold
            error_list=error_list[0:len(error_list)-1]
            break
    return Aresult,error_list,nb_iter
def GenerateTensorsNonnegative(Numberofexamples,randomseed):
    # Generates synthetic nonnegative data from a random nonnegative Tucker model
    np.random.seed(randomseed)
    Xtrain=np.random.rand(Numberofexamples,30,40,50)  # overwritten below
    Coretensorsize=np.array([Numberofexamples,20,20,20])
    Greal=np.maximum(np.random.normal(loc=0,scale=1,size=Coretensorsize),0)
    listoffactorsreal=[np.random.normal(loc=0,scale=1/10,size=(30,20)),
                       np.random.normal(loc=0,scale=1/10,size=(40,20)),
                       np.random.normal(loc=0,scale=1/10,size=(50,20))]
    listoffactorsreal=Nonnegativepart(listoffactorsreal)
    for n in range(Numberofexamples):
        Xtrain[n,:,:,:]=mxnet_backend.to_numpy(Tensor_matrixproduct(tl.tensor(Greal[n,:,:,:]),Operations_listmatrices(listoffactorsreal,"Tensorize")))
    return Xtrain
# Example #21
def Derivativefeatureproblem(Spectrogram, G, A, B, alpha, theta,
                             Nonnegative):  #The parameters are tensors
    derivative = -np.dot(
        mxnet_backend.to_numpy(A).T,
        mxnet_backend.to_numpy(Spectrogram) -
        np.dot(np.dot(mxnet_backend.to_numpy(A), mxnet_backend.to_numpy(G)),
               mxnet_backend.to_numpy(B).T))
    derivative = np.dot(derivative, mxnet_backend.to_numpy(B))
    if (Nonnegative == True):
        derivative = derivative + alpha * theta * np.ones(G.shape)
    return tl.tensor(derivative)
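
# A hypothetical finite-difference check: for Nonnegative=False the returned
# matrix should match the gradient of f(G) = 0.5*||X - A G B.T||_F^2.
def _check_derivativefeatureproblem():
    rng = np.random.RandomState(0)
    Xn, An = rng.rand(5, 6), rng.rand(5, 3)
    Gn, Bn = rng.rand(3, 4), rng.rand(6, 4)
    D = mxnet_backend.to_numpy(
        Derivativefeatureproblem(tl.tensor(Xn), tl.tensor(Gn), tl.tensor(An),
                                 tl.tensor(Bn), 0.0, 0.0, False))
    f = lambda Gm: 0.5 * np.sum((Xn - An.dot(Gm).dot(Bn.T)) ** 2)
    eps = 1e-6
    Gp = Gn.copy()
    Gp[0, 0] += eps
    print(D[0, 0], (f(Gp) - f(Gn)) / eps)  # the two numbers should agree closely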
# Example #22
def Reconstruction(Originalimage,patchsize,Coretensorsize,alpha,theta,val,pool):
    # Learns a nonnegative Tucker dictionary on image patches, then restores
    # each patch by lasso coding on the Kronecker product of the dictionary matrices
    [I,J,K]=np.array(Originalimage.shape,dtype=int)
    Listrestauration=[]
    n0=np.min([I,J])
    Imrestored=np.zeros((I,J,K))
    nbpatches=int(np.floor(n0/patchsize)) 
    Setofpatches=np.zeros((patchsize,patchsize,3,nbpatches))
    slicenumber=0
    mask=Definemasksingleslice(Originalimage,val,slicenumber)
    Setofpatches=Patch_extractionallslices(Originalimage,mask,patchsize,val)
    Xtrain_set=[]
    for l in range(nbpatches):
        Xtrain_set.append(tl.tensor(Setofpatches[:,:,:,l]))
    max_iter=100
    period=3
    Nonnegative=True
 
    epsilon=np.power(10,-3,dtype=float)
    step=np.power(10,-6,dtype=float)
    Setting="Single"
    nbepochs=10
    Reprojectornot=False
    Minibatchsize=[]
    Pre_existingfactors=[]

    penaltylasso=alpha*theta
    Pre_existingG_settrain=[]
    
    for l in range(nbpatches):
        
        Pre_existingG_settrain.append(tl.tensor(np.maximum(np.random.normal(loc=0,scale=1/4,size=Coretensorsize),0)))

    Pre_existingfactors=[tl.tensor(np.maximum(np.random.normal(loc=0,scale=1/4,size=(patchsize,Coretensorsize[0])),0)),tl.tensor(np.maximum(np.random.normal(loc=0,scale=1/4,size=(patchsize,Coretensorsize[1])),0)),tl.tensor(np.maximum(np.random.normal(loc=0,scale=1/4,size=(K,Coretensorsize[2])),0))]
    
    Pre_existingP=[tl.tensor(np.random.normal(loc=0,scale=2,size=(patchsize,Coretensorsize[0]))),tl.tensor(np.random.normal(loc=0,scale=2,size=(patchsize,Coretensorsize[1]))),tl.tensor(np.random.normal(loc=0,scale=2,size=(K,Coretensorsize[2])))]
    
    Pre_existingQ=[tl.tensor(np.random.normal(loc=0,scale=2,size=(Coretensorsize[0],Coretensorsize[0]))),tl.tensor(np.random.normal(loc=0,scale=2,size=(Coretensorsize[1],Coretensorsize[1]))),tl.tensor(np.random.normal(loc=0,scale=2,size=(Coretensorsize[2],Coretensorsize[2])))]
        
 
    Dictionarymatrices,listobjectivefunctionvalues,Objectivefunction_per_epoch=CyclicBlocCoordinateTucker_setWithPredefinedEpochs(Xtrain_set,Coretensorsize,Pre_existingfactors,Pre_existingG_settrain,backendchoice,Pre_existingP,Pre_existingQ,Nonnegative,Reprojectornot,Setting,Minibatchsize,step,alpha,theta,max_iter,epsilon,period,nbepochs,pool)
    
    Dm=list(Dictionarymatrices)
    
    Dictionarymatricesconverted=Operations_listmatrices(Dictionarymatrices,"Arrayconversion")
    
    mask=Definemasksingleslice(Originalimage,val,0)
    for i in range(nbpatches):
        for j in range(nbpatches):
            patchmask=mask[i*patchsize:(i+1)*patchsize,j*patchsize:(j+1)*patchsize]
            Aux=Originalimage[i*patchsize:(i+1)*patchsize,j*patchsize:(j+1)*patchsize,:]
            Aux=np.resize(Aux,np.size(Aux))
            patchmask=Repetition(patchmask,K)
            Auxmask=np.resize(patchmask,np.size(patchmask))
            Ind=np.where(Auxmask!=val)[0]  # indices of the observed pixels

            yy=Aux[Ind]
            Dictionarymatrix=np.kron(Dictionarymatricesconverted[0],Dictionarymatricesconverted[1])
            Dictionarymatrix=np.kron(Dictionarymatrix,Dictionarymatricesconverted[2])
            Dma=Dictionarymatrix[Ind,:]
            clf=linear_model.Lasso(alpha=penaltylasso,fit_intercept=False,positive=True)
            clf.fit(Dma,yy)

            Activationcoeff=np.reshape(clf.coef_,Coretensorsize)

            Restore=Tensor_matrixproduct(Activationcoeff,Dm)
            Restore=mxnet_backend.to_numpy(Restore)

            Listrestauration.append(Restore)

    # Note: only Listrestauration is filled in this variant; Imrestored stays zero
    return Imrestored,Listrestauration
        
# Example driver:
# val=255   # value which marks the missing pixels
# ratio=0.4 # ratio of missing pixels
# Hyperspectralimg=np.array(Image.open("Lena.png"))
# [W,H,K]=np.array(Hyperspectralimg.shape,dtype=int)
# Hyperspectralimg=Hyperspectralimg+np.maximum(np.random.normal(loc=0,scale=1,size=(W,H,K)),0)/10
# patchsize=16
# Rank=16
# alpha=0.001
# theta=0.1
# [width,length,spectralbands]=np.array(Hyperspectralimg.shape,dtype=int)
# Commonsize=np.min(np.array([width,length,spectralbands]))
# Coretensorsize=[int(Rank),int(Rank),int(Rank)]
# pool=Pool(20)
# Imrestored,Listrestauration=Reconstruction(Hyperspectralimgpixelsdropped,patchsize,Coretensorsize,alpha,theta,val,pool)
# Example #23
def CyclicBlocCoordinateTucker_set(X_set,Coretensorsize,Pre_existingfactors,Pre_existingG_set,Pre_existingP,Pre_existingQ,Nonnegative,Reprojectornot,Setting,Minibatchnumber,step,alpha,theta,max_iter,epsilon,period,pool):#All the parameters are arrays
    #This function performs the online decomposition, either in the single-tensor
    #or in the minibatch setting
    if(Nonnegative==True):
        Positivity=TestPositivity(X_set)
        if(Positivity==False):
            raise Exception("You decided to perform a nonnegative decomposition while some of your tensors have negative entries")
    
    L=len(X_set)
    
    Inferredfactorsresult=list(Pre_existingfactors)
    listofactorsfactorsinit=[]
    if(Setting=="Single"):
        for t in range(L):
           print("We are in single online")
           print("The tensor processed is")
           print(t)
           X=X_set[t]
           Pre_existingG=Pre_existingG_set[t]
           listofactorsfactorsinit=Inferredfactorsresult
           Gresult,Inferredfactorsresult=CyclicBlocCoordinateTucker_single(X,Coretensorsize,listofactorsfactorsinit,Pre_existingG,Pre_existingP,Pre_existingQ,Nonnegative,Setting,t+1,step,alpha,theta,max_iter,epsilon,period,pool)
#           print("The tensor processed is")
#           print(t)
#           print(Nonnegative)
#           print("The values ofnthe factors are")
#           
#           print(Inferredfactorsresult[0])
#           print(Inferredfactorsresult[1])
#           print(Inferredfactorsresult[2])
#           print(np.sum(mxnet_backend.to_numpy(Inferredfactorsresult[0])))
#           print(np.sum(mxnet_backend.to_numpy(Inferredfactorsresult[1])))
#           print(np.sum(mxnet_backend.to_numpy((Inferredfactorsresult[2]))))
           
           N=len(list(X.shape))  
           Pre_existingG=np.copy(Gresult)
           G=tl.tensor(Gresult)
           for n in range(N):
             listoffactors=list(Inferredfactorsresult)
             
             listoffactors[n]=np.identity(X.shape[n]) 
            
             WidehatX=Tensor_matrixproduct(X,Operations_listmatrices(listoffactors,"Transpose"))       
             listoffactors[n]=tl.tensor(np.identity(G.shape[n]))
             B=Tensor_matrixproduct(tl.tensor(G),listoffactors) 
             
             Pre_existingP[n]=Pre_existingP[n]+np.dot(mxnet_backend.to_numpy(unfold(WidehatX,n)),mxnet_backend.to_numpy(unfold(G,n)).T)
             
             Pre_existingQ[n]=Pre_existingQ[n]+np.dot(mxnet_backend.to_numpy(unfold(B,n)),mxnet_backend.to_numpy(unfold(B,n).T))             
        
        if(Reprojectornot==True):
            Gresult=[]
            for t in range(L):
              X=X_set[t]
              G_init=Pre_existingG_set[t]
              G=Sparse_coding(tl.tensor(X),tl.tensor(G_init),Operations_listmatrices(Inferredfactorsresult,"Tensorize"),Nonnegative,Setting,step,max_iter,alpha,theta,epsilon,pool)
              Gresult.append(G)
                   
            return Gresult,Inferredfactorsresult
        if(Reprojectornot==False):
            return Inferredfactorsresult
    
    
    if(Setting=="MiniBatch"):
        X_setdivided=SplitToMinibatch(X_set,Minibatchnumber)
        Pre_existingGsetdivided=SplitToMinibatch(Pre_existingG_set,Minibatchnumber)
        Pre_existingPold=[]
        Pre_existingPnew=list(Pre_existingP)
        Pre_existingQold=[]
        Pre_existingQnew=list(Pre_existingQ)
        for mininb in range(len(Minibatchnumber)):
            print("We are minibatch")
            print("The minibatch processed is")
            print(mininb)
            X_minibatch=X_setdivided[mininb]
            Pre_existingG_minibatch=Pre_existingGsetdivided[mininb]
            Pre_existingPold=Pre_existingPnew
            Pre_existingQold=Pre_existingQnew            
            print("Point I")            
            Gresult,Inferredfactorsresult=CyclicBlocCoordinateTucker_single(X_minibatch,Coretensorsize,Pre_existingfactors,Pre_existingG_minibatch,Pre_existingPold,Pre_existingQold,Nonnegative,Setting,mininb+1,step,alpha,theta,max_iter,epsilon,period,pool)
            print("Point II")            
            if(mininb!=len(Minibatchnumber)-1):
               X_minibatchold=Operations_listmatrices(X_setdivided[mininb],"Tensorize")
               N=len(list(X_minibatchold[0].shape))
               Inferredfactorsresult=Operations_listmatrices(Inferredfactorsresult,"Tensorize")
               minibatchsize=len(X_minibatchold)
             
               Gactivationcoeff=list(Gresult)
               
               for n in range(N): 
                   for r in range(minibatchsize):                              
                     X=X_minibatchold[r]
                     G=Gactivationcoeff[r]
                     listoffactors=list(Inferredfactorsresult)
                     
                     listoffactors[n]=np.identity(X.shape[n]) 
                      
                     WidehatX=Tensor_matrixproduct(tl.tensor(X),Operations_listmatrices(listoffactors,"Transpose"))       
                     listoffactors[n]=np.identity(G.shape[n])
             
                     B=Tensor_matrixproduct(tl.tensor(G),Operations_listmatrices(listoffactors,"Tensorize"))        
                     Pre_existingPnew[n]=Pre_existingPold[n]+np.dot(mxnet_backend.to_numpy(unfold(WidehatX,n)),mxnet_backend.to_numpy(unfold(G,n)).T)       
                     Pre_existingQnew[n]=Pre_existingQold[n]+np.dot(mxnet_backend.to_numpy(unfold(B,n)),mxnet_backend.to_numpy(unfold(B,n)).T) 
                   

        if(Reprojectornot==True):
            
            for mininb in range(len(Minibatchnumber)):
               X_minibatch=Operations_listmatrices(X_setdivided[mininb],"Tensorize")
               G_init=Operations_listmatrices(Pre_existingGsetdivided[mininb],"Tensorize")
               
               G=Sparse_coding(X_minibatch,G_init,Inferredfactorsresult,Nonnegative,Setting,step,max_iter,alpha,theta,epsilon,pool)
           
               for activation_coeff in G:
              
                  Gresult.append(activation_coeff)
             
            return Gresult,Inferredfactorsresult
        if(Reprojectornot==False):
            return Inferredfactorsresult
def Proximal_operator(X,step):#The parameter is a tensor
    # Soft-thresholding: the proximal operator of the l1 norm
    Res=np.copy(mxnet_backend.to_numpy(X))
    Res=np.sign(Res)*np.maximum(np.abs(Res)-step,0)
    return tl.tensor(Res)
def derivativeDict(X, G, A, listofmatrices, Pold, Qold, setting, alpha, theta,
                   n, t):
    # This function computes the derivative of the objective function with
    # respect to the n-th dictionary matrix; the parameters are tensors.
    # Pold and Qold accumulate sufficient statistics across processed samples.
    listoffactors = list(listofmatrices)
    Pnew = tl.tensor(Pold)
    Qnew = tl.tensor(Qold)

    if (setting == "Single"):
        listoffactors[n] = np.identity(X.shape[n])
        WidehatX = Tensor_matrixproduct(
            X, Operations_listmatrices(listoffactors, "Transpose"))
        listoffactors[n] = tl.tensor(np.identity(G.shape[n]))
        B = Tensor_matrixproduct(G, listoffactors)

        Pnew = Pnew + tl.tensor(
            np.dot(mxnet_backend.to_numpy(unfold(WidehatX, n)),
                   mxnet_backend.to_numpy(unfold(G, n)).T))
        Qnew = Qnew + tl.tensor(
            np.dot(mxnet_backend.to_numpy(unfold(B, n)),
                   mxnet_backend.to_numpy(unfold(B, n)).T))

        Res = -Pnew / t + tl.tensor(
            np.dot(mxnet_backend.to_numpy(A), mxnet_backend.to_numpy(Qnew)) /
            t) + alpha * (1 - theta) * A

        return Res

    if (setting == "MiniBatch"):

        rho = len(X)
        for r in range(rho):
            listoffactors[n] = tl.tensor(np.identity(X[r].shape[n]))

            WidehatX = Tensor_matrixproduct(
                X[r], Operations_listmatrices(listoffactors, "Transpose"))

            Pnew = Pnew + tl.tensor(
                np.dot(mxnet_backend.to_numpy(unfold(WidehatX, n)),
                       mxnet_backend.to_numpy(unfold(G[r], n)).T))

            listoffactors[n] = tl.tensor(np.identity(G[r].shape[n]))

            B = Tensor_matrixproduct(G[r], listoffactors)

            Qnew = Qnew + tl.tensor(
                np.dot(mxnet_backend.to_numpy(unfold(B, n)),
                       mxnet_backend.to_numpy(unfold(B, n).T)))

        Res = -Pnew / t + tl.tensor(
            np.dot(mxnet_backend.to_numpy(A), mxnet_backend.to_numpy(Qnew)) /
            t) + alpha * (1 - theta) * A
        return Res
def Reconstruction(Originalimage,patchsize,Rank,alpha,theta,val,pool):
    # Batch (TuckerBatch) variant for a single-channel image: learns the
    # dictionary on all patches at once, then restores each patch by lasso coding
    [I,J]=np.array(Originalimage.shape,dtype=int)
    Listrestauration=[]
    n0=np.min([I,J])
    Imrestored=np.zeros((I,J))
    nbpatches=int(np.floor(n0/patchsize)) 
    Setofpatches=np.zeros((patchsize,patchsize,nbpatches))
    mask=Definemasksingleslice2d(Originalimage,val)
    Setofpatches=Patch_extractionallslices2d(Originalimage,mask,patchsize,val)
    Xtrain=np.zeros((nbpatches,patchsize,patchsize))
    for l in range(nbpatches):
        Xtrain[l,:,:]=tl.tensor(Setofpatches[:,:,l])
    max_iter=500
    period=3
    Nonnegative=False

    epsilon=np.power(10,-3,dtype=float)
    step=np.power(10,-5,dtype=float)
    Setting="Single"
    nbepochs=3
    Reprojectornot=False
    Minibatchsize=[]
    Pre_existingfactors=[]
    Coretensorsize=[Rank,Rank]
    penaltylasso=alpha*theta
    Ginittrain=np.zeros((nbpatches,Coretensorsize[0],Coretensorsize[1]))
    
    for l in range(nbpatches):
        
        Ginittrain[l,:,:]=tl.tensor(np.maximum(np.random.normal(loc=0,scale=1,size=Coretensorsize),0))

    
    listoffactorsinit=[tl.tensor(np.identity(nbpatches)),tl.tensor(np.maximum(np.random.normal(loc=0,scale=1,size=(patchsize,Coretensorsize[0])),0)),tl.tensor(np.maximum(np.random.normal(loc=0,scale=1,size=(patchsize,Coretensorsize[1])),0))]
    
    start_timetraining=time.time()

    Dictionarymatrices,errorlist,listobjectivefunctionvalues,nbiter=TuckerBatch(Xtrain,[nbpatches,Coretensorsize[0],Coretensorsize[1]],max_iter,listoffactorsinit,Ginittrain,Nonnegative,backendchoice,Reprojectornot,alpha,theta,step,epsilon,pool)

    end_timetraining=time.time()
    Runningtime=end_timetraining-start_timetraining
    print("The running time")
    print(Runningtime)

    Dm=list(Dictionarymatrices[1:3])
    
    Dictionarymatricesconverted=Operations_listmatrices(Dictionarymatrices[1:3],"Arrayconversion")
    
    mask=Definemasksingleslice2d(Originalimage,val)
    for i in range(nbpatches):
        for j in range(nbpatches):
            patchmask=mask[i*patchsize:(i+1)*patchsize,j*patchsize:(j+1)*patchsize]

            Aux=Originalimage[i*patchsize:(i+1)*patchsize,j*patchsize:(j+1)*patchsize]
            Aux=np.resize(Aux,np.size(Aux))
            Auxmask=np.resize(patchmask,np.size(patchmask))
            Ind=np.where(Auxmask!=val)[0]  # indices of the observed pixels

            yy=Aux[Ind]
            Dictionarymatrix=np.kron(Dictionarymatricesconverted[0],Dictionarymatricesconverted[1])
            Dma=Dictionarymatrix[Ind,:]
            clf=linear_model.Lasso(alpha=penaltylasso,fit_intercept=False,positive=True)
            clf.fit(Dma,yy)

            Activationcoeff=np.reshape(clf.coef_,(Rank,Rank))

            Restore=Tensor_matrixproduct(Activationcoeff,Dm)
            Restore=mxnet_backend.to_numpy(Restore)

            Listrestauration.append(Restore)
            Imrestored[i*patchsize:(i+1)*patchsize,j*patchsize:(j+1)*patchsize]=Restore

    return Imrestored,Listrestauration,Runningtime
def RobustsubspaceLearning_Single(X, Pre_existingprojectionmatrices,
                                  Pre_existingenergymatrices, Pre_existingmean,
                                  beta, alpha, p):  #All parameters are arrays
    Tensor = tl.tensor(X)
    listoffactors = list(Pre_existingprojectionmatrices)
    listoffactors = Operations_listmatrices(listoffactors, "Tensorize")
    Energymatrices = list(Pre_existingenergymatrices)
    Mean = np.copy(Pre_existingmean)
    N = len(list(Tensor.shape))
    R = Tensor - Tensor_matrixproduct(
        X, Operations_listmatrices(listoffactors, "Transposetimes"))
    Weightmatriceslist = []
    for n in range(N):
        Eigenvalue = np.linalg.eig(Energymatrices[n])[0]
        U = listoffactors[n]
        [I, J] = np.array(U.shape, dtype=int)
        Xn = unfold(Tensor, mode=n)
        [In, Jn] = np.array(Xn.shape, dtype=int)
        Weightmatrix = np.zeros((In, Jn))
        Sigma = np.zeros((In, Jn))
        for i in range(In):
            for j in range(Jn):
                Sigma[i, j] = np.max(
                    np.multiply(np.sqrt(np.abs(Eigenvalue[1:p])),
                                mxnet_backend.to_numpy(U[1:p, i])))
        k = beta * Sigma
        if (n == 1):
            R = R.T
        for i in range(In):
            for j in range(Jn):

                Weightmatrix[i, j] = 1 / (
                    1 + np.power(R[i, j] / np.maximum(k[i, j], 0.001), 2))
        Weightmatriceslist.append(Weightmatrix)
    W = np.minimum(Weightmatriceslist[0], Weightmatriceslist[1].T)

    WeightTensor = tl.tensor(
        np.multiply(np.sqrt(mxnet_backend.to_numpy(W)),
                    mxnet_backend.to_numpy(Tensor)))
    Mean = alpha * Mean + (1 - alpha) * mxnet_backend.to_numpy(WeightTensor)
    Projectionmatricesresult = []
    Energymatreicesresult = []
    for n in range(N):
        Xn = unfold(WeightTensor, mode=n)
        Covariancematrix = np.dot(
            np.dot(
                mxnet_backend.to_numpy(listoffactors[n]).T, Energymatrices[n]),
            mxnet_backend.to_numpy(listoffactors[n]))
        Covariancematrix = alpha * Covariancematrix + (1 - alpha) * np.dot(
            mxnet_backend.to_numpy(Xn),
            mxnet_backend.to_numpy(Xn).T)
        [Un, diagn, V] = np.linalg.svd(Covariancematrix)

        diagn = diagn / np.power(tl.norm(Xn, 2), 2)
        indices = np.argsort(diagn)
        indices = np.flip(indices, axis=0)

        [J, I] = np.array(listoffactors[n].shape, dtype=int)
        Unew = np.zeros((J, I))
        for j in range(J):
            Unew[j, :] = Un[indices[j], :]
        Sn = np.diag(diagn)
        Projectionmatricesresult.append(Unew)
        Energymatreicesresult.append(Sn)
    return Projectionmatricesresult, Energymatreicesresult, Mean, WeightTensor
def Reconstruction2d(Originalimage, patchsize, Coretensorsize, alpha, theta,
                     val, pool):
    # ALTO variant for a single-channel image: learns the dictionary online,
    # then restores each patch by lasso coding on the Kronecker dictionary
    [I, J] = np.array(Originalimage.shape, dtype=int)
    Listrestauration = []
    n0 = np.min([I, J])
    Imrestored = np.zeros((I, J))
    nbpatches = int(np.floor(n0 / patchsize))
    Setofpatches = np.zeros((patchsize, patchsize, nbpatches))
    mask = Definemasksingleslice2d(Originalimage, val)
    Setofpatches = Patch_extractionallslices2d(Originalimage, mask, patchsize,
                                               val)
    Xtrain_set = []
    for l in range(nbpatches):
        Xtrain_set.append(tl.tensor(Setofpatches[:, :, l]))
    max_iter = 100
    period = 3
    Nonnegative = True

    epsilon = np.power(10, -3, dtype=float)
    step = np.power(10, -5, dtype=float)
    Setting = "Single"
    nbepochs = 1
    Reprojectornot = False
    Minibatchsize = []
    Pre_existingfactors = []
    penaltylasso = alpha * theta
    Pre_existingG_settrain = []

    for l in range(nbpatches):

        Pre_existingG_settrain.append(
            tl.tensor(
                np.maximum(
                    np.random.normal(loc=0, scale=1, size=Coretensorsize), 0)))

    Pre_existingfactors = [
        tl.tensor(
            np.maximum(
                np.random.normal(loc=0,
                                 scale=1,
                                 size=(patchsize, Coretensorsize[0])), 0)),
        tl.tensor(
            np.maximum(
                np.random.normal(loc=0,
                                 scale=1,
                                 size=(patchsize, Coretensorsize[1])), 0))
    ]

    Pre_existingP = [
        tl.tensor(
            np.random.normal(loc=0,
                             scale=1,
                             size=(patchsize, Coretensorsize[0]))),
        tl.tensor(
            np.random.normal(loc=0,
                             scale=1,
                             size=(patchsize, Coretensorsize[1])))
    ]

    Pre_existingQ = [
        tl.tensor(
            np.random.normal(loc=0,
                             scale=1,
                             size=(Coretensorsize[0], Coretensorsize[0]))),
        tl.tensor(
            np.random.normal(loc=0,
                             scale=1,
                             size=(Coretensorsize[1], Coretensorsize[1])))
    ]

    Starttime = time.time()
    Gresult4, Dictionarymatrices = ALTO_setWithpredefinedEpochs(
        Xtrain_set, Coretensorsize, Pre_existingfactors, 5, pool, 1, nbepochs)
    Endtime = time.time()
    Runningtime = Endtime - Starttime
    print("The running time is")
    print(Runningtime)
    Dm = list(Dictionarymatrices)

    Dictionarymatricesconverted = Operations_listmatrices(
        Dictionarymatrices, "Arrayconversion")

    mask = Definemasksingleslice2d(Originalimage, val)
    for i in range(nbpatches):
        for j in range(nbpatches):
            patchmask = mask[i * patchsize:(i + 1) * patchsize,
                             j * patchsize:(j + 1) * patchsize]

            Aux = Originalimage[i * patchsize:(i + 1) * patchsize,
                                j * patchsize:(j + 1) * patchsize]
            Aux = np.resize(Aux, np.size(Aux))
            Auxmask = np.resize(patchmask, np.size(patchmask))
            Ind = np.where(Auxmask != val)[0]  # indices of the observed pixels

            yy = Aux[Ind]
            Dictionarymatrix = np.kron(Dictionarymatricesconverted[0],
                                       Dictionarymatricesconverted[1])
            Dma = Dictionarymatrix[Ind, :]
            clf = linear_model.Lasso(alpha=penaltylasso,
                                     fit_intercept=False,
                                     positive=False)
            clf.fit(Dma, yy)
            # reshape to the core dimensions of the 2d model
            Activationcoeff = np.reshape(clf.coef_,
                                         (Coretensorsize[0], Coretensorsize[1]))

            Restore = Tensor_matrixproduct(Activationcoeff, Dm)
            Restore = mxnet_backend.to_numpy(Restore)

            Listrestauration.append(Restore)
            Imrestored[i * patchsize:(i + 1) * patchsize,
                       j * patchsize:(j + 1) * patchsize] = Restore
    return Imrestored, Listrestauration