Esempio n. 1
0
 def Get_RHS(self,Regu):
     """Return (grad, residual) for the Poisson loss.

     For each mode i, grad[i] = MTTKRP(T - exp(<Omega, A>), A_{-i}) - Regu * A[i].
     The sparse residual tensor is flipped back (second Sparse_add) before
     being returned so the caller sees the exp-model form again.
     """
     residual = self.tenpy.TTTP(self.Omega, self.A)
     ctf.Sparse_exp(residual)                      # residual = exp(residual), in place
     ctf.Sparse_add(residual, self.T, alpha=-1)    # residual = self.T - residual
     grad = []
     for mode in range(len(self.A)):
         factors = list(self.A)
         # zero slot: MTTKRP writes its result into factors[mode]
         factors[mode] = self.tenpy.zeros(self.A[mode].shape)
         self.tenpy.MTTKRP(residual, factors, mode)
         grad.append(factors[mode] - Regu * self.A[mode])
     ctf.Sparse_add(residual, self.T, alpha=-1)    # undo the flip before returning
     return grad, residual
Esempio n. 2
0
    def Get_RHS(self, num, regu):
        """Gradient of the Poisson loss w.r.t. factor `num`.

        The gradient is MTTKRP(T - exp(m), A_{-num}) - regu * A[num], where
        m = TTTP(Omega, A). M is flipped back (second Sparse_add) so the
        returned tensor is in the exp-model form again.
        """
        M = self.tenpy.TTTP(self.Omega, self.A)
        ctf.Sparse_exp(M)                       # M = exp(M), in place
        ctf.Sparse_add(M, self.T, alpha=-1)     # M = self.T - M

        # All factors except `num`, which is a zero slot MTTKRP writes into.
        lst_mat = [
            self.A[j] if j != num else self.tenpy.zeros(self.A[num].shape)
            for j in range(len(self.A))
        ]
        self.tenpy.MTTKRP(M, lst_mat, num)
        grad = lst_mat[num] - regu * self.A[num]
        ctf.Sparse_add(M, self.T, alpha=-1)     # undo the flip before returning
        #self.tenpy.printf("The norm of gradient is ",self.tenpy.vecnorm(grad))
        return [grad, M]
Esempio n. 3
0
    def Get_RHS(self, num, regu):
        """Gradient of the Poisson loss w.r.t. factor `num`, on sampled entries.

        Uses the sparsity pattern of self.sampled_T as the observation mask:
        grad = MTTKRP(sampled_T - exp(m), A_{-num}) - regu * A[num],
        where m = TTTP(mask, A).
        """
        mask = self.sampled_T.copy()
        ctf.get_index_tensor(mask)              # turn copy into a 0/1 observation mask
        M = self.tenpy.TTTP(mask, self.A)
        ctf.Sparse_exp(M)                       # M = exp(M), in place
        ctf.Sparse_add(M, self.sampled_T, alpha=-1)   # M = sampled_T - M

        # All factors except `num`, which is a zero slot MTTKRP writes into.
        lst_mat = [
            self.A[j] if j != num else self.tenpy.zeros(self.A[num].shape)
            for j in range(len(self.A))
        ]
        self.tenpy.MTTKRP(M, lst_mat, num)
        grad = lst_mat[num] - regu * self.A[num]
        #self.tenpy.printf("The norm of gradient is ",self.tenpy.vecnorm(grad))
        return grad
Esempio n. 4
0
def getPCPGN(tenpy, T_in, T, O, X, reg_GN, num_iter_GN, tol, csv_file):
    """Poisson CP completion via Gauss-Newton with CG.

    Args:
        tenpy: tensor backend wrapper (ctf or numpy flavored).
        T_in: sparse observed tensor (values at observed entries).
        T: full tensor (unused here; kept for interface compatibility).
        O: observation mask tensor.
        X: list of factor matrices (initial guess), updated in place per iteration.
        reg_GN: initial regularization, halved every iteration.
        num_iter_GN: maximum number of GN iterations.
        tol: stop when |objective/nnz| < tol.
        csv_file: optional open file for per-iteration CSV logging.

    Returns:
        The final list of factor matrices X.
    """

    def _neg_loglik(factors):
        # Model-dependent part of the Poisson objective:
        # sum over observed entries of exp(m) - T_in * m, with m = TTTP(O, factors).
        M = tenpy.TTTP(O, factors)
        P = M.copy()
        ctf.Sparse_mul(P, T_in)        # P = T_in * m
        ctf.Sparse_exp(M)              # M = exp(m)
        ctf.Sparse_add(M, P, beta=-1)  # M = exp(m) - T_in * m
        val = ctf.sum(M)
        P.set_zero()
        M.set_zero()
        return val

    opt = Poisson_CP_GN_Completer(tenpy, T_in, O, X)
    if tenpy.name() == 'ctf':
        nnz_tot = T_in.nnz_tot
    else:
        nnz_tot = np.sum(O)
    regu = reg_GN
    tenpy.printf("--------------------------------Poisson GN WIth  CG-----------------------------")
    t_ALS = ctf.timer_epoch("Poisson_GN")
    start = time.time()
    it = 0
    time_all = 0

    # Constant (factor-independent) part of the objective:
    # sum over observed entries of T_in * log(T_in) - T_in.
    P = T_in.copy()
    ctf.Sparse_log(P)
    ctf.Sparse_mul(P, T_in)
    ctf.Sparse_add(P, T_in, beta=-1)
    val2 = ctf.sum(P)
    P.set_zero()

    # NOTE(review): "rmse" here is the per-entry Poisson objective, not a true
    # root-mean-square error; name kept for continuity with the logged output.
    rmse = (_neg_loglik(X) + val2) / nnz_tot
    if tenpy.is_master_proc():
        tenpy.printf("After " + str(it) + " iterations,")
        tenpy.printf("RMSE is", rmse)
    if csv_file is not None:
        csv_writer = csv.writer(
            csv_file, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)

    for i in range(num_iter_GN):
        it += 1
        s = time.time()
        t_ALS.begin()
        X = opt.step(regu)
        t_ALS.end()
        time_all += time.time() - s
        rmse = (_neg_loglik(X) + val2) / nnz_tot
        regu = regu / 2
        if tenpy.is_master_proc():
            tenpy.printf("After " + str(it) + " iterations,")
            tenpy.printf("RMSE is", rmse)
            if csv_file is not None:
                csv_writer.writerow([i, time_all, rmse, i, 'PGN'])
                csv_file.flush()
        # Tolerance check on ALL processes (rmse is computed on every rank):
        # breaking only on the master would hang the other ranks in a
        # distributed run.
        if abs(rmse) < tol:
            if tenpy.is_master_proc():
                tenpy.printf("Ending algo due to tolerance")
            break

    end = time.time()
    tenpy.printf('Poisson_GN time taken is ', end - start)
    return X
Esempio n. 5
0
def sgd_poisson(tenpy, T_in, T, O, U, V, W, reg_als, I, J, K, R, num_iter_als,
                tol, csv_file):
    """Poisson CP completion via SGD for a 3rd-order tensor.

    Args:
        tenpy: tensor backend wrapper (ctf or numpy flavored).
        T_in: sparse observed tensor (values at observed entries).
        T: full tensor (unused here; kept for interface compatibility).
        O: observation mask tensor.
        U, V, W: initial factor matrices, updated each iteration.
        reg_als: regularization passed to each SGD step.
        I, J, K, R: dimensions/rank (unused here; kept for interface compat).
        num_iter_als: maximum number of SGD iterations.
        tol: stop when |objective/nnz| < tol (checked every 20 iterations).
        csv_file: optional open file for per-checkpoint CSV logging.

    Returns:
        [U, V, W], the final factor matrices.
    """
    step_size = 0.03
    opt = Poisson_sgd_Completer(tenpy, T_in, O, [U, V, W], step_size)
    if tenpy.name() == 'ctf':
        nnz_tot = T_in.nnz_tot
    else:
        nnz_tot = np.sum(O)
    t_ALS = ctf.timer_epoch("poisson_sgd")

    regu = reg_als
    tenpy.printf(
        "--------------------------------Poisson_sgd-----------------------")
    start = time.time()
    it = 0
    time_all = 0

    # Constant (factor-independent) part of the objective:
    # sum over observed entries of T_in * log(T_in) - T_in.
    P = T_in.copy()
    ctf.Sparse_log(P)
    ctf.Sparse_mul(P, T_in)
    ctf.Sparse_add(P, T_in, beta=-1)
    val2 = ctf.sum(P)
    P.set_zero()

    if csv_file is not None:
        csv_writer = csv.writer(csv_file,
                                delimiter=',',
                                quotechar='|',
                                quoting=csv.QUOTE_MINIMAL)

    for i in range(num_iter_als):
        it += 1
        s = time.time()
        #t_ALS.begin()
        [U, V, W] = opt.step(regu)
        #t_ALS.end()
        time_all += time.time() - s
        # Evaluate the objective only every 20 iterations (it is expensive).
        if it % 20 == 0:
            # Model-dependent part: sum of exp(m) - T_in * m, m = TTTP(O, factors).
            M = tenpy.TTTP(O, [U, V, W])
            P = M.copy()
            ctf.Sparse_mul(P, T_in)        # P = T_in * m
            ctf.Sparse_exp(M)              # M = exp(m)
            ctf.Sparse_add(M, P, beta=-1)  # M = exp(m) - T_in * m
            val = ctf.sum(M)
            P.set_zero()
            M.set_zero()
            # NOTE(review): "rmse" is the per-entry Poisson objective, not a
            # true RMSE; name kept for continuity with the logged output.
            rmse = (val + val2) / nnz_tot
            if tenpy.is_master_proc():
                tenpy.printf("After " + str(it) + " iterations, and time is",
                             time_all)
                tenpy.printf("RMSE is", rmse)
                if csv_file is not None:
                    csv_writer.writerow([i, time_all, rmse, i, 'PALS'])
                    csv_file.flush()
            # Tolerance check on ALL processes (rmse is computed on every
            # rank): breaking only on the master would hang the other ranks
            # in a distributed run.
            if abs(rmse) < tol:
                if tenpy.is_master_proc():
                    tenpy.printf("Ending algo due to tolerance")
                break

    end = time.time()
    tenpy.printf('Poisson sgd time taken is ', end - start)
    return [U, V, W]