def Get_RHS(self, Regu):
    grad = []
    inter = self.tenpy.TTTP(self.Omega, self.A)
    ctf.Sparse_exp(inter)
    ctf.Sparse_add(inter, self.T, alpha=-1)  # inter = self.T - inter
    for i in range(len(self.A)):
        lst_mat = self.A[:]
        lst_mat[i] = self.tenpy.zeros(self.A[i].shape)
        self.tenpy.MTTKRP(inter, lst_mat, i)
        grad.append(lst_mat[i] - Regu * self.A[i])
    ctf.Sparse_add(inter, self.T, alpha=-1)  # undo the subtraction so inter again holds e^m
    return grad, inter
def matvec(self, regu, delta):
    N = len(self.A)
    ret = []
    lst_mat = self.A[:]
    lst_mat[0] = delta[0].copy()
    inter = self.tenpy.TTTP(self.Omega, lst_mat)
    for n in range(1, N):
        lst_mat = self.A[:]
        lst_mat[n] = delta[n].copy()
        M = self.tenpy.TTTP(self.Omega, lst_mat)
        ctf.Sparse_add(inter, M)
    lst_mat = self.A[:]
    lst_mat[0] = self.tenpy.zeros(self.A[0].shape)
    self.tenpy.MTTKRP(inter, lst_mat, 0)
    ret.append(self.tenpy.zeros(self.A[0].shape))
    ret[0] += lst_mat[0]
    ret[0] += regu * delta[0]
    for n in range(1, N):
        ret.append(self.tenpy.zeros(self.A[n].shape))
        lst_mat = self.A[:]
        lst_mat[n] = self.tenpy.zeros(self.A[n].shape)
        self.tenpy.MTTKRP(inter, lst_mat, n)
        ret[n] += lst_mat[n]
        ret[n] += regu * delta[n]
    return ret
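# --- Hedged sketch (not part of the library) --------------------------------
# The matvec above realizes the Gauss-Newton operator (J^T J + regu*I) applied
# to a list of factor-matrix perturbations.  The CG driver that consumes it is
# not shown in this section; the NumPy-only reference below is an illustrative
# sketch of such an inner solve, assuming `matvec` already closes over regu
# (e.g. `lambda d: opt.matvec(regu, d)`) and `rhs` is a list of arrays.  The
# names cg_reference and _dot are hypothetical, not part of the library.
import numpy as np

def _dot(xs, ys):
    # Inner product over a list of factor matrices.
    return sum(np.sum(x * y) for x, y in zip(xs, ys))

def cg_reference(matvec, rhs, tol=1e-6, max_iters=100):
    x = [np.zeros_like(b) for b in rhs]
    r = [b.copy() for b in rhs]          # residual = rhs - A@x, with x = 0
    p = [ri.copy() for ri in r]
    rs_old = _dot(r, r)
    for it in range(max_iters):
        Ap = matvec(p)
        alpha = rs_old / _dot(p, Ap)
        x = [xi + alpha * pi for xi, pi in zip(x, p)]
        r = [ri - alpha * Api for ri, Api in zip(r, Ap)]
        rs_new = _dot(r, r)
        if rs_new ** 0.5 < tol:
            return x, it + 1
        p = [ri + (rs_new / rs_old) * pi for ri, pi in zip(r, p)]
        rs_old = rs_new
    return x, max_iters
# -----------------------------------------------------------------------------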
def Get_Num(self, num, r, regu, M):
    # The gradient of the loss function is MTTKRP(e^m - x); we need the negative of this.
    lst_mat = []
    for j in range(len(self.A)):
        lst_mat.append(self.A[j][:, r])
    # inter = subtract_sparse(self.T, M)
    ctf.Sparse_add(M, self.T, alpha=-1)  # M = self.T - M
    lst_mat[num] = self.tenpy.zeros(self.A[num].shape[0])
    self.tenpy.MTTKRP(M, lst_mat, num)
    grad = lst_mat[num] - regu * self.A[num][:, r]
    ctf.Sparse_add(M, self.T, alpha=-1)  # undo the subtraction, restoring M
    # self.tenpy.printf("The norm of gradient is ", self.tenpy.norm(grad))
    return grad
def Get_RHS(self, num, regu):
    # The gradient of the loss function is MTTKRP(e^m - x); we need the negative of this.
    M = self.tenpy.TTTP(self.Omega, self.A)
    ctf.Sparse_exp(M)
    # inter = subtract_sparse(self.T, M)
    ctf.Sparse_add(M, self.T, alpha=-1)  # M = self.T - M
    lst_mat = []
    for j in range(len(self.A)):
        if j != num:
            lst_mat.append(self.A[j])
        else:
            lst_mat.append(self.tenpy.zeros(self.A[num].shape))
    self.tenpy.MTTKRP(M, lst_mat, num)
    grad = lst_mat[num] - regu * self.A[num]
    ctf.Sparse_add(M, self.T, alpha=-1)  # undo the subtraction so M again holds e^m
    # self.tenpy.printf("The norm of gradient is ", self.tenpy.vecnorm(grad))
    return [grad, M]
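# --- Hedged sketch (not part of the library) --------------------------------
# Dense NumPy reference of what Get_RHS above computes for a 3-way tensor: the
# mode-`num` MTTKRP of (T - e^m) restricted to observed entries, minus
# regu*A[num], where m is the CP model <A[0], A[1], A[2]>.  Purely illustrative;
# the production code works on sparse CTF tensors.  The function name is
# hypothetical.
import numpy as np

def poisson_grad_reference(T, Omega, A, num, regu):
    m = np.einsum('ir,jr,kr->ijk', A[0], A[1], A[2])
    resid = Omega * (T - np.exp(m))
    # MTTKRP in mode `num`: contract the residual with all factors except A[num].
    if num == 0:
        g = np.einsum('ijk,jr,kr->ir', resid, A[1], A[2])
    elif num == 1:
        g = np.einsum('ijk,ir,kr->jr', resid, A[0], A[2])
    else:
        g = np.einsum('ijk,ir,jr->kr', resid, A[0], A[1])
    return g - regu * A[num]
# -----------------------------------------------------------------------------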
def ccd(tenpy, T_in, T, O, X, reg_als, num_iter_als, tol, csv_file):
    opt = ccd_Completer(tenpy, T_in, O, X)
    # if T_in.sp == True:
    #     nnz_tot = T_in.nnz_tot
    # else:
    #     nnz_tot = ctf.sum(omega)
    if tenpy.name() == 'ctf':
        nnz_tot = T_in.nnz_tot
    else:
        nnz_tot = np.sum(O)
    t_ccd = ctf.timer_epoch("ccd")
    regu = reg_als
    tenpy.printf("--------------------------------ccd-----------------------")
    start = time.time()
    # T_in = backend.einsum('ijk,ijk->ijk',T,O)
    it = 0
    time_all = 0
    if csv_file is not None:
        csv_writer = csv.writer(csv_file,
                                delimiter=',',
                                quotechar='|',
                                quoting=csv.QUOTE_MINIMAL)
    for i in range(num_iter_als):
        it += 1
        s = time.time()
        t_ccd.begin()
        X = opt.step(regu)
        t_ccd.end()
        e = time.time()
        time_all += e - s
        M = tenpy.TTTP(O, X)
        ctf.Sparse_add(M, T_in, beta=-1)  # M = M - T_in
        rmse = tenpy.vecnorm(M) / (nnz_tot)**0.5
        M.set_zero()
        if tenpy.is_master_proc():
            tenpy.printf("After " + str(it) + " iterations,")
            tenpy.printf("RMSE is", rmse)
            # print("Full Tensor Objective", (tenpy.norm(tenpy.einsum('ir,jr,kr->ijk',X[0],X[1],X[2])-T)))
            if csv_file is not None:
                csv_writer.writerow([i, time_all, rmse, i, 'CCD'])
                csv_file.flush()
        if rmse < tol:
            tenpy.printf("Ending algo due to tolerance")
            break
    end = time.time()
    tenpy.printf('ccd time taken is ', end - start)
    return X
def matvec(self, regu, delta, d_derivative):
    N = len(self.A)
    ret = []
    lst_mat = self.A[:]
    lst_mat[0] = delta[0].copy()
    inter = self.tenpy.TTTP(d_derivative, lst_mat)
    s_derivative = d_derivative.copy()
    ctf.Sparse_add(s_derivative, self.T, beta=-1)
    for n in range(1, N):
        lst_mat = self.A[:]
        lst_mat[n] = delta[n].copy()
        M = self.tenpy.TTTP(d_derivative, lst_mat)
        ctf.Sparse_add(inter, M)
    lst_mat = self.A[:]
    lst_mat[0] = self.tenpy.zeros(self.A[0].shape)
    self.tenpy.MTTKRP(inter, lst_mat, 0)
    ret.append(self.tenpy.zeros(self.A[0].shape))
    ret[0] += lst_mat[0]
    ret[0] += regu * delta[0]
    for n in range(1, N):
        ret.append(self.tenpy.zeros(self.A[n].shape))
        lst_mat = self.A[:]
        lst_mat[n] = self.tenpy.zeros(self.A[n].shape)
        self.tenpy.MTTKRP(inter, lst_mat, n)
        ret[n] += lst_mat[n]
        ret[n] += regu * delta[n]
    '''
    for n in range(N):
        lst_mat = self.A[:]
        lst_mat[n] = self.tenpy.zeros(self.A[n].shape)
        for i in range(N):
            if i != n:
                lst_mat[i] = delta[i].copy()
                self.tenpy.MTTKRP(s_derivative, lst_mat, n)
                ret[n] += lst_mat[n]
                lst_mat[i] = self.A[i].copy()
    '''
    return ret
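# --- Hedged sketch (not part of the library) --------------------------------
# Dense NumPy reference of what the matvec above computes for a 3-way tensor,
# assuming d_derivative holds e^m on the observed entries (as returned by
# Get_RHS above): the weighted Gauss-Newton product J^T diag(e^m) J applied to
# the perturbation list delta, plus the regularization term.  Illustrative
# only; the function name is hypothetical.
import numpy as np

def poisson_gn_matvec_reference(Omega, A, delta, regu):
    m = np.einsum('ir,jr,kr->ijk', A[0], A[1], A[2])
    w = Omega * np.exp(m)                 # d_derivative: e^m on observed entries
    # J @ delta, accumulated on the observed entries.
    Jd = w * (np.einsum('ir,jr,kr->ijk', delta[0], A[1], A[2])
              + np.einsum('ir,jr,kr->ijk', A[0], delta[1], A[2])
              + np.einsum('ir,jr,kr->ijk', A[0], A[1], delta[2]))
    # J^T applied back, one MTTKRP per mode, plus regu * delta.
    return [np.einsum('ijk,jr,kr->ir', Jd, A[1], A[2]) + regu * delta[0],
            np.einsum('ijk,ir,kr->jr', Jd, A[0], A[2]) + regu * delta[1],
            np.einsum('ijk,ir,jr->kr', Jd, A[0], A[1]) + regu * delta[2]]
# -----------------------------------------------------------------------------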
def step(self, regu):
    M = self.tenpy.TTTP(self.Omega, self.A)
    for r in range(self.rank):
        for i in range(len(self.A)):
            lst_vec = []
            for j in range(len(self.A)):
                lst_vec.append(self.A[j][:, r])
            numerator = self.Get_Num(i, r, regu, M)
            denominator = self.Get_Denom(i, r, regu)
            delta = numerator / denominator
            lst_vec[i] = delta
            self.A[i][:, r] += delta
            P = self.tenpy.TTTP(self.Omega, lst_vec)
            ctf.Sparse_add(M, P)
    return self.A
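# --- Hedged sketch (not part of the library) --------------------------------
# Dense NumPy reference of a single CCD column update for a 3-way tensor,
# corresponding to delta = numerator / denominator above.  Get_Denom is not
# shown in this section; the denominator below assumes the standard
# ridge-regularized least-squares coordinate update and may differ from the
# library's implementation.  The function name is hypothetical.
import numpy as np

def ccd_column_update_reference(T, Omega, A, i, r, regu):
    # Residual of the current full model on the observed entries.
    m = np.einsum('ir,jr,kr->ijk', A[0], A[1], A[2])
    resid = Omega * (T - m)
    cols = [A[j][:, r] for j in range(3)]
    if i == 0:
        num = np.einsum('ijk,j,k->i', resid, cols[1], cols[2]) - regu * cols[0]
        den = np.einsum('ijk,j,k->i', Omega, cols[1]**2, cols[2]**2) + regu
    elif i == 1:
        num = np.einsum('ijk,i,k->j', resid, cols[0], cols[2]) - regu * cols[1]
        den = np.einsum('ijk,i,k->j', Omega, cols[0]**2, cols[1]**0 * cols[2]**2) + regu
    else:
        num = np.einsum('ijk,i,j->k', resid, cols[0], cols[1]) - regu * cols[2]
        den = np.einsum('ijk,i,j->k', Omega, cols[0]**2, cols[1]**2) + regu
    return num / den  # the delta added to A[i][:, r]
# -----------------------------------------------------------------------------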
def getCPGN(tenpy, T_in, T, O, X, reg_GN, num_iter_GN, tol, csv_file):
    opt = CP_GN_Completer(tenpy, T_in, O, X)
    if tenpy.name() == 'ctf':
        nnz_tot = T_in.nnz_tot
    else:
        nnz_tot = np.sum(O)
    regu = reg_GN
    tenpy.printf("--------------------------------GN with CG-----------------------------")
    start = time.time()
    # T_in = backend.einsum('ijk,ijk->ijk',T,O)
    it = 0
    time_all = 0
    if csv_file is not None:
        csv_writer = csv.writer(csv_file,
                                delimiter=',',
                                quotechar='|',
                                quoting=csv.QUOTE_MINIMAL)
    for i in range(num_iter_GN):
        it += 1
        s = time.time()
        [X, cg_iters] = opt.step(regu)
        e = time.time()
        if regu > 1e-05:
            regu = regu / 2
        time_all += e - s
        M = tenpy.TTTP(O, X)
        ctf.Sparse_add(M, T_in, beta=-1)  # M = M - T_in
        rmse = tenpy.vecnorm(M) / (nnz_tot)**0.5
        if tenpy.is_master_proc():
            tenpy.printf("After " + str(it) + " iterations, and time ", time_all)
            tenpy.printf("RMSE is", rmse)
            # print("Full Tensor Objective", (tenpy.norm(tenpy.einsum('ir,jr,kr->ijk',X[0],X[1],X[2])-T)))
            if csv_file is not None:
                csv_writer.writerow([i, time_all, rmse, cg_iters, 'GN'])
                csv_file.flush()
        if rmse < tol:
            tenpy.printf("Ending algo due to tolerance")
            break
    end = time.time()
    tenpy.printf('GN time taken is ', end - start)
    return X
def Get_RHS(self, num, regu):
    # The gradient of the loss function is MTTKRP(e^m - x); we need the negative of this.
    Omega_ = self.sampled_T.copy()
    ctf.get_index_tensor(Omega_)
    M = self.tenpy.TTTP(Omega_, self.A)
    # inter = elementwise_exp(self.tenpy.TTTP(getOmega(self.sampled_T), self.A))
    ctf.Sparse_exp(M)
    ctf.Sparse_add(M, self.sampled_T, alpha=-1)  # inter = subtract_sparse(self.sampled_T, inter)
    lst_mat = []
    for j in range(len(self.A)):
        if j != num:
            lst_mat.append(self.A[j])
        else:
            lst_mat.append(self.tenpy.zeros(self.A[num].shape))
    self.tenpy.MTTKRP(M, lst_mat, num)
    # inter.set_zero()
    grad = lst_mat[num] - regu * self.A[num]
    # self.tenpy.printf("The norm of gradient is ", self.tenpy.vecnorm(grad))
    return grad
def getPCPGN(tenpy, T_in, T, O, X, reg_GN, num_iter_GN, tol, csv_file):
    opt = Poisson_CP_GN_Completer(tenpy, T_in, O, X)
    if tenpy.name() == 'ctf':
        nnz_tot = T_in.nnz_tot
    else:
        nnz_tot = np.sum(O)
    regu = reg_GN
    tenpy.printf("--------------------------------Poisson GN with CG-----------------------------")
    t_ALS = ctf.timer_epoch("Poisson_GN")
    start = time.time()
    # T_in = backend.einsum('ijk,ijk->ijk',T,O)
    it = 0
    time_all = 0
    # Constant part of the Poisson objective: sum(T*log(T) - T) over observed entries.
    # val2 = ctf.sum(subtract_sparse(elementwise_prod(T_in, elementwise_log(T_in)), T_in))
    P = T_in.copy()
    ctf.Sparse_log(P)
    ctf.Sparse_mul(P, T_in)
    ctf.Sparse_add(P, T_in, beta=-1)
    val2 = ctf.sum(P)
    # Model-dependent part: sum(e^m - T*m) over observed entries.
    # val = ctf.sum(subtract_sparse(ctf.exp(M), elementwise_prod(T_in, M)))
    M = tenpy.TTTP(O, X)
    P = M.copy()
    ctf.Sparse_mul(P, T_in)
    ctf.Sparse_exp(M)
    # rmse_lsq = tenpy.vecnorm(T_in - M) / (nnz_tot)**0.5
    # tenpy.printf("least square RMSE is", rmse_lsq)
    ctf.Sparse_add(M, P, beta=-1)
    val = ctf.sum(M)
    P.set_zero()
    M.set_zero()
    rmse = (val + val2) / nnz_tot
    if tenpy.is_master_proc():
        tenpy.printf("After " + str(it) + " iterations,")
        tenpy.printf("RMSE is", rmse)
    if csv_file is not None:
        csv_writer = csv.writer(csv_file,
                                delimiter=',',
                                quotechar='|',
                                quoting=csv.QUOTE_MINIMAL)
    for i in range(num_iter_GN):
        it += 1
        s = time.time()
        t_ALS.begin()
        X = opt.step(regu)
        t_ALS.end()
        e = time.time()
        time_all += e - s
        # rmse = tenpy.vecnorm(tenpy.TTTP(O,[U,V,W]) - T_in) / (nnz_tot)**0.5
        M = tenpy.TTTP(O, X)
        P = M.copy()
        ctf.Sparse_mul(P, T_in)
        ctf.Sparse_exp(M)
        ctf.Sparse_add(M, P, beta=-1)
        val = ctf.sum(M)
        P.set_zero()
        M.set_zero()
        rmse = (val + val2) / nnz_tot
        regu = regu / 2
        if tenpy.is_master_proc():
            tenpy.printf("After " + str(it) + " iterations,")
            tenpy.printf("RMSE is", rmse)
            # print("Full Tensor Objective", (tenpy.norm(tenpy.einsum('ir,jr,kr->ijk',U,V,W)-T)))
            if csv_file is not None:
                csv_writer.writerow([i, time_all, rmse, i, 'PGN'])
                csv_file.flush()
        if abs(rmse) < tol:
            tenpy.printf("Ending algo due to tolerance")
            break
    end = time.time()
    tenpy.printf('Poisson_GN time taken is ', end - start)
    return X
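# --- Hedged sketch (not part of the library) --------------------------------
# Dense NumPy reference of the convergence value printed as "RMSE" in the
# Poisson drivers above: the normalized Poisson objective
# (sum(e^m - T*m) + sum(T*log(T) - T)) / nnz over the observed entries, where
# m is the CP model.  Illustrative only; the function name is hypothetical.
import numpy as np

def poisson_objective_reference(T, Omega, A, nnz_tot):
    m = np.einsum('ir,jr,kr->ijk', A[0], A[1], A[2])
    # Model-dependent part, restricted to observed entries.
    val = np.sum(Omega * (np.exp(m) - T * m))
    # Constant part; the guard avoids log(0) at zero-valued observed entries.
    val2 = np.sum(Omega * (T * np.log(np.where(T > 0, T, 1.0)) - T))
    return (val + val2) / nnz_tot
# -----------------------------------------------------------------------------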
def sgd_poisson(tenpy, T_in, T, O, U, V, W, reg_als, I, J, K, R, num_iter_als, tol, csv_file):
    step_size = 0.03
    opt = Poisson_sgd_Completer(tenpy, T_in, O, [U, V, W], step_size)
    # if T_in.sp == True:
    #     nnz_tot = T_in.nnz_tot
    # else:
    #     nnz_tot = ctf.sum(omega)
    if tenpy.name() == 'ctf':
        nnz_tot = T_in.nnz_tot
    else:
        nnz_tot = np.sum(O)
    t_ALS = ctf.timer_epoch("poisson_sgd")
    regu = reg_als
    tenpy.printf("--------------------------------Poisson_sgd-----------------------")
    start = time.time()
    # T_in = backend.einsum('ijk,ijk->ijk',T,O)
    it = 0
    time_all = 0
    # Constant part of the Poisson objective: sum(T*log(T) - T) over observed entries.
    # val2 = ctf.sum(subtract_sparse(elementwise_prod(T_in, elementwise_log(T_in)), T_in))
    P = T_in.copy()
    ctf.Sparse_log(P)
    ctf.Sparse_mul(P, T_in)
    ctf.Sparse_add(P, T_in, beta=-1)
    val2 = ctf.sum(P)
    P.set_zero()
    if csv_file is not None:
        csv_writer = csv.writer(csv_file,
                                delimiter=',',
                                quotechar='|',
                                quoting=csv.QUOTE_MINIMAL)
    for i in range(num_iter_als):
        it += 1
        s = time.time()
        # t_ALS.begin()
        [U, V, W] = opt.step(regu)
        # t_ALS.end()
        e = time.time()
        time_all += e - s
        # rmse = tenpy.vecnorm(tenpy.TTTP(O,[U,V,W]) - T_in) / (nnz_tot)**0.5
        if it % 20 == 0:
            # Evaluate the Poisson objective every 20 iterations.
            M = tenpy.TTTP(O, [U, V, W])
            # val = ctf.sum(subtract_sparse(ctf.exp(M), elementwise_prod(T_in, M)))
            P = M.copy()
            ctf.Sparse_mul(P, T_in)
            ctf.Sparse_exp(M)
            ctf.Sparse_add(M, P, beta=-1)
            val = ctf.sum(M)
            P.set_zero()
            M.set_zero()
            rmse = (val + val2) / nnz_tot
            if tenpy.is_master_proc():
                tenpy.printf("After " + str(it) + " iterations, and time is", time_all)
                tenpy.printf("RMSE is", rmse)
                # print("Full Tensor Objective", (tenpy.norm(tenpy.einsum('ir,jr,kr->ijk',U,V,W)-T)))
                if csv_file is not None:
                    csv_writer.writerow([i, time_all, rmse, i, 'PALS'])
                    csv_file.flush()
            if abs(rmse) < tol:
                tenpy.printf("Ending algo due to tolerance")
                break
    end = time.time()
    tenpy.printf('Poisson sgd time taken is ', end - start)
    return [U, V, W]