import numpy as np
from scipy.sparse import csr_matrix


def pc_weight_epsilon(pc_xyz, epsilon_scale):
    """Build an epsilon-neighborhood weight graph from an N x d point cloud."""
    num_data, num_dim = pc_xyz.shape  # N by d
    print('The number of input points is %d' % num_data)
    print('The dimension of the input points is %d' % num_dim)

    # Pairwise squared Euclidean distances: ||x_i||^2 + ||x_j||^2 - 2 x_i . x_j
    pc_xyz_col = np.sum(pc_xyz ** 2, axis=1)
    pc_matrix_dist = np.tile(pc_xyz_col, (num_data, 1)) \
        + np.tile(pc_xyz_col, (num_data, 1)).T \
        - 2 * (pc_xyz @ pc_xyz.T)

    # The default epsilon is the square root of the mean of the distance matrix
    epsilon_default = np.sqrt(np.mean(pc_matrix_dist))
    epsilon = epsilon_default * epsilon_scale
    print('Building the epsilon-NN graph with epsilon = %.10f' % epsilon)

    pc_matrix_preserve = pc_matrix_dist.copy()  # keep an unthresholded copy
    pc_matrix_dist[pc_matrix_dist > epsilon] = 0  # drop pairs outside the epsilon ball

    # Gaussian kernel weights; zeroed entries map to exp(0) = 1, so reset them to 0
    pc_dist_kernel = np.exp(-pc_matrix_dist / epsilon)
    pc_dist_kernel[pc_dist_kernel == 1] = 0  # convert ii (and dropped) entries to 0
    pc_dist_kernel -= np.diag(np.diag(pc_dist_kernel))  # no self-loops

    eps = 2.2204e-16  # machine-epsilon guard against division by zero
    pc_weight_epsilon_unit = csr_matrix(
        pc_dist_kernel / (np.sum(pc_dist_kernel, axis=1)[:, None] + eps))  # row-normalized
    pc_weight_epsilon_sym = csr_matrix(pc_dist_kernel)  # symmetric, unnormalized

    num_neigh = csr_matrix.count_nonzero(pc_weight_epsilon_sym[0, :])
    print('The number of neighbors of the first point is %d' % num_neigh)
    return pc_weight_epsilon_sym, pc_weight_epsilon_unit, epsilon
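# Minimal usage sketch for pc_weight_epsilon: 100 random 3-D points as toy data,
# epsilon_scale = 0.5 chosen arbitrarily for illustration.
pc_demo = np.random.rand(100, 3)
W_sym, W_unit, eps_used = pc_weight_epsilon(pc_demo, epsilon_scale=0.5)
print(W_sym.shape, W_unit.shape, eps_used)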
from scipy.sparse import csr_matrix, dok_matrix


def format_metis(S, metis_fn):
    """Write a sparse (symmetric, weighted) adjacency matrix S to a METIS graph file."""
    rows, cols = S.shape
    if isinstance(S, dok_matrix):
        edges = dok_matrix.count_nonzero(S) // 2
    elif isinstance(S, csr_matrix):
        edges = csr_matrix.count_nonzero(S) // 2
    else:
        edges = S.count_nonzero() // 2  # any other scipy sparse type
    with open(metis_fn, "w") as f:
        # header format: #nodes, #edges, 001 (indicates weighted edges)
        f.write("{} {} 001\n".format(rows, edges))
        # one line per node: pairs of (1-indexed neighbor, integer edge weight)
        for i in range(rows):
            cur_node = []
            for j in range(cols):
                if S[i, j] > 0:
                    cur_node.append("{} {}".format(j + 1, int(S[i, j])))
            f.write("{}\n".format(" ".join(cur_node)))
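# Usage sketch for format_metis on a small hand-made graph: a weighted triangle on 3 nodes.
# The filename 'triangle.graph' is an arbitrary choice for illustration.
import numpy as np

A_demo = csr_matrix(np.array([[0, 2, 1],
                              [2, 0, 3],
                              [1, 3, 0]]))
format_metis(A_demo, "triangle.graph")
# The file starts with the header "3 3 001" (3 nodes, 3 undirected edges, weighted edges),
# followed by one "neighbor weight" adjacency line per node.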
import numpy as np
import scipy.sparse as spr
from scipy.sparse import csr_matrix

test = plylst_test
# print(test)

# Playlist-by-song binary interaction matrix (one row per training playlist)
row = np.repeat(range(n_train), plylst_train['num_songs'])
col = [song for songs in plylst_train['songs_id'] for song in songs]
dat = np.repeat(1, plylst_train['num_songs'].sum())
train_songs_A = spr.csr_matrix((dat, (row, col)), shape=(n_train, n_songs))

# Playlist-by-tag binary interaction matrix
row = np.repeat(range(n_train), plylst_train['num_tags'])
col = [tag for tags in plylst_train['tags_id'] for tag in tags]
dat = np.repeat(1, plylst_train['num_tags'].sum())
train_tags_A = spr.csr_matrix((dat, (row, col)), shape=(n_train, n_tags))

train_songs_A_csr = train_songs_A.tocsr()
train_tags_A_csr = train_tags_A.tocsr()

# Density (fraction of nonzero entries) of each interaction matrix
WT = csr_matrix.count_nonzero(train_songs_A) / (train_songs_A.shape[0] * train_songs_A.shape[1])
Wt = csr_matrix.count_nonzero(train_tags_A) / (train_tags_A.shape[0] * train_tags_A.shape[1])
print(Wt)

j = 1
print(j)

x_train_song = train_songs_A_csr
batch_size = 512
epochs = 1
original_dim = n_songs  # number of songs

# ######## AutoEncoder _ songs
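# Toy sketch of the csr_matrix((dat, (row, col))) construction used above:
# two hypothetical playlists holding 2 and 1 songs from a 4-song catalogue.
toy_num_songs = np.array([2, 1])
toy_songs_id = [[0, 3], [2]]
toy_row = np.repeat(range(2), toy_num_songs)            # [0, 0, 1]
toy_col = [s for songs in toy_songs_id for s in songs]  # [0, 3, 2]
toy_dat = np.repeat(1, toy_num_songs.sum())
toy_A = spr.csr_matrix((toy_dat, (toy_row, toy_col)), shape=(2, 4))
print(toy_A.toarray())  # [[1 0 0 1], [0 0 1 0]]
print(toy_A.count_nonzero() / (toy_A.shape[0] * toy_A.shape[1]))  # density, as for WT / Wt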
# Least-squares decoding branch (the enclosing "if" condition is above this excerpt):
    CC = np.linalg.inv(np.matmul(np.transpose(Coding_matrix), Coding_matrix))
    res_p = np.matmul(CC, np.matmul(np.transpose(Coding_matrix), output_worker))
    atw = list(range(k))
    aow = [i for i in atw if i not in amw]  # worker indices not among the missing ones
    res_p = np.reshape(res_p, (q * c * np.size(aow), 1))
    for j in range(0, np.size(apw)):
        res[:, aow[j]] = res_p[j * c * q:(j + 1) * c * q].ravel()
else:
    ## Peeling decoder for all 1's
    AA = csr_matrix(Coding_matrix)
    BB = output_worker
    res2 = np.transpose(res)
    res2 = np.reshape(res2, (Delta, c))
    while csr_matrix.count_nonzero(AA) > 0:
        ind1 = AA.sum(axis=1)  ## finding the rows with a single unknown
        ind3 = np.where(ind1 == 1)
        ind2 = ind3[0]
        (aa, bb) = np.nonzero(AA[ind2, :])
        ij = np.argsort(aa)
        bb = bb[ij]
        (_, imp_rows) = np.unique(bb, axis=0, return_index=True)  # keep one row per unknown
        bb = bb[imp_rows]
        res2[bb, :] = BB[ind2[imp_rows], :]  ## recovering unknowns
        (ee, ff) = np.nonzero(AA[:, bb])
        for ii in range(0, np.size(ee)):
            BB[ee[ii], :] = BB[ee[ii], :] - res2[bb[ff[ii]], :]  ## peel recovered blocks off
        AA[ee, bb[ff]] = 0
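# Self-contained toy sketch of the peeling idea used in the while-loop above
# (a simplified re-implementation on made-up data, not the decoder's actual inputs):
# a row with exactly one remaining nonzero reveals one unknown block, which is then
# subtracted ("peeled") from every other row that uses it.
import numpy as np
from scipy.sparse import lil_matrix

X_true = np.array([[1., 2.], [3., 4.], [5., 6.]])  # three unknown blocks of width c = 2
A_toy = np.array([[1, 0, 0],
                  [1, 1, 0],
                  [0, 1, 1]])
AA_toy = lil_matrix(A_toy)      # lil format is cheap to edit entry-wise
BB_toy = A_toy @ X_true         # observed worker outputs for the toy system
rec = np.zeros_like(X_true)
while AA_toy.count_nonzero() > 0:
    singles = np.where(np.ravel(AA_toy.sum(axis=1)) == 1)[0]  # rows with one unknown left
    for r in singles:
        cols = AA_toy[r, :].nonzero()[1]
        if cols.size == 0:      # already peeled earlier in this pass
            continue
        col = cols[0]
        rec[col, :] = BB_toy[r, :]                 # recover the unknown block
        users = AA_toy[:, col].nonzero()[0]
        BB_toy[users, :] -= rec[col, :]            # peel it off every row that uses it
        AA_toy[users, col] = 0
print(np.allclose(rec, X_true))                    # True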