# Imports used by the code in this section; the enclosing class definition
# is not shown here.
import numpy as np
import scipy.sparse
import scipy.sparse as sp
import tensorflow as tf
from sklearn.cluster import SpectralClustering

import graph  # local module providing laplacian() and fourier()


def fourier(self, x, L, Fout, K):
    assert K == L.shape[0]  # artificial but useful to compute the number of parameters
    N, M, Fin = x.get_shape()
    N, M, Fin = int(N), int(M), int(Fin)
    # Fourier basis
    _, U = graph.fourier(L)
    U = tf.constant(U.T, dtype=tf.float32)
    # Weights
    W = self._weight_variable([M, Fout, Fin], regularization=False)
    return self.filter_in_fourier(x, L, Fout, K, U, W)
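# `filter_in_fourier` is referenced above but not shown in this section.
# Below is a minimal sketch of what it presumably does, following the
# standard non-parametric spectral filtering pattern (transform to the
# Fourier domain with U, apply one Fout x Fin weight matrix per frequency,
# transform back). The shape comments use the N x M x Fin layout of the
# methods above. This is a hypothetical reconstruction, not the original
# helper, and assumes TF1-style static shapes.
def filter_in_fourier(self, x, L, Fout, K, U, W):
    N, M, Fin = x.get_shape()
    N, M, Fin = int(N), int(M), int(Fin)
    x = tf.transpose(x, perm=[1, 2, 0])     # M x Fin x N
    # Transform to Fourier domain.
    x = tf.reshape(x, [M, Fin * N])         # M x Fin*N
    x = tf.matmul(U, x)                     # M x Fin*N
    x = tf.reshape(x, [M, Fin, N])          # M x Fin x N
    # Filter: batched matmul applies one Fout x Fin matrix per frequency.
    x = tf.matmul(W, x)                     # M x Fout x N
    x = tf.transpose(x)                     # N x Fout x M
    x = tf.reshape(x, [N * Fout, M])        # N*Fout x M
    # Transform back to the vertex domain.
    x = tf.matmul(x, U)                     # N*Fout x M
    x = tf.reshape(x, [N, Fout, M])         # N x Fout x M
    return tf.transpose(x, perm=[0, 2, 1])  # N x M x Fout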
def coarsening_pooling(self, normalize=True):
    adj = scipy.sparse.csr_matrix(self.adjacency_matrix)
    for i in range(len(self.pooling_sizes)):
        adj_coarsened, pooling_matrices = self._coarserning_pooling_(
            adj, self.pooling_sizes[i], normalize)
        pooling_matrices = np.array(pooling_matrices)
        self.graphs.append(adj_coarsened)
        self.layer2pooling_matrices[i] = pooling_matrices
        adj = scipy.sparse.csr_matrix(adj_coarsened)

    # num_nodes_before_final = adj_coarsened.shape[0]
    # if num_nodes_before_final < 4:
    #     num_nodes_before_final = 4
    num_nodes_before_final = 4
    pooling_matrices_final = [
        sp.lil_matrix((adj_coarsened.shape[0], 1))
        for i in range(num_nodes_before_final)
    ]
    if adj_coarsened.shape[0] > 1:
        L_i = graph.laplacian(adj_coarsened, normalize)
        lamb_i, U_i = graph.fourier(L_i)
        for j in range(num_nodes_before_final):
            if j < adj_coarsened.shape[0]:
                # Fix the eigenvector sign so its first entry is non-negative.
                if U_i[0, j] < 0:
                    pooling_matrices_final[j][:, 0] = -U_i[:, j].reshape(-1, 1)
                else:
                    pooling_matrices_final[j][:, 0] = U_i[:, j].reshape(-1, 1)
            else:
                # Fewer nodes than slots: reuse the last eigenvector.
                last = adj_coarsened.shape[0] - 1
                if U_i[0, last] < 0:
                    pooling_matrices_final[j][:, 0] = -U_i[:, last].reshape(-1, 1)
                else:
                    pooling_matrices_final[j][:, 0] = U_i[:, last].reshape(-1, 1)
    else:
        for j in range(num_nodes_before_final):
            pooling_matrices_final[j][:, 0] = adj_coarsened.reshape(-1, 1)
    self.layer2pooling_matrices[i + 1] = pooling_matrices_final
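# NOTE: standalone illustration, not part of the original source. Both
# pooling routines in this section flip each Laplacian eigenvector until
# its first entry is non-negative; eigenvectors are only defined up to
# sign, so this convention makes the pooling matrices deterministic.
# L_toy and the names below are illustrative assumptions.
L_toy = np.array([[ 1., -1.,  0.],
                  [-1.,  2., -1.],
                  [ 0., -1.,  1.]])
lamb, U = np.linalg.eigh(L_toy)         # eigendecomposition of a toy Laplacian
U_fixed = np.where(U[0, :] < 0, -U, U)  # flip columns whose first entry is negative
assert np.all(U_fixed[0, :] >= 0)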
def spline(self, x, L, Fout, K):
    N, M, Fin = x.get_shape()
    N, M, Fin = int(N), int(M), int(Fin)
    # Fourier basis
    lamb, U = graph.fourier(L)
    U = tf.constant(U.T, dtype=tf.float32)  # M x M
    # Spline basis
    B = bspline_basis(K, lamb, degree=3)  # M x K
    # B = bspline_basis(K, len(lamb), degree=3)  # M x K
    B = tf.constant(B, dtype=tf.float32)
    # Weights
    W = self._weight_variable([K, Fout * Fin], regularization=False)
    W = tf.matmul(B, W)  # M x Fout*Fin
    W = tf.reshape(W, [M, Fout, Fin])
    return self.filter_in_fourier(x, L, Fout, K, U, W)
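# `bspline_basis` is referenced above but not defined in this section.
# Below is a minimal sketch of one plausible implementation, assuming
# uniform interior knots over the eigenvalue range, repeated end knots,
# and the Cox-de Boor recursion; x is assumed sorted ascending (as
# Laplacian eigenvalues usually are). Treat this as an illustration, not
# the original helper.
def bspline_basis(K, x, degree=3):
    """Return a len(x) x K matrix of B-spline basis functions at points x."""
    if np.isscalar(x):
        x = np.linspace(0, 1, x)
    x = np.asarray(x)
    # Knot vector: degree-fold repeated end knots, uniform interior knots.
    kv = np.concatenate((
        x.min() * np.ones(degree),
        np.linspace(x.min(), x.max(), K - degree + 1),
        x.max() * np.ones(degree)))

    def cox_deboor(k, d):
        # Degree-0 splines are indicator functions of the knot intervals.
        if d == 0:
            return ((x - kv[k] >= 0) & (x - kv[k + 1] < 0)).astype(float)
        denom1 = kv[k + d] - kv[k]
        term1 = ((x - kv[k]) / denom1) * cox_deboor(k, d - 1) if denom1 > 0 else 0
        denom2 = kv[k + d + 1] - kv[k + 1]
        term2 = ((-(x - kv[k + d + 1]) / denom2) * cox_deboor(k + 1, d)) if denom2 > 0 else 0
        return term1 + term2

    basis = np.column_stack([cox_deboor(k, degree) for k in range(K)])
    basis[-1, -1] = 1.0  # close the right end of the support
    return basis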
def __init__(self, L, F):
    super(fgcnn2, self).__init__()
    # self.L = L  # Graph Laplacian, NFEATURES x NFEATURES
    self.F = F  # Number of filters
    _, self.U = graph.fourier(L)
def _coarserning_pooling_(self, adjacency_matrix, pooling_size, normalize=False):
    num_nodes = adjacency_matrix.shape[0]
    A_dense = adjacency_matrix.todense()
    num_clusters = max(int(num_nodes / pooling_size), 1)
    sc = SpectralClustering(n_clusters=num_clusters, affinity='precomputed', n_init=10)
    sc.fit(A_dense)

    # Group node indices by cluster label.
    clusters = dict()
    for inx, label in enumerate(sc.labels_):
        if label not in clusters:
            clusters[label] = []
        clusters[label].append(inx)
    num_clusters = len(clusters)

    num_nodes_in_largest_clusters = 0
    for label in clusters:
        if len(clusters[label]) >= num_nodes_in_largest_clusters:
            num_nodes_in_largest_clusters = len(clusters[label])
    if num_nodes_in_largest_clusters <= 5:
        num_nodes_in_largest_clusters = 5
    num_nodes_in_largest_clusters = 5  # fixed at 5 pooling matrices per layer (overrides the computation above)

    Adjacencies_per_cluster = [
        adjacency_matrix[clusters[label], :][:, clusters[label]]
        for label in range(len(clusters))
    ]

    # Intra-cluster adjacency: zero out every link that leaves a cluster.
    A_int = sp.lil_matrix(adjacency_matrix)
    for i in range(len(clusters)):
        zero_list = list(set(range(num_nodes)) - set(clusters[i]))
        for j in clusters[i]:
            A_int[j, zero_list] = 0
            A_int[zero_list, j] = 0

    # Adjacency matrix with only external (inter-cluster) links.
    A_ext = adjacency_matrix - A_int

    # Cluster-membership indicator matrix Omega (num_nodes x num_clusters).
    row_inds = []
    col_inds = []
    data = []
    for i in clusters:
        for j in clusters[i]:
            row_inds.append(j)
            col_inds.append(i)
            data.append(1)
    Omega = sp.coo_matrix((data, (row_inds, col_inds)))

    # Coarsened adjacency: Omega^T * A_ext * Omega.
    A_coarsened = np.dot(np.dot(np.transpose(Omega), A_ext), Omega)

    # Pooling matrices: one per eigenvector slot, each num_nodes x num_clusters.
    pooling_matrices = [
        sp.lil_matrix((num_nodes, num_clusters))
        for i in range(num_nodes_in_largest_clusters)
    ]
    for i in clusters:
        adj = Adjacencies_per_cluster[i]
        if len(clusters[i]) > 1:
            L_i = graph.laplacian(adj, normalize)
            lamb_i, U_i = graph.fourier(L_i)
            for j in range(num_nodes_in_largest_clusters):
                if j < len(clusters[i]):
                    # Fix the eigenvector sign so its first entry is non-negative.
                    if U_i[0, j] < 0:
                        pooling_matrices[j][clusters[i], i] = -U_i[:, j].reshape(-1, 1)
                    else:
                        pooling_matrices[j][clusters[i], i] = U_i[:, j].reshape(-1, 1)
                else:
                    # Cluster smaller than the slot count: reuse the last eigenvector.
                    last = len(clusters[i]) - 1
                    if U_i[0, last] < 0:
                        pooling_matrices[j][clusters[i], i] = -U_i[:, last].reshape(-1, 1)
                    else:
                        pooling_matrices[j][clusters[i], i] = U_i[:, last].reshape(-1, 1)
        else:
            # Singleton cluster: its adjacency entry is used directly.
            for j in range(num_nodes_in_largest_clusters):
                pooling_matrices[j][clusters[i], i] = adj.reshape(-1, 1)
    return A_coarsened, pooling_matrices
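# NOTE: standalone worked example, not part of the original source. It
# shows the indicator-matrix identity used above, A_coarsened =
# Omega^T A_ext Omega, on a four-node graph split into two assumed
# clusters {0, 1} and {2, 3}; the numbers are illustrative only.
A = np.array([[0, 1, 0, 0],
              [1, 0, 1, 0],
              [0, 1, 0, 1],
              [0, 0, 1, 0]], dtype=float)
# Intra-cluster part keeps only edges inside each cluster.
A_int_toy = np.array([[0, 1, 0, 0],
                      [1, 0, 0, 0],
                      [0, 0, 0, 1],
                      [0, 0, 1, 0]], dtype=float)
A_ext_toy = A - A_int_toy  # only the inter-cluster edge (1, 2) survives
# Indicator matrix: one row per node, one column per cluster.
Omega_toy = np.array([[1, 0],
                      [1, 0],
                      [0, 1],
                      [0, 1]], dtype=float)
A_coarsened_toy = Omega_toy.T @ A_ext_toy @ Omega_toy
print(A_coarsened_toy)  # [[0. 1.] [1. 0.]] -- the two clusters linked by one edge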
# Number of subjects
N = 21
# Number of matrices per subject (coming from dynamical connectivity)
per_subj = 20
N = int(N)
per_subj = int(per_subj)

# Common A (read it for now)
# A = read_A("/lustre/scratch/wbic-beta/mmc57/hcp_data/sst_rest_data/csv_files/laplacian/A.csv")
fname_ = "/lustre/scratch/wbic-beta/mmc57/hcp_data/sst_rest_data/csv_files/laplacian/dum_Lapl.csv"

# Make the Laplacian
# L, perm, levs, L_list = make_laplacian(A)
L, perm, levs, L_list = make_laplacian_directory(fname_)
_, U = graph.fourier(L)
U = U.astype(np.float32)
L1 = L_list[1]
L2 = L_list[2]
L3 = L_list[3]
# L = convert_sparse_matrix_to_sparse_tensor(L)
# L1 = convert_sparse_matrix_to_sparse_tensor(L[1])
# L2 = convert_sparse_matrix_to_sparse_tensor(L[2])
# L3 = convert_sparse_matrix_to_sparse_tensor(L[3])

directory1 = "/lustre/scratch/wbic-beta/mmc57/hcp_data/sst_rest_data/csv_files/mat_files3/*.csv"
data = read_mat_stuff_perm(N, per_subj, directory1, perm)
directory2 = "/lustre/scratch/wbic-beta/mmc57/hcp_data/sst_rest_data/csv_files/indices3/label.csv"
input_labels = read_labels(directory2)
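# The commented-out lines above reference a `convert_sparse_matrix_to_sparse_tensor`
# helper that is not shown in this section. Below is a minimal sketch of the
# usual SciPy-to-TensorFlow conversion it presumably performs; the exact
# original signature and body are assumptions.
def convert_sparse_matrix_to_sparse_tensor(X):
    """Convert a SciPy sparse matrix to a tf.SparseTensor (assumed helper)."""
    coo = X.tocoo()
    indices = np.stack([coo.row, coo.col], axis=1).astype(np.int64)  # nnz x 2
    return tf.SparseTensor(indices, coo.data.astype(np.float32), coo.shape)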