# Standard scientific-Python imports. `graph` and `coarsening` are the
# cnn_graph-style utility modules used throughout this file; load_hcp_example,
# perm_data_time, chebyshev_time_vertex, ReLU and the global `hyper` dict are
# assumed to be provided elsewhere in this project. logsumexp is assumed to
# come from scipy.special.
import numpy as np
import scipy.sparse
import torch
from scipy.sparse import coo_matrix
from scipy.special import logsumexp


def load_hcp_tcgn(device):
    time_series, labels, As = load_hcp_example()

    normalized_laplacian = True
    coarsening_levels = 4

    # Coarsen the first adjacency matrix into a multi-level graph pyramid.
    graphs, perm = coarsening.coarsen(As[0], levels=coarsening_levels,
                                      self_connections=False)

    # Dense rescaled Laplacians, one per coarsening level.
    L = [
        torch.tensor(graph.rescale_L(graph.laplacian(A, normalized=normalized_laplacian).todense(),
                                     lmax=2),
                     dtype=torch.float).to(device)
        for A in graphs
    ]

    # Sparse COO versions of the same Laplacians.
    L_sparse = []
    for A in graphs:
        g = graph.rescale_L(graph.laplacian(A, normalized=normalized_laplacian), lmax=2)
        coo = coo_matrix(g)
        indices = np.vstack((coo.row, coo.col))
        i = torch.LongTensor(indices)
        v = torch.FloatTensor(coo.data)
        a = torch.sparse.FloatTensor(i, v, torch.Size(coo.shape)).to(device)
        L_sparse.append(a)

    # 80/20 train/test split along the first (sample) axis.
    idx_train = range(int(0.8 * time_series.shape[0]))
    print('Size of train set: {}'.format(len(idx_train)))
    idx_test = range(len(idx_train), time_series.shape[0])
    print('Size of test set: {}'.format(len(idx_test)))

    train_data = time_series[idx_train]
    train_labels = labels[idx_train]
    test_data = time_series[idx_test]
    test_labels = labels[idx_test]

    # Reorder the vertex dimension to match the coarsening permutation.
    train_data = perm_data_time(train_data, perm)
    test_data = perm_data_time(test_data, perm)

    # Toggle to return the sparse Laplacians instead of the dense ones.
    sparse = False
    laplacian = L_sparse if sparse else L

    return laplacian, train_data, test_data, train_labels, test_labels
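
# A minimal usage sketch for load_hcp_tcgn (an illustration, not part of the
# pipeline): it assumes the HCP example assets that load_hcp_example reads are
# available locally, and falls back to CPU when CUDA is absent.
def _example_load_hcp_tcgn():
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    laplacian, train_data, test_data, train_labels, test_labels = load_hcp_tcgn(device)
    print('train data:', train_data.shape, '| test data:', test_data.shape)
    print('Laplacian levels:', len(laplacian))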
def layer_cheb(params, x, level):
    # Transform to Chebyshev basis: graph.chebyshev returns the stack of
    # Chebyshev polynomials T_k(L) x for k = 0 .. filter_order - 1.
    xc = x.T

    def chebyshev(x, L):
        return graph.chebyshev(L, x, hyper['filter_order'])

    L = graph.rescale_L(hyper['L'][level], lmax=2)
    xc = chebyshev(xc, L)
    xc = xc.T

    # Filter: contract the Chebyshev dimension against the weight matrix.
    if level == 0:
        W = params['W1']
        y = np.einsum('abc,ce->abe', xc, W)
    elif level == 1:
        W = params['W2']
        y = np.einsum('abcd,de->abce', xc, W)

    # Bias (no non-linearity is applied here).
    b = params['b1'] if level == 0 else params['b2']
    y += b

    # N x M x F
    return y
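
# A shape sketch for layer_cheb at level 0, read off the einsum signature
# 'abc,ce->abe': after the Chebyshev transform xc is (samples, vertices,
# filter_order), so W1 must be (filter_order, n_filters) and b1 broadcastable
# to the output. The parameter shapes below are assumptions for illustration,
# not the shapes used in the original training script.
def _example_layer_cheb(n_samples=4, n_filters=32):
    K = hyper['filter_order']
    n_vertices = hyper['L'][0].shape[0]
    params = {'W1': 0.01 * np.random.randn(K, n_filters),
              'b1': np.zeros(n_filters)}
    x = np.random.randn(n_samples, n_vertices)
    y = layer_cheb(params, x, level=0)
    print(y.shape)  # expected: (n_samples, n_vertices, n_filters)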
def nn_predict_tgcn_cheb(params, x):
    L = graph.rescale_L(hyper['L'][0], lmax=2)

    # Chebyshev filtering jointly over time (via the FFT) and vertices.
    w = np.fft.fft(x, axis=2)
    xc = chebyshev_time_vertex(L, w, hyper['filter_order'])
    y = np.einsum('knhq,kfh->fnq', xc, params['W1'])
    y += np.expand_dims(params['b1'], axis=2)

    # Non-linear layer
    y = ReLU(y)

    # Dense layer
    y = np.einsum('fnq,cfn->cq', y, params['W2'])
    y += np.expand_dims(params['b2'], axis=1)

    outputs = np.real(y.T)
    # Log-softmax over the class axis.
    return outputs - logsumexp(outputs, axis=1, keepdims=True)
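
# A small sanity check (an assumed usage pattern, not original code):
# nn_predict_tgcn_cheb returns log-probabilities, so exponentiating each row
# should sum to one, and argmax over axis 1 gives the predicted class.
def _check_nn_predict(params, x):
    log_p = nn_predict_tgcn_cheb(params, x)
    assert np.allclose(np.exp(log_p).sum(axis=1), 1.0)
    return log_p.argmax(axis=1)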
def create_graph(device):
    def grid_graph(m, corners=False):
        z = graph.grid(m)
        dist, idx = graph.distance_sklearn_metrics(z, k=number_edges, metric=metric)
        A = graph.adjacency(dist, idx)

        # Connections are only vertical or horizontal on the grid.
        # Corner vertices are connected to 2 neighbors only.
        if corners:
            A = A.toarray()
            A[A < A.max() / 1.5] = 0
            A = scipy.sparse.csr_matrix(A)
            print('{} edges'.format(A.nnz))

        print("{} > {} edges".format(A.nnz // 2, number_edges * m**2 // 2))
        return A

    number_edges = 12
    metric = 'euclidean'
    normalized_laplacian = True
    coarsening_levels = 4

    # Build a k-nearest-neighbour graph on a 28 x 28 grid and coarsen it.
    A = grid_graph(28, corners=False)
    graphs, perm = coarsening.coarsen(A, levels=coarsening_levels,
                                      self_connections=False)
    L = [
        torch.tensor(graph.rescale_L(graph.laplacian(A, normalized=normalized_laplacian).todense(),
                                     lmax=2),
                     dtype=torch.float).to(device)
        for A in graphs
    ]
    del A
    return L, perm
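
# A hedged usage sketch for create_graph: it builds the Laplacian pyramid for
# a 28 x 28 grid (e.g. images treated as graph signals) and reports the vertex
# count at each coarsening level. Illustration only.
def _example_create_graph():
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    L, perm = create_graph(device)
    print('vertices per level:', [l.shape[0] for l in L])
    print('permutation length:', len(perm))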