def load_hcp_tcgn(device):
    """Load the HCP example dataset, coarsen its graph, and split into train/test.

    Args:
        device: torch device the COO edge-index tensors are moved to.

    Returns:
        Tuple (graphs, coos, train_data, test_data, train_labels, test_labels):
        the coarsened graph hierarchy, their edge indices on `device`, and an
        80/20 split of the node-permuted time series.
    """
    time_series, labels, As = load_hcp_example()

    # Multi-level coarsening of the first adjacency matrix; `perm` maps the
    # original node order to the coarsened layout.
    coarsening_levels = 4
    graphs, perm = coarsening.coarsen(As[0], levels=coarsening_levels,
                                      self_connections=False)

    # One COO edge-index tensor per coarsening level.
    coos = []
    for graph in graphs:
        coo = graph.tocoo()  # convert once; reuse for both row and col
        coos.append(torch.tensor([coo.row, coo.col],
                                 dtype=torch.long).to(device))

    # Deterministic 80/20 split along the first (sample) axis.
    n_train = int(0.8 * time_series.shape[0])
    idx_train = range(n_train)
    print('Size of train set: {}'.format(len(idx_train)))
    idx_test = range(n_train, time_series.shape[0])
    print('Size of test set: {}'.format(len(idx_test)))

    train_data = time_series[idx_train]
    train_labels = labels[idx_train]
    test_data = time_series[idx_test]
    test_labels = labels[idx_test]

    # Reorder the node axis to match the coarsened graph layout.
    train_data = perm_data_time(train_data, perm)
    test_data = perm_data_time(test_data, perm)

    return graphs, coos, train_data, test_data, train_labels, test_labels
def load_hcp_tcgn(device):
    """Load the full HCP dataset and build graph inputs (experimental variant).

    NOTE(review): regardless of `shuffled`, the graph list is finally
    overwritten with a single random sparse matrix `W` built with density=0
    (i.e. an edgeless graph), and the node permutation of the data is
    disabled — this looks like a null-graph baseline; confirm intent.
    """
    time_series, labels, As = load_hcp_example(full=True)
    normalized_laplacian = True  # unused in this function
    coarsening_levels = 4
    shuffled = False  # hard-coded: the shuffle branch below never executes
    A = As[0]
    #A = arr.todense()
    if shuffled:
        # Shuffle the upper-triangular entries of A, then re-symmetrize
        # (keeping the original diagonal counted once).
        B = A.toarray()
        B = list(B[np.triu_indices(A.shape[0])])
        random.shuffle(B)
        A = np.zeros((A.shape[0], A.shape[0]))
        indices = np.triu_indices(A.shape[0])
        A[indices] = B
        A = A + A.T - np.diag(A.diagonal())
        A = sp.csr_matrix(A)
        graphs, perm = coarsening.coarsen(A, levels=coarsening_levels, self_connections=False)
    #else:
    # With density=0 this matrix has no stored entries, so data_rvs is
    # presumably never invoked — TODO confirm this is the intended baseline.
    W = sp.random(As[0].shape[0], As[0].shape[0], density=0, format='csr', data_rvs=lambda s: np.random.uniform(0, 1, size=s))
    #graphs, perm = coarsening.coarsen(W, levels=coarsening_levels, self_connections=False)
    #graphs = [As[0]]
    graphs = [W]  # NOTE(review): discards any coarsened graphs computed above
    # COO edge indices per graph (empty for the edgeless graph W).
    coos = [
        torch.tensor([graph.tocoo().row, graph.tocoo().col], dtype=torch.long).to(device)
        for graph in graphs
    ]
    # Deterministic 80/20 split along the first (sample) axis.
    idx_train = range(int(0.8 * time_series.shape[0]))
    print('Size of train set: {}'.format(len(idx_train)))
    idx_test = range(len(idx_train), time_series.shape[0])
    print('Size of test set: {}'.format(len(idx_test)))
    train_data = time_series[idx_train]
    train_labels = labels[idx_train]
    test_data = time_series[idx_test]
    test_labels = labels[idx_test]
    # Node permutation disabled in this variant (perm may be undefined when
    # shuffled is False, so these must stay commented out together).
    #train_data = perm_data_time(train_data, perm)
    #test_data = perm_data_time(test_data, perm)
    return graphs, coos, train_data, test_data, train_labels, test_labels
def experiment(args):
    """Run one MNIST graph-experiment: build data, train NetMLP, evaluate.

    Builds the (coarsened) graph, loads permuted MNIST data, trains for
    `args.epochs` epochs with Adam plus exponential LR decay, evaluating on
    the test set after every epoch; optionally saves the trained weights.

    Args:
        args: namespace providing at least no_cuda, batch_size, lr, epochs,
            seed, and save_model. `args.reg_weight` is overwritten here.
    """
    args.reg_weight = 5.e-4  # overrides any caller-supplied value

    torch.manual_seed(args.seed)
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    # `perm` maps original pixel order to the coarsened graph layout; the
    # graphs themselves are only needed for the input dimensionality below.
    graphs, perm = create_graph(device)

    train_images, test_images, train_labels, test_labels = get_mnist_data_gcn(
        perm)

    training_set = Dataset(train_images, train_labels)
    train_loader = torch.utils.data.DataLoader(training_set,
                                               batch_size=args.batch_size)
    validation_set = Dataset(test_images, test_labels)
    test_loader = torch.utils.data.DataLoader(validation_set,
                                              batch_size=args.batch_size)

    model = NetMLP(int(graphs[0].shape[0]))
    if torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)
    model.to(device)

    pytorch_total_params = sum(p.numel() for p in model.parameters()
                               if p.requires_grad)
    print(pytorch_total_params)

    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.95)

    # BUG FIX: range(1, args.epochs) ran only args.epochs - 1 epochs
    # (and zero epochs when args.epochs == 1).
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        scheduler.step()
        test(args, model, device, test_loader, epoch)

    if args.save_model:
        torch.save(model.state_dict(), "mnist_cnn.pt")
def forward(self, data, graphs):
    """One graph-convolution layer plus a linear head.

    Args:
        data: object exposing an `x` node-feature tensor (e.g. a PyG Data).
        graphs: list of scipy sparse adjacency matrices; only the first
            level's edge index is used here.

    Returns:
        Log-softmax class scores (dim=1).
    """
    # Build the COO edge index for the first graph level only — the original
    # converted every level but used just coos[0].
    # NOTE(review): this is rebuilt on every forward pass and stays on CPU;
    # consider precomputing it in __init__ and moving it to the input device.
    coo = graphs[0].tocoo()
    edge_index = torch.tensor([coo.row, coo.col], dtype=torch.long)

    x = data.x
    # BUG FIX: the original wrapped conv1 in a bare `except: print("boo")`,
    # which silently swallowed every error and continued with un-convolved
    # features. Let exceptions propagate so failures are visible.
    x = F.relu(self.conv1(x, edge_index))
    #x = F.dropout(x, training=self.training)
    x = self.fc1(x)
    return F.log_softmax(x, dim=1)
def load_hcp_tcgn(device):
    """Load the HCP vote dataset (pre-split) and coarsen its graph.

    Args:
        device: torch device the COO edge-index tensors are moved to.

    Returns:
        Tuple (graphs, coos, train_data, test_data, y_train, y_test) where
        the data arrays have their node axis permuted to match the
        coarsened graph layout.
    """
    X_train, y_train, X_test, y_test, As = vote.load_hcp_vote()

    # Multi-level coarsening of the first adjacency matrix; `perm` maps the
    # original node order to the coarsened layout.
    coarsening_levels = 4
    graphs, perm = coarsening.coarsen(As[0], levels=coarsening_levels,
                                      self_connections=False)

    # One COO edge-index tensor per coarsening level.
    coos = []
    for graph in graphs:
        coo = graph.tocoo()  # convert once; reuse for both row and col
        coos.append(torch.tensor([coo.row, coo.col],
                                 dtype=torch.long).to(device))

    # Reorder the node axis of both splits to the coarsened layout.
    train_data = perm_data_time(X_train, perm)
    test_data = perm_data_time(X_test, perm)

    return graphs, coos, train_data, test_data, y_train, y_test