# Imports assumed by this script; the module paths for DGI, LogReg and process
# follow the reference DGI layout and may differ in this repository.
import os

import numpy as np
import scipy.sparse as sp
import torch
import torch.nn as nn
from tqdm import tqdm, trange

from models import DGI, LogReg
from utils import process


def main():
    saved_graph = os.path.join('assets', 'saved_graphs', 'best_dgi.pickle')
    saved_logreg = os.path.join('assets', 'saved_graphs', 'best_logreg.pickle')
    dataset = 'cora'

    # training params
    batch_size = 1
    nb_epochs = 10000
    patience = 25
    lr = 0.001
    l2_coef = 0.0
    drop_prob = 0.0
    hid_units = 512
    sparse = True
    nonlinearity = 'prelu'  # special name to separate parameters

    adj, features, labels, idx_train, idx_test, idx_val = process.load_data(dataset)
    features, _ = process.preprocess_features(features)

    nb_nodes = features.shape[0]
    ft_size = features.shape[1]
    nb_classes = labels.shape[1]

    adj = process.normalize_adj(adj + sp.eye(adj.shape[0]))
    if sparse:
        adj = process.sparse_mx_to_torch_sparse_tensor(adj)
    else:
        adj = (adj + sp.eye(adj.shape[0])).todense()

    features = torch.FloatTensor(features[np.newaxis])
    if not sparse:
        adj = torch.FloatTensor(adj[np.newaxis])
    labels = torch.FloatTensor(labels[np.newaxis])
    idx_train = torch.LongTensor(idx_train)
    idx_val = torch.LongTensor(idx_val)
    idx_test = torch.LongTensor(idx_test)

    print("Training Nodes: {}, Testing Nodes: {}, Validation Nodes: {}".format(
        len(idx_train), len(idx_test), len(idx_val)))

    model = DGI(ft_size, hid_units, nonlinearity)
    optimiser = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2_coef)

    if torch.cuda.is_available():
        print('Using CUDA')
        model.cuda()
        features = features.cuda()
        adj = adj.cuda()  # adj holds either the sparse or the dense adjacency at this point
        labels = labels.cuda()
        idx_train = idx_train.cuda()
        idx_val = idx_val.cuda()
        idx_test = idx_test.cuda()

    b_xent = nn.BCEWithLogitsLoss()
    xent = nn.CrossEntropyLoss()
    cnt_wait = 0
    best = 1e9
    best_t = 0

    if not os.path.exists(saved_graph):
        pbar = trange(nb_epochs)
        for epoch in pbar:
            model.train()
            optimiser.zero_grad()

            # Corruption: permute the node features to create negative samples
            idx = np.random.permutation(nb_nodes)
            shuf_fts = features[:, idx, :]

            lbl_1 = torch.ones(batch_size, nb_nodes)
            lbl_2 = torch.zeros(batch_size, nb_nodes)
            lbl = torch.cat((lbl_1, lbl_2), 1)

            if torch.cuda.is_available():
                shuf_fts = shuf_fts.cuda()
                lbl = lbl.cuda()

            logits = model(features, shuf_fts, adj, sparse, None, None, None)
            loss = b_xent(logits, lbl)
            pbar.desc = 'Loss: {:.4f}'.format(loss.item())

            if loss < best:
                best = loss
                best_t = epoch
                cnt_wait = 0
                torch.save(model.state_dict(), saved_graph)
            else:
                cnt_wait += 1

            if cnt_wait == patience:
                tqdm.write('Early stopping!')
                break

            loss.backward()
            optimiser.step()

    print('Loading {}th Epoch'.format(best_t) if best_t else 'Loading Existing Graph')
    model.load_state_dict(torch.load(saved_graph))

    embeds, _ = model.embed(features, adj, sparse, None)
    train_embs = embeds[0, idx_train]
    val_embs = embeds[0, idx_val]
    test_embs = embeds[0, idx_test]

    train_lbls = torch.argmax(labels[0, idx_train], dim=1)
    val_lbls = torch.argmax(labels[0, idx_val], dim=1)
    test_lbls = torch.argmax(labels[0, idx_test], dim=1)

    tot = torch.zeros(1)
    if torch.cuda.is_available():
        tot = tot.cuda()
    accs = []

    # 50 logistic-regression runs; note that this loop scores on the test split
    print("\nValidation:")
    pbar = trange(50)
    for _ in pbar:
        log = LogReg(hid_units, nb_classes)
        opt = torch.optim.Adam(log.parameters(), lr=0.01, weight_decay=0.0)
        pat_steps = 0
        best_acc = torch.zeros(1)
        if torch.cuda.is_available():
            log.cuda()
            best_acc = best_acc.cuda()

        for _ in range(100):
            log.train()
            opt.zero_grad()
            logits = log(train_embs)
            loss = xent(logits, train_lbls)
            loss.backward()
            opt.step()

        logits = log(test_embs)
        preds = torch.argmax(logits, dim=1)
        acc = torch.sum(preds == test_lbls).float() / test_lbls.shape[0]
        accs.append(acc * 100)
        pbar.desc = "Accuracy: {:.2f}%".format(100 * acc)
        tot += acc
        torch.save(log.state_dict(), saved_logreg)

    accs = torch.stack(accs)
    print('Average Accuracy: {:.2f}%'.format(accs.mean()))
    print('Standard Deviation: {:.3f}'.format(accs.std()))

    # Final check with the last trained classifier; this one scores on the validation split
    print("\nTesting")
    logits = log(val_embs)
    preds = torch.argmax(logits, dim=1)
    acc = torch.sum(preds == val_lbls).float() / val_lbls.shape[0]
    print("Accuracy: {:.2f}%".format(100 * acc))
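# Standard entry-point guard, assuming this script is meant to be executed
# directly (the guard itself is not shown in the excerpt).
if __name__ == '__main__':
    main()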
def train_transductive(dataset, dataset_str, edge_index, gnn_type, model_name,
                       K=None, random_init=False, drop_sigma=False):
    batch_size = 1  # Transductive setting
    hyperparameters = get_hyperparameters()
    nb_epochs = hyperparameters["nb_epochs"]
    patience = hyperparameters["patience"]
    lr = hyperparameters["lr"]
    if gnn_type == "SGConv":
        lr /= 3.
    hid_units = hyperparameters["hid_units"]
    nonlinearity = hyperparameters["nonlinearity"]

    nb_nodes = dataset.x.shape[0]
    ft_size = dataset.x.shape[1]
    nb_classes = torch.max(dataset.y).item() + 1  # 0-based class indices
    features = dataset.x

    model = DGI(ft_size, hid_units, nonlinearity,
                update_rule=gnn_type, K=K, drop_sigma=drop_sigma)
    optimiser = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=0)

    if torch.cuda.is_available():
        features = features.cuda()
        edge_index = edge_index.cuda()
        model = model.cuda()

    b_xent = nn.BCEWithLogitsLoss()
    xent = nn.CrossEntropyLoss()
    cnt_wait = 0
    best = 1e9
    best_t = 0

    for epoch in range(nb_epochs):
        if random_init:
            break
        model.train()
        optimiser.zero_grad()

        # Corruption: row-shuffle the node features to build negative samples
        idx = np.random.permutation(nb_nodes)
        shuf_fts = features[idx, :]

        lbl_1 = torch.ones(nb_nodes)
        lbl_2 = torch.zeros(nb_nodes)
        lbl = torch.cat((lbl_1, lbl_2), 0)

        if torch.cuda.is_available():
            shuf_fts = shuf_fts.cuda()
            lbl = lbl.cuda()

        logits = model(features, shuf_fts, edge_index)
        loss = b_xent(logits, lbl)
        print('Loss:', loss)

        if loss < best:
            best = loss
            best_t = epoch
            cnt_wait = 0
            torch.save(model.state_dict(), './trained_models/' + model_name)
        else:
            cnt_wait += 1

        if cnt_wait == patience:
            print('Early stopping!')
            break

        loss.backward()
        optimiser.step()

    return best_t
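# Hypothetical usage sketch (not part of the original code): one way to invoke
# train_transductive() on Cora, assuming the graph is loaded with
# torch_geometric's Planetoid dataset. The root path, model_name and gnn_type
# below are illustrative placeholders; ./trained_models/ must already exist.
def _example_train_cora():
    from torch_geometric.datasets import Planetoid

    cora = Planetoid(root="./geometric_datasets/Cora", name="Cora")[0]
    return train_transductive(
        cora,                               # Data object exposing .x and .y
        "Cora",                             # dataset_str
        cora.edge_index,
        gnn_type="GCNConv",
        model_name="dgi_cora_gcnconv.pth",  # checkpoint written to ./trained_models/
    )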
# Fragment: tail of the logistic-regression evaluation helper (the function
# header is not part of this excerpt).
    log.train()
    opt.zero_grad()

    logits = log(train_embs)
    loss = xent(logits, train_lbls)
    loss.backward()
    opt.step()

    logits = log(test_embs)
    preds = torch.argmax(logits, dim=1)
    acc = torch.sum(preds == test_lbls).float() / test_lbls.shape[0]
    return acc.detach().cpu().numpy()


# Fragment: adversarial training loop, perturbing the adjacency and/or the
# features via atm() before each encoder update.
for epoch in range(nb_epochs):
    encoder.train()
    optimiser.zero_grad()

    if make_adv:
        # step_size = step_size_init * math.pow(drop, math.floor((1 + epoch) / epochs_drop))
        step_size = step_size_init
        step_size_x = stepsize_x
        adv = atm(sp_adj, sp_A, None, n_flips, b_xent=b_xent,
                  step_size=step_size, eps_x=args.epsilon,
                  step_size_x=step_size_x, iterations=attack_iters,
                  should_normalize=True, random_restarts=False, make_adv=True)
        if attack_mode == 'A':
            sp_adj = adv
        elif attack_mode == 'X':
            features = adv
        elif attack_mode == 'both':
            sp_adj = adv[0]
def process_inductive(dataset, gnn_type="GCNConv", K=None, random_init=False, runs=10):
    hyperparameters = get_hyperparameters()
    nb_epochs = hyperparameters["nb_epochs"]
    patience = hyperparameters["patience"]
    lr = hyperparameters["lr"]
    l2_coef = hyperparameters["l2_coef"]
    drop_prob = hyperparameters["drop_prob"]
    hid_units = hyperparameters["hid_units"]
    nonlinearity = hyperparameters["nonlinearity"]
    batch_size = hyperparameters["batch_size"]

    norm_features = torch_geometric.transforms.NormalizeFeatures()
    dataset_train = PPI(
        "./geometric_datasets/" + dataset,
        split="train",
        transform=norm_features,
    )
    print(dataset_train)
    dataset_val = PPI(
        "./geometric_datasets/" + dataset,
        split="val",
        transform=norm_features,
    )
    print(dataset_val)
    dataset_test = PPI(
        "./geometric_datasets/" + dataset,
        split="test",
        transform=norm_features,
    )

    # DGI is trained unsupervised on the train and val graphs together
    data = []
    for d in dataset_train:
        data.append(d)
    for d in dataset_val:
        data.append(d)

    ft_size = dataset_train[0].x.shape[1]
    nb_classes = dataset_train[0].y.shape[1]  # multilabel
    b_xent = nn.BCEWithLogitsLoss()

    loader_train = DataLoader(
        data,
        batch_size=hyperparameters["batch_size"],
        shuffle=True,
    )
    loader_test = DataLoader(
        dataset_test,
        batch_size=hyperparameters["batch_size"],
        shuffle=False,
    )  # not used below; evaluation goes through preprocess_embeddings

    all_accs = []
    for _ in range(runs):
        model = DGI(ft_size, hid_units, nonlinearity,
                    update_rule=gnn_type, batch_size=1, K=K)
        model_name = get_model_name(dataset, gnn_type, K, random_init=random_init)
        print(model)
        optimiser = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2_coef)

        if torch.cuda.is_available():
            print('Using CUDA')
            model = model.cuda()

        model.train()
        torch.cuda.empty_cache()
        for epoch in range(20):
            if random_init:
                break
            total_loss = 0
            batch_id = 0
            model.train()
            loaded = list(loader_train)
            for batch in loaded:
                optimiser.zero_grad()
                if torch.cuda.is_available():
                    batch = batch.to('cuda')
                nb_nodes = batch.x.shape[0]
                features = batch.x
                labels = batch.y
                edge_index = batch.edge_index

                # Negative samples: dropout-corrupted features and edges from a different batch
                idx = np.random.randint(0, len(data))
                while idx == batch_id:
                    idx = np.random.randint(0, len(data))
                shuf_fts = torch.nn.functional.dropout(loaded[idx].x, drop_prob)
                edge_index2 = loaded[idx].edge_index

                lbl_1 = torch.ones(nb_nodes)
                lbl_2 = torch.zeros(shuf_fts.shape[0])
                lbl = torch.cat((lbl_1, lbl_2), 0)

                if torch.cuda.is_available():
                    shuf_fts = shuf_fts.cuda()
                    if edge_index2 is not None:
                        edge_index2 = edge_index2.cuda()
                    lbl = lbl.cuda()

                logits = model(features, shuf_fts, edge_index,
                               batch=batch.batch, edge_index_alt=edge_index2)
                loss = b_xent(logits, lbl)
                loss.backward()
                optimiser.step()
                batch_id += 1
                total_loss += loss.item()

            print(epoch, 'Train Loss:', total_loss / len(dataset_train))
            torch.save(model.state_dict(), './trained_models/' + model_name)

        torch.cuda.empty_cache()

        print('Loading last epoch')
        if not random_init:
            model.load_state_dict(torch.load('./trained_models/' + model_name))
        model.eval()

        b_xent_reg = nn.BCEWithLogitsLoss(pos_weight=torch.tensor(2.25))
        train_embs, whole_train_data = preprocess_embeddings(model, dataset_train)
        val_embs, whole_val_data = preprocess_embeddings(model, dataset_val)
        test_embs, whole_test_data = preprocess_embeddings(model, dataset_test)

        for _ in range(50):
            log = LogReg(hid_units, nb_classes)
            opt = torch.optim.Adam(log.parameters(), lr=0.01, weight_decay=0.0)
            log.cuda()
            pat_steps = 0
            best = 1e9
            log.train()
            for _ in range(250):
                opt.zero_grad()
                logits = log(train_embs)
                loss = b_xent_reg(logits, whole_train_data.y)
                loss.backward()
                opt.step()

                # Early stopping on the validation loss
                log.eval()
                val_logits = log(val_embs)
                loss = b_xent_reg(val_logits, whole_val_data.y)
                if loss.item() < best:
                    best = loss.item()
                    pat_steps = 0
                if pat_steps >= 5:
                    break
                pat_steps += 1

            log.eval()
            logits = log(test_embs)
            preds = torch.sigmoid(logits) > 0.5
            f1 = sklearn.metrics.f1_score(whole_test_data.y.cpu(), preds.long().cpu(),
                                          average='micro')
            all_accs.append(float(f1))
            print()
            print('Micro-averaged f1:', f1)

    all_accs = torch.tensor(all_accs)
    with open("./results/" + model_name[:-4] + "_results.txt", "w") as f:
        f.writelines([str(all_accs.mean().item()) + '\n', str(all_accs.std().item())])
    print(all_accs.mean())
    print(all_accs.std())
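# Hypothetical entry point (not in the original excerpt): runs the inductive PPI
# pipeline end to end with a plain GCN encoder. Argument values are illustrative;
# ./trained_models/ and ./results/ must exist, and torch_geometric downloads PPI
# into ./geometric_datasets/ppi on first use.
if __name__ == "__main__":
    process_inductive("ppi", gnn_type="GCNConv", runs=10)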