acc_test = accuracy(output[idx_test], labels[idx_test])
print("Test set results:",
      "accuracy= {:.4f}".format(acc_test.item()))

print('=== Adversarial Training for Evasion Attack ===')
adversary = Random()
adv_train_model = GCN(nfeat=features.shape[1], nclass=labels.max() + 1,
                      nhid=16, dropout=0, with_relu=False, with_bias=True,
                      device=device)
adv_train_model = adv_train_model.to(device)
adv_train_model.initialize()

# perturb 1% of the edges; adj is symmetric, so adj.sum() // 2 is the edge count
n_perturbations = int(0.01 * (adj.sum() // 2))

for i in tqdm(range(100)):
    # Random.attack() stores its result in adversary.modified_adj
    # instead of returning it
    adversary.attack(adj, n_perturbations=n_perturbations, type='add')
    modified_adj = adversary.modified_adj
    adv_train_model.fit(features, modified_adj, labels, idx_train,
                        train_iters=50, initialize=False)

adv_train_model.eval()

# test directly or fine tune
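# One way to "test directly", sketched under an assumption: `perturbed_adj`
# below stands for an adjacency matrix produced by an evasion attack at test
# time (it is not defined by the evasion code above). GCN.predict() runs
# inference on the supplied graph without retraining.
print('=== test on perturbed adj (sketch) ===')
output = adv_train_model.predict(features, perturbed_adj)
acc_test = accuracy(output[idx_test], labels[idx_test])
print("Test set results:",
      "accuracy= {:.4f}".format(acc_test.item()))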
print('=== test on original adj ===')
model.fit(features, adj, labels, idx_train)
output = model.output
acc_test = accuracy(output[idx_test], labels[idx_test])
print("Test set results:",
      "accuracy= {:.4f}".format(acc_test.item()))

print('=== testing GCN on perturbed graph ===')
model.fit(features, perturbed_adj, labels, idx_train)
output = model.output
acc_test = accuracy(output[idx_test], labels[idx_test])
print("Test set results:",
      "accuracy= {:.4f}".format(acc_test.item()))

# For a poisoning attack, the adjacency matrix you have
# is already perturbed, so train against edge removals here
print('=== Adversarial Training for Poisoning Attack ===')
model.initialize()
n_perturbations = int(0.01 * (adj.sum() // 2))

for i in range(100):
    # Random.attack() stores its result in adversary.modified_adj
    adversary.attack(perturbed_adj, n_perturbations=n_perturbations, type='remove')
    modified_adj = adversary.modified_adj
    model.fit(features, modified_adj, labels, idx_train,
              train_iters=50, initialize=False)

model.eval()
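# A minimal sketch of the corresponding evaluation step for the poisoning
# case (an assumed next step, mirroring the evasion example): predict() reuses
# the weights learned by the adversarial training loop above and evaluates
# them on the poisoned graph.
print('=== test on perturbed adj ===')
output = model.predict(features, perturbed_adj)
acc_test = accuracy(output[idx_test], labels[idx_test])
print("Test set results:",
      "accuracy= {:.4f}".format(acc_test.item()))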