from deeprobust.graph.data import Dataset, PrePtbDataset
from deeprobust.graph.global_attack import Random


def load_perterbued_data(dataset, ptb_rate, ptb_type="meta"):
    """Load a dataset together with a perturbed adjacency matrix.

    Note: data.edge_index is set to a (scipy sparse) adjacency matrix here,
    not a PyG-style edge index; convert it downstream if needed.
    """
    data = Dataset(root='/tmp/', name=dataset.lower(), setting='nettack',
                   seed=15, require_mask=True)
    data.x, data.y = data.features, data.labels

    if ptb_type == 'meta':
        # Use the pre-computed metattack perturbations shipped with DeepRobust
        if ptb_rate > 0:
            perturbed_data = PrePtbDataset(root='/tmp/', name=dataset.lower(),
                                           attack_method='meta', ptb_rate=ptb_rate)
            data.edge_index = perturbed_data.adj
        else:
            data.edge_index = data.adj
        return data

    if ptb_type in ('random_add', 'random_remove'):
        # Randomly add or remove a fraction ptb_rate of the existing edges
        num_edge = data.adj.sum(axis=None) / 2
        attacker = Random()
        attacker.attack(data.adj, n_perturbations=int(ptb_rate * num_edge),
                        type='add' if ptb_type == 'random_add' else 'remove')
        data.edge_index = attacker.modified_adj
        return data

    raise NotImplementedError(f"the ptb_type of {ptb_type} has not been implemented")
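A minimal usage sketch (the dataset name and perturbation rate below are illustrative, not from the original code). Note again that data.edge_index holds a scipy sparse adjacency matrix rather than a PyG edge-index tensor.

# Hypothetical usage of load_perterbued_data
data = load_perterbued_data('cora', ptb_rate=0.05, ptb_type='meta')
print(data.x.shape, data.y.shape)      # node features and labels
print(data.edge_index.shape)           # perturbed adjacency (scipy sparse)
print(len(data.idx_train), len(data.idx_val), len(data.idx_test))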
import numpy as np
import torch
from deeprobust.graph.data import Dataset, PrePtbDataset
from deeprobust.graph.defense import GCN

# `args` and `device` are assumed to be defined by the surrounding script (argparse, torch.device).
# The random seed here controls the train/val/test split; it must be the same seed
# that was used when the perturbed graph was generated.
np.random.seed(15)
data = Dataset(root='/tmp/', name=args.dataset, setting='nettack')
adj, features, labels = data.adj, data.features, data.labels
idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test

if args.attack == 'no':
    perturbed_adj = adj

if args.attack == 'random':
    from deeprobust.graph.global_attack import Random
    attacker = Random()
    n_perturbations = int(args.ptb_rate * (adj.sum() // 2))
    attacker.attack(adj, n_perturbations, type='add')
    # Random.attack() stores the result in attacker.modified_adj instead of returning it
    perturbed_adj = attacker.modified_adj

if args.attack == 'meta' or args.attack == 'nettack':
    # Load the pre-computed perturbed adjacency shipped with DeepRobust
    perturbed_data = PrePtbDataset(root='/tmp/', name=args.dataset,
                                   attack_method=args.attack,
                                   ptb_rate=args.ptb_rate)
    perturbed_adj = perturbed_data.adj
    if args.attack == 'nettack':
        # Nettack is a targeted attack, so evaluate only on the attacked nodes
        idx_test = perturbed_data.target_nodes

np.random.seed(args.seed)
torch.manual_seed(args.seed)
model = GCN(nfeat=features.shape[1], nhid=args.hidden,
            nclass=labels.max().item() + 1, dropout=args.dropout,
            device=device)  # constructor completed following the standard DeepRobust example
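A possible continuation of this script (a sketch using the standard DeepRobust GCN.fit / GCN.test API; not part of the original excerpt): train on the perturbed graph and report test accuracy.

# Sketch of a possible continuation using the DeepRobust GCN API
model = model.to(device)
model.fit(features, perturbed_adj, labels, idx_train, idx_val, patience=30)
model.eval()
model.test(idx_test)   # prints loss and accuracy on the (possibly targeted) test set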
from tqdm import tqdm
from deeprobust.graph.defense import GCN
from deeprobust.graph.global_attack import Random
from deeprobust.graph.utils import accuracy

# Adversarial training against an evasion attack.
# Assumes adj, features, labels, idx_train, idx_test and device are already
# prepared (e.g. loaded and converted to tensors as in the DeepRobust example scripts).
adversary = Random()  # global attacker (assumed; the original relies on an `adversary` defined elsewhere)

adv_train_model = GCN(nfeat=features.shape[1], nclass=labels.max() + 1, nhid=16,
                      dropout=0, with_relu=False, with_bias=True, device=device)
adv_train_model = adv_train_model.to(device)
adv_train_model.initialize()

n_perturbations = int(0.01 * (adj.sum() // 2))
for i in tqdm(range(100)):
    # modified_adj = adversary.attack(features, adj)
    adversary.attack(adj, n_perturbations=n_perturbations, type='add')
    modified_adj = adversary.modified_adj  # attack() stores the result rather than returning it
    adv_train_model.fit(features, modified_adj, labels, idx_train,
                        train_iters=50, initialize=False)

adv_train_model.eval()

# test directly or fine tune
print('=== test on perturbed adj ===')
output = adv_train_model.predict()
acc_test = accuracy(output[idx_test], labels[idx_test])
print("Test set results:", "accuracy= {:.4f}".format(acc_test.item()))
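Because this is an evasion setting, it can be clearer to draw a fresh perturbation at test time and evaluate on it explicitly. A hedged sketch reusing the same attacker (the variable names are illustrative, not from the original script):

# Hypothetical evasion-time check: attack the clean graph once more and evaluate
# the adversarially trained model on that unseen perturbation.
adversary.attack(adj, n_perturbations=n_perturbations, type='add')
evasion_adj = adversary.modified_adj
output = adv_train_model.predict(features, evasion_adj)
acc_evasion = accuracy(output[idx_test], labels[idx_test])
print("Evasion test accuracy: {:.4f}".format(acc_evasion.item()))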
print('=== testing GCN on perturbed graph ===')
model.fit(features, perturbed_adj, labels, idx_train)
output = model.output
acc_test = accuracy(output[idx_test], labels[idx_test])
print("Test set results:", "accuracy= {:.4f}".format(acc_test.item()))

# For a poisoning attack, the adjacency matrix you have is already perturbed,
# so adversarial training further perturbs the poisoned graph (here by removing edges).
print('=== Adversarial Training for Poisoning Attack ===')
model.initialize()
n_perturbations = int(0.01 * (adj.sum() // 2))
for i in range(100):
    # modified_adj = adversary.attack(features, adj)
    adversary.attack(perturbed_adj, n_perturbations=n_perturbations, type='remove')
    modified_adj = adversary.modified_adj
    model.fit(features, modified_adj, labels, idx_train,
              train_iters=50, initialize=False)

model.eval()

# test directly or fine tune
print('=== test on perturbed adj ===')
output = model.predict(features, perturbed_adj)
acc_test = accuracy(output[idx_test], labels[idx_test])
print("Test set results:", "accuracy= {:.4f}".format(acc_test.item()))
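An optional sanity check, not in the original script: predict on the clean adjacency as well, to see how the adversarially trained model fares on unperturbed data.

# Hedged sketch: accuracy of the adversarially trained model on the clean graph
output_clean = model.predict(features, adj)
acc_clean = accuracy(output_clean[idx_test], labels[idx_test])
print("Clean-graph test accuracy: {:.4f}".format(acc_clean.item()))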
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

data = Dataset(root='/tmp/', name=args.dataset)
adj, features, labels = data.adj, data.features, data.labels
idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test
idx_unlabeled = np.union1d(idx_val, idx_test)

# Setup Attack Model
model = Random()
n_perturbations = int(args.ptb_rate * (adj.sum() // 2))
model.attack(adj, n_perturbations)
modified_adj = model.modified_adj

adj, features, labels = preprocess(adj, features, labels, preprocess_adj=False, sparse=True)
adj = adj.to(device)
features = features.to(device)
labels = labels.to(device)

modified_adj = normalize_adj(modified_adj)
modified_adj = sparse_mx_to_torch_sparse_tensor(modified_adj)
modified_adj = modified_adj.to(device)
import numpy as np
import torch
from deeprobust.graph.data import Dataset
from deeprobust.graph.defense import GCN
from deeprobust.graph.global_attack import Random
from deeprobust.graph.utils import preprocess, normalize_adj, sparse_mx_to_torch_sparse_tensor, accuracy

# `args` and `device` are assumed to be defined by the surrounding script.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

data = Dataset(root='/tmp/', name=args.dataset)
adj, features, labels = data.adj, data.features, data.labels
idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test
idx_unlabeled = np.union1d(idx_val, idx_test)

# Setup Attack Model
model = Random()
n_perturbations = int(args.ptb_rate * (adj.sum() // 2))
model.attack(adj, n_perturbations)
modified_adj = model.modified_adj  # attack() stores the perturbed adjacency rather than returning it

adj, features, labels = preprocess(adj, features, labels, preprocess_adj=False, sparse=True)
adj = adj.to(device)
features = features.to(device)
labels = labels.to(device)

modified_adj = normalize_adj(modified_adj)
modified_adj = sparse_mx_to_torch_sparse_tensor(modified_adj)
modified_adj = modified_adj.to(device)


def test(adj):
    ''' test on GCN '''
    # adj = normalize_adj_tensor(adj)
    gcn = GCN(nfeat=features.shape[1],
              nhid=args.hidden,
              nclass=labels.max().item() + 1,
              dropout=args.dropout,
              device=device)  # remaining arguments completed as in the standard DeepRobust example
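The listing above cuts off inside test(); below is a hedged sketch of how it typically continues in the DeepRobust example scripts, followed by the usual comparison between the clean and the perturbed graph.

    # Possible continuation of test() (a sketch, not part of the original listing)
    gcn = gcn.to(device)
    gcn.fit(features, adj, labels, idx_train)   # train on whichever adjacency is passed in
    gcn.eval()
    output = gcn.predict()
    acc_test = accuracy(output[idx_test], labels[idx_test])
    print("Test set results:", "accuracy= {:.4f}".format(acc_test.item()))
    return acc_test

print('=== testing GCN on clean graph ===')
test(adj)
print('=== testing GCN on perturbed graph ===')
test(modified_adj)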