Example #1
# Imports used by this snippet (DeepRobust's graph package); the surrounding
# script is assumed to have already parsed `args` and loaded `adj`, `features`,
# `labels` and the train/val/test index splits.
import numpy as np
import torch
from deeprobust.graph.defense import GCN, ProGNN
from deeprobust.graph.data import PrePtbDataset
from deeprobust.graph.utils import preprocess
from deeprobust.graph.global_attack import Random

# The indentation shows the snippet starts inside an attack branch; the guard
# below is reconstructed from that context.
if args.attack == 'random':
    attacker = Random()
    n_perturbations = int(args.ptb_rate * (adj.sum() // 2))
    perturbed_adj = attacker.attack(adj, n_perturbations, type='add')

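# For 'meta' and 'nettack', load the pre-attacked graphs shipped with DeepRobust.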
if args.attack == 'meta' or args.attack == 'nettack':
    perturbed_data = PrePtbDataset(root='/tmp/',
                                   name=args.dataset,
                                   attack_method=args.attack,
                                   ptb_rate=args.ptb_rate)
    perturbed_adj = perturbed_data.adj
    if args.attack == 'nettack':
        idx_test = perturbed_data.target_nodes

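# Fix the random seeds for reproducibility.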
np.random.seed(args.seed)
torch.manual_seed(args.seed)

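# GCN backbone; Pro-GNN trains its weights jointly with the learned graph structure.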
model = GCN(nfeat=features.shape[1],
            nhid=args.hidden,
            nclass=labels.max().item() + 1,
            dropout=args.dropout,
            device=device)

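# Convert the perturbed graph, features and labels to tensors on the target
# device, then train and evaluate Pro-GNN.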
perturbed_adj, features, labels = preprocess(perturbed_adj,
                                             features,
                                             labels,
                                             preprocess_adj=False,
                                             device=device)
prognn = ProGNN(model, args, device)
prognn.fit(features, perturbed_adj, labels, idx_train, idx_val)
prognn.test(features, labels, idx_test)
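
The snippet above assumes adj, features, labels and the index splits already exist. A minimal sketch of that setup with DeepRobust's Dataset loader is shown below; the 'prognn' split setting and the 'cora' dataset name are assumptions, not taken from the snippet.

from deeprobust.graph.data import Dataset

# Hypothetical setup: load a clean graph and its train/val/test splits.
data = Dataset(root='/tmp/', name='cora', setting='prognn')
adj, features, labels = data.adj, data.features, data.labels
idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test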
Example #2
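    # Build the data, model, losses and optimizer from the parsed command-line arguments.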
    def __init__(self, args):
        self.dataset = args.dataset
        self.device = torch.device(
            f"cuda:{args.cuda_num}" if args.cuda else "cpu")
        if self.dataset in ["Cora", "Citeseer", "Pubmed", "CoauthorCS"]:
            if args.ptb:
                self.data = load_perterbued_data(self.dataset, args.ptb_rate,
                                                 args.ptb_type)
                self.loss_fn = torch.nn.functional.nll_loss
            else:
                self.data = load_data(self.dataset)
                self.loss_fn = torch.nn.functional.nll_loss
        elif self.dataset in ["PPI"]:
            self.data = load_ppi_data()
            self.loss_fn = torch.nn.BCEWithLogitsLoss()
        else:
            raise Exception(
                f"the dataset of {self.dataset} has not been implemented")

        self.entropy_loss = torch.nn.functional.binary_cross_entropy_with_logits

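        # Training hyperparameters copied from the command-line arguments.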
        self.type_model = args.type_model
        self.epochs = args.epochs
        self.weight_decay = args.weight_decay
        self.alpha = args.alpha
        self.gamma = args.gamma
        self.beta = args.beta
        self.lamb = args.lamb
        self.num_classes = args.num_classes
        self.ptb_rate = args.ptb_rate
        self.ptb_type = args.ptb_type
        self.metric = args.metric
        self.num_layers = args.num_layers

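        # Instantiate the requested GNN backbone.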
        if self.type_model == "GCN":
            self.model = GCN(args)
        elif self.type_model == "GAT":
            self.model = GAT(args)
        elif self.type_model == "NLGCN":
            self.model = NLGCN(args)
        elif self.type_model == "g_U_Net":
            self.model = gunet(args)
        elif self.type_model == "JKNet":
            self.model = JKNetMaxpool(args)
        elif self.type_model == "SGC":
            self.model = simpleGCN(args)
        elif self.type_model == "APPNP":
            self.model = APPNP(args)
        else:
            raise Exception(
                f"the model of {self.type_model} has not been implemented")

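        # Move the data onto the device; perturbed graphs first go through utils.preprocess.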
        if self.dataset in ["Cora", "Citeseer", "Pubmed", "CoauthorCS"]:
            if args.ptb:
                self.data.edge_index, self.data.x, self.data.y = utils.preprocess(
                    self.data.edge_index,
                    self.data.x,
                    self.data.y,
                    preprocess_adj=False,
                    sparse=False,
                    device=self.device)

            else:
                self.data.to(self.device)
        self.model.to(self.device)

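        # Adam optimizer over all model parameters with L2 weight decay.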
        self.optimizer = torch.optim.Adam(self.model.parameters(),
                                          lr=args.lr,
                                          weight_decay=args.weight_decay)

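        # Log the configuration and watch model gradients with Weights & Biases.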
        wandb.init(project="Gref", config=args)
        wandb.watch(self.model)
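
The constructor only wires up data, model, losses and optimizer; training itself is driven by other methods of the class, which are not shown here. A minimal construction sketch follows, where Trainer is a placeholder for the unshown class name, the argument values are illustrative assumptions covering the attributes this __init__ reads, and the backbone constructors may read further attributes not visible in the snippet.

from types import SimpleNamespace

# All values below are illustrative; Trainer is a placeholder for the class
# this __init__ belongs to.
args = SimpleNamespace(
    dataset="Cora", cuda=True, cuda_num=0,
    ptb=False, ptb_rate=0.05, ptb_type="meta",
    type_model="GCN", epochs=200, lr=0.01, weight_decay=5e-4,
    alpha=0.1, gamma=0.1, beta=0.1, lamb=1.0,
    num_classes=7, metric="acc", num_layers=2,
)
trainer = Trainer(args)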