Example No. 1
 def test(self, idx_test):
     # output = self.forward()
     output = self.output
     loss_test = F.nll_loss(output[idx_test], self.labels[idx_test])
     acc_test = utils.accuracy(output[idx_test], self.labels[idx_test])
     print("Test set results:", "loss= {:.4f}".format(loss_test.item()),
           "accuracy= {:.4f}".format(acc_test.item()))
Example No. 2
    def _train_with_val(self, labels, idx_train, idx_val, train_iters,
                        verbose):
        optimizer = optim.Adam(self.parameters(), lr=self.lr)

        best_loss_val = 100
        best_acc_val = 0

        for i in range(train_iters):
            self.train()
            optimizer.zero_grad()
            output = self.forward()
            loss_train = self._loss(output[idx_train], labels[idx_train])
            loss_train.backward()
            optimizer.step()
            if verbose and i % 10 == 0:
                print('Epoch {}, training loss: {}'.format(
                    i, loss_train.item()))

            self.eval()
            output = self.forward()
            loss_val = F.nll_loss(output[idx_val], labels[idx_val])
            acc_val = utils.accuracy(output[idx_val], labels[idx_val])

            if best_loss_val > loss_val:
                best_loss_val = loss_val
                self.output = output

            if acc_val > best_acc_val:
                best_acc_val = acc_val
                self.output = output

        print(
            '=== picking the best model according to the performance on validation ==='
        )
Example No. 3
    def get_meta_grad(self, features, adj_norm, idx_train, idx_unlabeled, labels, labels_self_training):

        hidden = features
        for ix, w in enumerate(self.weights):
            b = self.biases[ix] if self.with_bias else 0
            if self.sparse_features:
                hidden = adj_norm @ torch.spmm(hidden, w) + b
            else:
                hidden = adj_norm @ hidden @ w + b
            if self.with_relu:
                hidden = F.relu(hidden)

        output = F.log_softmax(hidden, dim=1)

        loss_labeled = F.nll_loss(output[idx_train], labels[idx_train])
        loss_unlabeled = F.nll_loss(output[idx_unlabeled], labels_self_training[idx_unlabeled])
        loss_test_val = F.nll_loss(output[idx_unlabeled], labels[idx_unlabeled])

        if self.lambda_ == 1:
            attack_loss = loss_labeled
        elif self.lambda_ == 0:
            attack_loss = loss_unlabeled
        else:
            attack_loss = self.lambda_ * loss_labeled + (1 - self.lambda_) * loss_unlabeled

        print('GCN loss on unlabeled data: {}'.format(loss_test_val.item()))
        print('GCN acc on unlabeled data: {}'.format(utils.accuracy(output[idx_unlabeled], labels[idx_unlabeled]).item()))
        print('attack loss: {}'.format(attack_loss.item()))

        adj_grad, feature_grad = None, None
        if self.attack_structure:
            adj_grad = torch.autograd.grad(attack_loss, self.adj_changes, retain_graph=True)[0]
        if self.attack_features:
            feature_grad = torch.autograd.grad(attack_loss, self.feature_changes, retain_graph=True)[0]
        return adj_grad, feature_grad
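
The returned gradients are turned into a discrete perturbation outside this method. As a minimal sketch of the Metattack-style scoring rule (the helper name and the symmetric update are illustrative assumptions, not part of the original):

import torch

def pick_edge_flip(adj, adj_grad):
    # Adding a missing edge (adj == 0) scores +grad; removing an existing
    # edge (adj == 1) scores -grad; flip the highest-scoring entry.
    score = adj_grad * (1 - 2 * adj)
    idx = torch.argmax(score)
    row, col = idx // adj.shape[1], idx % adj.shape[1]
    adj = adj.clone()
    adj[row, col] = 1 - adj[row, col]
    adj[col, row] = adj[row, col]  # keep the adjacency symmetric
    return adj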
Example No. 4
    def train_gcn(self, epoch, features, adj, labels, idx_train, idx_val):
        args = self.args
        estimator = self.estimator
        adj = estimator.normalize()

        t = time.time()
        self.model.train()
        self.optimizer.zero_grad()

        output = self.model(features, adj)
        loss_train = F.nll_loss(output[idx_train], labels[idx_train])
        acc_train = accuracy(output[idx_train], labels[idx_train])
        loss_train.backward()
        self.optimizer.step()

        # Evaluate validation set performance separately;
        # eval() deactivates dropout during the validation run.
        self.model.eval()
        output = self.model(features, adj)

        loss_val = F.nll_loss(output[idx_val], labels[idx_val])
        acc_val = accuracy(output[idx_val], labels[idx_val])

        if acc_val > self.best_val_acc:
            self.best_val_acc = acc_val
            self.best_graph = adj.detach()
            self.weights = deepcopy(self.model.state_dict())
            if args.debug:
                print('\t=== saving current graph/gcn, best_val_acc: %s' %
                      self.best_val_acc.item())

        if loss_val < self.best_val_loss:
            self.best_val_loss = loss_val
            self.best_graph = adj.detach()
            self.weights = deepcopy(self.model.state_dict())
            if args.debug:
                print('\t=== saving current graph/gcn, best_val_loss: %s' %
                      self.best_val_loss.item())

        if args.debug:
            if epoch % 1 == 0:
                print('Epoch: {:04d}'.format(epoch + 1),
                      'loss_train: {:.4f}'.format(loss_train.item()),
                      'acc_train: {:.4f}'.format(acc_train.item()),
                      'loss_val: {:.4f}'.format(loss_val.item()),
                      'acc_val: {:.4f}'.format(acc_val.item()),
                      'time: {:.4f}s'.format(time.time() - t))
Example No. 5
    def inner_train(self, features, modified_adj, idx_train, idx_unlabeled,
                    labels, labels_self_training):
        # adj_norm = utils.normalize_adj_tensor(modified_adj)
        adj_norm = self.binarize(modified_adj)
        for j in range(self.train_iters):
            hidden = features
            for ix, w in enumerate(self.weights):
                b = self.biases[ix] if self.with_bias else 0
                if self.sparse_features:
                    hidden = adj_norm @ torch.spmm(hidden, w) + b
                else:
                    hidden = adj_norm @ hidden @ w + b
                if self.with_relu:
                    hidden = F.relu(hidden)

            output = F.log_softmax(hidden, dim=1)
            loss_labeled = F.nll_loss(output[idx_train], labels[idx_train])
            loss_unlabeled = F.nll_loss(output[idx_unlabeled],
                                        labels_self_training[idx_unlabeled])

            if self.lambda_ == 1:
                attack_loss = loss_labeled
            elif self.lambda_ == 0:
                attack_loss = loss_unlabeled
            else:
                attack_loss = self.lambda_ * loss_labeled + (
                    1 - self.lambda_) * loss_unlabeled

            self.optimizer.zero_grad()
            loss_labeled.backward(retain_graph=True)

            if self.attack_structure:
                self.adj_changes.grad.zero_()
                self.adj_grad_sum += torch.autograd.grad(attack_loss,
                                                         self.adj_changes,
                                                         retain_graph=True)[0]
            if self.attack_features:
                self.feature_changes.grad.zero_()
                self.feature_grad_sum += torch.autograd.grad(
                    attack_loss, self.feature_changes, retain_graph=True)[0]

            self.optimizer.step()

        loss_test_val = F.nll_loss(output[idx_unlabeled],
                                   labels[idx_unlabeled])
        print('GCN loss on unlabeled data: {}'.format(loss_test_val.item()))
        print('GCN acc on unlabeled data: {}'.format(
            utils.accuracy(output[idx_unlabeled],
                           labels[idx_unlabeled]).item()))
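
The pattern worth noting here: the surrogate weights are trained on the labeled loss, while gradients of the attack loss are accumulated with respect to the frozen perturbation variables. A stripped-down, self-contained sketch of that pattern (all names illustrative):

import torch

w = torch.randn(3, requires_grad=True)   # surrogate weights (trained)
p = torch.zeros(3, requires_grad=True)   # perturbation (gradients only collected)
opt = torch.optim.SGD([w], lr=0.1)
p_grad_sum = torch.zeros_like(p)
x = torch.randn(3)

for _ in range(5):
    loss_train = (((w + p) * x).sum()) ** 2  # stands in for loss_labeled
    loss_attack = ((w + p) * x).abs().sum()  # stands in for attack_loss
    opt.zero_grad()
    loss_train.backward(retain_graph=True)
    p_grad_sum += torch.autograd.grad(loss_attack, p, retain_graph=True)[0]
    opt.step()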
Example No. 6
 def test(self, idx_test):
     self.eval()
     output = self.predict()
     # output = self.output
     loss_test = F.nll_loss(output[idx_test], self.labels[idx_test])
     acc_test = utils.accuracy(output[idx_test], self.labels[idx_test])
     print("Test set results:", "loss= {:.4f}".format(loss_test.item()),
           "accuracy= {:.4f}".format(acc_test.item()))
     return [loss_test.item(), acc_test.item()]
Example No. 7
 def test(self, idx_test):
     self.eval()
     output = self.predict()  # uses self.features and self.adj_norm from the training stage
     loss_test = F.nll_loss(output[idx_test], self.labels[idx_test])
     acc_test = utils.accuracy(output[idx_test], self.labels[idx_test])
     # print("Test set results:",
     #       "loss= {:.4f}".format(loss_test.item()),
     #       "accuracy= {:.4f}".format(acc_test.item()))
     return acc_test, output
Example No. 8
    def _train_with_val(self, labels, idx_train, idx_val, train_iters, verbose):
        if verbose:
            print('=== training gcn model ===')
        optimizer = optim.Adam(self.parameters(), lr=self.lr, weight_decay=self.weight_decay)

        best_acc_val = 0

        for i in range(train_iters):
            self.train()
            optimizer.zero_grad()
            output = self.forward(self.features, self.adj_norm)
            loss_train = F.nll_loss(output[idx_train], labels[idx_train])
            loss_train.backward()
            acc_train = utils.accuracy(output[idx_train], self.labels[idx_train])
            optimizer.step()
            if verbose and i % 10 == 0:
                print('Epoch {}, training loss: {}, training acc: {}'.format(i, loss_train.item(), acc_train.item()))

            self.eval()
            output = self.forward(self.features, self.adj_norm)
            loss_val = F.nll_loss(output[idx_val], labels[idx_val])
            acc_val = utils.accuracy(output[idx_val], labels[idx_val])

            if acc_val > best_acc_val:
                best_acc_val = acc_val
                self.output = output
                weights = deepcopy(self.state_dict())
                print('Epoch {}, val loss: {}, val acc: {}'.format(i, loss_val.item(), acc_val.item()))
        print('=== picking the best model according to the performance on validation === {}'.format(best_acc_val))
        self.load_state_dict(weights)
Example No. 9
 def test(self, idx_test):
     """Evaluate the peformance on test set
     """
     self.eval()
     # output = self.forward()
     output = self.output
     loss_test = F.nll_loss(output[idx_test], self.labels[idx_test])
     acc_test = utils.accuracy(output[idx_test], self.labels[idx_test])
     print("Test set results:", "loss= {:.4f}".format(loss_test.item()),
           "accuracy= {:.4f}".format(acc_test.item()))
     return acc_test.item()
Example No. 10
 def test(self, idx_test, model_name=None):
     # self.model_name = model_name
     self.eval()
     output = self.predict()
     # output = self.output
     loss_test = F.nll_loss(output[idx_test], self.labels[idx_test])
     acc_test = utils.accuracy(output[idx_test], self.labels[idx_test])
     # print("Test set results:",
     #       "loss= {:.4f}".format(loss_test.item()),
     #       "accuracy= {:.4f}".format(acc_test.item()))
     return acc_test, output
Example No. 11
 def test(self, features, labels, idx_test):
     print("\t=== testing ===")
     self.model.eval()
     adj = self.best_graph
     if self.best_graph is None:
         adj = self.estimator.normalize()
     output = self.model(features, adj)
     loss_test = F.nll_loss(output[idx_test], labels[idx_test])
     acc_test = accuracy(output[idx_test], labels[idx_test])
     print("\tTest set results:",
           "loss= {:.4f}".format(loss_test.item()),
           "accuracy= {:.4f}".format(acc_test.item()))
Example No. 12
    def test(self, idx_test, filename):
        self.eval()
        output = self.predict()
        loss_test = F.nll_loss(output[idx_test], self.labels[idx_test])
        acc_test = utils.accuracy(output[idx_test], self.labels[idx_test])
        print("Test set results:", "loss= {:.4f}".format(loss_test.item()),
              "accuracy= {:.4f}".format(acc_test.item()))

        return acc_test
Example No. 13
    def _train_with_val(self, labels, idx_train, idx_val, train_iters,
                        verbose):
        if verbose:
            print('=== training gcn model ===')
        optimizer = optim.Adam(self.parameters(),
                               lr=self.lr,
                               weight_decay=self.weight_decay)

        best_loss_val = 100
        best_acc_val = 0

        for i in range(train_iters):
            self.train()
            optimizer.zero_grad()
            output = self.forward(self.features, self.adj_norm)
            loss_train = F.nll_loss(output[idx_train], labels[idx_train])
            loss_train.backward()
            optimizer.step()

            # pred = output[self.idx_test].max(1)[1]

            acc_test = accuracy(output[self.idx_test], labels[self.idx_test])
            # acc_test = pred.eq(labels[self.idx_test]).sum().item() / self.idx_test.shape[0]

            self.eval()
            output = self.forward(self.features, self.adj_norm)
            loss_val = F.nll_loss(output[idx_val], labels[idx_val])
            acc_val = utils.accuracy(output[idx_val], labels[idx_val])

            if verbose and i % 200 == 0:
                print('Epoch {}, training loss: {}, test acc: {}'.format(
                    i, loss_train.item(), acc_test.item()))

            if best_loss_val > loss_val:
                best_loss_val = loss_val
                self.output = output
                weights = deepcopy(self.state_dict())

            if acc_val > best_acc_val:
                best_acc_val = acc_val
                self.output = output
                weights = deepcopy(self.state_dict())

        if verbose:
            print(
                '=== picking the best model according to the performance on validation ==='
            )
        self.load_state_dict(weights)
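
The deepcopy/load_state_dict pair is the standard in-memory checkpoint pattern: snapshot the weights whenever validation improves, restore the snapshot at the end. A minimal sketch with a generic module:

from copy import deepcopy
import torch.nn as nn

model = nn.Linear(4, 2)
weights = deepcopy(model.state_dict())  # snapshot at the best validation epoch
# ... training continues and may overfit ...
model.load_state_dict(weights)          # restore the best snapshot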
Example No. 14
    def test(self, idx_test):
        """Evaluate GCN performance on test set.

        Parameters
        ----------
        idx_test :
            node testing indices
        """
        self.eval()
        output = self.predict()
        # output = self.output
        loss_test = F.nll_loss(output[idx_test], self.labels[idx_test])
        acc_test = utils.accuracy(output[idx_test], self.labels[idx_test])
        print("Test set results:", "loss= {:.4f}".format(loss_test.item()),
              "accuracy= {:.4f}".format(acc_test.item()))
        return acc_test
Example No. 15
    def _train_with_val(self, labels, idx_train, idx_val, train_iters,
                        verbose):
        if verbose:
            print('=== training gcn model ===')
        optimizer = optim.Adam(self.parameters(),
                               lr=self.lr,
                               weight_decay=self.weight_decay)

        best_loss_val = 100
        best_acc_val = 0

        for i in range(train_iters):

            self.train()
            optimizer.zero_grad()
            output, embeddings = self.myforward(self.features, self.adj_norm)
            loss_train = F.nll_loss(output[idx_train], labels[idx_train])
            # acc_train = accuracy(output[idx_train], labels[idx_train])
            loss_ssl = self.lambda_ * self.regression_loss(embeddings)
            loss_total = loss_train + loss_ssl
            loss_total.backward()
            optimizer.step()

            if verbose and i % 10 == 0:
                print('Epoch {}, training loss: {}'.format(
                    i, loss_train.item()))

            self.eval()
            output = self.forward(self.features, self.adj_norm)
            loss_val = F.nll_loss(output[idx_val], labels[idx_val])
            acc_val = utils.accuracy(output[idx_val], labels[idx_val])

            if best_loss_val > loss_val:
                best_loss_val = loss_val
                self.output = output
                weights = deepcopy(self.state_dict())

            if acc_val > best_acc_val:
                best_acc_val = acc_val
                self.output = output
                weights = deepcopy(self.state_dict())

        if verbose:
            print(
                '=== picking the best model according to the performance on validation ==='
            )
        self.load_state_dict(weights)
Example No. 16
    def _train_with_val(self, train_iters, patience, verbose):
        if verbose:
            print('=== training GAT model ===')
        optimizer = optim.Adam(self.parameters(),
                               lr=self.lr,
                               weight_decay=self.weight_decay)

        labels = self.data.y
        train_mask, val_mask = self.data.train_mask, self.data.val_mask

        best_loss_val = 100
        best_acc_val = 0

        for i in range(train_iters):
            self.train()
            optimizer.zero_grad()
            output = self.forward(self.data)

            loss_train = F.nll_loss(output[train_mask], labels[train_mask])
            loss_train.backward()
            optimizer.step()

            if verbose and i % 10 == 0:
                print('Epoch {}, training loss: {}'.format(
                    i, loss_train.item()))

            self.eval()
            output = self.forward(self.data)
            loss_val = F.nll_loss(output[val_mask], labels[val_mask])
            acc_val = utils.accuracy(output[val_mask], labels[val_mask])

            if best_loss_val > loss_val:
                best_loss_val = loss_val
                self.output = output
                weights = deepcopy(self.state_dict())

            if acc_val > best_acc_val:
                best_acc_val = acc_val
                self.output = output
                weights = deepcopy(self.state_dict())

        if verbose:
            print(
                '=== picking the best model according to the performance on validation ==='
            )
        self.load_state_dict(weights)
Example No. 17
    def test(self):
        """Evaluate GAT performance on test set.

        Parameters
        ----------
        idx_test :
            node testing indices
        """
        self.eval()
        test_mask = self.data.test_mask
        labels = self.data.y
        output = self.forward(self.data)
        # output = self.output
        loss_test = F.nll_loss(output[test_mask], labels[test_mask])
        acc_test = utils.accuracy(output[test_mask], labels[test_mask])
        print("Test set results:", "loss= {:.4f}".format(loss_test.item()),
              "accuracy= {:.4f}".format(acc_test.item()))
        return acc_test.item()
Example No. 18
    def test(self, pyg_data=None):
        """Evaluate MedianGCN performance on test set.

        Parameters
        ----------
        pyg_data :
            pytorch geometric dataset object        
        idx_test :
            node testing indices
        """
        self.eval()
        data = pyg_data[0].to(self.device) if pyg_data is not None else self.data
        test_mask = data.test_mask
        labels = data.y
        output = self.forward(data)
        # output = self.output
        loss_test = F.nll_loss(output[test_mask], labels[test_mask])
        acc_test = utils.accuracy(output[test_mask], labels[test_mask])
        print("Test set results:",
              "loss= {:.4f}".format(loss_test.item()),
              "accuracy= {:.4f}".format(acc_test.item()))
        return acc_test.item()
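
For context, the pyg_data object indexed above can be any PyTorch Geometric dataset: indexing yields a Data object carrying y and test_mask. A sketch using the standard Planetoid loader (the dataset name and path are arbitrary choices, not from the original):

import torch
from torch_geometric.datasets import Planetoid

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
pyg_data = Planetoid(root='/tmp/Cora', name='Cora')
data = pyg_data[0].to(device)
print(int(data.test_mask.sum()), 'test nodes')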
Example No. 19
def test_acc(adj, features, target_node):
    ''' test on GCN '''
    gcn = GCN(nfeat=features.shape[1],
              nhid=16,
              nclass=labels.max().item() + 1,
              dropout=0.5,
              device=device)

    gcn = gcn.to(device)

    gcn.fit(features, adj, labels, idx_train, idx_val, patience=30)

    gcn.eval()
    output = gcn.predict()
    # probs = t.exp(output[[target_node]])[0]
    # print('Target node probs: {}'.format(probs.detach().cpu().numpy()))
    acc_test = accuracy(output[idx_test], labels[idx_test])

    print("\nOverall test set results:",
          "accuracy= {:.4f}".format(acc_test.item()))

    return acc_test.item()
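
test_acc relies on globals defined elsewhere in the script (labels, idx_train, idx_val, idx_test, device). A plausible setup for them, assuming the DeepRobust Dataset loader (the root path and dataset name are arbitrary):

import torch
from deeprobust.graph.data import Dataset

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
data = Dataset(root='/tmp/', name='cora')
adj, features, labels = data.adj, data.features, data.labels
idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test
labels = torch.LongTensor(labels).to(device)  # tensors for the loss/accuracy calls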
Example No. 20
# check output
adj_per_nor_t = t.from_numpy(adj_per).float().cuda()

hidden = t.from_numpy(base_feat).float().cuda()
hidden = adj_per_nor_t @ hidden @ model.gc1.weight
if model.with_bias:
    hidden = hidden + model.gc1.bias
if model.with_relu:
    hidden = F.relu(hidden)
hidden = adj_per_nor_t @ hidden @ model.gc2.weight
if model.with_bias:
    hidden = hidden + model.gc2.bias

output = F.log_softmax(hidden, dim=1)
acc_after_attack = utils.accuracy(output[idx_test], labels[idx_test])

# The random seed controls the train/val/test split; it must match the seed
# used when the perturbed graph was generated, e.g.:
# data = Dataset(root='/tmp/', name=args.dataset, setting='nettack', seed=15)
# Alternatively, setting='prognn' gives the fixed Pro-GNN splits:
# data = Dataset(root='./tmp/', name=args.dataset, setting='prognn')
# adj, features, labels_1 = data.adj, data.features, data.labels
# idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test

# load pre-attacked graph
# perturbed_data = PrePtbDataset(root='./tmp/',
#         name=args.dataset,
#         attack_method='meta',
#         ptb_rate=args.ptb_rate)
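
The unrolled two-layer forward pass above reads more clearly as a reusable function. A sketch under the same assumptions (the adjacency is already normalized; the model exposes gc1/gc2 layers plus with_bias/with_relu flags):

import torch.nn.functional as F

def gcn_forward(adj_norm, x, model):
    # Two-layer GCN forward, identical to the inline computation above.
    h = adj_norm @ x @ model.gc1.weight
    if model.with_bias:
        h = h + model.gc1.bias
    if model.with_relu:
        h = F.relu(h)
    h = adj_norm @ h @ model.gc2.weight
    if model.with_bias:
        h = h + model.gc2.bias
    return F.log_softmax(h, dim=1)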
Example No. 21
    def get_meta_grad(self,
                      features,
                      adj_norm,
                      ori_e,
                      ori_v,
                      idx_train,
                      idx_unlabeled,
                      labels,
                      labels_self_training,
                      verbose=False):

        hidden = features
        for ix, w in enumerate(self.weights):
            b = self.biases[ix] if self.with_bias else 0
            if self.sparse_features:
                hidden = adj_norm @ torch.spmm(hidden, w) + b
            else:
                hidden = adj_norm @ hidden @ w + b
            if self.with_relu and ix != len(self.weights) - 1:
                hidden = F.relu(hidden)

        output = F.log_softmax(hidden, dim=1)

        loss_labeled = F.nll_loss(output[idx_train], labels[idx_train])
        loss_unlabeled = F.nll_loss(output[idx_unlabeled],
                                    labels_self_training[idx_unlabeled])
        loss_test_val = F.nll_loss(output[idx_unlabeled],
                                   labels[idx_unlabeled])

        if self.lambda_ == 1:
            attack_loss = loss_labeled
        elif self.lambda_ == 0:
            attack_loss = loss_unlabeled
        else:
            attack_loss = self.lambda_ * loss_labeled + (
                1 - self.lambda_) * loss_unlabeled

        eigen_norm = torch.norm(ori_e)
        eigen_mse = 0
        # New: add regularization term for spectral distance
        if self.regularization_weight != 0:
            e, v = torch.symeig(adj_norm, eigenvectors=True)
            eigen_mse = F.mse_loss(ori_e, e, reduction='sum')
        reg_loss = eigen_mse / eigen_norm * self.regularization_weight

        if verbose:
            print('classification loss = {:.4f} | '.format(attack_loss.item()),
                  'reg loss = {:.8f} | '.format(reg_loss),
                  'eigen_mse = {:.8f} | '.format(eigen_mse),
                  'eigen_norm = {:.4f}'.format(eigen_norm))

            loss_test, acc_test = calc_acc(output, labels, idx_unlabeled)
            loss_train, acc_train = calc_acc(output, labels, idx_train)

            print(
                "-- Before final discretize: train loss = {:.4f} | ".format(
                    loss_train), "train acc = {:.4f} | ".format(acc_train),
                "unlabeled loss = {:.4f} | ".format(loss_test),
                "unlabeled acc = {:.4f} | ".format(acc_test))

            print('-- GCN loss on unlabeled data: {:.4f}'.format(
                loss_test_val.item()))
            print('-- GCN acc on unlabeled data: {:.4f}'.format(
                utils.accuracy(output[idx_unlabeled],
                               labels[idx_unlabeled]).item()))

        attack_loss += reg_loss

        self.loss = attack_loss

        adj_grad, feature_grad = None, None
        if self.attack_structure:
            adj_grad = torch.autograd.grad(attack_loss,
                                           self.adj_changes,
                                           retain_graph=True)[0]
        if self.attack_features:
            feature_grad = torch.autograd.grad(attack_loss,
                                               self.feature_changes,
                                               retain_graph=True)[0]

        return adj_grad, feature_grad
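
Note that torch.symeig (used here and again in Example No. 23) is deprecated and has been removed in recent PyTorch releases. On current versions the equivalent call for the spectral term is torch.linalg.eigh, which likewise returns eigenvalues in ascending order:

import torch

adj_norm = torch.rand(4, 4)
adj_norm = (adj_norm + adj_norm.T) / 2  # symmetric input, as the solver expects

# drop-in replacement for torch.symeig(adj_norm, eigenvectors=True)
e, v = torch.linalg.eigh(adj_norm)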
Example No. 22
    def train_adj(self, epoch, features, adj, labels, idx_train, idx_val):
        estimator = self.estimator
        args = self.args
        if args.debug:
            print("\n=== train_adj ===")
        t = time.time()
        estimator.train()
        self.optimizer_adj.zero_grad()

        loss_l1 = torch.norm(estimator.estimated_adj, 1)
        loss_fro = torch.norm(estimator.estimated_adj - adj, p='fro')
        normalized_adj = estimator.normalize()

        if args.lambda_:
            loss_smooth_feat = self.feature_smoothing(estimator.estimated_adj,
                                                      features)
        else:
            loss_smooth_feat = 0 * loss_l1

        output = self.model(features, normalized_adj)
        loss_gcn = F.nll_loss(output[idx_train], labels[idx_train])
        acc_train = accuracy(output[idx_train], labels[idx_train])

        loss_symmetric = torch.norm(estimator.estimated_adj \
                        - estimator.estimated_adj.t(), p="fro")

        loss_differential = loss_fro + args.gamma * loss_gcn \
                        + args.lambda_ * loss_smooth_feat + args.phi * loss_symmetric

        loss_differential.backward()

        self.optimizer_adj.step()
        loss_nuclear = 0 * loss_fro
        if args.beta != 0:
            self.optimizer_nuclear.zero_grad()
            self.optimizer_nuclear.step()
            loss_nuclear = prox_operators.nuclear_norm

        self.optimizer_l1.zero_grad()
        self.optimizer_l1.step()

        total_loss = loss_fro \
                    + args.gamma * loss_gcn \
                    + args.alpha * loss_l1 \
                    + args.beta * loss_nuclear \
                    + args.phi * loss_symmetric

        estimator.estimated_adj.data.copy_(
            torch.clamp(estimator.estimated_adj.data, min=0, max=1))

        # Evaluate validation set performance separately;
        # eval() deactivates dropout during the validation run.
        self.model.eval()
        normalized_adj = estimator.normalize()
        output = self.model(features, normalized_adj)

        loss_val = F.nll_loss(output[idx_val], labels[idx_val])
        acc_val = accuracy(output[idx_val], labels[idx_val])
        print('Epoch: {:04d}'.format(epoch + 1),
              'acc_train: {:.4f}'.format(acc_train.item()),
              'loss_val: {:.4f}'.format(loss_val.item()),
              'acc_val: {:.4f}'.format(acc_val.item()),
              'time: {:.4f}s'.format(time.time() - t))

        if acc_val > self.best_val_acc:
            self.best_val_acc = acc_val
            self.best_graph = normalized_adj.detach()
            self.weights = deepcopy(self.model.state_dict())
            if args.debug:
                print('\t=== saving current graph/gcn, best_val_acc: %s' %
                      self.best_val_acc.item())

        if loss_val < self.best_val_loss:
            self.best_val_loss = loss_val
            self.best_graph = normalized_adj.detach()
            self.weights = deepcopy(self.model.state_dict())
            if args.debug:
                print('\t=== saving current graph/gcn, best_val_loss: %s' %
                      self.best_val_loss.item())

        if args.debug:
            if epoch % 1 == 0:
                print(
                    'Epoch: {:04d}'.format(epoch + 1),
                    'loss_fro: {:.4f}'.format(loss_fro.item()),
                    'loss_gcn: {:.4f}'.format(loss_gcn.item()),
                    'loss_feat: {:.4f}'.format(loss_smooth_feat.item()),
                    'loss_symmetric: {:.4f}'.format(loss_symmetric.item()),
                    'delta_l1_norm: {:.4f}'.format(
                        torch.norm(estimator.estimated_adj - adj, 1).item()),
                    'loss_l1: {:.4f}'.format(loss_l1.item()),
                    'loss_total: {:.4f}'.format(total_loss.item()),
                    'loss_nuclear: {:.4f}'.format(loss_nuclear.item()))
Example No. 23
    def inner_train(
        self,
        features,
        modified_adj,
        ori_e,
        ori_v,
        idx_train,
        idx_unlabeled,
        labels,
        labels_self_training,
        verbose=False,
    ):
        adj_norm = utils.normalize_adj_tensor(modified_adj, device=self.device)
        eigen_norm = torch.norm(ori_e)
        eigen_mse = 0
        # New: add regularization term for spectral distance
        if self.regularization_weight != 0:
            e, v = torch.symeig(adj_norm, eigenvectors=True)
            eigen_mse = F.mse_loss(ori_e, e, reduction='sum')
        reg_loss = eigen_mse / eigen_norm * self.regularization_weight

        for j in range(self.train_iters):
            hidden = features
            for ix, w in enumerate(self.weights):
                b = self.biases[ix] if self.with_bias else 0
                if self.sparse_features:
                    hidden = adj_norm @ torch.spmm(hidden, w) + b
                else:
                    hidden = adj_norm @ hidden @ w + b
                if self.with_relu:
                    hidden = F.relu(hidden)

            output = F.log_softmax(hidden, dim=1)
            loss_labeled = F.nll_loss(output[idx_train], labels[idx_train])
            loss_unlabeled = F.nll_loss(output[idx_unlabeled],
                                        labels_self_training[idx_unlabeled])

            if self.lambda_ == 1:
                attack_loss = loss_labeled
            elif self.lambda_ == 0:
                attack_loss = loss_unlabeled
            else:
                attack_loss = self.lambda_ * loss_labeled + (
                    1 - self.lambda_) * loss_unlabeled

            loss_test_val = F.nll_loss(output[idx_unlabeled],
                                       labels[idx_unlabeled])

            attack_loss += reg_loss
            self.loss = attack_loss

            self.optimizer.zero_grad()
            loss_labeled.backward(retain_graph=True)

            if self.attack_structure:
                self.adj_changes.grad.zero_()
                self.adj_grad_sum += torch.autograd.grad(attack_loss,
                                                         self.adj_changes,
                                                         retain_graph=True)[0]
            if self.attack_features:
                self.feature_changes.grad.zero_()
                self.feature_grad_sum += torch.autograd.grad(
                    attack_loss, self.feature_changes, retain_graph=True)[0]

            self.optimizer.step()

        if verbose:
            print(
                'classification loss = {:.4f} | '.format(attack_loss.item() -
                                                         reg_loss),
                'reg loss = {:.8f} | '.format(reg_loss),
                'eigen_mse = {:.8f} | '.format(eigen_mse),
                'eigen_norm = {:.4f}'.format(eigen_norm))

            loss_test, acc_test = calc_acc(output, labels, idx_unlabeled)
            loss_train, acc_train = calc_acc(output, labels, idx_train)

            print(
                "-- Before final discretize: train loss = {:.4f} | ".format(
                    loss_train), "train acc = {:.4f} | ".format(acc_train),
                "unlabeled loss = {:.4f} | ".format(loss_test),
                "unlabeled acc = {:.4f} | ".format(acc_test))

            print('-- GCN loss on unlabeled data: {:.4f}'.format(
                loss_test_val.item()))
            print('-- GCN acc on unlabeled data: {:.4f}'.format(
                utils.accuracy(output[idx_unlabeled],
                               labels[idx_unlabeled]).item()))
Example No. 24
# check output
adj_per_nor_t = t.from_numpy(adj_per).float().cuda()

hidden = t.from_numpy(base_feat).float().cuda()
hidden = adj_per_nor_t @ hidden @ model.gc1.weight
if model.with_bias:
    hidden = hidden + model.gc1.bias
if model.with_relu:
    hidden = F.relu(hidden)
hidden = adj_per_nor_t @ hidden @ model.gc2.weight
if model.with_bias:
    hidden = hidden + model.gc2.bias

output = F.log_softmax(hidden, dim=1)
acc_after_attack = utils.accuracy(output[idx_test], labels[idx_test])

# do the low_pass
sim_adj_h0 = simGraph_init(base_feat,
                           p_neighbor_num=neighbor_num,
                           p_layer_id=0)

modified_adj_h0 = low_pass_adj_sym(adj_per,
                                   sim_adj_h0,
                                   base_feat,
                                   p_filter_value=1)

modified_adj_h0_t = t.from_numpy(modified_adj_h0).float().cuda()

hidden_low_pass_0 = t.from_numpy(base_feat).float().cuda()
hidden_low_pass_h0 = modified_adj_h0_t @ hidden_low_pass_0 @ model.gc1.weight