Example #1
    def meta_grad(self):

        modified_adj, modified_x = self.tf_adj, self.tf_x
        # A persistent tape is needed only when both attacks are enabled,
        # since the gradient is then taken twice from the same tape.
        persistent = self.structure_attack and self.feature_attack

        with tf.GradientTape(persistent=persistent) as tape:
            if self.structure_attack:
                modified_adj = self.get_perturbed_adj(self.tf_adj,
                                                      self.adj_changes)

            if self.feature_attack:
                modified_x = self.get_perturbed_x(self.tf_x, self.x_changes)

            adj_norm = normalize_adj_tensor(modified_adj)
            # Scale the logits down to smooth the output distribution
            output = self.forward(modified_x, adj_norm) / 5.0
            logit_labeled = tf.gather(output, self.idx_train)
            logit_unlabeled = tf.gather(output, self.idx_unlabeled)

            loss_labeled = self.loss_fn(self.labels_train, logit_labeled)
            loss_unlabeled = self.loss_fn(self.self_training_labels,
                                          logit_unlabeled)

            # Weighted combination of the labeled loss and the
            # self-training loss on unlabeled nodes
            attack_loss = self.lambda_ * loss_labeled + (
                1 - self.lambda_) * loss_unlabeled

        adj_grad, x_grad = None, None

        if self.structure_attack:
            adj_grad = tape.gradient(attack_loss, self.adj_changes)

        if self.feature_attack:
            x_grad = tape.gradient(attack_loss, self.x_changes)

        return adj_grad, x_grad
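A minimal sketch of how `meta_grad` might drive a greedy attack loop. The names `attacker`, `n_perturbations`, and `flip_entry` are hypothetical placeholders, not part of the snippet above:

    # Hypothetical greedy loop: one perturbation per meta-gradient step
    for _ in range(n_perturbations):
        adj_grad, x_grad = attacker.meta_grad()
        if adj_grad is not None:
            # Choose the entry whose gradient most increases the attack loss
            idx = tf.argmax(tf.reshape(adj_grad, [-1]))
            flip_entry(attacker.adj_changes, idx)  # hypothetical helper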
Example #2
    def random_sample(self, sample_epochs=20, disable=False):
        best_loss = -np.inf
        best_s = None
        # Keep only the strictly upper-triangular part of the perturbations
        s = tf.linalg.band_part(self.adj_changes, 0, -1) - tf.linalg.band_part(
            self.adj_changes, 0, 0)
        for _ in tqdm(range(sample_epochs),
                      desc='Random Sampling',
                      disable=disable):
            random_matrix = tf.random.uniform(shape=(self.n_nodes,
                                                     self.n_nodes),
                                              minval=0.,
                                              maxval=1.)
            # Sample a binary perturbation and discard it if it exceeds the budget
            sampled = tf.where(s > random_matrix, 1., 0.)
            if tf.reduce_sum(sampled) > self.n_perturbations:
                continue

            with tf.device(self.device):
                self.adj_changes.assign(sampled)
                adj = self.get_perturbed_adj()
                adj_norm = normalize_adj_tensor(adj)
                logit = self.surrogate([self.tf_x, adj_norm, self.idx_attack])
                logit = softmax(logit)
                loss = self.compute_loss(logit)

            if best_loss < loss:
                best_loss = loss
                best_s = sampled

        # Guard against the corner case where every sample exceeded the budget
        if best_s is None:
            return None
        return best_s.numpy()
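A possible way to consume the sampled perturbation; `attacker` is a hypothetical instance of the surrounding class:

    best = attacker.random_sample(sample_epochs=50)
    if best is not None:
        # Commit the best-scoring perturbation found by random sampling
        attacker.adj_changes.assign(best)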
Example #3
    def update_surrogate(self, trainable_variables, idx):
        # One training step of the surrogate on the currently perturbed graph
        with tf.GradientTape() as tape:
            adj = self.get_perturbed_adj()
            adj_norm = normalize_adj_tensor(adj)
            logit = self.surrogate([self.tf_x, adj_norm, idx])
            logit = softmax(logit)
            loss = self.compute_loss(logit)

        gradients = tape.gradient(loss, trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, trainable_variables))
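A sketch of retraining the surrogate for a few steps after each perturbation; `attacker`, `n_retrain_steps`, and the use of `idx_train` here are assumptions:

    for _ in range(n_retrain_steps):
        attacker.update_surrogate(attacker.surrogate.trainable_variables,
                                  attacker.idx_train)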
Example #4
    def compute_structure_gradients(self, adj, x, target_index, target_label):

        with tf.GradientTape() as tape:
            # `adj` is a plain tensor rather than a variable, so it must be
            # watched explicitly for gradients to be recorded
            tape.watch(adj)
            adj_norm = normalize_adj_tensor(adj)
            logit = self.surrogate([x, adj_norm, target_index])
            loss = self.loss_fn(target_label, logit)

        gradients = tape.gradient(loss, adj)
        return gradients
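These raw gradients can be turned into flip scores with a common heuristic: a positive gradient on a non-edge suggests adding it, and a negative gradient on an existing edge suggests removing it. A sketch, assuming `adj` is a dense 0/1 tensor and `attacker` is an instance of the surrounding class:

    grads = attacker.compute_structure_gradients(adj, x, target_index, target_label)
    # Sign trick: entries of (1 - 2*adj) are +1 for non-edges and -1 for edges
    scores = grads * (1. - 2. * adj)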
Example #5
    def compute_gradients(self, idx):
        with tf.GradientTape() as tape:
            # Ensure gradients flow to the perturbation matrix even if it
            # is not tracked as a trainable variable
            tape.watch(self.adj_changes)
            adj = self.get_perturbed_adj()
            adj_norm = normalize_adj_tensor(adj)
            logit = self.surrogate([self.tf_x, adj_norm, idx])
            logit = softmax(logit)
            loss = self.compute_loss(logit)

        gradients = tape.gradient(loss, self.adj_changes)
        return gradients
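One way these gradients could be used is a plain gradient-ascent step on the perturbation matrix, omitting the projection a full PGD-style attack would apply afterwards; `attacker` and `step_size` are assumptions:

    grads = attacker.compute_gradients(attacker.idx_attack)
    step_size = 0.1  # assumed hyperparameter
    attacker.adj_changes.assign_add(step_size * grads)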
Example #6
    def compute_gradients(self, modified_adj, adj_changes, target_index,
                          target_label):

        with tf.GradientTape() as tape:
            tape.watch(adj_changes)
            # Overlay the candidate perturbations on the current adjacency matrix
            adj = modified_adj + adj_changes
            adj_norm = normalize_adj_tensor(adj)
            logit = self.surrogate([self.tf_x, adj_norm, target_index])
            loss = self.loss_fn(target_label, logit, from_logits=True)

        gradients = tape.gradient(loss, adj_changes)
        return gradients
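A sketch of calling this variant with an all-zero overlay, so the gradient is evaluated at the current graph; `attacker` is a hypothetical instance:

    adj_changes = tf.zeros_like(modified_adj)
    grads = attacker.compute_gradients(modified_adj, adj_changes,
                                       target_index, target_label)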
Example #7
    def inner_train(self, adj, x):

        self.initialize()
        adj_norm = normalize_adj_tensor(adj)

        for _ in range(self.epochs):
            weight_grads = self.train_step(x, adj_norm, self.idx_train,
                                           self.labels_train)

            # Momentum update: v <- momentum * v + g
            for v, g in zip(self.velocities, weight_grads):
                v.assign(self.momentum * v + g)

            # Weight update: w <- w - lr * v
            for w, v in zip(self.weights, self.velocities):
                w.assign_sub(self.lr * v)
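With a constant learning rate, this hand-rolled rule produces the same iterates as SGD with momentum, so the two loops could be collapsed onto a Keras optimizer (a sketch, assuming the same `train_step` interface):

    optimizer = tf.keras.optimizers.SGD(learning_rate=self.lr,
                                        momentum=self.momentum)
    for _ in range(self.epochs):
        weight_grads = self.train_step(x, adj_norm, self.idx_train,
                                       self.labels_train)
        optimizer.apply_gradients(zip(weight_grads, self.weights))

Keeping the update explicit, however, leaves the velocity variables directly accessible, which attacks that unroll the inner training may need.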
Example #8
    def meta_grad(self):
        self.initialize()

        modified_adj, modified_x = self.tf_adj, self.tf_x
        adj_grad_sum, x_grad_sum = self.adj_grad_sum, self.x_grad_sum
        optimizer = self.optimizer

        for _ in tf.range(self.epochs):

            # Persistent tape: up to three gradients are taken from it below
            with tf.GradientTape(persistent=True) as tape:
                if self.structure_attack:
                    modified_adj = self.get_perturbed_adj(
                        self.tf_adj, self.adj_changes)

                if self.feature_attack:
                    modified_x = self.get_perturbed_x(self.tf_x,
                                                      self.x_changes)

                adj_norm = normalize_adj_tensor(modified_adj)
                # Scale the logits down to smooth the output distribution
                output = self.forward(modified_x, adj_norm) / 5.0
                logit_labeled = tf.gather(output, self.idx_train)
                logit_unlabeled = tf.gather(output, self.idx_unlabeled)

                loss_labeled = self.loss_fn(self.labels_train, logit_labeled)
                loss_unlabeled = self.loss_fn(self.self_training_labels,
                                              logit_unlabeled)

                attack_loss = self.lambda_ * loss_labeled + (
                    1 - self.lambda_) * loss_unlabeled

            adj_grad, x_grad = None, None

            # Inner training step: update the surrogate weights using only
            # the loss on labeled nodes
            gradients = tape.gradient(loss_labeled, self.weights)
            optimizer.apply_gradients(zip(gradients, self.weights))

            if self.structure_attack:
                adj_grad = tape.gradient(attack_loss, self.adj_changes)
                adj_grad_sum.assign_add(adj_grad)

            if self.feature_attack:
                x_grad = tape.gradient(attack_loss, self.x_changes)
                x_grad_sum.assign_add(x_grad)

            # Release the persistent tape's resources at the end of each step
            del tape

        return adj_grad_sum, x_grad_sum
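The accumulated meta-gradients can then be scored into concrete edge flips. A rough sketch following a common heuristic (symmetrize, then prefer entries whose gradient increases the loss; `attacker` is a hypothetical instance):

    adj_grad_sum, x_grad_sum = attacker.meta_grad()
    # Symmetrize: the adjacency matrix of an undirected graph is symmetric
    adj_grad = adj_grad_sum + tf.transpose(adj_grad_sum)
    # +1 for non-edges, -1 for existing edges
    scores = adj_grad * (1. - 2. * attacker.tf_adj)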
Example #9
    def compute_gradients(self, modified_adj, modified_x, target_index,
                          target_label):

        with tf.GradientTape(persistent=True) as tape:
            # Watch the inputs explicitly; unlike variables, plain tensors
            # are not tracked by the tape automatically
            tape.watch(modified_adj)
            tape.watch(modified_x)
            adj_norm = normalize_adj_tensor(modified_adj)
            logit = self.surrogate([modified_x, adj_norm, target_index])
            loss = self.loss_fn(target_label, logit, from_logits=True)

        adj_grad, x_grad = None, None

        if self.structure_attack:
            adj_grad = tape.gradient(loss, modified_adj)

        if self.feature_attack:
            x_grad = tape.gradient(loss, modified_x)

        return adj_grad, x_grad
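A sketch of a targeted use of this joint variant, inspecting only the gradient row of the target node; `attacker` and `target` are assumptions:

    adj_grad, x_grad = attacker.compute_gradients(modified_adj, modified_x,
                                                  target, target_label)
    if adj_grad is not None:
        # Rank candidate neighbors of the target by gradient magnitude
        row = tf.abs(adj_grad[target])
        candidates = tf.argsort(row, direction='DESCENDING')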
Example #10
    def __init__(self,
                 adj,
                 x,
                 labels,
                 idx_train=None,
                 seed=None,
                 name=None,
                 device='CPU:0',
                 surrogate=None,
                 surrogate_args=None,
                 **kwargs):
        super().__init__(adj,
                         x=x,
                         labels=labels,
                         seed=seed,
                         name=name,
                         device=device,
                         **kwargs)

        if surrogate is None:
            # Resolve the default here to avoid a mutable default argument
            surrogate_args = surrogate_args or {}
            surrogate = train_a_surrogate(self, 'DenseGCN', idx_train,
                                          **surrogate_args)
        elif not isinstance(surrogate, DenseGCN):
            raise RuntimeError(
                "The surrogate model must be an instance of `graphgallery.nn.DenseGCN`."
            )

        adj, x = self.adj, self.x
        self.nodes_set = set(range(self.n_nodes))
        self.features_set = np.arange(self.n_attrs)

        # IG can also conduct feature attack
        self.allow_feature_attack = True

        with tf.device(self.device):
            self.surrogate = surrogate
            self.loss_fn = SparseCategoricalCrossentropy(from_logits=True)
            self.tf_x = astensor(x)
            # `.A` densifies the sparse adjacency matrix before tensor conversion
            self.tf_adj = astensor(adj.A)
            self.adj_norm = normalize_adj_tensor(self.tf_adj)
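A minimal construction sketch. The class name `IGAttack` and the data-loading step are placeholders; only the argument names come from the signature above:

    # Hypothetical setup: adj, x, labels, idx_train come from your dataset
    attacker = IGAttack(adj, x, labels, idx_train=idx_train, device='CPU:0')
    # When no surrogate is supplied, a DenseGCN is trained automatically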