def construct_mask(self):
        adj_mask = np.ones(self.adj.shape, dtype=self.floatx)
        x_mask = np.ones(self.x.shape, dtype=self.floatx)
        adj_mask[:, self.target] = 0.
        adj_mask[self.target, :] = 0.
        x_mask[self.target, :] = 0.

        adj_mask = astensor(adj_mask)
        x_mask = astensor(x_mask)

        return adj_mask, x_mask
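A standalone sketch of what these masks accomplish for indirect attacks; the shapes and the `target` value below are illustrative assumptions, not taken from the library. Multiplying gradient scores by the masks excludes every edge and feature that touches the target node directly, so the attack is forced to act through the target's neighbourhood instead.

import numpy as np

n_nodes, n_attrs, target = 4, 3, 1
adj_mask = np.ones((n_nodes, n_nodes))
x_mask = np.ones((n_nodes, n_attrs))
adj_mask[:, target] = adj_mask[target, :] = 0.
x_mask[target, :] = 0.

adj_grad = np.random.rand(n_nodes, n_nodes)
x_grad = np.random.rand(n_nodes, n_attrs)
masked_adj_grad = adj_grad * adj_mask  # candidate edges incident to `target` drop out
masked_x_grad = x_grad * x_mask        # the target's own features drop out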
Example #2
    def __init__(self,
                 adj,
                 x,
                 labels,
                 idx_train=None,
                 idx_val=None,
                 seed=None,
                 name=None,
                 device='CPU:0',
                 surrogate=None,
                 surrogate_args={},
                 **kwargs):
        super().__init__(adj,
                         x=x,
                         labels=labels,
                         seed=seed,
                         name=name,
                         device=device,
                         **kwargs)

        if surrogate is None:
            surrogate = train_a_surrogate(self, 'GCN', idx_train, idx_val,
                                          **surrogate_args)

        elif not isinstance(surrogate, SemiSupervisedModel):
            raise RuntimeError(
                "surrogate model should be instance of `graphgallery.nn.SemiSupervisedModel`."
            )

        with tf.device(self.device):
            self.surrogate = surrogate
            self.loss_fn = sparse_categorical_crossentropy
            self.tf_x = astensor(self.x)
Example #3
    def construct_sub_adj(self, influence_nodes, wrong_label_nodes, sub_nodes,
                          sub_edges):
        length = len(wrong_label_nodes)
        potential_edges = np.vstack([
            np.stack([np.tile(infl, length), wrong_label_nodes], axis=1)
            for infl in influence_nodes
        ])

        if len(influence_nodes) > 1:
            # TODO: consider self-loops
            mask = self.adj[tuple(potential_edges.T)].A1 == 0
            potential_edges = potential_edges[mask]

        nodes = np.union1d(sub_nodes, wrong_label_nodes)
        edge_weights = np.ones(sub_edges.shape[0], dtype=self.floatx)
        non_edge_weights = np.zeros(potential_edges.shape[0],
                                    dtype=self.floatx)
        self_loop_weights = np.ones(nodes.shape[0], dtype=self.floatx)
        self_loop = np.stack([nodes, nodes], axis=1)

        self.indices = np.vstack([
            sub_edges, sub_edges[:, [1, 0]], potential_edges,
            potential_edges[:, [1, 0]], self_loop
        ])
        self.edge_weights = tf.Variable(edge_weights, dtype=self.floatx)
        self.non_edge_weights = tf.Variable(non_edge_weights,
                                            dtype=self.floatx)
        self.self_loop_weights = astensor(self_loop_weights, dtype=self.floatx)
        self.edge_index = sub_edges
        self.non_edge_index = potential_edges
        self.self_loop = self_loop
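The indices and the three weight groups built above can be stitched back into a sparse adjacency. The helper below is a minimal sketch, under the assumption that the weight ordering mirrors `self.indices` (edges in both directions, then potential non-edges in both directions, then self-loops); it is not code from the library.

import numpy as np
import tensorflow as tf

def assemble_sub_adj(indices, edge_weights, non_edge_weights,
                     self_loop_weights, n_nodes):
    # The weight order mirrors how `self.indices` is stacked above:
    # edges (both directions), potential non-edges (both directions), self-loops.
    weights = tf.concat([edge_weights, edge_weights,
                         non_edge_weights, non_edge_weights,
                         self_loop_weights], axis=0)
    adj = tf.sparse.SparseTensor(np.asarray(indices, dtype='int64'),
                                 weights, (n_nodes, n_nodes))
    return tf.sparse.reorder(adj)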
Example #4
    def __init__(self,
                 adj,
                 x,
                 labels,
                 idx_train,
                 surrogate=None,
                 surrogate_args={},
                 seed=None,
                 name=None,
                 device='CPU:0',
                 **kwargs):

        super().__init__(adj=adj,
                         x=x,
                         labels=labels,
                         seed=seed,
                         name=name,
                         device=device,
                         **kwargs)
        adj, x = self.adj, self.x

        if surrogate is None:
            surrogate = train_a_surrogate(self, 'DenseGCN', idx_train,
                                          **surrogate_args)
        elif not isinstance(surrogate, DenseGCN):
            raise RuntimeError(
                "surrogate model should be the instance of `graphgallery.nn.DenseGCN`."
            )

        idx_attack = asintarr(idx_train)

        with tf.device(self.device):
            self.idx_attack = astensor(idx_attack)
            self.labels_attack = astensor(labels[idx_attack])
            self.tf_adj = astensor(self.adj.A)
            self.tf_x = astensor(x)
            self.complementary = tf.ones_like(self.tf_adj) - tf.eye(
                self.n_nodes) - 2. * self.tf_adj
            self.loss_fn = sparse_categorical_crossentropy
            self.adj_changes = tf.Variable(
                tf.zeros(adj.shape, dtype=self.floatx))
            self.surrogate = surrogate

            # used for CW_loss=True
            self.label_matrix = tf.gather(tf.eye(self.n_classes),
                                          self.labels_attack)
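The `complementary` matrix built above is what lets a continuous `adj_changes` variable act as an edge-flip mask: for an existing edge the factor is -1, for a missing off-diagonal entry it is +1, and on the diagonal it is 0. A toy sketch of the idea (an illustration, not library code):

import tensorflow as tf

tf_adj = tf.constant([[0., 1., 0.],
                      [1., 0., 0.],
                      [0., 0., 0.]])        # toy 3-node graph with one edge (0, 1)
complementary = tf.ones_like(tf_adj) - tf.eye(3) - 2. * tf_adj
adj_changes = tf.constant([[0., 1., 0.],
                           [1., 0., 0.],
                           [0., 0., 1.]])   # the diagonal entry has no effect
modified_adj = tf_adj + complementary * adj_changes
# edge (0, 1) is removed; entry (2, 2) stays 0 because complementary's diagonal is 0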
Example #5
    def get_feature_importance(self, candidates, steps, disable=False):
        adj = self.adj_norm
        x = self.tf_x
        target_index = astensor([self.target])
        target_label = astensor(self.target_label)
        baseline_add = x.numpy()
        baseline_add[candidates[:, 0], candidates[:, 1]] = 1.0
        baseline_add = astensor(baseline_add)
        baseline_remove = x.numpy()
        baseline_remove[candidates[:, 0], candidates[:, 1]] = 0.0
        baseline_remove = astensor(baseline_remove)
        feature_indicator = self.x[candidates[:, 0], candidates[:, 1]] > 0

        features = candidates[feature_indicator]
        non_attrs = candidates[~feature_indicator]

        feature_gradients = tf.zeros(features.shape[0])
        non_feature_gradients = tf.zeros(non_attrs.shape[0])

        for alpha in tqdm(tf.linspace(0., 1.0, steps + 1),
                          desc='Computing feature importance',
                          disable=disable):
            ###### Compute integrated gradients for removing features ######
            x_diff = x - baseline_remove
            x_step = baseline_remove + alpha * x_diff

            gradients = self.compute_feature_gradients(adj, x_step,
                                                       target_index,
                                                       target_label)
            feature_gradients += -tf.gather_nd(gradients, features)

            ###### Compute integrated gradients for adding features ######
            x_diff = baseline_add - x
            x_step = baseline_add - alpha * x_diff

            gradients = self.compute_feature_gradients(adj, x_step,
                                                       target_index,
                                                       target_label)
            non_feature_gradients += tf.gather_nd(gradients, non_attrs)

        integrated_grads = np.zeros(feature_indicator.size)
        integrated_grads[feature_indicator] = feature_gradients.numpy()
        integrated_grads[~feature_indicator] = non_feature_gradients.numpy()

        return integrated_grads
Example #6
    def get_link_importance(self, candidates, steps, disable=False):

        adj = self.tf_adj
        x = self.tf_x
        target_index = astensor([self.target])
        target_label = astensor(self.target_label)
        baseline_add = adj.numpy()
        baseline_add[candidates[:, 0], candidates[:, 1]] = 1.0
        baseline_add = astensor(baseline_add)
        baseline_remove = adj.numpy()
        baseline_remove[candidates[:, 0], candidates[:, 1]] = 0.0
        baseline_remove = astensor(baseline_remove)
        edge_indicator = self.adj[candidates[:, 0], candidates[:, 1]].A1 > 0

        edges = candidates[edge_indicator]
        non_edges = candidates[~edge_indicator]

        edge_gradients = tf.zeros(edges.shape[0])
        non_edge_gradients = tf.zeros(non_edges.shape[0])

        for alpha in tqdm(tf.linspace(0., 1.0, steps + 1),
                          desc='Computing link importance',
                          disable=disable):
            ###### Compute integrated gradients for removing edges ######
            adj_diff = adj - baseline_remove
            adj_step = baseline_remove + alpha * adj_diff

            gradients = self.compute_structure_gradients(
                adj_step, x, target_index, target_label)
            edge_gradients += -tf.gather_nd(gradients, edges)

            ###### Compute integrated gradients for adding edges ######
            adj_diff = baseline_add - adj
            adj_step = baseline_add - alpha * adj_diff

            gradients = self.compute_structure_gradients(
                adj_step, x, target_index, target_label)
            non_edge_gradients += tf.gather_nd(gradients, non_edges)

        integrated_grads = np.zeros(edge_indicator.size)
        integrated_grads[edge_indicator] = edge_gradients.numpy()
        integrated_grads[~edge_indicator] = non_edge_gradients.numpy()

        return integrated_grads
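Both importance routines above follow the same integrated-gradients recipe: average the gradient along a straight-line path from a baseline to the input, then scale by the input-baseline difference. A generic, self-contained sketch of that recipe (the loss callable `fn` is a placeholder assumption, not a library function):

import tensorflow as tf

def integrated_gradients(fn, baseline, inputs, steps=20):
    # fn: callable mapping an input tensor to a scalar loss
    total = tf.zeros_like(inputs)
    for alpha in tf.linspace(0., 1., steps + 1):
        point = baseline + alpha * (inputs - baseline)
        with tf.GradientTape() as tape:
            tape.watch(point)
            loss = fn(point)
        total += tape.gradient(loss, point)
    return (inputs - baseline) * total / float(steps + 1)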
Example #7
    def attack(self,
               target,
               n_perturbations=None,
               direct_attack=True,
               structure_attack=True,
               feature_attack=False,
               disable=False):

        super().attack(target, n_perturbations, direct_attack,
                       structure_attack, feature_attack)

        if direct_attack and self.n_perturbations == self.degree[target]:
            warnings.warn(
                'GradArgmax only works for removing edges, so a direct attack with '
                '`n_perturbations` equal to the degree of the target would turn it into a singleton. '
                '`n_perturbations` is automatically set to `degree - 1`.',
                RuntimeWarning)
            self.n_perturbations -= 1

        target_index = astensor([target])
        target_label = astensor(self.target_label)

        surrogate = self.surrogate

        for _ in tqdm(range(self.n_perturbations),
                      desc='Perturbing Graph',
                      disable=disable):
            adj = astensor(normalize_adj(self.modified_adj))
            indices = adj.indices.numpy()
            gradients = self.compute_gradients(adj, target_index,
                                               target_label).numpy()
            gradients = np.minimum(gradients, 0.)
            sorted_index = np.argsort(gradients)
            for index in sorted_index:
                u, v = indices[index]
                if not self.allow_singleton and (self.modified_degree[u] <= 1
                                                 or
                                                 self.modified_degree[v] <= 1):
                    continue
                if not self.is_modified_edge(u, v):
                    self.structure_flips[(u, v)] = index
                    self.update_graph(u, v)
                    break
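A tiny illustration of the selection rule above (the numbers are made up): only non-positive gradients survive the clipping because GradArgmax, as the warning notes, only removes edges, and the ascending argsort visits the most damaging removal first.

import numpy as np

gradients = np.minimum(np.array([0.4, -0.1, -0.7, 0.2]), 0.)  # -> [0., -0.1, -0.7, 0.]
order = np.argsort(gradients)                                 # index 2 (gradient -0.7) is tried first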
Example #8
    def attack(self,
               target,
               n_perturbations=None,
               direct_attack=True,
               structure_attack=True,
               feature_attack=False,
               disable=False):

        super().attack(target, n_perturbations, direct_attack,
                       structure_attack, feature_attack)

        if not direct_attack:
            raise NotImplementedError(
                f'{self.name} does NOT support indirect attack now.')

        target_index = astensor([target])
        target_label = astensor(self.target_label)

        for _ in tqdm(range(self.n_perturbations),
                      desc='Perturbing Graph',
                      disable=disable):
            with tf.device(self.device):
                gradients = self.compute_gradients(self.modified_adj,
                                                   self.adj_changes,
                                                   target_index, target_label)

            modified_row = tf.gather(self.modified_adj, target_index)
            gradients = (gradients * (-2 * modified_row + 1)).numpy().ravel()

            sorted_index = np.argsort(-gradients)
            for index in sorted_index:
                u = target
                v = index % self.n_nodes
                has_edge = self.adj[u, v]
                if has_edge and not self.allow_singleton and (
                        self.modified_degree[u] <= 1
                        or self.modified_degree[v] <= 1):
                    continue
                if not self.is_modified_edge(u, v):
                    self.structure_flips[(u, v)] = index
                    self.update_graph(u, v, has_edge)
                    break
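The factor `-2 * modified_row + 1` used above is a sign trick worth spelling out; a small sketch with made-up numbers: non-edges keep their gradient (factor +1) while existing edges have it negated (factor -1), so a single descending argsort ranks additions and removals on the same scale.

import tensorflow as tf

modified_row = tf.constant([0., 1., 0., 1.])    # target's row of the modified adjacency
gradients = tf.constant([0.3, -0.2, -0.5, 0.4])
scores = gradients * (-2. * modified_row + 1.)  # -> [0.3, 0.2, -0.5, -0.4]
# adding edge 0 (score 0.3) is tried before removing edge 1 (score 0.2)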
Example #9
    def __init__(self,
                 adj,
                 x,
                 labels,
                 idx_train=None,
                 seed=None,
                 name=None,
                 device='CPU:0',
                 surrogate=None,
                 surrogate_args={},
                 **kwargs):
        super().__init__(adj,
                         x=x,
                         labels=labels,
                         seed=seed,
                         name=name,
                         device=device,
                         **kwargs)

        if surrogate is None:
            surrogate = train_a_surrogate(self, 'DenseGCN', idx_train,
                                          **surrogate_args)
        elif not isinstance(surrogate, DenseGCN):
            raise RuntimeError(
                "surrogate model should be the instance of `graphgallery.nn.DenseGCN`."
            )

        adj, x = self.adj, self.x
        self.nodes_set = set(range(self.n_nodes))
        self.features_set = np.arange(self.n_attrs)

        # IG can also conduct feature attacks
        self.allow_feature_attack = True

        with tf.device(self.device):
            self.surrogate = surrogate
            self.loss_fn = SparseCategoricalCrossentropy(from_logits=True)
            self.tf_x = astensor(x)
            self.tf_adj = astensor(adj.A)
            self.adj_norm = normalize_adj_tensor(self.tf_adj)
Example #10
    def __init__(self,
                 adj,
                 x,
                 labels,
                 idx_train,
                 idx_unlabeled,
                 hidden_layers,
                 use_relu,
                 self_training_labels=None,
                 seed=None,
                 name=None,
                 device='CPU:0',
                 **kwargs):

        super().__init__(adj=adj,
                         x=x,
                         labels=labels,
                         seed=seed,
                         name=name,
                         device=device,
                         **kwargs)
        adj, x, labels = self.adj, self.x, self.labels

        idx_train = asintarr(idx_train)
        idx_unlabeled = asintarr(idx_unlabeled)

        if self_training_labels is None:
            surrogate = DenseGCN(adj,
                                 x,
                                 labels,
                                 device='GPU',
                                 norm_x=None,
                                 seed=None)
            surrogate.build(16, activations='relu' if use_relu else None)
            his = surrogate.train(idx_train,
                                  verbose=0,
                                  epochs=200,
                                  save_best=False)
            self_training_labels = surrogate.predict(idx_unlabeled).argmax(1)

        self.ll_ratio = None
        # Metattack can also conduct feature attacks
        self.allow_feature_attack = True

        with tf.device(self.device):
            self.idx_train = astensor(idx_train, dtype=self.intx)
            self.idx_unlabeled = astensor(idx_unlabeled, dtype=self.intx)
            self.labels_train = astensor(self.labels[idx_train],
                                         dtype=self.floatx)
            self.self_training_labels = astensor(self_training_labels,
                                                 dtype=self.floatx)
            self.tf_adj = astensor(adj.A, dtype=self.floatx)
            self.tf_x = astensor(x, dtype=self.floatx)
            self.build(hidden_layers=hidden_layers)
            self.use_relu = use_relu
            self.loss_fn = SparseCategoricalCrossentropy(from_logits=True)

            self.adj_changes = tf.Variable(tf.zeros_like(self.tf_adj))
            self.x_changes = tf.Variable(tf.zeros_like(self.tf_x))
Example #11
    def __init__(self,
                 adj,
                 x,
                 labels,
                 idx_train=None,
                 hops=2,
                 seed=None,
                 name=None,
                 device='CPU:0',
                 surrogate=None,
                 surrogate_args={},
                 **kwargs):

        super().__init__(adj,
                         x=x,
                         labels=labels,
                         seed=seed,
                         name=name,
                         device=device,
                         **kwargs)

        if surrogate is None:
            surrogate = train_a_surrogate(self, 'SGC', idx_train,
                                          **surrogate_args)
        elif not isinstance(surrogate, SGC):
            raise RuntimeError(
                "surrogate model should be the instance of `graphgallery.nn.SGC`."
            )

        self.hops = hops
        # nodes with the same class labels
        self.similar_nodes = [
            np.where(labels == c)[0] for c in range(self.n_classes)
        ]

        with tf.device(self.device):
            W, b = surrogate.weights
            X = astensor(x)
            self.b = b
            self.XW = X @ W
            self.surrogate = surrogate
            self.SGC = SGConvolution(hops)
            self.loss_fn = sparse_categorical_crossentropy
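Precomputing `X @ W` above relies on SGC being linear: with the nonlinearities removed, the logits are just the normalized adjacency applied `hops` times to the transformed features. A dense, self-contained sketch of that identity (an illustration, not the library's SGConvolution layer):

def sgc_logits(adj_norm, x, W, b, hops=2):
    out = x @ W               # feature transformation applied once, like `self.XW`
    for _ in range(hops):
        out = adj_norm @ out  # k-hop propagation with the normalized adjacency
    return out + b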
Example #12
    def __init__(self,
                 adj,
                 x,
                 labels,
                 idx_train,
                 idx_unlabeled=None,
                 surrogate=None,
                 surrogate_args={},
                 seed=None,
                 name=None,
                 device='CPU:0',
                 **kwargs):

        super().__init__(adj=adj,
                         x=x,
                         labels=labels,
                         seed=seed,
                         name=name,
                         device=device,
                         **kwargs)

        adj, x = self.adj, self.x

        if surrogate is None:
            surrogate = train_a_surrogate(self, 'DenseGCN', idx_train,
                                          **surrogate_args)
        elif not isinstance(surrogate, DenseGCN):
            raise RuntimeError(
                "surrogate model should be the instance of `graphgallery.nn.DenseGCN`."
            )

        # poisoning attack in DeepRobust
        if idx_unlabeled is None:
            idx_attack = idx_train
            labels_attack = labels[idx_train]
        else:  # Evasion attack in the original paper
            idx_unlabeled = asintarr(idx_unlabeled)
            self_training_labels = self.estimate_self_training_labels(
                surrogate, idx_unlabeled)
            idx_attack = np.hstack([idx_train, idx_unlabeled])
            labels_attack = np.hstack(
                [labels[idx_train], self_training_labels])

        with tf.device(self.device):
            self.idx_attack = astensor(idx_attack)
            self.labels_attack = astensor(labels_attack)
            self.tf_adj = astensor(self.adj.A)
            self.tf_x = astensor(x)
            self.complementary = tf.ones_like(self.tf_adj) - tf.eye(
                self.n_nodes) - 2. * self.tf_adj
            self.loss_fn = SparseCategoricalCrossentropy()
            self.adj_changes = tf.Variable(
                tf.zeros(adj.shape, dtype=self.floatx))
            self.surrogate = surrogate

            # used for `CW_loss=True`
            self.label_matrix = tf.gather(tf.eye(self.n_classes),
                                          self.labels_attack)
            self.range_idx = tf.range(idx_attack.size, dtype=self.intx)
            self.indices_real = tf.stack([self.range_idx, self.labels_attack],
                                         axis=1)

    def attack(self,
               target,
               n_perturbations=None,
               symmetric=True,
               direct_attack=True,
               structure_attack=True,
               feature_attack=False,
               disable=False):

        super().attack(target, n_perturbations, direct_attack,
                       structure_attack, feature_attack)

        if feature_attack and not is_binary(self.x):
            raise RuntimeError(
                "Attacks on the node features are currently only supported for binary attributes."
            )

        with tf.device(self.device):
            target_index = astensor([self.target])
            target_labels = astensor(self.target_label)

            modified_adj, modified_x = self.modified_adj, self.modified_x

            if not direct_attack:
                adj_mask, x_mask = self.construct_mask()
            else:
                adj_mask, x_mask = None, None

            for _ in tqdm(range(self.n_perturbations),
                          desc='Perturbing Graph',
                          disable=disable):

                adj_grad, x_grad = self.compute_gradients(
                    modified_adj, modified_x, target_index, target_labels)

                adj_grad_score = tf.constant(0.0)
                x_grad_score = tf.constant(0.0)

                if structure_attack:

                    if symmetric:
                        adj_grad = (adj_grad + tf.transpose(adj_grad)) / 2.

                    adj_grad_score = self.structure_score(
                        modified_adj, adj_grad, adj_mask)

                if feature_attack:
                    x_grad_score = self.feature_score(modified_x, x_grad,
                                                      x_mask)

                if tf.reduce_max(adj_grad_score) >= tf.reduce_max(
                        x_grad_score):
                    adj_grad_argmax = tf.argmax(adj_grad_score)
                    row, col = divmod(adj_grad_argmax.numpy(), self.n_nodes)
                    modified_adj[row, col].assign(1. - modified_adj[row, col])
                    modified_adj[col, row].assign(1. - modified_adj[col, row])
                    self.structure_flips.append((row, col))
                else:
                    x_grad_argmax = tf.argmax(x_grad_score)
                    row, col = divmod(x_grad_argmax.numpy(), self.n_attrs)
                    modified_x[row, col].assign(1. - modified_x[row, col])
                    self.feature_flips.append((row, col))
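A hypothetical end-to-end usage sketch for the attacker whose `__init__` and `attack` appear above; `Attacker` is a stand-in name rather than an actual graphgallery class, and the arguments simply mirror the signatures shown.

# `Attacker` is a hypothetical stand-in for the attacker class these methods belong to.
attacker = Attacker(adj, x, labels, idx_train, device='CPU:0')
attacker.attack(target=0, n_perturbations=5,
                direct_attack=True, structure_attack=True, feature_attack=False)
print(attacker.structure_flips)   # greedily chosen (row, col) edge flips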