Example 1
    def attack(self,
               num_budgets=0.05,
               sample_epochs=20,
               C=None,
               CW_loss=False,
               epochs=200,
               structure_attack=True,
               feature_attack=False,
               disable=False):

        super().attack(num_budgets, structure_attack, feature_attack)

        self.CW_loss = CW_loss
        if not C:
            if CW_loss:
                C = 0.1
            else:
                C = 200

        for epoch in tqdm(range(epochs), desc='PGD Training', disable=disable):
            gradients = self.compute_gradients(self.victim_nodes)
            lr = C / np.sqrt(epoch + 1)
            self.adj_changes.data.add_(lr * gradients)
            self.projection()

        best_s = self.random_sample(sample_epochs, disable=disable)
        self.adj_flips = np.transpose(np.where(best_s > 0.))
        return self
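The `projection()` step is not shown in this snippet. In PGD-style topology attacks it is typically a Euclidean projection of the perturbation scores onto {s in [0, 1] : sum(s) <= budget}, found by bisecting on a shift mu. A minimal standalone NumPy sketch of that assumed behavior (not this repository's implementation):

import numpy as np

def project_onto_budget(s, budget, eps=1e-5):
    # Project s onto {p : 0 <= p <= 1, sum(p) <= budget}. If clipping
    # alone satisfies the budget we are done; otherwise bisect on mu so
    # that clip(s - mu, 0, 1) sums to (at most) the budget.
    clipped = np.clip(s, 0.0, 1.0)
    if clipped.sum() <= budget:
        return clipped
    lo, hi = 0.0, s.max()
    while hi - lo > eps:
        mu = (lo + hi) / 2.0
        if np.clip(s - mu, 0.0, 1.0).sum() > budget:
            lo = mu
        else:
            hi = mu
    return np.clip(s - hi, 0.0, 1.0)

s = np.random.randn(100)
print(project_onto_budget(s, budget=5.0).sum())  # <= 5.0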
Example 2
    def random_sample(self, sample_epochs=20, disable=False):
        best_loss = -10000
        best_s = None
        s = tf.linalg.band_part(self.adj_changes, 0, -1) - tf.linalg.band_part(self.adj_changes, 0, 0)
        for it in tqdm(range(sample_epochs),
                       desc='Random Sampling',
                       disable=disable):
            random_matrix = tf.random.uniform(shape=(self.num_nodes,
                                                     self.num_nodes),
                                              minval=0.,
                                              maxval=1.)
            sampled = tf.where(s > random_matrix, 1., 0.)
            if tf.reduce_sum(sampled) > self.num_budgets:
                continue

            with tf.device(self.device):
                self.adj_changes.assign(sampled)
                loss = self.compute_loss(self.victim_nodes)

            if best_loss < loss:
                best_loss = loss
                best_s = sampled

        assert best_s is not None, "No feasible sample found within the budget"
        return best_s.numpy()
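The same sample-and-reject pattern in plain NumPy, with a stand-in loss so the sketch runs on its own (the real code scores each draw with `compute_loss` on the victim nodes):

import numpy as np

rng = np.random.default_rng(0)
num_nodes, budget = 6, 3
# continuous edge scores on the strict upper triangle, as above
s = np.triu(rng.uniform(0.0, 0.4, size=(num_nodes, num_nodes)), k=1)

best_loss, best_sample = -np.inf, None
for _ in range(20):
    sampled = (s > rng.uniform(size=s.shape)).astype(float)
    if sampled.sum() > budget:        # reject draws that exceed the budget
        continue
    loss = sampled.sum()              # stand-in for compute_loss(...)
    if loss > best_loss:
        best_loss, best_sample = loss, sampled

assert best_sample is not None
print(np.transpose(np.where(best_sample > 0)))  # flips as (row, col) pairs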
Example 3
    def attack(self,
               num_budgets=0.05,
               structure_attack=True,
               feature_attack=False,
               disable=False):

        super().attack(num_budgets, structure_attack, feature_attack)

        influence_nodes = list(self.nodes_set)
        adj_flips = self.adj_flips
        random_list = np.random.choice(2, self.num_budgets) * 2 - 1

        for remove_or_insert in tqdm(random_list,
                                     desc='Perturbing Graph',
                                     disable=disable):
            if remove_or_insert > 0:
                edge = self.add_edge(influence_nodes)
                while edge is None:
                    edge = self.add_edge(influence_nodes)

            else:
                edge = self.del_edge(influence_nodes)
                while edge is None:
                    edge = self.del_edge(influence_nodes)

            adj_flips[edge] = 1.0
            u, v = edge
            self.modified_degree[u] += remove_or_insert
            self.modified_degree[v] += remove_or_insert
        return self
Example 4
    def attack(self,
               num_budgets=0.05,
               structure_attack=True,
               feature_attack=False,
               ll_constraint=False,
               ll_cutoff=0.004,
               disable=False):

        super().attack(num_budgets, structure_attack, feature_attack)

        if ll_constraint:
            raise NotImplementedError(
                "`log_likelihood_constraint` has not been well tested."
                " Please set `ll_constraint=False` to achieve a better performance."
            )

        if feature_attack and not self.graph.is_binary():
            raise ValueError(
                "Attacks on the node features are currently only supported for binary attributes."
            )

        with tf.device(self.device):
            modified_adj, modified_nx = self.adj_tensor, self.x_tensor
            adj_tensor, x_tensor = self.adj_tensor, self.x_tensor
            adj_changes, x_changes = self.adj_changes, self.x_changes
            adj_flips, nattr_flips = self.adj_flips, self.nattr_flips

            for it in tqdm(range(self.num_budgets),
                           desc='Perturbing Graph',
                           disable=disable):

                x_grad, adj_grad = self.meta_grad()

                adj_meta_score = tf.constant(0.0)
                x_meta_score = tf.constant(0.0)

                if structure_attack:
                    modified_adj = self.get_perturbed_adj(adj_tensor, adj_changes)
                    adj_meta_score = self.structure_score(modified_adj, adj_grad, ll_constraint, ll_cutoff)

                if feature_attack:
                    modified_nx = self.get_perturbed_x(x_tensor, x_changes)
                    x_meta_score = self.feature_score(modified_nx, x_grad)

                if tf.reduce_max(adj_meta_score) >= tf.reduce_max(x_meta_score) and structure_attack:
                    adj_meta_argmax = tf.argmax(adj_meta_score)
                    row, col = divmod(adj_meta_argmax.numpy(), self.num_nodes)
                    adj_changes[row, col].assign(-2. * modified_adj[row, col] + 1.)
                    adj_changes[col, row].assign(-2. * modified_adj[col, row] + 1.)
                    adj_flips.append((row, col))
                elif tf.reduce_max(adj_meta_score) < tf.reduce_max(x_meta_score) and feature_attack:
                    x_meta_argmax = tf.argmax(x_meta_score)
                    row, col = divmod(x_meta_argmax.numpy(), self.num_attrs)
                    x_changes[row, col].assign(-2 * modified_nx[row, col] + 1)
                    nattr_flips.append((row, col))
                else:
                    warnings.warn(f"Do nothing at iter {it}. adj_meta_score={adj_meta_score}, x_meta_score={x_meta_score}",
                                  UserWarning)
        return self
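`meta_grad()` is the heart of this attack. A toy version of the idea, assuming nothing about this repository's internals: unroll one differentiable training step of a linear "GCN" on synthetic tensors, then differentiate the resulting attacker loss with respect to the adjacency perturbation via nested `tf.GradientTape`s:

import tensorflow as tf

tf.random.set_seed(0)
x = tf.random.normal((8, 4))                       # synthetic features
y = tf.random.uniform((8,), maxval=2, dtype=tf.int32)
adj = tf.cast(tf.random.uniform((8, 8)) > 0.7, tf.float32)
adj_changes = tf.Variable(tf.zeros((8, 8)))
w = tf.Variable(tf.zeros((4, 2)))
ce = tf.keras.losses.sparse_categorical_crossentropy

with tf.GradientTape() as outer:
    with tf.GradientTape() as inner:
        modified_adj = adj + adj_changes
        logits = modified_adj @ x @ w              # 1-layer linear "GCN"
        inner_loss = tf.reduce_mean(ce(y, logits, from_logits=True))
    grad_w = inner.gradient(inner_loss, w)
    w_trained = w - 0.1 * grad_w                   # one unrolled SGD step
    attack_logits = modified_adj @ x @ w_trained
    attack_loss = tf.reduce_mean(ce(y, attack_logits, from_logits=True))

adj_grad = outer.gradient(attack_loss, adj_changes)  # the "meta-gradient"
print(adj_grad.shape)  # (8, 8)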
Example 5
    def attack(self,
               num_budgets=0.05,
               symmetric=True,
               structure_attack=True,
               feature_attack=False,
               disable=False):

        super().attack(num_budgets, structure_attack, feature_attack)

        if feature_attack and not self.graph.is_binary():
            raise RuntimeError(
                "Currently only attack binary node attributes are supported")

        modified_adj, modified_nx = self.modified_adj, self.modified_nx

        for it in tqdm(range(self.num_budgets),
                       desc='Perturbing Graph',
                       disable=disable):

            with tf.device(self.device):
                adj_grad, x_grad = self.compute_gradients(
                    modified_adj, modified_nx, self.victim_nodes,
                    self.victim_labels)

                adj_grad_score = tf.constant(0.0)
                x_grad_score = tf.constant(0.0)

                if structure_attack:

                    if symmetric:
                        adj_grad += tf.transpose(adj_grad)

                    adj_grad_score = self.structure_score(
                        modified_adj, adj_grad)

                if feature_attack:
                    x_grad_score = self.feature_score(modified_nx, x_grad)

                if tf.reduce_max(adj_grad_score) >= tf.reduce_max(
                        x_grad_score):
                    adj_grad_argmax = tf.argmax(adj_grad_score)
                    row, col = divmod(adj_grad_argmax.numpy(), self.num_nodes)
                    modified_adj[row, col].assign(1. - modified_adj[row, col])
                    modified_adj[col, row].assign(1. - modified_adj[col, row])
                    self.adj_flips.append((row, col))
                else:
                    x_grad_argmax = tf.argmax(x_grad_score)
                    row, col = divmod(x_grad_argmax.numpy(), self.num_attrs)
                    modified_nx[row, col].assign(1. - modified_nx[row, col])
                    self.nattr_flips.append((row, col))
        return self
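The greedy step in isolation: score every entry by gradient times flip direction, mask the diagonal, and flip the arg-max symmetrically. A self-contained NumPy sketch of the pattern (the snippet's `structure_score` may add masking or filtering on top):

import numpy as np

rng = np.random.default_rng(1)
n = 6
adj = np.triu((rng.uniform(size=(n, n)) > 0.7).astype(float), k=1)
adj = adj + adj.T                        # symmetric, zero diagonal
grad = rng.normal(size=(n, n))
grad = grad + grad.T                     # as with symmetric=True above

score = grad * (1.0 - 2.0 * adj)         # -2 * adj + 1: flip direction
np.fill_diagonal(score, -np.inf)         # never flip a self-loop
row, col = divmod(int(score.argmax()), n)
adj[row, col] = adj[col, row] = 1.0 - adj[row, col]
print((row, col))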
Example 6
    def attack(self,
               target,
               num_budgets=None,
               logit=None,
               reduced_nodes=3,
               direct_attack=True,
               structure_attack=True,
               feature_attack=False,
               disable=False):

        super().attack(target, num_budgets, direct_attack, structure_attack,
                       feature_attack)

        if logit is None:
            logit = self.surrogate.predict(target).ravel()

        top2 = logit.argsort()[-2:]
        self.wrong_label = np.setdiff1d(top2, self.target_label)[0]
        assert self.wrong_label != self.target_label

        self.subgraph_preprocessing(reduced_nodes)
        offset = self.edge_weights.shape[0]

        with tf.device(self.device):
            for it in tqdm(range(self.num_budgets),
                           desc='Perturbing Graph',
                           disable=disable):
                edge_grad, non_edge_grad = self.compute_gradient(norm=False)
                edge_grad *= (-2 * self.edge_weights + 1)
                non_edge_grad *= (-2 * self.non_edge_weights + 1)
                gradients = tf.concat([edge_grad, non_edge_grad], axis=0)
                sorted_indices = tf.argsort(gradients, direction="DESCENDING")

                for index in sorted_indices:
                    if index < offset:
                        u, v = self.edge_index[:, index]
                        add = False
                        if not self.allow_singleton and (
                                self.selfloop_degree[u] <= 2
                                or self.selfloop_degree[v] <= 2):
                            continue
                    else:
                        index -= offset
                        u, v = self.non_edge_index[:, index]
                        add = True

                    if not self.is_modified(u, v):
                        self.adj_flips[(u, v)] = it
                        self.update_subgraph(u, v, index, add=add)
                        break
        return self
Example 7
    def attack(self,
               target,
               num_budgets=None,
               logit=None,
               attacker_nodes=3,
               direct_attack=True,
               structure_attack=True,
               feature_attack=False,
               disable=False):

        super().attack(target, num_budgets, direct_attack, structure_attack,
                       feature_attack)

        if logit is None:
            logit = self.logits[target]
        idx = list(set(range(logit.size)) - set([self.target_label]))
        wrong_label = idx[logit[idx].argmax()]

        with tf.device(self.device):
            self.wrong_label = wrong_label
            self.true_label = tf.convert_to_tensor(self.target_label,
                                                   dtype=self.floatx)
            self.subgraph_preprocessing(attacker_nodes)
            offset = self.edge_weights.shape[0]

            for it in tqdm(range(self.num_budgets),
                           desc='Perturbing Graph',
                           disable=disable):
                edge_grad, non_edge_grad = self.compute_gradient()
                edge_grad = normalize_GCN(self.edge_index, edge_grad,
                                          self.selfloop_degree)
                non_edge_grad = normalize_GCN(self.non_edge_index,
                                              non_edge_grad,
                                              self.selfloop_degree)
                edge_grad *= (-2 * self.edge_weights + 1)
                non_edge_grad *= (-2 * self.non_edge_weights + 1)
                gradients = tf.concat([edge_grad, non_edge_grad], axis=0)
                index = tf.argmax(gradients)
                if index < offset:
                    u, v = self.edge_index[:, index]
                    add = False
                else:
                    index -= offset
                    u, v = self.non_edge_index[:, index]
                    add = True

                assert not self.is_modified(u, v)
                self.adj_flips[(u, v)] = it
                self.update_subgraph(u, v, index, add=add)
        return self
Example 8
    def get_feature_importance(self, candidates, steps, disable=False):
        adj = self.adj_norm
        x = self.x_tensor
        mask = (candidates[:, 0], candidates[:, 1])
        target_index = gf.astensor([self.target])
        target_label = gf.astensor(self.target_label)
        baseline_add = x.numpy()
        baseline_add[mask] = 1.0
        baseline_add = gf.astensor(baseline_add)
        baseline_remove = x.numpy()
        baseline_remove[mask] = 0.0
        baseline_remove = gf.astensor(baseline_remove)
        feature_indicator = self.graph.node_attr[mask] > 0

        features = candidates[feature_indicator]
        non_features = candidates[~feature_indicator]

        feature_gradients = tf.zeros(features.shape[0])
        non_feature_gradients = tf.zeros(non_features.shape[0])

        for alpha in tqdm(tf.linspace(0., 1.0, steps + 1),
                          desc='Computing feature importance',
                          disable=disable):
            ###### Compute integrated gradients for removing features ######
            x_diff = x - baseline_remove
            x_step = baseline_remove + alpha * x_diff

            gradients = self.compute_feature_gradients(adj, x_step,
                                                       target_index,
                                                       target_label)
            feature_gradients += -tf.gather_nd(gradients, features)

            ###### Compute integrated gradients for adding features ######
            x_diff = baseline_add - x
            x_step = baseline_add - alpha * x_diff

            gradients = self.compute_feature_gradients(adj, x_step,
                                                       target_index,
                                                       target_label)
            non_feature_gradients += tf.gather_nd(gradients, non_features)

        integrated_grads = np.zeros(feature_indicator.size)
        integrated_grads[feature_indicator] = feature_gradients.numpy()
        integrated_grads[~feature_indicator] = non_feature_gradients.numpy()

        return integrated_grads
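The loop above is a Riemann-sum approximation of integrated gradients, (x - b) times the integral over alpha in [0, 1] of the gradient at b + alpha * (x - b). In one dimension the completeness property (the approximation recovers f(x) - f(b)) is easy to check:

import numpy as np

f = lambda z: z ** 2                  # toy scalar "model"
fprime = lambda z: 2.0 * z            # its gradient
x, baseline, steps = 3.0, 0.0, 20

alphas = np.linspace(0.0, 1.0, steps + 1)
ig = (x - baseline) * np.mean(fprime(baseline + alphas * (x - baseline)))
print(ig, f(x) - f(baseline))         # both 9.0 here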
Example 9
    def attack(self,
               target,
               num_budgets=None,
               direct_attack=True,
               structure_attack=True,
               feature_attack=False,
               disable=False):

        super().attack(target, num_budgets, direct_attack, structure_attack,
                       feature_attack)

        if not direct_attack:
            raise NotImplementedError(
                f'{self.name} does NOT support indirect attack.')

        target_index, target_label = gf.astensors([self.target],
                                                  self.target_label)
        adj_matrix = self.graph.adj_matrix

        for it in tqdm(range(self.num_budgets),
                       desc='Perturbing Graph',
                       disable=disable):
            with tf.device(self.device):
                gradients = self.compute_gradients(self.modified_adj,
                                                   self.adj_changes,
                                                   target_index, target_label)

                modified_row = tf.gather(self.modified_adj, target_index)
                gradients = (gradients *
                             (-2 * modified_row + 1)).numpy().ravel()

            sorted_index = np.argsort(-gradients)
            for index in sorted_index:
                u = target
                v = index % adj_matrix.shape[0]
                exist = adj_matrix[u, v]
                if exist and not self.allow_singleton and (
                        self.modified_degree[u] <= 1
                        or self.modified_degree[v] <= 1):
                    continue
                if not self.is_modified(u, v):
                    self.adj_flips[(u, v)] = it
                    self.flip_edge(u, v, exist)
                    break
        return self
Example 10
    def get_link_importance(self, candidates, steps, disable=False):

        adj = self.adj_tensor
        x = self.x_tensor
        mask = (candidates[:, 0], candidates[:, 1])
        target_index = gf.astensor([self.target])
        target_label = gf.astensor(self.target_label)
        baseline_add = adj.numpy()
        baseline_add[mask] = 1.0
        baseline_add = gf.astensor(baseline_add)
        baseline_remove = adj.numpy()
        baseline_remove[mask] = 0.0
        baseline_remove = gf.astensor(baseline_remove)
        edge_indicator = self.graph.adj_matrix[mask].A1 > 0

        edges = candidates[edge_indicator]
        non_edges = candidates[~edge_indicator]

        edge_gradients = tf.zeros(edges.shape[0])
        non_edge_gradients = tf.zeros(non_edges.shape[0])

        for alpha in tqdm(tf.linspace(0., 1.0, steps + 1),
                          desc='Computing link importance',
                          disable=disable):
            ###### Compute integrated gradients for removing edges ######
            adj_diff = adj - baseline_remove
            adj_step = baseline_remove + alpha * adj_diff

            gradients = self.compute_structure_gradients(
                adj_step, x, target_index, target_label)
            edge_gradients += -tf.gather_nd(gradients, edges)

            ###### Compute integrated gradients for adding edges ######
            adj_diff = baseline_add - adj
            adj_step = baseline_add - alpha * adj_diff

            gradients = self.compute_structure_gradients(
                adj_step, x, target_index, target_label)
            non_edge_gradients += tf.gather_nd(gradients, non_edges)

        integrated_grads = np.zeros(edge_indicator.size)
        integrated_grads[edge_indicator] = edge_gradients.numpy()
        integrated_grads[~edge_indicator] = non_edge_gradients.numpy()

        return integrated_grads
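The baseline construction used above, reduced to plain NumPy: the "add" baseline switches every candidate entry on, the "remove" baseline switches it off, and the indicator records which direction applies to each pair:

import numpy as np

adj = np.array([[0., 1., 0.],
                [1., 0., 1.],
                [0., 1., 0.]])
candidates = np.array([[0, 1], [0, 2]])
mask = (candidates[:, 0], candidates[:, 1])

baseline_add = adj.copy()
baseline_add[mask] = 1.0
baseline_remove = adj.copy()
baseline_remove[mask] = 0.0
edge_indicator = adj[mask] > 0     # True where the candidate edge exists
print(edge_indicator)              # [ True False]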
Example 11
    def attack(self,
               target,
               num_budgets=None,
               threshold=0.5,
               direct_attack=True,
               structure_attack=True,
               feature_attack=False,
               disable=False):

        super().attack(target, num_budgets, direct_attack, structure_attack,
                       feature_attack)

        if direct_attack:
            influence_nodes = [target]
        else:
            # influence_nodes = list(self.graph.neighbors(target))
            influence_nodes = self.graph.adj_matrix[target].indices.tolist()

        chosen = 0
        adj_flips = self.adj_flips

        with tqdm(total=self.num_budgets,
                  desc='Perturbing Graph',
                  disable=disable) as pbar:
            while chosen < self.num_budgets:

                # randomly choose to add or remove edges
                if np.random.rand() <= threshold:
                    delta = 1.0
                    edge = self.add_edge(influence_nodes)
                else:
                    delta = -1.0
                    edge = self.del_edge(influence_nodes)

                if edge is not None:
                    adj_flips[edge] = chosen
                    chosen += 1
                    u, v = edge
                    self.modified_degree[u] += delta
                    self.modified_degree[v] += delta
                    pbar.update(1)

        return self
Example 12
    def random_sample(self, sample_epochs=20, disable=False):
        best_loss = -10000
        best_s = None
        s = torch.triu(self.adj_changes, diagonal=1)
        _one = torch.tensor(1.).to(self.device)
        _zero = torch.tensor(0.).to(self.device)
        for it in tqdm(range(sample_epochs),
                       desc='Random Sampling',
                       disable=disable):
            random_matrix = torch.zeros_like(s).uniform_(0, 1)
            sampled = torch.where(s > random_matrix, _one, _zero)
            if sampled.sum() > self.num_budgets:
                continue

            self.adj_changes.data.copy_(sampled)
            loss = self.compute_loss(self.victim_nodes)

            if best_loss < loss:
                best_loss = loss
                best_s = sampled

        assert best_s is not None, "No feasible sample found within the budget"
        return best_s.detach().cpu().numpy()
Example 13
    def attack(self,
               num_budgets=0.05,
               sample_epochs=20,
               C=None,
               CW_loss=False,
               epochs=100,
               update_per_epoch=20,
               structure_attack=True,
               feature_attack=False,
               disable=False):

        super(PGD, self).attack(num_budgets, structure_attack, feature_attack)

        self.CW_loss = CW_loss

        if not C:
            if CW_loss:
                C = 0.1
            else:
                C = 200

        with tf.device(self.device):

            for epoch in tqdm(range(epochs),
                              desc='MinMax Training',
                              disable=disable):
                if (epoch + 1) % update_per_epoch == 0:
                    self.update_surrogate(self.victim_nodes)
                gradients = self.compute_gradients(self.victim_nodes)
                lr = C / np.sqrt(epoch + 1)
                self.adj_changes.assign_add(lr * gradients)
                self.projection()

            best_s = self.random_sample(sample_epochs)
            self.adj_flips = np.transpose(np.where(best_s > 0.))
        return self
Example 14
    def attack(self,
               target,
               num_budgets=None,
               direct_attack=True,
               structure_attack=True,
               feature_attack=False,
               n_influencers=5,
               ll_constraint=True,
               ll_cutoff=0.004,
               disable=False):

        super().attack(target, num_budgets, direct_attack, structure_attack,
                       feature_attack)

        if feature_attack and not self.graph.is_binary():
            raise RuntimeError(
                "Currently only attack binary node attributes are supported")

        if ll_constraint and self.allow_singleton:
            raise RuntimeError(
                '`ll_constraint` fails when `allow_singleton=True`; please set `attacker.allow_singleton=False`.'
            )

        logits_start = self.compute_logits()
        best_wrong_class = self.strongest_wrong_class(logits_start)

        if structure_attack and ll_constraint:
            # Setup starting values of the likelihood ratio test.
            degree_sequence_start = self.degree
            current_degree_sequence = self.degree.astype('float64')
            d_min = 2
            S_d_start = np.sum(
                np.log(degree_sequence_start[degree_sequence_start >= d_min]))
            current_S_d = np.sum(
                np.log(
                    current_degree_sequence[current_degree_sequence >= d_min]))
            n_start = np.sum(degree_sequence_start >= d_min)
            current_n = np.sum(current_degree_sequence >= d_min)
            alpha_start = compute_alpha(n_start, S_d_start, d_min)
            log_likelihood_orig = compute_log_likelihood(
                n_start, alpha_start, S_d_start, d_min)

        if len(self.influence_nodes) == 0:
            if not direct_attack:
                # Choose influencer nodes
                infls, add_infls = self.get_attacker_nodes(
                    n_influencers, add_additional_nodes=True)
                self.influence_nodes = np.concatenate((infls, add_infls))
                # Potential edges are all edges from any attacker to any other node, except the respective
                # attacker itself or the node being attacked.
                self.potential_edges = np.row_stack([
                    np.column_stack(
                        (np.tile(infl, self.num_nodes - 2),
                         np.setdiff1d(np.arange(self.num_nodes),
                                      np.array([self.target, infl]))))
                    for infl in self.influence_nodes
                ])
            else:
                # direct attack
                influencers = [self.target]
                self.potential_edges = np.column_stack(
                    (np.tile(self.target, self.num_nodes - 1),
                     np.setdiff1d(np.arange(self.num_nodes), self.target)))
                self.influence_nodes = np.array(influencers)

        self.potential_edges = self.potential_edges.astype("int32")

        for it in tqdm(range(self.num_budgets),
                       desc='Perturbing Graph',
                       disable=disable):
            if structure_attack:
                # Do not consider edges that, if removed, result in singleton edges in the graph.
                if not self.allow_singleton:
                    filtered_edges = gf.singleton_filter(self.potential_edges, self.modified_adj).astype("int32")
                else:
                    filtered_edges = self.potential_edges

                if ll_constraint:
                    # Update the values for the power law likelihood ratio test.
                    deltas = 2 * (1 - self.modified_adj[tuple(
                        filtered_edges.T)].A.ravel()) - 1
                    d_edges_old = current_degree_sequence[filtered_edges]
                    d_edges_new = current_degree_sequence[
                        filtered_edges] + deltas[:, None]
                    new_S_d, new_n = update_Sx(current_S_d, current_n,
                                               d_edges_old, d_edges_new, d_min)
                    new_alphas = compute_alpha(new_n, new_S_d, d_min)
                    new_ll = compute_log_likelihood(new_n, new_alphas, new_S_d,
                                                    d_min)
                    alphas_combined = compute_alpha(new_n + n_start,
                                                    new_S_d + S_d_start, d_min)
                    new_ll_combined = compute_log_likelihood(
                        new_n + n_start, alphas_combined, new_S_d + S_d_start,
                        d_min)
                    new_ratios = -2 * new_ll_combined + 2 * (
                        new_ll + log_likelihood_orig)

                    # Do not consider edges that, if added/removed, would lead to a violation of the
                    # likelihood ratio chi-square cutoff value.
                    powerlaw_filter = filter_chisquare(new_ratios, ll_cutoff)
                    filtered_edges = filtered_edges[powerlaw_filter]

                # Compute new entries in A_hat_square_uv
                a_hat_uv_new = self.compute_new_a_hat_uv(filtered_edges)
                # Compute the struct scores for each potential edge
                struct_scores = self.struct_score(a_hat_uv_new,
                                                  self.compute_XW())
                best_edge_ix = struct_scores.argmin()
                best_edge_score = struct_scores.min()
                best_edge = filtered_edges[best_edge_ix]

            if feature_attack:
                # Compute the feature scores for each potential feature perturbation
                feature_ixs, feature_scores = self.feature_scores()
                best_feature_ix = feature_ixs[0]
                best_feature_score = feature_scores[0]

            if structure_attack and feature_attack:
                # decide whether to choose an edge or feature to change
                if best_edge_score < best_feature_score:
                    change_structure = True
                else:
                    change_structure = False

            elif structure_attack:
                change_structure = True
            elif feature_attack:
                change_structure = False

            if change_structure:
                # perform edge perturbation
                u, v = best_edge
                modified_adj = self.modified_adj.tolil(copy=False)
                modified_adj[(u, v)] = modified_adj[(
                    v, u)] = 1 - modified_adj[(u, v)]
                self.modified_adj = modified_adj.tocsr(copy=False)
                self.adj_norm = gf.normalize_adj(modified_adj)
                self.adj_flips.append((u, v))

                if ll_constraint:
                    # Update likelihood ratio test values
                    current_S_d = new_S_d[powerlaw_filter][best_edge_ix]
                    current_n = new_n[powerlaw_filter][best_edge_ix]
                    current_degree_sequence[best_edge] += deltas[
                        powerlaw_filter][best_edge_ix]
            else:

                modified_nx = self.modified_nx.tolil(copy=False)
                modified_nx[tuple(
                    best_feature_ix)] = 1 - modified_nx[tuple(best_feature_ix)]
                self.modified_nx = modified_nx.tocsr(copy=False)
                self.nattr_flips.append(tuple(best_feature_ix))
        return self
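`gf.singleton_filter` is called above but not shown; its assumed job is to drop candidate flips that would leave an endpoint with degree zero. A minimal SciPy sketch of that semantics (hypothetical, not the library's implementation):

import numpy as np
import scipy.sparse as sp

def singleton_filter(candidates, adj):
    # Flipping an existing edge removes it (degree -1 at both ends);
    # flipping a non-edge adds it (degree +1). Keep only flips that
    # leave both endpoints with degree >= 1.
    deg = np.asarray(adj.sum(axis=1)).ravel()
    u, v = candidates[:, 0], candidates[:, 1]
    existing = np.asarray(adj[u, v]).ravel()
    delta = 1.0 - 2.0 * existing
    safe = (deg[u] + delta > 0) & (deg[v] + delta > 0)
    return candidates[safe]

adj = sp.csr_matrix(np.array([[0., 1., 0.],
                              [1., 0., 1.],
                              [0., 1., 0.]]))
cands = np.array([[0, 1], [0, 2]])
print(singleton_filter(cands, adj))  # [[0 2]]: removing (0, 1) isolates node 0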
Example 15
    def attack(self,
               num_budgets=0.05,
               structure_attack=True,
               feature_attack=False,
               ll_constraint=False,
               ll_cutoff=0.004,
               disable=False):

        super().attack(num_budgets, structure_attack, feature_attack)

        if ll_constraint:
            raise NotImplementedError(
                "`log_likelihood_constraint` has not been well tested."
                " Please set `ll_constraint=False` to achieve a better performance."
            )

        if feature_attack and not self.graph.is_binary():
            raise ValueError(
                "Attacks on the node features are currently only supported for binary attributes."
            )

        modified_adj, modified_nx = self.adj_tensor, self.x_tensor
        adj_tensor, x_tensor = self.adj_tensor, self.x_tensor
        adj_changes, x_changes = self.adj_changes, self.x_changes
        adj_flips, nattr_flips = self.adj_flips, self.nattr_flips

        self.inner_train(modified_nx, modified_adj)

        for it in tqdm(range(self.num_budgets),
                       desc='Perturbing Graph',
                       disable=disable):

            if structure_attack:
                modified_adj = self.get_perturbed_adj(adj_tensor, adj_changes)

            if feature_attack:
                modified_nx = self.get_perturbed_x(x_tensor, x_changes)

            adj_norm = gf.normalize_adj_tensor(modified_adj)

            self.inner_train(modified_nx, adj_norm)

            x_grad, adj_grad = self.meta_grad(modified_nx, adj_norm)

            x_meta_score = torch.tensor(0.0)
            adj_meta_score = torch.tensor(0.0)

            if structure_attack:
                adj_meta_score = self.structure_score(modified_adj, adj_grad,
                                                      ll_constraint, ll_cutoff)
            if feature_attack:
                x_meta_score = self.feature_score(modified_nx, x_grad)

            if adj_meta_score.max() >= x_meta_score.max():
                adj_meta_argmax = torch.argmax(adj_meta_score)
                row, col = unravel_index(adj_meta_argmax, self.num_nodes)
                self.adj_changes.data[row][
                    col] += -2 * modified_adj[row][col] + 1
                self.adj_changes.data[col][
                    row] += -2 * modified_adj[col][row] + 1
                adj_flips.append((row, col))

            else:
                x_meta_argmax = torch.argmax(x_meta_score)
                row, col = unravel_index(x_meta_argmax, self.num_attrs)
                self.x_changes.data[row][col] += -2 * modified_nx[row][col] + 1
                nattr_flips.append((row, col))
        return self
Example 16
    def attack(self,
               target,
               num_budgets=None,
               symmetric=True,
               direct_attack=True,
               structure_attack=True,
               feature_attack=False,
               disable=False):

        super().attack(target, num_budgets, direct_attack, structure_attack,
                       feature_attack)

        if feature_attack and not self.graph.is_binary():
            raise RuntimeError(
                "Currently only attack binary node attributes are supported")

        with tf.device(self.device):
            target_index = gf.astensor([self.target])
            target_labels = gf.astensor(self.target_label)

            modified_adj, modified_nx = self.modified_adj, self.modified_nx

            if not direct_attack:
                adj_mask, x_mask = self.construct_mask()
            else:
                adj_mask, x_mask = None, None

            for _ in tqdm(range(self.num_budgets),
                          desc='Perturbing Graph',
                          disable=disable):

                adj_grad, x_grad = self.compute_gradients(
                    modified_adj, modified_nx, target_index, target_labels)

                adj_grad_score = tf.constant(0.0)
                x_grad_score = tf.constant(0.0)

                if structure_attack:

                    if symmetric:
                        adj_grad = (adj_grad + tf.transpose(adj_grad)) / 2.

                    adj_grad_score = self.structure_score(
                        modified_adj, adj_grad, adj_mask)

                if feature_attack:
                    x_grad_score = self.feature_score(modified_nx, x_grad,
                                                      x_mask)

                if tf.reduce_max(adj_grad_score) >= tf.reduce_max(
                        x_grad_score):
                    adj_grad_argmax = tf.argmax(adj_grad_score)
                    row, col = divmod(adj_grad_argmax.numpy(), self.num_nodes)
                    modified_adj[row, col].assign(1. - modified_adj[row, col])
                    modified_adj[col, row].assign(1. - modified_adj[col, row])
                    self.adj_flips.append((row, col))
                else:
                    x_grad_argmax = tf.argmax(x_grad_score)
                    row, col = divmod(x_grad_argmax.numpy(), self.num_attrs)
                    modified_nx[row, col].assign(1. - modified_nx[row, col])
                    self.nattr_flips.append((row, col))
        return self
Example 17
    def attack(self,
               target,
               num_budgets=None,
               direct_attack=True,
               structure_attack=True,
               feature_attack=False,
               ll_constraint=False,
               ll_cutoff=0.004,
               disable=False):

        super().attack(target, num_budgets, direct_attack, structure_attack,
                       feature_attack)

        # Setup starting values of the likelihood ratio test.
        degree_sequence_start = self.degree
        current_degree_sequence = self.degree.astype('float64')
        d_min = 2  # denotes the minimum degree a node needs to have to be considered in the power-law test
        S_d_start = np.sum(
            np.log(degree_sequence_start[degree_sequence_start >= d_min]))
        current_S_d = np.sum(
            np.log(current_degree_sequence[current_degree_sequence >= d_min]))
        n_start = np.sum(degree_sequence_start >= d_min)
        current_n = np.sum(current_degree_sequence >= d_min)
        alpha_start = compute_alpha(n_start, S_d_start, d_min)
        log_likelihood_orig = compute_log_likelihood(n_start, alpha_start,
                                                     S_d_start, d_min)

        N = self.num_nodes
        if not direct_attack:
            # Choose influencer nodes
            # influence_nodes = self.graph.adj_matrix[target].nonzero()[1]
            influence_nodes = self.graph.adj_matrix[target].indices
            # Potential edges are all edges from any attacker to any other node, except the respective
            # attacker itself or the node being attacked.
            potential_edges = np.row_stack([
                np.column_stack((np.tile(infl, N - 2),
                                 np.setdiff1d(np.arange(N),
                                              np.array([target, infl]))))
                for infl in influence_nodes
            ])
        else:
            # direct attack
            potential_edges = np.column_stack(
                (np.tile(target, N - 1), np.setdiff1d(np.arange(N), target)))
            influence_nodes = np.asarray([target])

        for it in tqdm(range(self.num_budgets),
                       desc='Perturbing Graph',
                       disable=disable):

            if not self.allow_singleton:
                filtered_edges = gf.singleton_filter(potential_edges,
                                                     self.modified_adj)
            else:
                filtered_edges = potential_edges

            if ll_constraint:
                # Update the values for the power law likelihood ratio test.
                deltas = 2 * (1 - self.modified_adj[tuple(
                    filtered_edges.T)].toarray()[0]) - 1
                d_edges_old = current_degree_sequence[filtered_edges]
                d_edges_new = current_degree_sequence[
                    filtered_edges] + deltas[:, None]
                new_S_d, new_n = update_Sx(current_S_d, current_n, d_edges_old,
                                           d_edges_new, d_min)
                new_alphas = compute_alpha(new_n, new_S_d, d_min)
                new_ll = compute_log_likelihood(new_n, new_alphas, new_S_d,
                                                d_min)
                alphas_combined = compute_alpha(new_n + n_start,
                                                new_S_d + S_d_start, d_min)
                new_ll_combined = compute_log_likelihood(
                    new_n + n_start, alphas_combined, new_S_d + S_d_start,
                    d_min)
                new_ratios = -2 * new_ll_combined + 2 * (new_ll +
                                                         log_likelihood_orig)

                # Do not consider edges that, if added/removed, would lead to a violation of the
                # likelihood ratio chi-square cutoff value.
                powerlaw_filter = filter_chisquare(new_ratios, ll_cutoff)
                filtered_edges = filtered_edges[powerlaw_filter]

            struct_scores = self.struct_score(self.modified_adj,
                                              self.X_mean,
                                              self.eig_vals,
                                              self.eig_vec,
                                              filtered_edges,
                                              K=self.K,
                                              T=self.T,
                                              lambda_method="nosum")
            best_edge_ix = struct_scores.argmax()
            u, v = filtered_edges[best_edge_ix]  # best edge

            while (u, v) in self.adj_flips:
                struct_scores[best_edge_ix] = 0
                best_edge_ix = struct_scores.argmax()
                u, v = filtered_edges[best_edge_ix]

            self.modified_adj[(u, v)] = self.modified_adj[(
                v, u)] = 1. - self.modified_adj[(u, v)]
            self.adj_flips[(u, v)] = 1.0

            if ll_constraint:
                # Update likelihood ratio test values
                current_S_d = new_S_d[powerlaw_filter][best_edge_ix]
                current_n = new_n[powerlaw_filter][best_edge_ix]
                current_degree_sequence[[
                    u, v
                ]] += deltas[powerlaw_filter][best_edge_ix]
        return self