Example #1
def attackVictim(attack, approach, print_answer, attacked_node, y_target, node_num):
    device = attack.device
    dataset = attack.dataset

    neighbours_and_dist = kBFS(root=attacked_node, device=device, reversed_arr_list=dataset.reversed_arr_list,
                               K=attack.num_layers)
    if neighbours_and_dist.nelement():
        neighbours_and_dist = manipulateNeighborhood(attack=attack, approach=approach, attacked_node=attacked_node,
                                                     neighbours_and_dist=neighbours_and_dist, device=device)
        attack_log = 'Attack: {:03d}, Node: {}, BFS clique: {}'.format(node_num, attacked_node.item(),
                                                                       neighbours_and_dist.shape[0] + 1)
    else:
        attack_log = 'Attack: {:03d}, Node: {} is a solo node'.format(node_num, attacked_node.item())
    # in adversarial mode, prefix the log with the adversarial epoch number
    if attack.mode.isAdversarial():
        attack_log = 'Adv Epoch: {:03d}, '.format(attack.idx) + attack_log

    # special cases: a solo node, or a duo node for the two-attackers approach
    BFS_size = neighbours_and_dist.shape[0]
    if not neighbours_and_dist.nelement():
        if print_answer is Print.YES:
            print(attack_log, flush=True)
        return None
    if approach is NodeApproach.TWO_ATTACKERS and BFS_size == 1:
        if print_answer is Print.YES:
            print(attack_log + ': Too small for two attackers', flush=True)
        return None

    if print_answer is Print.YES:
        print(attack_log, flush=True)
    malicious_node = approach.getMaliciousNode(attack=attack, attacked_node=attacked_node, y_target=y_target,
                                               node_num=node_num, neighbours_and_dist=neighbours_and_dist,
                                               BFS_size=BFS_size)
    # for the irregular AGREE approach, compare the heuristic and gradient choices
    if approach is NodeApproach.AGREE:
        print()
        malicious_node_heuristic = heuristicApproach(reversed_arr_list=attack.dataset.reversed_arr_list,
                                                     neighbours_and_dist=neighbours_and_dist,
                                                     device=attack.device)
        malicious_node_gradient = gradientApproach(attack=attack, attacked_node=attacked_node, y_target=y_target,
                                                   node_num=node_num, neighbours_and_dist=neighbours_and_dist,
                                                   approach=approach)
        attack_results = torch.zeros(1, 2)
        attack_results[0][0] = malicious_node_heuristic == malicious_node_gradient
        return attack_results
    if malicious_node is None:
        quit("Node approach doesnt exist")

    attack_results = attackTrainer(attack=attack, approach=approach, model=attack.model_wrapper.model,
                                   print_answer=print_answer, attacked_nodes=attacked_node, y_targets=y_target,
                                   malicious_nodes=malicious_node, node_num=node_num,
                                   attack_epochs=attack.attack_epochs, lr=attack.lr)
    return attack_results
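Both attackVictim above and the edgeAttackVictim variants below start from the neighbourhood returned by kBFS, whose implementation is not shown in these examples. The snippet below is only a minimal sketch, assuming reversed_arr_list maps each node to its list of incoming neighbours and that each row of the result is a (node index, distance) pair, which is consistent with the uses of neighbours_and_dist[:, 0] and neighbours_and_dist.shape[0] above; the name kBFS_sketch is hypothetical.

import torch
from collections import deque

def kBFS_sketch(root, device, reversed_arr_list, K):
    # hypothetical stand-in for kBFS: BFS of depth <= K over the reversed
    # adjacency list, returning one [node, distance] row per reached node
    # (the root excluded); an empty tensor marks a solo node
    root = int(root)
    visited = {root}
    frontier = deque([(root, 0)])
    rows = []
    while frontier:
        node, dist = frontier.popleft()
        if dist == K:
            continue
        for neighbour in reversed_arr_list[node]:
            if neighbour not in visited:
                visited.add(neighbour)
                rows.append([neighbour, dist + 1])
                frontier.append((neighbour, dist + 1))
    if not rows:
        return torch.empty(0, 2, dtype=torch.long, device=device)
    return torch.tensor(rows, dtype=torch.long, device=device)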
Example #2
def edgeAttackVictim(attack, approach: Approach, print_flag: bool, attacked_node: torch.Tensor, y_target: torch.Tensor,
                     node_num: int) -> torch.Tensor:
    """
        chooses the edge we attack with from our pool of possible edges.
        the pool of possible edges changes per approach
        the BFS environment is also calculated according to the selected approach
        lastly, we attack using flipEdge or edgeTrainer
        important note: the victim node is already known (attacked node)

        Parameters
        ----------
        attack: oneGNNAttack
        approach: Approach
        print_flag: bool - whether to print every iteration or not
        attacked_node: torch.Tensor - the victim node
        y_target: torch.Tensor - the target label of the attack
        node_num: int - the index of the attacked/victim node (out of the train/val/test-set)

        Returns
        -------
        attack_result: torch.Tensor
    """
    device = attack.device
    dataset = attack.getDataset()
    data = dataset.data
    model = attack.model_wrapper.model
    targeted = attack.targeted
    end_log_template = ', Attack Success: {}'

    neighbours_and_dist = kBFS(root=attacked_node, device=device, reversed_arr_list=dataset.reversed_arr_list,
                               K=model.num_layers - 1)
    if not neighbours_and_dist.nelement():
        if print_flag:
            print('Attack: {:03d}, Node: {} is a solo node'.format(node_num, attacked_node.item()), flush=True)
        return None
    malicious_indices = neighbours_and_dist[:, 0]
    if print_flag:
        print('Attack: {:03d}, Node: {}'.format(node_num, attacked_node.item()), flush=True, end='')

    # according to our approach choose the edge we wish to flip
    if approach is EdgeApproach.RANDOM:
        # select a random node on the graph - the malicious index
        # select a random node from our BFS of distance K-1 (or keep the victim) - the new attacked node
        # use flipEdge
        malicious_index = np.random.choice(data.num_nodes, 1).item()
        new_attacked_node_index = np.random.choice(malicious_indices.shape[0] + 1, 1).item()
        if new_attacked_node_index == malicious_indices.shape[0]:
            new_attacked_node = attacked_node
        else:
            new_attacked_node = torch.tensor([malicious_indices[new_attacked_node_index].item()]).to(device)
        flipEdge(model=model, attacked_node=new_attacked_node, malicious_index=malicious_index, device=device)
        attack_results = test(data=data, model=model, targeted=targeted, attacked_nodes=new_attacked_node,
                              y_targets=y_target)

        if print_flag:
            print(end_log_template.format(attack_results[3]), flush=True)
    else:
        # EdgeApproach.SINGLE
        # select a random node on the graph - malicious index
        # Add all possible edges between the malicious index and the BFS of distance K-1
        # calculate the edge with the largest gradient and flip it, using edgeTrainer
        #
        # EdgeApproach.GRAD_CHOICE
        # Add all possible edges between all possible nodes and the BFS of distance K-1
        # calculate the edge with the largest gradient and flip it, using edgeTrainer
        malicious_index = model.expandEdgesByMalicious(dataset=dataset, approach=approach, attacked_node=attacked_node,
                                                       neighbours=malicious_indices, device=device)
        attack_results = edgeTrainer(data=data, approach=approach, targeted=targeted, model=model,
                                     attacked_node=attacked_node, y_target=y_target, node_num=node_num,
                                     malicious_index=malicious_index, device=device, print_flag=print_flag,
                                     end_log_template=end_log_template)
    if attack_results is None:
        print("Node approach doesnt exist", flush=True)
        quit()

    return attack_results[3]
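The SINGLE and GRAD_CHOICE comments above describe picking the candidate edge with the largest gradient and flipping it; the repository delegates this to expandEdgesByMalicious and edgeTrainer, whose internals are not shown here. The following is a hedged, self-contained sketch of that idea, assuming a dense adjacency matrix and a model callable as model(x, adj); all names (pick_edge_by_gradient, candidate_edges) are hypothetical.

import torch
import torch.nn.functional as F

def pick_edge_by_gradient(model, x, adj, candidate_edges, attacked_node, y_target, targeted):
    # hypothetical sketch: attach a gradient to the adjacency, back-propagate the
    # attack objective, and flip the candidate edge with the strongest gradient signal
    adj = adj.clone().detach().requires_grad_(True)
    logits = model(x, adj)                                   # assumed model signature
    ce = F.cross_entropy(logits[attacked_node].view(1, -1), y_target.view(1))
    objective = -ce if targeted else ce                      # targeted: pull the prediction towards y_target
    objective.backward()
    # |d objective / d adj[u, v]| scores how much flipping (u, v) could help;
    # a full implementation would also use the sign to decide add vs. remove
    scores = torch.stack([adj.grad[u, v].abs() for u, v in candidate_edges])
    u, v = candidate_edges[int(scores.argmax())]
    perturbed_adj = adj.detach().clone()
    perturbed_adj[u, v] = 1.0 - perturbed_adj[u, v]          # flip the chosen edge
    return (u, v), perturbed_adj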
Example #3
def edgeAttackVictim(attack, approach, print_flag, attacked_node, y_target,
                     node_num):
    device = attack.device
    dataset = attack.dataset
    data = dataset.data
    model = attack.model_wrapper.model
    targeted = attack.targeted

    neighbours_and_dist = kBFS(root=attacked_node,
                               device=device,
                               reversed_arr_list=dataset.reversed_arr_list,
                               K=model.num_layers - 1)
    if not neighbours_and_dist.nelement():
        if print_flag:
            print('Attack: {:03d}, Node: {} is a solo node'.format(
                node_num, attacked_node.item()),
                  flush=True)
        return None

    malicious_indices = neighbours_and_dist[:, 0]
    if print_flag:
        print('Attack: {:03d}, Node: {}'.format(node_num,
                                                attacked_node.item()),
              flush=True,
              end='')

    attack_results = None
    # according to our approach choose the edge we wish to flip
    if approach is EdgeApproach.RANDOM:
        # select a random node on the graph - the malicious index
        # select a random node from our BFS of distance K-1 (or keep the victim) - the new attacked node
        # use flipEdge
        malicious_index = np.random.choice(data.num_nodes, 1).item()
        new_attacked_node_index = np.random.choice(
            malicious_indices.shape[0] + 1, 1).item()
        if new_attacked_node_index == malicious_indices.shape[0]:
            new_attacked_node = attacked_node
        else:
            new_attacked_node = torch.tensor(
                [malicious_indices[new_attacked_node_index].item()]).to(device)
        flipEdge(model=model,
                 attacked_node=new_attacked_node,
                 malicious_index=malicious_index,
                 device=device)
        attack_results = test(data=data,
                              model=model,
                              targeted=targeted,
                              attacked_nodes=new_attacked_node,
                              y_targets=y_target)

    if approach is EdgeApproach.GRAD:
        # select a random node on the graph - the malicious index
        # Add all possible edges between the malicious index and the BFS of distance K-1
        # calculate the edge with the largest gradient and flip it, using edgeTrainer
        malicious_index = np.random.choice(data.num_nodes, 1).item()
        expansion_mode = {"full": False, "malicious_index": malicious_index}
        model.expandEdges(dataset=dataset,
                          attacked_node=attacked_node,
                          neighbours=malicious_indices,
                          device=device,
                          expansion_mode=expansion_mode)
        model = edgeTrainer(targeted=targeted,
                            model=model,
                            attacked_node=attacked_node,
                            y_target=y_target,
                            device=device)
        attack_results = test(data=data,
                              model=model,
                              targeted=targeted,
                              attacked_nodes=attacked_node,
                              y_targets=y_target)

    if approach is EdgeApproach.GLOBAL_GRAD:
        # Add all possible edges between all possible nodes and the BFS of distance K-1
        # calculate the edge with the largest gradient and flip it, using edgeTrainer
        expansion_mode = {"full": True}
        model.expandEdges(dataset=dataset,
                          attacked_node=attacked_node,
                          neighbours=malicious_indices,
                          device=device,
                          expansion_mode=expansion_mode)
        model = edgeTrainer(targeted=targeted,
                            model=model,
                            attacked_node=attacked_node,
                            y_target=y_target,
                            device=device)
        attack_results = test(data=data,
                              model=model,
                              targeted=targeted,
                              attacked_nodes=attacked_node,
                              y_targets=y_target)
    if attack_results is None:
        print("Node approach doesnt exist")
        quit()

    if print_flag:
        print(', Defense Success: {}'.format(not attack_results[3]))
    return attack_results[3]
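In this variant the GRAD and GLOBAL_GRAD branches differ only in the expansion_mode dict passed to expandEdges: the non-full mode pairs a single randomly drawn malicious_index with the K-1 BFS environment, while the full mode pairs every node in the graph with it. expandEdges itself is not shown; the helper below is only a hypothetical illustration of the candidate pool each mode implies (whether the victim node itself belongs to the pool is an assumption).

def build_candidate_edges(num_nodes, attacked_node, neighbours, expansion_mode):
    # hypothetical helper mirroring the expansion_mode dict used above:
    #   {"full": True}                         -> every node may attack the BFS environment
    #   {"full": False, "malicious_index": m}  -> only node m may attack it
    targets = [int(attacked_node)] + [int(n) for n in neighbours]
    if expansion_mode.get("full", False):
        sources = range(num_nodes)
    else:
        sources = [expansion_mode["malicious_index"]]
    return [(s, t) for s in sources for t in targets if s != t]

Such a pool could then be scored with a gradient-based choice like the pick_edge_by_gradient sketch shown earlier.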
Example #4
def attackVictim(attack, approach: Approach, attacked_node: torch.Tensor, y_target: torch.Tensor, node_num: int)\
        -> torch.Tensor:
    """
        chooses the node we attack with (the malicious node) from our BFS environment
        this BFS environment is also calculated according to the selected approach
        lastly, we attack using attackTrainer
        important note: the victim node is already known (attacked node)

        Parameters
        ----------
        attack: oneGNNAttack
        approach: Approach
        attacked_node: torch.Tensor - the victim node
        y_target: torch.Tensor - the target label of the attack
        node_num: int - the index of the attacked/victim node (out of the train/val/test-set)

        Returns
        -------
        attack_results: torch.Tensor - a 2d tensor that includes
                                       1st col - the defence result
                                       2nd col - the number of attributes used
        if the number of attributes is 0, the node is misclassified to begin with
    """
    device = attack.device
    dataset = attack.getDataset()
    print_answer = attack.print_answer

    neighbours_and_dist = kBFS(root=attacked_node, device=device, reversed_arr_list=dataset.reversed_arr_list,
                               K=attack.num_layers)
    if neighbours_and_dist.nelement():
        neighbours_and_dist = manipulateNeighborhood(attack=attack, approach=approach, attacked_node=attacked_node,
                                                     neighbours_and_dist=neighbours_and_dist, device=device)
        attack_log = 'Attack: {:03d}, Node: {}, BFS clique: {}'.format(node_num, attacked_node.item(),
                                                                       neighbours_and_dist.shape[0] + 1)
    else:
        attack_log = 'Attack: {:03d}, Node: {} is a solo node'.format(node_num, attacked_node.item())
    # in adversarial mode, prefix the log with the adversarial epoch number
    if attack.mode.isAdversarial():
        attack_log = 'Adv Epoch: {:03d}, '.format(attack.idx) + attack_log

    # special case of a solo node
    BFS_size = neighbours_and_dist.shape[0]
    if not neighbours_and_dist.nelement():
        if print_answer is Print.YES:
            print(attack_log, flush=True)
        return None

    if print_answer is Print.YES:
        print(attack_log, end='', flush=True)
        if approach is not NodeApproach.MULTIPLE_ATTACKERS:
            print()
    malicious_node, attack = approach.getMaliciousNode(attack=attack, attacked_node=attacked_node, y_target=y_target,
                                                       node_num=node_num, neighbours_and_dist=neighbours_and_dist,
                                                       BFS_size=BFS_size)
    # handle the irregular approaches (AGREE, ZERO_FEATURES, MULTIPLE_ATTACKERS, INJECTION)
    if approach is NodeApproach.AGREE:
        print()
        malicious_node_heuristic = heuristicApproach(reversed_arr_list=dataset.reversed_arr_list,
                                                     neighbours_and_dist=neighbours_and_dist,
                                                     device=attack.device)
        malicious_node_gradient = gradientApproach(attack=attack, attacked_node=attacked_node, y_target=y_target,
                                                   node_num=node_num, neighbours_and_dist=neighbours_and_dist)
        attack_results = torch.zeros(1, 2)
        attack_results[0][0] = malicious_node_heuristic == malicious_node_gradient  # in attackSet we change to equal
        return attack_results

    if approach is NodeApproach.ZERO_FEATURES:
        model = attack.model_wrapper.model
        data = dataset.data
        zero_model = copy.deepcopy(model)
        # zero the malicious node's attributes
        zero_model.node_attribute_list[malicious_node][:] = 0

        # test correctness
        changed_attributes = (zero_model.getInput() != model.getInput())[malicious_node].sum().item()

        # test
        results = test(data=data, model=zero_model, targeted=attack.targeted,
                       attacked_nodes=attacked_node, y_targets=y_target)

        log_template = createLogTemplate(attack=attack, dataset=dataset) + ', Attack Success: {}\n'
        if dataset.type is DatasetType.DISCRETE:
            print(log_template.format(node_num, 1, changed_attributes, *results), flush=True)
        if dataset.type is DatasetType.CONTINUOUS:
            print(log_template.format(node_num, 1, *results), flush=True)
        attack_results = torch.tensor([[results[3], changed_attributes]])
        return attack_results

    if approach is NodeApproach.MULTIPLE_ATTACKERS:
        if malicious_node is None:
            if print_answer is Print.YES:
                print(f': Too small for {attack.num_of_attackers} attackers\n', flush=True)
            return None
        else:
            print()

    if approach is NodeApproach.INJECTION:
        dataset = attack.getDataset()
        classified_to_target = checkNodeClassification(attack=attack, dataset=dataset,
                                                       attacked_node=attacked_node, y_target=y_target,
                                                       print_answer=Print.NO, attack_num=node_num + 1)
        if not classified_to_target:
            print("misclassified right after injection!\n", flush=True)
            attack.model_wrapper.model.removeInjectedNode(attack=attack)
            return torch.tensor([[1, 0]])

    attack_results = attackTrainer(attack=attack, attacked_nodes=attacked_node, y_targets=y_target,
                                   malicious_nodes=malicious_node, node_num=node_num)

    if approach is NodeApproach.INJECTION:
        attack.model_wrapper.model.removeInjectedNode(attack=attack)
    return attack_results
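The ZERO_FEATURES branch above copies the model, zeroes the malicious node's attribute row, counts how many attributes actually changed, and re-tests the victim. Stripped of the repository's model wrapper, the idea reduces to the sketch below, where x stands for the node-feature matrix and the function name is hypothetical.

import torch

def zero_features_sketch(x, malicious_node):
    # zero the attacker's feature row and report how many attributes changed,
    # i.e. the budget recorded in the 2nd column of attack_results
    x_zeroed = x.clone()
    x_zeroed[malicious_node] = 0
    changed_attributes = int((x_zeroed[malicious_node] != x[malicious_node]).sum())
    return x_zeroed, changed_attributes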