Example #1
0
 def attackPerApproach(self, approach: Approach) -> Tuple[torch.Tensor]:
     """
         executes the requested attack for a single approach on the test set
         and returns the mean defence results
         (more information at the generic base class oneGNNSAttack)
     """
     raw_results, _, _ = attackSet(self, approach=approach, trainset=False)
     averaged = getDefenceResultsMean(attack=self, approach=approach, attack_results=raw_results)
     defence, attributes = averaged[0], averaged[1]
     return defence, attributes
Example #2
0
 def attackOneApproachAndSetAttackEpochs(self, approach, Ktest):
     """
         configures the number of attack epochs to Ktest, then runs the
         requested attack for the given approach on the test set and
         returns the first row of the results
     """
     self.setAttackEpochs(Ktest)
     attack_results, _, _ = attackSet(self, approach=approach,
                                      print_answer=self.print_answer,
                                      trainset=False)
     return attack_results[0]
Example #3
0
    def attackOneApproachAndSetAttackEpochs(self, approach: Approach, Ktest: int) -> Tuple[torch.Tensor]:
        """
            configures the number of attack epochs (Ktest) before running
            the requested attack for a single approach on a specific gnn_type

            Parameters
            ----------
            approach: Approach - the type of attack approach
                                 more information at classes.approach_classes.Approach
            Ktest: int - number of attack epochs for the test

            Returns
            -------
            defence: torch.Tensor - the defence %
        """
        self.setAttackEpochs(Ktest)
        raw_results, _, _ = attackSet(self, approach=approach, trainset=False)
        averaged = getDefenceResultsMean(attack=self, approach=approach, attack_results=raw_results)
        return averaged[0]
Example #4
0
    def attackPerGNN(self) -> Tuple[torch.Tensor]:
        """
            executes the requested attack once with a full attribute budget,
            then derives the defence % and the mean attribute usage for every
            ratio in self.l_0_list on a specific gnn_type
        """
        # maximum number of attributes the attackers may flip in total
        budget = self.getDataset().data.x.shape[1] * self.num_of_attackers
        num_ratios = len(self.l_0_list)
        defence = torch.zeros(num_ratios).to(self.device)
        attributes = torch.zeros(num_ratios).to(self.device)

        # attack once at ratio 1.0; per-ratio numbers are derived from this run
        self.setAttributeRatio(1.0)
        raw, _, _ = attackSet(self, approach=self.approaches[0], trainset=False)
        raw = raw.type(torch.FloatTensor)
        for idx, ratio in enumerate(self.l_0_list):
            self.setAttributeRatio(ratio)
            # column 1: attributes used; column 0: attack success flag
            within_budget = (raw[:, 1] <= ratio * budget)
            successful = torch.logical_and(within_budget, raw[:, 0])

            defence[idx] = 1 - (successful.sum().type(torch.FloatTensor) / raw.shape[0])
            attributes[idx] = raw[successful, 1].mean(dim=0) / budget

        return defence.unsqueeze(0), attributes.unsqueeze(0)
def getTheMostHarmfulInput(attack, approach: Approach) -> Tuple[torch.Tensor]:
    """
        attacks the model on the train set and extracts the attacked
        feature matrix

        NOTE: silences attack output by setting attack.print_answer to
        Print.NO as a side effect (the original value is not restored).

        Parameters
        ----------
        attack: oneGNNAttack
        approach: Approach - the type of attack approach

        Returns
        -------
        attacked_x: torch.Tensor - the feature matrix after the attack
        attacked_nodes: torch.Tensor - the victim nodes
        y_targets: torch.Tensor - the target labels of the attack
    """
    attack.print_answer = Print.NO
    _, attacked_nodes, y_targets = attackSet(attack=attack,
                                             approach=approach,
                                             trainset=True)
    # detach a copy of the model input so the caller gets a tensor that is
    # independent of the model's autograd graph
    attacked_x = attack.model_wrapper.model.getInput().clone().detach()
    return attacked_x, attacked_nodes, y_targets
Example #6
0
    def attackPerGNNDiscrete(self) -> Tuple[torch.Tensor]:
        """
            attackPerGNN for DISCRETE datasets: attacks once with the full
            l_0 budget, then derives the defence % and the mean attribute
            usage for every ratio in self.l_0_list
        """
        # maximum number of attributes a single attacker may flip
        budget = self.getDataset().data.x.shape[1]
        num_ratios = len(self.l_0_list)
        defence = torch.zeros(num_ratios).to(self.device)
        attributes = torch.zeros(num_ratios).to(self.device)

        # single attack at full budget; per-ratio numbers are derived from it
        self.setL0(1.0)
        raw, _, _ = attackSet(self,
                              approach=NodeApproach.SINGLE,
                              trainset=False)
        raw = raw.type(torch.FloatTensor)
        for idx, ratio in enumerate(self.l_0_list):
            self.setL0(ratio)
            # column 1: attributes used; column 0: attack success flag
            within_budget = (raw[:, 1] <= ratio * budget)
            successful = torch.logical_and(within_budget, raw[:, 0])

            defence[idx] = 1 - (successful.sum().type(torch.FloatTensor) /
                                raw.shape[0])
            attributes[idx] = raw[successful, 1].mean(dim=0) / budget

        return defence.unsqueeze(0), attributes.unsqueeze(0)
Example #7
0
def getTheMostHarmfulInput(attack, approach):
    """
        runs the attack on the train set and returns the perturbed feature
        matrix together with the victim nodes and their target labels
    """
    _, attacked_nodes, y_targets = attackSet(attack=attack,
                                             approach=approach,
                                             print_answer=Print.NO,
                                             trainset=True)
    # detach a copy so the returned tensor is independent of autograd state
    perturbed_x = attack.model_wrapper.model.getInput().clone().detach()
    return perturbed_x, attacked_nodes, y_targets
Example #8
0
 def attackOneApproach(self, approach):
     """
         executes the requested attack for a single approach on the test set
         and returns the first row of the results
     """
     attack_results, _, _ = attackSet(self,
                                      approach=approach,
                                      print_answer=self.print_answer,
                                      trainset=False)
     return attack_results[0]