Example No. 1
    def compute_p_a_prime(self, instance):
        # Build X'_A(x'): the attacker's modified instances whose feature
        # vectors differ from the given instance.
        new_list = []
        for attack_instance in self.new_list:
            if not fv_equals(attack_instance.get_feature_vector(),
                             instance.get_feature_vector()):
                new_list.append(attack_instance)
        # posterior_proba appears to return log-probabilities, so
        # np.exp(...)[0, 1] recovers the probability of the positive class.
        p_x_ = 0
        for attack_instance in new_list:
            p_x_ += np.exp(self.posterior_proba(attack_instance))[0, 1]
        # Add the contribution of the instance itself, weighted by i_x.
        return p_x_ + self.i_x(instance) * \
               np.exp(self.posterior_proba(instance))[0, 1]
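
The snippet above (and the examples that follow) compares instances through a helper called fv_equals that is not shown here. A minimal stand-in, assuming feature vectors can be converted to NumPy arrays (the library's real feature-vector type may need a different conversion), could look like this:

import numpy as np

def fv_equals(fv_a, fv_b):
    # Simplified stand-in: two feature vectors are equal when their dense
    # representations match element-wise. np.asarray() is an assumption so
    # the sketch also runs on plain lists; adapt it to the real type.
    return np.array_equal(np.asarray(fv_a), np.asarray(fv_b))

# Quick check on plain lists:
assert fv_equals([1, 0, 1], [1, 0, 1])
assert not fv_equals([1, 0, 1], [1, 1, 1])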
Example No. 2
    def find_new_list(self, instances):
        """
        Find the instances in the attacker's modified data that do not appear
        in the training data, and store them in self.new_list.
        :param instances: attacker's modified data
        :return: None
        """
        new_list = []
        for attack_instance in instances:
            # An attack instance is new if no training instance shares its
            # feature vector.
            equal = False
            for instance in self.training_instances:
                if fv_equals(attack_instance.get_feature_vector(),
                             instance.get_feature_vector()):
                    equal = True
                    break
            if not equal:
                new_list.append(attack_instance)
        self.new_list = new_list
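
The same filtering idea can be exercised outside the class. The sketch below is a simplified, self-contained version in which plain lists stand in for the library's Instance and feature-vector objects; only the double-loop structure mirrors find_new_list above:

def find_new(modified_vectors, training_vectors):
    # Keep each modified vector that matches no training vector; this mirrors
    # the nested loop in find_new_list, but over plain lists for illustration.
    new_list = []
    for mv in modified_vectors:
        if not any(mv == tv for tv in training_vectors):
            new_list.append(mv)
    return new_list

training = [[1, 0, 1], [0, 1, 0]]
modified = [[1, 0, 1], [1, 1, 1]]
print(find_new(modified, training))   # -> [[1, 1, 1]]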
Example No. 3
    def train(self):
        '''
        Implemented according to Algorithm 1 in "Central Retraining Framework
        for Scalable Adversarial Classification". The method alternates between
        training a classifier and adding to the training data the adversarial
        instances that evade the previously trained classifier.
        :return: None
        '''
        self.model.train(self.training_instances)
        iteration = self.iteration_times
        #self.attacker = self.attack_alg()
        #self.attacker.set_params(self.adv_params)
        #self.attacker.set_adversarial_params(self.model, self.training_instances)

        print("==> Training...")
        malicious_instances = [
            x for x in self.training_instances if self.model.predict(x) == 1
        ]
        augmented_instances = self.training_instances

        while iteration != 0:
            new = []
            transformed_instances = self.attacker.attack(malicious_instances)
            for instance in transformed_instances:
                # Skip transformed instances whose feature vector is already
                # in the augmented training set.
                in_list = False
                for old_instance in augmented_instances:
                    if fv_equals(old_instance.get_feature_vector(),
                                 instance.get_feature_vector()):
                        in_list = True
                        break
                if not in_list:
                    new.append(instance)
                    augmented_instances.append(
                        Instance(label=1,
                                 feature_vector=instance.get_feature_vector()))
            self.model.train(augmented_instances)
            malicious_instances = [
                x for x in augmented_instances if self.model.predict(x) == 1
            ]
            iteration -= 1
            # Stop once the attacker can no longer produce unseen instances.
            if not new:
                break
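
To make the control flow concrete without depending on the surrounding library, here is a toy, self-contained sketch of the same retrain-until-no-new-evasions loop. ToyModel and toy_attack are invented for illustration and are not part of the original code; only the loop structure mirrors train() above.

def toy_attack(malicious):
    # Invented attacker: flip the last feature of each malicious vector.
    return [v[:-1] + [1 - v[-1]] for v in malicious]

class ToyModel:
    # Invented classifier: predict 1 when a vector's feature sum reaches the
    # mean feature sum of the training vectors.
    def train(self, data):
        self.threshold = sum(sum(v) for v, _ in data) / max(len(data), 1)

    def predict(self, v):
        return 1 if sum(v) >= self.threshold else 0

training = [([1, 1, 0], 1), ([0, 0, 1], 0), ([1, 1, 1], 1)]
model = ToyModel()
model.train(training)

augmented = list(training)
malicious = [v for v, _ in augmented if model.predict(v) == 1]
for _ in range(5):                        # iteration budget
    new = []
    for v in toy_attack(malicious):
        if v not in [u for u, _ in augmented]:
            new.append(v)
            augmented.append((v, 1))      # evading instance, labeled malicious
    model.train(augmented)
    malicious = [v for v, _ in augmented if model.predict(v) == 1]
    if not new:                           # nothing unseen was produced
        break

print(len(augmented), "training points after retraining")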