Example No. 1
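These snippets omit their imports. A plausible header for this first example, inferred from the other examples on this page (the adlib module paths are assumptions); calculate_correct_percentages is a local test helper, sketched at the end of this example:

import time
from copy import deepcopy

import numpy as np
from sklearn import svm

from data_reader.dataset import EmailDataset
from data_reader.operations import load_dataset
# The adlib paths below are assumptions based on the CostSensitive import
# in Example No. 6; adjust to the actual adlib layout if different.
from adlib.learners import SimpleLearner
from adlib.adversaries import LabelFlipping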
def test_label_flipping():
    print('\n#################################################################')
    print('START label flipping attack.\n')

    begin = time.time()

    # Data processing unit
    # The path is an index of 400 testing samples (raw email data).
    dataset = EmailDataset(path='./data_reader/data/raw/trec05p-1/test-400',
                           binary=True, raw=True)
    training_data = load_dataset(dataset)

    print('Training sample size: ', len(training_data), '/400\n', sep='')

    # Randomly cut dataset in approximately half
    rand_choices = np.random.binomial(1, 0.5, len(training_data))
    new_training_data = []
    predict_data = []
    for i in range(len(training_data)):
        if rand_choices[i] == 1:
            new_training_data.append(training_data[i])
        else:
            predict_data.append(training_data[i])
    training_data = new_training_data

    # Setting the default learner
    # Test simple learner svm
    learning_model = svm.SVC(probability=True, kernel='linear')
    learner = SimpleLearner(learning_model, training_data)
    learner.train()
    orig_learner = deepcopy(learner)

    # Execute the attack
    cost = list(np.random.binomial(2, 0.5, len(training_data)))
    total_cost = 0.3 * len(training_data)  # flip around ~30% of the labels
    attacker = LabelFlipping(learner, cost, total_cost, verbose=True)
    attack_data = attacker.attack(training_data)

    flip_vector = []  # 0 -> flipped, 1 -> not flipped
    for i in range(len(attack_data)):
        if attack_data[i].get_label() != training_data[i].get_label():
            flip_vector.append(0)
        else:
            flip_vector.append(1)

    print('Flip vector with 0 -> flipped and 1 -> not flipped: \n',
          np.array(flip_vector), '\n')

    original_pred_labels = learner.predict(training_data)

    # Retrain the model with poisoned data
    learning_model = svm.SVC(probability=True, kernel='linear')
    learner = SimpleLearner(learning_model, attack_data)
    learner.train()

    ############################################################################
    # Calculate statistics with training data

    attack_pred_labels = learner.predict(training_data)  # attacked model on original data

    (orig_percent_correct,
     attack_percent_correct,
     difference) = calculate_correct_percentages(original_pred_labels,
                                                 attack_pred_labels,
                                                 training_data)

    print('###################################################################')
    print('Predictions with training dataset:')
    print('Original correct percentage: ', orig_percent_correct, '%')
    print('Attack correct percentage: ', attack_percent_correct, '%')
    print('Difference: ', difference, '%')

    ############################################################################
    # Calculate statistics with predict data (other half of dataset)

    original_pred_labels = orig_learner.predict(predict_data)
    attack_pred_labels = learner.predict(predict_data)

    (orig_percent_correct,
     attack_percent_correct,
     difference) = calculate_correct_percentages(original_pred_labels,
                                                 attack_pred_labels,
                                                 predict_data)

    print('###################################################################')
    print('Predictions with other half of dataset:')
    print('Original correct percentage: ', orig_percent_correct, '%')
    print('Attack correct percentage: ', attack_percent_correct, '%')
    print('Difference: ', difference, '%')

    end = time.time()
    print('\nTotal time: ', round(end - begin, 2), 's', '\n', sep='')

    print('\nEND label flipping attack.')
    print('#################################################################\n')
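All of these tests score results with a helper, calculate_correct_percentages, that never appears on this page. A minimal sketch of what it plausibly computes, inferred from how its three return values are printed (an assumption, not the library's actual implementation):

def calculate_correct_percentages(orig_pred_labels, attack_pred_labels, instances):
    # Compare both sets of predictions against the true labels and return
    # (original %, post-attack %, difference) as used in the prints above.
    true_labels = [inst.get_label() for inst in instances]
    orig_correct = sum(1 for p, t in zip(orig_pred_labels, true_labels) if p == t)
    attack_correct = sum(1 for p, t in zip(attack_pred_labels, true_labels) if p == t)
    orig_pct = round(100 * orig_correct / len(true_labels), 2)
    attack_pct = round(100 * attack_correct / len(true_labels), 2)
    return orig_pct, attack_pct, round(orig_pct - attack_pct, 2)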
Example No. 2
class TestDataPoisoningLearner:
    def __init__(self,
                 learner_names: Union[List[str], str],
                 attacker_name: str,
                 dataset: EmailDataset,
                 params: Dict = None,
                 verbose=True):
        """
        Test setup.
        :param learner_names: one learner name or a list of names, each one of 'trim',
                              'atrim', 'irl', or 'outlier-removal'
        :param attacker_name: Either 'label-flipping', 'k-insertion', 'data-modification', or
                              'dummy'
        :param dataset: the dataset
        :param params: the params to pass to the learner - if None, defaults will be used
        :param verbose: if True, will print START and STOP and set learners and attackers to
                        verbose mode
        """

        if isinstance(learner_names, str):
            learner_names = [learner_names]
        learner_names = list(map(lambda x: x.lower(), learner_names))

        # Every requested name must be one of the supported learners
        if not set(learner_names) <= {'trim', 'atrim', 'irl', 'outlier-removal'}:
            raise ValueError('Learner name must be trim, atrim, irl, or '
                             'outlier-removal.')

        if attacker_name.lower() not in [
                'label-flipping', 'k-insertion', 'data-modification', 'dummy'
        ]:
            raise ValueError('Attacker name not label-flipping, k-insertion, '
                             'data-modification, nor dummy.')

        self.learner_names = learner_names

        def update_lnr_names(x):
            if x == 'trim':
                x = 'TRIM Learner'
            elif x == 'atrim':
                x = 'Alternating TRIM Learner'
            elif x == 'irl':
                x = 'Iterative Retraining Learner'
            else:  # x == 'outlier-removal'
                x = 'Outlier Removal Learner'
            return x

        self.learner_names = list(map(update_lnr_names, self.learner_names))

        self.attacker_name = attacker_name.lower()
        self.params = params
        self.verbose = verbose

        training_data, testing_data = dataset.split({'train': 50, 'test': 50})
        self.training_instances = load_dataset(training_data)
        self.testing_instances = load_dataset(testing_data)

        self.learner = None  # SVM with clean dataset
        self.attack_learner = None  # SVM with attacked dataset
        self.dp_learner = None  # Learner we are testing
        self.attacker = None  # the attacker
        self.attack_instances = None  # the attacked instances

        # Before attack
        self.training_pred_labels = None  # the predicted labels of the training set for the SVM
        self.testing_pred_labels = None  # the predicted labels of the testing set for the SVM

        # After attack
        self.attack_training_pred_labels = None  # attacked SVM's labels for the training set
        self.attack_testing_pred_labels = None  # attacked SVM's labels for the testing set
        self.dp_learner_training_pred_labels = None  # DP learner's labels for the training set
        self.dp_learner_testing_pred_labels = None  # DP learner's labels for the testing set

        self.labels = []  # true labels
        for inst in self.training_instances + self.testing_instances:
            self.labels.append(inst.get_label())

        self.results = []  # List of result tuples

    def test(self):
        if self.verbose:
            print(
                '\n###################################################################'
            )
            print(
                'START', self.learner_names[0]
                if len(self.learner_names) == 1 else 'learner', 'test.\n')

        self._setup()
        self._attack()
        self._retrain()

        for name in self.learner_names:
            begin = time.time()
            self._run_learner(name)
            end = time.time()

            result = (list(self.labels), list(self.training_pred_labels) +
                      list(self.testing_pred_labels),
                      list(self.attack_training_pred_labels) +
                      list(self.attack_testing_pred_labels),
                      list(self.dp_learner_training_pred_labels) +
                      list(self.dp_learner_testing_pred_labels), end - begin)

            self.results.append(result)

        if self.verbose:
            print(
                '\nEND', self.learner_names[0]
                if len(self.learner_names) == 1 else 'learner', 'test.')
            print(
                '###################################################################\n'
            )

        return self.results[0] if len(self.results) == 1 else self.results

    def _setup(self):
        if self.verbose:
            print('Training sample size: ',
                  len(self.training_instances),
                  '/400\n',
                  sep='')

        # Setting the default learner
        learning_model = svm.SVC(probability=True, kernel='linear')
        self.learner = SimpleLearner(learning_model, self.training_instances)
        self.learner.train()

        self.training_pred_labels = self.learner.predict(
            self.training_instances)
        self.testing_pred_labels = self.learner.predict(self.testing_instances)

    def _attack(self):
        # Execute the attack
        if self.attacker_name == 'label-flipping':
            cost = list(
                np.random.binomial(2, 0.5, len(self.training_instances)))
            total_cost = 40  # flip around 40 labels
            if self.params:
                self.attacker = LabelFlipping(deepcopy(self.learner),
                                              **self.params)
            else:
                self.attacker = LabelFlipping(deepcopy(self.learner),
                                              cost,
                                              total_cost,
                                              verbose=self.verbose)
        elif self.attacker_name == 'k-insertion':
            self.attacker = KInsertion(
                deepcopy(self.learner),
                self.training_instances[0],
                number_to_add=50,  # 50 / (200 + 50) = 20%
                verbose=self.verbose)
        elif self.attacker_name == 'data-modification':
            target_theta = calculate_target_theta(
                deepcopy(self.learner), deepcopy(self.training_instances),
                deepcopy(self.testing_instances))

            self.attacker = DataModification(deepcopy(self.learner),
                                             target_theta,
                                             verbose=self.verbose)
        else:  # self.attacker_name == 'dummy'
            num_instances = len(self.training_instances)

            class DummyAttacker:
                def attack(self, instances):
                    attack_instances = deepcopy(instances)
                    tmp = np.random.binomial(1, 0.2, num_instances)
                    for i, val in enumerate(tmp):
                        if val == 1:
                            attack_instances[i].set_label(
                                attack_instances[i].get_label() * -1)

                    print('Poisoned instances: ',
                          sum(tmp),
                          '/',
                          num_instances,
                          sep='')
                    print('Unpoisoned instances: ',
                          num_instances - sum(tmp),
                          '/',
                          num_instances,
                          sep='')

                    return attack_instances

            self.attacker = DummyAttacker()

        if self.verbose:
            print(
                '\n###################################################################'
            )
            print('START', self.attacker_name, 'attack.\n')

        if self.attacker_name == 'data-modification':
            self.attack_instances = self.attacker.attack(
                deepcopy(self.training_instances[:40]))
            # Append the 160 instances that were not attacked ([:-160] would
            # re-append the first 40 instead)
            self.attack_instances += deepcopy(self.training_instances[40:])
        else:
            self.attack_instances = self.attacker.attack(
                deepcopy(self.training_instances))

        if self.verbose:
            print('\nEND', self.attacker_name, 'attack.')
            print(
                '###################################################################\n'
            )

    def _retrain(self):
        # Retrain the model with poisoned data
        learning_model = svm.SVC(probability=True, kernel='linear')
        self.attack_learner = SimpleLearner(learning_model,
                                            self.attack_instances)
        self.attack_learner.train()

        self.attack_training_pred_labels = self.attack_learner.predict(
            self.training_instances)
        self.attack_testing_pred_labels = self.attack_learner.predict(
            self.testing_instances)

    def _run_learner(self, name):
        if self.verbose:
            print(
                '\n###################################################################'
            )
            print('START ', name, '.\n', sep='')

        if name == 'TRIM Learner':
            self.dp_learner = TRIMLearner(
                deepcopy(self.attack_instances),
                int(len(self.attack_instances) * 0.8),
                verbose=self.verbose)
        elif name == 'Alternating TRIM Learner':
            self.dp_learner = AlternatingTRIMLearner(deepcopy(
                self.attack_instances),
                                                     verbose=self.verbose)
        elif name == 'Iterative Retraining Learner':
            self.dp_learner = IterativeRetrainingLearner(deepcopy(
                self.attack_instances),
                                                         verbose=self.verbose)
        else:  # name == 'Outlier Removal Learner'
            self.dp_learner = OutlierRemovalLearner(deepcopy(
                self.attack_instances),
                                                    verbose=self.verbose)

        self.dp_learner.train()

        if self.verbose:
            print('\nEND ', name, '.', sep='')
            print(
                '###################################################################\n'
            )

        self.dp_learner_training_pred_labels = self.dp_learner.predict(
            self.training_instances)
        self.dp_learner_testing_pred_labels = self.dp_learner.predict(
            self.testing_instances)
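A hypothetical driver for the harness above; the dataset path mirrors the other examples here, and the unpacking follows the result tuple assembled in test():

dataset = EmailDataset(path='./data_reader/data/raw/trec05p-1/test-400',
                       binary=True, raw=True)
tester = TestDataPoisoningLearner(['trim', 'atrim'],
                                  'label-flipping',
                                  dataset,
                                  verbose=True)
# With more than one learner, test() returns one result tuple per learner
for true_labels, clean_preds, attack_preds, dp_preds, seconds in tester.test():
    print('Defense run took', round(seconds, 2), 's')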
Example No. 3
    print("{0} malicious instances are being detected initially")
    return ls, [x.label for x in ls]


dataset = EmailDataset(path='./data_reader/data/raw/trec05p-1/test-400',
                       binary=True,
                       raw=True)
training_, testing_ = dataset.split({'train': 60, 'test': 40})
training_data = load_dataset(training_)
testing_data = load_dataset(testing_)
test_true_label = [x.label for x in testing_data]

# test simple learner svm
learning_model = svm.SVC(probability=True, kernel='linear')
learner1 = SimpleLearner(learning_model, training_data)
learner1.train()

predictions = learner1.predict(testing_data)
print("======== initial prediction =========")
print(summary(predictions, test_true_label))

# Note: should not use only malicious data
attacker = GoodWord(n=500)
attacker.set_adversarial_params(learner1, testing_data)
new_testing_data = attacker.attack(testing_data)

predictions2 = learner1.predict(new_testing_data)
print("========= post-attack prediction =========")
print("post attack preds" + str(predictions2))
print(type(predictions2).__name__)
print(type(test_true_label).__name__)
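The post-attack block prints the raw predictions but never scores them. The summary helper already used for the initial prediction can close the loop:

print(summary(predictions2, test_true_label))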
Example No. 4
def test_iterative_retraining_learner():
    print()
    print(
        '###################################################################')
    print('START Iterative Retraining learner test.\n')

    begin = time.time()

    if len(sys.argv) == 2 and sys.argv[1] in [
            'label-flipping', 'k-insertion', 'data-modification'
    ]:
        attacker_name = sys.argv[1]
    else:
        attacker_name = 'label-flipping'

    # Data processing unit
    # The path is an index of 400 testing samples (raw email data).
    dataset = EmailDataset(path='./data_reader/data/raw/trec05p-1/test-400',
                           binary=True,
                           raw=True)

    training_data, testing_data = dataset.split({'train': 20, 'test': 80})
    training_data = load_dataset(training_data)
    testing_data = load_dataset(testing_data)

    print('Training sample size: ', len(training_data), '/400\n', sep='')

    # Setting the default learner
    learning_model = svm.SVC(probability=True, kernel='linear')
    learner = SimpleLearner(learning_model, training_data)
    learner.train()

    original_pred_labels = learner.predict(training_data)
    orig_learner = deepcopy(learner)

    # Execute the attack
    if attacker_name == 'label-flipping':
        cost = list(np.random.binomial(2, 0.5, len(training_data)))
        total_cost = 0.3 * len(training_data)  # flip around ~30% of the labels
        attacker = LabelFlipping(learner, cost, total_cost, verbose=True)
    elif attacker_name == 'k-insertion':
        number_to_add = int(0.25 * len(training_data))
        attacker = KInsertion(learner,
                              training_data[0],
                              number_to_add=number_to_add,
                              verbose=True)
    else:  # attacker_name == 'data-modification'
        lnr = orig_learner.model.learner
        eye = np.eye(training_data[0].get_feature_count(), dtype=int)
        orig_theta = lnr.decision_function(eye) - lnr.intercept_[0]
        target_theta = deepcopy(orig_theta)

        spam_instances = []
        for inst in training_data + testing_data:
            if inst.get_label() == 1:
                spam_instances.append(inst)

        spam_features, ham_features = get_spam_features(spam_instances)

        # Set features to recognize spam as ham
        for index in spam_features:
            target_theta[index] = -10

        for index in ham_features:
            target_theta[index] = 0.01

        print('Features selected: ', np.array(spam_features))
        print('Number of features: ', len(spam_features))

        attacker = DataModification(orig_learner, target_theta, verbose=True)

    print(
        '###################################################################')
    print('START', attacker_name, 'attack.\n')

    attack_data = attacker.attack(training_data)

    print('\nEND', attacker_name, 'attack.')
    print(
        '###################################################################')
    print()

    # Retrain the model with poisoned data
    learning_model = svm.SVC(probability=True, kernel='linear')
    learner = SimpleLearner(learning_model, attack_data)
    learner.train()

    print(
        '###################################################################')
    print('START Iterative Retraining learner.\n')

    # Train the defense learner on the poisoned data (constructor usage as
    # in the TestDataPoisoningLearner example above)
    irl_learner = IterativeRetrainingLearner(deepcopy(attack_data),
                                             verbose=True)
    irl_learner.train()

    print('\nEND Iterative Retraining learner.')
    print(
        '###################################################################')
    print()

    ############################################################################
    # Calculate statistics with training data

    attack_pred_labels = learner.predict(
        training_data)  # attacked model on original training data

    (orig_percent_correct, attack_percent_correct,
     difference) = calculate_correct_percentages(original_pred_labels,
                                                 attack_pred_labels,
                                                 training_data)

    print(
        '###################################################################')
    print('Predictions with training dataset:')
    print('Original correct percentage: ', orig_percent_correct, '%')
    print('Attack correct percentage: ', attack_percent_correct, '%')
    print('Difference: ', difference, '%')

    ############################################################################
    # Calculate statistics with the testing data (80% of the split)

    original_pred_labels = orig_learner.predict(testing_data)
    attack_pred_labels = learner.predict(testing_data)

    (orig_percent_correct, attack_percent_correct,
     difference) = calculate_correct_percentages(original_pred_labels,
                                                 attack_pred_labels,
                                                 testing_data)

    print(
        '###################################################################')
    print('Predictions with testing dataset:')
    print('Original correct percentage: ', orig_percent_correct, '%')
    print('Attack correct percentage: ', attack_percent_correct, '%')
    print('Difference: ', difference, '%')

    ############################################################################
    # Calculate statistics with the iterative retraining learner

    data = training_data + testing_data
    irl_pred_labels = irl_learner.predict(data)
    normal_pred_labels = learner.predict(data)

    (irl_percent_correct, normal_percent_correct,
     difference) = calculate_correct_percentages(irl_pred_labels,
                                                 normal_pred_labels, data)

    print(
        '###################################################################')
    print('Predictions using Iterative Retraining learner:')
    print('Iterative Retraining learner correct percentage: ',
          irl_percent_correct, '%')
    print('Simple learner correct percentage: ', normal_percent_correct, '%')
    print('Difference: ', difference, '%')

    end = time.time()
    print('\nTotal time: ', round(end - begin, 2), 's', '\n', sep='')

    print('\nEND Iterative Retraining learner test.')
    print(
        '###################################################################')
    print()
Example No. 5
def NB_learner(data):
    learner_model = GaussianNB()
    learner = SimpleLearner(model=learner_model,
                            training_instances=data['training_data'])
    learner.train()
    return learner
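A usage sketch (hypothetical names): NB_learner only reads the 'training_data' key, which should hold instances loaded with load_dataset as in the surrounding examples.

data = {'training_data': training_data}
nb_learner = NB_learner(data)
nb_predictions = nb_learner.predict(testing_data)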
Example No. 6
from sklearn.naive_bayes import BernoulliNB

from data_reader.dataset import EmailDataset
from data_reader.operations import load_dataset
from adlib.adversaries import CostSensitive
# SimpleLearner import path is an assumption based on the adlib layout
from adlib.learners import SimpleLearner

# data operation
dataset = EmailDataset(path='./data_reader/data/raw/trec05p-1/full',
                       binary=False,
                       raw=True)
training_, testing_ = dataset.split({'train': 60, 'test': 40})
training_data = load_dataset(training_)
testing_data = load_dataset(testing_)

# learner
learning_model = BernoulliNB()
learner = SimpleLearner(learning_model, training_data)
learner.train()

# adversary
param = {}
param['Ua'] = [[0, 20], [0, 0]]
param['Vi'] = 0
param['Uc'] = [[1, -1], [-10, 1]]
param['scenario'] = None

adversary = CostSensitive()
adversary.set_params(param)
adversary.set_adversarial_params(learner, training_data)

# test attack (the attacked instances must be captured and re-predicted,
# otherwise the attack has no observable effect)
predictions1 = learner.predict(testing_data)
attacked_testing_data = adversary.attack(testing_data)
predictions2 = learner.predict(attacked_testing_data)
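To make the attack's effect visible, one can compare accuracy before and after; a sketch using get_label() as in the other examples:

true_labels = [inst.get_label() for inst in testing_data]

def accuracy(preds):
    # Fraction of predictions matching the true labels
    return sum(1 for p, t in zip(preds, true_labels) if p == t) / len(true_labels)

print('Accuracy before attack: ', accuracy(predictions1))
print('Accuracy after attack:  ', accuracy(predictions2))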
Example No. 7
def learner(data):
    learning_model = svm.SVC(probability=True, kernel='linear')
    learner = SimpleLearner(learning_model, data['training_data'])
    learner.train()
    return learner
Example No. 8
def test_k_insertion():
    """
    Use as follows:
    python3 adlib/tests/adversaries/k_insertion_test.py #-TO-ADD #-ITERATIONS
    """

    print(
        '\n#################################################################')
    print('START k-insertion attack.\n')

    if len(sys.argv) > 2:
        number_to_add = int(sys.argv[1])
        num_iterations = int(sys.argv[2])
    else:
        number_to_add = 1
        num_iterations = 4

    # Data processing unit
    # The path is an index of 400 testing samples (raw email data).
    dataset = EmailDataset(path='./data_reader/data/raw/trec05p-1/test-400',
                           binary=True,
                           raw=True)
    training_data = load_dataset(dataset)

    # Randomly choose ~12% of dataset to decrease debugging time
    # 10% was too small for a realistic calculation.
    choices = np.random.binomial(1, 0.12, len(training_data))
    temp = []
    predict_data = []
    count = 0
    for i in range(len(training_data)):
        if choices[i] == 1:
            temp.append(training_data[i])
            count += 1
        else:
            predict_data.append(training_data[i])
    training_data = temp
    print('Training sample size: ', count, '/400\n', sep='')

    # Setting the default learner
    # Test simple learner svm
    learning_model = svm.SVC(probability=True, kernel='linear')
    learner = SimpleLearner(learning_model, training_data)
    learner.train()

    original_pred_labels = learner.predict(training_data)
    before_attack_label = original_pred_labels[0]
    orig_learner = deepcopy(learner)

    # Do the attack
    attacker = KInsertion(learner,
                          training_data[0],
                          number_to_add=number_to_add,
                          num_iterations=num_iterations,
                          verbose=True)

    attack_data = attacker.attack(training_data)

    # Retrain the model with poisoned data
    learning_model = svm.SVC(probability=True, kernel='linear')
    learner = SimpleLearner(learning_model, attack_data)
    learner.train()

    print('Number of added instances: ', len(attack_data) - len(training_data))

    ############################################################################
    # Calculate statistics with training data

    attack_pred_labels = learner.predict(
        training_data)  # attacked model on original training data
    after_attack_label = attack_pred_labels[0]

    (orig_percent_correct, attack_percent_correct,
     difference) = calculate_correct_percentages(original_pred_labels,
                                                 attack_pred_labels,
                                                 training_data)

    print(
        '###################################################################')
    print('Predictions with training dataset:')
    print('Original correct percentage: ', orig_percent_correct, '%')
    print('Attack correct percentage: ', attack_percent_correct, '%')
    print('Difference: ', difference, '%')

    ############################################################################
    # Calculate statistics with predict data (held-out portion of dataset)

    original_pred_labels = orig_learner.predict(predict_data)
    attack_pred_labels = learner.predict(predict_data)

    (orig_percent_correct, attack_percent_correct,
     difference) = calculate_correct_percentages(original_pred_labels,
                                                 attack_pred_labels,
                                                 predict_data)

    print(
        '###################################################################')
    print('Predictions with held-out portion of dataset:')
    print('Original correct percentage: ', orig_percent_correct, '%')
    print('Attack correct percentage: ', attack_percent_correct, '%')
    print('Difference: ', difference, '%')

    ############################################################################
    # Report the selected instance's labels before and after the attack

    print(
        '###################################################################')
    print('Selected instance true label: ', training_data[0].get_label())
    print('Selected instance predicted label BEFORE attack: ',
          before_attack_label)
    print('Selected instance predicted label AFTER attack: ',
          after_attack_label)

    ############################################################################
    # Output loss calculations

    print(
        '###################################################################')
    print('poison_instance loss before attack: ',
          round(attacker.poison_loss_before, 4))
    print('poison_instance loss after attack: ',
          round(attacker.poison_loss_after, 4))
    print('poison_instance loss difference: ',
          round(attacker.poison_loss_after - attacker.poison_loss_before, 4))

    print('\nEND k-insertion attack.')
    print(
        '#################################################################\n')
Example No. 9
def test_data_modification():
    print()
    print(
        '###################################################################')
    print('START data modification attack.\n')

    begin = time.time()

    # Data processing unit
    # The path is an index of 400 testing samples (raw email data).
    dataset = EmailDataset(path='./data_reader/data/raw/trec05p-1/test-400',
                           binary=False,
                           raw=True)

    training_data, predict_data = dataset.split({'train': 50, 'test': 50})
    training_data = load_dataset(training_data)
    predict_data = load_dataset(predict_data)

    print('Training sample size: ', len(training_data), '/400\n', sep='')

    # Setting the default learner
    # Test simple learner svm
    orig_learning_model = svm.SVC(probability=True, kernel='linear')
    orig_learner = SimpleLearner(orig_learning_model, training_data)
    orig_learner.train()

    ############################################################################
    # Calculate the target theta (1 -> spam, -1 -> ham). For this calculation,
    # assume we know which spam we want classified as ham and which features
    # should have a disproportionate effect on the decision function. For
    # example, if feature #32 is common to all of our spam, we want the
    # corresponding entry of target_theta (index 32 - 1 = 31) to be strongly
    # negative. The 1 indicating that feature #32 is present is then
    # multiplied by a large negative number, which drags the decision
    # function below zero and classifies the spam as ham.
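    # Concretely (illustrative numbers only): with target_theta[31] = -10, a
    # spam email that has feature #32 contributes 1 * -10 to its decision
    # value; a few such features push the score below zero, so the instance
    # comes out labeled ham (-1).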

    lnr = orig_learner.model.learner
    eye = np.eye(training_data[0].get_feature_count(), dtype=int)
    orig_theta = lnr.decision_function(eye) - lnr.intercept_[0]
    target_theta = deepcopy(orig_theta)

    spam_instances = []
    for inst in training_data + predict_data:
        if inst.get_label() == 1:
            spam_instances.append(inst)

    spam_features, ham_features = get_spam_features(spam_instances)

    # Set features to recognize spam as ham
    for index in spam_features:
        target_theta[index] = -10

    for index in ham_features:
        target_theta[index] = 0.01

    print('Features selected: ', np.array(spam_features))
    print('Number of features: ', len(spam_features))

    ############################################################################

    # Get original predictions
    original_pred_labels = orig_learner.predict(training_data)

    # Do the attack
    attacker = DataModification(orig_learner, target_theta, verbose=True)
    attack_data = attacker.attack(training_data)

    # Retrain the model with poisoned data
    learning_model = svm.SVC(probability=True, kernel='linear')
    learner = SimpleLearner(learning_model, attack_data)
    learner.train()

    ############################################################################
    # Calculate statistics with training data

    attack_pred_labels = learner.predict(
        training_data)  # attacked model on original training data

    (orig_percent_correct, attack_percent_correct,
     difference) = calculate_correct_percentages(original_pred_labels,
                                                 attack_pred_labels,
                                                 training_data)

    print(
        '###################################################################')
    print('Predictions with training dataset:')
    print('Original correct percentage: ', orig_percent_correct, '%')
    print('Attack correct percentage: ', attack_percent_correct, '%')
    print('Difference: ', difference, '%')

    ############################################################################
    # Calculate statistics with predict data (other half of dataset)

    original_pred_labels = orig_learner.predict(predict_data)
    attack_pred_labels = learner.predict(predict_data)

    (orig_percent_correct, attack_percent_correct,
     difference) = calculate_correct_percentages(original_pred_labels,
                                                 attack_pred_labels,
                                                 predict_data)

    print(
        '###################################################################')
    print('Predictions with other half of dataset:')
    print('Original correct percentage: ', orig_percent_correct, '%')
    print('Attack correct percentage: ', attack_percent_correct, '%')
    print('Difference: ', difference, '%')

    ############################################################################
    # Count spam instances classified as ham after the attack

    spam_pred_labels = learner.predict(spam_instances)
    spam_ham_count = sum(map(lambda x: 1 if x == -1 else 0, spam_pred_labels))
    print(
        '###################################################################')
    print('Number of spam instances in the dataset that were \n',
          'classified as ham after the attack: ',
          spam_ham_count,
          '/',
          len(spam_instances),
          sep='')

    end = time.time()
    print('\nTotal time: ', round(end - begin, 2), 's', '\n', sep='')

    print('\nEND data modification attack.')
    print(
        '###################################################################')
    print()
Example No. 10
def test_k_insertion():
    """
    Use as follows:
    python3 adlib/tests/adversaries/k_insertion_test.py NUMBER-TO-ADD
    """

    print()
    print('###################################################################')
    print('START k-insertion attack.\n')

    begin = time.time()

    # Data processing unit
    # The path is an index of 400 testing samples (raw email data).
    dataset = EmailDataset(path='./data_reader/data/raw/trec05p-1/test-400',
                           binary=False, raw=True)
    training_data, predict_data = dataset.split({'train': 20, 'test': 80})
    training_data = load_dataset(training_data)
    predict_data = load_dataset(predict_data)

    print('Training sample size: ', len(training_data), '/400\n', sep='')

    if len(sys.argv) > 1:
        number_to_add = int(sys.argv[1])
    else:
        number_to_add = int(0.25 * len(training_data))

    # Setting the default learner
    # Test simple learner svm
    learning_model = svm.SVC(probability=True, kernel='linear')
    learner = SimpleLearner(learning_model, training_data)
    learner.train()

    original_pred_labels = learner.predict(training_data)
    before_attack_label = original_pred_labels[0]
    orig_learner = deepcopy(learner)

    # Do the attack
    attacker = KInsertion(learner,
                          training_data[0],
                          number_to_add=number_to_add,
                          verbose=True)

    attack_data = attacker.attack(training_data)

    # Retrain the model with poisoned data
    learning_model = svm.SVC(probability=True, kernel='linear')
    learner = SimpleLearner(learning_model, attack_data)
    learner.train()

    print('Number of added instances: ', len(attack_data) - len(training_data))

    ############################################################################
    # Calculate statistics with training data

    attack_pred_labels = learner.predict(training_data)  # attacked model on original data
    after_attack_label = attack_pred_labels[0]

    (orig_percent_correct,
     attack_percent_correct,
     difference) = calculate_correct_percentages(original_pred_labels,
                                                 attack_pred_labels,
                                                 training_data)

    print('###################################################################')
    print('Predictions with training dataset:')
    print('Original correct percentage: ', orig_percent_correct, '%')
    print('Attack correct percentage: ', attack_percent_correct, '%')
    print('Difference: ', difference, '%')

    ############################################################################
    # Calculate statistics with the testing data (80% of the split)

    original_pred_labels = orig_learner.predict(predict_data)
    attack_pred_labels = learner.predict(predict_data)

    (orig_percent_correct,
     attack_percent_correct,
     difference) = calculate_correct_percentages(original_pred_labels,
                                                 attack_pred_labels,
                                                 predict_data)

    print('###################################################################')
    print('Predictions with testing dataset:')
    print('Original correct percentage: ', orig_percent_correct, '%')
    print('Attack correct percentage: ', attack_percent_correct, '%')
    print('Difference: ', difference, '%')

    ############################################################################
    # Report the selected instance's labels before and after the attack

    print('###################################################################')
    print('Selected instance true label: ', training_data[0].get_label())
    print('Selected instance predicted label BEFORE attack: ',
          before_attack_label)
    print('Selected instance predicted label AFTER attack: ',
          after_attack_label)

    ############################################################################
    # Output loss calculations

    print('###################################################################')
    print('poison_instance loss before attack: ',
          round(attacker.poison_loss_before, 4))
    print('poison_instance loss after attack: ',
          round(attacker.poison_loss_after, 4))
    print('poison_instance loss difference: ',
          round(attacker.poison_loss_after - attacker.poison_loss_before, 4))

    end = time.time()
    print('\nTotal time: ', round(end - begin, 2), 's', '\n', sep='')

    print('\nEND k-insertion attack.')
    print('###################################################################')
    print()