Example #1
from sklearn import svm
from learners import SimpleLearner  # adlib-style wrapper; exact import path may vary by version

def simple_learner(data):
    # Train a linear SVM wrapped in a SimpleLearner on the provided training data.
    learning_model = svm.SVC(probability=True, kernel='linear')
    learner = SimpleLearner(learning_model, data['training_data'])
    learner.train()
    return learner
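For context, a minimal sketch of how simple_learner() might be driven, reusing the EmailDataset/load_dataset pattern from Example #4 (the dataset path and the layout of the data dict are assumptions, not part of this example):

from data_reader.dataset import EmailDataset
from data_reader.operations import load_dataset

# Hypothetical driver: build the dict that simple_learner() expects.
dataset = EmailDataset(path='./data_reader/data/raw/trec05p-1/test-400',
                       binary=False, raw=True)
training_, _ = dataset.split({'train': 60, 'test': 40})
data = {'training_data': load_dataset(training_)}
learner = simple_learner(data)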
Example #2
def empty_learner():
    # Construct an unconfigured SimpleLearner; model and data can be set later.
    return SimpleLearner()
Example #3
from sklearn.naive_bayes import GaussianNB

def NB_learner(data):
    # Train a Gaussian Naive Bayes model wrapped in a SimpleLearner.
    learner_model = GaussianNB()
    learner = SimpleLearner(model=learner_model,
                            training_instances=data['training_data'])
    learner.train()
    return learner
Example #4
    print("{0} malicious instances are being detected initially".format(
        len(ls)))
    return ls, [x.label for x in ls]


# load the raw TREC05 email corpus and split it 60/40 into train and test
dataset = EmailDataset(path='./data_reader/data/raw/trec05p-1/test-400',
                       binary=False,
                       raw=True)
training_, testing_ = dataset.split({'train': 60, 'test': 40})
training_data = load_dataset(training_)
testing_data = load_dataset(testing_)
test_true_label = [x.label for x in testing_data]

# train a simple learner around a linear SVM
learning_model = svm.SVC(probability=True, kernel='linear')
learner1 = SimpleLearner(learning_model, training_data)
learner1.train()

predictions = learner1.predict(testing_data)
print("======== initial prediction =========")
print(summary(predictions, test_true_label))

# run the coordinate greedy attack against the trained learner
attacker = CoordinateGreedy(lambda_val=0, max_change=100, step_size=1000)
attacker.set_adversarial_params(learner1, testing_data)
new_testing_data = attacker.attack(testing_data)

predictions2 = learner1.predict(new_testing_data)
print("========= post-attack prediction =========")
print(summary(predictions2, test_true_label))
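summary() is never defined in these snippets; a minimal stand-in, assuming the predictions and true labels are plain label lists, could look like this:

from sklearn import metrics

def summary(y_pred, y_true):
    # Hypothetical helper: report accuracy, precision, and recall
    # for the prediction/label lists compared above.
    return ("accuracy: {0:.3f}, precision: {1:.3f}, recall: {2:.3f}"
            .format(metrics.accuracy_score(y_true, y_pred),
                    metrics.precision_score(y_true, y_pred),
                    metrics.recall_score(y_true, y_pred)))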
Example #5
from data_reader.dataset import EmailDataset  # import path follows the library's layout; may vary
from data_reader.operations import load_dataset
from data_reader.operations import sparsify
from adversaries import CostSensitive
from learners import SimpleLearner
from sklearn.naive_bayes import BernoulliNB
from copy import deepcopy

# data: load the raw TREC05 email corpus and split it 60/40 into train and test
dataset = EmailDataset(path='./data_reader/data/raw/trec05p-1/full',
                       binary=False,
                       raw=True)
training_, testing_ = dataset.split({'train': 60, 'test': 40})
training_data = load_dataset(training_)
testing_data = load_dataset(testing_)

# learner: Bernoulli Naive Bayes wrapped in a SimpleLearner
learning_model = BernoulliNB()
learner = SimpleLearner(learning_model, training_data)
learner.train()

# adversary: cost-sensitive attacker (utilities in the style of Dalvi et al.)
param = {}
param['Ua'] = [[0, 20], [0, 0]]    # adversary's utility matrix (assumed meaning)
param['Vi'] = 0                    # adversary's cost of altering a feature (assumed meaning)
param['Uc'] = [[1, -1], [-10, 1]]  # classifier's utility matrix (assumed meaning)
param['scenario'] = None

adversary = CostSensitive()
adversary.set_params(param)
adversary.set_adversarial_params(learner, training_data)

# baseline predictions before the attack
predictions1 = learner.predict(testing_data)
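The snippet ends before the attack itself is run. Following the pattern of Example #4, the comparison would presumably continue along these lines (the attack() call on CostSensitive mirrors CoordinateGreedy's interface and is an assumption):

# Hypothetical continuation, mirroring Example #4: perturb the test set
# and compare pre- and post-attack predictions.
new_testing_data = adversary.attack(testing_data)
predictions2 = learner.predict(new_testing_data)

test_true_label = [x.label for x in testing_data]
print("======== initial prediction =========")
print(summary(predictions1, test_true_label))
print("========= post-attack prediction =========")
print(summary(predictions2, test_true_label))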