Example #1
import numpy as np

from foolbox import criteria


def test_misclassification():
    c = criteria.Misclassification()
    predictions = np.array([0.1, 0.5, 0.7, 0.4])
    # the top-1 class is index 2, so every other label counts as misclassified
    assert c.is_adversarial(predictions, 0)
    assert c.is_adversarial(predictions, 1)
    assert not c.is_adversarial(predictions, 2)
    assert c.is_adversarial(predictions, 3)
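In foolbox, Misclassification considers an input adversarial as soon as the top-1 prediction differs from the reference label. A minimal sketch of the same check in plain numpy (the helper name is illustrative, not part of foolbox):

import numpy as np

def is_misclassified(predictions, label):
    # adversarial iff the top-1 class is not the reference label
    return int(np.argmax(predictions)) != label

# mirrors the assertions above: the argmax of [0.1, 0.5, 0.7, 0.4] is index 2
assert is_misclassified(np.array([0.1, 0.5, 0.7, 0.4]), 0)
assert not is_misclassified(np.array([0.1, 0.5, 0.7, 0.4]), 2)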
Example #2
from foolbox import criteria


def test_misclassification_names():
    c = criteria.Misclassification()
    c1 = criteria.TopKMisclassification(k=1)
    c5 = criteria.TopKMisclassification(k=5)
    # Misclassification is equivalent to top-1 misclassification
    assert c.name() == c1.name()
    assert c1.name() != c5.name()
    c22 = criteria.TopKMisclassification(k=22)
    assert "22" in c22.name()
Example #3
import numpy as np

from foolbox import criteria


def test_combined_criteria():
    c1 = criteria.Misclassification()
    c2 = criteria.OriginalClassProbability(0.2)
    # criteria can be combined with &; the result requires both to hold
    c3 = c1 & c2

    probabilities = np.array([0.09, 0.11, 0.39, 0.41])
    predictions = np.log(probabilities)  # logits whose softmax recovers the probabilities

    for i in range(len(predictions)):
        b1 = c1.is_adversarial(predictions, i)
        b2 = c2.is_adversarial(predictions, i)
        b3 = c3.is_adversarial(predictions, i)

        # the combined criterion is the logical AND of its parts
        assert (b1 and b2) == b3

    assert c1.name() == "Top1Misclassification"
    assert c2.name() == "OriginalClassProbability-0.2000"
    assert c3.name() == c2.name() + "__" + c1.name()
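A combined criterion can be passed anywhere a single criterion is accepted. A hedged sketch, assuming foolbox 2.x and the placeholder names fmodel (an already wrapped foolbox model) and image/label (a test input), none of which come from the source:

from foolbox import attacks, criteria

# only count a perturbation as adversarial if it is misclassified AND the
# original class probability has dropped below 0.2
combined = criteria.Misclassification() & criteria.OriginalClassProbability(0.2)
attack = attacks.GradientSignAttack(fmodel, combined)  # fmodel: placeholder
adversarial = attack(image, label)  # image, label: placeholder test input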
Example #4
from foolbox import attacks, criteria as crt


def cwAttack(victim_model):
    # victim_model should be a model already wrapped in a foolbox Model;
    # FoolboxAttackWrapper is defined elsewhere in the source project
    attacker = attacks.CarliniWagnerL2Attack(victim_model, crt.Misclassification())
    return FoolboxAttackWrapper(attacker)
Example #5
from foolbox import attacks, criteria as crt

def dfAttack(victim_model):
    # victim_model should be a model already wrapped in a foolbox Model
    attacker = attacks.DeepFoolAttack(victim_model, crt.Misclassification())
    return FoolboxAttackWrapper(attacker)
Example #6
from foolbox import attacks, distances, criteria as crt

def pgdAttack(victim_model):
    # victim_model should be a model already wrapped in a foolbox Model
    attacker = attacks.RandomStartProjectedGradientDescentAttack(victim_model, crt.Misclassification(), distance=distances.Linfinity)
    return FoolboxAttackWrapper(attacker)
Example #7
from foolbox import attacks, criteria as crt

def fgsmAttack(victim_model):
    # victim_model should be a model already wrapped in a foolbox Model
    attacker = attacks.GradientSignAttack(victim_model, crt.Misclassification())
    return FoolboxAttackWrapper(attacker)
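All four factories above expect victim_model to already be wrapped for foolbox, and FoolboxAttackWrapper is defined elsewhere in the source project. A hedged sketch of producing such a wrapper for a PyTorch network, assuming foolbox 2.x; net, the bounds, and num_classes are placeholders for the actual use case:

import foolbox

# wrap a raw PyTorch module (placeholder `net`) so foolbox attacks can query it
fmodel = foolbox.models.PyTorchModel(net.eval(), bounds=(0, 1), num_classes=10)
attack_wrapper = pgdAttack(fmodel)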
Example #8
from foolbox import criteria, distances

def Configuration(model,
                  criterion=criteria.Misclassification(),
                  distance=distances.MeanSquaredDistance):
    # bundle a foolbox model with a default criterion and distance measure
    return (model, criterion, distance)
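One possible way to consume the returned tuple, assuming foolbox 2.x attacks (which take a model, a criterion, and a distance at construction) and a placeholder wrapped model fmodel; a sketch, not code from the source:

from foolbox import attacks

model, criterion, distance = Configuration(fmodel)  # fmodel: placeholder
attack = attacks.DeepFoolAttack(model, criterion, distance=distance)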