def test_keras_iris_unbounded(self):
        classifier = get_tabular_classifier_kr()

        # Recreate a classifier without clip values
        classifier = KerasClassifier(model=classifier._model, use_logits=False, channels_first=True)
        attack = ProjectedGradientDescent(classifier, eps=1, eps_step=0.2, max_iter=5)
        x_test_adv = attack.generate(self.x_test_iris)
        self.assertFalse((self.x_test_iris == x_test_adv).all())
        # With no clip_values set, the perturbation can push features outside the [0, 1] data range
        self.assertTrue((x_test_adv > 1).any())
        self.assertTrue((x_test_adv < 0).any())

        preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)
        self.assertFalse((np.argmax(self.y_test_iris, axis=1) == preds_adv).all())
        acc = np.sum(preds_adv == np.argmax(self.y_test_iris, axis=1)) / self.y_test_iris.shape[0]
        logger.info("Accuracy on Iris with PGD adversarial examples: %.2f%%", (acc * 100))
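
A hedged sketch of the imports this test fragment assumes (exact module paths differ slightly between ART versions; get_tabular_classifier_kr comes from ART's test utilities):

import logging
import numpy as np
from art.attacks.evasion import ProjectedGradientDescent
from art.estimators.classification import KerasClassifier
from tests.utils import get_tabular_classifier_kr

logger = logging.getLogger(__name__)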
Example #2
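This snippet is cut off at the top; a hedged sketch of the preamble it appears to assume (MNIST loading via ART's `load_mnist` utility plus the standard Keras and ART imports; module paths vary by ART version):

import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense
from art.attacks.evasion import FastGradientMethod
from art.estimators.classification import KerasClassifier
from art.utils import load_mnist

# Load MNIST scaled to [0, 1]; min_ and max_ are the data bounds used for clip_values below
(x_train, y_train), (x_test, y_test), min_, max_ = load_mnist()
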
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation="relu",
                 input_shape=x_train.shape[1:]))
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation="softmax"))

model.compile(loss="categorical_crossentropy",
              optimizer="adam",
              metrics=["accuracy"])

classifier = KerasClassifier(model=model, clip_values=(min_, max_))
classifier.fit(x_train, y_train, nb_epochs=5, batch_size=128)

# Evaluate the classifier on the test set
preds = np.argmax(classifier.predict(x_test), axis=1)
acc = np.sum(preds == np.argmax(y_test, axis=1)) / y_test.shape[0]
print("\nTest accuracy: %.2f%%" % (acc * 100))

# Craft adversarial samples with FGSM
epsilon = 0.1  # Maximum perturbation
adv_crafter = FastGradientMethod(classifier, eps=epsilon)
x_test_adv = adv_crafter.generate(x=x_test)

# Evaluate the classifier on the adversarial examples
preds = np.argmax(classifier.predict(x_test_adv), axis=1)
acc = np.sum(preds == np.argmax(y_test, axis=1)) / y_test.shape[0]
print("\nTest accuracy on adversarial sample: %.2f%%" % (acc * 100))
Example #3
elif dataset == 'cifar':
    input_shape = [32, 32, 3]
elif dataset == 'lidc':
    input_shape = (116, 116, 3)

# load model
model = load_model(path + 'models/' + dataset + '/' + dataset +
                   '_vgg16_model.h5')

############### create robust classifier ###############
robust_classifier = KerasClassifier(clip_values=(0, 1),
                                    model=model,
                                    use_logits=False)

# evaluate original classifier accuracy on training set
acc = accuracy_score(y_train, (robust_classifier.predict(x_train) > 0.5))
print("Accuracy of original classifier on train set: %.2f%%, " % (acc * 100))
# auc = roc_auc_score(y_train, robust_classifier.predict(x_train), average='macro')
# print("Auc of original classifier on train set: %.2f%%\n" % (auc * 100))

# Evaluate classifier accuracy on the test set
acc = accuracy_score(y_test, (robust_classifier.predict(x_test) > 0.5))
print("Accuracy of original classifier on test set: %.2f%%, " % (acc * 100))
# auc = roc_auc_score(y_test, robust_classifier.predict(x_test), average='macro')
# print("Auc of original classifier on test set: %.2f%%\n" % (auc * 100))

# Craft adversarial samples
attacker = create_attack(attack_type, robust_classifier)
x_test_adv = attacker.generate(x_test)
x_train_adv = attacker.generate(x_train)
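
`create_attack` is a helper defined elsewhere in this script; a minimal hypothetical sketch of such a factory using standard ART attack classes (the `attack_type` strings and epsilon values are assumptions, not the original implementation):

from art.attacks.evasion import FastGradientMethod, ProjectedGradientDescent

def create_attack(attack_type, classifier):
    # Hypothetical factory: map a string name to an ART evasion attack on the given classifier
    if attack_type == "fgsm":
        return FastGradientMethod(estimator=classifier, eps=0.05)
    if attack_type == "pgd":
        return ProjectedGradientDescent(estimator=classifier, eps=0.05, eps_step=0.01, max_iter=10)
    raise ValueError("Unsupported attack type: {}".format(attack_type))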
Example #4
model.add(Dense(100, activation="relu"))
model.add(Dense(10, activation="softmax"))

model.compile(
    loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(lr=0.01), metrics=["accuracy"]
)

# Step 3: Create the ART classifier

classifier = KerasClassifier(model=model, clip_values=(min_pixel_value, max_pixel_value), use_logits=False)

# Step 4: Train the ART classifier

classifier.fit(x_train, y_train, batch_size=64, nb_epochs=3)

# Step 5: Evaluate the ART classifier on benign test examples

predictions = classifier.predict(x_test)
accuracy = np.sum(np.argmax(predictions, axis=1) == np.argmax(y_test, axis=1)) / len(y_test)
print("Accuracy on benign test examples: {}%".format(accuracy * 100))

# Step 6: Generate adversarial test examples
attack = FastGradientMethod(estimator=classifier, eps=0.2)
x_test_adv = attack.generate(x=x_test)

# Step 7: Evaluate the ART classifier on adversarial test examples

predictions = classifier.predict(x_test_adv)
accuracy = np.sum(np.argmax(predictions, axis=1) == np.argmax(y_test, axis=1)) / len(y_test)
print("Accuracy on adversarial test examples: {}%".format(accuracy * 100))
Example #5
# #                 url='https://www.dropbox.com/s/ta75pl4krya5djj/cifar_resnet.h5?dl=1')
# # classifier_model = load_model(path)
# classifier_model = KerasClassifier()
# classifier_model.fit(x = x_train, y = y_train)

test_num = adv_num = len(x_test)
# test_num = adv_num = 500
# test_num = 10
# adv_num = 10

classifier = KerasClassifier(clip_values=(min_, max_),
                             model=classifier_model,
                             use_logits=False)

# classifier_model.summary()
x_test_pred = np.argmax(classifier.predict(x_test[:test_num]), axis=1)
nb_correct_pred = np.sum(x_test_pred == np.argmax(y_test[:test_num], axis=1))
print(f"Original test data (first {test_num} images):")
print("Correctly classified: {}".format(nb_correct_pred))
print("Incorrectly classified: {}".format(test_num - nb_correct_pred))

start = timeit.default_timer()
# FGSM with extensions
attacker = FastGradientSignMethod(classifier, eps=5, batch_size=32)
# x_test_adv = attacker.generate(x_test[:adv_num]) # non-targeted
# x_test_adv = attacker.generate_targeted(x_test[:adv_num], aimed_target = x_test[0]) #targeted
x_test_adv = attacker.generate_iterative(
    x_test[:adv_num], eps_step=0.05)  #iterative non-targeted
# x_test_adv = attacker.generate_targeted_iterative(x_test[:adv_num], eps_step = 0.05, aimed_target =x_test[0]) #iterative targeted
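
# Hedged follow-up (the original snippet is truncated here): evaluate the crafted
# examples against the true labels and report the attack runtime started above.
x_test_adv_pred = np.argmax(classifier.predict(x_test_adv), axis=1)
nb_correct_adv_pred = np.sum(x_test_adv_pred == np.argmax(y_test[:adv_num], axis=1))
print("Correctly classified after attack: {}".format(nb_correct_adv_pred))
print("Attack time: {:.2f}s".format(timeit.default_timer() - start))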

# Hop Skip Jump: the paper uses max_iter=64, max_eval=10000, init_eval=100, but that is extremely slow.
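# Hedged aside: ART's HopSkipJump can be run with a much smaller budget than the paper's
# settings mentioned above; the parameter values below are illustrative only and are not
# part of the original snippet.
from art.attacks.evasion import HopSkipJump
hsj = HopSkipJump(classifier=classifier, targeted=False, max_iter=10, max_eval=1000, init_eval=10)
x_test_hsj = hsj.generate(x_test[:adv_num])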
Example #6
def main():
    Ad = np.load(AD_MAT_FILE)  # Load adjacency matrix
    NUM_TEST = 50
    NUM_GRAPH = 200
    array_std, array_mean_values, array_overlap_ratio = load_raw_result_csv(
        RAW_RESULT_FILE)
    NUM_CLASS = array_mean_values.shape[1]
    print(array_mean_values.shape)
    with open("result_PGD.csv", "w", newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow([
            "overlap ratio", "acc_test_L", "acc_test_WL", "acc_adv_with_Lip",
            "acc_adv_without_Lip"
        ])
    for i in range(NUM_TEST):
        x_test, y_test = reconstruct_test_data(array_std[i],
                                               array_mean_values[i], Ad,
                                               NUM_GRAPH)
        model_with_Lip_constr = tf.keras.models.load_model(
            "saved_model/fit{}_model_with_Lip_constr.h5".format(i))
        print(model_with_Lip_constr.summary())
        model_without_Lip_constr = tf.keras.models.load_model(
            "saved_model/fit{}_model_without_Lip_constr.h5".format(i))

        print(
            "Evaluation of model WITH Lipschitz constant constraint on TEST data"
        )
        loss_test_L, acc_test_L = model_with_Lip_constr.evaluate(
            x_test, y_test, batch_size=x_test.shape[0], verbose=0)
        print("Loss: {:.4f}, accuracy: {:.4f}".format(loss_test_L, acc_test_L))

        print(
            "Evaluation of model WITHOUT Lipschitz constant constraint on TEST data"
        )
        loss_test_WL, acc_test_WL = model_without_Lip_constr.evaluate(
            x_test, y_test, batch_size=x_test.shape[0], verbose=0)
        print("Loss: {:.4f}, accuracy: {:.4f}".format(loss_test_WL,
                                                      acc_test_WL))

        # Reshape model output
        reshape_with_Lip = Reshape(
            (x_test.shape[1] * NUM_CLASS, ),
            name="added_reshape_layer_L")(model_with_Lip_constr.output)
        new_model_with_Lip = Model(inputs=model_with_Lip_constr.input,
                                   outputs=reshape_with_Lip)
        reshape_without_Lip = Reshape(
            (x_test.shape[1] * NUM_CLASS, ),
            name="added_reshape_layer_WL")(model_without_Lip_constr.output)
        new_model_without_Lip = Model(inputs=model_without_Lip_constr.input,
                                      outputs=reshape_without_Lip)
        new_model_with_Lip.compile(loss='categorical_crossentropy',
                                   optimizer='adam',
                                   metrics=['accuracy'])
        new_model_without_Lip.compile(loss='categorical_crossentropy',
                                      optimizer='adam',
                                      metrics=['accuracy'])
        min_value = np.min(array_mean_values[i]) - 100 * array_std[i]
        max_value = np.max(array_mean_values[i]) + 100 * array_std[i]
        classifier_with_Lip = KerasClassifier(model=new_model_with_Lip,
                                              clip_values=(min_value,
                                                           max_value),
                                              use_logits=False)
        classifier_without_Lip = KerasClassifier(model=new_model_without_Lip,
                                                 clip_values=(min_value,
                                                              max_value),
                                                 use_logits=False)
        attack1 = AutoProjectedGradientDescent(estimator=classifier_with_Lip,
                                               norm="inf",
                                               eps=0.6,
                                               eps_step=1,
                                               batch_size=200,
                                               nb_random_init=5,
                                               verbose=True,
                                               targeted=False)
        attack2 = AutoProjectedGradientDescent(
            estimator=classifier_without_Lip,
            norm="inf",
            eps=0.6,
            eps_step=1,
            batch_size=200,
            nb_random_init=5,
            verbose=True,
            targeted=False)

        x_test_adv1 = attack1.generate(x=x_test,
                                       mask=np.ones((1, x_test.shape[1],
                                                     x_test.shape[2])))
        x_test_adv2 = attack2.generate(x=x_test,
                                       mask=np.ones((1, x_test.shape[1],
                                                     x_test.shape[2])))
        y_predict_adv_with_Lip = classifier_with_Lip.predict(x_test_adv1)
        y_predict_adv_without_Lip = classifier_without_Lip.predict(x_test_adv2)

        y_predict_adv_with_Lip = y_predict_adv_with_Lip.reshape((y_test.shape))
        y_predict_adv_without_Lip = y_predict_adv_without_Lip.reshape(
            (y_test.shape))
        acc_adv_with_Lip = np.sum(
            np.argmax(y_predict_adv_with_Lip, axis=2) == np.argmax(
                y_test, axis=2)) / (y_test.shape[0] * y_test.shape[1])
        print(
            "Accuracy on adversarial test examples with Lipschitz constraint: {:.2f}%"
            .format(acc_adv_with_Lip * 100))
        acc_adv_without_Lip = np.sum(
            np.argmax(y_predict_adv_without_Lip, axis=2) == np.argmax(
                y_test, axis=2)) / (y_test.shape[0] * y_test.shape[1])
        print(
            "Accuracy on adversarial test examples without Lipschitz constraint: {:.2f}%"
            .format(acc_adv_without_Lip * 100))

        with open("result_PGD.csv", "a", newline='') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow([
                array_overlap_ratio[i], acc_test_L, acc_test_WL,
                acc_adv_with_Lip, acc_adv_without_Lip
            ])
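
A hedged sketch of the module-level setup this script assumes (the path constants AD_MAT_FILE and RAW_RESULT_FILE and the helpers `load_raw_result_csv` / `reconstruct_test_data` are defined elsewhere in the original; only the imports below are standard library or ART API):

import csv
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Reshape
from tensorflow.keras.models import Model
from art.attacks.evasion import AutoProjectedGradientDescent
from art.estimators.classification import KerasClassifier

# AD_MAT_FILE and RAW_RESULT_FILE are file-path constants defined in the original script (values not shown here)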
Example #7
classifier = KerasClassifier(clip_values=(min_, max_),
                             model=classifier_model,
                             use_logits=False,
                             preprocessing=(0.5, 1))

target_class = "bird"  # one of ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
target_label = np.zeros(len(class_descr))
target_label[class_descr.index(target_class)] = 1
target_instance = np.expand_dims(
    x_test[np.argmax(y_test, axis=1) == class_descr.index(target_class)][3],
    axis=0)

# fig = plt.imshow(target_instance[0])
print('true_class: ' + target_class)
print('predicted_class: ' +
      class_descr[np.argmax(classifier.predict(target_instance), axis=1)[0]])

feature_layer = classifier.layer_names[-2]

base_class = "frog"  # one of ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
base_idxs = np.argmax(y_test, axis=1) == class_descr.index(base_class)
base_instances = np.copy(x_test[base_idxs][:10])
base_labels = y_test[base_idxs][:10]

x_test_pred = np.argmax(classifier.predict(base_instances), axis=1)
nb_correct_pred = np.sum(x_test_pred == np.argmax(base_labels, axis=1))

print("New test data to be poisoned (10 images):")
print("Correctly classified: {}".format(nb_correct_pred))
print("Incorrectly classified: {}".format(10 - nb_correct_pred))
Example #8
# Create attack for adversarial trainer; here, we use 2 attacks, both crafting adv examples on the target model
pgd = ProjectedGradientDescent(classifier,
                               eps=8,
                               eps_step=2,
                               max_iter=10,
                               num_random_init=20)

# Create some adversarial samples for evaluation
x_test_pgd = pgd.generate(x_test)

# Create adversarial trainer and perform adversarial training
adv_trainer = AdversarialTrainer(classifier, attacks=pgd, ratio=1.0)
adv_trainer.fit_generator(art_datagen, nb_epochs=83)

# Evaluate the adversarially trained model on clean test set
labels_true = np.argmax(y_test, axis=1)
labels_test = np.argmax(classifier.predict(x_test), axis=1)
print("Accuracy test set: %.2f%%" %
      (np.sum(labels_test == labels_true) / x_test.shape[0] * 100))

# Evaluate the adversarially trained model on original adversarial samples
labels_pgd = np.argmax(classifier.predict(x_test_pgd), axis=1)
print("Accuracy on original PGD adversarial samples: %.2f%%" %
      (np.sum(labels_pgd == labels_true) / x_test.shape[0] * 100))

# Evaluate the adversarially trained model on fresh adversarial samples produced on the adversarially trained model
x_test_pgd = pgd.generate(x_test)
labels_pgd = np.argmax(classifier.predict(x_test_pgd), axis=1)
print("Accuracy on new PGD adversarial samples: %.2f%%" %
      (np.sum(labels_pgd == labels_true) / x_test.shape[0] * 100))
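
The `art_datagen` passed to `fit_generator` above is not defined in this excerpt; a hedged sketch of how it is typically built, wrapping a Keras `ImageDataGenerator` in ART's `KerasDataGenerator` (the augmentation settings and batch size are assumptions):

from keras.preprocessing.image import ImageDataGenerator
from art.data_generators import KerasDataGenerator

datagen = ImageDataGenerator(horizontal_flip=True, width_shift_range=0.125, height_shift_range=0.125)
datagen.fit(x_train)
art_datagen = KerasDataGenerator(iterator=datagen.flow(x_train, y_train, batch_size=128),
                                 size=x_train.shape[0],
                                 batch_size=128)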
Example #9
def main():
    # Read MNIST dataset (x_raw contains the original images):
    (x_raw, y_raw), (x_raw_test, y_raw_test), min_, max_ = load_mnist(raw=True)

    n_train = np.shape(x_raw)[0]
    num_selection = 5000
    random_selection_indices = np.random.choice(n_train, num_selection)
    x_raw = x_raw[random_selection_indices]
    y_raw = y_raw[random_selection_indices]

    # Poison training data
    perc_poison = 0.33
    (is_poison_train, x_poisoned_raw,
     y_poisoned_raw) = generate_backdoor(x_raw, y_raw, perc_poison)
    x_train, y_train = preprocess(x_poisoned_raw, y_poisoned_raw)
    # Add channel axis:
    x_train = np.expand_dims(x_train, axis=3)

    # Poison test data
    (is_poison_test, x_poisoned_raw_test,
     y_poisoned_raw_test) = generate_backdoor(x_raw_test, y_raw_test,
                                              perc_poison)
    x_test, y_test = preprocess(x_poisoned_raw_test, y_poisoned_raw_test)
    # Add channel axis:
    x_test = np.expand_dims(x_test, axis=3)

    # Shuffle training data so poison is not together
    n_train = np.shape(y_train)[0]
    shuffled_indices = np.arange(n_train)
    np.random.shuffle(shuffled_indices)
    x_train = x_train[shuffled_indices]
    y_train = y_train[shuffled_indices]
    is_poison_train = is_poison_train[shuffled_indices]

    # Create Keras convolutional neural network - basic architecture from Keras examples
    # Source here: https://github.com/keras-team/keras/blob/master/examples/mnist_cnn.py
    model = Sequential()
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               activation="relu",
               input_shape=x_train.shape[1:]))
    model.add(Conv2D(64, (3, 3), activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation="relu"))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation="softmax"))

    model.compile(loss="categorical_crossentropy",
                  optimizer="adam",
                  metrics=["accuracy"])

    classifier = KerasClassifier(model=model, clip_values=(min_, max_))

    classifier.fit(x_train, y_train, nb_epochs=30, batch_size=128)

    # Evaluate the classifier on the test set
    preds = np.argmax(classifier.predict(x_test), axis=1)
    acc = np.sum(preds == np.argmax(y_test, axis=1)) / y_test.shape[0]
    print("\nTest accuracy: %.2f%%" % (acc * 100))

    # Evaluate the classifier on poisonous data
    preds = np.argmax(classifier.predict(x_test[is_poison_test]), axis=1)
    acc = np.sum(preds == np.argmax(y_test[is_poison_test],
                                    axis=1)) / y_test[is_poison_test].shape[0]
    print(
        "\nPoisonous test set accuracy (i.e. effectiveness of poison): %.2f%%"
        % (acc * 100))

    # Evaluate the classifier on clean data
    preds = np.argmax(classifier.predict(x_test[is_poison_test == 0]), axis=1)
    acc = np.sum(preds == np.argmax(y_test[
        is_poison_test == 0], axis=1)) / y_test[is_poison_test == 0].shape[0]
    print("\nClean test set accuracy: %.2f%%" % (acc * 100))

    # Calling poisoning defence:
    defence = ActivationDefence(classifier, x_train, y_train)

    # End-to-end method:
    print("------------------- Results using size metric -------------------")
    print(defence.get_params())
    defence.detect_poison(nb_clusters=2, nb_dims=10, reduce="PCA")

    # Evaluate method when ground truth is known:
    is_clean = is_poison_train == 0
    confusion_matrix = defence.evaluate_defence(is_clean)
    print("Evaluation defence results for size-based metric: ")
    jsonObject = json.loads(confusion_matrix)
    for label in jsonObject:
        print(label)
        pprint.pprint(jsonObject[label])

    # Visualize clusters:
    print("Visualize clusters")
    sprites_by_class = defence.visualize_clusters(x_train, "mnist_poison_demo")
    # Show plots for clusters of class 5
    n_class = 5
    try:
        import matplotlib.pyplot as plt

        plt.imshow(sprites_by_class[n_class][0])
        plt.title("Class " + str(n_class) + " cluster: 0")
        plt.show()
        plt.imshow(sprites_by_class[n_class][1])
        plt.title("Class " + str(n_class) + " cluster: 1")
        plt.show()
    except ImportError:
        print(
            "matplotlib not installed. For this reason, cluster visualization was not displayed"
        )

    # Try again using distance analysis this time:
    print(
        "------------------- Results using distance metric -------------------"
    )
    print(defence.get_params())
    defence.detect_poison(nb_clusters=2,
                          nb_dims=10,
                          reduce="PCA",
                          cluster_analysis="distance")
    confusion_matrix = defence.evaluate_defence(is_clean)
    print("Evaluation defence results for distance-based metric: ")
    jsonObject = json.loads(confusion_matrix)
    for label in jsonObject:
        print(label)
        pprint.pprint(jsonObject[label])

    # Other ways to invoke the defence:
    kwargs = {"nb_clusters": 2, "nb_dims": 10, "reduce": "PCA"}
    defence.cluster_activations(**kwargs)

    kwargs = {"cluster_analysis": "distance"}
    defence.analyze_clusters(**kwargs)
    defence.evaluate_defence(is_clean)

    kwargs = {"cluster_analysis": "smaller"}
    defence.analyze_clusters(**kwargs)
    defence.evaluate_defence(is_clean)

    print("done :) ")
Example #10
def main():
    Ad = np.load(AD_MAT_FILE)  # Load adjacency matrix
    NUM_TEST = 50  # The number of experiments recorded in raw result file
    NUM_GRAPH = 200  # The number of graph in a test dataset
    array_std, array_mean_values, array_overlap_ratio = load_raw_result_csv(
        RAW_RESULT_FILE)
    NUM_CLASS = array_mean_values.shape[1]
    print(array_mean_values.shape)
    with open("result_DeepFool.csv", "w", newline='') as csvfile:
        # Header for the csv file:
        # 1) overlap measurement
        # 2) accuracy of model with Lipschitz constant constraint on original test dataset
        # 3) accuracy of model without Lipschitz constant constraint on original test dataset
        # 4) accuracy of model with Lipschitz constant constraint on adversarial test dataset
        # 5) accuracy of model without Lipschitz constant constraint on adversarial test dataset
        writer = csv.writer(csvfile)
        writer.writerow([
            "overlap ratio", "acc_test_L", "acc_test_WL", "acc_adv_with_Lip",
            "acc_adv_without_Lip"
        ])

    # Begin adversarial test for each previous model
    for i in range(0, NUM_TEST):
        tf.keras.backend.clear_session()
        # Reconstruct test dataset for each model
        x_test, y_test = reconstruct_test_data(array_std[i],
                                               array_mean_values[i], Ad,
                                               NUM_GRAPH)
        # Load models with/without Lipschitz constant constraint
        model_with_Lip_constr = tf.keras.models.load_model(
            "saved_model_adver_attack/fit{}_model_with_Lip_constr.h5".format(
                i))
        print(model_with_Lip_constr.summary())
        model_without_Lip_constr = tf.keras.models.load_model(
            "saved_model_adver_attack/fit{}_model_without_Lip_constr.h5".
            format(i))

        # Evaluation of models on original test dataset
        print(
            "Evaluation of model WITH Lipschitz constant constraint on TEST data"
        )
        loss_test_L, acc_test_L = model_with_Lip_constr.evaluate(
            x_test, y_test, batch_size=x_test.shape[0], verbose=0)
        print("Loss: {:.4f}, accuracy: {:.4f}".format(loss_test_L, acc_test_L))

        print(
            "Evaluation of model WITHOUT Lipschitz constant constraint on TEST data"
        )
        loss_test_WL, acc_test_WL = model_without_Lip_constr.evaluate(
            x_test, y_test, batch_size=x_test.shape[0], verbose=0)
        print("Loss: {:.4f}, accuracy: {:.4f}".format(loss_test_WL,
                                                      acc_test_WL))

        # Reshape model output to fit the adversarial attack classifier
        reshape_with_Lip = Reshape(
            (x_test.shape[1] * NUM_CLASS, ),
            name="added_reshape_layer_L")(model_with_Lip_constr.output)
        new_model_with_Lip = Model(inputs=model_with_Lip_constr.input,
                                   outputs=reshape_with_Lip)
        reshape_without_Lip = Reshape(
            (x_test.shape[1] * NUM_CLASS, ),
            name="added_reshape_layer_WL")(model_without_Lip_constr.output)
        new_model_without_Lip = Model(inputs=model_without_Lip_constr.input,
                                      outputs=reshape_without_Lip)
        new_model_with_Lip.compile(loss='categorical_crossentropy',
                                   optimizer='adam',
                                   metrics=['accuracy'])
        new_model_without_Lip.compile(loss='categorical_crossentropy',
                                      optimizer='adam',
                                      metrics=['accuracy'])
        min_value = np.min(array_mean_values[i]) - 100 * array_std[i]
        max_value = np.max(array_mean_values[i]) + 100 * array_std[i]

        # construct classifiers to wrap the existing model
        classifier_with_Lip = KerasClassifier(model=new_model_with_Lip,
                                              clip_values=(min_value,
                                                           max_value),
                                              use_logits=False)
        classifier_without_Lip = KerasClassifier(model=new_model_without_Lip,
                                                 clip_values=(min_value,
                                                              max_value),
                                                 use_logits=False)

        # construct DeepFool attack
        attack1 = DeepFool(classifier=classifier_with_Lip,
                           epsilon=0.2,
                           batch_size=10)
        attack2 = DeepFool(classifier=classifier_without_Lip,
                           epsilon=0.2,
                           batch_size=10)

        # Generate adversarial samples
        x_test_adv1 = attack1.generate(x=x_test)
        x_test_adv2 = attack2.generate(x=x_test)

        # Evaluation of models on adversarial test dataset
        y_predict_adv_with_Lip = classifier_with_Lip.predict(x_test_adv1)
        y_predict_adv_without_Lip = classifier_without_Lip.predict(x_test_adv2)
        y_predict_adv_with_Lip = y_predict_adv_with_Lip.reshape((y_test.shape))
        y_predict_adv_without_Lip = y_predict_adv_without_Lip.reshape(
            (y_test.shape))
        acc_adv_with_Lip = np.sum(
            np.argmax(y_predict_adv_with_Lip, axis=2) == np.argmax(
                y_test, axis=2)) / (y_test.shape[0] * y_test.shape[1])
        print(
            "Accuracy on adversarial test examples with Lipschitz constraint: {:.2f}%"
            .format(acc_adv_with_Lip * 100))
        acc_adv_without_Lip = np.sum(
            np.argmax(y_predict_adv_without_Lip, axis=2) == np.argmax(
                y_test, axis=2)) / (y_test.shape[0] * y_test.shape[1])
        print(
            "Accuracy on adversarial test examples without Lipschitz constraint: {:.2f}%"
            .format(acc_adv_without_Lip * 100))

        # Save comparison result
        with open("result_DeepFool.csv", "a", newline='') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow([
                array_overlap_ratio[i], acc_test_L, acc_test_WL,
                acc_adv_with_Lip, acc_adv_without_Lip
            ])
Example #11
tiny_vgg.compile(optimizer=optimizer, loss=loss_object)

classifier = KerasClassifier(model=tiny_vgg,
                             clip_values=(0, 1),
                             use_logits=False)

attack = ProjectedGradientDescent(estimator=classifier,
                                  eps=16 / 255,
                                  eps_step=1 / 255,
                                  norm="inf",
                                  max_iter=200)

#attack = CarliniLInfMethod(classifier,
#    confidence=0.8, targeted=False, learning_rate=0.001)

x_test_adv = attack.generate(x=x_test)
outputs = classifier.predict(x_test_adv)

preds = np.argmax(outputs, axis=1)
trues = np.argmax(y_test, axis=1)

accuracy = np.sum(preds == trues) / len(y_test)
print("Accuracy on adversarial test examples: {}%".format(accuracy * 100))
print("Ixs that worked: ")
print(np.where(preds != trues))

## Save a few of x_test_adv, please work:
for i in range(len(preds)):
    x = (x_test_adv[i] * 255).astype(np.uint8)
    im = Image.fromarray(x)
    im.save(ADV_IMAGE_SAVE_LOCATION + '/x_adv_' + str(i) + '.jpeg')
Example #12
eps_range = [
    # (epsilon values truncated in the source)
    ]

# evaluate sensitivity scores of each image: the eps at which each test image's prediction first flips
test_eps_scores = [1] * x_test.shape[0]  # 1 is a sentinel meaning "not flipped by any eps yet"

for eps in eps_range:
    attacker = ProjectedGradientDescent(classifier,
                                        eps=eps,
                                        eps_step=eps / 4,
                                        max_iter=max_iter,
                                        num_random_init=num_random_init)
    x_test_adv = attacker.generate(x_test)
    for i in range(x_test.shape[0]):
        img = np.expand_dims(x_test[i], axis=0)
        adv_img = np.expand_dims(x_test_adv[i], axis=0)
        pred = np.argmax(classifier.predict(img))
        pred_adv = np.argmax(classifier.predict(adv_img))
        if test_eps_scores[i] == 1:
            if pred != pred_adv:
                test_eps_scores[i] = eps
np.save(path + dataset + '/test_eps_scores.npy', test_eps_scores)

test_eps_scores = np.load(path + dataset + '/test_eps_scores.npy')

test_eps_freq = [0] * len(eps_range)  # count of test images whose sensitivity score equals each eps value
for eps_score in test_eps_scores:
    for i in range(len(eps_range)):
        if eps_score == eps_range[i]:
            test_eps_freq[i] = test_eps_freq[i] + 1

for i in range(len(eps_range)):