def get_vanilla_model(x_train, y_train, batch_norm=False):
    m = Sequential()

    m.add(Conv2D(64, (3, 3), padding='valid', input_shape=(28, 28, 1)))
    m.add(Activation('relu'))
    if batch_norm:
        m.add(BatchNormalization())
    m.add(Conv2D(64, (3, 3)))
    m.add(Activation('relu'))
    if batch_norm:
        m.add(BatchNormalization())
    m.add(MaxPooling2D(pool_size=(2, 2)))
    m.add(Dropout(0.5))
    m.add(Flatten())
    m.add(Dense(128))
    m.add(Activation('relu'))
    if batch_norm:
        m.add(BatchNormalization())
    m.add(Dropout(0.5))
    m.add(Dense(10))
    m.add(Activation('softmax'))
    m.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
    c = KerasClassifier(model=m)
    c.fit(x_train, y_train, nb_epochs=50, batch_size=128)
    return c
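A minimal usage sketch for the helper above (not part of the original snippet), assuming ART's `load_mnist` utility and one-hot labels; only a small slice is passed in because the helper itself trains for 50 epochs:

from art.utils import load_mnist

# Hedged usage sketch: train the wrapper on a small MNIST slice and check the output shape.
(x_train, y_train), (x_test, y_test), _, _ = load_mnist()
clf = get_vanilla_model(x_train[:1000], y_train[:1000], batch_norm=True)
print(clf.predict(x_test[:100]).shape)  # expected: (100, 10)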
def test_defences_predict(get_default_mnist_subset, get_image_classifier_list):
    (x_train_mnist, y_train_mnist), (x_test_mnist, y_test_mnist) = get_default_mnist_subset

    clip_values = (0, 1)
    fs = FeatureSqueezing(clip_values=clip_values, bit_depth=2)
    jpeg = JpegCompression(clip_values=clip_values, apply_predict=True)
    smooth = SpatialSmoothing()
    classifier_, _ = get_image_classifier_list(one_classifier=True)
    classifier = KerasClassifier(
        clip_values=clip_values, model=classifier_._model, preprocessing_defences=[fs, jpeg, smooth]
    )
    assert len(classifier.preprocessing_defences) == 3

    predictions_classifier = classifier.predict(x_test_mnist)

    # Apply the same defences by hand
    x_test_defense = x_test_mnist
    x_test_defense, _ = fs(x_test_defense, y_test_mnist)
    x_test_defense, _ = jpeg(x_test_defense, y_test_mnist)
    x_test_defense, _ = smooth(x_test_defense, y_test_mnist)
    classifier, _ = get_image_classifier_list(one_classifier=True)

    predictions_check = classifier._model.predict(x_test_defense)

    # Check that the prediction results match
    np.testing.assert_array_almost_equal(predictions_classifier, predictions_check, decimal=4)
    def test_iris_k_unbounded(self):
        (_, _), (x_test, y_test) = self.iris
        classifier, _ = get_iris_classifier_kr()

        # Recreate a classifier without clip values
        classifier = KerasClassifier(model=classifier._model,
                                     use_logits=False,
                                     channel_index=1)
        attack_params = {
            "max_iter": 1,
            "attacker": "newtonfool",
            "attacker_params": {
                "max_iter": 5
            }
        }
        attack = UniversalPerturbation(classifier)
        attack.set_params(**attack_params)
        x_test_adv = attack.generate(x_test)
        self.assertFalse((x_test == x_test_adv).all())

        preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)
        self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all())
        acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0]
        logger.info(
            'Accuracy on Iris with universal adversarial examples: %.2f%%',
            (acc * 100))
Example #4
    def test_defences_predict(self):
        clip_values = (0, 1)
        fs = FeatureSqueezing(clip_values=clip_values, bit_depth=2)
        jpeg = JpegCompression(clip_values=clip_values, apply_predict=True)
        smooth = SpatialSmoothing()
        classifier_ = get_classifier_kr()
        classifier = KerasClassifier(clip_values=clip_values,
                                     model=classifier_._model,
                                     defences=[fs, jpeg, smooth])
        self.assertEqual(len(classifier.defences), 3)

        predictions_classifier = classifier.predict(self.x_test)

        # Apply the same defences by hand
        x_test_defense = self.x_test
        x_test_defense, _ = fs(x_test_defense, self.y_test)
        x_test_defense, _ = jpeg(x_test_defense, self.y_test)
        x_test_defense, _ = smooth(x_test_defense, self.y_test)
        classifier = get_classifier_kr()
        predictions_check = classifier._model.predict(x_test_defense)

        # Check that the prediction results match
        np.testing.assert_array_almost_equal(predictions_classifier,
                                             predictions_check,
                                             decimal=4)
Example #5
    def setUp(self):

        (self.x_train, self.y_train), (x_test, y_test), min_, max_ = load_dataset('mnist')
        self.x_train = self.x_train[:300]
        self.y_train = self.y_train[:300]

        k.set_learning_phase(1)
        model = Sequential()
        model.add(
            Conv2D(32,
                   kernel_size=(3, 3),
                   activation='relu',
                   input_shape=self.x_train.shape[1:]))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(128, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(10, activation='softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])

        self.classifier = KerasClassifier((min_, max_), model=model)
        self.classifier.fit(self.x_train,
                            self.y_train,
                            nb_epochs=1,
                            batch_size=128)

        self.defence = ActivationDefence(self.classifier, self.x_train,
                                         self.y_train)
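    # Hedged illustration (not from the original snippet): the ActivationDefence
    # built in setUp would typically be exercised along these lines; the clustering
    # parameters below are example values only.
    def test_detect_poison_sketch(self):
        report, is_clean_lst = self.defence.detect_poison(nb_clusters=2, nb_dims=10, reduce="PCA")
        self.assertEqual(len(is_clean_lst), len(self.x_train))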
    def test_keras_iris_unbounded(self):
        classifier = get_tabular_classifier_kr()

        # Recreate a classifier without clip values
        classifier = KerasClassifier(model=classifier._model, use_logits=False, channel_index=1)

        # Norm=2
        attack = HopSkipJump(classifier, targeted=False, max_iter=2, max_eval=100, init_eval=10)
        x_test_adv = attack.generate(self.x_test_iris)
        self.assertFalse((self.x_test_iris == x_test_adv).all())

        preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)
        self.assertFalse((np.argmax(self.y_test_iris, axis=1) == preds_adv).all())
        acc = np.sum(preds_adv == np.argmax(self.y_test_iris, axis=1)) / self.y_test_iris.shape[0]
        logger.info("Accuracy on Iris with HopSkipJump adversarial examples: %.2f%%", (acc * 100))

        # Norm=np.inf
        attack = HopSkipJump(classifier, targeted=False, max_iter=2, max_eval=100, init_eval=10, norm=np.inf)
        x_test_adv = attack.generate(self.x_test_iris)
        self.assertFalse((self.x_test_iris == x_test_adv).all())

        preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)
        self.assertFalse((np.argmax(self.y_test_iris, axis=1) == preds_adv).all())
        acc = np.sum(preds_adv == np.argmax(self.y_test_iris, axis=1)) / self.y_test_iris.shape[0]
        logger.info("Accuracy on Iris with HopSkipJump adversarial examples: %.2f%%", (acc * 100))

        # Clean-up session
        k.clear_session()
    def test_defences_predict(self):
        from art.defences import FeatureSqueezing, JpegCompression, SpatialSmoothing

        (_, _), (x_test, y_test) = self.mnist

        clip_values = (0, 1)
        fs = FeatureSqueezing(clip_values=clip_values, bit_depth=2)
        jpeg = JpegCompression(clip_values=clip_values, apply_predict=True)
        smooth = SpatialSmoothing()
        classifier = KerasClassifier(clip_values=clip_values,
                                     model=self.model_mnist._model,
                                     defences=[fs, jpeg, smooth])
        self.assertEqual(len(classifier.defences), 3)

        preds_classifier = classifier.predict(x_test)

        # Apply the same defences by hand
        x_test_defense = x_test
        x_test_defense, _ = fs(x_test_defense, y_test)
        x_test_defense, _ = jpeg(x_test_defense, y_test)
        x_test_defense, _ = smooth(x_test_defense, y_test)
        preds_check = self.model_mnist._model.predict(x_test_defense)

        # Check that the prediction results match
        self.assertTrue((np.abs(preds_classifier - preds_check) <= 1e-5).all())
Example #8
    def test_iris_unbounded(self):
        (_, _), (x_test, y_test) = self.iris
        classifier = get_iris_classifier_kr()

        def t(x):
            return x

        def transformation():
            while True:
                yield t

        # Recreate a classifier without clip values
        classifier = KerasClassifier(model=classifier._model,
                                     use_logits=False,
                                     channel_index=1)
        classifier = ExpectationOverTransformations(
            classifier, sample_size=1, transformation=transformation)
        attack = FastGradientMethod(classifier, eps=1)
        x_test_adv = attack.generate(x_test)
        self.assertFalse((x_test == x_test_adv).all())
        self.assertTrue((x_test_adv > 1).any())
        self.assertTrue((x_test_adv < 0).any())

        preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)
        self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all())
        acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0]
        logger.info('Accuracy on Iris with limited query info: %.2f%%',
                    (acc * 100))
Example #9
    def test_functional_model(self):
        keras_model = KerasClassifier(self.functional_model, clip_values=(0, 1), input_layer=1, output_layer=1)
        self.assertIn("input1", keras_model._input.name)
        self.assertIn("output1", keras_model._output.name)

        keras_model = KerasClassifier(self.functional_model, clip_values=(0, 1), input_layer=0, output_layer=0)
        self.assertIn("input0", keras_model._input.name)
        self.assertIn("output0", keras_model._output.name)
Example #10
    def test_functional_model(self):
        # Need to update the functional_model code to produce a model with more than one input and output layers...
        m = self.functional_model()
        keras_model = KerasClassifier((0, 1), m, input_layer=1, output_layer=1)
        self.assertIn("input1", keras_model._input.name)
        self.assertIn("output1", keras_model._output.name)
        keras_model = KerasClassifier((0, 1), m, input_layer=0, output_layer=0)
        self.assertIn("input0", keras_model._input.name)
        self.assertIn("output0", keras_model._output.name)
Example #11
    def test_loss_gradient(self):
        (_, _), (x_test, y_test) = self.mnist
        classifier = KerasClassifier((0, 1), self.model_mnist)

        # Test gradient
        grads = classifier.loss_gradient(x_test, y_test)

        self.assertEqual(grads.shape, (NB_TEST, 28, 28, 1))
        self.assertTrue(np.sum(grads) != 0)
def test_functional_model(get_functional_model):
    functional_model = get_functional_model
    keras_model = KerasClassifier(functional_model, clip_values=(0, 1), input_layer=1, output_layer=1)
    assert keras_model._input.name == "input1:0"
    assert keras_model._output.name == "output1/Softmax:0"

    keras_model = KerasClassifier(functional_model, clip_values=(0, 1), input_layer=0, output_layer=0)
    assert keras_model._input.name == "input0:0"
    assert keras_model._output.name == "output0/Softmax:0"
Example #13
    def _test_functional_model(self, custom_activation=True):
        # Need to update the functional_model code to produce a model with more than one input and output layers...
        keras_model = KerasClassifier((0, 1), self.functional_model, input_layer=1, output_layer=1,
                                      custom_activation=custom_activation)
        self.assertIn("input1", keras_model._input.name)
        self.assertIn("output1", keras_model._output.name)
        keras_model = KerasClassifier((0, 1), self.functional_model, input_layer=0, output_layer=0,
                                      custom_activation=custom_activation)
        self.assertIn("input0", keras_model._input.name)
        self.assertIn("output0", keras_model._output.name)
    def test_krclassifier(self):
        """
        Test with a KerasClassifier.
        :return:
        """
        # Initialize a tf session
        session = tf.Session()
        k.set_session(session)

        # Get MNIST
        (x_train, y_train), (x_test, y_test) = self.mnist

        # Create simple CNN
        model = Sequential()
        model.add(
            Conv2D(4,
                   kernel_size=(5, 5),
                   activation='relu',
                   input_shape=(28, 28, 1)))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dense(10, activation='softmax'))

        model.compile(loss=keras.losses.categorical_crossentropy,
                      optimizer=keras.optimizers.Adam(lr=0.01),
                      metrics=['accuracy'])

        # Get classifier
        krc = KerasClassifier((0, 1), model, use_logits=False)
        krc.fit(x_train, y_train, batch_size=BATCH_SIZE, nb_epochs=10)

        # First attack (without EoT):
        fgsm = FastGradientMethod(classifier=krc, targeted=True)
        params = {'y': random_targets(y_test, krc.nb_classes)}
        x_test_adv = fgsm.generate(x_test, **params)

        # Second attack (with EoT):
        def t(x):
            return x

        def transformation():
            while True:
                yield t

        eot = ExpectationOverTransformations(sample_size=1,
                                             transformation=transformation)

        fgsm_with_eot = FastGradientMethod(classifier=krc,
                                           expectation=eot,
                                           targeted=True)
        self.assertFalse(fgsm_with_eot.expectation is None)
        x_test_adv_with_eot = fgsm_with_eot.generate(x_test, **params)

        self.assertTrue(
            (np.abs(x_test_adv - x_test_adv_with_eot) < 0.001).all())
Example #15
    def test_save(self):
        import os

        path = 'tmp'
        filename = 'model.h5'
        classifier = KerasClassifier((0, 1), model=self.model_mnist)
        classifier.save(filename, path=path)
        self.assertTrue(os.path.isfile(os.path.join(path, filename)))
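        # Hedged sketch (not in the original test): round-trip the saved file back
        # through Keras and re-wrap it. Assumes `load_model` is imported from
        # keras.models in this snippet's context.
        loaded_model = load_model(os.path.join(path, filename))
        classifier_loaded = KerasClassifier((0, 1), model=loaded_model)
        self.assertEqual(classifier_loaded.nb_classes, classifier.nb_classes)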

        # Remove saved file
        os.remove(os.path.join(path, filename))
def load_clf(folder, clf_filename, model_filename):
    cwd = os.getcwd()
    os.chdir(folder)
    import pickle
    with open(clf_filename, "rb") as f:
        clf_state = pickle.load(f)
    model = tf.keras.models.load_model(model_filename)
    os.chdir(cwd)
    clf = KerasClassifier(model=model)
    clf.__setstate__(clf_state)
    return clf, model
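For context, a matching save helper might look like the sketch below; `save_clf` is hypothetical and only illustrates the on-disk layout that `load_clf` expects (pickled wrapper state next to the Keras .h5 model). It assumes KerasClassifier exposes `__getstate__`, which `load_clf`'s use of `__setstate__` suggests.

def save_clf(clf, model, folder, clf_filename, model_filename):
    # Hypothetical counterpart to load_clf above (not from the original code):
    # persist the ART wrapper state and the Keras model side by side in `folder`.
    import os
    import pickle
    os.makedirs(folder, exist_ok=True)
    with open(os.path.join(folder, clf_filename), "wb") as f:
        pickle.dump(clf.__getstate__(), f)
    model.save(os.path.join(folder, model_filename))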
Example #17
    def test_krclassifier(self):
        """
        Second test with the KerasClassifier.
        :return:
        """
        # Initialize a tf session
        session = tf.Session()
        k.set_session(session)

        # Get MNIST
        (x_train, y_train), (x_test, y_test) = self.mnist

        # Create simple CNN
        model = Sequential()
        model.add(
            Conv2D(4,
                   kernel_size=(5, 5),
                   activation='relu',
                   input_shape=(28, 28, 1)))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dense(10, activation='softmax'))

        model.compile(loss=keras.losses.categorical_crossentropy,
                      optimizer=keras.optimizers.Adam(lr=0.01),
                      metrics=['accuracy'])

        # Get classifier
        krc = KerasClassifier((0, 1), model, use_logits=False)
        krc.fit(x_train, y_train, batch_size=BATCH_SIZE, nb_epochs=2)

        # Attack
        attack_params = {
            "max_translation": 10.0,
            "num_translations": 3,
            "max_rotation": 30.0,
            "num_rotations": 3
        }
        attack_st = SpatialTransformation(krc)
        x_train_adv = attack_st.generate(x_train, **attack_params)

        self.assertTrue(abs(x_train_adv[0, 8, 13, 0] - 0.8066048) <= 0.01)
        self.assertTrue(abs(attack_st.fooling_rate - 0.923) <= 0.01)

        self.assertTrue(attack_st.attack_trans_x == -3)
        self.assertTrue(attack_st.attack_trans_y == -3)
        self.assertTrue(attack_st.attack_rot == -30.0)

        x_test_adv = attack_st.generate(x_test)

        self.assertTrue(abs(x_test_adv[0, 14, 14, 0] - 0.6941315) <= 0.01)

        k.clear_session()
Example #18
    def test_resnet(self):
        keras.backend.set_learning_phase(0)
        model = ResNet50(weights='imagenet')
        classifier = KerasClassifier(model, clip_values=(0, 255))

        image = img_to_array(load_img(os.path.join(self.test_dir, 'test.jpg'), target_size=(224, 224)))
        image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))

        prediction = classifier.predict(image)
        label = decode_predictions(prediction)[0][0]

        self.assertEqual(label[1], 'Weimaraner')
        self.assertAlmostEqual(prediction[0, 178], 0.2658045, places=3)
    def test_keras_iris_unbounded_LInf(self):
        classifier = get_tabular_classifier_kr()

        # Recreate a classifier without clip values
        classifier = KerasClassifier(model=classifier._model, use_logits=False, channel_index=1)
        attack = CarliniLInfMethod(classifier, targeted=False, max_iter=10, eps=1)
        x_test_adv = attack.generate(self.x_test_iris)
        self.assertFalse((self.x_test_iris == x_test_adv).all())

        predictions_adv = np.argmax(classifier.predict(x_test_adv), axis=1)
        self.assertFalse((np.argmax(self.y_test_iris, axis=1) == predictions_adv).all())
        accuracy = np.sum(predictions_adv == np.argmax(self.y_test_iris, axis=1)) / self.y_test_iris.shape[0]
        logger.info("Accuracy on Iris with C&W adversarial examples: %.2f%%", (accuracy * 100))
Example #20
    def test_iris_k_unbounded(self):
        classifier, _ = get_iris_classifier_kr()

        # Recreate a classifier without clip values
        classifier = KerasClassifier(model=classifier._model, use_logits=False, channel_index=1)
        attack = DeepFool(classifier, max_iter=5, batch_size=128)
        x_test_adv = attack.generate(self.x_test)
        self.assertFalse((self.x_test == x_test_adv).all())

        predictions_adv = np.argmax(classifier.predict(x_test_adv), axis=1)
        self.assertFalse((np.argmax(self.y_test, axis=1) == predictions_adv).all())
        accuracy = np.sum(predictions_adv == np.argmax(self.y_test, axis=1)) / self.y_test.shape[0]
        logger.info('Accuracy on Iris with DeepFool adversarial examples: %.2f%%', (accuracy * 100))
def test_resnet(create_test_image):
    image_file_path = create_test_image
    keras.backend.set_learning_phase(0)
    model = ResNet50(weights="imagenet")
    classifier = KerasClassifier(model, clip_values=(0, 255))

    image = img_to_array(load_img(image_file_path, target_size=(224, 224)))
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))

    prediction = classifier.predict(image)
    label = decode_predictions(prediction)[0][0]

    assert label[1] == "Weimaraner"
    np.testing.assert_array_almost_equal(prediction[0, 178], 0.2658045, decimal=3)
Example #22
    def adversarial_training(self):
        # Data augmentation: expand the training set with the adversarial samples
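        # NOTE: min_, max_, model, out_file, x_test and y_test are assumed to be
        # defined elsewhere in the class/script this snippet was taken from.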
        x_train = np.append(self.x_train, self.adv_train, axis=0)
        y_train = np.append(self.y_train, self.y_train, axis=0)

        # Retrain the CNN on the extended dataset
        classifier = KerasClassifier((min_, max_), model=model)
        classifier.fit(x_train, y_train, nb_epochs=5, batch_size=50)

        with open(out_file, 'a+') as f:
            preds = np.argmax(classifier.predict(x_train), axis=1)
            acc = np.sum(
                preds == np.argmax(y_train, axis=1)) / y_train.shape[0]
            print("TRAIN: %.2f%% \n" % (acc * 100), file=f)

            preds = np.argmax(classifier.predict(self.adv_train), axis=1)
            acc = np.sum(
                preds == np.argmax(self.y_train, axis=1)) / self.y_train.shape[0]
            print("TRAIN-ADVERSARY: %.2f%% \n" % (acc * 100), file=f)

            preds = np.argmax(classifier.predict(x_test), axis=1)
            acc = np.sum(preds == np.argmax(y_test, axis=1)) / y_test.shape[0]
            print("TEST: %.2f%% \n" % (acc * 100), file=f)

            preds = np.argmax(classifier.predict(self.adv_test), axis=1)
            acc = np.sum(preds == np.argmax(y_test, axis=1)) / y_test.shape[0]
            print('TEST-ADVERSARY: %.2f%% \n' % (acc * 100), file=f)
    def test_iris_k_unbounded(self):
        (_, _), (x_test, y_test) = self.iris
        classifier, _ = get_iris_classifier_kr()

        # Recreate a classifier without clip values
        classifier = KerasClassifier(model=classifier._model, use_logits=False, channel_index=1)
        attack = CarliniLInfMethod(classifier, targeted=False, max_iter=10, eps=1)
        x_test_adv = attack.generate(x_test)
        self.assertFalse((x_test == x_test_adv).all())

        preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)
        self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all())
        acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0]
        logger.info('Accuracy on Iris with C&W adversarial examples: %.2f%%', (acc * 100))
    def test_keras_classifier(self):
        """
        Second test with the KerasClassifier.
        :return:
        """
        # Build KerasClassifier
        victim_krc = get_image_classifier_kr()

        # Create simple CNN
        model = Sequential()
        model.add(
            Conv2D(1,
                   kernel_size=(7, 7),
                   activation="relu",
                   input_shape=(28, 28, 1)))
        model.add(MaxPooling2D(pool_size=(4, 4)))
        model.add(Flatten())
        model.add(Dense(10, activation="softmax"))
        loss = keras.losses.categorical_crossentropy
        model.compile(loss=loss,
                      optimizer=keras.optimizers.Adam(lr=0.001),
                      metrics=["accuracy"])

        # Get classifier
        thieved_krc = KerasClassifier(model,
                                      clip_values=(0, 1),
                                      use_logits=False)

        # Create attack
        copycat_cnn = CopycatCNN(
            classifier=victim_krc,
            batch_size_fit=self.batch_size,
            batch_size_query=self.batch_size,
            nb_epochs=NB_EPOCHS,
            nb_stolen=NB_STOLEN,
        )
        thieved_krc = copycat_cnn.extract(x=self.x_train_mnist,
                                          thieved_classifier=thieved_krc)

        victim_preds = np.argmax(
            victim_krc.predict(x=self.x_train_mnist[:100]), axis=1)
        thieved_preds = np.argmax(
            thieved_krc.predict(x=self.x_train_mnist[:100]), axis=1)
        acc = np.sum(victim_preds == thieved_preds) / len(victim_preds)

        self.assertGreater(acc, 0.3)

        # Clean-up
        k.clear_session()
    def test_keras_iris_unbounded(self):
        classifier = get_tabular_classifier_kr()

        # Recreate a classifier without clip values
        classifier = KerasClassifier(model=classifier._model, use_logits=False, channel_index=1)
        attack = BasicIterativeMethod(classifier, eps=1, eps_step=0.2, batch_size=128)
        x_test_adv = attack.generate(self.x_test_iris)
        self.assertFalse((self.x_test_iris == x_test_adv).all())
        self.assertTrue((x_test_adv > 1).any())
        self.assertTrue((x_test_adv < 0).any())

        preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)
        self.assertFalse((np.argmax(self.y_test_iris, axis=1) == preds_adv).all())
        acc = np.sum(preds_adv == np.argmax(self.y_test_iris, axis=1)) / self.y_test_iris.shape[0]
        logger.info("Accuracy on Iris with BIM adversarial examples: %.2f%%", (acc * 100))
Example #26
    def test_shapes(self):
        x_test, y_test = self.mnist[1]
        classifier = KerasClassifier((0, 1), self.model_mnist)

        preds = classifier.predict(self.mnist[1][0])
        self.assertTrue(preds.shape == y_test.shape)

        self.assertTrue(classifier.nb_classes == 10)

        class_grads = classifier.class_gradient(x_test[:11])
        self.assertTrue(class_grads.shape == tuple([11, 10] +
                                                   list(x_test[1].shape)))

        loss_grads = classifier.loss_gradient(x_test[:11], y_test[:11])
        self.assertTrue(loss_grads.shape == x_test[:11].shape)
Example #27
def get_art_model(model_kwargs: dict,
                  wrapper_kwargs: dict,
                  weights_path: Optional[str] = None) -> KerasClassifier:
    """
    ResNet-50 pretrained on ImageNet. The initial layers transform the input from canonical form to the expected
    input format for ResNet-50.
    """
    input = tf.keras.Input(shape=(224, 224, 3))

    # Preprocessing layers
    img_scaled_to_255 = Lambda(lambda image: image * 255)(input)
    # Reorder image channels i.e. img = img[..., ::-1]
    img_channel_reorder = Lambda(lambda image: tf.reverse(image, axis=[-1]))(
        img_scaled_to_255)
    # Model was trained with inputs zero-centered on ImageNet mean
    img_normalized = Lambda(lambda image: image - IMAGENET_MEANS)(
        img_channel_reorder)

    resnet50 = ResNet50(weights=None,
                        input_tensor=img_normalized,
                        **model_kwargs)
    model = Model(inputs=input, outputs=resnet50.output)

    if weights_path:
        model.load_weights(weights_path)

    wrapped_model = KerasClassifier(
        model,
        clip_values=(0.0, 1.0),
        **wrapper_kwargs,
    )
    return wrapped_model
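A hedged call sketch for `get_art_model` (not part of the original snippet); the empty kwargs are placeholders that fall back to the ResNet50 defaults (include_top=True, 1000 classes), and no checkpoint is loaded:

classifier = get_art_model(model_kwargs={}, wrapper_kwargs={}, weights_path=None)
print(classifier.input_shape)  # expected: (224, 224, 3)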
Example #28
def simple_cnn(dropout):
    model = Sequential()
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=(28, 28, 1)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    if dropout > 0:
        model.add(Dropout(dropout))
    model.add(Flatten())

    model.add(Dense(128, activation='relu'))
    if dropout > 0:
        model.add(Dropout(dropout))

    model.add(Dense(10, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    classifier = KerasClassifier(clip_values=(0., 1.), model=model)
    return classifier
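A short usage sketch for `simple_cnn` (not part of the original snippet), again assuming ART's `load_mnist` helper and one-hot labels:

from art.utils import load_mnist

# Hedged usage sketch: one quick epoch on a small MNIST slice.
(x_train, y_train), (x_test, y_test), _, _ = load_mnist()
clf = simple_cnn(dropout=0.25)
clf.fit(x_train[:1000], y_train[:1000], nb_epochs=1, batch_size=128)
print(clf.predict(x_test[:10]).shape)  # expected: (10, 10)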
    @classmethod
    def setUpClass(cls):

        (x_train, y_train), (x_test, y_test), min_, max_ = load_mnist()
        x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN]
        cls.mnist = (x_train, y_train), (x_test, y_test), (min_, max_)

        # Create simple keras model
        import keras.backend as k
        from keras.models import Sequential
        from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D

        k.set_learning_phase(1)
        model = Sequential()
        model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=x_train.shape[1:]))
        model.add(MaxPooling2D(pool_size=(3, 3)))
        model.add(Flatten())
        model.add(Dense(10, activation='softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

        from art.classifiers import KerasClassifier
        cls.classifier = KerasClassifier((min_, max_), model=model)

        cls.classifier.fit(x_train, y_train, nb_epochs=1, batch_size=128)

        cls.defence = ActivationDefence(cls.classifier, x_train, y_train)
def debug():
    """
    Function:
        For debugging.
    Supported values for attacker_name:
        "AdversarialPatch"
        "Attack"
        "BoundaryAttack"
    Add your attacker's name here.
    """
    x_train, y_train, x_test, y_test, model, min_, max_ = GetCifar10WithModel()
    x_test_example = x_test[:10]
    y_test_example = y_test[:10]

    classifier = KerasClassifier(clip_values=(min_, max_),
                                 model=model,
                                 use_logits=False,
                                 preprocessing=(0.5, 1))

    x_adv_adversarial_patch, dt_adversarial_patch = GetAttackers(
        classifier, x_test_example, "AdversarialPatch")
    fig = plt.figure()
    ax1 = fig.add_subplot(211)
    ax2 = fig.add_subplot(212)
    ax1.imshow(x_adv_adversarial_patch[0])
    ax2.imshow(x_adv_adversarial_patch[1])
    plt.show()
    # x_adv_adversarial_patch, = x_adv_adversarial_patch
    # print(x_adv_adversarial_patch)
    # x_adv_attack, dt_attack = GetAttackers(classifier, x_test_example, "Attack")
    # x_adv_boundary_attack, dt_boundary_attack = GetAttackers(classifier, x_test_example, "BoundaryAttack")

    print("Time duration for AdversarialPatch: \t", dt_adversarial_patch)