Example #1
0
    def test_functional_model(self):
        """Check that input/output layer indices select the matching named tensors.

        The original used ``assertTrue(x, "name")``, where the second argument
        is the failure *message*, not an expected value — the assertion was
        vacuous for any truthy name. ``assertIn`` performs the real check
        (tensor names carry suffixes such as ``:0``, so substring match is used).
        """
        keras_model = KerasClassifier(self.functional_model, clip_values=(0, 1), input_layer=1, output_layer=1)
        self.assertIn("input1", keras_model._input.name)
        self.assertIn("output1", keras_model._output.name)

        keras_model = KerasClassifier(self.functional_model, clip_values=(0, 1), input_layer=0, output_layer=0)
        self.assertIn("input0", keras_model._input.name)
        self.assertIn("output0", keras_model._output.name)
def test_functional_model(get_functional_model):
    """Verify that input/output layer indices select the expected tensors by name."""
    model = get_functional_model

    wrapped = KerasClassifier(model, clip_values=(0, 1), input_layer=1, output_layer=1)
    assert wrapped._input.name == "input1:0"
    assert wrapped._output.name == "output1/Softmax:0"

    wrapped = KerasClassifier(model, clip_values=(0, 1), input_layer=0, output_layer=0)
    assert wrapped._input.name == "input0:0"
    assert wrapped._output.name == "output0/Softmax:0"
Example #3
0
 def test_functional_model(self):
     """Check input/output layer selection on a multi-input/output model.

     Need to update the functional_model code to produce a model with more
     than one input and output layers...

     Bug fix: ``assertTrue(x, "name")`` treats the second argument as a
     failure message, so the original assertions were vacuous. ``assertIn``
     actually compares against the tensor name (which may carry a suffix
     such as ``:0``).
     """
     m = self.functional_model()
     keras_model = KerasClassifier((0, 1), m, input_layer=1, output_layer=1)
     self.assertIn("input1", keras_model._input.name)
     self.assertIn("output1", keras_model._output.name)
     keras_model = KerasClassifier((0, 1), m, input_layer=0, output_layer=0)
     self.assertIn("input0", keras_model._input.name)
     self.assertIn("output0", keras_model._output.name)
Example #4
0
 def _test_functional_model(self, custom_activation=True):
     """Check input/output layer selection with a custom activation flag.

     Need to update the functional_model code to produce a model with more
     than one input and output layers...

     Bug fix: ``assertTrue(x, "name")`` treats the second argument as a
     failure message, making the original assertions vacuous; ``assertIn``
     performs the real name check.
     """
     keras_model = KerasClassifier((0, 1), self.functional_model, input_layer=1, output_layer=1,
                                   custom_activation=custom_activation)
     self.assertIn("input1", keras_model._input.name)
     self.assertIn("output1", keras_model._output.name)
     keras_model = KerasClassifier((0, 1), self.functional_model, input_layer=0, output_layer=0,
                                   custom_activation=custom_activation)
     self.assertIn("input0", keras_model._input.name)
     self.assertIn("output0", keras_model._output.name)
Example #5
0
    def _test_fit_generator(self, custom_activation=False):
        """Train via a Keras data generator and check accuracy does not collapse."""
        from art.classifiers.keras import generator_fit
        from art.data_generators import KerasDataGenerator

        (x_train, y_train), (x_test, y_test) = self.mnist
        labels = np.argmax(y_test, axis=1)
        classifier = KerasClassifier((0, 1),
                                     self.model_mnist,
                                     use_logits=False,
                                     custom_activation=custom_activation)

        def _accuracy():
            # Fraction of test samples predicted correctly.
            predictions = np.argmax(classifier.predict(x_test), axis=1)
            return np.sum(predictions == labels) / NB_TEST

        acc = _accuracy()
        logger.info('Accuracy: %.2f%%', (acc * 100))

        data_gen = KerasDataGenerator(
            generator=generator_fit(x_train, y_train, batch_size=BATCH_SIZE),
            size=NB_TRAIN,
            batch_size=BATCH_SIZE)
        classifier.fit_generator(generator=data_gen, nb_epochs=2)

        acc2 = _accuracy()
        logger.info('Accuracy: %.2f%%', (acc2 * 100))

        # Generator training may perturb accuracy, but not collapse it.
        self.assertTrue(acc2 >= .8 * acc)
    def test_krclassifier(self):
        """
        Second test with the KerasClassifier: NewtonFool must perturb inputs
        and reduce the confidence of the originally predicted class.
        :return:
        """
        # Get MNIST
        (x_train, y_train), (x_test, _) = self.mnist

        # Create simple CNN
        model = Sequential()
        model.add(
            Conv2D(4,
                   kernel_size=(5, 5),
                   activation='relu',
                   input_shape=(28, 28, 1)))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dense(10, activation='softmax'))

        model.compile(loss=keras.losses.categorical_crossentropy,
                      optimizer=keras.optimizers.Adam(lr=0.01),
                      metrics=['accuracy'])

        # Get classifier
        krc = KerasClassifier((0, 1), model, use_logits=False)
        krc.fit(x_train, y_train, batch_size=BATCH_SIZE, nb_epochs=2)

        # Attack (commented-out timing/benchmark scaffolding removed)
        nf = NewtonFool(krc, max_iter=5)
        x_test_adv = nf.generate(x_test, batch_size=100)

        self.assertFalse((x_test == x_test_adv).all())

        # NewtonFool should lower the max-class confidence on every sample.
        y_pred = krc.predict(x_test)
        y_pred_adv = krc.predict(x_test_adv)
        y_pred_bool = y_pred.max(axis=1, keepdims=1) == y_pred
        y_pred_max = y_pred.max(axis=1)
        y_pred_adv_max = y_pred_adv[y_pred_bool]
        self.assertTrue((y_pred_max >= y_pred_adv_max).all())
def get_vanilla_model(x_train, y_train, batch_norm=False):
    """Build, compile and train a plain MNIST CNN wrapped as a KerasClassifier.

    :param x_train: Training images of shape (N, 28, 28, 1).
    :param y_train: One-hot encoded training labels.
    :param batch_norm: When True, insert BatchNormalization after each ReLU block.
    :return: The fitted KerasClassifier.
    """
    m = Sequential()

    m.add(Conv2D(64, (3, 3), padding='valid', input_shape=(28, 28, 1)))
    m.add(Activation('relu'))
    if batch_norm:
        m.add(BatchNormalization())
    m.add(Conv2D(64, (3, 3)))
    m.add(Activation('relu'))
    if batch_norm:
        m.add(BatchNormalization())
    m.add(MaxPooling2D(pool_size=(2, 2)))
    m.add(Dropout(0.5))
    m.add(Flatten())
    m.add(Dense(128))
    m.add(Activation('relu'))
    if batch_norm:
        m.add(BatchNormalization())
    m.add(Dropout(0.5))
    m.add(Dense(10))
    m.add(Activation('softmax'))
    m.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
    c = KerasClassifier(model=m)
    c.fit(x_train, y_train, nb_epochs=50, batch_size=128)
    return c
def test_defences_predict(get_default_mnist_subset, get_image_classifier_list):
    """Predictions through stacked preprocessing defences must match applying them by hand."""
    (x_train_mnist, y_train_mnist), (x_test_mnist, y_test_mnist) = get_default_mnist_subset

    clip_values = (0, 1)
    defences = [
        FeatureSqueezing(clip_values=clip_values, bit_depth=2),
        JpegCompression(clip_values=clip_values, apply_predict=True),
        SpatialSmoothing(),
    ]
    base_classifier, _ = get_image_classifier_list(one_classifier=True)
    defended = KerasClassifier(
        clip_values=clip_values, model=base_classifier._model, preprocessing_defences=defences
    )
    assert len(defended.preprocessing_defences) == 3

    predictions_classifier = defended.predict(x_test_mnist)

    # Apply the same defences by hand
    x_test_defense = x_test_mnist
    for defence in defences:
        x_test_defense, _ = defence(x_test_defense, y_test_mnist)
    plain_classifier, _ = get_image_classifier_list(one_classifier=True)

    predictions_check = plain_classifier._model.predict(x_test_defense)

    # Check that the prediction results match
    np.testing.assert_array_almost_equal(predictions_classifier, predictions_check, decimal=4)
Example #9
0
    def setUp(self):
        """Build and briefly train a small MNIST CNN, then create the ActivationDefence fixture."""
        # Only the training split is used; the redundant str('mnist') wrapper
        # and unused test-split bindings from the original are removed.
        (self.x_train, self.y_train), (_, _), min_, max_ = load_dataset('mnist')
        # Keep a small subset so setUp stays fast.
        self.x_train = self.x_train[:300]
        self.y_train = self.y_train[:300]

        k.set_learning_phase(1)
        model = Sequential()
        model.add(
            Conv2D(32,
                   kernel_size=(3, 3),
                   activation='relu',
                   input_shape=self.x_train.shape[1:]))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(128, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(10, activation='softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])

        self.classifier = KerasClassifier((min_, max_), model=model)
        self.classifier.fit(self.x_train,
                            self.y_train,
                            nb_epochs=1,
                            batch_size=128)

        self.defence = ActivationDefence(self.classifier, self.x_train,
                                         self.y_train)
Example #10
0
def simple_cnn(dropout):
    """Build and compile a small MNIST CNN; dropout > 0 inserts Dropout layers."""
    layers = [
        Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1)),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
    ]
    if dropout > 0:
        layers.append(Dropout(dropout))
    layers.append(Flatten())
    layers.append(Dense(128, activation='relu'))
    if dropout > 0:
        layers.append(Dropout(dropout))
    layers.append(Dense(10, activation='softmax'))

    model = Sequential()
    for layer in layers:
        model.add(layer)

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    # Wrap for ART with inputs clipped to [0, 1].
    return KerasClassifier(clip_values=(0., 1.), model=model)
Example #11
0
    def setUpClass(cls):
        """Train a small MNIST CNN once and build the shared ActivationDefence fixture."""
        (x_train, y_train), (x_test, y_test), _, _ = load_mnist()
        x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN]
        cls.mnist = (x_train, y_train), (x_test, y_test)

        k.set_learning_phase(1)
        model = Sequential()
        for layer in (
            Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=x_train.shape[1:]),
            Conv2D(64, (3, 3), activation='relu'),
            MaxPooling2D(pool_size=(2, 2)),
            Dropout(0.25),
            Flatten(),
            Dense(128, activation='relu'),
            Dropout(0.5),
            Dense(10, activation='softmax'),
        ):
            model.add(layer)
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])

        cls.classifier = KerasClassifier((0, 1), model=model)
        cls.classifier.fit(x_train, y_train, nb_epochs=2, batch_size=128)

        cls.defence = ActivationDefence(cls.classifier, x_train, y_train)
Example #12
0
def run_attack(targeted,
               attack_name,
               classifier,
               samples,
               y_samples,
               batch_size=DEFAULT_BATCH_SIZE,
               dataset='mnist',
               fgsm_epsilon=0.1,
               cwl2_confidence=0):
    """Dispatch to the named attack and return the adversarial samples."""
    targeted_y_samples = get_targeted_ys(y_samples) if targeted else None

    # CleverHans attacks
    if attack_name == 'DeepFool':
        return get_DeepFool_adversarial(targeted, samples, classifier,
                                        batch_size)
    if attack_name == 'CarliniL2Method':
        return get_CWL2_adversarial(targeted, samples, targeted_y_samples,
                                    classifier, batch_size, cwl2_confidence)

    # ART attacks: wrap the raw Keras model first.
    classifier_copy = KerasClassifier(model=classifier,
                                      clip_values=(0.0, 1.0),
                                      use_logits=False)
    return get_adversarial(targeted, attack_name, classifier_copy, samples,
                           targeted_y_samples, batch_size, dataset,
                           fgsm_epsilon, cwl2_confidence)
Example #13
0
    def test_with_defences(self):
        """FGSM must still perturb a feature-squeezed, query-limited classifier."""
        (x_train, y_train), (x_test, y_test) = self.mnist

        # Reuse the already-trained Keras model and attach a defence.
        model = self.classifier_k._model
        fs = FeatureSqueezing(bit_depth=1, clip_values=(0, 1))
        defended = KerasClassifier(model=model, clip_values=(0, 1), defences=fs)
        # Wrap the classifier
        classifier = QueryEfficientBBGradientEstimation(defended, 20, 1 / 64., round_samples=1 / 255.)

        attack = FastGradientMethod(classifier, eps=1)
        x_train_adv = attack.generate(x_train)
        x_test_adv = attack.generate(x_test)

        self.assertFalse((x_train == x_train_adv).all())
        self.assertFalse((x_test == x_test_adv).all())

        train_y_pred = get_labels_np_array(classifier.predict(x_train_adv))
        test_y_pred = get_labels_np_array(classifier.predict(x_test_adv))

        self.assertFalse((y_train == train_y_pred).all())
        self.assertFalse((y_test == test_y_pred).all())

        train_preds = classifier.predict(x_train_adv)
        acc = np.sum(np.argmax(train_preds, axis=1) == np.argmax(y_train, axis=1)) / y_train.shape[0]
        logger.info('Accuracy on adversarial train examples with feature squeezing and limited query info: %.2f%%',
                    (acc * 100))

        test_preds = classifier.predict(x_test_adv)
        acc = np.sum(np.argmax(test_preds, axis=1) == np.argmax(y_test, axis=1)) / y_test.shape[0]
        logger.info('Accuracy on adversarial test examples with feature squeezing and limited query info: %.2f%%',
                    (acc * 100))
Example #14
0
    def test_defences_predict(self):
        """Classifier-applied defences must match applying each defence manually."""
        clip_values = (0, 1)
        fs = FeatureSqueezing(clip_values=clip_values, bit_depth=2)
        jpeg = JpegCompression(clip_values=clip_values, apply_predict=True)
        smooth = SpatialSmoothing()

        base = get_classifier_kr()
        defended = KerasClassifier(clip_values=clip_values,
                                   model=base._model,
                                   defences=[fs, jpeg, smooth])
        self.assertEqual(len(defended.defences), 3)

        predictions_classifier = defended.predict(self.x_test)

        # Run the identical defence chain by hand on a fresh classifier.
        x_test_defense = self.x_test
        for defence in (fs, jpeg, smooth):
            x_test_defense, _ = defence(x_test_defense, self.y_test)
        predictions_check = get_classifier_kr()._model.predict(x_test_defense)

        # The two prediction paths must agree.
        np.testing.assert_array_almost_equal(predictions_classifier,
                                             predictions_check,
                                             decimal=4)
Example #15
0
    def test_pickle(self):
        """A defended KerasClassifier must survive a pickle round-trip intact."""
        filename = "my_classifier.p"
        full_path = os.path.join(ART_DATA_PATH, filename)
        folder = os.path.split(full_path)[0]
        if not os.path.exists(folder):
            os.makedirs(folder)

        fs = FeatureSqueezing(bit_depth=1, clip_values=(0, 1))
        keras_model = KerasClassifier(
            self.functional_model, clip_values=(0, 1), input_layer=1, output_layer=1, preprocessing_defences=fs
        )
        with open(full_path, "wb") as save_file:
            pickle.dump(keras_model, save_file)

        # Unpickle:
        with open(full_path, "rb") as load_file:
            loaded = pickle.load(load_file)

        # Core attributes must round-trip unchanged.
        for attr in ("_clip_values", "_channel_index", "_use_logits", "_input_layer"):
            self.assertEqual(getattr(keras_model, attr), getattr(loaded, attr))
        self.assertEqual(self.functional_model.get_config(), loaded._model.get_config())
        self.assertTrue(isinstance(loaded.preprocessing_defences[0], FeatureSqueezing))

        os.remove(full_path)
Example #16
0
    def setUpClass(cls):
        """Build an ensemble of two freshly trained MNIST classifiers."""
        k.clear_session()
        k.set_learning_phase(1)

        # Get MNIST and trim both splits to the test-suite sizes.
        (x_train, y_train), (x_test, y_test), _, _ = load_mnist()
        x_train = x_train[:NB_TRAIN]
        y_train = y_train[:NB_TRAIN]
        x_test = x_test[:NB_TEST]
        y_test = y_test[:NB_TEST]
        cls.mnist = ((x_train, y_train), (x_test, y_test))

        model_1 = KerasClassifier((0, 1), cls._get_model(epochs=2))
        model_2 = KerasClassifier((0, 1), cls._get_model(epochs=2))
        cls.ensemble = EnsembleClassifier((0, 1), [model_1, model_2])
Example #17
0
    def _get_tabular_classifier_list(clipped=True):
        """Return a list of tabular classifiers for the active framework.

        :param clipped: When True, return the standard (clipped) variant;
            otherwise an unclipped variant where one exists.
        :return: A list of classifiers, or None when no variant is defined.

        Bug fix: the original left ``classifier_list`` unbound when
        ``framework`` matched none of the known values, raising NameError at
        the final return; it is now initialised to None and the mutually
        exclusive branches use elif.
        """
        classifier_list = None

        if framework == "keras":
            if clipped:
                classifier_list = [get_tabular_classifier_kr()]
            else:
                classifier = get_tabular_classifier_kr()
                classifier_list = [KerasClassifier(model=classifier._model, use_logits=False, channel_index=1)]
        elif framework == "tensorflow":
            if clipped:
                classifier, _ = get_tabular_classifier_tf()
                classifier_list = [classifier]
            else:
                logging.warning("{0} doesn't have an uncliped classifier defined yet".format(framework))
        elif framework == "pytorch":
            if clipped:
                classifier_list = [get_tabular_classifier_pt()]
            else:
                logging.warning("{0} doesn't have an uncliped classifier defined yet".format(framework))
        elif framework == "scikitlearn":
            return get_tabular_classifier_scikit_list(clipped=False)

        return classifier_list
Example #18
0
    def _test_fit_image_generator(self, custom_activation=False):
        """Accuracy should not collapse after fitting via an augmenting image generator."""
        from keras.preprocessing.image import ImageDataGenerator
        from art.data_generators import KerasDataGenerator

        x_train, y_train = self.mnist[0]
        x_test = self.mnist[1][0]
        labels_test = np.argmax(self.mnist[1][1], axis=1)
        classifier = KerasClassifier((0, 1),
                                     self.model_mnist,
                                     use_logits=False,
                                     custom_activation=custom_activation)

        acc = np.sum(np.argmax(classifier.predict(x_test), axis=1) == labels_test) / NB_TEST
        logger.info('Accuracy: %.2f%%', (acc * 100))

        # Mild augmentations only, so the classifier should keep most accuracy.
        keras_gen = ImageDataGenerator(width_shift_range=0.075,
                                       height_shift_range=0.075,
                                       rotation_range=12,
                                       shear_range=0.075,
                                       zoom_range=0.05,
                                       fill_mode='constant',
                                       cval=0)
        keras_gen.fit(x_train)
        flow = keras_gen.flow(x_train, y_train, batch_size=BATCH_SIZE)
        data_gen = KerasDataGenerator(generator=flow,
                                      size=NB_TRAIN,
                                      batch_size=BATCH_SIZE)
        classifier.fit_generator(generator=data_gen, nb_epochs=2)

        acc2 = np.sum(np.argmax(classifier.predict(x_test), axis=1) == labels_test) / NB_TEST
        logger.info('Accuracy: %.2f%%', (acc2 * 100))

        self.assertTrue(acc2 >= .8 * acc)
Example #19
0
    def _test_with_defences(self, custom_activation=False):
        """FGSM against a classifier defended with feature squeezing."""
        # Get MNIST
        (x_train, y_train), (x_test, y_test) = self.mnist

        # Get the ready-trained Keras model
        model = self.classifier_k._model
        classifier = KerasClassifier((0, 1), model, defences='featsqueeze1', custom_activation=custom_activation)

        attack = FastGradientMethod(classifier, eps=1)
        x_train_adv = attack.generate(x_train)
        x_test_adv = attack.generate(x_test)

        self.assertFalse((x_train == x_train_adv).all())
        self.assertFalse((x_test == x_test_adv).all())

        train_y_pred = get_labels_np_array(classifier.predict(x_train_adv))
        test_y_pred = get_labels_np_array(classifier.predict(x_test_adv))

        self.assertFalse((y_train == train_y_pred).all())
        self.assertFalse((y_test == test_y_pred).all())

        preds = classifier.predict(x_train_adv)
        acc = np.sum(np.argmax(preds, axis=1) == np.argmax(y_train, axis=1)) / y_train.shape[0]
        logger.info('Accuracy on adversarial train examples with feature squeezing: %.2f%%', (acc * 100))

        preds = classifier.predict(x_test_adv)
        acc = np.sum(np.argmax(preds, axis=1) == np.argmax(y_test, axis=1)) / y_test.shape[0]
        # Fixed: the test-split log previously omitted "with feature squeezing",
        # unlike the matching train-split log above.
        logger.info('Accuracy on adversarial test examples with feature squeezing: %.2f%%', (acc * 100))
Example #20
0
    def _get_image_classifier_list_defended(one_classifier=False, **kwargs):
        """Return defended image classifier(s) for the active framework, plus a session."""
        sess = None
        classifier_list = None

        if framework == "keras":
            # Wrap the ready-trained Keras model with a feature-squeezing defence.
            base = utils.get_image_classifier_kr()
            fs = FeatureSqueezing(bit_depth=1, clip_values=(0, 1))
            classifier_list = [
                KerasClassifier(model=base._model,
                                clip_values=(0, 1),
                                preprocessing_defences=fs)
            ]
        elif framework in ("tensorflow", "pytorch", "scikitlearn"):
            # No defended variant exists for these frameworks yet.
            logging.warning(
                "{0} doesn't have a defended image classifier defined yet".
                format(framework))

        if classifier_list is None:
            return None, None

        if one_classifier:
            return classifier_list[0], sess

        return classifier_list, sess
    def test_iris_k_unbounded(self):
        """Universal perturbation against an unclipped Iris Keras classifier."""
        (_, _), (x_test, y_test) = self.iris
        classifier, _ = get_iris_classifier_kr()

        # Recreate a classifier without clip values
        classifier = KerasClassifier(model=classifier._model,
                                     use_logits=False,
                                     channel_index=1)
        attack = UniversalPerturbation(classifier)
        attack.set_params(max_iter=1,
                          attacker="newtonfool",
                          attacker_params={"max_iter": 5})
        x_test_adv = attack.generate(x_test)
        self.assertFalse((x_test == x_test_adv).all())

        # The perturbation should flip at least some predictions.
        preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)
        self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all())
        acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0]
        logger.info(
            'Accuracy on Iris with universal adversarial examples: %.2f%%',
            (acc * 100))
Example #22
0
    def test_pickle(self):
        """A defended functional-model classifier must survive a pickle round-trip.

        Improvements: the imports scattered through the body are consolidated
        at the top of the method, and ``assertTrue(a == b)`` is replaced with
        ``assertEqual``/``assertIsInstance``, which report the mismatching
        values on failure.
        """
        import os
        import pickle
        from art import DATA_PATH

        filename = 'my_classifier.p'
        full_path = os.path.join(DATA_PATH, filename)
        folder = os.path.split(full_path)[0]
        if not os.path.exists(folder):
            os.makedirs(folder)

        fs = FeatureSqueezing(bit_depth=1, clip_values=(0, 1))
        keras_model = KerasClassifier((0, 1),
                                      self.functional_model,
                                      input_layer=1,
                                      output_layer=1,
                                      defences=fs)
        with open(full_path, 'wb') as save_file:
            pickle.dump(keras_model, save_file)

        # Unpickle:
        with open(full_path, 'rb') as load_file:
            loaded = pickle.load(load_file)

        self.assertEqual(keras_model._clip_values, loaded._clip_values)
        self.assertEqual(keras_model._channel_index, loaded._channel_index)
        self.assertEqual(keras_model._use_logits, loaded._use_logits)
        self.assertEqual(keras_model._input_layer, loaded._input_layer)
        self.assertEqual(self.functional_model.get_config(),
                         loaded._model.get_config())
        self.assertIsInstance(loaded.defences[0], FeatureSqueezing)

        os.remove(full_path)
def debug():
    """
    Debugging helper.

    Runs the "AdversarialPatch" attacker against a CIFAR-10 model and plots
    two adversarial samples. Further attacker names ("Attack",
    "BoundaryAttack", ...) can be added where noted below.
    """
    x_train, y_train, x_test, y_test, model, min_, max_ = GetCifar10WithModel()
    x_test_example = x_test[:10]
    y_test_example = y_test[:10]

    classifier = KerasClassifier(clip_values=(min_, max_),
                                 model=model,
                                 use_logits=False,
                                 preprocessing=(0.5, 1))

    x_adv_adversial_patch, dt_adversial_patch = GetAttackers(
        classifier, x_test_example, "AdversarialPatch")

    # Show the first two adversarial images stacked vertically.
    fig = plt.figure()
    top = fig.add_subplot(211)
    bottom = fig.add_subplot(212)
    top.imshow(x_adv_adversial_patch[0])
    bottom.imshow(x_adv_adversial_patch[1])
    plt.show()

    # Add further attackers here, e.g.:
    #   GetAttackers(classifier, x_test_example, "Attack")
    #   GetAttackers(classifier, x_test_example, "BoundaryAttack")

    print("Time duration for AdversarialPatch: \t", dt_adversial_patch)
    def setUpClass(cls):
        """Train a tiny MNIST CNN once and build the shared ActivationDefence fixture."""
        (x_train, y_train), (x_test, y_test), min_, max_ = load_mnist()
        x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN]
        cls.mnist = (x_train, y_train), (x_test, y_test), (min_, max_)

        # Create simple keras model
        import keras.backend as k
        from keras.models import Sequential
        from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D

        k.set_learning_phase(1)
        model = Sequential()
        for layer in (Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=x_train.shape[1:]),
                      MaxPooling2D(pool_size=(3, 3)),
                      Flatten(),
                      Dense(10, activation='softmax')):
            model.add(layer)
        model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

        from art.classifiers import KerasClassifier
        cls.classifier = KerasClassifier((min_, max_), model=model)
        cls.classifier.fit(x_train, y_train, nb_epochs=1, batch_size=128)

        cls.defence = ActivationDefence(cls.classifier, x_train, y_train)
Example #25
0
    def test_iris_unbounded(self):
        """FGSM through ExpectationOverTransformations on an unclipped Iris classifier."""
        (_, _), (x_test, y_test) = self.iris
        classifier = get_iris_classifier_kr()

        def identity(x):
            return x

        def transformation():
            # Endless stream of identity transformations: EoT becomes a no-op.
            while True:
                yield identity

        # Recreate a classifier without clip values
        classifier = KerasClassifier(model=classifier._model,
                                     use_logits=False,
                                     channel_index=1)
        classifier = ExpectationOverTransformations(
            classifier, sample_size=1, transformation=transformation)
        attack = FastGradientMethod(classifier, eps=1)
        x_test_adv = attack.generate(x_test)

        # Without clip values, perturbations may leave [0, 1].
        self.assertFalse((x_test == x_test_adv).all())
        self.assertTrue((x_test_adv > 1).any())
        self.assertTrue((x_test_adv < 0).any())

        preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)
        self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all())
        acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0]
        logger.info('Accuracy on Iris with limited query info: %.2f%%',
                    (acc * 100))
def debug():
    """Run SMM and PGD attacks on an MNIST model and report timings and accuracy."""
    x_train, y_train, x_test, y_test, model, min_, max_ = GetMnistWithModel()
    x_test_example = x_test[:100]
    y_test_example = y_test[:100]

    classifier = KerasClassifier(model=model, clip_values=(min_, max_))

    x_adv_SMM, dt_SMM = GetAttackers(classifier, x_test_example, "SMM")
    x_adv_PGD, dt_PGD = GetAttackers(classifier, x_test_example, "PGD")

    print("Time duration for SMM: \t", dt_SMM)
    print("Time duration for PGD: \t", dt_PGD)

    separator = "---------------------------------------------------------------------"
    print(separator)
    conf_l_SMM, perturb_SMM = GetAdvAccuracy(classifier, x_test_example,
                                             x_adv_SMM, y_test_example)
    print(separator)
    conf_l_PGD, perturb_PGD = GetAdvAccuracy(classifier, x_test_example,
                                             x_adv_PGD, y_test_example)
Example #27
0
    def _create_krclassifier():
        """
        To create a simple KerasClassifier for testing.
        :return:
        """
        # Bind a fresh TF session to Keras before building the model.
        session = tf.Session()
        k.set_session(session)

        # Small CNN: conv -> pool -> flatten -> dense softmax head.
        model = Sequential()
        model.add(Conv2D(4, kernel_size=(5, 5), activation='relu', input_shape=(28, 28, 1)))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dense(10, activation='softmax'))
        model.compile(loss=keras.losses.categorical_crossentropy,
                      optimizer=keras.optimizers.Adam(lr=0.01),
                      metrics=['accuracy'])

        # Wrap and return the classifier.
        return KerasClassifier(model, clip_values=(0, 1), use_logits=False)
Example #28
0
def get_iris_classifier_kr():
    """
    Standard Keras classifier for unit testing on Iris dataset. The weights and biases are identical to the Tensorflow
    model in `get_iris_classifier_tf`.

    :return: The trained model for Iris dataset and the session.
    :rtype: `tuple(KerasClassifier, tf.Session)`
    """
    import keras
    import keras.backend as k
    from keras.models import Sequential
    from keras.layers import Dense
    import tensorflow as tf

    from art.classifiers import KerasClassifier

    # Initialize a tf session
    sess = tf.Session()
    k.set_session(sess)

    # Create a simple fully-connected network (the previous "simple CNN"
    # comment was wrong: there are no convolutional layers here), with
    # weights/biases loaded to match the TF reference model.
    model = Sequential()
    model.add(Dense(10, input_shape=(4,), activation='relu',
                    kernel_initializer=_kr_weights_loader('IRIS', 'W', 'DENSE1'),
                    bias_initializer=_kr_weights_loader('IRIS', 'B', 'DENSE1')))
    model.add(Dense(10, activation='relu', kernel_initializer=_kr_weights_loader('IRIS', 'W', 'DENSE2'),
                    bias_initializer=_kr_weights_loader('IRIS', 'B', 'DENSE2')))
    model.add(Dense(3, activation='softmax', kernel_initializer=_kr_weights_loader('IRIS', 'W', 'DENSE3'),
                    bias_initializer=_kr_weights_loader('IRIS', 'B', 'DENSE3')))
    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(lr=0.001), metrics=['accuracy'])

    # Get classifier
    krc = KerasClassifier(model, clip_values=(0, 1), use_logits=False, channel_index=1)

    return krc, sess
Example #29
0
def get_art_model(model_kwargs: dict,
                  wrapper_kwargs: dict,
                  weights_path: Optional[str] = None) -> KerasClassifier:
    """
    ResNet50 for ImageNet, wrapped as an ART KerasClassifier. The initial
    Lambda layers transform canonical [0, 1] RGB input into the zero-centered
    BGR format the ResNet50 was trained on. (The docstring previously said
    "DenseNet-121", contradicting the ResNet50 actually built below; the
    parameter name `input` also shadowed the builtin and is renamed.)

    :param model_kwargs: Extra keyword arguments forwarded to ResNet50.
    :param wrapper_kwargs: Extra keyword arguments forwarded to KerasClassifier.
    :param weights_path: Optional path to pretrained weights to load.
    :return: The wrapped classifier.
    """
    model_input = tf.keras.Input(shape=(224, 224, 3))

    # Preprocessing layers
    img_scaled_to_255 = Lambda(lambda image: image * 255)(model_input)
    # Reorder image channels i.e. img = img[..., ::-1]
    img_channel_reorder = Lambda(lambda image: tf.reverse(image, axis=[-1]))(
        img_scaled_to_255)
    # Model was trained with inputs zero-centered on ImageNet mean
    img_normalized = Lambda(lambda image: image - IMAGENET_MEANS)(
        img_channel_reorder)

    resnet50 = ResNet50(weights=None,
                        input_tensor=img_normalized,
                        **model_kwargs)
    model = Model(inputs=model_input, outputs=resnet50.output)

    if weights_path:
        model.load_weights(weights_path)

    wrapped_model = KerasClassifier(
        model,
        clip_values=(0.0, 1.0),
        **wrapper_kwargs,
    )
    return wrapped_model
    def test_iris_unbounded(self):
        """FGSM through RandomizedSmoothing on an unclipped Iris classifier."""
        (_, _), (x_test, y_test) = self.iris
        classifier = get_iris_classifier_kr()

        # Recreate a classifier without clip values
        krc = KerasClassifier(model=classifier._model,
                              use_logits=False,
                              channel_index=1)
        rs = RandomizedSmoothing(classifier=krc,
                                 sample_size=100,
                                 scale=0.01,
                                 alpha=0.001)
        attack = FastGradientMethod(rs, eps=1)
        x_test_adv = attack.generate(x_test)

        # Unbounded classifier: perturbations may leave [0, 1].
        self.assertFalse((x_test == x_test_adv).all())
        self.assertTrue((x_test_adv > 1).any())
        self.assertTrue((x_test_adv < 0).any())

        preds_smooth = np.argmax(rs.predict(x_test_adv), axis=1)
        self.assertFalse((np.argmax(y_test, axis=1) == preds_smooth).all())

        # Report smoothed accuracy/coverage on clean and adversarial inputs.
        pred = rs.predict(x_test)
        pred2 = rs.predict(x_test_adv)
        acc, cov = compute_accuracy(pred, y_test)
        acc2, cov2 = compute_accuracy(pred2, y_test)
        logger.info(
            'Accuracy on Iris with smoothing on adversarial examples: %.2f%%',
            (acc * 100))
        logger.info(
            'Coverage on Iris with smoothing on adversarial examples: %.2f%%',
            (cov * 100))
        logger.info('Accuracy on Iris with smoothing: %.2f%%', (acc2 * 100))
        logger.info('Coverage on Iris with smoothing: %.2f%%', (cov2 * 100))