Example #1
    def test_defences_predict(self):
        clip_values = (0, 1)
        fs = FeatureSqueezing(clip_values=clip_values, bit_depth=2)
        jpeg = JpegCompression(clip_values=clip_values, apply_predict=True)
        smooth = SpatialSmoothing()
        classifier_ = get_classifier_kr()
        classifier = KerasClassifier(clip_values=clip_values,
                                     model=classifier_._model,
                                     defences=[fs, jpeg, smooth])
        self.assertEqual(len(classifier.defences), 3)

        predictions_classifier = classifier.predict(self.x_test)

        # Apply the same defences by hand
        x_test_defense = self.x_test
        x_test_defense, _ = fs(x_test_defense, self.y_test)
        x_test_defense, _ = jpeg(x_test_defense, self.y_test)
        x_test_defense, _ = smooth(x_test_defense, self.y_test)
        classifier = get_classifier_kr()
        predictions_check = classifier._model.predict(x_test_defense)

        # Check that the prediction results match
        np.testing.assert_array_almost_equal(predictions_classifier,
                                             predictions_check,
                                             decimal=4)
def test_defences_predict(get_default_mnist_subset, get_image_classifier_list):
    (x_train_mnist, y_train_mnist), (x_test_mnist, y_test_mnist) = get_default_mnist_subset

    clip_values = (0, 1)
    fs = FeatureSqueezing(clip_values=clip_values, bit_depth=2)
    jpeg = JpegCompression(clip_values=clip_values, apply_predict=True)
    smooth = SpatialSmoothing()
    classifier_, _ = get_image_classifier_list(one_classifier=True)
    classifier = KerasClassifier(
        clip_values=clip_values, model=classifier_._model, preprocessing_defences=[fs, jpeg, smooth]
    )
    assert len(classifier.preprocessing_defences) == 3

    predictions_classifier = classifier.predict(x_test_mnist)

    # Apply the same defences by hand
    x_test_defense = x_test_mnist
    x_test_defense, _ = fs(x_test_defense, y_test_mnist)
    x_test_defense, _ = jpeg(x_test_defense, y_test_mnist)
    x_test_defense, _ = smooth(x_test_defense, y_test_mnist)
    classifier, _ = get_image_classifier_list(one_classifier=True)

    predictions_check = classifier._model.predict(x_test_defense)

    # Check that the prediction results match
    np.testing.assert_array_almost_equal(predictions_classifier, predictions_check, decimal=4)
    def test_defences_predict(self):
        from art.defences import FeatureSqueezing, JpegCompression, SpatialSmoothing

        (_, _), (x_test, y_test) = self.mnist

        clip_values = (0, 1)
        fs = FeatureSqueezing(clip_values=clip_values, bit_depth=2)
        jpeg = JpegCompression(clip_values=clip_values, apply_predict=True)
        smooth = SpatialSmoothing()
        classifier = KerasClassifier(clip_values=clip_values,
                                     model=self.model_mnist._model,
                                     defences=[fs, jpeg, smooth])
        self.assertEqual(len(classifier.defences), 3)

        preds_classifier = classifier.predict(x_test)

        # Apply the same defences by hand
        x_test_defense = x_test
        x_test_defense, _ = fs(x_test_defense, y_test)
        x_test_defense, _ = jpeg(x_test_defense, y_test)
        x_test_defense, _ = smooth(x_test_defense, y_test)
        preds_check = self.model_mnist._model.predict(x_test_defense)

        # Check that the prediction results match
        self.assertTrue((preds_classifier - preds_check <= 1e-5).all())
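
For quick reference, the pattern shared by the three test_defences_predict variants above boils down to the following sketch. It assumes a compiled Keras MNIST model bound to the name model and test images x_test (both hypothetical here), and uses the newer ART import paths and the preprocessing_defences argument shown in the second variant; older releases expose the same classes under art.defences and take defences= instead.

import numpy as np
from art.estimators.classification import KerasClassifier
from art.defences.preprocessor import FeatureSqueezing, JpegCompression, SpatialSmoothing

# Assumed to exist already: `model` (a compiled Keras classifier) and
# `x_test` (MNIST-shaped test images scaled to [0, 1]).
clip_values = (0, 1)
defences = [
    FeatureSqueezing(clip_values=clip_values, bit_depth=2),
    JpegCompression(clip_values=clip_values, apply_predict=True),
    SpatialSmoothing(),
]

# Defences registered on the classifier run automatically inside predict().
classifier = KerasClassifier(model=model, clip_values=clip_values,
                             preprocessing_defences=defences)
predictions_wrapped = classifier.predict(x_test)

# Applying the same defences by hand should yield matching predictions.
x_defended = x_test
for defence in defences:
    x_defended, _ = defence(x_defended)
predictions_manual = model.predict(x_defended)
np.testing.assert_array_almost_equal(predictions_wrapped, predictions_manual, decimal=4)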
    def test_one_channel(self):
        clip_values = (0, 1)
        (x_train, _), (_, _), _, _ = load_mnist()
        x_train = x_train[:2]
        preprocess = JpegCompression(clip_values=clip_values, quality=70)
        x_compressed, _ = preprocess(x_train)
        self.assertEqual(x_compressed.shape, x_train.shape)
        self.assertTrue((x_compressed >= clip_values[0]).all())
        self.assertTrue((x_compressed <= clip_values[1]).all())
    def test_failure_clip_values_negative(self):
        clip_values = (-1, 1)

        # Assert that value error is raised
        with self.assertRaises(ValueError) as context:
            _ = JpegCompression(clip_values=clip_values,
                                channel_index=1,
                                quality=80)

        self.assertTrue('min value must be 0.' in str(context.exception))
    def test_failure_clip_values_unexpected_maximum(self):
        clip_values = (0, 2)

        # Assert that value error is raised
        with self.assertRaises(ValueError) as context:
            _ = JpegCompression(clip_values=clip_values,
                                channel_index=1,
                                quality=80)

        self.assertIn('max value must be either 1 or 255.',
                      str(context.exception))
    def test_failure_feature_vectors(self):
        clip_values = (0, 1)
        x = np.random.rand(10, 3)
        preprocess = JpegCompression(clip_values=clip_values,
                                     channel_index=1,
                                     quality=80)

        # Assert that value error is raised for feature vectors
        with self.assertRaises(ValueError) as context:
            preprocess(x)

        self.assertTrue('Feature vectors detected.' in str(context.exception))
    def test_channel_index(self):
        clip_values = (0, 255)
        (train_features, _), (_, _) = cifar10.load_data()
        x = train_features[:2]
        x = np.swapaxes(x, 1, 3)
        preprocess = JpegCompression(clip_values=clip_values,
                                     channel_index=1,
                                     quality=80)
        x_compressed, _ = preprocess(x)
        self.assertEqual(x_compressed.shape, x.shape)
        self.assertTrue((x_compressed >= clip_values[0]).all())
        self.assertTrue((x_compressed <= clip_values[1]).all())
    def test_three_channels_0_1(self):
        clip_values = (0, 1)
        (train_features, _), (_, _) = cifar10.load_data()
        x = train_features[:2] / 255.0
        preprocess = JpegCompression(clip_values=clip_values, quality=80)
        x_compressed, _ = preprocess(x)
        self.assertEqual(x_compressed.shape, x.shape)
        self.assertTrue((x_compressed >= clip_values[0]).all())
        self.assertTrue((x_compressed <= clip_values[1]).all())
        self.assertAlmostEqual(x_compressed[0, 14, 14, 0], 0.92941177)
        self.assertAlmostEqual(x_compressed[0, 14, 14, 1], 0.8039216)
        self.assertAlmostEqual(x_compressed[0, 14, 14, 2], 0.6117647)
    def test_three_channels_0_1(self):
        clip_values = (0, 1)
        (train_features, _), (_, _) = cifar10.load_data()
        x = train_features[:2] / 255.0
        x_original = x.copy()
        preprocess = JpegCompression(clip_values=clip_values, quality=80)
        x_compressed, _ = preprocess(x)
        self.assertEqual(x_compressed.shape, x.shape)
        self.assertTrue((x_compressed >= clip_values[0]).all())
        self.assertTrue((x_compressed <= clip_values[1]).all())
        self.assertAlmostEqual(x_compressed[0, 14, 14, 0], 0.92941177)
        self.assertAlmostEqual(x_compressed[0, 14, 14, 1], 0.8039216)
        self.assertAlmostEqual(x_compressed[0, 14, 14, 2], 0.6117647)
        # Check that x has not been modified in place by the defence
        self.assertAlmostEqual(float(np.max(np.abs(x_original - x))),
                               0.0,
                               delta=0.00001)
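
Taken together, the JpegCompression tests above pin down its contract: inputs must be image batches rather than flat feature vectors, clip_values must span either (0, 1) or (0, 255), and the output keeps the input shape and value range. A minimal standalone sketch, using random data purely for illustration and the newer art.defences.preprocessor import path:

import numpy as np
from art.defences.preprocessor import JpegCompression

# Random grayscale batch in NHWC layout with values in [0, 1] (illustrative only).
x = np.random.rand(2, 28, 28, 1).astype(np.float32)

# clip_values must start at 0; the maximum must be either 1 or 255.
jpeg = JpegCompression(clip_values=(0, 1), quality=80)
x_jpeg, _ = jpeg(x)

assert x_jpeg.shape == x.shape
assert x_jpeg.min() >= 0.0 and x_jpeg.max() <= 1.0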
Example #11
        plt.imshow(x_adv[0])
        plt.show(block=False)

    attack.max_iter = iter_step
"""
Adversarial image at step 0. L2 error 7213.825 and Tesseract output assent.
Adversarial image at step 30. L2 error 4137.465 and Tesseract output assent.
Adversarial image at step 60. L2 error 2795.272 and Tesseract output assent.
Adversarial image at step 90. L2 error 2537.3455 and Tesseract output assent.
Adversarial image at step 120. L2 error 2265.7817 and Tesseract output assent.
Adversarial image at step 150. L2 error 2109.62 and Tesseract output assent.
"""

# 3. Use JPEG compression to protect the classifier

jpeg = JpegCompression(clip_values=(0, 255), channel_index=3)
classifier_def = BlackBoxClassifier(predict, image_target.shape, 3, clip_values=(0, 255), defences=[jpeg])

# 3.1 this is the image we want to target
plt.imshow(image_target)
plt.show()
print('Tesseract output is: ' + label_dict[np.argmax(classifier_def.predict(np.array([image_target])))])
"""
Tesseract output is: dissent
"""

# 3.2 this is the label we want to perturb to
plt.imshow(image_init)
plt.show()
print('Tesseract output is: ' + label_dict[np.argmax(classifier_def.predict(np.array([image_init])))])
"""