def test_spatial_smoothing_median_filter_call(self):
    """A window_size=2 SpatialSmoothing median-filters a 2x2 single-channel image."""
    smoother = SpatialSmoothing(channels_first=True, window_size=2)
    sample = np.array([[[[1, 2], [3, 4]]]])
    expected = np.array([[[[1, 2], [3, 3]]]])
    smoothed, _ = smoother(sample)
    assert_array_equal(smoothed, expected)
def test_defences_predict(get_default_mnist_subset, get_image_classifier_list):
    """Predictions of a defended KerasClassifier match manually defended inputs."""
    (x_train_mnist, y_train_mnist), (x_test_mnist, y_test_mnist) = get_default_mnist_subset

    clip_values = (0, 1)
    defences = [
        FeatureSqueezing(clip_values=clip_values, bit_depth=2),
        JpegCompression(clip_values=clip_values, apply_predict=True),
        SpatialSmoothing(),
    ]
    base_classifier, _ = get_image_classifier_list(one_classifier=True)
    defended = KerasClassifier(
        clip_values=clip_values, model=base_classifier.model, preprocessing_defences=defences
    )
    assert len(defended.preprocessing_defences) == 3
    predictions_classifier = defended.predict(x_test_mnist)

    # Apply each defence by hand, in the same order the classifier applies them.
    x_test_defense = x_test_mnist
    for defence in defences:
        x_test_defense, _ = defence(x_test_defense, y_test_mnist)

    reference, _ = get_image_classifier_list(one_classifier=True)
    predictions_check = reference.model.predict(x_test_defense)

    # The two prediction paths must agree.
    np.testing.assert_array_almost_equal(predictions_classifier, predictions_check, decimal=4)
def test_spatial_smoothing_image_data(self, image_batch, channels_first, window_size):
    """Smoothing an image batch yields the fixture's precomputed expected output."""
    sample, expected = image_batch
    smoother = SpatialSmoothing(channels_first=channels_first, window_size=window_size)
    result = smoother(sample)[0]
    assert_array_equal(result, expected)
def test_defences_predict(self):
    """A KerasClassifier with preprocessing defences matches manual defence application."""
    clip_values = (0, 1)
    defences = [
        FeatureSqueezing(clip_values=clip_values, bit_depth=2),
        JpegCompression(clip_values=clip_values, apply_predict=True),
        SpatialSmoothing(),
    ]
    source = get_image_classifier_kr_tf()
    defended = KerasClassifier(
        clip_values=clip_values, model=source._model, preprocessing_defences=defences
    )
    self.assertEqual(len(defended.preprocessing_defences), 3)
    predictions_classifier = defended.predict(self.x_test_mnist)

    # Apply the same defences by hand, preserving their order.
    x_test_defense = self.x_test_mnist
    for defence in defences:
        x_test_defense, _ = defence(x_test_defense, self.y_test_mnist)

    reference = get_image_classifier_kr_tf()
    predictions_check = reference._model.predict(x_test_defense)

    # Both prediction paths must agree.
    np.testing.assert_array_almost_equal(predictions_classifier, predictions_check, decimal=4)
def test_non_spatial_data_error(self, tabular_batch):
    """Tabular (non-image/video) input must be rejected with a ValueError."""
    smoother = SpatialSmoothing(channels_first=True)
    expected_msg = "Unrecognized input dimension. Spatial smoothing can only be applied to image and video data."
    with pytest.raises(ValueError, match=expected_msg):
        smoother(tabular_batch)
def test_relation_clip_values_error(art_warning):
    """clip_values with min >= max must raise a ValueError."""
    try:
        with pytest.raises(ValueError, match="Invalid 'clip_values': min >= max."):
            SpatialSmoothing(clip_values=(1, 0))
    except ARTTestException as e:
        art_warning(e)
def test_window_size_error(art_warning):
    """A non-positive window size must raise a ValueError."""
    try:
        with pytest.raises(ValueError, match="Sliding window size must be a positive integer."):
            SpatialSmoothing(window_size=0)
    except ARTTestException as e:
        art_warning(e)
def test_defences_predict(get_default_mnist_subset, image_dl_estimator_defended, image_dl_estimator):
    """A defended estimator predicts the same as an undefended one fed pre-defended data."""
    (_, _), (x_test_mnist, y_test_mnist) = get_default_mnist_subset

    defended, _ = image_dl_estimator_defended(
        one_classifier=True, defenses=["FeatureSqueezing", "JpegCompression", "SpatialSmoothing"]
    )
    if defended is None:
        # The fixture has no defended classifier for this framework; nothing to check.
        return

    assert len(defended.preprocessing_defences) == 3
    predictions_classifier = defended.predict(x_test_mnist)

    # Apply the same defences by hand, in the order the estimator applies them.
    clip_values = (0, 1)
    x_test_defense = x_test_mnist
    for defence in (
        FeatureSqueezing(clip_values=clip_values, bit_depth=2),
        JpegCompression(clip_values=clip_values, apply_predict=True),
        SpatialSmoothing(),
    ):
        x_test_defense, _ = defence(x_test_defense, y_test_mnist)

    reference, _ = image_dl_estimator(one_classifier=True)
    predictions_check = reference._model.predict(x_test_defense)

    # Both prediction paths must agree.
    np.testing.assert_array_almost_equal(predictions_classifier, predictions_check, decimal=4)
def test_triple_clip_values_error(art_warning):
    """A clip_values tuple with more than two entries must raise a ValueError."""
    try:
        with pytest.raises(
            ValueError,
            match="'clip_values' should be a tuple of 2 floats or arrays containing the allowed data range.",
        ):
            SpatialSmoothing(clip_values=(0, 1, 2))
    except ARTTestException as e:
        art_warning(e)
def test_spatial_smoothing_image_data(art_warning, image_batch, channels_first, window_size):
    """Smoothing matches the expected batch; a clipped variant runs without error."""
    try:
        sample, expected = image_batch

        smoother = SpatialSmoothing(channels_first=channels_first, window_size=window_size)
        assert_array_equal(smoother(sample)[0], expected)

        # Same configuration with clipping enabled must also process the batch.
        clipped_smoother = SpatialSmoothing(
            channels_first=channels_first, window_size=window_size, clip_values=(0, 1)
        )
        clipped_smoother(sample)
    except ARTTestException as e:
        art_warning(e)
def _SpatialSmoothing(data):
    """Apply ART spatial smoothing (clipped to [0, 1]) to a batch of CUDA tensors.

    :param data: tensor.cuda() | [N,C,H,W] | [0,1]
    :return: tensor.cuda() | [N,C,H,W] | [0,1]
    """
    # Transpose to channels-last for the defence, smooth, then restore the
    # original axis order and device.
    channels_last = np.transpose(data.cpu().numpy(), [0, 3, 2, 1])
    smoothed = SpatialSmoothing(clip_values=(0, 1))(channels_last)[0]
    return torch.from_numpy(np.transpose(smoothed, [0, 3, 2, 1])).cuda()
def test_spatial_smoothing_video_data(art_warning, video_batch, channels_first):
    """Smoothing a video batch with window_size=2 yields the expected output."""
    try:
        sample, expected = video_batch
        smoother = SpatialSmoothing(channels_first=channels_first, window_size=2)
        assert_array_equal(smoother(sample)[0], expected)
    except ARTTestException as e:
        art_warning(e)
def test_spatial_smoothing_median_filter_call(art_warning):
    """window_size=2 median-filters a 2x2 single-channel image as expected."""
    try:
        smoother = SpatialSmoothing(channels_first=True, window_size=2)
        sample = np.array([[[[1, 2], [3, 4]]]])
        expected = np.array([[[[1, 2], [3, 3]]]])
        assert_array_equal(smoother(sample)[0], expected)
    except ARTTestException as e:
        art_warning(e)
def test_defences_predict(art_warning, get_default_mnist_subset, image_dl_estimator_defended, image_dl_estimator):
    """For each single-defence estimator: its predictions match an undefended
    estimator fed hand-defended data, and differ from predictions on clean data."""
    try:
        (_, _), (x_test_mnist, y_test_mnist) = get_default_mnist_subset
        classifier, _ = image_dl_estimator()
        y_check_clean = classifier.predict(x_test_mnist)
        clip_values = (0, 1)

        # Map each defence name to a factory; the factory receives the defended
        # estimator so channel ordering can be matched where the defence needs it.
        defence_factories = {
            "FeatureSqueezing": lambda defended: FeatureSqueezing(
                clip_values=clip_values, bit_depth=2
            ),
            "JpegCompression": lambda defended: JpegCompression(
                clip_values=clip_values, apply_predict=True, channels_first=defended.channels_first
            ),
            "SpatialSmoothing": lambda defended: SpatialSmoothing(
                channels_first=defended.channels_first
            ),
        }

        for name, factory in defence_factories.items():
            classifier_defended, _ = image_dl_estimator_defended(defenses=[name])
            assert len(classifier_defended.preprocessing_defences) == 1
            y_defended = classifier_defended.predict(x_test_mnist)

            defence = factory(classifier_defended)
            x_test_defense, _ = defence(x_test_mnist, y_test_mnist)
            y_check = classifier.predict(x_test_defense)

            # Defended estimator and manual defence agree...
            np.testing.assert_array_almost_equal(y_defended, y_check, decimal=4)
            # ...and the defence visibly perturbs the clean predictions.
            np.testing.assert_raises(
                AssertionError, np.testing.assert_array_equal, y_check, y_check_clean
            )
    except ARTTestException as e:
        art_warning(e)
def _image_dl_estimator_defended(one_classifier=False, **kwargs):
    """Build an image classifier for the active framework with preprocessing defences.

    :param one_classifier: accepted for fixture-signature compatibility (unused here).
    :param kwargs: may contain "defenses", a list of defence names to attach;
        remaining kwargs are forwarded to the framework-specific classifier getter.
    :return: tuple (classifier, sess) where sess is always None here.
    :raises ARTTestFixtureNotImplemented: if the framework has no defended classifier.
    """
    sess = None
    classifier = None
    clip_values = (0, 1)

    # Select the requested defences; default to FeatureSqueezing only.
    fs = FeatureSqueezing(bit_depth=2, clip_values=clip_values)
    defenses = []
    if kwargs.get("defenses") is None:
        defenses.append(fs)
    else:
        if "FeatureSqueezing" in kwargs.get("defenses"):
            defenses.append(fs)
        if "JpegCompression" in kwargs.get("defenses"):
            defenses.append(JpegCompression(clip_values=clip_values, apply_predict=True))
        if "SpatialSmoothing" in kwargs.get("defenses"):
            defenses.append(SpatialSmoothing())
        del kwargs["defenses"]

    if framework == "tensorflow2":
        classifier, _ = get_image_classifier_tf(**kwargs)
    if framework == "keras":
        classifier = get_image_classifier_kr(**kwargs)
    if framework == "kerastf":
        classifier = get_image_classifier_kr_tf(**kwargs)
    if framework == "pytorch":
        classifier = get_image_classifier_pt(**kwargs)

    # Raise the fixture error *before* dereferencing classifier below; previously an
    # unsupported framework hit AttributeError on `classifier.channels_first` instead
    # of the informative ARTTestFixtureNotImplemented.
    if classifier is None:
        raise ARTTestFixtureNotImplemented(
            "no defended image estimator", image_dl_estimator_defended.__name__, framework,
            {"defenses": defenses})

    # Align each defence's channel ordering with the classifier where applicable.
    for defense in defenses:
        if "channels_first" in defense.params:
            defense.channels_first = classifier.channels_first

    classifier.set_params(preprocessing_defences=defenses)
    return classifier, sess
def _image_dl_estimator_defended(one_classifier=False, **kwargs):
    """Build a Keras/KerasTF classifier wrapped with the requested preprocessing defences.

    Returns a tuple (classifier, sess); sess is always None here. Raises
    ARTTestFixtureNotImplemented for frameworks without a defended classifier.
    """
    sess = None
    clip_values = (0, 1)

    # Select the requested defences; default to FeatureSqueezing only.
    requested = kwargs.get("defenses")
    defenses = []
    if requested is None:
        defenses.append(FeatureSqueezing(bit_depth=2, clip_values=clip_values))
    else:
        if "FeatureSqueezing" in requested:
            defenses.append(FeatureSqueezing(bit_depth=2, clip_values=clip_values))
        if "JpegCompression" in requested:
            defenses.append(JpegCompression(clip_values=clip_values, apply_predict=True))
        if "SpatialSmoothing" in requested:
            defenses.append(SpatialSmoothing())
        del kwargs["defenses"]

    # Dispatch to the framework-specific model getter.
    builders = {"keras": get_image_classifier_kr, "kerastf": get_image_classifier_kr_tf}
    builder = builders.get(framework)
    if builder is None:
        raise ARTTestFixtureNotImplemented(
            "no defended image estimator", image_dl_estimator_defended.__name__, framework,
            {"defenses": defenses})

    # Wrap the ready-trained Keras model with the defences attached.
    inner = builder(**kwargs)
    classifier = KerasClassifier(
        model=inner._model, clip_values=(0, 1), preprocessing_defences=defenses
    )
    return classifier, sess
def test_window_size_error(self):
    """Constructing SpatialSmoothing with window_size=0 must raise a ValueError."""
    with pytest.raises(ValueError, match="Sliding window size must be a positive integer."):
        SpatialSmoothing(window_size=0)
def _image_dl_estimator_defended(one_classifier=False, **kwargs):
    """Return defended Keras-based classifier(s) for the active framework.

    Returns (classifier, sess) when one_classifier is True, else (list, sess);
    (None, None) for frameworks without a defended classifier.
    """
    sess = None
    classifier_list = None
    clip_values = (0, 1)

    # Select the requested defences; default to FeatureSqueezing only.
    requested = kwargs.get("defenses")
    defenses = []
    if requested is None:
        defenses.append(FeatureSqueezing(bit_depth=2, clip_values=clip_values))
    else:
        if "FeatureSqueezing" in requested:
            defenses.append(FeatureSqueezing(bit_depth=2, clip_values=clip_values))
        if "JpegCompression" in requested:
            defenses.append(JpegCompression(clip_values=clip_values, apply_predict=True))
        if "SpatialSmoothing" in requested:
            defenses.append(SpatialSmoothing())
        del kwargs["defenses"]

    if framework in ("tensorflow", "pytorch", "scikitlearn"):
        logging.warning(
            "{0} doesn't have a defended image classifier defined yet".format(framework))
    elif framework == "keras":
        # Wrap the ready-trained Keras model with the defences attached.
        classifier = get_image_classifier_kr(**kwargs)
        classifier_list = [
            KerasClassifier(
                model=classifier._model, clip_values=(0, 1), preprocessing_defences=defenses
            )
        ]
    elif framework == "kerastf":
        classifier = get_image_classifier_kr_tf(**kwargs)
        classifier_list = [
            KerasClassifier(
                model=classifier._model, clip_values=(0, 1), preprocessing_defences=defenses
            )
        ]

    if classifier_list is None:
        return None, None
    if one_classifier:
        return classifier_list[0], sess
    return classifier_list, sess
def defencer(adv_data, defence_method, clip_values, eps=16, bit_depth=8, apply_fit=False, apply_predict=True):
    """Apply a named preprocessing defence to (adversarial) input data.

    :param adv_data: data to defend | np.ndarray | [N C H W]
    :param defence_method: name of the defence to apply | str
    :param clip_values: Tuple of the form `(min, max)` representing the minimum and
        maximum values allowed for features. | `tuple`
    :param eps: defence strength used by PixelDefend. | 'int'
    :param bit_depth: The number of bits per channel for encoding the data. | 'int'
    :param apply_fit: True if applied during fitting/training. | bool
    :param apply_predict: True if applied during predicting. | bool
    :return: defended data | np.ndarray | [N C H W]
    :raises ValueError: if defence_method is not a recognized defence name.
    """
    # step 1. define a defencer
    if defence_method == "FeatureSqueezing":
        defence = FeatureSqueezing(clip_values=clip_values, bit_depth=bit_depth,
                                   apply_fit=apply_fit, apply_predict=apply_predict)
    elif defence_method == "PixelDefend":
        criterion = nn.CrossEntropyLoss()
        # The checkpoint holds the whole pickled model, so constructing a fresh
        # Pixel_cnn_net() first (as the old code did) was a dead store.
        pixel_cnn_model = torch.load("models/pixel_cnn_epoch_29.pth")
        optimizer = optim.Adam(pixel_cnn_model.parameters())
        pixel_cnn = PyTorchClassifier(
            model=pixel_cnn_model,
            clip_values=(0, 1),
            loss=criterion,
            optimizer=optimizer,
            input_shape=(3, 32, 32),
            nb_classes=10,
        )
        defence = PixelDefend(clip_values=clip_values, eps=eps, pixel_cnn=pixel_cnn,
                              apply_fit=apply_fit, apply_predict=apply_predict)
        # Convert to channels-last layout before the defence.
        adv_data = np.transpose(adv_data, [0, 3, 2, 1])
    elif defence_method == "ThermometerEncoding":
        defence = ThermometerEncoding(clip_values=clip_values)
    elif defence_method == "TotalVarMin":
        defence = TotalVarMin(clip_values=clip_values)
    elif defence_method == "JPEGCompression":
        defence = JpegCompression(clip_values=clip_values)
    elif defence_method == "SpatialSmoothing":
        defence = SpatialSmoothing(clip_values=clip_values)
        adv_data = np.transpose(adv_data, [0, 3, 2, 1])
    else:
        # Previously an unknown name fell through to a NameError on `defence`;
        # fail loudly and clearly instead.
        raise ValueError("Unrecognized defence method: {}".format(defence_method))

    # step2. defend
    res = defence(adv_data)[0]
    # NOTE(review): the output is transposed back unconditionally, but the input
    # was only transposed for PixelDefend and SpatialSmoothing — confirm the
    # resulting layout is intended for the other defences.
    res = np.transpose(res, [0, 3, 2, 1])
    return res
def test_spatial_smoothing_video_data(self, video_batch, channels_first):
    """Smoothing a video batch with window_size=2 yields the expected output."""
    sample, expected = video_batch
    smoother = SpatialSmoothing(channels_first=channels_first, window_size=2)
    assert_array_equal(smoother(sample)[0], expected)
# NOTE(review): the first statements below appear to be the tail of a plotting
# helper (they use loop index `i` and parameter-like `method_type`) whose
# definition lies outside this chunk — confirm against the full file.
ax[i][2].axis('off')
ax[i][2].set_title(label[preds_def[idx_img]])
plt.savefig('assets/plot_mnist_' + method_type + '.png')

#### Adversarial defenses ###########
### PREPROCESS ###################
# Feature Squeezing https://arxiv.org/abs/1704.01155
# Quantize inputs to 1 bit per channel, then measure the remaining fooling rate.
preproc = FeatureSqueezing(clip_values=(0, 1), bit_depth=1)
X_def, _ = preproc(X_adv)
preds_X_def = np.argmax(classifier.predict(X_def), axis=1)
fooling_rate = np.sum(preds_X_def != np.argmax(y_test, axis=1)) / y_test.shape[0]
logger.info('Fooling rate after Feature Squeezing: %.2f%%', (fooling_rate * 100))
img_plot(y_test, preds_x_test, preds_X_adv, preds_X_def, x_test, X_adv, X_def, "feature_squeezing")

# Spatial Smoothing https://arxiv.org/abs/1704.01155
# Median-filter the adversarial inputs and re-evaluate the fooling rate.
spatial_smoothing = SpatialSmoothing(window_size=4)
X_def, _ = spatial_smoothing(X_adv)
preds_X_def = np.argmax(classifier.predict(X_def), axis=1)
fooling_rate = np.sum(preds_X_def != np.argmax(y_test, axis=1)) / y_test.shape[0]
logger.info('Fooling rate after Spatial Smoothing: %.2f%%', (fooling_rate * 100))
img_plot(y_test, preds_x_test, preds_X_adv, preds_X_def, x_test, X_adv, X_def, "spatial_smoothing")

# Label Smoothing https://pdfs.semanticscholar.org/b5ec/486044c6218dd41b17d8bba502b32a12b91a.pdf
# NOTE(review): this transforms the labels, not the inputs, yet the fooling rate
# below is still computed against the un-smoothed y_test — verify this is intended.
ls = LabelSmoothing(max_value=0.5)
preds_X_adv = np.argmax(classifier.predict(X_adv), axis=1)
_, y_test_smooth = ls(None, y_test)
fooling_rate = np.sum(preds_X_adv != np.argmax(y_test, axis=1)) / y_test.shape[0]
logger.info('Fooling rate after Label Smoothing: %.2f%%', (fooling_rate * 100))

# Total Variance Minimization https://arxiv.org/abs/1711.00117
preproc = TotalVarMin(clip_values=(0,1))
def test_relation_clip_values_error(self):
    """clip_values with min >= max must raise a ValueError."""
    with pytest.raises(ValueError, match="Invalid 'clip_values': min >= max."):
        SpatialSmoothing(clip_values=(1, 0))
def test_triple_clip_values_error(self):
    """A clip_values tuple of length three must raise a ValueError."""
    with pytest.raises(
        ValueError,
        match="'clip_values' should be a tuple of 2 floats or arrays containing the allowed data range.",
    ):
        SpatialSmoothing(clip_values=(0, 1, 2))