    def test_failure_feature_vectors(self):
        x = np.random.rand(10, 3)
        preprocess = TotalVarMin()

        # Assert that value error is raised for feature vectors
        with self.assertRaises(ValueError) as context:
            preprocess(x)

        self.assertIn("Feature vectors detected.", str(context.exception))

    def test_one_channel(self):
        clip_values = (0, 1)
        x = np.random.rand(2, 28, 28, 1)
        preprocess = TotalVarMin(clip_values=clip_values)
        x_preprocessed, _ = preprocess(x)
        self.assertEqual(x_preprocessed.shape, x.shape)
        self.assertTrue((x_preprocessed >= clip_values[0]).all())
        self.assertTrue((x_preprocessed <= clip_values[1]).all())
        self.assertFalse((x_preprocessed == x).all())

    def test_three_channels(self):
        clip_values = (0, 1)
        x = np.random.rand(2, 32, 32, 3)
        x_original = x.copy()
        preprocess = TotalVarMin(clip_values=clip_values)
        x_preprocessed, _ = preprocess(x)
        self.assertEqual(x_preprocessed.shape, x.shape)
        self.assertTrue((x_preprocessed >= clip_values[0]).all())
        self.assertTrue((x_preprocessed <= clip_values[1]).all())
        self.assertFalse((x_preprocessed == x).all())
        # Check that x has not been modified by attack and classifier
        self.assertAlmostEqual(float(np.max(np.abs(x_original - x))), 0.0, delta=0.00001)
Example #4
    def test_check_params(self):
        with self.assertRaises(ValueError):
            _ = TotalVarMin(prob=-1)

        with self.assertRaises(ValueError):
            _ = TotalVarMin(norm=-1)

        with self.assertRaises(ValueError):
            _ = TotalVarMin(solver="solver")

        with self.assertRaises(ValueError):
            _ = TotalVarMin(max_iter=-1)

        with self.assertRaises(ValueError):
            _ = TotalVarMin(clip_values=(0, 1, 2))

        with self.assertRaises(ValueError):
            _ = TotalVarMin(clip_values=(1, 0))

        with self.assertRaises(ValueError):
            _ = TotalVarMin(verbose="False")
Example #5
spatial_smoothing = SpatialSmoothing(window_size=4)
X_def, _ = spatial_smoothing(X_adv)
preds_X_def = np.argmax(classifier.predict(X_def), axis=1)
fooling_rate = np.sum(preds_X_def != np.argmax(y_test, axis=1)) / y_test.shape[0]
logger.info('Fooling rate after Spatial Smoothing: %.2f%%', fooling_rate * 100)
img_plot(y_test, preds_x_test, preds_X_adv, preds_X_def, x_test, X_adv, X_def, "spatial_smoothing")

# Label Smoothing https://pdfs.semanticscholar.org/b5ec/486044c6218dd41b17d8bba502b32a12b91a.pdf
ls = LabelSmoothing(max_value=0.5)
preds_X_adv = np.argmax(classifier.predict(X_adv), axis=1)
_, y_test_smooth = ls(None, y_test)
fooling_rate = np.sum(preds_X_adv != np.argmax(y_test, axis=1)) / y_test.shape[0]
logger.info('Fooling rate after Label Smoothing: %.2f%%', fooling_rate * 100)

# Total Variance Minimization https://arxiv.org/abs/1711.00117
preproc = TotalVarMin(clip_values=(0, 1))
X_def, _ = preproc(X_adv)
preds_X_def = np.argmax(classifier.predict(X_def), axis=1)
fooling_rate = np.sum(preds_X_def != np.argmax(y_test, axis=1)) / y_test.shape[0]
logger.info('Fooling rate after Variance Minimization: %.2f%%', fooling_rate * 100)
img_plot(y_test, preds_x_test, preds_X_adv, preds_X_def, x_test, X_adv, X_def, "variance_minimization")

# Thermometer Encoding https://openreview.net/forum?id=S18Su--CW
preproc = ThermometerEncoding(clip_values=(0, 1), num_space=4)  # divided into 4 levels
X_def, _ = preproc(X_adv)
preds_X_def = np.argmax(classifier.predict(X_def[:, :, :, 1].reshape(1000, 28, 28, 1)), axis=1)  # use the 2nd level
fooling_rate = np.sum(preds_X_def != np.argmax(y_test, axis=1)) / y_test.shape[0]
logger.info('Fooling rate after Thermometer Encoding: %.2f%%', fooling_rate * 100)
img_plot(y_test, preds_x_test, preds_X_adv, preds_X_def, x_test, X_adv, X_def[:, :, :, 1].reshape(1000, 28, 28, 1), "thermometer_encoding")

# Pixel Defend (simple PixelCNN)
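# Sketch of the Pixel Defend step in the same style as the blocks above, assuming a trained
# PixelCNN wrapped as an ART classifier named `pixel_cnn` is available; eps=16 is an
# illustrative value, not taken from the original script.
preproc = PixelDefend(clip_values=(0, 1), eps=16, pixel_cnn=pixel_cnn)
X_def, _ = preproc(X_adv)
preds_X_def = np.argmax(classifier.predict(X_def), axis=1)
fooling_rate = np.sum(preds_X_def != np.argmax(y_test, axis=1)) / y_test.shape[0]
logger.info('Fooling rate after Pixel Defend: %.2f%%', fooling_rate * 100)
img_plot(y_test, preds_x_test, preds_X_adv, preds_X_def, x_test, X_adv, X_def, "pixel_defend")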
Example #6
def defencer(adv_data,
             defence_method,
             clip_values,
             eps=16,
             bit_depth=8,
             apply_fit=False,
             apply_predict=True):
    '''
    :param adv_data: adversarial examples to defend | np.ndarray | [N C H W]
    :param defence_method: name of the defence to apply | str
    :param clip_values: tuple of the form `(min, max)` representing the minimum and maximum
                        values allowed for features | tuple
    :param eps: defence strength parameter passed to PixelDefend | int
    :param bit_depth: the number of bits per channel for encoding the data (FeatureSqueezing) | int
    :param apply_fit: True if applied during fitting/training | bool
    :param apply_predict: True if applied during predicting | bool
    :return: defended data | np.ndarray | [N C H W]
    '''

    # step 1. define a defencer
    if defence_method == "FeatureSqueezing":
        defence = FeatureSqueezing(clip_values=clip_values,
                                   bit_depth=bit_depth,
                                   apply_fit=apply_fit,
                                   apply_predict=apply_predict)
    elif defence_method == "PixelDefend":
        criterion = nn.CrossEntropyLoss()
        # fm = 64
        # pixel_cnn_model = nn.Sequential(
        #     MaskedConv2d('A', 3, fm, 7, 1, 3, bias=False), nn.BatchNorm2d(fm), nn.ReLU(True),
        #     MaskedConv2d('B', fm, fm, 7, 1, 3, bias=False), nn.BatchNorm2d(fm), nn.ReLU(True),
        #     MaskedConv2d('B', fm, fm, 7, 1, 3, bias=False), nn.BatchNorm2d(fm), nn.ReLU(True),
        #     MaskedConv2d('B', fm, fm, 7, 1, 3, bias=False), nn.BatchNorm2d(fm), nn.ReLU(True),
        #     MaskedConv2d('B', fm, fm, 7, 1, 3, bias=False), nn.BatchNorm2d(fm), nn.ReLU(True),
        #     MaskedConv2d('B', fm, fm, 7, 1, 3, bias=False), nn.BatchNorm2d(fm), nn.ReLU(True),
        #     MaskedConv2d('B', fm, fm, 7, 1, 3, bias=False), nn.BatchNorm2d(fm), nn.ReLU(True),
        #     MaskedConv2d('B', fm, fm, 7, 1, 3, bias=False), nn.BatchNorm2d(fm), nn.ReLU(True),
        #     nn.Conv2d(fm, 256, 1))
        # torch.load is expected to return the full saved model, replacing the freshly constructed network
        pixel_cnn_model = Pixel_cnn_net().cuda()
        pixel_cnn_model = torch.load("models/pixel_cnn_epoch_29.pth")
        # pixel_cnn_model = PixelCNN().cuda()
        # print(pixel_cnn_model)
        optimizer = optim.Adam(pixel_cnn_model.parameters())
        pixel_cnn = PyTorchClassifier(
            model=pixel_cnn_model,
            clip_values=(0, 1),
            loss=criterion,
            optimizer=optimizer,
            input_shape=(3, 32, 32),
            nb_classes=10,
        )
        defence = PixelDefend(clip_values=clip_values,
                              eps=eps,
                              pixel_cnn=pixel_cnn,
                              apply_fit=apply_fit,
                              apply_predict=apply_predict)
        # the global transpose below cancels this one, so PixelDefend receives adv_data in its
        # original [N C H W] layout, matching the PyTorch classifier's input_shape=(3, 32, 32)
        adv_data = np.transpose(adv_data, [0, 3, 2, 1])
    elif defence_method == "ThermometerEncoding":
        defence = ThermometerEncoding(clip_values=clip_values)
    elif defence_method == "TotalVarMin":
        defence = TotalVarMin(clip_values=clip_values)
    elif defence_method == "JPEGCompression":
        defence = JpegCompression(clip_values=clip_values)
    elif defence_method == "SpatialSmoothing":
        defence = SpatialSmoothing(clip_values=clip_values)

    # move the channel axis to the last position (swap axes 1 and 3) for the ART image defences
    adv_data = np.transpose(adv_data, [0, 3, 2, 1])
    # step 2. defend
    # print(adv_data.shape)
    res = defence(adv_data)[0]
    res = np.transpose(res, [0, 3, 2, 1])
    # print(res.shape)
    return res
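
# Minimal usage sketch: the random batch below is a stand-in for real adversarial examples in
# the [N C H W] layout the docstring expects; shapes and values are illustrative assumptions.
if __name__ == "__main__":
    fake_adv = np.random.rand(8, 3, 32, 32).astype(np.float32)
    defended = defencer(fake_adv, defence_method="SpatialSmoothing", clip_values=(0.0, 1.0))
    print(defended.shape)  # (8, 3, 32, 32) after the round-trip transpose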