Example #1
    def test_fix(self):
        x = np.array([[[[0.1], [0.2], [0.3]], [[0.7], [0.8], [0.9]],
                       [[0.4], [0.5], [0.6]]]]).astype(np.float32)
        x_original = x.copy()

        # Start to test
        preprocess = SpatialSmoothing(window_size=3)
        x_smooth, _ = preprocess(x)
        self.assertTrue(
            (x_smooth == np.array([[[[0.2], [0.3], [0.3]], [[0.4], [0.5],
                                                            [0.6]],
                                    [[0.5], [0.6],
                                     [0.6]]]]).astype(np.float32)).all())

        preprocess = SpatialSmoothing(window_size=1)
        x_smooth, _ = preprocess(x)
        self.assertTrue((x_smooth == x).all())

        preprocess = SpatialSmoothing(window_size=2)
        x_smooth, _ = preprocess(x)
        self.assertTrue(
            (x_smooth == np.array([[[[0.1], [0.2], [0.3]], [[0.7], [0.7],
                                                            [0.8]],
                                    [[0.7], [0.7],
                                     [0.8]]]]).astype(np.float32)).all())

        # Check that x has not been modified by attack and classifier
        self.assertAlmostEqual(float(np.max(np.abs(x_original - x))),
                               0.0,
                               delta=0.00001)
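For reference, a minimal sketch of the pattern this test exercises, assuming the tuple-returning preprocessor API used above (window_size is fixed at construction and the call leaves the input array untouched):

# Minimal sketch; assumes the art library is installed and importable as in the
# other snippets on this page (from art.defences import SpatialSmoothing).
import numpy as np
from art.defences import SpatialSmoothing

x = np.random.rand(1, 3, 3, 1).astype(np.float32)  # NHWC batch with one image
smoother = SpatialSmoothing(window_size=3)          # median smoothing over 3x3 windows
x_smooth, _ = smoother(x)                           # returns (smoothed_x, y); x is not modified in place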
Example #2
    def test_channels(self):
        x = np.arange(9).reshape((1, 1, 3, 3))
        preprocess = SpatialSmoothing(channel_index=1)
        x_smooth, _ = preprocess(x)

        x_new = np.arange(9).reshape((1, 3, 3, 1))
        preprocess = SpatialSmoothing()
        x_new_smooth, _ = preprocess(x_new)

        self.assertTrue((x_smooth[0, 0] == x_new_smooth[0, :, :, 0]).all())
Example #3
def test_defences_predict(get_default_mnist_subset, get_image_classifier_list):
    (x_train_mnist, y_train_mnist), (x_test_mnist, y_test_mnist) = get_default_mnist_subset

    clip_values = (0, 1)
    fs = FeatureSqueezing(clip_values=clip_values, bit_depth=2)
    jpeg = JpegCompression(clip_values=clip_values, apply_predict=True)
    smooth = SpatialSmoothing()
    classifier_, _ = get_image_classifier_list(one_classifier=True)
    classifier = KerasClassifier(
        clip_values=clip_values, model=classifier_._model, preprocessing_defences=[fs, jpeg, smooth]
    )
    assert len(classifier.preprocessing_defences) == 3

    predictions_classifier = classifier.predict(x_test_mnist)

    # Apply the same defences by hand
    x_test_defense = x_test_mnist
    x_test_defense, _ = fs(x_test_defense, y_test_mnist)
    x_test_defense, _ = jpeg(x_test_defense, y_test_mnist)
    x_test_defense, _ = smooth(x_test_defense, y_test_mnist)
    classifier, _ = get_image_classifier_list(one_classifier=True)

    predictions_check = classifier._model.predict(x_test_defense)

    # Check that the prediction results match
    np.testing.assert_array_almost_equal(predictions_classifier, predictions_check, decimal=4)
Example #4
    def _parse_defences(self, defences):
        self.defences = defences

        if defences:
            import re
            pattern = re.compile("featsqueeze[1-8]?")

            for d in defences:
                if pattern.match(d):
                    try:
                        from art.defences import FeatureSqueezing

                        bit_depth = int(d[-1])
                        self.feature_squeeze = FeatureSqueezing(bit_depth=bit_depth)
                    except:
                        raise ValueError('You must specify the bit depth for feature squeezing: featsqueeze[1-8]')

                # Add label smoothing
                if d == 'labsmooth':
                    from art.defences import LabelSmoothing
                    self.label_smooth = LabelSmoothing()

                # Add spatial smoothing
                if d == 'smooth':
                    from art.defences import SpatialSmoothing
                    self.smooth = SpatialSmoothing()
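For illustration, a hypothetical call showing the defence strings this parser recognizes; `clf` is a stand-in for whichever classifier wrapper defines _parse_defences, and the trailing digit in 'featsqueeze4' becomes the bit depth:

clf._parse_defences(['featsqueeze4', 'labsmooth', 'smooth'])
# -> clf.feature_squeeze = FeatureSqueezing(bit_depth=4)
#    clf.label_smooth    = LabelSmoothing()
#    clf.smooth          = SpatialSmoothing()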
Example #5
def def_SpatialSmoothing(x_train, x_test, y_train, y_test, x_train_adv, x_test_adv, min_, max_, file):
    train_num = 60000
    test_num = 10000
    # reshape to smooth
    x_train = x_train.reshape(train_num, 28, 28, 1)
    x_test = x_test.reshape(test_num, 28, 28, 1)
    x_train_adv = x_train_adv.reshape(5*train_num, 28, 28, 1)
    x_test_adv = x_test_adv.reshape(5*test_num, 28, 28, 1)
    # smooth
    smoother = SpatialSmoothing()
    x_train_smooth = smoother(x_train, window_size=3)
    x_test_smooth = smoother(x_test, window_size=3)
    x_train_adv_smooth = smoother(x_train_adv, window_size=3)
    x_test_adv_smooth = smoother(x_test_adv, window_size=3)
    # reshape back
    x_train_smooth = x_train_smooth.reshape(train_num, 784)
    x_test_smooth = x_test_smooth.reshape(test_num, 784)
    x_train_adv_smooth = x_train_adv_smooth.reshape(5*train_num, 784)
    x_test_adv_smooth = x_test_adv_smooth.reshape(5*test_num, 784)
    
    # train network
    classifier = create_Neural_Network(min_, max_)
    classifier.fit(x_train_smooth, y_train, nb_epochs=5, batch_size=50)
    
    # print result
    print("After SpatialSmoothing Defense\n")
    file.write("==== SpatialSmoothing Defense==== \n")
    for k in range (5):
        file.write("==== Attack %i ====\n" % (k))
        evaluate(x_train_smooth, x_test_smooth, y_train, y_test, x_train_adv_smooth[k*train_num:(k+1)*train_num], x_test_adv_smooth[k*test_num:(k+1)*test_num], y_train, y_test, classifier, file)
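Note that this snippet passes window_size at call time and uses the return value directly as an array; in the API exercised by the test snippets on this page, window_size is a constructor argument and the call returns an (x, y) tuple. A sketch of the same smoothing step in that form, assuming the tuple-returning API:

smoother = SpatialSmoothing(window_size=3)
x_train_smooth, _ = smoother(x_train)  # (smoothed array, labels); labels are None when not passed
x_test_smooth, _ = smoother(x_test)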
Example #6
    def test_failure(self):
        x = np.arange(10).reshape(5, 2)
        preprocess = SpatialSmoothing(channel_index=1)
        with self.assertRaises(ValueError) as context:
            preprocess(x)

        self.assertIn('Feature vectors detected.', str(context.exception))
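The error here is triggered because a 2-D array of feature vectors is passed where the defence expects a batch of images. A sketch of an input shape that would be accepted instead, assuming the default NHWC layout:

x_images = np.arange(10).reshape(1, 5, 2, 1).astype(np.float32)  # (N, H, W, C)
x_smooth, _ = SpatialSmoothing()(x_images)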
Example #7
    def test_defences_predict(self):
        clip_values = (0, 1)
        fs = FeatureSqueezing(clip_values=clip_values, bit_depth=2)
        jpeg = JpegCompression(clip_values=clip_values, apply_predict=True)
        smooth = SpatialSmoothing()
        classifier_ = get_classifier_kr()
        classifier = KerasClassifier(clip_values=clip_values,
                                     model=classifier_._model,
                                     defences=[fs, jpeg, smooth])
        self.assertEqual(len(classifier.defences), 3)

        predictions_classifier = classifier.predict(self.x_test)

        # Apply the same defences by hand
        x_test_defense = self.x_test
        x_test_defense, _ = fs(x_test_defense, self.y_test)
        x_test_defense, _ = jpeg(x_test_defense, self.y_test)
        x_test_defense, _ = smooth(x_test_defense, self.y_test)
        classifier = get_classifier_kr()
        predictions_check = classifier._model.predict(x_test_defense)

        # Check that the prediction results match
        np.testing.assert_array_almost_equal(predictions_classifier,
                                             predictions_check,
                                             decimal=4)
Example #8
def def_SpatialSmoothing(x_train, x_test, y_train, y_test, x_train_adv,
                         x_test_adv, min_, max_):
    # reshape to smooth
    x_train = x_train.reshape(60000, 28, 28, 1)
    x_test = x_test.reshape(10000, 28, 28, 1)
    x_train_adv = x_train_adv.reshape(60000, 28, 28, 1)
    x_test_adv = x_test_adv.reshape(10000, 28, 28, 1)
    # smooth
    smoother = SpatialSmoothing()
    x_train_smooth = smoother(x_train, window_size=3)
    x_test_smooth = smoother(x_test, window_size=3)
    x_train_adv_smooth = smoother(x_train_adv, window_size=3)
    x_test_adv_smooth = smoother(x_test_adv, window_size=3)
    # reshape back
    x_train_smooth = x_train_smooth.reshape(60000, 784)
    x_test_smooth = x_test_smooth.reshape(10000, 784)
    x_train_adv_smooth = x_train_adv_smooth.reshape(60000, 784)
    x_test_adv_smooth = x_test_adv_smooth.reshape(10000, 784)

    # train network
    classifier = create_Neural_Network(min_, max_)
    classifier.fit(x_train_smooth, y_train, nb_epochs=5, batch_size=50)

    # print result
    print("After SpatialSmoothing Defense\n")
    evaluate(x_train_smooth, x_test_smooth, y_train, y_test,
             x_train_adv_smooth, x_test_adv_smooth, classifier)
Example #9
    def test_defences_predict(self):
        from art.defences import FeatureSqueezing, JpegCompression, SpatialSmoothing

        (_, _), (x_test, y_test) = self.mnist

        clip_values = (0, 1)
        fs = FeatureSqueezing(clip_values=clip_values, bit_depth=2)
        jpeg = JpegCompression(clip_values=clip_values, apply_predict=True)
        smooth = SpatialSmoothing()
        classifier = KerasClassifier(clip_values=clip_values,
                                     model=self.model_mnist._model,
                                     defences=[fs, jpeg, smooth])
        self.assertEqual(len(classifier.defences), 3)

        preds_classifier = classifier.predict(x_test)

        # Apply the same defences by hand
        x_test_defense = x_test
        x_test_defense, _ = fs(x_test_defense, y_test)
        x_test_defense, _ = jpeg(x_test_defense, y_test)
        x_test_defense, _ = smooth(x_test_defense, y_test)
        preds_check = self.model_mnist._model.predict(x_test_defense)

        # Check that the prediction results match
        self.assertTrue((preds_classifier - preds_check <= 1e-5).all())
Example #10
    def test_ones(self):
        m, n = 10, 2
        x = np.ones((1, m, n, 3))

        # Start to test
        for window_size in range(1, 20):
            preprocess = SpatialSmoothing(window_size=window_size)
            smoothed_x, _ = preprocess(x)
            self.assertTrue((smoothed_x == 1).all())
Example #11
    def test_ones(self):
        m, n = 10, 2
        x = np.ones((1, m, n, 3))

        # Start to test
        for window_size in range(1, 10):
            logger.info("Window size: {}".format(window_size))
            preprocess = SpatialSmoothing(window_size=window_size)
            smoothed_x, _ = preprocess(x)
            np.testing.assert_array_almost_equal(smoothed_x, x, decimal=2)
Example #12
    def test_fix(self):
        x = np.array([[[[0.1], [0.2], [0.3]], [[0.7], [0.8], [0.9]],
                       [[0.4], [0.5], [0.6]]]]).astype(np.float32)

        # Start to test
        preprocess = SpatialSmoothing(window_size=3)
        x_smooth, _ = preprocess(x)
        self.assertTrue(
            (x_smooth == np.array([[[[0.2], [0.3], [0.3]], [[0.4], [0.5],
                                                            [0.6]],
                                    [[0.5], [0.6],
                                     [0.6]]]]).astype(np.float32)).all())

        preprocess = SpatialSmoothing(window_size=1)
        x_smooth, _ = preprocess(x)
        self.assertTrue((x_smooth == x).all())

        preprocess = SpatialSmoothing(window_size=2)
        x_smooth, _ = preprocess(x)
        self.assertTrue(
            (x_smooth == np.array([[[[0.1], [0.2], [0.3]], [[0.7], [0.7],
                                                            [0.8]],
                                    [[0.7], [0.7],
                                     [0.8]]]]).astype(np.float32)).all())
Example #13
def gzsl_launch(dataloader_seen, dataloader_unseen, all_vectors, criterion,
                params):

    if params["dataset"] == "CUB":
        from configs.config_CUB import MODEL_PATH, SMOOTHED_MODEL_PATH
    elif params["dataset"] == "AWA2":
        from configs.config_AWA2 import MODEL_PATH, SMOOTHED_MODEL_PATH
    elif params["dataset"] == "SUN":
        from configs.config_SUN import MODEL_PATH, SMOOTHED_MODEL_PATH

    resnet = torchvision.models.resnet101(pretrained=True).cuda()
    feature_extractor = nn.Sequential(*list(resnet.children())[:-1])

    if params["hasDefense"] and params["defense"] == "label_smooth":
        model_ale = torch.load(SMOOTHED_MODEL_PATH).cuda()
    else:
        model_ale = torch.load(MODEL_PATH).cuda()

    full_graph = FullGraph(feature_extractor, model_ale, all_vectors).cuda()
    full_graph.eval()
    optimizer = optim.SGD(full_graph.parameters(), lr=0.01, momentum=0.5)

    if params["dataset"] == "CUB":
        no_classes = 200
    elif params["dataset"] == "AWA2":
        no_classes = 50
    elif params["dataset"] == "SUN":
        no_classes = 717

    classifier = PyTorchClassifier(model=full_graph,
                                   loss=criterion,
                                   optimizer=optimizer,
                                   input_shape=(1, 150, 150),
                                   nb_classes=no_classes)

    if params["attack"] == "fgsm":
        batch_size = 1
        attack = FastGradientMethod(classifier=classifier,
                                    eps=params["fgsm_params"]["epsilon"],
                                    batch_size=batch_size)

    elif params["attack"] == "deepfool":
        batch_size = 1
        attack = DeepFool(classifier,
                          max_iter=params["deepfool_params"]["max_iter"],
                          epsilon=params["deepfool_params"]["epsilon"],
                          nb_grads=params["deepfool_params"]["nb_grads_gzsl"],
                          batch_size=batch_size)

    elif params["attack"] == "carlini_wagner":
        batch_size = params["batch_size"] if params["custom_collate"] else 1
        attack = CarliniL2Method(
            classifier,
            confidence=params["carliniwagner_params"]["confidence"],
            learning_rate=params["carliniwagner_params"]["learning_rate"],
            binary_search_steps=params["carliniwagner_params"]["binary_search_steps"],
            max_iter=params["carliniwagner_params"]["max_iter"],
            initial_const=params["carliniwagner_params"]["initial_const"],
            max_halving=params["carliniwagner_params"]["max_halving"],
            max_doubling=params["carliniwagner_params"]["max_doubling"],
            batch_size=batch_size)

    preds_seen = []
    preds_seen_defended = []

    adv_preds_seen = []
    adv_preds_seen_defended = []
    labels_seen_ = []

    start = time.time()
    if params["hasDefense"]:
        if params["defense"] == "spatial_smooth":
            defense = SpatialSmoothing(
                window_size=params["ss_params"]["window_size"])
        elif params["defense"] == "totalvar":
            defense = TotalVarMin(
                max_iter=params["totalvar_params"]["max_iter"])

    for index, sample in enumerate(dataloader_seen):
        img = sample[0].numpy()
        label = sample[1].numpy()

        if params["clean_results"]:
            if params["hasDefense"] and params["defense"] != "label_smooth":
                img_def, _ = defense(img)
                predictions_defended = classifier.predict(
                    img_def, batch_size=batch_size)
                preds_seen_defended.extend(
                    np.argmax(predictions_defended, axis=1))
            predictions = classifier.predict(img, batch_size=batch_size)
            preds_seen.extend(np.argmax(predictions, axis=1))

        img_perturbed = attack.generate(x=img)
        if params["hasDefense"] and params["defense"] != "label_smooth":
            img_perturbed_defended, _ = defense(img_perturbed)
            predictions_adv_defended = classifier.predict(
                img_perturbed_defended, batch_size=batch_size)
            adv_preds_seen_defended.extend(
                np.argmax(predictions_adv_defended, axis=1))

        predictions_adv = classifier.predict(img_perturbed,
                                             batch_size=batch_size)
        adv_preds_seen.extend(np.argmax(predictions_adv, axis=1))
        labels_seen_.extend(label)

        if index % 1000 == 0:
            print(index, len(dataloader_seen))

    labels_seen_ = np.array(labels_seen_)
    adv_preds_seen = np.array(adv_preds_seen)
    adv_preds_seen_defended = np.array(adv_preds_seen_defended)
    uniq_labels_seen = np.unique(labels_seen_)

    adv_preds_unseen = []
    adv_preds_unseen_defended = []
    labels_unseen_ = []

    if params["clean_results"]:
        preds_unseen = []
        preds_seen = np.array(preds_seen)
        preds_unseen_defended = []
        preds_seen_defended = np.array(preds_seen_defended)

    for index, sample in enumerate(dataloader_unseen):
        img = sample[0].numpy()
        label = sample[1].numpy()

        if params["clean_results"]:
            if params["hasDefense"] and params["defense"] != "label_smooth":
                img_def, _ = defense(img)
                predictions_defended = classifier.predict(
                    img_def, batch_size=batch_size)
                preds_unseen_defended.extend(
                    np.argmax(predictions_defended, axis=1))
            predictions = classifier.predict(img, batch_size=batch_size)
            preds_unseen.extend(np.argmax(predictions, axis=1))

        img_perturbed = attack.generate(x=img)
        if params["hasDefense"] and params["defense"] != "label_smooth":
            img_perturbed_defended, _ = defense(img_perturbed)
            predictions_adv_defended = classifier.predict(
                img_perturbed_defended, batch_size=batch_size)
            adv_preds_unseen_defended.extend(
                np.argmax(predictions_adv_defended, axis=1))

        predictions_adv = classifier.predict(img_perturbed,
                                             batch_size=batch_size)
        adv_preds_unseen.extend(np.argmax(predictions_adv, axis=1))
        labels_unseen_.extend(label)

        if index % 1000 == 0:
            print(index, len(dataloader_unseen))

    end = time.time()

    labels_unseen_ = np.array(labels_unseen_)
    adv_preds_unseen = np.array(adv_preds_unseen)
    adv_preds_unseen_defended = np.array(adv_preds_unseen_defended)
    uniq_labels_unseen = np.unique(labels_unseen_)

    combined_labels = np.concatenate((labels_seen_, labels_unseen_))
    combined_preds_adv = np.concatenate((adv_preds_seen, adv_preds_unseen))
    combined_preds_adv_defended = np.concatenate(
        (adv_preds_seen_defended, adv_preds_unseen_defended))

    if params["clean_results"]:
        preds_unseen = np.array(preds_unseen)
        combined_preds = np.concatenate((preds_seen, preds_unseen))

        seen, unseen, h = harmonic_score_gzsl(combined_preds, combined_labels,
                                              uniq_labels_seen,
                                              uniq_labels_unseen)
        print("GZSL Clean (s/u/h):", seen, unseen, h)

        if params["hasDefense"] and params["defense"] != "label_smooth":
            preds_unseen_defended = np.array(preds_unseen_defended)
            combined_preds_defended = np.concatenate(
                (preds_seen_defended, preds_unseen_defended))
            seen, unseen, h = harmonic_score_gzsl(combined_preds_defended,
                                                  combined_labels,
                                                  uniq_labels_seen,
                                                  uniq_labels_unseen)
            print("GZSL Clean + defended (s/u/h):", seen, unseen, h)

    seen, unseen, h = harmonic_score_gzsl(combined_preds_adv, combined_labels,
                                          uniq_labels_seen, uniq_labels_unseen)
    print("GZSL Attacked (s/u/h):", seen, unseen, h)

    if params["hasDefense"] and params["defense"] != "label_smooth":
        seen, unseen, h = harmonic_score_gzsl(combined_preds_adv_defended,
                                              combined_labels,
                                              uniq_labels_seen,
                                              uniq_labels_unseen)
        print("GZSL Attacked + defended (s/u/h):", seen, unseen, h)

    print(end - start, "seconds passed for GZSL.")
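Stripped of the dataset and attack bookkeeping, the defend-then-predict pattern this function repeats for every batch looks roughly as follows, assuming an already-constructed ART classifier and an adversarial batch img_perturbed:

defense = SpatialSmoothing(window_size=params["ss_params"]["window_size"])
img_def, _ = defense(img_perturbed)                         # smooth the (adversarial) images
preds = classifier.predict(img_def, batch_size=batch_size)  # classify the defended input
labels = np.argmax(preds, axis=1)                           # defended predictions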