Example #1
0
    def setUpClass(cls):
        """Train a small Gluon CNN on channels-first MNIST once for all tests."""
        master_seed(seed=1234, set_mxnet=True)
        super().setUpClass()

        # MNIST fixtures are NHWC; the Gluon model expects NCHW.
        cls.x_train_mnist = np.swapaxes(cls.x_train_mnist, 1, 3)
        cls.x_test_mnist = np.swapaxes(cls.x_test_mnist, 1, 3)

        # Simple CNN - this one comes from the Gluon tutorial.
        model = nn.Sequential()
        with model.name_scope():
            model.add(nn.Conv2D(channels=6, kernel_size=5, activation="relu"))
            model.add(nn.MaxPool2D(pool_size=2, strides=2))
            model.add(nn.Conv2D(channels=16, kernel_size=3, activation="relu"))
            model.add(nn.MaxPool2D(pool_size=2, strides=2))
            model.add(nn.Flatten())
            model.add(nn.Dense(120, activation="relu"))
            model.add(nn.Dense(84, activation="relu"))
            model.add(nn.Dense(10))
        model.initialize(init=init.Xavier())

        # Loss function and SGD trainer for fitting the model.
        loss_fn = gluon.loss.SoftmaxCrossEntropyLoss()
        sgd_trainer = gluon.Trainer(model.collect_params(), "sgd", {"learning_rate": 0.1})

        # Wrap the Gluon model in an ART classifier and train it briefly.
        mx_classifier = MXClassifier(
            model=model,
            loss=loss_fn,
            clip_values=(0, 1),
            input_shape=(1, 28, 28),
            nb_classes=10,
            optimizer=sgd_trainer,
        )
        mx_classifier.fit(cls.x_train_mnist, cls.y_train_mnist, batch_size=128, nb_epochs=2)
        cls.classifier = mx_classifier

        # Restore the shared fixtures to their original NHWC layout.
        cls.x_train_mnist = np.swapaxes(cls.x_train_mnist, 1, 3)
        cls.x_test_mnist = np.swapaxes(cls.x_test_mnist, 1, 3)
Example #2
0
    def setUpClass(cls):
        """Load a small MNIST subset and train a Gluon CNN classifier for the tests."""
        # Get MNIST and keep only the first NB_TRAIN / NB_TEST samples.
        (x_train, y_train), (x_test, y_test), _, _ = load_mnist()
        x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN]
        x_test, y_test = x_test[:NB_TEST], y_test[:NB_TEST]
        # NHWC -> NCHW, the layout the Gluon model expects.
        x_train = np.swapaxes(x_train, 1, 3)
        x_test = np.swapaxes(x_test, 1, 3)
        cls.mnist = (x_train, y_train), (x_test, y_test)

        # Simple CNN - this one comes from the Gluon tutorial.
        model = nn.Sequential()
        with model.name_scope():
            model.add(nn.Conv2D(channels=6, kernel_size=5, activation="relu"))
            model.add(nn.MaxPool2D(pool_size=2, strides=2))
            model.add(nn.Conv2D(channels=16, kernel_size=3, activation="relu"))
            model.add(nn.MaxPool2D(pool_size=2, strides=2))
            model.add(nn.Flatten())
            model.add(nn.Dense(120, activation="relu"))
            model.add(nn.Dense(84, activation="relu"))
            model.add(nn.Dense(10))
        model.initialize(init=init.Xavier())

        # Loss function and SGD trainer.
        loss_fn = gluon.loss.SoftmaxCrossEntropyLoss()
        sgd_trainer = gluon.Trainer(model.collect_params(), "sgd", {"learning_rate": 0.1})

        # Wrap in an ART classifier and fit it briefly on the subset.
        mx_classifier = MXClassifier(
            model=model,
            loss=loss_fn,
            clip_values=(0, 1),
            input_shape=(1, 28, 28),
            nb_classes=10,
            optimizer=sgd_trainer,
        )
        mx_classifier.fit(x_train, y_train, batch_size=128, nb_epochs=2)
        cls.classifier = mx_classifier
    def test_loss_gradient(self):
        """Loss gradients must mirror the input shape and not be identically zero."""
        # Get MNIST
        (_, _), (x_test, y_test) = self._mnist

        # Create classifier
        classifier = MXClassifier((0, 1), self._model, (1, 28, 28), 10,
                                  self._trainer)
        grads = classifier.loss_gradient(x_test, y_test)

        # assertEqual on the shape tuple replaces the convoluted
        # np.array(shape == tuple).all() and gives a readable failure message.
        self.assertEqual(grads.shape, (NB_TEST, 1, 28, 28))
        self.assertNotEqual(np.sum(grads), 0)
Example #4
0
    def test_preprocessing(self):
        """Built-in (subtract 1, divide by 2) preprocessing must match manual scaling."""
        # Get MNIST
        (_, _), (x_test, _) = self.mnist

        # Create classifier sharing the trained model, with preprocessing enabled
        classifier_preproc = MXClassifier((0, 1), self.classifier._model, (1, 28, 28), 10, self.classifier._optimizer,
                                          preprocessing=(1, 2))

        preds = self.classifier.predict((x_test - 1.) / 2)
        preds_preproc = classifier_preproc.predict(x_test)
        # assertEqual (consistent with the sibling test) instead of assertTrue(... == 0).
        self.assertEqual(np.sum(preds - preds_preproc), 0)
    def test_preprocessing(self):
        """Classifier-side preprocessing should reproduce manually scaled predictions."""
        # Get MNIST
        (_, _), (x_test, _) = self.mnist

        # Same underlying model and optimizer, but with (subtract 1, divide by 2) preprocessing.
        preprocessed_classifier = MXClassifier(
            model=self.classifier._model,
            clip_values=(0, 1),
            input_shape=(1, 28, 28),
            nb_classes=10,
            optimizer=self.classifier._optimizer,
            preprocessing=(1, 2),
        )

        expected = self.classifier.predict((x_test - 1.) / 2)
        actual = preprocessed_classifier.predict(x_test)
        self.assertEqual(np.sum(expected - actual), 0)
    def test_fit_predict(self):
        """Fit briefly and check test accuracy is clearly above random guessing."""
        (x_train, y_train), (x_test, y_test) = self._mnist

        # Fit classifier
        classifier = MXClassifier((0, 1), self._model, (1, 28, 28), 10,
                                  self._trainer)
        classifier.fit(x_train, y_train, batch_size=128, nb_epochs=2)

        predictions = classifier.predict(x_test)
        correct = np.argmax(predictions, axis=1) == np.argmax(y_test, axis=1)
        acc = np.sum(correct) / len(y_test)
        print("\nAccuracy: %.2f%%" % (acc * 100))
        self.assertGreater(acc, 0.1)
Example #7
0
    def test_preprocessing(self):
        """Predictions through built-in preprocessing must equal manual scaling."""
        # Second classifier sharing the trained model, adding (1, 2) preprocessing.
        loss = gluon.loss.SoftmaxCrossEntropyLoss()
        preprocessed_classifier = MXClassifier(
            model=self.classifier._model,
            loss=loss,
            clip_values=(0, 1),
            input_shape=(1, 28, 28),
            nb_classes=10,
            optimizer=self.classifier._optimizer,
            preprocessing=(1, 2),
        )

        manual = self.classifier.predict((self.x_test_mnist - 1.0) / 2)
        builtin = preprocessed_classifier.predict(self.x_test_mnist)
        self.assertEqual(np.sum(manual - builtin), 0)
Example #8
0
def get_classifier_mx():
    """
    Standard MXNet classifier for unit testing.

    Builds a minimal Gluon CNN (one conv layer, pooling, flatten, dense output),
    wraps it in an ART ``MXClassifier`` with an SGD trainer, and returns it
    untrained.

    :return: `MXClassifier` for 28x28x1 inputs with 10 output classes.
    """
    import mxnet
    from mxnet.gluon.nn import Conv2D, MaxPool2D, Flatten, Dense
    from art.classifiers import MXClassifier

    model = mxnet.gluon.nn.Sequential()
    with model.name_scope():
        model.add(
            Conv2D(channels=1, kernel_size=7, activation="relu"),
            MaxPool2D(pool_size=4, strides=4),
            Flatten(),
            Dense(10),
        )
    model.initialize(init=mxnet.init.Xavier())

    # Loss and optimizer (the returned classifier is NOT fitted here).
    loss = mxnet.gluon.loss.SoftmaxCrossEntropyLoss()
    trainer = mxnet.gluon.Trainer(model.collect_params(), "sgd", {"learning_rate": 0.1})

    # Wrap the Gluon model; pixel values are clipped to [0, 1], no preprocessing shift.
    mxc = MXClassifier(
        model=model,
        loss=loss,
        input_shape=(28, 28, 1),
        nb_classes=10,
        optimizer=trainer,
        ctx=None,
        channel_index=1,
        clip_values=(0, 1),
        defences=None,
        preprocessing=(0, 1),
    )

    return mxc
    def test_layers(self):
        """Activations fetched by layer index and by layer name must agree."""
        # Get MNIST
        (_, _), (x_test, _) = self._mnist
        x_test = x_test[:NB_TEST]

        classifier = MXClassifier((0, 1), self._model, (1, 28, 28), 10,
                                  self._trainer)
        self.assertEqual(len(classifier.layer_names), 7)

        layer_names = classifier.layer_names
        for i, name in enumerate(layer_names):
            act_i = classifier.get_activations(x_test, i)
            act_name = classifier.get_activations(x_test, name)
            self.assertAlmostEqual(np.sum(act_name - act_i), 0)

        # assertEqual on shape tuples replaces assertTrue(shape == ...) and
        # reports the mismatching shapes on failure.
        self.assertEqual(classifier.get_activations(x_test, 0).shape,
                         (NB_TEST, 6, 24, 24))
        self.assertEqual(classifier.get_activations(x_test, 4).shape,
                         (NB_TEST, 784))
 def test_input_shape(self):
     """The classifier must report the input shape it was constructed with."""
     mx_classifier = MXClassifier((0, 1), self._model, (1, 28, 28), 10, self._trainer)
     self.assertEqual(mx_classifier.input_shape, (1, 28, 28))
 def test_nb_classes(self):
     """The classifier must report the number of classes it was constructed with."""
     mx_classifier = MXClassifier((0, 1), self._model, (1, 28, 28), 10, self._trainer)
     self.assertEqual(mx_classifier.nb_classes, 10)
Example #12
0
    # NOTE(review): these indented lines continue a model-definition block
    # (presumably `with model.name_scope():`) that starts above this excerpt.
    model.add(Flatten())
    model.add(Dense(100, activation='relu'))
    model.add(Dense(10))
    model.initialize()

# Loss function and SGD trainer for the Gluon model.
loss = mxnet.gluon.loss.SoftmaxCrossEntropyLoss()
trainer = mxnet.gluon.Trainer(model.collect_params(), 'sgd',
                              {'learning_rate': 0.01})

# Step 3: Create the ART classifier

classifier = MXClassifier(model=model,
                          clip_values=(min_pixel_value, max_pixel_value),
                          loss=loss,
                          input_shape=(28, 28, 1),
                          nb_classes=10,
                          optimizer=trainer,
                          ctx=None,
                          channel_index=1,
                          defences=None,
                          preprocessing=(0, 1))

# Step 4: Train the ART classifier

classifier.fit(x_train, y_train, batch_size=64, nb_epochs=3)

# Step 5: Evaluate the ART classifier on benign test examples

predictions = classifier.predict(x_test)
# Fraction of samples whose argmax prediction matches the one-hot label.
accuracy = np.sum(
    np.argmax(predictions, axis=1) == np.argmax(y_test, axis=1)) / len(y_test)
print('Accuracy on benign test examples: {}%'.format(accuracy * 100))