def test_ptclassifier(self):
        """
        Third test with the PyTorchClassifier.
        :return:
        """
        # Get MNIST
        (x_train, y_train), (x_test, y_test) = self.mnist
        x_train = np.swapaxes(x_train, 1, 3)
        x_test = np.swapaxes(x_test, 1, 3)

        # Create simple CNN
        model = Model()

        # Define a loss function and optimizer
        loss_fn = nn.CrossEntropyLoss()
        optimizer = optim.Adam(model.parameters(), lr=0.01)

        # Get classifier
        ptc = PyTorchClassifier(model=model,
                                clip_values=(0, 1),
                                loss=loss_fn,
                                optimizer=optimizer,
                                input_shape=(1, 28, 28),
                                nb_classes=10)
        ptc.fit(x_train, y_train, batch_size=BATCH_SIZE, nb_epochs=1)

        # Attack
        nf = NewtonFool(ptc, max_iter=5)
        x_test_adv = nf.generate(x_test)
        self.assertFalse((x_test == x_test_adv).all())

        y_pred = ptc.predict(x_test)
        y_pred_adv = ptc.predict(x_test_adv)
        y_pred_bool = y_pred.max(axis=1, keepdims=True) == y_pred
        y_pred_max = y_pred.max(axis=1)
        y_pred_adv_max = y_pred_adv[y_pred_bool]
        self.assertTrue((y_pred_max >= y_pred_adv_max).all())
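These test snippets instantiate a `Model` class (and, further down, a `Flatten` layer) that the excerpts never define. A minimal sketch consistent with the (1, 28, 28) MNIST input shape and the 288-unit linear layer used in the sequential variant below could look like this; the exact architecture in the original test suite may differ:

import torch.nn as nn
import torch.nn.functional as F


class Flatten(nn.Module):
    """Flatten every dimension except the batch dimension."""
    def forward(self, x):
        return x.view(x.size(0), -1)


class Model(nn.Module):
    """Small MNIST CNN: (N, 1, 28, 28) input, 10 output logits."""
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(in_channels=1, out_channels=2, kernel_size=5)
        self.pool = nn.MaxPool2d(2, 2)
        self.fc = nn.Linear(2 * 12 * 12, 10)  # 288 features after conv + pool

    def forward(self, x):
        x = self.pool(F.relu(self.conv(x)))  # (N, 2, 24, 24) -> (N, 2, 12, 12)
        x = x.view(x.size(0), -1)            # (N, 288)
        return self.fc(x)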
    @classmethod
    def setUpClass(cls):
        master_seed(seed=1234)
        super().setUpClass()

        cls.x_train_mnist = np.reshape(
            cls.x_train_mnist,
            (cls.x_train_mnist.shape[0], 1, 28, 28)).astype(np.float32)
        cls.x_test_mnist = np.reshape(
            cls.x_test_mnist,
            (cls.x_test_mnist.shape[0], 1, 28, 28)).astype(np.float32)

        # Define the network
        model = nn.Sequential(nn.Conv2d(1, 2, 5),
                              nn.ReLU(), nn.MaxPool2d(2, 2), Flatten(),
                              nn.Linear(288, 10))

        # Define a loss function and optimizer
        loss_fn = nn.CrossEntropyLoss()
        optimizer = optim.Adam(model.parameters(), lr=0.01)
        classifier = PyTorchClassifier(model=model,
                                       clip_values=(0, 1),
                                       loss=loss_fn,
                                       optimizer=optimizer,
                                       input_shape=(1, 28, 28),
                                       nb_classes=10)
        classifier.fit(cls.x_train_mnist,
                       cls.y_train_mnist,
                       batch_size=100,
                       nb_epochs=1)
        cls.seq_classifier = classifier

        # Define the network
        model = Model()
        loss_fn = nn.CrossEntropyLoss()
        optimizer = optim.Adam(model.parameters(), lr=0.01)
        classifier_2 = PyTorchClassifier(model=model,
                                         clip_values=(0, 1),
                                         loss=loss_fn,
                                         optimizer=optimizer,
                                         input_shape=(1, 28, 28),
                                         nb_classes=10)
        classifier_2.fit(cls.x_train_mnist,
                         cls.y_train_mnist,
                         batch_size=100,
                         nb_epochs=1)
        cls.module_classifier = classifier_2

        cls.x_train_mnist = np.reshape(
            cls.x_train_mnist,
            (cls.x_train_mnist.shape[0], 28, 28, 1)).astype(np.float32)
        cls.x_test_mnist = np.reshape(
            cls.x_test_mnist,
            (cls.x_test_mnist.shape[0], 28, 28, 1)).astype(np.float32)
Example #3
    def test_ptclassifier(self):
        """
        Third test with the PyTorchClassifier.
        :return:
        """
        # Get MNIST
        (x_train, y_train), (x_test, y_test) = self.mnist
        x_train = np.swapaxes(x_train, 1, 3)
        x_test = np.swapaxes(x_test, 1, 3)

        # Define the network
        model = Model()

        # Define a loss function and optimizer
        loss_fn = nn.CrossEntropyLoss()
        optimizer = optim.Adam(model.parameters(), lr=0.01)

        # Get classifier
        ptc = PyTorchClassifier(model=model,
                                clip_values=(0, 1),
                                loss=loss_fn,
                                optimizer=optimizer,
                                input_shape=(1, 28, 28),
                                nb_classes=10)
        ptc.fit(x_train, y_train, batch_size=BATCH_SIZE, nb_epochs=10)

        # First attack
        cl2m = CarliniL2Method(classifier=ptc, targeted=True, max_iter=10)
        params = {'y': random_targets(y_test, ptc.nb_classes)}
        x_test_adv = cl2m.generate(x_test, **params)
        self.assertFalse((x_test == x_test_adv).all())
        self.assertTrue((x_test_adv <= 1.0001).all())
        self.assertTrue((x_test_adv >= -0.0001).all())
        target = np.argmax(params['y'], axis=1)
        y_pred_adv = np.argmax(ptc.predict(x_test_adv), axis=1)
        self.assertTrue((target == y_pred_adv).any())

        # Second attack
        cl2m = CarliniL2Method(classifier=ptc, targeted=False, max_iter=10)
        params = {'y': random_targets(y_test, ptc.nb_classes)}
        x_test_adv = cl2m.generate(x_test, **params)
        self.assertTrue((x_test_adv <= 1.0001).all())
        self.assertTrue((x_test_adv >= -0.0001).all())
        target = np.argmax(params['y'], axis=1)
        y_pred_adv = np.argmax(ptc.predict(x_test_adv), axis=1)
        self.assertTrue((target != y_pred_adv).any())

        # Third attack
        cl2m = CarliniL2Method(classifier=ptc, targeted=False, max_iter=10)
        params = {}
        x_test_adv = cl2m.generate(x_test, **params)
        self.assertFalse((x_test == x_test_adv).all())
        self.assertTrue((x_test_adv <= 1.0001).all())
        self.assertTrue((x_test_adv >= -0.0001).all())
        y_pred = np.argmax(ptc.predict(x_test), axis=1)
        y_pred_adv = np.argmax(ptc.predict(x_test_adv), axis=1)
        self.assertTrue((y_pred != y_pred_adv).any())
Example #4
def main(config_filepath):

    config = load_config(config_filepath)

    if os.path.isfile(config.model_output_path):
        click.confirm(f"Overwrite {config.model_output_path}?", abort=True)

    np.random.seed(config.seed)
    torch.manual_seed(config.seed)

    # Load data
    x = torch.load(config.x_filepath)
    y = torch.load(config.y_filepath)

    # Flatten training set
    x = x.reshape(x.shape[0], -1)

    clip_values = {}
    with open(config.clip_values_filepath, "r") as f:
        clip_values = json.load(f)
    clip_values = (
        clip_values.get("min_pixel_value"),
        clip_values.get("max_pixel_value"),
    )

    model = get_model_from_module(mnist.models, config.model_class_name)

    if not model:
        sys.exit(f"Could not load provided model {config.model_class_name}")

    classifier = PyTorchClassifier(
        model=model,
        clip_values=clip_values,
        loss=model.criterion,
        optimizer=model.optimizer,
        input_shape=(784,),
        nb_classes=10,
    )  # TODO: move these parameters to config

    # Train classifier
    classifier.fit(x, y, batch_size=config.batch_size, nb_epochs=config.num_epochs)

    # Save data
    torch.save(model, config.model_output_path)
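The helpers `load_config` and `get_model_from_module` are not part of this excerpt. As an illustration only, a minimal `get_model_from_module` that looks the class name up on the given module and instantiates it might be:

def get_model_from_module(module, class_name):
    """Return an instance of `class_name` found on `module`, or None if absent."""
    model_cls = getattr(module, class_name, None)
    return model_cls() if model_cls is not None else None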
Example #5
    def test_ptclassifier(self):
        """
        Third test with the PyTorchClassifier.
        :return:
        """
        # Get MNIST
        (x_train, y_train), (x_test, y_test) = self.mnist
        x_train = np.swapaxes(x_train, 1, 3)
        x_test = np.swapaxes(x_test, 1, 3)

        # Create simple CNN
        # Define the network
        model = Model()

        # Define a loss function and optimizer
        loss_fn = nn.CrossEntropyLoss()
        optimizer = optim.Adam(model.parameters(), lr=0.01)

        # Get classifier
        ptc = PyTorchClassifier(model=model,
                                clip_values=(0, 1),
                                loss=loss_fn,
                                optimizer=optimizer,
                                input_shape=(1, 28, 28),
                                nb_classes=10)
        ptc.fit(x_train, y_train, batch_size=BATCH_SIZE, nb_epochs=1)

        # Attack
        attack_params = {
            "max_translation": 10.0,
            "num_translations": 3,
            "max_rotation": 30.0,
            "num_rotations": 3
        }
        attack_st = SpatialTransformation(ptc)
        x_train_adv = attack_st.generate(x_train, **attack_params)

        self.assertTrue(abs(x_train_adv[0, 0, 13, 5] - 0.374206543) <= 0.01)
        # self.assertTrue(abs(attack_st.fooling_rate - 0.781) <= 0.01)

        self.assertTrue(attack_st.attack_trans_x == 0)
        self.assertTrue(attack_st.attack_trans_y == -3)
        self.assertTrue(attack_st.attack_rot == 30.0)

        x_test_adv = attack_st.generate(x_test)

        self.assertTrue(abs(x_test_adv[0, 0, 14, 14] - 0.008591662) <= 0.01)
Example #6
    def test_ptclassifier(self):
        """
        Third test with the PyTorchClassifier.
        :return:
        """
        # Get MNIST
        (x_train, y_train), (x_test, y_test) = self.mnist
        x_train = np.swapaxes(x_train, 1, 3)
        x_test = np.swapaxes(x_test, 1, 3)

        # Create simple CNN
        # Define the network
        model = Model()

        # Define a loss function and optimizer
        loss_fn = nn.CrossEntropyLoss()
        optimizer = optim.Adam(model.parameters(), lr=0.01)

        # Get classifier
        ptc = PyTorchClassifier(model=model,
                                clip_values=(0, 1),
                                loss=loss_fn,
                                optimizer=optimizer,
                                input_shape=(1, 28, 28),
                                nb_classes=10)
        ptc.fit(x_train, y_train, batch_size=BATCH_SIZE, nb_epochs=1)

        # Attack
        # TODO Launch with all possible attacks
        attack_params = {
            "attacker": "newtonfool",
            "attacker_params": {
                "max_iter": 5
            }
        }
        up = UniversalPerturbation(ptc)
        x_train_adv = up.generate(x_train, **attack_params)
        self.assertTrue((up.fooling_rate >= 0.2) or not up.converged)

        x_test_adv = x_test + up.v
        self.assertFalse((x_test == x_test_adv).all())

        train_y_pred = np.argmax(ptc.predict(x_train_adv), axis=1)
        test_y_pred = np.argmax(ptc.predict(x_test_adv), axis=1)
        self.assertFalse((np.argmax(y_test, axis=1) == test_y_pred).all())
        self.assertFalse((np.argmax(y_train, axis=1) == train_y_pred).all())
Example #7
    @classmethod
    def setUpClass(cls):
        (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist')

        x_train = np.reshape(x_train,
                             (x_train.shape[0], 1, 28, 28)).astype(np.float32)
        x_test = np.reshape(x_test,
                            (x_test.shape[0], 1, 28, 28)).astype(np.float32)

        cls.x_train = x_train[:NB_TRAIN]
        cls.y_train = y_train[:NB_TRAIN]
        cls.x_test = x_test[:NB_TEST]
        cls.y_test = y_test[:NB_TEST]

        # Define the network
        model = nn.Sequential(nn.Conv2d(1, 2, 5),
                              nn.ReLU(), nn.MaxPool2d(2, 2), Flatten(),
                              nn.Linear(288, 10))

        # Define a loss function and optimizer
        loss_fn = nn.CrossEntropyLoss()
        optimizer = optim.Adam(model.parameters(), lr=0.01)
        classifier = PyTorchClassifier(model=model,
                                       clip_values=(0, 1),
                                       loss=loss_fn,
                                       optimizer=optimizer,
                                       input_shape=(1, 28, 28),
                                       nb_classes=10)
        classifier.fit(cls.x_train, cls.y_train, batch_size=100, nb_epochs=1)
        cls.seq_classifier = classifier

        # Define the network
        model = Model()
        loss_fn = nn.CrossEntropyLoss()
        optimizer = optim.Adam(model.parameters(), lr=0.01)
        classifier_2 = PyTorchClassifier(model=model,
                                         clip_values=(0, 1),
                                         loss=loss_fn,
                                         optimizer=optimizer,
                                         input_shape=(1, 28, 28),
                                         nb_classes=10)
        classifier_2.fit(cls.x_train, cls.y_train, batch_size=100, nb_epochs=1)
        cls.module_classifier = classifier_2
Example #8
classifier1 = PyTorchClassifier(model=model1,
                                clip_values=(min_, max_),
                                loss=criterion1,
                                optimizer=optimizer1,
                                input_shape=(3, 32, 32),
                                nb_classes=100)

classifier2 = PyTorchClassifier(model=model2,
                                clip_values=(min_, max_),
                                loss=criterion2,
                                optimizer=optimizer2,
                                input_shape=(3, 32, 32),
                                nb_classes=100)

print("training...")
classifier1.fit(model1_x_train,
                model1_y_train,
                batch_size=batch_size,
                nb_epochs=n_epochs)
classifier2.fit(model2_x_train,
                model2_y_train,
                batch_size=batch_size,
                nb_epochs=n_epochs)

# evaluation
model1.eval()
model2.eval()

predictions = classifier1.predict(shared_x_test)
acc = accuracy(predictions, shared_y_test)
print('Accuracy of model1 on shared test examples: {}%'.format(acc * 100))

top_five_acc = accuracy_n(predictions, shared_y_test, 5)
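The `accuracy` and `accuracy_n` helpers used above are not defined in this excerpt. A plausible sketch, assuming one-hot encoded labels as in the other examples, is:

import numpy as np


def accuracy(predictions, y_true):
    """Top-1 accuracy for one-hot encoded labels."""
    return np.mean(np.argmax(predictions, axis=1) == np.argmax(y_true, axis=1))


def accuracy_n(predictions, y_true, n):
    """Top-n accuracy: the true class is among the n highest-scoring classes."""
    top_n = np.argsort(predictions, axis=1)[:, -n:]
    labels = np.argmax(y_true, axis=1)
    return float(np.mean([label in row for label, row in zip(labels, top_n)]))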
Example #9
model = Net()

# Step 2a: Define the loss function and the optimizer

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)

# Step 3: Create the ART classifier

classifier = PyTorchClassifier(model=model, clip_values=(min_pixel_value, max_pixel_value), loss=criterion,
                               optimizer=optimizer, input_shape=(1, 28, 28), nb_classes=10)

# Step 4: Train the ART classifier

classifier.fit(x_train, y_train, batch_size=64, nb_epochs=3)

# Step 5: Evaluate the ART classifier on benign test examples

predictions = classifier.predict(x_test)
accuracy = np.sum(np.argmax(predictions, axis=1) == np.argmax(y_test, axis=1)) / len(y_test)
print('Accuracy on benign test examples: {}%'.format(accuracy * 100))

# Step 6: Generate adversarial test examples
attack = FastGradientMethod(classifier=classifier, eps=0.2)
x_test_adv = attack.generate(x=x_test)

# Step 7: Evaluate the ART classifier on the adversarial test examples

predictions = classifier.predict(x_test_adv)
accuracy = np.sum(np.argmax(predictions, axis=1) == np.argmax(y_test, axis=1)) / len(y_test)
print('Accuracy on adversarial test examples: {}%'.format(accuracy * 100))
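The data variables (x_train, y_train, x_test, y_test) and the pixel-value bounds in this example are assumed to come from a loader that is not shown, and `Net` is likewise left undefined. One way to produce the missing data, using ART's own MNIST loader and moving the channel axis to match the (1, 28, 28) input shape, would be:

import numpy as np
from art.utils import load_mnist

# load_mnist returns NHWC images together with the pixel value range
(x_train, y_train), (x_test, y_test), min_pixel_value, max_pixel_value = load_mnist()

# PyTorch models expect NCHW, so move the channel axis
x_train = np.transpose(x_train, (0, 3, 1, 2)).astype(np.float32)
x_test = np.transpose(x_test, (0, 3, 1, 2)).astype(np.float32)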
Example #10
# Step 2: Create the model

vgg_ver = "VGG16"
model = VGG(vgg_ver)

# Step 2a: Define the loss function and the optimizer

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=1e-2)

# Step 3: Create the ART classifier

classifier = PyTorchClassifier(model=model,
                               clip_values=(min_, max_),
                               loss=criterion,
                               optimizer=optimizer,
                               input_shape=(3, 32, 32),
                               nb_classes=10)

# Step 4: Train the ART classifier

classifier.fit(x_train, y_train, batch_size=128, nb_epochs=30)
classifier.save(f"pytorch_{vgg_ver}", "./logs")

# Step 5: Evaluate the ART classifier on benign test examples

predictions = classifier.predict(x_test)
accuracy = np.sum(
    np.argmax(predictions, axis=1) == np.argmax(y_test, axis=1)) / len(y_test)
print('Accuracy on benign test examples: {}%'.format(accuracy * 100))
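As in the previous example, the training data and the `min_`/`max_` bounds are assumed to be prepared elsewhere, and `VGG` is a user-supplied model class. A sketch of the missing data setup, using the same `load_dataset` helper that appears in Example #7, could be:

import numpy as np
from art.utils import load_dataset

# CIFAR-10 comes back in NHWC format together with the pixel value range
(x_train, y_train), (x_test, y_test), min_, max_ = load_dataset('cifar10')

# Convert to NCHW for the PyTorch model
x_train = np.transpose(x_train, (0, 3, 1, 2)).astype(np.float32)
x_test = np.transpose(x_test, (0, 3, 1, 2)).astype(np.float32)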