def setUpClass(cls):
    """Cache truncated MNIST and full Iris splits on the test class."""
    # MNIST: keep only the first NB_TRAIN / NB_TEST samples so tests stay fast.
    (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist')
    cls.mnist = (
        (x_train[:NB_TRAIN], y_train[:NB_TRAIN]),
        (x_test[:NB_TEST], y_test[:NB_TEST]),
    )

    # Iris is small enough to use in full.
    (x_train, y_train), (x_test, y_test), _, _ = load_dataset('iris')
    cls.iris = (x_train, y_train), (x_test, y_test)
def setUpClass(cls):
    """Seed the RNGs and build the image (MNIST) and Iris fixtures for the class."""
    master_seed(1234)

    cls.n_train = 1000
    cls.n_test = 100
    cls.batch_size = 16
    cls.create_image_dataset(n_train=cls.n_train, n_test=cls.n_test)

    # Iris is kept in full; unpack straight into class attributes.
    (cls.x_train_iris, cls.y_train_iris), (cls.x_test_iris, cls.y_test_iris), _, _ = load_dataset("iris")

    # Pristine copies — presumably so teardown can verify/restore unmutated
    # fixtures; confirm against the class's tearDown.
    cls._x_train_iris_original = cls.x_train_iris.copy()
    cls._y_train_iris_original = cls.y_train_iris.copy()
    cls._x_test_iris_original = cls.x_test_iris.copy()
    cls._y_test_iris_original = cls.y_test_iris.copy()

    # Filter warning for scipy, removed with scipy 1.4
    import warnings

    warnings.filterwarnings("ignore", ".*the output shape of zoom.*")
def setUpClass(cls):
    """Cache a small MNIST train/test subset on the test class."""
    (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist')
    cls.x_train, cls.y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN]
    cls.x_test, cls.y_test = x_test[:NB_TEST], y_test[:NB_TEST]
def setUpClass(cls):
    """Cache a small MNIST subset, converted from arrays to Python lists."""
    (x_train, y_train), (x_test, y_test), _, _ = load_dataset("mnist")
    # list(...) conversion — the tests presumably exercise list-typed input;
    # confirm against the test bodies.
    cls.x_train, cls.y_train = list(x_train[:NB_TRAIN]), list(y_train[:NB_TRAIN])
    cls.x_test, cls.y_test = list(x_test[:NB_TEST]), list(y_test[:NB_TEST])
def setUpClass(cls):
    """Cache the full Iris train/test split on the test class."""
    (x_train, y_train), (x_test, y_test), _, _ = load_dataset('iris')
    cls.x_train, cls.y_train = x_train, y_train
    cls.x_test, cls.y_test = x_test, y_test
def setUpClass(cls):
    """Cache MNIST (as Python lists) and Iris (as pandas DataFrames)."""
    # MNIST: truncate and convert arrays to plain lists.
    (x_train, y_train), (x_test, y_test), _, _ = load_dataset("mnist")
    cls.mnist = (
        (list(x_train[:NB_TRAIN]), list(y_train[:NB_TRAIN])),
        (list(x_test[:NB_TEST]), list(y_test[:NB_TEST])),
    )

    # Iris: wrap each split in a DataFrame.
    (x_train, y_train), (x_test, y_test), _, _ = load_dataset("iris")
    cls.iris = (
        (pd.DataFrame(x_train), pd.DataFrame(y_train)),
        (pd.DataFrame(x_test), pd.DataFrame(y_test)),
    )
def setUp(self):
    """Train a small Keras CNN on a 300-sample MNIST subset and build the defence.

    Sets ``self.classifier`` (a fitted KerasClassifier) and ``self.defence``
    (an ActivationDefence over the training subset).
    """
    # str('mnist') in the original was a redundant wrapper around a literal.
    (self.x_train, self.y_train), (x_test, y_test), min_, max_ = load_dataset('mnist')
    self.x_train = self.x_train[:300]
    self.y_train = self.y_train[:300]

    k.set_learning_phase(1)

    # Small LeNet-style CNN; 1 epoch is enough for the defence tests.
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=self.x_train.shape[1:]))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

    self.classifier = KerasClassifier((min_, max_), model=model)
    self.classifier.fit(self.x_train, self.y_train, nb_epochs=1, batch_size=128)

    self.defence = ActivationDefence(self.classifier, self.x_train, self.y_train)
def GetCifar10WithModel():
    """
    Function: Load cifar-10 dataset and load a pre-trained cifar10 model.
    """
    (x_train, y_train), (x_test, y_test), min_, max_ = load_dataset('cifar10')

    # Use only a small slice of each split.
    num_samples_train = 100
    num_samples_test = 100
    x_train, y_train = x_train[0:num_samples_train], y_train[0:num_samples_train]
    x_test, y_test = x_test[0:num_samples_test], y_test[0:num_samples_test]

    class_descr = [
        'airplane', 'automobile', 'bird', 'cat', 'deer',
        'dog', 'frog', 'horse', 'ship', 'truck',
    ]

    # Fetch (or reuse cached) pre-trained ResNet weights.
    path = get_file(
        'cifar_resnet.h5',
        extract=False,
        path=DATA_PATH,
        url='https://www.dropbox.com/s/ta75pl4krya5djj/cifar_resnet.h5?dl=1')
    classifier_model = load_model(path)
    # classifier_model.summary()

    return x_train, y_train, x_test, y_test, classifier_model, min_, max_
def test_subsetscan_detector(self):
    """Detection power of SubsetScanningDetector: ~0.5 on clean data, >0.5 on adversarial."""
    (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist')
    x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN]
    x_test, y_test = x_test[:NB_TEST], y_test[:NB_TEST]

    # Keras classifier
    classifier = get_classifier_kr()

    # Generate adversarial samples:
    attacker = FastGradientMethod(classifier, eps=0.5)
    x_train_adv = attacker.generate(x_train)
    x_test_adv = attacker.generate(x_test)

    # Compile training data for detector:
    x_train_detector = np.concatenate((x_train, x_train_adv), axis=0)

    bgd, clean, anom = x_train, x_test, x_test_adv
    detector = SubsetScanningDetector(classifier, bgd, layer=1)

    # Scanning clean against clean gives chance-level detection power.
    _, _, dpwr = detector.scan(clean, clean)
    self.assertAlmostEqual(dpwr, 0.5)

    # Adversarial inputs must be detected above chance.
    _, _, dpwr = detector.scan(clean, anom)
    self.assertGreater(dpwr, 0.5)

    _, _, dpwr = detector.scan(clean, x_train_detector, 85, 15)
    self.assertGreater(dpwr, 0.5)
def train_model(dataset):
    """Train and save a CNN for the requested dataset.

    :param dataset: single-element list naming the dataset: ['mnist'] or ['cifar10'].
    :return: the trained Keras model.
    :raises ValueError: if the dataset is not supported.
    """
    (x_train, y_train), (x_test, y_test), _, _ = load_dataset(dataset[0])
    # Rescale inputs from [0, 1] to [-1, 1].
    x_test = x_test * 2 - 1
    x_train = x_train * 2 - 1

    # Pick architecture and weights path per dataset; compile/fit/save is shared.
    if dataset == ['mnist']:
        model = create_lenet_model(x_train.shape[1:])
        weights_path = './models/mnist.h5'
    elif dataset == ['cifar10']:
        model = create_cnn_model(x_train.shape[1:])
        weights_path = './models/cifar.h5'
    else:
        # Original raised a bare ValueError with no message.
        raise ValueError('Unsupported dataset: {}'.format(dataset))

    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.fit(x_train, y_train, epochs=20, batch_size=128, validation_data=(x_test, y_test))
    model.save_weights(weights_path)
    return model
def setUpClass(cls):
    """Prepare MNIST subset, a fitted functional Keras model, a temp dir, and one test image."""
    k.clear_session()
    k.set_learning_phase(1)

    (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist')
    cls.x_train = x_train[:NB_TRAIN]
    cls.y_train = y_train[:NB_TRAIN]
    cls.x_test = x_test[:NB_TEST]
    cls.y_test = y_test[:NB_TEST]

    # Load small Keras model (two-input/two-output functional model).
    cls.functional_model = cls.functional_model()
    cls.functional_model.fit([cls.x_train, cls.x_train], [cls.y_train, cls.y_train], nb_epoch=3)

    # Temporary folder for tests
    cls.test_dir = tempfile.mkdtemp()

    # Download one ImageNet pic for tests. Silently skipped on non-200 responses
    # (best-effort; tests needing the image presumably handle its absence).
    url = 'http://farm1.static.flickr.com/163/381342603_81db58bea4.jpg'
    result = requests.get(url, stream=True)
    if result.status_code == 200:
        image = result.raw.read()
        # Bug fix: original used bare open()/close(), leaking the handle if
        # write() raised; the context manager guarantees closure.
        with open(os.path.join(cls.test_dir, 'test.jpg'), 'wb') as f:
            f.write(image)
def setUpClass(cls):
    """Build a DetectorClassifier from a PyTorch classifier plus a small conv detector."""
    (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist')
    # NHWC -> NCHW for PyTorch, as float32.
    x_train = np.swapaxes(x_train, 1, 3).astype(np.float32)
    x_test = np.swapaxes(x_test, 1, 3).astype(np.float32)
    cls.x_train, cls.y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN]
    cls.x_test, cls.y_test = x_test[:NB_TEST], y_test[:NB_TEST]

    # Define the internal classifier
    classifier = get_classifier_pt()

    # Define the internal detector: conv -> relu -> maxpool -> flatten -> linear.
    conv = nn.Conv2d(1, 16, 5)
    linear = nn.Linear(2304, 1)
    torch.nn.init.xavier_uniform_(conv.weight)
    torch.nn.init.xavier_uniform_(linear.weight)
    model = Model(nn.Sequential(conv, nn.ReLU(), nn.MaxPool2d(2, 2), Flatten(), linear))

    detector = PyTorchClassifier(
        model=model,
        loss=nn.CrossEntropyLoss(),
        optimizer=optim.Adam(model.parameters(), lr=0.01),
        input_shape=(1, 28, 28),
        nb_classes=1,
        clip_values=(0, 1),
    )

    # Define the detector-classifier
    cls.detector_classifier = DetectorClassifier(classifier=classifier, detector=detector)
def test_nursery(self):
    """Check shapes and min/max values across all loading modes of the nursery dataset."""
    # Raw mode: only shape consistency can be asserted.
    (x_train, y_train), (x_test, y_test), min_, max_ = load_nursery(raw=True)
    self.assertEqual(x_train.shape[0], y_train.shape[0])
    self.assertEqual(x_test.shape[0], y_test.shape[0])

    # Unscaled mode: integer-coded features in [0, 4].
    (x_train, y_train), (x_test, y_test), min_, max_ = load_nursery(scaled=False)
    self.assertEqual(min_, 0.0)
    self.assertEqual(max_, 4.0)
    self.assertEqual(x_train.shape[0], y_train.shape[0])
    self.assertEqual(x_test.shape[0], y_test.shape[0])

    # Default (scaled) mode.
    (x_train, y_train), (x_test, y_test), min_, max_ = load_nursery()
    self.assertAlmostEqual(min_, -1.3419307411337875, places=6)
    self.assertEqual(max_, 2.0007720517562224)
    self.assertEqual(x_train.shape[0], y_train.shape[0])
    self.assertEqual(x_test.shape[0], y_test.shape[0])

    # The generic loader must agree with the scaled direct loader.
    (x_train, y_train), (x_test, y_test), min_, max_ = load_dataset("nursery")
    self.assertAlmostEqual(min_, -1.3419307411337875, places=6)
    self.assertEqual(max_, 2.0007720517562224)
    self.assertEqual(x_train.shape[0], y_train.shape[0])
    self.assertEqual(x_test.shape[0], y_test.shape[0])
def setUpClass(cls):
    """Cache Iris reduced to a binary problem (second one-hot column as label)."""
    (x_train, y_train), (x_test, y_test), _, _ = load_dataset('iris')
    # change iris to binary problem, so it is learnable for GPC
    cls.x_train, cls.y_train = x_train, y_train[:, 1]
    cls.x_test, cls.y_test = x_test, y_test[:, 1]
def setUpClass(cls):
    """Cache an MNIST subset and a shared Keras classifier for the class."""
    k.set_learning_phase(1)

    (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist')
    cls.mnist = (
        (x_train[:NB_TRAIN], y_train[:NB_TRAIN]),
        (x_test[:NB_TEST], y_test[:NB_TEST]),
    )

    # Keras classifier
    cls.classifier_k = get_classifier_kr()
def setUpClass(cls):
    """Cache an MNIST subset and create a temporary working directory."""
    (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist')
    cls.x_train, cls.y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN]
    cls.x_test, cls.y_test = x_test[:NB_TEST], y_test[:NB_TEST]

    # Temporary folder for tests
    cls.test_dir = tempfile.mkdtemp()
def setUpClass(cls):
    """Cache an MNIST subset and build Keras, TensorFlow and PyTorch classifiers,
    logging each one's accuracy on the full train and test splits."""

    def _accuracy(predictions, labels):
        # Fraction of samples whose arg-max prediction matches the one-hot label.
        return np.sum(np.argmax(predictions, axis=1) == np.argmax(labels, axis=1)) / labels.shape[0]

    (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist')
    cls.x_train, cls.y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN]
    cls.x_test, cls.y_test = x_test[:NB_TEST], y_test[:NB_TEST]

    # Keras classifier — evaluated on the FULL splits, matching the original.
    cls.classifier_k = get_classifier_kr()
    scores = cls.classifier_k._model.evaluate(x_train, y_train)
    logger.info('[Keras, MNIST] Accuracy on training set: %.2f%%', (scores[1] * 100))
    scores = cls.classifier_k._model.evaluate(x_test, y_test)
    logger.info('[Keras, MNIST] Accuracy on test set: %.2f%%', (scores[1] * 100))

    # Create basic CNN on MNIST using TensorFlow
    cls.classifier_tf, sess = get_classifier_tf()
    scores = get_labels_np_array(cls.classifier_tf.predict(x_train))
    logger.info('[TF, MNIST] Accuracy on training set: %.2f%%', (_accuracy(scores, y_train) * 100))
    scores = get_labels_np_array(cls.classifier_tf.predict(x_test))
    logger.info('[TF, MNIST] Accuracy on test set: %.2f%%', (_accuracy(scores, y_test) * 100))

    # Create basic PyTorch model; inputs become NCHW float32.
    cls.classifier_py = get_classifier_pt()
    x_train, x_test = np.swapaxes(x_train, 1, 3), np.swapaxes(x_test, 1, 3)
    scores = get_labels_np_array(cls.classifier_py.predict(x_train.astype(np.float32)))
    logger.info('[PyTorch, MNIST] Accuracy on training set: %.2f%%', (_accuracy(scores, y_train) * 100))
    scores = get_labels_np_array(cls.classifier_py.predict(x_test.astype(np.float32)))
    logger.info('[PyTorch, MNIST] Accuracy on test set: %.2f%%', (_accuracy(scores, y_test) * 100))
def fix_get_cifar10_data():
    """
    Get the first 10 samples of the cifar10 test set

    :return: First 10 sample/label pairs of the cifar10 test dataset.
    """
    nb_test = 10
    (_, _), (x_test, y_test), _, _ = load_dataset("cifar10")
    # Collapse one-hot labels to class indices before slicing.
    labels = np.argmax(y_test, axis=1)
    return x_test[:nb_test], labels[:nb_test]
def setUpClass(cls):
    """Cache an MNIST subset and a shared Keras image classifier."""
    (x_train, y_train), (x_test, y_test), _, _ = load_dataset("mnist")
    cls.mnist = (
        (x_train[:NB_TRAIN], y_train[:NB_TRAIN]),
        (x_test[:NB_TEST], y_test[:NB_TEST]),
    )

    # Keras classifier
    cls.classifier_k = get_image_classifier_kr()
def create_image_dataset(cls, n_train, n_test):
    """Cache an MNIST subset of the given sizes, plus pristine copies.

    :param n_train: number of training samples to keep.
    :param n_test: number of test samples to keep.
    """
    (x_train_mnist, y_train_mnist), (x_test_mnist, y_test_mnist), _, _ = load_dataset("mnist")
    cls.x_train_mnist = x_train_mnist[:n_train]
    cls.y_train_mnist = y_train_mnist[:n_train]
    cls.x_test_mnist = x_test_mnist[:n_test]
    cls.y_test_mnist = y_test_mnist[:n_test]

    # Keep untouched copies — presumably so teardown can detect/undo mutation;
    # confirm against the class's tearDown.
    cls._x_train_mnist_original = cls.x_train_mnist.copy()
    cls._y_train_mnist_original = cls.y_train_mnist.copy()
    cls._x_test_mnist_original = cls.x_test_mnist.copy()
    cls._y_test_mnist_original = cls.y_test_mnist.copy()
def test_multi_attack_mnist(self):
    """
    Test the adversarial trainer using two attackers: FGSM and DeepFool. The source and target models of the
    attack are two CNNs on MNIST trained for 5 epochs. FGSM and DeepFool both generate the attack images on the
    same source classifier. The test checks if accuracy on adversarial samples increases after adversarially
    training the model.

    :return: None
    """
    session = tf.Session()
    k.set_session(session)

    # Load MNIST
    (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist')
    x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN]
    x_test, y_test = x_test[:NB_TEST], y_test[:NB_TEST]
    im_shape = x_train[0].shape

    comp_params = {'loss': 'categorical_crossentropy', 'optimizer': 'adam', 'metrics': ['accuracy']}
    params = {'epochs': 5, 'batch_size': BATCH_SIZE}

    # Create and fit target classifier
    classifier_tgt = CNN(im_shape, dataset='mnist')
    classifier_tgt.compile(comp_params)
    classifier_tgt.fit(x_train, y_train, **params)

    # Create and fit source classifier.
    # Bug fix: the original called classifier_tgt.fit here a second time,
    # leaving the source classifier completely untrained.
    classifier_src = CNN(im_shape, dataset='mnist')
    classifier_src.compile(comp_params)
    classifier_src.fit(x_train, y_train, **params)

    # Create FGSM and DeepFool attackers on the same source classifier.
    adv1 = FastGradientMethod(classifier_src, session)
    adv2 = DeepFool(classifier_src, session)
    x_adv = np.vstack((adv1.generate(x_test), adv2.generate(x_test)))
    y_adv = np.vstack((y_test, y_test))  # labels duplicated for both attacks
    acc = classifier_tgt.evaluate(x_adv, y_adv)

    # Perform adversarial training
    adv_trainer = AdversarialTrainer(classifier_tgt, [adv1, adv2])
    adv_trainer.fit(x_train, y_train, **params)

    # Evaluate that accuracy on adversarial sample has improved
    acc_adv_trained = adv_trainer.classifier.evaluate(x_adv, y_adv)
    self.assertGreaterEqual(acc_adv_trained, acc)
def setUpClass(cls):
    """Fit an XGBClassifier on Iris and wrap it as an ART XGBoostClassifier."""
    np.random.seed(seed=1234)

    (x_train, y_train), (x_test, y_test), _, _ = load_dataset('iris')
    cls.x_train, cls.y_train = x_train, y_train
    cls.x_test = x_test
    # Test labels as class indices; training labels stay one-hot on the class.
    cls.y_test = np.argmax(y_test, axis=1)

    model = xgb.XGBClassifier(n_estimators=30, max_depth=5)
    # xgboost's sklearn API expects class indices, hence the argmax.
    model.fit(x_train, np.argmax(y_train, axis=1))
    cls.classifier = XGBoostClassifier(model=model)
def test_stl(self):
    """The direct STL loader and the generic loader must agree on ranges and shapes."""
    for loader in (lambda: load_stl(), lambda: load_dataset("stl10")):
        (x_train, y_train), (x_test, y_test), min_, max_ = loader()
        self.assertAlmostEqual(min_, 0.0, places=6)
        self.assertEqual(max_, 1.0)
        self.assertEqual(x_train.shape[0], y_train.shape[0])
        self.assertEqual(x_test.shape[0], y_test.shape[0])
def setUpClass(cls):
    """Cache MNIST flattened to 784-feature vectors, truncated for speed."""
    (x_train, y_train), (x_test, y_test), _, _ = load_dataset("mnist")
    cls.n_classes = 10
    cls.n_features = 28 * 28

    # Flatten each 28x28 image into a single feature vector.
    x_train = x_train.reshape((x_train.shape[0], cls.n_features))
    x_test = x_test.reshape((x_test.shape[0], cls.n_features))

    cls.x_train, cls.y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN]
    cls.x_test, cls.y_test = x_test[:NB_TEST], y_test[:NB_TEST]
def setUpClass(cls):
    """Cache an MNIST subset and build a two-member EnsembleClassifier."""
    k.clear_session()

    (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist')
    cls.x_train, cls.y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN]
    cls.x_test, cls.y_test = x_test[:NB_TEST], y_test[:NB_TEST]

    # Use twice the same classifier for unittesting, in application they would be different
    members = [get_classifier_kr(), get_classifier_kr()]
    cls.ensemble = EnsembleClassifier(classifiers=members, clip_values=(0, 1))
def setUpClass(cls):
    """Cache an MNIST subset and build Keras, TensorFlow and PyTorch classifiers,
    logging each one's accuracy on the full train and test splits."""

    def _acc(predictions, labels):
        # Fraction of arg-max matches against one-hot labels.
        return np.sum(np.argmax(predictions, axis=1) == np.argmax(labels, axis=1)) / labels.shape[0]

    k.set_learning_phase(1)

    # Get MNIST
    (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist')
    x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN]
    x_test, y_test = x_test[:NB_TEST], y_test[:NB_TEST]
    cls.mnist = (x_train, y_train), (x_test, y_test)

    # Keras classifier
    cls.classifier_k, sess = get_classifier_kr()
    scores = cls.classifier_k._model.evaluate(x_train, y_train)
    logger.info('[Keras, MNIST] Accuracy on training set: %.2f%%', scores[1] * 100)
    scores = cls.classifier_k._model.evaluate(x_test, y_test)
    logger.info('[Keras, MNIST] Accuracy on test set: %.2f%%', scores[1] * 100)

    # Create basic CNN on MNIST using TensorFlow
    cls.classifier_tf, sess = get_classifier_tf()
    scores = get_labels_np_array(cls.classifier_tf.predict(x_train))
    logger.info('[TF, MNIST] Accuracy on training set: %.2f%%', _acc(scores, y_train) * 100)
    scores = get_labels_np_array(cls.classifier_tf.predict(x_test))
    logger.info('[TF, MNIST] Accuracy on test set: %.2f%%', _acc(scores, y_test) * 100)

    # Create basic PyTorch model; swap to NCHW.
    # NOTE(review): unlike the sibling setup, inputs are NOT cast to float32
    # before predict — confirm the PyTorch classifier accepts float64 here.
    cls.classifier_py = get_classifier_pt()
    x_train, x_test = np.swapaxes(x_train, 1, 3), np.swapaxes(x_test, 1, 3)
    scores = get_labels_np_array(cls.classifier_py.predict(x_train))
    logger.info('[PyTorch, MNIST] Accuracy on training set: %.2f%%', _acc(scores, y_train) * 100)
    scores = get_labels_np_array(cls.classifier_py.predict(x_test))
    logger.info('[PyTorch, MNIST] Accuracy on test set: %.2f%%', _acc(scores, y_test) * 100)
def fix_get_mnist_data():
    """
    Get the first 100 samples of the mnist test set with channels first format

    :return: First 100 sample/label pairs of the MNIST test dataset.
    """
    nb_test = 100
    (_, _), (x_test, y_test), _, _ = load_dataset("mnist")

    # Move the channel axis to position 1 (channels-first).
    x_test = np.expand_dims(np.squeeze(x_test), axis=1)
    # One-hot labels -> class indices.
    labels = np.argmax(y_test, axis=1)

    return x_test[:nb_test], labels[:nb_test]
def setUpClass(cls):
    """Fit a GPy Gaussian-process classifier on a binarised Iris problem."""
    np.random.seed(seed=1234)

    # make iris a two class problem for GP
    (x_train, y_train), (x_test, y_test), _, _ = load_dataset('iris')
    # change iris to binary problem, so it is learnable for GPC
    cls.iris = (x_train, y_train[:, 1]), (x_test, y_test[:, 1])
    (X, y), (x_test, y_test) = cls.iris

    # set up GPclassifier with an RBF kernel and Laplace inference.
    gpkern = GPy.kern.RBF(np.shape(X)[1])
    m = GPy.models.GPClassification(X, y.reshape(-1, 1), kernel=gpkern)
    m.inference_method = GPy.inference.latent_function_inference.laplace.Laplace()
    m.optimize(messages=True, optimizer='lbfgs')

    # get ART classifier + clean accuracy
    cls.classifier = GPyGaussianProcessClassifier(m)
def setUpClass(cls):
    """Train a booster via xgb.train on Iris and wrap it as an ART XGBoostClassifier."""
    np.random.seed(seed=1234)

    (x_train, y_train), (x_test, y_test), _, _ = load_dataset('iris')
    cls.x_train = x_train
    cls.y_train = y_train
    cls.x_test = x_test
    cls.y_test = np.argmax(y_test, axis=1)

    num_round = 10
    # Bug fix: 'metric'/'multi_logloss' are LightGBM names that XGBoost silently
    # ignores; XGBoost's parameter is 'eval_metric' with value 'mlogloss'.
    param = {'objective': 'multi:softmax', 'eval_metric': 'mlogloss', 'num_class': 3}

    # Bug fix: multi:softmax needs class-index labels, but cls.y_train is
    # one-hot (the sibling XGBClassifier setup argmaxes before fitting too).
    train_data = xgb.DMatrix(cls.x_train, label=np.argmax(cls.y_train, axis=1))
    evallist = [(train_data, 'train')]
    model = xgb.train(param, train_data, num_round, evallist)

    cls.classifier = XGBoostClassifier(model=model, nb_classes=3)
def test():
    """Evaluate a pre-trained VGG16 PyTorch classifier on clean CIFAR-10 test data."""
    (x_train, y_train), (x_test, y_test), min_, max_ = load_dataset('cifar10')
    # NHWC -> NCHW float32 for PyTorch.
    x_train = np.swapaxes(x_train, 1, 3).astype(np.float32)
    x_test = np.swapaxes(x_test, 1, 3).astype(np.float32)

    model = VGG('VGG16')
    model.load_state_dict(torch.load("./logs/pytorch_vgg16.h5.model"))

    classifier = PyTorchClassifier(
        model=model,
        clip_values=(min_, max_),
        loss=nn.CrossEntropyLoss(),
        optimizer=optim.Adam(model.parameters(), lr=1e-2),
        input_shape=(3, 32, 32),
        nb_classes=10,
    )

    predictions = classifier.predict(x_test)
    accuracy = np.sum(np.argmax(predictions, axis=1) == np.argmax(y_test, axis=1)) / len(y_test)
    print('Accuracy on benign test examples: {}%'.format(accuracy * 100))