def attack(self, eps=0.05):
    # Create adversarial examples using FGSM.
    self.utility.print_message(NOTE, 'Creating Adversarial Examples using FGSM.')
    attack = FastGradientMethod(estimator=self.model, eps=eps)
    X_adv = attack.generate(x=self.dataset)
    return X_adv
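# A minimal, self-contained sketch of the same FGSM call outside the class above.
# Everything here (the scikit-learn model, the random toy data, the *_demo names) is
# illustrative only and not part of the original snippet; any gradient-capable ART
# estimator would work in place of the logistic regression.
import numpy as np
from sklearn.linear_model import LogisticRegression
from art.estimators.classification import SklearnClassifier
from art.attacks.evasion import FastGradientMethod

x_demo = np.random.rand(64, 4).astype(np.float32)   # toy features in [0, 1]
y_demo = np.random.randint(0, 3, size=64)           # toy labels for 3 classes
demo_classifier = SklearnClassifier(model=LogisticRegression().fit(x_demo, y_demo),
                                    clip_values=(0.0, 1.0))
demo_attack = FastGradientMethod(estimator=demo_classifier, eps=0.05)
x_demo_adv = demo_attack.generate(x=x_demo)          # adversarial counterparts of x_demo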
def test_inverse_gan(fix_get_mnist_subset, image_dl_estimator_for_attack):
    (x_train_mnist, y_train_mnist, x_test_mnist, y_test_mnist) = fix_get_mnist_subset

    gan, inverse_gan, sess = get_gan_inverse_gan_ft()
    if gan is None:
        logging.warning("Couldn't perform this test because no gan is defined for this framework configuration")
        return

    classifier_list = image_dl_estimator_for_attack(FastGradientMethod)
    if classifier_list is None:
        logging.warning("Couldn't perform this test because no classifier is defined")
        return

    classifier = classifier_list[0]

    attack = FastGradientMethod(classifier, eps=0.2)
    x_test_adv = attack.generate(x=x_test_mnist)

    inverse_gan = InverseGAN(sess=sess, gan=gan, inverse_gan=inverse_gan)
    x_test_defended = inverse_gan(x_test_adv, maxiter=1)

    assert np.mean(x_test_defended - x_test_adv) == pytest.approx(0.33819187, abs=0.05)
def test_inverse_gan(art_warning, fix_get_mnist_subset, image_dl_estimator_for_attack):
    try:
        (x_train_mnist, y_train_mnist, x_test_mnist, y_test_mnist) = fix_get_mnist_subset

        gan, inverse_gan, sess = get_gan_inverse_gan_ft()
        if gan is None:
            logging.warning("Couldn't perform this test because no gan is defined for this framework configuration")
            return

        classifier = image_dl_estimator_for_attack(FastGradientMethod)

        attack = FastGradientMethod(classifier, eps=0.2)
        x_test_adv = attack.generate(x=x_test_mnist)

        inverse_gan = InverseGAN(sess=sess, gan=gan, inverse_gan=inverse_gan)
        x_test_defended = inverse_gan(x_test_adv, maxiter=1)

        np.testing.assert_array_almost_equal(
            float(np.mean(x_test_defended - x_test_adv)),
            0.08818667382001877,
            decimal=0.01,
        )
    except ARTTestException as e:
        art_warning(e)
def generate_adv_samples(self, model_path, name):
    model = models.load_model(model_path)
    x_train, x_test, y_train, y_test = self.data_pre_process(name)
    classifier = KerasClassifier(clip_values=(np.min(x_train), np.max(x_train)), model=model)
    fgsm = FastGradientMethod(classifier, eps=0.01, minimal=True, eps_step=0.01,
                              num_random_init=35, targeted=False, batch_size=128)
    x_adv_test = fgsm.generate(x=x_test)
    return x_adv_test
def generate_adv_samples(self):
    model = models.load_model(self.model_path)
    x_train, x_test, y_train, y_test = self.data_pre_process()
    classifier = KerasClassifier(clip_values=(np.min(x_train), np.max(x_train)), model=model)
    if self.attack == 'fgsm':
        fgsm = FastGradientMethod(classifier, eps=0.01, minimal=True, eps_step=0.01,
                                  num_random_init=35, targeted=False, batch_size=128)
        x_adv_test = fgsm.generate(x=x_test)
        # Note: both branches currently return the same values; the defence flag does not
        # change what is returned here.
        if self.defence:
            return x_test, x_adv_test, y_test
        else:
            return x_test, x_adv_test, y_test
def attack_FGSM_nontargeted(dataloader, model, model_info, args, checkpoint_dir):
    """FGSM attack"""
    device = args.device
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.01)

    img_size = model_info["model_img_size"]
    n_classes = model_info["num_classes"]

    classifier = PyTorchClassifier(
        model=model,
        loss=criterion,
        clip_values=(0.0, 1.0),
        optimizer=optimizer,
        input_shape=(img_size, img_size),
        nb_classes=n_classes,
        device_type=device,
    )

    attack = FastGradientMethod(estimator=classifier, batch_size=args.batch_size)

    # Launching a non-targeted attack
    # t = args.target_class
    print("Launching FGSM nontargeted attack")

    dest_images = os.path.join(checkpoint_dir, args.model_name)
    os.makedirs(dest_images, exist_ok=True)

    # Run over the entire batch
    for data in tqdm(dataloader):
        sample, label, img_path = data
        sample = sample.to(device)

        # Launch attack
        sample_adv = attack.generate(x=sample.cpu())

        # Save the adversarial images
        img_path = [it.split("/")[-1] for it in img_path]
        for i in range(len(sample_adv)):
            _img = sample_adv[i].transpose(1, 2, 0)
            skimage.io.imsave(os.path.join(dest_images, img_path[i]), img_as_ubyte(_img))

    with open(os.path.join(dest_images, "stats.txt"), "w") as f:
        f.write("Fooling-rate was nan\n")

    return dest_images
def adv_model_fit(self):
    model = self.choose_model
    x_train, x_test, y_train, y_test = self.data_pre_process()
    classifier = KerasClassifier(clip_values=(np.min(x_train), np.max(x_train)), model=model)
    fgsm = FastGradientMethod(classifier, eps=0.01, minimal=True, eps_step=0.01,
                              num_random_init=35, targeted=False, batch_size=128)
    x_train, x_test = self.data_reshape(self.model_choice, x_train, x_test)
    x_adv_train = fgsm.generate(x=x_train)
    history = model.fit(x_adv_train, y_train, epochs=self.epochs, batch_size=32, validation_split=0.2)

    data_record = DataRecord()
    data_record.model = model
    data_record.summary = model.to_yaml()
    data_record.history = history
    data_record.epochs = self.epochs
    self.result_save(data_record, self.save_adv_dir)
def FGSM(points=10):
    from art.attacks.evasion import FastGradientMethod
    from art.estimators.classification import TensorFlowV2Classifier

    loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
    classifier = TensorFlowV2Classifier(model=model, nb_classes=10, input_shape=(28, 28, 1),
                                        loss_object=loss_object, clip_values=(0, 1),
                                        channels_first=False)

    # Craft adversarial samples with FGSM
    epsilons = [0.05 * i for i in range(points)]  # Maximum perturbation

    preds = np.argmax(classifier.predict(x_test), axis=1)
    acc = np.sum(preds == np.argmax(y_test, axis=1)) / y_test.shape[0]
    print("\nTest accuracy on normal sample: %.2f%% eps: %.2f" % (acc * 100, 0))

    accuracies = [acc]
    examples = []
    for epsilon in epsilons[1:]:
        adv_crafter = FastGradientMethod(classifier, eps=epsilon)
        x_test_adv = adv_crafter.generate(x=x_test)

        # Evaluate the classifier on the adversarial examples
        preds = np.argmax(classifier.predict(x_test_adv), axis=1)
        acc = np.sum(preds == np.argmax(y_test, axis=1)) / y_test.shape[0]
        print("\nTest accuracy on adversarial sample: %.2f%% eps: %.2f" % (acc * 100, epsilon))
        accuracies.append(acc)

        example = []
        preds = np.argmax(classifier.predict(x_test_adv), axis=1)
        labels = np.argmax(y_test, axis=1)
        for i in range(len(preds)):
            p, l = preds[i], labels[i]
            if p != l:
                orig = l
                adv = p
                ex = x_test_adv[i]
                example.append((orig, adv, ex))
                if len(example) == 5:
                    break
        examples.append(example)

    plot_accuracies(epsilons, accuracies)
    plot_examples(epsilons[1:], examples)
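# Hypothetical helper (not in the original script) that factors out the accuracy
# computation repeated above and in several other snippets below; it assumes one-hot
# labels and any fitted ART classifier.
import numpy as np

def test_accuracy(classifier, x, y_one_hot):
    # Fraction of samples whose predicted class matches the one-hot label.
    preds = np.argmax(classifier.predict(x), axis=1)
    return np.sum(preds == np.argmax(y_one_hot, axis=1)) / y_one_hot.shape[0]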
def pre_adv_model_fit(self):
    model = models.load_model(os.path.join(self.save_dir, f'{self.model_choice}/model.h5'))
    x_train, x_test, y_train, y_test = self.data_pre_process()
    classifier = KerasClassifier(clip_values=(np.min(x_train), np.max(x_train)), model=model)
    fgsm = FastGradientMethod(classifier, eps=0.01, minimal=True, eps_step=0.01,
                              num_random_init=35, targeted=False, batch_size=32)
    x_train, x_test = self.data_reshape(self.model_choice, x_train, x_test)
    x_adv_train = fgsm.generate(x=x_train)
    # x_adv_test = fgsm.generate(x=x_test)
    # adv_trainer = AdversarialTrainer(classifier, attacks=fgsm, ratio=1.0)
    # # samples = np.array(list(range(0, y_train.shape[0])))
    # # y_train = np.column_stack((samples, y_train))
    # y_train = np.reshape(y_train, (y_train.shape[0],))
    # print(y_train.shape)
    # adv_trainer.fit(x_adv_train, y_train, batch_size=128, nb_epochs=10)
    history = model.fit(x_adv_train, y_train, epochs=self.epochs, batch_size=32, validation_split=0.2)

    data_record = DataRecord()
    data_record.model = model
    data_record.summary = model.to_yaml()
    data_record.history = history
    data_record.epochs = self.epochs
    self.result_save(data_record, self.save_pre_adv_dir)
def main(args):
    assert args.dataset in ['mnist', 'cifar', 'svhn', 'tiny', 'tiny_gray'], \
        "dataset parameter must be either 'mnist', 'cifar', 'svhn', or 'tiny'"
    print('Dataset: %s' % args.dataset)
    adv_path = '/home/aaldahdo/detectors/adv_data/'

    if args.dataset == 'mnist':
        from baselineCNN.cnn.cnn_mnist import MNISTCNN as model
        model_mnist = model(mode='load', filename='cnn_{}.h5'.format(args.dataset))
        classifier = model_mnist.model
        sgd = optimizers.SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)
        classifier.compile(loss=categorical_crossentropy, optimizer=sgd, metrics=['accuracy'])
        kclassifier = KerasClassifier(model=classifier, clip_values=(0, 1))
        epsilons = [8/256, 16/256, 32/256, 64/256, 80/256, 128/256]
        epsilons1 = [5, 10, 15, 20, 25, 30, 40]
        epsilons2 = [0.125, 0.25, 0.3125, 0.5, 1, 1.5, 2]
        eps_sa = 0.3
        pa_th = 78
        # random_restart = 20
        # x_train = model_mnist.x_train
        x_test = model_mnist.x_test
        # y_train = model_mnist.y_train
        y_test = model_mnist.y_test
        y_test_labels = model_mnist.y_test_labels
        translation = 10
        rotation = 60
    elif args.dataset == 'mnist_gray':
        from baselineCNN.cnn.cnn_mnist_gray import MNISTCNN as model
        model_mnist = model(mode='load', filename='cnn_{}.h5'.format(args.dataset))
        classifier = model_mnist.model
        sgd = optimizers.SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)
        classifier.compile(loss=categorical_crossentropy, optimizer=sgd, metrics=['accuracy'])
        kclassifier = KerasClassifier(model=classifier, clip_values=(0, 1))
        epsilons = [8/256, 16/256, 32/256, 64/256, 80/256, 128/256]
        epsilons1 = [5, 10, 15, 20, 25, 30, 40]
        epsilons2 = [0.125, 0.25, 0.3125, 0.5, 1, 1.5, 2]
        eps_sa = 0.3
        pa_th = 78
        # random_restart = 20
        # x_train = model_mnist.x_train
        x_test = model_mnist.x_test
        # y_train = model_mnist.y_train
        y_test = model_mnist.y_test
        y_test_labels = model_mnist.y_test_labels
        translation = 10
        rotation = 60
    elif args.dataset == 'cifar':
        from baselineCNN.cnn.cnn_cifar10 import CIFAR10CNN as model
        model_cifar = model(mode='load', filename='cnn_{}.h5'.format(args.dataset))
        classifier = model_cifar.model
        sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        classifier.compile(loss=categorical_crossentropy, optimizer=sgd, metrics=['accuracy'])
        kclassifier = KerasClassifier(model=classifier, clip_values=(0, 1))
        epsilons = [8/256, 16/256, 32/256, 64/256, 80/256, 128/256]
        epsilons1 = [5, 10, 15, 20, 25, 30, 40]
        epsilons2 = [0.125, 0.25, 0.3125, 0.5, 1, 1.5, 2]
        eps_sa = 0.125
        pa_th = 100
        # x_train = model_cifar.x_train
        x_test = model_cifar.x_test
        # y_train = model_cifar.y_train
        y_test = model_cifar.y_test
        y_test_labels = model_cifar.y_test_labels
        translation = 8
        rotation = 30
    elif args.dataset == 'cifar_gray':
        from baselineCNN.cnn.cnn_cifar10_gray import CIFAR10CNN as model
        model_cifar = model(mode='load', filename='cnn_{}.h5'.format(args.dataset))
        classifier = model_cifar.model
        sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        classifier.compile(loss=categorical_crossentropy, optimizer=sgd, metrics=['accuracy'])
        kclassifier = KerasClassifier(model=classifier, clip_values=(0, 1))
        epsilons = [8/256, 16/256, 32/256, 64/256, 80/256, 128/256]
        epsilons1 = [5, 10, 15, 20, 25, 30, 40]
        epsilons2 = [0.125, 0.25, 0.3125, 0.5, 1, 1.5, 2]
        eps_sa = 0.125
        pa_th = 100
        # x_train = model_cifar.x_train
        x_test = model_cifar.x_test
        # y_train = model_cifar.y_train
        y_test = model_cifar.y_test
        y_test_labels = model_cifar.y_test_labels
        translation = 8
        rotation = 30
    elif args.dataset == 'svhn':
        from baselineCNN.cnn.cnn_svhn import SVHNCNN as model
        model_svhn = model(mode='load', filename='cnn_{}.h5'.format(args.dataset))
        classifier = model_svhn.model
        sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        classifier.compile(loss=categorical_crossentropy, optimizer=sgd, metrics=['accuracy'])
        kclassifier = KerasClassifier(model=classifier, clip_values=(0, 1))
        epsilons = [8/256, 16/256, 32/256, 64/256, 80/256, 128/256]
        epsilons1 = [5, 10, 15, 20, 25, 30, 40]
        epsilons2 = [0.125, 0.25, 0.3125, 0.5, 1, 1.5, 2]
        eps_sa = 0.125
        pa_th = 100
        # x_train = model_svhn.x_train
        x_test = model_svhn.x_test
        # y_train = model_svhn.y_train
        y_test = model_svhn.y_test
        y_test_labels = model_svhn.y_test_labels
        translation = 10
        rotation = 60
    elif args.dataset == 'svhn_gray':
        from baselineCNN.cnn.cnn_svhn_gray import SVHNCNN as model
        model_svhn = model(mode='load', filename='cnn_{}.h5'.format(args.dataset))
        classifier = model_svhn.model
        sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        classifier.compile(loss=categorical_crossentropy, optimizer=sgd, metrics=['accuracy'])
        kclassifier = KerasClassifier(model=classifier, clip_values=(0, 1))
        epsilons = [8/256, 16/256, 32/256, 64/256, 80/256, 128/256]
        epsilons1 = [5, 10, 15, 20, 25, 30, 40]
        epsilons2 = [0.125, 0.25, 0.3125, 0.5, 1, 1.5, 2]
        eps_sa = 0.125
        pa_th = 100
        # x_train = model_svhn.x_train
        x_test = model_svhn.x_test
        # y_train = model_svhn.y_train
        y_test = model_svhn.y_test
        y_test_labels = model_svhn.y_test_labels
        translation = 10
        rotation = 60
    elif args.dataset == 'tiny':
        from baselineCNN.cnn.cnn_tiny import TINYCNN as model
        model_tiny = model(mode='load', filename='cnn_{}.h5'.format(args.dataset))
        classifier = model_tiny.model
        sgd = optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
        classifier.compile(loss=categorical_crossentropy, optimizer=sgd, metrics=['accuracy'])
        kclassifier = KerasClassifier(model=classifier, clip_values=(0, 1))
        epsilons = [8/256, 16/256, 32/256, 64/256, 80/256, 128/256]
        epsilons1 = [5, 10, 15, 20, 25, 30, 40]
        epsilons2 = [0.125, 0.25, 0.3125, 0.5, 1, 1.5, 2]
        eps_sa = 0.125
        pa_th = 100
        # x_train = model_tiny.x_train
        x_test = model_tiny.x_test
        # y_train = model_tiny.y_train
        y_test = model_tiny.y_test
        y_test_labels = model_tiny.y_test_labels
        translation = 8
        rotation = 30
        del model_tiny
    elif args.dataset == 'tiny_gray':
        from baselineCNN.cnn.cnn_tiny_gray import TINYCNN as model
        model_tiny = model(mode='load', filename='cnn_{}.h5'.format(args.dataset))
        classifier = model_tiny.model
        sgd = optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
        classifier.compile(loss=categorical_crossentropy, optimizer=sgd, metrics=['accuracy'])
        kclassifier = KerasClassifier(model=classifier, clip_values=(0, 1))
        epsilons = [8/256, 16/256, 32/256, 64/256, 80/256, 128/256]
        epsilons1 = [5, 10, 15, 20, 25, 30, 40]
        epsilons2 = [0.125, 0.25, 0.3125, 0.5, 1, 1.5, 2]
        eps_sa = 0.125
        # x_train = model_tiny.x_train
        x_test = model_tiny.x_test
        # y_train = model_tiny.y_train
        y_test = model_tiny.y_test
        y_test_labels = model_tiny.y_test_labels
        translation = 8
        rotation = 30
        del model_tiny

    # batch_count_start = args.batch_indx
    # bsize = args.batch_size
    # batch_count_end = batch_count_start + 1

    # FGSM
    for e in epsilons:
        attack = FastGradientMethod(estimator=kclassifier, eps=e, eps_step=0.01, batch_size=256)
        adv_data = attack.generate(x=x_test)
        adv_file_path = adv_path + args.dataset + '_fgsm_' + str(e) + '.npy'
        np.save(adv_file_path, adv_data)
        print('Done - {}'.format(adv_file_path))

    # BIM
    for e in epsilons:
        attack = BasicIterativeMethod(estimator=kclassifier, eps=e, eps_step=0.01, batch_size=32,
                                      max_iter=int(e * 256 * 1.25))
        adv_data = attack.generate(x=x_test)
        adv_file_path = adv_path + args.dataset + '_bim_' + str(e) + '.npy'
        np.save(adv_file_path, adv_data)
        print('Done - {}'.format(adv_file_path))

    # PGD1
    for e in epsilons1:
        attack = ProjectedGradientDescent(estimator=kclassifier, norm=1, eps=e, eps_step=4, batch_size=32)
        adv_data = attack.generate(x=x_test)
        adv_file_path = adv_path + args.dataset + '_pgd1_' + str(e) + '.npy'
        np.save(adv_file_path, adv_data)
        print('Done - {}'.format(adv_file_path))

    # PGD2
    for e in epsilons2:
        attack = ProjectedGradientDescent(estimator=kclassifier, norm=2, eps=e, eps_step=0.1, batch_size=32)
        adv_data = attack.generate(x=x_test)
        adv_file_path = adv_path + args.dataset + '_pgd2_' + str(e) + '.npy'
        np.save(adv_file_path, adv_data)
        print('Done - {}'.format(adv_file_path))

    # PGDInf
    for e in epsilons:
        attack = ProjectedGradientDescent(estimator=kclassifier, norm=np.inf, eps=e, eps_step=0.01, batch_size=32)
        adv_data = attack.generate(x=x_test)
        adv_file_path = adv_path + args.dataset + '_pgdi_' + str(e) + '.npy'
        np.save(adv_file_path, adv_data)
        print('Done - {}'.format(adv_file_path))

    # CWi
    attack = CarliniLInfMethod(classifier=kclassifier, max_iter=200)
    adv_data = attack.generate(x=x_test)
    adv_file_path = adv_path + args.dataset + '_cwi.npy'
    np.save(adv_file_path, adv_data)
    print('Done - {}'.format(adv_file_path))

    # # CWi
    # if args.dataset=='tiny':
    #     for n, x, y in batch(x_test, y_test, batch_size=bsize):
    #         if n>=batch_count_start*bsize and n<batch_count_end*bsize:
    #             adv_file_path = adv_path + args.dataset + '_cwi_' + str(batch_count_start) + '.npy'
    #             if not os.path.isfile(adv_file_path):
    #                 attack = CarliniLInfMethod(classifier=kclassifier, max_iter=100, batch_size=bsize)
    #                 adv_data = attack.generate(x=x)
    #                 np.save(adv_file_path, adv_data)
    #                 print('Done - {}'.format(adv_file_path))

    # CW2 - SLOW
    attack = CarliniL2Method(classifier=kclassifier, max_iter=100, batch_size=1, confidence=10)
    adv_data = attack.generate(x=x_test)
    adv_file_path = adv_path + args.dataset + '_cw2.npy'
    np.save(adv_file_path, adv_data)
    print('Done - {}'.format(adv_file_path))

    # DF
    attack = DeepFool(classifier=kclassifier)
    adv_data = attack.generate(x=x_test)
    adv_file_path = adv_path + args.dataset + '_df.npy'
    np.save(adv_file_path, adv_data)
    print('Done - {}'.format(adv_file_path))

    # # DF
    # if args.dataset=='tiny':
    #     for n, x, y in batch(x_test, y_test, batch_size=bsize):
    #         if n>=batch_count_start*bsize and n<batch_count_end*bsize:
    #             attack = DeepFool(classifier=kclassifier, epsilon=9, max_iter=100)
    #             adv_data = attack.generate(x=x)
    #             adv_file_path = adv_path + args.dataset + '_df_' + str(batch_count_start) + '.npy'
    #             np.save(adv_file_path, adv_data)
    #             print('Done - {}'.format(adv_file_path))

    # Spatial transformation attack
    attack = SpatialTransformation(classifier=kclassifier, max_translation=translation, max_rotation=rotation)
    adv_data = attack.generate(x=x_test)
    adv_file_path = adv_path + args.dataset + '_sta.npy'
    np.save(adv_file_path, adv_data)
    print('Done - {}'.format(adv_file_path))

    # Square Attack
    attack = SquareAttack(estimator=kclassifier, max_iter=200, eps=eps_sa)
    adv_data = attack.generate(x=x_test, y=y_test)
    adv_file_path = adv_path + args.dataset + '_sa.npy'
    np.save(adv_file_path, adv_data)
    print('Done - {}'.format(adv_file_path))

    # HopSkipJump Attack
    y_test_next = get_next_class(y_test)
    attack = HopSkipJump(classifier=kclassifier, targeted=False, max_iter=0, max_eval=100, init_eval=10)
    iter_step = 10
    adv_data = np.zeros(x_test.shape)
    # adv_data = adv_data[0:25]
    for i in range(4):
        adv_data = attack.generate(x=x_test, x_adv_init=adv_data, resume=True)
        attack.max_iter = iter_step
        # _, acc_normal = classifier.evaluate(x_test[0:25], y_test[0:25])
        # _, acc_adv = classifier.evaluate(adv_data, y_test[0:25])
        # print('Normal accuracy - {}\nAttack accuracy - {}'.format(acc_normal, acc_adv))
        # subcount = 1
        # for i in range(0, 25):
        #     plt.subplot(5, 5, subcount)
        #     if args.dataset == 'mnist':
        #         plt.imshow(adv_data[i][:, :, 0])
        #     else:
        #         plt.imshow(adv_data[i][:, :, :])
        #     plt.suptitle(args.dataset + " sb")
        #     subcount = subcount + 1
        # plt.show()
    adv_file_path = adv_path + args.dataset + '_hop.npy'
    np.save(adv_file_path, adv_data)
    print('Done - {}'.format(adv_file_path))

    # ZOO attack
    attack = ZooAttack(classifier=kclassifier, batch_size=32)
    adv_data = attack.generate(x=x_test, y=y_test)
    adv_file_path = adv_path + args.dataset + '_zoo.npy'
    np.save(adv_file_path, adv_data)
    print('Done - {}'.format(adv_file_path))
classifier_lstm = KerasClassifier(clip_values=(np.min(x_train), np.max(x_train)), model=lstmmodel)
classifier_cnn = KerasClassifier(clip_values=(np.min(x_train), np.max(x_train)), model=cnnmodel)

# ----------------------------------------------------------
# FGSM
fgsm = FastGradientMethod(classifier_cnn, eps=0.01, minimal=True, eps_step=0.01,
                          num_random_init=35, targeted=False, batch_size=32)
x_test_adv_fgsm = fgsm.generate(x=x_test)

# JSMA
jsma = SaliencyMapMethod(classifier_lstm, batch_size=32)
x_test_adv_jsma = jsma.generate(x=x_test)

# Show the difference between the clean test set and the adversarially modified test set
adversarialFeatures(x_test, x_test_adv_fgsm, test_data)
adversarialFeatures(x_test, x_test_adv_jsma, test_data)

# Show how the DNN performs under the adversarial attacks
dnnPredfgsm = dnnmodel.predict_classes(x_test_adv_fgsm, verbose=1)
dnnPredjsma = dnnmodel.predict_classes(x_test_adv_jsma, verbose=1)
printMetrics(y_test, dnnPredfgsm)
printMetrics(y_test, dnnPredjsma)
    clip_values=(0, 1))

print("Creating adversarial attack object...\n")
fgsm = FastGradientMethod(adv_classifier, norm=np.inf, eps=eps, eps_step=0.001,
                          targeted=False, batch_size=2048, num_random_init=27)

print("Generating adversarial samples...\n")
logger.info("Craft attack on training examples")
x_train_adv = fgsm.generate(train_data)
save_samples(x_train_adv, 'adv_train', exp)

logger.info("Craft attack on test examples")
x_test_adv = fgsm.generate(test_data)
save_samples(x_test_adv, 'adv_test', exp)

print("Evaluating adversarial samples on clean model...\n")
preds = np.argmax(adv_classifier.predict(x_test_adv), axis=1)
acc = np.sum(preds == non_encoded_test_labels) / non_encoded_test_labels.shape[0]
logger.info("Classifier before adversarial training")
logger.info("Accuracy on adversarial samples: %.2f%%", (acc * 100))
logger.info("=" * 50)
def main():
    # SETTING UP DEFENCE GAN TRAINED MODELS
    # * Clone the defence gan git repo https://github.com/yogeshbalaji/InvGAN
    # * Follow the setup instructions and copy the following:
    #   * data/ to adversarial-robustness-toolbox/defence_gan/data/
    #   * output/gans/mnist to adversarial-robustness-toolbox/defence_gan/output/gans/mnist
    #   * output/gans_inv_nottrain/mnist to adversarial-robustness-toolbox/defence_gan/output/gans_inv_nottrain/mnist

    # STEP 0
    logging.info("Loading a Dataset")
    (_, _), (x_test_original, y_test_original), min_pixel_value, max_pixel_value = load_mnist()

    # TODO remove before PR request
    # batch_size = x_test_original.shape[0]
    batch_size = 1000
    (x_test, y_test) = (x_test_original[:batch_size], y_test_original[:batch_size])

    # STEP 1
    logging.info("Creating a TS1 Mnist Classifier")
    classifier = create_ts1_art_mnist_classifier(min_pixel_value, max_pixel_value)
    classifier.fit(x_test, y_test, batch_size=batch_size, nb_epochs=3)

    # Code to load the original defense_gan paper mnist classifier to reproduce paper results
    # classifier_paper = create_defense_gan_paper_mnist_art_classifier()

    # STEP 2
    logging.info("Evaluate the ART classifier on non adversarial examples")
    predictions = classifier.predict(x_test)
    accuracy_non_adv = get_accuracy(predictions, y_test)

    # STEP 3
    logging.info("Generate adversarial examples")
    attack = FastGradientMethod(classifier, eps=0.2)
    x_test_adv = attack.generate(x=x_test)

    # STEP 4
    logging.info("Evaluate the classifier on the adversarial examples")
    predictions = classifier.predict(x_test_adv)
    accuracy_adv = get_accuracy(predictions, y_test)

    # STEP 5
    logging.info("Create DefenceGAN")
    encoder = create_ts1_encoder_model(batch_size)
    generator = create_ts1_generator_model(batch_size)

    inverse_gan = InverseGAN(sess=generator._sess, gan=generator, inverse_gan=encoder)
    # defense_gan = DefenseGAN(sess=generator.sess, generator=generator)

    logging.info("Generating Defended Samples")
    x_test_defended = inverse_gan(x_test_adv, maxiter=1)

    # STEP 6
    logging.info("Evaluate the classifier on the defended examples")
    predictions = classifier.predict(x_test_defended)
    accuracy_defended = get_accuracy(predictions, y_test)

    logger.info("Accuracy on non adversarial examples: {}%".format(accuracy_non_adv))
    logger.info("Accuracy on adversarial examples: {}%".format(accuracy_adv))
    logger.info("Accuracy on defended examples: {}%".format(accuracy_defended))
                epoch, batch_idx * len(adv_batch), len(train_data_loader.dataset),
                100. * batch_idx / len(train_data_loader),
                loss.item() / len(adv_batch)))

    print('====> Epoch: {} Average loss: {:.4f}'.format(
        epoch, train_loss / len(train_data_loader.dataset)))


batch_size = 128
nb_epochs = 10

# load data
x_train, y_train, x_test, y_test = load_mnist()
x_train = x_train.transpose(0, 3, 1, 2)
x_train = x_train[0:10000]  # temporary
y_train = y_train[0:10000]  # temporary

# generate adversarial images
attack = FastGradientMethod(classifier, eps=0.1)
x_train_adv = attack.generate(x_train)

# generate train loader
x_train = torch.tensor(x_train).type(torch.FloatTensor)
x_train_adv = torch.tensor(x_train_adv).type(torch.FloatTensor)
y_train = torch.tensor(y_train).type(torch.FloatTensor)
train_data = torch.utils.data.TensorDataset(x_train_adv, x_train, y_train)
train_data_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True)

device = torch.device("cpu")
model = PuVAE(x_train.shape[1:], y_train.shape[1]).to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-3)

if __name__ == "__main__":
    for epoch in range(1, nb_epochs + 1):
def plot_attacks_acc(classifier, x, y, path_fig, dataset, title):
    '''
    Description: This function takes a classifier and a set of images with labels and plots how the
    accuracy of the model on the dataset decreases as the attack strength (perturbation size)
    increases, for three different attacks (FGSM, PGD, BIM).
    :param classifier: model to be evaluated
    :param x: list of images to be predicted on
    :param y: labels of images
    :param path_fig: path to save the plot figure
    :param dataset: name of dataset (e.g. mnist, cifar, ddsm, brain_mri, lidc)
    :param title: title to define plot figure
    :return: Figure will be saved with title
    '''
    if dataset == 'ddsm':
        eps_range = [0.00001, 0.00005, 0.0001, 0.00025, 0.0005, 0.00075, 0.001, 0.00125, 0.0015, 0.00175,
                     0.002, 0.0025, 0.003, 0.0035, 0.004, 0.0045, 0.005, 0.0055, 0.006, 0.007, 0.008]
        step_size = 0.001
    elif dataset == 'brain_mri':
        eps_range = [0.0001, 0.0005, 0.001, 0.0013, 0.0016, 0.002, 0.00225, 0.0025, 0.00275, 0.003, 0.00325,
                     0.0035, 0.00375, 0.004, 0.0045, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.012]
        step_size = 0.001
    elif dataset == 'mnist':
        eps_range = [0.0001, 0.01, 0.02, 0.05, 0.075, 0.1, 0.125, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5]
        step_size = 0.1
    elif dataset == 'cifar':
        eps_range = [0.0001, 0.001, 0.002, 0.003, 0.004, 0.005, 0.007, 0.009, 0.01, 0.015, 0.02, 0.03, 0.04, 0.05]
        step_size = 0.01
    elif dataset == 'lidc':
        eps_range = [0.0001, 0.0003, 0.0006, 0.0008, 0.001, 0.00125, 0.0015, 0.00175, 0.002, 0.0023, 0.0026,
                     0.0028, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.011, 0.012, 0.013,
                     0.014, 0.015, 0.016, 0.017, 0.018, 0.019, 0.02]
        step_size = 0.001

    nb_correct_fgsm = []
    nb_correct_pgd = []
    nb_correct_bim = []
    for eps in eps_range:
        attacker_fgsm = FastGradientMethod(classifier, eps=eps)
        attacker_pgd = ProjectedGradientDescent(classifier, eps=eps, eps_step=eps / 4, max_iter=10,
                                                num_random_init=5)
        attacker_bim = BasicIterativeMethod(classifier, eps=eps, eps_step=eps / 10, max_iter=10)
        x_fgsm = attacker_fgsm.generate(x)
        x_pgd = attacker_pgd.generate(x)
        x_bim = attacker_bim.generate(x)

        x_pred_fgsm = np.argmax(classifier.predict(x_fgsm), axis=1)
        nb_correct_fgsm += [np.sum(x_pred_fgsm == np.argmax(y, axis=1))]
        x_pred_pgd = np.argmax(classifier.predict(x_pgd), axis=1)
        nb_correct_pgd += [np.sum(x_pred_pgd == np.argmax(y, axis=1))]
        x_pred_bim = np.argmax(classifier.predict(x_bim), axis=1)
        nb_correct_bim += [np.sum(x_pred_bim == np.argmax(y, axis=1))]

    fig, ax = plt.subplots()
    ax.plot(np.array(eps_range) / step_size, 100 * np.array(nb_correct_fgsm) / y.shape[0], 'b--', label='FGSM')
    ax.plot(np.array(eps_range) / step_size, 100 * np.array(nb_correct_pgd) / y.shape[0], 'r--', label='PGD')
    ax.plot(np.array(eps_range) / step_size, 100 * np.array(nb_correct_bim) / y.shape[0], 'g--', label='BIM')
    legend = ax.legend(loc='upper right', shadow=True, fontsize='large')
    legend.get_frame().set_facecolor('#FFFFFF')
    if dataset == 'mnist':
        plt.xlabel('Perturbation (x ' + '$10^{-1}$' + ')')
    elif dataset == 'cifar':
        plt.xlabel('Perturbation (x ' + '$10^{-2}$' + ')')
    else:
        plt.xlabel('Perturbation (x ' + '$10^{-3}$' + ')')
    plt.ylabel('Accuracy (%)')
    plt.savefig(path_fig + dataset + '/' + title + '.png')
    plt.clf()

    data = [np.array(eps_range),
            np.array(nb_correct_fgsm) / y.shape[0],
            np.array(nb_correct_pgd) / y.shape[0],
            np.array(nb_correct_bim) / y.shape[0]]
    # NOTE: path_csv is assumed to be defined at module level; only path_fig is passed in.
    out = csv.writer(open(path_csv + dataset + '/' + title + '.csv', "w"), delimiter=',', quoting=csv.QUOTE_ALL)
    out.writerows(zip(*data))
    return 0
def _generate(self, x):
    attack = FastGradientMethod(self.classifier, **self._params)
    return attack.generate(x)
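# Illustrative only (not in the original snippet): the wrapper above forwards whatever
# keyword arguments were stored in self._params to FastGradientMethod, so a plausible
# hypothetical configuration could be:
# self._params = {"eps": 0.1, "eps_step": 0.1, "batch_size": 128, "targeted": False}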
classifier = TensorFlowV2Classifier(
    model=model,
    loss_object=loss_object,
    train_step=train_step,
    nb_classes=10,
    input_shape=(28, 28, 1),
    clip_values=(0, 1),
)

# Step 4: Train the ART classifier
classifier.fit(x_train, y_train, batch_size=64, nb_epochs=3)

# Step 5: Evaluate the ART classifier on benign test examples
predictions = classifier.predict(x_test)
accuracy = np.sum(np.argmax(predictions, axis=1) == np.argmax(y_test, axis=1)) / len(y_test)
print("Accuracy on benign test examples: {}%".format(accuracy * 100))

# Step 6: Generate adversarial test examples
attack = FastGradientMethod(estimator=classifier, eps=0.2)
x_test_adv = attack.generate(x=x_test)

# Step 7: Evaluate the ART classifier on adversarial test examples
predictions = classifier.predict(x_test_adv)
accuracy = np.sum(np.argmax(predictions, axis=1) == np.argmax(y_test, axis=1)) / len(y_test)
print("Accuracy on adversarial test examples: {}%".format(accuracy * 100))
def main(args):
    batch_status_message = {'status': 'Ready', 'modelurl': args.model}
    batch_count = 0
    model_filename = 'base_model.h5'
    logging.info('model={}'.format(args.model))
    location = os.path.join(ART_DATA_PATH, model_filename)
    try:
        os.remove(location)
    except OSError as error:
        pass
    path = get_file(model_filename, extract=False, path=ART_DATA_PATH, url=args.model)
    kmodel = load_model(path)
    model = KerasClassifier(kmodel, use_logits=False, clip_values=[float(args.min), float(args.max)])
    logging.info('finished acquiring model')

    logging.info('creating attack {}'.format(args.attack))
    if args.attack == 'FGM':
        attack = FastGradientMethod(model, eps=0.3, eps_step=0.01, targeted=False)
        logging.info('created FGM attack')
    elif args.attack == 'PGD':
        attack = ProjectedGradientDescent(model, eps=8, eps_step=2, max_iter=13, targeted=False,
                                          num_random_init=True)
        logging.info('created PGD attack')
    else:
        logging.error('Invalid attack provided {}; must be one of FGM, PGD'.format(args.attack))
        exit(0)
    logging.info('finished creating attack')

    logging.info('brokers={}'.format(args.brokers))
    logging.info('readtopic={}'.format(args.readtopic))
    logging.info('creating kafka consumer')
    consumer = KafkaConsumer(
        args.readtopic,
        bootstrap_servers=args.brokers,
        value_deserializer=lambda val: loads(val.decode('utf-8')))
    logging.info("finished creating kafka consumer")

    if args.dbxtoken != '':
        dbx = dropbox.Dropbox(args.dbxtoken)
        logging.info('creating kafka producer')
        producer = KafkaProducer(bootstrap_servers=args.brokers,
                                 value_serializer=lambda x: dumps(x).encode('utf-8'))
        logging.info('finished creating kafka producer')
    else:
        dbx = None

    while True:
        for message in consumer:
            if message.value['url']:
                conn = psycopg2.connect(
                    host=args.dbhost,
                    port=5432,
                    dbname=args.dbname,
                    user=args.dbusername,
                    password=args.dbpassword)
                cur = conn.cursor()
                image_url = message.value['url']
                query = 'UPDATE images SET STATUS=%s where URL=%s'
                cur.execute(query, ('Processed', image_url))
                logging.info('updated database for {}'.format(image_url))
                cur.close()
                conn.close()
                batch_count = batch_count + 1
                response = requests.get(image_url)
                img = Image.open(BytesIO(response.content))
                label = message.value['label']
                infilename = message.value['filename'].rpartition('.')[0]
                logging.info('received URL {}'.format(image_url))
                logging.info('received label {}'.format(label))
                logging.info('received filename {}'.format(infilename))
                logging.info('downloading image')
                image = np.array(img.getdata()).reshape(1, img.size[0], img.size[1], 3).astype('float32')
                logging.info('downloaded image {} and {}'.format(image.shape, image.dtype))
                images = np.ndarray(shape=(2, 32, 32, 3))
                logging.info('created images storage')
                images[0] = image
                logging.info('assigned image to images')
                adversarial = attack.generate(image)
                logging.info('adversarial image generated')
                images[1] = adversarial
                logging.info('adversarial image assigned')
                preds = model.predict(images)
                orig_inf = np.argmax(preds[0])
                adv_inf = np.argmax(preds[1])
                logging.info('original inference: {} adversarial inference: {}'.format(orig_inf, adv_inf))
                if (orig_inf != adv_inf) and (dbx is not None):
                    fs = BytesIO()
                    imout = Image.fromarray(np.uint8(adversarial[0]))
                    imout.save(fs, format='jpeg')
                    outfilename = '/images/{}_{}_adv.jpg'.format(infilename, adv_inf)
                    logging.info('Uploading file')
                    dbx.files_upload(f=fs.getvalue(), path=outfilename,
                                     mode=dropbox.files.WriteMode('overwrite', None))
                if (batch_count == int(args.batchsize)) and (dbx is not None):
                    logging.info('Sending message {} to topic {}'.format(batch_status_message, args.writetopic))
                    producer.send(args.writetopic, batch_status_message)
                    batch_count = 0
y_test = y_test[0:1000]

# load mnist CNN model in Keras
logger.info('MNIST Dataset')

# Mnist model
x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
y = tf.placeholder(tf.float32, shape=(None, 10))
mnist_model, logits = mnist_model(input_ph=x, logits=True)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
mnist_model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
mnist_model.load_weights("trained_model/mnist_model.h5")
classifier = KerasClassifier(model=mnist_model)

# generate adversarial images using FGSM
attack = FastGradientMethod(classifier, eps=0.13)
X_adv = attack.generate(x_test)
X_adv = np.clip(X_adv, 0, 1)

# accuracy
preds_x_test = np.argmax(classifier.predict(x_test), axis=1)
acc = np.sum(preds_x_test == np.argmax(y_test, axis=1)) / y_test.shape[0]
logger.info('Accuracy on clean test images: %.2f%%', (acc * 100))

# fooling rate
probs_X_adv = classifier.predict(X_adv)
preds_X_adv = np.argmax(probs_X_adv, axis=1)
fooling_rate = np.sum(preds_X_adv != np.argmax(y_test, axis=1)) / y_test.shape[0]
logger.info('Fooling rate of FGSM attacks: %.2f%%', (fooling_rate * 100))

# clip
def norm(x):
    return np.clip(x, 0, 1)
            continue
        classifier = load_classifier_ensemble(models_to_ensemble, data=data)
    else:
        raise ValueError('incorrect ensemble_inner arg')

    # create attack
    if args.attack_name == 'FGM':
        attack = FastGradientMethod(estimator=classifier, targeted=False, norm=args.norm,
                                    eps=args.norm_inner, num_random_init=args.n_random_init_inner,
                                    batch_size=args.batch_size)
    elif args.attack_name == 'PGD':
        attack = ProjectedGradientDescent(estimator=classifier, targeted=False, max_iter=args.n_iter_attack,
                                          norm=args.norm, eps=args.norm_inner,
                                          eps_step=args.norm_inner / 4,  # TODO: tune?
                                          num_random_init=args.n_random_init_inner,
                                          batch_size=args.batch_size)
    else:
        raise NotImplementedError('attack-name not supported')

    X_adv_tmp = attack.generate(x=X_adv_tmp, y=y)

    # project on ball of max_norm size, and clip
    X_adv_tmp = X + projection(X_adv_tmp - X, eps=args.max_norm, norm_p=args.norm)  # project on the ball
    X_adv_tmp = np.clip(X_adv_tmp, data.min_pixel_value, data.max_pixel_value)

    # print and save stats
    acc_ens_prob, acc_ens_logit = compute_accuracy_ensemble(models_dir=args.dir_models, X=X_adv_tmp, y=y, data=data)
    lpnorm = compute_norm(X_adv=X_adv_tmp, X=X, norm=args.norm)
    if USE_CUDA:
        torch.cuda.synchronize()
    end_time = time.perf_counter()
    print(
        f"Outer iteration: {i + 1}/{args.n_iter_outer}, Accuracy ens (prob): {acc_ens_prob * 100:.3f}%, Accuracy ens "
        f"(logit): {acc_ens_logit * 100:.3f}%, L{args.norm}-norm: mean {lpnorm.mean():.5f} (min {lpnorm.min():.5f} max {lpnorm.max():.5f})"
        f", Time: {(end_time - start_time) / 60:.3f} min")
    df_metrics = df_metrics.append(
test_dataset = Dataset(raw_test_data[0], raw_test_data[1])
distilled_test_set = Dataset(raw_test_data[0], convert_to_one_hot(raw_test_data[1], 10))

art_classifier = KerasClassifier(model=classifier, clip_values=(0, 1), use_logits=False)
art_classifier_distilled = KerasClassifier(model=distilled_classifier, clip_values=(0, 1), use_logits=False)

# Create adversarial data sets with low and high epsilon for the undistilled classifier
fast_gradient_method_low_epsil = FastGradientMethod(estimator=art_classifier, eps=0.1)
fast_gradient_method_high_epsil = FastGradientMethod(estimator=art_classifier, eps=0.2)
adversarial_low_epsil = fast_gradient_method_low_epsil.generate(x=test_dataset.images)
adversarial_high_epsil = fast_gradient_method_high_epsil.generate(x=test_dataset.images)
adversarial_low_dataset = Dataset(adversarial_low_epsil, raw_test_data[1], False)
adversarial_high_dataset = Dataset(adversarial_high_epsil, raw_test_data[1], False)

# Create adversarial data sets with low and high epsilon for the distilled classifier
fast_gradient_method_low_epsil_distilled = FastGradientMethod(estimator=art_classifier_distilled, eps=0.1)
fast_gradient_method_high_epsil_distilled = FastGradientMethod(estimator=art_classifier_distilled, eps=0.3)
adversarial_low_epsil_distilled = fast_gradient_method_low_epsil_distilled.generate(x=test_dataset.images)
adversarial_high_epsil_distilled = fast_gradient_method_high_epsil_distilled.generate(
def adversarial_generation(
    model: Architecture,
    x,
    y,
    epsilon=0.25,
    attack_type=AttackType.FGSM,
    num_iter=10,
    attack_backend: str = AttackBackend.FOOLBOX,
):
    """
    Create an adversarial example (FGSM only for now)
    """
    x.requires_grad = True

    logger.info(f"Generating for x (shape={x.shape}) and y (shape={y.shape})")

    if attack_backend == AttackBackend.ART:
        from art.attacks.evasion import (
            FastGradientMethod,
            ProjectedGradientDescent,
            DeepFool as DeepFoolArt,
            CarliniL2Method,
            HopSkipJump,
        )

        if attack_type == AttackType.FGSM:
            attacker = FastGradientMethod(estimator=model.art_classifier, eps=epsilon)
        elif attack_type == AttackType.PGD:
            attacker = ProjectedGradientDescent(
                estimator=model.art_classifier,
                max_iter=num_iter,
                eps=epsilon,
                eps_step=2 * epsilon / num_iter,
            )
        elif attack_type == AttackType.DeepFool:
            attacker = DeepFoolArt(classifier=model.art_classifier, max_iter=num_iter)
        elif attack_type == "CW":
            attacker = CarliniL2Method(
                classifier=model.art_classifier,
                max_iter=num_iter,
                binary_search_steps=15,
            )
        elif attack_type == AttackType.SQUARE:
            # attacker = SquareAttack(estimator=model.get_art_classifier())
            raise NotImplementedError("Work in progress")
        elif attack_type == AttackType.HOPSKIPJUMP:
            attacker = HopSkipJump(
                classifier=model.art_classifier,
                targeted=False,
                max_eval=100,
                max_iter=10,
                init_eval=10,
            )
        else:
            raise NotImplementedError(f"{attack_type} is not available in ART")

        attacked = attacker.generate(x=x.detach().cpu())
        attacked = torch.from_numpy(attacked).to(device)

    elif attack_backend == AttackBackend.FOOLBOX:
        import foolbox as fb

        if model.name in ["efficientnet", "resnet32", "resnet44", "resnet56"]:
            model.set_default_forward_mode(None)
        else:
            model.set_default_forward_mode("presoft")

        if attack_type == AttackType.FGSM:
            attacker = fb.attacks.LinfFastGradientAttack()
        elif attack_type == AttackType.PGD:
            attacker = fb.attacks.LinfProjectedGradientDescentAttack(
                steps=num_iter, random_start=False, rel_stepsize=2 / num_iter)
        elif attack_type == AttackType.DeepFool:
            attacker = fb.attacks.LinfDeepFoolAttack(loss="crossentropy")
        elif attack_type == AttackType.CW:
            attacker = fb.attacks.L2CarliniWagnerAttack(steps=num_iter)
        elif attack_type == AttackType.BOUNDARY:
            attacker = fb.attacks.BoundaryAttack(steps=7000, spherical_step=0.01, source_step=0.01)
            x = x.float()
        else:
            raise NotImplementedError(f"{attack_type} is not available in Foolbox")

        attacked, _, _ = attacker(
            model.foolbox_classifier,
            x.detach(),
            torch.from_numpy(y).to(device),
            epsilons=epsilon,
        )
        model.set_default_forward_mode(None)

    elif attack_backend == AttackBackend.CUSTOM:
        from tda.dataset.custom_attacks import FGSM, BIM, DeepFool, CW

        if attack_type == AttackType.FGSM:
            attacker = FGSM(model, ce_loss)
            attacked = attacker.run(data=x.detach(), target=torch.from_numpy(y).to(device), epsilon=epsilon)
        elif attack_type == AttackType.PGD:
            attacker = BIM(model, ce_loss, lims=(0, 1), num_iter=num_iter)
            attacked = attacker.run(data=x.detach(), target=torch.from_numpy(y).to(device), epsilon=epsilon)
        elif attack_type == AttackType.DeepFool:
            attacker = DeepFool(model, num_classes=10, num_iter=num_iter)
            attacked = [attacker(x[i].detach(), torch.tensor(y[i]).to(device)) for i in range(len(x))]
            attacked = torch.cat([torch.unsqueeze(a, 0) for a in attacked], 0)
        elif attack_type == AttackType.CW:
            attacker = CW(model, lims=(0, 1), num_iter=num_iter)
            attacked = attacker.run(data=x.detach(), target=torch.from_numpy(y).to(device))
            attacked = torch.cat([torch.unsqueeze(a, 0) for a in attacked], 0)
        else:
            raise NotImplementedError(f"{attack_type} is not available as custom implementation")
    else:
        raise NotImplementedError(f"Unknown backend {attack_backend}")

    return attacked.detach().double()
def main():
    # Create ART object detector
    frcnn = PyTorchFasterRCNN(
        clip_values=(0, 255),
        attack_losses=["loss_classifier", "loss_box_reg", "loss_objectness", "loss_rpn_box_reg"],
    )

    # Load image 1
    image_0 = cv2.imread("./10best-cars-group-cropped-1542126037.jpg")
    image_0 = cv2.cvtColor(image_0, cv2.COLOR_BGR2RGB)  # Convert to RGB
    print("image_0.shape:", image_0.shape)

    # Load image 2
    image_1 = cv2.imread("./banner-diverse-group-of-people-2.jpg")
    image_1 = cv2.cvtColor(image_1, cv2.COLOR_BGR2RGB)  # Convert to RGB
    image_1 = cv2.resize(image_1, dsize=(image_0.shape[1], image_0.shape[0]), interpolation=cv2.INTER_CUBIC)
    print("image_1.shape:", image_1.shape)

    # Stack images
    image = np.stack([image_0, image_1], axis=0).astype(np.float32)
    print("image.shape:", image.shape)

    for i in range(image.shape[0]):
        plt.axis("off")
        plt.title("image {}".format(i))
        plt.imshow(image[i].astype(np.uint8), interpolation="nearest")
        plt.show()

    # Make prediction on benign samples
    predictions = frcnn.predict(x=image)

    for i in range(image.shape[0]):
        print("\nPredictions image {}:".format(i))

        # Process predictions
        predictions_class, predictions_boxes, predictions_class = extract_predictions(predictions[i])

        # Plot predictions
        plot_image_with_boxes(img=image[i].copy(), boxes=predictions_boxes, pred_cls=predictions_class)

    # Create and run attack
    eps = 32
    attack = FastGradientMethod(estimator=frcnn, eps=eps)
    image_adv = attack.generate(x=image, y=None)

    print("\nThe attack budget eps is {}".format(eps))
    print("The resulting maximal difference in pixel values is {}.".format(np.amax(np.abs(image - image_adv))))
    assert np.amax(np.abs(image - image_adv)) == eps

    for i in range(image_adv.shape[0]):
        plt.axis("off")
        plt.title("image_adv {}".format(i))
        plt.imshow(image_adv[i].astype(np.uint8), interpolation="nearest")
        plt.show()

    predictions_adv = frcnn.predict(x=image_adv)

    for i in range(image.shape[0]):
        print("\nPredictions adversarial image {}:".format(i))

        # Process predictions
        predictions_adv_class, predictions_adv_boxes, predictions_adv_class = extract_predictions(predictions_adv[i])

        # Plot predictions
        plot_image_with_boxes(img=image_adv[i].copy(), boxes=predictions_adv_boxes, pred_cls=predictions_adv_class)
def test_fgsm(adv_model, dataset, loss_fn, optimizer, batch_size=32, num_workers=20,
              device='cuda:0', attack='fgsm', **kwargs):
    """
    Evaluate the model against FGSM/PGD adversarial examples over a range of epsilons.
    """
    epsilons = [0.00001, 0.0001, 0.004, 0.01, 0.1, 1, 10, 100]
    label_dict = pkl.load(open('external/speaker2int_7323.pkl', 'rb'))
    extractor = mfcc_extractor(collate=False)
    adv_classifier = PyTorchClassifier(model=AdvModel(adv_model.cpu(), extractor.cpu()),
                                       loss=loss_fn,
                                       optimizer=optimizer,
                                       input_shape=[1, 32000],
                                       nb_classes=250)

    # Create Dataloader
    dataloader = DataLoader(dataset=dataset['eval'], batch_size=batch_size, shuffle=False,
                            num_workers=num_workers, collate_fn=PadBatch())
    n_iterations = len(dataloader)

    f_log_all, f_name_all = createLogFiles('all')
    with open(f_name_all, 'a+') as f_log_all:
        f_log_all.write("\n\n #################################### Begin #####################################")
        f_log_all.write("\n New Log: {}".format(datetime.now()))

    # Loop over all the evaluation data
    n_files = 0
    accuracy = 0
    adv_acc_eps = {e: 0.0 for e in epsilons}
    success_eps = {e: 0.0 for e in epsilons}
    for i, (X, y, f) in enumerate(dataloader):
        if label_dict:
            y = torch.LongTensor([label_dict[y_] for y_ in y])

        # send data to the GPU
        y = y.to(device)
        x_mfccs, labels = extractor((X.to(device).transpose(1, 2))), y
        clean_logits = adv_model.forward(x_mfccs)
        clean_class = clean_logits.argmax(dim=-1)

        n_files += len(X)
        tmp_accuracy = torch.sum(clean_class == y).detach().cpu()
        accuracy += tmp_accuracy

        # Epsilon loop
        for e in epsilons:
            # Build the attack object; use a separate name so the `attack` argument is not overwritten
            if attack == 'fgsm':
                attacker = FastGradientMethod(estimator=adv_classifier, eps=e)
            elif attack == 'bim':
                attacker = ProjectedGradientDescent(estimator=adv_classifier, eps=e, eps_step=e / 5, max_iter=100)

            X_fgsm = torch.Tensor(attacker.generate(x=X)).to(device)
            assert len(X_fgsm) == len(X)

            pred_mfccs, labels_preds = extractor(X_fgsm.transpose(1, 2)), y
            adv_logits = adv_model.forward(pred_mfccs)
            adv_class = adv_logits.argmax(dim=-1)

            tmp_success = torch.sum(clean_class != adv_class).detach().cpu()
            tmp_adv_acc = torch.sum(y == adv_class).detach().cpu()
            success_eps[e] += tmp_success
            adv_acc_eps[e] += tmp_adv_acc

            # Update total loss and acc
            with open(f_name_all, 'a+') as f_log_all:
                f_log_all.write('File {}\tBatch {}\tEps {}\tTarg {}\tClean {}\tAdv {}\n'.format(
                    f[0][-1], i + 1, e, y.cpu().detach().numpy(),
                    clean_class.cpu().detach().numpy(), adv_class.cpu().detach().numpy()))

            for wav, fi in zip(X_fgsm, f):
                adv_path = "samples/fgsm/{}".format(fi[-2])
                if not os.path.exists(adv_path):
                    os.makedirs(adv_path)
                torchaudio.save("{}/{}_{}.wav".format(adv_path, fi[-1], e),
                                wav.squeeze().detach().cpu(), 8000)

            print("Epsilon: {}".format(e),
                  "Tmp Acc: {:.3f}".format((tmp_accuracy + 0.0) / len(X)),
                  "Tmp Adv: {:.3f}".format((tmp_adv_acc + 0.0) / len(X)),
                  "Tmp Suc: {:.3f}".format((tmp_success + 0.0) / len(X)))

    accuracy = (accuracy + 0.0) / n_files
    adv_acc_eps = {k: v / n_files for k, v in adv_acc_eps.items()}
    success_eps = {k: v / n_files for k, v in success_eps.items()}
    with open(f_name_all, 'a+') as f_log_all:
        f_log_all.write('Epsilons: {} - Accuracy: {}%\tAdv Accuracy: {}%\tSuccess rate: {}%\n'.format(
            e, accuracy, adv_acc_eps, success_eps))
    return
                 input_shape=x_train.shape[1:]))
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation="softmax"))
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])

classifier = KerasClassifier(model=model, clip_values=(min_, max_))
classifier.fit(x_train, y_train, nb_epochs=5, batch_size=128)

# Evaluate the classifier on the test set
preds = np.argmax(classifier.predict(x_test), axis=1)
acc = np.sum(preds == np.argmax(y_test, axis=1)) / y_test.shape[0]
print("\nTest accuracy: %.2f%%" % (acc * 100))

# Craft adversarial samples with FGSM
epsilon = 0.1  # Maximum perturbation
adv_crafter = FastGradientMethod(classifier, eps=epsilon)
x_test_adv = adv_crafter.generate(x=x_test)

# Evaluate the classifier on the adversarial examples
preds = np.argmax(classifier.predict(x_test_adv), axis=1)
acc = np.sum(preds == np.argmax(y_test, axis=1)) / y_test.shape[0]
print("\nTest accuracy on adversarial sample: %.2f%%" % (acc * 100))
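# A possible follow-up (a sketch, not part of the original snippet): harden the same
# classifier with ART's AdversarialTrainer, reusing the FGSM crafter defined above; the
# ratio of 0.5 mixes clean and adversarial samples in each batch.
from art.defences.trainer import AdversarialTrainer

trainer = AdversarialTrainer(classifier, attacks=adv_crafter, ratio=0.5)
trainer.fit(x_train, y_train, batch_size=128, nb_epochs=5)

# Re-evaluate on the adversarial test set crafted above
preds_robust = np.argmax(classifier.predict(x_test_adv), axis=1)
acc_robust = np.sum(preds_robust == np.argmax(y_test, axis=1)) / y_test.shape[0]
print("\nTest accuracy on adversarial sample after adversarial training: %.2f%%" % (acc_robust * 100))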
# auto pgd
# auto_adv_crafter_untargeted = AutoProjectedGradientDescent(classifier, eps=args.eps, eps_step=args.eps_step, max_iter=args.max_iter)
# print("AutoPGD: Craft attack on untargeted training examples")
# x_test_auto_adv = auto_adv_crafter_untargeted.generate(x_test)
# auto_adv_crafter_targeted = AutoProjectedGradientDescent(classifier, targeted=True, eps=args.eps_step, eps_step=args.eps_step, max_iter=args.max_iter)
# print("AutoPGD: Craft attack on targeted training examples")
# targets = random_targets(y_test, nb_classes=10)
# x_test_auto_adv_targeted = auto_adv_crafter_targeted.generate(x_test, **{"y": targets})

# fgm
fgm_adv_crafter_untargeted = FastGradientMethod(classifier, eps=args.eps, eps_step=args.eps_step)
print("FGM: Craft attack on untargeted training examples")
x_test_fgm_adv = fgm_adv_crafter_untargeted.generate(x_test)

fgm_adv_crafter_targeted = FastGradientMethod(classifier, targeted=True, eps=args.eps_step, eps_step=args.eps_step)
print("FGM: Craft attack on targeted training examples")
targets = random_targets(y_test, nb_classes=10)
x_test_fgm_adv_targeted = fgm_adv_crafter_targeted.generate(x_test, **{"y": targets})

# DeepFool takes ~8 seconds for each adversarial image
# Step 6: Evaluate the ART classifier on adversarial test examples
# pp(x_test_adv.shape)