def main(argv=None):
    datasets = FLAGS.datasets
    if 'mnist' == datasets:
        train_start = 0
        train_end = 60000
        test_start = 0
        test_end = 10000

        # Get MNIST test data
        X_train, Y_train, X_test, Y_test = data_mnist(
            train_start=train_start, train_end=train_end,
            test_start=test_start, test_end=test_end)
        sample = X_test[0:1000]
    elif 'cifar10' == datasets:
        preprocess_image = preprocess_image_1
        train_start = 0
        train_end = 50000
        test_start = 0
        test_end = 10000

        # Get CIFAR10 test data
        X_train, Y_train, fn_train, X_test, Y_test, fn_test = data_cifar10(
            train_start=train_start, train_end=train_end,
            test_start=test_start, test_end=test_end,
            preprocess=preprocess_image)
        sample = X_test[198:199]
        imsave(FLAGS.sample, deprocess_image_1(sample))
    elif 'svhn' == datasets:
        # choose the method of preprocess image
        preprocess_image = preprocess_image_1
        train_start = 0
        train_end = 73257
        test_start = 0
        test_end = 26032

        # Get SVHN test data
        X_train, Y_train, X_test, Y_test = data_svhn(
            train_start=train_start, train_end=train_end,
            test_start=test_start, test_end=test_end,
            preprocess=preprocess_image)
        sample = X_test[198:199]
        imsave(FLAGS.sample, deprocess_image_1(sample))

    store_path = '../datasets/experiment/mnist/fgsm/test'
    fgsm(datasets=datasets, sample=sample, model_name=FLAGS.model_name,
         store_path=store_path, step_size=FLAGS.step_size, epoch=FLAGS.epoch)
def main(argv=None):
    datasets = FLAGS.datasets
    if 'mnist' == datasets:
        train_start = 0
        train_end = 60000
        test_start = 0
        test_end = 10000

        # Get MNIST test data
        X_train, Y_train, X_test, Y_test = data_mnist(
            train_start=train_start, train_end=train_end,
            test_start=test_start, test_end=test_end)
        sample = X_test[0:1]
        imsave(FLAGS.sample, deprocess_image_1(sample))
    elif 'cifar10' == datasets:
        preprocess_image = preprocess_image_1
        train_start = 0
        train_end = 50000
        test_start = 0
        test_end = 10000

        # Get CIFAR10 test data
        X_train, Y_train, X_test, Y_test = data_cifar10(
            train_start=train_start, train_end=train_end,
            test_start=test_start, test_end=test_end,
            preprocess=preprocess_image)
        sample = X_test[198:199]
        imsave(FLAGS.sample, deprocess_image_1(sample))
    elif 'svhn' == datasets:
        # choose the method of preprocess image
        preprocess_image = preprocess_image_1
        train_start = 0
        train_end = 73257
        test_start = 0
        test_end = 26032

        # Get SVHN test data
        X_train, Y_train, X_test, Y_test = data_svhn(
            train_start=train_start, train_end=train_end,
            test_start=test_start, test_end=test_end,
            preprocess=preprocess_image)
        sample = X_test[198:199]
        imsave(FLAGS.sample, deprocess_image_1(sample))

    jsma(datasets=FLAGS.datasets, sample_path=FLAGS.sample, model_name=FLAGS.model,
         target=FLAGS.target, store_path=FLAGS.store_path)
def psd(datasets, model, samples_path, n):
    """
    Average perturbation sensitivity distance (PSD) between the stored adversarial
    samples and the corresponding original test images.

    :param datasets: dataset name, e.g. 'mnist' or 'cifar10'
    :param model: model name
    :param samples_path: path of the stored adversarial samples
    :param n: parameter passed through to distance()
    :return: the average perturbation sensitivity distance
    """
    con = 5
    X_train, Y_train, X_test, Y_test = get_data(datasets)
    X_test = [deprocess_image_1(np.asarray([image])) for image in X_test]
    [image_list, image_files, real_labels, predicted_labels] = get_data_file(samples_path)

    # if datasets == 'cifar10':
    #     image_list = [(img * 255).reshape(img.shape[0], img.shape[1], img.shape[2]) for img in image_list]
    # else:
    #     image_list = [(img * 255).reshape(img.shape[0], img.shape[1]) for img in image_list]
    if datasets == 'cifar10':
        image_list = [deprocess_image_1(np.asarray([img])).reshape(img.shape[0], img.shape[1], img.shape[2])
                      for img in image_list]
    else:
        image_list = [deprocess_image_1(np.asarray([img])).reshape(img.shape[0], img.shape[1])
                      for img in image_list]

    result = 0.0
    for i in range(len(image_list)):
        index = int(image_files[i].split('_')[-4])
        adv = np.asarray(image_list[i])
        ori = np.asarray(X_test[index])
        if datasets == 'cifar10':
            ori = ori.reshape(ori.shape[0], ori.shape[1], ori.shape[2])
        else:
            ori = ori.reshape(ori.shape[1], ori.shape[2])
        # result = result + distance(adv, ori, (n - 1) / 2)
        result = result + distance(adv, ori, n, con)

    print('average perturbation sensitivity distance is %.4f' % (result / len(image_list)))
    # print('average perturbation sensitivity distance is %.4f' % (result / 100))
    return result / len(image_list)
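
# Illustrative note (not part of the original pipeline): psd(), like the other metrics
# below, pairs each stored adversarial image with its original test image by parsing the
# attack's file name and taking the fourth-from-last '_'-separated field as the test-set
# index, e.g. a hypothetical 'adv_1538012345678.9_198_3_5_.npy' maps to index 198.
def _example_psd_call():
    # hypothetical samples directory and model name; n=3 is an assumed value for distance()'s n
    return psd(datasets='mnist', model='lenet1',
               samples_path='../mt_result/mnist/fgsm/test_data', n=3)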
def ass(datasets, model, samples_path):
    """
    Average structural similarity (SSIM) between the stored adversarial samples and
    the corresponding original test images.

    :param datasets: dataset name
    :param model: model name
    :param samples_path: path of the stored adversarial samples
    :return: the average structural similarity
    """
    tf.reset_default_graph()
    X_train, Y_train, X_test, Y_test = get_data(datasets)
    X_test = [deprocess_image_1(np.asarray([image])) for image in X_test]
    [image_list, image_files, real_labels, predicted_labels] = get_data_file(samples_path)

    if datasets == 'cifar10':
        image_list = [(img * 255).reshape(img.shape[0], img.shape[1], img.shape[2]) for img in image_list]
    else:
        image_list = [(img * 255).reshape(img.shape[0], img.shape[1]) for img in image_list]

    result = 0
    for i in range(len(image_list)):
        index = int(image_files[i].split('_')[-4])
        result = result + ssim(np.asarray(image_list[i]), np.asarray(X_test[index]))

    result = result / len(image_list)
    print('average structural similarity is %.4f' % result)
    return result
def ald(datasets, model, samples_path, p, epoch=49):
    """
    Average L-p distortion between the stored adversarial samples and the
    corresponding original test images.

    :param datasets: dataset name
    :param model: model name
    :param samples_path: path of the stored adversarial samples
    :param p: the order of the norm used by distortion_measure()
    :return: the average L-p distortion
    """
    tf.reset_default_graph()
    X_train, Y_train, X_test, Y_test = get_data(datasets)
    X_test = [deprocess_image_1(np.asarray([image])) for image in X_test]
    [image_list, image_files, real_labels, predicted_labels] = get_data_file(samples_path)

    if datasets == 'cifar10':
        image_list = [(img * 255).reshape(img.shape[0], img.shape[1], img.shape[2]) for img in image_list]
    else:
        image_list = [(img * 255).reshape(img.shape[0], img.shape[1]) for img in image_list]

    distortion = 0
    for i in range(len(image_list)):
        index = int(image_files[i].split('_')[-4])
        distortion = distortion + distortion_measure(np.asarray(image_list[i]),
                                                     np.asarray(X_test[index]), p)

    result = distortion / len(image_list)
    print('average L-%s distortion is %.4f' % (p, result))
    return result
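
# Illustrative sketch (hypothetical model name, samples path, and parameter values):
# computing the three distortion-style metrics defined above for one attack's output.
def _example_adv_quality_report(samples_path='../mt_result/mnist/fgsm/test_data'):
    l2_distortion = ald('mnist', 'lenet1', samples_path, p=2)  # assumed p=2 for L-2 distortion
    similarity = ass('mnist', 'lenet1', samples_path)          # average structural similarity
    sensitivity = psd('mnist', 'lenet1', samples_path, n=3)    # perturbation sensitivity distance
    return l2_distortion, similarity, sensitivity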
def main(argv=None):
    datasets = FLAGS.datasets
    start = FLAGS.start
    end = FLAGS.end
    if 'mnist' == datasets:
        train_start = 0
        train_end = 60000
        test_start = 0
        test_end = 10000

        # Get MNIST test data
        X_train, Y_train, X_test, Y_test = data_mnist(
            train_start=train_start, train_end=train_end,
            test_start=test_start, test_end=test_end)
        sample = X_test[start:end]
        # imsave(FLAGS.sample, deprocess_image_1(sample))
    elif 'cifar10' == datasets:
        preprocess_image = preprocess_image_1
        train_start = 0
        train_end = 50000
        test_start = 0
        test_end = 10000

        # Get CIFAR10 test data
        X_train, Y_train, X_test, Y_test = data_cifar10(
            train_start=train_start, train_end=train_end,
            test_start=test_start, test_end=test_end,
            preprocess=preprocess_image)
        sample = X_train[0:10000]
        # imsave(FLAGS.sample, deprocess_image_1(sample))
    elif 'svhn' == datasets:
        # choose the method of preprocess image
        preprocess_image = preprocess_image_1
        train_start = 0
        train_end = 73257
        test_start = 0
        test_end = 26032

        # Get SVHN test data
        X_train, Y_train, X_test, Y_test = data_svhn(
            train_start=train_start, train_end=train_end,
            test_start=test_start, test_end=test_end,
            preprocess=preprocess_image)
        sample = X_test[0:100]
        imsave(FLAGS.sample, deprocess_image_1(sample))

    store_path = 'test'
    suc, total = cw(datasets=datasets, sample=sample, model_name=FLAGS.model,
                    target=FLAGS.target, store_path=store_path, ini_con=0.1,
                    start=start, end=end)
    print(suc)
    print(total)
def main(argv=None):
    datasets = FLAGS.datasets
    if 'mnist' == datasets:
        train_start = 0
        train_end = 60000
        test_start = 0
        test_end = 10000

        # Get MNIST test data
        X_train, Y_train, X_test, Y_test = data_mnist(
            train_start=train_start, train_end=train_end,
            test_start=test_start, test_end=test_end)
        sample = X_test[0:10]
    elif 'cifar10' == datasets:
        preprocess_image = preprocess_image_1
        train_start = 0
        train_end = 50000
        test_start = 0
        test_end = 10000

        # Get CIFAR10 test data
        X_train, Y_train, X_test, Y_test = data_cifar10(
            train_start=train_start, train_end=train_end,
            test_start=test_start, test_end=test_end,
            preprocess=preprocess_image)
        sample = X_test[0:2]
        # imsave('a.png', deprocess_image_3(sample))
    elif 'svhn' == datasets:
        # choose the method of preprocess image
        preprocess_image = preprocess_image_1
        train_start = 0
        train_end = 73257
        test_start = 0
        test_end = 26032

        # Get SVHN test data
        X_train, Y_train, X_test, Y_test = data_svhn(
            train_start=train_start, train_end=train_end,
            test_start=test_start, test_end=test_end,
            preprocess=preprocess_image)
        sample = X_test[198:199]
        imsave(FLAGS.sample, deprocess_image_1(sample))

    store_path = 'test0.03'
    bim(datasets=datasets, sample=sample, model_name=FLAGS.model,
        store_path=store_path, step_size='0.03')
def ric(datasets, model, samples_path, quality, epoch=49):
    """
    Robustness to image compression: the fraction of stored adversarial samples that
    are still misclassified after Guetzli JPEG compression.

    :param datasets: dataset name
    :param model: model name
    :param samples_path: path of the stored adversarial samples
    :param quality: JPEG quality passed to guetzli
    :param epoch: training epoch of the model checkpoint to load
    :return: the robustness-to-image-compression score
    """
    # Object used to keep track of (and return) key accuracies
    sess, preds, x, y, model, feed_dict = model_load(datasets, model, epoch=epoch)
    [image_list, image_files, real_labels, predicted_labels] = get_data_file(samples_path)

    ori_path = samples_path.replace('test_data', 'ric_ori')
    if not os.path.exists(ori_path):
        os.makedirs(ori_path)
    ic_path = samples_path.replace('test_data', 'ric_ic')
    if not os.path.exists(ic_path):
        os.makedirs(ic_path)

    count = 0
    for i in range(len(image_list)):
        # jj = np.asarray(image_list[i:i+1])
        # print(jj.shape)
        if datasets == 'mnist':
            adv_img_deprocessed = deprocess_image_1(np.asarray(image_list[i:i + 1]))[0]
        elif datasets == 'cifar10':
            adv_img_deprocessed = deprocess_image_1(np.asarray(image_list[i:i + 1]))
        saved_adv_image_path = os.path.join(ori_path, image_files[i].replace("npy", "png"))
        imsave(saved_adv_image_path, adv_img_deprocessed)
        output_IC_path = os.path.join(ic_path, image_files[i].replace("npy", "jpg"))

        cmd = '../../guetzli/bin/Release/guetzli --quality {} {} {}'.format(
            quality, saved_adv_image_path, output_IC_path)
        assert os.system(cmd) == 0, \
            'the guetzli tool must be installed first, see https://github.com/google/guetzli'

        if datasets == 'cifar10':
            IC_image = Image.open(output_IC_path).convert('RGB')
            IC_image = np.asarray([np.array(IC_image).astype('float32') / 255.0])
            # IC_image = IC_image.reshape(32, 32, 3)
        elif datasets == 'mnist':
            IC_image = Image.open(output_IC_path).convert('L')
            IC_image = np.expand_dims(np.array(IC_image).astype('float32'), axis=0) / 255.0
            IC_image = IC_image.reshape(-1, 28, 28, 1)

        if model_argmax(sess, x, preds, IC_image, feed=feed_dict) != int(real_labels[i]):
            count = count + 1

    result = 1.0 * count / len(image_list)
    print('Robustness to image compression is %.4f' % result)

    # Close TF session
    sess.close()
    return result
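
# Illustrative sketch (hypothetical arguments): ric() shells out to the guetzli binary at
# ../../guetzli/bin/Release/guetzli, so that tool has to be built beforehand; quality=90
# is an assumed value within guetzli's supported range, and the paths/names are placeholders.
def _example_ric_call():
    return ric(datasets='mnist', model='lenet1',
               samples_path='../mt_result/mnist/fgsm/test_data', quality=90, epoch=49)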
def batch_attack(datasets, attack, model_path, store_path, nb_classes):
    if 'mnist' == datasets:
        train_start = 0
        train_end = 60000
        test_start = 0
        test_end = 10000

        # Get MNIST test data
        X_train, Y_train, X_test, Y_test = data_mnist(
            train_start=train_start, train_end=train_end,
            test_start=test_start, test_end=test_end)
    elif 'cifar10' == datasets:
        preprocess_image = preprocess_image_1
        train_start = 0
        train_end = 50000
        test_start = 0
        test_end = 10000

        # Get CIFAR10 test data
        X_train, Y_train, fn_train, X_test, Y_test, fn_test = data_cifar10(
            train_start=train_start, train_end=train_end,
            test_start=test_start, test_end=test_end,
            preprocess=preprocess_image)
    elif 'svhn' == datasets:
        # choose the method of preprocess image
        preprocess_image = preprocess_image_1
        train_start = 0
        train_end = 73257
        test_start = 0
        test_end = 26032

        # Get SVHN test data
        X_train, Y_train, X_test, Y_test = data_svhn(
            train_start=train_start, train_end=train_end,
            test_start=test_start, test_end=test_end,
            preprocess=preprocess_image)

    store_path = store_path + attack + '/' + datasets
    sample_path = '../datasets/integration/batch_attack/' + datasets + '/'
    sess, preds, x, y, model, feed_dict = model_load(datasets, model_path)

    if os.listdir(sample_path) == []:
        for i in range(len(X_test)):
            sample = X_test[i:i + 1]
            path = sample_path + str(i) + '.png'
            imsave(path, deprocess_image_1(sample))
            current_img = ndimage.imread(path)
            img = np.expand_dims(preprocess_image_1(current_img.astype('float64')), 0)
            p = model_argmax(sess, x, preds, img, feed=feed_dict)
            if p != Y_test[i].argmax(axis=0):
                os.remove(path)
        # for i in range(len(X_test)):
        #     sample = X_test[i:i+1]
        #     if model_argmax(sess, x, preds, sample, feed=feed_dict) == Y_test[i].argmax(axis=0):
        #         path = sample_path + str(i) + '.png'
        #         imsave(path, deprocess_image_1(sample))
    sess.close()

    samples = os.listdir(sample_path)
    for sample in samples:
        tf.reset_default_graph()
        if 'blackbox' == attack:
            blackbox(datasets=datasets, sample_path=sample_path + sample,
                     model_path=model_path, store_path=store_path, nb_classes=nb_classes)
        elif 'fgsm' == attack:
            fgsm(datasets=datasets, sample_path=sample_path + sample,
                 model_path=model_path, store_path=store_path, nb_classes=nb_classes)
        else:
            i = int(sample.split('.')[-2])
            for j in range(nb_classes):
                tf.reset_default_graph()
                if Y_test[i][j] == 0:
                    if 'jsma' == attack:
                        jsma(datasets=datasets, sample_path=sample_path + sample, target=j,
                             model_path=model_path, store_path=store_path, nb_classes=nb_classes)
                    if 'cw' == attack:
                        cw(datasets=datasets, sample_path=sample_path + sample, target=j,
                           model_path=model_path, store_path=store_path, nb_classes=nb_classes)
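
# Illustrative sketch (hypothetical paths): running the batch driver over every correctly
# classified test image. Note that batch_attack() passes sample_path/model_path keyword
# arguments to the attacks, i.e. it targets the file-path-based attack variants rather than
# the in-memory ones shown elsewhere in this section.
def _example_batch_attack_call():
    batch_attack(datasets='mnist', attack='jsma', model_path='../models/mnist',
                 store_path='../mt_result/integration/', nb_classes=10)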
def prepare_datasets(datasets, model_path, attack_type, sample_path):
    print('Loading the data and model...')
    # Load the model
    sess, preds, x, y, model, feed_dict = model_load(datasets, model_path)

    # Load the dataset
    if 'mnist' == datasets:
        train_start = 0
        train_end = 60000
        test_start = 0
        test_end = 10000

        # Get MNIST test data
        X_train, Y_train, X_test, Y_test = data_mnist(
            train_start=train_start, train_end=train_end,
            test_start=test_start, test_end=test_end)
    elif 'cifar10' == datasets:
        preprocess_image = preprocess_image_1
        train_start = 0
        train_end = 50000
        test_start = 0
        test_end = 10000

        # Get CIFAR10 test data
        X_train, Y_train, fn_train, X_test, Y_test, fn_test = data_cifar10(
            train_start=train_start, train_end=train_end,
            test_start=test_start, test_end=test_end,
            preprocess=preprocess_image)

    if attack_type == "normal":
        # Refine the normal, noisy and adversarial sets to only include samples for
        # which the original version was correctly classified by the model
        preds_test = np.asarray([])
        for i in range(40):
            preds_test = np.concatenate(
                (preds_test,
                 model_argmax(sess, x, preds, X_test[i * 250:(i + 1) * 250], feed=feed_dict)))
        inds_correct = np.asarray(np.where(preds_test == Y_test.argmax(axis=1))[0])
        inds_correct = inds_correct[np.random.choice(len(inds_correct), 5000, replace=False)]
        X_test = X_test[inds_correct]
        for i in range(4000):
            imsave("../datasets/experiment/" + datasets + "/normal/train/" +
                   str(inds_correct[i]) + '_' +
                   str(int(preds_test[inds_correct[i]])) + '_' +
                   str(int(preds_test[inds_correct[i]])) + '_.png',
                   deprocess_image_1(X_test[i:i + 1]))
        for j in range(1000):
            imsave("../datasets/experiment/" + datasets + "/normal/test/" +
                   str(inds_correct[4000 + j]) + '_' +
                   str(int(preds_test[inds_correct[4000 + j]])) + '_' +
                   str(int(preds_test[inds_correct[4000 + j]])) + '_.png',
                   deprocess_image_1(X_test[4000 + j:4001 + j]))
    elif attack_type == "error":
        preds_test = np.asarray([])
        for i in range(40):
            preds_test = np.concatenate(
                (preds_test,
                 model_argmax(sess, x, preds, X_test[i * 250:(i + 1) * 250], feed=feed_dict)))
        inds_correct = np.asarray(np.where(preds_test != Y_test.argmax(axis=1))[0])
        X_test = X_test[inds_correct]
        num = int(len(X_test) * 0.8)
        for i in range(num):
            imsave("../datasets/experiment/" + datasets + "/error/train/" +
                   str(inds_correct[i]) + '_' +
                   str(int(np.argmax(Y_test[inds_correct[i]]))) + '_' +
                   str(int(preds_test[inds_correct[i]])) + '_.png',
                   deprocess_image_1(X_test[i:i + 1]))
        for j in range(len(X_test) - num):
            imsave("../datasets/experiment/" + datasets + "/error/test/" +
                   str(inds_correct[num + j]) + '_' +
                   str(int(np.argmax(Y_test[inds_correct[num + j]]))) + '_' +
                   str(int(preds_test[inds_correct[num + j]])) + '_.png',
                   deprocess_image_1(X_test[num + j:num + 1 + j]))
    else:
        # Check attack type, select adversarial and noisy samples accordingly
        print('Loading adversarial samples...')
        # Load adversarial samples
        [X_test_adv, adv_image_files, real_labels, predicted_labels] = \
            utils.get_data_mutation_test(sample_path + attack_type + '/' + datasets)
        if len(X_test_adv) > 5000:
            index = np.asarray(range(len(X_test_adv)))
            index = index[np.random.choice(len(index), 5000, replace=False)]
            for i in range(4000):
                imsave("../datasets/experiment/" + datasets + "/" + attack_type +
                       "/train/" + adv_image_files[index[i]], X_test_adv[index[i]])
            for j in range(1000):
                imsave("../datasets/experiment/" + datasets + "/" + attack_type +
                       "/test/" + adv_image_files[index[4000 + j]], X_test_adv[index[4000 + j]])
        else:
            index = np.asarray(range(len(X_test_adv)))
            np.random.shuffle(index)
            cut = int(len(X_test_adv) * 0.8)
            for i in range(len(index)):
                if i < cut:
                    imsave("../datasets/experiment/" + datasets + "/" + attack_type +
                           "/train/" + adv_image_files[index[i]], X_test_adv[index[i]])
                else:
                    imsave("../datasets/experiment/" + datasets + "/" + attack_type +
                           "/test/" + adv_image_files[index[i]], X_test_adv[index[i]])
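
# Illustrative sketch (hypothetical paths): splitting one attack's adversarial samples into
# the train/test folders under ../datasets/experiment/<datasets>/<attack_type>/. The model
# path and sample_path prefix below are placeholder assumptions.
def _example_prepare_datasets_call():
    prepare_datasets(datasets='mnist', model_path='../models/mnist', attack_type='fgsm',
                     sample_path='../datasets/experiment/samples/')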
def fgsm(datasets, sample, model_name, epoch, store_path, step_size='0.3', batch_size=256):
    """
    The Fast Gradient Sign Method (FGSM) attack.

    :param datasets: dataset name
    :param sample: inputs to attack
    :param model_name: name of the model under attack
    :param epoch: training epoch of the model checkpoint to load
    :param store_path: directory where successful adversarial examples are stored
    :param step_size: the FGSM step size (eps)
    :param batch_size: number of samples attacked per batch
    :return: the number of successful adversarial examples and the number of attacked samples
    """
    tf.reset_default_graph()
    X_train, Y_train, X_test, Y_test = get_data(datasets)
    input_shape, nb_classes = get_shape(datasets)
    sess, preds, x, y, model, feed_dict = model_load(datasets, model_name, epoch)

    ###########################################################################
    # Craft adversarial examples using the FGSM approach
    ###########################################################################
    # Initialize the Fast Gradient Sign Method (FGSM) attack object and graph
    '''
    if 'mnist' == datasets:
        # sample = np.asarray([np.asarray(imread(sample_path)).reshape(28, 28, 1)]).astype('float32')
        # sample = preprocess_image_1(sample)
        print('1')
    elif 'cifar10' == datasets:
        sample = np.asarray([np.asarray(imread(sample_path)).reshape(32, 32, 3)]).astype('float32')
        sample = preprocess_image_1(sample)
    elif 'svhn' == datasets:
        sample = np.asarray([np.asarray(imread(sample_path)).reshape(32, 32, 3)]).astype('float32')
        sample = preprocess_image_1(sample)
    # print(sample.shape)
    '''

    probabilities = model_prediction(sess, x, preds, sample, feed=feed_dict)
    if sample.shape[0] == 1:
        current_class = np.argmax(probabilities)
    else:
        current_class = np.argmax(probabilities, axis=1)

    if not os.path.exists(store_path):
        os.makedirs(store_path)

    # only keep the samples the model already classifies correctly
    acc_pre_index = []
    for i in range(0, sample.shape[0]):
        if current_class[i] == np.argmax(Y_test[i]):
            acc_pre_index.append(i)

    sample_acc = np.zeros(shape=(len(acc_pre_index), input_shape[1], input_shape[2], input_shape[3]),
                          dtype='float32')
    probabilities_acc = np.zeros(shape=(len(acc_pre_index), nb_classes), dtype='float32')
    current_class_acc = np.zeros(shape=(len(acc_pre_index)), dtype=int)
    for i in range(0, len(acc_pre_index)):
        sample_acc[i] = sample[acc_pre_index[i]]
        probabilities_acc[i] = probabilities[acc_pre_index[i]]
        current_class_acc[i] = current_class[acc_pre_index[i]]
    print('Start generating adv. example')
    # print(float(step_size))

    if 'mnist' == datasets:
        fgsm_params = {'eps': float(step_size), 'clip_min': 0., 'clip_max': 1.}
    elif 'cifar10' == datasets:
        fgsm_params = {'eps': float(step_size), 'clip_min': 0., 'clip_max': 1.}
    elif 'svhn' == datasets:
        fgsm_params = {'eps': float(step_size), 'clip_min': 0., 'clip_max': 1.}
    fgsm = FastGradientMethod(model, sess=sess)
    adv_x = fgsm.generate(x, **fgsm_params)

    nb_batches = int(math.ceil(float(sample_acc.shape[0]) / batch_size))
    suc = 0
    for batch in range(nb_batches):
        # start, end = batch_indices(batch, sample_acc.shape[0], batch_size)
        print(batch)
        start = batch * batch_size
        end = (batch + 1) * batch_size
        if end > sample_acc.shape[0]:
            end = sample_acc.shape[0]
        adv = sess.run(adv_x, feed_dict={x: sample_acc[start:end],
                                         y: probabilities_acc[start:end]})
        adv_img_deprocessed = deprocess_image_1(adv)

        # Check if success was achieved
        # probabilities = model_prediction(sess, x, preds, sample, feed=feed_dict)
        new_class_label = model_argmax(sess, x, preds, adv, feed=feed_dict)  # Predicted class of the generated adversary
        for i in range(0, len(new_class_label)):
            j = batch * batch_size + i
            if new_class_label[i] != current_class_acc[j]:
                suc += 1
                path = store_path + '/adv_' + str(time.time() * 1000) + '_' + \
                       str(acc_pre_index[j]) + '_' + str(current_class_acc[j]) + '_' + \
                       str(new_class_label[i]) + '_.png'
                # path = store_path + '/' + str(acc_pre_index[j]) + '_' + str(time.time()*1000) + '_' + str(current_class_acc[j]) + '_' + str(new_class_label[i]) + '.png'
                imsave(path, adv_img_deprocessed[i])

    # Close TF session
    sess.close()

    return suc, len(acc_pre_index)
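
# Illustrative sketch (hypothetical model name and epoch): attacking the first 1000 MNIST
# test images with FGSM, mirroring what the FLAGS-driven main() earlier in this section does.
def _example_fgsm_call():
    X_train, Y_train, X_test, Y_test = get_data('mnist')
    return fgsm(datasets='mnist', sample=X_test[0:1000], model_name='lenet1', epoch=49,
                store_path='../mt_result/mnist/fgsm/test_data', step_size='0.3')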
def blackbox(datasets, sample, model_name, submodel_name, store_path, step_size=0.3, batch_size=256):
    """
    The black-box attack from arxiv.org/abs/1602.02697.

    :param datasets: dataset name
    :param sample: inputs to attack
    :param model_name: name of the black-box model
    :param submodel_name: name of the substitute model
    :param store_path: directory where successful adversarial examples are stored
    :param step_size: the FGSM step size used on the substitute
    :param batch_size: number of samples attacked per batch
    :return: the number of successful adversarial examples and the number of attacked samples
    """
    # Simulate the black-box model locally
    # You could replace this by a remote labeling API for instance
    print("Preparing the black-box model.")
    tf.reset_default_graph()
    X_train, Y_train, X_test, Y_test = get_data(datasets)
    input_shape, nb_classes = get_shape(datasets)
    sess, bbox_preds, x, y, model, feed_dict = model_load(datasets, model_name)

    # Train substitute using method from https://arxiv.org/abs/1602.02697
    print("Preparing the substitute model.")
    model_sub, preds_sub = sub_model_load(sess, datasets, submodel_name, model_name)

    ###########################################################################
    # Craft adversarial examples using the Blackbox approach
    ###########################################################################
    # Initialize the Fast Gradient Sign Method (FGSM) attack object.
    '''
    if 'mnist' == datasets:
        sample = np.asarray([np.asarray(imread(sample_path)).reshape(28, 28, 1)]).astype('float32')
        sample = preprocess_image_1(sample)
    elif 'cifar10' == datasets:
        sample = np.asarray([np.asarray(imread(sample_path)).reshape(32, 32, 3)]).astype('float32')
        sample = preprocess_image_1(sample)
    elif 'svhn' == datasets:
        sample = np.asarray([np.asarray(imread(sample_path)).reshape(32, 32, 3)]).astype('float32')
        sample = preprocess_image_1(sample)
    '''
    probabilities = model_prediction(sess, x, model(x), sample, feed=feed_dict)
    if sample.shape[0] == 1:
        current_class = np.argmax(probabilities)
    else:
        current_class = np.argmax(probabilities, axis=1)

    if not os.path.exists(store_path):
        os.makedirs(store_path)

    # only keep the samples the black-box model already classifies correctly
    acc_pre_index = []
    for i in range(0, sample.shape[0]):
        if current_class[i] == np.argmax(Y_test[i]):
            acc_pre_index.append(i)

    sample_acc = np.zeros(shape=(len(acc_pre_index), input_shape[1], input_shape[2], input_shape[3]),
                          dtype='float32')
    probabilities_acc = np.zeros(shape=(len(acc_pre_index), nb_classes), dtype='float32')
    current_class_acc = np.zeros(shape=(len(acc_pre_index)), dtype=int)
    for i in range(0, len(acc_pre_index)):
        sample_acc[i] = sample[acc_pre_index[i]]
        probabilities_acc[i] = probabilities[acc_pre_index[i]]
        current_class_acc[i] = current_class[acc_pre_index[i]]

    if datasets == 'mnist':
        fgsm_par = {'eps': step_size, 'ord': np.inf, 'clip_min': 0., 'clip_max': 1.}
    elif 'cifar10' == datasets:
        fgsm_par = {'eps': step_size, 'ord': np.inf, 'clip_min': 0., 'clip_max': 1.}
    elif 'svhn' == datasets:
        fgsm_par = {'eps': step_size, 'ord': np.inf, 'clip_min': 0., 'clip_max': 1.}
    fgsm = FastGradientMethod(model_sub, sess=sess)

    # Craft adversarial examples using the substitute
    x_adv_sub = fgsm.generate(x, **fgsm_par)

    nb_batches = int(math.ceil(float(sample_acc.shape[0]) / batch_size))
    suc = 0
    for batch in range(nb_batches):
        # start, end = batch_indices(batch, sample_acc.shape[0], batch_size)
        print(batch)
        start = batch * batch_size
        end = (batch + 1) * batch_size
        if end > sample_acc.shape[0]:
            end = sample_acc.shape[0]
        adv = sess.run(x_adv_sub, feed_dict={x: sample_acc[start:end],
                                             y: probabilities_acc[start:end]})
        adv_img_deprocessed = deprocess_image_1(adv)

        # Check if success was achieved
        # probabilities = model_prediction(sess, x, preds, sample, feed=feed_dict)
        new_class_label = model_argmax(sess, x, model(x), adv, feed=feed_dict)  # Predicted class of the generated adversary
        for i in range(0, len(new_class_label)):
            j = batch * batch_size + i
            if new_class_label[i] != current_class_acc[j]:
                suc += 1
                path = store_path + '/' + str(j) + '_' + str(time.time() * 1000) + '_' + \
                       str(current_class_acc[j]) + '_' + str(new_class_label[i]) + '.png'
                imsave(path, adv_img_deprocessed[i])

    # Close TF session
    sess.close()

    return suc, len(acc_pre_index)
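
# Illustrative sketch (hypothetical model and substitute names): the black-box variant
# crafts the perturbation on the substitute model and checks success against model(x).
def _example_blackbox_call():
    X_train, Y_train, X_test, Y_test = get_data('mnist')
    return blackbox(datasets='mnist', sample=X_test[0:1000], model_name='lenet1',
                    submodel_name='lenet1_sub',
                    store_path='../mt_result/mnist/blackbox/test_data', step_size=0.3)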
def jsma(datasets, sample_path, model_name, target, store_path='../mt_result/integration/jsma/mnist'):
    """
    The Jacobian-based Saliency Map Approach (JSMA).

    :param datasets: dataset name
    :param sample_path: path of the image file to attack
    :param model_name: name of the model under attack
    :param target: the class the adversarial example should be classified as
    :param store_path: directory where the adversarial example is stored
    :return:
    """
    sess, preds, x, y, model, feed_dict = model_load(datasets, model_name)

    ###########################################################################
    # Craft adversarial examples using the Jacobian-based saliency map approach
    ###########################################################################
    if 'mnist' == datasets:
        sample = np.asarray([np.asarray(imread(sample_path)).reshape(28, 28, 1)]).astype('float32')
        sample = preprocess_image_1(sample)
    elif 'cifar10' == datasets:
        sample = np.asarray([np.asarray(imread(sample_path)).reshape(32, 32, 3)]).astype('float32')
        sample = preprocess_image_1(sample)
    elif 'svhn' == datasets:
        sample = np.asarray([np.asarray(imread(sample_path)).reshape(32, 32, 3)]).astype('float32')
        sample = preprocess_image_1(sample)

    input_shape, nb_classes = get_shape(datasets)
    current_class = model_argmax(sess, x, preds, sample, feed=feed_dict)

    if not os.path.exists(store_path):
        os.makedirs(store_path)

    if target == current_class:
        return 'The target is equal to its original class'
    elif target >= nb_classes or target < 0:
        return 'The target is out of range'

    print('Start generating adv. example for target class %i' % target)

    # Instantiate a SaliencyMapMethod attack object
    jsma = SaliencyMapMethod(model, back='tf', sess=sess)
    jsma_params = {'theta': 1., 'gamma': 0.1, 'clip_min': 0., 'clip_max': 1., 'y_target': None}

    # This call runs the Jacobian-based saliency map approach
    one_hot_target = np.zeros((1, nb_classes), dtype=np.float32)
    one_hot_target[0, target] = 1
    jsma_params['y_target'] = one_hot_target
    adv_x = jsma.generate_np(sample, **jsma_params)

    # Check if success was achieved
    new_class_label = model_argmax(sess, x, preds, adv_x, feed=feed_dict)  # Predicted class of the generated adversary
    res = int(new_class_label == target)

    # Close TF session
    sess.close()

    if res == 1:
        adv_img_deprocessed = deprocess_image_1(adv_x)
        i = sample_path.split('/')[-1].split('.')[-2]
        path = store_path + '/adv_' + str(time.time() * 1000) + '_' + i + '_' + \
               str(current_class) + '_' + str(new_class_label) + '_.png'
        imsave(path, adv_img_deprocessed)
        print('$$$adv_img{' + path + '}')
        print('$$$ori_img{' + sample_path + '}')
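
# Illustrative sketch (hypothetical file, model name, and target class): jsma() takes a
# single image file on disk rather than an in-memory batch, so the FLAGS-driven main()
# earlier in this section first saves the chosen test image to FLAGS.sample and then
# passes that path in.
def _example_jsma_call():
    return jsma(datasets='mnist',
                sample_path='../datasets/integration/batch_attack/mnist/198.png',
                model_name='lenet1', target=3,
                store_path='../mt_result/integration/jsma/mnist')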