def pure(datasets='mnist', attack='fgsm', model_name='lenet1'):
    """Keep only the stored adversarial samples whose recorded adversarial
    label still matches the current model's prediction, and copy them into
    the .../pure/train and .../pure/test directories."""
    tf.reset_default_graph()
    samples_path = '../adv_result/' + datasets + '/' + attack + '/' + model_name + '/pure'
    if not os.path.isdir(samples_path):
        os.makedirs(samples_path + '/train')
        os.makedirs(samples_path + '/test')

    samples_path_train = '../adv_result/' + datasets + '/' + attack + '/' + model_name + '/train_data'
    samples_path_test = '../adv_result/' + datasets + '/' + attack + '/' + model_name + '/test_data'

    sess, preds, x, y, model, feed_dict = model_load(datasets, model_name)

    [image_list_train, image_files_train, real_labels_train,
     predicted_labels_train] = get_data_file(samples_path_train)
    [image_list_test, image_files_test, real_labels_test,
     predicted_labels_test] = get_data_file(samples_path_test)

    samples_train = np.asarray(image_list_train)
    samples_test = np.asarray(image_list_test)

    probabilities_train = model_prediction(sess, x, preds, samples_train, feed=feed_dict)
    probabilities_test = model_prediction(sess, x, preds, samples_test, feed=feed_dict)

    # Keep a sample only if the model still predicts the adversarial label
    # that was recorded when the sample was generated.
    for i in range(0, samples_train.shape[0]):
        if predicted_labels_train[i] == np.argmax(probabilities_train[i]):
            pure_train = samples_path + '/train/' + image_files_train[i]
            np.save(pure_train, image_list_train[i])

    for i in range(0, samples_test.shape[0]):
        if predicted_labels_test[i] == np.argmax(probabilities_test[i]):
            pure_test = samples_path + '/test/' + image_files_test[i]
            np.save(pure_test, image_list_test[i])
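# A minimal usage sketch for pure() (argument values are just examples that
# mirror the defaults; '../adv_result/mnist/fgsm/lenet1/train_data' and
# '.../test_data' are assumed to already hold .npy samples produced by the
# attack functions below):
#
#   pure(datasets='mnist', attack='fgsm', model_name='lenet1')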
def mr(datasets, model_name, attack, va, epoch=49):
    """Misclassification ratio: the number of adversarial examples the attack
    produced, divided by the number of test inputs the model classifies
    correctly in the first place.

    :param datasets: dataset name, e.g. 'mnist'
    :param model_name: name of the attacked model
    :param attack: attack name, e.g. 'fgsm'
    :param va: attack parameter used in the stored-sample directory name
               (step size for FGSM; parameter_target for targeted attacks)
    :return: the misclassification ratio
    """
    tf.reset_default_graph()
    X_train, Y_train, X_test, Y_test = get_data(datasets)
    input_shape, nb_classes = get_shape(datasets)
    sample = X_test
    sess, preds, x, y, model, feed_dict = model_load(datasets, model_name, epoch=epoch)

    probabilities = model_prediction(sess, x, preds, sample, feed=feed_dict, datasets=datasets)
    if sample.shape[0] == 1:
        current_class = np.argmax(probabilities)
    else:
        current_class = np.argmax(probabilities, axis=1)

    # Only count test inputs the model classifies correctly.
    acc_pre_index = []
    for i in range(0, sample.shape[0]):
        if current_class[i] == np.argmax(Y_test[i]):
            acc_pre_index.append(i)
    print(len(acc_pre_index))
    sess.close()

    if attack == 'fgsm':
        # FGSM is untargeted: one directory per attack parameter.
        samples_path = '../adv_result/' + datasets + '/' + attack + '/' + model_name + '/' + str(va)
        [image_list, image_files, real_labels, predicted_labels] = get_data_file(samples_path)
        return len(image_list) / len(acc_pre_index)
    else:
        # Targeted attacks (CW, JSMA): one directory per (parameter, target) pair.
        total = 0
        for tar in range(0, nb_classes):
            samples_path = '../adv_result/' + datasets + '/' + attack + '/' + model_name + '/' + str(va) + '_' + str(tar)
            [image_list, image_files, real_labels, predicted_labels] = get_data_file(samples_path)
            total += len(image_list)
        return total / len(acc_pre_index)
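# Example (a sketch; assumes the FGSM samples for step size 0.3 were already
# generated into '../adv_result/mnist/fgsm/lenet1/0.3'):
#
#   ratio = mr('mnist', 'lenet1', 'fgsm', 0.3, epoch=49)
#   print('misclassification ratio: %.4f' % ratio)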
def get_mc_predictions(sess, x, preds, samples, nb_iter=50):
    """Collect nb_iter stochastic forward passes over the same inputs
    (Monte-Carlo prediction, e.g. for dropout-based uncertainty estimates).

    :param sess: TF session
    :param x: input placeholder
    :param preds: model output tensor
    :param samples: inputs to predict on
    :param nb_iter: number of forward passes
    :return: array of shape (nb_iter, n_samples, nb_classes)
    """
    preds_mc = []
    for i in tqdm(range(nb_iter)):
        preds_mc.append(model_prediction(sess, x, preds, samples))
    return np.asarray(preds_mc)
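# The returned array stacks one prediction matrix per pass, so Monte-Carlo
# statistics reduce along axis 0. A self-contained sketch of that reduction
# (random numbers stand in for real model outputs):
def _mc_stats_example():
    rng = np.random.RandomState(0)
    preds_mc = rng.rand(50, 4, 10)      # (nb_iter, n_samples, nb_classes)
    mc_mean = preds_mc.mean(axis=0)     # averaged class probabilities
    mc_var = preds_mc.var(axis=0)       # per-class predictive variance
    return mc_mean, mc_var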
def bim(datasets, sample, model_name, store_path, step_size='0.3', batch_size=256, epoch=9):
    """Craft adversarial examples with the Basic Iterative Method (BIM).

    :param datasets: dataset name
    :param sample: inputs to attack
    :param model_name: name of the attacked model
    :param store_path: directory for the generated adversarial samples
    :param step_size: perturbation bound eps (the per-iteration step is eps / 6)
    :return: (number of successful adversaries, number of correctly classified inputs)
    """
    tf.reset_default_graph()
    X_train, Y_train, X_test, Y_test = get_data(datasets)
    input_shape, nb_classes = get_shape(datasets)
    print(epoch)
    sess, preds, x, y, model, feed_dict = model_load(datasets, model_name, epoch=epoch)

    ###########################################################################
    # Craft adversarial examples using the BIM approach
    ###########################################################################
    probabilities = model_prediction(sess, x, preds, sample, feed=feed_dict)
    if sample.shape[0] == 1:
        current_class = np.argmax(probabilities)
    else:
        current_class = np.argmax(probabilities, axis=1)

    if not os.path.exists(store_path):
        os.makedirs(store_path)

    # Attack only the inputs the model classifies correctly.
    acc_pre_index = []
    for i in range(0, sample.shape[0]):
        if current_class[i] == np.argmax(Y_test[i]):
            acc_pre_index.append(i)

    sample_acc = np.zeros(shape=(len(acc_pre_index), input_shape[1], input_shape[2], input_shape[3]), dtype='float32')
    probabilities_acc = np.zeros(shape=(len(acc_pre_index), nb_classes), dtype='float32')
    current_class_acc = np.zeros(shape=(len(acc_pre_index)), dtype=int)
    for i in range(0, len(acc_pre_index)):
        sample_acc[i] = sample[acc_pre_index[i]]
        probabilities_acc[i] = probabilities[acc_pre_index[i]]
        current_class_acc[i] = current_class[acc_pre_index[i]]

    print('Start generating adv. example')
    # The BIM parameters are identical for mnist, cifar10 and svhn.
    bim_params = {
        'eps': float(step_size),
        'eps_iter': float(step_size) / 6,
        'clip_min': 0.,
        'clip_max': 1.
    }
    bim = BasicIterativeMethod(model, sess=sess)
    adv_x = bim.generate(x, **bim_params)

    nb_batches = int(math.ceil(float(sample_acc.shape[0]) / batch_size))
    suc = 0
    for batch in range(nb_batches):
        print(batch)
        start = batch * batch_size
        end = min((batch + 1) * batch_size, sample_acc.shape[0])
        adv = sess.run(adv_x, feed_dict={x: sample_acc[start:end],
                                         y: probabilities_acc[start:end]})
        # Predicted class of the generated adversary
        new_class_label = model_argmax(sess, x, preds, adv, feed=feed_dict)
        for i in range(0, len(new_class_label)):
            j = batch * batch_size + i
            if new_class_label[i] != current_class_acc[j]:
                suc += 1
                # File name: <original index>_<timestamp>_<true class>_<adv class>
                path = store_path + '/' + str(acc_pre_index[j]) + '_' + str(time.time() * 1000) + '_' + str(current_class_acc[j]) + '_' + str(new_class_label[i])
                np.save(path, adv[i])

    # Close TF session
    sess.close()
    return suc, len(acc_pre_index)
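# The batching above truncates the final slice instead of padding it. A tiny
# self-contained illustration of the start/end arithmetic used in bim():
def _batch_bounds_example(n=1000, batch_size=256):
    bounds = []
    for batch in range(int(math.ceil(float(n) / batch_size))):
        start = batch * batch_size
        end = min((batch + 1) * batch_size, n)
        bounds.append((start, end))
    return bounds  # [(0, 256), (256, 512), (512, 768), (768, 1000)]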
def jsma(datasets, sample, model_name, target, store_path, gamma=0.1, start=0,
         end=10000, batch_size=32, epoch=9, mu=False, mu_var='gf', de=False,
         attack='fgsm'):
    """Craft adversarial examples with the Jacobian-based Saliency Map
    Approach (JSMA).

    :param datasets: dataset name
    :param sample: inputs to attack
    :param target: the class to generate
    :param store_path: directory for the generated adversarial samples
    :return: (number of successful adversaries, number of correctly classified inputs)
    """
    tf.reset_default_graph()
    X_train, Y_train, X_test, Y_test = get_data(datasets)
    sess, preds, x, y, model, feed_dict = model_load(datasets, model_name, epoch=epoch,
                                                     mu=mu, mu_var=mu_var, de=de,
                                                     attack=attack)

    ###########################################################################
    # Craft adversarial examples using the Jacobian-based saliency map approach
    ###########################################################################
    input_shape, nb_classes = get_shape(datasets)
    sample = sample[start:end]
    probabilities = model_prediction(sess, x, preds, sample, feed=feed_dict)
    current_class = []
    for i in range(0, probabilities.shape[0]):
        current_class.append(np.argmax(probabilities[i]))

    if not os.path.exists(store_path):
        os.makedirs(store_path)

    # Attack only the inputs the model classifies correctly.
    Y_test = Y_test[start:end]
    acc_pre_index = []
    for i in range(0, sample.shape[0]):
        if current_class[i] == np.argmax(Y_test[i]):
            acc_pre_index.append(i)

    print('Start generating adv. example for target class %i' % target)
    sample_acc = np.zeros(shape=(len(acc_pre_index), input_shape[1], input_shape[2], input_shape[3]), dtype='float')
    current_class_acc = np.zeros(shape=(len(acc_pre_index)), dtype=int)
    for i in range(0, len(acc_pre_index)):
        sample_acc[i] = sample[acc_pre_index[i]]
        current_class_acc[i] = current_class[acc_pre_index[i]]

    # Instantiate a SaliencyMapMethod attack object
    jsma = SaliencyMapMethod(model, back='tf', sess=sess)
    one_hot_target = np.zeros((1, nb_classes), dtype=np.float32)
    one_hot_target[0, target] = 1
    jsma_params = {
        'theta': 1.,
        'gamma': gamma,
        'clip_min': 0.,
        'clip_max': 1.,
        'y_target': one_hot_target
    }

    suc = 0
    nb_batches = int(math.ceil(float(sample_acc.shape[0]) / batch_size))
    for batch in range(nb_batches):
        start_batch = batch * batch_size
        end_batch = min((batch + 1) * batch_size, sample_acc.shape[0])
        adv_inputs = sample_acc[start_batch:end_batch]
        for j in range(start_batch, end_batch):
            # Skip inputs that already belong to the target class.
            if current_class_acc[j] != target:
                adv_input = adv_inputs[j - start_batch].reshape(1, input_shape[1], input_shape[2], input_shape[3])
                adv = jsma.generate_np(adv_input, **jsma_params)
                new_class_labels = model_argmax(sess, x, preds, adv, feed=feed_dict)
                if new_class_labels == target:
                    suc += 1
                    adv = adv.reshape(adv.shape[1], adv.shape[2], adv.shape[3])
                    # File name: <original index>_<timestamp>_<true class>_<adv class>
                    path = store_path + '/' + str(start + acc_pre_index[j]) + '_' + str(time.time() * 1000) + '_' + str(current_class_acc[j]) + '_' + str(new_class_labels)
                    np.save(path, adv)

    # Close TF session
    sess.close()
    return suc, len(acc_pre_index)
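# JSMA is targeted, so a full sweep typically loops over every class. A
# hypothetical driver (not part of the original API; the per-target index
# blocks of 1000 mirror the layout that choose_mu() expects):
#
#   X_train, Y_train, X_test, Y_test = get_data('mnist')
#   for target in range(10):
#       suc, n = jsma('mnist', X_test, 'lenet1', target,
#                     '../adv_result/mnist/jsma/lenet1/0.1_' + str(target),
#                     gamma=0.1, start=1000 * target, end=1000 * (target + 1))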
def choose_mu(attack='fgsm', datasets='mnist', total_num=10000, model_name='lenet1', mu_var='gf'):
    """Re-evaluate stored adversarial samples on the mu_var-mutated model,
    keep only those whose original input the mutated model classifies
    correctly and whose adversarial label survives, and split the survivors
    into test/train stores under ../adv_result/mu_<datasets>/..."""
    tf.reset_default_graph()
    tf.set_random_seed(1234)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    X_train, Y_train, X_test, Y_test = get_data(datasets)
    sess, preds, x, y, model, feed_dict = model_load(datasets, model_name, de=False,
                                                     epoch=9, attack='fgsm',
                                                     mu=True, mu_var=mu_var)
    pre = model_prediction(sess, x, preds, X_test, feed=feed_dict, datasets=datasets)
    acc_pre_index = []
    for i in range(0, pre.shape[0]):
        if np.argmax(pre[i]) == np.argmax(Y_test[i]):
            acc_pre_index.append(i)

    input_shape, nb_classes = get_shape(datasets)
    train_path = '../adv_result/' + datasets + '/' + attack + '/' + model_name
    store_path_train = '../adv_result/mu_' + datasets + '/' + mu_var + '/' + attack + '/' + model_name + '/train_data'
    store_path_test = '../adv_result/mu_' + datasets + '/' + mu_var + '/' + attack + '/' + model_name + '/test_data'
    if not os.path.isdir(store_path_train):
        os.makedirs(store_path_train)
    if not os.path.isdir(store_path_test):
        os.makedirs(store_path_test)

    def split_and_save(samples_path, prefix, test_range, train_range):
        """Load the samples in samples_path, keep those whose original index
        is in acc_pre_index and whose adversarial label survives the mutated
        model, and save them (file names prefixed with the attack parameter)
        into the test/train stores according to their original index."""
        [image_list, image_files, real_labels, predicted_labels] = get_data_file(samples_path)
        samples_adv = np.asarray(image_list)
        result = model_prediction(sess, x, preds, samples_adv, feed=feed_dict, datasets=datasets)
        # The leading '_'-separated field of each file name is the index of
        # the original test input the sample was crafted from.
        ind_file = [image_files[i].split('_')[0] for i in range(len(image_list))]
        ind = []
        for i in range(len(image_list)):
            nn = int(image_files[i].split('_')[0])
            if (nn in acc_pre_index) and (predicted_labels[i] == np.argmax(result[i])):
                ind.append(image_files[i].split('_')[0])
        for store_path, index_range in [(store_path_test, test_range),
                                        (store_path_train, train_range)]:
            for i in index_range:
                if str(i) in ind:
                    i_index = ind_file.index(str(i))
                    image_files[i_index] = prefix + '_' + image_files[i_index]
                    np.save(store_path + '/' + image_files[i_index], image_list[i_index])

    # The test/train split fraction and the attack-parameter grids differ per dataset.
    if datasets == 'cifar10':
        frac = 6
        step_size, cw_ini_cons, jsma_var = [0.01, 0.02, 0.03], [0.1, 0.2, 0.3], [0.09, 0.1, 0.11]
    if datasets == 'mnist':
        frac = 7
        step_size, cw_ini_cons, jsma_var = [0.2, 0.3, 0.4], [9, 10, 11], [0.09, 0.1, 0.11]

    if attack == 'fgsm':
        # Untargeted FGSM: one directory per step size; split over the whole test set.
        for s in step_size:
            split_and_save(train_path + '/' + str(s), str(s),
                           range(0, int(math.ceil(X_test.shape[0] / frac))),
                           range(int(math.ceil(X_test.shape[0] / frac)), X_test.shape[0]))
    elif attack in ('cw', 'jsma'):
        # Targeted CW/JSMA: one directory per (parameter, target) pair; every
        # target class owns a block of 1000 consecutive original indices.
        params = cw_ini_cons if attack == 'cw' else jsma_var
        for t in range(0, 10):
            for c in params:
                split_and_save(train_path + '/' + str(c) + '_' + str(t), str(c),
                               range(1000 * t, 1000 * t + int(math.ceil(1000 / frac))),
                               range(1000 * t + int(math.ceil(1000 / frac)), 1000 * (t + 1)))
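# Example (a sketch; assumes the raw adversarial samples were generated first,
# e.g. by the fgsm/cw/jsma functions, and that a mutated model is available
# for the given mu_var):
#
#   choose_mu(attack='fgsm', datasets='mnist', model_name='lenet1', mu_var='gf')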
def blackbox(datasets, sample, model_name, submodel_name, store_path, step_size=0.3, batch_size=256):
    """The black-box attack from arxiv.org/abs/1602.02697: train a substitute
    model, craft FGSM adversaries on it, and transfer them to the black box.

    :param datasets: dataset name
    :param sample: inputs to attack
    :param model_name: name of the black-box model
    :param submodel_name: name of the substitute model
    :param store_path: directory for the generated adversarial samples
    :return: (number of successful adversaries, number of correctly classified inputs)
    """
    # Simulate the black-box model locally.
    # You could replace this by a remote labeling API, for instance.
    print("Preparing the black-box model.")
    tf.reset_default_graph()
    X_train, Y_train, X_test, Y_test = get_data(datasets)
    input_shape, nb_classes = get_shape(datasets)
    sess, bbox_preds, x, y, model, feed_dict = model_load(datasets, model_name)

    # Train substitute using method from https://arxiv.org/abs/1602.02697
    print("Preparing the substitute model.")
    model_sub, preds_sub = sub_model_load(sess, datasets, submodel_name, model_name)

    ###########################################################################
    # Craft adversarial examples using the black-box approach
    ###########################################################################
    probabilities = model_prediction(sess, x, model(x), sample, feed=feed_dict)
    if sample.shape[0] == 1:
        current_class = np.argmax(probabilities)
    else:
        current_class = np.argmax(probabilities, axis=1)

    if not os.path.exists(store_path):
        os.makedirs(store_path)

    # Attack only the inputs the black-box model classifies correctly.
    acc_pre_index = []
    for i in range(0, sample.shape[0]):
        if current_class[i] == np.argmax(Y_test[i]):
            acc_pre_index.append(i)

    sample_acc = np.zeros(shape=(len(acc_pre_index), input_shape[1], input_shape[2], input_shape[3]), dtype='float32')
    probabilities_acc = np.zeros(shape=(len(acc_pre_index), nb_classes), dtype='float32')
    current_class_acc = np.zeros(shape=(len(acc_pre_index)), dtype=int)
    for i in range(0, len(acc_pre_index)):
        sample_acc[i] = sample[acc_pre_index[i]]
        probabilities_acc[i] = probabilities[acc_pre_index[i]]
        current_class_acc[i] = current_class[acc_pre_index[i]]

    # The FGSM parameters are identical for mnist, cifar10 and svhn.
    fgsm_par = {'eps': step_size, 'ord': np.inf, 'clip_min': 0., 'clip_max': 1.}
    fgsm = FastGradientMethod(model_sub, sess=sess)

    # Craft adversarial examples on the substitute and transfer them.
    x_adv_sub = fgsm.generate(x, **fgsm_par)
    nb_batches = int(math.ceil(float(sample_acc.shape[0]) / batch_size))
    suc = 0
    for batch in range(nb_batches):
        print(batch)
        start = batch * batch_size
        end = min((batch + 1) * batch_size, sample_acc.shape[0])
        adv = sess.run(x_adv_sub, feed_dict={x: sample_acc[start:end],
                                             y: probabilities_acc[start:end]})
        adv_img_deprocessed = deprocess_image_1(adv)
        # Predicted class of the generated adversary on the black-box model
        new_class_label = model_argmax(sess, x, model(x), adv, feed=feed_dict)
        for i in range(0, len(new_class_label)):
            j = batch * batch_size + i
            if new_class_label[i] != current_class_acc[j]:
                suc += 1
                # File name: <index>_<timestamp>_<true class>_<adv class>.png
                path = store_path + '/' + str(j) + '_' + str(time.time() * 1000) + '_' + str(current_class_acc[j]) + '_' + str(new_class_label[i]) + '.png'
                imsave(path, adv_img_deprocessed[i])

    # Close TF session
    sess.close()
    return suc, len(acc_pre_index)
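# Example (a sketch; 'lenet1' as the black box and 'lenet4' as the substitute
# are assumptions for illustration, not prescribed pairings):
#
#   X_train, Y_train, X_test, Y_test = get_data('mnist')
#   suc, n = blackbox('mnist', X_test, 'lenet1', 'lenet4',
#                     '../adv_result/mnist/blackbox/lenet1/0.3', step_size=0.3)
#   print('transfer success rate: %.4f' % (float(suc) / n))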
def cw(datasets, sample, model_name, target, store_path, ini_con=10, start=0,
       end=10000, batch_size=32, epoch=9, mu=False, mu_var='gf', de=False,
       attack='fgsm'):
    """Carlini and Wagner's L2 attack.

    :param datasets: dataset name
    :param sample: inputs to attack
    :param target: the class to generate
    :param store_path: directory for the generated adversarial samples
    :param ini_con: initial constant of the CW optimization
    :return: (number of successful adversaries, number of correctly classified inputs)
    """
    tf.reset_default_graph()
    X_train, Y_train, X_test, Y_test = get_data(datasets)
    sess, preds, x, y, model, feed_dict = model_load(datasets, model_name, epoch=epoch,
                                                     mu=mu, mu_var=mu_var, de=de,
                                                     attack=attack)
    print('model loaded successfully')

    ###########################################################################
    # Craft adversarial examples using Carlini and Wagner's approach
    ###########################################################################
    input_shape, nb_classes = get_shape(datasets)
    sample = sample[start:end]
    probabilities = model_prediction(sess, x, preds, sample, feed=feed_dict)
    current_class = []
    for i in range(0, probabilities.shape[0]):
        current_class.append(np.argmax(probabilities[i]))

    if not os.path.exists(store_path):
        os.makedirs(store_path)

    # Attack only the inputs the model classifies correctly.
    Y_test = Y_test[start:end]
    acc_pre_index = []
    for i in range(0, sample.shape[0]):
        if current_class[i] == np.argmax(Y_test[i]):
            acc_pre_index.append(i)
    print('current_class', current_class)
    print('Start generating adv. example for target class %i' % target)

    sample_acc = np.zeros(shape=(len(acc_pre_index), input_shape[1], input_shape[2], input_shape[3]), dtype='float')
    current_class_acc = np.zeros(shape=(len(acc_pre_index)), dtype=int)
    for i in range(0, len(acc_pre_index)):
        sample_acc[i] = sample[acc_pre_index[i]]
        current_class_acc[i] = current_class[acc_pre_index[i]]
    print('current_class_acc', current_class_acc)

    # Instantiate a CW attack object
    cw = CarliniWagnerL2(model, back='tf', sess=sess)
    one_hot = np.zeros((1, nb_classes), dtype=np.float32)
    one_hot[0, target] = 1
    adv_ys = one_hot
    yname = "y_target"
    # The CW parameters are identical for mnist and cifar10.
    cw_params = {
        'binary_search_steps': 1,
        yname: adv_ys,
        'max_iterations': 1000,
        'learning_rate': 0.1,
        'batch_size': 1,
        'initial_const': ini_con
    }

    suc = 0
    nb_batches = int(math.ceil(float(sample_acc.shape[0]) / batch_size))
    for batch in range(nb_batches):
        start_batch = batch * batch_size
        end_batch = min((batch + 1) * batch_size, sample_acc.shape[0])
        adv_inputs = sample_acc[start_batch:end_batch]
        for j in range(start_batch, end_batch):
            # Skip inputs that already belong to the target class.
            if current_class_acc[j] != target:
                adv_input = adv_inputs[j - start_batch].reshape(1, input_shape[1], input_shape[2], input_shape[3])
                adv = cw.generate_np(adv_input, **cw_params)
                new_class_labels = model_argmax(sess, x, preds, adv, feed=feed_dict)
                if new_class_labels == target:
                    suc += 1
                    adv = adv.reshape(adv.shape[1], adv.shape[2], adv.shape[3])
                    # File name: <original index>_<timestamp>_<true class>_<adv class>
                    path = store_path + '/' + str(start + acc_pre_index[j]) + '_' + str(time.time() * 1000) + '_' + str(current_class_acc[j]) + '_' + str(new_class_labels)
                    np.save(path, adv)

    sess.close()
    return suc, len(acc_pre_index)
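# Example (a sketch; the per-target index blocks of 1000 mirror the layout
# that choose_mu() expects):
#
#   X_train, Y_train, X_test, Y_test = get_data('mnist')
#   for target in range(10):
#       suc, n = cw('mnist', X_test, 'lenet1', target,
#                   '../adv_result/mnist/cw/lenet1/10_' + str(target),
#                   ini_con=10, start=1000 * target, end=1000 * (target + 1))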
def cos(datasets, model, de_model, attack='fgsm', epoch=49, de_epoch=49):
    """Average Jensen-Shannon divergence between the original model's and the
    defended model's predictive distributions, over the test inputs that both
    models classify correctly.

    :param datasets: dataset name
    :param model: name of the original model
    :param de_model: name of the defense-enhanced model
    :return: the mean JS divergence
    """
    tf.reset_default_graph()
    print("load defense model.")
    sess, preds, x, y, model, feed_dict = model_load(datasets, model, epoch=epoch)
    X_train, Y_train, X_test, Y_test = get_data(datasets)
    input_shape, nb_classes = get_shape(datasets)
    result_nor = model_prediction(sess, x, preds, X_test, feed=feed_dict, datasets=datasets)

    tf.reset_default_graph()
    sess, preds_de, x, y, model_de, feed_dict = model_load(datasets, de_model, de=True,
                                                           attack=attack, epoch=de_epoch)
    result_de = model_prediction(sess, x, preds_de, X_test, feed=feed_dict, datasets=datasets)

    num = 0
    js = 0.
    for i in range(Y_test.shape[0]):
        # Only compare inputs that both models classify correctly.
        if (np.argmax(Y_test[i]) == np.argmax(result_nor[i])) and \
                (np.argmax(Y_test[i]) == np.argmax(result_de[i])):
            num += 1
            p = result_nor[i]
            q = result_de[i]
            M = (p + q) / 2
            js = js + 0.5 * scipy.stats.entropy(p, M) + 0.5 * scipy.stats.entropy(q, M)

    result = js / num
    print("JS divergence: ", result)
    # Close TF session
    sess.close()
    return result
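# A self-contained check of the JS-divergence term accumulated in cos(); the
# two distributions below are made up for illustration:
def _js_divergence_example():
    p = np.array([0.7, 0.2, 0.1])
    q = np.array([0.6, 0.3, 0.1])
    M = (p + q) / 2
    # scipy.stats.entropy(p, M) is KL(p || M); JS is the symmetrized average,
    # 0 when p == q and at most ln(2).
    return 0.5 * scipy.stats.entropy(p, M) + 0.5 * scipy.stats.entropy(q, M)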
def ccv(datasets='mnist', model='lenet1', de_model='lenet1', attack='fgsm', epoch=49, de_epoch=49):
    """Average absolute change in true-class confidence between the original
    model and the defended model, over the test inputs that both models
    classify correctly.

    :param datasets: dataset name
    :param model: name of the original model
    :param de_model: name of the defense-enhanced model
    :return: the mean absolute confidence difference
    """
    tf.reset_default_graph()
    print("load defense model.")
    sess, preds, x, y, model, feed_dict = model_load(datasets, model, epoch=epoch)
    X_train, Y_train, X_test, Y_test = get_data(datasets)
    input_shape, nb_classes = get_shape(datasets)
    result_nor = model_prediction(sess, x, preds, X_test, feed=feed_dict, datasets=datasets)

    tf.reset_default_graph()
    sess, preds_de, x, y, model_de, feed_dict = model_load(datasets, de_model, True,
                                                           attack=attack, epoch=de_epoch)
    result_de = model_prediction(sess, x, preds_de, X_test, feed=feed_dict, datasets=datasets)

    result = 0
    num = 0
    for i in range(Y_test.shape[0]):
        # Only compare inputs that both models classify correctly.
        if (np.argmax(Y_test[i]) == np.argmax(result_nor[i])) and \
                (np.argmax(Y_test[i]) == np.argmax(result_de[i])):
            num += 1
            result += abs(result_nor[i][np.argmax(Y_test[i])] -
                          result_de[i][np.argmax(Y_test[i])])

    print(result / num)
    # Close TF session
    sess.close()
    return result / num
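# A self-contained check of the confidence-difference term averaged in ccv();
# the probability rows below are made up for illustration:
def _ccv_term_example():
    y_true = 0                                # true class index
    p_nor = np.array([0.9, 0.05, 0.05])       # original model
    p_de = np.array([0.7, 0.2, 0.1])          # defended model
    return abs(p_nor[y_true] - p_de[y_true])  # ~0.2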