def main(argv=None):
    # 0. Select a dataset.
    from datasets import MNISTDataset, CIFAR10Dataset, ImageNetDataset
    from datasets import get_correct_prediction_idx, evaluate_adversarial_examples, \
        calculate_mean_confidence, calculate_accuracy

    if FLAGS.dataset_name == "MNIST":
        dataset = MNISTDataset()
    elif FLAGS.dataset_name == "CIFAR-10":
        dataset = CIFAR10Dataset()
    elif FLAGS.dataset_name == "ImageNet":
        dataset = ImageNetDataset()

    # 1. Load a dataset.
    print("\n===Loading %s data..." % FLAGS.dataset_name)
    if FLAGS.dataset_name == 'ImageNet':
        if FLAGS.model_name == 'inceptionv3':
            img_size = 299
        else:
            img_size = 224
        X_test_all, Y_test_all = dataset.get_test_data(img_size, 0, 200)
    else:
        X_test_all, Y_test_all = dataset.get_test_dataset()

    # 2. Load a trained model.
    sess = load_tf_session()
    keras.backend.set_learning_phase(0)

    # Define the input TF placeholders.
    x = tf.placeholder(tf.float32,
                       shape=(None, dataset.image_size, dataset.image_size,
                              dataset.num_channels))
    y = tf.placeholder(tf.float32, shape=(None, dataset.num_classes))

    with tf.variable_scope(FLAGS.model_name):
        """
        Create a model instance for prediction.
        The scaling argument 'input_range_type':
            {1: [0, 1], 2: [-0.5, 0.5], 3: [-1, 1], ...}
        """
        model = dataset.load_model_by_name(FLAGS.model_name,
                                           logits=False,
                                           input_range_type=1)
        model.compile(loss='categorical_crossentropy',
                      optimizer='sgd',
                      metrics=['acc'])

    # 3. Evaluate the trained model.
    # TODO: add top-5 accuracy for ImageNet.
    print("Evaluating the pre-trained model...")
    Y_pred_all = model.predict(X_test_all)
    mean_conf_all = calculate_mean_confidence(Y_pred_all, Y_test_all)
    accuracy_all = calculate_accuracy(Y_pred_all, Y_test_all)
    print('Test accuracy on raw legitimate examples %.4f' % accuracy_all)
    print('Mean confidence on ground truth classes %.4f' % mean_conf_all)

    # 4. Select some examples to attack.
    import hashlib
    from datasets import get_first_n_examples_id_each_class

    if FLAGS.select:
        # Filter out the misclassified examples.
        correct_idx = get_correct_prediction_idx(Y_pred_all, Y_test_all)
        if FLAGS.test_mode:
            # Only select the first example of each class.
            correct_and_selected_idx = get_first_n_examples_id_each_class(
                Y_test_all[correct_idx])
            selected_idx = [correct_idx[i] for i in correct_and_selected_idx]
        else:
            if not FLAGS.balance_sampling:
                selected_idx = correct_idx[:FLAGS.nb_examples]
            else:
                # Select the same number of examples for each class label.
                nb_examples_per_class = int(FLAGS.nb_examples /
                                            Y_test_all.shape[1])
                correct_and_selected_idx = get_first_n_examples_id_each_class(
                    Y_test_all[correct_idx], n=nb_examples_per_class)
                selected_idx = [
                    correct_idx[i] for i in correct_and_selected_idx
                ]
    else:
        selected_idx = np.array(range(FLAGS.nb_examples))

    from utils.output import format_number_range
    selected_example_idx_ranges = format_number_range(sorted(selected_idx))
    print("Selected %d examples." % len(selected_idx))
    print("Selected index in test set (sorted): %s" %
          selected_example_idx_ranges)

    X_test, Y_test, Y_pred = \
        X_test_all[selected_idx], Y_test_all[selected_idx], Y_pred_all[selected_idx]

    # The accuracy should be 100%.
    accuracy_selected = calculate_accuracy(Y_pred, Y_test)
    mean_conf_selected = calculate_mean_confidence(Y_pred, Y_test)
    print('Test accuracy on selected legitimate examples %.4f' %
          accuracy_selected)
    print('Mean confidence on ground truth classes, selected %.4f\n' %
          mean_conf_selected)

    task = {}
    task['dataset_name'] = FLAGS.dataset_name
    task['model_name'] = FLAGS.model_name
    task['accuracy_test'] = accuracy_all
    task['mean_confidence_test'] = mean_conf_all
    task['test_set_selected_length'] = len(selected_idx)
    task['test_set_selected_idx_ranges'] = selected_example_idx_ranges
    task['test_set_selected_idx_hash'] = hashlib.sha1(
        str(selected_idx).encode('utf-8')).hexdigest()
    task['accuracy_test_selected'] = accuracy_selected
    task['mean_confidence_test_selected'] = mean_conf_selected

    task_id = "%s_%d_%s_%s" % \
        (task['dataset_name'], task['test_set_selected_length'],
         task['test_set_selected_idx_hash'][:5], task['model_name'])

    FLAGS.result_folder = os.path.join(FLAGS.result_folder, task_id)
    if not os.path.isdir(FLAGS.result_folder):
        os.makedirs(FLAGS.result_folder)

    from utils.output import save_task_descriptor
    save_task_descriptor(FLAGS.result_folder, [task])

    # 5. Generate adversarial examples.
    from attacks import maybe_generate_adv_examples
    from utils.squeeze import reduce_precision_py
    from utils.parameter_parser import parse_params

    attack_string_hash = hashlib.sha1(
        FLAGS.attacks.encode('utf-8')).hexdigest()[:5]
    sample_string_hash = task['test_set_selected_idx_hash'][:5]

    from datasets.datasets_utils import get_next_class, get_least_likely_class
    Y_test_target_next = get_next_class(Y_test)
    Y_test_target_ll = get_least_likely_class(Y_pred)

    X_test_adv_list = []
    X_test_adv_discretized_list = []
    Y_test_adv_discretized_pred_list = []

    # Materialize the filter object into a list: in Python 3 a filter
    # iterator is exhausted after one pass, but this list is iterated here
    # and reused later by evaluate_robustness().
    attack_string_list = list(
        filter(lambda s: len(s) > 0, FLAGS.attacks.lower().split(';')))
    to_csv = []

    X_adv_cache_folder = os.path.join(FLAGS.result_folder, 'adv_examples')
    adv_log_folder = os.path.join(FLAGS.result_folder, 'adv_logs')
    predictions_folder = os.path.join(FLAGS.result_folder, 'predictions')
    for folder in [X_adv_cache_folder, adv_log_folder, predictions_folder]:
        if not os.path.isdir(folder):
            os.makedirs(folder)

    predictions_fpath = os.path.join(predictions_folder, "legitimate.npy")
    np.save(predictions_fpath, Y_pred, allow_pickle=False)

    if FLAGS.clip >= 0:
        epsilon = FLAGS.clip
        print("Clip the adversarial perturbations by +-%f" % epsilon)
        max_clip = np.clip(X_test + epsilon, 0, 1)
        min_clip = np.clip(X_test - epsilon, 0, 1)

    for attack_string in attack_string_list:
        attack_log_fpath = os.path.join(
            adv_log_folder, "%s_%s.log" % (task_id, attack_string))
        attack_name, attack_params = parse_params(attack_string)
        print("\nRunning attack: %s %s" % (attack_name, attack_params))

        if 'targeted' in attack_params:
            targeted = attack_params['targeted']
            print("targeted value: %s" % targeted)
            if targeted == 'next':
                Y_test_target = Y_test_target_next
            elif targeted == 'll':
                Y_test_target = Y_test_target_ll
            elif targeted == False:
                attack_params['targeted'] = False
                Y_test_target = Y_test.copy()
        else:
            targeted = False
            attack_params['targeted'] = False
            Y_test_target = Y_test.copy()

        x_adv_fname = "%s_%s.pickle" % (task_id, attack_string)
        x_adv_fpath = os.path.join(X_adv_cache_folder, x_adv_fname)

        X_test_adv, aux_info = maybe_generate_adv_examples(
            sess, model, x, y, X_test, Y_test_target,
            attack_name, attack_params,
            use_cache=x_adv_fpath,
            verbose=FLAGS.verbose,
            attack_log_fpath=attack_log_fpath)

        if FLAGS.clip > 0:
            # This is L-inf clipping.
            X_test_adv = np.clip(X_test_adv, min_clip, max_clip)

        X_test_adv_list.append(X_test_adv)

        if isinstance(aux_info, float):
            duration = aux_info
        else:
            duration = aux_info['duration']
        dur_per_sample = duration / len(X_test_adv)

        # 5.0 Output predictions.
        Y_test_adv_pred = model.predict(X_test_adv)
        predictions_fpath = os.path.join(predictions_folder,
                                         "%s.npy" % attack_string)
        np.save(predictions_fpath, Y_test_adv_pred, allow_pickle=False)

        # 5.1 Evaluate the adversarial examples after discretization to uint8.
        print("\n---Attack (uint8): %s" % attack_string)

        # All data should be discretized to uint8.
        X_test_adv_discret = reduce_precision_py(X_test_adv, 256)
        X_test_adv_discretized_list.append(X_test_adv_discret)
        Y_test_adv_discret_pred = model.predict(X_test_adv_discret)
        Y_test_adv_discretized_pred_list.append(Y_test_adv_discret_pred)

        rec = evaluate_adversarial_examples(X_test, Y_test,
                                            X_test_adv_discret,
                                            Y_test_target.copy(), targeted,
                                            Y_test_adv_discret_pred)
        rec['dataset_name'] = FLAGS.dataset_name
        rec['model_name'] = FLAGS.model_name
        rec['attack_string'] = attack_string
        rec['duration_per_sample'] = dur_per_sample
        rec['discretization'] = True
        to_csv.append(rec)

    from utils.output import write_to_csv
    attacks_evaluation_csv_fpath = os.path.join(
        FLAGS.result_folder,
        "%s_attacks_%s_evaluation.csv" % (task_id, attack_string_hash))
    fieldnames = [
        'dataset_name', 'model_name', 'attack_string', 'duration_per_sample',
        'discretization', 'success_rate', 'mean_confidence', 'mean_l2_dist',
        'mean_li_dist', 'mean_l0_dist_value', 'mean_l0_dist_pixel'
    ]
    write_to_csv(to_csv, attacks_evaluation_csv_fpath, fieldnames)

    # Compute selected_idx_vis unconditionally: the robustness evaluation
    # below needs it even when visualization is disabled.
    if FLAGS.test_mode or FLAGS.balance_sampling:
        selected_idx_vis = range(Y_test.shape[1])
    else:
        selected_idx_vis = get_first_n_examples_id_each_class(Y_test, 1)

    if FLAGS.visualize:
        from datasets.visualization import show_imgs_in_rows
        legitimate_examples = X_test[selected_idx_vis]

        rows = [legitimate_examples]
        rows += [x_adv[selected_idx_vis] for x_adv in X_test_adv_list]

        img_fpath = os.path.join(
            FLAGS.result_folder,
            '%s_attacks_%s_examples.png' % (task_id, attack_string_hash))
        show_imgs_in_rows(rows, img_fpath)
        print('\n===Adversarial image examples are saved in ', img_fpath)

        # TODO: output the prediction and confidence for each example,
        #       both legitimate and adversarial.

    # 6. Evaluate robust classification techniques.
    # Example: --robustness \
    #   "Base;FeatureSqueezing?squeezer=bit_depth_1;FeatureSqueezing?squeezer=median_filter_2;"
    if FLAGS.robustness != '':
        """
        Test the accuracy with robust classifiers.
        Evaluate the accuracy on all the legitimate examples.
        """
        from robustness import evaluate_robustness
        result_folder_robustness = os.path.join(FLAGS.result_folder,
                                                "robustness")
        fname_prefix = "%s_%s_robustness" % (task_id, attack_string_hash)
        evaluate_robustness(FLAGS.robustness, model,
                            Y_test_all, X_test_all, Y_test,
                            attack_string_list, X_test_adv_discretized_list,
                            fname_prefix, selected_idx_vis,
                            result_folder_robustness)
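
# Step 5.1 above discretizes adversarial examples to uint8 because
# perturbations smaller than the 1/255 quantization step disappear once an
# image is actually saved to disk. A minimal sketch of what a bit-depth
# reduction such as utils.squeeze.reduce_precision_py plausibly computes,
# assuming inputs scaled to [0, 1] (an illustration, not necessarily the
# project's exact implementation):
import numpy as np

def reduce_precision_sketch(x, npp=256):
    """Quantize x in [0, 1] to npp discrete levels (npp=256 ~ uint8)."""
    x = np.asarray(x, dtype=np.float32)
    # Map to {0, ..., npp-1}, round to the nearest level, and map back.
    return np.rint(x * (npp - 1)) / (npp - 1)
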
def main(argv=None):
    # 0. Select a dataset.
    from datasets import MNISTDataset, CIFAR10Dataset, ImageNetDataset
    from datasets import get_correct_prediction_idx, evaluate_adversarial_examples, \
        calculate_mean_confidence, calculate_accuracy

    if FLAGS.dataset_name == "MNIST":
        dataset = MNISTDataset()
    elif FLAGS.dataset_name == "CIFAR-10":
        dataset = CIFAR10Dataset()
        FLAGS.image_size = 32  # Redundant for the current attack.
    elif FLAGS.dataset_name == "ImageNet":
        dataset = ImageNetDataset()
        FLAGS.image_size = 224  # Redundant for the current attack.

    # 1. Load a dataset.
    print("\n===Loading %s data..." % FLAGS.dataset_name)
    if FLAGS.dataset_name == 'ImageNet':
        if FLAGS.model_name == 'inceptionv3':
            img_size = 299
        else:
            img_size = 224
        X_test_all, Y_test_all = dataset.get_test_data(img_size, 0, 200)
    else:
        X_test_all, Y_test_all = dataset.get_test_dataset()

    # Randomly subsample the test set, keeping twice as many examples as
    # requested so that misclassified ones can be filtered out in step 4.
    if FLAGS.dataset_name != "ImageNet":
        all_idx = np.arange(len(X_test_all))
        np.random.shuffle(all_idx)
        selected_idx = all_idx[:(FLAGS.nb_examples * 2)]
        X_test_all, Y_test_all = \
            X_test_all[selected_idx], Y_test_all[selected_idx]

    # 2. Load a trained model.
    sess = load_tf_session()
    keras.backend.set_learning_phase(0)

    # Define the input TF placeholders.
    x = tf.placeholder(tf.float32,
                       shape=(None, dataset.image_size, dataset.image_size,
                              dataset.num_channels))
    y = tf.placeholder(tf.float32, shape=(None, dataset.num_classes))

    sq_list = FLAGS.squeezers.split(';')
    print("Squeezers used for EOT:", sq_list)

    # Build one input placeholder and one model copy per squeezer. For median
    # smoothing a TensorFlow implementation is attached to the graph as a
    # pre-filter, so the outside-the-graph squeezer is the identity; every
    # other squeezer runs in numpy outside the graph and feeds a plain copy
    # of the model.
    x_s = []
    squeezers = []
    models = []
    for squeezer in sq_list:
        x_s.append(
            tf.placeholder(tf.float32,
                           shape=(None, dataset.image_size,
                                  dataset.image_size, dataset.num_channels)))
        if squeezer.startswith("median"):
            squeezers.append(lambda v: v)
            with tf.variable_scope(FLAGS.model_name + squeezer):
                cur_model = dataset.load_model_by_name(
                    FLAGS.model_name,
                    logits=False,
                    input_range_type=1,
                    pre_filter=get_squeezer_by_name(squeezer, 'tensorflow'))
                cur_model.compile(loss='categorical_crossentropy',
                                  optimizer='sgd',
                                  metrics=['acc'])
                models.append(cur_model)
        else:
            squeezers.append(get_squeezer_by_name(squeezer, 'python'))
            with tf.variable_scope(FLAGS.model_name + "local" + squeezer):
                cur_model = dataset.load_model_by_name(FLAGS.model_name,
                                                       logits=False,
                                                       input_range_type=1)
                cur_model.compile(loss='categorical_crossentropy',
                                  optimizer='sgd',
                                  metrics=['acc'])
                models.append(cur_model)

    with tf.variable_scope(FLAGS.model_name + "vanilla"):
        model_vanilla = dataset.load_model_by_name(FLAGS.model_name,
                                                   logits=False,
                                                   input_range_type=1)
        model_vanilla.compile(loss='categorical_crossentropy',
                              optimizer='sgd',
                              metrics=['acc'])

    # 3. Evaluate the trained model.
    # TODO: add top-5 accuracy for ImageNet.
    print("Evaluating the pre-trained model...")
    # We use the vanilla model here for prediction.
    print("Shape of X_test_all:", X_test_all.shape)
    Y_pred_all = model_vanilla.predict(X_test_all)
    mean_conf_all = calculate_mean_confidence(Y_pred_all, Y_test_all)
    accuracy_all = calculate_accuracy(Y_pred_all, Y_test_all)
    print('Test accuracy on raw legitimate examples %.4f' % accuracy_all)
    print('Mean confidence on ground truth classes %.4f' % mean_conf_all)

    # 4. Select some examples to attack.
    import hashlib
    from datasets import get_first_n_examples_id_each_class

    if FLAGS.select:
        # Filter out the misclassified examples.
        correct_idx = get_correct_prediction_idx(Y_pred_all, Y_test_all)
        if FLAGS.test_mode:
            # Only select the first example of each class.
            correct_and_selected_idx = get_first_n_examples_id_each_class(
                Y_test_all[correct_idx])
            selected_idx = [correct_idx[i] for i in correct_and_selected_idx]
        else:
            if not FLAGS.balance_sampling:
                selected_idx = correct_idx[:FLAGS.nb_examples]
            else:
                # Select the same number of examples for each class label.
                nb_examples_per_class = int(FLAGS.nb_examples /
                                            Y_test_all.shape[1])
                correct_and_selected_idx = get_first_n_examples_id_each_class(
                    Y_test_all[correct_idx], n=nb_examples_per_class)
                selected_idx = [
                    correct_idx[i] for i in correct_and_selected_idx
                ]
    else:
        selected_idx = np.array(range(FLAGS.nb_examples))

    from utils.output import format_number_range
    selected_example_idx_ranges = format_number_range(sorted(selected_idx))
    print("Selected %d examples." % len(selected_idx))
    print("Selected index in test set (sorted): %s" %
          selected_example_idx_ranges)

    X_test, Y_test, Y_pred = \
        X_test_all[selected_idx], Y_test_all[selected_idx], Y_pred_all[selected_idx]

    # The accuracy should be 100%.
    accuracy_selected = calculate_accuracy(Y_pred, Y_test)
    mean_conf_selected = calculate_mean_confidence(Y_pred, Y_test)
    print('Test accuracy on selected legitimate examples %.4f' %
          accuracy_selected)
    print('Mean confidence on ground truth classes, selected %.4f\n' %
          mean_conf_selected)

    task = {}
    task['dataset_name'] = FLAGS.dataset_name
    task['model_name'] = FLAGS.model_name
    task['accuracy_test'] = accuracy_all
    task['mean_confidence_test'] = mean_conf_all
    task['test_set_selected_length'] = len(selected_idx)
    task['test_set_selected_idx_ranges'] = selected_example_idx_ranges
    task['test_set_selected_idx_hash'] = hashlib.sha1(
        str(selected_idx).encode('utf-8')).hexdigest()
    task['accuracy_test_selected'] = accuracy_selected
    task['mean_confidence_test_selected'] = mean_conf_selected

    task_id = "%s_%d_%s_%s" % \
        (task['dataset_name'], task['test_set_selected_length'],
         task['test_set_selected_idx_hash'][:5], task['model_name'])

    FLAGS.result_folder = os.path.join(FLAGS.result_folder, task_id)
    if not os.path.isdir(FLAGS.result_folder):
        os.makedirs(FLAGS.result_folder)

    from utils.output import save_task_descriptor
    save_task_descriptor(FLAGS.result_folder, [task])

    # 5. Generate adversarial examples.
    from utils.squeeze import reduce_precision_py
    from utils.parameter_parser import parse_params

    attack_string_hash = hashlib.sha1(
        FLAGS.attacks.encode('utf-8')).hexdigest()[:5]
    sample_string_hash = task['test_set_selected_idx_hash'][:5]

    from datasets.datasets_utils import get_next_class, get_least_likely_class
    Y_test_target_next = get_next_class(Y_test)
    Y_test_target_ll = get_least_likely_class(Y_pred)

    X_test_adv_list = []
    X_test_adv_discretized_list = []
    Y_test_adv_discretized_pred_list = []

    # Materialize the filter object into a list: in Python 3 a filter
    # iterator is exhausted after one pass, but this list is reused later by
    # the robustness and detection evaluations.
    attack_string_list = list(
        filter(lambda s: len(s) > 0, FLAGS.attacks.lower().split(';')))
    to_csv = []

    X_adv_cache_folder = os.path.join(FLAGS.result_folder, 'adv_examples')
    adv_log_folder = os.path.join(FLAGS.result_folder, 'adv_logs')
    predictions_folder = os.path.join(FLAGS.result_folder, 'predictions')
    for folder in [X_adv_cache_folder, adv_log_folder, predictions_folder]:
        if not os.path.isdir(folder):
            os.makedirs(folder)

    predictions_fpath = os.path.join(predictions_folder, "legitimate.npy")
    np.save(predictions_fpath, Y_pred, allow_pickle=False)

    if FLAGS.clip >= 0:
        epsilon = FLAGS.clip
        print("Clip the adversarial perturbations by +-%f" % epsilon)
        max_clip = np.clip(X_test + epsilon, 0, 1)
        min_clip = np.clip(X_test - epsilon, 0, 1)

    # NOTE: At the moment we only support single attacks and single detectors.
    for attack_string in attack_string_list:
        attack_name, attack_params = parse_params(attack_string)
        print("\nRunning attack: %s %s" % (attack_name, attack_params))

        if 'targeted' in attack_params:
            targeted = attack_params['targeted']
            print("targeted value: %s" % targeted)
            if targeted == 'next':
                Y_test_target = Y_test_target_next
            elif targeted == 'll':
                Y_test_target = Y_test_target_ll
            elif targeted == False:
                attack_params['targeted'] = False
                Y_test_target = Y_test.copy()
        else:
            targeted = False
            attack_params['targeted'] = False
            Y_test_target = Y_test.copy()

        # Note that we use the attack models here instead of the vanilla
        # model, and that we pass in the squeezer functions for BPDA.
        X_test_adv = eot_adversarial_attack(sess, model_vanilla, models, x, y,
                                            x_s, X_test, Y_test_target,
                                            attack_params, squeezers)

        if FLAGS.clip > 0:
            # This is L-inf clipping.
            X_test_adv = np.clip(X_test_adv, min_clip, max_clip)

        X_test_adv_list.append(X_test_adv)

        # 5.0 Output predictions.
        Y_test_adv_pred = model_vanilla.predict(X_test_adv)
        predictions_fpath = os.path.join(predictions_folder,
                                         "%s.npy" % attack_string)
        np.save(predictions_fpath, Y_test_adv_pred, allow_pickle=False)

        # 5.1 Evaluate the adversarial examples after discretization to uint8.
        print("\n---Attack (uint8): %s" % attack_string)

        # All data should be discretized to uint8.
        X_test_adv_discret = reduce_precision_py(X_test_adv, 256)
        X_test_adv_discretized_list.append(X_test_adv_discret)
        # Y_test_adv_discret_pred is for the vanilla model.
        Y_test_adv_discret_pred = model_vanilla.predict(X_test_adv_discret)
        Y_test_adv_discretized_pred_list.append(Y_test_adv_discret_pred)

        rec = evaluate_adversarial_examples(X_test, Y_test,
                                            X_test_adv_discret,
                                            Y_test_target.copy(), targeted,
                                            Y_test_adv_discret_pred)
        rec['dataset_name'] = FLAGS.dataset_name
        rec['model_name'] = FLAGS.model_name
        rec['attack_string'] = attack_string
        rec['discretization'] = True
        to_csv.append(rec)

    from utils.output import write_to_csv
    attacks_evaluation_csv_fpath = os.path.join(
        FLAGS.result_folder,
        "%s_attacks_%s_evaluation.csv" % (task_id, attack_string_hash))
    fieldnames = [
        'dataset_name', 'model_name', 'attack_string', 'discretization',
        'success_rate', 'mean_confidence', 'mean_l2_dist', 'mean_li_dist',
        'mean_l0_dist_value', 'mean_l0_dist_pixel'
    ]
    write_to_csv(to_csv, attacks_evaluation_csv_fpath, fieldnames)

    # Compute selected_idx_vis unconditionally: the robustness evaluation
    # below needs it even when visualization is disabled.
    if FLAGS.test_mode or FLAGS.balance_sampling:
        selected_idx_vis = range(Y_test.shape[1])
    else:
        selected_idx_vis = get_first_n_examples_id_each_class(Y_test, 1)

    if FLAGS.visualize:
        from datasets.visualization import show_imgs_in_rows
        legitimate_examples = X_test[selected_idx_vis]

        rows = [legitimate_examples]
        rows += [x_adv[selected_idx_vis] for x_adv in X_test_adv_list]

        img_fpath = os.path.join(
            FLAGS.result_folder,
            '%s_attacks_%s_examples.png' % (task_id, attack_string_hash))
        # show_imgs_in_rows(rows, img_fpath)
        print('\n===Adversarial image examples are saved in ', img_fpath)

        # TODO: output the prediction and confidence for each example,
        #       both legitimate and adversarial.

    # 6. Evaluate robust classification techniques.
    # Example: --robustness \
    #   "Base;FeatureSqueezing?squeezer=bit_depth_1;FeatureSqueezing?squeezer=median_filter_2;"
    if FLAGS.robustness != '':
        """
        Test the accuracy with robust classifiers.
        Evaluate the accuracy on all the legitimate examples.
        """
        from robustness import evaluate_robustness
        result_folder_robustness = os.path.join(FLAGS.result_folder,
                                                "robustness")
        fname_prefix = "robustness_summary"
        evaluate_robustness(FLAGS.robustness, model_vanilla,
                            Y_test_all, X_test_all, Y_test,
                            attack_string_list, X_test_adv_discretized_list,
                            fname_prefix, selected_idx_vis,
                            result_folder_robustness)

    # 7. Detection experiment.
    # Example: --detection \
    #   "FeatureSqueezing?distance_measure=l1&squeezers=median_smoothing_2,bit_depth_4,bilateral_filter_15_15_60;"
    if FLAGS.detection != '':
        from detections.base import DetectionEvaluator
        result_folder_detection = os.path.join(FLAGS.result_folder,
                                               "detection")
        csv_fname = "detection_summary.csv"
        de = DetectionEvaluator(model_vanilla, result_folder_detection,
                                csv_fname, FLAGS.dataset_name)
        Y_test_all_pred = model_vanilla.predict(X_test_all)
        de.build_detection_dataset(X_test_all, Y_test_all, Y_test_all_pred,
                                   selected_idx, X_test_adv_discretized_list,
                                   Y_test_adv_discretized_pred_list,
                                   attack_string_list, attack_string_hash,
                                   FLAGS.clip, Y_test_target_next,
                                   Y_test_target_ll)
        de.evaluate_detections(FLAGS.detection)
def main(argv=None):
    # 0. Select a dataset.
    from datasets import MNISTDataset, CIFAR10Dataset, ImageNetDataset
    from datasets import get_correct_prediction_idx, evaluate_adversarial_examples, \
        calculate_mean_confidence, calculate_accuracy, \
        calculate_real_untargeted_mean_confidence

    if FLAGS.dataset_name == "MNIST":
        dataset = MNISTDataset()
    elif FLAGS.dataset_name == "CIFAR-10":
        dataset = CIFAR10Dataset()
    elif FLAGS.dataset_name == "ImageNet":
        dataset = ImageNetDataset()

    # 1. Load a dataset.
    print("\n===Loading %s data..." % FLAGS.dataset_name)
    if FLAGS.dataset_name == 'ImageNet':
        if FLAGS.model_name == 'inceptionv3':
            img_size = 299
        else:
            img_size = 224
        X_test_all, Y_test_all = dataset.get_test_data(img_size, 0, 200)
    else:
        X_test_all, Y_test_all = dataset.get_test_dataset()

    # 2. Load a trained model.
    sess = load_tf_session()
    # keras.backend.set_learning_phase(0)

    # Define the input TF placeholders.
    x = tf.placeholder(tf.float32,
                       shape=(None, dataset.image_size, dataset.image_size,
                              dataset.num_channels))
    y = tf.placeholder(tf.float32, shape=(None, dataset.num_classes))

    with tf.variable_scope(FLAGS.model_name):
        """
        Create a model instance for prediction.
        The scaling argument 'input_range_type':
            {1: [0, 1], 2: [-0.5, 0.5], 3: [-1, 1], ...}
        """
        model = dataset.load_model_by_name(FLAGS.model_name,
                                           logits=False,
                                           input_range_type=1)
        model.compile(loss='categorical_crossentropy',
                      optimizer='sgd',
                      metrics=['acc'])

    # 3. Evaluate the trained model.
    # TODO: add top-5 accuracy for ImageNet.
    print("Evaluating the pre-trained model...")
    # X_test_all = scipy.ndimage.rotate(X_test_all, 5, reshape=False, axes=(2, 1))
    Y_pred_all = model.predict(X_test_all)
    # This variant of calculate_mean_confidence returns (mean, max, min, std).
    mean_conf_all, _, _, _ = calculate_mean_confidence(Y_pred_all, Y_test_all)
    accuracy_all = calculate_accuracy(Y_pred_all, Y_test_all)
    print('Test accuracy on raw legitimate examples %.4f' % accuracy_all)
    print('Mean confidence on ground truth classes %.4f' % mean_conf_all)

    # 4. Select some examples to attack.
    import hashlib
    from datasets import get_first_n_examples_id_each_class

    if FLAGS.select:
        # Filter out the misclassified examples.
        correct_idx = get_correct_prediction_idx(Y_pred_all, Y_test_all)
        if FLAGS.test_mode:
            # Only select the first example of each class.
            correct_and_selected_idx = get_first_n_examples_id_each_class(
                Y_test_all[correct_idx])
            selected_idx = [correct_idx[i] for i in correct_and_selected_idx]
        else:
            if not FLAGS.balance_sampling:
                selected_idx = correct_idx[:FLAGS.nb_examples]
            else:
                # Select the same number of examples for each class label.
                nb_examples_per_class = int(FLAGS.nb_examples /
                                            Y_test_all.shape[1])
                correct_and_selected_idx = get_first_n_examples_id_each_class(
                    Y_test_all[correct_idx], n=nb_examples_per_class)
                selected_idx = [
                    correct_idx[i] for i in correct_and_selected_idx
                ]
    else:
        selected_idx = np.array(range(FLAGS.nb_examples))

    from utils.output import format_number_range
    selected_example_idx_ranges = format_number_range(sorted(selected_idx))
    print("Selected %d examples." % len(selected_idx))
    print("Selected index in test set (sorted): %s" %
          selected_example_idx_ranges)

    X_test, Y_test, Y_pred = \
        X_test_all[selected_idx], Y_test_all[selected_idx], Y_pred_all[selected_idx]

    # The accuracy should be 100%.
    accuracy_selected = calculate_accuracy(Y_pred, Y_test)
    mean_conf_selected, max_conf_selected, min_conf_selected, std_conf_selected = \
        calculate_mean_confidence(Y_pred, Y_test)
    print('Test accuracy on selected legitimate examples %.4f' %
          accuracy_selected)
    print('Mean confidence on ground truth classes, selected %.4f\n' %
          mean_conf_selected)
    print('max confidence on ground truth classes, selected %.4f\n' %
          max_conf_selected)
    print('min confidence on ground truth classes, selected %.4f\n' %
          min_conf_selected)
    print('std confidence on ground truth classes, selected %.4f\n' %
          std_conf_selected)

    task = {}
    task['dataset_name'] = FLAGS.dataset_name
    task['model_name'] = FLAGS.model_name
    task['accuracy_test'] = accuracy_all
    task['mean_confidence_test'] = mean_conf_all
    task['test_set_selected_length'] = len(selected_idx)
    task['test_set_selected_idx_ranges'] = selected_example_idx_ranges
    task['test_set_selected_idx_hash'] = hashlib.sha1(
        str(selected_idx).encode('utf-8')).hexdigest()
    task['accuracy_test_selected'] = accuracy_selected
    task['mean_confidence_test_selected'] = mean_conf_selected

    task_id = "%s_%d_%s_%s" % \
        (task['dataset_name'], task['test_set_selected_length'],
         task['test_set_selected_idx_hash'][:5], task['model_name'])

    FLAGS.result_folder = os.path.join(FLAGS.result_folder, task_id)
    if not os.path.isdir(FLAGS.result_folder):
        os.makedirs(FLAGS.result_folder)

    from utils.output import save_task_descriptor
    save_task_descriptor(FLAGS.result_folder, [task])

    # 5. Generate adversarial examples.
    from attacks import maybe_generate_adv_examples
    from utils.squeeze import reduce_precision_py
    from utils.parameter_parser import parse_params

    attack_string_hash = hashlib.sha1(
        FLAGS.attacks.encode('utf-8')).hexdigest()[:5]
    sample_string_hash = task['test_set_selected_idx_hash'][:5]

    from datasets.datasets_utils import get_next_class, get_least_likely_class, \
        get_most_likely_class
    Y_test_target_next = get_next_class(Y_test)
    Y_test_target_ll = get_least_likely_class(Y_pred)
    Y_test_target_ml = get_most_likely_class(Y_pred)

    X_test_adv_list = []
    X_test_adv_discretized_list = []
    Y_test_adv_discretized_pred_list = []

    # Materialize the filter object into a list: in Python 3 a filter
    # iterator is exhausted after one pass, but this list is reused later by
    # the robustness and detection evaluations.
    attack_string_list = list(
        filter(lambda s: len(s) > 0, FLAGS.attacks.lower().split(';')))
    to_csv = []

    X_adv_cache_folder = os.path.join(FLAGS.result_folder, 'adv_examples')
    adv_log_folder = os.path.join(FLAGS.result_folder, 'adv_logs')
    predictions_folder = os.path.join(FLAGS.result_folder, 'predictions')
    for folder in [X_adv_cache_folder, adv_log_folder, predictions_folder]:
        if not os.path.isdir(folder):
            os.makedirs(folder)

    predictions_fpath = os.path.join(predictions_folder, "legitimate.npy")
    np.save(predictions_fpath, Y_pred, allow_pickle=False)

    if FLAGS.clip >= 0:
        epsilon = FLAGS.clip
        print("Clip the adversarial perturbations by +-%f" % epsilon)
        max_clip = np.clip(X_test + epsilon, 0, 1)
        min_clip = np.clip(X_test - epsilon, 0, 1)

    for attack_string in attack_string_list:
        attack_log_fpath = os.path.join(
            adv_log_folder, "%s_%s.log" % (task_id, attack_string))
        attack_name, attack_params = parse_params(attack_string)
        print("\nRunning attack: %s %s" % (attack_name, attack_params))

        if 'targeted' in attack_params:
            targeted = attack_params['targeted']
            print("targeted value: %s" % targeted)
            if targeted == 'next':
                Y_test_target = Y_test_target_next
                # Y_test_target = Y_test.copy()
            elif targeted == 'll':
                Y_test_target = Y_test_target_ll
                # Y_test_target = Y_test.copy()
                # print(Y_test_target_ll)
            elif targeted == 'most':
                Y_test_target = Y_test_target_ml
                # Y_test_target = Y_test.copy()
                # print(Y_test_target_ml)
            elif targeted == False:
                attack_params['targeted'] = False
                Y_test_target = Y_test.copy()
        else:
            targeted = False
            attack_params['targeted'] = False
            Y_test_target = Y_test.copy()

        Y_test_target_all = Y_test_all.copy()

        x_adv_fname = "%s_%s.pickle" % (task_id, attack_string)
        x_adv_fpath = os.path.join(X_adv_cache_folder, x_adv_fname)

        X_test_adv, aux_info = maybe_generate_adv_examples(
            sess, model, x, y, X_test, Y_test_target,
            attack_name, attack_params,
            use_cache=x_adv_fpath,
            verbose=FLAGS.verbose,
            attack_log_fpath=attack_log_fpath)

        if FLAGS.clip > 0:
            # This is L-inf clipping.
            X_test_adv = np.clip(X_test_adv, min_clip, max_clip)

        X_test_adv_list.append(X_test_adv)

        if isinstance(aux_info, float):
            duration = aux_info
        else:
            duration = aux_info['duration']
        dur_per_sample = duration / len(X_test_adv)

        # 5.0 Output predictions.
        Y_test_adv_pred = model.predict(X_test_adv)
        # predictions_fpath = os.path.join(predictions_folder, "%s.npy" % attack_string)
        # np.save(predictions_fpath, Y_test_adv_pred, allow_pickle=False)

        # 5.1 Evaluate the adversarial examples after discretization to uint8.
        print("\n---Attack (uint8): %s" % attack_string)
        # import utils.squeeze as squeezer

        # All data should be discretized to uint8.
        X_test_adv_discret = reduce_precision_py(X_test_adv, 256)
        # X_test_adv_discret = reduce_precision_py(X_test_adv, 2)
        X_test_adv_discretized_list.append(X_test_adv_discret)
        Y_test_adv_discret_pred = model.predict(X_test_adv_discret)
        # Y_test_adv_discret_pred1 = to_categorical(
        #     np.argmax(model1.predict(X_test_adv_discret), axis=1))

        # 5.2 Train and evaluate an LID-based detector on the adversarial
        # examples. NOTE: the dataset and attack names are hardcoded to
        # 'mnist'/'fgsm' here.
        from LID.extract_artifacts_obfus import get_lid
        from LID.util_obfus import get_noisy_samples, random_split, \
            block_split, train_lr, compute_roc
        from sklearn.metrics import accuracy_score, precision_score, \
            recall_score, f1_score
        from sklearn.preprocessing import scale, MinMaxScaler, StandardScaler
        # from LID.extract_artifact import *
        # from LID_util import *

        X_test_noisy = get_noisy_samples(X_test, X_test_adv, 'mnist', 'fgsm')
        artifacts, labels = get_lid(model, X_test, X_test_noisy,
                                    X_test_adv_discret, 20, 100, 'mnist')
        # X = artifacts
        # Y = labels
        print(X_test_noisy.shape)
        # print(artifacts.shape)

        # Standardization.
        scaler = MinMaxScaler().fit(artifacts)
        artifacts = scaler.transform(artifacts)
        # X = scale(X)  # Z-norm

        # The test attack is the same as the training attack.
        X_train_lid, Y_train_lid, X_test_lid, Y_test_lid = block_split(
            artifacts, labels)
        print(X_train_lid.shape)
        print(X_test_lid.shape)

        ## Build the detector.
        # print("LR Detector on [dataset: %s, train_attack: %s, test_attack: %s] with:" %
        #       (args.dataset, args.attack, args.test_attack))
        lr = train_lr(X_train_lid, Y_train_lid)

        ## Evaluate the detector.
        y_pred_lid = lr.predict_proba(X_test_lid)[:, 1]
        y_label_pred = lr.predict(X_test_lid)
        Y_test_lid = np.reshape(Y_test_lid, Y_test_lid.shape[0])

        # AUC
        _, _, auc_score = compute_roc(Y_test_lid[:100], y_pred_lid[:100],
                                      plot=False)
        precision = precision_score(Y_test_lid[:100], y_label_pred[:100])
        recall = recall_score(Y_test_lid[:100], y_label_pred[:100])
        y_label_pred = lr.predict(X_test_lid[:100])
        acc = accuracy_score(Y_test_lid[:100], y_label_pred[:100])
        print('Detector ROC-AUC score: %0.4f, accuracy: %.4f, '
              'precision: %.4f, recall (TPR): %.4f' %
              (auc_score, acc, precision, recall))

        from detections.base import evalulate_detection_test
        a, b, c, d, e = evalulate_detection_test(Y_test_lid[:100],
                                                 y_label_pred[:100])
        f1 = f1_score(Y_test_lid[:100], y_label_pred)
        print('SAE_acc: %0.4f, tpr: %.4f, fpr: %.4f, fdr (1 - precision): %.4f, '
              'fbr (false omission rate): %.4f, f1 score: %.4f' %
              (a, b, c, d, e, f1))

        from datasets.datasets_utils_NAT import evaluate_undetected_SAE_examples
        evaluate_undetected_SAE_examples(X_test[50:], Y_test[50:],
                                         X_test_adv_discret[50:],
                                         Y_test_target[50:],
                                         y_label_pred[:50], targeted,
                                         Y_test_adv_pred[50:])

        # NOTE: while the block below stays commented out,
        # Y_test_adv_discretized_pred_list and to_csv remain empty, so the
        # CSV written afterwards and the detection experiment in step 7
        # receive empty inputs.
        # Y_test_adv_discretized_pred_list.append(Y_test_adv_discret_pred)
        #
        # rec = evaluate_adversarial_examples(X_test, Y_test, X_test_adv_discret,
        #                                     Y_test_target.copy(), targeted,
        #                                     Y_test_adv_discret_pred)
        # # rec = evaluate_adversarial_examples(X_test_all, Y_test_all, X_test_all,
        # #                                     Y_test_target_all.copy(), targeted,
        # #                                     Y_test_adv_discret_pred)
        # rec['dataset_name'] = FLAGS.dataset_name
        # rec['model_name'] = FLAGS.model_name
        # rec['attack_string'] = attack_string
        # rec['duration_per_sample'] = dur_per_sample
        # rec['discretization'] = True
        # to_csv.append(rec)

    from utils.output import write_to_csv
    attacks_evaluation_csv_fpath = os.path.join(
        FLAGS.result_folder,
        "%s_attacks_%s_evaluation.csv" % (task_id, attack_string_hash))
    fieldnames = [
        'dataset_name', 'model_name', 'attack_string', 'duration_per_sample',
        'discretization', 'success_rate', 'mean_confidence', 'mean_l2_dist',
        'mean_li_dist', 'mean_l0_dist_value', 'mean_l0_dist_pixel'
    ]
    write_to_csv(to_csv, attacks_evaluation_csv_fpath, fieldnames)

    # Compute selected_idx_vis unconditionally: the robustness evaluation
    # below needs it even when visualization is disabled.
    if FLAGS.test_mode or FLAGS.balance_sampling:
        selected_idx_vis = range(Y_test.shape[1])
    else:
        selected_idx_vis = get_first_n_examples_id_each_class(Y_test, 1)

    if FLAGS.visualize:
        from datasets.visualization import show_imgs_in_rows
        legitimate_examples = X_test[selected_idx_vis]

        rows = [legitimate_examples]
        rows += [x_adv[selected_idx_vis] for x_adv in X_test_adv_list]

        img_fpath = os.path.join(
            FLAGS.result_folder,
            '%s_attacks_%s_examples.png' % (task_id, attack_string_hash))
        show_imgs_in_rows(rows, img_fpath)
        print('\n===Adversarial image examples are saved in ', img_fpath)

        # TODO: output the prediction and confidence for each example,
        #       both legitimate and adversarial.

    # 6. Evaluate robust classification techniques.
    # Example: --robustness \
    #   "Base;FeatureSqueezing?squeezer=bit_depth_1;FeatureSqueezing?squeezer=median_filter_2;"
    if FLAGS.robustness != '':
        """
        Test the accuracy with robust classifiers.
        Evaluate the accuracy on all the legitimate examples.
        """
        from robustness import evaluate_robustness
        result_folder_robustness = os.path.join(FLAGS.result_folder,
                                                "robustness")
        fname_prefix = "%s_%s_robustness" % (task_id, attack_string_hash)
        evaluate_robustness(FLAGS.robustness, model,
                            Y_test_all, X_test_all, Y_test,
                            attack_string_list, X_test_adv_discretized_list,
                            fname_prefix, selected_idx_vis,
                            result_folder_robustness)

    # 7. Detection experiment.
    # Example: --detection \
    #   "FeatureSqueezing?distance_measure=l1&squeezers=median_smoothing_2,bit_depth_4,bilateral_filter_15_15_60;"
    if FLAGS.detection != '':
        from detections.base import DetectionEvaluator
        result_folder_detection = os.path.join(FLAGS.result_folder,
                                               "detection")
        csv_fname = "%s_attacks_%s_detection.csv" % (task_id,
                                                     attack_string_hash)
        de = DetectionEvaluator(model, result_folder_detection, csv_fname,
                                FLAGS.dataset_name)
        Y_test_all_pred = model.predict(X_test_all)
        de.build_detection_dataset(X_test_all, Y_test_all, Y_test_all_pred,
                                   selected_idx, X_test_adv_discretized_list,
                                   Y_test_adv_discretized_pred_list,
                                   attack_string_list, attack_string_hash,
                                   FLAGS.clip, Y_test_target_next,
                                   Y_test_target_ll)
        de.evaluate_detections(FLAGS.detection)
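
# get_lid (from LID.extract_artifacts_obfus) is defined elsewhere. The
# detector trained in step 5.2 above is built on Local Intrinsic
# Dimensionality features in the sense of Ma et al. (ICLR 2018). A minimal
# sketch of the underlying maximum-likelihood LID estimate over a batch of
# deep-feature activations; lid_mle and its arguments are illustrative
# assumptions, not the project's exact code:
import numpy as np
from scipy.spatial.distance import cdist

def lid_mle(batch_feats, ref_feats, k=20):
    """MLE of LID for each row of batch_feats against a reference batch."""
    dists = cdist(batch_feats, ref_feats)        # pairwise L2 distances
    # Keep the k nearest neighbors; column 0 is the zero self-distance when
    # the batch is drawn from the reference set, so skip it.
    dists = np.sort(dists, axis=1)[:, 1:k + 1]
    dists = np.maximum(dists, 1e-12)             # guard against log(0)
    # LID(x) = -( (1/k) * sum_i log(r_i / r_k) )^(-1),
    # where r_1 <= ... <= r_k are the neighbor distances.
    return -1.0 / np.mean(np.log(dists / dists[:, -1:]), axis=1)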