def run_protection(self, image_paths, th=0.04, sd=1e7, lr=10, max_step=500, batch_size=1, format='png',
                   separate_target=True, debug=False, no_align=False, exp="", maximize=True,
                   save_last_on_failed=True):
    """Cloak every face found in `image_paths` and write `*_cloaked.<format>` files next to the originals.

    Returns an int status code: 1 on success, 2 if no face was detected, 3 if no usable images were found.

    NOTE(review): the `th`, `lr`, `max_step` parameters are accepted but never read — the body uses
    `self.th`, `self.lr`, `self.max_step` instead (presumably set in __init__ / mode2param; confirm).
    `exp` is likewise unused. Left as-is to preserve the public signature.
    """
    # Cache key for the protector: if none of these change between calls, the existing
    # FawkesMaskGeneration instance is reused instead of being rebuilt.
    current_param = "-".join([str(x) for x in [self.th, sd, self.lr, self.max_step, batch_size, format,
                                               separate_target, debug]])
    # Drop unreadable paths; `loaded_images` holds the decoded image data for the survivors.
    image_paths, loaded_images = filter_image_paths(image_paths)
    if not image_paths:
        print("No images in the directory")
        return 3
    # Detect and (optionally) align faces; cropped_faces are the per-face crops to be cloaked.
    faces = Faces(image_paths, loaded_images, self.aligner, verbose=1, no_align=no_align)
    original_images = faces.cropped_faces
    if len(original_images) == 0:
        print("No face detected. ")
        return 2
    original_images = np.array(original_images)
    # Rebuild the protector only when the parameter fingerprint changed.
    if current_param != self.protector_param:
        self.protector_param = current_param
        if self.protector is not None:
            del self.protector
        # batch_size == -1 means "one batch covering every face".
        if batch_size == -1:
            batch_size = len(original_images)
        self.protector = FawkesMaskGeneration(self.feature_extractors_ls,
                                              batch_size=batch_size,
                                              mimic_img=True,
                                              intensity_range=PREPROCESS,
                                              initial_const=sd,
                                              learning_rate=self.lr,
                                              max_iterations=self.max_step,
                                              l_threshold=self.th,
                                              verbose=debug,
                                              maximize=maximize,
                                              keep_final=False,
                                              image_shape=(IMG_SIZE, IMG_SIZE, 3),
                                              loss_method='features',
                                              tanh_process=True,
                                              save_last_on_failed=save_last_on_failed,
                                              )
    # Run the optimization that produces the cloaked face crops.
    protected_images = generate_cloak_images(self.protector, original_images)
    faces.cloaked_cropped_faces = protected_images
    # Paste cloaked crops back into the full photos; `images_without_face` indexes photos
    # whose faces could not be merged and must be skipped when saving.
    final_images, images_without_face = faces.merge_faces(
        reverse_process_cloaked(protected_images, preprocess=PREPROCESS),
        reverse_process_cloaked(original_images, preprocess=PREPROCESS))
    for i in range(len(final_images)):
        if i in images_without_face:
            continue
        p_img = final_images[i]
        path = image_paths[i]
        # e.g. photo.jpg -> photo_<mode>_cloaked.png
        file_name = "{}_{}_cloaked.{}".format(".".join(path.split(".")[:-1]), self.mode, format)
        dump_image(p_img, file_name, format=format)
    print("Done!")
    return 1
def run_protection(self, image_paths, mode='min', th=0.04, sd=1e9, lr=10, max_step=500, batch_size=1,
                   format='png', separate_target=True, debug=True, no_align=False):
    """Cloak the faces in `image_paths`, logging a timestamp after each phase.

    Unless mode == 'custom', `th`, `max_step` and `lr` are overridden by the preset
    returned from self.mode2param(mode). Writes `*_<mode>_cloaked.<format>` files.

    Returns an int status code: 1 on success, 2 if no face was detected, 3 if no
    usable images were found.
    """
    # Preset modes override the explicit optimization parameters.
    if mode != 'custom':
        th, max_step, lr = self.mode2param(mode)
    # Fingerprint of every parameter that affects the protector; used below to decide
    # whether the cached FawkesMaskGeneration instance can be reused.
    current_param = "-".join([
        str(x) for x in [
            mode, th, sd, lr, max_step, batch_size, format, separate_target, debug
        ]
    ])
    # FIX: original message read "Fininshed".
    print("Finished Setting Parameters at: {}".format(datetime.now()))
    image_paths, loaded_images = filter_image_paths(image_paths)
    print("Finished Loading Images at: {}".format(datetime.now()))
    if not image_paths:
        print("No images in the directory")
        return 3
    # `graph`/`sess` are module-level TF1 objects; all model work happens in their scope.
    with graph.as_default():
        faces = Faces(image_paths, loaded_images, self.aligner, verbose=1, no_align=no_align)
        original_images = faces.cropped_faces
        print("Finished Detecting Faces In The Images at: {}".format(datetime.now()))
        if len(original_images) == 0:
            print("No face detected. ")
            return 2
        original_images = np.array(original_images)
        with sess.as_default():
            if separate_target:
                # Pick a target embedding per face (each image pushed through the
                # extractors one at a time with a leading batch dim of 1).
                target_embedding = []
                print("Start image reshape at: {}".format(datetime.now()))
                for i, org_img in enumerate(original_images):  # enumerate replaces manual i counter
                    org_img = org_img.reshape([1] + list(org_img.shape))
                    print("Finished Image {} Reshape at: {}".format(i, datetime.now()))
                    tar_emb = select_target_label(org_img, self.feature_extractors_ls, self.fs_names)
                    print("Finished Target Embedding Image {} at: {}".format(i, datetime.now()))
                    target_embedding.append(tar_emb)
                target_embedding = np.concatenate(target_embedding)
            else:
                # One shared target embedding for the whole batch.
                target_embedding = select_target_label(original_images, self.feature_extractors_ls,
                                                       self.fs_names)
            print("Finished All Images Target Embedding at: {}".format(datetime.now()))
            # Rebuild the protector only when the parameter fingerprint changed.
            if current_param != self.protector_param:
                self.protector_param = current_param
                if self.protector is not None:
                    del self.protector
                self.protector = FawkesMaskGeneration(
                    sess, self.feature_extractors_ls,
                    batch_size=batch_size,
                    mimic_img=True,
                    intensity_range='imagenet',
                    initial_const=sd,
                    learning_rate=lr,
                    max_iterations=max_step,
                    l_threshold=th,
                    verbose=1 if debug else 0,
                    maximize=False,
                    keep_final=False,
                    image_shape=(224, 224, 3))
            print("Finished Initializing Fawkes Protector at: {}".format(datetime.now()))
            # Optimize the cloaks toward the chosen target embeddings.
            protected_images = generate_cloak_images(self.protector, original_images,
                                                     target_emb=target_embedding)
            print("Finished Protecting Images at: {}".format(datetime.now()))
            faces.cloaked_cropped_faces = protected_images
            # Paste the (un-preprocessed) cloaked crops back into the full photos.
            final_images = faces.merge_faces(
                reverse_process_cloaked(protected_images),
                reverse_process_cloaked(original_images))
            print("Finished Merging Faces at: {}".format(datetime.now()))
            for p_img, path in zip(final_images, image_paths):
                # e.g. photo.jpg -> photo_<mode>_cloaked.png
                file_name = "{}_{}_cloaked.{}".format(
                    ".".join(path.split(".")[:-1]), mode, format)
                dump_image(p_img, file_name, format=format)
    print("Done! and Finished Exporting File at: {}".format(datetime.now()))
    return 1
def main():
    """Evaluate cloak effectiveness: train a victim classifier on cloaked images and
    measure how poorly it recognizes the uncloaked originals.

    NOTE(review): relies on a module-level `args` (gpu, directory, num_other_classes,
    base_model, n_epochs) — presumably parsed elsewhere in this file; confirm.
    """
    sess = init_gpu(args.gpu)
    ali = aligner(sess)
    print("Build attacker's model")
    image_paths = glob.glob(os.path.join(args.directory, "*"))
    cloak_file_name = "_cloaked"
    # Partition the directory into originals vs. previously-generated "*_cloaked" files,
    # matching on the basename only so directory names can't cause false positives.
    original_image_paths = sorted(
        [path for path in image_paths if "cloaked" not in path.split("/")[-1]])
    original_image_paths, original_loaded_images = filter_image_paths(
        original_image_paths)
    protect_image_paths = sorted([
        path for path in image_paths if cloak_file_name in path.split("/")[-1]
    ])
    protect_image_paths, protected_loaded_images = filter_image_paths(
        protect_image_paths)
    print("Find {} original image and {} cloaked images".format(
        len(original_image_paths), len(protect_image_paths)))
    # Crop faces out of both sets for training/evaluation.
    original_faces = Faces(original_image_paths, original_loaded_images, ali,
                           verbose=1, eval_local=True)
    original_faces = original_faces.cropped_faces
    cloaked_faces = Faces(protect_image_paths, protected_loaded_images, ali,
                          verbose=1, eval_local=True)
    cloaked_faces = cloaked_faces.cropped_faces
    # Too few samples makes the accuracy estimate meaningless.
    if len(original_faces) <= 10 or len(protect_image_paths) <= 10:
        raise Exception(
            "Must have more than 10 protected images to run the evaluation")
    # The protected identity plus `num_other_classes` decoy identities.
    num_classes = args.num_other_classes + 1
    datagen = DataGenerator(original_faces, cloaked_faces)
    original_test_X, original_test_Y = datagen.test_original()
    print("{} Training Images | {} Testing Images".format(
        len(datagen.protect_images_train), len(original_test_X)))
    train_generator = datagen.generate()
    test_generator = datagen.generate(test=True)
    # Transfer-learn a victim classifier on top of the chosen feature extractor.
    base_model = load_extractor(args.base_model)
    model = load_victim_model(teacher_model=base_model,
                              number_classes=num_classes)
    cb = CallbackGenerator(original_imgs=original_test_X,
                           protect_imgs=cloaked_faces,
                           original_y=original_test_Y,
                           original_protect_y=None,
                           test_gen=test_generator)
    model.fit_generator(train_generator,
                        steps_per_epoch=num_classes * 10 // 32,
                        epochs=args.n_epochs,
                        verbose=1,
                        callbacks=[cb])
    # Low accuracy on the uncloaked originals means the cloak worked:
    # success rate is reported as 1 - accuracy.
    _, acc_original = model.evaluate(original_test_X,
                                     original_test_Y,
                                     verbose=0)
    print("Protection Success Rate: {:.4f}".format(1 - acc_original))
def run_protection(self, image_paths, mode='low', th=0.04, sd=1e9, lr=10, max_step=500, batch_size=1,
                   format='png', separate_target=True, debug=False):
    """Cloak the faces in `image_paths` and write `*_<mode>_cloaked.<format>` files.

    Unless mode == 'custom', `th`, `max_step` and `lr` come from the preset
    returned by self.mode2param(mode). Raises if no usable images are found.
    """
    # Preset modes override the explicit optimization parameters.
    if mode != 'custom':
        th, max_step, lr = self.mode2param(mode)

    # Fingerprint of every protector-relevant parameter, used to decide whether
    # the cached FawkesMaskGeneration instance can be reused.
    current_param = "-".join([
        str(x) for x in [
            mode, th, sd, lr, max_step, batch_size, format, separate_target, debug
        ]
    ])

    image_paths, loaded_images = filter_image_paths(image_paths)
    if not image_paths:
        raise Exception("No images in the directory")

    # All model work happens inside the module-level TF1 graph/session.
    with graph.as_default():
        faces = Faces(image_paths, loaded_images, self.aligner, verbose=1)
        original_images = np.array(faces.cropped_faces)

        with sess.as_default():
            if separate_target:
                # One target embedding per face: each crop gets a leading batch
                # dimension of 1 before going through the extractors.
                per_face = [
                    select_target_label(crop.reshape([1] + list(crop.shape)),
                                        self.feature_extractors_ls, self.fs_names)
                    for crop in original_images
                ]
                target_embedding = np.concatenate(per_face)
            else:
                # One shared target embedding for the whole batch.
                target_embedding = select_target_label(
                    original_images, self.feature_extractors_ls, self.fs_names)

            # Rebuild the protector only when the parameter fingerprint changed.
            if current_param != self.protector_param:
                self.protector_param = current_param
                if self.protector is not None:
                    del self.protector
                self.protector = FawkesMaskGeneration(
                    sess, self.feature_extractors_ls,
                    batch_size=batch_size,
                    mimic_img=True,
                    intensity_range='imagenet',
                    initial_const=sd,
                    learning_rate=lr,
                    max_iterations=max_step,
                    l_threshold=th,
                    verbose=1 if debug else 0,
                    maximize=False,
                    keep_final=False,
                    image_shape=(224, 224, 3))

            protected_images = generate_cloak_images(
                self.protector, original_images, target_emb=target_embedding)
            faces.cloaked_cropped_faces = protected_images

            # The merge works on the pixel-space perturbation (cloaked - original),
            # applied back onto the full photos.
            cloak_perturbation = (reverse_process_cloaked(protected_images)
                                  - reverse_process_cloaked(original_images))
            final_images = faces.merge_faces(cloak_perturbation)

            for cloaked_photo, src_path in zip(final_images, image_paths):
                # e.g. photo.jpg -> photo_<mode>_cloaked.png
                stem = ".".join(src_path.split(".")[:-1])
                out_name = "{}_{}_cloaked.{}".format(stem, mode, format)
                dump_image(cloaked_photo, out_name, format=format)

    print("Done!")
    return None
def run_protection(self, image_paths, mode='mid', th=0.04, sd=1e9, lr=10, max_step=500, batch_size=1,
                   format='png', separate_target=True, debug=False):
    """Cloak the faces in `image_paths` and write `*_<mode>_cloaked.<format>` files.

    Unless mode == 'custom', `th`, `max_step` and `lr` come from the preset
    returned by self.mode2param(mode). Raises if no usable images are found.
    """
    # Preset modes override the explicit optimization parameters.
    if mode != 'custom':
        th, max_step, lr = self.mode2param(mode)

    image_paths, loaded_images = filter_image_paths(image_paths)
    if not image_paths:
        raise Exception("No images in the directory")

    # All model work happens inside the module-level TF1 graph/session.
    with graph.as_default():
        faces = Faces(image_paths, loaded_images, self.aligner, verbose=1)
        original_images = np.array(faces.cropped_faces)

        with sess.as_default():
            if separate_target:
                # One target embedding per face: each crop gets a leading batch
                # dimension of 1 before going through the extractors.
                per_face = [
                    select_target_label(crop.reshape([1] + list(crop.shape)),
                                        self.feature_extractors_ls, self.fs_names)
                    for crop in original_images
                ]
                target_embedding = np.concatenate(per_face)
            else:
                # One shared target embedding for the whole batch.
                target_embedding = select_target_label(
                    original_images, self.feature_extractors_ls, self.fs_names)

            # This variant builds the attack inside generate_cloak_images itself
            # rather than caching a protector on self.
            protected_images = generate_cloak_images(
                sess, self.feature_extractors_ls, original_images,
                target_emb=target_embedding, th=th, faces=faces, sd=sd,
                lr=lr, max_step=max_step, batch_size=batch_size, debug=debug)
            faces.cloaked_cropped_faces = protected_images

            # The merge works on the pixel-space perturbation (cloaked - original),
            # applied back onto the full photos.
            cloak_perturbation = (reverse_process_cloaked(protected_images)
                                  - reverse_process_cloaked(original_images))
            final_images = faces.merge_faces(cloak_perturbation)

            for cloaked_photo, src_path in zip(final_images, image_paths):
                # e.g. photo.jpg -> photo_<mode>_cloaked.png
                stem = ".".join(src_path.split(".")[:-1])
                out_name = "{}_{}_cloaked.{}".format(stem, mode, format)
                dump_image(cloaked_photo, out_name, format=format)

    print("Done!")
    return None