def main():
    sample_dir = './test_DI-2-FGSM-3/'
    if not os.path.exists(sample_dir):
        os.makedirs(sample_dir)

    InceptionResnet_model_1 = InceptionResnetV1(pretrained='vggface2').eval().to(device_0)
    print('load InceptionResnet-vggface2.pt successfully')

    # InceptionResnet_model_2 = InceptionResnetV1(pretrained='casia-webface').eval().to(device_0)
    # print('load InceptionResnet-casia-webface.pt successfully')

    IR_50_model_1 = IR_50([112, 112])
    IR_50_model_1.load_state_dict(
        torch.load(
            '/notebooks/Workspace/tmp/pycharm_project_314/TianChi/Face_recognition/irse/model/backbone_ir50_asia.pth'
        ))
    IR_50_model_1.eval().to(device_0)
    print('load IR_50 successfully')

    # IR_152_model_1 = IR_152([112, 112])
    # IR_152_model_1.load_state_dict(
    #     torch.load(
    #         '/notebooks/Workspace/tmp/pycharm_project_314/TianChi/Face_recognition/irse/model/Backbone_IR_152_Epoch_112_Batch_2547328_Time_2019-07-13-02-59_checkpoint.pth'))
    # IR_152_model_1.eval().to(device_0)
    # print('load IR_152 successfully')

    # IR_SE_50 = Backbone(50, mode='ir_se').eval().to(device_1)
    # print('load IR_SE_50 successfully')

    # mobileFaceNet = MobileFaceNet(512).eval().to(device_0)
    # print('load mobileFaceNet successfully')

    Insightface_iresnet34 = insightface.iresnet34(pretrained=True)
    Insightface_iresnet34.eval().to(device_1)
    print('load Insightface_iresnet34 successfully')

    Insightface_iresnet50 = insightface.iresnet50(pretrained=True)
    Insightface_iresnet50.eval().to(device_1)
    print('load Insightface_iresnet50 successfully')

    Insightface_iresnet100 = insightface.iresnet100(pretrained=True)
    Insightface_iresnet100.eval().to(device_1)
    print('load Insightface_iresnet100 successfully')

    # ########################## vgg16
    # from Face_recognition.vgg16.vgg16 import CenterLossModel, loadCheckpoint
    # vgg16_checkpoint = loadCheckpoint('/notebooks/Workspace/tmp/pycharm_project_314/TianChi/Face_recognition/vgg16/model')
    # VGG16 = CenterLossModel(embedding_size=512, num_classes=712, checkpoint=vgg16_checkpoint).eval().to(device_1)
    # print('load VGG16 successfully')

    arc_face_ir_se_50 = Arcface()
    arc_face_ir_se_50.eval()
    arc_face_ir_se_50.to(device_0)

    models = []
    models.append(InceptionResnet_model_1)
    models.append(IR_50_model_1)
    models.append(Insightface_iresnet34)
    models.append(Insightface_iresnet50)
    models.append(Insightface_iresnet100)
    models.append(arc_face_ir_se_50)

    criterion = nn.MSELoss()  # cpu

    # collect all images to attack
    paths = []
    picpath = '/notebooks/Workspace/tmp/pycharm_project_314/TianChi/images'
    for root, dirs, files in os.walk(picpath):
        for f in files:
            paths.append(os.path.join(root, f))
    random.shuffle(paths)

    # paras
    eps = 1
    steps = 50
    output_path = './output_img'
    momentum = 0.3
    alpha = 0.35
    beta = 0.3
    gamma = 0.1

    ##### cal mean feature face
    print('cal mean feature face #########################')

    # ##########
    # cal mean feature face on only 712 images
    # mean_face_1 = torch.zeros(1, 3, 112, 112).detach()
    # for path in tqdm(paths):
    #     image = Image.open(path)
    #     in_tensor_1 = img2tensor(np.array(image))
    #     mean_face_1 += in_tensor_1
    # mean_face_1 = mean_face_1 / 712
    # ##########

    # with torch.no_grad():
    #     mean_face = torch.zeros(512).detach()
    #     for path in tqdm(paths):
    #         start = time.time()
    #         print('cal mean face ' + path + ' ===============>')
    #         image = Image.open(path)
    #
    #         # define paras
    #         # in_tensor is origin tensor of image
    #         # in_variable changes with gradient
    #         in_tensor_1 = img2tensor(np.array(image))
    #         # print(in_tensor.shape)
    #         this_feature_face = None
    #
    #         # origin feature
    #         _origin_InceptionResnet_model_1 = InceptionResnet_model_1(in_tensor_1.to(device_0)).cpu()
    #         # _origin_InceptionResnet_model_2 = InceptionResnet_model_2(in_tensor_1.to(device_0)).cpu()
    #         _origin_IR_50_model_1 = IR_50_model_1(in_tensor_1.to(device_0)).cpu()
    #         # _origin_IR_152_model_1 = IR_152_model_1(in_tensor_1.to(device_0)).cpu()
    #         # _origin_IR_SE_50 = IR_SE_50(in_tensor_1.to(device_1)).cpu()
    #         # _origin_mobileFaceNet = mobileFaceNet(in_tensor_1.to(device_0)).cpu()
    #         _origin_Insightface_iresent34 = Insightface_iresnet34(in_tensor_1.to(device_1)).cpu()
    #         _origin_Insightface_iresent50 = Insightface_iresnet50(in_tensor_1.to(device_1)).cpu()
    #         _origin_Insightface_iresent100 = Insightface_iresnet100(in_tensor_1.to(device_1)).cpu()
    #         _origin_arcface = arc_face_ir_se_50(in_tensor_1.to(device_0)).cpu()
    #         # _origin_VGG16 = VGG16.forward_GetFeature(in_tensor_1.to(device_1)).cpu()
    #
    #         this_feature_face = _origin_InceptionResnet_model_1 + \
    #                             _origin_Insightface_iresent34 + \
    #                             _origin_Insightface_iresent50 + \
    #                             _origin_Insightface_iresent100 + \
    #                             _origin_arcface
    #
    #         # this_feature_face = _origin_InceptionResnet_model_1 + \
    #         #                     _origin_InceptionResnet_model_2 + \
    #         #                     _origin_IR_50_model_1 + \
    #         #                     _origin_IR_152_model_1 + \
    #         #                     _origin_IR_SE_50 + \
    #         #                     _origin_mobileFaceNet + \
    #         #                     _origin_Insightface_iresent34 + \
    #         #                     _origin_Insightface_iresent50 + \
    #         #                     _origin_Insightface_iresent100 + \
    #         #                     _origin_arcface + \
    #         #                     _origin_VGG16
    #
    #         this_feature_face = this_feature_face / 5.
    #         mean_face = mean_face + this_feature_face
    #
    #         # del _origin_InceptionResnet_model_1
    #         # del _origin_InceptionResnet_model_2
    #         # del _origin_IR_50_model_1
    #         # del _origin_IR_152_model_1
    #         # del _origin_IR_SE_50
    #         # del _origin_mobileFaceNet
    #         # del _origin_Insightface_iresent34
    #         # del _origin_Insightface_iresent50
    #         # del _origin_Insightface_iresent100
    #         # del _origin_VGG16
    #         # del _origin_arcface
    #         # del this_feature_face
    #         # del in_tensor_1
    #
    #         del _origin_InceptionResnet_model_1
    #         del _origin_IR_50_model_1
    #         del _origin_Insightface_iresent34
    #         del _origin_Insightface_iresent50
    #         del _origin_Insightface_iresent100
    #         del _origin_arcface
    #         del this_feature_face
    #         del in_tensor_1
    #
    #     mean_face = mean_face / 712.
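
    # The attack below performs gradient ascent on alpha * loss1 - beta * loss2:
    # loss1 pushes the averaged ensemble embedding of the adversarial image away
    # from the embedding of the original image, while loss2 pulls it toward a
    # precomputed "mean face" embedding. cal_mean_face_by_extend_dataset (defined
    # elsewhere) is assumed to return that averaged ensemble embedding over an
    # extended face dataset; the commented block above shows the equivalent
    # computation over the 712 competition images.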
    mean_face = cal_mean_face_by_extend_dataset(models)
    print('finish cal mean face...')
    ############################

    print('######attack...##################')
    from mydataset import CustomDataset
    custom_dataset = CustomDataset()
    train_loader = torch.utils.data.DataLoader(dataset=custom_dataset,
                                               batch_size=6,
                                               shuffle=True)

    count = 0
    progressRate = 0.0
    for i, (x, path) in enumerate(train_loader):
        start = time.time()
        print('processing ' + str(progressRate) + ' ===============>')
        in_tensor = x
        origin_variable = in_tensor.detach()
        in_variable = in_tensor.detach()

        ##########
        # clip bounds: keep the adversarial image within +/- 0.1 of the original
        in_variable_max = in_variable + 0.1
        in_variable_min = in_variable - 0.1
        ##########

        in_tensor = in_tensor.squeeze()
        adv = None

        perturbation = torch.Tensor(x.size(0), 3, 112, 112).uniform_(-0.05, 0.05)  # test here
        in_variable += perturbation
        in_variable.data.clamp_(-1.0, 1.0)
        in_variable.requires_grad = True

        g_noise = torch.zeros_like(in_variable)

        origin_InceptionResnet_model_1 = InceptionResnet_model_1(origin_variable.to(device_0)).cpu()
        # origin_InceptionResnet_model_2 = InceptionResnet_model_2(origin_variable.to(device_0)).cpu()
        origin_IR_50_model_1 = IR_50_model_1(origin_variable.to(device_0)).cpu()
        # origin_IR_152_model_1 = IR_152_model_1(origin_variable.to(device_0)).cpu()
        # origin_IR_SE_50 = IR_SE_50(origin_variable.to(device_1)).cpu()
        # origin_mobileFaceNet = mobileFaceNet(origin_variable.to(device_0)).cpu()
        origin_Insightface_iresent34 = Insightface_iresnet34(origin_variable.to(device_1)).cpu()
        origin_Insightface_iresent50 = Insightface_iresnet50(origin_variable.to(device_1)).cpu()
        origin_Insightface_iresent100 = Insightface_iresnet100(origin_variable.to(device_1)).cpu()
        origin_arcface = arc_face_ir_se_50(origin_variable.to(device_0)).cpu()
        # origin_VGG16 = VGG16.forward_GetFeature(origin_variable.to(device_1)).cpu()

        # origin_average_out = (origin_InceptionResnet_model_1 + origin_IR_50_model_1 + origin_Insightface_iresent34 +
        #                       origin_Insightface_iresent50 + origin_Insightface_iresent100 + origin_arcface) / 6

        # origin_average_out = (origin_InceptionResnet_model_1 +
        #                       origin_InceptionResnet_model_2 +
        #                       origin_IR_50_model_1 +
        #                       origin_IR_152_model_1 +
        #                       origin_IR_SE_50 +
        #                       origin_mobileFaceNet +
        #                       origin_Insightface_iresent34 +
        #                       origin_Insightface_iresent50 +
        #                       origin_Insightface_iresent100 +
        #                       origin_arcface +
        #                       origin_VGG16) / 11.

        origin_average_out = (origin_InceptionResnet_model_1 +
                              origin_IR_50_model_1 +
                              origin_Insightface_iresent34 +
                              origin_Insightface_iresent50 +
                              origin_Insightface_iresent100 +
                              origin_arcface) / 6.

        # target pix mean face
        # After verification, it was found to be of no use.
        # target_mean_face_InceptionResnet_model_1 = InceptionResnet_model_1(mean_face_1.to(device_0)).cpu()
        # target_mean_face_IR_50_model_1 = IR_50_model_1(mean_face_1.to(device_0)).cpu()
        # target_mean_face_Insightface_iresent34 = Insightface_iresnet34(mean_face_1.to(device_1)).cpu()
        # target_mean_face_Insightface_iresent50 = Insightface_iresnet50(mean_face_1.to(device_1)).cpu()
        # target_mean_faceInsightface_iresent100 = Insightface_iresnet100(mean_face_1.to(device_1)).cpu()
        # target_mean_face_arcface = arc_face_ir_se_50(mean_face_1.to(device_0)).cpu()
        # target_mean_face_average_out = (target_mean_face_InceptionResnet_model_1 + target_mean_face_IR_50_model_1 +
        #                                 target_mean_face_Insightface_iresent34 + target_mean_face_Insightface_iresent50 +
        #                                 target_mean_faceInsightface_iresent100 + target_mean_face_arcface) / 6

        # sum gradient
        for i in range(steps):
            print('step: ' + str(i))

            # new_variable = input_diversity(in_variable, 112, 0.5)
            # increase input diversity with random-size resizing and padding (DI-2-FGSM)

            mediate_InceptionResnet_model_1 = InceptionResnet_model_1(in_variable.to(device_0)).cpu()
            # mediate_InceptionResnet_model_2 = InceptionResnet_model_2(new_variable.to(device_0)).cpu()
            mediate_IR_50_model_1 = IR_50_model_1(in_variable.to(device_0)).cpu()
            # mediate_IR_152_model_1 = IR_152_model_1(new_variable.to(device_0)).cpu()
            # mediate_IR_SE_50 = IR_SE_50(new_variable.to(device_1)).cpu()
            # mediate_mobileFaceNet = mobileFaceNet(new_variable.to(device_0)).cpu()
            mediate_Insightface_iresent34 = Insightface_iresnet34(in_variable.to(device_1)).cpu()
            mediate_Insightface_iresent50 = Insightface_iresnet50(in_variable.to(device_1)).cpu()
            mediate_Insightface_iresent100 = Insightface_iresnet100(in_variable.to(device_1)).cpu()
            # mediate_VGG16 = VGG16.forward_GetFeature(new_variable.to(device_1)).cpu()
            mediate_arcface = arc_face_ir_se_50(in_variable.to(device_0)).cpu()

            # average_out = (mediate_InceptionResnet_model_1 + mediate_InceptionResnet_model_2 + mediate_IR_50_model_1 +
            #                mediate_IR_152_model_1 + mediate_IR_SE_50 + mediate_mobileFaceNet + mediate_Insightface_iresent34 +
            #                mediate_Insightface_iresent50 + mediate_Insightface_iresent100 + mediate_VGG16) / 10

            # mediate_average_out = (mediate_InceptionResnet_model_1 + mediate_IR_50_model_1 + mediate_Insightface_iresent34 +
            #                        mediate_Insightface_iresent50 + mediate_Insightface_iresent100 + mediate_arcface) / 6

            # mediate_average_out = (mediate_InceptionResnet_model_1 +
            #                        mediate_InceptionResnet_model_2 +
            #                        mediate_IR_50_model_1 +
            #                        mediate_IR_152_model_1 +
            #                        mediate_IR_SE_50 +
            #                        mediate_mobileFaceNet +
            #                        mediate_Insightface_iresent34 +
            #                        mediate_Insightface_iresent50 +
            #                        mediate_Insightface_iresent100 +
            #                        mediate_VGG16 +
            #                        mediate_arcface) / 11.

            mediate_average_out = (mediate_InceptionResnet_model_1 +
                                   mediate_IR_50_model_1 +
                                   mediate_Insightface_iresent34 +
                                   mediate_Insightface_iresent50 +
                                   mediate_Insightface_iresent100 +
                                   mediate_arcface) / 6.
            # loss1 = criterion(mediate_InceptionResnet_model_1, origin_InceptionResnet_model_1) + \
            #         criterion(mediate_InceptionResnet_model_2, origin_InceptionResnet_model_2) + \
            #         criterion(mediate_IR_50_model_1, origin_IR_50_model_1) + \
            #         criterion(mediate_IR_152_model_1, origin_IR_152_model_1) + \
            #         criterion(mediate_IR_SE_50, origin_IR_SE_50) + \
            #         criterion(mediate_mobileFaceNet, origin_mobileFaceNet) + \
            #         criterion(mediate_Insightface_iresent34, origin_Insightface_iresent34) + \
            #         criterion(mediate_Insightface_iresent50, origin_Insightface_iresent50) + \
            #         criterion(mediate_Insightface_iresent100, origin_Insightface_iresent100) + \
            #         criterion(mediate_VGG16, origin_VGG16)

            loss1 = criterion(mediate_average_out, origin_average_out)

            # loss2 = criterion(mediate_InceptionResnet_model_1, mean_face) + \
            #         criterion(mediate_InceptionResnet_model_2, mean_face) + \
            #         criterion(mediate_IR_50_model_1, mean_face) + \
            #         criterion(mediate_IR_152_model_1, mean_face) + \
            #         criterion(mediate_IR_SE_50, mean_face) + \
            #         criterion(mediate_mobileFaceNet, mean_face) + \
            #         criterion(mediate_Insightface_iresent34, mean_face) + \
            #         criterion(mediate_Insightface_iresent50, mean_face) + \
            #         criterion(mediate_Insightface_iresent100, mean_face) + \
            #         criterion(mediate_VGG16, mean_face)
            # loss2 = criterion(mediate_average_out, target_mean_face_average_out)
            # loss3 = criterion(mediate_average_out, torch.zeros(512).detach())

            loss2 = criterion(mediate_average_out, mean_face)

            # loss3 = criterion(mediate_InceptionResnet_model_1, average_out) + \
            #         criterion(mediate_InceptionResnet_model_2, average_out) + \
            #         criterion(mediate_IR_50_model_1, average_out) + \
            #         criterion(mediate_IR_152_model_1, average_out) + \
            #         criterion(mediate_mobileFaceNet, average_out) + \
            #         criterion(mediate_Insightface_iresent34, average_out) + \
            #         criterion(mediate_Insightface_iresent50, average_out) + \
            #         criterion(mediate_Insightface_iresent100, average_out) + \
            #         criterion(mediate_VGG16, average_out) + \
            #         criterion(mediate_IR_SE_50, average_out)

            # loss = alpha * loss1 - beta * loss2 - gamma * loss3
            loss = alpha * loss1 - beta * loss2
            # print('loss : %f ' % loss, 'loss1 : %f ' % loss1, 'loss2 : %f ' % loss2, 'loss3 : %f ' % loss3)

            # compute gradients
            loss.backward(retain_graph=True)

            g_noise = momentum * g_noise + (in_variable.grad / in_variable.grad.data.norm(1))
            g_noise = g_noise / g_noise.data.norm(1)

            g1 = g_noise
            g2 = g_noise

            # if i % 3 == 0:
            kernel = gkern(3, 2).astype(np.float32)
            gaussian_blur1 = GaussianBlur(kernel)
            g1 = gaussian_blur1(g1)
            # else:
            addition = TVLoss()
            g2 = addition(g2)

            g_noise = 0.25 * g1 + 0.75 * g2

            in_variable.data = in_variable.data + ((eps / 255.) * torch.sign(g_noise))  # * torch.from_numpy(mat).unsqueeze(0).float()
            in_variable.data = clip_by_tensor(in_variable.data, in_variable_min.data, in_variable_max.data)
            in_variable.grad.data.zero_()  # unnecessary
            # del new_variable

        # g_noise = in_variable.data - origin_variable
        # g_noise.clamp_(-0.2, 0.2)
        # in_variable.data = origin_variable + g_noise

        # deprocess image
        for i in range(len(in_variable.data.cpu().numpy())):
            adv = in_variable.data.cpu().numpy()[i]  # (3, 112, 112)
            perturbation = (adv - in_tensor.cpu().numpy())
            adv = adv * 128.0 + 127.0
            adv = adv.swapaxes(0, 1).swapaxes(1, 2)
            adv = adv[..., ::-1]
            adv = np.clip(adv, 0, 255).astype(np.uint8)

            # sample_dir = './target_mean_face/'
            # if not os.path.exists(sample_dir):
            #     os.makedirs(sample_dir)

            advimg = sample_dir + path[i].split('/')[-1].split('.')[-2] + '.jpg'
            print(advimg)
            cv2.imwrite(advimg, adv)
            print("save path is " + advimg)

        print('cost time is %.2f s ' % (time.time() - start))
        count += 6
        progressRate = count / 712.
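
# --- helper sketch (assumption) -----------------------------------------------
# clip_by_tensor is called in the loop above but is not defined in this file.
# If it is not already provided elsewhere in the repo, a minimal element-wise
# clamp like the following matches how it is used (clip t into [t_min, t_max]):
def clip_by_tensor(t, t_min, t_max):
    # element-wise clamp: max(min(t, t_max), t_min)
    return torch.max(torch.min(t, t_max), t_min)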
def main():
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # model920 = model_920().to(device)
    model920_facenet_criterion = nn.MSELoss()
    # model921 = model_921()
    # model921_facenet_criterion = nn.MSELoss()

    InceptionResnet_model_1 = InceptionResnetV1(pretrained='vggface2').eval().to(device)
    print('load InceptionResnet-vggface2.pt successfully')

    InceptionResnet_model_2 = InceptionResnetV1(pretrained='casia-webface').eval().to(device)
    print('load InceptionResnet-casia-webface.pt successfully')

    IR_50_model_1 = IR_50([112, 112])
    IR_50_model_1.load_state_dict(
        torch.load(
            '/notebooks/Workspace/tmp/pycharm_project_314/TianChi/Face_recognition/irse/model/backbone_ir50_asia.pth'
        ))
    IR_50_model_1.eval().to(device)
    print('load IR_50 successfully')

    IR_152_model_1 = IR_152([112, 112])
    IR_152_model_1.load_state_dict(
        torch.load(
            '/notebooks/Workspace/tmp/pycharm_project_314/TianChi/Face_recognition/irse/model/Backbone_IR_152_Epoch_112_Batch_2547328_Time_2019-07-13-02-59_checkpoint.pth'
        ))
    IR_152_model_1.eval().to(device)
    print('load IR_152 successfully')

    # IR_152_model_2 = IR_152([112, 112])
    # IR_152_model_2.load_state_dict(
    #     torch.load(
    #         '/notebooks/Workspace/tmp/pycharm_project_314/TianChi/Face_recognition/irse/model/Head_ArcFace_Epoch_112_Batch_2547328_Time_2019-07-13-02-59_checkpoint.pth'))
    # IR_152_model_2.eval().to(device)
    # print('load IR_152_ArcFace successfully')

    import insightface

    # Insightface_iresent100 = insightface.iresnet100(pretrained=True)
    # Insightface_iresent100.eval().to(device)
    # print('load Insightface_iresent100 successfully')

    Insightface_iresnet34 = insightface.iresnet34(pretrained=True)
    Insightface_iresnet34.eval().to(device)
    print('load Insightface_iresnet34 successfully')

    Insightface_iresnet50 = insightface.iresnet50(pretrained=True)
    Insightface_iresnet50.eval().to(device)
    print('load Insightface_iresnet50 successfully')

    Insightface_iresnet100 = insightface.iresnet100(pretrained=True)
    Insightface_iresnet100.eval().to(device)
    print('load Insightface_iresnet100 successfully')

    criterion = nn.MSELoss()  # cpu

    # collect all images to attack
    paths = []
    picpath = '/notebooks/Workspace/tmp/pycharm_project_314/TianChi/images'
    for root, dirs, files in os.walk(picpath):
        for f in files:
            paths.append(os.path.join(root, f))
    random.shuffle(paths)

    # paras
    eps = 1
    steps = 75
    output_path = './output_img'
    momentum = 1.0

    for path in tqdm(paths):
        start = time.time()
        print('processing ' + path + ' ===============>')
        image = Image.open(path)

        # define paras
        # in_tensor is origin tensor of image
        # in_variable changes with gradient
        in_tensor = img2tensor(np.array(image))
        # print(in_tensor.shape)
        in_variable = in_tensor.detach().to(device)
        in_tensor = in_tensor.squeeze().to(device)
        adv = None

        # in_tensor = img2tensor_224(image)
        # # print(in_tensor.shape)
        # in_variable = in_tensor.to(device)
        # in_tensor = in_tensor.squeeze().to(device)
        # adv = None

        # origin feature
        # origin_model920 = model920(in_variable).to(device)
        # origin_model921 = model921(in_variable)
        origin_InceptionResnet_model_1 = InceptionResnet_model_1(in_variable)
        origin_InceptionResnet_model_2 = InceptionResnet_model_2(in_variable)
        origin_IR_50_model_1 = IR_50_model_1(in_variable)
        origin_IR_152_model_1 = IR_152_model_1(in_variable)
        # origin_IR_152_model_2 = IR_152_model_2(in_variable)
        origin_Insightface_iresent34 = Insightface_iresnet34(in_variable)
        origin_Insightface_iresent50 = Insightface_iresnet50(in_variable)
        origin_Insightface_iresent100 = Insightface_iresnet100(in_variable)

        # 1. untarget attack -> random noise
        # 2. target attack -> x = alpha * target + (1 - alpha) * x
        perturbation = torch.Tensor(3, 112, 112).uniform_(-0.1, 0.1).to(device)
        in_variable = in_variable + perturbation
        in_variable.data.clamp_(-1.0, 1.0)
        in_variable.requires_grad = True

        g_noise = 0.0

        # sum gradient
        for i in range(steps):
            # print('step: ' + str(i))
            # in_variable = in_variable.to(device)

            # out_model920 = model920(in_variable)
            # out_model921 = model921(in_variable)
            out_InceptionResnet_model_1 = InceptionResnet_model_1(in_variable)
            out_InceptionResnet_model_2 = InceptionResnet_model_2(in_variable)
            out_IR_50_model_1 = IR_50_model_1(in_variable)
            out_IR_152_model_1 = IR_152_model_1(in_variable)
            # out_IR_152_model_2 = IR_152_model_2(in_variable)
            out_Insightface_iresent34 = Insightface_iresnet34(in_variable)
            out_Insightface_iresent50 = Insightface_iresnet50(in_variable)
            out_Insightface_iresent100 = Insightface_iresnet100(in_variable)

            loss = criterion(origin_InceptionResnet_model_1, out_InceptionResnet_model_1) + \
                   criterion(origin_InceptionResnet_model_2, out_InceptionResnet_model_2) + \
                   criterion(origin_IR_50_model_1, out_IR_50_model_1) + \
                   criterion(origin_IR_152_model_1, out_IR_152_model_1) + \
                   criterion(origin_Insightface_iresent34, out_Insightface_iresent34) + \
                   criterion(origin_Insightface_iresent50, out_Insightface_iresent50) + \
                   criterion(origin_Insightface_iresent100, out_Insightface_iresent100)
            # print('loss : %f' % loss)

            # compute gradients
            loss.backward(retain_graph=True)

            g_noise = momentum * g_noise + (in_variable.grad / in_variable.grad.data.norm(1))
            g_noise = g_noise / g_noise.data.norm(1)

            if i % 2 == 0:
                kernel = gkern(3, 2).astype(np.float32)
                gaussian_blur1 = GaussianBlur(kernel).to(device)
                g_noise = gaussian_blur1(g_noise)
                g_noise = torch.clamp(g_noise, -0.1, 0.1)
            else:
                addition = TVLoss()
                g_noise = addition(g_noise)

            in_variable.data = in_variable.data + ((eps / 255.) * torch.sign(g_noise))  # * torch.from_numpy(mat).unsqueeze(0).float()
            in_variable.grad.data.zero_()  # unnecessary

        # deprocess image
        adv = in_variable.data.cpu().numpy()[0]  # (3, 112, 112)
        perturbation = (adv - in_tensor.cpu().numpy())
        adv = adv * 128.0 + 127.0
        adv = adv.swapaxes(0, 1).swapaxes(1, 2)
        adv = adv[..., ::-1]
        adv = np.clip(adv, 0, 255).astype(np.uint8)

        sample_dir = '/notebooks/Workspace/tmp/pycharm_project_314/TianChi/main_3_output-8-28/'
        if not os.path.exists(sample_dir):
            os.makedirs(sample_dir)

        advimg = sample_dir + path.split('/')[-1].split('.')[-2] + '.jpg'
        cv2.imwrite(advimg, adv)
        print("save path is " + advimg)
        print('cost time is %.2f s ' % (time.time() - start))
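
# --- helper sketches (assumptions) --------------------------------------------
# gkern and GaussianBlur are used in all three attacks (kernel = gkern(3, 2);
# GaussianBlur(kernel) applied to the accumulated gradient) but are not defined
# in this file. If they are not provided elsewhere in the repo, minimal versions
# consistent with that usage could look like this: gkern builds a normalized
# 2-D Gaussian kernel, and GaussianBlur applies it as a depthwise convolution
# over the three colour channels of an (N, 3, H, W) tensor.
import torch.nn.functional as F


def gkern(kernlen=3, nsig=2):
    # normalized 2-D Gaussian kernel of size kernlen x kernlen
    x = np.linspace(-nsig, nsig, kernlen)
    kern1d = np.exp(-0.5 * x ** 2)
    kernel = np.outer(kern1d, kern1d)
    return kernel / kernel.sum()


class GaussianBlur(nn.Module):
    # depthwise Gaussian blur, one copy of the kernel per RGB channel
    def __init__(self, kernel):
        super(GaussianBlur, self).__init__()
        weight = torch.from_numpy(kernel).float().expand(3, 1, *kernel.shape).contiguous()
        self.register_buffer('weight', weight)
        self.padding = kernel.shape[0] // 2

    def forward(self, x):
        return F.conv2d(x, self.weight, padding=self.padding, groups=3)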
def main():
    sample_dir = './target_mean_face_1/'
    if not os.path.exists(sample_dir):
        os.makedirs(sample_dir)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    InceptionResnet_model_1 = InceptionResnetV1(pretrained='vggface2').eval().to(device)
    print('load InceptionResnet-vggface2.pt successfully')

    InceptionResnet_model_2 = InceptionResnetV1(pretrained='casia-webface').eval().to(device)
    print('load InceptionResnet-casia-webface.pt successfully')

    IR_50_model_1 = IR_50([112, 112])
    IR_50_model_1.load_state_dict(
        torch.load(
            '/notebooks/Workspace/tmp/pycharm_project_314/TianChi/Face_recognition/irse/model/backbone_ir50_asia.pth'
        ))
    IR_50_model_1.eval().to(device)
    print('load IR_50 successfully')

    IR_152_model_1 = IR_152([112, 112])
    IR_152_model_1.load_state_dict(
        torch.load(
            '/notebooks/Workspace/tmp/pycharm_project_314/TianChi/Face_recognition/irse/model/Backbone_IR_152_Epoch_112_Batch_2547328_Time_2019-07-13-02-59_checkpoint.pth'
        ))
    IR_152_model_1.eval().to(device)
    print('load IR_152 successfully')

    IR_SE_50 = Backbone(50, mode='ir_se').eval().to(device)
    print('load IR_SE_50 successfully')

    mobileFaceNet = MobileFaceNet(512).eval().to(device)
    print('load mobileFaceNet successfully')

    # IR_152_model_2 = IR_152([112, 112])
    # IR_152_model_2.load_state_dict(
    #     torch.load(
    #         '/notebooks/Workspace/tmp/pycharm_project_314/TianChi/Face_recognition/irse/model/Head_ArcFace_Epoch_112_Batch_2547328_Time_2019-07-13-02-59_checkpoint.pth'))
    # IR_152_model_2.eval().to(device)
    # print('load IR_152_ArcFace successfully')

    import insightface

    Insightface_iresnet34 = insightface.iresnet34(pretrained=True)
    Insightface_iresnet34.eval().to(device)
    print('load Insightface_iresnet34 successfully')

    Insightface_iresnet50 = insightface.iresnet50(pretrained=True)
    Insightface_iresnet50.eval().to(device)
    print('load Insightface_iresnet50 successfully')

    Insightface_iresnet100 = insightface.iresnet100(pretrained=True)
    Insightface_iresnet100.eval().to(device)
    print('load Insightface_iresnet100 successfully')

    ########################### vgg16
    from Face_recognition.vgg16.vgg16 import CenterLossModel, loadCheckpoint
    vgg16_checkpoint = loadCheckpoint(
        '/notebooks/Workspace/tmp/pycharm_project_314/TianChi/Face_recognition/vgg16/model'
    )
    VGG16 = CenterLossModel(embedding_size=512,
                            num_classes=712,
                            checkpoint=vgg16_checkpoint).eval().to(device)
    print('load VGG16 successfully')

    # ################ on swj's server
    # InceptionResnet_model_1 = InceptionResnetV1(pretrained='vggface2').eval()
    # print('load InceptionResnet-vggface2.pt successfully')
    #
    # InceptionResnet_model_2 = InceptionResnetV1(pretrained='casia-webface').eval()
    # print('load InceptionResnet-casia-webface.pt successfully')
    #
    # IR_50_model_1 = IR_50([112, 112])
    # IR_50_model_1.load_state_dict(torch.load('./face_recognition/irse/model/backbone_ir50_asia.pth'))
    # IR_50_model_1.eval()
    # print('load IR_50 successfully')
    #
    # IR_152_model_1 = IR_152([112, 112])
    # IR_152_model_1.load_state_dict(torch.load(
    #     './face_recognition/irse/model/Backbone_IR_152_Epoch_112_Batch_2547328_Time_2019-07-13-02-59_checkpoint.pth'))
    # IR_152_model_1.eval()
    # print('load IR_152 successfully')
    #
    # IR_SE_50 = Backbone(50, mode='ir_se').eval()
    # print('load IR_SE_50 successfully')
    #
    # mobileFaceNet = MobileFaceNet(512).eval()
    # print('load mobileFaceNet successfully')
    #
    # Insightface_iresnet34 = insightface.iresnet34(pretrained=True)
    # Insightface_iresnet34.eval()
    # print('load Insightface_iresnet34 successfully')
    #
    # Insightface_iresnet50 = insightface.iresnet50(pretrained=True)
    # Insightface_iresnet50.eval()
    # print('load Insightface_iresnet50 successfully')
    #
    # Insightface_iresnet100 = insightface.iresnet100(pretrained=True)
    # Insightface_iresnet100.eval()
    # print('load Insightface_iresnet100 successfully')
    #
    # vgg16_checkpoint = loadCheckpoint('./face_recognition/vgg16/model')
    # VGG16 = CenterLossModel(embedding_size=512, num_classes=712, checkpoint=vgg16_checkpoint).eval()
    # print('load vgg16 successfully')

    #### load model to cuda
    InceptionResnet_model_1.to(device)
    InceptionResnet_model_2.to(device)
    IR_50_model_1.to(device)
    IR_152_model_1.to(device)
    IR_SE_50.to(device)
    mobileFaceNet.to(device)
    Insightface_iresnet34.to(device)
    Insightface_iresnet50.to(device)
    Insightface_iresnet100.to(device)
    VGG16.to(device)

    criterion = nn.MSELoss()  # cpu

    # collect all images to attack
    paths = []
    picpath = '/notebooks/Workspace/tmp/pycharm_project_314/TianChi/images'
    for root, dirs, files in os.walk(picpath):
        for f in files:
            paths.append(os.path.join(root, f))
    random.shuffle(paths)

    # paras
    eps = 1
    steps = 50
    output_path = './output_img'
    momentum = 1.0
    alpha = 0.35
    beta = 0.9
    gamma = 0.1

    ####### cal mean feature face
    print('cal mean feature face #########################')
    mean_face = torch.zeros(512).detach().to(device)
    for path in tqdm(paths):
        start = time.time()
        print('cal mean face ' + path + ' ===============>')
        image = Image.open(path)

        # define paras
        # in_tensor is origin tensor of image
        # in_variable changes with gradient
        in_tensor_1 = img2tensor(np.array(image))
        # print(in_tensor.shape)
        in_variable_1 = in_tensor_1.detach().to(device)
        in_tensor_1 = in_tensor_1.squeeze().to(device)
        this_feature_face = None

        # origin feature (no gradients needed when accumulating the mean face)
        with torch.no_grad():
            _origin_InceptionResnet_model_1 = InceptionResnet_model_1(in_variable_1)
            _origin_InceptionResnet_model_2 = InceptionResnet_model_2(in_variable_1)
            _origin_IR_50_model_1 = IR_50_model_1(in_variable_1)
            _origin_IR_152_model_1 = IR_152_model_1(in_variable_1)
            _origin_IR_SE_50 = IR_SE_50(in_variable_1)
            _origin_mobileFaceNet = mobileFaceNet(in_variable_1)
            _origin_Insightface_iresent34 = Insightface_iresnet34(in_variable_1)
            _origin_Insightface_iresent50 = Insightface_iresnet50(in_variable_1)
            _origin_Insightface_iresent100 = Insightface_iresnet100(in_variable_1)
            _origin_VGG16 = VGG16.forward_GetFeature(in_variable_1)

        this_feature_face = _origin_InceptionResnet_model_1 * 0.7 + \
                            _origin_InceptionResnet_model_2 * 0.7 + \
                            _origin_IR_50_model_1 * 0.8 + \
                            _origin_IR_152_model_1 * 0.8 + \
                            _origin_IR_SE_50 * 0.7 + \
                            _origin_mobileFaceNet * 0.7 + \
                            _origin_Insightface_iresent34 * 0.8 + \
                            _origin_Insightface_iresent50 * 0.9 + \
                            _origin_Insightface_iresent100 * 0.9 + \
                            _origin_VGG16 * 0.7

        this_feature_face = this_feature_face / 10.
        mean_face = mean_face + this_feature_face

        del _origin_InceptionResnet_model_1
        del _origin_InceptionResnet_model_2
        del _origin_IR_50_model_1
        del _origin_IR_152_model_1
        del _origin_IR_SE_50
        del _origin_mobileFaceNet
        del _origin_Insightface_iresent34
        del _origin_Insightface_iresent50
        del _origin_Insightface_iresent100
        del _origin_VGG16
        del this_feature_face
        del in_tensor_1
        del in_variable_1

    mean_face = mean_face / 712.
    print('finish cal mean face...')

    #############################
    print('######attack...##################')
    for path in tqdm(paths):
        start = time.time()
        print('processing ' + path + ' ===============>')
        image = Image.open(path)

        # define paras
        # in_tensor is origin tensor of image
        # in_variable changes with gradient
        in_tensor = img2tensor(np.array(image))

        origin_variable = in_tensor.detach().to(device)
        tar_tensor = mean_face.to(device)

        in_variable = in_tensor.detach().to(device)
        tar_variable = tar_tensor.detach().to(device)

        in_tensor = in_tensor.squeeze().to(device)
        adv = None

        perturbation = torch.Tensor(3, 112, 112).uniform_(-0.05, 0.05).to(device)
        in_variable += perturbation
        in_variable.data.clamp_(-1.0, 1.0)
        in_variable.requires_grad = True

        g_noise = torch.zeros_like(in_variable).to(device)

        origin_InceptionResnet_model_1 = InceptionResnet_model_1(origin_variable)
        origin_InceptionResnet_model_2 = InceptionResnet_model_2(origin_variable)
        origin_IR_50_model_1 = IR_50_model_1(origin_variable)
        origin_IR_152_model_1 = IR_152_model_1(origin_variable)
        origin_IR_SE_50 = IR_SE_50(origin_variable)
        origin_mobileFaceNet = mobileFaceNet(origin_variable)
        # origin_IR_152_model_2 = IR_152_model_2(in_variable)
        origin_Insightface_iresent34 = Insightface_iresnet34(origin_variable)
        origin_Insightface_iresent50 = Insightface_iresnet50(origin_variable)
        origin_Insightface_iresent100 = Insightface_iresnet100(origin_variable)
        origin_VGG16 = VGG16.forward_GetFeature(origin_variable)

        # sum gradient
        for i in range(steps):
            print('step: ' + str(i))
            mediate_InceptionResnet_model_1 = InceptionResnet_model_1(in_variable)
            mediate_InceptionResnet_model_2 = InceptionResnet_model_2(in_variable)
            mediate_IR_50_model_1 = IR_50_model_1(in_variable)
            mediate_IR_152_model_1 = IR_152_model_1(in_variable)
            mediate_IR_SE_50 = IR_SE_50(in_variable)
            mediate_mobileFaceNet = mobileFaceNet(in_variable)
            # mediate_IR_152_model_2 = IR_152_model_2(in_variable)
            mediate_Insightface_iresent34 = Insightface_iresnet34(in_variable)
            mediate_Insightface_iresent50 = Insightface_iresnet50(in_variable)
            mediate_Insightface_iresent100 = Insightface_iresnet100(in_variable)
            mediate_VGG16 = VGG16.forward_GetFeature(in_variable)

            average_out = (mediate_InceptionResnet_model_1 + mediate_InceptionResnet_model_2 + mediate_IR_50_model_1 +
                           mediate_IR_152_model_1 + mediate_IR_SE_50 + mediate_mobileFaceNet + mediate_Insightface_iresent34 +
                           mediate_Insightface_iresent50 + mediate_Insightface_iresent100 + mediate_VGG16) / 10

            # loss1 pushes away from the origin image, loss2 approaches the target (mean face) image
            # loss1 = criterion(origin_InceptionResnet_model_1, mediate_InceptionResnet_model_1) + \
            #         criterion(origin_InceptionResnet_model_2, mediate_InceptionResnet_model_2) + \
            #         criterion(origin_IR_50_model_1, mediate_IR_50_model_1) + \
            #         criterion(origin_IR_SE_50, mediate_IR_SE_50) + \
            #         criterion(origin_mobileFaceNet, mediate_mobileFaceNet) + \
            #         criterion(origin_Insightface_iresent34, mediate_Insightface_iresent34) + \
            #         criterion(origin_Insightface_iresent50, mediate_Insightface_iresent50) + \
            #         criterion(origin_Insightface_iresent100, mediate_Insightface_iresent100) + \
            #         criterion(origin_VGG16, mediate_VGG16)

            loss1 = criterion(origin_InceptionResnet_model_1, mediate_InceptionResnet_model_1) * 0.7 + \
                    criterion(origin_InceptionResnet_model_2, mediate_InceptionResnet_model_2) * 0.7 + \
                    criterion(origin_IR_50_model_1, mediate_IR_50_model_1) * 0.8 + \
                    criterion(origin_IR_152_model_1, mediate_IR_152_model_1) * 0.8 + \
                    criterion(origin_IR_SE_50, mediate_IR_SE_50) * 0.7 + \
                    criterion(origin_mobileFaceNet, mediate_mobileFaceNet) * 0.7 + \
                    criterion(origin_Insightface_iresent34, mediate_Insightface_iresent34) * 0.8 + \
                    criterion(origin_Insightface_iresent50, mediate_Insightface_iresent50) * 0.9 + \
                    criterion(origin_Insightface_iresent100, mediate_Insightface_iresent100) * 0.9 + \
                    criterion(origin_VGG16, mediate_VGG16) * 0.7

            loss2 = criterion(mediate_InceptionResnet_model_1, mean_face) * 0.7 + \
                    criterion(mediate_InceptionResnet_model_2, mean_face) * 0.7 + \
                    criterion(mediate_IR_50_model_1, mean_face) * 0.8 + \
                    criterion(mediate_IR_152_model_1, mean_face) * 0.8 + \
                    criterion(mediate_IR_SE_50, mean_face) * 0.7 + \
                    criterion(mediate_mobileFaceNet, mean_face) * 0.7 + \
                    criterion(mediate_Insightface_iresent34, mean_face) * 0.8 + \
                    criterion(mediate_Insightface_iresent50, mean_face) * 0.9 + \
                    criterion(mediate_Insightface_iresent100, mean_face) * 0.9 + \
                    criterion(mediate_VGG16, mean_face) * 0.7

            loss3 = criterion(average_out, mediate_InceptionResnet_model_1) + \
                    criterion(average_out, mediate_InceptionResnet_model_2) + \
                    criterion(average_out, mediate_IR_50_model_1) + \
                    criterion(average_out, mediate_IR_152_model_1) + \
                    criterion(average_out, mediate_mobileFaceNet) + \
                    criterion(average_out, mediate_Insightface_iresent34) + \
                    criterion(average_out, mediate_Insightface_iresent50) + \
                    criterion(average_out, mediate_Insightface_iresent100) + \
                    criterion(average_out, mediate_VGG16) + \
                    criterion(average_out, mediate_IR_SE_50)

            loss = alpha * loss1 - beta * loss2 - gamma * loss3
            print('loss : %f' % loss)
            print('loss1 : %f' % loss1)
            print('loss2 : %f' % loss2)

            # compute gradients
            loss.backward(retain_graph=True)

            g_noise = momentum * g_noise + (in_variable.grad / in_variable.grad.data.norm(1)) * 0.9
            g_noise = g_noise / g_noise.data.norm(1)

            g1 = g_noise
            g2 = g_noise

            # if i % 3 == 0:
            kernel = gkern(3, 2).astype(np.float32)
            gaussian_blur1 = GaussianBlur(kernel)
            gaussian_blur1.to(device)
            g1 = gaussian_blur1(g1)
            g1 = torch.clamp(g1, -0.2, 0.2)
            # else:
            addition = TVLoss()
            addition.to(device)
            g2 = addition(g2)

            g_noise = 0.25 * g1 + 0.75 * g2
            g_noise.clamp_(-0.05, 0.05)

            in_variable.data = in_variable.data + ((eps / 255.) * torch.sign(g_noise))  # * torch.from_numpy(mat).unsqueeze(0).float()
            in_variable.grad.data.zero_()  # unnecessary

            # g_noise = in_variable.data - origin_variable
            # g_noise.clamp_(-0.2, 0.2)
            # in_variable.data = origin_variable + g_noise

        # deprocess image
        adv = in_variable.data.cpu().numpy()[0]  # (3, 112, 112)
        perturbation = (adv - in_tensor.cpu().numpy())
        adv = adv * 128.0 + 127.0
        adv = adv.swapaxes(0, 1).swapaxes(1, 2)
        adv = adv[..., ::-1]
        adv = np.clip(adv, 0, 255).astype(np.uint8)

        # sample_dir = './target_mean_face/'
        # if not os.path.exists(sample_dir):
        #     os.makedirs(sample_dir)

        advimg = sample_dir + path.split('/')[-1].split('.')[-2] + '.jpg'
        cv2.imwrite(advimg, adv)
        print("save path is " + advimg)
        print('cost time is %.2f s ' % (time.time() - start))
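
# --- assumed pre-processing (sketch) -------------------------------------------
# img2tensor is used by all three attacks but not defined in this file. The
# de-processing step above (adv * 128.0 + 127.0, CHW -> HWC, then RGB -> BGR for
# cv2.imwrite) suggests a pre-processing roughly like the sketch below; treat the
# exact constants as an assumption if the real helper lives elsewhere in the repo.
# Note also that main() is defined three times in this file, so only the last
# definition is the one bound when the module is run.
def img2tensor(img):
    # HWC uint8 RGB array -> (1, 3, H, W) float tensor roughly in [-1, 1]
    img = (np.asarray(img, dtype=np.float32) - 127.0) / 128.0
    img = img.transpose(2, 0, 1)               # HWC -> CHW
    return torch.from_numpy(img).unsqueeze(0)  # add batch dimension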