def __init__(self, device, backbone_name = cfg['BACKBONE_NAME'], INPUT_SIZE = cfg['INPUT_SIZE'],
             BACKBONE_RESUME_ROOT = cfg['BACKBONE_RESUME_ROOT']):
    super().__init__()
    BACKBONE_DICT = {'ResNet_50': ResNet_50(INPUT_SIZE),
                     'ResNet_101': ResNet_101(INPUT_SIZE),
                     'ResNet_152': ResNet_152(INPUT_SIZE),
                     'IR_50': IR_50(INPUT_SIZE),
                     'IR_101': IR_101(INPUT_SIZE),
                     'IR_152': IR_152(INPUT_SIZE),
                     'IR_SE_50': IR_SE_50(INPUT_SIZE),
                     'IR_SE_101': IR_SE_101(INPUT_SIZE),
                     'IR_SE_152': IR_SE_152(INPUT_SIZE)}
    self.device = device
    self.embedding = BACKBONE_DICT[backbone_name]
    self.embedding.load_state_dict(torch.load(BACKBONE_RESUME_ROOT))
    self.embedding = self.embedding.to(device)
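# Minimal usage sketch (not part of the original code): loading a single backbone and extracting
# embeddings for a batch of aligned 112x112 face crops. The import path follows the face.evoLVe
# layout and the checkpoint path is a placeholder; both are assumptions.
import torch
from backbone.model_irse import IR_50

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
backbone = IR_50([112, 112])
backbone.load_state_dict(torch.load('path/to/backbone_ir50_checkpoint.pth', map_location=device))
backbone = backbone.to(device).eval()

with torch.no_grad():
    faces = torch.randn(4, 3, 112, 112, device=device)  # stand-in for preprocessed face crops
    embeddings = backbone(faces)                         # 512-D feature vector per face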
if BACKBONE_NAME == 'ResNet_50':
    BACKBONE = ResNet_50(INPUT_SIZE)
    # 'ResNet_101': resnet101(INPUT_SIZE),
    # 'ResNet_152': resnet152(INPUT_SIZE),
elif BACKBONE_NAME == 'IR_50':
    BACKBONE = IR_50(INPUT_SIZE)
elif BACKBONE_NAME == 'IR_101':
    BACKBONE = IR_101(INPUT_SIZE)
elif BACKBONE_NAME == 'IR_152':
    BACKBONE = IR_152(INPUT_SIZE)
elif BACKBONE_NAME == 'IR_SE_50':
    BACKBONE = IR_SE_50(INPUT_SIZE)
elif BACKBONE_NAME == 'IR_SE_101':
    BACKBONE = IR_SE_101(INPUT_SIZE)
elif BACKBONE_NAME == 'IR_SE_152':
    BACKBONE = IR_SE_152(INPUT_SIZE)
elif BACKBONE_NAME == 'ShuffleNet':
    BACKBONE = shufflenet(cfg=cfg)
elif BACKBONE_NAME == 'ShuffleNetV2':
    BACKBONE = shufflenetv2(cfg=cfg)
elif BACKBONE_NAME == 'Mobilenet':
    BACKBONE = mobilenet()
elif BACKBONE_NAME == 'Mobilenetv2':
    BACKBONE = mobilenetv2()
elif BACKBONE_NAME == 'mobileface':
    BACKBONE = mobileface()
else:
    raise NotImplementedError

print("=" * 60)
print(BACKBONE)
print("{} Backbone Generated".format(BACKBONE_NAME))
print("Number of Training Classes: {}".format(NUM_CLASS))

lfw, cfp_ff, cfp_fp, agedb, calfw, cplfw, vgg2_fp, lfw_issame, cfp_ff_issame, cfp_fp_issame, agedb_issame, calfw_issame, cplfw_issame, vgg2_fp_issame = get_val_data(
    DATA_ROOT)

# ======= model & loss & optimizer =======#
BACKBONE_DICT = {
    'ResNet_50': ResNet_50(INPUT_SIZE),
    'ResNet_101': ResNet_101(INPUT_SIZE),
    'ResNet_152': ResNet_152(INPUT_SIZE),
    'IR_50': IR_50(INPUT_SIZE),
    'IR_101': IR_101(INPUT_SIZE),
    'IR_152': IR_152(INPUT_SIZE),
    'IR_SE_50': IR_SE_50(INPUT_SIZE),
    'IR_SE_101': IR_SE_101(INPUT_SIZE),
    'IR_SE_152': IR_SE_152(INPUT_SIZE)
}
BACKBONE = BACKBONE_DICT[BACKBONE_NAME]
print("=" * 60)
print(BACKBONE)
print("{} Backbone Generated".format(BACKBONE_NAME))
print("=" * 60)

HEAD_DICT = {
    'ArcFace': ArcFace(in_features=EMBEDDING_SIZE, out_features=NUM_CLASS),
    'CosFace': CosFace(in_features=EMBEDDING_SIZE, out_features=NUM_CLASS),
    'SphereFace': SphereFace(in_features=EMBEDDING_SIZE, out_features=NUM_CLASS),
    'Am_softmax': Am_softmax(in_features=EMBEDDING_SIZE, out_features=NUM_CLASS)
}
model_IR_152_Epoch_112 = IR_152([112, 112])
model_IR_152_Epoch_112.load_state_dict(
    torch.load(
        './Defense_Model/Backbone_IR_152_Epoch_112_Batch_2547328_Time_2019-07-13-02-59_checkpoint.pth',
        map_location='cuda'))
model_IR_152_Epoch_112.eval()
criterion_IR_152_Epoch_112 = nn.MSELoss()

# 3
model_IR_SE_50_Epoch_2 = IR_SE_50([112, 112])
model_IR_SE_50_Epoch_2.load_state_dict(
    torch.load(
        './Defense_Model/Backbone_IR_SE_50_Epoch_2_Batch_45488_Time_2019-08-03-19-39_checkpoint.pth',
        map_location='cuda'))
model_IR_SE_50_Epoch_2.eval()
criterion_IR_SE_50_Epoch_2 = nn.MSELoss()

# 4
model_IR_SE_152_Epoch_4 = IR_SE_152([112, 112])
model_IR_SE_152_Epoch_4.load_state_dict(
    torch.load(
        './Defense_Model/Backbone_IR_SE_152_Epoch_4_Batch_181956_Time_2019-08-06-07-29_checkpoint.pth',
        map_location='cuda'))
model_IR_SE_152_Epoch_4.eval()
criterion_IR_SE_152_Epoch_4 = nn.MSELoss()

# 5
model_ResNet_101_Epoch_4 = ResNet_101([112, 112])
model_ResNet_101_Epoch_4.load_state_dict(
    torch.load(
        './Defense_Model/Backbone_ResNet_101_Epoch_4_Batch_90976_Time_2019-08-04-11-34_checkpoint.pth',
        map_location='cuda'))
model_ResNet_101_Epoch_4.eval()
criterion_ResNet_101_Epoch_4 = nn.MSELoss()

# 6
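# Hedged refactoring sketch: the repeated load/eval/criterion boilerplate above could be driven
# from a single table. Checkpoint paths are copied from the snippet; the helper name
# build_defense_models is an illustration, not part of the original code, and it assumes the
# same imports as the surrounding file (torch, torch.nn as nn, and the backbone constructors).
def build_defense_models(device='cuda'):
    specs = [
        (IR_152, './Defense_Model/Backbone_IR_152_Epoch_112_Batch_2547328_Time_2019-07-13-02-59_checkpoint.pth'),
        (IR_SE_50, './Defense_Model/Backbone_IR_SE_50_Epoch_2_Batch_45488_Time_2019-08-03-19-39_checkpoint.pth'),
        (IR_SE_152, './Defense_Model/Backbone_IR_SE_152_Epoch_4_Batch_181956_Time_2019-08-06-07-29_checkpoint.pth'),
        (ResNet_101, './Defense_Model/Backbone_ResNet_101_Epoch_4_Batch_90976_Time_2019-08-04-11-34_checkpoint.pth'),
    ]
    models = []
    for ctor, ckpt in specs:
        m = ctor([112, 112])                                   # all defense backbones use 112x112 inputs
        m.load_state_dict(torch.load(ckpt, map_location=device))
        m.eval()
        models.append((m, nn.MSELoss()))                       # one MSE criterion per model, as above
    return models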
def main(ARGS):
    if ARGS.model_path is None:
        raise AssertionError("Path should not be None")

    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")

    ####### Model setup
    print('Model type: %s' % ARGS.model_type)
    if ARGS.model_type == 'ResNet_50':
        model = ResNet_50(ARGS.input_size)
    elif ARGS.model_type == 'ResNet_101':
        model = ResNet_101(ARGS.input_size)
    elif ARGS.model_type == 'ResNet_152':
        model = ResNet_152(ARGS.input_size)
    elif ARGS.model_type == 'IR_50':
        model = IR_50(ARGS.input_size)
    elif ARGS.model_type == 'IR_101':
        model = IR_101(ARGS.input_size)
    elif ARGS.model_type == 'IR_152':
        model = IR_152(ARGS.input_size)
    elif ARGS.model_type == 'IR_SE_50':
        model = IR_SE_50(ARGS.input_size)
    elif ARGS.model_type == 'IR_SE_101':
        model = IR_SE_101(ARGS.input_size)
    elif ARGS.model_type == 'IR_SE_152':
        model = IR_SE_152(ARGS.input_size)
    else:
        raise AssertionError(
            "Unsupported model_type {}. We only support: ['ResNet_50', 'ResNet_101', 'ResNet_152', "
            "'IR_50', 'IR_101', 'IR_152', 'IR_SE_50', 'IR_SE_101', 'IR_SE_152']".format(ARGS.model_type))

    if use_cuda:
        model.load_state_dict(torch.load(ARGS.model_path))
    else:
        model.load_state_dict(torch.load(ARGS.model_path, map_location='cpu'))
    model.to(device)  # embedding_size = 512
    model.eval()

    # DATA_ROOT = './../evoLVe_data/data'  # the parent root where your train/val/test data are stored
    # INPUT_SIZE = [112, 112]  # support: [112, 112] and [224, 224]
    # BACKBONE_RESUME_ROOT = './../evoLVe_data/pth/backbone_ir50_ms1m_epoch120.pth'  # the root to resume training from a saved checkpoint
    # BACKBONE_RESUME_ROOT = './../pytorch-face/pth/IR_50_MODEL_arcface_casia_epoch56_lfw9925.pth'
    MULTI_GPU = False  # flag to use multiple GPUs; if you choose to train with a single GPU, you should first run "export CUDA_VISIBLE_DEVICES=device_id" to specify the GPU card you want to use
    # DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # EMBEDDING_SIZE = 512  # feature dimension
    # BATCH_SIZE = 512
    # BACKBONE = IR_50(INPUT_SIZE)
    # if os.path.isfile(BACKBONE_RESUME_ROOT):
    #     print("Loading Backbone Checkpoint '{}'".format(BACKBONE_RESUME_ROOT))
    #     BACKBONE.load_state_dict(torch.load(BACKBONE_RESUME_ROOT, map_location='cpu'))
    # else:
    #     print("No Checkpoint Found at '{}'.".format(BACKBONE_RESUME_ROOT))
    #     sys.exit()

    print("=" * 60)
    print(
        "Performing Evaluation on LFW, CFP_FF, CFP_FP, AgeDB, CALFW, CPLFW and VGG2_FP, and Save Checkpoints..."
    )

    #### LFW
    print("Performing Evaluation on LFW...")
    lfw, lfw_issame = get_val_pair(ARGS.data_root, 'lfw')
    accuracy_lfw, best_threshold_lfw, roc_curve_lfw = perform_val(
        MULTI_GPU, device, ARGS.embedding_size, ARGS.batch_size, model, lfw, lfw_issame)
    print("Evaluation: LFW Acc: {}".format(accuracy_lfw))

    #### CALFW
    print("Performing Evaluation on CALFW...")
    calfw, calfw_issame = get_val_pair(ARGS.data_root, 'calfw')
    accuracy_calfw, best_threshold_calfw, roc_curve_calfw = perform_val(
        MULTI_GPU, device, ARGS.embedding_size, ARGS.batch_size, model, calfw, calfw_issame)
    print("Evaluation: CALFW Acc: {}".format(accuracy_calfw))

    #### CPLFW
    print("Performing Evaluation on CPLFW...")
    cplfw, cplfw_issame = get_val_pair(ARGS.data_root, 'cplfw')
    accuracy_cplfw, best_threshold_cplfw, roc_curve_cplfw = perform_val(
        MULTI_GPU, device, ARGS.embedding_size, ARGS.batch_size, model, cplfw, cplfw_issame)
    print("Evaluation: CPLFW Acc: {}".format(accuracy_cplfw))

    #### CFP-FF
    print("Performing Evaluation on CFP-FF...")
    cfp_ff, cfp_ff_issame = get_val_pair(ARGS.data_root, 'cfp_ff')
    accuracy_cfp_ff, best_threshold_cfp_ff, roc_curve_cfp_ff = perform_val(
        MULTI_GPU, device, ARGS.embedding_size, ARGS.batch_size, model, cfp_ff, cfp_ff_issame)
    print("Evaluation: CFP-FF Acc: {}".format(accuracy_cfp_ff))

    #### CFP-FP
    print("Performing Evaluation on CFP-FP...")
    cfp_fp, cfp_fp_issame = get_val_pair(ARGS.data_root, 'cfp_fp')
    accuracy_cfp_fp, best_threshold_cfp_fp, roc_curve_cfp_fp = perform_val(
        MULTI_GPU, device, ARGS.embedding_size, ARGS.batch_size, model, cfp_fp, cfp_fp_issame)
    print("Evaluation: CFP-FP Acc: {}".format(accuracy_cfp_fp))

    #### AgeDB_30
    print("Performing Evaluation on AgeDB_30...")
    agedb_30, agedb_30_issame = get_val_pair(ARGS.data_root, 'agedb_30')
    accuracy_agedb, best_threshold_agedb, roc_curve_agedb = perform_val(
        MULTI_GPU, device, ARGS.embedding_size, ARGS.batch_size, model, agedb_30, agedb_30_issame)
    print("Evaluation: AgeDB_30 Acc: {}".format(accuracy_agedb))

    #### VggFace2_FP
    print("Performing Evaluation on VggFace2_FP...")
    vgg2_fp, vgg2_fp_issame = get_val_pair(ARGS.data_root, 'vgg2_fp')
    accuracy_vgg2_fp, best_threshold_vgg2_fp, roc_curve_vgg2_fp = perform_val(
        MULTI_GPU, device, ARGS.embedding_size, ARGS.batch_size, model, vgg2_fp, vgg2_fp_issame)
    print("Evaluation: VggFace2_FP Acc: {}".format(accuracy_vgg2_fp))

    print("=" * 60)
    print("FINAL RESULTS:")
    print(
        "Evaluation: LFW Acc: {}, CFP_FF Acc: {}, CFP_FP Acc: {}, AgeDB Acc: {}, CALFW Acc: {}, CPLFW Acc: {}, VGG2_FP Acc: {}"
        .format(accuracy_lfw, accuracy_cfp_ff, accuracy_cfp_fp, accuracy_agedb,
                accuracy_calfw, accuracy_cplfw, accuracy_vgg2_fp))
    print("=" * 60)
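# Hedged refactoring sketch: the seven near-identical evaluation blocks above could be driven by
# a single loop, since get_val_pair and perform_val already take the dataset name and the
# (data, issame) pair. evaluate_all is an illustrative helper name, not part of the original code.
def evaluate_all(model, device, ARGS, MULTI_GPU=False):
    results = {}
    for name in ['lfw', 'calfw', 'cplfw', 'cfp_ff', 'cfp_fp', 'agedb_30', 'vgg2_fp']:
        data, issame = get_val_pair(ARGS.data_root, name)
        accuracy, best_threshold, roc_curve = perform_val(
            MULTI_GPU, device, ARGS.embedding_size, ARGS.batch_size, model, data, issame)
        print("Evaluation: {} Acc: {}".format(name, accuracy))
        results[name] = accuracy
    return results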
BACKBONE_DICT = {
    'ResNet_101': ResNet_101(INPUT_SIZE),
    'ResNet_152': ResNet_152(INPUT_SIZE),
    'IR_50': IR_50(INPUT_SIZE),
    'IR_101': IR_101(INPUT_SIZE),
    'IR_152': IR_152(INPUT_SIZE),
    'IR_SE_50': IR_SE_50(INPUT_SIZE),
    'IR_SE_101': IR_SE_101(INPUT_SIZE),
    'IR_SE_152': IR_SE_152(INPUT_SIZE),
    'ShuffleNetV2_0.5': shufflenet_v2_x0_5(pretrained=True, only_features=True),
    'ShuffleNetV2_1.0': shufflenet_v2_x1_0(pretrained=False, only_features=True),
    'ShuffleNetV2_1.5': shufflenet_v2_x1_5(pretrained=False, only_features=True),
    'ShuffleNetV2_2.0': shufflenet_v2_x2_0(pretrained=False, only_features=True),
    'GhostNet': ghost_net(),
    'MobileFaceNet': MobileFaceNet(512)
}
BACKBONE = BACKBONE_DICT[BACKBONE_NAME]
print("=" * 60)
train_loader = torch.utils.data.DataLoader(
    dataset_train,
    batch_size = BATCH_SIZE,
    sampler = sampler,
    pin_memory = PIN_MEMORY,
    num_workers = NUM_WORKERS,
    drop_last = DROP_LAST
)

NUM_CLASS = len(train_loader.dataset.classes)
print("Number of Training Classes: {}".format(NUM_CLASS))

# validate on LFW, CFP_FF, CFP_FP, AgeDB, CALFW, CPLFW and VGGFace2_FP
lfw, cfp_ff, cfp_fp, agedb, calfw, cplfw, vgg2_fp, lfw_issame, cfp_ff_issame, cfp_fp_issame, agedb_issame, calfw_issame, cplfw_issame, vgg2_fp_issame = get_val_data(DATA_ROOT)

#======= model & loss & optimizer =======#
BACKBONE_DICT = {'ResNet_50': ResNet_50(INPUT_SIZE),
                 'ResNet_101': ResNet_101(INPUT_SIZE),
                 'ResNet_152': ResNet_152(INPUT_SIZE),
                 'IR_50': IR_50(INPUT_SIZE),
                 'IR_101': IR_101(INPUT_SIZE),
                 'IR_152': IR_152(INPUT_SIZE),
                 'IR_SE_50': IR_SE_50(INPUT_SIZE),
                 'IR_SE_101': IR_SE_101(INPUT_SIZE),
                 'IR_SE_152': IR_SE_152(INPUT_SIZE)}
BACKBONE = BACKBONE_DICT[BACKBONE_NAME]
print("=" * 60)
print(BACKBONE)
print("{} Backbone Generated".format(BACKBONE_NAME))
print("=" * 60)

HEAD_DICT = {'ArcFace': ArcFace(in_features = EMBEDDING_SIZE, out_features = NUM_CLASS),
             'CosFace': CosFace(in_features = EMBEDDING_SIZE, out_features = NUM_CLASS),
             'SphereFace': SphereFace(in_features = EMBEDDING_SIZE, out_features = NUM_CLASS),
             'Am_softmax': Am_softmax(in_features = EMBEDDING_SIZE, out_features = NUM_CLASS)}
HEAD = HEAD_DICT[HEAD_NAME]
print("=" * 60)
print(HEAD)
print("{} Head Generated".format(HEAD_NAME))
print("=" * 60)
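# Hedged design note (not part of the original code): BACKBONE_DICT and HEAD_DICT above build
# every model even though only one backbone and one head are selected. A sketch of a lighter
# variant keeps constructors in the dict and instantiates only the chosen entry; the
# *_FACTORY names are illustrative.
BACKBONE_FACTORY = {'ResNet_50': ResNet_50, 'ResNet_101': ResNet_101, 'ResNet_152': ResNet_152,
                    'IR_50': IR_50, 'IR_101': IR_101, 'IR_152': IR_152,
                    'IR_SE_50': IR_SE_50, 'IR_SE_101': IR_SE_101, 'IR_SE_152': IR_SE_152}
BACKBONE = BACKBONE_FACTORY[BACKBONE_NAME](INPUT_SIZE)

HEAD_FACTORY = {'ArcFace': ArcFace, 'CosFace': CosFace, 'SphereFace': SphereFace, 'Am_softmax': Am_softmax}
HEAD = HEAD_FACTORY[HEAD_NAME](in_features = EMBEDDING_SIZE, out_features = NUM_CLASS)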