# Standard imports for this snippet; IR_101, read_img and l2_norm come from
# the project's own modules (their exact import paths are not shown here).
import os

import numpy as np
import torch
import tqdm
from torchvision import transforms


def feature_extract(data_list, extract_folder):
    f = open(data_list, 'r')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    INPUT_SIZE = [112, 112]

    backbone_path = './backbone/CurricularFace_Backbone.pth'
    backbone_model = IR_101(INPUT_SIZE).to(device)

    checkpoint = torch.load(backbone_path,
                            map_location=lambda storage, loc: storage)
    if 'state_dict' not in checkpoint:
        backbone_model.load_state_dict(checkpoint)
    else:
        # The checkpoint wraps the weights in a 'state_dict' entry; keep only
        # the tensors whose names match this model's parameters. (Without this
        # branch, such a checkpoint would silently leave the model random.)
        model_weights = backbone_model.state_dict()
        pretrained_weights = {k: v for k, v in checkpoint['state_dict'].items()
                              if k in model_weights}
        model_weights.update(pretrained_weights)
        backbone_model.load_state_dict(model_weights)

    backbone_model.eval()

    data_transform = transforms.Compose([
        transforms.Resize((INPUT_SIZE[0], INPUT_SIZE[1])),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
    ])

    for file_path in tqdm.tqdm(f):
        file_path = file_path.strip()
        img_tensor = read_img(file_path, data_transform)
        img_tensor = img_tensor.unsqueeze(0).to(device)

        with torch.no_grad():
            mu, conv_final = backbone_model(img_tensor)
            mu = l2_norm(mu)

        mu = mu.cpu().numpy()
        conv_final = conv_final.cpu().numpy()

        # Mirror the source directory layout under extract_folder.
        sub_dir = file_path.split('/')[-2]
        org_folder = file_path.split('/')[2]        # assumes paths like ./<root>/<org_folder>/.../<img>.jpg
        extract_dir = extract_folder.split('/')[2]

        new_dir = extract_folder + sub_dir
        os.makedirs(new_dir, exist_ok=True)
        new_file_path = file_path.replace(org_folder, extract_dir)
        mu_file_path = new_file_path.replace('.jpg', '_mu.npy')
        conv_final_file_path = new_file_path.replace('.jpg', '_conv_final.npy')
        np.save(mu_file_path, mu)
        np.save(conv_final_file_path, conv_final)

    f.close()
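
read_img and l2_norm above are project helpers not shown in this listing. Minimal sketches of what they are assumed to do (hypothetical reimplementations, not the project's exact code):

from PIL import Image

def read_img(file_path, data_transform):
    # Load an image as RGB and apply the resize/normalize pipeline above.
    img = Image.open(file_path).convert('RGB')
    return data_transform(img)

def l2_norm(x, axis=1):
    # Scale each embedding vector to unit Euclidean length.
    norm = torch.norm(x, 2, axis, keepdim=True)
    return torch.div(x, norm)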
Example #2
    def __init__(self, device, backbone_name=cfg['BACKBONE_NAME'],
                 INPUT_SIZE=cfg['INPUT_SIZE'],
                 BACKBONE_RESUME_ROOT=cfg['BACKBONE_RESUME_ROOT']):
        super().__init__()
        # Map names to constructors so only the selected backbone is built,
        # instead of instantiating all nine networks up front.
        BACKBONE_DICT = {'ResNet_50': ResNet_50,
                         'ResNet_101': ResNet_101,
                         'ResNet_152': ResNet_152,
                         'IR_50': IR_50,
                         'IR_101': IR_101,
                         'IR_152': IR_152,
                         'IR_SE_50': IR_SE_50,
                         'IR_SE_101': IR_SE_101,
                         'IR_SE_152': IR_SE_152}

        self.device = device
        self.embedding = BACKBONE_DICT[backbone_name](INPUT_SIZE)
        self.embedding.load_state_dict(torch.load(BACKBONE_RESUME_ROOT))
        self.embedding = self.embedding.to(device)
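
The enclosing class is not visible in this excerpt. Assuming it is an nn.Module subclass named, say, Embedder (a hypothetical name), usage would look roughly like this:

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
embedder = Embedder(device, backbone_name='IR_101')  # hypothetical class name
embedder.embedding.eval()
with torch.no_grad():
    # IR-style backbones in this listing expect 112x112 RGB crops.
    feats = embedder.embedding(torch.randn(1, 3, 112, 112, device=device))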
Example #3
def eval(data_path, file_name):
    pairs, dirFiles = generate_pair(data_path, file_name)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    INPUT_SIZE = [112, 112]
    backbone_path = './backbone/CurricularFace_Backbone.pth'
    backbone_model = IR_101(INPUT_SIZE).to(device)

    checkpoint = torch.load(backbone_path, map_location=lambda storage, loc: storage)
    if 'state_dict' not in checkpoint:
        backbone_model.load_state_dict(checkpoint)
    else:
        print('Loading weights from checkpoint["state_dict"]')
        pretrained_weights = checkpoint['state_dict']
        model_weights = backbone_model.state_dict()
        # Keep only the weights whose names match the model's parameters.
        pretrained_weights = {k: v for k, v in pretrained_weights.items()
                              if k in model_weights}
        model_weights.update(pretrained_weights)
        backbone_model.load_state_dict(model_weights)

    backbone_model.eval()

    # Load the PFE network definition and config file. imp.load_source is
    # deprecated (and imp is removed in Python 3.12); importlib does the same job.
    import importlib.util
    config_file = 'config/sphere64_msarcface.py'
    spec = importlib.util.spec_from_file_location('config', config_file)
    config = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(config)
    config.batch_format['size'] = 1
    network = Network()
    network.initialize(config)
    network.load_model('log/sphere64_casia_am_PFE/20201028-015531')

    data_transform = transforms.Compose([
        transforms.Resize((INPUT_SIZE[0], INPUT_SIZE[1])),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
    ])

    embeddings_org_list = []
    embeddings_mu_list = []
    embeddings_sigma_list = []

    for filename in tqdm.tqdm(dirFiles):
        img_tensor = read_img(filename, data_transform)
        img_tensor = img_tensor.unsqueeze(0).to(device)

        with torch.no_grad():
            mu, conv_final = backbone_model(img_tensor)
            mu = l2_norm(mu)
            mu = mu.cpu().numpy()
            conv_final = conv_final.cpu().numpy()

        embeddings_org_list.append(mu.reshape(-1))
        mu, sigma_sq = network.extract_feature(mu, conv_final)


        embeddings_mu_list.append(mu.reshape(-1))
        embeddings_sigma_list.append(sigma_sq.reshape(-1))

    embeddings_org = np.array(embeddings_org_list)
    accuracy, threshold = evaluate(embeddings_org, pairs, nrof_folds=10)
    print('Org Accuracy: %1.5f' % accuracy)
    print('threshold: %1.5f' % threshold)

    embeddings_mu = np.array(embeddings_mu_list)
    embeddings_sigma = np.array(embeddings_sigma_list)

    accuracy, threshold = evaluate(embeddings_mu, pairs, nrof_folds=10, compare_func=pair_MLS_score,
                                   sigma_sq=embeddings_sigma)
    print('MLS Accuracy: %1.5f' % accuracy)
    print('threshold: %1.5f' % threshold)
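
pair_MLS_score is imported from the PFE codebase. A sketch of the mutual likelihood score it is assumed to compute, following Shi & Jain's Probabilistic Face Embeddings; the project's own version may differ in constants and batching:

def mls_score(mu1, sigma_sq1, mu2, sigma_sq2):
    # Mutual likelihood score between two Gaussian embeddings (mu, sigma^2);
    # larger values mean the pair is more likely the same identity.
    sigma_sum = sigma_sq1 + sigma_sq2
    dist = np.square(mu1 - mu2) / (1e-10 + sigma_sum) + np.log(sigma_sum)
    return -np.sum(dist, axis=-1)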
Example #4
	NUM_CLASS = len(train_loader.dataset.classes)
	print("Number of Training Classes: {}".format(NUM_CLASS))

	lfw, cfp_ff, cfp_fp, agedb, calfw, cplfw, vgg2_fp, lfw_issame, cfp_ff_issame, cfp_fp_issame, agedb_issame, calfw_issame, cplfw_issame, vgg2_fp_issame = get_val_data(DATA_ROOT)


	#======= model & loss & optimizer =======#
	if BACKBONE_NAME == 'ResNet_50':
		BACKBONE = ResNet_50(INPUT_SIZE)
	elif BACKBONE_NAME == 'ResNet_101':
		BACKBONE = ResNet_101(INPUT_SIZE)
	elif BACKBONE_NAME == 'ResNet_152':
		BACKBONE = ResNet_152(INPUT_SIZE)
	elif BACKBONE_NAME == 'IR_50':
		BACKBONE = IR_50(INPUT_SIZE)
	elif BACKBONE_NAME == 'IR_101':
		BACKBONE = IR_101(INPUT_SIZE)
	elif BACKBONE_NAME == 'IR_152':
		BACKBONE = IR_152(INPUT_SIZE)
	elif BACKBONE_NAME == 'IR_SE_50':
		BACKBONE = IR_SE_50(INPUT_SIZE)
	elif BACKBONE_NAME == 'IR_SE_101':
		BACKBONE = IR_SE_101(INPUT_SIZE)
	elif BACKBONE_NAME == 'IR_SE_152':
		BACKBONE = IR_SE_152(INPUT_SIZE)
	elif BACKBONE_NAME == 'ShuffleNet':
		BACKBONE = shufflenet(cfg=cfg) 
	elif BACKBONE_NAME == 'ShuffleNetV2':
		BACKBONE = shufflenetv2(cfg=cfg)
	elif BACKBONE_NAME == 'Mobilenet':
		BACKBONE = mobilenet()
	elif BACKBONE_NAME == 'Mobilenetv2':
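
The excerpt is cut off after the 'Mobilenetv2' branch. The same selection can also be written as a name-to-factory table, as later examples in this listing do; a compact sketch, with entries only for branches visible above:

BACKBONE_FACTORY = {
    'ResNet_50': lambda: ResNet_50(INPUT_SIZE),
    'IR_50': lambda: IR_50(INPUT_SIZE),
    'IR_SE_50': lambda: IR_SE_50(INPUT_SIZE),
    'ShuffleNet': lambda: shufflenet(cfg=cfg),
    'Mobilenet': lambda: mobilenet(),
}
BACKBONE = BACKBONE_FACTORY[BACKBONE_NAME]()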
Example #5
                                               num_workers=NUM_WORKERS,
                                               drop_last=DROP_LAST)

    NUM_CLASS = len(train_loader.dataset.classes)
    print("Number of Training Classes: {}".format(NUM_CLASS))

    lfw, cfp_ff, cfp_fp, agedb, calfw, cplfw, vgg2_fp, lfw_issame, cfp_ff_issame, cfp_fp_issame, agedb_issame, calfw_issame, cplfw_issame, vgg2_fp_issame = get_val_data(
        DATA_ROOT)

    # ======= model & loss & optimizer =======#
    BACKBONE_DICT = {
        'ResNet_50': ResNet_50(INPUT_SIZE),
        'ResNet_101': ResNet_101(INPUT_SIZE),
        'ResNet_152': ResNet_152(INPUT_SIZE),
        'IR_50': IR_50(INPUT_SIZE),
        'IR_101': IR_101(INPUT_SIZE),
        'IR_152': IR_152(INPUT_SIZE),
        'IR_SE_50': IR_SE_50(INPUT_SIZE),
        'IR_SE_101': IR_SE_101(INPUT_SIZE),
        'IR_SE_152': IR_SE_152(INPUT_SIZE)
    }
    BACKBONE = BACKBONE_DICT[BACKBONE_NAME]
    print("=" * 60)
    print(BACKBONE)
    print("{} Backbone Generated".format(BACKBONE_NAME))
    print("=" * 60)

    HEAD_DICT = {
        'ArcFace': ArcFace(in_features=EMBEDDING_SIZE, out_features=NUM_CLASS),
        'CosFace':
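
The listing cuts off inside HEAD_DICT. For context, these margin-based heads sit on top of the backbone's embedding during training; the usual wiring looks roughly like the following (names follow this listing, the exact loss object is assumed):

features = BACKBONE(inputs)        # (batch, EMBEDDING_SIZE) embeddings
outputs = HEAD(features, labels)   # margin-adjusted logits over NUM_CLASS
loss = LOSS(outputs, labels)       # e.g. cross-entropy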
Example #6
def main(ARGS):

    if ARGS.model_path is None:
        raise ValueError("model_path should not be None")

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    ####### Model setup
    print('Model type: %s' % ARGS.model_type)
    if ARGS.model_type == 'ResNet_50':
        model = ResNet_50(ARGS.input_size)
    elif ARGS.model_type == 'ResNet_101':
        model = ResNet_101(ARGS.input_size)
    elif ARGS.model_type == 'ResNet_152':
        model = ResNet_152(ARGS.input_size)
    elif ARGS.model_type == 'IR_50':
        model = IR_50(ARGS.input_size)
    elif ARGS.model_type == 'IR_101':
        model = IR_101(ARGS.input_size)
    elif ARGS.model_type == 'IR_152':
        model = IR_152(ARGS.input_size)
    elif ARGS.model_type == 'IR_SE_50':
        model = IR_SE_50(ARGS.input_size)
    elif ARGS.model_type == 'IR_SE_101':
        model = IR_SE_101(ARGS.input_size)
    elif ARGS.model_type == 'IR_SE_152':
        model = IR_SE_152(ARGS.input_size)
    else:
        raise AssertionError(
            "Unsupported model_type {}. We only support: ['ResNet_50', 'ResNet_101', 'ResNet_152', 'IR_50', 'IR_101', 'IR_152', 'IR_SE_50', 'IR_SE_101', 'IR_SE_152']"
            .format(ARGS.model_type))

    # map_location puts the weights on the right device whether or not CUDA is available.
    model.load_state_dict(torch.load(ARGS.model_path, map_location=device))

    model.to(device)
    model.eval()

    MULTI_GPU = False  # single-process evaluation; set CUDA_VISIBLE_DEVICES to pick the GPU

    print("=" * 60)
    print("Performing Evaluation on LFW, CFP_FF, CFP_FP, AgeDB, CALFW, CPLFW and VGG2_FP...")

    #### LFW
    print("Performing Evaluation on LFW...")
    lfw, lfw_issame = get_val_pair(ARGS.data_root, 'lfw')
    accuracy_lfw, best_threshold_lfw, roc_curve_lfw = perform_val(
        MULTI_GPU, device, ARGS.embedding_size, ARGS.batch_size, model, lfw,
        lfw_issame)
    print("Evaluation: LFW Acc: {}".format(accuracy_lfw))

    #### CALFW WORKS
    print("Performing Evaluation on CALFW...")
    calfw, calfw_issame = get_val_pair(ARGS.data_root, 'calfw')
    accuracy_calfw, best_threshold_calfw, roc_curve_calfw = perform_val(
        MULTI_GPU, device, ARGS.embedding_size, ARGS.batch_size, model, calfw,
        calfw_issame)
    print("Evaluation: CALFW Acc: {}".format(accuracy_calfw))

    #### CPLFW
    print("Performing Evaluation on CPLFW...")
    cplfw, cplfw_issame = get_val_pair(ARGS.data_root, 'cplfw')
    accuracy_cplfw, best_threshold_cplfw, roc_curve_cplfw = perform_val(
        MULTI_GPU, device, ARGS.embedding_size, ARGS.batch_size, model, cplfw,
        cplfw_issame)
    print("Evaluation: CPLFW Acc: {}".format(accuracy_cplfw))

    #### CFP-FF
    print("Performing Evaluation on CFP-FF...")
    cfp_ff, cfp_ff_issame = get_val_pair(ARGS.data_root, 'cfp_ff')
    accuracy_cfp_ff, best_threshold_cfp_ff, roc_curve_cfp_ff = perform_val(
        MULTI_GPU, device, ARGS.embedding_size, ARGS.batch_size, model, cfp_ff,
        cfp_ff_issame)
    print("Evaluation: CFP-FF Acc: {}".format(accuracy_cfp_ff))

    #### CFP-FP
    print("Performing Evaluation on CFP-FP...")
    cfp_fp, cfp_fp_issame = get_val_pair(ARGS.data_root, 'cfp_fp')
    accuracy_cfp_fp, best_threshold_cfp_fp, roc_curve_cfp_fp = perform_val(
        MULTI_GPU, device, ARGS.embedding_size, ARGS.batch_size, model, cfp_fp,
        cfp_fp_issame)
    print("Evaluation: CFP-FP Acc: {}".format(accuracy_cfp_fp))

    #### AgeDB_30
    print("Performing Evaluation on AgeDB_30...")
    agedb_30, agedb_30_issame = get_val_pair(ARGS.data_root, 'agedb_30')
    accuracy_agedb, best_threshold_agedb, roc_curve_agedb = perform_val(
        MULTI_GPU, device, ARGS.embedding_size, ARGS.batch_size, model,
        agedb_30, agedb_30_issame)
    print("Evaluation: AgeDB_30 Acc: {}".format(accuracy_agedb))

    #### VggFace2_FP
    print("Performing Evaluation on VggFace2_FP...")
    vgg2_fp, vgg2_fp_issame = get_val_pair(ARGS.data_root, 'vgg2_fp')
    accuracy_vgg2_fp, best_threshold_vgg2_fp, roc_curve_vgg2_fp = perform_val(
        MULTI_GPU, device, ARGS.embedding_size, ARGS.batch_size, model,
        vgg2_fp, vgg2_fp_issame)
    print("Evaluation: VggFace2_FP Acc: {}".format(accuracy_vgg2_fp))

    print("=" * 60)
    print("FINAL RESULTS:")
    print(
        "Evaluation: LFW Acc: {}, CFP_FF Acc: {}, CFP_FP Acc: {}, AgeDB Acc: {}, CALFW Acc: {}, CPLFW Acc: {}, VGG2_FP Acc: {}"
        .format(accuracy_lfw, accuracy_cfp_ff, accuracy_cfp_fp, accuracy_agedb,
                accuracy_calfw, accuracy_cplfw, accuracy_vgg2_fp))
    print("=" * 60)
Example #7
    lfw, cfp_ff, cfp_fp, agedb, calfw, cplfw, vgg2_fp, lfw_issame, cfp_ff_issame, cfp_fp_issame, agedb_issame, calfw_issame, cplfw_issame, vgg2_fp_issame = get_val_data(
        DATA_ROOT)

    #======= model & loss & optimizer =======#
    BACKBONE_DICT = {
        'ResNet_50': ResNet_50(INPUT_SIZE),
        'ResNet_101': ResNet_101(INPUT_SIZE),
        'ResNet_152': ResNet_152(INPUT_SIZE),
        'IR_50': IR_50(INPUT_SIZE),
        'IR_101': IR_101(INPUT_SIZE),
        'IR_152': IR_152(INPUT_SIZE),
        'IR_SE_50': IR_SE_50(INPUT_SIZE),
        'IR_SE_101': IR_SE_101(INPUT_SIZE),
        'IR_SE_152': IR_SE_152(INPUT_SIZE),
        'ShuffleNetV2_0.5': shufflenet_v2_x0_5(pretrained=True, only_features=True),
        'ShuffleNetV2_1.0': shufflenet_v2_x1_0(pretrained=False, only_features=True),
        'ShuffleNetV2_1.5': shufflenet_v2_x1_5(pretrained=False, only_features=True),
        'ShuffleNetV2_2.0':
Example #8
    train_loader = torch.utils.data.DataLoader(
        dataset_train, batch_size = BATCH_SIZE, sampler = sampler, pin_memory = PIN_MEMORY,
        num_workers = NUM_WORKERS, drop_last = DROP_LAST
    )

    NUM_CLASS = len(train_loader.dataset.classes)
    print("Number of Training Classes: {}".format(NUM_CLASS))
    
    # validate on LFW, CFP_FF, CFP_FP, AgeDB, CALFW, CPLFW and VGGFace2_FP
    lfw, cfp_ff, cfp_fp, agedb, calfw, cplfw, vgg2_fp, lfw_issame, cfp_ff_issame, cfp_fp_issame, agedb_issame, calfw_issame, cplfw_issame, vgg2_fp_issame = get_val_data(DATA_ROOT)


    #======= model & loss & optimizer =======#
    BACKBONE_DICT = {'ResNet_50': ResNet_50(INPUT_SIZE), 'ResNet_101': ResNet_101(INPUT_SIZE), 'ResNet_152': ResNet_152(INPUT_SIZE),
                     'IR_50': IR_50(INPUT_SIZE), 'IR_101': IR_101(INPUT_SIZE), 'IR_152': IR_152(INPUT_SIZE),
                     'IR_SE_50': IR_SE_50(INPUT_SIZE), 'IR_SE_101': IR_SE_101(INPUT_SIZE), 'IR_SE_152': IR_SE_152(INPUT_SIZE)}
    BACKBONE = BACKBONE_DICT[BACKBONE_NAME]
    print("=" * 60)
    print(BACKBONE)
    print("{} Backbone Generated".format(BACKBONE_NAME))
    print("=" * 60)

    HEAD_DICT = {'ArcFace': ArcFace(in_features = EMBEDDING_SIZE, out_features = NUM_CLASS),
                 'CosFace': CosFace(in_features = EMBEDDING_SIZE, out_features = NUM_CLASS),
                 'SphereFace': SphereFace(in_features = EMBEDDING_SIZE, out_features = NUM_CLASS),
                 'Am_softmax': Am_softmax(in_features = EMBEDDING_SIZE, out_features = NUM_CLASS)}
    HEAD = HEAD_DICT[HEAD_NAME]
    print("=" * 60)
    print(HEAD)
    print("{} Head Generated".format(HEAD_NAME))