Example #1
def main(args):
    print(args)
    MULTI_GPU = False
    DEVICE = torch.device("cuda:0")
    DATA_ROOT = '/raid/Data/ms1m-retinaface-t1/'
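    # the 'property' file is assumed to hold a single comma-separated line:
    # <num_classes>,<image_height>,<image_width>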
    with open(os.path.join(DATA_ROOT, 'property'), 'r') as f:
        NUM_CLASS, h, w = [int(i) for i in f.read().split(',')]

    if args.network == 'VIT':
        model = ViT_face(image_size=112,
                         patch_size=8,
                         loss_type='CosFace',
                         GPU_ID=DEVICE,
                         num_class=NUM_CLASS,
                         dim=512,
                         depth=20,
                         heads=8,
                         mlp_dim=2048,
                         dropout=0.1,
                         emb_dropout=0.1)
    elif args.network == 'VITs':
        model = ViTs_face(loss_type='CosFace',
                          GPU_ID=DEVICE,
                          num_class=NUM_CLASS,
                          image_size=112,
                          patch_size=8,
                          ac_patch_size=12,
                          pad=4,
                          dim=512,
                          depth=20,
                          heads=8,
                          mlp_dim=2048,
                          dropout=0.1,
                          emb_dropout=0.1)
    else:
        raise ValueError("unsupported network: {}".format(args.network))

    model_root = args.model
    state_dict = torch.load(model_root)
    model.load_state_dict(state_dict)

    # debug: print the name and shape of every tensor stored in the checkpoint
    for name, tensor in state_dict.items():
        print(name, tensor.shape)
    TARGET = args.target.split(',')  # comma-separated names of the evaluation sets
    vers = get_val_data('./eval/', TARGET)
    acc = []

    for ver in vers:
        name, data_set, issame = ver
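        # the literal 512 below is presumably the embedding dimension (it matches dim=512 of the models above)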
        accuracy, std, xnorm, best_threshold, roc_curve = perform_val(
            MULTI_GPU, DEVICE, 512, args.batch_size, model, data_set, issame)
        print('[%s]XNorm: %1.5f' % (name, xnorm))
        print('[%s]Accuracy-Flip: %1.5f+-%1.5f' % (name, accuracy, std))
        print('[%s]Best-Threshold: %1.5f' % (name, best_threshold))
        acc.append(accuracy)
    print('Average-Accuracy: %1.5f' % (np.mean(acc)))
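A minimal sketch of a command-line entry point for this script. The flag names are assumptions inferred only from the attributes read in main() (args.network, args.model, args.target, args.batch_size), and the usual imports (os, torch, numpy and the project helpers ViT_face, ViTs_face, get_val_data, perform_val) are assumed to sit at the top of the original file:

import argparse

if __name__ == '__main__':
    # hypothetical flags, mirroring the args.* attributes used in main()
    parser = argparse.ArgumentParser()
    parser.add_argument('--network', default='VIT', help="'VIT' or 'VITs'")
    parser.add_argument('--model', required=True, help='path to the checkpoint to evaluate')
    parser.add_argument('--target', default='lfw', help='comma-separated evaluation set names')
    parser.add_argument('--batch_size', type=int, default=64)
    main(parser.parse_args())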
Example #2
	dataset_train = datasets.ImageFolder(os.path.join(DATA_ROOT, 'ms1mv2-asia'), train_transform)

	# weighted random sampler: compensates for class imbalance by oversampling rare identities (see the sketch after this example)
	weights = make_weights_for_balanced_classes(dataset_train.imgs, len(dataset_train.classes))
	weights = torch.DoubleTensor(weights)
	sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))

	train_loader = torch.utils.data.DataLoader(
		dataset_train, batch_size = BATCH_SIZE, sampler = sampler, pin_memory = PIN_MEMORY,
		num_workers = NUM_WORKERS, drop_last = DROP_LAST
	)

	NUM_CLASS = len(train_loader.dataset.classes)
	print("Number of Training Classes: {}".format(NUM_CLASS))

	lfw, cfp_ff, cfp_fp, agedb, calfw, cplfw, vgg2_fp, lfw_issame, cfp_ff_issame, cfp_fp_issame, agedb_issame, calfw_issame, cplfw_issame, vgg2_fp_issame = get_val_data(DATA_ROOT)


	#======= model & loss & optimizer =======#
	if BACKBONE_NAME == 'ResNet_50':
		BACKBONE = ResNet_50(INPUT_SIZE)
	elif BACKBONE_NAME == 'ResNet_101':
		BACKBONE = ResNet_101(INPUT_SIZE)
	elif BACKBONE_NAME == 'ResNet_152':
		BACKBONE = ResNet_152(INPUT_SIZE)
	elif BACKBONE_NAME == 'IR_50':
		BACKBONE = IR_50(INPUT_SIZE)
	elif BACKBONE_NAME == 'IR_101':
		BACKBONE = IR_101(INPUT_SIZE)
	elif BACKBONE_NAME == 'IR_152':
		BACKBONE = IR_152(INPUT_SIZE)
	elif BACKBONE_NAME == 'IR_SE_50':
		BACKBONE = IR_SE_50(INPUT_SIZE)
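make_weights_for_balanced_classes is not shown in these excerpts; the sketch below is an assumption about its usual behaviour (one inverse-class-frequency weight per sample in dataset_train.imgs, so the WeightedRandomSampler oversamples rare identities), not the repository's exact implementation:

def make_weights_for_balanced_classes(images, nclasses):
    # images: list of (path, class_index) pairs, as exposed by ImageFolder.imgs
    counts = [0] * nclasses
    for _, label in images:
        counts[label] += 1
    total = float(sum(counts))
    # a sample's weight is inversely proportional to its class frequency
    per_class = [total / c for c in counts]
    return [per_class[label] for _, label in images]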
Example #3
    with open(os.path.join(DATA_ROOT, 'property'), 'r') as f:
        NUM_CLASS, h, w = [int(i) for i in f.read().split(',')]
    assert h == INPUT_SIZE[0] and w == INPUT_SIZE[1]

    dataset = FaceDataset(os.path.join(DATA_ROOT, 'train.rec'),
                          rand_mirror=True)
    trainloader = torch.utils.data.DataLoader(dataset,
                                              batch_size=BATCH_SIZE,
                                              shuffle=True,
                                              num_workers=len(GPU_ID),
                                              drop_last=True)

    print("Number of Training Classes: {}".format(NUM_CLASS))

    vers = get_val_data(EVAL_PATH, TARGET)
    highest_acc = [0.0 for t in TARGET]

    #======= model & loss & optimizer =======#
    BACKBONE_DICT = {
        'VIT':
        ViT_face(loss_type=HEAD_NAME,
                 GPU_ID=GPU_ID,
                 num_class=NUM_CLASS,
                 image_size=112,
                 patch_size=8,
                 dim=512,
                 depth=20,
                 heads=8,
                 mlp_dim=2048,
Example #4
    BACKBONE_NAME = cfg['BACKBONE_NAME']  # supported: ['ResNet_50', 'ResNet_101', 'ResNet_152', 'IR_50', 'IR_101', 'IR_152', 'IR_SE_50', 'IR_SE_101', 'IR_SE_152']
    HEAD_NAME = cfg['HEAD_NAME']  # supported: ['Softmax', 'ArcFace', 'CosFace', 'SphereFace', 'Am_softmax']
    INPUT_SIZE = cfg['INPUT_SIZE']
    BATCH_SIZE = cfg['BATCH_SIZE']
    RGB_MEAN = cfg['RGB_MEAN']  # mean for normalizing inputs
    RGB_STD = cfg['RGB_STD']
    EMBEDDING_SIZE = cfg['EMBEDDING_SIZE']  # feature dimension
    GPU_ID = cfg['TEST_GPU_ID']  # specify your GPU ids
    print("Overall Configurations:")
    print(cfg)
    #val_data_dir = os.path.join(VAL_DATA_ROOT, 'val_data')
    val_data_dir = VAL_DATA_ROOT
    lfw, cfp_fp, agedb_30, calfw, cplfw, vgg2_fp, lfw_issame, cfp_fp_issame, agedb_30_issame, calfw_issame, cplfw_issame, vgg2_fp_issame = get_val_data(
        VAL_DATA_ROOT)

    #======= model =======#
    BACKBONE_DICT = {
        'ResNet_50': ResNet_50,
        'ResNet_101': ResNet_101,
        'ResNet_152': ResNet_152,
        'IR_50': IR_50,
        'IR_100': IR_100,
        'IR_101': IR_101,
        'IR_152': IR_152,
        'IR_SE_50': IR_SE_50,
        'IR_SE_101': IR_SE_101,
        'IR_SE_152': IR_SE_152,
        'MobileFaceNet': MobileFaceNet
    }
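Unlike Examples #5-#8, this dictionary maps names to classes rather than to already-constructed modules, so only the selected backbone is ever built. A minimal sketch of the lookup, assuming every constructor takes INPUT_SIZE as shown in Example #9:

    # only the requested backbone is instantiated; the other entries stay bare classes
    BACKBONE = BACKBONE_DICT[BACKBONE_NAME](INPUT_SIZE)
    BACKBONE.eval()  # presumably an evaluation script (the config reads TEST_GPU_ID), so switch to inference mode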
Example #5
    train_loader = torch.utils.data.DataLoader(dataset_train,
                                               batch_size=BATCH_SIZE,
                                               sampler=sampler,
                                               pin_memory=PIN_MEMORY,
                                               num_workers=NUM_WORKERS,
                                               drop_last=DROP_LAST)

    NUM_CLASS = len(train_loader.dataset.classes)
    print("Number of Training Classes: {}".format(NUM_CLASS))

    # lfw, cfp_ff, cfp_fp, agedb, calfw, cplfw, vgg2_fp, lfw_issame, \
    # cfp_ff_issame, cfp_fp_issame, agedb_issame, calfw_issame, cplfw_issame,\
    #  vgg2_fp_issame = get_val_data(VAL_ROOT)

    calfw, cplfw, calfw_issame, cplfw_issame = get_val_data(VAL_ROOT)

    #======= model & loss & optimizer =======#
    BACKBONE_DICT = {
        'ResNet_50': ResNet_50(INPUT_SIZE),
        'ResNet_101': ResNet_101(INPUT_SIZE),
        'ResNet_152': ResNet_152(INPUT_SIZE),
        'IR_50': IR_50(INPUT_SIZE),
        'IR_101': IR_101(INPUT_SIZE),
        'IR_152': IR_152(INPUT_SIZE),
        'IR_SE_50': IR_SE_50(INPUT_SIZE),
        'IR_SE_101': IR_SE_101(INPUT_SIZE),
        'IR_SE_152': IR_SE_152(INPUT_SIZE)
    }
    BACKBONE = BACKBONE_DICT[BACKBONE_NAME]
    print("=" * 60)
Example #6
                                                len(dataset_train.classes))
    weights = torch.DoubleTensor(weights)
    sampler = torch.utils.data.sampler.WeightedRandomSampler(
        weights, len(weights))

    train_loader = torch.utils.data.DataLoader(dataset_train,
                                               batch_size=BATCH_SIZE,
                                               sampler=sampler,
                                               pin_memory=PIN_MEMORY,
                                               num_workers=NUM_WORKERS,
                                               drop_last=DROP_LAST)

    NUM_CLASS = len(train_loader.dataset.classes)
    print("Number of Training Classes: {}".format(NUM_CLASS))

    agedb_30, cfp_fp, lfw, agedb_30_issame, cfp_fp_issame, lfw_issame = get_val_data(
        DATA_ROOT)

    #======= model & loss & optimizer =======#
    BACKBONE_DICT = {
        'ResNet_50': ResNet_50(INPUT_SIZE),
        'ResNet_101': ResNet_101(INPUT_SIZE),
        'ResNet_152': ResNet_152(INPUT_SIZE),
        'IR_50': IR_50(INPUT_SIZE),
        'IR_101': IR_101(INPUT_SIZE),
        'IR_152': IR_152(INPUT_SIZE),
        'IR_SE_50': IR_SE_50(INPUT_SIZE),
        'IR_SE_101': IR_SE_101(INPUT_SIZE),
        'IR_SE_152': IR_SE_152(INPUT_SIZE)
    }
    BACKBONE = BACKBONE_DICT[BACKBONE_NAME]
    print("=" * 60)
Example #7
    weights = torch.DoubleTensor(weights)
    sampler = torch.utils.data.sampler.WeightedRandomSampler(
        weights, len(weights))

    train_loader = torch.utils.data.DataLoader(dataset_train,
                                               batch_size=BATCH_SIZE,
                                               sampler=sampler,
                                               pin_memory=PIN_MEMORY,
                                               num_workers=NUM_WORKERS,
                                               drop_last=DROP_LAST)

    NUM_CLASS = len(train_loader.dataset.classes)
    print("Number of Training Classes: {}".format(NUM_CLASS))

    #lfw, cfp_ff, cfp_fp, agedb, calfw, cplfw, vgg2_fp, lfw_issame, cfp_ff_issame, cfp_fp_issame, agedb_issame, calfw_issame, cplfw_issame, vgg2_fp_issame = get_val_data(DATA_ROOT)
    lfw, lfw_issame = get_val_data(DATA_ROOT)

    #======= model & loss & optimizer =======#
    BACKBONE_DICT = {
        'ResNet_50': ResNet_50(INPUT_SIZE),
        'ResNet_101': ResNet_101(INPUT_SIZE),
        'ResNet_152': ResNet_152(INPUT_SIZE),
        'IR_50': IR_50(INPUT_SIZE),
        'IR_101': IR_101(INPUT_SIZE),
        'IR_152': IR_152(INPUT_SIZE),
        'IR_SE_50': IR_SE_50(INPUT_SIZE),
        'IR_SE_101': IR_SE_101(INPUT_SIZE),
        'IR_SE_152': IR_SE_152(INPUT_SIZE)
    }
    BACKBONE = BACKBONE_DICT[BACKBONE_NAME]
    print("=" * 60)
Example #8
    dataset_train = datasets.ImageFolder(os.path.join(DATA_ROOT, 'imgs'), train_transform)

    # create a weighted random sampler to compensate for class imbalance
    weights = make_weights_for_balanced_classes(dataset_train.imgs, len(dataset_train.classes))
    weights = torch.DoubleTensor(weights)
    sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))

    train_loader = torch.utils.data.DataLoader(
        dataset_train, batch_size=BATCH_SIZE, sampler=sampler, pin_memory=PIN_MEMORY,
        num_workers=NUM_WORKERS, drop_last=DROP_LAST
    )

    NUM_CLASS = len(train_loader.dataset.classes)
    print("Number of Training Classes: {}".format(NUM_CLASS))

    lfw, cfp_ff, cfp_fp, agedb, calfw, cplfw, vgg2_fp, lfw_issame, cfp_ff_issame, cfp_fp_issame, agedb_issame, calfw_issame, cplfw_issame, vgg2_fp_issame = get_val_data(DATA_ROOT)

    # ======= model & loss & optimizer =======#
    BACKBONE_DICT = {'ResNet_50': ResNet_50(INPUT_SIZE),
                     'ResNet_101': ResNet_101(INPUT_SIZE),
                     'ResNet_152': ResNet_152(INPUT_SIZE),
                     'IR_50': IR_50(INPUT_SIZE),
                     'IR_101': IR_101(INPUT_SIZE),
                     'IR_152': IR_152(INPUT_SIZE),
                     'IR_SE_50': IR_SE_50(INPUT_SIZE),
                     'IR_SE_101': IR_SE_101(INPUT_SIZE),
                     'IR_SE_152': IR_SE_152(INPUT_SIZE)}
    BACKBONE = BACKBONE_DICT[BACKBONE_NAME]
    print("=" * 60)
    print(BACKBONE)
    print("{} Backbone Generated".format(BACKBONE_NAME))
Example #9
    DATA_ROOT = cfg['DATA_ROOT'] # the parent root where your train/val/test data are stored
    BACKBONE_RESUME_ROOT = cfg['BACKBONE_RESUME_ROOT'] # the root to resume training from a saved checkpoint
    HEAD_RESUME_ROOT = cfg['HEAD_RESUME_ROOT']  # the root to resume training from a saved checkpoint

    BACKBONE_NAME = cfg['BACKBONE_NAME'] # supported: ['ResNet_50', 'ResNet_101', 'ResNet_152', 'IR_50', 'IR_101', 'IR_152', 'IR_SE_50', 'IR_SE_101', 'IR_SE_152']
    HEAD_NAME = cfg['HEAD_NAME'] # supported: ['Softmax', 'ArcFace', 'CosFace', 'SphereFace', 'Am_softmax']
    INPUT_SIZE = cfg['INPUT_SIZE']
    BATCH_SIZE = cfg['BATCH_SIZE']
    RGB_MEAN = cfg['RGB_MEAN'] # mean for normalizing inputs
    RGB_STD = cfg['RGB_STD']
    EMBEDDING_SIZE = cfg['EMBEDDING_SIZE'] # feature dimension
    GPU_ID = cfg['TEST_GPU_ID'] # specify your GPU ids
    print("Overall Configurations:")
    print(cfg)
    val_data_dir = os.path.join(DATA_ROOT, 'val_data')
    lfw, cfp_fp, agedb, cplfw, calfw, lfw_issame, cfp_fp_issame, agedb_issame, cplfw_issame, calfw_issame = get_val_data(val_data_dir)

    #======= model =======#
    BACKBONE_DICT = {'ResNet_50': ResNet_50, 
                     'ResNet_101': ResNet_101, 
                     'ResNet_152': ResNet_152,
                     'IR_50': IR_50, 
                     'IR_101': IR_101, 
                     'IR_152': IR_152,
                     'IR_SE_50': IR_SE_50, 
                     'IR_SE_101': IR_SE_101, 
                     'IR_SE_152': IR_SE_152,
                     'MobileFaceNet': MobileFaceNet}

    BACKBONE = BACKBONE_DICT[BACKBONE_NAME](INPUT_SIZE)
    print("=" * 60)