Example #1
def search(img, k=10, nprobe=10):
    """Find k near neighbours in index
    
    Arguments:
        img {PIL.Image} -- Face
    
    Keyword Arguments:
        k {int} -- Count of nearest neighbours (default: {10})
        nprobe {int} -- How many nearest clusters to scan (read in Faiss docs) (default: {10})
    
    Returns:
        np.array -- k distances
        np.array -- k indices
    """
    backbone = ResNet_50([112, 112])
    pth = os.path.join(settings.BACKBONE_DIR,
                       BACKBONE_FILE)  # Pretrained backbone for ResNet50

    index = faiss.read_index(os.path.join(DATASET_PATH,
                                          DATASET_INDEX))  # load index
    index_ivf = faiss.downcast_index(index.index)
    index_ivf.nprobe = nprobe  # change nprobe

    query = np.array(extract_one_embedding(img, backbone,
                                           pth)).astype('float32').reshape(
                                               1, -1)
    D, I = index.search(query, k)
    return D[0], I[0]
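
A minimal usage sketch for the function above. The constants it reads (settings.BACKBONE_DIR, BACKBONE_FILE, DATASET_PATH, DATASET_INDEX) and the helper extract_one_embedding come from the surrounding module and are assumed to be configured; the file name below is hypothetical.

from PIL import Image

# Hypothetical caller: query the index with an aligned 112x112 face crop.
img = Image.open('query_face.jpg').convert('RGB')
distances, indices = search(img, k=5, nprobe=16)
for dist, idx in zip(distances, indices):
    print('index entry {}: distance {:.4f}'.format(idx, dist))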
Example #2
    def __init__(self, device, backbone_name = cfg['BACKBONE_NAME'],
                 INPUT_SIZE = cfg['INPUT_SIZE'],
                 BACKBONE_RESUME_ROOT = cfg['BACKBONE_RESUME_ROOT']):
        super().__init__()
        BACKBONE_DICT = {'ResNet_50': ResNet_50(INPUT_SIZE),
                         'ResNet_101': ResNet_101(INPUT_SIZE),
                         'ResNet_152': ResNet_152(INPUT_SIZE),
                         'IR_50': IR_50(INPUT_SIZE),
                         'IR_101': IR_101(INPUT_SIZE),
                         'IR_152': IR_152(INPUT_SIZE),
                         'IR_SE_50': IR_SE_50(INPUT_SIZE),
                         'IR_SE_101': IR_SE_101(INPUT_SIZE),
                         'IR_SE_152': IR_SE_152(INPUT_SIZE)}

        self.device = device
        self.embedding = BACKBONE_DICT[backbone_name]
        self.embedding.load_state_dict(torch.load(BACKBONE_RESUME_ROOT))
        self.embedding = self.embedding.to(device)
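
The excerpt above only shows __init__. A plausible companion method for pulling embeddings out of the wrapped backbone might look like the sketch below; the preprocessing expectations and the normalization step are assumptions, not part of the original class.

    @torch.no_grad()
    def get_embedding(self, batch):
        # batch: float tensor of shape (N, 3, H, W), already aligned and
        # normalized the same way as during training (assumption).
        self.embedding.eval()
        feats = self.embedding(batch.to(self.device))
        # L2-normalize so cosine similarity reduces to a dot product.
        return torch.nn.functional.normalize(feats, dim=1)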
Example #3
	sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))

	train_loader = torch.utils.data.DataLoader(
		dataset_train, batch_size = BATCH_SIZE, sampler = sampler, pin_memory = PIN_MEMORY,
		num_workers = NUM_WORKERS, drop_last = DROP_LAST
	)

	NUM_CLASS = len(train_loader.dataset.classes)
	print("Number of Training Classes: {}".format(NUM_CLASS))

	lfw, cfp_ff, cfp_fp, agedb, calfw, cplfw, vgg2_fp, lfw_issame, cfp_ff_issame, cfp_fp_issame, agedb_issame, calfw_issame, cplfw_issame, vgg2_fp_issame = get_val_data(DATA_ROOT)


	#======= model & loss & optimizer =======#
	if BACKBONE_NAME == 'ResNet_50':
		BACKBONE = ResNet_50(INPUT_SIZE)
	elif BACKBONE_NAME == 'ResNet_101':
		BACKBONE = ResNet_101(INPUT_SIZE)
	elif BACKBONE_NAME == 'ResNet_152':
		BACKBONE = ResNet_152(INPUT_SIZE)
	elif BACKBONE_NAME == 'IR_50':
		BACKBONE = IR_50(INPUT_SIZE)
	elif BACKBONE_NAME == 'IR_101':
		BACKBONE = IR_101(INPUT_SIZE)
	elif BACKBONE_NAME == 'IR_152':
		BACKBONE = IR_152(INPUT_SIZE)
	elif BACKBONE_NAME == 'IR_SE_50':
		BACKBONE = IR_SE_50(INPUT_SIZE)
	elif BACKBONE_NAME == 'IR_SE_101':
		BACKBONE = IR_SE_101(INPUT_SIZE)
	elif BACKBONE_NAME == 'IR_SE_152':
		BACKBONE = IR_SE_152(INPUT_SIZE)
	elif BACKBONE_NAME == 'ShuffleNet':
Example #4
    train_loader = torch.utils.data.DataLoader(dataset_train,
                                               batch_size=BATCH_SIZE,
                                               sampler=sampler,
                                               pin_memory=PIN_MEMORY,
                                               num_workers=NUM_WORKERS,
                                               drop_last=DROP_LAST)

    NUM_CLASS = len(train_loader.dataset.classes)
    print("Number of Training Classes: {}".format(NUM_CLASS))

    lfw, cfp_ff, cfp_fp, agedb, calfw, cplfw, vgg2_fp, lfw_issame, cfp_ff_issame, cfp_fp_issame, agedb_issame, calfw_issame, cplfw_issame, vgg2_fp_issame = get_val_data(
        DATA_ROOT)

    # ======= model & loss & optimizer =======#
    BACKBONE_DICT = {
        'ResNet_50': ResNet_50(INPUT_SIZE),
        'ResNet_101': ResNet_101(INPUT_SIZE),
        'ResNet_152': ResNet_152(INPUT_SIZE),
        'IR_50': IR_50(INPUT_SIZE),
        'IR_101': IR_101(INPUT_SIZE),
        'IR_152': IR_152(INPUT_SIZE),
        'IR_SE_50': IR_SE_50(INPUT_SIZE),
        'IR_SE_101': IR_SE_101(INPUT_SIZE),
        'IR_SE_152': IR_SE_152(INPUT_SIZE)
    }
    BACKBONE = BACKBONE_DICT[BACKBONE_NAME]
    print("=" * 60)
    print(BACKBONE)
    print("{} Backbone Generated".format(BACKBONE_NAME))
    print("=" * 60)
Example #5
if not os.path.exists(ADV_DIR):
    os.mkdir(ADV_DIR)
MULTIGPU = False

CKPT_LIST = [
    'backbone_resnet50.pth', 'backbone_ir50_ms1m.pth',
    'backbone_ir50_asia.pth', 'backbone_ir152.pth'
]
CKPT_LIST = [
    os.path.join('/home/zhangao/model_zoo/face_model_zoo', ckpt)
    for ckpt in CKPT_LIST
]

model_list = [
    ResNet_50([112, 112]),
    IR_50([112, 112]),
    IR_50([112, 112]),
    IR_152([112, 112])
]
weights = []
LOAD_EMBEDDINGS = None
#LOAD_EMBEDDINGS="id_embeddings_rgb.pkl"
PIN_MEMORY = False
NUM_WORKERS = 0
BATCH_SIZE = 8
EMBEDDING_SIZE = 512
SAVE = True

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
#device = 'cpu'
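
The excerpt above builds CKPT_LIST and model_list but stops before the checkpoints are loaded. A sketch of the loading step, under the assumption that each checkpoint file matches its backbone in order:

# Sketch (assumption): pair each checkpoint with its backbone and prepare it
# for inference on the selected device.
for model, ckpt in zip(model_list, CKPT_LIST):
    model.load_state_dict(torch.load(ckpt, map_location=device))
    model.to(device)
    model.eval()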
Example #6
model_ResNet_101_Epoch_4.load_state_dict(
    torch.load(
        './Defense_Model/Backbone_ResNet_101_Epoch_4_Batch_90976_Time_2019-08-04-11-34_checkpoint.pth',
        map_location='cuda'))
model_ResNet_101_Epoch_4.eval()
criterion_ResNet_101_Epoch_4 = nn.MSELoss()
# 6
model_ResNet_152_Epoch_1 = ResNet_152([112, 112])
model_ResNet_152_Epoch_1.load_state_dict(
    torch.load(
        './Defense_Model/Backbone_ResNet_152_Epoch_1_Batch_22744_Time_2019-08-03-01-01_checkpoint.pth',
        map_location='cuda'))
model_ResNet_152_Epoch_1.eval()
criterion_ResNet_152_Epoch_1 = nn.MSELoss()
# 7
model_ResNet_50_Epoch_3 = ResNet_50([112, 112])
model_ResNet_50_Epoch_3.load_state_dict(
    torch.load(
        './Defense_Model/Backbone_ResNet_50_Epoch_3_Batch_34116_Time_2019-08-02-19-12_checkpoint.pth',
        map_location='cuda'))
model_ResNet_50_Epoch_3.eval()
criterion_ResNet_50_Epoch_3 = nn.MSELoss()

criterion = nn.MSELoss()

# cpu
# collect all images to attack
paths = []
picpath = os.getcwd() + '/raw_faces'
dire = None
for root, dirs, files in os.walk(picpath):
Example #7
    weights = torch.DoubleTensor(weights)
    sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))

    train_loader = torch.utils.data.DataLoader(
        dataset_train, batch_size = BATCH_SIZE, sampler = sampler, pin_memory = PIN_MEMORY,
        num_workers = NUM_WORKERS, drop_last = DROP_LAST
    )

    NUM_CLASS = len(train_loader.dataset.classes)
    print("Number of Training Classes: {}".format(NUM_CLASS))

    lfw, cfp_ff, cfp_fp, agedb, calfw, cplfw, vgg2_fp, lfw_issame, cfp_ff_issame, cfp_fp_issame, agedb_issame, calfw_issame, cplfw_issame, vgg2_fp_issame = get_val_data(DATA_ROOT)


    #======= model & loss & optimizer =======#
    BACKBONE_DICT = {'ResNet_50': ResNet_50(INPUT_SIZE), 
                     'ResNet_101': ResNet_101(INPUT_SIZE), 
                     'ResNet_152': ResNet_152(INPUT_SIZE),
                     'IR_50': IR_50(INPUT_SIZE), 
                     'IR_101': IR_101(INPUT_SIZE), 
                     'IR_152': IR_152(INPUT_SIZE),
                     'IR_SE_50': IR_SE_50(INPUT_SIZE), 
                     'IR_SE_101': IR_SE_101(INPUT_SIZE), 
                     'IR_SE_152': IR_SE_152(INPUT_SIZE)}
    BACKBONE = BACKBONE_DICT[BACKBONE_NAME]
    print("=" * 60)
    print(BACKBONE)
    print("{} Backbone Generated".format(BACKBONE_NAME))
    print("=" * 60)

    HEAD_DICT = {'ArcFace': ArcFace(in_features = EMBEDDING_SIZE, out_features = NUM_CLASS, device_id = GPU_ID),
Example #8
def main(ARGS):

    if ARGS.model_path is None:
        raise AssertionError("Path should not be None")

    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    ####### Model setup
    print('Model type: %s' % ARGS.model_type)
    if ARGS.model_type == 'ResNet_50':
        model = ResNet_50(ARGS.input_size)
    elif ARGS.model_type == 'ResNet_101':
        model = ResNet_101(ARGS.input_size)
    elif ARGS.model_type == 'ResNet_152':
        model = ResNet_152(ARGS.input_size)
    elif ARGS.model_type == 'IR_50':
        model = IR_50(ARGS.input_size)
    elif ARGS.model_type == 'IR_101':
        model = IR_101(ARGS.input_size)
    elif ARGS.model_type == 'IR_152':
        model = IR_152(ARGS.input_size)
    elif ARGS.model_type == 'IR_SE_50':
        model = IR_SE_50(ARGS.input_size)
    elif ARGS.model_type == 'IR_SE_101':
        model = IR_SE_101(ARGS.input_size)
    elif ARGS.model_type == 'IR_SE_152':
        model = IR_SE_152(ARGS.input_size)
    else:
        raise AssertionError(
            'Unsupported model_type {}. We only support: [\'ResNet_50\', \'ResNet_101\', \'ResNet_152\', \'IR_50\', \'IR_101\', \'IR_152\', \'IR_SE_50\', \'IR_SE_101\', \'IR_SE_152\']'
            .format(ARGS.model_type))

    if use_cuda:
        model.load_state_dict(torch.load(ARGS.model_path))
    else:
        model.load_state_dict(torch.load(ARGS.model_path, map_location='cpu'))

    model.to(device)
    # embedding_size = 512
    model.eval()

    # DATA_ROOT = './../evoLVe_data/data' # the parent root where your train/val/test data are stored
    # INPUT_SIZE = [112, 112] # support: [112, 112] and [224, 224]
    # BACKBONE_RESUME_ROOT = './../evoLVe_data/pth/backbone_ir50_ms1m_epoch120.pth' # the root to resume training from a saved checkpoint
    # BACKBONE_RESUME_ROOT = './../pytorch-face/pth/IR_50_MODEL_arcface_casia_epoch56_lfw9925.pth'

    MULTI_GPU = False  # flag to use multiple GPUs; if you train on a single GPU, first run "export CUDA_VISIBLE_DEVICES=device_id" to specify the GPU card you want to use
    # DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # EMBEDDING_SIZE = 512 # feature dimension
    # BATCH_SIZE = 512

    # BACKBONE = IR_50(INPUT_SIZE)

    # if os.path.isfile(BACKBONE_RESUME_ROOT):
    #     print("Loading Backbone Checkpoint '{}'".format(BACKBONE_RESUME_ROOT))
    #     BACKBONE.load_state_dict(torch.load(BACKBONE_RESUME_ROOT, map_location='cpu'))
    # else:
    #     print("No Checkpoint Found at '{}'.".format(BACKBONE_RESUME_ROOT))
    #     sys.exit()

    print("=" * 60)
    print(
        "Performing Evaluation on LFW, CFP_FF, CFP_FP, AgeDB, CALFW, CPLFW and VGG2_FP, and Save Checkpoints..."
    )

    #### LFW
    print("Performing Evaluation on LFW...")
    lfw, lfw_issame = get_val_pair(ARGS.data_root, 'lfw')
    accuracy_lfw, best_threshold_lfw, roc_curve_lfw = perform_val(
        MULTI_GPU, device, ARGS.embedding_size, ARGS.batch_size, model, lfw,
        lfw_issame)
    print("Evaluation: LFW Acc: {}".format(accuracy_lfw))

    #### CALFW WORKS
    print("Performing Evaluation on CALFW...")
    calfw, calfw_issame = get_val_pair(ARGS.data_root, 'calfw')
    accuracy_calfw, best_threshold_calfw, roc_curve_calfw = perform_val(
        MULTI_GPU, device, ARGS.embedding_size, ARGS.batch_size, model, calfw,
        calfw_issame)
    print("Evaluation: CALFW Acc: {}".format(accuracy_calfw))

    #### CPLFW
    print("Performing Evaluation on CPLFW...")
    cplfw, cplfw_issame = get_val_pair(ARGS.data_root, 'cplfw')
    accuracy_cplfw, best_threshold_cplfw, roc_curve_cplfw = perform_val(
        MULTI_GPU, device, ARGS.embedding_size, ARGS.batch_size, model, cplfw,
        cplfw_issame)
    print("Evaluation: CPLFW Acc: {}".format(accuracy_cplfw))

    #### CFP-FF
    print("Performing Evaluation on CFP-FF...")
    cfp_ff, cfp_ff_issame = get_val_pair(ARGS.data_root, 'cfp_ff')
    accuracy_cfp_ff, best_threshold_cfp_ff, roc_curve_cfp_ff = perform_val(
        MULTI_GPU, device, ARGS.embedding_size, ARGS.batch_size, model, cfp_ff,
        cfp_ff_issame)
    print("Evaluation: CFP-FF Acc: {}".format(accuracy_cfp_ff))

    #### CFP-FP
    print("Performing Evaluation on CFP-FP...")
    cfp_fp, cfp_fp_issame = get_val_pair(ARGS.data_root, 'cfp_fp')
    accuracy_cfp_fp, best_threshold_cfp_fp, roc_curve_cfp_fp = perform_val(
        MULTI_GPU, device, ARGS.embedding_size, ARGS.batch_size, model, cfp_fp,
        cfp_fp_issame)
    print("Evaluation: CFP-FP Acc: {}".format(accuracy_cfp_fp))

    #### AgeDB_30
    print("Performing Evaluation on AgeDB_30...")
    agedb_30, agedb_30_issame = get_val_pair(ARGS.data_root, 'agedb_30')
    accuracy_agedb, best_threshold_agedb, roc_curve_agedb = perform_val(
        MULTI_GPU, device, ARGS.embedding_size, ARGS.batch_size, model,
        agedb_30, agedb_30_issame)
    print("Evaluation: AgeDB_30 Acc: {}".format(accuracy_agedb))

    #### VggFace2_FP
    print("Performing Evaluation on VggFace2_FP...")
    vgg2_fp, vgg2_fp_issame = get_val_pair(ARGS.data_root, 'vgg2_fp')
    accuracy_vgg2_fp, best_threshold_vgg2_fp, roc_curve_vgg2_fp = perform_val(
        MULTI_GPU, device, ARGS.embedding_size, ARGS.batch_size, model,
        vgg2_fp, vgg2_fp_issame)
    print("Evaluation: VggFace2_FP Acc: {}".format(accuracy_vgg2_fp))

    print("=" * 60)
    print("FINAL RESULTS:")
    print(
        "Evaluation: LFW Acc: {}, CFP_FF Acc: {}, CFP_FP Acc: {}, AgeDB Acc: {}, CALFW Acc: {}, CPLFW Acc: {}, VGG2_FP Acc: {}"
        .format(accuracy_lfw, accuracy_cfp_ff, accuracy_cfp_fp, accuracy_agedb,
                accuracy_calfw, accuracy_cplfw, accuracy_vgg2_fp))
    print("=" * 60)
Example #9
    sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))

    train_loader = torch.utils.data.DataLoader(
        dataset_train, batch_size = BATCH_SIZE, sampler = sampler, pin_memory = PIN_MEMORY,
        num_workers = NUM_WORKERS, drop_last = DROP_LAST
    )

    NUM_CLASS = len(train_loader.dataset.classes)
    print("Number of Training Classes: {}".format(NUM_CLASS))
    
    # validate on LFW, CFP_FF, CFP_FP, AgeDB, CALFW, CPLFW and VGGFace2_FP
    lfw, cfp_ff, cfp_fp, agedb, calfw, cplfw, vgg2_fp, lfw_issame, cfp_ff_issame, cfp_fp_issame, agedb_issame, calfw_issame, cplfw_issame, vgg2_fp_issame = get_val_data(DATA_ROOT)


    #======= model & loss & optimizer =======#
    BACKBONE_DICT = {'ResNet_50': ResNet_50(INPUT_SIZE), 'ResNet_101': ResNet_101(INPUT_SIZE), 'ResNet_152': ResNet_152(INPUT_SIZE),
                     'IR_50': IR_50(INPUT_SIZE), 'IR_101': IR_101(INPUT_SIZE), 'IR_152': IR_152(INPUT_SIZE),
                     'IR_SE_50': IR_SE_50(INPUT_SIZE), 'IR_SE_101': IR_SE_101(INPUT_SIZE), 'IR_SE_152': IR_SE_152(INPUT_SIZE)}
    BACKBONE = BACKBONE_DICT[BACKBONE_NAME]
    print("=" * 60)
    print(BACKBONE)
    print("{} Backbone Generated".format(BACKBONE_NAME))
    print("=" * 60)

    HEAD_DICT = {'ArcFace': ArcFace(in_features = EMBEDDING_SIZE, out_features = NUM_CLASS),
                 'CosFace': CosFace(in_features = EMBEDDING_SIZE, out_features = NUM_CLASS),
                 'SphereFace': SphereFace(in_features = EMBEDDING_SIZE, out_features = NUM_CLASS),
                 'Am_softmax': Am_softmax(in_features = EMBEDDING_SIZE, out_features = NUM_CLASS)}
    HEAD = HEAD_DICT[HEAD_NAME]
    print("=" * 60)
    print(HEAD)
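
Examples #3, #7 and #9 all feed a WeightedRandomSampler, but none of the excerpts show how the per-sample weights are computed. A common recipe (a sketch, under the assumption that dataset_train is a torchvision ImageFolder exposing .imgs and .classes) is inverse class frequency:

    # Sketch (assumption): give each sample a weight inversely proportional to
    # the size of its class, so the sampler draws classes roughly uniformly.
    def make_balanced_weights(samples, num_classes):
        counts = [0] * num_classes
        for _, label in samples:
            counts[label] += 1
        total = float(sum(counts))
        per_class = [total / count for count in counts]
        return [per_class[label] for _, label in samples]

    # weights = make_balanced_weights(dataset_train.imgs, len(dataset_train.classes))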