def extract_feature(model_path, backbone_net, face_scrub_path, megaface_path, batch_size=32, gpus='0', do_norm=False):
    """Extract a feature vector for every image in the MegaFace/FaceScrub
    datasets and write each one to disk as ``<image_path>.feat`` via ``write_mat``.

    Args:
        model_path: checkpoint file containing a 'net_state_dict' entry.
        backbone_net: architecture name — 'MobileFace', 'Res50_IR',
            'SERes50_IR', 'CBAMRes50_IR', 'Res100_IR', 'SERes100_IR',
            or 'CBAMRes100_IR'.
        face_scrub_path: FaceScrub dataset root (passed to MegaFace).
        megaface_path: MegaFace distractor dataset root (passed to MegaFace).
        batch_size: DataLoader batch size.
        gpus: comma-separated CUDA device ids, e.g. '0' or '0,1'.
        do_norm: if True, L2-normalize each feature before writing.

    Raises:
        ValueError: if ``backbone_net`` is not a recognized architecture.
    """
    # Table-driven construction of the CBAMResNet_IR variants; MobileFace is
    # the one backbone with a different constructor.
    resnet_cfg = {
        'Res50_IR': (50, 'ir'),
        'SERes50_IR': (50, 'se_ir'),
        'CBAMRes50_IR': (50, 'cbam_ir'),
        'Res100_IR': (100, 'ir'),
        'SERes100_IR': (100, 'se_ir'),
        'CBAMRes100_IR': (100, 'cbam_ir'),
    }
    if backbone_net == 'MobileFace':
        net = mobilefacenet.MobileFaceNet()
    elif backbone_net in resnet_cfg:
        depth, mode = resnet_cfg[backbone_net]
        net = cbam.CBAMResNet_IR(depth, feature_dim=args.feature_dim, mode=mode)
    else:
        # The original only printed a warning and fell through, which later
        # raised UnboundLocalError on `net`; fail fast instead.
        raise ValueError(backbone_net + ' is not available!')

    # gpu init — set CUDA_VISIBLE_DEVICES before any CUDA context is created.
    multi_gpus = len(gpus.split(',')) > 1
    os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # map_location lets a GPU-trained checkpoint load on a CPU-only machine.
    net.load_state_dict(torch.load(model_path, map_location=device)['net_state_dict'])
    if multi_gpus:
        net = DataParallel(net).to(device)
    else:
        net = net.to(device)
    net.eval()

    transform = transforms.Compose([
        transforms.ToTensor(),  # range [0, 255] -> [0.0,1.0]
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))  # range [0.0, 1.0] -> [-1.0,1.0]
    ])
    megaface_dataset = MegaFace(face_scrub_path, megaface_path, transform=transform)
    megaface_loader = torch.utils.data.DataLoader(megaface_dataset, batch_size=batch_size,
                                                  shuffle=False, num_workers=12, drop_last=False)

    for data in megaface_loader:
        img, img_path = data[0].to(device), data[1]
        with torch.no_grad():
            output = net(img).data.cpu().numpy()

        # One write-loop for both modes (the original duplicated it).
        for i in range(len(img_path)):
            feat = output[i]
            if do_norm:
                # L2-normalize; assumes a non-zero feature vector.
                feat = feat / np.sqrt(np.dot(feat, feat))
            write_mat(img_path[i] + '.feat', feat)
        if do_norm:
            print('extract 1 batch...with feature normalization')
        else:
            print('extract 1 batch...without feature normalization')
    print('all images have been processed!')
# Example 2
def loadModel(data_root, file_list, backbone_net, gpus='0', resume=None):
    """Build a backbone, load its checkpoint, and prepare the CFP-FP data.

    Args:
        data_root: CFP-FP image root directory (passed to CFP_FP).
        file_list: CFP-FP pair-list file (passed to CFP_FP).
        backbone_net: architecture name — 'MobileFace', 'Res50_IR',
            'SERes50_IR', 'CBAMRes50_IR', 'Res100_IR', 'SERes100_IR',
            or 'CBAMRes100_IR'.
        gpus: comma-separated CUDA device ids, e.g. '0' or '0,1'.
        resume: checkpoint file containing a 'net_state_dict' entry.

    Returns:
        Tuple of (net in eval mode, torch device, CFP_FP dataset, DataLoader).

    Raises:
        ValueError: if ``backbone_net`` is not a recognized architecture.
    """
    # Table-driven construction of the CBAMResNet_IR variants; MobileFace is
    # the one backbone with a different constructor.
    resnet_cfg = {
        'Res50_IR': (50, 'ir'),
        'SERes50_IR': (50, 'se_ir'),
        'CBAMRes50_IR': (50, 'cbam_ir'),
        'Res100_IR': (100, 'ir'),
        'SERes100_IR': (100, 'se_ir'),
        'CBAMRes100_IR': (100, 'cbam_ir'),
    }
    if backbone_net == 'MobileFace':
        net = mobilefacenet.MobileFaceNet()
    elif backbone_net in resnet_cfg:
        depth, mode = resnet_cfg[backbone_net]
        net = cbam.CBAMResNet_IR(depth, feature_dim=args.feature_dim, mode=mode)
    else:
        # The original only printed a warning and fell through, which later
        # raised UnboundLocalError on `net`; fail fast instead.
        raise ValueError(backbone_net + ' is not available!')

    # gpu init — set CUDA_VISIBLE_DEVICES before any CUDA context is created.
    multi_gpus = len(gpus.split(',')) > 1
    os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # map_location lets a GPU-trained checkpoint load on a CPU-only machine.
    net.load_state_dict(torch.load(resume, map_location=device)['net_state_dict'])

    if multi_gpus:
        net = DataParallel(net).to(device)
    else:
        net = net.to(device)

    transform = transforms.Compose([
        transforms.ToTensor(),  # range [0, 255] -> [0.0,1.0]
        transforms.Normalize(mean=(0.5, 0.5, 0.5),
                             std=(0.5, 0.5, 0.5))  # range [0.0, 1.0] -> [-1.0,1.0]
    ])
    cfp_dataset = CFP_FP(data_root, file_list, transform=transform)
    cfp_loader = torch.utils.data.DataLoader(cfp_dataset,
                                             batch_size=128,
                                             shuffle=False,
                                             num_workers=4,
                                             drop_last=False)

    return net.eval(), device, cfp_dataset, cfp_loader