Example #1
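
The examples below share these imports. The backbone modules (mobilefacenet, cbam, attention), the dataset classes (AgeDB30, LFW, MegaFace), and write_mat come from the surrounding repository, so their exact import paths are not shown here.

import os

import numpy as np
import torch
from torch.nn import DataParallel
from torch.utils.data import DataLoader
from torchvision import transforms

# Provided by the repository (import paths depend on the project layout):
# mobilefacenet, cbam, attention  - backbone definitions
# AgeDB30, LFW, MegaFace          - evaluation dataset classes
# write_mat                       - writes a feature vector in MegaFace .feat format
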
def loadModel(data_root, file_list, backbone_net, gpus='0', resume=None,
              feature_dim=512):  # placeholder default; must match the trained checkpoint

    if backbone_net == 'MobileFace':
        net = mobilefacenet.MobileFaceNet()
    elif backbone_net == 'CBAM_50':
        net = cbam.CBAMResNet(50, feature_dim=feature_dim, mode='ir')
    elif backbone_net == 'CBAM_50_SE':
        net = cbam.CBAMResNet(50, feature_dim=feature_dim, mode='ir_se')
    elif backbone_net == 'CBAM_100':
        net = cbam.CBAMResNet(100, feature_dim=feature_dim, mode='ir')
    elif backbone_net == 'CBAM_100_SE':
        net = cbam.CBAMResNet(100, feature_dim=feature_dim, mode='ir_se')
    else:
        raise ValueError(backbone_net + ' is not available!')

    # gpu init
    multi_gpus = False
    if len(gpus.split(',')) > 1:
        multi_gpus = True
    os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # the checkpoint is expected to store the weights under the 'net_state_dict' key
    net.load_state_dict(torch.load(resume, map_location=device)['net_state_dict'])

    if multi_gpus:
        net = DataParallel(net).to(device)
    else:
        net = net.to(device)

    transform = transforms.Compose([
        transforms.ToTensor(),  # range [0, 255] -> [0.0,1.0]
        transforms.Normalize(mean=(0.5, 0.5, 0.5),
                             std=(0.5, 0.5,
                                  0.5))  # range [0.0, 1.0] -> [-1.0,1.0]
    ])
    agedb_dataset = AgeDB30(data_root, file_list, transform=transform)
    agedb_loader = torch.utils.data.DataLoader(agedb_dataset,
                                               batch_size=128,
                                               shuffle=False,
                                               num_workers=2,
                                               drop_last=False)

    return net.eval(), device, agedb_dataset, agedb_loader
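
A minimal call sketch for the AgeDB-30 variant above; every path and checkpoint name is a placeholder, and the checkpoint is assumed to hold its weights under the 'net_state_dict' key:

net, device, agedb_dataset, agedb_loader = loadModel(
    data_root='/data/agedb_30_aligned',    # hypothetical image root
    file_list='/data/agedb_30_pair.txt',   # hypothetical pair-list file
    backbone_net='CBAM_50',
    gpus='0',
    resume='/ckpt/cbam50_best.ckpt',       # hypothetical checkpoint path
    feature_dim=512)
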
def loadModel(args, idx):
    if args.backbone_net[idx] == 'MobileFace':
        net = mobilefacenet.MobileFaceNet()
    elif args.backbone_net[idx] == 'CBAM_50':
        net = cbam.CBAMResNet(50, feature_dim=args.feature_dim[idx], mode='ir')
    elif args.backbone_net[idx] == 'CBAM_50_SE':
        net = cbam.CBAMResNet(50,
                              feature_dim=args.feature_dim[idx],
                              mode='ir_se')
    elif args.backbone_net[idx] == 'CBAM_100':
        net = cbam.CBAMResNet(100,
                              feature_dim=args.feature_dim[idx],
                              mode='ir')
    elif args.backbone_net[idx] == 'CBAM_100_SE':
        net = cbam.CBAMResNet(100,
                              feature_dim=args.feature_dim[idx],
                              mode='ir_se')
    elif args.backbone_net[idx] == 'CBAM_152':
        net = cbam.CBAMResNet(152,
                              feature_dim=args.feature_dim[idx],
                              mode='ir')
    elif args.backbone_net[idx] == 'CBAM_152_SE':
        net = cbam.CBAMResNet(152,
                              feature_dim=args.feature_dim[idx],
                              mode='ir_se')
    elif args.backbone_net[idx] == 'Attention_56':
        net = attention.ResidualAttentionNet_56(
            feature_dim=args.feature_dim[idx])
    else:
        raise ValueError(args.backbone_net[idx] + ' is not available!')

    # gpu init (multi-GPU selection left disabled; device is still needed below)
    multi_gpus = False
    # if len(args.gpus.split(',')) > 1:
    #     multi_gpus = True
    # os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    net.load_state_dict(
        torch.load(args.resume[idx], map_location=device)['net_state_dict'])

    if multi_gpus:
        net = DataParallel(net).to(device)
    else:
        net = net.to(device)

    return net.eval()
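
A usage sketch for the indexed variant: args is built by hand as an argparse-style namespace whose field names follow the function body; the checkpoint paths are placeholders. Each returned model is already on the device and in eval mode:

from argparse import Namespace

args = Namespace(backbone_net=['MobileFace', 'CBAM_50'],
                 feature_dim=[128, 512],
                 resume=['/ckpt/mobileface.ckpt', '/ckpt/cbam50.ckpt'])  # hypothetical paths

models = [loadModel(args, idx=i) for i in range(len(args.backbone_net))]
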
def loadModel(backbone_net, feature_dim, gpus, resume, root, dev_path, flip):
    if backbone_net == 'MobileFace':
        net = mobilefacenet.MobileFaceNet()
    elif backbone_net == 'CBAM_50':
        net = cbam.CBAMResNet(50, feature_dim=feature_dim, mode='ir')
    elif backbone_net == 'CBAM_50_SE':
        net = cbam.CBAMResNet(50, feature_dim=feature_dim, mode='ir_se')
    elif backbone_net == 'CBAM_100':
        net = cbam.CBAMResNet(100, feature_dim=feature_dim, mode='ir')
    elif backbone_net == 'CBAM_100_SE':
        net = cbam.CBAMResNet(100, feature_dim=feature_dim, mode='ir_se')
    elif backbone_net == 'CBAM_152':
        net = cbam.CBAMResNet(152, feature_dim=feature_dim, mode='ir')
    elif backbone_net == 'CBAM_152_SE':
        net = cbam.CBAMResNet(152, feature_dim=feature_dim, mode='ir_se')
    elif backbone_net == 'Attention_56':
        net = attention.ResidualAttentionNet_56(feature_dim=feature_dim)
    else:
        raise ValueError(backbone_net + ' is not available!')

    # gpu init
    multi_gpus = False
    if len(gpus.split(',')) > 1:
        multi_gpus = True
    os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    net.load_state_dict(torch.load(resume, map_location=device)['net_state_dict'])

    if multi_gpus:
        net = DataParallel(net).to(device)
    else:
        net = net.to(device)

    transform = transforms.Compose([
        transforms.ToTensor(),  # range [0, 255] -> [0.0,1.0]
        transforms.Normalize(mean=(0.5, 0.5, 0.5),
                             std=(0.5, 0.5,
                                  0.5))  # range [0.0, 1.0] -> [-1.0,1.0]
    ])
    lfw_dataset = LFW(root, dev_path, transform=transform, flip=flip)
    lfw_loader = DataLoader(lfw_dataset, batch_size=1, shuffle=False)

    return net.eval(), device, lfw_dataset, lfw_loader
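
The LFW variant follows the same pattern; the paths below are placeholders:

net, device, lfw_dataset, lfw_loader = loadModel(
    backbone_net='CBAM_50',
    feature_dim=512,
    gpus='0',
    resume='/ckpt/cbam50_best.ckpt',   # hypothetical checkpoint path
    root='/data/lfw_aligned',          # hypothetical LFW image root
    dev_path='/data/lfw_pairs.txt',    # hypothetical pair-list file
    flip=True)                         # assumed boolean flag passed through to the LFW dataset
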
def extract_feature(model_path,
                    backbone_net,
                    face_scrub_path,
                    megaface_path,
                    batch_size=32,
                    gpus='0',
                    do_norm=False,
                    feature_dim=512):  # placeholder default; must match the trained checkpoint

    if backbone_net == 'MobileFace':
        net = mobilefacenet.MobileFaceNet()
    elif backbone_net == 'CBAM_50':
        net = cbam.CBAMResNet(50, feature_dim=feature_dim, mode='ir')
    elif backbone_net == 'CBAM_50_SE':
        net = cbam.CBAMResNet(50, feature_dim=feature_dim, mode='ir_se')
    elif backbone_net == 'CBAM_100':
        net = cbam.CBAMResNet(100, feature_dim=feature_dim, mode='ir')
    elif backbone_net == 'CBAM_100_SE':
        net = cbam.CBAMResNet(100, feature_dim=feature_dim, mode='ir_se')
    else:
        raise ValueError(backbone_net + ' is not available!')

    multi_gpus = False
    if len(gpus.split(',')) > 1:
        multi_gpus = True
    os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    net.load_state_dict(torch.load(model_path, map_location=device)['net_state_dict'])
    if multi_gpus:
        net = DataParallel(net).to(device)
    else:
        net = net.to(device)
    net.eval()

    transform = transforms.Compose([
        transforms.ToTensor(),  # range [0, 255] -> [0.0,1.0]
        transforms.Normalize(mean=(0.5, 0.5, 0.5),
                             std=(0.5, 0.5,
                                  0.5))  # range [0.0, 1.0] -> [-1.0,1.0]
    ])
    megaface_dataset = MegaFace(face_scrub_path,
                                megaface_path,
                                transform=transform)
    megaface_loader = torch.utils.data.DataLoader(megaface_dataset,
                                                  batch_size=batch_size,
                                                  shuffle=False,
                                                  num_workers=12,
                                                  drop_last=False)

    for data in megaface_loader:
        img, img_path = data[0].to(device), data[1]
        with torch.no_grad():
            output = net(img).data.cpu().numpy()

        if not do_norm:
            for i in range(len(img_path)):
                abs_path = img_path[i] + '.feat'
                write_mat(abs_path, output[i])
            print('extracted one batch... without feature normalization')
        else:
            for i in range(len(img_path)):
                abs_path = img_path[i] + '.feat'
                # L2-normalize the feature before writing it out
                feat = output[i] / np.linalg.norm(output[i])
                write_mat(abs_path, feat)
            print('extracted one batch... with feature normalization')
    print('all images have been processed!')
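
A hedged invocation sketch for the extractor; all paths are placeholders, and features are written next to each image as '<image path>.feat':

extract_feature(model_path='/ckpt/cbam50_best.ckpt',        # hypothetical checkpoint
                backbone_net='CBAM_50',
                face_scrub_path='/data/facescrub_aligned',   # hypothetical FaceScrub root
                megaface_path='/data/megaface_aligned',      # hypothetical MegaFace root
                batch_size=64,
                gpus='0,1',       # more than one visible GPU enables DataParallel
                do_norm=True,     # write L2-normalized features
                feature_dim=512)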