def build_backbone(backbone, output_stride, BatchNorm):
    """Construct a segmentation backbone by name.

    Args:
        backbone: backbone identifier, one of 'resnet' or 'xception'.
        output_stride: output stride forwarded to the backbone constructor.
        BatchNorm: normalization layer class forwarded to the backbone.

    Returns:
        The instantiated backbone module.

    Raises:
        NotImplementedError: if ``backbone`` is not a supported name.
    """
    # NOTE(review): a later duplicate definition of build_backbone (with more
    # backbones) appears in this source and would shadow this one if both live
    # in the same module — confirm which is intended.
    if backbone == 'resnet':
        return resnet.ResNet101(output_stride, BatchNorm)
    elif backbone == 'xception':
        return xception.AlignedXception(output_stride, BatchNorm)
    else:
        # Name the rejected value instead of raising a bare NotImplementedError.
        raise NotImplementedError(f'Unsupported backbone: {backbone!r}')
def build_backbone(backbone, output_stride, BatchNorm):
    """Return the backbone module selected by name.

    Supported names: 'resnet', 'xception', 'drn', 'mobilenet'.
    Raises NotImplementedError for any other name.
    """
    # Lazy factory table: nothing is instantiated until the chosen entry is
    # actually called, so unknown names never touch the backbone modules.
    factories = {
        'resnet': lambda: resnet.ResNet101(output_stride, BatchNorm),
        'xception': lambda: xception.AlignedXception(output_stride, BatchNorm),
        'drn': lambda: drn.drn_d_54(BatchNorm),  # drn variant takes no output_stride
        'mobilenet': lambda: mobilenet.MobileNetV2(output_stride, BatchNorm),
    }
    factory = factories.get(backbone)
    if factory is None:
        raise NotImplementedError
    return factory()
def extract_feature(model_path, backbone_net, face_scrub_path, megaface_path,
                    batch_size=32, gpus='0', do_norm=False):
    """Run a face-recognition backbone over the MegaFace data and dump features.

    For every image, writes a ``<image_path>.feat`` file next to the image via
    ``write_mat``, optionally L2-normalizing each feature vector first.

    Args:
        model_path: path to a checkpoint containing a 'net_state_dict' entry.
        backbone_net: one of 'MobileFace', 'Res50', 'Res101', 'Res50_IR',
            'SERes50_IR'.
        face_scrub_path: FaceScrub probe-set root, passed to ``MegaFace``.
        megaface_path: MegaFace distractor-set root, passed to ``MegaFace``.
        batch_size: inference batch size.
        gpus: comma-separated CUDA device ids (e.g. '0' or '0,1').
        do_norm: if truthy, L2-normalize each feature before writing.

    Raises:
        ValueError: if ``backbone_net`` is not a recognized name.
    """
    # Build the requested backbone.
    if backbone_net == 'MobileFace':
        net = mobilefacenet.MobileFaceNet()
    elif backbone_net == 'Res50':
        net = resnet.ResNet50()
    elif backbone_net == 'Res101':
        net = resnet.ResNet101()
    elif backbone_net == 'Res50_IR':
        # NOTE(review): depends on a module-level `args` for feature_dim —
        # confirm it is defined wherever this function is used.
        net = arcfacenet.SEResNet_IR(50, feature_dim=args.feature_dim, mode='ir')
    elif backbone_net == 'SERes50_IR':
        net = arcfacenet.SEResNet_IR(50, feature_dim=args.feature_dim, mode='se_ir')
    else:
        # Fail fast: the original only printed a message here and then crashed
        # with a NameError on the undefined `net` below.
        raise ValueError(f'{backbone_net} is not available!')

    # GPU init: wrap in DataParallel only when more than one device id is given.
    multi_gpus = len(gpus.split(',')) > 1
    os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    net.load_state_dict(torch.load(model_path)['net_state_dict'])
    if multi_gpus:
        net = DataParallel(net).to(device)
    else:
        net = net.to(device)
    net.eval()

    transform = transforms.Compose([
        transforms.ToTensor(),  # range [0, 255] -> [0.0, 1.0]
        transforms.Normalize(mean=(0.5, 0.5, 0.5),
                             std=(0.5, 0.5, 0.5))  # range [0.0, 1.0] -> [-1.0, 1.0]
    ])
    megaface_dataset = MegaFace(face_scrub_path, megaface_path, transform=transform)
    megaface_loader = torch.utils.data.DataLoader(
        megaface_dataset, batch_size=batch_size, shuffle=False,
        num_workers=12, drop_last=False)

    for data in megaface_loader:
        img, img_path = data[0].to(device), data[1]
        with torch.no_grad():
            output = net(img).data.cpu().numpy()
        if not do_norm:
            for i in range(len(img_path)):
                write_mat(img_path[i] + '.feat', output[i])
            print('extract 1 batch...without feature normalization')
        else:
            for i in range(len(img_path)):
                feat = output[i]
                # L2-normalize to unit length before writing.
                feat = feat / np.sqrt(np.dot(feat, feat))
                write_mat(img_path[i] + '.feat', feat)
            print('extract 1 batch...with feature normalization')
    print('all images have been processed!')
def loadModel(data_root, file_list, backbone_net, gpus='0', resume=None):
    """Build a backbone, load its checkpoint, and prepare the LFW eval loader.

    Args:
        data_root: LFW image root directory, passed to ``LFW``.
        file_list: LFW pair/file list, passed to ``LFW``.
        backbone_net: one of 'MobileFace', 'Res50', 'Res101', 'Res50_IR',
            'SERes50_IR'.
        gpus: comma-separated CUDA device ids (e.g. '0' or '0,1').
        resume: path to a checkpoint containing a 'net_state_dict' entry.
            NOTE(review): the default None will fail inside torch.load — a
            real path appears to be required; confirm callers always pass one.

    Returns:
        Tuple of (net in eval mode, torch device, lfw_dataset, lfw_loader).

    Raises:
        ValueError: if ``backbone_net`` is not a recognized name.
    """
    # Build the requested backbone.
    if backbone_net == 'MobileFace':
        net = mobilefacenet.MobileFaceNet()
    elif backbone_net == 'Res50':
        net = resnet.ResNet50()
    elif backbone_net == 'Res101':
        net = resnet.ResNet101()
    elif backbone_net == 'Res50_IR':
        # NOTE(review): depends on a module-level `args` for feature_dim —
        # confirm it is defined wherever this function is used.
        net = arcfacenet.SEResNet_IR(50, feature_dim=args.feature_dim, mode='ir')
    elif backbone_net == 'SERes50_IR':
        net = arcfacenet.SEResNet_IR(50, feature_dim=args.feature_dim, mode='se_ir')
    else:
        # Fail fast: the original only printed a message here and then crashed
        # with a NameError on the undefined `net` below.
        raise ValueError(f'{backbone_net} is not available!')

    # GPU init: wrap in DataParallel only when more than one device id is given.
    multi_gpus = len(gpus.split(',')) > 1
    os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    net.load_state_dict(torch.load(resume)['net_state_dict'])
    if multi_gpus:
        net = DataParallel(net).to(device)
    else:
        net = net.to(device)

    transform = transforms.Compose([
        transforms.ToTensor(),  # range [0, 255] -> [0.0, 1.0]
        transforms.Normalize(mean=(0.5, 0.5, 0.5),
                             std=(0.5, 0.5, 0.5))  # range [0.0, 1.0] -> [-1.0, 1.0]
    ])
    lfw_dataset = LFW(data_root, file_list, transform=transform)
    lfw_loader = torch.utils.data.DataLoader(
        lfw_dataset, batch_size=128, shuffle=False,
        num_workers=2, drop_last=False)
    return net.eval(), device, lfw_dataset, lfw_loader