def setUp(self):
    config = read_py_config('./configs/config.py')
    self.config = config
    self.model = build_model(config,
                             device='cpu',
                             strict=True,
                             mode='convert')
    self.img_size = tuple(map(int, config.resize.values()))
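
A natural follow-up here (not part of the original snippet) is a small smoke test on top of setUp(); the method name, the torch import, and the 3-channel NCHW input layout are all illustrative assumptions:

def test_forward_smoke(self):
    # Sketch: push a random image of the configured size through the model
    # built in setUp(). The 3-channel NCHW input layout is an assumption;
    # some modes may return a tuple of outputs, so only check non-None here.
    import torch
    dummy = torch.rand(1, 3, *self.img_size)
    with torch.no_grad():
        output = self.model(dummy)
    if isinstance(output, (tuple, list)):
        output = output[0]
    self.assertIsNotNone(output)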
Example #2
def main():
    # parsing arguments
    parser = argparse.ArgumentParser(description='antispoofing training')
    parser.add_argument('--draw_graph', action='store_true',
                        help='whether or not to draw graphics')
    parser.add_argument('--GPU', default=0, type=int, required=False,
                        help='specify which GPU to use')
    parser.add_argument('--config', type=str, default=None, required=True,
                        help='path to configuration file')
    parser.add_argument('--device', type=str, default='cuda',
                        help='if you want to eval model on cpu, pass "cpu" param')
    args = parser.parse_args()

    # read config and manage device
    path_to_config = args.config
    config = read_py_config(path_to_config)
    device = args.device + f':{args.GPU}' if args.device == 'cuda' else 'cpu'

    # building model
    model = build_model(config, device, strict=True, mode='eval')
    model.to(device)
    if config.data_parallel.use_parallel:
        model = nn.DataParallel(model, **config.data_parallel.parallel_params)

    # load snapshot
    path_to_experiment = os.path.join(config.checkpoint.experiment_path, config.checkpoint.snapshot_name)
    epoch_of_checkpoint = load_checkpoint(path_to_experiment, model, map_location=device, optimizer=None)

    # preprocessing, making dataset and loader
    normalize = A.Normalize(**config.img_norm_cfg)
    test_transform = A.Compose([
                                A.Resize(**config.resize, interpolation=cv.INTER_CUBIC),
                                normalize
                               ])
    test_transform = Transform(val=test_transform)
    test_dataset = make_dataset(config, val_transform=test_transform, mode='eval')
    test_loader = DataLoader(dataset=test_dataset, batch_size=100, shuffle=True, num_workers=2)

    # computing metrics
    auc_, eer, accur, apcer, bpcer, acer, fpr, tpr  = evaluate(model, test_loader,
                                                               config, device,
                                                               compute_accuracy=True)
    print((f'eer = {round(eer*100,2)}\n'
           + f'accuracy on test data = {round(np.mean(accur),3)}\n'
           + f'auc = {round(auc_,3)}\n'
           + f'apcer = {round(apcer*100,2)}\n'
           + f'bpcer = {round(bpcer*100,2)}\n'
           + f'acer = {round(acer*100,2)}\n'
           + f'checkpoint made on {epoch_of_checkpoint} epoch'))

    # draw graphics if needed
    if args.draw_graph:
        fnr = 1 - tpr
        plot_roc_curve(fpr, tpr, config)
        det_curve(fpr, fnr, eer, config)
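
For reference, the ACER and EER values printed above can be related to the raw rates as follows; this is a minimal sketch with illustrative function names (the repo's evaluate() remains the source of truth):

import numpy as np

def acer_from_rates(apcer, bpcer):
    # ACER is the average of APCER and BPCER (ISO/IEC 30107-3)
    return (apcer + bpcer) / 2

def eer_from_roc(fpr, tpr):
    # EER is the operating point where FPR equals FNR (= 1 - TPR);
    # a common approximation picks the threshold minimizing |FPR - FNR|
    fnr = 1 - tpr
    idx = np.nanargmin(np.abs(fpr - fnr))
    return (fpr[idx] + fnr[idx]) / 2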
Example #3
def main():
    # parse arguments
    parser = argparse.ArgumentParser(description='converting model to onnx')
    parser.add_argument('--GPU',
                        type=int,
                        default=0,
                        required=False,
                        help='specify which gpu to use')
    parser.add_argument('--config',
                        type=str,
                        default=None,
                        required=True,
                        help='path to configuration file')
    parser.add_argument('--model_path',
                        type=str,
                        default='MobileNetv3.onnx',
                        required=False,
                        help='path where to save the model in onnx format')
    parser.add_argument('--num_layers',
                        type=int,
                        default=16,
                        required=False,
                        help='number of layers in the model, used to create '
                             'the required number of input names')
    parser.add_argument('--img_size',
                        type=int,
                        nargs=2,
                        default=(128, 128),
                        required=False,
                        help='height and width of the image to resize to')
    parser.add_argument(
        '--device',
        type=str,
        default='cuda',
        help='if you want to eval model on cpu, pass "cpu" param')
    args = parser.parse_args()
    # read config
    path_to_config = args.config
    config = read_py_config(path_to_config)
    device = f'cuda:{args.GPU}' if args.device == 'cuda' else 'cpu'
    image_size = tuple(args.img_size)
    save_path = args.model_path
    num_layers = args.num_layers
    export_onnx(config,
                device=device,
                num_layers=num_layers,
                img_size=image_size,
                save_path=save_path)
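
export_onnx is provided by the repository; as a rough sketch of what a minimal export can look like with torch.onnx.export, assuming a single 3-channel NCHW input (the repo's version additionally derives per-layer input names from --num_layers):

import torch

def export_onnx_sketch(config, device, img_size, save_path):
    # Minimal ONNX export sketch; the input layout and tensor names are assumptions
    model = build_model(config, device=device, strict=True, mode='convert')
    model.eval()
    dummy_input = torch.rand(1, 3, *img_size, device=device)
    torch.onnx.export(model, dummy_input, save_path,
                      input_names=['input'], output_names=['output'])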
Example #4
def main():
    """Prepares data for the antispoofing recognition demo"""

    parser = argparse.ArgumentParser(description='antispoofing recognition live demo script')
    parser.add_argument('--video', type=str, default=None, help='Input video')
    parser.add_argument('--cam_id', type=int, default=-1, help='Input cam')
    parser.add_argument('--config', type=str, default=None, required=False,
                        help='Configuration file')
    parser.add_argument('--fd_model', type=str, required=True)
    parser.add_argument('--fd_thresh', type=float, default=0.6, help='Threshold for FD')
    parser.add_argument('--spoof_thresh', type=float, default=0.4,
                        help='Threshold for the spoof/real decision. The lower it is, the more readily the model predicts spoof')
    parser.add_argument('--spf_model', type=str, default=None,
                        help='path to .pth checkpoint of model or .xml IR OpenVINO model', required=True)
    parser.add_argument('--device', type=str, default='CPU')
    parser.add_argument('--GPU', type=int, default=0, help='specify which GPU to use')
    parser.add_argument('-l', '--cpu_extension',
                        help='MKLDNN (CPU)-targeted custom layers. Absolute path to a shared library '
                             'with the kernels implementation.', type=str, default=None)
    parser.add_argument('--write_video', action='store_true',
                        help='if set, the video of the demo will be recorded')
    args = parser.parse_args()
    device = args.device + f':{args.GPU}' if args.device == 'cuda' else 'cpu'
    write_video = args.write_video

    if args.cam_id >= 0:
        log.info('Reading from cam {}'.format(args.cam_id))
        cap = cv.VideoCapture(args.cam_id)
        cap.set(cv.CAP_PROP_FRAME_WIDTH, 1280)
        cap.set(cv.CAP_PROP_FRAME_HEIGHT, 720)
        cap.set(cv.CAP_PROP_FOURCC, cv.VideoWriter_fourcc(*'MJPG'))
    else:
        assert args.video
        log.info('Reading from {}'.format(args.video))
        cap = cv.VideoCapture(args.video)
        cap.set(cv.CAP_PROP_FOURCC, cv.VideoWriter_fourcc(*'MJPG'))
    assert cap.isOpened()
    face_detector = FaceDetector(args.fd_model, args.fd_thresh, args.device, args.cpu_extension)
    if args.spf_model.endswith('pth.tar'):
        if not args.config:
            raise ValueError('You should pass config file to work with a Pytorch model')
        config = utils.read_py_config(args.config)
        spoof_model = utils.build_model(config, args, strict=True, mode='eval')
        spoof_model = TorchCNN(spoof_model, args.spf_model, config, device=device)
    else:
        assert args.spf_model.endswith('.xml')
        spoof_model = VectorCNN(args.spf_model)
    # running demo
    run(args, cap, face_detector, spoof_model, write_video)
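
run() is defined elsewhere in the repository; the sketch below only illustrates the general shape of such a demo loop. The detection format and the get_detections()/forward() method names are assumptions, not taken from the repo:

def run_sketch(cap, face_detector, spoof_model, spoof_thresh=0.4):
    # Illustrative loop: detect faces, classify each crop as real/spoof,
    # draw the result, and quit on 'q'. Interfaces are assumed, see above.
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break
        for bbox, _ in face_detector.get_detections(frame):   # assumed (bbox, score) pairs
            left, top, right, bottom = bbox
            crop = frame[top:bottom, left:right]
            probs = spoof_model.forward([crop])[0]             # assumed [real, spoof] probabilities
            label = 'spoof' if probs[1] > spoof_thresh else 'real'
            cv.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
            cv.putText(frame, label, (left, top - 10), cv.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
        cv.imshow('antispoofing demo', frame)
        if cv.waitKey(1) & 0xFF == ord('q'):
            break
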
def main():
    """Prepares data for the accuracy convertation checker"""
    parser = argparse.ArgumentParser(description='antispoofing recognition live demo script')
    parser.add_argument('--config', type=str, default=None, required=True,
                        help='Configuration file')
    parser.add_argument('--spf_model_openvino', type=str, default=None,
                        help='path to .xml IR OpenVINO model', required=True)
    parser.add_argument('--spf_model_torch', type=str, default=None,
                        help='path to .pth.tar checkpoint', required=True)
    parser.add_argument('--device', type=str, default='CPU')

    args = parser.parse_args()
    config = utils.read_py_config(args.config)
    assert args.spf_model_openvino.endswith('.xml') and args.spf_model_torch.endswith('.pth.tar')
    spoof_model_torch = utils.build_model(config, args.device.lower(), strict=True, mode='eval')
    spoof_model_torch = TorchCNN(spoof_model_torch, args.spf_model_torch, config, device=args.device.lower())
    spoof_model_openvino = VectorCNN(args.spf_model_openvino)
    # running checker
    avg_diff = run(spoof_model_torch, spoof_model_openvino)
    print((f'mean difference on the first predicted class : {avg_diff[0]}\n'
           + f'mean difference on the second predicted class : {avg_diff[1]}'))
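
The run() used by this checker returns per-class mean differences; a minimal sketch of that comparison, assuming both models produce per-class outputs of the same shape for the same inputs:

import numpy as np

def mean_class_difference(torch_outputs, openvino_outputs):
    # Average absolute difference between the two models' outputs, per class
    diff = np.abs(np.asarray(torch_outputs) - np.asarray(openvino_outputs))
    return diff.mean(axis=0)   # -> [mean diff for class 0, mean diff for class 1]
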
def main():
    # parse arguments
    parser = argparse.ArgumentParser(description='antispoofing training')
    parser.add_argument('--GPU',
                        type=int,
                        default=0,
                        help='specify which gpu to use')
    parser.add_argument('--save_checkpoint',
                        type=lambda x: x.lower() in ('true', '1'),
                        default=True,
                        help='whether or not to save your model (pass "False" to disable)')
    parser.add_argument('--config',
                        type=str,
                        default=None,
                        required=True,
                        help='Configuration file')
    parser.add_argument(
        '--device',
        type=str,
        default='cuda',
        choices=['cuda', 'cpu'],
        help='if you want to train model on cpu, pass "cpu" param')
    args = parser.parse_args()

    # read config, manage device and arguments
    path_to_config = args.config
    config = read_py_config(path_to_config)
    device = args.device + f':{args.GPU}' if args.device == 'cuda' else 'cpu'
    if config.data_parallel.use_parallel:
        device = f'cuda:{config.data_parallel.parallel_params.output_device}'
    if config.multi_task_learning and config.dataset != 'celeba_spoof':
        raise NotImplementedError(
            'Multi-task learning is available for celeba_spoof only. '
            'Please switch it off in the config file.')
    # launch training, validation, testing
    train(config, device, args.save_checkpoint)
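
All of the scripts above rely on read_py_config loading a Python config whose nested dicts are exposed via attribute access; a minimal illustrative sketch of such a file, restricted to the fields these examples touch, with placeholder values:

# configs/config.py (illustrative sketch; values are placeholders)
resize = dict(height=128, width=128)
img_norm_cfg = dict(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
checkpoint = dict(experiment_path='./logs', snapshot_name='MobileNetv3.pth.tar')
data_parallel = dict(use_parallel=False,
                     parallel_params=dict(device_ids=[0], output_device=0))
dataset = 'celeba_spoof'
multi_task_learning = False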