def main():
    """Runs flops counter"""
    parser = argparse.ArgumentParser(
        description='Evaluation script for Face Recognition in PyTorch')
    parser.add_argument('--embed_size',
                        type=int,
                        default=128,
                        help='Size of the face embedding.')
    parser.add_argument('--model',
                        choices=list(models_backbones.keys()) +
                        list(models_landmarks.keys()),
                        type=str,
                        default='rmnet')
    args = parser.parse_args()

    with torch.no_grad():
        if args.model in models_landmarks.keys():
            model = models_landmarks[args.model]()
        else:
            model = models_backbones[args.model](
                embedding_size=args.embed_size, feature=True)

        flops, params = get_model_complexity_info(model,
                                                  model.get_input_res(),
                                                  as_strings=True,
                                                  print_per_layer_stat=True)
        print('Flops:  {}'.format(flops))
        print('Params: {}'.format(params))
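
# Possible invocation (the script name here is an assumption; the flags come
# from the parser above):
#   python count_flops.py --model rmnet --embed_size 128
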
def main():
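    """Converts a face recognition snapshot from PyTorch to ONNX."""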
    parser = argparse.ArgumentParser(
        description='Conversion script for FR models from PyTorch to ONNX')
    parser.add_argument('--embed_size',
                        type=int,
                        default=128,
                        help='Size of the face embedding.')
    parser.add_argument('--snap',
                        type=str,
                        required=True,
                        help='Snapshot to convert.')
    parser.add_argument('--device',
                        '-d',
                        default=-1,
                        type=int,
                        help='Device for model placement.')
    parser.add_argument('--output_dir',
                        default='./',
                        type=str,
                        help='Output directory.')
    parser.add_argument('--model',
                        choices=list(models_backbones.keys()) +
                        list(models_landmarks.keys()),
                        type=str,
                        default='rmnet')

    args = parser.parse_args()

    if args.model in models_landmarks.keys():
        model = models_landmarks[args.model]()
    else:
        model = models_backbones[args.model](embedding_size=args.embed_size,
                                             feature=True)

    model = load_model_state(model, args.snap, args.device, eval_state=True)
    input_var = torch.rand(1, 3, *model.get_input_res())
    # Derive the dump name from the snapshot file name (assumes a module-level
    # "import os"); the original slicing assumed a 3-character extension.
    dump_name = os.path.splitext(os.path.basename(args.snap))[0]

    # Honor --output_dir, which was parsed but never used in the original snippet.
    torch.onnx.export(model,
                      input_var,
                      os.path.join(args.output_dir, dump_name + '.onnx'),
                      verbose=True,
                      export_params=True)
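
# Possible invocation (script name and snapshot path are assumptions):
#   python convert_onnx.py --snap snapshots/model_rmnet.pt --model rmnet \
#       --output_dir ./onnx_models/
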
def parse_argument(argv):
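    """Parses command-line arguments for the embeddings-saving script."""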
    parser = argparse.ArgumentParser(
        description='Save embeddings to MegaFace features files')
    parser.add_argument('--model',
                        choices=models_backbones.keys(),
                        type=str,
                        default='rmnet',
                        help='Model type.')
    parser.add_argument('input_dir', help='Path to MegaFace Features')
    parser.add_argument('output_dir', help='Path to FaceScrub Features')
    parser.add_argument('--input_list',
                        default='list.txt',
                        type=str,
                        required=False)
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--embedding_size', type=int, default=128)
    parser.add_argument('--devices',
                        type=int,
                        nargs='+',
                        default=[0],
                        help='CUDA devices to use.')
    parser.add_argument('--snap',
                        type=str,
                        required=True,
                        help='Snapshot to evaluate.')
    parser.add_argument('--noises_list',
                        type=str,
                        default='',
                        required=False,
                        help='A list of the MegaFace or FaceScrub noises produced by insightface. '
                        'See https://github.com/deepinsight/insightface/blob/master/src/megaface/README.md')
    parser.add_argument('--file_ending',
                        help='Ending appended to the original photo files, e.g. '
                        '11084833664_0.jpg_LBP_100x100.bin => _LBP_100x100.bin',
                        default='_rmnet.bin')
    parser.add_argument('--trillion_format', action='store_true')
    return parser.parse_args(argv)
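
# parse_argument() is normally fed the process arguments, e.g.:
#   args = parse_argument(sys.argv[1:])  # assumes a module-level "import sys"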
Example #4
def main():
    """Runs flops counter"""
    parser = argparse.ArgumentParser(
        description='Evaluation script for Face Recognition in PyTorch')
    parser.add_argument('--embed_size',
                        type=int,
                        default=128,
                        help='Size of the face embedding.')
    parser.add_argument('--device',
                        type=int,
                        default=0,
                        help='Device to store the model.')
    parser.add_argument('--model',
                        choices=list(models_backbones.keys()) +
                        list(models_landmarks.keys()),
                        type=str,
                        default='rmnet')
    args = parser.parse_args()

    with torch.cuda.device(args.device), torch.no_grad():
        bs = 1
        if args.model in models_landmarks.keys():
            net = models_landmarks[args.model]()
        else:
            net = models_backbones[args.model](embedding_size=args.embed_size,
                                               feature=True)
        # Only the tensor shape matters for FLOPs counting, so an
        # uninitialized tensor is fine here.
        batch = torch.Tensor(bs, 3, *net.get_input_res())
        model = add_flops_counting_methods(net)

        model.cuda().eval().start_flops_count()
        output = model(batch.cuda())

        print(model)
        print('Output shape: {}'.format(list(output.shape)))
        print('Flops:  {}'.format(
            flops_to_string(model.compute_average_flops_cost())))
        print('Params: ' + get_model_parameters_number(model))
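
# Possible invocation (the script name here is an assumption):
#   python count_flops.py --model rmnet --embed_size 128 --device 0
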
def main():
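    """Evaluates a face recognition model with the PyTorch or IE engine."""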
    parser = argparse.ArgumentParser(
        description='Evaluation script for Face Recognition in PyTorch')
    parser.add_argument('--devices',
                        type=int,
                        nargs='+',
                        default=[0],
                        help='CUDA devices to use.')
    parser.add_argument('--embed_size',
                        type=int,
                        default=128,
                        help='Size of the face embedding.')
    parser.add_argument('--val_data_root',
                        dest='val',
                        required=True,
                        type=str,
                        help='Path to validation data.')
    parser.add_argument('--val_list',
                        dest='v_list',
                        required=True,
                        type=str,
                        help='Path to validation data image list.')
    parser.add_argument('--val_landmarks',
                        dest='v_land',
                        default='',
                        required=False,
                        type=str,
                        help='Path to landmarks for the test images.')
    parser.add_argument('--val_batch_size',
                        type=int,
                        default=8,
                        help='Validation batch size.')
    parser.add_argument('--snap',
                        type=str,
                        required=False,
                        help='Snapshot to evaluate.')
    parser.add_argument('--roc_fname', type=str, default='', help='ROC file.')
    parser.add_argument('--dump_embeddings',
                        action='store_true',
                        help='Dump embeddings to summary writer.')
    parser.add_argument('--dist',
                        choices=['l2', 'cos'],
                        type=str,
                        default='cos',
                        help='Distance metric (l2 or cos).')
    parser.add_argument('--flipped_emb',
                        action='store_true',
                        help='Flipped embedding concatenation trick.')
    parser.add_argument('--show_failed',
                        action='store_true',
                        help='Show misclassified pairs.')
    parser.add_argument('--model',
                        choices=models_backbones.keys(),
                        type=str,
                        default='rmnet',
                        help='Model type.')
    parser.add_argument('--engine',
                        choices=['pt', 'ie'],
                        type=str,
                        default='pt',
                        help='Framework to use for eval.')

    # IE-related options
    parser.add_argument('--fr_model', type=str, required=False)
    parser.add_argument('--lm_model', type=str, required=False)
    parser.add_argument('-pp',
                        '--plugin_dir',
                        type=str,
                        default=None,
                        help='Path to a plugin folder')
    args = parser.parse_args()

    if args.engine == 'pt':
        assert args.snap is not None, 'To evaluate a PyTorch snapshot, please specify the --snap option.'
        with torch.cuda.device(args.devices[0]):
            data, embeddings_fun = load_test_dataset(args)
            model = models_backbones[args.model](
                embedding_size=args.embed_size, feature=True)
            model = load_model_state(model, args.snap, args.devices[0])
            evaluate(args, data, model, embeddings_fun, args.val_batch_size,
                     args.dump_embeddings, args.roc_fname, args.snap, True,
                     args.show_failed)
    else:
        from utils.ie_tools import load_ie_model

        assert args.fr_model is not None, 'To evaluate an IE model, please specify the --fr_model option.'
        fr_model = load_ie_model(args.fr_model, 'CPU', args.plugin_dir)
        lm_model = None
        if args.lm_model:
            lm_model = load_ie_model(args.lm_model, 'CPU', args.plugin_dir)
        input_size = tuple(fr_model.get_input_shape()[2:])

        lfw = LFW(args.val, args.v_list, args.v_land)
        if not lfw.use_landmarks or lm_model:
            lfw.transform = t.Compose(
                [ResizeNumpy(220),
                 CenterCropNumpy(input_size)])
            lfw.use_landmarks = False
        else:
            log.info('Using landmarks for the LFW images.')
            lfw.transform = t.Compose([ResizeNumpy(input_size)])

        evaluate(args,
                 lfw,
                 fr_model,
                 partial(compute_embeddings_lfw_ie, lm_model=lm_model),
                 val_batch_size=1,
                 dump_embeddings=False,
                 roc_fname='',
                 snap_name='',
                 verbose=True,
                 show_failed=False)
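
# Possible invocations (script name and paths are assumptions):
#   PyTorch engine:
#     python evaluate.py --engine pt --snap snapshots/model.pt \
#         --val_data_root data/lfw --val_list data/pairs.txt
#   Inference Engine:
#     python evaluate.py --engine ie --fr_model model.xml \
#         --val_data_root data/lfw --val_list data/pairs.txt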
Example #6
def main():
    """Creates a command line parser and starts training"""
    parser = ArgumentParserWithYaml(
        description='Training Face Recognition in PyTorch',
        fromfile_prefix_chars='@',
        epilog="Please note that you can parse parameters from a yaml file "
               "if you add @<path_to_yaml_file> to the command line.")

    # datasets configuration
    parser.add_argument('--train_dataset',
                        choices=['vgg', 'ms1m', 'trp', 'imdbface'],
                        type=str,
                        default='vgg',
                        help='Name of the train dataset.')
    parser.add_argument('--train_data_root',
                        dest='train',
                        required=True,
                        type=str,
                        help='Path to train data.')
    parser.add_argument('--train_list',
                        dest='t_list',
                        required=False,
                        type=str,
                        help='Path to train data image list.')
    parser.add_argument('--train_landmarks',
                        default='',
                        dest='t_land',
                        required=False,
                        type=str,
                        help='Path to landmarks for the train images.')

    parser.add_argument('--val_data_root',
                        dest='val',
                        required=True,
                        type=str,
                        help='Path to val data.')
    parser.add_argument('--val_step',
                        type=int,
                        default=1000,
                        help='Evaluate the model every val_step iterations within each epoch.')
    parser.add_argument('--val_list',
                        dest='v_list',
                        required=True,
                        type=str,
                        help='Path to validation data image list.')
    parser.add_argument('--val_landmarks',
                        dest='v_land',
                        default='',
                        required=False,
                        type=str,
                        help='Path to landmarks for test images.')

    # model configuration
    parser.add_argument('--model',
                        choices=models_backbones.keys(),
                        type=str,
                        default='mobilenet',
                        help='Model type.')
    parser.add_argument('--embed_size',
                        type=int,
                        default=256,
                        help='Size of the face embedding.')

    # optimizer configuration
    parser.add_argument('--train_batch_size',
                        type=int,
                        default=170,
                        help='Train batch size.')
    parser.add_argument('--epoch_total_num',
                        type=int,
                        default=30,
                        help='Number of epochs to train.')
    parser.add_argument('--lr', type=float, default=0.4, help='Learning rate.')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.9,
                        help='Momentum.')
    parser.add_argument('--weight_decay',
                        type=float,
                        default=0.0001,
                        help='Weight decay.')

    # loss configuration
    parser.add_argument('--mining_type',
                        choices=['focal', 'sv'],
                        type=str,
                        default='sv',
                        help='Hard mining method in loss.')
    parser.add_argument(
        '--t',
        type=float,
        default=1.1,
        help=
        't in support vector softmax. See https://arxiv.org/abs/1812.11317 for details'
    )
    parser.add_argument(
        '--gamma',
        type=float,
        default=2.,
        help=
        'Gamma in focal loss. See https://arxiv.org/abs/1708.02002 for details'
    )
    parser.add_argument('--m',
                        type=float,
                        default=0.35,
                        help='Margin size for AMSoftmax.')
    parser.add_argument('--s',
                        type=float,
                        default=30.,
                        help='Scale for AMSoftmax.')
    parser.add_argument('--margin_type',
                        choices=['cos', 'arc'],
                        type=str,
                        default='cos',
                        help='Margin type for AMSoftmax loss.')

    # other parameters
    parser.add_argument('--devices',
                        type=int,
                        nargs='+',
                        default=[0],
                        help='CUDA devices to use.')
    parser.add_argument('--val_batch_size',
                        type=int,
                        default=20,
                        help='Validation batch size.')
    parser.add_argument('--snap_folder',
                        type=str,
                        default='./snapshots/',
                        help='Folder to save snapshots.')
    parser.add_argument('--snap_prefix',
                        type=str,
                        default='FaceReidNet',
                        help='Prefix for snapshots.')
    parser.add_argument('--snap_to_resume',
                        type=str,
                        default=None,
                        help='Snapshot to resume.')
    parser.add_argument('--weighted', action='store_true')

    args = parser.parse_args()
    log.info('Arguments:\n' + pformat(args.__dict__))

    with torch.cuda.device(args.devices[0]):
        train(args)
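
# Possible invocations (script name and paths are assumptions):
#   python train.py --train_data_root data/vgg --val_data_root data/lfw \
#       --val_list data/pairs.txt --model mobilenet
#   # The same parameters can be read from a yaml file thanks to
#   # fromfile_prefix_chars='@':
#   python train.py @configs/train.yaml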
Example #7
def main():
    """Runs the accuracy check"""
    parser = argparse.ArgumentParser(
        description='Accuracy check script (pt vs caffe)')
    parser.add_argument('--embed_size',
                        type=int,
                        default=128,
                        help='Size of the face embedding.')
    parser.add_argument('--snap',
                        type=str,
                        required=True,
                        help='Snapshot to check.')
    parser.add_argument('--device',
                        '-d',
                        default=0,
                        type=int,
                        help='Device for model placement.')
    parser.add_argument('--model',
                        choices=list(models_backbones.keys()) +
                        list(models_landmarks.keys()),
                        type=str,
                        default='rmnet')

    # IE-related options
    parser.add_argument('--ie_model', type=str, required=True)
    parser.add_argument(
        "-l",
        "--cpu_extension",
        help="MKLDNN (CPU)-targeted custom layers. Absolute path to a shared "
        "library with the kernels implementation.",
        type=str,
        default=None)
    parser.add_argument("-pp",
                        "--plugin_dir",
                        help="Path to a plugin folder",
                        type=str,
                        default=None)
    parser.add_argument(
        "-d_ie",
        "--device_ie",
        help="Specify the target device to infer on; CPU, GPU, FPGA or MYRIAD "
        "is acceptable. The sample will look for a suitable plugin for the "
        "specified device (CPU by default).",
        default="CPU",
        type=str)

    args = parser.parse_args()

    max_err = 0.
    with torch.cuda.device(args.device):
        if args.model in models_landmarks.keys():
            # Instantiate the model (the original snippet was missing the call).
            pt_model = models_landmarks[args.model]()
        else:
            pt_model = models_backbones[args.model](
                embedding_size=args.embed_size, feature=True)
        pt_model = load_model_state(pt_model, args.snap, args.device)

        ie_model = load_ie_model(args.ie_model, args.device_ie,
                                 args.plugin_dir, args.cpu_extension)
        np.random.seed(0)

        for _ in tqdm(range(100)):
            input_img = np.random.randint(0,
                                          high=255,
                                          size=(*pt_model.get_input_res(), 3),
                                          dtype=np.uint8)
            # cv.COLOR_BGR2RGB and cv.COLOR_RGB2BGR perform the same channel
            # swap; use the flag that matches the variable name.
            input_bgr = cv.cvtColor(input_img, cv.COLOR_RGB2BGR)

            input_pt = torch.from_numpy(
                input_img.transpose(2, 0, 1).astype('float32') / 255.
            ).unsqueeze(0).cuda()
            pt_output = (pt_model(input_pt)).data.cpu().numpy().reshape(1, -1)
            ie_output = ie_model.forward(input_bgr).reshape(1, -1)

            # np.linalg.norm(x, np.inf) on a (1, N) matrix is the maximum row
            # sum, not the element-wise maximum, so compute l_inf directly.
            max_err = max(np.abs(pt_output - ie_output).max(), max_err)

    log.info('Max l_inf error: %e', max_err)
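
# Possible invocation (script name and paths are assumptions):
#   python check_accuracy.py --snap snapshots/model.pt --model rmnet \
#       --ie_model model.xml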