Example #1
    def __init__(self, model_root, device, fa_model, input_size=[112, 112]):

        # define data preprocessing
        self.transform = transforms.Compose([
            transforms.Resize([
                int(128 * input_size[0] / 112),
                int(128 * input_size[0] / 112)
            ]),
            transforms.CenterCrop([input_size[0], input_size[1]]),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
        ])
        # ensure that the horizontally flipped image maps to the same identity
        self.insurance = transforms.Compose([
            transforms.Resize([
                int(128 * input_size[0] / 112),
                int(128 * input_size[0] / 112)
            ]),
            transforms.CenterCrop([input_size[0], input_size[1]]),
            transforms.RandomHorizontalFlip(p=1),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
        ])

        # initialize the face verification model
        self.fa_model = fa_model
        self.model = IR_50(input_size)
        self.model_root = model_root
        model_path = "{}/backbone_ir50_ms1m_epoch63.pth".format(model_root)
        self.model.load_state_dict(torch.load(model_path))
        self.model.to(device)
        self.model.eval()

        self.input_size = input_size
        self.device = device
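
The two pipelines above differ only in the forced horizontal flip; embedding an image through both and fusing the results is a common test-time augmentation. A minimal sketch of how they might be combined (the embed method name is hypothetical, not part of the original class):

    def embed(self, pil_image):
        # hedged sketch: fuse the embeddings of the image and its mirrored copy,
        # then L2-normalize, mirroring the flip-TTA pattern in Example #2 below
        x = self.transform(pil_image).unsqueeze(0).to(self.device)
        x_flip = self.insurance(pil_image).unsqueeze(0).to(self.device)
        with torch.no_grad():
            emb = self.model(x) + self.model(x_flip)
        return emb / emb.norm(dim=1, keepdim=True)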
Example #2
def perform_val(device,
                model_path,
                embedding_size,
                batch_size,
                carray,
                issame,
                nrof_folds=10,
                tta=True):

    backbone = IR_50((112, 112))
    checkpoint = torch.load(model_path)
    backbone.load_state_dict(checkpoint)
    backbone = backbone.to(device)
    backbone.eval()  # switch to evaluation mode

    idx = 0
    embeddings = np.zeros([len(carray), embedding_size])
    with torch.no_grad():
        while idx + batch_size <= len(carray):
            batch = torch.tensor(carray[idx:idx + batch_size])
            if tta:
                ccropped = ccrop_batch(batch)
                flipped = hflip_batch(ccropped)
                emb_batch = backbone(ccropped.to(device)).cpu() + backbone(
                    flipped.to(device)).cpu()
                embeddings[idx:idx + batch_size] = l2_norm(emb_batch)
            else:
                ccropped = ccrop_batch(batch)
                embeddings[idx:idx + batch_size] = l2_norm(
                    backbone(ccropped.to(device))).cpu()
            idx += batch_size
        if idx < len(carray):
            batch = torch.tensor(carray[idx:])
            if tta:
                ccropped = ccrop_batch(batch)
                flipped = hflip_batch(ccropped)
                emb_batch = backbone(ccropped.to(device)).cpu() + backbone(
                    flipped.to(device)).cpu()
                embeddings[idx:] = l2_norm(emb_batch)
            else:
                ccropped = ccrop_batch(batch)
                embeddings[idx:] = l2_norm(backbone(ccropped.to(device))).cpu()

    tpr, fpr, accuracy, best_thresholds = evaluate(embeddings, issame,
                                                   nrof_folds)
    buf = gen_plot(fpr, tpr)
    roc_curve = Image.open(buf)
    roc_curve_tensor = transforms.ToTensor()(roc_curve)

    return accuracy.mean(), best_thresholds.mean(), roc_curve_tensor
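
A hedged usage sketch for perform_val (the checkpoint path and embedding size of 512 are assumptions; carray is expected as an N x 3 x 112 x 112 array scaled to [-1, 1], with issame holding the N/2 boolean pair labels):

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
acc, best_thr, roc_tensor = perform_val(device,
                                        "./weights/backbone_ir50_ms1m_epoch63.pth",
                                        embedding_size=512,
                                        batch_size=64,
                                        carray=carray,
                                        issame=issame)
print("accuracy: {:.4f}, best threshold: {:.4f}".format(acc, best_thr))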
Example #3
def extract_feature(path_img1, path_img2, model_root, input_size=[112, 112]):

    # set device
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # pre-requisites
    assert os.path.exists(model_root)
    print('Backbone Model Root:', model_root)

    # define data preprocessing
    transform = transforms.Compose([
        transforms.Resize(
            [int(128 * input_size[0] / 112),
             int(128 * input_size[0] / 112)]),  # smaller side resized
        transforms.CenterCrop([input_size[0], input_size[1]]),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ])

    # load model from a checkpoint
    model_path = "{}/backbone_ir50_ms1m_epoch63.pth".format(model_root)
    print("Loading model Checkpoint '{}'".format(model_path))
    model = IR_50(input_size)
    model.load_state_dict(torch.load(model_path))
    model.to(device)
    model.eval()

    # align and warp images
    input1 = align_(path_img1, input_size[0], model_root)
    input2 = align_(path_img2, input_size[0], model_root)
    if input1 == 4 or input2 == 4:
        return 4

    # transform to torch, norm, extract features
    input1 = transform(input1).unsqueeze(0).to(device)
    output1 = model(input1)
    output1 = l2_norm(output1)

    input2 = transform(input2).unsqueeze(0).to(device)
    output2 = model(input2)
    output2 = l2_norm(output2)

    return output1, output2
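
Because extract_feature L2-normalizes both outputs, cosine similarity reduces to a dot product. A usage sketch (image paths are placeholders):

features = extract_feature("img1.jpg", "img2.jpg", "./weights")
if features != 4:  # extract_feature returns the sentinel 4 when alignment fails
    emb1, emb2 = features
    print("cosine similarity:", (emb1 * emb2).sum().item())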
Example #4
    def __init__(self, cpu=True, crop_size=112, weights_path="./weights/backbone_ir50_asia.pth"):
        self.cpu = cpu
        self.crop_size = crop_size
        self.weights_path = weights_path

        torch.set_grad_enabled(False)
        self.device = torch.device('cpu' if self.cpu else 'cuda:0')

        # Feature Extraction Model
        self.arcface_r50_asian = IR_50([self.crop_size, self.crop_size])
        self.arcface_r50_asian.load_state_dict(torch.load(weights_path, map_location='cpu' if self.cpu else 'cuda'))
        self.arcface_r50_asian.eval()
        self.arcface_r50_asian.to(self.device)

        # Align
        self.scale = self.crop_size / 112.
        self.reference = get_reference_facial_points(default_square=True) * self.scale

        # Facial Detection Model
        self.face_detector = FaceDetector()
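
        # (fragment of a separate comparison method from the same class; input1/
        # input2 and warped_face1/warped_face2 come from detection and alignment
        # code that is not shown here)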
        embedding1 = self.arcface_r50_asian(input1)
        embedding2 = self.arcface_r50_asian(input2)

        embedding1 = embedding1.detach().numpy()
        print(embedding1.shape)
        embedding2 = embedding2.detach().numpy()
        cosin = cosine_similarity(self.l2_normalize(embedding1), self.l2_normalize(embedding2))
        distance = euclidean_distances(embedding1, embedding2)

        return cosin[0, 0], distance[0, 0], warped_face1, warped_face2
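
The l2_normalize helper called above is not shown; a minimal version consistent with that call (an assumption about the original implementation, with numpy imported as np):

def l2_normalize(x, axis=1, eps=1e-10):
    # row-wise L2 normalization of a 2-D embedding array
    return x / (np.linalg.norm(x, axis=axis, keepdims=True) + eps)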


Example #5
if __name__ == '__main__':
    # extraction model
    device = torch.device('cpu')
    arcface_r50_asian = IR_50([112, 112])
    arcface_r50_asian.load_state_dict(torch.load(
        './weights/backbone_ir50_asia.pth',
        map_location='cpu'))
    arcface_r50_asian.eval()
    arcface_r50_asian.to(device)

    crop_size = 112
    scale = crop_size / 112.
    reference = get_reference_facial_points(default_square=True) * scale

    # face detector
    face_detector = FaceDetector()

    image_1 = cv2.imread('./4/id.png')
    image_2 = cv2.imread('./4/selfie.png')
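
    # The snippet ends after loading the two images. A hedged continuation,
    # assuming the crops are already aligned 112x112 BGR faces (detection and
    # warping omitted) and numpy is imported as np:
    def to_input(img_bgr):
        # convert an aligned BGR crop to the RGB, [-1, 1]-normalized CHW tensor
        # layout the backbone expects
        img = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB).astype(np.float32)
        img = (img - 127.5) / 127.5
        return torch.from_numpy(img.transpose(2, 0, 1)).unsqueeze(0)

    emb1 = arcface_r50_asian(to_input(image_1).to(device))
    emb2 = arcface_r50_asian(to_input(image_2).to(device))
    print(torch.nn.functional.cosine_similarity(emb1, emb2).item())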
Example #6
def main():
    args = parse_args()
    update_config(cfg, args)
    # assert args.text_arch

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    # Set the random seed manually for reproducibility.
    np.random.seed(cfg.SEED)
    torch.manual_seed(cfg.SEED)
    torch.cuda.manual_seed_all(cfg.SEED)

    # Loss
    criterion = FocalLoss().cuda()

    # load arch
    genotype = eval(
        "Genotype(normal=[('dil_conv_5x5', 1), ('dil_conv_3x3', 0), ('dil_conv_5x5', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('sep_conv_3x3', 2), ('dil_conv_3x3', 2), ('max_pool_3x3', 1)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 1), ('max_pool_3x3', 0), ('dil_conv_5x5', 2), ('max_pool_3x3', 1), ('dil_conv_5x5', 3), ('dil_conv_3x3', 2), ('dil_conv_5x5', 4), ('dil_conv_5x5', 2)], reduce_concat=range(2, 6))"
    )

    model = IR_50(cfg.MODEL.NUM_CLASSES)
    # model = Network(cfg.MODEL.INIT_CHANNELS, cfg.MODEL.NUM_CLASSES, cfg.MODEL.LAYERS, genotype)
    model = model.cuda()

    # optimizer = optim.Adam(
    #     model.parameters(),
    #     lr=cfg.TRAIN.LR
    # )
    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

    # resume && make log dir and logger
    if args.load_path and os.path.exists(args.load_path):
        checkpoint_file = args.load_path
        assert os.path.exists(checkpoint_file)
        checkpoint = torch.load(checkpoint_file)

        # load checkpoint
        begin_epoch = checkpoint['epoch']
        last_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        best_eer = checkpoint['best_eer']
        optimizer.load_state_dict(checkpoint['optimizer'])
        args.path_helper = checkpoint['path_helper']

        # begin_epoch = cfg.TRAIN.BEGIN_EPOCH
        # last_epoch = -1
        # best_eer = 1.0
        # del checkpoint['state_dict']['classifier.weight']
        # del checkpoint['state_dict']['classifier.bias']
        # model.load_state_dict(checkpoint['state_dict'], strict=False)
        # # best_eer = checkpoint['best_eer']
        # # optimizer.load_state_dict(checkpoint['optimizer'])
        # exp_name = args.cfg.split('/')[-1].split('.')[0]
        # args.path_helper = set_path('/content/drive/My Drive/zalo/AutoSpeech/logs_scratch', exp_name)

        logger = create_logger(args.path_helper['log_path'])
        logger.info("=> loaded checkpoint '{}'".format(checkpoint_file))
    else:
        exp_name = args.cfg.split('/')[-1].split('.')[0]
        args.path_helper = set_path('logs_scratch', exp_name)
        logger = create_logger(args.path_helper['log_path'])
        begin_epoch = cfg.TRAIN.BEGIN_EPOCH
        best_eer = 1.0
        last_epoch = -1
    logger.info(args)
    logger.info(cfg)
    logger.info(f"selected architecture: {genotype}")
    logger.info("Number of parameters: {}".format(count_parameters(model)))

    # dataloader
    train_dataset = DeepSpeakerDataset(Path(cfg.DATASET.DATA_DIR),
                                       cfg.DATASET.SUB_DIR,
                                       cfg.DATASET.PARTIAL_N_FRAMES)
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=cfg.TRAIN.BATCH_SIZE,
        num_workers=cfg.DATASET.NUM_WORKERS,
        pin_memory=True,
        shuffle=True,
        drop_last=True,
    )
    test_dataset_verification = VoxcelebTestset(Path(cfg.DATASET.DATA_DIR),
                                                cfg.DATASET.PARTIAL_N_FRAMES)
    test_loader_verification = torch.utils.data.DataLoader(
        dataset=test_dataset_verification,
        batch_size=1,
        num_workers=cfg.DATASET.NUM_WORKERS,
        pin_memory=True,
        shuffle=False,
        drop_last=False,
    )

    # training setting
    writer_dict = {
        'writer': SummaryWriter(args.path_helper['log_path']),
        'train_global_steps': begin_epoch * len(train_loader),
        'valid_global_steps': begin_epoch // cfg.VAL_FREQ,
    }

    # training loop
    # lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
    #     optimizer, cfg.TRAIN.END_EPOCH, cfg.TRAIN.LR_MIN,
    #     last_epoch=last_epoch
    # )

    for epoch in tqdm(range(begin_epoch, cfg.TRAIN.END_EPOCH),
                      desc='train progress'):
        model.train()
        model.drop_path_prob = cfg.MODEL.DROP_PATH_PROB * epoch / cfg.TRAIN.END_EPOCH

        train_from_scratch(cfg, model, optimizer, train_loader, criterion,
                           epoch, writer_dict)

        if epoch in (210, 240, 270):
            schedule_lr(optimizer)

        if epoch % cfg.VAL_FREQ == 0 or epoch == cfg.TRAIN.END_EPOCH - 1:
            # eer = validate_verification(cfg, model, test_loader_verification)

            # # remember best acc@1 and save checkpoint
            # is_best = eer < best_eer
            # best_eer = min(eer, best_eer)

            # save
            logger.info('=> saving checkpoint to {}'.format(
                args.path_helper['ckpt_path']))
            print('=> saving checkpoint to {}'.format(
                args.path_helper['ckpt_path']))
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_eer': best_eer,
                    'optimizer': optimizer.state_dict(),
                    'path_helper': args.path_helper
                }, True, args.path_helper['ckpt_path'],
                'checkpoint_{}.pth'.format(epoch))
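
The schedule_lr call above comes from elsewhere in the codebase; a minimal step-decay version consistent with its use at epochs 210/240/270 (an assumption about the original) would be:

def schedule_lr(optimizer, factor=0.1):
    # scale every parameter group's learning rate down at each milestone epoch
    for group in optimizer.param_groups:
        group['lr'] *= factor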
Example #7
        '--weight_path',
        default=
        "/home/phamvanhanh/PycharmProjects/FaceVerification/weights/backbone_ir50_asia.pth",
        type=str)
    parser.add_argument('-cpu', '--cpu', default=True, type=bool)
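    # caution: argparse's type=bool turns any non-empty string (including
    # "False") into True, so passing --cpu False still yields True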
    parser.add_argument('-crop_size', '--crop_size', default=112, type=int)
    args = parser.parse_args()

    dataset_folder = args.dataset_folder
    embeddings_folder = args.embeddings_folder

    torch.set_grad_enabled(False)
    device = torch.device('cpu' if args.cpu else 'cuda:0')

    # feature extraction model
    arcface_r50_asian = IR_50([args.crop_size, args.crop_size])
    arcface_r50_asian.load_state_dict(
        torch.load(args.weight_path,
                   map_location='cpu' if args.cpu else 'cuda'))
    arcface_r50_asian.eval()
    arcface_r50_asian.to(device)

    folders = os.listdir(dataset_folder)
    folders.sort(key=lambda x: int(x.split(".")[0]))

    if not os.path.isdir(embeddings_folder):
        os.mkdir(embeddings_folder)

    for subfolder in tqdm(folders):
        embeddings = list()
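        # The loop body is truncated here. A hedged completion, assuming cv2 and
        # numpy (as np) are imported, each subfolder holds pre-aligned 112x112
        # face crops, and a to_input-style helper like the one sketched in
        # Example #5 is available:
        subdir = os.path.join(dataset_folder, subfolder)
        for name in sorted(os.listdir(subdir)):
            img = cv2.imread(os.path.join(subdir, name))
            emb = arcface_r50_asian(to_input(img).to(device))
            embeddings.append(emb.squeeze(0).cpu().numpy())
        np.save(os.path.join(embeddings_folder, subfolder + ".npy"),
                np.stack(embeddings))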
Example #8
def main():
    args = parse_args()
    update_config(cfg, args)
    if args.load_path is None:
        raise AttributeError("Please specify load path.")

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    # Set the random seed manually for reproducibility.
    np.random.seed(cfg.SEED)
    torch.manual_seed(cfg.SEED)
    torch.cuda.manual_seed_all(cfg.SEED)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # model and optimizer
    if cfg.MODEL.NAME == 'model':
        if args.load_path and os.path.exists(args.load_path):
            checkpoint = torch.load(args.load_path)
            # genotype = checkpoint['genotype']
            genotype = eval(
                "Genotype(normal=[('dil_conv_5x5', 1), ('dil_conv_3x3', 0), ('dil_conv_5x5', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('sep_conv_3x3', 2), ('dil_conv_3x3', 2), ('max_pool_3x3', 1)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 1), ('max_pool_3x3', 0), ('dil_conv_5x5', 2), ('max_pool_3x3', 1), ('dil_conv_5x5', 3), ('dil_conv_3x3', 2), ('dil_conv_5x5', 4), ('dil_conv_5x5', 2)], reduce_concat=range(2, 6))"
            )
        else:
            raise AssertionError('Please specify the model to evaluate')
        model = IR_50(1)
        # model = Network(cfg.MODEL.INIT_CHANNELS, cfg.MODEL.NUM_CLASSES, cfg.MODEL.LAYERS, genotype)
        model.drop_path_prob = 0.0
    else:
        model = eval('resnet.{}(num_classes={})'.format(
            cfg.MODEL.NAME, cfg.MODEL.NUM_CLASSES))
    model = model.to(device)

    nb_params = sum([param.view(-1).size()[0] for param in model.parameters()])
    print('nb_params: {}'.format(nb_params))

    # resume && make log dir and logger
    if args.load_path and os.path.exists(args.load_path):
        checkpoint = torch.load(args.load_path, map_location="cpu")

        # load checkpoint
        del checkpoint['state_dict']['classifier.weight']
        del checkpoint['state_dict']['classifier.bias']
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        args.path_helper = checkpoint['path_helper']

        logger = create_logger(os.path.dirname(args.load_path))
        logger.info("=> loaded checkpoint '{}'".format(args.load_path))
    else:
        raise AssertionError('Please specify the model to evaluate')
    logger.info(args)
    logger.info(cfg)

    # dataloader
    # test_dataset_verification = VoxcelebTestset(
    #     Path(cfg.DATASET.DATA_DIR), cfg.DATASET.PARTIAL_N_FRAMES
    # )
    test_dataset_verification = VoxcelebTestsetZalo(
        Path(cfg.DATASET.DATA_DIR), cfg.DATASET.PARTIAL_N_FRAMES)
    test_loader_verification = torch.utils.data.DataLoader(
        dataset=test_dataset_verification,
        batch_size=1,
        num_workers=cfg.DATASET.NUM_WORKERS,
        pin_memory=True,
        shuffle=False,
        drop_last=False,
    )

    validate_verification(cfg, model, test_loader_verification)
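
For reference, validate_verification reports an equal error rate; a minimal standalone EER computation of the kind it presumably performs (an assumption), given boolean pair labels and similarity scores, with numpy as np:

def compute_eer(labels, scores):
    # EER is the operating point where the false-positive and false-negative
    # rates cross on the ROC curve
    from sklearn.metrics import roc_curve
    fpr, tpr, _ = roc_curve(labels, scores)
    fnr = 1 - tpr
    i = np.nanargmin(np.abs(fnr - fpr))
    return (fpr[i] + fnr[i]) / 2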