Example no. 1
def main():
    """Evaluate a trained Resnet18 triplet model on the LFW pairs protocol.

    Loads the checkpoint given by ``args.model_path``, embeds every LFW
    image pair, and prints TAR @ FAR at the requested ``args.far_target``.
    """
    lfw_dataroot = args.lfw
    model_path = args.model_path
    far_target = args.far_target

    # Pick the device first so the checkpoint and all tensors go to it.
    # The previous unconditional .cuda() calls crashed on CPU-only hosts.
    if torch.cuda.is_available():
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    # map_location keeps a CUDA-saved checkpoint loadable on a CPU host.
    checkpoint = torch.load(model_path, map_location=device)
    model = Resnet18Triplet(embedding_dimension=checkpoint['embedding_dimension'])
    model.load_state_dict(checkpoint['model_state_dict'])

    lfw_transforms = transforms.Compose([
        transforms.Resize(size=224),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.5157, 0.4062, 0.3550],
            std=[0.2858, 0.2515, 0.2433]
        )
    ])

    lfw_dataloader = torch.utils.data.DataLoader(
        dataset=LFWDataset(
            dir=lfw_dataroot,
            pairs_path='../datasets/LFW_pairs.txt',
            transform=lfw_transforms
        ),
        batch_size=256,
        num_workers=2,
        shuffle=False  # batch order must match the pairs file
    )

    model.to(device)
    model = model.eval()

    with torch.no_grad():
        # PairwiseDistance holds no parameters, so no device move is needed.
        l2_distance = PairwiseDistance(p=2)
        distances, labels = [], []

        progress_bar = enumerate(tqdm(lfw_dataloader))

        for batch_index, (data_a, data_b, label) in progress_bar:
            # .to(device) works on both CPU and GPU, unlike .cuda().
            data_a, data_b = data_a.to(device), data_b.to(device)

            output_a, output_b = model(data_a), model(data_b)
            distance = l2_distance.forward(output_a, output_b)  # Euclidean distance

            distances.append(distance.cpu().detach().numpy())
            labels.append(label.cpu().detach().numpy())

        # Flatten the per-batch arrays into single 1-D arrays.
        labels = np.array([sublabel for label in labels for sublabel in label])
        distances = np.array([subdist for distance in distances for subdist in distance])

        _, _, _, _, _, _, _, tar, far = evaluate_lfw(distances=distances, labels=labels, far_target=far_target)

        print("TAR: {:.4f}+-{:.4f} @ FAR: {:.4f}".format(np.mean(tar), np.std(tar), np.mean(far)))
Example no. 2
def set_model_architecture(model_architecture, pretrained,
                           embedding_dimension):
    """Instantiate the triplet-loss model for the requested architecture.

    Args:
        model_architecture: One of "resnet18", "resnet34", "resnet50",
            "resnet101", "resnet152", "inceptionresnetv2", "mobilenetv2".
        pretrained: Whether to load ImageNet-pretrained backbone weights.
        embedding_dimension: Size of the output embedding vector.

    Returns:
        The constructed model instance.

    Raises:
        ValueError: If ``model_architecture`` is not a supported name.
            (Previously an unknown name crashed with an opaque
            UnboundLocalError at the return statement.)
    """
    if model_architecture == "resnet18":
        model = Resnet18Triplet(embedding_dimension=embedding_dimension,
                                pretrained=pretrained)
    elif model_architecture == "resnet34":
        model = Resnet34Triplet(embedding_dimension=embedding_dimension,
                                pretrained=pretrained)
    elif model_architecture == "resnet50":
        model = Resnet50Triplet(embedding_dimension=embedding_dimension,
                                pretrained=pretrained)
    elif model_architecture == "resnet101":
        model = Resnet101Triplet(embedding_dimension=embedding_dimension,
                                 pretrained=pretrained)
    elif model_architecture == "resnet152":
        model = Resnet152Triplet(embedding_dimension=embedding_dimension,
                                 pretrained=pretrained)
    elif model_architecture == "inceptionresnetv2":
        model = InceptionResnetV2Triplet(
            embedding_dimension=embedding_dimension, pretrained=pretrained)
    elif model_architecture == "mobilenetv2":
        model = MobileNetV2Triplet(embedding_dimension=embedding_dimension,
                                   pretrained=pretrained)
    else:
        raise ValueError(
            "Unsupported model architecture: {}".format(model_architecture))
    print("Using {} model architecture.".format(model_architecture))

    return model
def main():
    """Evaluate a Resnet18 triplet checkpoint on face-aligned LFW pairs.

    Images are passed through a landmark-based alignment transform before
    the resize/normalize pipeline; TAR @ FAR is printed at the end.
    """
    lfw_dataroot = args.lfw
    model_path = args.model_path
    far_target = args.far_target
    batch_size = args.batch_size

    if torch.cuda.is_available():
        device = torch.device("cuda")
        print('Using GPU')
    else:
        device = torch.device("cpu")
        print('Using CPU')

    # map_location lets CUDA-saved checkpoints load on CPU-only hosts.
    checkpoint = torch.load(model_path, map_location=device)
    model = Resnet18Triplet(
        embedding_dimension=checkpoint['embedding_dimension'])
    model.load_state_dict(checkpoint['model_state_dict'])

    # Alignment geometry: width, height, and desiredLeftEye together control
    # where the face lands in the output window. A larger eye-y shifts the
    # face down; a larger eye-x zooms out (extreme values can flip the face;
    # keep it roughly within [0.2, 0.36]). Width != height makes the later
    # resize stretch the face; both exceed 224 to preserve image quality.
    desiredFaceHeight = 448
    desiredFaceWidth = 352
    desiredLeftEye = (0.28, 0.35)

    tf_align = transform_align(
        landmark_predictor_weight_path=landmark_predictor_weights,
        face_detector_path=face_detector_weights_path,
        desiredFaceWidth=desiredFaceWidth,
        desiredFaceHeight=desiredFaceHeight,
        desiredLeftEye=desiredLeftEye)

    lfw_transforms = transforms.Compose([
        tf_align,
        transforms.Resize(size=(224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.6068, 0.4517, 0.3800],
                             std=[0.2492, 0.2173, 0.2082])
    ])

    lfw_dataloader = torch.utils.data.DataLoader(
        dataset=LFWDataset(dir=lfw_dataroot,
                           pairs_path='../datasets/LFW_pairs.txt',
                           transform=lfw_transforms),
        # default = 256; 160 allows running under 2GB VRAM
        batch_size=batch_size,
        num_workers=2,
        shuffle=False)

    model.to(device)
    model = model.eval()

    with torch.no_grad():
        distance_metric = PairwiseDistance(p=2)
        batch_distances = []
        batch_labels = []

        for data_a, data_b, label in tqdm(lfw_dataloader):
            # .to(device) is a no-op on CPU and moves to GPU otherwise.
            embedding_a = model(data_a.to(device))
            embedding_b = model(data_b.to(device))

            # Euclidean distance between the two embeddings of each pair.
            pair_distance = distance_metric.forward(embedding_a, embedding_b)

            batch_distances.append(pair_distance.cpu().detach().numpy())
            batch_labels.append(label.cpu().detach().numpy())

        # Flatten the per-batch arrays into single 1-D arrays.
        labels = np.array([y for batch in batch_labels for y in batch])
        distances = np.array(
            [d for batch in batch_distances for d in batch])

        _, _, _, _, _, _, _, tar, far = evaluate_lfw(distances=distances,
                                                     labels=labels,
                                                     far_target=far_target)

        print("TAR: {:.4f}+-{:.4f} @ FAR: {:.4f}".format(
            np.mean(tar), np.std(tar), np.mean(far)))