# Example #1
def show_pairs(images, features, pairs):
    """Visualize verification pairs with distances, labels and correctness.

    Args:
        images: two-element sequence of image batches, one per pair half;
            both halves are denormalized in place for display.
        features: two-element sequence of per-image feature arrays
            (one row per pair).
        pairs: tensor of ground-truth same/different labels, one per pair.
    """
    # Euclidean distance between the two feature sets, one value per pair.
    pair_dists = np.sqrt(np.sum((features[0] - features[1]) ** 2, axis=1))

    ds_utils.denormalize(images[0])
    ds_utils.denormalize(images[1])

    # Annotate the second half of each pair with its distance value.
    images[1] = vis.add_error_to_images(
        images[1], pair_dists, size=2.0, thickness=2, vmin=0, vmax=1)
    # Overlay the ground-truth pair label.
    images[1] = vis.add_id_to_images(
        images[1], pairs.numpy(), size=1.2, thickness=2, color=(1, 0, 1))

    # Green marker when the thresholded distance agrees with the label,
    # red otherwise.  NOTE(review): decision threshold 0.4 is hard-coded.
    thresh = 0.4
    is_correct = (pair_dists < thresh) == pairs.cpu().numpy()
    marker_colors = []
    for ok in is_correct:
        marker_colors.append((0, 1, 0) if ok else (1, 0, 0))
    images[1] = vis.add_cirle_to_images(images[1], marker_colors)

    images[0] = vis._to_disp_images(images[0])

    # One grid row per pair half, stacked vertically.
    grid_rows = []
    for imgs in images:
        grid_rows.append(vis.make_grid(imgs,
                                       fx=0.75,
                                       fy=0.75,
                                       nCols=len(pair_dists),
                                       normalize=False))
    vis.vis_square(grid_rows, nCols=1, normalize=False)
# Example #2
        return np.concatenate(sizes)


if __name__ == '__main__':
    # Smoke test: build a MultiFaceDataset, iterate one DataLoader pass,
    # and display each batch with its landmarks overlaid.
    import torch
    from utils import vis
    from utils.nn import Batch
    from datasets import ds_utils
    from datasets import affectnet

    # Fixed seeds so repeated runs show the same samples.
    torch.manual_seed(0)
    torch.cuda.manual_seed_all(0)

    train = True
    source_datasets = [
        affectnet.AffectNet(train=train, max_samples=1000),
        # vggface2.VggFace2(train=train, max_samples=1000),
    ]
    multi_ds = MultiFaceDataset(source_datasets, train=True, max_samples=5000)
    print(multi_ds)

    loader = td.DataLoader(multi_ds, batch_size=40, shuffle=False,
                           num_workers=0)
    for data in loader:
        batch = Batch(data, gpu=False)
        # Work on a copy so the batch tensors stay normalized.
        inputs = batch.images.clone()
        ds_utils.denormalize(inputs)
        imgs = vis.add_landmarks_to_images(inputs.numpy(),
                                           batch.landmarks.numpy())
        # imgs = vis.add_pose_to_images(inputs.numpy(), batch.poses.numpy())
        # imgs = vis.add_emotion_to_images(imgs, batch.emotions.numpy())
        vis.vis_square(imgs, nCols=20, fx=0.6, fy=0.6, normalize=False)
# Example #3
    args = parser.parse_args()

    extract_features(st=args.st, nd=args.nd)


if __name__ == '__main__':
    # extract_main()
    # create_annotations(split='dev', num_ids=500)
    # extract_crops()

    # read_openface_csvs()
    # exit()

    # Smoke test: load cached VoxCeleb samples and display them batch by batch.
    from utils import vis, face_processing
    from utils.nn import Batch

    dataset = VoxCeleb(train=True, max_samples=50000, use_cache=True)
    print(dataset)
    loader = td.DataLoader(dataset, batch_size=40, shuffle=False,
                           num_workers=0)

    for data in loader:
        batch = Batch(data)
        print(batch.ids)
        # Undo input normalization in place so the pixels display correctly.
        ds_utils.denormalize(batch.images)
        # vis.show_images_in_batch(batch.images.detach().cpu())
        vis.vis_square(batch.images.detach().cpu(),
                       fx=0.7,
                       fy=0.7,
                       normalize=False)
        # print(item)