Example #1
0
def main(opt):
    """Demo: transfer face landmarks from a reference face to a target face.

    Builds COTR from `opt`, loads the checkpoint named by
    `opt.load_weights_path`, queries the model at the saved landmark
    locations of face 1, and shows the predicted locations on face 2.
    """
    net = build_model(opt)
    net = net.cuda()
    # Checkpoint is deserialized on CPU first; weights move to GPU with the model.
    state = torch.load(opt.load_weights_path,
                       map_location='cpu')['model_state_dict']
    utils.safe_load_weights(net, state)
    net = net.eval()

    reference = imageio.imread('./sample_data/imgs/face_1.png', pilmode='RGB')
    target = imageio.imread('./sample_data/imgs/face_2.png', pilmode='RGB')
    # First entry of the saved landmark array: (N, 2) query points in face 1.
    landmarks = np.load('./sample_data/face_landmarks.npy')[0]

    engine = SparseEngine(net, 32, mode='stretching')
    corrs = engine.cotr_corr_multiscale(reference,
                                        target,
                                        np.linspace(0.5, 0.0625, 4),
                                        1,
                                        queries_a=landmarks,
                                        force=False)

    # Side by side: annotated reference face vs. predicted landmarks on target.
    fig, (ax_ref, ax_tgt) = plt.subplots(1, 2)
    ax_ref.imshow(reference)
    ax_ref.scatter(*landmarks.T, s=1)
    ax_ref.title.set_text('Reference Face')
    ax_ref.axis('off')
    ax_tgt.imshow(target)
    ax_tgt.scatter(*corrs[:, 2:].T, s=1)
    ax_tgt.title.set_text('Target Face')
    ax_tgt.axis('off')
    plt.show()
Example #2
0
def main(opt):
    """Demo: paste a painting into a photo via 4-corner correspondences.

    Matches the four annotated painting corners from frame A into frame B
    with COTR, fits a homography from the replacement image to those
    matched corners, and composites the warped painting over frame B.
    """
    net = build_model(opt)
    net = net.cuda()
    # Checkpoint is deserialized on CPU first; weights move to GPU with the model.
    state = torch.load(opt.load_weights_path,
                       map_location='cpu')['model_state_dict']
    utils.safe_load_weights(net, state)
    net = net.eval()

    frame_a = imageio.imread('./sample_data/imgs/paint_1.JPG', pilmode='RGB')
    frame_b = imageio.imread('./sample_data/imgs/paint_2.jpg', pilmode='RGB')
    painting = imageio.imread('./sample_data/imgs/Meisje_met_de_parel.jpg',
                              pilmode='RGB')
    painting_mask = np.ones(painting.shape[:2])
    # Hand-annotated painting corners in frame A, (x, y) order:
    # left-up, right-up, left-bottom, right-bottom.
    corners = np.array([[932, 1025],
                        [2469, 901],
                        [908, 2927],
                        [2436, 3080]]).astype(np.float32)
    h, w = painting.shape[0], painting.shape[1]
    # Matching corners of the replacement painting, same ordering.
    painting_corners = np.array([[0, 0], [w, 0],
                                 [0, h], [w, h]]).astype(np.float32)

    engine = SparseEngine(net, 32, mode='stretching')
    corrs = engine.cotr_corr_multiscale(frame_a,
                                        frame_b,
                                        np.linspace(0.5, 0.0625, 4),
                                        1,
                                        queries_a=corners,
                                        force=True)

    # Homography from painting pixels to frame B via the matched corners.
    T = cv2.getPerspectiveTransform(painting_corners,
                                    corrs[:, 2:].astype(np.float32))
    size_b = (frame_b.shape[1], frame_b.shape[0])
    vmask = cv2.warpPerspective(painting_mask, T, size_b) > 0
    warped = cv2.warpPerspective(painting, T, size_b)
    # Composite: warped painting where the warped mask is valid, frame B elsewhere.
    out = warped * vmask[..., None] + frame_b * (~vmask[..., None])

    fig, axes = plt.subplots(1, 4)
    panels = [(painting, 'Virtual Paint'),
              (frame_a, 'Annotated Frame'),
              (frame_b, 'Target Frame'),
              (out, 'Overlay')]
    for ax, (image, title) in zip(axes, panels):
        ax.imshow(image)
        ax.title.set_text(title)
        ax.axis('off')
    plt.show()
Example #3
0
    def match_pairs(self, im1_path, im2_path, queries_im1=None):
        """Match keypoints between two images on disk with COTR.

        Reads both images as RGB, runs multiscale sparse matching
        (optionally restricted to `queries_im1` points in image 1), and
        returns (matches, kpts1, kpts2, scores) where `matches` is an
        (N, 4) array of [x1, y1, x2, y2] rows.
        """
        image_one = imageio.imread(im1_path, pilmode='RGB')
        image_two = imageio.imread(im2_path, pilmode='RGB')
        engine = SparseEngine(self.model, self.batch_size, mode='tile')
        matches = engine.cotr_corr_multiscale(image_one,
                                              image_two,
                                              np.linspace(0.5, 0.0625, 4),
                                              1,
                                              max_corrs=self.max_corrs,
                                              queries_a=queries_im1,
                                              force=True)

        # Fake scores as not output by the model
        scores = np.ones(len(matches))
        kpts_one = matches[:, :2]
        kpts_two = matches[:, 2:4]
        return matches, kpts_one, kpts_two, scores
Example #4
0
def main(opt):
    """Demo: evaluate COTR at ground-truth keypoints and visualize the error.

    Loads an image pair with hand-annotated correspondences, predicts a
    match in image B for every annotated keypoint of image A, times the
    inference, and plots predicted vs. ground-truth points on image B
    connected by red error segments.
    """
    model = build_model(opt)
    model = model.cuda()
    # map_location='cpu' matches the other demos in this file and avoids
    # restoring GPU-saved tensors onto their original device before the
    # explicit .cuda() move above.
    weights = torch.load(opt.load_weights_path,
                         map_location='cpu')['model_state_dict']
    utils.safe_load_weights(model, weights)
    model = model.eval()

    img_a = imageio.imread('./sample_data/imgs/petrzin_01.png')
    img_b = imageio.imread('./sample_data/imgs/petrzin_02.png')
    img_a_area = 1.0
    img_b_area = 1.0
    # Each row: [x_a, y_a, x_b, y_b] ground-truth correspondence.
    gt_corrs = np.loadtxt('./sample_data/petrzin_pts.txt')
    kp_a = gt_corrs[:, :2]
    kp_b = gt_corrs[:, 2:]

    engine = SparseEngine(model, 32, mode='tile')
    t0 = time.time()
    corrs = engine.cotr_corr_multiscale(img_a,
                                        img_b,
                                        np.linspace(0.75, 0.1, 4),
                                        1,
                                        max_corrs=kp_a.shape[0],
                                        queries_a=kp_a,
                                        force=True,
                                        areas=[img_a_area, img_b_area])
    t1 = time.time()
    print(f'COTR spent {t1-t0} seconds.')

    utils.visualize_corrs(img_a, img_b, corrs)
    # Overlay ground truth (kp_b) and predictions (corrs[:, 2:]) on image B;
    # a red segment joins each GT point to its prediction.
    plt.imshow(img_b)
    plt.scatter(kp_b[:, 0], kp_b[:, 1])
    plt.scatter(corrs[:, 2], corrs[:, 3])
    plt.plot(np.stack([kp_b[:, 0], corrs[:, 2]], axis=1).T,
             np.stack([kp_b[:, 1], corrs[:, 3]], axis=1).T,
             color=[1, 0, 0])
    plt.show()