def match_adalam_with_magsac(kps1, kps2, descs1, descs2, h1, w1, h2, w2):
    """Match two keypoint sets with AdaLAM, then keep MAGSAC++ inliers.

    Args:
        kps1, kps2: keypoints in whatever format convert_kpts_to_imc accepts.
        descs1, descs2: descriptors aligned with kps1/kps2.
        h1, w1: height/width of image 1.
        h2, w2: height/width of image 2.

    Returns:
        (N, 4) array of inlier correspondences [x1, y1, x2, y2], or None
        when too few matches survive or no fundamental matrix is found.
    """
    matcher = AdalamFilter()
    # convert_kpts_to_imc presumably returns (xy, scales, angles, responses)
    # -- TODO confirm against its definition (responses r1/r2 are unused here).
    kp1, s1, a1, r1 = convert_kpts_to_imc(kps1)
    kp2, s2, a2, r2 = convert_kpts_to_imc(kps2)
    idxs = matcher.match_and_filter(kp1,
                                    kp2,
                                    descs1,
                                    descs2,
                                    im1shape=(h1, w1),
                                    im2shape=(h2, w2),
                                    o1=a1.reshape(-1),
                                    o2=a2.reshape(-1),
                                    s1=s1.reshape(-1),
                                    s2=s2.reshape(-1)).detach().cpu().numpy()
    if len(idxs) <= 15:
        # Too few putative matches for a meaningful geometric verification.
        return None
    src_pts = kp1[idxs[:, 0]]
    dst_pts = kp2[idxs[:, 1]]

    F, inliers_mask = cv2.findFundamentalMat(src_pts,
                                             dst_pts,
                                             method=cv2.USAC_MAGSAC,
                                             ransacReprojThreshold=0.25,
                                             confidence=0.99999,
                                             maxIters=100000)
    # BUG FIX: findFundamentalMat returns (None, None) when no model can be
    # estimated; the original np.array(None).sum() raised a TypeError then.
    if inliers_mask is None or np.array(inliers_mask).sum() < 15:
        return None
    inliers_mask = np.array(inliers_mask).astype(bool).reshape(-1)
    return np.concatenate([src_pts[inliers_mask], dst_pts[inliers_mask]],
                          axis=1)
# Esempio n. 2 (example separator from the scraped page; stray "0" below)
# 0
def sequential_adalam_matcher(kp1, kp2, desc1, desc2, meta_desc1,
                              meta_desc2, img_shape1, img_shape2):
    """Match keypoints head-by-head with meta-descriptor weighting, then
    filter the putative matches with AdaLAM.

    The weighted similarity is accumulated one descriptor head at a time
    (lower peak memory than the einsum variant in adalam_matcher).

    Args:
        kp1, kp2: keypoint coordinate tensors for the two images.
        desc1, desc2: per-head descriptors, indexed as [:, head, :].
        meta_desc1, meta_desc2: per-head meta-descriptors used as weights.
        img_shape1, img_shape2: image shapes passed through to AdaLAM.

    Returns:
        numpy array of filtered match indices from AdalamFilter.
    """
    device = desc1.device
    # GENERALIZED: the original hard-coded 4 heads; use however many the
    # meta-descriptor tensor actually carries (backward compatible).
    num_heads = meta_desc1.shape[1]
    desc_sims = 0.
    weights_sum = 0.
    for i in range(num_heads):
        # Per-head affinity weights; normalized across heads below.
        weights = torch.exp(meta_desc1[:, i, :] @ meta_desc2[:, i, :].t())
        weights_sum += weights
        desc_sims += (desc1[:, i, :] @ desc2[:, i, :].t()) * weights
    # Free the big intermediates as early as possible (GPU memory).
    del meta_desc1, meta_desc2, desc1, desc2, weights
    desc_sims /= weights_sum
    del weights_sum
    # Convert cosine similarity to squared L2 distance of unit vectors.
    desc_sims = 2 - 2 * desc_sims

    # Compute putative matches and mutual neighbors
    dd12, nn12 = torch.topk(desc_sims, k=2, dim=1, largest=False)
    putative_matches = nn12[:, 0]
    # Lowe-style ratio score; clamp avoids division by ~0.
    scores = dd12[:, 0] / dd12[:, 1].clamp_min_(1e-3)
    dd21, nn21 = torch.min(desc_sims, dim=0)
    # Mutual nearest neighbor: i's best match must point back at i.
    mnn = nn21[putative_matches] == torch.arange(kp1.shape[0], device=device)
    del desc_sims

    # Filter the matches with Adalam
    matcher = AdalamFilter()
    matches = matcher.filter_matches(
        kp1, kp2, putative_matches, scores, mnn, img_shape1, img_shape2,
        None, None, None, None)
    return matches.data.cpu().numpy()
# Esempio n. 3 (example separator from the scraped page; stray "0" below)
# 0
    def calculateImages(self, im1, im2):
        """Extract keypoints from two images, match them with AdaLAM,
        estimate a fundamental matrix with LMedS, dump the matched
        coordinates to text files, and display inliers/outliers.

        Args:
            im1, im2: inputs accepted by self.extract_keypoints
                (paths or images -- TODO confirm against that method).
        """
        k1, o1, s1, d1, im1 = self.extract_keypoints(im1)
        k2, o2, s2, d2, im2 = self.extract_keypoints(im2)

        print("After extracting points, k1 size: ", len(k1))
        print("After extracting points, k2 size: ", len(k2))

        matcher = AdalamFilter()
        matches = matcher.match_and_filter(
            k1=k1,
            k2=k2,
            o1=o1,
            o2=o2,
            d1=d1,
            d2=d2,
            s1=s1,
            s2=s2,
            im1shape=im1.shape[:2],
            im2shape=im2.shape[:2]).cpu().numpy()

        # Keep only the coordinates of the matched keypoints.
        k1 = k1[matches[:, 0]]
        k2 = k2[matches[:, 1]]
        F, mask = cv.findFundamentalMat(k1, k2, cv.FM_LMEDS)
        print("\n")
        print("Fundamental Matrix is: ")
        print(F)
        print("\n")
        pts1 = k1[mask.ravel() == 1]
        pts2 = k2[mask.ravel() == 1]
        # NOTE(review): the files below dump ALL matched points, not only
        # the LMedS inliers (pts1/pts2) -- confirm this is intended.
        # FIX: context managers replace bare open()/close() so the files
        # are closed even if a write fails mid-loop.
        with open("Adalam_matched_Left.txt", "w") as file1, \
                open("Adalam_matched_Right.txt", "w") as file2:
            for (x1, y1), (x2, y2) in zip(k1, k2):
                file1.write(str(x1) + ", " + str(y1) + "\n")
                file2.write(str(x2) + ", " + str(y2) + "\n")

        # LMedS outliers, shown separately by show_matches.
        out1 = k1[mask.ravel() == 0]
        out2 = k2[mask.ravel() == 0]

        self.show_matches(im1, im2, pts1, pts2, out1, out2)
# Esempio n. 4 (example separator from the scraped page; stray "0" below)
# 0
def adalam_matcher(kp1, kp2, desc1, desc2, meta_desc1,
                   meta_desc2, img_shape1, img_shape2):
    """Match keypoints using meta-descriptor-weighted descriptor
    similarities, then filter the putative matches with AdaLAM.

    Args:
        kp1, kp2: keypoint coordinate tensors for the two images.
        desc1, desc2: per-head descriptors, indexed as [:, head, :].
        meta_desc1, meta_desc2: per-head meta-descriptors used as weights.
        img_shape1, img_shape2: image shapes passed through to AdaLAM.

    Returns:
        numpy array of filtered match indices from AdalamFilter.
    """
    device = desc1.device

    # Weight each descriptor head by a softmax over the meta-descriptor
    # affinities, then collapse the head axis into one distance map.
    head_affinity = torch.einsum('nid,mid->nim', (meta_desc1, meta_desc2))
    del meta_desc1, meta_desc2
    head_weights = func.softmax(head_affinity, dim=1)
    del head_affinity
    weighted_sims = torch.einsum('nid,mid->nim', (desc1, desc2)) * head_weights
    del desc1, desc2, head_weights
    # Cosine similarity -> squared L2 distance of unit vectors.
    distances = 2 - 2 * torch.sum(weighted_sims, dim=1)
    del weighted_sims

    # Nearest and second-nearest neighbours from image 1 into image 2,
    # scored with a Lowe-style ratio (clamp avoids division by ~0).
    two_best_d, two_best_i = torch.topk(distances, k=2, dim=1, largest=False)
    putative_matches = two_best_i[:, 0]
    scores = two_best_d[:, 0] / two_best_d[:, 1].clamp_min_(1e-3)
    # Mutual nearest neighbour: i's best match must point back at i.
    _, best_back = torch.min(distances, dim=0)
    mnn = best_back[putative_matches] == torch.arange(kp1.shape[0],
                                                      device=device)
    del distances

    # Hand the putative matches to AdaLAM for spatial filtering.
    matcher = AdalamFilter()
    matches = matcher.filter_matches(
        kp1, kp2, putative_matches, scores, mnn, img_shape1, img_shape2,
        None, None, None, None)
    return matches.data.cpu().numpy()
import argparse
from adalam import AdalamFilter


if __name__ == '__main__':
    # Command-line entry point: run AdaLAM over every image pair listed
    # for a COLMAP database.
    parser = argparse.ArgumentParser(
        description="Match a colmap database with AdaLAM")
    parser.add_argument("--database_path", "-d", required=True)
    parser.add_argument("--image_pairs_path", "-i", required=True)
    args = parser.parse_args()

    adalam = AdalamFilter()
    adalam.match_colmap_database(database_path=args.database_path,
                                 image_pairs_path=args.image_pairs_path)
# Esempio n. 6 (example separator from the scraped page; stray "0" below)
# 0
        cv.line(vis, (x1, y1), (x2, y2), [0, 255, 0], 1)

    cv.imshow("AdaLAM example", vis)
    cv.waitKey()


if __name__ == '__main__':
    # Demo entry point: extract keypoints from two images given on the
    # command line, match them with AdaLAM and visualise the result.
    parser = argparse.ArgumentParser()
    parser.add_argument("--im1", required=True)
    parser.add_argument("--im2", required=True)
    args = parser.parse_args()

    k1, o1, s1, d1, im1 = extract_keypoints(args.im1)
    k2, o2, s2, d2, im2 = extract_keypoints(args.im2)

    matcher = AdalamFilter()
    matches = matcher.match_and_filter(k1=k1,
                                       k2=k2,
                                       o1=o1,
                                       o2=o2,
                                       d1=d1,
                                       d2=d2,
                                       s1=s1,
                                       s2=s2,
                                       im1shape=im1.shape[:2],
                                       im2shape=im2.shape[:2]).cpu().numpy()

    # Draw only the keypoints that survived AdaLAM filtering.
    show_matches(im1, im2, k1=k1[matches[:, 0]], k2=k2[matches[:, 1]])