Example #1
def verify_pydegensac(kps1, kps2, tentatives, th=4.0, n_iter=2000):
    src_pts = np.float32([kps1[m.queryIdx].pt
                          for m in tentatives]).reshape(-1, 2)
    dst_pts = np.float32([kps2[m.trainIdx].pt
                          for m in tentatives]).reshape(-1, 2)
    H, mask = pydegensac.findHomography(src_pts, dst_pts, th, 0.99, n_iter)
    print('pydegensac found {} inliers'.format(
        int(deepcopy(mask).astype(np.float32).sum())))
    return H, mask
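For context, a minimal way to call the helper above; the image paths, the SIFT detector, and the brute-force matcher are illustrative assumptions, not part of the original snippet (which only assumes `numpy`, `pydegensac`, and `deepcopy` are in scope):

import cv2
import numpy as np
import pydegensac
from copy import deepcopy

img1 = cv2.imread('img1.png', cv2.IMREAD_GRAYSCALE)  # placeholder paths
img2 = cv2.imread('img2.png', cv2.IMREAD_GRAYSCALE)

sift = cv2.SIFT_create()
kps1, des1 = sift.detectAndCompute(img1, None)
kps2, des2 = sift.detectAndCompute(img2, None)

# Cross-checked brute-force matches serve as the tentative correspondences.
bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
tentatives = bf.match(des1, des2)

H, mask = verify_pydegensac(kps1, kps2, tentatives, th=4.0, n_iter=2000)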
Example #2
    def test_find_homography(self):
        src_pts = np.float32([[0, 0], [0, 1], [1, 1], [1, 0]]).reshape(-1, 2)
        dst_pts = np.float32([[0, 0], [0, -1], [-1, -1],
                              [-1, 0]]).reshape(-1, 2)

        H, mask = pydegensac.findHomography(src_pts, dst_pts, 4, 1)

        self.assertEqual(3, len(H))
        self.assertEqual(4, len(mask))
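The four correspondences in this test encode the point reflection (x, y) -> (-x, -y), so up to scale the estimated homography should approach diag(-1, -1, 1); the test itself only checks output shapes. A sketch of a stricter assertion one could add (not in the original test):

import numpy as np

# H is defined only up to scale, so normalize the bottom-right entry to 1 first.
H_expected = np.diag([-1.0, -1.0, 1.0])
np.testing.assert_allclose(H / H[2, 2], H_expected, atol=1e-4)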
Example #3
def siftMatching(img1, img2):
    # img1 = np.array(cv2.cvtColor(np.array(img1), cv2.COLOR_BGR2RGB))
    # img2 = np.array(cv2.cvtColor(np.array(img2), cv2.COLOR_BGR2RGB))

    surf = cv2.xfeatures2d.SURF_create(100)
    # surf = cv2.xfeatures2d.SIFT_create()

    kp1, des1 = surf.detectAndCompute(img1, None)
    kp2, des2 = surf.detectAndCompute(img2, None)

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)
    good = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good.append(m)

    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 2)

    if (src_pts.shape[0] < 5):
        return None
    # model, inliers = ransac(
    # 		(src_pts, dst_pts),
    # 		AffineTransform, min_samples=4,
    # 		residual_threshold=8, max_trials=10000
    # 	)
    H, inliers = pydegensac.findHomography(src_pts, dst_pts, 8.0, 0.99, 10000)

    n_inliers = np.sum(inliers)

    inlier_keypoints_left = [
        cv2.KeyPoint(point[0], point[1], 1) for point in src_pts[inliers]
    ]
    inlier_keypoints_right = [
        cv2.KeyPoint(point[0], point[1], 1) for point in dst_pts[inliers]
    ]
    placeholder_matches = [cv2.DMatch(idx, idx, 1) for idx in range(n_inliers)]
    image3 = cv2.drawMatches(img1, inlier_keypoints_left, img2,
                             inlier_keypoints_right, placeholder_matches, None)

    # cv2.imshow('Matches', image3)
    # cv2.waitKey(0)

    # NOTE: these recomputed inlier coordinates are never used; the function
    # returns only the match visualization below.
    src_pts = np.float32([
        inlier_keypoints_left[m.queryIdx].pt for m in placeholder_matches
    ]).reshape(-1, 2)
    dst_pts = np.float32([
        inlier_keypoints_right[m.trainIdx].pt for m in placeholder_matches
    ]).reshape(-1, 2)

    return image3
Example #4
def cv2D2netMatching(image1, image2, feat1, feat2, matcher="BF"):
	if(matcher == "BF"):

		t0 = time.time()
		bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
		matches = bf.match(feat1['descriptors'], feat2['descriptors'])
		matches = sorted(matches, key=lambda x:x.distance)
		t1 = time.time()
		print("Time to extract matches: ", t1-t0)

		print("Number of raw matches:", len(matches))

		match1 = [m.queryIdx for m in matches]
		match2 = [m.trainIdx for m in matches]

		keypoints_left = feat1['keypoints'][match1, : 2]
		keypoints_right = feat2['keypoints'][match2, : 2]

		np.random.seed(0)

		t0 = time.time()

		### Ransac ###
		# model, inliers = ransac(
		# 	(keypoints_left, keypoints_right),
		# 	AffineTransform, min_samples=4,
		# 	residual_threshold=8, max_trials=10000
		# )
		####

		H, inliers = pydegensac.findHomography(keypoints_left, keypoints_right, 8.0, 0.99, 10000)

		t1 = time.time()
		print("Time for ransac: ", t1-t0)

		n_inliers = np.sum(inliers)
		print('Number of inliers: %d.' % n_inliers)

		inlier_keypoints_left = [cv2.KeyPoint(point[0], point[1], 1) for point in keypoints_left[inliers]]
		inlier_keypoints_right = [cv2.KeyPoint(point[0], point[1], 1) for point in keypoints_right[inliers]]
		placeholder_matches = [cv2.DMatch(idx, idx, 1) for idx in range(n_inliers)]

		image3 = cv2.drawMatches(image1, inlier_keypoints_left, image2, inlier_keypoints_right, placeholder_matches, None)

		#### Visualization ####
		# plt.figure(figsize=(20, 20))
		# plt.imshow(image3)
		# plt.axis('off')
		# plt.show()

		src_pts = np.float32([ inlier_keypoints_left[m.queryIdx].pt for m in placeholder_matches ]).reshape(-1, 2)
		dst_pts = np.float32([ inlier_keypoints_right[m.trainIdx].pt for m in placeholder_matches ]).reshape(-1, 2)

		return src_pts, dst_pts
Example #5
def getPerspKeypoints(rgbFile1, rgbFile2, HFile1, HFile2, model, device):
	if HFile1 is None:
		H1 = None  # keep H1 defined; orgKeypoints() below would otherwise raise a NameError
		igp1, img1 = read_and_process_image(rgbFile1, H=None)
	else:
		H1 = np.load(HFile1)
		igp1, img1 = read_and_process_image(rgbFile1, H=H1)

	c,h,w = igp1.shape

	if HFile2 is None:
		H2 = None  # keep H2 defined for the orgKeypoints() call below
		igp2, img2 = read_and_process_image(rgbFile2, H=None)
	else:
		H2 = np.load(HFile2)
		igp2, img2 = read_and_process_image(rgbFile2, H=H2)

	feat1 = extractSingle(igp1, model, device)
	feat2 = extractSingle(igp2, model, device)

	matches = mnn_matcher(
			torch.from_numpy(feat1['descriptors']).to(device=device),
			torch.from_numpy(feat2['descriptors']).to(device=device),
		)
	pos_a = feat1["keypoints"][matches[:, 0], : 2]
	pos_b = feat2["keypoints"][matches[:, 1], : 2]

	H, inliers = pydegensac.findHomography(pos_a, pos_b, 8.0, 0.99, 10000)
	pos_a = pos_a[inliers]
	pos_b = pos_b[inliers]

	inlier_keypoints_left = [cv2.KeyPoint(point[0], point[1], 1) for point in pos_a]
	inlier_keypoints_right = [cv2.KeyPoint(point[0], point[1], 1) for point in pos_b]
	placeholder_matches = [cv2.DMatch(idx, idx, 1) for idx in range(len(pos_a))]

	image3 = cv2.drawMatches(img1, inlier_keypoints_left, img2, inlier_keypoints_right, placeholder_matches, None, matchColor=[0, 255, 0])
	image3 = cv2.cvtColor(image3, cv2.COLOR_BGR2RGB)

	#### Visualization ####
	# cv2.imshow('Matches', image3)
	# cv2.waitKey()

	orgSrc, orgDst = orgKeypoints(pos_a, pos_b, H1, H2)
	drawOrg(cv2.imread(rgbFile1), cv2.imread(rgbFile2), orgSrc, orgDst) # Reproject matches to perspective View

	return orgSrc, orgDst
Example #6
def rordMatching(image1, image2, feat1, feat2, matcher="BF"):
	if(matcher == "BF"):

		t0 = time.time()
		bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
		matches = bf.match(feat1['descriptors'], feat2['descriptors'])
		matches = sorted(matches, key=lambda x:x.distance)
		t1 = time.time()
		print("Time to extract matches: ", t1-t0)

		print("Number of raw matches:", len(matches))

		match1 = [m.queryIdx for m in matches]
		match2 = [m.trainIdx for m in matches]

		keypoints_left = feat1['keypoints'][match1, : 2]
		keypoints_right = feat2['keypoints'][match2, : 2]

		np.random.seed(0)

		t0 = time.time()

		H, inliers = pydegensac.findHomography(keypoints_left, keypoints_right, 10.0, 0.99, 10000)

		t1 = time.time()
		print("Time for ransac: ", t1-t0)

		n_inliers = np.sum(inliers)
		print('Number of inliers: %d.' % n_inliers)

		inlier_keypoints_left = [cv2.KeyPoint(point[0], point[1], 1) for point in keypoints_left[inliers]]
		inlier_keypoints_right = [cv2.KeyPoint(point[0], point[1], 1) for point in keypoints_right[inliers]]
		placeholder_matches = [cv2.DMatch(idx, idx, 1) for idx in range(n_inliers)]

		draw_params = dict(matchColor = (0,255,0),
		                   singlePointColor = (255,0,0),
		                   # matchesMask = matchesMask,
		                   flags = 0)
		image3 = cv2.drawMatches(image1, inlier_keypoints_left, image2, inlier_keypoints_right, placeholder_matches, None, **draw_params)

		plt.figure(figsize=(20, 20))
		plt.imshow(image3)
		plt.axis('off')
		plt.show()
Example #7
def get_single_result(ms, m, method, params):
    mask = (ms <= params['match_th']).reshape(-1)
    tentatives = m[mask]
    tentative_idxs = np.arange(len(mask))[mask]
    src_pts = tentatives[:, :2]
    dst_pts = tentatives[:, 2:]
    if tentatives.shape[0] <= 5:
        return np.eye(3), np.array([False] * len(mask))
    if method == 'cv2h':
        H, mask_inl = cv2.findHomography(src_pts,
                                         dst_pts,
                                         cv2.RANSAC,
                                         params['inl_th'],
                                         maxIters=params['maxiter'],
                                         confidence=params['conf'])
    elif method == 'pyransac':
        H, mask_inl = pydegensac.findHomography(src_pts,
                                                dst_pts,
                                                params['inl_th'],
                                                conf=params['conf'],
                                                max_iters=params['maxiter'])
    elif method == 'sklearn':
        try:
            #print(src_pts.shape, dst_pts.shape)
            H, mask_inl = skransac([src_pts, dst_pts],
                                   ProjectiveTransform,
                                   min_samples=4,
                                   residual_threshold=params['inl_th'],
                                   max_trials=params['maxiter'],
                                   stop_probability=params['conf'])
            mask_inl = mask_inl.astype(bool).flatten()
            H = H.params
        except Exception as e:
            print("Fail!", e)
            return np.eye(3), np.array([False] * len(mask))
    else:
        raise ValueError('Unknown method')

    final_inliers = np.array([False] * len(mask))
    if H is not None:
        for i, x in enumerate(mask_inl):
            final_inliers[tentative_idxs[i]] = x
    return H, final_inliers
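A usage sketch for `get_single_result`; the packed correspondence array `m` (one row per match as x1, y1, x2, y2, matching the column slicing above), the score vector `ms`, and all parameter values are illustrative assumptions:

import numpy as np

ms = np.random.rand(100, 1)          # placeholder match scores
m = np.random.rand(100, 4) * 100.0   # placeholder correspondences (x1, y1, x2, y2)

params = {'match_th': 0.8,   # keep tentatives with score <= 0.8
          'inl_th': 4.0,     # inlier threshold in pixels
          'conf': 0.99,      # RANSAC confidence
          'maxiter': 2000}   # iteration budget

H, inlier_mask = get_single_result(ms, m, 'pyransac', params)
print(H.shape, int(inlier_mask.sum()))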
Example #8
def get_num_inliers(test_keypoints, test_descriptors, train_keypoints,
                    train_descriptors):
    """Returns the number of RANSAC inliers."""

    test_match_kp, train_match_kp = get_putative_matching_keypoints(
        test_keypoints, test_descriptors, train_keypoints, train_descriptors)

    if test_match_kp.shape[0] <= 4:
        return 0

    try:
        _, mask = pydegensac.findHomography(test_match_kp, train_match_kp,
                                            MAX_REPROJECTION_ERROR,
                                            HOMOGRAPHY_CONFIDENCE,
                                            MAX_RANSAC_ITERATIONS)
    except np.linalg.LinAlgError:
        return 0

    return int(copy.deepcopy(mask).astype(np.float32).sum())
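`MAX_REPROJECTION_ERROR`, `HOMOGRAPHY_CONFIDENCE`, and `MAX_RANSAC_ITERATIONS` are module-level constants in the source repository; the values below are plausible placeholders, not the original definitions:

MAX_REPROJECTION_ERROR = 4.0   # pixel threshold passed to pydegensac.findHomography()
HOMOGRAPHY_CONFIDENCE = 0.99   # RANSAC confidence level
MAX_RANSAC_ITERATIONS = 2000   # iteration budget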
Example #9
def numInliers2(feat1, feat2):
	bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
	matches = bf.match(feat1['descriptors'], feat2['descriptors'])
	matches = sorted(matches, key=lambda x:x.distance)

	match1 = [m.queryIdx for m in matches]
	match2 = [m.trainIdx for m in matches]

	keypoints_left = feat1['keypoints'][match1, : 2]
	keypoints_right = feat2['keypoints'][match2, : 2]

	np.random.seed(0)

	# model, inliers = ransac(
	# 	(keypoints_left, keypoints_right),
	# 	AffineTransform, min_samples=4,
	# 	residual_threshold=8, max_trials=10000
	# )
	
	H, inliers = pydegensac.findHomography(keypoints_left, keypoints_right, 8.0, 0.99, 10000)

	n_inliers = np.sum(inliers)

	return n_inliers
Example #10
def getPerspKeypoints2(model1, model2, rgbFile1, rgbFile2, HFile1, HFile2, device):
	if HFile1 is None:
		H1 = None  # keep H1 defined; orgKeypoints() below would otherwise raise a NameError
		igp1, img1 = read_and_process_image(rgbFile1, H=None)
	else:
		H1 = np.load(HFile1)
		igp1, img1 = read_and_process_image(rgbFile1, H=H1)

	c,h,w = igp1.shape

	if HFile2 is None:
		H2 = None  # keep H2 defined for the orgKeypoints() call below
		igp2, img2 = read_and_process_image(rgbFile2, H=None)
	else:
		H2 = np.load(HFile2)
		igp2, img2 = read_and_process_image(rgbFile2, H=H2)

	with torch.no_grad():
		keypoints_a1, scores_a1, descriptors_a1 = process_multiscale(
			igp1.to(device).unsqueeze(0),
			model1,
			scales=[1]
		)
		keypoints_a1 = keypoints_a1[:, [1, 0, 2]]

		keypoints_a2, scores_a2, descriptors_a2 = process_multiscale(
			igp1.to(device).unsqueeze(0),
			model2,
			scales=[1]
		)
		keypoints_a2 = keypoints_a2[:, [1, 0, 2]]

		keypoints_b1, scores_b1, descriptors_b1 = process_multiscale(
			igp2.to(device).unsqueeze(0),
			model1,
			scales=[1]
		)
		keypoints_b1 = keypoints_b1[:, [1, 0, 2]]

		keypoints_b2, scores_b2, descriptors_b2 = process_multiscale(
			igp2.to(device).unsqueeze(0),
			model2,
			scales=[1]
		)
		keypoints_b2 = keypoints_b2[:, [1, 0, 2]]

	# calculating matches for both models
	matches1, dist_1 = mnn_matcher_scorer(
		torch.from_numpy(descriptors_a1).to(device=device),
		torch.from_numpy(descriptors_b1).to(device=device),
#                 len(matches1)
	)
	matches2, dist_2 = mnn_matcher_scorer(
		torch.from_numpy(descriptors_a2).to(device=device),
		torch.from_numpy(descriptors_b2).to(device=device),
#                 len(matches1)
	)

	full_matches = torch.cat([matches1, matches2])
	full_dist = torch.cat([dist_1, dist_2])
	assert len(full_dist)==(len(dist_1)+len(dist_2)), "something wrong"

	k_final = len(full_dist)//2
	# k_final = len(full_dist)
	# k_final = max(len(dist_1), len(dist_2))
	top_k_mask = torch.topk(full_dist, k=k_final)[1]
	first = []
	second = []

	for valid_id in top_k_mask:
		if valid_id<len(dist_1):
			first.append(valid_id)
		else:
			second.append(valid_id-len(dist_1))
	# final_matches = full_matches[top_k_mask]

	matches1 = matches1[torch.tensor(first, device=device).long()].data.cpu().numpy()
	matches2 = matches2[torch.tensor(second, device=device).long()].data.cpu().numpy()

	pos_a1 = keypoints_a1[matches1[:, 0], : 2]
	pos_b1 = keypoints_b1[matches1[:, 1], : 2]

	pos_a2 = keypoints_a2[matches2[:, 0], : 2]
	pos_b2 = keypoints_b2[matches2[:, 1], : 2]

	pos_a = np.concatenate([pos_a1, pos_a2], 0)
	pos_b = np.concatenate([pos_b1, pos_b2], 0)

	# pos_a, pos_b, inliers = apply_ransac(pos_a, pos_b)
	H, inliers = pydegensac.findHomography(pos_a, pos_b, 8.0, 0.99, 10000)
	pos_a = pos_a[inliers]
	pos_b = pos_b[inliers]

	inlier_keypoints_left = [cv2.KeyPoint(point[0], point[1], 1) for point in pos_a]
	inlier_keypoints_right = [cv2.KeyPoint(point[0], point[1], 1) for point in pos_b]
	placeholder_matches = [cv2.DMatch(idx, idx, 1) for idx in range(len(pos_a))]

	image3 = cv2.drawMatches(img1, inlier_keypoints_left, img2, inlier_keypoints_right, placeholder_matches, None, matchColor=[0, 255, 0])
	image3 = cv2.cvtColor(image3, cv2.COLOR_BGR2RGB)
	# cv2.imshow('Matches', image3)
	# cv2.waitKey()


	orgSrc, orgDst = orgKeypoints(pos_a, pos_b, H1, H2)
	drawOrg(cv2.imread(rgbFile1), cv2.imread(rgbFile2), orgSrc, orgDst)

	return orgSrc, orgDst
Example #11
def siftMatching(img1, img2, HFile1, HFile2, device):

	H1 = np.load(HFile1)
	H2 = np.load(HFile2)

	img1 = Image.open(img1)
	rgbFile1 = img1
	if(img1.mode != 'RGB'):
		img1 = img1.convert('RGB')
	img1 = np.array(img1)
	img1 = cv2.warpPerspective(img1, H1, dsize=(400,400))

	#### Visualization ####
	# cv2.imshow("Image", cv2.cvtColor(img1, cv2.COLOR_BGR2RGB))
	# cv2.waitKey(0)

	img2 = Image.open(img2)
	rgbFile2 = img2
	if(img2.mode != 'RGB'):
		img2 = img2.convert('RGB')
	img2 = np.array(img2)
	img2 = cv2.warpPerspective(img2, H2, dsize=(400,400))

	#### Visualization ####
	# cv2.imshow("Image", cv2.cvtColor(img2, cv2.COLOR_BGR2RGB))
	# cv2.waitKey(0)

	# surf = cv2.xfeatures2d.SURF_create(100) # SURF
	surf = cv2.xfeatures2d.SIFT_create()

	kp1, des1 = surf.detectAndCompute(img1, None)
	kp2, des2 = surf.detectAndCompute(img2, None)

	matches = mnn_matcher(
			torch.from_numpy(des1).float().to(device=device),
			torch.from_numpy(des2).float().to(device=device)
		)

	src_pts = np.float32([ kp1[m[0]].pt for m in matches ]).reshape(-1, 2)
	dst_pts = np.float32([ kp2[m[1]].pt for m in matches ]).reshape(-1, 2)

	if(src_pts.shape[0] < 5 or dst_pts.shape[0] < 5):
		return [], []

	H, inliers = pydegensac.findHomography(src_pts, dst_pts, 8.0, 0.99, 10000)

	n_inliers = np.sum(inliers)

	inlier_keypoints_left = [cv2.KeyPoint(point[0], point[1], 1) for point in src_pts[inliers]]
	inlier_keypoints_right = [cv2.KeyPoint(point[0], point[1], 1) for point in dst_pts[inliers]]
	placeholder_matches = [cv2.DMatch(idx, idx, 1) for idx in range(n_inliers)]

	#### Visualization ####
	# image3 = cv2.drawMatches(img1, inlier_keypoints_left, img2, inlier_keypoints_right, placeholder_matches, None)
	# cv2.imshow('Matches', image3)
	# cv2.waitKey()

	src_pts = np.float32([ inlier_keypoints_left[m.queryIdx].pt for m in placeholder_matches ]).reshape(-1, 2)
	dst_pts = np.float32([ inlier_keypoints_right[m.trainIdx].pt for m in placeholder_matches ]).reshape(-1, 2)
	orgSrc, orgDst = orgKeypoints(src_pts, dst_pts, H1, H2)
	
	return orgSrc, orgDst
Example #12
def numInliers(frontFeat1, rearFeat1, frontFeat2, rearFeat2):
    keypoints_a1 = frontFeat1['keypoints']
    descriptors_a1 = frontFeat1['descriptors']
    keypoints_a2 = frontFeat2['keypoints']
    descriptors_a2 = frontFeat2['descriptors']

    keypoints_b1 = rearFeat1['keypoints']
    descriptors_b1 = rearFeat1['descriptors']
    keypoints_b2 = rearFeat2['keypoints']
    descriptors_b2 = rearFeat2['descriptors']

    keypoints_a1 = keypoints_a1[:, [1, 0, 2]]
    keypoints_a2 = keypoints_a2[:, [1, 0, 2]]
    keypoints_b1 = keypoints_b1[:, [1, 0, 2]]
    keypoints_b2 = keypoints_b2[:, [1, 0, 2]]

    use_cuda = torch.cuda.is_available()
    device = torch.device('cuda:1' if use_cuda else 'cpu')

    # calculating matches for both models
    matches1, dist_1 = mnn_matcher_scorer(
        torch.from_numpy(descriptors_a1).to(device=device),
        torch.from_numpy(descriptors_b1).to(device=device),
        #                 len(matches1)
    )
    matches2, dist_2 = mnn_matcher_scorer(
        torch.from_numpy(descriptors_a2).to(device=device),
        torch.from_numpy(descriptors_b2).to(device=device),
        #                 len(matches1)
    )

    full_matches = torch.cat([matches1, matches2])
    full_dist = torch.cat([dist_1, dist_2])
    assert len(full_dist) == (len(dist_1) + len(dist_2)), "something wrong"

    k_final = len(full_dist) // 2
    # k_final = len(full_dist)
    # k_final = max(len(dist_1), len(dist_2))
    top_k_mask = torch.topk(full_dist, k=k_final)[1]
    first = []
    second = []

    for valid_id in top_k_mask:
        if valid_id < len(dist_1):
            first.append(valid_id)
        else:
            second.append(valid_id - len(dist_1))

    matches1 = matches1[torch.tensor(first,
                                     device=device).long()].data.cpu().numpy()
    matches2 = matches2[torch.tensor(second,
                                     device=device).long()].data.cpu().numpy()

    pos_a1 = keypoints_a1[matches1[:, 0], :2]
    pos_b1 = keypoints_b1[matches1[:, 1], :2]

    pos_a2 = keypoints_a2[matches2[:, 0], :2]
    pos_b2 = keypoints_b2[matches2[:, 1], :2]

    pos_a = np.concatenate([pos_a1, pos_a2], 0)
    pos_b = np.concatenate([pos_b1, pos_b2], 0)

    H, inliers = pydegensac.findHomography(pos_a, pos_b, 8.0, 0.99, 10000)

    n_inliers = np.sum(inliers)

    return n_inliers
Example #13
def cv2D2netMatching(image1, image2, feat1, feat2, matcher="BF"):
    if (matcher == "BF"):

        t0 = time.time()
        bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
        matches = bf.match(feat1['descriptors'], feat2['descriptors'])
        matches = sorted(matches, key=lambda x: x.distance)
        t1 = time.time()
        print("Time to extract matches: ", t1 - t0)

        print("Number of raw matches:", len(matches))

        match1 = [m.queryIdx for m in matches]
        match2 = [m.trainIdx for m in matches]

        keypoints_left = feat1['keypoints'][match1, :2]
        keypoints_right = feat2['keypoints'][match2, :2]

        np.random.seed(0)

        t0 = time.time()

        H, inliers = pydegensac.findHomography(keypoints_left, keypoints_right,
                                               10.0, 0.99, 10000)

        t1 = time.time()
        print("Time for ransac: ", t1 - t0)

        n_inliers = np.sum(inliers)
        print('Number of inliers: %d.' % n_inliers)

        inlier_keypoints_left = [
            cv2.KeyPoint(point[0], point[1], 1)
            for point in keypoints_left[inliers]
        ]
        inlier_keypoints_right = [
            cv2.KeyPoint(point[0], point[1], 1)
            for point in keypoints_right[inliers]
        ]
        placeholder_matches = [
            cv2.DMatch(idx, idx, 1) for idx in range(n_inliers)
        ]

        draw_params = dict(
            matchColor=(0, 255, 0),
            singlePointColor=(255, 0, 0),
            # matchesMask = matchesMask,
            flags=0)
        image3 = cv2.drawMatches(image1, inlier_keypoints_left, image2,
                                 inlier_keypoints_right, placeholder_matches,
                                 None, **draw_params)

        plt.figure(figsize=(20, 20))
        plt.imshow(image3)
        plt.axis('off')
        plt.show()

    elif (matcher == "FLANN"):

        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)

        t0 = time.time()
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(feat1['descriptors'],
                                 feat2['descriptors'],
                                 k=2)
        t1 = time.time()
        print("Time to extract matches: ", t1 - t0)

        print("Number of raw matches:", len(matches))

        t0 = time.time()
        good = []
        for m, n in matches:
            if m.distance < 0.9 * n.distance:
                good.append(m)
        matches = good
        t1 = time.time()
        print("Time for outlier rejection: ", t1 - t0)
        print("Number of inliers: ", len(matches))

        match1 = [m.queryIdx for m in matches]
        match2 = [m.trainIdx for m in matches]

        keypoints_left = feat1['keypoints'][match1, :2].T
        keypoints_right = feat2['keypoints'][match2, :2].T

        for i in range(keypoints_left.shape[1]):
            image1 = cv2.circle(
                image1, (int(keypoints_left[0, i]), int(keypoints_left[1, i])),
                2, (0, 0, 255), 4)
        for i in range(keypoints_right.shape[1]):
            image2 = cv2.circle(
                image2,
                (int(keypoints_right[0, i]), int(keypoints_right[1, i])), 2,
                (0, 0, 255), 4)

        im4 = cv2.hconcat([image1, image2])

        for i in range(keypoints_left.shape[1]):
            im4 = cv2.line(
                im4, (int(keypoints_left[0, i]), int(keypoints_left[1, i])),
                (int(keypoints_right[0, i]) + image1.shape[1],
                 int(keypoints_right[1, i])), (0, 255, 0), 1)

        cv2.imshow("Image_lines", im4)
        cv2.waitKey(0)
Example #14
def eval_hpatches(
    matcher,
    data_root,
    method='',
    task='both',
    scale_H=False,
    h_solver='degensac',
    ransac_thres=2,
    thres=[1, 3, 5, 10],
    lprint_=print,
    print_out=False,
    save_npy=None,
    debug=False,
):
    """Evaluate a matcher on HPatches sequences for image matching and homogray estimation.
    The matching metric is adopted from D2Net paper, i.e., the precentage of correctly matched
    keypoints at the given re-projection error thresholds. 
    For homography estimation, the average distances between the corners transformed using 
    the estimated and GT homographies are computed. Both percentage of the corner distance at
    the given thresholds and the area under the cumulative error curve (AUC) at those thresholds
    are reported.
    
    Args:
        - matcher: the matching function that inputs an image pair paths and 
                   outputs the matches and keypoints. 
        - data_root: the folder directory of HPatches dataset.
        - method: the description of the evaluated method.
        - task: the target task, options = [matching|homography|both]
        - ransac_thres: the set of ransac thresholds used by the solver to estimate homographies.
                        Results under each ransac threshold are printed per line.
        - thres: error thresholds in pixels to compute the metrics.
        - lprint: the printing function. If needed it can be implemented to outstream to a log file.
        - print_out: when set to True, per-pair results are printed during the evaluation.
    """

    np.set_printoptions(precision=2)
    from PIL import Image

    if task == 'both':
        task = 'matching+homography'
    seq_dirs = sorted(glob.glob('{}/*'.format(data_root)))
    lprint_(f'\n>>>>Eval hpatches: task={task} method={method} scale_H={scale_H} rthres={ransac_thres} thres={thres} ')
    
    # Matching
    if 'matching' in task:
        thres_range = np.arange(1, 16)
        i_err = {thr: 0 for thr in thres_range}
        v_err = {thr: 0 for thr in thres_range}
        n_feats = []
        seq_type = []

    # Homography
    if 'homography' in task:
        inlier_ratio = []
        h_failed = 0
        dists_sa = []
        dists_si = []
        dists_sv = []

    match_failed = 0
    n_matches = []
    match_time = []
    start_time = time.time()
    for seq_idx, seq_dir in tqdm(enumerate(seq_dirs[::-1]), total=len(seq_dirs), smoothing=.5):
        if debug and seq_idx > 10:
            break
        sname = seq_dir.split('/')[-1]
        im1_path = os.path.join(seq_dir, '1.ppm')

        # Eval on composed pairs within seq
        for im_idx in range(2, 7):
            im2_path = os.path.join(seq_dir, '{}.ppm'.format(im_idx))
            H_gt = np.loadtxt(os.path.join(seq_dir, 'H_1_{}'.format(im_idx)))
            scale = np.ones(4)

            # Predict matches
            try:
                t0 = time.time()
                match_res = matcher(im1_path, im2_path)
                match_time.append(time.time() - t0)
                matches, p1s, p2s = match_res[0:3]
                if scale_H:
                    # scale = (wo / wt, ho / ht) for im1 & im2
                    scale = match_res[4]

                    # Scale GT homographies
                    H_scale_im1 = scale_homography(scale[0], scale[1])
                    H_scale_im2 = scale_homography(scale[2], scale[3])
                    H_gt = np.linalg.inv(H_scale_im2) @ H_gt @ H_scale_im1
            except:
                p1s = p2s = matches = []
                match_failed += 1
            n_matches.append(len(matches))
            
            if 'matching' in task:
                n_feats.append(len(p1s))
                n_feats.append(len(p2s))
                seq_type.append(sname[0])
                if len(matches) == 0:
                    dist = np.array([float("inf")])
                else:
                    dist = cal_reproj_dists(matches[:, :2], matches[:, 2:], H_gt)
                for thr in thres_range:
                    if sname[0] == 'i':
                        i_err[thr] += np.mean(dist <= thr)
                    else:
                        v_err[thr] += np.mean(dist <= thr)

            if 'homography' in task:
                try:
                    if 'cv' in h_solver:
                        H_pred, inliers = cv2.findHomography(matches[:, :2], matches[:, 2:4], cv2.RANSAC, ransac_thres)
                    else:
                        H_pred, inliers = pydegensac.findHomography(matches[:, :2], matches[:, 2:4], ransac_thres)
                except:
                    H_pred = None

                if H_pred is None:
                    corner_dist = np.nan
                    irat = 0
                    h_failed += 1
                    inliers = []
                else:
                    im = Image.open(im1_path)
                    w, h = im.size
                    w, h = w / scale[0], h / scale[1]
                    corners = np.array([[0, 0, 1],
                                        [0, h - 1, 1],
                                        [w - 1, 0, 1],
                                        [w - 1, h - 1, 1]])
                    real_warped_corners = np.dot(corners, np.transpose(H_gt))
                    real_warped_corners = real_warped_corners[:, :2] / real_warped_corners[:, 2:]
                    warped_corners = np.dot(corners, np.transpose(H_pred))
                    warped_corners = warped_corners[:, :2] / warped_corners[:, 2:]
                    corner_dist = np.mean(np.linalg.norm(real_warped_corners - warped_corners, axis=1))
                    irat = np.mean(inliers)
                inlier_ratio.append(irat)
                dists_sa.append(corner_dist)
                if sname[0] == 'i':
                    dists_si.append(corner_dist)
                if sname[0] == 'v':
                    dists_sv.append(corner_dist)
                    
            if print_out:
                print(f'Scene {sname}, pair:1-{im_idx} matches:{len(matches)}')
                if 'matching' in task:
                    print(f'Median matching dist:{np.median(dist):.2f} <1px:{np.mean(dist <= 1):.3f}')
                if 'homography' in task:
                    print(f'Corner dist:{corner_dist:.2f} inliers:{np.sum(inliers)}')

    lprint_(f'>>Finished, pairs={len(match_time)} match_failed={match_failed} matches={np.mean(n_matches):.1f} match_time={np.mean(match_time):.2f}s')

    if 'matching' in task:
        results = i_err, v_err, [np.array(seq_type), np.array(n_feats), np.array(n_matches)]
        lprint_('==== Image Matching ====')
        lprint_(eval_summary_matching(results, thres, save_npy=save_npy))
    if 'homography' in task:
        lprint_('==== Homography Estimation ====')        
        lprint_(f'Hest solver={h_solver} est_failed={h_failed} ransac_thres={ransac_thres} inlier_rate={np.mean(inlier_ratio):.2f}')
        lprint_(eval_summary_homography(dists_sa, dists_si, dists_sv, thres))
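The `matcher` argument to `eval_hpatches` is a callable over two image paths that returns at least `(matches, p1s, p2s)`, where `matches` packs one correspondence per row as x1, y1, x2, y2 (the slicing `matches[:, :2]` / `matches[:, 2:4]` above relies on this). A minimal conforming wrapper, assuming SIFT with a cross-checked brute-force matcher and no image rescaling (so the `scale_H` branch is never taken); everything here is an illustrative sketch, not part of the source:

import cv2
import numpy as np

def sift_bf_matcher(im1_path, im2_path):
    # Toy matcher conforming to the interface eval_hpatches() expects.
    img1 = cv2.imread(im1_path, cv2.IMREAD_GRAYSCALE)
    img2 = cv2.imread(im2_path, cv2.IMREAD_GRAYSCALE)
    sift = cv2.SIFT_create()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
    raw = bf.match(des1, des2)
    p1s = np.float32([kp.pt for kp in kp1])
    p2s = np.float32([kp.pt for kp in kp2])
    # One row per match: x1, y1, x2, y2
    matches = np.float32([list(kp1[r.queryIdx].pt) + list(kp2[r.trainIdx].pt)
                          for r in raw]).reshape(-1, 4)
    return matches, p1s, p2s

# e.g. eval_hpatches(sift_bf_matcher, 'hpatches-sequences-release', method='SIFT+BF')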
Example #15
def compute_num_inliers(test_keypoints,
                        test_descriptors,
                        train_keypoints,
                        train_descriptors,
                        use_ratio_test=False,
                        draw_matches=True,
                        query_im_array=None,
                        index_im_array=None):
    """Returns the number of RANSAC inliers."""
    test_match_kp, train_match_kp = \
            compute_putative_matching_keypoints(test_keypoints,
                                                test_descriptors,
                                                train_keypoints,
                                                train_descriptors,
                                                use_ratio_test=use_ratio_test)
    # Minimum number of keypoints supported by `pydegensac.findHomography()`.
    if test_match_kp.shape[0] <= 4:
        return 0, b''

    try:
        _, mask = pydegensac.findHomography(test_match_kp, train_match_kp,
                                            MAX_REPROJECTION_ERROR,
                                            HOMOGRAPHY_CONFIDENCE,
                                            MAX_RANSAC_ITERATIONS)
    except np.linalg.LinAlgError:  # When det(H)=0, can't invert matrix.
        return 0, b''

    inliers = mask if mask is not None else []

    match_viz_bytes = b''
    if isinstance(query_im_array, np.ndarray) and isinstance(
            index_im_array, np.ndarray) and draw_matches:
        query_im_scale_factors = [1.0, 1.0]
        index_im_scale_factors = [1.0, 1.0]
        inlier_idxs = np.nonzero(inliers)[0]
        _, ax = plt.subplots()
        ax.axis('off')
        ax.xaxis.set_major_locator(plt.NullLocator())
        ax.yaxis.set_major_locator(plt.NullLocator())
        plt.subplots_adjust(top=1,
                            bottom=0,
                            right=1,
                            left=0,
                            hspace=0,
                            wspace=0)
        plt.margins(0, 0)
        feature.plot_matches(ax,
                             query_im_array,
                             index_im_array,
                             test_match_kp * query_im_scale_factors,
                             train_match_kp * index_im_scale_factors,
                             np.column_stack((inlier_idxs, inlier_idxs)),
                             only_matches=False)

        match_viz_io = io.BytesIO()
        plt.savefig(match_viz_io,
                    format='jpeg',
                    bbox_inches='tight',
                    pad_inches=0)
        match_viz_bytes = match_viz_io.getvalue()

    return int(copy.deepcopy(mask).astype(np.float32).sum()), match_viz_bytes