Code Example #1
import time

import torch
from torchvision import transforms

# Assumed to be provided by the project's helper modules (exact import
# paths depend on the assignment layout): ROOT, load_image, PIL_resize,
# rgb2gray, get_interest_points, get_siftnet_features, match_features,
# evaluate_correspondence


def test_feature_matching_speed():
    """
    Test how long feature matching takes to execute on the Notre Dame pair.
    """
    start = time.time()
    image1 = load_image(f'{ROOT}/data/1a_notredame.jpg')
    image2 = load_image(f'{ROOT}/data/1b_notredame.jpg')
    eval_file = f'{ROOT}/ground_truth/notredame.pkl'
    scale_factor = 0.5
    image1 = PIL_resize(image1, (int(
        image1.shape[1] * scale_factor), int(image1.shape[0] * scale_factor)))
    image2 = PIL_resize(image2, (int(
        image2.shape[1] * scale_factor), int(image2.shape[0] * scale_factor)))
    image1_bw = rgb2gray(image1)
    image2_bw = rgb2gray(image2)

    tensor_type = torch.FloatTensor
    torch.set_default_tensor_type(tensor_type)
    to_tensor = transforms.ToTensor()

    image_input1 = to_tensor(image1_bw).unsqueeze(0)
    image_input2 = to_tensor(image2_bw).unsqueeze(0)

    x1, y1, _ = get_interest_points(image_input1)
    x2, y2, _ = get_interest_points(image_input2)

    x1, x2 = x1.detach().numpy(), x2.detach().numpy()
    y1, y2 = y1.detach().numpy(), y2.detach().numpy()

    image1_features = get_siftnet_features(image_input1, x1, y1)
    image2_features = get_siftnet_features(image_input2, x2, y2)

    matches, confidences = match_features(image1_features, image2_features, x1,
                                          y1, x2, y2)
    print('{:d} matches from {:d} corners'.format(len(matches), len(x1)))

    # num_pts_to_evaluate = len(matches)
    num_pts_to_evaluate = 100
    _, c = evaluate_correspondence(image1, image2, eval_file, scale_factor,
                                   x1[matches[:num_pts_to_evaluate, 0]],
                                   y1[matches[:num_pts_to_evaluate, 0]],
                                   x2[matches[:num_pts_to_evaluate, 1]],
                                   y2[matches[:num_pts_to_evaluate, 1]])
    end = time.time()
    duration = end - start
    print(f'Your feature matching pipeline takes {duration:.2f} seconds '
          'to run on Notre Dame')
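Example #1 measures wall-clock time for the whole pipeline (including image loading) with time.time(). For interval timing, Python's time.perf_counter() is the higher-resolution, monotonic alternative; a minimal sketch of the same pattern, with the pipeline call left as a placeholder:

import time

start = time.perf_counter()
# ... run the feature matching pipeline under test ...
duration = time.perf_counter() - start
print(f'Pipeline took {duration:.2f} seconds')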
Code Example #2
import numpy as np
import torch

# Assumed to be provided by the project's helper modules:
# get_siftnet_features


def test_get_siftnet_features():
    """
    Sanity-check get_siftnet_features on a tiny synthetic image: verify the
    descriptor sum and the output shape.
    """
    x = np.array([8, 8, 7, 9])  # interest point x-coordinates
    y = np.array([7, 9, 8, 8])  # interest point y-coordinates
    img_bw = np.arange(256).reshape(16, 16)  # 16x16 grayscale ramp image
    img_bw = torch.from_numpy(img_bw)
    img_bw = img_bw.unsqueeze(0).unsqueeze(0)  # add batch and channel dims
    img_bw = img_bw.type(torch.FloatTensor)

    features = get_siftnet_features(img_bw, x, y)

    assert np.allclose(features.sum(), 22.039, atol=1)
    assert features.shape == (4, 128)
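The shape assertion above reflects the standard SIFT descriptor layout: a 4x4 grid of spatial cells, each with an 8-bin orientation histogram, giving 16 * 8 = 128 dimensions per interest point. A minimal sketch of that arithmetic (the zeros array is a placeholder, not the project's implementation):

import numpy as np

num_points = 4
cells, orientation_bins = 4 * 4, 8  # standard SIFT layout
descriptors = np.zeros((num_points, cells * orientation_bins))
assert descriptors.shape == (4, 128)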
Code Example #3
import numpy as np
import torch

# Assumed to be provided by the project's helper modules:
# get_siftnet_features


def test_get_siftnet_features():
    """
    Sanity-check get_siftnet_features on a tiny synthetic image: verify the
    descriptor sum, the output shape, and a crop of the feature values.
    """
    x = np.array([8, 8, 7, 9])  # interest point x-coordinates
    y = np.array([7, 9, 8, 8])  # interest point y-coordinates
    img_bw = np.arange(256).reshape(16, 16)  # 16x16 grayscale ramp image
    img_bw = torch.from_numpy(img_bw)
    img_bw = img_bw.unsqueeze(0).unsqueeze(0)  # add batch and channel dims
    img_bw = img_bw.type(torch.FloatTensor)

    features = get_siftnet_features(img_bw, x, y)
    assert np.allclose(features.sum(), 22.039, atol=1)
    assert features.shape == (4, 128)

    # Ground-truth values for descriptor dimensions 64:66 of each of the
    # four interest points
    gt_feat_crop = np.array([[0.28135952, 0.20184263],
                             [0.28796558, 0.17183169],
                             [0.27522191, 0.12444288],
                             [0., 0.23030874]])
    assert np.allclose(features[:, 64:66], gt_feat_crop)
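Note that the two np.allclose checks above use different tolerances: the sum comparison passes atol=1, tolerating an aggregate drift of up to about 1, while the element-wise crop comparison relies on numpy's defaults (rtol=1e-05, atol=1e-08). A small self-contained illustration:

import numpy as np

# atol=1 tolerates a 0.461 absolute gap; the default tolerances do not
assert np.allclose(22.5, 22.039, atol=1)
assert not np.allclose(22.5, 22.039)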