def test_feature_matching_speed():
    """
	Test how long feature matching takes to execute on the Notre Dame pair.
	"""
    start = time.time()
    image1 = load_image(f'{ROOT}/data/1a_notredame.jpg')
    image2 = load_image(f'{ROOT}/data/1b_notredame.jpg')
    eval_file = f'{ROOT}/ground_truth/notredame.pkl'
    scale_factor = 0.5
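    # Downscale both images by half so detection and matching run quickly.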
    image1 = PIL_resize(image1, (int(
        image1.shape[1] * scale_factor), int(image1.shape[0] * scale_factor)))
    image2 = PIL_resize(image2, (int(
        image2.shape[1] * scale_factor), int(image2.shape[0] * scale_factor)))
    image1_bw = rgb2gray(image1)
    image2_bw = rgb2gray(image2)

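    # Convert each grayscale image to a 1 x 1 x H x W float tensor for the network.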
    tensor_type = torch.FloatTensor
    torch.set_default_tensor_type(tensor_type)
    to_tensor = transforms.ToTensor()

    image_input1 = to_tensor(image1_bw).unsqueeze(0)
    image_input2 = to_tensor(image2_bw).unsqueeze(0)

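    # Detect interest point locations in each image, then move them to numpy for indexing.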
    x1, y1, _ = get_interest_points(image_input1)
    x2, y2, _ = get_interest_points(image_input2)

    x1, x2 = x1.detach().numpy(), x2.detach().numpy()
    y1, y2 = y1.detach().numpy(), y2.detach().numpy()

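    # Compute SIFTNet descriptors at the detected interest points.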
    image1_features = get_siftnet_features(image_input1, x1, y1)
    image2_features = get_siftnet_features(image_input2, x2, y2)

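    # Match descriptors between the two images and report how many matches were found.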
    matches, confidences = match_features(image1_features, image2_features, x1,
                                          y1, x2, y2)
    print(f'{len(matches)} matches from {len(x1)} corners')

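    # Score only the 100 most confident matches against the ground-truth correspondences.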
    # num_pts_to_evaluate = len(matches)
    num_pts_to_evaluate = 100
    _, c = evaluate_correspondence(image1, image2, eval_file, scale_factor,
                                   x1[matches[:num_pts_to_evaluate, 0]],
                                   y1[matches[:num_pts_to_evaluate, 0]],
                                   x2[matches[:num_pts_to_evaluate, 1]],
                                   y2[matches[:num_pts_to_evaluate, 1]])
    end = time.time()
    duration = end - start
    print(
        f'Your feature matching pipeline takes {duration:.2f} seconds to run on Notre Dame'
    )


def test_top10_notredame1a():
    """
    Verify that we can retrieve the 10 pixels with the highest cornerness score (those that
    do not lie at invalid locations close to the image borders).
    """
    image1 = load_image(f'{ROOT}/data/1a_notredame.jpg')

    scale_factor = 0.5
    image1 = PIL_resize(image1, (int(
        image1.shape[1] * scale_factor), int(image1.shape[0] * scale_factor)))
    image1_bw = rgb2gray(image1)
    tensor_type = torch.FloatTensor
    torch.set_default_tensor_type(tensor_type)
    to_tensor = transforms.ToTensor()
    image_input1 = to_tensor(image1_bw).unsqueeze(0)

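    # Ask for 12 interest points so there is slack if a couple fall in the pruned border region.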
    x1, y1, _ = get_interest_points(image_input1, num_points=12)
    x1, y1 = x1.detach().numpy(), y1.detach().numpy()

    gt_x1 = np.array([411, 395, 346, 364, 379, 467, 349, 468, 656, 404])
    gt_y1 = np.array([408, 410, 439, 411, 410, 159, 427, 170, 156, 400])

    # only checking the first 10/12, in case you pruned borders before or after confidence pruning
    assert np.allclose(x1[:10], gt_x1)
    assert np.allclose(y1[:10], gt_y1)


def test_ImageGradientsLayer():
    """
    Verify that ImageGradientsLayer returns image gradients with the expected
    shape and values on the Notre Dame image.
    """
    imgrad_layer = ImageGradientsLayer()
    img = load_image(f'{ROOT}/data/1a_notredame.jpg')
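    # Sanity-check that the test image loaded as expected before exercising the layer.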
    assert np.allclose(5392768.5, img.sum(), atol=1)

    image1_bw = rgb2gray(img)
    image1_bw = torch.from_numpy(image1_bw).unsqueeze(0).unsqueeze(0)
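    # Forward pass: the layer should return the x- and y-gradients stacked along the channel dimension.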
    im_grads = imgrad_layer(image1_bw)
    im_grads = im_grads.detach()

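    # Expect a (1, 2, H, W) output: one batch, two gradient channels, original spatial size.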
    assert [1, 2, 2048, 1536] == [im_grads.shape[i] for i in range(4)]
    assert torch.allclose(im_grads.sum(), torch.tensor(-3154.8), atol=1)

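    # Spot-check a 2 x 2 crop of both gradient channels against precomputed values.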
    gt_crop = torch.tensor([[[0.0257, -0.0184], [0.0243, 0.0104]],
                            [[0.0330, 0.0410], [-0.0219, -0.0026]]])
    assert torch.allclose(gt_crop, im_grads[0, :, 500:502, 500:502], atol=1e-3)