def test_triangulation_individualCal_without_ransac(self):
    """Tests that the triangulation is accurate for individual camera calibration, without RANSAC-based
    triangulation. Checks if cameras and triangulated 3D point are as expected.
    """
    k1 = 0
    k2 = 0
    f, u0, v0 = 1500, 640, 480
    f_, u0_, v0_ = 1600, 650, 440
    K1 = Cal3Bundler(f, k1, k2, u0, v0)
    K2 = Cal3Bundler(f_, k1, k2, u0_, v0_)

    keypoints_list, _, cameras = generate_noisy_2d_measurements(
        world_point=self.expected_landmark,
        calibrations=[K1, K2],
        per_image_noise_vecs=np.zeros((2, 2)),
        poses=get_pose3_vector(num_poses=2),
    )

    # create matches
    # since there is only one measurement in each image, both are assigned feature index 0
    matches_dict = {(0, 1): np.array([[0, 0]])}

    da = DataAssociation(
        reproj_error_thresh=5,  # 5 px
        min_track_len=2,  # at least 2 measurements required
        mode=TriangulationParam.NO_RANSAC,
    )
    sfm_data, _ = da.run(len(cameras), cameras, matches_dict, keypoints_list)

    # check if the computed 3D point is as expected
    estimated_landmark = sfm_data.get_track(0).point3()
    self.gtsamAssertEquals(estimated_landmark, self.expected_landmark, 1e-2)

    # check if cameras saved to the result are as expected
    for i in sfm_data.get_valid_camera_indices():
        self.gtsamAssertEquals(sfm_data.get_camera(i), cameras.get(i))

def verify_triangulation_sharedCal_2poses(self, triangulation_mode: TriangulationParam):
    """Tests that the triangulation is accurate for shared calibration with a specified triangulation mode.

    Checks whether the triangulated landmark map formed from 2 measurements is valid, if min track length = 3
    (should be invalid). The noise vectors represent the amount of noise to be added to measurements.
    """
    keypoints_list, _, cameras = generate_noisy_2d_measurements(
        world_point=self.expected_landmark,
        calibrations=[self.sharedCal, self.sharedCal],
        per_image_noise_vecs=np.array([[-0.1, -0.5], [0.2, -0.3]]),
        poses=get_pose3_vector(num_poses=2),
    )

    # create matches
    # since there is only one measurement in each image, both are assigned feature index 0
    matches_dict = {(0, 1): np.array([[0, 0]])}

    da = DataAssociation(
        reproj_error_thresh=5,  # 5 px
        min_track_len=3,  # at least 3 measurements required
        mode=triangulation_mode,
        num_ransac_hypotheses=20,
    )
    triangulated_landmark_map, _ = da.run(len(cameras), cameras, matches_dict, keypoints_list)

    # assert that we cannot obtain even 1 length-3 track if we have only 2 camera poses
    # result should be empty, since the number of measurements < min track length
    assert (
        triangulated_landmark_map.number_tracks() == 0
    ), "Failure: tracks exceed expected track length (should be 0 tracks)"

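# Illustrative usage sketch (not from the original file): a concrete test exercising the 2-pose helper
# above. TriangulationParam.NO_RANSAC is taken from the other tests in this file; any other mode accepted
# by DataAssociation could be substituted. The method name below is a hypothetical example.
def test_triangulation_sharedCal_2poses_without_ransac(self):
    """Example invocation of verify_triangulation_sharedCal_2poses without RANSAC-based triangulation."""
    self.verify_triangulation_sharedCal_2poses(TriangulationParam.NO_RANSAC)
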
def test_create_computation_graph(self):
    """Tests the graph to create data association for 3 images.

    Checks if the result from the dask computation graph is the same as the result without dask.
    """
    keypoints_list, _, cameras = generate_noisy_2d_measurements(
        world_point=self.expected_landmark,
        calibrations=[self.sharedCal, self.sharedCal, self.sharedCal],
        per_image_noise_vecs=np.array([[-0.1, -0.5], [-0.2, 0.3], [0.1, -0.1]]),
        poses=get_pose3_vector(num_poses=3),
    )

    # create matches
    # since there is only one measurement in each image, both are assigned feature index 0
    matches_dict = {(0, 1): np.array([[0, 0]]), (1, 2): np.array([[0, 0]])}

    # Run without the computation graph.
    da = DataAssociation(
        reproj_error_thresh=5,  # 5 px
        min_track_len=3,  # at least 3 measurements required
        mode=TriangulationParam.RANSAC_TOPK_BASELINES,
        num_ransac_hypotheses=20,
    )
    expected_sfm_data, expected_metrics = da.run(len(cameras), cameras, matches_dict, keypoints_list)

    # Run with the computation graph.
    delayed_sfm_data, delayed_metrics = da.create_computation_graph(
        len(cameras),
        cameras,
        matches_dict,
        keypoints_list,
    )

    with dask.config.set(scheduler="single-threaded"):
        dask_sfm_data, dask_metrics = dask.compute(delayed_sfm_data, delayed_metrics)

    assert expected_sfm_data.number_tracks() == dask_sfm_data.number_tracks(), "Dask not configured correctly"

    self.assertDictEqual(dask_metrics, expected_metrics)

    for k in range(expected_sfm_data.number_tracks()):
        assert (
            expected_sfm_data.get_track(k).number_measurements()
            == dask_sfm_data.get_track(k).number_measurements()
        ), "Dask tracks incorrect"

        # check that the measurements in both results are equal
        np.testing.assert_array_almost_equal(
            expected_sfm_data.get_track(k).measurement(0)[1],
            dask_sfm_data.get_track(k).measurement(0)[1],
            decimal=1,
            err_msg="Dask measurements incorrect",
        )

    for i in expected_sfm_data.get_valid_camera_indices():
        self.gtsamAssertEquals(expected_sfm_data.get_camera(i), cameras.get(i))

def verify_triangulation_sharedCal_3poses(self, triangulation_mode: TriangulationSamplingMode):
    """Tests that the triangulation is accurate for shared calibration with a specified triangulation mode.

    Checks whether the SfmData formed from 3 measurements is valid. The noise vectors represent the amount
    of noise to be added to measurements.
    """
    keypoints_list, _, cameras = generate_noisy_2d_measurements(
        world_point=self.expected_landmark,
        calibrations=[self.sharedCal, self.sharedCal, self.sharedCal],
        per_image_noise_vecs=np.array([[-0.1, -0.5], [-0.2, 0.3], [0.1, -0.1]]),
        poses=get_pose3_vector(num_poses=3),
    )

    # create matches
    # since there is only one measurement in each image, both are assigned feature index 0
    matches_dict = {(0, 1): np.array([[0, 0]]), (1, 2): np.array([[0, 0]])}

    triangulation_options = TriangulationOptions(
        reproj_error_threshold=5, mode=triangulation_mode, min_num_hypotheses=20
    )
    da = DataAssociation(min_track_len=3, triangulation_options=triangulation_options)
    sfm_data, _ = da.run(
        len(cameras),
        cameras,
        matches_dict,
        keypoints_list,
        cameras_gt=[None] * len(cameras),
        relative_pose_priors={},
    )

    # check if the computed 3D point is as expected
    estimated_landmark = sfm_data.get_track(0).point3()
    self.gtsamAssertEquals(estimated_landmark, self.expected_landmark, 1e-2)

    # check that the number of tracks is as expected: should be just 1, over all 3 cameras
    assert sfm_data.number_tracks() == 1, "more tracks than expected"

    # check if cameras saved to the result are as expected
    for i in cameras.keys():
        self.gtsamAssertEquals(sfm_data.get_camera(i), cameras.get(i))

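# Illustrative usage sketch (not from the original file): a concrete test exercising the 3-pose helper
# above. TriangulationSamplingMode.NO_RANSAC is taken from the missing-camera test below; any other
# sampling mode supported by TriangulationOptions could be substituted. The method name is hypothetical.
def test_triangulation_sharedCal_3poses_without_ransac(self):
    """Example invocation of verify_triangulation_sharedCal_3poses without RANSAC-based triangulation."""
    self.verify_triangulation_sharedCal_3poses(TriangulationSamplingMode.NO_RANSAC)
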
def test_data_association_with_missing_camera(self):
    """Tests data association on input tracks that reference a camera index for which no camera exists."""
    triangulation_options = TriangulationOptions(
        reproj_error_threshold=5, mode=TriangulationSamplingMode.NO_RANSAC, min_num_hypotheses=20
    )
    da = DataAssociation(min_track_len=3, triangulation_options=triangulation_options)

    # add cameras 0 and 2 (camera 1 is intentionally missing)
    cameras = {
        0: PinholeCameraCal3Bundler(Pose3(Rot3.RzRyRx(0, np.deg2rad(20), 0), np.zeros((3, 1)))),
        2: PinholeCameraCal3Bundler(Pose3(Rot3.RzRyRx(0, 0, 0), np.array([10, 0, 0]))),
    }

    # just have one track, chaining cams 0->1 and cams 1->2
    corr_idxs_dict = {
        (0, 1): np.array([[0, 0]], dtype=np.int32),
        (1, 2): np.array([[0, 0]], dtype=np.int32),
    }
    keypoints_shared = Keypoints(coordinates=np.array([[20.0, 10.0]]))

    # will lead to a cheirality exception because keypoints are identical in the two cameras
    # no track will be formed, and thus the connected component will be empty
    sfm_data, _ = da.run(
        num_images=3,
        cameras=cameras,
        corr_idxs_dict=corr_idxs_dict,
        keypoints_list=[keypoints_shared] * 3,
        cameras_gt=[None] * 3,
        relative_pose_priors={},
    )
    self.assertEqual(len(sfm_data.get_valid_camera_indices()), 0)
    self.assertEqual(sfm_data.number_tracks(), 0)