def test_lund_door(self):
    """Run translation averaging on the Lund door set and compare the
    recovered global poses against the loader's ground truth."""
    loader = FolderLoader(str(DATA_ROOT_PATH / "set1_lund_door"), image_extension="JPG")

    # Ground-truth global poses and their rotation components.
    expected_wTi_list = [loader.get_camera_pose(idx) for idx in range(len(loader))]
    wRi_list = [wTi.rotation() for wTi in expected_wTi_list]

    # Derive the relative unit-translation input for every valid image pair.
    i2Ui1_dict = {}
    for i1, i2 in loader.get_valid_pairs():
        i2Ti1 = expected_wTi_list[i2].between(expected_wTi_list[i1])
        i2Ui1_dict[(i1, i2)] = Unit3(i2Ti1.translation())

    wti_list = self.obj.run(len(loader), i2Ui1_dict, wRi_list)
    wTi_list = [
        Pose3(wRi, wti) if wti is not None else None
        for wRi, wti in zip(wRi_list, wti_list)
    ]

    # TODO: using a v high value for translation relative threshold. Fix it
    self.assertTrue(
        geometry_comparisons.compare_global_poses(
            wTi_list, expected_wTi_list, trans_err_thresh=2e1
        )
    )
def test_get_camera_pose_missing(self):
    """Tests that the camera pose is None, because it is missing on disk."""
    loader = FolderLoader(str(NO_EXTRINSICS_FOLDER), image_extension="JPG")
    # no extrinsics file exists for this dataset, so the getter must yield None
    self.assertIsNone(loader.get_camera_pose(5))
def test_get_camera_intrinsics_missing(self):
    """Tests that intrinsics are None when neither explicit numpy arrays
    nor EXIF data are available on disk.

    The original docstring claimed we "fall back on exif", but this dataset
    has no EXIF either, which is why the getter returns None.
    """
    loader = FolderLoader(NO_EXIF_FOLDER, image_extension="JPG")
    computed = loader.get_camera_intrinsics(5)
    self.assertIsNone(computed)
def setUp(self) -> None:
    """Initialize the loader over the Argoverse ring_front_center sequence."""
    data_dir = (
        TEST_DATA_ROOT_PATH
        / "argoverse"
        / "train1"
        / "273c1883-673a-36bf-b124-88311b1a80be"
        / "ring_front_center"
    )
    self.loader = FolderLoader(str(data_dir), image_extension="jpg")
    # sanity-check that the fixture images are actually present
    assert len(self.loader)
def test_get_camera_intrinsics_exif(self):
    """Tests getter for intrinsics when explicit numpy arrays are absent and we
    fall back on exif."""
    computed = FolderLoader(EXIF_FOLDER, image_extension="JPG").get_camera_intrinsics(5)
    expected = Cal3Bundler(fx=2378.983, k1=0, k2=0, u0=648.0, v0=968.0)
    self.assertTrue(expected.equals(computed, 1e-3))
class TestDetectorBase(unittest.TestCase):
    """Main test class for detector base class in frontend."""

    def setUp(self):
        super().setUp()
        self.detector = DummyDetector()
        self.loader = FolderLoader(TEST_DATA_PATH, image_extension="JPG")

    def test_number_of_detections(self):
        """Tests that the number of detections is less than the maximum number
        configured."""
        keypoints = self.detector.detect(self.loader.get_image(0))
        self.assertLessEqual(len(keypoints), self.detector.max_keypoints)

    def test_coordinates_range(self):
        """Tests that each coordinate is within the image bounds."""
        image = self.loader.get_image(0)
        keypoints = self.detector.detect(image)
        xs = keypoints.coordinates[:, 0]
        ys = keypoints.coordinates[:, 1]
        np.testing.assert_array_equal(xs >= 0, True)
        np.testing.assert_array_equal(xs <= image.width, True)
        np.testing.assert_array_equal(ys >= 0, True)
        np.testing.assert_array_equal(ys <= image.height, True)

    def test_scale(self):
        """Tests that the scales are positive."""
        keypoints = self.detector.detect(self.loader.get_image(0))
        np.testing.assert_array_equal(keypoints.scales >= 0, True)

    def test_computation_graph(self):
        """Test the dask's computation graph formation using a single image."""
        idx_under_test = 0
        image_graph = self.loader.create_computation_graph_for_images()[idx_under_test]
        keypoints_graph = self.detector.create_computation_graph(image_graph)
        with dask.config.set(scheduler="single-threaded"):
            keypoints = dask.compute(keypoints_graph)[0]

        # check the results via normal workflow and dask workflow for an image
        expected_keypoints = self.detector.detect(self.loader.get_image(0))
        self.assertEqual(keypoints, expected_keypoints)

    def test_pickleable(self):
        """Tests that the detector object is pickleable (required for dask)."""
        try:
            pickle.dumps(self.detector)
        except TypeError:
            self.fail("Cannot dump detector using pickle")
def testSimpleTriangulationOnDoorDataset(self):
    """Test the tracks of the door dataset using simple triangulation
    initialization, with computed tracks and ground-truth camera params.

    Expecting failures on 2 tracks which have incorrect matches."""
    with open(DOOR_TRACKS_PATH, "rb") as handle:
        tracks = pickle.load(handle)

    loader = FolderLoader(DOOR_DATASET_PATH, image_extension="JPG")

    # Build a ground-truth camera for every image in the dataset.
    camera_dict = {
        i: PinholeCameraCal3Bundler(
            loader.get_camera_pose(i), loader.get_camera_intrinsics(i)
        )
        for i in range(len(loader))
    }

    initializer = Point3dInitializer(
        camera_dict, TriangulationParam.NO_RANSAC, reproj_error_thresh=1e5
    )

    # tracks which have expected failures
    # (both tracks have incorrect measurements)
    expected_failures = [
        SfmTrack2d(
            measurements=[
                SfmMeasurement(i=1, uv=np.array([1252.22729492, 1487.29431152])),
                SfmMeasurement(i=2, uv=np.array([1170.96679688, 1407.35876465])),
                SfmMeasurement(i=4, uv=np.array([263.32104492, 1489.76965332])),
            ]
        ),
        SfmTrack2d(
            measurements=[
                SfmMeasurement(i=6, uv=np.array([1142.34545898, 735.92169189])),
                SfmMeasurement(i=7, uv=np.array([1179.84155273, 763.04095459])),
                SfmMeasurement(i=9, uv=np.array([216.54107666, 774.74017334])),
            ]
        ),
    ]

    for track_2d in tracks:
        if initializer.triangulate(track_2d) is None:
            # assert we have failures which are already expected
            self.assertIn(track_2d, expected_failures)
def run_scene_optimizer() -> None:
    """Run the SceneOptimizer end-to-end on the Lund door dataset using a
    local dask cluster, writing a dask performance report."""
    with initialize_config_module(config_module="gtsfm.configs"):
        # config is relative to the gtsfm module
        cfg = compose(config_name="default_lund_door_set1_config.yaml")
        scene_optimizer: SceneOptimizer = instantiate(cfg.SceneOptimizer)

        loader = FolderLoader(
            os.path.join(DATA_ROOT, "set1_lund_door"), image_extension="JPG"
        )

        sfm_result_graph = scene_optimizer.create_computation_graph(
            len(loader),
            loader.get_valid_pairs(),
            loader.create_computation_graph_for_images(),
            loader.create_computation_graph_for_intrinsics(),
            use_intrinsics_in_verification=True,
            gt_pose_graph=loader.create_computation_graph_for_poses(),
        )

        # create dask client
        cluster = LocalCluster(n_workers=2, threads_per_worker=4)

        with Client(cluster), performance_report(filename="dask-report.html"):
            sfm_result = sfm_result_graph.compute()

        assert isinstance(sfm_result, SfmResult)
def setUp(self):
    """Construct the folder loader over the default test-data folder."""
    super().setUp()
    folder_path = str(DEFAULT_FOLDER)
    self.loader = FolderLoader(folder_path, image_extension="JPG")
class TestFolderLoader(unittest.TestCase):
    """Unit tests for folder loader, which loads image from a folder on disk."""

    def setUp(self):
        """Set up the loader for the test."""
        super().setUp()
        self.loader = FolderLoader(str(DEFAULT_FOLDER), image_extension="JPG")

    def test_len(self):
        """Test the number of entries in the loader."""
        self.assertEqual(12, len(self.loader))

    def test_get_image_valid_index(self):
        """Tests that get_image works for all valid indices."""
        for idx in range(len(self.loader)):
            self.assertIsNotNone(self.loader.get_image(idx))

    def test_get_image_invalid_index(self):
        """Test that get_image raises an exception on an invalid index."""
        # negative index
        with self.assertRaises(IndexError):
            self.loader.get_image(-1)
        # len() as index
        with self.assertRaises(IndexError):
            self.loader.get_image(12)
        # index > len()
        with self.assertRaises(IndexError):
            self.loader.get_image(15)

    def test_image_contents(self):
        """Test the actual image which is being fetched by the loader at an index.

        This test's primary purpose is to check if the ordering of filename is
        being respected by the loader.
        """
        index_to_test = 5
        file_path = DEFAULT_FOLDER / "images" / "DSC_0006.JPG"
        loader_image = self.loader.get_image(index_to_test)
        expected_image = io_utils.load_image(file_path)
        np.testing.assert_allclose(expected_image.value_array, loader_image.value_array)

    def test_get_camera_pose_exists(self):
        """Tests that the correct pose is fetched (present on disk)."""
        fetched_pose = self.loader.get_camera_pose(5)
        expected_pose = Pose3(
            np.array(
                [
                    [0.9387, 0.0592, 0.3510, -4.5075],
                    [-0.0634, 1.0043, -0.01437, 0.2307],
                    [-0.3618, -0.0227, 0.9362, 1.4820],
                    [0.0, 0.0, 0.0, 1.0],
                ]
            )
        )
        self.assertTrue(expected_pose.equals(fetched_pose, 1e-2))

    def test_get_camera_pose_missing(self):
        """Tests that the camera pose is None, because it is missing on disk."""
        loader = FolderLoader(str(NO_EXTRINSICS_FOLDER), image_extension="JPG")
        fetched_pose = loader.get_camera_pose(5)
        self.assertIsNone(fetched_pose)

    def test_get_camera_intrinsics_explicit(self):
        """Tests getter for intrinsics when explicit numpy arrays with
        intrinsics are present on disk."""
        computed = self.loader.get_camera_intrinsics(5)
        expected = Cal3Bundler(fx=2378.983, k1=0, k2=0, u0=968.0, v0=648.0)
        self.assertTrue(expected.equals(computed, 1e-3))

    def test_get_camera_intrinsics_exif(self):
        """Tests getter for intrinsics when explicit numpy arrays are absent
        and we fall back on exif."""
        loader = FolderLoader(EXIF_FOLDER, image_extension="JPG")
        computed = loader.get_camera_intrinsics(5)
        expected = Cal3Bundler(fx=2378.983, k1=0, k2=0, u0=648.0, v0=968.0)
        self.assertTrue(expected.equals(computed, 1e-3))

    def test_get_camera_intrinsics_missing(self):
        """Tests that intrinsics are None when neither explicit numpy arrays
        nor EXIF data are available on disk."""
        # NOTE: docstring previously claimed an EXIF fallback, but this folder
        # has no EXIF either, which is why None is expected.
        loader = FolderLoader(NO_EXIF_FOLDER, image_extension="JPG")
        computed = loader.get_camera_intrinsics(5)
        self.assertIsNone(computed)

    def test_create_computation_graph_for_images(self):
        """Tests the graph for loading all the images."""
        image_graph = self.loader.create_computation_graph_for_images()
        # check the length of the graph
        self.assertEqual(12, len(image_graph))

        results = dask.compute(image_graph)[0]

        # randomly check image loads from a few indices
        np.testing.assert_allclose(results[5].value_array, self.loader.get_image(5).value_array)
        np.testing.assert_allclose(results[7].value_array, self.loader.get_image(7).value_array)

    def test_create_computation_graph_for_intrinsics(self):
        """Tests the graph for all intrinsics."""
        intrinsics_graph = self.loader.create_computation_graph_for_intrinsics()
        # check the length of the graph
        self.assertEqual(12, len(intrinsics_graph))

        results = dask.compute(intrinsics_graph)[0]

        # randomly check intrinsics from a few indices
        self.assertTrue(self.loader.get_camera_intrinsics(5).equals(results[5], 1e-5))
        self.assertTrue(self.loader.get_camera_intrinsics(7).equals(results[7], 1e-5))
def setUp(self):
    """Create a dummy detector and a loader over the test-data folder."""
    super().setUp()
    self.loader = FolderLoader(TEST_DATA_PATH, image_extension="JPG")
    self.detector = DummyDetector()
def setUp(self) -> None:
    """Create a loader over the Lund door dataset and verify it is non-empty."""
    dataset_dir = DATA_ROOT_PATH / "set1_lund_door"
    self.loader = FolderLoader(str(dataset_dir), image_extension="JPG")
    # fail fast if the fixture data is missing
    assert len(self.loader)
class TestSceneOptimizer(unittest.TestCase):
    """Unit test for SceneOptimizer, which runs SfM for a scene."""

    def setUp(self) -> None:
        self.loader = FolderLoader(str(DATA_ROOT_PATH / "set1_lund_door"), image_extension="JPG")
        # fail fast if the fixture data is missing
        assert len(self.loader)

    def test_find_largest_connected_component(self):
        """Tests the function to prune the scene graph to its largest connected
        component."""
        # create a graph with two connected components of length 4 and 3.
        input_essential_matrices = {
            (0, 1): generate_random_essential_matrix(),
            (1, 5): None,
            (3, 1): generate_random_essential_matrix(),
            (3, 2): generate_random_essential_matrix(),
            (2, 7): None,
            (4, 6): generate_random_essential_matrix(),
            (6, 7): generate_random_essential_matrix(),
        }

        # generate Rot3 and Unit3 inputs
        input_relative_rotations = {}
        input_relative_unit_translations = {}
        for pair, i2Ei1 in input_essential_matrices.items():
            if i2Ei1 is None:
                input_relative_rotations[pair] = None
                input_relative_unit_translations[pair] = None
            else:
                input_relative_rotations[pair] = i2Ei1.rotation()
                input_relative_unit_translations[pair] = i2Ei1.direction()

        expected_edges = [(0, 1), (3, 2), (3, 1)]

        (
            computed_relative_rotations,
            computed_relative_unit_translations,
        ) = select_largest_connected_component(
            input_relative_rotations, input_relative_unit_translations
        )

        # check the edges in the pruned graph
        self.assertCountEqual(list(computed_relative_rotations.keys()), expected_edges)
        self.assertCountEqual(list(computed_relative_unit_translations.keys()), expected_edges)

        # check the actual Rot3 and Unit3 values
        for edge in expected_edges:
            self.assertTrue(
                computed_relative_rotations[edge].equals(input_relative_rotations[edge], 1e-2)
            )
            self.assertTrue(
                computed_relative_unit_translations[edge].equals(
                    input_relative_unit_translations[edge], 1e-2
                )
            )

    def test_create_computation_graph(self):
        """Will test Dask multi-processing capabilities and ability to
        serialize all objects."""
        use_intrinsics_in_verification = False

        with initialize_config_module(config_module="gtsfm.configs"):
            # config is relative to the gtsfm module
            cfg = compose(config_name="scene_optimizer_unit_test_config.yaml")
            self.obj: SceneOptimizer = instantiate(cfg.SceneOptimizer)

            # generate the dask computation graph
            sfm_result_graph = self.obj.create_computation_graph(
                len(self.loader),
                self.loader.get_valid_pairs(),
                self.loader.create_computation_graph_for_images(),
                self.loader.create_computation_graph_for_intrinsics(),
                use_intrinsics_in_verification=use_intrinsics_in_verification,
            )

            # create dask client
            cluster = LocalCluster(n_workers=1, threads_per_worker=4)

            with Client(cluster):
                sfm_result = dask.compute(sfm_result_graph)[0]

            self.assertIsInstance(sfm_result, SfmResult)

            # compare the camera poses
            poses = sfm_result.get_camera_poses()
            expected_poses = [self.loader.get_camera_pose(i) for i in range(len(self.loader))]
            self.assertTrue(comp_utils.compare_global_poses(poses, expected_poses))
class TestFrontend(unittest.TestCase):
    """Tests a combined FeatureExtractor and TwoViewEstimator using an Argoverse image pair."""

    def setUp(self) -> None:
        """Set up a loader over the Argoverse ring_front_center image sequence."""
        self.loader = FolderLoader(
            str(TEST_DATA_ROOT_PATH / "argoverse" / "train1" /
                "273c1883-673a-36bf-b124-88311b1a80be" / "ring_front_center"),
            image_extension="jpg",
        )
        # fail fast if the fixture data is missing
        assert len(self.loader)

    def __get_frontend_computation_graph(
        self,
        feature_extractor: FeatureExtractor,
        two_view_estimator: TwoViewEstimator,
    ) -> Tuple[Delayed, Delayed]:
        """Copied from SceneOptimizer class, without back-end code.

        Builds delayed keypoint/descriptor extraction for every image, then
        delayed two-view geometry estimation for every valid image pair.

        Returns:
            dict of delayed relative rotations keyed by image pair, and
            dict of delayed relative unit translations keyed by image pair.
        """
        image_pair_indices = self.loader.get_valid_pairs()
        image_graph = self.loader.create_computation_graph_for_images()
        camera_intrinsics_graph = self.loader.create_computation_graph_for_intrinsics()
        use_intrinsics_in_verification = True

        # detection and description graph
        keypoints_graph_list = []
        descriptors_graph_list = []
        for delayed_image in image_graph:
            (
                delayed_dets,
                delayed_descs,
            ) = feature_extractor.create_computation_graph(delayed_image)
            keypoints_graph_list += [delayed_dets]
            descriptors_graph_list += [delayed_descs]

        # estimate two-view geometry and get indices of verified correspondences.
        i2Ri1_graph_dict = {}
        i2Ui1_graph_dict = {}
        for (i1, i2) in image_pair_indices:
            # only the rotation and unit translation outputs are used here;
            # the remaining four outputs of the estimator are discarded
            (i2Ri1, i2Ui1, _, _, _, _) = two_view_estimator.create_computation_graph(
                keypoints_graph_list[i1],
                keypoints_graph_list[i2],
                descriptors_graph_list[i1],
                descriptors_graph_list[i2],
                camera_intrinsics_graph[i1],
                camera_intrinsics_graph[i2],
                use_intrinsics_in_verification,
            )
            i2Ri1_graph_dict[(i1, i2)] = i2Ri1
            i2Ui1_graph_dict[(i1, i2)] = i2Ui1

        return i2Ri1_graph_dict, i2Ui1_graph_dict

    def test_sift_twoway_ransac(self):
        """Check DoG + SIFT + 2-way Matcher + RANSAC-5pt frontend."""
        det_desc = SIFTDetectorDescriptor()
        feature_extractor = FeatureExtractor(det_desc)
        two_view_estimator = TwoViewEstimator(
            matcher=TwoWayMatcher(),
            verifier=Ransac(),
            corr_metric_dist_threshold=0.1,
        )
        self.__compare_frontend_result_error(
            feature_extractor,
            two_view_estimator,
            euler_angle_err_tol=1.4,
            translation_err_tol=0.026,
        )

    def test_sift_twoway_degensac(self):
        """Check DoG + SIFT + 2-way Matcher + DEGENSAC-8pt frontend."""
        det_desc = SIFTDetectorDescriptor()
        feature_extractor = FeatureExtractor(det_desc)
        two_view_estimator = TwoViewEstimator(
            matcher=TwoWayMatcher(),
            verifier=Degensac(),
            corr_metric_dist_threshold=0.1,
        )
        self.__compare_frontend_result_error(
            feature_extractor,
            two_view_estimator,
            euler_angle_err_tol=0.95,
            translation_err_tol=0.03,
        )

    def __compare_frontend_result_error(
        self,
        feature_extractor: FeatureExtractor,
        two_view_estimator: TwoViewEstimator,
        euler_angle_err_tol: float,
        translation_err_tol: float,
    ) -> None:
        """Compare recovered relative rotation and translation with ground truth.

        Args:
            feature_extractor: frontend feature extractor under test.
            two_view_estimator: frontend two-view estimator under test.
            euler_angle_err_tol: absolute tolerance (degrees) for the recovered
                Euler angles vs. ground truth.
            translation_err_tol: absolute tolerance for the recovered relative
                translation vs. ground truth.
        """
        (
            i2Ri1_graph_dict,
            i2Ui1_graph_dict,
        ) = self.__get_frontend_computation_graph(feature_extractor, two_view_estimator)

        with dask.config.set(scheduler="single-threaded"):
            i2Ri1_results, i2ti1_results = dask.compute(
                i2Ri1_graph_dict, i2Ui1_graph_dict)

        # only the (0, 1) image pair is checked against ground truth
        i2Ri1 = i2Ri1_results[(0, 1)]
        i2Ui1 = i2ti1_results[(0, 1)]

        # Ground truth is provided in inverse format, so invert SE(3) object
        i2Ti1 = Pose3(i2Ri1, i2Ui1.point3())
        i1Ti2 = i2Ti1.inverse()
        i1ti2 = i1Ti2.translation()
        i1Ri2 = i1Ti2.rotation().matrix()

        euler_angles = Rotation.from_matrix(i1Ri2).as_euler("zyx", degrees=True)
        gt_euler_angles = np.array([-0.37, 32.47, -0.42])
        np.testing.assert_allclose(gt_euler_angles, euler_angles, atol=euler_angle_err_tol)

        gt_i1ti2 = np.array([0.21, -0.0024, 0.976])
        np.testing.assert_allclose(gt_i1ti2, i1ti2, atol=translation_err_tol)
def setUp(self):
    """Create the dummy descriptor and a loader over the test images."""
    self.loader = FolderLoader(str(TEST_DATA_PATH), image_extension="JPG")
    self.descriptor = DummyDescriptor()
class TestDescriptorBase(unittest.TestCase):
    """Unit tests for the DescriptorBase class.

    Should be inherited by all descriptor unit tests.
    """

    def setUp(self):
        self.descriptor = DummyDescriptor()
        self.loader = FolderLoader(str(TEST_DATA_PATH), image_extension="JPG")

    def test_result_size(self):
        """Check if the number of descriptors are same as number of features."""
        image = self.loader.get_image(0)
        keypoints = Keypoints(
            coordinates=np.random.randint(
                low=[0, 0],
                high=[image.width, image.height],
                size=(5, 2),
            )
        )
        descriptors = self.descriptor.describe(image, keypoints)
        self.assertEqual(len(keypoints), descriptors.shape[0])

    def test_with_no_features(self):
        """Checks that empty feature inputs works well."""
        image = self.loader.get_image(0)
        empty_keypoints = Keypoints(coordinates=np.array([]))
        descriptors = self.descriptor.describe(image, empty_keypoints)
        self.assertEqual(0, descriptors.size)

    def test_create_computation_graph(self):
        """Checks the dask computation graph."""
        # testing some indices
        for idx in [0, 5]:
            test_image = self.loader.get_image(idx)
            test_keypoints = Keypoints(
                coordinates=np.random.randint(
                    low=[0, 0],
                    high=[test_image.width, test_image.height],
                    size=(np.random.randint(5, 10), 2),
                )
            )

            descriptor_graph = self.descriptor.create_computation_graph(
                dask.delayed(test_image),
                dask.delayed(test_keypoints),
            )
            with dask.config.set(scheduler="single-threaded"):
                computed_descriptors = dask.compute(descriptor_graph)[0]

            # results from the dask path must match the direct call
            expected_descriptors = self.descriptor.describe(test_image, test_keypoints)
            np.testing.assert_allclose(computed_descriptors, expected_descriptors)

    def test_pickleable(self):
        """Tests that the descriptor is pickleable (required for dask)."""
        try:
            pickle.dumps(self.descriptor)
        except TypeError:
            self.fail("Cannot dump descriptor using pickle")