def values_to_sfm_data(values: Values, initial_data: SfmData) -> SfmData:
    """Cast results from the optimization to an SfmData object.

    Args:
        values: results of factor graph optimization.
        initial_data: data used to generate the factor graph; used to extract information about
            poses and 3d points in the graph.

    Returns:
        optimized poses and landmarks.
    """
    result = SfmData()

    # add cameras
    for i in range(initial_data.number_cameras()):
        result.add_camera(values.atPinholeCameraCal3Bundler(C(i)))

    # add tracks
    for j in range(initial_data.number_tracks()):
        input_track = initial_data.track(j)

        # populate the result with the optimized 3D point
        result_track = SfmTrack(values.atPoint3(P(j)))

        for measurement_idx in range(input_track.number_measurements()):
            i, uv = input_track.measurement(measurement_idx)
            result_track.add_measurement(i, uv)

        result.add_track(result_track)

    return result
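# Usage sketch (an illustration, not part of the source): `C` and `P` above are
# assumed to be gtsam symbol-shorthand generators for camera and landmark keys,
# matching the keys used when the factor graph was built. Given a `graph` and
# `initial_values` constructed by the caller, the conversion plugs in right
# after the optimizer runs:
from gtsam import LevenbergMarquardtOptimizer, NonlinearFactorGraph, Values
from gtsam.symbol_shorthand import C, P  # camera / landmark key generators


def optimize_and_extract(graph: NonlinearFactorGraph, initial_values: Values, initial_data: SfmData) -> SfmData:
    """Run Levenberg-Marquardt on the graph, then cast the result back to SfmData."""
    result_values = LevenbergMarquardtOptimizer(graph, initial_values).optimize()
    return values_to_sfm_data(result_values, initial_data)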
def run(
    self,
    cameras: Dict[int, PinholeCameraCal3Bundler],
    corr_idxs_dict: Dict[Tuple[int, int], np.ndarray],
    keypoints_list: List[Keypoints],
) -> SfmData:
    """Perform the data association.

    Args:
        cameras: dictionary with image index as key, and camera object w/ intrinsics + extrinsics as value.
        corr_idxs_dict: dictionary, with key as image pair (i1,i2) and value as matching keypoint indices.
        keypoints_list: keypoints for each image.

    Returns:
        cameras and tracks as SfmData.
    """
    available_cams = np.array(list(cameras.keys()), dtype=np.uint32)

    # form a few tracks randomly
    tracks = []
    num_tracks = random.randint(5, 10)

    for _ in range(num_tracks):
        # obtain a random 3D point for the track
        point_3d = np.random.rand(3, 1)

        # create GTSAM's SfmTrack object
        sfm_track = SfmTrack(point_3d)

        # randomly select cameras for this track
        selected_cams = np.random.choice(available_cams, self.min_track_len, replace=False)

        # for each selected camera, randomly select a keypoint as the measurement
        for cam_idx in selected_cams:
            measurement_idx = random.randint(0, len(keypoints_list[cam_idx]) - 1)
            measurement = keypoints_list[cam_idx].coordinates[measurement_idx]
            sfm_track.add_measurement(cam_idx, measurement)

        tracks.append(sfm_track)

    # create the final SfmData object
    sfm_data = SfmData()
    for cam in cameras.values():
        sfm_data.add_camera(cam)
    for track in tracks:
        sfm_data.add_track(track)

    return sfm_data
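# A hedged usage sketch: the enclosing class (assumed here to be named
# `DummyDataAssociation` and to take `min_track_len` in its constructor; the
# source does not show either) can be exercised with synthetic inputs. The
# helper `make_random_keypoints` is hypothetical, for illustration only.
import numpy as np
from gtsam import Cal3Bundler, PinholeCameraCal3Bundler, Pose3


def make_random_keypoints(num_points: int, image_h: int = 480, image_w: int = 640) -> Keypoints:
    """Generate uniformly random 2D keypoint coordinates within the image bounds."""
    coords = np.random.uniform(low=[0, 0], high=[image_w, image_h], size=(num_points, 2))
    return Keypoints(coordinates=coords)


cameras = {i: PinholeCameraCal3Bundler(Pose3(), Cal3Bundler()) for i in range(4)}
keypoints_list = [make_random_keypoints(50) for _ in range(4)]
sfm_data = DummyDataAssociation(min_track_len=2).run(cameras, corr_idxs_dict={}, keypoints_list=keypoints_list)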
def test_filter_landmarks(self):
    """Tests filtering of SfmData based on reprojection error."""
    max_reproj_error = 15

    VALID_TRACK_INDICES = [0, 1, 5]

    # construct expected data w/ tracks with reprojection errors below the threshold
    expected_data = SfmData()
    for i in range(EXAMPLE_RESULT.sfm_data.number_cameras()):
        expected_data.add_camera(EXAMPLE_RESULT.sfm_data.camera(i))

    for j in VALID_TRACK_INDICES:
        expected_data.add_track(EXAMPLE_RESULT.sfm_data.track(j))

    # run the fn under test
    filtered_sfm_data = EXAMPLE_RESULT.filter_landmarks(max_reproj_error)

    # compare the SfmData objects
    self.assertTrue(filtered_sfm_data.equals(expected_data, 1e-9))
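# For reference, a minimal sketch of the filtering this test expects; it is an
# assumption about `filter_landmarks`, not the actual implementation. A track is
# kept only if every measurement reprojects within `max_reproj_error` pixels.
import numpy as np


def filter_landmarks_sketch(sfm_data: SfmData, max_reproj_error: float) -> SfmData:
    """Keep all cameras; drop tracks whose worst reprojection error exceeds the threshold."""
    filtered = SfmData()
    for i in range(sfm_data.number_cameras()):
        filtered.add_camera(sfm_data.camera(i))
    for j in range(sfm_data.number_tracks()):
        track = sfm_data.track(j)
        errors = []
        for k in range(track.number_measurements()):
            cam_idx, uv = track.measurement(k)
            uv_reprojected = sfm_data.camera(cam_idx).project(track.point3())
            errors.append(np.linalg.norm(uv_reprojected - uv))
        if max(errors) < max_reproj_error:
            filtered.add_track(track)
    return filtered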
def run(
    self,
    cameras: Dict[int, PinholeCameraCal3Bundler],
    corr_idxs_dict: Dict[Tuple[int, int], np.ndarray],
    keypoints_list: List[Keypoints],
) -> Tuple[SfmData, Dict[str, Any]]:
    """Perform the data association.

    Args:
        cameras: dictionary, with image index -> camera mapping.
        corr_idxs_dict: dictionary, with key as image pair (i1,i2) and value as matching keypoint indices.
        keypoints_list: keypoints for each image.

    Returns:
        cameras and tracks as SfmData, and a dictionary of data association metrics.
    """
    # generate tracks for 3D points using pairwise correspondences
    tracks_2d = SfmTrack2d.generate_tracks_from_pairwise_matches(corr_idxs_dict, keypoints_list)

    # metrics on tracks w/o triangulation check
    num_tracks_2d = len(tracks_2d)
    track_lengths = list(map(lambda x: x.number_measurements(), tracks_2d))
    mean_2d_track_length = np.mean(track_lengths)

    logger.debug("[Data association] input number of tracks: %s", num_tracks_2d)
    logger.debug("[Data association] input avg. track length: %s", mean_2d_track_length)

    # initializer of 3D landmark for each track
    point3d_initializer = Point3dInitializer(
        cameras,
        self.mode,
        self.reproj_error_thresh,
        self.num_ransac_hypotheses,
    )

    num_tracks_w_cheirality_exceptions = 0
    per_accepted_track_avg_errors = []
    per_rejected_track_avg_errors = []

    # form SfmData object after triangulation
    triangulated_data = SfmData()
    for track_2d in tracks_2d:
        # triangulate and filter based on reprojection error
        sfm_track, avg_track_reproj_error, is_cheirality_failure = point3d_initializer.triangulate(track_2d)
        if is_cheirality_failure:
            num_tracks_w_cheirality_exceptions += 1

        if sfm_track is not None and self.__validate_track(sfm_track):
            triangulated_data.add_track(sfm_track)
            per_accepted_track_avg_errors.append(avg_track_reproj_error)
        else:
            per_rejected_track_avg_errors.append(avg_track_reproj_error)

    num_accepted_tracks = triangulated_data.number_tracks()
    accepted_tracks_ratio = num_accepted_tracks / len(tracks_2d)
    track_cheirality_failure_ratio = num_tracks_w_cheirality_exceptions / len(tracks_2d)

    # TODO: improve dropped camera handling
    num_cameras = len(cameras.keys())
    expected_camera_indices = np.arange(num_cameras)

    # add cameras to the SfmData object, verifying that no camera was dropped
    for i, cam in cameras.items():
        if i != expected_camera_indices[i]:
            raise RuntimeError("Some cameras must have been dropped")
        triangulated_data.add_camera(cam)

    mean_3d_track_length, median_3d_track_length, track_lengths_3d = SfmResult(
        triangulated_data, None
    ).get_track_length_statistics()

    logger.debug("[Data association] output number of tracks: %s", num_accepted_tracks)
    logger.debug("[Data association] output avg. track length: %s", mean_3d_track_length)

    # dump the 3d point cloud before bundle adjustment for offline visualization
    points_3d = [list(triangulated_data.track(j).point3()) for j in range(num_accepted_tracks)]

    # bin edges are halfway between each integer
    track_lengths_histogram, _ = np.histogram(track_lengths_3d, bins=np.linspace(-0.5, 10.5, 12))

    # the min possible track length is 2; tracks longer than 10 are improbable
    histogram_dict = {f"num_len_{i}_tracks": int(track_lengths_histogram[i]) for i in range(2, 11)}

    data_assoc_metrics = {
        "mean_2d_track_length": np.round(mean_2d_track_length, 3),
        "accepted_tracks_ratio": np.round(accepted_tracks_ratio, 3),
        "track_cheirality_failure_ratio": np.round(track_cheirality_failure_ratio, 3),
        "num_accepted_tracks": num_accepted_tracks,
        "3d_tracks_length": {
            "median": median_3d_track_length,
            "mean": mean_3d_track_length,
            "min": int(track_lengths_3d.min()),
            "max": int(track_lengths_3d.max()),
            "track_lengths_histogram": histogram_dict,
        },
        "mean_accepted_track_avg_error": np.array(per_accepted_track_avg_errors).mean(),
        "per_rejected_track_avg_errors": per_rejected_track_avg_errors,
        "per_accepted_track_avg_errors": per_accepted_track_avg_errors,
        "points_3d": points_3d,
    }

    return triangulated_data, data_assoc_metrics
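# A note on the binning above: np.linspace(-0.5, 10.5, 12) produces 11 unit-width
# bins centered on the integers 0..10, so `track_lengths_histogram[i]` counts
# exactly the tracks of length i. A quick standalone check (illustrative only):
import numpy as np

lengths = np.array([2, 2, 3, 5, 5, 5, 10])
counts, _ = np.histogram(lengths, bins=np.linspace(-0.5, 10.5, 12))
assert counts[2] == 2 and counts[5] == 3 and counts[10] == 1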