Example #1
    def setUp(self):
        super().setUp()

        self.simple_triangulation_initializer = Point3dInitializer(
            CAMERAS, TriangulationParam.NO_RANSAC, reproj_error_thresh=5)

        self.ransac_uniform_sampling_initializer = Point3dInitializer(
            CAMERAS,
            TriangulationParam.RANSAC_SAMPLE_UNIFORM,
            reproj_error_thresh=5,
            num_ransac_hypotheses=100,
        )
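As a minimal sketch (not part of the original test class), the two initializers set up above could be exercised like this, assuming the MEASUREMENTS fixture from this test module and that, in this API version, triangulate() returns the 3D track directly (or None on failure), as in Examples #3 to #7:

    def testBothInitializersOnInlierTrack(self):
        # Hypothetical test: triangulate an all-inlier track with both configurations.
        track_2d = SfmTrack2d(MEASUREMENTS)

        simple_track = self.simple_triangulation_initializer.triangulate(track_2d)
        ransac_track = self.ransac_uniform_sampling_initializer.triangulate(track_2d)

        # Both should succeed and recover (approximately) the same landmark.
        self.assertIsNotNone(simple_track)
        self.assertIsNotNone(ransac_track)
        np.testing.assert_allclose(simple_track.point3(), ransac_track.point3(), atol=1e-2)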
Example #2
def classify_tracks2d_with_gt_cameras(
    tracks: List[SfmTrack2d], cameras_gt: List[PinholeCameraCal3Bundler], reproj_error_thresh_px: float = 3
) -> List[TriangulationExitCode]:
    """Classifies the 2D tracks w.r.t ground truth cameras by performing triangulation and collecting exit codes.

    Args:
        tracks: list of 2d tracks.
        cameras_gt: cameras with GT params.
        reproj_error_thresh_px (optional): Reprojection error threshold (in pixels) for a track to be considered an
                                           all-inlier one. Defaults to 3.

    Returns:
        The triangulation exit code for each input track, as a list of the same length as `tracks`.
    """
    # do a simple triangulation with the GT cameras
    cameras_dict: Dict[int, PinholeCameraCal3Bundler] = {i: cam for i, cam in enumerate(cameras_gt)}
    point3d_initializer = Point3dInitializer(
        track_camera_dict=cameras_dict,
        options=TriangulationOptions(
            reproj_error_threshold=reproj_error_thresh_px, mode=TriangulationSamplingMode.NO_RANSAC
        ),
    )

    exit_codes: List[TriangulationExitCode] = []
    for track in tracks:
        _, _, triangulation_exit_code = point3d_initializer.triangulate(track_2d=track)
        exit_codes.append(triangulation_exit_code)

    return exit_codes
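A hedged usage sketch of this helper: it assumes classify_tracks2d_with_gt_cameras is available in the current module (its import path is not shown here), and the Counter summary mirrors how the exit codes are tallied in Example #11:

from collections import Counter


def summarize_exit_codes(tracks_2d, cameras_gt):
    """Triangulate each 2D track with GT cameras and tally the resulting exit codes."""
    # classify_tracks2d_with_gt_cameras is defined above; adjust the import to the actual module layout if needed.
    exit_codes = classify_tracks2d_with_gt_cameras(
        tracks=tracks_2d, cameras_gt=cameras_gt, reproj_error_thresh_px=3
    )
    # Maps each TriangulationExitCode to the number of tracks that ended with it.
    return Counter(exit_codes)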
Example #3
    def __runWithCheiralityException(self, obj: Point3dInitializer) -> bool:
        """Run the initialization in a a-cheiral setup, and check that the result is a None track."""

        cameras = obj.track_camera_dict

        # flip the cameras first
        yaw = np.pi
        camera_flip_pose = Pose3(Rot3.RzRyRx(yaw, 0, 0), np.zeros((3, 1)))
        flipped_cameras = {
            i: PinholeCameraCal3Bundler(cam.pose().compose(camera_flip_pose),
                                        cam.calibration())
            for i, cam in cameras.items()
        }

        obj_with_flipped_cameras = Point3dInitializer(
            flipped_cameras,
            obj.mode,
            obj.reproj_error_thresh,
            obj.num_ransac_hypotheses,
        )

        sfm_track = obj_with_flipped_cameras.triangulate(
            SfmTrack2d(MEASUREMENTS))

        return sfm_track is None
Example #4
    def __runWithSingleOutlier(self, obj: Point3dInitializer) -> bool:
        """Run the initialization for a track with all inlier measurements except one, and checks for correctness of
        the estimated point."""

        sfm_track = obj.triangulate(SfmTrack2d(get_track_with_one_outlier()))
        point3d = sfm_track.point3()

        return np.array_equal(point3d, LANDMARK_POINT)
Example #5
    def __runWithTwoMeasurements(self, obj: Point3dInitializer) -> bool:
        """Run the initialization with a track with all correct measurements, and checks for correctness of the
        recovered 3D point."""

        sfm_track = obj.triangulate(SfmTrack2d(MEASUREMENTS[:2]))
        point3d = sfm_track.point3()

        return np.allclose(point3d, LANDMARK_POINT)
Example #6
    def __runWithDuplicateMeasurements(self, obj: Point3dInitializer) -> bool:
        """Run the initialization for a track with all inlier measurements except one, and checks for correctness of
        the estimated point."""

        sfm_track = obj.triangulate(
            SfmTrack2d(get_track_with_duplicate_measurements()))
        point3d = sfm_track.point3()

        return np.allclose(point3d, LANDMARK_POINT, atol=1, rtol=1e-1)
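The helpers above differ only in how strictly the recovered point is compared to LANDMARK_POINT. A small standalone numpy illustration of the difference between the exact check in Example #4 and the tolerant check used here (the values below are made up):

import numpy as np

landmark = np.array([1.0, 2.0, 3.0])
estimate = np.array([1.05, 2.02, 2.95])  # slightly perturbed estimate

# Exact comparison (as in __runWithSingleOutlier) fails for any deviation.
print(np.array_equal(estimate, landmark))                  # False

# Tolerant comparison (as in __runWithDuplicateMeasurements) accepts per-element
# deviations up to atol + rtol * |landmark|, i.e. 1.1 to 1.3 here.
print(np.allclose(estimate, landmark, atol=1, rtol=1e-1))  # True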
Example #7
    def testSimpleTriangulationOnDoorDataset(self):
        """Test the tracks of the door dataset using simple triangulation initialization. Using computed tracks with
        ground truth camera params.

        Expecting failures on 2 tracks which have incorrect matches."""
        with open(DOOR_TRACKS_PATH, "rb") as handle:
            tracks = pickle.load(handle)

        loader = FolderLoader(DOOR_DATASET_PATH, image_extension="JPG")

        camera_dict = {
            i: PinholeCameraCal3Bundler(loader.get_camera_pose(i),
                                        loader.get_camera_intrinsics(i))
            for i in range(len(loader))
        }

        initializer = Point3dInitializer(camera_dict,
                                         TriangulationParam.NO_RANSAC,
                                         reproj_error_thresh=1e5)

        # tracks which have expected failures
        # (both tracks have incorrect measurements)
        expected_failures = [
            SfmTrack2d(measurements=[
                SfmMeasurement(i=1, uv=np.array([1252.22729492, 1487.29431152])),
                SfmMeasurement(i=2, uv=np.array([1170.96679688, 1407.35876465])),
                SfmMeasurement(i=4, uv=np.array([263.32104492, 1489.76965332])),
            ]),
            SfmTrack2d(measurements=[
                SfmMeasurement(i=6, uv=np.array([1142.34545898, 735.92169189])),
                SfmMeasurement(i=7, uv=np.array([1179.84155273, 763.04095459])),
                SfmMeasurement(i=9, uv=np.array([216.54107666, 774.74017334])),
            ]),
        ]

        for track_2d in tracks:
            triangulated_track = initializer.triangulate(track_2d)

            if triangulated_track is None:
                # assert we have failures which are already expected
                self.assertIn(track_2d, expected_failures)
Example #8
    def run(
        self,
        num_images: int,
        cameras: Dict[int, PinholeCameraCal3Bundler],
        corr_idxs_dict: Dict[Tuple[int, int], np.ndarray],
        keypoints_list: List[Keypoints],
        images: Optional[List[Image]] = None
    ) -> Tuple[GtsfmData, Dict[str, Any]]:
        """Perform the data association.

        Args:
            num_images: Number of images in the scene.
            cameras: dictionary, with image index -> camera mapping.
            corr_idxs_dict: dictionary, with key as image pair (i1,i2) and value as matching keypoint indices.
            keypoints_list: keypoints for each image.
            images: a list of all images in the scene (optional; used only for track patch visualization).

        Returns:
            A tuple of GtsfmData (cameras and tracks) and a dictionary of data association metrics.
        """
        # generate tracks for 3D points using pairwise correspondences
        tracks_2d = SfmTrack2d.generate_tracks_from_pairwise_matches(
            corr_idxs_dict, keypoints_list)

        if self.save_track_patches_viz and images is not None:
            io_utils.save_track_visualizations(tracks_2d,
                                               images,
                                               save_dir=os.path.join(
                                                   "plots", "tracks_2d"))

        # metrics on tracks w/o triangulation check
        num_tracks_2d = len(tracks_2d)
        track_lengths = list(map(lambda x: x.number_measurements(), tracks_2d))
        mean_2d_track_length = np.mean(track_lengths)

        logger.debug("[Data association] input number of tracks: %s",
                     num_tracks_2d)
        logger.debug("[Data association] input avg. track length: %s",
                     mean_2d_track_length)

        # initializer of 3D landmark for each track
        point3d_initializer = Point3dInitializer(
            cameras,
            self.mode,
            self.reproj_error_thresh,
            self.num_ransac_hypotheses,
        )

        num_tracks_w_cheirality_exceptions = 0
        per_accepted_track_avg_errors = []
        per_rejected_track_avg_errors = []

        # form GtsfmData object after triangulation
        triangulated_data = GtsfmData(num_images)

        # add all cameras
        for i, camera in cameras.items():
            triangulated_data.add_camera(i, camera)

        # add valid tracks where triangulation is successful
        for track_2d in tracks_2d:
            # triangulate and filter based on reprojection error
            sfm_track, avg_track_reproj_error, is_cheirality_failure = point3d_initializer.triangulate(
                track_2d)
            if is_cheirality_failure:
                num_tracks_w_cheirality_exceptions += 1

            if avg_track_reproj_error is not None:
                # need no more than 3 significant figures in json report
                avg_track_reproj_error = np.round(avg_track_reproj_error, 3)

            if sfm_track is not None and self.__validate_track(sfm_track):
                triangulated_data.add_track(sfm_track)
                per_accepted_track_avg_errors.append(avg_track_reproj_error)
            else:
                per_rejected_track_avg_errors.append(avg_track_reproj_error)

        track_cheirality_failure_ratio = num_tracks_w_cheirality_exceptions / len(
            tracks_2d)

        # pick only the largest connected component
        connected_data = triangulated_data.select_largest_connected_component()
        num_accepted_tracks = connected_data.number_tracks()
        accepted_tracks_ratio = num_accepted_tracks / len(tracks_2d)

        mean_3d_track_length, median_3d_track_length = connected_data.get_track_length_statistics(
        )
        track_lengths_3d = connected_data.get_track_lengths()

        logger.debug("[Data association] output number of tracks: %s",
                     num_accepted_tracks)
        logger.debug("[Data association] output avg. track length: %s",
                     np.round(mean_3d_track_length, 2))

        # dump the 3d point cloud before Bundle Adjustment for offline visualization
        points_3d = [
            list(connected_data.get_track(j).point3())
            for j in range(num_accepted_tracks)
        ]
        # bin edges are halfway between each integer
        track_lengths_histogram, _ = np.histogram(track_lengths_3d,
                                                  bins=np.linspace(
                                                      -0.5, 10.5, 12))

        # min possible track len is 2, above 10 is improbable
        histogram_dict = {
            f"num_len_{i}_tracks": int(track_lengths_histogram[i])
            for i in range(2, 11)
        }

        data_assoc_metrics = {
            "mean_2d_track_length": np.round(mean_2d_track_length, 3),
            "accepted_tracks_ratio": np.round(accepted_tracks_ratio, 3),
            "track_cheirality_failure_ratio": np.round(track_cheirality_failure_ratio, 3),
            "num_accepted_tracks": num_accepted_tracks,
            "3d_tracks_length": {
                "median": median_3d_track_length,
                "mean": mean_3d_track_length,
                "min": int(track_lengths_3d.min()) if track_lengths_3d.size > 0 else None,
                "max": int(track_lengths_3d.max()) if track_lengths_3d.size > 0 else None,
                "track_lengths_histogram": histogram_dict,
            },
            "mean_accepted_track_avg_error": np.array(per_accepted_track_avg_errors).mean(),
            "per_rejected_track_avg_errors": per_rejected_track_avg_errors,
            "per_accepted_track_avg_errors": per_accepted_track_avg_errors,
            "points_3d": points_3d,
        }

        return connected_data, data_assoc_metrics
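A quick standalone check of the half-integer bin edges used for the track-length histogram above: 12 edges at -0.5, 0.5, ..., 10.5 give 11 bins, each centered on one integer length, so every bin counts tracks of exactly that length (the lengths below are made up):

import numpy as np

track_lengths_3d = np.array([2, 2, 3, 3, 3, 5, 7])  # illustrative lengths only

counts, _ = np.histogram(track_lengths_3d, bins=np.linspace(-0.5, 10.5, 12))
print(counts)  # [0 0 2 3 0 1 0 1 0 0 0]

# Same dict construction as above: lengths below 2 are impossible, above 10 improbable.
print({f"num_len_{i}_tracks": int(counts[i]) for i in range(2, 11)})
# {'num_len_2_tracks': 2, 'num_len_3_tracks': 3, 'num_len_4_tracks': 0, 'num_len_5_tracks': 1, ...}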
Example #9
    def __runWithOneMeasurement(self, obj: Point3dInitializer) -> bool:
        """Run the initialization with a track with all correct measurements, and checks for a None track as a result."""
        sfm_track = obj.triangulate(SfmTrack2d(MEASUREMENTS[:1]))

        return sfm_track is None
Example #10
    def run(
        self,
        cameras: Dict[int, PinholeCameraCal3Bundler],
        corr_idxs_dict: Dict[Tuple[int, int], np.ndarray],
        keypoints_list: List[Keypoints],
    ) -> Tuple[SfmData, Dict[str, Any]]:
        """Perform the data association.

        Args:
            cameras: dictionary, with image index -> camera mapping.
            corr_idxs_dict: dictionary, with key as image pair (i1,i2) and value as matching keypoint indices.
            keypoints_list: keypoints for each image.

        Returns:
            A tuple of SfmData (cameras and tracks) and a dictionary of data association metrics.
        """
        # generate tracks for 3D points using pairwise correspondences
        tracks_2d = SfmTrack2d.generate_tracks_from_pairwise_matches(
            corr_idxs_dict, keypoints_list)

        # metrics on tracks w/o triangulation check
        num_tracks_2d = len(tracks_2d)
        track_lengths = list(map(lambda x: x.number_measurements(), tracks_2d))
        mean_2d_track_length = np.mean(track_lengths)

        logger.debug("[Data association] input number of tracks: %s",
                     num_tracks_2d)
        logger.debug("[Data association] input avg. track length: %s",
                     mean_2d_track_length)

        # initializer of 3D landmark for each track
        point3d_initializer = Point3dInitializer(
            cameras,
            self.mode,
            self.reproj_error_thresh,
            self.num_ransac_hypotheses,
        )

        num_tracks_w_cheirality_exceptions = 0
        per_accepted_track_avg_errors = []
        per_rejected_track_avg_errors = []
        # form SFMdata object after triangulation
        triangulated_data = SfmData()
        for track_2d in tracks_2d:
            # triangulate and filter based on reprojection error
            sfm_track, avg_track_reproj_error, is_cheirality_failure = point3d_initializer.triangulate(
                track_2d)
            if is_cheirality_failure:
                num_tracks_w_cheirality_exceptions += 1

            if sfm_track is not None and self.__validate_track(sfm_track):
                triangulated_data.add_track(sfm_track)
                per_accepted_track_avg_errors.append(avg_track_reproj_error)
            else:
                per_rejected_track_avg_errors.append(avg_track_reproj_error)

        num_accepted_tracks = triangulated_data.number_tracks()
        accepted_tracks_ratio = num_accepted_tracks / len(tracks_2d)
        track_cheirality_failure_ratio = num_tracks_w_cheirality_exceptions / len(
            tracks_2d)

        # TODO: improve dropped camera handling
        num_cameras = len(cameras.keys())
        expected_camera_indices = np.arange(num_cameras)
        # add cameras to landmark_map
        for i, cam in cameras.items():
            if i != expected_camera_indices[i]:
                raise RuntimeError("Some cameras must have been dropped.")
            triangulated_data.add_camera(cam)

        mean_3d_track_length, median_3d_track_length, track_lengths_3d = SfmResult(
            triangulated_data, None).get_track_length_statistics()

        logger.debug("[Data association] output number of tracks: %s",
                     num_accepted_tracks)
        logger.debug("[Data association] output avg. track length: %s",
                     mean_3d_track_length)

        # dump the 3d point cloud before Bundle Adjustment for offline visualization
        points_3d = [
            list(triangulated_data.track(j).point3())
            for j in range(num_accepted_tracks)
        ]
        # bin edges are halfway between each integer
        track_lengths_histogram, _ = np.histogram(track_lengths_3d,
                                                  bins=np.linspace(
                                                      -0.5, 10.5, 12))

        # min possible track len is 2, above 10 is improbable
        histogram_dict = {
            f"num_len_{i}_tracks": int(track_lengths_histogram[i])
            for i in range(2, 11)
        }

        data_assoc_metrics = {
            "mean_2d_track_length": np.round(mean_2d_track_length, 3),
            "accepted_tracks_ratio": np.round(accepted_tracks_ratio, 3),
            "track_cheirality_failure_ratio": np.round(track_cheirality_failure_ratio, 3),
            "num_accepted_tracks": num_accepted_tracks,
            "3d_tracks_length": {
                "median": median_3d_track_length,
                "mean": mean_3d_track_length,
                "min": int(track_lengths_3d.min()),
                "max": int(track_lengths_3d.max()),
                "track_lengths_histogram": histogram_dict,
            },
            "mean_accepted_track_avg_error": np.array(per_accepted_track_avg_errors).mean(),
            "per_rejected_track_avg_errors": per_rejected_track_avg_errors,
            "per_accepted_track_avg_errors": per_accepted_track_avg_errors,
            "points_3d": points_3d,
        }

        return triangulated_data, data_assoc_metrics
Example #11
    def run(
        self,
        num_images: int,
        cameras: Dict[int, gtsfm_types.CAMERA_TYPE],
        corr_idxs_dict: Dict[Tuple[int, int], np.ndarray],
        keypoints_list: List[Keypoints],
        cameras_gt: List[Optional[gtsfm_types.CAMERA_TYPE]],
        relative_pose_priors: Dict[Tuple[int, int], Optional[PosePrior]],
        images: Optional[List[Image]] = None,
    ) -> Tuple[GtsfmData, GtsfmMetricsGroup]:
        """Perform the data association.

        Args:
            num_images: Number of images in the scene.
            cameras: dictionary, with image index -> camera mapping.
            corr_idxs_dict: dictionary, with key as image pair (i1,i2) and value as matching keypoint indices.
            keypoints_list: keypoints for each image.
            cameras_gt: list of GT cameras, to be used for benchmarking the tracks.
            relative_pose_priors: priors on relative poses between image pairs, used as extra camera edges when
                                  selecting the largest connected component.
            images: a list of all images in the scene (optional; used only for track patch visualization).

        Returns:
            A tuple of GtsfmData with cameras and tracks, and a GtsfmMetricsGroup with data association metrics
        """
        # generate tracks for 3D points using pairwise correspondences
        tracks_estimator = DsfTracksEstimator()
        tracks_2d = tracks_estimator.run(corr_idxs_dict, keypoints_list)

        if self.save_track_patches_viz and images is not None:
            io_utils.save_track_visualizations(tracks_2d, images, save_dir=os.path.join("plots", "tracks_2d"))

        # track lengths w/o triangulation check
        track_lengths_2d = np.array(list(map(lambda x: int(x.number_measurements()), tracks_2d)), dtype=np.uint32)

        logger.debug("[Data association] input number of tracks: %s", len(tracks_2d))
        logger.debug("[Data association] input avg. track length: %s", np.mean(track_lengths_2d))

        # Initialize 3D landmark for each track
        point3d_initializer = Point3dInitializer(cameras, self.triangulation_options)

        # form GtsfmData object after triangulation
        triangulated_data = GtsfmData(num_images)

        # add all cameras
        for i, camera in cameras.items():
            triangulated_data.add_camera(i, camera)

        exit_codes_wrt_gt = track_utils.classify_tracks2d_with_gt_cameras(tracks=tracks_2d, cameras_gt=cameras_gt)

        # add valid tracks where triangulation is successful
        exit_codes_wrt_computed: List[TriangulationExitCode] = []
        per_accepted_track_avg_errors = []
        per_rejected_track_avg_errors = []
        for track_2d in tracks_2d:
            # triangulate and filter based on reprojection error
            sfm_track, avg_track_reproj_error, triangulation_exit_code = point3d_initializer.triangulate(track_2d)
            exit_codes_wrt_computed.append(triangulation_exit_code)
            if triangulation_exit_code == TriangulationExitCode.CHEIRALITY_FAILURE:
                continue

            if sfm_track is not None and self.__validate_track(sfm_track):
                triangulated_data.add_track(sfm_track)
                per_accepted_track_avg_errors.append(avg_track_reproj_error)
            else:
                per_rejected_track_avg_errors.append(avg_track_reproj_error)

        # aggregate the exit codes to get the distribution w.r.t each triangulation exit
        # get the exit codes distribution w.r.t. the camera params computed by the upstream modules of GTSFM
        exit_codes_wrt_computed_distribution = Counter(exit_codes_wrt_computed)
        # compute the exit codes distribution w.r.t. a tuple of exit codes: the exit code when triangulated with the
        # ground truth cameras and the exit code when triangulated with the computed cameras.
        exit_codes_wrt_gt_and_computed_distribution = None
        if exit_codes_wrt_gt is not None:
            exit_codes_wrt_gt_and_computed_distribution = Counter(zip(exit_codes_wrt_gt, exit_codes_wrt_computed))

        track_cheirality_failure_ratio = exit_codes_wrt_computed_distribution[
            TriangulationExitCode.CHEIRALITY_FAILURE
        ] / len(tracks_2d)

        # pick only the largest connected component
        # TODO(Ayush): remove this for hilti as disconnected components not an issue?
        cam_edges_from_prior = [k for k, v in relative_pose_priors.items() if v is not None]
        connected_data = triangulated_data.select_largest_connected_component(extra_camera_edges=cam_edges_from_prior)
        num_accepted_tracks = connected_data.number_tracks()
        accepted_tracks_ratio = num_accepted_tracks / len(tracks_2d)

        mean_3d_track_length, median_3d_track_length = connected_data.get_track_length_statistics()
        track_lengths_3d = connected_data.get_track_lengths()

        logger.debug("[Data association] output number of tracks: %s", num_accepted_tracks)
        logger.debug("[Data association] output avg. track length: %.2f", mean_3d_track_length)

        data_assoc_metrics = GtsfmMetricsGroup(
            "data_association_metrics",
            [
                GtsfmMetric(
                    "2D_track_lengths",
                    track_lengths_2d,
                    store_full_data=False,
                    plot_type=GtsfmMetric.PlotType.HISTOGRAM,
                ),
                GtsfmMetric("accepted_tracks_ratio", accepted_tracks_ratio),
                GtsfmMetric("track_cheirality_failure_ratio", track_cheirality_failure_ratio),
                GtsfmMetric("num_accepted_tracks", num_accepted_tracks),
                GtsfmMetric(
                    "3d_tracks_length",
                    track_lengths_3d,
                    store_full_data=False,
                    plot_type=GtsfmMetric.PlotType.HISTOGRAM,
                ),
                GtsfmMetric("accepted_track_avg_errors_px", per_accepted_track_avg_errors, store_full_data=False),
                GtsfmMetric(
                    "rejected_track_avg_errors_px",
                    np.array(per_rejected_track_avg_errors).astype(np.float32),
                    store_full_data=False,
                ),
                GtsfmMetric(name="number_cameras", data=len(connected_data.get_valid_camera_indices())),
            ],
        )

        if exit_codes_wrt_gt_and_computed_distribution is not None:
            for (gt_exit_code, computed_exit_code), count in exit_codes_wrt_gt_and_computed_distribution.items():
                # Each track has 2 associated exit codes: the triangulation exit codes w.r.t ground truth cameras
                # and w.r.t cameras computed by upstream modules of GTSFM. We get the distribution of the number of
                # tracks for each pair of (triangulation exit code w.r.t GT cams, triangulation exit code w.r.t
                # computed cams)
                metric_name = "#tracks triangulated with GT cams: {}, computed cams: {}".format(
                    gt_exit_code.name, computed_exit_code.name
                )

                data_assoc_metrics.add_metric(GtsfmMetric(name=metric_name, data=count))

        return connected_data, data_assoc_metrics
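A minimal standalone illustration of the Counter(zip(...)) cross-tabulation above, using strings as stand-ins for the TriangulationExitCode members (the real code keys the counter by enum pairs):

from collections import Counter

# Stand-in exit codes, for illustration only; GTSFM uses TriangulationExitCode values here.
exit_codes_wrt_gt = ["SUCCESS", "SUCCESS", "CHEIRALITY_FAILURE", "SUCCESS"]
exit_codes_wrt_computed = ["SUCCESS", "CHEIRALITY_FAILURE", "CHEIRALITY_FAILURE", "SUCCESS"]

# Each key is a (exit code w.r.t. GT cams, exit code w.r.t. computed cams) pair, and each
# value is the number of tracks that fell into that combination.
distribution = Counter(zip(exit_codes_wrt_gt, exit_codes_wrt_computed))
for (gt_code, computed_code), count in distribution.items():
    print(f"#tracks triangulated with GT cams: {gt_code}, computed cams: {computed_code} -> {count}")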