Example #1
0
    def test_all(self):
        """Test find/merge behavior of DSFMapIndexPair."""
        def key(index_pair):
            # Flatten an IndexPair into a plain (i, j) tuple so it can be
            # compared with assertEqual (IndexPair itself may not define ==).
            return index_pair.i(), index_pair.j()

        dsf = gtsam.DSFMapIndexPair()
        pair1 = gtsam.IndexPair(1, 18)
        # A fresh element is its own representative.
        self.assertEqual(key(dsf.find(pair1)), key(pair1))
        pair2 = gtsam.IndexPair(2, 2)
        dsf.merge(pair1, pair2)
        # BUG FIX: the original called assertTrue(dsf.find(pair1),
        # dsf.find(pair1)), which only checks truthiness (the second argument
        # is the failure message) and compares pair1 against itself. After a
        # merge, both pairs must resolve to the same representative.
        self.assertEqual(key(dsf.find(pair1)), key(dsf.find(pair2)))
Example #2
0
    def test_all(self) -> None:
        """Test everything in DFSMap."""
        def key(index_pair) -> Tuple[int, int]:
            """Flatten an IndexPair into a comparable (i, j) tuple."""
            return index_pair.i(), index_pair.j()

        dsf = gtsam.DSFMapIndexPair()
        first = gtsam.IndexPair(1, 18)
        # Before any merge, an element is its own representative.
        self.assertEqual(key(dsf.find(first)), key(first))
        second = gtsam.IndexPair(2, 2)

        # After merging, both pairs must share a single representative.
        dsf.merge(first, second)
        self.assertEqual(key(dsf.find(first)), key(dsf.find(second)))
    def filter_bad_landmarks(self, landmark_map, dsf, enable):
        """Filter bad landmarks and return the surviving observation lists.

        Removes:
            1. landmarks with fewer than ``self._seen`` observations;
            2. landmarks with more than one observation in an image;
            3. features with more than one landmark correspondence
               (2. and 3. come from ``find_bad_matches()`` and are only
               applied when ``enable`` is truthy).

        NOTE: entries belonging to bad matches are deleted from
        ``landmark_map`` in place — the caller's dict is mutated.

        Args:
            landmark_map: dict mapping landmark key (i, j) -> list of
                (image_index, feature) observations.
            dsf: gtsam.DSFMapIndexPair used to resolve a bad match to its
                landmark representative key.
            enable: when truthy, also remove landmarks flagged as bad matches.

        Returns:
            List of observation lists, ordered by sorted landmark key.
        """
        # Resolve each bad match to its landmark representative key.
        # (Use an empty set, not {}, when disabled: find_bad_matches()
        # returns a set of pairs.)
        bad_matches = self.find_bad_matches() if enable else set()
        bad_key_list = set()
        for bad_match in bad_matches:
            landmark_representative = dsf.find(
                gtsam.IndexPair(bad_match[0], bad_match[1]))
            bad_key_list.add(
                (landmark_representative.i(), landmark_representative.j()))
        for key in bad_key_list:
            del landmark_map[key]

        # Keep only landmarks seen by at least self._seen observations.
        # Replaces the original copy.copy + list.remove loop, which was
        # O(n^2); the filtered result is identical.
        return [
            landmark_map[key] for key in sorted(landmark_map.keys())
            if len(landmark_map[key]) >= self._seen
        ]
    def test_find_dsf(self):
        """Test the DSF pipeline: bad-match detection, landmark-map creation,
        DSF representative lookup, and bad-landmark filtering."""
        # Build a backend over the small 3-image test dataset.
        data_directory = 'mapping/datasets/dsf_test_data/'
        num_images = 3
        filter_bad_landmarks_enable = False
        min_observation_number = 3  # fixed typo: was `min_obersvation_number`
        back_end = MappingBackEnd(
            data_directory, num_images, gtsam.Cal3_S2(), [], [], [],
            filter_bad_landmarks_enable, min_observation_number)

        # Test find_bad_matches().
        bad_matches = back_end.find_bad_matches()
        self.assertEqual(bad_matches, {(2, 6), (0, 4)})

        # Test create_landmark_map() with RANSAC filtering disabled.
        actual_landmark_map, dsf = back_end.create_landmark_map(False)
        expected_landmark_map = {(0, 1): [(0, Point2(1, 1)), (1, Point2(2, 1)), (2, Point2(3, 1))], (2, 0): [(2, Point2(0, 0))], (0, 0): [(0, Point2(0, 0))], (0, 5): [(0, Point2(1, 5)), (0, Point2(1, 6)), (2, Point2(3, 6))], (0, 4): [
            (0, Point2(1, 4)), (2, Point2(3, 3)), (2, Point2(3, 5))], (1, 0): [(1, Point2(0, 0))], (0, 3): [(0, Point2(1, 3)), (1, Point2(2, 2)), (2, Point2(3, 2))], (0, 2): [(0, Point2(1, 2)), (2, Point2(3, 4))]}
        self.assert_landmark_map_equal(
            actual_landmark_map, expected_landmark_map)

        # Test generate_dsf(): (2, 1) must resolve to representative (0, 1).
        landmark_representative = dsf.find(gtsam.IndexPair(2, 1))
        key = (landmark_representative.i(), landmark_representative.j())
        self.assertEqual(key, (0, 1))

        # Test filter_bad_landmarks(): only landmarks with >= 3 unique
        # observations and no bad matches survive.
        actual_landmark_map = back_end.filter_bad_landmarks(
            expected_landmark_map, dsf, True)
        expected_landmark_map = [[(0, Point2(1, 1)), (1, Point2(2, 1)), (2, Point2(3, 1))], [
            (0, Point2(1, 3)), (1, Point2(2, 2)), (2, Point2(3, 2))]]
        # Compare element-wise: Point2 equality needs gtsamAssertEquals.
        for i, observations in enumerate(expected_landmark_map):
            for j, observation in enumerate(observations):
                self.assertEqual(actual_landmark_map[i][j][0], observation[0])
                self.gtsamAssertEquals(
                    actual_landmark_map[i][j][1], observation[1])
    def generate_dsf(self, enable=True):
        """Use dsf to find data association between landmark and landmark observation(features)"""
        dsf = gtsam.DSFMapIndexPair()

        # Visit every unordered image pair (img_a < img_b) exactly once.
        for img_a in range(self._nrimages - 1):
            for img_b in range(img_a + 1, self._nrimages):
                matches = self.load_matches(img_a, img_b)
                if enable:
                    # Reject outlier matches via the essential matrix; skip
                    # the pair entirely if RANSAC could not run.
                    bad_essential, matches = self.ransac_filter_keypoints(
                        matches, img_a, img_b)
                    if bad_essential:
                        print(
                            "Not enough points to generate essential matrix for image_",
                            img_a, " and image_", img_b)
                        continue
                # Union each matched (frame, keypoint) pair into the forest.
                for frame_1, keypt_1, frame_2, keypt_2 in matches:
                    dsf.merge(gtsam.IndexPair(frame_1, keypt_1),
                              gtsam.IndexPair(frame_2, keypt_2))

        return dsf
 def create_landmark_map(self, enable=True):
     """Create a list to map landmarks and their correspondences.
         [Landmark_i:[(i,Point2()), (j,Point2())...]...]"""
     dsf = self.generate_dsf(enable)
     landmark_map = defaultdict(list)
     # Every (image, feature) cell belongs to the landmark identified by
     # its DSF representative; group observations under that key.
     for img_index, feature_list in enumerate(self._image_features):
         for feature_index, feature in enumerate(feature_list):
             rep = dsf.find(gtsam.IndexPair(img_index, feature_index))
             landmark_map[(rep.i(), rep.j())].append((img_index, feature))
     return landmark_map, dsf
Example #7
0
    def generate_tracks_from_pairwise_matches(
        matches_dict: Dict[Tuple[int, int], np.ndarray],
        keypoints_list: List[Keypoints],
    ) -> List["SfmTrack2d"]:
        """Factory function that creates a list of tracks from 2d point correspondences.

        Creates a disjoint-set forest (DSF) and 2d tracks from pairwise matches. We create a singleton for union-find
        set elements from camera index of a detection and the index of that detection in that camera's keypoint list,
        i.e. (i,k).

        Args:
            matches_dict: Dict of pairwise matches of type:
                    key: pose indices for the matched pair of images
                    val: feature indices, as array of Nx2 shape; N being nb of features. A row is (feature_idx1,
                         feature_idx2).
            keypoints_list: List of keypoints for each image.

        Returns:
            list of all valid SfmTrack2d generated by the matches.
        """
        # Check that every keypoint coordinate array is 2D (N x 2); a
        # generator avoids building a throwaway list.
        if not all(kps.coordinates.ndim == 2 for kps in keypoints_list):
            raise Exception(
                "Dimensions for Keypoint coordinates incorrect. Array needs to be 2D"
            )

        # Generate the DSF to form tracks: union every matched
        # (camera_idx i, measurement_idx k) pair.
        dsf = gtsam.DSFMapIndexPair()
        track_2d_list = []
        for (i1, i2), k_pairs in matches_dict.items():
            for (k1, k2) in k_pairs:
                dsf.merge(gtsam.IndexPair(i1, k1), gtsam.IndexPair(i2, k2))

        key_set = dsf.sets()
        # Create a landmark map: a list of tracks.
        # Each track is represented as a list of (camera_idx, measurements).
        for set_id in key_set:
            index_pair_set = key_set[
                set_id]  # key_set is a wrapped C++ map, so this unusual syntax is required

            # Initialize track from measurements.
            track_measurements = []
            for index_pair in gtsam.IndexPairSetAsArray(index_pair_set):
                i = index_pair.i()  # camera index
                k = index_pair.j()  # measurement index in camera i's keypoints
                track_measurements.append(
                    SfmMeasurement(i, keypoints_list[i].coordinates[k]))

            track_2d = SfmTrack2d(track_measurements)

            # Keep only tracks without repeated measurements in one camera.
            if track_2d.validate_unique_cameras():
                track_2d_list.append(track_2d)

        return track_2d_list
Example #8
0
    def run(self, matches_dict: Dict[Tuple[int, int], np.ndarray],
            keypoints_list: List[Keypoints]) -> List[SfmTrack2d]:
        """Estimate tracks from feature correspondences.

        Creates a disjoint-set forest (DSF) and 2d tracks from pairwise matches. We create a singleton for union-find
        set elements from camera index of a detection and the index of that detection in that camera's keypoint list,
        i.e. (i,k).

        Args:
            matches_dict: Dict of pairwise matches of type:
                    key: indices for the matched pair of images
                    val: feature indices, as array of Nx2 shape; N being number of features. A row is (feature_idx1,
                         feature_idx2).
            keypoints_list: List of keypoints for each image.

        Returns:
            list of all valid SfmTrack2d generated by the matches.
        """
        # Check that every keypoint coordinate array is 2D (N x 2); a
        # generator avoids building a throwaway list.
        if not all(kps.coordinates.ndim == 2 for kps in keypoints_list):
            raise Exception(
                "Dimensions for Keypoint coordinates incorrect. Array needs to be 2D"
            )

        # Generate the DSF to form tracks: union every matched
        # (camera_idx i, measurement_idx k) pair.
        dsf = gtsam.DSFMapIndexPair()
        track_2d_list = []
        for (i1, i2), k_pairs in matches_dict.items():
            for (k1, k2) in k_pairs:
                dsf.merge(gtsam.IndexPair(i1, k1), gtsam.IndexPair(i2, k2))

        key_set = dsf.sets()

        erroneous_track_count = 0
        # Create a landmark map: a list of tracks.
        # Each track is represented as a list of (camera_idx, measurements).
        for set_id in key_set:
            index_pair_set = key_set[
                set_id]  # key_set is a wrapped C++ map, so this unusual syntax is required

            # Initialize track from measurements.
            track_measurements = []
            for index_pair in gtsam.IndexPairSetAsArray(index_pair_set):
                i = index_pair.i()  # camera index
                k = index_pair.j()  # measurement index in camera i's keypoints
                track_measurements.append(
                    SfmMeasurement(i, keypoints_list[i].coordinates[k]))

            track_2d = SfmTrack2d(track_measurements)

            # Skip erroneous track that had repeated measurements within the same image (i.e., violates transitivity).
            # This is an expected result from an incorrect correspondence slipping through.
            if track_2d.validate_unique_cameras():
                track_2d_list.append(track_2d)
            else:
                erroneous_track_count += 1

        # BUG FIX: np.NaN was removed in NumPy 2.0; use np.nan.
        num_tracks = len(key_set)
        erroneous_track_pct = (erroneous_track_count / num_tracks * 100
                               if num_tracks > 0 else np.nan)
        # Lazy %-style args avoid formatting work when INFO is disabled.
        logger.info(
            "DSF Union-Find: %.2f%% of tracks discarded from multiple obs. in a single image.",
            erroneous_track_pct,
        )
        return track_2d_list