Example #1
    def test_profile_mono(self):
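        # Profile a full monocular run over generated demo images,
        # writing the cProfile stats to orbslam_mono.prof.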
        import cProfile as profile

        stats_file = "orbslam_mono.prof"

        path_manager = PathManager([Path(__file__).parent], self.temp_folder)
        system = OrbSlam2(vocabulary_file=self.vocab_path,
                          mode=SensorMode.MONOCULAR,
                          orb_num_features=1000,
                          orb_num_levels=8,
                          orb_scale_factor=1.2,
                          orb_ini_threshold_fast=7,
                          orb_min_threshold_fast=12)

        image_builder = DemoImageBuilder(mode=ImageMode.MONOCULAR,
                                         width=640,
                                         height=480,
                                         num_stars=150,
                                         length=self.max_time * self.speed,
                                         speed=self.speed,
                                         close_ratio=0.6,
                                         min_size=10,
                                         max_size=100)

        profile.runctx(
            "run_orbslam(system, image_builder, path_manager, self.num_frames, self.max_time)",
            locals=locals(),
            globals=globals(),
            filename=stats_file)
Example #2
    def test_profile_stereo(self):
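        # Profile a full stereo run over generated demo images,
        # writing the cProfile stats to orbslam_stereo.prof.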
        import cProfile as profile

        stats_file = "orbslam_stereo.prof"

        path_manager = PathManager([Path(__file__).parent], self.temp_folder)
        system = OrbSlam2(vocabulary_file=self.vocab_path,
                          mode=SensorMode.STEREO,
                          orb_ini_threshold_fast=12,
                          orb_min_threshold_fast=7)

        image_builder = DemoImageBuilder(mode=ImageMode.STEREO,
                                         stereo_offset=0.15,
                                         width=320,
                                         height=240,
                                         num_stars=500,
                                         length=self.max_time * self.speed,
                                         speed=self.speed,
                                         min_size=4,
                                         max_size=50)

        profile.runctx(
            "run_orbslam(system, image_builder, path_manager, self.num_frames, self.max_time)",
            locals=locals(),
            globals=globals(),
            filename=stats_file)
Example #3
def run_orbslam_with_vocab(vocab_path, temp_folder, seed=1000, num_frames=25):
    # Actually run the system using mocked images
    max_time = 50
    speed = 0.1
    path_manager = PathManager([Path(__file__).parent], temp_folder)
    image_builder = DemoImageBuilder(seed=seed,
                                     mode=ImageMode.STEREO,
                                     stereo_offset=0.15,
                                     width=320,
                                     height=240,
                                     num_stars=500,
                                     length=max_time * speed,
                                     speed=speed,
                                     min_size=4,
                                     max_size=50)
    subject = OrbSlam2(vocabulary_file=vocab_path,
                       mode=SensorMode.STEREO,
                       orb_ini_threshold_fast=12,
                       orb_min_threshold_fast=7)
    subject.resolve_paths(path_manager)
    subject.set_camera_intrinsics(image_builder.get_camera_intrinsics(),
                                  max_time / num_frames)
    subject.set_stereo_offset(image_builder.get_stereo_offset())

    subject.start_trial(ImageSequenceType.SEQUENTIAL)
    for idx in range(num_frames):
        time = max_time * idx / num_frames
        image = image_builder.create_frame(time)
        subject.process_image(image, time)
    return subject.finish_trial()
Example #4
    def test_simple_trial_run_generated(self):
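        # Run the stereo system over the generated NDDS sequence end-to-end
        # and check the trial result covers every frame.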
        # TODO: The state of things:
        # - With the configured sequence, the ORB-SLAM subprocess silently dies about frame 346-347
        # - With my bastardised removal of the subprocess, it seems to work? and not crash?
        # - I have no idea what is different? the pipe?
        sequence_folder, left_path, right_path = ndds_loader.find_files(NDDS_SEQUENCE)
        camera_intrinsics = ndds_loader.read_camera_intrinsics(left_path / '_camera_settings.json')
        max_img_id = ndds_loader.find_max_img_id(lambda idx: left_path / ndds_loader.IMG_TEMPLATE.format(idx))
        with (NDDS_SEQUENCE / 'timestamps.json').open('r') as fp:
            timestamps = json.load(fp)
        self.temp_folder.mkdir(parents=True, exist_ok=True)
        path_manager = PathManager([VOCAB_PATH.parent], self.temp_folder)

        subject = OrbSlam2(
            vocabulary_file=str(VOCAB_PATH.name),
            mode=SensorMode.STEREO,
            vocabulary_branching_factor=5,
            vocabulary_depth=6,
            vocabulary_seed=0,
            depth_threshold=387.0381720715473,
            orb_num_features=598,
            orb_scale_factor=np.power(480 / 104, 1 / 11),  # = (480 / min_height)^(1/num_levels)
            orb_num_levels=11,
            orb_ini_threshold_fast=86,
            orb_min_threshold_fast=48
        )
        subject.resolve_paths(path_manager)
        subject.set_camera_intrinsics(camera_intrinsics, 0.1)

        # Read the first frame data to get the baseline
        left_frame_data = ndds_loader.read_json(left_path / ndds_loader.DATA_TEMPLATE.format(0))
        right_frame_data = ndds_loader.read_json(right_path / ndds_loader.DATA_TEMPLATE.format(0))
        left_camera_pose = ndds_loader.read_camera_pose(left_frame_data)
        right_camera_pose = ndds_loader.read_camera_pose(right_frame_data)
        subject.set_stereo_offset(left_camera_pose.find_relative(right_camera_pose))

        subject.start_trial(ImageSequenceType.SEQUENTIAL, seed=0)
        image_group = 'test'
        with image_manager.get().get_group(image_group, allow_write=True):
            for img_idx in range(max_img_id + 1):
                left_pixels = image_utils.read_colour(left_path / ndds_loader.IMG_TEMPLATE.format(img_idx))
                right_pixels = image_utils.read_colour(right_path / ndds_loader.IMG_TEMPLATE.format(img_idx))

                image = StereoImage(
                    pixels=left_pixels,
                    right_pixels=right_pixels,
                    image_group=image_group,
                    metadata=imeta.ImageMetadata(camera_pose=left_camera_pose),
                    right_metadata=imeta.ImageMetadata(camera_pose=right_camera_pose)
                )
                subject.process_image(image, timestamps[img_idx])
        result = subject.finish_trial()

        self.assertIsInstance(result, SLAMTrialResult)
        self.assertEqual(subject, result.system)
        self.assertTrue(result.success)
        self.assertFalse(result.has_scale)
        self.assertIsNotNone(result.run_time)
        self.assertEqual(max_img_id + 1, len(result.results))
Example #5
    def test_can_run_on_colour_images(self):
        # Actually run the system using mocked images
        num_frames = 100
        max_time = 50
        speed = 0.1
        path_manager = PathManager([Path(__file__).parent], self.temp_folder)
        image_builder = DemoImageBuilder(mode=ImageMode.MONOCULAR,
                                         width=640,
                                         height=480,
                                         num_stars=150,
                                         length=max_time * speed,
                                         speed=speed,
                                         close_ratio=0.6,
                                         min_size=10,
                                         max_size=100,
                                         colour=True)
        # image_builder.visualise_sequence(max_time, frame_interval=0.5)
        # return

        subject = OrbSlam2(vocabulary_file=self.vocab_path,
                           mode=SensorMode.MONOCULAR,
                           orb_num_features=1000,
                           orb_num_levels=8,
                           orb_scale_factor=1.2,
                           orb_ini_threshold_fast=7,
                           orb_min_threshold_fast=12)
        subject.resolve_paths(path_manager)
        subject.set_camera_intrinsics(image_builder.get_camera_intrinsics(),
                                      max_time / num_frames)

        subject.start_trial(ImageSequenceType.SEQUENTIAL)
        for idx in range(num_frames):
            time = max_time * idx / num_frames
            image = image_builder.create_frame(time)
            subject.process_image(image, time)
        result = subject.finish_trial()

        self.assertIsInstance(result, SLAMTrialResult)
        with no_auto_dereference(SLAMTrialResult):
            self.assertEqual(subject.pk, result.system)
        self.assertTrue(result.success)
        self.assertFalse(result.has_scale)
        self.assertIsNotNone(result.run_time)
        self.assertIsNotNone(result.settings)
        self.assertEqual(num_frames, len(result.results))

        has_been_found = False
        for idx, frame_result in enumerate(result.results):
            self.assertEqual(max_time * idx / num_frames,
                             frame_result.timestamp)
            self.assertIsNotNone(frame_result.pose)
            self.assertIsNotNone(frame_result.motion)
            if frame_result.tracking_state is TrackingState.OK:
                has_been_found = True
        self.assertTrue(has_been_found)
Example #6
    def test_run_with_loops(self):
        # Actually run the system using mocked images
        num_frames = [20, 20, 27, 25]
        max_time = 25
        speed = 1
        path_manager = PathManager([Path(__file__).parent], self.temp_folder)
        image_builder = DemoImageBuilder(mode=ImageMode.STEREO,
                                         stereo_offset=0.15,
                                         width=320,
                                         height=240,
                                         num_stars=300,
                                         length=max_time * speed,
                                         speed=speed,
                                         min_size=4,
                                         max_size=50,
                                         close_ratio=0.5)
        subject = OrbSlam2(vocabulary_file=self.vocab_path,
                           mode=SensorMode.STEREO,
                           orb_ini_threshold_fast=12,
                           orb_min_threshold_fast=7)
        subject.resolve_paths(path_manager)
        subject.set_camera_intrinsics(
            image_builder.get_camera_intrinsics(),
            max_time * len(num_frames) / sum(num_frames))
        subject.set_stereo_offset(image_builder.get_stereo_offset())

        subject.start_trial(ImageSequenceType.SEQUENTIAL)
        for loop_idx, loop_frames in enumerate(num_frames):
            for idx in range(loop_frames):
                time = max_time * idx / loop_frames + max_time * loop_idx
                if loop_idx % 2 == 1:
                    builder_time = max_time * (1 - idx / loop_frames)
                else:
                    builder_time = max_time * idx / loop_frames
                image = image_builder.create_frame(builder_time)
                subject.process_image(image, time)
        result = subject.finish_trial()

        self.assertIsInstance(result, SLAMTrialResult)
        with no_auto_dereference(SLAMTrialResult):
            self.assertEqual(subject.pk, result.system)
        self.assertTrue(result.success)
        self.assertTrue(result.has_scale)
        self.assertIsNotNone(result.run_time)
        self.assertIsNotNone(result.settings)
        self.assertEqual(sum(num_frames), len(result.results))

        has_been_found = False
        for idx, frame_result in enumerate(result.results):
            self.assertIsNotNone(frame_result.pose)
            self.assertIsNotNone(frame_result.motion)
            if frame_result.tracking_state is TrackingState.OK:
                has_been_found = True
        self.assertTrue(has_been_found)
Example #7
    def test_passes_orb_settings_to_vocabulary(self,
                                               mock_vocabulary_builder_class):
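        # VocabularyBuilder is presumably patched by a decorator not shown here;
        # the test only checks which ORB settings build_vocabulary forwards to it.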
        mock_builder = mock.create_autospec(spec=VocabularyBuilder,
                                            spec_set=True)
        mock_vocabulary_builder_class.return_value = mock_builder

        branching_factor = 12
        vocab_depth = 2
        vocab_seed = 1618673921

        num_features = 12253
        scale_factor = 1.23415
        num_levels = 7
        ini_threshold = 15
        min_threshold = 22
        subject = OrbSlam2(mode=SensorMode.STEREO,
                           vocabulary_branching_factor=branching_factor,
                           vocabulary_depth=vocab_depth,
                           vocabulary_seed=vocab_seed,
                           orb_num_features=num_features,
                           orb_scale_factor=scale_factor,
                           orb_num_levels=num_levels,
                           orb_ini_threshold_fast=ini_threshold,
                           orb_min_threshold_fast=min_threshold)
        subject.pk = ObjectId()
        subject.build_vocabulary([self.image_collection],
                                 self.path_manager.get_output_dir())

        self.assertTrue(mock_vocabulary_builder_class.called)
        self.assertEqual(
            mock.call(num_features, scale_factor, num_levels, 31, 0, 2, 1, 31,
                      min(min_threshold, ini_threshold)),
            mock_vocabulary_builder_class.call_args)

        vocab_path = self.path_manager.get_output_dir() / subject.vocabulary_file
        self.assertTrue(mock_builder.add_image.called)
        self.assertTrue(mock_builder.build_vocabulary.called)
        self.assertEqual(
            mock.call(str(vocab_path),
                      branchingFactor=branching_factor,
                      numLevels=vocab_depth,
                      seed=vocab_seed),
            mock_builder.build_vocabulary.call_args)
Example #8
    def test_can_build_a_vocab_file(self):
        subject = OrbSlam2(mode=SensorMode.STEREO,
                           vocabulary_branching_factor=6,
                           vocabulary_depth=3,
                           vocabulary_seed=158627,
                           orb_num_features=125,
                           orb_scale_factor=2,
                           orb_num_levels=2,
                           orb_ini_threshold_fast=12,
                           orb_min_threshold_fast=7)
        subject.pk = ObjectId()  # object needs to be "saved" to generate a vocab filename
        self.assertEqual('', subject.vocabulary_file)
        subject.build_vocabulary([self.image_collection],
                                 self.path_manager.get_output_dir())
        self.assertNotEqual('', subject.vocabulary_file)
        vocab_path = self.path_manager.find_file(subject.vocabulary_file)
        self.assertTrue(vocab_path.exists())
        vocab_path.unlink()
Example #9
    def test_building_allows_orbslam_to_run(self):
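        # Build a vocabulary from the image collection, then check a full
        # trial can run against the freshly built vocabulary file.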
        subject = OrbSlam2(mode=SensorMode.STEREO,
                           vocabulary_branching_factor=6,
                           vocabulary_depth=4,
                           vocabulary_seed=158627,
                           orb_num_features=1000,
                           orb_scale_factor=1.2,
                           orb_num_levels=7,
                           orb_ini_threshold_fast=12,
                           orb_min_threshold_fast=7)
        subject.pk = ObjectId()  # object needs to be "saved" to generate a vocab filename
        subject.build_vocabulary([self.image_collection],
                                 self.path_manager.get_output_dir())

        subject.resolve_paths(self.path_manager)
        subject.set_camera_intrinsics(self.image_collection.camera_intrinsics,
                                      self.max_time / self.num_frames)
        subject.set_stereo_offset(self.image_collection.stereo_offset)

        subject.start_trial(ImageSequenceType.SEQUENTIAL)
        for timestamp, image in self.image_collection:
            subject.process_image(image, timestamp)
        result = subject.finish_trial()

        self.assertIsInstance(result, SLAMTrialResult)
        with no_auto_dereference(SLAMTrialResult):
            self.assertEqual(subject.pk, result.system)
        self.assertTrue(result.success)
        self.assertTrue(result.has_scale)
        self.assertIsNotNone(result.run_time)
        self.assertIsNotNone(result.settings)
        self.assertEqual(len(self.image_collection), len(result.results))

        has_been_found = False
        for idx, frame_result in enumerate(result.results):
            self.assertEqual(self.image_collection.timestamps[idx],
                             frame_result.timestamp)
            self.assertIsNotNone(frame_result.pose)
            self.assertIsNotNone(frame_result.motion)
            if frame_result.tracking_state is TrackingState.OK:
                has_been_found = True
        self.assertTrue(has_been_found)