Example #1
def run_orbslam_with_vocab(vocab_path, temp_folder, seed=1000, num_frames=25):
    # Actually run the system using mocked images
    max_time = 50
    speed = 0.1
    path_manager = PathManager([Path(__file__).parent], temp_folder)
    image_builder = DemoImageBuilder(seed=seed,
                                     mode=ImageMode.STEREO,
                                     stereo_offset=0.15,
                                     width=320,
                                     height=240,
                                     num_stars=500,
                                     length=max_time * speed,
                                     speed=speed,
                                     min_size=4,
                                     max_size=50)
    subject = OrbSlam2(vocabulary_file=vocab_path,
                       mode=SensorMode.STEREO,
                       orb_ini_threshold_fast=12,
                       orb_min_threshold_fast=7)
    subject.resolve_paths(path_manager)
    subject.set_camera_intrinsics(image_builder.get_camera_intrinsics(),
                                  max_time / num_frames)
    subject.set_stereo_offset(image_builder.get_stereo_offset())

    subject.start_trial(ImageSequenceType.SEQUENTIAL)
    for idx in range(num_frames):
        time = max_time * idx / num_frames
        image = image_builder.create_frame(time)
        subject.process_image(image, time)
    return subject.finish_trial()
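
A minimal sketch of how this helper might be driven from a unittest method, assuming the test fixture provides self.vocab_path and self.temp_folder and that SLAMTrialResult is imported, as in the later examples (the method name is illustrative):

    def test_orbslam_runs_with_synthetic_vocab(self):
        # Hypothetical test method exercising the helper above
        result = run_orbslam_with_vocab(self.vocab_path, self.temp_folder,
                                        seed=1, num_frames=25)
        self.assertIsInstance(result, SLAMTrialResult)
        self.assertTrue(result.success)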
Example #2
    @classmethod
    def setUpClass(cls):
        dbconn.setup_image_manager()
        cls.temp_folder.mkdir(parents=True, exist_ok=True)
        cls.path_manager = PathManager([Path(__file__).parent],
                                       cls.temp_folder)

        image_builder = DemoImageBuilder(mode=ImageMode.STEREO,
                                         stereo_offset=0.15,
                                         width=320,
                                         height=240,
                                         num_stars=500,
                                         length=cls.max_time * cls.speed,
                                         speed=cls.speed,
                                         min_size=4,
                                         max_size=50)

        # Make an image source from the image builder
        images = []
        for time in range(cls.num_frames):
            image = image_builder.create_frame(time)
            images.append(image)
        cls.image_collection = ImageCollection(
            images=images,
            timestamps=list(range(len(images))),
            sequence_type=ImageSequenceType.SEQUENTIAL)
Example #3
    def test_can_run_on_colour_images(self):
        # Actually run the system using mocked images
        num_frames = 100
        max_time = 50
        speed = 0.1
        path_manager = PathManager([Path(__file__).parent], self.temp_folder)
        image_builder = DemoImageBuilder(mode=ImageMode.MONOCULAR,
                                         width=640,
                                         height=480,
                                         num_stars=150,
                                         length=max_time * speed,
                                         speed=speed,
                                         close_ratio=0.6,
                                         min_size=10,
                                         max_size=100,
                                         colour=True)
        # image_builder.visualise_sequence(max_time, frame_interval=0.5)
        # return

        subject = OrbSlam2(vocabulary_file=self.vocab_path,
                           mode=SensorMode.MONOCULAR,
                           orb_num_features=1000,
                           orb_num_levels=8,
                           orb_scale_factor=1.2,
                           orb_ini_threshold_fast=7,
                           orb_min_threshold_fast=12)
        subject.resolve_paths(path_manager)
        subject.set_camera_intrinsics(image_builder.get_camera_intrinsics(),
                                      max_time / num_frames)

        subject.start_trial(ImageSequenceType.SEQUENTIAL)
        for idx in range(num_frames):
            time = max_time * idx / num_frames
            image = image_builder.create_frame(time)
            subject.process_image(image, time)
        result = subject.finish_trial()

        self.assertIsInstance(result, SLAMTrialResult)
        with no_auto_dereference(SLAMTrialResult):
            self.assertEqual(subject.pk, result.system)
        self.assertTrue(result.success)
        self.assertFalse(result.has_scale)
        self.assertIsNotNone(result.run_time)
        self.assertIsNotNone(result.settings)
        self.assertEqual(num_frames, len(result.results))

        has_been_found = False
        for idx, frame_result in enumerate(result.results):
            self.assertEqual(max_time * idx / num_frames,
                             frame_result.timestamp)
            self.assertIsNotNone(frame_result.pose)
            self.assertIsNotNone(frame_result.motion)
            if frame_result.tracking_state is TrackingState.OK:
                has_been_found = True
        self.assertTrue(has_been_found)
    def test_run_with_loops(self):
        # Actually run the system using mocked images
        num_frames = [20, 20, 27, 25]
        max_time = 25
        speed = 1
        path_manager = PathManager([Path(__file__).parent], self.temp_folder)
        image_builder = DemoImageBuilder(mode=ImageMode.STEREO,
                                         stereo_offset=0.15,
                                         width=320,
                                         height=240,
                                         num_stars=300,
                                         length=max_time * speed,
                                         speed=speed,
                                         min_size=4,
                                         max_size=50,
                                         close_ratio=0.5)
        subject = OrbSlam2(vocabulary_file=self.vocab_path,
                           mode=SensorMode.STEREO,
                           orb_ini_threshold_fast=12,
                           orb_min_threshold_fast=7)
        subject.resolve_paths(path_manager)
        subject.set_camera_intrinsics(
            image_builder.get_camera_intrinsics(),
            max_time * len(num_frames) / sum(num_frames))
        subject.set_stereo_offset(image_builder.get_stereo_offset())

        subject.start_trial(ImageSequenceType.SEQUENTIAL)
        for loop_idx, loop_frames in enumerate(num_frames):
            for idx in range(loop_frames):
                time = max_time * idx / loop_frames + max_time * loop_idx
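                # On odd passes run the builder time backwards, so the camera retraces
                # its path and revisits earlier views, giving the system loops to close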
                if loop_idx % 2 == 1:
                    builder_time = max_time * (1 - idx / loop_frames)
                else:
                    builder_time = max_time * idx / loop_frames
                image = image_builder.create_frame(builder_time)
                subject.process_image(image, time)
        result = subject.finish_trial()

        self.assertIsInstance(result, SLAMTrialResult)
        with no_auto_dereference(SLAMTrialResult):
            self.assertEqual(subject.pk, result.system)
        self.assertTrue(result.success)
        self.assertTrue(result.has_scale)
        self.assertIsNotNone(result.run_time)
        self.assertIsNotNone(result.settings)
        self.assertEqual(sum(num_frames), len(result.results))

        has_been_found = False
        for idx, frame_result in enumerate(result.results):
            self.assertIsNotNone(frame_result.pose)
            self.assertIsNotNone(frame_result.motion)
            if frame_result.tracking_state is TrackingState.OK:
                has_been_found = True
        self.assertTrue(has_been_found)
Example #5
    def test_profile(self):
        import cProfile as profile

        stats_file = "dso.prof"

        system = DSO(
            rectification_mode=RectificationMode.NONE,
            # These should be irrelevant
            rectification_intrinsics=CameraIntrinsics(width=320,
                                                      height=240,
                                                      fx=160,
                                                      fy=160,
                                                      cx=160,
                                                      cy=120))

        image_builder = DemoImageBuilder(mode=ImageMode.MONOCULAR,
                                         seed=0,
                                         width=640,
                                         height=480,
                                         num_stars=100,
                                         length=self.max_time * self.speed,
                                         speed=self.speed,
                                         close_ratio=0.5,
                                         min_size=10,
                                         max_size=200)

        profile.runctx(
            "run_dso(system, image_builder, self.num_frames, self.max_time)",
            locals=locals(),
            globals=globals(),
            filename=stats_file)
    def test_profile_stereo(self):
        import cProfile as profile

        stats_file = "orbslam_stereo.prof"

        path_manager = PathManager([Path(__file__).parent], self.temp_folder)
        system = OrbSlam2(vocabulary_file=self.vocab_path,
                          mode=SensorMode.STEREO,
                          orb_ini_threshold_fast=12,
                          orb_min_threshold_fast=7)

        image_builder = DemoImageBuilder(mode=ImageMode.STEREO,
                                         stereo_offset=0.15,
                                         width=320,
                                         height=240,
                                         num_stars=500,
                                         length=self.max_time * self.speed,
                                         speed=self.speed,
                                         min_size=4,
                                         max_size=50)

        profile.runctx(
            "run_orbslam(system, image_builder, path_manager, self.num_frames, self.max_time)",
            locals=locals(),
            globals=globals(),
            filename=stats_file)
    def test_profile_mono(self):
        import cProfile as profile

        stats_file = "orbslam_mono.prof"

        path_manager = PathManager([Path(__file__).parent], self.temp_folder)
        system = OrbSlam2(vocabulary_file=self.vocab_path,
                          mode=SensorMode.MONOCULAR,
                          orb_num_features=1000,
                          orb_num_levels=8,
                          orb_scale_factor=1.2,
                          orb_ini_threshold_fast=7,
                          orb_min_threshold_fast=12)

        image_builder = DemoImageBuilder(mode=ImageMode.MONOCULAR,
                                         width=640,
                                         height=480,
                                         num_stars=150,
                                         length=self.max_time * self.speed,
                                         speed=self.speed,
                                         close_ratio=0.6,
                                         min_size=10,
                                         max_size=100)

        profile.runctx(
            "run_orbslam(system, image_builder, path_manager, self.num_frames, self.max_time)",
            locals=locals(),
            globals=globals(),
            filename=stats_file)
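
The .prof files written by these profiling tests can be inspected afterwards with the standard-library pstats module; a minimal sketch, using one of the file names above:

import pstats

stats = pstats.Stats("orbslam_mono.prof")
stats.sort_stats("cumulative").print_stats(20)  # top 20 entries by cumulative time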
Example #8
    def test_is_consistent_with_fixed_seed(self):
        # Actually run the system using mocked images
        num_frames = 20
        max_time = 50
        speed = 0.1
        image_builder = DemoImageBuilder(
            mode=ImageMode.MONOCULAR,
            width=640, height=480, num_stars=150,
            length=max_time * speed, speed=speed,
            close_ratio=0.6, min_size=10, max_size=100
        )

        subject = LibVisOMonoSystem(motion_threshold=1000)
        subject.set_camera_intrinsics(image_builder.get_camera_intrinsics(), max_time / num_frames)

        subject.start_trial(ImageSequenceType.SEQUENTIAL, seed=0)
        for idx in range(num_frames):
            time = max_time * idx / num_frames
            image = image_builder.create_frame(time)
            subject.process_image(image, time)
        result1 = subject.finish_trial()

        subject.start_trial(ImageSequenceType.SEQUENTIAL, seed=0)
        for idx in range(num_frames):
            time = max_time * idx / num_frames
            image = image_builder.create_frame(time)
            subject.process_image(image, time)
        result2 = subject.finish_trial()

        has_any_estimate = False
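        # Both trials used the same seed, so every frame should match exactly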
        self.assertEqual(len(result1.results), len(result2.results))
        for frame_result_1, frame_result_2 in zip(result1.results, result2.results):
            self.assertEqual(frame_result_1.timestamp, frame_result_2.timestamp)
            self.assertEqual(frame_result_1.tracking_state, frame_result_2.tracking_state)
            if frame_result_1.estimated_motion is None or frame_result_2.estimated_motion is None:
                self.assertEqual(frame_result_1.estimated_motion, frame_result_2.estimated_motion)
            else:
                has_any_estimate = True
                motion1 = frame_result_1.estimated_motion
                motion2 = frame_result_2.estimated_motion

                loc_diff = motion1.location - motion2.location
                self.assertNPClose(loc_diff, np.zeros(3), rtol=0, atol=1e-14)
                quat_diff = motion1.rotation_quat(True) - motion2.rotation_quat(True)
                self.assertNPClose(quat_diff, np.zeros(4), rtol=0, atol=1e-14)
        self.assertTrue(has_any_estimate)
    def test_result_saves(self):
        # Make an image collection with some number of images
        images = []
        image_builder = DemoImageBuilder(mode=ImageMode.STEREO,
                                         stereo_offset=0.15,
                                         width=160,
                                         height=120)
        num_images = 10
        for time in range(num_images):
            image = image_builder.create_frame(time / num_images)
            image.save()
            images.append(image)
        image_collection = ImageCollection(
            images=images,
            timestamps=list(range(len(images))),
            sequence_type=ImageSequenceType.SEQUENTIAL)
        image_collection.save()

        subject = LibVisOStereoSystem()
        subject.save()

        # Actually run the system using mocked images
        subject.set_camera_intrinsics(image_builder.get_camera_intrinsics(),
                                      1 / 10)
        subject.set_stereo_offset(image_builder.get_stereo_offset())
        subject.start_trial(ImageSequenceType.SEQUENTIAL)
        for time, image in enumerate(images):
            subject.process_image(image, time)
        result = subject.finish_trial()
        self.assertIsInstance(result, SLAMTrialResult)
        self.assertEqual(len(image_collection), len(result.results))
        result.image_source = image_collection
        result.save()

        # Load all the entities
        all_entities = list(SLAMTrialResult.objects.all())
        self.assertGreaterEqual(len(all_entities), 1)
        self.assertEqual(all_entities[0], result)
        all_entities[0].delete()

        SLAMTrialResult._mongometa.collection.drop()
        ImageCollection._mongometa.collection.drop()
        StereoImage._mongometa.collection.drop()
Example #10
def create_vocab(vocab_path='ORBvoc-synth.txt'):
    """
    Tiny script to create a vocabulary from the demo image builder
    This gives me a vocab designed to handle the synthetic images I throw at it while testing.
    :return:
    """
    total_time = 10  # seconds
    num_frames = 20
    speed = 3.0
    vocab_builder = VocabularyBuilder()
    for seed in tqdm(range(100), total=100):
        image_builder = DemoImageBuilder(mode=ImageMode.MONOCULAR,
                                         seed=seed,
                                         length=total_time * speed)
        for idx in range(num_frames):
            time = total_time * idx / num_frames
            image = image_builder.create_frame(time)
            vocab_builder.add_image(image.pixels)
    vocab_builder.build_vocabulary(str(vocab_path))
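
If this is kept as a small standalone script, a minimal entry point might look like the following (assuming the module is meant to be run directly):

if __name__ == '__main__':
    create_vocab()  # writes ORBvoc-synth.txt in the current working directory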
Example #11
def create_vocab(vocab_builder: VocabularyBuilder,
                 vocab_path: Path,
                 branching_factor: int,
                 depth: int,
                 num_variants: int = 10,
                 seed=100):
    """
    Tiny script to create a vocabulary from the demo image builder
    This gives me a vocab designed to handle the synthetic images I throw at it while testing.
    :return:
    """
    total_time = 10  # seconds
    num_frames = 20  # Total frames to pull
    speed = 3.0  # Units / second
    for img_seed in range(num_variants):
        image_builder = DemoImageBuilder(mode=ImageMode.MONOCULAR,
                                         seed=img_seed,
                                         length=total_time * speed)
        for idx in range(num_frames):
            time = total_time * idx / num_frames
            image = image_builder.create_frame(time)
            vocab_builder.add_image(image.pixels)
    vocab_builder.build_vocabulary(str(vocab_path), branching_factor, depth,
                                   seed)
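
A sketch of how this variant might be invoked; the branching factor and depth are illustrative values (ORB vocabularies commonly use a branching factor of 10 and a depth of 6), and the no-argument VocabularyBuilder construction mirrors the earlier example:

vocab_builder = VocabularyBuilder()
create_vocab(vocab_builder,
             Path('ORBvoc-synth.txt'),
             branching_factor=10,
             depth=6,
             num_variants=10,
             seed=100)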
Example #12
    def test_is_different_with_changed_seed(self):
        # Actually run the system using mocked images
        num_frames = 20
        max_time = 50
        speed = 0.1
        image_builder = DemoImageBuilder(mode=ImageMode.STEREO,
                                         stereo_offset=0.15,
                                         width=640,
                                         height=480,
                                         num_stars=150,
                                         length=max_time * speed,
                                         speed=speed,
                                         close_ratio=0.6,
                                         min_size=10,
                                         max_size=100)

        subject = LibVisOStereoSystem()
        subject.set_camera_intrinsics(image_builder.get_camera_intrinsics(),
                                      max_time / num_frames)
        subject.set_stereo_offset(image_builder.get_stereo_offset())

        subject.start_trial(ImageSequenceType.SEQUENTIAL, seed=0)
        for idx in range(num_frames):
            time = max_time * idx / num_frames
            image = image_builder.create_frame(time)
            subject.process_image(image, time)
        result1 = subject.finish_trial()

        subject.start_trial(ImageSequenceType.SEQUENTIAL, seed=2)
        for idx in range(num_frames):
            time = max_time * idx / num_frames
            image = image_builder.create_frame(time)
            subject.process_image(image, time)
        result2 = subject.finish_trial()

        self.assertEqual(len(result1.results), len(result2.results))
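        # Tally frames where the tracking state differs, and accumulate the absolute
        # differences between the estimates that both runs produced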
        different_tracking = 0
        loc_diff = np.zeros(3)
        quat_diff = np.zeros(4)
        for frame_result_1, frame_result_2 in zip(result1.results,
                                                  result2.results):
            self.assertEqual(frame_result_1.timestamp,
                             frame_result_2.timestamp)
            if frame_result_1.tracking_state != frame_result_2.tracking_state:
                different_tracking += 1
            elif frame_result_1.estimated_motion is not None and frame_result_2.estimated_motion is not None:
                motion1 = frame_result_1.estimated_motion
                motion2 = frame_result_2.estimated_motion

                loc_diff += np.abs(motion1.location - motion2.location)
                quat_diff += np.abs(
                    motion1.rotation_quat(True) - motion2.rotation_quat(True))
        if different_tracking <= 0:
            # If the tracking is the same, make sure the estimates are at least different
            self.assertNotNPClose(loc_diff, np.zeros(3), rtol=0, atol=1e-10)
            self.assertNotNPClose(quat_diff, np.zeros(4), rtol=0, atol=1e-10)
Example #13
    def test_profile_stereo(self):
        import cProfile as profile

        stats_file = "libviso_stereo.prof"

        system = LibVisOStereoSystem()

        image_builder = DemoImageBuilder(mode=ImageMode.STEREO,
                                         stereo_offset=0.15,
                                         width=640,
                                         height=480,
                                         num_stars=150,
                                         length=self.max_time * self.speed,
                                         speed=self.speed,
                                         close_ratio=0.6,
                                         min_size=10,
                                         max_size=100)

        profile.runctx(
            "run_libviso(system, image_builder, self.num_frames, self.max_time, 0)",
            locals=locals(),
            globals=globals(),
            filename=stats_file)
Example #14
    def test_can_run_on_colour_images(self):
        # Actually run the system using mocked images
        num_frames = 50
        max_time = 50
        speed = 0.1
        image_builder = DemoImageBuilder(
            mode=ImageMode.MONOCULAR,
            width=640, height=480, num_stars=150,
            length=max_time * speed, speed=speed,
            close_ratio=0.6, min_size=1, max_size=50, colour=True
        )

        subject = LibVisOMonoSystem(motion_threshold=1000)
        subject.set_camera_intrinsics(image_builder.get_camera_intrinsics(), max_time / num_frames)

        subject.start_trial(ImageSequenceType.SEQUENTIAL, seed=0)
        for idx in range(num_frames):
            time = max_time * idx / num_frames
            image = image_builder.create_frame(time)
            subject.process_image(image, time)
        result = subject.finish_trial()

        self.assertIsInstance(result, SLAMTrialResult)
        self.assertEqual(subject, result.system)
        self.assertTrue(result.success)
        self.assertFalse(result.has_scale)
        self.assertIsNotNone(result.run_time)
        self.assertEqual({
            'seed': 0,
            'in_fx': image_builder.focal_length,
            'in_fy': image_builder.focal_length,
            'in_cu': image_builder.width / 2,
            'in_cv': image_builder.height / 2,
            'in_width': image_builder.width,
            'in_height': image_builder.height
        }, result.settings)
        self.assertEqual(num_frames, len(result.results))

        has_been_found = False
        has_been_lost = False
        for idx, frame_result in enumerate(result.results):
            self.assertEqual(max_time * idx / num_frames, frame_result.timestamp)
            self.assertIsNotNone(frame_result.pose)
            self.assertIsNotNone(frame_result.motion)

            # If we're lost, our tracking state should depend on whether we've been lost before
            is_first_frame = False
            if frame_result.tracking_state != TrackingState.OK:
                if has_been_found:
                    has_been_lost = True
                    self.assertEqual(frame_result.tracking_state, TrackingState.LOST)
                else:
                    self.assertEqual(frame_result.tracking_state, TrackingState.NOT_INITIALIZED)
            elif has_been_found is False:
                is_first_frame = True
                has_been_found = True

            # Estimated motion should be None when we are lost, and on the first frame where tracking is found
            if is_first_frame or frame_result.tracking_state != TrackingState.OK:
                self.assertIsNone(frame_result.estimated_motion)
            else:
                self.assertIsNotNone(frame_result.estimated_motion)

            # Estimated poses will be None until we get a successful estimate, or after tracking has been lost
            if not has_been_found or has_been_lost:
                self.assertIsNone(frame_result.estimated_pose)
            else:
                self.assertIsNotNone(frame_result.estimated_pose)
        self.assertTrue(has_been_found)