Example No. 1
    def test_load_rgbd_dataset_freiburg1_360(self):
        dbconn.connect_to_test_db()
        dbconn.setup_image_manager(mock=False)
        logging.disable(logging.CRITICAL)

        # Make sure there is nothing in the database
        ImageCollection.objects.all().delete()
        Image.objects.all().delete()

        result = tum_loader.import_dataset(
            dataset_root / 'rgbd_dataset_freiburg1_360',
            'rgbd_dataset_freiburg1_360')
        self.assertIsInstance(result, ImageCollection)
        self.assertIsNotNone(result.pk)
        self.assertIsNotNone(result.image_group)
        self.assertEqual(1, ImageCollection.objects.all().count())
        # Make sure we got all the images (there are 756 RGB images but only 755 depth maps)
        self.assertEqual(755, Image.objects.all().count())

        # Make sure we got the depth and position data
        with image_manager.get().get_group(result.get_image_group()):
            for timestamp, image in result:
                self.assertIsNotNone(image.pixels)
                self.assertIsNotNone(image.depth)
                self.assertIsNotNone(image.camera_pose)

        # Clean up after ourselves by dropping the collections for the models
        ImageCollection._mongometa.collection.drop()
        Image._mongometa.collection.drop()
        dbconn.tear_down_image_manager()
        logging.disable(logging.NOTSET)
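These excerpts are unittest methods lifted out of their test classes, so the surrounding imports and class definitions are not shown. As an illustration only, a single test like the one above could be selected and run programmatically; the dotted module and class names below are placeholders, not taken from the original project:

import unittest

# 'path.to.tum_loader_tests.TestTumLoader' is a placeholder dotted name;
# substitute the real module and class that contain the method above.
suite = unittest.defaultTestLoader.loadTestsFromName(
    'path.to.tum_loader_tests.TestTumLoader.test_load_rgbd_dataset_freiburg1_360')
unittest.TextTestRunner(verbosity=2).run(suite)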
Example No. 2
    def test_simple_trial_run_generated(self):
        # TODO: The state of things:
        # - With the configured sequence, the ORB-SLAM subprocess silently dies around frame 346-347
        # - With the subprocess removed, the trial appears to run to completion without crashing
        # - It is not yet clear what the difference is; possibly the pipe?
        sequence_folder, left_path, right_path = ndds_loader.find_files(NDDS_SEQUENCE)
        camera_intrinsics = ndds_loader.read_camera_intrinsics(left_path / '_camera_settings.json')
        max_img_id = ndds_loader.find_max_img_id(lambda idx: left_path / ndds_loader.IMG_TEMPLATE.format(idx))
        with (NDDS_SEQUENCE / 'timestamps.json').open('r') as fp:
            timestamps = json.load(fp)
        self.temp_folder.mkdir(parents=True, exist_ok=True)
        path_manager = PathManager([VOCAB_PATH.parent], self.temp_folder)

        subject = OrbSlam2(
            vocabulary_file=str(VOCAB_PATH.name),
            mode=SensorMode.STEREO,
            vocabulary_branching_factor=5,
            vocabulary_depth=6,
            vocabulary_seed=0,
            depth_threshold=387.0381720715473,
            orb_num_features=598,
            orb_scale_factor=np.power(480 / 104, 1 / 11),  # = (480 / min_height)^(1/num_levels)
            orb_num_levels=11,
            orb_ini_threshold_fast=86,
            orb_min_threshold_fast=48
        )
        subject.resolve_paths(path_manager)
        subject.set_camera_intrinsics(camera_intrinsics, 0.1)

        # Read the first frame data to get the baseline
        left_frame_data = ndds_loader.read_json(left_path / ndds_loader.DATA_TEMPLATE.format(0))
        right_frame_data = ndds_loader.read_json(right_path / ndds_loader.DATA_TEMPLATE.format(0))
        left_camera_pose = ndds_loader.read_camera_pose(left_frame_data)
        right_camera_pose = ndds_loader.read_camera_pose(right_frame_data)
        subject.set_stereo_offset(left_camera_pose.find_relative(right_camera_pose))

        subject.start_trial(ImageSequenceType.SEQUENTIAL, seed=0)
        image_group = 'test'
        with image_manager.get().get_group(image_group, allow_write=True):
            for img_idx in range(max_img_id + 1):
                left_pixels = image_utils.read_colour(left_path / ndds_loader.IMG_TEMPLATE.format(img_idx))
                right_pixels = image_utils.read_colour(right_path / ndds_loader.IMG_TEMPLATE.format(img_idx))

                image = StereoImage(
                    pixels=left_pixels,
                    right_pixels=right_pixels,
                    image_group=image_group,
                    metadata=imeta.ImageMetadata(camera_pose=left_camera_pose),
                    right_metadata=imeta.ImageMetadata(camera_pose=right_camera_pose)
                )
                subject.process_image(image, timestamps[img_idx])
        result = subject.finish_trial()

        self.assertIsInstance(result, SLAMTrialResult)
        self.assertEqual(subject, result.system)
        self.assertTrue(result.success)
        self.assertFalse(result.has_scale)
        self.assertIsNotNone(result.run_time)
        self.assertEqual(max_img_id + 1, len(result.results))
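The orb_scale_factor value above comes from the inline formula (480 / min_height) ** (1 / num_levels). A minimal sketch of that calculation, assuming 104 is the intended minimum pyramid height (the 104 is taken from the expression in the example, not documented elsewhere):

import numpy as np

# Reproduces the orb_scale_factor expression from the constructor above:
# scale_factor = (480 / min_height) ** (1 / num_levels)
min_height = 104    # assumed smallest image height the ORB pyramid should reach
num_levels = 11     # matches orb_num_levels in the example
scale_factor = np.power(480 / min_height, 1 / num_levels)
print(round(scale_factor, 3))  # ~1.149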
Example No. 3
    def test_simple_trial_run_generated(self):
        sequence_folder, left_path, right_path = ndds_loader.find_files(
            NDDS_SEQUENCE)
        camera_intrinsics = ndds_loader.read_camera_intrinsics(
            left_path / '_camera_settings.json')
        max_img_id = ndds_loader.find_max_img_id(
            lambda idx: left_path / ndds_loader.IMG_TEMPLATE.format(idx))
        with (NDDS_SEQUENCE / 'timestamps.json').open('r') as fp:
            timestamps = json.load(fp)

        subject = LibVisOMonoSystem(
            matcher_nms_n=10,
            matcher_nms_tau=66,
            matcher_match_binsize=50,
            matcher_match_radius=245,
            matcher_match_disp_tolerance=2,
            matcher_outlier_disp_tolerance=5,
            matcher_outlier_flow_tolerance=2,
            matcher_multi_stage=False,
            matcher_half_resolution=False,
            matcher_refinement=MatcherRefinement.SUBPIXEL,
            bucketing_max_features=6,
            bucketing_bucket_width=136,
            bucketing_bucket_height=102,
            height=1.0,
            pitch=0.0,
            ransac_iters=439,
            inlier_threshold=4.921875,
            motion_threshold=609.375)
        subject.set_camera_intrinsics(camera_intrinsics, 0.1)

        subject.start_trial(ImageSequenceType.SEQUENTIAL, seed=0)
        image_group = 'test'
        with image_manager.get().get_group(image_group, allow_write=True):
            for img_idx in range(max_img_id + 1):
                pixels = image_utils.read_colour(
                    left_path / ndds_loader.IMG_TEMPLATE.format(img_idx))
                image = Image(
                    _id=bson.ObjectId(),
                    pixels=pixels,
                    image_group=image_group,
                    metadata=imeta.ImageMetadata(camera_pose=Transform()))
                subject.process_image(image, timestamps[img_idx])
        result = subject.finish_trial()

        self.assertIsInstance(result, SLAMTrialResult)
        self.assertEqual(subject, result.system)
        self.assertTrue(result.success)
        self.assertFalse(result.has_scale)
        self.assertIsNotNone(result.run_time)
        self.assertEqual(max_img_id + 1, len(result.results))
Example No. 4
    def test_preload_image_data_loads_pixels(self):
        # Mock the image manager
        group_name = 'test'
        dbconn.setup_image_manager()
        image_group = im_manager.get().get_group(group_name, allow_write=True)
        image_group.get_image = mock.Mock(wraps=image_group.get_image)

        # Make an image, and then let it go out of scope, so the data is not in memory
        image_id = make_and_store_image(image_group=group_name)

        system = MockSystem.get_instance()
        self.assertFalse(image_group.get_image.called)
        image = Image.objects.get({'_id': image_id})
        system.preload_image_data(image)
        self.assertTrue(image_group.get_image.called)

        # Clean up
        Image._mongometa.collection.drop()
        dbconn.tear_down_image_manager()
Example No. 5
    def test_load_configured_sequence(self):
        dbconn.connect_to_test_db()
        dbconn.setup_image_manager(mock=False)
        logging.disable(logging.CRITICAL)

        # count the number of images we expect to import
        left_images = dataset_root / 'dataset' / 'sequences' / '{0:02}'.format(sequence) / 'image_2'
        right_images = dataset_root / 'dataset' / 'sequences' / '{0:02}'.format(sequence) / 'image_3'
        num_images = sum(
            1 for file in left_images.iterdir()
            if file.is_file() and file.suffix == '.png' and (right_images / file.name).exists()
        )

        # Make sure there is nothing in the database
        ImageCollection.objects.all().delete()
        StereoImage.objects.all().delete()

        result = kitti_loader.import_dataset(dataset_root, sequence)
        self.assertIsInstance(result, ImageCollection)
        self.assertIsNotNone(result.pk)
        self.assertIsNotNone(result.image_group)
        self.assertEqual(1, ImageCollection.objects.all().count())
        # Make sure we got all the images
        self.assertEqual(num_images, StereoImage.objects.all().count())

        # Make sure we got the depth and position data
        with image_manager.get().get_group(result.get_image_group()):
            for timestamp, image in result:
                self.assertIsNotNone(image.pixels)
                self.assertIsNotNone(image.camera_pose)
                self.assertIsNotNone(image.right_pixels)
                self.assertIsNotNone(image.right_camera_pose)

        # Clean up after ourselves by dropping the collections for the models
        ImageCollection._mongometa.collection.drop()
        StereoImage._mongometa.collection.drop()
        dbconn.tear_down_image_manager()
        logging.disable(logging.NOTSET)
Example No. 6
    def test_load_rgbd_dataset_freiburg1_desk_from_tarball(self):
        # Ensure the uncompressed dataset doesn't exist, so the loader has to extract it from the tarball
        if (dataset_root / 'rgbd_dataset_freiburg1_desk').is_dir():
            shutil.rmtree(dataset_root / 'rgbd_dataset_freiburg1_desk')

        dbconn.connect_to_test_db()
        dbconn.setup_image_manager(mock=False)
        logging.disable(logging.CRITICAL)

        # Make sure there is nothing in the database
        ImageCollection.objects.all().delete()
        Image.objects.all().delete()

        result = tum_loader.import_dataset(
            dataset_root / 'rgbd_dataset_freiburg1_desk.tgz',
            'rgbd_dataset_freiburg1_desk')
        self.assertIsInstance(result, ImageCollection)
        self.assertIsNotNone(result.pk)
        self.assertIsNotNone(result.image_group)
        self.assertEqual(1, ImageCollection.objects.all().count())
        # Make sure we got all the images (595 matched RGB-D frames in this sequence)
        self.assertEqual(595, Image.objects.all().count())

        # Make sure we got the depth and position data
        with image_manager.get().get_group(result.get_image_group()):
            for timestamp, image in result:
                self.assertIsNotNone(image.pixels)
                self.assertIsNotNone(image.depth)
                self.assertIsNotNone(image.camera_pose)

        # Make sure the loader cleaned up after itself by removing the extracted data
        self.assertFalse(
            (dataset_root / 'rgbd_dataset_freiburg1_desk').exists())

        # Clean up after ourselves by dropping the collections for the models
        ImageCollection._mongometa.collection.drop()
        Image._mongometa.collection.drop()
        dbconn.tear_down_image_manager()
        logging.disable(logging.NOTSET)
Example No. 7
    def test_load_configured_sequence(self):
        dbconn.connect_to_test_db()
        dbconn.setup_image_manager(mock=False)
        logging.disable(logging.CRITICAL)

        # Make sure there is nothing in the database
        ImageCollection.objects.all().delete()
        Image.objects.all().delete()

        # count the number of images we expect to import
        rgb_images = dataset_root / sequence / 'rgb'
        depth_images = dataset_root / sequence / 'depth'
        num_images = min(
            sum(1 for file in rgb_images.iterdir()
                if file.is_file() and file.suffix == '.png'),
            sum(1 for file in depth_images.iterdir()
                if file.is_file() and file.suffix == '.png'))

        result = tum_loader.import_dataset(dataset_root / sequence, sequence)
        self.assertIsInstance(result, ImageCollection)
        self.assertIsNotNone(result.pk)
        self.assertIsNotNone(result.image_group)
        self.assertEqual(1, ImageCollection.objects.all().count())
        # Make sure we got all the images counted above
        self.assertEqual(num_images, Image.objects.all().count())

        # Make sure we got the depth and position data
        with image_manager.get().get_group(result.get_image_group()):
            for timestamp, image in result:
                self.assertIsNotNone(image.pixels)
                self.assertIsNotNone(image.depth)
                self.assertIsNotNone(image.camera_pose)

        # Clean up after ourselves by dropping the collections for the models
        ImageCollection._mongometa.collection.drop()
        Image._mongometa.collection.drop()
        dbconn.tear_down_image_manager()
        logging.disable(logging.NOTSET)
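Several of the examples above repeat the same database setup and cleanup steps. The sketch below is a hypothetical refactor, not code from the original project: the DatabaseTestCase name is illustrative, and dbconn, Image and ImageCollection are assumed to be imported exactly as the excerpts use them, since their import lines are not shown.

import logging
import unittest


class DatabaseTestCase(unittest.TestCase):
    # Hypothetical base class gathering the setup/cleanup repeated in the examples above.
    # dbconn, Image and ImageCollection are assumed to be imported as in those excerpts.

    def setUp(self):
        dbconn.connect_to_test_db()
        dbconn.setup_image_manager(mock=False)
        logging.disable(logging.CRITICAL)
        # Make sure there is nothing in the database before each test
        ImageCollection.objects.all().delete()
        Image.objects.all().delete()

    def tearDown(self):
        # Clean up after ourselves by dropping the collections for the models
        ImageCollection._mongometa.collection.drop()
        Image._mongometa.collection.drop()
        dbconn.tear_down_image_manager()
        logging.disable(logging.NOTSET)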