Beispiel #1
0
    def test_simple_trial_run_generated(self):
        """End-to-end trial: run OrbSlam2 in stereo mode over the whole NDDS image
        sequence and check the resulting SLAMTrialResult has one result per frame.

        NOTE(review): integration test — requires the NDDS_SEQUENCE data and the
        ORB vocabulary file (VOCAB_PATH) to exist on disk.
        """
        # TODO: The state of things:
        # - With the configured sequence, the ORB-SLAM subprocess silently dies about frame 346-347
        # - With my bastardised removal of the subprocess, it seems to work? and not crash?
        # - I have no idea what is different? the pipe?
        sequence_folder, left_path, right_path = ndds_loader.find_files(NDDS_SEQUENCE)
        camera_intrinsics = ndds_loader.read_camera_intrinsics(left_path / '_camera_settings.json')
        # Highest image index present on disk; the sequence is iterated 0..max_img_id inclusive
        max_img_id = ndds_loader.find_max_img_id(lambda idx: left_path / ndds_loader.IMG_TEMPLATE.format(idx))
        with (NDDS_SEQUENCE / 'timestamps.json').open('r') as fp:
            timestamps = json.load(fp)
        self.temp_folder.mkdir(parents=True, exist_ok=True)
        # Search root is the vocabulary's parent folder; temp_folder receives outputs
        path_manager = PathManager([VOCAB_PATH.parent], self.temp_folder)

        subject = OrbSlam2(
            vocabulary_file=str(VOCAB_PATH.name),
            mode=SensorMode.STEREO,
            vocabulary_branching_factor=5,
            vocabulary_depth=6,
            vocabulary_seed=0,
            depth_threshold=387.0381720715473,
            orb_num_features=598,
            orb_scale_factor=np.power(480 / 104, 1 / 11),  # = (480 / min_height)^(1/num_levels)
            orb_num_levels=11,
            orb_ini_threshold_fast=86,
            orb_min_threshold_fast=48
        )
        subject.resolve_paths(path_manager)
        # Second argument is 0.1 — presumably a depth/baseline scale; confirm against OrbSlam2.set_camera_intrinsics
        subject.set_camera_intrinsics(camera_intrinsics, 0.1)

        # Read the first frame data to get the baseline
        left_frame_data = ndds_loader.read_json(left_path / ndds_loader.DATA_TEMPLATE.format(0))
        right_frame_data = ndds_loader.read_json(right_path / ndds_loader.DATA_TEMPLATE.format(0))
        left_camera_pose = ndds_loader.read_camera_pose(left_frame_data)
        right_camera_pose = ndds_loader.read_camera_pose(right_frame_data)
        subject.set_stereo_offset(left_camera_pose.find_relative(right_camera_pose))

        subject.start_trial(ImageSequenceType.SEQUENTIAL, seed=0)
        image_group = 'test'
        with image_manager.get().get_group(image_group, allow_write=True):
            for img_idx in range(max_img_id + 1):
                left_pixels = image_utils.read_colour(left_path / ndds_loader.IMG_TEMPLATE.format(img_idx))
                right_pixels = image_utils.read_colour(right_path / ndds_loader.IMG_TEMPLATE.format(img_idx))

                # NOTE(review): the frame-0 camera poses are reused as metadata for
                # every frame here — verify that is intended rather than per-frame poses
                image = StereoImage(
                    pixels=left_pixels,
                    right_pixels=right_pixels,
                    image_group=image_group,
                    metadata=imeta.ImageMetadata(camera_pose=left_camera_pose),
                    right_metadata=imeta.ImageMetadata(camera_pose=right_camera_pose)
                )
                subject.process_image(image, timestamps[img_idx])
        result = subject.finish_trial()

        self.assertIsInstance(result, SLAMTrialResult)
        self.assertEqual(subject, result.system)
        self.assertTrue(result.success)
        self.assertFalse(result.has_scale)
        self.assertIsNotNone(result.run_time)
        # One SLAM result per processed frame
        self.assertEqual(max_img_id + 1, len(result.results))
Beispiel #2
0
 def test_read_actual_camera_data(self):
     """Every camera entry in the settings file exposes exactly the expected keys."""
     settings = ndds_loader.read_json(CAMERA_SETTINGS)
     self.assertEqual({'camera_settings'}, set(settings.keys()))
     expected_keys = {
         'name', 'horizontal_fov', 'intrinsic_settings',
         'captured_image_size'
     }
     for entry in settings['camera_settings']:
         self.assertEqual(expected_keys, set(entry.keys()))
Beispiel #3
0
 def test_read_actual_frame_data(self):
     """The frame json holds camera_data plus 7 objects, each with the full schema."""
     data = ndds_loader.read_json(FRAME_JSON)
     self.assertEqual({'camera_data', 'objects'}, set(data.keys()))
     self.assertEqual(
         {'location_worldframe', 'quaternion_xyzw_worldframe'},
         set(data['camera_data'].keys()))
     self.assertEqual(7, len(data['objects']))
     expected_object_keys = {
         "name", "class", "visibility", "location", "quaternion_xyzw",
         "pose_transform", "cuboid_centroid", "projected_cuboid_centroid",
         "bounding_box", "cuboid", "projected_cuboid"
     }
     for obj in data['objects']:
         self.assertEqual(expected_object_keys, set(obj.keys()))
Beispiel #4
0
 def test_read_actual_object_data(self):
     """Object settings list 66 classes and 66 instances, in matching order."""
     data = ndds_loader.read_json(OBJECT_SETTINGS)
     self.assertEqual({'exported_object_classes', 'exported_objects'},
                      set(data.keys()))
     class_names = data['exported_object_classes']
     instances = data['exported_objects']
     self.assertEqual(66, len(class_names))
     self.assertEqual(66, len(instances))
     expected_keys = {
         "name", "class", "segmentation_class_id",
         "segmentation_instance_id", "fixed_model_transform",
         "cuboid_dimensions"
     }
     # The two lists are parallel: class name at index i belongs to instance i
     for class_name, instance in zip(class_names, instances):
         self.assertEqual(expected_keys, set(instance.keys()))
         self.assertEqual(class_name, instance['class'])
Beispiel #5
0
 def test_read_from_actual_frame_data(self):
     """read_camera_pose converts the raw NDDS worldframe values to the expected pose."""
     pose = ndds_loader.read_camera_pose(ndds_loader.read_json(FRAME_JSON))
     # These values are copied from the frame_data.json
     raw_location = [-397.50518798828125, 492.243408203125, 179.62669372558594]
     raw_rotation = [
         -0.034200001507997513, 0.17430000007152557, -0.071800000965595245,
         0.98150002956390381
     ]
     # Location: cm -> m, with the y axis flipped
     expected_location = [
         raw_location[0] / 100, -raw_location[1] / 100, raw_location[2] / 100
     ]
     self.assertNPEqual(expected_location, pose.location)
     # Reorder xyzw -> wxyz (negating y) and invert
     expected_quat = t3.quaternions.qinverse(
         [raw_rotation[3], raw_rotation[0], -raw_rotation[1], raw_rotation[2]])
     # t3 winds up with |q|^2 as the norm, rather than |q|, renormalise
     expected_quat = expected_quat / np.linalg.norm(expected_quat)
     self.assertNPClose(expected_quat, pose.rotation_quat(True), atol=0, rtol=1e-15)