Code example #1
def import_camera(opensfm_camera: Dict[str, Union[str, float]],
                  name: Optional[str] = None) -> kapture.Camera:
    # opensfm_camera['projection_type'] can be perspective, brown, fisheye or equirectangular
    if 'perspective' == opensfm_camera['projection_type']:
        # convert to CameraType.RADIAL [w, h, f, cx, cy, k1, k2]
        # missing principal point, just fake it at image center
        largest_side_in_pixel = float(
            max(opensfm_camera['width'], opensfm_camera['height']))
        camera_params = [
            # w, h:
            opensfm_camera['width'],
            opensfm_camera['height'],
            # f: OpenSfM stores the focal normalized by the largest image side; convert it back to pixels
            opensfm_camera['focal'] * largest_side_in_pixel,
            # cx, cy: no principal point, guess one at image center
            opensfm_camera['width'] / 2,
            opensfm_camera['height'] / 2,
            # k1, k2
            opensfm_camera.get('k1', 0.0),
            opensfm_camera.get('k2', 0.0),
        ]
        return kapture.Camera(camera_type=kapture.CameraType.RADIAL,
                              camera_params=camera_params,
                              name=name)

    else:
        raise ValueError(
            f'unable to convert camera of type {opensfm_camera["projection_type"]}'
        )
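A minimal usage sketch for the converter above (the dictionary is a hypothetical OpenSfM 'perspective' entry; in OpenSfM, 'focal' is normalized by the largest image side):

opensfm_cam = {
    'projection_type': 'perspective',
    'width': 1920,
    'height': 1080,
    'focal': 0.85,  # normalized -> 0.85 * 1920 = 1632.0 px
    'k1': -0.05,
    'k2': 0.01,
}
cam = import_camera(opensfm_cam, name='gopro')
# cam.camera_params -> [1920.0, 1080.0, 1632.0, 960.0, 540.0, -0.05, 0.01]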
Code example #2
    def test_sensors_write(self):
        cam0 = kapture.Camera(name='cam0', camera_type='SIMPLE_PINHOLE', camera_params=[640, 480, 100, 320, 240])
        cam1 = kapture.Camera(name='cam1', camera_type='SIMPLE_PINHOLE', camera_params=[640, 480, 100, 320, 240])
        formatted_expected = '\n'.join([csv.KAPTURE_FORMAT_1,
                                        '# sensor_id, name, sensor_type, [sensor_params]+',
                                        'cam0, cam0, camera, SIMPLE_PINHOLE, 640, 480, 100, 320, 240',
                                        'cam1, cam1, camera, SIMPLE_PINHOLE, 640, 480, 100, 320, 240',
                                        ''])
        sensors = kapture.Sensors()
        sensors['cam0'] = cam0
        sensors['cam1'] = cam1
        csv.sensors_to_file(self._temp_filepath, sensors)
        with open(self._temp_filepath, 'rt') as f:
            formatted_actual = f.read()

        self.assertEqual(formatted_actual, formatted_expected)
Code example #3
 def test_sensor_write(self):
     cam0 = kapture.Camera(name='name', camera_type='SIMPLE_PINHOLE', camera_params=[640, 480, 100, 320, 240])
     sensor_fields = csv.sensor_to_list(cam0)
     self.assertIsInstance(sensor_fields, list)
     self.assertEqual(len(sensor_fields), 8)
     self.assertEqual(sensor_fields,
                      ['name', 'camera', 'SIMPLE_PINHOLE', '640', '480', '100', '320', '240'])
Code example #4
def get_cameras_from_database(database: COLMAPDatabase) -> kapture.Sensors:
    """
    Creates kapture sensors from the colmap database.

    :param database: colmap database
    :return: kapture sensors
    """
    logger.info('parsing cameras ...')
    kapture_cameras = kapture.Sensors()

    for camera_id, model_id, width, height, params, prior_focal_length in database.execute(
            'SELECT camera_id, model, width, height, params, prior_focal_length FROM cameras;'
    ):
        if model_id not in CAMERA_MODEL_NAMES:
            logger.warning(
                f'unable to convert colmap camera model ({model_id}) for camera {camera_id}.'
            )
            # use 0 as default
            model_id = 0

        camera_id = get_camera_kapture_id_from_colmap_id(camera_id)
        model_name = CAMERA_MODEL_NAMES[model_id]

        #  By setting the prior_focal_length flag to 0 or 1,
        #  you can give a hint whether the reconstruction algorithm should trust the focal length value.
        params = blob_to_array(params, np.float64)
        params = [width, height] + params.tolist()

        kapture_camera = kapture.Camera(model_name, params)
        kapture_cameras[camera_id] = kapture_camera
    return kapture_cameras
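A hedged usage sketch (COLMAPDatabase.connect() is the factory from COLMAP's database.py; the database path is illustrative):

database = COLMAPDatabase.connect('/path/to/colmap.db')
sensors = get_cameras_from_database(database)
for sensor_id, camera in sensors.items():
    print(sensor_id, camera.camera_type, camera.camera_params)
database.close()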
Code example #5
def import_extended_cmu_seasons_intrinsics(
        intrinsics_file_path: str) -> kapture.Sensors:
    """
    Read and convert intrinsics file
    Format: [Camera ID] [Distortion model] [image width] [image height] [fx] [fy] [cx] [cy] [k1] [k2] [p1] [p2]

    :param intrinsics_file_path: path to the CMU intrinsics file
    :return: kapture cameras
    """
    cameras = kapture.Sensors()
    with open(intrinsics_file_path) as fin:
        table = fin.readlines()
        # remove comment lines
        table = (l1 for l1 in table if not l1.startswith('#'))
        # remove empty lines
        table = (l2 for l2 in table if l2.strip())
        # trim trailing EOL
        table = (l3.rstrip("\n\r") for l3 in table)
        # split space
        table = (re.split(r'\s+', l4) for l4 in table)
        # remove empty split
        table = ([s for s in l5 if s] for l5 in table)

    for camera_id, distortion_model, *camera_params in table:
        cameras[camera_id] = kapture.Camera(distortion_model,
                                            list(camera_params))

    return cameras
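For reference, a hypothetical line of the intrinsics file this parser accepts (Extended CMU Seasons uses OPENCV camera models; the numbers are illustrative):

# intrinsics.txt:
#   c0 OPENCV 1024 768 868.99 866.06 525.94 420.04 -0.399 0.189 0.0002 0.0006
sensors = import_extended_cmu_seasons_intrinsics('intrinsics.txt')
# sensors['c0'].camera_type -> kapture.CameraType.OPENCV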
Code example #6
File: test_core.py Project: zebrajack/kapture
    def test_init(self):
        # test bare minimum
        sensor = kapture.Sensor('unknown', [])
        self.assertEqual(sensor.name, None)
        self.assertEqual(sensor.sensor_type, 'unknown')
        self.assertListEqual(sensor.sensor_params, [])

        # test typical camera
        sensor_name = 'GOPRO_FUSION'
        sensor_type = 'camera'
        #                 SIMPLE_PINHOLE,   w,   h,   f,  cx,  cy
        sensor_params = ['SIMPLE_PINHOLE', 640, 480, 100, 320, 240]
        sensor = kapture.Sensor(sensor_type, sensor_params, name=sensor_name)
        self.assertEqual(sensor.name, sensor_name)
        self.assertEqual(sensor.sensor_type, sensor_type)
        self.assertListEqual(sensor.sensor_params, [i for i in sensor_params])
        self.assertIsInstance(sensor.__repr__(), str)

        sensor = kapture.Camera(sensor_params[0],
                                sensor_params[1:],
                                name=sensor_name)
        self.assertEqual(sensor.name, sensor_name)
        self.assertEqual(sensor.sensor_type, sensor_type)
        self.assertEqual(sensor.camera_type, kapture.CameraType.SIMPLE_PINHOLE)
        self.assertListEqual(sensor.sensor_params,
                             [str(i) for i in sensor_params])
        self.assertListEqual(sensor.camera_params,
                             [float(i) for i in sensor_params[1:]])
        self.assertIsInstance(sensor.__repr__(), str)
Code example #7
 def test_sensor_file_version(self):
     cam0 = kapture.Camera(name='cam0', camera_type='SIMPLE_PINHOLE', camera_params=[640, 480, 100, 320, 240])
     sensors = kapture.Sensors()
     sensors['cam0'] = cam0
     csv.sensors_to_file(self._temp_filepath, sensors)
     version = csv.get_version_from_csv_file(self._temp_filepath)
     current_version = csv.current_format_version()
     self.assertEqual(current_version, version, "Version correctly stored")
Code example #8
def import_image_folder(
        images_path: str,
        kapture_path: str,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Imports the images of a folder to a kapture. This creates only images and cameras.

    :param images_path: path to directory containing the images.
    :param kapture_path: path to kapture root directory.
    :param force_overwrite_existing: Silently overwrite kapture files if they already exist.
    :param images_import_method: choose how to import actual image files.
    """
    os.makedirs(kapture_path, exist_ok=True)
    delete_existing_kapture_files(kapture_path,
                                  force_erase=force_overwrite_existing)

    cameras = kapture.Sensors()
    images = kapture.RecordsCamera()

    file_list = [
        os.path.relpath(os.path.join(dirpath, filename), images_path)
        for dirpath, dirs, filenames in os.walk(images_path)
        for filename in filenames
    ]
    file_list = sorted(file_list)

    logger.info('starting conversion...')
    for n, filename in enumerate(file_list):
        # test if file is a valid image
        try:
            # lazy load
            with Image.open(path.join(images_path, filename)) as im:
                width, height = im.size
                model_params = [width, height]
        except (OSError, PIL.UnidentifiedImageError):
            # It is not a valid image: skip it
            logger.info(f'Skipping invalid image file {filename}')
            continue

        camera_id = f'sensor{n}'
        images[(n, camera_id)] = path_secure(filename)  # don't forget windows
        cameras[camera_id] = kapture.Camera(kapture.CameraType.UNKNOWN_CAMERA,
                                            model_params)

    # import (copy) image files.
    logger.info('import image files ...')
    filename_list = [f for _, _, f in kapture.flatten(images)]
    import_record_data_from_dir_auto(images_path, kapture_path, filename_list,
                                     images_import_method)

    # pack into kapture format
    imported_kapture = kapture.Kapture(sensors=cameras, records_camera=images)
    logger.info('writing imported data...')
    kapture_to_dir(kapture_path, imported_kapture)
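A typical invocation (paths illustrative); with the default TransferAction.skip the images are listed but not transferred, so a copy is requested here:

import_image_folder('/data/my_images', '/data/my_kapture',
                    force_overwrite_existing=True,
                    images_import_method=TransferAction.copy)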
Code example #9
File: test_compare.py Project: zsqiang001/kapture
    def test_equal_sensors(self):
        kapture_data_a = copy.deepcopy(self._kapture_data)
        kapture_data_b = copy.deepcopy(self._kapture_data)
        self.assertTrue(
            equal_sensors(kapture_data_a.sensors, kapture_data_b.sensors))

        kapture_data_a.sensors['7497487'] = kapture.Camera(
            kapture.CameraType.UNKNOWN_CAMERA, [3882, 382])
        self.assertFalse(
            equal_sensors(kapture_data_a.sensors, kapture_data_b.sensors))

        kapture_data_b.sensors['7497487'] = kapture.Camera(
            kapture.CameraType.UNKNOWN_CAMERA, [3882, 382])
        self.assertTrue(
            equal_sensors(kapture_data_a.sensors, kapture_data_b.sensors))

        kapture_data_b.sensors['7497487'] = kapture.Camera(
            kapture.CameraType.UNKNOWN_CAMERA, [3882, 383])
        self.assertFalse(
            equal_sensors(kapture_data_a.sensors, kapture_data_b.sensors))
Code example #10
def _import_cameras(silda_dir_path, snapshots,
                    fallback_cam_model) -> kapture.Sensors:
    logger.info('Processing sensors ...')
    cameras = kapture.Sensors()
    # use hard coded intrinsics
    # evaluated using colmap
    # 1 OPENCV_FISHEYE 1024 1024 393.299 394.815 512 512 -0.223483 0.117325 -0.0326138 0.00361082
    #                  fx, fy, cx, cy, omega
    # 1 FOV 1024 1024 300 300 512 512 0.899632
    cam_id_list = sorted(
        set(cam_id for _, cam_id, _ in kapture.flatten(snapshots)))
    for cam_id in cam_id_list:
        # pick an image for that cam id
        random_image_intrinsic = next(
            f'{timestamp}_{cam_id}.intrinsics'  # keep only the filename (that's what SILDa expects)
            for timestamp, cid, filename in kapture.flatten(snapshots)
            if cid == cam_id)
        logger.debug(
            f'camera {cam_id} intrinsics : picking at random: ("{random_image_intrinsic}")'
        )
        intrinsic_filepath = path.join(silda_dir_path, 'camera-intrinsics',
                                       random_image_intrinsic)
        logger.debug(f'loading file: "{intrinsic_filepath}"')
        silda_proj_params = np.loadtxt(intrinsic_filepath)
        # only retrieve principal point from intrinsics,
        # because the rest correspond to a fisheye model not available in colmap.
        principal_point = (silda_proj_params[0:2] *
                           SILDA_IMAGE_SIZE).flatten().tolist()
        projection = fallback_cam_model
        if 'OPENCV_FISHEYE' == projection:
            focal_length = [393.299, 394.815]
            fisheye_coefficients = [
                -0.223483, 0.117325, -0.0326138, 0.00361082
            ]
            # fx, fy, cx, cy, k1, k2, k3, k4
            proj_params = focal_length + principal_point + fisheye_coefficients
        elif 'FOV' == projection:
            # use hard-coded intrinsics from Torsten's reconstruction, i.e.:
            #       217.294036, 217.214703, 512.000000, 507.897400, -0.769113
            focal_length = [217.294036, 217.214703]
            # principal_point = [512.000000, 507.897400]
            omega = [-0.769113]
            #                  fx, fy, cx, cy, omega
            proj_params = focal_length + principal_point + omega
        else:
            raise ValueError(
                'Only accepts OPENCV_FISHEYE or FOV as projection model.')

        camera = kapture.Camera(projection,
                                SILDA_IMAGE_SIZE.tolist() + proj_params)
        cameras[cam_id] = camera
    return cameras
Code example #11
 def setUp(self):
     samples_folder = path.abspath(
         path.join(path.dirname(__file__), '../samples/'))
     self.aachen_folder = path.join(samples_folder, 'Aachen-Day-Night')
     self.aachen_models_folder = path.join(self.aachen_folder, '3D-models')
     self.images_folder = path.join(self.aachen_folder, 'images_upright')
     self.bundler_sensors = kapture.Sensors()
     self.bundler_sensors['sensor0'] = kapture.Camera(
         kapture.CameraType.RADIAL, [
             1600, 1067, 1.084590000e+03, 800, 533.5, 0.000000000e+00,
             6.894198313e-08
         ])
     self.bundler_sensors['sensor1'] = kapture.Camera(
         kapture.CameraType.RADIAL, [
             1200, 1600, 1.556980000e+03, 600, 800, 0.000000000e+00,
             3.565154420e-08
         ])
     self.bundler_sensors['sensor2'] = kapture.Camera(
         kapture.CameraType.RADIAL, [
             1600, 1067, 1.103400000e+03, 800, 533.5, 0.000000000e+00,
             6.527248534e-08
         ])
Code example #12
def opencv_model_to_kapture(width, height, K, distortion):
    """
    Get a kapture.Camera from an OpenCV intrinsic matrix and distortion parameters.
    """
    # opencv: k1, k2, p1, p2, k3, k4, k5, k6
    distortion = np.pad(distortion, [0, 8 - len(distortion)],
                        mode='constant',
                        constant_values=0)

    # kapture: w, h, fx, fy, cx, cy, k1, k2, p1, p2, k3, k4, k5, k6
    params = [width, height, K[0, 0], K[1, 1], K[0, 2], K[1, 2]
              ] + list(distortion)
    return kapture.Camera(kapture.CameraType.FULL_OPENCV, params)
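A short sketch feeding it an OpenCV-style calibration (all values illustrative); the coefficients beyond the five given (k4, k5, k6) are zero-padded by the function:

import numpy as np

K = np.array([[900.0,   0.0, 640.0],
              [  0.0, 905.0, 360.0],
              [  0.0,   0.0,   1.0]])
dist = np.array([-0.30, 0.08, 1e-4, -2e-4, -0.01])  # k1, k2, p1, p2, k3
camera = opencv_model_to_kapture(1280, 720, K, dist)
# camera.camera_type -> kapture.CameraType.FULL_OPENCV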
Code example #13
def convert_testing_intrinsics(testing_intrinsics: Iterable[VirtualGalleryTestingIntrinsic],
                               sensors: kapture.Sensors) -> None:
    """
    Import all testing intrinsics into the sensors definitions.

    :param testing_intrinsics: testing intrinsics to import
    :param sensors: sensor collection to which the new definitions are added
    """
    logger.info("Converting testing cameras...")
    for intrinsic in testing_intrinsics:
        camera_device_id = _get_testing_camera_name(intrinsic.light_id, intrinsic.occlusion_id, intrinsic.frame_id)
        camera = kapture.Camera(virtual_gallery_camera_model,
                                [virtual_gallery_width, virtual_gallery_height] + intrinsic.intrinsics)
        sensors[camera_device_id] = camera
Code example #14
File: test_image_list.py Project: zebrajack/kapture
    def test_import_without_intrinsics(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            queries_without_intrinsics_path = path.join(self.query_folder, 'day_time_queries_without_intrinsics.txt')

            import_image_list([queries_without_intrinsics_path], self.images_folder, tmpdirname,
                              force_overwrite_existing=True)

            expected_kdata = kapture_from_dir(self.kapture_query_path)
            # set all sensors to unknown
            for sensor_id in expected_kdata.sensors.keys():
                sensor = expected_kdata.sensors[sensor_id]
                assert isinstance(sensor, kapture.Camera)
                expected_kdata.sensors[sensor_id] = kapture.Camera(kapture.CameraType.UNKNOWN_CAMERA,
                                                                   sensor.camera_params[0:2])
            imported_aachen_data = kapture_from_dir(tmpdirname)
            self.assertTrue(equal_kapture(imported_aachen_data, expected_kdata))
Code example #15
    def set_camera(self, id, name, cam: Camera):
        self.default_cam = (str(id), name)

        if self.kapture.sensors is None:
            self.kapture.sensors = kt.Sensors()

        mx = cam.cam_mx
        sc = self.scale
        params = [
            cam.width * sc, cam.height * sc, mx[0, 0] * sc, mx[1, 1] * sc,
            mx[0, 2] * sc, mx[1, 2] * sc
        ] + [0.] * 8
        if cam.dist_coefs is not None:
            for i, c in enumerate(cam.dist_coefs):
                params[6 + i] = c

        self.kapture.sensors[self.default_cam[0]] = kt.Camera(
            CameraType.FULL_OPENCV, camera_params=params, name=name)
Code example #16
def import_robotcar_cameras(intrinsics_dir_path: str) -> kapture.Sensors:
    """
    Read and convert intrinsics files.

    :param intrinsics_dir_path: path to the directory containing the intrinsics files
    :return: kapture sensors
    """
    cameras = kapture.Sensors()
    for root, dirs, files in os.walk(intrinsics_dir_path):
        for intrinsic_filename in files:
            (camera_id, _) = intrinsic_filename.split('_')
            with open(path.join(intrinsics_dir_path, intrinsic_filename), 'r') as intrinsic_file:
                (_, fx) = intrinsic_file.readline().split()
                (_, fy) = intrinsic_file.readline().split()
                (_, cx) = intrinsic_file.readline().split()
                (_, cy) = intrinsic_file.readline().split()
            # w, h, fx, fy, cx, cy
            model = kapture.CameraType.PINHOLE
            model_params = [1024, 1024, fx, fy, cx, cy]
            cameras[camera_id] = kapture.Camera(model, model_params)

    return cameras
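The parser above expects each intrinsics file to hold one "key value" pair per line, read in fx, fy, cx, cy order; the camera id is taken from the filename, which is assumed to contain exactly one underscore. Hypothetical contents of left_intrinsics.txt:

fx: 400.0
fy: 400.0
cx: 512.0
cy: 512.0

The values stay strings here; kapture.Camera stores all params as strings anyway and exposes floats through camera_params.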
Code example #17
def import_from_colmap_cameras_txt(colmap_cameras_filepath: str) -> kapture.Sensors:
    """
    Imports Sensors from colmap cameras.txt

    :param colmap_cameras_filepath: input path to colmap cameras.txt file
    :return: kapture sensors
    """
    sensors = kapture.Sensors()
    # cameras[cam_id] = camera
    with open(colmap_cameras_filepath, 'r') as colmap_cameras_file:
        lines = colmap_cameras_file.readlines()
        # eliminate comments
        lines = (line for line in lines if not line.startswith('#'))
        # split by space and or comma
        lines = (re.findall(colmap_reconstruction_split_pattern, line.rstrip())
                 for line in lines)  # split fields
        for fields in lines:
            camera_id = get_camera_kapture_id_from_colmap_id(int(fields[0]))
            camera_type = str(fields[1])
            image_size = [str(s) for s in fields[2:4]]
            projection_params = [str(f) for f in fields[4:]]
            camera = kapture.Camera(camera_type, image_size + projection_params)
            sensors[camera_id] = camera
    return sensors
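A hypothetical cameras.txt line and the resulting entry (the kapture sensor id format below is assumed from get_camera_kapture_id_from_colmap_id):

# 1 SIMPLE_RADIAL 1600 1200 1199.99 800 600 -0.0324
sensors = import_from_colmap_cameras_txt('cameras.txt')
# sensors['cam_00001'].camera_type   -> kapture.CameraType.SIMPLE_RADIAL
# sensors['cam_00001'].camera_params -> [1600.0, 1200.0, 1199.99, 800.0, 600.0, -0.0324]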
Code example #18
def import_7scenes(
        d7scenes_path: str,
        kapture_dir_path: str,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Imports the RGB-D 7-Scenes dataset and saves it as kapture.

    :param d7scenes_path: path to the 7scenes sequence root path
    :param kapture_dir_path: path to kapture top directory
    :param force_overwrite_existing: Silently overwrite kapture files if they already exist.
    :param images_import_method: choose how to import actual image files.
    """
    os.makedirs(kapture_dir_path, exist_ok=True)
    delete_existing_kapture_files(kapture_dir_path,
                                  force_erase=force_overwrite_existing)

    logger.info('loading all content ...')
    POSE_SUFFIX = 'pose'
    RGB_SUFFIX = 'color'
    DEPTH_SUFFIX = 'depth'
    CAMERA_ID = 'kinect'
    d7s_filename_re = re.compile(
        r'frame-(?P<timestamp>\d{6})\.(?P<suffix>\w*)\.(?P<ext>\w*)')

    # populate
    d7s_filenames = (fn for _, _, fs in os.walk(d7scenes_path) for fn in fs)
    d7s_filenames = {
        filename: d7s_filename_re.match(filename).groupdict()
        for filename in d7s_filenames if d7s_filename_re.match(filename)
    }
    # d7s_filenames -> timestamp, suffix, ext
    if not d7s_filenames:
        raise ValueError(
            'no pose file found: make sure the path to the 7scenes sequence is valid.'
        )

    # images
    logger.info('populating image files ...')
    d7s_filenames_images = ((int(v['timestamp']), filename)
                            for filename, v in d7s_filenames.items()
                            if v['suffix'] == RGB_SUFFIX)
    snapshots = kapture.RecordsCamera()
    for timestamp, image_filename in sorted(d7s_filenames_images):
        snapshots[timestamp, CAMERA_ID] = image_filename

    # poses
    logger.info('import poses files ...')
    d7s_filenames_poses = ((int(v['timestamp']), filename)
                           for filename, v in d7s_filenames.items()
                           if v['suffix'] == POSE_SUFFIX)
    trajectories = kapture.Trajectories()
    for timestamp, pose_filename in d7s_filenames_poses:
        pose_filepath = path.join(d7scenes_path, pose_filename)
        pose_mat = np.loadtxt(
            pose_filepath
        )  # camera-to-world, 4×4 matrix in homogeneous coordinates
        rotation_mat = pose_mat[0:3, 0:3]
        position_vec = pose_mat[0:3, 3]
        rotation_quat = quaternion.from_rotation_matrix(rotation_mat)
        pose_world_from_cam = kapture.PoseTransform(r=rotation_quat,
                                                    t=position_vec)
        pose_cam_from_world = pose_world_from_cam.inverse()
        trajectories[timestamp, CAMERA_ID] = pose_cam_from_world

    # sensors
    """
    From authors: The RGB and depth camera have not been calibrated and we can’t provide calibration parameters at the 
    moment. The recorded frames correspond to the raw, uncalibrated camera images. In the KinectFusion pipeline we used 
    the following default intrinsics for the depth camera: Principle point (320,240), Focal length (585,585).
    """
    sensors = kapture.Sensors()
    sensors[CAMERA_ID] = kapture.Camera(
        name='kinect',
        camera_type=kapture.CameraType.SIMPLE_PINHOLE,
        camera_params=[640, 480, 585, 320, 240]  # w, h, f, cx, cy
    )

    # import (copy) image files.
    logger.info('copying image files ...')
    image_filenames = [f for _, _, f in kapture.flatten(snapshots)]
    import_record_data_from_dir_auto(d7scenes_path, kapture_dir_path,
                                     image_filenames, images_import_method)

    # pack into kapture format
    imported_kapture = kapture.Kapture(records_camera=snapshots,
                                       trajectories=trajectories,
                                       sensors=sensors)

    logger.info('writing imported data ...')
    kapture_to_dir(kapture_dir_path, imported_kapture)
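The inverse() call above matters: 7-Scenes pose files are camera-to-world, while kapture trajectories store camera-from-world transforms. A tiny sketch of the convention (identity rotation, camera at z=5 in world coordinates):

import numpy as np
import quaternion
import kapture

world_from_cam = kapture.PoseTransform(r=quaternion.from_rotation_matrix(np.eye(3)),
                                       t=[0, 0, 5])
cam_from_world = world_from_cam.inverse()
# cam_from_world.t is now (0, 0, -5): it maps world points into the camera frame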
Code example #19
File: import_openmvg.py Project: zebrajack/kapture
def openmvg_to_kapture(input_json: Dict[str, Union[str, Dict]],
                       kapture_images_path: str,
                       image_action=TransferAction.skip) -> kapture.Kapture:
    """
    Convert an openMVG structure to a kapture object. Also copy, move or link the image files if necessary.

    :param input_json: the openmvg JSON parsed as a dictionary
    :param kapture_images_path: top directory to create the kapture images tree
    :param image_action: action to apply on images: link, copy, move or do nothing.
    :return: the constructed kapture object
    """

    polymorphic_id_to_value = {}
    root_path: str = ''

    if input_json[ROOT_PATH]:
        root_path = input_json[ROOT_PATH]
    elif image_action == TransferAction.skip:
        logger.warning("No root_path in input file")
    else:  # It is needed to execute an action with the image file
        raise ValueError(
            f"Missing root_path to do image action '{image_action.name}'")
    openmvg_images_dir = path.basename(root_path)

    kapture_cameras = kapture.Sensors()
    if input_json.get(INTRINSICS):
        logger.info('Importing intrinsics')
        for sensor in input_json[INTRINSICS]:
            value = sensor[VALUE]
            if POLYMORPHIC_NAME in value:
                # new type name: store it for next instances
                polymorphic_id = value[POLYMORPHIC_ID] & GET_ID_MASK
                polymorphic_id_to_value[polymorphic_id] = value[
                    POLYMORPHIC_NAME]
                logger.debug("New camera_type: " +
                             polymorphic_id_to_value[polymorphic_id])
            else:
                if POLYMORPHIC_ID not in value:
                    raise ValueError(
                        f'{POLYMORPHIC_ID} is missing (intrinsics)')
                polymorphic_id = value[POLYMORPHIC_ID]

            if polymorphic_id not in polymorphic_id_to_value:
                raise ValueError(f'Unknown polymorphic_id {polymorphic_id}')

            camera_model = CameraModel(polymorphic_id_to_value[polymorphic_id])
            camera_data = value[PTR_WRAPPER][DATA]

            if camera_model == CameraModel.pinhole:
                # w, h, f, cx, cy
                camera = kapture.Camera(kapture.CameraType.SIMPLE_PINHOLE, [
                    int(camera_data[WIDTH]),
                    int(camera_data[HEIGHT]),
                    camera_data[FOCAL_LENGTH],
                    camera_data[PRINCIPAL_POINT][0],
                    camera_data[PRINCIPAL_POINT][1],
                ])
            elif camera_model == CameraModel.pinhole_radial_k1:
                # w, h, f, cx, cy, k
                camera = kapture.Camera(kapture.CameraType.SIMPLE_RADIAL, [
                    int(camera_data[WIDTH]),
                    int(camera_data[HEIGHT]), camera_data[FOCAL_LENGTH],
                    camera_data[PRINCIPAL_POINT][0],
                    camera_data[PRINCIPAL_POINT][1], camera_data[DISTO_K1][0]
                ])
            elif camera_model == CameraModel.pinhole_radial_k3:
                # w, h, f, cx, cy, k1, k2, k3
                camera = kapture.Camera(kapture.CameraType.RADIAL, [
                    int(camera_data[WIDTH]),
                    int(camera_data[HEIGHT]), camera_data[FOCAL_LENGTH],
                    camera_data[PRINCIPAL_POINT][0],
                    camera_data[PRINCIPAL_POINT][1], camera_data[DISTO_K3][0],
                    camera_data[DISTO_K3][1]
                ])
                # camera_data["disto_k3"][2] ignored: kapture's RADIAL model has two distortion params, while openMVG's has three
            elif camera_model == CameraModel.pinhole_brown_t2:
                # w, h, f, cx, cy, k1, k2, k3, t1, t2
                if float(camera_data[DISTO_T2][2]) != 0:
                    # if k3 not null, use FULL_OPENCV, otherwise OPENCV
                    # w, h, fx, fy, cx, cy, k1, k2, p1, p2, k3, k4, k5, k6
                    value0 = camera_data[VALUE0]
                    disto_t2 = camera_data[DISTO_T2]
                    camera = kapture.Camera(kapture.CameraType.FULL_OPENCV, [
                        int(value0[WIDTH]),
                        int(value0[HEIGHT]), value0[FOCAL_LENGTH],
                        value0[FOCAL_LENGTH], value0[PRINCIPAL_POINT][0],
                        value0[PRINCIPAL_POINT][1], disto_t2[0], disto_t2[1],
                        disto_t2[3], disto_t2[4], disto_t2[2], 0, 0, 0
                    ])
                else:
                    # w, h, fx, fy, cx, cy, k1, k2, p1, p2
                    value0 = camera_data[VALUE0]
                    disto_t2 = camera_data[DISTO_T2]
                    camera = kapture.Camera(kapture.CameraType.OPENCV, [
                        int(value0[WIDTH]),
                        int(value0[HEIGHT]), value0[FOCAL_LENGTH],
                        value0[FOCAL_LENGTH], value0[PRINCIPAL_POINT][0],
                        value0[PRINCIPAL_POINT][1], disto_t2[0], disto_t2[1],
                        disto_t2[3], disto_t2[4]
                    ])
            elif camera_model == CameraModel.fisheye:
                logger.warning(
                    "OpenMVG fisheye models are not compatible with OpenCV."
                    " Using SIMPLE_RADIAL_FISHEYE and forcing distortion to 0")
                # w, h, f, cx, cy, k
                value0 = camera_data[VALUE0]
                camera = kapture.Camera(
                    kapture.CameraType.SIMPLE_RADIAL_FISHEYE, [
                        int(value0[WIDTH]),
                        int(value0[HEIGHT]), value0[FOCAL_LENGTH],
                        value0[PRINCIPAL_POINT][0], value0[PRINCIPAL_POINT][1],
                        0
                    ])
            else:
                raise ValueError(f'Camera model {camera_model} not supported')

            kapture_cameras[str(sensor[KEY])] = camera

    device_identifiers = {}  # pose id -> device id
    timestamp_for_pose = {}  # pose id -> timestamp
    records_camera = kapture.RecordsCamera()
    if input_json.get(VIEWS):
        views = input_json[VIEWS]
        if image_action == TransferAction.root_link:
            # Do a unique images directory link
            # kapture/<records_dir>/openmvg_top_images_directory -> openmvg_root_path
            kapture_records_path = get_image_fullpath(kapture_images_path)
            os.makedirs(kapture_records_path, exist_ok=True)
            os.symlink(root_path,
                       path.join(kapture_records_path, openmvg_images_dir))
        logger.info(f'Importing {len(views)} images')
        # Progress bar only in debug or info level
        if image_action != TransferAction.skip and image_action != TransferAction.root_link\
                and logger.getEffectiveLevel() <= logging.INFO:
            progress_bar = tqdm(total=len(views))
        else:
            progress_bar = None
        for view in views:
            input_data = view[VALUE][PTR_WRAPPER][DATA]
            pose_id = input_data[ID_POSE]
            # both values should be the same (?)
            if input_data[ID_VIEW]:
                timestamp = input_data[ID_VIEW]
            else:
                timestamp = view[KEY]
            device_id = str(input_data[ID_INTRINSIC]
                            )  # device_id must be a string for kapture
            device_identifiers[pose_id] = device_id
            timestamp_for_pose[pose_id] = timestamp
            filename: str
            if input_data.get(LOCAL_PATH):
                filename = path.join(input_data[LOCAL_PATH],
                                     input_data[FILENAME])
            else:
                filename = input_data[FILENAME]
            if root_path:
                src_path = path.join(root_path, filename)
            else:
                src_path = filename

            # Add the common openmvg images directory in front of the filename
            kapture_filename = path.join(openmvg_images_dir, filename)
            if image_action != TransferAction.skip and image_action != TransferAction.root_link:
                dst_path = get_image_fullpath(kapture_images_path,
                                              kapture_filename)
                # Create destination directory if necessary
                dst_dir = path.dirname(dst_path)
                if not path.isdir(dst_dir):
                    os.makedirs(dst_dir, exist_ok=True)
                # Check if already exist
                if path.exists(dst_path):
                    os.unlink(dst_path)
                # Create file or link
                if image_action == TransferAction.copy:
                    shutil.copy2(src_path, dst_path)
                elif image_action == TransferAction.move:
                    shutil.move(src_path, dst_path)
                else:
                    # Individual link
                    if image_action == TransferAction.link_relative:
                        # Compute relative path
                        src_path = path.relpath(src_path, dst_dir)
                    os.symlink(src_path, dst_path)
                    # This might crash on Windows if the user executing this code has no admin privilege
                progress_bar and progress_bar.update(1)

            key = (timestamp, device_id)  # tuple of int,str
            records_camera[key] = path_secure(kapture_filename)
        progress_bar and progress_bar.close()

    trajectories = kapture.Trajectories()
    if input_json.get(EXTRINSICS):
        extrinsics = input_json[EXTRINSICS]
        logger.info(f'Importing {len(extrinsics)} extrinsics -> trajectories')
        for pose in extrinsics:
            pose_id = pose[KEY]
            center = pose[VALUE][CENTER]
            rotation = pose[VALUE][ROTATION]
            kap_translation = -1 * np.matmul(rotation, center)
            kap_pose = kapture.PoseTransform(
                quaternion.from_rotation_matrix(rotation), kap_translation)
            timestamp = timestamp_for_pose.get(pose_id)
            if timestamp is None:
                logger.warning(f'Missing timestamp for extrinsic {pose_id}')
                continue
            device_id = device_identifiers.get(pose_id)
            if device_id is None:
                logger.warning(f'Missing device for extrinsic {pose_id}')
                continue
            trajectories[(timestamp,
                          device_id)] = kap_pose  # tuple of int,str -> 6D pose

    kapture_data = kapture.Kapture(sensors=kapture_cameras,
                                   records_camera=records_camera,
                                   trajectories=trajectories)
    return kapture_data
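A hedged driver sketch (file locations illustrative; assumes the JSON is a whole openMVG sfm_data.json):

import json

with open('/data/openmvg/sfm_data.json', 'rt') as f:
    sfm_data = json.load(f)
kapture_data = openmvg_to_kapture(sfm_data, '/data/kapture',
                                  image_action=TransferAction.copy)
kapture_to_dir('/data/kapture', kapture_data)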
Code example #20
def import_7scenes(d7scenes_path: str,
                   kapture_dir_path: str,
                   force_overwrite_existing: bool = False,
                   images_import_method: TransferAction = TransferAction.skip,
                   partition: Optional[str] = None
                   ) -> None:
    """
    Imports the RGB-D 7-Scenes dataset and saves it as kapture.

    :param d7scenes_path: path to the 7scenes sequence root path
    :param kapture_dir_path: path to kapture top directory
    :param force_overwrite_existing: Silently overwrite kapture files if they already exist.
    :param images_import_method: choose how to import actual image files.
    :param partition: if specified, either 'mapping' or 'query'. Requires d7scenes_path/TestSplit.txt or TrainSplit.txt
                    to exist.
    """
    os.makedirs(kapture_dir_path, exist_ok=True)
    delete_existing_kapture_files(kapture_dir_path, force_erase=force_overwrite_existing)

    logger.info('loading all content ...')

    d7s_filename_re = re.compile(r'((?P<sequence>.+)/)?frame-(?P<frame_id>\d{6})\.(?P<suffix>\w*)\.(?P<ext>\w*)')

    # populate all relevant files
    d7s_filenames = (path_secure(path.relpath(path.join(dp, fn), d7scenes_path))
                     for dp, _, fs in os.walk(d7scenes_path) for fn in fs)

    logger.info('populating 7-scenes files ...')
    d7s_filenames = {filename: d7s_filename_re.search(filename).groupdict()
                     for filename in sorted(d7s_filenames)
                     if d7s_filename_re.search(filename)}

    # reorg as shot[seq, id] = {color: , depth: , pose: , ...}
    shots = {}
    for filename, file_attribs in d7s_filenames.items():
        shot_id = (file_attribs.get('sequence'), file_attribs['frame_id'])
        shots.setdefault(shot_id, {})[file_attribs['suffix']] = filename

    # fake timestamps
    for timestamp, shot_id in enumerate(shots):
        shots[shot_id]['timestamp'] = timestamp

    # if given, filter partition
    if partition is not None:
        # read the authors split file
        partition_filepath = path.join(d7scenes_path, PARTITION_FILENAMES[partition])
        if not path.isfile(partition_filepath):
            raise FileNotFoundError(f'partition file is missing: {partition_filepath}.')
        with open(partition_filepath, 'rt') as file:
            split_sequences = [f'seq-{int(seq.strip()[len("sequence"):]):02}' for seq in file.readlines()]
        assert len(split_sequences) > 0
        # filter out
        shots = {(seq, frame): shot
                 for (seq, frame), shot in shots.items()
                 if seq in split_sequences}

    if len(shots) == 0:
        raise FileNotFoundError('no file found: make sure the path to the 7scenes sequence is valid.')

    # eg. shots['seq-01', '000000'] =
    #       {
    #           'color': 'seq-01/frame-000000.color.jpg',
    #           'depth': 'seq-01/frame-000000.depth.png',
    #           'pose': 'seq-01/frame-000000.pose.txt',
    #           'timestamp': 0}

    # images + depth maps
    logger.info('populating image and depth maps files ...')
    snapshots = kapture.RecordsCamera()
    depth_maps = kapture.RecordsDepth()
    for shot in shots.values():
        snapshots[shot['timestamp'], RGB_SENSOR_ID] = shot['color']
        kapture_depth_map_filename = shot['depth'][:-len('.png')]  # kapture depth files are not png
        depth_maps[shot['timestamp'], DEPTH_SENSOR_ID] = kapture_depth_map_filename
        kapture_registered_depth_map_filename = shot['depth'][:-len('.png')] + '.reg'  # kapture depth files are not png
        depth_maps[shot['timestamp'], REG_DEPTH_SENSOR_ID] = kapture_registered_depth_map_filename

    # poses
    logger.info('import poses files ...')
    trajectories = kapture.Trajectories()
    for shot in shots.values():
        pose_filepath = path.join(d7scenes_path, shot['pose'])
        pose_mat = np.loadtxt(pose_filepath)  # camera-to-world, 4×4 matrix in homogeneous coordinates
        rotation_mat = pose_mat[0:3, 0:3]
        position_vec = pose_mat[0:3, 3]
        rotation_quat = quaternion.from_rotation_matrix(rotation_mat)
        pose_world_from_cam = kapture.PoseTransform(r=rotation_quat, t=position_vec)
        pose_cam_from_world = pose_world_from_cam.inverse()
        trajectories[shot['timestamp'], RGBD_SENSOR_ID] = pose_cam_from_world

    # sensors
    """
    From authors: The RGB and depth camera have not been calibrated and we can’t provide calibration parameters at the
    moment. The recorded frames correspond to the raw, uncalibrated camera images. In the KinectFusion pipeline we used
    the following default intrinsics for the depth camera: Principle point (320,240), Focal length (585,585).    
    ----
    We use the extr. kinect camera parameters from https://projet.liris.cnrs.fr/voir/activities-dataset/kinect-calibration.html. 
    """
    sensors = kapture.Sensors()
    # camera_type = kapture.CameraType.OPENCV
    # camera_params = [640, 480, 5.2161910696979987e+02, 5.2132946256749767e+02, 3.1755491910920682e+02, 2.5921654718027673e+02,
    #                  2.5673002693536984e-01, -9.3976085633794137e-01, -1.8605549188751580e-03, -2.2232238578189420e-03]  # w, h, f, cx, cy, k1, k2, p1, p2, k3
    camera_type = kapture.CameraType.SIMPLE_PINHOLE
    # camera_params = [640, 480, 5.2161910696979987e+02, 5.2132946256749767e+02, 3.1755491910920682e+02, 2.5921654718027673e+02]  # w, h, fx, fy, cx, cy
    camera_params = [640, 480, 525, 320, 240]  # w, h, f, cx, cy
    sensors[RGB_SENSOR_ID] = kapture.Camera(
        name=RGB_SENSOR_ID,
        camera_type=camera_type,
        camera_params=camera_params
    )
    # depth_camera_type = kapture.CameraType.OPENCV
    # depth_camera_params = [640, 480, 5.8818670481438744e+02, 5.8724220649505514e+02, 3.1076280589210484e+02, 2.2887144980135292e+02,
    #                        -1.8932947734719333e-01, 1.1358015104098631e+00, -4.4260345347128536e-03, -5.4869578635708153e-03, -2.2460143607712921e+00] # w, h, f, cx, cy, k1, k2, p1, p2, k3
    depth_camera_type = kapture.CameraType.SIMPLE_PINHOLE
    # depth_camera_params = [640, 480, 5.8818670481438744e+02, 5.8724220649505514e+02, 3.1076280589210484e+02, 2.2887144980135292e+02] # w, h, fx, fy, cx, cy
    depth_camera_params = [640, 480, 585, 320, 240]  # w, h, f, cx, cy
    sensors[DEPTH_SENSOR_ID] = kapture.Camera(
        name=DEPTH_SENSOR_ID,
        camera_type=depth_camera_type,
        camera_params=depth_camera_params,
        sensor_type='depth'
    )
    sensors[REG_DEPTH_SENSOR_ID] = kapture.Camera(
        name=REG_DEPTH_SENSOR_ID,
        camera_type=depth_camera_type,
        camera_params=camera_params,
        sensor_type='depth'
    )

    # bind camera and depth sensor into a rig
    R = np.array([[9.9996518012567637e-01, 2.6765126468950343e-03, -7.9041012313000904e-03],
                  [-2.7409311281316700e-03, 9.9996302803027592e-01, -8.1504520778013286e-03],
                  [7.8819942130445332e-03, 8.1718328771890631e-03, 9.9993554558014031e-01]])
    T = np.array([-2.5558943178152542e-02, 1.0109636268061706e-04, 2.0318321729487039e-03])
    Rt = np.vstack((np.hstack((R, T.reshape(3, 1))), np.array([0, 0, 0, 1])))
    logger.info('building rig with camera and depth sensor ...')
    rigs = kapture.Rigs()
    rigs[RGBD_SENSOR_ID, RGB_SENSOR_ID] = kapture.PoseTransform(quaternion.from_rotation_matrix(R), T)
    rigs[RGBD_SENSOR_ID, REG_DEPTH_SENSOR_ID] = kapture.PoseTransform(quaternion.from_rotation_matrix(R), T)
    rigs[RGBD_SENSOR_ID, DEPTH_SENSOR_ID] = kapture.PoseTransform()

    # import (copy) image files.
    logger.info('copying image files ...')
    image_filenames = [f for _, _, f in kapture.flatten(snapshots)]
    import_record_data_from_dir_auto(d7scenes_path, kapture_dir_path, image_filenames, images_import_method)

    # import (copy) depth map files.
    logger.info('converting depth files ...')
    depth_map_filenames = kapture.io.records.records_to_filepaths(depth_maps, kapture_dir_path)
    hide_progress = logger.getEffectiveLevel() > logging.INFO
    for depth_map_filename, depth_map_filepath_kapture in tqdm(depth_map_filenames.items(), disable=hide_progress):
        if '.reg' in depth_map_filename:
            continue
        depth_map_filepath_7scenes = path.join(d7scenes_path, depth_map_filename + '.png')
        depth_map = np.array(Image.open(depth_map_filepath_7scenes))
        # change invalid depth from 65535 to 0
        depth_map[depth_map == 65535] = 0
        # depth maps is in mm in 7scenes, convert it to meters
        depth_map = depth_map.astype(np.float32) * 1.0e-3
        kapture.io.records.records_depth_to_file(depth_map_filepath_kapture, depth_map)
        # register depth to rgb
        reg_depth_map = register_depth(get_K(depth_camera_type, depth_camera_params), get_K(camera_type, camera_params),
                                       Rt, depth_map, camera_params[0], camera_params[1])
        kapture.io.records.records_depth_to_file(depth_map_filepath_kapture + '.reg', reg_depth_map)

    # pack into kapture format
    imported_kapture = kapture.Kapture(
        records_camera=snapshots,
        records_depth=depth_maps,
        rigs=rigs,
        trajectories=trajectories,
        sensors=sensors)

    logger.info('writing imported data ...')
    kapture_to_dir(kapture_dir_path, imported_kapture)
Code example #21
def import_openmvg_cameras(input_json) -> kapture.Sensors:  # noqa: C901
    kapture_cameras = kapture.Sensors()
    if input_json.get(JSON_KEY.INTRINSICS):
        polymorphic_id_to_value = {}
        logger.info('Importing intrinsics')
        for sensor in input_json[JSON_KEY.INTRINSICS]:
            value = sensor[JSON_KEY.VALUE]
            if JSON_KEY.POLYMORPHIC_NAME in value:
                # new type name: store it for next instances
                polymorphic_id = value[JSON_KEY.POLYMORPHIC_ID] & GET_ID_MASK
                polymorphic_id_to_value[polymorphic_id] = value[
                    JSON_KEY.POLYMORPHIC_NAME]
                logger.debug("New camera_type: " +
                             polymorphic_id_to_value[polymorphic_id])
            else:
                if JSON_KEY.POLYMORPHIC_ID not in value:
                    raise ValueError(
                        f'{JSON_KEY.POLYMORPHIC_ID} is missing (intrinsics)')
                polymorphic_id = value[JSON_KEY.POLYMORPHIC_ID]

            if polymorphic_id not in polymorphic_id_to_value:
                raise ValueError(f'Unknown polymorphic_id {polymorphic_id}')

            camera_model = CameraModel(polymorphic_id_to_value[polymorphic_id])
            camera_data = value[JSON_KEY.PTR_WRAPPER][JSON_KEY.DATA]

            if camera_model == CameraModel.pinhole:
                # w, h, f, cx, cy
                camera = kapture.Camera(kapture.CameraType.SIMPLE_PINHOLE, [
                    int(camera_data[JSON_KEY.WIDTH]),
                    int(camera_data[JSON_KEY.HEIGHT]),
                    camera_data[JSON_KEY.FOCAL_LENGTH],
                    camera_data[JSON_KEY.PRINCIPAL_POINT][0],
                    camera_data[JSON_KEY.PRINCIPAL_POINT][1],
                ])
            elif camera_model == CameraModel.pinhole_radial_k1:
                # w, h, f, cx, cy, k
                camera = kapture.Camera(kapture.CameraType.SIMPLE_RADIAL, [
                    int(camera_data[JSON_KEY.WIDTH]),
                    int(camera_data[JSON_KEY.HEIGHT]),
                    camera_data[JSON_KEY.FOCAL_LENGTH],
                    camera_data[JSON_KEY.PRINCIPAL_POINT][0],
                    camera_data[JSON_KEY.PRINCIPAL_POINT][1],
                    camera_data[JSON_KEY.DISTO_K1][0]
                ])
            elif camera_model == CameraModel.pinhole_radial_k3:
                # w, h, f, cx, cy, k1, k2, k3
                camera = kapture.Camera(kapture.CameraType.RADIAL, [
                    int(camera_data[JSON_KEY.WIDTH]),
                    int(camera_data[JSON_KEY.HEIGHT]),
                    camera_data[JSON_KEY.FOCAL_LENGTH],
                    camera_data[JSON_KEY.PRINCIPAL_POINT][0],
                    camera_data[JSON_KEY.PRINCIPAL_POINT][1],
                    camera_data[JSON_KEY.DISTO_K3][0],
                    camera_data[JSON_KEY.DISTO_K3][1]
                ])
                # camera_data["disto_k3"][2] ignored: kapture's RADIAL model has two distortion params, while openMVG's has three
            elif camera_model == CameraModel.pinhole_brown_t2:
                # w, h, f, cx, cy, k1, k2, k3, t1, t2
                if float(camera_data[JSON_KEY.DISTO_T2][2]) != 0:
                    # if k3 not null, use FULL_OPENCV, otherwise OPENCV
                    # w, h, fx, fy, cx, cy, k1, k2, p1, p2, k3, k4, k5, k6
                    value0 = camera_data[JSON_KEY.VALUE0]
                    disto_t2 = camera_data[JSON_KEY.DISTO_T2]
                    camera = kapture.Camera(kapture.CameraType.FULL_OPENCV, [
                        int(value0[JSON_KEY.WIDTH]),
                        int(value0[JSON_KEY.HEIGHT]),
                        value0[JSON_KEY.FOCAL_LENGTH],
                        value0[JSON_KEY.FOCAL_LENGTH],
                        value0[JSON_KEY.PRINCIPAL_POINT][0],
                        value0[JSON_KEY.PRINCIPAL_POINT][1], disto_t2[0],
                        disto_t2[1], disto_t2[3], disto_t2[4], disto_t2[2], 0,
                        0, 0
                    ])
                else:
                    # w, h, fx, fy, cx, cy, k1, k2, p1, p2
                    value0 = camera_data[JSON_KEY.VALUE0]
                    disto_t2 = camera_data[JSON_KEY.DISTO_T2]
                    camera = kapture.Camera(kapture.CameraType.OPENCV, [
                        int(value0[JSON_KEY.WIDTH]),
                        int(value0[JSON_KEY.HEIGHT]),
                        value0[JSON_KEY.FOCAL_LENGTH],
                        value0[JSON_KEY.FOCAL_LENGTH],
                        value0[JSON_KEY.PRINCIPAL_POINT][0],
                        value0[JSON_KEY.PRINCIPAL_POINT][1], disto_t2[0],
                        disto_t2[1], disto_t2[3], disto_t2[4]
                    ])
            elif camera_model == CameraModel.fisheye:
                logger.warning(
                    "OpenMVG fisheye models are not compatible with OpenCV."
                    " Using SIMPLE_RADIAL_FISHEYE and forcing distortion to 0")
                # w, h, f, cx, cy, k
                value0 = camera_data[JSON_KEY.VALUE0]
                camera = kapture.Camera(
                    kapture.CameraType.SIMPLE_RADIAL_FISHEYE, [
                        int(value0[JSON_KEY.WIDTH]),
                        int(value0[JSON_KEY.HEIGHT]),
                        value0[JSON_KEY.FOCAL_LENGTH],
                        value0[JSON_KEY.PRINCIPAL_POINT][0],
                        value0[JSON_KEY.PRINCIPAL_POINT][1], 0
                    ])
            else:
                raise ValueError(f'Camera model {camera_model} not supported')

            kapture_cameras[str(sensor[JSON_KEY.KEY])] = camera

    return kapture_cameras
Code example #22
def import_idl_dataset_cvpr17(idl_dataset_path: str,
                              gt_path: Union[str, None],
                              kapture_path: str,
                              force_overwrite_existing: bool = False,
                              images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Reads the IDL dataset and copies it to a kapture.

    :param idl_dataset_path: path to the IDL dataset
    :param gt_path: ground truth data path
    :param kapture_path: path to the kapture top directory to create
    :param force_overwrite_existing: Silently overwrite kapture files if they already exist.
    :param images_import_method: choose how to import actual image files.
    """

    os.makedirs(kapture_path, exist_ok=True)
    delete_existing_kapture_files(kapture_path, force_erase=force_overwrite_existing)

    cameras = kapture.Sensors()
    images = kapture.RecordsCamera()
    trajectories = kapture.Trajectories()

    file_list = [os.path.relpath(os.path.join(dirpath, filename), idl_dataset_path)
                 for dirpath, dirs, filenames in os.walk(idl_dataset_path)
                 for filename in filenames]
    file_list = sorted(file_list)

    logger.info('starting conversion...')
    for n, filename in enumerate(file_list):
        # test if file is a valid image
        try:
            # lazy load
            with Image.open(path.join(idl_dataset_path, filename)) as im:
                width, height = im.size
                model_params = [width, height]
        except Exception:
            # not a valid image: skip it
            continue

        camera_id = f'sensor{n}'
        images[(n, camera_id)] = path_secure(filename)  # don't forget windows
        model = kapture.CameraType.UNKNOWN_CAMERA
        if gt_path is not None:
            # replace image extension with .camera
            file_gt_path = os.path.splitext(os.path.join(gt_path, filename))[0] + ".camera"

            if os.path.isfile(file_gt_path):
                with open(file_gt_path) as fin:
                    lines = fin.readlines()
                    lines = (line.rstrip().split() for line in lines)  # split fields
                    lines = list(lines)
                fx = float(lines[0][0])
                cx = float(lines[0][2])
                fy = float(lines[1][1])
                cy = float(lines[1][2])
                width_file = float(lines[8][0])
                height_file = float(lines[8][1])
                assert (width_file == width)
                assert (height_file == height)
                model = kapture.CameraType.PINHOLE
                model_params = [width, height, fx, fy, cx, cy]

                rotation_matrix = [[float(v) for v in line] for line in lines[4:7]]
                rotation = quaternion.from_rotation_matrix(rotation_matrix)
                center_of_projection = [float(v) for v in lines[7]]
                pose = kapture.PoseTransform(rotation, center_of_projection).inverse()
                trajectories[(n, camera_id)] = pose
        cameras[camera_id] = kapture.Camera(model, model_params)

    # if no trajectory were added, no need to create the file
    if not trajectories:
        trajectories = None

    # import (copy) image files.
    logger.info('import image files ...')
    filename_list = [f for _, _, f in kapture.flatten(images)]
    import_record_data_from_dir_auto(idl_dataset_path, kapture_path, filename_list, images_import_method)

    # pack into kapture format
    imported_kapture = kapture.Kapture(sensors=cameras, records_camera=images, trajectories=trajectories)
    logger.info('writing imported data...')
    kapture_to_dir(kapture_path, imported_kapture)
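A typical invocation (paths illustrative); pass gt_path=None when no .camera ground-truth files are available:

import_idl_dataset_cvpr17('/data/idl', '/data/idl_gt', '/data/kapture_idl',
                          force_overwrite_existing=True,
                          images_import_method=TransferAction.copy)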
Code example #23
    def test_evaluation(self):
        position = [1.658, 0, 0]
        position_a = [2.658, 0, 0]
        position_b = [1.758, 0, 0]
        position_c = [10.1, 0, 0]
        position_d = [2., 0, 0]
        position_e = [6.658, 0, 0]

        rotation = quaternion.from_euler_angles(np.deg2rad(110.0), 0, 0)
        rotation_a = quaternion.from_euler_angles(np.deg2rad(111.0), 0, 0)
        rotation_b = quaternion.from_euler_angles(np.deg2rad(108.0), 0, 0)
        rotation_c = quaternion.from_euler_angles(np.deg2rad(10.0), 0, 0)
        rotation_d = quaternion.from_euler_angles(np.deg2rad(110.0), 0, 0)

        pose_gt = kapture.PoseTransform(r=rotation, t=position).inverse()
        pose_a = kapture.PoseTransform(r=rotation_a, t=position_a).inverse()
        pose_b = kapture.PoseTransform(r=rotation_b, t=position_b).inverse()
        pose_c = kapture.PoseTransform(r=rotation_c, t=position_c).inverse()
        pose_d = kapture.PoseTransform(r=rotation_d, t=position_d).inverse()
        pose_e = kapture.PoseTransform(r=None, t=[-x for x in position_e])

        kdata = kapture.Kapture(sensors=kapture.Sensors(),
                                records_camera=kapture.RecordsCamera(),
                                trajectories=kapture.Trajectories())
        kdata.sensors['cam0'] = kapture.Camera(
            kapture.CameraType.UNKNOWN_CAMERA, [25, 13])
        kdata.records_camera[(0, 'cam0')] = 'a'
        kdata.records_camera[(1, 'cam0')] = 'b'
        kdata.records_camera[(2, 'cam0')] = 'c'
        kdata.records_camera[(3, 'cam0')] = 'd'
        kdata.records_camera[(4, 'cam0')] = 'e'

        kdata.trajectories[(0, 'cam0')] = pose_a
        kdata.trajectories[(1, 'cam0')] = pose_b
        kdata.trajectories[(2, 'cam0')] = pose_c
        kdata.trajectories[(3, 'cam0')] = pose_d

        kdata2 = copy.deepcopy(kdata)
        kdata2.trajectories[(4, 'cam0')] = pose_e
        kdata2.records_camera[(5, 'cam0')] = 'f'

        kdata_gt = copy.deepcopy(kdata2)
        kdata_gt.trajectories[(0, 'cam0')] = pose_gt
        kdata_gt.trajectories[(1, 'cam0')] = pose_gt
        kdata_gt.trajectories[(2, 'cam0')] = pose_gt
        kdata_gt.trajectories[(3, 'cam0')] = pose_gt
        kdata_gt.trajectories[(4, 'cam0')] = pose_gt
        kdata_gt.trajectories[(5, 'cam0')] = pose_gt

        kdata_list = [kdata, kdata2, kdata_gt]
        intersection = {'a', 'b', 'c', 'd', 'e'}

        result1 = evaluate(kdata, kdata_gt, intersection)
        self.assertEqual(len(result1), 5)
        self.assertEqual(result1[0][0], 'a')
        self.assertAlmostEqual(result1[0][1], 1.0)
        self.assertAlmostEqual(result1[0][2], 1.0)
        self.assertEqual(result1[1][0], 'b')
        self.assertAlmostEqual(result1[1][1], 0.1)
        self.assertAlmostEqual(result1[1][2], 2.0)
        self.assertEqual(result1[2][0], 'c')
        self.assertAlmostEqual(result1[2][1], 8.442)
        self.assertAlmostEqual(result1[2][2], 100.0)
        self.assertEqual(result1[3][0], 'd')
        self.assertAlmostEqual(result1[3][1], 0.342)
        self.assertAlmostEqual(result1[3][2], 0.0)
        self.assertEqual(result1[4][0], 'e')
        self.assertTrue(math.isnan(result1[4][1]))
        self.assertTrue(math.isnan(result1[4][2]))

        result2 = evaluate(kdata2, kdata_gt, intersection)
        self.assertEqual(len(result2), 5)
        self.assertEqual(result2[0][0], 'a')
        self.assertAlmostEqual(result2[0][1], 1.0)
        self.assertAlmostEqual(result2[0][2], 1.0)
        self.assertEqual(result2[1][0], 'b')
        self.assertAlmostEqual(result2[1][1], 0.1)
        self.assertAlmostEqual(result2[1][2], 2.0)
        self.assertEqual(result2[2][0], 'c')
        self.assertAlmostEqual(result2[2][1], 8.442)
        self.assertAlmostEqual(result2[2][2], 100.0)
        self.assertEqual(result2[3][0], 'd')
        self.assertAlmostEqual(result2[3][1], 0.342)
        self.assertAlmostEqual(result2[3][2], 0.0)
        self.assertEqual(result2[4][0], 'e')
        self.assertAlmostEqual(result2[4][1], 5.0)
        self.assertTrue(math.isnan(result2[4][2]))

        bins1 = fill_bins(result1, [(0.9, 5), (10, 105)])
        self.assertEqual(len(bins1), 2)
        self.assertEqual(bins1[0][0], 0.9)
        self.assertEqual(bins1[0][1], 5)
        self.assertEqual(bins1[0][2], 2)
        self.assertEqual(bins1[1][0], 10)
        self.assertEqual(bins1[1][1], 105)
        self.assertEqual(bins1[1][2], 4)

        bins2 = fill_bins(result2, [(0.9, 5), (10, 105)])
        self.assertEqual(len(bins2), 2)
        self.assertEqual(bins2[0][0], 0.9)
        self.assertEqual(bins2[0][1], 5)
        self.assertEqual(bins2[0][2], 2)
        self.assertEqual(bins2[1][0], 10)
        self.assertEqual(bins2[1][1], 105)
        self.assertEqual(bins2[1][2], 4)

        bins3 = fill_bins(result2, [(0.9, math.nan), (10, math.nan)])
        self.assertEqual(len(bins3), 2)
        self.assertEqual(bins3[0][0], 0.9)
        self.assertTrue(math.isnan(bins3[0][1]))
        self.assertEqual(bins3[0][2], 2)
        self.assertEqual(bins3[1][0], 10)
        self.assertTrue(math.isnan(bins3[1][1]))
        self.assertEqual(bins3[1][2], 5)

        bins4 = fill_bins(result2, [(0.9, -1), (10, -1)])
        self.assertEqual(len(bins4), 2)
        self.assertEqual(bins4[0][0], 0.9)
        self.assertEqual(bins4[0][1], -1)
        self.assertEqual(bins4[0][2], 2)
        self.assertEqual(bins4[1][0], 10)
        self.assertEqual(bins4[1][1], -1)
        self.assertEqual(bins4[1][2], 5)
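
The assertions above pin down the semantics of fill_bins: each bin is a (position threshold, rotation threshold) pair counted independently over the whole result list, and a NaN or negative rotation threshold disables the rotation check (which is why bins3 and bins4 both count 5 poses in their second bin). Below is a minimal sketch consistent with these assertions; it is a hypothetical reimplementation for illustration, not kapture's actual code:

import math
from typing import List, Tuple

def fill_bins_sketch(results: List[Tuple[str, float, float]],
                     bins: List[Tuple[float, float]]) -> List[Tuple[float, float, int]]:
    # each result entry is (image_name, position_error, rotation_error_deg); errors may be NaN
    filled = []
    for position_threshold, rotation_threshold in bins:
        # NaN or negative rotation threshold: only the position error is checked
        ignore_rotation = math.isnan(rotation_threshold) or rotation_threshold < 0
        count = sum(1 for _, position_error, rotation_error in results
                    if not math.isnan(position_error)
                    and position_error <= position_threshold
                    and (ignore_rotation
                         or (not math.isnan(rotation_error)
                             and rotation_error <= rotation_threshold)))
        filled.append((position_threshold, rotation_threshold, count))
    return filled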
Code example #24
def parse_cameras(number_of_cameras: int,
                  nvm_content: List[str],
                  offset: int,
                  camera_id_offset: int,
                  filter_list: Optional[Set[str]],
                  nvm_images_path: str,
                  cameras: kapture.Sensors,
                  images: kapture.RecordsCamera,
                  trajectories: Optional[kapture.Trajectories]) -> List[str]:
    """
    Parse the <List of cameras> section
    Fill cameras, images, trajectories in place.
    Image files must exist to be able to retrieve height and width.

    :param number_of_cameras: number of cameras to process
    :param nvm_content: content of NVM file
    :param offset: index in nvm_content of the first camera line
    :param camera_id_offset: offset added to the camera index to build timestamps and sensor ids
    :param filter_list: optional set of image names to import; other images are skipped
    :param nvm_images_path: path to NVM images directory
    :param cameras: kapture cameras to extend
    :param images: kapture images to extend
    :param trajectories: kapture trajectories to extend
    :return: list of image names, where the position in the list is the NVM camera index
    """
    image_idx_to_image_name = []
    # parse all cameras
    for i in range(0, number_of_cameras):
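        # NVM camera line: <file name> <focal length> <quaternion WXYZ> <camera center XYZ> <radial distortion> 0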
        line = nvm_content[i + offset].split()
        timestamp = i + camera_id_offset
        camera_id = f'sensor{timestamp}'
        image_file_name = line[0]
        image_idx_to_image_name.append(image_file_name)
        if filter_list is not None and image_file_name not in filter_list:
            # file_name is not in the list, do not add it
            continue

        focal_length = float(line[1])
        quaternion_wxyz = quaternion.from_float_array([float(v) for v in line[2:6]])
        camera_center = np.array([float(v) for v in line[6:9]])
        # https://github.com/colmap/colmap/blob/67e96894d4beed7cc93f1c0755a98d3664f85e63/src/base/reconstruction.cc#L891
        radial_distortion = -float(line[9])  # SIGN !

        try:
            # lazy open
            with Image.open(path.join(nvm_images_path, image_file_name)) as im:
                width, height = im.size
        except (OSError, PIL.UnidentifiedImageError):
            # It is not a valid image: skip it
            logger.info(f'Skipping invalid image file {image_file_name}')
            continue

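        # NVM stores the camera center c; kapture poses are world-to-camera, hence t = -R @ c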
        translation = - np.matmul(quaternion.as_rotation_matrix(quaternion_wxyz), camera_center)
        pose = kapture.PoseTransform(quaternion_wxyz, translation)

        camera = kapture.Camera(MODEL, [width, height, focal_length, width / 2, height / 2, radial_distortion])
        cameras[camera_id] = camera

        images[(timestamp, camera_id)] = image_file_name
        if trajectories is not None:
            trajectories[(timestamp, camera_id)] = pose
    return image_idx_to_image_name
Code example #25
def import_7scenes(d7scenes_path: str,
                   kapture_dir_path: str,
                   force_overwrite_existing: bool = False,
                   images_import_method: TransferAction = TransferAction.skip,
                   partition: Optional[str] = None
                   ) -> None:
    """
    Imports the 7-Scenes RGB-D dataset and saves it in kapture format.

    :param d7scenes_path: path to the 7scenes sequence root path
    :param kapture_dir_path: path to kapture top directory
    :param force_overwrite_existing: Silently overwrite kapture files if they already exist.
    :param images_import_method: choose how to import actual image files.
    :param partition: if specified, either 'mapping' or 'query'. Requires d7scenes_path/TestSplit.txt or TrainSplit.txt
                    to exist.
    """
    os.makedirs(kapture_dir_path, exist_ok=True)
    delete_existing_kapture_files(kapture_dir_path, force_erase=force_overwrite_existing)

    logger.info('loading all content ...')

    d7s_filename_re = re.compile(r'((?P<sequence>.+)/)?frame-(?P<frame_id>\d{6})\.(?P<suffix>\w*)\.(?P<ext>\w*)')

    # populate all relevant files
    d7s_filenames = (path_secure(path.relpath(path.join(dp, fn), d7scenes_path))
                     for dp, _, fs in os.walk(d7scenes_path) for fn in fs)

    logger.info('populating 7-scenes files ...')
    d7s_filenames = {filename: d7s_filename_re.search(filename).groupdict()
                     for filename in sorted(d7s_filenames)
                     if d7s_filename_re.search(filename)}

    # reorg as shot[seq, id] = {color: , depth: , pose: , ...}
    shots = {}
    for filename, file_attribs in d7s_filenames.items():
        shot_id = (file_attribs.get('sequence'), file_attribs['frame_id'])
        shots.setdefault(shot_id, {})[file_attribs['suffix']] = filename

    # fake timestamps
    for timestamp, shot_id in enumerate(shots):
        shots[shot_id]['timestamp'] = timestamp

    # if given, filter partition
    if partition is not None:
        # read the authors split file
        partition_filepath = path.join(d7scenes_path, PARTITION_FILENAMES[partition])
        if not path.isfile(partition_filepath):
            raise FileNotFoundError(f'partition file is missing: {partition_filepath}.')
        with open(partition_filepath, 'rt') as file:
            split_sequences = [f'seq-{int(seq.strip()[len("sequence"):]):02}' for seq in file.readlines()]
        assert len(split_sequences) > 0
        # filter out
        shots = {(seq, frame): shot
                 for (seq, frame), shot in shots.items()
                 if seq in split_sequences}

    if len(shots) == 0:
        raise FileNotFoundError('no file found: make sure the path to 7scenes sequence is valid.')

    # eg. shots['seq-01', '000000'] =
    #       {
    #           'color': 'seq-01/frame-000000.color.jpg',
    #           'depth': 'seq-01/frame-000000.depth.png',
    #           'pose': 'seq-01/frame-000000.pose.txt',
    #           'timestamp': 0}

    # images + depth maps
    logger.info('populating image and depth maps files ...')
    snapshots = kapture.RecordsCamera()
    depth_maps = kapture.RecordsDepth()
    for shot in shots.values():
        snapshots[shot['timestamp'], RGB_SENSOR_ID] = shot['color']
        kapture_depth_map_filename = shot['depth'][:-len('.png')]  # kapture depth files are not png
        depth_maps[shot['timestamp'], DEPTH_SENSOR_ID] = kapture_depth_map_filename

    # poses
    logger.info('importing pose files ...')
    trajectories = kapture.Trajectories()
    for shot in shots.values():
        pose_filepath = path.join(d7scenes_path, shot['pose'])
        pose_mat = np.loadtxt(pose_filepath)  # camera-to-world, 4×4 matrix in homogeneous coordinates
        rotation_mat = pose_mat[0:3, 0:3]
        position_vec = pose_mat[0:3, 3]
        rotation_quat = quaternion.from_rotation_matrix(rotation_mat)
        pose_world_from_cam = kapture.PoseTransform(r=rotation_quat, t=position_vec)
        pose_cam_from_world = pose_world_from_cam.inverse()
        trajectories[shot['timestamp'], RGBD_SENSOR_ID] = pose_cam_from_world

    # sensors
    """
    From authors: The RGB and depth camera have not been calibrated and we can’t provide calibration parameters at the
    moment. The recorded frames correspond to the raw, uncalibrated camera images. In the KinectFusion pipeline we used
    the following default intrinsics for the depth camera: Principle point (320,240), Focal length (585,585).
    """
    sensors = kapture.Sensors()
    camera_type = kapture.CameraType.SIMPLE_PINHOLE
    camera_params = [640, 480, 585, 320, 240]  # w, h, f, cx, cy
    sensors[RGB_SENSOR_ID] = kapture.Camera(
        name=RGB_SENSOR_ID,
        camera_type=camera_type,
        camera_params=camera_params
    )
    sensors[DEPTH_SENSOR_ID] = kapture.Camera(
        name=DEPTH_SENSOR_ID,
        camera_type=camera_type,
        camera_params=camera_params,
        sensor_type='depth'
    )

    # bind camera and depth sensor into a rig
    logger.info('building rig with camera and depth sensor ...')
    rigs = kapture.Rigs()
    rigs[RGBD_SENSOR_ID, RGB_SENSOR_ID] = kapture.PoseTransform()
    rigs[RGBD_SENSOR_ID, DEPTH_SENSOR_ID] = kapture.PoseTransform()

    # import (copy) image files.
    logger.info('copying image files ...')
    image_filenames = [f for _, _, f in kapture.flatten(snapshots)]
    import_record_data_from_dir_auto(d7scenes_path, kapture_dir_path, image_filenames, images_import_method)

    # import (copy) depth map files.
    logger.info('converting depth files ...')
    depth_map_filenames = kapture.io.records.records_to_filepaths(depth_maps, kapture_dir_path)
    hide_progress = logger.getEffectiveLevel() > logging.INFO
    for depth_map_filename, depth_map_filepath_kapture in tqdm(depth_map_filenames.items(), disable=hide_progress):
        depth_map_filepath_7scenes = path.join(d7scenes_path, depth_map_filename + '.png')
        depth_map = np.array(Image.open(depth_map_filepath_7scenes))
        # change invalid depth from 65535 to 0
        depth_map[depth_map == 65535] = 0
        # depth maps are in millimeters in 7-Scenes; convert to meters
        depth_map = depth_map.astype(np.float32) * 1.0e-3
        kapture.io.records.records_depth_to_file(depth_map_filepath_kapture, depth_map)

    # pack into kapture format
    imported_kapture = kapture.Kapture(
        records_camera=snapshots,
        records_depth=depth_maps,
        rigs=rigs,
        trajectories=trajectories,
        sensors=sensors)

    logger.info('writing imported data ...')
    kapture_to_dir(kapture_dir_path, imported_kapture)
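
A hypothetical invocation, assuming a 7-Scenes sequence laid out as the authors distribute it (TrainSplit.txt must be present for the mapping partition); the paths below are placeholders:

import_7scenes(d7scenes_path='/datasets/7scenes/chess',
               kapture_dir_path='/kapture/7scenes/chess/mapping',
               force_overwrite_existing=True,
               images_import_method=TransferAction.copy,
               partition='mapping')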
Code example #26
def import_12scenes(d12scenes_path: str,
                    kapture_dir_path: str,
                    force_overwrite_existing: bool = False,
                    images_import_method: TransferAction = TransferAction.skip,
                    partition: Optional[str] = None
                    ) -> None:
    """
    Imports the 12-Scenes RGB-D dataset and saves it in kapture format.

    :param d12scenes_path: path to the 12scenes sequence root path
    :param kapture_dir_path: path to kapture top directory
    :param force_overwrite_existing: Silently overwrite kapture files if they already exist.
    :param images_import_method: choose how to import actual image files.
    :param partition: if specified, either 'mapping' or 'query'.
    """
    os.makedirs(kapture_dir_path, exist_ok=True)
    delete_existing_kapture_files(kapture_dir_path, force_erase=force_overwrite_existing)

    logger.info('loading all content ...')

    d12s_filename_re = re.compile(r'frame-(?P<frame_id>\d{6})\.(?P<suffix>\w*)\.(?P<ext>\w*)')

    # populate all relevant files
    d12images_path = os.path.join(d12scenes_path, 'data')
    d12s_filenames = (path_secure(path.relpath(path.join(dp, fn), d12images_path))
                      for dp, _, fs in os.walk(d12images_path) for fn in fs)

    logger.info('populating 12-scenes files ...')
    d12s_filenames = {filename: d12s_filename_re.search(filename).groupdict()
                      for filename in sorted(d12s_filenames)
                      if d12s_filename_re.search(filename)}

    # reorg as shot[seq, id] = {color: , depth: , pose: , ...}
    shots = {}
    for filename, file_attribs in d12s_filenames.items():
        shot_id = int(file_attribs['frame_id'])
        shots.setdefault(shot_id, {})[file_attribs['suffix']] = filename

    # fake timestamps
    for timestamp, shot_id in enumerate(shots):
        shots[shot_id]['timestamp'] = timestamp

    # if given, filter partition
    if partition is not None:
        # read the authors split file
        partition_filepath = path.join(d12scenes_path, 'split.txt')
        if not path.isfile(partition_filepath):
            raise FileNotFoundError(f'partition file is missing: {partition_filepath}.')

        with open(partition_filepath, 'rt') as file:
            # note from dsac++; the first sequence is used for testing, everything else for training
            d12s_split_exp = r'^sequence(?P<sequence>\d+) \[frames=(?P<count>\d+)\]  \[start=(?P<start_frame>\d+) ;' \
                             r' end=(?P<end_frame>\d+)\]$'
            d12s_split_re = re.compile(d12s_split_exp)
            split_sequences = [re.match(d12s_split_re, line) for line in file.readlines()]
            if len(split_sequences) < 1 or not split_sequences[0]:
                raise ValueError('failed to parse split.txt file')
            test_split = (int(split_sequences[0].group('start_frame')), int(split_sequences[0].group('end_frame')))

            # filter out
            if partition == "query":
                shots = {frame: shot
                         for frame, shot in shots.items()
                         if test_split[0] <= frame <= test_split[1]
                         }
            elif partition == "mapping":
                shots = {frame: shot
                         for frame, shot in shots.items()
                         if frame < test_split[0] or frame > test_split[1]
                         }
            else:
                raise ValueError('invalid partition name')

    if len(shots) == 0:
        raise FileNotFoundError('no file found: make sure the path to 12scenes sequence is valid.')

    # eg. shots[0] =
    #       {
    #           'color': 'frame-000000.color.jpg',
    #           'depth': 'frame-000000.depth.png',
    #           'pose': 'frame-000000.pose.txt',
    #           'timestamp': 0}

    # images + depth maps
    logger.info('populating image and depth maps files ...')
    snapshots = kapture.RecordsCamera()
    depth_maps = kapture.RecordsDepth()
    for shot in shots.values():
        snapshots[shot['timestamp'], RGB_SENSOR_ID] = shot['color']
        kapture_depth_map_filename = shot['depth'][:-len('.png')]  # kapture depth files are not png
        depth_maps[shot['timestamp'], DEPTH_SENSOR_ID] = kapture_depth_map_filename
        kapture_registered_depth_map_filename = shot['depth'][:-len('.png')] + '.reg'  # kapture depth files are not png
        depth_maps[shot['timestamp'], REG_DEPTH_SENSOR_ID] = kapture_registered_depth_map_filename

    # poses
    logger.info('importing pose files ...')
    trajectories = kapture.Trajectories()
    for shot in shots.values():
        pose_filepath = path.join(d12images_path, shot['pose'])
        with open(pose_filepath, 'r') as file:
            if 'INF' in file.read():
                timestamp = shot['timestamp']
                image_name = shot['color']
                logger.debug(f'ts={timestamp}, name={image_name}: ignored inf pose')
                continue
        pose_mat = np.loadtxt(pose_filepath)  # camera-to-world, 4×4 matrix in homogeneous coordinates
        rotation_mat = pose_mat[0:3, 0:3]
        position_vec = pose_mat[0:3, 3]
        rotation_quat = quaternion.from_rotation_matrix(rotation_mat)
        pose_world_from_cam = kapture.PoseTransform(r=rotation_quat, t=position_vec)
        pose_cam_from_world = pose_world_from_cam.inverse()
        trajectories[shot['timestamp'], RGBD_SENSOR_ID] = pose_cam_from_world

    # sensors
    """
    Read info.txt
    """
    info_filepath = path.join(d12scenes_path, 'info.txt')
    if not path.isfile(info_filepath):
        raise FileNotFoundError(f'info file is missing: {info_filepath}.')

    with open(info_filepath, 'rt') as file:
        info_dict = {}
        for line in file.readlines():
            line_splits = line.rstrip().split(' = ')
            info_dict[line_splits[0]] = line_splits[1]

    sensors = kapture.Sensors()
    camera_type = kapture.CameraType.PINHOLE
    assert 'm_calibrationColorIntrinsic' in info_dict
    assert 'm_colorWidth' in info_dict
    assert 'm_colorHeight' in info_dict
    rgb_intrinsics = [float(v) for v in info_dict['m_calibrationColorIntrinsic'].split(' ')]
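    # m_calibrationColorIntrinsic is a flattened 4x4 row-major matrix
    # [fx 0 cx 0; 0 fy cy 0; 0 0 1 0; 0 0 0 1], hence indices 0 (fx), 5 (fy), 2 (cx), 6 (cy) below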
    # w, h, fx, fy, cx, cy
    rgb_camera_params = [int(info_dict['m_colorWidth']), int(info_dict['m_colorHeight']),
                         rgb_intrinsics[0], rgb_intrinsics[5], rgb_intrinsics[2], rgb_intrinsics[6]]
    sensors[RGB_SENSOR_ID] = kapture.Camera(
        name=RGB_SENSOR_ID,
        camera_type=camera_type,
        camera_params=rgb_camera_params
    )

    assert 'm_calibrationDepthIntrinsic' in info_dict
    assert 'm_depthWidth' in info_dict
    assert 'm_depthHeight' in info_dict
    depth_intrinsics = [float(v) for v in info_dict['m_calibrationDepthIntrinsic'].split(' ')]
    # w, h, fx, fy, cx, cy
    depth_camera_params = [int(info_dict['m_depthWidth']), int(info_dict['m_depthHeight']),
                           depth_intrinsics[0], depth_intrinsics[5], depth_intrinsics[2], depth_intrinsics[6]]
    sensors[DEPTH_SENSOR_ID] = kapture.Camera(
        name=DEPTH_SENSOR_ID,
        camera_type=camera_type,
        camera_params=depth_camera_params,
        sensor_type='depth'
    )

    sensors[REG_DEPTH_SENSOR_ID] = kapture.Camera(
        name=REG_DEPTH_SENSOR_ID,
        camera_type=camera_type,
        camera_params=rgb_camera_params,
        sensor_type='depth'
    )

    # bind camera and depth sensor into a rig
    logger.info('building rig with camera and depth sensor ...')
    rigs = kapture.Rigs()
    rigs[RGBD_SENSOR_ID, RGB_SENSOR_ID] = kapture.PoseTransform()
    rigs[RGBD_SENSOR_ID, DEPTH_SENSOR_ID] = kapture.PoseTransform()
    rigs[RGBD_SENSOR_ID, REG_DEPTH_SENSOR_ID] = kapture.PoseTransform()

    # import (copy) image files.
    logger.info('copying image files ...')
    image_filenames = [f for _, _, f in kapture.flatten(snapshots)]
    import_record_data_from_dir_auto(d12images_path, kapture_dir_path, image_filenames, images_import_method)

    # import (copy) depth map files.
    logger.info('converting depth files ...')
    depth_map_filenames = kapture.io.records.records_to_filepaths(depth_maps, kapture_dir_path)
    hide_progress = logger.getEffectiveLevel() > logging.INFO
    for depth_map_filename, depth_map_filepath_kapture in tqdm(depth_map_filenames.items(), disable=hide_progress):
        if '.reg' in depth_map_filename:
            continue
        depth_map_filepath_12scenes = path.join(d12images_path, depth_map_filename + '.png')
        depth_map = np.array(Image.open(depth_map_filepath_12scenes))
        # depth maps are in millimeters in 12-Scenes; convert to meters
        depth_map = depth_map.astype(np.float32) * 1.0e-3
        kapture.io.records.records_depth_to_file(depth_map_filepath_kapture, depth_map)
        # register depth to rgb
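        # np.eye(4) as depth-to-RGB extrinsics: both sensors share the rig pose (identity
        # transforms in rigs above), so registration only accounts for intrinsics and resolution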
        reg_depth_map = register_depth(get_K(camera_type, depth_camera_params), get_K(camera_type, rgb_camera_params),
                                       np.eye(4), depth_map, rgb_camera_params[0], rgb_camera_params[1])
        kapture.io.records.records_depth_to_file(depth_map_filepath_kapture + '.reg', reg_depth_map)

    # pack into kapture format
    imported_kapture = kapture.Kapture(
        records_camera=snapshots,
        records_depth=depth_maps,
        rigs=rigs,
        trajectories=trajectories,
        sensors=sensors)

    logger.info('writing imported data ...')
    kapture_to_dir(kapture_dir_path, imported_kapture)
Code example #27
def import_bundler(
        bundler_path: str,
        image_list_path: str,
        image_dir_path: str,
        kapture_dir_path: str,
        ignore_trajectories: bool,
        add_reconstruction: bool,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Imports Bundler data and saves it as kapture.

    :param bundler_path: path to the bundler model file
    :param image_list_path: path to the file containing the list of image names
    :param image_dir_path: input path to bundler image directory.
    :param kapture_dir_path: path to kapture top directory
    :param ignore_trajectories: if True, will not import the trajectories
    :param add_reconstruction: if True, will create 3D points and observations
    :param force_overwrite_existing: Silently overwrite kapture files if they already exist.
    :param images_import_method: choose how to import actual image files.
    """
    os.makedirs(kapture_dir_path, exist_ok=True)
    delete_existing_kapture_files(kapture_dir_path,
                                  force_erase=force_overwrite_existing)

    logger.info('loading all content...')
    # if there is a filter list, parse it
    with open(image_list_path) as file:
        file_content = file.readlines()
    # remove end line char and empty lines
    image_list = [line.rstrip() for line in file_content if line != '\n']

    with open(bundler_path) as file:
        bundler_content = file.readlines()
    # remove end line char and empty lines
    bundler_content = [
        line.rstrip() for line in bundler_content if line != '\n'
    ]
    assert bundler_content[0] == "# Bundle file v0.3"
    # <num_cameras> <num_points>
    line_1 = bundler_content[1].split()
    number_of_cameras = int(line_1[0])
    number_of_points = int(line_1[1])
    offset = 2
    number_of_lines_per_camera = 5  # 1 camera + 3 rotation + 1 translation

    cameras = kapture.Sensors()
    images = kapture.RecordsCamera()
    trajectories = kapture.Trajectories() if not ignore_trajectories else None
    points3d = [] if add_reconstruction else None
    keypoints = kapture.Keypoints('sift', np.float32,
                                  2) if add_reconstruction else None
    observations = kapture.Observations() if add_reconstruction else None
    image_mapping = []  # bundler camera_id -> (name, width, height)
    for i in range(0, number_of_cameras):
        start_index = i * number_of_lines_per_camera + offset
        file_name = image_list[i]

        # process camera info
        line_camera = bundler_content[start_index].split()
        focal_length = float(line_camera[0])
        k1 = float(line_camera[1])
        k2 = float(line_camera[2])

        # lazy open
        with Image.open(path.join(image_dir_path, file_name)) as im:
            width, height = im.size

        image_mapping.append((file_name, width, height))
        camera = kapture.Camera(
            MODEL,
            [width, height, focal_length, width / 2, height / 2, k1, k2])
        camera_id = f'sensor{i}'
        cameras[camera_id] = camera

        # process extrinsics
        rotation_matrix = [[float(v) for v in line.split()]
                           for line in bundler_content[start_index +
                                                       1:start_index + 4]]

        quaternion_wxyz = quaternion.from_rotation_matrix(rotation_matrix)
        translation = np.array(
            [float(v) for v in bundler_content[start_index + 4].split()])
        pose = kapture.PoseTransform(quaternion_wxyz, translation)

        # The Bundler model uses a coordinate system that differs from the *computer vision camera
        #  coordinate system*. More specifically, they use the camera coordinate system typically used
        #  in *computer graphics*. In this camera coordinate system, the camera is looking down the
        #  `-z`-axis, with the `x`-axis pointing to the right and the `y`-axis pointing upwards.
        # rotation Pi around the x axis to get the *computer vision camera
        #  coordinate system*
        rotation_around_x = quaternion.quaternion(0.0, 1.0, 0.0, 0.0)
        transformation = kapture.PoseTransform(rotation_around_x,
                                               np.array([0, 0, 0]))

        images[(i, camera_id)] = file_name
        if trajectories is not None:
            # transformation.inverse() equals transformation (a rotation of +Pi or -Pi around X is the same)
            trajectories[(i, camera_id)] = kapture.PoseTransform.compose(
                [transformation, pose, transformation])

    if points3d is not None and number_of_points > 0:
        assert keypoints is not None
        assert observations is not None
        offset += number_of_cameras * number_of_lines_per_camera
        number_of_lines_per_point = 3  # position color viewlist

        # (image_name, bundler_keypoint_id ) -> keypoint_id
        known_keypoints = {}
        local_keypoints = {}

        for i in range(0, number_of_points):
            start_index = i * number_of_lines_per_point + offset
            position = [float(v) for v in bundler_content[start_index].split()]
            # apply transformation
            position = [position[0], -position[1], -position[2]]
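            # Y and Z negated: the same graphics-to-vision axis flip applied to the camera poses above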
            color = [
                float(v) for v in bundler_content[start_index + 1].split()
            ]

            # <view list>: length of the list + [<camera> <key> <x> <y>]
            # x, y origin is the center of the image
            view_list = bundler_content[start_index + 2].split()
            number_of_observations = int(view_list[0])

            for j in range(number_of_observations):
                camera_id = int(view_list[1 + 4 * j + 0])
                keypoint_id = int(view_list[1 + 4 * j + 1])
                x = float(view_list[1 + 4 * j + 2])
                y = float(view_list[1 + 4 * j + 3])

                file_name, width, height = image_mapping[camera_id]
                # put (0,0) in upper left corner
                x += (width / 2)
                y += (height / 2)

                # init local_keypoints if needed
                if file_name not in local_keypoints:
                    local_keypoints[file_name] = []
                # do not add the same keypoint twice
                if (file_name, keypoint_id) not in known_keypoints:
                    # in the kapture format, keypoint id is different. Note that it starts from 0
                    known_keypoints[(file_name, keypoint_id)] = len(
                        local_keypoints[file_name])
                    local_keypoints[file_name].append([x, y])
                keypoint_idx = known_keypoints[(file_name, keypoint_id)]
                observations.add(i, file_name, keypoint_idx)
            points3d.append(position + color)
        points3d = np.array(points3d)

        # finally, convert local_keypoints to np.ndarray and add them to the global keypoints variable
        keypoints = kapture.Keypoints('sift', np.float32, 2)
        for image_filename, keypoints_array in local_keypoints.items():
            keypoints_np_array = np.array(keypoints_array).astype(np.float32)
            keypoints_out_path = kapture.io.features.get_keypoints_fullpath(
                kapture_dir_path, image_filename)
            kapture.io.features.image_keypoints_to_file(
                keypoints_out_path, keypoints_np_array)
            keypoints.add(image_filename)

    if points3d is not None:
        points3d = kapture.Points3d(points3d)

    # import (copy) image files.
    logger.info('import image files ...')
    filename_list = [f for _, _, f in kapture.flatten(images)]
    import_record_data_from_dir_auto(image_dir_path, kapture_dir_path,
                                     filename_list, images_import_method)

    # pack into kapture format
    imported_kapture = kapture.Kapture(sensors=cameras,
                                       records_camera=images,
                                       trajectories=trajectories,
                                       points3d=points3d,
                                       keypoints=keypoints,
                                       observations=observations)
    logger.info('writing imported data...')
    kapture_to_dir(kapture_dir_path, imported_kapture)
Code example #28
def import_silda(
    silda_dirpath: str,
    destination_kapture_dirpath: str,
    fallback_cam_model: str = 'FOV',
    do_split_cams: bool = False,
    corpus: Optional[str] = None,
    replace_pose_rig: bool = False,
    force_overwrite_existing: bool = False,
    images_import_strategy: TransferAction = TransferAction.link_absolute
) -> None:
    """
    Imports data from silda dataset.

    :param silda_dirpath: path to the silda top directory
    :param destination_kapture_dirpath: output path to the kapture directory.
    :param fallback_cam_model: camera model to fall back on when necessary
    :param do_split_cams: If true, re-organises and renames the image files to split apart cameras.
    :param corpus: the corpus to import, either 'mapping' or 'query'.
    :param replace_pose_rig: if True, replaces poses of individual cameras with poses of the rig.
    :param force_overwrite_existing: if True, silently overwrite kapture files if they already exist.
    :param images_import_strategy: how to copy image files.
    """

    # sanity check
    silda_dirpath = path_secure(path.abspath(silda_dirpath))
    destination_kapture_dirpath = path_secure(
        path.abspath(destination_kapture_dirpath))
    if TransferAction.root_link == images_import_strategy and do_split_cams:
        raise ValueError(
            'cannot root-link the images directory while also splitting cameras apart.')
    hide_progress_bars = logger.getEffectiveLevel() >= logging.INFO

    # prepare output directory
    kapture.io.structure.delete_existing_kapture_files(
        destination_kapture_dirpath, force_overwrite_existing)
    os.makedirs(destination_kapture_dirpath, exist_ok=True)

    # images ###########################################################################################################
    logger.info('Processing images ...')
    # silda-images
    #   ...
    #   ├── 1445_0.png
    #   ├── 1445_1.png
    #   ...
    silda_images_root_path = path.join(silda_dirpath, 'silda-images')
    # list all png files (it's PNG in silda) using a generator.
    if corpus is not None:
        assert corpus in SILDA_CORPUS_SPLIT_FILENAMES
        # if corpus specified, filter by those which directory name match corpus.
        logger.debug(f'only importing {corpus} part.')
        corpus_filepath = path.join(silda_dirpath,
                                    SILDA_CORPUS_SPLIT_FILENAMES[corpus])
        with open(corpus_filepath, 'rt') as corpus_file:
            corpus_filenames = corpus_file.readlines()
            image_filenames_original = sorted(filename.strip()
                                              for filename in corpus_filenames)
    else:
        image_filenames_original = sorted(
            filename for dirpath, sd, fs in os.walk(silda_images_root_path)
            for filename in fs if filename.endswith('.png'))

    image_filenames_kapture = []
    snapshots = kapture.RecordsCamera()
    image_name_to_ids = {}  # '1445_0.png' -> (1445, 0)
    for image_filename_original in tqdm(image_filenames_original,
                                        disable=hide_progress_bars):
        # retrieve info from image filename
        shot_info = SILDA_IMAGE_NAME_PATTERN.match(image_filename_original)
        assert shot_info is not None
        shot_info = shot_info.groupdict()
        shot_info['timestamp'] = int(
            shot_info['timestamp']
        )  # To avoid warnings about type of the value
        # eg. file_info = {'filename': '1445_0.png', 'timestamp': 1445, 'cam_id': '0'}
        # build the path of the image inside the kapture directory
        if do_split_cams:
            # re-organise images with subfolders per corpus/camera/timestamp.png
            kapture_image_filename = path.join(
                shot_info['cam_id'],
                '{:04d}.png'.format(shot_info['timestamp']))
        else:
            # keep the original file hierarchy
            kapture_image_filename = image_filename_original

        image_filenames_kapture.append(kapture_image_filename)
        snapshots[shot_info['timestamp'],
                  shot_info['cam_id']] = kapture_image_filename
        image_name_to_ids[shot_info['filename']] = (shot_info['timestamp'],
                                                    shot_info['cam_id'])

    assert len(image_filenames_kapture) == len(image_filenames_original)
    # intrinsics #######################################################################################################
    logger.info('Processing sensors ...')
    cameras = kapture.Sensors()
    # use hard coded intrinsics
    # evaluated using colmap
    # 1 OPENCV_FISHEYE 1024 1024 393.299 394.815 512 512 -0.223483 0.117325 -0.0326138 0.00361082
    #                  fx, fy, cx, cy, omega
    # 1 FOV 1024 1024 300 300 512 512 0.899632
    cam_id_list = sorted(
        set(cam_id for _, cam_id, _ in kapture.flatten(snapshots)))
    for cam_id in cam_id_list:
        # pick an image for that cam id
        random_image_intrinsic = next(
            f'{timestamp}_{cam_id}.intrinsics'  # keep only the filename (that's what silda expects)
            for timestamp, cid, filename in kapture.flatten(snapshots)
            if cid == cam_id)
        logger.debug(
            f'camera {cam_id} intrinsics : picking at random: ("{random_image_intrinsic}")'
        )
        intrinsic_filepath = path.join(silda_dirpath, 'camera-intrinsics',
                                       random_image_intrinsic)
        logger.debug(f'loading file: "{intrinsic_filepath}"')
        silda_proj_params = np.loadtxt(intrinsic_filepath)
        # only retrieve principal point from intrinsics,
        # because the rest correspond to a fisheye model not available in colmap.
        principal_point = (silda_proj_params[0:2] *
                           SILDA_IMAGE_SIZE).flatten().tolist()
        projection = fallback_cam_model
        if 'OPENCV_FISHEYE' == projection:
            focal_length = [393.299, 394.815]
            fisheye_coefficients = [
                -0.223483, 0.117325, -0.0326138, 0.00361082
            ]
            #          //    fx, fy, cx, cy, k1, k2, k3, k4
            proj_params = focal_length + principal_point + fisheye_coefficients
        elif 'FOV' == projection:
            # use hard-coded intrinsics from Torsten's reconstruction, i.e.:
            #       217.294036, 217.214703, 512.000000, 507.897400, -0.769113
            focal_length = [217.294036, 217.214703]
            # principal_point = [512.000000, 507.897400]
            omega = [-0.769113]
            #                  fx, fy, cx, cy, omega
            proj_params = focal_length + principal_point + omega
        else:
            raise ValueError(
                'Only accepts OPENCV_FISHEYE, or FOV as projection model.')

        camera = kapture.Camera(projection,
                                SILDA_IMAGE_SIZE.tolist() + proj_params)
        cameras[cam_id] = camera

    # extrinsics #######################################################################################################
    logger.info('Processing trajectories ...')
    trajectories = kapture.Trajectories()
    with open(path.join(silda_dirpath, 'silda-train-poses.txt')) as file:
        lines = file.readlines()
        lines = (line.rstrip().split() for line in lines)
        extrinsics = {
            line[0]: np.array(line[1:8], dtype=float)
            for line in lines
        }

    for silda_image_name, pose_params in tqdm(extrinsics.items(),
                                              disable=hide_progress_bars):
        # Silda poses are 7-dim vectors with the rotation quaternion,
        # and the translation vector. The order needs to be:
        # qw,qx,qy,qz,tx,ty,tz
        # The parameters should be described in terms of camera to world transformations
        if silda_image_name not in image_name_to_ids:
            # if this image is not referenced, it belongs to a corpus that was filtered out.
            continue
        pose = kapture.PoseTransform(pose_params[0:4],
                                     pose_params[4:7]).inverse()
        timestamp, cam_id = image_name_to_ids[silda_image_name]
        trajectories[timestamp, cam_id] = pose

    # rigs
    logger.info('Making up a rig ...')
    rigs = kapture.Rigs()
    pose_babord = kapture.PoseTransform(t=[0, 0, 0],
                                        r=quaternion.from_rotation_vector(
                                            [0, -np.pi / 2, 0]))
    pose_tribord = kapture.PoseTransform(t=[0, 0, 0],
                                         r=quaternion.from_rotation_vector(
                                             [0, np.pi / 2, 0]))
    rigs['silda_rig', '0'] = pose_babord
    rigs['silda_rig', '1'] = pose_tribord
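    # 'babord'/'tribord' is French for port/starboard: the two cameras of the rig point in
    # opposite directions, rotated -pi/2 and +pi/2 around the Y axis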
    if replace_pose_rig:
        logger.info('replacing camera poses with rig poses.')
        kapture.rigs_recover_inplace(trajectories, rigs)

    # pack it all together
    kapture_data = kapture.Kapture(sensors=cameras,
                                   records_camera=snapshots,
                                   trajectories=trajectories,
                                   rigs=rigs)

    logger.info('saving to Kapture  ...')
    kapture.io.csv.kapture_to_dir(destination_kapture_dirpath, kapture_data)

    # finally import images
    if images_import_strategy != TransferAction.skip:
        # importing image files
        logger.info(f'importing {len(image_filenames_original)} images ...')
        assert len(image_filenames_original) == len(image_filenames_kapture)
        image_filepaths_original = [
            path.join(silda_images_root_path, image_filename_original)
            for image_filename_original in image_filenames_original
        ]
        image_filepaths_kapture = [
            get_image_fullpath(destination_kapture_dirpath,
                               image_filename_kapture)
            for image_filename_kapture in image_filenames_kapture
        ]
        transfer_files_from_dir(image_filepaths_original,
                                image_filepaths_kapture,
                                images_import_strategy)
    logger.info('done.')
Code example #29
def import_image_list(images_list_filenames: List[str],
                      images_dirpath: str,
                      kapture_path: str,
                      force_overwrite_existing: bool = False,
                      images_import_method: TransferAction = TransferAction.skip) -> None:
    """
    Imports the list of images to a kapture. This creates only images and cameras.

    :param images_list_filenames: list of text files containing image file names
    :param images_dirpath: path to images directory.
    :param kapture_path: path to kapture root directory.
    :param force_overwrite_existing: Silently overwrite kapture files if they already exist.
    :param images_import_method: choose how to import actual image files.
    """
    assert isinstance(images_list_filenames, list)
    os.makedirs(kapture_path, exist_ok=True)
    delete_existing_kapture_files(kapture_path, force_erase=force_overwrite_existing)

    cameras = kapture.Sensors()
    images = kapture.RecordsCamera()

    offset = 0
    logger.info('starting conversion...')
    for images_list_filename in images_list_filenames:
        logger.info(f'loading {images_list_filename}')
        with open(images_list_filename) as file:
            images_list = file.readlines()
            # remove end line char and empty lines
            images_list = [line.rstrip() for line in images_list if line != '\n']

            for i in range(0, len(images_list)):
                line = images_list[i].split()
                image_file_name = line[0]
                if len(line) > 1:
                    model = line[1]
                    model_params = line[2:]
                else:
                    model = kapture.CameraType.UNKNOWN_CAMERA.value
                    try:
                        # lazy open
                        with Image.open(path.join(images_dirpath, image_file_name)) as im:
                            width, height = im.size
                            model_params = [width, height]
                    except (OSError, PIL.UnidentifiedImageError):
                        # It is not a valid image: skip it
                        logger.info(f'Skipping invalid image file {image_file_name}')
                        continue

                camera_id = f'sensor{i + offset}'
                cameras[camera_id] = kapture.Camera(model, model_params)
                images[(i + offset, camera_id)] = image_file_name
            offset += len(images_list)

    # import (copy) image files.
    logger.info('import image files ...')
    filename_list = [f for _, _, f in kapture.flatten(images)]
    import_record_data_from_dir_auto(images_dirpath, kapture_path, filename_list, images_import_method)

    # pack into kapture format
    imported_kapture = kapture.Kapture(sensors=cameras, records_camera=images)
    logger.info('writing imported data...')
    kapture_to_dir(kapture_path, imported_kapture)
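
The list-file format follows from the parsing loop above: one image per line, optionally followed by a camera model name and its parameters; images without a model get UNKNOWN_CAMERA with their pixel size. A hypothetical list file and invocation (file names and paths are placeholders):

# images.txt:
#   db/img_0001.jpg
#   db/img_0002.jpg SIMPLE_PINHOLE 640 480 500 320 240
import_image_list(images_list_filenames=['images.txt'],
                  images_dirpath='/datasets/my_images',
                  kapture_path='/kapture/my_dataset',
                  force_overwrite_existing=True,
                  images_import_method=TransferAction.copy)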