Code example #1
def hloc_pipeline_from_kapture_dataset(
        kapture_path_map: str, kapture_path_query: str,
        pairsfile_path_map: str, pairsfile_path_query: str, output_dir: str,
        feature_conf_str: str, matcher_conf_str: str,
        covisibility_clustering: bool, bins_as_str: List[str],
        benchmark_format_style: BenchmarkFormatStyle, colmap_binary: str,
        python_binary: Optional[str], skip_list: List[str]) -> None:
    """
    run hloc on kapture data
    """
    feature_conf = extract_features.confs[feature_conf_str]
    matcher_conf = match_features.confs[matcher_conf_str]
    images_map = get_record_fullpath(kapture_path_map)
    images_query = get_record_fullpath(kapture_path_query)

    os.makedirs(output_dir, exist_ok=True)
    if "convert_pairsfile_map" not in skip_list:
        map_pairs_hloc = path.join(
            output_dir, 'pairfiles/db_pairs',
            path.basename(pairsfile_path_map) + "_hloc.txt")
        convert_pairs_to_hloc_format(pairsfile_path_map, map_pairs_hloc)
        pairsfile_path_map = map_pairs_hloc
    if "convert_pairsfile_query" not in skip_list:
        query_pairs_hloc = path.join(
            output_dir, 'pairfiles/query',
            path.basename(pairsfile_path_query) + "_hloc.txt")
        convert_pairs_to_hloc_format(pairsfile_path_query, query_pairs_hloc)
        pairsfile_path_query = query_pairs_hloc

    feature_path = Path(output_dir, feature_conf['output'] + '.h5')
    if "extract_features_map" not in skip_list:
        image_list_map_path = path.join(output_dir, 'image_list_map.txt')
        convert_kapture_to_hloc_image_list(kapture_path_map,
                                           image_list_map_path)
        feature_path_map = extract_features.main(
            feature_conf,
            Path(images_map),
            Path(output_dir),
            image_list=Path(image_list_map_path))
        assert feature_path_map.resolve() == feature_path.resolve()
    if "extract_features_query" not in skip_list:
        image_list_query_path = path.join(output_dir, 'image_list_query.txt')
        convert_kapture_to_hloc_image_list(kapture_path_query,
                                           image_list_query_path)
        feature_path_query = extract_features.main(
            feature_conf,
            Path(images_query),
            Path(output_dir),
            image_list=Path(image_list_query_path))
        assert feature_path_query.resolve() == feature_path.resolve()

    pairsfile_path_map_pathlib = Path(pairsfile_path_map)
    match_name_map = feature_conf['output'] + '_' + matcher_conf[
        "output"] + f'_{pairsfile_path_map_pathlib.stem}'
    map_match_path = Path(output_dir, match_name_map + '.h5')
    if 'match_map_pairs' not in skip_list:
        map_match_path_actual = match_features.main(
            matcher_conf, pairsfile_path_map_pathlib, feature_conf['output'],
            Path(output_dir))
        assert map_match_path_actual.resolve() == map_match_path.resolve()

    exported_mapping_path = path.join(output_dir,
                                      '3D-models/exported_from_kapture')
    if 'kapture_export_map_to_colmap' not in skip_list:
        export_colmap(kapture_path_map,
                      path.join(exported_mapping_path, 'colmap.db'),
                      exported_mapping_path,
                      force_overwrite_existing=True)
        # convert .txt to .bin
        run_model_converter(colmap_binary, exported_mapping_path,
                            exported_mapping_path, 'BIN')

    triangulate_path = path.join(
        output_dir, 'sfm_' + feature_conf_str + '_' + matcher_conf_str)
    if 'triangulate' not in skip_list:
        triangulation.main(Path(triangulate_path), Path(exported_mapping_path),
                           Path(images_map), pairsfile_path_map_pathlib,
                           feature_path, map_match_path, colmap_binary)

    pairsfile_path_query_pathlib = Path(pairsfile_path_query)
    match_name_query = feature_conf['output'] + '_' + matcher_conf[
        "output"] + f'_{pairsfile_path_query_pathlib.stem}'
    query_match_path = Path(output_dir, match_name_query + '.h5')
    if 'match_query_pairs' not in skip_list:
        query_match_path_actual = match_features.main(
            matcher_conf, pairsfile_path_query_pathlib, feature_conf['output'],
            Path(output_dir))
        assert query_match_path_actual.resolve() == query_match_path.resolve()

    query_as_txt = path.join(output_dir, 'image_list_with_intrinsics.txt')
    export_image_list(kapture_path_query, query_as_txt)
    results_file = path.join(
        output_dir, f'results_{feature_conf_str}_{matcher_conf_str}.txt')
    if 'localize' not in skip_list:
        localize_sfm.main(Path(triangulate_path),
                          Path(query_as_txt),
                          pairsfile_path_query_pathlib,
                          feature_path,
                          query_match_path,
                          Path(results_file),
                          covisibility_clustering=covisibility_clustering)

    results_full = path.join(
        output_dir,
        f'results_{feature_conf_str}_{matcher_conf_str}_fullnames.txt')
    results_kapture = path.join(
        output_dir, f'results_{feature_conf_str}_{matcher_conf_str}_kapture')
    if 'convert_results' not in skip_list:
        convert_results_format(query_as_txt, results_file, results_full)
        convert_results_to_kapture(kapture_path_query, results_full,
                                   results_kapture)
    if 'evaluate' not in skip_list and path.isfile(
            get_csv_fullpath(kapture.Trajectories, kapture_path_query)):
        local_evaluate_path = path.join(pipeline_import_paths.HERE_PATH,
                                        '../tools/kapture_evaluate.py')
        evaluate_args = [
            '-v',
            str(logger.level), '-i', results_kapture, '--labels',
            f'hloc_{feature_conf_str}_{matcher_conf_str}', '-gt',
            kapture_path_query, '-o',
            path.join(results_kapture, 'eval')
        ]
        evaluate_args += ['--bins'] + bins_as_str
        evaluate_args.append('-f')
        run_python_command(local_evaluate_path, evaluate_args, python_binary)

    LTVL2020_output_path = path.join(
        output_dir,
        f'results_{feature_conf_str}_{matcher_conf_str}_LTVL2020_style.txt')
    if 'export_LTVL2020' not in skip_list:
        export_LTVL2020_script_name, export_LTVL2020_args = get_benchmark_format_command(
            benchmark_format_style, results_kapture, LTVL2020_output_path,
            True, logger)
        local_export_LTVL2020_path = path.join(
            pipeline_import_paths.HERE_PATH,
            f'../../kapture/tools/{export_LTVL2020_script_name}')
        run_python_command(local_export_LTVL2020_path, export_LTVL2020_args,
                           python_binary)
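
A hypothetical call to the pipeline above. Every path, configuration name and bin threshold below is a placeholder chosen for illustration (not taken from the source); 'superpoint_max' and 'superglue' are only valid if they exist as keys of hloc's extract_features.confs and match_features.confs in the installed version.

# Placeholder invocation; assumes the same imports as the function above
# (extract_features, match_features, BenchmarkFormatStyle, ...).
hloc_pipeline_from_kapture_dataset(
    kapture_path_map='/data/example/mapping',
    kapture_path_query='/data/example/query',
    pairsfile_path_map='/data/example/pairs_mapping.txt',
    pairsfile_path_query='/data/example/pairs_query.txt',
    output_dir='/data/example/hloc_output',
    feature_conf_str='superpoint_max',      # assumed key of extract_features.confs
    matcher_conf_str='superglue',           # assumed key of match_features.confs
    covisibility_clustering=False,
    bins_as_str=['0.25 2', '0.5 5', '5 10'],
    benchmark_format_style=BenchmarkFormatStyle.Default,  # assumed enum member
    colmap_binary='colmap',
    python_binary=None,
    skip_list=[])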
Code example #2
def export_opensfm(
        kapture_rootdir: str,
        opensfm_rootdir: str,
        force_overwrite_existing: bool = False,
        images_import_method: TransferAction = TransferAction.copy) -> None:
    """

    :param kapture_rootdir:
    :param opensfm_rootdir:
    :param force_overwrite_existing:
    :param images_import_method:
    :return:
    """

    # don't display tqdm progress bars for non-verbose levels
    disable_tqdm = logger.getEffectiveLevel() > logging.INFO
    # load reconstruction
    kapture_data = kapture.io.csv.kapture_from_dir(
        kapture_dirpath=kapture_rootdir)

    # export cameras
    opensfm_cameras = {}
    kapture_cameras = {
        cam_id: cam
        for cam_id, cam in kapture_data.sensors.items()
        if cam.sensor_type == 'camera'
    }
    for cam_id, kapture_camera in kapture_cameras.items():
        opensfm_cameras[cam_id] = export_opensfm_camera(kapture_camera)

    # export shots
    opensfm_shots = {}
    for timestamp, camera_id, image_filename in tqdm(kapture.flatten(
            kapture_data.records_camera),
                                                     disable=disable_tqdm):
        # retrieve pose (if there is one).
        # opensfm_shots = {image_filename: shot}
        # shot = {camera , rotation, translation, capture_time, gps_position, ...}
        opensfm_shot = {
            'capture_time': 0,  # in ms != timestamp
            'camera': camera_id,
        }
        if (timestamp, camera_id) in kapture_data.trajectories:
            pose = kapture_data.trajectories[timestamp, camera_id]
            rotation_vector = quaternion.as_rotation_vector(pose.r)
            translation_vector = pose.t.flatten()
            opensfm_shot.update({
                'rotation': rotation_vector.tolist(),
                'translation': translation_vector.tolist()
            })
        opensfm_shots[image_filename] = opensfm_shot

    # pack it
    opensfm_reconstruction = {
        'cameras': opensfm_cameras,
        'shots': opensfm_shots,
    }

    # images
    logger.info(
        f'writing image files "{path.join(opensfm_rootdir, "images")}".')
    image_filenames = [
        f for _, _, f in kapture.flatten(kapture_data.records_camera)
    ]
    kapture_image_filepaths = [
        get_record_fullpath(kapture_rootdir, image_filename)
        for image_filename in image_filenames
    ]
    opensfm_image_filepaths = [
        path.join(opensfm_rootdir, 'images', image_filename)
        for image_filename in image_filenames
    ]
    transfer_files_from_dir(
        source_filepath_list=kapture_image_filepaths,
        destination_filepath_list=opensfm_image_filepaths,
        force_overwrite=force_overwrite_existing,
        copy_strategy=images_import_method,
    )

    # export features files (keypoints + descriptors)
    opensfm_features_suffix = '.features.npz'
    opensfm_features_dirpath = path.join(opensfm_rootdir, 'features')
    logger.info(
        f'exporting keypoint and descriptors to {opensfm_features_dirpath}')
    os.makedirs(opensfm_features_dirpath, exist_ok=True)
    for image_filename in tqdm(image_filenames, disable=disable_tqdm):
        opensfm_features = {}
        # look and load for keypoints in kapture
        if kapture_data.keypoints is not None and image_filename in kapture_data.keypoints:
            kapture_keypoints_filepath = get_keypoints_fullpath(
                kapture_dirpath=kapture_rootdir, image_filename=image_filename)
            logger.debug(f'loading {kapture_keypoints_filepath}')
            kapture_keypoint = image_keypoints_from_file(
                kapture_keypoints_filepath,
                dtype=kapture_data.keypoints.dtype,
                dsize=kapture_data.keypoints.dsize)
            opensfm_features['points'] = kapture_keypoint

        # look and load for descriptors in kapture
        if kapture_data.descriptors is not None and image_filename in kapture_data.descriptors:
            kapture_descriptor_filepath = get_descriptors_fullpath(
                kapture_dirpath=kapture_rootdir, image_filename=image_filename)
            logger.debug(f'loading {kapture_descriptor_filepath}')
            kapture_descriptor = image_descriptors_from_file(
                kapture_descriptor_filepath,
                dtype=kapture_data.descriptors.dtype,
                dsize=kapture_data.descriptors.dsize)
            opensfm_features['descriptors'] = kapture_descriptor

        # writing opensfm feature file
        if len(opensfm_features) > 0:
            opensfm_features_filepath = path.join(
                opensfm_features_dirpath,
                image_filename + opensfm_features_suffix)
            logger.debug(f'writing {opensfm_features_filepath}')
            os.makedirs(path.dirname(opensfm_features_filepath), exist_ok=True)
            np.save(opensfm_features_filepath, opensfm_features)

    # export matches files
    if kapture_data.matches is not None:
        opensfm_matches_suffix = '_matches.pkl.gz'
        opensfm_matches_dirpath = path.join(opensfm_rootdir, 'matches')
        os.makedirs(opensfm_matches_dirpath, exist_ok=True)
        logger.info(f'exporting matches to {opensfm_matches_dirpath}')
        opensfm_pairs = {}
        for image_filename1, image_filename2 in kapture_data.matches:
            opensfm_pairs.setdefault(image_filename1,
                                     []).append(image_filename2)

        for image_filename1 in tqdm(image_filenames, disable=disable_tqdm):
            opensfm_matches = {}
            opensfm_matches_filepath = path.join(
                opensfm_matches_dirpath,
                image_filename1 + opensfm_matches_suffix)
            logger.debug(f'loading matches for {image_filename1}')
            for image_filename2 in opensfm_pairs.get(image_filename1, []):
                # print(image_filename1, image_filename2)
                kapture_matches_filepath = get_matches_fullpath(
                    (image_filename1, image_filename2),
                    kapture_dirpath=kapture_rootdir)
                kapture_matches = image_matches_from_file(
                    kapture_matches_filepath)
                # np.int is removed in recent NumPy versions; plain int is the equivalent dtype
                opensfm_matches[image_filename2] = kapture_matches[:, 0:2].astype(int)

            os.makedirs(path.dirname(opensfm_matches_filepath), exist_ok=True)
            with gzip.open(opensfm_matches_filepath, 'wb') as f:
                pickle.dump(opensfm_matches, f)

    # export 3D-points files
    if kapture_data.points3d is not None:
        logger.info('exporting points 3-D')
        opensfm_reconstruction['points'] = {}
        for i, (x, y, z, r, g, b) in tqdm(enumerate(kapture_data.points3d),
                                          disable=disable_tqdm):
            opensfm_reconstruction['points'][i] = {
                'coordinates': [x, y, z],
                'color': [r, g, b]
            }

    # write json files
    os.makedirs(opensfm_rootdir, exist_ok=True)
    # write reconstruction.json
    opensfm_reconstruction_filepath = path.join(opensfm_rootdir,
                                                'reconstruction.json')
    logger.info(
        f'writing reconstruction file "{opensfm_reconstruction_filepath}".')
    with open(opensfm_reconstruction_filepath, 'wt') as f:
        json.dump([opensfm_reconstruction], f, indent=4)

    # write camera_models.json
    opensfm_cameras_filepath = path.join(opensfm_rootdir, 'camera_models.json')
    logger.info(f'writing camera models file "{opensfm_cameras_filepath}".')
    with open(opensfm_cameras_filepath, 'wt') as f:
        json.dump(opensfm_cameras, f, indent=4)
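
A hypothetical call to this exporter; both paths are placeholders, and TransferAction is assumed to be imported exactly as it is in the function signature above.

# Placeholder invocation of the exporter defined above.
export_opensfm(
    kapture_rootdir='/data/example/kapture',
    opensfm_rootdir='/data/example/opensfm',
    force_overwrite_existing=True,
    images_import_method=TransferAction.copy)  # copy is the default transfer method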
Code example #3
    def add_frames(self, frames: List[Frame], points3d: List[Keypoint]):
        k = self.kapture

        if k.records_camera is None:
            k.records_camera = kt.RecordsCamera()
        if k.trajectories is None:
            k.trajectories = kt.Trajectories()
        if k.keypoints is None:
            k.keypoints = {
                self.default_kp_type:
                kt.Keypoints(self.default_kp_type, np.float32, 2)
            }
        if k.points3d is None:
            k.points3d = kt.Points3d()
        if k.observations is None:
            k.observations = kt.Observations()

        def check_kp(kp):
            # keep keypoints that are not flagged as bad quality, have enough
            # inlier observations and a high enough inlier ratio
            return (not kp.bad_qlt
                    and kp.inlier_count > self.min_pt3d_obs
                    and kp.inlier_count / kp.total_count > self.min_pt3d_ratio)

        # collect the accepted 3D points, sort them by keypoint id and build a
        # mapping from the original (sparse) keypoint ids to dense point3d indices
        kp_ids, pts3d = zip(*[(kp.id, kp.pt3d) for kp in points3d
                              if check_kp(kp)])
        I = np.argsort(kp_ids)
        pt3d_ids = dict(zip(np.array(kp_ids)[I], np.arange(len(I))))
        pt3d_arr = np.array(pts3d)[I, :]
        # kapture Points3d rows are (x, y, z, r, g, b); pad with a neutral gray color (128)
        k.points3d = kt.Points3d(
            np.concatenate((pt3d_arr, np.ones_like(pt3d_arr) * 128), axis=1))

        for f in frames:
            if not f.pose.post:
                continue

            id = f.frame_num
            img = f.orig_image
            img_file = os.path.join(self.default_cam[1],
                                    'frame%06d.%s' % (id, self.img_format))
            img_fullpath = get_record_fullpath(self.path, img_file)
            os.makedirs(os.path.dirname(img_fullpath), exist_ok=True)

            if not np.isclose(self.scale, 1.0):
                img = cv2.resize(img,
                                 None,
                                 fx=self.scale,
                                 fy=self.scale,
                                 interpolation=cv2.INTER_AREA)
            if self.img_format == self.IMG_FORMAT_PNG:
                cv2.imwrite(img_fullpath, img,
                            (cv2.IMWRITE_PNG_COMPRESSION, 9))
            elif self.img_format == self.IMG_FORMAT_JPG:
                cv2.imwrite(img_fullpath, img,
                            (cv2.IMWRITE_JPEG_QUALITY, self.jpg_qlt))
            else:
                assert False, 'Invalid image format: %s' % (self.img_format, )

            record_id = (id, self.default_cam[0])
            k.records_camera[record_id] = img_file

            pose = f.pose.post
            k.trajectories[record_id] = kt.PoseTransform(
                r=pose.quat.components, t=pose.loc)
            k.keypoints[self.default_kp_type].add(img_file)

            uvs = np.zeros((len(f.kps_uv), 2), np.float32)
            i = 0
            for kp_id, uv in f.kps_uv.items():
                if kp_id in pt3d_ids:
                    k.observations.add(int(pt3d_ids[kp_id]),
                                       self.default_kp_type, img_file, i)
                    uvs[i, :] = uv / f.img_sc * self.scale
                    i += 1

            image_keypoints_to_file(
                get_keypoints_fullpath(self.default_kp_type, self.path,
                                       img_file), uvs[:i, :])
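
The id remapping near the top of add_frames (argsort plus a dict) turns sparse keypoint ids into dense, contiguous 3D-point indices before they are written to kapture. A minimal self-contained sketch of just that step, with made-up ids:

import numpy as np

# made-up sparse keypoint ids and their 3D coordinates
kp_ids = [42, 7, 19]
pts3d = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]

I = np.argsort(kp_ids)                                        # order by original id
pt3d_ids = dict(zip(np.array(kp_ids)[I], np.arange(len(I))))  # sparse id -> dense index
pt3d_arr = np.array(pts3d)[I, :]                              # rows follow the dense indices

print(pt3d_ids[42], pt3d_ids[7])   # dense indices 2 and 0
print(pt3d_arr[pt3d_ids[42]])      # [1. 2. 3.] -> the point that had sparse id 42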
Code example #4
File: depthmaps.py  Project: oknuutti/hw_visnav
def main():
    parser = argparse.ArgumentParser(
        description='Estimate depthmaps using COLMAP')
    parser.add_argument('-i',
                        '--kapture',
                        required=True,
                        help='input path to kapture data root directory')
    parser.add_argument('-s',
                        '--sensor',
                        default='cam',
                        help='input kapture image sensor name')
    parser.add_argument('-nl',
                        '--keypoint',
                        default='gftt',
                        help='input kapture keypoint type name')
    parser.add_argument('-p', '--path', required=True, help='output base path')
    parser.add_argument('-t',
                        '--txt',
                        default='txt',
                        help='output text folder name')
    parser.add_argument('-d',
                        '--dense',
                        default='dense',
                        help='output dense folder name')
    parser.add_argument('-e', '--export', help='export depth maps here')
    parser.add_argument(
        '--export-bits',
        type=int,
        default=32,
        choices=(16, 24, 32),
        help=
        'Export depth and geometry using float16/24/32 exr-files (default 32). Safe range of '
        'values can be calculated using (2**ceil(log(trg_prec * 2**x)/log(2)) - 1), where '
        'trg_prec is the target precision and x is the length of the significand '
        '(10, 15, 23 for 16, 24, 32 bit floats, respectively). '
        'E.g., if ground resolution at 100m for fl=2315px is 0.043m (100/2315), the safe range '
        'for different choices would be [-63, 63], [-2047, 2047], and [-524287, 524287].'
    )
    parser.add_argument('-c', '--cmd', help='path to colmap command')
    parser.add_argument(
        '--composite-cmd',
        help=
        'composite cmd to invoke colmap command, e.g. "singularity exec colmap.sif colmap"'
    )
    parser.add_argument(
        '--gpu',
        default='0',
        help='gpu indices to use, e.g. 0 (default) or 0,0,0,0 or 0,1,2')
    parser.add_argument('--mem',
                        default=32,
                        type=int,
                        help='max mem usage in GB ')
    parser.add_argument('--min-depth',
                        type=float,
                        default=10,
                        help='min depth for depth map estimation')
    parser.add_argument('--max-depth',
                        type=float,
                        default=200,
                        help='max depth for depth map estimation')
    parser.add_argument(
        '--win-rad',
        type=int,
        default=5,
        help='window radius for colmap depth map estimation (default=5)')
    parser.add_argument(
        '--win-step',
        type=int,
        default=1,
        help='window step size for colmap depth map estimation (default=1)')

    parser.add_argument('--filter-min-ncc',
                        type=float,
                        default=0.1,
                        help='--PatchMatchStereo.filter_min_ncc  arg (=0.1)')
    parser.add_argument(
        '--filter-min-triangulation-angle',
        type=float,
        default=3.0,
        help='--PatchMatchStereo.filter_min_triangulation_angle  arg (=3.0)')
    parser.add_argument(
        '--filter-min-num-consistent',
        type=int,
        default=2,
        help='--PatchMatchStereo.filter_min_num_consistent  arg (=2)')
    parser.add_argument(
        '--filter-geom-consistency-max-cost',
        type=float,
        default=1.0,
        help='--PatchMatchStereo.filter_geom_consistency_max_cost  arg (=1.0)')

    parser.add_argument('--plot-only',
                        action='store_true',
                        help='plot only export results')
    parser.add_argument('--skip-import',
                        action='store_true',
                        help='skip importing kapture to colmap format')
    parser.add_argument('--skip-depth-est',
                        action='store_true',
                        help='skip depth map estimation')
    parser.add_argument('--skip-export',
                        action='store_true',
                        help='skip exporting depth maps to exr format')
    parser.add_argument('--skip-depth-filter',
                        action='store_true',
                        help='skip extra filter scheme for depth map')

    parser.add_argument(
        '--plot',
        help='during export, only process and plot the frame given here')

    args = parser.parse_args()
    txt_rec = os.path.join(args.path, args.txt)
    db_path = os.path.join(args.path, 'colmap.db')
    img_path = get_record_fullpath(args.kapture)
    dense_path = os.path.join(args.path, args.dense)
    os.makedirs(os.path.join(dense_path, 'images', args.sensor), exist_ok=True)
    logging.basicConfig(level=logging.INFO)

    if not args.export:
        args.export = os.path.join(args.kapture, 'reconstruction')

    if args.plot_only:
        plot_only(args.export, args.plot)
        exit()

    if args.composite_cmd:
        cmd = args.composite_cmd.split(' ')
    else:
        assert args.cmd, 'either --cmd or --composite-cmd argument needs to be given'
        cmd = [args.cmd]

    if not args.skip_import:
        export_colmap(args.kapture,
                      db_path,
                      txt_rec,
                      keypoints_type=args.keypoint,
                      force_overwrite_existing=True)
        image_undistorter_args = [
            "image_undistorter",
            "--image_path",
            img_path,
            "--input_path",
            txt_rec,
            "--output_path",
            dense_path,
            "--blank_pixels",
            "1",
        ]
        exec_cmd(cmd + image_undistorter_args)

        for f in ('consistency_graphs', 'depth_maps', 'normal_maps'):
            os.makedirs(os.path.join(dense_path, 'stereo', f, args.sensor),
                        exist_ok=True)

    if not args.skip_depth_est:
        patch_match_stereo_args = [
            "patch_match_stereo",
            "--workspace_path",
            dense_path,
            "--PatchMatchStereo.depth_min",
            str(args.min_depth),
            "--PatchMatchStereo.depth_max",
            str(args.max_depth),
            "--PatchMatchStereo.window_radius",
            str(args.win_rad),
            "--PatchMatchStereo.window_step",
            str(args.win_step),
            "--PatchMatchStereo.gpu_index",
            args.gpu,
            "--PatchMatchStereo.cache_size",
            str(args.mem),
            "--PatchMatchStereo.filter_min_ncc",
            str(args.filter_min_ncc),
            "--PatchMatchStereo.filter_min_triangulation_angle",
            str(args.filter_min_triangulation_angle),
            "--PatchMatchStereo.filter_min_num_consistent",
            str(args.filter_min_num_consistent),
            "--PatchMatchStereo.filter_geom_consistency_max_cost",
            str(args.filter_geom_consistency_max_cost),
        ]
        exec_cmd(cmd + patch_match_stereo_args)

    if not args.skip_export:
        depth_path = os.path.join(dense_path, 'stereo', 'depth_maps',
                                  args.sensor)
        os.makedirs(os.path.join(args.export, 'depth'), exist_ok=True)
        os.makedirs(os.path.join(args.export, 'geometry'), exist_ok=True)
        kapt = kapture_from_dir(args.kapture)
        sensor_id, width, height, fl_x, fl_y, pp_x, pp_y, *dist_coefs = get_cam_params(
            kapt, args.sensor)
        file2id = {fn[sensor_id]: id for id, fn in kapt.records_camera.items()}

        if args.export_bits == 16:
            exr_params_d = exr_params_xyz = (cv2.IMWRITE_EXR_TYPE,
                                             cv2.IMWRITE_EXR_TYPE_HALF)
        else:
            exr_params_d = exr_params_xyz = (cv2.IMWRITE_EXR_TYPE,
                                             cv2.IMWRITE_EXR_TYPE_FLOAT)
            if args.export_bits == 24:
                if hasattr(cv2, 'IMWRITE_EXR_COMPRESSION'):
                    # supported in OpenCV 4, see descriptions at
                    #   https://rainboxlab.org/downloads/documents/EXR_Data_Compression.pdf
                    #   or https://www.openexr.com/documentation/TechnicalIntroduction.pdf
                    exr_params_d += (cv2.IMWRITE_EXR_COMPRESSION,
                                     cv2.IMWRITE_EXR_COMPRESSION_PXR24
                                     )  # zip 24bit floats
                    exr_params_xyz += (cv2.IMWRITE_EXR_COMPRESSION,
                                       cv2.IMWRITE_EXR_COMPRESSION_PXR24)
                else:
                    logging.warning(
                        'This version of OpenCV does not support PXR24 compression, defaulting to float32'
                    )
            elif hasattr(cv2, 'IMWRITE_EXR_COMPRESSION'):
                exr_params_d += (cv2.IMWRITE_EXR_COMPRESSION,
                                 cv2.IMWRITE_EXR_COMPRESSION_ZIP
                                 )  # zip 32bit floats
                exr_params_xyz += (cv2.IMWRITE_EXR_COMPRESSION,
                                   cv2.IMWRITE_EXR_COMPRESSION_ZIP)

        logging.info('Exporting geometric depth maps in EXR format...')
        for fname in tqdm.tqdm(os.listdir(depth_path), mininterval=3):
            m = re.search(r'(.*?)(\.jpg|\.png|\.jpeg)?\.geometric\.bin', fname)
            if m and (not args.plot or m[1] == args.plot):
                depth0 = read_colmap_array(os.path.join(depth_path, fname))
                depth0[depth0 <= args.min_depth] = np.nan
                depth0[depth0 >= args.max_depth] = np.nan
                depth = depth0 if args.skip_depth_filter else filter_depth(
                    depth0, args)

                if width != depth.shape[1] or height != depth.shape[0]:
                    logging.warning(
                        'Depth map for image %s has a different size than the camera resolution: %s vs %s'
                        % (m[1] + m[2], depth.shape, (height, width)))

                outfile = os.path.join(args.export, 'depth', m[1] + '.d.exr')
                cv2.imwrite(outfile, depth, exr_params_d)

                frame_id = file2id.get(
                    args.sensor + '/' + m[1] + m[2],
                    file2id.get(args.sensor + '\\' + m[1] + m[2], None))
                cf_cam_world_v, cf_cam_world_q = kapt.trajectories[frame_id][
                    sensor_id].t, kapt.trajectories[frame_id][sensor_id].r
                cf_world_cam = -Pose(cf_cam_world_v, cf_cam_world_q)

                px_u = cam_px_u(depth.shape[1], depth.shape[0], fl_x, fl_y,
                                pp_x, pp_y)

                # the depth is actually the z-component, not the distance from the camera to the surface
                dist = depth.flatten() / px_u[:, 2]

                px_u = tools.q_times_mx(cf_world_cam.quat,
                                        px_u * dist[:, None])
                xyz = px_u.reshape(depth.shape +
                                   (3, )) + cf_world_cam.loc.reshape((1, 1, 3))

                outfile = os.path.join(args.export, 'geometry',
                                       m[1] + '.xyz.exr')
                cv2.imwrite(outfile, xyz.astype(np.float32), exr_params_xyz)

                if args.plot:
                    _, depth2 = filter_depth(depth0, args, return_interm=True)
                    xyz = xyz.reshape((-1, 3))

                    import matplotlib.pyplot as plt
                    from mpl_toolkits.mplot3d import Axes3D

                    plt.figure(1)
                    plt.imshow(depth0)

                    plt.figure(2)
                    plt.imshow(depth2)

                    plt.figure(3)
                    plt.imshow(depth)

                    f = plt.figure(4)
                    a = f.add_subplot(111, projection='3d')
                    a.set_xlabel('x')
                    a.set_ylabel('y')
                    a.set_zlabel('z')
                    a.plot(xyz[::5, 0], xyz[::5, 1], xyz[::5, 2], '.')

                    plt.show()
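
A hypothetical programmatic invocation of this script; all paths and the colmap binary location are placeholders, and only flags defined by the argument parser above are used.

# Placeholder invocation; depthmaps.py defines main() as above.
import sys

sys.argv = ['depthmaps.py',
            '-i', '/data/example/kapture',     # kapture root directory (placeholder)
            '-p', '/data/example/colmap_ws',   # output base path (placeholder)
            '--cmd', '/usr/local/bin/colmap',  # colmap binary (placeholder)
            '--min-depth', '5',
            '--max-depth', '150']
main()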
Code example #5
def export_opensfm(
        kapture_root_dir: str,
        opensfm_root_dir: str,
        force_overwrite_existing: bool = False,
        images_export_method: TransferAction = TransferAction.copy) -> None:
    """
    Export the kapture data to an openSfM format

    :param kapture_root_dir: full path to the top kapture directory
    :param opensfm_root_dir: path of the directory where to store the data in openSfM format
    :param force_overwrite_existing: if true, will remove existing openSfM data without prompting the user.
    :param images_export_method: transfer method used to copy or link the image files
    """

    # don't display tqdm progress bars for non-verbose levels
    disable_tqdm = logger.getEffectiveLevel() > logging.INFO
    # load reconstruction
    kapture_data = kapture.io.csv.kapture_from_dir(kapture_root_dir)

    # export cameras
    opensfm_cameras = {}
    kapture_cameras = {
        cam_id: cam
        for cam_id, cam in kapture_data.sensors.items()
        if cam.sensor_type == 'camera'
    }
    for cam_id, kapture_camera in kapture_cameras.items():
        opensfm_cameras[cam_id] = export_opensfm_camera(kapture_camera)

    # export shots
    opensfm_shots = {}
    for timestamp, camera_id, image_filename in tqdm(kapture.flatten(
            kapture_data.records_camera),
                                                     disable=disable_tqdm):
        # retrieve pose (if there is one).
        # opensfm_shots = {image_filename: shot}
        # shot = {camera , rotation, translation, capture_time, gps_position, ...}
        opensfm_shot = {
            'capture_time': 0,  # in ms != timestamp
            'camera': camera_id,
        }
        if (timestamp, camera_id) in kapture_data.trajectories:
            pose = kapture_data.trajectories[timestamp, camera_id]
            rotation_vector = quaternion.as_rotation_vector(pose.r)
            translation_vector = pose.t.flatten()
            opensfm_shot.update({
                'rotation': rotation_vector.tolist(),
                'translation': translation_vector.tolist()
            })
        opensfm_shots[image_filename] = opensfm_shot

    # pack it
    opensfm_reconstruction = {
        'cameras': opensfm_cameras,
        'shots': opensfm_shots,
    }

    # images
    logger.info(
        f'writing image files "{path.join(opensfm_root_dir, "images")}".')
    image_filenames = [
        f for _, _, f in kapture.flatten(kapture_data.records_camera)
    ]
    kapture_image_file_paths = [
        get_record_fullpath(kapture_root_dir, image_filename)
        for image_filename in image_filenames
    ]
    opensfm_image_file_paths = [
        path.join(opensfm_root_dir, 'images', image_filename)
        for image_filename in image_filenames
    ]
    transfer_files_from_dir(
        source_filepath_list=kapture_image_file_paths,
        destination_filepath_list=opensfm_image_file_paths,
        force_overwrite=force_overwrite_existing,
        copy_strategy=images_export_method,
    )

    _export_opensfm_features_and_matches(image_filenames, kapture_data,
                                         kapture_root_dir, opensfm_root_dir,
                                         disable_tqdm)

    # export 3D-points files
    if kapture_data.points3d is not None:
        logger.info('exporting points 3-D')
        opensfm_reconstruction['points'] = {}
        for i, (x, y, z, r, g, b) in tqdm(enumerate(kapture_data.points3d),
                                          disable=disable_tqdm):
            opensfm_reconstruction['points'][i] = {
                'coordinates': [x, y, z],
                'color': [r, g, b]
            }

    # write json files
    os.makedirs(opensfm_root_dir, exist_ok=True)
    # write reconstruction.json
    opensfm_reconstruction_filepath = path.join(opensfm_root_dir,
                                                'reconstruction.json')
    logger.info(
        f'writing reconstruction file "{opensfm_reconstruction_filepath}".')
    with open(opensfm_reconstruction_filepath, 'wt') as f:
        json.dump([opensfm_reconstruction], f, indent=4)

    # write camera_models.json
    opensfm_cameras_filepath = path.join(opensfm_root_dir,
                                         'camera_models.json')
    logger.info(f'writing camera models file "{opensfm_cameras_filepath}".')
    with open(opensfm_cameras_filepath, 'wt') as f:
        json.dump(opensfm_cameras, f, indent=4)
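
A minimal follow-up sketch (the directory name is a placeholder) that re-loads the exported reconstruction.json and inspects the top-level keys written by the function above:

import json
from os import path

opensfm_root_dir = '/data/example/opensfm'  # placeholder, same directory passed to export_opensfm
with open(path.join(opensfm_root_dir, 'reconstruction.json'), 'rt') as f:
    reconstructions = json.load(f)          # a list holding a single reconstruction dict

reconstruction = reconstructions[0]
print(sorted(reconstruction.keys()))        # expect 'cameras' and 'shots' (and 'points' if 3D points were exported)
print(len(reconstruction['shots']), 'shots exported')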