Example #1
def create_rigs_with_pattern(data: DataSet, patterns):
    """Create rig data (`rig_models.json` and `rig_assignments.json`) by performing
    pattern matching to group images belonging to the same instances, followed
    by a bit of ad-hoc SfM to find some initial relative poses.
    """

    # Construct instances assignments for each rig
    instances_per_rig = create_instances_with_patterns(data.images(), patterns)
    for rig_id, instances in instances_per_rig.items():
        logger.info(
            f"Found {len(instances)} rig instances for rig {rig_id} using pattern matching."
        )

    # Create some subset DataSet with enough images from each rig
    subset_data = create_subset_dataset_from_instances(data, instances_per_rig,
                                                       "rig_calibration")

    # Run a bit of SfM without any rig
    logger.info(
        f"Running SfM on a subset of {len(subset_data.images())} images.")
    actions.extract_metadata.run_dataset(subset_data)
    actions.detect_features.run_dataset(subset_data)
    actions.match_features.run_dataset(subset_data)
    actions.create_tracks.run_dataset(subset_data)
    actions.reconstruct.run_dataset(subset_data)

    # Compute some relative poses
    rig_models = create_rig_models_from_reconstruction(
        subset_data.load_reconstruction()[0], instances_per_rig)

    data.save_rig_models(rig_models)
    data.save_rig_assignments(instances_per_rig)
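A minimal invocation sketch follows; the dataset path and the shape of `patterns` (one pattern per rig ID) are assumptions for illustration, not taken from the source above.

# Hypothetical usage sketch: the path and the pattern value are assumptions.
from opensfm.dataset import DataSet

data = DataSet("path/to/dataset")
patterns = {"rig_1": "<pattern for rig_1>"}  # assumed shape: one pattern per rig ID (exact format not shown above)
create_rigs_with_pattern(data, patterns)  # writes rig_models.json and rig_assignments.json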
Example #2
def run_dataset(data: DataSet, list_path, bundle_path, undistorted):
    """Export reconstruction to bundler format.

    Args:
        list_path: txt list of images to export
        bundle_path: output path
        undistorted: export undistorted reconstruction

    """

    udata = data.undistorted_dataset()

    default_path = os.path.join(data.data_path, "bundler")
    list_file_path = list_path if list_path else default_path
    bundle_file_path = bundle_path if bundle_path else default_path

    if undistorted:
        reconstructions = udata.load_undistorted_reconstruction()
        track_manager = udata.load_undistorted_tracks_manager()
        images = reconstructions[0].shots.keys()
    else:
        reconstructions = data.load_reconstruction()
        track_manager = data.load_tracks_manager()
        images = data.images()

    io.export_bundler(images, reconstructions, track_manager, bundle_file_path,
                      list_file_path)
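A minimal usage sketch, assuming a dataset directory; as in the code above, falsy paths fall back to <data_path>/bundler for both output files.

# Illustrative only: the dataset path is an assumption.
from opensfm.dataset import DataSet

data = DataSet("path/to/dataset")
run_dataset(data, list_path=None, bundle_path=None, undistorted=True)
# With falsy paths, both the image list and the bundle file go under <data_path>/bundler.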
Example #3
def validate_image_names(data: DataSet, udata: UndistortedDataSet):
    """Check that image files do not have spaces."""
    for image in data.images():
        filename = image_path(image, udata)
        if " " in filename:
            logger.error(
                'Image name "{}" contains spaces.  '
                "This is not supported by the NVM format.  "
                "Please, rename it before running OpenSfM.".format(filename))
            sys.exit(1)
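A short sketch of running the check before an NVM export; the dataset path is an assumption.

# Sketch: run the check on a dataset (path is an assumption).
from opensfm.dataset import DataSet

data = DataSet("path/to/dataset")
udata = data.undistorted_dataset()
validate_image_names(data, udata)  # calls sys.exit(1) if any image name contains a space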
Example #4
def create_rigs_with_pattern(data: DataSet, patterns):
    """Create rig data (`rig_models.json` and `rig_assignments.json`) by performing
    pattern matching to group images belonging to the same instances, followed
    by a bit of ad-hoc SfM to find some initial relative poses.
    """

    # Construct instances assignments for each rig
    instances_per_rig = create_instances_with_patterns(data.images(), patterns)
    for rig_id, instances in instances_per_rig.items():
        logger.info(
            f"Found {len(instances)} rig instances for rig {rig_id} using pattern matching."
        )

    # Create some subset DataSet with enough images from each rig
    subset_data = create_subset_dataset_from_instances(data, instances_per_rig,
                                                       "rig_calibration")

    # Run a bit of SfM without any rig
    logger.info(
        f"Running SfM on a subset of {len(subset_data.images())} images.")
    actions.extract_metadata.run_dataset(subset_data)
    actions.detect_features.run_dataset(subset_data)
    actions.match_features.run_dataset(subset_data)
    actions.create_tracks.run_dataset(subset_data)
    actions.reconstruct.run_dataset(subset_data)

    # Compute some relative poses
    rig_models_poses = create_rig_model_from_reconstruction(
        subset_data.load_reconstruction()[0], instances_per_rig)

    # Ad-hoc construction of output model data
    # Will be replaced by `io` counterpart
    models = {}
    for rig_id in patterns:
        rig_pattern = patterns[rig_id]
        model = rig_models_poses[rig_id]

        rig_model = {}
        for rig_camera_id in model:
            pose, camera_id = model[rig_camera_id]
            rig_model[rig_camera_id] = {
                "translation": list(pose.translation),
                "rotation": list(pose.rotation),
                "camera": camera_id,
            }

        models[rig_id] = {
            "rig_relative_type": "shared",
            "rig_cameras": rig_model,
        }

    data.save_rig_models(models)
    data.save_rig_assignments(instances_per_rig)
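For reference, the `models` dict assembled above has the following shape; all identifiers and numbers here are placeholders, not real output.

# Illustrative shape of the saved rig models; values are placeholders.
models = {
    "rig_1": {
        "rig_relative_type": "shared",
        "rig_cameras": {
            "rig_camera_1": {
                "translation": [0.0, 0.0, 0.0],  # relative pose translation
                "rotation": [0.0, 0.0, 0.0],     # relative pose rotation vector
                "camera": "camera_1",            # underlying camera model id
            },
        },
    },
}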
Example #5
def SfM_match(src, pre, mix=0):  # match_features
    """Match features between a GPS image set and an RTK image set of dataset `src`.

    If `pre` is a directory, merge its exif/features/camera models into `src`
    and treat its images as the RTK set; otherwise `pre` is a filename prefix
    used to split the images of `src` into GPS and RTK sets.
    `mix` optionally mixes the two sets before matching (see comments below).
    """
    from opensfm.actions.match_features import timer, matching, write_report
    from opensfm.dataset import DataSet

    data = DataSet(src)
    t = timer()
    INFO(f'{SfM_DIR}/bin/opensfm match_features: {src}')
    GPS, RTK = [], []
    if os.path.isdir(pre):
        # Merge metadata and features of the previous dataset into the current one
        merge_dir(pre + '/exif', src + '/exif')
        merge_dir(pre + '/features', src + '/features')
        merge_json(pre, src, 'camera_models.json')
        #merge_json(pre, src, 'reports/features.json')
        #merge_dir(pre+'/reports/features', src+'/reports/features')
        GPS, RTK = data.images(), DataSet(pre).images()
    else:  # split data -> (GPS, RTK) by filename prefix
        for i in data.images():
            (RTK if i.startswith(pre) else GPS).append(i)
    if mix in (1, 3):
        GPS += RTK  # 1: match (GPS+RTK, RTK)
    if mix in (2, 3):
        RTK += GPS  # 2: match (GPS, RTK+GPS)
    pairs, preport = matching.match_images(data, {}, GPS, RTK)
    matching.save_matches(data, GPS, pairs)
    write_report(data, preport, list(pairs.keys()), timer() - t)
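Two hedged call sketches; the paths and the "RTK_" filename prefix are assumptions.

# Hypothetical calls; paths and the "RTK_" prefix are assumptions.
SfM_match("path/to/dataset", "path/to/previous_dataset")  # merge previous dataset, match GPS vs RTK
SfM_match("path/to/dataset", "RTK_", mix=1)               # split by prefix, match (GPS+RTK, RTK)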
Example #6
def _create_image_list(data: DataSet, meta_data):
    ills = []
    for image in data.images():
        exif = data.load_exif(image)
        if ("gps" not in exif or "latitude" not in exif["gps"]
                or "longitude" not in exif["gps"]):
            logger.warning("Skipping {} because of missing GPS".format(image))
            continue

        lat = exif["gps"]["latitude"]
        lon = exif["gps"]["longitude"]
        ills.append((image, lat, lon))

    meta_data.create_image_list(ills)
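The `ills` list handed to `meta_data.create_image_list` is a sequence of (image, latitude, longitude) tuples; the values below are made up for illustration.

# Illustrative shape of `ills`; names and coordinates are placeholders.
ills = [
    ("image_01.jpg", 52.5200, 13.4050),  # (image name, latitude, longitude)
    ("image_02.jpg", 52.5201, 13.4052),
]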
Example #7
def create_rigs_with_pattern(data: DataSet, patterns: TRigPatterns):
    """Create rig data (`rig_cameras.json` and `rig_assignments.json`) by performing
    pattern matching to group images belonging to the same instances, followed
    by a bit of ad-hoc SfM to find some initial relative poses.
    """

    # Construct instances assignments for each rig
    instances_per_rig, single_shots = create_instances_with_patterns(
        data.images(), patterns
    )
    for rig_id, instances in instances_per_rig.items():
        logger.info(
            f"Found {len(instances)} shots for instance {rig_id} using pattern matching."
        )
    logger.info(f"Found {len(single_shots)} single shots using pattern matching.")

    # Create some random subset DataSet with enough images from each rig and run SfM
    count = 0
    max_rounds = data.config["rig_calibration_max_rounds"]
    best_reconstruction = None
    best_rig_cameras = None
    for subset_data, instances in propose_subset_dataset_from_instances(
        data, instances_per_rig, "rig_calibration"
    ):
        if len(subset_data.images()) == 0:
            continue

        if count > max_rounds:
            break
        count += 1

        # Run a bit of SfM without any rig
        logger.info(
            f"Running SfM on a subset of {len(subset_data.images())} images. Round {count}/{max_rounds}"
        )
        actions.extract_metadata.run_dataset(subset_data)
        actions.detect_features.run_dataset(subset_data)
        actions.match_features.run_dataset(subset_data)
        actions.create_tracks.run_dataset(subset_data)
        actions.reconstruct.run_dataset(subset_data)

        reconstruction = subset_data.load_reconstruction()[0]

        # Compute some relative poses
        rig_cameras = create_rig_cameras_from_reconstruction(
            reconstruction, instances_per_rig
        )
        found_cameras = {c for i in instances_per_rig.values() for _, c in i}
        if set(rig_cameras.keys()) != found_cameras:
            logger.error(
                f"Calibrated {len(rig_cameras)} whereas {len(found_cameras)} were requested. Rig creation failed."
            )
            continue

        reconstructed_instances = count_reconstructed_instances(
            instances, reconstruction
        )
        logger.info(
            f"reconstructed {reconstructed_instances} instances over {len(instances)}"
        )
        if (
            reconstructed_instances
            < len(instances) * data.config["rig_calibration_completeness"]
        ):
            continue

        best_reconstruction = reconstruction
        best_rig_cameras = rig_cameras
        break

    if best_reconstruction and best_rig_cameras:
        logger.info(
            f"Found a candidate for rig calibration with {len(best_reconstruction.shots)} shots"
        )
        data.save_rig_cameras(best_rig_cameras)
        data.save_rig_assignments(list(instances_per_rig.values()))
    else:
        logger.error(
            "Could not run any sucessful SfM on images subset for rig calibration"
        )
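The calibration loop above is driven by two config keys; a sketch of overriding them follows, with illustrative values that are not OpenSfM's actual defaults.

# Illustrative values only; not the library's defaults.
data.config["rig_calibration_max_rounds"] = 10       # cap on SfM subset rounds
data.config["rig_calibration_completeness"] = 0.85   # required fraction of reconstructed instances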