Example #1
def _reconstruction_from_rigs_and_assignments(data: DataSetBase):
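    """Build one reconstruction per rig from rig assignments and rig models.

    Shot poses are composed from the per-rig camera pose and a rig pose whose
    origin is taken from the image GPS position.
    """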
    assignments = data.load_rig_assignments()
    models = data.load_rig_models()

    if not data.reference_lla_exists():
        data.invent_reference_lla()

    base_rotation = np.zeros(3)

    reconstructions = []
    for rig_id, instances in assignments.items():
        rig_cameras = models[rig_id]["rig_cameras"]

        reconstruction = types.Reconstruction()
        reconstruction.cameras = data.load_camera_models()
        for instance in instances:
            for image, camera_id in instance:
                rig_camera = rig_cameras[camera_id]
                rig_pose = pygeometry.Pose(base_rotation)
                rig_pose.set_origin(
                    orec.get_image_metadata(data, image).gps_position.value)
                rig_camera_pose = pygeometry.Pose(rig_camera["rotation"],
                                                  rig_camera["translation"])

                d = data.load_exif(image)
                shot = reconstruction.create_shot(image, d["camera"])
                shot.pose = rig_camera_pose.compose(rig_pose)
                shot.metadata = orec.get_image_metadata(data, image)

        reconstructions.append(reconstruction)
    return reconstructions
Example #2
def _reconstruction_from_rigs_and_assignments(data: DataSetBase):
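    """Build a single reconstruction from rig assignments and rig cameras.

    Each rig instance is anchored at the GPS position of its images, and shots
    are attached to their rig camera and rig instance.
    """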
    assignments = data.load_rig_assignments()
    rig_cameras = data.load_rig_cameras()

    data.init_reference()

    reconstruction = types.Reconstruction()
    reconstruction.cameras = data.load_camera_models()
    for rig_instance_id, instance in assignments.items():
        for image, rig_camera_id in instance:
            rig_camera = rig_cameras[rig_camera_id]
            reconstruction.add_rig_camera(
                pymap.RigCamera(rig_camera.pose, rig_camera_id))

            instance_obj = reconstruction.add_rig_instance(
                pymap.RigInstance(rig_instance_id))
            instance_obj.pose.set_origin(
                helpers.get_image_metadata(data, image).gps_position.value)

            d = data.load_exif(image)
            shot = reconstruction.create_shot(
                image,
                camera_id=d["camera"],
                rig_camera_id=rig_camera_id,
                rig_instance_id=rig_instance_id,
            )
            shot.metadata = helpers.get_image_metadata(data, image)
    return [reconstruction]
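
# Minimal usage sketch for the rig-based helpers above, assuming an
# OpenSfM-style DataSet rooted at a placeholder path with rig assignments
# already computed on disk.
from opensfm.dataset import DataSet

data = DataSet("path/to/dataset")  # placeholder path
reconstructions = _reconstruction_from_rigs_and_assignments(data)
data.save_reconstruction(reconstructions)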
Example #3
def match_images_with_pairs(data: DataSetBase, config_override, exifs, pairs):
    """ Perform pair matchings given pairs. """
    cameras = data.load_camera_models()
    args = list(match_arguments(pairs, data, config_override, cameras, exifs))

    # Perform all pair matchings in parallel
    start = timer()
    logger.info("Matching {} image pairs".format(len(pairs)))
    mem_per_process = 512
    jobs_per_process = 2
    processes = context.processes_that_fit_in_memory(data.config["processes"],
                                                     mem_per_process)
    logger.info("Computing pair matching with %d processes" % processes)
    matches = context.parallel_map(match_unwrap_args, args, processes,
                                   jobs_per_process)
    logger.info("Matched {} pairs {} in {} seconds ({} seconds/pair).".format(
        len(pairs),
        log_projection_types(pairs, exifs, cameras),
        timer() - start,
        (timer() - start) / len(pairs) if pairs else 0,
    ))

    # Index results per pair
    resulting_pairs = {}
    for im1, im2, m in matches:
        resulting_pairs[im1, im2] = m
    return resulting_pairs
Example #4
def cameras_statistics(data: DataSetBase, reconstructions):
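    """Collect initial and optimized parameters for each camera model.

    Optimized values are taken from the largest reconstruction that contains
    the camera; cameras that were never optimized are dropped.
    """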
    stats = {}
    permutation = np.argsort([-len(r.shots) for r in reconstructions])
    for camera_id, camera_model in data.load_camera_models().items():
        stats[camera_id] = {"initial_values": _cameras_statistics(camera_model)}

    for idx in permutation:
        rec = reconstructions[idx]
        for camera in rec.cameras.values():
            if "optimized_values" in stats[camera.id]:
                continue
            stats[camera.id]["optimized_values"] = _cameras_statistics(camera)

    for camera_id in data.load_camera_models():
        if "optimized_values" not in stats[camera_id]:
            del stats[camera_id]

    return stats
Example #5
def compute_image_pairs(track_dict, data: DataSetBase):
    """All matched image pairs sorted by reconstructability."""
    cameras = data.load_camera_models()
    args = _pair_reconstructability_arguments(track_dict, cameras, data)
    processes = data.config["processes"]
    result = parallel_map(_compute_pair_reconstructability, args, processes)
    result = list(result)
    pairs = [(im1, im2) for im1, im2, r in result if r > 0]
    score = [r for im1, im2, r in result if r > 0]
    order = np.argsort(-np.array(score))
    return [pairs[o] for o in order]
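
# Toy sketch of the sorting pattern used above: np.argsort on the negated
# scores yields indices from highest to lowest score. The pairs and scores
# here are made up for illustration.
import numpy as np

pairs = [("a.jpg", "b.jpg"), ("b.jpg", "c.jpg"), ("a.jpg", "c.jpg")]
score = [0.2, 0.9, 0.5]
order = np.argsort(-np.array(score))
print([pairs[o] for o in order])
# [('b.jpg', 'c.jpg'), ('a.jpg', 'c.jpg'), ('a.jpg', 'b.jpg')]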
Example #6
def incremental_reconstruction(data: DataSetBase, tracks_manager):
    """Run the entire incremental reconstruction pipeline."""
    logger.info("Starting incremental reconstruction")
    report = {}
    chrono = Chronometer()

    images = tracks_manager.get_shot_ids()

    if not data.reference_lla_exists():
        data.invent_reference_lla(images)

    remaining_images = set(images)
    camera_priors = data.load_camera_models()
    gcp = data.load_ground_control_points()
    common_tracks = tracking.all_common_tracks(tracks_manager)
    reconstructions = []
    pairs = compute_image_pairs(common_tracks, camera_priors, data)
    chrono.lap("compute_image_pairs")
    report["num_candidate_image_pairs"] = len(pairs)
    report["reconstructions"] = []
    for im1, im2 in pairs:
        if im1 in remaining_images and im2 in remaining_images:
            rec_report = {}
            report["reconstructions"].append(rec_report)
            _, p1, p2 = common_tracks[im1, im2]
            reconstruction, rec_report["bootstrap"] = bootstrap_reconstruction(
                data, tracks_manager, camera_priors, im1, im2, p1, p2)

            if reconstruction:
                remaining_images.remove(im1)
                remaining_images.remove(im2)
                reconstruction, rec_report["grow"] = grow_reconstruction(
                    data,
                    tracks_manager,
                    reconstruction,
                    remaining_images,
                    camera_priors,
                    gcp,
                )
                reconstructions.append(reconstruction)
                reconstructions = sorted(reconstructions,
                                         key=lambda x: -len(x.shots))

    for k, r in enumerate(reconstructions):
        logger.info("Reconstruction {}: {} images, {} points".format(
            k, len(r.shots), len(r.points)))
    logger.info("{} partial reconstructions in total.".format(
        len(reconstructions)))
    chrono.lap("compute_reconstructions")
    report["wall_times"] = dict(chrono.lap_times())
    report["not_reconstructed_images"] = list(remaining_images)
    return report, reconstructions
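
# Minimal driver sketch for the pipeline above, assuming an OpenSfM-style
# DataSet at a placeholder path whose features, matches and tracks were
# already computed.
from opensfm.dataset import DataSet

data = DataSet("path/to/dataset")  # placeholder path
tracks_manager = data.load_tracks_manager()
report, reconstructions = incremental_reconstruction(data, tracks_manager)
data.save_reconstruction(reconstructions)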
Example #7
def is_high_res_panorama(data: DataSetBase, image_key, image_array):
    """Detect if image is a panorama."""
    exif = data.load_exif(image_key)
    if exif:
        camera = data.load_camera_models()[exif["camera"]]
        w, h = int(exif["width"]), int(exif["height"])
        exif_pano = pygeometry.Camera.is_panorama(camera.projection_type)
    elif image_array is not None:
        h, w = image_array.shape[:2]
        exif_pano = False
    else:
        return False
    return w == 2 * h or exif_pano
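
# Quick numeric check of the aspect-ratio rule above: equirectangular
# panoramas are twice as wide as they are tall.
w, h = 8192, 4096
print(w == 2 * h)  # True: flagged as a panorama by the aspect-ratio test
w, h = 4000, 3000
print(w == 2 * h)  # False: the result then depends on the EXIF projection type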
Example #8
def run_dataset(dataset: DataSetBase, input, output):
    """Bundle a reconstructions.

    Args:
        input: input reconstruction JSON in the dataset
        output: output reconstruction JSON in the dataset

    """

    reconstructions = dataset.load_reconstruction(input)
    camera_priors = dataset.load_camera_models()
    gcp = dataset.load_ground_control_points()
    tracks_manager = dataset.load_tracks_manager()

    # Add the tracks manager observations to each reconstruction,
    # then bundle-adjust it
    for reconstruction in reconstructions:
        reconstruction.add_correspondences_from_tracks_manager(tracks_manager)
        orec.bundle(reconstruction, camera_priors, gcp, dataset.config)
    dataset.save_reconstruction(reconstructions, output)
Example #9
def rig_statistics(
        data: DataSetBase,
        reconstructions: List[types.Reconstruction]) -> Dict[str, Any]:
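    """Collect initial and optimized poses for each rig camera.

    Per-camera rig cameras are skipped; optimized values are taken from the
    largest reconstruction that contains the rig camera, and rig cameras that
    were never optimized are dropped.
    """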
    stats = {}
    permutation = np.argsort([-len(r.shots) for r in reconstructions])
    rig_cameras = data.load_rig_cameras()
    cameras = data.load_camera_models()
    for rig_camera_id, rig_camera in rig_cameras.items():
        # we skip per-camera rig camera for now
        if rig_camera_id in cameras:
            continue
        stats[rig_camera_id] = {
            "initial_values": {
                "rotation": list(rig_camera.pose.rotation),
                "translation": list(rig_camera.pose.translation),
            }
        }

    for idx in permutation:
        rec = reconstructions[idx]
        for rig_camera in rec.rig_cameras.values():
            if rig_camera.id not in stats:
                continue
            if "optimized_values" in stats[rig_camera.id]:
                continue
            stats[rig_camera.id]["optimized_values"] = {
                "rotation": list(rig_camera.pose.rotation),
                "translation": list(rig_camera.pose.translation),
            }

    for rig_camera_id in rig_cameras:
        if rig_camera_id not in stats:
            continue
        if "optimized_values" not in stats[rig_camera_id]:
            del stats[rig_camera_id]

    return stats
Example #10
def bootstrap_reconstruction(
    data: DataSetBase,
    tracks_manager: pymap.TracksManager,
    im1: str,
    im2: str,
    p1: np.ndarray,
    p2: np.ndarray,
) -> Tuple[Optional[types.Reconstruction], Dict[str, Any]]:
    """Start a reconstruction using two shots."""
    logger.info("Starting reconstruction with {} and {}".format(im1, im2))
    report: Dict[str, Any] = {
        "image_pair": (im1, im2),
        "common_tracks": len(p1),
    }

    camera_priors = data.load_camera_models()
    camera1 = camera_priors[data.load_exif(im1)["camera"]]
    camera2 = camera_priors[data.load_exif(im2)["camera"]]

    threshold = data.config["five_point_algo_threshold"]
    min_inliers = data.config["five_point_algo_min_inliers"]
    iterations = data.config["five_point_refine_rec_iterations"]
    R, t, inliers, report["two_view_reconstruction"] = two_view_reconstruction_general(
        p1, p2, camera1, camera2, threshold, iterations)

    logger.info("Two-view reconstruction inliers: {} / {}".format(
        len(inliers), len(p1)))
    if len(inliers) <= 5:
        report["decision"] = "Could not find initial motion"
        logger.info(report["decision"])
        return None, report

    rig_camera_priors = data.load_rig_cameras()
    rig_assignments = data.load_rig_assignments_per_image()

    reconstruction = types.Reconstruction()
    reconstruction.reference = data.load_reference()
    reconstruction.cameras = camera_priors
    reconstruction.rig_cameras = rig_camera_priors

    new_shots = add_shot(data, reconstruction, rig_assignments, im1,
                         pygeometry.Pose())

    if im2 not in new_shots:
        new_shots |= add_shot(data, reconstruction, rig_assignments, im2,
                              pygeometry.Pose(R, t))

    align_reconstruction(reconstruction, None, data.config)
    triangulate_shot_features(tracks_manager, reconstruction, new_shots,
                              data.config)

    logger.info("Triangulated: {}".format(len(reconstruction.points)))
    report["triangulated_points"] = len(reconstruction.points)
    if len(reconstruction.points) < min_inliers:
        report["decision"] = "Initial motion did not generate enough points"
        logger.info(report["decision"])
        return None, report

    to_adjust = {s for s in new_shots if s != im1}
    bundle_shot_poses(reconstruction, to_adjust, camera_priors,
                      rig_camera_priors, data.config)

    retriangulate(tracks_manager, reconstruction, data.config)
    if len(reconstruction.points) < min_inliers:
        report["decision"] = (
            "Re-triangulation after initial motion did not generate enough points"
        )
        logger.info(report["decision"])
        return None, report

    bundle_shot_poses(reconstruction, to_adjust, camera_priors,
                      rig_camera_priors, data.config)

    report["decision"] = "Success"
    report["memory_usage"] = current_memory_usage()
    return reconstruction, report
Example #11
def grow_reconstruction(
    data: DataSetBase,
    tracks_manager: pymap.TracksManager,
    reconstruction: types.Reconstruction,
    images: Set[str],
    gcp: List[pymap.GroundControlPoint],
) -> Tuple[types.Reconstruction, Dict[str, Any]]:
    """Incrementally add shots to an initial reconstruction."""
    config = data.config
    report = {"steps": []}

    camera_priors = data.load_camera_models()
    rig_camera_priors = data.load_rig_cameras()

    paint_reconstruction(data, tracks_manager, reconstruction)
    align_reconstruction(reconstruction, gcp, config)

    bundle(reconstruction, camera_priors, rig_camera_priors, None, config)
    remove_outliers(reconstruction, config)
    paint_reconstruction(data, tracks_manager, reconstruction)

    should_bundle = ShouldBundle(data, reconstruction)
    should_retriangulate = ShouldRetriangulate(data, reconstruction)
    while True:
        if config["save_partial_reconstructions"]:
            paint_reconstruction(data, tracks_manager, reconstruction)
            data.save_reconstruction(
                [reconstruction],
                "reconstruction.{}.json".format(
                    datetime.datetime.now().isoformat().replace(":", "_")),
            )

        candidates = reconstructed_points_for_images(tracks_manager,
                                                     reconstruction, images)
        if not candidates:
            break

        logger.info("-------------------------------------------------------")
        threshold = data.config["resection_threshold"]
        min_inliers = data.config["resection_min_inliers"]
        for image, _ in candidates:
            ok, new_shots, resrep = resect(
                data,
                tracks_manager,
                reconstruction,
                image,
                threshold,
                min_inliers,
            )
            if not ok:
                continue

            images -= new_shots
            bundle_shot_poses(
                reconstruction,
                new_shots,
                camera_priors,
                rig_camera_priors,
                data.config,
            )

            logger.info(
                f"Adding {' and '.join(new_shots)} to the reconstruction")
            step = {
                "images": list(new_shots),
                "resection": resrep,
                "memory_usage": current_memory_usage(),
            }
            report["steps"].append(step)

            np_before = len(reconstruction.points)
            triangulate_shot_features(tracks_manager, reconstruction,
                                      new_shots, config)
            np_after = len(reconstruction.points)
            step["triangulated_points"] = np_after - np_before

            if should_retriangulate.should():
                logger.info("Re-triangulating")
                align_reconstruction(reconstruction, gcp, config)
                b1rep = bundle(reconstruction, camera_priors,
                               rig_camera_priors, None, config)
                rrep = retriangulate(tracks_manager, reconstruction, config)
                b2rep = bundle(reconstruction, camera_priors,
                               rig_camera_priors, None, config)
                remove_outliers(reconstruction, config)
                step["bundle"] = b1rep
                step["retriangulation"] = rrep
                step["bundle_after_retriangulation"] = b2rep
                should_retriangulate.done()
                should_bundle.done()
            elif should_bundle.should():
                align_reconstruction(reconstruction, gcp, config)
                brep = bundle(reconstruction, camera_priors, rig_camera_priors,
                              None, config)
                remove_outliers(reconstruction, config)
                step["bundle"] = brep
                should_bundle.done()
            elif config["local_bundle_radius"] > 0:
                bundled_points, brep = bundle_local(
                    reconstruction,
                    camera_priors,
                    rig_camera_priors,
                    None,
                    image,
                    config,
                )
                remove_outliers(reconstruction, config, bundled_points)
                step["local_bundle"] = brep

            break
        else:
            logger.info("Some images can not be added")
            break

    logger.info("-------------------------------------------------------")

    align_reconstruction(reconstruction, gcp, config)
    bundle(reconstruction, camera_priors, rig_camera_priors, gcp, config)
    remove_outliers(reconstruction, config)
    paint_reconstruction(data, tracks_manager, reconstruction)
    return reconstruction, report
Example #12
def average_image_size(data: DataSetBase) -> float:
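    """Average per-camera image size in MB, assuming 4 bytes per pixel."""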
    average_size_mb = 0
    for camera in data.load_camera_models().values():
        average_size_mb += camera.width * camera.height * 4 / 1024 / 1024
    return average_size_mb / len(data.load_camera_models())
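
# Worked check of the formula above for a hypothetical 4000x3000 camera,
# using the same 4 bytes-per-pixel assumption.
width, height = 4000, 3000
print(round(width * height * 4 / 1024 / 1024, 1))  # ~45.8 MB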