Example #1
0
def gcp_errors(data: DataSetBase,
               reconstructions: List[types.Reconstruction]) -> Dict[str, Any]:
    """Compute GCP triangulation errors in topocentric coordinates.

    Each ground control point with a LLA prior is triangulated in the first
    reconstruction that can see it; the error is the difference between the
    triangulated position and the reference (ENU) position.

    Returns aggregate error statistics, or {} when there are no GCPs.
    """
    reference = data.load_reference()
    gcps = data.load_ground_control_points()
    if not gcps:
        return {}

    # One ENU error vector per successfully triangulated GCP.
    # (The original initialized this list twice; once is enough.)
    all_errors = []
    for gcp in gcps:
        if not gcp.lla:
            continue

        # Use the first reconstruction that manages to triangulate this GCP.
        triangulated = None
        for rec in reconstructions:
            triangulated = multiview.triangulate_gcp(gcp, rec.shots, 1.0, 0.1)
            if triangulated is not None:
                break

        if triangulated is None:
            continue
        gcp_enu = reference.to_topocentric(*gcp.lla_vec)
        all_errors.append(triangulated - gcp_enu)

    return _gps_gcp_errors_stats(np.array(all_errors))
Example #2
0
def rig_statistics(data: DataSetBase, reconstructions):
    """Report initial vs. optimized rig camera poses.

    Optimized values are taken from the largest reconstruction (by shot
    count) containing the rig camera; rig cameras that were never optimized
    are dropped from the result.
    """
    # Load once: the original hit the dataset twice for the same data.
    rig_cameras = data.load_rig_cameras()

    stats = {}
    for rig_camera_id, rig_camera in rig_cameras.items():
        stats[rig_camera_id] = {
            "initial_values": {
                "rotation": list(rig_camera.pose.rotation),
                "translation": list(rig_camera.pose.translation),
            }
        }

    # Visit reconstructions from largest to smallest so the biggest one
    # containing a rig camera provides its optimized values.
    permutation = np.argsort([-len(r.shots) for r in reconstructions])
    for idx in permutation:
        rec = reconstructions[idx]
        for rec_rig_camera in rec.rig_cameras.values():
            if "optimized_values" in stats[rec_rig_camera.id]:
                continue
            stats[rec_rig_camera.id]["optimized_values"] = {
                "rotation": list(rec_rig_camera.pose.rotation),
                "translation": list(rec_rig_camera.pose.translation),
            }

    # Keep only rig cameras that appeared in some reconstruction.
    for rig_camera_id in list(rig_cameras):
        if "optimized_values" not in stats[rig_camera_id]:
            del stats[rig_camera_id]

    return stats
Example #3
0
def run_dataset(data: DataSetBase):
    """Link pair-wise matches into tracks and save the tracks manager."""

    t_start = timer()
    features, colors, segmentations, instances = tracking.load_features(
        data, data.images())
    t_features = timer()
    matches = tracking.load_matches(data, data.images())
    t_matches = timer()
    tracks_manager = tracking.create_tracks_manager(
        features, colors, segmentations, instances, matches, data.config)
    t_tracks = timer()

    data.save_tracks_manager(tracks_manager)
    write_report(data, tracks_manager,
                 t_features - t_start,
                 t_matches - t_features,
                 t_tracks - t_matches)
Example #4
0
def bootstrap_reconstruction(data: DataSetBase, tracks_manager, camera_priors,
                             im1, im2, p1, p2):
    """Start a reconstruction using two shots.

    Estimates the relative pose of im2 w.r.t. im1 from the common track
    observations (p1, p2), triangulates an initial point cloud and refines
    it. Returns a (reconstruction, report) tuple; reconstruction is None
    when the pair does not yield enough inliers or triangulated points.
    """
    logger.info("Starting reconstruction with {} and {}".format(im1, im2))
    report: Dict[str, Any] = {
        "image_pair": (im1, im2),
        "common_tracks": len(p1),
    }

    camera_id1 = data.load_exif(im1)["camera"]
    camera_id2 = data.load_exif(im2)["camera"]
    camera1 = camera_priors[camera_id1]
    camera2 = camera_priors[camera_id2]

    threshold = data.config["five_point_algo_threshold"]
    min_inliers = data.config["five_point_algo_min_inliers"]
    iterations = data.config["five_point_refine_rec_iterations"]
    # Estimate relative rotation/translation of im2 w.r.t. im1 from the
    # 2D-2D correspondences.
    R, t, inliers, report[
        "two_view_reconstruction"] = two_view_reconstruction_general(
            p1, p2, camera1, camera2, threshold, iterations)

    logger.info("Two-view reconstruction inliers: {} / {}".format(
        len(inliers), len(p1)))
    if len(inliers) <= 5:
        report["decision"] = "Could not find initial motion"
        logger.info(report["decision"])
        return None, report

    reconstruction = types.Reconstruction()
    reconstruction.reference = data.load_reference()
    reconstruction.cameras = camera_priors
    # im1 defines the origin (identity pose); im2 carries the estimated
    # relative pose.
    shot1 = reconstruction.create_shot(im1, camera_id1, pygeometry.Pose())
    shot1.metadata = get_image_metadata(data, im1)

    shot2 = reconstruction.create_shot(im2, camera_id2, pygeometry.Pose(R, t))
    shot2.metadata = get_image_metadata(data, im2)

    # Triangulate the tracks observed in im1 only; p1/p2 are common tracks,
    # so these are the tracks shared with im2.
    triangulate_shot_features(tracks_manager, reconstruction, im1, data.config)

    logger.info("Triangulated: {}".format(len(reconstruction.points)))
    report["triangulated_points"] = len(reconstruction.points)
    if len(reconstruction.points) < min_inliers:
        report["decision"] = "Initial motion did not generate enough points"
        logger.info(report["decision"])
        return None, report

    # Refine im2's pose against the initial points, re-triangulate, and
    # re-check the point budget before a final single-view refinement.
    bundle_single_view(reconstruction, im2, camera_priors, data.config)
    retriangulate(tracks_manager, reconstruction, data.config)

    if len(reconstruction.points) < min_inliers:
        report[
            "decision"] = "Re-triangulation after initial motion did not generate enough points"
        logger.info(report["decision"])
        return None, report
    bundle_single_view(reconstruction, im2, camera_priors, data.config)

    report["decision"] = "Success"
    report["memory_usage"] = current_memory_usage()
    return reconstruction, report
Example #5
0
def write_report(data: DataSetBase, preport, pairs, wall_time):
    """Save the matching report, merged with the pair-selection preport."""
    report = {"wall_time": wall_time, "num_pairs": len(pairs), "pairs": pairs}
    report.update(preport)
    data.save_report(io.json_dumps(report), "matches.json")
Example #6
0
def run_dataset(data: DataSetBase):
    """Compute the SfM reconstruction."""

    manager = data.load_tracks_manager()
    report, recs = reconstruction.incremental_reconstruction(data, manager)
    data.save_reconstruction(recs)
    data.save_report(io.json_dumps(report), "reconstruction.json")
Example #7
0
def save_matches(data: DataSetBase, images_ref, matched_pairs):
    """Given pairwise matches (image 1, image 2) - > matches,
    save them such as only {image E images_ref} will store the matches.

    Raises RuntimeError when a pair has neither image in images_ref.
    """
    # Bug fix: the original assumed im1 was always in images_ref and raised
    # KeyError otherwise. Store under whichever image of the pair is a
    # reference image, swapping the pair when needed.
    images_ref_set = set(images_ref)
    matches_per_im1 = {im: {} for im in images_ref}
    for (im1, im2), m in matched_pairs.items():
        if im1 in images_ref_set:
            matches_per_im1[im1][im2] = m
        elif im2 in images_ref_set:
            matches_per_im1[im2][im1] = m
        else:
            raise RuntimeError(
                "Couldn't save matches for {}. No image found in images_ref.".
                format((im1, im2)))

    for im1, im1_matches in matches_per_im1.items():
        data.save_matches(im1, im1_matches)
def write_report(data: DataSetBase, wall_time):
    """Aggregate per-image feature reports into one features report."""
    image_reports = []
    for image in data.images():
        try:
            report_text = data.load_report("features/{}.json".format(image))
            image_reports.append(io.json_loads(report_text))
        except IOError:
            logger.warning("No feature report image {}".format(image))

    data.save_report(
        io.json_dumps({"wall_time": wall_time, "image_reports": image_reports}),
        "features.json")
Example #9
0
def incremental_reconstruction(
    data: DataSetBase, tracks_manager: pysfm.TracksManager
) -> Tuple[Dict[str, Any], List[types.Reconstruction]]:
    """Run the entire incremental reconstruction pipeline.

    Seeds reconstructions from candidate image pairs and grows each seed
    with the images not yet claimed by an earlier reconstruction. Returns
    a (report, reconstructions) tuple with reconstructions sorted by
    decreasing number of shots.
    """
    logger.info("Starting incremental reconstruction")
    report = {}
    chrono = Chronometer()

    images = tracks_manager.get_shot_ids()

    # Make sure a geographic reference exists before reconstructing.
    if not data.reference_lla_exists():
        data.invent_reference_lla(images)

    remaining_images = set(images)
    gcp = data.load_ground_control_points()
    common_tracks = tracking.all_common_tracks(tracks_manager)
    reconstructions = []
    pairs = compute_image_pairs(common_tracks, data)
    chrono.lap("compute_image_pairs")
    report["num_candidate_image_pairs"] = len(pairs)
    report["reconstructions"] = []
    for im1, im2 in pairs:
        # Only seed from pairs whose images have not been reconstructed yet.
        if im1 in remaining_images and im2 in remaining_images:
            rec_report = {}
            report["reconstructions"].append(rec_report)
            _, p1, p2 = common_tracks[im1, im2]
            reconstruction, rec_report["bootstrap"] = bootstrap_reconstruction(
                data, tracks_manager, im1, im2, p1, p2
            )

            if reconstruction:
                # Remove the seed's shots from the pool, then grow with the
                # images still remaining; keep the list sorted by size.
                remaining_images -= set(reconstruction.shots)
                reconstruction, rec_report["grow"] = grow_reconstruction(
                    data,
                    tracks_manager,
                    reconstruction,
                    remaining_images,
                    gcp,
                )
                reconstructions.append(reconstruction)
                reconstructions = sorted(reconstructions, key=lambda x: -len(x.shots))

    for k, r in enumerate(reconstructions):
        logger.info(
            "Reconstruction {}: {} images, {} points".format(
                k, len(r.shots), len(r.points)
            )
        )
    logger.info("{} partial reconstructions in total.".format(len(reconstructions)))
    chrono.lap("compute_reconstructions")
    report["wall_times"] = dict(chrono.lap_times())
    report["not_reconstructed_images"] = list(remaining_images)
    return report, reconstructions
Example #10
0
def is_high_res_panorama(data: DataSetBase, image_key, image_array):
    """Detect if image is a panorama."""
    exif = data.load_exif(image_key)
    if exif:
        camera = data.load_camera_models()[exif["camera"]]
        width, height = int(exif["width"]), int(exif["height"])
        projection_pano = pygeometry.Camera.is_panorama(camera.projection_type)
    elif image_array is not None:
        height, width = image_array.shape[:2]
        projection_pano = False
    else:
        return False
    # Equirectangular panoramas have a 2:1 aspect ratio.
    return width == 2 * height or projection_pano
def run_dataset(data: DataSetBase, input, output):
    """Extend the largest input reconstruction with the remaining images."""
    recs_base = data.load_reconstruction(input)
    if not recs_base:
        return

    rec_base = recs_base[0]
    tracks_manager = data.load_tracks_manager()
    rec_base.add_correspondences_from_tracks_manager(tracks_manager)

    remaining_images = set(data.images()) - set(rec_base.shots)
    gcp = data.load_ground_control_points()

    rec_report = {}
    report = {"extend_reconstruction": [rec_report]}
    rec, rec_report["grow"] = reconstruction.grow_reconstruction(
        data,
        tracks_manager,
        rec_base,
        remaining_images,
        gcp,
    )
    rec_report["num_remaining_images"] = len(remaining_images)
    report["not_reconstructed_images"] = list(remaining_images)
    data.save_reconstruction([rec], output)
    data.save_report(io.json_dumps(report), "reconstruction.json")
Example #12
0
 def load_mask(self, data: DataSetBase, image):
     """Return a per-feature keep mask for the image.

     When segmentation baking is enabled and segmentations exist, keep
     features whose label is not in the ignore set. Otherwise defer to the
     dataset's feature mask (None when no points are available).
     """
     points, _, _, segmentations, _ = self._load_all_data_unmasked(
         data, image)
     bake_segmentation = data.config["features_bake_segmentation"]
     if bake_segmentation and segmentations is not None:
         ignore = set(data.segmentation_ignore_values(image))
         return [label not in ignore for label in segmentations]
     if points is None:
         return None
     return data.load_features_mask(image, points[:, :2])
Example #13
0
def match_images_with_pairs(data: DataSetBase, config_override, exifs, pairs):
    """ Perform pair matchings given pairs. """
    cameras = data.load_camera_models()
    args = list(match_arguments(pairs, data, config_override, cameras, exifs))

    # Run every pair matching in parallel, bounded by available memory.
    start = timer()
    logger.info("Matching {} image pairs".format(len(pairs)))
    mem_per_process = 512
    jobs_per_process = 2
    processes = context.processes_that_fit_in_memory(data.config["processes"],
                                                     mem_per_process)
    logger.info("Computing pair matching with %d processes" % processes)
    matches = context.parallel_map(match_unwrap_args, args, processes,
                                   jobs_per_process)
    logger.info("Matched {} pairs {} in {} seconds ({} seconds/pair).".format(
        len(pairs),
        log_projection_types(pairs, exifs, cameras),
        timer() - start,
        (timer() - start) / len(pairs) if pairs else 0,
    ))

    # Index the flat result list back into a per-pair dictionary.
    return {(im1, im2): m for im1, im2, m in matches}
 def load_words(self, data: DataSetBase, image: str, masked: bool) -> np.ndarray:
     """Load the image's visual words, optionally restricted by the mask."""
     words = data.load_words(image)
     if not masked:
         return words
     mask = self.load_mask(data, image)
     if mask is None:
         return words
     return words[mask]
Example #15
0
 def load_words(self, data: DataSetBase, image, masked):
     """Load the visual words of an image.

     When `masked` is true and a feature mask is available, only the words
     of kept features are returned.
     """
     words = data.load_words(image)
     if masked:
         # load_mask may return None (no mask available): keep all words.
         mask = self.load_mask(data, image)
         if mask is not None:
             words = words[mask]
     return words
Example #16
0
def gcp_errors(data: DataSetBase, reconstructions):
    """Compute GCP triangulation error statistics.

    Each ground control point with a coordinate prior is triangulated in
    the first reconstruction able to see it; the error is the difference
    between the triangulated position and the prior value.

    Returns aggregate error statistics, or {} when there are no GCPs.
    """
    # Renamed from `gcp` so the loop variable no longer shadows the list.
    gcps = data.load_ground_control_points()
    if not gcps:
        return {}

    all_errors = []
    for gcp in gcps:
        if not gcp.coordinates.has_value:
            continue

        # Bug fix: initialize before the loop so `triangulated` is defined
        # even when `reconstructions` is empty (the original could read it
        # unbound — see the removed pyre-fixme).
        triangulated = None
        for rec in reconstructions:
            triangulated = multiview.triangulate_gcp(gcp, rec.shots, 1.0, 0.1)
            if triangulated is not None:
                break

        if triangulated is None:
            continue
        all_errors.append(triangulated - gcp.coordinates.value)

    return _gps_gcp_errors_stats(all_errors)
Example #17
0
 def _load_features_nocache(self, data: DataSetBase, image):
     """Load raw features; point coordinates are normalized to floats."""
     points, features, colors, segmentation_data = data.load_features(image)
     if points is None:
         logger.error("Could not load features for image {}".format(image))
         return points, features, colors, segmentation_data
     converted = np.array(points[:, :3], dtype=float)
     return converted, features, colors, segmentation_data
Example #18
0
 def load_mask(self, data: DataSetBase, image: str) -> Optional[np.ndarray]:
     """Return a boolean keep-mask over the image's features, or None."""
     all_features_data = self._load_all_data_unmasked(data, image)
     if not all_features_data:
         return None
     if (data.config["features_bake_segmentation"]
             and all_features_data.semantic is not None):
         # pyre-fixme [16]: `Optional` has no attribute `segmentation`
         labels = all_features_data.semantic.segmentation
         ignore_values = set(data.segmentation_ignore_values(image))
         return np.array([label not in ignore_values for label in labels])
     return data.load_features_mask(image,
                                    all_features_data.points[:, :2])
Example #19
0
def match_images(data: DataSetBase, config_override, ref_images, cand_images):
    """Perform pair matchings between two sets of images.

    It will do matching for each pair (i, j), i being in
    ref_images and j in cand_images, taking assumption that
    matching(i, j) == matching(j ,i). This does not hold for
    non-symmetric matching options like WORDS. Data will be
    stored in i matching only.

    Returns a (pair -> matches dict, pair-selection report) tuple.
    """

    # Get EXIFs data
    all_images = list(set(ref_images + cand_images))
    exifs = {im: data.load_exif(im) for im in all_images}

    # Generate pairs for matching
    pairs, preport = pairs_selection.match_candidates_from_metadata(
        ref_images,
        cand_images,
        exifs,
        data,
        config_override,
    )

    # Match them !
    # NOTE(review): match_images_with_pairs is called with ref_images as an
    # extra argument here — confirm the callee's signature accepts it.
    return (
        match_images_with_pairs(data, config_override, exifs, ref_images, pairs),
        preport,
    )
Example #20
0
def _pair_reconstructability_arguments(track_dict, cameras, data: DataSetBase):
    threshold = 4 * data.config["five_point_algo_threshold"]
    args = []
    for (im1, im2), (_, p1, p2) in track_dict.items():
        camera1 = cameras[data.load_exif(im1)["camera"]]
        camera2 = cameras[data.load_exif(im2)["camera"]]
        args.append((im1, im2, p1, p2, camera1, camera2, threshold))
    return args
Example #21
0
def _reconstruction_from_rigs_and_assignments(data: DataSetBase):
    """Build initial reconstructions from rig models and their assignments.

    Each rig instance's shots are posed by composing the rig camera pose
    with a rig pose anchored at the image's GPS position.
    """
    assignments = data.load_rig_assignments()
    models = data.load_rig_models()

    if not data.reference_lla_exists():
        data.invent_reference_lla()

    base_rotation = np.zeros(3)

    reconstructions = []
    for rig_id, instances in assignments.items():
        rig_cameras = models[rig_id]["rig_cameras"]

        reconstruction = types.Reconstruction()
        reconstruction.cameras = data.load_camera_models()
        for instance in instances:
            for image, camera_id in instance:
                rig_camera = rig_cameras[camera_id]
                # Load the image metadata once; the original loaded it
                # twice (for the rig origin and for the shot metadata).
                metadata = orec.get_image_metadata(data, image)
                rig_pose = pygeometry.Pose(base_rotation)
                rig_pose.set_origin(metadata.gps_position.value)
                rig_camera_pose = pygeometry.Pose(rig_camera["rotation"],
                                                  rig_camera["translation"])

                d = data.load_exif(image)
                shot = reconstruction.create_shot(image, d["camera"])
                shot.pose = rig_camera_pose.compose(rig_pose)
                shot.metadata = metadata

        reconstructions.append(reconstruction)
    return reconstructions
Example #22
0
def run_dataset(data: DataSetBase):
    """ Add delaunay meshes to the reconstruction. """

    tracks_manager = data.load_tracks_manager()
    reconstructions = data.load_reconstruction()

    tracked_shot_ids = set(tracks_manager.get_shot_ids())
    for rec in reconstructions:
        for shot in rec.shots.values():
            if shot.id not in tracked_shot_ids:
                continue
            vertices, faces = mesh.triangle_mesh(shot.id, rec, tracks_manager)
            shot.mesh.vertices = vertices
            shot.mesh.faces = faces

    data.save_reconstruction(reconstructions,
                             filename="reconstruction.meshed.json",
                             minify=True)
Example #23
0
def cameras_statistics(data: DataSetBase, reconstructions):
    """Report initial vs. optimized camera model parameters.

    Optimized values are taken from the largest reconstruction (by shot
    count) containing the camera; cameras never optimized are dropped.
    """
    # Load once: the original hit the dataset twice for the same data.
    camera_models = data.load_camera_models()

    stats = {}
    for camera_id, camera_model in camera_models.items():
        stats[camera_id] = {"initial_values": _cameras_statistics(camera_model)}

    # Largest reconstructions first so their optimized values win.
    permutation = np.argsort([-len(r.shots) for r in reconstructions])
    for idx in permutation:
        rec = reconstructions[idx]
        for camera in rec.cameras.values():
            if "optimized_values" in stats[camera.id]:
                continue
            stats[camera.id]["optimized_values"] = _cameras_statistics(camera)

    # Drop cameras that never appeared in any reconstruction.
    for camera_id in list(camera_models):
        if "optimized_values" not in stats[camera_id]:
            del stats[camera_id]

    return stats
Example #24
0
def save_matches(data: DataSetBase, images_ref, matched_pairs):
    """Given pairwise matches (image 1, image 2) - > matches,
    save them such as only {image E images_ref} will store the matches.
    """
    ref_set = set(images_ref)
    per_ref_image = {im: {} for im in images_ref}
    for (im1, im2), m in matched_pairs.items():
        # Store under whichever image of the pair is a reference image,
        # swapping the pair when only the second one is.
        if im1 in ref_set:
            per_ref_image[im1][im2] = m
        elif im2 in ref_set:
            per_ref_image[im2][im1] = m
        else:
            raise RuntimeError(
                "Couldn't save matches for {}. No image found in images_ref.".
                format((im1, im2)))

    for ref_image, its_matches in per_ref_image.items():
        data.save_matches(ref_image, its_matches)
Example #25
0
def write_report(data: DataSetBase, tracks_manager, features_time,
                 matches_time, tracks_time):
    """Save the tracks-creation report, including the pairwise view graph."""
    connectivity = tracks_manager.get_all_pairs_connectivity()
    view_graph = [(pair[0], pair[1], count)
                  for pair, count in connectivity.items()]

    wall_times = {
        "load_features": features_time,
        "load_matches": matches_time,
        "compute_tracks": tracks_time,
    }
    report = {
        "wall_times": wall_times,
        "wall_time": features_time + matches_time + tracks_time,
        "num_images": tracks_manager.num_shots(),
        "num_tracks": tracks_manager.num_tracks(),
        "view_graph": view_graph,
    }
    data.save_report(io.json_dumps(report), "tracks.json")
Example #26
0
 def _load_features_nocache(self, data: DataSetBase,
                            image: str) -> Optional[ft.FeaturesData]:
     """Load features for an image; log and return None when unavailable."""
     features_data = data.load_features(image)
     if features_data is None:
         logger.error("Could not load features for image {}".format(image))
         return None
     # Normalize point coordinates to a plain float array.
     features_data.points = np.array(features_data.points[:, :3],
                                     dtype=float)
     return features_data
Example #27
0
def _not_on_blackvue_watermark(p1, p2, matches, im1, im2, data: DataSetBase):
    """Filter Blackvue's watermark."""
    meta1 = data.load_exif(im1)
    meta2 = data.load_exif(im2)

    if meta1["make"].lower() == "blackvue":
        matches = [m for m in matches if _blackvue_valid_mask(p1[m[0]])]
    if meta2["make"].lower() == "blackvue":
        matches = [m for m in matches if _blackvue_valid_mask(p2[m[1]])]
    return matches
Example #28
0
def _not_on_vermont_watermark(p1, p2, matches, im1, im2, data: DataSetBase):
    """Filter Vermont images watermark."""
    meta1 = data.load_exif(im1)
    meta2 = data.load_exif(im2)

    if meta1["make"] == "VTrans_Camera" and meta1["model"] == "VTrans_Camera":
        matches = [m for m in matches if _vermont_valid_mask(p1[m[0]])]
    if meta2["make"] == "VTrans_Camera" and meta2["model"] == "VTrans_Camera":
        matches = [m for m in matches if _vermont_valid_mask(p2[m[1]])]
    return matches
Example #29
0
def features_statistics(data: DataSetBase, tracks_manager, reconstructions):
    """Compute detected and reconstructed feature-count statistics.

    Returns a dict with "detected_features" (per-image detected feature
    counts) and "reconstructed_features" (per-shot counts of observations
    belonging to reconstructed points); -1 sentinels mark empty data.
    """
    stats = {}
    detected = []
    for im in data.images():
        features_data = data.load_features(im)
        if not features_data:
            continue
        detected.append(len(features_data.points))
    if len(detected) > 0:
        stats["detected_features"] = {
            "min": min(detected),
            "max": max(detected),
            "mean": int(np.mean(detected)),
            "median": int(np.median(detected)),
        }
    else:
        stats["detected_features"] = {
            "min": -1,
            "max": -1,
            "mean": -1,
            "median": -1
        }

    # Hoisted: the original re-fetched get_shot_ids() (and scanned it
    # linearly) for every shot of every reconstruction.
    tracked_shot_ids = set(tracks_manager.get_shot_ids())

    per_shots = defaultdict(int)
    for rec in reconstructions:
        all_points_keys = set(rec.points.keys())
        for shot_id in rec.shots:
            if shot_id not in tracked_shot_ids:
                continue
            # Count only observations of points kept in the reconstruction.
            for point_id in tracks_manager.get_shot_observations(shot_id):
                if point_id not in all_points_keys:
                    continue
                per_shots[shot_id] += 1
    per_shots = list(per_shots.values())

    stats["reconstructed_features"] = {
        "min": int(min(per_shots)) if len(per_shots) > 0 else -1,
        "max": int(max(per_shots)) if len(per_shots) > 0 else -1,
        "mean": int(np.mean(per_shots)) if len(per_shots) > 0 else -1,
        "median": int(np.median(per_shots)) if len(per_shots) > 0 else -1,
    }
    return stats
Example #30
0
def load_matches(dataset: DataSetBase, images):
    """Collect pairwise matches, keeping only pairs fully inside `images`."""
    matches = {}
    for im1 in images:
        try:
            im1_matches = dataset.load_matches(im1)
        except IOError:
            # No matches saved for this image: skip it.
            continue
        for im2, m in im1_matches.items():
            if im2 in images:
                matches[im1, im2] = m
    return matches