# Assumed module context (hedged): these snippets follow OpenSfM conventions
# and rely on imports along these lines; adjust paths to the actual modules.
# Local helpers referenced below (compute_image_pairs, bootstrap_reconstruction,
# grow_reconstruction, _projection_error) are assumed to be defined elsewhere.
import json
import logging
import os
from collections import defaultdict
from typing import Any, Dict, List, Tuple

import numpy as np

from opensfm import features, io, multiview, pysfm, tracking, types
from opensfm import reconstruction
from opensfm import reconstruction as orec
from opensfm.dataset import DataSetBase
from opensfm.reconstruction import Chronometer

logger = logging.getLogger(__name__)


def run_dataset(data: DataSetBase, input, output):
    """Extend a reconstruction.

    Args:
        input: input reconstruction JSON in the dataset
        output: output reconstruction JSON in the dataset

    """
    recs_base = data.load_reconstruction(input)
    if len(recs_base) == 0:
        return

    rec_base = recs_base[0]
    tracks_manager = data.load_tracks_manager()
    rec_base.add_correspondences_from_tracks_manager(tracks_manager)

    images = data.images()
    remaining_images = set(images) - set(rec_base.shots)
    gcp = data.load_ground_control_points()
    report = {}
    rec_report = {}
    report["extend_reconstruction"] = [rec_report]
    rec, rec_report["grow"] = reconstruction.grow_reconstruction(
        data,
        tracks_manager,
        rec_base,
        remaining_images,
        gcp,
    )
    rec_report["num_remaining_images"] = len(remaining_images)
    report["not_reconstructed_images"] = list(remaining_images)
    data.save_reconstruction([rec], output)
    data.save_report(io.json_dumps(report), "reconstruction.json")

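# Hedged usage sketch for the extend-reconstruction entry point above. The
# DataSet class and the dataset path are illustrative assumptions; any
# DataSetBase implementation with the load/save methods used above would do.
#
#     from opensfm.dataset import DataSet
#
#     data = DataSet("path/to/dataset")
#     run_dataset(data, "reconstruction.json", "reconstruction_extended.json")
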
def gcp_errors(data: DataSetBase, reconstructions):
    gcps = data.load_ground_control_points()
    if not gcps:
        return {}

    all_errors = []
    for gcp in gcps:
        if not gcp.coordinates.has_value:
            continue

        # Try each partial reconstruction until the GCP triangulates.
        triangulated = None
        for rec in reconstructions:
            triangulated = multiview.triangulate_gcp(gcp, rec.shots, 1.0, 0.1)
            if triangulated is not None:
                break

        if triangulated is None:
            continue
        all_errors.append(triangulated - gcp.coordinates.value)

    return _gps_gcp_errors_stats(all_errors)

def gcp_errors(
    data: DataSetBase, reconstructions: List[types.Reconstruction]
) -> Dict[str, Any]:
    reference = data.load_reference()
    gcps = data.load_ground_control_points()
    if not gcps:
        return {}

    all_errors = []
    for gcp in gcps:
        if not gcp.lla:
            continue

        # Try each partial reconstruction until the GCP triangulates.
        triangulated = None
        for rec in reconstructions:
            triangulated = multiview.triangulate_gcp(gcp, rec.shots, 1.0, 0.1)
            if triangulated is not None:
                break

        if triangulated is None:
            continue

        # Compare the triangulated position with the annotated GCP position,
        # both expressed in the dataset's topocentric reference frame.
        gcp_enu = reference.to_topocentric(*gcp.lla_vec)
        all_errors.append(triangulated - gcp_enu)

    return _gps_gcp_errors_stats(np.array(all_errors))

def incremental_reconstruction(
    data: DataSetBase, tracks_manager: pysfm.TracksManager
) -> Tuple[Dict[str, Any], List[types.Reconstruction]]:
    """Run the entire incremental reconstruction pipeline."""
    logger.info("Starting incremental reconstruction")
    report = {}
    chrono = Chronometer()

    images = tracks_manager.get_shot_ids()

    if not data.reference_lla_exists():
        data.invent_reference_lla(images)

    remaining_images = set(images)
    gcp = data.load_ground_control_points()
    common_tracks = tracking.all_common_tracks(tracks_manager)
    reconstructions = []
    pairs = compute_image_pairs(common_tracks, data)
    chrono.lap("compute_image_pairs")
    report["num_candidate_image_pairs"] = len(pairs)
    report["reconstructions"] = []
    for im1, im2 in pairs:
        if im1 in remaining_images and im2 in remaining_images:
            rec_report = {}
            report["reconstructions"].append(rec_report)
            _, p1, p2 = common_tracks[im1, im2]
            reconstruction, rec_report["bootstrap"] = bootstrap_reconstruction(
                data, tracks_manager, im1, im2, p1, p2
            )

            if reconstruction:
                remaining_images -= set(reconstruction.shots)
                reconstruction, rec_report["grow"] = grow_reconstruction(
                    data,
                    tracks_manager,
                    reconstruction,
                    remaining_images,
                    gcp,
                )
                reconstructions.append(reconstruction)

    reconstructions = sorted(reconstructions, key=lambda x: -len(x.shots))
    for k, r in enumerate(reconstructions):
        logger.info(
            "Reconstruction {}: {} images, {} points".format(
                k, len(r.shots), len(r.points)
            )
        )
    logger.info("{} partial reconstructions in total.".format(len(reconstructions)))
    chrono.lap("compute_reconstructions")
    report["wall_times"] = dict(chrono.lap_times())
    report["not_reconstructed_images"] = list(remaining_images)
    return report, reconstructions

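# Hedged usage sketch for the pipeline above; the DataSet class and the
# dataset path are illustrative assumptions.
#
#     from opensfm.dataset import DataSet
#
#     data = DataSet("path/to/dataset")
#     tracks_manager = data.load_tracks_manager()
#     report, reconstructions = incremental_reconstruction(data, tracks_manager)
#     data.save_reconstruction(reconstructions)
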
def run_dataset(dataset: DataSetBase, input, output):
    """Bundle reconstructions.

    Args:
        input: input reconstruction JSON in the dataset
        output: output reconstruction JSON in the dataset

    """
    reconstructions = dataset.load_reconstruction(input)
    camera_priors = dataset.load_camera_models()
    gcp = dataset.load_ground_control_points()
    tracks_manager = dataset.load_tracks_manager()

    # Add the tracks manager's observations to each reconstruction:
    # go through all the points and add their shots.
    for reconstruction in reconstructions:
        reconstruction.add_correspondences_from_tracks_manager(tracks_manager)
        orec.bundle(reconstruction, camera_priors, gcp, dataset.config)
    dataset.save_reconstruction(reconstructions, output)

def reconstruction_statistics(data: DataSetBase, tracks_manager, reconstructions):
    stats = {}

    stats["components"] = len(reconstructions)
    gps_count = 0
    for rec in reconstructions:
        for shot in rec.shots.values():
            gps_count += shot.metadata.gps_position.has_value
    stats["has_gps"] = gps_count > 2
    stats["has_gcp"] = True if data.load_ground_control_points() else False

    stats["initial_points_count"] = tracks_manager.num_tracks()
    stats["initial_shots_count"] = len(data.images())

    stats["reconstructed_points_count"] = 0
    stats["reconstructed_shots_count"] = 0
    stats["observations_count"] = 0
    hist_agg = defaultdict(int)

    for rec in reconstructions:
        if len(rec.points) > 0:
            stats["reconstructed_points_count"] += len(rec.points)
        stats["reconstructed_shots_count"] += len(rec.shots)

        # get tracks length distribution for current reconstruction
        hist, values = _length_histogram(tracks_manager, rec.points)

        # update aggregated histogram
        for length, count_tracks in zip(hist, values):
            hist_agg[length] += count_tracks

    # observations total and average tracks lengths
    hist_agg = sorted(hist_agg.items(), key=lambda x: x[0])
    lengths, counts = (
        np.array([int(x[0]) for x in hist_agg]),
        np.array([x[1] for x in hist_agg]),
    )

    points_count = stats["reconstructed_points_count"]
    points_count_over_two = sum(counts[1:])
    stats["observations_count"] = int(sum(lengths * counts))
    stats["average_track_length"] = (
        (stats["observations_count"] / points_count) if points_count > 0 else -1
    )
    stats["average_track_length_over_two"] = (
        (int(sum(lengths[1:] * counts[1:])) / points_count_over_two)
        if points_count_over_two > 0
        else -1
    )
    stats["histogram_track_length"] = {k: v for k, v in hist_agg}

    (
        avg_normalized,
        avg_pixels,
        (hist_normalized, bins_normalized),
        (hist_pixels, bins_pixels),
    ) = _projection_error(tracks_manager, reconstructions)
    stats["reprojection_error_normalized"] = avg_normalized
    stats["reprojection_error_pixels"] = avg_pixels
    stats["reprojection_histogram_normalized"] = (
        list(map(float, hist_normalized)),
        list(map(float, bins_normalized)),
    )
    stats["reprojection_histogram_pixels"] = (
        list(map(float, hist_pixels)),
        list(map(float, bins_pixels)),
    )

    return stats

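# The _length_histogram helper used above is not shown here; the following is
# a minimal sketch of its assumed behavior (a histogram of track lengths,
# returned as parallel lists of lengths and counts), not the library's actual
# implementation. It assumes tracks_manager.get_track_observations returns one
# track's observations and that iterating `points` yields track ids.
def _length_histogram(tracks_manager, points):
    lengths = [
        len(tracks_manager.get_track_observations(point_id)) for point_id in points
    ]
    values, counts = np.unique(lengths, return_counts=True)
    return list(values), list(counts)
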
def gcp_errors(
    data: DataSetBase, reconstructions: List[types.Reconstruction]
) -> Dict[str, Any]:
    reference = data.load_reference()
    gcps = data.load_ground_control_points()
    if not gcps:
        return {}

    all_errors = []
    gcp_stats = []
    for gcp in gcps:
        if not gcp.lla:
            continue

        # Try each partial reconstruction until the GCP triangulates.
        triangulated = None
        for rec in reconstructions:
            triangulated = multiview.triangulate_gcp(gcp, rec.shots, 1.0, 0.1)
            if triangulated is not None:
                break

        if triangulated is None:
            continue

        gcp_enu = reference.to_topocentric(*gcp.lla_vec)
        e = triangulated - gcp_enu
        all_errors.append(e)

        # Begin computation of GCP stats
        observations = []
        for obs in gcp.observations:
            if obs.shot_id not in rec.shots:
                continue
            shot = rec.shots[obs.shot_id]
            reprojected = shot.project(gcp_enu)
            annotated = obs.projection
            # Convert both the reprojected and the annotated points from
            # normalized image coordinates to pixels, then rescale to [0, 1].
            r_pixel = features.denormalized_image_coordinates(
                np.array([[reprojected[0], reprojected[1]]]),
                shot.camera.width,
                shot.camera.height,
            )[0]
            r_pixel[0] /= shot.camera.width
            r_pixel[1] /= shot.camera.height
            a_pixel = features.denormalized_image_coordinates(
                np.array([[annotated[0], annotated[1]]]),
                shot.camera.width,
                shot.camera.height,
            )[0]
            a_pixel[0] /= shot.camera.width
            a_pixel[1] /= shot.camera.height
            observations.append(
                {
                    "shot_id": obs.shot_id,
                    "annotated": list(a_pixel),
                    "reprojected": list(r_pixel),
                }
            )
        gcp_stats.append(
            {
                "id": gcp.id,
                "coordinates": list(gcp_enu),
                "observations": observations,
                "error": list(e),
            }
        )
        # End computation of GCP stats

    with open(
        os.path.join(data.data_path, "stats", "ground_control_points.json"), "w"
    ) as f:
        f.write(json.dumps(gcp_stats, indent=4))

    return _gps_gcp_errors_stats(np.array(all_errors))

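# The _gps_gcp_errors_stats helper shared by the gcp_errors variants above is
# not shown; this is a minimal sketch of its assumed behavior (per-axis mean
# and RMS of the error vectors plus an overall RMS distance). The output keys
# are assumptions, not the library's actual schema.
def _gps_gcp_errors_stats(errors):
    errors = np.asarray(errors)
    if errors.size == 0:
        return {}
    squared = errors * errors
    mean_squared = np.mean(squared, axis=0)
    return {
        "mean": np.mean(errors, axis=0).tolist(),  # per-axis bias
        "std": np.sqrt(mean_squared).tolist(),  # per-axis RMS
        "error": float(np.sqrt(np.sum(mean_squared))),  # overall RMS distance
    }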