def match_images_with_pairs(
    data: DataSetBase,
    config_override: Dict[str, Any],
    exifs: Dict[str, Any],
    pairs: List[Tuple[str, str]],
    poses: Optional[Dict[str, pygeometry.Pose]] = None,
) -> Dict[Tuple[str, str], List[Tuple[int, int]]]:
    """Perform pair matchings given pairs.

    Args:
        data: dataset providing images, features and camera models
        config_override: config values overriding the dataset's own config
        exifs: per-image EXIF data
        pairs: list of (im1, im2) image pairs to match
        poses: optional per-image pose priors

    Returns:
        Dict mapping each (im1, im2) pair to its list of matching
        feature index pairs.
    """
    cameras = data.load_camera_models()
    args = list(match_arguments(pairs, data, config_override, cameras, exifs, poses))

    # Perform all pair matchings in parallel
    start = timer()
    logger.info("Matching {} image pairs".format(len(pairs)))
    processes = config_override.get("processes", data.config["processes"])
    mem_per_process = 512
    jobs_per_process = 2
    # Clamp the requested process count to what fits in available memory.
    processes = context.processes_that_fit_in_memory(processes, mem_per_process)
    logger.info("Computing pair matching with %d processes" % processes)
    matches = context.parallel_map(match_unwrap_args, args, processes, jobs_per_process)
    # Sample the clock once so the total and per-pair figures are consistent
    # (the original read timer() twice, yielding slightly different values).
    elapsed = timer() - start
    logger.info(
        "Matched {} pairs {} in {} seconds ({} seconds/pair).".format(
            len(pairs),
            log_projection_types(pairs, exifs, cameras),
            elapsed,
            elapsed / len(pairs) if pairs else 0,
        )
    )

    # Index results per pair
    return {(im1, im2): m for im1, im2, m in matches}
def is_high_res_panorama(
    data: DataSetBase, image_key: str, image_array: Optional[np.ndarray]
) -> bool:
    """Detect if an image is a panorama.

    Uses EXIF metadata when available; otherwise falls back to the pixel
    dimensions of ``image_array``. An image is considered a panorama when
    its camera projection type says so, or when it has the 2:1 aspect
    ratio typical of equirectangular panoramas.

    Args:
        data: dataset providing EXIF data and camera models
        image_key: image identifier within the dataset
        image_array: decoded image pixels, or None if unavailable
            (the original annotated this as non-optional, but the body
            explicitly handles None)

    Returns:
        True if the image looks like a panorama, False otherwise
        (including when neither EXIF nor pixels are available).
    """
    exif = data.load_exif(image_key)
    if exif:
        camera = data.load_camera_models()[exif["camera"]]
        w, h = int(exif["width"]), int(exif["height"])
        exif_pano = pygeometry.Camera.is_panorama(camera.projection_type)
    elif image_array is not None:
        # No EXIF: use the array shape (rows, cols) for the aspect test.
        h, w = image_array.shape[:2]
        exif_pano = False
    else:
        return False
    return w == 2 * h or exif_pano
def run_dataset(dataset: DataSetBase, input, output) -> None:
    """Bundle adjust the reconstructions of a dataset.

    Args:
        input: name of the input reconstruction JSON in the dataset
        output: name of the output reconstruction JSON in the dataset
    """
    reconstructions = dataset.load_reconstruction(input)
    camera_priors = dataset.load_camera_models()
    rig_cameras_priors = dataset.load_rig_cameras()
    tracks_manager = dataset.load_tracks_manager()
    gcp = dataset.load_ground_control_points()

    # Add the tracks manager observations to each reconstruction so bundle
    # adjustment sees every shot/point correspondence, then bundle each one.
    for reconstruction in reconstructions:
        reconstruction.add_correspondences_from_tracks_manager(tracks_manager)
        orec.bundle(
            reconstruction, camera_priors, rig_cameras_priors, gcp, dataset.config
        )
    dataset.save_reconstruction(reconstructions, output)
def reconstruction_from_metadata(data: DataSetBase, images: Iterable[str]) -> types.Reconstruction:
    """Build an initial reconstruction from EXIF metadata.

    Shot poses are derived from per-image GPS and orientation metadata;
    cameras and rig assignments come from the dataset. Images without a
    GPS position are dropped from the result.
    """
    data.init_reference()
    assignments = rig.rig_assignments_per_image(data.load_rig_assignments())

    rec = types.Reconstruction()
    rec.reference = data.load_reference()
    rec.cameras = data.load_camera_models()

    for image in images:
        cam_id = data.load_exif(image)["camera"]

        # Images outside any rig get a trivial single-shot rig of their own.
        if image not in assignments:
            instance_id, rig_cam_id = image, cam_id
        else:
            instance_id, rig_cam_id, _ = assignments[image]

        rec.add_rig_camera(pymap.RigCamera(pygeometry.Pose(), rig_cam_id))
        rec.add_rig_instance(pymap.RigInstance(instance_id))
        shot = rec.create_shot(
            shot_id=image,
            camera_id=cam_id,
            rig_camera_id=rig_cam_id,
            rig_instance_id=instance_id,
        )
        shot.metadata = get_image_metadata(data, image)

        # Without a GPS prior the shot cannot be placed; discard it.
        if not shot.metadata.gps_position.has_value:
            rec.remove_shot(image)
            continue

        gps_pos = shot.metadata.gps_position.value
        # Rotation must be set before the origin, since the origin is
        # expressed through the pose's rotation.
        shot.pose.set_rotation_matrix(rotation_from_shot_metadata(shot))
        shot.pose.set_origin(gps_pos)
        shot.scale = 1.0
    return rec
def average_image_size(data: DataSetBase) -> float:
    """Return the average image size in megabytes across camera models.

    Size is estimated as width * height * 4 bytes per pixel
    (presumably an uncompressed 4-channel/float buffer — TODO confirm).

    Args:
        data: dataset providing the camera models

    Returns:
        Average estimated size in MB, or 0 when there are no cameras.
    """
    # Load once instead of twice (the original re-loaded just for len()).
    cameras = data.load_camera_models()
    total_mb = sum(
        camera.width * camera.height * 4 / 1024 / 1024 for camera in cameras.values()
    )
    # max(1, ...) guards the empty-dataset division.
    return total_mb / max(1, len(cameras))