Example #1
def run_dataset(data: DataSetBase) -> None:
    """Link matches pair-wise matches into tracks."""

    start = timer()
    features, colors, segmentations, instances = tracking.load_features(
        data, data.images())
    features_end = timer()
    matches = tracking.load_matches(data, data.images())
    matches_end = timer()
    tracks_manager = tracking.create_tracks_manager(
        features,
        colors,
        segmentations,
        instances,
        matches,
        data.config["min_track_length"],
    )
    tracks_end = timer()
    data.save_tracks_manager(tracks_manager)
    write_report(
        data,
        tracks_manager,
        features_end - start,
        matches_end - features_end,
        tracks_end - matches_end,
    )
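
The timer() calls bracket each pipeline stage so that write_report (see Example #15) can record per-stage wall times. A minimal standalone sketch of the same bookkeeping, assuming timer is timeit.default_timer (the usual alias behind this idiom); the stages here are dummies:

from timeit import default_timer as timer

start = timer()
_ = sum(range(10**6))          # stand-in for tracking.load_features
features_end = timer()
_ = sorted(range(10**5))       # stand-in for tracking.load_matches
matches_end = timer()

wall_times = {
    "load_features": features_end - start,
    "load_matches": matches_end - features_end,
}
print(wall_times)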
Example #2
def write_report(data: DataSetBase, preport, pairs, wall_time) -> None:
    report = {
        "wall_time": wall_time,
        "num_pairs": len(pairs),
        "pairs": pairs,
    }
    report.update(preport)
    data.save_report(io.json_dumps(report), "matches.json")
Example #3
def write_report(data: DataSetBase, wall_time: float) -> None:
    image_reports = []
    for image in data.images():
        try:
            txt = data.load_report("features/{}.json".format(image))
            image_reports.append(io.json_loads(txt))
        except IOError:
            logger.warning("No feature report image {}".format(image))

    report = {"wall_time": wall_time, "image_reports": image_reports}
    data.save_report(io.json_dumps(report), "features.json")
Example #4
def run_dataset(data: DataSetBase, input: str, output: str) -> None:
    """Extend a reconstruction with the not-yet-reconstructed images."""

    recs_base = data.load_reconstruction(input)
    if len(recs_base) == 0:
        return

    rec_base = recs_base[0]
    tracks_manager = data.load_tracks_manager()
    rec_base.add_correspondences_from_tracks_manager(tracks_manager)

    images = data.images()
    remaining_images = set(images) - set(rec_base.shots)
    gcp = data.load_ground_control_points()
    report = {}
    rec_report = {}
    report["extend_reconstruction"] = [rec_report]
    rec, rec_report["grow"] = reconstruction.grow_reconstruction(
        data,
        tracks_manager,
        rec_base,
        remaining_images,
        gcp,
    )
    rec_report["num_remaining_images"] = len(remaining_images)
    report["not_reconstructed_images"] = list(remaining_images)
    data.save_reconstruction([rec], output)
    data.save_report(io.json_dumps(report), "reconstruction.json")
Example #5
def is_high_res_panorama(
    data: DataSetBase, image_key: str, image_array: np.ndarray
) -> bool:
    """Detect if image is a panorama."""
    exif = data.load_exif(image_key)
    if exif:
        camera = data.load_camera_models()[exif["camera"]]
        w, h = int(exif["width"]), int(exif["height"])
        exif_pano = pygeometry.Camera.is_panorama(camera.projection_type)
    elif image_array is not None:
        h, w = image_array.shape[:2]
        exif_pano = False
    else:
        return False
    return w == 2 * h or exif_pano
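
The fallback branch detects equirectangular panoramas purely from aspect ratio: a full 360 x 180 degree equirectangular image has width exactly twice its height. A self-contained version of that check:

import numpy as np

def looks_equirectangular(image_array: np.ndarray) -> bool:
    # Equirectangular panoramas span 360 x 180 degrees, so w == 2 * h.
    h, w = image_array.shape[:2]
    return w == 2 * h

print(looks_equirectangular(np.zeros((512, 1024, 3))))  # True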
Example #6
def _load_segmentation_mask(data: DataSetBase,
                            image: str) -> Optional[np.ndarray]:
    """Build a mask from segmentation ignore values.

    The mask is non-zero only for pixels with segmentation
    labels not in segmentation_ignore_values.
    """
    ignore_values = data.segmentation_ignore_values(image)
    if not ignore_values:
        return None

    segmentation = data.load_segmentation(image)
    if segmentation is None:
        return None

    return mask_from_segmentation(segmentation, ignore_values)
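
mask_from_segmentation is not shown here; a plausible standalone equivalent, assuming integer label maps and a list of labels to ignore (the sketch's name and exact output encoding are assumptions):

import numpy as np
from typing import List

def mask_from_segmentation_sketch(segmentation: np.ndarray,
                                  ignore_values: List[int]) -> np.ndarray:
    # Non-zero (255) only for pixels whose label is NOT ignored.
    return np.where(np.isin(segmentation, ignore_values), 0, 255).astype(np.uint8)

segmentation = np.array([[0, 1], [2, 1]], dtype=np.uint8)
print(mask_from_segmentation_sketch(segmentation, [1]))
# [[255   0]
#  [255   0]]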
Example #7
def match_images(
    data: DataSetBase,
    config_override: Dict[str, Any],
    ref_images: List[str],
    cand_images: List[str],
) -> Tuple[Dict[Tuple[str, str], List[Tuple[int, int]]], Dict[str, Any]]:
    """Perform pair matchings between two sets of images.

    It will do matching for each pair (i, j), with i in ref_images and
    j in cand_images, under the assumption that matching(i, j) ==
    matching(j, i). This does not hold for non-symmetric matching
    options such as WORDS. Matches are stored under image i only.
    """

    # Get EXIFs data
    all_images = list(set(ref_images + cand_images))
    exifs = {im: data.load_exif(im) for im in all_images}

    # Generate pairs for matching
    pairs, preport = pairs_selection.match_candidates_from_metadata(
        ref_images,
        cand_images,
        exifs,
        data,
        config_override,
    )

    # Match them!
    return (
        match_images_with_pairs(data, config_override, exifs, pairs),
        preport,
    )
Example #8
def match_images_with_pairs(
    data: DataSetBase,
    config_override: Dict[str, Any],
    exifs: Dict[str, Any],
    pairs: List[Tuple[str, str]],
    poses: Optional[Dict[str, pygeometry.Pose]] = None,
) -> Dict[Tuple[str, str], List[Tuple[int, int]]]:
    """Perform pair matchings given pairs."""
    cameras = data.load_camera_models()
    args = list(
        match_arguments(pairs, data, config_override, cameras, exifs, poses))

    # Perform all pair matchings in parallel
    start = timer()
    logger.info("Matching {} image pairs".format(len(pairs)))
    processes = config_override.get("processes", data.config["processes"])
    mem_per_process = 512
    jobs_per_process = 2
    processes = context.processes_that_fit_in_memory(processes,
                                                     mem_per_process)
    logger.info("Computing pair matching with %d processes" % processes)
    matches = context.parallel_map(match_unwrap_args, args, processes,
                                   jobs_per_process)
    logger.info("Matched {} pairs {} in {} seconds ({} seconds/pair).".format(
        len(pairs),
        log_projection_types(pairs, exifs, cameras),
        timer() - start,
        (timer() - start) / len(pairs) if pairs else 0,
    ))

    # Index results per pair
    resulting_pairs = {}
    for im1, im2, m in matches:
        resulting_pairs[im1, im2] = m
    return resulting_pairs
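
context.parallel_map and context.processes_that_fit_in_memory are OpenSfM helpers not shown here. A rough stand-in for the fan-out and per-pair indexing, using only the standard library (ProcessPoolExecutor plays the role of parallel_map, and the matcher is a dummy):

from concurrent.futures import ProcessPoolExecutor
from typing import List, Tuple

def match_pair(pair: Tuple[str, str]) -> Tuple[str, str, List[Tuple[int, int]]]:
    im1, im2 = pair
    # Dummy matcher: real code returns feature-index correspondences.
    return im1, im2, [(0, 0)]

if __name__ == "__main__":
    pairs = [("a.jpg", "b.jpg"), ("b.jpg", "c.jpg")]
    with ProcessPoolExecutor(max_workers=2) as pool:
        matches = list(pool.map(match_pair, pairs))
    resulting_pairs = {(im1, im2): m for im1, im2, m in matches}
    print(resulting_pairs)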
Example #9
def run_dataset(data: DataSetBase):
    """Compute features for all images."""

    start = timer()
    features_processing.run_features_processing(data, data.images(), False)
    end = timer()
    write_report(data, end - start)
Example #10
def load_features(
    dataset: DataSetBase, images: t.List[str]
) -> t.Tuple[
    t.Dict[str, np.ndarray],
    t.Dict[str, np.ndarray],
    t.Dict[str, np.ndarray],
    t.Dict[str, np.ndarray],
]:
    logging.info("reading features")
    features = {}
    colors = {}
    segmentations = {}
    instances = {}
    for im in images:
        features_data = dataset.load_features(im)

        if not features_data:
            continue

        features[im] = features_data.points[:, :3]
        colors[im] = features_data.colors

        semantic_data = features_data.semantic
        if semantic_data:
            segmentations[im] = semantic_data.segmentation
            if semantic_data.has_instances():
                instances[im] = semantic_data.instances

    return features, colors, segmentations, instances
Example #11
def undistort_reconstruction(
    tracks_manager: Optional[pymap.TracksManager],
    reconstruction: types.Reconstruction,
    data: DataSetBase,
    udata: UndistortedDataSet,
) -> Dict[pymap.Shot, List[pymap.Shot]]:
    all_images = set(data.images())
    image_format = data.config["undistorted_image_format"]
    urec = types.Reconstruction()
    urec.points = reconstruction.points
    urec.reference = reconstruction.reference
    rig_instance_count = itertools.count()
    utracks_manager = pymap.TracksManager()
    logger.debug("Undistorting the reconstruction")
    undistorted_shots = {}
    for shot in reconstruction.shots.values():
        if shot.id not in all_images:
            logger.warning(
                f"Not undistorting {shot.id} as it is missing from the dataset's input images."
            )
            continue
        if shot.camera.projection_type == "perspective":
            urec.add_camera(perspective_camera_from_perspective(shot.camera))
            subshots = [get_shot_with_different_camera(urec, shot, image_format)]
        elif shot.camera.projection_type == "brown":
            urec.add_camera(perspective_camera_from_brown(shot.camera))
            subshots = [get_shot_with_different_camera(urec, shot, image_format)]
        elif shot.camera.projection_type in ["fisheye", "fisheye_opencv"]:
            urec.add_camera(perspective_camera_from_fisheye(shot.camera))
            subshots = [get_shot_with_different_camera(urec, shot, image_format)]
        elif pygeometry.Camera.is_panorama(shot.camera.projection_type):
            subshot_width = int(data.config["depthmap_resolution"])
            subshots = perspective_views_of_a_panorama(
                shot, subshot_width, urec, image_format, rig_instance_count
            )
        else:
            logger.warning(
                f"Not undistorting {shot.id} with unknown camera type."
            )
            continue

        for subshot in subshots:
            if tracks_manager:
                add_subshot_tracks(tracks_manager, utracks_manager, shot, subshot)
        undistorted_shots[shot.id] = subshots

    udata.save_undistorted_reconstruction([urec])
    if tracks_manager:
        udata.save_undistorted_tracks_manager(utracks_manager)

    udata.save_undistorted_shot_ids(
        {
            shot_id: [ushot.id for ushot in ushots]
            for shot_id, ushots in undistorted_shots.items()
        }
    )

    return undistorted_shots
Example #12
def _load_combined_mask(data: DataSetBase, image: str) -> Optional[np.ndarray]:
    """Combine binary mask with segmentation mask.

    Return a mask that is non-zero only where the binary
    mask and the segmentation mask are non-zero.
    """
    mask = data.load_mask(image)
    smask = _load_segmentation_mask(data, image)
    return combine_masks(mask, smask)
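
combine_masks is not shown; a plausible sketch that treats a missing mask as all-pass and otherwise keeps pixels where both masks are non-zero, matching the docstring above (the name and output encoding are assumptions):

import numpy as np
from typing import Optional

def combine_masks_sketch(mask: Optional[np.ndarray],
                         smask: Optional[np.ndarray]) -> Optional[np.ndarray]:
    if mask is None:
        return smask
    if smask is None:
        return mask
    # Non-zero only where both masks are non-zero.
    return ((mask != 0) & (smask != 0)).astype(np.uint8) * 255

print(combine_masks_sketch(np.array([0, 255]), np.array([255, 255])))  # [  0 255]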
Example #13
def run_dataset(data: DataSetBase,
                algorithm: reconstruction.ReconstructionAlgorithm) -> None:
    """Compute the SfM reconstruction."""

    tracks_manager = data.load_tracks_manager()

    if algorithm == reconstruction.ReconstructionAlgorithm.INCREMENTAL:
        report, reconstructions = reconstruction.incremental_reconstruction(
            data, tracks_manager)
    elif algorithm == reconstruction.ReconstructionAlgorithm.TRIANGULATION:
        report, reconstructions = reconstruction.triangulation_reconstruction(
            data, tracks_manager)
    else:
        raise RuntimeError(
            f"Unsupported algorithm for reconstruction {algorithm}")

    data.save_reconstruction(reconstructions)
    data.save_report(io.json_dumps(report), "reconstruction.json")
Example #14
def run_dataset(data: DataSetBase) -> None:
    """Add delaunay meshes to the reconstruction."""

    tracks_manager = data.load_tracks_manager()
    reconstructions = data.load_reconstruction()

    all_shot_ids = set(tracks_manager.get_shot_ids())
    for r in reconstructions:
        for shot in r.shots.values():
            if shot.id in all_shot_ids:
                vertices, faces = mesh.triangle_mesh(shot.id, r,
                                                     tracks_manager)
                shot.mesh.vertices = vertices
                shot.mesh.faces = faces

    data.save_reconstruction(reconstructions,
                             filename="reconstruction.meshed.json",
                             minify=True)
Example #15
def write_report(data: DataSetBase, tracks_manager, features_time,
                 matches_time, tracks_time) -> None:
    view_graph = [
        (k[0], k[1], v)
        for k, v in tracks_manager.get_all_pairs_connectivity().items()
    ]

    report = {
        "wall_times": {
            "load_features": features_time,
            "load_matches": matches_time,
            "compute_tracks": tracks_time,
        },
        "wall_time": features_time + matches_time + tracks_time,
        "num_images": tracks_manager.num_shots(),
        "num_tracks": tracks_manager.num_tracks(),
        "view_graph": view_graph,
    }
    data.save_report(io.json_dumps(report), "tracks.json")
Example #16
def run_dataset(data: DataSetBase) -> None:
    """Match features between image pairs."""

    images = data.images()

    start = timer()
    pairs_matches, preport = matching.match_images(data, {}, images, images)
    matching.save_matches(data, images, pairs_matches)
    matching.clear_cache()
    end = timer()
    write_report(data, preport, list(pairs_matches.keys()), end - start)
Example #17
def _not_on_blackvue_watermark(p1: np.ndarray, p2: np.ndarray, matches,
                               im1: str, im2: str,
                               data: DataSetBase) -> List[Tuple[int, int]]:
    """Filter Blackvue's watermark."""
    meta1 = data.load_exif(im1)
    meta2 = data.load_exif(im2)

    if meta1["make"].lower() == "blackvue":
        matches = [m for m in matches if _blackvue_valid_mask(p1[m[0]])]
    if meta2["make"].lower() == "blackvue":
        matches = [m for m in matches if _blackvue_valid_mask(p2[m[1]])]
    return matches
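
_blackvue_valid_mask is not shown, but the pattern is generic: keep a match only if its keypoint in the flagged image falls outside a known watermark region. A standalone sketch in normalized image coordinates (the cutoff is purely illustrative):

import numpy as np

def valid_mask_sketch(point: np.ndarray) -> bool:
    # Reject keypoints in the bottom strip where the watermark sits;
    # 0.4 is an illustrative normalized-y cutoff, not the real value.
    return point[1] < 0.4

p1 = np.array([[0.1, 0.1], [0.2, 0.45]])
matches = [(0, 0), (1, 1)]
print([m for m in matches if valid_mask_sketch(p1[m[0]])])  # [(0, 0)]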
Example #18
def save_matches(
    data: DataSetBase,
    images_ref: List[str],
    matched_pairs: Dict[Tuple[str, str], List[Tuple[int, int]]],
) -> None:
    """Given pairwise matches (image 1, image 2) - > matches,
    save them such as only {image E images_ref} will store the matches.
    """
    images_ref_set = set(images_ref)
    matches_per_im1 = {im: {} for im in images_ref}
    for (im1, im2), m in matched_pairs.items():
        if im1 in images_ref_set:
            matches_per_im1[im1][im2] = m
        elif im2 in images_ref_set:
            matches_per_im1[im2][im1] = m
        else:
            raise RuntimeError(
                "Couldn't save matches for {}. No image found in images_ref.".
                format((im1, im2)))

    for im1, im1_matches in matches_per_im1.items():
        data.save_matches(im1, im1_matches)
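
A tiny standalone run of the regrouping logic, showing how a (im1, im2) -> matches dict folds into per-reference-image buckets even when a pair is stored reversed (data is made up):

matched_pairs = {
    ("a.jpg", "b.jpg"): [(0, 1)],
    ("c.jpg", "a.jpg"): [(2, 3)],  # reversed pair still lands under "a.jpg"
}
images_ref = ["a.jpg"]
images_ref_set = set(images_ref)

matches_per_im1 = {im: {} for im in images_ref}
for (im1, im2), m in matched_pairs.items():
    if im1 in images_ref_set:
        matches_per_im1[im1][im2] = m
    elif im2 in images_ref_set:
        matches_per_im1[im2][im1] = m

print(matches_per_im1)  # {'a.jpg': {'b.jpg': [(0, 1)], 'c.jpg': [(2, 3)]}}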
Example #19
def load_matches(
    dataset: DataSetBase, images: t.List[str]
) -> t.Dict[t.Tuple[str, str], t.List[t.Tuple[int, int]]]:
    matches = {}
    for im1 in images:
        try:
            im1_matches = dataset.load_matches(im1)
        except IOError:
            continue
        for im2 in im1_matches:
            if im2 in images:
                matches[im1, im2] = im1_matches[im2]
    return matches
Example #20
def invent_reference_from_gps_and_gcp(
        data: DataSetBase,
        images: Optional[List[str]] = None) -> geo.TopocentricConverter:
    lat, lon, alt = 0.0, 0.0, 0.0
    wlat, wlon, walt = 0.0, 0.0, 0.0
    if images is None:
        images = data.images()
    for image in images:
        d = data.load_exif(image)
        if "gps" in d and "latitude" in d["gps"] and "longitude" in d["gps"]:
            w = 1.0 / max(0.01, d["gps"].get("dop", 15))
            lat += w * d["gps"]["latitude"]
            lon += w * d["gps"]["longitude"]
            wlat += w
            wlon += w
            if "altitude" in d["gps"]:
                alt += w * d["gps"]["altitude"]
                walt += w

    if not wlat and not wlon:
        for gcp in data.load_ground_control_points():
            lat += gcp.lla["latitude"]
            lon += gcp.lla["longitude"]
            wlat += 1
            wlon += 1

            if gcp.has_altitude:
                alt += gcp.lla["altitude"]
                walt += 1

    if wlat:
        lat /= wlat
    if wlon:
        lon /= wlon
    if walt:
        alt /= walt

    return geo.TopocentricConverter(lat, lon, 0)  # Set altitude manually.
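
The GPS part is a DOP-weighted mean: each image contributes with weight 1 / max(0.01, dop), so fixes with a high dilution of precision count less. A standalone numeric check (values made up):

readings = [
    {"latitude": 52.0, "longitude": 13.0, "dop": 5.0},
    {"latitude": 52.1, "longitude": 13.1, "dop": 15.0},  # less precise, lower weight
]

lat = lon = wsum = 0.0
for r in readings:
    w = 1.0 / max(0.01, r["dop"])
    lat += w * r["latitude"]
    lon += w * r["longitude"]
    wsum += w

print(lat / wsum, lon / wsum)  # ~52.025 ~13.025, pulled toward the precise fix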
Example #21
def read_images(
    queue: queue.Queue,
    data: DataSetBase,
    images: List[str],
    counter: Counter,
    expected: int,
    force: bool,
) -> None:
    full_queue_timeout = 120
    for image in images:
        logger.info(f"Reading data for image {image} (queue-size={queue.qsize()}")
        image_array = data.load_image(image)
        if data.config["features_bake_segmentation"]:
            segmentation_array = data.load_segmentation(image)
            instances_array = data.load_instances(image)
        else:
            segmentation_array, instances_array = None, None
        args = image, image_array, segmentation_array, instances_array, data, force
        queue.put(args, block=True, timeout=full_queue_timeout)
        counter.increment()
        if counter.value() == expected:
            logger.info("Finished reading images")
            queue.put(None)
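
read_images is the producer half of a bounded producer/consumer pipeline: put() blocks when the queue is full, and a None sentinel is pushed once the shared counter confirms every image was queued. A minimal standalone version of the same pattern using only the standard library (the image loading is faked):

import queue
import threading

def producer(q: queue.Queue, items, expected: int) -> None:
    for i, item in enumerate(items, start=1):
        q.put(item, block=True, timeout=120)  # blocks if the consumer falls behind
        if i == expected:
            q.put(None)  # sentinel: no more work

def consumer(q: queue.Queue) -> None:
    while True:
        item = q.get()
        if item is None:
            break
        print("processing", item)

q = queue.Queue(maxsize=2)
t = threading.Thread(target=consumer, args=(q,))
t.start()
producer(q, ["a.jpg", "b.jpg", "c.jpg"], expected=3)
t.join()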
Example #22
def run_dataset(data: DataSetBase, input: str, output: str) -> None:
    """Reconstruct the from a prior reconstruction."""

    tracks_manager = data.load_tracks_manager()
    rec_prior = data.load_reconstruction(input)
    if len(rec_prior) > 0:
        report, rec = reconstruction.reconstruct_from_prior(
            data, tracks_manager, rec_prior[0])
        data.save_reconstruction([rec], output)
        data.save_report(io.json_dumps(report), "reconstruction.json")
Example #23
def _not_on_vermont_watermark(
    p1: np.ndarray,
    p2: np.ndarray,
    matches: List[Tuple[int, int]],
    im1: str,
    im2: str,
    data: DataSetBase,
) -> List[Tuple[int, int]]:
    """Filter Vermont images watermark."""
    meta1 = data.load_exif(im1)
    meta2 = data.load_exif(im2)

    if meta1["make"] == "VTrans_Camera" and meta1["model"] == "VTrans_Camera":
        matches = [m for m in matches if _vermont_valid_mask(p1[m[0]])]
    if meta2["make"] == "VTrans_Camera" and meta2["model"] == "VTrans_Camera":
        matches = [m for m in matches if _vermont_valid_mask(p2[m[1]])]
    return matches
Example #24
def load_features_mask(
    data: DataSetBase,
    image: str,
    points: np.ndarray,
    mask_image: Optional[np.ndarray] = None,
) -> np.ndarray:
    """Load a feature-wise mask.

    This is a binary array true for features that lie inside the
    combined mask.
    The array is all true when there's no mask.
    """
    if points is None or len(points) == 0:
        return np.array([], dtype=bool)

    if mask_image is None:
        mask_image = _load_combined_mask(data, image)
    if mask_image is None:
        logger.debug(
            "No segmentation for {}, no features masked.".format(image))
        return np.ones((points.shape[0], ), dtype=bool)

    exif = data.load_exif(image)
    width = exif["width"]
    height = exif["height"]
    orientation = exif["orientation"]

    new_height, new_width = mask_image.shape
    ps = upright.opensfm_to_upright(
        points[:, :2],
        width,
        height,
        orientation,
        new_width=new_width,
        new_height=new_height,
    ).astype(int)
    mask = mask_image[ps[:, 1], ps[:, 0]]

    n_removed = np.sum(mask == 0)
    logger.debug("Masking {} / {} ({:.2f}) features for {}".format(
        n_removed, len(mask), n_removed / len(mask), image))

    return np.array(mask, dtype=bool)
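
The core of the lookup is plain integer indexing: keypoints are remapped to the mask's pixel grid, then read out of the mask image, with rows indexed by y and columns by x. A standalone sketch with pixel coordinates:

import numpy as np

mask_image = np.array([[0, 255], [255, 255]], dtype=np.uint8)
ps = np.array([[0, 0], [1, 0], [0, 1]])   # (x, y) pixel coordinates

mask = mask_image[ps[:, 1], ps[:, 0]]     # rows by y, columns by x
print(np.array(mask, dtype=bool))         # [False  True  True]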
Example #25
def _extract_exif(image, data: DataSetBase):
    with data.open_image_file(image) as fp:
        d = exif.extract_exif_from_file(
            fp,
            partial(data.image_size, image),
            data.config["use_exif_size"],
            name=image,
        )

    if data.config["unknown_camera_models_are_different"] and (
            not d["model"] or d["model"] == "unknown"):
        d["model"] = f"unknown_{image}"

    if data.config.get("default_projection_type"):
        d["projection_type"] = data.config.get("default_projection_type")

    d["camera"] = exif.camera_id(d)

    return d
Example #26
def run_dataset(dataset: DataSetBase, input: str, output: str) -> None:
    """Bundle reconstructions.

    Args:
        input: input reconstruction JSON in the dataset
        output: output reconstruction JSON in the dataset

    """

    reconstructions = dataset.load_reconstruction(input)
    camera_priors = dataset.load_camera_models()
    rig_cameras_priors = dataset.load_rig_cameras()
    tracks_manager = dataset.load_tracks_manager()

    # load the tracks manager and add its observations to the reconstruction
    # go through all the points and add their shots
    for reconstruction in reconstructions:
        reconstruction.add_correspondences_from_tracks_manager(tracks_manager)
        gcp = dataset.load_ground_control_points()
        orec.bundle(reconstruction, camera_priors, rig_cameras_priors, gcp,
                    dataset.config)
    dataset.save_reconstruction(reconstructions, output)
Example #27
def reconstruction_from_metadata(data: DataSetBase, images: Iterable[str]) -> types.Reconstruction:
    """Initialize a reconstruction by using EXIF data for constructing shot poses and cameras."""
    data.init_reference()
    rig_assignments = rig.rig_assignments_per_image(data.load_rig_assignments())

    reconstruction = types.Reconstruction()
    reconstruction.reference = data.load_reference()
    reconstruction.cameras = data.load_camera_models()
    for image in images:
        camera_id = data.load_exif(image)["camera"]

        if image in rig_assignments:
            rig_instance_id, rig_camera_id, _ = rig_assignments[image]
        else:
            rig_instance_id = image
            rig_camera_id = camera_id

        reconstruction.add_rig_camera(pymap.RigCamera(pygeometry.Pose(), rig_camera_id))
        reconstruction.add_rig_instance(pymap.RigInstance(rig_instance_id))
        shot = reconstruction.create_shot(
            shot_id=image,
            camera_id=camera_id,
            rig_camera_id=rig_camera_id,
            rig_instance_id=rig_instance_id,
        )

        shot.metadata = get_image_metadata(data, image)

        if not shot.metadata.gps_position.has_value:
            reconstruction.remove_shot(image)
            continue
        gps_pos = shot.metadata.gps_position.value

        shot.pose.set_rotation_matrix(rotation_from_shot_metadata(shot))
        shot.pose.set_origin(gps_pos)
        shot.scale = 1.0
    return reconstruction
Example #28
def detect(
    image: str,
    image_array: np.ndarray,
    segmentation_array: Optional[np.ndarray],
    instances_array: Optional[np.ndarray],
    data: DataSetBase,
    force: bool = False,
) -> None:
    log.setup()

    need_words = (
        data.config["matcher_type"] == "WORDS"
        or data.config["matching_bow_neighbors"] > 0
    )
    has_words = not need_words or data.words_exist(image)
    has_features = data.features_exist(image)

    if not force and has_features and has_words:
        logger.info(
            "Skip recomputing {} features for image {}".format(
                data.feature_type().upper(), image
            )
        )
        return

    logger.info(
        "Extracting {} features for image {}".format(data.feature_type().upper(), image)
    )

    start = timer()

    p_unmasked, f_unmasked, c_unmasked = features.extract_features(
        image_array, data.config, is_high_res_panorama(data, image, image_array)
    )

    # Load segmentation and bake it in the data
    if data.config["features_bake_segmentation"]:
        exif = data.load_exif(image)
        s_unsorted, i_unsorted = bake_segmentation(
            image_array, p_unmasked, segmentation_array, instances_array, exif
        )
        p_unsorted = p_unmasked
        f_unsorted = f_unmasked
        c_unsorted = c_unmasked
    # Load segmentation, make a mask from it and apply it
    else:
        s_unsorted, i_unsorted = None, None
        fmask = masking.load_features_mask(data, image, p_unmasked)
        p_unsorted = p_unmasked[fmask]
        f_unsorted = f_unmasked[fmask]
        c_unsorted = c_unmasked[fmask]

    if len(p_unsorted) == 0:
        logger.warning("No features found in image {}".format(image))

    size = p_unsorted[:, 2]
    order = np.argsort(size)
    p_sorted = p_unsorted[order, :]
    f_sorted = f_unsorted[order, :]
    c_sorted = c_unsorted[order, :]
    if s_unsorted is not None:
        semantic_data = features.SemanticData(
            s_unsorted[order],
            i_unsorted[order] if i_unsorted is not None else None,
            data.segmentation_labels(),
        )
    else:
        semantic_data = None
    features_data = features.FeaturesData(p_sorted, f_sorted, c_sorted, semantic_data)
    data.save_features(image, features_data)

    if need_words:
        bows = bow.load_bows(data.config)
        n_closest = data.config["bow_words_to_match"]
        closest_words = bows.map_to_words(
            f_sorted, n_closest, data.config["bow_matcher_type"]
        )
        data.save_words(image, closest_words)

    end = timer()
    report = {
        "image": image,
        "num_features": len(p_sorted),
        "wall_time": end - start,
    }
    data.save_report(io.json_dumps(report), "features/{}.json".format(image))
Example #29
def match_candidates_from_metadata(
    images_ref: List[str],
    images_cand: List[str],
    exifs: Dict[str, Any],
    data: DataSetBase,
    config_override: Dict[str, Any],
) -> Tuple[List[Tuple[str, str]], Dict[str, Any]]:
    """Compute candidate matching pairs between between images_ref and images_cand

    Returns a list of pairs (im1, im2) such that (im1 in images_ref) is true.
    Returned pairs are unique given that (i, j) == (j, i).
    """

    overriden_config = data.config.copy()
    overriden_config.update(config_override)

    max_distance = overriden_config["matching_gps_distance"]
    gps_neighbors = overriden_config["matching_gps_neighbors"]
    graph_rounds = overriden_config["matching_graph_rounds"]
    time_neighbors = overriden_config["matching_time_neighbors"]
    order_neighbors = overriden_config["matching_order_neighbors"]
    bow_neighbors = overriden_config["matching_bow_neighbors"]
    bow_gps_distance = overriden_config["matching_bow_gps_distance"]
    bow_gps_neighbors = overriden_config["matching_bow_gps_neighbors"]
    bow_other_cameras = overriden_config["matching_bow_other_cameras"]
    vlad_neighbors = overriden_config["matching_vlad_neighbors"]
    vlad_gps_distance = overriden_config["matching_vlad_gps_distance"]
    vlad_gps_neighbors = overriden_config["matching_vlad_gps_neighbors"]
    vlad_other_cameras = overriden_config["matching_vlad_other_cameras"]

    data.init_reference()
    reference = data.load_reference()

    if not all(map(has_gps_info, exifs.values())):
        if gps_neighbors != 0:
            logger.warning(
                "Not all images have GPS info. Disabling matching_gps_neighbors."
            )
        gps_neighbors = 0
        max_distance = 0
        graph_rounds = 0

    images_ref.sort()

    if (
        max_distance
        == gps_neighbors
        == time_neighbors
        == order_neighbors
        == bow_neighbors
        == vlad_neighbors
        == graph_rounds
        == 0
    ):
        # All pair selection strategies deactivated so we match all pairs
        d = set()
        t = set()
        g = set()
        o = set()
        b = set()
        v = set()
        pairs = {sorted_pair(i, j) for i in images_ref for j in images_cand if i != j}
    else:
        d = match_candidates_by_distance(
            images_ref, images_cand, exifs, reference, gps_neighbors, max_distance
        )
        g = match_candidates_by_graph(
            images_ref, images_cand, exifs, reference, graph_rounds
        )
        t = match_candidates_by_time(images_ref, images_cand, exifs, time_neighbors)
        o = match_candidates_by_order(images_ref, images_cand, order_neighbors)
        b = match_candidates_with_bow(
            data,
            images_ref,
            images_cand,
            exifs,
            reference,
            bow_neighbors,
            bow_gps_distance,
            bow_gps_neighbors,
            bow_other_cameras,
        )
        v = match_candidates_with_vlad(
            data,
            images_ref,
            images_cand,
            exifs,
            reference,
            vlad_neighbors,
            vlad_gps_distance,
            vlad_gps_neighbors,
            vlad_other_cameras,
            {},
        )
        pairs = d | g | t | o | set(b) | set(v)

    pairs = ordered_pairs(pairs, images_ref)

    report = {
        "num_pairs_distance": len(d),
        "num_pairs_graph": len(g),
        "num_pairs_time": len(t),
        "num_pairs_order": len(o),
        "num_pairs_bow": len(b),
        "num_pairs_vlad": len(v),
    }
    return pairs, report
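
sorted_pair and ordered_pairs are helpers not shown here; the deduplication idea is to canonicalize each pair so (i, j) and (j, i) collapse to a single set entry. A standalone sketch of a plausible sorted_pair (the implementation is an assumption):

from typing import Tuple

def sorted_pair(i: str, j: str) -> Tuple[str, str]:
    return (i, j) if i < j else (j, i)

images_ref = ["a.jpg", "b.jpg"]
images_cand = ["b.jpg", "a.jpg"]
pairs = {sorted_pair(i, j) for i in images_ref for j in images_cand if i != j}
print(pairs)  # {('a.jpg', 'b.jpg')} -- one entry, not two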
Example #30
def average_image_size(data: DataSetBase) -> float:
    average_size_mb = 0.0
    for camera in data.load_camera_models().values():
        average_size_mb += camera.width * camera.height * 4 / 1024 / 1024
    return average_size_mb / max(1, len(data.load_camera_models()))
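
The estimate assumes 4 bytes per pixel (e.g. an RGBA or float32 single-channel buffer) and converts to megabytes. A quick numeric check for a 1920x1080 camera:

width, height = 1920, 1080
size_mb = width * height * 4 / 1024 / 1024
print(round(size_mb, 2))  # 7.91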