Example No. 1
def add_pano_subshot_tracks(
    tracks_manager: pymap.TracksManager,
    utracks_manager: pymap.TracksManager,
    panoshot: pymap.Shot,
    perspectiveshot: pymap.Shot,
) -> None:
    """Add edges between subshots and visible tracks."""
    for track_id, obs in tracks_manager.get_shot_observations(panoshot.id).items():
        bearing = panoshot.camera.pixel_bearing(obs.point)
        rotation = np.dot(
            perspectiveshot.pose.get_rotation_matrix(),
            panoshot.pose.get_rotation_matrix().T,
        )

        rotated_bearing = np.dot(bearing, rotation.T)
        if rotated_bearing[2] <= 0:
            continue

        perspective_feature = perspectiveshot.camera.project(rotated_bearing)
        if (
            perspective_feature[0] < -0.5
            or perspective_feature[0] > 0.5
            or perspective_feature[1] < -0.5
            or perspective_feature[1] > 0.5
        ):
            continue

        obs.point = perspective_feature
        utracks_manager.add_observation(perspectiveshot.id, track_id, obs)
Example No. 2
def all_common_tracks(
    tracks_manager: pymap.TracksManager,
    include_features: bool = True,
    min_common: int = 50,
) -> t.Dict[t.Tuple[str, str], t.Union[TPairTracks, t.List[str]]]:
    """List of tracks observed by each image pair.

    Args:
        tracks_manager: tracks manager
        include_features: whether to include the features from the images
        min_common: the minimum number of tracks the two images need to have
            in common

    Returns:
        tuple: im1, im2 -> tuple: tracks, features from first image, features
        from second image
    """
    common_tracks = {}
    for (im1, im2), size in tracks_manager.get_all_pairs_connectivity().items():
        if size < min_common:
            continue

        tuples = tracks_manager.get_all_common_observations(im1, im2)
        if include_features:
            common_tracks[im1, im2] = (
                [v for v, _, _ in tuples],
                np.array([p.point for _, p, _ in tuples]),
                np.array([p.point for _, _, p in tuples]),
            )
        else:
            common_tracks[im1, im2] = [v for v, _, _ in tuples]
    return common_tracks
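A minimal usage sketch for the function above, assuming the standard opensfm.dataset.DataSet API and a dataset directory with tracks already computed ("path/to/dataset" is a placeholder):

from opensfm import dataset

data = dataset.DataSet("path/to/dataset")  # placeholder dataset path
tracks_manager = data.load_tracks_manager()
pairs = all_common_tracks(tracks_manager, include_features=False, min_common=100)
for (im1, im2), track_ids in pairs.items():
    print(im1, im2, "share", len(track_ids), "tracks")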
Example No. 3
def retriangulate(
    tracks_manager: pymap.TracksManager,
    reconstruction: types.Reconstruction,
    config: Dict[str, Any],
) -> Dict[str, Any]:
    """Retrianguate all points"""
    chrono = Chronometer()
    report = {}
    report["num_points_before"] = len(reconstruction.points)

    threshold = config["triangulation_threshold"]
    min_ray_angle = config["triangulation_min_ray_angle"]

    reconstruction.points = {}

    all_shots_ids = set(tracks_manager.get_shot_ids())

    triangulator = TrackTriangulator(tracks_manager, reconstruction)
    tracks = set()
    for image in reconstruction.shots.keys():
        if image in all_shots_ids:
            tracks.update(tracks_manager.get_shot_observations(image).keys())
    for track in tracks:
        if config["triangulation_type"] == "ROBUST":
            triangulator.triangulate_robust(track, threshold, min_ray_angle)
        elif config["triangulation_type"] == "FULL":
            triangulator.triangulate(track, threshold, min_ray_angle)

    report["num_points_after"] = len(reconstruction.points)
    chrono.lap("retriangulate")
    report["wall_time"] = chrono.total_time()
    return report
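A hedged usage sketch: re-triangulate the first partial reconstruction of a dataset (placeholder path; assumes tracks and a reconstruction have already been computed and saved):

from opensfm import dataset

data = dataset.DataSet("path/to/dataset")  # placeholder dataset path
tracks_manager = data.load_tracks_manager()
reconstruction = data.load_reconstruction()[0]  # first partial reconstruction
report = retriangulate(tracks_manager, reconstruction, data.config)
print(report["num_points_before"], "->", report["num_points_after"])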
Example No. 4
    def add_correspondences_from_tracks_manager(
        self, tracks_manager: pymap.TracksManager
    ) -> None:
        for track_id in tracks_manager.get_track_ids():
            if track_id not in self.points:
                continue
            track_obs = tracks_manager.get_track_observations(track_id)
            for shot_id in track_obs.keys():
                if shot_id in self.shots:
                    observation = tracks_manager.get_observation(shot_id, track_id)
                    self.add_observation(shot_id, track_id, observation)
Example No. 5
def as_weighted_graph(tracks_manager: pymap.TracksManager) -> nx.Graph:
    """Return the tracks manager as a weighted graph
    having shots a snodes and weighted by the # of
    common tracks between two nodes.
    """
    images = tracks_manager.get_shot_ids()
    image_graph = nx.Graph()
    for im in images:
        image_graph.add_node(im)
    for k, v in tracks_manager.get_all_pairs_connectivity().items():
        image_graph.add_edge(k[0], k[1], weight=v)
    return image_graph
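A small sketch of how the resulting graph could be inspected with networkx (assumes `tracks_manager` was loaded as in the earlier sketches):

import networkx as nx

graph = as_weighted_graph(tracks_manager)
print("connected components:", nx.number_connected_components(graph))
if graph.number_of_edges() > 0:
    im1, im2, weight = max(graph.edges(data="weight"), key=lambda e: e[2])
    print("best connected pair:", im1, im2, "with", weight, "common tracks")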
Example No. 6
def add_subshot_tracks(
    tracks_manager: pymap.TracksManager,
    utracks_manager: pymap.TracksManager,
    shot: pymap.Shot,
    subshot: pymap.Shot,
) -> None:
    """Add shot tracks to the undistorted tracks_manager."""
    if shot.id not in tracks_manager.get_shot_ids():
        return

    if pygeometry.Camera.is_panorama(shot.camera.projection_type):
        add_pano_subshot_tracks(tracks_manager, utracks_manager, shot, subshot)
    else:
        for track_id, obs in tracks_manager.get_shot_observations(shot.id).items():
            utracks_manager.add_observation(subshot.id, track_id, obs)
Example No. 7
def features_statistics(
    data: DataSetBase,
    tracks_manager: pymap.TracksManager,
    reconstructions: List[types.Reconstruction],
) -> Dict[str, Any]:
    stats = {}
    detected = []
    images = {s for r in reconstructions for s in r.shots}
    for im in images:
        features_data = feature_loader.instance.load_all_data(
            data, im, False, False)
        if not features_data:
            continue
        detected.append(len(features_data.points))
    if len(detected) > 0:
        stats["detected_features"] = {
            "min": min(detected),
            "max": max(detected),
            "mean": int(np.mean(detected)),
            "median": int(np.median(detected)),
        }
    else:
        stats["detected_features"] = {
            "min": -1,
            "max": -1,
            "mean": -1,
            "median": -1
        }

    per_shots = defaultdict(int)
    for rec in reconstructions:
        all_points_keys = set(rec.points.keys())
        for shot_id in rec.shots:
            if shot_id not in tracks_manager.get_shot_ids():
                continue
            for point_id in tracks_manager.get_shot_observations(shot_id):
                if point_id not in all_points_keys:
                    continue
                per_shots[shot_id] += 1
    per_shots = list(per_shots.values())

    stats["reconstructed_features"] = {
        "min": int(min(per_shots)) if len(per_shots) > 0 else -1,
        "max": int(max(per_shots)) if len(per_shots) > 0 else -1,
        "mean": int(np.mean(per_shots)) if len(per_shots) > 0 else -1,
        "median": int(np.median(per_shots)) if len(per_shots) > 0 else -1,
    }
    return stats
Example No. 8
def triangle_mesh_spherical(
        shot_id: str, r: types.Reconstruction,
        tracks_manager: pymap.TracksManager) -> Tuple[List[Any], List[Any]]:
    shot = r.shots[shot_id]

    bearings = []
    vertices = []

    # Add vertices to ensure that the camera is inside the convex hull
    # of the points
    for point in itertools.product([-1, 1], repeat=3):  # vertices of a cube
        bearing = 0.3 * np.array(point) / np.linalg.norm(point)
        bearings.append(bearing)
        point = shot.pose.transform_inverse(bearing)
        vertices.append(point.tolist())

    for track_id in tracks_manager.get_shot_observations(shot_id):
        if track_id in r.points:
            point = r.points[track_id].coordinates
            direction = shot.pose.transform(point)
            pixel = direction / np.linalg.norm(direction)
            if not np.isnan(pixel).any():
                vertices.append(point)
                bearings.append(pixel.tolist())

    tri = scipy.spatial.ConvexHull(bearings)
    faces = tri.simplices.tolist()

    return vertices, faces
Example No. 9
def triangle_mesh(shot_id: str, r: types.Reconstruction,
                  tracks_manager: pymap.TracksManager):
    """
    Create triangle meshes in a list
    """
    if shot_id not in r.shots or shot_id not in tracks_manager.get_shot_ids():
        return [], []

    shot = r.shots[shot_id]

    if shot.camera.projection_type in [
            "perspective",
            "brown",
            "radial",
            "simple_radial",
    ]:
        return triangle_mesh_perspective(shot_id, r, tracks_manager)
    elif shot.camera.projection_type in [
            "fisheye",
            "fisheye_opencv",
            "fisheye62",
            "fisheye624",
            "dual",
    ]:
        return triangle_mesh_fisheye(shot_id, r, tracks_manager)
    elif pygeometry.Camera.is_panorama(shot.camera.projection_type):
        return triangle_mesh_spherical(shot_id, r, tracks_manager)
    else:
        raise NotImplementedError(
            f"triangle_mesh not implemented for projection type {shot.camera.projection_type}"
        )
Example No. 10
def reconstruct_from_prior(
    data: DataSetBase,
    tracks_manager: pymap.TracksManager,
    rec_prior: types.Reconstruction,
) -> Tuple[Dict[str, Any], types.Reconstruction]:
    """Retriangulate a new reconstruction from the rec_prior"""
    reconstruction = types.Reconstruction()
    report = {}
    rec_report = {}
    report["retriangulate"] = [rec_report]
    images = tracks_manager.get_shot_ids()

    # copy prior poses, cameras
    reconstruction.cameras = rec_prior.cameras
    for shot in rec_prior.shots.values():
        reconstruction.add_shot(shot)
    prior_images = set(rec_prior.shots)
    remaining_images = set(images) - prior_images

    rec_report["num_prior_images"] = len(prior_images)
    rec_report["num_remaining_images"] = len(remaining_images)

    # Start with the known poses
    triangulate_shot_features(tracks_manager, reconstruction, prior_images,
                              data.config)
    paint_reconstruction(data, tracks_manager, reconstruction)
    report["not_reconstructed_images"] = list(remaining_images)
    return report, reconstruction
Example No. 11
def add_observation_to_reconstruction(
    tracks_manager: pymap.TracksManager,
    reconstruction: types.Reconstruction,
    shot_id: str,
    track_id: str,
) -> None:
    observation = tracks_manager.get_observation(shot_id, track_id)
    reconstruction.add_observation(shot_id, track_id, observation)
Example No. 12
def _length_histogram(
        tracks_manager: pymap.TracksManager,
        points: Dict[str, pymap.Landmark]) -> Tuple[List[str], List[int]]:
    hist = defaultdict(int)
    for point in points.values():
        obs_count = point.number_of_observations()
        if not obs_count:
            obs_count = len(tracks_manager.get_track_observations(point.id))
        hist[obs_count] += 1
    return list(hist.keys()), list(hist.values())
Example No. 13
def compute_common_tracks(
    reconstruction1: types.Reconstruction,
    reconstruction2: types.Reconstruction,
    tracks_manager1: pymap.TracksManager,
    tracks_manager2: pymap.TracksManager,
) -> List[Tuple[str, str]]:
    common_tracks = set()
    common_images = set(reconstruction1.shots.keys()).intersection(
        reconstruction2.shots.keys())

    all_shot_ids1 = set(tracks_manager1.get_shot_ids())
    all_shot_ids2 = set(tracks_manager2.get_shot_ids())
    for image in common_images:
        if image not in all_shot_ids1 or image not in all_shot_ids2:
            continue
        at_shot1 = tracks_manager1.get_shot_observations(image)
        at_shot2 = tracks_manager2.get_shot_observations(image)
        for t1, t2 in corresponding_tracks(at_shot1, at_shot2):
            if t1 in reconstruction1.points and t2 in reconstruction2.points:
                common_tracks.add((t1, t2))
    return list(common_tracks)
Example No. 14
def triangulate_shot_features(
    tracks_manager: pymap.TracksManager,
    reconstruction: types.Reconstruction,
    shot_ids: Set[str],
    config: Dict[str, Any],
) -> None:
    """Reconstruct as many tracks seen in shot_id as possible."""
    reproj_threshold = config["triangulation_threshold"]
    min_ray_angle = config["triangulation_min_ray_angle"]

    triangulator = TrackTriangulator(tracks_manager, reconstruction)

    all_shots_ids = set(tracks_manager.get_shot_ids())
    tracks_ids = {
        t
        for s in shot_ids if s in all_shots_ids
        for t in tracks_manager.get_shot_observations(s)
    }
    for track in tracks_ids:
        if track not in reconstruction.points:
            triangulator.triangulate(track, reproj_threshold, min_ray_angle)
Example No. 15
def incremental_reconstruction(
    data: DataSetBase, tracks_manager: pymap.TracksManager
) -> Tuple[Dict[str, Any], List[types.Reconstruction]]:
    """Run the entire incremental reconstruction pipeline."""
    logger.info("Starting incremental reconstruction")
    report = {}
    chrono = Chronometer()

    images = tracks_manager.get_shot_ids()

    if not data.reference_lla_exists():
        data.invent_reference_lla(images)

    remaining_images = set(images)
    gcp = data.load_ground_control_points()
    common_tracks = tracking.all_common_tracks_with_features(tracks_manager)
    reconstructions = []
    pairs = compute_image_pairs(common_tracks, data)
    chrono.lap("compute_image_pairs")
    report["num_candidate_image_pairs"] = len(pairs)
    report["reconstructions"] = []
    for im1, im2 in pairs:
        if im1 in remaining_images and im2 in remaining_images:
            rec_report = {}
            report["reconstructions"].append(rec_report)
            _, p1, p2 = common_tracks[im1, im2]
            reconstruction, rec_report["bootstrap"] = bootstrap_reconstruction(
                data, tracks_manager, im1, im2, p1, p2)

            if reconstruction:
                remaining_images -= set(reconstruction.shots)
                reconstruction, rec_report["grow"] = grow_reconstruction(
                    data,
                    tracks_manager,
                    reconstruction,
                    remaining_images,
                    gcp,
                )
                reconstructions.append(reconstruction)
                reconstructions = sorted(reconstructions,
                                         key=lambda x: -len(x.shots))

    for k, r in enumerate(reconstructions):
        logger.info("Reconstruction {}: {} images, {} points".format(
            k, len(r.shots), len(r.points)))
    logger.info("{} partial reconstructions in total.".format(
        len(reconstructions)))
    chrono.lap("compute_reconstructions")
    report["wall_times"] = dict(chrono.lap_times())
    report["not_reconstructed_images"] = list(remaining_images)
    return report, reconstructions
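A minimal driver sketch, assuming a dataset with features, matches and tracks already computed (placeholder path; error handling omitted):

from opensfm import dataset

data = dataset.DataSet("path/to/dataset")  # placeholder dataset path
tracks_manager = data.load_tracks_manager()
report, reconstructions = incremental_reconstruction(data, tracks_manager)
data.save_reconstruction(reconstructions)  # persist the partial reconstructions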
Example No. 16
def as_graph(tracks_manager: pymap.TracksManager) -> nx.Graph:
    """Return the tracks manager as a bipartite graph (legacy)."""
    tracks = tracks_manager.get_track_ids()
    images = tracks_manager.get_shot_ids()

    graph = nx.Graph()
    for track_id in tracks:
        graph.add_node(track_id, bipartite=1)
    for shot_id in images:
        graph.add_node(shot_id, bipartite=0)
    for track_id in tracks:
        for im, obs in tracks_manager.get_track_observations(track_id).items():
            graph.add_edge(
                im,
                track_id,
                feature=obs.point,
                feature_scale=obs.scale,
                feature_id=obs.id,
                feature_color=obs.color,
                feature_segmentation=obs.segmentation,
                feature_instance=obs.instance,
            )
    return graph
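For illustration, the bipartite graph can be queried per shot (a sketch; assumes `tracks_manager` is already loaded and non-empty):

graph = as_graph(tracks_manager)
some_shot = tracks_manager.get_shot_ids()[0]
print(some_shot, "observes", len(graph[some_shot]), "tracks in the legacy graph")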
Example No. 17
def common_tracks_double_dict(
    tracks_manager: pymap.TracksManager,
) -> t.Dict[str, t.Dict[str, t.List[str]]]:
    """List of track ids observed by each image pair.

    Return a dict, ``res``, such that ``res[im1][im2]`` is the list of
    common tracks between ``im1`` and ``im2``.
    """
    common_tracks_per_pair = tracking.all_common_tracks_without_features(
        tracks_manager)
    res = {image: {} for image in tracks_manager.get_shot_ids()}
    for (im1, im2), v in common_tracks_per_pair.items():
        res[im1][im2] = v
        res[im2][im1] = v
    return res
Example No. 18
def paint_reconstruction(
    data: DataSetBase,
    tracks_manager: pymap.TracksManager,
    reconstruction: types.Reconstruction,
) -> None:
    """Set the color of the points from the color of the tracks."""
    for track_id, point in reconstruction.points.items():
        observations = tracks_manager.get_track_observations(str(track_id))
        first_obs = next(iter(observations.values()))
        point.color = list(map(float, first_obs.color))
Example No. 19
def triangle_mesh_fisheye(
        shot_id: str, r: types.Reconstruction,
        tracks_manager: pymap.TracksManager) -> Tuple[List[Any], List[Any]]:
    shot = r.shots[shot_id]

    bearings = []
    vertices = []

    # Add boundary vertices
    num_circle_points = 20
    for i in range(num_circle_points):
        a = 2 * np.pi * float(i) / num_circle_points
        point = 30 * np.array([np.cos(a), np.sin(a), 0])
        bearing = point / np.linalg.norm(point)
        point = shot.pose.transform_inverse(point)
        vertices.append(point.tolist())
        bearings.append(bearing)

    # Add a single vertex in front of the camera
    point = 30 * np.array([0, 0, 1])
    bearing = 0.3 * point / np.linalg.norm(point)
    point = shot.pose.transform_inverse(point)
    vertices.append(point.tolist())
    bearings.append(bearing)

    # Add reconstructed points
    for track_id in tracks_manager.get_shot_observations(shot_id):
        if track_id in r.points:
            point = r.points[track_id].coordinates
            direction = shot.pose.transform(point)
            pixel = direction / np.linalg.norm(direction)
            if not np.isnan(pixel).any():
                vertices.append(point)
                bearings.append(pixel.tolist())

    # Triangulate
    tri = scipy.spatial.ConvexHull(bearings)
    faces = tri.simplices.tolist()

    # Remove faces having only boundary vertices
    def good_face(face):
        return (face[0] >= num_circle_points or face[1] >= num_circle_points
                or face[2] >= num_circle_points)

    faces = list(filter(good_face, faces))

    return vertices, faces
Example No. 20
def triangle_mesh_perspective(
        shot_id: str, r: types.Reconstruction,
        tracks_manager: pymap.TracksManager) -> Tuple[List[Any], List[Any]]:
    shot = r.shots[shot_id]
    cam = shot.camera

    dx = float(cam.width) / 2 / max(cam.width, cam.height)
    dy = float(cam.height) / 2 / max(cam.width, cam.height)
    pixels = [[-dx, -dy], [-dx, dy], [dx, dy], [dx, -dy]]
    vertices = [None for i in range(4)]
    for track_id in tracks_manager.get_shot_observations(shot_id):
        if track_id in r.points:
            point = r.points[track_id]
            pixel = shot.project(point.coordinates)
            nonans = not np.isnan(pixel).any()
            if nonans and -dx <= pixel[0] <= dx and -dy <= pixel[1] <= dy:
                vertices.append(point.coordinates)
                pixels.append(pixel.tolist())

    try:
        tri = scipy.spatial.Delaunay(pixels)
    except Exception as e:
        logger.error("Delaunay triangulation failed for input: {}".format(
            repr(pixels)))
        raise e

    sums = [0.0, 0.0, 0.0, 0.0]
    depths = [0.0, 0.0, 0.0, 0.0]
    for t in tri.simplices:
        for i in range(4):
            if i in t:
                for j in t:
                    if j >= 4:
                        depths[i] += shot.pose.transform(vertices[j])[2]
                        sums[i] += 1
    for i in range(4):
        if sums[i] > 0:
            d = depths[i] / sums[i]
        else:
            d = 50.0
        vertices[i] = back_project_no_distortion(shot, pixels[i], d).tolist()

    faces = tri.simplices.tolist()
    return vertices, faces
Example No. 21
def common_tracks(
    tracks_manager: pymap.TracksManager, im1: str, im2: str
) -> t.Tuple[t.List[str], np.ndarray, np.ndarray]:
    """List of tracks observed in both images.

    Args:
        tracks_manager: tracks manager
        im1: name of the first image
        im2: name of the second image

    Returns:
        tuple: tracks, feature from first image, feature from second image
    """
    t1 = tracks_manager.get_shot_observations(im1)
    t2 = tracks_manager.get_shot_observations(im2)
    tracks, p1, p2 = [], [], []
    for track, obs in t1.items():
        if track in t2:
            p1.append(obs.point)
            p2.append(t2[track].point)
            tracks.append(track)
    p1 = np.array(p1)
    p2 = np.array(p2)
    return tracks, p1, p2
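A quick usage sketch (assumes `tracks_manager` is loaded and contains at least two shots; the image names are taken from the manager itself):

shot_ids = sorted(tracks_manager.get_shot_ids())
if len(shot_ids) >= 2:
    track_ids, p1, p2 = common_tracks(tracks_manager, shot_ids[0], shot_ids[1])
    print(len(track_ids), "common tracks; feature arrays:", p1.shape, p2.shape)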
Example No. 22
def reconstruction_statistics(
    data: DataSetBase,
    tracks_manager: pymap.TracksManager,
    reconstructions: List[types.Reconstruction],
) -> Dict[str, Any]:
    stats = {}

    stats["components"] = len(reconstructions)
    gps_count = 0
    for rec in reconstructions:
        for shot in rec.shots.values():
            gps_count += shot.metadata.gps_position.has_value
    stats["has_gps"] = gps_count > 2
    stats["has_gcp"] = True if data.load_ground_control_points() else False

    stats["initial_points_count"] = tracks_manager.num_tracks()
    stats["initial_shots_count"] = len(data.images())

    stats["reconstructed_points_count"] = 0
    stats["reconstructed_shots_count"] = 0
    stats["observations_count"] = 0
    hist_agg = defaultdict(int)

    for rec in reconstructions:
        if len(rec.points) > 0:
            stats["reconstructed_points_count"] += len(rec.points)
        stats["reconstructed_shots_count"] += len(rec.shots)

        # get track length distribution for the current reconstruction
        hist, values = _length_histogram(tracks_manager, rec.points)

        # update aggregated histogram
        for length, count_tracks in zip(hist, values):
            hist_agg[length] += count_tracks

    # total observations and average track lengths
    hist_agg = sorted(hist_agg.items(), key=lambda x: x[0])
    lengths = np.array([int(x[0]) for x in hist_agg])
    counts = np.array([x[1] for x in hist_agg])

    points_count = stats["reconstructed_points_count"]
    points_count_over_two = sum(counts[1:])
    stats["observations_count"] = int(sum(lengths * counts))
    stats["average_track_length"] = ((stats["observations_count"] /
                                      points_count)
                                     if points_count > 0 else -1)
    stats["average_track_length_over_two"] = (
        (int(sum(lengths[1:] * counts[1:])) /
         points_count_over_two) if points_count_over_two > 0 else -1)
    stats["histogram_track_length"] = {k: v for k, v in hist_agg}

    (
        avg_normalized,
        avg_pixels,
        avg_angular,
        (hist_normalized, bins_normalized),
        (hist_pixels, bins_pixels),
        (hist_angular, bins_angular),
    ) = _projection_error(tracks_manager, reconstructions)
    stats["reprojection_error_normalized"] = avg_normalized
    stats["reprojection_error_pixels"] = avg_pixels
    stats["reprojection_error_angular"] = avg_angular
    stats["reprojection_histogram_normalized"] = (
        list(map(float, hist_normalized)),
        list(map(float, bins_normalized)),
    )
    stats["reprojection_histogram_pixels"] = (
        list(map(float, hist_pixels)),
        list(map(float, bins_pixels)),
    )
    stats["reprojection_histogram_angular"] = (
        list(map(float, hist_angular)),
        list(map(float, bins_angular)),
    )

    return stats
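A hedged sketch for dumping a few of the computed statistics (assumes `data` and `tracks_manager` are loaded as in the earlier sketches and a reconstruction exists):

import json

stats = reconstruction_statistics(data, tracks_manager, data.load_reconstruction())
summary = {k: stats[k] for k in ("components", "observations_count", "average_track_length")}
print(json.dumps(summary, indent=2))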
Example No. 23
def save_matchgraph(
    data: DataSetBase,
    tracks_manager: pymap.TracksManager,
    reconstructions: List[types.Reconstruction],
    output_path: str,
    io_handler: io.IoFilesystemBase,
) -> None:
    all_shots = []
    all_points = []
    shot_component = {}
    for i, rec in enumerate(reconstructions):
        all_points += rec.points
        all_shots += rec.shots
        for shot in rec.shots:
            shot_component[shot] = i

    connectivity = tracks_manager.get_all_pairs_connectivity(
        all_shots, all_points)
    all_values = connectivity.values()
    lowest = np.percentile(list(all_values), 5)
    highest = np.percentile(list(all_values), 95)

    plt.clf()
    cmap = cm.get_cmap("viridis")
    for (node1, node2), edge in sorted(connectivity.items(),
                                       key=lambda x: x[1]):
        if edge < 2 * data.config["resection_min_inliers"]:
            continue
        comp1 = shot_component[node1]
        comp2 = shot_component[node2]
        if comp1 != comp2:
            continue
        o1 = reconstructions[comp1].shots[node1].pose.get_origin()
        o2 = reconstructions[comp2].shots[node2].pose.get_origin()
        c = max(0, min(1.0, 1 - (edge - lowest) / (highest - lowest)))
        plt.plot([o1[0], o2[0]], [o1[1], o2[1]], linestyle="-", color=cmap(c))

    for i, rec in enumerate(reconstructions):
        for shot in rec.shots.values():
            o = shot.pose.get_origin()
            c = i / len(reconstructions)
            plt.plot(o[0], o[1], linestyle="", marker="o", color=cmap(c))

    plt.xticks([])
    plt.yticks([])
    ax = plt.gca()
    for b in ["top", "bottom", "left", "right"]:
        ax.spines[b].set_visible(False)

    norm = colors.Normalize(vmin=lowest, vmax=highest)
    sm = cm.ScalarMappable(norm=norm, cmap=cmap.reversed())
    sm.set_array([])
    plt.colorbar(
        sm,
        orientation="horizontal",
        label="Number of matches between images",
        pad=0.0,
    )

    with io_handler.open(os.path.join(output_path, "matchgraph.png"),
                         "wb") as fwb:
        plt.savefig(
            fwb,
            dpi=300,
            bbox_inches="tight",
        )
Example No. 24
def resect(
    data: DataSetBase,
    tracks_manager: pymap.TracksManager,
    reconstruction: types.Reconstruction,
    shot_id: str,
    threshold: float,
    min_inliers: int,
) -> Tuple[bool, Set[str], Dict[str, Any]]:
    """Try resecting and adding a shot to the reconstruction.

    Return:
        True on success.
    """

    rig_assignments = data.load_rig_assignments_per_image()
    camera = reconstruction.cameras[data.load_exif(shot_id)["camera"]]

    bs, Xs, ids = [], [], []
    for track, obs in tracks_manager.get_shot_observations(shot_id).items():
        if track in reconstruction.points:
            b = camera.pixel_bearing(obs.point)
            bs.append(b)
            Xs.append(reconstruction.points[track].coordinates)
            ids.append(track)
    bs = np.array(bs)
    Xs = np.array(Xs)
    if len(bs) < 5:
        return False, set(), {"num_common_points": len(bs)}

    T = multiview.absolute_pose_ransac(bs, Xs, threshold, 1000, 0.999)

    R = T[:, :3]
    t = T[:, 3]

    reprojected_bs = R.T.dot((Xs - t).T).T
    reprojected_bs /= np.linalg.norm(reprojected_bs, axis=1)[:, np.newaxis]

    inliers = np.linalg.norm(reprojected_bs - bs, axis=1) < threshold
    ninliers = int(sum(inliers))

    logger.info("{} resection inliers: {} / {}".format(shot_id, ninliers,
                                                       len(bs)))
    report = {
        "num_common_points": len(bs),
        "num_inliers": ninliers,
    }
    if ninliers >= min_inliers:
        R = T[:, :3].T
        t = -R.dot(T[:, 3])
        assert shot_id not in reconstruction.shots

        new_shots = add_shot(data, reconstruction, rig_assignments, shot_id,
                             pygeometry.Pose(R, t))

        if shot_id in rig_assignments:
            triangulate_shot_features(tracks_manager, reconstruction,
                                      new_shots, data.config)
        for i, succeed in enumerate(inliers):
            if succeed:
                add_observation_to_reconstruction(tracks_manager,
                                                  reconstruction, shot_id,
                                                  ids[i])
        # pyre-fixme [6]: Expected `int` for 2nd positional
        report["shots"] = list(new_shots)
        return True, new_shots, report
    else:
        return False, set(), report
Example No. 25
def save_topview(
    data: DataSetBase,
    tracks_manager: pymap.TracksManager,
    reconstructions: List[types.Reconstruction],
    output_path: str,
    io_handler: io.IoFilesystemBase,
) -> None:
    points = []
    colors = []
    for rec in reconstructions:
        for point in rec.points.values():
            track = tracks_manager.get_track_observations(point.id)
            if len(track) < 2:
                continue
            coords = point.coordinates
            points.append(coords)

            r, g, b = [], [], []
            for obs in track.values():
                r.append(obs.color[0])
                g.append(obs.color[1])
                b.append(obs.color[2])
            colors.append((statistics.median(r), statistics.median(g),
                           statistics.median(b)))

    all_x = []
    all_y = []
    for rec in reconstructions:
        for shot in rec.shots.values():
            o = shot.pose.get_origin()
            all_x.append(o[0])
            all_y.append(o[1])
            if not shot.metadata.gps_position.has_value:
                continue
            gps = shot.metadata.gps_position.value
            all_x.append(gps[0])
            all_y.append(gps[1])

    # compute camera's XY bounding box
    low_x, high_x = np.min(all_x), np.max(all_x)
    low_y, high_y = np.min(all_y), np.max(all_y)

    # get its size
    size_x = high_x - low_x
    size_y = high_y - low_y

    # expand bounding box by some margin
    margin = 0.05
    low_x -= size_x * margin
    high_x += size_y * margin
    low_y -= size_x * margin
    high_y += size_y * margin

    # update size
    size_x = high_x - low_x
    size_y = high_y - low_y

    im_size_x = 2000
    im_size_y = int(im_size_x * size_y / size_x)
    topview = np.zeros((im_size_y, im_size_x, 3))

    # splat points using gaussian + max-pool
    splatting = 15
    size = 2 * splatting + 1
    kernel = _get_gaussian_kernel(splatting, 2)
    kernel /= kernel[splatting, splatting]
    for point, color in zip(points, colors):
        x, y = int((point[0] - low_x) / size_x * im_size_x), int(
            (point[1] - low_y) / size_y * im_size_y)
        if not ((0 < x < (im_size_x - 1)) and (0 < y < (im_size_y - 1))):
            continue

        k_low_x, k_low_y = -min(x - splatting, 0), -min(y - splatting, 0)
        k_high_x, k_high_y = (
            size - max(x + splatting - (im_size_x - 2), 0),
            size - max(y + splatting - (im_size_y - 2), 0),
        )
        h_low_x, h_low_y = max(x - splatting, 0), max(y - splatting, 0)
        h_high_x, h_high_y = min(x + splatting + 1,
                                 im_size_x - 1), min(y + splatting + 1,
                                                     im_size_y - 1)

        for i in range(3):
            current = topview[h_low_y:h_high_y, h_low_x:h_high_x, i]
            splat = kernel[k_low_y:k_high_y, k_low_x:k_high_x]
            topview[h_low_y:h_high_y, h_low_x:h_high_x,
                    i] = np.maximum(splat * (color[i] / 255.0), current)

    plt.clf()
    plt.imshow(topview)

    # display computed camera's XY
    linewidth = 1
    markersize = 4
    for rec in reconstructions:
        sorted_shots = sorted(rec.shots.values(),
                              key=lambda x: x.metadata.capture_time.value)
        c_camera = cm.get_cmap("cool")(0 / len(reconstructions))
        c_gps = cm.get_cmap("autumn")(0 / len(reconstructions))
        for j, shot in enumerate(sorted_shots):
            o = shot.pose.get_origin()
            x, y = int((o[0] - low_x) / size_x * im_size_x), int(
                (o[1] - low_y) / size_y * im_size_y)
            plt.plot(
                x,
                y,
                linestyle="",
                marker="o",
                color=c_camera,
                markersize=markersize,
                linewidth=1,
            )

            # also display camera path using capture time
            if j < len(sorted_shots) - 1:
                n = sorted_shots[j + 1].pose.get_origin()
                nx, ny = int((n[0] - low_x) / size_x * im_size_x), int(
                    (n[1] - low_y) / size_y * im_size_y)
                plt.plot([x, nx], [y, ny],
                         linestyle="-",
                         color=c_camera,
                         linewidth=linewidth)

            # display GPS error
            if not shot.metadata.gps_position.has_value:
                continue
            gps = shot.metadata.gps_position.value
            gps_x, gps_y = int((gps[0] - low_x) / size_x * im_size_x), int(
                (gps[1] - low_y) / size_y * im_size_y)
            plt.plot(
                gps_x,
                gps_y,
                linestyle="",
                marker="v",
                color=c_gps,
                markersize=markersize,
                linewidth=1,
            )
            plt.plot([x, gps_x], [y, gps_y],
                     linestyle="-",
                     color=c_gps,
                     linewidth=linewidth)

    plt.xticks(
        [0, im_size_x / 2, im_size_x],
        [0, f"{int(size_x / 2):.0f}", f"{size_x:.0f} meters"],
        fontsize="small",
    )
    plt.yticks(
        [im_size_y, im_size_y / 2, 0],
        [0, f"{int(size_y / 2):.0f}", f"{size_y:.0f} meters"],
        fontsize="small",
    )
    with io_handler.open(os.path.join(output_path, "topview.png"),
                         "wb") as fwb:
        plt.savefig(
            fwb,
            dpi=300,
            bbox_inches="tight",
        )
Example No. 26
    def save_undistorted_tracks_manager(
        self, tracks_manager: pymap.TracksManager
    ) -> None:
        filename = os.path.join(self.data_path, "tracks.csv")
        with self.io_handler.open(filename, "w") as fw:
            fw.write(tracks_manager.as_string())
Example No. 27
    def save_tracks_manager(
        self, tracks_manager: pymap.TracksManager, filename: Optional[str] = None
    ) -> None:
        with self.io_handler.open(self._tracks_manager_file(filename), "w") as fw:
            fw.write(tracks_manager.as_string())