Example 1
def load_segmentation_labels(data: UndistortedDataSet, shot):
    """Load the undistorted segmentation labels.

    If no segmentation exists, return an array of zeros.
    """
    if data.undistorted_segmentation_exists(shot.id):
        return data.load_undistorted_segmentation(shot.id)
    else:
        size = shot.camera.height, shot.camera.width
        return np.zeros(size, dtype=np.uint8)
Example 2
def add_views_to_depth_cleaner(data: UndistortedDataSet, neighbors, dc):
    """Add the neighboring views' raw depthmaps to the depthmap cleaner."""
    for shot in neighbors:
        if not data.raw_depthmap_exists(shot.id):
            continue
        depth, plane, score, nghbr, nghbrs = data.load_raw_depthmap(shot.id)
        height, width = depth.shape
        K = shot.camera.get_K_in_pixel_coordinates(width, height)
        R = shot.pose.get_rotation_matrix()
        t = shot.pose.translation
        dc.add_view(K, R, t, depth)
Example 3
def compute_depthmaps(
    data: UndistortedDataSet,
    graph: pysfm.TracksManager,
    reconstruction: types.Reconstruction,
):
    """Compute and refine depthmaps for all shots.

    Args:
        data: an UndistortedDataSet
        graph: the tracks graph
        reconstruction: the undistorted reconstruction
    """
    logger.info("Computing neighbors")
    config = data.config
    processes = config["processes"]
    num_neighbors = config["depthmap_num_neighbors"]

    neighbors = {}
    common_tracks = common_tracks_double_dict(graph)
    for shot in reconstruction.shots.values():
        neighbors[shot.id] = find_neighboring_images(shot, common_tracks,
                                                     reconstruction,
                                                     num_neighbors)

    arguments = []
    for shot in reconstruction.shots.values():
        if len(neighbors[shot.id]) <= 1:
            continue
        mind, maxd = compute_depth_range(graph, reconstruction, shot, config)
        arguments.append((data, neighbors[shot.id], mind, maxd, shot))
    parallel_map(compute_depthmap_catched, arguments, processes)

    arguments = []
    for shot in reconstruction.shots.values():
        if len(neighbors[shot.id]) <= 1:
            continue
        arguments.append((data, neighbors[shot.id], shot))
    parallel_map(clean_depthmap_catched, arguments, processes)

    arguments = []
    for shot in reconstruction.shots.values():
        if len(neighbors[shot.id]) <= 1:
            continue
        arguments.append((data, neighbors[shot.id], shot))
    parallel_map(prune_depthmap_catched, arguments, processes)

    point_cloud = merge_depthmaps(data, reconstruction)
    data.save_point_cloud(*point_cloud, filename="merged.ply")
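For context, a minimal driver for this entry point could look like the sketch below. It assumes the project has already been reconstructed and undistorted; the module path opensfm.dense, the placeholder project path, and the DataSet/UndistortedDataSet constructor arguments are assumptions that vary between OpenSfM versions.

import os
from opensfm import dataset, dense

# Assumption: standard OpenSfM project layout with an "undistorted" subfolder.
data = dataset.DataSet("path/to/project")
udata = dataset.UndistortedDataSet(data, os.path.join(data.data_path, "undistorted"))
graph = udata.load_undistorted_tracks_manager()
reconstruction = udata.load_undistorted_reconstruction()[0]
dense.compute_depthmaps(udata, graph, reconstruction)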
Example 4
def add_views_to_depth_pruner(data: UndistortedDataSet, neighbors, dp):
    """Add the neighboring views' clean depthmaps to the depthmap pruner."""
    for shot in neighbors:
        if not data.clean_depthmap_exists(shot.id):
            continue
        depth, plane, score = data.load_clean_depthmap(shot.id)
        height, width = depth.shape
        color_image = data.load_undistorted_image(shot.id)
        labels = load_segmentation_labels(data, shot)
        image = scale_down_image(color_image, width, height)
        labels = scale_image(labels, image.shape[1], image.shape[0],
                             cv2.INTER_NEAREST)
        K = shot.camera.get_K_in_pixel_coordinates(width, height)
        R = shot.pose.get_rotation_matrix()
        t = shot.pose.translation
        dp.add_view(K, R, t, depth, plane, image, labels)
Example 5
def merge_depthmaps(data: UndistortedDataSet, reconstruction):
    """Merge depthmaps into a single point cloud."""
    logger.info("Merging depthmaps")

    shot_ids = [
        s for s in reconstruction.shots if data.pruned_depthmap_exists(s)
    ]

    if not shot_ids:
        logger.warning("Depthmaps contain no points.  Try using more images.")
        return

    def depthmap_provider(shot_id):
        return data.load_pruned_depthmap(shot_id)

    merge_depthmaps_from_provider(data, shot_ids, depthmap_provider,
                                  data.point_cloud_file())
Example 6
def load_combined_mask(data: UndistortedDataSet, shot):
    """Load the undistorted mask.

    If no mask exists, return an array of ones.
    """
    mask = data.load_undistorted_combined_mask(shot.id)
    if mask is None:
        size = int(shot.camera.height), int(shot.camera.width)
        return np.ones(size, dtype=np.uint8)
    else:
        return mask
Example 7
def merge_depthmaps(
    data: UndistortedDataSet, reconstruction: types.Reconstruction
) -> t.Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Merge depthmaps into a single point cloud."""
    shot_ids = [
        s for s in reconstruction.shots if data.pruned_depthmap_exists(s)
    ]

    def depthmap_provider(shot_id):
        return data.load_pruned_depthmap(shot_id)

    return merge_depthmaps_from_provider(data, shot_ids, depthmap_provider)
Example 8
def undistort_reconstruction(
    tracks_manager: Optional[pymap.TracksManager],
    reconstruction: types.Reconstruction,
    data: DataSetBase,
    udata: UndistortedDataSet,
) -> Dict[pymap.Shot, List[pymap.Shot]]:
    all_images = set(data.images())
    image_format = data.config["undistorted_image_format"]
    urec = types.Reconstruction()
    urec.points = reconstruction.points
    urec.reference = reconstruction.reference
    rig_instance_count = itertools.count()
    utracks_manager = pymap.TracksManager()
    logger.debug("Undistorting the reconstruction")
    undistorted_shots = {}
    for shot in reconstruction.shots.values():
        if shot.id not in all_images:
            logger.warning(
                f"Not undistorting {shot.id} as it is missing from the dataset's input images."
            )
            continue
        if shot.camera.projection_type == "perspective":
            urec.add_camera(perspective_camera_from_perspective(shot.camera))
            subshots = [get_shot_with_different_camera(urec, shot, image_format)]
        elif shot.camera.projection_type == "brown":
            urec.add_camera(perspective_camera_from_brown(shot.camera))
            subshots = [get_shot_with_different_camera(urec, shot, image_format)]
        elif shot.camera.projection_type in ["fisheye", "fisheye_opencv"]:
            urec.add_camera(perspective_camera_from_fisheye(shot.camera))
            subshots = [get_shot_with_different_camera(urec, shot, image_format)]
        elif pygeometry.Camera.is_panorama(shot.camera.projection_type):
            subshot_width = int(data.config["depthmap_resolution"])
            subshots = perspective_views_of_a_panorama(
                shot, subshot_width, urec, image_format, rig_instance_count
            )
        else:
            logger.warning(
                f"Not undistorting {shot.id} with unknown camera type."
            )
            continue

        for subshot in subshots:
            if tracks_manager:
                add_subshot_tracks(tracks_manager, utracks_manager, shot, subshot)
        undistorted_shots[shot.id] = subshots

    udata.save_undistorted_reconstruction([urec])
    if tracks_manager:
        udata.save_undistorted_tracks_manager(utracks_manager)

    udata.save_undistorted_shot_ids(
        {
            shot_id: [ushot.id for ushot in ushots]
            for shot_id, ushots in undistorted_shots.items()
        }
    )

    return undistorted_shots
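A hedged usage sketch: given a DataSet `data` and its UndistortedDataSet `udata` constructed as in the sketch after Example 3, the undistortion step could be driven as shown below; load_reconstruction and load_tracks_manager return the reconstructions and tracks computed earlier in the pipeline.

reconstruction = data.load_reconstruction()[0]
tracks_manager = data.load_tracks_manager()
# Returns a map from original shot id to the undistorted sub-shots created for it.
undistorted_shots = undistort_reconstruction(tracks_manager, reconstruction, data, udata)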
Example 9
def _transform_dense_point_cloud(udata: UndistortedDataSet, transformation,
                                 output_path):
    """Apply a transformation to the merged point cloud."""
    A, b = transformation[:3, :3], transformation[:3, 3]
    input_path = udata.point_cloud_file()
    with io.open_rt(input_path) as fin:
        with io.open_wt(output_path) as fout:
            for i, line in enumerate(fin):
                if i < 13:
                    # Copy the 13-line PLY header through unchanged.
                    fout.write(line)
                else:
                    x, y, z, nx, ny, nz, red, green, blue = line.split()
                    # Wrap map() in list() so np.dot sees a sequence on Python 3.
                    x, y, z = np.dot(A, list(map(float, [x, y, z]))) + b
                    nx, ny, nz = np.dot(A, list(map(float, [nx, ny, nz])))
                    fout.write("{} {} {} {} {} {} {} {} {}\n".format(
                        x, y, z, nx, ny, nz, red, green, blue))
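The transformation is read as a 3x3 linear part plus a translation, so any 4x4 (or 3x4) matrix works. A toy sketch, with hypothetical values and output filename, assuming `udata` already holds a merged point cloud:

import numpy as np

T = np.eye(4)
T[:3, 3] = [10.0, 0.0, -2.5]  # hypothetical translation applied to the merged cloud
_transform_dense_point_cloud(udata, T, udata.data_path + "/merged_transformed.ply")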
Example 10
def undistort_reconstruction(
    tracks_manager, reconstruction, data: DataSetBase, udata: UndistortedDataSet
):
    image_format = data.config["undistorted_image_format"]
    urec = types.Reconstruction()
    urec.points = reconstruction.points
    urec.reference = reconstruction.reference
    rig_instance_count = itertools.count()
    utracks_manager = pymap.TracksManager()
    logger.debug("Undistorting the reconstruction")
    undistorted_shots = {}
    for shot in reconstruction.shots.values():
        if shot.camera.projection_type == "perspective":
            camera = perspective_camera_from_perspective(shot.camera)
            urec.add_camera(camera)
            subshots = [
                get_shot_with_different_camera(urec, shot, camera, image_format)
            ]
        elif shot.camera.projection_type == "brown":
            camera = perspective_camera_from_brown(shot.camera)
            urec.add_camera(camera)
            subshots = [
                get_shot_with_different_camera(urec, shot, camera, image_format)
            ]
        elif shot.camera.projection_type in ["fisheye", "fisheye_opencv"]:
            camera = perspective_camera_from_fisheye(shot.camera)
            urec.add_camera(camera)
            subshots = [
                get_shot_with_different_camera(urec, shot, camera, image_format)
            ]
        elif pygeometry.Camera.is_panorama(shot.camera.projection_type):
            subshot_width = int(data.config["depthmap_resolution"])
            subshots = perspective_views_of_a_panorama(
                shot, subshot_width, urec, image_format, rig_instance_count
            )

        for subshot in subshots:
            if tracks_manager:
                add_subshot_tracks(tracks_manager, utracks_manager, shot, subshot)
        # pyre-fixme[61]: `subshots` may not be initialized here.
        undistorted_shots[shot.id] = subshots

    udata.save_undistorted_reconstruction([urec])
    if tracks_manager:
        udata.save_undistorted_tracks_manager(utracks_manager)

    udata.save_undistorted_shot_ids(
        {
            shot_id: [ushot.id for ushot in ushots]
            for shot_id, ushots in undistorted_shots.items()
        }
    )

    return undistorted_shots
Example 11
def add_views_to_depth_estimator(data: UndistortedDataSet, neighbors, de):
    """Add neighboring views to the DepthmapEstimator."""
    num_neighbors = data.config["depthmap_num_matching_views"]
    for shot in neighbors[:num_neighbors + 1]:
        assert shot.camera.projection_type == "perspective"
        color_image = data.load_undistorted_image(shot.id)
        mask = load_combined_mask(data, shot)
        gray_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2GRAY)
        original_height, original_width = gray_image.shape
        width = min(original_width, int(data.config["depthmap_resolution"]))
        height = width * original_height // original_width
        image = scale_down_image(gray_image, width, height)
        mask = scale_down_image(mask, width, height, cv2.INTER_NEAREST)
        K = shot.camera.get_K_in_pixel_coordinates(width, height)
        R = shot.pose.get_rotation_matrix()
        t = shot.pose.translation
        de.add_view(K, R, t, image, mask)
Example 12
def export(reconstruction, tracks_manager, udata: UndistortedDataSet,
           export_only):
    """Export the reconstruction to an OpenMVS scene file."""
    exporter = pydense.OpenMVSExporter()
    for camera in reconstruction.cameras.values():
        if camera.projection_type == "perspective":
            w, h = camera.width, camera.height
            K = np.array([
                [camera.focal * max(w, h), 0, (w - 1.0) / 2.0],
                [0, camera.focal * max(w, h), (h - 1.0) / 2.0],
                [0, 0, 1],
            ])
            exporter.add_camera(str(camera.id), K, w, h)

    for shot in reconstruction.shots.values():
        if export_only is not None and shot.id not in export_only:
            continue

        if shot.camera.projection_type == "perspective":
            image_path = udata._undistorted_image_file(shot.id)
            exporter.add_shot(
                str(os.path.abspath(image_path)),
                str(shot.id),
                str(shot.camera.id),
                shot.pose.get_rotation_matrix(),
                shot.pose.get_origin(),
            )

    for point in reconstruction.points.values():
        observations = tracks_manager.get_track_observations(point.id)

        if export_only is not None:
            shots = [k for k in observations if k in export_only]
        else:
            shots = list(observations)

        if shots:
            coordinates = np.array(point.coordinates, dtype=np.float64)
            exporter.add_point(coordinates, shots)

    io.mkdir_p(udata.data_path + "/openmvs")
    exporter.export(udata.data_path + "/openmvs/scene.mvs")
Example 13
def export(
    reconstruction,
    index,
    image_graph,
    tracks_manager,
    base_output_path,
    data: DataSet,
    undistorted,
    udata: UndistortedDataSet,
    with_points,
    export_only,
):
    logger.info("Reconstruction %d" % index)
    output_path = os.path.join(base_output_path, "recon%d" % index)
    io.mkdir_p(output_path)
    io.mkdir_p(os.path.join(output_path, "visualize"))
    io.mkdir_p(os.path.join(output_path, "txt"))
    io.mkdir_p(os.path.join(output_path, "models"))

    shot_index = {image: i for i, image in enumerate(reconstruction.shots)}

    fvis = open(os.path.join(output_path, "vis.dat"), "w")
    fvis.write("VISDATA\n")
    fvis.write("%d\n" % len(shot_index))

    for image, i in shot_index.items():
        shot = reconstruction.shots[image]
        base = "%08d" % i
        logger.info("Image: %s %s" % (image, base))

        # vis.dat for this image
        if image_graph:
            adj_indices = []
            for adj_image in image_graph[image]:
                weight = image_graph[image][adj_image]["weight"]
                if weight > 0 and adj_image in shot_index:
                    adj_indices.append(shot_index[adj_image])

            num_covisible = len(adj_indices)
            fvis.write("%d " % i)
            fvis.write("%d " % num_covisible)
            for ai in adj_indices:
                fvis.write("%d " % ai)
            fvis.write("\n")

        # radially undistort the original image
        camera = shot.camera
        if undistorted:
            undistorted_image = udata.load_undistorted_image(image)
        else:
            original_image = data.load_image(image)[:, :, ::-1]
            original_h, original_w = original_image.shape[:2]
            K = camera.get_K_in_pixel_coordinates(original_w, original_h)
            distortion = np.array([camera.k1, camera.k2, 0, 0])
            undistorted_image = cv2.undistort(original_image, K, distortion)

        # resize and save the undistorted to visualize/%08d.jpg
        resized_image = features.resized_image(
            undistorted_image, data.config["feature_process_size"])
        new_image_path = os.path.join(output_path, "visualize", base + ".jpg")
        cv2.imwrite(new_image_path, resized_image)

        # write camera projection matrix to txt/%08d.txt
        resized_h, resized_w = resized_image.shape[:2]
        resized_K = camera.get_K_in_pixel_coordinates(resized_w, resized_h)
        P = resized_K.dot(shot.pose.get_world_to_cam()[:3])

        new_txt = os.path.join(output_path, "txt", base + ".txt")
        with open(new_txt, "wb") as f:
            np.savetxt(f, P, str("%f"), header="CONTOUR")

    fvis.close()

    # txt
    with open(os.path.join(output_path, "pmvs_txt"), "w") as f:
        f.write("level 1\n")
        f.write("csize 2\n")
        f.write("threshold 0.7\n")
        f.write("wsize 7\n")
        f.write("minImageNum 3\n")
        f.write("CPU 8\n")
        f.write("setEdge 0\n")
        f.write("useBound 0\n")
        f.write("useVisData {}\n".format(int(image_graph is not None)))
        f.write("sequence -1\n")
        f.write("timages -1 0 %d\n" % len(shot_index))
        f.write("oimages 0\n")
Example 14
def export(reconstruction, tracks_manager, udata: UndistortedDataSet,
           with_points, export_only):
    """Export the reconstruction (and optionally its points) to a VisualSFM NVM_V3 file."""
    lines = ["NVM_V3", "", str(len(reconstruction.shots))]
    shot_size_cache = {}
    shot_index = {}
    i = 0
    skipped_shots = 0

    for shot in reconstruction.shots.values():
        if export_only is not None and shot.id not in export_only:
            skipped_shots += 1
            continue

        q = tf.quaternion_from_matrix(shot.pose.get_rotation_matrix())
        o = shot.pose.get_origin()

        shot_size_cache[shot.id] = udata.undistorted_image_size(shot.id)
        shot_index[shot.id] = i
        i += 1
        if shot.camera.projection_type == "brown":
            # Will approximate Brown model, not optimal
            focal_normalized = (shot.camera.focal_x +
                                shot.camera.focal_y) / 2.0
        else:
            focal_normalized = shot.camera.focal

        words = [
            image_path(shot.id, udata),
            focal_normalized * max(shot_size_cache[shot.id]),
            q[0],
            q[1],
            q[2],
            q[3],
            o[0],
            o[1],
            o[2],
            "0",
            "0",
        ]
        lines.append(" ".join(map(str, words)))

    # Adjust shots count
    lines[2] = str(int(lines[2]) - skipped_shots)

    if with_points:
        skipped_points = 0
        lines.append("")
        points = reconstruction.points
        lines.append(str(len(points)))
        points_count_index = len(lines) - 1

        for point_id, point in points.items():
            shots = reconstruction.shots
            coord = point.coordinates
            color = list(map(int, point.color))

            view_line = []
            for shot_key, obs in tracks_manager.get_track_observations(
                    point_id).items():
                if export_only is not None and shot_key not in export_only:
                    continue

                if shot_key in shots.keys():
                    v = obs.point
                    x = (0.5 + v[0]) * shot_size_cache[shot_key][1]
                    y = (0.5 + v[1]) * shot_size_cache[shot_key][0]
                    view_line.append(" ".join(
                        map(str, [shot_index[shot_key], obs.id, x, y])))

            if len(view_line) > 1:
                lines.append(" ".join(map(str, coord)) + " " +
                             " ".join(map(str, color)) + " " +
                             str(len(view_line)) + " " + " ".join(view_line))
            else:
                skipped_points += 1

        # Adjust points count
        lines[points_count_index] = str(
            int(lines[points_count_index]) - skipped_points)
    else:
        lines += ["0", ""]

    lines += ["0", "", "0"]

    with io.open_wt(udata.data_path + "/reconstruction.nvm") as fout:
        fout.write("\n".join(lines))
Example 15
def image_path(image, udata: UndistortedDataSet):
    """Path to the undistorted image relative to the dataset path."""
    path = udata._undistorted_image_file(image)
    return os.path.relpath(path, udata.data_path)