def get_dataset_idp(config):
    reconstruction_mode = get_reconstruction_mode(config)
    dataset_idp = config.get_option_value('dataset_idp',
                                          target_type=str,
                                          section=reconstruction_mode)
    logger.vinfo('dataset_idp', dataset_idp)
    return dataset_idp
def build_affine_transformation_matrix(camera_parameters, render_compatible_camera_parameters):
    # http://www.open3d.org/docs/release/python_api/open3d.camera.PinholeCameraIntrinsic.html

    intrinsic_original = camera_parameters.intrinsic.intrinsic_matrix
    intrinsic_renderer = render_compatible_camera_parameters.intrinsic.intrinsic_matrix

    f_x, f_y, skew, p_x, p_y = Intrinsics.split_intrinsic_mat(
        intrinsic_original)
    f_x_r, f_y_r, skew_r, p_x_r, p_y_r = Intrinsics.split_intrinsic_mat(
        render_compatible_camera_parameters.intrinsic.intrinsic_matrix)
    logger.vinfo('f_x, f_y, skew, p_x, p_y', [f_x, f_y, skew, p_x, p_y])
    assert f_x_r == f_y_r

    trans_mat_renderer_to_original = Intrinsics.compute_intrinsic_transformation(
        intrinsic_original, intrinsic_renderer, check_result=True)

    # Get the first two rows
    affine_mat = trans_mat_renderer_to_original[0:2, :]


    # # Build an affine matrix to compensate incorrect settings of renderer
    # offset_x = p_x - f_x / f_renderer * c_x_renderer
    # offset_y = p_y - f_y / f_renderer * c_y_renderer
    # affine_mat = np.asarray([
    #     [f_x / f_renderer, 0, offset_x],
    #     [0, f_y / f_renderer, offset_y]],
    #     dtype=np.float32)

    return affine_mat
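
# Sketch (assumption, the helper is not shown in this listing): for a standard
# pinhole model, pixel coordinates w.r.t. the renderer intrinsics map to pixel
# coordinates w.r.t. the original intrinsics via K_original @ inv(K_renderer).
# Intrinsics.compute_intrinsic_transformation is assumed to compute this 3x3 matrix.
def compute_intrinsic_transformation_sketch(intrinsic_original, intrinsic_renderer):
    import numpy as np
    trans_mat = intrinsic_original @ np.linalg.inv(intrinsic_renderer)
    # Both intrinsic matrices have [0, 0, 1] as last row, so trans_mat does as
    # well, and its first two rows form a valid 2x3 affine matrix for cv2.warpAffine
    return trans_mat
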
def color_map_optimization(mesh,
                           rgbd_images,
                           camera_trajectory,
                           ofp,
                           config,
                           depth_range=None,
                           maximum_iteration=None):
    reconstruction_mode = get_reconstruction_mode(config)

    # Optimize the texture and save the mesh as texture_mapped.ply.
    # This is an implementation of the following paper: "Q.-Y. Zhou and V. Koltun,
    # Color Map Optimization for 3D Reconstruction with Consumer Depth Cameras, SIGGRAPH 2014"

    # Check out default option values here
    #   http://www.open3d.org/docs/latest/python_api/open3d.color_map.ColorMapOptimizationOption.html
    # option.number_of_vertical_anchors = 16
    # option.non_rigid_anchor_point_weight = 0.316
    # option.depth_threshold_for_discontinuity_check = 0.1
    # option.half_dilation_kernel_size_for_discontinuity_map = 3
    # option.image_boundary_margin = 10
    # option.invisible_vertex_color_knn = 3

    option = o3d.pipelines.color_map.ColorMapOptimizationOption()
    option.non_rigid_camera_coordinate = config.get_option_value(
        'non_rigid_camera_coordinate',
        target_type=bool,
        section=reconstruction_mode)

    # This maximum_allowable_depth value is defined w.r.t. the mesh
    # Therefore the original depth range values must be used
    # (and not the scaled depth maps represented as uint16)
    # One can observe this behavior by providing the depth_arr_min value for a specific image
    # and analysing the corresponding open3d debug output, i.e.
    # [Open3D DEBUG] [cam 0]: 0/951198 (0.00000%) vertices are visible

    #option.maximum_allowable_depth = depth_range[1]
    option.maximum_allowable_depth = sys.float_info.max
    logger.vinfo('depth_range', depth_range)
    logger.vinfo('option.maximum_allowable_depth',
                 option.maximum_allowable_depth)

    # DON'T DO THIS:  option.depth_threshold_for_visibility_check = 0

    # option.maximum_allowable_depth = config.get_option_value(
    #     'maximum_allowable_depth', target_type=float, section=reconstruction_mode)

    if maximum_iteration is not None:
        option.maximum_iteration = maximum_iteration
    else:
        option.maximum_iteration = config.get_option_value(
            'maximum_iteration', target_type=int, section=reconstruction_mode)

    with o3d.utility.VerbosityContextManager(
            o3d.utility.VerbosityLevel.Debug) as cm:
        o3d.pipelines.color_map.color_map_optimization(mesh, rgbd_images,
                                                       camera_trajectory,
                                                       option)
    o3d.visualization.draw_geometries([mesh])
    o3d.io.write_triangle_mesh(ofp, mesh)
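
# Illustration only (assumed relation, not used by the pipeline): the uint16
# depth maps created elsewhere in this project are scaled copies of the metric
# depth, while maximum_allowable_depth above is compared against metric depth,
# i.e. metric_depth = stored_uint16_value / depth_scale.
def uint16_depth_to_metric_sketch(depth_uint16, depth_scale):
    import numpy as np
    return np.asarray(depth_uint16, dtype=np.float32) / depth_scale
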
    def compute_depth_statistics(self, depth_array_ifp_list):
        # Determine value range
        overall_depth_map_min = float('inf')
        overall_depth_map_max = -float('inf')
        for depth_map_ifp in depth_array_ifp_list:
            depth_arr = self.read_depth_map(depth_map_ifp)

            depth_map_min, depth_map_max = ColmapDepthMapHandler.compute_depth_map_min_max(
                depth_arr)

            overall_depth_map_min = min(overall_depth_map_min, depth_map_min)
            overall_depth_map_max = max(overall_depth_map_max, depth_map_max)
        depth_map_range = (overall_depth_map_min, overall_depth_map_max)
        logger.vinfo(
            'Depth Value Range (colmap):',
            ["min:", overall_depth_map_min, 'max:', overall_depth_map_max])
        return depth_map_range
    def __init__(self, config_fp, working_file_suffix=None):

        self.config_fp = config_fp
        self.config = configparser.RawConfigParser()

        if not os.path.isfile(self.config_fp):
            abs_path = os.path.abspath(os.path.dirname(self.config_fp))
            if not os.path.isdir(abs_path):
                logger.vinfo('abs_path', abs_path)
                assert False  # config folder missing
            open(self.config_fp, 'a').close()
        else:
            self.config.read(self.config_fp)

        if working_file_suffix is not None:
            self.path_to_working_copy = self.config_fp + working_file_suffix
        else:
            self.path_to_working_copy = self.config_fp
def compute_scaling_value(depth_map_max):

    # https://docs.scipy.org/doc/numpy-1.13.0/user/basics.types.html
    #   An uint16 can store up to 65535 values
    depth_map_max_possible_value = 65535.0

    # Choose the scaling value such that the scaled maximum equals 65534.0:
    # small depth ranges are scaled up (so that the cast to uint16 does not
    # remove accuracy) and ranges exceeding the uint16 limit are scaled down.
    # Using 65534.0 instead of 65535.0 ensures that there are no overflow values.
    depth_scale_value = (depth_map_max_possible_value - 1) / depth_map_max
    # depth_scale_value = float(math.floor(depth_scale_value))

    logger.vinfo('depth_map_max:', depth_map_max)
    logger.vinfo('depth_scale_value:', depth_scale_value)
    assert depth_map_max * depth_scale_value < depth_map_max_possible_value
    return depth_scale_value
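
# Round-trip sketch (illustration only) for compute_scaling_value: scaling with
# the returned factor keeps every value below 65535 so it survives the cast to
# uint16, and dividing by the same factor recovers the metric depth.
def scaling_round_trip_sketch(depth_arr):
    import numpy as np
    depth_scale_value = compute_scaling_value(float(np.max(depth_arr)))
    depth_uint16 = np.asarray(depth_arr * depth_scale_value, dtype=np.uint16)
    metric_depth = depth_uint16.astype(np.float32) / depth_scale_value
    return depth_uint16, metric_depth
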
    def write_depth_map_to_disk(self, image_name, depth_map, color_image):

        color_image_original_ifp = os.path.join(self.color_image_idp,
                                                image_name)
        depth_map_original_ifp = os.path.join(
            self.depth_map_idp, image_name + self.depth_map_suffix)
        depth_map_from_mesh_ofp = os.path.join(
            self.depth_map_from_geometry_dp,
            image_name + self.depth_map_from_geometry_suffix)

        # logger.vinfo('color_image_original_ifp', color_image_original_ifp)
        # logger.vinfo('depth_map_original_ifp', depth_map_original_ifp)
        logger.vinfo('depth_map_from_mesh_ofp', depth_map_from_mesh_ofp)

        if self.use_original_depth_maps_as_mask:

            logger.vinfo('Use original depth map as mask: ',
                         self.use_original_depth_maps_as_mask)
            depth_map_original = self.read_depth_map(depth_map_original_ifp)
            if depth_map_original.shape != depth_map.shape:
                logger.vinfo('depth_map_original.shape',
                             depth_map_original.shape)
                logger.vinfo('depth_map_from_mesh.shape', depth_map.shape)
                assert False
            depth_map_from_mesh_masked = copy.deepcopy(depth_map)
            depth_map_from_mesh_masked[depth_map_original == 0.0] = 0
            depth_map = depth_map_from_mesh_masked
        else:
            depth_map_from_mesh_masked = None

        ColmapDepthMapHandler.write_depth_map(depth_map_from_mesh_ofp,
                                              depth_map)

        # Some visualization functions for debugging purposes
        if get_show_color_rendering_flag(self.config):
            plt.imshow(color_image)
            plt.show()

        if get_show_depth_rendering_flag(self.config):
            plt.imshow(depth_map)
            plt.show()

        if get_show_rendering_overview_flag(self.config):
            self.show_color_and_depth_renderings(image_name,
                                                 color_image_original_ifp,
                                                 depth_map_original_ifp,
                                                 color_image, depth_map,
                                                 depth_map_from_mesh_masked)
def examine_model_format(model_idp):
    txt_ext = '.txt'
    cameras_txt_fp, images_txt_fp, points3D_txt_fp = compute_model_fps(
        model_idp, txt_ext)
    txt_model_present = check_model_completness(cameras_txt_fp, images_txt_fp,
                                                points3D_txt_fp)

    bin_ext = '.bin'
    cameras_bin_fp, images_bin_fp, points3D_bin_fp = compute_model_fps(
        model_idp, bin_ext)
    bin_model_present = check_model_completness(cameras_bin_fp, images_bin_fp,
                                                points3D_bin_fp)

    logger.vinfo('txt_model_present', txt_model_present)
    logger.vinfo('bin_model_present', bin_model_present)
    logger.vinfo('model_idp', str(model_idp))

    assert txt_model_present or bin_model_present

    # If both model formats are present, the txt format is preferred
    if txt_model_present:
        logger.info('Found TXT model in ' + str(model_idp))
        return txt_ext
    else:
        logger.info('Found BIN model in ' + str(model_idp))
        return bin_ext
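
# Sketch (assumption, the helpers are not shown in this listing):
# compute_model_fps is assumed to join the model directory with the three
# standard COLMAP model file names, and check_model_completness to verify
# that all three files exist.
def compute_model_fps_sketch(model_idp, ext):
    import os
    return (os.path.join(model_idp, 'cameras' + ext),
            os.path.join(model_idp, 'images' + ext),
            os.path.join(model_idp, 'points3D' + ext))


def check_model_completness_sketch(cameras_fp, images_fp, points3D_fp):
    import os
    return all(os.path.isfile(fp) for fp in (cameras_fp, images_fp, points3D_fp))
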
# examples/Python/Advanced/color_map_optimization.py

import open3d as o3d

from cto.utility.logging_extension import logger
from cto.visualization import visualize_intermediate_result
from cto.config_api import create_config
from cto.config_api import get_ofp
from cto.color_optimization import color_map_optimization
from cto.data_parsing.reconstruction_parsing import import_reconstruction

if __name__ == "__main__":

    # http://www.open3d.org/docs/release/tutorial/Advanced/color_map_optimization.html
    logger.vinfo('o3d.__version__', o3d.__version__)

    o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug)

    config = create_config()
    mesh_textured_max_iter_x_ofp = get_ofp(config)
    rgbd_images, camera_trajectory, mesh, depth_range = import_reconstruction(
        config)

    visualize_intermediate_result(rgbd_images, camera_trajectory, mesh, config)

    color_map_optimization(
        mesh,
        rgbd_images,  # are used to compute gradient images
        camera_trajectory,
        ofp=mesh_textured_max_iter_x_ofp,
        config=config,
        depth_range=depth_range)
def compute_depth_maps_from_geometry(mesh_ifp,
                                     camera_trajectory,
                                     ordered_image_names,
                                     depth_map_callback,
                                     config=None):

    logger.info('create_depth_maps_from_mesh: ... ')

    num_params = len(camera_trajectory.parameters)
    logger.vinfo('num_params', num_params)

    camera_parameter_list = camera_trajectory.parameters
    # Optionally limit the number of processed images (for debugging purposes)
    num_images = None
    if num_images is not None:
        camera_parameter_list = camera_parameter_list[:num_images]

    assert os.path.isfile(mesh_ifp)
    # required for certain methods called below
    off_screen_rendering = True
    for image_name, camera_parameters in zip(ordered_image_names,
                                             camera_parameter_list):

        extrinsics = camera_parameters.extrinsic
        cam_to_world_mat_computer_vision = invert_transformation_mat(
            extrinsics)

        # http://www.open3d.org/docs/release/python_api/open3d.camera.PinholeCameraIntrinsic.html
        intrinsics = camera_parameters.intrinsic
        render_compatible_camera_parameters = build_vtk_render_compatible_camera_parameters(
            camera_parameters)

        width = intrinsics.width
        height = intrinsics.height

        render_interface = RenderInterface(
            off_screen_rendering=off_screen_rendering,
            width=width,
            height=height,
            background_color=(0, 127, 127))

        # Can we avoid this redundant loading
        render_interface.load_vtk_mesh_or_point_cloud(mesh_ifp,
                                                      texture_ifp=None)

        render_interface.set_active_cam_from_computer_vision_cam_to_world_mat(
            cam_to_world_mat_computer_vision,
            render_compatible_camera_parameters.intrinsic.intrinsic_matrix,
            width,
            height,
            max_clipping_range=sys.float_info.max)

        render_interface.render()
        #render_interface.show_z_buffer()
        if not off_screen_rendering:
            render_interface.render_and_start()

        # We apply an affine transformation to the depth_map images
        # to compensate differences in the intrinsic parameters
        affine_mat = build_affine_transformation_matrix(
            camera_parameters, render_compatible_camera_parameters)

        depth_map = render_interface.get_computer_vision_depth_buffer_as_numpy_arr(
        )
        color_image = render_interface.get_rgba_buffer_as_numpy_arr()

        # Pass flags and borderMode as keyword arguments so that they are not
        # consumed by the optional dst parameter of cv2.warpAffine
        color_image = cv2.warpAffine(
            color_image,
            affine_mat,
            (color_image.shape[1], color_image.shape[0]),
            flags=cv2.WARP_INVERSE_MAP,
            borderMode=cv2.BORDER_CONSTANT,
            borderValue=0)

        depth_map = cv2.warpAffine(
            depth_map,
            affine_mat,
            (depth_map.shape[1], depth_map.shape[0]),
            flags=cv2.WARP_INVERSE_MAP,
            borderMode=cv2.BORDER_CONSTANT,
            borderValue=0)

        depth_map_callback(image_name, depth_map, color_image)

    # if not off_screen_rendering:
    #     render_interface.render_and_start()
    logger.info('create_depth_maps_from_mesh: Done ')
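
# Sketch (assumption, the helper is not shown in this listing):
# invert_transformation_mat is assumed to invert a 4x4 rigid transformation
# [R | t; 0 0 0 1] using the closed-form inverse [R^T | -R^T t; 0 0 0 1].
def invert_transformation_mat_sketch(trans_mat):
    import numpy as np
    rotation = trans_mat[:3, :3]
    translation = trans_mat[:3, 3]
    inverted = np.eye(4, dtype=trans_mat.dtype)
    inverted[:3, :3] = rotation.T
    inverted[:3, 3] = -rotation.T @ translation
    return inverted
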
def compute_depth_maps_from_geometry(mesh_ifp, camera_trajectory,
                                     ordered_image_names, depth_map_callback,
                                     config):
    logger.info('create_depth_maps_from_mesh: ... ')

    # https://github.com/intel-isl/Open3D/blob/master/cpp/open3d/visualization/Visualizer/ViewControl.cpp#L189
    #   bool ViewControl::ConvertFromPinholeCameraParameters(
    #       ...
    #         window_height_ != intrinsic.height_ ||
    #         window_width_ != intrinsic.width_ ||
    #         intrinsic.intrinsic_matrix_(0, 2) !=
    #                 (double)window_width_ / 2.0 - 0.5 ||
    #         intrinsic.intrinsic_matrix_(1, 2) !=
    #                 (double)window_height_ / 2.0 - 0.5) {
    #         utility::LogWarning(
    #                 "[ViewControl] ConvertFromPinholeCameraParameters() failed "
    #                 "because window height and width do not match.");
    #   Therefore, only specific intrinsic matrices are allowed

    num_params = len(camera_trajectory.parameters)
    logger.vinfo('num_params', num_params)

    camera_parameter_list = camera_trajectory.parameters
    # Optionally limit the number of processed images (for debugging purposes)
    num_images = None
    if num_images is not None:
        camera_parameter_list = camera_parameter_list[:num_images]

    # http://www.open3d.org/docs/release/python_api/open3d.visualization.html
    # http://www.open3d.org/docs/release/python_api/open3d.visualization.Visualizer.html
    vis = o3d.visualization.Visualizer()
    show_rendering = False
    for image_name, camera_parameters in zip(ordered_image_names,
                                             camera_parameter_list):

        # http://www.open3d.org/docs/release/python_api/open3d.camera.PinholeCameraIntrinsic.html
        intrinsics = camera_parameters.intrinsic

        if show_rendering:
            if intrinsics.width > 1920 or intrinsics.height > 1080:
                # https://github.com/intel-isl/Open3D/issues/2036
                logger.warning(
                    'THERE IS A KNOWN ISSUE FOR VISUALIZING WINDOW SIZES GREATER THAN THE DEFAULT VALUES: '
                    + '({}, {}) vs ({}, {})'.format(
                        intrinsics.width, intrinsics.height, 1920, 1080))
                logger.warning(
                    'Setting show_rendering=False should avoid this problem ')

        vis.create_window(width=intrinsics.width,
                          height=intrinsics.height,
                          left=0,
                          top=0,
                          visible=show_rendering)

        mesh = o3d.io.read_triangle_mesh(mesh_ifp)
        vis.add_geometry(mesh)

        view_control = vis.get_view_control()
        render_compatible_camera_parameters = build_o3d_render_compatible_camera_parameters(
            camera_parameters)

        view_control.convert_from_pinhole_camera_parameters(
            render_compatible_camera_parameters)

        # http://www.open3d.org/docs/release/tutorial/Advanced/non_blocking_visualization.html
        # vis.update_geometry(pcd)
        vis.poll_events()  # CRUCIAL
        vis.update_renderer()

        # We apply an affine transformation to the depth_map images
        # to compensate differences in the intrinsic parameters
        affine_mat = build_affine_transformation_matrix(
            camera_parameters, render_compatible_camera_parameters)

        # http://www.open3d.org/docs/release/python_api/open3d.visualization.Visualizer.html
        color_image = np.asarray(
            vis.capture_screen_float_buffer(do_render=False), dtype=np.float32)

        depth_map = np.asarray(vis.capture_depth_float_buffer(do_render=False),
                               dtype=np.float32)

        # Pass flags and borderMode as keyword arguments so that they are not
        # consumed by the optional dst parameter of cv2.warpAffine
        color_image = cv2.warpAffine(
            color_image,
            affine_mat,
            (color_image.shape[1], color_image.shape[0]),
            flags=cv2.WARP_INVERSE_MAP,
            borderMode=cv2.BORDER_CONSTANT,
            borderValue=0)

        depth_map = cv2.warpAffine(
            depth_map,
            affine_mat,
            (depth_map.shape[1], depth_map.shape[0]),
            flags=cv2.WARP_INVERSE_MAP,
            borderMode=cv2.BORDER_CONSTANT,
            borderValue=0)

        depth_map_callback(image_name, depth_map, color_image)

    logger.info('create_depth_maps_from_mesh: Done ')
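
# Sketch (assumption, the helper is not shown in this listing): given the
# ViewControl check quoted above, build_o3d_render_compatible_camera_parameters
# is assumed to keep the focal lengths and extrinsics, but to move the principal
# point to the window center, which is the only value accepted by
# ConvertFromPinholeCameraParameters.
def build_o3d_render_compatible_camera_parameters_sketch(camera_parameters):
    intrinsics = camera_parameters.intrinsic
    f_x = intrinsics.intrinsic_matrix[0, 0]
    f_y = intrinsics.intrinsic_matrix[1, 1]
    render_compatible_intrinsics = o3d.camera.PinholeCameraIntrinsic(
        intrinsics.width, intrinsics.height, f_x, f_y,
        intrinsics.width / 2.0 - 0.5, intrinsics.height / 2.0 - 0.5)
    render_compatible_camera_parameters = o3d.camera.PinholeCameraParameters()
    render_compatible_camera_parameters.intrinsic = render_compatible_intrinsics
    render_compatible_camera_parameters.extrinsic = camera_parameters.extrinsic
    return render_compatible_camera_parameters
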
def parse_colmap_rgb_and_depth_data(camera_trajectory, ordered_image_names,
                                    colmap_workspace, config):

    color_image_resized_dp = compute_resized_images(colmap_workspace,
                                                    lazy=True)
    color_image_resized_fp_list = get_resized_image_fp_s(
        ordered_image_names, color_image_resized_dp)

    depth_map_handler = ColmapDepthMapHandler(colmap_workspace, config)

    depth_map_handler.process_depth_maps(camera_trajectory,
                                         ordered_image_names)

    depth_array_ifp_list = depth_map_handler.get_depth_array_fp_s(
        ordered_image_names)
    depth_map_range = depth_map_handler.compute_depth_statistics(
        depth_array_ifp_list)

    overall_depth_map_max = depth_map_range[1]
    depth_scale_value = compute_scaling_value(overall_depth_map_max)

    rgbd_images = []
    for color_image_resized_fp, depth_map_ifp in zip(
            color_image_resized_fp_list, depth_array_ifp_list):

        logger.vinfo('depth_map_ifp', depth_map_ifp)

        depth_arr = depth_map_handler.read_depth_map(depth_map_ifp)
        depth_arr[depth_arr < 0] = 0
        depth_arr_min, depth_arr_max = ColmapDepthMapHandler.compute_depth_map_min_max(
            depth_arr)
        logger.vinfo('depth_arr_min', depth_arr_min)
        logger.vinfo('depth_arr_max', depth_arr_max)

        if depth_arr_min < 0:
            logger.vinfo('depth_arr_min', depth_arr_min)
            assert False

        # Scale the values, so that the cast to uint16 does not remove accuracy
        # depth_arr = (depth_arr - np.full_like(depth_arr, depth_map_min)) * depth_scale_value
        depth_arr_scaled = depth_arr * depth_scale_value
        depth_arr_scaled_min, depth_arr_scaled_max = ColmapDepthMapHandler.compute_depth_map_min_max(
            depth_arr_scaled)
        depth_arr_scaled = np.asarray(depth_arr_scaled, dtype=np.uint16)

        color_image = o3d.io.read_image(color_image_resized_fp)
        depth_map = o3d.geometry.Image(depth_arr_scaled)

        logger.vinfo('depth_arr_scaled_min', depth_arr_scaled_min)
        logger.vinfo('depth_arr_scaled_max', depth_arr_scaled_max)

        rgbd_image = convert_color_depth_to_rgbd(
            color_image,
            depth_map,
            depth_scale=depth_scale_value,
            convert_rgb_to_intensity=False)

        # http://www.open3d.org/docs/release/python_api/open3d.geometry.RGBDImage.html
        # logger.vinfo("dimension", rgbd_image.dimension())
        # logger.vinfo("min bound", rgbd_image.get_min_bound())
        # logger.vinfo("max bound", rgbd_image.get_max_bound())
        rgbd_images.append(rgbd_image)

    return rgbd_images, depth_map_range
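
# Sketch (assumption, the helper is not shown in this listing):
# convert_color_depth_to_rgbd is assumed to wrap Open3D's RGBDImage factory so
# that the stored uint16 depth values are converted back to metric depth via
# the given depth_scale. The truncation value used here is an assumption as well.
def convert_color_depth_to_rgbd_sketch(color_image,
                                       depth_map,
                                       depth_scale,
                                       convert_rgb_to_intensity=False):
    import sys
    return o3d.geometry.RGBDImage.create_from_color_and_depth(
        color_image,
        depth_map,
        depth_scale=depth_scale,
        depth_trunc=sys.float_info.max,
        convert_rgb_to_intensity=convert_rgb_to_intensity)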