コード例 #1
0
def add_camera_intrinsics_animation(op, animated_obj_name, intrinsics_sorted,
                                    number_interpolation_frames):
    """Keyframe the intrinsic parameters of an animated camera object.

    :param op: operator (or None) forwarded to :code:`log_report`.
    :param animated_obj_name: name of the camera object in
        :code:`bpy.data.objects`.
    :param intrinsics_sorted: per-keyframe intrinsics; :code:`None` entries
        are skipped (no keyframe is inserted for them).
    :param number_interpolation_frames: number of interpolation frames
        between two consecutive keyframes.
    """

    log_report("INFO", "Adding camera intrinsic parameter animation: ...", op)

    # One keyframe every (number_interpolation_frames + 1) frames.
    step_size = number_interpolation_frames + 1
    animated_obj = bpy.data.objects[animated_obj_name]

    for index, intrinsics in enumerate(intrinsics_sorted):
        current_keyframe_index = index * step_size

        # A None entry denotes a missing (dummy) camera - skip it.
        if intrinsics is None:
            continue

        animated_obj.data.angle = intrinsics.field_of_view
        animated_obj.data.shift_x = intrinsics.shift_x
        animated_obj.data.shift_y = intrinsics.shift_y

        # NOTE(review): "angle" is set above but "lens" is keyframed below;
        # presumably this relies on Blender linking the two properties
        # (both express the focal length) - confirm against the bpy API.
        animated_obj.data.keyframe_insert(data_path="lens",
                                          index=-1,
                                          frame=current_keyframe_index)
        animated_obj.data.keyframe_insert(data_path="shift_x",
                                          index=-1,
                                          frame=current_keyframe_index)
        animated_obj.data.keyframe_insert(data_path="shift_y",
                                          index=-1,
                                          frame=current_keyframe_index)

    log_report("INFO", "Adding camera intrinsic parameter animation: Done", op)
コード例 #2
0
def adjust_render_settings_if_possible(cameras, op=None):
    """Adopt the camera resolution as scene render resolution, if uniform.

    The render resolution is only adjusted when all reconstructed cameras
    share the same width and height; otherwise a warning is logged.

    :param cameras: reconstructed cameras providing width/height attributes.
    :param op: operator (or None) forwarded to :code:`log_report`.
    """
    if not cameras:
        return

    ref_width = cameras[0].width
    ref_height = cameras[0].height

    # The render resolution is only well defined if every camera agrees.
    uniform_resolution = all(
        cam.width == ref_width and cam.height == ref_height
        for cam in cameras
    )

    if uniform_resolution:
        bpy.context.scene.render.resolution_x = ref_width
        bpy.context.scene.render.resolution_y = ref_height
    else:
        log_report(
            "WARNING",
            "Adjustment of render settings not possible, "
            + "since the reconstructed cameras show different resolutions.",
            op,
        )
コード例 #3
0
    def get_computer_vision_camera_matrix(self, blender_camera):
        """Return the world matrix of a Blender camera in computer vision convention.

        Blender and Computer Vision Camera Coordinate Frame Systems (like
        VisualSfM, Bundler) differ by their y and z axis.

        :param blender_camera: Blender camera object; must have scale (1, 1, 1).
        :return: 4x4 numpy array with the converted camera matrix.
        """

        # Only if the objects have a scale of 1, the 3x3 part
        # of the corresponding matrix_world contains a pure rotation.
        # Otherwise, it also contains scale or shear information
        if not np.allclose(tuple(blender_camera.scale), (1, 1, 1)):
            log_report(
                "ERROR",
                "blender_camera.scale: " + str(blender_camera.scale),
                self,
            )
            assert False

        camera_matrix = np.array(blender_camera.matrix_world)
        # For a pure rotation the inverse equals the transpose.
        gt_camera_rotation_inverse = camera_matrix.copy()[0:3, 0:3]
        gt_camera_rotation = gt_camera_rotation_inverse.T

        # Important: Blender uses a camera coordinate frame system, which looks down the negative z-axis.
        # This differs from the camera coordinate systems used by most SfM tools/frameworks.
        # Thus, rotate the camera rotation matrix by 180 degrees (i.e. invert the y and z axis).
        gt_camera_rotation = self.invert_y_and_z_axis(gt_camera_rotation)
        gt_camera_rotation_inverse = gt_camera_rotation.T

        # Write the converted rotation back while keeping the translation part.
        rotated_camera_matrix_around_x_by_180 = camera_matrix.copy()
        rotated_camera_matrix_around_x_by_180[0:3,
                                              0:3] = gt_camera_rotation_inverse
        return rotated_camera_matrix_around_x_by_180
コード例 #4
0
    def parse_colmap_model_folder(
        model_idp,
        image_dp,
        image_fp_type,
        depth_map_idp,
        suppress_distortion_warnings,
        op=None,
    ):
        """Parse a :code:`Colmap` model."""
        log_report("INFO", "Parse Colmap model folder: " + model_idp, op)

        assert ColmapFileHandler._is_valid_model_folder(model_idp)
        model_ext = ColmapFileHandler._get_model_folder_ext(model_idp)

        # "cameras" describe the camera models, "images" carry the pose
        # information and "points3D" the sparse reconstruction.
        col_cameras, col_images, col_points3D = read_model(
            model_idp, ext=model_ext
        )

        cameras = ColmapFileHandler._convert_cameras(
            col_cameras,
            col_images,
            image_dp,
            image_fp_type,
            depth_map_idp,
            suppress_distortion_warnings,
            op,
        )
        points3D = ColmapFileHandler._convert_points(col_points3D)

        return cameras, points3D
コード例 #5
0
    def import_photogrammetry_mesh(self, mesh_fp, reconstruction_collection):
        """Import an ``.obj`` or ``.ply`` mesh into the reconstruction collection.

        Does nothing when mesh import is disabled or ``mesh_fp`` is None.

        :param mesh_fp: file path of the mesh (".obj" or ".ply").
        :param reconstruction_collection: collection the mesh is linked to.
        """
        if self.import_mesh and mesh_fp is not None:
            log_report("INFO", "Importing mesh: ...", self)
            previous_collection = bpy.context.collection

            if os.path.splitext(mesh_fp)[1].lower() == ".obj":
                # https://docs.blender.org/api/current/bpy.ops.import_scene.html
                bpy.ops.import_scene.obj(
                    filepath=mesh_fp, axis_forward="Y", axis_up="Z"
                )
            elif os.path.splitext(mesh_fp)[1].lower() == ".ply":
                # https://docs.blender.org/api/current/bpy.ops.import_mesh.html
                bpy.ops.import_mesh.ply(filepath=mesh_fp)
            else:
                # Unsupported mesh file extension.
                assert False

            # Move the freshly imported object from the active collection
            # into the reconstruction collection.
            imported_object = bpy.context.selected_objects[-1]
            reconstruction_collection.objects.link(imported_object)
            previous_collection.objects.unlink(imported_object)

            mesh_has_texture = len(imported_object.data.materials) > 0
            mesh_has_vertex_color = "Col" in imported_object.data.vertex_colors

            # Vertex colors are only used when no texture material exists.
            if mesh_has_texture:
                if self.add_mesh_color_emission:
                    add_color_emission_to_material(imported_object)
            else:
                if mesh_has_vertex_color:
                    add_mesh_vertex_color_material(
                        imported_object,
                        "VertexColorMaterial",
                        add_mesh_color_emission=self.add_mesh_color_emission,
                    )

            log_report("INFO", "Importing mesh: Done", self)
    def _parse_points_from_json_data(json_data, image_index_to_camera_index,
                                     op):
        """Parse the 3D points ("structure") of an SfM/JSON dictionary.

        :param json_data: parsed json content of the SfM file.
        :param image_index_to_camera_index: unused here; kept for a
            signature consistent with the camera parsing counterpart.
        :param op: operator (or None) forwarded to :code:`log_report`.
        :return: list of :code:`Point` instances (empty on invalid input).
        """
        points = []
        if "structure" not in json_data:
            log_report(
                "ERROR",
                # Fix: removed the double space ("contain  the") produced by
                # the original string concatenation.
                "FILE FORMAT ERROR: Incorrect SfM/JSON file. Must contain "
                + "the SfM reconstruction results: structure.",
                op,
            )
            return points

        for json_point in json_data["structure"]:
            points.append(
                Point(
                    coord=np.array(json_point["X"], dtype=float),
                    color=np.array(json_point["color"], dtype=int),
                    id=int(json_point["landmarkId"]),
                    scalars=[],
                )
            )
        return points
def redraw_points(dummy):
    """Re-register point cloud draw callbacks and redraw the 3D view.

    Scenes without point clouds are detected via a cheap flag lookup, so
    this handler adds no noticeable overhead for them.
    """
    if "contains_opengl_point_clouds" not in bpy.context.scene:
        return

    log_report(
        "INFO",
        "Checking scene for missing point cloud draw handlers",
        dummy,
    )
    for obj in bpy.data.objects:
        if "particle_coords" not in obj or "particle_colors" not in obj:
            continue

        draw_manager = DrawManager.get_singleton()
        draw_manager.register_points_draw_callback(
            obj, obj["particle_coords"], obj["particle_colors"]
        )
        draw_manager.set_point_size(
            bpy.context.scene.opengl_panel_viz_settings.viz_point_size
        )

    for area in bpy.context.screen.areas:
        if area.type == "VIEW_3D":
            area.tag_redraw()
            break
def add_camera_animation(
    op,
    cameras,
    parent_collection,
    number_interpolation_frames,
    interpolation_type,
    consider_missing_cameras_during_animation,
    remove_rotation_discontinuities,
    image_dp,
    image_fp_type,
):
    """Add a single animated camera that visits all reconstructed poses.

    :param op: operator (or None) forwarded to :code:`log_report`.
    :param cameras: reconstructed cameras defining the animation keyframes.
    :param parent_collection: collection the animated camera is added to.
    :param number_interpolation_frames: frames between two keyframes.
    :param interpolation_type: optional fcurve interpolation type.
    :param consider_missing_cameras_during_animation: insert dummy cameras
        for images without reconstruction, keeping keyframe spacing aligned
        with the image sequence.
    :param remove_rotation_discontinuities: fix quaternion sign flips.
    :param image_dp: image directory used to detect missing cameras.
    :param image_fp_type: file path type used to detect missing cameras.
    """
    # Fix: forward op (was omitted, unlike every other log_report call).
    log_report("INFO", "Adding Camera Animation: ...", op)

    if len(cameras) == 0:
        return

    if consider_missing_cameras_during_animation:
        cameras = enhance_cameras_with_dummy_cameras(op, cameras, image_dp,
                                                     image_fp_type)

    # Using the first reconstructed camera as template for the animated camera. The values
    # are adjusted with add_transformation_animation() and add_camera_intrinsics_animation().
    some_cam = cameras[0]
    bcamera = add_single_camera(op, "Animated Camera", some_cam)
    cam_obj = add_obj(bcamera, "Animated Camera", parent_collection)
    cameras_sorted = sorted(cameras,
                            key=lambda camera: camera.get_relative_fp())

    transformations_sorted = []
    camera_intrinsics_sorted = []
    for camera in cameras_sorted:
        if isinstance(camera, DummyCamera):
            # Dummy cameras carry no pose/intrinsics; the None entries are
            # skipped by the animation helpers below.
            matrix_world = None
            camera_intrinsics = None
        else:
            matrix_world = compute_camera_matrix_world(camera)
            shift_x, shift_y = compute_shift(camera,
                                             relativ_to_largest_extend=True)
            camera_intrinsics = CameraIntrinsics(camera.get_field_of_view(),
                                                 shift_x, shift_y)

        transformations_sorted.append(matrix_world)
        camera_intrinsics_sorted.append(camera_intrinsics)

    add_transformation_animation(
        op=op,
        animated_obj_name=cam_obj.name,
        transformations_sorted=transformations_sorted,
        number_interpolation_frames=number_interpolation_frames,
        interpolation_type=interpolation_type,
        remove_rotation_discontinuities=remove_rotation_discontinuities,
    )

    add_camera_intrinsics_animation(
        op=op,
        animated_obj_name=cam_obj.name,
        intrinsics_sorted=camera_intrinsics_sorted,
        number_interpolation_frames=number_interpolation_frames,
    )

    # Consistency: the other animation functions log a "Done" message.
    log_report("INFO", "Adding Camera Animation: Done", op)
コード例 #9
0
def unregister():
    """Unregister the preferences, importers, exporters and panels."""
    bpy.utils.unregister_class(PhotogrammetryImporterPreferences)

    unregister_importers()
    unregister_exporters()

    bpy.utils.unregister_class(OpenGLPanel)

    log_report("INFO", "Unregistered {}".format(bl_info["name"]))
 def execute(self, context):
     """Install the required python dependencies via pip.

     :param context: Blender context (unused).
     :return: ``{"FINISHED"}`` on success, ``{"CANCELLED"}`` on failure.
     """
     try:
         install_pip()
         for dependency in dependencies:
             install_package(dependency.package_name)
             add_module(dependency.import_name)
     except (subprocess.CalledProcessError, ImportError) as err:
         # Fix: pass the operator so the error is reported in Blender's UI,
         # consistent with the other log_report calls in this addon.
         log_report("ERROR", str(err), self)
         return {"CANCELLED"}
     return {"FINISHED"}
コード例 #11
0
    def draw_points_callback(self, draw_manager, object_anchor, positions,
                             colors):
        """Draw handler callback rendering the point cloud of an anchor object.

        The GPU batch is only rebuilt when the anchor pose changed. If the
        anchor object has been deleted, the handler unregisters itself.

        :param draw_manager: manager used to unregister deleted anchors.
        :param object_anchor: Blender object the point cloud is attached to.
        :param positions: point coordinates relative to the anchor.
        :param colors: per-point colors.
        """
        handle_is_valid = True
        try:
            # Check if object still exists
            object_anchor_name = object_anchor.name
        except Exception:
            # Fix: was a bare "except:", which also swallows SystemExit and
            # KeyboardInterrupt. Accessing a deleted Blender object raises
            # ReferenceError, which "Exception" still covers.
            handle_is_valid = False

        if handle_is_valid:
            if object_anchor_name in bpy.data.objects:

                # Use the visibility of the object to enable /
                # disable the drawing of the point cloud
                if bpy.data.objects[object_anchor_name].visible_get():

                    # Update the batch depending on the anchor pose (only if necessary)
                    object_anchor_has_changed = not np.array_equal(
                        self.object_anchor_pose_previous,
                        object_anchor.matrix_world,
                    )
                    if self.batch_cached is None or object_anchor_has_changed:

                        self.object_anchor_pose_previous = np.copy(
                            object_anchor.matrix_world)
                        transf_pos_list = compute_transformed_coords(
                            object_anchor.matrix_world, positions)

                        self.batch_cached = batch_for_shader(
                            self.shader,
                            "POINTS",
                            {
                                "pos": transf_pos_list,
                                "color": colors
                            },
                        )

                    self.shader.bind()
                    bgl.glPointSize(self.point_size)
                    bgl.glEnable(bgl.GL_DEPTH_TEST)
                    bgl.glDepthMask(bgl.GL_TRUE)

                    self.batch_cached.draw(self.shader)

        else:
            log_report("INFO",
                       "Removing draw handler of deleted point cloud handle")
            if self.draw_handler_handle is not None:
                bpy.types.SpaceView3D.draw_handler_remove(
                    self.draw_handler_handle, "WINDOW")
                self.draw_handler_handle = None
                self.batch_cached = None
                draw_manager.delete_anchor(object_anchor)
    def write_ply_file(
        output_path_to_file,
        vertices,
        with_colors=True,
        with_normals=False,
        faces=None,
        plain_text_output=True,
        with_measurements=False,
    ):
        """Write vertices (and optionally faces) to a ``.ply`` file.

        :param output_path_to_file: output file path of the ply file.
        :param vertices: vertices to write.
        :param with_colors: include per-vertex colors.
        :param with_normals: include per-vertex normals.
        :param faces: optional faces (triangles); if None or empty, only
            vertices are written.
        :param plain_text_output: write ascii (True) or binary (False) ply.
        :param with_measurements: include additional measurement properties.
        """

        log_report("INFO", "write_ply_file: " + output_path_to_file)

        ply_data_vertex_data_dtype_list = PLYFileHandler.build_type_list(
            vertices, with_colors, with_normals, with_measurements
        )

        log_report(
            "INFO",
            "ply_data_vertex_data_dtype_list"
            + str(ply_data_vertex_data_dtype_list),
        )

        # Printing output_ply_data_vertex_element SHOWS ONLY THE HEADER
        output_ply_data_vertex_element = (
            PLYFileHandler.__vertices_to_ply_vertex_element(
                vertices, ply_data_vertex_data_dtype_list
            )
        )

        if faces is None or len(faces) == 0:
            log_report("INFO", "Write File With Vertices Only (no faces)")
            output_data = PlyData(
                [output_ply_data_vertex_element], text=plain_text_output
            )
        else:
            log_report("INFO", "Write File With Faces")
            log_report("INFO", "Number faces" + str(len(faces)))

            # Each face is a triangle given by three vertex indices.
            ply_data_face_data_type = [("vertex_indices", "i4", (3,))]

            # we do not define colors for faces,
            # since we use the vertex colors to colorize the face

            output_ply_data_face_element = (
                PLYFileHandler.__faces_to_ply_face_element(
                    faces, ply_data_face_data_type
                )
            )
            output_data = PlyData(
                [output_ply_data_vertex_element, output_ply_data_face_element],
                text=plain_text_output,
            )

        output_data.write(output_path_to_file)
コード例 #13
0
    def convert_depth_map_to_world_coords(self,
                                          depth_map_display_sparsity=100):
        """Convert the depth map of this camera to world coordinates.

        :param depth_map_display_sparsity: subsampling factor forwarded to
            the camera-coordinate conversion.
        :return: the depth map positions in world coordinates.
        """
        log_report("INFO", "Converting depth map to world coordinates: ...")

        coords_cam = self.convert_depth_map_to_cam_coords(
            depth_map_display_sparsity
        )
        coords_world = self.cam_to_world_coord_multiple_coords(coords_cam)

        log_report("INFO", "Converting depth map to world coordinates: Done")
        return coords_world
コード例 #14
0
def add_transformation_animation(
    op,
    animated_obj_name,
    transformations_sorted,
    number_interpolation_frames,
    interpolation_type=None,
    remove_rotation_discontinuities=True,
):
    """Keyframe the pose (location and rotation) of an object.

    :param op: operator (or None) forwarded to :code:`log_report`.
    :param animated_obj_name: name of the object in :code:`bpy.data.objects`.
    :param transformations_sorted: per-keyframe 4x4 world matrices;
        :code:`None` entries are skipped.
    :param number_interpolation_frames: frames between two keyframes.
    :param interpolation_type: optional fcurve interpolation type.
    :param remove_rotation_discontinuities: fix quaternion sign flips.
    """
    log_report("INFO", "Adding transformation animation: ...", op)

    scn = bpy.context.scene
    scn.frame_start = 0
    # One keyframe every (number_interpolation_frames + 1) frames.
    step_size = number_interpolation_frames + 1
    scn.frame_end = step_size * len(transformations_sorted)
    animated_obj = bpy.data.objects[animated_obj_name]

    for index, transformation in enumerate(transformations_sorted):
        # log_report('INFO', 'index: ' + str(index), op)
        # log_report('INFO', 'transformation: ' + str(transformation), op)

        current_keyframe_index = index * step_size

        # A None entry denotes a missing (dummy) camera - no keyframe is set.
        if transformation is None:
            continue

        animated_obj.matrix_world = Matrix(transformation)

        animated_obj.keyframe_insert(data_path="location",
                                     index=-1,
                                     frame=current_keyframe_index)

        # Don't use euler rotations, they show too many discontinuities
        # animated_obj.keyframe_insert(
        #   data_path="rotation_euler",
        #   index=-1,
        #   frame=current_keyframe_index)

        animated_obj.rotation_mode = "QUATERNION"
        animated_obj.keyframe_insert(
            data_path="rotation_quaternion",
            index=-1,
            frame=current_keyframe_index,
        )

        if remove_rotation_discontinuities:
            # q and -q represent the same rotation
            remove_quaternion_discontinuities(animated_obj)

        if interpolation_type is not None:
            set_fcurve_interpolation(animated_obj, interpolation_type)

    log_report("INFO", "Adding transformation animation: Done", op)
 def parse_camera_image_files(cameras, default_width, default_height, op):
     """Determine the image size of each camera from its image file.

     Parsing stops at the first image that could not be read; the
     (partially updated) cameras and a success flag are returned.
     """
     log_report("INFO", "parse_camera_image_files: ", op)
     success = True
     for camera in cameras:
         success, width, height = ImageFileHandler.parse_camera_image_file(
             camera.get_absolute_fp(), default_width, default_height, op)
         camera.width = width
         camera.height = height
         if not success:
             break
     log_report("INFO", "parse_camera_image_files: Done", op)
     return cameras, success
def set_principal_point_for_cameras(cameras, default_pp_x, default_pp_y, op):
    """Initialize missing camera principal points.

    If both default values are provided (i.e. not NaN) they are used;
    otherwise the image center of the first camera acts as default. Cameras
    with an already initialized principal point are left untouched.

    :param cameras: cameras to update (first camera must have width/height
        set when no defaults are given).
    :param default_pp_x: default principal point x (NaN = not provided).
    :param default_pp_y: default principal point y (NaN = not provided).
    :param op: operator (or None) forwarded to :code:`log_report`.
    """
    if not math.isnan(default_pp_x) and not math.isnan(default_pp_y):
        # Fix: forward op (was omitted) so the warning reaches the operator.
        log_report("WARNING", "Setting principal points to default values!", op)
    else:
        log_report("WARNING", "Setting principal points to image centers!", op)
        assert cameras[0].width is not None and cameras[0].height is not None
        default_pp_x = cameras[0].width / 2.0
        default_pp_y = cameras[0].height / 2.0

    for camera in cameras:
        if not camera.is_principal_point_initialized():
            camera.set_principal_point([default_pp_x, default_pp_y])
コード例 #17
0
    def execute(self, context):
        """Import an :code:`OpenSfM` reconstruction file."""
        ifp = os.path.join(self.directory, self.filepath)
        log_report("INFO", "path: " + str(ifp), self)

        self.image_dp = self.get_default_image_path(ifp, self.image_dp)
        log_report("INFO", "image_dp: " + str(self.image_dp), self)

        cameras, points = OpenSfMJSONFileHandler.parse_opensfm_file(
            ifp,
            self.image_dp,
            self.image_fp_type,
            self.reconstruction_number,
            self.suppress_distortion_warnings,
            self,
        )

        log_report("INFO", "Number cameras: " + str(len(cameras)), self)
        log_report("INFO", "Number points: " + str(len(points)), self)

        collection = add_collection("Reconstruction Collection")
        self.import_photogrammetry_cameras(cameras, collection)
        self.import_photogrammetry_points(points, collection)
        self.apply_general_options()

        return {"FINISHED"}
コード例 #18
0
def register():
    """Register importers, exporters and panels."""
    bpy.utils.register_class(PhotogrammetryImporterPreferences)

    # The addon preferences determine which importers/exporters are enabled.
    prefs = bpy.context.preferences.addons[__name__].preferences
    register_importers(prefs)
    register_exporters(prefs)

    bpy.utils.register_class(OpenGLPanel)

    log_report(
        "INFO",
        "Registered {} with {} modules".format(bl_info["name"], len(modules)),
    )
コード例 #19
0
    def execute(self, context):
        """Import a :code:`Colmap` model / workspace folder."""
        # Remove the trailing slash of the directory string.
        idp = os.path.dirname(self.directory)
        log_report("INFO", "path: " + str(idp), self)

        self.image_dp = self.get_default_image_path(idp, self.image_dp)
        cameras, points, mesh_ifp = ColmapFileHandler.parse_colmap_folder(
            idp,
            self.image_dp,
            self.image_fp_type,
            self.suppress_distortion_warnings,
            self,
        )

        log_report("INFO", "Number cameras: " + str(len(cameras)), self)
        log_report("INFO", "Number points: " + str(len(points)), self)
        log_report("INFO", "Mesh file path: " + str(mesh_ifp), self)

        collection = add_collection("Reconstruction Collection")
        self.import_photogrammetry_cameras(cameras, collection)
        self.import_photogrammetry_points(points, collection)
        self.import_photogrammetry_mesh(mesh_ifp, collection)
        self.apply_general_options()

        return {"FINISHED"}
コード例 #20
0
    def parse_open3d_file(open3d_ifp, image_dp, image_fp_type, op):
        """Parse an :code:`Open3D` (:code:`.json` or :code:`.log`) file.

        The :code:`.json` format supports intrinsics as well as
        extrinsic parameters, whereas the :code:`.log` (`Redwood
        <http://redwood-data.org/indoor/fileformat.html>`_) format contains
        only extrinsic parameters.
        """
        log_report("INFO", "parse_open3d_file: ...", op)
        log_report("INFO", "open3d_ifp: " + open3d_ifp, op)
        log_report("INFO", "image_dp: " + image_dp, op)

        image_relative_fp_list = get_image_file_paths_in_dir(
            image_dp,
            relative_path_only=True,
            without_ext=False,
            sort_result=True,
            recursive=True,
        )

        # Dispatch on the file extension; unsupported extensions fail hard.
        ext = os.path.splitext(open3d_ifp)[1].lower()
        if ext == ".json":
            parse_fn = Open3DFileHandler._parse_open3d_json_file
        elif ext == ".log":
            parse_fn = Open3DFileHandler._parse_open3d_log_file
        else:
            assert False

        cams = parse_fn(
            open3d_ifp, image_dp, image_relative_fp_list, image_fp_type, op
        )

        log_report("INFO", "parse_open3d_file: Done", op)
        return cams
コード例 #21
0
    def parse_opensfm_file(
        input_opensfm_fp,
        image_dp,
        image_fp_type,
        suppress_distortion_warnings,
        reconstruction_idx,
        op,
    ):
        """Parse an :code:`OpenSfM` (:code:`.json`) reconstruction file.

        :param input_opensfm_fp: path of the OpenSfM json file.
        :param image_dp: image directory used to resolve image file paths.
        :param image_fp_type: file path type of the camera images.
        :param suppress_distortion_warnings: suppress distortion warnings.
        :param reconstruction_idx: index of the reconstruction to import.
        :param op: operator (or None) forwarded to :code:`log_report`.
        :return: tuple of (cameras, points).
        """
        log_report("INFO", "parse_opensfm_file: ...", op)
        log_report("INFO", "input_opensfm_fp: " + input_opensfm_fp, op)
        # Fix: close the file handle (the original leaked it).
        with open(input_opensfm_fp, "r") as input_file:
            json_data = json.load(input_file)
        num_reconstructions = len(json_data)
        reconstruction_data = json_data[reconstruction_idx]
        if num_reconstructions > 1:
            log_report(
                "WARNING",
                "OpenSfM file contains multiple reconstructions. Only reconstruction with index "
                + str(reconstruction_idx)
                + " is imported.",
                op,
            )

        cams = OpenSfMJSONFileHandler.parse_cameras(
            reconstruction_data,
            image_dp,
            image_fp_type,
            suppress_distortion_warnings,
            op,
        )
        points = OpenSfMJSONFileHandler.parse_points(reconstruction_data, op)
        log_report("INFO", "parse_opensfm_file: Done", op)
        return cams, points
    def parse_colmap_folder(idp, image_dp, image_fp_type,
                            suppress_distortion_warnings, op):
        """Parse a :code:`Colmap` model folder or workspace folder.

        :param idp: input directory (plain model folder or workspace folder).
        :param image_dp: image directory; may be overridden by the image
            directory found inside a workspace folder.
        :param image_fp_type: file path type of the camera images.
        :param suppress_distortion_warnings: suppress distortion warnings.
        :param op: operator (or None) forwarded to :code:`log_report`.
        :return: tuple of (cameras, points, mesh file path or None).
        """

        log_report("INFO", "idp: " + str(idp), op)

        if ColmapFileHandler.is_valid_model_folder(idp):
            # A plain model folder provides neither mesh nor depth maps.
            model_idp = idp
            mesh_ifp = None
            depth_map_idp = None
        elif ColmapFileHandler.is_valid_workspace_folder(idp):
            (
                model_idp,
                image_idp_workspace,
                depth_map_idp,
                mesh_ifp,
            ) = ColmapFileHandler.parse_colmap_workspace_folder(idp)
            # Prefer the image directory shipped inside the workspace.
            if os.path.isdir(image_idp_workspace):
                image_dp = image_idp_workspace
                log_report("INFO", "Using image directory in workspace.", op)
        else:
            log_report("ERROR", "Invalid colmap model / workspace", op)
            assert False

        log_report("INFO", "image_dp: " + image_dp, op)
        cameras, points = ColmapFileHandler.parse_colmap_model_folder(
            model_idp,
            image_dp,
            image_fp_type,
            depth_map_idp,
            suppress_distortion_warnings,
            op,
        )

        return cameras, points, mesh_ifp
    def parse_opensfm_file(
        input_opensfm_fp,
        image_dp,
        image_fp_type,
        reconstruction_idx,
        suppress_distortion_warnings=False,
        op=None,
    ):
        """Parse a :code:`OpenSfM` (:code:`.json`) file.

        :param input_opensfm_fp: path of the OpenSfM json file.
        :param image_dp: image directory used to resolve image file paths.
        :param image_fp_type: file path type of the camera images.
        :param reconstruction_idx: index of the reconstruction to import.
        :param suppress_distortion_warnings: suppress distortion warnings.
        :param op: operator (or None) forwarded to :code:`log_report`.
        :return: tuple of (cameras, points).
        """
        log_report("INFO", "parse_opensfm_file: ...", op)
        log_report("INFO", "input_opensfm_fp: " + input_opensfm_fp, op)
        # Fix: close the file handle (the original leaked it).
        with open(input_opensfm_fp, "r") as input_file:
            json_data = json.load(input_file)
        reconstruction_data = json_data[reconstruction_idx]
        if len(json_data) > 1:
            log_report(
                "WARNING",
                # Fix: the concatenation produced a double space
                # ("Only  reconstruction").
                "OpenSfM file contains multiple reconstructions. Only" +
                f" reconstruction with index {reconstruction_idx} is" +
                " imported.",
                op,
            )

        cams = OpenSfMJSONFileHandler._parse_cameras(
            reconstruction_data,
            image_dp,
            image_fp_type,
            suppress_distortion_warnings,
            op,
        )
        points = OpenSfMJSONFileHandler._parse_points(reconstruction_data, op)
        log_report("INFO", "parse_opensfm_file: Done", op)
        return cams, points
    def parse_open3d_file(open3d_ifp, image_dp, image_fp_type, op):
        """Parse an :code:`Open3D` camera trajectory file.

        # http://www.open3d.org/docs/release/python_api/open3d.camera.PinholeCameraTrajectory.html
        Open3D supports different file formats:
        - *.json (Intrinsics and Extrinsics)
        - *.log (Only extrinsics, Redwood format (http://redwood-data.org/indoor/fileformat.html))
        - *.txt (Only extrinsics, TUM format (https://vision.in.tum.de/data/datasets/rgbd-dataset/file_formats))
        :return: cameras
        """
        log_report("INFO", "parse_open3d_file: ...", op)
        log_report("INFO", "open3d_ifp: " + open3d_ifp, op)
        log_report("INFO", "image_dp: " + image_dp, op)

        # The image files in image_dp provide the camera image names.
        image_relative_fp_list = get_image_file_paths_in_dir(
            image_dp,
            relative_path_only=True,
            without_ext=False,
            sort_result=True,
            recursive=True,
        )

        cams = []
        if os.path.splitext(open3d_ifp)[1].lower() == ".json":
            cams = Open3DFileHandler._parse_open3d_json_file(
                open3d_ifp, image_dp, image_relative_fp_list, image_fp_type,
                op)
        elif os.path.splitext(open3d_ifp)[1].lower() == ".log":
            cams = Open3DFileHandler._parse_open3d_log_file(
                open3d_ifp, image_dp, image_relative_fp_list, image_fp_type,
                op)
        else:
            # NOTE(review): despite the docstring mentioning ".txt" (TUM),
            # only ".json" and ".log" are handled here - any other
            # extension fails this assert.
            assert False

        log_report("INFO", "parse_open3d_file: Done", op)
        return cams
コード例 #25
0
    def parse_nvm_file(
        input_visual_fsm_file_name,
        image_dp,
        image_fp_type,
        suppress_distortion_warnings,
        op=None,
    ):
        """Parse a :code:`VisualSfM` (:code:`.nvm`) file.

        :param input_visual_fsm_file_name: path of the nvm file.
        :param image_dp: image directory used to resolve image file paths.
        :param image_fp_type: file path type of the camera images.
        :param suppress_distortion_warnings: suppress distortion warnings.
        :param op: operator (or None) forwarded to :code:`log_report`.
        :return: tuple of (cameras, points).
        """
        log_report("INFO", "Parse NVM file: " + input_visual_fsm_file_name, op)
        # Documentation of *.NVM data format
        # http://ccwu.me/vsfm/doc.html#nvm

        # In a simple case there is only one model

        # Each reconstructed <model> contains the following
        # <Number of cameras>   <List of cameras>
        # <Number of 3D points> <List of points>

        # Fix: use a context manager so the file handle is always closed
        # (the original never closed it).
        with open(input_visual_fsm_file_name, "r") as input_file:
            # Read the first two lines (fixed)
            current_line = (input_file.readline()).rstrip()
            calibration_matrix = NVMFileHandler._parse_fixed_calibration(
                current_line, op)
            current_line = (input_file.readline()).rstrip()
            assert current_line == ""

            amount_cameras = int((input_file.readline()).rstrip())
            log_report(
                "INFO",
                "Amount Cameras (Images in NVM file): " + str(amount_cameras),
                op,
            )

            cameras = NVMFileHandler._parse_cameras(
                input_file,
                amount_cameras,
                calibration_matrix,
                image_dp,
                image_fp_type,
                suppress_distortion_warnings,
                op,
            )
            current_line = (input_file.readline()).rstrip()
            assert current_line == ""
            current_line = (input_file.readline()).rstrip()
            if current_line.isdigit():
                amount_points = int(current_line)
                log_report(
                    "INFO",
                    "Amount Sparse Points (Points in NVM file): " +
                    str(amount_points),
                    op,
                )
                points = NVMFileHandler._parse_nvm_points(input_file,
                                                          amount_points)
            else:
                points = []

        log_report("INFO", "Parse NVM file: Done", op)
        return cameras, points
コード例 #26
0
    def execute(self, context):
        """Re-register all importers and exporters with current preferences."""
        log_report("INFO", "Update importers and exporters: ...", self)
        addon_name = get_addon_name()
        prefs = bpy.context.preferences.addons[addon_name].preferences

        unregister_importers()
        register_importers(prefs)

        unregister_exporters()
        register_exporters(prefs)

        log_report("INFO", "Update importers and exporters: Done", self)
        return {"FINISHED"}
コード例 #27
0
    def parse_views(
        views_idp,
        default_width,
        default_height,
        add_depth_maps_as_point_cloud,
        op=None,
    ):
        """Parse the :code:`views` directory in the :code:`MVE` workspace.

        :param views_idp: path of the "views" directory.
        :param default_width: fallback image width.
        :param default_height: fallback image height.
        :param add_depth_maps_as_point_cloud: also attach depth maps found
            in the view subdirectories to the cameras.
        :param op: operator (or None) forwarded to :code:`log_report`.
        :return: list of parsed cameras.
        """
        cameras = []
        subdirs = get_subdirs(views_idp)
        for subdir in subdirs:
            folder_name = os.path.basename(subdir)
            # folder_name = view_0000.mve
            camera_name = folder_name.split("_")[1].split(".")[0]
            undistorted_img_ifp = os.path.join(subdir, "undistorted.png")
            success, width, height = ImageFileHandler.read_image_size(
                undistorted_img_ifp,
                default_width=default_width,
                default_height=default_height,
                op=op,
            )
            assert success

            # The camera model parameters are stored in "meta.ini".
            meta_ifp = os.path.join(subdir, "meta.ini")
            camera = MVEFileHandler.parse_meta(
                meta_ifp, width, height, camera_name, op
            )

            if add_depth_maps_as_point_cloud:
                # Use the first (i.e. highest-resolution) depth map level
                # present among "depth-L0.mvei" ... "depth-L8.mvei".
                for level in range(9):
                    depth_ifp = os.path.join(
                        subdir, "depth-L" + str(level) + ".mvei"
                    )
                    if os.path.isfile(depth_ifp):
                        camera.set_depth_map(
                            depth_ifp,
                            MVEFileHandler.read_depth_map,
                            Camera.DEPTH_MAP_WRT_UNIT_VECTORS,
                            shift_depth_map_to_pixel_center=True,
                        )
                        break
                if camera.depth_map_fp is None:
                    log_report(
                        "WARNING", "No depth map found in " + subdir, op
                    )

            cameras.append(camera)
        return cameras
コード例 #28
0
def register():
    """Register the preferences, importers, exporters and panels of the addon."""
    bpy.utils.register_class(PhotogrammetryImporterPreferences)

    # The addon preferences determine which importers/exporters are enabled.
    import_export_prefs = bpy.context.preferences.addons[__name__].preferences
    register_importers(import_export_prefs)
    register_exporters(import_export_prefs)

    bpy.utils.register_class(OpenGLPanel)

    # === Uncomment for fast debugging ===
    # bpy.app.handlers.load_post.append(load_handler)

    log_report(
        "INFO",
        "Registered {} with {} modules".format(bl_info["name"], len(modules)),
    )
 def _get_node(json_graph, node_type, node_number, op):
     if node_number == -1:
         return MeshroomFileHandler._get_latest_node(json_graph, node_type)
     else:
         node_key = node_type + "_" + str(node_number)
         if node_key in json_graph:
             return json_graph[node_key]
         else:
             log_report(
                 "ERROR",
                 "Invalid combination of node type (i.e. " + node_type +
                 ") " + "and node number (i.e. " + str(node_number) +
                 ") provided",
                 op,
             )
             assert False
コード例 #30
0
def set_image_size_for_cameras(cameras,
                               default_width,
                               default_height,
                               op=None):
    """Set image sizes for cameras and return a boolean success flag.

    Iteration stops at the first image whose size could not be determined.
    """
    log_report("INFO", "set_image_size_for_cameras: ", op)
    success = True
    for camera in cameras:
        success, width, height = ImageFileHandler.read_image_size(
            camera.get_absolute_fp(), default_width, default_height, op)
        camera.width = width
        camera.height = height
        if not success:
            break
    log_report("INFO", "set_image_size_for_cameras: Done", op)
    return success