def install_pip(self, lazy, op=None):
    """Install pip."""
    try:
        if lazy and self.pip_dependency_status.installation_status:
            import pip

            log_report(
                "INFO",
                "Pip already installed. Using existing pip installation"
                + f" ({pip.__version__})",
                op=op,
            )
            return
    except ImportError:
        pass
    log_report("INFO", "Installing pip!", op=op)
    import os
    import ensurepip

    # https://github.com/robertguetzkow/blender-python-examples/blob/master/add-ons/install-dependencies/install-dependencies.py
    # Note that ensurepip.bootstrap() calls pip, which adds the
    # environment variable PIP_REQ_TRACKER. After ensurepip.bootstrap()
    # finishes execution, the directory is deleted.
    # However, any subprocesses calling pip will use the environment
    # variable PIP_REQ_TRACKER (which points to an invalid path).
    # Thus, we need to remove the invalid environment variable.
    ensurepip.bootstrap()
    os.environ.pop("PIP_REQ_TRACKER", None)
    self.pip_dependency_status.installation_status = True
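# Once pip has been bootstrapped, optional dependencies are typically
# installed by calling pip in a subprocess of Blender's Python interpreter.
# A minimal, hypothetical sketch (the add-on's actual install routine and the
# value of sys.executable may differ between Blender versions):
def _install_package_sketch(package_name):
    """Install a package into Blender's Python environment via pip."""
    import subprocess
    import sys

    subprocess.run(
        [sys.executable, "-m", "pip", "install", package_name],
        check=True,
    )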
def redraw_points(dummy):
    """Redraw points of the previous Blender session."""
    # This test is very cheap, so it will not cause
    # huge overheads for scenes without point clouds
    if "contains_opengl_point_clouds" in bpy.context.scene:
        log_report(
            "INFO",
            "Checking scene for missing point cloud draw handlers",
            op=None,
        )
        for obj in bpy.data.objects:
            if "particle_coords" in obj and "particle_colors" in obj:
                coords = obj["particle_coords"]
                colors = obj["particle_colors"]

                draw_manager = DrawManager.get_singleton()
                draw_manager.register_points_draw_callback(
                    obj, coords, colors
                )
                viz_point_size = (
                    bpy.context.scene.opengl_panel_settings.viz_point_size
                )
                draw_manager.set_point_size(viz_point_size)

        for area in bpy.context.screen.areas:
            if area.type == "VIEW_3D":
                area.tag_redraw()
                break
def _get_computer_vision_camera_matrix(self, blender_camera):
    # The 3x3 part of matrix_world contains a pure rotation only if the
    # object's scale is (1, 1, 1). Otherwise, it also contains scale or
    # shear information.
    if not np.allclose(tuple(blender_camera.scale), (1, 1, 1)):
        log_report(
            "ERROR",
            "blender_camera.scale: " + str(blender_camera.scale),
            self,
        )
        assert False

    camera_matrix = np.array(blender_camera.matrix_world)
    gt_camera_rotation_inverse = camera_matrix.copy()[0:3, 0:3]
    gt_camera_rotation = gt_camera_rotation_inverse.T

    # Important: Blender uses a camera coordinate frame system, which looks
    # down the negative z-axis. This differs from the camera coordinate
    # systems used by most SfM tools/frameworks. Thus, rotate the camera
    # rotation matrix by 180 degrees (i.e. invert the y and z axis).
    gt_camera_rotation = invert_y_and_z_axis(gt_camera_rotation)
    gt_camera_rotation_inverse = gt_camera_rotation.T

    rotated_camera_matrix_around_x_by_180 = camera_matrix.copy()
    rotated_camera_matrix_around_x_by_180[0:3, 0:3] = gt_camera_rotation_inverse
    return rotated_camera_matrix_around_x_by_180
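# The "rotation by 180 degrees around the x-axis" mentioned above amounts to
# negating the y- and z-axis (i.e. the second and third row) of the rotation.
# A minimal sketch of such a helper (an assumption for illustration; the
# add-on's actual invert_y_and_z_axis() implementation may differ):
def _invert_y_and_z_axis_sketch(matrix_or_vector):
    """Negate the y- and z-components (rows) of a matrix or vector."""
    import numpy as np

    inverted = np.copy(matrix_or_vector)
    inverted[1] = -inverted[1]  # flip the y-axis
    inverted[2] = -inverted[2]  # flip the z-axis
    return inverted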
def _add_camera_intrinsics_animation(
    animated_obj_name, intrinsics_sorted, number_interpolation_frames, op=None
):
    log_report("INFO", "Adding camera intrinsic parameter animation: ...", op)

    step_size = number_interpolation_frames + 1
    animated_obj = bpy.data.objects[animated_obj_name]

    for index, intrinsics in enumerate(intrinsics_sorted):
        current_keyframe_index = (index + 1) * step_size
        if intrinsics is None:
            continue
        # Setting "angle" also updates the underlying "lens" value, which is
        # the property that is keyframed below.
        animated_obj.data.angle = intrinsics.field_of_view
        animated_obj.data.shift_x = intrinsics.shift_x
        animated_obj.data.shift_y = intrinsics.shift_y

        animated_obj.data.keyframe_insert(
            data_path="lens", index=-1, frame=current_keyframe_index
        )
        animated_obj.data.keyframe_insert(
            data_path="shift_x", index=-1, frame=current_keyframe_index
        )
        animated_obj.data.keyframe_insert(
            data_path="shift_y", index=-1, frame=current_keyframe_index
        )

    log_report("INFO", "Adding camera intrinsic parameter animation: Done", op)
def _parse_points_from_json_data(json_data, image_index_to_camera_index, op):
    points = []
    is_valid_file = "structure" in json_data

    if not is_valid_file:
        log_report(
            "ERROR",
            "FILE FORMAT ERROR: Incorrect SfM/JSON file. Must contain the"
            + " SfM reconstruction results: structure.",
            op,
        )
        return points

    structure = json_data["structure"]
    for json_point in structure:
        custom_point = Point(
            coord=np.array(json_point["X"], dtype=float),
            color=np.array(json_point["color"], dtype=int),
            id=int(json_point["landmarkId"]),
            scalars=[],
        )
        points.append(custom_point)
    return points
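# For reference, each entry of the "structure" array is expected to provide at
# least the keys read above, e.g. (shortened, hypothetical values):
#
#   {
#       "landmarkId": "42",
#       "color": [128, 64, 32],
#       "X": [1.05, -0.32, 2.71]
#   }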
def adjust_render_settings_if_possible(cameras, op=None):
    """Adjust the render settings according to the camera parameters."""
    if len(cameras) == 0:
        return

    possible = True
    width = cameras[0].width
    height = cameras[0].height

    # Check if all cameras have the same resolution
    for cam in cameras:
        if cam.width != width or cam.height != height:
            possible = False
            break

    if possible:
        bpy.context.scene.render.resolution_x = width
        bpy.context.scene.render.resolution_y = height
    else:
        log_report(
            "WARNING",
            "Adjustment of render settings not possible, "
            + "since the reconstructed cameras show different resolutions.",
            op,
        )
def _get_mesh_fp(
    cls, mesh_node_type, cache_dp, json_graph, mesh_node_number, op
):
    if mesh_node_type == "Texturing":
        mesh_fp = cls._get_node_data_fp(
            cache_dp,
            json_graph,
            "Texturing",
            mesh_node_number,
            "texturedMesh.obj",
            op,
        )
    elif mesh_node_type == "MeshFiltering":
        mesh_fp = cls._get_node_data_fp(
            cache_dp,
            json_graph,
            "MeshFiltering",
            mesh_node_number,
            "mesh.obj",
            op,
        )
    elif mesh_node_type == "Meshing":
        mesh_fp = cls._get_node_data_fp(
            cache_dp,
            json_graph,
            "Meshing",
            mesh_node_number,
            "mesh.obj",
            op,
        )
    elif mesh_node_type == "AUTOMATIC":
        # Search from the most processed to the least processed mesh result
        # (Texturing -> MeshFiltering -> Meshing).
        mesh_fp = cls._get_node_data_fp(
            cache_dp,
            json_graph,
            "Texturing",
            mesh_node_number,
            "texturedMesh.obj",
            op,
        )
        if mesh_fp is None:
            mesh_fp = cls._get_node_data_fp(
                cache_dp,
                json_graph,
                "MeshFiltering",
                mesh_node_number,
                "mesh.obj",
                op,
            )
        if mesh_fp is None:
            mesh_fp = cls._get_node_data_fp(
                cache_dp,
                json_graph,
                "Meshing",
                mesh_node_number,
                "mesh.obj",
                op,
            )
    else:
        log_report("ERROR", "Selected mesh node type is not supported!", op)
        assert False
    return mesh_fp
def execute(self, context): """Export a screenshot (of the 3D view).""" log_report("INFO", "Export screenshot: ...", self) panel_settings = context.scene.opengl_panel_settings filename_ext = panel_settings.screenshot_file_format ofp = self.filepath + "." + filename_ext # Get panel settings full_screenshot = not panel_settings.only_3d_view use_camera_perspective = panel_settings.use_camera_perspective # Cache previous settings previous_cam = bpy.context.scene.camera area_3d = next(area for area in bpy.context.screen.areas if area.type == "VIEW_3D") previous_perspective = area_3d.spaces[0].region_3d.view_perspective # Create Screenshot selected_cam = get_selected_camera() if use_camera_perspective and selected_cam is not None: bpy.context.scene.camera = selected_cam area_3d.spaces[0].region_3d.view_perspective = "CAMERA" _update_ui(context) bpy.ops.screen.screenshot(filepath=ofp, full=full_screenshot, check_existing=False) # Restore previous settings area_3d.spaces[0].region_3d.view_perspective = previous_perspective bpy.context.scene.camera = previous_cam log_report("INFO", "Export screenshot: Done", self) return {"FINISHED"}
def parse_colmap_model_folder(
    model_idp,
    image_dp,
    image_fp_type,
    depth_map_idp=None,
    suppress_distortion_warnings=False,
    op=None,
):
    """Parse a :code:`Colmap` model."""
    log_report("INFO", "Parse Colmap model folder: " + model_idp, op)

    assert ColmapFileHandler._is_valid_model_folder(model_idp)
    ext = ColmapFileHandler._get_model_folder_ext(model_idp)

    # cameras represent information about the camera model
    # images contain pose information
    id_to_col_cameras, id_to_col_images, id_to_col_points3D = read_model(
        model_idp, ext=ext
    )

    cameras = ColmapFileHandler._convert_cameras(
        id_to_col_cameras,
        id_to_col_images,
        image_dp,
        image_fp_type,
        depth_map_idp,
        suppress_distortion_warnings,
        op,
    )
    points3D = ColmapFileHandler._convert_points(id_to_col_points3D)

    return cameras, points3D
def execute(self, context): """Install all optional dependencies.""" try: dependency_manager = OptionalDependencyManager.get_singleton() dependency_manager.install_dependencies() except (subprocess.CalledProcessError, ImportError) as err: log_report("ERROR", str(err)) return {"CANCELLED"} return {"FINISHED"}
def unregister():
    """Unregister importers, exporters and panels."""
    bpy.utils.unregister_class(AddonPreferences)
    Registration.unregister_importers()
    Registration.unregister_exporters()
    bpy.utils.unregister_class(OpenGLPanel)
    log_report("INFO", "Unregistered {}".format(bl_info["name"]))
def execute(self, context): """Reset import options to factory settings.""" log_report("INFO", "Reset preferences: ...", self) addon_name = _get_addon_name() import_export_prefs = bpy.context.preferences.addons[ addon_name].preferences import_export_prefs.reset_import_options() log_report("INFO", "Reset preferences: Done", self) return {"FINISHED"}
def _draw_points_callback(
    self, draw_manager, object_anchor, positions, colors
):
    """A callback function to draw a point cloud in Blender's 3D view."""
    handle_is_valid = True
    try:
        # Check if the anchor object still exists
        object_anchor_name = object_anchor.name
    except ReferenceError:
        # Blender raises a ReferenceError when accessing removed objects
        handle_is_valid = False

    if handle_is_valid:
        if object_anchor_name in bpy.data.objects:
            # Use the visibility of the object to enable /
            # disable the drawing of the point cloud
            if bpy.data.objects[object_anchor_name].visible_get():
                # Update the batch depending on the anchor pose (only if
                # necessary)
                object_anchor_has_changed = not np.array_equal(
                    self._object_anchor_pose_previous,
                    object_anchor.matrix_world,
                )
                if self._batch_cached is None or object_anchor_has_changed:
                    self._object_anchor_pose_previous = np.copy(
                        object_anchor.matrix_world
                    )
                    transf_pos_list = _compute_transformed_coords(
                        object_anchor.matrix_world, positions
                    )
                    self._batch_cached = batch_for_shader(
                        self._shader,
                        "POINTS",
                        {"pos": transf_pos_list, "color": colors},
                    )

                self._shader.bind()
                bgl.glPointSize(self._point_size)
                bgl.glEnable(bgl.GL_DEPTH_TEST)
                bgl.glDepthMask(bgl.GL_TRUE)
                self._batch_cached.draw(self._shader)
    else:
        log_report(
            "INFO", "Removing draw handler of deleted point cloud handle"
        )
        if self._draw_handler_handle is not None:
            bpy.types.SpaceView3D.draw_handler_remove(
                self._draw_handler_handle, "WINDOW"
            )
            self._draw_handler_handle = None
            self._batch_cached = None
            draw_manager.delete_anchor(object_anchor)
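# The anchor's matrix_world is applied to the raw point positions before the
# shader batch is (re)built. A minimal sketch of such a transformation,
# assuming numpy arrays (the add-on's actual _compute_transformed_coords()
# may be implemented differently, e.g. fully vectorized on the GPU):
def _compute_transformed_coords_sketch(object_anchor_matrix_world, positions):
    """Apply a 4x4 world matrix to a list of 3D coordinates."""
    import numpy as np

    if len(positions) == 0:
        return []
    transf_mat = np.asarray(object_anchor_matrix_world, dtype=float)
    pos_arr = np.asarray(positions, dtype=float)
    # Convert to homogeneous coordinates, transform and drop the w-component.
    pos_hom = np.hstack([pos_arr, np.ones((pos_arr.shape[0], 1))])
    transf_pos_hom = (transf_mat @ pos_hom.T).T
    return transf_pos_hom[:, :3].tolist()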
def _add_transformation_animation(
    animated_obj_name,
    transformations_sorted,
    number_interpolation_frames,
    interpolation_type=None,
    remove_rotation_discontinuities=True,
    op=None,
):
    log_report("INFO", "Adding transformation animation: ...", op)

    scene = bpy.context.scene
    scene.frame_start = 0
    step_size = number_interpolation_frames + 1
    scene.frame_end = step_size * len(transformations_sorted)
    animated_obj = bpy.data.objects[animated_obj_name]

    for index, transformation in enumerate(transformations_sorted):
        # log_report('INFO', 'index: ' + str(index), op)
        # log_report('INFO', 'transformation: ' + str(transformation), op)

        current_keyframe_index = (index + 1) * step_size
        if transformation is None:
            continue
        animated_obj.matrix_world = Matrix(transformation)

        animated_obj.keyframe_insert(
            data_path="location", index=-1, frame=current_keyframe_index
        )

        # Don't use euler rotations, they show too many discontinuities
        # animated_obj.keyframe_insert(
        #     data_path="rotation_euler",
        #     index=-1,
        #     frame=current_keyframe_index,
        # )

        animated_obj.rotation_mode = "QUATERNION"
        animated_obj.keyframe_insert(
            data_path="rotation_quaternion",
            index=-1,
            frame=current_keyframe_index,
        )

    if remove_rotation_discontinuities:
        # q and -q represent the same rotation
        _remove_quaternion_discontinuities(animated_obj)

    if interpolation_type is not None:
        _set_fcurve_interpolation(animated_obj, interpolation_type)

    log_report("INFO", "Adding transformation animation: Done", op)
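# Since q and -q encode the same rotation, interpolating between keyframes
# with opposite signs produces visible flips. A minimal sketch of a
# discontinuity removal pass over the quaternion f-curves (an illustration of
# the idea; the add-on's actual _remove_quaternion_discontinuities() may
# differ):
def _remove_quaternion_discontinuities_sketch(animated_obj):
    """Flip quaternion keyframes that point away from their predecessor."""
    action = animated_obj.animation_data.action
    # The four f-curves of "rotation_quaternion" (w, x, y and z component).
    fcurves = [
        fcurve
        for fcurve in action.fcurves
        if fcurve.data_path == "rotation_quaternion"
    ]
    assert len(fcurves) == 4
    num_keyframes = len(fcurves[0].keyframe_points)
    for keyframe_idx in range(1, num_keyframes):
        previous = [
            fcurve.keyframe_points[keyframe_idx - 1].co[1]
            for fcurve in fcurves
        ]
        current = [
            fcurve.keyframe_points[keyframe_idx].co[1] for fcurve in fcurves
        ]
        # A negative dot product indicates a sign flip between neighbors.
        if sum(p * c for p, c in zip(previous, current)) < 0:
            for fcurve in fcurves:
                keyframe = fcurve.keyframe_points[keyframe_idx]
                keyframe.co[1] = -keyframe.co[1]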
def execute(self, context): """Import a file with point data (e.g. :code:`PLY`).""" path = os.path.join(self.directory, self.filepath) log_report("INFO", "path: " + str(path), self) points = PointDataFileHandler.parse_point_data_file(path, self) log_report("INFO", "Number points: " + str(len(points)), self) reconstruction_collection = add_collection("Reconstruction Collection") self.import_photogrammetry_points(points, reconstruction_collection) self.apply_general_options() return {"FINISHED"}
def execute(self, context): """Import an :code:`OpenMVG` :code:`JSON` file.""" path = os.path.join(self.directory, self.filepath) log_report("INFO", "path: " + str(path), self) self.image_dp = self.get_default_image_path(path, self.image_dp) log_report("INFO", "image_dp: " + str(self.image_dp), self) cameras, points = OpenMVGJSONFileHandler.parse_openmvg_file( path, self.image_dp, self.image_fp_type, self.suppress_distortion_warnings, self, ) log_report("INFO", "Number cameras: " + str(len(cameras)), self) log_report("INFO", "Number points: " + str(len(points)), self) reconstruction_collection = add_collection("Reconstruction Collection") self.import_photogrammetry_cameras(cameras, reconstruction_collection) self.import_photogrammetry_points(points, reconstruction_collection) self.apply_general_options() return {"FINISHED"}
def register():
    """Register importers, exporters and panels."""
    bpy.utils.register_class(AddonPreferences)
    import_export_prefs = bpy.context.preferences.addons[__name__].preferences
    Registration.register_importers(import_export_prefs)
    Registration.register_exporters(import_export_prefs)
    bpy.utils.register_class(OpenGLPanel)
    log_report(
        "INFO",
        "Registered {} with {} modules".format(bl_info["name"], len(modules)),
    )
def parse_opensfm_file(
    input_opensfm_fp,
    image_dp,
    image_fp_type,
    reconstruction_idx,
    suppress_distortion_warnings=False,
    op=None,
):
    """Parse an :code:`OpenSfM` (:code:`.json`) file."""
    log_report("INFO", "parse_opensfm_file: ...", op)
    log_report("INFO", "input_opensfm_fp: " + input_opensfm_fp, op)

    with open(input_opensfm_fp, "r") as input_file:
        json_data = json.load(input_file)

    reconstruction_data = json_data[reconstruction_idx]
    if len(json_data) > 1:
        log_report(
            "WARNING",
            "OpenSfM file contains multiple reconstructions. Only the"
            + f" reconstruction with index {reconstruction_idx} is"
            + " imported.",
            op,
        )

    cams = OpenSfMJSONFileHandler._parse_cameras(
        reconstruction_data,
        image_dp,
        image_fp_type,
        suppress_distortion_warnings,
        op,
    )
    points = OpenSfMJSONFileHandler._parse_points(reconstruction_data, op)
    log_report("INFO", "parse_opensfm_file: Done", op)
    return cams, points
def parse_open3d_file(open3d_ifp, image_dp, image_fp_type, op):
    """Parse an :code:`Open3D` (:code:`.json` or :code:`.log`) file.

    The :code:`.json` format supports intrinsic as well as extrinsic
    parameters, whereas the :code:`.log`
    (`Redwood <http://redwood-data.org/indoor/fileformat.html>`_) format
    contains only extrinsic parameters.
    """
    log_report("INFO", "parse_open3d_file: ...", op)
    log_report("INFO", "open3d_ifp: " + open3d_ifp, op)
    log_report("INFO", "image_dp: " + image_dp, op)

    image_relative_fp_list = get_image_file_paths_in_dir(
        image_dp,
        relative_path_only=True,
        without_ext=False,
        sort_result=True,
        recursive=True,
    )

    cams = []
    ext = os.path.splitext(open3d_ifp)[1].lower()
    if ext == ".json":
        cams = Open3DFileHandler._parse_open3d_json_file(
            open3d_ifp, image_dp, image_relative_fp_list, image_fp_type, op
        )
    elif ext == ".log":
        cams = Open3DFileHandler._parse_open3d_log_file(
            open3d_ifp, image_dp, image_relative_fp_list, image_fp_type, op
        )
    else:
        assert False

    log_report("INFO", "parse_open3d_file: Done", op)
    return cams
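# For reference, a Redwood *.log trajectory stores one block of five lines per
# camera: a metadata line with three integers, followed by the four rows of a
# 4x4 camera pose matrix, e.g. (shortened, hypothetical values):
#
#   0 0 1
#   1.0 0.0 0.0 0.5
#   0.0 1.0 0.0 0.0
#   0.0 0.0 1.0 2.0
#   0.0 0.0 0.0 1.0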
def execute(self, context): """Import a :code:`Colmap` model/workspace.""" path = self.directory # Remove trailing slash path = os.path.dirname(path) log_report("INFO", "path: " + str(path), self) self.image_dp = self.get_default_image_path(path, self.image_dp) cameras, points, mesh_ifp = ColmapFileHandler.parse_colmap_folder( path, self.use_workspace_images, self.image_dp, self.image_fp_type, self.suppress_distortion_warnings, self, ) log_report("INFO", "Number cameras: " + str(len(cameras)), self) log_report("INFO", "Number points: " + str(len(points)), self) log_report("INFO", "Mesh file path: " + str(mesh_ifp), self) reconstruction_collection = add_collection("Reconstruction Collection") self.import_photogrammetry_cameras(cameras, reconstruction_collection) self.import_photogrammetry_points(points, reconstruction_collection) self.import_photogrammetry_mesh(mesh_ifp, reconstruction_collection) self.apply_general_options() return {"FINISHED"}
def set_intrinsics_of_cameras(self, cameras):
    """Enhance the imported cameras with intrinsic information.

    Overwrites the method in :code:`CameraImporter`.
    """
    intrinsic_missing = False
    for cam in cameras:
        if not cam.has_intrinsics():
            intrinsic_missing = True
            break

    if not intrinsic_missing:
        log_report("INFO", "Using intrinsics from file (.json).", self)
        return cameras, True
    else:
        log_report(
            "INFO",
            "Using intrinsics from user options, since not present in the"
            + " reconstruction file (.log).",
            self,
        )
        if math.isnan(self.default_focal_length):
            log_report(
                "ERROR",
                "User must provide the focal length using the import"
                + " options.",
                self,
            )
            return [], False

        if math.isnan(self.default_pp_x) or math.isnan(self.default_pp_y):
            log_report(
                "WARNING",
                "Setting the principal point to the image center.",
                self,
            )

        for cam in cameras:
            if math.isnan(self.default_pp_x) or math.isnan(
                self.default_pp_y
            ):
                # If no images are provided, the user must provide a
                # default principal point
                assert cam.width is not None
                assert cam.height is not None
                default_cx = cam.width / 2.0
                default_cy = cam.height / 2.0
            else:
                default_cx = self.default_pp_x
                default_cy = self.default_pp_y

            intrinsics = Camera.compute_calibration_mat(
                focal_length=self.default_focal_length,
                cx=default_cx,
                cy=default_cy,
            )
            cam.set_calibration_mat(intrinsics)
        return cameras, True
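# The default focal length and principal point feed into a standard pinhole
# calibration matrix. A minimal sketch of such a helper (an assumption for
# illustration; the add-on's actual Camera.compute_calibration_mat() may
# differ, e.g. support distinct focal lengths per axis or a skew parameter):
def _compute_calibration_mat_sketch(focal_length, cx, cy):
    """Build a 3x3 pinhole calibration matrix K."""
    import numpy as np

    return np.array(
        [
            [focal_length, 0, cx],
            [0, focal_length, cy],
            [0, 0, 1],
        ],
        dtype=float,
    )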
def parse_visualsfm_file(
    cls,
    input_visual_fsm_file_name,
    image_dp,
    image_fp_type,
    suppress_distortion_warnings,
    op=None,
):
    """Parse a :code:`VisualSfM` (:code:`.nvm`) file."""
    log_report("INFO", "Parse NVM file: " + input_visual_fsm_file_name, op)
    input_file = open(input_visual_fsm_file_name, "r")

    # Documentation of the *.NVM data format:
    # http://ccwu.me/vsfm/doc.html#nvm
    # In a simple case there is only one model.
    # Each reconstructed <model> contains the following:
    # <Number of cameras> <List of cameras>
    # <Number of 3D points> <List of points>

    # Read the first two lines (fixed)
    current_line = (input_file.readline()).rstrip()
    calibration_matrix = cls._parse_fixed_calibration(current_line, op)
    current_line = (input_file.readline()).rstrip()
    assert current_line == ""

    amount_cameras = int((input_file.readline()).rstrip())
    log_report(
        "INFO",
        "Number of cameras (images in NVM file): " + str(amount_cameras),
        op,
    )

    cameras = cls._parse_cameras(
        input_file,
        amount_cameras,
        calibration_matrix,
        image_dp,
        image_fp_type,
        suppress_distortion_warnings,
        op,
    )

    current_line = (input_file.readline()).rstrip()
    assert current_line == ""
    current_line = (input_file.readline()).rstrip()

    if current_line.isdigit():
        amount_points = int(current_line)
        log_report(
            "INFO",
            "Number of sparse points (points in NVM file): "
            + str(amount_points),
            op,
        )
        points = cls._parse_nvm_points(input_file, amount_points)
    else:
        points = []

    input_file.close()
    log_report("INFO", "Parse NVM file: Done", op)
    return cameras, points
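# For reference, an *.NVM file as documented at http://ccwu.me/vsfm/doc.html#nvm
# starts with a header line ("NVM_V3", optionally followed by a fixed
# calibration), an empty line, the camera count and one line per camera
# (<file name> <focal length> <quaternion WXYZ> <camera center XYZ>
# <radial distortion> 0), followed by the point count and the 3D points,
# e.g. (shortened, hypothetical values):
#
#   NVM_V3
#
#   2
#   image_0.jpg 1200.0 0.99 0.01 0.02 0.03 1.0 2.0 3.0 0.0 0
#   image_1.jpg 1200.0 0.98 0.02 0.03 0.04 1.5 2.5 3.5 0.0 0
#
#   1
#   0.5 0.5 0.5 255 255 255 0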
def _get_node(cls, json_graph, node_type, node_number, op):
    if node_number == -1:
        return cls._get_latest_node(json_graph, node_type)
    else:
        node_key = node_type + "_" + str(node_number)
        if node_key in json_graph:
            return json_graph[node_key]
        else:
            log_report(
                "ERROR",
                "Invalid combination of node type (i.e. "
                + node_type
                + ") and node number (i.e. "
                + str(node_number)
                + ") provided",
                op,
            )
            assert False
def add_command_line_sys_path_if_necessary(dummy):
    """Extend Blender's sys.path with the command line sys.path if necessary."""
    dependency_manager = OptionalDependencyManager.get_singleton()
    dependencies = dependency_manager.get_dependencies()
    installed = any(
        dependency.installation_status for dependency in dependencies
    )
    if installed:
        log_report(
            "INFO", "Found installed dependencies. Going to adjust sys.path."
        )
        add_command_line_sys_path()
    else:
        log_report(
            "INFO",
            "Found no installed dependencies. Not going to adjust sys.path.",
        )
def _set_principal_point_for_cameras(
    cameras, default_pp_x, default_pp_y, op=None
):
    if not math.isnan(default_pp_x) and not math.isnan(default_pp_y):
        log_report(
            "WARNING", "Setting principal points to default values!", op
        )
    else:
        log_report(
            "WARNING", "Setting principal points to image centers!", op
        )
        assert cameras[0].width is not None and cameras[0].height is not None
        default_pp_x = cameras[0].width / 2.0
        default_pp_y = cameras[0].height / 2.0

    for camera in cameras:
        if not camera.has_principal_point():
            camera.set_principal_point([default_pp_x, default_pp_y])
def execute(self, context): """Activate and deactivate importers and exporters. Uses the selected options of :class:`.AddonPreferences` to determine active and inactive importers and exporters. """ log_report("INFO", "Update importers and exporters: ...", self) addon_name = _get_addon_name() import_export_prefs = bpy.context.preferences.addons[ addon_name].preferences Registration.unregister_importers() Registration.register_importers(import_export_prefs) Registration.unregister_exporters() Registration.register_exporters(import_export_prefs) log_report("INFO", "Update importers and exporters: Done", self) return {"FINISHED"}
def apply_general_options(self):
    """Apply the options defined by this class."""
    if self.adjust_clipping_distance:
        log_report("INFO", "Adjust clipping distance of 3D view: ...", self)
        active_space = None
        for area in bpy.context.screen.areas:
            if area.type == "VIEW_3D":
                active_space = area.spaces.active
                break
        # Setting "active_space.clip_end" to values close to "sys.maxsize"
        # causes strange graphical artifacts in the 3D view.
        if sys.maxsize == 2**63 - 1:
            # 2**(63-8) = 2**55 works without artifacts
            active_space.clip_end = 2**55 - 1
        else:
            active_space.clip_end = 2**23 - 1
        log_report("INFO", "Adjust clipping distance of 3D view: Done", self)
def get_additional_command_line_sys_path():
    """Return sys.path entries of the command line Python that are missing in Blender."""
    script_str = "import sys; import json; print(json.dumps(sys.path))"
    result = subprocess.run(
        [_get_python_exe_path(), "-c", script_str],
        stdout=PIPE,
        stderr=PIPE,
    )
    command_line_sys_paths = json.loads(result.stdout)
    blender_sys_paths = copy.deepcopy(sys.path)
    additional_system_paths = []
    for command_line_sys_path in command_line_sys_paths:
        if command_line_sys_path not in blender_sys_paths:
            if command_line_sys_path != "":
                log_report(
                    "INFO",
                    f"Add missing sys.path: {command_line_sys_path}",
                )
                additional_system_paths.append(command_line_sys_path)
    return additional_system_paths
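# The returned paths would typically be appended to Blender's sys.path. A
# minimal sketch of such a caller (an illustration; the add-on's actual
# add_command_line_sys_path() may differ):
def _add_command_line_sys_path_sketch():
    """Append the command line specific sys.path entries to Blender's sys.path."""
    import sys

    for additional_path in get_additional_command_line_sys_path():
        if additional_path not in sys.path:
            sys.path.append(additional_path)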
def _get_sfm_fp(cls, sfm_node_type, cache_dp, json_graph, sfm_node_number, op):
    if sfm_node_type == "ConvertSfMFormatNode":
        sfm_fp = cls._get_node_data_fp(
            cache_dp,
            json_graph,
            "ConvertSfMFormat",
            sfm_node_number,
            ["sfm.sfm", "sfm.json"],
            op,
        )
    elif sfm_node_type == "StructureFromMotionNode":
        sfm_fp = cls._get_node_data_fp(
            cache_dp,
            json_graph,
            "StructureFromMotion",
            sfm_node_number,
            "cameras.sfm",
            op,
        )
    elif sfm_node_type == "AUTOMATIC":
        # Prefer the ConvertSfMFormat result and fall back to the
        # StructureFromMotion result.
        sfm_fp = cls._get_node_data_fp(
            cache_dp,
            json_graph,
            "ConvertSfMFormat",
            sfm_node_number,
            ["sfm.sfm", "sfm.json"],
            op,
        )
        if sfm_fp is None:
            sfm_fp = cls._get_node_data_fp(
                cache_dp,
                json_graph,
                "StructureFromMotion",
                sfm_node_number,
                "cameras.sfm",
                op,
            )
    else:
        log_report("ERROR", "Selected SfM node is not supported", op)
        assert False
    return sfm_fp
def parse_transformation_folder(t_idp, op=None):
    """Parse a directory with files storing transformations."""
    if not os.path.isdir(t_idp):
        return []

    t_fps = sorted(
        [
            os.path.join(t_idp, fn)
            for fn in os.listdir(t_idp)
            if os.path.isfile(os.path.join(t_idp, fn))
            and os.path.splitext(fn)[1] == ".txt"
        ]
    )

    transformations_sorted = []
    for t_fp in t_fps:
        log_report("INFO", "transformation file path: " + t_fp, op)
        trans_mat = np.loadtxt(t_fp, dtype="f", delimiter=" ")
        # log_report('INFO', 'transformation mat: ' + str(trans_mat), op)
        transformations_sorted.append(trans_mat)
    return transformations_sorted
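# Each *.txt file is expected to contain a transformation matrix with
# space-separated values, one row per line, e.g. a 4x4 matrix
# (hypothetical values):
#
#   1.0 0.0 0.0 0.5
#   0.0 1.0 0.0 0.0
#   0.0 0.0 1.0 2.0
#   0.0 0.0 0.0 1.0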