def parse_meshroom_file(meshroom_ifp, image_dp, image_fp_type,
                            suppress_distortion_warnings, sfm_node_type,
                            sfm_node_number, mesh_node_type, mesh_node_number,
                            op):
        """ Parse a Meshroom project (.mg) or SfM result (.json / .sfm) file.

        :param meshroom_ifp: path to the Meshroom file (.mg, .json or .sfm)
        :param image_dp: directory containing the reconstruction images
        :param image_fp_type: determines how image file paths are resolved
        :param suppress_distortion_warnings: if True, do not warn about
            unsupported radial distortion parameters
        :param sfm_node_type: Meshroom node type providing the SfM result
        :param sfm_node_number: index of the SfM node (-1 for the latest)
        :param mesh_node_type: Meshroom node type providing the mesh
        :param mesh_node_number: index of the mesh node (-1 for the latest)
        :param op: Blender operator used for log reporting
        :return: tuple (cameras, points, mesh file path or None)
        """
        # Use log_report consistently (instead of op.report), matching the
        # logging style of the other functions in this module.
        log_report('INFO', 'parse_meshroom_file: ...', op)
        log_report('INFO', 'meshroom_ifp: ' + meshroom_ifp, op)

        ext = os.path.splitext(meshroom_ifp)[1].lower()
        if ext == '.mg':
            # A .mg project file only references the actual SfM / mesh
            # results inside its MeshroomCache directory.
            meshroom_ifp, mesh_fp = MeshroomFileHandler.parse_meshrom_mg_file(
                meshroom_ifp, sfm_node_type, sfm_node_number, mesh_node_type,
                mesh_node_number, op)
        else:
            assert ext == '.json' or ext == '.sfm'
            mesh_fp = None

        if meshroom_ifp is not None:
            cams, points = MeshroomFileHandler.parse_sfm_file(
                meshroom_ifp, image_dp, image_fp_type,
                suppress_distortion_warnings, op)
        else:
            log_report('WARNING',
                       'Meshroom project does not contain cameras or points',
                       op)
            cams = []
            points = []

        log_report('INFO', 'parse_meshroom_file: Done', op)
        return cams, points, mesh_fp
 def execute(self, context):
     """ Render the point cloud with OpenGL and keep the result as a
     Blender image named "OpenGL Render".

     :return: {'FINISHED'} (Blender operator convention)
     """
     # Use log_report consistently (instead of self.report), matching the
     # logging style of the other operators in this module.
     log_report('INFO', 'Save opengl render as image: ...', self)
     write_point_size = context.scene.opengl_panel_write_settings.write_point_size
     cam = get_selected_camera()
     image_name = "OpenGL Render"
     log_report('INFO', 'image_name: ' + image_name, self)
     render_opengl_image(image_name, cam, write_point_size)
     log_report('INFO', 'Save opengl render as image: Done', self)
     return {'FINISHED'}
 def get_node(json_graph, node_type, node_number, op):
     """ Return the node of the given type (and number) from the graph.

     A node_number of -1 selects the most recent node of that type;
     otherwise the node keyed '<node_type>_<node_number>' is returned.
     An invalid type/number combination is reported and aborts.
     """
     if node_number != -1:
         node_key = node_type + "_" + str(node_number)
         if node_key in json_graph:
             return json_graph[node_key]
         log_report(
             'ERROR', 'Invalid combination of node type (i.e. ' +
             node_type + ') ' + 'and node number (i.e. ' +
             str(node_number) + ') provided', op)
         assert False
     return MeshroomFileHandler.get_latest_node(json_graph, node_type)
# Example #4 (snippet separator)
    def execute(self, context):
        """ Re-register all importers and exporters so they reflect the
        current addon preferences.

        :return: {'FINISHED'} (Blender operator convention)
        """
        log_report('INFO', 'Update importers and exporters: ...', self)
        prefs = bpy.context.preferences.addons[__name__].preferences

        # Unregister first, then register again with the current
        # preference values.
        unregister_importers()
        register_importers(prefs)
        unregister_exporters()
        register_exporters(prefs)

        log_report('INFO', 'Update importers and exporters: Done', self)
        return {'FINISHED'}
    def execute(self, context):
        """ Render the point cloud with OpenGL for a sequence of frames and
        write one image per frame into the selected output directory.

        :return: {'FINISHED'} (Blender operator convention)
        """
        log_report('INFO', 'Export opengl render as animation: ...', self)
        write_point_size = context.scene.opengl_panel_write_settings.write_point_size

        use_camera_keyframes = context.scene.opengl_panel_export_animation_settings.use_camera_keyframes
        file_format = context.scene.opengl_panel_export_image_settings.file_format

        # The export helper stores the path in self.filepath (even if its a directory)
        output_dp = self.filepath
        log_report('INFO', 'Output Directory Path: ' + str(output_dp), self)

        if not os.path.isdir(output_dp):
            # makedirs (instead of mkdir) also creates missing parent
            # directories.
            os.makedirs(output_dp)

        # Used to cache the results
        image_name = "Export Opengl"
        ext = '.' + file_format
        scene = bpy.context.scene
        cam = get_selected_camera()
        indices = self.get_indices(use_camera_keyframes, cam)
        for idx in indices:
            scene.frame_set(idx)
            # Zero-pad the frame number so the files sort correctly.
            current_frame_fn = str(idx).zfill(5) + ext
            current_frame_fp = os.path.join(output_dp, current_frame_fn)

            log_report('INFO', 'Output File Path: ' + str(current_frame_fp),
                       self)
            render_opengl_image(image_name, cam, write_point_size)
            bpy.data.images[image_name].save_render(current_frame_fp)

        # Fixed message: it previously said 'Save opengl render as
        # animation', inconsistent with the start message above.
        log_report('INFO', 'Export opengl render as animation: Done', self)
        return {'FINISHED'}
    def draw_points_callback(self, draw_manager, object_anchor, positions,
                             colors):
        """ Draw-handler callback that renders the point cloud in the 3D view.

        The points are defined relative to the anchor object; its current
        world matrix is applied before drawing. The shader batch is cached
        and only rebuilt when the anchor pose changes. If the anchor object
        has been deleted, the draw handler removes itself.

        :param draw_manager: manager owning this callback / anchor
        :param object_anchor: Blender object whose pose transforms the points
        :param positions: anchor-local point coordinates
        :param colors: per-point colors
        """

        handle_is_valid = True
        try:
            # Check if object still exists
            # (accessing attributes of a removed Blender object raises)
            object_anchor_name = object_anchor.name
        except:
            handle_is_valid = False

        if handle_is_valid:
            if object_anchor_name in bpy.data.objects:

                # Use the visibility of the object to enable /
                # disable the drawing of the point cloud
                if bpy.data.objects[object_anchor_name].visible_get():

                    # Update the batch depending on the anchor pose (only if necessary)
                    object_anchor_has_changed = not np.array_equal(
                        self.object_anchor_pose_previous,
                        object_anchor.matrix_world)
                    if self.batch_cached is None or object_anchor_has_changed:

                        self.object_anchor_pose_previous = np.copy(
                            object_anchor.matrix_world)
                        transf_pos_list = compute_transformed_coords(
                            object_anchor.matrix_world, positions)

                        self.batch_cached = batch_for_shader(
                            self.shader, "POINTS", {
                                "pos": transf_pos_list,
                                "color": colors
                            })

                    self.shader.bind()
                    bgl.glPointSize(self.point_size)
                    # Enable depth testing so points are occluded correctly
                    # by scene geometry.
                    bgl.glEnable(bgl.GL_DEPTH_TEST)
                    bgl.glDepthMask(bgl.GL_TRUE)

                    self.batch_cached.draw(self.shader)

        else:
            log_report('INFO',
                       'Removing draw handler of deleted point cloud handle')
            if self.draw_handler_handle is not None:
                bpy.types.SpaceView3D.draw_handler_remove(
                    self.draw_handler_handle, 'WINDOW')
                self.draw_handler_handle = None
                draw_manager.delete_anchor(object_anchor)
# Example #7 (snippet separator)
def draw_points(op, points, add_points_to_point_cloud_handle, reconstruction_collection=None):
    """ Create an anchor object and register an OpenGL draw callback that
    renders the given points relative to it.

    If add_points_to_point_cloud_handle is True, the coordinates and colors
    are additionally stored as custom properties on the anchor, so the point
    cloud can be restored when the blend file is reloaded.
    """
    log_report('INFO', 'Add particle draw handlers', op)

    point_coords, point_colors = Point.split_points(points)
    anchor = add_empty(
        "OpenGL Point Cloud", reconstruction_collection)
    if add_points_to_point_cloud_handle:
        anchor['particle_coords'] = point_coords
        anchor['particle_colors'] = point_colors
        bpy.context.scene['contains_opengl_point_clouds'] = True

    draw_manager = DrawManager.get_singleton()
    draw_manager.register_points_draw_callback(
        anchor, point_coords, point_colors)
def check_radial_distortion(radial_distortion, camera_name, op):
    """ Warn the user if the camera has non-zero radial distortion.

    Blender cannot visualize radial distortion in the 3D View, so cameras
    with distortion parameters will not exactly match their images.

    :param radial_distortion: None, a single coefficient, or a list/tuple of
        coefficients (callers pass both forms)
    :param camera_name: camera name used in the warning message
    :param op: operator used for log reporting
    """
    # TODO
    # Integrate lens distortion nodes
    # https://docs.blender.org/manual/en/latest/compositing/types/distort/lens_distortion.html
    # to properly support radial distortion consisting of a single parameter

    if radial_distortion is None:
        return
    # Callers pass either a scalar or a list of coefficients; treat an
    # all-zero coefficient list like "no distortion" as well (previously
    # only the scalar case was detected).
    if isinstance(radial_distortion, (list, tuple)):
        if all(value == 0.0 for value in radial_distortion):
            return
    elif radial_distortion == 0.0:
        return

    output = 'Blender does not support radial distortion of cameras in the 3D View.'
    output += ' Distortion of camera ' + camera_name + ': ' + str(
        radial_distortion) + '.'
    output += ' If possible, re-compute the reconstruction using a camera model without radial distortion parameters.'
    output += ' Use "Suppress Distortion Warnings" in the import settings to suppress this message.'
    log_report('WARNING', output, op)
# Example #9 (snippet separator)
def redraw_points(dummy):
    """ Re-create missing point cloud draw handlers (e.g. after loading a
    blend file) and trigger a redraw of the 3D view.

    Scans all objects for cached particle coordinates / colors stored as
    custom properties and re-registers their draw callbacks.
    """
    # This test is very cheap, so it will not cause
    # huge overheads for scenes without point clouds
    if 'contains_opengl_point_clouds' not in bpy.context.scene:
        return

    log_report('INFO', 'Checking scene for missing point cloud draw handlers', dummy)
    for anchor_obj in bpy.data.objects:
        if 'particle_coords' not in anchor_obj:
            continue
        if 'particle_colors' not in anchor_obj:
            continue
        point_coords = anchor_obj['particle_coords']
        point_colors = anchor_obj['particle_colors']

        draw_manager = DrawManager.get_singleton()
        draw_manager.register_points_draw_callback(
            anchor_obj, point_coords, point_colors)
        viz_point_size = bpy.context.scene.opengl_panel_viz_settings.viz_point_size
        draw_manager.set_point_size(viz_point_size)

    for area in bpy.context.screen.areas:
        if area.type == 'VIEW_3D':
            area.tag_redraw()
            break
# Example #10 (snippet separator)
    def parse_opensfm_file(input_opensfm_fp, image_dp, image_fp_type,
                           suppress_distortion_warnings, reconstruction_idx,
                           op):
        """ Parse an OpenSfM reconstruction (json) file.

        :param input_opensfm_fp: path to the OpenSfM reconstruction file
        :param image_dp: directory containing the reconstruction images
        :param image_fp_type: determines how image file paths are resolved
        :param suppress_distortion_warnings: if True, do not warn about
            unsupported radial distortion parameters
        :param reconstruction_idx: index of the reconstruction to import
            (an OpenSfM file may contain several reconstructions)
        :param op: operator used for log reporting
        :return: tuple (cameras, points)
        """
        log_report('INFO', 'parse_opensfm_file: ...', op)
        log_report('INFO', 'input_opensfm_fp: ' + input_opensfm_fp, op)
        # Use a context manager so the file handle is always closed
        # (previously the handle was leaked).
        with open(input_opensfm_fp, 'r') as input_file:
            json_data = json.load(input_file)
        num_reconstructions = len(json_data)
        reconstruction_data = json_data[reconstruction_idx]
        if num_reconstructions > 1:
            log_report(
                'WARNING',
                'OpenSfM file contains multiple reconstructions. Only reconstruction with index '
                + str(reconstruction_idx) + ' is imported.', op)

        cams = OpenSfMJSONFileHandler.parse_cameras(
            reconstruction_data, image_dp, image_fp_type,
            suppress_distortion_warnings, op)
        points = OpenSfMJSONFileHandler.parse_points(reconstruction_data, op)
        log_report('INFO', 'parse_opensfm_file: Done', op)
        return cams, points
    def execute(self, context):
        """ Render the point cloud with OpenGL and save the result to the
        file path selected in the export dialog (self.filepath).

        :return: {'FINISHED'} (Blender operator convention)
        """
        log_report('INFO', 'Export opengl render as image: ...', self)
        write_point_size = context.scene.opengl_panel_write_settings.write_point_size

        log_report('INFO', 'Output File Path: ' + str(self.filepath), self)

        # Used to cache the results
        image_name = "Export Opengl"

        cam = get_selected_camera()

        render_opengl_image(image_name, cam, write_point_size)
        bpy.data.images[image_name].save_render(self.filepath)
        # Removed unused locals (file_format / ext / scene) and fixed the
        # final message, which previously said 'Save opengl render as
        # image', inconsistent with the start message above.
        log_report('INFO', 'Export opengl render as image: Done', self)
        return {'FINISHED'}
# Example #12 (snippet separator)
    def convert_intrinsics(json_camera_intrinsics, relative_fp,
                           suppress_distortion_warnings, op):
        """ Convert OpenSfM intrinsic parameters to a flat parameter list.

        Supports the 'perspective' and 'brown' projection types; any other
        type is reported as an error and aborts.

        :param json_camera_intrinsics: intrinsic dict of one OpenSfM camera
        :param relative_fp: image path used in distortion warnings
        :param suppress_distortion_warnings: if True, skip the radial
            distortion check
        :param op: operator used for log reporting
        :return: [focal_length, cx, cy, width, height, radial_distortion]
        """
        # See https://www.opensfm.org/docs/_modules/opensfm/types.html

        width = json_camera_intrinsics['width']
        height = json_camera_intrinsics['height']
        # OpenSfM stores focal lengths relative to the larger image extent.
        max_extent = max(width, height)

        radial_distortion = [
            json_camera_intrinsics['k1'], json_camera_intrinsics['k2']
        ]
        projection_type = json_camera_intrinsics['projection_type']

        if projection_type == 'perspective':
            focal_length = json_camera_intrinsics['focal'] * max_extent
            cx = width / 2
            cy = height / 2
            log_report(
                'WARNING',
                'Principal point not provided, setting it to the image center.',
                op)
        elif projection_type == 'brown':
            fx = json_camera_intrinsics['focal_x'] * max_extent
            fy = json_camera_intrinsics['focal_y'] * max_extent
            if fx != fy:
                log_report(
                    'WARNING',
                    'Focal length in x and y direction differs, setting it to the average value.',
                    op)
            focal_length = (fx + fy) * 0.5
            cx = json_camera_intrinsics['c_x']
            cy = json_camera_intrinsics['c_y']
        else:
            log_report('ERROR', 'Projection Type not supported!', op)
            assert False

        if not suppress_distortion_warnings:
            check_radial_distortion(radial_distortion, relative_fp, op)

        return [focal_length, cx, cy, width, height, radial_distortion]
    def parse_meshrom_mg_file(mg_fp, sfm_node_type, sfm_node_number,
                              mesh_node_type, mesh_node_number, op):
        """ Determine the SfM and mesh result file paths of a Meshroom
        project (.mg) file.

        The actual results live in the project's MeshroomCache directory;
        which node output is used depends on the requested node types and
        numbers ('AUTOMATIC' tries the candidates in a fixed order).

        :param mg_fp: path to the Meshroom project (.mg) file
        :param sfm_node_type: node type providing the SfM result
        :param sfm_node_number: node index (-1 for the latest node)
        :param mesh_node_type: node type providing the mesh
        :param mesh_node_number: node index (-1 for the latest node)
        :param op: operator used for log reporting
        :return: tuple (sfm file path or None, mesh file path or None)
        """
        cache_dp = os.path.join(os.path.dirname(mg_fp), 'MeshroomCache')
        # Use a context manager so the file handle is always closed
        # (previously json.load(open(...)) leaked the handle).
        with open(mg_fp, 'r') as mg_file:
            json_data = json.load(mg_file)
        json_graph = json_data['graph']

        if sfm_node_type == 'ConvertSfMFormatNode':
            sfm_fp = MeshroomFileHandler.get_node_data_fp(
                cache_dp, json_graph, 'ConvertSfMFormat', sfm_node_number,
                ['sfm.sfm', 'sfm.json'], op)
        elif sfm_node_type == 'StructureFromMotionNode':
            sfm_fp = MeshroomFileHandler.get_node_data_fp(
                cache_dp, json_graph, 'StructureFromMotion', sfm_node_number,
                'cameras.sfm', op)
        elif sfm_node_type == 'AUTOMATIC':
            # Prefer the ConvertSfMFormat output; fall back to the raw
            # StructureFromMotion result.
            sfm_fp = MeshroomFileHandler.get_node_data_fp(
                cache_dp, json_graph, 'ConvertSfMFormat', sfm_node_number,
                ['sfm.sfm', 'sfm.json'], op)
            if sfm_fp is None:
                sfm_fp = MeshroomFileHandler.get_node_data_fp(
                    cache_dp, json_graph, 'StructureFromMotion',
                    sfm_node_number, 'cameras.sfm', op)
        else:
            log_report('ERROR', 'Select SfM node is not supported', op)
            assert False

        if mesh_node_type == 'Texturing':
            mesh_fp = MeshroomFileHandler.get_node_data_fp(
                cache_dp, json_graph, 'Texturing', mesh_node_number,
                'texturedMesh.obj', op)
        elif mesh_node_type == 'MeshFiltering':
            mesh_fp = MeshroomFileHandler.get_node_data_fp(
                cache_dp, json_graph, 'MeshFiltering', mesh_node_number,
                'mesh.obj', op)
        elif mesh_node_type == 'Meshing':
            mesh_fp = MeshroomFileHandler.get_node_data_fp(
                cache_dp, json_graph, 'Meshing', mesh_node_number, 'mesh.obj',
                op)
        elif mesh_node_type == 'AUTOMATIC':
            # Prefer the most processed mesh: Texturing, then MeshFiltering,
            # then the raw Meshing output.
            mesh_fp = MeshroomFileHandler.get_node_data_fp(
                cache_dp, json_graph, 'Texturing', mesh_node_number,
                'texturedMesh.obj', op)
            if mesh_fp is None:
                mesh_fp = MeshroomFileHandler.get_node_data_fp(
                    cache_dp, json_graph, 'MeshFiltering', mesh_node_number,
                    'mesh.obj', op)
            if mesh_fp is None:
                mesh_fp = MeshroomFileHandler.get_node_data_fp(
                    cache_dp, json_graph, 'Meshing', mesh_node_number,
                    'mesh.obj', op)
        else:
            log_report('ERROR', 'Select Mesh node is not supported', op)
            assert False

        if sfm_fp is not None:
            log_report('INFO', 'Found the following sfm file: ' + sfm_fp, op)
        else:
            log_report(
                'INFO',
                'Request target SfM result does not exist in this meshroom project',
                op)

        if mesh_fp is not None:
            log_report('INFO', 'Found the following mesh file: ' + mesh_fp, op)
        else:
            log_report(
                'INFO',
                'Request target mesh does not exist in this meshroom project',
                op)

        return sfm_fp, mesh_fp
# Example #14 (snippet separator)
    def parse_points(json_data, op, view_index_to_absolute_fp=None):
        """ Parse the 3D points ('structure') of an OpenMVG-style json file.

        Point colors are averaged over all image observations of each point,
        which requires Pillow and valid image paths; otherwise the points
        are colored black (0, 0, 0).

        :param json_data: parsed json dictionary with a 'structure' list of
            3D points and their observations
        :param op: operator used for log reporting
        :param view_index_to_absolute_fp: optional dict mapping view indices
            to absolute image file paths; required to recover point colors
        :return: list of Point instances
        """

        compute_color = True
        try:
            from PIL import Image
        except ImportError:
            log_report(
                'WARNING',
                'Can not compute point cloud color information, since Pillow is not installed.',
                op)
            compute_color = False

        if view_index_to_absolute_fp is None:
            log_report(
                'WARNING',
                'Can not compute point cloud color information, since path to images is not correctly set.',
                op)
            compute_color = False

        if compute_color:
            log_report(
                'INFO',
                'Try to collect color information from files (this might take a while)',
                op)
            # Open every referenced image up front; a single missing file
            # disables color computation entirely.
            view_index_to_image = {}
            for view_index, absolute_fp in view_index_to_absolute_fp.items():
                if os.path.isfile(absolute_fp):
                    pil_image = Image.open(absolute_fp)
                    view_index_to_image[view_index] = pil_image
                else:
                    log_report(
                        'WARNING',
                        'Can not compute point cloud color information, since image file path is incorrect.',
                        op)
                    compute_color = False
                    break

        if compute_color:
            log_report(
                'INFO',
                'Compute color information from files (this might take a while)',
                op)

        points = []
        structure = json_data['structure']
        for json_point in structure:

            r = g = b = 0

            # color information can only be computed if input files are provided
            if compute_color:
                # Accumulate the pixel colors of all observations of this
                # point, then average below.
                for observation in json_point['value']['observations']:
                    view_index = int(observation['key'])

                    # REMARK: The order of ndarray.shape (first height, then width) is complementary to
                    # PIL's image.size (first width, then height).
                    # That means
                    # height, width = segmentation_as_matrix.shape
                    # width, height = image.size

                    # Therefore: x_in_openmvg_file == x_image == y_ndarray
                    # and y_in_openmvg_file == y_image == x_ndarray
                    x_in_json_file = float(
                        observation['value']['x'][0])  # x has index 0
                    y_in_json_file = float(
                        observation['value']['x'][1])  # y has index 1

                    current_image = view_index_to_image[view_index]
                    current_r, current_g, current_b = current_image.getpixel(
                        (x_in_json_file, y_in_json_file))
                    r += current_r
                    g += current_g
                    b += current_b

                # normalize the rgb values
                amount_observations = len(json_point['value']['observations'])
                r /= amount_observations
                g /= amount_observations
                b /= amount_observations

            custom_point = Point(coord=np.array(json_point['value']['X'],
                                                dtype=float),
                                 color=np.array([r, g, b], dtype=int),
                                 id=int(json_point['key']),
                                 scalars=[])

            points.append(custom_point)
        return points
# Example #15 (snippet separator)
    def parse_cameras(json_data, image_dp, image_fp_type, suppress_distortion_warnings, op):
        """ Parse the cameras (views + intrinsics + extrinsics) of an
        OpenMVG json reconstruction.

        Only views that have both a corresponding pose (extrinsic) and an
        intrinsic entry are converted to Camera instances.

        :param json_data: parsed OpenMVG json dictionary
        :param image_dp: directory containing the reconstruction images
        :param image_fp_type: determines how image file paths are resolved
        :param suppress_distortion_warnings: if True, do not warn about
            unsupported radial distortion parameters
        :param op: operator used for log reporting
        :return: list of Camera instances
        """

        views = {item['key']:item for item in json_data['views']}
        intrinsics = {item['key']:item for item in json_data['intrinsics']}
        extrinsics = {item['key']:item for item in json_data['extrinsics']}

        # Regard 3D stores the polymorphic attribute in the first intrinsic
        default_polymorphic_name = OpenMVGJSONFileHandler.get_default_polymorphic_name(intrinsics)

        # IMPORTANT:
        # Views contain the description about the dataset and attribute to Pose and Intrinsic data.
        # View -> id_pose, id_intrinsic
        # Since sometimes some views cannot be localized, there is some missing pose and intrinsic data.
        # Extrinsics may contain only a subset of views! (Potentially not all views are contained in the reconstruction)

        cams = []
        # Iterate over views, and create camera if Intrinsic and Pose data exist
        for id, view in views.items():    # Iterate over views

            id_view = view['key'] # Should be equal to view['value']['ptr_wrapper']['data']['id_view']
            view_data = view['value']['ptr_wrapper']['data']
            id_pose = view_data['id_pose']
            id_intrinsic = view_data['id_intrinsic']

            # Check if the view is having corresponding Pose and Intrinsic data
            if id_pose in extrinsics.keys() and \
               id_intrinsic in intrinsics.keys():

                camera = Camera()

                camera.image_fp_type = image_fp_type
                camera.image_dp = image_dp
                camera._relative_fp = os.path.join(
                    view_data['local_path'], view_data['filename'])
                camera._absolute_fp = os.path.join(
                    json_data['root_path'], view_data['local_path'], view_data['filename'])
                camera.width = view_data['width']
                camera.height = view_data['height']
                id_intrinsic = view_data['id_intrinsic']

                # handle intrinsic params
                intrinsic_values = intrinsics[int(id_intrinsic)]['value']
                intrinsic_data = intrinsic_values['ptr_wrapper']['data']

                # Fall back to the polymorphic name of the first intrinsic
                # when the entry does not provide one itself.
                if 'polymorphic_name' in intrinsic_values:
                    polymorphic_name = intrinsic_values['polymorphic_name']
                else:
                    polymorphic_name = default_polymorphic_name
                    log_report(
                        'WARNING', 'Key polymorphic_name in intrinsic with id ' + str(id_intrinsic) + 
                        ' is missing, substituting with polymorphic_name of first intrinsic.', op) 

                if polymorphic_name == 'spherical':
                    camera.set_panoramic_type(Camera.panoramic_type_equirectangular)
                    # create some dummy values
                    focal_length = 0     
                    cx = camera.width / 2
                    cy = camera.height / 2
                else:

                    focal_length = intrinsic_data['focal_length']
                    principal_point = intrinsic_data['principal_point']
                    cx = principal_point[0]
                    cy = principal_point[1]

                # For Radial there are several options: "None", disto_k1, disto_k3
                if 'disto_k3' in intrinsic_data:
                    radial_distortion = [
                        float(intrinsic_data['disto_k3'][0]), 
                        float(intrinsic_data['disto_k3'][1]), 
                        float(intrinsic_data['disto_k3'][2])]
                elif 'disto_k1' in intrinsic_data:
                    radial_distortion = float(intrinsic_data['disto_k1'][0])
                else:  # No radial distortion, i.e. pinhole camera model
                    radial_distortion = 0

                if not suppress_distortion_warnings:
                    check_radial_distortion(radial_distortion, camera._relative_fp, op)

                camera_calibration_matrix = np.array([
                    [focal_length, 0, cx],
                    [0, focal_length, cy],
                    [0, 0, 1]])

                camera.set_calibration(
                    camera_calibration_matrix,
                    radial_distortion)
                # handle extrinsic params (pose = rotation + camera center)
                extrinsic_params = extrinsics[id_pose]
                cam_rotation_list = extrinsic_params['value']['rotation']
                camera.set_rotation_mat(np.array(cam_rotation_list, dtype=float))
                camera.set_camera_center_after_rotation(
                    np.array(extrinsic_params['value']['center'], dtype=float))
                camera.view_index = id_view

                cams.append(camera)
        return cams