    def get_selected_cameras_and_vertices_of_meshes(self, odp):
        """Get the selected cameras and the vertices of the selected meshes."""
        log_report("INFO", "get_selected_cameras_and_vertices_of_meshes: ...",
                   self)
        cameras = []
        points = []

        point_index = 0
        camera_index = 0
        for obj in bpy.context.selected_objects:
            if obj.type == "CAMERA":
                obj_name = str(obj.name).replace(" ", "_")
                log_report("INFO", "obj_name: " + obj_name, self)
                calibration_mat = self._get_calibration_mat(obj)
                # log_report('INFO', 'calibration_mat:', self)
                # log_report('INFO', str(calibration_mat), self)

                camera_matrix_computer_vision = (
                    self._get_computer_vision_camera_matrix(obj))

                cam = Camera()
                cam.id = camera_index
                cam.set_relative_fp(obj_name, Camera.IMAGE_FP_TYPE_NAME)
                cam.image_dp = odp
                cam.width = bpy.context.scene.render.resolution_x
                cam.height = bpy.context.scene.render.resolution_y

                cam.set_calibration(calibration_mat, radial_distortion=0)
                cam.set_4x4_cam_to_world_mat(camera_matrix_computer_vision)
                cameras.append(cam)
                camera_index += 1

            else:
                if obj.data is not None:
                    obj_points = []
                    for vert in obj.data.vertices:
                        coord_world = obj.matrix_world @ vert.co
                        obj_points.append(
                            Point(
                                coord=coord_world,
                                color=[0, 255, 0],
                                id=point_index,
                                scalars=[],
                            ))

                        point_index += 1
                    points += obj_points
        log_report(
            "INFO",
            "get_selected_cameras_and_vertices_of_meshes: Done",
            self,
        )
        return cameras, points
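
# The helpers _get_calibration_mat() and _get_computer_vision_camera_matrix() used
# above are not shown. The following standalone sketches are assumptions that
# illustrate the underlying computations (the names, parameters and the simplified
# handling of sensor fit / principal point shifts are hypothetical).
import numpy as np

def _get_calibration_mat_sketch(blender_camera_data, width, height):
    """Build a pinhole calibration matrix K from Blender camera intrinsics
    (assumes a horizontal sensor fit, square pixels and no lens shift)."""
    f_pixels = blender_camera_data.lens / blender_camera_data.sensor_width * width
    return np.array([[f_pixels, 0, width / 2.0],
                     [0, f_pixels, height / 2.0],
                     [0, 0, 1]])

def _cam_to_world_cg_to_cv_sketch(cam_to_world_cg):
    """Convert a 4x4 cam-to-world matrix from the computer graphics convention
    (camera looks along -z, y up) to the computer vision convention
    (camera looks along +z, y down)."""
    cam_to_world_cv = np.array(cam_to_world_cg, dtype=float)
    # Right-multiplying the rotation block by diag(1, -1, -1) flips the camera's
    # y and z axes; the camera center (translation column) is unchanged.
    cam_to_world_cv[:3, :3] = cam_to_world_cv[:3, :3] @ np.diag([1.0, -1.0, -1.0])
    return cam_to_world_cv
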
# Example 2
    @staticmethod
    def _convert_cameras(
        id_to_col_cameras,
        id_to_col_images,
        image_dp,
        image_fp_type,
        depth_map_idp=None,
        suppress_distortion_warnings=False,
        op=None,
    ):
        # From photogrammetry_importer\ext\read_write_model.py
        #   CameraModel = collections.namedtuple(
        #       "CameraModel", ["model_id", "model_name", "num_params"])
        #   Camera = collections.namedtuple(
        #       "Camera", ["id", "model", "width", "height", "params"])
        #   BaseImage = collections.namedtuple(
        #       "Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"])

        cameras = []
        for col_image in id_to_col_images.values():
            current_camera = Camera()
            current_camera.id = col_image.id
            current_camera.set_rotation_with_quaternion(col_image.qvec)
            current_camera.set_camera_translation_vector_after_rotation(
                col_image.tvec)

            current_camera.image_fp_type = image_fp_type
            current_camera.image_dp = image_dp
            current_camera._relative_fp = col_image.name

            # log_report('INFO', 'image_dp: ' + str(image_dp))
            # log_report('INFO', 'col_image.name: ' + str(col_image.name))

            camera_model = id_to_col_cameras[col_image.camera_id]

            # log_report('INFO', 'image_id: ' + str(col_image.id))
            # log_report('INFO', 'camera_id: ' + str(col_image.camera_id))
            # log_report('INFO', 'camera_model: ' + str(camera_model))

            current_camera.width = camera_model.width
            current_camera.height = camera_model.height

            (
                fx,
                fy,
                cx,
                cy,
                skew,
                r,
            ) = ColmapFileHandler._parse_camera_param_list(camera_model)
            if not suppress_distortion_warnings:
                check_radial_distortion(r, current_camera._relative_fp, op)

            camera_calibration_matrix = np.array([[fx, skew, cx], [0, fy, cy],
                                                  [0, 0, 1]])
            current_camera.set_calibration(camera_calibration_matrix,
                                           radial_distortion=0)

            if depth_map_idp is not None:
                geometric_ifp = os.path.join(depth_map_idp,
                                             col_image.name + ".geometric.bin")
                photometric_ifp = os.path.join(
                    depth_map_idp, col_image.name + ".photometric.bin")
                if os.path.isfile(geometric_ifp):
                    depth_map_ifp = geometric_ifp
                elif os.path.isfile(photometric_ifp):
                    depth_map_ifp = photometric_ifp
                else:
                    depth_map_ifp = None
                current_camera.set_depth_map_callback(
                    read_array,
                    depth_map_ifp,
                    Camera.DEPTH_MAP_WRT_CANONICAL_VECTORS,
                    shift_depth_map_to_pixel_center=False,
                )
            cameras.append(current_camera)
        return cameras
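
# The helper ColmapFileHandler._parse_camera_param_list() is not shown above. The
# following is a hypothetical sketch for the most common COLMAP camera models
# (parameter layouts as documented by COLMAP); the real implementation may cover
# more models and report the distortion parameters differently.
def _parse_camera_param_list_sketch(camera_model):
    params = camera_model.params
    if camera_model.model == "SIMPLE_PINHOLE":    # f, cx, cy
        fx = fy = params[0]
        cx, cy = params[1], params[2]
        skew, r = 0.0, 0.0
    elif camera_model.model == "PINHOLE":         # fx, fy, cx, cy
        fx, fy, cx, cy = params[0], params[1], params[2], params[3]
        skew, r = 0.0, 0.0
    elif camera_model.model == "SIMPLE_RADIAL":   # f, cx, cy, k
        fx = fy = params[0]
        cx, cy = params[1], params[2]
        skew, r = 0.0, params[3]
    else:
        raise NotImplementedError(
            "Camera model not covered in this sketch: " + str(camera_model.model))
    return fx, fy, cx, cy, skew, r
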
# Example 3
    @classmethod
    def _parse_cameras(
        cls,
        input_file,
        num_cameras,
        camera_calibration_matrix,
        image_dp,
        image_fp_type,
        suppress_distortion_warnings,
        op=None,
    ):
        """
        VisualSFM CAMERA coordinate system is the standard CAMERA
        coordinate system in computer vision (not the same as
        in computer graphics like in bundler, blender, etc.)
        That means the y axis in the image is pointing downwards (not upwards)
        and the camera is looking along the positive z axis (points in front
        of the camera show a positive z value)

        The camera coordinate system in computer vision VISUALSFM uses camera
        matrices, which are rotated around the x axis by 180 degree
        i.e. the y and z axis of the CAMERA MATRICES are inverted
        therefore, the y and z axis of the TRANSLATION VECTOR are also inverted
        """
        # log_report('INFO', '_parse_cameras: ...', op)
        cameras = []

        for i in range(num_cameras):
            line = input_file.readline()

            # Read the camera section
            # From the docs:
            # <Camera> = <File name> <focal length> <quaternion WXYZ> <camera center> <radial distortion> 0
            line_values = line.split()
            relative_path = line_values[0].replace("/", os.sep)
            focal_length = float(line_values[1])

            quaternion_w = float(line_values[2])
            quaternion_x = float(line_values[3])
            quaternion_y = float(line_values[4])
            quaternion_z = float(line_values[5])
            quaternion = np.array(
                [quaternion_w, quaternion_x, quaternion_y, quaternion_z],
                dtype=float,
            )

            camera_center_x = float(line_values[6])
            camera_center_y = float(line_values[7])
            camera_center_z = float(line_values[8])
            center_vec = np.array(
                [camera_center_x, camera_center_y, camera_center_z])

            radial_distortion = float(line_values[9])
            if not suppress_distortion_warnings:
                check_radial_distortion(radial_distortion, relative_path, op)

            if camera_calibration_matrix is None:
                # No information about the principal point is available, so
                # assume that it lies at the image center.
                camera_calibration_matrix = np.array([[focal_length, 0, 0],
                                                      [0, focal_length, 0],
                                                      [0, 0, 1]])

            zero_value = float(line_values[10])
            assert zero_value == 0

            current_camera = Camera()
            # Setting the quaternion also sets the rotation matrix
            current_camera.set_rotation_with_quaternion(quaternion)

            # Set the camera center after rotation
            current_camera._center = center_vec

            # Set the camera viewing direction (in world coordinates) as the
            # camera normal
            cam_view_vec_cam_coord = np.array([0, 0, 1])
            # For a rotation matrix, the inverse equals the transpose
            cam_rotation_matrix_inv = np.linalg.inv(
                current_camera.get_rotation_as_rotation_mat())
            cam_view_vec_world_coord = cam_rotation_matrix_inv.dot(
                cam_view_vec_cam_coord)
            current_camera.normal = cam_view_vec_world_coord

            translation_vec = cls._compute_translation_vector(
                center_vec, current_camera.get_rotation_as_rotation_mat())
            current_camera._translation_vec = translation_vec

            current_camera.set_calibration(camera_calibration_matrix,
                                           radial_distortion=radial_distortion)
            # log_report('INFO', 'Calibration mat:', op)
            # log_report('INFO', str(camera_calibration_matrix), op)

            current_camera.image_fp_type = image_fp_type
            current_camera.image_dp = image_dp
            current_camera._relative_fp = relative_path
            current_camera.id = i
            cameras.append(current_camera)
        # log_report('INFO', '_parse_cameras: Done', op)
        return cameras
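
# The helper cls._compute_translation_vector() is not shown above. For the
# standard pinhole model, the translation vector follows from the camera center c
# and the rotation R as t = -R @ c, so a minimal sketch (an assumption, not the
# addon's actual implementation) could be:
import numpy as np

def _compute_translation_vector_sketch(center_vec, rotation_mat):
    """Map the camera center (world coordinates) to the translation of [R|t]."""
    return -np.asarray(rotation_mat) @ np.asarray(center_vec)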