def set_intrinsics_of_cameras(self, cameras):
        """Enhances the imported cameras with intrinsic information.

        Overwrites the method in :code:`CameraImporter`.
        """
        # Intrinsics from the file are only used if EVERY camera has them.
        all_intrinsics_present = all(
            cam.has_intrinsics() for cam in cameras)

        if all_intrinsics_present:
            log_report("INFO", "Using intrinsics from file (.json).", self)
            return cameras, True

        # NOTE(review): this message mentions ".log" while the message above
        # mentions ".json" -- confirm which file extension is intended here.
        log_report(
            "INFO",
            "Using intrinsics from user options, since not present in the"
            + " reconstruction file (.log).",
            self,
        )
        if math.isnan(self.default_focal_length):
            log_report(
                "ERROR",
                "User must provide the focal length using the import" +
                " options.",
                self,
            )
            return [], False

        # The user-provided principal point does not change per camera,
        # so the NaN check is hoisted out of the loop below.
        pp_undefined = math.isnan(self.default_pp_x) or math.isnan(
            self.default_pp_y)
        if pp_undefined:
            log_report(
                "WARNING",
                "Setting the principal point to the image center.",
                self,
            )

        for cam in cameras:
            if pp_undefined:
                # Without a user-provided principal point the image
                # dimensions are required to derive the image center.
                assert cam.width is not None
                assert cam.height is not None
                cx = cam.width / 2.0
                cy = cam.height / 2.0
            else:
                cx = self.default_pp_x
                cy = self.default_pp_y

            cam.set_calibration_mat(
                Camera.compute_calibration_mat(
                    focal_length=self.default_focal_length,
                    cx=cx,
                    cy=cy,
                ))
        return cameras, True
    def _parse_open3d_log_file(open3d_ifp, image_dp, image_relative_fp_list,
                               image_fp_type, op):
        """Parse an Open3D .log trajectory file into a list of cameras."""
        cams = []
        with open(open3d_ifp, "r") as open3d_file:
            lines = open3d_file.readlines()

            # Each camera occupies 5 lines: 1 meta data line followed by
            # 4 matrix rows.
            chunk_size = 5
            assert len(lines) % chunk_size == 0
            chunks = Open3DFileHandler._chunker(lines, chunk_size)

            if len(chunks) != len(image_relative_fp_list):
                # Image list does not match the trajectory -> substitute
                # dummy names for the missing images.
                image_relative_fp_list = (
                    Open3DFileHandler._create_dummy_fp_list(len(chunks)))

            for chunk, image_relative_fp in zip(chunks,
                                                image_relative_fp_list):
                # chunk[0] holds the meta data line (unused here);
                # chunk[1:4] hold the rows of the transformation matrix.
                matrix_rows = [
                    Open3DFileHandler._read_matrix_row(row)
                    for row in chunk[1:4]
                ]
                extrinsic_matrix = np.asarray(matrix_rows, dtype=float)

                cam = Camera()
                cam.image_fp_type = image_fp_type
                cam.image_dp = image_dp
                cam._relative_fp = image_relative_fp
                cam._absolute_fp = os.path.join(image_dp, image_relative_fp)

                # The stored rotations are not accurate enough to pass the
                # rotation matrix check -> disable it.
                cam.set_4x4_cam_to_world_mat(extrinsic_matrix,
                                             check_rotation=False)

                cams.append(cam)
        return cams
    def get_calibration_mat(self, blender_camera):
        """Compute the calibration matrix of a Blender camera.

        Combines the render resolution of the current scene with the focal
        length, sensor width and sensor shift of :code:`blender_camera`.
        """
        log_report("INFO", "get_calibration_mat: ...", self)
        render = bpy.context.scene.render
        width = render.resolution_x
        height = render.resolution_y
        max_extent = max(width, height)

        # Convert the focal length from millimeters to pixels using the
        # sensor width and the larger render extent.
        focal_length_in_mm = float(blender_camera.data.lens)
        sensor_width_in_mm = float(blender_camera.data.sensor_width)
        focal_length_in_pixel = (
            float(max_extent) * focal_length_in_mm / sensor_width_in_mm)

        # Blender's shift_x / shift_y are normalized w.r.t. the larger
        # render extent and move the principal point away from the center.
        p_x = width / 2.0 - blender_camera.data.shift_x * max_extent
        p_y = height / 2.0 - blender_camera.data.shift_y * max_extent

        calibration_mat = Camera.compute_calibration_mat(
            focal_length_in_pixel, cx=p_x, cy=p_y)

        log_report("INFO", "render_resolution_width: " + str(width), self)
        log_report("INFO", "render_resolution_height: " + str(height), self)
        log_report(
            "INFO",
            "focal_length_in_pixel: " + str(focal_length_in_pixel),
            self,
        )

        log_report("INFO", "get_calibration_mat: Done", self)
        return calibration_mat
    def _parse_cameras(json_data, image_dp, image_fp_type,
                       suppress_distortion_warnings, op):
        """Create cameras from the "shots" and "cameras" entries of an
        OpenSfM reconstruction json."""
        intrinsics_by_key = json_data["cameras"]
        shots = json_data["shots"]

        cams = []
        for shot_name, shot in shots.items():
            camera = Camera()
            camera.image_fp_type = image_fp_type
            camera.image_dp = image_dp
            camera._relative_fp = shot_name
            camera._absolute_fp = os.path.join(image_dp, shot_name)

            # The shot references its intrinsics via the "camera" key.
            (
                focal_length,
                cx,
                cy,
                width,
                height,
                radial_distortion,
            ) = OpenSfMJSONFileHandler._convert_intrinsics(
                intrinsics_by_key[shot["camera"]],
                camera._relative_fp,
                suppress_distortion_warnings,
                op,
            )
            camera.height = height
            camera.width = width
            calibration_mat = np.array([[focal_length, 0, cx],
                                        [0, focal_length, cy],
                                        [0, 0, 1]])
            camera.set_calibration(calibration_mat, radial_distortion)

            # OpenSfM stores the rotation as a Rodrigues vector and the
            # translation w.r.t. the rotated coordinate frame.
            rodrigues_vec = np.array(shot["rotation"], dtype=float)
            camera.set_rotation_mat(
                OpenSfMJSONFileHandler._rodrigues_to_matrix(rodrigues_vec))
            camera.set_camera_translation_vector_after_rotation(
                np.array(shot["translation"], dtype=float))

            cams.append(camera)
        return cams
    def _parse_open3d_json_file(open3d_ifp, image_dp, image_relative_fp_list,
                                image_fp_type, op):
        """Parse an Open3D .json trajectory file into a list of cameras."""
        cams = []

        with open(open3d_ifp, "r") as open3d_file:
            parameters = json.load(open3d_file)["parameters"]

            if len(parameters) != len(image_relative_fp_list):
                # Image list does not match the trajectory -> substitute
                # dummy names for the missing images.
                image_relative_fp_list = (
                    Open3DFileHandler._create_dummy_fp_list(len(parameters)))

            for camera_parameter, image_relative_fp in zip(
                    parameters, image_relative_fp_list):

                cam = Camera()
                cam.image_fp_type = image_fp_type
                cam.image_dp = image_dp
                cam._relative_fp = image_relative_fp
                cam._absolute_fp = os.path.join(image_dp, image_relative_fp)

                # The reshape + transpose reads the flat matrix values in
                # column-major order. The .json file stores the inverse of
                # the transformation found in the .log file.
                extrinsic_mat = np.linalg.inv(
                    np.array(camera_parameter["extrinsic"],
                             dtype=float).reshape((4, 4)).T)

                intrinsic = camera_parameter["intrinsic"]
                cam.width = intrinsic["width"]
                cam.height = intrinsic["height"]

                # Accuracy of rotation matrices is too low => disable test
                cam.set_4x4_cam_to_world_mat(extrinsic_mat,
                                             check_rotation=False)

                intrinsic_mat = np.array(intrinsic["intrinsic_matrix"],
                                         dtype=float).reshape((3, 3)).T
                cam.set_calibration_mat(intrinsic_mat)

                cams.append(cam)
        return cams
# Ejemplo n.º 6 — 0 (scraping artifact: example separator, kept as comment)
    def parse_meta(meta_ifp, width, height, camera_name, op):
        """Create a camera from an MVE view-specific meta.ini file.

        The :code:`camera_name` parameter is currently unused.
        """
        # The meta.ini file lives inside a view-specific directory that
        # also contains the undistorted image.
        view_specific_dir = os.path.dirname(meta_ifp)
        relative_image_fp = os.path.join(view_specific_dir, "undistorted.png")
        image_dp = os.path.dirname(view_specific_dir)

        camera = Camera()
        camera.image_fp_type = Camera.IMAGE_FP_TYPE_RELATIVE
        camera.image_dp = image_dp
        camera._relative_fp = relative_image_fp
        camera._absolute_fp = os.path.join(image_dp, relative_image_fp)
        camera.width = width
        camera.height = height

        ini_config = configparser.RawConfigParser()
        ini_config.read(meta_ifp)

        def _read(option):
            # All values of interest live in the [camera] section.
            return ini_config.get(section="camera", option=option)

        focal_length_normalized = float(_read("focal_length"))
        pixel_aspect = float(_read("pixel_aspect"))
        if pixel_aspect != 1.0:
            log_report(
                "WARNING",
                "Focal length differs in x and y direction," +
                " setting it to the average value.",
                op,
            )
            focal_length_normalized = (
                focal_length_normalized +
                focal_length_normalized * pixel_aspect) / 2

        # Focal length and principal point are stored normalized w.r.t.
        # the image dimensions.
        focal_length = focal_length_normalized * max(width, height)

        principal_point_list = MVEFileHandler.str_to_arr(
            _read("principal_point"), target_type=float)
        cx = principal_point_list[0] * width
        cy = principal_point_list[1] * height

        camera.set_calibration_mat(
            Camera.compute_calibration_mat(focal_length, cx, cy))

        radial_distortion_vec = np.asarray(
            MVEFileHandler.str_to_arr(_read("radial_distortion"),
                                      target_type=float))
        check_radial_distortion(radial_distortion_vec, relative_image_fp, op)

        rotation_mat = np.asarray(
            MVEFileHandler.str_to_arr(_read("rotation"),
                                      target_type=float)).reshape((3, 3))
        translation_vec = np.asarray(
            MVEFileHandler.str_to_arr(_read("translation"),
                                      target_type=float))

        camera.set_rotation_mat(rotation_mat)
        camera.set_camera_translation_vector_after_rotation(translation_vec)
        return camera
    def _parse_cameras_from_json_data(json_data, image_dp, image_fp_type,
                                      suppress_distortion_warnings, op):
        """Create cameras from Meshroom's SfM json data.

        Returns a tuple of the parsed cameras and a dict mapping the view
        ("pose") index to the index of the corresponding camera in the
        returned list.
        """
        cams = []
        image_index_to_camera_index = {}

        # The json file must contain all three reconstruction sections.
        is_valid_file = ("views" in json_data and "intrinsics" in json_data
                         and "poses" in json_data)

        if not is_valid_file:
            log_report(
                "ERROR",
                "FILE FORMAT ERROR: Incorrect SfM/JSON file. Must contain the"
                + " SfM reconstruction results: view, intrinsics and poses.",
                op,
            )
            return cams, image_index_to_camera_index

        views = json_data["views"]  # is a list of dicts (view)
        intrinsics = json_data["intrinsics"]  # is a list of dicts (intrinsic)
        extrinsics = json_data["poses"]  # is a list of dicts (extrinsic)

        # IMPORTANT:
        # Views contain the number of input images
        # Extrinsics may contain only a subset of views!
        # (Not all views are necessarily contained in the reconstruction)

        for rec_index, extrinsic in enumerate(extrinsics):

            camera = Camera()
            view_index = int(extrinsic["poseId"])
            image_index_to_camera_index[view_index] = rec_index

            # Find the view that corresponds to this pose.
            corresponding_view = MeshroomFileHandler._get_element(
                views, "poseId", view_index)

            camera.image_fp_type = image_fp_type
            camera.image_dp = image_dp
            camera._absolute_fp = str(corresponding_view["path"])
            camera._relative_fp = os.path.basename(
                str(corresponding_view["path"]))
            # The undistorted image is named after the pose id
            # (presumably written by Meshroom's PrepareDenseScene node --
            # confirm against the Meshroom output layout).
            camera._undistorted_relative_fp = str(extrinsic["poseId"]) + ".exr"
            if image_dp is None:
                camera._undistorted_absolute_fp = None
            else:
                camera._undistorted_absolute_fp = os.path.join(
                    image_dp, camera._undistorted_relative_fp)

            camera.width = int(corresponding_view["width"])
            camera.height = int(corresponding_view["height"])
            id_intrinsic = int(corresponding_view["intrinsicId"])

            intrinsic_params = MeshroomFileHandler._get_element(
                intrinsics, "intrinsicId", id_intrinsic)

            focal_length = float(intrinsic_params["pxFocalLength"])
            cx = float(intrinsic_params["principalPoint"][0])
            cy = float(intrinsic_params["principalPoint"][1])

            if ("distortionParams" in intrinsic_params
                    and len(intrinsic_params["distortionParams"]) > 0):
                # TODO proper handling of distortion parameters
                # (currently only the first coefficient is used)
                radial_distortion = float(
                    intrinsic_params["distortionParams"][0])
            else:
                radial_distortion = 0.0

            if not suppress_distortion_warnings:
                check_radial_distortion(radial_distortion, camera._relative_fp,
                                        op)

            camera_calibration_matrix = np.array([[focal_length, 0, cx],
                                                  [0, focal_length, cy],
                                                  [0, 0, 1]])

            camera.set_calibration(camera_calibration_matrix,
                                   radial_distortion)
            extrinsic_params = extrinsic["pose"]["transform"]

            cam_rotation_list = extrinsic_params["rotation"]
            # NOTE(review): the rotation matrix is transposed here --
            # presumably Meshroom stores the inverse/row-major orientation;
            # confirm against the Meshroom sfm format documentation.
            camera.set_rotation_mat(
                np.array(cam_rotation_list, dtype=float).reshape(3, 3).T)
            camera.set_camera_center_after_rotation(
                np.array(extrinsic_params["center"], dtype=float))
            camera.view_index = view_index

            cams.append(camera)
        return cams, image_index_to_camera_index
# Ejemplo n.º 8 — 0 (scraping artifact: example separator, kept as comment)
    def _convert_cameras(
        id_to_col_cameras,
        id_to_col_images,
        image_dp,
        image_fp_type,
        depth_map_idp=None,
        suppress_distortion_warnings=False,
        op=None,
    ):
        """Convert Colmap camera models and images to :code:`Camera` objects.

        Each Colmap image (i.e. each registered view) yields one camera;
        the referenced Colmap camera model provides the intrinsics. If
        :code:`depth_map_idp` is given, a lazy depth map callback is
        attached to every camera.
        """
        # From photogrammetry_importer\ext\read_write_model.py
        #   CameraModel = collections.namedtuple(
        #       "CameraModel", ["model_id", "model_name", "num_params"])
        #   Camera = collections.namedtuple(
        #       "Camera", ["id", "model", "width", "height", "params"])
        #   BaseImage = collections.namedtuple(
        #       "Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"])

        cameras = []
        for col_image in id_to_col_images.values():
            current_camera = Camera()
            current_camera.id = col_image.id
            current_camera.set_rotation_with_quaternion(col_image.qvec)
            current_camera.set_camera_translation_vector_after_rotation(
                col_image.tvec)

            current_camera.image_fp_type = image_fp_type
            current_camera.image_dp = image_dp
            current_camera._relative_fp = col_image.name

            # log_report('INFO', 'image_dp: ' + str(image_dp))
            # log_report('INFO', 'col_image.name: ' + str(col_image.name))

            camera_model = id_to_col_cameras[col_image.camera_id]

            # log_report('INFO', 'image_id: ' + str(col_image.id))
            # log_report('INFO', 'camera_id: ' + str(col_image.camera_id))
            # log_report('INFO', 'camera_model: ' + str(camera_model))

            current_camera.width = camera_model.width
            current_camera.height = camera_model.height

            # Unpack the model-specific parameter list into pinhole
            # intrinsics plus skew and radial distortion.
            (
                fx,
                fy,
                cx,
                cy,
                skew,
                r,
            ) = ColmapFileHandler._parse_camera_param_list(camera_model, )
            if not suppress_distortion_warnings:
                check_radial_distortion(r, current_camera._relative_fp, op)

            camera_calibration_matrix = np.array([[fx, skew, cx], [0, fy, cy],
                                                  [0, 0, 1]])
            # NOTE(review): the parsed radial distortion ``r`` is only used
            # for the warning above; the camera is created with
            # radial_distortion=0. Confirm this is intended (e.g. because
            # the images are assumed to be undistorted).
            current_camera.set_calibration(camera_calibration_matrix,
                                           radial_distortion=0)

            if depth_map_idp is not None:
                # Prefer the geometric depth map over the photometric one.
                geometric_ifp = os.path.join(depth_map_idp,
                                             col_image.name + ".geometric.bin")
                photometric_ifp = os.path.join(
                    depth_map_idp, col_image.name + ".photometric.bin")
                if os.path.isfile(geometric_ifp):
                    depth_map_ifp = geometric_ifp
                elif os.path.isfile(photometric_ifp):
                    depth_map_ifp = photometric_ifp
                else:
                    depth_map_ifp = None
                # The callback is registered even when no depth map file was
                # found (depth_map_ifp is None) -- presumably handled by the
                # callback itself; verify in Camera.set_depth_map_callback.
                current_camera.set_depth_map_callback(
                    read_array,
                    depth_map_ifp,
                    Camera.DEPTH_MAP_WRT_CANONICAL_VECTORS,
                    shift_depth_map_to_pixel_center=False,
                )
            cameras.append(current_camera)
        return cameras
    def parse_cameras(json_data, image_dp, image_fp_type,
                      suppress_distortion_warnings, op):
        """Create cameras from an OpenMVG sfm_data json structure.

        Only views that reference both an existing pose (extrinsic) and an
        existing intrinsic entry produce a camera.
        """
        # Build key-indexed lookups for the three reconstruction sections.
        views = {item["key"]: item for item in json_data["views"]}
        intrinsics = {item["key"]: item for item in json_data["intrinsics"]}
        extrinsics = {item["key"]: item for item in json_data["extrinsics"]}

        # Regard 3D stores the polymorphic attribute in the first intrinsic
        default_polymorphic_name = (
            OpenMVGJSONFileHandler.get_default_polymorphic_name(intrinsics))

        # IMPORTANT:
        # Views contain the description about the dataset and attribute to Pose and Intrinsic data.
        # View -> id_pose, id_intrinsic
        # Since sometimes some views cannot be localized, there is some missing pose and intrinsic data.
        # Extrinsics may contain only a subset of views! (Potentially not all views are contained in the reconstruction)

        cams = []
        # Iterate over views, and create camera if Intrinsic and Pose data exist
        for id, view in views.items():  # Iterate over views

            id_view = view[
                "key"]  # Should be equal to view['value']['ptr_wrapper']['data']['id_view']
            view_data = view["value"]["ptr_wrapper"]["data"]
            id_pose = view_data["id_pose"]
            id_intrinsic = view_data["id_intrinsic"]

            # Check if the view is having corresponding Pose and Intrinsic data
            if (id_pose in extrinsics.keys()
                    and id_intrinsic in intrinsics.keys()):

                camera = Camera()

                camera.image_fp_type = image_fp_type
                camera.image_dp = image_dp
                camera._relative_fp = os.path.join(view_data["local_path"],
                                                   view_data["filename"])
                camera._absolute_fp = os.path.join(
                    json_data["root_path"],
                    view_data["local_path"],
                    view_data["filename"],
                )
                camera.width = view_data["width"]
                camera.height = view_data["height"]
                id_intrinsic = view_data["id_intrinsic"]

                # handle intrinsic params
                intrinsic_values = intrinsics[int(id_intrinsic)]["value"]
                intrinsic_data = intrinsic_values["ptr_wrapper"]["data"]

                # Fall back to the polymorphic name of the first intrinsic
                # when the entry does not carry its own.
                if "polymorphic_name" in intrinsic_values:
                    polymorphic_name = intrinsic_values["polymorphic_name"]
                else:
                    polymorphic_name = default_polymorphic_name
                    log_report(
                        "WARNING",
                        "Key polymorphic_name in intrinsic with id " +
                        str(id_intrinsic) +
                        " is missing, substituting with polymorphic_name of first intrinsic.",
                        op,
                    )

                if polymorphic_name == "spherical":
                    # Spherical cameras carry no pinhole intrinsics.
                    camera.set_panoramic_type(
                        Camera.panoramic_type_equirectangular)
                    # create some dummy values
                    focal_length = 0
                    cx = camera.width / 2
                    cy = camera.height / 2
                else:

                    focal_length = intrinsic_data["focal_length"]
                    principal_point = intrinsic_data["principal_point"]
                    cx = principal_point[0]
                    cy = principal_point[1]

                # For Radial there are several options: "None", disto_k1, disto_k3
                # disto_k3 -> three coefficients, disto_k1 -> a single one.
                if "disto_k3" in intrinsic_data:
                    radial_distortion = [
                        float(intrinsic_data["disto_k3"][0]),
                        float(intrinsic_data["disto_k3"][1]),
                        float(intrinsic_data["disto_k3"][2]),
                    ]
                elif "disto_k1" in intrinsic_data:
                    radial_distortion = float(intrinsic_data["disto_k1"][0])
                else:  # No radial distortion, i.e. pinhole camera model
                    radial_distortion = 0

                if not suppress_distortion_warnings:
                    check_radial_distortion(radial_distortion,
                                            camera._relative_fp, op)

                camera_calibration_matrix = np.array([[focal_length, 0, cx],
                                                      [0, focal_length, cy],
                                                      [0, 0, 1]])

                camera.set_calibration(camera_calibration_matrix,
                                       radial_distortion)
                extrinsic_params = extrinsics[id_pose]
                cam_rotation_list = extrinsic_params["value"]["rotation"]
                camera.set_rotation_mat(
                    np.array(cam_rotation_list, dtype=float))
                camera.set_camera_center_after_rotation(
                    np.array(extrinsic_params["value"]["center"], dtype=float))
                camera.view_index = id_view

                cams.append(camera)
        return cams
# Ejemplo n.º 10 — 0 (scraping artifact: example separator, kept as comment)
    def _parse_cameras(
        json_data,
        image_dp,
        image_fp_type,
        suppress_distortion_warnings,
        op=None,
    ):
        """Create cameras from an OpenMVG sfm_data json structure.

        Only views that reference both an existing pose (extrinsic) and an
        existing intrinsic entry produce a camera.
        """
        # For each input image there exists an entry in "views". In contrast,
        # "extrinsics" contains only information of registered images (i.e.
        # reconstructed camera poses) and may contain only information for a
        # subset of images.
        views = {item["key"]: item for item in json_data["views"]}
        intrinsics = {item["key"]: item for item in json_data["intrinsics"]}
        extrinsics = {item["key"]: item for item in json_data["extrinsics"]}

        # Regard 3D stores the polymorphic attribute in the first intrinsic
        default_polymorphic_name = (
            OpenMVGJSONFileHandler._get_default_polymorphic_name(intrinsics))

        cams = []
        # Iterate over views and create a camera if intrinsic and extrinsic
        # parameters exist
        for id, view in views.items():  # Iterate over views

            id_view = view["key"]
            # view["value"]["ptr_wrapper"]["data"] should be equal to
            # view["value"]["ptr_wrapper"]["data"]["id_view"]
            view_data = view["value"]["ptr_wrapper"]["data"]
            id_pose = view_data["id_pose"]
            id_intrinsic = view_data["id_intrinsic"]

            # Check if the view is having corresponding Pose and Intrinsic data
            if (id_pose in extrinsics.keys()
                    and id_intrinsic in intrinsics.keys()):

                camera = Camera()

                camera.image_fp_type = image_fp_type
                camera.image_dp = image_dp
                camera._relative_fp = os.path.join(view_data["local_path"],
                                                   view_data["filename"])
                camera._absolute_fp = os.path.join(
                    json_data["root_path"],
                    view_data["local_path"],
                    view_data["filename"],
                )
                camera.width = view_data["width"]
                camera.height = view_data["height"]
                id_intrinsic = view_data["id_intrinsic"]

                # Handle intrinsic params
                intrinsic_values = intrinsics[int(id_intrinsic)]["value"]
                intrinsic_data = intrinsic_values["ptr_wrapper"]["data"]

                # Fall back to the polymorphic name of the first intrinsic
                # when the entry does not carry its own.
                if "polymorphic_name" in intrinsic_values:
                    polymorphic_name = intrinsic_values["polymorphic_name"]
                else:
                    polymorphic_name = default_polymorphic_name
                    log_report(
                        "WARNING",
                        "Key polymorphic_name in intrinsic with id " +
                        str(id_intrinsic) +
                        " is missing, substituting with polymorphic_name of" +
                        " first intrinsic.",
                        op,
                    )

                if polymorphic_name == "spherical":
                    # Spherical cameras carry no pinhole intrinsics.
                    camera.set_panoramic_type(
                        Camera.panoramic_type_equirectangular)
                    # create some dummy values
                    focal_length = 0
                    cx = camera.width / 2
                    cy = camera.height / 2
                else:

                    focal_length = intrinsic_data["focal_length"]
                    principal_point = intrinsic_data["principal_point"]
                    cx = principal_point[0]
                    cy = principal_point[1]

                # For Radial there are several options:
                # "None", disto_k1, disto_k3
                # disto_k3 -> three coefficients, disto_k1 -> a single one.
                if "disto_k3" in intrinsic_data:
                    radial_distortion = [
                        float(intrinsic_data["disto_k3"][0]),
                        float(intrinsic_data["disto_k3"][1]),
                        float(intrinsic_data["disto_k3"][2]),
                    ]
                elif "disto_k1" in intrinsic_data:
                    radial_distortion = float(intrinsic_data["disto_k1"][0])
                else:  # No radial distortion, i.e. pinhole camera model
                    radial_distortion = 0

                if not suppress_distortion_warnings:
                    check_radial_distortion(radial_distortion,
                                            camera._relative_fp, op)

                camera_calibration_matrix = np.array([[focal_length, 0, cx],
                                                      [0, focal_length, cy],
                                                      [0, 0, 1]])

                camera.set_calibration(camera_calibration_matrix,
                                       radial_distortion)
                extrinsic_params = extrinsics[id_pose]
                cam_rotation_list = extrinsic_params["value"]["rotation"]
                camera.set_rotation_with_rotation_mat(
                    np.array(cam_rotation_list, dtype=float))
                camera.set_camera_center_after_rotation(
                    np.array(extrinsic_params["value"]["center"], dtype=float))
                camera.view_index = id_view

                cams.append(camera)
        return cams
# Ejemplo n.º 11 — 0 (scraping artifact: example separator, kept as comment)
    def _parse_cameras(
        cls,
        input_file,
        num_cameras,
        camera_calibration_matrix,
        image_dp,
        image_fp_type,
        suppress_distortion_warnings,
        op=None,
    ):
        """
        VisualSFM CAMERA coordinate system is the standard CAMERA
        coordinate system in computer vision (not the same as
        in computer graphics like in bundler, blender, etc.)
        That means the y axis in the image is pointing downwards (not upwards)
        and the camera is looking along the positive z axis (points in front
        of the camera show a positive z value)

        The camera coordinate system in computer vision VISUALSFM uses camera
        matrices, which are rotated around the x axis by 180 degree
        i.e. the y and z axis of the CAMERA MATRICES are inverted
        therefore, the y and z axis of the TRANSLATION VECTOR are also inverted

        If :code:`camera_calibration_matrix` is None, a per-camera
        calibration matrix is built from the focal length of each parsed
        line; otherwise the provided matrix is used for all cameras.
        """
        # log_report('INFO', '_parse_cameras: ...', op)
        cameras = []

        for i in range(num_cameras):
            line = input_file.readline()

            # Read the camera section
            # From the docs:
            # <Camera> = <File name> <focal length> <quaternion WXYZ> <camera center> <radial distortion> 0
            line_values = line.split()
            relative_path = line_values[0].replace("/", os.sep)
            focal_length = float(line_values[1])

            # Quaternion in WXYZ order.
            quaternion = np.array(
                [float(value) for value in line_values[2:6]], dtype=float)

            # Camera center in world coordinates.
            center_vec = np.array(
                [float(value) for value in line_values[6:9]])

            radial_distortion = float(line_values[9])
            if not suppress_distortion_warnings:
                check_radial_distortion(radial_distortion, relative_path, op)

            if camera_calibration_matrix is None:
                # No calibration matrix was provided: build one from the
                # focal length of the CURRENT line. Use a local variable
                # instead of overwriting the parameter -- previously the
                # parameter was reassigned here, so all subsequent cameras
                # reused the first camera's focal length.
                # We have no information about the principal point; it is
                # assumed to lie at the origin of the (centered) image
                # coordinates, hence cx = cy = 0.
                calibration_mat = np.array([[focal_length, 0, 0],
                                            [0, focal_length, 0],
                                            [0, 0, 1]])
            else:
                calibration_mat = camera_calibration_matrix

            # The line is terminated by a literal 0 (per the format docs).
            zero_value = float(line_values[10])
            assert zero_value == 0

            current_camera = Camera()
            # Setting the quaternion also sets the rotation matrix
            current_camera.set_rotation_with_quaternion(quaternion)

            # Set the camera center after rotation
            current_camera._center = center_vec

            # Set the camera view direction as normal w.r.t. world
            # coordinates (the z axis of the camera frame mapped to world).
            cam_view_vec_cam_coord = np.array([0, 0, 1]).T
            cam_rotation_matrix_inv = np.linalg.inv(
                current_camera.get_rotation_as_rotation_mat())
            current_camera.normal = cam_rotation_matrix_inv.dot(
                cam_view_vec_cam_coord)

            translation_vec = cls._compute_translation_vector(
                center_vec, current_camera.get_rotation_as_rotation_mat())
            current_camera._translation_vec = translation_vec

            current_camera.set_calibration(calibration_mat,
                                           radial_distortion=radial_distortion)
            # log_report('INFO', 'Calibration mat:', op)
            # log_report('INFO', str(calibration_mat), op)

            current_camera.image_fp_type = image_fp_type
            current_camera.image_dp = image_dp
            current_camera._relative_fp = relative_path
            current_camera.id = i
            cameras.append(current_camera)
        # log_report('INFO', '_parse_cameras: Done', op)
        return cameras
    def export_selected_cameras_and_vertices_of_meshes(self, odp):
        """Collect the selected Blender cameras and mesh vertices.

        Returns a tuple with a list of :code:`Camera` objects and a list of
        :code:`Point` objects (one green point per mesh vertex).
        """
        log_report("INFO",
                   "export_selected_cameras_and_vertices_of_meshes: ...", self)
        cameras = []
        points = []

        next_point_id = 0
        next_camera_id = 0
        for obj in bpy.context.selected_objects:
            if obj.type == "CAMERA":
                log_report("INFO", "obj.name: " + str(obj.name), self)
                calibration_mat = self.get_calibration_mat(obj)

                world_mat_computer_vision = (
                    self.get_computer_vision_camera_matrix(obj))

                cam = Camera()
                cam.id = next_camera_id
                cam.set_relative_fp(str(obj.name), Camera.IMAGE_FP_TYPE_NAME)
                cam.image_dp = odp
                cam.width = bpy.context.scene.render.resolution_x
                cam.height = bpy.context.scene.render.resolution_y

                cam.set_calibration(calibration_mat, radial_distortion=0)
                cam.set_4x4_cam_to_world_mat(world_mat_computer_vision)
                cameras.append(cam)
                next_camera_id += 1
            elif obj.data is not None:
                # Every vertex of the mesh becomes a green point in
                # world coordinates.
                for vert in obj.data.vertices:
                    points.append(
                        Point(
                            coord=obj.matrix_world @ vert.co,
                            color=[0, 255, 0],
                            id=next_point_id,
                            scalars=[],
                        ))
                    next_point_id += 1
        log_report(
            "INFO",
            "export_selected_cameras_and_vertices_of_meshes: Done",
            self,
        )
        return cameras, points