def get_calibration_mat(op, blender_camera):
    """Build the calibration (intrinsic) matrix for a Blender camera.

    :param op: Blender operator used for log reporting via ``op.report``.
    :param blender_camera: Blender camera object; focal length, sensor
        width and principal point shifts are read from its ``data`` block.
    :return: calibration matrix as produced by
        ``Camera.compute_calibration_mat``.
    """
    op.report({'INFO'}, 'get_calibration_mat: ...')
    scene = bpy.context.scene
    render_resolution_width = scene.render.resolution_x
    render_resolution_height = scene.render.resolution_y
    focal_length_in_mm = float(blender_camera.data.lens)
    sensor_width_in_mm = float(blender_camera.data.sensor_width)

    # Blender normalizes the sensor (and the shift values below) w.r.t.
    # the larger image dimension, hence the single shared max_extent.
    # (Previously this expression re-read scene.render.resolution_x/y and
    # recomputed the max twice.)
    max_extent = float(max(render_resolution_width, render_resolution_height))
    focal_length_in_pixel = \
        max_extent * focal_length_in_mm / sensor_width_in_mm

    # shift_x / shift_y are expressed relative to the larger dimension.
    p_x = render_resolution_width / 2.0 - blender_camera.data.shift_x * max_extent
    p_y = render_resolution_height / 2.0 - blender_camera.data.shift_y * max_extent

    calibration_mat = Camera.compute_calibration_mat(focal_length_in_pixel,
                                                     cx=p_x,
                                                     cy=p_y)

    op.report({'INFO'},
              'render_resolution_width: ' + str(render_resolution_width))
    op.report({'INFO'},
              'render_resolution_height: ' + str(render_resolution_height))
    op.report({'INFO'}, 'focal_length_in_pixel: ' + str(focal_length_in_pixel))

    op.report({'INFO'}, 'get_calibration_mat: Done')
    return calibration_mat
# Exemplo n.º 2
# 0
    def enhance_camera_with_intrinsics(self, cameras):
        """Ensure every camera carries a calibration matrix.

        If all cameras already provide intrinsics they are returned
        unchanged; otherwise the user-supplied defaults (focal length and
        principal point) are applied to every camera.

        :param cameras: iterable of Camera objects.
        :return: tuple ``(cameras, success)``.
        """
        if all(cam.has_intrinsics() for cam in cameras):
            self.report({'INFO'}, 'Using intrinsics from file (.json).')
            return cameras, True

        self.report({'INFO'}, 'Using intrinsics from user options, since not present in the reconstruction file (.log).')
        if math.isnan(self.default_focal_length):
            self.report({'ERROR'}, 'User must provide the focal length using the import options.')
            return [], False

        # The NaN check is invariant across cameras, so evaluate it once.
        principal_point_missing = (math.isnan(self.default_pp_x)
                                   or math.isnan(self.default_pp_y))
        if principal_point_missing:
            self.report({'WARNING'}, 'Setting the principal point to the image center.')

        for cam in cameras:
            if principal_point_missing:
                # If no images are provided, the user must provide a
                # default principal point.
                assert cam.width is not None
                assert cam.height is not None
                cx = cam.width / 2.0
                cy = cam.height / 2.0
            else:
                cx = self.default_pp_x
                cy = self.default_pp_y

            calibration_mat = Camera.compute_calibration_mat(
                focal_length=self.default_focal_length, cx=cx, cy=cy)
            cam.set_calibration_mat(calibration_mat)
        return cameras, True
    def _parse_open3d_log_file(open3d_ifp, image_dp, image_relative_fp_list, image_fp_type, op):
        """Parse an Open3D trajectory ``.log`` file into a list of cameras.

        The file is read in chunks of 5 lines: one meta data line followed
        by four rows of a transformation matrix.

        :param open3d_ifp: path to the ``.log`` input file.
        :param image_dp: directory containing the corresponding images.
        :param image_relative_fp_list: relative image file paths; if its
            length does not match the number of chunks in the file, dummy
            names are generated instead.
        :param image_fp_type: file path type forwarded to each camera.
        :param op: Blender operator for reporting; unused in this method.
        :return: list of Camera objects with extrinsics set.
        """
        cams = []
        with open(open3d_ifp, 'r') as open3d_file:

            lines = open3d_file.readlines()
            # Chunk size: 1 line meta data, 4 lines for the matrix
            chunk_size = 5
            assert len(lines) % chunk_size == 0
            chunks = Open3DFileHandler._chunker(lines, chunk_size)

            if len(chunks) != len(image_relative_fp_list):
                # Create some dummy names for missing images
                image_relative_fp_list = Open3DFileHandler._create_dummy_fp_list(
                    len(chunks))

            for chunk, image_relative_fp in zip(chunks, image_relative_fp_list):
                # Meta data line is currently not used.
                meta_data = chunk[0]
        
                # Only the first 3 matrix rows are read; the 4th row is
                # presumably [0 0 0 1] — TODO confirm that
                # set_4x4_cam_to_world_mat accepts a 3x4 matrix.
                matrix_list = [
                    Open3DFileHandler._read_matrix_row(chunk[1]),
                    Open3DFileHandler._read_matrix_row(chunk[2]),
                    Open3DFileHandler._read_matrix_row(chunk[3])]

                # Note: the transformation matrix in the .json file is the inverse of 
                #       the transformation matrix in the .log file 
                extrinsic_matrix = np.asarray(matrix_list, dtype=float)

                cam = Camera()
                cam.image_fp_type = image_fp_type
                cam.image_dp = image_dp
                cam._relative_fp = image_relative_fp
                image_absolute_fp = os.path.join(image_dp, image_relative_fp)
                cam._absolute_fp = image_absolute_fp

                # Accuracy of rotation matrices is too low => disable rotation test
                cam.set_4x4_cam_to_world_mat(
                    extrinsic_matrix, check_rotation=False)

                cams.append(cam)
        return cams
def export_selected_cameras_and_vertices_of_meshes(op):
    """Collect the selected Blender cameras and mesh vertices for export.

    Selected camera objects are converted to Camera instances (calibration
    matrix plus extrinsics in computer vision convention); the vertices of
    all other selected objects with mesh data are gathered as white Points
    in world coordinates.

    :param op: Blender operator used for log reporting via ``op.report``.
    :return: tuple ``(cameras, points)``.
    """
    op.report({'INFO'}, 'export_selected_cameras_and_vertices_of_meshes: ...')
    cameras = []
    points = []

    point_index = 0
    camera_index = 0
    for obj in bpy.context.selected_objects:
        if obj.type == 'CAMERA':
            op.report({'INFO'}, 'obj.name: ' + str(obj.name))
            calibration_mat = get_calibration_mat(op, obj)

            camera_matrix_computer_vision = get_computer_vision_camera_matrix(
                op, obj)

            cam = Camera()
            # Bug fix: previously the literal string 'camera_index' was
            # assigned (str('camera_index')) instead of the running index.
            cam.file_name = str(camera_index)
            cam.set_calibration(calibration_mat, radial_distortion=0)
            cam.set_4x4_cam_to_world_mat(camera_matrix_computer_vision)
            cameras.append(cam)
            camera_index += 1

        else:
            if obj.data is not None:
                obj_points = []
                for vert in obj.data.vertices:
                    # NOTE: '*' is the pre-Blender-2.8 matrix-vector
                    # product; Blender 2.8+ uses the '@' operator.
                    coord_world = obj.matrix_world * vert.co
                    obj_points.append(
                        Point(coord=coord_world,
                              color=[255, 255, 255],
                              measurements=[],
                              id=point_index,
                              scalars=[]))

                    point_index += 1
                points += obj_points
    op.report({'INFO'}, 'export_selected_cameras_and_vertices_of_meshes: Done')
    return cameras, points
    def _parse_cameras(input_file, num_cameras, camera_calibration_matrix, op):
        """
        VisualSFM CAMERA coordinate system is the standard CAMERA coordinate system in computer vision (not the same
        as in computer graphics like in bundler, blender, etc.)
        That means
              the y axis in the image is pointing downwards (not upwards)
              the camera is looking along the positive z axis (points in front of the camera show a positive z value)

        The camera coordinate system in computer vision VISUALSFM uses camera matrices,
        which are rotated around the x axis by 180 degree
        i.e. the y and z axis of the CAMERA MATRICES are inverted
        therefore, the y and z axis of the TRANSLATION VECTOR are also inverted

        :param input_file: open text file positioned at the camera section.
        :param num_cameras: number of camera lines to read.
        :param camera_calibration_matrix: optional 3x3 calibration matrix
            shared by all cameras; if None, a per-camera matrix is built
            from the focal length in the file (principal point at origin).
        :param op: Blender operator (only used by commented-out reports).
        :return: list of Camera objects.
        """
        # op.report({'INFO'}, '_parse_cameras: ...')
        cameras = []

        for i in range(num_cameras):
            line = input_file.readline()

            # Read the camera section
            # From the docs:
            # <Camera> = <File name> <focal length> <quaternion WXYZ> <camera center> <radial distortion> 0
            line_values = line.split()
            file_name = os.path.basename(line_values[0])
            focal_length = float(line_values[1])

            quaternion_w = float(line_values[2])
            quaternion_x = float(line_values[3])
            quaternion_y = float(line_values[4])
            quaternion_z = float(line_values[5])
            quaternion = np.array(
                [quaternion_w, quaternion_x, quaternion_y, quaternion_z],
                dtype=float)

            camera_center_x = float(line_values[6])
            camera_center_y = float(line_values[7])
            camera_center_z = float(line_values[8])
            center_vec = np.array(
                [camera_center_x, camera_center_y, camera_center_z])

            radial_distortion = float(line_values[9])

            # TODO radial_distortion in camera_calibration_matrix
            if camera_calibration_matrix is not None:
                calibration_mat = camera_calibration_matrix
            else:
                # Bug fix: the fallback matrix is built in a LOCAL variable.
                # Previously the parameter itself was overwritten, so every
                # camera after the first silently reused the FIRST camera's
                # focal length instead of its own.
                calibration_mat = np.array([[focal_length, 0, 0],
                                            [0, focal_length, 0],
                                            [0, 0, 1]])

            zero_value = float(line_values[10])
            assert (zero_value == 0)

            current_camera = Camera()
            # Setting the quaternion also sets the rotation matrix
            current_camera.set_quaternion(quaternion)

            # Set the camera center after rotation
            # COMMENT FROM PBA CODE:
            #   older format for compability
            #   camera_data[i].SetQuaternionRotation(q); // quaternion from the file
            #   camera_data[i].SetCameraCenterAfterRotation(c); // camera center from the file
            current_camera._center = center_vec

            # set the camera view direction as normal w.r.t world coordinates
            cam_view_vec_cam_coord = np.array([0, 0, 1]).T
            cam_rotation_matrix_inv = np.linalg.inv(
                current_camera.get_rotation_mat())
            cam_view_vec_world_coord = cam_rotation_matrix_inv.dot(
                cam_view_vec_cam_coord)
            current_camera.normal = cam_view_vec_world_coord

            translation_vec = NVMFileHandler.compute_camera_coordinate_system_translation_vector(
                center_vec, current_camera.get_rotation_mat())
            current_camera._translation_vec = translation_vec

            current_camera.set_calibration_mat(calibration_mat)
            # op.report({'INFO'}, 'Calibration mat:')
            # op.report({'INFO'}, str(calibration_mat))
            current_camera.file_name = file_name
            current_camera.id = i
            cameras.append(current_camera)
        # op.report({'INFO'}, '_parse_cameras: Done')
        return cameras
    def _parse_cameras_from_json_data(json_data, image_dp, image_fp_type,
                                      suppress_distortion_warnings, op):
        """Create Camera objects from a Meshroom-style SfM/JSON dict.

        :param json_data: parsed json dict; must contain the keys 'views',
            'intrinsics' and 'poses', otherwise an error is reported and
            empty results are returned.
        :param image_dp: directory with the undistorted images, may be None.
        :param image_fp_type: file path type forwarded to each camera.
        :param suppress_distortion_warnings: if False, warn about non-zero
            radial distortion values.
        :param op: Blender operator used for reporting.
        :return: tuple ``(cams, image_index_to_camera_index)`` where the
            dict maps view (pose) indices to reconstructed camera indices.
        """
        cams = []
        image_index_to_camera_index = {}

        is_valid_file = 'views' in json_data and 'intrinsics' in json_data and 'poses' in json_data

        if not is_valid_file:
            op.report({
                'ERROR'
            }, 'FILE FORMAT ERROR: Incorrect SfM/JSON file. Must contain the SfM reconstruction results: '
                      + 'view, intrinsics and poses.')
            return cams, image_index_to_camera_index

        views = json_data['views']  # is a list of dicts (view)
        intrinsics = json_data['intrinsics']  # is a list of dicts (intrinsic)
        extrinsics = json_data['poses']  # is a list of dicts (extrinsic)

        # IMPORTANT:
        # Views contain the number of input images
        # Extrinsics may contain only a subset of views!
        # (Not all views are necessarily contained in the reconstruction)

        for rec_index, extrinsic in enumerate(extrinsics):

            camera = Camera()
            # 'poseId' links the pose entry back to its view entry.
            view_index = int(extrinsic['poseId'])
            image_index_to_camera_index[view_index] = rec_index

            corresponding_view = get_element(views, "poseId", view_index, op)

            camera.image_fp_type = image_fp_type
            camera.image_dp = image_dp
            camera._absolute_fp = str(corresponding_view['path'])
            camera._relative_fp = os.path.basename(
                str(corresponding_view['path']))
            # Undistorted images are named <poseId>.exr (Meshroom layout).
            camera._undistorted_relative_fp = str(extrinsic['poseId']) + '.exr'
            if image_dp is None:
                camera._undistorted_absolute_fp = None
            else:
                camera._undistorted_absolute_fp = os.path.join(
                    image_dp, camera._undistorted_relative_fp)

            camera.width = int(corresponding_view['width'])
            camera.height = int(corresponding_view['height'])
            id_intrinsic = int(corresponding_view['intrinsicId'])

            intrinsic_params = get_element(intrinsics, "intrinsicId",
                                           id_intrinsic, op)

            focal_length = float(intrinsic_params['pxFocalLength'])
            cx = float(intrinsic_params['principalPoint'][0])
            cy = float(intrinsic_params['principalPoint'][1])

            # Only the first distortion coefficient is used.
            if 'distortionParams' in intrinsic_params and len(
                    intrinsic_params['distortionParams']) > 0:
                # TODO proper handling of distortion parameters
                radial_distortion = float(
                    intrinsic_params['distortionParams'][0])
            else:
                radial_distortion = 0.0

            if not suppress_distortion_warnings:
                check_radial_distortion(radial_distortion, camera._relative_fp,
                                        op)

            camera_calibration_matrix = np.array([[focal_length, 0, cx],
                                                  [0, focal_length, cy],
                                                  [0, 0, 1]])

            camera.set_calibration(camera_calibration_matrix,
                                   radial_distortion)
            extrinsic_params = extrinsic['pose']['transform']

            # NOTE(review): the transpose suggests the rotation is stored
            # in the transposed convention in the file — confirm against
            # the Meshroom/AliceVision documentation.
            cam_rotation_list = extrinsic_params['rotation']
            camera.set_rotation_mat(
                np.array(cam_rotation_list, dtype=float).reshape(3, 3).T)
            camera.set_camera_center_after_rotation(
                np.array(extrinsic_params['center'], dtype=float))
            camera.view_index = view_index

            cams.append(camera)
        return cams, image_index_to_camera_index
# Exemplo n.º 7
# 0
    def parse_cameras(json_data, image_dp, image_fp_type,
                      suppress_distortion_warnings, op):
        """Create Camera objects from an OpenMVG-style SfM json dict.

        Only views that have both a pose (extrinsic) and an intrinsic entry
        are converted; views that could not be localized are skipped.

        :param json_data: parsed json dict with 'views', 'intrinsics' and
            'extrinsics' lists (each entry carries a 'key').
        :param image_dp: image directory forwarded to each camera.
        :param image_fp_type: file path type forwarded to each camera.
        :param suppress_distortion_warnings: if False, warn about non-zero
            radial distortion values.
        :param op: Blender operator used for reporting.
        :return: list of Camera objects.
        """
        views = {item['key']: item for item in json_data['views']}
        intrinsics = {item['key']: item for item in json_data['intrinsics']}
        extrinsics = {item['key']: item for item in json_data['extrinsics']}

        # IMPORTANT:
        # Views contain the description about the dataset and attribute to Pose and Intrinsic data.
        # View -> id_pose, id_intrinsic
        # Since sometimes some views cannot be localized, there is some missing pose and intrinsic data.
        # Extrinsics may contain only a subset of views! (Potentially not all views are contained in the reconstruction)

        cams = []
        # Iterate over views, and create camera if Intrinsic and Pose data
        # exist. (The loop variable was previously named `id`, which
        # shadowed the builtin and was unused — renamed.)
        for view_key, view in views.items():

            id_view = view[
                'key']  # Should be equal to view['value']['ptr_wrapper']['data']['id_view']
            view_data = view['value']['ptr_wrapper']['data']
            id_pose = view_data['id_pose']
            id_intrinsic = view_data['id_intrinsic']

            # Check if the view is having corresponding Pose and Intrinsic data
            if id_pose in extrinsics and id_intrinsic in intrinsics:

                camera = Camera()

                camera.image_fp_type = image_fp_type
                camera.image_dp = image_dp
                camera._relative_fp = os.path.join(view_data['local_path'],
                                                   view_data['filename'])
                camera._absolute_fp = os.path.join(json_data['root_path'],
                                                   view_data['local_path'],
                                                   view_data['filename'])
                camera.width = view_data['width']
                camera.height = view_data['height']

                # handle intrinsic params (entry is looked up once instead
                # of twice as before; a redundant re-assignment of
                # id_intrinsic was also removed)
                intrinsic_entry = intrinsics[int(id_intrinsic)]['value']
                intrinsic_data = intrinsic_entry['ptr_wrapper']['data']
                polymorphic_name = intrinsic_entry['polymorphic_name']

                if polymorphic_name == 'spherical':
                    camera.set_panoramic_type(
                        Camera.panoramic_type_equirectangular)
                    # create some dummy values
                    focal_length = 0
                    cx = camera.width / 2
                    cy = camera.height / 2
                else:

                    focal_length = intrinsic_data['focal_length']
                    principal_point = intrinsic_data['principal_point']
                    cx = principal_point[0]
                    cy = principal_point[1]

                # For Radial there are several options: "None", disto_k1, disto_k3
                if 'disto_k3' in intrinsic_data:
                    radial_distortion = [
                        float(intrinsic_data['disto_k3'][0]),
                        float(intrinsic_data['disto_k3'][1]),
                        float(intrinsic_data['disto_k3'][2])
                    ]
                elif 'disto_k1' in intrinsic_data:
                    radial_distortion = float(intrinsic_data['disto_k1'][0])
                else:  # No radial distortion, i.e. pinhole camera model
                    radial_distortion = 0

                if not suppress_distortion_warnings:
                    check_radial_distortion(radial_distortion,
                                            camera._relative_fp, op)

                camera_calibration_matrix = np.array([[focal_length, 0, cx],
                                                      [0, focal_length, cy],
                                                      [0, 0, 1]])

                camera.set_calibration(camera_calibration_matrix,
                                       radial_distortion)
                extrinsic_params = extrinsics[id_pose]
                cam_rotation_list = extrinsic_params['value']['rotation']
                camera.set_rotation_mat(
                    np.array(cam_rotation_list, dtype=float))
                camera.set_camera_center_after_rotation(
                    np.array(extrinsic_params['value']['center'], dtype=float))
                camera.view_index = id_view

                cams.append(camera)
        return cams
    def convert_cameras(id_to_col_cameras, id_to_col_images, image_dp,
                        image_fp_type, suppress_distortion_warnings, op):
        """Convert Colmap image/camera-model records into Camera objects.

        Colmap stores the pose per image (quaternion + translation) and
        the intrinsics per camera model; both are merged into one Camera
        instance per image.

        :return: list of Camera objects.
        """
        # Colmap namedtuples (photogrammetry_importer\ext\read_write_model.py):
        #   CameraModel("model_id", "model_name", "num_params")
        #   Camera("id", "model", "width", "height", "params")
        #   BaseImage("id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids")

        cameras = []
        for col_image in id_to_col_images.values():
            cam = Camera()
            cam.id = col_image.id
            cam.set_quaternion(col_image.qvec)
            cam.set_camera_translation_vector_after_rotation(col_image.tvec)

            cam.image_fp_type = image_fp_type
            cam.image_dp = image_dp
            cam._relative_fp = col_image.name

            col_camera_model = id_to_col_cameras[col_image.camera_id]
            cam.width = col_camera_model.width
            cam.height = col_camera_model.height

            focal_length, cx, cy, radial_dist = parse_camera_param_list(
                col_camera_model)
            if not suppress_distortion_warnings:
                check_radial_distortion(radial_dist, cam._relative_fp, op)

            # Radial distortion is only warned about above, not applied.
            calibration_mat = np.array(
                [[focal_length, 0, cx],
                 [0, focal_length, cy],
                 [0, 0, 1]])
            cam.set_calibration(calibration_mat, radial_distortion=0)

            cameras.append(cam)

        return cameras
# Exemplo n.º 9
# 0
    def parse_cameras(json_data, image_dp, image_fp_type,
                      suppress_distortion_warnings, op):
        """Create Camera objects from OpenSfM reconstruction json data.

        Intrinsics are read from ``json_data['cameras']`` and the poses
        (Rodrigues rotation vector + translation) from
        ``json_data['shots']``.

        :return: list of Camera objects.
        """
        intrinsics_by_key = json_data['cameras']
        shots = json_data['shots']

        cams = []
        for shot_name, shot in shots.items():
            cam = Camera()
            cam.image_fp_type = image_fp_type
            cam.image_dp = image_dp
            cam._relative_fp = shot_name
            cam._absolute_fp = os.path.join(image_dp, shot_name)

            focal_length, cx, cy, width, height, radial_distortion = \
                OpenSfMJSONFileHandler.convert_intrinsics(
                    intrinsics_by_key[shot['camera']], cam._relative_fp,
                    suppress_distortion_warnings, op)

            cam.height = height
            cam.width = width

            calibration_mat = np.array(
                [[focal_length, 0, cx],
                 [0, focal_length, cy],
                 [0, 0, 1]])
            cam.set_calibration(calibration_mat, radial_distortion)

            # OpenSfM stores the rotation as a Rodrigues (axis-angle) vector.
            rot_mat = OpenSfMJSONFileHandler.rodrigues_to_matrix(
                np.array(shot['rotation'], dtype=float))
            cam.set_rotation_mat(rot_mat)
            cam.set_camera_translation_vector_after_rotation(
                np.array(shot['translation'], dtype=float))

            cams.append(cam)
        return cams
    def convert_cameras(id_to_col_cameras, id_to_col_images, op):
        """Convert Colmap image records into Camera objects.

        Colmap namedtuples (see read_write_model.py):
          CameraModel("model_id", "model_name", "num_params")
          Camera("id", "model", "width", "height", "params")
          BaseImage("id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids")

        :param id_to_col_cameras: dict of Colmap camera models; exactly one
            model is expected (Blender supports only one for all images).
        :param id_to_col_images: dict of Colmap image records.
        :param op: Blender operator used for log reporting.
        :return: list of Camera objects.
        """
        # The camera model is shared by all images, so resolve, validate,
        # log and parse it ONCE (previously this was redone per image,
        # spamming the log with identical messages).
        camera_models = list(id_to_col_cameras.values())
        # Blender supports only one camera model for all images
        assert len(camera_models) == 1
        camera_model = camera_models[0]
        op.report({'INFO'}, 'camera_model: ' + str(camera_model))

        focal_length, cx, cy = parse_camera_param_list(camera_model)

        cameras = []
        for col_image in id_to_col_images.values():
            current_camera = Camera()
            current_camera.id = col_image.id
            current_camera.set_quaternion(col_image.qvec)
            current_camera.set_camera_translation_vector_after_rotation(
                col_image.tvec)
            current_camera.file_name = col_image.name

            current_camera.width = camera_model.width
            current_camera.height = camera_model.height

            # A fresh matrix per camera so instances do not share state.
            camera_calibration_matrix = np.array([[focal_length, 0, cx],
                                                  [0, focal_length, cy],
                                                  [0, 0, 1]])
            current_camera.set_calibration(camera_calibration_matrix,
                                           radial_distortion=0)

            cameras.append(current_camera)

        return cameras
def export_selected_cameras_and_vertices_of_meshes(op, odp):
    """Gather the selected Blender cameras and mesh vertices for export.

    Selected camera objects become Camera instances (computer vision
    convention, render resolution as image size, images located in odp);
    vertices of all other selected objects with data become green Points
    in world coordinates.

    :param op: Blender operator used for log reporting via ``op.report``.
    :param odp: output directory path assigned as the cameras' image_dp.
    :return: tuple ``(cameras, points)``.
    """
    op.report({'INFO'}, 'export_selected_cameras_and_vertices_of_meshes: ...')
    cameras = []
    points = []

    point_index = 0
    camera_index = 0
    for obj in bpy.context.selected_objects:
        if obj.type == 'CAMERA':
            op.report({'INFO'}, 'obj.name: ' + str(obj.name))
            calibration_mat = get_calibration_mat(op, obj)
            camera_matrix_computer_vision = get_computer_vision_camera_matrix(
                op, obj)

            cam = Camera()
            cam.id = camera_index
            cam.set_relative_fp(str(obj.name), Camera.IMAGE_FP_TYPE_NAME)
            cam.image_dp = odp
            render = bpy.context.scene.render
            cam.width = render.resolution_x
            cam.height = render.resolution_y

            cam.set_calibration(calibration_mat, radial_distortion=0)
            cam.set_4x4_cam_to_world_mat(camera_matrix_computer_vision)
            cameras.append(cam)
            camera_index += 1
        elif obj.data is not None:
            for vert in obj.data.vertices:
                world_coord = obj.matrix_world @ vert.co
                points.append(
                    Point(coord=world_coord,
                          color=[0, 255, 0],
                          id=point_index,
                          scalars=[]))
                point_index += 1
    op.report({'INFO'}, 'export_selected_cameras_and_vertices_of_meshes: Done')
    return cameras, points
# Exemplo n.º 12
# 0
    def parse_cameras(json_data, op):
        """Create Camera objects from an OpenMVG-style json reconstruction.

        :param json_data: parsed json dict with 'views', 'intrinsics' and
            'extrinsics' lists.
        :param op: Blender operator used for reporting.
        :return: tuple ``(cams, image_index_to_camera_index)`` where the
            dict maps view indices to reconstructed camera indices.
        :raises AssertionError: if an intrinsic carries 'disto_k3'
            (3 radial distortion parameters are not supported here).
        """
        views = json_data['views']
        intrinsics = json_data['intrinsics']
        extrinsics = json_data['extrinsics']

        # IMPORTANT:
        # Views contain the number of input images
        # Extrinsics may contain only a subset of views! (Potentially not all views are contained in the reconstruction)
        # Matching entries are determined by view['key'] == extrinsics['key']

        cams = []
        image_index_to_camera_index = {}
        for rec_index, extrinsic in enumerate(
                extrinsics):  # Iterate over extrinsics, not views!

            camera = Camera()
            # The key is defined w.r.t. view indices (NOT reconstructed camera indices)
            view_index = int(extrinsic['key'])
            image_index_to_camera_index[view_index] = rec_index
            corresponding_view = views[view_index]

            camera.file_name = corresponding_view['value']['ptr_wrapper'][
                'data']['filename']
            camera.width = corresponding_view['value']['ptr_wrapper']['data'][
                'width']
            camera.height = corresponding_view['value']['ptr_wrapper']['data'][
                'height']
            id_intrinsic = corresponding_view['value']['ptr_wrapper']['data'][
                'id_intrinsic']

            # handle intrinsic params
            intrinsic_params = intrinsics[int(
                id_intrinsic)]['value']['ptr_wrapper']['data']
            focal_length = intrinsic_params['focal_length']
            principal_point = intrinsic_params['principal_point']
            cx = principal_point[0]
            cy = principal_point[1]

            # NOTE(review): aborting via `assert False` is stripped under
            # ``python -O``; raising an exception would be more robust.
            if 'disto_k3' in intrinsic_params:
                op.report({'INFO'},
                          '3 Radial Distortion Parameters are not supported')
                assert False

            # For Radial there are several options: "None", disto_k1, disto_k3
            if 'disto_k1' in intrinsic_params:
                radial_distortion = float(intrinsic_params['disto_k1'][0])
            else:  # No radial distortion, i.e. pinhole camera model
                radial_distortion = 0

            camera_calibration_matrix = np.array([[focal_length, 0, cx],
                                                  [0, focal_length, cy],
                                                  [0, 0, 1]])

            camera.set_calibration(camera_calibration_matrix,
                                   radial_distortion)
            extrinsic_params = extrinsic['value']
            cam_rotation_list = extrinsic_params['rotation']
            camera.set_rotation_mat(np.array(cam_rotation_list, dtype=float))
            camera.set_camera_center_after_rotation(
                np.array(extrinsic_params['center'], dtype=float))
            camera.view_index = view_index

            cams.append(camera)
        return cams, image_index_to_camera_index
    def _parse_open3d_json_file(open3d_ifp, image_dp, image_relative_fp_list, image_fp_type, op):
        """Parse an Open3D pinhole camera trajectory ``.json`` file.

        :param open3d_ifp: path to the ``.json`` input file.
        :param image_dp: directory containing the corresponding images.
        :param image_relative_fp_list: relative image file paths; if its
            length does not match the number of camera parameter entries,
            dummy names are generated instead.
        :param image_fp_type: file path type forwarded to each camera.
        :param op: Blender operator for reporting; unused in this method.
        :return: list of Camera objects with intrinsics and extrinsics set.
        """
        cams = []

        with open(open3d_ifp, 'r') as json_file:
            parameters = json.load(json_file)['parameters']

            if len(parameters) != len(image_relative_fp_list):
                # Create some dummy names for missing images
                image_relative_fp_list = Open3DFileHandler._create_dummy_fp_list(
                    len(parameters))

            for pinhole_params, relative_fp in zip(parameters, image_relative_fp_list):
                cam = Camera()
                cam.image_fp_type = image_fp_type
                cam.image_dp = image_dp
                cam._relative_fp = relative_fp
                cam._absolute_fp = os.path.join(image_dp, relative_fp)

                # Note: the transformation matrix in the .json file is the
                #       inverse of the transformation matrix in the .log
                #       file (values are stored column-major, hence the .T)
                world_to_cam_mat = np.array(
                    pinhole_params['extrinsic'], dtype=float).reshape((4, 4)).T
                cam_to_world_mat = np.linalg.inv(world_to_cam_mat)

                intrinsic = pinhole_params['intrinsic']
                cam.width = intrinsic['width']
                cam.height = intrinsic['height']

                # Accuracy of rotation matrices is too low => disable test
                cam.set_4x4_cam_to_world_mat(
                    cam_to_world_mat, check_rotation=False)

                calibration_mat = np.array(
                    intrinsic['intrinsic_matrix'], dtype=float).reshape((3, 3)).T
                cam.set_calibration_mat(calibration_mat)

                cams.append(cam)
        return cams