    def _parse_open3d_json_file(open3d_ifp, image_dp, image_relative_fp_list, image_fp_type, op):
        cams = []

        with open(open3d_ifp, 'r') as open3d_file:
            json_data = json.load(open3d_file)
            parameters = json_data['parameters']

            if len(parameters) != len(image_relative_fp_list):
                # Create some dummy names for missing images
                image_relative_fp_list = Open3DFileHandler._create_dummy_fp_list(
                    len(parameters))

            for pinhole_camera_parameter, image_relative_fp in zip(parameters, image_relative_fp_list):

                cam = Camera()
                cam.image_fp_type = image_fp_type
                cam.image_dp = image_dp
                cam._relative_fp = image_relative_fp
                cam._absolute_fp = os.path.join(image_dp, image_relative_fp)

                extrinsic = pinhole_camera_parameter['extrinsic']
                # Note: the transformation matrix in the .json file is the inverse of 
                #       the transformation matrix in the .log file 
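                # The matrix values in the .json file are stored in
                # column-major order, hence the transpose after the row-major
                # reshape (the intrinsic matrix below is handled the same way).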
                extrinsic_mat = np.linalg.inv(
                    np.array(extrinsic, dtype=float).reshape((4,4)).T)
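                # After the inversion, extrinsic_mat holds the camera-to-world
                # transformation expected by set_4x4_cam_to_world_mat below.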

                intrinsic = pinhole_camera_parameter['intrinsic']

                cam.width = intrinsic['width']
                cam.height = intrinsic['height']

                # The rotation matrices in the file are not accurate enough to pass the rotation check => disable it
                cam.set_4x4_cam_to_world_mat(extrinsic_mat, check_rotation=False)

                intrinsic = intrinsic['intrinsic_matrix']
                intrinsic_mat = np.array(intrinsic, dtype=float).reshape((3,3)).T
                cam.set_calibration_mat(intrinsic_mat)

                cams.append(cam)
        return cams

    def _parse_cameras(input_file, num_cameras, camera_calibration_matrix, op):
        """
        VisualSFM CAMERA coordinate system is the standard CAMERA coordinate system in computer vision (not the same
        as in computer graphics like in bundler, blender, etc.)
        That means
              the y axis in the image is pointing downwards (not upwards)
              the camera is looking along the positive z axis (points in front of the camera show a positive z value)

        The camera coordinate system in computer vision VISUALSFM uses camera matrices,
        which are rotated around the x axis by 180 degree
        i.e. the y and z axis of the CAMERA MATRICES are inverted
        therefore, the y and z axis of the TRANSLATION VECTOR are also inverted
        """
        # op.report({'INFO'}, '_parse_cameras: ...')
        cameras = []

        for i in range(num_cameras):
            line = input_file.readline()

            # Read the camera section
            # From the docs:
            # <Camera> = <File name> <focal length> <quaternion WXYZ> <camera center> <radial distortion> 0
            line_values = line.split()
            file_name = os.path.basename(line_values[0])
            focal_length = float(line_values[1])

            quaternion_w = float(line_values[2])
            quaternion_x = float(line_values[3])
            quaternion_y = float(line_values[4])
            quaternion_z = float(line_values[5])
            quaternion = np.array(
                [quaternion_w, quaternion_x, quaternion_y, quaternion_z],
                dtype=float)

            camera_center_x = float(line_values[6])
            camera_center_y = float(line_values[7])
            camera_center_z = float(line_values[8])
            center_vec = np.array(
                [camera_center_x, camera_center_y, camera_center_z])

            radial_distortion = float(line_values[9])

            # TODO radial_distortion in camera_calibration_matrix
            if camera_calibration_matrix is None:
                camera_calibration_matrix = np.array([[focal_length, 0, 0],
                                                      [0, focal_length, 0],
                                                      [0, 0, 1]])
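            # Note: according to the format description above, VisualSFM only
            # provides a focal length (and a radial distortion value), so the
            # principal point is left at the origin here. Once initialized,
            # this matrix is reused for all remaining cameras.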

            zero_value = float(line_values[10])
            assert (zero_value == 0)

            current_camera = Camera()
            # Setting the quaternion also sets the rotation matrix
            current_camera.set_quaternion(quaternion)

            # Set the camera center after rotation
            # COMMENT FROM PBA CODE:
            #   older format for compatibility
            #   camera_data[i].SetQuaternionRotation(q); // quaternion from the file
            #   camera_data[i].SetCameraCenterAfterRotation(c); // camera center from the file
            current_camera._center = center_vec

            # Set the camera view direction (the "normal") w.r.t. world coordinates
            cam_view_vec_cam_coord = np.array([0, 0, 1])
            cam_rotation_matrix_inv = np.linalg.inv(
                current_camera.get_rotation_mat())
            cam_view_vec_world_coord = cam_rotation_matrix_inv.dot(
                cam_view_vec_cam_coord)
            current_camera.normal = cam_view_vec_world_coord

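            # The helper below presumably computes the translation of the
            # world-to-camera transformation, i.e. t = -R * c for rotation
            # matrix R and camera center c.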
            translation_vec = NVMFileHandler.compute_camera_coordinate_system_translation_vector(
                center_vec, current_camera.get_rotation_mat())
            current_camera._translation_vec = translation_vec

            current_camera.set_calibration_mat(camera_calibration_matrix)
            # op.report({'INFO'}, 'Calibration mat:')
            # op.report({'INFO'}, str(camera_calibration_matrix))
            current_camera.file_name = file_name
            current_camera.id = i
            cameras.append(current_camera)
        # op.report({'INFO'}, '_parse_cameras: Done')
        return cameras