Code example #1
0
    def _parse_cameras(input_file, num_cameras, camera_calibration_matrix):
        """Parse *num_cameras* camera lines from an open VisualSFM NVM file.

        :param input_file: open file object positioned at the first camera line.
        :param num_cameras: number of camera lines to read.
        :param camera_calibration_matrix: optional 3x3 calibration matrix shared
            by all cameras. If ``None``, a calibration matrix is built per
            camera from the focal length stored in its own NVM line.
        :return: list of ``Camera`` objects in file order.
        :raises ValueError: if a camera line does not end with the mandatory 0.
        """

        # VisualSFM CAMERA coordinate system is the standard
        # CAMERA coordinate system in computer vision
        # (not the same as in computer graphics like in bundler, blender, etc.)
        # That means
        #       the y axis in the image is pointing downwards (not upwards)
        #       the camera is looking along the positive z axis
        #       (points in front of the camera show a positive z value)

        # The camera coordinate system in computer vision VISUALSFM
        # uses camera matrices which are rotated around the x axis by 180 degree
        # i.e. the y and z axis of the CAMERA MATRICES are inverted
        # therefore, the y and z axis of the TRANSLATION VECTOR are also inverted

        cameras = []

        for camera_index in range(num_cameras):
            line = input_file.readline()

            # <Camera> = <File name> <focal length> <quaternion WXYZ> <camera center> <radial distortion> 0
            line_values = line.split()

            file_name = os.path.basename(line_values[0])

            focal_length = float(line_values[1])

            # FIX: do not overwrite the (None) parameter with the first
            # camera's focal length -- that silently applied camera #0's focal
            # length to every subsequent camera, although each NVM camera line
            # carries its own focal length. Build a per-camera matrix instead.
            if camera_calibration_matrix is None:
                calibration_mat = np.array([[focal_length, 0, 0],
                                            [0, focal_length, 0],
                                            [0, 0, 1]])
            else:
                calibration_mat = camera_calibration_matrix

            # Quaternion is stored in WXYZ order (tokens 2..5).
            quaternion = np.array(
                [float(value) for value in line_values[2:6]], dtype=float)

            # Camera center in world coordinates (tokens 6..8).
            center_vec = np.array(
                [float(value) for value in line_values[6:9]], dtype=float)

            radial_distortion = float(line_values[9])

            # Every NVM camera line must end with a literal 0. Raise instead
            # of asserting: an assert is stripped under "python -O" and this
            # validates external file input.
            zero_value = float(line_values[10])
            if zero_value != 0:
                raise ValueError(
                    'NVM camera line does not end with 0: %s' % line)

            current_camera = Camera()
            # Setting the quaternion also sets the rotation matrix
            current_camera.set_quaternion(quaternion)

            # Set the camera center after rotation
            # COMMENT FROM PBA CODE:
            #   older format for compability
            #   camera_data[i].SetQuaternionRotation(q); // quaternion from the file
            #   camera_data[i].SetCameraCenterAfterRotation(c); // camera center from the file
            current_camera._center = center_vec

            # Camera view direction as normal w.r.t. world coordinates: the
            # camera looks along +z in camera coordinates. For a rotation
            # matrix the inverse equals the transpose, which is cheaper and
            # numerically exact (and consistent with parse_cameras()).
            cam_rotation_mat = current_camera.get_rotation_mat()
            cam_view_vec_cam_coord = np.array([0, 0, 1])
            current_camera.normal = cam_rotation_mat.T.dot(
                cam_view_vec_cam_coord)

            translation_vec = compute_camera_coordinate_system_translation_vector(
                center_vec, cam_rotation_mat)
            current_camera._translation_vec = translation_vec

            current_camera.set_calibration(calibration_mat, radial_distortion)
            current_camera.file_name = file_name
            current_camera.camera_index = camera_index

            cameras.append(current_camera)

        return cameras
Code example #2
0
    def parse_cameras(json_data):
        """Parse reconstructed cameras from an OpenMVG sfm_data JSON dict.

        :param json_data: dict parsed from an OpenMVG JSON file, expected to
            contain 'views', 'intrinsics' and 'extrinsics' entries.
        :return: tuple ``(cams, image_index_to_camera_index)`` where ``cams``
            is the list of reconstructed ``Camera`` objects and the dict maps
            a view (input image) index to the index of its reconstructed
            camera.
        :raises ValueError: if an intrinsic uses the unsupported 3-parameter
            radial distortion model (disto_k3).
        """

        views = json_data['views']
        intrinsics = json_data['intrinsics']
        extrinsics = json_data['extrinsics']

        # Remark: extrinsics may contain only a subset of views!
        # (not all views are contained in the reconstruction)
        # Matching entries are determined by view['key'] == extrinsics['key']

        cams = []

        # Mapping from input images to reconstructed cameras
        image_index_to_camera_index = {}

        # IMPORTANT: views contains the number of input images
        # (this may be more than the number of reconstructed poses),
        # therefore iterate over extrinsics, not views!
        for rec_index, extrinsic in enumerate(extrinsics):

            camera = Camera()

            # The key is defined w.r.t. view indices
            # (NOT reconstructed camera indices)
            view_index = int(extrinsic['key'])

            image_index_to_camera_index[view_index] = rec_index

            view_data = views[view_index]['value']['ptr_wrapper']['data']
            camera.file_name = view_data['filename']
            id_intrinsic = view_data['id_intrinsic']

            # Handle intrinsic params.
            intrinsic_params = \
                intrinsics[int(id_intrinsic)]['value']['ptr_wrapper']['data']
            focal_length = intrinsic_params['focal_length']
            principal_point = intrinsic_params['principal_point']

            # FIX: "assert False" is removed under "python -O", which would
            # let unsupported distortion models pass silently -- raise instead.
            if 'disto_k3' in intrinsic_params:
                logger.info('3 Radial Distortion Parameters are not supported')
                raise ValueError(
                    '3 Radial Distortion Parameters are not supported')

            # For Radial there are several options: "None", disto_k1, disto_k3
            if 'disto_k1' in intrinsic_params:
                radial_distortion = float(intrinsic_params['disto_k1'][0])
            else:  # No radial distortion, i.e. pinhole camera model
                radial_distortion = 0

            camera.set_calibration(
                np.asarray([[focal_length, 0, principal_point[0]],
                            [0, focal_length, principal_point[1]],
                            [0, 0, 1]], dtype=float),
                radial_distortion)

            # Handle extrinsic params (pose = extrinsic).
            extrinsic_params = extrinsic['value']
            camera.set_rotation_mat(
                np.array(extrinsic_params['rotation'], dtype=float))
            camera._center = np.array(extrinsic_params['center'], dtype=float)

            # The camera looks along +z in camera coordinates; transform that
            # view direction to world coordinates. For rotations the inverse
            # is equal to the transpose.
            cam_view_vec_cam_coordinates = np.array([0, 0, 1])
            rotation_inv_mat = camera.get_rotation_mat().T
            camera.normal = rotation_inv_mat.dot(cam_view_vec_cam_coordinates)

            camera.view_index = view_index

            cams.append(camera)
        return cams, image_index_to_camera_index