def convert_colmap_cams_to_cams(id_to_col_cameras, id_to_col_images, image_dp):
    """Convert COLMAP images/cameras into a list of ``Camera`` objects.

    From photogrammetry_importer/ext/read_write_model.py the COLMAP types are:
        CameraModel = namedtuple("CameraModel", ["model_id", "model_name", "num_params"])
        Camera      = namedtuple("Camera", ["id", "model", "width", "height", "params"])
        BaseImage   = namedtuple("Image", ["id", "qvec", "tvec", "camera_id",
                                           "name", "xys", "point3D_ids"])

    :param id_to_col_cameras: maps a COLMAP camera id to its camera model.
    :param id_to_col_images: maps a COLMAP image id to its image entry.
    :param image_dp: directory path containing the source images.
    :return: list of converted ``Camera`` instances, one per COLMAP image.
    """
    converted_cameras = []
    for col_image in id_to_col_images.values():
        cam = Camera()
        cam.id = col_image.id
        # The quaternion defines the rotation; the translation is applied
        # after that rotation (COLMAP convention).
        cam.set_quaternion(col_image.qvec)
        cam.set_camera_translation_vector_after_rotation(col_image.tvec)
        cam.image_dp = image_dp
        cam.file_name = col_image.name

        # Intrinsics live on the COLMAP camera model referenced by the image.
        col_camera_model = id_to_col_cameras[col_image.camera_id]
        cam.width = col_camera_model.width
        cam.height = col_camera_model.height

        fx, fy, cx, cy, skew = parse_camera_param_list(col_camera_model)
        calibration_mat = np.array(
            [[fx, skew, cx],
             [0, fy, cy],
             [0, 0, 1]])
        cam.set_calibration(calibration_mat, radial_distortion=0)

        converted_cameras.append(cam)
    return converted_cameras
def _parse_cameras(input_file, num_cameras, camera_calibration_matrix):
    """Parse ``num_cameras`` camera lines from a VisualSFM NVM file.

    :param input_file: open file object positioned at the first camera line.
    :param num_cameras: number of camera lines to read.
    :param camera_calibration_matrix: optional 3x3 calibration matrix applied
        to every camera; if ``None``, a matrix is built per camera from the
        focal length stored in the file.
    :return: list of parsed ``Camera`` objects.
    :raises ValueError: if a camera line's trailing sentinel value is not 0.
    """
    # VisualSFM CAMERA coordinate system is the standard CAMERA coordinate
    # system in computer vision (not the same as in computer graphics like in
    # bundler, blender, etc.). That means:
    #   - the y axis in the image is pointing downwards (not upwards)
    #   - the camera is looking along the positive z axis (points in front of
    #     the camera show a positive z value)
    # The camera coordinate system in computer vision VISUALSFM uses camera
    # matrices which are rotated around the x axis by 180 degree, i.e. the y
    # and z axis of the CAMERA MATRICES are inverted; therefore, the y and z
    # axis of the TRANSLATION VECTOR are also inverted.
    cameras = []
    for camera_index in range(num_cameras):
        line = input_file.readline()
        # <Camera> = <File name> <focal length> <quaternion WXYZ>
        #            <camera center> <radial distortion> 0
        line_values = line.split()
        file_name = os.path.basename(line_values[0])
        focal_length = float(line_values[1])

        # BUGFIX: build the fallback matrix in a per-iteration local instead
        # of overwriting the parameter. The original code assigned the matrix
        # of the FIRST camera back into `camera_calibration_matrix`, so the
        # `is None` test never fired again and all later cameras silently
        # reused camera 0's focal length.
        if camera_calibration_matrix is None:
            calibration_mat = np.array(
                [[focal_length, 0, 0],
                 [0, focal_length, 0],
                 [0, 0, 1]])
        else:
            calibration_mat = camera_calibration_matrix

        quaternion_w = float(line_values[2])
        quaternion_x = float(line_values[3])
        quaternion_y = float(line_values[4])
        quaternion_z = float(line_values[5])
        quaternion = np.array(
            [quaternion_w, quaternion_x, quaternion_y, quaternion_z],
            dtype=float)

        camera_center_x = float(line_values[6])
        camera_center_y = float(line_values[7])
        camera_center_z = float(line_values[8])
        center_vec = np.array(
            [camera_center_x, camera_center_y, camera_center_z])

        radial_distortion = float(line_values[9])

        # Explicit validation instead of `assert` (asserts are stripped
        # under python -O).
        zero_value = float(line_values[10])
        if zero_value != 0:
            raise ValueError(
                "Expected trailing 0 in NVM camera line, got "
                + str(zero_value))

        current_camera = Camera()
        # Setting the quaternion also sets the rotation matrix.
        current_camera.set_quaternion(quaternion)
        # COMMENT FROM PBA CODE:
        # older format for compability
        # camera_data[i].SetQuaternionRotation(q);      // quaternion from the file
        # camera_data[i].SetCameraCenterAfterRotation(c); // camera center from the file
        current_camera._center = center_vec

        # Set the camera view direction as normal w.r.t. world coordinates.
        # Hoist the rotation matrix — it is reused below.
        rotation_mat = current_camera.get_rotation_mat()
        cam_view_vec_cam_coord = np.array([0, 0, 1])
        cam_view_vec_world_coord = np.linalg.inv(rotation_mat).dot(
            cam_view_vec_cam_coord)
        current_camera.normal = cam_view_vec_world_coord

        translation_vec = compute_camera_coordinate_system_translation_vector(
            center_vec, rotation_mat)
        current_camera._translation_vec = translation_vec

        current_camera.set_calibration(calibration_mat, radial_distortion)
        current_camera.file_name = file_name
        current_camera.camera_index = camera_index

        cameras.append(current_camera)
    return cameras