def render(self, pv_matrix: np.ndarray, tracked_cam_track_pos_float: float):
    """Draw the tracked camera model and frustum at a (possibly
    fractional) position along the camera track.

    The pose is interpolated between the two neighbouring key poses:
    rotation by quaternion slerp, translation by linear interpolation.

    :param pv_matrix: combined projection * view matrix of the render camera
    :param tracked_cam_track_pos_float: fractional index into
        ``self._cam_poses``; must lie in [0, len(self._cam_poses) - 1]
    """
    camera_pos_floor = int(np.floor(tracked_cam_track_pos_float))
    assert camera_pos_floor >= 0
    camera_pos_ceil = int(np.ceil(tracked_cam_track_pos_float))
    assert camera_pos_ceil <= len(self._cam_poses) - 1
    camera_pos_fraction = tracked_cam_track_pos_float - camera_pos_floor
    assert 0 <= camera_pos_fraction <= 1

    # Rotation: slerp between the neighbouring key-pose quaternions.
    floor_quaternion = Quaternion(
        matrix=self._cam_poses[camera_pos_floor].r_mat)
    ceil_quaternion = Quaternion(
        matrix=self._cam_poses[camera_pos_ceil].r_mat)
    rotation_matrix = Quaternion.slerp(
        floor_quaternion, ceil_quaternion,
        camera_pos_fraction).rotation_matrix

    # Translation: plain linear interpolation between the key poses.
    floor_trans_vector = self._cam_poses[camera_pos_floor].t_vec
    ceil_trans_vector = self._cam_poses[camera_pos_ceil].t_vec
    translation_vector = (ceil_trans_vector * camera_pos_fraction +
                          floor_trans_vector * (1 - camera_pos_fraction))

    cam_model_pose_matrix = _get_pose_matrix(
        data3d.Pose(r_mat=rotation_matrix, t_vec=translation_vector))
    cam_mvp = pv_matrix.dot(cam_model_pose_matrix)
    self._cam_model_renderer.render(cam_mvp)
    self._cam_frustrum_renderer.render(cam_mvp)
def _interpolate_cam_pose(t: float, p0: data3d.Pose, p1: data3d.Pose):
    """Interpolate between two camera poses.

    Rotation is slerped between the pose quaternions; translation is
    linearly interpolated.

    :param t: interpolation parameter in [0, 1] (0 -> p0, 1 -> p1)
    :param p0: start pose
    :param p1: end pose
    :return: the interpolated ``data3d.Pose`` (rotation matrix cast to
        float32, matching the original behaviour)
    """
    def to_quat(m):
        # Shepperd's method: branch on the largest of (trace, m00, m11,
        # m22) so the divisor stays well away from zero.  The naive
        # qr = sqrt(1 + trace) / 2 form divides by ~0 and can take the
        # square root of a negative number for rotations near 180 deg.
        tr = m[0][0] + m[1][1] + m[2][2]
        if tr > 0:
            s = np.sqrt(tr + 1.0) * 2.0
            q = [0.25 * s,
                 (m[2][1] - m[1][2]) / s,
                 (m[0][2] - m[2][0]) / s,
                 (m[1][0] - m[0][1]) / s]
        elif m[0][0] >= m[1][1] and m[0][0] >= m[2][2]:
            s = np.sqrt(1.0 + m[0][0] - m[1][1] - m[2][2]) * 2.0
            q = [(m[2][1] - m[1][2]) / s,
                 0.25 * s,
                 (m[0][1] + m[1][0]) / s,
                 (m[0][2] + m[2][0]) / s]
        elif m[1][1] >= m[2][2]:
            s = np.sqrt(1.0 + m[1][1] - m[0][0] - m[2][2]) * 2.0
            q = [(m[0][2] - m[2][0]) / s,
                 (m[0][1] + m[1][0]) / s,
                 0.25 * s,
                 (m[1][2] + m[2][1]) / s]
        else:
            s = np.sqrt(1.0 + m[2][2] - m[0][0] - m[1][1]) * 2.0
            q = [(m[1][0] - m[0][1]) / s,
                 (m[0][2] + m[2][0]) / s,
                 (m[1][2] + m[2][1]) / s,
                 0.25 * s]
        return Quaternion(np.array(q))

    return data3d.Pose(
        Quaternion.slerp(to_quat(p0.r_mat), to_quat(p1.r_mat),
                         t).rotation_matrix.astype(np.float32),
        p0.t_vec * (1 - t) + p1.t_vec * t)
def __init__(self, cam_model_files: Tuple[str, str],
             tracked_cam_parameters: data3d.CameraParameters,
             tracked_cam_track: List[data3d.Pose]):
    """Create the camera-model and frustum sub-renderers and convert
    every tracked pose through ``_opencv_rotation_matrix`` (presumably
    an OpenCV <-> OpenGL axis-convention flip -- confirm against its
    definition)."""
    self._cam_model_renderer = _CameraModelRenderer(cam_model_files)
    self._cam_frustrum_renderer = _CameraFrustrumRenderer(
        tracked_cam_parameters)

    conv = _opencv_rotation_matrix

    def _converted(src_pose):
        # Conjugate the rotation and rotate the translation vector.
        return data3d.Pose(
            conv.dot(src_pose.r_mat).dot(conv),
            conv.dot(src_pose.t_vec))

    self._cam_poses = [_converted(src_pose)
                       for src_pose in tracked_cam_track]
def display(self, camera_tr_vec, camera_rot_mat, camera_fov_y,
            tracked_cam_track_pos_float):
    """
    Draw everything with specified render camera position,
    projection parameters and tracked camera position

    :param camera_tr_vec: vec3 position of render camera in global space
    :param camera_rot_mat: mat3 rotation matrix of render camera in
        global space
    :param camera_fov_y: render camera field of view. To be used for
        building a projection matrix. Use glutGet to calculate current
        aspect ratio
    :param tracked_cam_track_pos_float: a frame in which tracked camera
        model and frustrum should be drawn (see tracked_cam_track_pos
        for basic task)
    :return: returns nothing
    """
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)

    # The view matrix is the inverse of the render camera's world pose.
    view_mat = np.linalg.inv(
        _get_pose_matrix(data3d.Pose(camera_rot_mat, camera_tr_vec)))

    aspect_ratio = float(GLUT.glutGet(GLUT.GLUT_WINDOW_WIDTH)) / float(
        GLUT.glutGet(GLUT.GLUT_WINDOW_HEIGHT))
    focus_y = 1.0 / np.tan(camera_fov_y / 2.0)
    focus_x = focus_y / aspect_ratio
    near = 0.1
    far = 1000.0
    # Standard OpenGL-style perspective projection matrix.  Built as a
    # plain ndarray: np.matrix is deprecated by NumPy and .dot() behaves
    # identically for 2-D arrays.
    projection_mat = np.array([
        [focus_x, 0, 0, 0],
        [0, focus_y, 0, 0],
        [
            0, 0, -(far + near) / (far - near),
            -2 * far * near / (far - near)
        ],
        [0, 0, -1, 0],
    ])
    pv_matrix = projection_mat.dot(view_mat)

    self._point_cloud_renderer.render(pv_matrix)
    self._cam_track_line_renderer.render(pv_matrix)
    self._cam_renderer.render(pv_matrix, tracked_cam_track_pos_float)

    GLUT.glutSwapBuffers()