def test_rotate_on_spot_pitch(self):
    N = 14
    eye = torch.rand(N, 3)
    at = torch.rand(N, 3)
    up = torch.rand(N, 3)
    R, T = look_at_view_transform(eye=eye, at=at, up=up)

    # Moving around the x axis looks down.
    angles = torch.FloatTensor([-radians(10), 0, 0])
    rotation = axis_angle_to_matrix(angles)
    R_rot, T_rot = rotate_on_spot(R, T, rotation)

    eye_rot, at_rot, up_rot = camera_to_eye_at_up(
        get_world_to_view_transform(R=R_rot, T=T_rot)
    )
    self.assertClose(eye, eye_rot, atol=1e-5)

    # A vector pointing left is unchanged.
    left = torch.cross(up, at - eye, dim=-1)
    left_rot = torch.cross(up_rot, at_rot - eye_rot, dim=-1)
    self.assertClose(normalize(left), normalize(left_rot), atol=1e-5)

    # The camera has moved down.
    fully_up = torch.cross(at - eye, left, dim=-1)
    fully_up_rot = torch.cross(at_rot - eye_rot, left_rot, dim=-1)
    agree = _batched_dotprod(torch.cross(fully_up, fully_up_rot, dim=1), left)
    self.assertGreater(agree.min(), 0)
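# The test methods in this section assume a set of imports plus a small helper
# and a normalize wrapper that are not shown here. Below is a minimal sketch of
# what they are assumed to be; the _batched_dotprod and normalize definitions
# are illustrative assumptions, not necessarily the test suite's exact code.
# (BlenderCamera, used in test_blender_camera, comes from PyTorch3D's R2N2
# dataset utilities and is omitted here.)
from math import radians

import torch
from pytorch3d.renderer import (
    OpenGLOrthographicCameras,
    OpenGLPerspectiveCameras,
    SfMOrthographicCameras,
    SfMPerspectiveCameras,
    look_at_rotation,
    look_at_view_transform,
)
from pytorch3d.renderer.camera_utils import camera_to_eye_at_up, rotate_on_spot
from pytorch3d.renderer.cameras import get_world_to_view_transform
from pytorch3d.transforms import Transform3d, axis_angle_to_matrix, so3_exp_map


def _batched_dotprod(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    # Row-wise dot product of two (N, 3) tensors, returning a tensor of shape (N,).
    return (x * y).sum(dim=-1)


def normalize(v: torch.Tensor) -> torch.Tensor:
    # Unit-normalize vectors along the last dimension.
    return torch.nn.functional.normalize(v, dim=-1)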
def preprocess_poses(cls, poses: tuple):
    """Generates a vector of absolute poses.

    Args:
        poses: Tuple of batched rotations (N, 3, 3) and translations (N, 3) in
            PyTorch3D view-to-world coordinates, usually returned from a call
            to RenderManager._trajectory.

    More information about PyTorch3D's coordinate system:
    https://github.com/facebookresearch/pytorch3d/blob/master/docs/notes/cameras.md

    1. Computes rotation and translation matrices in view-to-world coordinates.
    2. Generates a unit quaternion from R (the log-quaternion representation is
       currently disabled below).
    3. Normalizes the translation according to its mean and standard deviation.

    Returns:
        (N, 7) tensor: [t1, t2, t3, q1, q2, q3, q4]
    """
    R, T = poses
    cam_wvt = get_world_to_view_transform(R=R, T=T)
    pose_transform = cam_wvt.inverse().get_matrix()
    T = pose_transform[:, 3, :3]
    R = pose_transform[:, :3, :3]

    # Compute pose stats
    std_R, mean_R = torch.std_mean(R)
    std_T, mean_T = torch.std_mean(T)

    q = rc.matrix_to_quaternion(R)
    # q /= torch.norm(q)
    # q *= torch.sign(q[0])  # hemisphere constraint
    # logq = qlog(q)

    T -= mean_T
    T /= std_T

    return torch.cat((T, q), dim=1)
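# A standalone sketch of the same preprocessing steps as preprocess_poses above,
# using only public PyTorch3D APIs. The owning class, the `rc` alias, and
# RenderManager._trajectory are not needed for this illustration, and the
# mean/std normalization of T is omitted for brevity.
import torch
from pytorch3d.renderer import look_at_view_transform
from pytorch3d.renderer.cameras import get_world_to_view_transform
from pytorch3d.transforms import matrix_to_quaternion

R, T = look_at_view_transform(dist=2.7, elev=10.0, azim=20.0)  # (1, 3, 3), (1, 3)
pose = get_world_to_view_transform(R=R, T=T).inverse().get_matrix()  # (1, 4, 4)
t = pose[:, 3, :3]                         # camera position in world coordinates
q = matrix_to_quaternion(pose[:, :3, :3])  # unit quaternion, real part first
tq = torch.cat((t, q), dim=1)              # (N, 7) absolute pose vector
print(tq.shape)  # torch.Size([1, 7])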
def test_view_transform_class_method(self):
    T = torch.tensor([0.0, 0.0, -1.0], requires_grad=True).view(1, -1)
    R = look_at_rotation(T)
    RT = get_world_to_view_transform(R=R, T=T)
    for cam_type in (
        OpenGLPerspectiveCameras,
        OpenGLOrthographicCameras,
        SfMOrthographicCameras,
        SfMPerspectiveCameras,
    ):
        cam = cam_type(R=R, T=T)
        RT_class = cam.get_world_to_view_transform()
        self.assertTrue(torch.allclose(RT.get_matrix(), RT_class.get_matrix()))

    self.assertTrue(isinstance(RT, Transform3d))
def test_blender_camera(self):
    """
    Test BlenderCamera.
    """
    # Test get_world_to_view_transform.
    T = torch.randn(10, 3)
    R = so3_exp_map(torch.randn(10, 3) * 3.0)
    RT = get_world_to_view_transform(R=R, T=T)
    cam = BlenderCamera(R=R, T=T)
    RT_class = cam.get_world_to_view_transform()
    self.assertTrue(torch.allclose(RT.get_matrix(), RT_class.get_matrix()))
    self.assertTrue(isinstance(RT, Transform3d))

    # Test getting camera center.
    C = cam.get_camera_center()
    C_ = -torch.bmm(R, T[:, :, None])[:, :, 0]
    self.assertTrue(torch.allclose(C, C_, atol=1e-05))
def test_rotate_on_spot_yaw(self):
    N = 14
    eye = torch.rand(N, 3)
    at = torch.rand(N, 3)
    up = torch.rand(N, 3)
    R, T = look_at_view_transform(eye=eye, at=at, up=up)

    # Moving around the y axis looks left.
    angles = torch.FloatTensor([0, -radians(10), 0])
    rotation = axis_angle_to_matrix(angles)
    R_rot, T_rot = rotate_on_spot(R, T, rotation)

    eye_rot, at_rot, up_rot = camera_to_eye_at_up(
        get_world_to_view_transform(R=R_rot, T=T_rot)
    )
    self.assertClose(eye, eye_rot, atol=1e-5)

    # Make vectors pointing exactly left and up.
    left = torch.cross(up, at - eye, dim=-1)
    left_rot = torch.cross(up_rot, at_rot - eye_rot, dim=-1)
    fully_up = torch.cross(at - eye, left, dim=-1)
    fully_up_rot = torch.cross(at_rot - eye_rot, left_rot, dim=-1)

    # The up direction is unchanged.
    self.assertClose(normalize(fully_up), normalize(fully_up_rot), atol=1e-5)

    # The camera has moved left.
    agree = _batched_dotprod(torch.cross(left, left_rot, dim=1), fully_up)
    self.assertGreater(agree.min(), 0)

    # Batch dimension for rotation.
    R_rot2, T_rot2 = rotate_on_spot(R, T, rotation.expand(N, 3, 3))
    self.assertClose(R_rot, R_rot2)
    self.assertClose(T_rot, T_rot2)

    # No batch dimension for either.
    R_rot3, T_rot3 = rotate_on_spot(R[0], T[0], rotation)
    self.assertClose(R_rot[:1], R_rot3)
    self.assertClose(T_rot[:1], T_rot3)

    # No batch dimension for R, T.
    R_rot4, T_rot4 = rotate_on_spot(R[0], T[0], rotation.expand(N, 3, 3))
    self.assertClose(R_rot[:1].expand(N, 3, 3), R_rot4)
    self.assertClose(T_rot[:1].expand(N, 3), T_rot4)
def test_rotate_on_spot_roll(self):
    N = 14
    eye = torch.rand(N, 3)
    at = torch.rand(N, 3)
    up = torch.rand(N, 3)
    R, T = look_at_view_transform(eye=eye, at=at, up=up)

    # Moving around the z axis rotates the image.
    angles = torch.FloatTensor([0, 0, -radians(10)])
    rotation = axis_angle_to_matrix(angles)
    R_rot, T_rot = rotate_on_spot(R, T, rotation)

    eye_rot, at_rot, up_rot = camera_to_eye_at_up(
        get_world_to_view_transform(R=R_rot, T=T_rot)
    )
    self.assertClose(eye, eye_rot, atol=1e-5)
    self.assertClose(normalize(at - eye), normalize(at_rot - eye), atol=1e-5)

    # The camera has moved clockwise.
    agree = _batched_dotprod(torch.cross(up, up_rot, dim=1), at - eye)
    self.assertGreater(agree.min(), 0)
def test_view_transform(self):
    T = torch.tensor([0.0, 0.0, -1.0], requires_grad=True).view(1, -1)
    R = look_at_rotation(T)
    RT = get_world_to_view_transform(R=R, T=T)
    self.assertTrue(isinstance(RT, Transform3d))