def test_rotate_on_spot_pitch(self):
    """Pitching (rotating about the camera x axis) must keep the eye
    position and the left direction fixed while tilting the view down."""
    batch = 14
    eye = torch.rand(batch, 3)
    at = torch.rand(batch, 3)
    up = torch.rand(batch, 3)
    R, T = look_at_view_transform(eye=eye, at=at, up=up)

    # A negative rotation about the x axis pitches the view downwards.
    pitch = axis_angle_to_matrix(torch.FloatTensor([-radians(10), 0, 0]))
    R_new, T_new = rotate_on_spot(R, T, pitch)
    eye_new, at_new, up_new = camera_to_eye_at_up(
        get_world_to_view_transform(R=R_new, T=T_new)
    )

    # Rotating on the spot never moves the camera center.
    self.assertClose(eye, eye_new, atol=1e-5)

    # The leftward direction is the pitch axis, so it is unchanged.
    left_before = torch.cross(up, at - eye, dim=-1)
    left_after = torch.cross(up_new, at_new - eye_new, dim=-1)
    self.assertClose(normalize(left_before), normalize(left_after), atol=1e-5)

    # The exact up vector must have swung towards the view direction,
    # i.e. the camera now looks further down.
    up_before = torch.cross(at - eye, left_before, dim=-1)
    up_after = torch.cross(at_new - eye_new, left_after, dim=-1)
    agree = _batched_dotprod(
        torch.cross(up_before, up_after, dim=1), left_before
    )
    self.assertGreater(agree.min(), 0)
def test_rotate_on_spot_yaw(self):
    """Yawing (rotating about the camera y axis) must keep the eye
    position and the exact up direction fixed while panning left.
    Also exercises broadcasting of the batch dimensions."""
    batch = 14
    eye = torch.rand(batch, 3)
    at = torch.rand(batch, 3)
    up = torch.rand(batch, 3)
    R, T = look_at_view_transform(eye=eye, at=at, up=up)

    # A negative rotation about the y axis turns the view to the left.
    yaw = axis_angle_to_matrix(torch.FloatTensor([0, -radians(10), 0]))
    R_new, T_new = rotate_on_spot(R, T, yaw)
    eye_new, at_new, up_new = camera_to_eye_at_up(
        get_world_to_view_transform(R=R_new, T=T_new)
    )

    # Rotating on the spot never moves the camera center.
    self.assertClose(eye, eye_new, atol=1e-5)

    # Construct exact left and exact up directions for both cameras.
    left_before = torch.cross(up, at - eye, dim=-1)
    left_after = torch.cross(up_new, at_new - eye_new, dim=-1)
    up_before = torch.cross(at - eye, left_before, dim=-1)
    up_after = torch.cross(at_new - eye_new, left_after, dim=-1)

    # The exact up direction is the yaw axis, so it is unchanged.
    self.assertClose(normalize(up_before), normalize(up_after), atol=1e-5)

    # The left direction must have rotated about the up axis.
    agree = _batched_dotprod(
        torch.cross(left_before, left_after, dim=1), up_before
    )
    self.assertGreater(agree.min(), 0)

    # A rotation expanded over the batch gives the same answer.
    R_batched, T_batched = rotate_on_spot(R, T, yaw.expand(batch, 3, 3))
    self.assertClose(R_new, R_batched)
    self.assertClose(T_new, T_batched)

    # Unbatched R, T with a single rotation.
    R_single, T_single = rotate_on_spot(R[0], T[0], yaw)
    self.assertClose(R_new[:1], R_single)
    self.assertClose(T_new[:1], T_single)

    # Unbatched R, T broadcast against a batched rotation.
    R_bcast, T_bcast = rotate_on_spot(R[0], T[0], yaw.expand(batch, 3, 3))
    self.assertClose(R_new[:1].expand(batch, 3, 3), R_bcast)
    self.assertClose(T_new[:1].expand(batch, 3), T_bcast)
def test_invert_eye_at_up(self):
    """Build random cameras and check that camera_to_eye_at_up recovers
    eye exactly, and at/up up to the inherent ambiguity (direction only
    for at - eye; component around the view axis only for up)."""
    batch = 13
    eye_in = torch.rand(batch, 3)
    at_in = torch.rand(batch, 3)
    up_in = torch.rand(batch, 3)
    R, T = look_at_view_transform(eye=eye_in, at=at_in, up=up_in)
    cameras = PerspectiveCameras(R=R, T=T)

    eye_out, at_out, up_out = camera_to_eye_at_up(
        cameras.get_world_to_view_transform()
    )

    # The retrieved eye matches exactly.
    self.assertClose(eye_in, eye_out, atol=1e-5)
    self.assertClose(cameras.get_camera_center(), eye_in)

    # The retrieved at - eye must point in the original direction.
    self.assertClose(normalize(at_in - eye_in), normalize(at_out - eye_out))

    # Only the component of up perpendicular to at - eye matters, and
    # only its direction. So compare up x (at - eye) for both, using the
    # original at - eye (the retrieved one is parallel to it anyway).
    perp_in = torch.cross(up_in, at_in - eye_in, dim=-1)
    perp_out = torch.cross(up_out, at_in - eye_in, dim=-1)
    self.assertClose(normalize(perp_in), normalize(perp_out))

    # Master check: rebuilding from the retrieved vectors reproduces the
    # same world-to-view transform.
    R2, T2 = look_at_view_transform(eye=eye_out, at=at_out, up=up_out)
    rebuilt = PerspectiveCameras(R=R2, T=T2)
    original_matrix = cameras.get_world_to_view_transform().get_matrix()
    rebuilt_matrix = rebuilt.get_world_to_view_transform().get_matrix()
    self.assertClose(original_matrix, rebuilt_matrix, atol=1e-5)
def test_rotate_on_spot_roll(self):
    """Rolling (rotating about the camera z axis) must keep both the eye
    position and the view direction fixed while spinning the image."""
    batch = 14
    eye = torch.rand(batch, 3)
    at = torch.rand(batch, 3)
    up = torch.rand(batch, 3)
    R, T = look_at_view_transform(eye=eye, at=at, up=up)

    # A negative rotation about the z axis rolls the image clockwise.
    roll = axis_angle_to_matrix(torch.FloatTensor([0, 0, -radians(10)]))
    R_new, T_new = rotate_on_spot(R, T, roll)
    eye_new, at_new, up_new = camera_to_eye_at_up(
        get_world_to_view_transform(R=R_new, T=T_new)
    )

    # Neither the camera center nor the view direction moves.
    self.assertClose(eye, eye_new, atol=1e-5)
    self.assertClose(normalize(at - eye), normalize(at_new - eye), atol=1e-5)

    # The up vector has swung clockwise about the view direction.
    agree = _batched_dotprod(torch.cross(up, up_new, dim=1), at - eye)
    self.assertGreater(agree.min(), 0)
def plot_scene(
    plots: Dict[str, Dict[str, Struct]],
    *,
    viewpoint_cameras: Optional[CamerasBase] = None,
    ncols: int = 1,
    camera_scale: float = 0.3,
    pointcloud_max_points: int = 20000,
    pointcloud_marker_size: int = 1,
    raybundle_max_rays: int = 20000,
    raybundle_max_points_per_ray: int = 1000,
    raybundle_ray_point_marker_size: int = 1,
    raybundle_ray_line_width: int = 1,
    **kwargs,
):  # pragma: no cover
    """
    Main function to visualize Cameras, Meshes, Pointclouds, and RayBundle.
    Plots input Cameras, Meshes, Pointclouds, and RayBundle data into named
    subplots, with named traces based on the dictionary keys. Cameras are
    rendered at the camera center location using a wireframe.

    Args:
        plots: A dict containing subplot and trace names, as well as the
            Meshes, Cameras and Pointclouds objects to be rendered. See below
            for examples of the format.
        viewpoint_cameras: an instance of a Cameras object providing a location
            to view the plotly plot from. If the batch size is equal to the
            number of subplots, it is a one to one mapping. If the batch size
            is 1, then all the subplots will be viewed from that point.
            Otherwise, the viewpoint_cameras will not be used.
        ncols: the number of subplots per row
        camera_scale: determines the size of the wireframe used to render
            cameras.
        pointcloud_max_points: the maximum number of points to plot from a
            pointcloud. If more are present, a random sample of size
            pointcloud_max_points is used.
        pointcloud_marker_size: the size of the points rendered by plotly when
            plotting a pointcloud.
        raybundle_max_rays: maximum number of rays of a RayBundle to visualize.
            Randomly subsamples without replacement in case the number of rays
            is bigger than max_rays.
        raybundle_max_points_per_ray: the maximum number of points per ray in
            RayBundle to visualize. If more are present, a random sample of
            size max_points_per_ray is used.
        raybundle_ray_point_marker_size: the size of the ray points of a
            plotted RayBundle
        raybundle_ray_line_width: the width of the plotted rays of a RayBundle
        **kwargs: Accepts lighting (a Lighting object) and any of the args
            xaxis, yaxis and zaxis which Plotly's scene accepts. Accepts
            axis_args, which is an AxisArgs object that is applied to all 3
            axes. Example settings for axis_args and lighting are given at
            the top of this file.

    Example:

    ..code-block::python

        mesh = ...
        point_cloud = ...
        fig = plot_scene({
            "subplot_title": {
                "mesh_trace_title": mesh,
                "pointcloud_trace_title": point_cloud
            }
        })
        fig.show()

    The above example will render one subplot which has both a mesh and
    pointcloud.

    If the Meshes, Pointclouds, or Cameras objects are batched, then every
    object in that batch will be plotted in a single trace.

    ..code-block::python

        mesh = ...  # batch size 2
        point_cloud = ...  # batch size 2
        fig = plot_scene({
            "subplot_title": {
                "mesh_trace_title": mesh,
                "pointcloud_trace_title": point_cloud
            }
        })
        fig.show()

    The above example renders one subplot with 2 traces, each of which renders
    both objects from their respective batched data.

    Multiple subplots follow the same pattern:

    ..code-block::python

        mesh = ...  # batch size 2
        point_cloud = ...  # batch size 2
        fig = plot_scene({
            "subplot1_title": {
                "mesh_trace_title": mesh[0],
                "pointcloud_trace_title": point_cloud[0]
            },
            "subplot2_title": {
                "mesh_trace_title": mesh[1],
                "pointcloud_trace_title": point_cloud[1]
            }
        }, ncols=2)  # specify the number of subplots per row
        fig.show()

    The above example will render two subplots, each containing a mesh and a
    pointcloud. The ncols argument will render two subplots in one row instead
    of having them vertically stacked because the default is one subplot per
    row.

    To view plotly plots from a PyTorch3D camera's point of view, we can use
    viewpoint_cameras:

    ..code-block::python

        mesh = ...  # batch size 2
        R, T = look_at_view_transform(2.7, 0, [0, 180])  # 2 camera angles, front and back
        # Any instance of CamerasBase works, here we use FoVPerspectiveCameras
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
        fig = plot_scene({
            "subplot1_title": {
                "mesh_trace_title": mesh[0]
            },
            "subplot2_title": {
                "mesh_trace_title": mesh[1]
            }
        }, viewpoint_cameras=cameras)
        fig.show()

    The above example will render the first subplot seen from the camera on
    the +z axis, and the second subplot from the viewpoint of the camera on
    the -z axis.

    We can visualize these cameras as well:

    ..code-block::python

        mesh = ...
        R, T = look_at_view_transform(2.7, 0, [0, 180])  # 2 camera angles, front and back
        # Any instance of CamerasBase works, here we use FoVPerspectiveCameras
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
        fig = plot_scene({
            "subplot1_title": {
                "mesh_trace_title": mesh,
                "cameras_trace_title": cameras,
            },
        })
        fig.show()

    The above example will render one subplot with the mesh object and two
    cameras.

    RayBundle visualization is also supported:

    ..code-block::python

        cameras = PerspectiveCameras(...)
        ray_bundle = RayBundle(origins=..., lengths=..., directions=..., xys=...)
        fig = plot_scene({
            "subplot1_title": {
                "ray_bundle_trace_title": ray_bundle,
                "cameras_trace_title": cameras,
            },
        })
        fig.show()

    For an example of using kwargs, see below:

    ..code-block::python

        mesh = ...
        point_cloud = ...
        fig = plot_scene({
            "subplot_title": {
                "mesh_trace_title": mesh,
                "pointcloud_trace_title": point_cloud
            }
        }, axis_args=AxisArgs(backgroundcolor="rgb(200,230,200)"))  # kwarg axis_args
        fig.show()

    The above example will render each axis with the input background color.

    See the tutorials in pytorch3d/docs/tutorials for more examples
    (namely rendered_color_points.ipynb and rendered_textured_meshes.ipynb).
    """
    subplots = list(plots.keys())
    fig = _gen_fig_with_subplots(len(subplots), ncols, subplots)
    # Lighting/axis settings come in as namedtuples; plotly wants plain dicts.
    lighting = kwargs.get("lighting", Lighting())._asdict()
    axis_args_dict = kwargs.get("axis_args", AxisArgs())._asdict()

    # Set axis arguments to defaults defined at the top of this file
    x_settings = {**axis_args_dict}
    y_settings = {**axis_args_dict}
    z_settings = {**axis_args_dict}

    # Update the axes with any axis settings passed in as kwargs.
    x_settings.update(**kwargs.get("xaxis", {}))
    y_settings.update(**kwargs.get("yaxis", {}))
    z_settings.update(**kwargs.get("zaxis", {}))

    camera = {
        "up": {
            "x": 0,
            "y": 1,
            "z": 0,
        }  # set the up vector to match PyTorch3D world coordinates conventions
    }
    viewpoints_eye_at_up_world = None
    if viewpoint_cameras:
        n_viewpoint_cameras = len(viewpoint_cameras)
        if n_viewpoint_cameras == len(subplots) or n_viewpoint_cameras == 1:
            # Calculate the vectors eye, at, up in world space
            # to initialize the position of the camera in
            # the plotly figure
            viewpoints_eye_at_up_world = camera_to_eye_at_up(
                viewpoint_cameras.get_world_to_view_transform().cpu()
            )
        else:
            # Mismatched camera count: warn and fall back to the default view.
            msg = "Invalid number {} of viewpoint cameras were provided. Either 1 \
or {} cameras are required".format(len(viewpoint_cameras), len(subplots))
            warnings.warn(msg)

    for subplot_idx in range(len(subplots)):
        subplot_name = subplots[subplot_idx]
        traces = plots[subplot_name]
        for trace_name, struct in traces.items():
            # Dispatch on the struct type to the matching trace-adding helper.
            if isinstance(struct, Meshes):
                _add_mesh_trace(fig, struct, trace_name, subplot_idx, ncols, lighting)
            elif isinstance(struct, Pointclouds):
                _add_pointcloud_trace(
                    fig,
                    struct,
                    trace_name,
                    subplot_idx,
                    ncols,
                    pointcloud_max_points,
                    pointcloud_marker_size,
                )
            elif isinstance(struct, CamerasBase):
                _add_camera_trace(
                    fig, struct, trace_name, subplot_idx, ncols, camera_scale
                )
            elif isinstance(struct, RayBundle):
                _add_ray_bundle_trace(
                    fig,
                    struct,
                    trace_name,
                    subplot_idx,
                    ncols,
                    raybundle_max_rays,
                    raybundle_max_points_per_ray,
                    raybundle_ray_point_marker_size,
                    raybundle_ray_line_width,
                )
            else:
                raise ValueError(
                    "struct {} is not a Cameras, Meshes, Pointclouds,".format(struct)
                    + " or RayBundle object."
                )

        # Ensure update for every subplot.
        # NOTE: this local deliberately shadows the function name; it is the
        # plotly scene key ("scene1", "scene2", ...) for this subplot.
        plot_scene = "scene" + str(subplot_idx + 1)
        current_layout = fig["layout"][plot_scene]
        xaxis = current_layout["xaxis"]
        yaxis = current_layout["yaxis"]
        zaxis = current_layout["zaxis"]

        # Update the axes with our above default and provided settings.
        xaxis.update(**x_settings)
        yaxis.update(**y_settings)
        zaxis.update(**z_settings)

        # update camera viewpoint if provided
        if viewpoints_eye_at_up_world is not None:
            # Use camera params for batch index or the first camera if only one provided.
            viewpoint_idx = min(n_viewpoint_cameras - 1, subplot_idx)
            eye, at, up = (i[viewpoint_idx] for i in viewpoints_eye_at_up_world)
            eye_x, eye_y, eye_z = eye.tolist()
            at_x, at_y, at_z = at.tolist()
            up_x, up_y, up_z = up.tolist()

            # scale camera eye to plotly [-1, 1] ranges
            x_range = xaxis["range"]
            y_range = yaxis["range"]
            z_range = zaxis["range"]

            # Positions (eye, at) are rescaled into the axis bounds; the up
            # direction is scaled as a vector (no translation).
            eye_x = _scale_camera_to_bounds(eye_x, x_range, True)
            eye_y = _scale_camera_to_bounds(eye_y, y_range, True)
            eye_z = _scale_camera_to_bounds(eye_z, z_range, True)

            at_x = _scale_camera_to_bounds(at_x, x_range, True)
            at_y = _scale_camera_to_bounds(at_y, y_range, True)
            at_z = _scale_camera_to_bounds(at_z, z_range, True)

            up_x = _scale_camera_to_bounds(up_x, x_range, False)
            up_y = _scale_camera_to_bounds(up_y, y_range, False)
            up_z = _scale_camera_to_bounds(up_z, z_range, False)

            camera["eye"] = {"x": eye_x, "y": eye_y, "z": eye_z}
            camera["center"] = {"x": at_x, "y": at_y, "z": at_z}
            camera["up"] = {"x": up_x, "y": up_y, "z": up_z}

        current_layout.update(
            {
                "xaxis": xaxis,
                "yaxis": yaxis,
                "zaxis": zaxis,
                "aspectmode": "cube",
                "camera": camera,
            }
        )

    return fig