def test_perspective(self):
    far = 10.0
    near = 1.0
    cameras = OpenGLPerspectiveCameras(znear=near, zfar=far, fov=60.0)
    P = cameras.get_projection_transform()

    # vertices are at the far clipping plane so z gets mapped to 1.
    vertices = torch.tensor([1, 2, far], dtype=torch.float32)
    projected_verts = torch.tensor(
        [np.sqrt(3) / far, 2 * np.sqrt(3) / far, 1.0], dtype=torch.float32
    )
    vertices = vertices[None, None, :]
    v1 = P.transform_points(vertices)
    v2 = perspective_project_naive(vertices, fov=60.0)
    self.assertClose(v1[..., :2], v2[..., :2])
    self.assertClose(far * v1[..., 2], v2[..., 2])
    self.assertClose(v1.squeeze(), projected_verts)

    # vertices are at the near clipping plane so z gets mapped to 0.0.
    vertices[..., 2] = near
    projected_verts = torch.tensor(
        [np.sqrt(3) / near, 2 * np.sqrt(3) / near, 0.0], dtype=torch.float32
    )
    v1 = P.transform_points(vertices)
    v2 = perspective_project_naive(vertices, fov=60.0)
    self.assertClose(v1[..., :2], v2[..., :2])
    self.assertClose(v1.squeeze(), projected_verts)
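# `perspective_project_naive` is a reference helper used by the camera tests in
# this file but not defined in this section. A minimal sketch of what it might
# look like (an assumption based on the values the tests check, and on the
# module's existing `math`, `torch` imports; the real helper may differ):
# scale x and y by cot(fov / 2), divide by depth, and leave z untouched.
def perspective_project_naive(xyz, fov=90.0):
    """
    Naive perspective projection: xy -> xy * cot(fov / 2) / z, z -> z.
    """
    z = xyz[..., 2:]
    s = 1.0 / math.tan(0.5 * fov * math.pi / 180.0)
    xy = xyz[..., :2] * s
    return torch.cat([xy / z, z], dim=-1)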
def test_get_full_transform(self):
    cam = OpenGLPerspectiveCameras()
    T = torch.tensor([0.0, 0.0, 1.0]).view(1, -1)
    R = look_at_rotation(T)
    P = cam.get_full_projection_transform(R=R, T=T)
    self.assertTrue(isinstance(P, Transform3d))
    self.assertClose(cam.R, R)
    self.assertClose(cam.T, T)
def test_perspective_kwargs(self):
    cameras = OpenGLPerspectiveCameras(znear=5.0, zfar=100.0, fov=0.0)

    # Override defaults by passing in values to get_projection_transform
    far = 10.0
    P = cameras.get_projection_transform(znear=1.0, zfar=far, fov=60.0)
    vertices = torch.tensor([1, 2, far], dtype=torch.float32)
    projected_verts = torch.tensor(
        [np.sqrt(3) / far, 2 * np.sqrt(3) / far, 1.0], dtype=torch.float32
    )
    vertices = vertices[None, None, :]
    v1 = P.transform_points(vertices)
    self.assertClose(v1.squeeze(), projected_verts)
def test_transform_points(self):
    # Check the transform_points method works with the default settings
    # for R, T and P.
    far = 10.0
    cam = OpenGLPerspectiveCameras(znear=1.0, zfar=far, fov=60.0)
    points = torch.tensor([1, 2, far], dtype=torch.float32)
    points = points.view(1, 1, 3).expand(5, 10, -1)
    projected_points = torch.tensor(
        [np.sqrt(3) / far, 2 * np.sqrt(3) / far, 1.0], dtype=torch.float32
    )
    projected_points = projected_points.view(1, 1, 3).expand(5, 10, -1)
    new_points = cam.transform_points(points)
    self.assertClose(new_points, projected_points)
def test_simple_sphere_batched(self):
    """
    Test a mesh with vertex textures can be extended to form a batch, and
    is rendered correctly with Phong, Gouraud and Flat Shaders.
    """
    batch_size = 5
    device = torch.device("cuda:0")

    # Init mesh with vertex textures.
    sphere_meshes = ico_sphere(5, device).extend(batch_size)
    verts_padded = sphere_meshes.verts_padded()
    faces_padded = sphere_meshes.faces_padded()
    feats = torch.ones_like(verts_padded, device=device)
    textures = TexturesVertex(verts_features=feats)
    sphere_meshes = Meshes(
        verts=verts_padded, faces=faces_padded, textures=textures
    )

    # Init rasterizer settings
    dist = torch.tensor([2.7]).repeat(batch_size).to(device)
    elev = torch.zeros_like(dist)
    azim = torch.zeros_like(dist)
    R, T = look_at_view_transform(dist, elev, azim)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=512, blur_radius=0.0, faces_per_pixel=1
    )

    # Init shader settings
    materials = Materials(device=device)
    lights = PointLights(device=device)
    lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]
    blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

    # Init renderer
    rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
    shaders = {
        "phong": HardPhongShader,
        "gouraud": HardGouraudShader,
        "flat": HardFlatShader,
    }
    for (name, shader_init) in shaders.items():
        shader = shader_init(
            lights=lights,
            cameras=cameras,
            materials=materials,
            blend_params=blend_params,
        )
        renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
        images = renderer(sphere_meshes)
        image_ref = load_rgb_image(
            "test_simple_sphere_light_%s.png" % name, DATA_DIR
        )
        for i in range(batch_size):
            rgb = images[i, ..., :3].squeeze().cpu()
            if i == 0 and DEBUG:
                filename = "DEBUG_simple_sphere_batched_%s.png" % name
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / filename
                )
            self.assertClose(rgb, image_ref, atol=0.05)
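# `load_rgb_image` is a small I/O helper the rendering tests rely on but that
# is not defined in this section. A plausible sketch (an assumption; it relies
# on the module's existing `Image`, `np`, `torch` imports and the `DATA_DIR`
# constant, and the real helper may differ in signature and normalization):
def load_rgb_image(filename, data_dir=DATA_DIR):
    with Image.open(data_dir / filename) as raw_image:
        # Convert the uint8 PNG to a float tensor in the 0 - 1 range.
        image = torch.from_numpy(np.array(raw_image) / 255.0)
    return image.to(dtype=torch.float32)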
def baryclip_cuda(
    num_meshes: int = 8,
    ico_level: int = 5,
    image_size: int = 64,
    faces_per_pixel: int = 50,
    device="cuda",
):
    # Init meshes
    sphere_meshes = ico_sphere(ico_level, device).extend(num_meshes)
    # Init transform
    R, T = look_at_view_transform(1.0, 0.0, 0.0)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    # Init rasterizer
    raster_settings = RasterizationSettings(
        image_size=image_size,
        blur_radius=1e-4,
        faces_per_pixel=faces_per_pixel,
        clip_barycentric_coords=True,
    )
    rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)

    torch.cuda.synchronize()

    def raster_fn():
        rasterizer(sphere_meshes)
        torch.cuda.synchronize()

    return raster_fn
def test_simple_sphere_batched(self):
    device = torch.device("cuda:0")
    sphere_mesh = ico_sphere(1, device)
    verts_padded = sphere_mesh.verts_padded()
    verts_padded[..., 1] += 0.2
    verts_padded[..., 0] += 0.2
    pointclouds = Pointclouds(
        points=verts_padded, features=torch.ones_like(verts_padded)
    )
    batch_size = 20
    pointclouds = pointclouds.extend(batch_size)
    R, T = look_at_view_transform(2.7, 0.0, 0.0)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = PointsRasterizationSettings(
        image_size=256, radius=5e-2, points_per_pixel=1
    )
    rasterizer = PointsRasterizer(cameras=cameras, raster_settings=raster_settings)
    compositor = NormWeightedCompositor()
    renderer = PointsRenderer(rasterizer=rasterizer, compositor=compositor)

    # Load reference image
    filename = "simple_pointcloud_sphere.png"
    image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)

    images = renderer(pointclouds)
    for i in range(batch_size):
        rgb = images[i, ..., :3].squeeze().cpu()
        if i == 0 and DEBUG:
            filename = "DEBUG_%s" % filename
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / filename
            )
        self.assertClose(rgb, image_ref)
def test_simple_sphere(self):
    device = torch.device("cuda:0")
    sphere_mesh = ico_sphere(1, device)
    verts_padded = sphere_mesh.verts_padded()
    # Shift vertices to check coordinate frames are correct.
    verts_padded[..., 1] += 0.2
    verts_padded[..., 0] += 0.2
    pointclouds = Pointclouds(
        points=verts_padded, features=torch.ones_like(verts_padded)
    )
    R, T = look_at_view_transform(2.7, 0.0, 0.0)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = PointsRasterizationSettings(
        image_size=256, radius=5e-2, points_per_pixel=1
    )
    rasterizer = PointsRasterizer(cameras=cameras, raster_settings=raster_settings)
    compositor = NormWeightedCompositor()
    renderer = PointsRenderer(rasterizer=rasterizer, compositor=compositor)

    # Load reference image
    filename = "simple_pointcloud_sphere.png"
    image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)

    for bin_size in [0, None]:
        # Check both naive and coarse to fine produce the same output.
        renderer.rasterizer.raster_settings.bin_size = bin_size
        images = renderer(pointclouds)
        rgb = images[0, ..., :3].squeeze().cpu()
        if DEBUG:
            filename = "DEBUG_%s" % filename
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / filename
            )
        self.assertClose(rgb, image_ref)
def test_perspective_mixed_inputs_grad(self):
    far = torch.tensor([10.0])
    near = 1.0
    fov = torch.tensor(60.0, requires_grad=True)
    cameras = OpenGLPerspectiveCameras(znear=near, zfar=far, fov=fov)
    P = cameras.get_projection_transform()
    vertices = torch.tensor([1, 2, 10], dtype=torch.float32)
    vertices_batch = vertices[None, None, :]
    v1 = P.transform_points(vertices_batch).squeeze()
    v1.sum().backward()
    self.assertTrue(hasattr(fov, "grad"))
    fov_grad = fov.grad.clone()
    half_fov_rad = (math.pi / 180.0) * fov.detach() / 2.0
    grad_cotan = -(1.0 / (torch.sin(half_fov_rad) ** 2.0) * 1 / 2.0)
    grad_fov = (math.pi / 180.0) * grad_cotan
    grad_fov = (vertices[0] + vertices[1]) * grad_fov / 10.0
    self.assertClose(fov_grad, grad_fov)
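# The analytic gradient above follows from x' = x * cot(fov_rad / 2) / z with
# fov_rad = (pi / 180) * fov, so d(cot(fov_rad / 2)) / d(fov) =
# -1 / sin(fov_rad / 2)^2 * (pi / 360). A small finite-difference cross-check
# of that value (an illustrative sketch, not part of the original tests; the
# helper name and defaults are hypothetical):
def _fov_grad_finite_difference(fov_value=60.0, eps=1e-3, x=1.0, y=2.0, z=10.0):
    def projected_sum(f):
        cot_half_fov = 1.0 / math.tan(math.radians(f) / 2.0)
        # The z coordinate maps to a constant (1.0 at the far plane),
        # so it does not contribute to the slope w.r.t. fov.
        return (x + y) * cot_half_fov / z + 1.0

    return (projected_sum(fov_value + eps) - projected_sum(fov_value - eps)) / (2 * eps)

# _fov_grad_finite_difference() ≈ -pi / 300 ≈ -0.01047, matching grad_fov above.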
def test_camera_class_init(self):
    device = torch.device("cuda:0")
    cam = OpenGLPerspectiveCameras(znear=10.0, zfar=(100.0, 200.0))

    # Check broadcasting
    self.assertTrue(cam.znear.shape == (2,))
    self.assertTrue(cam.zfar.shape == (2,))

    # Update znear element 1
    cam[1].znear = 20.0
    self.assertTrue(cam.znear[1] == 20.0)

    # Get item and get value
    c0 = cam[0]
    self.assertTrue(c0.zfar == 100.0)

    # Test to
    new_cam = cam.to(device=device)
    self.assertTrue(new_cam.device == device)
def test_simple_sphere_batched(self):
    """
    Test output of phong shading matches a reference image using
    the default values for the light sources.
    """
    batch_size = 5
    device = torch.device("cuda:0")

    # Init mesh
    sphere_meshes = ico_sphere(5, device).extend(batch_size)
    verts_padded = sphere_meshes.verts_padded()
    faces_padded = sphere_meshes.faces_padded()
    textures = Textures(verts_rgb=torch.ones_like(verts_padded))
    sphere_meshes = Meshes(
        verts=verts_padded, faces=faces_padded, textures=textures
    )

    # Init rasterizer settings
    dist = torch.tensor([2.7]).repeat(batch_size).to(device)
    elev = torch.zeros_like(dist)
    azim = torch.zeros_like(dist)
    R, T = look_at_view_transform(dist, elev, azim)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0
    )

    # Init shader settings
    materials = Materials(device=device)
    lights = PointLights(device=device)
    lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]

    # Init renderer
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=cameras, raster_settings=raster_settings
        ),
        shader=HardPhongShader(
            lights=lights, cameras=cameras, materials=materials
        ),
    )
    images = renderer(sphere_meshes)

    # Load ref image
    image_ref = load_rgb_image("test_simple_sphere_light.png")

    for i in range(batch_size):
        rgb = images[i, ..., :3].squeeze().cpu()
        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / f"DEBUG_simple_sphere_{i}.png"
            )
        self.assertTrue(torch.allclose(rgb, image_ref, atol=0.05))
def test_perspective_mixed_inputs_broadcast(self):
    far = torch.tensor([10.0, 20.0], dtype=torch.float32)
    near = 1.0
    fov = torch.tensor(60.0)
    cameras = OpenGLPerspectiveCameras(znear=near, zfar=far, fov=fov)
    P = cameras.get_projection_transform()
    vertices = torch.tensor([1, 2, 10], dtype=torch.float32)
    z1 = 1.0  # vertices at far clipping plane so z = 1.0
    z2 = (20.0 / (20.0 - 1.0) * 10.0 + -(20.0) / (20.0 - 1.0)) / 10.0
    projected_verts = torch.tensor(
        [
            [np.sqrt(3) / 10.0, 2 * np.sqrt(3) / 10.0, z1],
            [np.sqrt(3) / 10.0, 2 * np.sqrt(3) / 10.0, z2],
        ],
        dtype=torch.float32,
    )
    vertices = vertices[None, None, :]
    v1 = P.transform_points(vertices)
    v2 = perspective_project_naive(vertices, fov=60.0)
    self.assertClose(v1[..., :2], torch.cat([v2, v2])[..., :2])
    self.assertClose(v1.squeeze(), projected_verts)
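# A small sketch of the NDC depth formula that the z1 and z2 values above
# encode, assuming the OpenGL-style projection used by
# OpenGLPerspectiveCameras (an illustrative helper, not part of the tests):
# after the perspective divide, z_ndc = zfar * (z - znear) / ((zfar - znear) * z).
def ndc_depth(z, znear, zfar):
    return zfar * (z - znear) / ((zfar - znear) * z)

# ndc_depth(10.0, 1.0, 10.0) == 1.0 and ndc_depth(10.0, 1.0, 20.0) == 18 / 19,
# matching z1 and z2 in the test above.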
def test_simple_sphere_batched(self):
    """
    Test a mesh with vertex textures can be extended to form a batch, and
    is rendered correctly with Phong, Gouraud and Flat Shaders.
    """
    batch_size = 20
    device = torch.device("cuda:0")

    # Init mesh with vertex textures.
    sphere_meshes = ico_sphere(5, device).extend(batch_size)
    verts_padded = sphere_meshes.verts_padded()
    faces_padded = sphere_meshes.faces_padded()
    textures = Textures(verts_rgb=torch.ones_like(verts_padded))
    sphere_meshes = Meshes(
        verts=verts_padded, faces=faces_padded, textures=textures
    )

    # Init rasterizer settings
    dist = torch.tensor([2.7]).repeat(batch_size).to(device)
    elev = torch.zeros_like(dist)
    azim = torch.zeros_like(dist)
    R, T = look_at_view_transform(dist, elev, azim)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0
    )

    # Init shader settings
    materials = Materials(device=device)
    lights = PointLights(device=device)
    lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]

    # Init renderer
    rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
    shaders = {
        "phong": HardPhongShader,
        "gouraud": HardGouraudShader,
        "flat": HardFlatShader,
    }
    for (name, shader_init) in shaders.items():
        shader = shader_init(lights=lights, cameras=cameras, materials=materials)
        renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
        images = renderer(sphere_meshes)
        image_ref = load_rgb_image("test_simple_sphere_light_%s.png" % name)
        for i in range(batch_size):
            rgb = images[i, ..., :3].squeeze().cpu()
            self.assertTrue(torch.allclose(rgb, image_ref, atol=0.05))
def test_silhouette_with_grad(self):
    """
    Test silhouette blending. Also check that gradient calculation works.
    """
    device = torch.device("cuda:0")
    ref_filename = "test_silhouette.png"
    image_ref_filename = DATA_DIR / ref_filename
    sphere_mesh = ico_sphere(5, device)
    verts, faces = sphere_mesh.get_mesh_verts_faces(0)
    sphere_mesh = Meshes(verts=[verts], faces=[faces])

    blend_params = BlendParams(sigma=1e-4, gamma=1e-4)
    raster_settings = RasterizationSettings(
        image_size=512,
        blur_radius=np.log(1.0 / 1e-4 - 1.0) * blend_params.sigma,
        faces_per_pixel=80,
        bin_size=0,
    )

    # Init rasterizer settings
    R, T = look_at_view_transform(2.7, 0, 0)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

    # Init renderer
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=cameras, raster_settings=raster_settings
        ),
        shader=SoftSilhouetteShader(blend_params=blend_params),
    )
    images = renderer(sphere_mesh)
    alpha = images[0, ..., 3].squeeze().cpu()
    if DEBUG:
        Image.fromarray((alpha.numpy() * 255).astype(np.uint8)).save(
            DATA_DIR / "DEBUG_silhouette.png"
        )

    with Image.open(image_ref_filename) as raw_image_ref:
        image_ref = torch.from_numpy(np.array(raw_image_ref))

    image_ref = image_ref.to(dtype=torch.float32) / 255.0
    self.assertTrue(torch.allclose(alpha, image_ref, atol=0.055))

    # Check grad exist
    verts.requires_grad = True
    sphere_mesh = Meshes(verts=[verts], faces=[faces])
    images = renderer(sphere_mesh)
    images[0, ...].sum().backward()
    self.assertIsNotNone(verts.grad)
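# Note on the blur_radius choice in the silhouette test above: the soft
# silhouette blend weights a face roughly as sigmoid(-d / sigma), where d is
# the (squared) screen-space distance to the face, so a radius of
# log(1 / threshold - 1) * sigma is approximately where a face's contribution
# falls to `threshold`. A quick illustrative check of that relationship
# (a sketch under that assumption, not part of the original tests):
def _blur_radius_for_threshold(sigma, threshold=1e-4):
    return np.log(1.0 / threshold - 1.0) * sigma

# torch.sigmoid(torch.tensor(-_blur_radius_for_threshold(1e-4) / 1e-4))
# evaluates to ~1e-4, i.e. contributions beyond this radius are negligible.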
def rasterize_transform_with_init(
    num_meshes: int, ico_level: int = 5, device="cuda"
):
    # Init meshes
    sphere_meshes = ico_sphere(ico_level, device).extend(num_meshes)
    # Init transform
    R, T = look_at_view_transform(1.0, 0.0, 0.0)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    # Init rasterizer
    rasterizer = MeshRasterizer(cameras=cameras)

    torch.cuda.synchronize()

    def raster_fn():
        rasterizer.transform(sphere_meshes)
        torch.cuda.synchronize()

    return raster_fn
def baryclip_pytorch(
    num_meshes: int = 8,
    ico_level: int = 5,
    image_size: int = 64,
    faces_per_pixel: int = 50,
    device="cuda",
):
    # Init meshes
    sphere_meshes = ico_sphere(ico_level, device).extend(num_meshes)
    # Init transform
    R, T = look_at_view_transform(1.0, 0.0, 0.0)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    # Init rasterizer
    raster_settings = RasterizationSettings(
        image_size=image_size,
        blur_radius=1e-4,
        faces_per_pixel=faces_per_pixel,
        clip_barycentric_coords=False,
    )
    rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)

    torch.cuda.synchronize()

    def raster_fn():
        fragments = rasterizer(sphere_meshes)

        # Clip barycentric coordinates and reinterpolate the z buffer
        clipped_bary_coords = _clip_barycentric_coordinates(fragments.bary_coords)
        clipped_zbuf = _interpolate_zbuf(
            fragments.pix_to_face, clipped_bary_coords, sphere_meshes
        )
        fragments = Fragments(
            bary_coords=clipped_bary_coords,
            zbuf=clipped_zbuf,
            dists=fragments.dists,
            pix_to_face=fragments.pix_to_face,
        )
        torch.cuda.synchronize()

    return raster_fn
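# The benchmark functions above return zero-argument closures; the harness that
# drives them is not shown in this section. A minimal illustrative driver could
# look like the following (a sketch with hypothetical names, assuming a CUDA
# device is available):
import time


def run_benchmark(fn_factory, iters=10, **kwargs):
    raster_fn = fn_factory(**kwargs)
    raster_fn()  # warm-up iteration
    start = time.time()
    for _ in range(iters):
        raster_fn()
    # Return the mean wall-clock time per iteration in seconds.
    return (time.time() - start) / iters


# e.g. run_benchmark(baryclip_cuda, num_meshes=8, image_size=64)
#      run_benchmark(baryclip_pytorch, num_meshes=8, image_size=64)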
def test_simple_sphere(self, elevated_camera=False):
    """
    Test output of phong and gouraud shading matches a reference image using
    the default values for the light sources.

    Args:
        elevated_camera: Defines whether the camera observing the scene should
                         have an elevation of 45 degrees.
    """
    device = torch.device("cuda:0")

    # Init mesh
    sphere_mesh = ico_sphere(5, device)
    verts_padded = sphere_mesh.verts_padded()
    faces_padded = sphere_mesh.faces_padded()
    textures = Textures(verts_rgb=torch.ones_like(verts_padded))
    sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

    # Init rasterizer settings
    if elevated_camera:
        # Elevated and rotated camera
        R, T = look_at_view_transform(dist=2.7, elev=45.0, azim=45.0)
        postfix = "_elevated_camera"
        # If the y axis is up, the spot of light should
        # be on the bottom left of the sphere.
    else:
        # No elevation or azimuth rotation
        R, T = look_at_view_transform(2.7, 0.0, 0.0)
        postfix = ""
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

    # Init shader settings
    materials = Materials(device=device)
    lights = PointLights(device=device)
    lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]

    raster_settings = RasterizationSettings(
        image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0
    )
    rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)

    # Test several shaders
    shaders = {
        "phong": HardPhongShader,
        "gouraud": HardGouraudShader,
        "flat": HardFlatShader,
    }
    for (name, shader_init) in shaders.items():
        shader = shader_init(lights=lights, cameras=cameras, materials=materials)
        renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
        images = renderer(sphere_mesh)
        filename = "simple_sphere_light_%s%s.png" % (name, postfix)
        image_ref = load_rgb_image("test_%s" % filename)
        rgb = images[0, ..., :3].squeeze().cpu()
        if DEBUG:
            filename = "DEBUG_%s" % filename
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / filename
            )
        self.assertTrue(torch.allclose(rgb, image_ref, atol=0.05))

    ########################################################
    # Move the light along the z axis in world space so it
    # is behind the sphere as seen from the camera and the
    # rendered image is dark.
    ########################################################
    lights.location[..., 2] = -2.0
    phong_shader = HardPhongShader(
        lights=lights, cameras=cameras, materials=materials
    )
    phong_renderer = MeshRenderer(rasterizer=rasterizer, shader=phong_shader)
    images = phong_renderer(sphere_mesh, lights=lights)
    rgb = images[0, ..., :3].squeeze().cpu()
    if DEBUG:
        filename = "DEBUG_simple_sphere_dark%s.png" % postfix
        Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
            DATA_DIR / filename
        )

    # Load reference image
    image_ref_phong_dark = load_rgb_image("test_simple_sphere_dark%s.png" % postfix)
    self.assertTrue(torch.allclose(rgb, image_ref_phong_dark, atol=0.05))
def test_texture_map(self):
    """
    Test a mesh with a texture map is loaded and rendered correctly.
    The pupils in the eyes of the cow should always be looking to the left.
    """
    device = torch.device("cuda:0")
    DATA_DIR = Path(__file__).resolve().parent.parent / "docs/tutorials/data"
    obj_filename = DATA_DIR / "cow_mesh/cow.obj"

    # Load mesh + texture
    mesh = load_objs_as_meshes([obj_filename], device=device)

    # Init rasterizer settings
    R, T = look_at_view_transform(2.7, 0, 0)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0
    )

    # Init shader settings
    materials = Materials(device=device)
    lights = PointLights(device=device)
    # Place light behind the cow in world space. The front of
    # the cow is facing the -z direction.
    lights.location = torch.tensor([0.0, 0.0, 2.0], device=device)[None]

    # Init renderer
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
        shader=TexturedSoftPhongShader(
            lights=lights, cameras=cameras, materials=materials
        ),
    )
    images = renderer(mesh)
    rgb = images[0, ..., :3].squeeze().cpu()

    # Load reference image
    image_ref = load_rgb_image("test_texture_map_back.png")

    if DEBUG:
        Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
            DATA_DIR / "DEBUG_texture_map_back.png"
        )
    self.assertTrue(torch.allclose(rgb, image_ref, atol=0.05))

    # Check grad exists
    [verts] = mesh.verts_list()
    verts.requires_grad = True
    mesh2 = Meshes(verts=[verts], faces=mesh.faces_list(), textures=mesh.textures)
    images = renderer(mesh2)
    images[0, ...].sum().backward()
    self.assertIsNotNone(verts.grad)

    ##########################################
    # Check rendering of the front of the cow
    ##########################################
    R, T = look_at_view_transform(2.7, 0, 180)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

    # Move light to the front of the cow in world space
    lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]
    images = renderer(mesh, cameras=cameras, lights=lights)
    rgb = images[0, ..., :3].squeeze().cpu()

    # Load reference image
    image_ref = load_rgb_image("test_texture_map_front.png")

    if DEBUG:
        Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
            DATA_DIR / "DEBUG_texture_map_front.png"
        )
    self.assertTrue(torch.allclose(rgb, image_ref, atol=0.05))

    #################################
    # Add blurring to rasterization
    #################################
    R, T = look_at_view_transform(2.7, 0, 180)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    blend_params = BlendParams(sigma=5e-4, gamma=1e-4)
    raster_settings = RasterizationSettings(
        image_size=512,
        blur_radius=np.log(1.0 / 1e-4 - 1.0) * blend_params.sigma,
        faces_per_pixel=100,
        bin_size=0,
    )
    images = renderer(
        mesh.clone(),
        cameras=cameras,
        raster_settings=raster_settings,
        blend_params=blend_params,
    )
    rgb = images[0, ..., :3].squeeze().cpu()

    # Load reference image
    image_ref = load_rgb_image("test_blurry_textured_rendering.png")

    if DEBUG:
        Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
            DATA_DIR / "DEBUG_blurry_textured_rendering.png"
        )
    self.assertTrue(torch.allclose(rgb, image_ref, atol=0.05))
def test_simple_sphere(self, elevated_camera=False):
    """
    Test output of phong and gouraud shading matches a reference image using
    the default values for the light sources.

    Args:
        elevated_camera: Defines whether the camera observing the scene should
                         have an elevation of 45 degrees.
    """
    device = torch.device("cuda:0")

    # Init mesh
    sphere_mesh = ico_sphere(5, device)
    verts_padded = sphere_mesh.verts_padded()
    faces_padded = sphere_mesh.faces_padded()
    textures = Textures(verts_rgb=torch.ones_like(verts_padded))
    sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

    # Init rasterizer settings
    if elevated_camera:
        R, T = look_at_view_transform(2.7, 45.0, 0.0)
        postfix = "_elevated_camera"
    else:
        R, T = look_at_view_transform(2.7, 0.0, 0.0)
        postfix = ""
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0
    )

    # Init shader settings
    materials = Materials(device=device)
    lights = PointLights(device=device)
    lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]

    # Init renderer
    rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
    renderer = MeshRenderer(
        rasterizer=rasterizer,
        shader=HardPhongShader(lights=lights, cameras=cameras, materials=materials),
    )
    images = renderer(sphere_mesh)
    rgb = images[0, ..., :3].squeeze().cpu()
    if DEBUG:
        Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
            DATA_DIR / ("DEBUG_simple_sphere_light%s.png" % postfix)
        )

    # Load reference image
    image_ref_phong = load_rgb_image(
        "test_simple_sphere_illuminated%s.png" % postfix
    )
    self.assertTrue(torch.allclose(rgb, image_ref_phong, atol=0.05))

    ###################################
    # Move the light behind the object
    ###################################
    # Check the image is dark
    lights.location[..., 2] = +2.0
    images = renderer(sphere_mesh, lights=lights)
    rgb = images[0, ..., :3].squeeze().cpu()
    if DEBUG:
        Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
            DATA_DIR / ("DEBUG_simple_sphere_dark%s.png" % postfix)
        )

    # Load reference image
    image_ref_phong_dark = load_rgb_image("test_simple_sphere_dark%s.png" % postfix)
    self.assertTrue(torch.allclose(rgb, image_ref_phong_dark, atol=0.05))

    ######################################
    # Change the shader to a GouraudShader
    ######################################
    lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]
    renderer = MeshRenderer(
        rasterizer=rasterizer,
        shader=HardGouraudShader(
            lights=lights, cameras=cameras, materials=materials
        ),
    )
    images = renderer(sphere_mesh)
    rgb = images[0, ..., :3].squeeze().cpu()
    if DEBUG:
        Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
            DATA_DIR / ("DEBUG_simple_sphere_light_gouraud%s.png" % postfix)
        )

    # Load reference image
    image_ref_gouraud = load_rgb_image(
        "test_simple_sphere_light_gouraud%s.png" % postfix
    )
    self.assertTrue(torch.allclose(rgb, image_ref_gouraud, atol=0.005))
    self.assertFalse(torch.allclose(rgb, image_ref_phong, atol=0.005))
def test_simple_sphere(self):
    device = torch.device("cuda:0")
    ref_filename = "test_rasterized_sphere.png"
    image_ref_filename = DATA_DIR / ref_filename

    # Rescale image_ref to the 0 - 1 range and convert to a binary mask.
    image_ref = convert_image_to_binary_mask(image_ref_filename)

    # Init mesh
    sphere_mesh = ico_sphere(5, device)

    # Init rasterizer settings
    R, T = look_at_view_transform(2.7, 0, 0)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0
    )

    # Init rasterizer
    rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)

    ####################################
    # 1. Test rasterizing a single mesh
    ####################################
    fragments = rasterizer(sphere_mesh)
    image = fragments.pix_to_face[0, ..., 0].squeeze().cpu()
    # Convert pix_to_face to a binary mask
    image[image >= 0] = 1.0
    image[image < 0] = 0.0

    if DEBUG:
        Image.fromarray((image.numpy() * 255).astype(np.uint8)).save(
            DATA_DIR / "DEBUG_test_rasterized_sphere.png"
        )

    self.assertTrue(torch.allclose(image, image_ref))

    ##################################
    # 2. Test with a batch of meshes
    ##################################
    batch_size = 10
    sphere_meshes = sphere_mesh.extend(batch_size)
    fragments = rasterizer(sphere_meshes)
    for i in range(batch_size):
        image = fragments.pix_to_face[i, ..., 0].squeeze().cpu()
        image[image >= 0] = 1.0
        image[image < 0] = 0.0
        self.assertTrue(torch.allclose(image, image_ref))

    ####################################################
    # 3. Test that passing kwargs to rasterizer works.
    ####################################################
    # Change the view transform to zoom in.
    R, T = look_at_view_transform(2.0, 0, 0, device=device)
    fragments = rasterizer(sphere_mesh, R=R, T=T)
    image = fragments.pix_to_face[0, ..., 0].squeeze().cpu()
    image[image >= 0] = 1.0
    image[image < 0] = 0.0

    ref_filename = "test_rasterized_sphere_zoom.png"
    image_ref_filename = DATA_DIR / ref_filename
    image_ref = convert_image_to_binary_mask(image_ref_filename)

    if DEBUG:
        Image.fromarray((image.numpy() * 255).astype(np.uint8)).save(
            DATA_DIR / "DEBUG_test_rasterized_sphere_zoom.png"
        )
    self.assertTrue(torch.allclose(image, image_ref))

    #################################
    # 4. Test init without cameras.
    #################################
    # Create a new empty rasterizer:
    rasterizer = MeshRasterizer()

    # Check that omitting the cameras in both initialization
    # and the forward pass throws an error:
    with self.assertRaisesRegex(ValueError, "Cameras must be specified"):
        rasterizer(sphere_mesh)

    # Now pass in the cameras as a kwarg
    fragments = rasterizer(
        sphere_mesh, cameras=cameras, raster_settings=raster_settings
    )
    image = fragments.pix_to_face[0, ..., 0].squeeze().cpu()
    # Convert pix_to_face to a binary mask
    image[image >= 0] = 1.0
    image[image < 0] = 0.0

    if DEBUG:
        Image.fromarray((image.numpy() * 255).astype(np.uint8)).save(
            DATA_DIR / "DEBUG_test_rasterized_sphere.png"
        )

    self.assertTrue(torch.allclose(image, image_ref))
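# `convert_image_to_binary_mask` is a helper used by the rasterizer tests above
# but not defined in this section. A plausible sketch (an assumption based on
# how the tests use it; the real helper may threshold or type-cast differently):
# load the reference image and mark every non-zero pixel as foreground.
def convert_image_to_binary_mask(filename):
    with Image.open(filename) as raw_image:
        image = torch.from_numpy(np.array(raw_image))
    # Non-zero pixels belong to the rasterized silhouette.
    return (image > 0).to(torch.float32)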
def test_simple_sphere(self):
    device = torch.device("cuda:0")

    # Load reference image
    ref_filename = "test_simple_pointcloud_sphere.png"
    image_ref_filename = DATA_DIR / ref_filename

    # Rescale image_ref to the 0 - 1 range and convert to a binary mask.
    image_ref = convert_image_to_binary_mask(image_ref_filename).to(torch.int32)

    sphere_mesh = ico_sphere(1, device)
    verts_padded = sphere_mesh.verts_padded()
    verts_padded[..., 1] += 0.2
    verts_padded[..., 0] += 0.2
    pointclouds = Pointclouds(points=verts_padded)
    R, T = look_at_view_transform(2.7, 0.0, 0.0)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = PointsRasterizationSettings(
        image_size=256, radius=5e-2, points_per_pixel=1
    )

    #################################
    # 1. Test init without cameras.
    #################################
    # Initialize without passing in the cameras
    rasterizer = PointsRasterizer()

    # Check that omitting the cameras in both initialization
    # and the forward pass throws an error:
    with self.assertRaisesRegex(ValueError, "Cameras must be specified"):
        rasterizer(pointclouds)

    ##########################################
    # 2. Test rasterizing a single pointcloud
    ##########################################
    fragments = rasterizer(
        pointclouds, cameras=cameras, raster_settings=raster_settings
    )

    # Convert idx to a binary mask
    image = fragments.idx[0, ..., 0].squeeze().cpu()
    image[image >= 0] = 1.0
    image[image < 0] = 0.0

    if DEBUG:
        Image.fromarray((image.numpy() * 255).astype(np.uint8)).save(
            DATA_DIR / "DEBUG_test_rasterized_sphere_points.png"
        )

    self.assertTrue(torch.allclose(image, image_ref[..., 0]))

    ########################################
    # 3. Test with a batch of pointclouds
    ########################################
    batch_size = 10
    pointclouds = pointclouds.extend(batch_size)
    fragments = rasterizer(
        pointclouds, cameras=cameras, raster_settings=raster_settings
    )
    for i in range(batch_size):
        image = fragments.idx[i, ..., 0].squeeze().cpu()
        image[image >= 0] = 1.0
        image[image < 0] = 0.0
        self.assertTrue(torch.allclose(image, image_ref[..., 0]))
def test_texture_map(self):
    """
    Test a mesh with a texture map is loaded and rendered correctly.
    The pupils in the eyes of the cow should always be looking to the left.
    """
    device = torch.device("cuda:0")
    obj_dir = Path(__file__).resolve().parent.parent / "docs/tutorials/data"
    obj_filename = obj_dir / "cow_mesh/cow.obj"

    # Load mesh + texture
    mesh = load_objs_as_meshes([obj_filename], device=device)

    # Init rasterizer settings
    R, T = look_at_view_transform(2.7, 0, 0)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=512, blur_radius=0.0, faces_per_pixel=1
    )

    # Init shader settings
    materials = Materials(device=device)
    lights = PointLights(device=device)
    # Place light behind the cow in world space. The front of
    # the cow is facing the -z direction.
    lights.location = torch.tensor([0.0, 0.0, 2.0], device=device)[None]

    blend_params = BlendParams(
        sigma=1e-1,
        gamma=1e-4,
        background_color=torch.tensor([1.0, 1.0, 1.0], device=device),
    )

    # Init renderer
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
        shader=TexturedSoftPhongShader(
            lights=lights,
            cameras=cameras,
            materials=materials,
            blend_params=blend_params,
        ),
    )

    # Load reference image
    image_ref = load_rgb_image("test_texture_map_back.png", DATA_DIR)

    for bin_size in [0, None]:
        # Check both naive and coarse to fine produce the same output.
        renderer.rasterizer.raster_settings.bin_size = bin_size
        images = renderer(mesh)
        rgb = images[0, ..., :3].squeeze().cpu()

        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_texture_map_back.png"
            )

        # NOTE some pixels can be flaky and will not lead to
        # `cond1` being true. Add `cond2` and check `cond1 or cond2`
        cond1 = torch.allclose(rgb, image_ref, atol=0.05)
        cond2 = ((rgb - image_ref).abs() > 0.05).sum() < 5
        self.assertTrue(cond1 or cond2)

    # Check grad exists
    [verts] = mesh.verts_list()
    verts.requires_grad = True
    mesh2 = Meshes(verts=[verts], faces=mesh.faces_list(), textures=mesh.textures)
    images = renderer(mesh2)
    images[0, ...].sum().backward()
    self.assertIsNotNone(verts.grad)

    ##########################################
    # Check rendering of the front of the cow
    ##########################################
    R, T = look_at_view_transform(2.7, 0, 180)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

    # Move light to the front of the cow in world space
    lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]

    # Load reference image
    image_ref = load_rgb_image("test_texture_map_front.png", DATA_DIR)

    for bin_size in [0, None]:
        # Check both naive and coarse to fine produce the same output.
        renderer.rasterizer.raster_settings.bin_size = bin_size
        images = renderer(mesh, cameras=cameras, lights=lights)
        rgb = images[0, ..., :3].squeeze().cpu()

        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_texture_map_front.png"
            )

        # NOTE some pixels can be flaky and will not lead to
        # `cond1` being true. Add `cond2` and check `cond1 or cond2`
        cond1 = torch.allclose(rgb, image_ref, atol=0.05)
        cond2 = ((rgb - image_ref).abs() > 0.05).sum() < 5
        self.assertTrue(cond1 or cond2)

    #################################
    # Add blurring to rasterization
    #################################
    R, T = look_at_view_transform(2.7, 0, 180)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    blend_params = BlendParams(sigma=5e-4, gamma=1e-4)
    raster_settings = RasterizationSettings(
        image_size=512,
        blur_radius=np.log(1.0 / 1e-4 - 1.0) * blend_params.sigma,
        faces_per_pixel=100,
        clip_barycentric_coords=True,
    )

    # Load reference image
    image_ref = load_rgb_image("test_blurry_textured_rendering.png", DATA_DIR)

    for bin_size in [0, None]:
        # Check both naive and coarse to fine produce the same output.
        renderer.rasterizer.raster_settings.bin_size = bin_size
        images = renderer(
            mesh.clone(),
            cameras=cameras,
            raster_settings=raster_settings,
            blend_params=blend_params,
        )
        rgb = images[0, ..., :3].squeeze().cpu()

        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_blurry_textured_rendering.png"
            )
        self.assertClose(rgb, image_ref, atol=0.05)
def test_joined_spheres(self):
    """
    Test a list of Meshes can be joined as a single mesh and
    the single mesh is rendered correctly with Phong, Gouraud
    and Flat Shaders.
    """
    device = torch.device("cuda:0")

    # Init mesh with vertex textures.
    # Initialize a list containing two ico spheres of different sizes.
    sphere_list = [ico_sphere(3, device), ico_sphere(4, device)]
    # [(642 verts, 1280 faces), (2562 verts, 5120 faces)]
    # Scale factors applied to the vertices to resize the spheres.
    scales = [0.25, 1]
    # Horizontal offsets applied to the spheres to prevent overlap.
    offsets = [1.2, -0.3]
    # Initialize a list containing the adjusted sphere meshes.
    sphere_mesh_list = []
    for i in range(len(sphere_list)):
        verts = sphere_list[i].verts_padded() * scales[i]
        verts[0, :, 0] += offsets[i]
        sphere_mesh_list.append(
            Meshes(verts=verts, faces=sphere_list[i].faces_padded())
        )
    joined_sphere_mesh = join_mesh(sphere_mesh_list)
    joined_sphere_mesh.textures = Textures(
        verts_rgb=torch.ones_like(joined_sphere_mesh.verts_padded())
    )

    # Init rasterizer settings
    R, T = look_at_view_transform(2.7, 0.0, 0.0)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=512, blur_radius=0.0, faces_per_pixel=1
    )

    # Init shader settings
    materials = Materials(device=device)
    lights = PointLights(device=device)
    lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]
    blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

    # Init renderer
    rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
    shaders = {
        "phong": HardPhongShader,
        "gouraud": HardGouraudShader,
        "flat": HardFlatShader,
    }
    for (name, shader_init) in shaders.items():
        shader = shader_init(
            lights=lights,
            cameras=cameras,
            materials=materials,
            blend_params=blend_params,
        )
        renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
        image = renderer(joined_sphere_mesh)
        rgb = image[..., :3].squeeze().cpu()
        if DEBUG:
            file_name = "DEBUG_joined_spheres_%s.png" % name
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / file_name
            )
        image_ref = load_rgb_image("test_joined_spheres_%s.png" % name, DATA_DIR)
        self.assertClose(rgb, image_ref, atol=0.05)
def test_texture_map_atlas(self):
    """
    Test a mesh with a texture map as a per face atlas is loaded
    and rendered correctly.
    """
    device = torch.device("cuda:0")
    obj_dir = Path(__file__).resolve().parent.parent / "docs/tutorials/data"
    obj_filename = obj_dir / "cow_mesh/cow.obj"

    # Load mesh and texture as a per face texture atlas.
    verts, faces, aux = load_obj(
        obj_filename,
        device=device,
        load_textures=True,
        create_texture_atlas=True,
        texture_atlas_size=8,
        texture_wrap=None,
    )
    mesh = Meshes(
        verts=[verts],
        faces=[faces.verts_idx],
        textures=TexturesAtlas(atlas=[aux.texture_atlas]),
    )

    # Init rasterizer settings
    R, T = look_at_view_transform(2.7, 0, 0)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=512, blur_radius=0.0, faces_per_pixel=1, cull_backfaces=True
    )

    # Init shader settings
    materials = Materials(device=device, specular_color=((0, 0, 0),), shininess=0.0)
    lights = PointLights(device=device)
    # Place light behind the cow in world space. The front of
    # the cow is facing the -z direction.
    lights.location = torch.tensor([0.0, 0.0, 2.0], device=device)[None]

    # The HardPhongShader can be used directly with atlas textures.
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
        shader=HardPhongShader(lights=lights, cameras=cameras, materials=materials),
    )
    images = renderer(mesh)
    rgb = images[0, ..., :3].squeeze().cpu()

    # Load reference image
    image_ref = load_rgb_image("test_texture_atlas_8x8_back.png", DATA_DIR)

    if DEBUG:
        Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
            DATA_DIR / "DEBUG_texture_atlas_8x8_back.png"
        )

    self.assertClose(rgb, image_ref, atol=0.05)
def test_texture_map(self):
    """
    Test a mesh with a texture map is loaded and rendered correctly.
    """
    device = torch.device("cuda:0")
    DATA_DIR = Path(__file__).resolve().parent.parent / "docs/tutorials/data"
    obj_filename = DATA_DIR / "cow_mesh/cow.obj"

    # Load mesh + texture
    verts, faces, aux = load_obj(obj_filename)
    faces_idx = faces.verts_idx.to(device)
    verts = verts.to(device)
    texture_uvs = aux.verts_uvs
    materials = aux.material_colors
    tex_maps = aux.texture_images

    # tex_maps is a dictionary of material names as keys and texture images
    # as values. Only need the images for this example.
    textures = Textures(
        maps=list(tex_maps.values()),
        faces_uvs=faces.textures_idx.to(torch.int64).to(device)[None, :],
        verts_uvs=texture_uvs.to(torch.float32).to(device)[None, :],
    )
    mesh = Meshes(verts=[verts], faces=[faces_idx], textures=textures)

    # Init rasterizer settings
    R, T = look_at_view_transform(2.7, 10, 20)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0
    )

    # Init shader settings
    materials = Materials(device=device)
    lights = PointLights(device=device)
    lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]

    # Init renderer
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
        shader=TexturedPhongShader(
            lights=lights, cameras=cameras, materials=materials
        ),
    )
    images = renderer(mesh)
    rgb = images[0, ..., :3].squeeze().cpu()

    # Load reference image
    image_ref = load_rgb_image("test_texture_map.png")

    if DEBUG:
        Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
            DATA_DIR / "DEBUG_texture_map.png"
        )

    # There's a calculation instability on the corner of the ear of the cow.
    # We ignore that pixel.
    image_ref[137, 166] = 0
    rgb[137, 166] = 0

    self.assertTrue(torch.allclose(rgb, image_ref, atol=0.05))

    # Check grad exists
    verts = verts.clone()
    verts.requires_grad = True
    mesh = Meshes(verts=[verts], faces=[faces_idx], textures=textures)
    images = renderer(mesh)
    images[0, ...].sum().backward()
    self.assertIsNotNone(verts.grad)
def test_texture_map(self):
    """
    Test a mesh with a texture map is loaded and rendered correctly.
    """
    device = torch.device("cuda:0")
    DATA_DIR = Path(__file__).resolve().parent.parent / "docs/tutorials/data"
    obj_filename = DATA_DIR / "cow_mesh/cow.obj"

    # Load mesh + texture
    mesh = load_objs_as_meshes([obj_filename], device=device)

    # Init rasterizer settings
    R, T = look_at_view_transform(2.7, 10, 20)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0
    )

    # Init shader settings
    materials = Materials(device=device)
    lights = PointLights(device=device)
    lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]

    # Init renderer
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
        shader=TexturedSoftPhongShader(
            lights=lights, cameras=cameras, materials=materials
        ),
    )
    images = renderer(mesh)
    rgb = images[0, ..., :3].squeeze().cpu()

    # Load reference image
    image_ref = load_rgb_image("test_texture_map.png")

    if DEBUG:
        Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
            DATA_DIR / "DEBUG_texture_map.png"
        )

    # There's a calculation instability on the corner of the ear of the cow.
    # We ignore that pixel.
    image_ref[137, 166] = 0
    rgb[137, 166] = 0

    self.assertTrue(torch.allclose(rgb, image_ref, atol=0.05))

    # Check grad exists
    [verts] = mesh.verts_list()
    verts.requires_grad = True
    mesh2 = Meshes(verts=[verts], faces=mesh.faces_list(), textures=mesh.textures)
    images = renderer(mesh2)
    images[0, ...].sum().backward()
    self.assertIsNotNone(verts.grad)