def test_initialize_materials_dimensions_fail(self):
    """
    Color should have shape (N, 3) or (1, 3); shininess should have
    shape (1,), (1, 1), (N,) or (N, 1).
    """
    with self.assertRaises(ValueError):
        Materials(ambient_color=torch.randn(10, 4))
    with self.assertRaises(ValueError):
        Materials(shininess=torch.randn(10, 2))
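# For contrast with the failure cases above, a minimal sketch (not part of the
# original suite; the helper name is hypothetical) of inputs the Materials
# constructor is expected to accept under the shape rules in the docstring:
def _materials_valid_shapes_sketch():
    # (N, 3) and (1, 3) colors are accepted; the (N, 4) color above is rejected.
    Materials(ambient_color=torch.randn(10, 3))
    Materials(diffuse_color=torch.randn(1, 3))
    # Shininess may be (1,), (1, 1), (N,) or (N, 1); (N, 2) above is rejected.
    Materials(shininess=torch.randn(10))
    Materials(shininess=torch.randn(10, 1))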
def test_simple_sphere_batched(self):
    """
    Test a mesh with vertex textures can be extended to form a batch, and
    is rendered correctly with Phong, Gouraud and Flat Shaders.
    """
    batch_size = 5
    device = torch.device("cuda:0")

    # Init mesh with vertex textures.
    sphere_meshes = ico_sphere(5, device).extend(batch_size)
    verts_padded = sphere_meshes.verts_padded()
    faces_padded = sphere_meshes.faces_padded()
    feats = torch.ones_like(verts_padded, device=device)
    textures = TexturesVertex(verts_features=feats)
    sphere_meshes = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

    # Init rasterizer settings
    dist = torch.tensor([2.7]).repeat(batch_size).to(device)
    elev = torch.zeros_like(dist)
    azim = torch.zeros_like(dist)
    R, T = look_at_view_transform(dist, elev, azim)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=512, blur_radius=0.0, faces_per_pixel=1
    )

    # Init shader settings
    materials = Materials(device=device)
    lights = PointLights(device=device)
    lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]
    blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

    # Init renderer
    rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
    shaders = {
        "phong": HardPhongShader,
        "gouraud": HardGouraudShader,
        "flat": HardFlatShader,
    }
    for (name, shader_init) in shaders.items():
        shader = shader_init(
            lights=lights,
            cameras=cameras,
            materials=materials,
            blend_params=blend_params,
        )
        renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
        images = renderer(sphere_meshes)
        image_ref = load_rgb_image("test_simple_sphere_light_%s.png" % name, DATA_DIR)
        for i in range(batch_size):
            rgb = images[i, ..., :3].squeeze().cpu()
            if i == 0 and DEBUG:
                filename = "DEBUG_simple_sphere_batched_%s.png" % name
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / filename
                )
            self.assertClose(rgb, image_ref, atol=0.05)
def test_initialize_materials_broadcast_fail(self):
    """
    Batch dims have to be the same or 1.
    """
    with self.assertRaises(ValueError):
        Materials(
            ambient_color=torch.randn(10, 3), diffuse_color=torch.randn(15, 3)
        )
def test_initialize_materials_mixed_inputs_broadcast(self):
    mat = Materials(ambient_color=torch.randn(10, 3), diffuse_color=((1, 1, 1),))
    self.assertTrue(mat.ambient_color.shape == (10, 3))
    self.assertTrue(mat.diffuse_color.shape == (10, 3))
    self.assertTrue(mat.specular_color.shape == (10, 3))
    self.assertTrue(mat.shininess.shape == (10,))
def test_simple_sphere_screen(self):
    """
    Test output when rendering with PerspectiveCameras & OrthographicCameras
    in NDC vs screen space.
    """
    device = torch.device("cuda:0")

    # Init mesh
    sphere_mesh = ico_sphere(5, device)
    verts_padded = sphere_mesh.verts_padded()
    faces_padded = sphere_mesh.faces_padded()
    feats = torch.ones_like(verts_padded, device=device)
    textures = TexturesVertex(verts_features=feats)
    sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

    R, T = look_at_view_transform(2.7, 0.0, 0.0)

    # Init shader settings
    materials = Materials(device=device)
    lights = PointLights(device=device)
    lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]

    raster_settings = RasterizationSettings(
        image_size=512, blur_radius=0.0, faces_per_pixel=1
    )
    half_half = (512.0 / 2.0, 512.0 / 2.0)
    for cam_type in (PerspectiveCameras, OrthographicCameras):
        cameras = cam_type(
            device=device,
            R=R,
            T=T,
            principal_point=(half_half,),
            focal_length=(half_half,),
            image_size=((512, 512),),
            in_ndc=False,
        )
        rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
        blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

        shader = HardPhongShader(
            lights=lights,
            cameras=cameras,
            materials=materials,
            blend_params=blend_params,
        )
        renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
        images = renderer(sphere_mesh)
        rgb = images[0, ..., :3].squeeze().cpu()
        filename = "test_simple_sphere_light_phong_%s.png" % cam_type.__name__

        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / f"{filename}_.png"
            )
        image_ref = load_rgb_image(filename, DATA_DIR)
        self.assertClose(rgb, image_ref, atol=0.05)
def test_simple_sphere_outside_zfar(self):
    """
    Test output when rendering a sphere that is beyond zfar with a
    SoftPhongShader. This renders a sphere of radius 500, with the camera
    at x=1500, for different settings of zfar.

    This is intended to check that:
    1) setting cameras.zfar propagates to the blending function, and the
       rendered sphere is (soft) clipped if it is beyond zfar;
    2) there are no numerical precision/overflow errors associated with
       larger world coordinates.
    """
    device = torch.device("cuda:0")

    # Init mesh
    sphere_mesh = ico_sphere(5, device)
    verts_padded = sphere_mesh.verts_padded() * 500
    faces_padded = sphere_mesh.faces_padded()
    feats = torch.ones_like(verts_padded, device=device)
    textures = TexturesVertex(verts_features=feats)
    sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

    R, T = look_at_view_transform(1500, 0.0, 0.0)

    # Init shader settings
    materials = Materials(device=device)
    lights = PointLights(device=device)
    lights.location = torch.tensor([0.0, 0.0, +1000.0], device=device)[None]

    raster_settings = RasterizationSettings(
        image_size=256, blur_radius=0.0, faces_per_pixel=1
    )
    for zfar in (10000.0, 100.0):
        cameras = FoVPerspectiveCameras(
            device=device, R=R, T=T, aspect_ratio=1.0, fov=60.0, zfar=zfar
        )
        rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
        blend_params = BlendParams(1e-4, 1e-4, (0, 0, 1.0))

        shader = SoftPhongShader(
            lights=lights,
            cameras=cameras,
            materials=materials,
            blend_params=blend_params,
        )
        renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
        images = renderer(sphere_mesh)
        rgb = images[0, ..., :3].squeeze().cpu()

        filename = "test_simple_sphere_outside_zfar_%d.png" % int(zfar)

        # Load reference image
        image_ref = load_rgb_image(filename, DATA_DIR)

        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / ("DEBUG_" + filename)
            )
        self.assertClose(rgb, image_ref, atol=0.05)
def test_initialize_materials_broadcast(self):
    materials = Materials(
        ambient_color=torch.randn(10, 3),
        diffuse_color=torch.randn(1, 3),
        specular_color=torch.randn(1, 3),
        shininess=torch.randn(1),
    )
    self.assertTrue(materials.ambient_color.shape == (10, 3))
    self.assertTrue(materials.diffuse_color.shape == (10, 3))
    self.assertTrue(materials.specular_color.shape == (10, 3))
    self.assertTrue(materials.shininess.shape == (10,))
def test_simple_sphere_batched(self):
    """
    Test output of phong shading matches a reference image using
    the default values for the light sources.
    """
    batch_size = 5
    device = torch.device("cuda:0")

    # Init mesh
    sphere_meshes = ico_sphere(5, device).extend(batch_size)
    verts_padded = sphere_meshes.verts_padded()
    faces_padded = sphere_meshes.faces_padded()
    textures = Textures(verts_rgb=torch.ones_like(verts_padded))
    sphere_meshes = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

    # Init rasterizer settings
    dist = torch.tensor([2.7]).repeat(batch_size).to(device)
    elev = torch.zeros_like(dist)
    azim = torch.zeros_like(dist)
    R, T = look_at_view_transform(dist, elev, azim)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0
    )

    # Init shader settings
    materials = Materials(device=device)
    lights = PointLights(device=device)
    lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]

    # Init renderer
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
        shader=HardPhongShader(lights=lights, cameras=cameras, materials=materials),
    )
    images = renderer(sphere_meshes)

    # Load ref image
    image_ref = load_rgb_image("test_simple_sphere_light.png")

    for i in range(batch_size):
        rgb = images[i, ..., :3].squeeze().cpu()
        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / f"DEBUG_simple_sphere_{i}.png"
            )
        self.assertTrue(torch.allclose(rgb, image_ref, atol=0.05))
def test_simple_sphere_batched(self):
    """
    Test a mesh with vertex textures can be extended to form a batch, and
    is rendered correctly with Phong, Gouraud and Flat Shaders.
    """
    batch_size = 20
    device = torch.device("cuda:0")

    # Init mesh with vertex textures.
    sphere_meshes = ico_sphere(5, device).extend(batch_size)
    verts_padded = sphere_meshes.verts_padded()
    faces_padded = sphere_meshes.faces_padded()
    textures = Textures(verts_rgb=torch.ones_like(verts_padded))
    sphere_meshes = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

    # Init rasterizer settings
    dist = torch.tensor([2.7]).repeat(batch_size).to(device)
    elev = torch.zeros_like(dist)
    azim = torch.zeros_like(dist)
    R, T = look_at_view_transform(dist, elev, azim)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0
    )

    # Init shader settings
    materials = Materials(device=device)
    lights = PointLights(device=device)
    lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]

    # Init renderer
    rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
    # NOTE: "phong" previously mapped to HardGouraudShader, which made the
    # Phong case a duplicate of the Gouraud one; it should use HardPhongShader.
    shaders = {
        "phong": HardPhongShader,
        "gouraud": HardGouraudShader,
        "flat": HardFlatShader,
    }
    for (name, shader_init) in shaders.items():
        shader = shader_init(lights=lights, cameras=cameras, materials=materials)
        renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
        images = renderer(sphere_meshes)
        image_ref = load_rgb_image("test_simple_sphere_light_%s.png" % name)
        for i in range(batch_size):
            rgb = images[i, ..., :3].squeeze().cpu()
            self.assertTrue(torch.allclose(rgb, image_ref, atol=0.05))
def test_cameras_kwarg(self):
    """
    Test that when cameras are passed in as a kwarg the rendering
    works as expected.
    """
    device = torch.device("cuda:0")

    # Init mesh
    sphere_mesh = ico_sphere(5, device)
    verts_padded = sphere_mesh.verts_padded()
    faces_padded = sphere_mesh.faces_padded()
    feats = torch.ones_like(verts_padded, device=device)
    textures = TexturesVertex(verts_features=feats)
    sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

    # No elevation or azimuth rotation
    R, T = look_at_view_transform(2.7, 0.0, 0.0)
    for cam_type in (
        FoVPerspectiveCameras,
        FoVOrthographicCameras,
        PerspectiveCameras,
        OrthographicCameras,
    ):
        cameras = cam_type(device=device, R=R, T=T)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]

        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1
        )
        rasterizer = MeshRasterizer(raster_settings=raster_settings)
        blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))
        shader = HardPhongShader(
            lights=lights,
            materials=materials,
            blend_params=blend_params,
        )
        renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)

        # Cameras can be passed into the renderer in the forward pass
        images = renderer(sphere_mesh, cameras=cameras)
        rgb = images.squeeze()[..., :3].cpu().numpy()
        image_ref = load_rgb_image(
            "test_simple_sphere_light_phong_%s.png" % cam_type.__name__, DATA_DIR
        )
        self.assertClose(rgb, image_ref, atol=0.05)
def test_materials_clone_to(self):
    device = torch.device("cuda:0")
    cpu = torch.device("cpu")
    mat = Materials()
    new_mat = mat.clone().to(device)
    self.assertTrue(mat.ambient_color.device == cpu)
    self.assertTrue(mat.diffuse_color.device == cpu)
    self.assertTrue(mat.specular_color.device == cpu)
    self.assertTrue(mat.shininess.device == cpu)
    self.assertTrue(new_mat.ambient_color.device == device)
    self.assertTrue(new_mat.diffuse_color.device == device)
    self.assertTrue(new_mat.specular_color.device == device)
    self.assertTrue(new_mat.shininess.device == device)
    self.assertSeparate(new_mat.ambient_color, mat.ambient_color)
    self.assertSeparate(new_mat.diffuse_color, mat.diffuse_color)
    self.assertSeparate(new_mat.specular_color, mat.specular_color)
    self.assertSeparate(new_mat.shininess, mat.shininess)
def __init__(
    self,
    background_color: Optional[Union[Tuple, List, torch.Tensor]] = None,
    device="cpu",
    cameras=None,
    lights=None,
    materials=None,
    blend_params=None,
):
    super().__init__()
    self.background_color = background_color
    self.lights = lights if lights is not None else PointLights(device=device)
    self.materials = (
        materials if materials is not None else Materials(device=device)
    )
    self.cameras = cameras
    self.blend_params = blend_params if blend_params is not None else BlendParams()
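# Usage sketch for the constructor above (illustrative only; it assumes a
# concrete shader such as HardPhongShader inherits this __init__): omitted
# arguments fall back to default PointLights, Materials and BlendParams
# created on `device`.
def _shader_defaults_sketch(cameras):
    shader = HardPhongShader(device="cpu", cameras=cameras)
    assert isinstance(shader.lights, PointLights)
    assert isinstance(shader.materials, Materials)
    assert isinstance(shader.blend_params, BlendParams)
    # background_color stays None unless passed explicitly.
    assert shader.background_color is None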
def test_init(self):
    """
    Initialize Materials class with the default values.
    """
    device = torch.device("cuda:0")
    mat = Materials(device=device)
    self.assertTrue(torch.is_tensor(mat.ambient_color))
    self.assertTrue(torch.is_tensor(mat.diffuse_color))
    self.assertTrue(torch.is_tensor(mat.specular_color))
    self.assertTrue(torch.is_tensor(mat.shininess))
    self.assertTrue(mat.ambient_color.device == device)
    self.assertTrue(mat.diffuse_color.device == device)
    self.assertTrue(mat.specular_color.device == device)
    self.assertTrue(mat.shininess.device == device)
    self.assertTrue(mat.ambient_color.shape == (1, 3))
    self.assertTrue(mat.diffuse_color.shape == (1, 3))
    self.assertTrue(mat.specular_color.shape == (1, 3))
    self.assertTrue(mat.shininess.shape == (1,))
def render_torch(
    self,
    verts,
    faces,
    rgb,
    bcg_color=(1.0, 1.0, 1.0),
    get_depth=False,
    get_alpha=False,
):
    b = verts.size(0)
    textures = TexturesVertex(verts_features=rgb.view(b, -1, 3))
    mesh = Meshes(verts=verts, faces=faces, textures=textures)
    fragments = self.rasterizer_torch(mesh)
    texels = mesh.sample_textures(fragments)
    materials = Materials(device=verts.device)
    blend_params = BlendParams(background_color=bcg_color)
    images = hard_rgb_blend(texels, fragments, blend_params)
    images = images[..., :3].permute(0, 3, 1, 2)
    out = (images,)
    if get_depth:
        depth = fragments.zbuf[..., 0]
        # Fill background pixels (zbuf == -1) with a depth beyond max_depth.
        mask = (depth == -1.0).float()
        max_depth = self.max_depth + 0.5 * (self.max_depth - self.min_depth)
        depth = mask * max_depth * torch.ones_like(depth) + (1 - mask) * depth
        out = out + (depth,)
    if get_alpha:
        colors = torch.ones_like(fragments.bary_coords)
        blend_params = BlendParams(
            sigma=1e-2, gamma=1e-4, background_color=(1.0, 1.0, 1.0)
        )
        alpha = sigmoid_alpha_blend(colors, fragments, blend_params)[..., -1]
        out = out + (alpha,)
    if len(out) == 1:
        out = out[0]
    return out
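# Usage sketch for render_torch above (illustrative; the tensor shapes are
# assumptions): padded verts (B, V, 3), faces (B, F, 3) and per-vertex rgb
# reshapeable to (B, V, 3) yield (B, 3, H, W) images, plus depth and alpha
# maps when requested.
def _render_torch_sketch(renderer, verts, faces, rgb):
    images, depth, alpha = renderer.render_torch(
        verts, faces, rgb, get_depth=True, get_alpha=True
    )
    # depth is the zbuf with background filled past max_depth; alpha comes
    # from sigmoid blending of ones, giving a soft silhouette mask.
    return images, depth, alpha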
def test_texture_map_atlas(self):
    """
    Test a mesh with a texture map as a per face atlas is loaded and rendered
    correctly. Also check that the backward pass for texture atlas rendering
    is differentiable.
    """
    device = torch.device("cuda:0")
    obj_dir = Path(__file__).resolve().parent.parent / "docs/tutorials/data"
    obj_filename = obj_dir / "cow_mesh/cow.obj"

    # Load mesh and texture as a per face texture atlas.
    verts, faces, aux = load_obj(
        obj_filename,
        device=device,
        load_textures=True,
        create_texture_atlas=True,
        texture_atlas_size=8,
        texture_wrap=None,
    )
    atlas = aux.texture_atlas
    mesh = Meshes(
        verts=[verts],
        faces=[faces.verts_idx],
        textures=TexturesAtlas(atlas=[atlas]),
    )

    # Init rasterizer settings
    R, T = look_at_view_transform(2.7, 0, 0)
    cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=512,
        blur_radius=0.0,
        faces_per_pixel=1,
        cull_backfaces=True,
        perspective_correct=False,
    )

    # Init shader settings
    materials = Materials(device=device, specular_color=((0, 0, 0),), shininess=0.0)
    lights = PointLights(device=device)
    # Place light behind the cow in world space. The front of
    # the cow is facing the -z direction.
    lights.location = torch.tensor([0.0, 0.0, 2.0], device=device)[None]

    # The HardPhongShader can be used directly with atlas textures.
    rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
    renderer = MeshRenderer(
        rasterizer=rasterizer,
        shader=HardPhongShader(lights=lights, cameras=cameras, materials=materials),
    )
    images = renderer(mesh)
    rgb = images[0, ..., :3].squeeze()

    # Load reference image
    image_ref = load_rgb_image("test_texture_atlas_8x8_back.png", DATA_DIR)

    if DEBUG:
        Image.fromarray((rgb.detach().cpu().numpy() * 255).astype(np.uint8)).save(
            DATA_DIR / "DEBUG_texture_atlas_8x8_back.png"
        )

    self.assertClose(rgb.cpu(), image_ref, atol=0.05)

    # Check gradients are propagated correctly back to the texture atlas.
    # Because of how texture sampling is implemented for the texture atlas
    # it is not possible to get gradients back to the vertices.
    atlas.requires_grad = True
    mesh = Meshes(
        verts=[verts],
        faces=[faces.verts_idx],
        textures=TexturesAtlas(atlas=[atlas]),
    )
    raster_settings = RasterizationSettings(
        image_size=512,
        blur_radius=0.0001,
        faces_per_pixel=5,
        cull_backfaces=True,
        clip_barycentric_coords=True,
    )
    images = renderer(mesh, raster_settings=raster_settings)
    images[0, ...].sum().backward()

    fragments = rasterizer(mesh, raster_settings=raster_settings)
    # Some of the bary coordinates are outside the
    # [0, 1] range as expected because the blur is > 0
    self.assertTrue(fragments.bary_coords.ge(1.0).any())
    self.assertIsNotNone(atlas.grad)
    self.assertTrue(atlas.grad.sum().abs() > 0.0)
def test_joined_spheres(self):
    """
    Test a list of Meshes can be joined as a single mesh and the single
    mesh is rendered correctly with Phong, Gouraud and Flat Shaders.
    """
    device = torch.device("cuda:0")

    # Init mesh with vertex textures.
    # Initialize a list containing two ico spheres of different sizes:
    # level 3 (642 verts, 1280 faces) and level 4 (2562 verts, 5120 faces).
    sphere_list = [ico_sphere(3, device), ico_sphere(4, device)]
    # Scale factors applied to the vertices to resize the spheres.
    scales = [0.25, 1]
    # Horizontal offsets applied to the spheres to prevent overlap.
    offsets = [1.2, -0.3]
    # Initialize a list containing the adjusted sphere meshes.
    sphere_mesh_list = []
    for i in range(len(sphere_list)):
        verts = sphere_list[i].verts_padded() * scales[i]
        verts[0, :, 0] += offsets[i]
        sphere_mesh_list.append(
            Meshes(verts=verts, faces=sphere_list[i].faces_padded())
        )
    joined_sphere_mesh = join_meshes_as_scene(sphere_mesh_list)
    joined_sphere_mesh.textures = TexturesVertex(
        verts_features=torch.ones_like(joined_sphere_mesh.verts_padded())
    )

    # Init rasterizer settings
    R, T = look_at_view_transform(2.7, 0.0, 0.0)
    cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=512,
        blur_radius=0.0,
        faces_per_pixel=1,
        perspective_correct=False,
    )

    # Init shader settings
    materials = Materials(device=device)
    lights = PointLights(device=device)
    lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]
    blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

    # Init renderer
    rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
    shaders = {
        "phong": HardPhongShader,
        "gouraud": HardGouraudShader,
        "flat": HardFlatShader,
    }
    for (name, shader_init) in shaders.items():
        shader = shader_init(
            lights=lights,
            cameras=cameras,
            materials=materials,
            blend_params=blend_params,
        )
        renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
        image = renderer(joined_sphere_mesh)
        rgb = image[..., :3].squeeze().cpu()
        if DEBUG:
            file_name = "DEBUG_joined_spheres_%s.png" % name
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / file_name
            )
        image_ref = load_rgb_image("test_joined_spheres_%s.png" % name, DATA_DIR)
        self.assertClose(rgb, image_ref, atol=0.05)
def test_simple_sphere(self, elevated_camera=False, check_depth=False):
    """
    Test output of phong and gouraud shading matches a reference image using
    the default values for the light sources.

    Args:
        elevated_camera: Defines whether the camera observing the scene should
            have an elevation of 45 degrees.
    """
    device = torch.device("cuda:0")

    # Init mesh
    sphere_mesh = ico_sphere(5, device)
    verts_padded = sphere_mesh.verts_padded()
    faces_padded = sphere_mesh.faces_padded()
    feats = torch.ones_like(verts_padded, device=device)
    textures = TexturesVertex(verts_features=feats)
    sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

    # Init rasterizer settings
    if elevated_camera:
        # Elevated and rotated camera
        R, T = look_at_view_transform(dist=2.7, elev=45.0, azim=45.0)
        postfix = "_elevated_"
        # If y axis is up, the spot of light should
        # be on the bottom left of the sphere.
    else:
        # No elevation or azimuth rotation
        R, T = look_at_view_transform(2.7, 0.0, 0.0)
        postfix = "_"

    for cam_type in (
        FoVPerspectiveCameras,
        FoVOrthographicCameras,
        PerspectiveCameras,
        OrthographicCameras,
    ):
        cameras = cam_type(device=device, R=R, T=T)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]

        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1
        )
        rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
        blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

        # Test several shaders
        shaders = {
            "phong": HardPhongShader,
            "gouraud": HardGouraudShader,
            "flat": HardFlatShader,
        }
        for (name, shader_init) in shaders.items():
            shader = shader_init(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            if check_depth:
                renderer = MeshRendererWithFragments(
                    rasterizer=rasterizer, shader=shader
                )
                images, fragments = renderer(sphere_mesh)
                self.assertClose(fragments.zbuf, rasterizer(sphere_mesh).zbuf)
            else:
                renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
                images = renderer(sphere_mesh)

            rgb = images[0, ..., :3].squeeze().cpu()
            filename = "simple_sphere_light_%s%s%s.png" % (
                name,
                postfix,
                cam_type.__name__,
            )
            image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)
            self.assertClose(rgb, image_ref, atol=0.05)

            if DEBUG:
                filename = "DEBUG_%s" % filename
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / filename
                )

        ########################################################
        # Move the light to the +z axis in world space so it is
        # behind the sphere. Note that +Z is in, +Y up,
        # +X left for both world and camera space.
        ########################################################
        lights.location[..., 2] = -2.0
        phong_shader = HardPhongShader(
            lights=lights,
            cameras=cameras,
            materials=materials,
            blend_params=blend_params,
        )
        if check_depth:
            phong_renderer = MeshRendererWithFragments(
                rasterizer=rasterizer, shader=phong_shader
            )
            images, fragments = phong_renderer(sphere_mesh, lights=lights)
            self.assertClose(
                fragments.zbuf, rasterizer(sphere_mesh, lights=lights).zbuf
            )
        else:
            phong_renderer = MeshRenderer(rasterizer=rasterizer, shader=phong_shader)
            images = phong_renderer(sphere_mesh, lights=lights)

        rgb = images[0, ..., :3].squeeze().cpu()
        if DEBUG:
            filename = "DEBUG_simple_sphere_dark%s%s.png" % (
                postfix,
                cam_type.__name__,
            )
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / filename
            )
        image_ref_phong_dark = load_rgb_image(
            "test_simple_sphere_dark%s%s.png" % (postfix, cam_type.__name__),
            DATA_DIR,
        )
        self.assertClose(rgb, image_ref_phong_dark, atol=0.05)
def test_texture_map(self):
    """
    Test a mesh with a texture map is loaded and rendered correctly.
    The pupils in the eyes of the cow should always be looking to the left.
    """
    device = torch.device("cuda:0")
    obj_dir = Path(__file__).resolve().parent.parent / "docs/tutorials/data"
    obj_filename = obj_dir / "cow_mesh/cow.obj"

    # Load mesh + texture
    verts, faces, aux = load_obj(
        obj_filename, device=device, load_textures=True, texture_wrap=None
    )
    tex_map = list(aux.texture_images.values())[0]
    tex_map = tex_map[None, ...].to(faces.textures_idx.device)
    textures = TexturesUV(
        maps=tex_map, faces_uvs=[faces.textures_idx], verts_uvs=[aux.verts_uvs]
    )
    mesh = Meshes(verts=[verts], faces=[faces.verts_idx], textures=textures)

    # Init rasterizer settings
    R, T = look_at_view_transform(2.7, 0, 0)
    cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=512, blur_radius=0.0, faces_per_pixel=1
    )

    # Init shader settings
    materials = Materials(device=device)
    lights = PointLights(device=device)
    # Place light behind the cow in world space. The front of
    # the cow is facing the -z direction.
    lights.location = torch.tensor([0.0, 0.0, 2.0], device=device)[None]

    blend_params = BlendParams(
        sigma=1e-1,
        gamma=1e-4,
        background_color=torch.tensor([1.0, 1.0, 1.0], device=device),
    )
    # Init renderer
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
        shader=TexturedSoftPhongShader(
            lights=lights,
            cameras=cameras,
            materials=materials,
            blend_params=blend_params,
        ),
    )

    # Load reference image
    image_ref = load_rgb_image("test_texture_map_back.png", DATA_DIR)

    for bin_size in [0, None]:
        # Check both naive and coarse to fine produce the same output.
        renderer.rasterizer.raster_settings.bin_size = bin_size

        images = renderer(mesh)
        rgb = images[0, ..., :3].squeeze().cpu()

        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_texture_map_back.png"
            )

        # NOTE some pixels can be flaky and will not lead to
        # `cond1` being true. Add `cond2` and check `cond1 or cond2`
        cond1 = torch.allclose(rgb, image_ref, atol=0.05)
        cond2 = ((rgb - image_ref).abs() > 0.05).sum() < 5
        self.assertTrue(cond1 or cond2)

    # Check grad exists
    [verts] = mesh.verts_list()
    verts.requires_grad = True
    mesh2 = Meshes(verts=[verts], faces=mesh.faces_list(), textures=mesh.textures)
    images = renderer(mesh2)
    images[0, ...].sum().backward()
    self.assertIsNotNone(verts.grad)

    ##########################################
    # Check rendering of the front of the cow
    ##########################################
    R, T = look_at_view_transform(2.7, 0, 180)
    cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

    # Move light to the front of the cow in world space
    lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]

    # Load reference image
    image_ref = load_rgb_image("test_texture_map_front.png", DATA_DIR)

    for bin_size in [0, None]:
        # Check both naive and coarse to fine produce the same output.
        renderer.rasterizer.raster_settings.bin_size = bin_size

        images = renderer(mesh, cameras=cameras, lights=lights)
        rgb = images[0, ..., :3].squeeze().cpu()

        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_texture_map_front.png"
            )

        # NOTE some pixels can be flaky and will not lead to
        # `cond1` being true. Add `cond2` and check `cond1 or cond2`
        cond1 = torch.allclose(rgb, image_ref, atol=0.05)
        cond2 = ((rgb - image_ref).abs() > 0.05).sum() < 5
        self.assertTrue(cond1 or cond2)

    #################################
    # Add blurring to rasterization
    #################################
    R, T = look_at_view_transform(2.7, 0, 180)
    cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
    blend_params = BlendParams(sigma=5e-4, gamma=1e-4)
    raster_settings = RasterizationSettings(
        image_size=512,
        blur_radius=np.log(1.0 / 1e-4 - 1.0) * blend_params.sigma,
        faces_per_pixel=100,
        clip_barycentric_coords=True,
        perspective_correct=False,
    )

    # Load reference image
    image_ref = load_rgb_image("test_blurry_textured_rendering.png", DATA_DIR)

    for bin_size in [0, None]:
        # Check both naive and coarse to fine produce the same output.
        renderer.rasterizer.raster_settings.bin_size = bin_size

        images = renderer(
            mesh.clone(),
            cameras=cameras,
            raster_settings=raster_settings,
            blend_params=blend_params,
        )
        rgb = images[0, ..., :3].squeeze().cpu()

        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_blurry_textured_rendering.png"
            )

        self.assertClose(rgb, image_ref, atol=0.05)
def test_simple_sphere(self, elevated_camera=False):
    """
    Test output of phong and gouraud shading matches a reference image using
    the default values for the light sources.

    Args:
        elevated_camera: Defines whether the camera observing the scene should
            have an elevation of 45 degrees.
    """
    device = torch.device("cuda:0")

    # Init mesh
    sphere_mesh = ico_sphere(5, device)
    verts_padded = sphere_mesh.verts_padded()
    faces_padded = sphere_mesh.faces_padded()
    textures = Textures(verts_rgb=torch.ones_like(verts_padded))
    sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

    # Init rasterizer settings
    if elevated_camera:
        # Elevated and rotated camera
        R, T = look_at_view_transform(dist=2.7, elev=45.0, azim=45.0)
        postfix = "_elevated_camera"
        # If y axis is up, the spot of light should
        # be on the bottom left of the sphere.
    else:
        # No elevation or azimuth rotation
        R, T = look_at_view_transform(2.7, 0.0, 0.0)
        postfix = ""
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

    # Init shader settings
    materials = Materials(device=device)
    lights = PointLights(device=device)
    lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]

    raster_settings = RasterizationSettings(
        image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0
    )

    # Init renderer
    rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
    renderer = MeshRenderer(
        rasterizer=rasterizer,
        shader=HardPhongShader(lights=lights, cameras=cameras, materials=materials),
    )
    images = renderer(sphere_mesh)
    rgb = images[0, ..., :3].squeeze().cpu()
    if DEBUG:
        filename = "DEBUG_simple_sphere_light%s.png" % postfix
        Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
            DATA_DIR / filename
        )

    # Load reference image
    image_ref_phong = load_rgb_image("test_simple_sphere_light%s.png" % postfix)
    self.assertTrue(torch.allclose(rgb, image_ref_phong, atol=0.05))

    ########################################################
    # Move the light to the +z axis in world space so it is
    # behind the sphere. Note that +Z is in, +Y up,
    # +X left for both world and camera space.
    ########################################################
    lights.location[..., 2] = -2.0
    images = renderer(sphere_mesh, lights=lights)
    rgb = images[0, ..., :3].squeeze().cpu()
    if DEBUG:
        filename = "DEBUG_simple_sphere_dark%s.png" % postfix
        Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
            DATA_DIR / filename
        )

    # Load reference image
    image_ref_phong_dark = load_rgb_image("test_simple_sphere_dark%s.png" % postfix)
    self.assertTrue(torch.allclose(rgb, image_ref_phong_dark, atol=0.05))

    ######################################
    # Change the shader to a GouraudShader
    ######################################
    lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]
    renderer = MeshRenderer(
        rasterizer=rasterizer,
        shader=HardGouraudShader(
            lights=lights, cameras=cameras, materials=materials
        ),
    )
    images = renderer(sphere_mesh)
    rgb = images[0, ..., :3].squeeze().cpu()
    if DEBUG:
        filename = "DEBUG_simple_sphere_light_gouraud%s.png" % postfix
        Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
            DATA_DIR / filename
        )

    # Load reference image
    image_ref_gouraud = load_rgb_image(
        "test_simple_sphere_light_gouraud%s.png" % postfix
    )
    self.assertTrue(torch.allclose(rgb, image_ref_gouraud, atol=0.005))

    #######################################
    # Change the shader to a HardFlatShader
    #######################################
    renderer = MeshRenderer(
        rasterizer=rasterizer,
        shader=HardFlatShader(lights=lights, cameras=cameras, materials=materials),
    )
    images = renderer(sphere_mesh)
    rgb = images[0, ..., :3].squeeze().cpu()
    if DEBUG:
        filename = "DEBUG_simple_sphere_light_flat%s.png" % postfix
        Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
            DATA_DIR / filename
        )

    # Load reference image
    image_ref_flat = load_rgb_image("test_simple_sphere_light_flat%s.png" % postfix)
    self.assertTrue(torch.allclose(rgb, image_ref_flat, atol=0.005))
def test_texture_map(self):
    """
    Test a mesh with a texture map is loaded and rendered correctly.
    The pupils in the eyes of the cow should always be looking to the left.
    """
    device = torch.device("cuda:0")
    DATA_DIR = Path(__file__).resolve().parent.parent / "docs/tutorials/data"
    obj_filename = DATA_DIR / "cow_mesh/cow.obj"

    # Load mesh + texture
    mesh = load_objs_as_meshes([obj_filename], device=device)

    # Init rasterizer settings
    R, T = look_at_view_transform(2.7, 0, 0)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0
    )

    # Init shader settings
    materials = Materials(device=device)
    lights = PointLights(device=device)
    # Place light behind the cow in world space. The front of
    # the cow is facing the -z direction.
    lights.location = torch.tensor([0.0, 0.0, 2.0], device=device)[None]

    # Init renderer
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
        shader=TexturedSoftPhongShader(
            lights=lights, cameras=cameras, materials=materials
        ),
    )
    images = renderer(mesh)
    rgb = images[0, ..., :3].squeeze().cpu()

    # Load reference image
    image_ref = load_rgb_image("test_texture_map_back.png")

    if DEBUG:
        Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
            DATA_DIR / "DEBUG_texture_map_back.png"
        )

    self.assertTrue(torch.allclose(rgb, image_ref, atol=0.05))

    # Check grad exists
    [verts] = mesh.verts_list()
    verts.requires_grad = True
    mesh2 = Meshes(verts=[verts], faces=mesh.faces_list(), textures=mesh.textures)
    images = renderer(mesh2)
    images[0, ...].sum().backward()
    self.assertIsNotNone(verts.grad)

    ##########################################
    # Check rendering of the front of the cow
    ##########################################
    R, T = look_at_view_transform(2.7, 0, 180)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

    # Move light to the front of the cow in world space
    lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]
    images = renderer(mesh, cameras=cameras, lights=lights)
    rgb = images[0, ..., :3].squeeze().cpu()

    # Load reference image
    image_ref = load_rgb_image("test_texture_map_front.png")

    if DEBUG:
        Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
            DATA_DIR / "DEBUG_texture_map_front.png"
        )

    # The reference was loaded but never compared; assert the match here.
    self.assertTrue(torch.allclose(rgb, image_ref, atol=0.05))

    #################################
    # Add blurring to rasterization
    #################################
    R, T = look_at_view_transform(2.7, 0, 180)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    blend_params = BlendParams(sigma=5e-4, gamma=1e-4)
    raster_settings = RasterizationSettings(
        image_size=512,
        blur_radius=np.log(1.0 / 1e-4 - 1.0) * blend_params.sigma,
        faces_per_pixel=100,
        bin_size=0,
    )
    images = renderer(
        mesh.clone(),
        cameras=cameras,
        raster_settings=raster_settings,
        blend_params=blend_params,
    )
    rgb = images[0, ..., :3].squeeze().cpu()

    # Load reference image
    image_ref = load_rgb_image("test_blurry_textured_rendering.png")

    if DEBUG:
        Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
            DATA_DIR / "DEBUG_blurry_textured_rendering.png"
        )

    self.assertTrue(torch.allclose(rgb, image_ref, atol=0.05))
def test_render_cow(self):
    """
    Test a larger textured mesh is rendered correctly in a non square image.
    """
    device = torch.device("cuda:0")
    obj_dir = get_pytorch3d_dir() / "docs/tutorials/data"
    obj_filename = obj_dir / "cow_mesh/cow.obj"

    # Load mesh + texture
    verts, faces, aux = load_obj(
        obj_filename, device=device, load_textures=True, texture_wrap=None
    )
    tex_map = list(aux.texture_images.values())[0]
    tex_map = tex_map[None, ...].to(faces.textures_idx.device)
    textures = TexturesUV(
        maps=tex_map, faces_uvs=[faces.textures_idx], verts_uvs=[aux.verts_uvs]
    )
    mesh = Meshes(verts=[verts], faces=[faces.verts_idx], textures=textures)

    # Init rasterizer settings
    R, T = look_at_view_transform(2.7, 0, 180)
    cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=(512, 1024), blur_radius=0.0, faces_per_pixel=1
    )

    # Init shader settings
    materials = Materials(device=device)
    lights = PointLights(device=device)
    lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]
    blend_params = BlendParams(
        sigma=1e-1,
        gamma=1e-4,
        background_color=torch.tensor([1.0, 1.0, 1.0], device=device),
    )

    # Init renderer
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
        shader=SoftPhongShader(
            lights=lights,
            cameras=cameras,
            materials=materials,
            blend_params=blend_params,
        ),
    )

    # Load reference image
    image_ref = load_rgb_image("test_cow_image_rectangle.png", DATA_DIR)

    for bin_size in [0, None]:
        # Check both naive and coarse to fine produce the same output.
        renderer.rasterizer.raster_settings.bin_size = bin_size
        images = renderer(mesh)
        rgb = images[0, ..., :3].squeeze().cpu()

        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_cow_image_rectangle.png"
            )

        # NOTE some pixels can be flaky
        cond1 = torch.allclose(rgb, image_ref, atol=0.05)
        self.assertTrue(cond1)
def test_initialize_materials_mixed_inputs(self):
    mat = Materials(ambient_color=torch.randn(1, 3), diffuse_color=((1, 1, 1),))
    self.assertTrue(mat.ambient_color.shape == (1, 3))
    self.assertTrue(mat.diffuse_color.shape == (1, 3))
def test_simple_sphere_batched(self):
    """
    Test a mesh with vertex textures can be extended to form a batch, and
    is rendered correctly with Phong, Gouraud and Flat Shaders with batched
    lighting and hard and soft blending.
    """
    batch_size = 5
    device = torch.device("cuda:0")

    # Init mesh with vertex textures.
    sphere_meshes = ico_sphere(5, device).extend(batch_size)
    verts_padded = sphere_meshes.verts_padded()
    faces_padded = sphere_meshes.faces_padded()
    feats = torch.ones_like(verts_padded, device=device)
    textures = TexturesVertex(verts_features=feats)
    sphere_meshes = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

    # Init rasterizer settings
    dist = torch.tensor([2.7]).repeat(batch_size).to(device)
    elev = torch.zeros_like(dist)
    azim = torch.zeros_like(dist)
    R, T = look_at_view_transform(dist, elev, azim)
    cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=512, blur_radius=0.0, faces_per_pixel=4
    )

    # Init shader settings
    materials = Materials(device=device)
    lights_location = torch.tensor([0.0, 0.0, +2.0], device=device)
    lights_location = lights_location[None].expand(batch_size, -1)
    lights = PointLights(device=device, location=lights_location)
    blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

    # Init renderer
    rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
    shader_tests = [
        ShaderTest(HardPhongShader, "phong", "hard_phong"),
        ShaderTest(SoftPhongShader, "phong", "soft_phong"),
        ShaderTest(HardGouraudShader, "gouraud", "hard_gouraud"),
        ShaderTest(HardFlatShader, "flat", "hard_flat"),
    ]
    for test in shader_tests:
        reference_name = test.reference_name
        debug_name = test.debug_name
        shader = test.shader(
            lights=lights,
            cameras=cameras,
            materials=materials,
            blend_params=blend_params,
        )
        renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
        images = renderer(sphere_meshes)
        image_ref = load_rgb_image(
            "test_simple_sphere_light_%s_%s.png"
            % (reference_name, type(cameras).__name__),
            DATA_DIR,
        )
        for i in range(batch_size):
            rgb = images[i, ..., :3].squeeze().cpu()
            if i == 0 and DEBUG:
                filename = "DEBUG_simple_sphere_batched_%s_%s.png" % (
                    debug_name,
                    type(cameras).__name__,
                )
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / filename
                )
            self.assertClose(rgb, image_ref, atol=0.05)
def test_to(self):
    # Test moving all the tensors in the renderer to a new device
    # to support multigpu rendering.
    device1 = torch.device("cpu")

    R, T = look_at_view_transform(1500, 0.0, 0.0)

    # Init shader settings
    materials = Materials(device=device1)
    lights = PointLights(device=device1)
    lights.location = torch.tensor([0.0, 0.0, +1000.0], device=device1)[None]

    raster_settings = RasterizationSettings(
        image_size=256, blur_radius=0.0, faces_per_pixel=1
    )
    cameras = FoVPerspectiveCameras(
        device=device1, R=R, T=T, aspect_ratio=1.0, fov=60.0, zfar=100
    )
    rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)

    blend_params = BlendParams(
        1e-4,
        1e-4,
        background_color=torch.zeros(3, dtype=torch.float32, device=device1),
    )
    shader = SoftPhongShader(
        lights=lights,
        cameras=cameras,
        materials=materials,
        blend_params=blend_params,
    )
    renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)

    def _check_props_on_device(renderer, device):
        self.assertEqual(renderer.rasterizer.cameras.device, device)
        self.assertEqual(renderer.shader.cameras.device, device)
        self.assertEqual(renderer.shader.lights.device, device)
        self.assertEqual(renderer.shader.lights.ambient_color.device, device)
        self.assertEqual(renderer.shader.materials.device, device)
        self.assertEqual(renderer.shader.materials.ambient_color.device, device)

    mesh = ico_sphere(2, device1)
    verts_padded = mesh.verts_padded()
    textures = TexturesVertex(
        verts_features=torch.ones_like(verts_padded, device=device1)
    )
    mesh.textures = textures
    _check_props_on_device(renderer, device1)

    # Test rendering on cpu
    output_images = renderer(mesh)
    self.assertEqual(output_images.device, device1)

    # Move renderer and mesh to another device and re render.
    # This also tests that background_color is correctly moved to
    # the new device
    device2 = torch.device("cuda:0")
    renderer.to(device2)
    mesh = mesh.to(device2)
    _check_props_on_device(renderer, device2)
    output_images = renderer(mesh)
    self.assertEqual(output_images.device, device2)
def test_texture_map_atlas(self):
    """
    Test a mesh with a texture map as a per face atlas is loaded and rendered
    correctly.
    """
    device = torch.device("cuda:0")
    obj_dir = Path(__file__).resolve().parent.parent / "docs/tutorials/data"
    obj_filename = obj_dir / "cow_mesh/cow.obj"

    # Load mesh and texture as a per face texture atlas.
    verts, faces, aux = load_obj(
        obj_filename,
        device=device,
        load_textures=True,
        create_texture_atlas=True,
        texture_atlas_size=8,
        texture_wrap=None,
    )
    mesh = Meshes(
        verts=[verts],
        faces=[faces.verts_idx],
        textures=TexturesAtlas(atlas=[aux.texture_atlas]),
    )

    # Init rasterizer settings
    R, T = look_at_view_transform(2.7, 0, 0)
    cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=512, blur_radius=0.0, faces_per_pixel=1, cull_backfaces=True
    )

    # Init shader settings
    materials = Materials(device=device, specular_color=((0, 0, 0),), shininess=0.0)
    lights = PointLights(device=device)
    # Place light behind the cow in world space. The front of
    # the cow is facing the -z direction.
    lights.location = torch.tensor([0.0, 0.0, 2.0], device=device)[None]

    # The HardPhongShader can be used directly with atlas textures.
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
        shader=HardPhongShader(lights=lights, cameras=cameras, materials=materials),
    )
    images = renderer(mesh)
    rgb = images[0, ..., :3].squeeze().cpu()

    # Load reference image
    image_ref = load_rgb_image("test_texture_atlas_8x8_back.png", DATA_DIR)

    if DEBUG:
        Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
            DATA_DIR / "DEBUG_texture_atlas_8x8_back.png"
        )

    self.assertClose(rgb, image_ref, atol=0.05)
def test_texture_map(self):
    """
    Test a mesh with a texture map is loaded and rendered correctly.
    """
    device = torch.device("cuda:0")
    DATA_DIR = Path(__file__).resolve().parent.parent / "docs/tutorials/data"
    obj_filename = DATA_DIR / "cow_mesh/cow.obj"

    # Load mesh + texture
    mesh = load_objs_as_meshes([obj_filename], device=device)

    # Init rasterizer settings
    R, T = look_at_view_transform(2.7, 10, 20)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0
    )

    # Init shader settings
    materials = Materials(device=device)
    lights = PointLights(device=device)
    lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]

    # Init renderer
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
        shader=TexturedSoftPhongShader(
            lights=lights, cameras=cameras, materials=materials
        ),
    )
    images = renderer(mesh)
    rgb = images[0, ..., :3].squeeze().cpu()

    # Load reference image
    image_ref = load_rgb_image("test_texture_map.png")

    if DEBUG:
        Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
            DATA_DIR / "DEBUG_texture_map.png"
        )

    # There's a calculation instability on the corner of the ear of the cow.
    # We ignore that pixel.
    image_ref[137, 166] = 0
    rgb[137, 166] = 0

    self.assertTrue(torch.allclose(rgb, image_ref, atol=0.05))

    # Check grad exists
    [verts] = mesh.verts_list()
    verts.requires_grad = True
    mesh2 = Meshes(verts=[verts], faces=mesh.faces_list(), textures=mesh.textures)
    images = renderer(mesh2)
    images[0, ...].sum().backward()
    self.assertIsNotNone(verts.grad)
def test_texture_map(self):
    """
    Test a mesh with a texture map is loaded and rendered correctly.
    """
    device = torch.device("cuda:0")
    DATA_DIR = Path(__file__).resolve().parent.parent / "docs/tutorials/data"
    obj_filename = DATA_DIR / "cow_mesh/cow.obj"

    # Load mesh + texture
    verts, faces, aux = load_obj(obj_filename)
    faces_idx = faces.verts_idx.to(device)
    verts = verts.to(device)
    texture_uvs = aux.verts_uvs
    materials = aux.material_colors
    tex_maps = aux.texture_images

    # tex_maps is a dictionary of material names as keys and texture images
    # as values. Only need the images for this example.
    textures = Textures(
        maps=list(tex_maps.values()),
        faces_uvs=faces.textures_idx.to(torch.int64).to(device)[None, :],
        verts_uvs=texture_uvs.to(torch.float32).to(device)[None, :],
    )
    mesh = Meshes(verts=[verts], faces=[faces_idx], textures=textures)

    # Init rasterizer settings
    R, T = look_at_view_transform(2.7, 10, 20)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0
    )

    # Init shader settings
    materials = Materials(device=device)
    lights = PointLights(device=device)
    lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]

    # Init renderer
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
        shader=TexturedPhongShader(
            lights=lights, cameras=cameras, materials=materials
        ),
    )
    images = renderer(mesh)
    rgb = images[0, ..., :3].squeeze().cpu()

    # Load reference image
    image_ref = load_rgb_image("test_texture_map.png")

    if DEBUG:
        Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
            DATA_DIR / "DEBUG_texture_map.png"
        )

    # There's a calculation instability on the corner of the ear of the cow.
    # We ignore that pixel.
    image_ref[137, 166] = 0
    rgb[137, 166] = 0

    self.assertTrue(torch.allclose(rgb, image_ref, atol=0.05))

    # Check grad exists
    verts = verts.clone()
    verts.requires_grad = True
    mesh = Meshes(verts=[verts], faces=[faces_idx], textures=textures)
    images = renderer(mesh)
    images[0, ...].sum().backward()
    self.assertIsNotNone(verts.grad)
def test_simple_sphere(self, elevated_camera=False):
    """
    Test output of phong and gouraud shading matches a reference image using
    the default values for the light sources.

    Args:
        elevated_camera: Defines whether the camera observing the scene should
            have an elevation of 45 degrees.
    """
    device = torch.device("cuda:0")

    # Init mesh
    sphere_mesh = ico_sphere(5, device)
    verts_padded = sphere_mesh.verts_padded()
    faces_padded = sphere_mesh.faces_padded()
    textures = Textures(verts_rgb=torch.ones_like(verts_padded))
    sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

    # Init rasterizer settings
    if elevated_camera:
        R, T = look_at_view_transform(2.7, 45.0, 0.0)
        postfix = "_elevated_camera"
    else:
        R, T = look_at_view_transform(2.7, 0.0, 0.0)
        postfix = ""
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=512, blur_radius=0.0, faces_per_pixel=1, bin_size=0
    )

    # Init shader settings
    materials = Materials(device=device)
    lights = PointLights(device=device)
    lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]

    # Init renderer
    rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
    renderer = MeshRenderer(
        rasterizer=rasterizer,
        shader=HardPhongShader(lights=lights, cameras=cameras, materials=materials),
    )
    images = renderer(sphere_mesh)
    rgb = images[0, ..., :3].squeeze().cpu()
    if DEBUG:
        Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
            DATA_DIR / "DEBUG_simple_sphere_light%s.png" % postfix
        )

    # Load reference image
    image_ref_phong = load_rgb_image(
        "test_simple_sphere_illuminated%s.png" % postfix
    )
    self.assertTrue(torch.allclose(rgb, image_ref_phong, atol=0.05))

    ###################################
    # Move the light behind the object
    ###################################
    # Check the image is dark
    lights.location[..., 2] = +2.0
    images = renderer(sphere_mesh, lights=lights)
    rgb = images[0, ..., :3].squeeze().cpu()
    if DEBUG:
        Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
            DATA_DIR / "DEBUG_simple_sphere_dark%s.png" % postfix
        )

    # Load reference image
    image_ref_phong_dark = load_rgb_image("test_simple_sphere_dark%s.png" % postfix)
    self.assertTrue(torch.allclose(rgb, image_ref_phong_dark, atol=0.05))

    ######################################
    # Change the shader to a GouraudShader
    ######################################
    lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]
    renderer = MeshRenderer(
        rasterizer=rasterizer,
        shader=HardGouraudShader(
            lights=lights, cameras=cameras, materials=materials
        ),
    )
    images = renderer(sphere_mesh)
    rgb = images[0, ..., :3].squeeze().cpu()
    if DEBUG:
        Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
            DATA_DIR / "DEBUG_simple_sphere_light_gouraud%s.png" % postfix
        )

    # Load reference image
    image_ref_gouraud = load_rgb_image(
        "test_simple_sphere_light_gouraud%s.png" % postfix
    )
    self.assertTrue(torch.allclose(rgb, image_ref_gouraud, atol=0.005))
    self.assertFalse(torch.allclose(rgb, image_ref_phong, atol=0.005))