def test_join_verts(self):
    """Meshes with TexturesVertex joined into a scene"""
    # Test the result of rendering two tori with separate textures.
    # The expected result is consistent with rendering them each alone.
    torch.manual_seed(1)
    device = torch.device("cuda:0")
    plain_torus = torus(r=1, R=4, sides=5, rings=6, device=device)
    [verts] = plain_torus.verts_list()
    verts_shifted1 = verts.clone()
    verts_shifted1 *= 0.5
    verts_shifted1[:, 1] += 7
    faces = plain_torus.faces_list()
    textures1 = TexturesVertex(verts_features=[torch.rand_like(verts)])
    textures2 = TexturesVertex(verts_features=[torch.rand_like(verts)])
    mesh1 = Meshes(verts=[verts], faces=faces, textures=textures1)
    mesh2 = Meshes(verts=[verts_shifted1], faces=faces, textures=textures2)
    mesh = join_meshes_as_scene([mesh1, mesh2])

    R, T = look_at_view_transform(18, 0, 0)
    cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=256, blur_radius=0.0, faces_per_pixel=1
    )
    lights = AmbientLights(device=device)
    blend_params = BlendParams(
        sigma=1e-1,
        gamma=1e-4,
        background_color=torch.tensor([1.0, 1.0, 1.0], device=device),
    )
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
        shader=HardPhongShader(
            device=device, blend_params=blend_params, cameras=cameras, lights=lights
        ),
    )
    output = renderer(mesh)

    image_ref = load_rgb_image("test_joinverts_final.png", DATA_DIR)

    if DEBUG:
        debugging_outputs = []
        for mesh_ in [mesh1, mesh2]:
            debugging_outputs.append(renderer(mesh_))
        Image.fromarray(
            (output[0, ..., :3].cpu().numpy() * 255).astype(np.uint8)
        ).save(DATA_DIR / "test_joinverts_final_.png")
        Image.fromarray(
            (debugging_outputs[0][0, ..., :3].cpu().numpy() * 255).astype(np.uint8)
        ).save(DATA_DIR / "test_joinverts_1.png")
        Image.fromarray(
            (debugging_outputs[1][0, ..., :3].cpu().numpy() * 255).astype(np.uint8)
        ).save(DATA_DIR / "test_joinverts_2.png")

    result = output[0, ..., :3].cpu()
    self.assertClose(result, image_ref, atol=0.05)
def test_interpolate_attributes_grad(self):
    verts = torch.randn((4, 3), dtype=torch.float32)
    faces = torch.tensor([[2, 1, 0], [3, 1, 0]], dtype=torch.int64)
    vert_tex = torch.tensor(
        [[0, 1, 0], [0, 1, 1], [1, 1, 0], [1, 1, 1]],
        dtype=torch.float32,
        requires_grad=True,
    )
    tex = TexturesVertex(verts_features=vert_tex[None, :])
    mesh = Meshes(verts=[verts], faces=[faces], textures=tex)
    pix_to_face = torch.tensor([0, 1], dtype=torch.int64).view(1, 1, 1, 2)
    barycentric_coords = torch.tensor(
        [[0.5, 0.3, 0.2], [0.3, 0.6, 0.1]], dtype=torch.float32
    ).view(1, 1, 1, 2, -1)
    fragments = Fragments(
        pix_to_face=pix_to_face,
        bary_coords=barycentric_coords,
        zbuf=torch.ones_like(pix_to_face),
        dists=torch.ones_like(pix_to_face),
    )
    grad_vert_tex = torch.tensor(
        [[0.3, 0.3, 0.3], [0.9, 0.9, 0.9], [0.5, 0.5, 0.5], [0.3, 0.3, 0.3]],
        dtype=torch.float32,
    )
    verts_features_packed = mesh.textures.verts_features_packed()
    faces_verts_features = verts_features_packed[mesh.faces_packed()]
    texels = interpolate_face_attributes(
        fragments.pix_to_face, fragments.bary_coords, faces_verts_features
    )
    texels.sum().backward()
    self.assertTrue(hasattr(vert_tex, "grad"))
    self.assertTrue(torch.allclose(vert_tex.grad, grad_vert_tex[None, :]))
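# A quick derivation of grad_vert_tex above (a sketch, not an extra test):
# texels.sum() is linear in the vertex features, so the gradient for each
# vertex feature is the sum of that vertex's barycentric weights over all
# pixels whose face contains it:
#   vert 0: face 0 (w=0.2) + face 1 (w=0.1) -> 0.3
#   vert 1: face 0 (w=0.3) + face 1 (w=0.6) -> 0.9
#   vert 2: face 0 only (w=0.5)             -> 0.5
#   vert 3: face 1 only (w=0.3)             -> 0.3
# The same value repeats across the three feature channels.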
def test_interpolate_attributes(self):
    verts = torch.randn((4, 3), dtype=torch.float32)
    faces = torch.tensor([[2, 1, 0], [3, 1, 0]], dtype=torch.int64)
    vert_tex = torch.tensor(
        [[0, 1, 0], [0, 1, 1], [1, 1, 0], [1, 1, 1]], dtype=torch.float32
    )
    tex = TexturesVertex(verts_features=vert_tex[None, :])
    mesh = Meshes(verts=[verts], faces=[faces], textures=tex)
    pix_to_face = torch.tensor([0, 1], dtype=torch.int64).view(1, 1, 1, 2)
    barycentric_coords = torch.tensor(
        [[0.5, 0.3, 0.2], [0.3, 0.6, 0.1]], dtype=torch.float32
    ).view(1, 1, 1, 2, -1)
    expected_vals = torch.tensor(
        [[0.5, 1.0, 0.3], [0.3, 1.0, 0.9]], dtype=torch.float32
    ).view(1, 1, 1, 2, -1)
    fragments = Fragments(
        pix_to_face=pix_to_face,
        bary_coords=barycentric_coords,
        zbuf=torch.ones_like(pix_to_face),
        dists=torch.ones_like(pix_to_face),
    )
    verts_features_packed = mesh.textures.verts_features_packed()
    faces_verts_features = verts_features_packed[mesh.faces_packed()]
    texels = interpolate_face_attributes(
        fragments.pix_to_face, fragments.bary_coords, faces_verts_features
    )
    self.assertTrue(torch.allclose(texels, expected_vals[None, :]))
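# A minimal sketch (not part of the original tests; the helper name is made up)
# of the interpolation checked above: each texel is the barycentric-weighted
# sum of the per-vertex features of the face its pixel hits.
def _manual_interpolation_sketch():
    vert_tex = torch.tensor(
        [[0, 1, 0], [0, 1, 1], [1, 1, 0], [1, 1, 1]], dtype=torch.float32
    )
    faces = torch.tensor([[2, 1, 0], [3, 1, 0]], dtype=torch.int64)
    bary = torch.tensor([[0.5, 0.3, 0.2], [0.3, 0.6, 0.1]], dtype=torch.float32)
    face_feats = vert_tex[faces]  # (2 faces, 3 verts per face, 3 channels)
    # Weighted sum over the three vertices of each face.
    texels = (bary[..., None] * face_feats).sum(dim=1)
    # texels == [[0.5, 1.0, 0.3], [0.3, 1.0, 0.9]], matching expected_vals.
    return texels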
def test_load_cow_no_texture(self):
    """
    Load the cow as converted to a single mesh in a glb file.
    """
    glb = DATA_DIR / "cow.glb"
    self.assertTrue(glb.is_file())
    device = torch.device("cuda:0")
    mesh = _load(glb, device=device, include_textures=False)
    self.assertEqual(len(mesh), 1)
    self.assertIsNone(mesh.textures)

    self.assertEqual(mesh.faces_packed().shape, (5856, 3))
    self.assertEqual(mesh.verts_packed().shape, (3225, 3))

    # The glb file was converted from the cow obj, so the two meshes should
    # occupy the same extent of space.
    mesh_obj = _load(TUTORIAL_DATA_DIR / "cow_mesh/cow.obj")
    self.assertClose(
        mesh_obj.get_bounding_boxes().cpu(), mesh.get_bounding_boxes().cpu()
    )

    mesh.textures = TexturesVertex(0.5 * torch.ones_like(mesh.verts_padded()))
    image = _render(mesh, "cow_gray")

    with Image.open(DATA_DIR / "glb_cow_gray.png") as f:
        expected = np.array(f)
    self.assertClose(image, expected)
def test_simple_sphere_batched(self):
    """
    Test a mesh with vertex textures can be extended to form a batch, and
    is rendered correctly with Phong, Gouraud and Flat Shaders.
    """
    batch_size = 5
    device = torch.device("cuda:0")

    # Init mesh with vertex textures.
    sphere_meshes = ico_sphere(5, device).extend(batch_size)
    verts_padded = sphere_meshes.verts_padded()
    faces_padded = sphere_meshes.faces_padded()
    feats = torch.ones_like(verts_padded, device=device)
    textures = TexturesVertex(verts_features=feats)
    sphere_meshes = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

    # Init rasterizer settings
    dist = torch.tensor([2.7]).repeat(batch_size).to(device)
    elev = torch.zeros_like(dist)
    azim = torch.zeros_like(dist)
    R, T = look_at_view_transform(dist, elev, azim)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=512, blur_radius=0.0, faces_per_pixel=1
    )

    # Init shader settings
    materials = Materials(device=device)
    lights = PointLights(device=device)
    lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]
    blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

    # Init renderer
    rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
    shaders = {
        "phong": HardPhongShader,
        "gouraud": HardGouraudShader,
        "flat": HardFlatShader,
    }
    for (name, shader_init) in shaders.items():
        shader = shader_init(
            lights=lights,
            cameras=cameras,
            materials=materials,
            blend_params=blend_params,
        )
        renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
        images = renderer(sphere_meshes)
        image_ref = load_rgb_image("test_simple_sphere_light_%s.png" % name, DATA_DIR)
        for i in range(batch_size):
            rgb = images[i, ..., :3].squeeze().cpu()
            if i == 0 and DEBUG:
                filename = "DEBUG_simple_sphere_batched_%s.png" % name
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / filename
                )
            self.assertClose(rgb, image_ref, atol=0.05)
def test_simple_sphere_outside_zfar(self):
    """
    Test output when rendering a sphere that is beyond zfar with a SoftPhongShader.
    This renders a sphere of radius 500, with the camera at x=1500 for different
    settings of zfar. This is intended to check that 1) setting cameras.zfar
    propagates to the blending function, so the rendered sphere is (soft) clipped
    if it is beyond zfar, and 2) there are no numerical precision/overflow errors
    associated with larger world coordinates.
    """
    device = torch.device("cuda:0")

    # Init mesh
    sphere_mesh = ico_sphere(5, device)
    verts_padded = sphere_mesh.verts_padded() * 500
    faces_padded = sphere_mesh.faces_padded()
    feats = torch.ones_like(verts_padded, device=device)
    textures = TexturesVertex(verts_features=feats)
    sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

    R, T = look_at_view_transform(1500, 0.0, 0.0)

    # Init shader settings
    materials = Materials(device=device)
    lights = PointLights(device=device)
    lights.location = torch.tensor([0.0, 0.0, +1000.0], device=device)[None]

    raster_settings = RasterizationSettings(
        image_size=256, blur_radius=0.0, faces_per_pixel=1
    )
    for zfar in (10000.0, 100.0):
        cameras = FoVPerspectiveCameras(
            device=device, R=R, T=T, aspect_ratio=1.0, fov=60.0, zfar=zfar
        )
        rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
        blend_params = BlendParams(1e-4, 1e-4, (0, 0, 1.0))

        shader = SoftPhongShader(
            lights=lights,
            cameras=cameras,
            materials=materials,
            blend_params=blend_params,
        )
        renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
        images = renderer(sphere_mesh)
        rgb = images[0, ..., :3].squeeze().cpu()

        filename = "test_simple_sphere_outside_zfar_%d.png" % int(zfar)

        # Load reference image
        image_ref = load_rgb_image(filename, DATA_DIR)

        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / ("DEBUG_" + filename)
            )

        self.assertClose(rgb, image_ref, atol=0.05)
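# Rough geometry of the case above (an explanatory note, not from the original
# file): the camera sits 1500 world units from the origin and the sphere has
# radius 500, so the visible surface depths span roughly 1000 to 2000 units.
# With zfar=10000 the whole sphere is inside the frustum; with zfar=100 it lies
# entirely beyond the far plane, so the soft blend should leave only the
# (0, 0, 1.0) background color in the reference image.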
def test_simple_sphere_screen(self):
    """
    Test output when rendering with PerspectiveCameras & OrthographicCameras
    in NDC vs screen space.
    """
    device = torch.device("cuda:0")

    # Init mesh
    sphere_mesh = ico_sphere(5, device)
    verts_padded = sphere_mesh.verts_padded()
    faces_padded = sphere_mesh.faces_padded()
    feats = torch.ones_like(verts_padded, device=device)
    textures = TexturesVertex(verts_features=feats)
    sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

    R, T = look_at_view_transform(2.7, 0.0, 0.0)

    # Init shader settings
    materials = Materials(device=device)
    lights = PointLights(device=device)
    lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]

    raster_settings = RasterizationSettings(
        image_size=512, blur_radius=0.0, faces_per_pixel=1
    )
    half_half = (512.0 / 2.0, 512.0 / 2.0)
    for cam_type in (PerspectiveCameras, OrthographicCameras):
        cameras = cam_type(
            device=device,
            R=R,
            T=T,
            principal_point=(half_half,),
            focal_length=(half_half,),
            image_size=((512, 512),),
            in_ndc=False,
        )
        rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
        blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

        shader = HardPhongShader(
            lights=lights,
            cameras=cameras,
            materials=materials,
            blend_params=blend_params,
        )
        renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
        images = renderer(sphere_mesh)
        rgb = images[0, ..., :3].squeeze().cpu()

        filename = "test_simple_sphere_light_phong_%s.png" % cam_type.__name__
        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / ("DEBUG_" + filename)
            )

        image_ref = load_rgb_image(filename, DATA_DIR)
        self.assertClose(rgb, image_ref, atol=0.05)
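# Note on the parameters above (an explanatory sketch, not from the original
# file): for a square 512 x 512 image, a screen-space focal length of 256 px
# with the principal point at (256, 256) px corresponds to the NDC defaults
# focal_length=1.0 and principal_point=(0, 0), which is why the screen-space
# render is compared against the same reference image as the NDC phong tests.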
def test_cameras_kwarg(self):
    """
    Test that when cameras are passed in as a kwarg the rendering works as
    expected.
    """
    device = torch.device("cuda:0")

    # Init mesh
    sphere_mesh = ico_sphere(5, device)
    verts_padded = sphere_mesh.verts_padded()
    faces_padded = sphere_mesh.faces_padded()
    feats = torch.ones_like(verts_padded, device=device)
    textures = TexturesVertex(verts_features=feats)
    sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

    # No elevation or azimuth rotation
    R, T = look_at_view_transform(2.7, 0.0, 0.0)
    for cam_type in (
        FoVPerspectiveCameras,
        FoVOrthographicCameras,
        PerspectiveCameras,
        OrthographicCameras,
    ):
        cameras = cam_type(device=device, R=R, T=T)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]

        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1
        )
        rasterizer = MeshRasterizer(raster_settings=raster_settings)
        blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

        shader = HardPhongShader(
            lights=lights,
            materials=materials,
            blend_params=blend_params,
        )
        renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)

        # Cameras can be passed into the renderer in the forward pass
        images = renderer(sphere_mesh, cameras=cameras)
        rgb = images.squeeze()[..., :3].cpu().numpy()
        image_ref = load_rgb_image(
            "test_simple_sphere_light_phong_%s.png" % cam_type.__name__, DATA_DIR
        )
        self.assertClose(rgb, image_ref, atol=0.05)
def get_texture_for_mesh(
    self, primitive: Dict[str, Any], indices: torch.Tensor
) -> Optional[TexturesBase]:
    """
    Get the texture object representing the given mesh primitive.

    Args:
        primitive: the mesh primitive being loaded.
        indices: the face indices of the mesh.
    """
    attributes = primitive["attributes"]
    vertex_colors = self._get_primitive_attribute(attributes, "COLOR_0", np.float32)
    if vertex_colors is not None:
        # TexturesVertex expects a batch: a list of (V, C) tensors
        # or a single (N, V, C) tensor.
        return TexturesVertex([torch.from_numpy(vertex_colors)])

    vertex_texcoords_0 = self._get_primitive_attribute(
        attributes, "TEXCOORD_0", np.float32
    )
    if vertex_texcoords_0 is not None:
        verts_uvs = torch.from_numpy(vertex_texcoords_0)
        # Flip the V coordinate of the UVs.
        verts_uvs[:, 1] = 1 - verts_uvs[:, 1]
        faces_uvs = indices
        material_index = primitive.get("material", 0)
        material = self._json_data["materials"][material_index]
        material_roughness = material["pbrMetallicRoughness"]
        if "baseColorTexture" in material_roughness:
            texture_index = material_roughness["baseColorTexture"]["index"]
            texture_json = self._json_data["textures"][texture_index]
            # Todo - include baseColorFactor when also given
            # Todo - look at the sampler
            image_index = texture_json["source"]
            map = self._get_texture_map_image(image_index)
        elif "baseColorFactor" in material_roughness:
            # Constant color?
            map = torch.FloatTensor(material_roughness["baseColorFactor"])[
                None, None, :3
            ]
        texture = TexturesUV(
            # pyre-fixme[61]: `map` may not be initialized here.
            maps=[map],  # alpha channel ignored
            faces_uvs=[faces_uvs],
            verts_uvs=[verts_uvs],
        )
        return texture

    return None
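# Note on the V flip above (an explanatory sketch, not part of the loader):
# glTF places the UV origin at the top-left of the texture image, whereas
# TexturesUV expects the origin at the bottom-left, so v is replaced by 1 - v.
# For example, a glTF uv of (0.25, 0.0) becomes (0.25, 1.0).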
def test_save_load_meshes(self):
    verts = torch.tensor(
        [[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=torch.float32
    )
    faces = torch.tensor([[0, 1, 2], [0, 2, 3]])
    normals = torch.tensor(
        [[0, 1, 0], [1, 0, 0], [1, 4, 1], [1, 0, 0]], dtype=torch.float32
    )
    vert_colors = torch.rand_like(verts)
    texture = TexturesVertex(verts_features=[vert_colors])

    for do_textures, do_normals in itertools.product([True, False], [True, False]):
        mesh = Meshes(
            verts=[verts],
            faces=[faces],
            textures=texture if do_textures else None,
            verts_normals=[normals] if do_normals else None,
        )
        device = torch.device("cuda:0")

        io = IO()
        with NamedTemporaryFile(mode="w", suffix=".ply") as f:
            io.save_mesh(mesh.cuda(), f.name)
            f.flush()
            mesh2 = io.load_mesh(f.name, device=device)
        self.assertEqual(mesh2.device, device)
        mesh2 = mesh2.cpu()
        self.assertClose(mesh2.verts_padded(), mesh.verts_padded())
        self.assertClose(mesh2.faces_padded(), mesh.faces_padded())
        if do_normals:
            self.assertTrue(mesh.has_verts_normals())
            self.assertTrue(mesh2.has_verts_normals())
            self.assertClose(
                mesh2.verts_normals_padded(), mesh.verts_normals_padded()
            )
        else:
            self.assertFalse(mesh.has_verts_normals())
            self.assertFalse(mesh2.has_verts_normals())
            self.assertFalse(torch.allclose(mesh2.verts_normals_padded(), normals))
        if do_textures:
            self.assertIsInstance(mesh2.textures, TexturesVertex)
            self.assertClose(mesh2.textures.verts_features_list()[0], vert_colors)
        else:
            self.assertIsNone(mesh2.textures)
def test_save_too_many_colors(self):
    verts = torch.tensor(
        [[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=torch.float32
    )
    faces = torch.tensor([[0, 1, 2], [0, 2, 3]])
    vert_colors = torch.rand((4, 7))
    texture_with_seven_colors = TexturesVertex(verts_features=[vert_colors])

    mesh = Meshes(
        verts=[verts],
        faces=[faces],
        textures=texture_with_seven_colors,
    )

    io = IO()
    msg = "Texture will not be saved as it has 7 colors, not 3."
    with NamedTemporaryFile(mode="w", suffix=".ply") as f:
        with self.assertWarnsRegex(UserWarning, msg):
            io.save_mesh(mesh.cuda(), f.name)
def test_joined_spheres(self):
    """
    Test that a list of Meshes can be joined as a single mesh and that the
    single mesh is rendered correctly with Phong, Gouraud and Flat Shaders.
    """
    device = torch.device("cuda:0")

    # Init mesh with vertex textures.
    # Initialize a list containing two ico spheres of different sizes.
    sphere_list = [ico_sphere(3, device), ico_sphere(4, device)]
    # [(642 verts, 1280 faces), (2562 verts, 5120 faces)]
    # The scales to apply to the sphere vertices to resize them.
    scales = [0.25, 1]
    # The horizontal offsets to apply to the spheres to prevent overlap.
    offsets = [1.2, -0.3]
    # Initialize a list containing the adjusted sphere meshes.
    sphere_mesh_list = []
    for i in range(len(sphere_list)):
        verts = sphere_list[i].verts_padded() * scales[i]
        verts[0, :, 0] += offsets[i]
        sphere_mesh_list.append(
            Meshes(verts=verts, faces=sphere_list[i].faces_padded())
        )
    joined_sphere_mesh = join_meshes_as_scene(sphere_mesh_list)
    joined_sphere_mesh.textures = TexturesVertex(
        verts_features=torch.ones_like(joined_sphere_mesh.verts_padded())
    )

    # Init rasterizer settings
    R, T = look_at_view_transform(2.7, 0.0, 0.0)
    cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=512,
        blur_radius=0.0,
        faces_per_pixel=1,
        perspective_correct=False,
    )

    # Init shader settings
    materials = Materials(device=device)
    lights = PointLights(device=device)
    lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]
    blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

    # Init renderer
    rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
    shaders = {
        "phong": HardPhongShader,
        "gouraud": HardGouraudShader,
        "flat": HardFlatShader,
    }
    for (name, shader_init) in shaders.items():
        shader = shader_init(
            lights=lights,
            cameras=cameras,
            materials=materials,
            blend_params=blend_params,
        )
        renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
        image = renderer(joined_sphere_mesh)
        rgb = image[..., :3].squeeze().cpu()
        if DEBUG:
            file_name = "DEBUG_joined_spheres_%s.png" % name
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / file_name
            )
        image_ref = load_rgb_image("test_joined_spheres_%s.png" % name, DATA_DIR)
        self.assertClose(rgb, image_ref, atol=0.05)
def test_simple_sphere(self, elevated_camera=False, check_depth=False):
    """
    Test output of phong and gouraud shading matches a reference image using
    the default values for the light sources.

    Args:
        elevated_camera: Defines whether the camera observing the scene should
                         have an elevation of 45 degrees.
    """
    device = torch.device("cuda:0")

    # Init mesh
    sphere_mesh = ico_sphere(5, device)
    verts_padded = sphere_mesh.verts_padded()
    faces_padded = sphere_mesh.faces_padded()
    feats = torch.ones_like(verts_padded, device=device)
    textures = TexturesVertex(verts_features=feats)
    sphere_mesh = Meshes(verts=verts_padded, faces=faces_padded, textures=textures)

    # Init rasterizer settings
    if elevated_camera:
        # Elevated and rotated camera
        R, T = look_at_view_transform(dist=2.7, elev=45.0, azim=45.0)
        postfix = "_elevated_"
        # If y axis is up, the spot of light should
        # be on the bottom left of the sphere.
    else:
        # No elevation or azimuth rotation
        R, T = look_at_view_transform(2.7, 0.0, 0.0)
        postfix = "_"
    for cam_type in (
        FoVPerspectiveCameras,
        FoVOrthographicCameras,
        PerspectiveCameras,
        OrthographicCameras,
    ):
        cameras = cam_type(device=device, R=R, T=T)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, +2.0], device=device)[None]

        raster_settings = RasterizationSettings(
            image_size=512, blur_radius=0.0, faces_per_pixel=1
        )
        rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
        blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

        # Test several shaders
        shaders = {
            "phong": HardPhongShader,
            "gouraud": HardGouraudShader,
            "flat": HardFlatShader,
        }
        for (name, shader_init) in shaders.items():
            shader = shader_init(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            )
            if check_depth:
                renderer = MeshRendererWithFragments(
                    rasterizer=rasterizer, shader=shader
                )
                images, fragments = renderer(sphere_mesh)
                self.assertClose(fragments.zbuf, rasterizer(sphere_mesh).zbuf)
            else:
                renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
                images = renderer(sphere_mesh)
            rgb = images[0, ..., :3].squeeze().cpu()

            filename = "simple_sphere_light_%s%s%s.png" % (
                name,
                postfix,
                cam_type.__name__,
            )
            image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)
            self.assertClose(rgb, image_ref, atol=0.05)

            if DEBUG:
                filename = "DEBUG_%s" % filename
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / filename
                )

        ########################################################
        # Move the light to the +z axis in world space so it is
        # behind the sphere. Note that +Z is in, +Y up,
        # +X left for both world and camera space.
        ########################################################
        lights.location[..., 2] = -2.0
        phong_shader = HardPhongShader(
            lights=lights,
            cameras=cameras,
            materials=materials,
            blend_params=blend_params,
        )
        if check_depth:
            phong_renderer = MeshRendererWithFragments(
                rasterizer=rasterizer, shader=phong_shader
            )
            images, fragments = phong_renderer(sphere_mesh, lights=lights)
            self.assertClose(
                fragments.zbuf, rasterizer(sphere_mesh, lights=lights).zbuf
            )
        else:
            phong_renderer = MeshRenderer(rasterizer=rasterizer, shader=phong_shader)
            images = phong_renderer(sphere_mesh, lights=lights)
        rgb = images[0, ..., :3].squeeze().cpu()
        if DEBUG:
            filename = "DEBUG_simple_sphere_dark%s%s.png" % (
                postfix,
                cam_type.__name__,
            )
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / filename
            )

        image_ref_phong_dark = load_rgb_image(
            "test_simple_sphere_dark%s%s.png" % (postfix, cam_type.__name__),
            DATA_DIR,
        )
        self.assertClose(rgb, image_ref_phong_dark, atol=0.05)
def test_to(self):
    # Test moving all the tensors in the renderer to a new device
    # to support multigpu rendering.
    device1 = torch.device("cpu")

    R, T = look_at_view_transform(1500, 0.0, 0.0)

    # Init shader settings
    materials = Materials(device=device1)
    lights = PointLights(device=device1)
    lights.location = torch.tensor([0.0, 0.0, +1000.0], device=device1)[None]

    raster_settings = RasterizationSettings(
        image_size=256, blur_radius=0.0, faces_per_pixel=1
    )
    cameras = FoVPerspectiveCameras(
        device=device1, R=R, T=T, aspect_ratio=1.0, fov=60.0, zfar=100
    )
    rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)

    blend_params = BlendParams(
        1e-4,
        1e-4,
        background_color=torch.zeros(3, dtype=torch.float32, device=device1),
    )

    shader = SoftPhongShader(
        lights=lights,
        cameras=cameras,
        materials=materials,
        blend_params=blend_params,
    )
    renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)

    def _check_props_on_device(renderer, device):
        self.assertEqual(renderer.rasterizer.cameras.device, device)
        self.assertEqual(renderer.shader.cameras.device, device)
        self.assertEqual(renderer.shader.lights.device, device)
        self.assertEqual(renderer.shader.lights.ambient_color.device, device)
        self.assertEqual(renderer.shader.materials.device, device)
        self.assertEqual(renderer.shader.materials.ambient_color.device, device)

    mesh = ico_sphere(2, device1)
    verts_padded = mesh.verts_padded()
    textures = TexturesVertex(
        verts_features=torch.ones_like(verts_padded, device=device1)
    )
    mesh.textures = textures
    _check_props_on_device(renderer, device1)

    # Test rendering on cpu
    output_images = renderer(mesh)
    self.assertEqual(output_images.device, device1)

    # Move renderer and mesh to another device and re-render.
    # This also tests that background_color is correctly moved to
    # the new device.
    device2 = torch.device("cuda:0")
    renderer.to(device2)
    mesh = mesh.to(device2)
    _check_props_on_device(renderer, device2)
    output_images = renderer(mesh)
    self.assertEqual(output_images.device, device2)
def test_simple_sphere_batched(self):
    """
    Test a mesh with vertex textures can be extended to form a batch, and
    is rendered correctly with Phong, Gouraud and Flat Shaders with batched
    lighting and hard and soft blending.
    """
    batch_size = 5
    device = torch.device("cuda:0")

    # Init mesh with vertex textures.
    sphere_meshes = ico_sphere(5, device).extend(batch_size)
    verts_padded = sphere_meshes.verts_padded()
    faces_padded = sphere_meshes.faces_padded()
    feats = torch.ones_like(verts_padded, device=device)
    textures = TexturesVertex(verts_features=feats)
    sphere_meshes = Meshes(
        verts=verts_padded, faces=faces_padded, textures=textures
    )

    # Init rasterizer settings
    dist = torch.tensor([2.7]).repeat(batch_size).to(device)
    elev = torch.zeros_like(dist)
    azim = torch.zeros_like(dist)
    R, T = look_at_view_transform(dist, elev, azim)
    cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
    raster_settings = RasterizationSettings(
        image_size=512, blur_radius=0.0, faces_per_pixel=4
    )

    # Init shader settings
    materials = Materials(device=device)
    lights_location = torch.tensor([0.0, 0.0, +2.0], device=device)
    lights_location = lights_location[None].expand(batch_size, -1)
    lights = PointLights(device=device, location=lights_location)
    blend_params = BlendParams(1e-4, 1e-4, (0, 0, 0))

    # Init renderer
    rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)
    shader_tests = [
        ShaderTest(HardPhongShader, "phong", "hard_phong"),
        ShaderTest(SoftPhongShader, "phong", "soft_phong"),
        ShaderTest(HardGouraudShader, "gouraud", "hard_gouraud"),
        ShaderTest(HardFlatShader, "flat", "hard_flat"),
    ]
    for test in shader_tests:
        reference_name = test.reference_name
        debug_name = test.debug_name
        shader = test.shader(
            lights=lights,
            cameras=cameras,
            materials=materials,
            blend_params=blend_params,
        )
        renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
        images = renderer(sphere_meshes)
        image_ref = load_rgb_image(
            "test_simple_sphere_light_%s_%s.png"
            % (reference_name, type(cameras).__name__),
            DATA_DIR,
        )
        for i in range(batch_size):
            rgb = images[i, ..., :3].squeeze().cpu()
            if i == 0 and DEBUG:
                filename = "DEBUG_simple_sphere_batched_%s_%s.png" % (
                    debug_name,
                    type(cameras).__name__,
                )
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / filename
                )
            self.assertClose(rgb, image_ref, atol=0.05)