def test_textures_vertex_init_fail(self):
    # Incorrect sized tensors
    with self.assertRaisesRegex(ValueError, "verts_features"):
        TexturesVertex(verts_features=torch.rand(size=(5, 10)))

    # Not a list or a tensor
    with self.assertRaisesRegex(ValueError, "verts_features"):
        TexturesVertex(verts_features=(1, 1, 1))
def test_sample_vertex_textures_grad(self):
    verts = torch.randn((4, 3), dtype=torch.float32)
    faces = torch.tensor([[2, 1, 0], [3, 1, 0]], dtype=torch.int64)
    vert_tex = torch.tensor(
        [[0, 1, 0], [0, 1, 1], [1, 1, 0], [1, 1, 1]],
        dtype=torch.float32,
        requires_grad=True,
    )
    verts_features = vert_tex
    tex = TexturesVertex(verts_features=[verts_features])
    mesh = Meshes(verts=[verts], faces=[faces], textures=tex)

    pix_to_face = torch.tensor([0, 1], dtype=torch.int64).view(1, 1, 1, 2)
    barycentric_coords = torch.tensor(
        [[0.5, 0.3, 0.2], [0.3, 0.6, 0.1]], dtype=torch.float32
    ).view(1, 1, 1, 2, -1)
    fragments = Fragments(
        pix_to_face=pix_to_face,
        bary_coords=barycentric_coords,
        zbuf=torch.ones_like(pix_to_face),
        dists=torch.ones_like(pix_to_face),
    )
    grad_vert_tex = torch.tensor(
        [[0.3, 0.3, 0.3], [0.9, 0.9, 0.9], [0.5, 0.5, 0.5], [0.3, 0.3, 0.3]],
        dtype=torch.float32,
    )
    texels = mesh.sample_textures(fragments)
    texels.sum().backward()
    self.assertTrue(hasattr(vert_tex, "grad"))
    self.assertTrue(torch.allclose(vert_tex.grad, grad_vert_tex[None, :]))
def test_sample_vertex_textures(self):
    """
    This tests both interpolate_vertex_colors and
    interpolate_face_attributes.
    """
    verts = torch.randn((4, 3), dtype=torch.float32)
    faces = torch.tensor([[2, 1, 0], [3, 1, 0]], dtype=torch.int64)
    vert_tex = torch.tensor(
        [[0, 1, 0], [0, 1, 1], [1, 1, 0], [1, 1, 1]], dtype=torch.float32
    )
    verts_features = vert_tex
    tex = TexturesVertex(verts_features=[verts_features])
    mesh = Meshes(verts=[verts], faces=[faces], textures=tex)

    pix_to_face = torch.tensor([0, 1], dtype=torch.int64).view(1, 1, 1, 2)
    barycentric_coords = torch.tensor(
        [[0.5, 0.3, 0.2], [0.3, 0.6, 0.1]], dtype=torch.float32
    ).view(1, 1, 1, 2, -1)
    expected_vals = torch.tensor(
        [[0.5, 1.0, 0.3], [0.3, 1.0, 0.9]], dtype=torch.float32
    ).view(1, 1, 1, 2, -1)
    fragments = Fragments(
        pix_to_face=pix_to_face,
        bary_coords=barycentric_coords,
        zbuf=torch.ones_like(pix_to_face),
        dists=torch.ones_like(pix_to_face),
    )

    # sample_textures calls interpolate_vertex_colors
    texels = mesh.sample_textures(fragments)
    self.assertTrue(torch.allclose(texels, expected_vals[None, :]))
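# A minimal sketch (not part of the original tests) of how the texels sampled
# above could be reproduced directly with pytorch3d.ops.interpolate_face_attributes.
# The helper name `_interpolate_verts_features` is hypothetical; `mesh` and
# `fragments` are assumed to be a Meshes with a TexturesVertex texture and a
# Fragments object like the ones built in test_sample_vertex_textures.
def _interpolate_verts_features(mesh, fragments):
    from pytorch3d.ops import interpolate_face_attributes

    faces_packed = mesh.faces_packed()                       # (F, 3) vertex indices
    verts_features = mesh.textures.verts_features_packed()   # (V, C) per-vertex colors
    faces_verts_features = verts_features[faces_packed]      # (F, 3, C) per-face colors
    # Barycentric interpolation of the per-face vertex colors at each pixel.
    return interpolate_face_attributes(
        fragments.pix_to_face, fragments.bary_coords, faces_verts_features
    )                                                        # (N, H, W, K, C)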
def test_extend(self):
    B = 10
    mesh = init_mesh(B, 30, 50)
    V = mesh._V
    tex_uv = TexturesVertex(verts_features=torch.randn((B, V, 3)))
    tex_mesh = Meshes(
        verts=mesh.verts_padded(), faces=mesh.faces_padded(), textures=tex_uv
    )
    N = 20
    new_mesh = tex_mesh.extend(N)
    self.assertEqual(len(tex_mesh) * N, len(new_mesh))

    tex_init = tex_mesh.textures
    new_tex = new_mesh.textures
    for i in range(len(tex_mesh)):
        for n in range(N):
            self.assertClose(
                tex_init.verts_features_list()[i],
                new_tex.verts_features_list()[i * N + n],
            )
            self.assertClose(
                tex_init._num_faces_per_mesh[i],
                new_tex._num_faces_per_mesh[i * N + n],
            )
    self.assertAllSeparate(
        [tex_init.verts_features_padded(), new_tex.verts_features_padded()]
    )
    with self.assertRaises(ValueError):
        tex_mesh.extend(N=-1)
def test_padded_to_packed(self):
    # Two meshes with different numbers of vertices; each vertex has a
    # D dimensional feature.
    num_verts_per_mesh = [9, 6]
    D = 10
    verts_features_list = [torch.rand(v, D) for v in num_verts_per_mesh]
    verts_features_packed = list_to_packed(verts_features_list)[0]
    verts_features_list = packed_to_list(
        verts_features_packed, num_verts_per_mesh
    )
    tex = TexturesVertex(verts_features=verts_features_list)

    # _num_verts_per_mesh is set inside Meshes when textures is passed as an
    # input. Here we set it explicitly.
    tex1 = tex.clone()
    tex1._num_verts_per_mesh = num_verts_per_mesh
    verts_packed = tex1.verts_features_packed()
    verts_verts_list = tex1.verts_features_list()
    verts_padded = tex1.verts_features_padded()

    for f1, f2 in zip(verts_verts_list, verts_features_list):
        self.assertTrue((f1 == f2).all().item())
    self.assertTrue(verts_packed.shape == (sum(num_verts_per_mesh), D))
    self.assertTrue(verts_padded.shape == (2, 9, D))

    # Case where num_verts_per_mesh is not set and textures
    # are initialized with a padded tensor.
    tex2 = TexturesVertex(verts_features=verts_padded)
    verts_packed = tex2.verts_features_packed()
    verts_list = tex2.verts_features_list()

    # Packed is just flattened padded as num_verts_per_mesh
    # has not been provided.
    self.assertTrue(verts_packed.shape == (9 * 2, D))
    for i, (f1, f2) in enumerate(zip(verts_list, verts_features_list)):
        n = num_verts_per_mesh[i]
        self.assertTrue((f1[:n] == f2).all().item())
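# A small illustrative sketch (not from the original file) of the three feature
# representations that test_padded_to_packed converts between, using the
# pytorch3d.structures.utils helpers already used above. The function name is
# hypothetical and the tensor sizes mirror the test.
def _representation_roundtrip_example():
    import torch
    from pytorch3d.structures.utils import (
        list_to_packed,
        list_to_padded,
        packed_to_list,
    )

    feats_list = [torch.rand(9, 10), torch.rand(6, 10)]      # per-mesh (V_i, D) features
    feats_padded = list_to_padded(feats_list)                 # (2, 9, 10), zero padded
    feats_packed = list_to_packed(feats_list)[0]              # (9 + 6, 10), concatenated
    feats_list_again = packed_to_list(feats_packed, [9, 6])   # back to per-mesh tensors
    return feats_padded, feats_packed, feats_list_again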
def test_detach(self):
    tex = TexturesVertex(
        verts_features=torch.rand(size=(10, 100, 128), requires_grad=True)
    )
    tex.verts_features_list()
    tex_detached = tex.detach()
    self.assertFalse(tex_detached._verts_features_padded.requires_grad)
    self.assertClose(
        tex_detached._verts_features_padded, tex._verts_features_padded
    )
    for i in range(tex._N):
        self.assertClose(
            tex._verts_features_list[i], tex_detached._verts_features_list[i]
        )
        self.assertFalse(tex_detached._verts_features_list[i].requires_grad)
def test_sample_textures_error(self):
    N = 5
    V = 20
    verts = torch.rand(size=(N, V, 3))
    faces = torch.randint(size=(N, 10, 3), high=V)
    tex = TexturesVertex(verts_features=torch.randn(size=(N, 10, 128)))

    # Verts features have the wrong number of verts
    with self.assertRaisesRegex(ValueError, "do not match the dimensions"):
        Meshes(verts=verts, faces=faces, textures=tex)

    # Verts features have the wrong batch dim
    tex = TexturesVertex(verts_features=torch.randn(size=(1, V, 128)))
    with self.assertRaisesRegex(ValueError, "do not match the dimensions"):
        Meshes(verts=verts, faces=faces, textures=tex)

    meshes = Meshes(verts=verts, faces=faces)
    meshes.textures = tex

    # Cannot use the texture attribute set on meshes for sampling
    # textures if the dimensions don't match
    with self.assertRaisesRegex(ValueError, "do not match the dimensions"):
        meshes.sample_textures(None)
def test_clone(self):
    tex = TexturesVertex(verts_features=torch.rand(size=(10, 100, 128)))
    tex.verts_features_list()
    tex_cloned = tex.clone()
    self.assertSeparate(
        tex._verts_features_padded, tex_cloned._verts_features_padded
    )
    self.assertClose(
        tex._verts_features_padded, tex_cloned._verts_features_padded
    )
    self.assertSeparate(tex.valid, tex_cloned.valid)
    self.assertTrue(tex.valid.eq(tex_cloned.valid).all())
    for i in range(tex._N):
        self.assertSeparate(
            tex._verts_features_list[i], tex_cloned._verts_features_list[i]
        )
        self.assertClose(
            tex._verts_features_list[i], tex_cloned._verts_features_list[i]
        )
def renderBatch(self, Rs, ts, ids=[]):
    # Accept rotations/translations either as lists of arrays or as
    # already-batched tensors.
    if isinstance(Rs, list):
        batch_R = torch.tensor(
            np.stack(Rs), device=self.device, dtype=torch.float32
        )  # Bx3x3
    else:
        batch_R = Rs
    if isinstance(ts, list):
        batch_T = torch.tensor(
            np.stack(ts), device=self.device, dtype=torch.float32
        )  # Bx3
    else:
        batch_T = ts

    if len(ids) == 0:
        # No ids specified, assuming one object only
        ids = [0 for _ in Rs]

    # Load meshes based on object ids
    batch_verts_rgb = list_to_padded([self.textures[i] for i in ids])
    batch_textures = TexturesVertex(
        verts_features=batch_verts_rgb.to(self.device)
    )
    batch_verts = [self.vertices[i].to(self.device) for i in ids]
    batch_faces = [self.faces[i].to(self.device) for i in ids]
    mesh = Meshes(verts=batch_verts, faces=batch_faces, textures=batch_textures)

    images = self.renderer(meshes_world=mesh, R=batch_R, T=batch_T)

    # Post-process the rendered images depending on the chosen method.
    if self.method in ("soft-silhouette", "hard-silhouette"):
        images = images[..., 3]  # alpha channel only
    elif self.method in ("hard-phong", "soft-phong"):
        images = images[..., :3]  # RGB channels only
    elif self.method in ("soft-depth", "hard-depth"):
        pass  # keep the depth output as is
    elif self.method == "blurry-depth":
        images = torch.mean(images, dim=3)
    return images
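# A small aside (a sketch, not part of renderBatch): TexturesVertex also accepts
# a plain Python list of per-mesh (V_i, 3) tensors, as the tests above do, so the
# list_to_padded step in renderBatch could be skipped when the per-object vertex
# colors are already held as a list. The tensors below are placeholders.
def _textures_from_list_example():
    import torch
    from pytorch3d.renderer import TexturesVertex

    verts_rgb_list = [torch.rand(100, 3), torch.rand(250, 3)]  # two objects
    return TexturesVertex(verts_features=verts_rgb_list)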
def test_submeshes(self):
    # define TexturesVertex
    verts_features = torch.tensor(
        [
            [1, 0, 0],
            [1, 0, 0],
            [1, 0, 0],
            [1, 0, 0],
            [0, 1, 0],
            [0, 1, 0],
            [0, 1, 0],
            [0, 1, 0],
        ],
        dtype=torch.float32,
    )
    textures = TexturesVertex(
        verts_features=[verts_features, verts_features, verts_features]
    )
    subtextures = textures.submeshes(
        [
            [
                torch.LongTensor([0, 2, 3]),
                torch.LongTensor(list(range(8))),
            ],
            [],
            [
                torch.LongTensor([4]),
            ],
        ],
        None,
    )
    subtextures_features = subtextures.verts_features_list()
    self.assertEqual(len(subtextures_features), 3)
    self.assertTrue(
        torch.equal(
            subtextures_features[0],
            torch.FloatTensor([[1, 0, 0], [1, 0, 0], [1, 0, 0]]),
        )
    )
    self.assertTrue(torch.equal(subtextures_features[1], verts_features))
    self.assertTrue(
        torch.equal(subtextures_features[2], torch.FloatTensor([[0, 1, 0]]))
    )
def test_getitem(self):
    N = 5
    V = 20
    source = {"verts_features": torch.randn(size=(N, V, 128))}
    tex = TexturesVertex(verts_features=source["verts_features"])

    verts = torch.rand(size=(N, V, 3))
    faces = torch.randint(size=(N, 10, 3), high=V)
    meshes = Meshes(verts=verts, faces=faces, textures=tex)

    tryindex(self, 2, tex, meshes, source)
    tryindex(self, slice(0, 2, 1), tex, meshes, source)
    index = torch.tensor([1, 0, 1, 0, 0], dtype=torch.bool)
    tryindex(self, index, tex, meshes, source)
    index = torch.tensor([0, 0, 0, 0, 0], dtype=torch.bool)
    tryindex(self, index, tex, meshes, source)
    index = torch.tensor([1, 2], dtype=torch.int64)
    tryindex(self, index, tex, meshes, source)
    tryindex(self, [2, 4], tex, meshes, source)
def test_faces_verts_textures(self):
    device = torch.device("cuda:0")
    verts = torch.randn((2, 4, 3), dtype=torch.float32, device=device)
    faces = torch.tensor(
        [[[2, 1, 0], [3, 1, 0]], [[1, 3, 0], [2, 1, 3]]],
        dtype=torch.int64,
        device=device,
    )

    # define TexturesVertex (on the same device as faces so that the packed
    # indexing below works)
    verts_texture = torch.rand(verts.shape, device=device)
    textures = TexturesVertex(verts_features=verts_texture)

    # compute packed faces
    ff = faces.unbind(0)
    faces_packed = torch.cat([ff[0], ff[1] + verts.shape[1]])

    # face verts textures
    faces_verts_texts = textures.faces_verts_textures_packed(faces_packed)
    verts_texts_packed = torch.cat(verts_texture.unbind(0))
    faces_verts_texts_packed = verts_texts_packed[faces_packed]
    self.assertClose(faces_verts_texts_packed, faces_verts_texts)
    return loss, image


# Set the cuda device
device = torch.device("cuda:0")
torch.cuda.set_device(device)

# Load the obj and ignore the textures and materials.
# verts, faces_idx, _ = load_obj("../data/cad-models/teapot.obj")
verts, faces_idx, _ = load_obj(
    "../data/ikea-mug/cad/ikea_mug_scaled_reduced_centered.obj"
)
faces = faces_idx.verts_idx

# Initialize each vertex to be white in color.
verts_rgb = torch.ones_like(verts)[None]  # (1, V, 3)
textures = TexturesVertex(verts_features=verts_rgb.to(device))

# Create a Meshes object for the loaded mesh. Here we have only one mesh in the batch.
teapot_mesh = Meshes(
    verts=[verts.to(device)], faces=[faces.to(device)], textures=textures
)

# Initialize an OpenGL perspective camera.
cameras = OpenGLPerspectiveCameras(device=device)

# To blend the 100 faces we set a few parameters which control the opacity and the
# sharpness of edges. Refer to blending.py for more details.
blend_params = BlendParams(sigma=1e-4, gamma=1e-4)

# Define the settings for rasterization and shading. Here we set the output image to
# be of size 256x256. To form the blended image we use 100 faces for each pixel.
# Refer to rasterize_meshes.py
def test_clone(self):
    tex = TexturesVertex(verts_features=torch.rand(size=(10, 100, 128)))
    tex_cloned = tex.clone()
    self.assertSeparate(
        tex._verts_features_padded, tex_cloned._verts_features_padded
    )
    self.assertSeparate(tex.valid, tex_cloned.valid)
def render_img(face_shape, face_color, facemodel, image_size=224, fx=1015.0,
               fy=1015.0, px=112.0, py=112.0, device='cuda:0'):
    '''
    ref: https://github.com/facebookresearch/pytorch3d/issues/184
    The rendering function (just for test)
    Input:
        face_shape: Tensor[1, 35709, 3]
        face_color: Tensor[1, 35709, 3] in [0, 1]
        facemodel: contains `tri` (triangles[70789, 3], index start from 1)
    '''
    from pytorch3d.structures import Meshes
    from pytorch3d.renderer.mesh.textures import TexturesVertex
    from pytorch3d.renderer import (
        PerspectiveCameras,
        PointLights,
        RasterizationSettings,
        MeshRenderer,
        MeshRasterizer,
        SoftPhongShader,
        BlendParams,
    )

    face_color = TexturesVertex(verts_features=face_color.to(device))
    face_buf = torch.from_numpy(facemodel.tri - 1)  # index start from 1
    face_idx = face_buf.unsqueeze(0)
    mesh = Meshes(face_shape.to(device), face_idx.to(device), face_color)

    R = torch.eye(3).view(1, 3, 3).to(device)
    R[0, 0, 0] *= -1.0
    T = torch.zeros([1, 3]).to(device)

    half_size = (image_size - 1.0) / 2
    focal_length = torch.tensor(
        [fx / half_size, fy / half_size], dtype=torch.float32
    ).reshape(1, 2).to(device)
    principal_point = torch.tensor(
        [(half_size - px) / half_size, (py - half_size) / half_size],
        dtype=torch.float32,
    ).reshape(1, 2).to(device)

    cameras = PerspectiveCameras(
        device=device,
        R=R,
        T=T,
        focal_length=focal_length,
        principal_point=principal_point,
    )

    raster_settings = RasterizationSettings(
        image_size=image_size, blur_radius=0.0, faces_per_pixel=1
    )

    lights = PointLights(
        device=device,
        ambient_color=((1.0, 1.0, 1.0),),
        diffuse_color=((0.0, 0.0, 0.0),),
        specular_color=((0.0, 0.0, 0.0),),
        location=((0.0, 0.0, 1e5),),
    )

    blend_params = BlendParams(background_color=(0.0, 0.0, 0.0))

    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
        shader=SoftPhongShader(
            device=device, cameras=cameras, lights=lights, blend_params=blend_params
        ),
    )
    images = renderer(mesh)
    images = torch.clamp(images, 0.0, 1.0)
    return images
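# A hypothetical smoke test for render_img (a sketch, not from the original code).
# It builds a single-triangle stand-in for `facemodel`, whose `tri` array is
# 1-indexed as the docstring above describes; all shapes and values are made up
# and a CUDA device is assumed.
def _render_img_smoke_test():
    import numpy as np
    import torch
    from types import SimpleNamespace

    face_shape = torch.tensor(
        [[[0.0, 0.0, 0.5], [0.1, 0.0, 0.5], [0.0, 0.1, 0.5]]]
    )  # (1, 3, 3) triangle placed in front of the camera
    face_color = torch.full((1, 3, 3), 0.5)  # (1, 3, 3) grey per-vertex colors
    facemodel = SimpleNamespace(tri=np.array([[1, 2, 3]], dtype=np.int64))
    images = render_img(face_shape, face_color, facemodel)
    return images  # (1, 224, 224, 4) RGBA image clamped to [0, 1]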