def test_load_obj(device='cpu'):
    """Smoke-test TriangleMesh.from_obj: geometry, textures, and adjacency."""
    mesh = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh.cuda()
    # Vertices and faces are non-empty (N, 3) tensors.
    assert mesh.vertices.shape[0] > 0
    assert mesh.vertices.shape[1] == 3
    assert mesh.faces.shape[0] > 0
    assert mesh.faces.shape[1] == 3

    # Loading with texture coordinates populates the textures tensor.
    mesh = TriangleMesh.from_obj('tests/model.obj', with_vt=True, texture_res=4)
    if device == 'cuda':
        mesh.cuda()
    assert mesh.textures.shape[0] > 0

    # Adjacency structures are populated when requested.
    mesh = TriangleMesh.from_obj('tests/model.obj', with_vt=True,
                                 texture_res=4, enable_adjacency=True)
    for attr in ('vv', 'edges', 'vv_count', 've', 've_count', 'ff',
                 'ff_count', 'ef', 'ef_count', 'ee', 'ee_count'):
        assert getattr(mesh, attr).shape[0] > 0
    if device == 'cuda':
        mesh.cuda()
def test_laplacian_loss(device='cpu'):
    """Laplacian loss is positive for differing meshes, zero for identical ones."""
    mesh1 = TriangleMesh.from_obj('tests/model.obj')
    mesh2 = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh1.cuda()
        mesh2.cuda()
    # Scale the second mesh so the two copies differ.
    mesh2.vertices = mesh2.vertices * 1.5
    assert kal.metrics.mesh.laplacian_loss(mesh1, mesh2) > 0
    assert kal.metrics.mesh.laplacian_loss(mesh1, mesh1) == 0
def test_chamfer_distance(device='cpu'):
    """Chamfer distance runs at several sample counts and is ~0 for identical meshes."""
    mesh1 = TriangleMesh.from_obj('tests/model.obj')
    mesh2 = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh1.cuda()
        mesh2.cuda()
    mesh2.vertices = mesh2.vertices * 1.5
    # Smoke-test different sampling densities; the return values were stored
    # in an unused local before — call for side effects only.
    kal.metrics.mesh.chamfer_distance(mesh1, mesh2, num_points=100)
    kal.metrics.mesh.chamfer_distance(mesh1, mesh2, num_points=200)
    # Self-distance is small but may be nonzero because points are sampled.
    assert kal.metrics.mesh.chamfer_distance(mesh1, mesh1, num_points=500) <= 0.1
def test_compute_laplacian(device='cpu'):
    """The Laplacian of the (sphere-like) test mesh should be near zero everywhere."""
    mesh = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh.cuda()
    lap = mesh.compute_laplacian()
    # No vertex may have a squared Laplacian norm above 0.1.
    squared_norms = (lap ** 2).sum(dim=1)
    assert (squared_norms > .1).sum() == 0
def test_check_sign_fast(device='cuda'):
    """check_sign_fast classifies random points as both inside and outside the mesh."""
    mesh = TriangleMesh.from_obj('tests/model.obj')
    mesh.to(device)
    points = torch.rand(1000, 3).to(device) - .5
    signs = kal.rep.SDF.check_sign_fast(mesh, points)
    # Idiomatic truthiness instead of `== True`/`== False` comparisons;
    # the redundant `.float()` before summing is also dropped.
    # Assumes `signs` is a boolean tensor — consistent with the
    # `(signs == False)` usage in the original.
    assert signs.sum() > 0
    assert (~signs).sum() > 0
def __init__(self, mesh_path, image_path, args):
    """Camera-pose optimization model: fixed mesh/texture, learnable camera position.

    Args:
        mesh_path (str): path to the .obj mesh to render.
        image_path (str): path to the reference image (binarized to a silhouette).
        args: namespace providing at least `texture_size`.
    """
    super(Model, self).__init__()
    self.args = args

    ###########################
    # Load mesh
    ###########################
    mesh = TriangleMesh.from_obj(mesh_path)
    mesh.cuda()
    # Normalize into unit cube, and expand such that batch size = 1
    vertices = normalize_vertices(mesh.vertices).unsqueeze(0)
    faces = mesh.faces.unsqueeze(0)
    self.register_buffer('vertices', vertices)
    self.register_buffer('faces', faces)

    ###########################
    # Initialize texture (NMR format)
    ###########################
    textures = torch.ones(
        1, self.faces.shape[1], self.args.texture_size,
        self.args.texture_size, self.args.texture_size, 3,
        dtype=torch.float32, device='cuda')
    self.register_buffer('textures', textures)

    ###########################
    # Load target image
    ###########################
    # Binarize the reference image: any RGB channel > 0.1 counts as foreground.
    image_ref = torch.from_numpy(
        (imread(image_path)[:, :, :3].max(-1) > 0.1).astype(
            np.float32))[None, ::]
    self.register_buffer('image_ref', image_ref)
    # NOTE: removed leftover debug code that re-saved the reference image to
    # '<image_path>.test.png' via a mid-function skimage import on every
    # construction of the model.

    ###########################
    # Initialize camera position
    ###########################
    self.camera_position = nn.Parameter(
        torch.from_numpy(np.array(INITIAL_CAMERA_POS, dtype=np.float32)))

    ###########################
    # Setup renderer
    ###########################
    renderer = Renderer(camera_mode='look_at')
    renderer.eye = self.camera_position
    self.renderer = renderer
def test_pointcloud_to_voxelgrid(device='cpu'):
    """A 1000-point cloud sampled from the mesh voxelizes to a 32^3 grid."""
    mesh = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh.cuda()
    pts, _ = kal.conversions.trianglemesh_to_pointcloud(mesh, 1000)
    voxels = kal.conversions.pointcloud_to_voxelgrid(pts, 32, 0.1)
    assert voxels.shape == (32, 32, 32)
def _load_points(obj_path):
    """Load an obj, normalize and reorient its vertices, sample 2048 surface points."""
    mesh = TriangleMesh.from_obj(obj_path)
    # Zero-center, then shrink (dataset-specific scale constants).
    mesh.vertices = mesh.vertices - torch.mean(mesh.vertices, 0)
    mesh.vertices = mesh.vertices / 128
    # Swap the Y/Z axes and mirror X to match the target coordinate frame.
    mesh.vertices = mesh.vertices[:, [0, 2, 1]]
    mesh.vertices[:, 0] *= -1
    mesh.vertices = mesh.vertices / 1.7
    return mesh.sample(2048)[0]
def load_cuboid():
    """Load the reference obj, zero-center it, normalize its scale, move to DEVICE.

    NOTE(review): despite the function name, this loads 'sphere.obj' —
    confirm whether the name or the asset is intended.
    """
    obj_path = os.path.join(os.path.dirname(__file__), 'objects', 'sphere.obj')
    mesh = TriangleMesh.from_obj(obj_path)
    mesh.vertices -= torch.mean(mesh.vertices, 0)  # zero center
    # Scale so the mean vertex norm is 1.
    mesh.vertices /= torch.mean(torch.norm(mesh.vertices, dim=1))
    mesh.to(DEVICE)
    return mesh
def test_laplacian_smoothing(device='cpu'):
    """Three iterations of Laplacian smoothing must actually move the vertices."""
    mesh = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh.cuda()
    before = mesh.vertices.clone()
    mesh.laplacian_smoothing(iterations=3)
    after = mesh.vertices.clone()
    assert torch.abs(before - after).sum() > 0
def test_sample_mesh(device='cpu'):
    """mesh.sample(n) returns an (n, 3) point tensor."""
    mesh = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh.cuda()
    # Exact shape comparison: the previous set-based check
    # (set(points.shape) == set([n, 3])) would also accept a transposed
    # (3, n) result.
    points, choices = mesh.sample(100)
    assert tuple(points.shape) == (100, 3)
    points, choices = mesh.sample(10000)
    assert tuple(points.shape) == (10000, 3)
def test_to_trianglemesh_to_pointcloud(device):
    """trianglemesh_to_pointcloud returns an (n, 3) tensor for n requested points."""
    mesh = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh.cuda()
    # Exact shape comparison: the previous set-equality check would also
    # accept a transposed (3, n) tensor.
    points, _ = kal.conversions.trianglemesh_to_pointcloud(mesh, 10)
    assert tuple(points.shape) == (10, 3)
    points, _ = kal.conversions.trianglemesh_to_pointcloud(mesh, 10000)
    assert tuple(points.shape) == (10000, 3)
def test_edge_length(device='cpu'):
    """Edge length grows with mesh scale and collapses to zero for a degenerate mesh."""
    mesh = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh.cuda()
    baseline = kal.metrics.mesh.edge_length(mesh)
    mesh.vertices = mesh.vertices * 2
    scaled = kal.metrics.mesh.edge_length(mesh)
    # Doubling the vertex coordinates must increase the edge length.
    assert baseline < scaled
    # Collapsing every vertex to the origin yields zero edge length.
    mesh.vertices = mesh.vertices * 0
    assert kal.metrics.mesh.edge_length(mesh) == 0
def test_adj_computations(device='cpu'):
    """Dense and sparse adjacency matrices must describe the same matrix."""
    mesh = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh.cuda()
    dense = mesh.compute_adjacency_matrix_full()
    sparse = mesh.compute_adjacency_matrix_sparse().coalesce()
    # One row/column per vertex.
    assert dense.shape[0] == mesh.vertices.shape[0]
    # Densifying the sparse matrix must reproduce the dense one exactly.
    assert ((dense - sparse.to_dense()) != 0).sum() == 0
def test_trianglemesh_to_sdf(device):
    """The SDF built from the mesh returns distances of both signs for random points."""
    mesh = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh.cuda()
    # Removed leftover debug `print(mesh.device)`.
    sdf = kal.conversions.trianglemesh_to_sdf(mesh)
    distances = sdf(torch.rand(100, 3).to(device) - .5)
    # One distance per query point (exact shape instead of set-equality).
    assert tuple(distances.shape) == (100,)
    # Some points fall outside (positive) and some inside (negative).
    assert (distances > 0).sum() > 0
    assert (distances < 0).sum() > 0
def main():
    """Render a vertex-colored mesh from rotating viewpoints into a GIF."""
    args = parse_arguments()

    ###########################
    # Load mesh
    ###########################
    mesh = TriangleMesh.from_obj(args.mesh)
    vertices = mesh.vertices
    faces = mesh.faces.int()
    # Expand such that batch size = 1
    vertices = vertices[None, :, :].cuda()
    faces = faces[None, :, :].cuda()

    ###########################
    # Normalize mesh position
    ###########################
    vertices_max = vertices.max()
    vertices_min = vertices.min()
    vertices_middle = (vertices_max + vertices_min) / 2.
    vertices = (vertices - vertices_middle) * MESH_SIZE

    ###########################
    # Generate vertex color
    ###########################
    # BUG FIX: torch reductions take `keepdim` (singular); `keepdims` is the
    # NumPy spelling and is not the documented torch keyword.
    vert_min = torch.min(vertices, dim=1, keepdim=True)[0]
    vert_max = torch.max(vertices, dim=1, keepdim=True)[0]
    # Map each vertex position into [0, 1] RGB per batch element.
    colors = (vertices - vert_min) / (vert_max - vert_min)

    ###########################
    # Render
    ###########################
    renderer = Renderer(HEIGHT, WIDTH, mode='VertexColor')
    loop = tqdm.tqdm(list(range(0, 360, 4)))
    loop.set_description('Drawing')
    os.makedirs(args.output_path, exist_ok=True)
    writer = imageio.get_writer(
        os.path.join(args.output_path, 'example.gif'), mode='I')
    for azimuth in loop:
        renderer.set_look_at_parameters([90 - azimuth],
                                        [CAMERA_ELEVATION],
                                        [CAMERA_DISTANCE])
        predictions, _, _ = renderer(points=[vertices, faces[0].long()],
                                     colors=[colors])
        image = predictions.detach().cpu().numpy()[0]
        writer.append_data((image * 255).astype(np.uint8))
    writer.close()
def test_trianglemesh_to_voxelgrid(device):
    """Voxelizing the mesh yields cubic grids of the requested resolution."""
    mesh = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh.cuda()
    # Exact shape checks: the original set-based comparison degenerates to
    # {32} == {32} and would accept any all-32 shape of any rank.
    voxels = kal.conversions.trianglemesh_to_voxelgrid(mesh, 32, normalize='unit')
    assert tuple(voxels.shape) == (32, 32, 32)
    voxels = kal.conversions.trianglemesh_to_voxelgrid(mesh, 64, normalize='unit')
    assert tuple(voxels.shape) == (64, 64, 64)
def test_load_and_save_Tensors(device='cpu'):
    """A mesh saved with save_tensors and reloaded is identical to the original."""
    mesh1 = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh1.cuda()
    mesh1.save_tensors('copy.npz')
    assert os.path.isfile('copy.npz')
    mesh2 = TriangleMesh.load_tensors('copy.npz')
    if device == 'cuda':
        mesh2.cuda()
    # The round trip must preserve geometry exactly.
    assert torch.abs(mesh1.vertices - mesh2.vertices).sum() == 0
    assert torch.abs(mesh1.faces - mesh2.faces).sum() == 0
    os.remove("copy.npz")
def main():
    """Render 'banana.obj' with the Soft Rasterizer from rotating viewpoints."""
    filename_input = os.path.join(data_dir, 'banana.obj')
    filename_output = os.path.join(output_directory, 'example1.gif')

    ###########################
    # camera settings
    ###########################
    camera_distance = 2
    elevation = 30

    ###########################
    # load object
    ###########################
    mesh = TriangleMesh.from_obj(filename_input)
    vertices = mesh.vertices
    faces = mesh.faces.int()
    face_textures = (faces).clone()
    vertices = vertices[None, :, :].cuda()
    faces = faces[None, :, :].cuda()
    # BUG FIX: the batched/CUDA tensor was previously discarded
    # (`face_textures[None, :, :].cuda()` without assignment).
    face_textures = face_textures[None, :, :].cuda()

    ###########################
    # normalize verts
    ###########################
    vertices_max = vertices.max()
    vertices_min = vertices.min()
    vertices_middle = (vertices_max + vertices_min) / 2.
    vertices = vertices - vertices_middle
    coef = 5
    vertices = vertices * coef

    ###########################
    # Soft Rasterizer
    ###########################
    textures = torch.ones(1, faces.shape[1], 2, 3, dtype=torch.float32).cuda()
    mesh = sr.Mesh(vertices, faces, textures)
    renderer = sr.SoftRenderer(camera_mode='look_at')
    loop = tqdm.tqdm(list(range(0, 360, 4)))
    loop.set_description('Drawing SR')
    writer = imageio.get_writer(
        os.path.join(output_directory_sr, 'rotation.gif'), mode='I')
    for azimuth in loop:
        # reset_() restores the mesh before each transformed render.
        mesh.reset_()
        renderer.transform.set_eyes_from_angles(
            camera_distance, elevation, azimuth)
        images = renderer.render_mesh(mesh)
        image = images.detach().cpu().numpy()[0].transpose((1, 2, 0))
        writer.append_data((255 * image).astype(np.uint8))
    writer.close()
def main():
    """Render 'banana.obj' with DIB-R (vertex-color mode) from rotating viewpoints."""
    filename_input = os.path.join(data_dir, 'banana.obj')
    filename_output = os.path.join(output_directory, 'example1.gif')

    ###########################
    # camera settings
    ###########################
    camera_distance = 2
    elevation = 30

    ###########################
    # load object
    ###########################
    mesh = TriangleMesh.from_obj(filename_input)
    vertices = mesh.vertices
    faces = mesh.faces.int()
    face_textures = (faces).clone()
    vertices = vertices[None, :, :].cuda()
    faces = faces[None, :, :].cuda()
    # BUG FIX: the batched/CUDA tensor was previously discarded
    # (`face_textures[None, :, :].cuda()` without assignment).
    face_textures = face_textures[None, :, :].cuda()

    ###########################
    # normalize verts
    ###########################
    vertices_max = vertices.max()
    vertices_min = vertices.min()
    vertices_middle = (vertices_max + vertices_min) / 2.
    vertices = vertices - vertices_middle
    coef = 5
    vertices = vertices * coef

    ###########################
    # DIB-Renderer
    ###########################
    renderer = Dib_Renderer(256, 256, mode='VertexColor')
    # Uniform white per-vertex colors.
    textures = torch.ones(1, vertices.shape[1], 3).cuda()
    loop = tqdm.tqdm(list(range(0, 360, 4)))
    loop.set_description('Drawing Dib_Renderer VertexColor')
    writer = imageio.get_writer(
        os.path.join(output_directory_dib, 'rotation_VertexColor.gif'),
        mode='I')
    for azimuth in loop:
        renderer.set_look_at_parameters([90 - azimuth],
                                        [elevation],
                                        [camera_distance])
        predictions, _, _ = renderer.forward(
            points=[vertices, faces[0].long()], colors=[textures])
        image = predictions.detach().cpu().numpy()[0]
        writer.append_data((image * 255).astype(np.uint8))
    writer.close()
def test_point_to_surface(device='cpu'):
    """Point-to-surface distances for near-mesh samples are small and positive."""
    # Seed both RNGs so the sampled points are reproducible.
    torch.manual_seed(1)
    torch.cuda.manual_seed(1)
    mesh = TriangleMesh.from_obj('tests/model.obj')
    points = torch.rand(500, 3) - .5
    if device == 'cuda':
        mesh.cuda()
        points = points.cuda()
    distance = kal.metrics.mesh.point_to_surface(points, mesh)
    # No distance exceeds 1, none is non-positive, and the total stays tiny.
    assert (distance > 1).sum() == 0
    assert (distance <= 0).sum() == 0
    assert distance.sum() <= .2
def main():
    """Render a textured mesh with NMR from rotating viewpoints into a GIF."""
    args = parse_arguments()

    ###########################
    # Load mesh
    ###########################
    mesh = TriangleMesh.from_obj(args.mesh)
    mesh.cuda()
    # Normalize into unit cube, and expand such that batch size = 1
    vertices = normalize_vertices(mesh.vertices).unsqueeze(0)
    faces = mesh.faces.unsqueeze(0)

    ###########################
    # Generate texture (NMR format)
    ###########################
    tex = args.texture_size
    textures = torch.ones(1, faces.shape[1], tex, tex, tex, 3,
                          dtype=torch.float32, device='cuda')

    ###########################
    # Render
    ###########################
    renderer = Renderer(camera_mode='look_at')
    loop = tqdm.tqdm(range(0, 360, 4))
    loop.set_description('Drawing')
    os.makedirs(args.output_path, exist_ok=True)
    writer = imageio.get_writer(
        os.path.join(args.output_path, 'example1.gif'), mode='I')
    for azimuth in loop:
        renderer.eye = get_points_from_angles(
            args.camera_distance, args.elevation, azimuth)
        images, _, _ = renderer(vertices, faces, textures)
        # [image_size, image_size, RGB]
        image = images.detach()[0].permute(1, 2, 0).cpu().numpy()
        writer.append_data((255 * image).astype(np.uint8))
    writer.close()
def __getitem__(self, idx):
    """Return the sample at index idx.

    Returns:
        dict: {'attributes': {...}, 'data': {'vertices': ..., 'faces': ...}}.
    """
    # Read in the list of vertices and faces from the obj file.
    obj_location = self.paths[idx]
    mesh = TriangleMesh.from_obj(obj_location)
    category = self.categories[idx]

    # Return these tensors as a dictionary.
    data = dict()
    attributes = dict()
    data['vertices'] = mesh.vertices
    data['faces'] = mesh.faces
    attributes['rep'] = 'Mesh'
    attributes['name'] = obj_location
    # BUG FIX: this was `attributes['class']: cagetory` — a no-op annotation
    # statement with a typo'd name — so the class label was never stored.
    attributes['class'] = category
    return {'attributes': attributes, 'data': data}
def __init__(self, mesh_path, image_path, args):
    """Texture-optimization model: fixed geometry, learnable NMR texture.

    Args:
        mesh_path (str): path to the .obj mesh to render.
        image_path (str): path to the RGB target image.
        args: namespace providing at least `texture_size`.
    """
    super(Model, self).__init__()
    self.args = args

    ###########################
    # Load mesh
    ###########################
    mesh = TriangleMesh.from_obj(mesh_path)
    mesh.cuda()
    # Normalize into unit cube, and expand such that batch size = 1
    vertices = normalize_vertices(mesh.vertices).unsqueeze(0)
    faces = mesh.faces.unsqueeze(0)
    self.register_buffer('vertices', vertices)
    self.register_buffer('faces', faces)

    ###########################
    # Initialize texture (NMR format)
    ###########################
    tex = self.args.texture_size
    textures = torch.zeros(
        1, self.faces.shape[1], tex, tex, tex, 3,
        dtype=torch.float32, device='cuda')
    # The texture is the quantity being optimized.
    self.textures = nn.Parameter(textures)

    ###########################
    # Load target image
    ###########################
    # HWC uint8 -> [0, 1] float CHW, keep RGB only, add a batch dimension.
    image_ref = torch.from_numpy(
        imread(image_path).astype('float32') / 255.
    ).permute(2, 0, 1)[:3, ...][None, ::]
    self.register_buffer('image_ref', image_ref)

    ###########################
    # Setup renderer
    ###########################
    renderer = Renderer(camera_mode='look_at')
    # Pure ambient lighting so shading does not interfere with the texture fit.
    renderer.light_intensity_directional = 0.0
    renderer.light_intensity_ambient = 1.0
    self.renderer = renderer
def test_check_sign(device='cpu'):
    """check_sign separates inside and outside points for the test mesh."""
    mesh = TriangleMesh.from_obj('tests/model.obj')
    if device == 'cuda':
        mesh.cuda()

    # Random points around the mesh: both signs must occur.
    points = torch.rand((1000, 3)) - .5
    signs = kal.rep.SDF.check_sign(mesh, points)
    assert (signs == True).sum() > 0
    assert (signs == False).sum() > 0

    # Points clustered at the center are all inside.
    points = (torch.rand((1000, 3)) - .5) * .001
    signs = kal.rep.SDF.check_sign(mesh, points)
    assert (signs == False).sum() == 0

    # Points far from the mesh are all outside.
    points = torch.rand((1000, 3)) + 10
    signs = kal.rep.SDF.check_sign(mesh, points)
    assert (signs == True).sum() == 0
def generate_acd_dataset(dataset_path: str, data_type='train'):
    """Build an approximate-convex-decomposition dataset from ShapeNet meshes.

    For each unique canonical obj, decompose it into 6 convex hulls and write
    their vertices/faces to ``mesh_NNNNNN.json`` under *dataset_path*.

    Args:
        dataset_path (str): output directory for the JSON files.
        data_type (str): dataset split passed to ShapeNetDataset.
    """
    dataset = ShapeNetDataset(data_type)
    n = 0
    # Use a set for O(1) duplicate detection (was an O(n) list scan).
    seen_paths = set()
    for data in tqdm(dataset.shapenet_datas):
        if data.canonical_obj_path in seen_paths:
            continue
        seen_paths.add(data.canonical_obj_path)
        try:
            mesh = TriangleMesh.from_obj(data.canonical_obj_path)
            convex_hulls = get_trimesh_from_kaolinmesh(
                mesh).convex_decomposition(6)
        except Exception as e:
            print('[ACD Exception] obj path = %s, %s' %
                  (data.canonical_obj_path, str(e)))
            continue
        try:
            # NOTE(review): this only logs a hull-count mismatch — samples
            # with != 6 hulls are still written below. Confirm whether a
            # `continue` was intended here.
            if len(convex_hulls) != 6:
                print('convex hull num != 6, obj path =',
                      data.canonical_obj_path)
        except Exception as e:
            print('[Hull Num Exception] obj path = %s, %s' %
                  (data.canonical_obj_path, str(e)))
            continue
        vertices, faces = [], []
        for convex_hull in convex_hulls:
            vertices.append(np.array(convex_hull.vertices).tolist())
            faces.append(np.array(convex_hull.faces).tolist())
        # `with` already closes the file; the old explicit f.close() inside
        # the with-block was redundant.
        out_path = os.path.join(dataset_path, 'mesh_%.6d.json' % n)
        with open(out_path, 'w') as f:
            json.dump({
                'vertices': vertices,
                'faces': faces,
                'obj': data.canonical_obj_path,
            }, f)
        n += 1
def test_from_tensors(device='cpu'):
    """A mesh can be rebuilt via from_tensors out of its own tensor components."""
    mesh = TriangleMesh.from_obj('tests/model.obj', with_vt=True, texture_res=4)
    if device == 'cuda':
        mesh.cuda()
    # Clone every component so the rebuilt mesh owns its data.
    verts = mesh.vertices.clone()
    faces = mesh.faces.clone()
    uvs = mesh.uvs.clone()
    face_textures = mesh.face_textures.clone()
    textures = mesh.textures.clone()
    mesh = TriangleMesh.from_tensors(
        verts, faces, uvs=uvs,
        face_textures=face_textures, textures=textures)
def main():
    """Render 'banana.obj' once with neural_renderer (NMR) at azimuth 0."""
    filename_input = os.path.join(data_dir, 'banana.obj')
    filename_output = os.path.join(output_directory, 'example1.gif')

    ###########################
    # camera settings
    ###########################
    camera_distance = 2
    elevation = 30

    ###########################
    # load object
    ###########################
    mesh = TriangleMesh.from_obj(filename_input)
    vertices = mesh.vertices
    faces = mesh.faces.int()
    face_textures = (faces).clone()
    vertices = vertices[None, :, :].cuda()
    faces = faces[None, :, :].cuda()
    # BUG FIX: the batched/CUDA tensor was previously discarded
    # (`face_textures[None, :, :].cuda()` without assignment).
    face_textures = face_textures[None, :, :].cuda()

    ###########################
    # normalize verts
    ###########################
    vertices_max = vertices.max()
    vertices_min = vertices.min()
    vertices_middle = (vertices_max + vertices_min) / 2.
    vertices = vertices - vertices_middle
    coef = 5
    vertices = vertices * coef

    ###########################
    # NMR
    ###########################
    textures = torch.ones(1, faces.shape[1], 2, 2, 2, 3,
                          dtype=torch.float32).cuda()
    renderer = nr.Renderer(camera_mode='look_at')
    # Single render from a fixed viewpoint (the rotation loop that once lived
    # here was dead commented-out code and has been removed).
    renderer.eye = nr.get_points_from_angles(camera_distance, elevation, 0)
    images, _, _ = renderer(vertices, faces, textures)
def __getitem__(self, item) -> dict:
    """Return one sample: RGB image, silhouette, 2048 mesh points, rotation angle."""
    rgb_path = self.rgb_paths[item]
    silhouette_path = self.silhouette_paths[item]
    obj_path = self.obj_paths[item]

    rgb = img_transform(Image.open(rgb_path))
    silhouette = img_transform(Image.open(silhouette_path))
    # Sample a fixed-size point cloud from the mesh surface.
    points = TriangleMesh.from_obj(obj_path).sample(2048)[0]

    angle = 0.0
    if AUGMENT_3D['rotate']:
        # Jointly rotate both images; the applied angle is returned alongside.
        rgb, silhouette, angle = self.rotate_img(rgb, silhouette)

    return {
        'rgb': rgb,
        'silhouette': silhouette,
        'points': points,
        'angle': angle,
    }
def __init__(self, mesh_path, image_path, args):
    """Vertex-optimization model: learnable vertices, fixed faces and texture.

    Args:
        mesh_path (str): path to the .obj mesh to deform.
        image_path (str): path to the target image (averaged to grayscale).
        args: namespace providing at least `texture_size`.
    """
    super(Model, self).__init__()
    self.args = args

    ###########################
    # Load mesh
    ###########################
    mesh = TriangleMesh.from_obj(mesh_path)
    mesh.cuda()
    # Normalize into unit cube, and expand such that batch size = 1
    vertices = normalize_vertices(mesh.vertices).unsqueeze(0)
    faces = mesh.faces.unsqueeze(0)
    # Vertices are the quantity being optimized; faces stay fixed.
    self.vertices = nn.Parameter(vertices)
    self.register_buffer('faces', faces)

    ###########################
    # Generate texture (NMR format)
    ###########################
    tex = self.args.texture_size
    textures = torch.ones(
        1, self.faces.shape[1], tex, tex, tex, 3,
        dtype=torch.float32, device='cuda')
    self.register_buffer('textures', textures)

    ###########################
    # Load target image
    ###########################
    # Average the channels to grayscale in [0, 1] and add a batch dimension.
    image_ref = torch.from_numpy(
        imread(image_path).astype(np.float32).mean(-1) / 255.)[None, ::]
    self.register_buffer('image_ref', image_ref)

    ###########################
    # Setup renderer
    ###########################
    self.renderer = Renderer(camera_mode='look_at')