def test_copyright(self):
    root_dir = get_pytorch3d_dir()
    extensions = ("py", "cu", "cuh", "cpp", "h", "hpp", "sh")
    expect = "Copyright (c) Facebook, Inc. and its affiliates.\n"
    files_missing_copyright_header = []

    for extension in extensions:
        for path in root_dir.glob(f"**/*.{extension}"):
            excluded_files = (
                "pytorch3d/transforms/external/kornia_angle_axis_to_rotation_matrix.py",
                "pytorch3d/csrc/pulsar/include/fastermath.h",
            )
            if in_conda_build:
                excluded_files += (
                    "run_test.py",
                    "run_test.sh",
                    "conda_test_runner.sh",
                    "conda_test_env_vars.sh",
                )
            if str(path).endswith(excluded_files):
                continue
            with open(path) as f:
                firstline = f.readline()
                if firstline.startswith(("# -*-", "#!", "/*")):
                    firstline = f.readline()
                if not firstline.endswith(expect):
                    files_missing_copyright_header.append(str(path))

    if len(files_missing_copyright_header) != 0:
        self.fail("\n".join(files_missing_copyright_header))
def test_texture_sampling_cow(self):
    # test texture sampling for the cow example by converting
    # the cow mesh and its texture uv to a pointcloud with texture
    device = torch.device("cuda:0")
    obj_dir = get_pytorch3d_dir() / "docs/tutorials/data"
    obj_filename = obj_dir / "cow_mesh/cow.obj"

    for text_type in ("uv", "atlas"):
        # Load mesh + texture
        if text_type == "uv":
            mesh = load_objs_as_meshes(
                [obj_filename], device=device, load_textures=True, texture_wrap=None
            )
        elif text_type == "atlas":
            mesh = load_objs_as_meshes(
                [obj_filename],
                device=device,
                load_textures=True,
                create_texture_atlas=True,
                texture_atlas_size=8,
                texture_wrap=None,
            )

        points, normals, textures = sample_points_from_meshes(
            mesh, num_samples=50000, return_normals=True, return_textures=True
        )
        pointclouds = Pointclouds(points, normals=normals, features=textures)

        for pos in ("front", "back"):
            # Init rasterizer settings
            if pos == "back":
                azim = 0.0
            elif pos == "front":
                azim = 180
            R, T = look_at_view_transform(2.7, 0, azim)
            cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

            raster_settings = PointsRasterizationSettings(
                image_size=512, radius=1e-2, points_per_pixel=1
            )

            rasterizer = PointsRasterizer(
                cameras=cameras, raster_settings=raster_settings
            )
            compositor = NormWeightedCompositor()
            renderer = PointsRenderer(rasterizer=rasterizer, compositor=compositor)
            images = renderer(pointclouds)

            rgb = images[0, ..., :3].squeeze().cpu()
            if DEBUG:
                filename = "DEBUG_cow_mesh_to_pointcloud_%s_%s.png" % (
                    text_type,
                    pos,
                )
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / filename
                )
def test_valid_ipynbs(self):
    # Check that the ipython notebooks are valid json.
    root_dir = get_pytorch3d_dir()
    tutorials_dir = root_dir / "docs" / "tutorials"
    tutorials = sorted(tutorials_dir.glob("*.ipynb"))

    for tutorial in tutorials:
        with open(tutorial) as f:
            json.load(f)
def test_name_clash(self):
    # For setup.py, all translation units need distinct names, so we
    # cannot have foo.cu and foo.cpp, even in different directories.
    source_dir = get_pytorch3d_dir() / "pytorch3d"

    stems = []
    for extension in [".cu", ".cpp"]:
        files = source_dir.glob(f"**/*{extension}")
        stems.extend(f.stem for f in files)

    counter = Counter(stems)
    for k, v in counter.items():
        self.assertEqual(v, 1, f"Too many files with stem {k}.")
def test_enumerated_notes(self):
    # Check that the notes are all referenced in sidebars.json.
    root_dir = get_pytorch3d_dir()
    notes_dir = root_dir / "docs" / "notes"
    notes_on_disk = sorted(i.stem for i in notes_dir.glob("*.md"))

    json_file = root_dir / "website" / "sidebars.json"
    with open(json_file) as f:
        cfg_dict = json.load(f)

    listed_in_json = []
    for section in cfg_dict["docs"].values():
        listed_in_json.extend(section)

    self.assertListEqual(sorted(listed_in_json), notes_on_disk)
def test_enumerated_ipynbs(self):
    # Check that the tutorials are all referenced in tutorials.json.
    root_dir = get_pytorch3d_dir()
    tutorials_dir = root_dir / "docs" / "tutorials"
    tutorials_on_disk = sorted(i.stem for i in tutorials_dir.glob("*.ipynb"))

    json_file = root_dir / "website" / "tutorials.json"
    with open(json_file) as f:
        cfg_dict = json.load(f)

    listed_in_json = []
    for section in cfg_dict.values():
        listed_in_json.extend(item["id"] for item in section)

    self.assertListEqual(sorted(listed_in_json), tutorials_on_disk)
def test_no_import_cycles(self):
    # Check that each module of pytorch3d imports cleanly,
    # which may fail if there are import cycles.
    with unittest.mock.patch.dict(sys.modules):
        for module in list(sys.modules):
            # If any part of pytorch3d is already imported,
            # the test would be pointless.
            if module.startswith("pytorch3d"):
                sys.modules.pop(module, None)

        root_dir = get_pytorch3d_dir() / "pytorch3d"
        for module_file in root_dir.glob("**/*.py"):
            if module_file.stem in ("__init__", "plotly_vis"):
                continue
            relative_module = str(module_file.relative_to(root_dir))[:-3]
            module = "pytorch3d." + relative_module.replace("/", ".")
            with self.subTest(name=module):
                with unittest.mock.patch.dict(sys.modules):
                    importlib.import_module(module)
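# A minimal standalone sketch (not part of the original test suite) of the
# sys.modules sandboxing technique used above: unittest.mock.patch.dict takes a
# snapshot of sys.modules on entry and restores it on exit, so modules imported
# inside the block leave no trace afterwards. The helper name and its argument
# are illustrative assumptions.
def _import_in_sandbox(module_name):
    import importlib
    import sys
    import unittest.mock

    with unittest.mock.patch.dict(sys.modules):
        importlib.import_module(module_name)
    # After the block exits, sys.modules is back to its previous contents,
    # so a later call re-imports the module from scratch.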
def test_render_cow(self):
    """
    Test that a larger textured mesh is rendered correctly in a non-square image.
    """
    device = torch.device("cuda:0")
    obj_dir = get_pytorch3d_dir() / "docs/tutorials/data"
    obj_filename = obj_dir / "cow_mesh/cow.obj"

    # Load mesh + texture
    verts, faces, aux = load_obj(
        obj_filename, device=device, load_textures=True, texture_wrap=None
    )
    tex_map = list(aux.texture_images.values())[0]
    tex_map = tex_map[None, ...].to(faces.textures_idx.device)
    textures = TexturesUV(
        maps=tex_map, faces_uvs=[faces.textures_idx], verts_uvs=[aux.verts_uvs]
    )
    mesh = Meshes(verts=[verts], faces=[faces.verts_idx], textures=textures)

    # Init rasterizer settings
    R, T = look_at_view_transform(2.7, 0, 180)
    cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

    raster_settings = RasterizationSettings(
        image_size=(512, 1024), blur_radius=0.0, faces_per_pixel=1
    )

    # Init shader settings
    materials = Materials(device=device)
    lights = PointLights(device=device)
    lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]

    blend_params = BlendParams(
        sigma=1e-1,
        gamma=1e-4,
        background_color=torch.tensor([1.0, 1.0, 1.0], device=device),
    )

    # Init renderer
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
        shader=SoftPhongShader(
            lights=lights,
            cameras=cameras,
            materials=materials,
            blend_params=blend_params,
        ),
    )

    # Load reference image
    image_ref = load_rgb_image("test_cow_image_rectangle.png", DATA_DIR)

    for bin_size in [0, None]:
        # Check that both naive and coarse-to-fine rasterization produce the same output.
        renderer.rasterizer.raster_settings.bin_size = bin_size
        images = renderer(mesh)
        rgb = images[0, ..., :3].squeeze().cpu()

        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_cow_image_rectangle.png"
            )

        # NOTE some pixels can be flaky
        cond1 = torch.allclose(rgb, image_ref, atol=0.05)
        self.assertTrue(cond1)
def test_pointcloud_with_features(self):
    device = torch.device("cuda:0")
    file_dir = get_pytorch3d_dir() / "docs/tutorials/data"
    pointcloud_filename = file_dir / "PittsburghBridge/pointcloud.npz"

    # Note, this file is too large to check in to the repo.
    # Download the file to run the test locally.
    if not path.exists(pointcloud_filename):
        url = (
            "https://dl.fbaipublicfiles.com/pytorch3d/data/"
            "PittsburghBridge/pointcloud.npz"
        )
        msg = (
            "pointcloud.npz not found, download from %s, save it at the path %s, and rerun"
            % (url, pointcloud_filename)
        )
        warnings.warn(msg)
        return True

    # Load point cloud
    pointcloud = np.load(pointcloud_filename)
    verts = torch.Tensor(pointcloud["verts"]).to(device)
    rgb_feats = torch.Tensor(pointcloud["rgb"]).to(device)

    verts.requires_grad = True
    rgb_feats.requires_grad = True
    point_cloud = Pointclouds(points=[verts], features=[rgb_feats])

    R, T = look_at_view_transform(20, 10, 0)
    cameras = FoVOrthographicCameras(device=device, R=R, T=T, znear=0.01)

    raster_settings = PointsRasterizationSettings(
        # Set image_size so it is not a multiple of 16 (min bin_size)
        # in order to confirm that there are no errors in coarse rasterization.
        image_size=500,
        radius=0.003,
        points_per_pixel=10,
    )

    renderer = PointsRenderer(
        rasterizer=PointsRasterizer(
            cameras=cameras, raster_settings=raster_settings
        ),
        compositor=AlphaCompositor(),
    )

    images = renderer(point_cloud)

    # Load reference image
    filename = "bridge_pointcloud.png"
    image_ref = load_rgb_image("test_%s" % filename, DATA_DIR)

    for bin_size in [0, None]:
        # Check that both naive and coarse-to-fine rasterization produce the same output.
        renderer.rasterizer.raster_settings.bin_size = bin_size
        images = renderer(point_cloud)
        rgb = images[0, ..., :3].squeeze().cpu()

        if DEBUG:
            filename = "DEBUG_%s" % filename
            Image.fromarray((rgb.detach().numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / filename
            )
        self.assertClose(rgb, image_ref, atol=0.015)

    # Check grad exists.
    grad_images = torch.randn_like(images)
    images.backward(grad_images)
    self.assertIsNotNone(verts.grad)
    self.assertIsNotNone(rgb_feats.grad)
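# A hedged convenience sketch (not part of the original tests) for fetching the
# PittsburghBridge point cloud that test_pointcloud_with_features expects on
# disk; the URL is the one referenced in the warning above, while the helper
# name and the use of urllib are assumptions for illustration only.
def _download_bridge_pointcloud(dest_path):
    from urllib.request import urlretrieve

    url = (
        "https://dl.fbaipublicfiles.com/pytorch3d/data/"
        "PittsburghBridge/pointcloud.npz"
    )
    # Saves pointcloud.npz to dest_path, e.g.
    # docs/tutorials/data/PittsburghBridge/pointcloud.npz under the repo root.
    urlretrieve(url, str(dest_path))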
import unittest
from io import StringIO

import torch
from common_testing import TestCaseMixin, get_pytorch3d_dir, get_tests_dir
from iopath.common.file_io import PathManager
from pytorch3d.io import IO, load_obj, load_objs_as_meshes, save_obj
from pytorch3d.io.mtl_io import (
    _bilinear_interpolation_grid_sample,
    _bilinear_interpolation_vectorized,
    _parse_mtl,
)
from pytorch3d.renderer import TexturesAtlas, TexturesUV, TexturesVertex
from pytorch3d.structures import Meshes, join_meshes_as_batch
from pytorch3d.utils import torus


DATA_DIR = get_tests_dir() / "data"
TUTORIAL_DATA_DIR = get_pytorch3d_dir() / "docs/tutorials/data"


class TestMeshObjIO(TestCaseMixin, unittest.TestCase):
    def test_load_obj_simple(self):
        obj_file = "\n".join(
            [
                "# this is a comment",  # Comments should be ignored.
                "v 0.1 0.2 0.3",
                "v 0.2 0.3 0.4",
                "v 0.3 0.4 0.5",
                "v  0.4 0.5 0.6",  # some obj files have multiple spaces after v
                "f 1 2 3",
                "f 1 2 4 3 1",  # Polygons should be split into triangles
            ]
        )
        obj_file = StringIO(obj_file)
        verts, faces, aux = load_obj(obj_file)