Code Example #1
    def test_texture_sampling_cow(self):
        # test texture sampling for the cow example by converting
        # the cow mesh and its texture uv to a pointcloud with texture

        device = torch.device("cuda:0")
        obj_dir = get_pytorch3d_dir() / "docs/tutorials/data"
        obj_filename = obj_dir / "cow_mesh/cow.obj"

        for text_type in ("uv", "atlas"):
            # Load mesh + texture
            if text_type == "uv":
                mesh = load_objs_as_meshes(
                    [obj_filename], device=device, load_textures=True, texture_wrap=None
                )
            elif text_type == "atlas":
                mesh = load_objs_as_meshes(
                    [obj_filename],
                    device=device,
                    load_textures=True,
                    create_texture_atlas=True,
                    texture_atlas_size=8,
                    texture_wrap=None,
                )

            points, normals, textures = sample_points_from_meshes(
                mesh, num_samples=50000, return_normals=True, return_textures=True
            )
            pointclouds = Pointclouds(points, normals=normals, features=textures)

            for pos in ("front", "back"):
                # Init rasterizer settings
                if pos == "back":
                    azim = 0.0
                elif pos == "front":
                    azim = 180
                R, T = look_at_view_transform(2.7, 0, azim)
                cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

                raster_settings = PointsRasterizationSettings(
                    image_size=512, radius=1e-2, points_per_pixel=1
                )

                rasterizer = PointsRasterizer(
                    cameras=cameras, raster_settings=raster_settings
                )
                compositor = NormWeightedCompositor()
                renderer = PointsRenderer(rasterizer=rasterizer, compositor=compositor)
                images = renderer(pointclouds)

                rgb = images[0, ..., :3].squeeze().cpu()
                if DEBUG:
                    filename = "DEBUG_cow_mesh_to_pointcloud_%s_%s.png" % (
                        text_type,
                        pos,
                    )
                    Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                        DATA_DIR / filename
                    )
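The test above omits its imports. A sketch of the ones it appears to rely on (assuming a recent PyTorch3D; DEBUG, DATA_DIR and get_pytorch3d_dir are helpers defined in the surrounding test module, not library imports):

import numpy as np
import torch
from PIL import Image
from pytorch3d.io import load_objs_as_meshes
from pytorch3d.ops import sample_points_from_meshes
from pytorch3d.renderer import (FoVPerspectiveCameras, NormWeightedCompositor,
                                PointsRasterizationSettings, PointsRasterizer,
                                PointsRenderer, look_at_view_transform)
from pytorch3d.structures import Pointclouds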
Code Example #2
    def __init__(self, settings, mode="test", obj_class=None):
        super().__init__(settings)
        self.mode = mode
        # 1. FILELIST
        with open(self.settings.FILELIST[mode], "r") as f:
            self.list = [line.replace("\n", "") for line in f]

        # 2. MESH
        meshlist = [
            os.path.join(self.settings.DATA_ROOT, "models", m, "textured.obj")
            for m in self.settings.MODEL_DICT.keys()
        ]
        self.mesh = load_objs_as_meshes(meshlist)

        self.obj_class = obj_class
        # 3. POINT USED FOR ICP
        pointlist = [
            os.path.join(self.settings.DATA_ROOT, "models", m, "points.xyz")
            for m in self.settings.MODEL_DICT.keys()
        ]
        point = [
            torch.from_numpy(np.loadtxt(f, dtype=float))[:settings.NUM_POINTS]
            for f in pointlist
        ]
        self.point = point
Code Example #3
def load_ShapeNet_pointclouds(data_path, split_path, codes_path, this_device):
    codes = torch.load(codes_path)
    #print(codes['latent_codes']['weight'].shape)
    split = json.load(open(split_path))
    object_list = get_objectnames_from_split(split)
    object_paths = []
    i = 0
    for object_name in object_list:
        object_paths.append(
            os.path.join(data_path, object_name,
                         "models/model_normalized.obj"))
    start1 = time.perf_counter()
    print(start1)
    print("Start loading of objects.")
    input_meshes = load_objs_as_meshes(object_paths,
                                       device=this_device,
                                       load_textures=False)
    loading_time = time.perf_counter() - start1
    print("Loading of objects finished. Time necessary to load " +
          str(len(object_paths)) + " objects is: " + str(loading_time) +
          " seconds.")
    print("Start sampling of points.")
    start2 = time.perf_counter()
    number_samples = 4096
    input_pointclouds = sample_points_from_meshes(input_meshes, number_samples)
    sampling_time = time.perf_counter() - start2
    print("Sampling of points finished. Time necessary to sample " +
          str(number_samples) + " points from " + str(len(object_paths)) +
          " objects each is: " + str(sampling_time) + " seconds.")

    return input_pointclouds, codes['latent_codes']['weight']
Code Example #4
File: colab_util.py Project: sergeyprokudin/pifuhd
def get_segmentation(obj_path, image_path, renderer):

    input_image = cv2.imread(image_path)
    input_image = input_image[:, :input_image.shape[1] // 3]
    input_image = cv2.resize(input_image, (1024, 1024))

    # Setup
    device = torch.device("cuda:0")
    torch.cuda.set_device(device)

    # Load obj file
    verts_rgb_colors = get_verts_rgb_colors(obj_path)
    verts_rgb_colors = torch.from_numpy(verts_rgb_colors).to(device)
    textures = TexturesVertex(verts_features=verts_rgb_colors)
    wo_textures = TexturesVertex(verts_features=torch.ones_like(verts_rgb_colors) * 0.75)  # used for mesh_wo_tex below

    # Load obj
    mesh = load_objs_as_meshes([obj_path], device=device)

    # Set mesh
    verts = mesh.verts_list()   # public accessors instead of the private _verts_list / _faces_list
    faces = mesh.faces_list()
    mesh_w_tex = Meshes(verts, faces, textures)
    mesh_wo_tex = Meshes(verts, faces, wo_textures)

    R, T = look_at_view_transform(1.8, 0, 0, device=device)
    images_w_tex = renderer(mesh_w_tex, R=R, T=T)
    images_w_tex = np.clip(images_w_tex[0, ..., :3].cpu().numpy(), 0.0,
                           1.0)[:, :, ::-1] * 255
    images_wo_tex = renderer(mesh_wo_tex, R=R, T=T)
    images_wo_tex = np.clip(images_wo_tex[0, ..., :3].cpu().numpy(), 0.0,
                            1.0)[:, :, ::-1] * 255

    return input_image, images_w_tex, images_wo_tex
Code Example #5
    def __init__(self, model_configs, intrinsics, unit_in_meters):
        self.model_configs = model_configs
        self.unit_in_meters = unit_in_meters
        self.intrinsics = intrinsics

        self.f = torch.tensor((self.intrinsics.fu, self.intrinsics.fv),
                              dtype=torch.float32,
                              device=device).unsqueeze(0)  # dim = (1, 2)
        self.p = torch.tensor(
            (self.intrinsics.ppu, self.intrinsics.ppv),
            dtype=torch.float32,
            device=device,
        ).unsqueeze(0)  # dim = (1, 2)
        self.img_size = (self.intrinsics.width, self.intrinsics.height)
        print(self.img_size)
        self.meshs = {}
        for model_config, unit_in_meter in zip(self.model_configs,
                                               self.unit_in_meters):
            self.meshs[model_config.name] = {
                "mesh":
                load_objs_as_meshes(
                    [
                        os.path.join(model_config.path,
                                     model_config.model_filename)
                    ],
                    device=device,
                ).scale_verts(unit_in_meter),
                "config":
                model_config,
            }
Code Example #6
 def __init__(self,
              dir: str,
              rasterization_settings: dict,
              znear: float = 1.0,
              zfar: float = 1000.0,
              scale_min: float = 0.5,
              scale_max: float = 2.0,
              device: str = 'cuda'):
     super(ToyNeuralGraphicsDataset, self).__init__()
     device = torch.device(device)
     self.device = device
     self.scale_min = scale_min
     self.scale_max = scale_max
     self.scale_range = scale_max - scale_min
     objs = [
         os.path.join(dir, f) for f in os.listdir(dir) if f.endswith('.obj')
     ]
     self.meshes = load_objs_as_meshes(objs, device=device)
     R, T = look_at_view_transform(0, 0, 0)
     self.cameras = FoVPerspectiveCameras(R=R,
                                          T=T,
                                          znear=znear,
                                          zfar=zfar,
                                          device=device)
     self.renderer = MeshRenderer(rasterizer=MeshRasterizer(
         cameras=self.cameras,
         raster_settings=RasterizationSettings(**rasterization_settings),
     ),
                                  shader=HardFlatShader(
                                      device=device,
                                      cameras=self.cameras,
                                  ))
Code Example #7
def generate_video_from_obj(obj_path, video_path, renderer):
    # Setup
    device = torch.device("cuda:0")
    torch.cuda.set_device(device)

    # Load obj file
    verts_rgb_colors = get_verts_rgb_colors(obj_path)
    verts_rgb_colors = torch.from_numpy(verts_rgb_colors).to(device)
    textures = Textures(verts_rgb=verts_rgb_colors)
    wo_textures = Textures(verts_rgb=torch.ones_like(verts_rgb_colors) * 0.75)

    # Load obj
    mesh = load_objs_as_meshes([obj_path], device=device)

    # Set mesh
    verts = mesh.verts_list()   # public accessors instead of the private _verts_list / _faces_list
    faces = mesh.faces_list()
    mesh_w_tex = Meshes(verts, faces, textures)
    mesh_wo_tex = Meshes(verts, faces, wo_textures)

    # create VideoWriter
    fourcc = cv2.VideoWriter_fourcc(*'MP4V')
    out = cv2.VideoWriter(video_path, fourcc, 20.0, (1024, 512))

    for i in tqdm(range(90)):
        R, T = look_at_view_transform(1.8, 0, i * 4, device=device)
        images_w_tex = renderer(mesh_w_tex, R=R, T=T)
        images_w_tex = np.clip(images_w_tex[0, ..., :3].cpu().numpy(), 0.0,
                               1.0)[:, :, ::-1] * 255
        images_wo_tex = renderer(mesh_wo_tex, R=R, T=T)
        images_wo_tex = np.clip(images_wo_tex[0, ..., :3].cpu().numpy(), 0.0,
                                1.0)[:, :, ::-1] * 255
        image = np.concatenate([images_w_tex, images_wo_tex], axis=1)
        out.write(image.astype('uint8'))
    out.release()
Code Example #8
File: p3d_renderer.py Project: benjiebob/SMALViewer
    def __init__(self, image_size):
        super(Renderer, self).__init__()

        self.image_size = image_size
        self.dog_obj = load_objs_as_meshes(['data/dog_B/dog_B/dog_B_tpose.obj'])

        raster_settings = RasterizationSettings(
            image_size=self.image_size, 
            blur_radius=0.0, 
            faces_per_pixel=1, 
            bin_size=None
        )

        R, T = look_at_view_transform(2.7, 0, 0) 
        cameras = OpenGLPerspectiveCameras(device=R.device, R=R, T=T)
        lights = PointLights(device=R.device, location=[[0.0, 1.0, 0.0]])

        self.renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=cameras, 
                raster_settings=raster_settings
            ),
            shader=SoftPhongShader(
                device=R.device, 
                cameras=cameras,
                lights=lights
            )
        )
Code Example #9
    def project_to_image_plane(self, vertices, texture_map):
        # self.renderer
        if False:  # hardcoded example
            with torch.no_grad():
                transform = transforms.Compose([
                    transforms.ToTensor(),
                ])

                direc = Path('bareteeth.000001.26_C/minibatch_0_Netural_0')
                tex = Image.open(direc / 'mesh.png')
                texture_map = transform(tex).unsqueeze(0)
                mesh = load_objs_as_meshes([direc / 'mesh.obj'],
                                           device=self.device)
                vertices = mesh.verts_padded()
        # final_obj = os.path.join('out/', 'final_model.obj')
        # import datetime
        # now = datetime.datetime.now()
        # final_obj = f'{self.save_dir}/web/images/{now.strftime("%Y-%m-%d_%H:%M:%S")}_fake_mesh.obj'
        # final_obj = f'{self.save_dir}/web/images/{self.opt.epoch_count:03d}_fake_mesh.obj'
        # save_obj(final_obj, vertices[0], torch.from_numpy(self.flamelayer.faces.astype(np.int32)))
        self.estimated_texture_map = texture_map.permute(0, 2, 3, 1)
        texture = Textures(self.estimated_texture_map,
                           faces_uvs=self.faces_uvs1,
                           verts_uvs=self.verts_uvs1)

        self.estimated_mesh = make_mesh(vertices.squeeze(),
                                        self.flamelayer.faces, False, texture)
        # save_obj(final_obj, estimated_mesh.verts_packed(), torch.from_numpy(self.flamelayer.faces.astype(np.int32)),
        #          verts_uvs=estimated_mesh.textures.verts_uvs_packed(), texture_map=self.estimated_texture_map,
        #          faces_uvs=estimated_mesh.textures.faces_uvs_packed())

        images = self.renderer(self.estimated_mesh, materials=self.materials)
        silhouette_images = self.silhouette_renderer(
            self.estimated_mesh, materials=self.materials)[..., 3].unsqueeze(0)
        negative_silhouette_images = self.negative_silhouette_renderer(
            self.estimated_mesh, materials=self.materials)[..., 3].unsqueeze(0)
        if self.opt.verbose:
            transforms.ToPILImage()(silhouette_images.squeeze().permute(
                0, 1).cpu()).save('out/silhouette.png')
            # transforms.ToPILImage()(images.squeeze().permute(2, 0, 1).cpu()).save('out/img.png')
        cull_backfaces_mask = (
            1 - (silhouette_images - negative_silhouette_images).abs())
        img = (images[0][..., :3].detach().cpu().numpy() * 255).astype(
            np.uint8)
        if self.opt.verbose:
            Image.fromarray(img).save('out/test1.png')
        images = Normalize(images)
        silhouette_images = silhouette_images.clamp(0, 1)
        segmented_3d_model_image = self.segmentation_3d_renderer(
            self.estimated_mesh)
        # Image.fromarray(
        #     ((255 * segmentation_image[0, ..., :3]).squeeze().detach().cpu().numpy().astype(np.uint8))).save(
        #     str('out/segmentatino_texture.png')
        # )
        return images[..., :3].permute(
            0, 3, 1, 2
        ), silhouette_images, cull_backfaces_mask, segmented_3d_model_image[
            ..., :3].permute(0, 3, 1, 2)
Code Example #10
    def load_pytorch(mesh_path):
        """

        :param mesh_path:  Path to mesh file.
        :return: TriangleMesh
        """

        checks.check_file_exists(mesh_path)
        pytorch_mesh = load_objs_as_meshes([mesh_path])
        return TriangleMesh(mesh_path=mesh_path, pytorch_mesh=pytorch_mesh)
Code Example #11
File: shapenet_base.py Project: zeta1999/pytorch3d
    def render(self,
               model_ids: Optional[List[str]] = None,
               categories: Optional[List[str]] = None,
               sample_nums: Optional[List[int]] = None,
               idxs: Optional[List[int]] = None,
               shader_type=HardPhongShader,
               device="cpu",
               **kwargs) -> torch.Tensor:
        """
        If a list of model_ids is supplied, render all the objects with the given model_ids.
        If no model_ids are supplied, but categories and sample_nums are specified, randomly
        select a number of objects (number specified in sample_nums) in the given categories
        and render these objects. If instead a list of idxs is specified, check if the idxs
        are all valid and render models by the given idxs. Otherwise, randomly select a number
        (first number in sample_nums, default is set to be 1) of models from the loaded dataset
        and render these models.

        Args:
            model_ids: List[str] of model_ids of models intended to be rendered.
            categories: List[str] of categories intended to be rendered. categories
                and sample_nums must be specified at the same time. categories can be given
                in the form of synset offsets or labels, or a combination of both.
            sample_nums: List[int] of number of models to be randomly sampled from
                each category. Could also contain one single integer, in which case it
                will be broadcasted for every category.
            idxs: List[int] of indices of models to be rendered in the dataset.
            shader_type: Select shading. Valid options include HardPhongShader (default),
                SoftPhongShader, HardGouraudShader, SoftGouraudShader, HardFlatShader,
                SoftSilhouetteShader.
            device: torch.device on which the tensors should be located.
            **kwargs: Accepts any of the kwargs that the renderer supports.

        Returns:
            Batch of rendered images of shape (N, H, W, 3).
        """
        paths = self._handle_render_inputs(model_ids, categories, sample_nums,
                                           idxs)
        meshes = load_objs_as_meshes(paths, device=device, load_textures=False)
        meshes.textures = Textures(
            verts_rgb=torch.ones_like(meshes.verts_padded(), device=device))
        cameras = kwargs.get("cameras", OpenGLPerspectiveCameras()).to(device)
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=cameras,
                raster_settings=kwargs.get("raster_settings",
                                           RasterizationSettings()),
            ),
            shader=shader_type(
                device=device,
                cameras=cameras,
                lights=kwargs.get("lights", PointLights()).to(device),
            ),
        )
        return renderer(meshes)
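A hedged usage sketch for the render method documented above, invoked through ShapeNetCore (which inherits it); the dataset path and category label are illustrative:

from pytorch3d.datasets import ShapeNetCore
from pytorch3d.renderer import FoVPerspectiveCameras, look_at_view_transform

shapenet = ShapeNetCore("path/to/ShapeNetCore.v1", version=1)
R, T = look_at_view_transform(1.0, 1.0, 90)
images = shapenet.render(
    categories=["airplane"],   # labels or synset offsets both work
    sample_nums=[2],           # render two randomly sampled models
    device="cuda:0",
    cameras=FoVPerspectiveCameras(R=R, T=T),
)  # -> tensor of shape (2, H, W, 3)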
Code Example #12
def load_untextured_mesh(mesh_path, device):
    mesh = load_objs_as_meshes([mesh_path], device=device, load_textures = False)
    verts, faces_idx, _ = load_obj(mesh_path)
    faces = faces_idx.verts_idx
    verts_rgb = torch.ones_like(verts)[None] # (1, V, 3)
    textures = Textures(verts_rgb=verts_rgb.to(device))
    mesh_no_texture = Meshes(
        verts=[verts.to(device)],
        faces=[faces.to(device)],
        textures=textures
        )
    return mesh_no_texture
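The helper above targets the legacy Textures(verts_rgb=...) API. A sketch of the same idea with the newer TexturesVertex class (an assumption about the installed PyTorch3D version; names are illustrative):

import torch
from pytorch3d.io import load_obj
from pytorch3d.renderer import TexturesVertex
from pytorch3d.structures import Meshes

def load_untextured_mesh_v2(mesh_path, device):
    # Load geometry only and attach flat white per-vertex colors.
    verts, faces_idx, _ = load_obj(mesh_path)
    verts = verts.to(device)
    faces = faces_idx.verts_idx.to(device)
    textures = TexturesVertex(verts_features=torch.ones_like(verts)[None])
    return Meshes(verts=[verts], faces=[faces], textures=textures)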
Code Example #13
    def test_join_meshes(self):
        """
        Test that join_mesh joins single meshes and the corresponding values are
        consistent with the single meshes.
        """

        # Load cow mesh.
        DATA_DIR = (Path(__file__).resolve().parent.parent /
                    'docs/tutorials/data')
        cow_obj = DATA_DIR / 'cow_mesh/cow.obj'

        cow_mesh = load_objs_as_meshes([cow_obj])
        cow_verts, cow_faces = cow_mesh.get_mesh_verts_faces(0)
        # Join a batch of three single meshes and check that the values are consistent
        # with the individual meshes.
        cow_mesh3 = join_mesh([cow_mesh, cow_mesh, cow_mesh])

        def check_item(x, y, offset):
            self.assertClose(torch.cat([x, x + offset, x + 2 * offset], dim=1),
                             y)

        check_item(cow_mesh.verts_padded(), cow_mesh3.verts_padded(), 0)
        check_item(cow_mesh.faces_padded(), cow_mesh3.faces_padded(),
                   cow_mesh._V)

        # Test the joining of meshes of different sizes.
        teapot_obj = DATA_DIR / 'teapot.obj'
        teapot_mesh = load_objs_as_meshes([teapot_obj])
        teapot_verts, teapot_faces = teapot_mesh.get_mesh_verts_faces(0)

        mix_mesh = join_mesh([cow_mesh, teapot_mesh])
        mix_verts, mix_faces = mix_mesh.get_mesh_verts_faces(0)
        self.assertEqual(len(mix_mesh), 1)

        self.assertClose(mix_verts[:cow_mesh._V], cow_verts)
        self.assertClose(mix_faces[:cow_mesh._F], cow_faces)
        self.assertClose(mix_verts[cow_mesh._V:], teapot_verts)
        self.assertClose(mix_faces[cow_mesh._F:], teapot_faces + cow_mesh._V)
Code Example #14
    def test_normal_rendering(self):
        device = torch.device('cuda:0')

        torch.cuda.set_device(device)
        # Set paths
        data_dir = Path(__file__).resolve().parent / 'data'
        data_dir.mkdir(exist_ok=True)
        obj_dir = Path(
            __file__).resolve().parent.parent / "docs/tutorials/data"
        obj_filename = obj_dir / "cow_mesh/cow.obj"

        # Load obj file
        mesh = load_objs_as_meshes([obj_filename], device=device)

        # try:
        #     texture_image = mesh.textures.maps_padded()
        # except:
        #     pass

        R, T = look_at_view_transform(2.55, 10, 180)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

        # Define the settings for rasterization and shading. Here we set the output image to be of size
        # 512x512. As we are rendering images for visualization purposes only we will set faces_per_pixel=1
        # and blur_radius=0.0. We also set bin_size and max_faces_per_bin to None, which ensures that
        # the faster coarse-to-fine rasterization method is used. Refer to rasterize_meshes.py for
        # explanations of these parameters. Refer to docs/notes/renderer.md for an explanation of
        # the difference between naive and coarse-to-fine rasterization.
        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=0.0,
            faces_per_pixel=1,
            perspective_correct=True,
        )

        renderer = MeshRenderer(rasterizer=MeshRasterizer(
            cameras=cameras, raster_settings=raster_settings),
                                shader=NormalShader(
                                    device=device,
                                    cameras=cameras,
                                ))
        images = renderer(mesh)
        # cv2.imshow('render_normal_texture.png',
        #            ((255 * images[0, ..., :3]).squeeze().cpu().numpy().astype(np.uint8))[..., ::-1])
        Image.fromarray(
            ((255 * images[0, ..., :3]).squeeze().cpu().numpy().astype(
                np.uint8))).save(str(data_dir / 'render_normal_texture.png'))
        # cv2.imwrite(str(data_dir / 'render_normal_texture.png'),
        #             ((255 * images[0, ..., :3]).squeeze().cpu().numpy().astype(np.uint8))[..., ::-1])
        self.assertTrue((data_dir / 'render_normal_texture.png').exists())
Code Example #15
    def __init__(self, mesh_dir, device, shuffle=True, max_num=9999):
        self.len = min(len(fnmatch.filter(os.listdir(mesh_dir), '*.obj')),
                       max_num)
        self.mesh_dir = mesh_dir
        self.shuffle = shuffle

        self.mesh_filenames = fnmatch.filter(os.listdir(mesh_dir), '*.obj')
        self.mesh_filenames = self.mesh_filenames[:self.len]
        self.mesh_files = []
        for m in self.mesh_filenames:
            self.mesh_files.append(os.path.join(self.mesh_dir, m))

        print('Meshes: ', self.mesh_files)
        self.meshes = []
        for mesh in self.mesh_files:
            self.meshes.append(
                load_objs_as_meshes([mesh],
                                    device=device,
                                    create_texture_atlas=True,
                                    texture_atlas_size=1))
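Because texture_atlas_size=1, every face's atlas patch is a single texel, i.e. one flat RGB color per face. A quick hedged shape check (dataset stands for an instance of the class above):

atlas = dataset.meshes[0].textures.atlas_padded()
print(atlas.shape)  # (N, F, R, R, 3) -> here (1, F, 1, 1, 3)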
Code Example #16
    def test_save_obj_with_texture(self):
        device = torch.device('cuda:0')
        torch.cuda.set_device(device)
        # Set paths
        data_dir = Path(__file__).parent / 'data'
        data_dir.mkdir(exist_ok=True)
        obj_dir = Path(
            __file__).resolve().parent.parent / "docs/tutorials/data"
        obj_filename = obj_dir / "cow_mesh/cow.obj"
        final_obj = data_dir / "cow_exported.obj"
        # Load obj file
        mesh = load_objs_as_meshes([obj_filename], device=device)

        try:
            texture_image = mesh.textures.maps_padded()
            save_obj(final_obj,
                     mesh.verts_packed(),
                     mesh.faces_packed(),
                     verts_uvs=mesh.textures.verts_uvs_packed(),
                     texture_map=texture_image,
                     faces_uvs=mesh.textures.faces_uvs_packed())

        except:
            pass
Code Example #17
File: train_parser.py Project: garvita-tiwari/sizer
    def __init__(self, train_dataset, val_dataset, opt):
        self.device = opt['train']['device']

        ### garment data from experiment params
        self.garment_class = opt['experiment']['garment_class']
        self.garment_layer = opt['experiment']['garment_layer']
        self.res = opt['experiment']['resolution']
        self.gender = opt['experiment']['gender']
        self.feat = opt['experiment']['feat']
        self.num_neigh = opt['experiment']['num_neigh']

        ## Create the SMPL layer from TailorNet; any SMPL PyTorch implementation can be used (TailorNet also provides a high-res SMPL)
        self.smpl = TorchSMPL4Garment(gender=self.gender).to(self.device)
        self.smpl_faces_np = self.smpl.faces
        self.smpl_faces = torch.tensor(self.smpl_faces_np.astype('float32'),
                                       dtype=torch.long).cuda()

        # load training parameters etc
        self.layer_size, self.smpl_size = get_res_vert(self.garment_class,
                                                       self.res,
                                                       self.garment_layer)
        if self.garment_layer == 'Body':  #todo: move this in the function
            self.layer_size = 4448
        # get active vert id
        input_dim = self.smpl_size * 3
        if self.feat == 'vn':
            input_dim = self.smpl_size * 6
        output_dim = self.layer_size * self.num_neigh

        layer_neigh = np.array(
            np.load(
                os.path.join(
                    opt['data']['meta_data'],
                    "{}/{}_{}_{}_gar_order.npy".format(self.garment_class,
                                                       self.garment_layer,
                                                       self.res,
                                                       self.num_neigh))))
        self.layer_neigh = torch.from_numpy(layer_neigh).cuda()

        #separate for body layer
        body_vert = range(self.smpl_size)
        vert_id_upper = get_vid(opt['data']['meta_data'], 'UpperClothes',
                                self.garment_class, self.res)
        vert_id_lower = get_vid(opt['data']['meta_data'], 'Pants',
                                self.garment_class, self.res)
        body_vert2 = [i for i in body_vert if i not in vert_id_upper]
        body_vert2 = [i for i in body_vert2 if i not in vert_id_lower]
        self.body_vert = body_vert2

        all_neighbors = np.array([[vid] for k in layer_neigh for vid in k])
        self.neigh_id2 = all_neighbors
        if self.garment_layer == 'Body':
            self.idx2 = torch.from_numpy(self.neigh_id2).view(
                len(self.body_vert), self.num_neigh).cuda()
        else:
            self.idx2 = torch.from_numpy(self.neigh_id2).view(
                self.layer_size, self.num_neigh).cuda()

        self.vert_indices = get_vid(opt['data']['meta_data'],
                                    self.garment_layer, self.garment_class,
                                    self.res)
        self.vert_indices = torch.tensor(
            self.vert_indices.astype(np.int64)).long().cuda()

        if self.garment_layer == 'Body':
            #self.garment_f_np = self.body_f_np
            #self.garment_f_np = Mesh(filename='/BS/garvita2/static00/ClothSize_data/gcn_assets/{}_lres_{}.obj'.format(garment, layer)).f
            self.garment_f_torch = self.smpl_faces
        else:
            mesh = load_objs_as_meshes([
                os.path.join(
                    opt['data']['meta_data'], "{}/{}_{}.obj".format(
                        self.garment_class, self.garment_layer, self.res))
            ],
                                       device=self.device)
            mesh_verts, mesh_faces = mesh.get_mesh_verts_faces(0)
            self.garment_f_torch = mesh_faces

        self.num_faces = len(self.garment_f_torch)
        self.out_layer = torch.nn.Softmax(dim=2)
        #geo_weights = np.load(os.path.join(DATA_DIR, 'real_g5_geo_weights.npy'))  todo: do we need this???
        self.d_tol = 0.002

        #create exp name based on experiment params
        self.loss_weight = {
            'wgt': opt['train']['wgt_wgt'],
            'data': opt['train']['data_wgt'],
            'spr_wgt': opt['train']['spr_wgt']
        }

        self.exp_name = '{}_{}_{}_{}_{}_{}_{}'.format(
            self.loss_weight['wgt'], self.loss_weight['data'],
            self.loss_weight['spr_wgt'], self.garment_layer,
            self.garment_class, self.feat, self.num_neigh)
        self.exp_path = '{}/{}/'.format(opt['experiment']['root_dir'],
                                        self.exp_name)
        self.checkpoint_path = self.exp_path + 'checkpoints/'.format(
            self.exp_name)
        if not os.path.exists(self.checkpoint_path):
            print(self.checkpoint_path)
            os.makedirs(self.checkpoint_path)
        self.writer = SummaryWriter(self.exp_path +
                                    'summary'.format(self.exp_name))

        self.val_min = None
        self.train_min = None
        self.loss = opt['train']['loss_type']
        self.n_part = opt['experiment']['num_part']
        self.loss_mse = torch.nn.MSELoss()

        self.batch_size = opt['train']['batch_size']

        self.relu = nn.ReLU()
        #weight initialiser
        vert_id = self.vert_indices.cpu().numpy()
        init_weights = torch.from_numpy(
            np.array([
                layer_neigh[i] == vert_id[i] for i in range(self.layer_size)
            ]).astype('int64'))
        self.init_weight = torch.stack(
            [init_weights for _ in range(self.batch_size)]).cuda()
        ######endddd####################

        ## train and val dataset
        self.train_dataset = train_dataset
        self.val_dataset = val_dataset

        ### load model and optimizer
        self.model = getattr(net_modules, opt['model']['name'])
        self.model = self.model(opt['model'], input_dim,
                                output_dim).to(self.device)
        self.optimizer = getattr(optim, opt['train']['optimizer'])
        self.optimizer = self.optimizer(self.model.parameters(),
                                        opt['train']['optimizer_param'])

        if self.loss == 'l1':
            self.loss_l1 = torch.nn.L1Loss()
        elif self.loss == 'l2':
            self.loss_l1 = torch.nn.MSELoss()
Code Example #18
 def get_pytorch_mesh(self) -> Meshes:
     return load_objs_as_meshes([self.mesh_path])
Code Example #19
    def test_texture_map(self):
        """
        Test a mesh with a texture map is loaded and rendered correctly.
        The pupils in the eyes of the cow should always be looking to the left.
        """
        device = torch.device("cuda:0")
        DATA_DIR = (Path(__file__).resolve().parent.parent /
                    "docs/tutorials/data")
        obj_filename = DATA_DIR / "cow_mesh/cow.obj"

        # Load mesh + texture
        mesh = load_objs_as_meshes([obj_filename], device=device)

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 0, 0)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(image_size=512,
                                                blur_radius=0.0,
                                                faces_per_pixel=1,
                                                bin_size=0)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)

        # Place light behind the cow in world space. The front of
        # the cow is facing the -z direction.
        lights.location = torch.tensor([0.0, 0.0, 2.0], device=device)[None]

        # Init renderer
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=TexturedSoftPhongShader(lights=lights,
                                           cameras=cameras,
                                           materials=materials),
        )
        images = renderer(mesh)
        rgb = images[0, ..., :3].squeeze().cpu()

        # Load reference image
        image_ref = load_rgb_image("test_texture_map_back.png")

        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_texture_map_back.png")

        self.assertTrue(torch.allclose(rgb, image_ref, atol=0.05))

        # Check grad exists
        [verts] = mesh.verts_list()
        verts.requires_grad = True
        mesh2 = Meshes(verts=[verts],
                       faces=mesh.faces_list(),
                       textures=mesh.textures)
        images = renderer(mesh2)
        images[0, ...].sum().backward()
        self.assertIsNotNone(verts.grad)

        ##########################################
        # Check rendering of the front of the cow
        ##########################################

        R, T = look_at_view_transform(2.7, 0, 180)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

        # Move light to the front of the cow in world space
        lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]
        images = renderer(mesh, cameras=cameras, lights=lights)
        rgb = images[0, ..., :3].squeeze().cpu()

        # Load reference image
        image_ref = load_rgb_image("test_texture_map_front.png")

        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_texture_map_front.png")

        #################################
        # Add blurring to rasterization
        #################################
        R, T = look_at_view_transform(2.7, 0, 180)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        blend_params = BlendParams(sigma=5e-4, gamma=1e-4)
        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=np.log(1.0 / 1e-4 - 1.0) * blend_params.sigma,
            faces_per_pixel=100,
            bin_size=0,
        )

        images = renderer(
            mesh.clone(),
            cameras=cameras,
            raster_settings=raster_settings,
            blend_params=blend_params,
        )
        rgb = images[0, ..., :3].squeeze().cpu()

        # Load reference image
        image_ref = load_rgb_image("test_blurry_textured_rendering.png")

        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_blurry_textured_rendering.png")

        self.assertTrue(torch.allclose(rgb, image_ref, atol=0.05))
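A note on the blurred pass above: with eps = 1e-4 the code sets blur_radius = log(1/eps - 1) * sigma, and sigmoid(-blur_radius / sigma) = 1 / (1 + (1/eps - 1)) = eps. Read loosely, faces are rasterized out to the distance at which their sigmoid weight in soft blending has decayed to about 1e-4 (this reading is a gloss on the formula, not stated in the original test).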
Code Example #20
def generate_cow_renders(num_views: int = 40,
                         data_dir: str = DATA_DIR,
                         azimuth_range: float = 180):
    """
    This function generates `num_views` renders of a cow mesh.
    The renders are generated from viewpoints sampled at uniformly distributed
    azimuth intervals. The elevation is kept constant so that the camera's
    vertical position coincides with the equator.

    For a more detailed explanation of this code, please refer to the
    docs/tutorials/fit_textured_mesh.ipynb notebook.

    Args:
        num_views: The number of generated renders.
        data_dir: The folder that contains the cow mesh files. If the cow mesh
            files do not exist in the folder, this function will automatically
            download them.
        azimuth_range: Half-width, in degrees, of the span of uniformly
            sampled camera azimuth angles.

    Returns:
        cameras: A batch of `num_views` `FoVPerspectiveCameras` from which the
            images are rendered.
        images: A tensor of shape `(num_views, height, width, 3)` containing
            the rendered images.
        silhouettes: A tensor of shape `(num_views, height, width)` containing
            the rendered silhouettes.
    """

    # set the paths

    # download the cow mesh if not done before
    cow_mesh_files = [
        os.path.join(data_dir, fl)
        for fl in ("cow.obj", "cow.mtl", "cow_texture.png")
    ]
    if any(not os.path.isfile(f) for f in cow_mesh_files):
        os.makedirs(data_dir, exist_ok=True)
        os.system(
            f"wget -P {data_dir} " +
            "https://dl.fbaipublicfiles.com/pytorch3d/data/cow_mesh/cow.obj")
        os.system(
            f"wget -P {data_dir} " +
            "https://dl.fbaipublicfiles.com/pytorch3d/data/cow_mesh/cow.mtl")
        os.system(
            f"wget -P {data_dir} " +
            "https://dl.fbaipublicfiles.com/pytorch3d/data/cow_mesh/cow_texture.png"
        )

    # Setup
    if torch.cuda.is_available():
        device = torch.device("cuda:0")
        torch.cuda.set_device(device)
    else:
        device = torch.device("cpu")

    # Load obj file
    obj_filename = os.path.join(data_dir, "cow.obj")
    mesh = load_objs_as_meshes([obj_filename], device=device)

    # We scale normalize and center the target mesh to fit in a sphere of radius 1
    # centered at (0,0,0). (scale, center) will be used to bring the predicted mesh
    # to its original center and scale. Note that normalizing the target mesh
    # speeds up the optimization but is not necessary!
    verts = mesh.verts_packed()
    N = verts.shape[0]
    center = verts.mean(0)
    scale = max((verts - center).abs().max(0)[0])
    mesh.offset_verts_(-(center.expand(N, 3)))
    mesh.scale_verts_((1.0 / float(scale)))

    # Get a batch of viewing angles.
    elev = torch.linspace(0, 0, num_views)  # keep constant
    azim = torch.linspace(-azimuth_range, azimuth_range, num_views) + 180.0

    # Place a point light in front of the object. As mentioned above, the front of
    # the cow is facing the -z direction.
    lights = PointLights(device=device, location=[[0.0, 0.0, -3.0]])

    # Initialize an OpenGL perspective camera that represents a batch of different
    # viewing angles. All the cameras helper methods support mixed type inputs and
    # broadcasting. So we can view the camera from a distance of dist=2.7, and
    # then specify elevation and azimuth angles for each viewpoint as tensors.
    R, T = look_at_view_transform(dist=2.7, elev=elev, azim=azim)
    cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

    # Define the settings for rasterization and shading. Here we set the output
    # image to be of size 128X128. As we are rendering images for visualization
    # purposes only we will set faces_per_pixel=1 and blur_radius=0.0. Refer to
    # rasterize_meshes.py for explanations of these parameters.  We also leave
    # bin_size and max_faces_per_bin to their default values of None, which sets
    # their values using heuristics and ensures that the faster coarse-to-fine
    # rasterization method is used.  Refer to docs/notes/renderer.md for an
    # explanation of the difference between naive and coarse-to-fine rasterization.
    raster_settings = RasterizationSettings(image_size=128,
                                            blur_radius=0.0,
                                            faces_per_pixel=1)

    # Create a phong renderer by composing a rasterizer and a shader. The textured
    # phong shader will interpolate the texture uv coordinates for each vertex,
    # sample from a texture image and apply the Phong lighting model
    blend_params = BlendParams(sigma=1e-4,
                               gamma=1e-4,
                               background_color=(0.0, 0.0, 0.0))
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras,
                                  raster_settings=raster_settings),
        shader=SoftPhongShader(device=device,
                               cameras=cameras,
                               lights=lights,
                               blend_params=blend_params),
    )

    # Create a batch of meshes by repeating the cow mesh and associated textures.
    # Meshes has a useful `extend` method which allows us do this very easily.
    # This also extends the textures.
    meshes = mesh.extend(num_views)

    # Render the cow mesh from each viewing angle
    target_images = renderer(meshes, cameras=cameras, lights=lights)

    # Rasterization settings for silhouette rendering
    sigma = 1e-4
    raster_settings_silhouette = RasterizationSettings(
        image_size=128,
        blur_radius=np.log(1.0 / 1e-4 - 1.0) * sigma,
        faces_per_pixel=50)

    # Silhouette renderer
    renderer_silhouette = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras,
                                  raster_settings=raster_settings_silhouette),
        shader=SoftSilhouetteShader(),
    )

    # Render silhouette images. The 4th channel (index 3) of the rendering
    # output is the alpha/silhouette channel
    silhouette_images = renderer_silhouette(meshes,
                                            cameras=cameras,
                                            lights=lights)

    # binary silhouettes
    silhouette_binary = (silhouette_images[..., 3] > 1e-4).float()

    return cameras, target_images[..., :3], silhouette_binary
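A brief usage sketch for the helper above (the data_dir value is illustrative):

cameras, images, silhouettes = generate_cow_renders(num_views=40,
                                                    data_dir="./data/cow_mesh")
print(images.shape)       # (40, 128, 128, 3), given image_size=128 above
print(silhouettes.shape)  # (40, 128, 128)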
Code Example #21
    def test_texture_map(self):
        """
        Test a mesh with a texture map is loaded and rendered correctly.
        The pupils in the eyes of the cow should always be looking to the left.
        """
        device = torch.device("cuda:0")
        obj_dir = Path(
            __file__).resolve().parent.parent / "docs/tutorials/data"
        obj_filename = obj_dir / "cow_mesh/cow.obj"

        # Load mesh + texture
        mesh = load_objs_as_meshes([obj_filename], device=device)

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 0, 0)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(image_size=512,
                                                blur_radius=0.0,
                                                faces_per_pixel=1)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)

        # Place light behind the cow in world space. The front of
        # the cow is facing the -z direction.
        lights.location = torch.tensor([0.0, 0.0, 2.0], device=device)[None]

        blend_params = BlendParams(
            sigma=1e-1,
            gamma=1e-4,
            background_color=torch.tensor([1.0, 1.0, 1.0], device=device),
        )
        # Init renderer
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=TexturedSoftPhongShader(
                lights=lights,
                cameras=cameras,
                materials=materials,
                blend_params=blend_params,
            ),
        )

        # Load reference image
        image_ref = load_rgb_image("test_texture_map_back.png", DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
            renderer.rasterizer.raster_settings.bin_size = bin_size
            images = renderer(mesh)
            rgb = images[0, ..., :3].squeeze().cpu()

            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / "DEBUG_texture_map_back.png")

            # NOTE some pixels can be flaky and will not lead to
            # `cond1` being true. Add `cond2` and check `cond1 or cond2`
            cond1 = torch.allclose(rgb, image_ref, atol=0.05)
            cond2 = ((rgb - image_ref).abs() > 0.05).sum() < 5
            self.assertTrue(cond1 or cond2)

        # Check grad exists
        [verts] = mesh.verts_list()
        verts.requires_grad = True
        mesh2 = Meshes(verts=[verts],
                       faces=mesh.faces_list(),
                       textures=mesh.textures)
        images = renderer(mesh2)
        images[0, ...].sum().backward()
        self.assertIsNotNone(verts.grad)

        ##########################################
        # Check rendering of the front of the cow
        ##########################################

        R, T = look_at_view_transform(2.7, 0, 180)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

        # Move light to the front of the cow in world space
        lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]

        # Load reference image
        image_ref = load_rgb_image("test_texture_map_front.png", DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
            renderer.rasterizer.raster_settings.bin_size = bin_size

            images = renderer(mesh, cameras=cameras, lights=lights)
            rgb = images[0, ..., :3].squeeze().cpu()

            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / "DEBUG_texture_map_front.png")

            # NOTE some pixels can be flaky and will not lead to
            # `cond1` being true. Add `cond2` and check `cond1 or cond2`
            cond1 = torch.allclose(rgb, image_ref, atol=0.05)
            cond2 = ((rgb - image_ref).abs() > 0.05).sum() < 5
            self.assertTrue(cond1 or cond2)

        #################################
        # Add blurring to rasterization
        #################################
        R, T = look_at_view_transform(2.7, 0, 180)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        blend_params = BlendParams(sigma=5e-4, gamma=1e-4)
        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=np.log(1.0 / 1e-4 - 1.0) * blend_params.sigma,
            faces_per_pixel=100,
            clip_barycentric_coords=True,
        )

        # Load reference image
        image_ref = load_rgb_image("test_blurry_textured_rendering.png",
                                   DATA_DIR)

        for bin_size in [0, None]:
            # Check both naive and coarse to fine produce the same output.
            renderer.rasterizer.raster_settings.bin_size = bin_size

            images = renderer(
                mesh.clone(),
                cameras=cameras,
                raster_settings=raster_settings,
                blend_params=blend_params,
            )
            rgb = images[0, ..., :3].squeeze().cpu()

            if DEBUG:
                Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                    DATA_DIR / "DEBUG_blurry_textured_rendering.png")

            self.assertClose(rgb, image_ref, atol=0.05)
Code Example #22
        #     ofile.write('vn '+' '.join(['{}'.format(vn) for vn in nrm])+'\n')
        # if not obj.mtlid:
        #     obj.mtlid = [-1] * len(obj.polygons)
        for f in faces.cpu().numpy():
            # UGLY!
            ofile.write('f ' + ' '.join([('%d' % (vid + 1))
                                         for vid in f]) + '\n')


if __name__ == "__main__":
    device = torch.device(
        "cuda:0") if torch.cuda.is_available() else torch.device("cpu")
    patch = torch.load('data/patch_save_2.pt').to(device)
    idx = torch.load('data/idx_save_2.pt').to(device)
    mesh = load_objs_as_meshes(['data/human.obj'],
                               device=device,
                               create_texture_atlas=True)
    verts, faces, aux = load_obj('data/human.obj',
                                 device=device,
                                 create_texture_atlas=True)

    # This API comes from the current PyTorch3D GitHub repo and does not exist in stable releases
    atlas_packed = mesh.textures.atlas_packed()
    atlas_packed[idx, :, :, :] = patch
    t0 = atlas_packed[:, 0, -1]  # corresponding to v0  with bary = (1, 0)
    t1 = atlas_packed[:, -1, 0]  # corresponding to v1 with bary = (0, 1)
    t2 = atlas_packed[:, 0, 0]  # corresponding to v2 with bary = (0, 0)
    texture_image = torch.stack((t0, t1, t2), dim=1)
    face_id = faces.verts_idx

    print(verts.shape)
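For reference, atlas_packed() returns a per-face texture of shape (F, R, R, 3), so t0, t1 and t2 above are each (F, 3) and the stacked texture_image is (F, 3, 3): one RGB triple per face corner. A quick hedged check, assuming the same inputs:

print(atlas_packed.shape)   # (F, R, R, 3)
print(texture_image.shape)  # (F, 3, 3)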
Code Example #23
# - rendering

import os
import matplotlib.pyplot as plt

import torch
import pytorch3d
import pytorch3d.io as p3dio
import pytorch3d.structures as p3dstc
import pytorch3d.vis as p3dvis
import pytorch3d.renderer as p3drdr
from pytorch3d.renderer import (look_at_view_transform, FoVPerspectiveCameras,
                                RasterizationSettings, PointLights,
                                MeshRenderer, MeshRasterizer, SoftPhongShader)

### mesh loading
mesh_filename = 'mesh.obj'
# mesh = p3dio.load_obj(mesh_filename) # a tuple: (verts, faces, aux)
mesh = p3dio.load_objs_as_meshes([mesh_filename])

### camera, raster and renderer
R, T = look_at_view_transform(2.7, 0, 180)
cameras = FoVPerspectiveCameras(R=R, T=T)

raster_settings = RasterizationSettings(
    image_size=512,
    blur_radius=0.0,
    faces_per_pixel=1,
)

lights = PointLights(location=[[0.0, 0.0, -3.0]])  # kept on CPU to match the mesh and cameras above

renderer = MeshRenderer(rasterizer=MeshRasterizer(
    cameras=cameras, raster_settings=raster_settings),
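# NOTE: the listing above is cut off mid-statement. The remainder below is an
# assumed completion mirroring the PyTorch3D tutorials, not part of the
# original snippet.
    shader=SoftPhongShader(cameras=cameras, lights=lights))

images = renderer(mesh)
plt.imshow(images[0, ..., :3].cpu().numpy())
plt.show()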
Code Example #24
import torch
import pytorch3d
from pytorch3d.io import load_objs_as_meshes, load_obj
from pytorch3d.structures import Meshes
from pytorch3d.vis.plotly_vis import AxisArgs, plot_batch_individually, plot_scene
from pytorch3d.vis.texture_vis import texturesuv_image_matplotlib
from pytorch3d.transforms import random_rotations, Translate, Rotate, Transform3d
from pytorch3d.renderer import (look_at_view_transform, FoVPerspectiveCameras,
                                PointLights, DirectionalLights, Materials,
                                RasterizationSettings, MeshRenderer,
                                MeshRasterizer, SoftPhongShader, TexturesUV,
                                TexturesVertex)

device = torch.device("cuda")

mesh = load_objs_as_meshes(['data/cow.obj'], device=device)

# Define the settings for rasterization and shading. Here we set the output image to be of size
# 512x512. As we are rendering images for visualization purposes only we will set faces_per_pixel=1
# and blur_radius=0.0. We also set bin_size and max_faces_per_bin to None, which ensures that
# the faster coarse-to-fine rasterization method is used. Refer to rasterize_meshes.py for
# explanations of these parameters. Refer to docs/notes/renderer.md for an explanation of
# the difference between naive and coarse-to-fine rasterization.
raster_settings = RasterizationSettings(
    image_size=512,
    blur_radius=0.0,
    faces_per_pixel=1,
)

# Place a point light in front of the object. As mentioned above, the front of the cow is facing the
# -z direction.
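# NOTE: the listing is cut off here. A sketch of the typical continuation
# (assumed, following the standard PyTorch3D texture-rendering tutorial; not
# part of the original snippet):
lights = PointLights(device=device, location=[[0.0, 0.0, -3.0]])

R, T = look_at_view_transform(2.7, 0, 180)
cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
renderer = MeshRenderer(
    rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
    shader=SoftPhongShader(device=device, cameras=cameras, lights=lights),
)
images = renderer(mesh)  # (1, 512, 512, 4)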
Code Example #25
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    # Detect NaN values
    if (args.detect_nan):
        torch.autograd.set_detect_anomaly(True)

    # TensorBoard output
    board_train = SummaryWriter(
        log_dir=os.path.join(args.tensorboard_dir, args.exper_name))

    #================================
    # Load the dataset
    #================================
    # Load the mesh file / mesh: pytorch3d.structures.meshes.Meshes
    mesh = load_objs_as_meshes([args.mesh_file], device=device)
    if (args.debug):
        print("mesh.num_verts_per_mesh() : ", mesh.num_verts_per_mesh())
        print("mesh.faces_packed().shape : ", mesh.faces_packed().shape)

    # Plot and save the mesh
    save_plot3d_mesh_img(
        mesh, os.path.join(args.results_dir, args.exper_name, "mesh.png"),
        "mesh")
    save_mesh_obj(mesh.verts_packed(), mesh.faces_packed(),
                  os.path.join(args.results_dir, args.exper_name, "mesh.obj"))

    # Mesh texture / texture: Tensor
    if (args.shader == "textured_soft_phong_shader"):
        texture = mesh.textures.maps_padded()
        print("texture.shape : ",
Code Example #26
File: test_obj_io.py Project: LeeXuefeng/pytorch3d
    def test_join_meshes(self):
        """
        Test that join_meshes and load_objs_as_meshes are consistent with single
        meshes.
        """
        def check_triple(mesh, mesh3):
            """
            Verify that mesh3 is three copies of mesh.
            """
            def check_item(x, y):
                self.assertEqual(x is None, y is None)
                if x is not None:
                    self.assertClose(torch.cat([x, x, x]), y)

            check_item(mesh.verts_padded(), mesh3.verts_padded())
            check_item(mesh.faces_padded(), mesh3.faces_padded())
            if mesh.textures is not None:
                check_item(mesh.textures.maps_padded(),
                           mesh3.textures.maps_padded())
                check_item(
                    mesh.textures.faces_uvs_padded(),
                    mesh3.textures.faces_uvs_padded(),
                )
                check_item(
                    mesh.textures.verts_uvs_padded(),
                    mesh3.textures.verts_uvs_padded(),
                )
                check_item(
                    mesh.textures.verts_rgb_padded(),
                    mesh3.textures.verts_rgb_padded(),
                )

        DATA_DIR = (Path(__file__).resolve().parent.parent /
                    "docs/tutorials/data")
        obj_filename = DATA_DIR / "cow_mesh/cow.obj"

        mesh = load_objs_as_meshes([obj_filename])
        mesh3 = load_objs_as_meshes([obj_filename, obj_filename, obj_filename])
        check_triple(mesh, mesh3)
        self.assertTupleEqual(mesh.textures.maps_padded().shape,
                              (1, 1024, 1024, 3))

        mesh_notex = load_objs_as_meshes([obj_filename], load_textures=False)
        mesh3_notex = load_objs_as_meshes(
            [obj_filename, obj_filename, obj_filename], load_textures=False)
        check_triple(mesh_notex, mesh3_notex)
        self.assertIsNone(mesh_notex.textures)

        verts = torch.randn((4, 3), dtype=torch.float32)
        faces = torch.tensor([[2, 1, 0], [3, 1, 0]], dtype=torch.int64)
        vert_tex = torch.tensor([[0, 1, 0], [0, 1, 1], [1, 1, 0], [1, 1, 1]],
                                dtype=torch.float32)
        tex = Textures(verts_rgb=vert_tex[None, :])
        mesh_rgb = Meshes(verts=[verts], faces=[faces], textures=tex)
        mesh_rgb3 = join_meshes([mesh_rgb, mesh_rgb, mesh_rgb])
        check_triple(mesh_rgb, mesh_rgb3)

        teapot_obj = DATA_DIR / "teapot.obj"
        mesh_teapot = load_objs_as_meshes([teapot_obj])
        teapot_verts, teapot_faces = mesh_teapot.get_mesh_verts_faces(0)
        mix_mesh = load_objs_as_meshes([obj_filename, teapot_obj],
                                       load_textures=False)
        self.assertEqual(len(mix_mesh), 2)
        self.assertClose(mix_mesh.verts_list()[0], mesh.verts_list()[0])
        self.assertClose(mix_mesh.faces_list()[0], mesh.faces_list()[0])
        self.assertClose(mix_mesh.verts_list()[1], teapot_verts)
        self.assertClose(mix_mesh.faces_list()[1], teapot_faces)

        cow3_tea = join_meshes([mesh3, mesh_teapot], include_textures=False)
        self.assertEqual(len(cow3_tea), 4)
        check_triple(mesh_notex, cow3_tea[:3])
        self.assertClose(cow3_tea.verts_list()[3], mesh_teapot.verts_list()[0])
        self.assertClose(cow3_tea.faces_list()[3], mesh_teapot.faces_list()[0])
Code Example #27
 def load_obj_file(self, obj_file):
     self.meshes = load_objs_as_meshes([obj_file], self.device)
Code Example #28
    def test_texture_map(self):
        """
        Test a mesh with a texture map is loaded and rendered correctly
        """
        device = torch.device("cuda:0")
        DATA_DIR = (Path(__file__).resolve().parent.parent /
                    "docs/tutorials/data")
        obj_filename = DATA_DIR / "cow_mesh/cow.obj"

        # Load mesh + texture
        mesh = load_objs_as_meshes([obj_filename], device=device)

        # Init rasterizer settings
        R, T = look_at_view_transform(2.7, 10, 20)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(image_size=512,
                                                blur_radius=0.0,
                                                faces_per_pixel=1,
                                                bin_size=0)

        # Init shader settings
        materials = Materials(device=device)
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, -2.0], device=device)[None]
        raster_settings = RasterizationSettings(image_size=512,
                                                blur_radius=0.0,
                                                faces_per_pixel=1,
                                                bin_size=0)

        # Init renderer
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=TexturedSoftPhongShader(lights=lights,
                                           cameras=cameras,
                                           materials=materials),
        )
        images = renderer(mesh)
        rgb = images[0, ..., :3].squeeze().cpu()

        # Load reference image
        image_ref = load_rgb_image("test_texture_map.png")

        if DEBUG:
            Image.fromarray((rgb.numpy() * 255).astype(np.uint8)).save(
                DATA_DIR / "DEBUG_texture_map.png")

        # There's a calculation instability on the corner of the ear of the cow.
        # We ignore that pixel.
        image_ref[137, 166] = 0
        rgb[137, 166] = 0

        self.assertTrue(torch.allclose(rgb, image_ref, atol=0.05))

        # Check grad exists
        [verts] = mesh.verts_list()
        verts.requires_grad = True
        mesh2 = Meshes(verts=[verts],
                       faces=mesh.faces_list(),
                       textures=mesh.textures)
        images = renderer(mesh2)
        images[0, ...].sum().backward()
        self.assertIsNotNone(verts.grad)
Code Example #29
    def test_join_meshes_as_batch(self):
        """
        Test that join_meshes_as_batch and load_objs_as_meshes are consistent
        with single meshes.
        """
        def check_triple(mesh, mesh3):
            """
            Verify that mesh3 is three copies of mesh.
            """
            def check_item(x, y):
                self.assertEqual(x is None, y is None)
                if x is not None:
                    self.assertClose(torch.cat([x, x, x]), y)

            check_item(mesh.verts_padded(), mesh3.verts_padded())
            check_item(mesh.faces_padded(), mesh3.faces_padded())

            if mesh.textures is not None:
                if isinstance(mesh.textures, TexturesUV):
                    check_item(
                        mesh.textures.faces_uvs_padded(),
                        mesh3.textures.faces_uvs_padded(),
                    )
                    check_item(
                        mesh.textures.verts_uvs_padded(),
                        mesh3.textures.verts_uvs_padded(),
                    )
                    check_item(mesh.textures.maps_padded(),
                               mesh3.textures.maps_padded())
                elif isinstance(mesh.textures, TexturesVertex):
                    check_item(
                        mesh.textures.verts_features_padded(),
                        mesh3.textures.verts_features_padded(),
                    )
                elif isinstance(mesh.textures, TexturesAtlas):
                    check_item(mesh.textures.atlas_padded(),
                               mesh3.textures.atlas_padded())

        DATA_DIR = Path(
            __file__).resolve().parent.parent / "docs/tutorials/data"
        obj_filename = DATA_DIR / "cow_mesh/cow.obj"

        mesh = load_objs_as_meshes([obj_filename])
        mesh3 = load_objs_as_meshes([obj_filename, obj_filename, obj_filename])
        check_triple(mesh, mesh3)
        self.assertTupleEqual(mesh.textures.maps_padded().shape,
                              (1, 1024, 1024, 3))

        # Try mismatched texture map sizes, which needs a call to interpolate()
        mesh2048 = mesh.clone()
        maps = mesh.textures.maps_padded()
        mesh2048.textures._maps_padded = torch.cat([maps, maps], dim=1)
        join_meshes_as_batch([mesh.to("cuda:0"), mesh2048.to("cuda:0")])

        mesh_notex = load_objs_as_meshes([obj_filename], load_textures=False)
        mesh3_notex = load_objs_as_meshes(
            [obj_filename, obj_filename, obj_filename], load_textures=False)
        check_triple(mesh_notex, mesh3_notex)
        self.assertIsNone(mesh_notex.textures)

        # meshes with vertex texture, join into a batch.
        verts = torch.randn((4, 3), dtype=torch.float32)
        faces = torch.tensor([[2, 1, 0], [3, 1, 0]], dtype=torch.int64)
        vert_tex = torch.ones_like(verts)
        rgb_tex = TexturesVertex(verts_features=[vert_tex])
        mesh_rgb = Meshes(verts=[verts], faces=[faces], textures=rgb_tex)
        mesh_rgb3 = join_meshes_as_batch([mesh_rgb, mesh_rgb, mesh_rgb])
        check_triple(mesh_rgb, mesh_rgb3)

        # meshes with texture atlas, join into a batch.
        device = "cuda:0"
        atlas = torch.rand((2, 4, 4, 3), dtype=torch.float32, device=device)
        atlas_tex = TexturesAtlas(atlas=[atlas])
        mesh_atlas = Meshes(verts=[verts], faces=[faces], textures=atlas_tex)
        mesh_atlas3 = join_meshes_as_batch(
            [mesh_atlas, mesh_atlas, mesh_atlas])
        check_triple(mesh_atlas, mesh_atlas3)

        # Test load multiple meshes with textures into a batch.
        teapot_obj = DATA_DIR / "teapot.obj"
        mesh_teapot = load_objs_as_meshes([teapot_obj])
        teapot_verts, teapot_faces = mesh_teapot.get_mesh_verts_faces(0)
        mix_mesh = load_objs_as_meshes([obj_filename, teapot_obj],
                                       load_textures=False)
        self.assertEqual(len(mix_mesh), 2)
        self.assertClose(mix_mesh.verts_list()[0], mesh.verts_list()[0])
        self.assertClose(mix_mesh.faces_list()[0], mesh.faces_list()[0])
        self.assertClose(mix_mesh.verts_list()[1], teapot_verts)
        self.assertClose(mix_mesh.faces_list()[1], teapot_faces)

        cow3_tea = join_meshes_as_batch([mesh3, mesh_teapot],
                                        include_textures=False)
        self.assertEqual(len(cow3_tea), 4)
        check_triple(mesh_notex, cow3_tea[:3])
        self.assertClose(cow3_tea.verts_list()[3], mesh_teapot.verts_list()[0])
        self.assertClose(cow3_tea.faces_list()[3], mesh_teapot.faces_list()[0])

        # Check error raised if all meshes in the batch don't have the same texture type
        with self.assertRaisesRegex(ValueError, "same type of texture"):
            join_meshes_as_batch([mesh_atlas, mesh_rgb, mesh_atlas])
Code Example #30
 def load_pytorch_mesh_from_file(self):
     """
     Loads from pytorch mesh object from path attribute
     """
     self.pytorch_mesh = load_objs_as_meshes([self.mesh_path]).cuda()
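Taken together, the examples on this page exercise the following keyword arguments of load_objs_as_meshes. A compact sketch for orientation (the defaults shown match recent PyTorch3D releases and may differ across versions; files and device are placeholders):

from pytorch3d.io import load_objs_as_meshes

meshes = load_objs_as_meshes(
    files,                       # list of .obj paths; the result batches one mesh per file
    device=device,               # target device for the returned Meshes
    load_textures=True,          # False skips materials / UV maps
    create_texture_atlas=False,  # True builds a per-face texture atlas instead of UV textures
    texture_atlas_size=4,        # R: each face gets an R x R texel patch
    texture_wrap="repeat",       # UV wrapping mode; None disables wrapping
)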