Example 1
def generate_video_from_obj(obj_path, video_path, renderer):
    # Setup
    device = torch.device("cuda:0")
    torch.cuda.set_device(device)

    # Load obj file
    verts_rgb_colors = get_verts_rgb_colors(obj_path)
    verts_rgb_colors = torch.from_numpy(verts_rgb_colors).to(device)
    textures = Textures(verts_rgb=verts_rgb_colors)
    wo_textures = Textures(verts_rgb=torch.ones_like(verts_rgb_colors) * 0.75)

    # Load obj
    mesh = load_objs_as_meshes([obj_path], device=device)

    # Rebuild the mesh with and without per-vertex textures
    verts = mesh.verts_list()
    faces = mesh.faces_list()
    mesh_w_tex = Meshes(verts, faces, textures)
    mesh_wo_tex = Meshes(verts, faces, wo_textures)

    # create VideoWriter
    fourcc = cv2.VideoWriter_fourcc(*'MP4V')
    out = cv2.VideoWriter(video_path, fourcc, 20.0, (1024, 512))

    for i in tqdm(range(90)):
        R, T = look_at_view_transform(1.8, 0, i * 4, device=device)
        images_w_tex = renderer(mesh_w_tex, R=R, T=T)
        images_w_tex = np.clip(images_w_tex[0, ..., :3].cpu().numpy(), 0.0,
                               1.0)[:, :, ::-1] * 255
        images_wo_tex = renderer(mesh_wo_tex, R=R, T=T)
        images_wo_tex = np.clip(images_wo_tex[0, ..., :3].cpu().numpy(), 0.0,
                                1.0)[:, :, ::-1] * 255
        image = np.concatenate([images_w_tex, images_wo_tex], axis=1)
        out.write(image.astype('uint8'))
    out.release()
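A minimal sketch of building a renderer to drive the function above; the camera, shader, light placement, and file paths are assumptions, not part of the original snippet (the snippet's own imports, e.g. cv2, tqdm, and look_at_view_transform, are presumed in scope):

import torch
from pytorch3d.renderer import (HardPhongShader, MeshRasterizer, MeshRenderer,
                                OpenGLPerspectiveCameras, PointLights,
                                RasterizationSettings)

device = torch.device("cuda:0")
cameras = OpenGLPerspectiveCameras(device=device)
# image_size=512 matches the (1024, 512) side-by-side frames written above
raster_settings = RasterizationSettings(image_size=512, blur_radius=0.0,
                                        faces_per_pixel=1)
lights = PointLights(device=device, location=[[0.0, 0.0, -3.0]])
renderer = MeshRenderer(
    rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
    shader=HardPhongShader(device=device, cameras=cameras, lights=lights))

generate_video_from_obj("result.obj", "result.mp4", renderer)  # hypothetical paths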
Example 2
    def test_init_rgb_uv_fail(self):
        V = 20
        # Maps has wrong shape
        with self.assertRaisesRegex(ValueError, 'maps'):
            Textures(
                maps=torch.ones((5, 16, 16, 3, 4)),
                faces_uvs=torch.randint(size=(5, 10, 3), low=0, high=V),
                verts_uvs=torch.ones((5, V, 2)),
            )
        # faces_uvs has wrong shape
        with self.assertRaisesRegex(ValueError, 'faces_uvs'):
            Textures(
                maps=torch.ones((5, 16, 16, 3)),
                faces_uvs=torch.randint(size=(5, 10, 3, 3), low=0, high=V),
                verts_uvs=torch.ones((5, V, 2)),
            )
        # verts_uvs has wrong shape
        with self.assertRaisesRegex(ValueError, 'verts_uvs'):
            Textures(
                maps=torch.ones((5, 16, 16, 3)),
                faces_uvs=torch.randint(size=(5, 10, 3), low=0, high=V),
                verts_uvs=torch.ones((5, V, 2, 3)),
            )
        # verts_rgb has wrong shape
        with self.assertRaisesRegex(ValueError, 'verts_rgb'):
            Textures(verts_rgb=torch.ones((5, 16, 16, 3)))

        # maps provided without verts/faces uvs
        with self.assertRaisesRegex(ValueError,
                                    'faces_uvs and verts_uvs are required'):
            Textures(maps=torch.ones((5, 16, 16, 3)))
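For contrast with the failure cases above, a minimal sketch of a construction that passes the same shape checks: maps is (N, H, W, 3), faces_uvs is (N, F, 3), and verts_uvs is (N, V, 2). (In the PyTorch3D versions these snippets target, Textures is imported from pytorch3d.structures.)

import torch
from pytorch3d.structures import Textures

N, F, V = 5, 10, 20
tex = Textures(
    maps=torch.ones((N, 16, 16, 3)),                         # (N, H, W, 3) texture images
    faces_uvs=torch.randint(size=(N, F, 3), low=0, high=V),  # (N, F, 3) UV index per face vertex
    verts_uvs=torch.ones((N, V, 2)),                         # (N, V, 2) UV coordinate per vertex
)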
Example 3
 def test_clone(self):
     V = 20
     tex = Textures(
         maps=torch.ones((5, 16, 16, 3)),
         faces_uvs=torch.randint(size=(5, 10, 3), low=0, high=V),
         verts_uvs=torch.ones((5, V, 2)),
     )
     tex_cloned = tex.clone()
     self.assertSeparate(tex._faces_uvs_padded, tex_cloned._faces_uvs_padded)
     self.assertSeparate(tex._verts_uvs_padded, tex_cloned._verts_uvs_padded)
     self.assertSeparate(tex._maps_padded, tex_cloned._maps_padded)
Example 4
 def test_to(self):
     V = 20
     tex = Textures(
         maps=torch.ones((5, 16, 16, 3)),
         faces_uvs=torch.randint(size=(5, 10, 3), low=0, high=V),
         verts_uvs=torch.ones((5, V, 2)),
     )
     device = torch.device("cuda:0")
     tex = tex.to(device)
     self.assertTrue(tex._faces_uvs_padded.device == device)
     self.assertTrue(tex._verts_uvs_padded.device == device)
     self.assertTrue(tex._maps_padded.device == device)
Example 5
 def test_interpolate_attributes_grad(self):
     verts = torch.randn((4, 3), dtype=torch.float32)
     faces = torch.tensor([[2, 1, 0], [3, 1, 0]], dtype=torch.int64)
     vert_tex = torch.tensor(
         [[0, 1, 0], [0, 1, 1], [1, 1, 0], [1, 1, 1]],
         dtype=torch.float32,
         requires_grad=True,
     )
     tex = Textures(verts_rgb=vert_tex[None, :])
     mesh = Meshes(verts=[verts], faces=[faces], textures=tex)
     pix_to_face = torch.tensor([0, 1], dtype=torch.int64).view(1, 1, 1, 2)
     barycentric_coords = torch.tensor([[0.5, 0.3, 0.2], [0.3, 0.6, 0.1]],
                                       dtype=torch.float32).view(
                                           1, 1, 1, 2, -1)
     fragments = Fragments(
         pix_to_face=pix_to_face,
         bary_coords=barycentric_coords,
         zbuf=torch.ones_like(pix_to_face),
         dists=torch.ones_like(pix_to_face),
     )
     grad_vert_tex = torch.tensor(
         [
             [0.3, 0.3, 0.3],
             [0.9, 0.9, 0.9],
             [0.5, 0.5, 0.5],
             [0.3, 0.3, 0.3],
         ],
         dtype=torch.float32,
     )
     texels = interpolate_vertex_colors(fragments, mesh)
     texels.sum().backward()
     self.assertTrue(hasattr(vert_tex, "grad"))
     self.assertTrue(torch.allclose(vert_tex.grad, grad_vert_tex[None, :]))
Example 6
def load_objs_as_meshes(files: list, device=None, load_textures: bool = True):
    """
    Load meshes from a list of .obj files using the load_obj function, and
    return them as a Meshes object. This only works for meshes which have a
    single texture image for the whole mesh. See the load_obj function for more
    details. material_colors and normals are not stored.

    Args:
        files: A list of file-like objects (with methods read, readline,
            tell, and seek), pathlib paths, or strings containing file names.
        device: Desired device of returned Meshes. Default:
            uses the current device for the default tensor type.
        load_textures: Boolean indicating whether material files are loaded.

    Returns:
        New Meshes object.
    """
    mesh_list = []
    for f_obj in files:
        # TODO: update this function to support the two texturing options.
        verts, faces, aux = load_obj(f_obj, load_textures=load_textures)
        verts = verts.to(device)
        tex = None
        tex_maps = aux.texture_images
        if tex_maps is not None and len(tex_maps) > 0:
            verts_uvs = aux.verts_uvs[None, ...].to(device)  # (1, V, 2)
            faces_uvs = faces.textures_idx[None, ...].to(device)  # (1, F, 3)
            image = list(tex_maps.values())[0].to(device)[None]
            tex = Textures(verts_uvs=verts_uvs, faces_uvs=faces_uvs, maps=image)

        mesh = Meshes(verts=[verts], faces=[faces.verts_idx.to(device)], textures=tex)
        mesh_list.append(mesh)
    if len(mesh_list) == 1:
        return mesh_list[0]
    return join_meshes_as_batch(mesh_list)
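A short usage sketch for the loader above (the file names are hypothetical): several paths come back as one batched Meshes, a single path as a single mesh.

import torch

device = torch.device("cuda:0")
single = load_objs_as_meshes(["cow.obj"], device=device)              # one mesh
batch = load_objs_as_meshes(["cow.obj", "teapot.obj"], device=device)
assert len(batch) == 2  # both meshes live in one batched Meshes object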
Example 7
def project_mesh(mesh, angle):
    start = time.time()
    m = Metadata()
    R, T = look_at_view_transform(1.75,
                                  -45,
                                  angle,
                                  up=((0, 1, 0), ),
                                  at=((0, -0.25, 0), ))
    cameras = OpenGLPerspectiveCameras(device=m.device, R=R, T=T)
    raster_settings = m.raster_settings
    lights = m.lights
    renderer = MeshRenderer(rasterizer=MeshRasterizer(
        cameras=cameras, raster_settings=raster_settings),
                            shader=HardFlatShader(cameras=cameras,
                                                  device=m.device,
                                                  lights=lights))
    verts = mesh.verts_list()[0]

    # faces = meshes.faces_list()[0]

    verts_rgb = torch.ones_like(verts)[None]  # (1, V, 3)
    # verts_rgb = torch.ones((len(mesh.verts_list()[0]), 1))[None]  # (1, V, 3)
    textures = Textures(verts_rgb=verts_rgb.to(m.device))

    mesh.textures = textures
    mesh.textures._num_faces_per_mesh = mesh._num_faces_per_mesh.tolist()
    mesh.textures._num_verts_per_mesh = mesh._num_verts_per_mesh.tolist()

    image = renderer(mesh)
    return image
Example 8
 def test_interpolate_attributes(self):
     """
     This tests both interpolate_vertex_colors as well as
     interpolate_face_attributes.
     """
     verts = torch.randn((4, 3), dtype=torch.float32)
     faces = torch.tensor([[2, 1, 0], [3, 1, 0]], dtype=torch.int64)
     vert_tex = torch.tensor([[0, 1, 0], [0, 1, 1], [1, 1, 0], [1, 1, 1]],
                             dtype=torch.float32)
     tex = Textures(verts_rgb=vert_tex[None, :])
     mesh = Meshes(verts=[verts], faces=[faces], textures=tex)
     pix_to_face = torch.tensor([0, 1], dtype=torch.int64).view(1, 1, 1, 2)
     barycentric_coords = torch.tensor([[0.5, 0.3, 0.2], [0.3, 0.6, 0.1]],
                                       dtype=torch.float32).view(
                                           1, 1, 1, 2, -1)
     expected_vals = torch.tensor([[0.5, 1.0, 0.3], [0.3, 1.0, 0.9]],
                                  dtype=torch.float32).view(1, 1, 1, 2, -1)
     fragments = Fragments(
         pix_to_face=pix_to_face,
         bary_coords=barycentric_coords,
         zbuf=torch.ones_like(pix_to_face),
         dists=torch.ones_like(pix_to_face),
     )
     texels = interpolate_vertex_colors(fragments, mesh)
     self.assertTrue(torch.allclose(texels, expected_vals[None, :]))
Example 9
    def load(self, obj_filename):
        # Load obj file
        extension = obj_filename[-3:]
        
        if extension == 'obj':
            verts, faces, aux = load_obj(obj_filename)
            verts_idx = faces.verts_idx
        elif extension == 'ply':
            verts, faces = load_ply(obj_filename)
            verts_idx = faces
        else:
            raise ValueError(f'Unsupported mesh format: {obj_filename}')

        if os.path.exists(obj_filename[:-3]+'npy'):
            colors = np.load(obj_filename[:-3]+'npy')
            verts_rgb = torch.FloatTensor(colors[...,[2,1,0]])
            verts_rgb = verts_rgb.unsqueeze(0)
            verts_rgb = verts_rgb.to(self.device)
        else:
            # Initialize each vertex to be white in color - bgr
            verts_rgb = torch.ones_like(verts)[None]
            verts_rgb = verts_rgb.to(self.device)
            #textures = Textures(faces_uvs=faces.textures_idx[None,...], verts_uvs=aux.verts_uvs[None,...], verts_rgb=verts_rgb.to(self.device))

        # Create a Meshes object for the face.
        self.face_mesh = Meshes(
            verts = [verts.to(self.device)],
            faces = [verts_idx.to(self.device)],
            textures= Textures(verts_rgb=verts_rgb)
        )
Example 10
def render_shape(model, R, T, args, vertices):
    verts, faces_idx, _ = load_obj(args.obj_path)
    faces = faces_idx.verts_idx
    verts_rgb = torch.ones_like(verts)[None]  # (1, V, 3)
    textures = Textures(verts_rgb=verts_rgb)
    # center = verts.mean(0)
    # verts = verts - center
    # scale = max(verts.abs().max(0)[0])
    # verts = verts / scale
    mesh = Meshes(verts=[verts], faces=[faces], textures=textures)
    if args.lights:
        images = []
        for i in range(model.nviews):
            if args.net_version == 1:
                model.lights.location = model.camera_position[i]
            else:
                model.lights.location = vertices[i]
            imgs = model.renderer(
                meshes_world=mesh.to(device=args.device).clone(),
                R=R[None, i],
                T=T[None, i],
                lights=model.lights).cpu().detach().numpy()
            images.extend(imgs)
    else:
        # model.lights.location = model.light_position
        meshes = mesh.extend(args.nviews)
        images = model.renderer(meshes.to(device=args.device), R=R,
                                T=T)  # , lights=model.lights)
        images = images.detach().cpu().numpy()

    return images
Example 11
def render_obj(verts, faces, distance, elevation, azimuth):
    device = torch.device("cuda:0")

    verts_rgb = torch.ones_like(verts)[None]
    textures = Textures(verts_rgb=verts_rgb.to(device))

    cur_mesh = Meshes(verts=[verts.to(device)],
                      faces=[faces.to(device)],
                      textures=textures)

    cameras = OpenGLPerspectiveCameras(device=device)

    blend_params = BlendParams(sigma=1e-4, gamma=1e-4)

    raster_settings = RasterizationSettings(image_size=256,
                                            blur_radius=0.0,
                                            faces_per_pixel=1,
                                            bin_size=0)

    lights = PointLights(device=device, location=((2.0, 2.0, -2.0), ))
    phong_renderer = MeshRenderer(rasterizer=MeshRasterizer(
        cameras=cameras, raster_settings=raster_settings),
                                  shader=PhongShader(device=device,
                                                     lights=lights))

    R, T = look_at_view_transform(distance, elevation, azimuth, device=device)

    return phong_renderer(meshes_world=cur_mesh, R=R, T=T).cpu().numpy()
Example 12
    def project_to_image_plane(self, vertices, texture_map):
        # self.renderer
        if False:  # hardcoded example
            with torch.no_grad():
                transform = transforms.Compose([
                    transforms.ToTensor(),
                ])

                direc = Path('bareteeth.000001.26_C/minibatch_0_Netural_0')
                tex = Image.open(direc / 'mesh.png')
                texture_map = transform(tex).unsqueeze(0)
                mesh = load_objs_as_meshes([direc / 'mesh.obj'],
                                           device=self.device)
                vertices = mesh.verts_padded()
        # final_obj = os.path.join('out/', 'final_model.obj')
        # import datetime
        # now = datetime.datetime.now()
        # final_obj = f'{self.save_dir}/web/images/{now.strftime("%Y-%m-%d_%H:%M:%S")}_fake_mesh.obj'
        # final_obj = f'{self.save_dir}/web/images/{self.opt.epoch_count:03d}_fake_mesh.obj'
        # save_obj(final_obj, vertices[0], torch.from_numpy(self.flamelayer.faces.astype(np.int32)))
        self.estimated_texture_map = texture_map.permute(0, 2, 3, 1)
        texture = Textures(maps=self.estimated_texture_map,
                           faces_uvs=self.faces_uvs1,
                           verts_uvs=self.verts_uvs1)

        self.estimated_mesh = make_mesh(vertices.squeeze(),
                                        self.flamelayer.faces, False, texture)
        # save_obj(final_obj, estimated_mesh.verts_packed(), torch.from_numpy(self.flamelayer.faces.astype(np.int32)),
        #          verts_uvs=estimated_mesh.textures.verts_uvs_packed(), texture_map=self.estimated_texture_map,
        #          faces_uvs=estimated_mesh.textures.faces_uvs_packed())

        images = self.renderer(self.estimated_mesh, materials=self.materials)
        silhouette_images = self.silhouette_renderer(
            self.estimated_mesh, materials=self.materials)[..., 3].unsqueeze(0)
        negative_silhouette_images = self.negative_silhouette_renderer(
            self.estimated_mesh, materials=self.materials)[..., 3].unsqueeze(0)
        if self.opt.verbose:
            transforms.ToPILImage()(silhouette_images.squeeze().permute(
                0, 1).cpu()).save('out/silhouette.png')
            # transforms.ToPILImage()(images.squeeze().permute(2, 0, 1).cpu()).save('out/img.png')
        cull_backfaces_mask = (
            1 - (silhouette_images - negative_silhouette_images).abs())
        img = (images[0][..., :3].detach().cpu().numpy() * 255).astype(
            np.uint8)
        if self.opt.verbose:
            Image.fromarray(img).save('out/test1.png')
        images = Normalize(images)
        silhouette_images = silhouette_images.clamp(0, 1)
        segmented_3d_model_image = self.segmentation_3d_renderer(
            self.estimated_mesh)
        # Image.fromarray(
        #     ((255 * segmentation_image[0, ..., :3]).squeeze().detach().cpu().numpy().astype(np.uint8))).save(
        #     str('out/segmentatino_texture.png')
        # )
        return images[..., :3].permute(
            0, 3, 1, 2
        ), silhouette_images, cull_backfaces_mask, segmented_3d_model_image[
            ..., :3].permute(0, 3, 1, 2)
Example 13
    def render(self,
               model_ids: Optional[List[str]] = None,
               categories: Optional[List[str]] = None,
               sample_nums: Optional[List[int]] = None,
               idxs: Optional[List[int]] = None,
               shader_type=HardPhongShader,
               device="cpu",
               **kwargs) -> torch.Tensor:
        """
        If a list of model_ids are supplied, render all the objects by the given model_ids.
        If no model_ids are supplied, but categories and sample_nums are specified, randomly
        select a number of objects (number specified in sample_nums) in the given categories
        and render these objects. If instead a list of idxs is specified, check if the idxs
        are all valid and render models by the given idxs. Otherwise, randomly select a number
        (first number in sample_nums, default is set to be 1) of models from the loaded dataset
        and render these models.

        Args:
            model_ids: List[str] of model_ids of models intended to be rendered.
            categories: List[str] of categories intended to be rendered. categories
                and sample_nums must be specified at the same time. categories can be given
                in the form of synset offsets or labels, or a combination of both.
            sample_nums: List[int] of number of models to be randomly sampled from
                each category. Could also contain one single integer, in which case it
                will be broadcasted for every category.
            idxs: List[int] of indices of models to be rendered in the dataset.
            shader_type: Select shading. Valid options include HardPhongShader (default),
                SoftPhongShader, HardGouraudShader, SoftGouraudShader, HardFlatShader,
                SoftSilhouetteShader.
            device: torch.device on which the tensors should be located.
            **kwargs: Accepts any of the kwargs that the renderer supports.

        Returns:
            Batch of rendered images of shape (N, H, W, 3).
        """
        paths = self._handle_render_inputs(model_ids, categories, sample_nums,
                                           idxs)
        meshes = load_objs_as_meshes(paths, device=device, load_textures=False)
        meshes.textures = Textures(
            verts_rgb=torch.ones_like(meshes.verts_padded(), device=device))
        cameras = kwargs.get("cameras", OpenGLPerspectiveCameras()).to(device)
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=cameras,
                raster_settings=kwargs.get("raster_settings",
                                           RasterizationSettings()),
            ),
            shader=shader_type(
                device=device,
                cameras=cameras,
                lights=kwargs.get("lights", PointLights()).to(device),
            ),
        )
        return renderer(meshes)
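A sketch of the call patterns the docstring describes; here `dataset` stands in for an instance of the surrounding class, and the category label is an assumption:

from pytorch3d.renderer import HardFlatShader

# Render two randomly sampled models from one category with the default shader.
images = dataset.render(categories=["chair"], sample_nums=[2], device="cuda:0")
# Render specific models by dataset index with flat shading; result is (N, H, W, 3).
images = dataset.render(idxs=[0, 5, 7], shader_type=HardFlatShader, device="cuda:0")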
Example 14
def load_mesh_from_obj(file, device):
    verts, faces_idx, _ = load_obj(file)
    faces = faces_idx.verts_idx

    # Initialize each vertex to be white in color.
    verts_rgb = torch.ones_like(verts)[None]  # (1, V, 3)
    textures = Textures(verts_rgb=verts_rgb.to(device))

    # Create a Meshes object for the teapot. Here we have only one mesh in the batch.
    teapot_mesh = Meshes(verts=[verts.to(device)],
                         faces=[faces.to(device)],
                         textures=textures)
    return teapot_mesh
Example 15
def make_mesh(flamelayer, detach):
    device = torch.device("cuda:0")
    verts, _, _ = flamelayer()
    if detach:
        verts = verts.detach()
    # Initialize each vertex to be white in color.
    verts_rgb = torch.ones_like(verts)[None]  # (1, V, 3)
    textures = Textures(verts_rgb=verts_rgb.to(device))
    faces = torch.tensor(np.int32(flamelayer.faces), dtype=torch.long).cuda()

    return Meshes(verts=[verts.to(device)],
                  faces=[faces.to(device)],
                  textures=textures)
Example 16
    def rendering(self, light_params, coeffs, vertices, gen_uvmaps,
                  face_model):
        ambient_color = torch.clamp(0.5 + 0.5 * light_params[:, 0:3], 0, 1)
        diffuse_color = torch.clamp(0.5 + 0.5 * light_params[:, 3:6], 0, 1)
        specular_color = torch.clamp(0.2 + 0.2 * light_params[:, 6:9], 0, 1)
        direction = light_params[:, 9:12]
        directions = torch.cat([
            direction, direction *
            torch.tensor([[-1, 1, 1]], dtype=torch.float, device=self.device)
        ],
                               dim=0)
        lights = DirectionalLights(ambient_color=ambient_color.repeat(2, 1),
                                   diffuse_color=diffuse_color.repeat(2, 1),
                                   specular_color=specular_color.repeat(2, 1),
                                   direction=directions,
                                   device=self.device)
        self.renderer.shader.lights = lights

        _, _, _, angles, _, trans = utils.split_bfm09_coeff(coeffs)

        reflect_angles = torch.cat([
            angles, angles *
            torch.tensor([[1, -1, -1]], dtype=torch.float, device=self.device)
        ],
                                   dim=0)
        reflect_trans = torch.cat([
            trans, trans *
            torch.tensor([[-1, 1, 1]], dtype=torch.float, device=self.device)
        ],
                                  dim=0)
        rotated_vert = self.rotate_vert(vertices.repeat(2, 1, 1),
                                        reflect_angles, reflect_trans)

        fliped_uv = torch.flip(gen_uvmaps / 2 + 0.5,
                               (2, 3)).repeat(2, 1, 1, 1).permute(0, 2, 3, 1)
        texture = Textures(
            maps=fliped_uv,
            faces_uvs=self.meshes[face_model].textures.faces_uvs_padded(),
            verts_uvs=self.meshes[face_model].textures.verts_uvs_padded())
        meshes = Meshes(rotated_vert, self.meshes[face_model].faces_padded(),
                        texture)

        renders = self.renderer(meshes)

        renders[..., :3] = renders[..., :3] * 2 - 1
        renders[..., -1] = (renders[..., -1] > 0).float()
        renders = renders.permute(0, 3, 1, 2).contiguous()

        return renders
Example 17
def get_template_texture(vertices: torch.Tensor, faces: torch.Tensor,
                         texture_map: torch.Tensor):

    device = vertices.device

    verts_uv = convert_3d_to_uv_coordinates(vertices)
    vertex_rgb = torch.nn.functional.grid_sample(
        texture_map.unsqueeze(0), 2 * verts_uv.unsqueeze(0).unsqueeze(0) - 1)
    vertex_rgb = vertex_rgb.squeeze(2).permute(0, 2, 1) * 255
    texture = Textures([texture_map.cpu().permute(1, 2, 0)],
                       faces_uvs=faces.unsqueeze(0),
                       verts_uvs=verts_uv.unsqueeze(0),
                       verts_rgb=vertex_rgb).to(device)

    return texture
Example 18
def make_mesh(verts: torch.Tensor,
              faces: np.ndarray,
              detach: bool,
              textures=None) -> Meshes:
    device = torch.device("cuda:0")
    if detach:
        verts = verts.detach()
    # Initialize each vertex to be white in color.
    if textures is None:
        verts_rgb = torch.ones_like(verts)[None]  # (1, V, 3)

        textures = Textures(verts_rgb=verts_rgb.to(device))

    faces = torch.tensor(np.int32(faces), dtype=torch.long).cuda()

    # return Meshes(
    #     verts=[verts.to(device)],
    #     faces=[faces.to(device)],
    #     textures=textures
    # )

    return Meshes(verts=verts.to(device),
                  faces=faces.to(device).repeat(verts.shape[0], 1, 1),
                  textures=textures.to(device))
Example 19
    def test_getitem(self):
        N = 5
        V = 20
        source = {
            "maps": torch.rand(size=(N, 16, 16, 3)),
            "faces_uvs": torch.randint(size=(N, 10, 3), low=0, high=V),
            "verts_uvs": torch.rand((N, V, 2)),
        }
        tex = Textures(
            maps=source["maps"],
            faces_uvs=source["faces_uvs"],
            verts_uvs=source["verts_uvs"],
        )

        verts = torch.rand(size=(N, V, 3))
        faces = torch.randint(size=(N, 10, 3), high=V)
        meshes = Meshes(verts=verts, faces=faces, textures=tex)

        def tryindex(index):
            tex2 = tex[index]
            meshes2 = meshes[index]
            tex_from_meshes = meshes2.textures
            for item in source:
                basic = source[item][index]
                from_texture = getattr(tex2, item + "_padded")()
                from_meshes = getattr(tex_from_meshes, item + "_padded")()
                if isinstance(index, int):
                    basic = basic[None]
                self.assertClose(basic, from_texture)
                self.assertClose(basic, from_meshes)
                self.assertEqual(
                    from_texture.ndim, getattr(tex, item + "_padded")().ndim
                )
                if item == "faces_uvs":
                    faces_uvs_list = tex_from_meshes.faces_uvs_list()
                    self.assertEqual(basic.shape[0], len(faces_uvs_list))
                    for i, faces_uvs in enumerate(faces_uvs_list):
                        self.assertClose(faces_uvs, basic[i])

        tryindex(2)
        tryindex(slice(0, 2, 1))
        index = torch.tensor([1, 0, 1, 0, 0], dtype=torch.bool)
        tryindex(index)
        index = torch.tensor([0, 0, 0, 0, 0], dtype=torch.bool)
        tryindex(index)
        index = torch.tensor([1, 2], dtype=torch.int64)
        tryindex(index)
        tryindex([2, 4])
Example 20
def load_texture(aux, faces):
    if aux.verts_uvs is None:
        return None

    device = torch.device(config.cuda.device)

    vertices_uvs = aux.verts_uvs[None, ...].to(device)
    faces_uvs = faces.textures_idx[None, ...].to(device)

    texture_maps = aux.texture_images
    texture_maps = list(texture_maps.values())[0]
    texture_maps = texture_maps[None, ...].to(device)

    return Textures(verts_uvs=vertices_uvs,
                    faces_uvs=faces_uvs,
                    maps=texture_maps)
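load_texture consumes the faces and aux structures returned by load_obj; a minimal sketch with a hypothetical path:

from pytorch3d.io import load_obj

verts, faces, aux = load_obj("model.obj")  # hypothetical path
tex = load_texture(aux, faces)  # returns None when the obj carries no UVs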
Example 21
    def forward(self,
                shape_params=None,
                expression_params=None,
                pose_params=None,
                neck_pose=None,
                eye_pose=None,
                transl=None,
                texture_params=None):
        vertices, landmarks = super(TexturedFLAME,
                                    self).forward(shape_params,
                                                  expression_params,
                                                  pose_params, neck_pose,
                                                  eye_pose, transl)
        texture = torch.reshape(
            torch.add(self.texture_mean,
                      torch.matmul(self.texture_params, self.texture_dir)),
            self.texture_shape)
        texture = texture.clamp(0.0, 255.0)
        texture = texture / 255.0
        texture = torch.cat(self.batch_size * [texture.unsqueeze(0)])

        textures = Textures(maps=texture,
                            faces_uvs=self.faces_uvs,
                            verts_uvs=self.verts_uvs)
        meshes = Meshes(
            vertices,
            torch.cat(vertices.shape[0] * [self.faces_tensor.unsqueeze(0)]),
            textures)

        images = bgr_to_rgb(
            self.renderer(meshes)[..., :3].permute(0, -1, 1, 2))
        # images = self.renderer(meshes)[..., :3].permute(0, -1, 1, 2)

        landmarks = self.transform_points(landmarks)
        landmarks[:, :, 0] *= -1
        landmarks[:, :, 1] *= -1

        for bi in range(landmarks.shape[0]):
            for pi in range(landmarks.shape[1]):
                landmarks[bi, pi,
                          0] = self._ndc_to_pix(landmarks[bi, pi, 0],
                                                self.crop_size)
                landmarks[bi, pi,
                          1] = self._ndc_to_pix(landmarks[bi, pi, 1],
                                                self.crop_size)
        landmarks = landmarks[:, :, :2]  # x y only
        return vertices, landmarks, images
Example 22
def load_moon_mesh(_obj_filename):
    # Load the object
    verts, faces, aux = load_obj(_obj_filename)
    faces_idx = faces.verts_idx.to(DEVICE)
    verts = verts.to(DEVICE)

    verts_uvs = aux.verts_uvs[None, ...].to(DEVICE)
    faces_uvs = faces.textures_idx[None, ...].to(DEVICE)
    tex_maps = aux.texture_images
    texture_image = list(tex_maps.values())[0]
    texture_image = texture_image[None, ...].to(DEVICE)
    tex = Textures(verts_uvs=verts_uvs,
                   faces_uvs=faces_uvs,
                   maps=texture_image)
    moon_mesh = Meshes(verts=[verts], faces=[faces_idx], textures=tex)

    return moon_mesh
Example 23
    def set_moon_mesh(self):
        vertices, faces, aux = load_obj(OBJECT_PATH)
        faces_idx = faces.verts_idx.to(self.cuda_device)
        vertices = vertices.to(self.cuda_device)

        verts_uvs = aux.verts_uvs[None, ...].to(self.cuda_device)
        faces_uvs = faces.textures_idx[None, ...].to(self.cuda_device)
        tex_maps = aux.texture_images
        texture_image = list(tex_maps.values())[0]
        texture_image = texture_image[None, ...].to(self.cuda_device)

        tex = Textures(verts_uvs=verts_uvs,
                       faces_uvs=faces_uvs,
                       maps=texture_image)

        self.moon_mesh = Meshes(verts=[vertices],
                                faces=[faces_idx],
                                textures=tex)
Example 24
    def test_interpolate_texture_map(self):
        barycentric_coords = torch.tensor(
            [[0.5, 0.3, 0.2], [0.3, 0.6, 0.1]], dtype=torch.float32
        ).view(1, 1, 1, 2, -1)
        dummy_verts = torch.zeros(4, 3)
        vert_uvs = torch.tensor(
            [[1, 0], [0, 1], [1, 1], [0, 0]], dtype=torch.float32
        )
        face_uvs = torch.tensor([[0, 1, 2], [1, 2, 3]], dtype=torch.int64)
        interpolated_uvs = torch.tensor(
            [[0.5 + 0.2, 0.3 + 0.2], [0.6, 0.3 + 0.6]], dtype=torch.float32
        )

        # Create a dummy texture map
        H = 2
        W = 2
        x = torch.linspace(0, 1, W).view(1, W).expand(H, W)
        y = torch.linspace(0, 1, H).view(H, 1).expand(H, W)
        tex_map = torch.stack([x, y], dim=2).view(1, H, W, 2)
        pix_to_face = torch.tensor([0, 1], dtype=torch.int64).view(1, 1, 1, 2)
        fragments = Fragments(
            pix_to_face=pix_to_face,
            bary_coords=barycentric_coords,
            zbuf=pix_to_face,
            dists=pix_to_face,
        )
        tex = Textures(
            maps=tex_map,
            faces_uvs=face_uvs[None, ...],
            verts_uvs=vert_uvs[None, ...],
        )
        meshes = Meshes(verts=[dummy_verts], faces=[face_uvs], textures=tex)
        texels = interpolate_texture_map(fragments, meshes)

        # Expected output
        pixel_uvs = interpolated_uvs * 2.0 - 1.0
        pixel_uvs = pixel_uvs.view(2, 1, 1, 2)
        tex_map = torch.flip(tex_map, [1])
        tex_map = tex_map.permute(0, 3, 1, 2)
        tex_map = torch.cat([tex_map, tex_map], dim=0)
        expected_out = F.grid_sample(tex_map, pixel_uvs, align_corners=False)
        self.assertTrue(
            torch.allclose(texels.squeeze(), expected_out.squeeze())
        )
Example 25
    def test_extend(self):
        B = 10
        mesh = TestMeshes.init_mesh(B, 30, 50)
        V = mesh._V
        F = mesh._F
        tex = Textures(
            maps=torch.randn((B, 16, 16, 3)),
            faces_uvs=torch.randint(size=(B, F, 3), low=0, high=V),
            verts_uvs=torch.randn((B, V, 2)),
        )
        tex_mesh = Meshes(
            verts=mesh.verts_padded(), faces=mesh.faces_padded(), textures=tex
        )
        N = 20
        new_mesh = tex_mesh.extend(N)

        self.assertEqual(len(tex_mesh) * N, len(new_mesh))

        tex_init = tex_mesh.textures
        new_tex = new_mesh.textures

        for i in range(len(tex_mesh)):
            for n in range(N):
                self.assertClose(
                    tex_init.faces_uvs_list()[i],
                    new_tex.faces_uvs_list()[i * N + n],
                )
                self.assertClose(
                    tex_init.verts_uvs_list()[i],
                    new_tex.verts_uvs_list()[i * N + n],
                )
        self.assertAllSeparate(
            [
                tex_init.faces_uvs_padded(),
                new_tex.faces_uvs_padded(),
                tex_init.verts_uvs_padded(),
                new_tex.verts_uvs_padded(),
                tex_init.maps_padded(),
                new_tex.maps_padded(),
            ]
        )
        with self.assertRaises(ValueError):
            tex_mesh.extend(N=-1)
Example 26
    def __getitem__(self, index):
        path = self.paths[index][0]
        label = self.paths[index][1]

        # Load the obj and ignore the textures and materials.
        verts, faces_idx, _ = load_obj(path)
        faces = faces_idx.verts_idx

        # center = verts.mean(0)
        # verts = verts - center
        # scale = max(verts.abs().max(0)[0])
        # verts = verts / scale

        # Initialize each vertex to be white in color.
        verts_rgb = torch.ones_like(verts)[None]  # (1, V, 3)
        textures = Textures(verts_rgb=verts_rgb)
        # Create a Meshes object for the teapot. Here we have only one mesh in the batch.
        mesh = Meshes(verts=[verts], faces=[faces], textures=textures)

        return mesh, label
Example 27
def load_mesh(obj_path):
    device = torch.device('cuda')

    vertices, faces, aux = load_obj(obj_path)

    vertices_uvs = aux.verts_uvs[None, ...].to(device)
    faces_uvs = faces.textures_idx[None, ...].to(device)

    texture_maps = aux.texture_images
    texture_maps = list(texture_maps.values())[0]
    texture_maps = texture_maps[None, ...].to(device)

    textures = Textures(
        verts_uvs=vertices_uvs,
        faces_uvs=faces_uvs,
        maps=texture_maps,
    )

    vertices = vertices.to(device)
    faces = faces.verts_idx.to(device)

    mesh = Meshes(verts=[vertices], faces=[faces], textures=textures)

    return mesh
Example 28
def render_images(vertices, faces, texture, faces_uvs, verts_uvs, crop_size,
                  device):
    textures = Textures(maps=texture, faces_uvs=faces_uvs, verts_uvs=verts_uvs)
    meshes = Meshes(vertices, faces, textures)

    R, T = look_at_view_transform(1.0, 0.5, 0, device=device)
    camera = OpenGLPerspectiveCameras(R=R, T=T, fov=20, device=device)

    raster_settings = RasterizationSettings(image_size=crop_size * 2,
                                            blur_radius=0.0,
                                            faces_per_pixel=1,
                                            bin_size=None,
                                            max_faces_per_bin=None)

    lights = PointLights(location=[[0.0, 0.0, -3.0]], device=device)

    renderer = MeshRenderer(rasterizer=MeshRasterizer(
        cameras=camera, raster_settings=raster_settings),
                            shader=TexturedSoftPhongShader(cameras=camera,
                                                           device=device,
                                                           lights=lights))

    images = renderer(meshes)
    return images
Example 29
    def get_mesh_from_model_single(self,
                                   model,
                                   basis,
                                   image_folder=config.test_align,
                                   outdir=config.test_obj):
        print("Extract param...")
        # f_model = np.load(config.f_model_path)
        # mu_shape = f_model['mu']
        # w_shape = f_model['w_shape']
        # index = f_model['faces']

        types = ('*.jpg', '*.png')
        image_path_list = []
        for files in types:
            image_path_list.extend(glob(os.path.join(image_folder, files)))
        total_num = len(image_path_list)
        print(total_num)
        # total = 0
        # count = np.zeros(1000)
        #landmarks_map = np.load('landmark.npy')
        for i, image_path in enumerate(image_path_list):
            image = Image.open(image_path)
            data = config.transform_eval_fs(image)
            data = data.unsqueeze(0).cuda()
            _, pred_shape, _, pred_camera_exp = model(data)
            param = torch.cat((pred_shape, pred_camera_exp), dim=1)
            shape_para = pred_shape[:, 0:199]
            exp_para = pred_camera_exp[:, 7:36]
            camera_para = pred_camera_exp[:, 0:7]
            # print(pred_camera_exp[:, 4:7])
            exp_face, rotated, scaled = basis.module(pred_shape, exp_para,
                                                     camera_para)
            # print(scaled)
            scaled_raw = scaled.clone().reshape(3, -1).t()
            scaled_raw[:, 0] = scaled[:, 0] - 56
            scaled_raw[:, 1] = 56 - scaled[:, 1]
            # print(scaled_raw)

            pred_face = basis.module.get_shape_obj(shape_para)
            #exp_face = basis.module.get_exp_obj(shape_para,exp_para)

            face_shape = pred_face.squeeze().reshape(-1,
                                                     3)  #.data.cpu().numpy()
            exp_face = exp_face.squeeze().reshape(-1, 3).data.cpu().numpy()
            rotated = rotated.squeeze().reshape(3, -1).t()
            # scaled_mesh = scaled.squeeze().t()
            scaled = scaled.squeeze()[:2, :].t().unsqueeze(0)
            scaled[:, :, 0] = scaled[:, :, 0] / 96
            scaled[:, :, 1] = 1 - scaled[:, :, 1] / 112
            faces = torch.Tensor(config.index.astype(int))
            tex_rgb = torch.ones_like(rotated).unsqueeze(0)
            texture_img = config.transform_raw_img(image).unsqueeze(
                0).transpose(1, 3).transpose(2, 1)
            #print(scaled_raw)
            textures = Textures(maps=texture_img,
                                verts_uvs=scaled,
                                faces_uvs=faces.long().unsqueeze(0))
            mesh = Meshes(verts=[face_shape], faces=[faces],
                          textures=textures).to(config.device)
            normals = mesh.verts_normals_padded()
            print(normals)

            ##########################################################################
            # Initialize an OpenGL perspective camera.
            # With world coordinates +Y up, +X left and +Z in, the front of the cow is facing the -Z direction.
            # So we move the camera by 180 in the azimuth direction so it is facing the front of the cow.
            #print(rotated)
            R, T = look_at_view_transform(100, 0, 0)
            #print(R,T)
            # principal_point = torch.Tensor[1.0/56,1.0/56]
            cameras = SfMOrthographicCameras(device=config.device,
                                             focal_length=1.0 / 56,
                                             R=R,
                                             T=T)
            #cameras = SfMOrthographicCameras(device=config.device,focal_length=1.0/56,R=R, T=T)
            # Define the settings for rasterization and shading. Here we set the output image to be of size
            # 512x512. As we are rendering images for visualization purposes only we will set faces_per_pixel=1
            # and blur_radius=0.0. We also set bin_size and max_faces_per_bin to None which ensure that
            # the faster coarse-to-fine rasterization method is used. Refer to rasterize_meshes.py for
            # explanations of these parameters. Refer to docs/notes/renderer.md for an explanation of
            # the difference between naive and coarse-to-fine rasterization.
            raster_settings = RasterizationSettings(
                image_size=112,
                blur_radius=0.0,
                faces_per_pixel=1,
                bin_size=None,  # controls whether naive or coarse-to-fine rasterization is used
                max_faces_per_bin=None,  # this setting is for coarse rasterization
            )

            # Place a point light in front of the object. As mentioned above, the front of the cow is facing the
            # -z direction.
            lights = PointLights(device=config.device,
                                 location=[[0.0, 0.0, 3.0]])

            # Create a phong renderer by composing a rasterizer and a shader. The textured phong shader will
            # interpolate the texture uv coordinates for each vertex, sample from a texture image and
            # apply the Phong lighting model
            renderer = MeshRenderer(
                rasterizer=MeshRasterizer(cameras=cameras,
                                          raster_settings=raster_settings),
                shader=TexturedSoftPhongShader(device=config.device,
                                               cameras=cameras,
                                               lights=None))

            # ## 3. Render the mesh

            # The light is in front of the object so it is bright and the image has specular highlights.

            ##########################################################################

            # pose_exp = pose_exp.squeeze().data.cpu().numpy()
            # R = Q2R(pose_exp[0:4])
            # res.append((param.data.cpu().numpy(),imname))
            # norm.append(torch.norm(param,dim=1).mean().item())

            # self.vertices2obj(vertex2.data.cpu().numpy().reshape(len(vertex2),-1,3),target.data.cpu().numpy(), imname=imname)
            # face_shape = (mu_shape + param @ w_shape).reshape(-1,3)
            #filepath = image_path.replace('\n',"")
            #identify = int(filepath.split('/')[-2])
            rotated = rotated.data.cpu().numpy()
            filename = image_path.split('/')[-1][:-3] + "obj"
            filename_exp = image_path.split('/')[-1][:-4] + "_e.obj"
            filename_all = image_path.split('/')[-1][:-4] + "_a.obj"
            filename_render = image_path.split('/')[-1][:-4] + ".png"
            #outdir = os.path.join(out_root,str(identify))
            # if not os.path.exists(outdir):
            #     os.mkdir(outdir)
            filename = os.path.join(outdir, filename)
            filename_exp = os.path.join(outdir, filename_exp)
            filename_all = os.path.join(outdir, filename_all)
            filename_render = os.path.join(outdir, filename_render)

            images = renderer(mesh)
            #print(images)
            images = images[0, :112, :96, :]
            mask = images[..., 3].unsqueeze(-1)
            #mask
            out_img = torch.where(mask == 0, texture_img[0].to(config.device),
                                  images[..., :3])

            #print(texture_img,out_img.shape)
            plt.figure(figsize=(10, 10))
            #print("saving figure!")
            plt.imsave(filename_render, out_img[..., :3].data.cpu().numpy())
            plt.grid("off")
            plt.axis("off")
            #print(face_shape)
            # self.write_obj(filename,face_shape)
            # self.write_obj(filename_exp,exp_face)
            self.write_obj(filename_all, rotated)
Example 30
    def test_join_meshes(self):
        """
        Test that join_meshes and load_objs_as_meshes are consistent with single
        meshes.
        """
        def check_triple(mesh, mesh3):
            """
            Verify that mesh3 is three copies of mesh.
            """
            def check_item(x, y):
                self.assertEqual(x is None, y is None)
                if x is not None:
                    self.assertClose(torch.cat([x, x, x]), y)

            check_item(mesh.verts_padded(), mesh3.verts_padded())
            check_item(mesh.faces_padded(), mesh3.faces_padded())
            if mesh.textures is not None:
                check_item(mesh.textures.maps_padded(),
                           mesh3.textures.maps_padded())
                check_item(
                    mesh.textures.faces_uvs_padded(),
                    mesh3.textures.faces_uvs_padded(),
                )
                check_item(
                    mesh.textures.verts_uvs_padded(),
                    mesh3.textures.verts_uvs_padded(),
                )
                check_item(
                    mesh.textures.verts_rgb_padded(),
                    mesh3.textures.verts_rgb_padded(),
                )

        DATA_DIR = (Path(__file__).resolve().parent.parent /
                    "docs/tutorials/data")
        obj_filename = DATA_DIR / "cow_mesh/cow.obj"

        mesh = load_objs_as_meshes([obj_filename])
        mesh3 = load_objs_as_meshes([obj_filename, obj_filename, obj_filename])
        check_triple(mesh, mesh3)
        self.assertTupleEqual(mesh.textures.maps_padded().shape,
                              (1, 1024, 1024, 3))

        mesh_notex = load_objs_as_meshes([obj_filename], load_textures=False)
        mesh3_notex = load_objs_as_meshes(
            [obj_filename, obj_filename, obj_filename], load_textures=False)
        check_triple(mesh_notex, mesh3_notex)
        self.assertIsNone(mesh_notex.textures)

        verts = torch.randn((4, 3), dtype=torch.float32)
        faces = torch.tensor([[2, 1, 0], [3, 1, 0]], dtype=torch.int64)
        vert_tex = torch.tensor([[0, 1, 0], [0, 1, 1], [1, 1, 0], [1, 1, 1]],
                                dtype=torch.float32)
        tex = Textures(verts_rgb=vert_tex[None, :])
        mesh_rgb = Meshes(verts=[verts], faces=[faces], textures=tex)
        mesh_rgb3 = join_meshes([mesh_rgb, mesh_rgb, mesh_rgb])
        check_triple(mesh_rgb, mesh_rgb3)

        teapot_obj = DATA_DIR / "teapot.obj"
        mesh_teapot = load_objs_as_meshes([teapot_obj])
        teapot_verts, teapot_faces = mesh_teapot.get_mesh_verts_faces(0)
        mix_mesh = load_objs_as_meshes([obj_filename, teapot_obj],
                                       load_textures=False)
        self.assertEqual(len(mix_mesh), 2)
        self.assertClose(mix_mesh.verts_list()[0], mesh.verts_list()[0])
        self.assertClose(mix_mesh.faces_list()[0], mesh.faces_list()[0])
        self.assertClose(mix_mesh.verts_list()[1], teapot_verts)
        self.assertClose(mix_mesh.faces_list()[1], teapot_faces)

        cow3_tea = join_meshes([mesh3, mesh_teapot], include_textures=False)
        self.assertEqual(len(cow3_tea), 4)
        check_triple(mesh_notex, cow3_tea[:3])
        self.assertClose(cow3_tea.verts_list()[3], mesh_teapot.verts_list()[0])
        self.assertClose(cow3_tea.faces_list()[3], mesh_teapot.faces_list()[0])