Example #1
    def read(
        self,
        path: Union[str, Path],
        include_textures: bool,
        device,
        path_manager: PathManager,
        **kwargs,
    ) -> Optional[Meshes]:
        if not endswith(path, self.known_suffixes):
            return None

        verts, faces, verts_colors, verts_normals = _load_ply(
            f=path, path_manager=path_manager
        )
        if faces is None:
            faces = torch.zeros(0, 3, dtype=torch.int64)

        texture = None
        if include_textures and verts_colors is not None:
            texture = TexturesVertex([verts_colors.to(device)])

        if verts_normals is not None:
            verts_normals = [verts_normals.to(device)]
        mesh = Meshes(
            verts=[verts.to(device)],
            faces=[faces.to(device)],
            textures=texture,
            verts_normals=verts_normals,
        )
        return mesh
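Every example on this page follows the same pattern: build a per-vertex feature tensor, wrap it in TexturesVertex, and attach it to a Meshes object. As a reference point, here is a minimal self-contained sketch of that contract (not taken from any example on this page); verts_features must be either a padded (N, V, C) tensor or a list of per-mesh (V, C) tensors:

import torch
from pytorch3d.renderer import TexturesVertex
from pytorch3d.structures import Meshes

verts = torch.rand(4, 3)                         # (V, 3) float32 positions
faces = torch.tensor([[0, 1, 2], [0, 2, 3]])     # (F, 3) int64 indices
colors = torch.ones_like(verts)                  # one RGB triple per vertex

textures = TexturesVertex(verts_features=[colors])  # list of (V, C) tensors
mesh = Meshes(verts=[verts], faces=[faces], textures=textures)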
Example #2
    def init_meshes(
        num_meshes: int = 10,
        num_verts: int = 1000,
        num_faces: int = 3000,
        device: str = "cpu",
        add_texture: bool = False,
    ):
        device = torch.device(device)
        verts_list = []
        faces_list = []
        texts_list = []
        for _ in range(num_meshes):
            verts = torch.rand((num_verts, 3),
                               dtype=torch.float32,
                               device=device)
            faces = torch.randint(num_verts,
                                  size=(num_faces, 3),
                                  dtype=torch.int64,
                                  device=device)
            texts = torch.rand((num_verts, 3),
                               dtype=torch.float32,
                               device=device)
            verts_list.append(verts)
            faces_list.append(faces)
            texts_list.append(texts)

        # create textures
        textures = None
        if add_texture:
            textures = TexturesVertex(texts_list)
        meshes = Meshes(verts=verts_list, faces=faces_list, textures=textures)

        return meshes
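A quick way to exercise init_meshes above; the sizes here are arbitrary and the check only confirms the batched texture shape:

meshes = init_meshes(num_meshes=2, num_verts=100, num_faces=300, add_texture=True)
assert len(meshes) == 2
features = meshes.textures.verts_features_padded()  # shape (2, 100, 3)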
Example #3
    def forward(self, coeff):

        batch_num = coeff.shape[0]

        id_coeff, ex_coeff, tex_coeff, angles, gamma, translation = self.Split_coeff(
            coeff)

        face_shape = self.Shape_formation(id_coeff, ex_coeff)
        face_texture = self.Texture_formation(tex_coeff)
        face_norm = self.Compute_norm(face_shape)
        rotation = self.Compute_rotation_matrix(angles)
        face_norm_r = face_norm.bmm(rotation)
        face_shape_t = self.Rigid_transform_block(face_shape, rotation,
                                                  translation)
        face_color = self.Illumination_layer(face_texture, face_norm_r, gamma)
        face_lms_t = self.get_lms(face_shape_t, self.kp_inds)
        lms = self.Projection_block(face_lms_t)
        lms = torch.stack([lms[:, :, 0], self.img_size - lms[:, :, 1]], dim=2)

        face_color = TexturesVertex(face_color)

        tri = self.tri - 1
        mesh = Meshes(face_shape_t, tri.repeat(batch_num, 1, 1), face_color)
        rendered_img = self.renderer(mesh)
        rendered_img = torch.clamp(rendered_img, 0, 255)

        return rendered_img, lms, face_texture, mesh
Example #4
def load_meshes(meshes_path: str = "./data/meshes", device: str = "") -> dict:

    meshes_path = abspath(join(dirname(__file__), meshes_path))
    meshes_files = [
        join(meshes_path, mesh)
        for mesh in os.listdir(meshes_path)
        if mesh.endswith(".obj")
    ]

    # Load the object without textures and materials
    meshes = {}
    for path in meshes_files:
        verts, faces_idx, aux = load_obj(path)
        faces = faces_idx.verts_idx

        # Scale normalize the target mesh to fit in a sphere of radius 1 centered at (0,0,0)
        center = verts.mean(0)
        verts = verts - center
        scale = max(verts.abs().max(0)[0])
        verts = verts / scale

        # Initialize each vertex to be white in color when a texture atlas
        # exists. TexturesVertex expects verts_features as an (N, V, 3) tensor
        # or a list of (V, 3) tensors, so batch the colors with [None].
        textures = None
        if aux.texture_atlas is not None:
            verts_rgb = torch.ones_like(verts)[None]
            textures = TexturesVertex(verts_features=verts_rgb.to(device))

        # Create a Meshes object for the teapot. Here we have only one mesh in the batch.
        mesh_name = path.split("/")[-1].split(".")[0]
        meshes[mesh_name] = Meshes(
            verts=[verts.to(device)], faces=[faces.to(device)], textures=textures
        )

    return meshes
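Assuming a ./data/meshes directory of .obj files next to the module (the default in the signature above), usage reduces to a single call; the "teapot" key below is hypothetical and depends on which files are present:

meshes = load_meshes(device="cpu")
teapot = meshes.get("teapot")  # keys are the .obj file name stems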
Example #5
    def create_scene(self, hws, alphas, N):
        batch = []
        for i in range(N):
            scene = []
            for mesh_name in self.models:
                hw = hws[mesh_name]
                alpha = alphas[mesh_name]
                _, K, _ = hw.shape  # avoid shadowing the batch size N
                for k in range(K):
                    c = self.colors[mesh_name].clone()
                    c[..., 3] = alpha[i, k]

                    textures = TexturesVertex(verts_features=[c])
                    m = Meshes(verts=[self.verts[mesh_name].clone()],
                               faces=[self.faces[mesh_name].clone()],
                               textures=textures)

                    t = Translate(y=hw[i, k, 0],
                                  x=hw[i, k, 1],
                                  z=torch.zeros(1, device=self.device),
                                  device=str(self.device))
                    m = m.update_padded(t.transform_points(m.verts_padded()))
                    scene += [m]
            batch += [join_meshes_as_scene(scene)]
        batch = join_meshes_as_batch(batch)
        return batch
Example #6
def get_segmentation(obj_path, image_path, renderer):

    input_image = cv2.imread(image_path)
    input_image = input_image[:, :input_image.shape[1] // 3]
    input_image = cv2.resize(input_image, (1024, 1024))

    # Setup
    device = torch.device("cuda:0")
    torch.cuda.set_device(device)

    # Load obj file
    verts_rgb_colors = get_verts_rgb_colors(obj_path)
    verts_rgb_colors = torch.from_numpy(verts_rgb_colors).to(device)
    textures = TexturesVertex(verts_features=verts_rgb_colors)
    wo_textures = TexturesVertex(verts_features=torch.ones_like(verts_rgb_colors) * 0.75)

    # Load obj
    mesh = load_objs_as_meshes([obj_path], device=device)

    # Set mesh (use the public accessors rather than private attributes)
    vers = mesh.verts_list()
    faces = mesh.faces_list()
    mesh_w_tex = Meshes(vers, faces, textures)
    mesh_wo_tex = Meshes(vers, faces, wo_textures)

    R, T = look_at_view_transform(1.8, 0, 0, device=device)
    images_w_tex = renderer(mesh_w_tex, R=R, T=T)
    images_w_tex = np.clip(images_w_tex[0, ..., :3].cpu().numpy(), 0.0,
                           1.0)[:, :, ::-1] * 255
    images_wo_tex = renderer(mesh_wo_tex, R=R, T=T)
    images_wo_tex = np.clip(images_wo_tex[0, ..., :3].cpu().numpy(), 0.0,
                            1.0)[:, :, ::-1] * 255

    return input_image, images_w_tex, images_wo_tex
Example #7
    def read(
        self,
        path: PathOrStr,
        include_textures: bool,
        device,
        path_manager: PathManager,
        **kwargs,
    ) -> Optional[Meshes]:
        if not endswith(path, self.known_suffixes):
            return None

        with _open_file(path, path_manager, "rb") as f:
            data = _load_off_stream(f)
        verts = torch.from_numpy(data["verts"]).to(device)
        if "faces" in data:
            faces = torch.from_numpy(data["faces"]).to(dtype=torch.int64,
                                                       device=device)
        else:
            faces = torch.zeros((0, 3), dtype=torch.int64, device=device)

        textures = None
        if "verts_colors" in data:
            if "faces_colors" in data:
                msg = "Faces colors ignored because vertex colors provided too."
                warnings.warn(msg)
            verts_colors = torch.from_numpy(data["verts_colors"]).to(device)
            textures = TexturesVertex([verts_colors])
        elif "faces_colors" in data:
            faces_colors = torch.from_numpy(data["faces_colors"]).to(device)
            textures = TexturesAtlas([faces_colors[:, None, None, :]])

        mesh = Meshes(verts=[verts], faces=[faces], textures=textures)
        return mesh
Example #8
def create_circle(r=1,
                  offset=[0, 0, 0],
                  rgb=[1.0, 1.0, 1.0],
                  device="cpu",
                  num=16):
    # Create vertices and faces
    theta = np.linspace(0, 2 * np.pi, num=num, endpoint=False)
    x_ = r * np.cos(theta)[:, np.newaxis]
    y_ = r * np.sin(theta)[:, np.newaxis]
    z_ = np.zeros_like(x_)
    vertices = np.concatenate((x_, y_, z_), axis=1)
    vertices = np.r_[vertices, np.array([[0, 0, 0]])].astype(np.float32)
    vertices += np.array(offset)
    faces = np.roll(np.arange(2 * num) // 2, -1).reshape(-1, 2)
    # Face indices must be integers (int64), not float32, for Meshes.
    faces = np.c_[faces, np.full(num, num)].astype(np.int64)
    vertices = torch.from_numpy(vertices)
    faces = torch.from_numpy(faces)

    # Set color
    verts_rgb = torch.zeros_like(vertices)[None]
    verts_rgb += torch.from_numpy(np.array(rgb, dtype=np.float32))
    textures = TexturesVertex(verts_features=verts_rgb.to(device))

    # Construct mesh
    mesh = Meshes(verts=[vertices.to(device)],
                  faces=[faces.to(device)],
                  textures=textures)
    return mesh
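A small usage sketch for create_circle above; the radii, colors, and offsets are arbitrary, and join_meshes_as_scene is the standard PyTorch3D way to combine such primitives:

from pytorch3d.structures import join_meshes_as_scene

inner = create_circle(r=0.5, rgb=[1.0, 0.0, 0.0], num=32)
outer = create_circle(r=1.0, offset=[0, 0, -0.1], rgb=[0.0, 0.0, 1.0], num=64)
scene = join_meshes_as_scene([inner, outer])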
Example #9
def render_cubified_voxels(voxels: torch.Tensor,
                           shader_type=HardPhongShader,
                           device="cpu",
                           **kwargs):
    """
    Use the Cubify operator to convert inputs voxels to a mesh and then render that mesh.

    Args:
        voxels: FloatTensor of shape (N, D, D, D) where N is the batch size and
            D is the number of voxels along each dimension.
        shader_type: Shader to use for rendering. Examples include
            HardPhongShader (default), SoftPhongShader, or any other valid
            Shader class.
        device: torch.device on which the tensors should be located.
        **kwargs: Accepts any of the kwargs that the renderer supports.
    Returns:
        Batch of rendered images of shape (N, H, W, 3).
    """
    cubified_voxels = cubify(voxels, CUBIFY_THRESH).to(device)
    cubified_voxels.textures = TexturesVertex(verts_features=torch.ones_like(
        cubified_voxels.verts_padded(), device=device))
    cameras = BlenderCamera(device=device)
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=cameras,
            raster_settings=kwargs.get("raster_settings",
                                       RasterizationSettings()),
        ),
        shader=shader_type(
            device=device,
            cameras=cameras,
            lights=kwargs.get("lights", PointLights()).to(device),
        ),
    )
    return renderer(cubified_voxels)
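A minimal smoke test for render_cubified_voxels above; the voxel grid is random, so the image content is meaningless and only the shapes matter:

voxels = (torch.rand(2, 24, 24, 24) > 0.5).float()  # two 24^3 occupancy grids
images = render_cubified_voxels(voxels, device="cpu")
print(images.shape)  # (2, H, W, C)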
Example #10
    def update(self, pose):
        textures = TexturesVertex(verts_features=[self.object.colors.clone()])
        self.mesh = Meshes(verts=[self.object.verts.clone()],
                           faces=[self.object.faces.clone()],
                           textures=textures)
        self.pose = pose
        v = self.pose.transform_points(self.mesh.verts_padded())
        self.mesh = self.mesh.update_padded(v)
Example #11
    def test_mesh_renderer_to(self):
        """
        Test moving all the tensors in the mesh renderer to a new device.
        """

        device1 = torch.device("cpu")

        R, T = look_at_view_transform(1500, 0.0, 0.0)

        # Init shader settings
        materials = Materials(device=device1)
        lights = PointLights(device=device1)
        lights.location = torch.tensor([0.0, 0.0, +1000.0], device=device1)[None]

        raster_settings = RasterizationSettings(
            image_size=256, blur_radius=0.0, faces_per_pixel=1
        )
        cameras = FoVPerspectiveCameras(
            device=device1, R=R, T=T, aspect_ratio=1.0, fov=60.0, zfar=100
        )
        rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)

        blend_params = BlendParams(
            1e-4,
            1e-4,
            background_color=torch.zeros(3, dtype=torch.float32, device=device1),
        )

        shader = SoftPhongShader(
            lights=lights,
            cameras=cameras,
            materials=materials,
            blend_params=blend_params,
        )
        renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)

        mesh = ico_sphere(2, device1)
        verts_padded = mesh.verts_padded()
        textures = TexturesVertex(
            verts_features=torch.ones_like(verts_padded, device=device1)
        )
        mesh.textures = textures
        self._check_mesh_renderer_props_on_device(renderer, device1)

        # Test rendering on cpu
        output_images = renderer(mesh)
        self.assertEqual(output_images.device, device1)

        # Move renderer and mesh to another device and re render
        # This also tests that background_color is correctly moved to
        # the new device
        device2 = torch.device("cuda:0")
        renderer = renderer.to(device2)
        mesh = mesh.to(device2)
        self._check_mesh_renderer_props_on_device(renderer, device2)
        output_images = renderer(mesh)
        self.assertEqual(output_images.device, device2)
Example #12
def test_bounding_box():
    verts = torch.tensor([[-2, -1, 0], [2, -1, 0], [2, 1, 0], [-2, 1, 0]],
                         dtype=torch.float) * 20.0

    faces = torch.LongTensor([[0, 1, 2], [0, 2, 3]])

    white = torch.ones_like(verts)
    red = white * torch.tensor([1.0, 0.0, 0.0])
    green = white * torch.tensor([0.0, 1.0, 0.0])
    blue = white * torch.tensor([0.0, 0.0, 1.0])

    meshes = Meshes(verts=[verts],
                    faces=[faces],
                    textures=TexturesVertex([blue]))

    distance = 30
    elevation = 0.0
    azimuth = 0

    R, T = look_at_view_transform(distance, elevation, azimuth)
    cameras = FoVOrthographicCameras(max_x=64.0,
                                     max_y=64.0,
                                     min_x=-64.0,
                                     min_y=-64.0,
                                     scale_xyz=((1, 1, 1), ),
                                     R=R,
                                     T=T)

    bb = BoundingBoxes(meshes, cameras, screen_size=(128, 128))

    raster_settings = RasterizationSettings(
        image_size=128,
        blur_radius=0,
        faces_per_pixel=6,
    )

    renderer = MeshRenderer(rasterizer=MeshRasterizer(
        cameras=cameras,
        raster_settings=raster_settings,
    ),
                            shader=IdentityShader())

    fig, ax = plt.subplots(ncols=1,
                           nrows=1,
                           figsize=(10, 10),
                           constrained_layout=False)
    ax.imshow(renderer(meshes)[0, :, :, 0, :])

    boxes_rect = patches.Rectangle(bb.bottom_left(0),
                                   width=bb.width(0),
                                   height=bb.height(0),
                                   linewidth=4,
                                   edgecolor='r',
                                   facecolor='none')
    ax.add_patch(boxes_rect)
    plt.show()
Example #13
            def forward(self, verts, texs):
                batch_size = verts.size(0)
                self.renderer = self.renderer.to(verts.device)
                tex = TexturesVertex(verts_features=texs)
                faces = self.faces.expand(batch_size, -1, -1).to(verts.device)
                mesh = Meshes(verts, faces, tex).to(verts.device)

                test._check_mesh_renderer_props_on_device(self.renderer, verts.device)
                img_render = self.renderer(mesh)
                return img_render[:, :, :, :3]
Example #14
    def __call__(self, vertices, faces, device):
        colors = torch.ones_like(vertices)
        if self.renderer is None:
            self.setup(device)
        textures = TexturesVertex(verts_features=colors)

        mesh = Meshes(verts=vertices, faces=faces, textures=textures)
        r_images = self.renderer(mesh)
        r_images = r_images.permute(0, 3, 1, 2).contiguous()
        r_images = r_images[:, :3, :, :].mean(dim=1, keepdim=True)
        return r_images
Example #15
    def render(self,
               model_ids: Optional[List[str]] = None,
               categories: Optional[List[str]] = None,
               sample_nums: Optional[List[int]] = None,
               idxs: Optional[List[int]] = None,
               shader_type=HardPhongShader,
               device="cpu",
               **kwargs) -> torch.Tensor:
        """
        If a list of model_ids are supplied, render all the objects by the given model_ids.
        If no model_ids are supplied, but categories and sample_nums are specified, randomly
        select a number of objects (number specified in sample_nums) in the given categories
        and render these objects. If instead a list of idxs is specified, check if the idxs
        are all valid and render models by the given idxs. Otherwise, randomly select a number
        (first number in sample_nums, default is set to be 1) of models from the loaded dataset
        and render these models.

        Args:
            model_ids: List[str] of model_ids of models intended to be rendered.
            categories: List[str] of categories intended to be rendered. categories
                and sample_nums must be specified at the same time. categories can be given
                in the form of synset offsets or labels, or a combination of both.
            sample_nums: List[int] of number of models to be randomly sampled from
                each category. Could also contain one single integer, in which case it
                will be broadcasted for every category.
            idxs: List[int] of indices of models to be rendered in the dataset.
            shader_type: Select shading. Valid options include HardPhongShader (default),
                SoftPhongShader, HardGouraudShader, SoftGouraudShader, HardFlatShader,
                SoftSilhouetteShader.
            device: torch.device on which the tensors should be located.
            **kwargs: Accepts any of the kwargs that the renderer supports.

        Returns:
            Batch of rendered images of shape (N, H, W, 3).
        """
        paths = self._handle_render_inputs(model_ids, categories, sample_nums,
                                           idxs)
        meshes = load_objs_as_meshes(paths, device=device, load_textures=False)
        meshes.textures = TexturesVertex(verts_features=torch.ones_like(
            meshes.verts_padded(), device=device))
        cameras = kwargs.get("cameras", OpenGLPerspectiveCameras()).to(device)
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=cameras,
                raster_settings=kwargs.get("raster_settings",
                                           RasterizationSettings()),
            ),
            shader=shader_type(
                device=device,
                cameras=cameras,
                lights=kwargs.get("lights", PointLights()).to(device),
            ),
        )
        return renderer(meshes)
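A hypothetical call pattern for the render method above, assuming shapenet_db is an already-constructed dataset instance that exposes it:

images = shapenet_db.render(
    categories=["chair"],
    sample_nums=[2],
    device="cuda:0",
    raster_settings=RasterizationSettings(image_size=256),
)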
Example #16
def generate_video_from_obj(obj_path, image_path, video_path, renderer):
    input_image = cv2.imread(image_path)
    input_image = input_image[:,:input_image.shape[1]//3]
    input_image = cv2.resize(input_image, (512,512))

    # Setup
    device = torch.device("cuda:0")
    torch.cuda.set_device(device)

    # Load obj file
    verts_rgb_colors = get_verts_rgb_colors(obj_path)
    verts_rgb_colors = torch.from_numpy(verts_rgb_colors).to(device)
    textures = TexturesVertex(verts_features=verts_rgb_colors)
    wo_textures = TexturesVertex(verts_features=torch.ones_like(verts_rgb_colors)*0.75)

    # Load obj
    mesh = load_objs_as_meshes([obj_path], device=device)

    # Set mesh
    vers = mesh.verts_list()
    faces = mesh.faces_list()
    mesh_w_tex = Meshes(vers, faces, textures)
    mesh_wo_tex = Meshes(vers, faces, wo_textures)

    # create VideoWriter
    fourcc = cv2.VideoWriter_fourcc(*'MP4V')
    out = cv2.VideoWriter(video_path, fourcc, 20.0, (1024,512))

    for i in tqdm(range(90)):
        R, T = look_at_view_transform(1.8, 0, i*4, device=device)
        images_w_tex = renderer(mesh_w_tex, R=R, T=T)
        images_w_tex = np.clip(images_w_tex[0, ..., :3].cpu().numpy(), 0.0, 1.0)[:, :, ::-1] * 255
        # images_wo_tex = renderer(mesh_wo_tex, R=R, T=T)
        # images_wo_tex = np.clip(images_wo_tex[0, ..., :3].cpu().numpy(), 0.0, 1.0)[:, :, ::-1] * 255
        image = np.concatenate([input_image, images_w_tex], axis=1)
        out.write(image.astype('uint8'))
    out.release()
Example #17
    def test_texture_sampling(self):
        device = torch.device("cuda:0")
        batch_size = 6
        # verts
        verts = torch.rand((batch_size, 6, 3),
                           device=device,
                           dtype=torch.float32)
        verts[:, :3, 2] = 1.0
        verts[:, 3:, 2] = -1.0
        # textures
        texts = torch.rand((batch_size, 6, 3),
                           device=device,
                           dtype=torch.float32)
        # faces
        faces = torch.tensor([[0, 1, 2], [3, 4, 5]],
                             device=device,
                             dtype=torch.int64)
        faces = faces.view(1, 2, 3).expand(batch_size, -1, -1)

        meshes = Meshes(verts=verts,
                        faces=faces,
                        textures=TexturesVertex(texts))

        num_samples = 24
        samples, normals, textures = sample_points_from_meshes(
            meshes,
            num_samples=num_samples,
            return_normals=True,
            return_textures=True)

        textures_naive = torch.zeros((batch_size, num_samples, 3),
                                     dtype=torch.float32,
                                     device=device)
        for n in range(batch_size):
            for i in range(num_samples):
                p = samples[n, i]
                if p[2] > 0.0:  # sampled from 1st face
                    v0, v1, v2 = verts[n, 0, :2], verts[n, 1, :2], verts[n, 2, :2]
                    w0, w1, w2 = barycentric_coordinates(p[:2], v0, v1, v2)
                    t0, t1, t2 = texts[n, 0], texts[n, 1], texts[n, 2]
                else:  # sampled from 2nd face
                    v0, v1, v2 = verts[n, 3, :2], verts[n, 4, :2], verts[n, 5, :2]
                    w0, w1, w2 = barycentric_coordinates(p[:2], v0, v1, v2)
                    t0, t1, t2 = texts[n, 3], texts[n, 4], texts[n, 5]

                tt = w0 * t0 + w1 * t1 + w2 * t2
                textures_naive[n, i] = tt

        self.assertClose(textures, textures_naive)
Example #18
def get_camera_meshes(camera_list, radius=0.02):
    verts_list = []
    faces_list = []
    color_list = []
    rots = np.array([
        quaternion.as_rotation_matrix(camera_info["rotation"])
        for camera_info in camera_list
    ])

    # ai habitat frame
    lookat = np.array([0, 0, -1])
    vertical = np.array([0, 1, 0])

    positions = np.array(
        [camera_info["position"].flatten() for camera_info in camera_list])
    lookats = rots @ lookat.T
    verticals = rots @ vertical.T
    predetermined_color = [
        [0.10196, 0.32157, 1.0],
        [1.0, 0.0667, 0.1490],
        [197 / 255, 181 / 255, 24 / 255],
        [73 / 255, 145 / 255, 115 / 255],
        [198 / 255, 120 / 255, 221 / 255],
    ][:len(camera_list)]
    assert len(predetermined_color) == len(camera_list)
    for idx, (position, lookat, vertical, color) in enumerate(
            zip(positions, lookats, verticals, predetermined_color)):
        cur_num_verts = 0
        edges = get_cone_edges(position, lookat, vertical)
        cam_verts = []
        cam_inds = []
        for k in range(len(edges)):
            cyl_verts, cyl_ind = create_cylinder_mesh(radius, edges[k][0],
                                                      edges[k][1])
            cyl_verts = [x for x in cyl_verts]
            cyl_ind = [x + cur_num_verts for x in cyl_ind]
            cur_num_verts += len(cyl_verts)
            cam_verts.extend(cyl_verts)
            cam_inds.extend(cyl_ind)
        # Create a textures object
        verts_list.append(torch.tensor(cam_verts, dtype=torch.float32))
        faces_list.append(torch.tensor(cam_inds, dtype=torch.int64))
        color_list.append(color)

    # TexturesVertex needs one feature per vertex, so repeat each camera's
    # color across all of that camera's vertices.
    verts_features = [
        torch.tensor(color, dtype=torch.float32).expand(v.shape[0], 3)
        for color, v in zip(color_list, verts_list)
    ]
    tex = TexturesVertex(verts_features=verts_features)

    # Initialise the mesh with textures
    meshes = Meshes(verts=verts_list, faces=faces_list, textures=tex)
    return meshes
Example #19
def generate_video_from_obj(obj_path, video_path, renderer):
    # Setup
    device = torch.device("cuda:0")
    torch.cuda.set_device(device)

    # Load obj file
    verts_rgb_colors = get_verts_rgb_colors(obj_path)
    verts_rgb_colors = torch.from_numpy(verts_rgb_colors).to(device)
    textures = TexturesVertex(verts_features=verts_rgb_colors)
    wo_textures = TexturesVertex(
        verts_features=torch.ones_like(verts_rgb_colors) * 0.75)

    # Load obj
    #mesh = load_objs_as_meshes([obj_path], device=device)
    mesh = trimesh.load(obj_path)
    mesh.vertices -= mesh.center_mass

    # Set mesh (Meshes expects lists of per-mesh tensors; faces must be int64)
    mesh_wo_tex = Meshes(
        verts=[torch.as_tensor(mesh.vertices, dtype=torch.float32).to(device)],
        faces=[torch.as_tensor(mesh.faces, dtype=torch.int64).to(device)],
        textures=wo_textures)

    # create VideoWriter
    fourcc = cv2.VideoWriter_fourcc(*'MP4V')
    out = cv2.VideoWriter(video_path, fourcc, 20.0, (512, 512))

    for i in tqdm(range(90)):
        # A fixed viewing distance of 1.8 matches the other examples on this
        # page; the original referenced an undefined `vers` variable here.
        R, T = look_at_view_transform(1.8, 0, i * 4, device=device)
        images_wo_tex = renderer(mesh_wo_tex, R=R, T=T)
        images_wo_tex = np.clip(images_wo_tex[0, ..., :3].cpu().numpy(), 0.0,
                                1.0)[:, :, ::-1] * 255
        #image = np.concatenate([images_w_tex, images_wo_tex], axis=1)
        image = images_wo_tex
        out.write(image.astype('uint8'))
    out.release()
Example #20
def save_p3d_mesh(verts, faces, filling_factors):
    features = [(int(i * 255), 0, 0) for i in filling_factors]
    features = torch.unsqueeze(torch.Tensor(features), 0)
    if torch.cuda.is_available():
        device = torch.device("cuda:0")
        torch.cuda.set_device(device)
    else:
        device = torch.device("cpu")

    texture = TexturesVertex(features)
    # Face indices must be int64, and .to(device) keeps the CPU fallback
    # above working, unlike an unconditional .cuda().
    mesh = Meshes(torch.unsqueeze(torch.Tensor(verts), 0),
                  torch.unsqueeze(torch.as_tensor(faces, dtype=torch.int64), 0),
                  texture).to(device)

    # Initialize a camera.
    # Rotate the object by increasing the elevation and azimuth angles
    R, T = look_at_view_transform(dist=2.0, elev=-50, azim=-90)
    cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

    # Define the settings for rasterization and shading. Here we set the output
    # image to be of size 1024x1024. As we are rendering for visualization only,
    # we set faces_per_pixel=1 and blur_radius=0.0. bin_size and max_faces_per_bin
    # default to None, which enables the faster coarse-to-fine rasterization.
    # Refer to rasterize_meshes.py and docs/notes/renderer.md for details.
    raster_settings = RasterizationSettings(
        image_size=1024,
        blur_radius=0.0,
        faces_per_pixel=1,
    )

    # Place a point light in front of the object.
    lights = PointLights(device=device, location=[[0.0, 0.0, -3.0]])

    # Create a Phong renderer by composing a rasterizer and a shader. With
    # TexturesVertex, the shader interpolates per-vertex colors and applies
    # the Phong lighting model.
    renderer = MeshRenderer(rasterizer=MeshRasterizer(
        cameras=cameras, raster_settings=raster_settings),
                            shader=SoftPhongShader(device=device,
                                                   cameras=cameras,
                                                   lights=lights))

    img = renderer(mesh)
    plt.figure(figsize=(10, 10))
    plt.imshow(img[0].cpu().numpy())
    plt.show()
Example #21
    def forward(self, batch_size):
        # Offset the mesh
        deformed_mesh_verts = self.template_mesh.offset_verts(
            self.deform_verts)
        texture = TexturesVertex(self.textures)
        deformed_mesh = Meshes(
            verts=deformed_mesh_verts.verts_padded(),
            faces=deformed_mesh_verts.faces_padded(),
            textures=texture,
        )
        deformed_meshes = deformed_mesh.extend(batch_size)

        laplacian_loss = mesh_laplacian_smoothing(deformed_mesh,
                                                  method="uniform")
        flatten_loss = mesh_normal_consistency(deformed_mesh)

        return deformed_meshes, laplacian_loss, flatten_loss
Example #22
def construct_meshes(shape, texture, face):
  nV = shape.size(2)
  Verts, Faces, Textures = [], [], []
  for i in range(len(shape)):
    V_ = shape[i]
    T_ = texture[i]

    range_ = torch.arange(V_.size(0)).view(-1, 1) * nV
    F_ = face.expand(V_.size(0), -1) + range_.cuda()
    Verts.append(V_.reshape(-1, 3))
    Textures.append(T_.reshape(-1, 3))
    # Face indices must stay integral; the original cast them to float.
    Faces.append(F_.reshape(-1, 3).long())

  meshes = Meshes(verts=Verts, faces=Faces,
                  textures=TexturesVertex(verts_features=Textures))
  return meshes
Example #23
    def test_cameras(self):
        """
        DVR cameras
        """
        device = torch.device('cuda:0')
        input_dir = '/home/ywang/Documents/points/neural_splatter/differentiable_volumetric_rendering_upstream/data/DTU/scan106/scan106'
        out_dir = os.path.join('tests', 'outputs', 'test_dvr_data')
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)

        dvr_camera_file = os.path.join(input_dir, 'cameras.npz')
        dvr_camera_dict = np.load(dvr_camera_file)
        n_views = len(glob.glob(os.path.join(input_dir, 'image', '*.png')))

        focal_lengths = dvr_camera_dict['camera_mat_0'][(0,1),(0,1)].reshape(1,2)
        principal_point = dvr_camera_dict['camera_mat_0'][(0,1),(2,2)].reshape(1,2)
        cameras = PerspectiveCameras(focal_length=focal_lengths, principal_point=principal_point).to(device)
        # Define the settings for rasterization and shading.
        # Refer to raster_points.py for explanations of these parameters.
        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=0.0,
            faces_per_pixel=5,
            # this setting controls whether naive or coarse-to-fine rasterization is used
            bin_size=None,
            max_faces_per_bin=None  # this setting is for coarse rasterization
        )
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=None, raster_settings=raster_settings),
            shader=SoftPhongShader(device=device)
        )
        mesh = trimesh.load_mesh('/home/ywang/Documents/points/neural_splatter/differentiable_volumetric_rendering_upstream/out/multi_view_reconstruction/birds/ours_depth_mvs/vis/000_0000477500.ply')
        textures = TexturesVertex(verts_features=torch.ones(
            1, mesh.vertices.shape[0], 3)).to(device=device)
        meshes = Meshes(verts=[torch.tensor(mesh.vertices).float()], faces=[torch.tensor(mesh.faces)],
                        textures=textures).to(device=device)
        for i in range(n_views):
            transform_mat = torch.from_numpy(dvr_camera_dict['scale_mat_%d' % i].T @ dvr_camera_dict['world_mat_%d' % i].T).to(device).unsqueeze(0).float()
            cameras.R, cameras.T = decompose_to_R_and_t(transform_mat)
            cameras._N = cameras.R.shape[0]
            imgs = renderer(meshes, cameras=cameras, zfar=1e4, znear=1.0)
            imageio.imwrite(os.path.join(out_dir, '%06d.png' % i), (imgs[0].detach().cpu().numpy()*255).astype('uint8'))
Example #24
def differentiable_face_render(vert, tri, colors, bg_img, h, w):
    """
    vert: (N, nver, 3)
    tri: (ntri, 3)
    colors: (N, nver, 3)
    bg_img: (N, 3, H, W)
    """
    assert h == w
    N, nver, _ = vert.shape
    ntri = tri.shape[0]
    tri = torch.from_numpy(tri).to(vert.device).unsqueeze(0).expand(N, ntri, 3)
    # Transform to Pytorch3D world space
    vert_t = vert + torch.tensor((0.5, 0.5, 0), dtype=torch.float, device=vert.device).view(1, 1, 3)
    vert_t = vert_t * torch.tensor((-1, 1, -1), dtype=torch.float, device=vert.device).view(1, 1, 3)
    mesh_torch = Meshes(verts=vert_t, faces=tri, textures=TexturesVertex(verts_features=colors))
    # Render
    R = look_at_rotation(camera_position=((0, 0, -300),)).to(vert.device).expand(N, 3, 3)
    T = torch.tensor((0, 0, 300), dtype=torch.float, device=vert.device).view(1, 3).expand(N, 3)
    focal = torch.tensor((2. / float(w), 2. / float(h)), dtype=torch.float, device=vert.device).view(1, 2).expand(N, 2)
    cameras = OrthographicCameras(device=vert.device, R=R, T=T, focal_length=focal)
    raster_settings = RasterizationSettings(image_size=h, blur_radius=0.0, faces_per_pixel=1)
    lights = DirectionalLights(ambient_color=((1., 1., 1.),), diffuse_color=((0., 0., 0.),),
                               specular_color=((0., 0., 0.),), direction=((0, 0, 1),), device=vert.device)
    blend_params = BlendParams(background_color=(0, 0, 0))
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=cameras,
            raster_settings=raster_settings
        ),
        shader=SoftPhongShader(
            device=vert.device,
            cameras=cameras,
            lights=lights,
            blend_params=blend_params
        )
    )
    images = renderer(mesh_torch)[:, :, :, :3]        # (N, H, W, 3)
    # Add background
    if bg_img is not None:
        bg_img = bg_img.permute(0, 2, 3, 1)         # (N, H, W, 3)
        images = torch.where(torch.eq(images.sum(dim=3, keepdim=True).expand(N, h, w, 3), 0), bg_img, images)
    return images
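A shape-only smoke test for differentiable_face_render above; the geometry is random, so the rendering is noise, but it confirms the pipeline runs end to end:

N, nver, size = 1, 500, 128
vert = torch.rand(N, nver, 3) * size                       # image-space vertices
tri = np.random.randint(0, nver, size=(1000, 3)).astype(np.int64)
colors = torch.rand(N, nver, 3)
out = differentiable_face_render(vert, tri, colors, bg_img=None, h=size, w=size)
print(out.shape)  # (N, size, size, 3)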
Example #25
    def _get_cube_mesh(self):
        # NOTE(ycho): duplicated from _get_cube_cloud()
        vertices = list(
            itertools.product(*zip([-0.5, -0.5, -0.5], [0.5, 0.5, 0.5])))
        vertices = np.insert(vertices, 0, [0, 0, 0], axis=0)
        vertices = th.as_tensor(vertices, dtype=th.float32, device=self.device)

        # FIXME(ycho): Hardcoded face indices.
        # (We can't use Box.FACE since we need triangulated faces)
        faces = [[7, 3, 5], [5, 3, 1], [5, 1, 6], [6, 1, 2], [6, 2, 8],
                 [8, 2, 4], [8, 4, 7], [7, 4, 3], [3, 4, 1], [1, 4, 2],
                 [8, 7, 6], [6, 7, 5]]
        # Faces must be an integer (F, 3) tensor; the original reshape(-1, 3, 3)
        # produced a (4, 3, 3) tensor, which Meshes rejects.
        face_indices = th.as_tensor(faces, dtype=th.int64,
                                    device=self.device).reshape(-1, 3)
        textures = TexturesVertex(verts_features=(0.5 + 0.5 * vertices)[None])
        mesh = Meshes(verts=vertices[None],
                      faces=face_indices[None],
                      textures=textures)

        return mesh
Example #26
    def render_torch(self,
                     verts,
                     faces,
                     rgb,
                     bcg_color=(1., 1., 1.),
                     get_depth=False,
                     get_alpha=False):
        # b, h, w = grid_3d.shape[:3]
        b = verts.size(0)
        textures = TexturesVertex(verts_features=rgb.view(b, -1, 3))
        mesh = Meshes(verts=verts, faces=faces, textures=textures)

        fragments = self.rasterizer_torch(mesh)
        texels = mesh.sample_textures(fragments)
        materials = Materials(device=verts.device)
        blend_params = BlendParams(background_color=bcg_color)
        images = hard_rgb_blend(texels, fragments, blend_params)
        images = images[..., :3].permute(0, 3, 1, 2)

        out = (images, )
        if get_depth:
            depth = fragments.zbuf[..., 0]
            mask = (depth == -1.0).float()
            max_depth = self.max_depth + 0.5 * (self.max_depth - self.min_depth)
            depth = mask * max_depth * torch.ones_like(depth) + (1 - mask) * depth
            out = out + (depth, )
        if get_alpha:
            colors = torch.ones_like(fragments.bary_coords)
            blend_params = BlendParams(sigma=1e-2,
                                       gamma=1e-4,
                                       background_color=(1., 1., 1.))
            alpha = sigmoid_alpha_blend(colors, fragments, blend_params)[..., -1]
            out = tuple(out) + (alpha, )
        if len(out) == 1:
            out = out[0]
        return out
Example #27
    def forward(self, coeffs, render=True):
        batch_num = coeffs.shape[0]

        id_coeff, exp_coeff, tex_coeff, angles, gamma, translation = self.split_coeffs(
            coeffs)

        vs = self.get_vs(id_coeff, exp_coeff)

        rotation = self.compute_rotation_matrix(angles)

        vs_t = self.rigid_transform(
            vs, rotation, translation)

        lms_t = self.get_lms(vs_t)
        lms_proj = self.project_vs(lms_t)
        lms_proj = torch.stack(
            [lms_proj[:, :, 0], self.img_size-lms_proj[:, :, 1]], dim=2)
        if render:
            face_texture = self.get_color(tex_coeff)
            face_norm = self.compute_norm(vs, self.tri, self.point_buf)
            face_norm_r = face_norm.bmm(rotation)
            face_color = self.add_illumination(
                face_texture, face_norm_r, gamma)
            face_color_tv = TexturesVertex(face_color)

            mesh = Meshes(vs_t, self.tri.repeat(
                batch_num, 1, 1), face_color_tv)
            rendered_img = self.renderer(mesh)
            rendered_img = torch.clamp(rendered_img, 0, 255)

            return {'rendered_img': rendered_img,
                    'lms_proj': lms_proj,
                    'face_texture': face_texture,
                    'vs': vs_t,
                    'tri': self.tri,
                    'color': face_color}
        else:
            return {'lms_proj': lms_proj}
Example #28
    def __call__(self,
                 points,
                 faces,
                 colors=None,
                 mean=None,
                 std=None,
                 grayscale=True):
        assert len(points.shape) == 4 and points.shape[1] == 3
        colors = colors if colors is not None else torch.ones_like(points)
        points, colors = grid_to_list(points), grid_to_list(colors)
        if self.renderer is None:
            self.setup(points.device)
        textures = TexturesVertex(verts_features=colors)

        mesh = Meshes(verts=points, faces=faces, textures=textures)
        r_images = self.renderer(mesh)
        r_images = r_images.permute(0, 3, 1, 2).contiguous()
        r_images = r_images[:, :3, :, :]
        if grayscale:
            r_images = r_images.mean(dim=1, keepdim=True)
        if mean and std:
            r_images = (r_images - mean) / std
        return r_images
Example #29
def test_my_renderer():
    vert, normal, st, color, face = load_blender_ply_mesh(
        '../data/meshes/background.ply')
    # V, _ = vert.shape
    # meshes = Meshes(
    #     verts=[vert.to(device)],
    #     faces=[face.to(device)],
    #     textures=TexturesVertex([torch.cat((color.to(device), torch.ones(V, 1, device=device)), dim=1)])
    # )

    # vert = torch.tensor([
    #     [-1, -1, 0],
    #     [1, -1, 0],
    #     [1, 1, 0],
    #     [-1, 1, 0]
    # ], dtype=torch.float) * 30

    x = 80
    y = 93

    vert = torch.tensor([[-x, -y, 0], [x, -y, 0], [x, y, 0], [-x, y, 0]],
                        dtype=torch.float)

    face = torch.LongTensor([[0, 1, 2], [0, 2, 3]])

    meshes = Meshes(verts=[vert.to(device)],
                    faces=[face.to(device)],
                    textures=TexturesVertex([color.to(device)]))

    scene = join_meshes_as_scene(meshes)
    zbuffer_renderer = MeshRenderer(rasterizer=MeshRasterizer(
        cameras=cameras,
        raster_settings=raster_settings,
    ),
                                    shader=IdentityShader())

    plot_channels(render(zbuffer_renderer, scene))
Example #30
    def test_join_meshes_as_batch(self):
        """
        Test that join_meshes_as_batch and load_objs_as_meshes are consistent
        with single meshes.
        """
        def check_triple(mesh, mesh3):
            """
            Verify that mesh3 is three copies of mesh.
            """
            def check_item(x, y):
                self.assertEqual(x is None, y is None)
                if x is not None:
                    self.assertClose(torch.cat([x, x, x]), y)

            check_item(mesh.verts_padded(), mesh3.verts_padded())
            check_item(mesh.faces_padded(), mesh3.faces_padded())

            if mesh.textures is not None:
                if isinstance(mesh.textures, TexturesUV):
                    check_item(
                        mesh.textures.faces_uvs_padded(),
                        mesh3.textures.faces_uvs_padded(),
                    )
                    check_item(
                        mesh.textures.verts_uvs_padded(),
                        mesh3.textures.verts_uvs_padded(),
                    )
                    check_item(mesh.textures.maps_padded(),
                               mesh3.textures.maps_padded())
                elif isinstance(mesh.textures, TexturesVertex):
                    check_item(
                        mesh.textures.verts_features_padded(),
                        mesh3.textures.verts_features_padded(),
                    )
                elif isinstance(mesh.textures, TexturesAtlas):
                    check_item(mesh.textures.atlas_padded(),
                               mesh3.textures.atlas_padded())

        DATA_DIR = Path(
            __file__).resolve().parent.parent / "docs/tutorials/data"
        obj_filename = DATA_DIR / "cow_mesh/cow.obj"

        mesh = load_objs_as_meshes([obj_filename])
        mesh3 = load_objs_as_meshes([obj_filename, obj_filename, obj_filename])
        check_triple(mesh, mesh3)
        self.assertTupleEqual(mesh.textures.maps_padded().shape,
                              (1, 1024, 1024, 3))

        # Try mismatched texture map sizes, which needs a call to interpolate()
        mesh2048 = mesh.clone()
        maps = mesh.textures.maps_padded()
        mesh2048.textures._maps_padded = torch.cat([maps, maps], dim=1)
        join_meshes_as_batch([mesh.to("cuda:0"), mesh2048.to("cuda:0")])

        mesh_notex = load_objs_as_meshes([obj_filename], load_textures=False)
        mesh3_notex = load_objs_as_meshes(
            [obj_filename, obj_filename, obj_filename], load_textures=False)
        check_triple(mesh_notex, mesh3_notex)
        self.assertIsNone(mesh_notex.textures)

        # meshes with vertex texture, join into a batch.
        verts = torch.randn((4, 3), dtype=torch.float32)
        faces = torch.tensor([[2, 1, 0], [3, 1, 0]], dtype=torch.int64)
        vert_tex = torch.ones_like(verts)
        rgb_tex = TexturesVertex(verts_features=[vert_tex])
        mesh_rgb = Meshes(verts=[verts], faces=[faces], textures=rgb_tex)
        mesh_rgb3 = join_meshes_as_batch([mesh_rgb, mesh_rgb, mesh_rgb])
        check_triple(mesh_rgb, mesh_rgb3)

        # meshes with texture atlas, join into a batch.
        device = "cuda:0"
        atlas = torch.rand((2, 4, 4, 3), dtype=torch.float32, device=device)
        atlas_tex = TexturesAtlas(atlas=[atlas])
        mesh_atlas = Meshes(verts=[verts], faces=[faces], textures=atlas_tex)
        mesh_atlas3 = join_meshes_as_batch(
            [mesh_atlas, mesh_atlas, mesh_atlas])
        check_triple(mesh_atlas, mesh_atlas3)

        # Test load multiple meshes with textures into a batch.
        teapot_obj = DATA_DIR / "teapot.obj"
        mesh_teapot = load_objs_as_meshes([teapot_obj])
        teapot_verts, teapot_faces = mesh_teapot.get_mesh_verts_faces(0)
        mix_mesh = load_objs_as_meshes([obj_filename, teapot_obj],
                                       load_textures=False)
        self.assertEqual(len(mix_mesh), 2)
        self.assertClose(mix_mesh.verts_list()[0], mesh.verts_list()[0])
        self.assertClose(mix_mesh.faces_list()[0], mesh.faces_list()[0])
        self.assertClose(mix_mesh.verts_list()[1], teapot_verts)
        self.assertClose(mix_mesh.faces_list()[1], teapot_faces)

        cow3_tea = join_meshes_as_batch([mesh3, mesh_teapot],
                                        include_textures=False)
        self.assertEqual(len(cow3_tea), 4)
        check_triple(mesh_notex, cow3_tea[:3])
        self.assertClose(cow3_tea.verts_list()[3], mesh_teapot.verts_list()[0])
        self.assertClose(cow3_tea.faces_list()[3], mesh_teapot.faces_list()[0])

        # Check error raised if all meshes in the batch don't have the same texture type
        with self.assertRaisesRegex(ValueError, "same type of texture"):
            join_meshes_as_batch([mesh_atlas, mesh_rgb, mesh_atlas])