Example #1
    def __init__(self, image_size):
        super(Renderer, self).__init__()

        self.image_size = image_size
        self.dog_obj = load_objs_as_meshes(['data/dog_B/dog_B/dog_B_tpose.obj'])

        raster_settings = RasterizationSettings(
            image_size=self.image_size, 
            blur_radius=0.0, 
            faces_per_pixel=1, 
            bin_size=None
        )

        R, T = look_at_view_transform(2.7, 0, 0) 
        cameras = OpenGLPerspectiveCameras(device=R.device, R=R, T=T)
        lights = PointLights(device=R.device, location=[[0.0, 1.0, 0.0]])

        self.renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=cameras, 
                raster_settings=raster_settings
            ),
            shader=SoftPhongShader(
                device=R.device, 
                cameras=cameras,
                lights=lights
            )
        )
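A minimal usage sketch for the snippet above, hedged: it assumes the enclosing class is named Renderer (as the super() call suggests) and that the dog .obj path exists; an ico-sphere is fed to the composed renderer just to show that any textured Meshes batch works.

import torch
from pytorch3d.utils import ico_sphere
from pytorch3d.renderer import TexturesVertex

mesh = ico_sphere(level=3)  # stand-in mesh; any textured Meshes will do
mesh.textures = TexturesVertex(
    verts_features=torch.ones_like(mesh.verts_padded()))

r = Renderer(image_size=256)
images = r.renderer(mesh)  # (1, 256, 256, 4) RGBA tensor
rgb = images[0, ..., :3]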
Example #2
    def _get_renderer(self, device):
        R, T = look_at_view_transform(10, 0, 0)  # camera's position
        cameras = FoVPerspectiveCameras(
            device=device,
            R=R,
            T=T,
            znear=0.01,
            zfar=50,
            fov=2 * np.arctan(self.img_size // 2 / self.focal) * 180. / np.pi)

        lights = PointLights(device=device,
                             location=[[0.0, 0.0, 1e5]],
                             ambient_color=[[1, 1, 1]],
                             specular_color=[[0., 0., 0.]],
                             diffuse_color=[[0., 0., 0.]])

        raster_settings = RasterizationSettings(
            image_size=self.img_size,
            blur_radius=0.0,
            faces_per_pixel=1,
        )
        blend_params = blending.BlendParams(background_color=[0, 0, 0])

        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=SoftPhongShader(device=device,
                                   cameras=cameras,
                                   lights=lights,
                                   blend_params=blend_params))
        return renderer
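The fov passed above follows from the pinhole relation tan(fov / 2) = (img_size / 2) / focal. A standalone sanity check with hypothetical intrinsics:

import numpy as np

img_size, focal = 256, 500.0  # hypothetical intrinsics
fov_deg = 2 * np.arctan((img_size / 2) / focal) * 180. / np.pi
print(round(fov_deg, 1))  # 28.7 degrees for these values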
Example #3
    def setup(self, device):
        R, T = look_at_view_transform(self.viewpoint_distance,
                                      self.viewpoint_elevation,
                                      self.viewpoint_azimuth,
                                      device=device)
        cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
        raster_settings = RasterizationSettings(
            image_size=self.opt.fast_image_size,
            blur_radius=self.opt.raster_blur_radius,
            faces_per_pixel=self.opt.raster_faces_per_pixel,
        )
        rasterizer = MeshRasterizer(cameras=cameras,
                                    raster_settings=raster_settings)
        # Note: the PointLights are immediately overwritten below; only the
        # DirectionalLights are actually passed to the shader.
        lights = PointLights(device=device,
                             location=[self.opt.lights_location])
        lights = DirectionalLights(device=device,
                                   direction=[self.opt.lights_direction])
        shader = SoftPhongShader(
            device=device,
            cameras=cameras,
            lights=lights,
            blend_params=BlendParams(
                self.opt.blend_params_sigma,
                self.opt.blend_params_gamma,
                self.opt.blend_params_background_color,
            ),
        )
        self.renderer = MeshRenderer(
            rasterizer=rasterizer,
            shader=shader,
        )
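For reference, a sketch of the option fields this setup method reads from self.opt, written as a plain namespace (the values are illustrative, not the project's defaults):

from argparse import Namespace

opt = Namespace(
    fast_image_size=256,
    raster_blur_radius=0.0,
    raster_faces_per_pixel=1,
    lights_location=(0.0, 0.0, -3.0),
    lights_direction=(0.0, 1.0, 0.0),
    blend_params_sigma=1e-4,
    blend_params_gamma=1e-4,
    blend_params_background_color=(0.0, 0.0, 0.0),
)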
Example #4
def define_render(num):
    shapenet_cam_params_file = '../data/metadata/rendering_metadata.json'
    with open(shapenet_cam_params_file) as f:
        shapenet_cam_params = json.load(f)

    param_num = num
    R, T = look_at_view_transform(
        dist=shapenet_cam_params["distance"][param_num] * 5,
        elev=shapenet_cam_params["elevation"][param_num],
        azim=shapenet_cam_params["azimuth"][param_num])
    cameras = FoVPerspectiveCameras(
        device=device,
        R=R,
        T=T,
        fov=shapenet_cam_params["field_of_view"][param_num])

    raster_settings = RasterizationSettings(
        image_size=512,
        blur_radius=0.0,
        faces_per_pixel=1,
    )

    lights = PointLights(device=device, location=[[0.0, 0.0, -3.0]])

    renderer = MeshRenderer(rasterizer=MeshRasterizer(
        cameras=cameras, raster_settings=raster_settings),
                            shader=SoftPhongShader(device=device,
                                                   cameras=cameras,
                                                   lights=lights))

    return renderer
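A hedged call sketch for define_render (it assumes the metadata JSON above is present and that device is defined globally, as the function itself does; shapenet_mesh stands for any textured Meshes on that device):

renderer = define_render(num=0)
# images = renderer(shapenet_mesh)  # (1, 512, 512, 4) RGBA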
Example #5
    def __get_renderer(self, render_size, lights):

        cameras = FoVOrthographicCameras(
            device=self.device,
            znear=0.1,
            zfar=10.0,
            max_y=1.0,
            min_y=-1.0,
            max_x=1.0,
            min_x=-1.0,
            scale_xyz=((1.0, 1.0, 1.0), ),  # (1, 3)
        )

        raster_settings = RasterizationSettings(
            image_size=render_size,
            blur_radius=0,
            faces_per_pixel=1,
        )
        blend_params = BlendParams(sigma=1e-4,
                                   gamma=1e-4,
                                   background_color=(0, 0, 0))

        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=SoftPhongShader(device=self.device,
                                   cameras=cameras,
                                   lights=lights,
                                   blend_params=blend_params))

        return renderer
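Since __get_renderer takes its lights from the caller, a matching call from inside the same class might look like this (a sketch; the light placement is illustrative):

lights = PointLights(device=self.device, location=[[0.0, 0.0, 3.0]])
renderer = self.__get_renderer(render_size=512, lights=lights)
# images = renderer(meshes)  # meshes: a textured Meshes batch on self.device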
Example #6
    def test_mesh_renderer_to(self):
        """
        Test moving all the tensors in the mesh renderer to a new device.
        """

        device1 = torch.device("cpu")

        R, T = look_at_view_transform(1500, 0.0, 0.0)

        # Init shader settings
        materials = Materials(device=device1)
        lights = PointLights(device=device1)
        lights.location = torch.tensor([0.0, 0.0, +1000.0], device=device1)[None]

        raster_settings = RasterizationSettings(
            image_size=256, blur_radius=0.0, faces_per_pixel=1
        )
        cameras = FoVPerspectiveCameras(
            device=device1, R=R, T=T, aspect_ratio=1.0, fov=60.0, zfar=100
        )
        rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)

        blend_params = BlendParams(
            1e-4,
            1e-4,
            background_color=torch.zeros(3, dtype=torch.float32, device=device1),
        )

        shader = SoftPhongShader(
            lights=lights,
            cameras=cameras,
            materials=materials,
            blend_params=blend_params,
        )
        renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)

        mesh = ico_sphere(2, device1)
        verts_padded = mesh.verts_padded()
        textures = TexturesVertex(
            verts_features=torch.ones_like(verts_padded, device=device1)
        )
        mesh.textures = textures
        self._check_mesh_renderer_props_on_device(renderer, device1)

        # Test rendering on cpu
        output_images = renderer(mesh)
        self.assertEqual(output_images.device, device1)

        # Move renderer and mesh to another device and re render
        # This also tests that background_color is correctly moved to
        # the new device
        device2 = torch.device("cuda:0")
        renderer = renderer.to(device2)
        mesh = mesh.to(device2)
        self._check_mesh_renderer_props_on_device(renderer, device2)
        output_images = renderer(mesh)
        self.assertEqual(output_images.device, device2)
Example #7
            def on_change(value):
                img_copy = image.copy()

                x = (cv2.getTrackbarPos("x", "image") - 1000) / 1000
                y = (cv2.getTrackbarPos("y", "image") - 1000) / 1000
                z = cv2.getTrackbarPos("z", "image") / 1000
                rx = cv2.getTrackbarPos("rx", "image")
                ry = cv2.getTrackbarPos("ry", "image")
                rz = cv2.getTrackbarPos("rz", "image")

                T = torch.tensor([[x, y, z]],
                                 dtype=torch.float32,
                                 device=device)
                R = Rotation.from_euler("zyx", [rz, ry, rx],
                                        degrees=True).as_matrix()

                renderR = torch.from_numpy(R.T.reshape((1, 3, 3))).to(device)

                cameras = PerspectiveCameras(
                    R=renderR,
                    T=T,
                    focal_length=-self.f,
                    principal_point=self.p,
                    image_size=(self.img_size, ),
                    device=device,
                )

                raster_settings = RasterizationSettings(
                    image_size=(self.intrinsics.height, self.intrinsics.width),
                    blur_radius=0.0,
                    faces_per_pixel=1,
                )
                renderer = MeshRenderer(
                    rasterizer=MeshRasterizer(cameras=cameras,
                                              raster_settings=raster_settings),
                    shader=SoftPhongShader(
                        device=device,
                        cameras=cameras,
                    ),
                )
                overlay = renderer(mesh)[0, ..., :3].cpu().numpy()[:, :, ::-1]
                render_img = overlay * 0.7 + img_copy / 255 * 0.3
                cv2.imshow(windowName, render_img)

                store_and_exit = cv2.getTrackbarPos(
                    "0 : Manual Match \n1 : Store and Exit", "image")
                if store_and_exit:
                    cv2.destroyAllWindows()
                    pose[mesh_name] = {
                        "translation": T.cpu().numpy(),
                        "rotation": R
                    }
Example #8
    def __init__(self,
                 dist=2,
                 elev=0,
                 azimuth=180,
                 fov=40,
                 image_size=256,
                 R=None,
                 T=None,
                 cameras=None,
                 return_format="torch",
                 device='cuda'):
        super().__init__()
        # If you provide R and T, you don't need dist, elev, azimuth, fov
        self.device = device
        self.return_format = return_format

        # Data structures and functions for rendering
        if cameras is None:
            if R is None and T is None:
                R, T = look_at_view_transform(dist, elev, azimuth)
            cameras = FoVPerspectiveCameras(R=R,
                                            T=T,
                                            znear=1,
                                            zfar=10000,
                                            fov=fov,
                                            degrees=True,
                                            device=device)
            # cameras = PerspectiveCameras(R=R, T=T, focal_length=1.6319*10, device=device)

        self.raster_settings = RasterizationSettings(
            image_size=image_size,
            blur_radius=0.0,  # no blur
            bin_size=0,
        )
        # Place lights at the same point as the camera
        location = T
        if location is None:
            location = ((0, 0, 0), )
        lights = PointLights(ambient_color=((0.3, 0.3, 0.3), ),
                             diffuse_color=((0.7, 0.7, 0.7), ),
                             device=device,
                             location=location)

        self.mesh_rasterizer = MeshRasterizer(
            cameras=cameras, raster_settings=self.raster_settings)
        self._renderer = MeshRenderer(rasterizer=self.mesh_rasterizer,
                                      shader=SoftPhongShader(device=device,
                                                             cameras=cameras,
                                                             lights=lights))
        self.cameras = self.mesh_rasterizer.cameras
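The snippet above only shows the constructor; a hedged usage sketch follows (the class name RendererWrapper is hypothetical, since the real name is not shown, and mesh is any textured Meshes on the same device):

wrapper = RendererWrapper(dist=2, elev=0, azimuth=180, image_size=256)
images = wrapper._renderer(mesh)  # (1, 256, 256, 4) RGBA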
Example #9
def save_p3d_mesh(verts, faces, filling_factors):
    features = [(int(i * 255), 0, 0) for i in filling_factors]
    features = torch.unsqueeze(torch.Tensor(features), 0)
    if torch.cuda.is_available():
        device = torch.device("cuda:0")
        torch.cuda.set_device(device)
    else:
        device = torch.device("cpu")

    texture = TexturesVertex(features)
    mesh = Meshes(torch.unsqueeze(torch.Tensor(verts), 0),
                  torch.unsqueeze(torch.Tensor(faces), 0), texture).to(device)

    # Initialize a camera.
    # Rotate the object by increasing the elevation and azimuth angles
    R, T = look_at_view_transform(dist=2.0, elev=-50, azim=-90)
    cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

    # Define the settings for rasterization and shading. Here we set the output image to be of
    # size 1024x1024. As we are rendering images for visualization purposes only we set
    # faces_per_pixel=1 and blur_radius=0.0. We leave bin_size and max_faces_per_bin at their
    # default value of None, which ensures that the faster coarse-to-fine rasterization method
    # is used. Refer to rasterize_meshes.py for explanations of these parameters and to
    # docs/notes/renderer.md for the difference between naive and coarse-to-fine rasterization.
    raster_settings = RasterizationSettings(
        image_size=1024,
        blur_radius=0.0,
        faces_per_pixel=1,
    )

    # Place a point light in front of the object, along the -z direction.
    lights = PointLights(device=device, location=[[0.0, 0.0, -3.0]])

    # Create a Phong renderer by composing a rasterizer and a shader. Since the mesh uses
    # per-vertex textures, the soft Phong shader will interpolate the vertex colors and
    # apply the Phong lighting model.
    renderer = MeshRenderer(rasterizer=MeshRasterizer(
        cameras=cameras, raster_settings=raster_settings),
                            shader=SoftPhongShader(device=device,
                                                   cameras=cameras,
                                                   lights=lights))

    img = renderer(mesh)
    plt.figure(figsize=(10, 10))
    plt.imshow(img[0].cpu().numpy())
    plt.show()
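A hedged call sketch for save_p3d_mesh (filling_factors is one scalar in [0, 1] per vertex, used as the red channel of the vertex texture):

from pytorch3d.utils import ico_sphere

sphere = ico_sphere(2)
verts = sphere.verts_packed().numpy()
faces = sphere.faces_packed().numpy()
save_p3d_mesh(verts, faces, [0.5] * len(verts))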
Example #10
    def _setup_render(self):
        # Unpack options ...
        opts = self.opts

        # Initialize a camera.
        # TODO(ycho): Alternatively, specify the intrinsic matrix `K` instead.
        cameras = FoVPerspectiveCameras(znear=opts.znear,
                                        zfar=opts.zfar,
                                        aspect_ratio=opts.aspect,
                                        fov=opts.fov,
                                        degrees=True,
                                        device=self.device)

        # Define the settings for rasterization and shading.
        # As we are rendering images for visualization purposes only we will set faces_per_pixel=1
        # and blur_radius=0.0. Refer to raster_points.py for explanations of
        # these parameters.
        # points_per_pixel (Optional): We will keep track of this many points per
        # pixel, returning the nearest points_per_pixel points along the z-axis

        # Create a points renderer by compositing points using an alpha compositor (nearer points
        # are weighted more heavily). See [1] for an explanation.
        if self.opts.use_mesh:
            raster_settings = RasterizationSettings(
                image_size=opts.image_size,
                blur_radius=0.0,  # hmm...
                faces_per_pixel=1)
            rasterizer = MeshRasterizer(cameras=cameras,
                                        raster_settings=raster_settings)
            lights = PointLights(device=self.device,
                                 location=[[0.0, 0.0, -3.0]])

            renderer = MeshRenderer(rasterizer=rasterizer,
                                    shader=SoftPhongShader(device=self.device,
                                                           cameras=cameras,
                                                           lights=lights))
        else:
            raster_settings = PointsRasterizationSettings(
                image_size=opts.image_size, radius=0.1, points_per_pixel=8)
            rasterizer = PointsRasterizer(cameras=cameras,
                                          raster_settings=raster_settings)
            renderer = PointsRenderer(rasterizer=rasterizer,
                                      compositor=AlphaCompositor())
        return renderer
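For reference, the option fields _setup_render reads from self.opts, sketched as a plain namespace (illustrative values, not the project's defaults):

from argparse import Namespace

opts = Namespace(znear=0.1, zfar=100.0, aspect=1.0, fov=60.0,
                 image_size=256, use_mesh=True)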
Example #11
    def test_cameras(self):
        """
        DVR cameras
        """
        device = torch.device('cuda:0')
        input_dir = '/home/ywang/Documents/points/neural_splatter/differentiable_volumetric_rendering_upstream/data/DTU/scan106/scan106'
        out_dir = os.path.join('tests', 'outputs', 'test_dvr_data')
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)

        dvr_camera_file = os.path.join(input_dir, 'cameras.npz')
        dvr_camera_dict = np.load(dvr_camera_file)
        n_views = len(glob.glob(os.path.join(input_dir, 'image', '*.png')))

        focal_lengths = dvr_camera_dict['camera_mat_0'][(0,1),(0,1)].reshape(1,2)
        principal_point = dvr_camera_dict['camera_mat_0'][(0,1),(2,2)].reshape(1,2)
        cameras = PerspectiveCameras(focal_length=focal_lengths, principal_point=principal_point).to(device)
        # Define the settings for rasterization and shading.
        # Refer to raster_points.py for explanations of these parameters.
        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=0.0,
            faces_per_pixel=5,
            # this setting controls whether naive or coarse-to-fine rasterization is used
            bin_size=None,
            max_faces_per_bin=None  # this setting is for coarse rasterization
        )
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=None, raster_settings=raster_settings),
            shader=SoftPhongShader(device=device)
        )
        mesh = trimesh.load_mesh('/home/ywang/Documents/points/neural_splatter/differentiable_volumetric_rendering_upstream/out/multi_view_reconstruction/birds/ours_depth_mvs/vis/000_0000477500.ply')
        textures = TexturesVertex(verts_features=torch.ones(
            1, mesh.vertices.shape[0], 3)).to(device=device)
        meshes = Meshes(verts=[torch.tensor(mesh.vertices).float()], faces=[torch.tensor(mesh.faces)],
                        textures=textures).to(device=device)
        for i in range(n_views):
            transform_mat = torch.from_numpy(dvr_camera_dict['scale_mat_%d' % i].T @ dvr_camera_dict['world_mat_%d' % i].T).to(device).unsqueeze(0).float()
            cameras.R, cameras.T = decompose_to_R_and_t(transform_mat)
            cameras._N = cameras.R.shape[0]
            imgs = renderer(meshes, cameras=cameras, zfar=1e4, znear=1.0)
            imageio.imwrite(os.path.join(out_dir, '%06d.png' % i), (imgs[0].detach().cpu().numpy()*255).astype('uint8'))
Example #12
def differentiable_face_render(vert, tri, colors, bg_img, h, w):
    """
    vert: (N, nver, 3)
    tri: (ntri, 3)
    colors: (N, nver, 3)
    bg_img: (N, 3, H, W)
    """
    assert h == w
    N, nver, _ = vert.shape
    ntri = tri.shape[0]
    tri = torch.from_numpy(tri).to(vert.device).unsqueeze(0).expand(N, ntri, 3)
    # Transform to Pytorch3D world space
    vert_t = vert + torch.tensor((0.5, 0.5, 0), dtype=torch.float, device=vert.device).view(1, 1, 3)
    vert_t = vert_t * torch.tensor((-1, 1, -1), dtype=torch.float, device=vert.device).view(1, 1, 3)
    mesh_torch = Meshes(verts=vert_t, faces=tri, textures=TexturesVertex(verts_features=colors))
    # Render
    R = look_at_rotation(camera_position=((0, 0, -300),)).to(vert.device).expand(N, 3, 3)
    T = torch.tensor((0, 0, 300), dtype=torch.float, device=vert.device).view(1, 3).expand(N, 3)
    focal = torch.tensor((2. / float(w), 2. / float(h)), dtype=torch.float, device=vert.device).view(1, 2).expand(N, 2)
    cameras = OrthographicCameras(device=vert.device, R=R, T=T, focal_length=focal)
    raster_settings = RasterizationSettings(image_size=h, blur_radius=0.0, faces_per_pixel=1)
    lights = DirectionalLights(ambient_color=((1., 1., 1.),), diffuse_color=((0., 0., 0.),),
                               specular_color=((0., 0., 0.),), direction=((0, 0, 1),), device=vert.device)
    blend_params = BlendParams(background_color=(0, 0, 0))
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=cameras,
            raster_settings=raster_settings
        ),
        shader=SoftPhongShader(
            device=vert.device,
            cameras=cameras,
            lights=lights,
            blend_params=blend_params
        )
    )
    images = renderer(mesh_torch)[:, :, :, :3]        # (N, H, W, 3)
    # Add background
    if bg_img is not None:
        bg_img = bg_img.permute(0, 2, 3, 1)         # (N, H, W, 3)
        images = torch.where(torch.eq(images.sum(dim=3, keepdim=True).expand(N, h, w, 3), 0), bg_img, images)
    return images
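A shape-level smoke test with random inputs, following the docstring (hedged: the coordinate range is a guess at the image-space convention implied by the +0.5 shift and axis flips above, and the PyTorch3D imports the function relies on must be in scope):

import torch
import numpy as np

N, nver, ntri, size = 1, 100, 50, 64
vert = (torch.rand(N, nver, 3) - 0.5) * size   # (N, nver, 3)
tri = np.random.randint(0, nver, (ntri, 3))    # (ntri, 3)
colors = torch.rand(N, nver, 3)                # (N, nver, 3)
images = differentiable_face_render(vert, tri, colors, None, size, size)
print(images.shape)  # torch.Size([1, 64, 64, 3])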
Example #13
def render_mesh(mesh, R, T, device, img_size=512, silhouette=False):
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)


    if silhouette:
        blend_params = BlendParams(sigma=1e-4, gamma=1e-4)
        raster_settings = RasterizationSettings(
            image_size=img_size, 
            blur_radius=np.log(1. / 1e-4 - 1.) * blend_params.sigma, 
            faces_per_pixel=100, 
        )
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=cameras, 
                raster_settings=raster_settings
            ),
            shader=SoftSilhouetteShader(blend_params=blend_params)
        )
    else:
        raster_settings = RasterizationSettings(
            image_size=img_size, 
            blur_radius=0.0, 
            faces_per_pixel=1, 
        )
        lights = PointLights(device=device, location=[[0.0, 5.0, -10.0]])
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=cameras, 
                raster_settings=raster_settings
            ),
            shader=SoftPhongShader(
                device=device, 
                cameras=cameras,
                lights=lights
            )
        )

    rendered_images = renderer(mesh, cameras=cameras)
    return rendered_images
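Typical usage of render_mesh (assuming a textured Meshes object mesh on device):

R, T = look_at_view_transform(dist=2.7, elev=10, azim=30)
rgba = render_mesh(mesh, R, T, device, img_size=512)      # Phong render, RGBA
silh = render_mesh(mesh, R, T, device, silhouette=True)   # alpha in channel 3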
Example #14
    def setup_shader(self, cameras, lights):
        if self.shader_type == 'soft_phong':
            self.shader = SoftPhongShader(device=self.device,
                                          cameras=cameras,
                                          lights=lights)
Example #15
    def _create_image_renderer(self):
        raster_settings = self._get_rasterization_settings()
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=self.cameras,
                                      raster_settings=raster_settings),
            shader=SoftPhongShader(device=self.device, cameras=self.cameras))
        return renderer
Example #16
    def initRender(self, method, image_size):
        cameras = OpenGLPerspectiveCameras(device=self.device, fov=15)

        if (method == "soft-silhouette"):
            blend_params = BlendParams(sigma=1e-7, gamma=1e-7)

            raster_settings = RasterizationSettings(
                image_size=image_size,
                blur_radius=np.log(1. / 1e-7 - 1.) * blend_params.sigma,
                faces_per_pixel=self.faces_per_pixel)

            renderer = MeshRenderer(
                rasterizer=MeshRasterizer(cameras=cameras,
                                          raster_settings=raster_settings),
                shader=SoftSilhouetteShader(blend_params=blend_params))
        elif (method == "hard-silhouette"):
            blend_params = BlendParams(sigma=1e-7, gamma=1e-7)

            raster_settings = RasterizationSettings(
                image_size=image_size,
                blur_radius=np.log(1. / 1e-7 - 1.) * blend_params.sigma,
                faces_per_pixel=1)

            renderer = MeshRenderer(
                rasterizer=MeshRasterizer(cameras=cameras,
                                          raster_settings=raster_settings),
                shader=SoftSilhouetteShader(blend_params=blend_params))
        elif (method == "soft-depth"):
            # Soft Rasterizer - from https://github.com/facebookresearch/pytorch3d/issues/95
            #blend_params = BlendParams(sigma=1e-7, gamma=1e-7)
            blend_params = BlendParams(sigma=1e-3, gamma=1e-4)
            raster_settings = RasterizationSettings(
                image_size=image_size,
                #blur_radius= np.log(1. / 1e-7 - 1.) * blend_params.sigma,
                blur_radius=np.log(1. / 1e-3 - 1.) * blend_params.sigma,
                faces_per_pixel=self.faces_per_pixel)

            renderer = MeshRenderer(
                rasterizer=MeshRasterizer(cameras=cameras,
                                          raster_settings=raster_settings),
                shader=SoftDepthShader(blend_params=blend_params))
        elif (method == "hard-depth"):
            raster_settings = RasterizationSettings(image_size=image_size,
                                                    blur_radius=0,
                                                    faces_per_pixel=20)

            renderer = MeshRenderer(rasterizer=MeshRasterizer(
                cameras=cameras, raster_settings=raster_settings),
                                    shader=HardDepthShader())
        elif (method == "blurry-depth"):
            # Soft Rasterizer - from https://github.com/facebookresearch/pytorch3d/issues/95
            blend_params = BlendParams(sigma=1e-4, gamma=1e-4)
            raster_settings = RasterizationSettings(
                image_size=image_size,
                blur_radius=np.log(1. / 1e-4 - 1.) * blend_params.sigma,
                faces_per_pixel=self.faces_per_pixel)

            renderer = MeshRenderer(
                rasterizer=MeshRasterizer(cameras=cameras,
                                          raster_settings=raster_settings),
                shader=SoftDepthShader(blend_params=blend_params))
        elif (method == "soft-phong"):
            blend_params = BlendParams(sigma=1e-3, gamma=1e-3)

            raster_settings = RasterizationSettings(
                image_size=image_size,
                blur_radius=np.log(1. / 1e-3 - 1.) * blend_params.sigma,
                faces_per_pixel=self.faces_per_pixel)

            # lights = DirectionalLights(device=self.device,
            #                            ambient_color=[[0.25, 0.25, 0.25]],
            #                            diffuse_color=[[0.6, 0.6, 0.6]],
            #                            specular_color=[[0.15, 0.15, 0.15]],
            #                            direction=[[0.0, 1.0, 0.0]])

            lights = DirectionalLights(device=self.device,
                                       direction=[[0.0, 1.0, 0.0]])

            renderer = MeshRenderer(
                rasterizer=MeshRasterizer(cameras=cameras,
                                          raster_settings=raster_settings),
                shader=SoftPhongShader(device=self.device,
                                       blend_params=blend_params,
                                       lights=lights))

        elif (method == "hard-phong"):
            blend_params = BlendParams(sigma=1e-8, gamma=1e-8)

            raster_settings = RasterizationSettings(image_size=image_size,
                                                    blur_radius=0.0,
                                                    faces_per_pixel=1)

            lights = DirectionalLights(device=self.device,
                                       ambient_color=[[0.25, 0.25, 0.25]],
                                       diffuse_color=[[0.6, 0.6, 0.6]],
                                       specular_color=[[0.15, 0.15, 0.15]],
                                       direction=[[-1.0, -1.0, 1.0]])
            renderer = MeshRenderer(rasterizer=MeshRasterizer(
                cameras=cameras, raster_settings=raster_settings),
                                    shader=HardPhongShader(device=self.device,
                                                           lights=lights))

        else:
            print("Unknown render method!")
            return None
        return renderer
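All of the soft variants above derive blur_radius from sigma through the SoftRas-style relation blur_radius = log(1 / eps - 1) * sigma, with eps chosen to match sigma's scale. A quick check of the magnitude:

import numpy as np

sigma = 1e-4
blur_radius = np.log(1. / 1e-4 - 1.) * sigma
print(blur_radius)  # ~9.2e-4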
Example #17
    def _compare_with_meshes_renderer(self,
                                      image_size,
                                      batch_size=11,
                                      sphere_diameter=0.6):
        """
        Generate a spherical RGB volumetric function and its corresponding mesh
        and check whether MeshesRenderer returns the same images as the
        corresponding ImplicitRenderer.
        """

        # generate NDC camera extrinsics and intrinsics
        cameras = init_cameras(batch_size, image_size=image_size, ndc=True)

        # get rand offset of the volume
        sphere_centroid = torch.randn(batch_size, 3,
                                      device=cameras.device) * 0.1
        sphere_centroid.requires_grad = True

        # init the grid raysampler with the ndc grid
        raysampler = NDCMultinomialRaysampler(
            image_width=image_size[1],
            image_height=image_size[0],
            n_pts_per_ray=256,
            min_depth=0.1,
            max_depth=2.0,
        )

        # get the EA raymarcher
        raymarcher = EmissionAbsorptionRaymarcher()

        # jitter the camera intrinsics a bit for each render
        cameras_randomized = cameras.clone()
        cameras_randomized.principal_point = (
            torch.randn_like(cameras.principal_point) * 0.3)
        cameras_randomized.focal_length = (
            cameras.focal_length +
            torch.randn_like(cameras.focal_length) * 0.2)

        # the list of differentiable camera vars
        cam_vars = ("R", "T", "focal_length", "principal_point")
        # enable the gradient caching for the camera variables
        for cam_var in cam_vars:
            getattr(cameras_randomized, cam_var).requires_grad = True

        # get the implicit renderer
        images_opacities = ImplicitRenderer(
            raysampler=raysampler, raymarcher=raymarcher)(
                cameras=cameras_randomized,
                volumetric_function=spherical_volumetric_function,
                sphere_centroid=sphere_centroid,
                sphere_diameter=sphere_diameter,
            )[0]

        # check that the renderer does not erase gradients
        loss = images_opacities.sum()
        loss.backward()
        for check_var in (
                *[
                    getattr(cameras_randomized, cam_var)
                    for cam_var in cam_vars
                ],
                sphere_centroid,
        ):
            self.assertIsNotNone(check_var.grad)

        # instantiate the corresponding spherical mesh
        ico = ico_sphere(level=4, device=cameras.device).extend(batch_size)
        verts = (torch.nn.functional.normalize(ico.verts_padded(), dim=-1) *
                 sphere_diameter + sphere_centroid[:, None])
        meshes = Meshes(
            verts=verts,
            faces=ico.faces_padded(),
            textures=TexturesVertex(verts_features=(
                torch.nn.functional.normalize(verts, dim=-1) * 0.5 + 0.5)),
        )

        # instantiate the corresponding mesh renderer
        lights = PointLights(device=cameras.device, location=[[0.0, 0.0, 0.0]])
        renderer_textured = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=cameras_randomized,
                raster_settings=RasterizationSettings(
                    image_size=image_size,
                    blur_radius=1e-3,
                    faces_per_pixel=10,
                    z_clip_value=None,
                    perspective_correct=False,
                ),
            ),
            shader=SoftPhongShader(
                device=cameras.device,
                cameras=cameras_randomized,
                lights=lights,
                materials=Materials(
                    ambient_color=((2.0, 2.0, 2.0), ),
                    diffuse_color=((0.0, 0.0, 0.0), ),
                    specular_color=((0.0, 0.0, 0.0), ),
                    shininess=64,
                    device=cameras.device,
                ),
                blend_params=BlendParams(sigma=1e-3,
                                         gamma=1e-4,
                                         background_color=(0.0, 0.0, 0.0)),
            ),
        )

        # get the mesh render
        images_opacities_meshes = renderer_textured(meshes,
                                                    cameras=cameras_randomized,
                                                    lights=lights)

        if DEBUG:
            outdir = tempfile.gettempdir() + "/test_implicit_vs_mesh_renderer"
            os.makedirs(outdir, exist_ok=True)

            frames = []
            for (image_opacity,
                 image_opacity_mesh) in zip(images_opacities,
                                            images_opacities_meshes):
                image, opacity = image_opacity.split([3, 1], dim=-1)
                image_mesh, opacity_mesh = image_opacity_mesh.split([3, 1],
                                                                    dim=-1)
                diff_image = (((image - image_mesh) * 0.5 + 0.5).mean(
                    dim=2, keepdim=True).repeat(1, 1, 3))
                image_pil = Image.fromarray((torch.cat(
                    (
                        image,
                        image_mesh,
                        diff_image,
                        opacity.repeat(1, 1, 3),
                        opacity_mesh.repeat(1, 1, 3),
                    ),
                    dim=1,
                ).detach().cpu().numpy() * 255.0).astype(np.uint8))
                frames.append(image_pil)

            # export gif
            outfile = os.path.join(outdir, "implicit_vs_mesh_render.gif")
            frames[0].save(
                outfile,
                save_all=True,
                append_images=frames[1:],
                duration=batch_size // 15,
                loop=0,
            )
            print(f"exported {outfile}")

            # export concatenated frames
            outfile_cat = os.path.join(outdir, "implicit_vs_mesh_render.png")
            Image.fromarray(
                np.concatenate([np.array(f) for f in frames],
                               axis=0)).save(outfile_cat)
            print(f"exported {outfile_cat}")

        # compare the renders
        diff = (images_opacities - images_opacities_meshes).abs().mean(dim=-1)
        mu_diff = diff.mean(dim=(1, 2))
        std_diff = diff.std(dim=(1, 2))
        self.assertClose(mu_diff, torch.zeros_like(mu_diff), atol=5e-2)
        self.assertClose(std_diff, torch.zeros_like(std_diff), atol=6e-2)
Example #18
def batch_render(
    verts,
    faces,
    faces_per_pixel=10,
    K=None,
    rot=None,
    trans=None,
    colors=None,
    color=(0.53, 0.53, 0.8),  # light_purple
    ambient_col=0.5,
    specular_col=0.2,
    diffuse_col=0.3,
    face_colors=None,
    # color = (0.74117647, 0.85882353, 0.65098039),  # light_blue
    image_sizes=None,
    out_res=512,
    bin_size=0,
    shading="soft",
    mode="rgb",
    blend_gamma=1e-4,
    min_depth=None,
):
    device = torch.device("cuda:0")
    K = K.to(device)
    width, height = image_sizes[0]
    out_size = int(max(image_sizes[0]))
    raster_settings = RasterizationSettings(
        image_size=out_size,
        blur_radius=0.0,
        faces_per_pixel=faces_per_pixel,
        bin_size=bin_size,
    )

    fx = K[:, 0, 0]
    fy = K[:, 1, 1]
    focals = torch.stack([fx, fy], 1)
    px = K[:, 0, 2]
    py = K[:, 1, 2]
    principal_point = torch.stack([width - px, height - py], 1)
    if rot is None:
        rot = torch.eye(3).unsqueeze(0).to(device)
    if trans is None:
        trans = torch.zeros(3).unsqueeze(0).to(device)
    cameras = PerspectiveCameras(
        device=device,
        focal_length=focals,
        principal_point=principal_point,
        image_size=[(out_size, out_size) for _ in range(len(verts))],
        R=rot,
        T=trans,
    )
    if mode == "rgb":

        lights = PointLights(device=device, location=[[0.0, 0.0, -3.0]])
        lights = DirectionalLights(
            device=device,
            direction=((0.6, -0.6, -0.6), ),
            ambient_color=((ambient_col, ambient_col, ambient_col), ),
            diffuse_color=((diffuse_col, diffuse_col, diffuse_col), ),
            specular_color=((specular_col, specular_col, specular_col), ),
        )
        if shading == "soft":
            shader = SoftPhongShader(device=device,
                                     cameras=cameras,
                                     lights=lights)
        elif shading == "hard":
            shader = HardPhongShader(device=device,
                                     cameras=cameras,
                                     lights=lights)
        else:
            raise ValueError(
                f"Shading {shading} for mode rgb not in [sort|hard]")
    elif mode == "silh":
        blend_params = BlendParams(sigma=1e-4, gamma=1e-4)
        shader = SoftSilhouetteShader(blend_params=blend_params)
    elif shading == "faceidx":
        shader = FaceIdxShader()
    elif (mode == "facecolor") and (shading == "hard"):
        shader = FaceColorShader(face_colors=face_colors)
    elif (mode == "facecolor") and (shading == "soft"):
        shader = SoftFaceColorShader(face_colors=face_colors,
                                     blend_gamma=blend_gamma)
    else:
        raise ValueError(
            f"Unhandled mode {mode} and shading {shading} combination")

    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras,
                                  raster_settings=raster_settings),
        shader=shader,
    )
    if min_depth is not None:
        verts = torch.cat([verts[:, :, :2], verts[:, :, 2:].clamp(min_depth)],
                          2)
    if mode == "rgb":
        if colors is None:
            colors = get_colors(verts, color)
        tex = textures.TexturesVertex(verts_features=colors)

        meshes = Meshes(verts=verts, faces=faces, textures=tex)
    elif mode in ["silh", "facecolor"]:
        meshes = Meshes(verts=verts, faces=faces)
    else:
        raise ValueError(f"Render mode {mode} not in [rgb|silh]")

    square_images = renderer(meshes, cameras=cameras)
    square_images = torch.flip(square_images, (1, 2))
    height_off = abs(int(width - height))
    if width > height:
        images = square_images[:, height_off:, :]
    else:
        images = square_images[:, :, height_off:]
    return images
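A hedged smoke-test call for batch_render (random geometry pushed in front of the camera; K is a hypothetical pinhole intrinsic matrix, and image_sizes carries a single (width, height) pair, matching how the function indexes it; requires a CUDA device):

import torch

verts = torch.rand(1, 100, 3, device="cuda:0") + torch.tensor(
    [0., 0., 2.], device="cuda:0")
faces = torch.randint(0, 100, (1, 50, 3), device="cuda:0")
K = torch.tensor([[[500., 0., 128.],
                   [0., 500., 128.],
                   [0., 0., 1.]]])
images = batch_render(verts, faces, K=K, image_sizes=[(256, 256)],
                      colors=torch.ones(1, 100, 3, device="cuda:0"))
print(images.shape)  # torch.Size([1, 256, 256, 4])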
Example #19
def render_img(face_shape,
               face_color,
               facemodel,
               image_size=224,
               fx=1015.0,
               fy=1015.0,
               px=112.0,
               py=112.0,
               device='cuda:0'):
    '''
        ref: https://github.com/facebookresearch/pytorch3d/issues/184
        The rendering function (just for test)
        Input:
            face_shape:  Tensor[1, 35709, 3]
            face_color: Tensor[1, 35709, 3] in [0, 1]
            facemodel: contains `tri` (triangles[70789, 3], index start from 1)
    '''
    from pytorch3d.structures import Meshes
    from pytorch3d.renderer.mesh.textures import TexturesVertex
    from pytorch3d.renderer import (PerspectiveCameras, PointLights,
                                    RasterizationSettings, MeshRenderer,
                                    MeshRasterizer, SoftPhongShader,
                                    BlendParams)

    face_color = TexturesVertex(verts_features=face_color.to(device))
    face_buf = torch.from_numpy(facemodel.tri - 1)  # index start from 1
    face_idx = face_buf.unsqueeze(0)

    mesh = Meshes(face_shape.to(device), face_idx.to(device), face_color)

    R = torch.eye(3).view(1, 3, 3).to(device)
    R[0, 0, 0] *= -1.0
    T = torch.zeros([1, 3]).to(device)

    half_size = (image_size - 1.0) / 2
    focal_length = torch.tensor([fx / half_size, fy / half_size],
                                dtype=torch.float32).reshape(1, 2).to(device)
    principal_point = torch.tensor([(half_size - px) / half_size,
                                    (py - half_size) / half_size],
                                   dtype=torch.float32).reshape(1,
                                                                2).to(device)

    cameras = PerspectiveCameras(device=device,
                                 R=R,
                                 T=T,
                                 focal_length=focal_length,
                                 principal_point=principal_point)

    raster_settings = RasterizationSettings(image_size=image_size,
                                            blur_radius=0.0,
                                            faces_per_pixel=1)

    lights = PointLights(device=device,
                         ambient_color=((1.0, 1.0, 1.0), ),
                         diffuse_color=((0.0, 0.0, 0.0), ),
                         specular_color=((0.0, 0.0, 0.0), ),
                         location=((0.0, 0.0, 1e5), ))

    blend_params = BlendParams(background_color=(0.0, 0.0, 0.0))

    renderer = MeshRenderer(rasterizer=MeshRasterizer(
        cameras=cameras, raster_settings=raster_settings),
                            shader=SoftPhongShader(device=device,
                                                   cameras=cameras,
                                                   lights=lights,
                                                   blend_params=blend_params))
    images = renderer(mesh)
    images = torch.clamp(images, 0.0, 1.0)
    return images
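A smoke-test sketch for render_img with a tiny random stand-in for the face model (per the docstring, the real facemodel is a 3DMM with 35709 vertices; here a namespace with a 1-indexed tri array mimics its interface; requires CUDA):

import torch
import numpy as np
from types import SimpleNamespace

nver, ntri = 100, 50
face_shape = torch.rand(1, nver, 3) - 0.5
face_shape[..., 2] += 10.0  # push the mesh in front of the camera
face_color = torch.rand(1, nver, 3)
facemodel = SimpleNamespace(tri=np.random.randint(1, nver + 1, (ntri, 3)))
images = render_img(face_shape, face_color, facemodel)
print(images.shape)  # torch.Size([1, 224, 224, 4])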
Example #20
    trans = cameras.unproject_points(trans.unsqueeze(0),
                                     world_coordinates=False,
                                     scaled_depth_input=True)[0]
    rot = random_rotations(1)[0].to(device)

    transform = Transform3d() \
        .scale(scale) \
        .compose(Rotate(rot)) \
        .translate(*trans)

    # TODO: transform mesh
    # Create a phong renderer by composing a rasterizer and a shader. The textured phong shader will
    # interpolate the texture uv coordinates for each vertex, sample from a texture image and
    # apply the Phong lighting model
    renderer = MeshRenderer(rasterizer=MeshRasterizer(
        cameras=cameras, raster_settings=raster_settings),
                            shader=SoftPhongShader(
                                device=device,
                                cameras=cameras,
                                lights=lights,
                            ))
    images = renderer(mesh.scale_verts(scale),
                      R=rot.unsqueeze(0),
                      T=trans.unsqueeze(0))
    plt.figure(figsize=(10, 10))
    plt.imshow(images[0, ..., :3].cpu().numpy())
    plt.grid(False)
    plt.axis("off")
    plt.show()
    plt.close('all')
Example #21
                                    principal_point=((512, 512), ),
                                    R=Rtotal,
                                    T=Ttotal,
                                    image_size=((1024, 1024), ))

    mymaterials = Materials(device=device, shininess=8)
    raster_settings = RasterizationSettings(
        image_size=1024,
        blur_radius=0.0,
        faces_per_pixel=1,
    )
    renderer = MeshRenderer(rasterizer=MeshRasterizer(
        cameras=camera, raster_settings=raster_settings),
                            shader=SoftPhongShader(
                                device=device,
                                cameras=camera,
                                lights=lights,
                                materials=mymaterials,
                            ))

    meshes = mesh.extend(num_views)
    target_images = renderer(meshes, cameras=cameras, lights=lights)
    target_rgb = [target_images[i, ..., :3] for i in range(num_views)]
    target_cameras = [
        PerspectiveCameras(device=device,
                           focal_length=4500,
                           principal_point=((512, 512), ),
                           R=Rtotal[None, i, ...],
                           T=Ttotal[None, i, ...],
                           image_size=((1024, 1024), ))
        for i in range(num_views)
    ]
Example #22
def generate_cow_renders(num_views: int = 40,
                         data_dir: str = DATA_DIR,
                         azimuth_range: float = 180):
    """
    This function generates `num_views` renders of a cow mesh.
    The renders are generated from viewpoints sampled at uniformly distributed
    azimuth intervals. The elevation is kept constant so that the camera's
    vertical position coincides with the equator.

    For a more detailed explanation of this code, please refer to the
    docs/tutorials/fit_textured_mesh.ipynb notebook.

    Args:
        num_views: The number of generated renders.
        data_dir: The folder that contains the cow mesh files. If the cow mesh
            files do not exist in the folder, this function will automatically
            download them.

    Returns:
        cameras: A batch of `num_views` `FoVPerspectiveCameras` from which the
            images are rendered.
        images: A tensor of shape `(num_views, height, width, 3)` containing
            the rendered images.
        silhouettes: A tensor of shape `(num_views, height, width)` containing
            the rendered silhouettes.
    """

    # set the paths

    # download the cow mesh if not done before
    cow_mesh_files = [
        os.path.join(data_dir, fl)
        for fl in ("cow.obj", "cow.mtl", "cow_texture.png")
    ]
    if any(not os.path.isfile(f) for f in cow_mesh_files):
        os.makedirs(data_dir, exist_ok=True)
        os.system(
            f"wget -P {data_dir} " +
            "https://dl.fbaipublicfiles.com/pytorch3d/data/cow_mesh/cow.obj")
        os.system(
            f"wget -P {data_dir} " +
            "https://dl.fbaipublicfiles.com/pytorch3d/data/cow_mesh/cow.mtl")
        os.system(
            f"wget -P {data_dir} " +
            "https://dl.fbaipublicfiles.com/pytorch3d/data/cow_mesh/cow_texture.png"
        )

    # Setup
    if torch.cuda.is_available():
        device = torch.device("cuda:0")
        torch.cuda.set_device(device)
    else:
        device = torch.device("cpu")

    # Load obj file
    obj_filename = os.path.join(data_dir, "cow.obj")
    mesh = load_objs_as_meshes([obj_filename], device=device)

    # We scale, normalize and center the target mesh to fit in a sphere of radius 1
    # centered at (0, 0, 0). (scale, center) will be used to bring the predicted mesh
    # to its original center and scale. Note that normalizing the target mesh
    # speeds up the optimization, but is not necessary!
    verts = mesh.verts_packed()
    N = verts.shape[0]
    center = verts.mean(0)
    scale = max((verts - center).abs().max(0)[0])
    mesh.offset_verts_(-(center.expand(N, 3)))
    mesh.scale_verts_((1.0 / float(scale)))

    # Get a batch of viewing angles.
    elev = torch.linspace(0, 0, num_views)  # keep constant
    azim = torch.linspace(-azimuth_range, azimuth_range, num_views) + 180.0

    # Place a point light in front of the object. As mentioned above, the front of
    # the cow is facing the -z direction.
    lights = PointLights(device=device, location=[[0.0, 0.0, -3.0]])

    # Initialize a perspective camera that represents a batch of different
    # viewing angles. All the camera helper methods support mixed type inputs and
    # broadcasting, so we can view the scene from a distance of dist=2.7 and
    # specify elevation and azimuth angles for each viewpoint as tensors.
    R, T = look_at_view_transform(dist=2.7, elev=elev, azim=azim)
    cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

    # Define the settings for rasterization and shading. Here we set the output
    # image to be of size 128x128. As we are rendering images for visualization
    # purposes only we will set faces_per_pixel=1 and blur_radius=0.0. We also
    # leave bin_size and max_faces_per_bin at their default values of None, which
    # sets their values using heuristics and ensures that the faster
    # coarse-to-fine rasterization method is used. Refer to rasterize_meshes.py
    # for explanations of these parameters and to docs/notes/renderer.md for an
    # explanation of the difference between naive and coarse-to-fine rasterization.
    raster_settings = RasterizationSettings(image_size=128,
                                            blur_radius=0.0,
                                            faces_per_pixel=1)

    # Create a phong renderer by composing a rasterizer and a shader. The textured
    # phong shader will interpolate the texture uv coordinates for each vertex,
    # sample from a texture image and apply the Phong lighting model
    blend_params = BlendParams(sigma=1e-4,
                               gamma=1e-4,
                               background_color=(0.0, 0.0, 0.0))
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras,
                                  raster_settings=raster_settings),
        shader=SoftPhongShader(device=device,
                               cameras=cameras,
                               lights=lights,
                               blend_params=blend_params),
    )

    # Create a batch of meshes by repeating the cow mesh and associated textures.
    # Meshes has a useful `extend` method which allows us do this very easily.
    # This also extends the textures.
    meshes = mesh.extend(num_views)

    # Render the cow mesh from each viewing angle
    target_images = renderer(meshes, cameras=cameras, lights=lights)

    # Rasterization settings for silhouette rendering
    sigma = 1e-4
    raster_settings_silhouette = RasterizationSettings(
        image_size=128,
        blur_radius=np.log(1.0 / 1e-4 - 1.0) * sigma,
        faces_per_pixel=50)

    # Silhouette renderer
    renderer_silhouette = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras,
                                  raster_settings=raster_settings_silhouette),
        shader=SoftSilhouetteShader(),
    )

    # Render silhouette images.  Channel 3 (the last channel) of the rendering
    # output is the alpha/silhouette channel
    silhouette_images = renderer_silhouette(meshes,
                                            cameras=cameras,
                                            lights=lights)

    # binary silhouettes
    silhouette_binary = (silhouette_images[..., 3] > 1e-4).float()

    return cameras, target_images[..., :3], silhouette_binary
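Typical usage, as in the fit_textured_mesh tutorial this helper is adapted from:

cameras, images, silhouettes = generate_cow_renders(num_views=40)
print(images.shape)       # torch.Size([40, 128, 128, 3])
print(silhouettes.shape)  # torch.Size([40, 128, 128])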
Example #23
# Define the settings for rasterization and shading. Here we set the output image to be of size
# 256x256. To form the blended image we use 80 faces for each pixel. Refer to rasterize_meshes.py
# for an explanation of this parameter.
raster_settings = RasterizationSettings(image_size=256,
                                        blur_radius=np.log(1. / 0.001 - 1.) *
                                        blend_params.sigma,
                                        faces_per_pixel=80,
                                        bin_size=0)

# Create a silhouette mesh renderer by composing a rasterizer and a shader.
lights = PointLights(device=device, location=((2.0, 2.0, -2.0), ))
silhouette_renderer = MeshRenderer(
    rasterizer=MeshRasterizer(cameras=cameras,
                              raster_settings=raster_settings),
    shader=SoftPhongShader(blend_params=blend_params,
                           device=device,
                           lights=lights))

# We will also create a phong renderer. This is simpler and only needs to render one face per pixel.
raster_settings = RasterizationSettings(image_size=256,
                                        blur_radius=0.0,
                                        faces_per_pixel=1,
                                        bin_size=0)
# We can add a point light in front of the object.
lights = PointLights(device=device, location=((2.0, 2.0, -2.0), ))
phong_renderer = MeshRenderer(rasterizer=MeshRasterizer(
    cameras=cameras, raster_settings=raster_settings),
                              shader=HardPhongShader(device=device,
                                                     lights=lights))

# Select the viewpoint using spherical angles