Example #1
    def test_mesh_renderer_to(self):
        """
        Test moving all the tensors in the mesh renderer to a new device.
        """

        device1 = torch.device("cpu")

        R, T = look_at_view_transform(1500, 0.0, 0.0)

        # Init shader settings
        materials = Materials(device=device1)
        lights = PointLights(device=device1)
        lights.location = torch.tensor([0.0, 0.0, +1000.0], device=device1)[None]

        raster_settings = RasterizationSettings(
            image_size=256, blur_radius=0.0, faces_per_pixel=1
        )
        cameras = FoVPerspectiveCameras(
            device=device1, R=R, T=T, aspect_ratio=1.0, fov=60.0, zfar=100
        )
        rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)

        blend_params = BlendParams(
            1e-4,
            1e-4,
            background_color=torch.zeros(3, dtype=torch.float32, device=device1),
        )

        shader = SoftPhongShader(
            lights=lights,
            cameras=cameras,
            materials=materials,
            blend_params=blend_params,
        )
        renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)

        mesh = ico_sphere(2, device1)
        verts_padded = mesh.verts_padded()
        textures = TexturesVertex(
            verts_features=torch.ones_like(verts_padded, device=device1)
        )
        mesh.textures = textures
        self._check_mesh_renderer_props_on_device(renderer, device1)

        # Test rendering on cpu
        output_images = renderer(mesh)
        self.assertEqual(output_images.device, device1)

        # Move renderer and mesh to another device and re-render.
        # This also tests that background_color is correctly moved to
        # the new device
        device2 = torch.device("cuda:0")
        renderer = renderer.to(device2)
        mesh = mesh.to(device2)
        self._check_mesh_renderer_props_on_device(renderer, device2)
        output_images = renderer(mesh)
        self.assertEqual(output_images.device, device2)
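
A minimal standalone sketch of the pattern this test exercises, assuming a CUDA device is available: a composed MeshRenderer moves all of its submodule tensors (cameras, lights, materials, blend parameters) with a single .to() call.

import torch
from pytorch3d.renderer import (FoVPerspectiveCameras, MeshRasterizer,
                                MeshRenderer, PointLights,
                                RasterizationSettings, SoftPhongShader)

cameras = FoVPerspectiveCameras()
renderer = MeshRenderer(
    rasterizer=MeshRasterizer(cameras=cameras,
                              raster_settings=RasterizationSettings()),
    shader=SoftPhongShader(cameras=cameras, lights=PointLights()),
)
renderer = renderer.to(torch.device("cuda:0"))  # all sub-tensors now on the GPU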
Example #2
    def __init__(self, img_size, mesh_color):
        self.device = torch.device("cuda:0")
        # self.render_size = 1920

        self.img_size = img_size

        # mesh color
        mesh_color = np.array(mesh_color)[::-1]
        self.mesh_color = torch.from_numpy(mesh_color.copy()).view(
            1, 1, 3).float().to(self.device)

        # renderer for large objects, such as whole body.
        self.render_size_large = 700
        lights = PointLights(
            ambient_color=[[1.0, 1.0, 1.0]],
            diffuse_color=[[1.0, 1.0, 1.0]],
            device=self.device,
            location=[[1.0, 1.0, -30]],
        )
        self.renderer_large = self.__get_renderer(self.render_size_large, lights)

        # renderer for medium-sized objects.
        self.render_size_medium = 400
        lights = PointLights(
            ambient_color=[[0.5, 0.5, 0.5]],
            diffuse_color=[[0.5, 0.5, 0.5]],
            device=self.device,
            location=[[1.0, 1.0, -30]],
        )
        self.renderer_medium = self.__get_renderer(self.render_size_medium, lights)

        # renderer for small objects.
        self.render_size_small = 200
        lights = PointLights(
            ambient_color=[[0.5, 0.5, 0.5]],
            diffuse_color=[[0.5, 0.5, 0.5]],
            device=self.device,
            location=[[1.0, 1.0, -30]],
        )
        self.renderer_small = self.__get_renderer(self.render_size_small, lights)
Example #3
    def __init__(self, device, renderer, nclasses, args):
        super().__init__()
        self.device = device
        self.renderer = renderer
        self.nclasses = nclasses
        self.nviews = args.nviews
        self.light = args.lights
        self.net_1 = models.alexnet(pretrained=True).features
        self.net_2 = models.alexnet(pretrained=True).classifier
        self.net_2[-1] = nn.Linear(4096, self.nclasses)
        self.lights = PointLights(device=device)
        self.angle_range = np.pi * 2
        self.distance_range = args.max_scale
        phi = (1 + np.sqrt(5)) / 2

        vertices = np.array(
            [[1, 1, 1], [1, 1, -1], [1, -1, 1], [1, -1, -1], [-1, 1, 1],
             [-1, 1, -1], [-1, -1, 1], [-1, -1, -1], [0, 1 / phi, phi],
             [0, 1 / phi, -phi], [0, -1 / phi, phi], [0, -1 / phi, -phi],
             [phi, 0, 1 / phi], [phi, 0, -1 / phi], [-phi, 0, 1 / phi],
             [-phi, 0, -1 / phi], [1 / phi, phi, 0], [-1 / phi, phi, 0],
             [1 / phi, -phi, 0], [-1 / phi, -phi, 0]],
            dtype=np.float32)
        self.vertices = torch.from_numpy(vertices).to(device)

        # Create an optimizable parameter for the camera position.
        self.camera_position = nn.Parameter(torch.rand(4).to(device))
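
The vertex table above is the standard construction of a regular dodecahedron from the golden ratio phi. A quick standalone check (illustrative, not from the source) confirms that all 20 vertices lie on a common sphere of radius sqrt(3), since phi^2 = phi + 1 implies 1/phi^2 + phi^2 = 3.

import numpy as np

phi = (1 + np.sqrt(5)) / 2
v_cube = np.linalg.norm([1, 1, 1])          # sqrt(3)
v_rect = np.linalg.norm([0, 1 / phi, phi])  # also sqrt(3)
assert np.isclose(v_cube, v_rect)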
Example #4
def render_cubified_voxels(voxels: torch.Tensor,
                           shader_type=HardPhongShader,
                           device="cpu",
                           **kwargs):
    """
    Use the Cubify operator to convert input voxels to a mesh and then render that mesh.

    Args:
        voxels: FloatTensor of shape (N, D, D, D) where N is the batch size and
            D is the number of voxels along each dimension.
        shader_type: Shader to use for rendering. Examples include
            HardPhongShader (default), SoftPhongShader, or any other valid
            Shader class.
        device: torch.device on which the tensors should be located.
        **kwargs: Accepts any of the kwargs that the renderer supports.
    Returns:
        Batch of rendered images of shape (N, H, W, 3).
    """
    cubified_voxels = cubify(voxels, CUBIFY_THRESH).to(device)
    cubified_voxels.textures = TexturesVertex(verts_features=torch.ones_like(
        cubified_voxels.verts_padded(), device=device))
    cameras = BlenderCamera(device=device)
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=cameras,
            raster_settings=kwargs.get("raster_settings",
                                       RasterizationSettings()),
        ),
        shader=shader_type(
            device=device,
            cameras=cameras,
            lights=kwargs.get("lights", PointLights()).to(device),
        ),
    )
    return renderer(cubified_voxels)
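
A hypothetical call to render_cubified_voxels, with a random occupancy grid standing in for real voxel predictions; the (N, D, D, D) input shape follows the docstring above.

import torch

voxels = (torch.rand(2, 16, 16, 16) > 0.5).float()  # (N, D, D, D)
images = render_cubified_voxels(voxels, device="cpu")
print(images.shape)  # batch of rendered images, (2, H, W, 3) per the docstring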
Example #5
def visualize_pred(img,
                   category,
                   pred,
                   image_name,
                   mesh_path,
                   down_sample_rate=8,
                   device='cuda:0'):
    render_image_size = max(IMAGE_SIZES[category])
    crop_size = IMAGE_SIZES[category]

    cameras = OpenGLPerspectiveCameras(device=device, fov=12.0)
    raster_settings = RasterizationSettings(image_size=render_image_size,
                                            blur_radius=0.0,
                                            faces_per_pixel=1,
                                            bin_size=0)
    raster_settings1 = RasterizationSettings(image_size=render_image_size //
                                             down_sample_rate,
                                             blur_radius=0.0,
                                             faces_per_pixel=1,
                                             bin_size=0)
    rasterizer = MeshRasterizer(cameras=cameras,
                                raster_settings=raster_settings1)
    lights = PointLights(device=device, location=((2.0, 2.0, -2.0), ))
    phong_renderer = MeshRenderer(rasterizer=MeshRasterizer(
        cameras=cameras, raster_settings=raster_settings),
                                  shader=HardPhongShader(device=device,
                                                         lights=lights,
                                                         cameras=cameras))

    theta_pred = pred['theta']
    elevation_pred = pred['elevation']
    azimuth_pred = pred['azimuth']
    distance_pred = pred['distance']
    cad_idx = pred['cad_idx']
    dx = pred['dx'] * down_sample_rate
    dy = pred['dy'] * down_sample_rate

    x3d, xface = load_off(mesh_path + '/%02d.off' % cad_idx)

    verts = torch.from_numpy(x3d).to(device)
    verts = pre_process_mesh_pascal(verts)
    faces = torch.from_numpy(xface).to(device)

    verts_rgb = torch.ones_like(verts)[None]
    textures = Textures(verts_rgb=verts_rgb.to(device))  # keyword needed: the first positional arg is maps
    meshes = Meshes(verts=[verts], faces=[faces], textures=textures)

    img_ = get_img(theta_pred, elevation_pred, azimuth_pred, distance_pred,
                   meshes, phong_renderer, crop_size, render_image_size,
                   device)
    C = camera_position_from_spherical_angles(distance_pred,
                                              elevation_pred,
                                              azimuth_pred,
                                              degrees=False,
                                              device=device)
    # get_image = np.concatenate((img, alpha_merge_imgs(img, img_)), axis=1)
    img_ = shift_img(img_, dx, dy)
    get_image = alpha_merge_imgs(img, img_)

    Image.fromarray(get_image).save(image_name)
Example #6
def sphere_render_bsdf(bsdf,
                       integrator=None,
                       device="cuda",
                       size=256,
                       chunk_size=128,
                       scale=100):
    from pytorch3d.pathtracer.shapes import Sphere
    from pytorch3d.pathtracer.bsdf import Diffuse
    from pytorch3d.pathtracer import pathtrace
    from pytorch3d.renderer import (
        look_at_view_transform,
        OpenGLPerspectiveCameras,
        PointLights,
    )
    import pytorch3d.pathtracer.integrators as integrators
    sphere = Sphere([0, 0, 0], 1, device=device)
    R, T = look_at_view_transform(dist=2., elev=0, azim=0)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    lights = PointLights(device=device, location=[[0., 1., 4.]], scale=scale)
    if integrator is None:
        integrator = integrators.Direct()
    return pathtrace(
        sphere,
        cameras=cameras,
        lights=lights,
        chunk_size=chunk_size,
        size=size,
        bsdf=bsdf,
        integrator=integrator,
        device=device,
        silent=True,
    )[0]
Example #7
def set_renderer(image_size=512, use_sfm=False):
    # Setup
    device = torch.device("cuda:0")
    torch.cuda.set_device(device)

    # Initialize a camera (SfM perspective or orthographic, depending on use_sfm).
    R, T = look_at_view_transform(2.0, 0, 180)
    if use_sfm:
        cameras = SfMPerspectiveCameras(focal_length=580.0,
                                        device=device,
                                        R=R,
                                        T=T)
    else:
        cameras = OpenGLOrthographicCameras(device=device, R=R, T=T)

    raster_settings = RasterizationSettings(image_size=image_size,
                                            blur_radius=0.0,
                                            faces_per_pixel=1,
                                            bin_size=None,
                                            max_faces_per_bin=None)

    lights = PointLights(device=device, location=((2.0, 2.0, 2.0), ))

    rasterizer = MeshRasterizer(cameras=cameras,
                                raster_settings=raster_settings)
    shader = HardPhongShader(device=device, cameras=cameras, lights=lights)
    if use_sfm:
        renderer = MeshRendererWithDepth(rasterizer=rasterizer, shader=shader)
    else:
        renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
    return renderer
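
Hypothetical usage of set_renderer; per the branches above, use_sfm=True returns the MeshRendererWithDepth variant, so that renderer also yields a depth map.

renderer = set_renderer(image_size=256, use_sfm=False)       # plain MeshRenderer
depth_renderer = set_renderer(image_size=256, use_sfm=True)  # images plus depth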
Example #8
def render_obj(verts, faces, distance, elevation, azimuth):
    device = torch.device("cuda:0")

    verts_rgb = torch.ones_like(verts)[None]
    textures = Textures(verts_rgb=verts_rgb.to(device))

    cur_mesh = Meshes(verts=[verts.to(device)],
                      faces=[faces.to(device)],
                      textures=textures)

    cameras = OpenGLPerspectiveCameras(device=device)

    blend_params = BlendParams(sigma=1e-4, gamma=1e-4)

    raster_settings = RasterizationSettings(image_size=256,
                                            blur_radius=0.0,
                                            faces_per_pixel=1,
                                            bin_size=0)

    lights = PointLights(device=device, location=((2.0, 2.0, -2.0), ))
    phong_renderer = MeshRenderer(rasterizer=MeshRasterizer(
        cameras=cameras, raster_settings=raster_settings),
                                  shader=PhongShader(device=device,
                                                     lights=lights))

    R, T = look_at_view_transform(distance, elevation, azimuth, device=device)

    return phong_renderer(meshes_world=cur_mesh, R=R, T=T).cpu().numpy()
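
A sketch of calling render_obj on a mesh loaded from disk; the path and the viewpoint values are illustrative, not from the source.

from pytorch3d.io import load_obj

verts, faces, _ = load_obj("model.obj")  # hypothetical path
images = render_obj(verts, faces.verts_idx,
                    distance=2.7, elevation=10.0, azimuth=30.0)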
Example #9
    def create_renderer(self):
        self.num_angles = self.config.num_angles
        azim = torch.linspace(-1 * self.config.angle_range,
                              self.config.angle_range, self.num_angles)

        R, T = look_at_view_transform(dist=1.0, elev=0, azim=azim)

        T[:, 1] = -85
        T[:, 2] = 200

        cameras = FoVPerspectiveCameras(device=self.device, R=R, T=T)

        raster_settings = RasterizationSettings(
            image_size=self.config.img_size,
            blur_radius=0.0,
            faces_per_pixel=1,
        )

        lights = PointLights(device=self.device, location=[[0.0, 85, 100.0]])

        renderer = MeshRenderer(rasterizer=MeshRasterizer(
            cameras=cameras, raster_settings=raster_settings),
                                shader=HardPhongShader(device=self.device,
                                                       cameras=cameras,
                                                       lights=lights))
        return renderer
Example #10
    def _get_renderer(self, device):
        R, T = look_at_view_transform(10, 0, 0)  # camera's position
        cameras = FoVPerspectiveCameras(
            device=device,
            R=R,
            T=T,
            znear=0.01,
            zfar=50,
            fov=2 * np.arctan(self.img_size // 2 / self.focal) * 180. / np.pi)

        lights = PointLights(device=device,
                             location=[[0.0, 0.0, 1e5]],
                             ambient_color=[[1, 1, 1]],
                             specular_color=[[0., 0., 0.]],
                             diffuse_color=[[0., 0., 0.]])

        raster_settings = RasterizationSettings(
            image_size=self.img_size,
            blur_radius=0.0,
            faces_per_pixel=1,
        )
        blend_params = blending.BlendParams(background_color=[0, 0, 0])

        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=SoftPhongShader(device=device,
                                   cameras=cameras,
                                   lights=lights,
                                   blend_params=blend_params))
        return renderer
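
The fov argument above is the standard pinhole relation fov = 2 * atan((image_size / 2) / focal). A quick standalone check with assumed values (img_size and focal are attributes set elsewhere in the class):

import numpy as np

img_size, focal = 256, 1015.0  # illustrative values, not from the source
fov_deg = 2 * np.arctan((img_size / 2) / focal) * 180.0 / np.pi
print(f"{fov_deg:.1f} degrees")  # ~14.4 degrees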
Example #11
 def render_with_batch_size(self, batch_size, dist, light_location,
                            output_path):
     self.meshes = self.meshes.extend(batch_size)
     self.batch_size = batch_size
     elev = torch.linspace(0, 180, batch_size)
     azim = torch.linspace(-180, 180, batch_size)
     self.R, self.T = look_at_view_transform(dist=dist,
                                             elev=elev,
                                             azim=azim)
     self.cameras = OpenGLPerspectiveCameras(device=self.device,
                                             R=self.R,
                                             T=self.T)
     # set light location
     self.light_location = light_location
     lights = PointLights(device=self.device,
                          location=[self.light_location])
     # call the PyTorch3D mesh renderer with a soft Phong shader
     renderer = MeshRenderer(
         rasterizer=MeshRasterizer(cameras=self.cameras,
                                   raster_settings=self.raster_settings),
         shader=TexturedSoftPhongShader(device=self.device,
                                        cameras=self.cameras,
                                        lights=lights))
     images = renderer(self.meshes, cameras=self.cameras, lights=lights)
     for i in range(self.batch_size):
         img = images[i, ..., :3].cpu().numpy() * 255
         img = img.astype('uint8')
         img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)  # renderer outputs RGB; OpenCV writes BGR
         cv2.imwrite(output_path + 'render-image-' + str(i) + '.png', img)
Example #12
 def setup(self, device):
     R, T = look_at_view_transform(self.viewpoint_distance,
                                   self.viewpoint_elevation,
                                   self.viewpoint_azimuth,
                                   device=device)
     cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
     raster_settings = RasterizationSettings(
         image_size=self.opt.fast_image_size,
         blur_radius=self.opt.raster_blur_radius,
         faces_per_pixel=self.opt.raster_faces_per_pixel,
     )
     rasterizer = MeshRasterizer(cameras=cameras,
                                 raster_settings=raster_settings)
     lights = PointLights(device=device,
                          location=[self.opt.lights_location])
     # Note: the point lights above are immediately overwritten; only the
     # directional lights reach the shader below.
     lights = DirectionalLights(device=device,
                                direction=[self.opt.lights_direction])
     shader = SoftPhongShader(
         device=device,
         cameras=cameras,
         lights=lights,
         blend_params=BlendParams(
             self.opt.blend_params_sigma,
             self.opt.blend_params_gamma,
             self.opt.blend_params_background_color,
         ),
     )
     self.renderer = MeshRenderer(
         rasterizer=rasterizer,
         shader=shader,
     )
Example #13
def createRenderer(image_size,faces_per_pixel,lights_location):
    
    # Function: createRenderer
    # Inputs:   image_size,faces_per_pixel,lights_location
    # Process:  creates an image renderer
    # Output:   returns renderer
        
    cameras = OpenGLPerspectiveCameras()
    
    # Settings for rasterization
    raster_settings = RasterizationSettings(
        image_size=image_size, 
        blur_radius=0.0, 
        faces_per_pixel=faces_per_pixel, 
    )

    # We can add a point light in front of the object. 
    lights = PointLights(location=(lights_location,))
    created_renderer = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=cameras, 
            raster_settings=raster_settings
        ),
        shader=HardPhongShader(cameras=cameras, lights=lights)
    )
    
    return created_renderer
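
Hypothetical usage of this createRenderer; lights_location must be a single (x, y, z) tuple because it is wrapped in a 1-tuple before being passed to PointLights.

renderer = createRenderer(image_size=512,
                          faces_per_pixel=1,
                          lights_location=(0.0, 0.0, -3.0))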
Example #14
    def __init__(self, image_size):
        super(Renderer, self).__init__()

        self.image_size = image_size
        self.dog_obj = load_objs_as_meshes(['data/dog_B/dog_B/dog_B_tpose.obj'])

        raster_settings = RasterizationSettings(
            image_size=self.image_size, 
            blur_radius=0.0, 
            faces_per_pixel=1, 
            bin_size=None
        )

        R, T = look_at_view_transform(2.7, 0, 0) 
        cameras = OpenGLPerspectiveCameras(device=R.device, R=R, T=T)
        lights = PointLights(device=R.device, location=[[0.0, 1.0, 0.0]])

        self.renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=cameras, 
                raster_settings=raster_settings
            ),
            shader=SoftPhongShader(
                device=R.device, 
                cameras=cameras,
                lights=lights
            )
        )
Example #15
def define_render(num):
    shapenet_cam_params_file = '../data/metadata/rendering_metadata.json'
    with open(shapenet_cam_params_file) as f:
        shapenet_cam_params = json.load(f)

    param_num = num
    R, T = look_at_view_transform(
        dist=shapenet_cam_params["distance"][param_num] * 5,
        elev=shapenet_cam_params["elevation"][param_num],
        azim=shapenet_cam_params["azimuth"][param_num])
    cameras = FoVPerspectiveCameras(
        device=device,
        R=R,
        T=T,
        fov=shapenet_cam_params["field_of_view"][param_num])

    raster_settings = RasterizationSettings(
        image_size=512,
        blur_radius=0.0,
        faces_per_pixel=1,
    )

    lights = PointLights(device=device, location=[[0.0, 0.0, -3.0]])

    renderer = MeshRenderer(rasterizer=MeshRasterizer(
        cameras=cameras, raster_settings=raster_settings),
                            shader=SoftPhongShader(device=device,
                                                   cameras=cameras,
                                                   lights=lights))

    return renderer
Example #16
def sphere_examples(bsdf, device="cuda", size=256, chunk_size=128, scale=100):
    from pytorch3d.pathtracer.shapes import Sphere
    from pytorch3d.pathtracer.bsdf import Diffuse
    from pytorch3d.pathtracer import pathtrace
    from pytorch3d.renderer import (
        look_at_view_transform,
        OpenGLPerspectiveCameras,
        PointLights,
    )
    import pytorch3d.pathtracer.integrators as integrators
    sphere = Sphere([0, 0, 0], 1, device=device)
    R, T = look_at_view_transform(dist=2., elev=0, azim=0)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    lights = PointLights(device=device, location=[[0., 1., 4.]], scale=scale)
    out = []

    for basis in bsdf.bsdfs:
        expected = pathtrace(
            sphere,
            cameras=cameras,
            lights=lights,
            chunk_size=chunk_size,
            size=size,
            bsdf=basis,
            integrator=integrators.Direct(),
            device=device,
            silent=True,
        )[0]
        out.append(expected)
    return out
Example #17
def set_renderer():
    # Setup
    device = torch.device("cuda:0")
    torch.cuda.set_device(device)

    # Initialize an OpenGL orthographic camera.
    R, T = look_at_view_transform(2.0, 0, 180) 
    cameras = OpenGLOrthographicCameras(device=device, R=R, T=T)

    raster_settings = RasterizationSettings(
        image_size=512, 
        blur_radius=0.0, 
        faces_per_pixel=1, 
        bin_size=None,
        max_faces_per_bin=None
    )

    lights = PointLights(device=device, location=((2.0, 2.0, 2.0),))

    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=cameras, 
            raster_settings=raster_settings
        ),
        shader=HardPhongShader(
            device=device, 
            cameras=cameras,
            lights=lights
        )
    )
    return renderer
Example #18
def createRenderer(device, camera, light, imageSize):
    '''
    It creates a PyTorch3D renderer with the given camera pose, light source,
    and output image size.

    Parameters
    ----------
    device : 
        Device on which the renderer is created.
    camera : 
        Camera pose.
    light  : 
        Position of the light source.
    imageSize : 
        The size of the rendered image.

    Returns
    -------
    renderer : 
        PyTorch3D renderer.

    '''
    if camera is None:
        camera = (2.0, -20.0, 180.0)
    if light is None:
        light = (0.0, 2.0, 0.0)

    # Initialize an OpenGL perspective camera.
    # With world coordinates +Y up, +X left and +Z into the screen.
    R, T = look_at_view_transform(camera[0], camera[1], camera[2])
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

    # Define the settings for rasterization and shading. The output image size
    # is given by imageSize. As we are rendering images for visualization
    # purposes only, we set faces_per_pixel=1 and blur_radius=0.0. Refer to
    # rasterize_meshes.py for explanations of these parameters and to
    # docs/notes/renderer.md for the difference between naive and
    # coarse-to-fine rasterization.
    raster_settings = RasterizationSettings(
        image_size=imageSize,
        blur_radius=0.0,
        faces_per_pixel=1,
    )

    # Place a point light at the given location.
    lights = PointLights(device=device,
                         location=[[light[0], light[1], light[2]]])

    # Create a phong renderer by composing a rasterizer and a shader.
    renderer = MeshRenderer(rasterizer=MeshRasterizer(
        cameras=cameras, raster_settings=raster_settings),
                            shader=HardPhongShader(device=device,
                                                   cameras=cameras,
                                                   lights=lights))

    return renderer
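
A sketch of calling this helper with its documented defaults spelled out; judging from how the tuples are unpacked above, camera is a (distance, elevation, azimuth) triple and light an (x, y, z) position.

import torch

device = torch.device("cpu")
renderer = createRenderer(device,
                          camera=(2.0, -20.0, 180.0),
                          light=(0.0, 2.0, 0.0),
                          imageSize=512)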
Example #19
    def render(self,
               model_ids: Optional[List[str]] = None,
               categories: Optional[List[str]] = None,
               sample_nums: Optional[List[int]] = None,
               idxs: Optional[List[int]] = None,
               shader_type=HardPhongShader,
               device="cpu",
               **kwargs) -> torch.Tensor:
        """
        If a list of model_ids are supplied, render all the objects by the given model_ids.
        If no model_ids are supplied, but categories and sample_nums are specified, randomly
        select a number of objects (number specified in sample_nums) in the given categories
        and render these objects. If instead a list of idxs is specified, check if the idxs
        are all valid and render models by the given idxs. Otherwise, randomly select a number
        (first number in sample_nums, default is set to be 1) of models from the loaded dataset
        and render these models.

        Args:
            model_ids: List[str] of model_ids of models intended to be rendered.
            categories: List[str] of categories intended to be rendered. categories
                and sample_nums must be specified at the same time. categories can be given
                in the form of synset offsets or labels, or a combination of both.
            sample_nums: List[int] of number of models to be randomly sampled from
                each category. Could also contain one single integer, in which case it
                will be broadcasted for every category.
            idxs: List[int] of indices of models to be rendered in the dataset.
            shader_type: Select shading. Valid options include HardPhongShader (default),
                SoftPhongShader, HardGouraudShader, SoftGouraudShader, HardFlatShader,
                SoftSilhouetteShader.
            device: torch.device on which the tensors should be located.
            **kwargs: Accepts any of the kwargs that the renderer supports.

        Returns:
            Batch of rendered images of shape (N, H, W, 3).
        """
        paths = self._handle_render_inputs(model_ids, categories, sample_nums,
                                           idxs)
        meshes = load_objs_as_meshes(paths, device=device, load_textures=False)
        meshes.textures = Textures(
            verts_rgb=torch.ones_like(meshes.verts_padded(), device=device))
        cameras = kwargs.get("cameras", OpenGLPerspectiveCameras()).to(device)
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=cameras,
                raster_settings=kwargs.get("raster_settings",
                                           RasterizationSettings()),
            ),
            shader=shader_type(
                device=device,
                cameras=cameras,
                lights=kwargs.get("lights", PointLights()).to(device),
            ),
        )
        return renderer(meshes)
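
Hypothetical usage on a ShapeNetCore-style dataset object (here called dataset, an assumption): sample and render two random models from one category, overriding the raster settings through kwargs as the implementation allows.

images = dataset.render(
    categories=["chair"],  # synset label, illustrative
    sample_nums=[2],
    device="cuda:0",
    raster_settings=RasterizationSettings(image_size=256),
)
# images: (2, H, W, 3) tensor per the docstring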
Example #20
def _render(
    mesh: Meshes,
    name: str,
    dist: float = 3.0,
    elev: float = 10.0,
    azim: float = 0,
    image_size: int = 256,
    pan=None,
    RT=None,
    use_ambient=False,
):
    device = mesh.device
    if RT is not None:
        R, T = RT
    else:
        R, T = look_at_view_transform(dist, elev, azim)
        if pan is not None:
            R, T = rotate_on_spot(R, T, pan)
    cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

    raster_settings = RasterizationSettings(image_size=image_size,
                                            blur_radius=0.0,
                                            faces_per_pixel=1)

    # Init shader settings
    if use_ambient:
        lights = AmbientLights(device=device)
    else:
        lights = PointLights(device=device)
        lights.location = torch.tensor([0.0, 0.0, 2.0], device=device)[None]

    blend_params = BlendParams(
        sigma=1e-1,
        gamma=1e-4,
        background_color=torch.tensor([1.0, 1.0, 1.0], device=device),
    )
    # Init renderer
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras,
                                  raster_settings=raster_settings),
        shader=HardPhongShader(device=device,
                               lights=lights,
                               cameras=cameras,
                               blend_params=blend_params),
    )

    output = renderer(mesh)

    image = (output[0, ..., :3].cpu().numpy() * 255).astype(np.uint8)

    if DEBUG:
        Image.fromarray(image).save(DATA_DIR / f"glb_{name}_.png")

    return image
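
A minimal sketch of driving _render with a textured test mesh; the ico_sphere and TexturesVertex imports are assumed from pytorch3d.

import torch
from pytorch3d.renderer import TexturesVertex
from pytorch3d.utils import ico_sphere

mesh = ico_sphere(2)
mesh.textures = TexturesVertex(verts_features=torch.ones_like(mesh.verts_padded()))
image = _render(mesh, "sphere", dist=3.0, elev=10.0, azim=0)  # (H, W, 3) uint8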
Example #21
    def set_phong_renderer(self, light_location):
        # Place a point light in front of the object
        self.light_location = light_location
        lights = PointLights(device=self.device, location=[light_location])

        # Create a phong renderer by composing a rasterizer and a shader
        self.phong_renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=self.cameras,
                raster_settings=self.raster_settings
            ),
            shader=HardPhongShader(device=self.device, lights=lights)
        )
Example #22
    def render_phong(self, meshes):
        lights = PointLights(device=self.device, location=((0.0, 0.0, 2.0), ))
        raster_settings = RasterizationSettings(
            image_size=self.resulution,
            blur_radius=0.0,
            faces_per_pixel=1,
        )
        phong_renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=self.cameras,
                                      raster_settings=raster_settings),
            shader=HardPhongShader(device=self.device, lights=lights))

        return phong_renderer(meshes_world=meshes)
Example #23
 def __init__(self,
              device="cpu",
              cameras=None,
              lights=None,
              materials=None,
              blend_params=None):
     super().__init__()
     self.lights = lights if lights is not None else PointLights(device=device)
     self.materials = materials if materials is not None else Materials(device=device)
     self.cameras = cameras
     self.blend_params = blend_params if blend_params is not None else BlendParams()
Example #24
    def __init__(self,
                 dist=2,
                 elev=0,
                 azimuth=180,
                 fov=40,
                 image_size=256,
                 R=None,
                 T=None,
                 cameras=None,
                 return_format="torch",
                 device='cuda'):
        super().__init__()
        # If you provide R and T, you don't need dist, elev, azimuth, fov
        self.device = device
        self.return_format = return_format

        # Data structures and functions for rendering
        if cameras is None:
            if R is None and T is None:
                R, T = look_at_view_transform(dist, elev, azimuth)
            cameras = FoVPerspectiveCameras(R=R,
                                            T=T,
                                            znear=1,
                                            zfar=10000,
                                            fov=fov,
                                            degrees=True,
                                            device=device)
            # cameras = PerspectiveCameras(R=R, T=T, focal_length=1.6319*10, device=device)

        self.raster_settings = RasterizationSettings(
            image_size=image_size,
            blur_radius=0.0,  # no blur
            bin_size=0,
        )
        # Place lights at the same point as the camera
        location = T
        if location is None:
            location = ((0, 0, 0), )
        lights = PointLights(ambient_color=((0.3, 0.3, 0.3), ),
                             diffuse_color=((0.7, 0.7, 0.7), ),
                             device=device,
                             location=location)

        self.mesh_rasterizer = MeshRasterizer(
            cameras=cameras, raster_settings=self.raster_settings)
        self._renderer = MeshRenderer(rasterizer=self.mesh_rasterizer,
                                      shader=SoftPhongShader(device=device,
                                                             cameras=cameras,
                                                             lights=lights))
        self.cameras = self.mesh_rasterizer.cameras
Example #25
def render(mesh, model_id, shapenet_dataset, device, camera=None):
    # Rendering settings.
    # camera_distance = 1
    # camera_elevation = 0.5 + 100 * random.random()
    # camera_azimuth = 30 + 90 * random.random()
    # R, T = look_at_view_transform(camera_distance, camera_elevation, camera_azimuth)
    # camera = FoVPerspectiveCameras(R=R, T=T, device=device)
    # raster_settings = RasterizationSettings(image_size=512)
    # lights = PointLights(location=torch.tensor([0.0, 1.0, -2.0], device=device)[None],device=device)
    # #rendering_settings = cameras, raster_settings, lights
    # image = shapenet_dataset.render(
    #     model_ids=[model_id],
    #     device=device,
    #     cameras=camera,
    #     raster_settings=raster_settings,
    #     lights=lights,
    # )[..., :3]
    if not camera:
        camera_elevation = 180 * torch.rand(1)  # torch.linspace(0, 180, batch_size)
        camera_azimuth = -180 + 360 * torch.rand(1)  # torch.linspace(-180, 180, batch_size)
        # R, T = look_at_view_transform(camera_distance, camera_elevation, camera_azimuth)
        R, T = look_at_view_transform(1.9, camera_elevation, camera_azimuth)
        camera = FoVPerspectiveCameras(R=R, T=T, device=device)
        camera.eval()  # necessary?
    raster_settings = RasterizationSettings(image_size=224)  # TODO ?????
    lights = PointLights(location=torch.tensor([0.0, 1.0, -2.0],
                                               device=device)[None],
                         device=device)

    renderer = MeshRenderer(rasterizer=MeshRasterizer(
        cameras=camera, raster_settings=raster_settings),
                            shader=HardPhongShader(device=device,
                                                   cameras=camera))
    renderer.eval()
    image = renderer(mesh)[..., :3]
    #plt.imshow(image.squeeze().detach().cpu().numpy())
    #plt.show()
    image = image.permute(0, 3, 1, 2)
    return image, camera  #TODO batch of images
Example #26
    def __init__(self, meshes, image_size=256, device='cuda'):

        super(ColorRenderer, self).__init__()

        self.meshes = meshes
        cameras = OpenGLOrthographicCameras(device=device)

        raster_settings = RasterizationSettings(image_size=image_size,
                                                blur_radius=0.0,
                                                faces_per_pixel=1,
                                                bin_size=0)
        lights = PointLights(device=device, location=((2.0, 2.0, -2.0), ))
        self.renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=TexturedSoftPhongShader(device=device, lights=lights))
Example #27
def save_p3d_mesh(verts, faces, filling_factors):
    features = [(int(i * 255), 0, 0) for i in filling_factors]
    features = torch.unsqueeze(torch.Tensor(features), 0)
    if torch.cuda.is_available():
        device = torch.device("cuda:0")
        torch.cuda.set_device(device)
    else:
        device = torch.device("cpu")

    texture = TexturesVertex(features)
    mesh = Meshes(torch.unsqueeze(torch.Tensor(verts), 0),
                  torch.unsqueeze(torch.tensor(faces, dtype=torch.int64), 0),
                  texture).to(device)  # faces must be integer indices; .to(device) also works on CPU

    # Initialize a camera.
    # Rotate the object by increasing the elevation and azimuth angles
    R, T = look_at_view_transform(dist=2.0, elev=-50, azim=-90)
    cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

    # Define the settings for rasterization and shading. Here we set the output
    # image to be of size 1024x1024. As we are rendering images for
    # visualization purposes only, we set faces_per_pixel=1 and blur_radius=0.0.
    # Refer to rasterize_meshes.py for explanations of these parameters.
    raster_settings = RasterizationSettings(
        image_size=1024,
        blur_radius=0.0,
        faces_per_pixel=1,
    )

    # Place a point light in front of the object, along the -z direction.
    lights = PointLights(device=device, location=[[0.0, 0.0, -3.0]])

    # Create a Phong renderer by composing a rasterizer and a shader. The soft
    # Phong shader interpolates the per-vertex colors and applies the Phong
    # lighting model.
    renderer = MeshRenderer(rasterizer=MeshRasterizer(
        cameras=cameras, raster_settings=raster_settings),
                            shader=SoftPhongShader(device=device,
                                                   cameras=cameras,
                                                   lights=lights))

    img = renderer(mesh)
    plt.figure(figsize=(10, 10))
    plt.imshow(img[0].cpu().numpy())
    plt.show()
Example #28
 def render_with_different_azim_elev_size(self, dist, elev_size, azim_size,
                                          light_location, output_path):
     azims = torch.linspace(-180, 180, azim_size)
     elevs = torch.linspace(0, 180, elev_size)
     self.light_location = light_location
     lights = PointLights(device=self.device,
                          location=[self.light_location])
     index = 0
     for elev in elevs:
         for azim in azims:
             R, T = look_at_view_transform(dist=dist, elev=elev, azim=azim)
             self.cameras = OpenGLPerspectiveCameras(device=self.device,
                                                     R=R,
                                                     T=T)
             renderer = MeshRenderer(
                 rasterizer=MeshRasterizer(
                     cameras=self.cameras,
                     raster_settings=self.raster_settings),
                 shader=TexturedSoftPhongShader(device=self.device,
                                                cameras=self.cameras,
                                                lights=lights))
             images = renderer(self.meshes,
                               cameras=self.cameras,
                               lights=lights)
             img = images[0, ..., :3].cpu().numpy() * 255
             img = img.astype('uint8')
             img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)  # renderer outputs RGB; OpenCV writes BGR
             extent = "{:06}".format(index)
             cv2.imwrite(output_path + 'frame-' + extent + '.render.png',
                         img)
             # Save extrinsic
             M = np.zeros((4, 4))
             M[:3, :3] = R[0].numpy()  # R, T carry a batch dimension of 1
             M[:3, 3] = T[0].numpy()
             M[3, :] = [0, 0, 0, 1]
             with open(output_path + "/frame-" + extent + ".pose.txt",
                       "w") as f:
                 np.savetxt(f, M[0, :], fmt="%.5f", newline=' ')
                 f.write('\n')
                 np.savetxt(f, M[1, :], fmt="%.5f", newline=' ')
                 f.write('\n')
                 np.savetxt(f, M[2, :], fmt="%.5f", newline=' ')
                 f.write('\n')
                 np.savetxt(f, M[3, :], fmt="%.5f", newline=' ')
             index += 1
Example #29
 def setup(self, device):
     if self.renderer is not None: return
     R, T = look_at_view_transform(self.opt.viewpoint_distance,
                                   self.opt.viewpoint_elevation,
                                   self.opt.viewpoint_azimuth,
                                   device=device)
     cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
     raster_settings = PointsRasterizationSettings(
         image_size=self.opt.raster_image_size,
         radius=self.opt.raster_radius,
         points_per_pixel=self.opt.raster_points_per_pixel,
     )
     rasterizer = PointsRasterizer(cameras=cameras,
                                   raster_settings=raster_settings)
     lights = PointLights(device=device,
                          location=[self.opt.lights_location])
     # Note: the lights are unused below; PulsarPointsRenderer is constructed
     # without them.
     self.renderer = PulsarPointsRenderer(rasterizer=rasterizer,
                                          n_channels=3).to(device)
Example #30
    def init_render(self):
        cameras = FoVPerspectiveCameras()
        raster_settings = RasterizationSettings(image_size=128,
                                                blur_radius=0.0,
                                                faces_per_pixel=1)
        lights = PointLights(
            ambient_color=((1.0, 1.0, 1.0), ),
            diffuse_color=((0.0, 0.0, 0.0), ),
            specular_color=((0.0, 0.0, 0.0), ),
            location=((0.0, 0.0, 1e5), ),
        )
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=HardGouraudShader(cameras=cameras, lights=lights),
        )
        return renderer