Example #1
    def __init__(self, image_size):
        super(Renderer, self).__init__()

        self.image_size = image_size
        self.dog_obj = load_objs_as_meshes(['data/dog_B/dog_B/dog_B_tpose.obj'])

        raster_settings = RasterizationSettings(
            image_size=self.image_size, 
            blur_radius=0.0, 
            faces_per_pixel=1, 
            bin_size=None
        )

        R, T = look_at_view_transform(2.7, 0, 0) 
        cameras = OpenGLPerspectiveCameras(device=R.device, R=R, T=T)
        lights = PointLights(device=R.device, location=[[0.0, 1.0, 0.0]])

        self.renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=cameras, 
                raster_settings=raster_settings
            ),
            shader=SoftPhongShader(
                device=R.device, 
                cameras=cameras,
                lights=lights
            )
        )
Example #2
def test():
    with torch.no_grad():
        for i in trange(len(perspectives)):
            R, T = perspectives[i]
            cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
            if isinstance(bsdf, ComposeSpatialVarying):
                got, _ = pt.pathtrace(
                    shape,
                    size=SIZE,
                    chunk_size=SIZE,
                    bundle_size=1,
                    bsdf=bsdf,
                    integrator=BasisBRDF(bsdf),
                    cameras=cameras,
                    lights=lights,
                    device=device,
                    silent=True,
                    background=0,
                )
                f, axes = plt.subplots(r, c)
                f.set_figheight(10)
                f.set_figwidth(10)
                got = got.unsqueeze(-1).expand(got.shape + (3, ))
                for k, img in enumerate(got.split(1, dim=-2)):
                    img = img.squeeze(-2).cpu().numpy()
                    axes[unroll(k, c)].imshow(img)
                    axes[unroll(k, c)].axis('off')
                plt.subplots_adjust(wspace=0, hspace=0)
                plt.savefig(f"outputs/weights_{i:04}.png", bbox_inches="tight")
                plt.clf()
                plt.close(f)
            #normals, _ = pt.pathtrace(
            #  shape,
            #  size=SIZE, chunk_size=SIZE, bundle_size=1, bsdf=bsdf, integrator=Debug(),
            #  cameras=cameras, lights=lights, device=device, silent=True,
            #  background=0,
            #)
            #save_image(f"outputs/normals_{i:04}.png", normals)
            #illum, _ = pt.pathtrace(
            #  shape,
            #  size=SIZE, chunk_size=SIZE, bundle_size=1, bsdf=bsdf, integrator=Illumination(),
            #  cameras=cameras, lights=lights, device=device, silent=True,
            #)
            #save_image(f"outputs/illum_{i:04}.png", illum)

            if integrator is not None:
                got, _ = pt.pathtrace(
                    shape,
                    size=SIZE,
                    chunk_size=SIZE,
                    bundle_size=1,
                    bsdf=bsdf,
                    integrator=integrator,
                    cameras=cameras,
                    lights=lights,
                    device=device,
                    silent=True,
                    background=0,
                )
                save_image(f"outputs/got_{i:04}.png", got)
Example #3
    def set_renderer(self):
        cameras = OpenGLPerspectiveCameras(device=self.cuda_device,
                                           degrees=True,
                                           fov=VIEW['fov'],
                                           znear=VIEW['znear'],
                                           zfar=VIEW['zfar'])

        raster_settings = RasterizationSettings(image_size=VIEW['viewport'][0],
                                                blur_radius=0.0,
                                                faces_per_pixel=1,
                                                bin_size=0)

        lights = DirectionalLights(
            device=self.cuda_device,
            direction=((-40, 200, 100), ),
            ambient_color=((0.5, 0.5, 0.5), ),
            diffuse_color=((0.5, 0.5, 0.5), ),
            specular_color=((0.0, 0.0, 0.0), ),
        )

        self.renderer = MeshRenderer(
            rasterizer=MeshRasterizer(cameras=cameras,
                                      raster_settings=raster_settings),
            shader=TexturedSoftPhongShader(device=self.cuda_device,
                                           cameras=cameras,
                                           lights=lights))
Example #4
File: models.py Project: yule-li/MeInGame
    def init_renderer(self):
        # nsh_face_mesh = meshio.Mesh('data/mesh/nsh_bfm_face.obj')
        # self.nsh_face_tri = torch.from_numpy(nsh_face_mesh.triangles).type(
        #     torch.int64).to(self.device)

        R, T = look_at_view_transform(10, 0, 0)
        cameras = OpenGLPerspectiveCameras(znear=0.001,
                                           zfar=30.0,
                                           aspect_ratio=1.0,
                                           fov=12.5936,
                                           degrees=True,
                                           R=R,
                                           T=T,
                                           device=self.device)
        raster_settings = RasterizationSettings(image_size=self.im_size,
                                                blur_radius=0.0,
                                                faces_per_pixel=1,
                                                bin_size=0,
                                                cull_backfaces=True)
        self.rasterizer = MeshRasterizer(cameras=cameras,
                                         raster_settings=raster_settings)
        lights = DirectionalLights(device=self.device)
        shader = TexturedSoftPhongShader(device=self.device,
                                         cameras=cameras,
                                         lights=lights)
        self.renderer = MeshRenderer(rasterizer=self.rasterizer, shader=shader)
Example #5
def render_obj(verts, faces, distance, elevation, azimuth):
    device = torch.device("cuda:0")

    verts_rgb = torch.ones_like(verts)[None]
    textures = Textures(verts_rgb=verts_rgb.to(device))

    cur_mesh = Meshes(verts=[verts.to(device)],
                      faces=[faces.to(device)],
                      textures=textures)

    cameras = OpenGLPerspectiveCameras(device=device)

    blend_params = BlendParams(sigma=1e-4, gamma=1e-4)

    raster_settings = RasterizationSettings(image_size=256,
                                            blur_radius=0.0,
                                            faces_per_pixel=1,
                                            bin_size=0)

    lights = PointLights(device=device, location=((2.0, 2.0, -2.0), ))
    phong_renderer = MeshRenderer(rasterizer=MeshRasterizer(
        cameras=cameras, raster_settings=raster_settings),
                                  shader=PhongShader(device=device,
                                                     lights=lights))

    R, T = look_at_view_transform(distance, elevation, azimuth, device=device)

    return phong_renderer(meshes_world=cur_mesh, R=R, T=T).cpu().numpy()
Example #6
def project_mesh(mesh, angle):
    start = time.time()
    m = Metadata()
    R, T = look_at_view_transform(1.75,
                                  -45,
                                  angle,
                                  up=((0, 1, 0), ),
                                  at=((0, -0.25, 0), ))
    cameras = OpenGLPerspectiveCameras(device=m.device, R=R, T=T)
    raster_settings = m.raster_settings
    lights = m.lights
    renderer = MeshRenderer(rasterizer=MeshRasterizer(
        cameras=cameras, raster_settings=raster_settings),
                            shader=HardFlatShader(cameras=cameras,
                                                  device=m.device,
                                                  lights=lights))
    verts = mesh.verts_list()[0]

    # faces = meshes.faces_list()[0]

    verts_rgb = torch.ones_like(verts)[None]  # (1, V, 3)
    # verts_rgb = torch.ones((len(mesh.verts_list()[0]), 1))[None]  # (1, V, 3)
    textures = Textures(verts_rgb=verts_rgb.to(m.device))

    mesh.textures = textures
    mesh.textures._num_faces_per_mesh = mesh._num_faces_per_mesh.tolist()
    mesh.textures._num_verts_per_mesh = mesh._num_verts_per_mesh.tolist()

    image = renderer(mesh)
    return image
Example #7
def sphere_examples(bsdf, device="cuda", size=256, chunk_size=128, scale=100):
    from pytorch3d.pathtracer.shapes import Sphere
    from pytorch3d.pathtracer.bsdf import Diffuse
    from pytorch3d.pathtracer import pathtrace
    from pytorch3d.renderer import (
        look_at_view_transform,
        OpenGLPerspectiveCameras,
        PointLights,
    )
    import pytorch3d.pathtracer.integrators as integrators
    sphere = Sphere([0, 0, 0], 1, device=device)
    R, T = look_at_view_transform(dist=2., elev=0, azim=0)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    lights = PointLights(device=device, location=[[0., 1., 4.]], scale=scale)
    out = []

    for basis in bsdf.bsdfs:
        expected = pathtrace(
            sphere,
            cameras=cameras,
            lights=lights,
            chunk_size=chunk_size,
            size=size,
            bsdf=basis,
            integrator=integrators.Direct(),
            device=device,
            silent=True,
        )[0]
        out.append(expected)
    return out
Example #8
def sphere_render_bsdf(bsdf,
                       integrator=None,
                       device="cuda",
                       size=256,
                       chunk_size=128,
                       scale=100):
    from pytorch3d.pathtracer.shapes import Sphere
    from pytorch3d.pathtracer.bsdf import Diffuse
    from pytorch3d.pathtracer import pathtrace
    from pytorch3d.renderer import (
        look_at_view_transform,
        OpenGLPerspectiveCameras,
        PointLights,
    )
    import pytorch3d.pathtracer.integrators as integrators
    sphere = Sphere([0, 0, 0], 1, device=device)
    R, T = look_at_view_transform(dist=2., elev=0, azim=0)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    lights = PointLights(device=device, location=[[0., 1., 4.]], scale=scale)
    if integrator is None:
        integrator = integrators.Direct()
    return pathtrace(
        sphere,
        cameras=cameras,
        lights=lights,
        chunk_size=chunk_size,
        size=size,
        bsdf=bsdf,
        integrator=integrator,
        device=device,
        silent=True,
    )[0]
Example #9
def createRenderer(image_size,faces_per_pixel,lights_location):
    
    # Function: createRenderer
    # Inputs:   image_size,faces_per_pixel,lights_location
    # Process:  creates an image renderer
    # Output:   returns renderer
        
    cameras = OpenGLPerspectiveCameras()
    
    #Settings for Raster
    raster_settings = RasterizationSettings(
        image_size=image_size, 
        blur_radius=0.0, 
        faces_per_pixel=faces_per_pixel, 
    )

    # We can add a point light in front of the object. 
    lights = PointLights(location=(lights_location,))
    created_renderer = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=cameras, 
            raster_settings=raster_settings
        ),
        shader=HardPhongShader(cameras=cameras, lights=lights)
    )
    
    return created_renderer
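
A minimal usage sketch for createRenderer above. The OBJ path is illustrative and the mesh must carry textures for the Phong shader; neither comes from the original project.

from pytorch3d.io import load_objs_as_meshes

renderer = createRenderer(image_size=512, faces_per_pixel=1,
                          lights_location=(0.0, 2.0, 0.0))
mesh = load_objs_as_meshes(["cow.obj"])  # illustrative path to a textured OBJ
images = renderer(mesh)  # (1, 512, 512, 4) RGBA tensor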
Example #10
def visualize_pred(img,
                   category,
                   pred,
                   image_name,
                   mesh_path,
                   down_sample_rate=8,
                   device='cuda:0'):
    render_image_size = max(IMAGE_SIZES[category])
    crop_size = IMAGE_SIZES[category]

    cameras = OpenGLPerspectiveCameras(device=device, fov=12.0)
    raster_settings = RasterizationSettings(image_size=render_image_size,
                                            blur_radius=0.0,
                                            faces_per_pixel=1,
                                            bin_size=0)
    raster_settings1 = RasterizationSettings(image_size=render_image_size //
                                             down_sample_rate,
                                             blur_radius=0.0,
                                             faces_per_pixel=1,
                                             bin_size=0)
    rasterizer = MeshRasterizer(cameras=cameras,
                                raster_settings=raster_settings1)
    lights = PointLights(device=device, location=((2.0, 2.0, -2.0), ))
    phong_renderer = MeshRenderer(rasterizer=MeshRasterizer(
        cameras=cameras, raster_settings=raster_settings),
                                  shader=HardPhongShader(device=device,
                                                         lights=lights,
                                                         cameras=cameras))

    theta_pred = pred['theta']
    elevation_pred = pred['elevation']
    azimuth_pred = pred['azimuth']
    distance_pred = pred['distance']
    cad_idx = pred['cad_idx']
    dx = pred['dx'] * down_sample_rate
    dy = pred['dy'] * down_sample_rate

    x3d, xface = load_off(mesh_path + '/%02d.off' % cad_idx)

    verts = torch.from_numpy(x3d).to(device)
    verts = pre_process_mesh_pascal(verts)
    faces = torch.from_numpy(xface).to(device)

    verts_rgb = torch.ones_like(verts)[None]
    textures = Textures(verts_rgb=verts_rgb.to(device))
    meshes = Meshes(verts=[verts], faces=[faces], textures=textures)

    img_ = get_img(theta_pred, elevation_pred, azimuth_pred, distance_pred,
                   meshes, phong_renderer, crop_size, render_image_size,
                   device)
    C = camera_position_from_spherical_angles(distance_pred,
                                              elevation_pred,
                                              azimuth_pred,
                                              degrees=False,
                                              device=device)
    # get_image = np.concatenate((img, alpha_merge_imgs(img, img_)), axis=1)
    img_ = shift_img(img_, dx, dy)
    get_image = alpha_merge_imgs(img, img_)

    Image.fromarray(get_image).save(image_name)
Example #11
 def render_with_batch_size(self, batch_size, dist, light_location,
                            output_path):
     self.meshes = self.meshes.extend(batch_size)
     self.batch_size = batch_size
     elev = torch.linspace(0, 180, batch_size)
     azim = torch.linspace(-180, 180, batch_size)
     self.R, self.T = look_at_view_transform(dist=dist,
                                             elev=elev,
                                             azim=azim)
     self.cameras = OpenGLPerspectiveCameras(device=self.device,
                                             R=self.R,
                                             T=self.T)
     # set the light location
     self.light_location = light_location
     lights = PointLights(device=self.device,
                          location=[self.light_location])
     # call the PyTorch3D mesh renderer with a Phong shader
     renderer = MeshRenderer(
         rasterizer=MeshRasterizer(cameras=self.cameras,
                                   raster_settings=self.raster_settings),
         shader=TexturedSoftPhongShader(device=self.device,
                                        cameras=self.cameras,
                                        lights=lights))
     images = renderer(self.meshes, cameras=self.cameras, lights=lights)
     for i in range(self.batch_size):
         img = images[i, ..., :3].cpu().numpy() * 255
         img = img.astype('uint8')
         img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
         cv2.imwrite(output_path + 'render-image-' + str(i) + '.png', img)
Example #12
    def init_camera(self, focal, fov):
        if fov is None:
            fov = 2 * np.arctan(self.size / (focal * 2)) * 180 / np.pi

        camera = OpenGLPerspectiveCameras(zfar=350,
                                          fov=fov,
                                          device=self.device)
        return camera
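
A quick numeric check of the pinhole relation used above, with self.size taken to be the image side length in pixels (values illustrative):

import numpy as np

# fov = 2 * arctan(size / (2 * focal)); a focal length equal to the
# image side gives 2 * arctan(0.5) ≈ 53.13 degrees.
size, focal = 512, 512
fov = 2 * np.arctan(size / (focal * 2)) * 180 / np.pi
print(round(fov, 2))  # 53.13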
Example #13
def load_perspective_cameras():
    device = torch.device(config.cuda.device)

    return OpenGLPerspectiveCameras(device=device,
                                    degrees=True,
                                    fov=config.generate.fov,
                                    znear=config.generate.znear,
                                    zfar=config.generate.zfar)
Example #14
def load_perspective_cameras():
    device = torch.device(config.cuda.device)

    return OpenGLPerspectiveCameras(device=device,
                                    degrees=True,
                                    fov=30,
                                    znear=0.00001,
                                    zfar=10000)
Example #15
class Renderer(torch.nn.Module):
    def __init__(self, image_size, device):
        super(Renderer, self).__init__()

        self.image_size = image_size
        R, T = look_at_view_transform(2.7, 0, 0, device=device) 
        self.cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        self.mesh_color = torch.FloatTensor(config.MESH_COLOR).to(device)[None, None, :] / 255.0

        blend_params = BlendParams(sigma=1e-4, gamma=1e-4)
        raster_settings = RasterizationSettings(
            image_size=self.image_size, 
            blur_radius=np.log(1. / 1e-4 - 1.) * blend_params.sigma, 
            faces_per_pixel=100, 
        )

        self.silhouette_renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=self.cameras, 
                raster_settings=raster_settings
            ),
            shader=SoftSilhouetteShader(blend_params=blend_params)
        )

        raster_settings_color = RasterizationSettings(
            image_size=self.image_size, 
            blur_radius=0.0, 
            faces_per_pixel=1, 
        )
        
        lights = PointLights(device=device, location=[[0.0, 0.0, 3.0]])

        self.color_renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=self.cameras, 
                raster_settings=raster_settings_color
            ),
            shader=HardPhongShader(
                device=device, 
                cameras=self.cameras,
                lights=lights,
            )
        )

    def forward(self, vertices, points, faces, render_texture=False):
        tex = torch.ones_like(vertices) * self.mesh_color # (1, V, 3)
        textures = Textures(verts_rgb=tex)

        mesh = Meshes(verts=vertices, faces=faces, textures=textures)
        sil_images = self.silhouette_renderer(mesh)[..., -1].unsqueeze(1)
        screen_size = torch.ones(vertices.shape[0], 2).to(vertices.device) * self.image_size
        proj_points = self.cameras.transform_points_screen(points, screen_size)[:, :, [1, 0]]

        if render_texture:
            color_image = self.color_renderer(mesh).permute(0, 3, 1, 2)[:, :3, :, :]
            return sil_images, proj_points, color_image
        else:
            return sil_images, proj_points
Example #16
def createRenderer(device, camera, light, imageSize):
    '''
    It creates a pytorch3D renderer with the given camera pose, light source
    and output image size.

    Parameters
    ----------
    device : 
        Device on which the renderer is created.
    camera : 
        Camera pose.
    light  : 
        Position of the light source.
    imageSize : 
        The size of the rendered image.

    Returns
    -------
    renderer : 
        Pytorch3D renderer.

    '''
    if camera is None:
        camera = (2.0, -20.0, 180.0)
    if light is None:
        light = (0.0, 2.0, 0.0)

    # Initialize an OpenGL perspective camera.
    # With world coordinates +Y up, +X left and +Z into the screen.
    R, T = look_at_view_transform(camera[0], camera[1], camera[2])
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

    # Define the settings for rasterization and shading. The output image size is
    # given by imageSize. As we are rendering images for visualization purposes
    # only, we set faces_per_pixel=1 and blur_radius=0.0. We also leave bin_size
    # and max_faces_per_bin at their default of None, which ensures that the
    # faster coarse-to-fine rasterization method is used. Refer to
    # rasterize_meshes.py for explanations of these parameters, and to
    # docs/notes/renderer.md for the difference between naive and coarse-to-fine
    # rasterization.
    raster_settings = RasterizationSettings(
        image_size=imageSize,
        blur_radius=0.0,
        faces_per_pixel=1,
    )

    # Place a point light at the given location (default (0, 2, 0), above the object).
    lights = PointLights(device=device,
                         location=[[light[0], light[1], light[2]]])

    # Create a phong renderer by composing a rasterizer and a shader.
    renderer = MeshRenderer(rasterizer=MeshRasterizer(
        cameras=cameras, raster_settings=raster_settings),
                            shader=HardPhongShader(device=device,
                                                   cameras=cameras,
                                                   lights=lights))

    return renderer
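
A short usage sketch relying on the defaults defined above; the ico_sphere placeholder and the Textures import path are assumptions (the latter varies across PyTorch3D versions):

import torch
from pytorch3d.renderer import Textures  # import location varies by version
from pytorch3d.utils import ico_sphere

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
renderer = createRenderer(device, camera=None, light=None, imageSize=256)

# Placeholder mesh; any textured Meshes object works here.
mesh = ico_sphere(level=2, device=device)
mesh.textures = Textures(verts_rgb=torch.ones_like(mesh.verts_padded()))
images = renderer(mesh)  # (1, 256, 256, 4) RGBA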
Example #17
    def __init__(self, image_size, device):
        super(Renderer, self).__init__()

        self.image_size = image_size
        R, T = look_at_view_transform(2.7, 0, 0, device=device) 
        self.cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
        self.mesh_color = torch.FloatTensor(config.MESH_COLOR).to(device)[None, None, :] / 255.0

        blend_params = BlendParams(sigma=1e-4, gamma=1e-4)
        raster_settings = RasterizationSettings(
            image_size=self.image_size, 
            blur_radius=np.log(1. / 1e-4 - 1.) * blend_params.sigma, 
            faces_per_pixel=100, 
        )

        self.silhouette_renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=self.cameras, 
                raster_settings=raster_settings
            ),
            shader=SoftSilhouetteShader(blend_params=blend_params)
        )

        raster_settings_color = RasterizationSettings(
            image_size=self.image_size, 
            blur_radius=0.0, 
            faces_per_pixel=1, 
        )
        
        lights = PointLights(device=device, location=[[0.0, 0.0, 3.0]])

        self.color_renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=self.cameras, 
                raster_settings=raster_settings_color
            ),
            shader=HardPhongShader(
                device=device, 
                cameras=self.cameras,
                lights=lights,
            )
        )
Example #18
    def initialize_renderer(self):
        # Initialize an OpenGL perspective camera
        self.cameras = OpenGLPerspectiveCameras(device=self.device)

        self.raster_settings = RasterizationSettings(
            image_size = 512,
            blur_radius = 0.0,
            faces_per_pixel=2,
        )

        self.set_phong_renderer([0.0,3.0,5.0])
Example #19
    def render(self,
               model_ids: Optional[List[str]] = None,
               categories: Optional[List[str]] = None,
               sample_nums: Optional[List[int]] = None,
               idxs: Optional[List[int]] = None,
               shader_type=HardPhongShader,
               device="cpu",
               **kwargs) -> torch.Tensor:
        """
        If a list of model_ids are supplied, render all the objects by the given model_ids.
        If no model_ids are supplied, but categories and sample_nums are specified, randomly
        select a number of objects (number specified in sample_nums) in the given categories
        and render these objects. If instead a list of idxs is specified, check if the idxs
        are all valid and render models by the given idxs. Otherwise, randomly select a number
        (first number in sample_nums, default is set to be 1) of models from the loaded dataset
        and render these models.

        Args:
            model_ids: List[str] of model_ids of models intended to be rendered.
            categories: List[str] of categories intended to be rendered. categories
                and sample_nums must be specified at the same time. categories can be given
                in the form of synset offsets or labels, or a combination of both.
            sample_nums: List[int] of number of models to be randomly sampled from
                each category. Could also contain one single integer, in which case it
                will be broadcasted for every category.
            idxs: List[int] of indices of models to be rendered in the dataset.
            shader_type: Select shading. Valid options include HardPhongShader (default),
                SoftPhongShader, HardGouraudShader, SoftGouraudShader, HardFlatShader,
                SoftSilhouetteShader.
            device: torch.device on which the tensors should be located.
            **kwargs: Accepts any of the kwargs that the renderer supports.

        Returns:
            Batch of rendered images of shape (N, H, W, 3).
        """
        paths = self._handle_render_inputs(model_ids, categories, sample_nums,
                                           idxs)
        meshes = load_objs_as_meshes(paths, device=device, load_textures=False)
        meshes.textures = Textures(
            verts_rgb=torch.ones_like(meshes.verts_padded(), device=device))
        cameras = kwargs.get("cameras", OpenGLPerspectiveCameras()).to(device)
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=cameras,
                raster_settings=kwargs.get("raster_settings",
                                           RasterizationSettings()),
            ),
            shader=shader_type(
                device=device,
                cameras=cameras,
                lights=kwargs.get("lights", PointLights()).to(device),
            ),
        )
        return renderer(meshes)
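
The render method above follows the pattern of PyTorch3D's ShapeNet dataset classes. Assuming that context, a hedged usage sketch (dataset path and category are illustrative):

from pytorch3d.datasets import ShapeNetCore

dataset = ShapeNetCore("path/to/ShapeNetCore.v2", version=2)  # illustrative path
images = dataset.render(categories=["chair"], sample_nums=[2], device="cuda:0")
# images: (2, H, W, 3), one rendered view per sampled model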
Example #20
    def __init__(self, cfgs):
        super().__init__()
        self.device = cfgs.get('device', 'cpu')
        self.image_size = cfgs.get('image_size', 64)
        self.min_depth = cfgs.get('min_depth', 0.9)
        self.max_depth = cfgs.get('max_depth', 1.1)
        self.rot_center_depth = cfgs.get('rot_center_depth',
                                         (self.min_depth + self.max_depth) / 2)
        self.border_depth = cfgs.get(
            'border_depth', 0.3 * self.min_depth + 0.7 * self.max_depth)
        self.fov = cfgs.get('fov', 10)

        #### camera intrinsics
        #             (u)   (x)
        #    d * K^-1 (v) = (y)
        #             (1)   (z)

        ## renderer for visualization
        R = [[[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]]
        R = torch.FloatTensor(R).to(self.device)
        t = torch.zeros(1, 3, dtype=torch.float32).to(self.device)
        fx = (self.image_size - 1) / 2 / (math.tan(
            self.fov / 2 * math.pi / 180))
        fy = (self.image_size - 1) / 2 / (math.tan(
            self.fov / 2 * math.pi / 180))
        cx = (self.image_size - 1) / 2
        cy = (self.image_size - 1) / 2
        K = [[fx, 0., cx], [0., fy, cy], [0., 0., 1.]]
        K = torch.FloatTensor(K).to(self.device)
        self.inv_K = torch.inverse(K).unsqueeze(0)
        self.K = K.unsqueeze(0)

        # Initialize an OpenGL perspective camera.
        R = look_at_rotation(((0, 0, 0), ),
                             at=((0, 0, 1), ),
                             up=((0, -1, 0), ))
        cameras = OpenGLPerspectiveCameras(device=self.device,
                                           fov=self.fov,
                                           R=R)
        lights = DirectionalLights(
            ambient_color=((1.0, 1.0, 1.0), ),
            diffuse_color=((0.0, 0.0, 0.0), ),
            specular_color=((0.0, 0.0, 0.0), ),
            direction=((0, 1, 0), ),
            device=self.device,
        )
        raster_settings = RasterizationSettings(
            image_size=self.image_size,
            blur_radius=0.0,
            faces_per_pixel=1,
        )
        self.rasterizer_torch = MeshRasterizer(cameras=cameras,
                                               raster_settings=raster_settings)
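
A small sketch of the back-projection described by the intrinsics comment above, reusing the inv_K built in __init__ (the helper name is hypothetical):

import torch

def backproject(inv_K, u, v, d):
    # d * K^-1 @ (u, v, 1)^T maps pixel (u, v) at depth d into camera space.
    uv1 = torch.tensor([float(u), float(v), 1.0], device=inv_K.device)
    return d * (inv_K[0] @ uv1)

# The principal point lands on the optical axis:
# backproject(self.inv_K, cx, cy, d) == (0., 0., d)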
Example #21
    def init_test(self):
        self.segmenter = Segment(self.device)

        up_line = 100
        bt_line = 80

        self.transfers = {}
        self.uv_creators = {}
        self.nsh_face_tris = {}
        self.nsh_meshes = {}
        self.nsh_face_meshes = {}
        for face_model in ['230']:
            self.transfers[face_model] = Shape_Transfer(face_model=face_model,
                                                        device=self.device)
            self.uv_creators[face_model] = UVCreator(
                face_model=face_model,
                bfm_version=self.config.bfm_version,
                device=self.device)

            self.nsh_face_meshes[face_model] = meshio.Mesh(
                'data/mesh/{}/nsh_bfm_face.obj'.format(face_model))
            self.nsh_face_tris[face_model] = self.to_tensor(
                self.nsh_face_meshes[face_model].triangles, torch.int64)
            self.nsh_meshes[face_model] = meshio.Mesh(
                'data/mesh/{}/nsh_std.obj'.format(face_model), group=True)

        self.up_line = int(up_line * (self.config.uv_size / 1024))
        self.bt_line = int(bt_line * (self.config.uv_size / 1024))

        self.eye_lm_idx = np.loadtxt('data/mesh/eye_lm_idx.txt',
                                     dtype=np.int32)

        self.cropper = ImageCropper(self.config.im_size, use_dlib=False)
        self.reconstructor = Deep3DFace(self.sess, self.graph)

        R, T = look_at_view_transform(10, 0, 0)
        self.cameras = OpenGLPerspectiveCameras(znear=0.001,
                                                zfar=30.0,
                                                aspect_ratio=1.0,
                                                fov=12.5936,
                                                degrees=True,
                                                R=R,
                                                T=T,
                                                device=self.device)

        raster_settings = RasterizationSettings(image_size=512,
                                                blur_radius=0.0,
                                                faces_per_pixel=1,
                                                bin_size=0,
                                                cull_backfaces=True)
        self.rasterizer = MeshRasterizer(cameras=self.cameras,
                                         raster_settings=raster_settings)
Example #22
def build_renderer(_image_size):
    # Initialize an OpenGL perspective camera.
    cameras = OpenGLPerspectiveCameras(device=DEVICE, degrees=True, fov=FOV, znear=1e-4, zfar=100)

    raster_settings = RasterizationSettings(image_size=_image_size, blur_radius=0.0, faces_per_pixel=1, bin_size=0)

    lights = DirectionalLights(device=DEVICE, direction=((-40, 200, 100),), ambient_color=((0.5, 0.5, 0.5),),
                               diffuse_color=((0.5, 0.5, 0.5),), specular_color=((0.0, 0.0, 0.0),), )

    renderer = MeshRenderer(rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
                            shader=TexturedSoftPhongShader(device=DEVICE, cameras=cameras, lights=lights))

    return renderer
Example #23
def renderMultiPose(mesh,
                    render,
                    batch_size=20,
                    row: int = 4,
                    col: int = 5,
                    device='cuda'):
    '''
    It generates multiple camera poses and renders images using the settings
    of the given renderer. The number of poses is batch_size.

    Parameters
    ----------
    mesh : 
        Mesh to be rendered.
    render : 
        Pytorch3D renderer.
    batch_size : optional
        The number of camera poses to be generated. The default is 20.
    row : int, optional
        The number of rows when showing the images. The default is 4.
    col : int, optional
        The number of columns when showing the images. The default is 5.
    device : optional
        The device where the rendering takes place. The default is 'cuda'.

    Returns
    -------
    None.

    '''

    # Create a batch of meshes by repeating the input mesh and associated textures.
    # Meshes has a useful `extend` method which allows us do this very easily.
    # This also extends the textures.
    meshes = mesh.extend(batch_size)

    # Get a batch of viewing angles.
    elev = torch.linspace(0, 180, batch_size)
    azim = torch.linspace(-180, 180, batch_size)

    # All the cameras helper methods support mixed type inputs and broadcasting. So we can
    # view the camera from the same distance and specify dist=2.0 as a float,
    # and then specify elevation and azimuth angles for each viewpoint as tensors.
    R, T = look_at_view_transform(dist=2.0, elev=elev, azim=azim)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

    # We can pass arbitrary keyword arguments to the rasterizer/shader via the renderer
    # so the renderer does not need to be reinitialized if any of the settings change.
    images = render(meshes, cameras=cameras)

    image_grid(images.cpu().numpy(), rows=row, cols=col, rgb=True)
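
A possible pairing with the createRenderer helper from Example #16; textured_mesh is an assumed, already-textured Meshes object:

import torch

device = torch.device("cuda:0")
renderer = createRenderer(device, camera=None, light=None, imageSize=256)
renderMultiPose(textured_mesh, renderer, batch_size=20, row=4, col=5)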
Example #24
    def test_normal_rendering(self):
        device = torch.device('cuda:0')

        torch.cuda.set_device(device)
        # Set paths
        data_dir = Path(__file__).resolve().parent / 'data'
        data_dir.mkdir(exist_ok=True)
        obj_dir = Path(
            __file__).resolve().parent.parent / "docs/tutorials/data"
        obj_filename = obj_dir / "cow_mesh/cow.obj"

        # Load obj file
        mesh = load_objs_as_meshes([obj_filename], device=device)

        # try:
        #     texture_image = mesh.textures.maps_padded()
        # except:
        #     pass

        R, T = look_at_view_transform(2.55, 10, 180)
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

        # Define the settings for rasterization and shading. Here we set the output
        # image to be of size 512x512. As we are rendering images for visualization
        # purposes only, we set faces_per_pixel=1 and blur_radius=0.0. We also leave
        # bin_size and max_faces_per_bin at their default of None, which ensures
        # that the faster coarse-to-fine rasterization method is used. Refer to
        # rasterize_meshes.py for explanations of these parameters, and to
        # docs/notes/renderer.md for the difference between naive and coarse-to-fine
        # rasterization.
        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=0.0,
            faces_per_pixel=1,
            perspective_correct=True,
        )

        renderer = MeshRenderer(rasterizer=MeshRasterizer(
            cameras=cameras, raster_settings=raster_settings),
                                shader=NormalShader(
                                    device=device,
                                    cameras=cameras,
                                ))
        images = renderer(mesh)
        # cv2.imshow('render_normal_texture.png',
        #            ((255 * images[0, ..., :3]).squeeze().cpu().numpy().astype(np.uint8))[..., ::-1])
        Image.fromarray(
            ((255 * images[0, ..., :3]).squeeze().cpu().numpy().astype(
                np.uint8))).save(str(data_dir / 'render_normal_texture.png'))
        # cv2.imwrite(str(data_dir / 'render_normal_texture.png'),
        #             ((255 * images[0, ..., :3]).squeeze().cpu().numpy().astype(np.uint8))[..., ::-1])
        self.assertTrue((data_dir / 'render_normal_texture.png').exists())
Example #25
 def render_with_different_azim_elev_size(self, dist, elev_size, azim_size,
                                          light_location, output_path):
     azims = torch.linspace(-180, 180, azim_size)
     elevs = torch.linspace(0, 180, elev_size)
     self.light_location = light_location
     lights = PointLights(device=self.device,
                          location=[self.light_location])
     index = 0
     for elev in elevs:
         for azim in azims:
             R, T = look_at_view_transform(dist=dist, elev=elev, azim=azim)
             self.cameras = OpenGLPerspectiveCameras(device=self.device,
                                                     R=R,
                                                     T=T)
             renderer = MeshRenderer(
                 rasterizer=MeshRasterizer(
                     cameras=self.cameras,
                     raster_settings=self.raster_settings),
                 shader=TexturedSoftPhongShader(device=self.device,
                                                cameras=self.cameras,
                                                lights=lights))
             images = renderer(self.meshes,
                               cameras=self.cameras,
                               lights=lights)
             img = images[0, ..., :3].cpu().numpy() * 255
             img = img.astype('uint8')
             img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
             extent = "{:06}".format(index)
             cv2.imwrite(output_path + 'frame-' + extent + '.render.png',
                         img)
             # Save extrinsic
             M = np.zeros((4, 4))
             M[:3, :3] = R[0].numpy()
             M[:3, 3] = T[0].numpy()
             M[3, :] = [0, 0, 0, 1]
             with open(output_path + "/frame-" + extent + ".pose.txt",
                       "w") as f:
                 np.savetxt(f, M[0, :], fmt="%.5f", newline=' ')
                 f.write('\n')
                 np.savetxt(f, M[1, :], fmt="%.5f", newline=' ')
                 f.write('\n')
                 np.savetxt(f, M[2, :], fmt="%.5f", newline=' ')
                 f.write('\n')
                 np.savetxt(f, M[3, :], fmt="%.5f", newline=' ')
             index += 1
Example #26
def load_cameras(moon_view: MoonView):
    device = torch.device('cuda')

    eye = moon_view.eye
    at = moon_view.at
    up = moon_view.up

    R, T = look_at_view_transform(eye=((eye[0], eye[1], eye[2]), ),
                                  at=((at[0], at[1], at[2]), ),
                                  up=((up[0], up[1], up[2]), ))

    return OpenGLPerspectiveCameras(device=device,
                                    R=R,
                                    T=T,
                                    degrees=True,
                                    fov=moon_view.fov,
                                    znear=moon_view.znear,
                                    zfar=moon_view.zfar)
Example #27
def visualize_gif(img, distance_ckpt, elevation_ckpt, azimuth_ckpt,
                  theta_ckpt):
    cameras = OpenGLPerspectiveCameras(device=device, fov=12.0)
    raster_settings = RasterizationSettings(image_size=render_image_size,
                                            blur_radius=0.0,
                                            faces_per_pixel=1,
                                            bin_size=0)
    raster_settings1 = RasterizationSettings(image_size=render_image_size //
                                             down_sample_rate,
                                             blur_radius=0.0,
                                             faces_per_pixel=1,
                                             bin_size=0)
    rasterizer = MeshRasterizer(cameras=cameras,
                                raster_settings=raster_settings1)
    lights = PointLights(device=device, location=((2.0, 2.0, -2.0), ))
    phong_renderer = MeshRenderer(rasterizer=MeshRasterizer(
        cameras=cameras, raster_settings=raster_settings),
                                  shader=HardPhongShader(device=device,
                                                         lights=lights,
                                                         cameras=cameras))

    distance_ckpt = torch.from_numpy(np.array(distance_ckpt,
                                              dtype=np.float32)).to(device)
    elevation_ckpt = torch.from_numpy(
        np.array(elevation_ckpt, dtype=np.float32)).to(device)
    azimuth_ckpt = torch.from_numpy(np.array(azimuth_ckpt,
                                             dtype=np.float32)).to(device)
    theta_ckpt = torch.from_numpy(np.array(theta_ckpt,
                                           dtype=np.float32)).to(device)
    img_ = get_img(theta_ckpt, elevation_ckpt, azimuth_ckpt, distance_ckpt,
                   phong_renderer)
    C = camera_position_from_spherical_angles(distance_ckpt,
                                              elevation_ckpt,
                                              azimuth_ckpt,
                                              degrees=False,
                                              device=device)
    get_images = []
    for i in range(10):
        get_image = np.concatenate((img, alpha_merge_imgs(img, img_[i])),
                                   axis=1)
        get_images.append(Image.fromarray(get_image))
    get_images[0].save('optim.gif',
                       save_all=True,
                       append_images=get_images[1::])
Example #28
    def forward(self, z):
        deform_verts = self.layers(z)
        new_meshes = self.src_meshes.offset_verts(deform_verts.view(-1, 3))

        # elev = torch.FloatTensor(z.shape[0]).uniform_(-5, 5)
        r = 60
        azim = torch.FloatTensor(z.shape[0]).uniform_(180 - r, 180 + r)

        elev = torch.FloatTensor(z.shape[0]).uniform_(0, 0)
        # azim = torch.FloatTensor(z.shape[0]).uniform_(160, 200)
        # azim = torch.FloatTensor(z.shape[0]).uniform_(-180, 180)

        R, T = look_at_view_transform(dist=2.7, elev=elev, azim=azim)
        self.cameras = OpenGLPerspectiveCameras(R=R, T=T, device=device)

        images_predicted = self.renderer_silhouette(new_meshes,
                                                    cameras=self.cameras,
                                                    lights=self.lights)
        predicted_silhouette = images_predicted[..., 3]
        return new_meshes, predicted_silhouette
Example #29
 def render(self, dist, light_location, azim, elev, image_file):
     self.R, self.T = look_at_view_transform(dist=dist,
                                             elev=elev,
                                             azim=azim)
     self.cameras = OpenGLPerspectiveCameras(device=self.device,
                                             R=self.R,
                                             T=self.T)
     self.light_location = light_location
     lights = PointLights(device=self.device,
                          location=[self.light_location])
      # call the PyTorch3D mesh renderer with a Phong shader
     renderer = MeshRenderer(
         rasterizer=MeshRasterizer(cameras=self.cameras,
                                   raster_settings=self.raster_settings),
         shader=TexturedSoftPhongShader(device=self.device,
                                        cameras=self.cameras,
                                        lights=lights))
     images = renderer(self.meshes, cameras=self.cameras, lights=lights)
     img = images[0, ..., :3].cpu().numpy() * 255
     img = img.astype('uint8')
     img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
     cv2.imwrite(image_file, img)
Example #30
def render_mesh(mesh, R, T, device, img_size=512, silhouette=False):
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

    if silhouette:
        blend_params = BlendParams(sigma=1e-4, gamma=1e-4)
        raster_settings = RasterizationSettings(
            image_size=img_size, 
            blur_radius=np.log(1. / 1e-4 - 1.) * blend_params.sigma, 
            faces_per_pixel=100, 
        )
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=cameras, 
                raster_settings=raster_settings
            ),
            shader=SoftSilhouetteShader(blend_params=blend_params)
        )
    else:
        raster_settings = RasterizationSettings(
            image_size=img_size, 
            blur_radius=0.0, 
            faces_per_pixel=1, 
        )
        lights = PointLights(device=device, location=[[0.0, 5.0, -10.0]])
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=cameras, 
                raster_settings=raster_settings
            ),
            shader=SoftPhongShader(
                device=device, 
                cameras=cameras,
                lights=lights
            )
        )

    rendered_images = renderer(mesh, cameras=cameras)
    return rendered_images
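
A brief usage sketch covering both branches of render_mesh; mesh is an assumed textured Meshes object on the same device:

import torch
from pytorch3d.renderer import look_at_view_transform

device = torch.device("cuda:0")
R, T = look_at_view_transform(dist=2.7, elev=10, azim=30)

rgb = render_mesh(mesh, R, T, device)                   # (1, 512, 512, 4) Phong render
sil = render_mesh(mesh, R, T, device, silhouette=True)  # alpha channel holds the soft mask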