Example #1
    def render_depth(self, vertices, faces, K=None, R=None, t=None, dist_coeffs=None, orig_size=None):

        # fill back
        if self.fill_back:
            faces = torch.cat((faces, faces[:, :, list(reversed(range(faces.shape[-1])))]), dim=1).detach()

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'look':
            vertices = nr.look(vertices, self.eye, self.camera_direction)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'projection':
            if K is None:
                K = self.K
            if R is None:
                R = self.R
            if t is None:
                t = self.t
            if dist_coeffs is None:
                dist_coeffs = self.dist_coeffs
            if orig_size is None:
                orig_size = self.orig_size
            vertices = nr.projection(vertices, K, R, t, dist_coeffs, orig_size)

        # rasterization
        faces = nr.vertices_to_faces(vertices, faces)
        images = nr.rasterize_depth(faces, self.image_size, self.anti_aliasing)
        return images
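A minimal usage sketch for the projection branch above, assuming the PyTorch port of neural_renderer (nr.Renderer); the triangle, intrinsics, and extrinsics are illustrative, and the CUDA extension requires a GPU:

    import torch
    import neural_renderer as nr

    # one triangle, batched: vertices (1, 3, 3), faces (1, 1, 3)
    vertices = torch.tensor([[[-0.5, -0.5, 1.0],
                              [0.5, -0.5, 1.0],
                              [0.0, 0.5, 1.0]]]).cuda()
    faces = torch.tensor([[[0, 1, 2]]], dtype=torch.int32).cuda()

    # illustrative pinhole intrinsics and extrinsics for a 256x256 image
    K = torch.tensor([[[280.0, 0.0, 128.0],
                       [0.0, 280.0, 128.0],
                       [0.0, 0.0, 1.0]]]).cuda()
    R = torch.eye(3).view(1, 3, 3).cuda()
    t = torch.tensor([[[0.0, 0.0, 2.0]]]).cuda()

    renderer = nr.Renderer(camera_mode='projection', image_size=256)
    depth = renderer.render_depth(vertices, faces, K=K, R=R, t=t, orig_size=256)  # (1, 256, 256)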
Example #2
    def render_depth(self, vertices, faces):
        # fill back
        if self.fill_back:
            faces = torch.cat(
                (faces, faces[:, :,
                              list(reversed(range(faces.shape[-1])))]),
                dim=1).detach()

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'look':
            vertices = nr.look(vertices, self.eye, self.camera_direction)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'projection':
            vertices = nr.projection(vertices, self.P, self.dist_coeffs,
                                     self.orig_size)
        elif self.camera_mode == 'projection_by_params':
            vertices = nr.projection_by_params(vertices, self.camera_f,
                                               self.camera_c, self.camera_rt,
                                               self.camera_t, self.dist_coeffs,
                                               self.orig_size)

        # rasterization
        faces = nr.vertices_to_faces(vertices, faces)
        images = nr.rasterize_depth(faces, self.image_size, self.anti_aliasing)
        return images
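This fork's projection branch consumes a single precomputed camera matrix self.P rather than separate K/R/t; a hedged sketch of composing one under the usual convention P = K [R | t] (all values illustrative):

    import torch

    K = torch.tensor([[280.0, 0.0, 128.0],
                      [0.0, 280.0, 128.0],
                      [0.0, 0.0, 1.0]])
    R = torch.eye(3)
    t = torch.tensor([[0.0], [0.0], [2.0]])
    P = (K @ torch.cat([R, t], dim=1)).unsqueeze(0)  # batched (1, 3, 4) camera matrix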
Example #3
    def forward(
        self,
        vertices,
        faces,
        Ks=None,
        Rs=None,
        ts=None,
        dist_coeffs=None,
        bbxs=None,
        image_size=64,
        orig_size=64,
        anti_aliasing=False,
        far=100.0,
    ):
        # batch_size = vertices.shape[0]
        if self.fill_back:
            faces = torch.cat(
                (faces, faces[:, :,
                              list(reversed(range(faces.shape[-1])))]),
                dim=1,
            ).to(vertices.device).detach()

        if Ks is None:
            raise ValueError("Ks must not be None when rendering a depth map")

        if Rs is None:
            Rs = torch.Tensor([
                [1, 0, 0],
                [0, 1, 0],
                [0, 0, 1],
            ]).view((1, 3, 3)).to(vertices.device)

        if ts is None:
            ts = torch.Tensor([0, 0, 0]).view((1, 3)).to(vertices.device)

        if dist_coeffs is None:
            dist_coeffs = torch.Tensor([[0., 0., 0., 0., 0.]]).to(vertices.device)

        # project: camera-space xyz -> image-space uvd
        vertices = self.projection(vertices,
                                   Ks,
                                   Rs,
                                   ts,
                                   dist_coeffs,
                                   orig_size,
                                   bbxs=bbxs)

        faces = nr.vertices_to_faces(vertices, faces)
        # rasterization
        rast = nr.rasterize_depth(faces, image_size, anti_aliasing, far=far)
        # normalize to 0~1
        rend = self.normalize_depth(rast, far=far)
        return rend
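Because Rs, ts, and dist_coeffs all receive identity/zero defaults above, only the intrinsics are mandatory; a hedged call sketch, where the depth_renderer instance name and intrinsics are illustrative:

    import torch

    Ks = torch.tensor([[[280.0, 0.0, 32.0],
                        [0.0, 280.0, 32.0],
                        [0.0, 0.0, 1.0]]]).cuda()
    depth = depth_renderer(vertices, faces, Ks=Ks, image_size=64, orig_size=64)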
Example #4
    def render_depth(self, cam, vertices):
        bs = cam.shape[0]
        faces = self.faces.repeat(bs, 1, 1)
        # set offset_z for persp proj
        proj_verts = self.proj_func(vertices, cam)
        # flipping the y-axis here to make it align with the image coordinate system!
        proj_verts[:, :, 1] *= -1

        # rasterization
        faces = self.vertices_to_faces(proj_verts, faces)
        images = nr.rasterize_depth(faces, self.image_size, self.anti_aliasing)
        return images
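The in-place sign flip above maps the projection's y-up convention onto the image's y-down convention; an equivalent out-of-place form, sketched for clarity:

    # flip y without mutating proj_verts in place
    flip = torch.tensor([1.0, -1.0, 1.0], device=proj_verts.device)
    proj_verts = proj_verts * flip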
Example #5
    def render_depth(self, vertices, faces, backgrounds=None):
        vertices = self.transform_vertices(vertices)
        images = neural_renderer.rasterize_depth(
            vertices,
            faces,
            background_color=self.background_color,
            backgrounds=backgrounds,
            image_size=self.image_size,
            near=self.near,
            far=self.far,
            anti_aliasing=self.anti_aliasing,
            draw_backside=self.draw_backside,
        )
        return images
Example #6
    def forward(self, vertices, faces, textures=None, mode=None):
        '''
        Implementation of the forward rendering method.
        The old API is preserved for backward compatibility with the Chainer implementation.
        '''
        _textures = textures
        if mode not in [None, 'silhouettes', 'depth']:
            raise ValueError("mode should be one of None, 'silhouettes' or 'depth'")
        
        # fill back
        if self.fill_back:
            faces = torch.cat((faces, faces[:, :, list(reversed(range(faces.shape[-1])))]), dim=1).detach()
            if _textures is not None:
                _textures = torch.cat((_textures, _textures.permute((0, 1, 4, 3, 2, 5))), dim=1)
        
        if textures is not None:
            # lighting
            faces_lighting = nr.vertices_to_faces(vertices, faces)
            _textures = nr.lighting(
                faces_lighting,
                _textures,
                self.light_intensity_ambient,
                self.light_intensity_directional,
                self.light_color_ambient,
                self.light_color_directional,
                self.light_direction)

        # projection
        vertices = nr.projection(vertices, self.camera)
        if self.camera.perspective:
            vertices = nr.perspective(vertices, angle=self.camera.viewing_angle)

        # rasterization
        faces = nr.vertices_to_faces(vertices, faces)

        if mode is None:
            images = nr.rasterize(
                faces, _textures, self.camera.image_size, self.anti_aliasing, self.camera.near, self.camera.far, self.rasterizer_eps,
                self.background_color)
        elif mode == 'silhouettes':
            images = nr.rasterize_silhouettes(faces, self.camera.image_size, self.anti_aliasing)
        elif mode == 'depth':
            images = nr.rasterize_depth(faces, self.camera.image_size, self.anti_aliasing)

        return images
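A sketch of how the three modes of this forward might be invoked, assuming renderer is an instance of the module above (the instance and input names are illustrative):

    rgb = renderer(vertices, faces, textures)              # mode=None: full shaded render
    sil = renderer(vertices, faces, mode='silhouettes')    # binary foreground mask
    depth = renderer(vertices, faces, mode='depth')        # per-pixel depth map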
Example #7
    def render_depth(self, vertices, faces):
        # fill back
        if self.fill_back:
            faces = cf.concat((faces, faces[:, :, ::-1]), axis=1).data

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
        elif self.camera_mode == 'look':
            vertices = nr.look(vertices, self.eye, self.camera_direction, self.up)

        # perspective transformation
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)

        # rasterization
        faces = nr.vertices_to_faces(vertices, faces)
        images = nr.rasterize_depth(faces, self.image_size, self.anti_aliasing)
        return images
Example #8
    def render_depth(self, vertices, faces):
        # fill back
        if self.fill_back:
            faces = cf.concat((faces, faces[:, :, ::-1]), axis=1).data

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = neural_renderer.look_at(vertices, self.eye)
        elif self.camera_mode == 'look':
            vertices = neural_renderer.look(vertices, self.eye, self.camera_direction)

        # perspective transformation
        if self.perspective:
            vertices = neural_renderer.perspective(vertices, angle=self.viewing_angle)

        # rasterization
        faces = neural_renderer.vertices_to_faces(vertices, faces)
        images = neural_renderer.rasterize_depth(faces, self.image_size, self.anti_aliasing)
        return images
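A minimal usage sketch for the look_at branch, assuming the original Chainer neural_renderer, whose README drives the camera the same way; the distance/elevation/azimuth values are illustrative:

    import neural_renderer

    renderer = neural_renderer.Renderer()
    renderer.eye = neural_renderer.get_points_from_angles(2.732, 30.0, 0.0)
    depth = renderer.render_depth(vertices, faces)  # (batch_size, image_size, image_size)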
Example #9
    def forward(self, mesh, cam_param, img_affine_trans_mat, mesh_attr):
        # mesh: (batch_size, vertex_num, 3) world-space vertex coordinates
        # mesh_attr: dict holding the face vertex indices under key 'vi'
        batch_size = mesh.shape[0]
        face_num = len(mesh_attr['vi'])
        focal, princpt = cam_param['focal'], cam_param['princpt']
        campos, camrot = cam_param['campos'], cam_param['camrot']

        # project mesh world -> camera space
        mesh = mesh - campos.view(-1, 1, 3)
        mesh = torch.cat([
            torch.mm(camrot[i], mesh[i].permute(1, 0)).permute(
                1, 0)[None, :, :] for i in range(batch_size)
        ], 0)
        mesh_3d_x = mesh[:, :, 0]
        mesh_3d_y = mesh[:, :, 1]
        mesh_z = mesh[:, :, 2]
        mesh_z = mesh_z + (mesh_z == 0).type('torch.cuda.FloatTensor') * 1e-4

        # project camera -> image space
        mesh_2d_x = (mesh_3d_x / mesh_z * focal[:, 0].view(-1, 1) +
                     princpt[:, 0].view(-1, 1))[:, :, None]
        mesh_2d_y = (mesh_3d_y / mesh_z * focal[:, 1].view(-1, 1) +
                     princpt[:, 1].view(-1, 1))[:, :, None]
        mesh_2d = torch.cat([mesh_2d_x, mesh_2d_y,
                             torch.ones_like(mesh_2d_x)], 2)

        # apply affine transform (crop and resize)
        mesh_2d = torch.bmm(img_affine_trans_mat,
                            mesh_2d.permute(0, 2, 1)).permute(0, 2, 1)

        ##################################
        # neural renderer (for depth map rendering)
        # normalize to [-1, 1] NDC: x left-to-right, y flipped to match image coordinates
        mesh_2d_norm = torch.cat([
            mesh_2d[:, :, 0:1] / cfg.rendered_img_shape[1] * 2 - 1,
            (cfg.rendered_img_shape[0] - 1 - mesh_2d[:, :, 1:2]) / cfg.rendered_img_shape[0] * 2 - 1,
            mesh_z[:, :, None]
        ], 2)

        # mesh_2d_v0, v1, v2: batch_size x face_num x 3. coordinates of vertices for each face
        mesh_2d_v0 = torch.cat([
            index_selection_nd(mesh_2d_norm[i], mesh_attr['vi'][:, 0], 0)[None, ...]
            for i in range(batch_size)
        ], dim=0)
        mesh_2d_v1 = torch.cat([
            index_selection_nd(mesh_2d_norm[i], mesh_attr['vi'][:, 1], 0)[None, ...]
            for i in range(batch_size)
        ], dim=0)
        mesh_2d_v2 = torch.cat([
            index_selection_nd(mesh_2d_norm[i], mesh_attr['vi'][:, 2], 0)[None, ...]
            for i in range(batch_size)
        ], dim=0)
        face_vertices = torch.cat([
            mesh_2d_v0[:, :, None, :], mesh_2d_v1[:, :, None, :],
            mesh_2d_v2[:, :, None, :]
        ], 2)

        rendered_depthmap = nr.rasterize_depth(
            face_vertices, cfg.rendered_img_shape[0], False,
            near=cfg.depth_min, far=cfg.depth_max)[:, None, :, :]
        rendered_depthmap[rendered_depthmap == cfg.depth_max] = 0

        return rendered_depthmap
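The projection steps in this last example reduce to a pinhole model followed by normalization into the [-1, 1] range that nr.rasterize_depth consumes; a self-contained sketch of just that mapping (the function name and argument layout are illustrative):

    import torch

    def pinhole_to_ndc(xyz_cam, focal, princpt, img_h, img_w):
        # xyz_cam: (B, N, 3) camera-space points
        # focal:   (B, 2) = (fx, fy);  princpt: (B, 2) = (cx, cy)
        z = xyz_cam[:, :, 2].clamp(min=1e-4)             # guard against division by zero
        u = xyz_cam[:, :, 0] / z * focal[:, 0:1] + princpt[:, 0:1]
        v = xyz_cam[:, :, 1] / z * focal[:, 1:2] + princpt[:, 1:2]
        x_ndc = u / img_w * 2 - 1                        # [-1, 1], left to right
        y_ndc = (img_h - 1 - v) / img_h * 2 - 1          # flipped: image y grows downward
        return torch.stack([x_ndc, y_ndc, z], dim=2)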