Example #1
    def project(self,
                vertices,
                K=None,
                R=None,
                t=None,
                dist_coeffs=None,
                orig_size=None):
        # viewpoint transformation
        if self.camera_mode == "look_at":
            vertices = nr.look_at(vertices, self.eye)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == "look":
            vertices = nr.look(vertices, self.eye, self.camera_direction)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == "projection":
            if K is None:
                K = self.K
            if R is None:
                R = self.R
            if t is None:
                t = self.t
            if dist_coeffs is None:
                dist_coeffs = self.dist_coeffs
            if orig_size is None:
                orig_size = self.orig_size
            vertices = nr.projection(vertices, K, R, t, dist_coeffs, orig_size)
        return vertices
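For orientation, here is a minimal usage sketch for this method in 'projection' mode. The tensor shapes and the nr.Renderer construction are illustrative assumptions, and the project method itself comes from the fork shown above rather than the upstream neural_renderer API:

import torch
import neural_renderer as nr

# Hypothetical inputs: one mesh with 100 vertices and an identity camera.
vertices = torch.rand(1, 100, 3).cuda()      # (batch, num_vertices, xyz)
K = torch.eye(3).unsqueeze(0).cuda()         # (1, 3, 3) intrinsics
R = torch.eye(3).unsqueeze(0).cuda()         # (1, 3, 3) rotation
t = torch.zeros(1, 1, 3).cuda()              # (1, 1, 3) translation
dist_coeffs = torch.zeros(1, 5).cuda()       # no lens distortion

renderer = nr.Renderer(camera_mode='projection')
projected = renderer.project(vertices, K=K, R=R, t=t,
                             dist_coeffs=dist_coeffs, orig_size=256)

Explicit arguments override the renderer's stored camera parameters; anything left as None falls back to self.K, self.R, and so on.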
Example #2
File: renderer.py Project: junzhezhang/cmr
    def render_silhouettes(self, vertices, faces, K=None, R=None, t=None, dist_coeffs=None, orig_size=None):

        # fill back
        if self.fill_back:
            faces = torch.cat((faces, faces[:, :, list(reversed(range(faces.shape[-1])))]), dim=1)

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'look':
            vertices = nr.look(vertices, self.eye, self.camera_direction)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'projection':
            if K is None:
                K = self.K
            if R is None:
                R = self.R
            if t is None:
                t = self.t
            if dist_coeffs is None:
                dist_coeffs = self.dist_coeffs
            if orig_size is None:
                orig_size = self.orig_size
            vertices = nr.projection(vertices, K, R, t, dist_coeffs, orig_size)

        # rasterization
        faces = nr.vertices_to_faces(vertices, faces)
        images = nr.rasterize_silhouettes(faces, self.image_size, self.anti_aliasing)
        return images
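A note on the fill_back step above: reversing the last axis of faces flips each triangle's winding order, so concatenating the reversed copy makes every face double-sided and visible from behind. A minimal sketch with a hypothetical two-face tensor:

import torch

faces = torch.tensor([[[0, 1, 2], [2, 3, 0]]])                 # (batch, n_faces, 3)
flipped = faces[:, :, list(reversed(range(faces.shape[-1])))]
print(flipped)                                                 # tensor([[[2, 1, 0], [0, 3, 2]]])
doubled = torch.cat((faces, flipped), dim=1)                   # (1, 4, 3): front plus back faces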
Example #3
    def render(self, vertices, faces, textures):
        # fill back
        if self.fill_back:
            faces = cf.concat((faces, faces[:, :, ::-1]), axis=1).data
            textures = cf.concat(
                (textures, textures.transpose((0, 1, 4, 3, 2, 5))), axis=1)

        # lighting
        faces_lighting = neural_renderer.vertices_to_faces(vertices, faces)
        textures = neural_renderer.lighting(faces_lighting, textures,
                                            self.light_intensity_ambient,
                                            self.light_intensity_directional,
                                            self.light_color_ambient,
                                            self.light_color_directional,
                                            self.light_direction)

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = neural_renderer.look_at(vertices, self.eye)
        elif self.camera_mode == 'look':
            vertices = neural_renderer.look(vertices, self.eye,
                                            self.camera_direction)

        # perspective transformation
        if self.perspective:
            vertices = neural_renderer.perspective(vertices,
                                                   angle=self.viewing_angle)

        # rasterization
        faces = neural_renderer.vertices_to_faces(vertices, faces)
        images = neural_renderer.rasterize(faces, textures, self.image_size,
                                           self.anti_aliasing, self.near,
                                           self.far, self.rasterizer_eps,
                                           self.background_color)
        return images
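For orientation, the lighting step scales each face texture by an ambient term plus a Lambertian directional term. A minimal sketch of that shading model, with hypothetical values; this is a simplification, not the library's exact code:

import torch

normals = torch.nn.functional.normalize(torch.rand(1, 6, 3), dim=2)  # per-face unit normals
direction = torch.tensor([0.0, 1.0, 0.0])                            # light direction
intensity_ambient, intensity_directional = 0.5, 0.5

cos = torch.relu(torch.sum(normals * direction, dim=2))              # Lambert cosine term
light = intensity_ambient + intensity_directional * cos              # (batch, n_faces)
# Each face's texture values are then scaled by its light coefficient.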
Example #4
    def render_silhouettes(self, vertices, faces):
        # fill back
        #        if self.fill_back:
        #            faces = torch.cat((faces, faces[:, :, list(reversed(list(range(faces.shape[-1]))))]), dim=1)

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'look':
            vertices = nr.look(vertices, self.eye, self.camera_direction)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'projection':
            vertices = nr.projection(vertices, self.P, self.dist_coeffs,
                                     self.orig_size)

        # rasterization
        faces = nr.vertices_to_faces(vertices, faces)
        images_real = nr.rasterize_silhouettes(faces, self.image_size,
                                               self.anti_aliasing).detach()
        images_fake = self.forward_eff_render(faces, self.image_size,
                                              self.sigma_val, self.sigma_num,
                                              self.sigma_mul)

        return images_fake, images_real
Example #5
    def render(self, vertices, faces, textures):
        # fill back
        if self.fill_back:
            faces = cf.concat((faces, faces[:, :, ::-1]), axis=1).data
            textures = cf.concat((textures, textures.transpose((0, 1, 4, 3, 2, 5))), axis=1)

        # lighting
        faces_lighting = neural_renderer.vertices_to_faces(vertices, faces)
        textures = neural_renderer.lighting(
            faces_lighting,
            textures,
            self.light_intensity_ambient,
            self.light_intensity_directional,
            self.light_color_ambient,
            self.light_color_directional,
            self.light_direction)

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = neural_renderer.look_at(vertices, self.eye)
        elif self.camera_mode == 'look':
            vertices = neural_renderer.look(vertices, self.eye, self.camera_direction)

        # perspective transformation
        if self.perspective:
            vertices = neural_renderer.perspective(vertices, angle=self.viewing_angle)

        # rasterization
        faces = neural_renderer.vertices_to_faces(vertices, faces)
        images = neural_renderer.rasterize(
            faces, textures, self.image_size, self.anti_aliasing, self.near, self.far, self.rasterizer_eps,
            self.background_color)
        return images
Example #6
    def render_depth(self, vertices, faces):
        # fill back
        if self.fill_back:
            faces = torch.cat(
                (faces, faces[:, :,
                              list(reversed(range(faces.shape[-1])))]),
                dim=1).detach()

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'look':
            vertices = nr.look(vertices, self.eye, self.camera_direction)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'projection':
            vertices = nr.projection(vertices, self.P, self.dist_coeffs,
                                     self.orig_size)

        # rasterization
        faces = nr.vertices_to_faces(vertices, faces)
        images = nr.rasterize_depth(faces, self.image_size, self.anti_aliasing)
        return images
Example #7
    def render_normal(self, vertices, faces):
        # fill back
        if self.fill_back:
            faces = cf.concat((faces, faces[:, :, ::-1]), axis=1).data

        # normal
        faces_normal = nr.vertices_to_faces(vertices, faces)

        (bs, nf) = faces_normal.shape[:2]
        faces_normal = faces_normal.reshape((bs * nf, 3, 3))
        v10 = faces_normal[:, 0] - faces_normal[:, 1]
        v12 = faces_normal[:, 2] - faces_normal[:, 1]
        normals = cf.normalize(nr.cross(v10, v12))
        normals = normals.reshape((bs, nf, 3))

        textures = normals[:, :, None, None, None, :]
        textures = cf.tile(textures, (1, 1, 2, 2, 2, 1))

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
        elif self.camera_mode == 'look':
            vertices = nr.look(vertices, self.eye, self.camera_direction, self.up)

        # perspective transformation
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)

        # rasterization
        faces = nr.vertices_to_faces(vertices, faces)
        images = nr.rasterize(
            faces, textures, self.image_size, self.anti_aliasing, self.near, self.far, self.rasterizer_eps,
            self.background_color)
        return images
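The per-face normal computation above is self-contained; here is a PyTorch sketch of the same cross-product construction, with a hypothetical input tensor:

import torch

face_vertices = torch.rand(1, 6, 3, 3)           # (batch, n_faces, 3 corners, xyz)
bs, nf = face_vertices.shape[:2]
fv = face_vertices.reshape(bs * nf, 3, 3)
v10 = fv[:, 0] - fv[:, 1]                        # edge from corner 1 to corner 0
v12 = fv[:, 2] - fv[:, 1]                        # edge from corner 1 to corner 2
normals = torch.nn.functional.normalize(torch.cross(v10, v12, dim=1), dim=1)
normals = normals.reshape(bs, nf, 3)             # one unit normal per face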
Example #8
    def render(self,
               vertices,
               faces,
               textures,
               K=None,
               R=None,
               t=None,
               at=None,
               up=None,
               dist_coeffs=None,
               orig_size=None):
        # fill back
        if self.fill_back:
            faces = torch.cat(
                (faces, faces[:, :,
                              list(reversed(range(faces.shape[-1])))]),
                dim=1).detach()
            textures = torch.cat((textures, textures.permute(
                (0, 1, 4, 3, 2, 5))),
                                 dim=1)

        # lighting
        faces_lighting = nr.vertices_to_faces(vertices, faces)
        textures = nr.lighting(faces_lighting, textures,
                               self.light_intensity_ambient,
                               self.light_intensity_directional,
                               self.light_color_ambient,
                               self.light_color_directional,
                               self.light_direction)

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye, up=up)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'look':
            vertices = nr.look(vertices, self.eye, self.camera_direction)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'projection':
            if K is None:
                K = self.K
            if R is None:
                R = self.R
            if t is None:
                t = self.t
            if dist_coeffs is None:
                dist_coeffs = self.dist_coeffs
            if orig_size is None:
                orig_size = self.orig_size
            vertices = nr.projection(vertices, K, R, t, dist_coeffs, orig_size)

        # rasterization
        faces = nr.vertices_to_faces(vertices, faces)
        out = nr.rasterize_rgbad(faces, textures, self.image_size,
                                 self.anti_aliasing, self.near, self.far,
                                 self.rasterizer_eps, self.background_color)
        return out['rgb'], out['depth'], out['alpha']
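A sketch of consuming the three return values above; the shapes are assumptions based on the usual rasterize_rgbad layout (rgb as (batch, 3, H, W), depth and alpha as (batch, H, W)):

import torch

rgb = torch.rand(1, 3, 256, 256)                 # hypothetical render outputs
depth = torch.rand(1, 256, 256)
alpha = (torch.rand(1, 256, 256) > 0.5).float()

# Composite the rendering over a white background using the alpha mask.
background = torch.ones_like(rgb)
composite = rgb * alpha.unsqueeze(1) + background * (1 - alpha.unsqueeze(1))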
Example #9
    def render_silhouettes(self, vertices, faces):
        # fill back
        if self.fill_back:
            faces = cf.concat((faces, faces[:, :, ::-1]), axis=1).data

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = neural_renderer.look_at(vertices, self.eye)
        elif self.camera_mode == 'look':
            vertices = neural_renderer.look(vertices, self.eye,
                                            self.camera_direction)

        # perspective transformation
        if self.perspective:
            vertices = neural_renderer.perspective(vertices,
                                                   angle=self.viewing_angle)

        # rasterization
        faces = neural_renderer.vertices_to_faces(vertices, faces)
        # ==== TM changes ====
        results_dict = neural_renderer.rasterize_silhouettes(
            faces, self.image_size, self.anti_aliasing)
        masks = results_dict['alpha']
        face_index_map = results_dict['face_index_map']
        weight_map = results_dict['weight_map']
        sampling_weight_map = results_dict['sampling_weight_map']
        # ==== Making another dictionary (just for clarity) ====
        return_dict = dict()
        return_dict['masks'] = masks
        return_dict['face_index_map'] = face_index_map
        return_dict['weight_map'] = weight_map
        return_dict['sampling_weight_map'] = sampling_weight_map
        return return_dict
Example #10
    def render_silhouettes(self, vertices, faces):
        if self.camera_mode == 'look_at':
            vertices = neural_renderer.look_at(vertices, self.eye)
        elif self.camera_mode == 'look':
            vertices = neural_renderer.look(vertices, self.eye, self.camera_direction)

        if self.perspective:
            vertices = neural_renderer.perspective(vertices)
        faces = neural_renderer.vertices_to_faces(vertices, faces)
        if self.fill_back:
            faces = cf.concat((faces, faces[:, :, ::-1]), axis=1)
        images = neural_renderer.rasterize_silhouettes(faces, self.image_size, self.anti_aliasing)
        return images
Example #11
    def transform_vertices(self, vertices, lights=None):
        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = neural_renderer.look_at(vertices, self.viewpoints)
        elif self.camera_mode == 'look':
            vertices = neural_renderer.look(vertices, self.viewpoints,
                                            self.camera_direction)

        # perspective transformation
        if self.perspective:
            vertices = neural_renderer.perspective(vertices,
                                                   angle=self.viewing_angle)

        return vertices
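This method runs only the camera half of the pipeline, which is useful for projecting keypoints without rasterizing. A stand-alone sketch of the 'look_at' path using upstream neural_renderer calls; the viewpoint values are hypothetical:

import torch
import neural_renderer as nr

vertices = torch.rand(1, 100, 3).cuda()
eye = nr.get_points_from_angles(2.732, 30, 0)   # distance, elevation, azimuth
vertices = nr.look_at(vertices, eye)
vertices = nr.perspective(vertices, angle=30)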
Example #12
    def render(self, vertices, faces, textures):
        # fill back
        if self.fill_back:
            faces = cf.concat((faces, faces[:, :, ::-1]), axis=1).data
            textures = cf.concat(
                (textures, textures.transpose((0, 1, 4, 3, 2, 5))), axis=1)

        # lighting
        faces_lighting = neural_renderer.vertices_to_faces(vertices, faces)
        textures = neural_renderer.lighting(faces_lighting, textures,
                                            self.light_intensity_ambient,
                                            self.light_intensity_directional,
                                            self.light_color_ambient,
                                            self.light_color_directional,
                                            self.light_direction)

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = neural_renderer.look_at(vertices, self.eye)
        elif self.camera_mode == 'look':
            vertices = neural_renderer.look(vertices, self.eye,
                                            self.camera_direction)

        # perspective transformation
        if self.perspective:
            vertices = neural_renderer.perspective(vertices,
                                                   angle=self.viewing_angle)

        # rasterization
        faces = neural_renderer.vertices_to_faces(vertices, faces)
        # ==== TM changes ====
        results_dict = neural_renderer.rasterize(faces, textures,
                                                 self.image_size,
                                                 self.anti_aliasing, self.near,
                                                 self.far, self.rasterizer_eps,
                                                 self.background_color)
        images = results_dict['rgb']
        face_index_map = results_dict['face_index_map']
        weight_map = results_dict['weight_map']
        sampling_weight_map = results_dict['sampling_weight_map']

        # ==== Making another dictionary (just for clarity) ====
        return_dict = dict()
        return_dict['images'] = images
        return_dict['face_index_map'] = face_index_map
        return_dict['weight_map'] = weight_map
        return_dict['sampling_weight_map'] = sampling_weight_map
        return return_dict
Example #13
    def render(self, vertices, faces, textures):
        # fill back
        if self.fill_back:
            faces = torch.cat(
                (faces, faces[:, :,
                              list(reversed(range(faces.shape[-1])))]),
                dim=1).detach()
            textures = torch.cat((textures, textures.permute(
                (0, 1, 4, 3, 2, 5))),
                                 dim=1)

        # lighting
        faces_lighting = nr.vertices_to_faces(vertices, faces)
        textures = nr.lighting(faces_lighting, textures,
                               self.light_intensity_ambient,
                               self.light_intensity_directional,
                               self.light_color_ambient,
                               self.light_color_directional,
                               self.light_direction)

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'look':
            vertices = nr.look(vertices, self.eye, self.camera_direction)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'projection':
            vertices = nr.projection(vertices, self.P, self.dist_coeffs,
                                     self.orig_size)
        elif self.camera_mode == 'projection_by_params':
            vertices = nr.projection_by_params(vertices, self.camera_f,
                                               self.camera_c, self.camera_rt,
                                               self.camera_t, self.dist_coeffs,
                                               self.orig_size)

        # rasterization
        faces = nr.vertices_to_faces(vertices, faces)
        images = nr.rasterize(faces, textures, self.image_size,
                              self.anti_aliasing, self.near, self.far,
                              self.rasterizer_eps, self.background_color)
        return images
Example #14
    def project_points(self,
                       vertices,
                       K=None,
                       R=None,
                       t=None,
                       dist_coeffs=None,
                       orig_size=None):
        if self.camera_mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'look':
            vertices = nr.look(vertices, self.eye, self.camera_direction)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'projection':
            if K is None:
                K = self.K
            if R is None:
                R = self.R
            if t is None:
                t = self.t
            if dist_coeffs is None:
                dist_coeffs = self.dist_coeffs
            if orig_size is None:
                orig_size = self.orig_size
            vertices = nr.projection(vertices, K, R, t, dist_coeffs, orig_size)
        elif self.camera_mode == 'weak_projection':
            if K is None:
                K = self.K
            if R is None:
                R = self.R
            if t is None:
                t = self.t
            if orig_size is None:
                orig_size = self.orig_size

            vertices = weak_projection(vertices, K, R, t, orig_size)

            vertices[:, :, 0] = (orig_size / 2) * vertices[:, :, 0] + (orig_size / 2)
            vertices[:, :, 1] = orig_size - ((orig_size / 2) * vertices[:, :, 1] +
                                             (orig_size / 2))
        return vertices
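The final two assignments map normalized device coordinates in [-1, 1] to pixel coordinates in [0, orig_size], flipping y because image rows grow downward. A worked check with hypothetical values:

orig_size = 256

for x in (-1.0, 0.0, 1.0):
    px = (orig_size / 2) * x + (orig_size / 2)
    print(x, '->', px)       # -1.0 -> 0.0, 0.0 -> 128.0, 1.0 -> 256.0

y = 1.0                      # top of the normalized frame
py = orig_size - ((orig_size / 2) * y + (orig_size / 2))
print(y, '->', py)           # 1.0 -> 0.0: row 0 is the top of the image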
Example #15
    def render(self, vertices, faces, textures):
        if self.camera_mode == 'look_at':
            vertices = neural_renderer.look_at(vertices, self.eye)
        elif self.camera_mode == 'look':
            vertices = neural_renderer.look(vertices, self.eye, self.camera_direction)

        if self.perspective:
            vertices = neural_renderer.perspective(vertices)
        faces = neural_renderer.vertices_to_faces(vertices, faces)
        if self.fill_back:
            faces = cf.concat((faces, faces[:, :, ::-1]), axis=1)
            textures = cf.concat((textures, textures.transpose((0, 1, 4, 3, 2, 5))), axis=1)
        textures = neural_renderer.lighting(
            faces, textures, self.light_intensity_ambient, self.light_intensity_directional)
        images = neural_renderer.rasterize(
            faces, textures, self.image_size, self.anti_aliasing, self.near, self.far, self.rasterizer_eps,
            self.background_color)
        return images
Example #16
    def render_color(self, vertices, faces, textures):
        # lighting
        # faces_lighting = nr.vertices_to_faces(vertices, faces)
        # textures = nr.lighting(
        #     faces_lighting,
        #     textures,
        #     self.light_intensity_ambient,
        #     self.light_intensity_directional,
        #     self.light_color_ambient,
        #     self.light_color_directional,
        #     self.light_direction)

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'look':
            vertices = nr.look(vertices, self.eye, self.camera_direction)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'projection':
            vertices = nr.projection(vertices, self.P, self.dist_coeffs,
                                     self.orig_size)

        # rasterization
        # faces_ = nr.vertices_to_faces(vertices, faces)
        # images_fake, dis = self.forward_eff_render_shading(faces_, textures, self.image_size, self.sigma_val, self.sigma_num, self.sigma_mul, self.gamma_val, self.gamma_num, self.gamma_mul, self.near, self.far)

        faces = torch.cat((faces, faces[:, :, [2, 1, 0]]), dim=1).contiguous()
        textures = torch.cat((textures, textures), dim=1).contiguous()
        faces = nr.vertices_to_faces(vertices, faces)
        images_real = nr.rasterize_rgba(faces, textures, self.image_size,
                                        self.anti_aliasing)
        images_real = torch.cat(
            [images_real['rgb'], images_real['alpha'].unsqueeze(1)],
            dim=1)  # .detach()

        return images_real
Example #17
    def render_depth(self, vertices, faces):
        # fill back
        if self.fill_back:
            faces = cf.concat((faces, faces[:, :, ::-1]), axis=1).data

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = neural_renderer.look_at(vertices, self.eye)
        elif self.camera_mode == 'look':
            vertices = neural_renderer.look(vertices, self.eye, self.camera_direction)

        # perspective transformation
        if self.perspective:
            vertices = neural_renderer.perspective(vertices, angle=self.viewing_angle)

        # rasterization
        faces = neural_renderer.vertices_to_faces(vertices, faces)
        images = neural_renderer.rasterize_depth(faces, self.image_size, self.anti_aliasing)
        return images
    def render_silhouettes(self, vertices, faces):
        # fill back
        if self.fill_back:
            faces = cf.concat((faces, faces[:, :, ::-1]), axis=1).data

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = neural_renderer.look_at(vertices, self.eye)
        elif self.camera_mode == 'look':
            vertices = neural_renderer.look(vertices, self.eye, self.camera_direction)

        # perspective transformation
        if self.perspective:
            vertices = neural_renderer.perspective(vertices, angle=self.viewing_angle)

        # rasterization
        faces = neural_renderer.vertices_to_faces(vertices, faces)
        images = neural_renderer.rasterize_silhouettes(faces, self.image_size, self.anti_aliasing)
        return images
Example #19
    def render_rgb(self,
                   vertices,
                   faces,
                   textures,
                   K=None,
                   R=None,
                   t=None,
                   dist_coeffs=None,
                   orig_size=None,
                   lighting=True):
        # fill back
        if self.fill_back:
            faces = torch.cat(
                (faces, faces[:, :,
                              list(reversed(range(faces.shape[-1])))]),
                dim=1).detach()
            textures = torch.cat((textures, textures.permute(
                (0, 1, 4, 3, 2, 5))),
                                 dim=1)

        # lighting
        if lighting and textures.size()[-1] == 3:  # 3 channels (RGB)
            faces_lighting = nr.vertices_to_faces(vertices, faces)
            textures = nr.lighting(faces_lighting, textures,
                                   self.light_intensity_ambient,
                                   self.light_intensity_directional,
                                   self.light_color_ambient,
                                   self.light_color_directional,
                                   self.light_direction)

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'look':
            vertices = nr.look(vertices, self.eye, self.camera_direction)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'projection':
            if K is None:
                K = self.K
            if R is None:
                R = self.R
            if t is None:
                t = self.t
            if dist_coeffs is None:
                dist_coeffs = self.dist_coeffs
            if orig_size is None:
                orig_size = self.orig_size
            vertices = nr.projection(vertices, K, R, t, dist_coeffs, orig_size)

        # rasterization
        faces = nr.vertices_to_faces(vertices, faces)
        if isinstance(self.background_color, (tuple, list)):
            background = self.background_color
        else:
            assert (isinstance(self.background_color, (float, int)))
            background = [
                self.background_color for _ in range(textures.size()[-1])
            ]
        images = nr.rasterize(faces, textures, self.image_size,
                              self.anti_aliasing, self.near, self.far,
                              self.rasterizer_eps, background)
        return images
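Note that lighting is applied only when the texture's last dimension is 3 (RGB); for other channel counts the scalar background_color is simply expanded to one value per channel. A small sketch of that expansion with hypothetical values:

num_channels = 4
background_color = 0.5      # a single float rather than a per-channel tuple
if isinstance(background_color, (tuple, list)):
    background = background_color
else:
    assert isinstance(background_color, (float, int))
    background = [background_color for _ in range(num_channels)]
print(background)           # [0.5, 0.5, 0.5, 0.5]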
Example #20
    def render(self,
               vertices,
               faces,
               textures,
               K=None,
               R=None,
               t=None,
               dist_coeffs=None,
               orig_size=None):
        # fill back
        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'look':
            vertices = nr.look(vertices, self.eye, self.camera_direction)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'projection':
            if K is None:
                K = self.K
            if R is None:
                R = self.R
            if t is None:
                t = self.t
            if dist_coeffs is None:
                dist_coeffs = self.dist_coeffs
            if orig_size is None:
                orig_size = self.orig_size
            vertices = nr.projection(vertices, K, R, t, dist_coeffs, orig_size)
        elif self.camera_mode == 'weak_projection':
            if K is None:
                K = self.K
            if R is None:
                R = self.R
            if t is None:
                t = self.t
            if orig_size is None:
                orig_size = self.orig_size

            vertices = weak_projection(vertices, K, R, t, orig_size)
            # Regularize so that the vector from the cheeks to the nose tip
            # makes an angle of less than 90 degrees with the negative z-axis:
            # (a,b,c)·(0,0,-1) > 0  ->  c < 0.
            # Also shift all z values onto the positive axis.
            from MorphableModelFitting.models.face_model import FaceModelBFM
            keypoints = FaceModelBFM().keypoints
            front_idx, back_idx_1, back_idx_2 = keypoints[30], keypoints[0], keypoints[16]
            for i in range(len(vertices)):
                back_z = (vertices[i, back_idx_1, 2] +
                          vertices[i, back_idx_2, 2]) / 2
                if (vertices[i, front_idx, 2] - back_z) > 0:
                    vertices[i, :, 2] = -vertices[i, :, 2]
            vertices[:, :, 2] = vertices[:, :, 2] - torch.min(
                vertices[:, :, 2], dim=1)[0].unsqueeze(1) + 1

        # lighting
        if self.light_mode == "parallel":
            textures = texture_from_point2faces(faces, textures) / 255
            if self.fill_back:
                faces = torch.cat(
                    (faces, faces[:, :,
                                  list(reversed(range(faces.shape[-1])))]),
                    dim=1).detach()
                textures = torch.cat(
                    (textures, textures.permute((0, 1, 4, 3, 2, 5))), dim=1)
            faces_lighting = nr.vertices_to_faces(vertices, faces)
            textures = nr.lighting(faces_lighting, textures,
                                   self.light_intensity_ambient,
                                   self.light_intensity_directional,
                                   self.light_color_ambient,
                                   self.light_color_directional,
                                   self.light_direction)
        elif self.light_mode == "SH":
            point_buf = torch.from_numpy(self.facemodel.point_buf).long() - 1
            point_norm = compute_point_norm(
                vertices, faces[0][:,
                                   list(reversed(range(faces.shape[-1])))],
                point_buf)

            # texture = texture_from_point2faces(triangles, texture).reshape((batch, -1, 3))
            textures, _ = Illumination_SH(textures, point_norm, self.SH_coeff)
            textures = texture_from_point2faces(faces, textures) / 255
            if self.fill_back:
                faces = torch.cat(
                    (faces, faces[:, :,
                                  list(reversed(range(faces.shape[-1])))]),
                    dim=1).detach()
                textures = torch.cat(
                    (textures, textures.permute((0, 1, 4, 3, 2, 5))), dim=1)
        else:
            return None

        # rasterization
        faces = nr.vertices_to_faces(vertices, faces)
        out = nr.rasterize_rgbad(faces, textures, self.image_size,
                                 self.anti_aliasing, self.near, self.far,
                                 self.rasterizer_eps, self.background_color)
        return out['rgb'], out['depth'], out['alpha']
Example #21
    def render(
        self,
        vertices,
        faces,
        textures,
        K=None,
        R=None,
        t=None,
        dist_coeffs=None,
        orig_size=None,
        detach_renders=False,
    ):
        # fill back
        if self.fill_back:
            faces = torch.cat(
                (faces, faces[:, :,
                              list(reversed(range(faces.shape[-1])))]),
                dim=1).detach()
            textures = torch.cat((textures, textures.permute(
                (0, 1, 4, 3, 2, 5))),
                                 dim=1)

        # lighting
        if not self.no_light:
            faces_lighting = nr.vertices_to_faces(vertices, faces)
            textures = nr.lighting(
                faces_lighting,
                textures,
                self.light_intensity_ambient,
                self.light_intensity_directional,
                self.light_color_ambient,
                self.light_color_directional,
                self.light_direction,
            )

        # viewpoint transformation
        if self.camera_mode == "look_at":
            vertices = nr.look_at(vertices, self.eye)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == "look":
            vertices = nr.look(vertices, self.eye, self.camera_direction)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == "projection":
            vertices = self.project(vertices,
                                    K=K,
                                    R=R,
                                    t=t,
                                    dist_coeffs=dist_coeffs,
                                    orig_size=orig_size)

        # rasterization
        faces = nr.vertices_to_faces(vertices, faces)
        if detach_renders:
            faces = faces.detach()
        out = rasterize.rasterize_rgbad(
            faces,
            textures,
            self.image_size,
            self.anti_aliasing,
            self.near,
            self.far,
            self.rasterizer_eps,
            self.background_color,
        )
        return out
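Unlike Example #8, this variant returns the rasterize_rgbad dictionary unmodified. A self-contained sketch of driving such a renderer end to end; the mesh is hypothetical, and the return type of render varies across the forks in this listing (a plain image tensor, a tuple, or a dict):

import torch
import neural_renderer as nr

# Hypothetical single-triangle mesh.
vertices = torch.tensor([[[-0.5, -0.5, 1.0], [0.5, -0.5, 1.0], [0.0, 0.5, 1.0]]]).cuda()
faces = torch.tensor([[[0, 1, 2]]]).cuda()
textures = torch.ones(1, 1, 2, 2, 2, 3).cuda()    # (batch, n_faces, t, t, t, rgb)

renderer = nr.Renderer(camera_mode='look_at')
renderer.eye = nr.get_points_from_angles(2.732, 30, 0)
out = renderer.render(vertices, faces, textures)  # fork-dependent return type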