Example #1
    def render(self,
               vertices,
               faces,
               textures,
               K=None,
               R=None,
               t=None,
               at=None,
               up=None,
               dist_coeffs=None,
               orig_size=None):
        # fill back
        if self.fill_back:
            faces = torch.cat(
                (faces, faces[:, :,
                              list(reversed(range(faces.shape[-1])))]),
                dim=1).detach()
            textures = torch.cat((textures, textures.permute(
                (0, 1, 4, 3, 2, 5))),
                                 dim=1)

        # lighting
        faces_lighting = nr.vertices_to_faces(vertices, faces)
        textures = nr.lighting(faces_lighting, textures,
                               self.light_intensity_ambient,
                               self.light_intensity_directional,
                               self.light_color_ambient,
                               self.light_color_directional,
                               self.light_direction)

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye, up=up)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'look':
            vertices = nr.look(vertices, self.eye, self.camera_direction)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'projection':
            if K is None:
                K = self.K
            if R is None:
                R = self.R
            if t is None:
                t = self.t
            if dist_coeffs is None:
                dist_coeffs = self.dist_coeffs
            if orig_size is None:
                orig_size = self.orig_size
            vertices = nr.projection(vertices, K, R, t, dist_coeffs, orig_size)

        # rasterization
        faces = nr.vertices_to_faces(vertices, faces)
        out = nr.rasterize_rgbad(faces, textures, self.image_size,
                                 self.anti_aliasing, self.near, self.far,
                                 self.rasterizer_eps, self.background_color)
        return out['rgb'], out['depth'], out['alpha']
Example #2
    def render_silhouettes(self, vertices, faces):
        # fill back
        #        if self.fill_back:
        #            faces = torch.cat((faces, faces[:, :, list(reversed(list(range(faces.shape[-1]))))]), dim=1)

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'look':
            vertices = nr.look(vertices, self.eye, self.camera_direction)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'projection':
            vertices = nr.projection(vertices, self.P, self.dist_coeffs,
                                     self.orig_size)

        # rasterization
        faces = nr.vertices_to_faces(vertices, faces)
        images_real = nr.rasterize_silhouettes(faces, self.image_size,
                                               self.anti_aliasing).detach()
        images_fake = self.forward_eff_render(faces, self.image_size,
                                              self.sigma_val, self.sigma_num,
                                              self.sigma_mul)

        return images_fake, images_real
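A note on the pair returned above: images_real is a hard, detached silhouette and images_fake comes from the custom forward_eff_render, so only the latter carries gradients. A minimal, hypothetical training sketch (renderer, vertices, faces, and silhouette_target are assumed names, not taken from the source):

    import torch.nn.functional as F

    # Only images_fake is differentiable; images_real is detached and serves
    # as a reference rendering.
    images_fake, images_real = renderer.render_silhouettes(vertices, faces)
    loss = F.mse_loss(images_fake, silhouette_target)
    loss.backward()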
Example #3
    def render_depth(self, vertices, faces):
        # fill back
        if self.fill_back:
            faces = torch.cat(
                (faces, faces[:, :,
                              list(reversed(range(faces.shape[-1])))]),
                dim=1).detach()

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'look':
            vertices = nr.look(vertices, self.eye, self.camera_direction)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'projection':
            vertices = nr.projection(vertices, self.P, self.dist_coeffs,
                                     self.orig_size)

        # rasterization
        faces = nr.vertices_to_faces(vertices, faces)
        images = nr.rasterize_depth(faces, self.image_size, self.anti_aliasing)
        return images
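For context, a hypothetical call sketch for render_depth above (renderer, vertices, and faces are assumed to exist; the square output shape follows neural_renderer's convention):

    # Render a depth map and normalize it to [0, 1] for visualization.
    depth = renderer.render_depth(vertices, faces)   # (batch, image_size, image_size)
    d = depth[0]
    d = (d - d.min()) / (d.max() - d.min() + 1e-8)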
Example #4
    def project(self,
                vertices,
                K=None,
                R=None,
                t=None,
                dist_coeffs=None,
                orig_size=None):
        # viewpoint transformation
        if self.camera_mode == "look_at":
            vertices = nr.look_at(vertices, self.eye)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == "look":
            vertices = nr.look(vertices, self.eye, self.camera_direction)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == "projection":
            if K is None:
                K = self.K
            if R is None:
                R = self.R
            if t is None:
                t = self.t
            if dist_coeffs is None:
                dist_coeffs = self.dist_coeffs
            if orig_size is None:
                orig_size = self.orig_size
            vertices = nr.projection(vertices, K, R, t, dist_coeffs, orig_size)
        return vertices
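A hypothetical call sketch for project in 'projection' mode, with explicitly constructed pinhole parameters (renderer and vertices are assumed names; the batched shapes of K, R, t, and dist_coeffs follow neural_renderer's projection convention):

    import torch

    K = torch.tensor([[[500.0, 0.0, 128.0],
                       [0.0, 500.0, 128.0],
                       [0.0, 0.0, 1.0]]])      # (1, 3, 3) intrinsics
    R = torch.eye(3).unsqueeze(0)              # (1, 3, 3) rotation
    t = torch.zeros(1, 1, 3)                   # (1, 1, 3) translation
    dist = torch.zeros(1, 5)                   # no lens distortion
    projected = renderer.project(vertices, K=K, R=R, t=t,
                                 dist_coeffs=dist, orig_size=256)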
Example #5
    def render_silhouettes(self, vertices, faces, K=None, R=None, t=None, dist_coeffs=None, orig_size=None):

        # fill back
        if self.fill_back:
            faces = torch.cat((faces, faces[:, :, list(reversed(range(faces.shape[-1])))]), dim=1)

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'look':
            vertices = nr.look(vertices, self.eye, self.camera_direction)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'projection':
            if K is None:
                K = self.K
            if R is None:
                R = self.R
            if t is None:
                t = self.t
            if dist_coeffs is None:
                dist_coeffs = self.dist_coeffs
            if orig_size is None:
                orig_size = self.orig_size
            vertices = nr.projection(vertices, K, R, t, dist_coeffs, orig_size)

        # rasterization
        faces = nr.vertices_to_faces(vertices, faces)
        images = nr.rasterize_silhouettes(faces, self.image_size, self.anti_aliasing)
        return images
Example #6
    def look_at_verts(self, poses):
        verts, joints, joints_36m = self.smpl_p.get_all_from_pose(poses)

        verts_lookat = nr.look_at(verts, self.renderer.eye)
        verts_proj = nr.perspective(verts_lookat)
        verts_proj *= 1024/5
        verts_proj += self.image_size/2

        joints_lookat = nr.look_at(joints_36m, self.renderer.eye)
        joints_proj = nr.perspective(joints_lookat)
        joints_proj *= 1024/5
        joints_proj += self.image_size/2
        return verts_proj, joints_proj, verts, joints, joints_36m
Example #7
    def render(self, vertices, faces, textures):
        # fill back
        if self.fill_back:
            faces = cf.concat((faces, faces[:, :, ::-1]), axis=1).data
            textures = cf.concat((textures, textures.transpose((0, 1, 4, 3, 2, 5))), axis=1)

        # lighting
        faces_lighting = neural_renderer.vertices_to_faces(vertices, faces)
        textures = neural_renderer.lighting(
            faces_lighting,
            textures,
            self.light_intensity_ambient,
            self.light_intensity_directional,
            self.light_color_ambient,
            self.light_color_directional,
            self.light_direction)

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = neural_renderer.look_at(vertices, self.eye)
        elif self.camera_mode == 'look':
            vertices = neural_renderer.look(vertices, self.eye, self.camera_direction)

        # perspective transformation
        if self.perspective:
            vertices = neural_renderer.perspective(vertices, angle=self.viewing_angle)

        # rasterization
        faces = neural_renderer.vertices_to_faces(vertices, faces)
        images = neural_renderer.rasterize(
            faces, textures, self.image_size, self.anti_aliasing, self.near, self.far, self.rasterizer_eps,
            self.background_color)
        return images
Example #8
    def test_case1(self):
        v_in = [1, 2, 10]
        v_out = [np.sqrt(3) / 10, 2 * np.sqrt(3) / 10, 10]
        vertices = np.array(v_in, 'float32')
        vertices = vertices[None, None, :]
        transformed = neural_renderer.perspective(vertices)
        chainer.testing.assert_allclose(transformed.data.flatten(), np.array(v_out, 'float32'))
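The expected values in this test follow from how perspective works with its default 30-degree viewing angle: x and y are divided by z * tan(angle) while z is kept, so [1, 2, 10] maps to [sqrt(3)/10, 2*sqrt(3)/10, 10] because 1/tan(30°) = sqrt(3). A quick standalone check:

    import numpy as np

    x, y, z = 1.0, 2.0, 10.0
    scale = 1.0 / (z * np.tan(np.radians(30.0)))   # = sqrt(3) / 10
    print(x * scale, y * scale, z)                 # ~0.1732, ~0.3464, 10.0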
Example #9
    def render_normal(self, vertices, faces):
        # fill back
        if self.fill_back:
            faces = cf.concat((faces, faces[:, :, ::-1]), axis=1).data

        # normal
        faces_normal = nr.vertices_to_faces(vertices, faces)

        (bs, nf) = faces_normal.shape[:2]
        faces_normal = faces_normal.reshape((bs * nf, 3, 3))
        v10 = faces_normal[:, 0] - faces_normal[:, 1]
        v12 = faces_normal[:, 2] - faces_normal[:, 1]
        normals = cf.normalize(nr.cross(v10, v12))
        normals = normals.reshape((bs, nf, 3))

        textures = normals[:, :, None, None, None, :]
        textures = cf.tile(textures, (1, 1, 2, 2, 2, 1))

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
        elif self.camera_mode == 'look':
            vertices = nr.look(vertices, self.eye, self.camera_direction, self.up)

        # perspective transformation
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)

        # rasterization
        faces = nr.vertices_to_faces(vertices, faces)
        images = nr.rasterize(
            faces, textures, self.image_size, self.anti_aliasing, self.near, self.far, self.rasterizer_eps,
            self.background_color)
        return images
Example #10
    def render_silhouettes(self, vertices, faces):
        # fill back
        if self.fill_back:
            faces = cf.concat((faces, faces[:, :, ::-1]), axis=1).data

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = neural_renderer.look_at(vertices, self.eye)
        elif self.camera_mode == 'look':
            vertices = neural_renderer.look(vertices, self.eye,
                                            self.camera_direction)

        # perspective transformation
        if self.perspective:
            vertices = neural_renderer.perspective(vertices,
                                                   angle=self.viewing_angle)

        # rasterization
        faces = neural_renderer.vertices_to_faces(vertices, faces)
        # ==== TM changes ====
        results_dict = neural_renderer.rasterize_silhouettes(
            faces, self.image_size, self.anti_aliasing)
        masks = results_dict['alpha']
        face_index_map = results_dict['face_index_map']
        weight_map = results_dict['weight_map']
        sampling_weight_map = results_dict['sampling_weight_map']
        # ==== Making another dictionary (just for clarity) ====
        return_dict = dict()
        return_dict['masks'] = masks
        return_dict['face_index_map'] = face_index_map
        return_dict['weight_map'] = weight_map
        return_dict['sampling_weight_map'] = sampling_weight_map
        return return_dict
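A hypothetical usage sketch for the modified silhouette renderer above (renderer, vertices, and faces are assumed names; treating -1 as the background value in the face index map follows neural_renderer's usual convention and is an assumption about this fork):

    out = renderer.render_silhouettes(vertices, faces)
    masks = out['masks']                          # soft silhouettes
    # Hard coverage mask from per-pixel face indices, assuming background
    # pixels store -1.
    hard_mask = out['face_index_map'] >= 0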
Example #11
    def render(self, vertices, faces, textures):
        # fill back
        if self.fill_back:
            faces = cf.concat((faces, faces[:, :, ::-1]), axis=1).data
            textures = cf.concat(
                (textures, textures.transpose((0, 1, 4, 3, 2, 5))), axis=1)

        # lighting
        faces_lighting = neural_renderer.vertices_to_faces(vertices, faces)
        textures = neural_renderer.lighting(faces_lighting, textures,
                                            self.light_intensity_ambient,
                                            self.light_intensity_directional,
                                            self.light_color_ambient,
                                            self.light_color_directional,
                                            self.light_direction)

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = neural_renderer.look_at(vertices, self.eye)
        elif self.camera_mode == 'look':
            vertices = neural_renderer.look(vertices, self.eye,
                                            self.camera_direction)

        # perspective transformation
        if self.perspective:
            vertices = neural_renderer.perspective(vertices,
                                                   angle=self.viewing_angle)

        # rasterization
        faces = neural_renderer.vertices_to_faces(vertices, faces)
        images = neural_renderer.rasterize(faces, textures, self.image_size,
                                           self.anti_aliasing, self.near,
                                           self.far, self.rasterizer_eps,
                                           self.background_color)
        return images
Example #12
    def test_case1(self):
        v_in = [1, 2, 10]
        v_out = [np.sqrt(3) / 10, 2 * np.sqrt(3) / 10, 10]
        vertices = np.array(v_in, 'float32')
        vertices = vertices[None, None, :]
        transformed = neural_renderer.perspective(vertices)
        chainer.testing.assert_allclose(transformed.data.flatten(),
                                        np.array(v_out, 'float32'))
Example #13
    def render(self, vertices, faces, textures):
        # fill back
        if self.fill_back:
            faces = torch.cat(
                (faces, faces[:, :,
                              list(reversed(range(faces.shape[-1])))]),
                dim=1).detach()
            textures = torch.cat((textures, textures.permute(
                (0, 1, 4, 3, 2, 5))),
                                 dim=1)

        # lighting
        faces_lighting = nr.vertices_to_faces(vertices, faces)
        textures = nr.lighting(faces_lighting, textures,
                               self.light_intensity_ambient,
                               self.light_intensity_directional,
                               self.light_color_ambient,
                               self.light_color_directional,
                               self.light_direction)

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'look':
            vertices = nr.look(vertices, self.eye, self.camera_direction)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'projection':
            vertices = nr.projection(vertices, self.P, self.dist_coeffs,
                                     self.orig_size)
        elif self.camera_mode == 'projection_by_params':
            vertices = nr.projection_by_params(vertices, self.camera_f,
                                               self.camera_c, self.camera_rt,
                                               self.camera_t, self.dist_coeffs,
                                               self.orig_size)

        # rasterization
        faces = nr.vertices_to_faces(vertices, faces)
        images = nr.rasterize(faces, textures, self.image_size,
                              self.anti_aliasing, self.near, self.far,
                              self.rasterizer_eps, self.background_color)
        return images
Example #14
    def project_points(self,
                       vertices,
                       K=None,
                       R=None,
                       t=None,
                       dist_coeffs=None,
                       orig_size=None):
        if self.camera_mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'look':
            vertices = nr.look(vertices, self.eye, self.camera_direction)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'projection':
            if K is None:
                K = self.K
            if R is None:
                R = self.R
            if t is None:
                t = self.t
            if dist_coeffs is None:
                dist_coeffs = self.dist_coeffs
            if orig_size is None:
                orig_size = self.orig_size
            vertices = nr.projection(vertices, K, R, t, dist_coeffs, orig_size)
        elif self.camera_mode == 'weak_projection':
            if K is None:
                K = self.K
            if R is None:
                R = self.R
            if t is None:
                t = self.t

            if orig_size is None:
                orig_size = self.orig_size
            vertices = weak_projection(vertices, K, R, t, orig_size)

            vertices[:, :,
                     0] = (orig_size / 2) * vertices[:, :, 0] + (orig_size / 2)
            vertices[:, :,
                     1] = orig_size - ((orig_size / 2) * vertices[:, :, 1] +
                                       (orig_size / 2))
        return vertices
Example #15
    def render_color(self, vertices, faces, textures):
        # lighting
        # faces_lighting = nr.vertices_to_faces(vertices, faces)
        # textures = nr.lighting(
        #     faces_lighting,
        #     textures,
        #     self.light_intensity_ambient,
        #     self.light_intensity_directional,
        #     self.light_color_ambient,
        #     self.light_color_directional,
        #     self.light_direction)

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'look':
            vertices = nr.look(vertices, self.eye, self.camera_direction)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'projection':
            vertices = nr.projection(vertices, self.P, self.dist_coeffs,
                                     self.orig_size)

        # rasterization
        # faces_ = nr.vertices_to_faces(vertices, faces)
        # images_fake, dis = self.forward_eff_render_shading(faces_, textures, self.image_size, self.sigma_val, self.sigma_num, self.sigma_mul, self.gamma_val, self.gamma_num, self.gamma_mul, self.near, self.far)

        faces = torch.cat((faces, faces[:, :, [2, 1, 0]]), dim=1).contiguous()
        textures = torch.cat((textures, textures), dim=1).contiguous()
        faces = nr.vertices_to_faces(vertices, faces)
        images_real = nr.rasterize_rgba(faces, textures, self.image_size,
                                        self.anti_aliasing)
        # import pdb;pdb.set_trace()
        images_real = torch.cat(
            [images_real['rgb'], images_real['alpha'].unsqueeze(1)],
            dim=1)  # .detach()

        # import pdb;pdb.set_trace()

        return images_real
Example #16
    def project_points_perspective(self, verts, cams, depth=False):
        verts = self.proj_fn(verts, cams, offset_z=self.offset_z)

        verts = neural_renderer.look_at(verts, self.renderer.eye)
        if self.renderer.perspective:
            verts = neural_renderer.perspective(
                verts, angle=self.renderer.viewing_angle)

        if depth:
            return verts[:, :, :2], verts[:, :, 2]
        else:
            return verts[:, :, :2]
Example #17
    def render_silhouettes(self, vertices, faces):
        if self.camera_mode == 'look_at':
            vertices = neural_renderer.look_at(vertices, self.eye)
        elif self.camera_mode == 'look':
            vertices = neural_renderer.look(vertices, self.eye, self.camera_direction)

        if self.perspective:
            vertices = neural_renderer.perspective(vertices)
        faces = neural_renderer.vertices_to_faces(vertices, faces)
        if self.fill_back:
            faces = cf.concat((faces, faces[:, :, ::-1]), axis=1)
        images = neural_renderer.rasterize_silhouettes(faces, self.image_size, self.anti_aliasing)
        return images
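The faces[:, :, ::-1] concatenation used for fill_back duplicates every triangle with reversed vertex order, which flips the winding (and therefore the face normal) so back-facing geometry is rasterized too. A tiny standalone check of the reversal:

    import numpy as np

    faces = np.array([[[0, 1, 2]]])   # (batch, n_faces, 3) vertex indices
    print(faces[:, :, ::-1])          # [[[2 1 0]]] -- reversed winding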
Example #18
    def transform_vertices(self, vertices, lights=None):
        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = neural_renderer.look_at(vertices, self.viewpoints)
        elif self.camera_mode == 'look':
            vertices = neural_renderer.look(vertices, self.viewpoints,
                                            self.camera_direction)

        # perspective transformation
        if self.perspective:
            vertices = neural_renderer.perspective(vertices,
                                                   angle=self.viewing_angle)

        return vertices
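A minimal standalone sketch of the same look_at + perspective chain on a toy batch, using the PyTorch port of neural_renderer (the eye position and 30-degree angle are illustrative assumptions, not taken from the code above; depending on the build, the tensors may need to live on a CUDA device):

    import torch
    import neural_renderer as nr

    vertices = torch.rand(1, 100, 3) - 0.5            # (batch, n_verts, xyz)
    eye = torch.tensor([[0.0, 0.0, -2.732]])          # single viewpoint
    vertices = nr.look_at(vertices, eye)              # world -> camera frame
    vertices = nr.perspective(vertices, angle=30.0)   # perspective divide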
Example #19
    def render(self, vertices, faces, textures):
        # fill back
        if self.fill_back:
            faces = cf.concat((faces, faces[:, :, ::-1]), axis=1).data
            textures = cf.concat(
                (textures, textures.transpose((0, 1, 4, 3, 2, 5))), axis=1)

        # lighting
        faces_lighting = neural_renderer.vertices_to_faces(vertices, faces)
        textures = neural_renderer.lighting(faces_lighting, textures,
                                            self.light_intensity_ambient,
                                            self.light_intensity_directional,
                                            self.light_color_ambient,
                                            self.light_color_directional,
                                            self.light_direction)

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = neural_renderer.look_at(vertices, self.eye)
        elif self.camera_mode == 'look':
            vertices = neural_renderer.look(vertices, self.eye,
                                            self.camera_direction)

        # perspective transformation
        if self.perspective:
            vertices = neural_renderer.perspective(vertices,
                                                   angle=self.viewing_angle)

        # rasterization
        faces = neural_renderer.vertices_to_faces(vertices, faces)
        # ==== TM changes ====
        results_dict = neural_renderer.rasterize(faces, textures,
                                                 self.image_size,
                                                 self.anti_aliasing, self.near,
                                                 self.far, self.rasterizer_eps,
                                                 self.background_color)
        images = results_dict['rgb']
        face_index_map = results_dict['face_index_map']
        weight_map = results_dict['weight_map']
        sampling_weight_map = results_dict['sampling_weight_map']

        # ==== Making another dictionary (just for clarity) ====
        return_dict = dict()
        return_dict['images'] = images
        return_dict['face_index_map'] = face_index_map
        return_dict['weight_map'] = weight_map
        return_dict['sampling_weight_map'] = sampling_weight_map
        return return_dict
Example #20
    def forward(self, vertices, faces, textures=None, mode=None):
        '''
        Implementation of the forward rendering method.
        The old API is preserved for backward compatibility with the Chainer implementation.
        '''
        _textures = textures
        if mode not in [None, 'silhouettes', 'depth']:
            raise ValueError("mode should be one of None, 'silhouettes' or 'depth'")
        
        # fill back
        if self.fill_back:
            faces = torch.cat((faces, faces[:, :, list(reversed(range(faces.shape[-1])))]), dim=1).detach()
            if _textures is not None:
                _textures = torch.cat((_textures, textures.permute((0, 1, 4, 3, 2, 5))), dim=1)
        
        if textures is not None:
            # lighting
            faces_lighting = nr.vertices_to_faces(vertices, faces)
            _textures = nr.lighting(
                faces_lighting,
                _textures,
                self.light_intensity_ambient,
                self.light_intensity_directional,
                self.light_color_ambient,
                self.light_color_directional,
                self.light_direction)

        # projection
        vertices = nr.projection(vertices, self.camera)
        if self.camera.perspective:
            vertices = nr.perspective(vertices, angle=self.camera.viewing_angle)

        # rasterization
        faces = nr.vertices_to_faces(vertices, faces)

        if mode is None:
            images = nr.rasterize(
                faces, _textures, self.camera.image_size, self.anti_aliasing, self.camera.near, self.camera.far, self.rasterizer_eps,
                self.background_color)
        elif mode == 'silhouettes':
            images = nr.rasterize_silhouettes(faces, self.camera.image_size, self.anti_aliasing)
        elif mode == 'depth':
            images = nr.rasterize_depth(faces, self.camera.image_size, self.anti_aliasing)

        return images
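A hypothetical call sketch for the unified forward method above (renderer, vertices, faces, and textures are assumed to exist; the mode values come from the method's own validation):

    rgb = renderer.forward(vertices, faces, textures)            # full RGB render
    sil = renderer.forward(vertices, faces, mode='silhouettes')  # alpha only
    depth = renderer.forward(vertices, faces, mode='depth')      # depth map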
Example #21
    def render(self, vertices, faces, textures):
        if self.camera_mode == 'look_at':
            vertices = neural_renderer.look_at(vertices, self.eye)
        elif self.camera_mode == 'look':
            vertices = neural_renderer.look(vertices, self.eye, self.camera_direction)

        if self.perspective:
            vertices = neural_renderer.perspective(vertices)
        faces = neural_renderer.vertices_to_faces(vertices, faces)
        if self.fill_back:
            faces = cf.concat((faces, faces[:, :, ::-1]), axis=1)
            textures = cf.concat((textures, textures.transpose((0, 1, 4, 3, 2, 5))), axis=1)
        textures = neural_renderer.lighting(
            faces, textures, self.light_intensity_ambient, self.light_intensity_directional)
        images = neural_renderer.rasterize(
            faces, textures, self.image_size, self.anti_aliasing, self.near, self.far, self.rasterizer_eps,
            self.background_color)
        return images
Example #22
    def render_depth(self, vertices, faces):
        # fill back
        if self.fill_back:
            faces = cf.concat((faces, faces[:, :, ::-1]), axis=1).data

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = neural_renderer.look_at(vertices, self.eye)
        elif self.camera_mode == 'look':
            vertices = neural_renderer.look(vertices, self.eye, self.camera_direction)

        # perspective transformation
        if self.perspective:
            vertices = neural_renderer.perspective(vertices, angle=self.viewing_angle)

        # rasterization
        faces = neural_renderer.vertices_to_faces(vertices, faces)
        images = neural_renderer.rasterize_depth(faces, self.image_size, self.anti_aliasing)
        return images
Example #23
    def render_silhouettes(self, vertices, faces):
        # fill back
        if self.fill_back:
            faces = cf.concat((faces, faces[:, :, ::-1]), axis=1).data

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = neural_renderer.look_at(vertices, self.eye)
        elif self.camera_mode == 'look':
            vertices = neural_renderer.look(vertices, self.eye, self.camera_direction)

        # perspective transformation
        if self.perspective:
            vertices = neural_renderer.perspective(vertices, angle=self.viewing_angle)

        # rasterization
        faces = neural_renderer.vertices_to_faces(vertices, faces)
        images = neural_renderer.rasterize_silhouettes(faces, self.image_size, self.anti_aliasing)
        return images
Example #24
    def render_rgb(self,
                   vertices,
                   faces,
                   textures,
                   K=None,
                   R=None,
                   t=None,
                   dist_coeffs=None,
                   orig_size=None,
                   lighting=True):
        # fill back
        if self.fill_back:
            faces = torch.cat(
                (faces, faces[:, :,
                              list(reversed(range(faces.shape[-1])))]),
                dim=1).detach()
            textures = torch.cat((textures, textures.permute(
                (0, 1, 4, 3, 2, 5))),
                                 dim=1)

        # lighting
        if lighting and textures.size()[-1] == 3:  # 3 channels (RGB)
            faces_lighting = nr.vertices_to_faces(vertices, faces)
            textures = nr.lighting(faces_lighting, textures,
                                   self.light_intensity_ambient,
                                   self.light_intensity_directional,
                                   self.light_color_ambient,
                                   self.light_color_directional,
                                   self.light_direction)

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'look':
            vertices = nr.look(vertices, self.eye, self.camera_direction)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'projection':
            if K is None:
                K = self.K
            if R is None:
                R = self.R
            if t is None:
                t = self.t
            if dist_coeffs is None:
                dist_coeffs = self.dist_coeffs
            if orig_size is None:
                orig_size = self.orig_size
            vertices = nr.projection(vertices, K, R, t, dist_coeffs, orig_size)

        # rasterization
        faces = nr.vertices_to_faces(vertices, faces)
        if isinstance(self.background_color, (tuple, list)):
            background = self.background_color
        else:
            assert (isinstance(self.background_color, (float, int)))
            background = [
                self.background_color for _ in range(textures.size()[-1])
            ]
        images = nr.rasterize(faces, textures, self.image_size,
                              self.anti_aliasing, self.near, self.far,
                              self.rasterizer_eps, background)
        return images
Example #25
    def render(self,
               vertices,
               faces,
               textures,
               K=None,
               R=None,
               t=None,
               dist_coeffs=None,
               orig_size=None):
        # fill back
        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'look':
            vertices = nr.look(vertices, self.eye, self.camera_direction)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == 'projection':
            if K is None:
                K = self.K
            if R is None:
                R = self.R
            if t is None:
                t = self.t
            if dist_coeffs is None:
                dist_coeffs = self.dist_coeffs
            if orig_size is None:
                orig_size = self.orig_size
            vertices = nr.projection(vertices, K, R, t, dist_coeffs, orig_size)
        elif self.camera_mode == 'weak_projection':
            if K is None:
                K = self.K
            if R is None:
                R = self.R
            if t is None:
                t = self.t

            if orig_size is None:
                orig_size = self.orig_size
            vertices = weak_projection(vertices, K, R, t, orig_size)
            # Regularize so that the vector from the cheek to the nose tip makes an
            # angle of less than 90 degrees with the negative z-axis:
            # (a, b, c) . (0, 0, -1) > 0  ->  c < 0
            # Also shift all z values onto the positive axis.
            from MorphableModelFitting.models.face_model import FaceModelBFM
            keypoints = FaceModelBFM().keypoints
            front_idx, back_idx_1, back_idx_2 = (keypoints[30], keypoints[0],
                                                 keypoints[16])
            for i in range(len(vertices)):
                back_z = (vertices[i, back_idx_1, 2] +
                          vertices[i, back_idx_2, 2]) / 2
                if (vertices[i, front_idx, 2] - back_z) > 0:
                    vertices[i, :, 2] = -vertices[i, :, 2]
            vertices[:, :, 2] = vertices[:, :, 2] - torch.min(
                vertices[:, :, 2], dim=1)[0].unsqueeze(1) + 1

        # lighting
        if self.light_mode == "parallel":
            textures = texture_from_point2faces(faces, textures) / 255
            if self.fill_back:
                faces = torch.cat(
                    (faces, faces[:, :,
                                  list(reversed(range(faces.shape[-1])))]),
                    dim=1).detach()
                textures = torch.cat(
                    (textures, textures.permute((0, 1, 4, 3, 2, 5))), dim=1)
            faces_lighting = nr.vertices_to_faces(vertices, faces)
            textures = nr.lighting(faces_lighting, textures,
                                   self.light_intensity_ambient,
                                   self.light_intensity_directional,
                                   self.light_color_ambient,
                                   self.light_color_directional,
                                   self.light_direction)
        elif self.light_mode == "SH":
            point_buf = torch.from_numpy(self.facemodel.point_buf).long() - 1
            point_norm = compute_point_norm(
                vertices, faces[0][:,
                                   list(reversed(range(faces.shape[-1])))],
                point_buf)

            # texture = texture_from_point2faces(triangles, texture).reshape((batch, -1, 3))
            textures, _ = Illumination_SH(textures, point_norm, self.SH_coeff)
            textures = texture_from_point2faces(faces, textures) / 255
            if self.fill_back:
                faces = torch.cat(
                    (faces, faces[:, :,
                                  list(reversed(range(faces.shape[-1])))]),
                    dim=1).detach()
                textures = torch.cat(
                    (textures, textures.permute((0, 1, 4, 3, 2, 5))), dim=1)
        else:
            return None

        # rasterization
        faces = nr.vertices_to_faces(vertices, faces)
        out = nr.rasterize_rgbad(faces, textures, self.image_size,
                                 self.anti_aliasing, self.near, self.far,
                                 self.rasterizer_eps, self.background_color)
        return out['rgb'], out['depth'], out['alpha']
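The weak_projection helper used above is not shown in this example. As a rough, hypothetical sketch of what a weak-perspective projection typically does (the single scale factor, the t shape of (batch, 1, 3), and the decision to keep z for depth tests are assumptions, not this code's actual definition):

    import torch

    def weak_projection_sketch(vertices, scale, R, t):
        # rigid transform: (B, N, 3) @ R^T + t, with t broadcast as (B, 1, 3)
        v = torch.matmul(vertices, R.transpose(1, 2)) + t
        # weak perspective: uniform scaling of x and y, depth kept as-is
        return torch.cat([scale * v[:, :, :2], v[:, :, 2:]], dim=2)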
Example #26
    def render(
        self,
        vertices,
        faces,
        textures,
        K=None,
        R=None,
        t=None,
        dist_coeffs=None,
        orig_size=None,
        detach_renders=False,
    ):
        # fill back
        if self.fill_back:
            faces = torch.cat(
                (faces, faces[:, :,
                              list(reversed(range(faces.shape[-1])))]),
                dim=1).detach()
            textures = torch.cat((textures, textures.permute(
                (0, 1, 4, 3, 2, 5))),
                                 dim=1)

        # lighting
        if not self.no_light:
            faces_lighting = nr.vertices_to_faces(vertices, faces)
            textures = nr.lighting(
                faces_lighting,
                textures,
                self.light_intensity_ambient,
                self.light_intensity_directional,
                self.light_color_ambient,
                self.light_color_directional,
                self.light_direction,
            )

        # viewpoint transformation
        if self.camera_mode == "look_at":
            vertices = nr.look_at(vertices, self.eye)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == "look":
            vertices = nr.look(vertices, self.eye, self.camera_direction)
            # perspective transformation
            if self.perspective:
                vertices = nr.perspective(vertices, angle=self.viewing_angle)
        elif self.camera_mode == "projection":
            vertices = self.project(vertices,
                                    K=K,
                                    R=R,
                                    t=t,
                                    dist_coeffs=dist_coeffs,
                                    orig_size=orig_size)

        # rasterization
        faces = nr.vertices_to_faces(vertices, faces)
        if detach_renders:
            faces = faces.detach()
        out = rasterize.rasterize_rgbad(
            faces,
            textures,
            self.image_size,
            self.anti_aliasing,
            self.near,
            self.far,
            self.rasterizer_eps,
            self.background_color,
        )
        return out
Example #27
    def test_case1(self):
        vertices = torch.from_numpy(np.array([1, 2, 10], np.float32))
        v_out = np.array([np.sqrt(3) / 10, 2 * np.sqrt(3) / 10, 10], np.float32)
        vertices = vertices[None, None, :]
        transformed = nr.perspective(vertices)
        assert np.allclose(transformed.data.squeeze().numpy(), v_out)