Example No. 1
    def render_shape(self,
                     vertices,
                     transformed_vertices,
                     images=None,
                     lights=None):
        batch_size = vertices.shape[0]
        if lights is None:
            light_positions = torch.tensor([[-0.1, -0.1, 0.2],
                                            [0, 0, 1]])[None, :, :].expand(
                                                batch_size, -1, -1).float()
            light_intensities = torch.ones_like(light_positions).float()
            lights = torch.cat((light_positions, light_intensities),
                               2).to(vertices.device)

        ## rasterizer near/far planes are 0 and 100; shift the mesh so its minimum z is greater than 0
        transformed_vertices[:, :, 2] = transformed_vertices[:, :, 2] + 10

        # Attributes
        face_vertices = util.face_vertices(
            vertices, self.faces.expand(batch_size, -1, -1))
        normals = util.vertex_normals(vertices,
                                      self.faces.expand(batch_size, -1, -1))
        face_normals = util.face_vertices(
            normals, self.faces.expand(batch_size, -1, -1))
        transformed_normals = util.vertex_normals(
            transformed_vertices, self.faces.expand(batch_size, -1, -1))
        transformed_face_normals = util.face_vertices(
            transformed_normals, self.faces.expand(batch_size, -1, -1))
        # render
        attributes = torch.cat([
            self.face_colors.expand(batch_size, -1, -1, -1),
            transformed_face_normals.detach(),
            face_vertices.detach(),
            face_normals.detach()
        ], -1)
        rendering = self.rasterizer(transformed_vertices,
                                    self.faces.expand(batch_size, -1, -1),
                                    attributes)
        # albedo
        albedo_images = rendering[:, :3, :, :]
        # shading
        normal_images = rendering[:, 9:12, :, :].detach()
        if lights.shape[1] == 9:
            shading_images = self.add_SHlight(normal_images, lights)
        else:
            print('directional')
            shading = self.add_directionlight(
                normal_images.permute(0, 2, 3, 1).reshape([batch_size, -1, 3]),
                lights)

            shading_images = shading.reshape([
                batch_size, lights.shape[1], albedo_images.shape[2],
                albedo_images.shape[3], 3
            ]).permute(0, 1, 4, 2, 3)
            shading_images = shading_images.mean(1)
        images = albedo_images * shading_images

        return images

    def __init__(self, image_size, obj_filename, uv_size=256, config=None):
        super(Renderer, self).__init__()
        self.image_size = image_size
        self.uv_size = uv_size

        verts, faces, aux = load_obj(obj_filename)
        uvcoords = aux.verts_uvs[None, ...]  # (N, V, 2)
        uvfaces = faces.textures_idx[None, ...]  # (N, F, 3)
        faces = faces.verts_idx[None, ...]
        if config is not None:
            _tp = np.load(config.trim_path, allow_pickle=True)
            tp = {}
            for k in _tp.keys():
                tp[k] = torch.LongTensor(_tp[k])
            verts = verts[tp['idx_verts']]
            #uvcoords = uvcoords[:,tp['idx_verts']]
            uvfaces = uvfaces[:, tp['idx_faces']]
            faces = tp['map_verts'][faces[:, tp['idx_faces']]]
        self.rasterizer = Pytorch3dRasterizer(image_size)
        self.uv_rasterizer = Pytorch3dRasterizer(uv_size)

        # faces
        self.register_buffer('faces', faces)
        self.register_buffer('raw_uvcoords', uvcoords)

        # uv coords
        uvcoords = torch.cat([uvcoords, uvcoords[:, :, 0:1] * 0. + 1.], -1)  # [bz, ntv, 3]
        uvcoords = uvcoords * 2 - 1
        uvcoords[..., 1] = -uvcoords[..., 1]
        face_uvcoords = util.face_vertices(uvcoords, uvfaces)
        self.register_buffer('uvcoords', uvcoords)
        self.register_buffer('uvfaces', uvfaces)
        self.register_buffer('face_uvcoords', face_uvcoords)

        # shape colors
        colors = torch.tensor([74, 120, 168])[None, None, :].repeat(
            1, faces.max() + 1, 1).float() / 255.
        face_colors = util.face_vertices(colors, faces)
        self.register_buffer('face_colors', face_colors)

        ## lighting
        pi = np.pi
        # Per-coefficient constants for 9-term spherical harmonic shading
        constant_factor = torch.tensor([
            1 / np.sqrt(4 * pi),
            ((2 * pi) / 3) * np.sqrt(3 / (4 * pi)),
            ((2 * pi) / 3) * np.sqrt(3 / (4 * pi)),
            ((2 * pi) / 3) * np.sqrt(3 / (4 * pi)),
            (pi / 4) * 3 * np.sqrt(5 / (12 * pi)),
            (pi / 4) * 3 * np.sqrt(5 / (12 * pi)),
            (pi / 4) * 3 * np.sqrt(5 / (12 * pi)),
            (pi / 4) * (3 / 2) * np.sqrt(5 / (12 * pi)),
            (pi / 4) * (1 / 2) * np.sqrt(5 / (4 * pi)),
        ])
        self.register_buffer('constant_factor', constant_factor)
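
render_shape dispatches to self.add_SHlight when a 9-coefficient lighting tensor is passed, but that method is not part of this listing. Below is a minimal sketch of what it could look like, assuming the standard 9-term spherical harmonic shading that constant_factor above is built for; the basis ordering is an assumption, not taken from the listing:

    def add_SHlight(self, normal_images, sh_coeff):
        '''
        Sketch (assumed interface): normal_images [bz, 3, h, w], sh_coeff [bz, 9, 3].
        '''
        N = normal_images
        # Evaluate the 9 SH basis functions at each pixel's normal.
        sh = torch.stack([
            N[:, 0] * 0. + 1., N[:, 0], N[:, 1], N[:, 2],
            N[:, 0] * N[:, 1], N[:, 0] * N[:, 2], N[:, 1] * N[:, 2],
            N[:, 0] ** 2 - N[:, 1] ** 2, 3 * (N[:, 2] ** 2) - 1
        ], 1)  # [bz, 9, h, w]
        sh = sh * self.constant_factor[None, :, None, None]
        # Weight each basis image by its RGB coefficient and sum over the 9 terms.
        shading = torch.sum(sh_coeff[:, :, :, None, None] * sh[:, :, None, :, :], 1)  # [bz, 3, h, w]
        return shading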
Example No. 3
    def __init__(self, image_size, obj_filename, uv_size=256):
        super(Renderer, self).__init__()
        self.image_size = image_size
        self.uv_size = uv_size

        verts, faces, aux = load_obj(obj_filename)
        uvcoords = aux.verts_uvs[None, ...]  # (N, V, 2)
        uvfaces = faces.textures_idx[None, ...]  # (N, F, 3)
        faces = faces.verts_idx[None, ...]
        self.rasterizer = Pytorch3dRasterizer(image_size)
        self.uv_rasterizer = Pytorch3dRasterizer(uv_size)

        # faces
        self.register_buffer('faces', faces)
        self.register_buffer('raw_uvcoords', uvcoords)

        # uv coords
        uvcoords = torch.cat([uvcoords, uvcoords[:, :, 0:1] * 0. + 1.],
                             -1)  # [bz, ntv, 3]
        uvcoords = uvcoords * 2 - 1
        uvcoords[..., 1] = -uvcoords[..., 1]
        face_uvcoords = util.face_vertices(uvcoords, uvfaces)
        self.register_buffer('uvcoords', uvcoords)
        self.register_buffer('uvfaces', uvfaces)
        self.register_buffer('face_uvcoords', face_uvcoords)

        # shape colors
        colors = torch.tensor([74, 120, 168])[None, None, :].repeat(
            1, faces.max() + 1, 1).float() / 255.
        face_colors = util.face_vertices(colors, faces)
        self.register_buffer('face_colors', face_colors)

        ## lighting
        pi = np.pi
        # Per-coefficient constants for 9-term spherical harmonic shading
        constant_factor = torch.tensor([
            1 / np.sqrt(4 * pi),
            ((2 * pi) / 3) * np.sqrt(3 / (4 * pi)),
            ((2 * pi) / 3) * np.sqrt(3 / (4 * pi)),
            ((2 * pi) / 3) * np.sqrt(3 / (4 * pi)),
            (pi / 4) * 3 * np.sqrt(5 / (12 * pi)),
            (pi / 4) * 3 * np.sqrt(5 / (12 * pi)),
            (pi / 4) * 3 * np.sqrt(5 / (12 * pi)),
            (pi / 4) * (3 / 2) * np.sqrt(5 / (12 * pi)),
            (pi / 4) * (1 / 2) * np.sqrt(5 / (4 * pi)),
        ])
        self.register_buffer('constant_factor', constant_factor)

    def world2uv(self, vertices):
        '''
        Sample vertices from world space into UV space.
        uv_vertices: [bz, 3, h, w]
        '''
        batch_size = vertices.shape[0]
        face_vertices = util.face_vertices(vertices, self.faces.expand(batch_size, -1, -1)).clone().detach()
        uv_vertices = self.uv_rasterizer(self.uvcoords.expand(batch_size, -1, -1),
                                         self.uvfaces.expand(batch_size, -1, -1), face_vertices)[:, :3]

        return uv_vertices
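
A hypothetical driver for world2uv; the obj path and the random geometry below are placeholders, not from the listing:

# Hypothetical usage: rasterize per-vertex world positions into a UV-space position map.
renderer = Renderer(image_size=224, obj_filename='head_template.obj', uv_size=256)
num_verts = int(renderer.faces.max()) + 1
vertices = torch.rand(2, num_verts, 3)         # [bz, V, 3] placeholder world-space geometry
uv_position_map = renderer.world2uv(vertices)  # [bz, 3, 256, 256]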
    def render_normal(self, transformed_vertices, normals):
        '''
        -- rendering normal
        '''
        batch_size = normals.shape[0]

        # Attributes
        attributes = util.face_vertices(normals, self.faces.expand(batch_size, -1, -1))
        # rasterize
        rendering = self.rasterizer(transformed_vertices, self.faces.expand(batch_size, -1, -1), attributes)

        ####
        alpha_images = rendering[:, -1, :, :][:, None, :, :].detach()
        normal_images = rendering[:, :3, :, :]
        return normal_images
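
A hypothetical call to render_normal, reusing the normal computation and the z-shift convention from the other methods (variable names are placeholders):

# Hypothetical usage: render per-pixel normals for an already-projected mesh.
batch_size = vertices.shape[0]
normals = util.vertex_normals(vertices, renderer.faces.expand(batch_size, -1, -1))  # [bz, V, 3]
transformed_vertices[:, :, 2] = transformed_vertices[:, :, 2] + 10  # keep min z > 0 for the rasterizer
normal_images = renderer.render_normal(transformed_vertices, normals)  # [bz, 3, h, w]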
Example No. 6
    def forward(self, cam, vertices, images, cam_new):
        full_vertices, N_bd = get_full_verts(vertices)
        t_vertices = util.batch_orth_proj(full_vertices, cam)
        t_vertices[..., 1:] = -t_vertices[..., 1:]
        t_vertices[..., 2] = t_vertices[..., 2] + 10
        t_vertices = image_meshing(t_vertices, N_bd)
        t_vertices[..., :2] = torch.clamp(t_vertices[..., :2], -1, 1)
        t_vertices[:, :, 2] = t_vertices[:, :, 2] - 9
        batch_size = vertices.shape[0]
        ## rasterizer near/far planes are 0 and 100; shift the mesh so its minimum z is greater than 0
        uvcoords = t_vertices.clone()
        # Attributes
        uvcoords = torch.cat(
            [uvcoords[:, :, :2], uvcoords[:, :, 0:1] * 0. + 1.],
            -1)  # [bz, ntv, 3]
        face_vertices = util.face_vertices(
            uvcoords, self.faces.expand(batch_size, -1, -1))
        # render
        attributes = face_vertices.detach()
        full_vertices, N_bd = get_full_verts(vertices)
        transformed_vertices = util.batch_orth_proj(full_vertices, cam_new)
        transformed_vertices[..., 1:] = -transformed_vertices[..., 1:]
        transformed_vertices[..., 2] = transformed_vertices[..., 2] + 10
        transformed_vertices = image_meshing(transformed_vertices, N_bd)
        transformed_vertices[..., :2] = torch.clamp(
            transformed_vertices[..., :2], -1, 1)
        rendering = self.rasterizer(transformed_vertices,
                                    self.faces.expand(batch_size, -1, -1),
                                    attributes)

        alpha_images = rendering[:, -1, :, :][:, None, :, :].detach()

        # albedo
        uvcoords_images = rendering[:, :3, :, :]
        grid = (uvcoords_images).permute(0, 2, 3, 1)[:, :, :, :2]

        results = F.grid_sample(images, grid, align_corners=False)
        return {'rotate_images': results}
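
This first forward synthesizes a new view by warping: the source view's clip-space xy coordinates are rasterized from the target camera cam_new and used as a grid_sample grid into the source images. A hypothetical driver; the module instance name and the [scale, tx, ty] orthographic camera layout are assumptions inferred from util.batch_orth_proj above:

# Hypothetical usage: re-render the input images from a slightly shifted camera.
cam = torch.tensor([[9.0, 0.0, 0.0]])        # assumed [scale, tx, ty] source camera
cam_new = torch.tensor([[9.0, 0.05, 0.0]])   # target camera, shifted along x
out = warper(cam, vertices, images, cam_new) # images: [bz, 3, h, w]
rotated_images = out['rotate_images']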
    def forward(self, vertices, transformed_vertices, albedos, lights=None, light_type='point'):
        '''
        lights:
            spherical harmonics: [N, 9(sh_coeff), 3(rgb)]
        vertices: [N, V, 3], vertices in world space, used to compute normals for shading
        transformed_vertices: [N, V, 3], range (-1, 1), projected vertices used for rasterization
        '''
        batch_size = vertices.shape[0]
        ## rasterizer near/far planes are 0 and 100; shift the mesh so its minimum z is greater than 0
        transformed_vertices[:, :, 2] = transformed_vertices[:, :, 2] + 10

        # Attributes
        face_vertices = util.face_vertices(vertices, self.faces.expand(batch_size, -1, -1))
        normals = util.vertex_normals(vertices, self.faces.expand(batch_size, -1, -1))
        face_normals = util.face_vertices(normals, self.faces.expand(batch_size, -1, -1))
        transformed_normals = util.vertex_normals(transformed_vertices, self.faces.expand(batch_size, -1, -1))
        transformed_face_normals = util.face_vertices(transformed_normals, self.faces.expand(batch_size, -1, -1))

        # render
        attributes = torch.cat([self.face_uvcoords.expand(batch_size, -1, -1, -1), transformed_face_normals.detach(),
                                face_vertices.detach(), face_normals.detach()], -1)
        rendering = self.rasterizer(transformed_vertices, self.faces.expand(batch_size, -1, -1), attributes)

        alpha_images = rendering[:, -1, :, :][:, None, :, :].detach()

        # albedo
        uvcoords_images = rendering[:, :3, :, :]
        grid = (uvcoords_images).permute(0, 2, 3, 1)[:, :, :, :2]

        albedo_images = F.grid_sample(albedos, grid, align_corners=False)

        # remove inner mouth region
        transformed_normal_map = rendering[:, 3:6, :, :].detach()
        pos_mask = (transformed_normal_map[:, 2:, :, :] < -0.05).float()

        # shading
        if lights is not None:
            normal_images = rendering[:, 9:12, :, :].detach()
            if lights.shape[1] == 9:
                shading_images = self.add_SHlight(normal_images, lights)
            else:
                if light_type == 'point':
                    vertice_images = rendering[:, 6:9, :, :].detach()
                    shading = self.add_pointlight(
                        vertice_images.permute(0, 2, 3, 1).reshape([batch_size, -1, 3]),
                        normal_images.permute(0, 2, 3, 1).reshape([batch_size, -1, 3]),
                        lights)
                    shading_images = shading.reshape([
                        batch_size, lights.shape[1],
                        albedo_images.shape[2], albedo_images.shape[3], 3
                    ]).permute(0, 1, 4, 2, 3)
                    shading_images = shading_images.mean(1)
                else:
                    shading = self.add_directionlight(
                        normal_images.permute(0, 2, 3, 1).reshape([batch_size, -1, 3]),
                        lights)
                    shading_images = shading.reshape([
                        batch_size, lights.shape[1],
                        albedo_images.shape[2], albedo_images.shape[3], 3
                    ]).permute(0, 1, 4, 2, 3)
                    shading_images = shading_images.mean(1)
            images = albedo_images * shading_images
        else:
            images = albedo_images
            shading_images = images.detach() * 0.

        outputs = {
            'images': images * alpha_images,
            'albedo_images': albedo_images,
            'alpha_images': alpha_images,
            'pos_mask': pos_mask,
            'shading_images': shading_images,
            'grid': grid,
            'normals': normals
        }

        return outputs
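
Both render_shape and this forward rely on self.add_directionlight, which does not appear in the listing. A minimal sketch, assuming each row of lights packs an xyz direction followed by an rgb intensity ([bz, nlight, 6]) and that shading is clamped Lambert; the interface is an assumption:

    def add_directionlight(self, normals, lights):
        '''
        Sketch (assumed interface): normals [bz, nv, 3], lights [bz, nlight, 6].
        Returns shading [bz, nlight, nv, 3].
        '''
        light_direction = lights[:, :, :3]
        light_intensities = lights[:, :, 3:]
        directions_to_lights = F.normalize(
            light_direction[:, :, None, :].expand(-1, -1, normals.shape[1], -1), dim=3)
        # Clamped Lambertian term per light and per sample.
        normals_dot_lights = torch.clamp(
            (normals[:, None, :, :] * directions_to_lights).sum(dim=3), 0., 1.)
        shading = normals_dot_lights[:, :, :, None] * light_intensities[:, :, None, :]
        return shading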