def render(self, vertices, faces, textures, K=None, R=None, t=None, at=None, up=None,
           dist_coeffs=None, orig_size=None):
    # fill back
    if self.fill_back:
        faces = torch.cat(
            (faces, faces[:, :, list(reversed(range(faces.shape[-1])))]), dim=1).detach()
        textures = torch.cat(
            (textures, textures.permute((0, 1, 4, 3, 2, 5))), dim=1)

    # lighting
    faces_lighting = nr.vertices_to_faces(vertices, faces)
    textures = nr.lighting(
        faces_lighting,
        textures,
        self.light_intensity_ambient,
        self.light_intensity_directional,
        self.light_color_ambient,
        self.light_color_directional,
        self.light_direction)

    # viewpoint transformation
    if self.camera_mode == 'look_at':
        vertices = nr.look_at(vertices, self.eye, up=up)
        # perspective transformation
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == 'look':
        vertices = nr.look(vertices, self.eye, self.camera_direction)
        # perspective transformation
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == 'projection':
        if K is None:
            K = self.K
        if R is None:
            R = self.R
        if t is None:
            t = self.t
        if dist_coeffs is None:
            dist_coeffs = self.dist_coeffs
        if orig_size is None:
            orig_size = self.orig_size
        vertices = nr.projection(vertices, K, R, t, dist_coeffs, orig_size)

    # rasterization
    faces = nr.vertices_to_faces(vertices, faces)
    out = nr.rasterize_rgbad(
        faces, textures, self.image_size, self.anti_aliasing, self.near, self.far,
        self.rasterizer_eps, self.background_color)
    return out['rgb'], out['depth'], out['alpha']
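# Minimal usage sketch for render() (a hedged example, not part of the original code:
# it assumes `renderer` is an instance of this class configured with camera_mode='projection',
# that `vertices` [B, V, 3], `faces` [B, F, 3] and `textures` [B, F, ts, ts, ts, 3] are CUDA
# tensors, and that K/R/t have shapes [B, 3, 3] / [B, 3, 3] / [B, 1, 3]):
#
#   rgb, depth, alpha = renderer.render(vertices, faces, textures, K=K, R=R, t=t)
#   # rgb: [B, 3, image_size, image_size], depth/alpha: [B, image_size, image_size]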
def render_fov(self, mesh, R, t, background_color=[0, 0, 0]):
    vertices, faces, textures = mesh
    if isinstance(t, np.ndarray):
        t = torch.cuda.FloatTensor(t)
    if isinstance(R, np.ndarray):
        R = torch.cuda.FloatTensor(R)
    t = t.view(-1, 1, 3)
    R = R.view(-1, 3, 3)
    assert len(vertices.shape) == 3
    batch_size = max(t.shape[0], vertices.shape[0])

    # expand dimension
    vertices = vertices.expand(batch_size, *vertices.shape[1:])
    faces = faces.expand(batch_size, *faces.shape[1:])
    textures = textures.expand(batch_size, *textures.shape[1:])

    # fill back
    if self.fill_back:
        faces = torch.cat(
            (faces, faces[:, :, list(reversed(range(faces.shape[-1])))]), dim=1).detach()
        textures = torch.cat(
            (textures, textures.permute((0, 1, 4, 3, 2, 5))), dim=1)

    # lighting
    faces_lighting = nr.vertices_to_faces(vertices, faces)
    textures = nr.lighting(
        faces_lighting,
        textures,
        self.light_intensity_ambient,
        self.light_intensity_directional,
        self.light_color_ambient,
        self.light_color_directional,
        self.light_direction)

    # viewpoint transformation
    vertices = nr.projection_fov(vertices, self.fov, R, t)

    # rasterization
    faces = nr.vertices_to_faces(vertices, faces)
    out = nr.rasterize_rgbad(
        faces, textures, self.image_size, self.anti_aliasing, self.near, self.far,
        self.rasterizer_eps, background_color=background_color)
    return out['rgb'], out['depth'], out['alpha']
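# Usage sketch for render_fov() (a hedged example; assumes `mesh` is a
# (vertices, faces, textures) tuple, R/t may be numpy arrays or CUDA tensors, and the batch
# dimension is broadcast to the larger of the pose batch and the vertex batch):
#
#   rgb, depth, alpha = renderer.render_fov((vertices, faces, textures), R, t,
#                                           background_color=[1, 1, 1])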
def render(self, vertices, faces, textures, K=None, R=None, t=None, dist_coeffs=None,
           orig_size=None):
    # viewpoint transformation
    if self.camera_mode == 'look_at':
        vertices = nr.look_at(vertices, self.eye)
        # perspective transformation
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == 'look':
        vertices = nr.look(vertices, self.eye, self.camera_direction)
        # perspective transformation
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == 'projection':
        if K is None:
            K = self.K
        if R is None:
            R = self.R
        if t is None:
            t = self.t
        if dist_coeffs is None:
            dist_coeffs = self.dist_coeffs
        if orig_size is None:
            orig_size = self.orig_size
        vertices = nr.projection(vertices, K, R, t, dist_coeffs, orig_size)
    elif self.camera_mode == 'weak_projection':
        if K is None:
            K = self.K
        if R is None:
            R = self.R
        if t is None:
            t = self.t
        vertices = weak_projection(vertices, K, R, t, orig_size)

        # Normalization: make the angle between the cheek-to-nose-tip vector and the
        # negative z axis smaller than 90 degrees, i.e. (a, b, c) . (0, 0, -1) > 0 -> c < 0.
        # Also shift all z values onto the positive axis.
        from MorphableModelFitting.models.face_model import FaceModelBFM
        keypoints = FaceModelBFM().keypoints
        front_idx, back_idx_1, back_idx_2 = keypoints[30], keypoints[0], keypoints[16]
        for i in range(len(vertices)):
            back_z = (vertices[i, back_idx_1, 2] + vertices[i, back_idx_2, 2]) / 2
            if (vertices[i, front_idx, 2] - back_z) > 0:
                vertices[i, :, 2] = -vertices[i, :, 2]
        vertices[:, :, 2] = vertices[:, :, 2] - torch.min(
            vertices[:, :, 2], dim=1)[0].unsqueeze(1) + 1

    # lighting
    if self.light_mode == "parallel":
        textures = texture_from_point2faces(faces, textures) / 255
        # fill back
        if self.fill_back:
            faces = torch.cat(
                (faces, faces[:, :, list(reversed(range(faces.shape[-1])))]), dim=1).detach()
            textures = torch.cat(
                (textures, textures.permute((0, 1, 4, 3, 2, 5))), dim=1)
        faces_lighting = nr.vertices_to_faces(vertices, faces)
        textures = nr.lighting(
            faces_lighting,
            textures,
            self.light_intensity_ambient,
            self.light_intensity_directional,
            self.light_color_ambient,
            self.light_color_directional,
            self.light_direction)
    elif self.light_mode == "SH":
        point_buf = torch.from_numpy(self.facemodel.point_buf).long() - 1
        point_norm = compute_point_norm(
            vertices, faces[0][:, list(reversed(range(faces.shape[-1])))], point_buf)
        textures, _ = Illumination_SH(textures, point_norm, self.SH_coeff)
        textures = texture_from_point2faces(faces, textures) / 255
        # fill back
        if self.fill_back:
            faces = torch.cat(
                (faces, faces[:, :, list(reversed(range(faces.shape[-1])))]), dim=1).detach()
            textures = torch.cat(
                (textures, textures.permute((0, 1, 4, 3, 2, 5))), dim=1)
    else:
        return None

    # rasterization
    faces = nr.vertices_to_faces(vertices, faces)
    out = nr.rasterize_rgbad(
        faces, textures, self.image_size, self.anti_aliasing, self.near, self.far,
        self.rasterizer_eps, self.background_color)
    return out['rgb'], out['depth'], out['alpha']
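# Usage sketch for the weak-projection / SH-lit variant of render() (a hedged example;
# it assumes `textures` here are per-vertex colors in [0, 255] that texture_from_point2faces
# converts to per-face textures, and that self.SH_coeff holds the spherical-harmonics
# lighting coefficients; the attribute settings below are illustrative, not required setup):
#
#   renderer.camera_mode = 'weak_projection'
#   renderer.light_mode = 'SH'
#   rgb, depth, alpha = renderer.render(vertices, faces, vertex_colors, K=K, R=R, t=t)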