def render_depth(self, vertices, faces):
    # fill back
    if self.fill_back:
        faces = torch.cat(
            (faces, faces[:, :, list(reversed(range(faces.shape[-1])))]),
            dim=1).detach()

    # viewpoint transformation
    if self.camera_mode == 'look_at':
        vertices = nr.look_at(vertices, self.eye)
        # perspective transformation
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == 'look':
        vertices = nr.look(vertices, self.eye, self.camera_direction)
        # perspective transformation
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == 'projection':
        vertices = nr.projection(vertices, self.P, self.dist_coeffs, self.orig_size)

    # rasterization
    faces = nr.vertices_to_faces(vertices, faces)
    images = nr.rasterize_depth(faces, self.image_size, self.anti_aliasing)
    return images
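# Usage sketch (illustrative, not from the original source): normalize the rendered
# depth map for visualization. The `renderer` name and tensor shapes are assumptions
# following the batched neural_renderer convention.
def example_depth_image(renderer, vertices, faces):
    depth = renderer.render_depth(vertices, faces)   # B x H x W, larger = farther
    d_min, d_max = depth.min(), depth.max()
    return (depth - d_min) / (d_max - d_min + 1e-8)  # rescaled to [0, 1]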
def project(self, vertices, K=None, R=None, t=None, dist_coeffs=None, orig_size=None):
    # viewpoint transformation
    if self.camera_mode == "look_at":
        vertices = nr.look_at(vertices, self.eye)
        # perspective transformation
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == "look":
        vertices = nr.look(vertices, self.eye, self.camera_direction)
        # perspective transformation
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == "projection":
        if K is None:
            K = self.K
        if R is None:
            R = self.R
        if t is None:
            t = self.t
        if dist_coeffs is None:
            dist_coeffs = self.dist_coeffs
        if orig_size is None:
            orig_size = self.orig_size
        vertices = nr.projection(vertices, K, R, t, dist_coeffs, orig_size)
    return vertices
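# Usage sketch (illustrative, not part of the original source): project a batch of
# vertices while overriding the stored camera. Assumes a renderer configured with
# camera_mode='projection'; the identity K/R/t values and the helper name are made
# up, and shapes follow the batched nr.projection convention (K, R: B x 3 x 3,
# t: B x 1 x 3).
def example_project_with_overrides(renderer, vertices):
    batch_size = vertices.shape[0]
    K = torch.eye(3, device=vertices.device).unsqueeze(0).expand(batch_size, 3, 3)
    R = torch.eye(3, device=vertices.device).unsqueeze(0).expand(batch_size, 3, 3)
    t = torch.zeros(batch_size, 1, 3, device=vertices.device)
    # Any argument left as None falls back to the renderer's stored value.
    return renderer.project(vertices, K=K, R=R, t=t)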
def render_silhouettes(self, vertices, faces, K=None, R=None, t=None,
                       dist_coeffs=None, orig_size=None):
    # fill back
    if self.fill_back:
        faces = torch.cat(
            (faces, faces[:, :, list(reversed(range(faces.shape[-1])))]), dim=1)

    # viewpoint transformation
    if self.camera_mode == 'look_at':
        vertices = nr.look_at(vertices, self.eye)
        # perspective transformation
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == 'look':
        vertices = nr.look(vertices, self.eye, self.camera_direction)
        # perspective transformation
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == 'projection':
        if K is None:
            K = self.K
        if R is None:
            R = self.R
        if t is None:
            t = self.t
        if dist_coeffs is None:
            dist_coeffs = self.dist_coeffs
        if orig_size is None:
            orig_size = self.orig_size
        vertices = nr.projection(vertices, K, R, t, dist_coeffs, orig_size)

    # rasterization
    faces = nr.vertices_to_faces(vertices, faces)
    images = nr.rasterize_silhouettes(faces, self.image_size, self.anti_aliasing)
    return images
def render_silhouettes(self, vertices, faces):
    # fill back
    # if self.fill_back:
    #     faces = torch.cat(
    #         (faces, faces[:, :, list(reversed(range(faces.shape[-1])))]), dim=1)

    # viewpoint transformation
    if self.camera_mode == 'look_at':
        vertices = nr.look_at(vertices, self.eye)
        # perspective transformation
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == 'look':
        vertices = nr.look(vertices, self.eye, self.camera_direction)
        # perspective transformation
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == 'projection':
        vertices = nr.projection(vertices, self.P, self.dist_coeffs, self.orig_size)

    # rasterization
    faces = nr.vertices_to_faces(vertices, faces)
    images_real = nr.rasterize_silhouettes(
        faces, self.image_size, self.anti_aliasing).detach()
    images_fake = self.forward_eff_render(
        faces, self.image_size, self.sigma_val, self.sigma_num, self.sigma_mul)
    return images_fake, images_real
def render(self, vertices, faces, textures, K=None, R=None, t=None, at=None,
           up=None, dist_coeffs=None, orig_size=None):
    # fill back
    if self.fill_back:
        faces = torch.cat(
            (faces, faces[:, :, list(reversed(range(faces.shape[-1])))]),
            dim=1).detach()
        textures = torch.cat(
            (textures, textures.permute((0, 1, 4, 3, 2, 5))), dim=1)

    # lighting
    faces_lighting = nr.vertices_to_faces(vertices, faces)
    textures = nr.lighting(
        faces_lighting, textures,
        self.light_intensity_ambient, self.light_intensity_directional,
        self.light_color_ambient, self.light_color_directional,
        self.light_direction)

    # viewpoint transformation
    if self.camera_mode == 'look_at':
        vertices = nr.look_at(vertices, self.eye, up=up)
        # perspective transformation
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == 'look':
        vertices = nr.look(vertices, self.eye, self.camera_direction)
        # perspective transformation
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == 'projection':
        if K is None:
            K = self.K
        if R is None:
            R = self.R
        if t is None:
            t = self.t
        if dist_coeffs is None:
            dist_coeffs = self.dist_coeffs
        if orig_size is None:
            orig_size = self.orig_size
        vertices = nr.projection(vertices, K, R, t, dist_coeffs, orig_size)

    # rasterization
    faces = nr.vertices_to_faces(vertices, faces)
    out = nr.rasterize_rgbad(
        faces, textures, self.image_size, self.anti_aliasing,
        self.near, self.far, self.rasterizer_eps, self.background_color)
    return out['rgb'], out['depth'], out['alpha']
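# Usage sketch (illustrative, not from the original source): composite the rendered
# mesh over a background image using the returned alpha channel. `background` is an
# assumed B x 3 x H x W tensor matching the render size.
def example_composite_over_background(renderer, vertices, faces, textures, background):
    rgb, depth, alpha = renderer.render(vertices, faces, textures)
    alpha = alpha.unsqueeze(1)  # B x H x W -> B x 1 x H x W for broadcasting
    return alpha * rgb + (1 - alpha) * background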
def render(self, vertices, faces, textures):
    # fill back
    if self.fill_back:
        faces = torch.cat(
            (faces, faces[:, :, list(reversed(range(faces.shape[-1])))]),
            dim=1).detach()
        textures = torch.cat(
            (textures, textures.permute((0, 1, 4, 3, 2, 5))), dim=1)

    # lighting
    faces_lighting = nr.vertices_to_faces(vertices, faces)
    textures = nr.lighting(
        faces_lighting, textures,
        self.light_intensity_ambient, self.light_intensity_directional,
        self.light_color_ambient, self.light_color_directional,
        self.light_direction)

    # viewpoint transformation
    if self.camera_mode == 'look_at':
        vertices = nr.look_at(vertices, self.eye)
        # perspective transformation
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == 'look':
        vertices = nr.look(vertices, self.eye, self.camera_direction)
        # perspective transformation
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == 'projection':
        vertices = nr.projection(vertices, self.P, self.dist_coeffs, self.orig_size)
    elif self.camera_mode == 'projection_by_params':
        vertices = nr.projection_by_params(
            vertices, self.camera_f, self.camera_c, self.camera_rt, self.camera_t,
            self.dist_coeffs, self.orig_size)

    # rasterization
    faces = nr.vertices_to_faces(vertices, faces)
    images = nr.rasterize(
        faces, textures, self.image_size, self.anti_aliasing,
        self.near, self.far, self.rasterizer_eps, self.background_color)
    return images
def project_points(self, vertices, K=None, R=None, t=None, dist_coeffs=None,
                   orig_size=None):
    # viewpoint transformation
    if self.camera_mode == 'look_at':
        vertices = nr.look_at(vertices, self.eye)
        # perspective transformation
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == 'look':
        vertices = nr.look(vertices, self.eye, self.camera_direction)
        # perspective transformation
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == 'projection':
        if K is None:
            K = self.K
        if R is None:
            R = self.R
        if t is None:
            t = self.t
        if dist_coeffs is None:
            dist_coeffs = self.dist_coeffs
        if orig_size is None:
            orig_size = self.orig_size
        vertices = nr.projection(vertices, K, R, t, dist_coeffs, orig_size)
    elif self.camera_mode == 'weak_projection':
        if K is None:
            K = self.K
        if R is None:
            R = self.R
        if t is None:
            t = self.t
        if orig_size is None:  # fall back as in the 'projection' branch
            orig_size = self.orig_size
        vertices = weak_projection(vertices, K, R, t, orig_size)
        # map normalized [-1, 1] coordinates to pixel coordinates, flipping the
        # y-axis so the origin is at the top-left corner
        vertices[:, :, 0] = (orig_size / 2) * vertices[:, :, 0] + (orig_size / 2)
        vertices[:, :, 1] = orig_size - (
            (orig_size / 2) * vertices[:, :, 1] + (orig_size / 2))
    return vertices
def forward(self, vertices, faces, textures=None, mode=None):
    '''
    Implementation of the forward rendering method.
    The old API is preserved for backward compatibility with the Chainer
    implementation.
    '''
    _textures = textures
    if mode not in [None, 'silhouettes', 'depth']:
        raise ValueError("mode should be one of None, 'silhouettes' or 'depth'")

    # fill back
    if self.fill_back:
        faces = torch.cat(
            (faces, faces[:, :, list(reversed(range(faces.shape[-1])))]),
            dim=1).detach()
        if _textures is not None:
            _textures = torch.cat(
                (_textures, _textures.permute((0, 1, 4, 3, 2, 5))), dim=1)

    if textures is not None:
        # lighting
        faces_lighting = nr.vertices_to_faces(vertices, faces)
        _textures = nr.lighting(
            faces_lighting, _textures,
            self.light_intensity_ambient, self.light_intensity_directional,
            self.light_color_ambient, self.light_color_directional,
            self.light_direction)

    # projection
    vertices = nr.projection(vertices, self.camera)
    if self.camera.perspective:
        vertices = nr.perspective(vertices, angle=self.camera.viewing_angle)

    # rasterization
    faces = nr.vertices_to_faces(vertices, faces)
    if mode is None:
        images = nr.rasterize(
            faces, _textures, self.camera.image_size, self.anti_aliasing,
            self.camera.near, self.camera.far, self.rasterizer_eps,
            self.background_color)
    elif mode == 'silhouettes':
        images = nr.rasterize_silhouettes(faces, self.camera.image_size,
                                          self.anti_aliasing)
    elif mode == 'depth':
        images = nr.rasterize_depth(faces, self.camera.image_size,
                                    self.anti_aliasing)
    return images
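# Usage sketch (illustrative, not from the original source): the single forward()
# entry point dispatches on `mode`. Shapes follow the batched neural_renderer
# convention; the helper name is an assumption.
def example_forward_modes(renderer, vertices, faces, textures):
    rgb = renderer(vertices, faces, textures)            # shaded RGB render
    sil = renderer(vertices, faces, mode='silhouettes')  # B x H x W coverage mask
    dep = renderer(vertices, faces, mode='depth')        # B x H x W depth map
    return rgb, sil, dep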
def render_color(self, vertices, faces, textures):
    # lighting
    # faces_lighting = nr.vertices_to_faces(vertices, faces)
    # textures = nr.lighting(
    #     faces_lighting, textures,
    #     self.light_intensity_ambient, self.light_intensity_directional,
    #     self.light_color_ambient, self.light_color_directional,
    #     self.light_direction)

    # viewpoint transformation
    if self.camera_mode == 'look_at':
        vertices = nr.look_at(vertices, self.eye)
        # perspective transformation
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == 'look':
        vertices = nr.look(vertices, self.eye, self.camera_direction)
        # perspective transformation
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == 'projection':
        vertices = nr.projection(vertices, self.P, self.dist_coeffs, self.orig_size)

    # rasterization
    # faces_ = nr.vertices_to_faces(vertices, faces)
    # images_fake, dis = self.forward_eff_render_shading(
    #     faces_, textures, self.image_size, self.sigma_val, self.sigma_num,
    #     self.sigma_mul, self.gamma_val, self.gamma_num, self.gamma_mul,
    #     self.near, self.far)

    # fill back, then rasterize RGBA
    faces = torch.cat((faces, faces[:, :, [2, 1, 0]]), dim=1).contiguous()
    textures = torch.cat((textures, textures), dim=1).contiguous()
    faces = nr.vertices_to_faces(vertices, faces)
    images_real = nr.rasterize_rgba(faces, textures, self.image_size,
                                    self.anti_aliasing)
    # append alpha to the color channels: B x 4 x H x W RGBA
    images_real = torch.cat(
        [images_real['rgb'], images_real['alpha'].unsqueeze(1)], dim=1)
    return images_real
def project_bbox(vertices, renderer, parts_labels=None, bbox_expansion=0.0):
    """
    Computes the 2D bounding box of the vertices after projection to the image
    plane.

    TODO(@jason): Batch these operations.

    Args:
        vertices (V x 3).
        renderer: Renderer used to get camera parameters.
        parts_labels (dict): Dictionary mapping a part name to the corresponding
            vertex indices.
        bbox_expansion (float): Amount to expand the bounding boxes.

    Returns:
        If a parts_labels dict is given, returns a dictionary mapping part name
        to bbox. Else, returns the projected 2D bounding box.
    """
    proj = nr.projection(
        (vertices * torch.tensor([[1, -1, 1.0]]).cuda()).unsqueeze(0),
        K=renderer.K,
        R=renderer.R,
        t=renderer.t,
        dist_coeffs=renderer.dist_coeffs,
        orig_size=1,
    )
    proj = proj.squeeze(0)[:, :2]

    if parts_labels is None:
        parts_labels = {"": torch.arange(len(vertices)).to(vertices.device)}

    bbox_parts = {}
    for part, inds in parts_labels.items():
        bbox = torch.cat((proj[inds].min(0).values, proj[inds].max(0).values), dim=0)
        if bbox_expansion:
            center = (bbox[:2] + bbox[2:]) / 2
            b = (bbox[2:] - bbox[:2]) / 2 * (1 + bbox_expansion)
            bbox = torch.cat((center - b, center + b))
        bbox_parts[part] = bbox
    if "" in parts_labels:
        return bbox_parts[""]
    return bbox_parts
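# Usage sketch (illustrative, not from the original source): per-part 2D boxes for
# a mesh split into two halves. The part names and index ranges are made up for
# demonstration; each returned bbox is (x_min, y_min, x_max, y_max).
def example_part_bboxes(vertices, renderer):
    half = len(vertices) // 2
    parts = {
        "front": torch.arange(0, half, device=vertices.device),
        "back": torch.arange(half, len(vertices), device=vertices.device),
    }
    # Expands each box by 10% around its center.
    return project_bbox(vertices, renderer, parts_labels=parts, bbox_expansion=0.1)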
def compute_offscreen_loss(self, verts):
    """
    Computes the loss for the offscreen penalty. This is used to prevent the
    degenerate solution of moving the object offscreen to minimize the chamfer
    loss.
    """
    # On-screen means xy in [-1, 1] and far > depth > 0.
    proj = nr.projection(
        verts,
        self.renderer.K,
        self.renderer.R,
        self.renderer.t,
        self.renderer.dist_coeffs,
        orig_size=1,
    )
    xy, z = proj[:, :, :2], proj[:, :, 2:]
    zeros = torch.zeros_like(z)
    lower_right = torch.max(xy - 1, zeros).sum(dim=(1, 2))  # amount greater than 1
    upper_left = torch.max(-1 - xy, zeros).sum(dim=(1, 2))  # amount less than -1
    behind = torch.max(-z, zeros).sum(dim=(1, 2))           # negative depth
    too_far = torch.max(z - self.renderer.far, zeros).sum(dim=(1, 2))  # beyond far plane
    return lower_right + upper_left + behind + too_far
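# Equivalent hinge formulation (a sketch, not part of the original source): since
# torch.max(x, zeros) equals relu(x), each penalty term is zero while vertices stay
# inside the visible frustum and grows linearly once they leave it. `proj` is the
# nr.projection output used above.
def offscreen_hinge(proj, far):
    import torch.nn.functional as F
    xy, z = proj[:, :, :2], proj[:, :, 2:]
    return ((F.relu(xy - 1) + F.relu(-1 - xy)).sum(dim=(1, 2))
            + (F.relu(-z) + F.relu(z - far)).sum(dim=(1, 2)))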
def render(self, vertices, faces, textures, K=None, R=None, t=None,
           dist_coeffs=None, orig_size=None):
    # viewpoint transformation
    if self.camera_mode == 'look_at':
        vertices = nr.look_at(vertices, self.eye)
        # perspective transformation
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == 'look':
        vertices = nr.look(vertices, self.eye, self.camera_direction)
        # perspective transformation
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == 'projection':
        if K is None:
            K = self.K
        if R is None:
            R = self.R
        if t is None:
            t = self.t
        if dist_coeffs is None:
            dist_coeffs = self.dist_coeffs
        if orig_size is None:
            orig_size = self.orig_size
        vertices = nr.projection(vertices, K, R, t, dist_coeffs, orig_size)
    elif self.camera_mode == 'weak_projection':
        if K is None:
            K = self.K
        if R is None:
            R = self.R
        if t is None:
            t = self.t
        vertices = weak_projection(vertices, K, R, t, orig_size)

        # Regularize so that the vector from cheek to nose tip makes an angle of
        # less than 90 degrees with the negative z-axis:
        # (a, b, c) . (0, 0, -1) > 0  ->  c < 0.
        # Also shift all z values onto the positive axis.
        from MorphableModelFitting.models.face_model import FaceModelBFM
        keypoints = FaceModelBFM().keypoints
        front_idx, back_idx_1, back_idx_2 = keypoints[30], keypoints[0], keypoints[16]
        for i in range(len(vertices)):
            back_z = (vertices[i, back_idx_1, 2] + vertices[i, back_idx_2, 2]) / 2
            if (vertices[i, front_idx, 2] - back_z) > 0:
                vertices[i, :, 2] = -vertices[i, :, 2]
        vertices[:, :, 2] = vertices[:, :, 2] - torch.min(
            vertices[:, :, 2], dim=1)[0].unsqueeze(1) + 1

    # lighting
    if self.light_mode == "parallel":
        textures = texture_from_point2faces(faces, textures) / 255
        if self.fill_back:
            faces = torch.cat(
                (faces, faces[:, :, list(reversed(range(faces.shape[-1])))]),
                dim=1).detach()
            textures = torch.cat(
                (textures, textures.permute((0, 1, 4, 3, 2, 5))), dim=1)
        faces_lighting = nr.vertices_to_faces(vertices, faces)
        textures = nr.lighting(
            faces_lighting, textures,
            self.light_intensity_ambient, self.light_intensity_directional,
            self.light_color_ambient, self.light_color_directional,
            self.light_direction)
    elif self.light_mode == "SH":
        point_buf = torch.from_numpy(self.facemodel.point_buf).long() - 1
        point_norm = compute_point_norm(
            vertices, faces[0][:, list(reversed(range(faces.shape[-1])))], point_buf)
        # texture = texture_from_point2faces(triangles, texture).reshape((batch, -1, 3))
        textures, _ = Illumination_SH(textures, point_norm, self.SH_coeff)
        textures = texture_from_point2faces(faces, textures) / 255
        if self.fill_back:
            faces = torch.cat(
                (faces, faces[:, :, list(reversed(range(faces.shape[-1])))]),
                dim=1).detach()
            textures = torch.cat(
                (textures, textures.permute((0, 1, 4, 3, 2, 5))), dim=1)
    else:
        return None

    # rasterization
    faces = nr.vertices_to_faces(vertices, faces)
    out = nr.rasterize_rgbad(
        faces, textures, self.image_size, self.anti_aliasing,
        self.near, self.far, self.rasterizer_eps, self.background_color)
    return out['rgb'], out['depth'], out['alpha']
def render_rgb(self, vertices, faces, textures, K=None, R=None, t=None,
               dist_coeffs=None, orig_size=None, lighting=True):
    # fill back
    if self.fill_back:
        faces = torch.cat(
            (faces, faces[:, :, list(reversed(range(faces.shape[-1])))]),
            dim=1).detach()
        textures = torch.cat(
            (textures, textures.permute((0, 1, 4, 3, 2, 5))), dim=1)

    # lighting
    if lighting and textures.size()[-1] == 3:  # 3 channels (RGB)
        faces_lighting = nr.vertices_to_faces(vertices, faces)
        textures = nr.lighting(
            faces_lighting, textures,
            self.light_intensity_ambient, self.light_intensity_directional,
            self.light_color_ambient, self.light_color_directional,
            self.light_direction)

    # viewpoint transformation
    if self.camera_mode == 'look_at':
        vertices = nr.look_at(vertices, self.eye)
        # perspective transformation
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == 'look':
        vertices = nr.look(vertices, self.eye, self.camera_direction)
        # perspective transformation
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == 'projection':
        if K is None:
            K = self.K
        if R is None:
            R = self.R
        if t is None:
            t = self.t
        if dist_coeffs is None:
            dist_coeffs = self.dist_coeffs
        if orig_size is None:
            orig_size = self.orig_size
        vertices = nr.projection(vertices, K, R, t, dist_coeffs, orig_size)

    # rasterization
    faces = nr.vertices_to_faces(vertices, faces)
    if isinstance(self.background_color, (tuple, list)):
        background = self.background_color
    else:
        assert isinstance(self.background_color, (float, int))
        background = [self.background_color for _ in range(textures.size()[-1])]
    images = nr.rasterize(
        faces, textures, self.image_size, self.anti_aliasing,
        self.near, self.far, self.rasterizer_eps, background)
    return images