def look_at_verts(self, poses):
    """Project SMPL vertices and H36M joints for `poses` into image space.

    Returns (verts_proj, joints_proj, verts, joints, joints_36m); projected
    coordinates are scaled and shifted to the image center.
    """
    # NOTE(review): 1024/5 looks like a fixed focal/scale constant tied to
    # the renderer setup -- confirm before changing image_size.
    scale = 1024 / 5
    offset = self.image_size / 2

    def _to_image_space(points):
        # Shared look_at -> perspective -> image-space pipeline
        # (previously duplicated for verts and joints).
        looked = nr.look_at(points, self.renderer.eye)
        proj = nr.perspective(looked)
        proj *= scale
        proj += offset
        return proj

    verts, joints, joints_36m = self.smpl_p.get_all_from_pose(poses)
    verts_proj = _to_image_space(verts)
    joints_proj = _to_image_space(joints_36m)
    return verts_proj, joints_proj, verts, joints, joints_36m
def render_silhouettes(self, vertices, faces, K=None, R=None, t=None, dist_coeffs=None, orig_size=None):
    """Rasterize binary silhouettes for a batch of meshes."""
    # Duplicate every face with reversed vertex order so back faces render.
    if self.fill_back:
        rev = list(reversed(range(faces.shape[-1])))
        faces = torch.cat((faces, faces[:, :, rev]), dim=1)

    mode = self.camera_mode
    if mode in ('look_at', 'look'):
        if mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
        else:
            vertices = nr.look(vertices, self.eye, self.camera_direction)
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif mode == 'projection':
        # Fall back to the renderer's stored camera parameters.
        K = self.K if K is None else K
        R = self.R if R is None else R
        t = self.t if t is None else t
        dist_coeffs = self.dist_coeffs if dist_coeffs is None else dist_coeffs
        orig_size = self.orig_size if orig_size is None else orig_size
        vertices = nr.projection(vertices, K, R, t, dist_coeffs, orig_size)

    face_verts = nr.vertices_to_faces(vertices, faces)
    return nr.rasterize_silhouettes(face_verts, self.image_size, self.anti_aliasing)
def render_depth(self, vertices, faces):
    """Rasterize a depth map for a batch of meshes."""
    # Append reversed-winding faces so both sides are drawn.
    if self.fill_back:
        rev = list(reversed(range(faces.shape[-1])))
        faces = torch.cat((faces, faces[:, :, rev]), dim=1).detach()

    mode = self.camera_mode
    if mode in ('look_at', 'look'):
        if mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
        else:
            vertices = nr.look(vertices, self.eye, self.camera_direction)
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif mode == 'projection':
        vertices = nr.projection(vertices, self.P, self.dist_coeffs, self.orig_size)

    face_verts = nr.vertices_to_faces(vertices, faces)
    return nr.rasterize_depth(face_verts, self.image_size, self.anti_aliasing)
def render(self, vertices, faces, textures):
    """Render lit, textured RGB images (Chainer backend)."""
    # Append reversed-order faces (and matching textures) for back faces.
    if self.fill_back:
        faces = cf.concat((faces, faces[:, :, ::-1]), axis=1).data
        textures = cf.concat(
            (textures, textures.transpose((0, 1, 4, 3, 2, 5))), axis=1)

    # Lighting uses the untransformed (world-space) face geometry.
    lit_faces = neural_renderer.vertices_to_faces(vertices, faces)
    textures = neural_renderer.lighting(
        lit_faces, textures,
        self.light_intensity_ambient, self.light_intensity_directional,
        self.light_color_ambient, self.light_color_directional,
        self.light_direction)

    # Camera transform.
    if self.camera_mode == 'look_at':
        vertices = neural_renderer.look_at(vertices, self.eye)
    elif self.camera_mode == 'look':
        vertices = neural_renderer.look(vertices, self.eye, self.camera_direction)
    if self.perspective:
        vertices = neural_renderer.perspective(vertices, angle=self.viewing_angle)

    face_verts = neural_renderer.vertices_to_faces(vertices, faces)
    return neural_renderer.rasterize(
        face_verts, textures, self.image_size, self.anti_aliasing,
        self.near, self.far, self.rasterizer_eps, self.background_color)
def render(self, cam, vertices, textures, faces=None, get_fim=False):
    """Render textured images; optionally also return a face-index map."""
    if faces is None:
        batch = cam.shape[0]
        faces = self.faces.repeat(batch, 1, 1)

    # nr.lighting mutates textures in place -- work on a copy.
    textures = textures.clone()
    lit_faces = nr.vertices_to_faces(vertices, faces)
    textures = nr.lighting(
        lit_faces, textures,
        self.light_intensity_ambient, self.light_intensity_directional,
        self.light_color_ambient, self.light_color_directional,
        self.light_direction)

    # Perspective projection (offset_z handled inside proj_func), then flip
    # y to align with the image coordinate system.
    proj_verts = self.proj_func(vertices, cam)
    proj_verts[:, :, 1] *= -1
    vertices = nr.look_at(proj_verts, self.eye)

    face_verts = nr.vertices_to_faces(vertices, faces)
    images = nr.rasterize(
        face_verts, textures, self.image_size, self.anti_aliasing,
        self.near, self.far, self.rasterizer_eps, self.background_color)

    fim = None
    if get_fim:
        fim = nr.rasterize_face_index_map(
            face_verts, image_size=self.image_size, anti_aliasing=False,
            near=self.near, far=self.far, eps=self.rasterizer_eps)
    return images, fim
def render_normal(self, vertices, faces):
    """Render per-face normals as colors (Chainer backend)."""
    if self.fill_back:
        faces = cf.concat((faces, faces[:, :, ::-1]), axis=1).data

    # Face normals from the two edges incident to vertex 1.
    face_verts = nr.vertices_to_faces(vertices, faces)
    bs, nf = face_verts.shape[:2]
    face_verts = face_verts.reshape((bs * nf, 3, 3))
    edge10 = face_verts[:, 0] - face_verts[:, 1]
    edge12 = face_verts[:, 2] - face_verts[:, 1]
    normals = cf.normalize(nr.cross(edge10, edge12)).reshape((bs, nf, 3))

    # Broadcast each normal over a 2x2x2 texture cell.
    textures = cf.tile(normals[:, :, None, None, None, :], (1, 1, 2, 2, 2, 1))

    # Camera transform.
    if self.camera_mode == 'look_at':
        vertices = nr.look_at(vertices, self.eye)
    elif self.camera_mode == 'look':
        vertices = nr.look(vertices, self.eye, self.camera_direction, self.up)
    if self.perspective:
        vertices = nr.perspective(vertices, angle=self.viewing_angle)

    projected = nr.vertices_to_faces(vertices, faces)
    return nr.rasterize(
        projected, textures, self.image_size, self.anti_aliasing,
        self.near, self.far, self.rasterizer_eps, self.background_color)
def render(self, vertices, texture=None, faces=None):
    """Render vertices with an optional texture.

    Defaults to the stored face set and the debug texture when `faces` /
    `texture` are not supplied.
    """
    # BUGFIX: `bs` was previously only assigned inside the `faces is None`
    # branch, so calling with explicit faces but no texture raised NameError.
    bs = vertices.shape[0]
    if faces is None:
        faces = self.faces.repeat(bs, 1, 1)
    if texture is None:
        texture = self.debug_textures().to(vertices.device)
        texture = texture.unsqueeze(0).repeat(bs, 1, 1, 1, 1, 1)

    # nr.lighting operates in place -- keep the caller's tensor intact.
    texture = texture.clone()
    faces_lighting = nr.vertices_to_faces(vertices, faces)
    texture = nr.lighting(
        faces_lighting, texture,
        self.light_intensity_ambient, self.light_intensity_directional,
        self.light_color_ambient, self.light_color_directional,
        self.light_direction)

    # Camera transform + rasterization.
    vertices = nr.look_at(vertices, self.eye)
    face_verts = nr.vertices_to_faces(vertices, faces)
    image = nr.rasterize(
        face_verts, texture, self.image_size, self.anti_aliasing,
        self.near, self.far, self.rasterizer_eps, self.background_color)
    return image
def render(self, vertices, faces, textures, K=None, R=None, t=None, at=None, up=None, dist_coeffs=None, orig_size=None):
    """Render RGB, depth and alpha maps via rasterize_rgbad.

    NOTE(review): `at` is accepted but never used in this body -- confirm
    whether it should be forwarded to the camera transform.
    """
    # Append reversed-winding faces and matching textures for back faces.
    if self.fill_back:
        rev = list(reversed(range(faces.shape[-1])))
        faces = torch.cat((faces, faces[:, :, rev]), dim=1).detach()
        textures = torch.cat(
            (textures, textures.permute((0, 1, 4, 3, 2, 5))), dim=1)

    # Lighting on world-space faces.
    lit_faces = nr.vertices_to_faces(vertices, faces)
    textures = nr.lighting(
        lit_faces, textures,
        self.light_intensity_ambient, self.light_intensity_directional,
        self.light_color_ambient, self.light_color_directional,
        self.light_direction)

    mode = self.camera_mode
    if mode in ('look_at', 'look'):
        if mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye, up=up)
        else:
            vertices = nr.look(vertices, self.eye, self.camera_direction)
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif mode == 'projection':
        K = self.K if K is None else K
        R = self.R if R is None else R
        t = self.t if t is None else t
        dist_coeffs = self.dist_coeffs if dist_coeffs is None else dist_coeffs
        orig_size = self.orig_size if orig_size is None else orig_size
        vertices = nr.projection(vertices, K, R, t, dist_coeffs, orig_size)

    face_verts = nr.vertices_to_faces(vertices, faces)
    out = nr.rasterize_rgbad(
        face_verts, textures, self.image_size, self.anti_aliasing,
        self.near, self.far, self.rasterizer_eps, self.background_color)
    return out['rgb'], out['depth'], out['alpha']
def project(self, vertices, K=None, R=None, t=None, dist_coeffs=None, orig_size=None):
    """Apply the configured camera transform and return the vertices."""
    mode = self.camera_mode
    if mode in ("look_at", "look"):
        if mode == "look_at":
            vertices = nr.look_at(vertices, self.eye)
        else:
            vertices = nr.look(vertices, self.eye, self.camera_direction)
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif mode == "projection":
        # Stored parameters are the defaults for anything not supplied.
        K = self.K if K is None else K
        R = self.R if R is None else R
        t = self.t if t is None else t
        dist_coeffs = self.dist_coeffs if dist_coeffs is None else dist_coeffs
        orig_size = self.orig_size if orig_size is None else orig_size
        vertices = nr.projection(vertices, K, R, t, dist_coeffs, orig_size)
    return vertices
def render_silhouettes(self, vertices, faces):
    """Rasterize silhouettes and return the intermediate maps as a dict."""
    if self.fill_back:
        faces = cf.concat((faces, faces[:, :, ::-1]), axis=1).data

    if self.camera_mode == 'look_at':
        vertices = neural_renderer.look_at(vertices, self.eye)
    elif self.camera_mode == 'look':
        vertices = neural_renderer.look(vertices, self.eye, self.camera_direction)
    if self.perspective:
        vertices = neural_renderer.perspective(vertices, angle=self.viewing_angle)

    face_verts = neural_renderer.vertices_to_faces(vertices, faces)
    # This rasterizer variant returns a dict of maps (TM changes).
    results = neural_renderer.rasterize_silhouettes(
        face_verts, self.image_size, self.anti_aliasing)
    return {
        'masks': results['alpha'],
        'face_index_map': results['face_index_map'],
        'weight_map': results['weight_map'],
        'sampling_weight_map': results['sampling_weight_map'],
    }
def render_fim_wim(self, cam, vertices, smpl_faces=True):
    """Rasterize face-index and weight maps; also return 2D face points."""
    base_faces = self.smpl_faces if smpl_faces else self.obj_faces
    faces = base_faces.repeat(cam.shape[0], 1, 1)

    # Perspective projection with y flipped into image coordinates.
    proj_verts = self.proj_func(vertices, cam)
    proj_verts[:, :, 1] *= -1
    vertices = nr.look_at(proj_verts, self.eye)

    face_verts = nr.vertices_to_faces(vertices, faces)
    fim, wim = nr.rasterize_face_index_map_and_weight_map(
        face_verts, self.image_size, False)

    # NOTE(review): this slice is a view, so the in-place flip below also
    # mutates face_verts -- harmless here since face_verts is not reused.
    f2pts = face_verts[:, :, :, 0:2]
    f2pts[:, :, :, 1] *= -1
    return f2pts, fim, wim
def render(self, vertices, faces, textures):
    """Render lit, textured images (Chainer backend)."""
    if self.fill_back:
        faces = cf.concat((faces, faces[:, :, ::-1]), axis=1).data
        textures = cf.concat(
            (textures, textures.transpose((0, 1, 4, 3, 2, 5))), axis=1)

    # World-space lighting before any camera transform.
    lit_faces = neural_renderer.vertices_to_faces(vertices, faces)
    textures = neural_renderer.lighting(
        lit_faces, textures,
        self.light_intensity_ambient, self.light_intensity_directional,
        self.light_color_ambient, self.light_color_directional,
        self.light_direction)

    if self.camera_mode == 'look_at':
        vertices = neural_renderer.look_at(vertices, self.eye)
    elif self.camera_mode == 'look':
        vertices = neural_renderer.look(vertices, self.eye, self.camera_direction)
    if self.perspective:
        vertices = neural_renderer.perspective(vertices, angle=self.viewing_angle)

    face_verts = neural_renderer.vertices_to_faces(vertices, faces)
    return neural_renderer.rasterize(
        face_verts, textures, self.image_size, self.anti_aliasing,
        self.near, self.far, self.rasterizer_eps, self.background_color)
def render_silhouettes(self, vertices, faces):
    """Return (soft, hard) silhouettes: a differentiable approximation and a
    detached hard rasterization.
    """
    # fill_back is intentionally disabled in this variant.
    mode = self.camera_mode
    if mode in ('look_at', 'look'):
        if mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
        else:
            vertices = nr.look(vertices, self.eye, self.camera_direction)
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif mode == 'projection':
        vertices = nr.projection(vertices, self.P, self.dist_coeffs, self.orig_size)

    face_verts = nr.vertices_to_faces(vertices, faces)
    images_real = nr.rasterize_silhouettes(
        face_verts, self.image_size, self.anti_aliasing).detach()
    images_fake = self.forward_eff_render(
        face_verts, self.image_size, self.sigma_val, self.sigma_num, self.sigma_mul)
    return images_fake, images_real
def render_fim(self, vertices, faces=None):
    """Rasterize a face-index map for the given vertices."""
    if faces is None:
        faces = self.faces.repeat(vertices.shape[0], 1, 1)
    # look_at, then rasterize the per-face vertex positions.
    vertices = nr.look_at(vertices, self.eye)
    face_verts = nr.vertices_to_faces(vertices, faces)
    return nr.rasterize_face_index_map(face_verts, self.image_size, False)
def project_points_perspective(self, verts, cams, depth=False):
    """Project vertices to 2D image points; optionally return depth too."""
    verts = self.proj_fn(verts, cams, offset_z=self.offset_z)
    verts = neural_renderer.look_at(verts, self.renderer.eye)
    if self.renderer.perspective:
        verts = neural_renderer.perspective(
            verts, angle=self.renderer.viewing_angle)
    xy = verts[:, :, :2]
    return (xy, verts[:, :, 2]) if depth else xy
def render_silhouettes(self, vertices, faces):
    """Rasterize silhouettes (Chainer backend; fill_back after projection)."""
    if self.camera_mode == 'look_at':
        vertices = neural_renderer.look_at(vertices, self.eye)
    elif self.camera_mode == 'look':
        vertices = neural_renderer.look(vertices, self.eye, self.camera_direction)
    if self.perspective:
        vertices = neural_renderer.perspective(vertices)

    face_verts = neural_renderer.vertices_to_faces(vertices, faces)
    # Note: unlike other variants, back faces are appended AFTER projection.
    if self.fill_back:
        face_verts = cf.concat((face_verts, face_verts[:, :, ::-1]), axis=1)
    return neural_renderer.rasterize_silhouettes(
        face_verts, self.image_size, self.anti_aliasing)
def transform_vertices(self, vertices, lights=None):
    """Apply viewpoint (and optional perspective) transform to vertices.

    NOTE(review): `lights` is accepted but unused in this body -- confirm
    whether callers rely on it.
    """
    if self.camera_mode == 'look_at':
        vertices = neural_renderer.look_at(vertices, self.viewpoints)
    elif self.camera_mode == 'look':
        vertices = neural_renderer.look(
            vertices, self.viewpoints, self.camera_direction)
    if self.perspective:
        vertices = neural_renderer.perspective(vertices, angle=self.viewing_angle)
    return vertices
def render(self, vertices, faces, textures):
    """Render RGB images and return rasterizer maps as a dict (TM rasterizer)."""
    if self.fill_back:
        faces = cf.concat((faces, faces[:, :, ::-1]), axis=1).data
        textures = cf.concat(
            (textures, textures.transpose((0, 1, 4, 3, 2, 5))), axis=1)

    lit_faces = neural_renderer.vertices_to_faces(vertices, faces)
    textures = neural_renderer.lighting(
        lit_faces, textures,
        self.light_intensity_ambient, self.light_intensity_directional,
        self.light_color_ambient, self.light_color_directional,
        self.light_direction)

    if self.camera_mode == 'look_at':
        vertices = neural_renderer.look_at(vertices, self.eye)
    elif self.camera_mode == 'look':
        vertices = neural_renderer.look(vertices, self.eye, self.camera_direction)
    if self.perspective:
        vertices = neural_renderer.perspective(vertices, angle=self.viewing_angle)

    face_verts = neural_renderer.vertices_to_faces(vertices, faces)
    results = neural_renderer.rasterize(
        face_verts, textures, self.image_size, self.anti_aliasing,
        self.near, self.far, self.rasterizer_eps, self.background_color)
    return {
        'images': results['rgb'],
        'face_index_map': results['face_index_map'],
        'weight_map': results['weight_map'],
        'sampling_weight_map': results['sampling_weight_map'],
    }
def render_silhouettes(self, cam, vertices, faces=None):
    """Rasterize silhouettes after projecting vertices with `cam`."""
    if faces is None:
        faces = self.faces.repeat(cam.shape[0], 1, 1)

    # Project (offset_z handled inside proj_func) and flip y into image coords.
    proj_verts = self.proj_func(vertices, cam)
    proj_verts[:, :, 1] *= -1
    vertices = nr.look_at(proj_verts, self.eye)

    face_verts = nr.vertices_to_faces(vertices, faces)
    return nr.rasterize_silhouettes(face_verts, self.image_size, self.anti_aliasing)
def test_case1(self):
    """look_at should move a fixed vertex to known camera-space positions."""
    cases = [
        ([1, 0, 1], [-np.sqrt(2) / 2, 0, np.sqrt(2) / 2]),
        ([0, 0, -10], [1, 0, 10]),
        ([-1, 1, 0], [0, np.sqrt(2) / 2, 3. / 2. * np.sqrt(2)]),
    ]
    vertices = np.array([1, 0, 0], 'float32')[None, None, :]
    for eye_raw, expected in cases:
        eye = np.array(eye_raw, 'float32')
        transformed = neural_renderer.look_at(vertices, eye)
        chainer.testing.assert_allclose(
            transformed.data.flatten(), np.array(expected))
def test_case1(self):
    """look_at should move a fixed vertex to known positions per viewpoint."""
    cases = [
        ([1, 0, 1], [-np.sqrt(2) / 2, 0, np.sqrt(2) / 2]),
        ([0, 0, -10], [1, 0, 10]),
        ([-1, 1, 0], [0, np.sqrt(2) / 2, 3. / 2. * np.sqrt(2)]),
    ]
    vertices = np.array([1, 0, 0], 'float32')[None, None, :]
    for viewpoint_raw, expected in cases:
        viewpoints = np.array(viewpoint_raw, 'float32')
        transformed = neural_renderer.look_at(vertices, viewpoints)
        chainer.testing.assert_allclose(
            transformed.data.flatten(), np.array(expected))
def render(self, vertices, faces, textures):
    """Render lit, textured images supporting four camera modes."""
    if self.fill_back:
        rev = list(reversed(range(faces.shape[-1])))
        faces = torch.cat((faces, faces[:, :, rev]), dim=1).detach()
        textures = torch.cat(
            (textures, textures.permute((0, 1, 4, 3, 2, 5))), dim=1)

    lit_faces = nr.vertices_to_faces(vertices, faces)
    textures = nr.lighting(
        lit_faces, textures,
        self.light_intensity_ambient, self.light_intensity_directional,
        self.light_color_ambient, self.light_color_directional,
        self.light_direction)

    mode = self.camera_mode
    if mode in ('look_at', 'look'):
        if mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
        else:
            vertices = nr.look(vertices, self.eye, self.camera_direction)
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif mode == 'projection':
        vertices = nr.projection(vertices, self.P, self.dist_coeffs, self.orig_size)
    elif mode == 'projection_by_params':
        vertices = nr.projection_by_params(
            vertices, self.camera_f, self.camera_c, self.camera_rt,
            self.camera_t, self.dist_coeffs, self.orig_size)

    face_verts = nr.vertices_to_faces(vertices, faces)
    return nr.rasterize(
        face_verts, textures, self.image_size, self.anti_aliasing,
        self.near, self.far, self.rasterizer_eps, self.background_color)
def project_points(self, vertices, K=None, R=None, t=None, dist_coeffs=None, orig_size=None):
    """Project vertices to pixel coordinates under the configured camera."""
    mode = self.camera_mode
    if mode in ('look_at', 'look'):
        if mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
        else:
            vertices = nr.look(vertices, self.eye, self.camera_direction)
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif mode == 'projection':
        K = self.K if K is None else K
        R = self.R if R is None else R
        t = self.t if t is None else t
        dist_coeffs = self.dist_coeffs if dist_coeffs is None else dist_coeffs
        orig_size = self.orig_size if orig_size is None else orig_size
        vertices = nr.projection(vertices, K, R, t, dist_coeffs, orig_size)
    elif mode == 'weak_projection':
        K = self.K if K is None else K
        R = self.R if R is None else R
        t = self.t if t is None else t
        vertices = weak_projection(vertices, K, R, t, orig_size)

    # NDC [-1, 1] -> pixel coordinates, y flipped (image origin top-left).
    half = orig_size / 2
    vertices[:, :, 0] = half * vertices[:, :, 0] + half
    vertices[:, :, 1] = orig_size - (half * vertices[:, :, 1] + half)
    return vertices
def test_case1(self):
    """look_at should map a fixed vertex to known camera-space positions."""
    cases = [
        ([1, 0, 1], [-np.sqrt(2) / 2, 0, np.sqrt(2) / 2]),
        ([0, 0, -10], [1, 0, 10]),
        ([-1, 1, 0], [0, np.sqrt(2) / 2, 3. / 2. * np.sqrt(2)]),
    ]
    vertices = torch.from_numpy(np.array([1, 0, 0], np.float32))[None, None, :]
    for eye_raw, expected in cases:
        eye = np.array(eye_raw, np.float32)
        transformed = nr.look_at(vertices, eye)
        assert np.allclose(transformed.data.squeeze().numpy(), np.array(expected))
def render(self, vertices, faces, textures):
    """Render textured images (older Chainer API).

    NOTE(review): lighting here receives camera-space faces, unlike variants
    that light in world space -- confirm this ordering is intended.
    """
    if self.camera_mode == 'look_at':
        vertices = neural_renderer.look_at(vertices, self.eye)
    elif self.camera_mode == 'look':
        vertices = neural_renderer.look(vertices, self.eye, self.camera_direction)
    if self.perspective:
        vertices = neural_renderer.perspective(vertices)

    face_verts = neural_renderer.vertices_to_faces(vertices, faces)
    if self.fill_back:
        face_verts = cf.concat((face_verts, face_verts[:, :, ::-1]), axis=1)
        textures = cf.concat(
            (textures, textures.transpose((0, 1, 4, 3, 2, 5))), axis=1)

    textures = neural_renderer.lighting(
        face_verts, textures,
        self.light_intensity_ambient, self.light_intensity_directional)
    return neural_renderer.rasterize(
        face_verts, textures, self.image_size, self.anti_aliasing,
        self.near, self.far, self.rasterizer_eps, self.background_color)
def render_color(self, vertices, faces, textures):
    """Render a 4-channel RGBA image with hard rasterization (no lighting)."""
    mode = self.camera_mode
    if mode in ('look_at', 'look'):
        if mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
        else:
            vertices = nr.look(vertices, self.eye, self.camera_direction)
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif mode == 'projection':
        vertices = nr.projection(vertices, self.P, self.dist_coeffs, self.orig_size)

    # Fill back faces by appending reversed-winding copies of faces/textures.
    faces = torch.cat((faces, faces[:, :, [2, 1, 0]]), dim=1).contiguous()
    textures = torch.cat((textures, textures), dim=1).contiguous()

    face_verts = nr.vertices_to_faces(vertices, faces)
    out = nr.rasterize_rgba(face_verts, textures, self.image_size, self.anti_aliasing)
    # Pack RGB + alpha into a single 4-channel image.
    return torch.cat([out['rgb'], out['alpha'].unsqueeze(1)], dim=1)
def render_depth(self, vertices, faces):
    """Rasterize a depth map (Chainer backend)."""
    if self.fill_back:
        faces = cf.concat((faces, faces[:, :, ::-1]), axis=1).data

    if self.camera_mode == 'look_at':
        vertices = neural_renderer.look_at(vertices, self.eye)
    elif self.camera_mode == 'look':
        vertices = neural_renderer.look(vertices, self.eye, self.camera_direction)
    if self.perspective:
        vertices = neural_renderer.perspective(vertices, angle=self.viewing_angle)

    face_verts = neural_renderer.vertices_to_faces(vertices, faces)
    return neural_renderer.rasterize_depth(
        face_verts, self.image_size, self.anti_aliasing)
def render_silhouettes(self, vertices, faces):
    """Rasterize silhouettes (calls the module-level rasterize_silhouettes)."""
    if self.fill_back:
        faces = cf.concat((faces, faces[:, :, ::-1]), axis=1).data

    if self.camera_mode == 'look_at':
        vertices = neural_renderer.look_at(vertices, self.eye)
    elif self.camera_mode == 'look':
        vertices = neural_renderer.look(vertices, self.eye, self.camera_direction)
    if self.perspective:
        vertices = neural_renderer.perspective(vertices, angle=self.viewing_angle)

    face_verts = neural_renderer.vertices_to_faces(vertices, faces)
    # Note: this variant uses the bare (locally imported) rasterizer.
    return rasterize_silhouettes(face_verts, self.image_size, self.anti_aliasing)
def __call__(self, vertices, faces):
    """Render silhouettes from every configured view, stacked along axis 1."""
    rendered = []
    for view in self.views:
        xp = cuda.get_array_module(vertices.data)
        view_verts = look_at(
            vertices,
            get_points_from_angles(*view),
            up=xp.array([0.0, 1.0, 0.0], dtype=xp.float32),
        )
        # NOTE(review): tagging camera_mode on the variable looks like a
        # workaround for the downstream renderer -- confirm it is needed.
        view_verts.camera_mode = ''
        sil = self.renderer.render_silhouettes(view_verts, faces)
        rendered.append(F.expand_dims(sil, axis=1))
    return F.concat(rendered, axis=1)
def render(self, vertices, faces, textures, K=None, R=None, t=None, dist_coeffs=None, orig_size=None):
    """Render RGB/depth/alpha for a BFM face model with 'parallel' or 'SH'
    lighting; returns None for any other light_mode.
    """
    # Camera transform.
    mode = self.camera_mode
    if mode in ('look_at', 'look'):
        if mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
        else:
            vertices = nr.look(vertices, self.eye, self.camera_direction)
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif mode == 'projection':
        K = self.K if K is None else K
        R = self.R if R is None else R
        t = self.t if t is None else t
        dist_coeffs = self.dist_coeffs if dist_coeffs is None else dist_coeffs
        orig_size = self.orig_size if orig_size is None else orig_size
        vertices = nr.projection(vertices, K, R, t, dist_coeffs, orig_size)
    elif mode == 'weak_projection':
        K = self.K if K is None else K
        R = self.R if R is None else R
        t = self.t if t is None else t
        vertices = weak_projection(vertices, K, R, t, orig_size)

    # Orientation fix: the cheek-to-nose-tip vector must point toward -z
    # ((a, b, c) . (0, 0, -1) > 0  =>  c < 0); then shift all z values onto
    # the positive axis.
    # FIX: construct FaceModelBFM once instead of three times per call.
    from MorphableModelFitting.models.face_model import FaceModelBFM
    keypoints = FaceModelBFM().keypoints
    front_idx, back_idx_1, back_idx_2 = keypoints[30], keypoints[0], keypoints[16]
    for i in range(len(vertices)):
        back_z = (vertices[i, back_idx_1, 2] + vertices[i, back_idx_2, 2]) / 2
        if (vertices[i, front_idx, 2] - back_z) > 0:
            vertices[i, :, 2] = -vertices[i, :, 2]
    vertices[:, :, 2] = vertices[:, :, 2] - torch.min(
        vertices[:, :, 2], dim=1)[0].unsqueeze(1) + 1

    # Lighting.
    if self.light_mode == "parallel":
        textures = texture_from_point2faces(faces, textures) / 255
        if self.fill_back:
            rev = list(reversed(range(faces.shape[-1])))
            faces = torch.cat((faces, faces[:, :, rev]), dim=1).detach()
            textures = torch.cat(
                (textures, textures.permute((0, 1, 4, 3, 2, 5))), dim=1)
        faces_lighting = nr.vertices_to_faces(vertices, faces)
        textures = nr.lighting(
            faces_lighting, textures,
            self.light_intensity_ambient, self.light_intensity_directional,
            self.light_color_ambient, self.light_color_directional,
            self.light_direction)
    elif self.light_mode == "SH":
        # Spherical-harmonics lighting on per-point normals.
        point_buf = torch.from_numpy(self.facemodel.point_buf).long() - 1
        point_norm = compute_point_norm(
            vertices, faces[0][:, list(reversed(range(faces.shape[-1])))],
            point_buf)
        textures, _ = Illumination_SH(textures, point_norm, self.SH_coeff)
        textures = texture_from_point2faces(faces, textures) / 255
        if self.fill_back:
            rev = list(reversed(range(faces.shape[-1])))
            faces = torch.cat((faces, faces[:, :, rev]), dim=1).detach()
            textures = torch.cat(
                (textures, textures.permute((0, 1, 4, 3, 2, 5))), dim=1)
    else:
        return None

    # Rasterization.
    face_verts = nr.vertices_to_faces(vertices, faces)
    out = nr.rasterize_rgbad(
        face_verts, textures, self.image_size, self.anti_aliasing,
        self.near, self.far, self.rasterizer_eps, self.background_color)
    return out['rgb'], out['depth'], out['alpha']
def render(
    self,
    vertices,
    faces,
    textures,
    K=None,
    R=None,
    t=None,
    dist_coeffs=None,
    orig_size=None,
    detach_renders=False,
):
    """Render rgbad output; lighting is optional, geometry gradients may be
    detached before rasterization.
    """
    if self.fill_back:
        rev = list(reversed(range(faces.shape[-1])))
        faces = torch.cat((faces, faces[:, :, rev]), dim=1).detach()
        textures = torch.cat(
            (textures, textures.permute((0, 1, 4, 3, 2, 5))), dim=1)

    if not self.no_light:
        lit_faces = nr.vertices_to_faces(vertices, faces)
        textures = nr.lighting(
            lit_faces,
            textures,
            self.light_intensity_ambient,
            self.light_intensity_directional,
            self.light_color_ambient,
            self.light_color_directional,
            self.light_direction,
        )

    mode = self.camera_mode
    if mode in ("look_at", "look"):
        if mode == "look_at":
            vertices = nr.look_at(vertices, self.eye)
        else:
            vertices = nr.look(vertices, self.eye, self.camera_direction)
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif mode == "projection":
        vertices = self.project(
            vertices, K=K, R=R, t=t, dist_coeffs=dist_coeffs, orig_size=orig_size)

    face_verts = nr.vertices_to_faces(vertices, faces)
    if detach_renders:
        # Stop gradients from flowing through the geometry when requested.
        face_verts = face_verts.detach()
    return rasterize.rasterize_rgbad(
        face_verts,
        textures,
        self.image_size,
        self.anti_aliasing,
        self.near,
        self.far,
        self.rasterizer_eps,
        self.background_color,
    )
def render_rgb(self, vertices, faces, textures, K=None, R=None, t=None, dist_coeffs=None, orig_size=None, lighting=True):
    """Render images with an arbitrary number of texture channels; lighting
    is applied only for 3-channel (RGB) textures.
    """
    if self.fill_back:
        rev = list(reversed(range(faces.shape[-1])))
        faces = torch.cat((faces, faces[:, :, rev]), dim=1).detach()
        textures = torch.cat(
            (textures, textures.permute((0, 1, 4, 3, 2, 5))), dim=1)

    if lighting and textures.size()[-1] == 3:
        lit_faces = nr.vertices_to_faces(vertices, faces)
        textures = nr.lighting(
            lit_faces, textures,
            self.light_intensity_ambient, self.light_intensity_directional,
            self.light_color_ambient, self.light_color_directional,
            self.light_direction)

    mode = self.camera_mode
    if mode in ('look_at', 'look'):
        if mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
        else:
            vertices = nr.look(vertices, self.eye, self.camera_direction)
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif mode == 'projection':
        K = self.K if K is None else K
        R = self.R if R is None else R
        t = self.t if t is None else t
        dist_coeffs = self.dist_coeffs if dist_coeffs is None else dist_coeffs
        orig_size = self.orig_size if orig_size is None else orig_size
        vertices = nr.projection(vertices, K, R, t, dist_coeffs, orig_size)

    face_verts = nr.vertices_to_faces(vertices, faces)

    # Broadcast a scalar background over however many channels textures have.
    if isinstance(self.background_color, (tuple, list)):
        background = self.background_color
    else:
        assert isinstance(self.background_color, (float, int))
        background = [self.background_color for _ in range(textures.size()[-1])]

    return nr.rasterize(
        face_verts, textures, self.image_size, self.anti_aliasing,
        self.near, self.far, self.rasterizer_eps, background)