def render_silhouettes(self, vertices, faces):
    """Rasterize silhouettes and return the rasterizer's auxiliary maps.

    Returns a dict with keys 'masks', 'face_index_map', 'weight_map'
    and 'sampling_weight_map'.
    """
    # Optionally append reversed-winding copies of every face so back
    # faces are rasterized too (.data detaches the concat result).
    if self.fill_back:
        faces = cf.concat((faces, faces[:, :, ::-1]), axis=1).data

    # Viewpoint transformation.
    if self.camera_mode == 'look_at':
        vertices = neural_renderer.look_at(vertices, self.eye)
    elif self.camera_mode == 'look':
        vertices = neural_renderer.look(
            vertices, self.eye, self.camera_direction)

    # Perspective transformation.
    if self.perspective:
        vertices = neural_renderer.perspective(
            vertices, angle=self.viewing_angle)

    # Gather per-face vertex coordinates, then rasterize.
    faces = neural_renderer.vertices_to_faces(vertices, faces)
    raster_out = neural_renderer.rasterize_silhouettes(
        faces, self.image_size, self.anti_aliasing)

    # Repackage under the key names callers expect ('alpha' -> 'masks').
    return {
        'masks': raster_out['alpha'],
        'face_index_map': raster_out['face_index_map'],
        'weight_map': raster_out['weight_map'],
        'sampling_weight_map': raster_out['sampling_weight_map'],
    }
def render_silhouettes(self, vertices, faces):
    """Render a differentiable (fake) and a hard (real) silhouette pair.

    Returns (images_fake, images_real); the hard rasterization result is
    detached from the autograd graph so only the efficient renderer's
    output carries gradients.
    """
    # Viewpoint transformation, each camera mode with its own optional
    # perspective projection ('projection' mode applies none).
    if self.camera_mode == 'look_at':
        vertices = nr.look_at(vertices, self.eye)
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == 'look':
        vertices = nr.look(vertices, self.eye, self.camera_direction)
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == 'projection':
        vertices = nr.projection(
            vertices, self.P, self.dist_coeffs, self.orig_size)

    # Rasterization: detached hard target plus soft differentiable render.
    faces = nr.vertices_to_faces(vertices, faces)
    images_real = nr.rasterize_silhouettes(
        faces, self.image_size, self.anti_aliasing).detach()
    images_fake = self.forward_eff_render(
        faces, self.image_size, self.sigma_val, self.sigma_num, self.sigma_mul)
    return images_fake, images_real
def render_silhouettes(self, vertices, faces, K=None, R=None, t=None,
                       dist_coeffs=None, orig_size=None):
    """Render binary silhouette images for a batch of meshes.

    The camera parameters (K, R, t, dist_coeffs, orig_size) are only
    consulted in 'projection' mode; any that are None fall back to the
    values stored on the renderer instance.
    """
    # Duplicate every face with reversed vertex order so back faces render.
    if self.fill_back:
        flipped = faces[:, :, list(reversed(range(faces.shape[-1])))]
        faces = torch.cat((faces, flipped), dim=1)

    # Viewpoint transformation.
    if self.camera_mode == 'look_at':
        vertices = nr.look_at(vertices, self.eye)
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == 'look':
        vertices = nr.look(vertices, self.eye, self.camera_direction)
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == 'projection':
        # Per-call overrides take precedence over renderer defaults.
        K = self.K if K is None else K
        R = self.R if R is None else R
        t = self.t if t is None else t
        dist_coeffs = self.dist_coeffs if dist_coeffs is None else dist_coeffs
        orig_size = self.orig_size if orig_size is None else orig_size
        vertices = nr.projection(vertices, K, R, t, dist_coeffs, orig_size)

    # Rasterization.
    faces = nr.vertices_to_faces(vertices, faces)
    return nr.rasterize_silhouettes(faces, self.image_size, self.anti_aliasing)
def render_silhouettes(self, vertices, faces):
    """Rasterize silhouette images, supporting four camera modes."""
    # Append reversed-winding face copies so both sides are rasterized.
    if self.fill_back:
        flipped = faces[:, :, list(reversed(range(faces.shape[-1])))]
        faces = torch.cat((faces, flipped), dim=1)

    # Viewpoint transformation; 'look_at'/'look' optionally apply a
    # perspective projection, the two projection modes do not.
    if self.camera_mode == 'look_at':
        vertices = nr.look_at(vertices, self.eye)
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == 'look':
        vertices = nr.look(vertices, self.eye, self.camera_direction)
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)
    elif self.camera_mode == 'projection':
        vertices = nr.projection(
            vertices, self.P, self.dist_coeffs, self.orig_size)
    elif self.camera_mode == 'projection_by_params':
        vertices = nr.projection_by_params(
            vertices, self.camera_f, self.camera_c, self.camera_rt,
            self.camera_t, self.dist_coeffs, self.orig_size)

    # Rasterization.
    faces = nr.vertices_to_faces(vertices, faces)
    return nr.rasterize_silhouettes(faces, self.image_size, self.anti_aliasing)
def render_silhouettes(self, vertices, faces):
    """Silhouette rendering (Chainer backend): transform, then rasterize."""
    # Viewpoint transformation.
    if self.camera_mode == 'look_at':
        vertices = neural_renderer.look_at(vertices, self.eye)
    elif self.camera_mode == 'look':
        vertices = neural_renderer.look(
            vertices, self.eye, self.camera_direction)
    if self.perspective:
        vertices = neural_renderer.perspective(vertices)

    faces = neural_renderer.vertices_to_faces(vertices, faces)
    # NOTE: fill-back is applied *after* the vertex gather here,
    # reversing the per-face vertex order on the gathered coordinates.
    if self.fill_back:
        faces = cf.concat((faces, faces[:, :, ::-1]), axis=1)

    return neural_renderer.rasterize_silhouettes(
        faces, self.image_size, self.anti_aliasing)
def render_silhouettes(self, vertices, faces, backgrounds=None):
    """Transform vertices with the renderer's camera and rasterize silhouettes.

    `backgrounds` is forwarded untouched to the rasterizer; all other
    rasterization settings come from the renderer instance.
    """
    transformed = self.transform_vertices(vertices)
    return neural_renderer.rasterize_silhouettes(
        transformed,
        faces,
        background_color=self.background_color,
        backgrounds=backgrounds,
        image_size=self.image_size,
        near=self.near,
        far=self.far,
        anti_aliasing=self.anti_aliasing,
        draw_backside=self.draw_backside,
    )
def stest_backward_case1(self):
    """Optimize a quad's vertices so its silhouette matches a reference
    image; succeeds once 1 - IoU drops below 0.01, otherwise raises.

    (Leading 's' in the name keeps the test from being auto-collected.)
    """
    quad_vertices = np.array(
        [[0.1, 0.1, 1.],
         [-0.1, 0.1, 1.],
         [-0.1, -0.1, 1.],
         [0.1, -0.1, 1.]], 'float32')
    quad_faces = np.array([[0, 1, 2], [0, 2, 3]], 'int32')

    # Target mask: inverted red channel of the reference gradient image.
    ref = neural_renderer.imread('./tests/data/gradient.png')
    ref = (1 - ref)[:, :, 0]
    ref = chainer.cuda.to_gpu(ref)

    vertices, faces, ref = neural_renderer.to_gpu(
        (quad_vertices, quad_faces, ref))
    vertices = Parameter(vertices)
    optimizer = chainer.optimizers.Adam(0.003, beta1=0.5)
    optimizer.setup(vertices)

    for _ in range(350):
        images = neural_renderer.rasterize_silhouettes(
            vertices()[None, :, :], faces,
            image_size=256, anti_aliasing=False)
        image = images[0]
        # Loss is 1 - IoU of rendered vs. reference masks.
        intersection = cf.sum(image * ref)
        union = cf.sum(image + ref - image * ref)
        loss = 1 - intersection / union
        optimizer.target.cleargrads()
        loss.backward()
        optimizer.update()
        if float(loss.data) < 0.01:
            return
    raise Exception
def render_silhouettes(self, cam, vertices, faces=None):
    """Project vertices with the given camera and rasterize silhouettes.

    When `faces` is None, the renderer's stored face template is tiled
    across the batch taken from `cam`.
    """
    if faces is None:
        batch_size = cam.shape[0]
        faces = self.faces.repeat(batch_size, 1, 1)

    # Perspective projection (proj_func is expected to handle the z
    # offset internally — see its definition).
    projected = self.proj_func(vertices, cam)

    # Flip the y axis so the projection aligns with image coordinates.
    projected[:, :, 1] *= -1

    # Apply the look_at transform, gather face coordinates, rasterize.
    vertices = nr.look_at(projected, self.eye)
    faces = nr.vertices_to_faces(vertices, faces)
    return nr.rasterize_silhouettes(faces, self.image_size, self.anti_aliasing)
def forward(self, vertices, faces, textures=None, mode=None):
    """Render meshes to images.

    The old Chainer-era API is preserved for back-compatibility:
    `mode` selects full RGB rendering (None), 'silhouettes', or 'depth'.

    Raises:
        ValueError: if `mode` is not one of the accepted values.
    """
    _textures = textures
    if mode not in (None, 'silhouettes', 'depth'):
        raise ValueError("mode should be one of None, 'silhouettes' or 'depth'")

    # fill back: duplicate faces with reversed winding (and a matching
    # texture-axis permutation) so back faces are rendered too.
    if self.fill_back:
        reversed_faces = faces[:, :, list(reversed(range(faces.shape[-1])))]
        faces = torch.cat((faces, reversed_faces), dim=1).detach()
        if _textures is not None:
            _textures = torch.cat(
                (_textures, textures.permute((0, 1, 4, 3, 2, 5))), dim=1)

    if textures is not None:
        # Lighting is computed from the *untransformed* face geometry.
        faces_lighting = nr.vertices_to_faces(vertices, faces)
        _textures = nr.lighting(
            faces_lighting,
            _textures,
            self.light_intensity_ambient,
            self.light_intensity_directional,
            self.light_color_ambient,
            self.light_color_directional,
            self.light_direction)

    # Camera projection, optionally followed by perspective division.
    vertices = nr.projection(vertices, self.camera)
    if self.camera.perspective:
        vertices = nr.perspective(vertices, angle=self.camera.viewing_angle)

    # Rasterization, dispatched on the requested mode.
    faces = nr.vertices_to_faces(vertices, faces)
    if mode == 'silhouettes':
        images = nr.rasterize_silhouettes(
            faces, self.camera.image_size, self.anti_aliasing)
    elif mode == 'depth':
        images = nr.rasterize_depth(
            faces, self.camera.image_size, self.anti_aliasing)
    else:
        images = nr.rasterize(
            faces, _textures, self.camera.image_size, self.anti_aliasing,
            self.camera.near, self.camera.far, self.rasterizer_eps,
            self.background_color)
    return images
def render_silhouettes(self, vertices, faces):
    """Rasterize silhouette images after viewpoint and perspective transforms."""
    # Append reversed-winding face copies so back faces are visible
    # (.data detaches the concatenated tensor).
    if self.fill_back:
        faces = cf.concat((faces, faces[:, :, ::-1]), axis=1).data

    # Viewpoint transformation ('look' additionally takes an up vector).
    if self.camera_mode == 'look_at':
        vertices = nr.look_at(vertices, self.eye)
    elif self.camera_mode == 'look':
        vertices = nr.look(vertices, self.eye, self.camera_direction, self.up)

    # Perspective transformation.
    if self.perspective:
        vertices = nr.perspective(vertices, angle=self.viewing_angle)

    # Rasterization.
    faces = nr.vertices_to_faces(vertices, faces)
    return nr.rasterize_silhouettes(faces, self.image_size, self.anti_aliasing)
def render_silhouettes(self, vertices, faces):
    """Transform vertices into screen space and rasterize silhouette images."""
    # Back-face fill: concatenate reversed-winding face copies
    # (.data detaches the result from the graph).
    if self.fill_back:
        faces = cf.concat((faces, faces[:, :, ::-1]), axis=1).data

    # Viewpoint transformation.
    if self.camera_mode == 'look_at':
        vertices = neural_renderer.look_at(vertices, self.eye)
    elif self.camera_mode == 'look':
        vertices = neural_renderer.look(
            vertices, self.eye, self.camera_direction)

    # Perspective transformation.
    if self.perspective:
        vertices = neural_renderer.perspective(
            vertices, angle=self.viewing_angle)

    # Rasterization.
    faces = neural_renderer.vertices_to_faces(vertices, faces)
    return neural_renderer.rasterize_silhouettes(
        faces, self.image_size, self.anti_aliasing)