def load_verts_faces_uv(self, json_path):
    """Load UV vertex coordinates and triangle indices from a UV-mapping JSON file.

    Args:
        json_path: path to a JSON file containing a 'textureMapping' entry
            with 'pointData' (UV coordinates) and 'triangles' (face indices).

    Returns:
        Tuple (verts_uvs, faces_uvs): batched tensors (leading dim of size 1)
        on self.device — float UV coordinates and long triangle indices.
    """
    with open(json_path, 'r') as fp:
        mapping = json.load(fp)['textureMapping']
    uv_points = to_tensor(np.array(mapping['pointData']), self.device)
    uv_triangles = to_tensor(np.array(mapping['triangles']), self.device)
    return uv_points.unsqueeze(0).float(), uv_triangles.unsqueeze(0).long()
def __init__(self, json_path, *args, **kwargs):
    """Build an image-less UV texture from a UV-mapping JSON file.

    Args:
        json_path: path to a JSON file (e.g. ".../face12.json") containing a
            'textureMapping' entry with 'pointData' and 'triangles'.
        *args, **kwargs: forwarded unchanged to the parent constructor.
    """
    super().__init__(*args, **kwargs)
    with open(json_path, 'r') as fp:
        mapping = json.load(fp)['textureMapping']
    uv_verts = to_tensor(np.array(mapping['pointData'])).unsqueeze(0).float()
    uv_faces = to_tensor(np.array(mapping['triangles'])).unsqueeze(0).long()
    # NOTE(review): to_tensor is called without an explicit device here;
    # the .to(self.device) below moves the assembled texture instead.
    self.texture = ImagelessTexturesUV(
        verts_uvs=uv_verts, faces_uvs=uv_faces
    ).to(self.device)
def generate_vertices(self, sp, ep):
    """Generate a batch of meshes from shape and expression parameters.

    Args:
        sp: shape parameters with a leading batch dimension.
        ep: expression parameters with a leading batch dimension; its first
            dimension determines the batch size.

    Returns:
        Stacked vertex tensor on self.device, one mesh per batch element.
    """
    n = ep.shape[0]
    if self.use_bfm_gpu:
        # GPU-resident BFM consumes the tensors directly.
        meshes = [self.bfm.generate_vertices(sp[i], ep[i]) for i in range(n)]
    else:
        # CPU BFM expects numpy arrays — detach and convert each sample.
        meshes = [
            self.bfm.generate_vertices(
                sp[i].detach().cpu().numpy(),
                ep[i].detach().cpu().numpy(),
            )
            for i in range(n)
        ]
    return torch.stack([to_tensor(v, self.device) for v in meshes])
def __init__(self, opt, use_bfm_gpu=False):
    """Set up a Basel Face Model (BFM) backed factory and its UV renderer.

    Args:
        opt: options object; must provide device, path_bfm, path_uv and
            image_h (presumably the rendered image height — TODO confirm).
        use_bfm_gpu: when True, load the BFM onto the GPU for faster mesh
            generation; otherwise use the CPU implementation.
    """
    super().__init__(opt)
    self.to(opt.device)
    self.use_bfm_gpu = use_bfm_gpu
    # Select the GPU-tensor or CPU BFM backend.
    if use_bfm_gpu:
        self.bfm = BFM2017Tensor(self.opt.path_bfm, device=opt.device)
    else:
        self.bfm = BFM2017(self.opt.path_bfm, self.opt.path_uv)
    self.faces_tensor = to_tensor(self.bfm.faces, self.device).unsqueeze(0)
    uv_verts, uv_faces = self.load_verts_faces_uv(opt.path_uv)
    self.uv_renderer = UVRenderer(
        uv_verts,
        uv_faces,
        image_size=self.opt.image_h,
        cameras=Camera.get_camera(),
    )
def run(self, pipeline, batch, latent_from, latent_to, n):
    """Render images along a linear interpolation between two style latents.

    Args:
        pipeline: model exposing forward_latent2image(batch, idx).
        batch: input batch dict; DIK.STYLE_LATENT is replaced at each step.
        latent_from: start latent of the interpolation.
        latent_to: end latent of the interpolation.
        n: number of interpolation steps.

    Returns:
        List of n rendered images, each optionally masked by self.mask_key.
    """
    latents = interpolation(n, latent_from=latent_from, latent_to=latent_to)
    latents = to_tensor(latents, batch[DIK.STYLE_LATENT].device)
    target_shape = batch[DIK.STYLE_LATENT].shape
    images = []
    for z in latents:
        step_batch = batch.copy()
        step_batch[DIK.STYLE_LATENT] = z.reshape(target_shape)
        step_batch = pipeline.forward_latent2image(step_batch, 0)
        img = self.tensor2image(step_batch[DIK.IMAGE_OUT][0])
        if self.mask_key is not None:
            # Apply the per-batch mask on CPU, broadcast to the image shape.
            img = img * batch[self.mask_key][0].expand_as(img).cpu()
        images.append(img)
    return images