def get_image_index_finetune(self, n_frames_total, lmarks=None):
    """Pick reference frame indices and target frame ids for finetuning.

    Args:
        n_frames_total: number of target frames to draw; currently must be 1.
        lmarks: per-frame landmarks; required when
            ``self.opt.find_largest_mouth`` is set.

    Returns:
        (finetune_ref_indices, target_ids): two 1-D numpy integer arrays of
        frame indices, drawn without replacement from the reference pool.
    """
    assert n_frames_total == 1
    # Build the reference index pool (not only for one shot).
    if self.opt.find_largest_mouth:
        assert lmarks is not None
        openrates = np.asarray([openrate(lm) for lm in lmarks])
        # NOTE(review): argsort is ascending, so this keeps the n_shot frames
        # with the SMALLEST openrate values — confirm against `openrate`'s
        # sign convention, given the option is named "find_largest_mouth".
        ref_indices = np.argsort(openrates)[:self.opt.n_shot]
    else:
        # BUGFIX: the original left this branch as a Python list, but the
        # fancy indexing below (list[ndarray]) raises TypeError; convert to
        # an ndarray so both branches behave identically.
        ref_indices = np.asarray(
            [int(i) for i in self.opt.ref_img_id.split(',')])
    # Randomly select reference indices (and targets) for finetune.
    assert len(ref_indices) >= self.opt.finetune_shot and \
        len(ref_indices) >= n_frames_total
    # Cap the draw at the pool size when the pool is small.
    choice_num = min(len(ref_indices),
                     self.opt.finetune_shot + n_frames_total)
    finetune_ref_idxs = np.random.choice(
        len(ref_indices), choice_num, replace=False)
    finetune_ref_indices = ref_indices[
        finetune_ref_idxs[:self.opt.finetune_shot]]
    target_ids = ref_indices[finetune_ref_idxs[-n_frames_total:]]
    return finetune_ref_indices, target_ids
def get_open_mouth(self, lmarks):
    """Randomly (about 40% of calls) pick ``n_shot`` reference frames
    from the frames ranked first by ``openrate``; otherwise return None.

    Args:
        lmarks: per-frame landmarks, each passed to ``openrate``.

    Returns:
        A 1-D numpy array of ``self.opt.n_shot`` frame indices (duplicates
        possible, since the draw is with replacement), or None when the
        random gate does not fire.
    """
    ref_indices = None
    # Random gate: only ~40% of calls build a reference set.
    if np.random.rand() >= 0.6:
        openrates = np.asarray([openrate(lm) for lm in lmarks])
        # NOTE(review): argsort is ascending, so the pool holds the frames
        # with the SMALLEST openrate values — confirm against `openrate`'s
        # sign convention.
        ranked = np.argsort(openrates)
        # BUGFIX: the original always sampled ids in [0, n_shot*8), which
        # raises IndexError whenever fewer than n_shot*8 frames exist;
        # clamp the pool to the frames actually available.
        pool = min(len(ranked), self.opt.n_shot * 8)
        candidates = ranked[:pool]
        sel_ids = np.random.choice(pool, self.opt.n_shot)
        ref_indices = candidates[sel_ids]
    return ref_indices
def define_inference(self, real_video, lmarks):
    """Gather reference images and landmarks for inference.

    Selects reference frame indices — either the frames ranked first by
    ``openrate`` (when ``self.opt.find_largest_mouth`` is set) or the fixed
    ids listed in ``self.opt.ref_img_id`` — fetches the corresponding data
    via ``prepare_datas``, and batches each list into a single tensor.

    Returns:
        (ref_images, ref_lmarks, ref_indices): two tensors with a new
        leading batch dimension, plus the chosen frame indices.
    """
    # Choose the reference frame indices (not only for one shot).
    if self.opt.find_largest_mouth:
        rates = np.asarray([openrate(lm) for lm in lmarks])
        ref_indices = np.argsort(rates)[:self.opt.n_shot]
    else:
        ref_indices = [int(idx) for idx in self.opt.ref_img_id.split(',')]
    # Fetch the face images / landmarks for those frames.
    ref_images, ref_lmarks = self.prepare_datas(real_video, lmarks,
                                                ref_indices)
    # Stack along a new leading dimension — equivalent to unsqueezing each
    # tensor and concatenating on dim 0.
    ref_images = torch.stack(list(ref_images), dim=0)
    ref_lmarks = torch.stack(list(ref_lmarks), dim=0)
    return ref_images, ref_lmarks, ref_indices