Example #1
 def morph_file(self, img_path, gt_path, expresion):
     img = cv_utils.read_cv2_img(img_path)
     gt = cv_utils.read_cv2_img(gt_path)
     # morphed_img = self._img_morph(img, expresion)
     morphed_img = self.morph_face(img, gt, expresion)
     output_name = f'{os.path.basename(img_path)}_{np.random.randint(0, 100)}_out.png'
     self.save_img(morphed_img, output_name)
Example #2
    def morph_face_from_file_video(self, img_path, n_each_pose=60, fps=30):
        img = cv_utils.read_cv2_img(img_path)
        h_s, _w_s, _c = img.shape  # (height, width, channels)

        crop, roi = self.crop(img, idx=0)

        morphed_imgs = list()
        t0 = util.time_now()
        cnt = 0
        poses_dict = self.rnd_poses(n_each_pose, fps, self.mouth_pose_names,
                                    self.mouth_pose_values)
        for name in poses_dict:
            cnt += len(poses_dict[name])
            for pose in poses_dict[name]:
                morphed = self.morph(crop, pose)['result']
                img_cp = img.copy()
                self.blend(img_cp, morphed, roi)

                cv2.putText(img_cp,
                            f"{name}={pose[np.argmax(pose != 0)]:.03f}",
                            (10, h_s - 20), cv2.FONT_HERSHEY_DUPLEX, 1,
                            (255, 0, 0))

                morphed_imgs.append(img_cp)

        t_avg = util.time_diff_ms(t0) / cnt
        print(f"infer: {t_avg:.03f}ms")
        output_name = f'{os.path.basename(img_path)[:-4]}.mp4'
        # output_name = f'{os.path.basename(img_path)}' \
        #     f'_{name}_n{len(poses_dict[name])}_{"left" if left else "right"}.mp4'
        self.save_imgs_as_video(morphed_imgs, output_name, fps)
Example #3
 def get_features(self, img_path, expression):
     img = cv_utils.read_cv2_img(img_path)
     if img is None:
         print('Failed to read sample:', img_path)
         return None
     features = self.img_forward(img, expression)
     return features
Example #4
    def _extract_smpls(self, input_file):
        img = cv_utils.read_cv2_img(input_file)
        img = cv_utils.transform_img(img, image_size=224) * 2 - 1.0  # HMR expects inputs in [-1, 1]
        img = img.transpose((2, 0, 1))
        img = torch.FloatTensor(img).cuda()[None, ...]
        theta = self.hmr(img)[-1]

        return theta
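
Example #4 above, along with Examples #5, #10, and #24 below, repeats the same preprocessing before calling the HMR model: read the image, resize it, rescale pixel values from [0, 1] to [-1, 1], move channels first, and add a batch dimension. Here is a minimal consolidated sketch of that pattern; the helper name preprocess_for_hmr is hypothetical, and it assumes cv_utils.transform_img returns a float HWC array in [0, 1], as the * 2 - 1.0 scaling implies.

import torch

import cv_utils  # project-local helper module used throughout these examples


def preprocess_for_hmr(img_path, image_size=224, device='cuda'):
    """Hypothetical helper: build a (1, 3, H, W) tensor in [-1, 1] for an HMR model."""
    img = cv_utils.read_cv2_img(img_path)                     # HWC image array
    img = cv_utils.transform_img(img, image_size=image_size)  # resize; floats in [0, 1] (assumed)
    img = img * 2 - 1.0                                       # rescale to [-1, 1] for HMR
    img = img.transpose((2, 0, 1))                            # HWC -> CHW
    return torch.FloatTensor(img).to(device)[None, ...]       # add the batch dimension
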
Example #5
 def morph_file(self, img_path, expresion, i, j=0):
     img = cv_utils.read_cv2_img(img_path)
     morphed_img, bbs = self._img_morph(img, expresion)
     overlay_fake_img = self._overlay_fake_img(img, morphed_img['fake_imgs_masked'], bbs)
     output_name = '%s_%s_out.png' % (os.path.basename(img_path), i)
     fake_output_name = '%s_out%s%s.png' % (os.path.basename(img_path), i, j)
     self._save_img(overlay_fake_img, fake_output_name)
     return fake_output_name
Example #6
    def transfer(self, tgt_path, tgt_smpl=None, cam_strategy='smooth', t=0, visualizer=None):
        with torch.no_grad():
            # 1. get source info
            src_info = self.src_info

            ori_img = cv_utils.read_cv2_img(tgt_path)
            if tgt_smpl is None:
                img_hmr = cv_utils.transform_img(ori_img, 224, transpose=True) * 2 - 1.0
                img_hmr = torch.FloatTensor(img_hmr).cuda()[None, ...]
                tgt_smpl = self.hmr(img_hmr)[-1]
            else:
                tgt_smpl = to_tensor(tgt_smpl).cuda()[None, ...]

            if t == 0 and cam_strategy == 'smooth':
                self.first_cam = tgt_smpl[:, 0:3].clone()

            # 2. compute tsf smpl
            tsf_smpl = self.swap_smpl(src_info['cam'], src_info['shape'], tgt_smpl, cam_strategy=cam_strategy)
            tsf_info = self.hmr.get_details(tsf_smpl)
            # add pose condition and face index map into source info
            tsf_info['cond'], tsf_info['fim'] = self.render.encode_fim(tsf_info['cam'],
                                                                       tsf_info['verts'], transpose=True)
            # add part condition into source info
            tsf_info['part'] = self.render.encode_front_fim(tsf_info['fim'], transpose=True)

            # 3. calculate syn front image and transformation flows
            ref_info = self.ref_info
            selected_part_id = self.PART_IDS['body']
            left_id = [i for i in self.PART_IDS['all'] if i not in selected_part_id]

            src_part_mask = (torch.sum(tsf_info['part'][:, left_id, ...], dim=1) != 0).byte()
            ref_part_mask = (torch.sum(tsf_info['part'][:, selected_part_id, ...], dim=1) != 0).byte()

            T_s = self.calculate_trans(src_info['bc_f2pts'], src_info['fim'], tsf_info['fim'], src_part_mask)
            T_r = self.calculate_trans(ref_info['bc_f2pts'], ref_info['fim'], tsf_info['fim'], ref_part_mask)

            tsf_s = self.model.transform(src_info['image'], T_s)
            tsf_r = self.model.transform(ref_info['image'], T_r)

            tsf_img = tsf_s * src_part_mask.float() + tsf_r * ref_part_mask.float()
            tsf_inputs = torch.cat([tsf_img, tsf_info['cond']], dim=1)

            preds = self.forward2(tsf_inputs, src_info['feats'], T_s, ref_info['feats'], T_r, src_info['bg'])

            if visualizer is not None:
                visualizer.vis_named_img('src', src_info['image'])
                visualizer.vis_named_img('ref', ref_info['image'])
                visualizer.vis_named_img('src_cond', src_info['cond'])
                visualizer.vis_named_img('ref_cond', ref_info['cond'])
                visualizer.vis_named_img('tsf_cond', tsf_info['cond'])
                visualizer.vis_named_img('tsf_s', tsf_s)
                visualizer.vis_named_img('tsf_r', tsf_r)
                visualizer.vis_named_img('tsf_img', tsf_img)
                visualizer.vis_named_img('preds', preds)
                visualizer.vis_named_img('src_part_mask', src_part_mask)
                visualizer.vis_named_img('ref_part_mask', ref_part_mask)

            return preds
Example #7
    def morph_file(self, img_path, expresion, desired_expresion):
        img = cv_utils.read_cv2_img(img_path)
        morphed_img, concate_face = self._img_morph(img, expresion,
                                                    desired_expresion)
        #concate_img = self._img_concate(img, expresion,desired_expresion)

        output_name = '%s_out.png' % os.path.basename(img_path)
        self._save_img(morphed_img, output_name)
        self._save_img(concate_face, "concate_" + output_name)
Example #8
 def _get_img_by_id(self, idx):
     if idx < self._dataset_size:
         img_path = os.path.join(self._imgs_dir,
                                 self._info[idx]['file_path'])
         img = cv_utils.read_cv2_img(img_path)
         return img, self._info[idx]['file_path']
     else:
         print('Index out of range:', idx)
         return None, None
Example #9
    def load_init_preds(self, pred_path):
        pred_img_name = os.path.split(pred_path)[-1]
        pred_img_path = os.path.join(self._opt.preds_img_folder, 'pred_' + pred_img_name)

        img = cv_utils.read_cv2_img(pred_img_path)
        img = cv_utils.transform_img(img, self._opt.image_size, transpose=True)
        img = img * 2 - 1

        return img
Example #10
    def transfer_params(self,
                        tgt_path,
                        tgt_smpl=None,
                        cam_strategy='smooth',
                        t=0):
        # get source info
        src_info = self.src_info

        ori_img = cv_utils.read_cv2_img(tgt_path)
        if tgt_smpl is None:
            img_hmr = cv_utils.transform_img(ori_img, 224, transpose=True) * 2 - 1.0
            img_hmr = torch.tensor(img_hmr, dtype=torch.float32).cuda()[None, ...]
            tgt_smpl = self.hmr(img_hmr)
        else:
            tgt_smpl = torch.tensor(tgt_smpl, dtype=torch.float32).cuda()[None, ...]

        if t == 0 and cam_strategy == 'smooth':
            self.first_cam = tgt_smpl[:, 0:3].clone()

        # get transfer smpl
        tsf_smpl = self.swap_smpl(src_info['cam'],
                                  src_info['shape'],
                                  tgt_smpl,
                                  cam_strategy=cam_strategy)
        # transfer process, {'theta', 'cam', 'pose', 'shape', 'verts', 'j2d', 'j3d'}
        tsf_info = self.hmr.get_details(tsf_smpl)

        tsf_f2verts, tsf_fim, tsf_wim = self.render.render_fim_wim(
            tsf_info['cam'], tsf_info['verts'])
        # src_f2pts = src_f2verts[:, :, :, 0:2]
        tsf_info['fim'] = tsf_fim
        tsf_info['wim'] = tsf_wim
        tsf_info['cond'], _ = self.render.encode_fim(tsf_info['cam'],
                                                     tsf_info['verts'],
                                                     fim=tsf_fim,
                                                     transpose=True)
        # tsf_info['sil'] = util.morph((tsf_fim != -1).float(), ks=self._opt.ft_ks, mode='dilate')

        T = self.render.cal_bc_transform(src_info['p2verts'], tsf_fim, tsf_wim)
        tsf_img = F.grid_sample(src_info['img'], T)
        tsf_inputs = torch.cat([tsf_img, tsf_info['cond']], dim=1)

        # add target image to tsf info
        tsf_info['tsf_img'] = tsf_img
        tsf_info['image'] = ori_img
        tsf_info['T'] = T

        self.T = T
        self.tsf_info = tsf_info

        return tsf_inputs
Example #11
 def load_images(self, im_pairs):
     imgs = []
     for im_path in im_pairs:
         img = cv_utils.read_cv2_img(im_path)
         img = cv_utils.transform_img(img,
                                      self._opt.image_size,
                                      transpose=True)
         img = img * 2 - 1
         imgs.append(img)
     imgs = np.stack(imgs)
     return imgs
Example #12
    def morph_face_from_file(self, img_path, pose):
        img = cv_utils.read_cv2_img(img_path)

        crop, roi = self.crop(img)

        imgs = self.morph(crop, pose)
        morphed = imgs['result']
        self.blend(img, morphed, roi)

        output_name = f'{os.path.basename(img_path)}_{np.random.randint(0, 100)}_out.png'
        self.save_img(img, output_name)
Example #13
    def morph_face_from_file(self, img_path, pose):
        img = cv_utils.read_cv2_img(img_path)

        crop_left, roi_l, crop_right, roi_r = self.crop(img)

        imgs = self.morph(crop_left, pose)
        morphed = imgs['result']
        self.blend(img, morphed, roi_l)
        # cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0))

        output_name = f'{os.path.basename(img_path)}_{np.random.randint(0, 100)}_out.png'
        self.save_img(img, output_name)
Example #14
    def __getitem__(self, index):
        assert (index < self._dataset_size)
        im_path = (self.all_file[index])[0]
        label = (self.all_file[index])[1]
        im = cv_utils.read_cv2_img(im_path)

        img = self._transform(Image.fromarray(im))

        # pack
        sample = {'img': img, 'label': label}

        return sample
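
The __getitem__ above reads an image with cv_utils.read_cv2_img, applies a PIL/torchvision-style transform, and packs the result into an {'img', 'label'} dict. Assuming the method belongs to a torch.utils.data.Dataset subclass, samples in that form can be batched with a standard DataLoader; the stand-in dataset below (_ToySet) is hypothetical and only mimics the sample structure.

import torch
from torch.utils.data import DataLoader, Dataset


class _ToySet(Dataset):
    """Stand-in dataset returning the same {'img', 'label'} sample structure."""

    def __len__(self):
        return 8

    def __getitem__(self, index):
        return {'img': torch.zeros(3, 128, 128), 'label': index % 2}


loader = DataLoader(_ToySet(), batch_size=4, shuffle=True)
for batch in loader:
    imgs = batch['img']      # (4, 3, 128, 128) float tensor, stacked by the default collate_fn
    labels = batch['label']  # (4,) tensor of labels
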
Example #15
 def morph_file(self, img_path, expression):
     img = cv_utils.read_cv2_img(img_path)
     #img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) # Open this one for cropped_28_01
     #newX,newY = img.shape[1]*2, img.shape[0]*2
     #img = cv2.resize(img,(newX,newY))
     morphed_img = self._img_morph(img, expression)
     #morphed_img = cv2.cvtColor(morphed_img, cv2.COLOR_RGB2BGR)
     output_name = os.path.join(
         self._opt.output_dir, '{0}_epoch_{1}_intensity_{2}_out.png'.format(
             os.path.basename(img_path)[:-4], str(self._opt.load_epoch),
             str(expression[self._opt.au_index] * 10)))
     self._save_img(morphed_img, output_name)
     print('Morphed image is saved at path {}'.format(output_name))
Example #16
    def transfer_params(self, tgt_path, tgt_smpl=None, cam_strategy='smooth', t=0):
        ori_img = cv_utils.read_cv2_img(tgt_path)
        if tgt_smpl is None:
            img_hmr = cv_utils.transform_img(ori_img, 224, transpose=True) * 2 - 1.0
            img_hmr = torch.tensor(img_hmr, dtype=torch.float32).cuda()[None, ...]
            tgt_smpl = self.hmr(img_hmr)
        else:
            if isinstance(tgt_smpl, np.ndarray):
                tgt_smpl = torch.tensor(tgt_smpl, dtype=torch.float32).cuda()[None, ...]

        tsf_inputs = self.transfer_params_by_smpl(tgt_smpl=tgt_smpl, cam_strategy=cam_strategy, t=t)
        self.tsf_info['image'] = ori_img

        return tsf_inputs
Example #17
    def personalize(self, src_path, src_smpl=None):

        with torch.no_grad():
            ori_img = cv_utils.read_cv2_img(src_path)

            # resize image and convert the color space from [0, 255] to [-1, 1]
            img = cv_utils.transform_img(ori_img, self._opt.image_size, transpose=True) * 2 - 1.0
            img = torch.FloatTensor(img).cuda()[None, ...]

            if src_smpl is None:
                img_hmr = cv_utils.transform_img(ori_img, 224, transpose=True) * 2 - 1.0
                img_hmr = torch.FloatTensor(img_hmr).cuda()[None, ...]
                src_smpl = self.hmr(img_hmr)[-1]
            else:
                src_smpl = to_tensor(src_smpl).cuda()[None, ...]

            # source process, {'theta', 'cam', 'pose', 'shape', 'verts', 'j2d', 'j3d'}
            src_info = self.hmr.get_details(src_smpl)

            # add source bary-center points
            src_info['bc_f2pts'] = self.get_src_bc_f2pts(src_info['cam'], src_info['verts'])

            # add image to source info
            src_info['image'] = img

            # add texture into source info
            _, src_info['tex'] = self.render.forward(src_info['cam'], src_info['verts'],
                                                     img, is_uv_sampler=False, reverse_yz=True, get_fim=False)

            # add pose condition and face index map into source info
            src_info['cond'], src_info['fim'] = self.render.encode_fim(src_info['cam'],
                                                                       src_info['verts'], transpose=True)

            # add part condition into source info
            src_info['part'] = self.render.encode_front_fim(src_info['fim'], transpose=True)

            # background inputs and background inpainting
            src_bg_mask = self.morph(src_info['cond'][:, -1:, :, :], ks=15, mode='erode')
            bg_inputs = torch.cat([img * src_bg_mask, src_bg_mask], dim=1)
            src_info['bg'] = self.model.bg_model(bg_inputs)
            #
            # source identity
            src_crop_mask = self.morph(src_info['cond'][:, -1:, :, :], ks=3, mode='erode')
            src_inputs = torch.cat([img * (1 - src_crop_mask), src_info['cond']], dim=1)
            src_info['feats'] = self.model.src_model.inference(src_inputs)
            #
            # self.src_info = src_info

            return src_info
Example #18
    def _load_pairs(self, vid_info):
        length = vid_info['length']
        pair_ids = np.random.choice(length, size=2, replace=False)

        smpls = np.concatenate((vid_info['cams'][pair_ids],
                                vid_info['thetas'][pair_ids],
                                vid_info['betas'][pair_ids]), axis=1)

        images = []
        images_paths = vid_info['images']
        for t in pair_ids:
            image_path = images_paths[t]
            image = cv_utils.read_cv2_img(image_path)

            images.append(image)

        return images, smpls
Example #19
    def __getitem__(self, index):
        ###################### TEST ########################
        dataname = self.dataset_name[index]
        # blurred images
        blur_paths = self.blur_paths.get(dataname)
        blurs = []
        num_data = len(blur_paths)
        for i in range(num_data):
            blur_path = os.path.join(self.root, dataname, 'blurred', blur_paths[i])
            blur = cv_utils.read_cv2_img(blur_path, input_nc=1)
            blurs.append(blur)
        blurs = np.concatenate(blurs, axis=0)
        # event images
        events = self.eventdata.get(dataname)
        sample = {'events': events,
                  'blurred': blurs,
                  'dataname': dataname}

        return sample
Example #20
    def _load_pairs(self, vid_info):
        length = vid_info['length']

        start = np.random.randint(0, 15)
        end = np.random.randint(0, length)
        pair_ids = np.array([start, end], dtype=np.int32)

        smpls = np.concatenate((vid_info['cams'][pair_ids],
                                vid_info['thetas'][pair_ids],
                                vid_info['betas'][pair_ids]), axis=1)

        images = []
        images_paths = vid_info['images']
        for t in pair_ids:
            image_path = images_paths[t]
            image = cv_utils.read_cv2_img(image_path)

            images.append(image)

        return images, smpls
Example #21
 def morph_file(self, img_path, expresion):
     img = cv_utils.read_cv2_img(img_path)
     morphed_img = self._img_morph(img, expresion)
     output_name = '%s_out.png' % os.path.basename(img_path)
     self._save_img(morphed_img, output_name)
Example #22
 def morph_file(self, img_path, expresion):
     img = cv_utils.read_cv2_img(img_path)
     morphed_img = self._img_morph(img, expresion)
     output_name = '%s_out.png' % os.path.basename(img_path)
     self._save_img(morphed_img, output_name)
Example #23
 def _get_img_by_id(self, id):
     filepath = os.path.join(self._root, self._imgs_dir, id+'.jpg')
     return cv_utils.read_cv2_img(filepath), filepath
Example #24
    def personalize(self, src_path, src_smpl=None, output_path='', visualizer=None):

        ori_img = cv_utils.read_cv2_img(src_path)

        # resize image and convert the color space from [0, 255] to [-1, 1]
        img = cv_utils.transform_img(ori_img, self._opt.image_size, transpose=True) * 2 - 1.0
        img = torch.tensor(img, dtype=torch.float32).cuda()[None, ...]

        if src_smpl is None:
            img_hmr = cv_utils.transform_img(ori_img, 224, transpose=True) * 2 - 1.0
            img_hmr = torch.tensor(img_hmr, dtype=torch.float32).cuda()[None, ...]
            src_smpl = self.hmr(img_hmr)
        else:
            src_smpl = torch.tensor(src_smpl, dtype=torch.float32).cuda()[None, ...]

        # source process, {'theta', 'cam', 'pose', 'shape', 'verts', 'j2d', 'j3d'}
        src_info = self.hmr.get_details(src_smpl)
        src_f2verts, src_fim, src_wim = self.render.render_fim_wim(src_info['cam'], src_info['verts'])
        # src_f2pts = src_f2verts[:, :, :, 0:2]
        src_info['fim'] = src_fim
        src_info['wim'] = src_wim
        src_info['cond'], _ = self.render.encode_fim(src_info['cam'], src_info['verts'], fim=src_fim, transpose=True)
        src_info['f2verts'] = src_f2verts
        src_info['p2verts'] = src_f2verts[:, :, :, 0:2]
        src_info['p2verts'][:, :, :, 1] *= -1

        if self._opt.only_vis:
            src_info['p2verts'] = self.render.get_vis_f2pts(src_info['p2verts'], src_fim)

        src_info['part'], _ = self.render.encode_fim(src_info['cam'], src_info['verts'],
                                                     fim=src_fim, transpose=True, map_fn=self.part_fn)
        # add image to source info
        src_info['img'] = img
        src_info['image'] = ori_img

        # 2. process the src inputs
        if self.detector is not None:
            bbox, body_mask = self.detector.inference(img[0])
            bg_mask = 1 - body_mask
        else:
            bg_mask = util.morph(src_info['cond'][:, -1:, :, :], ks=self._opt.bg_ks, mode='erode')
            body_mask = 1 - bg_mask

        if self._opt.bg_model != 'ORIGINAL':
            src_info['bg'] = self.bgnet(img, masks=body_mask, only_x=True)
        else:
            incomp_img = img * bg_mask
            bg_inputs = torch.cat([incomp_img, bg_mask], dim=1)
            img_bg = self.bgnet(bg_inputs)
            src_info['bg_inputs'] = bg_inputs
            # src_info['bg'] = img_bg
            src_info['bg'] = incomp_img + img_bg * body_mask

        ft_mask = 1 - util.morph(src_info['cond'][:, -1:, :, :], ks=self._opt.ft_ks, mode='erode')
        src_inputs = torch.cat([img * ft_mask, src_info['cond']], dim=1)

        src_info['feats'] = self.generator.encode_src(src_inputs)
        src_info['src_inputs'] = src_inputs


        # if visualizer is not None:
        #     self.visualize(visualizer, src=img, bg=src_info['bg'])

        if output_path:
            cv_utils.save_cv2_img(src_info['image'], output_path, image_size=self._opt.image_size)

        return src_info
Example #25
 def run(self, img_path, expresion):
     img = cv_utils.read_cv2_img(img_path)
     morphed_img = self.modify_face(img, expresion)
     output_name = '%s_out.png' % os.path.basename(img_path)
     self._save_img(morphed_img, output_name)
Example #26
 def _get_img_by_id(self, id):
     filepath = os.path.join(self._imgs_dir,
                             '%s.%s' % (id, self.img_format))
     return cv_utils.read_cv2_img(filepath), filepath
Example #27
 def morph_file(self, img_path, desired_cond):
     img = cv_utils.read_cv2_img(img_path)
     morphed_img = self._morph_face(img, desired_cond) #!!!!!!
     output_name = '%s_out.png' % os.path.basename(img_path)
     self._save_img(morphed_img, output_name)# !!!!!!!
Example #28
 def _get_img_by_id(self, id):
     filepath = id + '.jpg'
     return cv_utils.read_cv2_img(filepath), filepath
Example #29
 def _get_img_by_id(self, id):
     filepath = os.path.join(self._imgs_dir, id+'.jpg')
     return cv_utils.read_cv2_img(filepath), filepath
Example #30
 def _get_img_by_id(self, id):
     filepath = self._imgs_dir + id + '.jpg'
     # print('filepath : ',filepath)
     # print(filepath)
     return cv_utils.read_cv2_img(filepath), filepath
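
cv_utils.read_cv2_img is a project-local helper rather than part of OpenCV itself. Judging from how the examples use its result (the commented-out cv2.cvtColor calls in Example #15 and the None check in Example #3), a typical implementation reads the file with OpenCV and converts it to RGB. The sketch below is an assumption about that behaviour, not any project's actual code, and it omits extras such as the input_nc argument seen in Example #19.

import cv2


def read_cv2_img(path):
    """Illustrative sketch only: read an image from disk and return it as RGB."""
    img = cv2.imread(path)   # OpenCV returns a BGR array, or None if the file is unreadable
    if img is None:          # callers such as Example #3 check for this
        return None
    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # convert BGR -> RGB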