def __getitem__(self, idx):
    data = copy.deepcopy(self.datalist[idx])
    img_id, img_path, img_shape, joint_cam, cam_param, mano_param = \
        data['img_id'], data['img_path'], data['img_shape'], data['joint_cam'], data['cam_param'], data['mano_param']
    rot, flip = 0, 0

    # mano coordinates
    mano_mesh_cam, mano_joint_cam = self.get_mano_coord(mano_param, cam_param)
    mano_coord_cam = np.concatenate((mano_mesh_cam, mano_joint_cam))

    # cam -> image projection
    # focal, princpt = cam_param['focal'], cam_param['princpt']
    # joint_coord_img = cam2pixel(mano_joint_cam, focal, princpt)[:, :2]

    # root-align cam mesh/joint
    mano_coord_cam = mano_coord_cam - mano_joint_cam[:1]
    mesh_coord_cam = mano_coord_cam[:self.vertex_num]
    joint_coord_cam = mano_coord_cam[self.vertex_num:]

    # use detection
    det_data = self.datalist_pose2d_det[idx]
    assert img_id == det_data['img_id']
    joint_coord_img = det_data['img_joint']

    # make bbox
    bbox = get_bbox(joint_coord_img)
    bbox = process_bbox(bbox.copy())

    # aug
    joint_coord_img, trans = j2d_processing(joint_coord_img.copy(),
                                            (cfg.MODEL.input_shape[1], cfg.MODEL.input_shape[0]),
                                            bbox, rot, flip, None)

    # -> 0~1
    joint_coord_img = joint_coord_img[:, :2]
    joint_coord_img /= np.array([[cfg.MODEL.input_shape[1], cfg.MODEL.input_shape[0]]])

    # normalize loc&scale
    mean, std = np.mean(joint_coord_img, axis=0), np.std(joint_coord_img, axis=0)
    joint_coord_img = (joint_coord_img.copy() - mean) / std

    if cfg.MODEL.name == 'pose2mesh_net':
        # default valid
        mesh_valid = np.ones((len(mesh_coord_cam), 1), dtype=np.float32)
        reg_joint_valid = np.ones((len(joint_coord_cam), 1), dtype=np.float32)
        lift_joint_valid = np.ones((len(joint_coord_cam), 1), dtype=np.float32)

        inputs = {'pose2d': joint_coord_img}
        targets = {'mesh': mesh_coord_cam / 1000, 'lift_pose3d': joint_coord_cam, 'reg_pose3d': joint_coord_cam}
        meta = {'mesh_valid': mesh_valid, 'lift_pose3d_valid': lift_joint_valid, 'reg_pose3d_valid': reg_joint_valid}
        return inputs, targets, meta

    elif cfg.MODEL.name == 'posenet':
        # default valid
        joint_valid = np.ones((len(joint_coord_cam), 1), dtype=np.float32)
        return joint_coord_img, joint_coord_cam, joint_valid
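# The crop -> 0~1 -> zero-mean/unit-std steps above recur in every __getitem__
# in this section. A minimal standalone sketch of that shared normalization,
# assuming a (J, 2) float array already warped into the network crop; the
# helper name `normalize_pose2d` is hypothetical, not part of the repo.
import numpy as np

def normalize_pose2d(joint_img, input_w, input_h):
    """Scale cropped 2D joints to 0~1, then standardize location and scale."""
    joint_img = joint_img[:, :2] / np.array([[input_w, input_h]], dtype=np.float32)
    mean, std = np.mean(joint_img, axis=0), np.std(joint_img, axis=0)
    return (joint_img - mean) / std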
def __getitem__(self, idx):
    data = copy.deepcopy(self.datalist[idx])
    annot_id, img_id, img_path, img_shape = data['annot_id'], data['image_id'], data['img_path'], data['img_shape']
    cam_param, bbox, smpl_param = data['cam_param'].copy(), data['bbox'].copy(), data['smpl_param'].copy()
    rot, flip = 0, 0

    # detection
    # detection_data = copy.deepcopy(self.datadict_pose2d_det[idx])
    # det_annot_id, det_joint_img_coco = detection_data['annotation_id'], np.array(detection_data['keypoints'], dtype=np.float32)
    detection_data = self.datadict_pose2d_det[annot_id]
    det_joint_img_coco = detection_data['img_joint']
    joint_img_coco = self.add_pelvis_and_neck(det_joint_img_coco)

    # smpl coordinates
    mesh_cam, joint_cam_smpl = self.get_smpl_coord(smpl_param)

    # regress h36m, coco cam joints
    joint_cam_coco, gt_joint_img_coco = self.get_coco_from_mesh(mesh_cam, cam_param)
    joint_cam_h36m = self.get_h36mJ_from_mesh(mesh_cam)

    # root-relative camera coordinates
    mesh_cam = mesh_cam - joint_cam_h36m[:1]
    joint_cam_coco = joint_cam_coco - joint_cam_coco[-2:-1]
    joint_cam_h36m = joint_cam_h36m - joint_cam_h36m[:1]

    # make new bbox
    bbox = get_bbox(gt_joint_img_coco)
    bbox = process_bbox(bbox.copy())

    if cfg.DATASET.use_gt_input:
        joint_img_coco = gt_joint_img_coco

    # aug
    joint_img_coco, trans = j2d_processing(joint_img_coco.copy(),
                                           (cfg.MODEL.input_shape[1], cfg.MODEL.input_shape[0]),
                                           bbox, rot, flip, None)

    # -> 0~1
    joint_img_coco = joint_img_coco[:, :2]
    joint_img_coco /= np.array([[cfg.MODEL.input_shape[1], cfg.MODEL.input_shape[0]]])

    # normalize loc&scale
    mean, std = np.mean(joint_img_coco, axis=0), np.std(joint_img_coco, axis=0)
    joint_img_coco = (joint_img_coco.copy() - mean) / std

    if cfg.MODEL.name == 'pose2mesh_net':
        inputs = {'pose2d': joint_img_coco}
        targets = {'mesh': mesh_cam / 1000, 'reg_pose3d': joint_cam_h36m}
        meta = {'dummy': np.ones(1, dtype=np.float32)}
        return inputs, targets, meta

    elif cfg.MODEL.name == 'posenet':
        joint_valid = np.ones((len(joint_cam_coco), 1), dtype=np.float32)  # dummy
        return joint_img_coco, joint_cam_coco, joint_valid
def optimize_cam_param(project_net, joint_input, crop_size):
    bbox = get_bbox(joint_input)
    bbox1 = process_bbox(bbox.copy(), aspect_ratio=1.0, scale=1.25)
    bbox2 = process_bbox(bbox.copy())
    proj_target_joint_img, trans = j2d_processing(joint_input.copy(), (crop_size, crop_size), bbox1, 0, 0, None)
    joint_img, _ = j2d_processing(joint_input.copy(),
                                  (cfg.MODEL.input_shape[1], cfg.MODEL.input_shape[0]),
                                  bbox2, 0, 0, None)

    joint_img = joint_img[:, :2]
    joint_img /= np.array([[cfg.MODEL.input_shape[1], cfg.MODEL.input_shape[0]]])
    mean, std = np.mean(joint_img, axis=0), np.std(joint_img, axis=0)
    joint_img = (joint_img.copy() - mean) / std
    joint_img = torch.Tensor(joint_img[None, :, :]).cuda()
    target_joint = torch.Tensor(proj_target_joint_img[None, :, :2]).cuda()

    # get optimization settings for projection
    criterion = nn.L1Loss()
    optimizer = optim.Adam(project_net.parameters(), lr=0.1)

    # estimate mesh, pose
    model.eval()
    pred_mesh, _ = model(joint_img)
    pred_mesh = pred_mesh[:, graph_perm_reverse[:mesh_model.face.max() + 1], :]
    pred_3d_joint = torch.matmul(joint_regressor, pred_mesh)

    out = {}
    # assume batch size 1
    project_net.train()
    for j in range(0, 1500):
        # projection
        pred_2d_joint = project_net(pred_3d_joint.detach())

        loss = criterion(pred_2d_joint, target_joint[:, :17, :])
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if j == 500:
            for param_group in optimizer.param_groups:
                param_group['lr'] = 0.05
        if j == 1000:
            for param_group in optimizer.param_groups:
                param_group['lr'] = 0.001

    out['mesh'] = pred_mesh[0].detach().cpu().numpy()
    out['cam_param'] = project_net.cam_param[0].detach().cpu().numpy()
    out['bbox'] = bbox1
    out['target'] = proj_target_joint_img

    return out
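# A usage sketch for optimize_cam_param, assuming the demo setup at the end of
# this section has already built `model`, `joint_regressor`, `mesh_model`, and
# `graph_perm_reverse`. The input path and crop_size=500 are assumptions for
# illustration. The 500/1000-step LR drops (0.1 -> 0.05 -> 0.001) anneal the
# weak-perspective camera fit to the detected 2D pose.
project_net = models.project_net.get_model().cuda()
joint_input = np.load('demo_pose2d.npy')  # hypothetical (J, 2) 2D detection

out = optimize_cam_param(project_net, joint_input, crop_size=500)
print(out['mesh'].shape, out['cam_param'])  # fitted mesh vertices, camera params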
def __getitem__(self, idx):
    data = copy.deepcopy(self.datalist[idx])
    flip, rot = 0, 0

    aid, img_path, img_shape = data['aid'], data['img_path'], data['img_shape']
    gt_joint_img_coco = data['joint_img']

    det_data = self.datalist_cocoj_det[idx]
    det_aid = det_data['aid']
    assert det_aid == aid, f"detection aid: {det_aid}, dataset aid: {aid} / det_aid type: {type(det_aid)}, aid type: {type(aid)}"
    det_img_coco = np.array(det_data['keypoints']).reshape(-1, 3)
    joint_img = self.add_pelvis_and_neck(det_img_coco)
    # vis_2d_joints(gt_joint_img, img_path, self.coco_skeleton, prefix=f"{img_path.split('/')[-1]}")

    bbox = get_bbox(joint_img)
    bbox1 = process_bbox(bbox.copy(), aspect_ratio=1.0, scale=1.25)
    bbox2 = process_bbox(bbox.copy())

    proj_target_joint_img, trans = j2d_processing(joint_img.copy(), (self.img_res, self.img_res), bbox1, rot, flip, None)
    joint_img, _ = j2d_processing(joint_img.copy(),
                                  (cfg.MODEL.input_shape[1], cfg.MODEL.input_shape[0]),
                                  bbox2, rot, flip, None)

    # -> 0~1
    joint_img = joint_img[:, :2]
    joint_img /= np.array([[cfg.MODEL.input_shape[1], cfg.MODEL.input_shape[0]]])

    # normalize loc&scale
    mean, std = np.mean(joint_img, axis=0), np.std(joint_img, axis=0)
    joint_img = (joint_img.copy() - mean) / std

    return joint_img[:, :2], proj_target_joint_img[:, :2], img_path, trans
def __getitem__(self, idx):
    data = copy.deepcopy(self.datalist[idx])
    img_id, img_name, cam_param, bbox, smpl_param, img_shape = \
        data['img_id'], data['img_name'], data['cam_param'], data['bbox'], data['smpl_param'], data['img_shape']
    flip, rot = augm_params(is_train=(self.data_split == 'train'))

    # smpl coordinates
    smpl_mesh_coord_cam, smpl_joint_coord_cam = self.get_smpl_coord(smpl_param)
    smpl_coord_cam = np.concatenate((smpl_mesh_coord_cam, smpl_joint_coord_cam))
    smpl_coord_img = cam2pixel(smpl_coord_cam, cam_param['focal'], cam_param['princpt'])
    joint_coord_img = smpl_coord_img[self.smpl_vertex_num:][:, :2]
    # vis_2d_pose(joint_coord_img, img_path, self.smpl_skeleton, prefix='gt')

    # root-relative cam coord
    smpl_coord_cam = smpl_coord_cam - smpl_coord_cam[self.smpl_vertex_num + self.smpl_root_joint_idx]
    mesh_coord_cam = smpl_coord_cam[:self.smpl_vertex_num]
    joint_coord_cam = smpl_coord_cam[self.smpl_vertex_num:]

    if not cfg.DATASET.use_gt_input:
        # train / test with 2D detection input
        det_data = self.datalist_pose2d_det[idx]
        assert img_id == det_data['img_id']
        joint_coord_img = det_data['img_joint']
        # vis_2d_pose(joint_coord_img, img_path, self.smpl_skeleton, prefix='det')
        # vis_3d_pose(joint_coord_cam, self.smpl_skeleton, joint_set_name='smpl')

    # make new bbox
    bbox = get_bbox(joint_coord_img)
    bbox = process_bbox(bbox.copy())

    # aug
    joint_coord_img, trans = j2d_processing(joint_coord_img.copy(),
                                            (cfg.MODEL.input_shape[1], cfg.MODEL.input_shape[0]),
                                            bbox, rot, flip, self.flip_pairs)
    joint_coord_cam = j3d_processing(joint_coord_cam, rot, flip, self.flip_pairs)

    # -> 0~1
    joint_coord_img = joint_coord_img[:, :2]
    joint_coord_img /= np.array([[cfg.MODEL.input_shape[1], cfg.MODEL.input_shape[0]]])

    # normalize loc&scale
    mean, std = np.mean(joint_coord_img, axis=0), np.std(joint_coord_img, axis=0)
    joint_coord_img = (joint_coord_img.copy() - mean) / std

    if cfg.MODEL.name == 'pose2mesh_net':
        # default valid
        mesh_valid = np.ones((len(mesh_coord_cam), 1), dtype=np.float32)
        reg_joint_valid = np.ones((len(joint_coord_cam), 1), dtype=np.float32)
        lift_joint_valid = np.ones((len(joint_coord_cam), 1), dtype=np.float32)

        inputs = {'pose2d': joint_coord_img}
        targets = {'mesh': mesh_coord_cam / 1000, 'lift_pose3d': joint_coord_cam, 'reg_pose3d': joint_coord_cam}
        meta = {'mesh_valid': mesh_valid, 'lift_pose3d_valid': lift_joint_valid, 'reg_pose3d_valid': reg_joint_valid}
        return inputs, targets, meta

    elif cfg.MODEL.name == 'posenet':
        # default valid
        joint_valid = np.ones((len(joint_coord_cam), 1), dtype=np.float32)
        return joint_coord_img, joint_coord_cam, joint_valid
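# `augm_params` is defined elsewhere in the repo; a plausible sketch of its
# behavior, assuming SPIN-style augmentation (random flip, rotation drawn from
# a clipped normal and zeroed most of the time). The exact factors here
# (rot_factor=30, the 0.5 and 0.6 probabilities) are assumptions, not
# confirmed values from the repo.
import numpy as np

def augm_params(is_train, rot_factor=30):
    flip, rot = 0, 0
    if is_train:
        flip = 1 if np.random.uniform() <= 0.5 else 0
        rot = float(np.clip(np.random.randn() * rot_factor, -2 * rot_factor, 2 * rot_factor))
        if np.random.uniform() <= 0.6:  # train mostly without rotation
            rot = 0
    return flip, rot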
def __getitem__(self, idx):
    data = copy.deepcopy(self.datalist[idx])
    flip, rot = augm_params(is_train=(self.data_split == 'train'))

    # get smpl mesh, joints
    smpl_param, cam_param, img_shape = data['smpl_param'], data['cam_param'], data['img_shape']
    mesh_cam, joint_cam_smpl = self.get_smpl_coord(smpl_param, cam_param)

    # regress h36m, coco joints
    joint_cam_h36m, joint_img_h36m = self.get_joints_from_mesh(mesh_cam, 'human36', cam_param)
    joint_cam_coco, joint_img_coco = self.get_joints_from_mesh(mesh_cam, 'coco', cam_param)

    # debug vis
    # vis_3d_pose(joint_cam_coco, self.coco_skeleton, joint_set_name='coco', prefix=f'coco_joint_cam_{idx}')
    # img = np.zeros((int(img_shape[0]), int(img_shape[1]), 3))
    # vis_2d_pose(joint_img_coco, img, self.coco_skeleton, prefix='coco joint img')

    # root-relative camera coordinates
    mesh_cam = mesh_cam - joint_cam_h36m[:1]
    joint_cam_coco = joint_cam_coco - joint_cam_coco[-2:-1]
    joint_cam_h36m = joint_cam_h36m - joint_cam_h36m[:1]

    # joint_cam is the PoseNet target
    if self.input_joint_name == 'coco':
        joint_img, joint_cam = joint_img_coco, joint_cam_coco
    elif self.input_joint_name == 'human36':
        joint_img, joint_cam = joint_img_h36m, joint_cam_h36m

    # make new bbox
    tight_bbox = get_bbox(joint_img)
    bbox = process_bbox(tight_bbox.copy())

    # aug
    joint_img, trans = j2d_processing(joint_img.copy(),
                                      (cfg.MODEL.input_shape[1], cfg.MODEL.input_shape[0]),
                                      bbox, rot, 0, None)
    if not cfg.DATASET.use_gt_input:
        joint_img = self.replace_joint_img(joint_img, tight_bbox, trans)
    if flip:
        joint_img = flip_2d_joint(joint_img, cfg.MODEL.input_shape[1], self.flip_pairs)
    joint_cam = j3d_processing(joint_cam, rot, flip, self.flip_pairs)

    # -> 0~1
    joint_img = joint_img[:, :2]
    joint_img /= np.array([[cfg.MODEL.input_shape[1], cfg.MODEL.input_shape[0]]])

    # normalize loc&scale
    mean, std = np.mean(joint_img, axis=0), np.std(joint_img, axis=0)
    joint_img = (joint_img.copy() - mean) / std

    if cfg.MODEL.name == 'pose2mesh_net':
        # default valid
        mesh_valid = np.ones((len(mesh_cam), 1), dtype=np.float32)
        reg_joint_valid = np.ones((len(joint_cam_h36m), 1), dtype=np.float32)
        lift_joint_valid = np.ones((len(joint_cam), 1), dtype=np.float32)

        inputs = {'pose2d': joint_img}
        targets = {'mesh': mesh_cam / 1000, 'lift_pose3d': joint_cam, 'reg_pose3d': joint_cam_h36m}
        meta = {'mesh_valid': mesh_valid, 'lift_pose3d_valid': lift_joint_valid, 'reg_pose3d_valid': reg_joint_valid}
        return inputs, targets, meta

    elif cfg.MODEL.name == 'posenet':
        # default valid
        joint_valid = np.ones((len(joint_cam), 1), dtype=np.float32)
        return joint_img, joint_cam, joint_valid
def __getitem__(self, idx):
    data = copy.deepcopy(self.datalist[idx])
    annot_id, img_id, img_path, img_shape = data['annot_id'], data['image_id'], data['img_path'], data['img_shape']
    cam_param, bbox, smpl_param = data['cam_param'].copy(), data['bbox'].copy(), data['smpl_param'].copy()
    rot, flip = 0, 0

    # get coco img joints from detection
    joint_img_coco = data['pred_pose2d']
    # vis
    # img = cv2.imread(img_path)
    # vis_2d_pose(joint_img_coco, img, self.coco_skeleton, prefix='vis2dpose', bbox=None)
    # import pdb; pdb.set_trace()

    # smpl coordinates
    mesh_cam, joint_cam_smpl = self.get_smpl_coord(smpl_param)

    # regress h36m, coco cam joints
    joint_cam_coco, gt_joint_img_coco = self.get_coco_from_mesh(mesh_cam, cam_param)
    joint_cam_h36m = self.get_h36mJ_from_mesh(mesh_cam)

    # root-relative camera coordinates
    mesh_cam = mesh_cam - joint_cam_h36m[:1]
    joint_cam_coco = joint_cam_coco - joint_cam_coco[-2:-1]
    joint_cam_h36m = joint_cam_h36m - joint_cam_h36m[:1]

    if cfg.DATASET.use_gt_input:
        joint_img_coco = gt_joint_img_coco

    # make new bbox
    bbox = get_bbox(joint_img_coco)
    bbox = process_bbox(bbox.copy())

    # aug
    joint_img_coco, trans = j2d_processing(joint_img_coco.copy(),
                                           (cfg.MODEL.input_shape[1], cfg.MODEL.input_shape[0]),
                                           bbox, rot, flip, None)

    # -> 0~1
    joint_img_coco = joint_img_coco[:, :2]
    joint_img_coco /= np.array([[cfg.MODEL.input_shape[1], cfg.MODEL.input_shape[0]]])

    # normalize loc&scale
    mean, std = np.mean(joint_img_coco, axis=0), np.std(joint_img_coco, axis=0)
    joint_img_coco = (joint_img_coco.copy() - mean) / std

    if cfg.MODEL.name == 'pose2mesh_net':
        inputs = {'pose2d': joint_img_coco}
        targets = {'mesh': mesh_cam / 1000, 'reg_pose3d': joint_cam_h36m}
        meta = {'dummy': np.ones(1, dtype=np.float32)}
        return inputs, targets, meta

    elif cfg.MODEL.name == 'posenet':
        joint_valid = np.ones((len(joint_cam_coco), 1), dtype=np.float32)  # dummy
        return joint_img_coco, joint_cam_coco, joint_valid
def __getitem__(self, idx):
    data = copy.deepcopy(self.datalist[idx])
    img_id, bbox, smpl_param, cam_param, img_shape = \
        data['img_id'], data['bbox'].copy(), data['smpl_param'].copy(), data['cam_param'].copy(), data['img_hw']
    flip, rot = augm_params(is_train=(self.data_split == 'train'))

    # smpl coordinates
    mesh_cam, joint_cam_smpl = self.get_smpl_coord(smpl_param, cam_param)
    # regress coco joints
    joint_cam_coco, joint_img_coco = self.get_coco_from_mesh(mesh_cam, cam_param)
    # h36m joints from datasets
    joint_cam_h36m, joint_img_h36m = data['joint_cam'], data['joint_img'][:, :2]

    # root-relative camera coordinates
    mesh_cam = mesh_cam - joint_cam_h36m[:1]
    # joint_cam_smpl = joint_cam_smpl - joint_cam_h36m[:1]
    joint_cam_coco = joint_cam_coco - joint_cam_coco[-2:-1]
    joint_cam_h36m = joint_cam_h36m - joint_cam_h36m[:1]

    # joint_cam is the PoseNet target
    if self.input_joint_name == 'coco':
        joint_img, joint_cam = joint_img_coco, joint_cam_coco
    elif self.input_joint_name == 'human36':
        joint_img, joint_cam = joint_img_h36m, joint_cam_h36m

    # make new bbox
    bbox = get_bbox(joint_img)
    bbox = process_bbox(bbox.copy())

    # aug
    joint_img, trans = j2d_processing(joint_img.copy(),
                                      (cfg.MODEL.input_shape[1], cfg.MODEL.input_shape[0]),
                                      bbox, rot, flip, self.flip_pairs)
    joint_cam = j3d_processing(joint_cam, rot, flip, self.flip_pairs)

    if not cfg.DATASET.use_gt_input:
        joint_img = self.replace_joint_img(idx, img_id, joint_img, bbox, trans)

    # vis
    # img = cv2.imread(img_path, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
    # new_img = cv2.warpAffine(img, trans, (cfg.MODEL.input_shape[1], cfg.MODEL.input_shape[0]), flags=cv2.INTER_LINEAR)
    # vis_2d_pose(joint_img, new_img, self.human36_skeleton, prefix='detection')
    # vis_3d_pose(joint_cam, self.human36_skeleton, joint_set_name='human36', gt=True)

    # -> 0~1
    joint_img = joint_img[:, :2]
    joint_img /= np.array([[cfg.MODEL.input_shape[1], cfg.MODEL.input_shape[0]]])

    # normalize loc&scale
    mean, std = np.mean(joint_img, axis=0), np.std(joint_img, axis=0)
    joint_img = (joint_img.copy() - mean) / std

    if cfg.MODEL.name == 'pose2mesh_net':
        # default valid
        mesh_valid = np.ones((len(mesh_cam), 1), dtype=np.float32)
        reg_joint_valid = np.ones((len(joint_cam_h36m), 1), dtype=np.float32)
        lift_joint_valid = np.ones((len(joint_cam), 1), dtype=np.float32)

        # if fitted mesh is too far from h36m gt, discard it
        error = self.get_fitting_error(joint_cam_h36m, mesh_cam)
        if error > self.fitting_thr:
            mesh_valid[:] = 0
            if self.input_joint_name == 'coco':
                lift_joint_valid[:] = 0

        inputs = {'pose2d': joint_img}
        targets = {'mesh': mesh_cam / 1000, 'lift_pose3d': joint_cam, 'reg_pose3d': joint_cam_h36m}
        meta = {'mesh_valid': mesh_valid, 'lift_pose3d_valid': lift_joint_valid, 'reg_pose3d_valid': reg_joint_valid}
        return inputs, targets, meta

    elif cfg.MODEL.name == 'posenet':
        # default valid
        joint_valid = np.ones((len(joint_cam), 1), dtype=np.float32)
        # if fitted mesh is too far from h36m gt, discard it
        error = self.get_fitting_error(joint_cam_h36m, mesh_cam)
        if (error > self.fitting_thr) and (self.input_joint_name == 'coco'):
            joint_valid[:] = 0
        return joint_img, joint_cam, joint_valid
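# The validity gating above (zeroing masks when the pseudo-GT SMPL fit drifts
# from the H36M annotation) can be read as one small rule. A hedged sketch,
# assuming `get_fitting_error` returns a mean per-joint distance in the same
# unit as `fitting_thr`; the helper name `gate_by_fitting_error` is
# hypothetical, not part of the repo.
def gate_by_fitting_error(error, fitting_thr, mesh_valid, lift_joint_valid, from_coco):
    """Discard supervision from a bad fit: always the mesh target, and the
    lifted 3D pose too when the input joint set (coco) has no direct 3D GT."""
    if error > fitting_thr:
        mesh_valid[:] = 0
        if from_coco:
            lift_joint_valid[:] = 0
    return mesh_valid, lift_joint_valid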
def __getitem__(self, idx):
    data = copy.deepcopy(self.datalist[idx])
    img_shape, bbox, dp_data, smplify_result = data['img_shape'], data['bbox'], data['dp_data'], data['smplify_result']
    flip, rot = augm_params(is_train=(self.data_split == 'train'))
    # img_name = img_path.split('/')[-1][:-4]

    smpl_param, cam_param = smplify_result['smpl_param'], smplify_result['cam_param']

    # regress h36m, coco joints
    mesh_cam, joint_cam_smpl = self.get_smpl_coord(smpl_param)
    joint_cam_h36m, joint_img_h36m = self.get_joints_from_mesh(mesh_cam, 'human36', cam_param)
    joint_cam_coco, joint_img_coco = self.get_joints_from_mesh(mesh_cam, 'coco', cam_param)
    # vis_2d_pose(joint_img_h36m, img_path, self.human36_skeleton, prefix='h36m joint')
    # vis_2d_pose(joint_img_coco, img_path, self.coco_skeleton, prefix='coco joint')
    # vis_3d_pose(joint_cam_h36m, self.human36_skeleton, 'human36', gt=True)

    # root-relative camera coordinates
    mesh_cam = mesh_cam - joint_cam_h36m[:1]
    joint_cam_coco = joint_cam_coco - joint_cam_coco[-2:-1]
    joint_cam_h36m = joint_cam_h36m - joint_cam_h36m[:1]

    if self.input_joint_name == 'coco':
        joint_img, joint_cam = joint_img_coco, joint_cam_coco
    elif self.input_joint_name == 'human36':
        joint_img, joint_cam = joint_img_h36m, joint_cam_h36m

    # make new bbox
    tight_bbox = get_bbox(joint_img)
    bbox = process_bbox(tight_bbox.copy())

    # aug
    joint_img, trans = j2d_processing(joint_img.copy(),
                                      (cfg.MODEL.input_shape[1], cfg.MODEL.input_shape[0]),
                                      bbox, rot, 0, None)
    if not cfg.DATASET.use_gt_input:
        joint_img = self.replace_joint_img(joint_img, tight_bbox, trans)
    if flip:
        joint_img = flip_2d_joint(joint_img, cfg.MODEL.input_shape[1], self.flip_pairs)
    joint_cam = j3d_processing(joint_cam, rot, flip, self.flip_pairs)

    # -> 0~1
    joint_img = joint_img[:, :2]
    joint_img /= np.array([[cfg.MODEL.input_shape[1], cfg.MODEL.input_shape[0]]])

    # normalize loc&scale
    mean, std = np.mean(joint_img, axis=0), np.std(joint_img, axis=0)
    joint_img = (joint_img.copy() - mean) / std

    if cfg.MODEL.name == 'pose2mesh_net':
        # default valid
        mesh_valid = np.ones((len(mesh_cam), 1), dtype=np.float32)
        reg_joint_valid = np.ones((len(joint_cam_h36m), 1), dtype=np.float32)
        lift_joint_valid = np.ones((len(joint_cam), 1), dtype=np.float32)

        error = self.get_fitting_error(tight_bbox, data['joint_img'], joint_img_coco[:17], data['joint_valid'])
        if error > self.fitting_thr:
            mesh_valid[:], reg_joint_valid[:], lift_joint_valid[:] = 0, 0, 0

        inputs = {'pose2d': joint_img}
        targets = {'mesh': mesh_cam / 1000, 'lift_pose3d': joint_cam, 'reg_pose3d': joint_cam_h36m}
        meta = {'mesh_valid': mesh_valid, 'lift_pose3d_valid': lift_joint_valid, 'reg_pose3d_valid': reg_joint_valid}
        return inputs, targets, meta

    elif cfg.MODEL.name == 'posenet':
        # default valid
        joint_valid = np.ones((len(joint_cam), 1), dtype=np.float32)
        # compute fitting error
        error = self.get_fitting_error(tight_bbox, data['joint_img'], joint_img_coco[:17], data['joint_valid'])
        if error > self.fitting_thr:
            joint_valid[:, :] = 0
        return joint_img, joint_cam, joint_valid
cfg.MODEL.posenet_pretrained = False

# get model and input
if joint_set == 'mano':
    mesh_model = MANO()
else:
    mesh_model = SMPL()
model, joint_regressor, joint_num, skeleton, graph_L, graph_perm_reverse = \
    get_joint_setting(mesh_model, joint_category=joint_set)
model = model.cuda()
project_net = models.project_net.get_model().cuda()
joint_regressor = torch.Tensor(joint_regressor).cuda()
joint_input = np.load(input_path)

# pre-process input
bbox = get_bbox(joint_input)
bbox1 = process_bbox(bbox.copy(), aspect_ratio=1.0, scale=1.25)
bbox2 = process_bbox(bbox.copy())

proj_target_joint_img, trans = j2d_processing(joint_input.copy(), (IMG_RES, IMG_RES), bbox1, 0, 0, None)
joint_img, _ = j2d_processing(joint_input.copy(),
                              (cfg.MODEL.input_shape[1], cfg.MODEL.input_shape[0]),
                              bbox2, 0, 0, None)

joint_img = joint_img[:, :2]
joint_img /= np.array([[cfg.MODEL.input_shape[1], cfg.MODEL.input_shape[0]]])
mean, std = np.mean(joint_img, axis=0), np.std(joint_img, axis=0)
joint_img = (joint_img.copy() - mean) / std
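# Continuing the demo sketch: the normalized 2D pose feeds Pose2Mesh the same
# way optimize_cam_param does above; `graph_perm_reverse` undoes the graph
# coarsening permutation so vertices line up with mesh_model.face. This assumes
# the setup block above has run; the output is root-relative and in meters,
# matching the training targets (mesh_cam / 1000).
model.eval()
with torch.no_grad():
    pose2d = torch.Tensor(joint_img[None, :, :2]).cuda()
    pred_mesh, _ = model(pose2d)
    pred_mesh = pred_mesh[:, graph_perm_reverse[:mesh_model.face.max() + 1], :]
print(pred_mesh.shape)  # (1, V, 3) root-relative mesh vertices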