def __init__(self, args):
    self._args = args

    # Create the generator model that will predict the SMPL parameters,
    # and the discriminator that judges whether they look like real poses/shapes.
    self._generator = Generator(self._args)
    self._discriminator = Discriminator(self._args)
    self._gen_optim = tf.optimizers.Adam(learning_rate=self._args.gen_lr)
    self._disc_optim = tf.optimizers.Adam(learning_rate=self._args.disc_lr)

    self._data_loader = DataLoader(args)
    self._lsp_train_ds, self._lsp_test_ds = self._data_loader.load_lsp_dataset()
    self._smpl_train_ds, self._smpl_test_ds = self._data_loader.load_smpl_dataset()

    self._smpl_model = SMPL(self._args)
    self._create_summary_writer()

    # Set up saving and restoring of the model
    self._ckpt = tf.train.Checkpoint(generator=self._generator,
                                     discriminator=self._discriminator,
                                     gen_optim=self._gen_optim,
                                     disc_optim=self._disc_optim)
    self._ckpt_manager = tf.train.CheckpointManager(
        self._ckpt, directory=self._args.model_dir, max_to_keep=3)
    if self._args.load_model:
        self._ckpt.restore(self._ckpt_manager.latest_checkpoint)

    self._load_train_data()
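# A minimal sketch of how the checkpoint objects set up above are typically
# consumed in the training loop. The `train`/`_train_step` methods and the
# `num_epochs`/`save_every` arguments are assumptions for illustration; only
# tf.train.CheckpointManager.save() is a real API used by the constructor's
# objects.
def train(self):
    step = 0
    for epoch in range(self._args.num_epochs):  # hypothetical arg
        for images, real_params in zip(self._lsp_train_ds, self._smpl_train_ds):
            self._train_step(images, real_params)  # hypothetical helper
            step += 1
            if step % self._args.save_every == 0:  # hypothetical arg
                # Writes generator, discriminator, and optimizer state;
                # keeps at most 3 checkpoints (max_to_keep above).
                self._ckpt_manager.save(checkpoint_number=step)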
def __init__(self, img_res):
    dataset_name = 'COCO'
    self.data_split = 'val'  # 'train' if data_split == 'train' else 'val'
    self.img_path = osp.join(cfg.data_dir, dataset_name, 'images')
    self.annot_path = osp.join(cfg.data_dir, dataset_name, 'annotations')
    self.img_res = img_res
    self.smpl = SMPL()

    # COCO joint set
    self.coco_joint_num = 19  # 17 + 2, manually added pelvis and neck
    self.coco_joints_name = (
        'Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist',
        'R_Wrist', 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Pelvis', 'Neck')
    self.coco_flip_pairs = ((1, 2), (3, 4), (5, 6), (7, 8), (9, 10), (11, 12), (13, 14), (15, 16))
    # self.coco_skeleton = (
    #     (1, 2), (0, 1), (0, 2), (2, 4), (1, 3), (6, 8), (8, 10), (5, 7), (7, 9), (12, 14), (14, 16), (11, 13),
    #     (13, 15), (5, 6), (11, 12))
    self.coco_skeleton = (
        (1, 2), (0, 1), (0, 2), (2, 4), (1, 3), (6, 8), (8, 10), (5, 7), (7, 9), (11, 13), (13, 15), (17, 11),
        (17, 12), (17, 18), (18, 5), (18, 6), (18, 0), (12, 14), (14, 16))
    self.coco_root_joint_idx = self.coco_joints_name.index('Pelvis')
    self.joint_regressor_coco = self.smpl.joint_regressor_coco

    input_joint_name = 'coco'
    self.joint_num, self.skeleton, self.flip_pairs = self.get_joint_setting(input_joint_name)

    self.datalist_cocoj_det = self.load_coco_joints_det(
        osp.join(cfg.data_dir, dataset_name, 'hrnet_output_on_valset.json'))
    self.datalist = self.load_data()[:6337]
    self.graph_Adj, self.graph_L, self.graph_perm, self.graph_perm_reverse = \
        build_coarse_graphs(self.smpl.face, self.joint_num, self.skeleton, self.flip_pairs, levels=9)
    print("# of COCO annotation and detection data: ", len(self.datalist), len(self.datalist_cocoj_det))
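# A minimal sketch of how the two extra joints in the 19-joint set are
# usually derived from the 17 COCO keypoints (the "manually added pelvis and
# neck" noted above): pelvis as the hip midpoint, neck as the shoulder
# midpoint. The function name and array layout are illustrative assumptions,
# not part of the class above.
import numpy as np

def add_pelvis_and_neck(coco_joints_17):
    """coco_joints_17: (17, 2) or (17, 3) array in COCO keypoint order."""
    l_hip, r_hip = coco_joints_17[11], coco_joints_17[12]
    l_shoulder, r_shoulder = coco_joints_17[5], coco_joints_17[6]
    pelvis = 0.5 * (l_hip + r_hip)           # index 17 in the 19-joint set
    neck = 0.5 * (l_shoulder + r_shoulder)   # index 18 in the 19-joint set
    return np.concatenate([coco_joints_17, pelvis[None], neck[None]], axis=0)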
def __init__(self, data_split, args):
    dataset_name = 'PW3D'
    self.data_split = 'test'
    self.data_path = osp.join(cfg.data_dir, dataset_name, 'data')
    self.det_data_path = osp.join(self.data_path, 'darkpose_output_on_testset.json')  # 'hrnet_output_on_testset.json'
    self.img_path = osp.join(cfg.data_dir, dataset_name, 'imageFiles')

    # SMPL joint set
    self.mesh_model = SMPL()
    self.smpl_root_joint_idx = self.mesh_model.root_joint_idx
    self.face_kps_vertex = self.mesh_model.face_kps_vertex
    self.smpl_vertex_num = 6890
    self.smpl_joint_num = 24
    self.smpl_flip_pairs = ((1, 2), (4, 5), (7, 8), (10, 11), (13, 14), (16, 17), (18, 19), (20, 21), (22, 23))
    self.smpl_skeleton = (
        (0, 1), (1, 4), (4, 7), (7, 10), (0, 2), (2, 5), (5, 8), (8, 11), (0, 3), (3, 6), (6, 9), (9, 14), (14, 17),
        (17, 19), (19, 21), (21, 23), (9, 13), (13, 16), (16, 18), (18, 20), (20, 22), (9, 12), (12, 15))
    self.joint_regressor_smpl = self.mesh_model.layer['neutral'].th_J_regressor

    # H36M joint set
    self.human36_root_joint_idx = 0
    self.human36_eval_joint = (1, 2, 3, 4, 5, 6, 8, 10, 11, 12, 13, 14, 15, 16)
    self.human36_skeleton = (
        (0, 7), (7, 8), (8, 9), (9, 10), (8, 11), (11, 12), (12, 13), (8, 14), (14, 15), (15, 16), (0, 1), (1, 2),
        (2, 3), (0, 4), (4, 5), (5, 6))
    self.joint_regressor_human36 = torch.Tensor(self.mesh_model.joint_regressor_h36m)

    # COCO joint set
    self.coco_joint_num = 19  # 17 + 2, manually added pelvis and neck
    self.coco_joints_name = (
        'Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist',
        'R_Wrist', 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Pelvis', 'Neck')
    self.coco_flip_pairs = ((1, 2), (3, 4), (5, 6), (7, 8), (9, 10), (11, 12), (13, 14), (15, 16))
    self.coco_skeleton = (
        (1, 2), (0, 1), (0, 2), (2, 4), (1, 3), (6, 8), (8, 10), (5, 7), (7, 9), (12, 14), (14, 16), (11, 13),
        (13, 15),
        # (5, 6), (11, 12),
        (17, 11), (17, 12), (17, 18), (18, 5), (18, 6), (18, 0))
    self.joint_regressor_coco = torch.Tensor(self.mesh_model.joint_regressor_coco)

    input_joint_name = 'coco'
    self.joint_num, self.skeleton, self.flip_pairs = self.get_joint_setting(input_joint_name)

    self.datalist, self.video_indices = self.load_data()  # self.video_indices: 37 videos, and the indices of each video
    self.datadict_pose2d_det = self.load_pose2d_det(self.det_data_path)
    self.graph_Adj, self.graph_L, self.graph_perm, self.graph_perm_reverse = \
        build_coarse_graphs(self.mesh_model.face, self.joint_num, self.skeleton, self.flip_pairs, levels=9)
    print(self.graph_perm_reverse)
    print("Check lengths of annotation and detection output: ", len(self.datalist), len(self.datadict_pose2d_det))
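# A minimal sketch of how the regressor and eval-joint indices above are
# typically combined when evaluating on 3DPW: regress H36M joints from the
# predicted SMPL mesh, root-align them, then keep the 14 evaluation joints.
# The method name and variable names are illustrative assumptions.
import torch

def regress_h36m_eval_joints(self, pred_mesh):
    """pred_mesh: (6890, 3) tensor of SMPL vertices."""
    # (17, 6890) @ (6890, 3) -> (17, 3) H36M joints.
    joints = torch.matmul(self.joint_regressor_human36, pred_mesh)
    joints = joints - joints[self.human36_root_joint_idx]  # root-relative
    return joints[list(self.human36_eval_joint)]           # 14 eval joints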
def __init__(self, data_split, args):
    dataset_name = 'SURREAL'
    self.data_split = 'train' if data_split == 'train' else 'val'
    self.debug = args.debug
    self.data_path = osp.join(cfg.data_dir, dataset_name, 'data')
    self.img_path = osp.join(cfg.data_dir, dataset_name, 'images', self.data_split)

    self.mesh_model = SMPL()
    self.smpl_vertex_num = 6890
    self.smpl_joint_num = 24
    self.smpl_joints_name = self.mesh_model.joints_name
    self.smpl_flip_pairs = ((1, 2), (4, 5), (7, 8), (10, 11), (13, 14), (16, 17), (18, 19), (20, 21), (22, 23))
    self.smpl_skeleton = (
        (0, 1), (1, 4), (4, 7), (7, 10), (0, 2), (2, 5), (5, 8), (8, 11), (0, 3), (3, 6), (6, 9), (9, 14), (14, 17),
        (17, 19), (19, 21), (21, 23), (9, 13), (13, 16), (16, 18), (18, 20), (20, 22), (9, 12), (12, 15))
    self.smpl_root_joint_idx = self.mesh_model.root_joint_idx
    self.joint_regressor_smpl = self.mesh_model.layer['neutral'].th_J_regressor

    self.datalist = self.load_data()
    det_file_path = osp.join(self.data_path, f'hrnet_output_on_{self.data_split}set.json')
    self.datalist_pose2d_det = self.load_pose2d_det(det_file_path)
    print("Check lengths of annotation and detection output: ", len(self.datalist), len(self.datalist_pose2d_det))

    self.input_joint_name = cfg.DATASET.input_joint_set  # 'coco'
    self.joint_num, self.skeleton, self.flip_pairs = self.get_joint_setting(self.input_joint_name)
    self.graph_Adj, self.graph_L, self.graph_perm, self.graph_perm_reverse = \
        build_coarse_graphs(self.mesh_model.face, self.joint_num, self.skeleton, self.flip_pairs, levels=9)
def __init__(self, data_split, args):
    dataset_name = 'AMASS'
    self.data_split = 'train'
    self.debug = args.debug
    self.data_path = osp.join(cfg.data_dir, dataset_name, 'data')

    # SMPL joint set
    self.mesh_model = SMPL()
    self.smpl_root_joint_idx = self.mesh_model.root_joint_idx
    self.face_kps_vertex = self.mesh_model.face_kps_vertex
    self.smpl_vertex_num = 6890
    self.smpl_joint_num = 24
    self.smpl_flip_pairs = ((1, 2), (4, 5), (7, 8), (10, 11), (13, 14), (16, 17), (18, 19), (20, 21), (22, 23))
    self.smpl_skeleton = (
        (0, 1), (1, 4), (4, 7), (7, 10), (0, 2), (2, 5), (5, 8), (8, 11), (0, 3), (3, 6), (6, 9), (9, 14), (14, 17),
        (17, 19), (19, 21), (21, 23), (9, 13), (13, 16), (16, 18), (18, 20), (20, 22), (9, 12), (12, 15))

    # H36M joint set
    self.human36_joint_num = 17
    self.human36_joints_name = (
        'Pelvis', 'R_Hip', 'R_Knee', 'R_Ankle', 'L_Hip', 'L_Knee', 'L_Ankle', 'Torso', 'Neck', 'Nose', 'Head',
        'L_Shoulder', 'L_Elbow', 'L_Wrist', 'R_Shoulder', 'R_Elbow', 'R_Wrist')
    self.human36_skeleton = (
        (0, 7), (7, 8), (8, 9), (9, 10), (8, 11), (11, 12), (12, 13), (8, 14), (14, 15), (15, 16), (0, 1), (1, 2),
        (2, 3), (0, 4), (4, 5), (5, 6))
    self.human36_flip_pairs = ((1, 4), (2, 5), (3, 6), (14, 11), (15, 12), (16, 13))
    self.human36_root_joint_idx = self.human36_joints_name.index('Pelvis')
    self.human36_error_distribution = self.get_stat()
    self.joint_regressor_h36m = self.mesh_model.joint_regressor_h36m

    # COCO joint set
    self.coco_joint_num = 19  # 17 + 2, manually added pelvis and neck
    self.coco_joints_name = (
        'Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist',
        'R_Wrist', 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Pelvis', 'Neck')
    self.coco_flip_pairs = ((1, 2), (3, 4), (5, 6), (7, 8), (9, 10), (11, 12), (13, 14), (15, 16))
    self.coco_skeleton = (
        (1, 2), (0, 1), (0, 2), (2, 4), (1, 3), (6, 8), (8, 10), (5, 7), (7, 9), (12, 14), (14, 16), (11, 13),
        (13, 15),
        # (5, 6),
        # (11, 12),
        (17, 11), (17, 12), (17, 18), (18, 5), (18, 6), (18, 0))
    self.coco_root_joint_idx = self.coco_joints_name.index('Pelvis')
    self.joint_regressor_coco = self.mesh_model.joint_regressor_coco

    self.input_joint_name = cfg.DATASET.input_joint_set
    self.joint_num, self.skeleton, self.flip_pairs = self.get_joint_setting(self.input_joint_name)
    self.datalist = self.load_data()
import numpy as np

from resize_img import *
from model import *
from smpl import SMPL

model = HMR()
smpl = SMPL(
    '/Users/choehanjoon/PycharmProjects/HMR/neutral_smpl_with_cocoplus_reg.pkl'
)  # SMPL('neutral_smpl_with_cocoplus_reg.pkl')

pic = 'result_7_512.jpg'
pred = model.predict(pic)[0]['theta']
print(pred)

# theta is laid out as [camera (3), pose (72), shape (10)].
cams = pred[:, :3][0]
poses = pred[:, 3:(3 + 72)][0]
shapes = pred[:, (3 + 72):][0]

smpl.set_params(beta=shapes, pose=poses, trans=np.array([0, 0.2, 0]))
smpl.save_to_obj('./smpl_np.obj')
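# A small sanity check on the 85-dim theta layout used by the slicing above:
# 3 weak-perspective camera parameters, 72 pose parameters (24 SMPL joints x
# 3 axis-angle values), and 10 shape coefficients (betas). The exact shapes
# are assumptions inferred from the slicing, not an API guarantee.
assert pred.shape[-1] == 3 + 72 + 10
assert cams.shape == (3,) and poses.shape == (72,) and shapes.shape == (10,)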
def __init__(self, mode, args):
    dataset_name = 'Human36M'
    self.debug = args.debug
    self.data_split = mode
    self.img_dir = osp.join(cfg.data_dir, dataset_name, 'images')
    self.annot_path = osp.join(cfg.data_dir, dataset_name, 'annotations')
    self.subject_genders = {1: 'female', 5: 'female', 6: 'male', 7: 'female', 8: 'male', 9: 'male', 11: 'male'}
    self.protocol = 2
    self.action_name = ['Directions', 'Discussion', 'Eating', 'Greeting', 'Phoning', 'Posing', 'Purchases',
                        'Sitting', 'SittingDown', 'Smoking', 'Photo', 'Waiting', 'Walking', 'WalkDog',
                        'WalkTogether']
    self.fitting_thr = 25  # millimeters

    # SMPL joint set
    self.mesh_model = SMPL()
    self.smpl_root_joint_idx = self.mesh_model.root_joint_idx
    self.smpl_face_kps_vertex = self.mesh_model.face_kps_vertex
    self.smpl_vertex_num = 6890
    self.smpl_joint_num = 24
    self.smpl_flip_pairs = ((1, 2), (4, 5), (7, 8), (10, 11), (13, 14), (16, 17), (18, 19), (20, 21), (22, 23))
    self.smpl_skeleton = (
        (0, 1), (1, 4), (4, 7), (7, 10), (0, 2), (2, 5), (5, 8), (8, 11), (0, 3), (3, 6), (6, 9), (9, 14), (14, 17),
        (17, 19), (19, 21), (21, 23), (9, 13), (13, 16), (16, 18), (18, 20), (20, 22), (9, 12), (12, 15))
    self.joint_regressor_smpl = self.mesh_model.layer['neutral'].th_J_regressor

    # H36M joint set
    self.human36_joint_num = 17
    self.human36_joints_name = (
        'Pelvis', 'R_Hip', 'R_Knee', 'R_Ankle', 'L_Hip', 'L_Knee', 'L_Ankle', 'Torso', 'Neck', 'Nose', 'Head',
        'L_Shoulder', 'L_Elbow', 'L_Wrist', 'R_Shoulder', 'R_Elbow', 'R_Wrist')
    self.human36_flip_pairs = ((1, 4), (2, 5), (3, 6), (14, 11), (15, 12), (16, 13))
    self.human36_skeleton = (
        (0, 7), (7, 8), (8, 9), (9, 10), (8, 11), (11, 12), (12, 13), (8, 14), (14, 15), (15, 16), (0, 1), (1, 2),
        (2, 3), (0, 4), (4, 5), (5, 6))
    self.human36_root_joint_idx = self.human36_joints_name.index('Pelvis')
    self.human36_error_distribution = self.get_stat()
    self.human36_eval_joint = (1, 2, 3, 4, 5, 6, 8, 10, 11, 12, 13, 14, 15, 16)
    self.joint_regressor_human36 = self.mesh_model.joint_regressor_h36m

    # COCO joint set
    self.coco_joint_num = 19  # 17 + 2, manually added pelvis and neck
    self.coco_joints_name = (
        'Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist',
        'R_Wrist', 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Pelvis', 'Neck')
    self.coco_flip_pairs = ((1, 2), (3, 4), (5, 6), (7, 8), (9, 10), (11, 12), (13, 14), (15, 16))
    self.coco_skeleton = (
        (1, 2), (0, 1), (0, 2), (2, 4), (1, 3), (6, 8), (8, 10), (5, 7), (7, 9), (12, 14), (14, 16), (11, 13),
        (13, 15),
        # (5, 6),
        # (11, 12),
        (17, 11), (17, 12), (17, 18), (18, 5), (18, 6), (18, 0))
    self.joint_regressor_coco = self.mesh_model.joint_regressor_coco

    self.input_joint_name = cfg.DATASET.input_joint_set  # 'coco'
    self.joint_num, self.skeleton, self.flip_pairs = self.get_joint_setting(self.input_joint_name)

    self.datalist, skip_idx, skip_img_path = self.load_data()
    if self.data_split == 'test':
        det_2d_data_path = osp.join(cfg.data_dir, dataset_name, 'absnet_output_on_testset.json')
        self.datalist_pose2d_det = self.load_pose2d_det(det_2d_data_path, skip_img_path)
        print("Check lengths of annotation and detection output: ",
              len(self.datalist), len(self.datalist_pose2d_det))
    self.graph_Adj, self.graph_L, self.graph_perm, self.graph_perm_reverse = \
        build_coarse_graphs(self.mesh_model.face, self.joint_num, self.skeleton, self.flip_pairs, levels=9)
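# A minimal sketch of how flip_pairs is typically consumed during horizontal-
# flip augmentation: mirror the x coordinate, then swap each left/right joint
# pair so the labels stay anatomically correct. The function name and array
# layout are illustrative assumptions.
import numpy as np

def flip_2d_joints(joints, img_width, flip_pairs):
    """joints: (J, 2) array of pixel coordinates."""
    flipped = joints.copy()
    flipped[:, 0] = img_width - 1 - flipped[:, 0]  # mirror x
    for left, right in flip_pairs:
        flipped[[left, right]] = flipped[[right, left]]  # swap L/R labels
    return flipped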
FOCAL_LENGTH = 1500
IMG_RES = args.res
joint_set = args.joint_set
input_path = args.input
output_path = './demo/result/'

# fix config
cfg.DATASET.target_joint_set = joint_set
cfg.MODEL.posenet_pretrained = False

# get model and input
if joint_set == 'mano':
    mesh_model = MANO()
else:
    mesh_model = SMPL()
model, joint_regressor, joint_num, skeleton, graph_L, graph_perm_reverse = get_joint_setting(
    mesh_model, joint_category=joint_set)
model = model.cuda()
project_net = models.project_net.get_model().cuda()
joint_regressor = torch.Tensor(joint_regressor).cuda()
joint_input = np.load(input_path)

# pre-process input
bbox = get_bbox(joint_input)
bbox1 = process_bbox(bbox.copy(), aspect_ratio=1.0, scale=1.25)
bbox2 = process_bbox(bbox.copy())
proj_target_joint_img, trans = j2d_processing(joint_input.copy(), (IMG_RES, IMG_RES), bbox1, 0, 0, None)
joint_img, _ = j2d_processing(
    joint_input.copy(), (IMG_RES, IMG_RES), bbox2, 0, 0, None)  # assumption: arguments mirror the bbox1 call above
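# A minimal sketch of what a get_bbox-style helper computes from the 2D joint
# input: the tight axis-aligned box around the keypoints as (x, y, w, h).
# This is an illustrative assumption about the helper's behavior, not its
# actual implementation.
import numpy as np

def tight_bbox_from_joints(joints):
    """joints: (J, 2) array of pixel coordinates."""
    x_min, y_min = joints[:, 0].min(), joints[:, 1].min()
    x_max, y_max = joints[:, 0].max(), joints[:, 1].max()
    return np.array([x_min, y_min, x_max - x_min, y_max - y_min])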
    final_principal_pt = (principal_pt + start_pt) * undo_scale
    cam_for_render = np.hstack(
        [np.mean(flength * undo_scale), final_principal_pt])

    # This is in the padded image.
    # kp_original = (joints + proc_param['start_pt']) * undo_scale
    # Subtract padding from joints.
    margin = int(img_size / 2)
    kp_original = (joints + proc_param['start_pt'] - margin) * undo_scale

    return cam_for_render, vert_shifted, kp_original


from resize_img import *
from model import *
from smpl import SMPL

model = HMR()
smpl = SMPL(
    '/Users/choehanjoon/PycharmProjects/HMR/neutral_smpl_with_cocoplus_reg.pkl'
)

pic = '/Users/choehanjoon/PycharmProjects/HMR/coco0.png'
pred, proc_param, img = model.predict(pic)
print(pred.keys())

joints = pred['joints']
verts = pred['verts']
cams = pred['cams']
img_path = pic
visualize(img, proc_param, joints[0], verts[0], cams[0], img_path)
def main(**args):
    output_folder = args.pop('output_folder')
    output_folder = osp.expandvars(output_folder)
    if not osp.exists(output_folder):
        os.makedirs(output_folder)

    # Store the arguments for the current experiment
    conf_fn = osp.join(output_folder, 'conf.yaml')
    with open(conf_fn, 'w') as conf_file:
        yaml.dump(args, conf_file)

    result_folder = args.pop('result_folder', 'results')
    result_folder = osp.join(output_folder, result_folder)
    if not osp.exists(result_folder):
        os.makedirs(result_folder)

    mesh_folder = args.pop('mesh_folder', 'meshes')
    mesh_folder = osp.join(output_folder, mesh_folder)
    if not osp.exists(mesh_folder):
        os.makedirs(mesh_folder)

    float_dtype = args.get('float_dtype', 'float32')
    if float_dtype == 'float64':
        dtype = torch.float64
    elif float_dtype == 'float32':
        dtype = torch.float32
    else:
        raise ValueError('Unknown float type {}, exiting!'.format(float_dtype))

    use_cuda = args.get('use_cuda', True)
    if use_cuda and not torch.cuda.is_available():
        print('CUDA is not available, exiting!')
        sys.exit(-1)

    dataset_obj = create_dataset(**args)

    start = time.time()

    input_gender = args.pop('gender', 'neutral')
    gender_lbl_type = args.pop('gender_lbl_type', 'none')
    max_persons = args.pop('max_persons', -1)

    body_model = SMPL(args.get('model_folder'), dtype=dtype,
                      joint_regressor_extra=args.get('j_regressor_extra'))

    use_hands = args.get('use_hands', True)
    use_face = args.get('use_face', True)

    body_pose_prior = create_prior(prior_type=args.get('body_prior_type'), dtype=dtype, **args)

    jaw_prior, expr_prior = None, None
    if use_face:
        jaw_prior = create_prior(prior_type=args.get('jaw_prior_type'), dtype=dtype, **args)
        expr_prior = create_prior(prior_type=args.get('expr_prior_type', 'l2'), dtype=dtype, **args)

    left_hand_prior, right_hand_prior = None, None
    if use_hands:
        lhand_args = args.copy()
        lhand_args['num_gaussians'] = args.get('num_pca_comps')
        left_hand_prior = create_prior(
            prior_type=args.get('left_hand_prior_type'), dtype=dtype, use_left_hand=True, **lhand_args)

        rhand_args = args.copy()
        rhand_args['num_gaussians'] = args.get('num_pca_comps')
        right_hand_prior = create_prior(
            prior_type=args.get('right_hand_prior_type'), dtype=dtype, use_right_hand=True, **rhand_args)

    shape_prior = create_prior(prior_type=args.get('shape_prior_type', 'l2'), dtype=dtype, **args)

    angle_prior = create_prior(prior_type='angle', dtype=dtype)

    if use_cuda and torch.cuda.is_available():
        device = torch.device('cuda')
        body_model = body_model.to(device=device)
        body_pose_prior = body_pose_prior.to(device=device)
        angle_prior = angle_prior.to(device=device)
        shape_prior = shape_prior.to(device=device)
        if use_face:
            expr_prior = expr_prior.to(device=device)
            jaw_prior = jaw_prior.to(device=device)
        if use_hands:
            left_hand_prior = left_hand_prior.to(device=device)
            right_hand_prior = right_hand_prior.to(device=device)
    else:
        device = torch.device('cpu')

    # A weight for every joint of the model
    joint_weights = dataset_obj.get_joint_weights().to(device=device, dtype=dtype)
    # Add a fake batch dimension for broadcasting
    joint_weights.unsqueeze_(dim=0)

    for idx, keypoints_3d in enumerate(dataset_obj):
        curr_result_folder = os.path.join(result_folder, str(idx).zfill(7))
        if not osp.exists(curr_result_folder):
            os.makedirs(curr_result_folder)
        curr_mesh_folder = os.path.join(mesh_folder, str(idx).zfill(7))
        if not osp.exists(curr_mesh_folder):
            os.makedirs(curr_mesh_folder)

        for person_idx in range(len(keypoints_3d)):
            print(f'Processing: Image {idx} Person {person_idx}')
            curr_result_fn = osp.join(curr_result_folder, f'{person_idx:02d}.pkl')
            curr_mesh_fn = osp.join(curr_mesh_folder, f'{person_idx:02d}.obj')
            try:
                fit_single_frame(keypoints_3d[[person_idx]],
                                 body_model=body_model,
                                 joint_weights=joint_weights,
                                 dtype=dtype,
                                 result_fn=curr_result_fn,
                                 mesh_fn=curr_mesh_fn,
                                 shape_prior=shape_prior,
                                 expr_prior=expr_prior,
                                 body_pose_prior=body_pose_prior,
                                 left_hand_prior=left_hand_prior,
                                 right_hand_prior=right_hand_prior,
                                 jaw_prior=jaw_prior,
                                 angle_prior=angle_prior,
                                 **args)

                elapsed = time.time() - start
                time_msg = time.strftime('%H hours, %M minutes, %S seconds', time.gmtime(elapsed))
                print('Processing the data took: {}'.format(time_msg))
            except Exception as exc:
                # Catch Exception rather than a bare except, so that
                # KeyboardInterrupt and SystemExit still propagate.
                print('An error occurred during fitting: {}'.format(exc))