def proc_coco(train_dir, test_dir, train_id, test_id, coco_dataset):
    """Build training/testing samples from the COCO person category.

    For each kept annotation: run HMR on the cropped image, deform the SMPL
    mesh toward the 2D joints and the ground-truth silhouette, then save the
    crop image, a 3-channel composite silhouette and a JSON parameter file.

    NOTE(review): the train-set branch of this function was fully commented
    out upstream (it resumed from index 3584); it has been removed here.
    Only the test split (starting at index 1855 — presumably a resume
    marker, confirm before a fresh run) is generated.

    Args:
        train_dir:    output root for training samples (unused while the
                      train branch is disabled).
        test_dir:     output root for testing samples ("img/", "sil/", "para/").
        train_id:     next free training sample id (returned unchanged).
        test_id:      next free testing sample id; incremented per sample.
        coco_dataset: path to the COCO annotation file.

    Returns:
        (train_id, test_id) updated counters.
    """
    # read dataset; category id 1 means "person"
    coco = COCO(coco_dataset)
    tupleIds = coco.getImgIds(catIds=1)
    faces = np.load("../predef/smpl_faces.npy")
    hmr_pred = hmr_predictor()
    renderer = rd.SMPLRenderer(face_path="../predef/smpl_faces.npy")
    with open('../predef/mesh_joint_list.pkl', 'rb') as fp:
        mesh_joint = pickle.load(fp)
    count_all = 0.
    count_work = 0.
    total_num = len(tupleIds)
    # 80/20 train/test split over image ids
    train_num = int(np.floor(total_num * 0.8))

    # make test set (test images are tupleIds[train_num:])
    test_num = total_num - train_num
    tr = trange(1855, test_num, desc='Bar desc', leave=True)
    for i in tr:
        tr.set_description("COCO - test part")
        tr.refresh()  # to show immediately the update
        sleep(0.01)
        count_all += 1
        # get tuple
        one_tuple = coco.loadImgs(tupleIds[i + train_num])[0]
        img_size = (one_tuple['height'], one_tuple['width'])
        crt_id = one_tuple['id']
        # get anns
        annIds = coco.getAnnIds(imgIds=one_tuple['id'], catIds=1,
                                iscrowd=None)
        anns = coco.loadAnns(annIds)
        # RULE 1: objects < 4 — skip crowded images
        if len(anns) > 3:
            take_notes("COCO %05d BAN -1\n" % (i + train_num),
                       "./data_log.txt")
            continue
        for j in range(len(anns)):
            # get sil points
            seg_points = anns[j]['segmentation'][0]
            # RULE 2: seg_points number >= 100 — skip coarse silhouettes
            if len(seg_points) < 100:
                take_notes("COCO %05d%03d BAN -1\n" % (i + train_num, j),
                           "./data_log.txt")
                continue
            # get key points (17 COCO joints, (x, y, visibility))
            key_points = anns[j]['keypoints']
            key_points = np.resize(key_points, (17, 3))
            # draw sil from the polygon points
            sil = points2sil(seg_points, img_size)
            result = coco_filter(key_points, sil)
            if result is False:
                take_notes("COCO %05d BAN -1\n" % (i + train_num),
                           "./data_log.txt")
                continue
            # here we finally decide to use it
            if result is True:
                # read image and ground-truth silhouette
                ori_img = io.imread(one_tuple['coco_url'])
                src_gt_sil = sil
                # hmr predict
                verts, cam, proc_para, std_img = hmr_pred.predict(
                    ori_img, True, src_gt_sil)
                # unnormalize std_img from [-1, 1] to uint8 [0, 255]
                src_img = ((std_img + 1) / 2.0 * 255).astype(np.uint8)
                # save img
                img_file = "img/COCO_%08d%02d.png" % (crt_id, j)
                PIL.Image.fromarray(src_img).save(test_dir + img_file)
                # process sil into the crop frame
                gt_sil = proc_sil(src_gt_sil, proc_para)
                # get proj sil of the raw HMR mesh
                proj_sil = renderer.silhouette(verts=verts,
                                               cam=cam,
                                               img_size=src_img.shape,
                                               norm=False)
                # make TriMesh to obtain per-vertex normals
                mesh = make_trimesh(verts, faces, compute_vn=True)
                vert_norms = mesh.vertex_normals()
                # get joint move (COCO joints lack some SMPL joints ->
                # unseen_mode)
                coco_joints_t = transform_coco_joints(key_points)
                new_jv, _, joint_move, joint_posi = get_joint_move(
                    verts,
                    coco_joints_t,
                    proc_para,
                    mesh_joint,
                    unseen_mode=True,
                )
                joint_move = joint_move.flatten()
                # joint deform
                fd_ja = fast_deform_dja(weight=10.0)
                ja_verts = fd_ja.deform(np.asarray(verts), new_jv)
                # get achr move from the joint-deformed mesh
                proj_sil_ja = renderer.silhouette(verts=ja_verts,
                                                  norm=False)
                _, achr_verts, achr_move = get_achr_move(
                    gt_sil, ja_verts, vert_norms, proj_sil_ja)
                achr_posi = get_anchor_posi(achr_verts)
                # save sil: channels = (gt, projected, joint-adjusted)
                compose_sil = np.stack((gt_sil, proj_sil, proj_sil_ja))
                compose_sil = np.moveaxis(compose_sil, 0, 2)
                compose_sil = PIL.Image.fromarray(
                    compose_sil.astype(np.uint8))
                compose_sil.save(test_dir + "sil/%08d.png" % test_id)
                # save para (convert ndarrays so the dict is JSON-safe)
                proc_para['end_pt'] = proc_para['end_pt'].tolist()
                proc_para['start_pt'] = proc_para['start_pt'].tolist()
                para = {
                    "verts": verts.tolist(),
                    "vert_norms": vert_norms.tolist(),
                    "proc_para": proc_para,
                    "joint_move": joint_move.tolist(),
                    "joint_posi": joint_posi.tolist(),
                    "achr_move": achr_move.tolist(),
                    "achr_posi": achr_posi.tolist(),
                    "img_file": img_file,
                }
                # FIX: json.dump writes str, so the file must be opened in
                # text mode ('w'), not binary ('wb') — 'wb' raises TypeError
                # on Python 3.
                with open(test_dir + "para/%08d.json" % test_id, 'w') as fp:
                    json.dump(para, fp)
                take_notes(
                    "COCO %05d%03d TEST %08d\n" % (i + train_num, j,
                                                   test_id),
                    "./data_log.txt")
                test_id += 1
                count_work += 1
    # guard: avoid ZeroDivisionError when the loop processed nothing
    if count_all > 0:
        print("work ratio = %f, (%d / %d)" %
              (count_work / count_all, count_work, count_all))
    return train_id, test_id
def __init__(self): curr_path = os.path.dirname(os.path.abspath(__file__)) self.SMPL_FACE_PATH = os.path.join(curr_path, '../tf_smpl', 'smpl_faces.npy') self.renderer = vis_util.SMPLRenderer(face_path=self.SMPL_FACE_PATH)
def proc_mpii(train_dir, test_dir, train_id, test_id, upi_dir):
    """Build training/testing samples from the MPII subset of UPI.

    Images 1..10424 go to the train split, 10425..13030 to the test split
    (hard-coded counts — presumably the fixed size of the UPI-MPII release;
    confirm against the data on disk). For each kept image: run HMR, deform
    the SMPL mesh toward the 2D joints and the ground-truth silhouette, and
    save the crop image, composite silhouette and JSON parameter file.

    Args:
        train_dir: output root for training samples ("img/", "sil/", "para/").
        test_dir:  output root for testing samples.
        train_id:  next free training sample id; incremented per sample.
        test_id:   next free testing sample id; incremented per sample.
        upi_dir:   root of the UPI dataset (contains "data/mpii/").

    Returns:
        (train_id, test_id) updated counters.
    """
    mpii_dir = upi_dir + "data/mpii/"
    faces = np.load("../predef/smpl_faces.npy")
    hmr_pred = hmr_predictor()
    renderer = rd.SMPLRenderer(face_path="../predef/smpl_faces.npy")
    mpii_joints_o = np.load(mpii_dir + "poses.npz")['poses']
    mpii_joints = transform_mpii_joints(mpii_joints_o)
    with open('../predef/mesh_joint_list.pkl', 'rb') as fp:
        mesh_joint = pickle.load(fp)
    count_all = 0.
    count_work = 0.

    # make train set
    tr = trange(10424, desc='Bar desc', leave=True)
    for i in tr:
        tr.set_description("MPII - train part")
        tr.refresh()  # to show immediately the update
        sleep(0.01)
        count_all += 1
        # read sil (first channel of the segmentation PNG)
        src_gt_sil = np.array(PIL.Image.open(
            mpii_dir + "images/%05d_segmentation.png" % (i + 1)))[:, :, 0]
        # judge using filter
        result = mpii_filter(mpii_joints[:, :, i], src_gt_sil)
        if result is False:
            take_notes("MPII %05d BAN -1\n" % (i + 1), "./data_log.txt")
            continue
        # read ori img
        ori_img = np.array(PIL.Image.open(
            mpii_dir + "images/%05d.png" % (i + 1)))
        # hmr predict
        verts, cam, proc_para, std_img = hmr_pred.predict(ori_img, True,
                                                          src_gt_sil)
        # unnormalize std_img from [-1, 1] to uint8 [0, 255]
        # FIX: np.float was removed in NumPy 1.24 — use builtin float
        src_img = ((std_img + 1).astype(float) / 2.0 * 255).astype(np.uint8)
        # save img
        img_file = "img/MPII_%08d.png" % (i + 1)
        PIL.Image.fromarray(src_img).save(train_dir + img_file)
        # process sil into the crop frame
        gt_sil = proc_sil(src_gt_sil, proc_para)
        # get proj sil of the raw HMR mesh
        proj_sil = renderer.silhouette(verts=verts,
                                       cam=cam,
                                       img_size=src_img.shape,
                                       norm=False)
        # make TriMesh to obtain per-vertex normals
        mesh = make_trimesh(verts, faces, compute_vn=True)
        vert_norms = mesh.vertex_normals()
        # get joint move
        new_jv, _, joint_move, joint_posi = get_joint_move(
            verts, mpii_joints[:, :, i], proc_para, mesh_joint)
        joint_move = joint_move.flatten()
        # joint deform
        fd_ja = fast_deform_dja(weight=10.0)
        ja_verts = fd_ja.deform(np.asarray(verts), new_jv)
        # get achr move from the joint-deformed mesh
        proj_sil_ja = renderer.silhouette(verts=ja_verts, norm=False)
        _, achr_verts, achr_move = get_achr_move(gt_sil, ja_verts,
                                                 vert_norms, proj_sil_ja)
        achr_posi = get_anchor_posi(achr_verts)
        # save sil: channels = (gt, projected, joint-adjusted)
        compose_sil = np.stack((gt_sil, proj_sil, proj_sil_ja))
        compose_sil = np.moveaxis(compose_sil, 0, 2)
        compose_sil = PIL.Image.fromarray(compose_sil.astype(np.uint8))
        compose_sil.save(train_dir + "sil/%08d.png" % train_id)
        # save para (convert ndarrays so the dict is JSON-safe)
        proc_para['end_pt'] = proc_para['end_pt'].tolist()
        proc_para['start_pt'] = proc_para['start_pt'].tolist()
        para = {
            "verts": verts.tolist(),
            "vert_norms": vert_norms.tolist(),
            "proc_para": proc_para,
            "joint_move": joint_move.tolist(),
            "joint_posi": joint_posi.tolist(),
            "achr_move": achr_move.tolist(),
            "achr_posi": achr_posi.tolist(),
            "img_file": img_file,
        }
        # FIX: json.dump needs a text-mode file ('w'); 'wb' raises TypeError
        with open(train_dir + "para/%08d.json" % train_id, 'w') as fp:
            json.dump(para, fp)
        take_notes("MPII %05d TRAIN %08d\n" % (i + 1, train_id),
                   "./data_log.txt")
        train_id += 1
        count_work += 1

    # make test set (image indices offset by 10424)
    tr = trange(2606, desc='Bar desc', leave=True)
    for i in tr:
        tr.set_description("MPII - test part")
        tr.refresh()  # to show immediately the update
        sleep(0.01)
        count_all += 1
        # read sil
        src_gt_sil = np.array(PIL.Image.open(
            mpii_dir + "images/%05d_segmentation.png" % (i + 10425)))[:, :, 0]
        # judge using filter
        result = mpii_filter(mpii_joints[:, :, i + 10424], src_gt_sil)
        if result is False:
            take_notes("MPII %05d BAN -1\n" % (i + 10425), "./data_log.txt")
            continue
        # read ori img
        ori_img = np.array(PIL.Image.open(
            mpii_dir + "images/%05d.png" % (i + 10425)))
        # hmr predict
        verts, cam, proc_para, std_img = hmr_pred.predict(ori_img, True,
                                                          src_gt_sil)
        # unnormalize std_img (see FIX note above re: np.float)
        src_img = ((std_img + 1).astype(float) / 2.0 * 255).astype(np.uint8)
        # save img
        img_file = "img/MPII_%08d.png" % (i + 10425)
        PIL.Image.fromarray(src_img).save(test_dir + img_file)
        # process sil
        gt_sil = proc_sil(src_gt_sil, proc_para)
        # get proj sil
        proj_sil = renderer.silhouette(verts=verts,
                                       cam=cam,
                                       img_size=src_img.shape,
                                       norm=False)
        # make TriMesh
        mesh = make_trimesh(verts, faces, compute_vn=True)
        vert_norms = mesh.vertex_normals()
        # get joint move
        new_jv, _, joint_move, joint_posi = get_joint_move(
            verts, mpii_joints[:, :, i + 10424], proc_para, mesh_joint)
        joint_move = joint_move.flatten()
        # joint deform
        fd_ja = fast_deform_dja(weight=10.0)
        ja_verts = fd_ja.deform(np.asarray(verts), new_jv)
        # get achr move
        proj_sil_ja = renderer.silhouette(verts=ja_verts, norm=False)
        _, achr_verts, achr_move = get_achr_move(gt_sil, ja_verts,
                                                 vert_norms, proj_sil_ja)
        achr_posi = get_anchor_posi(achr_verts)
        # save sil
        compose_sil = np.stack((gt_sil, proj_sil, proj_sil_ja))
        compose_sil = np.moveaxis(compose_sil, 0, 2)
        compose_sil = PIL.Image.fromarray(compose_sil.astype(np.uint8))
        compose_sil.save(test_dir + "sil/%08d.png" % test_id)
        # save para
        proc_para['end_pt'] = proc_para['end_pt'].tolist()
        proc_para['start_pt'] = proc_para['start_pt'].tolist()
        para = {
            "verts": verts.tolist(),
            "vert_norms": vert_norms.tolist(),
            "proc_para": proc_para,
            "joint_move": joint_move.tolist(),
            "joint_posi": joint_posi.tolist(),
            "achr_move": achr_move.tolist(),
            "achr_posi": achr_posi.tolist(),
            "img_file": img_file,
        }
        # FIX: text mode for json.dump
        with open(test_dir + "para/%08d.json" % test_id, 'w') as fp:
            json.dump(para, fp)
        take_notes("MPII %05d TEST %08d\n" % (i + 10425, test_id),
                   "./data_log.txt")
        test_id += 1
        count_work += 1

    # guard: avoid ZeroDivisionError when nothing was processed
    if count_all > 0:
        print("work ratio = %f, (%d / %d)" %
              (count_work / count_all, count_work, count_all))
    return train_id, test_id
# NOTE(review): this chunk begins mid-statement — the string below is the tail
# of a print/warning call whose opening lies outside this view, so the code is
# left byte-identical. It parses ../conf.ini (DEMO section) for model paths,
# builds an SMPL renderer, and constructs the joint/anchor predictors, then
# loads the precomputed mesh-joint correspondence pickle.
"cpu mode is slow, use '--gpu True' to enable gpu mode if conditions permit." ) # parse configures conf = configparser.ConfigParser() conf.read(u'../conf.ini', encoding='utf8') dataset_path = conf.get('DEMO', 'dataset_path') joint_model = conf.get('DEMO', 'joint_model') anchor_model = conf.get('DEMO', 'anchor_model') shading_model = conf.get('DEMO', 'shading_model') # ==============================initialize============================== print("initialize......", end='') # initialize renderer my_renderer = rd.SMPLRenderer() # initialize joint and anchor predictor pdt_j = joint_predictor(joint_model, gpu=opt.gpu) pdt_a = anchor_predictor(anchor_model, gpu=opt.gpu) # dataset = dataloader_demo(dataset_path) # make verts for joint deform with open('../predef/mesh_joint_list.pkl', 'rb') as fp: item_dic = pickle.load(fp) point_list = item_dic["point_list"] index_map = item_dic["index_map"] # make verts for anchor deform
def proc_lspet(train_dir, train_id, lspet_dir, upi_dir):
    """Build training samples from the LSP-extended (lspet) dataset.

    Iterates images 1..10000 (the fixed size of lspet), skipping images with
    no UPI segmentation file or rejected by ``lspet_filter``. For each kept
    image: run HMR, deform the SMPL mesh toward the 2D joints and the
    ground-truth silhouette, and save the crop image, composite silhouette
    and JSON parameter file. LSPET provides no test split here.

    Args:
        train_dir: output root for training samples ("img/", "sil/", "para/").
        train_id:  next free training sample id; incremented per sample.
        lspet_dir: root of the lspet dataset (joints.mat, images/).
        upi_dir:   root of the UPI dataset (lsp_extended/ segmentations).

    Returns:
        Updated train_id counter.
    """
    faces = np.load("../predef/smpl_faces.npy")
    hmr_pred = hmr_predictor()
    renderer = rd.SMPLRenderer(face_path="../predef/smpl_faces.npy")
    lspet_joints = loadmat(lspet_dir + "joints.mat")['joints']
    # roll axis because the definition order of lspet is different from lsp
    lspet_joints = np.rollaxis(lspet_joints, 1, 0)
    with open('../predef/mesh_joint_list.pkl', 'rb') as fp:
        mesh_joint = pickle.load(fp)
    count_all = 0.
    count_work = 0.

    # make train set
    tr = trange(10000, desc='Bar desc', leave=True)
    for i in tr:
        tr.set_description("LSPET - train part")
        tr.refresh()  # to show immediately the update
        sleep(0.01)
        count_all += 1
        # judge if sil file exists, if not, skip it
        sil_path = upi_dir + "lsp_extended/im%05d_segmentation.png" % (i + 1)
        if not os.path.isfile(sil_path):
            take_notes("LSPET %05d BAN -1\n" % (i + 1), "./data_log.txt")
            continue
        # read sil (first channel of the segmentation PNG)
        src_gt_sil = np.array(PIL.Image.open(sil_path))[:, :, 0]
        # judge using filter
        result = lspet_filter(lspet_joints[:, :, i], src_gt_sil)
        if result is False:
            take_notes("LSPET %05d BAN -1\n" % (i + 1), "./data_log.txt")
            continue
        # read ori img
        ori_img = np.array(PIL.Image.open(
            lspet_dir + "images/im%05d.jpg" % (i + 1)))
        # hmr predict
        verts, cam, proc_para, std_img = hmr_pred.predict(ori_img, True,
                                                          src_gt_sil)
        # unnormalize std_img from [-1, 1] to uint8 [0, 255]
        # FIX: np.float was removed in NumPy 1.24 — use builtin float
        src_img = ((std_img + 1).astype(float) / 2.0 * 255).astype(np.uint8)
        # save img
        img_file = "img/LSPET_%08d.png" % (i + 1)
        PIL.Image.fromarray(src_img).save(train_dir + img_file)
        # process sil into the crop frame
        gt_sil = proc_sil(src_gt_sil, proc_para)
        # get proj sil of the raw HMR mesh
        proj_sil = renderer.silhouette(verts=verts,
                                       cam=cam,
                                       img_size=src_img.shape,
                                       norm=False)
        # make TriMesh to obtain per-vertex normals
        mesh = make_trimesh(verts, faces, compute_vn=True)
        vert_norms = mesh.vertex_normals()
        # get joint move (lspet joints lack some SMPL joints -> unseen_mode)
        new_jv, _, joint_move, joint_posi = get_joint_move(
            verts, lspet_joints[:, :, i], proc_para, mesh_joint,
            unseen_mode=True)
        joint_move = joint_move.flatten()
        # joint deform
        fd_ja = fast_deform_dja(weight=10.0)
        ja_verts = fd_ja.deform(np.asarray(verts), new_jv)
        # get achr move from the joint-deformed mesh
        proj_sil_ja = renderer.silhouette(verts=ja_verts, norm=False)
        _, achr_verts, achr_move = get_achr_move(gt_sil, ja_verts,
                                                 vert_norms, proj_sil_ja)
        achr_posi = get_anchor_posi(achr_verts)
        # save sil: channels = (gt, projected, joint-adjusted)
        compose_sil = np.stack((gt_sil, proj_sil, proj_sil_ja))
        compose_sil = np.moveaxis(compose_sil, 0, 2)
        compose_sil = PIL.Image.fromarray(compose_sil.astype(np.uint8))
        compose_sil.save(train_dir + "sil/%08d.png" % train_id)
        # save para (convert ndarrays so the dict is JSON-safe)
        proc_para['end_pt'] = proc_para['end_pt'].tolist()
        proc_para['start_pt'] = proc_para['start_pt'].tolist()
        para = {
            "verts": verts.tolist(),
            "vert_norms": vert_norms.tolist(),
            "proc_para": proc_para,
            "joint_move": joint_move.tolist(),
            "joint_posi": joint_posi.tolist(),
            "achr_move": achr_move.tolist(),
            "achr_posi": achr_posi.tolist(),
            "img_file": img_file,
        }
        # FIX: json.dump needs a text-mode file ('w'); 'wb' raises TypeError
        with open(train_dir + "para/%08d.json" % train_id, 'w') as fp:
            json.dump(para, fp)
        take_notes("LSPET %05d TRAIN %08d\n" % (i + 1, train_id),
                   "./data_log.txt")
        train_id += 1
        count_work += 1

    # guard: avoid ZeroDivisionError when nothing was processed
    if count_all > 0:
        print("work ratio = %f, (%d / %d)" %
              (count_work / count_all, count_work, count_all))
    return train_id
# NOTE(review): this chunk's trailing `for test_num in tr:` loop body appears
# to continue past the visible end of the chunk, so the code is left
# byte-identical. It parses --num/--set, loads the pretrained shading net onto
# cuda:0 (hard-coded — consider a --gpu flag), builds the renderer/camera, and
# per mesh renders a silhouette upsampled to 448x448 and binarized at 0.5.
# parse arguments parser = argparse.ArgumentParser() parser.add_argument('--num', type=int, required=True, help='data_num') parser.add_argument('--set', type=str, required=True, help='recon or syn') opt = parser.parse_args() assert opt.set in ["recon", "syn"], \ "set must be one of [recon, syn]" # prepare data_num = int(opt.num) model_file = "../demo/pretrained_model/pretrained_shading.pth" device = torch.device("cuda:0") net_shading = shading_net().to(device).eval() net_shading.load_state_dict(torch.load(model_file, map_location='cuda:0')) renderer = rd.SMPLRenderer(face_path="../predef/smpl_faces.npy") cam_para = CamPara(K=np.array([[1000, 0, 224], [0, 1000, 224], [0, 0, 1]])) with open('../predef/exempt_vert_list.pkl', 'rb') as fp: exempt_vert_list = pickle.load(fp) tr = trange(data_num, desc='Bar desc', leave=True) for test_num in tr: # read mesh mesh = om.read_trimesh("./eval_data/%s_set/pred_save/a_%03d.obj" % \ (opt.set, test_num)) proj_sil = renderer.silhouette(verts=mesh.points()) proj_sil_l = cv2.resize(proj_sil, dsize=(448, 448)) proj_sil_l[proj_sil_l < 0.5] = 0 proj_sil_l[proj_sil_l >= 0.5] = 1
def proc_h36m(train_dir, test_dir, train_id, test_id, h36m_dir):
    """Build testing samples from Human3.6M videos.

    Reads the video list from ./h36m_list.txt (count on the first line, then
    triplets of image-video / silhouette-video / pose-mat paths separated by
    CRLF). Only the last 20% of the videos (the test split) are processed
    here — no train-set loop exists in this function, so ``train_id`` is
    returned unchanged. Every ``sample_interval``-th frame is run through
    HMR, the SMPL mesh is deformed toward the pose joints and the silhouette,
    and the crop image, composite silhouette and JSON parameters are saved.

    Args:
        train_dir: unused (kept for signature parity with the other proc_*).
        test_dir:  output root for testing samples ("img/", "sil/", "para/").
        train_id:  returned unchanged.
        test_id:   next free testing sample id; incremented per sample.
        h36m_dir:  root directory of the Human3.6M data.

    Returns:
        (train_id, test_id) updated counters.
    """
    sample_interval = 10  # process every 10th frame
    faces = np.load("../predef/smpl_faces.npy")
    with open('../predef/mesh_joint_list.pkl', 'rb') as fp:
        mesh_joint = pickle.load(fp)
    hmr_pred = hmr_predictor()
    renderer = rd.SMPLRenderer(face_path="../predef/smpl_faces.npy")
    # open available video list (CRLF-separated; first entry is the count)
    with open("./h36m_list.txt") as f:
        h36m_list = f.read().split("\r\n")
    vid_num = int(h36m_list[0])
    h36m_list = [[
        h36m_list[i * 3 + 1], h36m_list[i * 3 + 2], h36m_list[i * 3 + 3]
    ] for i in range(vid_num)]
    # compute data number for training and testing (80/20 split)
    train_num = int(vid_num * 0.8)
    test_num = vid_num - train_num
    count_all = 0.
    count_work = 0.

    # make test set
    tr = trange(test_num, desc='Bar desc', leave=True)
    for i in tr:
        tr.set_description("H36M - test part")
        tr.refresh()  # to show immediately the update
        sleep(0.01)
        vid_idx = i + train_num
        # read video of image, silhouette and pose
        vid_img = cv2.VideoCapture(h36m_dir + h36m_list[vid_idx][0])
        vid_sil = cv2.VideoCapture(h36m_dir + h36m_list[vid_idx][1])
        pose_list = loadmat(h36m_dir + h36m_list[vid_idx][2])['pose']
        vid_len = min(int(vid_img.get(cv2.CAP_PROP_FRAME_COUNT)),
                      int(vid_sil.get(cv2.CAP_PROP_FRAME_COUNT)),
                      len(pose_list))
        for frm_idx in range(0, vid_len, sample_interval):
            count_all += 1
            # read sil frame and binarize at 128
            vid_sil.set(1, frm_idx)
            _, src_gt_sil = vid_sil.read()
            src_gt_sil[src_gt_sil < 128] = 0
            src_gt_sil[src_gt_sil >= 128] = 255
            src_gt_sil = refine_sil(src_gt_sil, 100)
            src_gt_sil = src_gt_sil[:, :, 0]
            # read ori img frame
            vid_img.set(1, frm_idx)
            _, ori_img = vid_img.read()
            # BGR to RGB (OpenCV decodes as BGR)
            ori_img = np.stack(
                (ori_img[:, :, 2], ori_img[:, :, 1], ori_img[:, :, 0]),
                axis=2)
            # hmr predict
            verts, cam, proc_para, std_img = hmr_pred.predict(
                ori_img, True, src_gt_sil)
            # unnormalize std_img from [-1, 1] to uint8 [0, 255]
            # FIX: np.float was removed in NumPy 1.24 — use builtin float
            src_img = ((std_img + 1).astype(float) / 2.0 * 255).astype(
                np.uint8)
            # save img
            img_file = "img/H36M_%04d%04d.png" % (vid_idx, frm_idx)
            PIL.Image.fromarray(src_img).save(test_dir + img_file)
            # process sil into the crop frame
            gt_sil = proc_sil(src_gt_sil, proc_para)
            # get proj sil of the raw HMR mesh
            proj_sil = renderer.silhouette(verts=verts,
                                           cam=cam,
                                           img_size=src_img.shape,
                                           norm=False)
            # make TriMesh to obtain per-vertex normals
            mesh = make_trimesh(verts, faces, compute_vn=True)
            vert_norms = mesh.vertex_normals()
            h36m_joint = transform_h36m_joints(pose_list[frm_idx])
            # get joint move
            new_jv, _, joint_move, joint_posi = get_joint_move(
                verts, h36m_joint, proc_para, mesh_joint)
            joint_move = joint_move.flatten()
            # joint deform
            fd_ja = fast_deform_dja(weight=10.0)
            ja_verts = fd_ja.deform(np.asarray(verts), new_jv)
            # get achr move from the joint-deformed mesh
            proj_sil_ja = renderer.silhouette(verts=ja_verts, norm=False)
            _, achr_verts, achr_move = get_achr_move(gt_sil, ja_verts,
                                                     vert_norms,
                                                     proj_sil_ja)
            achr_posi = get_anchor_posi(achr_verts)
            # save sil: channels = (gt, projected, joint-adjusted)
            compose_sil = np.stack((gt_sil, proj_sil, proj_sil_ja))
            compose_sil = np.moveaxis(compose_sil, 0, 2)
            compose_sil = PIL.Image.fromarray(compose_sil.astype(np.uint8))
            compose_sil.save(test_dir + "sil/%08d.png" % test_id)
            # save para (convert ndarrays so the dict is JSON-safe)
            proc_para['end_pt'] = proc_para['end_pt'].tolist()
            proc_para['start_pt'] = proc_para['start_pt'].tolist()
            para = {
                "verts": verts.tolist(),
                "vert_norms": vert_norms.tolist(),
                "proc_para": proc_para,
                "joint_move": joint_move.tolist(),
                "joint_posi": joint_posi.tolist(),
                "achr_move": achr_move.tolist(),
                "achr_posi": achr_posi.tolist(),
                "img_file": img_file,
            }
            # FIX: json.dump needs a text-mode file ('w'); 'wb' raises
            # TypeError on Python 3
            with open(test_dir + "para/%08d.json" % test_id, 'w') as fp:
                json.dump(para, fp)
            take_notes(
                "H36M %04d%04d TEST %08d\n" % (vid_idx, frm_idx, test_id),
                "./data_log.txt")
            test_id += 1
            count_work += 1
        # FIX: release the capture handles — the original leaked one pair of
        # decoder handles per video
        vid_img.release()
        vid_sil.release()

    # guard: avoid ZeroDivisionError when no frames were processed
    if count_all > 0:
        print("work ratio = %f, (%d / %d)" %
              (count_work / count_all, count_work, count_all))
    return train_id, test_id