Code example #1 (score: 0)
File: utility.py — Project: gsygsygsy123/hmd
    def __init__(self, predef_vert=True, verts=None, max_dist=0.1):
        """Initialize the joint-deform helper.

        Args:
            predef_vert: if True, predefined mesh vertices must be
                supplied via `verts`.
            verts: predefined mesh vertices (required when `predef_vert`
                is True).  Defaults to None — the original used a mutable
                default `verts=[]`, which is a shared-state pitfall.
            max_dist: maximum allowed deformation distance.
        """
        self.predef_vert = predef_vert
        self.max_dist = max_dist
        # joint-aware fast deformer reused by later deform calls
        self.fd_j = fast_deform_dja(weight=10.0)

        # read joint index list (mesh-vertex groups per joint)
        with open('../predef/mesh_joint_list.pkl', 'rb') as fp:
            item_dic = pickle.load(fp)
        self.point_list = item_dic["point_list"]

        if self.predef_vert:
            # `len(...) == 0` instead of `== []` so numpy arrays are also
            # handled correctly (ndarray == [] does not yield a plain bool)
            if verts is None or len(verts) == 0:
                print("ERROR: no predefine verts found when initialize RJD")
            else:
                self.verts = verts
Code example #2 (score: 0)
def proc_coco(train_dir, test_dir, train_id, test_id, coco_dataset):
    """Process the COCO dataset into HMD testing tuples.

    Filters person instances by simple rules, runs the HMR predictor,
    computes joint/anchor deformations, and writes image, silhouette and
    parameter files for each accepted instance.

    NOTE: the train-set pass was disabled (fully commented out) in the
    original source and has been removed here; only the test set is
    produced and `train_id` is returned unchanged.

    Args:
        train_dir: output directory for the (currently unused) train set.
        test_dir: output directory for the test set.
        train_id: running id for train samples (returned as-is).
        test_id: running id for test samples (incremented per sample).
        coco_dataset: path to the COCO annotation json file.

    Returns:
        (train_id, test_id) updated running ids.
    """
    # read dataset
    coco = COCO(coco_dataset)
    tupleIds = coco.getImgIds(catIds=1)  # id = 1 means person

    faces = np.load("../predef/smpl_faces.npy")

    hmr_pred = hmr_predictor()
    renderer = rd.SMPLRenderer(face_path="../predef/smpl_faces.npy")

    with open('../predef/mesh_joint_list.pkl', 'rb') as fp:
        mesh_joint = pickle.load(fp)

    count_all = 0.
    count_work = 0.

    total_num = len(tupleIds)
    train_num = int(np.floor(total_num * 0.8))  # 80/20 train/test split

    # make test set
    test_num = total_num - train_num
    # start offset 1855: hardcoded resume point from an interrupted run
    tr = trange(1855, test_num, desc='Bar desc', leave=True)
    for i in tr:
        tr.set_description("COCO - test part")
        tr.refresh()  # to show immediately the update
        sleep(0.01)

        count_all += 1

        # get tuple
        one_tuple = coco.loadImgs(tupleIds[i + train_num])[0]
        img_size = (one_tuple['height'], one_tuple['width'])
        crt_id = one_tuple['id']

        # get anns
        annIds = coco.getAnnIds(imgIds=one_tuple['id'], catIds=1, iscrowd=None)
        anns = coco.loadAnns(annIds)

        # RULE 1: keep only images with fewer than 4 person instances
        if len(anns) > 3:
            take_notes("COCO %05d BAN -1\n" % (i + train_num),
                       "./data_log.txt")
            continue

        for j in range(len(anns)):

            # get sil points
            seg_points = anns[j]['segmentation'][0]

            # RULE 2: seg_points number >= 100
            if len(seg_points) < 100:
                take_notes("COCO %05d%03d BAN -1\n" % (i + train_num, j),
                           "./data_log.txt")
                continue

            # get key points
            key_points = anns[j]['keypoints']
            key_points = np.resize(key_points, (17, 3))

            # draw sil
            sil = points2sil(seg_points, img_size)

            result = coco_filter(key_points, sil)

            if result is False:
                take_notes("COCO %05d BAN -1\n" % (i + train_num),
                           "./data_log.txt")
                continue
            # Here we finally decide to use it
            if result is True:
                # read image from the COCO url (requires network access)
                ori_img = io.imread(one_tuple['coco_url'])

                # read sil
                src_gt_sil = sil

                # hmr predict
                verts, cam, proc_para, std_img = hmr_pred.predict(
                    ori_img, True, src_gt_sil)

                # unnormalize std_img from [-1, 1] to uint8 [0, 255]
                src_img = ((std_img + 1) / 2.0 * 255).astype(np.uint8)

                # save img
                img_file = "img/COCO_%08d%02d.png" % (crt_id, j)
                PIL.Image.fromarray(src_img).save(test_dir + img_file)

                # process sil
                gt_sil = proc_sil(src_gt_sil, proc_para)

                # get proj sil
                proj_sil = renderer.silhouette(verts=verts,
                                               cam=cam,
                                               img_size=src_img.shape,
                                               norm=False)

                # make TriMesh
                mesh = make_trimesh(verts, faces, compute_vn=True)
                vert_norms = mesh.vertex_normals()

                # get joint move
                coco_joints_t = transform_coco_joints(key_points)
                new_jv, _, joint_move, joint_posi = get_joint_move(
                    verts,
                    coco_joints_t,
                    proc_para,
                    mesh_joint,
                    unseen_mode=True,
                )
                joint_move = joint_move.flatten()

                # joint deform
                fd_ja = fast_deform_dja(weight=10.0)
                ja_verts = fd_ja.deform(np.asarray(verts), new_jv)

                # get achr move
                proj_sil_ja = renderer.silhouette(verts=ja_verts, norm=False)
                _, achr_verts, achr_move = get_achr_move(
                    gt_sil, ja_verts, vert_norms, proj_sil_ja)
                achr_posi = get_anchor_posi(achr_verts)

                # save sil (3 channels: gt / projected / joint-deformed proj)
                compose_sil = np.stack((gt_sil, proj_sil, proj_sil_ja))
                compose_sil = np.moveaxis(compose_sil, 0, 2)
                compose_sil = PIL.Image.fromarray(compose_sil.astype(np.uint8))
                compose_sil.save(test_dir + "sil/%08d.png" % test_id)

                # save para
                proc_para['end_pt'] = proc_para['end_pt'].tolist()
                proc_para['start_pt'] = proc_para['start_pt'].tolist()
                para = {
                    "verts": verts.tolist(),
                    "vert_norms": vert_norms.tolist(),
                    "proc_para": proc_para,
                    "joint_move": joint_move.tolist(),
                    "joint_posi": joint_posi.tolist(),
                    "achr_move": achr_move.tolist(),
                    "achr_posi": achr_posi.tolist(),
                    "img_file": img_file,
                }
                # text mode: json.dump writes str, so 'w' (not 'wb') is
                # required on Python 3 and harmless on Python 2
                with open(test_dir + "para/%08d.json" % test_id, 'w') as fp:
                    json.dump(para, fp)

                take_notes(
                    "COCO %05d%03d TEST %08d\n" % (i + train_num, j, test_id),
                    "./data_log.txt")
                test_id += 1
                count_work += 1

    print("work ratio = %f, (%d / %d)" %
          (count_work / count_all, count_work, count_all))
    return train_id, test_id
Code example #3 (score: 0)
File: MPII.py — Project: zhuhao-nju/hmd
def proc_mpii(train_dir, test_dir, train_id, test_id, upi_dir):
    """Process the MPII dataset (UP-I silhouettes) into HMD tuples.

    Images 1..10424 feed the train set, 10425 onward the test set.  For
    each accepted image the HMR predictor is run, joint/anchor
    deformations computed, and image/silhouette/parameter files written.

    Args:
        train_dir: output directory for train samples.
        test_dir: output directory for test samples.
        train_id: running id for train samples (incremented per sample).
        test_id: running id for test samples (incremented per sample).
        upi_dir: root of the UP-I dataset (contains data/mpii/).

    Returns:
        (train_id, test_id) updated running ids.
    """
    mpii_dir = upi_dir + "data/mpii/"
    faces = np.load("../predef/smpl_faces.npy")

    hmr_pred = hmr_predictor()
    renderer = rd.SMPLRenderer(face_path="../predef/smpl_faces.npy")

    mpii_joints_o = np.load(mpii_dir + "poses.npz")['poses']
    mpii_joints = transform_mpii_joints(mpii_joints_o)
    with open('../predef/mesh_joint_list.pkl', 'rb') as fp:
        mesh_joint = pickle.load(fp)

    count_all = 0.
    count_work = 0.

    # make train set
    tr = trange(10424, desc='Bar desc', leave=True)
    for i in tr:
        tr.set_description("MPII - train part")
        tr.refresh()  # to show immediately the update
        sleep(0.01)

        count_all += 1

        # read sil (first channel of the segmentation png)
        src_gt_sil = np.array(PIL.Image.open(
            mpii_dir + "images/%05d_segmentation.png" % (i + 1)))[:, :, 0]

        # judge using filter
        result = mpii_filter(mpii_joints[:, :, i], src_gt_sil)
        if result is False:
            take_notes("MPII %05d BAN -1\n" % (i + 1), "./data_log.txt")
            continue

        # read ori img
        ori_img = np.array(PIL.Image.open(
            mpii_dir + "images/%05d.png" % (i + 1)))

        # hmr predict
        verts, cam, proc_para, std_img = hmr_pred.predict(
            ori_img, True, src_gt_sil)

        # unnormalize std_img from [-1, 1] to uint8 [0, 255]
        # (np.float64 replaces the np.float alias removed in NumPy 1.24)
        src_img = ((std_img + 1).astype(np.float64) / 2.0 * 255).astype(np.uint8)

        # save img
        img_file = "img/MPII_%08d.png" % (i + 1)
        PIL.Image.fromarray(src_img).save(train_dir + img_file)

        # process sil
        gt_sil = proc_sil(src_gt_sil, proc_para)

        # get proj sil
        proj_sil = renderer.silhouette(verts=verts,
                                       cam=cam,
                                       img_size=src_img.shape,
                                       norm=False)

        # make TriMesh
        mesh = make_trimesh(verts, faces, compute_vn=True)
        vert_norms = mesh.vertex_normals()

        # get joint move
        new_jv, _, joint_move, joint_posi = get_joint_move(verts,
                                                           mpii_joints[:, :, i],
                                                           proc_para,
                                                           mesh_joint)
        joint_move = joint_move.flatten()

        # joint deform
        fd_ja = fast_deform_dja(weight=10.0)
        ja_verts = fd_ja.deform(np.asarray(verts), new_jv)

        # get achr move
        proj_sil_ja = renderer.silhouette(verts=ja_verts, norm=False)
        _, achr_verts, achr_move = get_achr_move(gt_sil,
                                                 ja_verts,
                                                 vert_norms,
                                                 proj_sil_ja)
        achr_posi = get_anchor_posi(achr_verts)

        # save sil (3 channels: gt / projected / joint-deformed projected)
        compose_sil = np.stack((gt_sil, proj_sil, proj_sil_ja))
        compose_sil = np.moveaxis(compose_sil, 0, 2)
        compose_sil = PIL.Image.fromarray(compose_sil.astype(np.uint8))
        compose_sil.save(train_dir + "sil/%08d.png" % train_id)

        # save para
        proc_para['end_pt'] = proc_para['end_pt'].tolist()
        proc_para['start_pt'] = proc_para['start_pt'].tolist()
        para = {"verts": verts.tolist(),
                "vert_norms": vert_norms.tolist(),
                "proc_para": proc_para,
                "joint_move": joint_move.tolist(),
                "joint_posi": joint_posi.tolist(),
                "achr_move": achr_move.tolist(),
                "achr_posi": achr_posi.tolist(),
                "img_file": img_file,
                }
        # text mode: json.dump writes str, so 'w' (not 'wb') is required
        # on Python 3 and harmless on Python 2
        with open(train_dir + "para/%08d.json" % train_id, 'w') as fp:
            json.dump(para, fp)

        take_notes("MPII %05d TRAIN %08d\n" % (i + 1, train_id),
                   "./data_log.txt")
        train_id += 1
        count_work += 1

    # make test set
    tr = trange(2606, desc='Bar desc', leave=True)
    for i in tr:
        tr.set_description("MPII - test part")
        tr.refresh()  # to show immediately the update
        sleep(0.01)

        count_all += 1

        # read sil (first channel of the segmentation png)
        src_gt_sil = np.array(PIL.Image.open(
            mpii_dir + "images/%05d_segmentation.png" % (i + 10425)))[:, :, 0]

        # judge using filter
        result = mpii_filter(mpii_joints[:, :, i + 10424], src_gt_sil)
        if result is False:
            take_notes("MPII %05d BAN -1\n" % (i + 10425), "./data_log.txt")
            continue

        # read ori img
        ori_img = np.array(PIL.Image.open(
            mpii_dir + "images/%05d.png" % (i + 10425)))

        # hmr predict
        verts, cam, proc_para, std_img = hmr_pred.predict(
            ori_img, True, src_gt_sil)

        # unnormalize std_img from [-1, 1] to uint8 [0, 255]
        src_img = ((std_img + 1).astype(np.float64) / 2.0 * 255).astype(np.uint8)

        # save img
        img_file = "img/MPII_%08d.png" % (i + 10425)
        PIL.Image.fromarray(src_img).save(test_dir + img_file)

        # process sil
        gt_sil = proc_sil(src_gt_sil, proc_para)

        # get proj sil
        proj_sil = renderer.silhouette(verts=verts,
                                       cam=cam,
                                       img_size=src_img.shape,
                                       norm=False)

        # make TriMesh
        mesh = make_trimesh(verts, faces, compute_vn=True)
        vert_norms = mesh.vertex_normals()

        # get joint move
        new_jv, _, joint_move, joint_posi = get_joint_move(verts,
                                                           mpii_joints[:, :, i + 10424],
                                                           proc_para,
                                                           mesh_joint)
        joint_move = joint_move.flatten()

        # joint deform
        fd_ja = fast_deform_dja(weight=10.0)
        ja_verts = fd_ja.deform(np.asarray(verts), new_jv)

        # get achr move
        proj_sil_ja = renderer.silhouette(verts=ja_verts, norm=False)
        _, achr_verts, achr_move = get_achr_move(gt_sil,
                                                 ja_verts,
                                                 vert_norms,
                                                 proj_sil_ja)
        achr_posi = get_anchor_posi(achr_verts)

        # save sil (3 channels: gt / projected / joint-deformed projected)
        compose_sil = np.stack((gt_sil, proj_sil, proj_sil_ja))
        compose_sil = np.moveaxis(compose_sil, 0, 2)
        compose_sil = PIL.Image.fromarray(compose_sil.astype(np.uint8))
        compose_sil.save(test_dir + "sil/%08d.png" % test_id)

        # save para
        proc_para['end_pt'] = proc_para['end_pt'].tolist()
        proc_para['start_pt'] = proc_para['start_pt'].tolist()
        para = {"verts": verts.tolist(),
                "vert_norms": vert_norms.tolist(),
                "proc_para": proc_para,
                "joint_move": joint_move.tolist(),
                "joint_posi": joint_posi.tolist(),
                "achr_move": achr_move.tolist(),
                "achr_posi": achr_posi.tolist(),
                "img_file": img_file,
                }
        # text mode for json.dump (see note above in the train loop)
        with open(test_dir + "para/%08d.json" % test_id, 'w') as fp:
            json.dump(para, fp)

        take_notes("MPII %05d TEST %08d\n" % (i + 10425, test_id),
                   "./data_log.txt")
        test_id += 1
        count_work += 1

    print("work ratio = %f, (%d / %d)"
          % (count_work / count_all, count_work, count_all))
    return train_id, test_id
Code example #4 (score: 0)
    for j in range(len(point_list[i])):
        j_p_list.append(verts[point_list[i][j]])
    j_list.append(sum(j_p_list) / len(j_p_list))

# Build the deformed ("new") and original joint-vertex arrays: every
# vertex belonging to joint i is shifted by that joint's predicted offset.
new_jv = []
ori_jv = []
for i in range(len(j_list)):
    # make new joint verts
    for j in point_list[i]:
        new_jv.append(verts[j] + joint_para[i])
        ori_jv.append(verts[j])
new_jv = np.array(new_jv)
ori_jv = np.array(ori_jv)

# joint deform: move the mesh so its joint vertices match new_jv
fd_ja = fast_deform_dja(weight=10.0)
ja_verts = fd_ja.deform(np.asarray(verts), new_jv)

print("done")

# ==============================predict anchor==============================
print("anchor deform......", end='')

# make src_a
# silhouette of the joint-deformed mesh
# NOTE(review): buffers below assume a 224x224 silhouette, 200 anchors,
# and per-anchor 4-channel 32x32 patches — confirm against the consumer
proj_sil_j = my_renderer.silhouette(verts=ja_verts)
src_sil_j = np.zeros((224, 224, 2))
src_a = np.zeros((200, 4, 32, 32))

# make anchor posi
anchor_verts = np.zeros((200, 3))
for i in range(achr_num):
Code example #5 (score: 0)
def proc_lspet(train_dir, train_id, lspet_dir, upi_dir):
    """Process the LSP-extended dataset into HMD training tuples.

    Only a train set is produced (no test split for LSPET here).  Images
    without a UP-I silhouette, or rejected by `lspet_filter`, are logged
    and skipped.

    Args:
        train_dir: output directory for train samples.
        train_id: running id for train samples (incremented per sample).
        lspet_dir: root of the LSP-extended dataset.
        upi_dir: root of the UP-I dataset (provides the silhouettes).

    Returns:
        Updated train_id.
    """
    faces = np.load("../predef/smpl_faces.npy")

    hmr_pred = hmr_predictor()
    renderer = rd.SMPLRenderer(face_path="../predef/smpl_faces.npy")

    lspet_joints = loadmat(lspet_dir + "joints.mat")['joints']
    # roll axis because the definition order of lspet is different from lsp
    lspet_joints = np.rollaxis(lspet_joints, 1, 0)

    with open('../predef/mesh_joint_list.pkl', 'rb') as fp:
        mesh_joint = pickle.load(fp)

    count_all = 0.
    count_work = 0.

    # make train set
    tr = trange(10000, desc='Bar desc', leave=True)
    for i in tr:
        tr.set_description("LSPET - train part")
        tr.refresh()  # to show immediately the update
        sleep(0.01)

        count_all += 1

        # judge if sil file exists, if not, skip it
        sil_path = upi_dir + "lsp_extended/im%05d_segmentation.png" % (i + 1)
        if not os.path.isfile(sil_path):
            take_notes("LSPET %05d BAN -1\n" % (i + 1), "./data_log.txt")
            continue

        # read sil (first channel of the segmentation png)
        src_gt_sil = np.array(PIL.Image.open(sil_path))[:, :, 0]

        # judge using filter
        result = lspet_filter(lspet_joints[:, :, i], src_gt_sil)
        if result is False:
            take_notes("LSPET %05d BAN -1\n" % (i + 1), "./data_log.txt")
            continue

        # read ori img
        ori_img = np.array(PIL.Image.open(
            lspet_dir + "images/im%05d.jpg" % (i + 1)))

        # hmr predict
        verts, cam, proc_para, std_img = hmr_pred.predict(
            ori_img, True, src_gt_sil)

        # unnormalize std_img from [-1, 1] to uint8 [0, 255]
        # (np.float64 replaces the np.float alias removed in NumPy 1.24)
        src_img = ((std_img + 1).astype(np.float64) / 2.0 * 255).astype(np.uint8)

        # save img
        img_file = "img/LSPET_%08d.png" % (i + 1)
        PIL.Image.fromarray(src_img).save(train_dir + img_file)

        # process sil
        gt_sil = proc_sil(src_gt_sil, proc_para)

        # get proj sil
        proj_sil = renderer.silhouette(verts=verts,
                                       cam=cam,
                                       img_size=src_img.shape,
                                       norm=False)

        # make TriMesh
        mesh = make_trimesh(verts, faces, compute_vn=True)
        vert_norms = mesh.vertex_normals()

        # get joint move
        new_jv, _, joint_move, joint_posi = get_joint_move(verts,
                                                           lspet_joints[:, :, i],
                                                           proc_para,
                                                           mesh_joint,
                                                           unseen_mode=True)
        joint_move = joint_move.flatten()

        # joint deform
        fd_ja = fast_deform_dja(weight=10.0)
        ja_verts = fd_ja.deform(np.asarray(verts), new_jv)

        # get achr move
        proj_sil_ja = renderer.silhouette(verts=ja_verts, norm=False)
        _, achr_verts, achr_move = get_achr_move(gt_sil,
                                                 ja_verts,
                                                 vert_norms,
                                                 proj_sil_ja)
        achr_posi = get_anchor_posi(achr_verts)

        # save sil (3 channels: gt / projected / joint-deformed projected)
        compose_sil = np.stack((gt_sil, proj_sil, proj_sil_ja))
        compose_sil = np.moveaxis(compose_sil, 0, 2)
        compose_sil = PIL.Image.fromarray(compose_sil.astype(np.uint8))
        compose_sil.save(train_dir + "sil/%08d.png" % train_id)

        # save para
        proc_para['end_pt'] = proc_para['end_pt'].tolist()
        proc_para['start_pt'] = proc_para['start_pt'].tolist()
        para = {"verts": verts.tolist(),
                "vert_norms": vert_norms.tolist(),
                "proc_para": proc_para,
                "joint_move": joint_move.tolist(),
                "joint_posi": joint_posi.tolist(),
                "achr_move": achr_move.tolist(),
                "achr_posi": achr_posi.tolist(),
                "img_file": img_file,
                }
        # text mode: json.dump writes str, so 'w' (not 'wb') is required
        # on Python 3 and harmless on Python 2
        with open(train_dir + "para/%08d.json" % train_id, 'w') as fp:
            json.dump(para, fp)

        take_notes("LSPET %05d TRAIN %08d\n" % (i + 1, train_id),
                   "./data_log.txt")
        train_id += 1
        count_work += 1

    print("work ratio = %f, (%d / %d)"
          % (count_work / count_all, count_work, count_all))
    return train_id
Code example #6 (score: 0)
File: H36M.py — Project: zhuhao-nju/hmd
def proc_h36m(train_dir, test_dir, train_id, test_id, h36m_dir):
    """Process Human3.6M videos into HMD testing tuples.

    Samples every `sample_interval`-th frame of each video, runs the HMR
    predictor, computes joint/anchor deformations, and writes image,
    silhouette and parameter files.

    NOTE: only the test portion (the last 20% of videos) is processed in
    this version; no train samples are written and `train_id` is returned
    unchanged.

    Args:
        train_dir: output directory for the (currently unused) train set.
        test_dir: output directory for the test set.
        train_id: running id for train samples (returned as-is).
        test_id: running id for test samples (incremented per sample).
        h36m_dir: root directory of the Human3.6M videos/poses.

    Returns:
        (train_id, test_id) updated running ids.
    """
    sample_interval = 10  # process every 10th frame

    faces = np.load("../predef/smpl_faces.npy")

    with open('../predef/mesh_joint_list.pkl', 'rb') as fp:
        mesh_joint = pickle.load(fp)

    hmr_pred = hmr_predictor()
    renderer = rd.SMPLRenderer(face_path="../predef/smpl_faces.npy")

    # open available video list
    # NOTE(review): assumes CRLF line endings in h36m_list.txt — confirm
    with open("./h36m_list.txt") as f:
        h36m_list = f.read().split("\r\n")
    vid_num = int(h36m_list[0])
    # each entry: [image video, silhouette video, pose .mat file]
    h36m_list = [[
        h36m_list[i * 3 + 1], h36m_list[i * 3 + 2], h36m_list[i * 3 + 3]
    ] for i in range(vid_num)]

    # compute data number for training and testing
    train_num = int(vid_num * 0.8)
    test_num = vid_num - train_num

    count_all = 0.
    count_work = 0.

    # make test set
    tr = trange(test_num, desc='Bar desc', leave=True)
    for i in tr:
        tr.set_description("H36M - test part")
        tr.refresh()  # to show immediately the update
        sleep(0.01)

        vid_idx = i + train_num

        # read video of image, silhouette and pose
        vid_img = cv2.VideoCapture(h36m_dir + h36m_list[vid_idx][0])
        vid_sil = cv2.VideoCapture(h36m_dir + h36m_list[vid_idx][1])
        pose_list = loadmat(h36m_dir + h36m_list[vid_idx][2])['pose']
        vid_len = min(int(vid_img.get(cv2.CAP_PROP_FRAME_COUNT)),
                      int(vid_sil.get(cv2.CAP_PROP_FRAME_COUNT)),
                      len(pose_list))

        for frm_idx in range(0, vid_len, sample_interval):

            count_all += 1

            # read sil: binarize, drop small blobs, keep first channel
            vid_sil.set(1, frm_idx)
            _, src_gt_sil = vid_sil.read()
            src_gt_sil[src_gt_sil < 128] = 0
            src_gt_sil[src_gt_sil >= 128] = 255
            src_gt_sil = refine_sil(src_gt_sil, 100)
            src_gt_sil = src_gt_sil[:, :, 0]

            # read ori img
            vid_img.set(1, frm_idx)
            _, ori_img = vid_img.read()
            # BGR to RGB
            ori_img = np.stack(
                (ori_img[:, :, 2], ori_img[:, :, 1], ori_img[:, :, 0]), axis=2)

            # hmr predict
            verts, cam, proc_para, std_img = hmr_pred.predict(
                ori_img, True, src_gt_sil)

            # unnormalize std_img from [-1, 1] to uint8 [0, 255]
            # (np.float64 replaces the np.float alias removed in NumPy 1.24)
            src_img = ((std_img + 1).astype(np.float64) / 2.0 * 255).astype(
                np.uint8)

            # save img
            img_file = "img/H36M_%04d%04d.png" % (vid_idx, frm_idx)
            PIL.Image.fromarray(src_img).save(test_dir + img_file)

            # process sil
            gt_sil = proc_sil(src_gt_sil, proc_para)

            # get proj sil
            proj_sil = renderer.silhouette(verts=verts,
                                           cam=cam,
                                           img_size=src_img.shape,
                                           norm=False)

            # make TriMesh
            mesh = make_trimesh(verts, faces, compute_vn=True)
            vert_norms = mesh.vertex_normals()

            h36m_joint = transform_h36m_joints(pose_list[frm_idx])
            # get joint move
            new_jv, _, joint_move, joint_posi = get_joint_move(
                verts, h36m_joint, proc_para, mesh_joint)
            joint_move = joint_move.flatten()

            # joint deform
            fd_ja = fast_deform_dja(weight=10.0)
            ja_verts = fd_ja.deform(np.asarray(verts), new_jv)

            # get achr move
            proj_sil_ja = renderer.silhouette(verts=ja_verts, norm=False)
            _, achr_verts, achr_move = get_achr_move(gt_sil, ja_verts,
                                                     vert_norms, proj_sil_ja)
            achr_posi = get_anchor_posi(achr_verts)

            # save sil (3 channels: gt / projected / joint-deformed proj)
            compose_sil = np.stack((gt_sil, proj_sil, proj_sil_ja))
            compose_sil = np.moveaxis(compose_sil, 0, 2)
            compose_sil = PIL.Image.fromarray(compose_sil.astype(np.uint8))
            compose_sil.save(test_dir + "sil/%08d.png" % test_id)

            # save para
            proc_para['end_pt'] = proc_para['end_pt'].tolist()
            proc_para['start_pt'] = proc_para['start_pt'].tolist()
            para = {
                "verts": verts.tolist(),
                "vert_norms": vert_norms.tolist(),
                "proc_para": proc_para,
                "joint_move": joint_move.tolist(),
                "joint_posi": joint_posi.tolist(),
                "achr_move": achr_move.tolist(),
                "achr_posi": achr_posi.tolist(),
                "img_file": img_file,
            }
            # text mode: json.dump writes str, so 'w' (not 'wb') is
            # required on Python 3 and harmless on Python 2
            with open(test_dir + "para/%08d.json" % test_id, 'w') as fp:
                json.dump(para, fp)

            take_notes(
                "H36M %04d%04d TEST %08d\n" % (vid_idx, frm_idx, test_id),
                "./data_log.txt")
            test_id += 1
            count_work += 1

        # fix: release the capture handles before moving to the next video
        # (the original leaked one pair of VideoCapture objects per video)
        vid_img.release()
        vid_sil.release()

    print("work ratio = %f, (%d / %d)" %
          (count_work / count_all, count_work, count_all))
    return train_id, test_id