Example #1
def render_hmr_smpl(hmr_coeff, f_len=500., rend_size=224., req_model=False):
    # hmr_coeff is a 85-vector, named as theta in hmr
    hmr_coeff = np.asarray(hmr_coeff).tolist()

    # make scene
    scene = pyrender.Scene()

    # initialize camera
    camera = pyrender.PerspectiveCamera(
        yfov=np.arctan(rend_size * 0.5 / f_len) * 2, aspectRatio=1)
    camera_pose = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0],
                            [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]])
    scene.add(camera, pose=camera_pose)

    # initialize light
    light_posi1 = np.array([[1.0, 0.0, 0.0, 1.0], [0.0, 0.0, 1.0, -1.0],
                            [0.0, 1.0, 0.0, -2.0], [0.0, 0.0, 0.0, 1.0]])
    light_posi2 = np.array([[1.0, 0.0, 0.0, -1.0], [0.0, 0.0, 1.0, -1.0],
                            [0.0, 1.0, 0.0, -2.0], [0.0, 0.0, 0.0, 1.0]])
    light_posi3 = np.array([[1.0, 0.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0],
                            [0.0, 1.0, 0.0, -2.0], [0.0, 0.0, 0.0, 1.0]])
    light = pyrender.SpotLight(color=np.array(
        [0.65098039, 0.74117647, 0.85882353]),
                               intensity=100,
                               innerConeAngle=np.pi / 16.0,
                               outerConeAngle=np.pi / 6.0)
    scene.add(light, pose=light_posi1)
    scene.add(light, pose=light_posi2)
    scene.add(light, pose=light_posi3)

    # get renderer
    r = pyrender.OffscreenRenderer(viewport_width=int(rend_size),
                                   viewport_height=int(rend_size))

    # get verts from smpl coefficients
    smpl_op = load_model("./tf_smpl/neutral_smpl_with_cocoplus_reg.pkl")
    smpl_op.pose[:] = np.asarray(hmr_coeff[3:75])
    smpl_op.betas[:] = np.array(hmr_coeff[75:85])
    verts = np.array(smpl_op)
    global_t = np.array(
        [hmr_coeff[1], hmr_coeff[2], f_len / (0.5 * rend_size * hmr_coeff[0])])
    verts = verts + global_t
    faces = np.load("./tf_smpl/smpl_faces.npy").astype(np.int32)

    # smooth and expand
    om_mesh = make_trimesh(verts, faces)
    om_mesh = smooth_mesh(om_mesh, 4)
    om_mesh = expand_mesh(om_mesh, 0.026)

    this_trimesh = trimesh.Trimesh(vertices=om_mesh.points(),
                                   faces=om_mesh.face_vertex_indices())
    this_mesh = pyrender.Mesh.from_trimesh(this_trimesh)

    scene.add(this_mesh)
    rend_img, depth = r.render(scene)

    if req_model:
        return rend_img, verts, faces, depth
    else:
        return rend_img
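
A minimal usage sketch, assuming the 85-vector layout implied by the slicing above (1 scale + 2 translation + 72 pose + 10 shape) and that PIL is imported as in the later examples; the coefficients below are hypothetical:

theta = [0.9, 0.0, 0.0] + [0.0] * 82  # neutral pose/shape, rough weak-perspective camera
img = render_hmr_smpl(theta)
PIL.Image.fromarray(img).save("smpl_render.png")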
Example #2
def proc_coco(train_dir, test_dir, train_id, test_id, coco_dataset):

    # read dataset
    coco = COCO(coco_dataset)
    tupleIds = coco.getImgIds(catIds=1)  # id = 1 means person

    faces = np.load("../predef/smpl_faces.npy")
    face_num = len(faces)

    hmr_pred = hmr_predictor()
    renderer = rd.SMPLRenderer(face_path="../predef/smpl_faces.npy")

    with open('../predef/mesh_joint_list.pkl', 'rb') as fp:
        mesh_joint = pickle.load(fp)

    count_all = 0.
    count_work = 0.

    total_num = len(tupleIds)
    train_num = int(np.floor(total_num * 0.8))

    # make train set
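    # NOTE: the hard-coded start index (3584) appears to be a resume offset
    # from an interrupted run; the train loop itself is commented out below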
    tr = trange(3584, train_num, desc='Bar desc', leave=True)
    # for i in tr:
    #     tr.set_description("COCO - train part")
    #     tr.refresh() # to show immediately the update
    #     sleep(0.01)

    #     count_all += 1

    #     # get tuple
    #     one_tuple = coco.loadImgs(tupleIds[i])[0]
    #     img_size = (one_tuple['height'], one_tuple['width'])
    #     crt_id = one_tuple['id']

    #     # get anns
    #     annIds = coco.getAnnIds(imgIds=one_tuple['id'], catIds=1, iscrowd=None)
    #     anns = coco.loadAnns(annIds)

    #     # RULE 1: objects < 5
    #     if len(anns)>4:
    #         #print("filter out by too many objects")
    #         take_notes("COCO %05d BAN -1\n" % (i), "./data_log.txt")
    #         continue

    #     for j in range(len(anns)):

    #         # get sil points
    #         seg_points = anns[j]['segmentation'][0]

    #         # RULE 2: seg_points number >= 80
    #         if len(seg_points)<80:
    #             take_notes("COCO %05d%03d BAN -1\n" % (i, j), "./data_log.txt")
    #             #print("filter out by too few seg_points number")
    #             continue

    #         # get key points
    #         key_points = anns[j]['keypoints']
    #         key_points = np.resize(key_points,(17,3))

    #         # draw sil
    #         sil = points2sil(seg_points, img_size)

    #         result = coco_filter(key_points, sil)

    #         if result is False:
    #             take_notes("COCO %05d BAN -1\n" % (i), "./data_log.txt")
    #             continue
    #         # Here we finally decide to use it
    #         if result is True:
    #             # read image
    #             print(one_tuple['coco_url'])
    #             ori_img = io.imread(one_tuple['coco_url'])

    #             # read sil
    #             src_gt_sil = sil

    #             # hmr predict
    #             verts, cam, proc_para, std_img = hmr_pred.predict(ori_img,
    #                                                               True,
    #                                                               src_gt_sil)

    #             # unnormalize std_img
    #             src_img = ((std_img+1)/2.0*255).astype(np.uint8)

    #             # save img
    #             img_file = "img/COCO_%08d%02d.png" % (crt_id, j)
    #             PIL.Image.fromarray(src_img).save(train_dir + img_file)

    #             # process sil
    #             gt_sil = proc_sil(src_gt_sil, proc_para)

    #             # get proj sil
    #             proj_sil = renderer.silhouette(verts = verts,
    #                                            cam = cam,
    #                                            img_size = src_img.shape,
    #                                            norm = False)

    #             # make TriMesh
    #             mesh = make_trimesh(verts, faces, compute_vn = True)
    #             vert_norms = mesh.vertex_normals()

    #             # get joint move
    #             coco_joints_t = transform_coco_joints(key_points)
    #             new_jv, _, joint_move, joint_posi = get_joint_move(verts,
    #                                                    coco_joints_t,
    #                                                    proc_para,
    #                                                    mesh_joint,
    #                                                    unseen_mode = True,
    #                                                   )
    #             joint_move = joint_move.flatten()

    #             # joint deform
    #             fd_ja = fast_deform_dja(weight = 10.0)
    #             ja_verts = fd_ja.deform(np.asarray(verts), new_jv)

    #             # get achr move
    #             proj_sil_ja = renderer.silhouette(verts = ja_verts,
    #                                            norm = False)
    #             _, achr_verts, achr_move = get_achr_move(gt_sil,
    #                                                      ja_verts,
    #                                                      vert_norms,
    #                                                      proj_sil_ja)
    #             achr_posi = get_anchor_posi(achr_verts)

    #             # save sil
    #             compose_sil = np.stack((gt_sil, proj_sil, proj_sil_ja))
    #             compose_sil = np.moveaxis(compose_sil, 0, 2)
    #             compose_sil = PIL.Image.fromarray(compose_sil.astype(np.uint8))
    #             compose_sil.save(train_dir + "sil/%08d.png" % train_id)

    #             # save para
    #             proc_para['end_pt'] = proc_para['end_pt'].tolist()
    #             proc_para['start_pt'] = proc_para['start_pt'].tolist()
    #             para = {"verts": verts.tolist(),
    #                     "vert_norms": vert_norms.tolist(),
    #                     "proc_para": proc_para,
    #                     "joint_move": joint_move.tolist(),
    #                     "joint_posi": joint_posi.tolist(),
    #                     "achr_move": achr_move.tolist(),
    #                     "achr_posi": achr_posi.tolist(),
    #                     "img_file": img_file,
    #                    }
    #             with open(train_dir + "para/%08d.json" % train_id, 'wb') as fp:
    #                 json.dump(para, fp)

    #             take_notes("COCO %05d%03d TRAIN %08d\n" % (i, j, train_id),
    #                        "./data_log.txt")
    #             train_id += 1
    #             count_work += 1

    # make test set
    test_num = total_num - train_num
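    # NOTE: as above, the 1855 start index appears to be a resume offset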
    tr = trange(1855, test_num, desc='Bar desc', leave=True)
    for i in tr:
        tr.set_description("COCO - test part")
        tr.refresh()  # to show immediately the update
        sleep(0.01)

        count_all += 1

        # get tuple
        one_tuple = coco.loadImgs(tupleIds[i + train_num])[0]
        img_size = (one_tuple['height'], one_tuple['width'])
        crt_id = one_tuple['id']

        # get anns
        annIds = coco.getAnnIds(imgIds=one_tuple['id'], catIds=1, iscrowd=None)
        anns = coco.loadAnns(annIds)

        # RULE 1: objects < 4
        if len(anns) > 3:
            #print("filter out by too many objects")
            take_notes("COCO %05d BAN -1\n" % (i + train_num),
                       "./data_log.txt")
            continue

        for j in range(len(anns)):

            # get sil points
            seg_points = anns[j]['segmentation'][0]

            # RULE 2: seg_points number >= 100
            if len(seg_points) < 100:
                take_notes("COCO %05d%03d BAN -1\n" % (i + train_num, j),
                           "./data_log.txt")
                #print("filter out by too few seg_points number")
                continue

            # get key points
            key_points = anns[j]['keypoints']
            key_points = np.resize(key_points, (17, 3))

            # draw sil
            sil = points2sil(seg_points, img_size)

            result = coco_filter(key_points, sil)

            if result is False:
                take_notes("COCO %05d BAN -1\n" % (i + train_num),
                           "./data_log.txt")
                continue
            # Here we finally decide to use it
            if result is True:
                # read image
                ori_img = io.imread(one_tuple['coco_url'])

                # read sil
                src_gt_sil = sil

                # hmr predict
                verts, cam, proc_para, std_img = hmr_pred.predict(
                    ori_img, True, src_gt_sil)

                # unnormalize std_img
                src_img = ((std_img + 1) / 2.0 * 255).astype(np.uint8)

                # save img
                img_file = "img/COCO_%08d%02d.png" % (crt_id, j)
                PIL.Image.fromarray(src_img).save(test_dir + img_file)

                # process sil
                gt_sil = proc_sil(src_gt_sil, proc_para)

                # get proj sil
                proj_sil = renderer.silhouette(verts=verts,
                                               cam=cam,
                                               img_size=src_img.shape,
                                               norm=False)

                # make TriMesh
                mesh = make_trimesh(verts, faces, compute_vn=True)
                vert_norms = mesh.vertex_normals()

                # get joint move
                coco_joints_t = transform_coco_joints(key_points)
                new_jv, _, joint_move, joint_posi = get_joint_move(
                    verts,
                    coco_joints_t,
                    proc_para,
                    mesh_joint,
                    unseen_mode=True,
                )
                joint_move = joint_move.flatten()

                # joint deform
                fd_ja = fast_deform_dja(weight=10.0)
                ja_verts = fd_ja.deform(np.asarray(verts), new_jv)

                # get achr move
                proj_sil_ja = renderer.silhouette(verts=ja_verts, norm=False)
                _, achr_verts, achr_move = get_achr_move(
                    gt_sil, ja_verts, vert_norms, proj_sil_ja)
                achr_posi = get_anchor_posi(achr_verts)

                # save sil
                compose_sil = np.stack((gt_sil, proj_sil, proj_sil_ja))
                compose_sil = np.moveaxis(compose_sil, 0, 2)
                compose_sil = PIL.Image.fromarray(compose_sil.astype(np.uint8))
                compose_sil.save(test_dir + "sil/%08d.png" % test_id)

                # save para
                proc_para['end_pt'] = proc_para['end_pt'].tolist()
                proc_para['start_pt'] = proc_para['start_pt'].tolist()
                para = {
                    "verts": verts.tolist(),
                    "vert_norms": vert_norms.tolist(),
                    "proc_para": proc_para,
                    "joint_move": joint_move.tolist(),
                    "joint_posi": joint_posi.tolist(),
                    "achr_move": achr_move.tolist(),
                    "achr_posi": achr_posi.tolist(),
                    "img_file": img_file,
                }
                with open(test_dir + "para/%08d.json" % test_id, 'w') as fp:
                    json.dump(para, fp)

                take_notes(
                    "COCO %05d%03d TEST %08d\n" % (i + train_num, j, test_id),
                    "./data_log.txt")
                test_id += 1
                count_work += 1

    print("work ratio = %f, (%d / %d)" %
          (count_work / count_all, count_work, count_all))
    return train_id, test_id
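
A hypothetical invocation (all paths are made up; proc_mpii, proc_lspet, and proc_h36m in the later examples follow the same calling pattern):

train_id, test_id = proc_coco(train_dir="../data/train/",
                              test_dir="../data/test/",
                              train_id=0,
                              test_id=0,
                              coco_dataset="../coco/annotations/person_keypoints_train2014.json")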
Example #3
File: MPII.py Project: zhuhao-nju/hmd
def proc_mpii(train_dir, test_dir, train_id, test_id, upi_dir):
    mpii_dir = upi_dir + "data/mpii/"
    faces = np.load("../predef/smpl_faces.npy")
    face_num = len(faces)
    
    hmr_pred = hmr_predictor()
    renderer = rd.SMPLRenderer(face_path = "../predef/smpl_faces.npy")
    
    mpii_joints_o = np.load(mpii_dir + "poses.npz")['poses']
    mpii_joints = transform_mpii_joints(mpii_joints_o)
    with open('../predef/mesh_joint_list.pkl', 'rb') as fp:
        mesh_joint = pickle.load(fp)
    
    count_all = 0.
    count_work = 0.
        
    # make train set
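    # images 00001-10424 form the train split; 10425-13030 (below) the test split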
    tr = trange(10424, desc='Bar desc', leave=True)
    for i in tr:
        tr.set_description("MPII - train part")
        tr.refresh() # to show immediately the update
        sleep(0.01)
        
        count_all += 1
        
        # read sil
        src_gt_sil = np.array(PIL.Image.open(mpii_dir + \
                     "images/%05d_segmentation.png"%(i+1)))[:,:,0]
        
        # judge using filter
        result = mpii_filter(mpii_joints[:,:,i], src_gt_sil)
        if result is False:
            take_notes("MPII %05d BAN -1\n" % (i+1), "./data_log.txt")
            continue        
        
        # read ori img
        ori_img = np.array(PIL.Image.open(
                  mpii_dir + "images/%05d.png"%(i+1)))

        # hmr predict
        verts, cam, proc_para, std_img = hmr_pred.predict(ori_img, 
                                                          True, 
                                                          src_gt_sil)
                
        # unnormalize std_img
        src_img = ((std_img+1).astype(float)/2.0*255).astype(np.uint8)
        
        # save img
        img_file = "img/MPII_%08d.png" % (i + 1)
        PIL.Image.fromarray(src_img).save(train_dir + img_file)
        
        # process sil
        gt_sil = proc_sil(src_gt_sil, proc_para)
        
        # get proj sil
        proj_sil = renderer.silhouette(verts = verts,
                                       cam = cam,
                                       img_size = src_img.shape,
                                       norm = False)

        # make TriMesh
        mesh = make_trimesh(verts, faces, compute_vn = True)
        vert_norms = mesh.vertex_normals()

        # get joint move
        new_jv, _, joint_move, joint_posi = get_joint_move(verts, 
                                               mpii_joints[:,:,i], 
                                               proc_para,
                                               mesh_joint)
        joint_move = joint_move.flatten()

        # joint deform
        fd_ja = fast_deform_dja(weight = 10.0)
        ja_verts = fd_ja.deform(np.asarray(verts), new_jv)
        
        
        # get achr move
        proj_sil_ja = renderer.silhouette(verts = ja_verts,
                                       norm = False)
        _, achr_verts, achr_move = get_achr_move(gt_sil, 
                                                 ja_verts, 
                                                 vert_norms,
                                                 proj_sil_ja)
        achr_posi = get_anchor_posi(achr_verts)

        # save sil
        compose_sil = np.stack((gt_sil, proj_sil, proj_sil_ja))
        compose_sil = np.moveaxis(compose_sil, 0, 2)
        compose_sil = PIL.Image.fromarray(compose_sil.astype(np.uint8))
        compose_sil.save(train_dir + "sil/%08d.png" % train_id)

        # save para
        proc_para['end_pt'] = proc_para['end_pt'].tolist()
        proc_para['start_pt'] = proc_para['start_pt'].tolist()
        para = {"verts": verts.tolist(),
                "vert_norms": vert_norms.tolist(),
                "proc_para": proc_para,
                "joint_move": joint_move.tolist(),
                "joint_posi": joint_posi.tolist(),
                "achr_move": achr_move.tolist(),
                "achr_posi": achr_posi.tolist(),
                "img_file": img_file,
               }
        with open(train_dir + "para/%08d.json" % train_id, 'w') as fp:
            json.dump(para, fp)
    
        take_notes("MPII %05d TRAIN %08d\n" % (i+1, train_id), 
                   "./data_log.txt")
        train_id += 1
        count_work += 1
    
    #make test set
    tr = trange(2606, desc='Bar desc', leave=True)
    for i in tr:
        tr.set_description("MPII - test part")
        tr.refresh() # to show immediately the update
        sleep(0.01)

        count_all += 1
        
        # read sil
        src_gt_sil = np.array(PIL.Image.open(mpii_dir + \
                     "images/%05d_segmentation.png"%(i+10425)))[:,:,0]
        
        # judge using filter
        result = mpii_filter(mpii_joints[:,:,i+10424], src_gt_sil)
        if result is False:
            take_notes("MPII %05d BAN -1\n" % (i+10425), "./data_log.txt")
            continue        
        
        # read ori img
        ori_img = np.array(PIL.Image.open(
                  mpii_dir + "images/%05d.png"%(i+10425)))
        
        # hmr predict
        verts, cam, proc_para, std_img = hmr_pred.predict(ori_img, 
                                                          True, 
                                                          src_gt_sil)
                
        # unnormalize std_img
        src_img = ((std_img+1).astype(float)/2.0*255).astype(np.uint8)
        
        # save img
        img_file = "img/MPII_%08d.png" % (i+10425)
        PIL.Image.fromarray(src_img).save(test_dir + img_file)
        
        # process sil
        gt_sil = proc_sil(src_gt_sil, proc_para)
        
        # get proj sil
        proj_sil = renderer.silhouette(verts = verts,
                                       cam = cam,
                                       img_size = src_img.shape,
                                       norm = False)

        # make TriMesh
        mesh = make_trimesh(verts, faces, compute_vn = True)
        vert_norms = mesh.vertex_normals()

        # get joint move
        new_jv, _, joint_move, joint_posi = get_joint_move(verts, 
                                               mpii_joints[:,:,i+10424], 
                                               proc_para,
                                               mesh_joint)
        joint_move = joint_move.flatten()

        # joint deform
        fd_ja = fast_deform_dja(weight = 10.0)
        ja_verts = fd_ja.deform(np.asarray(verts), new_jv)
        
        
        # get achr move
        proj_sil_ja = renderer.silhouette(verts = ja_verts,
                                       norm = False)
        _, achr_verts, achr_move = get_achr_move(gt_sil, 
                                                 ja_verts, 
                                                 vert_norms,
                                                 proj_sil_ja)
        achr_posi = get_anchor_posi(achr_verts)

        # save sil
        compose_sil = np.stack((gt_sil, proj_sil, proj_sil_ja))
        compose_sil = np.moveaxis(compose_sil, 0, 2)
        compose_sil = PIL.Image.fromarray(compose_sil.astype(np.uint8))
        compose_sil.save(test_dir + "sil/%08d.png" % test_id)

        # save para
        proc_para['end_pt'] = proc_para['end_pt'].tolist()
        proc_para['start_pt'] = proc_para['start_pt'].tolist()
        para = {"verts": verts.tolist(),
                "vert_norms": vert_norms.tolist(),
                "proc_para": proc_para,
                "joint_move": joint_move.tolist(),
                "joint_posi": joint_posi.tolist(),
                "achr_move": achr_move.tolist(),
                "achr_posi": achr_posi.tolist(),
                "img_file": img_file,
               }
        with open(test_dir + "para/%08d.json" % test_id, 'w') as fp:
            json.dump(para, fp)
    
        take_notes("MPII %05d TEST %08d\n" % (i+10425, test_id), 
                   "./data_log.txt")
        test_id += 1
        count_work += 1
        
    print("work ratio = %f, (%d / %d)" 
          % (count_work/count_all, count_work, count_all))
    return train_id, test_id
Example #4
if opt.gpu:
    torch.cuda.empty_cache()

# prepare shading refine
device = torch.device("cuda:0" if opt.gpu else "cpu")

net_shading = shading_net(init_weights=True).eval().to(device)
if opt.gpu:
    net_shading.load_state_dict(
        torch.load(shading_model,
                   map_location='cuda:0'))  # load pretrained model
else:
    net_shading.load_state_dict(torch.load(shading_model, map_location='cpu'))

faces_smpl = np.load("../predef/smpl_faces.npy")
mesh = make_trimesh(sa_verts, faces_smpl)  # mesh after adjust joint and anchor

proj_sil = my_renderer.silhouette(verts=mesh.points())

proj_sil_l = cv2.resize(proj_sil, dsize=(448, 448))
proj_sil_l[proj_sil_l < 0.5] = 0
proj_sil_l[proj_sil_l >= 0.5] = 1
# img = PIL.Image.fromarray(proj_sil_l)
# img.show()
# load data
src_img_l = cv2.resize(src_img, dsize=(448, 448))
# img = PIL.Image.fromarray(src_img_l)
# img.show()
input_arr = np.rollaxis(src_img_l, 2, 0)
input_arr = np.expand_dims(input_arr, 0)
input_arr = input_arr / 255.0
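
Example #6 below is a fuller variant of this snippet; the device transfer it performs, missing here, would be:

input_arr = torch.tensor(input_arr).float().to(device)  # as in Example #6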
Example #5
def proc_lspet(train_dir, train_id, lspet_dir, upi_dir):
    
    faces = np.load("../predef/smpl_faces.npy")
    face_num = len(faces)

    hmr_pred = hmr_predictor()
    renderer = rd.SMPLRenderer(face_path="../predef/smpl_faces.npy")
    
    lspet_joints = loadmat(lspet_dir + "joints.mat")['joints']
    # roll axis because the definition order of lspet is different from lsp
    lspet_joints = np.rollaxis(lspet_joints,1,0)

    with open('../predef/mesh_joint_list.pkl', 'rb') as fp:
        mesh_joint = pickle.load(fp)
    
    count_all = 0.
    count_work = 0.
        
    # make train set
    tr = trange(10000, desc='Bar desc', leave=True)
    for i in tr:
        tr.set_description("LSPET - train part")
        tr.refresh() # to show immediately the update
        sleep(0.01)
              
        count_all += 1
        
        # judge if sil file exists, if not, skip it
        if not os.path.isfile(upi_dir + \
                              "lsp_extended/im%05d_segmentation.png"%(i+1)):
            take_notes("LSPET %05d BAN -1\n" % (i+1), "./data_log.txt")
            continue
        
        
        # read sil
        src_gt_sil = np.array(PIL.Image.open(upi_dir + \
                     "lsp_extended/im%05d_segmentation.png"%(i+1)))[:,:,0]

        # judge using filter
        result = lspet_filter(lspet_joints[:,:,i], src_gt_sil)
        if result is False:
            take_notes("LSPET %05d BAN -1\n" % (i+1), "./data_log.txt")
            continue
            
        # read ori img
        ori_img = np.array(PIL.Image.open(
                  lspet_dir + "images/im%05d.jpg"%(i+1)))
        
        # hmr predict
        verts, cam, proc_para, std_img = hmr_pred.predict(ori_img, 
                                                          True, 
                                                          src_gt_sil)
                
        # unnormalize std_img
        src_img = ((std_img+1).astype(float)/2.0*255).astype(np.uint8)
        
        # save img
        img_file = "img/LSPET_%08d.png" % (i + 1)
        PIL.Image.fromarray(src_img).save(train_dir + img_file)
        
        # process sil
        gt_sil = proc_sil(src_gt_sil, proc_para)
        
        # get proj sil
        proj_sil = renderer.silhouette(verts = verts,
                                       cam = cam,
                                       img_size = src_img.shape,
                                       norm = False)

        # make TriMesh
        mesh = make_trimesh(verts, faces, compute_vn = True)
        vert_norms = mesh.vertex_normals()

        # get joint move
        new_jv, _, joint_move, joint_posi = get_joint_move(verts, 
                                               lspet_joints[:,:,i], 
                                               proc_para,
                                               mesh_joint,
                                               unseen_mode = True)
        joint_move = joint_move.flatten()

        # joint deform
        fd_ja = fast_deform_dja(weight = 10.0)
        ja_verts = fd_ja.deform(np.asarray(verts), new_jv)
        
        
        # get achr move
        proj_sil_ja = renderer.silhouette(verts = ja_verts,
                                       norm = False)
        _, achr_verts, achr_move = get_achr_move(gt_sil, 
                                                 ja_verts, 
                                                 vert_norms,
                                                 proj_sil_ja)
        achr_posi = get_anchor_posi(achr_verts)
        
        # save sil
        compose_sil = np.stack((gt_sil, proj_sil, proj_sil_ja))
        compose_sil = np.moveaxis(compose_sil, 0, 2)
        compose_sil = PIL.Image.fromarray(compose_sil.astype(np.uint8))
        compose_sil.save(train_dir + "sil/%08d.png" % train_id)

        # save para
        proc_para['end_pt'] = proc_para['end_pt'].tolist()
        proc_para['start_pt'] = proc_para['start_pt'].tolist()
        para = {"verts": verts.tolist(),
                "vert_norms": vert_norms.tolist(),
                "proc_para": proc_para,
                "joint_move": joint_move.tolist(),
                "joint_posi": joint_posi.tolist(),
                "achr_move": achr_move.tolist(),
                "achr_posi": achr_posi.tolist(),
                "img_file": img_file,
               }
        with open(train_dir + "para/%08d.json" % train_id, 'w') as fp:
            json.dump(para, fp)
        
        take_notes("LSPET %05d TRAIN %08d\n" % (i+1, train_id),
                   "./data_log.txt")
        train_id += 1
        count_work += 1
        
    print("work ratio = %f, (%d / %d)" 
          % (count_work/count_all, count_work, count_all))
    return train_id
Example #6
# Shading Refine Start
if opt.gpu:
    torch.cuda.empty_cache()

# prepare shading refine
device = torch.device("cuda:0" if opt.gpu else "cpu")

net_shading = shading_net(init_weights=True).eval().to(device)
if opt.gpu:
    net_shading.load_state_dict(
        torch.load(shading_model, map_location='cuda:0'))
else:
    net_shading.load_state_dict(torch.load(shading_model, map_location='cpu'))

faces_smpl = np.load("../predef/smpl_faces.npy")
mesh = make_trimesh(sa_verts, faces_smpl)

proj_sil = my_renderer.silhouette(verts=mesh.points())

proj_sil_l = cv2.resize(proj_sil, dsize=(448, 448))
proj_sil_l[proj_sil_l < 0.5] = 0
proj_sil_l[proj_sil_l >= 0.5] = 1

# load data
src_img_l = cv2.resize(src_img, dsize=(448, 448))
input_arr = np.rollaxis(src_img_l, 2, 0)
input_arr = np.expand_dims(input_arr, 0)
input_arr = torch.tensor(input_arr).float().to(device)
input_arr = input_arr / 255.0

proj_sil_l = np.expand_dims(proj_sil_l, 0)
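
The forward pass itself is not part of this excerpt. A hypothetical sketch, assuming net_shading takes the RGB channels concatenated with the silhouette (the 4-channel packing is a guess, loosely suggested by the (10, 4, 64, 64) buffers in Example #9):

sil_tensor = torch.tensor(proj_sil_l).float().unsqueeze(0).to(device)  # 1 x 1 x 448 x 448
with torch.no_grad():
    shading_pred = net_shading(torch.cat((input_arr, sil_tensor), dim=1))  # assumed input layout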
Example #7
                visi_vert_inds.append(fv[0])
                visi_vert_inds.append(fv[1])
                visi_vert_inds.append(fv[2])
    visi_vert_inds = set(visi_vert_inds)
    # filter out exempt vertices
    visi_vert_inds = list(visi_vert_inds.difference(exempt_vert_list))

    visi_vert_inds_m = []
    for i in visi_vert_inds:
        xy = cam_para.project(verts[i])
        x = int(round(xy[1]))
        y = int(round(xy[0]))
        if x < 0 or y < 0 or x >= 448 or y >= 448:
            continue
        if np.absolute(proj_depth[x, y] - verts[i, 2]) < 0.01:
            visi_vert_inds_m.append(i)

    for i in visi_vert_inds_m:
        xy = cam_para.project(verts[i])
        x = int(round(xy[1]))
        y = int(round(xy[0]))
        depth = proj_depth[x, y] + pred_depth[x, y]
        #print(depth, verts[i])
        if depth > 8.:
            continue
        verts[i][2] = depth

    deformed_mesh = make_trimesh(verts, faces)
    om.write_mesh("./eval_data/%s_set/pred_save/s_%03d.obj" % \
                  (opt.set, test_num), deformed_mesh)
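
The visibility test above, factored out as a sketch; it assumes cam_para.project maps a 3D point to (u, v) pixel coordinates and that proj_depth is a 448 x 448 z-buffer, as in the snippet:

def is_visible(vert, cam_para, proj_depth, tol=0.01, size=448):
    # project the vertex and compare its depth against the rendered z-buffer
    xy = cam_para.project(vert)
    x, y = int(round(xy[1])), int(round(xy[0]))
    if x < 0 or y < 0 or x >= size or y >= size:
        return False
    return np.absolute(proj_depth[x, y] - vert[2]) < tol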
Example #8
assert opt.set in ["recon", "syn"], \
       "set must be one of [recon, syn]"

data_num = int(opt.num)
output_dir = "./eval_data/%s_set/pred_save/" % opt.set
if not os.path.exists(output_dir):
    os.makedirs(output_dir)

hmr_pred = hmr_predictor()
faces = np.load("../predef/smpl_faces.npy")

tr = trange(data_num, desc='Bar desc', leave=True)
for i in tr:
    src_img = np.array(PIL.Image.open("./eval_data/%s_set/input_masked/%03d_img.png" % \
                                      (opt.set, i)))[:,:,:3]
    verts, cam, proc_para, std_img = hmr_pred.predict(src_img)

    with open(output_dir + "pre_%03d.pkl" % i, 'wb') as fp:
        pickle.dump(
            {
                "verts": verts,
                "cam": cam,
                "proc_para": proc_para,
                "std_img": std_img,
            }, fp)

    mesh = make_trimesh(verts, faces)
    om.write_mesh("./eval_data/%s_set/pred_save/hmr_%03d.obj" % (opt.set, i),
                  mesh)
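
The pre_%03d.pkl files written here are read back in Example #9 (pred_hmd_ja.py), which unpacks verts, cam, proc_para, and std_img instead of re-running the HMR predictor.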
Example #9
File: pred_hmd_ja.py Project: Taye310/hmd
tr = trange(data_num, desc='Bar desc', leave=True)
for test_ind in tr:

    src_img = np.array(PIL.Image.open("./eval_data/%s_set/input_masked/%03d_img.png" \
                                      % (opt.set, test_ind)))

    #verts, cam, proc_para, std_img = hmr_pred.predict(src_img)
    with open('./eval_data/%s_set/pred_save/pre_%03d.pkl' % \
              (opt.set, test_ind), 'rb') as fp:
        hmr_data = pickle.load(fp)
    verts = hmr_data['verts']
    cam = hmr_data['cam']
    proc_para = hmr_data['proc_para']
    std_img = hmr_data['std_img']

    mesh = make_trimesh(verts, faces, compute_vn=True)
    vert_norms = mesh.vertex_normals()

    # get proj sil
    proj_sil = renderer.silhouette(verts=verts,
                                   cam=cam,
                                   img_size=src_img.shape,
                                   norm=True)

    # make joint posi
    joint_posi = get_joint_posi(verts, point_list)

    sil_j = np.expand_dims(proj_sil, 2)
    src_j = np.zeros((10, 4, 64, 64))
    for i in range(len(joint_posi)):
        crop_sil = center_crop(sil_j, joint_posi[i], 64)
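
center_crop is a project helper whose body is not in this excerpt. A hypothetical sketch, assuming it cuts a size x size window centered at a joint position (the real helper likely handles image borders):

def center_crop(arr, posi, size):
    # hypothetical: crop arr around posi = (x, y); border padding omitted
    half = size // 2
    cx, cy = int(round(posi[0])), int(round(posi[1]))
    return arr[cy - half:cy + half, cx - half:cx + half, :]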
Example #10
if opt.crop_y != -1:
    assert len(opt.crop_y) == 2, "crop_y must be a list with 2 elements"
    crop_img = crop_img[opt.crop_y[0]:-opt.crop_y[1], :]
if opt.pad > 0:
    crop_img = pad_arr(crop_img, 50)  # note: pads a fixed 50 px, not opt.pad
std_img, proc_para = preproc_img(crop_img, img_size=224,
                                 margin=30, normalize=True)

# initialize
hmr_pred = hmr_predictor()
renderer = SMPLRenderer()
faces = np.load("../predef/smpl_faces.npy")

# hmr predict
verts, cam, proc_para, std_img = hmr_pred.predict(std_img, normalize=False)

# build output folder if not exist
if not os.path.exists(opt.outf):
    os.makedirs(opt.outf)

# write results
result_mesh = make_trimesh(verts, faces)
om.write_mesh(opt.outf + "hmr_mesh.obj", result_mesh)

final_img = ((std_img.copy()+1)*127).astype(np.uint8)
PIL.Image.fromarray(final_img).save(opt.outf + "std_img.jpg")

print("%s - finished, results are saved to [%s]" % (opt.img, opt.outf))
print("hmr done")
Example #11
File: H36M.py Project: zhuhao-nju/hmd
def proc_h36m(train_dir, test_dir, train_id, test_id, h36m_dir):

    sample_interval = 10

    faces = np.load("../predef/smpl_faces.npy")
    face_num = len(faces)

    with open('../predef/mesh_joint_list.pkl', 'rb') as fp:
        mesh_joint = pickle.load(fp)

    hmr_pred = hmr_predictor()
    renderer = rd.SMPLRenderer(face_path="../predef/smpl_faces.npy")

    # open available video list
    with open("./h36m_list.txt") as f:
        h36m_list = f.read().split("\r\n")
    vid_num = int(h36m_list[0])
    h36m_list = [[
        h36m_list[i * 3 + 1], h36m_list[i * 3 + 2], h36m_list[i * 3 + 3]
    ] for i in range(vid_num)]

    # compute data number for training and testing
    train_num = int(vid_num * 0.8)
    test_num = vid_num - train_num
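
    # NOTE: only the test split is produced in this excerpt; train_id is returned unchanged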

    count_all = 0.
    count_work = 0.

    # make test set
    tr = trange(test_num, desc='Bar desc', leave=True)
    for i in tr:
        tr.set_description("H36M - test part")
        tr.refresh()  # to show immediately the update
        sleep(0.01)

        vid_idx = i + train_num

        # read video of image, silhouette and pose
        vid_img = cv2.VideoCapture(h36m_dir + h36m_list[vid_idx][0])
        vid_sil = cv2.VideoCapture(h36m_dir + h36m_list[vid_idx][1])
        pose_list = loadmat(h36m_dir + h36m_list[vid_idx][2])['pose']
        vid_len = min(int(vid_img.get(cv2.CAP_PROP_FRAME_COUNT)),
                      int(vid_sil.get(cv2.CAP_PROP_FRAME_COUNT)),
                      len(pose_list))

        for frm_idx in range(0, vid_len, sample_interval):

            count_all += 1

            # read sil
            vid_sil.set(cv2.CAP_PROP_POS_FRAMES, frm_idx)
            _, src_gt_sil = vid_sil.read()
            src_gt_sil[src_gt_sil < 128] = 0
            src_gt_sil[src_gt_sil >= 128] = 255
            src_gt_sil = refine_sil(src_gt_sil, 100)
            src_gt_sil = src_gt_sil[:, :, 0]

            # read ori img
            vid_img.set(cv2.CAP_PROP_POS_FRAMES, frm_idx)
            _, ori_img = vid_img.read()
            # BGR to RGB (cv2 decodes frames as BGR)
            ori_img = ori_img[:, :, ::-1].copy()

            # hmr predict
            verts, cam, proc_para, std_img = hmr_pred.predict(
                ori_img, True, src_gt_sil)

            # unnormalize std_img
            src_img = ((std_img + 1).astype(float) / 2.0 * 255).astype(
                np.uint8)

            # save img
            img_file = "img/H36M_%04d%04d.png" % (vid_idx, frm_idx)
            PIL.Image.fromarray(src_img).save(test_dir + img_file)

            # process sil
            gt_sil = proc_sil(src_gt_sil, proc_para)

            # get proj sil
            proj_sil = renderer.silhouette(verts=verts,
                                           cam=cam,
                                           img_size=src_img.shape,
                                           norm=False)

            # make TriMesh
            mesh = make_trimesh(verts, faces, compute_vn=True)
            vert_norms = mesh.vertex_normals()

            h36m_joint = transform_h36m_joints(pose_list[frm_idx])
            # get joint move
            new_jv, _, joint_move, joint_posi = get_joint_move(
                verts, h36m_joint, proc_para, mesh_joint)
            joint_move = joint_move.flatten()

            # joint deform
            fd_ja = fast_deform_dja(weight=10.0)
            ja_verts = fd_ja.deform(np.asarray(verts), new_jv)

            # get achr move
            proj_sil_ja = renderer.silhouette(verts=ja_verts, norm=False)
            _, achr_verts, achr_move = get_achr_move(gt_sil, ja_verts,
                                                     vert_norms, proj_sil_ja)
            achr_posi = get_anchor_posi(achr_verts)

            # save sil
            compose_sil = np.stack((gt_sil, proj_sil, proj_sil_ja))
            compose_sil = np.moveaxis(compose_sil, 0, 2)
            compose_sil = PIL.Image.fromarray(compose_sil.astype(np.uint8))
            compose_sil.save(test_dir + "sil/%08d.png" % test_id)

            # save para
            proc_para['end_pt'] = proc_para['end_pt'].tolist()
            proc_para['start_pt'] = proc_para['start_pt'].tolist()
            para = {
                "verts": verts.tolist(),
                "vert_norms": vert_norms.tolist(),
                "proc_para": proc_para,
                "joint_move": joint_move.tolist(),
                "joint_posi": joint_posi.tolist(),
                "achr_move": achr_move.tolist(),
                "achr_posi": achr_posi.tolist(),
                "img_file": img_file,
            }
            with open(test_dir + "para/%08d.json" % test_id, 'w') as fp:
                json.dump(para, fp)

            take_notes(
                "H36M %04d%04d TEST %08d\n" % (vid_idx, frm_idx, test_id),
                "./data_log.txt")
            test_id += 1
            count_work += 1

    print("work ratio = %f, (%d / %d)" %
          (count_work / count_all, count_work, count_all))
    return train_id, test_id