Example no. 1
def h36m_train_extract(dataset_path, out_path, extract_img=False):

    # convert joints to global order
    h36m_idx = [11, 6, 7, 8, 1, 2, 3, 12, 24, 14, 15, 17, 18, 19, 25, 26, 27]
    global_idx = [14, 3, 4, 5, 2, 1, 0, 16, 12, 17, 18, 9, 10, 11, 8, 7, 6]

    # structs we use
    imgnames_, scales_, centers_, parts_, Ss_ = [], [], [], [], []

    #additional 3D
    poses_, shapes_, skel3D_, has_smpl_ = [], [], [], []

    # users in the training set
    user_list = [1, 5, 6, 7, 8]

    # go over each user
    for user_i in user_list:
        user_name = 'S%d' % user_i

        # bbox_path = os.path.join(dataset_path, user_name, 'MySegmentsMat', 'ground_truth_bb')
        # path with GT 3D pose
        pose_path = os.path.join(dataset_path, user_name,
                                 'Poses_D3_Positions_mono')
        # path with GT 2D pose
        pose2d_path = os.path.join(dataset_path, user_name,
                                   'Poses_D2_Positions')
        # path with videos
        vid_path = os.path.join(dataset_path, user_name, 'Videos')

        # go over all the sequences of each user
        seq_list = glob.glob(os.path.join(pose_path, '*.cdf'))
        seq_list.sort()
        for seq_i in seq_list:

            print('processing: {}'.format(seq_i))
            # sequence info
            seq_name = seq_i.split('/')[-1]
            action, camera, _ = seq_name.split('.')
            action = action.replace(' ', '_')
            # irrelevant sequences
            if action == '_ALL':
                continue

            # 3D pose file
            poses_3d = pycdf.CDF(seq_i)['Pose'][0]  #Nx96

            #load 2D pose file
            seq_i_2D_pose = os.path.join(pose2d_path, os.path.basename(seq_i))
            poses_2d = pycdf.CDF(seq_i_2D_pose)['Pose'][0]  #Nx64
            poses_2d = np.reshape(poses_2d, (-1, 32, 2))

            # # bbox file
            # bbox_file = os.path.join(bbox_path, seq_name.replace('cdf', 'mat'))
            # bbox_h5py = h5py.File(bbox_file)

            # video file
            if extract_img:
                vid_file = os.path.join(vid_path,
                                        seq_name.replace('cdf', 'mp4'))
                imgs_path = os.path.join(dataset_path, 'images')
                vidcap = cv2.VideoCapture(vid_file)

            # go over each frame of the sequence
            for frame_i in range(poses_3d.shape[0]):
                # read video frame
                if extract_img:
                    success, image = vidcap.read()
                    if not success:
                        break

                # keep every 5th frame
                if frame_i % 5 == 0:
                    # image name
                    imgname = '%s_%s.%s_%06d.jpg' % (user_name, action, camera,
                                                     frame_i + 1)

                    # save the video frame as an image file
                    if extract_img:
                        if not os.path.isdir(imgs_path):
                            os.makedirs(imgs_path)
                        img_out = os.path.join(imgs_path, imgname)
                        cv2.imwrite(img_out, image)
                    # read GT 3D pose
                    Sall = np.reshape(poses_3d[frame_i, :],
                                      [-1, 3]) / 1000.  #[32,3]
                    S17 = Sall[h36m_idx]
                    S17 -= S17[0]  # root-centered
                    S24 = np.zeros([24, 4])
                    S24[global_idx, :3] = S17
                    S24[global_idx, 3] = 1

                    # # read GT bounding box
                    # mask = bbox_h5py[bbox_h5py['Masks'][frame_i,0]].value.T
                    # ys, xs = np.where(mask==1)
                    # bbox = np.array([np.min(xs), np.min(ys), np.max(xs)+1, np.max(ys)+1])

                    curPose_2d = poses_2d[frame_i, :]
                    min_pt = np.min(curPose_2d, axis=0)
                    max_pt = np.max(curPose_2d, axis=0)
                    bbox = [min_pt[0], min_pt[1], max_pt[0], max_pt[1]]

                    #Skeleton
                    if False:
                        from renderer import glViewer
                        from renderer import viewer2D
                        image = viewer2D.Vis_Bbox_minmaxPt(
                            image, min_pt, max_pt)
                        viewer2D.ImShow(image, waitTime=1)

                        S17s_ = np.array(S17) * 100
                        skelVis = S17s_.ravel()[:, np.newaxis]

                        glViewer.setSkeleton([skelVis])
                        glViewer.show()
                        continue

                    center = [(bbox[2] + bbox[0]) / 2, (bbox[3] + bbox[1]) / 2]
                    scale = 0.9 * max(bbox[2] - bbox[0],
                                      bbox[3] - bbox[1]) / 200.

                    # store data
                    imgnames_.append(os.path.join('images', imgname))
                    centers_.append(center)
                    scales_.append(scale)
                    Ss_.append(S24)

    # store the data struct
    if not os.path.isdir(out_path):
        os.makedirs(out_path)
    out_file = os.path.join(out_path, 'h36m_train.npz')
    np.savez(out_file,
             imgname=imgnames_,
             center=centers_,
             scale=scales_,
             part=parts_,  # note: parts_ is never filled in this exporter
             S=Ss_)
Example no. 2
def exportOursToSpin(cocoPose3DAll, out_path):

    scaleFactor = 1.2


    # structs we need
    imgnames_, scales_, centers_, parts_, openposes_ = [], [], [], [], []

    #additional 3D
    poses_, shapes_, skel3D_, has_smpl_ = [], [], [], []


    # for imgSample in cocoPose3DAll:
    imgNum = len(cocoPose3DAll)
    totalSampleNum = [ len(cocoPose3DAll[imgSample]) for imgSample in cocoPose3DAll ]
    totalSampleNum = sum(totalSampleNum)

    print("\n\n### ImageNum: {}, SampleNum: {} ###".format(imgNum, totalSampleNum))
    for key_imgId, imgSample in sorted(cocoPose3DAll.items()):
        #load image
        imgPathFull = imgSample[0]['imgId']           
        fileName = os.path.basename(imgPathFull)
        fileName_saved = os.path.join(os.path.basename(os.path.dirname(imgPathFull)), fileName) #start from train2014


        for sample in imgSample:

            validJointNum = np.sum(sample['pose2D_validity'][::2])

            if validJointNum<4:
                continue

            if np.isnan(sample['pose3DParam']['camScale']):     
                continue

            gt_skel = np.reshape(sample['pose2D_gt'],(26,-1))       #(26,2) This is from data
            gt_validity = np.reshape(sample['pose2D_validity'],(26,-1))     #(26,2)

            
            # Filtering ########################################################################################################
            if True:
                requiredJoints= [0,1,2, 3,4,5, 6,7,8, 9,10,11]      #In Total26
                if not np.all(gt_validity[requiredJoints, 0]):
                    continue

            min_pt = np.min(gt_skel[gt_validity[:,0]], axis=0)
            max_pt = np.max(gt_skel[gt_validity[:,0]], axis=0)
            # bbox= [ min_pt[0], min_pt[1], max_pt[0], max_pt[1] ]
            bbox= [ min_pt[0], min_pt[1], max_pt[0] - min_pt[0], max_pt[1] - min_pt[1]]


            center = [bbox[0] + bbox[2]/2, bbox[1] + bbox[3]/2]
            scale = scaleFactor*max(bbox[2], bbox[3])/200


            #Save data
            imgnames_.append(os.path.join('images',fileName_saved)) 
            openposes_.append(np.zeros([25,3]))       #blank
            centers_.append(center)
            scales_.append(scale)
            has_smpl_.append(1)
            poses_.append(sample['pose3DParam']['pose'])        #(72,)
            shapes_.append(sample['pose3DParam']['shape'])       #(10,)


            #2D keypoints (total26 -> SPIN24)
            poseidx_spin24 = [0,1,2,  3,4,5, 6,7,8,  9,10,11, 19,20,21,22,23] 
            poseidx_total26 =  [0,1,2,  3,4,5,  6,7,8,  9,10,11,  14, 15, 16, 17, 18  ]
            part = np.zeros([24,3])
            part[poseidx_spin24,:2] = gt_skel[poseidx_total26] #(17,2), totalGT26 order
            part[poseidx_spin24,2] = 1*gt_validity[poseidx_total26,0]   
            parts_.append(part)

            #3D joint
            S = np.zeros([24,4])
            S[poseidx_spin24,:3] = sample['pose3D_pred'][poseidx_total26,:]  * 0.001     #Scaling skeleton 3D (currently mm) -> meter
            S[poseidx_spin24,3] = 1
            
            skel3D_.append(S)

            #Debug 2D Visualize
            if False:
                img = cv2.imread( os.path.join( '/run/media/hjoo/disk/data/mpii_human_pose_v1/images',imgnames_[-1]) )
                img = viewer2D.Vis_Skeleton_2D_smplCOCO(gt_skel, pt2d_visibility = gt_validity[:,0], image =img)
                img = viewer2D.Vis_Bbox_minmaxPt(img, min_pt, max_pt)
                viewer2D.ImShow(img, waitTime=0)  

            #Debug 3D Visualize smpl_coco
            if False:
                # data3D_coco_vis = np.reshape(data3D_coco, (data3D_coco.shape[0],-1)).transpose()   #(Dim, F)
                # data3D_coco_vis *=0.1   #mm to cm
                # glViewer.setSkeleton( [ data3D_coco_vis] ,jointType='smplcoco')
                # glViewer.show()

                #Debug 3D Visualize, h36m
                data3D_h36m_vis = np.reshape(data3D_h36m, (data3D_h36m.shape[0],-1)).transpose()   #(Dim, F)
                data3D_h36m_vis *= 0.001   #mm to meter

                # data3D_smpl24 = np.reshape(data3D_smpl24, (data3D_smpl24.shape[0],-1)).transpose()   #(Dim, F)
                # data3D_smpl24 *=0.1

                glViewer.setSkeleton( [ data3D_h36m_vis]  ,jointType='smplcoco')
                glViewer.show()


            # keypoints

    # print("Final Img Num: {}, Final Sample Num: {}".format( len(set(imgnames_) , len(imgnames_)) ) )
    print("Final Sample Num: {}".format( len(imgnames_)))
    # store the data struct
    if not os.path.isdir(out_path):
        os.makedirs(out_path)
    # out_file = os.path.join(out_path, '1031-mpii3D_train_44257_all.npz')
    out_file = os.path.join(out_path, '1031-mpii3D_train_44257_validlimbs.npz')

    np.savez(out_file,
             imgname=imgnames_,
             center=centers_,
             scale=scales_,
             part=parts_,
             openpose=openposes_,
             pose=poses_,
             shape=shapes_,
             has_smpl=has_smpl_,
             S=skel3D_)
Example no. 3
def LoadAllH36mdata_wSMPL_perSeq(out_path):
    # data_dir = '/home/hjoo/data/h36m-fetch/human36m_50fps/'
    # data_dir = '/home/hjoo/data/h36m-fetch/human36m_10fps/'

    list_skel2Ds_h36m = []
    list_skel3Ds_h36m = []
    list_smplPose = []
    list_smplShape = []
    list_openpose = []

    list_imgNames = []
    list_scale = []
    list_center = []

    # list_joint2d_spin24 = []
    # list_joint3d_spin24 = []

    TRAIN_SUBJECTS = [1, 5, 6, 7, 8]
    actionList = [
        "Directions", "Discussion", "Eating", "Greeting", "Phoning", "Photo",
        "Posing", "Purchases", "Sitting", "SittingDown", "Smoking", "Waiting",
        "WalkDog", "Walking", "WalkTogether"
    ]

    subjectList = TRAIN_SUBJECTS

    for subId in subjectList:
        for action in actionList:

            gtPathList = sorted(
                glob.glob('{}/S{}/{}_*/*/gt_poses_coco_smpl.pkl'.format(
                    h36mraw_dir, subId, action)))

            print("S{} - {}: {} files".format(subId, action, len(gtPathList)))

            for gtPath in gtPathList:
                with open(gtPath, 'rb') as f:
                    gt_data = pickle.load(f, encoding='latin1')

                #Get Image List
                imgDir = os.path.dirname(gtPath)
                imgList_original = sorted(
                    glob.glob(os.path.join(imgDir, '*.png')))
                folderLeg = len(h36mraw_dir) + 1
                imgList = [n[folderLeg:] for n in imgList_original]
                data2D_h36m = np.array(gt_data['2d'])  #List -> (N,17,2)
                data3D_h36m = np.array(gt_data['3d'])  #List -> (N,17,3)
                data3D_smplParams_pose = np.array(
                    gt_data['smplParms']['poses_camCoord'])  #List -> (N,72)
                data3D_smplParams_shape = np.array(
                    gt_data['smplParms']['betas'])  #(10,)

                N = data3D_smplParams_pose.shape[0]
                data3D_smplParams_shape = np.repeat(
                    data3D_smplParams_shape[np.newaxis, :], N,
                    axis=0)  #List -> (N,10)

                #Scaling skeleton 3D (currently mm) -> meter
                data3D_h36m *= 0.001
                #optional (centering)
                data3D_h36m = data3D_h36m - data3D_h36m[:, 0:1, :]

                scalelist = []
                centerlist = []
                bboxlist = []
                #Generate BBox
                for i in range(len(data2D_h36m)):
                    min_pt = np.min(data2D_h36m[i], axis=0)
                    max_pt = np.max(data2D_h36m[i], axis=0)
                    bbox = [min_pt[0], min_pt[1], max_pt[0], max_pt[1]]
                    center = [(bbox[2] + bbox[0]) / 2, (bbox[3] + bbox[1]) / 2]
                    scale = scaleFactor * max(bbox[2] - bbox[0],
                                              bbox[3] - bbox[1]) / 200

                    bboxlist.append(bbox)
                    centerlist.append(center)
                    scalelist.append(scale)

                    # NOTE: this debug block references variables (imgFullPath,
                    # pred_camera_vis, ours_vertices, ...) that are not defined
                    # in this function; keep it disabled.
                    bDraw = False
                    if bDraw:
                        rawImg = cv2.imread(imgFullPath)

                        # bbox_xyxy = conv_bboxinfo_centerscale_to_bboxXYXY(center, scale)
                        # rawImg = viewer2D.Vis_Bbox_minmaxPt(rawImg,bbox_xyxy[:2], bbox_xyxy[2:])
                        croppedImg, boxScale_o2n, bboxTopLeft = crop_bboxInfo(
                            rawImg, center, scale,
                            (constants.IMG_RES, constants.IMG_RES))

                        #Visualize image
                        if False:
                            rawImg = viewer2D.Vis_Skeleton_2D_SPIN49(
                                data['keypoint2d'][0][:, :2],
                                pt2d_visibility=data['keypoint2d'][0][:, 2],
                                image=rawImg)
                            viewer2D.ImShow(rawImg, name='rawImg')
                            viewer2D.ImShow(croppedImg, name='croppedImg')

                        b = 0
                        ############### Visualize Mesh ###############
                        camParam_scale = pred_camera_vis[b, 0]
                        camParam_trans = pred_camera_vis[b, 1:]
                        pred_vert_vis = ours_vertices[b].copy()
                        pred_vert_vis = convert_smpl_to_bbox(
                            pred_vert_vis, camParam_scale, camParam_trans)

                        #From cropped space to original
                        pred_vert_vis = convert_bbox_to_oriIm(
                            pred_vert_vis, boxScale_o2n, bboxTopLeft,
                            rawImg.shape[1], rawImg.shape[0])

                        #Generate multi-level BBOx
                        bbox_list = multilvel_bbox_crop_gen(rawImg,
                                                            pred_vert_vis,
                                                            center,
                                                            scale,
                                                            bDebug=False)

                        if False:
                            pred_meshes = {
                                'ver': pred_vert_vis,
                                'f': smpl.faces
                            }
                            glViewer.setMeshData([pred_meshes],
                                                 bComputeNormal=True)

                            # ################ Visualize Skeletons ###############
                            #Vis pred-SMPL joint
                            pred_joints_vis = ours_joints_3d[
                                b, :, :3].copy()  #(N,3)
                            pred_joints_vis = convert_smpl_to_bbox(
                                pred_joints_vis, camParam_scale,
                                camParam_trans)
                            pred_joints_vis = convert_bbox_to_oriIm(
                                pred_joints_vis, boxScale_o2n, bboxTopLeft,
                                rawImg.shape[1], rawImg.shape[0])

                            glViewer.setBackgroundTexture(rawImg)
                            glViewer.setWindowSize(rawImg.shape[1],
                                                   rawImg.shape[0])
                            glViewer.SetOrthoCamera(True)
                            glViewer.show(1)

                assert len(imgList) == len(data2D_h36m)
                assert len(imgList) == len(data3D_h36m)
                assert len(imgList) == len(data3D_smplParams_pose)
                assert len(imgList) == len(data3D_smplParams_shape)
                assert len(imgList) == len(scalelist)
                assert len(imgList) == len(centerlist)
                assert len(imgList) == len(bboxlist)

                list_skel2Ds_h36m.append(data2D_h36m)
                list_skel3Ds_h36m.append(data3D_h36m)
                list_smplPose.append(data3D_smplParams_pose)
                list_smplShape.append(data3D_smplParams_shape)

                list_imgNames += imgList
                list_scale += scalelist
                list_center += centerlist

                blankopenpose = np.zeros([N, 25, 3])
                list_openpose.append(blankopenpose)

                #Debug 2D Visualize (enable manually; blocks on ImShow per frame)
                if False:
                    for idx in range(data2D_h36m.shape[0]):
                        img = cv2.imread(imgList_original[idx])
                        img = viewer2D.Vis_Skeleton_2D_H36m(data2D_h36m[idx],
                                                            image=img)
                        img = viewer2D.Vis_Bbox_minmaxPt(
                            img, bboxlist[idx][:2], bboxlist[idx][2:])
                        viewer2D.ImShow(img)

                #Debug 3D Visualize smpl_coco
                if False:
                    # data3D_coco_vis = np.reshape(data3D_coco, (data3D_coco.shape[0],-1)).transpose()   #(Dim, F)
                    # data3D_coco_vis *=0.1   #mm to cm
                    # glViewer.setSkeleton( [ data3D_coco_vis] ,jointType='smplcoco')
                    # glViewer.show()

                    #Debug 3D Visualize, h36m
                    data3D_h36m_vis = np.reshape(
                        data3D_h36m,
                        (data3D_h36m.shape[0], -1)).transpose()  #(Dim, F)
                    data3D_h36m_vis *= 100  #meter to cm

                    # data3D_smpl24 = np.reshape(data3D_smpl24, (data3D_smpl24.shape[0],-1)).transpose()   #(Dim, F)
                    # data3D_smpl24 *=0.1

                    glViewer.setSkeleton([data3D_h36m_vis],
                                         jointType='smplcoco')
                    glViewer.show()

            #     break   #debug
            # break       #debug

    list_skel2Ds_h36m = np.vstack(
        list_skel2Ds_h36m)  #List of (N,17,2) ->  (NM, 17, 2)
    list_skel3Ds_h36m = np.vstack(
        list_skel3Ds_h36m)  #List of (N,17,3) ->  (NM, 17, 3)
    list_smplPose = np.vstack(list_smplPose)  #List of (N,72) ->  (NM, 72)
    list_smplShape = np.vstack(list_smplShape)  #List of (N,10) ->  (NM, 10)
    list_openpose = np.vstack(list_openpose)  #List of (N,25,3) ->  (NM, 25, 3)

    assert len(list_imgNames) == list_skel2Ds_h36m.shape[0]
    assert len(list_imgNames) == list_skel3Ds_h36m.shape[0]
    assert len(list_imgNames) == list_smplPose.shape[0]
    assert len(list_imgNames) == list_smplShape.shape[0]
    assert len(list_imgNames) == list_openpose.shape[0]

    assert len(list_imgNames) == len(list_scale)
    assert len(list_imgNames) == len(list_center)

    #Convert H36M -> SPIN24
    # convert joints to global order
    # h36m_idx = [11, 6, 7, 8, 1, 2, 3, 12, 24, 14, 15, 17, 18, 19, 25, 26, 27]
    h36m_idx = [0, 4, 5, 6, 1, 2, 3, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
    global_idx = [14, 3, 4, 5, 2, 1, 0, 16, 12, 17, 18, 9, 10, 11, 8, 7, 6]

    sampleNum = len(list_imgNames)
    joint2d_spin24 = np.zeros((sampleNum, 24, 3))
    joint2d_spin24[:, global_idx, :2] = list_skel2Ds_h36m[:, h36m_idx, :]
    joint2d_spin24[:, global_idx, 2] = 1

    joint3d_spin24 = np.zeros((sampleNum, 24, 4))
    joint3d_spin24[:, global_idx, :3] = list_skel3Ds_h36m[:, h36m_idx, :]
    joint3d_spin24[:, global_idx, 3] = 1

    list_has_smpl = np.ones((sampleNum, ), dtype=np.uint8)

    # store the data struct
    if not os.path.isdir(out_path):
        os.makedirs(out_path)

    out_file = os.path.join(out_path, 'h36m_training_fair_meter.npz')
    print("output: {}".format(out_file))
    np.savez(out_file,
             imgname=list_imgNames,
             center=list_center,
             scale=list_scale,
             part=joint2d_spin24,
             pose=list_smplPose,
             shape=list_smplShape,
             has_smpl=list_has_smpl,
             S=joint3d_spin24,
             openpose=list_openpose)
Example no. 4
def exportOursToSpin(out_path):

    scaleFactor = 1.2

    imgDir = '/run/media/hjoo/disk/data/panoptic_mtc/a4_release/hdImgs/'

    with open(
            '/run/media/hjoo/disk/data/panoptic_mtc/a4_release/annotation.pkl',
            'rb') as f:
        data = pickle.load(f)

    with open(
            '/run/media/hjoo/disk/data/panoptic_mtc/a4_release/camera_data.pkl',
            'rb') as f:
        cam = pickle.load(f)

    # structs we need
    imgnames_, scales_, centers_, parts_, openposes_ = [], [], [], [], []

    #additional 3D
    poses_, shapes_, skel3D_, has_smpl_ = [], [], [], []

    rhand_2d_list, rhand_3d_list, lhand_2d_list, lhand_3d_list = [], [], [], []

    for training_testing, mode_data in data.items():
        len_mode = len(mode_data)
        for i, sample in enumerate(tqdm(mode_data)):
            seqName = sample['seqName']
            # print(seqName)
            frame_str = sample['frame_str']
            frame_path = '{}/{}'.format(seqName, frame_str)

            assert 'body' in sample

            if 'right_hand' not in sample or 'left_hand' not in sample:
                continue

            body_landmark = np.array(sample['body']['landmarks']).reshape(
                -1, 3)  #(19,3)  SMC19 order

            rhand_landmark = np.array(
                sample['right_hand']['landmarks']).reshape(-1, 3)  #(21,3)
            lhand_landmark = np.array(
                sample['left_hand']['landmarks']).reshape(-1, 3)  #(21,3)

            # project the skeleton to each HD camera viewpoint (top views excluded below)

            for c in range(0, 30):

                if c in [1, 2, 4, 6, 7, 13, 17, 19, 28]:  #Exclude top views
                    continue

                calib_data = cam[seqName][c]

                skeleton_3d_camview = applyExtrinsic(body_landmark,
                                                     calib_data)  #19,3
                rhand_3d_camview = applyExtrinsic(rhand_landmark,
                                                  calib_data)  #21,3
                lhand_3d_camview = applyExtrinsic(lhand_landmark,
                                                  calib_data)  #21,3

                skeleton_2d = project2D(body_landmark, calib_data)  #19,2
                rhand_2d = project2D(rhand_landmark, calib_data)  #21,2
                lhand_2d = project2D(lhand_landmark, calib_data)  #21,2

                imgName = os.path.join(frame_path,
                                       '00_{:02d}_{}.jpg'.format(c, frame_str))

                # print(imgName)
                imgFullPath = os.path.join(imgDir, imgName)
                if not os.path.exists(imgFullPath):
                    continue
                # print(imgName)

                #Visualize 3D
                if False:
                    img = cv2.imread(imgFullPath)
                    # img = viewer2D.Vis_Skeleton_2D_SMC19(skeleton_2d, image=img)
                    # viewer2D.ImShow(img, waitTime=1)

                    skeleton_3d_camview = skeleton_3d_camview.ravel()[:, np.newaxis]
                    rhand_3d_camview = rhand_3d_camview.ravel()[:, np.newaxis]
                    lhand_3d_camview = lhand_3d_camview.ravel()[:, np.newaxis]
                    glViewer.setSkeleton([
                        skeleton_3d_camview, rhand_3d_camview, lhand_3d_camview
                    ])

                    glViewer.setBackgroundTexture(img)
                    glViewer.SetOrthoCamera(True)
                    glViewer.show(0)

                min_pt = np.min(skeleton_2d, axis=0)
                min_pt[0] = max(min_pt[0], 0)
                min_pt[1] = max(min_pt[1], 0)

                max_pt = np.max(skeleton_2d, axis=0)
                max_pt[0] = min(max_pt[0], 1920)
                max_pt[1] = min(max_pt[1], 1080)
                # bbox= [ min_pt[0], min_pt[1], max_pt[0], max_pt[1] ]
                bbox = [
                    min_pt[0], min_pt[1], max_pt[0] - min_pt[0],
                    max_pt[1] - min_pt[1]
                ]

                center = [bbox[0] + bbox[2] / 2, bbox[1] + bbox[3] / 2]
                scale = scaleFactor * max(bbox[2], bbox[3]) / 200

                #Save data
                # imgnames_.append(os.path.join('train',fileName_saved))
                # imgnames_.append(os.path.join('train',fileName_saved))
                imgnames_.append(imgName)
                openposes_.append(np.zeros([25, 3]))  #blank
                centers_.append(center)
                scales_.append(scale)
                # has_smpl_.append(1)
                # poses_.append(sample['pose3DParam']['pose'])        #(72,)
                # shapes_.append(sample['pose3DParam']['shape'])       #(10,)

                #2D keypoints (total26 -> SPIN24)
                poseidx_spin24 = [
                    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 14, 12, 19, 20, 22,
                    21, 23
                ]
                poseidx_smc19 = [
                    14, 13, 12, 6, 7, 8, 11, 10, 9, 3, 4, 5, 2, 0, 1, 15, 16,
                    17, 18
                ]
                part = np.zeros([24, 3])
                part[poseidx_spin24, :2] = skeleton_2d[
                    poseidx_smc19]  #(19,2), SMC19 order
                part[poseidx_spin24, 2] = 1
                parts_.append(part)

                #3D joint
                S = np.zeros([24, 4])
                S[poseidx_spin24, :3] = skeleton_3d_camview[
                    poseidx_smc19, :] * 0.01  #Scaling skeleton 3D (currently cm) -> meter
                S[poseidx_spin24, 3] = 1

                skel3D_.append(S)

                rhand_2d_list.append(rhand_2d)
                rhand_3d_list.append(rhand_3d_camview * 0.01)

                lhand_2d_list.append(lhand_2d)
                lhand_3d_list.append(lhand_3d_camview * 0.01)

                #Add hand joints

                #Debug 2D Visualize
                if False:
                    img = cv2.imread(imgFullPath)
                    # img = cv2.imread( os.path.join( '/run/media/hjoo/disk/data/mpii_human_pose_v1/images',imgnames_[-1]) )
                    img = viewer2D.Vis_Skeleton_2D_SMC19(skeleton_2d,
                                                         image=img)
                    img = viewer2D.Vis_Skeleton_2D_Hand(rhand_2d, image=img)
                    img = viewer2D.Vis_Skeleton_2D_Hand(lhand_2d, image=img)

                    img = viewer2D.Vis_Bbox_minmaxPt(img, min_pt, max_pt)
                    viewer2D.ImShow(img, waitTime=0)

                #Debug 3D Visualize smpl_coco
                if False:
                    # data3D_coco_vis = np.reshape(data3D_coco, (data3D_coco.shape[0],-1)).transpose()   #(Dim, F)
                    # data3D_coco_vis *=0.1   #mm to cm
                    # glViewer.setSkeleton( [ data3D_coco_vis] ,jointType='smplcoco')
                    # glViewer.show()

                    #Debug 3D Visualize, h36m
                    data3D_h36m_vis = np.reshape(
                        data3D_h36m,
                        (data3D_h36m.shape[0], -1)).transpose()  #(Dim, F)
                    data3D_h36m_vis *= 0.001  #mm to meter

                    # data3D_smpl24 = np.reshape(data3D_smpl24, (data3D_smpl24.shape[0],-1)).transpose()   #(Dim, F)
                    # data3D_smpl24 *=0.1

                    glViewer.setSkeleton([data3D_h36m_vis],
                                         jointType='smplcoco')
                    glViewer.show()

    # print("Final Img Num: {}, Final Sample Num: {}".format( len(set(imgnames_) , len(imgnames_)) ) )
    print("Final Sample Num: {}".format(len(imgnames_)))
    # store the data struct
    if not os.path.isdir(out_path):
        os.makedirs(out_path)
    # out_file = os.path.join(out_path, '1031-mpii3D_train_44257_all.npz')
    out_file = os.path.join(out_path, 'panopticDB.npz')

    print(f"Save to {out_file}")

    np.savez(out_file,
             imgname=imgnames_,
             center=centers_,
             scale=scales_,
             part=parts_,
             openpose=openposes_,
             S=skel3D_,
             rhand_3d=rhand_3d_list,
             rhand_2d=rhand_2d_list,
             lhand_3d=lhand_3d_list,
             lhand_2d=lhand_2d_list)
Example no. 5
def pw3d_extract(dataset_path, out_path):

    pw3d_multicrop_info = {}
    # pw3d_crop_info[ (image_name, pid)]
    # E.g., ('downtown_sitOnStairs_00/image_00000.jpg', 0)
    # return an array with 8 level of bbox (0 face, 7 whole body)
    # bbox_list[0]
    # {'bbox_xyhw': [995.412413595738, 374.69671840965594, 98.54587305319353, 81.94162583240131], 'center': [1044.6853501223347, 415.6675313258566], 'ratio_bbox_over_face': 1.0, 'scale': 0.5912752383191612}

    # scale factor
    scaleFactor = 1.2

    # structs we use
    imgnames_, scales_, centers_, parts_ = [], [], [], []
    poses_, shapes_, genders_ = [], [], []
    multilevel_bboxinfo_ = []
    # get a list of .pkl files in the directory
    dataset_path = os.path.join(dataset_path, 'sequenceFiles', 'test')
    files = [
        os.path.join(dataset_path, f) for f in os.listdir(dataset_path)
        if f.endswith('.pkl')
    ]
    # go through all the .pkl files
    for filename in files:
        with open(filename, 'rb') as f:
            print(f"processing: {filename}")
            data = pickle.load(f, encoding='latin1')
            smpl_pose = data['poses']
            smpl_betas = data['betas']
            poses2d = data['poses2d']  #(N, 3, 18)
            global_poses = data['cam_poses']
            genders = data['genders']
            valid = np.array(data['campose_valid']).astype(bool)  # np.bool is removed in recent NumPy

            # if False:        #Temporal. To export all 3DPW data
            #     for ii in range(len(valid)):
            #         valid[ii][:] =True

            num_people = len(smpl_pose)
            num_frames = len(smpl_pose[0])
            seq_name = str(data['sequence'])
            img_names = np.array([
                'imageFiles/' + seq_name + '/image_%s.jpg' % str(i).zfill(5)
                for i in range(num_frames)
            ])
            smpl_trans = data['trans']

            # get through all the people in the sequence
            for p_id in range(num_people):

                valid_pose = smpl_pose[p_id][valid[p_id]]
                valid_betas = np.tile(smpl_betas[p_id][:10].reshape(1, -1),
                                      (num_frames, 1))
                valid_betas = valid_betas[valid[p_id]]
                valid_keypoints_2d = poses2d[p_id][valid[p_id]]
                valid_img_names = img_names[valid[p_id]]
                valid_global_poses = global_poses[valid[p_id]]
                valid_smpl_trans = smpl_trans[p_id][valid[p_id]]

                gender = genders[p_id]

                assert (gender == 'm')

                # consider only valid frames
                for valid_i in tqdm(range(valid_pose.shape[0])):
                    part = valid_keypoints_2d[valid_i, :, :].T
                    cur_img_name = valid_img_names[valid_i]

                    #Disable lower bodies (openpose COCO18 index)
                    # part[ [9,10,12,13], 2] = 0      #Upper body only  by ignoring
                    # bHeadOnly = False
                    # if bHeadOnly:
                    #     part[ [4,7, 3,6, 8, 11], 2] = 0

                    target_joint = [
                        10, 9, 8, 11, 12, 13, 4, 3, 2, 5, 6, 7, 0, 0
                    ]  #From VIBE
                    valid_joint_cnt = part[target_joint, 2] > 0.3
                    valid_joint_cnt[12:] = 0

                    if vibe_protocol and sum(
                            valid_joint_cnt) <= 6:  #Following VIBE's prop
                        # reject_cnt+=1
                        continue

                    part = part[part[:, 2] > 0, :]
                    # Tight bbox from keypoints: minX, minY, maxX, maxY
                    bbox = [
                        min(part[:, 0]), min(part[:, 1]),
                        max(part[:, 0]), max(part[:, 1])
                    ]
                    # if bHeadOnly:       #To cover head top
                    #     bbox[1] -= abs(bbox[3] - bbox[1])
                    center = [(bbox[2] + bbox[0]) / 2, (bbox[3] + bbox[1]) / 2]
                    scale = scaleFactor * max(bbox[2] - bbox[0],
                                              bbox[3] - bbox[1]) / 200

                    # transform global pose
                    pose = valid_pose[valid_i].copy()
                    extrinsics = valid_global_poses[valid_i][:3, :3]
                    pose[:3] = cv2.Rodrigues(
                        np.dot(extrinsics,
                               cv2.Rodrigues(pose[:3])[0]))[0].T[0]

                    imgnames_.append(valid_img_names[valid_i])
                    centers_.append(center)
                    scales_.append(scale)
                    poses_.append(pose)
                    shapes_.append(valid_betas[valid_i])
                    genders_.append(gender)

                    poseidx_spin24 = [
                        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 19, 20, 21, 22,
                        23
                    ]
                    poseidx_openpose18 = [
                        10, 9, 8, 11, 12, 13, 4, 3, 2, 5, 6, 7, 0, 15, 14, 17,
                        16
                    ]
                    part = np.zeros([24, 3])
                    openpose_pt2d = valid_keypoints_2d[valid_i, :, :].T  #18,3
                    part[poseidx_spin24, :] = openpose_pt2d[
                        poseidx_openpose18, :]
                    part[poseidx_spin24,
                         2] = 1 * (part[poseidx_spin24, 2] > 0.3)
                    parts_.append(part)

                    #2D keypoints (total26 -> SPIN24)
                    if False:

                        imgName = os.path.join(
                            '/run/media/hjoo/disk/data/3dpw',
                            valid_img_names[valid_i])
                        rawImg = cv2.imread(imgName)
                        # viewer2D.ImShow(rawImg)
                        # rawImg = viewer2D.Vis_Bbox_minmaxPt(rawImg, bbox[:2], bbox[2:])
                        # rawImg = viewer2D.Vis_Skeleton_2D_Openpose18(openpose_pt2d[:,:2].ravel(), image= rawImg, pt2d_visibility=openpose_pt2d[:,2]>0.2)
                        # rawImg = viewer2D.Vis_Skeleton_2D_Openpose18(openpose_pt2d[:,:2].ravel(), image= rawImg, pt2d_visibility=openpose_pt2d[:,2]>0.2)
                        # rawImg = viewer2D.Vis_Skeleton_2D_SPIN49(openpose_pt2d[:,:2], image= rawImg, pt2d_visibility=openpose_pt2d[:,2]>0.2)
                        # viewer2D.ImShow(rawImg)

                    #Project the SMPL mesh to 2D and generate multi-level bboxes
                    bDebugVis = False
                    if True:
                        imgName = os.path.join(
                            '/run/media/hjoo/disk/data/3dpw',
                            valid_img_names[valid_i])
                        rawImg = cv2.imread(imgName)

                        bbox_list = []
                        # cam_ext = data['cam_poses'][valid_i]    #4x4
                        cam_int = data['cam_intrinsics']  #3x3

                        import torch
                        import glViewer
                        valid_betas_vis = torch.from_numpy(
                            shapes_[-1][np.newaxis, :]).float()
                        valid_pose_vis = torch.from_numpy(
                            valid_pose[valid_i].copy()[np.newaxis, :]).float()
                        smpl_out = smpl_male(
                            betas=valid_betas_vis,
                            body_pose=valid_pose_vis[:, 3:],
                            global_orient=valid_pose_vis[:, :3])

                        ours_vertices = smpl_out.vertices.detach().cpu().numpy(
                        )[0]
                        ours_vertices += valid_smpl_trans[valid_i]

                        #Projection
                        ver_3d_camview = np.matmul(
                            valid_global_poses[valid_i, :3, :3],
                            ours_vertices.transpose()).transpose(
                            ) + valid_global_poses[valid_i, :3, 3]
                        ver_2d = np.matmul(
                            cam_int, ver_3d_camview.transpose()).transpose()
                        ver_2d[:, 0] = ver_2d[:, 0] / ver_2d[:, 2]
                        ver_2d[:, 1] = ver_2d[:, 1] / ver_2d[:, 2]
                        ver_2d = ver_2d[:, :2]

                        #Find face bbox, tight human bbox
                        bbox_xyxy_full = np.array([
                            min(ver_2d[:, 0]),
                            min(ver_2d[:, 1]),
                            max(ver_2d[:, 0]),
                            max(ver_2d[:, 1])
                        ])

                        # Get face bbox (min size)
                        headVerIdx = g_smpl_facepart['head']
                        headVert = ver_2d[headVerIdx]
                        minPt = [min(headVert[:, 0]), min(headVert[:, 1])]
                        maxPt = [max(headVert[:, 0]), max(headVert[:, 1])]
                        bbox_xyxy_small = np.array(
                            [minPt[0], minPt[1], maxPt[0], maxPt[1]])

                        # rawImg= viewer2D.Vis_Pt2ds(ver_2d,rawImg)
                        # rawImg = viewer2D.Vis_Bbox_minmaxPt(rawImg, bbox_xyxy_full[:2], bbox_xyxy_full[2:], color=(255,255,0))
                        # rawImg = viewer2D.Vis_Bbox_minmaxPt(rawImg,  bbox_xyxy_small[:2], bbox_xyxy_small[2:] ,color=(255,255,0))

                        #Interpolation
                        minPt_d = bbox_xyxy_full[:2] - bbox_xyxy_small[:2]
                        maxPt_d = bbox_xyxy_full[2:] - bbox_xyxy_small[2:]

                        for i in range(8):
                            crop_level = i
                            # if True:
                            # i = crop_level
                            cur_minPt = bbox_xyxy_small[:2] + minPt_d * i / 7.0
                            cur_maxPt = bbox_xyxy_small[2:] + maxPt_d * i / 7.0

                            bbox_xyhw = [
                                cur_minPt[0], cur_minPt[1],
                                cur_maxPt[0] - cur_minPt[0],
                                cur_maxPt[1] - cur_minPt[1]
                            ]
                            cur_center, cur_scale = conv_bboxinfo_bboxXYHW_to_centerscale(
                                bbox_xyhw)
                            cur_scale *= 1.2  #Scaling factor
                            cur_new_bboxXYXY = conv_bboxinfo_centerscale_to_bboxXYXY(
                                cur_center, cur_scale)

                            if is_export_imgs and crop_level in [7]:  #[1,2,4]:
                                #export cropped image into files
                                """Process rgb image and do augmentation."""
                                cropped_img = crop(rawImg,
                                                   cur_center,
                                                   cur_scale, [224, 224],
                                                   rot=0)
                                # viewer2D.ImShow(cropped_img,waitTime=0,name="cropped")
                                export_img_name = seq_name + '_' + os.path.basename(
                                    imgName)[:-4] + f'_pid{p_id}.jpg'
                                export_img_path = os.path.join(
                                    export_root, f'croplev_{crop_level}',
                                    export_img_name)

                                cv2.imwrite(export_img_path, cropped_img)
                            #Compute face to cur bbox ratio   cur_scale / face_scale
                            if i == 0:
                                ratio_bbox_over_face = 1.0
                            else:
                                ratio_bbox_over_face = cur_scale / bbox_list[
                                    0]['scale']

                            bbox_list.append({
                                "scale": cur_scale,
                                "center": cur_center,
                                "ratio_bbox_over_face": ratio_bbox_over_face,
                                "bbox_xyhw": bbox_xyhw
                            })

                            if bDebugVis:  #Draw full size bbox
                                print(
                                    f"{i}: {cur_scale}, {cur_center}, {ratio_bbox_over_face}"
                                )
                                # tempImg = viewer2D.Vis_Bbox_minmaxPt(rawImg,  cur_minPt, cur_maxPt ,color=(255,255,255))
                                if i in [1, 2, 4]:
                                    tempImg = viewer2D.Vis_Bbox_minmaxPt(
                                        rawImg,
                                        cur_new_bboxXYXY[:2],
                                        cur_new_bboxXYXY[2:],
                                        color=(0, 255, 255))
                                else:
                                    tempImg = viewer2D.Vis_Bbox_minmaxPt(
                                        rawImg,
                                        cur_new_bboxXYXY[:2],
                                        cur_new_bboxXYXY[2:],
                                        color=(255, 0, 0))
                                viewer2D.ImShow(tempImg,
                                                name="bboxGen",
                                                waitTime=0)

                        # viewer2D.ImShow(rawImg)
                        multilevel_bboxinfo_.append(bbox_list)
                        key_name = (cur_img_name[11:], p_id)
                        assert key_name not in pw3d_multicrop_info.keys()
                        pw3d_multicrop_info[key_name] = [
                            dt['bbox_xyhw'] for dt in bbox_list
                        ]
                        # if valid_i==5:
                        #     break

    # store data
    if not os.path.isdir(out_path):
        os.makedirs(out_path)

    multicrop_out_path = os.path.join(
        out_path,
        'pw3d_multicrop_info_sample{}_py2.pkl'.format(len(imgnames_)))
    with open(multicrop_out_path, 'wb') as f:
        pickle.dump(pw3d_multicrop_info, f, protocol=2)
        # pickle.dump(your_object, your_file, protocol=2)

    for level in range(8):
        scales_ = []
        centers_ = []
        for i in range(len(multilevel_bboxinfo_)):
            scales_.append(multilevel_bboxinfo_[i][level]['scale'])
            centers_.append(multilevel_bboxinfo_[i][level]['center'])

        out_file = os.path.join(out_path, f'3dpw_test_multilevel_{level}.npz')

        np.savez(out_file,
                 imgname=imgnames_,
                 center=centers_,
                 scale=scales_,
                 pose=poses_,
                 shape=shapes_,
                 gender=genders_,
                 part=parts_)
Example no. 6
def multilvel_bbox_crop_gen(rawImg,
                            smpl_vert,
                            fullsize_center,
                            fullsize_scale,
                            bDebug=True):
    """
    Generate bbox from smallest size(face) to full size
    args:
        fullsize_center, fullsize_scale: bbox given by original annotation  (full body or maximum size)
        smpl_vert: 
    """
    bbox_list = []

    bbox_xyxy_full = conv_bboxinfo_centerscale_to_bboxXYXY(
        fullsize_center, fullsize_scale)

    # Get face bbox (min size)
    headVerIdx = g_smpl_facepart['head']
    headVert = smpl_vert[headVerIdx]
    minPt = [
        min(headVert[:, 0]) + rawImg.shape[1] * 0.5,
        min(headVert[:, 1]) + rawImg.shape[0] * 0.5
    ]
    maxPt = [
        max(headVert[:, 0]) + rawImg.shape[1] * 0.5,
        max(headVert[:, 1]) + rawImg.shape[0] * 0.5
    ]
    bbox_xyxy_small = [minPt[0], minPt[1], maxPt[0], maxPt[1]]

    if bDebug:  #Draw full size bbox
        rawImg = viewer2D.Vis_Bbox_minmaxPt(rawImg.copy(),
                                            bbox_xyxy_full[:2],
                                            bbox_xyxy_full[2:],
                                            color=(255, 255, 0))
        rawImg = viewer2D.Vis_Bbox_minmaxPt(rawImg,
                                            bbox_xyxy_small[:2],
                                            bbox_xyxy_small[2:],
                                            color=(255, 255, 0))
        viewer2D.ImShow(rawImg, name="bboxGen")

    #Interpolation
    minPt_d = bbox_xyxy_full[:2] - bbox_xyxy_small[:2]
    maxPt_d = bbox_xyxy_full[2:] - bbox_xyxy_small[2:]
    for i in range(8):
        cur_minPt = bbox_xyxy_small[:2] + minPt_d * i / 7.0
        cur_maxPt = bbox_xyxy_small[2:] + maxPt_d * i / 7.0

        bbox_xyhw = [
            cur_minPt[0], cur_minPt[1], cur_maxPt[0] - cur_minPt[0],
            cur_maxPt[1] - cur_minPt[1]
        ]
        cur_center, cur_scale = conv_bboxinfo_bboxXYHW_to_centerscale(
            bbox_xyhw)
        #Compute face to cur bbox ratio   cur_scale / face_scale
        if i == 0:
            ratio_bbox_over_face = 1.0
        else:
            ratio_bbox_over_face = cur_scale / bbox_list[0]['scale']

        bbox_list.append({
            "scale": cur_scale,
            "center": cur_center,
            "ratio_bbox_over_face": ratio_bbox_over_face
        })

        if bDebug:  #Draw full size bbox
            print(f"{i}: {cur_scale}, {cur_center}, {ratio_bbox_over_face}")
            tempImg = viewer2D.Vis_Bbox_minmaxPt(rawImg,
                                                 cur_minPt,
                                                 cur_maxPt,
                                                 color=(255, 255, 255))
            viewer2D.ImShow(tempImg, name="bboxGen", waitTime=0)

    if bDebug:  #Draw full size bbox
        for b in bbox_list:
            bbox_xyxy = conv_bboxinfo_centerscale_to_bboxXYXY(
                b['center'], b['scale'])
            vis = viewer2D.Vis_Bbox_minmaxPt(rawImg,
                                             bbox_xyxy[:2],
                                             bbox_xyxy[2:],
                                             color=(0, 255, 255))
            viewer2D.ImShow(vis, name="bboxGen", waitTime=0)

    return bbox_list
Example no. 7
def LoadMocap(dbDir, imgRoot):

    blackList = [
        '0001', '0003', '0026', '0034', '1005', '1021', '1024', '1032'
    ]

    from scipy.io import loadmat

    annotations_all = []

    # seqPathList = sorted(glob.glob('{0}/*.json'.format(dbDir)) )
    seqPathList = sorted(glob.glob('{0}/*.mat'.format(dbDir)))

    #Iterate for each person
    for i, gtPath in enumerate(seqPathList):

        imageInfo = []
        seqName = os.path.basename(gtPath)[:4]

        if seqName in blackList:
            continue
        # seqName = os.path.basename(jsonData)[:-5]
        print('{}: {}'.format(seqName, i))

        mocap = loadmat(gtPath)
        # N x 13
        vis = mocap['visibility']

        x = mocap['x']
        y = mocap['y']
        skel2d_tracked = np.dstack((x, y, vis))  #(N, 13, 3)

        subjectId = 'pennaction_{}-id{:03d}'.format(
            seqName, 0)  #only single person in pennaction

        frameLeng = len(skel2d_tracked)
        for idx in range(frameLeng):
            skel2d = skel2d_tracked[idx]  #13,3

            if idx >= len(mocap['bbox']):
                print("out of range for bbox")
                break
            bbox = mocap['bbox'][idx]

            annot = {}  #Current annotation

            annot['keypoints'] = skel2d  #13,3
            annot['subjectId'] = subjectId
            imgPathFull = "{0}/{1}/{2:06d}.jpg".format(imgRoot, seqName,
                                                       idx + 1)

            annot['imgname'] = imgPathFull
            annot['bbox_xyxy'] = bbox

            if False:
                inputImg = cv2.imread(imgPathFull)
                inputImg = viewer2D.Vis_Skeleton_2D_pennaction(skel2d[:, :2],
                                                               skel2d[:, 2],
                                                               image=inputImg)
                inputImg = viewer2D.Vis_Bbox_minmaxPt(inputImg,
                                                      annot['bbox_xyxy'][:2],
                                                      annot['bbox_xyxy'][2:])
                viewer2D.ImShow(inputImg, waitTime=0)

            annotations_all.append(annot)

        # if(np.sum(j2d_validity_coco19)==0):
        #     print("No valid annotations")
        #     continue

    # trackid = annot['track_id']
    # bbox = annot['bbox']
    # image_id = annot['image_id']
    # annot_id = annot['id']
    # keypoints = annot['keypoints']
    #Img path, 2D keypoint, bbox
    return annotations_all
Example no. 8
            ours_vertices = ours_output.vertices.detach().cpu().numpy()

            rawImg = cv2.imread(imgFullPath)
            # croppedImg, boxScale_o2n, bboxTopLeft = crop_bboxInfo(rawImg, center, scale, (224, 224) )

            # camParam_scale = pred_camera_vis[0,0]
            # camParam_trans = pred_camera_vis[0,1:]
            # vertices_bbox = convert_smpl_to_bbox(ours_vertices[0], camParam_scale, camParam_trans)
            # vertices_img = convert_bbox_to_oriIm(vertices_bbox, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0])

            if bDraw:
                # rawImg = cv2.imread(imgFullPath)

                bbox_xyxy = conv_bboxinfo_centerscale_to_bboxXYXY(
                    center, scale)
                rawImg = viewer2D.Vis_Bbox_minmaxPt(rawImg, bbox_xyxy[:2],
                                                    bbox_xyxy[2:])

                # croppedImg = crop(rawImg, center, scale,
                #         [constants.IMG_RES, constants.IMG_RES])
                croppedImg, boxScale_o2n, bboxTopLeft = crop_bboxInfo(
                    rawImg, center, scale,
                    (constants.IMG_RES, constants.IMG_RES))

                #Visualize image
                if False:
                    rawImg = viewer2D.Vis_Skeleton_2D_SPIN49(
                        data['keypoint2d'][0][:, :2],
                        pt2d_visibility=data['keypoint2d'][0][:, 2],
                        image=rawImg)
                    viewer2D.ImShow(rawImg, name='rawImg')
                    viewer2D.ImShow(croppedImg, name='croppedImg')
Example no. 9
def visEFT_singleSubject(renderer):

    MAGNIFY_RATIO = 3           #onbbox only. To magnify the rendered image size 

    bStopForEachSample = args.waitforkeys      #if True, it will wait for any key pressed to move to the next sample
    bShowTurnTable = args.turntable

    inputData = args.fit_data
    imgDir = args.img_dir

    #Load SMPL model
    smplModelPath = args.smpl_dir + '/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl'
    smpl = SMPL_19(smplModelPath, batch_size=1, create_transl=False)
    
    #Load EFT fitting data
    print(f"Loading EFT data from {inputData}")
    if os.path.exists(inputData):
        with open(inputData,'r') as f:
            eft_data = json.load(f)
            print("EFT data: ver {}".format(eft_data['ver']))
            eft_data_all = eft_data['data']
    else:
        print(f"ERROR:: Cannot find EFT data: {inputData}")
        assert False


    #Visualize each EFT Fitting output
    for idx, eft_data in enumerate(tqdm(eft_data_all)):
        
        #Get raw image path
        imgFullPath = eft_data['imageName']
        # imgName = os.path.basename(imgFullPath)
        imgName = imgFullPath
        imgFullPath =os.path.join(imgDir, imgName)
        if not os.path.exists(imgFullPath):
            print(f"Img path is not valid: {imgFullPath}")
            assert False
        rawImg = cv2.imread(imgFullPath)
        print(f'Input image: {imgFullPath}')

        #EFT data
        bbox_scale = eft_data['bbox_scale']
        bbox_center = eft_data['bbox_center']

        pred_camera = np.array(eft_data['parm_cam'])
        pred_betas = np.reshape(np.array(eft_data['parm_shape'], dtype=np.float32), (1,10))     #(1,10)
        pred_betas = torch.from_numpy(pred_betas)

        pred_pose_rotmat = np.reshape(np.array(eft_data['parm_pose'], dtype=np.float32), (1,24,3,3))        #(1,24,3,3)
        pred_pose_rotmat = torch.from_numpy(pred_pose_rotmat)

        keypoint_2d_validity = eft_data['joint_validity_openpose18']

        #COCO only. Annotation index
        if 'annotId' in eft_data.keys():
            print("COCO annotId: {}".format(eft_data['annotId']))


        #Get SMPL mesh and joints from SMPL parameters
        smpl_output = smpl(betas=pred_betas, body_pose=pred_pose_rotmat[:,1:], global_orient=pred_pose_rotmat[:,[0]], pose2rot=False)
        smpl_vertices = smpl_output.vertices.detach().cpu().numpy()[0]
        smpl_joints_3d = smpl_output.joints.detach().cpu().numpy()[0]

        #Crop image using cropping information
        croppedImg, boxScale_o2n, bboxTopLeft = crop_bboxInfo(rawImg, bbox_center, bbox_scale, (BBOX_IMG_RES, BBOX_IMG_RES) )


        if MAGNIFY_RATIO>1:
            croppedImg = cv2.resize(croppedImg, (croppedImg.shape[1]*MAGNIFY_RATIO, croppedImg.shape[0]*MAGNIFY_RATIO) )

        ########################
        # Visualization
        ########################

        # Visualize 2D image
        if True:
            viewer2D.ImShow(rawImg, name='rawImg', waitTime=1)      #waitTime=1: non-blocking preview
            viewer2D.ImShow(croppedImg, name='croppedImg', waitTime=1)

            #Convert bbox_center, bbox_scale --> bbox_xyxy
            bbox_xyxy = conv_bboxinfo_bboxXYXY(bbox_scale,bbox_center)
            img_bbox = viewer2D.Vis_Bbox_minmaxPt(rawImg.copy(),bbox_xyxy[:2], bbox_xyxy[2:])
            viewer2D.ImShow(img_bbox, name='img_bbox', waitTime=1)

        # Visualization Mesh
        if True:    
            camParam_scale = pred_camera[0]
            camParam_trans = pred_camera[1:]
            pred_vert_vis = smpl_vertices
            smpl_joints_3d_vis = smpl_joints_3d

            if args.onbbox:
                pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)
                smpl_joints_3d_vis = convert_smpl_to_bbox(smpl_joints_3d_vis, camParam_scale, camParam_trans)
                renderer.setBackgroundTexture(croppedImg)
                renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])

                pred_vert_vis *=MAGNIFY_RATIO
            else:
                #Covert SMPL to BBox first
                pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)
                smpl_joints_3d_vis = convert_smpl_to_bbox(smpl_joints_3d_vis, camParam_scale, camParam_trans)

                #From cropped space to original
                pred_vert_vis = convert_bbox_to_oriIm(pred_vert_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0]) 
                smpl_joints_3d_vis = convert_bbox_to_oriIm(smpl_joints_3d_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0])
                renderer.setBackgroundTexture(rawImg)
                renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])

                #In the orthographic model, the XY of a 3D point is its 2D projection
                smpl_joints_2d_vis = conv_3djoint_2djoint(smpl_joints_3d_vis,rawImg.shape )
                # image_2dkeypoint_pred = viewer2D.Vis_Skeleton_2D_smpl45(smpl_joints_2d_vis, image=rawImg.copy(),color=(0,255,255))
                image_2dkeypoint_pred = viewer2D.Vis_Skeleton_2D_Openpose18(smpl_joints_2d_vis, image=rawImg.copy(),color=(255,0,0))        #All 2D joints
                image_2dkeypoint_pred = viewer2D.Vis_Skeleton_2D_Openpose18(smpl_joints_2d_vis, pt2d_visibility=keypoint_2d_validity, image=image_2dkeypoint_pred,color=(0,255,255))        #Only valid joints
                viewer2D.ImShow(image_2dkeypoint_pred, name='keypoint_2d_pred', waitTime=1)

            pred_meshes = {'ver': pred_vert_vis, 'f': smpl.faces}
            v = pred_meshes['ver'] 
            f = pred_meshes['f']

            #Visualize in the original image space
            renderer.set_mesh(v,f)
            renderer.showBackground(True)
            renderer.setWorldCenterBySceneCenter()
            renderer.setCameraViewMode("cam")

            #Set image size for rendering
            if args.onbbox:
                renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])
            else:
                renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])
                
            renderer.display()
            renderImg = renderer.get_screen_color_ibgr()
            viewer2D.ImShow(renderImg,waitTime=1)
        
        # Visualize multi-level cropped bbox
        if args.multi_bbox:
            from demo.multi_bbox_gen import multilvel_bbox_crop_gen
            
            bbox_list = multilvel_bbox_crop_gen(rawImg, pred_vert_vis, bbox_center, bbox_scale)

            #Visualize BBox
            for b_idx, b in enumerate(bbox_list):
                # bbox_xyxy= conv_bboxinfo_centerscale_to_bboxXYXY(b['center'], b['scale'])
                bbox_xyxy= b['bbox_xyxy']
                if b_idx==0:
                    img_multi_bbox = viewer2D.Vis_Bbox_minmaxPt(rawImg,  bbox_xyxy[:2], bbox_xyxy[2:] ,color=(0,255,0))
                else:
                    img_multi_bbox = viewer2D.Vis_Bbox_minmaxPt(rawImg,  bbox_xyxy[:2], bbox_xyxy[2:] ,color=(0,255,255))
            viewer2D.ImShow(img_multi_bbox, name='multi_bbox', waitTime=1)
            # for bbox in bbox_list:


        # Visualization Mesh on side view
        if True:
            renderer.showBackground(False)
            renderer.setWorldCenterBySceneCenter()
            # renderer.setCameraViewMode("side")    #To show the object in side view
            renderer.setCameraViewMode("free")     
            renderer.setViewAngle(90,20)

            #Set image size for rendering
            if args.onbbox:
                renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])
            else:
                renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])
            renderer.display()
            sideImg = renderer.get_screen_color_ibgr()        #rendered side view (no background)
            viewer2D.ImShow(sideImg,waitTime=1)
            
            sideImg = cv2.resize(sideImg, (renderImg.shape[1], renderImg.shape[0]) )
            # renderImg = cv2.resize(renderImg, (sideImg.shape[1], sideImg.shape[0]) )
        
        # Visualization Mesh on a second free view
        if True:
            renderer.showBackground(False)
            renderer.setWorldCenterBySceneCenter()
            # renderer.setCameraViewMode("side")    #To show the object in side view
            renderer.setCameraViewMode("free")     
            renderer.setViewAngle(-60,50)

            #Set image size for rendering
            if args.onbbox:
                renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])
            else:
                renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])
            renderer.display()
            sideImg_2 = renderer.get_screen_color_ibgr()        #rendered second view (no background)
            viewer2D.ImShow(sideImg_2,waitTime=1)
            
            sideImg_2 = cv2.resize(sideImg_2, (renderImg.shape[1], renderImg.shape[0]) )
            # renderImg = cv2.resize(renderImg, (sideImg.shape[1], sideImg.shape[0]) )


        #Visualize camera view and side view
        saveImg = np.concatenate( (renderImg,sideImg), axis =1)
        # saveImg = np.concatenate( (croppedImg, renderImg,sideImg, sideImg_2), axis =1)

        if bStopForEachSample:
            viewer2D.ImShow(saveImg,waitTime=0) #waitTime=0 means that it will wait for any key pressed
        else:
            viewer2D.ImShow(saveImg,waitTime=1)
        
        #Render Mesh on the rotating view
        if bShowTurnTable:
            renderer.showBackground(False)
            renderer.setWorldCenterBySceneCenter()
            renderer.setCameraViewMode("free")
            for i in range(90):
                renderer.setViewAngle(i*4,0)
                renderer.display()
                sideImg = renderer.get_screen_color_ibgr()        #rendered turntable frame
                viewer2D.ImShow(sideImg,waitTime=1,name="turn_table")

                if False:       #Set to True to save the turntable frames to files
                    render_output_path = args.render_dir + '/turntable_{}_{:08d}.jpg'.format(os.path.basename(imgName),i)
                    cv2.imwrite(render_output_path, sideImg)

        #Save the rendered image to files
        if True:
            if not os.path.exists(args.render_dir):
                os.mkdir(args.render_dir)
            render_output_path = args.render_dir + '/render_{}_eft{:08d}.jpg'.format(imgName[:-4],idx)
            print(f"Save to {render_output_path}")
            cv2.imwrite(render_output_path, saveImg)
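
A minimal sketch of the weak-perspective convention that convert_smpl_to_bbox and conv_3djoint_2djoint appear to implement above; the helper names and the exact scaling constants here are illustrative assumptions, not the repo's verified implementation:

import numpy as np

def convert_smpl_to_bbox_sketch(points_3d, cam_scale, cam_trans, bbox_res=224):
    #Weak perspective: uniform scale plus XY translation in normalized camera
    #space, then mapped to bbox pixels. bbox_res mirrors BBOX_IMG_RES; the
    #bbox_res/2 factor is an assumption.
    pts = points_3d * cam_scale
    pts[:, :2] += np.asarray(cam_trans)
    return pts * (bbox_res / 2.0)

def conv_3djoint_2djoint_sketch(joints_3d):
    #In the orthographic model, the 2D projection is just the XY of 3D
    return joints_3d[:, :2]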
Example No. 10
0
def exportOursToSpin(eftDir, out_path):

    # scaleFactor = 1.2

    # structs we need
    imgnames_, scales_, centers_, parts_, openposes_ = [], [], [], [], []

    #additional 3D
    poses_, shapes_, skel3D_, has_smpl_ = [], [], [], []

    pose3DList = os.listdir(eftDir)

    # for imgSample in cocoPose3DAll:
    sampleNum = len(pose3DList)
    # totalSampleNum = [ len(cocoPose3DAll[imgSample]) for imgSample in cocoPose3DAll ]
    # totalSampleNum = sum(totalSampleNum)
    print("\n\n### SampleNum: {} ###".format(sampleNum))

    maxDiff = 0
    for fname in tqdm(sorted(pose3DList)):

        fname_path = os.path.join(eftDir, fname)

        pose3d = pickle.load(open(fname_path, 'rb'))

        #load image
        imgPathFull = pose3d['imageName'][0]
        fileName = os.path.basename(imgPathFull)
        fileName_saved = os.path.join(
            os.path.basename(os.path.dirname(imgPathFull)),
            fileName)  #start from train2014
        center = pose3d['center'][0]
        scale = pose3d['scale'][0]

        smpl_shape = pose3d['pred_shape'].ravel()
        smpl_pose_mat = torch.from_numpy(
            pose3d['pred_pose_rotmat'][0])  #(24,3,3)
        #Pad each 3x3 rotation with a zero translation column -> (24,3,4),
        #the homogeneous form expected by tgm.rotation_matrix_to_angle_axis
        pred_rotmat_hom = torch.cat([
            smpl_pose_mat.view(-1, 3, 3),
            torch.zeros(24, 3, 1, dtype=torch.float32)
        ], dim=-1)
        smpl_pose = tgm.rotation_matrix_to_angle_axis(
            pred_rotmat_hom).contiguous().view(-1, 72)

        #verification: convert back to rotation matrices and track the max round-trip error
        if True:
            recon_mat = batch_rodrigues(smpl_pose.view(-1, 3))  #(24,3) axis-angle -> rotmat
            diff = abs(recon_mat.numpy() -
                       pose3d['pred_pose_rotmat'][0])  #typically ~2e-07
            # print(np.max(diff))
            maxDiff = max(maxDiff, np.max(diff))

        smpl_pose = smpl_pose.numpy().ravel()

        openpose2d = pose3d['keypoint2d'][0][:25]  #25,3
        spin2d_skel24 = pose3d['keypoint2d'][0][25:]  #24,3

        #Save data
        imgnames_.append(fileName_saved)
        centers_.append(center)
        scales_.append(scale)
        has_smpl_.append(1)
        poses_.append(smpl_pose)  #(72,)
        shapes_.append(smpl_shape)  #(10,)

        openposes_.append(openpose2d)  #(25,3) OpenPose keypoints
        parts_.append(spin2d_skel24)  #(24,3) SPIN joints

        #3D joint
        S = np.zeros(
            [24, 4])  #blank for 3d. TODO: may need to add valid data for this
        skel3D_.append(S)

        #Debug 2D Visualize (disabled; gt_skel, gt_validity, min_pt, max_pt are not defined in this function)
        if False:
            img = cv2.imread(
                os.path.join('/run/media/hjoo/disk/data/coco', imgnames_[-1]))
            img = viewer2D.Vis_Skeleton_2D_smplCOCO(
                gt_skel, pt2d_visibility=gt_validity[:, 0], image=img)
            img = viewer2D.Vis_Bbox_minmaxPt(img, min_pt, max_pt)
            viewer2D.ImShow(img, waitTime=0)

        #Debug 3D Visualize smpl_coco (disabled; data3D_h36m is not defined in this function)
        if False:
            # data3D_coco_vis = np.reshape(data3D_coco, (data3D_coco.shape[0],-1)).transpose()   #(Dim, F)
            # data3D_coco_vis *=0.1   #mm to cm
            # glViewer.setSkeleton( [ data3D_coco_vis] ,jointType='smplcoco')
            # glViewer.show()

            #Debug 3D Visualize, h36m
            data3D_h36m_vis = np.reshape(
                data3D_h36m, (data3D_h36m.shape[0], -1)).transpose()  #(Dim, F)
            data3D_h36m_vis *= 100  #meter to cm

            # data3D_smpl24 = np.reshape(data3D_smpl24, (data3D_smpl24.shape[0],-1)).transpose()   #(Dim, F)
            # data3D_smpl24 *=0.1

            glViewer.setSkeleton([data3D_h36m_vis], jointType='smplcoco')
            glViewer.show()

        # keypoints

    # print("Final Img Num: {}, Final Sample Num: {}".format( len(set(imgnames_) , len(imgnames_)) ) )
    print("Final Sample Num: {}".format(len(imgnames_)))
    print("maxDiff in rot conv.: {}".format(maxDiff))
    # store the data struct
    if not os.path.isdir(out_path):
        os.makedirs(out_path)
    out_file = os.path.join(out_path, os.path.basename(eftDir) + '.npz')

    print(f"Save to {out_file}")
    np.savez(out_file,
             imgname=imgnames_,
             center=centers_,
             scale=scales_,
             part=parts_,
             openpose=openposes_,
             pose=poses_,
             shape=shapes_,
             has_smpl=has_smpl_,
             S=skel3D_)
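
The zero column appended to each rotation matrix above exists only because tgm.rotation_matrix_to_angle_axis expects homogeneous Nx3x4 input. The same round trip can be sanity-checked with OpenCV's Rodrigues; a self-contained sketch (not the repo's torchgeometry path):

import numpy as np
import cv2

rotmats = np.tile(np.eye(3, dtype=np.float32), (24, 1, 1))  #stand-in for pred_pose_rotmat[0]
#rotation matrices -> axis-angle, one 3-vector per SMPL joint
axis_angle = np.stack([cv2.Rodrigues(R)[0].ravel() for R in rotmats])  #(24,3)
smpl_pose_vec = axis_angle.reshape(-1)  #(72,) SMPL pose vector, as saved above
#convert back and measure the max elementwise round-trip error
recon = np.stack([cv2.Rodrigues(v)[0] for v in axis_angle])  #(24,3,3)
assert np.abs(recon - rotmats).max() < 1e-6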
Example No. 11
0
def visEFT_singleSubject(renderer):

    bStopForEachSample = args.waitforkeys  #if True, it will wait for any key pressed to move to the next sample

    inputData = args.fit_data
    imgDir = args.img_dir

    #Load SMPL model
    smplModelPath = args.smpl_dir + '/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl'
    smpl = SMPL(smplModelPath, batch_size=1, create_transl=False)

    print("Loading coco annotation from:{}".format(args.cocoAnnotFile))
    assert os.path.exists(args.cocoAnnotFile)
    cocoAnnotDic = loadCOCOAnnot(args.cocoAnnotFile)

    #Load EFT fitting data
    print(f"Loading EFT data from {inputData}")
    if os.path.exists(inputData):
        with open(inputData, 'r') as f:
            eft_data = json.load(f)
            print("EFT data: ver {}".format(eft_data['ver']))
            eft_data_all = eft_data['data']
    else:
        print(f"ERROR:: Cannot find EFT data: {inputData}")
        assert False

    #Visualize each EFT Fitting output
    for idx, eft_data in enumerate(eft_data_all):

        #Get raw image path
        imgFullPath = eft_data['imageName']
        imgName = os.path.basename(imgFullPath)
        imgFullPath = os.path.join(imgDir, imgName)
        if not os.path.exists(imgFullPath):
            print(f"Img path is not valid: {imgFullPath}")
            assert False
        rawImg = cv2.imread(imgFullPath)
        print(f'Input image: {imgFullPath}')

        #EFT data
        bbox_scale = eft_data['bbox_scale']
        bbox_center = eft_data['bbox_center']

        pred_camera = np.array(eft_data['parm_cam'])
        pred_betas = np.reshape(
            np.array(eft_data['parm_shape'], dtype=np.float32),
            (1, 10))  #(10,)
        pred_betas = torch.from_numpy(pred_betas)

        pred_pose_rotmat = np.reshape(
            np.array(eft_data['parm_pose'], dtype=np.float32),
            (1, 24, 3, 3))  #(24,3,3)
        pred_pose_rotmat = torch.from_numpy(pred_pose_rotmat)

        keypoint_2d_validity = eft_data['joint_validity_openpose18']

        #COCO only. Annotation index
        print("COCO annotId: {}".format(eft_data['annotId']))
        annot = cocoAnnotDic[eft_data['annotId']]
        print(annot['bbox'])

        ########################
        #Visualize COCO annotation
        annot_keypoint = np.reshape(
            np.array(annot['keypoints'], dtype=np.float32), (-1, 3))  #17,3
        rawImg = viewer2D.Vis_Skeleton_2D_coco(annot_keypoint[:, :2],
                                               annot_keypoint[:, 2],
                                               image=rawImg)
        rawImg = viewer2D.Vis_Bbox(rawImg, annot['bbox'], color=(0, 255, 0))

        #Get SMPL mesh and joints from SMPL parameters
        smpl_output = smpl(betas=pred_betas,
                           body_pose=pred_pose_rotmat[:, 1:],
                           global_orient=pred_pose_rotmat[:, [0]],
                           pose2rot=False)
        smpl_vertices = smpl_output.vertices.detach().cpu().numpy()[0]
        smpl_joints_3d = smpl_output.joints.detach().cpu().numpy()[0]

        #Crop image using cropping information
        croppedImg, boxScale_o2n, bboxTopLeft = crop_bboxInfo(
            rawImg, bbox_center, bbox_scale, (BBOX_IMG_RES, BBOX_IMG_RES))

        ########################
        # Visualization of EFT
        ########################

        # Visualize 2D image
        if True:
            viewer2D.ImShow(rawImg, name='rawImg',
                            waitTime=1)  #waitTime=1: non-blocking refresh
            viewer2D.ImShow(croppedImg, name='croppedImg', waitTime=1)

            #Convert bbox_center, bbox_scale --> bbox_xyxy
            bbox_xyxy = conv_bboxinfo_bboxXYXY(bbox_scale, bbox_center)
            img_bbox = viewer2D.Vis_Bbox_minmaxPt(rawImg.copy(), bbox_xyxy[:2],
                                                  bbox_xyxy[2:])
            viewer2D.ImShow(img_bbox, name='img_bbox', waitTime=1)

        # Visualization Mesh
        if True:
            camParam_scale = pred_camera[0]
            camParam_trans = pred_camera[1:]
            pred_vert_vis = smpl_vertices
            smpl_joints_3d_vis = smpl_joints_3d

            if True:  #args.onbbox in the general version; this variant always renders on the cropped bbox
                pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis,
                                                     camParam_scale,
                                                     camParam_trans)
                smpl_joints_3d_vis = convert_smpl_to_bbox(
                    smpl_joints_3d_vis, camParam_scale, camParam_trans)
                renderer.setBackgroundTexture(croppedImg)
                renderer.setViewportSize(croppedImg.shape[1],
                                         croppedImg.shape[0])

            pred_meshes = {'ver': pred_vert_vis, 'f': smpl.faces}
            v = pred_meshes['ver']
            f = pred_meshes['f']

            #Visualize in the original image space
            renderer.set_mesh(v, f)
            renderer.showBackground(True)
            renderer.setWorldCenterBySceneCenter()
            renderer.setCameraViewMode("cam")

            renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])
            renderer.display()
            renderImg = renderer.get_screen_color_ibgr()
            viewer2D.ImShow(renderImg, waitTime=1)

        # Visualization Mesh on side view
        if True:
            renderer.showBackground(False)
            renderer.setWorldCenterBySceneCenter()
            renderer.setCameraViewMode("side")

            renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])
            renderer.display()
            sideImg = renderer.get_screen_color_ibgr()  #rendered side view (no background)
            viewer2D.ImShow(sideImg, waitTime=1)

            sideImg = cv2.resize(sideImg,
                                 (renderImg.shape[1], renderImg.shape[0]))

        #Visualize camera view and side view
        saveImg = np.concatenate((renderImg, sideImg), axis=1)

        if bStopForEachSample:
            viewer2D.ImShow(
                saveImg, waitTime=0
            )  #waitTime=0 means that it will wait for any key pressed
        else:
            viewer2D.ImShow(saveImg, waitTime=1)

        #Save the rendered image to files (disabled; set to True to enable)
        if False:
            if not os.path.exists(args.render_dir):
                os.mkdir(args.render_dir)
            render_output_path = args.render_dir + '/render_{:08d}.jpg'.format(
                idx)
            print(f"Save to {render_output_path}")
            cv2.imwrite(render_output_path, saveImg)
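
A minimal sketch of the center/scale-to-XYXY conversion that conv_bboxinfo_bboxXYXY performs above, assuming the SPIN-style convention visible in this file (the scale computed in the next example is the max bbox side / 200, times a padding factor); the helper name is hypothetical:

import numpy as np

def center_scale_to_xyxy_sketch(center, scale):
    #assumed SPIN convention: the square bbox side in pixels is scale * 200
    half = scale * 200.0 / 2.0
    cx, cy = center
    return np.array([cx - half, cy - half, cx + half, cy + half])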
Example No. 12
0
def exportOursToSpin(out_path):

    scaleFactor = 1.2

    hagglingDBdir = '/home/hjoo/data/pytorch_motionSynth/motionsynth_data/data/processed_panoptic'

    haggling_files = os.listdir(hagglingDBdir +
                                '/panopticDB_pkl_hagglingProcessed')

    # structs we need
    imgnames_, scales_, centers_, parts_, openposes_ = [], [], [], [], []

    #additional 3D
    poses_, shapes_, skel3D_, has_smpl_ = [], [], [], []

    rhand_2d_list, rhand_3d_list, lhand_2d_list, lhand_3d_list = [], [], [], []
    subject_id_list = []

    for mocapName in haggling_files:
        seqname = mocapName[:18]  #'170221_haggling_b1'

        if "170221_haggling_b" not in seqname and "170228_haggling_b" not in seqname:  #testing set
            continue

        print(f"processing{mocapName}")

        groupid = mocapName[24:-4]  # 3

        print(f"{mocapName}, {seqname}, {groupid}")

        # groupname = f"group{groupid}"
        # mocapName =f'{seqname}_{groupname}.pkl'
        imgDir = f'/home/hjoo/data/panoptic-toolbox/{seqname}/hdImgs/00_00'
        calibdata = f'/home/hjoo/data/panoptic-toolbox/{seqname}/calibration_{seqname}.json'
        cam = json.load(open(calibdata, "r"))['cameras']
        cam = cam[479:]  #479 is 00_00

        # with open('/run/media/hjoo/disk/data/panoptic_mtc/a4_release/annotation.pkl', 'rb') as f:
        #     data = pickle.load(f)

        # with open('/run/media/hjoo/disk/data/panoptic_mtc/a4_release/camera_data.pkl', 'rb') as f:
        #     cam = pickle.load(f)

        bodydatapath = f'{hagglingDBdir}/panopticDB_pkl_hagglingProcessed/' + mocapName
        bodydata = pickle.load(open(bodydatapath, "rb"), encoding='latin1')

        handdatapath = f'{hagglingDBdir}/panopticDB_hand_pkl_hagglingProcessed/' + mocapName
        handdata = pickle.load(open(handdatapath, "rb"), encoding='latin1')

        # if True:
        #     for subj in bodydata['subjects']:
        #         startFrame = subj['startFrame']

        #         for n in range(subj['joints19'].shape[1]):
        #             imgpath = os.path.join(imgDir,"00_00_%08d.jpg" % (startFrame+n))
        #             img = cv2.imread(imgpath)
        #             viewer2D.ImShow(img)

        groupStartFrame = bodydata['startFrame']
        subNum = len(bodydata['subjects'])
        for subid in range(subNum):

            subj_body = bodydata['subjects'][subid]['joints19']

            if len(handdata['hand_right']) <= subid:
                continue

            if len(handdata['hand_left']) <= subid:
                continue

            subj_righthand = handdata['hand_right'][subid]['hand21']
            subj_lefthand = handdata['hand_left'][subid]['hand21']

            #Validity Score Right
            subj_righthand_validity = handdata['hand_right'][subid]['bValidFrame']
            # subj_righthand_score = handdata['hand_right'][subid]['scores']
            #align the hand validity array to the group's start frame
            localStart = groupStartFrame - handdata['hand_right'][subid]['startFrame']
            subj_righthand_validity = subj_righthand_validity[localStart:]
            # subj_righthand_score = subj_righthand_score[: , localStart:]

            #Validity Score Left
            subj_lefthand_validity = handdata['hand_left'][subid]['bValidFrame']
            # subj_lefthand_score = handdata['hand_left'][subid]['scores']
            #align the hand validity array to the group's start frame
            localStart = groupStartFrame - handdata['hand_left'][subid]['startFrame']
            subj_lefthand_validity = subj_lefthand_validity[localStart:]
            # subj_lefthand_score = subj_lefthand_score[:, localStart:]

            print("valid: {}/{}".format(
                sum(handdata['hand_right'][subid]['bValidFrame']),
                len(handdata['hand_right'][subid]['bValidFrame'])))
            print("valid: {}/{}".format(
                sum(handdata['hand_left'][subid]['bValidFrame']),
                len(handdata['hand_left'][subid]['bValidFrame'])))

            startFrame = bodydata['startFrame']

            # assert subj_body.shape[1] == subj_righthand.shape[1]

            frameLen = subj_body.shape[1]

            for ii in range(frameLen):

                bVis = False
                frameid = startFrame + ii
                subject_id = "{}_g{}_s{}_{:08d}".format(
                    seqname, groupid, subid,
                    frameid)  #seqname, groupid. subid, frameid
                # print(subject_id)

                imgFullPath = os.path.join(imgDir,
                                           "00_00_%08d.jpg" % (frameid))
                body_landmark = np.array(subj_body[:, ii]).reshape(
                    -1, 3)  #19, 3          #SMC19 order
                # body_landmark = np.array(sample['body']['landmarks']).reshape(-1, 3)            #19, 3          #SMC19 order

                if subj_righthand.shape[1] <= ii:
                    rhand_landmark = np.zeros((21, 3))  #(21,3) hand21 order
                else:
                    rhand_landmark = np.array(subj_righthand[:, ii]).reshape(
                        -1, 3)  #(21,3) hand21 order

                if subj_lefthand.shape[1] <= ii:
                    lhand_landmark = np.zeros((21, 3))  #(21,3) hand21 order
                else:
                    lhand_landmark = np.array(subj_lefthand[:, ii]).reshape(
                        -1, 3)  #(21,3) hand21 order

                calib_data = cam[0]  #00
                for c in calib_data:
                    calib_data[c] = np.array(calib_data[c])

                skeleton_3d_camview = applyExtrinsic(body_landmark,
                                                     calib_data)  #19,3
                rhand_3d_camview = applyExtrinsic(rhand_landmark,
                                                  calib_data)  #21,3
                lhand_3d_camview = applyExtrinsic(lhand_landmark,
                                                  calib_data)  #21,3

                skeleton_2d = project2D(body_landmark, calib_data)  #19,2
                rhand_2d = project2D(rhand_landmark, calib_data)  #21,2
                lhand_2d = project2D(lhand_landmark, calib_data)  #21,2
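                #applyExtrinsic maps world coordinates into the camera frame;
                #project2D additionally applies the camera intrinsics to get
                #pixel coordinates (see the sketch after this example)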

                imgName = "{}/hdImgs/00_00/00_00_{:08d}.jpg".format(
                    seqname, frameid)

                # print(imgName)
                # imgFullPath = os.path.join(imgDir, imgName)
                # if os.path.exists(imgFullPath) == False:
                #     continue
                # print(imgName)

                #Visualize 3D
                if False:
                    img = cv2.imread(imgFullPath)
                    img = viewer2D.Vis_Skeleton_2D_SMC19(skeleton_2d,
                                                         image=img)
                    viewer2D.ImShow(img, waitTime=1)

                    skeleton_3d_camview = skeleton_3d_camview.ravel()[:, np.newaxis]
                    rhand_3d_camview = rhand_3d_camview.ravel()[:, np.newaxis]
                    lhand_3d_camview = lhand_3d_camview.ravel()[:, np.newaxis]
                    glViewer.setSkeleton([
                        skeleton_3d_camview, rhand_3d_camview, lhand_3d_camview
                    ])
                    # glViewer.setSkeleton([skeleton_3d_camview])

                    glViewer.setBackgroundTexture(img)
                    glViewer.SetOrthoCamera(True)
                    glViewer.show(0)
                    continue

                min_pt = np.min(skeleton_2d, axis=0)
                min_pt[0] = max(min_pt[0], 0)
                min_pt[1] = max(min_pt[1], 0)

                max_pt = np.max(skeleton_2d, axis=0)
                max_pt[0] = min(max_pt[0], 1920)  #clamp to HD image width
                max_pt[1] = min(max_pt[1], 1080)  #clamp to HD image height
                # bbox= [ min_pt[0], min_pt[1], max_pt[0], max_pt[1] ]
                bbox = [
                    min_pt[0], min_pt[1], max_pt[0] - min_pt[0],
                    max_pt[1] - min_pt[1]
                ]

                center = [bbox[0] + bbox[2] / 2, bbox[1] + bbox[3] / 2]
                scale = scaleFactor * max(bbox[2], bbox[3]) / 200
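                #SPIN-style bbox scale: scale * 200 == scaleFactor * max(bbox_w, bbox_h)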

                #Save data
                # imgnames_.append(os.path.join('train',fileName_saved))
                imgnames_.append(imgName)
                openposes_.append(np.zeros([25, 3]))  #blank
                centers_.append(center)
                scales_.append(scale)
                # has_smpl_.append(1)
                # poses_.append(sample['pose3DParam']['pose'])        #(72,)
                # shapes_.append(sample['pose3DParam']['shape'])       #(10,)

                #2D keypoints (SMC19 -> SPIN24)
                poseidx_spin24 = [
                    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 14, 12, 19, 20, 22,
                    21, 23
                ]
                poseidx_smc19 = [
                    14, 13, 12, 6, 7, 8, 11, 10, 9, 3, 4, 5, 2, 0, 1, 15, 16,
                    17, 18
                ]
                part = np.zeros([24, 3])
                part[poseidx_spin24, :2] = skeleton_2d[
                    poseidx_smc19]  #reorder SMC19 joints into SPIN24 slots
                part[poseidx_spin24, 2] = 1  #visibility flag
                parts_.append(part)

                #3D joint
                S = np.zeros([24, 4])
                S[poseidx_spin24, :3] = skeleton_3d_camview[
                    poseidx_smc19, :] * 0.01  #Scaling skeleton 3D (currently cm) -> meter
                S[poseidx_spin24, 3] = 1

                skel3D_.append(S)

                rhand_2d_list.append(rhand_2d)

                if len(subj_righthand_validity) <= ii:
                    rhand_validity = 0
                else:
                    rhand_validity = subj_righthand_validity[ii]
                # rhand_score = subj_righthand_score[:,ii].mean()
                rhand_3d_camview = rhand_3d_camview * 0.01
                if rhand_validity == 1:
                    rhand_3d_camview = np.concatenate(
                        [rhand_3d_camview, np.ones((21, 1))], axis=1)
                else:
                    rhand_3d_camview = np.concatenate(
                        [rhand_3d_camview, np.zeros((21, 1))], axis=1)
                    bVis = True
                rhand_3d_list.append(rhand_3d_camview)

                lhand_2d_list.append(lhand_2d)

                if len(subj_lefthand_validity) <= ii:
                    lhand_validity = 0
                else:
                    lhand_validity = subj_lefthand_validity[ii]

                # lhand_validity = subj_lefthand_validity[ii]
                lhand_3d_camview = lhand_3d_camview * 0.01
                if lhand_validity == 1:
                    lhand_3d_camview = np.concatenate(
                        [lhand_3d_camview, np.ones((21, 1))], axis=1)
                else:
                    lhand_3d_camview = np.concatenate(
                        [lhand_3d_camview, np.zeros((21, 1))], axis=1)
                    bVis = True
                lhand_3d_list.append(lhand_3d_camview)

                subject_id_list.append(subject_id)

                #Add hand joints
                # print("right: {} : left: {}".format(rhand_score, lhand_score))
                # print("right: {} : left: {}".format(rhand_validity, lhand_validity))

                #Debug 2D Visualize
                if False:  #change to "if bVis:" to visualize frames with invalid hand annotations
                    img = cv2.imread(imgFullPath)
                    # img = cv2.imread( os.path.join( '/run/media/hjoo/disk/data/mpii_human_pose_v1/images',imgnames_[-1]) )
                    img = viewer2D.Vis_Skeleton_2D_SMC19(skeleton_2d,
                                                         image=img)
                    img = viewer2D.Vis_Skeleton_2D_Hand(rhand_2d, image=img)
                    img = viewer2D.Vis_Skeleton_2D_Hand(lhand_2d, image=img)

                    img = viewer2D.Vis_Bbox_minmaxPt(img, min_pt, max_pt)
                    viewer2D.ImShow(img, waitTime=0)

                #Debug 3D Visualize smpl_coco (disabled; data3D_h36m is not defined in this function)
                if False:
                    # data3D_coco_vis = np.reshape(data3D_coco, (data3D_coco.shape[0],-1)).transpose()   #(Dim, F)
                    # data3D_coco_vis *=0.1   #mm to cm
                    # glViewer.setSkeleton( [ data3D_coco_vis] ,jointType='smplcoco')
                    # glViewer.show()

                    #Debug 3D Visualize, h36m
                    data3D_h36m_vis = np.reshape(
                        data3D_h36m,
                        (data3D_h36m.shape[0], -1)).transpose()  #(Dim, F)
                    data3D_h36m_vis *= 0.001  #mm to meter

                    # data3D_smpl24 = np.reshape(data3D_smpl24, (data3D_smpl24.shape[0],-1)).transpose()   #(Dim, F)
                    # data3D_smpl24 *=0.1

                    glViewer.setSkeleton([data3D_h36m_vis],
                                         jointType='smplcoco')
                    glViewer.show()

    # print("Final Img Num: {}, Final Sample Num: {}".format( len(set(imgnames_) , len(imgnames_)) ) )
    print("Final Sample Num: {}".format(len(imgnames_)))
    # store the data struct
    if not os.path.isdir(out_path):
        os.makedirs(out_path)
    # out_file = os.path.join(out_path, '1031-mpii3D_train_44257_all.npz')
    out_file = os.path.join(out_path, f'panoptic_{mocapName[:-4]}')  #np.savez appends .npz

    print(f"Save to {out_file}")

    np.savez(
        out_file,
        imgname=imgnames_,
        center=centers_,
        scale=scales_,
        part=parts_,
        openpose=openposes_,
        S=skel3D_,
        rhand_3d=rhand_3d_list,
        rhand_2d=rhand_2d_list,
        lhand_3d=lhand_3d_list,
        lhand_2d=lhand_2d_list,
        subjectid=subject_id_list  #To handle sequence data and track the same person in output
    )
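
For reference, a minimal sketch of the camera mapping that applyExtrinsic and project2D are used for above, assuming the Panoptic Studio calibration layout (per-camera 'R', 't', 'K' entries; lens distortion is ignored here, and the helper names are hypothetical):

import numpy as np

def apply_extrinsic_sketch(points_world, calib):
    #world (N,3) -> camera frame (N,3): x_cam = R @ x_world + t
    R, t = calib['R'], calib['t'].reshape(3, 1)
    return (R @ points_world.T + t).T

def project2d_sketch(points_world, calib):
    #camera frame -> pixels: perspective divide, then intrinsics K
    pts_cam = apply_extrinsic_sketch(points_world, calib)
    pts_norm = pts_cam[:, :2] / pts_cam[:, 2:3]
    K = calib['K']
    return (K[:2, :2] @ pts_norm.T + K[:2, 2:3]).T  #(N,2)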