Example #1
def exportOursToSpin(eftDir, out_path):

    # scaleFactor = 1.2

    # structs we need
    imgnames_, scales_, centers_, parts_, openposes_ = [], [], [], [], []

    #additional 3D
    poses_, shapes_, skel3D_, has_smpl_ = [], [], [], []

    pose3DList = os.listdir(eftDir)

    # for imgSample in cocoPose3DAll:
    sampleNum = len(pose3DList)
    # totalSampleNum = [ len(cocoPose3DAll[imgSample]) for imgSample in cocoPose3DAll ]
    # totalSampleNum = sum(totalSampleNum)
    print("\n\n### SampleNum: {} ###".format(sampleNum))

    maxDiff = 0
    for fname in tqdm(sorted(pose3DList)):

        fname_path = os.path.join(eftDir, fname)

        pose3d = pickle.load(open(fname_path, 'rb'))

        #load image
        imgPathFull = pose3d['imageName'][0]
        fileName = os.path.basename(imgPathFull)
        fileName_saved = os.path.join(
            os.path.basename(os.path.dirname(imgPathFull)),
            fileName)  #start from train2014
        center = pose3d['center'][0]
        scale = pose3d['scale'][0]

        smpl_shape = pose3d['pred_shape'].ravel()
        smpl_pose_mat = torch.from_numpy(
            pose3d['pred_pose_rotmat'][0])  #24,3,3
        pred_rotmat_hom = torch.cat([
            smpl_pose_mat.view(-1, 3, 3),
            torch.tensor([0, 0, 0], dtype=torch.float32).view(1, 3, 1).expand(24, -1, -1)
        ], dim=-1)
        smpl_pose = tgm.rotation_matrix_to_angle_axis(
            pred_rotmat_hom).contiguous().view(-1, 72)

        #verification: convert back to rotation matrices and check the round-trip error
        if True:
            recon_mat = batch_rodrigues(smpl_pose.view(-1, 3))  #(24,3) axis-angle -> (24,3,3) rotmat
            diff = abs(recon_mat.numpy() -
                       pose3d['pred_pose_rotmat'][0])  #typically ~2e-7
            maxDiff = max(maxDiff, np.max(diff))

        smpl_pose = smpl_pose.numpy().ravel()

        openpose2d = pose3d['keypoint2d'][0][:25]  #25,3
        spin2d_skel24 = pose3d['keypoint2d'][0][25:]  #24,3

        #Save data
        imgnames_.append(fileName_saved)
        centers_.append(center)
        scales_.append(scale)
        has_smpl_.append(1)
        poses_.append(smpl_pose)  #(72,)
        shapes_.append(smpl_shape)  #(10,)

        openposes_.append(openpose2d)  #(25,3) OpenPose-format 2D keypoints
        parts_.append(spin2d_skel24)   #(24,3) SPIN 2D keypoints

        #3D joint
        S = np.zeros(
            [24, 4])  #blank for 3d. TODO: may need to add valid data for this
        skel3D_.append(S)

        #Debug 2D Visualize (disabled; gt_skel, gt_validity, min_pt, max_pt are not defined in this variant)
        if False:
            img = cv2.imread(
                os.path.join('/run/media/hjoo/disk/data/coco', imgnames_[-1]))
            img = viewer2D.Vis_Skeleton_2D_smplCOCO(
                gt_skel, pt2d_visibility=gt_validity[:, 0], image=img)
            img = viewer2D.Vis_Bbox_minmaxPt(img, min_pt, max_pt)
            viewer2D.ImShow(img, waitTime=0)

        #Debug 3D Visualize smpl_coco (disabled; data3D_h36m is not defined in this variant)
        if False:
            # data3D_coco_vis = np.reshape(data3D_coco, (data3D_coco.shape[0],-1)).transpose()   #(Dim, F)
            # data3D_coco_vis *=0.1   #mm to cm
            # glViewer.setSkeleton( [ data3D_coco_vis] ,jointType='smplcoco')
            # glViewer.show()

            #Debug 3D Visualize, h36m
            data3D_h36m_vis = np.reshape(
                data3D_h36m, (data3D_h36m.shape[0], -1)).transpose()  #(Dim, F)
            data3D_h36m_vis *= 100  #meter to cm

            # data3D_smpl24 = np.reshape(data3D_smpl24, (data3D_smpl24.shape[0],-1)).transpose()   #(Dim, F)
            # data3D_smpl24 *=0.1

            glViewer.setSkeleton([data3D_h36m_vis], jointType='smplcoco')
            glViewer.show()

        # keypoints

    # print("Final Img Num: {}, Final Sample Num: {}".format( len(set(imgnames_) , len(imgnames_)) ) )
    print("Final Sample Num: {}".format(len(imgnames_)))
    print("maxDiff in rot conv.: {}".format(maxDiff))
    # store the data struct
    if not os.path.isdir(out_path):
        os.makedirs(out_path)
    out_file = os.path.join(out_path, os.path.basename(eftDir) + '.npz')

    print(f"Save to {out_file}")
    np.savez(out_file,
             imgname=imgnames_,
             center=centers_,
             scale=scales_,
             part=parts_,
             openpose=openposes_,
             pose=poses_,
             shape=shapes_,
             has_smpl=has_smpl_,
             S=skel3D_)
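A minimal usage sketch for Example #1, assuming an EFT fitting directory of per-sample .pkl files; the paths below are hypothetical, and the imports the function relies on (os, pickle, numpy as np, torch, torchgeometry as tgm, tqdm, plus the project-local batch_rodrigues helper) are assumed to be in scope.

if __name__ == '__main__':
    eftDir = '/path/to/eft_fit/COCO2014-All-ver01'   # hypothetical input directory
    out_path = '/path/to/output'                     # hypothetical output directory

    exportOursToSpin(eftDir, out_path)

    # Inspect the exported SPIN-style annotation file
    db = np.load(os.path.join(out_path, os.path.basename(eftDir) + '.npz'))
    print(db['imgname'].shape, db['pose'].shape, db['shape'].shape)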
Example #2
def exportOursToSpin(cocoPose3DAll, out_path):

    scaleFactor = 1.2

    # structs we need
    imgnames_, scales_, centers_, parts_, openposes_ = [], [], [], [], []

    #additional 3D
    poses_, shapes_, skel3D_, has_smpl_ = [], [], [], []

    # for imgSample in cocoPose3DAll:
    imgNum = len(cocoPose3DAll)
    totalSampleNum = [ len(cocoPose3DAll[imgSample]) for imgSample in cocoPose3DAll ]
    totalSampleNum = sum(totalSampleNum)

    print("\n\n### ImageNum: {}, SampleNum: {} ###".format(imgNum, totalSampleNum))
    # for imgSample in cocoPose3DAll:
    # for key_imgId, imgSample in sorted(cocoPose3DAll.items()):
    for key_imgId, imgSample in sorted(cocoPose3DAll.items()):
        #load image
        imgPathFull = imgSample[0]['imgId']           
        fileName = os.path.basename(imgPathFull)
        fileName_saved = os.path.join(os.path.basename(os.path.dirname(imgPathFull)), fileName) #start from train2014


        for sample in imgSample:

            validJointNum = np.sum(sample['pose2D_validity'][::2])

            if validJointNum<4:
                continue

            if np.isnan(sample['pose3DParam']['camScale']):     
                continue

            gt_skel = np.reshape(sample['pose2D_gt'],(26,-1))             #(26,2) ground-truth 2D keypoints
            gt_validity = np.reshape(sample['pose2D_validity'],(26,-1))   #(26,2)

            # Filtering: skip samples where any required (limb) joint is invalid
            if True:
                requiredJoints = [0,1,2, 3,4,5, 6,7,8, 9,10,11]      #In Total26
                if not np.all(gt_validity[requiredJoints, 0]):
                    continue

            min_pt = np.min(gt_skel[gt_validity[:,0]], axis=0)
            max_pt = np.max(gt_skel[gt_validity[:,0]], axis=0)
            # bbox= [ min_pt[0], min_pt[1], max_pt[0], max_pt[1] ]
            bbox= [ min_pt[0], min_pt[1], max_pt[0] - min_pt[0], max_pt[1] - min_pt[1]]


            center = [bbox[0] + bbox[2]/2, bbox[1] + bbox[3]/2]
            scale = scaleFactor*max(bbox[2], bbox[3])/200


            #Save data
            imgnames_.append(os.path.join('images',fileName_saved)) 
            openposes_.append(np.zeros([25,3]))       #blank
            centers_.append(center)
            scales_.append(scale)
            has_smpl_.append(1)
            poses_.append(sample['pose3DParam']['pose'])        #(72,)
            shapes_.append(sample['pose3DParam']['shape'])       #(10,)


            #2D keypoints (total26 -> SPIN24)
            poseidx_spin24 = [0,1,2,  3,4,5,  6,7,8,  9,10,11,  19,20,21,22,23]
            poseidx_total26 = [0,1,2,  3,4,5,  6,7,8,  9,10,11,  14,15,16,17,18]
            part = np.zeros([24,3])
            part[poseidx_spin24,:2] = gt_skel[poseidx_total26]          #(17,2) from Total26 GT
            part[poseidx_spin24,2] = 1*gt_validity[poseidx_total26,0]
            parts_.append(part)

            #3D joint
            S = np.zeros([24,4])
            S[poseidx_spin24,:3] = sample['pose3D_pred'][poseidx_total26,:]  * 0.001     #Scaling skeleton 3D (currently mm) -> meter
            S[poseidx_spin24,3] = 1
            
            skel3D_.append(S)

            #Debug 2D Visualize
            if False:
                img = cv2.imread( os.path.join( '/run/media/hjoo/disk/data/mpii_human_pose_v1/images',imgnames_[-1]) )
                img = viewer2D.Vis_Skeleton_2D_smplCOCO(gt_skel, pt2d_visibility = gt_validity[:,0], image =img)
                img = viewer2D.Vis_Bbox_minmaxPt(img, min_pt, max_pt)
                viewer2D.ImShow(img, waitTime=0)  

            #Debug 3D Visualize smpl_coco (disabled; data3D_h36m is not defined in this variant)
            if False:
                # data3D_coco_vis = np.reshape(data3D_coco, (data3D_coco.shape[0],-1)).transpose()   #(Dim, F)
                # data3D_coco_vis *=0.1   #mm to cm
                # glViewer.setSkeleton( [ data3D_coco_vis] ,jointType='smplcoco')
                # glViewer.show()

                #Debug 3D Visualize, h36m
                data3D_h36m_vis = np.reshape(data3D_h36m, (data3D_h36m.shape[0],-1)).transpose()   #(Dim, F)
                data3D_h36m_vis *= 0.001   #mm to meter

                # data3D_smpl24 = np.reshape(data3D_smpl24, (data3D_smpl24.shape[0],-1)).transpose()   #(Dim, F)
                # data3D_smpl24 *=0.1

                glViewer.setSkeleton( [ data3D_h36m_vis]  ,jointType='smplcoco')
                glViewer.show()


            # keypoints

    # print("Final Img Num: {}, Final Sample Num: {}".format( len(set(imgnames_) , len(imgnames_)) ) )
    print("Final Sample Num: {}".format( len(imgnames_)))
    # store the data struct
    if not os.path.isdir(out_path):
        os.makedirs(out_path)
    # out_file = os.path.join(out_path, '1031-mpii3D_train_44257_all.npz')
    out_file = os.path.join(out_path, '1031-mpii3D_train_44257_validlimbs.npz')

    np.savez(out_file,
             imgname=imgnames_,
             center=centers_,
             scale=scales_,
             part=parts_,
             openpose=openposes_,
             pose=poses_,
             shape=shapes_,
             has_smpl=has_smpl_,
             S=skel3D_)
Example #3
def exportOursToSpin(cocoPose3DAll, out_path):

    scaleFactor = 1.2

    # annotation files (loaded here but not used further in this variant)
    annot_file = os.path.join('/home/hjoo/codes/SPIN/data', 'train.h5')
    # read annotations
    f = h5py.File(annot_file, 'r')
    centers, imgnames, parts, scales = \
        f['center'], f['imgname'], f['part'], f['scale']

    # structs we need
    imgnames_, scales_, centers_, parts_, openposes_ = [], [], [], [], []

    #additional 3D
    poses_, shapes_, skel3D_, has_smpl_ = [], [], [], []

    # for imgSample in cocoPose3DAll:
    imgNum = len(cocoPose3DAll)
    totalSampleNum = [
        len(cocoPose3DAll[imgSample]) for imgSample in cocoPose3DAll
    ]
    totalSampleNum = sum(totalSampleNum)

    print("\n\n### ImageNum: {}, SampleNum: {} ###".format(
        imgNum, totalSampleNum))
    # for imgSample in cocoPose3DAll:
    # for key_imgId, imgSample in sorted(cocoPose3DAll.items()):
    for key_imgId, imgSample in sorted(cocoPose3DAll.items()):
        #load image
        fileName_saved = os.path.join('images', imgSample[0]['imgId'])
        # fileName = os.path.basename(imgPathFull)
        # fileName_saved = os.path.join(os.path.basename(os.path.dirname(imgPathFull)), fileName) #start from train2014

        for sample in imgSample:

            validJointNum = np.sum(sample['pose2D_validity'][::2])

            if validJointNum < 4:
                continue

            if np.isnan(sample['pose3DParam']['camScale']):
                continue

            #visualize
            # if True:        #Save BBox
            #     inputImg_2dvis = inputImg.copy()
            #     inputImg_2dvis = viewer2D.Vis_Bbox(inputImg_2dvis,sample['bbr'])
            #     # viewer2D.ImShow(inputImg_2dvis)
            #     imgFilePath = '{0}/{1}.jpg'.format(bboxFolder, fileName)
            #     cv2.imwrite(imgFilePath, inputImg_2dvis)

            if 'bbr' in sample.keys():
                bbox = sample['bbr']
            else:

                keypoints = np.reshape(sample['pose2D_gt'], (-1, 2))  #26,2
                valid = sample['pose2D_validity'][::2]  #(26,)
                valid_keypoints = keypoints[valid]

                min_pt = np.min(valid_keypoints, axis=0)
                max_pt = np.max(valid_keypoints, axis=0)
                bbox = [
                    min_pt[0], min_pt[1], max_pt[0] - min_pt[0],
                    max_pt[1] - min_pt[1]
                ]

            #Debug Visualization
            if False:
                imgPathFull = os.path.join(
                    '/run/media/hjoo/disk/data/mpii_human_pose_v1',
                    fileName_saved)
                inputImg_2dvis = cv2.imread(imgPathFull)
                inputImg_2dvis = viewer2D.Vis_Skeleton_2D_smplCOCO(
                    sample['pose2D_gt'],
                    pt2d_visibility=sample['pose2D_validity'],
                    image=inputImg_2dvis)
                inputImg_2dvis = viewer2D.Vis_Bbox(inputImg_2dvis, bbox)
                viewer2D.ImShow(inputImg_2dvis, waitTime=0)
                # continue

            center = [bbox[0] + bbox[2] / 2, bbox[1] + bbox[3] / 2]
            scale = scaleFactor * max(bbox[2], bbox[3]) / 200

            #Save data
            imgnames_.append(fileName_saved)
            openposes_.append(np.zeros([25, 3]))  #blank
            centers_.append(center)
            scales_.append(scale)
            has_smpl_.append(1)
            poses_.append(sample['pose3DParam']['pose'])  #(72,)
            shapes_.append(sample['pose3DParam']['shape'])  #(10,)

            #2D keypoints (total26 -> SPIN24)
            poseidx_spin24 = [
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 19, 20, 21, 22, 23, 13
            ]  #13 head top
            poseidx_total26 = [
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 14, 15, 16, 17, 18, 19
            ]  #19 head top
            part = np.zeros([24, 3])
            gt_skel = np.reshape(sample['pose2D_gt'], (26, -1))
            gt_validity = np.reshape(sample['pose2D_validity'], (26, -1))
            part[poseidx_spin24, :2] = gt_skel[poseidx_total26]  #(18,2) from Total26 GT
            part[poseidx_spin24, 2] = 1 * gt_validity[poseidx_total26, 0]
            parts_.append(part)

            #3D joint
            S = np.zeros([24, 4])
            S[poseidx_spin24, :3] = sample['pose3D_pred'][poseidx_total26, :]
            S[poseidx_spin24, 3] = 1

            skel3D_.append(S)

            # keypoints

    # store the data struct
    if not os.path.isdir(out_path):
        os.makedirs(out_path)
    out_file = os.path.join(out_path,
                            'mpi_train_wShape_1537_ep200_exemplar.npz')

    np.savez(out_file,
             imgname=imgnames_,
             center=centers_,
             scale=scales_,
             part=parts_,
             openpose=openposes_,
             pose=poses_,
             shape=shapes_,
             has_smpl=has_smpl_,
             S=skel3D_)
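For reference, the Total26 -> SPIN24 remapping used in Example #3 can be isolated as below. The parallel index lists are copied from the code above (SPIN slot 13 receives Total26 joint 19, the head top); the function name and the assumption of a (26,2) skeleton with a (26,) validity vector are illustrative.

import numpy as np

POSEIDX_SPIN24 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 19, 20, 21, 22, 23, 13]
POSEIDX_TOTAL26 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 14, 15, 16, 17, 18, 19]

def total26_to_spin24(gt_skel, gt_validity):
    # gt_skel: (26,2) 2D joints, gt_validity: (26,) 0/1 flags -> (24,3) SPIN part array.
    part = np.zeros([24, 3])
    part[POSEIDX_SPIN24, :2] = gt_skel[POSEIDX_TOTAL26]
    part[POSEIDX_SPIN24, 2] = 1 * np.asarray(gt_validity)[POSEIDX_TOTAL26]
    return part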